index int64 0 0 | repo_id stringlengths 26 205 | file_path stringlengths 51 246 | content stringlengths 8 433k | __index_level_0__ int64 0 10k |
|---|---|---|---|---|
0 | Create_ds/eureka/eureka-core/src/main/java/com/netflix | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka/RateLimitingFilter.java | /*
* Copyright 2014 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.eureka;
import javax.inject.Inject;
import javax.inject.Singleton;
import javax.servlet.Filter;
import javax.servlet.FilterChain;
import javax.servlet.FilterConfig;
import javax.servlet.ServletException;
import javax.servlet.ServletRequest;
import javax.servlet.ServletResponse;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import java.io.IOException;
import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;
import java.util.concurrent.TimeUnit;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import com.netflix.appinfo.AbstractEurekaIdentity;
import com.netflix.appinfo.EurekaClientIdentity;
import com.netflix.eureka.util.EurekaMonitors;
import com.netflix.discovery.util.RateLimiter;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Rate limiting filter, with configurable threshold above which non-privileged clients
 * will be dropped. This feature enables cutting off non-standard and potentially harmful clients
 * in case of system overload. Since it is critical to always allow client registrations and heartbeats into
 * the system, which at the same time are relatively cheap operations, the rate limiting is applied only to
 * full and delta registry fetches. Furthermore, since delta fetches are much smaller than full fetches,
 * and if not served may result in following full registry fetch from the client, they have relatively
 * higher priority. This is implemented by two parallel rate limiters, one for overall number of
 * full/delta fetches (higher threshold) and one for full fetches only (low threshold).
 * <p>
 * The client is identified by {@link AbstractEurekaIdentity#AUTH_NAME_HEADER_KEY} HTTP header
 * value. The privileged group by default contains:
 * <ul>
 * <li>
 *   {@link EurekaClientIdentity#DEFAULT_CLIENT_NAME} - standard Java eureka-client. Applications using
 *   this client automatically belong to the privileged group.
 * </li>
 * <li>
 *   {@link com.netflix.eureka.EurekaServerIdentity#DEFAULT_SERVER_NAME} - connections from peer Eureka servers
 *   (internal only, traffic replication)
 * </li>
 * </ul>
 * It is possible to turn off privileged client filtering via
 * {@link EurekaServerConfig#isRateLimiterThrottleStandardClients()} property.
 * <p>
 * Rate limiting is not enabled by default, but can be turned on via configuration. Even when disabled,
 * the throttling statistics are still counted, although on a separate counter, so it is possible to
 * measure the impact of this feature before activation.
 *
 * <p>
 * Rate limiter implementation is based on token bucket algorithm. There are two configurable
 * parameters:
 * <ul>
 * <li>
 *   burst size - maximum number of requests allowed into the system as a burst
 * </li>
 * <li>
 *   average rate - expected number of requests per second
 * </li>
 * </ul>
 *
 * @author Tomasz Bak
 */
@Singleton
public class RateLimitingFilter implements Filter {

    private static final Logger logger = LoggerFactory.getLogger(RateLimitingFilter.class);

    // Clients that are admitted unconditionally unless
    // EurekaServerConfig#isRateLimiterThrottleStandardClients() is turned on:
    // the standard Java eureka-client and peer Eureka servers (replication traffic).
    private static final Set<String> DEFAULT_PRIVILEGED_CLIENTS = new HashSet<>(
            Arrays.asList(EurekaClientIdentity.DEFAULT_CLIENT_NAME, EurekaServerIdentity.DEFAULT_SERVER_NAME)
    );

    // Matches any URI whose path ends with "/apps" optionally followed by one more
    // path segment. group(1) discriminates the request type: null or "/" => full
    // fetch, "/delta" => delta fetch, anything else => single-application fetch.
    private static final Pattern TARGET_RE = Pattern.compile("^.*/apps(/[^/]*)?$");

    // Classification of an incoming request for rate-limiting purposes.
    enum Target {FullFetch, DeltaFetch, Application, Other}

    /**
     * Includes both full and delta fetches.
     * Static, so the budget is shared by all instances of this filter in the JVM.
     */
    private static final RateLimiter registryFetchRateLimiter = new RateLimiter(TimeUnit.SECONDS);

    /**
     * Only full registry fetches.
     * Static, so the budget is shared by all instances of this filter in the JVM.
     */
    private static final RateLimiter registryFullFetchRateLimiter = new RateLimiter(TimeUnit.SECONDS);

    private EurekaServerConfig serverConfig;

    @Inject
    public RateLimitingFilter(EurekaServerContext server) {
        this.serverConfig = server.getServerConfig();
    }

    // for non-DI use
    public RateLimitingFilter() {
    }

    /**
     * When constructed via the no-arg (non-DI) constructor, lazily resolves the
     * server config from the {@link EurekaServerContext} published as a servlet
     * context attribute (see EurekaBootStrap).
     */
    @Override
    public void init(FilterConfig filterConfig) throws ServletException {
        if (serverConfig == null) {
            EurekaServerContext serverContext = (EurekaServerContext) filterConfig.getServletContext()
                    .getAttribute(EurekaServerContext.class.getName());
            serverConfig = serverContext.getServerConfig();
        }
    }

    /**
     * Classifies the request; non-fetch traffic passes through untouched. For fetch
     * traffic, tokens are consumed (and statistics counted) even when the rate
     * limiter feature is disabled, so its impact can be measured before activation;
     * requests are actually rejected with 503 only when the feature is enabled.
     */
    @Override
    public void doFilter(ServletRequest request, ServletResponse response, FilterChain chain) throws IOException, ServletException {
        Target target = getTarget(request);
        if (target == Target.Other) {
            chain.doFilter(request, response);
            return;
        }

        HttpServletRequest httpRequest = (HttpServletRequest) request;
        if (isRateLimited(httpRequest, target)) {
            incrementStats(target);
            if (serverConfig.isRateLimiterEnabled()) {
                ((HttpServletResponse) response).setStatus(HttpServletResponse.SC_SERVICE_UNAVAILABLE);
                return;
            }
        }
        chain.doFilter(request, response);
    }

    /**
     * Maps a request to a {@link Target}. Only GET requests whose URI matches
     * {@link #TARGET_RE} are classified; everything else is {@link Target#Other}.
     */
    private static Target getTarget(ServletRequest request) {
        Target target = Target.Other;
        if (request instanceof HttpServletRequest) {
            HttpServletRequest httpRequest = (HttpServletRequest) request;
            // NOTE(review): despite the name, this holds the full request URI
            // (including any context path); the leading "^.*" in TARGET_RE absorbs it.
            String pathInfo = httpRequest.getRequestURI();

            if ("GET".equals(httpRequest.getMethod()) && pathInfo != null) {
                Matcher matcher = TARGET_RE.matcher(pathInfo);
                if (matcher.matches()) {
                    // NOTE(review): groupCount() is always 1 for TARGET_RE (it declares one
                    // group), so the groupCount() == 0 clause is dead; the classification is
                    // decided by group(1) being null/"/" (full), "/delta" (delta), or other.
                    if (matcher.groupCount() == 0 || matcher.group(1) == null || "/".equals(matcher.group(1))) {
                        target = Target.FullFetch;
                    } else if ("/delta".equals(matcher.group(1))) {
                        target = Target.DeltaFetch;
                    } else {
                        target = Target.Application;
                    }
                }
            }
            if (target == Target.Other) {
                logger.debug("URL path {} not matched by rate limiting filter", pathInfo);
            }
        }
        return target;
    }

    /**
     * Returns true when the request should be throttled: privileged clients are
     * never rate limited; all other requests are limited when the token buckets
     * are exhausted (see {@link #isOverloaded(Target)}).
     */
    private boolean isRateLimited(HttpServletRequest request, Target target) {
        if (isPrivileged(request)) {
            logger.debug("Privileged {} request", target);
            return false;
        }
        if (isOverloaded(target)) {
            logger.debug("Overloaded {} request; discarding it", target);
            return true;
        }
        logger.debug("{} request admitted", target);
        return false;
    }

    /**
     * A client is privileged when its AUTH_NAME header is in the configured or the
     * default privileged set. When standard-client throttling is enabled, nobody
     * is treated as privileged.
     */
    private boolean isPrivileged(HttpServletRequest request) {
        if (serverConfig.isRateLimiterThrottleStandardClients()) {
            return false;
        }
        Set<String> privilegedClients = serverConfig.getRateLimiterPrivilegedClients();
        String clientName = request.getHeader(AbstractEurekaIdentity.AUTH_NAME_HEADER_KEY);
        return privilegedClients.contains(clientName) || DEFAULT_PRIVILEGED_CLIENTS.contains(clientName);
    }

    /**
     * Consumes a token from the combined fetch limiter for every classified target
     * (full, delta, and application fetches all share this budget), and additionally
     * from the full-fetch limiter for full fetches only.
     * NOTE(review): the class javadoc mentions only full/delta fetches, but
     * Target.Application requests also draw from the combined limiter here.
     */
    private boolean isOverloaded(Target target) {
        int maxInWindow = serverConfig.getRateLimiterBurstSize();
        int fetchWindowSize = serverConfig.getRateLimiterRegistryFetchAverageRate();
        boolean overloaded = !registryFetchRateLimiter.acquire(maxInWindow, fetchWindowSize);

        if (target == Target.FullFetch) {
            int fullFetchWindowSize = serverConfig.getRateLimiterFullFetchAverageRate();
            // |= so the combined-limiter token above is consumed regardless.
            overloaded |= !registryFullFetchRateLimiter.acquire(maxInWindow, fullFetchWindowSize);
        }
        return overloaded;
    }

    /**
     * Records a throttling event. When the feature is enabled the "rate limited"
     * counters are bumped; when disabled, separate "candidate" counters are used so
     * the would-be impact of enabling the limiter can be measured.
     */
    private void incrementStats(Target target) {
        if (serverConfig.isRateLimiterEnabled()) {
            EurekaMonitors.RATE_LIMITED.increment();
            if (target == Target.FullFetch) {
                EurekaMonitors.RATE_LIMITED_FULL_FETCH.increment();
            }
        } else {
            EurekaMonitors.RATE_LIMITED_CANDIDATES.increment();
            if (target == Target.FullFetch) {
                EurekaMonitors.RATE_LIMITED_FULL_FETCH_CANDIDATES.increment();
            }
        }
    }

    @Override
    public void destroy() {
        // No resources to release; the static rate limiters live for the JVM lifetime.
    }

    // For testing purposes: clears the shared (static) token buckets between tests.
    static void reset() {
        registryFetchRateLimiter.reset();
        registryFullFetchRateLimiter.reset();
    }
}
| 6,900 |
0 | Create_ds/eureka/eureka-core/src/main/java/com/netflix | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka/EurekaBootStrap.java | /*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.eureka;
import javax.servlet.ServletContext;
import javax.servlet.ServletContextEvent;
import javax.servlet.ServletContextListener;
import java.util.Date;
import com.netflix.appinfo.ApplicationInfoManager;
import com.netflix.appinfo.CloudInstanceConfig;
import com.netflix.appinfo.DataCenterInfo;
import com.netflix.appinfo.EurekaInstanceConfig;
import com.netflix.appinfo.InstanceInfo;
import com.netflix.appinfo.MyDataCenterInstanceConfig;
import com.netflix.appinfo.providers.EurekaConfigBasedInstanceInfoProvider;
import com.netflix.config.ConfigurationManager;
import com.netflix.config.DeploymentContext;
import com.netflix.discovery.DefaultEurekaClientConfig;
import com.netflix.discovery.DiscoveryClient;
import com.netflix.discovery.EurekaClient;
import com.netflix.discovery.EurekaClientConfig;
import com.netflix.discovery.converters.JsonXStream;
import com.netflix.discovery.converters.XmlXStream;
import com.netflix.eureka.aws.AwsBinder;
import com.netflix.eureka.aws.AwsBinderDelegate;
import com.netflix.eureka.cluster.PeerEurekaNodes;
import com.netflix.eureka.registry.AwsInstanceRegistry;
import com.netflix.eureka.registry.PeerAwareInstanceRegistry;
import com.netflix.eureka.registry.PeerAwareInstanceRegistryImpl;
import com.netflix.eureka.resources.DefaultServerCodecs;
import com.netflix.eureka.resources.ServerCodecs;
import com.netflix.eureka.util.EurekaMonitors;
import com.thoughtworks.xstream.XStream;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * The class that kick starts the eureka server.
 *
 * <p>
 * The eureka server is configured by using the configuration
 * {@link EurekaServerConfig} specified by <em>eureka.server.props</em> in the
 * classpath. The eureka client component is also initialized by using the
 * configuration {@link EurekaInstanceConfig} specified by
 * <em>eureka.client.props</em>. If the server runs in the AWS cloud, the eureka
 * server binds it to the elastic ip as specified.
 * </p>
 *
 * @author Karthik Ranganathan, Greg Kim, David Liu
 *
 */
public class EurekaBootStrap implements ServletContextListener {
    private static final Logger logger = LoggerFactory.getLogger(EurekaBootStrap.class);

    private static final String TEST = "test";

    private static final String ARCHAIUS_DEPLOYMENT_ENVIRONMENT = "archaius.deployment.environment";

    private static final String EUREKA_ENVIRONMENT = "eureka.environment";

    private static final String CLOUD = "cloud";
    private static final String DEFAULT = "default";

    private static final String ARCHAIUS_DEPLOYMENT_DATACENTER = "archaius.deployment.datacenter";

    private static final String EUREKA_DATACENTER = "eureka.datacenter";

    // volatile: written during contextInitialized and read in contextDestroyed,
    // so the published references are visible across threads.
    protected volatile EurekaServerContext serverContext;
    protected volatile AwsBinder awsBinder;

    // Either supplied by the caller (constructor) or created lazily in
    // initEurekaServerContext().
    private EurekaClient eurekaClient;

    /**
     * Construct a default instance of Eureka bootstrap
     */
    public EurekaBootStrap() {
        this(null);
    }

    /**
     * Construct an instance of eureka bootstrap with the supplied eureka client
     *
     * @param eurekaClient the eureka client to bootstrap
     */
    public EurekaBootStrap(EurekaClient eurekaClient) {
        this.eurekaClient = eurekaClient;
    }

    /**
     * Initializes Eureka, including syncing up with other Eureka peers and publishing the registry.
     * Environment setup runs before server-context setup; the fully initialized
     * {@link EurekaServerContext} is then published as a servlet context attribute
     * for filters/resources to look up. Any failure aborts deployment by rethrowing
     * as a RuntimeException.
     *
     * @see
     * javax.servlet.ServletContextListener#contextInitialized(javax.servlet.ServletContextEvent)
     */
    @Override
    public void contextInitialized(ServletContextEvent event) {
        try {
            initEurekaEnvironment();
            initEurekaServerContext();

            ServletContext sc = event.getServletContext();
            sc.setAttribute(EurekaServerContext.class.getName(), serverContext);
        } catch (Throwable e) {
            logger.error("Cannot bootstrap eureka server :", e);
            throw new RuntimeException("Cannot bootstrap eureka server :", e);
        }
    }

    /**
     * Users can override to initialize the environment themselves.
     * Propagates eureka.datacenter / eureka.environment into the corresponding
     * Archaius deployment properties, defaulting to "default" / "test".
     */
    protected void initEurekaEnvironment() throws Exception {
        logger.info("Setting the eureka configuration..");

        String dataCenter = ConfigurationManager.getConfigInstance().getString(EUREKA_DATACENTER);
        if (dataCenter == null) {
            logger.info("Eureka data center value eureka.datacenter is not set, defaulting to default");
            ConfigurationManager.getConfigInstance().setProperty(ARCHAIUS_DEPLOYMENT_DATACENTER, DEFAULT);
        } else {
            ConfigurationManager.getConfigInstance().setProperty(ARCHAIUS_DEPLOYMENT_DATACENTER, dataCenter);
        }
        String environment = ConfigurationManager.getConfigInstance().getString(EUREKA_ENVIRONMENT);
        if (environment == null) {
            ConfigurationManager.getConfigInstance().setProperty(ARCHAIUS_DEPLOYMENT_ENVIRONMENT, TEST);
            logger.info("Eureka environment value eureka.environment is not set, defaulting to test");
        }
    }

    /**
     * init hook for server context. Override for custom logic.
     * Wires config, codecs, the (possibly lazily created) eureka client, the
     * AWS- or default instance registry, and the peer-node group into a
     * DefaultEurekaServerContext; then syncs the registry from peers before
     * opening for traffic and registering monitoring statistics.
     */
    protected void initEurekaServerContext() throws Exception {
        EurekaServerConfig eurekaServerConfig = new DefaultEurekaServerConfig();

        // For backward compatibility
        JsonXStream.getInstance().registerConverter(new V1AwareInstanceInfoConverter(), XStream.PRIORITY_VERY_HIGH);
        XmlXStream.getInstance().registerConverter(new V1AwareInstanceInfoConverter(), XStream.PRIORITY_VERY_HIGH);

        logger.info("Initializing the eureka client...");
        logger.info(eurekaServerConfig.getJsonCodecName());
        ServerCodecs serverCodecs = new DefaultServerCodecs(eurekaServerConfig);

        ApplicationInfoManager applicationInfoManager = null;

        // Create a DiscoveryClient only when none was supplied via the constructor.
        if (eurekaClient == null) {
            EurekaInstanceConfig instanceConfig = isCloud(ConfigurationManager.getDeploymentContext())
                    ? new CloudInstanceConfig()
                    : new MyDataCenterInstanceConfig();

            applicationInfoManager = new ApplicationInfoManager(
                    instanceConfig, new EurekaConfigBasedInstanceInfoProvider(instanceConfig).get());

            EurekaClientConfig eurekaClientConfig = new DefaultEurekaClientConfig();
            eurekaClient = new DiscoveryClient(applicationInfoManager, eurekaClientConfig);
        } else {
            applicationInfoManager = eurekaClient.getApplicationInfoManager();
        }

        PeerAwareInstanceRegistry registry;
        if (isAws(applicationInfoManager.getInfo())) {
            // AWS deployment: registry plus EIP/Route53 binding via the AwsBinder.
            registry = new AwsInstanceRegistry(
                    eurekaServerConfig,
                    eurekaClient.getEurekaClientConfig(),
                    serverCodecs,
                    eurekaClient
            );
            awsBinder = new AwsBinderDelegate(eurekaServerConfig, eurekaClient.getEurekaClientConfig(), registry, applicationInfoManager);
            awsBinder.start();
        } else {
            registry = new PeerAwareInstanceRegistryImpl(
                    eurekaServerConfig,
                    eurekaClient.getEurekaClientConfig(),
                    serverCodecs,
                    eurekaClient
            );
        }

        PeerEurekaNodes peerEurekaNodes = getPeerEurekaNodes(
                registry,
                eurekaServerConfig,
                eurekaClient.getEurekaClientConfig(),
                serverCodecs,
                applicationInfoManager
        );

        serverContext = new DefaultEurekaServerContext(
                eurekaServerConfig,
                serverCodecs,
                registry,
                peerEurekaNodes,
                applicationInfoManager
        );

        // Publish via the static holder for non-DI lookups, then initialize.
        EurekaServerContextHolder.initialize(serverContext);

        serverContext.initialize();
        logger.info("Initialized server context");

        // Copy registry from neighboring eureka node; the synced instance count is
        // then handed to openForTraffic.
        int registryCount = registry.syncUp();
        registry.openForTraffic(applicationInfoManager, registryCount);

        // Register all monitoring statistics.
        EurekaMonitors.registerAllStats();
    }

    /**
     * Factory hook for the peer-node group; override to supply a custom
     * {@link PeerEurekaNodes} implementation.
     */
    protected PeerEurekaNodes getPeerEurekaNodes(PeerAwareInstanceRegistry registry, EurekaServerConfig eurekaServerConfig, EurekaClientConfig eurekaClientConfig, ServerCodecs serverCodecs, ApplicationInfoManager applicationInfoManager) {
        PeerEurekaNodes peerEurekaNodes = new PeerEurekaNodes(
                registry,
                eurekaServerConfig,
                eurekaClientConfig,
                serverCodecs,
                applicationInfoManager
        );

        return peerEurekaNodes;
    }

    /**
     * Handles Eureka cleanup, including shutting down all monitors and yielding all EIPs.
     * Errors during shutdown are logged but intentionally not rethrown so the
     * container can finish undeploying.
     *
     * @see javax.servlet.ServletContextListener#contextDestroyed(javax.servlet.ServletContextEvent)
     */
    @Override
    public void contextDestroyed(ServletContextEvent event) {
        try {
            logger.info("{} Shutting down Eureka Server..", new Date());
            ServletContext sc = event.getServletContext();
            sc.removeAttribute(EurekaServerContext.class.getName());

            destroyEurekaServerContext();
            destroyEurekaEnvironment();

        } catch (Throwable e) {
            logger.error("Error shutting down eureka", e);
        }
        logger.info("{} Eureka Service is now shutdown...", new Date());
    }

    /**
     * Server context shutdown hook. Override for custom logic
     */
    protected void destroyEurekaServerContext() throws Exception {
        EurekaMonitors.shutdown();
        if (awsBinder != null) {
            awsBinder.shutdown();
        }
        if (serverContext != null) {
            serverContext.shutdown();
        }
    }

    /**
     * Users can override to clean up the environment themselves.
     */
    protected void destroyEurekaEnvironment() throws Exception {
        // Default: nothing to clean up.
    }

    /**
     * @return true when this instance reports the Amazon data center in its
     *         {@link InstanceInfo}.
     */
    protected boolean isAws(InstanceInfo selfInstanceInfo) {
        boolean result = DataCenterInfo.Name.Amazon == selfInstanceInfo.getDataCenterInfo().getName();
        logger.info("isAws returned {}", result);
        return result;
    }

    /**
     * @return true when the Archaius deployment datacenter is "cloud".
     */
    protected boolean isCloud(DeploymentContext deploymentContext) {
        logger.info("Deployment datacenter is {}", deploymentContext.getDeploymentDatacenter());
        return CLOUD.equals(deploymentContext.getDeploymentDatacenter());
    }
}
| 6,901 |
0 | Create_ds/eureka/eureka-core/src/main/java/com/netflix | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka/Names.java | /*
* Copyright 2015 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.eureka;
/**
 * Shared metric-name constants for the Eureka server.
 *
 * @author Tomasz Bak
 */
public class Names {

    /**
     * Eureka metric names consist of three parts [source].[component].[detailed name]:
     * <ul>
     * <li>source - fixed to eurekaServer (and eurekaClient on the client side)</li>
     * <li>component - Eureka component, like REST layer, replication, etc</li>
     * <li>detailed name - a detailed metric name explaining its purpose</li>
     * </ul>
     */
    public static final String METRIC_PREFIX = "eurekaServer.";

    /** Prefix for replication-component metrics. */
    public static final String METRIC_REPLICATION_PREFIX = METRIC_PREFIX + "replication.";

    /** Prefix for registry-component metrics. */
    public static final String METRIC_REGISTRY_PREFIX = METRIC_PREFIX + "registry.";

    public static final String REMOTE = "remote";

    // Constants-only holder; prevent instantiation (Effective Java, Item 4).
    private Names() {
    }
}
| 6,902 |
0 | Create_ds/eureka/eureka-core/src/main/java/com/netflix | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka/ServerRequestAuthFilter.java | package com.netflix.eureka;
import javax.inject.Inject;
import javax.inject.Singleton;
import javax.servlet.Filter;
import javax.servlet.FilterChain;
import javax.servlet.FilterConfig;
import javax.servlet.ServletException;
import javax.servlet.ServletRequest;
import javax.servlet.ServletResponse;
import javax.servlet.http.HttpServletRequest;
import java.io.IOException;
import com.google.common.base.Strings;
import com.netflix.appinfo.AbstractEurekaIdentity;
import com.netflix.servo.monitor.DynamicCounter;
import com.netflix.servo.monitor.MonitorConfig;
/**
 * An auth filter for client requests. For now, it only logs supported client identification data from header info
 */
@Singleton
public class ServerRequestAuthFilter implements Filter {
    /** Placeholder used when an identity header is missing or blank. */
    public static final String UNKNOWN = "unknown";

    // Counter-name prefix; full name is prefix + clientName + "-" + clientVersion.
    private static final String NAME_PREFIX = "DiscoveryServerRequestAuth_Name_";

    private EurekaServerConfig serverConfig;

    @Inject
    public ServerRequestAuthFilter(EurekaServerContext server) {
        this.serverConfig = server.getServerConfig();
    }

    // for non-DI use
    public ServerRequestAuthFilter() {
    }

    /**
     * If no config was injected (non-DI construction), pull it from the
     * EurekaServerContext stored as a servlet-context attribute.
     */
    @Override
    public void init(FilterConfig filterConfig) throws ServletException {
        if (serverConfig != null) {
            return;
        }
        EurekaServerContext serverContext = (EurekaServerContext) filterConfig.getServletContext()
                .getAttribute(EurekaServerContext.class.getName());
        serverConfig = serverContext.getServerConfig();
    }

    /**
     * Records the caller's identity headers (when enabled) and always passes the
     * request down the chain unchanged.
     */
    @Override
    public void doFilter(ServletRequest request, ServletResponse response, FilterChain chain)
            throws IOException, ServletException {
        logAuth(request);
        chain.doFilter(request, response);
    }

    @Override
    public void destroy() {
        // nothing to do here
    }

    /**
     * Bumps a per-(client name, client version) dynamic counter, keyed by the
     * Eureka identity headers, when identity logging is enabled.
     */
    protected void logAuth(ServletRequest request) {
        if (!serverConfig.shouldLogIdentityHeaders()) {
            return;
        }
        if (!(request instanceof HttpServletRequest)) {
            return;
        }
        HttpServletRequest httpRequest = (HttpServletRequest) request;
        String identityName = getHeader(httpRequest, AbstractEurekaIdentity.AUTH_NAME_HEADER_KEY);
        String identityVersion = getHeader(httpRequest, AbstractEurekaIdentity.AUTH_VERSION_HEADER_KEY);
        MonitorConfig counterConfig = MonitorConfig.builder(NAME_PREFIX + identityName + "-" + identityVersion).build();
        DynamicCounter.increment(counterConfig);
    }

    /**
     * @return the header value, or {@link #UNKNOWN} when it is null or empty.
     */
    protected String getHeader(HttpServletRequest request, String headerKey) {
        String headerValue = request.getHeader(headerKey);
        if (Strings.isNullOrEmpty(headerValue)) {
            return UNKNOWN;
        }
        return headerValue;
    }
}
| 6,903 |
0 | Create_ds/eureka/eureka-core/src/main/java/com/netflix | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka/DefaultEurekaServerContext.java | /*
* Copyright 2015 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.eureka;
import com.netflix.appinfo.ApplicationInfoManager;
import com.netflix.discovery.DiscoveryManager;
import com.netflix.eureka.cluster.PeerEurekaNodes;
import com.netflix.eureka.registry.PeerAwareInstanceRegistry;
import com.netflix.eureka.resources.ServerCodecs;
import com.netflix.eureka.util.EurekaMonitors;
import com.netflix.eureka.util.ServoControl;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.annotation.PostConstruct;
import javax.annotation.PreDestroy;
import javax.inject.Inject;
import javax.inject.Singleton;
/**
 * Represent the local server context and exposes getters to components of the
 * local server such as the registry.
 *
 * @author David Liu
 */
@Singleton
public class DefaultEurekaServerContext implements EurekaServerContext {
    private static final Logger logger = LoggerFactory.getLogger(DefaultEurekaServerContext.class);

    // All collaborators are injected once and never replaced.
    private final EurekaServerConfig serverConfig;
    private final ServerCodecs serverCodecs;
    private final PeerAwareInstanceRegistry registry;
    private final PeerEurekaNodes peerEurekaNodes;
    private final ApplicationInfoManager applicationInfoManager;

    @Inject
    public DefaultEurekaServerContext(EurekaServerConfig serverConfig,
                                      ServerCodecs serverCodecs,
                                      PeerAwareInstanceRegistry registry,
                                      PeerEurekaNodes peerEurekaNodes,
                                      ApplicationInfoManager applicationInfoManager) {
        this.serverConfig = serverConfig;
        this.serverCodecs = serverCodecs;
        this.registry = registry;
        this.peerEurekaNodes = peerEurekaNodes;
        this.applicationInfoManager = applicationInfoManager;
    }

    /**
     * Starts the peer-node group first, then initializes the registry with it.
     * Registry failures are rethrown unchecked to abort startup.
     */
    @PostConstruct
    @Override
    public void initialize() {
        logger.info("Initializing ...");
        peerEurekaNodes.start();
        try {
            registry.init(peerEurekaNodes);
        } catch (Exception cause) {
            throw new RuntimeException(cause);
        }
        logger.info("Initialized");
    }

    /**
     * Tears components down in reverse dependency order: registry, peer nodes,
     * then the Servo and Eureka monitoring subsystems.
     */
    @PreDestroy
    @Override
    public void shutdown() {
        logger.info("Shutting down ...");
        registry.shutdown();
        peerEurekaNodes.shutdown();
        ServoControl.shutdown();
        EurekaMonitors.shutdown();
        logger.info("Shut down");
    }

    @Override
    public PeerAwareInstanceRegistry getRegistry() {
        return registry;
    }

    @Override
    public PeerEurekaNodes getPeerEurekaNodes() {
        return peerEurekaNodes;
    }

    @Override
    public EurekaServerConfig getServerConfig() {
        return serverConfig;
    }

    @Override
    public ServerCodecs getServerCodecs() {
        return serverCodecs;
    }

    @Override
    public ApplicationInfoManager getApplicationInfoManager() {
        return applicationInfoManager;
    }
}
| 6,904 |
0 | Create_ds/eureka/eureka-core/src/main/java/com/netflix | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka/EurekaServerConfig.java | /*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.eureka;
import java.util.Map;
import java.util.Set;
import javax.annotation.Nullable;
import com.netflix.eureka.aws.AwsBindingStrategy;
/**
* Configuration information required by the eureka server to operate.
*
* <p>
* Most of the required information is provided by the default configuration
* {@link com.netflix.eureka.DefaultEurekaServerConfig}.
*
* Note that all configurations are not effective at runtime unless and
* otherwise specified.
* </p>
*
* @author Karthik Ranganathan
*
*/
public interface EurekaServerConfig {
/**
* Gets the <em>AWS Access Id</em>. This is primarily used for
* <em>Elastic IP Binding</em>. The access id should be provided with
* appropriate AWS permissions to bind the EIP.
*
* @return
*/
String getAWSAccessId();
/**
* Gets the <em>AWS Secret Key</em>. This is primarily used for
* <em>Elastic IP Binding</em>. The access id should be provided with
* appropriate AWS permissions to bind the EIP.
*
* @return
*/
String getAWSSecretKey();
/**
* Gets the number of times the server should try to bind to the candidate
* EIP.
*
* <p>
* <em>The changes are effective at runtime.</em>
* </p>
*
* @return the number of times the server should try to bind to the
* candidate EIP.
*/
int getEIPBindRebindRetries();
/**
* Get the interval with which the server should check if the EIP is bound
* and should try to bind in the case if it is already not bound, iff the EIP
* is not currently bound.
* <p>
* <em>The changes are effective at runtime.</em>
* </p>
*
* @return the time in milliseconds.
*/
int getEIPBindingRetryIntervalMsWhenUnbound();
/**
* Gets the interval with which the server should check if the EIP is bound
* and should try to bind in the case if it is already not bound, iff the EIP
* is already bound. (so this refresh is just for steady state checks)
* <p>
* <em>The changes are effective at runtime.</em>
* </p>
*
* @return the time in milliseconds.
*/
int getEIPBindingRetryIntervalMs();
/**
* Checks to see if the eureka server is enabled for self preservation.
*
* <p>
* When enabled, the server keeps track of the number of <em>renewals</em>
* it should receive from the server. Any time, the number of renewals drops
* below the threshold percentage as defined by
* {@link #getRenewalPercentThreshold()}, the server turns off expirations
* to avert danger.This will help the server in maintaining the registry
* information in case of network problems between client and the server.
* <p>
* <em>The changes are effective at runtime.</em>
* </p>
*
* @return true to enable self preservation, false otherwise.
*/
boolean shouldEnableSelfPreservation();
/**
* The minimum percentage of renewals that is expected from the clients in
* the period specified by {@link #getRenewalThresholdUpdateIntervalMs()}.
* If the renewals drop below the threshold, the expirations are disabled if
* the {@link #shouldEnableSelfPreservation()} is enabled.
*
* <p>
* <em>The changes are effective at runtime.</em>
* </p>
*
* @return value between 0 and 1 indicating the percentage. For example,
* <code>85%</code> will be specified as <code>0.85</code>.
*/
double getRenewalPercentThreshold();
/**
* The interval with which the threshold as specified in
* {@link #getRenewalPercentThreshold()} needs to be updated.
*
* @return time in milliseconds indicating the interval.
*/
int getRenewalThresholdUpdateIntervalMs();
/**
* The interval with which clients are expected to send their heartbeats. Defaults to 30
* seconds. If clients send heartbeats with different frequency, say, every 15 seconds, then
* this parameter should be tuned accordingly, otherwise, self-preservation won't work as
* expected.
*
* @return time in seconds indicating the expected interval
*/
int getExpectedClientRenewalIntervalSeconds();
/**
* The interval with which the information about the changes in peer eureka
* nodes is updated. The user can use the DNS mechanism or dynamic
* configuration provided by <a href="https://github.com/Netflix/archaius">Archaius</a> to
* change the information dynamically.
* <p>
* <em>The changes are effective at runtime.</em>
* </p>
*
* @return timer in milliseconds indicating the interval.
*/
int getPeerEurekaNodesUpdateIntervalMs();
/**
* If set to true, the replicated data send in the request will be always compressed.
* This does not define response path, which is driven by "Accept-Encoding" header.
*/
boolean shouldEnableReplicatedRequestCompression();
/**
* Get the number of times the replication events should be retried with
* peers.
* <p>
* <em>The changes are effective at runtime.</em>
* </p>
*
* @return the number of retries.
*/
int getNumberOfReplicationRetries();
/**
* Gets the interval with which the status information about peer nodes is
* updated.
* <p>
* <em>The changes are effective at runtime.</em>
* </p>
*
* @return time in milliseconds indicating the interval.
*/
int getPeerEurekaStatusRefreshTimeIntervalMs();
/**
* Gets the time to wait when the eureka server starts up unable to get
* instances from peer nodes. It is better not to start serving rightaway
* during these scenarios as the information that is stored in the registry
* may not be complete.
*
* When the instance registry starts up empty, it builds over time when the
* clients start to send heartbeats and the server requests the clients for
* registration information.
*
* @return time in milliseconds.
*/
int getWaitTimeInMsWhenSyncEmpty();
/**
* Gets the timeout value for connecting to peer eureka nodes for
* replication.
*
* @return timeout value in milliseconds.
*/
int getPeerNodeConnectTimeoutMs();
/**
* Gets the timeout value for reading information from peer eureka nodes for
* replication.
*
* @return timeout value in milliseconds.
*/
int getPeerNodeReadTimeoutMs();
/**
* Gets the total number of <em>HTTP</em> connections allowed to peer eureka
* nodes for replication.
*
* @return total number of allowed <em>HTTP</em> connections.
*/
int getPeerNodeTotalConnections();
/**
* Gets the total number of <em>HTTP</em> connections allowed to a
* particular peer eureka node for replication.
*
* @return total number of allowed <em>HTTP</em> connections for a peer
* node.
*/
int getPeerNodeTotalConnectionsPerHost();
/**
* Gets the idle time after which the <em>HTTP</em> connection should be
* cleaned up.
*
* @return idle time in seconds.
*/
int getPeerNodeConnectionIdleTimeoutSeconds();
/**
* Get the time for which the delta information should be cached for the
* clients to retrieve the value without missing it.
*
* @return time in milliseconds
*/
long getRetentionTimeInMSInDeltaQueue();
/**
* Get the time interval with which the clean up task should wake up and
* check for expired delta information.
*
* @return time in milliseconds.
*/
long getDeltaRetentionTimerIntervalInMs();
/**
* Get the time interval with which the task that expires instances should
* wake up and run.
*
* @return time in milliseconds.
*/
long getEvictionIntervalTimerInMs();
/**
* Whether to use AWS API to query ASG statuses.
*
* @return true if AWS API is used, false otherwise.
*/
boolean shouldUseAwsAsgApi();
/**
* Get the timeout value for querying the <em>AWS</em> for <em>ASG</em>
* information.
*
* @return timeout value in milliseconds.
*/
int getASGQueryTimeoutMs();
/**
* Get the time interval with which the <em>ASG</em> information must be
* queried from <em>AWS</em>.
*
* @return time in milliseconds.
*/
long getASGUpdateIntervalMs();
/**
* Get the expiration value for the cached <em>ASG</em> information
*
* @return time in milliseconds.
*/
long getASGCacheExpiryTimeoutMs();
/**
* Gets the time for which the registry payload should be kept in the cache
* if it is not invalidated by change events.
*
* @return time in seconds.
*/
long getResponseCacheAutoExpirationInSeconds();
/**
* Gets the time interval with which the payload cache of the client should
* be updated.
*
* @return time in milliseconds.
*/
long getResponseCacheUpdateIntervalMs();
/**
* The {@link com.netflix.eureka.registry.ResponseCache} currently uses a two level caching
 * strategy for responses: a readWrite cache with an expiration policy, and a readonly cache
* that caches without expiry.
*
* @return true if the read only cache is to be used
*/
boolean shouldUseReadOnlyResponseCache();
/**
* Checks to see if the delta information can be served to client or not.
* <p>
* <em>The changes are effective at runtime.</em>
* </p>
*
* @return true if the delta information is allowed to be served, false
* otherwise.
*/
boolean shouldDisableDelta();
/**
* Get the idle time for which the status replication threads can stay
* alive.
*
* @return time in minutes.
*/
long getMaxIdleThreadInMinutesAgeForStatusReplication();
/**
* Get the minimum number of threads to be used for status replication.
*
* @return minimum number of threads to be used for status replication.
*/
int getMinThreadsForStatusReplication();
/**
* Get the maximum number of threads to be used for status replication.
*
* @return maximum number of threads to be used for status replication.
*/
int getMaxThreadsForStatusReplication();
/**
* Get the maximum number of replication events that can be allowed to back
* up in the status replication pool.
* <p>
* Depending on the memory allowed, timeout and the replication traffic,
* this value can vary.
* </p>
*
* @return the maximum number of replication events that can be allowed to
* back up.
*/
int getMaxElementsInStatusReplicationPool();
/**
* Checks whether to synchronize instances when timestamp differs.
* <p>
* <em>The changes are effective at runtime.</em>
* </p>
*
* @return true, to synchronize, false otherwise.
*/
boolean shouldSyncWhenTimestampDiffers();
/**
* Get the number of times that a eureka node would try to get the registry
* information from the peers during startup.
*
* @return the number of retries
*/
int getRegistrySyncRetries();
/**
* Get the wait/sleep time between each retry sync attempts, if the prev retry failed and there are
* more retries to attempt.
*
* @return the wait time in ms between each sync retries
*/
long getRegistrySyncRetryWaitMs();
/**
* Get the maximum number of replication events that can be allowed to back
* up in the replication pool. This replication pool is responsible for all
* events except status updates.
* <p>
* Depending on the memory allowed, timeout and the replication traffic,
* this value can vary.
* </p>
*
* @return the maximum number of replication events that can be allowed to
* back up.
*/
int getMaxElementsInPeerReplicationPool();
/**
* Get the idle time for which the replication threads can stay alive.
*
* @return time in minutes.
*/
long getMaxIdleThreadAgeInMinutesForPeerReplication();
/**
* Get the minimum number of threads to be used for replication.
*
* @return minimum number of threads to be used for replication.
*/
int getMinThreadsForPeerReplication();
/**
* Get the maximum number of threads to be used for replication.
*
* @return maximum number of threads to be used for replication.
*/
int getMaxThreadsForPeerReplication();
/**
* Get the minimum number of available peer replication instances
* for this instance to be considered healthy. The design of eureka allows
* for an instance to continue operating with zero peers, but that would not
* be ideal.
* <p>
* The default value of -1 is interpreted as a marker to not compare
* the number of replicas. This would be done to either disable this check
* or to run eureka in a single node configuration.
*
* @return minimum number of available peer replication instances
* for this instance to be considered healthy.
*/
int getHealthStatusMinNumberOfAvailablePeers();
/**
* Get the time in milliseconds to try to replicate before dropping
* replication events.
*
* @return time in milliseconds
*/
int getMaxTimeForReplication();
/**
* Checks whether the connections to replicas should be primed. In AWS, the
* firewall requires sometime to establish network connection for new nodes.
*
* @return true, if connections should be primed, false otherwise.
*/
boolean shouldPrimeAwsReplicaConnections();
/**
* Checks to see if the delta information can be served to client or not for
* remote regions.
* <p>
* <em>The changes are effective at runtime.</em>
* </p>
*
* @return true if the delta information is allowed to be served, false
* otherwise.
*/
boolean shouldDisableDeltaForRemoteRegions();
/**
* Gets the timeout value for connecting to peer eureka nodes for remote
* regions.
*
* @return timeout value in milliseconds.
*/
int getRemoteRegionConnectTimeoutMs();
/**
* Gets the timeout value for reading information from peer eureka nodes for
* remote regions.
*
* @return timeout value in milliseconds.
*/
int getRemoteRegionReadTimeoutMs();
/**
* Gets the total number of <em>HTTP</em> connections allowed to peer eureka
* nodes for remote regions.
*
* @return total number of allowed <em>HTTP</em> connections.
*/
int getRemoteRegionTotalConnections();
/**
* Gets the total number of <em>HTTP</em> connections allowed to a
* particular peer eureka node for remote regions.
*
* @return total number of allowed <em>HTTP</em> connections for a peer
* node.
*/
int getRemoteRegionTotalConnectionsPerHost();
/**
* Gets the idle time after which the <em>HTTP</em> connection should be
* cleaned up for remote regions.
*
* @return idle time in seconds.
*/
int getRemoteRegionConnectionIdleTimeoutSeconds();
/**
* Indicates whether the content fetched from eureka server has to be
* compressed for remote regions whenever it is supported by the server. The
* registry information from the eureka server is compressed for optimum
* network traffic.
*
* @return true, if the content need to be compressed, false otherwise.
*/
boolean shouldGZipContentFromRemoteRegion();
/**
* Get a map of region name against remote region discovery url.
*
* @return - An unmodifiable map of remote region name against remote region discovery url. Empty map if no remote
* region url is defined.
*/
Map<String, String> getRemoteRegionUrlsWithName();
/**
* Get the list of remote region urls.
* @return - array of string representing {@link java.net.URL}s.
* @deprecated Use {@link #getRemoteRegionUrlsWithName()}
*/
String[] getRemoteRegionUrls();
/**
* Returns a list of applications that must be retrieved from the passed remote region. <br/>
* This list can be <code>null</code> which means that no filtering should be applied on the applications
* for this region i.e. all applications must be returned. <br/>
* A global whitelist can also be configured which can be used when no setting is available for a region, such a
* whitelist can be obtained by passing <code>null</code> to this method.
*
* @param regionName Name of the region for which the application whitelist is to be retrieved. If null a global
* setting is returned.
*
* @return A set of application names which must be retrieved from the passed region. If <code>null</code> all
* applications must be retrieved.
*/
@Nullable
Set<String> getRemoteRegionAppWhitelist(@Nullable String regionName);
/**
* Get the time interval for which the registry information need to be fetched from the remote region.
* @return time in seconds.
*/
int getRemoteRegionRegistryFetchInterval();
/**
* Size of a thread pool used to execute remote region registry fetch requests. Delegating these requests
* to internal threads is necessary workaround to https://bugs.openjdk.java.net/browse/JDK-8049846 bug.
*/
int getRemoteRegionFetchThreadPoolSize();
/**
* Gets the fully qualified trust store file that will be used for remote region registry fetches.
* @return
*/
String getRemoteRegionTrustStore();
/**
* Get the remote region trust store's password.
*/
String getRemoteRegionTrustStorePassword();
/**
* Old behavior of fallback to applications in the remote region (if configured) if there are no instances of that
* application in the local region, will be disabled.
*
* @return {@code true} if the old behavior is to be disabled.
*/
boolean disableTransparentFallbackToOtherRegion();
/**
* Indicates whether the replication between cluster nodes should be batched for network efficiency.
* @return {@code true} if the replication needs to be batched.
*/
boolean shouldBatchReplication();
/**
* Allows to configure URL which Eureka should treat as its own during replication. In some cases Eureka URLs don't
* match IP address or hostname (for example, when nodes are behind load balancers). Setting this parameter on each
* node to URLs of associated load balancers helps to avoid replication to the same node where event originally came
* to. Important: you need to configure the whole URL including scheme and path, like
* <code>http://eureka-node1.mydomain.com:8010/eureka/v2/</code>
* @return URL Eureka will treat as its own
*/
String getMyUrl();
/**
* Indicates whether the eureka server should log/metric clientAuthHeaders
* @return {@code true} if the clientAuthHeaders should be logged and/or emitted as metrics
*/
boolean shouldLogIdentityHeaders();
/**
* Indicates whether the rate limiter should be enabled or disabled.
*/
boolean isRateLimiterEnabled();
/**
 * Indicates whether standard clients should be rate limited. If set to false,
 * only non-standard clients will be rate limited.
*/
boolean isRateLimiterThrottleStandardClients();
/**
* A list of certified clients. This is in addition to standard eureka Java clients.
*/
Set<String> getRateLimiterPrivilegedClients();
/**
* Rate limiter, token bucket algorithm property. See also {@link #getRateLimiterRegistryFetchAverageRate()}
* and {@link #getRateLimiterFullFetchAverageRate()}.
*/
int getRateLimiterBurstSize();
/**
* Rate limiter, token bucket algorithm property. Specifies the average enforced request rate.
* See also {@link #getRateLimiterBurstSize()}.
*/
int getRateLimiterRegistryFetchAverageRate();
/**
* Rate limiter, token bucket algorithm property. Specifies the average enforced request rate.
* See also {@link #getRateLimiterBurstSize()}.
*/
int getRateLimiterFullFetchAverageRate();
/**
 * Name of the Role used to describe auto scaling groups from third-party AWS accounts.
*/
String getListAutoScalingGroupsRoleName();
/**
* @return the class name of the full json codec to use for the server. If none set a default codec will be used
*/
String getJsonCodecName();
/**
* @return the class name of the full xml codec to use for the server. If none set a default codec will be used
*/
String getXmlCodecName();
/**
* Get the configured binding strategy EIP or Route53.
* @return the configured binding strategy
*/
AwsBindingStrategy getBindingStrategy();
/**
*
* @return the ttl used to set up the route53 domain if new
*/
long getRoute53DomainTTL();
/**
* Gets the number of times the server should try to bind to the candidate
* Route53 domain.
*
* <p>
* <em>The changes are effective at runtime.</em>
* </p>
*
* @return the number of times the server should try to bind to the
* candidate Route53 domain.
*/
int getRoute53BindRebindRetries();
/**
* Gets the interval with which the server should check if the Route53 domain is bound
* and should try to bind in the case if it is already not bound.
* <p>
* <em>The changes are effective at runtime.</em>
* </p>
*
* @return the time in milliseconds.
*/
int getRoute53BindingRetryIntervalMs();
/**
* To avoid configuration API pollution when trying new/experimental or features or for the migration process,
* the corresponding configuration can be put into experimental configuration section.
*
* @return a property of experimental feature
*/
String getExperimental(String name);
/**
* Get the capacity of responseCache, default value is 1000.
*
* @return the capacity of responseCache.
*/
int getInitialCapacityOfResponseCache();
}
| 6,905 |
0 | Create_ds/eureka/eureka-core/src/main/java/com/netflix | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka/DefaultEurekaServerConfig.java | /*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.eureka;
import java.io.IOException;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import javax.annotation.Nullable;
import javax.inject.Singleton;
import com.netflix.config.ConfigurationManager;
import com.netflix.config.DynamicBooleanProperty;
import com.netflix.config.DynamicIntProperty;
import com.netflix.config.DynamicPropertyFactory;
import com.netflix.config.DynamicStringProperty;
import com.netflix.config.DynamicStringSetProperty;
import com.netflix.eureka.aws.AwsBindingStrategy;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
*
* A default implementation of eureka server configuration as required by
* {@link EurekaServerConfig}.
*
* <p>
* The information required for configuring eureka server is provided in a
* configuration file.The configuration file is searched for in the classpath
* with the name specified by the property <em>eureka.server.props</em> and with
* the suffix <em>.properties</em>. If the property is not specified,
* <em>eureka-server.properties</em> is assumed as the default.The properties
* that are looked up uses the <em>namespace</em> passed on to this class.
* </p>
*
* <p>
* If the <em>eureka.environment</em> property is specified, additionally
* <em>eureka-server-<eureka.environment>.properties</em> is loaded in addition
* to <em>eureka-server.properties</em>.
* </p>
*
* @author Karthik Ranganathan
*
*/
@Singleton
public class DefaultEurekaServerConfig implements EurekaServerConfig {
private static final String ARCHAIUS_DEPLOYMENT_ENVIRONMENT = "archaius.deployment.environment";
private static final String TEST = "test";
private static final String EUREKA_ENVIRONMENT = "eureka.environment";
private static final Logger logger = LoggerFactory
.getLogger(DefaultEurekaServerConfig.class);
private static final DynamicPropertyFactory configInstance = com.netflix.config.DynamicPropertyFactory
.getInstance();
private static final DynamicStringProperty EUREKA_PROPS_FILE = DynamicPropertyFactory
.getInstance().getStringProperty("eureka.server.props",
"eureka-server");
private static final int TIME_TO_WAIT_FOR_REPLICATION = 30000;
private String namespace = "eureka.";
// These counters are checked for each HTTP request. Instantiating them per request like for the other
// properties would be too costly.
private final DynamicStringSetProperty rateLimiterPrivilegedClients =
new DynamicStringSetProperty(namespace + "rateLimiter.privilegedClients", Collections.<String>emptySet());
private final DynamicBooleanProperty rateLimiterEnabled = configInstance.getBooleanProperty(namespace + "rateLimiter.enabled", false);
private final DynamicBooleanProperty rateLimiterThrottleStandardClients = configInstance.getBooleanProperty(namespace + "rateLimiter.throttleStandardClients", false);
private final DynamicIntProperty rateLimiterBurstSize = configInstance.getIntProperty(namespace + "rateLimiter.burstSize", 10);
private final DynamicIntProperty rateLimiterRegistryFetchAverageRate = configInstance.getIntProperty(namespace + "rateLimiter.registryFetchAverageRate", 500);
private final DynamicIntProperty rateLimiterFullFetchAverageRate = configInstance.getIntProperty(namespace + "rateLimiter.fullFetchAverageRate", 100);
private final DynamicStringProperty listAutoScalingGroupsRoleName =
configInstance.getStringProperty(namespace + "listAutoScalingGroupsRoleName", "ListAutoScalingGroups");
private final DynamicStringProperty myUrl = configInstance.getStringProperty(namespace + "myUrl", null);
/**
 * Creates a config instance using the default namespace ({@code "eureka."}).
 */
public DefaultEurekaServerConfig() {
    init();
}
/**
 * Creates a config instance that looks up properties under the given namespace.
 * <p>
 * NOTE(review): the rate-limiter and related {@code Dynamic*Property} fields are
 * initialized with the default {@code "eureka."} namespace before this assignment
 * takes effect, so those particular properties do not honor a custom namespace —
 * confirm whether this is intended.
 *
 * @param namespace property-name prefix used for configuration lookups.
 */
public DefaultEurekaServerConfig(String namespace) {
    this.namespace = namespace;
    init();
}
private void init() {
String env = ConfigurationManager.getConfigInstance().getString(
EUREKA_ENVIRONMENT, TEST);
ConfigurationManager.getConfigInstance().setProperty(
ARCHAIUS_DEPLOYMENT_ENVIRONMENT, env);
String eurekaPropsFile = EUREKA_PROPS_FILE.get();
try {
// ConfigurationManager
// .loadPropertiesFromResources(eurekaPropsFile);
ConfigurationManager
.loadCascadedPropertiesFromResources(eurekaPropsFile);
} catch (IOException e) {
logger.warn(
"Cannot find the properties specified : {}. This may be okay if there are other environment "
+ "specific properties or the configuration is installed with a different mechanism.",
eurekaPropsFile);
}
}
/*
 * (non-Javadoc)
 *
 * @see com.netflix.eureka.EurekaServerConfig#getAWSAccessId()
 */
@Override
public String getAWSAccessId() {
    // Read the configured AWS access id; strip surrounding whitespace when present.
    final String accessId = configInstance.getStringProperty(
            namespace + "awsAccessId", null).get();
    return accessId == null ? null : accessId.trim();
}
/*
 * (non-Javadoc)
 *
 * @see com.netflix.eureka.EurekaServerConfig#getAWSSecretKey()
 */
@Override
public String getAWSSecretKey() {
    // Read the configured AWS secret key; strip surrounding whitespace when present.
    final String secretKey = configInstance.getStringProperty(
            namespace + "awsSecretKey", null).get();
    return secretKey == null ? null : secretKey.trim();
}
/*
* (non-Javadoc)
*
* @see com.netflix.eureka.EurekaServerConfig#getEIPBindRebindRetries()
*/
@Override
public int getEIPBindRebindRetries() {
return configInstance.getIntProperty(
namespace + "eipBindRebindRetries", 3).get();
}
/*
* (non-Javadoc)
*
* @see com.netflix.eureka.EurekaServerConfig#getEIPBindingRetryInterval()
*/
@Override
public int getEIPBindingRetryIntervalMsWhenUnbound() {
return configInstance.getIntProperty(
namespace + "eipBindRebindRetryIntervalMsWhenUnbound", (1 * 60 * 1000)).get();
}
/*
* (non-Javadoc)
*
* @see com.netflix.eureka.EurekaServerConfig#getEIPBindingRetryInterval()
*/
@Override
public int getEIPBindingRetryIntervalMs() {
return configInstance.getIntProperty(
namespace + "eipBindRebindRetryIntervalMs", (5 * 60 * 1000)).get();
}
/*
* (non-Javadoc)
*
* @see com.netflix.eureka.EurekaServerConfig#shouldEnableSelfPreservation()
*/
@Override
public boolean shouldEnableSelfPreservation() {
return configInstance.getBooleanProperty(
namespace + "enableSelfPreservation", true).get();
}
/*
* (non-Javadoc)
*
* @see
* com.netflix.eureka.EurekaServerConfig#getPeerEurekaNodesUpdateInterval()
*/
@Override
public int getPeerEurekaNodesUpdateIntervalMs() {
return configInstance
.getIntProperty(namespace + "peerEurekaNodesUpdateIntervalMs",
(10 * 60 * 1000)).get();
}
@Override
public int getRenewalThresholdUpdateIntervalMs() {
return configInstance.getIntProperty(
namespace + "renewalThresholdUpdateIntervalMs",
(15 * 60 * 1000)).get();
}
/*
 * (non-Javadoc)
 *
 * @see
 * com.netflix.eureka.EurekaServerConfig#getExpectedClientRenewalIntervalSeconds()
 */
@Override
public int getExpectedClientRenewalIntervalSeconds() {
    // A non-positive renewal interval is invalid, so fall back to the
    // 30-second default whenever the configured value is zero or negative.
    int intervalSeconds = configInstance.getIntProperty(
            namespace + "expectedClientRenewalIntervalSeconds", 30).get();
    if (intervalSeconds <= 0) {
        intervalSeconds = 30;
    }
    return intervalSeconds;
}
@Override
public double getRenewalPercentThreshold() {
return configInstance.getDoubleProperty(
namespace + "renewalPercentThreshold", 0.85).get();
}
@Override
public boolean shouldEnableReplicatedRequestCompression() {
return configInstance.getBooleanProperty(
namespace + "enableReplicatedRequestCompression", false).get();
}
@Override
public int getNumberOfReplicationRetries() {
return configInstance.getIntProperty(
namespace + "numberOfReplicationRetries", 5).get();
}
@Override
public int getPeerEurekaStatusRefreshTimeIntervalMs() {
return configInstance.getIntProperty(
namespace + "peerEurekaStatusRefreshTimeIntervalMs",
(30 * 1000)).get();
}
@Override
public int getWaitTimeInMsWhenSyncEmpty() {
return configInstance.getIntProperty(
namespace + "waitTimeInMsWhenSyncEmpty", (1000 * 60 * 5)).get();
}
@Override
public int getPeerNodeConnectTimeoutMs() {
return configInstance.getIntProperty(
namespace + "peerNodeConnectTimeoutMs", 1000).get();
}
@Override
public int getPeerNodeReadTimeoutMs() {
return configInstance.getIntProperty(
namespace + "peerNodeReadTimeoutMs", 5000).get();
}
@Override
public int getPeerNodeTotalConnections() {
return configInstance.getIntProperty(
namespace + "peerNodeTotalConnections", 1000).get();
}
@Override
public int getPeerNodeTotalConnectionsPerHost() {
return configInstance.getIntProperty(
namespace + "peerNodeTotalConnectionsPerHost", 500).get();
}
@Override
public int getPeerNodeConnectionIdleTimeoutSeconds() {
return configInstance.getIntProperty(
namespace + "peerNodeConnectionIdleTimeoutSeconds", 30).get();
}
@Override
public long getRetentionTimeInMSInDeltaQueue() {
return configInstance.getLongProperty(
namespace + "retentionTimeInMSInDeltaQueue", (3 * 60 * 1000))
.get();
}
@Override
public long getDeltaRetentionTimerIntervalInMs() {
return configInstance.getLongProperty(
namespace + "deltaRetentionTimerIntervalInMs", (30 * 1000))
.get();
}
@Override
public long getEvictionIntervalTimerInMs() {
return configInstance.getLongProperty(
namespace + "evictionIntervalTimerInMs", (60 * 1000)).get();
}
@Override
public boolean shouldUseAwsAsgApi() {
return configInstance.getBooleanProperty(namespace + "shouldUseAwsAsgApi", true).get();
}
@Override
public int getASGQueryTimeoutMs() {
return configInstance.getIntProperty(namespace + "asgQueryTimeoutMs",
300).get();
}
/**
 * Time interval (ms) with which ASG information is queried from AWS.
 * Read as a long property to match the long return type (previously read
 * via getIntProperty, which capped the configurable range at int), and for
 * consistency with the other long-valued getters in this class.
 *
 * @return time in milliseconds.
 */
@Override
public long getASGUpdateIntervalMs() {
    return configInstance.getLongProperty(namespace + "asgUpdateIntervalMs",
            (5 * 60 * 1000)).get();
}
/**
 * Expiry (ms) for cached ASG information. Read as a long property to match
 * the long return type (previously read via getIntProperty).
 *
 * @return time in milliseconds.
 */
@Override
public long getASGCacheExpiryTimeoutMs() {
    return configInstance.getLongProperty(namespace + "asgCacheExpiryTimeoutMs",
            (10 * 60 * 1000)).get(); // defaults to longer than the asg update interval
}
/**
 * Time (seconds) the registry payload is kept in the cache when not
 * invalidated by change events. Read as a long property to match the long
 * return type (previously read via getIntProperty).
 *
 * @return time in seconds.
 */
@Override
public long getResponseCacheAutoExpirationInSeconds() {
    return configInstance.getLongProperty(
            namespace + "responseCacheAutoExpirationInSeconds", 180).get();
}
/**
 * Interval (ms) with which the client payload cache is updated. Read as a
 * long property to match the long return type (previously read via
 * getIntProperty).
 *
 * @return time in milliseconds.
 */
@Override
public long getResponseCacheUpdateIntervalMs() {
    return configInstance.getLongProperty(
            namespace + "responseCacheUpdateIntervalMs", (30 * 1000)).get();
}
@Override
public boolean shouldUseReadOnlyResponseCache() {
return configInstance.getBooleanProperty(
namespace + "shouldUseReadOnlyResponseCache", true).get();
}
@Override
public boolean shouldDisableDelta() {
return configInstance.getBooleanProperty(namespace + "disableDelta",
false).get();
}
@Override
public long getMaxIdleThreadInMinutesAgeForStatusReplication() {
return configInstance
.getLongProperty(
namespace + "maxIdleThreadAgeInMinutesForStatusReplication",
10).get();
}
@Override
public int getMinThreadsForStatusReplication() {
return configInstance.getIntProperty(
namespace + "minThreadsForStatusReplication", 1).get();
}
@Override
public int getMaxThreadsForStatusReplication() {
return configInstance.getIntProperty(
namespace + "maxThreadsForStatusReplication", 1).get();
}
@Override
public int getMaxElementsInStatusReplicationPool() {
return configInstance.getIntProperty(
namespace + "maxElementsInStatusReplicationPool", 10000).get();
}
@Override
public boolean shouldSyncWhenTimestampDiffers() {
return configInstance.getBooleanProperty(
namespace + "syncWhenTimestampDiffers", true).get();
}
@Override
public int getRegistrySyncRetries() {
return configInstance.getIntProperty(
namespace + "numberRegistrySyncRetries", 5).get();
}
/**
 * Wait time (ms) between registry-sync retry attempts. Read as a long
 * property to match the long return type (previously read via
 * getIntProperty).
 *
 * @return the wait time in ms between each sync retry.
 */
@Override
public long getRegistrySyncRetryWaitMs() {
    return configInstance.getLongProperty(
            namespace + "registrySyncRetryWaitMs", 30 * 1000).get();
}
@Override
public int getMaxElementsInPeerReplicationPool() {
return configInstance.getIntProperty(
namespace + "maxElementsInPeerReplicationPool", 10000).get();
}
/**
 * Idle time (minutes) for which peer-replication threads may stay alive.
 * Read as a long property to match the long return type (previously read
 * via getIntProperty).
 *
 * @return time in minutes.
 */
@Override
public long getMaxIdleThreadAgeInMinutesForPeerReplication() {
    return configInstance.getLongProperty(
            namespace + "maxIdleThreadAgeInMinutesForPeerReplication", 15)
            .get();
}
@Override
public int getMinThreadsForPeerReplication() {
return configInstance.getIntProperty(
namespace + "minThreadsForPeerReplication", 5).get();
}
@Override
public int getMaxThreadsForPeerReplication() {
return configInstance.getIntProperty(
namespace + "maxThreadsForPeerReplication", 20).get();
}
@Override
public int getMaxTimeForReplication() {
return configInstance.getIntProperty(
namespace + "maxTimeForReplication",
TIME_TO_WAIT_FOR_REPLICATION).get();
}
@Override
public boolean shouldPrimeAwsReplicaConnections() {
return configInstance.getBooleanProperty(
namespace + "primeAwsReplicaConnections", true).get();
}
@Override
public boolean shouldDisableDeltaForRemoteRegions() {
return configInstance.getBooleanProperty(
namespace + "disableDeltaForRemoteRegions", false).get();
}
@Override
public int getRemoteRegionConnectTimeoutMs() {
return configInstance.getIntProperty(
namespace + "remoteRegionConnectTimeoutMs", 2000).get();
}
@Override
public int getRemoteRegionReadTimeoutMs() {
return configInstance.getIntProperty(
namespace + "remoteRegionReadTimeoutMs", 5000).get();
}
@Override
public int getRemoteRegionTotalConnections() {
return configInstance.getIntProperty(
namespace + "remoteRegionTotalConnections", 1000).get();
}
@Override
public int getRemoteRegionTotalConnectionsPerHost() {
return configInstance.getIntProperty(
namespace + "remoteRegionTotalConnectionsPerHost", 500).get();
}
@Override
public int getRemoteRegionConnectionIdleTimeoutSeconds() {
return configInstance.getIntProperty(
namespace + "remoteRegionConnectionIdleTimeoutSeconds", 30)
.get();
}
@Override
public boolean shouldGZipContentFromRemoteRegion() {
return configInstance.getBooleanProperty(
namespace + "remoteRegion.gzipContent", true).get();
}
/**
 * Expects a property with name: [eureka-namespace].remoteRegionUrlsWithName and a value being a comma separated
 * list of region name & remote url pairs, separated with a ";". <br/>
 * So, if you wish to specify two regions with name region1 & region2, the property value will be:
 <PRE>
 eureka.remoteRegionUrlsWithName=region1;http://region1host/eureka/v2,region2;http://region2host/eureka/v2
 </PRE>
 * The above property will result in the following map:
 <PRE>
 region1->"http://region1host/eureka/v2"
 region2->"http://region2host/eureka/v2"
 </PRE>
 * @return A map of region name to remote region URL parsed from the property specified above. If there is no
 * property available, then an empty map is returned.
 */
@Override
public Map<String, String> getRemoteRegionUrlsWithName() {
    String propName = namespace + "remoteRegionUrlsWithName";
    String remoteRegionUrlWithNameString = configInstance.getStringProperty(propName, null).get();
    if (null == remoteRegionUrlWithNameString) {
        // Property not configured: no remote regions.
        return Collections.emptyMap();
    }
    // Entries are comma-separated; each entry is "<regionName>;<url>".
    String[] remoteRegionUrlWithNamePairs = remoteRegionUrlWithNameString.split(",");
    Map<String, String> toReturn = new HashMap<String, String>(remoteRegionUrlWithNamePairs.length);
    final String pairSplitChar = ";";
    for (String remoteRegionUrlWithNamePair : remoteRegionUrlWithNamePairs) {
        String[] pairSplit = remoteRegionUrlWithNamePair.split(pairSplitChar);
        if (pairSplit.length < 2) {
            // Malformed entry (no ";" separator): log and skip it rather than fail.
            logger.error("Error reading eureka remote region urls from property {}. "
                    + "Invalid entry {} for remote region url. The entry must contain region name and url "
                    + "separated by a {}. Ignoring this entry.",
                    propName, remoteRegionUrlWithNamePair, pairSplitChar);
        } else {
            String regionName = pairSplit[0];
            String regionUrl = pairSplit[1];
            if (pairSplit.length > 2) {
                // The URL itself contained ";" characters, so split() broke it
                // apart; re-join every segment after the region name to restore
                // the original URL verbatim.
                StringBuilder regionUrlAssembler = new StringBuilder();
                for (int i = 1; i < pairSplit.length; i++) {
                    if (regionUrlAssembler.length() != 0) {
                        regionUrlAssembler.append(pairSplitChar);
                    }
                    regionUrlAssembler.append(pairSplit[i]);
                }
                regionUrl = regionUrlAssembler.toString();
            }
            toReturn.put(regionName, regionUrl);
        }
    }
    return toReturn;
}
@Override
public String[] getRemoteRegionUrls() {
String remoteRegionUrlString = configInstance.getStringProperty(
namespace + "remoteRegionUrls", null).get();
String[] remoteRegionUrl = null;
if (remoteRegionUrlString != null) {
remoteRegionUrl = remoteRegionUrlString.split(",");
}
return remoteRegionUrl;
}
/**
 * Returns the application whitelist configured for the given remote region,
 * or the global whitelist when {@code regionName} is null. Returns null when
 * no whitelist is configured, meaning all applications must be retrieved.
 */
@Nullable
@Override
public Set<String> getRemoteRegionAppWhitelist(@Nullable String regionName) {
    // A null region selects the global whitelist; region names are
    // normalized to lower case for the property lookup.
    final String region = (null == regionName) ? "global" : regionName.trim().toLowerCase();
    final DynamicStringProperty whitelistProp =
            configInstance.getStringProperty(namespace + "remoteRegion." + region + ".appWhiteList", null);
    final String whitelist = (null == whitelistProp) ? null : whitelistProp.get();
    if (whitelist == null) {
        return null;
    }
    return new HashSet<String>(Arrays.asList(whitelist.split(",")));
}
@Override
public int getRemoteRegionRegistryFetchInterval() {
return configInstance.getIntProperty(
namespace + "remoteRegion.registryFetchIntervalInSeconds", 30)
.get();
}
@Override
public int getRemoteRegionFetchThreadPoolSize() {
return configInstance.getIntProperty(
namespace + "remoteRegion.fetchThreadPoolSize", 20)
.get();
}
@Override
public String getRemoteRegionTrustStore() {
return configInstance.getStringProperty(
namespace + "remoteRegion.trustStoreFileName", "").get();
}
@Override
public String getRemoteRegionTrustStorePassword() {
return configInstance.getStringProperty(
namespace + "remoteRegion.trustStorePassword", "changeit")
.get();
}
@Override
public boolean disableTransparentFallbackToOtherRegion() {
return configInstance.getBooleanProperty(namespace + "remoteRegion.disable.transparent.fallback", false).get();
}
@Override
public boolean shouldBatchReplication() {
return configInstance.getBooleanProperty(namespace + "shouldBatchReplication", false).get();
}
@Override
public String getMyUrl() {
return myUrl.get();
}
@Override
public boolean shouldLogIdentityHeaders() {
return configInstance.getBooleanProperty(namespace + "auth.shouldLogIdentityHeaders", true).get();
}
@Override
public String getJsonCodecName() {
return configInstance.getStringProperty(
namespace + "jsonCodecName", null).get();
}
@Override
public String getXmlCodecName() {
return configInstance.getStringProperty(
namespace + "xmlCodecName", null).get();
}
    /** Master switch for the request rate limiter (see RateLimitingFilter). */
    @Override
    public boolean isRateLimiterEnabled() {
        return rateLimiterEnabled.get();
    }
    /** When true, even standard (full-fidelity) clients are subject to rate limiting. */
    @Override
    public boolean isRateLimiterThrottleStandardClients() {
        return rateLimiterThrottleStandardClients.get();
    }
    /** Client names that are never throttled by the rate limiter. */
    @Override
    public Set<String> getRateLimiterPrivilegedClients() {
        return rateLimiterPrivilegedClients.get();
    }
    /** Token-bucket burst size used by the rate limiter. */
    @Override
    public int getRateLimiterBurstSize() {
        return rateLimiterBurstSize.get();
    }
    /** Average permitted rate for delta registry fetches. */
    @Override
    public int getRateLimiterRegistryFetchAverageRate() {
        return rateLimiterRegistryFetchAverageRate.get();
    }
    /** Average permitted rate for full registry fetches. */
    @Override
    public int getRateLimiterFullFetchAverageRate() {
        return rateLimiterFullFetchAverageRate.get();
    }
    /** IAM role name assumed when listing auto scaling groups. */
    @Override
    public String getListAutoScalingGroupsRoleName() {
        return listAutoScalingGroupsRoleName.get();
    }
    /** Number of times to retry Route53 bind/rebind operations (default 3). */
    @Override
    public int getRoute53BindRebindRetries() {
        return configInstance.getIntProperty(
                namespace + "route53BindRebindRetries", 3).get();
    }
    /** Interval in milliseconds between Route53 binding retries (default 5 minutes). */
    @Override
    public int getRoute53BindingRetryIntervalMs() {
        return configInstance.getIntProperty(
                namespace + "route53BindRebindRetryIntervalMs", (5 * 60 * 1000))
                .get();
    }
    /** TTL, in seconds, for Route53 domain records created by this server (default 30). */
    @Override
    public long getRoute53DomainTTL() {
        return configInstance.getLongProperty(
                namespace + "route53DomainTTL", 30l)
                .get();
    }
    /** AWS binding strategy (EIP by default); property value is upper-cased before enum lookup. */
    @Override
    public AwsBindingStrategy getBindingStrategy() {
        return AwsBindingStrategy.valueOf(configInstance.getStringProperty(namespace + "awsBindingStrategy", AwsBindingStrategy.EIP.name()).get().toUpperCase());
    }
    /** Generic accessor for experimental feature flags under the "experimental." prefix; null when unset. */
    @Override
    public String getExperimental(String name) {
        return configInstance.getStringProperty(namespace + "experimental." + name, null).get();
    }
    /** Minimum number of available peers required to report healthy; -1 disables the check. */
    @Override
    public int getHealthStatusMinNumberOfAvailablePeers() {
        return configInstance.getIntProperty(
                namespace + "minAvailableInstancesForPeerReplication", -1).get();
    }
    /** Initial capacity of the registry response cache (default 1000). */
    @Override
    public int getInitialCapacityOfResponseCache() {
        return configInstance.getIntProperty(namespace + "initialCapacityOfResponseCache", 1000).get();
    }
}
| 6,906 |
0 | Create_ds/eureka/eureka-core/src/main/java/com/netflix | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka/V1AwareInstanceInfoConverter.java | /*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.eureka;
import com.netflix.appinfo.InstanceInfo;
import com.netflix.appinfo.InstanceInfo.InstanceStatus;
import com.netflix.discovery.converters.Converters.InstanceInfoConverter;
import com.netflix.eureka.resources.CurrentRequestVersion;
/**
* Support for {@link Version#V1}. {@link Version#V2} introduces a new status
* {@link InstanceStatus#OUT_OF_SERVICE}.
*
* @author Karthik Ranganathan, Greg Kim
*
*/
public class V1AwareInstanceInfoConverter extends InstanceInfoConverter {

    /**
     * Returns the serialized status name for the given instance, downgrading
     * statuses that V1 clients do not understand (e.g. OUT_OF_SERVICE) to DOWN.
     * V2 and later callers get the unmodified status from the superclass.
     */
    @Override
    public String getStatus(InstanceInfo info) {
        Version version = CurrentRequestVersion.get();
        if (version != null && version != Version.V1) {
            // Post-V1 protocols understand every status value as-is.
            return super.getStatus(info);
        }
        // V1 clients only know DOWN, STARTING and UP; map anything else to DOWN.
        InstanceStatus status = info.getStatus();
        boolean knownToV1 = status == InstanceStatus.DOWN
                || status == InstanceStatus.STARTING
                || status == InstanceStatus.UP;
        return (knownToV1 ? status : InstanceStatus.DOWN).name();
    }
}
| 6,907 |
0 | Create_ds/eureka/eureka-core/src/main/java/com/netflix | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka/GzipEncodingEnforcingFilter.java | package com.netflix.eureka;
import javax.inject.Singleton;
import javax.servlet.Filter;
import javax.servlet.FilterChain;
import javax.servlet.FilterConfig;
import javax.servlet.ServletException;
import javax.servlet.ServletRequest;
import javax.servlet.ServletResponse;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletRequestWrapper;
import javax.servlet.http.HttpServletResponse;
import javax.ws.rs.core.HttpHeaders;
import java.io.IOException;
import java.util.Enumeration;
import java.util.NoSuchElementException;
import java.util.concurrent.atomic.AtomicReference;
/**
* Originally Eureka supported non-compressed responses only. For large registries it was extremely
* inefficient, so gzip encoding was added. As nowadays all modern HTTP clients support gzip HTTP response
* transparently, there is no longer need to maintain uncompressed content. By adding this filter, Eureka
* server will accept only GET requests that explicitly support gzip encoding replies. In the coming minor release
* non-compressed replies will be dropped altogether, so this filter will become required.
*
* @author Tomasz Bak
*/
@Singleton
@Singleton
public class GzipEncodingEnforcingFilter implements Filter {
    // No init parameters are consumed by this filter.
    @Override
    public void init(FilterConfig filterConfig) throws ServletException {
    }
    // For GET requests:
    //  - no Accept-Encoding header: transparently inject "Accept-Encoding: gzip" so the
    //    response is compressed anyway;
    //  - Accept-Encoding present but without "gzip": reject with 406 Not Acceptable.
    // All other methods (and gzip-capable GETs) pass through unchanged.
    @Override
    public void doFilter(ServletRequest request, ServletResponse response, FilterChain chain) throws IOException, ServletException {
        HttpServletRequest httpRequest = (HttpServletRequest) request;
        if ("GET".equals(httpRequest.getMethod())) {
            String acceptEncoding = httpRequest.getHeader(HttpHeaders.ACCEPT_ENCODING);
            if (acceptEncoding == null) {
                // Client expressed no preference; force gzip via a wrapped request.
                chain.doFilter(addGzipAcceptEncoding(httpRequest), response);
                return;
            }
            if (!acceptEncoding.contains("gzip")) {
                // Client explicitly excludes gzip; uncompressed replies are not served.
                ((HttpServletResponse) response).setStatus(HttpServletResponse.SC_NOT_ACCEPTABLE);
                return;
            }
        }
        chain.doFilter(request, response);
    }
    @Override
    public void destroy() {
    }
    // Wraps the request so that all header accessors report "Accept-Encoding: gzip",
    // while every other header is delegated to the original request.
    private static HttpServletRequest addGzipAcceptEncoding(HttpServletRequest request) {
        return new HttpServletRequestWrapper(request) {
            @Override
            public Enumeration<String> getHeaders(String name) {
                if (HttpHeaders.ACCEPT_ENCODING.equals(name)) {
                    return new EnumWrapper<String>("gzip");
                }
                return super.getHeaders(name);
            }
            @Override
            public Enumeration<String> getHeaderNames() {
                // Append Accept-Encoding to the original header-name enumeration.
                return new EnumWrapper<String>(super.getHeaderNames(), HttpHeaders.ACCEPT_ENCODING);
            }
            @Override
            public String getHeader(String name) {
                if (HttpHeaders.ACCEPT_ENCODING.equals(name)) {
                    return "gzip";
                }
                return super.getHeader(name);
            }
        };
    }
    // Enumeration that yields one extra element first (exactly once), then the
    // elements of an optional delegate enumeration.
    private static class EnumWrapper<E> implements Enumeration<E> {
        private final Enumeration<E> delegate;
        // Holds the extra element until it has been consumed; getAndSet(null)
        // guarantees it is returned at most once.
        private final AtomicReference<E> extraElementRef;
        private EnumWrapper(E extraElement) {
            this(null, extraElement);
        }
        private EnumWrapper(Enumeration<E> delegate, E extraElement) {
            this.delegate = delegate;
            this.extraElementRef = new AtomicReference<>(extraElement);
        }
        @Override
        public boolean hasMoreElements() {
            // && binds tighter than ||: extra pending, or delegate still has elements.
            return extraElementRef.get() != null || delegate != null && delegate.hasMoreElements();
        }
        @Override
        public E nextElement() {
            E extra = extraElementRef.getAndSet(null);
            if (extra != null) {
                return extra;
            }
            if (delegate == null) {
                throw new NoSuchElementException();
            }
            return delegate.nextElement();
        }
    }
}
| 6,908 |
0 | Create_ds/eureka/eureka-core/src/main/java/com/netflix | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka/EurekaServerContextHolder.java | /*
* Copyright 2015 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.eureka;
/**
* A static holder for the server context for use in non-DI cases.
*
* @author David Liu
*/
public class EurekaServerContextHolder {

    private final EurekaServerContext serverContext;

    private EurekaServerContextHolder(EurekaServerContext serverContext) {
        this.serverContext = serverContext;
    }

    /** Returns the server context this holder was initialized with. */
    public EurekaServerContext getServerContext() {
        return this.serverContext;
    }

    // volatile: initialize() is synchronized but getInstance() is not, so without
    // a memory barrier a reader thread could observe a stale (null) holder even
    // after initialization completed on another thread.
    private static volatile EurekaServerContextHolder holder;

    /** Publishes the given context for later retrieval via {@link #getInstance()}. */
    public static synchronized void initialize(EurekaServerContext serverContext) {
        holder = new EurekaServerContextHolder(serverContext);
    }

    /** Returns the holder, or null if {@link #initialize} has not been called yet. */
    public static EurekaServerContextHolder getInstance() {
        return holder;
    }
}
| 6,909 |
0 | Create_ds/eureka/eureka-core/src/main/java/com/netflix | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka/StatusFilter.java | /*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.eureka;
import javax.inject.Singleton;
import javax.servlet.Filter;
import javax.servlet.FilterChain;
import javax.servlet.FilterConfig;
import javax.servlet.ServletException;
import javax.servlet.ServletRequest;
import javax.servlet.ServletResponse;
import javax.servlet.http.HttpServletResponse;
import java.io.IOException;
import com.netflix.appinfo.ApplicationInfoManager;
import com.netflix.appinfo.InstanceInfo;
import com.netflix.appinfo.InstanceInfo.InstanceStatus;
/**
* Filter to check whether the eureka server is ready to take requests based on
* its {@link InstanceStatus}.
*/
@Singleton
public class StatusFilter implements Filter {
private static final int SC_TEMPORARY_REDIRECT = 307;
/*
* (non-Javadoc)
*
* @see javax.servlet.Filter#destroy()
*/
public void destroy() {
// TODO Auto-generated method stub
}
/*
* (non-Javadoc)
*
* @see javax.servlet.Filter#doFilter(javax.servlet.ServletRequest,
* javax.servlet.ServletResponse, javax.servlet.FilterChain)
*/
public void doFilter(ServletRequest request, ServletResponse response,
FilterChain chain) throws IOException, ServletException {
InstanceInfo myInfo = ApplicationInfoManager.getInstance().getInfo();
InstanceStatus status = myInfo.getStatus();
if (status != InstanceStatus.UP && response instanceof HttpServletResponse) {
HttpServletResponse httpResponse = (HttpServletResponse) response;
httpResponse.sendError(SC_TEMPORARY_REDIRECT,
"Current node is currently not ready to serve requests -- current status: "
+ status + " - try another DS node: ");
}
chain.doFilter(request, response);
}
/*
* (non-Javadoc)
*
* @see javax.servlet.Filter#init(javax.servlet.FilterConfig)
*/
public void init(FilterConfig arg0) throws ServletException {
}
}
| 6,910 |
0 | Create_ds/eureka/eureka-core/src/main/java/com/netflix | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka/EurekaServerIdentity.java | package com.netflix.eureka;
import com.netflix.appinfo.AbstractEurekaIdentity;
/**
* This class holds metadata information related to eureka server auth with peer eureka servers
*/
public class EurekaServerIdentity extends AbstractEurekaIdentity {

    public static final String DEFAULT_SERVER_NAME = "DefaultServer";

    /** Identity protocol version reported to peers; fixed for this identity type. */
    private static final String SERVER_VERSION = "1.0";

    private final String id;

    /** @param id unique identifier of this eureka server instance */
    public EurekaServerIdentity(String id) {
        this.id = id;
    }

    @Override
    public String getName() {
        return DEFAULT_SERVER_NAME;
    }

    @Override
    public String getVersion() {
        return SERVER_VERSION;
    }

    @Override
    public String getId() {
        return id;
    }
}
| 6,911 |
0 | Create_ds/eureka/eureka-core/src/main/java/com/netflix | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka/Version.java | /*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.eureka;
/**
* Supported versions for Eureka.
*
* <p>The latest versions are always recommended.</p>
*
* @author Karthik Ranganathan, Greg Kim
*
*/
public enum Version {
    V1, V2;

    /**
     * Parses a version string case-insensitively ("v1", "V2", ...).
     * Unrecognized values — including null — fall back to the latest
     * supported version, {@link #V2}.
     */
    public static Version toEnum(String v) {
        if (v != null) {
            for (Version candidate : values()) {
                if (candidate.name().equalsIgnoreCase(v)) {
                    return candidate;
                }
            }
        }
        // Defaults to v2
        return V2;
    }
}
| 6,912 |
0 | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka/cluster/InstanceReplicationTask.java | package com.netflix.eureka.cluster;
import com.netflix.appinfo.InstanceInfo;
import com.netflix.appinfo.InstanceInfo.InstanceStatus;
import com.netflix.eureka.registry.PeerAwareInstanceRegistryImpl.Action;
/**
* Base {@link ReplicationTask} class for instance related replication requests.
*
* @author Tomasz Bak
*/
public abstract class InstanceReplicationTask extends ReplicationTask {
/**
* For cancel request there may be no InstanceInfo object available so we need to store app/id pair
* explicitly.
*/
private final String appName;
private final String id;
private final InstanceInfo instanceInfo;
private final InstanceStatus overriddenStatus;
private final boolean replicateInstanceInfo;
protected InstanceReplicationTask(String peerNodeName, Action action, String appName, String id) {
super(peerNodeName, action);
this.appName = appName;
this.id = id;
this.instanceInfo = null;
this.overriddenStatus = null;
this.replicateInstanceInfo = false;
}
protected InstanceReplicationTask(String peerNodeName,
Action action,
InstanceInfo instanceInfo,
InstanceStatus overriddenStatus,
boolean replicateInstanceInfo) {
super(peerNodeName, action);
this.appName = instanceInfo.getAppName();
this.id = instanceInfo.getId();
this.instanceInfo = instanceInfo;
this.overriddenStatus = overriddenStatus;
this.replicateInstanceInfo = replicateInstanceInfo;
}
public String getTaskName() {
return appName + '/' + id + ':' + action + '@' + peerNodeName;
}
public String getAppName() {
return appName;
}
public String getId() {
return id;
}
public InstanceInfo getInstanceInfo() {
return instanceInfo;
}
public InstanceStatus getOverriddenStatus() {
return overriddenStatus;
}
public boolean shouldReplicateInstanceInfo() {
return replicateInstanceInfo;
}
}
| 6,913 |
0 | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka/cluster/PeerEurekaNodes.java | package com.netflix.eureka.cluster;
import javax.inject.Inject;
import javax.inject.Singleton;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.TimeUnit;
import com.netflix.appinfo.ApplicationInfoManager;
import com.netflix.appinfo.InstanceInfo;
import com.netflix.discovery.EurekaClientConfig;
import com.netflix.discovery.endpoint.EndpointUtils;
import com.netflix.eureka.EurekaServerConfig;
import com.netflix.eureka.registry.PeerAwareInstanceRegistry;
import com.netflix.eureka.resources.ServerCodecs;
import com.netflix.eureka.transport.JerseyReplicationClient;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Helper class to manage lifecycle of a collection of {@link PeerEurekaNode}s.
*
* @author Tomasz Bak
*/
@Singleton
public class PeerEurekaNodes {

    private static final Logger logger = LoggerFactory.getLogger(PeerEurekaNodes.class);

    protected final PeerAwareInstanceRegistry registry;
    protected final EurekaServerConfig serverConfig;
    protected final EurekaClientConfig clientConfig;
    protected final ServerCodecs serverCodecs;
    private final ApplicationInfoManager applicationInfoManager;

    // Both collections are replaced wholesale (never mutated in place), so
    // volatile references are sufficient to publish updates to readers.
    private volatile List<PeerEurekaNode> peerEurekaNodes = Collections.emptyList();
    private volatile Set<String> peerEurekaNodeUrls = Collections.emptySet();

    // Created lazily in start(); may be null if start() was never invoked.
    private ScheduledExecutorService taskExecutor;

    @Inject
    public PeerEurekaNodes(
            PeerAwareInstanceRegistry registry,
            EurekaServerConfig serverConfig,
            EurekaClientConfig clientConfig,
            ServerCodecs serverCodecs,
            ApplicationInfoManager applicationInfoManager) {
        this.registry = registry;
        this.serverConfig = serverConfig;
        this.clientConfig = clientConfig;
        this.serverCodecs = serverCodecs;
        this.applicationInfoManager = applicationInfoManager;
    }

    /** Returns an unmodifiable view of the current peer node list. */
    public List<PeerEurekaNode> getPeerNodesView() {
        return Collections.unmodifiableList(peerEurekaNodes);
    }

    public List<PeerEurekaNode> getPeerEurekaNodes() {
        return peerEurekaNodes;
    }

    public int getMinNumberOfAvailablePeers() {
        return serverConfig.getHealthStatusMinNumberOfAvailablePeers();
    }

    /**
     * Resolves the initial peer list and schedules periodic refreshes on a
     * single daemon thread. Must be called before {@link #shutdown()}.
     */
    public void start() {
        taskExecutor = Executors.newSingleThreadScheduledExecutor(
                new ThreadFactory() {
                    @Override
                    public Thread newThread(Runnable r) {
                        Thread thread = new Thread(r, "Eureka-PeerNodesUpdater");
                        thread.setDaemon(true);
                        return thread;
                    }
                }
        );
        try {
            updatePeerEurekaNodes(resolvePeerUrls());
            Runnable peersUpdateTask = new Runnable() {
                @Override
                public void run() {
                    try {
                        updatePeerEurekaNodes(resolvePeerUrls());
                    } catch (Throwable e) {
                        logger.error("Cannot update the replica Nodes", e);
                    }
                }
            };
            taskExecutor.scheduleWithFixedDelay(
                    peersUpdateTask,
                    serverConfig.getPeerEurekaNodesUpdateIntervalMs(),
                    serverConfig.getPeerEurekaNodesUpdateIntervalMs(),
                    TimeUnit.MILLISECONDS
            );
        } catch (Exception e) {
            throw new IllegalStateException(e);
        }
        for (PeerEurekaNode node : peerEurekaNodes) {
            logger.info("Replica node URL: {}", node.getServiceUrl());
        }
    }

    /**
     * Stops the background updater and shuts down all peer nodes.
     * Safe to call even if {@link #start()} was never invoked.
     */
    public void shutdown() {
        // Fix: guard against NPE when shutdown() is invoked before start()
        // (taskExecutor is only created in start()).
        if (taskExecutor != null) {
            taskExecutor.shutdown();
        }
        List<PeerEurekaNode> toRemove = this.peerEurekaNodes;

        this.peerEurekaNodes = Collections.emptyList();
        this.peerEurekaNodeUrls = Collections.emptySet();

        for (PeerEurekaNode node : toRemove) {
            node.shutDown();
        }
    }

    /**
     * Resolve peer URLs.
     *
     * @return peer URLs with node's own URL filtered out
     */
    protected List<String> resolvePeerUrls() {
        InstanceInfo myInfo = applicationInfoManager.getInfo();
        String zone = InstanceInfo.getZone(clientConfig.getAvailabilityZones(clientConfig.getRegion()), myInfo);
        List<String> replicaUrls = EndpointUtils
                .getDiscoveryServiceUrls(clientConfig, zone, new EndpointUtils.InstanceInfoBasedUrlRandomizer(myInfo));

        // Remove this node's own URL(s) in place; no index advance after removal.
        int idx = 0;
        while (idx < replicaUrls.size()) {
            if (isThisMyUrl(replicaUrls.get(idx))) {
                replicaUrls.remove(idx);
            } else {
                idx++;
            }
        }
        return replicaUrls;
    }

    /**
     * Given new set of replica URLs, destroy {@link PeerEurekaNode}s no longer available, and
     * create new ones.
     *
     * @param newPeerUrls peer node URLs; this collection should have local node's URL filtered out
     */
    protected void updatePeerEurekaNodes(List<String> newPeerUrls) {
        if (newPeerUrls.isEmpty()) {
            logger.warn("The replica size seems to be empty. Check the route 53 DNS Registry");
            return;
        }

        Set<String> toShutdown = new HashSet<>(peerEurekaNodeUrls);
        toShutdown.removeAll(newPeerUrls);
        Set<String> toAdd = new HashSet<>(newPeerUrls);
        toAdd.removeAll(peerEurekaNodeUrls);

        if (toShutdown.isEmpty() && toAdd.isEmpty()) { // No change
            return;
        }

        // Remove peers no long available
        List<PeerEurekaNode> newNodeList = new ArrayList<>(peerEurekaNodes);

        if (!toShutdown.isEmpty()) {
            logger.info("Removing no longer available peer nodes {}", toShutdown);
            int i = 0;
            while (i < newNodeList.size()) {
                PeerEurekaNode eurekaNode = newNodeList.get(i);
                if (toShutdown.contains(eurekaNode.getServiceUrl())) {
                    newNodeList.remove(i);
                    eurekaNode.shutDown();
                } else {
                    i++;
                }
            }
        }

        // Add new peers
        if (!toAdd.isEmpty()) {
            logger.info("Adding new peer nodes {}", toAdd);
            for (String peerUrl : toAdd) {
                newNodeList.add(createPeerEurekaNode(peerUrl));
            }
        }

        this.peerEurekaNodes = newNodeList;
        this.peerEurekaNodeUrls = new HashSet<>(newPeerUrls);
    }

    /** Creates a {@link PeerEurekaNode} (with its replication client) for the given peer URL. */
    protected PeerEurekaNode createPeerEurekaNode(String peerEurekaNodeUrl) {
        HttpReplicationClient replicationClient = JerseyReplicationClient.createReplicationClient(serverConfig, serverCodecs, peerEurekaNodeUrl);
        String targetHost = hostFromUrl(peerEurekaNodeUrl);
        if (targetHost == null) {
            targetHost = "host";
        }
        return new PeerEurekaNode(registry, targetHost, peerEurekaNodeUrl, replicationClient, serverConfig);
    }

    /**
     * @deprecated 2016-06-27 use instance version of {@link #isThisMyUrl(String)}
     *
     * Checks if the given service url contains the current host which is trying
     * to replicate. Only after the EIP binding is done the host has a chance to
     * identify itself in the list of replica nodes and needs to take itself out
     * of replication traffic.
     *
     * @param url the service url of the replica node that the check is made.
     * @return true, if the url represents the current node which is trying to
     *         replicate, false otherwise.
     */
    @Deprecated
    public static boolean isThisMe(String url) {
        InstanceInfo myInfo = ApplicationInfoManager.getInstance().getInfo();
        String hostName = hostFromUrl(url);
        return hostName != null && hostName.equals(myInfo.getHostName());
    }

    /**
     * Checks if the given service url contains the current host which is trying
     * to replicate. Only after the EIP binding is done the host has a chance to
     * identify itself in the list of replica nodes and needs to take itself out
     * of replication traffic.
     *
     * @param url the service url of the replica node that the check is made.
     * @return true, if the url represents the current node which is trying to
     *         replicate, false otherwise.
     */
    public boolean isThisMyUrl(String url) {
        // An explicitly configured URL takes precedence over host/IP matching.
        final String myUrlConfigured = serverConfig.getMyUrl();
        if (myUrlConfigured != null) {
            return myUrlConfigured.equals(url);
        }
        return isInstanceURL(url, applicationInfoManager.getInfo());
    }

    /**
     * Checks if the given service url matches the supplied instance
     *
     * @param url the service url of the replica node that the check is made.
     * @param instance the instance to check the service url against
     * @return true, if the url represents the supplied instance, false otherwise.
     */
    public boolean isInstanceURL(String url, InstanceInfo instance) {
        String hostName = hostFromUrl(url);
        String myInfoComparator = instance.getHostName();
        if (clientConfig.getTransportConfig().applicationsResolverUseIp()) {
            myInfoComparator = instance.getIPAddr();
        }
        return hostName != null && hostName.equals(myInfoComparator);
    }

    /** Extracts the host part of a URL; returns null (and logs) for unparseable input. */
    public static String hostFromUrl(String url) {
        URI uri;
        try {
            uri = new URI(url);
        } catch (URISyntaxException e) {
            logger.warn("Cannot parse service URI {}", url, e);
            return null;
        }
        return uri.getHost();
    }
}
| 6,914 |
0 | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka/cluster/HttpReplicationClient.java | package com.netflix.eureka.cluster;
import com.netflix.discovery.shared.transport.EurekaHttpClient;
import com.netflix.discovery.shared.transport.EurekaHttpResponse;
import com.netflix.eureka.cluster.protocol.ReplicationList;
import com.netflix.eureka.cluster.protocol.ReplicationListResponse;
import com.netflix.eureka.resources.ASGResource.ASGStatus;
/**
* @author Tomasz Bak
*/
public interface HttpReplicationClient extends EurekaHttpClient {
    /** Replicates an ASG status change (ENABLED/DISABLED) to a peer server. */
    EurekaHttpResponse<Void> statusUpdate(String asgName, ASGStatus newStatus);
    /** Submits a batch of replication events to a peer in a single request. */
    EurekaHttpResponse<ReplicationListResponse> submitBatchUpdates(ReplicationList replicationList);
}
| 6,915 |
0 | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka/cluster/DynamicGZIPContentEncodingFilter.java | package com.netflix.eureka.cluster;
import javax.ws.rs.core.HttpHeaders;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.util.zip.GZIPInputStream;
import java.util.zip.GZIPOutputStream;
import com.netflix.eureka.EurekaServerConfig;
import com.sun.jersey.api.client.AbstractClientRequestAdapter;
import com.sun.jersey.api.client.ClientHandlerException;
import com.sun.jersey.api.client.ClientRequest;
import com.sun.jersey.api.client.ClientRequestAdapter;
import com.sun.jersey.api.client.ClientResponse;
import com.sun.jersey.api.client.filter.ClientFilter;
/**
* Eureka specific GZIP content filter handler.
*/
public class DynamicGZIPContentEncodingFilter extends ClientFilter {

    private static final String GZIP_ENCODING = "gzip";

    // Server config that decides whether outbound request bodies are compressed.
    private final EurekaServerConfig config;

    public DynamicGZIPContentEncodingFilter(EurekaServerConfig config) {
        this.config = config;
    }

    // Adds gzip Accept-Encoding by default, optionally gzips the request body
    // (when configured, or when the caller already marked it gzip), and
    // transparently decompresses gzip-encoded responses.
    @Override
    public ClientResponse handle(ClientRequest request) {
        // If 'Accept-Encoding' is not set, assume gzip as a default
        if (!request.getHeaders().containsKey(HttpHeaders.ACCEPT_ENCODING)) {
            request.getHeaders().add(HttpHeaders.ACCEPT_ENCODING, GZIP_ENCODING);
        }

        if (request.getEntity() != null) {
            Object requestEncoding = request.getHeaders().getFirst(HttpHeaders.CONTENT_ENCODING);
            if (GZIP_ENCODING.equals(requestEncoding)) {
                // Caller already declared gzip; just wrap the output stream.
                request.setAdapter(new GzipAdapter(request.getAdapter()));
            } else if (isCompressionEnabled()) {
                // Opt in to gzip: declare the encoding and wrap the output stream.
                request.getHeaders().add(HttpHeaders.CONTENT_ENCODING, GZIP_ENCODING);
                request.setAdapter(new GzipAdapter(request.getAdapter()));
            }
        }

        ClientResponse response = getNext().handle(request);

        String responseEncoding = response.getHeaders().getFirst(HttpHeaders.CONTENT_ENCODING);
        if (response.hasEntity() && GZIP_ENCODING.equals(responseEncoding)) {
            // Strip the header since the entity stream is replaced with a decompressed one.
            response.getHeaders().remove(HttpHeaders.CONTENT_ENCODING);
            decompressResponse(response);
        }

        return response;
    }

    private boolean isCompressionEnabled() {
        return config.shouldEnableReplicatedRequestCompression();
    }

    // Replaces the response entity stream with a GZIPInputStream; on failure the
    // original stream is closed before rethrowing as a ClientHandlerException.
    private static void decompressResponse(ClientResponse response) {
        InputStream entityInputStream = response.getEntityInputStream();
        GZIPInputStream uncompressedIS;
        try {
            uncompressedIS = new GZIPInputStream(entityInputStream);
        } catch (IOException ex) {
            try {
                entityInputStream.close();
            } catch (IOException ignored) {
            }
            throw new ClientHandlerException(ex);
        }
        response.setEntityInputStream(uncompressedIS);
    }

    // Request adapter that gzips everything written to the request output stream.
    private static final class GzipAdapter extends AbstractClientRequestAdapter {
        GzipAdapter(ClientRequestAdapter cra) {
            super(cra);
        }

        @Override
        public OutputStream adapt(ClientRequest request, OutputStream out) throws IOException {
            return new GZIPOutputStream(getAdapter().adapt(request, out));
        }
    }
}
0 | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka/cluster/AsgReplicationTask.java | package com.netflix.eureka.cluster;
import com.netflix.eureka.registry.PeerAwareInstanceRegistryImpl.Action;
import com.netflix.eureka.resources.ASGResource.ASGStatus;
/**
* Base {@link ReplicationTask} class for ASG related replication requests.
*
* @author Tomasz Bak
*/
public abstract class AsgReplicationTask extends ReplicationTask {

    // Name of the auto scaling group whose status change is being replicated.
    private final String asgName;
    // Target status (ENABLED/DISABLED) to replicate to the peer.
    private final ASGStatus newStatus;

    protected AsgReplicationTask(String peerNodeName, Action action, String asgName, ASGStatus newStatus) {
        super(peerNodeName, action);
        this.asgName = asgName;
        this.newStatus = newStatus;
    }

    /** Human-readable task identifier of the form {@code asgName:action@peer}, used in logs. */
    @Override
    public String getTaskName() {
        return asgName + ':' + action + '@' + peerNodeName;
    }

    public String getAsgName() {
        return asgName;
    }

    public ASGStatus getNewStatus() {
        return newStatus;
    }
}
| 6,917 |
0 | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka/cluster/ReplicationTaskProcessor.java | package com.netflix.eureka.cluster;
import java.io.IOException;
import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import com.netflix.appinfo.InstanceInfo;
import com.netflix.discovery.shared.transport.EurekaHttpResponse;
import com.netflix.eureka.cluster.protocol.ReplicationInstance;
import com.netflix.eureka.cluster.protocol.ReplicationInstance.ReplicationInstanceBuilder;
import com.netflix.eureka.cluster.protocol.ReplicationInstanceResponse;
import com.netflix.eureka.cluster.protocol.ReplicationList;
import com.netflix.eureka.cluster.protocol.ReplicationListResponse;
import com.netflix.eureka.util.batcher.TaskProcessor;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import static com.netflix.eureka.cluster.protocol.ReplicationInstance.ReplicationInstanceBuilder.aReplicationInstance;
/**
* @author Tomasz Bak
*/
class ReplicationTaskProcessor implements TaskProcessor<ReplicationTask> {
private static final Logger logger = LoggerFactory.getLogger(ReplicationTaskProcessor.class);
private final HttpReplicationClient replicationClient;
private final String peerId;
private volatile long lastNetworkErrorTime;
private static final Pattern READ_TIME_OUT_PATTERN = Pattern.compile(".*read.*time.*out.*");
ReplicationTaskProcessor(String peerId, HttpReplicationClient replicationClient) {
this.replicationClient = replicationClient;
this.peerId = peerId;
}
    /**
     * Executes a single replication task and classifies the outcome:
     * 2xx -> Success, 503 -> Congestion (retry later), other HTTP errors ->
     * PermanentError. Exceptions are classified as Congestion (read timeout),
     * TransientError (network connect issues) or PermanentError (everything else).
     */
    @Override
    public ProcessingResult process(ReplicationTask task) {
        try {
            EurekaHttpResponse<?> httpResponse = task.execute();
            int statusCode = httpResponse.getStatusCode();
            Object entity = httpResponse.getEntity();

            if (logger.isDebugEnabled()) {
                logger.debug("Replication task {} completed with status {}, (includes entity {})", task.getTaskName(), statusCode, entity != null);
            }

            if (isSuccess(statusCode)) {
                task.handleSuccess();
            } else if (statusCode == 503) {
                logger.debug("Server busy (503) reply for task {}", task.getTaskName());
                return ProcessingResult.Congestion;
            } else {
                task.handleFailure(statusCode, entity);
                return ProcessingResult.PermanentError;
            }
        } catch (Throwable e) {
            if (maybeReadTimeOut(e)) {
                logger.error("It seems to be a socket read timeout exception, it will retry later. if it continues to happen and some eureka node occupied all the cpu time, you should set property 'eureka.server.peer-node-read-timeout-ms' to a bigger value", e);
                //read timeout exception is more Congestion then TransientError, return Congestion for longer delay 
                return ProcessingResult.Congestion;
            } else if (isNetworkConnectException(e)) {
                logNetworkErrorSample(task, e);
                return ProcessingResult.TransientError;
            } else {
                logger.error("{}: {} Not re-trying this exception because it does not seem to be a network exception",
                        peerId, task.getTaskName(), e);
                return ProcessingResult.PermanentError;
            }
        }
        return ProcessingResult.Success;
    }
    /**
     * Submits a list of tasks to the peer as one batch request. A 503 reply or a
     * read timeout maps to Congestion; network connect failures to TransientError;
     * any other failure to PermanentError. On a 2xx reply the per-task responses
     * are dispatched individually via handleBatchResponse.
     */
    @Override
    public ProcessingResult process(List<ReplicationTask> tasks) {
        ReplicationList list = createReplicationListOf(tasks);
        try {
            EurekaHttpResponse<ReplicationListResponse> response = replicationClient.submitBatchUpdates(list);
            int statusCode = response.getStatusCode();
            if (!isSuccess(statusCode)) {
                if (statusCode == 503) {
                    logger.warn("Server busy (503) HTTP status code received from the peer {}; rescheduling tasks after delay", peerId);
                    return ProcessingResult.Congestion;
                } else {
                    // Unexpected error returned from the server. This should ideally never happen.
                    logger.error("Batch update failure with HTTP status code {}; discarding {} replication tasks", statusCode, tasks.size());
                    return ProcessingResult.PermanentError;
                }
            } else {
                handleBatchResponse(tasks, response.getEntity().getResponseList());
            }
        } catch (Throwable e) {
            if (maybeReadTimeOut(e)) {
                logger.error("It seems to be a socket read timeout exception, it will retry later. if it continues to happen and some eureka node occupied all the cpu time, you should set property 'eureka.server.peer-node-read-timeout-ms' to a bigger value", e);
                //read timeout exception is more Congestion then TransientError, return Congestion for longer delay 
                return ProcessingResult.Congestion;
            } else if (isNetworkConnectException(e)) {
                logNetworkErrorSample(null, e);
                return ProcessingResult.TransientError;
            } else {
                logger.error("Not re-trying this exception because it does not seem to be a network exception", e);
                return ProcessingResult.PermanentError;
            }
        }
        return ProcessingResult.Success;
    }
/**
* We want to retry eagerly, but without flooding log file with tons of error entries.
* As tasks are executed by a pool of threads the error logging multiplies. For example:
* 20 threads * 100ms delay == 200 error entries / sec worst case
* Still we would like to see the exception samples, so we print samples at regular intervals.
*/
private void logNetworkErrorSample(ReplicationTask task, Throwable e) {
long now = System.currentTimeMillis();
if (now - lastNetworkErrorTime > 10000) {
lastNetworkErrorTime = now;
StringBuilder sb = new StringBuilder();
sb.append("Network level connection to peer ").append(peerId);
if (task != null) {
sb.append(" for task ").append(task.getTaskName());
}
sb.append("; retrying after delay");
logger.error(sb.toString(), e);
}
}
private void handleBatchResponse(List<ReplicationTask> tasks, List<ReplicationInstanceResponse> responseList) {
if (tasks.size() != responseList.size()) {
// This should ideally never happen unless there is a bug in the software.
logger.error("Batch response size different from submitted task list ({} != {}); skipping response analysis", responseList.size(), tasks.size());
return;
}
for (int i = 0; i < tasks.size(); i++) {
handleBatchResponse(tasks.get(i), responseList.get(i));
}
}
private void handleBatchResponse(ReplicationTask task, ReplicationInstanceResponse response) {
int statusCode = response.getStatusCode();
if (isSuccess(statusCode)) {
task.handleSuccess();
return;
}
try {
task.handleFailure(response.getStatusCode(), response.getResponseEntity());
} catch (Throwable e) {
logger.error("Replication task {} error handler failure", task.getTaskName(), e);
}
}
private ReplicationList createReplicationListOf(List<ReplicationTask> tasks) {
ReplicationList list = new ReplicationList();
for (ReplicationTask task : tasks) {
// Only InstanceReplicationTask are batched.
list.addReplicationInstance(createReplicationInstanceOf((InstanceReplicationTask) task));
}
return list;
}
private static boolean isSuccess(int statusCode) {
return statusCode >= 200 && statusCode < 300;
}
/**
* Check if the exception is some sort of network timeout exception (ie)
* read,connect.
*
* @param e
* The exception for which the information needs to be found.
* @return true, if it is a network timeout, false otherwise.
*/
private static boolean isNetworkConnectException(Throwable e) {
do {
if (IOException.class.isInstance(e)) {
return true;
}
e = e.getCause();
} while (e != null);
return false;
}
/**
* Check if the exception is socket read time out exception
*
* @param e
* The exception for which the information needs to be found.
* @return true, if it may be a socket read time out exception.
*/
private static boolean maybeReadTimeOut(Throwable e) {
do {
if (IOException.class.isInstance(e)) {
String message = e.getMessage().toLowerCase();
Matcher matcher = READ_TIME_OUT_PATTERN.matcher(message);
if(matcher.find()) {
return true;
}
}
e = e.getCause();
} while (e != null);
return false;
}
private static ReplicationInstance createReplicationInstanceOf(InstanceReplicationTask task) {
ReplicationInstanceBuilder instanceBuilder = aReplicationInstance();
instanceBuilder.withAppName(task.getAppName());
instanceBuilder.withId(task.getId());
InstanceInfo instanceInfo = task.getInstanceInfo();
if (instanceInfo != null) {
String overriddenStatus = task.getOverriddenStatus() == null ? null : task.getOverriddenStatus().name();
instanceBuilder.withOverriddenStatus(overriddenStatus);
instanceBuilder.withLastDirtyTimestamp(instanceInfo.getLastDirtyTimestamp());
if (task.shouldReplicateInstanceInfo()) {
instanceBuilder.withInstanceInfo(instanceInfo);
}
String instanceStatus = instanceInfo.getStatus() == null ? null : instanceInfo.getStatus().name();
instanceBuilder.withStatus(instanceStatus);
}
instanceBuilder.withAction(task.getAction());
return instanceBuilder.build();
}
}
| 6,918 |
0 | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka/cluster/ReplicationTask.java | package com.netflix.eureka.cluster;
import com.netflix.discovery.shared.transport.EurekaHttpResponse;
import com.netflix.eureka.registry.PeerAwareInstanceRegistryImpl.Action;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Base class for all replication tasks.
*/
abstract class ReplicationTask {
    private static final Logger logger = LoggerFactory.getLogger(ReplicationTask.class);
    // Host name of the peer this task replicates to; available to subclasses for task naming.
    protected final String peerNodeName;
    // Registry action (Register/Cancel/Heartbeat/...) this task carries.
    protected final Action action;
    ReplicationTask(String peerNodeName, Action action) {
        this.peerNodeName = peerNodeName;
        this.action = action;
    }
    /** Human-readable task identifier, used in log messages and error reporting. */
    public abstract String getTaskName();
    public Action getAction() {
        return action;
    }
    /** Performs the actual replication call against the peer and returns its HTTP response. */
    public abstract EurekaHttpResponse<?> execute() throws Throwable;
    /** Callback invoked when the peer acknowledged the task with a success status. Default: no-op. */
    public void handleSuccess() {
    }
    /**
     * Callback invoked when the peer returned a non-success status.
     * Default behavior only logs a warning; subclasses may override to recover
     * (e.g. re-register on 404).
     */
    public void handleFailure(int statusCode, Object responseEntity) throws Throwable {
        logger.warn("The replication of task {} failed with response code {}", getTaskName(), statusCode);
    }
}
| 6,919 |
0 | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka/cluster/PeerEurekaNode.java | /*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.eureka.cluster;
import java.net.MalformedURLException;
import java.net.URL;
import com.netflix.appinfo.InstanceInfo;
import com.netflix.appinfo.InstanceInfo.InstanceStatus;
import com.netflix.discovery.shared.transport.EurekaHttpResponse;
import com.netflix.eureka.EurekaServerConfig;
import com.netflix.eureka.lease.Lease;
import com.netflix.eureka.registry.PeerAwareInstanceRegistry;
import com.netflix.eureka.registry.PeerAwareInstanceRegistryImpl.Action;
import com.netflix.eureka.resources.ASGResource.ASGStatus;
import com.netflix.eureka.util.batcher.TaskDispatcher;
import com.netflix.eureka.util.batcher.TaskDispatchers;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* The <code>PeerEurekaNode</code> represents a peer node to which information
* should be shared from this node.
*
* <p>
* This class handles replicating all update operations like
* <em>Register,Renew,Cancel,Expiration and Status Changes</em> to the eureka
* node it represents.
* <p>
*
* @author Karthik Ranganathan, Greg Kim
*
*/
public class PeerEurekaNode {
    /**
     * A time to wait before continuing work if there is network level error.
     */
    private static final long RETRY_SLEEP_TIME_MS = 100;
    /**
     * A time to wait before continuing work if there is congestion on the server side.
     */
    private static final long SERVER_UNAVAILABLE_SLEEP_TIME_MS = 1000;
    /**
     * Maximum amount of time in ms to wait for new items prior to dispatching a batch of tasks.
     */
    private static final long MAX_BATCHING_DELAY_MS = 500;
    /**
     * Maximum batch size for batched requests.
     */
    private static final int BATCH_SIZE = 250;
    private static final Logger logger = LoggerFactory.getLogger(PeerEurekaNode.class);
    public static final String BATCH_URL_PATH = "peerreplication/batch/";
    public static final String HEADER_REPLICATION = "x-netflix-discovery-replication";
    private final String serviceUrl;
    private final EurekaServerConfig config;
    // Upper bound (ms) a queued replication task may wait before processing;
    // sourced from config.getMaxTimeForReplication() in the constructor.
    private final long maxProcessingDelayMs;
    private final PeerAwareInstanceRegistry registry;
    private final String targetHost;
    private final HttpReplicationClient replicationClient;
    // Register/cancel/heartbeat/status-update/delete-override tasks go through the
    // batching dispatcher; ASG status updates use the non-batching dispatcher.
    private final TaskDispatcher<String, ReplicationTask> batchingDispatcher;
    private final TaskDispatcher<String, ReplicationTask> nonBatchingDispatcher;
    public PeerEurekaNode(PeerAwareInstanceRegistry registry, String targetHost, String serviceUrl, HttpReplicationClient replicationClient, EurekaServerConfig config) {
        this(registry, targetHost, serviceUrl, replicationClient, config, BATCH_SIZE, MAX_BATCHING_DELAY_MS, RETRY_SLEEP_TIME_MS, SERVER_UNAVAILABLE_SLEEP_TIME_MS);
    }
    /* For testing */ PeerEurekaNode(PeerAwareInstanceRegistry registry, String targetHost, String serviceUrl,
                                     HttpReplicationClient replicationClient, EurekaServerConfig config,
                                     int batchSize, long maxBatchingDelayMs,
                                     long retrySleepTimeMs, long serverUnavailableSleepTimeMs) {
        this.registry = registry;
        this.targetHost = targetHost;
        this.replicationClient = replicationClient;
        this.serviceUrl = serviceUrl;
        this.config = config;
        this.maxProcessingDelayMs = config.getMaxTimeForReplication();
        String batcherName = getBatcherName();
        // Both dispatchers funnel into a single task processor bound to this peer.
        ReplicationTaskProcessor taskProcessor = new ReplicationTaskProcessor(targetHost, replicationClient);
        this.batchingDispatcher = TaskDispatchers.createBatchingTaskDispatcher(
                batcherName,
                config.getMaxElementsInPeerReplicationPool(),
                batchSize,
                config.getMaxThreadsForPeerReplication(),
                maxBatchingDelayMs,
                serverUnavailableSleepTimeMs,
                retrySleepTimeMs,
                taskProcessor
        );
        this.nonBatchingDispatcher = TaskDispatchers.createNonBatchingTaskDispatcher(
                targetHost,
                config.getMaxElementsInStatusReplicationPool(),
                config.getMaxThreadsForStatusReplication(),
                maxBatchingDelayMs,
                serverUnavailableSleepTimeMs,
                retrySleepTimeMs,
                taskProcessor
        );
    }
    /**
     * Sends the registration information of {@link InstanceInfo} received by
     * this node to the peer node represented by this class.
     *
     * @param info
     *            the instance information {@link InstanceInfo} of any instance
     *            that is sent to this instance.
     * @throws Exception
     */
    public void register(final InstanceInfo info) throws Exception {
        // Expiry is derived from the instance's lease renewal interval: if the task
        // cannot be processed within one renewal period, a fresh heartbeat/register
        // will supersede it anyway.
        long expiryTime = System.currentTimeMillis() + getLeaseRenewalOf(info);
        batchingDispatcher.process(
                taskId("register", info),
                new InstanceReplicationTask(targetHost, Action.Register, info, null, true) {
                    public EurekaHttpResponse<Void> execute() {
                        return replicationClient.register(info);
                    }
                },
                expiryTime
        );
    }
    /**
     * Send the cancellation information of an instance to the node represented
     * by this class.
     *
     * @param appName
     *            the application name of the instance.
     * @param id
     *            the unique identifier of the instance.
     * @throws Exception
     */
    public void cancel(final String appName, final String id) throws Exception {
        long expiryTime = System.currentTimeMillis() + maxProcessingDelayMs;
        batchingDispatcher.process(
                taskId("cancel", appName, id),
                new InstanceReplicationTask(targetHost, Action.Cancel, appName, id) {
                    @Override
                    public EurekaHttpResponse<Void> execute() {
                        return replicationClient.cancel(appName, id);
                    }
                    @Override
                    public void handleFailure(int statusCode, Object responseEntity) throws Throwable {
                        super.handleFailure(statusCode, responseEntity);
                        // 404 means the peer never knew the instance; nothing to recover, just log.
                        if (statusCode == 404) {
                            logger.warn("{}: missing entry.", getTaskName());
                        }
                    }
                },
                expiryTime
        );
    }
    /**
     * Send the heartbeat information of an instance to the node represented by
     * this class. If the instance does not exist the node, the instance
     * registration information is sent again to the peer node.
     *
     * @param appName
     *            the application name of the instance.
     * @param id
     *            the unique identifier of the instance.
     * @param info
     *            the instance info {@link InstanceInfo} of the instance.
     * @param overriddenStatus
     *            the overridden status information if any of the instance.
     * @param primeConnection
     *            when true, a synchronous heartbeat is fired purely to warm up the
     *            connection and its result is ignored.
     * @throws Throwable
     */
    public void heartbeat(final String appName, final String id,
                          final InstanceInfo info, final InstanceStatus overriddenStatus,
                          boolean primeConnection) throws Throwable {
        if (primeConnection) {
            // We do not care about the result for priming request.
            replicationClient.sendHeartBeat(appName, id, info, overriddenStatus);
            return;
        }
        ReplicationTask replicationTask = new InstanceReplicationTask(targetHost, Action.Heartbeat, info, overriddenStatus, false) {
            @Override
            public EurekaHttpResponse<InstanceInfo> execute() throws Throwable {
                return replicationClient.sendHeartBeat(appName, id, info, overriddenStatus);
            }
            @Override
            public void handleFailure(int statusCode, Object responseEntity) throws Throwable {
                super.handleFailure(statusCode, responseEntity);
                if (statusCode == 404) {
                    // Peer lost the instance: fall back to re-registering it.
                    logger.warn("{}: missing entry.", getTaskName());
                    if (info != null) {
                        logger.warn("{}: cannot find instance id {} and hence replicating the instance with status {}",
                                getTaskName(), info.getId(), info.getStatus());
                        register(info);
                    }
                } else if (config.shouldSyncWhenTimestampDiffers()) {
                    // Non-404 failure may carry the peer's (newer) copy of the instance.
                    InstanceInfo peerInstanceInfo = (InstanceInfo) responseEntity;
                    if (peerInstanceInfo != null) {
                        syncInstancesIfTimestampDiffers(appName, id, info, peerInstanceInfo);
                    }
                }
            }
        };
        long expiryTime = System.currentTimeMillis() + getLeaseRenewalOf(info);
        batchingDispatcher.process(taskId("heartbeat", info), replicationTask, expiryTime);
    }
    /**
     * Send the status information of the ASG represented by the instance.
     *
     * <p>
     * ASG (Autoscaling group) names are available for instances in AWS and the
     * ASG information is used for determining if the instance should be
     * registered as {@link InstanceStatus#DOWN} or {@link InstanceStatus#UP}.
     *
     * @param asgName
     *            the asg name if any of this instance.
     * @param newStatus
     *            the new status of the ASG.
     */
    public void statusUpdate(final String asgName, final ASGStatus newStatus) {
        long expiryTime = System.currentTimeMillis() + maxProcessingDelayMs;
        // ASG updates bypass batching; they are keyed solely by ASG name.
        nonBatchingDispatcher.process(
                asgName,
                new AsgReplicationTask(targetHost, Action.StatusUpdate, asgName, newStatus) {
                    public EurekaHttpResponse<?> execute() {
                        return replicationClient.statusUpdate(asgName, newStatus);
                    }
                },
                expiryTime
        );
    }
    /**
     *
     * Send the status update of the instance.
     *
     * @param appName
     *            the application name of the instance.
     * @param id
     *            the unique identifier of the instance.
     * @param newStatus
     *            the new status of the instance.
     * @param info
     *            the instance information of the instance.
     */
    public void statusUpdate(final String appName, final String id,
                             final InstanceStatus newStatus, final InstanceInfo info) {
        long expiryTime = System.currentTimeMillis() + maxProcessingDelayMs;
        batchingDispatcher.process(
                taskId("statusUpdate", appName, id),
                new InstanceReplicationTask(targetHost, Action.StatusUpdate, info, null, false) {
                    @Override
                    public EurekaHttpResponse<Void> execute() {
                        return replicationClient.statusUpdate(appName, id, newStatus, info);
                    }
                },
                expiryTime
        );
    }
    /**
     * Delete instance status override.
     *
     * @param appName
     *            the application name of the instance.
     * @param id
     *            the unique identifier of the instance.
     * @param info
     *            the instance information of the instance.
     */
    public void deleteStatusOverride(final String appName, final String id, final InstanceInfo info) {
        long expiryTime = System.currentTimeMillis() + maxProcessingDelayMs;
        batchingDispatcher.process(
                taskId("deleteStatusOverride", appName, id),
                new InstanceReplicationTask(targetHost, Action.DeleteStatusOverride, info, null, false) {
                    @Override
                    public EurekaHttpResponse<Void> execute() {
                        return replicationClient.deleteStatusOverride(appName, id, info);
                    }
                },
                expiryTime);
    }
    /**
     * Get the service Url of the peer eureka node.
     *
     * @return the service Url of the peer eureka node.
     */
    public String getServiceUrl() {
        return serviceUrl;
    }
    // Identity of a peer node is based solely on its service URL (see equals below).
    @Override
    public int hashCode() {
        final int prime = 31;
        int result = 1;
        result = prime * result + ((serviceUrl == null) ? 0 : serviceUrl.hashCode());
        return result;
    }
    @Override
    public boolean equals(Object obj) {
        if (this == obj) {
            return true;
        }
        if (obj == null) {
            return false;
        }
        if (getClass() != obj.getClass()) {
            return false;
        }
        PeerEurekaNode other = (PeerEurekaNode) obj;
        if (serviceUrl == null) {
            if (other.serviceUrl != null) {
                return false;
            }
        } else if (!serviceUrl.equals(other.serviceUrl)) {
            return false;
        }
        return true;
    }
    /**
     * Shuts down all resources used for peer replication.
     */
    public void shutDown() {
        batchingDispatcher.shutdown();
        nonBatchingDispatcher.shutdown();
        replicationClient.shutdown();
    }
    /**
     * Synchronize {@link InstanceInfo} information if the timestamp between
     * this node and the peer eureka nodes vary.
     */
    private void syncInstancesIfTimestampDiffers(String appName, String id, InstanceInfo info, InstanceInfo infoFromPeer) {
        try {
            if (infoFromPeer != null) {
                logger.warn("Peer wants us to take the instance information from it, since the timestamp differs,"
                        + "Id : {} My Timestamp : {}, Peer's timestamp: {}", id, info.getLastDirtyTimestamp(), infoFromPeer.getLastDirtyTimestamp());
                if (infoFromPeer.getOverriddenStatus() != null && !InstanceStatus.UNKNOWN.equals(infoFromPeer.getOverriddenStatus())) {
                    logger.warn("Overridden Status info -id {}, mine {}, peer's {}", id, info.getOverriddenStatus(), infoFromPeer.getOverriddenStatus());
                    registry.storeOverriddenStatusIfRequired(appName, id, infoFromPeer.getOverriddenStatus());
                }
                // isReplication=true: prevents this register from being replicated back again.
                registry.register(infoFromPeer, true);
            }
        } catch (Throwable e) {
            logger.warn("Exception when trying to set information from peer :", e);
        }
    }
    public String getBatcherName() {
        String batcherName;
        try {
            batcherName = new URL(serviceUrl).getHost();
        } catch (MalformedURLException e1) {
            // Fall back to the raw URL string when it cannot be parsed.
            batcherName = serviceUrl;
        }
        return "target_" + batcherName;
    }
    // Task id keyed by operation + instance; presumably used by the dispatcher to
    // collapse duplicate pending tasks for the same instance — TODO confirm against
    // TaskDispatcher semantics.
    private static String taskId(String requestType, String appName, String id) {
        return requestType + '#' + appName + '/' + id;
    }
    private static String taskId(String requestType, InstanceInfo info) {
        return taskId(requestType, info.getAppName(), info.getId());
    }
    // Lease renewal interval converted to milliseconds; defaults when no lease info is present.
    private static int getLeaseRenewalOf(InstanceInfo info) {
        return (info.getLeaseInfo() == null ? Lease.DEFAULT_DURATION_IN_SECS : info.getLeaseInfo().getRenewalIntervalInSecs()) * 1000;
    }
}
| 6,920 |
0 | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka/cluster | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka/cluster/protocol/ReplicationList.java | package com.netflix.eureka.cluster.protocol;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.netflix.discovery.provider.Serializer;
/**
* @author Tomasz Bak
*/
@Serializer("jackson") // For backwards compatibility with DiscoveryJerseyProvider
public class ReplicationList {
    private final List<ReplicationInstance> replicationList;

    /** Creates an empty batch to which instances can be appended. */
    public ReplicationList() {
        this.replicationList = new ArrayList<>();
    }

    /** Jackson deserialization entry point; adopts the given list as-is. */
    @JsonCreator
    public ReplicationList(@JsonProperty("replicationList") List<ReplicationInstance> replicationList) {
        this.replicationList = replicationList;
    }

    /** Convenience constructor for a single-instance batch. */
    public ReplicationList(ReplicationInstance replicationInstance) {
        this(Collections.singletonList(replicationInstance));
    }

    public void addReplicationInstance(ReplicationInstance instance) {
        replicationList.add(instance);
    }

    public List<ReplicationInstance> getReplicationList() {
        return this.replicationList;
    }

    @Override
    public boolean equals(Object o) {
        if (o == this) {
            return true;
        }
        if (o == null || !getClass().equals(o.getClass())) {
            return false;
        }
        ReplicationList other = (ReplicationList) o;
        if (replicationList == null) {
            return other.replicationList == null;
        }
        return replicationList.equals(other.replicationList);
    }

    @Override
    public int hashCode() {
        return replicationList == null ? 0 : replicationList.hashCode();
    }
}
| 6,921 |
0 | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka/cluster | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka/cluster/protocol/ReplicationInstanceResponse.java | package com.netflix.eureka.cluster.protocol;
import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.netflix.appinfo.InstanceInfo;
/**
* The jersey resource class that generates the replication indivdiual response.
*/
public class ReplicationInstanceResponse {
    // HTTP status the receiving node produced for the corresponding replication task.
    private final int statusCode;
    // Optional instance payload (e.g. the peer's newer copy on a heartbeat conflict).
    private final InstanceInfo responseEntity;

    @JsonCreator
    public ReplicationInstanceResponse(
            @JsonProperty("statusCode") int statusCode,
            @JsonProperty("responseEntity") InstanceInfo responseEntity) {
        this.statusCode = statusCode;
        this.responseEntity = responseEntity;
    }

    public int getStatusCode() {
        return statusCode;
    }

    public InstanceInfo getResponseEntity() {
        return responseEntity;
    }

    @Override
    public boolean equals(Object o) {
        if (o == this) {
            return true;
        }
        if (o == null || !getClass().equals(o.getClass())) {
            return false;
        }
        ReplicationInstanceResponse other = (ReplicationInstanceResponse) o;
        if (statusCode != other.statusCode) {
            return false;
        }
        if (responseEntity == null) {
            return other.responseEntity == null;
        }
        return responseEntity.equals(other.responseEntity);
    }

    @Override
    public int hashCode() {
        int result = statusCode;
        result = 31 * result + (responseEntity == null ? 0 : responseEntity.hashCode());
        return result;
    }

    /** Fluent builder for {@link ReplicationInstanceResponse}. */
    public static final class Builder {
        private int statusCode;
        private InstanceInfo responseEntity;

        public Builder setStatusCode(int statusCode) {
            this.statusCode = statusCode;
            return this;
        }

        public Builder setResponseEntity(InstanceInfo entity) {
            this.responseEntity = entity;
            return this;
        }

        public ReplicationInstanceResponse build() {
            return new ReplicationInstanceResponse(statusCode, responseEntity);
        }
    }
}
| 6,922 |
0 | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka/cluster | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka/cluster/protocol/ReplicationListResponse.java | package com.netflix.eureka.cluster.protocol;
import java.util.ArrayList;
import java.util.List;
import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.netflix.discovery.provider.Serializer;
/**
* The jersey resource class that generates the replication batch response.
*/
@Serializer("jackson") // For backwards compatibility with DiscoveryJerseyProvider
public class ReplicationListResponse {
    // Never reassigned after construction (only mutated via addResponse), so
    // declared final to make the invariant explicit.
    private final List<ReplicationInstanceResponse> responseList;

    /** Creates an empty response to which per-instance results can be appended. */
    public ReplicationListResponse() {
        this.responseList = new ArrayList<ReplicationInstanceResponse>();
    }

    /** Jackson deserialization entry point; adopts the given list as-is. */
    @JsonCreator
    public ReplicationListResponse(@JsonProperty("responseList") List<ReplicationInstanceResponse> responseList) {
        this.responseList = responseList;
    }

    public List<ReplicationInstanceResponse> getResponseList() {
        return responseList;
    }

    public void addResponse(ReplicationInstanceResponse singleResponse) {
        responseList.add(singleResponse);
    }

    @Override
    public boolean equals(Object o) {
        if (this == o)
            return true;
        if (o == null || getClass() != o.getClass())
            return false;
        ReplicationListResponse that = (ReplicationListResponse) o;
        return !(responseList != null ? !responseList.equals(that.responseList) : that.responseList != null);
    }

    @Override
    public int hashCode() {
        return responseList != null ? responseList.hashCode() : 0;
    }
}
| 6,923 |
0 | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka/cluster | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka/cluster/protocol/ReplicationInstance.java | package com.netflix.eureka.cluster.protocol;
import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.netflix.appinfo.InstanceInfo;
import com.netflix.eureka.registry.PeerAwareInstanceRegistryImpl.Action;
/**
* The jersey resource class that generates a particular replication event
*/
public class ReplicationInstance {
    // All fields are write-once: assigned by the constructor (invoked by Jackson or
    // by the builder) and never mutated afterwards — there are no setters — so they
    // are declared final to make the immutability explicit and thread-safe.
    private final String appName;
    private final String id;
    private final Long lastDirtyTimestamp;
    private final String overriddenStatus;
    private final String status;
    private final InstanceInfo instanceInfo;
    private final Action action;

    @JsonCreator
    public ReplicationInstance(@JsonProperty("appName") String appName,
                               @JsonProperty("id") String id,
                               @JsonProperty("lastDirtyTimestamp") Long lastDirtyTimestamp,
                               @JsonProperty("overriddenStatus") String overriddenStatus,
                               @JsonProperty("status") String status,
                               @JsonProperty("instanceInfo") InstanceInfo instanceInfo,
                               @JsonProperty("action") Action action) {
        this.appName = appName;
        this.id = id;
        this.lastDirtyTimestamp = lastDirtyTimestamp;
        this.overriddenStatus = overriddenStatus;
        this.status = status;
        this.instanceInfo = instanceInfo;
        this.action = action;
    }

    public String getAppName() {
        return appName;
    }

    public String getId() {
        return id;
    }

    public Long getLastDirtyTimestamp() {
        return lastDirtyTimestamp;
    }

    public String getOverriddenStatus() {
        return overriddenStatus;
    }

    public String getStatus() {
        return status;
    }

    public InstanceInfo getInstanceInfo() {
        return instanceInfo;
    }

    public Action getAction() {
        return action;
    }

    @Override
    public boolean equals(Object o) {
        if (this == o)
            return true;
        if (o == null || getClass() != o.getClass())
            return false;
        ReplicationInstance that = (ReplicationInstance) o;
        if (appName != null ? !appName.equals(that.appName) : that.appName != null)
            return false;
        if (id != null ? !id.equals(that.id) : that.id != null)
            return false;
        if (lastDirtyTimestamp != null ? !lastDirtyTimestamp.equals(that.lastDirtyTimestamp) : that.lastDirtyTimestamp != null)
            return false;
        if (overriddenStatus != null ? !overriddenStatus.equals(that.overriddenStatus) : that.overriddenStatus != null)
            return false;
        if (status != null ? !status.equals(that.status) : that.status != null)
            return false;
        if (instanceInfo != null ? !instanceInfo.equals(that.instanceInfo) : that.instanceInfo != null)
            return false;
        return action == that.action;
    }

    @Override
    public int hashCode() {
        int result = appName != null ? appName.hashCode() : 0;
        result = 31 * result + (id != null ? id.hashCode() : 0);
        result = 31 * result + (lastDirtyTimestamp != null ? lastDirtyTimestamp.hashCode() : 0);
        result = 31 * result + (overriddenStatus != null ? overriddenStatus.hashCode() : 0);
        result = 31 * result + (status != null ? status.hashCode() : 0);
        result = 31 * result + (instanceInfo != null ? instanceInfo.hashCode() : 0);
        result = 31 * result + (action != null ? action.hashCode() : 0);
        return result;
    }

    /** Entry point for the fluent builder. */
    public static ReplicationInstanceBuilder replicationInstance() {
        return ReplicationInstanceBuilder.aReplicationInstance();
    }

    /** Mutable builder producing immutable {@link ReplicationInstance} objects. */
    public static class ReplicationInstanceBuilder {
        private String appName;
        private String id;
        private Long lastDirtyTimestamp;
        private String overriddenStatus;
        private String status;
        private InstanceInfo instanceInfo;
        private Action action;

        private ReplicationInstanceBuilder() {
        }

        public static ReplicationInstanceBuilder aReplicationInstance() {
            return new ReplicationInstanceBuilder();
        }

        public ReplicationInstanceBuilder withAppName(String appName) {
            this.appName = appName;
            return this;
        }

        public ReplicationInstanceBuilder withId(String id) {
            this.id = id;
            return this;
        }

        public ReplicationInstanceBuilder withLastDirtyTimestamp(Long lastDirtyTimestamp) {
            this.lastDirtyTimestamp = lastDirtyTimestamp;
            return this;
        }

        public ReplicationInstanceBuilder withOverriddenStatus(String overriddenStatus) {
            this.overriddenStatus = overriddenStatus;
            return this;
        }

        public ReplicationInstanceBuilder withStatus(String status) {
            this.status = status;
            return this;
        }

        public ReplicationInstanceBuilder withInstanceInfo(InstanceInfo instanceInfo) {
            this.instanceInfo = instanceInfo;
            return this;
        }

        public ReplicationInstanceBuilder withAction(Action action) {
            this.action = action;
            return this;
        }

        /** Returns a fresh builder pre-populated with this builder's current state. */
        public ReplicationInstanceBuilder but() {
            return aReplicationInstance().withAppName(appName).withId(id).withLastDirtyTimestamp(lastDirtyTimestamp).withOverriddenStatus(overriddenStatus).withStatus(status).withInstanceInfo(instanceInfo).withAction(action);
        }

        public ReplicationInstance build() {
            return new ReplicationInstance(
                    appName,
                    id,
                    lastDirtyTimestamp,
                    overriddenStatus,
                    status,
                    instanceInfo,
                    action
            );
        }
    }
}
| 6,924 |
0 | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka/transport/EurekaServerHttpClients.java | /*
* Copyright 2015 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.eureka.transport;
import com.netflix.discovery.shared.dns.DnsServiceImpl;
import com.netflix.discovery.shared.resolver.ClusterResolver;
import com.netflix.discovery.shared.resolver.EurekaEndpoint;
import com.netflix.discovery.shared.transport.EurekaHttpClient;
import com.netflix.discovery.shared.transport.EurekaTransportConfig;
import com.netflix.discovery.shared.transport.TransportClientFactory;
import com.netflix.discovery.shared.transport.decorator.MetricsCollectingEurekaHttpClient;
import com.netflix.discovery.shared.transport.decorator.SessionedEurekaHttpClient;
import com.netflix.discovery.shared.transport.decorator.RedirectingEurekaHttpClient;
import com.netflix.discovery.shared.transport.decorator.RetryableEurekaHttpClient;
import com.netflix.discovery.shared.transport.decorator.ServerStatusEvaluators;
import com.netflix.eureka.EurekaServerConfig;
import com.netflix.eureka.Names;
import com.netflix.eureka.resources.ServerCodecs;
/**
* @author Tomasz Bak
*/
public final class EurekaServerHttpClients {
    public static final long RECONNECT_INTERVAL_MINUTES = 30;

    private EurekaServerHttpClients() {
        // Utility class; not instantiable.
    }

    /**
     * {@link EurekaHttpClient} for remote region replication: a sessioned client
     * (connection recycled every {@link #RECONNECT_INTERVAL_MINUTES} minutes)
     * wrapping retry, redirect-following and metrics-collecting decorators around
     * the low-level Jersey remote-region client.
     */
    public static EurekaHttpClient createRemoteRegionClient(EurekaServerConfig serverConfig,
                                                            EurekaTransportConfig transportConfig,
                                                            ServerCodecs serverCodecs,
                                                            ClusterResolver<EurekaEndpoint> clusterResolver) {
        JerseyRemoteRegionClientFactory jerseyFactory =
                new JerseyRemoteRegionClientFactory(serverConfig, serverCodecs, clusterResolver.getRegion());
        TransportClientFactory metricsFactory = MetricsCollectingEurekaHttpClient.createFactory(jerseyFactory);
        return new SessionedEurekaHttpClient(
                Names.REMOTE,
                RetryableEurekaHttpClient.createFactory(
                        Names.REMOTE,
                        transportConfig,
                        clusterResolver,
                        createFactory(metricsFactory),
                        ServerStatusEvaluators.legacyEvaluator()),
                RECONNECT_INTERVAL_MINUTES * 60 * 1000
        );
    }

    /**
     * Wraps the given factory so that every produced client transparently follows
     * HTTP redirects, with a single DNS service shared across all clients.
     */
    public static TransportClientFactory createFactory(final TransportClientFactory delegateFactory) {
        final DnsServiceImpl dnsService = new DnsServiceImpl();
        return new TransportClientFactory() {
            @Override
            public EurekaHttpClient newClient(EurekaEndpoint endpoint) {
                return new RedirectingEurekaHttpClient(endpoint.getServiceUrl(), delegateFactory, dnsService);
            }

            @Override
            public void shutdown() {
                delegateFactory.shutdown();
            }
        };
    }
}
| 6,925 |
0 | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka/transport/JerseyReplicationClient.java | package com.netflix.eureka.transport;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response.Status;
import java.net.InetAddress;
import java.net.MalformedURLException;
import java.net.URL;
import java.net.UnknownHostException;
import com.netflix.appinfo.InstanceInfo;
import com.netflix.appinfo.InstanceInfo.InstanceStatus;
import com.netflix.discovery.EurekaIdentityHeaderFilter;
import com.netflix.discovery.shared.transport.EurekaHttpResponse;
import com.netflix.discovery.shared.transport.jersey.AbstractJerseyEurekaHttpClient;
import com.netflix.discovery.shared.transport.jersey.EurekaJerseyClient;
import com.netflix.discovery.shared.transport.jersey.EurekaJerseyClientImpl.EurekaJerseyClientBuilder;
import com.netflix.eureka.EurekaServerConfig;
import com.netflix.eureka.EurekaServerIdentity;
import com.netflix.eureka.cluster.DynamicGZIPContentEncodingFilter;
import com.netflix.eureka.cluster.HttpReplicationClient;
import com.netflix.eureka.cluster.PeerEurekaNode;
import com.netflix.eureka.cluster.protocol.ReplicationList;
import com.netflix.eureka.cluster.protocol.ReplicationListResponse;
import com.netflix.eureka.resources.ASGResource.ASGStatus;
import com.netflix.eureka.resources.ServerCodecs;
import com.sun.jersey.api.client.ClientResponse;
import com.sun.jersey.api.client.WebResource;
import com.sun.jersey.api.client.WebResource.Builder;
import com.sun.jersey.api.client.filter.ClientFilter;
import com.sun.jersey.client.apache4.ApacheHttpClient4;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import static com.netflix.discovery.shared.transport.EurekaHttpResponse.anEurekaHttpResponse;
/**
 * Jersey-based HTTP client used for server-to-server replication between Eureka peers.
 * Every request carries the {@link PeerEurekaNode#HEADER_REPLICATION} header so the
 * receiving server can distinguish replication traffic from regular client traffic.
 *
 * @author Tomasz Bak
 */
public class JerseyReplicationClient extends AbstractJerseyEurekaHttpClient implements HttpReplicationClient {

    private static final Logger logger = LoggerFactory.getLogger(JerseyReplicationClient.class);

    // Owning wrapper; retained so destroyResources() can release the client on shutdown().
    private final EurekaJerseyClient jerseyClient;
    // Same underlying Apache client as jerseyClient.getClient(); cached for convenience.
    private final ApacheHttpClient4 jerseyApacheClient;

    public JerseyReplicationClient(EurekaJerseyClient jerseyClient, String serviceUrl) {
        super(jerseyClient.getClient(), serviceUrl);
        this.jerseyClient = jerseyClient;
        this.jerseyApacheClient = jerseyClient.getClient();
    }

    /** Marks every outgoing request as replication traffic. */
    @Override
    protected void addExtraHeaders(Builder webResource) {
        webResource.header(PeerEurekaNode.HEADER_REPLICATION, "true");
    }

    /**
     * Compared to regular heartbeat, in the replication channel the server may return a more up to date
     * instance copy: on HTTP 409 (CONFLICT) the response entity, when present, carries the peer's
     * version of the instance, which is surfaced to the caller.
     */
    @Override
    public EurekaHttpResponse<InstanceInfo> sendHeartBeat(String appName, String id, InstanceInfo info, InstanceStatus overriddenStatus) {
        String urlPath = "apps/" + appName + '/' + id;
        ClientResponse response = null;
        try {
            WebResource webResource = jerseyClient.getClient().resource(serviceUrl)
                    .path(urlPath)
                    .queryParam("status", info.getStatus().toString())
                    .queryParam("lastDirtyTimestamp", info.getLastDirtyTimestamp().toString());
            if (overriddenStatus != null) {
                webResource = webResource.queryParam("overriddenstatus", overriddenStatus.name());
            }
            Builder requestBuilder = webResource.getRequestBuilder();
            addExtraHeaders(requestBuilder);
            response = requestBuilder.accept(MediaType.APPLICATION_JSON_TYPE).put(ClientResponse.class);
            InstanceInfo infoFromPeer = null;
            // 409 means the peer holds a newer copy; adopt the entity it returns (if any).
            if (response.getStatus() == Status.CONFLICT.getStatusCode() && response.hasEntity()) {
                infoFromPeer = response.getEntity(InstanceInfo.class);
            }
            return anEurekaHttpResponse(response.getStatus(), infoFromPeer).type(MediaType.APPLICATION_JSON_TYPE).build();
        } finally {
            if (logger.isDebugEnabled()) {
                logger.debug("[heartbeat] Jersey HTTP PUT {}; statusCode={}", urlPath, response == null ? "N/A" : response.getStatus());
            }
            // Always close the Jersey response to release the underlying connection.
            if (response != null) {
                response.close();
            }
        }
    }

    /** Replicates an ASG status override to the peer; only the HTTP status code is meaningful. */
    @Override
    public EurekaHttpResponse<Void> statusUpdate(String asgName, ASGStatus newStatus) {
        ClientResponse response = null;
        try {
            String urlPath = "asg/" + asgName + "/status";
            response = jerseyApacheClient.resource(serviceUrl)
                    .path(urlPath)
                    .queryParam("value", newStatus.name())
                    .header(PeerEurekaNode.HEADER_REPLICATION, "true")
                    .put(ClientResponse.class);
            return EurekaHttpResponse.status(response.getStatus());
        } finally {
            if (response != null) {
                response.close();
            }
        }
    }

    /**
     * Sends a batch of replication tasks to the peer. On a non-2xx response the body is not
     * read; otherwise the per-task results are decoded from the JSON entity.
     */
    @Override
    public EurekaHttpResponse<ReplicationListResponse> submitBatchUpdates(ReplicationList replicationList) {
        ClientResponse response = null;
        try {
            response = jerseyApacheClient.resource(serviceUrl)
                    .path(PeerEurekaNode.BATCH_URL_PATH)
                    .accept(MediaType.APPLICATION_JSON_TYPE)
                    .type(MediaType.APPLICATION_JSON_TYPE)
                    .post(ClientResponse.class, replicationList);
            if (!isSuccess(response.getStatus())) {
                return anEurekaHttpResponse(response.getStatus(), ReplicationListResponse.class).build();
            }
            ReplicationListResponse batchResponse = response.getEntity(ReplicationListResponse.class);
            return anEurekaHttpResponse(response.getStatus(), batchResponse).type(MediaType.APPLICATION_JSON_TYPE).build();
        } finally {
            if (response != null) {
                response.close();
            }
        }
    }

    /** Installs an additional Jersey filter on the shared Apache client. */
    public void addReplicationClientFilter(ClientFilter clientFilter) {
        jerseyApacheClient.addFilter(clientFilter);
    }

    @Override
    public void shutdown() {
        super.shutdown();
        jerseyClient.destroyResources();
    }

    /**
     * Factory method that builds a fully configured replication client from server config:
     * timeouts/connection limits come from {@link EurekaServerConfig}, GZIP and server-identity
     * filters are installed, and system SSL is enabled for https URLs when the
     * {@code shouldSSLConnectionsUseSystemSocketFactory} system property is set.
     */
    public static JerseyReplicationClient createReplicationClient(EurekaServerConfig config, ServerCodecs serverCodecs, String serviceUrl) {
        String name = JerseyReplicationClient.class.getSimpleName() + ": " + serviceUrl + "apps/: ";
        EurekaJerseyClient jerseyClient;
        try {
            String hostname;
            try {
                hostname = new URL(serviceUrl).getHost();
            } catch (MalformedURLException e) {
                // Fall back to the raw serviceUrl when it is not a parseable URL.
                hostname = serviceUrl;
            }
            String jerseyClientName = "Discovery-PeerNodeClient-" + hostname;
            EurekaJerseyClientBuilder clientBuilder = new EurekaJerseyClientBuilder()
                    .withClientName(jerseyClientName)
                    .withUserAgent("Java-EurekaClient-Replication")
                    .withEncoderWrapper(serverCodecs.getFullJsonCodec())
                    .withDecoderWrapper(serverCodecs.getFullJsonCodec())
                    .withConnectionTimeout(config.getPeerNodeConnectTimeoutMs())
                    .withReadTimeout(config.getPeerNodeReadTimeoutMs())
                    .withMaxConnectionsPerHost(config.getPeerNodeTotalConnectionsPerHost())
                    .withMaxTotalConnections(config.getPeerNodeTotalConnections())
                    .withConnectionIdleTimeout(config.getPeerNodeConnectionIdleTimeoutSeconds());
            if (serviceUrl.startsWith("https://") &&
                    "true".equals(System.getProperty("com.netflix.eureka.shouldSSLConnectionsUseSystemSocketFactory"))) {
                clientBuilder.withSystemSSLConfiguration();
            }
            jerseyClient = clientBuilder.build();
        } catch (Throwable e) {
            throw new RuntimeException("Cannot Create new Replica Node :" + name, e);
        }
        // Best-effort local IP lookup for the identity header; null is tolerated downstream.
        String ip = null;
        try {
            ip = InetAddress.getLocalHost().getHostAddress();
        } catch (UnknownHostException e) {
            logger.warn("Cannot find localhost ip", e);
        }
        ApacheHttpClient4 jerseyApacheClient = jerseyClient.getClient();
        jerseyApacheClient.addFilter(new DynamicGZIPContentEncodingFilter(config));
        EurekaServerIdentity identity = new EurekaServerIdentity(ip);
        jerseyApacheClient.addFilter(new EurekaIdentityHeaderFilter(identity));
        return new JerseyReplicationClient(jerseyClient, serviceUrl);
    }

    /** True for any 2xx HTTP status code. */
    private static boolean isSuccess(int statusCode) {
        return statusCode >= 200 && statusCode < 300;
    }
}
| 6,926 |
0 | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka/transport/JerseyRemoteRegionClientFactory.java | /*
* Copyright 2015 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.eureka.transport;
import javax.inject.Inject;
import java.net.InetAddress;
import java.net.UnknownHostException;
import java.util.Collections;
import com.netflix.discovery.EurekaIdentityHeaderFilter;
import com.netflix.discovery.shared.resolver.EurekaEndpoint;
import com.netflix.discovery.shared.transport.EurekaHttpClient;
import com.netflix.discovery.shared.transport.TransportClientFactory;
import com.netflix.discovery.shared.transport.jersey.EurekaJerseyClient;
import com.netflix.discovery.shared.transport.jersey.EurekaJerseyClientImpl.EurekaJerseyClientBuilder;
import com.netflix.discovery.shared.transport.jersey.JerseyApplicationClient;
import com.netflix.eureka.EurekaServerConfig;
import com.netflix.eureka.EurekaServerIdentity;
import com.netflix.eureka.resources.ServerCodecs;
import com.sun.jersey.api.client.filter.GZIPContentEncodingFilter;
import com.sun.jersey.client.apache4.ApacheHttpClient4;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * {@link TransportClientFactory} producing clients that talk to a remote Eureka region.
 * A single shared {@link EurekaJerseyClient} is created lazily (double-checked locking)
 * and reused by every {@link EurekaHttpClient} this factory hands out.
 *
 * @author Tomasz Bak
 */
public class JerseyRemoteRegionClientFactory implements TransportClientFactory {

    private static final Logger logger = LoggerFactory.getLogger(JerseyRemoteRegionClientFactory.class);

    private final EurekaServerConfig serverConfig;
    private final ServerCodecs serverCodecs;
    private final String region;

    // Lazily initialized in getOrCreateJerseyClient(); volatile for the double-checked lock.
    private volatile EurekaJerseyClient jerseyClient;
    private final Object lock = new Object();

    @Inject
    public JerseyRemoteRegionClientFactory(EurekaServerConfig serverConfig,
                                           ServerCodecs serverCodecs,
                                           String region) {
        this.serverConfig = serverConfig;
        this.serverCodecs = serverCodecs;
        this.region = region;
    }

    @Override
    public EurekaHttpClient newClient(EurekaEndpoint endpoint) {
        return new JerseyApplicationClient(getOrCreateJerseyClient(region, endpoint).getClient(), endpoint.getServiceUrl(), Collections.<String, String>emptyMap());
    }

    @Override
    public void shutdown() {
        if (jerseyClient != null) {
            jerseyClient.destroyResources();
        }
    }

    /**
     * Returns the shared client, building it on first use. Note: the configuration is derived
     * from the first endpoint seen; subsequent endpoints reuse the same client.
     */
    private EurekaJerseyClient getOrCreateJerseyClient(String region, EurekaEndpoint endpoint) {
        if (jerseyClient != null) {
            return jerseyClient;
        }
        synchronized (lock) {
            if (jerseyClient == null) {
                EurekaJerseyClientBuilder clientBuilder = new EurekaJerseyClientBuilder()
                        .withUserAgent("Java-EurekaClient-RemoteRegion")
                        .withEncoderWrapper(serverCodecs.getFullJsonCodec())
                        .withDecoderWrapper(serverCodecs.getFullJsonCodec())
                        .withConnectionTimeout(serverConfig.getRemoteRegionConnectTimeoutMs())
                        .withReadTimeout(serverConfig.getRemoteRegionReadTimeoutMs())
                        .withMaxConnectionsPerHost(serverConfig.getRemoteRegionTotalConnectionsPerHost())
                        .withMaxTotalConnections(serverConfig.getRemoteRegionTotalConnections())
                        .withConnectionIdleTimeout(serverConfig.getRemoteRegionConnectionIdleTimeoutSeconds());
                // NOTE(review): when the endpoint IS secure this branch picks the plain client name
                // and configures no SSL, while the two non-secure branches configure system SSL or a
                // trust store. This looks inverted relative to the names/intent — confirm against
                // upstream history before changing, as peers may depend on the current behavior.
                if (endpoint.isSecure()) {
                    clientBuilder.withClientName("Discovery-RemoteRegionClient-" + region);
                } else if ("true".equals(System.getProperty("com.netflix.eureka.shouldSSLConnectionsUseSystemSocketFactory"))) {
                    clientBuilder.withClientName("Discovery-RemoteRegionSystemSecureClient-" + region)
                            .withSystemSSLConfiguration();
                } else {
                    clientBuilder.withClientName("Discovery-RemoteRegionSecureClient-" + region)
                            .withTrustStoreFile(
                                    serverConfig.getRemoteRegionTrustStore(),
                                    serverConfig.getRemoteRegionTrustStorePassword()
                            );
                }
                jerseyClient = clientBuilder.build();
                ApacheHttpClient4 discoveryApacheClient = jerseyClient.getClient();
                // Add gzip content encoding support
                boolean enableGZIPContentEncodingFilter = serverConfig.shouldGZipContentFromRemoteRegion();
                if (enableGZIPContentEncodingFilter) {
                    discoveryApacheClient.addFilter(new GZIPContentEncodingFilter(false));
                }
                // always enable client identity headers
                String ip = null;
                try {
                    ip = InetAddress.getLocalHost().getHostAddress();
                } catch (UnknownHostException e) {
                    logger.warn("Cannot find localhost ip", e);
                }
                EurekaServerIdentity identity = new EurekaServerIdentity(ip);
                discoveryApacheClient.addFilter(new EurekaIdentityHeaderFilter(identity));
            }
        }
        return jerseyClient;
    }
}
| 6,927 |
0 | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka/util/StatusInfo.java | package com.netflix.eureka.util;
import java.lang.management.ManagementFactory;
import java.text.DecimalFormat;
import java.text.SimpleDateFormat;
import java.util.Date;
import java.util.HashMap;
import java.util.Map;
import com.netflix.appinfo.InstanceInfo;
import com.netflix.config.ConfigurationManager;
import com.netflix.discovery.provider.Serializer;
import com.thoughtworks.xstream.annotations.XStreamAlias;
import com.thoughtworks.xstream.annotations.XStreamOmitField;
/**
 * A utility class for exposing status information of an instance.
 *
 * @author Greg Kim
 */
@Serializer("com.netflix.discovery.converters.EntityBodyConverter")
@XStreamAlias("status")
public class StatusInfo {

    private static final String DATE_FORMAT = "yyyy-MM-dd'T'HH:mm:ss Z";
    // True when Archaius is on the classpath; guards the optional "environment" stat in build().
    private static final boolean ARCHAIUS_EXISTS = classExists("com.netflix.config.ConfigurationManager");

    public static final class Builder {

        @XStreamOmitField
        private StatusInfo result;

        private Builder() {
            result = new StatusInfo();
        }

        public static Builder newBuilder() {
            return new Builder();
        }

        public Builder isHealthy(boolean b) {
            // NOTE(review): "isHeathly" is a long-standing field-name typo. Renaming it may change
            // the XStream-serialized representation, so it is deliberately left untouched.
            result.isHeathly = Boolean.valueOf(b);
            return this;
        }

        public Builder withInstanceInfo(InstanceInfo instanceInfo) {
            result.instanceInfo = instanceInfo;
            return this;
        }

        /**
         * Add any application specific status data.
         */
        public Builder add(String key, String value) {
            // Lazily created: applicationStats stays null (and unserialized) if never used.
            if (result.applicationStats == null) {
                result.applicationStats = new HashMap<String, String>();
            }
            result.applicationStats.put(key, value);
            return this;
        }

        /**
         * Build the {@link StatusInfo}. General information are automatically
         * built here too: server uptime, deployment environment (when Archaius is
         * present), CPU count, and JVM memory usage in megabytes.
         *
         * @throws IllegalStateException if no {@link InstanceInfo} was supplied
         */
        public StatusInfo build() {
            if (result.instanceInfo == null) {
                throw new IllegalStateException("instanceInfo can not be null");
            }
            result.generalStats.put("server-uptime", getUpTime());
            if (ARCHAIUS_EXISTS) {
                result.generalStats.put("environment", ConfigurationManager
                        .getDeploymentContext().getDeploymentEnvironment());
            }
            Runtime runtime = Runtime.getRuntime();
            // 1048576 = 1024 * 1024: convert bytes to megabytes.
            int totalMem = (int) (runtime.totalMemory() / 1048576);
            int freeMem = (int) (runtime.freeMemory() / 1048576);
            int usedPercent = (int) (((float) totalMem - freeMem) / (totalMem) * 100.0);
            result.generalStats.put("num-of-cpus",
                    String.valueOf(runtime.availableProcessors()));
            result.generalStats.put("total-avail-memory",
                    String.valueOf(totalMem) + "mb");
            result.generalStats.put("current-memory-usage",
                    String.valueOf(totalMem - freeMem) + "mb" + " ("
                            + usedPercent + "%)");
            return result;
        }
    }

    // Always-populated JVM/environment stats; filled in by Builder.build().
    private Map<String, String> generalStats = new HashMap<String, String>();
    // Optional application-specific stats; null unless Builder.add() was called.
    private Map<String, String> applicationStats;
    private InstanceInfo instanceInfo;
    // Tri-state: null means "no health threshold configured" (see isHealthy() below).
    private Boolean isHeathly;

    private StatusInfo() {
    }

    public InstanceInfo getInstanceInfo() {
        return instanceInfo;
    }

    /**
     * NOTE: throws NullPointerException if the builder never called isHealthy(boolean);
     * callers are expected to know a health threshold was configured.
     */
    public boolean isHealthy() {
        return isHeathly.booleanValue();
    }

    public Map<String, String> getGeneralStats() {
        return generalStats;
    }

    public Map<String, String> getApplicationStats() {
        return applicationStats;
    }

    /**
     * Output the amount of time that has elapsed since the given date in the
     * format x days, xx:xx.
     *
     * @return A string representing the formatted interval.
     */
    public static String getUpTime() {
        long diff = ManagementFactory.getRuntimeMXBean().getUptime();
        // Successively reduce milliseconds -> minutes -> hours -> days.
        diff /= 1000 * 60;
        long minutes = diff % 60;
        diff /= 60;
        long hours = diff % 24;
        diff /= 24;
        long days = diff;
        StringBuilder buf = new StringBuilder();
        if (days == 1) {
            buf.append("1 day ");
        } else if (days > 1) {
            buf.append(Long.valueOf(days).toString()).append(" days ");
        }
        DecimalFormat format = new DecimalFormat();
        format.setMinimumIntegerDigits(2);
        buf.append(format.format(hours)).append(":")
                .append(format.format(minutes));
        return buf.toString();
    }

    /** Current wall-clock time formatted as yyyy-MM-dd'T'HH:mm:ss Z. */
    public static String getCurrentTimeAsString() {
        SimpleDateFormat format = new SimpleDateFormat(DATE_FORMAT);
        return format.format(new Date());
    }

    /** Reflection probe used to detect optional dependencies (e.g. Archaius). */
    private static boolean classExists(String className) {
        try {
            Class.forName(className);
            return true;
        } catch (ClassNotFoundException e) {
            return false;
        }
    }
}
| 6,928 |
0 | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka/util/MeasuredRate.java | /*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.eureka.util;
import java.util.Timer;
import java.util.TimerTask;
import java.util.concurrent.atomic.AtomicLong;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Tracks how many events occurred during the most recently completed sampling
 * window. Increments accumulate in a "current" bucket which a background timer
 * rolls into the reported bucket every {@code sampleInterval} milliseconds.
 *
 * @author Karthik Ranganathan,Greg Kim
 */
public class MeasuredRate {
    private static final Logger logger = LoggerFactory.getLogger(MeasuredRate.class);

    // Count from the previous, completed interval; this is what getCount() reports.
    private final AtomicLong previousIntervalCount = new AtomicLong(0);
    // Count accumulating for the interval currently in progress.
    private final AtomicLong currentIntervalCount = new AtomicLong(0);
    private final long sampleInterval;
    private final Timer timer;
    private volatile boolean isActive;

    /**
     * @param sampleInterval in milliseconds
     */
    public MeasuredRate(long sampleInterval) {
        this.sampleInterval = sampleInterval;
        this.timer = new Timer("Eureka-MeasureRateTimer", true);
        this.isActive = false;
    }

    /** Begins periodic bucket rollover; repeated calls have no effect while active. */
    public synchronized void start() {
        if (isActive) {
            return;
        }
        TimerTask rollover = new TimerTask() {
            @Override
            public void run() {
                try {
                    // Publish the finished interval's count and zero the current bucket.
                    previousIntervalCount.set(currentIntervalCount.getAndSet(0));
                } catch (Throwable e) {
                    logger.error("Cannot reset the Measured Rate", e);
                }
            }
        };
        timer.schedule(rollover, sampleInterval, sampleInterval);
        isActive = true;
    }

    /** Cancels the rollover timer; no-op when not active. */
    public synchronized void stop() {
        if (!isActive) {
            return;
        }
        timer.cancel();
        isActive = false;
    }

    /**
     * Returns the count in the last sample interval.
     */
    public long getCount() {
        return previousIntervalCount.get();
    }

    /**
     * Increments the count in the current sample interval.
     */
    public void increment() {
        currentIntervalCount.incrementAndGet();
    }
}
| 6,929 |
0 | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka/util/ServoControl.java | package com.netflix.eureka.util;
import com.netflix.servo.monitor.MonitorConfig;
import com.netflix.servo.monitor.StatsTimer;
import com.netflix.servo.stats.StatsConfig;
/**
 * The sole purpose of this class is shutting down the {@code protected} executor of {@link StatsTimer}
 */
public class ServoControl extends StatsTimer {
    // Never meant to be constructed: subclassing StatsTimer is only a trick to gain
    // access to its protected DEFAULT_EXECUTOR from the static shutdown() below.
    public ServoControl(MonitorConfig baseConfig, StatsConfig statsConfig) {
        super(baseConfig, statsConfig);
        throw new UnsupportedOperationException(getClass().getName() + " is not meant to be instantiated.");
    }
    /** Stops the shared StatsTimer executor; intended to be called once during server shutdown. */
    public static void shutdown() {
        DEFAULT_EXECUTOR.shutdown();
    }
}
| 6,930 |
0 | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka/util/StatusUtil.java | package com.netflix.eureka.util;
import com.netflix.appinfo.InstanceInfo;
import com.netflix.discovery.shared.Application;
import com.netflix.eureka.EurekaServerContext;
import com.netflix.eureka.cluster.PeerEurekaNode;
import com.netflix.eureka.cluster.PeerEurekaNodes;
import com.netflix.eureka.registry.PeerAwareInstanceRegistry;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.net.URI;
/**
 * Assembles a {@link StatusInfo} snapshot describing this Eureka server instance
 * and the availability of its configured peer replicas.
 *
 * @author David Liu
 */
public class StatusUtil {
    private static final Logger logger = LoggerFactory.getLogger(StatusUtil.class);

    private final String myAppName;
    private final PeerAwareInstanceRegistry registry;
    private final PeerEurekaNodes peerEurekaNodes;
    private final InstanceInfo instanceInfo;

    public StatusUtil(EurekaServerContext server) {
        this.myAppName = server.getApplicationInfoManager().getInfo().getAppName();
        this.registry = server.getRegistry();
        this.peerEurekaNodes = server.getPeerEurekaNodes();
        this.instanceInfo = server.getApplicationInfoManager().getInfo();
    }

    /**
     * Builds the status snapshot: the full peer list ("registered-replicas"), the
     * subset currently reachable ("available-replicas") and the rest
     * ("unavailable-replicas"). The healthy flag is set only when a minimum-peer
     * threshold has been configured.
     */
    public StatusInfo getStatusInfo() {
        StatusInfo.Builder builder = StatusInfo.Builder.newBuilder();
        int availableCount = 0;
        StringBuilder available = new StringBuilder();
        StringBuilder unavailable = new StringBuilder();
        StringBuilder registered = new StringBuilder();
        for (PeerEurekaNode node : peerEurekaNodes.getPeerEurekaNodes()) {
            String peerUrl = node.getServiceUrl();
            // Registered list is ", "-separated; the up/down lists keep a trailing ','.
            if (registered.length() > 0) {
                registered.append(", ");
            }
            registered.append(peerUrl);
            if (isReplicaAvailable(peerUrl)) {
                available.append(peerUrl).append(',');
                availableCount++;
            } else {
                unavailable.append(peerUrl).append(',');
            }
        }
        builder.add("registered-replicas", registered.toString());
        builder.add("available-replicas", available.toString());
        builder.add("unavailable-replicas", unavailable.toString());
        // Only set the healthy flag if a threshold has been configured.
        if (peerEurekaNodes.getMinNumberOfAvailablePeers() > -1) {
            builder.isHealthy(availableCount >= peerEurekaNodes.getMinNumberOfAvailablePeers());
        }
        builder.withInstanceInfo(this.instanceInfo);
        return builder.build();
    }

    /**
     * A replica is considered available when some registered instance of this
     * server's own application matches the given peer URL. Any failure during the
     * lookup is logged and treated as "not available".
     */
    private boolean isReplicaAvailable(String url) {
        try {
            Application app = registry.getApplication(myAppName, false);
            if (app != null) {
                for (InstanceInfo info : app.getInstances()) {
                    if (peerEurekaNodes.isInstanceURL(url, info)) {
                        return true;
                    }
                }
            }
        } catch (Throwable e) {
            logger.error("Could not determine if the replica is available ", e);
        }
        return false;
    }
}
| 6,931 |
0 | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka/util/EurekaMonitors.java | /*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.eureka.util;
import java.util.concurrent.atomic.AtomicLong;
import com.netflix.appinfo.AmazonInfo;
import com.netflix.appinfo.AmazonInfo.MetaDataKey;
import com.netflix.appinfo.ApplicationInfoManager;
import com.netflix.appinfo.DataCenterInfo;
import com.netflix.appinfo.DataCenterInfo.Name;
import com.netflix.servo.DefaultMonitorRegistry;
import com.netflix.servo.annotations.DataSourceType;
import com.netflix.servo.monitor.Monitors;
/**
* The enum that encapsulates all statistics monitored by Eureka.
*
* <p>
* Eureka Monitoring is done using <a href="https://github.com/Netflix/servo">Servo</a>. The
* users who wants to take advantage of the monitoring should read up on
* <tt>Servo</tt>
* <p>
*
* @author Karthik Ranganathan, Greg Kim
*
*/
public enum EurekaMonitors {
    RENEW("renewCounter", "Number of total renews seen since startup"),
    CANCEL("cancelCounter", "Number of total cancels seen since startup"),
    GET_ALL_CACHE_MISS("getAllCacheMissCounter", "Number of total registry queries seen since startup"),
    GET_ALL_CACHE_MISS_DELTA("getAllCacheMissDeltaCounter",
            "Number of total registry queries for delta seen since startup"),
    GET_ALL_WITH_REMOTE_REGIONS_CACHE_MISS("getAllWithRemoteRegionCacheMissCounter",
            "Number of total registry with remote region queries seen since startup"),
    GET_ALL_WITH_REMOTE_REGIONS_CACHE_MISS_DELTA("getAllWithRemoteRegionCacheMissDeltaCounter",
            "Number of total registry queries for delta with remote region seen since startup"),
    GET_ALL_DELTA("getAllDeltaCounter", "Number of total deltas since startup"),
    GET_ALL_DELTA_WITH_REMOTE_REGIONS("getAllDeltaWithRemoteRegionCounter",
            "Number of total deltas with remote regions since startup"),
    GET_ALL("getAllCounter", "Number of total registry queries seen since startup"),
    GET_ALL_WITH_REMOTE_REGIONS("getAllWithRemoteRegionCounter",
            "Number of total registry queries with remote regions, seen since startup"),
    GET_APPLICATION("getApplicationCounter", "Number of total application queries seen since startup"),
    REGISTER("registerCounter", "Number of total registers seen since startup"),
    EXPIRED("expiredCounter", "Number of total expired leases since startup"),
    STATUS_UPDATE("statusUpdateCounter", "Number of total admin status updates since startup"),
    STATUS_OVERRIDE_DELETE("statusOverrideDeleteCounter", "Number of status override removals"),
    CANCEL_NOT_FOUND("cancelNotFoundCounter", "Number of total cancel requests on non-existing instance since startup"),
    RENEW_NOT_FOUND("renewNotFoundexpiredCounter", "Number of total renew on non-existing instance since startup"),
    REJECTED_REPLICATIONS("numOfRejectedReplications", "Number of replications rejected because of full queue"),
    FAILED_REPLICATIONS("numOfFailedReplications", "Number of failed replications - likely from timeouts"),
    RATE_LIMITED("numOfRateLimitedRequests", "Number of requests discarded by the rate limiter"),
    RATE_LIMITED_CANDIDATES("numOfRateLimitedRequestCandidates", "Number of requests that would be discarded if the rate limiter's throttling is activated"),
    RATE_LIMITED_FULL_FETCH("numOfRateLimitedFullFetchRequests", "Number of full registry fetch requests discarded by the rate limiter"),
    RATE_LIMITED_FULL_FETCH_CANDIDATES("numOfRateLimitedFullFetchRequestCandidates", "Number of full registry fetch requests that would be discarded if the rate limiter's throttling is activated");

    // Servo counter name for this statistic.
    private final String name;
    // Zone-qualified counter name: "<az>.<name>" on AWS, "dcmaster.<name>" elsewhere.
    private final String myZoneCounterName;
    // Human-readable explanation of what the counter measures.
    private final String description;

    private EurekaMonitors(String name, String description) {
        this.name = name;
        this.description = description;
        // Derive the zone-specific counter name from the data center this instance runs in.
        DataCenterInfo dcInfo = ApplicationInfoManager.getInstance().getInfo().getDataCenterInfo();
        if (dcInfo.getName() == Name.Amazon) {
            myZoneCounterName = ((AmazonInfo) dcInfo).get(MetaDataKey.availabilityZone) + "." + name;
        } else {
            myZoneCounterName = "dcmaster." + name;
        }
    }

    // Total count, including replication-triggered events.
    @com.netflix.servo.annotations.Monitor(name = "count", type = DataSourceType.COUNTER)
    private final AtomicLong counter = new AtomicLong();

    // Count of client-initiated events only (replication events excluded).
    @com.netflix.servo.annotations.Monitor(name = "count-minus-replication", type = DataSourceType.COUNTER)
    private final AtomicLong myZoneCounter = new AtomicLong();

    /**
     * Increment the counter for the given statistic.
     */
    public void increment() {
        increment(false);
    }

    /**
     * Increment the counter for the given statistic based on whether this is
     * because of replication from other eureka servers or it is a eureka client
     * initiated action.
     *
     * @param isReplication
     *            true if this a replication, false otherwise.
     */
    public void increment(boolean isReplication) {
        counter.incrementAndGet();
        // The zone counter excludes replication traffic so it reflects only local client activity.
        if (!isReplication) {
            myZoneCounter.incrementAndGet();
        }
    }

    /**
     * Gets the statistic name of this monitor.
     *
     * @return the statistic name.
     */
    public String getName() {
        return name;
    }

    /**
     * Gets the zone specific statistic name of this monitor. Applies only for
     * AWS cloud.
     *
     * @return the zone specific statistic name.
     */
    public String getZoneSpecificName() {
        return myZoneCounterName;
    }

    /**
     * Gets the description of this statistic means.
     *
     * @return the description of this statistic means.
     */
    public String getDescription() {
        return description;
    }

    /**
     * Gets the actual counter value for this statistic.
     *
     * @return the long value representing the number of times this statistic
     *         has occurred.
     */
    public long getCount() {
        return counter.get();
    }

    /**
     * Gets the zone specific counter value for this statistic. This is
     * application only for AWS cloud environment.
     *
     * @return the long value representing the number of times this statistic
     *         has occurred.
     */
    public long getZoneSpecificCount() {
        return myZoneCounter.get();
    }

    /**
     * Register all statistics with <tt>Servo</tt>.
     */
    public static void registerAllStats() {
        for (EurekaMonitors c : EurekaMonitors.values()) {
            Monitors.registerObject(c.getName(), c);
        }
    }

    /**
     * Unregister all statistics from <tt>Servo</tt>.
     */
    public static void shutdown() {
        for (EurekaMonitors c : EurekaMonitors.values()) {
            DefaultMonitorRegistry.getInstance().unregister(Monitors.newObjectMonitor(c.getName(), c));
        }
    }
}
| 6,932 |
0 | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka/util | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka/util/batcher/TaskDispatcher.java | package com.netflix.eureka.util.batcher;
/**
* Task dispatcher takes task from clients, and delegates their execution to a configurable number of workers.
* The task can be processed one at a time or in batches. Only non-expired tasks are executed, and if a newer
* task with the same id is scheduled for execution, the old one is deleted. Lazy dispatch of work (only on demand)
* to workers, guarantees that data are always up to date, and no stale task processing takes place.
* <h3>Task processor</h3>
* A client of this component must provide an implementation of {@link TaskProcessor} interface, which will do
* the actual work of task processing. This implementation must be thread safe, as it is called concurrently by
* multiple threads.
* <h3>Execution modes</h3>
* To create non batched executor call {@link TaskDispatchers#createNonBatchingTaskDispatcher(String, int, int, long, long, TaskProcessor)}
* method. Batched executor is created by {@link TaskDispatchers#createBatchingTaskDispatcher(String, int, int, int, long, long, TaskProcessor)}.
*
* @author Tomasz Bak
*/
public interface TaskDispatcher<ID, T> {

    /**
     * Submit a task for execution. A newer task scheduled with the same id replaces
     * any not-yet-dispatched older copy, and tasks past their expiry time are dropped
     * rather than executed.
     *
     * @param expiryTime absolute time after which the task is considered stale
     */
    void process(ID id, T task, long expiryTime);

    /** Stop the dispatcher and its worker threads. */
    void shutdown();
}
| 6,933 |
0 | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka/util | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka/util/batcher/AcceptorExecutor.java | package com.netflix.eureka.util.batcher;
import java.util.ArrayList;
import java.util.Deque;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.concurrent.BlockingDeque;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingDeque;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.Semaphore;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import com.netflix.eureka.util.batcher.TaskProcessor.ProcessingResult;
import com.netflix.servo.annotations.DataSourceType;
import com.netflix.servo.annotations.Monitor;
import com.netflix.servo.monitor.MonitorConfig;
import com.netflix.servo.monitor.Monitors;
import com.netflix.servo.monitor.StatsTimer;
import com.netflix.servo.monitor.Timer;
import com.netflix.servo.stats.StatsConfig;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import static com.netflix.eureka.Names.METRIC_REPLICATION_PREFIX;
/**
* An active object with an internal thread accepting tasks from clients, and dispatching them to
* workers in a pull based manner. Workers explicitly request an item or a batch of items whenever they are
* available. This guarantees that data to be processed are always up to date, and no stale data processing is done.
*
* <h3>Task identification</h3>
* Each task passed for processing has a corresponding task id. This id is used to remove duplicates (replace
* older copies with newer ones).
*
* <h3>Re-processing</h3>
* If data processing by a worker failed, and the failure is transient in nature, the worker will put back the
* task(s) back to the {@link AcceptorExecutor}. This data will be merged with current workload, possibly discarded if
* a newer version has been already received.
*
* @author Tomasz Bak
*/
class AcceptorExecutor<ID, T> {
    private static final Logger logger = LoggerFactory.getLogger(AcceptorExecutor.class);

    // Used both as the servo monitor id and as part of the acceptor thread's name.
    private final String id;
    // Hard cap on the number of pending tasks; on overflow the oldest pending task is evicted.
    private final int maxBufferSize;
    // Upper bound on the number of tasks handed to a batch worker in one batch.
    private final int maxBatchingSize;
    // Minimum age (ms) the oldest pending task must reach before a non-full batch is dispatched.
    private final long maxBatchingDelay;
    private final AtomicBoolean isShutdown = new AtomicBoolean(false);
    // Entry queue for newly submitted tasks (see process()).
    private final BlockingQueue<TaskHolder<ID, T>> acceptorQueue = new LinkedBlockingQueue<>();
    // Tasks handed back by workers after transient failure/congestion; drained ahead of
    // the acceptor queue so retried work is not starved by new submissions.
    private final BlockingDeque<TaskHolder<ID, T>> reprocessQueue = new LinkedBlockingDeque<>();
    private final Thread acceptorThread;
    // Latest holder per task id; a newer submission for the same id overrides the older one.
    // NOTE(review): pendingTasks/processingOrder are mutated only by the acceptor thread,
    // but getQueueSize() reads pendingTasks from monitor threads without synchronization.
    private final Map<ID, TaskHolder<ID, T>> pendingTasks = new HashMap<>();
    // Dispatch order of the ids currently in pendingTasks (head = next to dispatch).
    private final Deque<ID> processingOrder = new LinkedList<>();
    // Each available permit represents one idle single-item worker awaiting an assignment.
    private final Semaphore singleItemWorkRequests = new Semaphore(0);
    private final BlockingQueue<TaskHolder<ID, T>> singleItemWorkQueue = new LinkedBlockingQueue<>();
    // Each available permit represents one idle batch worker awaiting an assignment.
    private final Semaphore batchWorkRequests = new Semaphore(0);
    private final BlockingQueue<List<TaskHolder<ID, T>>> batchWorkQueue = new LinkedBlockingQueue<>();
    // Injects dispatch delays after congestion/network failures reported via reprocess().
    private final TrafficShaper trafficShaper;
    /*
     * Metrics
     */
    @Monitor(name = METRIC_REPLICATION_PREFIX + "acceptedTasks", description = "Number of accepted tasks", type = DataSourceType.COUNTER)
    volatile long acceptedTasks;
    @Monitor(name = METRIC_REPLICATION_PREFIX + "replayedTasks", description = "Number of replayedTasks tasks", type = DataSourceType.COUNTER)
    volatile long replayedTasks;
    @Monitor(name = METRIC_REPLICATION_PREFIX + "expiredTasks", description = "Number of expired tasks", type = DataSourceType.COUNTER)
    volatile long expiredTasks;
    @Monitor(name = METRIC_REPLICATION_PREFIX + "overriddenTasks", description = "Number of overridden tasks", type = DataSourceType.COUNTER)
    volatile long overriddenTasks;
    @Monitor(name = METRIC_REPLICATION_PREFIX + "queueOverflows", description = "Number of queue overflows", type = DataSourceType.COUNTER)
    volatile long queueOverflows;
    // Distribution of dispatched batch sizes (recorded through a servo timer).
    private final Timer batchSizeMetric;

    /**
     * Creates the executor, wires up the traffic shaper and metrics, and immediately
     * starts the daemon acceptor thread that moves tasks from the input queues to
     * the worker queues.
     */
    AcceptorExecutor(String id,
                     int maxBufferSize,
                     int maxBatchingSize,
                     long maxBatchingDelay,
                     long congestionRetryDelayMs,
                     long networkFailureRetryMs) {
        this.id = id;
        this.maxBufferSize = maxBufferSize;
        this.maxBatchingSize = maxBatchingSize;
        this.maxBatchingDelay = maxBatchingDelay;
        this.trafficShaper = new TrafficShaper(congestionRetryDelayMs, networkFailureRetryMs);
        ThreadGroup threadGroup = new ThreadGroup("eurekaTaskExecutors");
        this.acceptorThread = new Thread(threadGroup, new AcceptorRunner(), "TaskAcceptor-" + id);
        this.acceptorThread.setDaemon(true);
        this.acceptorThread.start();
        // Batch-size distribution: 50/95/99/99.5 percentiles over a 1000-sample window.
        final double[] percentiles = {50.0, 95.0, 99.0, 99.5};
        final StatsConfig statsConfig = new StatsConfig.Builder()
                .withSampleSize(1000)
                .withPercentiles(percentiles)
                .withPublishStdDev(true)
                .build();
        final MonitorConfig config = MonitorConfig.builder(METRIC_REPLICATION_PREFIX + "batchSize").build();
        this.batchSizeMetric = new StatsTimer(config, statsConfig);
        try {
            Monitors.registerObject(id, this);
        } catch (Throwable e) {
            // Metrics are best-effort; never fail construction because of servo.
            logger.warn("Cannot register servo monitor for this object", e);
        }
    }

    /**
     * Accepts a new task for (eventual) dispatch. Tasks past {@code expiryTime}
     * are discarded by the acceptor thread before being handed to a worker.
     */
    void process(ID id, T task, long expiryTime) {
        acceptorQueue.add(new TaskHolder<ID, T>(id, task, expiryTime));
        acceptedTasks++;
    }

    /**
     * Returns a failed batch for retry and feeds the failure kind into the
     * traffic shaper so subsequent dispatch is delayed accordingly.
     */
    void reprocess(List<TaskHolder<ID, T>> holders, ProcessingResult processingResult) {
        reprocessQueue.addAll(holders);
        replayedTasks += holders.size();
        trafficShaper.registerFailure(processingResult);
    }

    /** Single-task variant of {@link #reprocess(List, ProcessingResult)}. */
    void reprocess(TaskHolder<ID, T> taskHolder, ProcessingResult processingResult) {
        reprocessQueue.add(taskHolder);
        replayedTasks++;
        trafficShaper.registerFailure(processingResult);
    }

    /**
     * Called by an idle single-item worker: registers the work request (one semaphore
     * permit) and returns the queue the worker should poll for its assignment.
     */
    BlockingQueue<TaskHolder<ID, T>> requestWorkItem() {
        singleItemWorkRequests.release();
        return singleItemWorkQueue;
    }

    /** Batch-worker counterpart of {@link #requestWorkItem()}. */
    BlockingQueue<List<TaskHolder<ID, T>>> requestWorkItems() {
        batchWorkRequests.release();
        return batchWorkQueue;
    }

    /** Idempotent shutdown: unregisters metrics and interrupts the acceptor thread. */
    void shutdown() {
        if (isShutdown.compareAndSet(false, true)) {
            Monitors.unregisterObject(id, this);
            acceptorThread.interrupt();
        }
    }

    @Monitor(name = METRIC_REPLICATION_PREFIX + "acceptorQueueSize", description = "Number of tasks waiting in the acceptor queue", type = DataSourceType.GAUGE)
    public long getAcceptorQueueSize() {
        return acceptorQueue.size();
    }

    @Monitor(name = METRIC_REPLICATION_PREFIX + "reprocessQueueSize", description = "Number of tasks waiting in the reprocess queue", type = DataSourceType.GAUGE)
    public long getReprocessQueueSize() {
        return reprocessQueue.size();
    }

    @Monitor(name = METRIC_REPLICATION_PREFIX + "queueSize", description = "Task queue size", type = DataSourceType.GAUGE)
    public long getQueueSize() {
        return pendingTasks.size();
    }

    @Monitor(name = METRIC_REPLICATION_PREFIX + "pendingJobRequests", description = "Number of worker threads awaiting job assignment", type = DataSourceType.GAUGE)
    public long getPendingJobRequests() {
        return singleItemWorkRequests.availablePermits() + batchWorkRequests.availablePermits();
    }

    @Monitor(name = METRIC_REPLICATION_PREFIX + "availableJobs", description = "Number of jobs ready to be taken by the workers", type = DataSourceType.GAUGE)
    public long workerTaskQueueSize() {
        return singleItemWorkQueue.size() + batchWorkQueue.size();
    }

    /**
     * The single thread that owns pendingTasks/processingOrder: drains the input
     * queues, applies traffic-shaping delays, and assigns work to waiting workers.
     */
    class AcceptorRunner implements Runnable {
        @Override
        public void run() {
            long scheduleTime = 0;
            while (!isShutdown.get()) {
                try {
                    drainInputQueues();
                    int totalItems = processingOrder.size();
                    long now = System.currentTimeMillis();
                    if (scheduleTime < now) {
                        // Previous schedule has passed; compute the next dispatch time,
                        // pushed into the future by any traffic-shaper delay.
                        scheduleTime = now + trafficShaper.transmissionDelay();
                    }
                    if (scheduleTime <= now) {
                        assignBatchWork();
                        assignSingleItemWork();
                    }
                    // If no worker is requesting data or there is a delay injected by the traffic shaper,
                    // sleep for some time to avoid tight loop.
                    if (totalItems == processingOrder.size()) {
                        Thread.sleep(10);
                    }
                } catch (InterruptedException ex) {
                    // Ignore
                } catch (Throwable e) {
                    // Safe-guard, so we never exit this loop in an uncontrolled way.
                    logger.warn("Discovery AcceptorThread error", e);
                }
            }
        }

        // True when the pending-task buffer has reached its configured capacity.
        private boolean isFull() {
            return pendingTasks.size() >= maxBufferSize;
        }

        /**
         * Moves tasks from the reprocess and acceptor queues into pendingTasks.
         * Loops while input keeps arriving, or parks briefly (via the inner poll)
         * while there is no pending work at all.
         */
        private void drainInputQueues() throws InterruptedException {
            do {
                drainReprocessQueue();
                drainAcceptorQueue();
                if (isShutdown.get()) {
                    break;
                }
                // If all queues are empty, block for a while on the acceptor queue
                if (reprocessQueue.isEmpty() && acceptorQueue.isEmpty() && pendingTasks.isEmpty()) {
                    TaskHolder<ID, T> taskHolder = acceptorQueue.poll(10, TimeUnit.MILLISECONDS);
                    if (taskHolder != null) {
                        appendTaskHolder(taskHolder);
                    }
                }
            } while (!reprocessQueue.isEmpty() || !acceptorQueue.isEmpty() || pendingTasks.isEmpty());
        }

        private void drainAcceptorQueue() {
            // Acceptor queue is drained unconditionally; appendTaskHolder evicts on overflow.
            while (!acceptorQueue.isEmpty()) {
                appendTaskHolder(acceptorQueue.poll());
            }
        }

        /**
         * Drains retried tasks newest-first into the FRONT of the processing order,
         * dropping expired tasks and tasks already superseded by a newer pending one.
         * Whatever does not fit once the buffer is full is discarded.
         */
        private void drainReprocessQueue() {
            long now = System.currentTimeMillis();
            while (!reprocessQueue.isEmpty() && !isFull()) {
                TaskHolder<ID, T> taskHolder = reprocessQueue.pollLast();
                ID id = taskHolder.getId();
                if (taskHolder.getExpiryTime() <= now) {
                    expiredTasks++;
                } else if (pendingTasks.containsKey(id)) {
                    // A newer task for this id is already pending; keep that one.
                    overriddenTasks++;
                } else {
                    pendingTasks.put(id, taskHolder);
                    // Retried tasks jump to the head of the dispatch order.
                    processingOrder.addFirst(id);
                }
            }
            if (isFull()) {
                queueOverflows += reprocessQueue.size();
                reprocessQueue.clear();
            }
        }

        /**
         * Adds a freshly accepted task; evicts the oldest pending task on overflow,
         * and counts an override when a task with the same id was already pending.
         */
        private void appendTaskHolder(TaskHolder<ID, T> taskHolder) {
            if (isFull()) {
                pendingTasks.remove(processingOrder.poll());
                queueOverflows++;
            }
            TaskHolder<ID, T> previousTask = pendingTasks.put(taskHolder.getId(), taskHolder);
            if (previousTask == null) {
                processingOrder.add(taskHolder.getId());
            } else {
                overriddenTasks++;
            }
        }

        /**
         * Hands the oldest non-expired task to a waiting single-item worker, if any.
         * If every pending task turned out to be expired, the consumed worker permit
         * is released again so the worker's request is not lost.
         */
        void assignSingleItemWork() {
            if (!processingOrder.isEmpty()) {
                if (singleItemWorkRequests.tryAcquire(1)) {
                    long now = System.currentTimeMillis();
                    while (!processingOrder.isEmpty()) {
                        ID id = processingOrder.poll();
                        TaskHolder<ID, T> holder = pendingTasks.remove(id);
                        if (holder.getExpiryTime() > now) {
                            singleItemWorkQueue.add(holder);
                            return;
                        }
                        expiredTasks++;
                    }
                    singleItemWorkRequests.release();
                }
            }
        }

        /**
         * Hands up to maxBatchingSize non-expired tasks to a waiting batch worker,
         * but only when a batch is "ready" (see hasEnoughTasksForNextBatch()).
         * Releases the worker permit back if the candidate batch was all expired.
         */
        void assignBatchWork() {
            if (hasEnoughTasksForNextBatch()) {
                if (batchWorkRequests.tryAcquire(1)) {
                    long now = System.currentTimeMillis();
                    int len = Math.min(maxBatchingSize, processingOrder.size());
                    List<TaskHolder<ID, T>> holders = new ArrayList<>(len);
                    while (holders.size() < len && !processingOrder.isEmpty()) {
                        ID id = processingOrder.poll();
                        TaskHolder<ID, T> holder = pendingTasks.remove(id);
                        if (holder.getExpiryTime() > now) {
                            holders.add(holder);
                        } else {
                            expiredTasks++;
                        }
                    }
                    if (holders.isEmpty()) {
                        batchWorkRequests.release();
                    } else {
                        // Records the batch size through a timer metric (the time unit is nominal).
                        batchSizeMetric.record(holders.size(), TimeUnit.MILLISECONDS);
                        batchWorkQueue.add(holders);
                    }
                }
            }
        }

        /**
         * A batch is ready when the buffer is full, or when the oldest pending task
         * has waited at least maxBatchingDelay ms (gives small batches a chance to grow).
         */
        private boolean hasEnoughTasksForNextBatch() {
            if (processingOrder.isEmpty()) {
                return false;
            }
            if (pendingTasks.size() >= maxBufferSize) {
                return true;
            }
            TaskHolder<ID, T> nextHolder = pendingTasks.get(processingOrder.peek());
            long delay = System.currentTimeMillis() - nextHolder.getSubmitTimestamp();
            return delay >= maxBatchingDelay;
        }
    }
}
| 6,934 |
0 | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka/util | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka/util/batcher/TaskDispatchers.java | package com.netflix.eureka.util.batcher;
/**
* See {@link TaskDispatcher} for an overview.
*
* @author Tomasz Bak
*/
public class TaskDispatchers {

    /**
     * Creates a dispatcher that hands tasks to workers one at a time
     * (the underlying acceptor is configured with a batch size of 1).
     *
     * @param maxBufferSize         maximum number of tasks buffered before overflow eviction
     * @param workerCount           number of worker threads processing tasks
     * @param maxBatchingDelay      max time (ms) a task may wait before dispatch
     * @param congestionRetryDelayMs delay applied after a congestion failure
     * @param networkFailureRetryMs  delay applied after a transient network failure
     * @param taskProcessor         client-supplied processor invoked per task
     */
    public static <ID, T> TaskDispatcher<ID, T> createNonBatchingTaskDispatcher(String id,
                                                                                int maxBufferSize,
                                                                                int workerCount,
                                                                                long maxBatchingDelay,
                                                                                long congestionRetryDelayMs,
                                                                                long networkFailureRetryMs,
                                                                                TaskProcessor<T> taskProcessor) {
        final AcceptorExecutor<ID, T> acceptorExecutor = new AcceptorExecutor<>(
                id, maxBufferSize, 1, maxBatchingDelay, congestionRetryDelayMs, networkFailureRetryMs
        );
        final TaskExecutors<ID, T> taskExecutor = TaskExecutors.singleItemExecutors(id, workerCount, taskProcessor, acceptorExecutor);
        return newDispatcher(acceptorExecutor, taskExecutor);
    }

    /**
     * Creates a dispatcher that hands tasks to workers in batches of up to
     * {@code workloadSize} tasks.
     *
     * @param workloadSize maximum number of tasks dispatched to a worker in one batch
     * @see #createNonBatchingTaskDispatcher(String, int, int, long, long, long, TaskProcessor)
     */
    public static <ID, T> TaskDispatcher<ID, T> createBatchingTaskDispatcher(String id,
                                                                             int maxBufferSize,
                                                                             int workloadSize,
                                                                             int workerCount,
                                                                             long maxBatchingDelay,
                                                                             long congestionRetryDelayMs,
                                                                             long networkFailureRetryMs,
                                                                             TaskProcessor<T> taskProcessor) {
        final AcceptorExecutor<ID, T> acceptorExecutor = new AcceptorExecutor<>(
                id, maxBufferSize, workloadSize, maxBatchingDelay, congestionRetryDelayMs, networkFailureRetryMs
        );
        final TaskExecutors<ID, T> taskExecutor = TaskExecutors.batchExecutors(id, workerCount, taskProcessor, acceptorExecutor);
        return newDispatcher(acceptorExecutor, taskExecutor);
    }

    /**
     * Wires an acceptor/executor pair behind the {@link TaskDispatcher} interface.
     * Shared by both factory methods to avoid duplicating the anonymous class;
     * {@code shutdown()} stops both halves.
     */
    private static <ID, T> TaskDispatcher<ID, T> newDispatcher(final AcceptorExecutor<ID, T> acceptorExecutor,
                                                               final TaskExecutors<ID, T> taskExecutor) {
        return new TaskDispatcher<ID, T>() {
            @Override
            public void process(ID id, T task, long expiryTime) {
                acceptorExecutor.process(id, task, expiryTime);
            }

            @Override
            public void shutdown() {
                acceptorExecutor.shutdown();
                taskExecutor.shutdown();
            }
        };
    }
}
| 6,935 |
0 | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka/util | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka/util/batcher/TaskHolder.java | package com.netflix.eureka.util.batcher;
/**
* @author Tomasz Bak
*/
/**
 * Immutable pairing of a task with its id, its expiry deadline, and the
 * wall-clock time at which it was handed to the dispatcher.
 */
class TaskHolder<ID, T> {

    private final ID id;
    private final T task;
    private final long expiryTime;
    private final long submitTimestamp;

    TaskHolder(ID id, T task, long expiryTime) {
        this.id = id;
        this.task = task;
        this.expiryTime = expiryTime;
        // Capture submission time at construction for queue-latency metrics.
        this.submitTimestamp = System.currentTimeMillis();
    }

    /** Identifier under which newer submissions override this task. */
    public ID getId() {
        return id;
    }

    /** The wrapped task payload. */
    public T getTask() {
        return task;
    }

    /** Absolute time (epoch ms) after which this task should be discarded. */
    public long getExpiryTime() {
        return expiryTime;
    }

    /** Epoch ms at which this holder was created. */
    public long getSubmitTimestamp() {
        return submitTimestamp;
    }
}
| 6,936 |
0 | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka/util | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka/util/batcher/TaskExecutors.java | package com.netflix.eureka.util.batcher;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import com.netflix.eureka.util.batcher.TaskProcessor.ProcessingResult;
import com.netflix.servo.annotations.DataSourceType;
import com.netflix.servo.annotations.Monitor;
import com.netflix.servo.monitor.MonitorConfig;
import com.netflix.servo.monitor.Monitors;
import com.netflix.servo.monitor.StatsTimer;
import com.netflix.servo.stats.StatsConfig;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import static com.netflix.eureka.Names.METRIC_REPLICATION_PREFIX;
/**
* {@link TaskExecutors} instance holds a number of worker threads that cooperate with {@link AcceptorExecutor}.
* Each worker sends a job request to {@link AcceptorExecutor} whenever it is available, and processes it once
* provided with a task(s).
*
* @author Tomasz Bak
*/
class TaskExecutors<ID, T> {
    private static final Logger logger = LoggerFactory.getLogger(TaskExecutors.class);

    // Metrics registered with servo, keyed by executor name so they can be unregistered
    // on shutdown.
    // NOTE(review): plain HashMap mutated from the static factories and shutdown()
    // without synchronization — confirm executors are only created/stopped from a
    // single thread.
    private static final Map<String, TaskExecutorMetrics> registeredMonitors = new HashMap<>();

    private final AtomicBoolean isShutdown;
    private final List<Thread> workerThreads;

    /**
     * Creates {@code workerCount} daemon worker threads from the given factory and
     * starts them immediately. The shared {@code isShutdown} flag is also observed
     * by the worker runnables.
     */
    TaskExecutors(WorkerRunnableFactory<ID, T> workerRunnableFactory, int workerCount, AtomicBoolean isShutdown) {
        this.isShutdown = isShutdown;
        this.workerThreads = new ArrayList<>();
        ThreadGroup threadGroup = new ThreadGroup("eurekaTaskExecutors");
        for (int i = 0; i < workerCount; i++) {
            WorkerRunnable<ID, T> runnable = workerRunnableFactory.create(i);
            Thread workerThread = new Thread(threadGroup, runnable, runnable.getWorkerName());
            workerThreads.add(workerThread);
            workerThread.setDaemon(true);
            workerThread.start();
        }
    }

    /** Idempotent shutdown: interrupts all workers and unregisters all servo monitors. */
    void shutdown() {
        if (isShutdown.compareAndSet(false, true)) {
            for (Thread workerThread : workerThreads) {
                workerThread.interrupt();
            }
            registeredMonitors.forEach(Monitors::unregisterObject);
        }
    }

    /** Factory for executors whose workers process one task at a time. */
    static <ID, T> TaskExecutors<ID, T> singleItemExecutors(final String name,
                                                            int workerCount,
                                                            final TaskProcessor<T> processor,
                                                            final AcceptorExecutor<ID, T> acceptorExecutor) {
        final AtomicBoolean isShutdown = new AtomicBoolean();
        final TaskExecutorMetrics metrics = new TaskExecutorMetrics(name);
        registeredMonitors.put(name, metrics);
        return new TaskExecutors<>(idx -> new SingleTaskWorkerRunnable<>("TaskNonBatchingWorker-" + name + '-' + idx, isShutdown, metrics, processor, acceptorExecutor), workerCount, isShutdown);
    }

    /** Factory for executors whose workers process batches of tasks. */
    static <ID, T> TaskExecutors<ID, T> batchExecutors(final String name,
                                                       int workerCount,
                                                       final TaskProcessor<T> processor,
                                                       final AcceptorExecutor<ID, T> acceptorExecutor) {
        final AtomicBoolean isShutdown = new AtomicBoolean();
        final TaskExecutorMetrics metrics = new TaskExecutorMetrics(name);
        registeredMonitors.put(name, metrics);
        return new TaskExecutors<>(idx -> new BatchWorkerRunnable<>("TaskBatchingWorker-" + name + '-' + idx, isShutdown, metrics, processor, acceptorExecutor), workerCount, isShutdown);
    }

    /** Servo counters/timers describing task execution outcomes and queueing latency. */
    static class TaskExecutorMetrics {
        @Monitor(name = METRIC_REPLICATION_PREFIX + "numberOfSuccessfulExecutions", description = "Number of successful task executions", type = DataSourceType.COUNTER)
        volatile long numberOfSuccessfulExecutions;
        @Monitor(name = METRIC_REPLICATION_PREFIX + "numberOfTransientErrors", description = "Number of transient task execution errors", type = DataSourceType.COUNTER)
        volatile long numberOfTransientError;
        @Monitor(name = METRIC_REPLICATION_PREFIX + "numberOfPermanentErrors", description = "Number of permanent task execution errors", type = DataSourceType.COUNTER)
        volatile long numberOfPermanentError;
        @Monitor(name = METRIC_REPLICATION_PREFIX + "numberOfCongestionIssues", description = "Number of congestion issues during task execution", type = DataSourceType.COUNTER)
        volatile long numberOfCongestionIssues;
        // Time a task spent waiting between submission and pickup by a worker.
        final StatsTimer taskWaitingTimeForProcessing;

        TaskExecutorMetrics(String id) {
            // 50/95/99/99.5 percentiles over a 1000-sample window.
            final double[] percentiles = {50.0, 95.0, 99.0, 99.5};
            final StatsConfig statsConfig = new StatsConfig.Builder()
                    .withSampleSize(1000)
                    .withPercentiles(percentiles)
                    .withPublishStdDev(true)
                    .build();
            final MonitorConfig config = MonitorConfig.builder(METRIC_REPLICATION_PREFIX + "executionTime").build();
            taskWaitingTimeForProcessing = new StatsTimer(config, statsConfig);
            try {
                Monitors.registerObject(id, this);
            } catch (Throwable e) {
                // Metrics are best-effort; never fail construction because of servo.
                logger.warn("Cannot register servo monitor for this object", e);
            }
        }

        /** Increments the counter matching {@code result} by {@code count}. */
        void registerTaskResult(ProcessingResult result, int count) {
            switch (result) {
                case Success:
                    numberOfSuccessfulExecutions += count;
                    break;
                case TransientError:
                    numberOfTransientError += count;
                    break;
                case PermanentError:
                    numberOfPermanentError += count;
                    break;
                case Congestion:
                    numberOfCongestionIssues += count;
                    break;
            }
        }

        /** Records how long a single task waited since submission. */
        <ID, T> void registerExpiryTime(TaskHolder<ID, T> holder) {
            taskWaitingTimeForProcessing.record(System.currentTimeMillis() - holder.getSubmitTimestamp(), TimeUnit.MILLISECONDS);
        }

        /** Records the waiting time of every task in a batch. */
        <ID, T> void registerExpiryTimes(List<TaskHolder<ID, T>> holders) {
            long now = System.currentTimeMillis();
            for (TaskHolder<ID, T> holder : holders) {
                taskWaitingTimeForProcessing.record(now - holder.getSubmitTimestamp(), TimeUnit.MILLISECONDS);
            }
        }
    }

    /** Creates the runnable for worker number {@code idx}. */
    interface WorkerRunnableFactory<ID, T> {
        WorkerRunnable<ID, T> create(int idx);
    }

    /** Common state for both worker flavors; subclasses implement the poll/process loop. */
    abstract static class WorkerRunnable<ID, T> implements Runnable {
        final String workerName;
        final AtomicBoolean isShutdown;
        final TaskExecutorMetrics metrics;
        final TaskProcessor<T> processor;
        final AcceptorExecutor<ID, T> taskDispatcher;

        WorkerRunnable(String workerName,
                       AtomicBoolean isShutdown,
                       TaskExecutorMetrics metrics,
                       TaskProcessor<T> processor,
                       AcceptorExecutor<ID, T> taskDispatcher) {
            this.workerName = workerName;
            this.isShutdown = isShutdown;
            this.metrics = metrics;
            this.processor = processor;
            this.taskDispatcher = taskDispatcher;
        }

        String getWorkerName() {
            return workerName;
        }
    }

    /** Worker that requests and processes whole batches from the acceptor. */
    static class BatchWorkerRunnable<ID, T> extends WorkerRunnable<ID, T> {
        BatchWorkerRunnable(String workerName,
                            AtomicBoolean isShutdown,
                            TaskExecutorMetrics metrics,
                            TaskProcessor<T> processor,
                            AcceptorExecutor<ID, T> acceptorExecutor) {
            super(workerName, isShutdown, metrics, processor, acceptorExecutor);
        }

        @Override
        public void run() {
            try {
                while (!isShutdown.get()) {
                    List<TaskHolder<ID, T>> holders = getWork();
                    metrics.registerExpiryTimes(holders);
                    List<T> tasks = getTasksOf(holders);
                    // Congestion and transient errors send the whole batch back for retry;
                    // permanent errors discard it.
                    ProcessingResult result = processor.process(tasks);
                    switch (result) {
                        case Success:
                            break;
                        case Congestion:
                        case TransientError:
                            taskDispatcher.reprocess(holders, result);
                            break;
                        case PermanentError:
                            logger.warn("Discarding {} tasks of {} due to permanent error", holders.size(), workerName);
                    }
                    metrics.registerTaskResult(result, tasks.size());
                }
            } catch (InterruptedException e) {
                // Ignore
            } catch (Throwable e) {
                // Safe-guard, so we never exit this loop in an uncontrolled way.
                logger.warn("Discovery WorkerThread error", e);
            }
        }

        /**
         * Registers a work request and polls until a batch arrives or shutdown is flagged.
         * NOTE(review): on shutdown an empty list is returned and still handed to the
         * processor once — confirm downstream processors tolerate an empty task list.
         */
        private List<TaskHolder<ID, T>> getWork() throws InterruptedException {
            BlockingQueue<List<TaskHolder<ID, T>>> workQueue = taskDispatcher.requestWorkItems();
            List<TaskHolder<ID, T>> result;
            do {
                result = workQueue.poll(1, TimeUnit.SECONDS);
            } while (!isShutdown.get() && result == null);
            return (result == null) ? new ArrayList<>() : result;
        }

        /** Unwraps the task payloads from their holders, preserving order. */
        private List<T> getTasksOf(List<TaskHolder<ID, T>> holders) {
            List<T> tasks = new ArrayList<>(holders.size());
            for (TaskHolder<ID, T> holder : holders) {
                tasks.add(holder.getTask());
            }
            return tasks;
        }
    }

    /** Worker that requests and processes one task at a time. */
    static class SingleTaskWorkerRunnable<ID, T> extends WorkerRunnable<ID, T> {
        SingleTaskWorkerRunnable(String workerName,
                                 AtomicBoolean isShutdown,
                                 TaskExecutorMetrics metrics,
                                 TaskProcessor<T> processor,
                                 AcceptorExecutor<ID, T> acceptorExecutor) {
            super(workerName, isShutdown, metrics, processor, acceptorExecutor);
        }

        @Override
        public void run() {
            try {
                while (!isShutdown.get()) {
                    BlockingQueue<TaskHolder<ID, T>> workQueue = taskDispatcher.requestWorkItem();
                    TaskHolder<ID, T> taskHolder;
                    // Poll until a task arrives; return immediately on shutdown.
                    while ((taskHolder = workQueue.poll(1, TimeUnit.SECONDS)) == null) {
                        if (isShutdown.get()) {
                            return;
                        }
                    }
                    metrics.registerExpiryTime(taskHolder);
                    // taskHolder is always non-null here (the loop above only exits on a
                    // non-null poll); this guard is redundant but harmless.
                    if (taskHolder != null) {
                        ProcessingResult result = processor.process(taskHolder.getTask());
                        switch (result) {
                            case Success:
                                break;
                            case Congestion:
                            case TransientError:
                                taskDispatcher.reprocess(taskHolder, result);
                                break;
                            case PermanentError:
                                logger.warn("Discarding a task of {} due to permanent error", workerName);
                        }
                        metrics.registerTaskResult(result, 1);
                    }
                }
            } catch (InterruptedException e) {
                // Ignore
            } catch (Throwable e) {
                // Safe-guard, so we never exit this loop in an uncontrolled way.
                logger.warn("Discovery WorkerThread error", e);
            }
        }
    }
}
| 6,937 |
0 | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka/util | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka/util/batcher/TaskProcessor.java | package com.netflix.eureka.util.batcher;
import java.util.List;
/**
* An interface to be implemented by clients for task execution.
*
* @author Tomasz Bak
*/
public interface TaskProcessor<T> {

    /**
     * A processed task/task list ends up in one of the following states:
     * <ul>
     * <li>{@code Success} processing finished successfully</li>
     * <li>{@code Congestion} the recipient is overloaded; the task shall be retried later,
     *     after a dispatcher-imposed delay</li>
     * <li>{@code TransientError} processing failed, but shall be retried later</li>
     * <li>{@code PermanentError} processing failed, and is non recoverable</li>
     * </ul>
     */
    enum ProcessingResult {
        Success, Congestion, TransientError, PermanentError
    }

    /**
     * In non-batched mode a single task is processed at a time.
     */
    ProcessingResult process(T task);

    /**
     * For batched mode a collection of tasks is run at a time. The result is provided for the aggregated result,
     * and all tasks are handled in the same way according to what is returned (for example are rescheduled, if the
     * error is transient).
     */
    ProcessingResult process(List<T> tasks);
}
| 6,938 |
0 | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka/util | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka/util/batcher/TrafficShaper.java | /*
* Copyright 2015 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.eureka.util.batcher;
import com.netflix.eureka.util.batcher.TaskProcessor.ProcessingResult;
/**
* {@link TrafficShaper} provides admission control policy prior to dispatching tasks to workers.
* It reacts to events coming via reprocess requests (transient failures, congestion), and delays the processing
* depending on this feedback.
*
* @author Tomasz Bak
*/
class TrafficShaper {

    /**
     * Upper bound on delay provided by configuration.
     */
    private static final long MAX_DELAY = 30 * 1000;

    /** Sentinel meaning "no failure of this kind is currently pending". */
    private static final long NO_FAILURE = -1;

    private final long congestionRetryDelayMs;
    private final long networkFailureRetryMs;

    // Previously these defaulted to 0, which the delay logic does not treat as the
    // "no failure" sentinel: the first transmissionDelay() call interpreted epoch 0
    // as a real failure timestamp and did needless work before resetting. Initialize
    // explicitly to the sentinel so the initial state is consistent.
    private volatile long lastCongestionError = NO_FAILURE;
    private volatile long lastNetworkFailure = NO_FAILURE;

    /**
     * @param congestionRetryDelayMs delay applied after a congestion failure (capped at 30s)
     * @param networkFailureRetryMs  delay applied after a transient network failure (capped at 30s)
     */
    TrafficShaper(long congestionRetryDelayMs, long networkFailureRetryMs) {
        this.congestionRetryDelayMs = Math.min(MAX_DELAY, congestionRetryDelayMs);
        this.networkFailureRetryMs = Math.min(MAX_DELAY, networkFailureRetryMs);
    }

    /**
     * Records the timestamp of a congestion or transient failure; other results
     * (success, permanent error) are ignored.
     */
    void registerFailure(ProcessingResult processingResult) {
        if (processingResult == ProcessingResult.Congestion) {
            lastCongestionError = System.currentTimeMillis();
        } else if (processingResult == ProcessingResult.TransientError) {
            lastNetworkFailure = System.currentTimeMillis();
        }
    }

    /**
     * Returns how long (ms) dispatch should still be delayed, or 0 when no delay
     * applies. Congestion takes precedence over network failure; a failure whose
     * retry window has elapsed is cleared back to the sentinel.
     */
    long transmissionDelay() {
        if (lastCongestionError == NO_FAILURE && lastNetworkFailure == NO_FAILURE) {
            return 0;
        }
        long now = System.currentTimeMillis();
        if (lastCongestionError != NO_FAILURE) {
            long congestionDelay = now - lastCongestionError;
            if (congestionDelay >= 0 && congestionDelay < congestionRetryDelayMs) {
                return congestionRetryDelayMs - congestionDelay;
            }
            lastCongestionError = NO_FAILURE;
        }
        if (lastNetworkFailure != NO_FAILURE) {
            long failureDelay = now - lastNetworkFailure;
            if (failureDelay >= 0 && failureDelay < networkFailureRetryMs) {
                return networkFailureRetryMs - failureDelay;
            }
            lastNetworkFailure = NO_FAILURE;
        }
        return 0;
    }
}
| 6,939 |
0 | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka/resources/DefaultServerCodecs.java | package com.netflix.eureka.resources;
import com.netflix.appinfo.EurekaAccept;
import com.netflix.discovery.converters.wrappers.CodecWrapper;
import com.netflix.discovery.converters.wrappers.CodecWrappers;
import com.netflix.discovery.converters.wrappers.EncoderWrapper;
import com.netflix.eureka.EurekaServerConfig;
import com.netflix.eureka.registry.Key;
import javax.inject.Inject;
import javax.inject.Singleton;
/**
* @author David Liu
*/
@Singleton
public class DefaultServerCodecs implements ServerCodecs {

    protected final CodecWrapper fullJsonCodec;
    protected final CodecWrapper compactJsonCodec;
    protected final CodecWrapper fullXmlCodec;
    protected final CodecWrapper compactXmlCodec;

    /**
     * Resolves the JSON codec named in the server config, falling back to the
     * legacy Jackson codec when the configured name is unknown.
     */
    private static CodecWrapper getFullJson(EurekaServerConfig serverConfig) {
        CodecWrapper configured = CodecWrappers.getCodec(serverConfig.getJsonCodecName());
        if (configured != null) {
            return configured;
        }
        return CodecWrappers.getCodec(CodecWrappers.LegacyJacksonJson.class);
    }

    /**
     * Resolves the XML codec named in the server config, falling back to the
     * XStream codec when the configured name is unknown.
     */
    private static CodecWrapper getFullXml(EurekaServerConfig serverConfig) {
        CodecWrapper configured = CodecWrappers.getCodec(serverConfig.getXmlCodecName());
        if (configured != null) {
            return configured;
        }
        return CodecWrappers.getCodec(CodecWrappers.XStreamXml.class);
    }

    @Inject
    public DefaultServerCodecs(EurekaServerConfig serverConfig) {
        this(getFullJson(serverConfig),
                CodecWrappers.getCodec(CodecWrappers.JacksonJsonMini.class),
                getFullXml(serverConfig),
                CodecWrappers.getCodec(CodecWrappers.JacksonXmlMini.class));
    }

    protected DefaultServerCodecs(CodecWrapper fullJsonCodec,
                                  CodecWrapper compactJsonCodec,
                                  CodecWrapper fullXmlCodec,
                                  CodecWrapper compactXmlCodec) {
        this.fullJsonCodec = fullJsonCodec;
        this.compactJsonCodec = compactJsonCodec;
        this.fullXmlCodec = fullXmlCodec;
        this.compactXmlCodec = compactXmlCodec;
    }

    @Override
    public CodecWrapper getFullJsonCodec() {
        return fullJsonCodec;
    }

    @Override
    public CodecWrapper getCompactJsonCodec() {
        return compactJsonCodec;
    }

    @Override
    public CodecWrapper getFullXmlCodec() {
        return fullXmlCodec;
    }

    // Name ("...Codecr") is fixed by the ServerCodecs interface and cannot be
    // corrected here without changing it there.
    @Override
    public CodecWrapper getCompactXmlCodecr() {
        return compactXmlCodec;
    }

    /** Picks the JSON or XML codec (XML is the default for non-JSON key types). */
    @Override
    public EncoderWrapper getEncoder(Key.KeyType keyType, boolean compact) {
        switch (keyType) {
            case JSON:
                return compact ? compactJsonCodec : fullJsonCodec;
            case XML:
            default:
                return compact ? compactXmlCodec : fullXmlCodec;
        }
    }

    /** Maps the eureka accept header value to the compact/full codec choice. */
    @Override
    public EncoderWrapper getEncoder(Key.KeyType keyType, EurekaAccept eurekaAccept) {
        switch (eurekaAccept) {
            case compact:
                return getEncoder(keyType, true);
            case full:
            default:
                return getEncoder(keyType, false);
        }
    }

    public static Builder builder() {
        return new Builder();
    }

    /** Fluent builder; any codec left unset is filled with the library default at build time. */
    public static class Builder {
        protected CodecWrapper fullJsonCodec;
        protected CodecWrapper compactJsonCodec;
        protected CodecWrapper fullXmlCodec;
        protected CodecWrapper compactXmlCodec;

        protected Builder() {}

        public Builder withFullJsonCodec(CodecWrapper fullJsonCodec) {
            this.fullJsonCodec = fullJsonCodec;
            return this;
        }

        public Builder withCompactJsonCodec(CodecWrapper compactJsonCodec) {
            this.compactJsonCodec = compactJsonCodec;
            return this;
        }

        public Builder withFullXmlCodec(CodecWrapper fullXmlCodec) {
            this.fullXmlCodec = fullXmlCodec;
            return this;
        }

        public Builder withCompactXmlCodec(CodecWrapper compactXmlEncoder) {
            this.compactXmlCodec = compactXmlEncoder;
            return this;
        }

        /** Seeds the full codecs from the server config's codec names. */
        public Builder withEurekaServerConfig(EurekaServerConfig config) {
            fullJsonCodec = CodecWrappers.getCodec(config.getJsonCodecName());
            fullXmlCodec = CodecWrappers.getCodec(config.getXmlCodecName());
            return this;
        }

        public ServerCodecs build() {
            // Fill any unset codec with its default before constructing.
            fullJsonCodec = fullJsonCodec == null
                    ? CodecWrappers.getCodec(CodecWrappers.LegacyJacksonJson.class) : fullJsonCodec;
            compactJsonCodec = compactJsonCodec == null
                    ? CodecWrappers.getCodec(CodecWrappers.JacksonJsonMini.class) : compactJsonCodec;
            fullXmlCodec = fullXmlCodec == null
                    ? CodecWrappers.getCodec(CodecWrappers.XStreamXml.class) : fullXmlCodec;
            compactXmlCodec = compactXmlCodec == null
                    ? CodecWrappers.getCodec(CodecWrappers.JacksonXmlMini.class) : compactXmlCodec;
            return new DefaultServerCodecs(fullJsonCodec, compactJsonCodec, fullXmlCodec, compactXmlCodec);
        }
    }
}
| 6,940 |
0 | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka/resources/ServerInfoResource.java | package com.netflix.eureka.resources;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.netflix.appinfo.InstanceInfo;
import com.netflix.eureka.EurekaServerContext;
import com.netflix.eureka.EurekaServerContextHolder;
import com.netflix.eureka.registry.PeerAwareInstanceRegistry;
import javax.inject.Inject;
import javax.ws.rs.GET;
import javax.ws.rs.Path;
import javax.ws.rs.Produces;
import javax.ws.rs.core.Response;
import java.util.Map;
/**
* @author David Liu
*/
@Produces("application/json")
@Path("/serverinfo")
public class ServerInfoResource {

    // ObjectMapper is thread-safe once configured; share one instance instead of
    // allocating a new mapper on every request.
    private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper();

    private final PeerAwareInstanceRegistry registry;

    @Inject
    ServerInfoResource(EurekaServerContext server) {
        this.registry = server.getRegistry();
    }

    public ServerInfoResource() {
        this(EurekaServerContextHolder.getInstance().getServerContext());
    }

    /**
     * Returns the registry's current snapshot of overridden instance statuses as a
     * JSON object (presumably keyed by instance id — TODO confirm against
     * overriddenInstanceStatusesSnapshot()).
     *
     * @throws Exception if JSON serialization fails
     */
    @GET
    @Path("statusoverrides")
    public Response getOverrides() throws Exception {
        Map<String, InstanceInfo.InstanceStatus> result = registry.overriddenInstanceStatusesSnapshot();
        String responseStr = OBJECT_MAPPER.writeValueAsString(result);
        return Response.ok(responseStr).build();
    }
}
| 6,941 |
0 | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka/resources/VIPResource.java | /*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.eureka.resources;
import com.netflix.appinfo.EurekaAccept;
import com.netflix.eureka.EurekaServerContext;
import com.netflix.eureka.EurekaServerContextHolder;
import com.netflix.eureka.registry.Key;
import javax.inject.Inject;
import javax.ws.rs.GET;
import javax.ws.rs.HeaderParam;
import javax.ws.rs.Path;
import javax.ws.rs.PathParam;
import javax.ws.rs.Produces;
import javax.ws.rs.core.Response;
/**
* A <em>jersey</em> resource for retrieving all instances with a given VIP address.
*
* @author Karthik Ranganathan
*
*/
@Path("/{version}/vips")
@Produces({"application/xml", "application/json"})
public class VIPResource extends AbstractVIPResource {

    @Inject
    VIPResource(EurekaServerContext server) {
        super(server);
    }

    public VIPResource() {
        this(EurekaServerContextHolder.getInstance().getServerContext());
    }

    /**
     * Returns all registered instances for the given VIP address, encoded according
     * to the Accept and X-Eureka-Accept headers.
     */
    @GET
    @Path("{vipAddress}")
    public Response statusUpdate(@PathParam("version") String version,
                                 @PathParam("vipAddress") String vipAddress,
                                 @HeaderParam("Accept") final String acceptHeader,
                                 @HeaderParam(EurekaAccept.HTTP_X_EUREKA_ACCEPT) String eurekaAccept) {
        final EurekaAccept accept = EurekaAccept.fromString(eurekaAccept);
        return getVipResponse(version, vipAddress, acceptHeader, accept, Key.EntityType.VIP);
    }
}
| 6,942 |
0 | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka/resources/StatusResource.java | /*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.eureka.resources;
import javax.inject.Inject;
import javax.ws.rs.GET;
import javax.ws.rs.Path;
import javax.ws.rs.Produces;
import java.text.SimpleDateFormat;
import java.util.Date;
import com.netflix.eureka.EurekaServerContext;
import com.netflix.eureka.EurekaServerContextHolder;
import com.netflix.eureka.util.StatusInfo;
import com.netflix.eureka.util.StatusUtil;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* An utility class for exposing information about peer nodes.
*
* @author Karthik Ranganathan, Greg Kim
*/
@Path("/{version}/status")
@Produces({"application/xml", "application/json"})
public class StatusResource {
    private static final Logger logger = LoggerFactory.getLogger(StatusResource.class);
    private static final String DATE_FORMAT = "yyyy-MM-dd'T'HH:mm:ss Z";

    private final StatusUtil statusUtil;

    @Inject
    StatusResource(EurekaServerContext server) {
        this.statusUtil = new StatusUtil(server);
    }

    public StatusResource() {
        this(EurekaServerContextHolder.getInstance().getServerContext());
    }

    /** Returns the server status information assembled by {@link StatusUtil}. */
    @GET
    public StatusInfo getStatusInfo() {
        return statusUtil.getStatusInfo();
    }

    /**
     * Formats the current time using {@value #DATE_FORMAT}. A fresh SimpleDateFormat
     * is created per call because SimpleDateFormat is not thread-safe.
     */
    public static String getCurrentTimeAsString() {
        return new SimpleDateFormat(DATE_FORMAT).format(new Date());
    }
}
| 6,943 |
0 | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka/resources/SecureVIPResource.java | /*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.eureka.resources;
import com.netflix.appinfo.EurekaAccept;
import com.netflix.eureka.EurekaServerContext;
import com.netflix.eureka.EurekaServerContextHolder;
import com.netflix.eureka.registry.Key;
import javax.inject.Inject;
import javax.ws.rs.GET;
import javax.ws.rs.HeaderParam;
import javax.ws.rs.Path;
import javax.ws.rs.PathParam;
import javax.ws.rs.Produces;
import javax.ws.rs.core.Response;
/**
 * A <em>jersey</em> resource for retrieving all instances registered under a
 * given secure VIP address.
 *
 * @author Karthik Ranganathan
 */
@Path("/{version}/svips")
@Produces({"application/xml", "application/json"})
public class SecureVIPResource extends AbstractVIPResource {

    @Inject
    SecureVIPResource(EurekaServerContext server) {
        super(server);
    }

    /** No-arg constructor for the servlet container; resolves the server context via the holder. */
    public SecureVIPResource() {
        this(EurekaServerContextHolder.getInstance().getServerContext());
    }

    /**
     * Looks up the instances registered under the given secure VIP address.
     * NOTE(review): despite its name, this method is a read-only lookup, not a status update.
     *
     * @param version the API version from the request path.
     * @param svipAddress the secure VIP address to look up.
     * @param acceptHeader the Accept header indicating JSON or XML payload.
     * @param eurekaAccept optional eureka accept extension header.
     * @return a response containing the instances for the secure VIP address.
     */
    @GET
    @Path("{svipAddress}")
    public Response statusUpdate(@PathParam("version") String version,
                                 @PathParam("svipAddress") String svipAddress,
                                 @HeaderParam("Accept") final String acceptHeader,
                                 @HeaderParam(EurekaAccept.HTTP_X_EUREKA_ACCEPT) String eurekaAccept) {
        final EurekaAccept accept = EurekaAccept.fromString(eurekaAccept);
        return getVipResponse(version, svipAddress, acceptHeader, accept, Key.EntityType.SVIP);
    }
}
| 6,944 |
0 | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka/resources/InstanceResource.java | /*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.eureka.resources;
import javax.ws.rs.DELETE;
import javax.ws.rs.GET;
import javax.ws.rs.HeaderParam;
import javax.ws.rs.PUT;
import javax.ws.rs.Path;
import javax.ws.rs.Produces;
import javax.ws.rs.QueryParam;
import javax.ws.rs.core.Context;
import javax.ws.rs.core.MultivaluedMap;
import javax.ws.rs.core.Response;
import javax.ws.rs.core.Response.Status;
import javax.ws.rs.core.UriInfo;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import com.netflix.appinfo.InstanceInfo;
import com.netflix.appinfo.InstanceInfo.InstanceStatus;
import com.netflix.eureka.EurekaServerConfig;
import com.netflix.eureka.registry.PeerAwareInstanceRegistry;
import com.netflix.eureka.cluster.PeerEurekaNode;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * A <em>jersey</em> resource that handles operations for a particular instance.
 *
 * @author Karthik Ranganathan, Greg Kim
 *
 */
@Produces({"application/xml", "application/json"})
public class InstanceResource {
    private static final Logger logger = LoggerFactory
            .getLogger(InstanceResource.class);

    private final PeerAwareInstanceRegistry registry;
    private final EurekaServerConfig serverConfig;
    // Unique id of the instance this resource operates on.
    private final String id;
    // Parent resource; supplies the application name for registry lookups.
    private final ApplicationResource app;

    InstanceResource(ApplicationResource app, String id, EurekaServerConfig serverConfig, PeerAwareInstanceRegistry registry) {
        this.app = app;
        this.id = id;
        this.serverConfig = serverConfig;
        this.registry = registry;
    }

    /**
     * Get requests returns the information about the instance's
     * {@link InstanceInfo}.
     *
     * @return response containing information about the the instance's
     *         {@link InstanceInfo}.
     */
    @GET
    public Response getInstanceInfo() {
        InstanceInfo appInfo = registry
                .getInstanceByAppAndId(app.getName(), id);
        if (appInfo != null) {
            logger.debug("Found: {} - {}", app.getName(), id);
            return Response.ok(appInfo).build();
        } else {
            logger.debug("Not Found: {} - {}", app.getName(), id);
            return Response.status(Status.NOT_FOUND).build();
        }
    }

    /**
     * A put request for renewing lease from a client instance.
     *
     * @param isReplication
     *            a header parameter containing information whether this is
     *            replicated from other nodes.
     * @param overriddenStatus
     *            overridden status if any.
     * @param status
     *            the {@link InstanceStatus} of the instance.
     * @param lastDirtyTimestamp
     *            last timestamp when this instance information was updated.
     * @return response indicating whether the operation was a success or
     *         failure. A 404 asks the client to re-register; a 409 (from
     *         {@link #validateDirtyTimestamp(Long, boolean)}) carries the
     *         registry's newer copy back to a replicating peer.
     */
    @PUT
    public Response renewLease(
            @HeaderParam(PeerEurekaNode.HEADER_REPLICATION) String isReplication,
            @QueryParam("overriddenstatus") String overriddenStatus,
            @QueryParam("status") String status,
            @QueryParam("lastDirtyTimestamp") String lastDirtyTimestamp) {
        boolean isFromReplicaNode = "true".equals(isReplication);
        boolean isSuccess = registry.renew(app.getName(), id, isFromReplicaNode);

        // Not found in the registry, immediately ask for a register
        if (!isSuccess) {
            logger.warn("Not Found (Renew): {} - {}", app.getName(), id);
            return Response.status(Status.NOT_FOUND).build();
        }
        // Check if we need to sync based on dirty time stamp, the client
        // instance might have changed some value
        Response response;
        if (lastDirtyTimestamp != null && serverConfig.shouldSyncWhenTimestampDiffers()) {
            // Long.valueOf throws NumberFormatException on a malformed timestamp,
            // which surfaces to the client as a server error.
            response = this.validateDirtyTimestamp(Long.valueOf(lastDirtyTimestamp), isFromReplicaNode);
            // Store the overridden status since the validation found out the node that replicates wins
            if (response.getStatus() == Response.Status.NOT_FOUND.getStatusCode()
                    && (overriddenStatus != null)
                    && !(InstanceStatus.UNKNOWN.name().equals(overriddenStatus))
                    && isFromReplicaNode) {
                // NOTE(review): this call site uses app.getAppName() while every other
                // call site in this class uses app.getName() — confirm both resolve to
                // the same application name.
                registry.storeOverriddenStatusIfRequired(app.getAppName(), id, InstanceStatus.valueOf(overriddenStatus));
            }
        } else {
            response = Response.ok().build();
        }
        logger.debug("Found (Renew): {} - {}; reply status={}", app.getName(), id, response.getStatus());
        return response;
    }

    /**
     * Handles {@link InstanceStatus} updates.
     *
     * <p>
     * The status updates are normally done for administrative purposes to
     * change the instance status between {@link InstanceStatus#UP} and
     * {@link InstanceStatus#OUT_OF_SERVICE} to select or remove instances for
     * receiving traffic.
     * </p>
     *
     * @param newStatus
     *            the new status of the instance.
     * @param isReplication
     *            a header parameter containing information whether this is
     *            replicated from other nodes.
     * @param lastDirtyTimestamp
     *            last timestamp when this instance information was updated.
     * @return response indicating whether the operation was a success or
     *         failure.
     */
    @PUT
    @Path("status")
    public Response statusUpdate(
            @QueryParam("value") String newStatus,
            @HeaderParam(PeerEurekaNode.HEADER_REPLICATION) String isReplication,
            @QueryParam("lastDirtyTimestamp") String lastDirtyTimestamp) {
        try {
            if (registry.getInstanceByAppAndId(app.getName(), id) == null) {
                logger.warn("Instance not found: {}/{}", app.getName(), id);
                return Response.status(Status.NOT_FOUND).build();
            }
            boolean isSuccess = registry.statusUpdate(app.getName(), id,
                    InstanceStatus.valueOf(newStatus), lastDirtyTimestamp,
                    "true".equals(isReplication));

            if (isSuccess) {
                logger.info("Status updated: {} - {} - {}", app.getName(), id, newStatus);
                return Response.ok().build();
            } else {
                logger.warn("Unable to update status: {} - {} - {}", app.getName(), id, newStatus);
                return Response.serverError().build();
            }
        } catch (Throwable e) {
            // Pass the exception so the stack trace is not lost.
            logger.error("Error updating instance {} for status {}", id,
                    newStatus, e);
            return Response.serverError().build();
        }
    }

    /**
     * Removes status override for an instance, set with
     * {@link #statusUpdate(String, String, String)}.
     *
     * @param isReplication
     *            a header parameter containing information whether this is
     *            replicated from other nodes.
     * @param lastDirtyTimestamp
     *            last timestamp when this instance information was updated.
     * @return response indicating whether the operation was a success or
     *         failure.
     */
    @DELETE
    @Path("status")
    public Response deleteStatusUpdate(
            @HeaderParam(PeerEurekaNode.HEADER_REPLICATION) String isReplication,
            @QueryParam("value") String newStatusValue,
            @QueryParam("lastDirtyTimestamp") String lastDirtyTimestamp) {
        try {
            if (registry.getInstanceByAppAndId(app.getName(), id) == null) {
                logger.warn("Instance not found: {}/{}", app.getName(), id);
                return Response.status(Status.NOT_FOUND).build();
            }

            // Absent value means "reset to UNKNOWN" (i.e. drop the override).
            InstanceStatus newStatus = newStatusValue == null ? InstanceStatus.UNKNOWN : InstanceStatus.valueOf(newStatusValue);
            boolean isSuccess = registry.deleteStatusOverride(app.getName(), id,
                    newStatus, lastDirtyTimestamp, "true".equals(isReplication));

            if (isSuccess) {
                logger.info("Status override removed: {} - {}", app.getName(), id);
                return Response.ok().build();
            } else {
                logger.warn("Unable to remove status override: {} - {}", app.getName(), id);
                return Response.serverError().build();
            }
        } catch (Throwable e) {
            // Pass the exception so the stack trace is not lost.
            logger.error("Error removing instance's {} status override", id, e);
            return Response.serverError().build();
        }
    }

    /**
     * Updates user-specific metadata information. If the key is already available, its value will be overwritten.
     * If not, it will be added.
     * @param uriInfo - URI information generated by jersey.
     * @return response indicating whether the operation was a success or
     *         failure.
     */
    @PUT
    @Path("metadata")
    public Response updateMetadata(@Context UriInfo uriInfo) {
        try {
            InstanceInfo instanceInfo = registry.getInstanceByAppAndId(app.getName(), id);
            // ReplicationInstance information is not found, generate an error
            if (instanceInfo == null) {
                logger.warn("Cannot find instance while updating metadata for instance {}/{}", app.getName(), id);
                return Response.status(Status.NOT_FOUND).build();
            }
            MultivaluedMap<String, String> queryParams = uriInfo.getQueryParameters();
            Set<Entry<String, List<String>>> entrySet = queryParams.entrySet();
            Map<String, String> metadataMap = instanceInfo.getMetadata();
            // Metadata map is empty - create a new map
            if (Collections.emptyMap().getClass().equals(metadataMap.getClass())) {
                metadataMap = new ConcurrentHashMap<>();
                InstanceInfo.Builder builder = new InstanceInfo.Builder(instanceInfo);
                builder.setMetadata(metadataMap);
                instanceInfo = builder.build();
            }
            // Add all the user supplied entries to the map
            for (Entry<String, List<String>> entry : entrySet) {
                metadataMap.put(entry.getKey(), entry.getValue().get(0));
            }
            // Re-register locally only (isReplication=false) so peers pick up the change.
            registry.register(instanceInfo, false);
            return Response.ok().build();
        } catch (Throwable e) {
            logger.error("Error updating metadata for instance {}", id, e);
            return Response.serverError().build();
        }
    }

    /**
     * Handles cancellation of leases for this particular instance.
     *
     * @param isReplication
     *            a header parameter containing information whether this is
     *            replicated from other nodes.
     * @return response indicating whether the operation was a success or
     *         failure.
     */
    @DELETE
    public Response cancelLease(
            @HeaderParam(PeerEurekaNode.HEADER_REPLICATION) String isReplication) {
        try {
            boolean isSuccess = registry.cancel(app.getName(), id,
                    "true".equals(isReplication));

            if (isSuccess) {
                logger.debug("Found (Cancel): {} - {}", app.getName(), id);
                return Response.ok().build();
            } else {
                logger.info("Not Found (Cancel): {} - {}", app.getName(), id);
                return Response.status(Status.NOT_FOUND).build();
            }
        } catch (Throwable e) {
            logger.error("Error (cancel): {} - {}", app.getName(), id, e);
            return Response.serverError().build();
        }
    }

    /**
     * Compares the client-supplied dirty timestamp with the registry's copy and
     * decides how the two sides should reconcile.
     *
     * @param lastDirtyTimestamp the timestamp reported by the caller.
     * @param isReplication whether the caller is a replicating peer node.
     * @return 404 when the caller's copy is newer (forces a re-register),
     *         409 with the registry's {@link InstanceInfo} when a replicating
     *         peer's copy is older, 200 otherwise.
     */
    private Response validateDirtyTimestamp(Long lastDirtyTimestamp,
                                            boolean isReplication) {
        InstanceInfo appInfo = registry.getInstanceByAppAndId(app.getName(), id, false);
        if (appInfo != null) {
            if ((lastDirtyTimestamp != null) && (!lastDirtyTimestamp.equals(appInfo.getLastDirtyTimestamp()))) {
                Object[] args = {id, appInfo.getLastDirtyTimestamp(), lastDirtyTimestamp, isReplication};

                if (lastDirtyTimestamp > appInfo.getLastDirtyTimestamp()) {
                    logger.debug(
                            "Time to sync, since the last dirty timestamp differs -"
                                    + " ReplicationInstance id : {},Registry : {} Incoming: {} Replication: {}",
                            args);
                    return Response.status(Status.NOT_FOUND).build();
                } else if (appInfo.getLastDirtyTimestamp() > lastDirtyTimestamp) {
                    // In the case of replication, send the current instance info in the registry for the
                    // replicating node to sync itself with this one.
                    if (isReplication) {
                        logger.debug(
                                "Time to sync, since the last dirty timestamp differs -"
                                        + " ReplicationInstance id : {},Registry : {} Incoming: {} Replication: {}",
                                args);
                        return Response.status(Status.CONFLICT).entity(appInfo).build();
                    } else {
                        return Response.ok().build();
                    }
                }
            }
        }
        return Response.ok().build();
    }
}
| 6,945 |
0 | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka/resources/PeerReplicationResource.java | /*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.eureka.resources;
import javax.inject.Inject;
import javax.ws.rs.POST;
import javax.ws.rs.Path;
import javax.ws.rs.Produces;
import javax.ws.rs.core.Response;
import javax.ws.rs.core.Response.Status;
import com.netflix.appinfo.InstanceInfo;
import com.netflix.eureka.EurekaServerContext;
import com.netflix.eureka.EurekaServerConfig;
import com.netflix.eureka.EurekaServerContextHolder;
import com.netflix.eureka.cluster.protocol.ReplicationInstance;
import com.netflix.eureka.cluster.protocol.ReplicationInstanceResponse;
import com.netflix.eureka.cluster.protocol.ReplicationInstanceResponse.Builder;
import com.netflix.eureka.cluster.protocol.ReplicationList;
import com.netflix.eureka.cluster.protocol.ReplicationListResponse;
import com.netflix.eureka.registry.PeerAwareInstanceRegistry;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* A <em>jersey</em> resource that handles requests for replication purposes.
*
* @author Karthik Ranganathan
*
*/
@Path("/{version}/peerreplication")
@Produces({"application/xml", "application/json"})
public class PeerReplicationResource {
private static final Logger logger = LoggerFactory.getLogger(PeerReplicationResource.class);
private static final String REPLICATION = "true";
private final EurekaServerConfig serverConfig;
private final PeerAwareInstanceRegistry registry;
@Inject
PeerReplicationResource(EurekaServerContext server) {
this.serverConfig = server.getServerConfig();
this.registry = server.getRegistry();
}
public PeerReplicationResource() {
this(EurekaServerContextHolder.getInstance().getServerContext());
}
/**
* Process batched replication events from peer eureka nodes.
*
* <p>
* The batched events are delegated to underlying resources to generate a
* {@link ReplicationListResponse} containing the individual responses to the batched events
* </p>
*
* @param replicationList
* The List of replication events from peer eureka nodes
* @return A batched response containing the information about the responses of individual events
*/
@Path("batch")
@POST
public Response batchReplication(ReplicationList replicationList) {
try {
ReplicationListResponse batchResponse = new ReplicationListResponse();
for (ReplicationInstance instanceInfo : replicationList.getReplicationList()) {
try {
batchResponse.addResponse(dispatch(instanceInfo));
} catch (Exception e) {
batchResponse.addResponse(new ReplicationInstanceResponse(Status.INTERNAL_SERVER_ERROR.getStatusCode(), null));
logger.error("{} request processing failed for batch item {}/{}",
instanceInfo.getAction(), instanceInfo.getAppName(), instanceInfo.getId(), e);
}
}
return Response.ok(batchResponse).build();
} catch (Throwable e) {
logger.error("Cannot execute batch Request", e);
return Response.status(Status.INTERNAL_SERVER_ERROR).build();
}
}
private ReplicationInstanceResponse dispatch(ReplicationInstance instanceInfo) {
ApplicationResource applicationResource = createApplicationResource(instanceInfo);
InstanceResource resource = createInstanceResource(instanceInfo, applicationResource);
String lastDirtyTimestamp = toString(instanceInfo.getLastDirtyTimestamp());
String overriddenStatus = toString(instanceInfo.getOverriddenStatus());
String instanceStatus = toString(instanceInfo.getStatus());
Builder singleResponseBuilder = new Builder();
switch (instanceInfo.getAction()) {
case Register:
singleResponseBuilder = handleRegister(instanceInfo, applicationResource);
break;
case Heartbeat:
singleResponseBuilder = handleHeartbeat(serverConfig, resource, lastDirtyTimestamp, overriddenStatus, instanceStatus);
break;
case Cancel:
singleResponseBuilder = handleCancel(resource);
break;
case StatusUpdate:
singleResponseBuilder = handleStatusUpdate(instanceInfo, resource);
break;
case DeleteStatusOverride:
singleResponseBuilder = handleDeleteStatusOverride(instanceInfo, resource);
break;
}
return singleResponseBuilder.build();
}
/* Visible for testing */ ApplicationResource createApplicationResource(ReplicationInstance instanceInfo) {
return new ApplicationResource(instanceInfo.getAppName(), serverConfig, registry);
}
/* Visible for testing */ InstanceResource createInstanceResource(ReplicationInstance instanceInfo,
ApplicationResource applicationResource) {
return new InstanceResource(applicationResource, instanceInfo.getId(), serverConfig, registry);
}
private static Builder handleRegister(ReplicationInstance instanceInfo, ApplicationResource applicationResource) {
applicationResource.addInstance(instanceInfo.getInstanceInfo(), REPLICATION);
return new Builder().setStatusCode(Status.OK.getStatusCode());
}
private static Builder handleCancel(InstanceResource resource) {
Response response = resource.cancelLease(REPLICATION);
return new Builder().setStatusCode(response.getStatus());
}
private static Builder handleHeartbeat(EurekaServerConfig config, InstanceResource resource, String lastDirtyTimestamp, String overriddenStatus, String instanceStatus) {
Response response = resource.renewLease(REPLICATION, overriddenStatus, instanceStatus, lastDirtyTimestamp);
int responseStatus = response.getStatus();
Builder responseBuilder = new Builder().setStatusCode(responseStatus);
if ("false".equals(config.getExperimental("bugfix.934"))) {
if (responseStatus == Status.OK.getStatusCode() && response.getEntity() != null) {
responseBuilder.setResponseEntity((InstanceInfo) response.getEntity());
}
} else {
if ((responseStatus == Status.OK.getStatusCode() || responseStatus == Status.CONFLICT.getStatusCode())
&& response.getEntity() != null) {
responseBuilder.setResponseEntity((InstanceInfo) response.getEntity());
}
}
return responseBuilder;
}
private static Builder handleStatusUpdate(ReplicationInstance instanceInfo, InstanceResource resource) {
Response response = resource.statusUpdate(instanceInfo.getStatus(), REPLICATION, toString(instanceInfo.getLastDirtyTimestamp()));
return new Builder().setStatusCode(response.getStatus());
}
private static Builder handleDeleteStatusOverride(ReplicationInstance instanceInfo, InstanceResource resource) {
Response response = resource.deleteStatusUpdate(REPLICATION, instanceInfo.getStatus(),
instanceInfo.getLastDirtyTimestamp().toString());
return new Builder().setStatusCode(response.getStatus());
}
private static <T> String toString(T value) {
if (value == null) {
return null;
}
return value.toString();
}
}
| 6,946 |
0 | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka/resources/ApplicationsResource.java | /*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.eureka.resources;
import javax.annotation.Nullable;
import javax.inject.Inject;
import javax.ws.rs.GET;
import javax.ws.rs.HeaderParam;
import javax.ws.rs.Path;
import javax.ws.rs.PathParam;
import javax.ws.rs.Produces;
import javax.ws.rs.QueryParam;
import javax.ws.rs.core.Context;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;
import javax.ws.rs.core.Response.Status;
import javax.ws.rs.core.UriInfo;
import java.util.Arrays;
import com.netflix.appinfo.EurekaAccept;
import com.netflix.eureka.EurekaServerContext;
import com.netflix.eureka.EurekaServerContextHolder;
import com.netflix.eureka.registry.AbstractInstanceRegistry;
import com.netflix.eureka.EurekaServerConfig;
import com.netflix.eureka.registry.PeerAwareInstanceRegistry;
import com.netflix.eureka.Version;
import com.netflix.eureka.registry.ResponseCache;
import com.netflix.eureka.registry.Key.KeyType;
import com.netflix.eureka.registry.ResponseCacheImpl;
import com.netflix.eureka.registry.Key;
import com.netflix.eureka.util.EurekaMonitors;
/**
* A <em>jersey</em> resource that handles request related to all
* {@link com.netflix.discovery.shared.Applications}.
*
* @author Karthik Ranganathan, Greg Kim
*
*/
@Path("/{version}/apps")
@Produces({"application/xml", "application/json"})
public class ApplicationsResource {
private static final String HEADER_ACCEPT = "Accept";
private static final String HEADER_ACCEPT_ENCODING = "Accept-Encoding";
private static final String HEADER_CONTENT_ENCODING = "Content-Encoding";
private static final String HEADER_CONTENT_TYPE = "Content-Type";
private static final String HEADER_GZIP_VALUE = "gzip";
private static final String HEADER_JSON_VALUE = "json";
private final EurekaServerConfig serverConfig;
private final PeerAwareInstanceRegistry registry;
private final ResponseCache responseCache;
@Inject
ApplicationsResource(EurekaServerContext eurekaServer) {
this.serverConfig = eurekaServer.getServerConfig();
this.registry = eurekaServer.getRegistry();
this.responseCache = registry.getResponseCache();
}
public ApplicationsResource() {
this(EurekaServerContextHolder.getInstance().getServerContext());
}
/**
* Gets information about a particular {@link com.netflix.discovery.shared.Application}.
*
* @param version
* the version of the request.
* @param appId
* the unique application identifier (which is the name) of the
* application.
* @return information about a particular application.
*/
@Path("{appId}")
public ApplicationResource getApplicationResource(
@PathParam("version") String version,
@PathParam("appId") String appId) {
CurrentRequestVersion.set(Version.toEnum(version));
try {
return new ApplicationResource(appId, serverConfig, registry);
} finally {
CurrentRequestVersion.remove();
}
}
/**
* Get information about all {@link com.netflix.discovery.shared.Applications}.
*
* @param version the version of the request.
* @param acceptHeader the accept header to indicate whether to serve JSON or XML data.
* @param acceptEncoding the accept header to indicate whether to serve compressed or uncompressed data.
* @param eurekaAccept an eureka accept extension, see {@link com.netflix.appinfo.EurekaAccept}
* @param uriInfo the {@link java.net.URI} information of the request made.
* @param regionsStr A comma separated list of remote regions from which the instances will also be returned.
* The applications returned from the remote region can be limited to the applications
* returned by {@link EurekaServerConfig#getRemoteRegionAppWhitelist(String)}
*
* @return a response containing information about all {@link com.netflix.discovery.shared.Applications}
* from the {@link AbstractInstanceRegistry}.
*/
@GET
public Response getContainers(@PathParam("version") String version,
@HeaderParam(HEADER_ACCEPT) String acceptHeader,
@HeaderParam(HEADER_ACCEPT_ENCODING) String acceptEncoding,
@HeaderParam(EurekaAccept.HTTP_X_EUREKA_ACCEPT) String eurekaAccept,
@Context UriInfo uriInfo,
@Nullable @QueryParam("regions") String regionsStr) {
boolean isRemoteRegionRequested = null != regionsStr && !regionsStr.isEmpty();
String[] regions = null;
if (!isRemoteRegionRequested) {
EurekaMonitors.GET_ALL.increment();
} else {
regions = regionsStr.toLowerCase().split(",");
Arrays.sort(regions); // So we don't have different caches for same regions queried in different order.
EurekaMonitors.GET_ALL_WITH_REMOTE_REGIONS.increment();
}
// Check if the server allows the access to the registry. The server can
// restrict access if it is not
// ready to serve traffic depending on various reasons.
if (!registry.shouldAllowAccess(isRemoteRegionRequested)) {
return Response.status(Status.FORBIDDEN).build();
}
CurrentRequestVersion.set(Version.toEnum(version));
KeyType keyType = Key.KeyType.JSON;
String returnMediaType = MediaType.APPLICATION_JSON;
if (acceptHeader == null || !acceptHeader.contains(HEADER_JSON_VALUE)) {
keyType = Key.KeyType.XML;
returnMediaType = MediaType.APPLICATION_XML;
}
Key cacheKey = new Key(Key.EntityType.Application,
ResponseCacheImpl.ALL_APPS,
keyType, CurrentRequestVersion.get(), EurekaAccept.fromString(eurekaAccept), regions
);
Response response;
if (acceptEncoding != null && acceptEncoding.contains(HEADER_GZIP_VALUE)) {
response = Response.ok(responseCache.getGZIP(cacheKey))
.header(HEADER_CONTENT_ENCODING, HEADER_GZIP_VALUE)
.header(HEADER_CONTENT_TYPE, returnMediaType)
.build();
} else {
response = Response.ok(responseCache.get(cacheKey))
.build();
}
CurrentRequestVersion.remove();
return response;
}
/**
* Get information about all delta changes in {@link com.netflix.discovery.shared.Applications}.
*
* <p>
* The delta changes represent the registry information change for a period
* as configured by
* {@link EurekaServerConfig#getRetentionTimeInMSInDeltaQueue()}. The
* changes that can happen in a registry include
* <em>Registrations,Cancels,Status Changes and Expirations</em>. Normally
* the changes to the registry are infrequent and hence getting just the
* delta will be much more efficient than getting the complete registry.
* </p>
*
* <p>
* Since the delta information is cached over a period of time, the requests
* may return the same data multiple times within the window configured by
* {@link EurekaServerConfig#getRetentionTimeInMSInDeltaQueue()}.The clients
* are expected to handle this duplicate information.
* <p>
*
* @param version the version of the request.
* @param acceptHeader the accept header to indicate whether to serve JSON or XML data.
* @param acceptEncoding the accept header to indicate whether to serve compressed or uncompressed data.
* @param eurekaAccept an eureka accept extension, see {@link com.netflix.appinfo.EurekaAccept}
* @param uriInfo the {@link java.net.URI} information of the request made.
* @return response containing the delta information of the
* {@link AbstractInstanceRegistry}.
*/
@Path("delta")
@GET
public Response getContainerDifferential(
@PathParam("version") String version,
@HeaderParam(HEADER_ACCEPT) String acceptHeader,
@HeaderParam(HEADER_ACCEPT_ENCODING) String acceptEncoding,
@HeaderParam(EurekaAccept.HTTP_X_EUREKA_ACCEPT) String eurekaAccept,
@Context UriInfo uriInfo, @Nullable @QueryParam("regions") String regionsStr) {
boolean isRemoteRegionRequested = null != regionsStr && !regionsStr.isEmpty();
// If the delta flag is disabled in discovery or if the lease expiration
// has been disabled, redirect clients to get all instances
if ((serverConfig.shouldDisableDelta()) || (!registry.shouldAllowAccess(isRemoteRegionRequested))) {
return Response.status(Status.FORBIDDEN).build();
}
String[] regions = null;
if (!isRemoteRegionRequested) {
EurekaMonitors.GET_ALL_DELTA.increment();
} else {
regions = regionsStr.toLowerCase().split(",");
Arrays.sort(regions); // So we don't have different caches for same regions queried in different order.
EurekaMonitors.GET_ALL_DELTA_WITH_REMOTE_REGIONS.increment();
}
CurrentRequestVersion.set(Version.toEnum(version));
KeyType keyType = Key.KeyType.JSON;
String returnMediaType = MediaType.APPLICATION_JSON;
if (acceptHeader == null || !acceptHeader.contains(HEADER_JSON_VALUE)) {
keyType = Key.KeyType.XML;
returnMediaType = MediaType.APPLICATION_XML;
}
Key cacheKey = new Key(Key.EntityType.Application,
ResponseCacheImpl.ALL_APPS_DELTA,
keyType, CurrentRequestVersion.get(), EurekaAccept.fromString(eurekaAccept), regions
);
final Response response;
if (acceptEncoding != null && acceptEncoding.contains(HEADER_GZIP_VALUE)) {
response = Response.ok(responseCache.getGZIP(cacheKey))
.header(HEADER_CONTENT_ENCODING, HEADER_GZIP_VALUE)
.header(HEADER_CONTENT_TYPE, returnMediaType)
.build();
} else {
response = Response.ok(responseCache.get(cacheKey)).build();
}
CurrentRequestVersion.remove();
return response;
}
}
| 6,947 |
0 | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka/resources/InstancesResource.java | /*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.eureka.resources;
import javax.inject.Inject;
import javax.ws.rs.GET;
import javax.ws.rs.Path;
import javax.ws.rs.PathParam;
import javax.ws.rs.Produces;
import javax.ws.rs.core.Response;
import javax.ws.rs.core.Response.Status;
import java.util.List;
import com.netflix.appinfo.InstanceInfo;
import com.netflix.eureka.EurekaServerContext;
import com.netflix.eureka.EurekaServerContextHolder;
import com.netflix.eureka.registry.PeerAwareInstanceRegistry;
import com.netflix.eureka.Version;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * A <em>jersey</em> resource that gets information about a particular instance.
 *
 * @author Karthik Ranganathan, Greg Kim
 *
 */
@Produces({"application/xml", "application/json"})
@Path("/{version}/instances")
public class InstancesResource {
    private static final Logger logger = LoggerFactory
            .getLogger(InstancesResource.class);

    private final PeerAwareInstanceRegistry registry;

    @Inject
    InstancesResource(EurekaServerContext server) {
        this.registry = server.getRegistry();
    }

    /** No-arg constructor for the servlet container; resolves the server context via the holder. */
    public InstancesResource() {
        this(EurekaServerContextHolder.getInstance().getServerContext());
    }

    /**
     * Looks up an instance by its id across all applications.
     *
     * @param version the API version from the request path.
     * @param id the unique instance id.
     * @return 200 with the first matching {@link InstanceInfo}, or 404 if none found.
     */
    @GET
    @Path("{id}")
    public Response getById(@PathParam("version") String version,
                            @PathParam("id") String id) {
        CurrentRequestVersion.set(Version.toEnum(version));
        List<InstanceInfo> list;
        try {
            list = registry.getInstancesById(id);
        } finally {
            // Always clear the thread-local, even if the registry lookup throws,
            // to avoid leaking the version into subsequent requests on this thread.
            CurrentRequestVersion.remove();
        }
        if (list != null && !list.isEmpty()) {
            return Response.ok(list.get(0)).build();
        } else {
            logger.info("Not Found: {}", id);
            return Response.status(Status.NOT_FOUND).build();
        }
    }
}
| 6,948 |
0 | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka/resources/AbstractVIPResource.java | /*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.eureka.resources;
import javax.ws.rs.core.Response;
import com.netflix.appinfo.EurekaAccept;
import com.netflix.eureka.EurekaServerContext;
import com.netflix.eureka.EurekaServerContextHolder;
import com.netflix.eureka.Version;
import com.netflix.eureka.registry.PeerAwareInstanceRegistry;
import com.netflix.eureka.registry.ResponseCache;
import com.netflix.eureka.registry.Key;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Abstract class for the common functionality of a VIP/SVIP resource.
 *
 * @author Nitesh Kant (nkant@netflix.com)
 */
abstract class AbstractVIPResource {

    private static final Logger logger = LoggerFactory.getLogger(AbstractVIPResource.class);

    private final PeerAwareInstanceRegistry registry;
    private final ResponseCache responseCache;

    AbstractVIPResource(EurekaServerContext server) {
        this.registry = server.getRegistry();
        this.responseCache = registry.getResponseCache();
    }

    AbstractVIPResource() {
        this(EurekaServerContextHolder.getInstance().getServerContext());
    }

    /**
     * Serves the cached payload for a VIP/SVIP entity.
     *
     * @param version the API version of the request, used as part of the cache key.
     * @param entityName the VIP/SVIP address being queried.
     * @param acceptHeader the HTTP Accept header; anything not containing "json"
     *                     is served as XML.
     * @param eurekaAccept full vs. compact payload selector.
     * @param entityType whether this is a VIP or SVIP lookup.
     * @return 200 with the cached payload, 404 if no payload exists for the key,
     *         or 403 while the registry is not yet open for traffic.
     */
    protected Response getVipResponse(String version, String entityName, String acceptHeader,
                                      EurekaAccept eurekaAccept, Key.EntityType entityType) {
        if (!registry.shouldAllowAccess(false)) {
            return Response.status(Response.Status.FORBIDDEN).build();
        }
        CurrentRequestVersion.set(Version.toEnum(version));
        Key.KeyType keyType = Key.KeyType.JSON;
        if (acceptHeader == null || !acceptHeader.contains("json")) {
            keyType = Key.KeyType.XML;
        }
        Key cacheKey = new Key(
                entityType,
                entityName,
                keyType,
                CurrentRequestVersion.get(),
                eurekaAccept
        );
        String payLoad;
        try {
            payLoad = responseCache.get(cacheKey);
        } finally {
            // Clear the thread-local even if the cache lookup throws, so pooled
            // request threads do not carry a stale version into the next request.
            CurrentRequestVersion.remove();
        }
        if (payLoad != null) {
            logger.debug("Found: {}", entityName);
            return Response.ok(payLoad).build();
        } else {
            logger.debug("Not Found: {}", entityName);
            return Response.status(Response.Status.NOT_FOUND).build();
        }
    }
}
| 6,949 |
0 | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka/resources/ASGResource.java | /*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.eureka.resources;
import javax.inject.Inject;
import javax.ws.rs.HeaderParam;
import javax.ws.rs.PUT;
import javax.ws.rs.Path;
import javax.ws.rs.PathParam;
import javax.ws.rs.Produces;
import javax.ws.rs.QueryParam;
import javax.ws.rs.core.Response;
import com.netflix.eureka.EurekaServerContext;
import com.netflix.eureka.EurekaServerContextHolder;
import com.netflix.eureka.cluster.PeerEurekaNode;
import com.netflix.eureka.aws.AwsAsgUtil;
import com.netflix.eureka.registry.AwsInstanceRegistry;
import com.netflix.eureka.registry.PeerAwareInstanceRegistry;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * A <em>jersey</em> resource for handling updates to {@link ASGStatus}.
 *
 * <p>
 * The ASG status is used in <em>AWS</em> environments to automatically
 * enable/disable instance registration based on the status of the ASG. This is
 * particularly useful in <em>red/black</em> deployment scenarios where it is
 * easy to switch to a new version and incase of problems switch back to the old
 * versions of the deployment.
 * </p>
 *
 * <p>
 * During such a scenario, when an ASG is disabled and the instances go away and
 * get refilled by an ASG - which is normal in AWS environments,the instances
 * automatically go in the {@link com.netflix.appinfo.InstanceInfo.InstanceStatus#OUT_OF_SERVICE} state when they
 * are refilled by the ASG and if the ASG is disabled by as indicated by a flag
 * in the ASG as described in {@link AwsAsgUtil#isASGEnabled}
 * </p>
 *
 * @author Karthik Ranganathan
 *
 */
@Path("/{version}/asg")
@Produces({"application/xml", "application/json"})
public class ASGResource {
    private static final Logger logger = LoggerFactory.getLogger(ASGResource.class);

    /**
     * The two administrative states an ASG can be in from Eureka's point of view.
     */
    public enum ASGStatus {
        ENABLED, DISABLED;

        /**
         * Case-insensitive, locale-independent parse of an ASG status string.
         *
         * @throws RuntimeException if the string matches no constant.
         */
        public static ASGStatus toEnum(String s) {
            for (ASGStatus e : ASGStatus.values()) {
                if (e.name().equalsIgnoreCase(s)) {
                    return e;
                }
            }
            throw new RuntimeException("Cannot find ASG enum for the given string " + s);
        }
    }

    protected final PeerAwareInstanceRegistry registry;
    // Null when the registry is not AWS-backed; statusUpdate then returns 400.
    protected final AwsAsgUtil awsAsgUtil;

    @Inject
    ASGResource(EurekaServerContext eurekaServer) {
        this.registry = eurekaServer.getRegistry();
        if (registry instanceof AwsInstanceRegistry) {
            this.awsAsgUtil = ((AwsInstanceRegistry) registry).getAwsAsgUtil();
        } else {
            this.awsAsgUtil = null;
        }
    }

    public ASGResource() {
        this(EurekaServerContextHolder.getInstance().getServerContext());
    }

    /**
     * Changes the status information of the ASG.
     *
     * @param asgName the name of the ASG for which the status needs to be changed.
     * @param newStatus the new status {@link ASGStatus} of the ASG.
     * @param isReplication a header parameter containing information whether this is replicated from other nodes.
     *
     * @return response which indicates if the operation succeeded or not.
     */
    @PUT
    @Path("{asgName}/status")
    public Response statusUpdate(@PathParam("asgName") String asgName,
                                 @QueryParam("value") String newStatus,
                                 @HeaderParam(PeerEurekaNode.HEADER_REPLICATION) String isReplication) {
        if (awsAsgUtil == null) {
            return Response.status(400).build();
        }
        try {
            logger.info("Trying to update ASG Status for ASG {} to {}", asgName, newStatus);
            // Use the enum's own case-insensitive parser instead of
            // valueOf(newStatus.toUpperCase()): toUpperCase() is sensitive to the
            // default locale (e.g. the Turkish dotless-i), and toEnum keeps the
            // parsing logic in one place.
            ASGStatus asgStatus = ASGStatus.toEnum(newStatus);
            awsAsgUtil.setStatus(asgName, (!ASGStatus.DISABLED.equals(asgStatus)));
            registry.statusUpdate(asgName, asgStatus, Boolean.valueOf(isReplication));
            logger.debug("Updated ASG Status for ASG {} to {}", asgName, asgStatus);
        } catch (Throwable e) {
            // Deliberately broad: any failure (bad status string, AWS call failure,
            // replication error) is reported to the caller as a 500.
            logger.error("Cannot update the status {} for the ASG {}", newStatus, asgName, e);
            return Response.serverError().build();
        }
        return Response.ok().build();
    }
}
| 6,950 |
0 | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka/resources/CurrentRequestVersion.java | /*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.eureka.resources;
import com.netflix.eureka.Version;
/**
 * Holds the {@link com.netflix.eureka.Version} associated with the request
 * being processed by the current thread.
 *
 * <p>This is not intended as a general mechanism for passing data.
 * Rather it is here to support those cases where someplace deep in
 * a library we need to know about the context of the request that
 * initially triggered the current request.</p>
 *
 * @author Karthik Ranganathan, Greg Kim
 */
public final class CurrentRequestVersion {

    /** Per-thread storage for the request version; empty until {@link #set} is called. */
    private static final ThreadLocal<Version> HOLDER = new ThreadLocal<>();

    private CurrentRequestVersion() {
        // Static-only utility; never instantiated.
    }

    /**
     * Returns the version recorded for the current thread, or {@code null}
     * when none has been set (or it has already been removed).
     */
    public static Version get() {
        return HOLDER.get();
    }

    /**
     * Records the given version for the current thread.
     *
     * Callers should invoke {@link #remove()} as soon as the value is no
     * longer needed, so the thread-local does not outlive the request on
     * pooled threads.
     */
    public static void set(Version version) {
        HOLDER.set(version);
    }

    /**
     * Discards the version stored for the current thread.
     */
    public static void remove() {
        HOLDER.remove();
    }
}
| 6,951 |
0 | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka/resources/ServerCodecs.java | package com.netflix.eureka.resources;
import com.netflix.appinfo.EurekaAccept;
import com.netflix.discovery.converters.wrappers.CodecWrapper;
import com.netflix.discovery.converters.wrappers.EncoderWrapper;
import com.netflix.eureka.registry.Key;
/**
 * Provides access to the encoder/decoder instances the server uses when
 * serializing registry data (full vs. compact payloads, JSON vs. XML).
 *
 * @author David Liu
 */
public interface ServerCodecs {

    CodecWrapper getFullJsonCodec();

    CodecWrapper getCompactJsonCodec();

    CodecWrapper getFullXmlCodec();

    /**
     * @deprecated the method name contains a typo ("Codecr"); prefer the
     *             correctly-spelled {@link #getCompactXmlCodec()}. Kept because
     *             existing implementations already provide this method.
     */
    @Deprecated
    CodecWrapper getCompactXmlCodecr();

    /**
     * Correctly-spelled alias for {@link #getCompactXmlCodecr()}. Declared as a
     * default method so existing implementations remain source- and
     * binary-compatible without any change.
     */
    default CodecWrapper getCompactXmlCodec() {
        return getCompactXmlCodecr();
    }

    EncoderWrapper getEncoder(Key.KeyType keyType, boolean compact);

    EncoderWrapper getEncoder(Key.KeyType keyType, EurekaAccept eurekaAccept);
}
| 6,952 |
0 | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka/resources/ApplicationResource.java | /*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.eureka.resources;
import javax.ws.rs.Consumes;
import javax.ws.rs.GET;
import javax.ws.rs.HeaderParam;
import javax.ws.rs.POST;
import javax.ws.rs.Path;
import javax.ws.rs.PathParam;
import javax.ws.rs.Produces;
import javax.ws.rs.core.Response;
import javax.ws.rs.core.Response.Status;
import com.netflix.appinfo.AmazonInfo;
import com.netflix.appinfo.DataCenterInfo;
import com.netflix.appinfo.EurekaAccept;
import com.netflix.appinfo.InstanceInfo;
import com.netflix.appinfo.UniqueIdentifier;
import com.netflix.eureka.EurekaServerConfig;
import com.netflix.eureka.registry.PeerAwareInstanceRegistry;
import com.netflix.eureka.Version;
import com.netflix.eureka.cluster.PeerEurekaNode;
import com.netflix.eureka.registry.ResponseCache;
import com.netflix.eureka.registry.Key.KeyType;
import com.netflix.eureka.registry.Key;
import com.netflix.eureka.util.EurekaMonitors;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * A <em>jersey</em> resource that handles request related to a particular
 * {@link com.netflix.discovery.shared.Application}.
 *
 * @author Karthik Ranganathan, Greg Kim
 *
 */
@Produces({"application/xml", "application/json"})
public class ApplicationResource {
    private static final Logger logger = LoggerFactory.getLogger(ApplicationResource.class);

    // Registry keys are upper-cased app names; see the constructor.
    private final String appName;
    private final EurekaServerConfig serverConfig;
    private final PeerAwareInstanceRegistry registry;
    private final ResponseCache responseCache;

    ApplicationResource(String appName,
                        EurekaServerConfig serverConfig,
                        PeerAwareInstanceRegistry registry) {
        // NOTE(review): default-locale toUpperCase() matches the convention used by
        // the rest of the registry; left as-is so lookups stay consistent.
        this.appName = appName.toUpperCase();
        this.serverConfig = serverConfig;
        this.registry = registry;
        this.responseCache = registry.getResponseCache();
    }

    public String getAppName() {
        return appName;
    }

    /**
     * Gets information about a particular {@link com.netflix.discovery.shared.Application}.
     *
     * @param version
     *            the version of the request.
     * @param acceptHeader
     *            the accept header of the request to indicate whether to serve
     *            JSON or XML data.
     * @return the response containing information about a particular
     *         application.
     */
    @GET
    public Response getApplication(@PathParam("version") String version,
                                   @HeaderParam("Accept") final String acceptHeader,
                                   @HeaderParam(EurekaAccept.HTTP_X_EUREKA_ACCEPT) String eurekaAccept) {
        if (!registry.shouldAllowAccess(false)) {
            return Response.status(Status.FORBIDDEN).build();
        }
        EurekaMonitors.GET_APPLICATION.increment();
        CurrentRequestVersion.set(Version.toEnum(version));
        // Anything that does not explicitly accept JSON is served as XML.
        KeyType keyType = Key.KeyType.JSON;
        if (acceptHeader == null || !acceptHeader.contains("json")) {
            keyType = Key.KeyType.XML;
        }
        Key cacheKey = new Key(
                Key.EntityType.Application,
                appName,
                keyType,
                CurrentRequestVersion.get(),
                EurekaAccept.fromString(eurekaAccept)
        );
        String payLoad;
        try {
            payLoad = responseCache.get(cacheKey);
        } finally {
            // Clear the thread-local even if the cache lookup throws, so pooled
            // request threads do not carry a stale version into the next request.
            CurrentRequestVersion.remove();
        }
        if (payLoad != null) {
            logger.debug("Found: {}", appName);
            return Response.ok(payLoad).build();
        } else {
            logger.debug("Not Found: {}", appName);
            return Response.status(Status.NOT_FOUND).build();
        }
    }

    /**
     * Gets information about a particular instance of an application.
     *
     * @param id
     *            the unique identifier of the instance.
     * @return information about a particular instance.
     */
    @Path("{id}")
    public InstanceResource getInstanceInfo(@PathParam("id") String id) {
        return new InstanceResource(this, id, serverConfig, registry);
    }

    /**
     * Registers information about a particular instance for an
     * {@link com.netflix.discovery.shared.Application}.
     *
     * @param info
     *            {@link InstanceInfo} information of the instance.
     * @param isReplication
     *            a header parameter containing information whether this is
     *            replicated from other nodes.
     */
    @POST
    @Consumes({"application/json", "application/xml"})
    public Response addInstance(InstanceInfo info,
                                @HeaderParam(PeerEurekaNode.HEADER_REPLICATION) String isReplication) {
        logger.debug("Registering instance {} (replication={})", info.getId(), isReplication);

        // Validate that the InstanceInfo contains all the required fields before
        // it is allowed into the registry; each failure is a 400 with a reason.
        if (isBlank(info.getId())) {
            return Response.status(400).entity("Missing instanceId").build();
        } else if (isBlank(info.getHostName())) {
            return Response.status(400).entity("Missing hostname").build();
        } else if (isBlank(info.getIPAddr())) {
            return Response.status(400).entity("Missing ip address").build();
        } else if (isBlank(info.getAppName())) {
            return Response.status(400).entity("Missing appName").build();
        } else if (!appName.equals(info.getAppName())) {
            return Response.status(400).entity("Mismatched appName, expecting " + appName + " but was " + info.getAppName()).build();
        } else if (info.getDataCenterInfo() == null) {
            return Response.status(400).entity("Missing dataCenterInfo").build();
        } else if (info.getDataCenterInfo().getName() == null) {
            return Response.status(400).entity("Missing dataCenterInfo Name").build();
        }

        // Handle cases where clients may be registering with bad DataCenterInfo
        // that is missing its unique id.
        DataCenterInfo dataCenterInfo = info.getDataCenterInfo();
        if (dataCenterInfo instanceof UniqueIdentifier) {
            String dataCenterInfoId = ((UniqueIdentifier) dataCenterInfo).getId();
            if (isBlank(dataCenterInfoId)) {
                // Behind an experimental flag we reject outright; otherwise we
                // repair AmazonInfo in place and merely warn for other types.
                boolean experimental = "true".equalsIgnoreCase(serverConfig.getExperimental("registration.validation.dataCenterInfoId"));
                if (experimental) {
                    String entity = "DataCenterInfo of type " + dataCenterInfo.getClass() + " must contain a valid id";
                    return Response.status(400).entity(entity).build();
                } else if (dataCenterInfo instanceof AmazonInfo) {
                    AmazonInfo amazonInfo = (AmazonInfo) dataCenterInfo;
                    String effectiveId = amazonInfo.get(AmazonInfo.MetaDataKey.instanceId);
                    if (effectiveId == null) {
                        // Fall back to the instance's own id as the AWS instance id.
                        amazonInfo.getMetadata().put(AmazonInfo.MetaDataKey.instanceId.getName(), info.getId());
                    }
                } else {
                    logger.warn("Registering DataCenterInfo of type {} without an appropriate id", dataCenterInfo.getClass());
                }
            }
        }

        registry.register(info, "true".equals(isReplication));
        return Response.status(204).build();  // 204 to be backwards compatible
    }

    /**
     * Returns the application name of a particular application.
     *
     * @return the application name of a particular application.
     */
    String getName() {
        return appName;
    }

    // True when the string is null or empty (no whitespace trimming).
    private boolean isBlank(String str) {
        return str == null || str.isEmpty();
    }
}
| 6,953 |
0 | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka/lease/Lease.java | /*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.eureka.lease;
import com.netflix.eureka.registry.AbstractInstanceRegistry;
/**
 * Describes a time-based availability of a {@link T}. Purpose is to avoid
 * accumulation of instances in {@link AbstractInstanceRegistry} as result of ungraceful
 * shutdowns that is not uncommon in AWS environments.
 *
 * If a lease elapses without renewals, it will eventually expire consequently
 * marking the associated {@link T} for immediate eviction - this is similar to
 * an explicit cancellation except that there is no communication between the
 * {@link T} and {@link LeaseManager}.
 *
 * @author Karthik Ranganathan, Greg Kim
 */
public class Lease<T> {

    enum Action {
        Register, Cancel, Renew
    };

    public static final int DEFAULT_DURATION_IN_SECS = 90;

    private T holder;
    private long evictionTimestamp;
    private long registrationTimestamp;
    private long serviceUpTimestamp;
    // Make it volatile so that the expiration task would see this quicker
    private volatile long lastUpdateTimestamp;
    private long duration;

    public Lease(T r, int durationInSecs) {
        holder = r;
        registrationTimestamp = System.currentTimeMillis();
        lastUpdateTimestamp = registrationTimestamp;
        // Widen to long BEFORE multiplying: the previous int multiplication
        // (durationInSecs * 1000) overflowed for durations above
        // Integer.MAX_VALUE / 1000 seconds (~24.8 days), yielding a negative
        // duration and an instantly-expired lease.
        duration = durationInSecs * 1000L;
    }

    /**
     * Renew the lease, use renewal duration if it was specified by the
     * associated {@link T} during registration, otherwise default duration is
     * {@link #DEFAULT_DURATION_IN_SECS}.
     */
    public void renew() {
        // Intentionally "now + duration" rather than "now"; see isExpired(long)
        // for why this long-standing quirk is preserved.
        lastUpdateTimestamp = System.currentTimeMillis() + duration;
    }

    /**
     * Cancels the lease by updating the eviction time. Only the first call has
     * an effect; subsequent calls keep the original eviction timestamp.
     */
    public void cancel() {
        if (evictionTimestamp <= 0) {
            evictionTimestamp = System.currentTimeMillis();
        }
    }

    /**
     * Mark the service as up. This will only take affect the first time called,
     * subsequent calls will be ignored.
     */
    public void serviceUp() {
        if (serviceUpTimestamp == 0) {
            serviceUpTimestamp = System.currentTimeMillis();
        }
    }

    /**
     * Set the leases service UP timestamp.
     */
    public void setServiceUpTimestamp(long serviceUpTimestamp) {
        this.serviceUpTimestamp = serviceUpTimestamp;
    }

    /**
     * Checks if the lease of a given {@link com.netflix.appinfo.InstanceInfo} has expired or not.
     */
    public boolean isExpired() {
        return isExpired(0L);
    }

    /**
     * Checks if the lease of a given {@link com.netflix.appinfo.InstanceInfo} has expired or not.
     *
     * Note that due to renew() doing the 'wrong" thing and setting lastUpdateTimestamp to +duration more than
     * what it should be, the expiry will actually be 2 * duration. This is a minor bug and should only affect
     * instances that ungracefully shutdown. Due to possible wide ranging impact to existing usage, this will
     * not be fixed.
     *
     * @param additionalLeaseMs any additional lease time to add to the lease evaluation in ms.
     */
    public boolean isExpired(long additionalLeaseMs) {
        return (evictionTimestamp > 0 || System.currentTimeMillis() > (lastUpdateTimestamp + duration + additionalLeaseMs));
    }

    /**
     * Gets the milliseconds since epoch when the lease was registered.
     *
     * @return the milliseconds since epoch when the lease was registered.
     */
    public long getRegistrationTimestamp() {
        return registrationTimestamp;
    }

    /**
     * Gets the milliseconds since epoch when the lease was last renewed.
     * Note that the value returned here is actually not the last lease renewal time but the renewal + duration.
     *
     * @return the milliseconds since epoch when the lease was last renewed.
     */
    public long getLastRenewalTimestamp() {
        return lastUpdateTimestamp;
    }

    /**
     * Gets the milliseconds since epoch when the lease was evicted.
     *
     * @return the milliseconds since epoch when the lease was evicted.
     */
    public long getEvictionTimestamp() {
        return evictionTimestamp;
    }

    /**
     * Gets the milliseconds since epoch when the service for the lease was marked as up.
     *
     * @return the milliseconds since epoch when the service for the lease was marked as up.
     */
    public long getServiceUpTimestamp() {
        return serviceUpTimestamp;
    }

    /**
     * Returns the holder of the lease.
     */
    public T getHolder() {
        return holder;
    }
}
| 6,954 |
0 | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka/lease/LeaseManager.java | /*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.eureka.lease;
import com.netflix.eureka.registry.AbstractInstanceRegistry;
/**
 * This class is responsible for creating/renewing and evicting a <em>lease</em>
 * for a particular instance.
 *
 * <p>
 * Leases determine what instances receive traffic. When there is no renewal
 * request from the client, the lease gets expired and the instances are evicted
 * out of {@link AbstractInstanceRegistry}. This is key to instances receiving traffic
 * or not.
 * <p>
 *
 * @author Karthik Ranganathan, Greg Kim
 *
 * @param <T> the type of object whose availability the lease tracks.
 */
public interface LeaseManager<T> {

    /**
     * Assign a new {@link Lease} to the passed in {@link T}.
     *
     * @param r
     *            - T to register
     * @param leaseDuration
     *            - duration of the lease in seconds, after which it becomes
     *            eligible for expiry if not renewed.
     * @param isReplication
     *            - whether this is a replicated entry from another eureka node.
     */
    void register(T r, int leaseDuration, boolean isReplication);

    /**
     * Cancel the {@link Lease} associated w/ the passed in <code>appName</code>
     * and <code>id</code>.
     *
     * @param appName
     *            - unique id of the application.
     * @param id
     *            - unique id within appName.
     * @param isReplication
     *            - whether this is a replicated entry from another eureka node.
     * @return true, if the operation was successful, false otherwise.
     */
    boolean cancel(String appName, String id, boolean isReplication);

    /**
     * Renew the {@link Lease} associated w/ the passed in <code>appName</code>
     * and <code>id</code>.
     *
     * @param appName
     *            - unique id of the application.
     * @param id
     *            - unique id within appName
     * @param isReplication
     *            - whether this is a replicated entry from another eureka node
     * @return whether the operation was successful
     */
    boolean renew(String appName, String id, boolean isReplication);

    /**
     * Evict {@link T}s with expired {@link Lease}(s).
     */
    void evict();
}
| 6,955 |
0 | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka/registry/PeerAwareInstanceRegistryImpl.java | /*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.eureka.registry;
import java.net.URI;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.List;
import java.util.Timer;
import java.util.TimerTask;
import com.netflix.appinfo.AmazonInfo;
import com.netflix.appinfo.AmazonInfo.MetaDataKey;
import com.netflix.appinfo.ApplicationInfoManager;
import com.netflix.appinfo.DataCenterInfo;
import com.netflix.appinfo.DataCenterInfo.Name;
import com.netflix.appinfo.InstanceInfo;
import com.netflix.appinfo.InstanceInfo.InstanceStatus;
import com.netflix.appinfo.LeaseInfo;
import com.netflix.discovery.EurekaClient;
import com.netflix.discovery.EurekaClientConfig;
import com.netflix.discovery.shared.Application;
import com.netflix.discovery.shared.Applications;
import com.netflix.eureka.registry.rule.DownOrStartingRule;
import com.netflix.eureka.registry.rule.FirstMatchWinsCompositeRule;
import com.netflix.eureka.registry.rule.InstanceStatusOverrideRule;
import com.netflix.eureka.registry.rule.LeaseExistsRule;
import com.netflix.eureka.registry.rule.OverrideExistsRule;
import com.netflix.eureka.resources.CurrentRequestVersion;
import com.netflix.eureka.EurekaServerConfig;
import com.netflix.eureka.Version;
import com.netflix.eureka.cluster.PeerEurekaNode;
import com.netflix.eureka.cluster.PeerEurekaNodes;
import com.netflix.eureka.lease.Lease;
import com.netflix.eureka.resources.ASGResource.ASGStatus;
import com.netflix.eureka.resources.ServerCodecs;
import com.netflix.eureka.util.MeasuredRate;
import com.netflix.servo.DefaultMonitorRegistry;
import com.netflix.servo.annotations.DataSourceType;
import com.netflix.servo.monitor.Monitors;
import com.netflix.servo.monitor.Stopwatch;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.inject.Inject;
import javax.inject.Singleton;
import static com.netflix.eureka.Names.METRIC_REGISTRY_PREFIX;
/**
* Handles replication of all operations to {@link AbstractInstanceRegistry} to peer
* <em>Eureka</em> nodes to keep them all in sync.
*
* <p>
* Primary operations that are replicated are the
* <em>Registers,Renewals,Cancels,Expirations and Status Changes</em>
* </p>
*
* <p>
* When the eureka server starts up it tries to fetch all the registry
* information from the peer eureka nodes.If for some reason this operation
* fails, the server does not allow the user to get the registry information for
* a period specified in
* {@link com.netflix.eureka.EurekaServerConfig#getWaitTimeInMsWhenSyncEmpty()}.
* </p>
*
* <p>
* One important thing to note about <em>renewals</em>.If the renewal drops more
* than the specified threshold as specified in
* {@link com.netflix.eureka.EurekaServerConfig#getRenewalPercentThreshold()} within a period of
* {@link com.netflix.eureka.EurekaServerConfig#getRenewalThresholdUpdateIntervalMs()}, eureka
* perceives this as a danger and stops expiring instances.
* </p>
*
* @author Karthik Ranganathan, Greg Kim
*
*/
@Singleton
public class PeerAwareInstanceRegistryImpl extends AbstractInstanceRegistry implements PeerAwareInstanceRegistry {
    private static final Logger logger = LoggerFactory.getLogger(PeerAwareInstanceRegistryImpl.class);

    private static final String US_EAST_1 = "us-east-1";
    private static final int PRIME_PEER_NODES_RETRY_MS = 30000;

    // Millis-since-epoch when this node was opened for traffic; 0 until then.
    private long startupTime = 0;
    // True until at least one instance is obtained from a peer at startup.
    private boolean peerInstancesTransferEmptyOnStartup = true;

    /**
     * The registry operations that are replicated to peer nodes; each carries
     * its own servo timer for latency monitoring.
     */
    public enum Action {
        Heartbeat, Register, Cancel, StatusUpdate, DeleteStatusOverride;

        private com.netflix.servo.monitor.Timer timer = Monitors.newTimer(this.name());

        public com.netflix.servo.monitor.Timer getTimer() {
            return this.timer;
        }
    }

    // Orders applications by name for deterministic processing.
    private static final Comparator<Application> APP_COMPARATOR = new Comparator<Application>() {
        public int compare(Application l, Application r) {
            return l.getName().compareTo(r.getName());
        }
    };

    // Replication events received over the last minute (sliding window).
    private final MeasuredRate numberOfReplicationsLastMin;

    protected final EurekaClient eurekaClient;
    protected volatile PeerEurekaNodes peerEurekaNodes;
    private final InstanceStatusOverrideRule instanceStatusOverrideRule;

    // Daemon timer that periodically refreshes the renewal threshold.
    private Timer timer = new Timer(
            "ReplicaAwareInstanceRegistry - RenewalThresholdUpdater", true);
    /**
     * Creates the peer-aware registry.
     *
     * @param serverConfig server-side configuration.
     * @param clientConfig client-side configuration (used by the base registry).
     * @param serverCodecs payload encoders/decoders.
     * @param eurekaClient client used to fetch the registry from peers at startup.
     */
    @Inject
    public PeerAwareInstanceRegistryImpl(
            EurekaServerConfig serverConfig,
            EurekaClientConfig clientConfig,
            ServerCodecs serverCodecs,
            EurekaClient eurekaClient
    ) {
        super(serverConfig, clientConfig, serverCodecs);
        this.eurekaClient = eurekaClient;
        // Sliding one-minute window for replication-rate monitoring.
        this.numberOfReplicationsLastMin = new MeasuredRate(1000 * 60 * 1);
        // We first check if the instance is STARTING or DOWN, then we check explicit overrides,
        // then we check the status of a potentially existing lease.
        this.instanceStatusOverrideRule = new FirstMatchWinsCompositeRule(new DownOrStartingRule(),
                new OverrideExistsRule(overriddenInstanceStatusMap), new LeaseExistsRule());
    }
    /**
     * Returns the composite rule (DOWN/STARTING, explicit overrides, existing
     * lease) used to decide an instance's effective status.
     */
    @Override
    protected InstanceStatusOverrideRule getInstanceInfoOverrideRule() {
        return this.instanceStatusOverrideRule;
    }
    /**
     * Initializes the registry: starts the replication-rate meter, wires in the
     * peer nodes, builds the response cache, schedules the renewal-threshold
     * updater, and sets up remote-region registries. JMX registration failures
     * are logged but do not prevent startup.
     */
    @Override
    public void init(PeerEurekaNodes peerEurekaNodes) throws Exception {
        this.numberOfReplicationsLastMin.start();
        this.peerEurekaNodes = peerEurekaNodes;
        initializedResponseCache();
        scheduleRenewalThresholdUpdateTask();
        initRemoteRegionRegistry();
        try {
            Monitors.registerObject(this);
        } catch (Throwable e) {
            // Monitoring is best-effort; the registry works without JMX.
            logger.warn("Cannot register the JMX monitor for the InstanceRegistry :", e);
        }
    }
    /**
     * Perform all cleanup and shutdown operations.
     */
    @Override
    public void shutdown() {
        // Each step is isolated so one failure does not block the rest.
        try {
            DefaultMonitorRegistry.getInstance().unregister(Monitors.newObjectMonitor(this));
        } catch (Throwable t) {
            logger.error("Cannot shutdown monitor registry", t);
        }
        try {
            peerEurekaNodes.shutdown();
        } catch (Throwable t) {
            logger.error("Cannot shutdown ReplicaAwareInstanceRegistry", t);
        }
        numberOfReplicationsLastMin.stop();
        // Stops the renewal-threshold updater scheduled in init().
        timer.cancel();
        super.shutdown();
    }
    /**
     * Schedule the task that updates <em>renewal threshold</em> periodically.
     * The renewal threshold would be used to determine if the renewals drop
     * dramatically because of network partition and to protect expiring too
     * many instances at a time.
     *
     */
    private void scheduleRenewalThresholdUpdateTask() {
        // Both the initial delay and the repeat period come from configuration.
        timer.schedule(new TimerTask() {
                           @Override
                           public void run() {
                               updateRenewalThreshold();
                           }
                       }, serverConfig.getRenewalThresholdUpdateIntervalMs(),
                serverConfig.getRenewalThresholdUpdateIntervalMs());
    }
    /**
     * Populates the registry information from a peer eureka node. This
     * operation fails over to other nodes until the list is exhausted if the
     * communication fails.
     */
    @Override
    public int syncUp() {
        // Copy entire entry from neighboring DS node
        int count = 0;
        // Retry until at least one instance is copied or retries are exhausted;
        // the first attempt (i == 0) runs without a preceding sleep.
        for (int i = 0; ((i < serverConfig.getRegistrySyncRetries()) && (count == 0)); i++) {
            if (i > 0) {
                try {
                    Thread.sleep(serverConfig.getRegistrySyncRetryWaitMs());
                } catch (InterruptedException e) {
                    logger.warn("Interrupted during registry transfer..");
                    break;
                }
            }
            Applications apps = eurekaClient.getApplications();
            for (Application app : apps.getRegisteredApplications()) {
                for (InstanceInfo instance : app.getInstances()) {
                    try {
                        if (isRegisterable(instance)) {
                            // Registered as a replication event with the peer's
                            // own lease duration.
                            register(instance, instance.getLeaseInfo().getDurationInSecs(), true);
                            count++;
                        }
                    } catch (Throwable t) {
                        // Best-effort: one bad instance must not abort the sync.
                        logger.error("During DS init copy", t);
                    }
                }
            }
        }
        return count;
    }
@Override
public void openForTraffic(ApplicationInfoManager applicationInfoManager, int count) {
// Renewals happen every 30 seconds and for a minute it should be a factor of 2.
this.expectedNumberOfClientsSendingRenews = count;
updateRenewsPerMinThreshold();
logger.info("Got {} instances from neighboring DS node", count);
logger.info("Renew threshold is: {}", numberOfRenewsPerMinThreshold);
this.startupTime = System.currentTimeMillis();
if (count > 0) {
this.peerInstancesTransferEmptyOnStartup = false;
}
DataCenterInfo.Name selfName = applicationInfoManager.getInfo().getDataCenterInfo().getName();
boolean isAws = Name.Amazon == selfName;
if (isAws && serverConfig.shouldPrimeAwsReplicaConnections()) {
logger.info("Priming AWS connections for all replicas..");
primeAwsReplicas(applicationInfoManager);
}
logger.info("Changing status to UP");
applicationInfoManager.setInstanceStatus(InstanceStatus.UP);
super.postInit();
}
/**
* Prime connections for Aws replicas.
* <p>
* Sometimes when the eureka servers comes up, AWS firewall may not allow
* the network connections immediately. This will cause the outbound
* connections to fail, but the inbound connections continue to work. What
* this means is the clients would have switched to this node (after EIP
* binding) and so the other eureka nodes will expire all instances that
* have been switched because of the lack of outgoing heartbeats from this
* instance.
* </p>
* <p>
* The best protection in this scenario is to block and wait until we are
* able to ping all eureka nodes successfully atleast once. Until then we
* won't open up the traffic.
* </p>
*/
private void primeAwsReplicas(ApplicationInfoManager applicationInfoManager) {
boolean areAllPeerNodesPrimed = false;
while (!areAllPeerNodesPrimed) {
String peerHostName = null;
try {
Application eurekaApps = this.getApplication(applicationInfoManager.getInfo().getAppName(), false);
if (eurekaApps == null) {
areAllPeerNodesPrimed = true;
logger.info("No peers needed to prime.");
return;
}
for (PeerEurekaNode node : peerEurekaNodes.getPeerEurekaNodes()) {
for (InstanceInfo peerInstanceInfo : eurekaApps.getInstances()) {
LeaseInfo leaseInfo = peerInstanceInfo.getLeaseInfo();
// If the lease is expired - do not worry about priming
if (System.currentTimeMillis() > (leaseInfo
.getRenewalTimestamp() + (leaseInfo
.getDurationInSecs() * 1000))
+ (2 * 60 * 1000)) {
continue;
}
peerHostName = peerInstanceInfo.getHostName();
logger.info("Trying to send heartbeat for the eureka server at {} to make sure the " +
"network channels are open", peerHostName);
// Only try to contact the eureka nodes that are in this instance's registry - because
// the other instances may be legitimately down
if (peerHostName.equalsIgnoreCase(new URI(node.getServiceUrl()).getHost())) {
node.heartbeat(
peerInstanceInfo.getAppName(),
peerInstanceInfo.getId(),
peerInstanceInfo,
null,
true);
}
}
}
areAllPeerNodesPrimed = true;
} catch (Throwable e) {
logger.error("Could not contact {}", peerHostName, e);
try {
Thread.sleep(PRIME_PEER_NODES_RETRY_MS);
} catch (InterruptedException e1) {
logger.warn("Interrupted while priming : ", e1);
areAllPeerNodesPrimed = true;
}
}
}
}
/**
* Checks to see if the registry access is allowed or the server is in a
* situation where it does not all getting registry information. The server
* does not return registry information for a period specified in
* {@link EurekaServerConfig#getWaitTimeInMsWhenSyncEmpty()}, if it cannot
* get the registry information from the peer eureka nodes at start up.
*
* @return false - if the instances count from a replica transfer returned
* zero and if the wait time has not elapsed, otherwise returns true
*/
@Override
public boolean shouldAllowAccess(boolean remoteRegionRequired) {
if (this.peerInstancesTransferEmptyOnStartup) {
if (!(System.currentTimeMillis() > this.startupTime + serverConfig.getWaitTimeInMsWhenSyncEmpty())) {
return false;
}
}
if (remoteRegionRequired) {
for (RemoteRegionRegistry remoteRegionRegistry : this.regionNameVSRemoteRegistry.values()) {
if (!remoteRegionRegistry.isReadyForServingData()) {
return false;
}
}
}
return true;
}
    /**
     * Convenience overload of {@link #shouldAllowAccess(boolean)} that also
     * requires all remote region registries to be ready for serving data.
     */
    public boolean shouldAllowAccess() {
        return shouldAllowAccess(true);
    }
    // Servo gauge exposing shouldAllowAccess() as 1 (allowed) or 0 (blocked).
    @com.netflix.servo.annotations.Monitor(name = METRIC_REGISTRY_PREFIX + "shouldAllowAccess", type = DataSourceType.GAUGE)
    public int shouldAllowAccessMetric() {
        return shouldAllowAccess() ? 1 : 0;
    }
    /**
     * Gets the list of peer eureka nodes which is the list to replicate
     * information to.
     *
     * @return an unmodifiable view of the list of replica nodes.
     * @deprecated use {@link com.netflix.eureka.cluster.PeerEurekaNodes#getPeerEurekaNodes()} directly.
     */
    @Deprecated
    public List<PeerEurekaNode> getReplicaNodes() {
        return Collections.unmodifiableList(peerEurekaNodes.getPeerEurekaNodes());
    }
/*
* (non-Javadoc)
*
* @see com.netflix.eureka.registry.InstanceRegistry#cancel(java.lang.String,
* java.lang.String, long, boolean)
*/
@Override
public boolean cancel(final String appName, final String id,
final boolean isReplication) {
if (super.cancel(appName, id, isReplication)) {
replicateToPeers(Action.Cancel, appName, id, null, null, isReplication);
return true;
}
return false;
}
/**
* Registers the information about the {@link InstanceInfo} and replicates
* this information to all peer eureka nodes. If this is replication event
* from other replica nodes then it is not replicated.
*
* @param info
* the {@link InstanceInfo} to be registered and replicated.
* @param isReplication
* true if this is a replication event from other replica nodes,
* false otherwise.
*/
@Override
public void register(final InstanceInfo info, final boolean isReplication) {
int leaseDuration = Lease.DEFAULT_DURATION_IN_SECS;
if (info.getLeaseInfo() != null && info.getLeaseInfo().getDurationInSecs() > 0) {
leaseDuration = info.getLeaseInfo().getDurationInSecs();
}
super.register(info, leaseDuration, isReplication);
replicateToPeers(Action.Register, info.getAppName(), info.getId(), info, null, isReplication);
}
/*
* (non-Javadoc)
*
* @see com.netflix.eureka.registry.InstanceRegistry#renew(java.lang.String,
* java.lang.String, long, boolean)
*/
public boolean renew(final String appName, final String id, final boolean isReplication) {
if (super.renew(appName, id, isReplication)) {
replicateToPeers(Action.Heartbeat, appName, id, null, null, isReplication);
return true;
}
return false;
}
/*
* (non-Javadoc)
*
* @see com.netflix.eureka.registry.InstanceRegistry#statusUpdate(java.lang.String,
* java.lang.String, com.netflix.appinfo.InstanceInfo.InstanceStatus,
* java.lang.String, boolean)
*/
@Override
public boolean statusUpdate(final String appName, final String id,
final InstanceStatus newStatus, String lastDirtyTimestamp,
final boolean isReplication) {
if (super.statusUpdate(appName, id, newStatus, lastDirtyTimestamp, isReplication)) {
replicateToPeers(Action.StatusUpdate, appName, id, null, newStatus, isReplication);
return true;
}
return false;
}
@Override
public boolean deleteStatusOverride(String appName, String id,
InstanceStatus newStatus,
String lastDirtyTimestamp,
boolean isReplication) {
if (super.deleteStatusOverride(appName, id, newStatus, lastDirtyTimestamp, isReplication)) {
replicateToPeers(Action.DeleteStatusOverride, appName, id, null, null, isReplication);
return true;
}
return false;
}
/**
* Replicate the <em>ASG status</em> updates to peer eureka nodes. If this
* event is a replication from other nodes, then it is not replicated to
* other nodes.
*
* @param asgName the asg name for which the status needs to be replicated.
* @param newStatus the {@link ASGStatus} information that needs to be replicated.
* @param isReplication true if this is a replication event from other nodes, false otherwise.
*/
@Override
public void statusUpdate(final String asgName, final ASGStatus newStatus, final boolean isReplication) {
// If this is replicated from an other node, do not try to replicate again.
if (isReplication) {
return;
}
for (final PeerEurekaNode node : peerEurekaNodes.getPeerEurekaNodes()) {
replicateASGInfoToReplicaNodes(asgName, newStatus, node);
}
}
@Override
public boolean isLeaseExpirationEnabled() {
if (!isSelfPreservationModeEnabled()) {
// The self preservation mode is disabled, hence allowing the instances to expire.
return true;
}
return numberOfRenewsPerMinThreshold > 0 && getNumOfRenewsInLastMin() > numberOfRenewsPerMinThreshold;
}
    // Servo gauge exposing isLeaseExpirationEnabled() as 1 (enabled) or 0 (disabled).
    @com.netflix.servo.annotations.Monitor(name = METRIC_REGISTRY_PREFIX + "isLeaseExpirationEnabled", type = DataSourceType.GAUGE)
    public int isLeaseExpirationEnabledMetric() {
        return isLeaseExpirationEnabled() ? 1 : 0;
    }
    /**
     * Checks to see if the self-preservation mode is enabled.
     *
     * <p>
     * The self-preservation mode is enabled if the expected number of renewals
     * per minute {@link #getNumOfRenewsInLastMin()} is lesser than the expected
     * threshold which is determined by {@link #getNumOfRenewsPerMinThreshold()}
     * . Eureka perceives this as a danger and stops expiring instances as this
     * is most likely because of a network event. The mode is disabled only when
     * the renewals get back to above the threshold or if the flag
     * {@link EurekaServerConfig#shouldEnableSelfPreservation()} is set to
     * false.
     * </p>
     *
     * @return true if the self-preservation mode is enabled, false otherwise.
     */
    @Override
    public boolean isSelfPreservationModeEnabled() {
        // Delegates straight to (possibly dynamic) server configuration.
        return serverConfig.shouldEnableSelfPreservation();
    }
    // Servo gauge exposing isSelfPreservationModeEnabled() as 1 (enabled) or 0 (disabled).
    @com.netflix.servo.annotations.Monitor(name = METRIC_REGISTRY_PREFIX + "isSelfPreservationModeEnabled", type = DataSourceType.GAUGE)
    public int isSelfPreservationModeEnabledMetric() {
        return isSelfPreservationModeEnabled() ? 1 : 0;
    }
    /**
     * Not supported by this registry implementation; always returns {@code null}.
     * Server resolution is a client-side concern, not a server registry one.
     */
    @Override
    public InstanceInfo getNextServerFromEureka(String virtualHostname, boolean secure) {
        // Intentionally unimplemented for the server-side registry.
        return null;
    }
    /**
     * Updates the <em>renewal threshold</em> based on the current number of
     * renewals. The threshold is a percentage as specified in
     * {@link EurekaServerConfig#getRenewalPercentThreshold()} of renewals
     * received per minute {@link #getNumOfRenewsInLastMin()}.
     */
    private void updateRenewalThreshold() {
        try {
            // Count the instances this server would actually accept (same region).
            Applications apps = eurekaClient.getApplications();
            int count = 0;
            for (Application app : apps.getRegisteredApplications()) {
                for (InstanceInfo instance : app.getInstances()) {
                    if (this.isRegisterable(instance)) {
                        ++count;
                    }
                }
            }
            // Guard threshold updates against concurrent registration/cancellation paths.
            synchronized (lock) {
                // Update threshold only if the threshold is greater than the
                // current expected threshold or if self preservation is disabled.
                if ((count) > (serverConfig.getRenewalPercentThreshold() * expectedNumberOfClientsSendingRenews)
                        || (!this.isSelfPreservationModeEnabled())) {
                    this.expectedNumberOfClientsSendingRenews = count;
                    updateRenewsPerMinThreshold();
                }
            }
            logger.info("Current renewal threshold is : {}", numberOfRenewsPerMinThreshold);
        } catch (Throwable e) {
            // Best effort: a failed recalculation keeps the previous threshold.
            logger.error("Cannot update renewal threshold", e);
        }
    }
/**
* Gets the list of all {@link Applications} from the registry in sorted
* lexical order of {@link Application#getName()}.
*
* @return the list of {@link Applications} in lexical order.
*/
@Override
public List<Application> getSortedApplications() {
List<Application> apps = new ArrayList<>(getApplications().getRegisteredApplications());
Collections.sort(apps, APP_COMPARATOR);
return apps;
}
    /**
     * Gets the number of <em>replications</em> received in the last minute.
     * (Note: despite older docs mentioning renewals, this counter tracks
     * replication events, as the servo monitor name and description state.)
     *
     * @return a long value representing the number of <em>replications</em> in the last minute.
     */
    @com.netflix.servo.annotations.Monitor(name = "numOfReplicationsInLastMin",
            description = "Number of total replications received in the last minute",
            type = com.netflix.servo.annotations.DataSourceType.GAUGE)
    public long getNumOfReplicationsInLastMin() {
        return numberOfReplicationsLastMin.getCount();
    }
/**
* Checks if the number of renewals is lesser than threshold.
*
* @return 0 if the renewals are greater than threshold, 1 otherwise.
*/
@com.netflix.servo.annotations.Monitor(name = "isBelowRenewThreshold", description = "0 = false, 1 = true",
type = com.netflix.servo.annotations.DataSourceType.GAUGE)
@Override
public int isBelowRenewThresold() {
if ((getNumOfRenewsInLastMin() <= numberOfRenewsPerMinThreshold)
&&
((this.startupTime > 0) && (System.currentTimeMillis() > this.startupTime + (serverConfig.getWaitTimeInMsWhenSyncEmpty())))) {
return 1;
} else {
return 0;
}
}
    /**
     * Checks if an instance is registerable in this region. Instances from other regions are rejected.
     *
     * NOTE(review): as written, an Amazon instance whose availability zone does NOT
     * contain the server region still falls through to the final {@code return true};
     * only the early-return branches are actually distinguishing. Confirm whether
     * rejecting cross-region instances was ever intended here.
     *
     * @param instanceInfo th instance info information of the instance
     * @return true, if it can be registered in this server, false otherwise.
     */
    public boolean isRegisterable(InstanceInfo instanceInfo) {
        DataCenterInfo datacenterInfo = instanceInfo.getDataCenterInfo();
        String serverRegion = clientConfig.getRegion();
        if (AmazonInfo.class.isInstance(datacenterInfo)) {
            AmazonInfo info = AmazonInfo.class.cast(instanceInfo.getDataCenterInfo());
            String availabilityZone = info.get(MetaDataKey.availabilityZone);
            // Can be null for dev environments in non-AWS data center
            if (availabilityZone == null && US_EAST_1.equalsIgnoreCase(serverRegion)) {
                return true;
            } else if ((availabilityZone != null) && (availabilityZone.contains(serverRegion))) {
                // If in the same region as server, then consider it registerable
                return true;
            }
        }
        return true; // Everything non-amazon is registrable.
    }
    /**
     * Replicates all eureka actions to peer eureka nodes except for replication
     * traffic to this node.
     *
     * @param action        the registry action being replicated (register, cancel, ...).
     * @param appName       application name of the affected instance.
     * @param id            instance id of the affected instance.
     * @param info          the instance payload; only present for register actions.
     * @param newStatus     the new status; only present for status-update actions.
     * @param isReplication true when this event itself arrived via replication.
     */
    private void replicateToPeers(Action action, String appName, String id,
                                  InstanceInfo info /* optional */,
                                  InstanceStatus newStatus /* optional */, boolean isReplication) {
        // Time every replication attempt per action type.
        Stopwatch tracer = action.getTimer().start();
        try {
            if (isReplication) {
                numberOfReplicationsLastMin.increment();
            }
            // If it is a replication already, do not replicate again as this will create a poison replication
            // NOTE(review): `peerEurekaNodes == Collections.EMPTY_LIST` compares a
            // PeerEurekaNodes object against a List constant by reference, which looks
            // like it can never be true — confirm whether an emptiness check on
            // peerEurekaNodes.getPeerEurekaNodes() was intended.
            if (peerEurekaNodes == Collections.EMPTY_LIST || isReplication) {
                return;
            }
            for (final PeerEurekaNode node : peerEurekaNodes.getPeerEurekaNodes()) {
                // If the url represents this host, do not replicate to yourself.
                if (peerEurekaNodes.isThisMyUrl(node.getServiceUrl())) {
                    continue;
                }
                replicateInstanceActionsToPeers(action, appName, id, info, newStatus, node);
            }
        } finally {
            tracer.stop();
        }
    }
    /**
     * Replicates all instance changes to peer eureka nodes except for
     * replication traffic to this node.
     *
     * Dispatches on the action type; heartbeat/status actions re-read the
     * instance from the local registry so the freshest data is replicated.
     * Failures are logged and swallowed so one unreachable peer cannot break
     * the replication loop.
     */
    private void replicateInstanceActionsToPeers(Action action, String appName,
                                                 String id, InstanceInfo info, InstanceStatus newStatus,
                                                 PeerEurekaNode node) {
        try {
            InstanceInfo infoFromRegistry;
            // Pin the request version for the duration of the replication call.
            CurrentRequestVersion.set(Version.V2);
            switch (action) {
                case Cancel:
                    node.cancel(appName, id);
                    break;
                case Heartbeat:
                    InstanceStatus overriddenStatus = overriddenInstanceStatusMap.get(id);
                    infoFromRegistry = getInstanceByAppAndId(appName, id, false);
                    node.heartbeat(appName, id, infoFromRegistry, overriddenStatus, false);
                    break;
                case Register:
                    node.register(info);
                    break;
                case StatusUpdate:
                    infoFromRegistry = getInstanceByAppAndId(appName, id, false);
                    node.statusUpdate(appName, id, newStatus, infoFromRegistry);
                    break;
                case DeleteStatusOverride:
                    infoFromRegistry = getInstanceByAppAndId(appName, id, false);
                    node.deleteStatusOverride(appName, id, infoFromRegistry);
                    break;
            }
        } catch (Throwable t) {
            logger.error("Cannot replicate information to {} for action {}", node.getServiceUrl(), action.name(), t);
        } finally {
            // Always clear the thread-local to avoid leaking it across requests.
            CurrentRequestVersion.remove();
        }
    }
/**
* Replicates all ASG status changes to peer eureka nodes except for
* replication traffic to this node.
*/
private void replicateASGInfoToReplicaNodes(final String asgName,
final ASGStatus newStatus, final PeerEurekaNode node) {
CurrentRequestVersion.set(Version.V2);
try {
node.statusUpdate(asgName, newStatus);
} catch (Throwable e) {
logger.error("Cannot replicate ASG status information to {}", node.getServiceUrl(), e);
} finally {
CurrentRequestVersion.remove();
}
}
    // Servo gauge exposing the current number of leases held in the local registry.
    @Override
    @com.netflix.servo.annotations.Monitor(name = "localRegistrySize",
            description = "Current registry size", type = DataSourceType.GAUGE)
    public long getLocalRegistrySize() {
        return super.getLocalRegistrySize();
    }
}
/*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.eureka.registry;
import javax.inject.Inject;
import javax.ws.rs.core.MediaType;
import java.net.InetAddress;
import java.net.URL;
import java.net.UnknownHostException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.SynchronousQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.atomic.AtomicReference;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
import com.netflix.appinfo.InstanceInfo;
import com.netflix.appinfo.InstanceInfo.ActionType;
import com.netflix.discovery.EurekaClientConfig;
import com.netflix.discovery.EurekaIdentityHeaderFilter;
import com.netflix.discovery.TimedSupervisorTask;
import com.netflix.discovery.shared.Application;
import com.netflix.discovery.shared.Applications;
import com.netflix.discovery.shared.LookupService;
import com.netflix.discovery.shared.resolver.ClusterResolver;
import com.netflix.discovery.shared.resolver.StaticClusterResolver;
import com.netflix.discovery.shared.transport.EurekaHttpClient;
import com.netflix.discovery.shared.transport.EurekaHttpResponse;
import com.netflix.discovery.shared.transport.jersey.EurekaJerseyClient;
import com.netflix.discovery.shared.transport.jersey.EurekaJerseyClientImpl.EurekaJerseyClientBuilder;
import com.netflix.eureka.EurekaServerConfig;
import com.netflix.eureka.EurekaServerIdentity;
import com.netflix.eureka.resources.ServerCodecs;
import com.netflix.eureka.transport.EurekaServerHttpClients;
import com.netflix.servo.annotations.DataSourceType;
import com.netflix.servo.monitor.Monitors;
import com.netflix.servo.monitor.Stopwatch;
import com.sun.jersey.api.client.ClientResponse;
import com.sun.jersey.api.client.filter.GZIPContentEncodingFilter;
import com.sun.jersey.client.apache4.ApacheHttpClient4;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import static com.netflix.eureka.Names.METRIC_REGISTRY_PREFIX;
/**
* Handles all registry operations that needs to be done on a eureka service running in an other region.
*
* The primary operations include fetching registry information from remote region and fetching delta information
* on a periodic basis.
*
* TODO: a lot of the networking code in this class can be replaced by newer code in
* {@link com.netflix.discovery.DiscoveryClient}
*
* @author Karthik Ranganathan
*
*/
public class RemoteRegionRegistry implements LookupService<String> {
private static final Logger logger = LoggerFactory.getLogger(RemoteRegionRegistry.class);
private final ApacheHttpClient4 discoveryApacheClient;
private final EurekaJerseyClient discoveryJerseyClient;
private final com.netflix.servo.monitor.Timer fetchRegistryTimer;
private final URL remoteRegionURL;
private final ScheduledExecutorService scheduler;
// monotonically increasing generation counter to ensure stale threads do not reset registry to an older version
private final AtomicLong fetchRegistryGeneration = new AtomicLong(0);
private final Lock fetchRegistryUpdateLock = new ReentrantLock();
private final AtomicReference<Applications> applications = new AtomicReference<>(new Applications());
private final AtomicReference<Applications> applicationsDelta = new AtomicReference<>(new Applications());
private final EurekaServerConfig serverConfig;
private volatile boolean readyForServingData;
private final EurekaHttpClient eurekaHttpClient;
private long timeOfLastSuccessfulRemoteFetch = System.currentTimeMillis();
private long deltaSuccesses = 0;
private long deltaMismatches = 0;
    /**
     * Builds a registry view over a single remote region: configures the
     * legacy Jersey transport (plus optional GZIP and identity filters),
     * attempts to build the newer transport, performs an initial full fetch,
     * and schedules the periodic remote-region refresh task.
     */
    @Inject
    public RemoteRegionRegistry(EurekaServerConfig serverConfig,
                                EurekaClientConfig clientConfig,
                                ServerCodecs serverCodecs,
                                String regionName,
                                URL remoteRegionURL) {
        this.serverConfig = serverConfig;
        this.remoteRegionURL = remoteRegionURL;
        this.fetchRegistryTimer = Monitors.newTimer(this.remoteRegionURL.toString() + "_FetchRegistry");
        EurekaJerseyClientBuilder clientBuilder = new EurekaJerseyClientBuilder()
                .withUserAgent("Java-EurekaClient-RemoteRegion")
                .withEncoderWrapper(serverCodecs.getFullJsonCodec())
                .withDecoderWrapper(serverCodecs.getFullJsonCodec())
                .withConnectionTimeout(serverConfig.getRemoteRegionConnectTimeoutMs())
                .withReadTimeout(serverConfig.getRemoteRegionReadTimeoutMs())
                .withMaxConnectionsPerHost(serverConfig.getRemoteRegionTotalConnectionsPerHost())
                .withMaxTotalConnections(serverConfig.getRemoteRegionTotalConnections())
                .withConnectionIdleTimeout(serverConfig.getRemoteRegionConnectionIdleTimeoutSeconds());
        // Pick the client flavor based on the remote URL scheme and SSL configuration.
        if (remoteRegionURL.getProtocol().equals("http")) {
            clientBuilder.withClientName("Discovery-RemoteRegionClient-" + regionName);
        } else if ("true".equals(System.getProperty("com.netflix.eureka.shouldSSLConnectionsUseSystemSocketFactory"))) {
            clientBuilder.withClientName("Discovery-RemoteRegionSystemSecureClient-" + regionName)
                    .withSystemSSLConfiguration();
        } else {
            clientBuilder.withClientName("Discovery-RemoteRegionSecureClient-" + regionName)
                    .withTrustStoreFile(
                            serverConfig.getRemoteRegionTrustStore(),
                            serverConfig.getRemoteRegionTrustStorePassword()
                    );
        }
        discoveryJerseyClient = clientBuilder.build();
        discoveryApacheClient = discoveryJerseyClient.getClient();
        // should we enable GZip decoding of responses based on Response Headers?
        if (serverConfig.shouldGZipContentFromRemoteRegion()) {
            // compressed only if there exists a 'Content-Encoding' header whose value is "gzip"
            discoveryApacheClient.addFilter(new GZIPContentEncodingFilter(false));
        }
        // Attach this server's identity (its IP) to outgoing requests.
        String ip = null;
        try {
            ip = InetAddress.getLocalHost().getHostAddress();
        } catch (UnknownHostException e) {
            logger.warn("Cannot find localhost ip", e);
        }
        EurekaServerIdentity identity = new EurekaServerIdentity(ip);
        discoveryApacheClient.addFilter(new EurekaIdentityHeaderFilter(identity));
        // Configure new transport layer (candidate for injecting in the future)
        EurekaHttpClient newEurekaHttpClient = null;
        try {
            ClusterResolver clusterResolver = StaticClusterResolver.fromURL(regionName, remoteRegionURL);
            newEurekaHttpClient = EurekaServerHttpClients.createRemoteRegionClient(
                    serverConfig, clientConfig.getTransportConfig(), serverCodecs, clusterResolver);
        } catch (Exception e) {
            logger.warn("Transport initialization failure", e);
        }
        this.eurekaHttpClient = newEurekaHttpClient;
        // Eagerly fetch once so we can start serving data as soon as possible.
        try {
            if (fetchRegistry()) {
                this.readyForServingData = true;
            } else {
                logger.warn("Failed to fetch remote registry. This means this eureka server is not ready for serving "
                        + "traffic.");
            }
        } catch (Throwable e) {
            logger.error("Problem fetching registry information :", e);
        }
        // remote region fetch
        Runnable remoteRegionFetchTask = new Runnable() {
            @Override
            public void run() {
                try {
                    if (fetchRegistry()) {
                        readyForServingData = true;
                    } else {
                        logger.warn("Failed to fetch remote registry. This means this eureka server is not "
                                + "ready for serving traffic.");
                    }
                } catch (Throwable e) {
                    logger.error(
                            "Error getting from remote registry :", e);
                }
            }
        };
        ThreadPoolExecutor remoteRegionFetchExecutor = new ThreadPoolExecutor(
                1, serverConfig.getRemoteRegionFetchThreadPoolSize(), 0, TimeUnit.SECONDS, new SynchronousQueue<Runnable>()); // use direct handoff
        scheduler = Executors.newScheduledThreadPool(1,
                new ThreadFactoryBuilder()
                        .setNameFormat("Eureka-RemoteRegionCacheRefresher_" + regionName + "-%d")
                        .setDaemon(true)
                        .build());
        // TimedSupervisorTask reschedules itself with exponential backoff on failure.
        scheduler.schedule(
                new TimedSupervisorTask(
                        "RemoteRegionFetch_" + regionName,
                        scheduler,
                        remoteRegionFetchExecutor,
                        serverConfig.getRemoteRegionRegistryFetchInterval(),
                        TimeUnit.SECONDS,
                        5, // exponential backoff bound
                        remoteRegionFetchTask
                ),
                serverConfig.getRemoteRegionRegistryFetchInterval(), TimeUnit.SECONDS);
        try {
            Monitors.registerObject(this);
        } catch (Throwable e) {
            logger.warn("Cannot register the JMX monitor for the RemoteRegionRegistry :", e);
        }
    }
    /**
     * Check if this registry is ready for serving data.
     * Becomes true after the first successful remote registry fetch.
     * @return true if ready, false otherwise.
     */
    public boolean isReadyForServingData() {
        return readyForServingData;
    }
/**
* Fetch the registry information from the remote region.
* @return true, if the fetch was successful, false otherwise.
*/
private boolean fetchRegistry() {
boolean success;
Stopwatch tracer = fetchRegistryTimer.start();
try {
// If the delta is disabled or if it is the first time, get all applications
if (serverConfig.shouldDisableDeltaForRemoteRegions()
|| (getApplications() == null)
|| (getApplications().getRegisteredApplications().size() == 0)) {
logger.info("Disable delta property : {}", serverConfig.shouldDisableDeltaForRemoteRegions());
logger.info("Application is null : {}", getApplications() == null);
logger.info("Registered Applications size is zero : {}", getApplications().getRegisteredApplications().isEmpty());
success = storeFullRegistry();
} else {
success = fetchAndStoreDelta();
}
logTotalInstances();
} catch (Throwable e) {
logger.error("Unable to fetch registry information from the remote registry {}", this.remoteRegionURL, e);
return false;
} finally {
if (tracer != null) {
tracer.stop();
}
}
if (success) {
timeOfLastSuccessfulRemoteFetch = System.currentTimeMillis();
}
return success;
}
    /**
     * Fetches delta information from the remote region and applies it to the
     * local cache, falling back to a full registry fetch when the delta cannot
     * be applied safely or the reconcile hash codes disagree.
     *
     * @return true if the cache was updated (via delta or fallback full fetch).
     * @throws Throwable propagated from the underlying fetch.
     */
    private boolean fetchAndStoreDelta() throws Throwable {
        // Snapshot the generation so a concurrent full fetch cannot be clobbered.
        long currGeneration = fetchRegistryGeneration.get();
        Applications delta = fetchRemoteRegistry(true);
        if (delta == null) {
            logger.error("The delta is null for some reason. Not storing this information");
        } else if (fetchRegistryGeneration.compareAndSet(currGeneration, currGeneration + 1)) {
            this.applicationsDelta.set(delta);
        } else {
            delta = null; // set the delta to null so we don't use it
            logger.warn("Not updating delta as another thread is updating it already");
        }
        if (delta == null) {
            logger.warn("The server does not allow the delta revision to be applied because it is not "
                    + "safe. Hence got the full registry.");
            return storeFullRegistry();
        } else {
            String reconcileHashCode = "";
            // tryLock: if another thread is already applying an update, skip this cycle.
            if (fetchRegistryUpdateLock.tryLock()) {
                try {
                    updateDelta(delta);
                    reconcileHashCode = getApplications().getReconcileHashCode();
                } finally {
                    fetchRegistryUpdateLock.unlock();
                }
            } else {
                logger.warn("Cannot acquire update lock, aborting updateDelta operation of fetchAndStoreDelta");
            }
            // There is a diff in number of instances for some reason
            if (!reconcileHashCode.equals(delta.getAppsHashCode())) {
                deltaMismatches++;
                return reconcileAndLogDifference(delta, reconcileHashCode);
            } else {
                deltaSuccesses++;
            }
        }
        return delta != null;
    }
/**
* Updates the delta information fetches from the eureka server into the
* local cache.
*
* @param delta
* the delta information received from eureka server in the last
* poll cycle.
*/
private void updateDelta(Applications delta) {
int deltaCount = 0;
for (Application app : delta.getRegisteredApplications()) {
for (InstanceInfo instance : app.getInstances()) {
++deltaCount;
if (ActionType.ADDED.equals(instance.getActionType())) {
Application existingApp = getApplications()
.getRegisteredApplications(instance.getAppName());
if (existingApp == null) {
getApplications().addApplication(app);
}
logger.debug("Added instance {} to the existing apps ",
instance.getId());
getApplications().getRegisteredApplications(
instance.getAppName()).addInstance(instance);
} else if (ActionType.MODIFIED.equals(instance.getActionType())) {
Application existingApp = getApplications()
.getRegisteredApplications(instance.getAppName());
if (existingApp == null) {
getApplications().addApplication(app);
}
logger.debug("Modified instance {} to the existing apps ",
instance.getId());
getApplications().getRegisteredApplications(
instance.getAppName()).addInstance(instance);
} else if (ActionType.DELETED.equals(instance.getActionType())) {
Application existingApp = getApplications()
.getRegisteredApplications(instance.getAppName());
if (existingApp == null) {
getApplications().addApplication(app);
}
logger.debug("Deleted instance {} to the existing apps ",
instance.getId());
getApplications().getRegisteredApplications(
instance.getAppName()).removeInstance(instance);
}
}
}
logger.debug(
"The total number of instances fetched by the delta processor : {}",
deltaCount);
}
/**
* Close HTTP response object and its respective resources.
*
* @param response
* the HttpResponse object.
*/
private void closeResponse(ClientResponse response) {
if (response != null) {
try {
response.close();
} catch (Throwable th) {
logger.error("Cannot release response resource :", th);
}
}
}
/**
* Gets the full registry information from the eureka server and stores it
* locally.
*
* @return the full registry information.
*/
public boolean storeFullRegistry() {
long currentGeneration = fetchRegistryGeneration.get();
Applications apps = fetchRemoteRegistry(false);
if (apps == null) {
logger.error("The application is null for some reason. Not storing this information");
} else if (fetchRegistryGeneration.compareAndSet(currentGeneration, currentGeneration + 1)) {
applications.set(apps);
applicationsDelta.set(apps);
logger.info("Successfully updated registry with the latest content");
return true;
} else {
logger.warn("Not updating applications as another thread is updating it already");
}
return false;
}
    /**
     * Fetch registry information from the remote region, using either the
     * newer transport (when enabled) or the legacy Jersey/Apache client.
     *
     * @param delta - true, if the fetch needs to get deltas, false otherwise
     * @return - the fetched {@link Applications}, or null on any failure.
     */
    private Applications fetchRemoteRegistry(boolean delta) {
        logger.info("Getting instance registry info from the eureka server : {} , delta : {}", this.remoteRegionURL, delta);
        if (shouldUseExperimentalTransport()) {
            // Newer transport path: typed client with built-in endpoint resolution.
            try {
                EurekaHttpResponse<Applications> httpResponse = delta ? eurekaHttpClient.getDelta() : eurekaHttpClient.getApplications();
                int httpStatus = httpResponse.getStatusCode();
                if (httpStatus >= 200 && httpStatus < 300) {
                    logger.debug("Got the data successfully : {}", httpStatus);
                    return httpResponse.getEntity();
                }
                logger.warn("Cannot get the data from {} : {}", this.remoteRegionURL, httpStatus);
            } catch (Throwable t) {
                logger.error("Can't get a response from {}", this.remoteRegionURL, t);
            }
        } else {
            // Legacy path: raw Jersey call against apps/ or apps/delta.
            ClientResponse response = null;
            try {
                String urlPath = delta ? "apps/delta" : "apps/";
                response = discoveryApacheClient.resource(this.remoteRegionURL + urlPath)
                        .accept(MediaType.APPLICATION_JSON_TYPE)
                        .get(ClientResponse.class);
                int httpStatus = response.getStatus();
                if (httpStatus >= 200 && httpStatus < 300) {
                    logger.debug("Got the data successfully : {}", httpStatus);
                    return response.getEntity(Applications.class);
                }
                logger.warn("Cannot get the data from {} : {}", this.remoteRegionURL, httpStatus);
            } catch (Throwable t) {
                logger.error("Can't get a response from {}", this.remoteRegionURL, t);
            } finally {
                // The Jersey response must always be released, even on failure.
                closeResponse(response);
            }
        }
        return null;
    }
/**
* Reconciles the delta information fetched to see if the hashcodes match.
*
* @param delta - the delta information fetched previously for reconciliation.
* @param reconcileHashCode - the hashcode for comparison.
* @return - response
* @throws Throwable
*/
private boolean reconcileAndLogDifference(Applications delta, String reconcileHashCode) throws Throwable {
logger.warn("The Reconcile hashcodes do not match, client : {}, server : {}. Getting the full registry",
reconcileHashCode, delta.getAppsHashCode());
long currentGeneration = fetchRegistryGeneration.get();
Applications apps = this.fetchRemoteRegistry(false);
if (apps == null) {
logger.error("The application is null for some reason. Not storing this information");
return false;
}
if (fetchRegistryGeneration.compareAndSet(currentGeneration, currentGeneration + 1)) {
applications.set(apps);
applicationsDelta.set(apps);
logger.warn("The Reconcile hashcodes after complete sync up, client : {}, server : {}.",
getApplications().getReconcileHashCode(),
delta.getAppsHashCode());
return true;
}else {
logger.warn("Not setting the applications map as another thread has advanced the update generation");
return true; // still return true
}
}
/**
* Logs the total number of non-filtered instances stored locally.
*/
private void logTotalInstances() {
int totInstances = 0;
for (Application application : getApplications().getRegisteredApplications()) {
totInstances += application.getInstancesAsIsFromEureka().size();
}
logger.debug("The total number of all instances in the client now is {}", totInstances);
}
    /**
     * Returns the most recently fetched snapshot of the remote region's registry.
     * NOTE(review): presumably empty until the first successful fetch completes - confirm.
     */
    @Override
    public Applications getApplications() {
        return applications.get();
    }
    /**
     * Not supported by this remote-region view; always returns null regardless of arguments.
     * NOTE(review): callers presumably never use server resolution on remote regions - confirm.
     */
    @Override
    public InstanceInfo getNextServerFromEureka(String arg0, boolean arg1) {
        return null;
    }
    /**
     * Looks up a single application by name in the current registry snapshot.
     *
     * @param appName the application name to look up
     * @return the matching {@link Application} (presumably null when absent - depends on
     *         {@code Applications#getRegisteredApplications(String)}; confirm)
     */
    @Override
    public Application getApplication(String appName) {
        return this.applications.get().getRegisteredApplications(appName);
    }
@Override
public List<InstanceInfo> getInstancesById(String id) {
List<InstanceInfo> list = new ArrayList<>(1);
for (Application app : applications.get().getRegisteredApplications()) {
InstanceInfo info = app.getByInstanceId(id);
if (info != null) {
list.add(info);
return list;
}
}
return Collections.emptyList();
}
    /**
     * Returns the most recently stored delta payload for this remote region.
     * NOTE(review): the reference is shared, not a defensive copy - callers must not mutate.
     */
    public Applications getApplicationDeltas() {
        return this.applicationsDelta.get();
    }
private boolean shouldUseExperimentalTransport() {
if (eurekaHttpClient == null) {
return false;
}
String enabled = serverConfig.getExperimental("transport.enabled");
return enabled != null && "true".equalsIgnoreCase(enabled);
}
    /** Servo gauge: seconds elapsed since the last successful remote registry fetch. */
    @com.netflix.servo.annotations.Monitor(name = METRIC_REGISTRY_PREFIX + "secondsSinceLastSuccessfulRemoteFetch", type = DataSourceType.GAUGE)
    public long getTimeOfLastSuccessfulRemoteFetch() {
        return (System.currentTimeMillis() - timeOfLastSuccessfulRemoteFetch) / 1000;
    }
    /** Servo counter: number of successful remote delta fetches. */
    @com.netflix.servo.annotations.Monitor(name = METRIC_REGISTRY_PREFIX + "remoteDeltaSuccesses", type = DataSourceType.COUNTER)
    public long getRemoteFetchSuccesses() {
        return deltaSuccesses;
    }
    /** Servo counter: number of delta fetches whose hashcode did not reconcile. */
    @com.netflix.servo.annotations.Monitor(name = METRIC_REGISTRY_PREFIX + "remoteDeltaMismatches", type = DataSourceType.COUNTER)
    public long getRemoteFetchMismatches() {
        return deltaMismatches;
    }
}
// ==== file: com/netflix/eureka/registry/ResponseCache.java ====
package com.netflix.eureka.registry;
import javax.annotation.Nullable;
import java.util.concurrent.atomic.AtomicLong;
/**
* @author David Liu
*/
public interface ResponseCache {
    /**
     * Invalidates all cached payload variants for the given application and its
     * (secure) VIP addresses.
     *
     * @param appName the application whose cached entries should be dropped
     * @param vipAddress the VIP address to invalidate, or null if none
     * @param secureVipAddress the secure VIP address to invalidate, or null if none
     */
    void invalidate(String appName, @Nullable String vipAddress, @Nullable String secureVipAddress);
    /**
     * Returns the live version counter for the delta payload; callers share the
     * same mutable {@link AtomicLong}.
     * NOTE(review): presumably incremented on each delta refresh - confirm in the implementation.
     */
    AtomicLong getVersionDelta();
    /**
     * Returns the live version counter for the delta payload that includes remote
     * regions; callers share the same mutable {@link AtomicLong}.
     */
    AtomicLong getVersionDeltaWithRegions();
    /**
     * Get the cached information about applications.
     *
     * <p>
     * If the cached information is not available it is generated on the first
     * request. After the first request, the information is then updated
     * periodically by a background thread.
     * </p>
     *
     * @param key the key for which the cached information needs to be obtained.
     * @return payload which contains information about the applications.
     */
    String get(Key key);
    /**
     * Get the compressed information about the applications.
     *
     * @param key the key for which the compressed cached information needs to be obtained.
     * @return compressed payload which contains information about the applications.
     */
    byte[] getGZIP(Key key);
    /**
     * Performs a shutdown of this cache by stopping internal threads and unregistering
     * Servo monitors.
     */
    void stop();
}
// ==== file: com/netflix/eureka/registry/PeerAwareInstanceRegistry.java ====
/*
* Copyright 2015 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.eureka.registry;
import com.netflix.appinfo.InstanceInfo;
import com.netflix.discovery.shared.Application;
import com.netflix.eureka.cluster.PeerEurekaNodes;
import com.netflix.eureka.resources.ASGResource;
import java.util.List;
/**
* @author Tomasz Bak
*/
public interface PeerAwareInstanceRegistry extends InstanceRegistry {
    /**
     * Initializes this registry with the set of peer nodes that replication
     * traffic is sent to / received from.
     *
     * @param peerEurekaNodes the peer eureka nodes this registry replicates with
     * @throws Exception if initialization fails
     */
    void init(PeerEurekaNodes peerEurekaNodes) throws Exception;
    /**
     * Populates the registry information from a peer eureka node. This
     * operation fails over to other nodes until the list is exhausted if the
     * communication fails.
     */
    int syncUp();
    /**
     * Checks to see if the registry access is allowed or the server is in a
     * situation where it does not all getting registry information. The server
     * does not return registry information for a period specified in
     * {@link com.netflix.eureka.EurekaServerConfig#getWaitTimeInMsWhenSyncEmpty()}, if it cannot
     * get the registry information from the peer eureka nodes at start up.
     *
     * @return false - if the instances count from a replica transfer returned
     * zero and if the wait time has not elapsed, otherwise returns true
     */
    boolean shouldAllowAccess(boolean remoteRegionRequired);
    /**
     * Registers an instance.
     *
     * @param info the instance information to register
     * @param isReplication true if the registration was replicated from a peer node
     */
    void register(InstanceInfo info, boolean isReplication);
    /**
     * Applies a status update to every instance belonging to the given ASG.
     *
     * @param asgName the auto scaling group name
     * @param newStatus the new ASG status to apply
     * @param isReplication true if the update was replicated from a peer node
     */
    void statusUpdate(final String asgName, final ASGResource.ASGStatus newStatus, final boolean isReplication);
}
// ==== file: com/netflix/eureka/registry/Key.java ====
package com.netflix.eureka.registry;
import com.netflix.appinfo.EurekaAccept;
import com.netflix.eureka.Version;
import javax.annotation.Nullable;
import java.util.Arrays;
/**
 * Immutable cache key for response-cache entries. Identity is fully captured by the
 * precomputed {@link #hashKey} string, which encodes entity type, entity name,
 * regions, payload format, version and accept encoding.
 */
public class Key {

    public enum KeyType {
        JSON, XML
    }

    /**
     * An enum to define the entity that is stored in this cache for this key.
     */
    public enum EntityType {
        Application, VIP, SVIP
    }

    private final String entityName;
    private final String[] regions;
    private final KeyType requestType;
    private final Version requestVersion;
    private final String hashKey;
    private final EntityType entityType;
    private final EurekaAccept eurekaAccept;

    public Key(EntityType entityType, String entityName, KeyType type, Version v, EurekaAccept eurekaAccept) {
        this(entityType, entityName, type, v, eurekaAccept, null);
    }

    public Key(EntityType entityType, String entityName, KeyType type, Version v, EurekaAccept eurekaAccept, @Nullable String[] regions) {
        this.regions = regions;
        this.entityType = entityType;
        this.entityName = entityName;
        this.requestType = type;
        this.requestVersion = v;
        this.eurekaAccept = eurekaAccept;
        // Build the identity string once; it must stay stable across releases because
        // it is the sole input to equals()/hashCode().
        StringBuilder keyBuilder = new StringBuilder()
                .append(this.entityType)
                .append(this.entityName);
        if (null != this.regions) {
            keyBuilder.append(Arrays.toString(this.regions));
        }
        this.hashKey = keyBuilder
                .append(requestType.name())
                .append(requestVersion.name())
                .append(this.eurekaAccept.name())
                .toString();
    }

    public String getName() {
        return entityName;
    }

    public String getHashKey() {
        return hashKey;
    }

    public KeyType getType() {
        return requestType;
    }

    public Version getVersion() {
        return requestVersion;
    }

    public EurekaAccept getEurekaAccept() {
        return eurekaAccept;
    }

    public EntityType getEntityType() {
        return entityType;
    }

    /** @return true when this key is scoped to one or more remote regions. */
    public boolean hasRegions() {
        return regions != null && regions.length > 0;
    }

    public String[] getRegions() {
        return regions;
    }

    /** @return an equivalent key with the region scoping removed. */
    public Key cloneWithoutRegions() {
        return new Key(entityType, entityName, requestType, requestVersion, eurekaAccept);
    }

    @Override
    public int hashCode() {
        return getHashKey().hashCode();
    }

    @Override
    public boolean equals(Object other) {
        return (other instanceof Key) && getHashKey().equals(((Key) other).getHashKey());
    }

    /** Compact human-readable rendering for logs. */
    public String toStringCompact() {
        StringBuilder sb = new StringBuilder()
                .append("{name=").append(entityName)
                .append(", type=").append(entityType)
                .append(", format=").append(requestType);
        if (regions != null) {
            sb.append(", regions=").append(Arrays.toString(regions));
        }
        return sb.append('}').toString();
    }
}
// ==== file: com/netflix/eureka/registry/AwsInstanceRegistry.java ====
/*
* Copyright 2015 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.eureka.registry;
import com.netflix.discovery.EurekaClient;
import com.netflix.discovery.EurekaClientConfig;
import com.netflix.eureka.EurekaServerConfig;
import com.netflix.eureka.aws.AwsAsgUtil;
import com.netflix.eureka.cluster.PeerEurekaNodes;
import com.netflix.eureka.registry.rule.AsgEnabledRule;
import com.netflix.eureka.registry.rule.DownOrStartingRule;
import com.netflix.eureka.registry.rule.FirstMatchWinsCompositeRule;
import com.netflix.eureka.registry.rule.InstanceStatusOverrideRule;
import com.netflix.eureka.registry.rule.LeaseExistsRule;
import com.netflix.eureka.registry.rule.OverrideExistsRule;
import com.netflix.eureka.resources.ServerCodecs;
import javax.inject.Inject;
import javax.inject.Singleton;
/**
* Override some methods with aws specific use cases.
*
* @author David Liu
*/
@Singleton
public class AwsInstanceRegistry extends PeerAwareInstanceRegistryImpl {
    // Both fields are built in init(); they remain null until init() runs.
    private AwsAsgUtil awsAsgUtil;
    private InstanceStatusOverrideRule instanceStatusOverrideRule;
    @Inject
    public AwsInstanceRegistry(EurekaServerConfig serverConfig,
                               EurekaClientConfig clientConfig,
                               ServerCodecs serverCodecs,
                               EurekaClient eurekaClient) {
        super(serverConfig, clientConfig, serverCodecs, eurekaClient);
    }
    /**
     * Completes base initialization, then wires the AWS-specific status override
     * chain. Order matters: super.init must run first, and awsAsgUtil must exist
     * before the AsgEnabledRule that references it is constructed.
     */
    @Override
    public void init(PeerEurekaNodes peerEurekaNodes) throws Exception {
        super.init(peerEurekaNodes);
        this.awsAsgUtil = new AwsAsgUtil(serverConfig, clientConfig, this);
        // We first check if the instance is STARTING or DOWN, then we check explicit overrides,
        // then we see if our ASG is UP, then we check the status of a potentially existing lease.
        this.instanceStatusOverrideRule = new FirstMatchWinsCompositeRule(new DownOrStartingRule(),
                new OverrideExistsRule(overriddenInstanceStatusMap), new AsgEnabledRule(this.awsAsgUtil),
                new LeaseExistsRule());
    }
    /** @return the AWS-aware rule chain built in {@link #init(PeerEurekaNodes)}. */
    @Override
    protected InstanceStatusOverrideRule getInstanceInfoOverrideRule() {
        return this.instanceStatusOverrideRule;
    }
    public AwsAsgUtil getAwsAsgUtil() {
        return awsAsgUtil;
    }
}
// ==== file: com/netflix/eureka/registry/AbstractInstanceRegistry.java ====
/*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.eureka.registry;
import javax.annotation.Nullable;
import java.net.MalformedURLException;
import java.net.URL;
import java.util.AbstractQueue;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Random;
import java.util.Set;
import java.util.Timer;
import java.util.TimerTask;
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.atomic.AtomicReference;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import com.google.common.cache.CacheBuilder;
import com.netflix.appinfo.InstanceInfo;
import com.netflix.appinfo.InstanceInfo.ActionType;
import com.netflix.appinfo.InstanceInfo.InstanceStatus;
import com.netflix.appinfo.LeaseInfo;
import com.netflix.discovery.EurekaClientConfig;
import com.netflix.discovery.shared.Application;
import com.netflix.discovery.shared.Applications;
import com.netflix.discovery.shared.Pair;
import com.netflix.eureka.EurekaServerConfig;
import com.netflix.eureka.lease.Lease;
import com.netflix.eureka.registry.rule.InstanceStatusOverrideRule;
import com.netflix.eureka.resources.ServerCodecs;
import com.netflix.eureka.util.MeasuredRate;
import com.netflix.servo.annotations.DataSourceType;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import static com.netflix.eureka.util.EurekaMonitors.*;
/**
* Handles all registry requests from eureka clients.
*
* <p>
* Primary operations that are performed are the
* <em>Registers</em>, <em>Renewals</em>, <em>Cancels</em>, <em>Expirations</em>, and <em>Status Changes</em>. The
* registry also stores only the delta operations
* </p>
*
* @author Karthik Ranganathan
*
*/
public abstract class AbstractInstanceRegistry implements InstanceRegistry {
    private static final Logger logger = LoggerFactory.getLogger(AbstractInstanceRegistry.class);
    private static final String[] EMPTY_STR_ARRAY = new String[0];
    // appName -> (instanceId -> lease): the authoritative local registry.
    private final ConcurrentHashMap<String, Map<String, Lease<InstanceInfo>>> registry
            = new ConcurrentHashMap<String, Map<String, Lease<InstanceInfo>>>();
    // region name -> registry view of that remote region (populated in initRemoteRegionRegistry).
    protected Map<String, RemoteRegionRegistry> regionNameVSRemoteRegistry = new HashMap<String, RemoteRegionRegistry>();
    // instanceId -> manually overridden status; entries expire 1h after last access.
    protected final ConcurrentMap<String, InstanceStatus> overriddenInstanceStatusMap = CacheBuilder
            .newBuilder().initialCapacity(500)
            .expireAfterAccess(1, TimeUnit.HOURS)
            .<String, InstanceStatus>build().asMap();
    // CircularQueues here for debugging/statistics purposes only
    private final CircularQueue<Pair<Long, String>> recentRegisteredQueue;
    private final CircularQueue<Pair<Long, String>> recentCanceledQueue;
    // Changes (add/modify/delete) retained for delta fetches; trimmed by the retention timer.
    private ConcurrentLinkedQueue<RecentlyChangedItem> recentlyChangedQueue = new ConcurrentLinkedQueue<>();
    // Read lock guards per-instance mutations; write lock is for whole-registry operations.
    private final ReentrantReadWriteLock readWriteLock = new ReentrantReadWriteLock();
    private final Lock read = readWriteLock.readLock();
    private final Lock write = readWriteLock.writeLock();
    // Guards expectedNumberOfClientsSendingRenews / numberOfRenewsPerMinThreshold updates.
    protected final Object lock = new Object();
    private Timer deltaRetentionTimer = new Timer("Eureka-DeltaRetentionTimer", true);
    private Timer evictionTimer = new Timer("Eureka-EvictionTimer", true);
    private final MeasuredRate renewsLastMin;
    private final AtomicReference<EvictionTask> evictionTaskRef = new AtomicReference<>();
    protected String[] allKnownRemoteRegions = EMPTY_STR_ARRAY;
    // Self-preservation inputs: expected renewing clients and the derived per-minute threshold.
    protected volatile int numberOfRenewsPerMinThreshold;
    protected volatile int expectedNumberOfClientsSendingRenews;
    protected final EurekaServerConfig serverConfig;
    protected final EurekaClientConfig clientConfig;
    protected final ServerCodecs serverCodecs;
    protected volatile ResponseCache responseCache;
    /**
     * Create a new, empty instance registry.
     *
     * @param serverConfig server-side configuration (timers, retention intervals)
     * @param clientConfig eureka client configuration used for remote region access
     * @param serverCodecs codecs used when building the response cache
     */
    protected AbstractInstanceRegistry(EurekaServerConfig serverConfig, EurekaClientConfig clientConfig, ServerCodecs serverCodecs) {
        this.serverConfig = serverConfig;
        this.clientConfig = clientConfig;
        this.serverCodecs = serverCodecs;
        this.recentCanceledQueue = new CircularQueue<Pair<Long, String>>(1000);
        this.recentRegisteredQueue = new CircularQueue<Pair<Long, String>>(1000);
        // Renewal rate measured over a sliding 1-minute window.
        this.renewsLastMin = new MeasuredRate(1000 * 60 * 1);
        // Periodically trim recentlyChangedQueue so delta payloads stay bounded.
        this.deltaRetentionTimer.schedule(getDeltaRetentionTask(),
                serverConfig.getDeltaRetentionTimerIntervalInMs(),
                serverConfig.getDeltaRetentionTimerIntervalInMs());
    }
    /**
     * Lazily creates the response cache on first call; synchronized so two
     * concurrent initializers cannot build two caches.
     */
    @Override
    public synchronized void initializedResponseCache() {
        if (responseCache == null) {
            responseCache = new ResponseCacheImpl(serverConfig, serverCodecs, this);
        }
    }
protected void initRemoteRegionRegistry() throws MalformedURLException {
Map<String, String> remoteRegionUrlsWithName = serverConfig.getRemoteRegionUrlsWithName();
if (!remoteRegionUrlsWithName.isEmpty()) {
allKnownRemoteRegions = new String[remoteRegionUrlsWithName.size()];
int remoteRegionArrayIndex = 0;
for (Map.Entry<String, String> remoteRegionUrlWithName : remoteRegionUrlsWithName.entrySet()) {
RemoteRegionRegistry remoteRegionRegistry = new RemoteRegionRegistry(
serverConfig,
clientConfig,
serverCodecs,
remoteRegionUrlWithName.getKey(),
new URL(remoteRegionUrlWithName.getValue()));
regionNameVSRemoteRegistry.put(remoteRegionUrlWithName.getKey(), remoteRegionRegistry);
allKnownRemoteRegions[remoteRegionArrayIndex++] = remoteRegionUrlWithName.getKey();
}
}
logger.info("Finished initializing remote region registries. All known remote regions: {}",
(Object) allKnownRemoteRegions);
}
    /**
     * Returns the response cache; null until {@link #initializedResponseCache()} has run.
     */
    @Override
    public ResponseCache getResponseCache() {
        return responseCache;
    }
public long getLocalRegistrySize() {
long total = 0;
for (Map<String, Lease<InstanceInfo>> entry : registry.values()) {
total += entry.size();
}
return total;
}
    /**
     * Completely clear the registry.
     *
     * <p>Removes all leases, status overrides and the debug/statistics queues.
     * Note: no response-cache invalidation happens here.</p>
     */
    @Override
    public void clearRegistry() {
        overriddenInstanceStatusMap.clear();
        recentCanceledQueue.clear();
        recentRegisteredQueue.clear();
        recentlyChangedQueue.clear();
        registry.clear();
    }
    // for server info use
    /**
     * @return a point-in-time copy of the instanceId -> overridden status map;
     *         mutating the returned map does not affect the registry.
     */
    @Override
    public Map<String, InstanceStatus> overriddenInstanceStatusesSnapshot() {
        return new HashMap<>(overriddenInstanceStatusMap);
    }
    /**
     * Registers a new instance with a given duration.
     *
     * @param registrant the instance to register; may be replaced by the already-held copy
     *                   when the existing lease carries a newer dirty timestamp
     * @param leaseDuration lease duration for the new/refreshed lease
     * @param isReplication true if this registration was replicated from a peer node
     * @see com.netflix.eureka.lease.LeaseManager#register(java.lang.Object, int, boolean)
     */
    public void register(InstanceInfo registrant, int leaseDuration, boolean isReplication) {
        // Read lock: registrations may proceed concurrently with each other; the write
        // lock is presumably reserved for whole-registry operations - TODO confirm.
        read.lock();
        try {
            Map<String, Lease<InstanceInfo>> gMap = registry.get(registrant.getAppName());
            REGISTER.increment(isReplication);
            if (gMap == null) {
                // putIfAbsent keeps the race between two first registrations of an app safe.
                final ConcurrentHashMap<String, Lease<InstanceInfo>> gNewMap = new ConcurrentHashMap<String, Lease<InstanceInfo>>();
                gMap = registry.putIfAbsent(registrant.getAppName(), gNewMap);
                if (gMap == null) {
                    gMap = gNewMap;
                }
            }
            Lease<InstanceInfo> existingLease = gMap.get(registrant.getId());
            // Retain the last dirty timestamp without overwriting it, if there is already a lease
            if (existingLease != null && (existingLease.getHolder() != null)) {
                Long existingLastDirtyTimestamp = existingLease.getHolder().getLastDirtyTimestamp();
                Long registrationLastDirtyTimestamp = registrant.getLastDirtyTimestamp();
                logger.debug("Existing lease found (existing={}, provided={}", existingLastDirtyTimestamp, registrationLastDirtyTimestamp);
                // this is a > instead of a >= because if the timestamps are equal, we still take the remote transmitted
                // InstanceInfo instead of the server local copy.
                if (existingLastDirtyTimestamp > registrationLastDirtyTimestamp) {
                    logger.warn("There is an existing lease and the existing lease's dirty timestamp {} is greater" +
                            " than the one that is being registered {}", existingLastDirtyTimestamp, registrationLastDirtyTimestamp);
                    logger.warn("Using the existing instanceInfo instead of the new instanceInfo as the registrant");
                    registrant = existingLease.getHolder();
                }
            } else {
                // The lease does not exist and hence it is a new registration
                synchronized (lock) {
                    if (this.expectedNumberOfClientsSendingRenews > 0) {
                        // Since the client wants to register it, increase the number of clients sending renews
                        this.expectedNumberOfClientsSendingRenews = this.expectedNumberOfClientsSendingRenews + 1;
                        updateRenewsPerMinThreshold();
                    }
                }
                logger.debug("No previous lease information found; it is new registration");
            }
            Lease<InstanceInfo> lease = new Lease<>(registrant, leaseDuration);
            if (existingLease != null) {
                // Preserve the original service-up time across a re-registration.
                lease.setServiceUpTimestamp(existingLease.getServiceUpTimestamp());
            }
            gMap.put(registrant.getId(), lease);
            recentRegisteredQueue.add(new Pair<Long, String>(
                    System.currentTimeMillis(),
                    registrant.getAppName() + "(" + registrant.getId() + ")"));
            // This is where the initial state transfer of overridden status happens
            if (!InstanceStatus.UNKNOWN.equals(registrant.getOverriddenStatus())) {
                logger.debug("Found overridden status {} for instance {}. Checking to see if needs to be add to the "
                        + "overrides", registrant.getOverriddenStatus(), registrant.getId());
                if (!overriddenInstanceStatusMap.containsKey(registrant.getId())) {
                    logger.info("Not found overridden id {} and hence adding it", registrant.getId());
                    overriddenInstanceStatusMap.put(registrant.getId(), registrant.getOverriddenStatus());
                }
            }
            InstanceStatus overriddenStatusFromMap = overriddenInstanceStatusMap.get(registrant.getId());
            if (overriddenStatusFromMap != null) {
                logger.info("Storing overridden status {} from map", overriddenStatusFromMap);
                registrant.setOverriddenStatus(overriddenStatusFromMap);
            }
            // Set the status based on the overridden status rules
            InstanceStatus overriddenInstanceStatus = getOverriddenInstanceStatus(registrant, existingLease, isReplication);
            registrant.setStatusWithoutDirty(overriddenInstanceStatus);
            // If the lease is registered with UP status, set lease service up timestamp
            if (InstanceStatus.UP.equals(registrant.getStatus())) {
                lease.serviceUp();
            }
            registrant.setActionType(ActionType.ADDED);
            recentlyChangedQueue.add(new RecentlyChangedItem(lease));
            registrant.setLastUpdatedTimestamp();
            // Drop cached payloads so clients observe this registration on their next fetch.
            invalidateCache(registrant.getAppName(), registrant.getVIPAddress(), registrant.getSecureVipAddress());
            logger.info("Registered instance {}/{} with status {} (replication={})",
                    registrant.getAppName(), registrant.getId(), registrant.getStatus(), isReplication);
        } finally {
            read.unlock();
        }
    }
    /**
     * Cancels the registration of an instance.
     *
     * <p>
     * This is normally invoked by a client when it shuts down informing the
     * server to remove the instance from traffic.
     * </p>
     *
     * @param appName the application name of the application.
     * @param id the unique identifier of the instance.
     * @param isReplication true if this is a replication event from other nodes, false
     *                      otherwise.
     * @return true if the instance was removed from the {@link AbstractInstanceRegistry} successfully, false otherwise.
     * @see #internalCancel(String, String, boolean) for the actual removal logic
     */
    @Override
    public boolean cancel(String appName, String id, boolean isReplication) {
        return internalCancel(appName, id, isReplication);
    }
    /**
     * {@link #cancel(String, String, boolean)} method is overridden by {@link PeerAwareInstanceRegistry}, so each
     * cancel request is replicated to the peers. This is however not desired for expires which would be counted
     * in the remote peers as valid cancellations, so self preservation mode would not kick-in.
     *
     * @param appName application whose instance is being cancelled
     * @param id instance id to cancel
     * @param isReplication true if this cancel was replicated from a peer node
     * @return false when no lease was found for the instance, true otherwise
     */
    protected boolean internalCancel(String appName, String id, boolean isReplication) {
        read.lock();
        try {
            CANCEL.increment(isReplication);
            Map<String, Lease<InstanceInfo>> gMap = registry.get(appName);
            Lease<InstanceInfo> leaseToCancel = null;
            if (gMap != null) {
                leaseToCancel = gMap.remove(id);
            }
            // Recorded even for failed cancels (debugging/statistics queue).
            recentCanceledQueue.add(new Pair<Long, String>(System.currentTimeMillis(), appName + "(" + id + ")"));
            InstanceStatus instanceStatus = overriddenInstanceStatusMap.remove(id);
            if (instanceStatus != null) {
                logger.debug("Removed instance id {} from the overridden map which has value {}", id, instanceStatus.name());
            }
            if (leaseToCancel == null) {
                CANCEL_NOT_FOUND.increment(isReplication);
                logger.warn("DS: Registry: cancel failed because Lease is not registered for: {}/{}", appName, id);
                return false;
            } else {
                leaseToCancel.cancel();
                InstanceInfo instanceInfo = leaseToCancel.getHolder();
                String vip = null;
                String svip = null;
                if (instanceInfo != null) {
                    instanceInfo.setActionType(ActionType.DELETED);
                    recentlyChangedQueue.add(new RecentlyChangedItem(leaseToCancel));
                    instanceInfo.setLastUpdatedTimestamp();
                    vip = instanceInfo.getVIPAddress();
                    svip = instanceInfo.getSecureVipAddress();
                }
                invalidateCache(appName, vip, svip);
                logger.info("Cancelled instance {}/{} (replication={})", appName, id, isReplication);
            }
        } finally {
            read.unlock();
        }
        // Threshold adjustment runs after the read lock has been released.
        synchronized (lock) {
            if (this.expectedNumberOfClientsSendingRenews > 0) {
                // Since the client wants to cancel it, reduce the number of clients to send renews.
                this.expectedNumberOfClientsSendingRenews = this.expectedNumberOfClientsSendingRenews - 1;
                updateRenewsPerMinThreshold();
            }
        }
        return true;
    }
    /**
     * Marks the given instance of the given app name as renewed, and also marks whether it originated from
     * replication.
     *
     * @param appName application of the instance being renewed
     * @param id instance id being renewed
     * @param isReplication true if the renewal was replicated from a peer node
     * @return false when no lease exists or the override resolves to UNKNOWN (the client
     *         must re-register); true on successful renewal
     * @see com.netflix.eureka.lease.LeaseManager#renew(java.lang.String, java.lang.String, boolean)
     */
    public boolean renew(String appName, String id, boolean isReplication) {
        RENEW.increment(isReplication);
        Map<String, Lease<InstanceInfo>> gMap = registry.get(appName);
        Lease<InstanceInfo> leaseToRenew = null;
        if (gMap != null) {
            leaseToRenew = gMap.get(id);
        }
        if (leaseToRenew == null) {
            RENEW_NOT_FOUND.increment(isReplication);
            logger.warn("DS: Registry: lease doesn't exist, registering resource: {} - {}", appName, id);
            return false;
        } else {
            InstanceInfo instanceInfo = leaseToRenew.getHolder();
            if (instanceInfo != null) {
                // touchASGCache(instanceInfo.getASGName());
                InstanceStatus overriddenInstanceStatus = this.getOverriddenInstanceStatus(
                        instanceInfo, leaseToRenew, isReplication);
                if (overriddenInstanceStatus == InstanceStatus.UNKNOWN) {
                    logger.info("Instance status UNKNOWN possibly due to deleted override for instance {}"
                            + "; re-register required", instanceInfo.getId());
                    RENEW_NOT_FOUND.increment(isReplication);
                    return false;
                }
                if (!instanceInfo.getStatus().equals(overriddenInstanceStatus)) {
                    logger.info(
                            "The instance status {} is different from overridden instance status {} for instance {}. "
                                    + "Hence setting the status to overridden status", instanceInfo.getStatus().name(),
                                    overriddenInstanceStatus.name(),
                                    instanceInfo.getId());
                    instanceInfo.setStatusWithoutDirty(overriddenInstanceStatus);
                }
            }
            renewsLastMin.increment();
            leaseToRenew.renew();
            return true;
        }
    }
    /**
     * @deprecated this is expensive, try not to use. See if you can use
     * {@link #storeOverriddenStatusIfRequired(String, String, InstanceStatus)} instead.
     *
     * Stores overridden status if it is not already there. This happens during
     * a reconciliation process during renewal requests.
     *
     * <p>Expensive because {@link #getInstancesById(String, boolean)} has no app name
     * to narrow the lookup with.</p>
     *
     * @param id the unique identifier of the instance.
     * @param overriddenStatus Overridden status if any.
     */
    @Deprecated
    @Override
    public void storeOverriddenStatusIfRequired(String id, InstanceStatus overriddenStatus) {
        InstanceStatus instanceStatus = overriddenInstanceStatusMap.get(id);
        if ((instanceStatus == null)
                || (!overriddenStatus.equals(instanceStatus))) {
            // We might not have the overridden status if the server got restarted -this will help us maintain
            // the overridden state from the replica
            logger.info(
                    "Adding overridden status for instance id {} and the value is {}",
                    id, overriddenStatus.name());
            overriddenInstanceStatusMap.put(id, overriddenStatus);
            List<InstanceInfo> instanceInfo = this.getInstancesById(id, false);
            if ((instanceInfo != null) && (!instanceInfo.isEmpty())) {
                instanceInfo.iterator().next().setOverriddenStatus(overriddenStatus);
                logger.info(
                        "Setting the overridden status for instance id {} and the value is {} ",
                        id, overriddenStatus.name());
            }
        }
    }
/**
* Stores overridden status if it is not already there. This happens during
* a reconciliation process during renewal requests.
*
* @param appName the application name of the instance.
* @param id the unique identifier of the instance.
* @param overriddenStatus overridden status if any.
*/
@Override
public void storeOverriddenStatusIfRequired(String appName, String id, InstanceStatus overriddenStatus) {
InstanceStatus instanceStatus = overriddenInstanceStatusMap.get(id);
if ((instanceStatus == null) || (!overriddenStatus.equals(instanceStatus))) {
// We might not have the overridden status if the server got
// restarted -this will help us maintain the overridden state
// from the replica
logger.info("Adding overridden status for instance id {} and the value is {}",
id, overriddenStatus.name());
overriddenInstanceStatusMap.put(id, overriddenStatus);
InstanceInfo instanceInfo = this.getInstanceByAppAndId(appName, id, false);
instanceInfo.setOverriddenStatus(overriddenStatus);
logger.info("Set the overridden status for instance (appname:{}, id:{}} and the value is {} ",
appName, id, overriddenStatus.name());
}
}
    /**
     * Updates the status of an instance. Normally happens to put an instance
     * between {@link InstanceStatus#OUT_OF_SERVICE} and
     * {@link InstanceStatus#UP} to put the instance in and out of traffic.
     *
     * @param appName the application name of the instance.
     * @param id the unique identifier of the instance.
     * @param newStatus the new {@link InstanceStatus}.
     * @param lastDirtyTimestamp last timestamp when this instance information was updated.
     * @param isReplication true if this is a replication event from other nodes, false
     *                      otherwise.
     * @return true if the status was successfully updated, false otherwise.
     */
    @Override
    public boolean statusUpdate(String appName, String id,
                                InstanceStatus newStatus, String lastDirtyTimestamp,
                                boolean isReplication) {
        read.lock();
        try {
            STATUS_UPDATE.increment(isReplication);
            Map<String, Lease<InstanceInfo>> gMap = registry.get(appName);
            Lease<InstanceInfo> lease = null;
            if (gMap != null) {
                lease = gMap.get(id);
            }
            if (lease == null) {
                return false;
            } else {
                // A status update also refreshes the lease, keeping it alive.
                lease.renew();
                InstanceInfo info = lease.getHolder();
                // Lease is always created with its instance info object.
                // This log statement is provided as a safeguard, in case this invariant is violated.
                if (info == null) {
                    logger.error("Found Lease without a holder for instance id {}", id);
                }
                if ((info != null) && !(info.getStatus().equals(newStatus))) {
                    // Mark service as UP if needed
                    if (InstanceStatus.UP.equals(newStatus)) {
                        lease.serviceUp();
                    }
                    // This is NAC overridden status
                    overriddenInstanceStatusMap.put(id, newStatus);
                    // Set it for transfer of overridden status to replica on
                    // replica start up
                    info.setOverriddenStatus(newStatus);
                    long replicaDirtyTimestamp = 0;
                    info.setStatusWithoutDirty(newStatus);
                    if (lastDirtyTimestamp != null) {
                        replicaDirtyTimestamp = Long.parseLong(lastDirtyTimestamp);
                    }
                    // If the replication's dirty timestamp is more than the existing one, just update
                    // it to the replica's.
                    if (replicaDirtyTimestamp > info.getLastDirtyTimestamp()) {
                        info.setLastDirtyTimestamp(replicaDirtyTimestamp);
                    }
                    info.setActionType(ActionType.MODIFIED);
                    recentlyChangedQueue.add(new RecentlyChangedItem(lease));
                    info.setLastUpdatedTimestamp();
                    invalidateCache(appName, info.getVIPAddress(), info.getSecureVipAddress());
                }
                return true;
            }
        } finally {
            read.unlock();
        }
    }
    /**
     * Removes status override for a give instance.
     *
     * @param appName the application name of the instance.
     * @param id the unique identifier of the instance.
     * @param newStatus the new {@link InstanceStatus} to fall back to once the override is gone.
     * @param lastDirtyTimestamp last timestamp when this instance information was updated.
     * @param isReplication true if this is a replication event from other nodes, false
     *                      otherwise.
     * @return true if the status was successfully updated, false otherwise.
     */
    @Override
    public boolean deleteStatusOverride(String appName, String id,
                                        InstanceStatus newStatus,
                                        String lastDirtyTimestamp,
                                        boolean isReplication) {
        read.lock();
        try {
            STATUS_OVERRIDE_DELETE.increment(isReplication);
            Map<String, Lease<InstanceInfo>> gMap = registry.get(appName);
            Lease<InstanceInfo> lease = null;
            if (gMap != null) {
                lease = gMap.get(id);
            }
            if (lease == null) {
                return false;
            } else {
                // Deleting the override also refreshes the lease, keeping it alive.
                lease.renew();
                InstanceInfo info = lease.getHolder();
                // Lease is always created with its instance info object.
                // This log statement is provided as a safeguard, in case this invariant is violated.
                if (info == null) {
                    logger.error("Found Lease without a holder for instance id {}", id);
                }
                InstanceStatus currentOverride = overriddenInstanceStatusMap.remove(id);
                if (currentOverride != null && info != null) {
                    // Reset to UNKNOWN so the override no longer replicates, then apply the fallback status.
                    info.setOverriddenStatus(InstanceStatus.UNKNOWN);
                    info.setStatusWithoutDirty(newStatus);
                    long replicaDirtyTimestamp = 0;
                    if (lastDirtyTimestamp != null) {
                        replicaDirtyTimestamp = Long.parseLong(lastDirtyTimestamp);
                    }
                    // If the replication's dirty timestamp is more than the existing one, just update
                    // it to the replica's.
                    if (replicaDirtyTimestamp > info.getLastDirtyTimestamp()) {
                        info.setLastDirtyTimestamp(replicaDirtyTimestamp);
                    }
                    info.setActionType(ActionType.MODIFIED);
                    recentlyChangedQueue.add(new RecentlyChangedItem(lease));
                    info.setLastUpdatedTimestamp();
                    invalidateCache(appName, info.getVIPAddress(), info.getSecureVipAddress());
                }
                return true;
            }
        } finally {
            read.unlock();
        }
    }
/**
 * Evicts everything in the instance registry that has expired, if expiry is enabled.
 *
 * @see com.netflix.eureka.lease.LeaseManager#evict()
 */
@Override
public void evict() {
    // Zero compensation time: assume the eviction task ran on schedule.
    evict(0L);
}
/**
 * Evicts expired leases, bounded by the self-preservation threshold.
 *
 * @param additionalLeaseMs extra grace period (ms) added to each lease's expiry check,
 *                          used to compensate for delayed eviction-task execution.
 */
public void evict(long additionalLeaseMs) {
    logger.debug("Running the evict task");

    if (!isLeaseExpirationEnabled()) {
        logger.debug("DS: lease expiration is currently disabled.");
        return;
    }

    // We collect first all expired items, to evict them in random order. For large eviction sets,
    // if we did not do that, we might wipe out whole apps before self preservation kicks in. By
    // randomizing it, the impact should be evenly distributed across all applications.
    List<Lease<InstanceInfo>> expiredLeases = new ArrayList<>();
    for (Entry<String, Map<String, Lease<InstanceInfo>>> groupEntry : registry.entrySet()) {
        Map<String, Lease<InstanceInfo>> leaseMap = groupEntry.getValue();
        if (leaseMap != null) {
            for (Entry<String, Lease<InstanceInfo>> leaseEntry : leaseMap.entrySet()) {
                Lease<InstanceInfo> lease = leaseEntry.getValue();
                if (lease.isExpired(additionalLeaseMs) && lease.getHolder() != null) {
                    expiredLeases.add(lease);
                }
            }
        }
    }

    // To compensate for GC pauses or drifting local time, we need to use current registry size as a base for
    // triggering self-preservation. Without that we would wipe out full registry.
    int registrySize = (int) getLocalRegistrySize();
    int registrySizeThreshold = (int) (registrySize * serverConfig.getRenewalPercentThreshold());
    int evictionLimit = registrySize - registrySizeThreshold;

    // Never evict more than would push the registry below the self-preservation threshold.
    int toEvict = Math.min(expiredLeases.size(), evictionLimit);
    if (toEvict > 0) {
        logger.info("Evicting {} items (expired={}, evictionLimit={})", toEvict, expiredLeases.size(), evictionLimit);

        Random random = new Random(System.currentTimeMillis());
        for (int i = 0; i < toEvict; i++) {
            // Pick a random item (Knuth shuffle algorithm)
            int next = i + random.nextInt(expiredLeases.size() - i);
            Collections.swap(expiredLeases, i, next);
            Lease<InstanceInfo> lease = expiredLeases.get(i);

            String appName = lease.getHolder().getAppName();
            String id = lease.getHolder().getId();
            EXPIRED.increment();
            logger.warn("DS: Registry: expired lease for {}/{}", appName, id);
            // Not a replication event: this server decided the lease expired locally.
            internalCancel(appName, id, false);
        }
    }
}
/**
 * Returns the application registered locally under the given name, transparently
 * consulting remote regions unless that fallback is disabled in the server config.
 *
 * @param appName the application name of the application
 * @return the application, or {@code null} if unknown
 *
 * @see com.netflix.discovery.shared.LookupService#getApplication(java.lang.String)
 */
@Override
public Application getApplication(String appName) {
    // Remote-region fallback is on by default; the config flag turns it off.
    boolean fallbackAllowed = !serverConfig.disableTransparentFallbackToOtherRegion();
    return this.getApplication(appName, fallbackAllowed);
}
/**
 * Get application information.
 *
 * @param appName The name of the application
 * @param includeRemoteRegion true, if we need to include applications from remote regions
 *                            as indicated by the region {@link URL} by this property
 *                            {@link EurekaServerConfig#getRemoteRegionUrls()}, false otherwise
 * @return the application, or {@code null} if it is not registered locally
 *         (nor remotely, when the remote lookup is enabled)
 */
@Override
public Application getApplication(String appName, boolean includeRemoteRegion) {
    Map<String, Lease<InstanceInfo>> leases = registry.get(appName);
    if (leases != null && leases.size() > 0) {
        // Local hit: build the application from every lease we hold for it.
        Application result = new Application(appName);
        for (Entry<String, Lease<InstanceInfo>> entry : leases.entrySet()) {
            result.addInstance(decorateInstanceInfo(entry.getValue()));
        }
        return result;
    }
    if (includeRemoteRegion) {
        // Not known locally: return the first remote region that has it.
        for (RemoteRegionRegistry remoteRegistry : this.regionNameVSRemoteRegistry.values()) {
            Application remoteApp = remoteRegistry.getApplication(appName);
            if (remoteApp != null) {
                return remoteApp;
            }
        }
    }
    return null;
}
/**
 * Get all applications in this instance registry, falling back to other regions if allowed in the Eureka config.
 *
 * @return the list of all known applications
 *
 * @see com.netflix.discovery.shared.LookupService#getApplications()
 */
public Applications getApplications() {
    // Remote-region fallback can be disabled via server configuration.
    if (serverConfig.disableTransparentFallbackToOtherRegion()) {
        return getApplicationsFromLocalRegionOnly();
    }
    return getApplicationsFromAllRemoteRegions();
}
/**
 * Returns applications including instances from all remote regions. <br/>
 * Equivalent to calling {@link #getApplicationsFromMultipleRegions(String[])} with every
 * known remote region (the same effect as passing <code>null</code> to the delta variant).
 */
public Applications getApplicationsFromAllRemoteRegions() {
    // allKnownRemoteRegions is populated from server config at construction time.
    return getApplicationsFromMultipleRegions(allKnownRemoteRegions);
}
/**
 * Returns applications including instances from local region only. <br/>
 * Same as calling {@link #getApplicationsFromMultipleRegions(String[])} with an empty array.
 */
@Override
public Applications getApplicationsFromLocalRegionOnly() {
    // An empty region array disables all remote-region merging.
    return getApplicationsFromMultipleRegions(EMPTY_STR_ARRAY);
}
/**
 * This method will return applications with instances from all passed remote regions as well as the current region.
 * Thus, this gives a union view of instances from multiple regions. <br/>
 * The application instances for which this union will be done can be restricted to the names returned by
 * {@link EurekaServerConfig#getRemoteRegionAppWhitelist(String)} for every region. In case, there is no whitelist
 * defined for a region, this method will also look for a global whitelist by passing <code>null</code> to the
 * method {@link EurekaServerConfig#getRemoteRegionAppWhitelist(String)} <br/>
 * If you are not selectively requesting for a remote region, use {@link #getApplicationsFromAllRemoteRegions()}
 * or {@link #getApplicationsFromLocalRegionOnly()}
 *
 * @param remoteRegions The remote regions for which the instances are to be queried. The instances may be limited
 *                      by a whitelist as explained above. If <code>null</code> or empty no remote regions are
 *                      included.
 *
 * @return The applications with instances from the passed remote regions as well as local region. The instances
 * from remote regions can be only for certain whitelisted apps as explained above.
 */
public Applications getApplicationsFromMultipleRegions(String[] remoteRegions) {

    boolean includeRemoteRegion = null != remoteRegions && remoteRegions.length != 0;

    logger.debug("Fetching applications registry with remote regions: {}, Regions argument {}",
            includeRemoteRegion, remoteRegions);

    if (includeRemoteRegion) {
        GET_ALL_WITH_REMOTE_REGIONS_CACHE_MISS.increment();
    } else {
        GET_ALL_CACHE_MISS.increment();
    }
    Applications apps = new Applications();
    apps.setVersion(1L);

    // First pass: every locally registered application, one Application per registry group.
    for (Entry<String, Map<String, Lease<InstanceInfo>>> entry : registry.entrySet()) {
        Application app = null;

        if (entry.getValue() != null) {
            for (Entry<String, Lease<InstanceInfo>> stringLeaseEntry : entry.getValue().entrySet()) {
                Lease<InstanceInfo> lease = stringLeaseEntry.getValue();
                if (app == null) {
                    app = new Application(lease.getHolder().getAppName());
                }
                app.addInstance(decorateInstanceInfo(lease));
            }
        }
        if (app != null) {
            apps.addApplication(app);
        }
    }

    // Second pass: merge whitelisted applications from each requested remote region
    // into the local view (instances are appended to any app already present).
    if (includeRemoteRegion) {
        for (String remoteRegion : remoteRegions) {
            RemoteRegionRegistry remoteRegistry = regionNameVSRemoteRegistry.get(remoteRegion);
            if (null != remoteRegistry) {
                Applications remoteApps = remoteRegistry.getApplications();
                for (Application application : remoteApps.getRegisteredApplications()) {
                    if (shouldFetchFromRemoteRegistry(application.getName(), remoteRegion)) {
                        logger.info("Application {}  fetched from the remote region {}",
                                application.getName(), remoteRegion);

                        Application appInstanceTillNow = apps.getRegisteredApplications(application.getName());
                        if (appInstanceTillNow == null) {
                            appInstanceTillNow = new Application(application.getName());
                            apps.addApplication(appInstanceTillNow);
                        }
                        for (InstanceInfo instanceInfo : application.getInstances()) {
                            appInstanceTillNow.addInstance(instanceInfo);
                        }
                    } else {
                        logger.debug("Application {} not fetched from the remote region {} as there exists a "
                                        + "whitelist and this app is not in the whitelist.",
                                application.getName(), remoteRegion);
                    }
                }
            } else {
                logger.warn("No remote registry available for the remote region {}", remoteRegion);
            }
        }
    }
    // Hash code lets clients detect reconciliation drift against this payload.
    apps.setAppsHashCode(apps.getReconcileHashCode());
    return apps;
}
/**
 * Decides whether an application from a remote region may be merged into local results.
 * A region-specific whitelist wins; otherwise the global whitelist (keyed by {@code null})
 * is consulted; if neither exists, every application is allowed.
 */
private boolean shouldFetchFromRemoteRegistry(String appName, String remoteRegion) {
    Set<String> allowedApps = serverConfig.getRemoteRegionAppWhitelist(remoteRegion);
    if (allowedApps == null) {
        // No per-region whitelist; fall back to the global one.
        allowedApps = serverConfig.getRemoteRegionAppWhitelist(null);
    }
    return allowedApps == null || allowedApps.contains(appName);
}
/**
 * Get the registry information about all {@link Applications}.
 *
 * @param includeRemoteRegion true, if we need to include applications from remote regions
 *                            as indicated by the region {@link URL} by this property
 *                            {@link EurekaServerConfig#getRemoteRegionUrls()}, false otherwise
 * @return applications
 *
 * @deprecated Use {@link #getApplicationsFromMultipleRegions(String[])} instead. This method has a flawed behavior
 * of transparently falling back to a remote region if no instances for an app is available locally. The new
 * behavior is to explicitly specify if you need a remote region.
 */
@Deprecated
public Applications getApplications(boolean includeRemoteRegion) {
    GET_ALL_CACHE_MISS.increment();
    Applications apps = new Applications();
    apps.setVersion(1L);
    // Collect all locally registered applications first.
    for (Entry<String, Map<String, Lease<InstanceInfo>>> entry : registry.entrySet()) {
        Application app = null;

        if (entry.getValue() != null) {
            for (Entry<String, Lease<InstanceInfo>> stringLeaseEntry : entry.getValue().entrySet()) {

                Lease<InstanceInfo> lease = stringLeaseEntry.getValue();

                if (app == null) {
                    app = new Application(lease.getHolder().getAppName());
                }

                app.addInstance(decorateInstanceInfo(lease));
            }
        }
        if (app != null) {
            apps.addApplication(app);
        }
    }
    if (includeRemoteRegion) {
        // Legacy fallback: add remote applications only when the app is entirely
        // absent locally (no per-instance merging, unlike the multi-region variant).
        for (RemoteRegionRegistry remoteRegistry : this.regionNameVSRemoteRegistry.values()) {
            Applications applications = remoteRegistry.getApplications();
            for (Application application : applications
                    .getRegisteredApplications()) {
                Application appInLocalRegistry = apps
                        .getRegisteredApplications(application.getName());
                if (appInLocalRegistry == null) {
                    apps.addApplication(application);
                }
            }
        }
    }
    apps.setAppsHashCode(apps.getReconcileHashCode());
    return apps;
}
/**
 * Get the registry information about the delta changes. The deltas are
 * cached for a window specified by
 * {@link EurekaServerConfig#getRetentionTimeInMSInDeltaQueue()}. Subsequent
 * requests for delta information may return the same information and client
 * must make sure this does not adversely affect them.
 *
 * @return all application deltas.
 * @deprecated use {@link #getApplicationDeltasFromMultipleRegions(String[])} instead. This method has a
 * flawed behavior of transparently falling back to a remote region if no instances for an app is available locally.
 * The new behavior is to explicitly specify if you need a remote region.
 */
@Deprecated
public Applications getApplicationDeltas() {
    GET_ALL_CACHE_MISS_DELTA.increment();
    Applications apps = new Applications();
    apps.setVersion(responseCache.getVersionDelta().get());
    Map<String, Application> applicationInstancesMap = new HashMap<String, Application>();
    // Write lock: the recently-changed queue must not be mutated while we build the delta
    // and its reconcile hash, otherwise the hash would not match the payload.
    write.lock();
    try {
        Iterator<RecentlyChangedItem> iter = this.recentlyChangedQueue.iterator();
        logger.debug("The number of elements in the delta queue is : {}",
                this.recentlyChangedQueue.size());
        while (iter.hasNext()) {
            Lease<InstanceInfo> lease = iter.next().getLeaseInfo();
            InstanceInfo instanceInfo = lease.getHolder();
            logger.debug(
                    "The instance id {} is found with status {} and actiontype {}",
                    instanceInfo.getId(), instanceInfo.getStatus().name(), instanceInfo.getActionType().name());
            // Group recently changed instances by application name.
            Application app = applicationInstancesMap.get(instanceInfo
                    .getAppName());
            if (app == null) {
                app = new Application(instanceInfo.getAppName());
                applicationInstancesMap.put(instanceInfo.getAppName(), app);
                apps.addApplication(app);
            }
            // Copy the InstanceInfo so later registry mutations don't leak into the delta.
            app.addInstance(new InstanceInfo(decorateInstanceInfo(lease)));
        }

        boolean disableTransparentFallback = serverConfig.disableTransparentFallbackToOtherRegion();

        if (!disableTransparentFallback) {
            // Legacy fallback: include remote deltas only for apps absent locally.
            Applications allAppsInLocalRegion = getApplications(false);

            for (RemoteRegionRegistry remoteRegistry : this.regionNameVSRemoteRegistry.values()) {
                Applications applications = remoteRegistry.getApplicationDeltas();
                for (Application application : applications.getRegisteredApplications()) {
                    Application appInLocalRegistry =
                            allAppsInLocalRegion.getRegisteredApplications(application.getName());
                    if (appInLocalRegistry == null) {
                        apps.addApplication(application);
                    }
                }
            }
        }

        // The hash code is computed over the FULL registry so clients can detect
        // whether their incrementally updated view has drifted.
        Applications allApps = getApplications(!disableTransparentFallback);
        apps.setAppsHashCode(allApps.getReconcileHashCode());
        return apps;
    } finally {
        write.unlock();
    }
}
/**
 * Gets the application delta also including instances from the passed remote regions, with the instances from the
 * local region. <br/>
 *
 * The remote regions from where the instances will be chosen can further be restricted if this application does not
 * appear in the whitelist specified for the region as returned by
 * {@link EurekaServerConfig#getRemoteRegionAppWhitelist(String)} for a region. In case, there is no whitelist
 * defined for a region, this method will also look for a global whitelist by passing <code>null</code> to the
 * method {@link EurekaServerConfig#getRemoteRegionAppWhitelist(String)} <br/>
 *
 * @param remoteRegions The remote regions for which the instances are to be queried. The instances may be limited
 *                      by a whitelist as explained above. If <code>null</code> all remote regions are included.
 *                      If empty list then no remote region is included.
 *
 * @return The delta with instances from the passed remote regions as well as local region. The instances
 * from remote regions can be further be restricted as explained above. <code>null</code> if the application does
 * not exist locally or in remote regions.
 */
public Applications getApplicationDeltasFromMultipleRegions(String[] remoteRegions) {
    if (null == remoteRegions) {
        remoteRegions = allKnownRemoteRegions; // null means all remote regions.
    }

    boolean includeRemoteRegion = remoteRegions.length != 0;

    if (includeRemoteRegion) {
        GET_ALL_WITH_REMOTE_REGIONS_CACHE_MISS_DELTA.increment();
    } else {
        GET_ALL_CACHE_MISS_DELTA.increment();
    }

    Applications apps = new Applications();
    apps.setVersion(responseCache.getVersionDeltaWithRegions().get());
    Map<String, Application> applicationInstancesMap = new HashMap<String, Application>();
    // Write lock: keep the recently-changed queue stable so the delta payload and
    // its reconcile hash are computed against the same snapshot.
    write.lock();
    try {
        Iterator<RecentlyChangedItem> iter = this.recentlyChangedQueue.iterator();
        logger.debug("The number of elements in the delta queue is :{}", this.recentlyChangedQueue.size());
        while (iter.hasNext()) {
            Lease<InstanceInfo> lease = iter.next().getLeaseInfo();
            InstanceInfo instanceInfo = lease.getHolder();
            logger.debug("The instance id {} is found with status {} and actiontype {}",
                    instanceInfo.getId(), instanceInfo.getStatus().name(), instanceInfo.getActionType().name());
            Application app = applicationInstancesMap.get(instanceInfo.getAppName());
            if (app == null) {
                app = new Application(instanceInfo.getAppName());
                applicationInstancesMap.put(instanceInfo.getAppName(), app);
                apps.addApplication(app);
            }
            // Defensive copy so subsequent registry mutations don't alter this delta.
            app.addInstance(new InstanceInfo(decorateInstanceInfo(lease)));
        }

        if (includeRemoteRegion) {
            // Merge whitelisted remote deltas into the local delta, per region.
            for (String remoteRegion : remoteRegions) {
                RemoteRegionRegistry remoteRegistry = regionNameVSRemoteRegistry.get(remoteRegion);
                if (null != remoteRegistry) {
                    Applications remoteAppsDelta = remoteRegistry.getApplicationDeltas();
                    if (null != remoteAppsDelta) {
                        for (Application application : remoteAppsDelta.getRegisteredApplications()) {
                            if (shouldFetchFromRemoteRegistry(application.getName(), remoteRegion)) {
                                Application appInstanceTillNow =
                                        apps.getRegisteredApplications(application.getName());
                                if (appInstanceTillNow == null) {
                                    appInstanceTillNow = new Application(application.getName());
                                    apps.addApplication(appInstanceTillNow);
                                }
                                for (InstanceInfo instanceInfo : application.getInstances()) {
                                    appInstanceTillNow.addInstance(new InstanceInfo(instanceInfo));
                                }
                            }
                        }
                    }
                }
            }
        }

        // Hash of the FULL multi-region registry, used by clients for reconciliation.
        Applications allApps = getApplicationsFromMultipleRegions(remoteRegions);
        apps.setAppsHashCode(allApps.getReconcileHashCode());
        return apps;
    } finally {
        write.unlock();
    }
}
/**
 * Gets the {@link InstanceInfo} information.
 * Remote regions are consulted by default when the instance is not found locally.
 *
 * @param appName the application name for which the information is requested.
 * @param id the unique identifier of the instance.
 * @return the information about the instance, or {@code null} if unknown.
 */
@Override
public InstanceInfo getInstanceByAppAndId(String appName, String id) {
    return this.getInstanceByAppAndId(appName, id, true);
}
/**
 * Gets the {@link InstanceInfo} information.
 *
 * @param appName the application name for which the information is requested.
 * @param id the unique identifier of the instance.
 * @param includeRemoteRegions true, if we need to include applications from remote regions
 *                             as indicated by the region {@link URL} by this property
 *                             {@link EurekaServerConfig#getRemoteRegionUrls()}, false otherwise
 * @return the information about the instance, or {@code null} if unknown or expired.
 */
@Override
public InstanceInfo getInstanceByAppAndId(String appName, String id, boolean includeRemoteRegions) {
    Map<String, Lease<InstanceInfo>> leases = registry.get(appName);
    Lease<InstanceInfo> lease = (leases == null) ? null : leases.get(id);

    // A local lease is usable unless expiration is enabled and it has lapsed.
    boolean usableLocally = lease != null
            && (!isLeaseExpirationEnabled() || !lease.isExpired());
    if (usableLocally) {
        return decorateInstanceInfo(lease);
    }

    if (includeRemoteRegions) {
        // Fall back to the first remote region that knows this application.
        for (RemoteRegionRegistry remoteRegistry : this.regionNameVSRemoteRegistry.values()) {
            Application application = remoteRegistry.getApplication(appName);
            if (application != null) {
                return application.getByInstanceId(id);
            }
        }
    }
    return null;
}
/**
 * @deprecated Try {@link #getInstanceByAppAndId(String, String)} instead.
 *
 * Get all instances by ID, including automatically asking other regions if the ID is unknown.
 *
 * @see com.netflix.discovery.shared.LookupService#getInstancesById(String)
 */
@Deprecated
public List<InstanceInfo> getInstancesById(String id) {
    // Remote regions are included by default for this legacy lookup.
    return this.getInstancesById(id, true);
}
/**
 * @deprecated Try {@link #getInstanceByAppAndId(String, String, boolean)} instead.
 *
 * Get the list of instances by its unique id.
 *
 * @param id the unique id of the instance
 * @param includeRemoteRegions true, if we need to include applications from remote regions
 *                             as indicated by the region {@link URL} by this property
 *                             {@link EurekaServerConfig#getRemoteRegionUrls()}, false otherwise
 * @return list of InstanceInfo objects; empty if the id is unknown.
 */
@Deprecated
public List<InstanceInfo> getInstancesById(String id, boolean includeRemoteRegions) {
    List<InstanceInfo> list = new ArrayList<>();
    // Scan every application's lease map for the requested instance id.
    // (The former `list == Collections.EMPTY_LIST` re-initialization check was dead
    // code — `list` is always a fresh ArrayList — and has been removed.)
    for (Map<String, Lease<InstanceInfo>> leaseMap : registry.values()) {
        if (leaseMap == null) {
            continue;
        }
        Lease<InstanceInfo> lease = leaseMap.get(id);
        // Skip missing leases, and expired ones when expiration is enabled.
        if (lease == null || (isLeaseExpirationEnabled() && lease.isExpired())) {
            continue;
        }
        list.add(decorateInstanceInfo(lease));
    }
    if (list.isEmpty() && includeRemoteRegions) {
        // Not found locally: return the first remote match (at most one instance).
        for (RemoteRegionRegistry remoteRegistry : this.regionNameVSRemoteRegistry.values()) {
            for (Application application : remoteRegistry.getApplications()
                    .getRegisteredApplications()) {
                InstanceInfo instanceInfo = application.getByInstanceId(id);
                if (instanceInfo != null) {
                    list.add(instanceInfo);
                    return list;
                }
            }
        }
    }
    return list;
}
/**
 * Refreshes the {@link InstanceInfo}'s lease metadata from the server-side lease
 * (registration/renewal/eviction timestamps) and marks whether this server is the
 * coordinating discovery server.
 *
 * NOTE: this mutates and returns the registry's own InstanceInfo object rather than
 * a copy; callers that need isolation must copy it themselves.
 */
private InstanceInfo decorateInstanceInfo(Lease<InstanceInfo> lease) {
    InstanceInfo info = lease.getHolder();

    // client app settings — fall back to defaults if the client sent no lease info.
    int renewalInterval = LeaseInfo.DEFAULT_LEASE_RENEWAL_INTERVAL;
    int leaseDuration = LeaseInfo.DEFAULT_LEASE_DURATION;

    // TODO: clean this up
    if (info.getLeaseInfo() != null) {
        renewalInterval = info.getLeaseInfo().getRenewalIntervalInSecs();
        leaseDuration = info.getLeaseInfo().getDurationInSecs();
    }

    info.setLeaseInfo(LeaseInfo.Builder.newBuilder()
            .setRegistrationTimestamp(lease.getRegistrationTimestamp())
            .setRenewalTimestamp(lease.getLastRenewalTimestamp())
            .setServiceUpTimestamp(lease.getServiceUpTimestamp())
            .setRenewalIntervalInSecs(renewalInterval)
            .setDurationInSecs(leaseDuration)
            .setEvictionTimestamp(lease.getEvictionTimestamp()).build());

    info.setIsCoordinatingDiscoveryServer();
    return info;
}
/**
 * Servo route; do not call.
 *
 * @return number of heartbeats received in the last minute, from the rolling counter.
 */
@com.netflix.servo.annotations.Monitor(name = "numOfRenewsInLastMin",
        description = "Number of total heartbeats received in the last minute", type = DataSourceType.GAUGE)
@Override
public long getNumOfRenewsInLastMin() {
    return renewsLastMin.getCount();
}
/**
 * Gets the threshold for the renewals per minute. Falling below this value
 * triggers self-preservation mode (see {@code updateRenewsPerMinThreshold()}).
 *
 * @return the integer representing the threshold for the renewals per
 *         minute.
 */
@com.netflix.servo.annotations.Monitor(name = "numOfRenewsPerMinThreshold", type = DataSourceType.GAUGE)
@Override
public int getNumOfRenewsPerMinThreshold() {
    return numberOfRenewsPerMinThreshold;
}
/**
 * Get the N instances that are most recently registered.
 *
 * @return pairs of (registration timestamp, instance descriptor), most recent first.
 */
@Override
public List<Pair<Long, String>> getLastNRegisteredInstances() {
    // Snapshot the bounded queue, then reverse so the newest entries come first.
    List<Pair<Long, String>> list = new ArrayList<>(recentRegisteredQueue);
    Collections.reverse(list);
    return list;
}
/**
 * Get the N instances that have most recently canceled.
 *
 * @return pairs of (cancellation timestamp, instance descriptor), most recent first.
 */
@Override
public List<Pair<Long, String>> getLastNCanceledInstances() {
    // Snapshot the bounded queue, then reverse so the newest entries come first.
    List<Pair<Long, String>> list = new ArrayList<>(recentCanceledQueue);
    Collections.reverse(list);
    return list;
}
/**
 * Drops cached response payloads for the given app and its (secure) VIP addresses
 * so the next fetch reflects the registry change.
 */
private void invalidateCache(String appName, @Nullable String vipAddress, @Nullable String secureVipAddress) {
    // invalidate cache
    responseCache.invalidate(appName, vipAddress, secureVipAddress);
}
/**
 * Recomputes the self-preservation threshold: the minimum renewals per minute
 * expected from clients, derived from the expected client count, the expected
 * per-client renewal interval (scaled to one minute), and the configured
 * renewal percent threshold.
 */
protected void updateRenewsPerMinThreshold() {
    this.numberOfRenewsPerMinThreshold = (int) (this.expectedNumberOfClientsSendingRenews
            * (60.0 / serverConfig.getExpectedClientRenewalIntervalSeconds())
            * serverConfig.getRenewalPercentThreshold());
}
/**
 * Immutable pairing of a lease with the wall-clock time it was observed to change.
 * Entries are queued so that delta queries can report recent registry modifications;
 * the delta-retention task evicts entries older than the configured window.
 */
private static final class RecentlyChangedItem {
    // Both fields are set once in the constructor and never reassigned,
    // so they are declared final (minimize mutability).
    private final long lastUpdateTime;
    private final Lease<InstanceInfo> leaseInfo;

    public RecentlyChangedItem(Lease<InstanceInfo> lease) {
        this.leaseInfo = lease;
        this.lastUpdateTime = System.currentTimeMillis();
    }

    /** @return capture time in epoch millis, used by the retention task. */
    public long getLastUpdateTime() {
        return this.lastUpdateTime;
    }

    /** @return the lease whose holder changed. */
    public Lease<InstanceInfo> getLeaseInfo() {
        return this.leaseInfo;
    }
}
/**
 * Starts background machinery after construction: the renewals-per-minute counter
 * and the periodic eviction task (replacing any previously scheduled one).
 */
protected void postInit() {
    renewsLastMin.start();
    // Cancel a prior task if postInit is invoked more than once.
    if (evictionTaskRef.get() != null) {
        evictionTaskRef.get().cancel();
    }
    evictionTaskRef.set(new EvictionTask());
    evictionTimer.schedule(evictionTaskRef.get(),
            serverConfig.getEvictionIntervalTimerInMs(),
            serverConfig.getEvictionIntervalTimerInMs());
}
/**
 * Perform all cleanup and shutdown operations: stops the delta-retention and
 * eviction timers, the renewal counter, and the response cache.
 */
@Override
public void shutdown() {
    deltaRetentionTimer.cancel();
    evictionTimer.cancel();
    renewsLastMin.stop();
    responseCache.stop();
}
// Servo gauge: number of instance-status overrides currently held in memory.
// (Method name kept as-is for monitor/metric compatibility.)
@com.netflix.servo.annotations.Monitor(name = "numOfElementsinInstanceCache", description = "Number of overrides in the instance Cache", type = DataSourceType.GAUGE)
public long getNumberofElementsininstanceCache() {
    return overriddenInstanceStatusMap.size();
}
/**
 * Periodic task that evicts expired leases, compensating for late execution
 * (GC pauses, clock drift) by passing the measured delay into {@link #evict(long)}.
 */
/* visible for testing */ class EvictionTask extends TimerTask {

    // Nanotime of the previous run; 0L until the first execution.
    // (Literals use uppercase 'L' — lowercase 'l' is easily misread as '1'.)
    private final AtomicLong lastExecutionNanosRef = new AtomicLong(0L);

    @Override
    public void run() {
        try {
            long compensationTimeMs = getCompensationTimeMs();
            logger.info("Running the evict task with compensationTime {}ms", compensationTimeMs);
            evict(compensationTimeMs);
        } catch (Throwable e) {
            // Never let an exception escape a TimerTask: it would cancel the whole timer.
            logger.error("Could not run the evict task", e);
        }
    }

    /**
     * compute a compensation time defined as the actual time this task was executed since the prev iteration,
     * vs the configured amount of time for execution. This is useful for cases where changes in time (due to
     * clock skew or gc for example) causes the actual eviction task to execute later than the desired time
     * according to the configured cycle.
     */
    long getCompensationTimeMs() {
        long currNanos = getCurrentTimeNano();
        long lastNanos = lastExecutionNanosRef.getAndSet(currNanos);
        if (lastNanos == 0L) {
            // First run: no previous execution to compare against.
            return 0L;
        }

        long elapsedMs = TimeUnit.NANOSECONDS.toMillis(currNanos - lastNanos);
        long compensationTime = elapsedMs - serverConfig.getEvictionIntervalTimerInMs();
        return compensationTime <= 0L ? 0L : compensationTime;
    }

    long getCurrentTimeNano() {  // for testing
        return System.nanoTime();
    }
}
/**
 * A fixed-capacity FIFO queue that never rejects an offer: once full, the oldest
 * element is dropped to make room for the new one. Backed by an
 * {@link ArrayBlockingQueue}; iteration order is oldest-first.
 */
/* visible for testing */ static class CircularQueue<E> extends AbstractQueue<E> {

    private final ArrayBlockingQueue<E> buffer;
    private final int capacity;

    public CircularQueue(int capacity) {
        this.capacity = capacity;
        this.buffer = new ArrayBlockingQueue<>(capacity);
    }

    @Override
    public Iterator<E> iterator() {
        return buffer.iterator();
    }

    @Override
    public int size() {
        return buffer.size();
    }

    @Override
    public boolean offer(E e) {
        // Evict from the head until the new element fits; always succeeds.
        boolean inserted = buffer.offer(e);
        while (!inserted) {
            buffer.poll();
            inserted = buffer.offer(e);
        }
        return true;
    }

    @Override
    public E poll() {
        return buffer.poll();
    }

    @Override
    public E peek() {
        return buffer.peek();
    }

    @Override
    public void clear() {
        buffer.clear();
    }

    @Override
    public Object[] toArray() {
        return buffer.toArray();
    }
}
/**
 * @return The rule that will process the instance status override.
 *         Implemented by concrete registries (e.g. peer-aware variants).
 */
protected abstract InstanceStatusOverrideRule getInstanceInfoOverrideRule();
/**
 * Resolves the effective status for an instance by applying the registry's
 * override rule to the incoming record and any existing lease.
 *
 * @param r the incoming instance record.
 * @param existingLease the lease currently held for the instance, if any.
 * @param isReplication true if this request originated from a peer node.
 * @return the status the registry should record for the instance.
 */
protected InstanceInfo.InstanceStatus getOverriddenInstanceStatus(InstanceInfo r,
                                                                  Lease<InstanceInfo> existingLease,
                                                                  boolean isReplication) {
    InstanceStatusOverrideRule rule = getInstanceInfoOverrideRule();
    logger.debug("Processing override status using rule: {}", rule);
    return rule.apply(r, existingLease, isReplication).status();
}
/**
 * Builds the periodic task that trims the recently-changed queue, removing entries
 * older than the configured delta-retention window. The queue is ordered by update
 * time, so the scan stops at the first entry that is still fresh.
 */
private TimerTask getDeltaRetentionTask() {
    return new TimerTask() {
        @Override
        public void run() {
            Iterator<RecentlyChangedItem> items = recentlyChangedQueue.iterator();
            while (items.hasNext()) {
                RecentlyChangedItem item = items.next();
                boolean expired = item.getLastUpdateTime()
                        < System.currentTimeMillis() - serverConfig.getRetentionTimeInMSInDeltaQueue();
                if (!expired) {
                    break;  // everything after this entry is newer still
                }
                items.remove();
            }
        }
    };
}
}
| 6,962 |
0 | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka/registry/InstanceRegistry.java | package com.netflix.eureka.registry;
import com.netflix.appinfo.ApplicationInfoManager;
import com.netflix.appinfo.InstanceInfo;
import com.netflix.appinfo.InstanceInfo.InstanceStatus;
import com.netflix.discovery.shared.Application;
import com.netflix.discovery.shared.Applications;
import com.netflix.discovery.shared.LookupService;
import com.netflix.discovery.shared.Pair;
import com.netflix.eureka.lease.LeaseManager;
import java.util.List;
import java.util.Map;
/**
* @author Tomasz Bak
*/
public interface InstanceRegistry extends LeaseManager<InstanceInfo>, LookupService<String> {

    /**
     * Marks this registry as ready to serve traffic.
     *
     * @param applicationInfoManager manager for this server's own instance info.
     * @param count the expected number of clients sending renewals.
     */
    void openForTraffic(ApplicationInfoManager applicationInfoManager, int count);

    /** Performs all cleanup and shutdown operations. */
    void shutdown();

    /** @deprecated use {@link #storeOverriddenStatusIfRequired(String, String, InstanceStatus)}. */
    @Deprecated
    void storeOverriddenStatusIfRequired(String id, InstanceStatus overriddenStatus);

    /** Records an overridden status for an instance if one is not already stored. */
    void storeOverriddenStatusIfRequired(String appName, String id, InstanceStatus overriddenStatus);

    /**
     * Updates the status of an instance, recording it as an override.
     *
     * @return true if the status was successfully updated, false otherwise.
     */
    boolean statusUpdate(String appName, String id, InstanceStatus newStatus,
                         String lastDirtyTimestamp, boolean isReplication);

    /**
     * Removes the status override for an instance, applying {@code newStatus} in its place.
     *
     * @return true if the instance was found, false otherwise.
     */
    boolean deleteStatusOverride(String appName, String id, InstanceStatus newStatus,
                                 String lastDirtyTimestamp, boolean isReplication);

    /** @return a snapshot of all currently stored status overrides, keyed by instance id. */
    Map<String, InstanceStatus> overriddenInstanceStatusesSnapshot();

    /** @return applications registered in the local region only (no remote-region merging). */
    Applications getApplicationsFromLocalRegionOnly();

    /** @return all locally known applications, sorted by name. */
    List<Application> getSortedApplications();

    /**
     * Get application information.
     *
     * @param appName The name of the application
     * @param includeRemoteRegion true, if we need to include applications from remote regions
     *                            as indicated by the region {@link java.net.URL} by this property
     *                            {@link com.netflix.eureka.EurekaServerConfig#getRemoteRegionUrls()}, false otherwise
     * @return the application
     */
    Application getApplication(String appName, boolean includeRemoteRegion);

    /**
     * Gets the {@link InstanceInfo} information.
     *
     * @param appName the application name for which the information is requested.
     * @param id the unique identifier of the instance.
     * @return the information about the instance.
     */
    InstanceInfo getInstanceByAppAndId(String appName, String id);

    /**
     * Gets the {@link InstanceInfo} information.
     *
     * @param appName the application name for which the information is requested.
     * @param id the unique identifier of the instance.
     * @param includeRemoteRegions true, if we need to include applications from remote regions
     *                             as indicated by the region {@link java.net.URL} by this property
     *                             {@link com.netflix.eureka.EurekaServerConfig#getRemoteRegionUrls()}, false otherwise
     * @return the information about the instance.
     */
    InstanceInfo getInstanceByAppAndId(String appName, String id, boolean includeRemoteRegions);

    /** Removes all registrations from this registry. */
    void clearRegistry();

    /** Initializes the response cache; must be called before serving fetch requests. */
    void initializedResponseCache();

    /** @return the cache of serialized registry payloads served to clients. */
    ResponseCache getResponseCache();

    /** @return number of renewals (heartbeats) received in the last minute. */
    long getNumOfRenewsInLastMin();

    /** @return the renewals-per-minute threshold below which self-preservation triggers. */
    int getNumOfRenewsPerMinThreshold();

    /** @return non-zero when the current renewal rate is below the threshold (int-typed flag). */
    int isBelowRenewThresold();

    /** @return the most recently registered instances, newest first. */
    List<Pair<Long, String>> getLastNRegisteredInstances();

    /** @return the most recently canceled instances, newest first. */
    List<Pair<Long, String>> getLastNCanceledInstances();

    /**
     * Checks whether lease expiration is enabled.
     * @return true if enabled
     */
    boolean isLeaseExpirationEnabled();

    /** @return true if self-preservation mode is enabled in the server config. */
    boolean isSelfPreservationModeEnabled();

}
| 6,963 |
0 | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka/registry/ResponseCacheImpl.java | /*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.eureka.registry;
import javax.annotation.Nullable;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.util.Arrays;
import java.util.Collection;
import java.util.Date;
import java.util.List;
import java.util.TimerTask;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;
import java.util.zip.GZIPOutputStream;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Supplier;
import com.google.common.cache.CacheBuilder;
import com.google.common.cache.CacheLoader;
import com.google.common.cache.LoadingCache;
import com.google.common.cache.RemovalListener;
import com.google.common.cache.RemovalNotification;
import com.google.common.collect.Multimap;
import com.google.common.collect.Multimaps;
import com.netflix.appinfo.EurekaAccept;
import com.netflix.appinfo.InstanceInfo;
import com.netflix.discovery.converters.wrappers.EncoderWrapper;
import com.netflix.discovery.shared.Application;
import com.netflix.discovery.shared.Applications;
import com.netflix.eureka.EurekaServerConfig;
import com.netflix.eureka.Version;
import com.netflix.eureka.resources.CurrentRequestVersion;
import com.netflix.eureka.resources.ServerCodecs;
import com.netflix.servo.annotations.DataSourceType;
import com.netflix.servo.annotations.Monitor;
import com.netflix.servo.monitor.Monitors;
import com.netflix.servo.monitor.Stopwatch;
import com.netflix.servo.monitor.Timer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* The class that is responsible for caching registry information that will be
* queried by the clients.
*
* <p>
* The cache is maintained in compressed and non-compressed form for three
* categories of requests - all applications, delta changes and for individual
* applications. The compressed form is probably the most efficient in terms of
* network traffic especially when querying all applications.
*
* The cache also maintains separate pay load for <em>JSON</em> and <em>XML</em>
* formats and for multiple versions too.
* </p>
*
* @author Karthik Ranganathan, Greg Kim
*/
public class ResponseCacheImpl implements ResponseCache {

    // Cached enum values() arrays; values() allocates a fresh array on every call,
    // and these are iterated on every invalidation.
    private static final Key.KeyType[] KEY_TYPE_VALUES = Key.KeyType.values();
    private static final Version[] VERSION_VALUES = Version.values();

    private static final Logger logger = LoggerFactory.getLogger(ResponseCacheImpl.class);

    // Well-known cache key names for the "full registry" and "registry delta" payloads.
    public static final String ALL_APPS = "ALL_APPS";
    public static final String ALL_APPS_DELTA = "ALL_APPS_DELTA";

    // FIXME deprecated, here for backwards compatibility.
    private static final AtomicLong versionDeltaLegacy = new AtomicLong(0);
    private static final AtomicLong versionDeltaWithRegionsLegacy = new AtomicLong(0);

    // Sentinel used instead of null for "no data for this key" payloads.
    private static final String EMPTY_PAYLOAD = "";

    // Daemon timer that periodically copies readWriteCacheMap entries into readOnlyCacheMap.
    private final java.util.Timer timer = new java.util.Timer("Eureka-CacheFillTimer", true);

    // Monotonically increasing counters, bumped each time a delta payload is generated.
    private final AtomicLong versionDelta = new AtomicLong(0);
    private final AtomicLong versionDeltaWithRegions = new AtomicLong(0);

    // Servo timers tracking how long each payload-serialization path takes.
    private final Timer serializeAllAppsTimer = Monitors.newTimer("serialize-all");
    private final Timer serializeDeltaAppsTimer = Monitors.newTimer("serialize-all-delta");
    private final Timer serializeAllAppsWithRemoteRegionTimer = Monitors.newTimer("serialize-all_remote_region");
    private final Timer serializeDeltaAppsWithRemoteRegionTimer = Monitors.newTimer("serialize-all-delta_remote_region");
    private final Timer serializeOneApptimer = Monitors.newTimer("serialize-one");
    private final Timer serializeViptimer = Monitors.newTimer("serialize-one-vip");
    private final Timer compressPayloadTimer = Monitors.newTimer("compress-payload");

    /**
     * This map holds mapping of keys without regions to a list of keys with region (provided by clients)
     * Since, during invalidation, triggered by a change in registry for local region, we do not know the regions
     * requested by clients, we use this mapping to get all the keys with regions to be invalidated.
     * If we do not do this, any cached user requests containing region keys will not be invalidated and will stick
     * around till expiry. Github issue: https://github.com/Netflix/eureka/issues/118
     */
    private final Multimap<Key, Key> regionSpecificKeys =
            Multimaps.newListMultimap(new ConcurrentHashMap<Key, Collection<Key>>(), new Supplier<List<Key>>() {
                @Override
                public List<Key> get() {
                    return new CopyOnWriteArrayList<Key>();
                }
            });

    // Two-level cache: the read-only map is refreshed from the read-write map on a timer,
    // so readers never block on payload generation (at the cost of bounded staleness).
    private final ConcurrentMap<Key, Value> readOnlyCacheMap = new ConcurrentHashMap<Key, Value>();

    private final LoadingCache<Key, Value> readWriteCacheMap;
    private final boolean shouldUseReadOnlyResponseCache;
    private final AbstractInstanceRegistry registry;
    private final EurekaServerConfig serverConfig;
    private final ServerCodecs serverCodecs;

    /**
     * Builds the read-write loading cache (auto-expiring, generating payloads on miss),
     * and, if configured, schedules the periodic task that refreshes the read-only cache.
     */
    ResponseCacheImpl(EurekaServerConfig serverConfig, ServerCodecs serverCodecs, AbstractInstanceRegistry registry) {
        this.serverConfig = serverConfig;
        this.serverCodecs = serverCodecs;
        this.shouldUseReadOnlyResponseCache = serverConfig.shouldUseReadOnlyResponseCache();
        this.registry = registry;

        long responseCacheUpdateIntervalMs = serverConfig.getResponseCacheUpdateIntervalMs();
        this.readWriteCacheMap =
                CacheBuilder.newBuilder().initialCapacity(serverConfig.getInitialCapacityOfResponseCache())
                        .expireAfterWrite(serverConfig.getResponseCacheAutoExpirationInSeconds(), TimeUnit.SECONDS)
                        .removalListener(new RemovalListener<Key, Value>() {
                            @Override
                            public void onRemoval(RemovalNotification<Key, Value> notification) {
                                // Keep the region-specific key index in sync when an entry is evicted.
                                Key removedKey = notification.getKey();
                                if (removedKey.hasRegions()) {
                                    Key cloneWithNoRegions = removedKey.cloneWithoutRegions();
                                    regionSpecificKeys.remove(cloneWithNoRegions, removedKey);
                                }
                            }
                        })
                        .build(new CacheLoader<Key, Value>() {
                            @Override
                            public Value load(Key key) throws Exception {
                                // Record region-qualified keys so invalidation by the region-less
                                // key can also invalidate them (see regionSpecificKeys javadoc).
                                if (key.hasRegions()) {
                                    Key cloneWithNoRegions = key.cloneWithoutRegions();
                                    regionSpecificKeys.put(cloneWithNoRegions, key);
                                }
                                Value value = generatePayload(key);
                                return value;
                            }
                        });

        if (shouldUseReadOnlyResponseCache) {
            // First run is aligned to the next interval boundary, then repeats every interval.
            timer.schedule(getCacheUpdateTask(),
                    new Date(((System.currentTimeMillis() / responseCacheUpdateIntervalMs) * responseCacheUpdateIntervalMs)
                            + responseCacheUpdateIntervalMs),
                    responseCacheUpdateIntervalMs);
        }

        try {
            Monitors.registerObject(this);
        } catch (Throwable e) {
            logger.warn("Cannot register the JMX monitor for the InstanceRegistry", e);
        }
    }

    /**
     * Task that refreshes every key already present in the read-only cache from the
     * read-write cache. Note: it only updates existing keys; new keys enter the
     * read-only map lazily via {@link #getValue(Key, boolean)}.
     */
    private TimerTask getCacheUpdateTask() {
        return new TimerTask() {
            @Override
            public void run() {
                logger.debug("Updating the client cache from response cache");
                for (Key key : readOnlyCacheMap.keySet()) {
                    if (logger.isDebugEnabled()) {
                        logger.debug("Updating the client cache from response cache for key : {} {} {} {}",
                                key.getEntityType(), key.getName(), key.getVersion(), key.getType());
                    }
                    try {
                        CurrentRequestVersion.set(key.getVersion());
                        Value cacheValue = readWriteCacheMap.get(key);
                        Value currentCacheValue = readOnlyCacheMap.get(key);
                        // Reference comparison is intentional: a regenerated payload is a new
                        // Value instance, so identity inequality means "content changed".
                        if (cacheValue != currentCacheValue) {
                            readOnlyCacheMap.put(key, cacheValue);
                        }
                    } catch (Throwable th) {
                        logger.error("Error while updating the client cache from response cache for key {}", key.toStringCompact(), th);
                    } finally {
                        CurrentRequestVersion.remove();
                    }
                }
            }
        };
    }

    /**
     * Get the cached information about applications.
     *
     * <p>
     * If the cached information is not available it is generated on the first
     * request. After the first request, the information is then updated
     * periodically by a background thread.
     * </p>
     *
     * @param key the key for which the cached information needs to be obtained.
     * @return payload which contains information about the applications, or
     *         {@code null} if there is no data for the key.
     */
    public String get(final Key key) {
        return get(key, shouldUseReadOnlyResponseCache);
    }

    @VisibleForTesting
    String get(final Key key, boolean useReadOnlyCache) {
        Value payload = getValue(key, useReadOnlyCache);
        // The empty-payload sentinel is normalized to null for callers.
        if (payload == null || payload.getPayload().equals(EMPTY_PAYLOAD)) {
            return null;
        } else {
            return payload.getPayload();
        }
    }

    /**
     * Get the compressed information about the applications.
     *
     * @param key
     *            the key for which the compressed cached information needs to
     *            be obtained.
     * @return compressed payload which contains information about the
     *         applications, or {@code null} if there is no data for the key.
     */
    public byte[] getGZIP(Key key) {
        Value payload = getValue(key, shouldUseReadOnlyResponseCache);
        if (payload == null) {
            return null;
        }
        return payload.getGzipped();
    }

    /** Stops the read-only cache refresh timer and unregisters JMX monitors. */
    @Override
    public void stop() {
        timer.cancel();
        Monitors.unregisterObject(this);
    }

    /**
     * Invalidate the cache of a particular application.
     *
     * Invalidates every key variant (key type x version x accept encoding) for the
     * application itself, the full-registry and delta payloads, and, when provided,
     * the application's VIP and secure VIP payloads.
     *
     * @param appName the application name of the application.
     */
    @Override
    public void invalidate(String appName, @Nullable String vipAddress, @Nullable String secureVipAddress) {
        for (Key.KeyType type : KEY_TYPE_VALUES) {
            for (Version v : VERSION_VALUES) {
                invalidate(
                        new Key(Key.EntityType.Application, appName, type, v, EurekaAccept.full),
                        new Key(Key.EntityType.Application, appName, type, v, EurekaAccept.compact),
                        new Key(Key.EntityType.Application, ALL_APPS, type, v, EurekaAccept.full),
                        new Key(Key.EntityType.Application, ALL_APPS, type, v, EurekaAccept.compact),
                        new Key(Key.EntityType.Application, ALL_APPS_DELTA, type, v, EurekaAccept.full),
                        new Key(Key.EntityType.Application, ALL_APPS_DELTA, type, v, EurekaAccept.compact)
                );
                if (null != vipAddress) {
                    invalidate(new Key(Key.EntityType.VIP, vipAddress, type, v, EurekaAccept.full));
                }
                if (null != secureVipAddress) {
                    invalidate(new Key(Key.EntityType.SVIP, secureVipAddress, type, v, EurekaAccept.full));
                }
            }
        }
    }

    /**
     * Invalidate the cache information given the list of keys.
     *
     * Also invalidates any region-qualified variants of each key that clients have
     * requested (tracked in {@link #regionSpecificKeys}).
     *
     * @param keys the list of keys for which the cache information needs to be invalidated.
     */
    public void invalidate(Key... keys) {
        for (Key key : keys) {
            logger.debug("Invalidating the response cache key : {} {} {} {}, {}",
                    key.getEntityType(), key.getName(), key.getVersion(), key.getType(), key.getEurekaAccept());

            readWriteCacheMap.invalidate(key);
            Collection<Key> keysWithRegions = regionSpecificKeys.get(key);
            if (null != keysWithRegions && !keysWithRegions.isEmpty()) {
                for (Key keysWithRegion : keysWithRegions) {
                    logger.debug("Invalidating the response cache key : {} {} {} {} {}",
                            key.getEntityType(), key.getName(), key.getVersion(), key.getType(), key.getEurekaAccept());
                    readWriteCacheMap.invalidate(keysWithRegion);
                }
            }
        }
    }

    /**
     * Gets the version number of the cached data.
     *
     * @return the version number of the cached data.
     */
    @Override
    public AtomicLong getVersionDelta() {
        return versionDelta;
    }

    /**
     * Gets the version number of the cached data with remote regions.
     *
     * @return the version number of the cached data with remote regions.
     */
    @Override
    public AtomicLong getVersionDeltaWithRegions() {
        return versionDeltaWithRegions;
    }

    /**
     * @deprecated use instance method {@link #getVersionDelta()}
     *
     * Gets the version number of the cached data.
     *
     * @return the version number of the cached data.
     */
    @Deprecated
    public static AtomicLong getVersionDeltaStatic() {
        return versionDeltaLegacy;
    }

    /**
     * @deprecated use instance method {@link #getVersionDeltaWithRegions()}
     *
     * Gets the version number of the cached data with remote regions.
     *
     * @return the version number of the cached data with remote regions.
     */
    @Deprecated
    public static AtomicLong getVersionDeltaWithRegionsLegacy() {
        return versionDeltaWithRegionsLegacy;
    }

    /**
     * Get the number of items in the response cache.
     *
     * @return int value representing the number of items in response cache.
     */
    @Monitor(name = "responseCacheSize", type = DataSourceType.GAUGE)
    public int getCurrentSize() {
        return readWriteCacheMap.asMap().size();
    }

    /**
     * Get the payload in both compressed and uncompressed form.
     *
     * NOTE(review): on a read-only cache miss this does a non-atomic get-then-put,
     * so concurrent callers may briefly generate/publish duplicate Values for the
     * same key. Harmless (last write wins), but worth knowing.
     */
    @VisibleForTesting
    Value getValue(final Key key, boolean useReadOnlyCache) {
        Value payload = null;
        try {
            if (useReadOnlyCache) {
                final Value currentPayload = readOnlyCacheMap.get(key);
                if (currentPayload != null) {
                    payload = currentPayload;
                } else {
                    // Miss: load (or generate) via the read-write cache, then seed
                    // the read-only map so the refresh task keeps it up to date.
                    payload = readWriteCacheMap.get(key);
                    readOnlyCacheMap.put(key, payload);
                }
            } else {
                payload = readWriteCacheMap.get(key);
            }
        } catch (Throwable t) {
            logger.error("Cannot get value for key : {}", key, t);
        }
        return payload;
    }

    /**
     * Generate payload with both JSON and XML formats for all applications.
     * Returns "" (the empty-payload sentinel) if encoding fails.
     */
    private String getPayLoad(Key key, Applications apps) {
        EncoderWrapper encoderWrapper = serverCodecs.getEncoder(key.getType(), key.getEurekaAccept());
        String result;
        try {
            result = encoderWrapper.encode(apps);
        } catch (Exception e) {
            logger.error("Failed to encode the payload for all apps", e);
            return "";
        }
        if(logger.isDebugEnabled()) {
            logger.debug("New application cache entry {} with apps hashcode {}", key.toStringCompact(), apps.getAppsHashCode());
        }
        return result;
    }

    /**
     * Generate payload with both JSON and XML formats for a given application.
     * Returns the empty-payload sentinel for a null application or on encode failure.
     */
    private String getPayLoad(Key key, Application app) {
        if (app == null) {
            return EMPTY_PAYLOAD;
        }

        EncoderWrapper encoderWrapper = serverCodecs.getEncoder(key.getType(), key.getEurekaAccept());
        try {
            return encoderWrapper.encode(app);
        } catch (Exception e) {
            logger.error("Failed to encode the payload for application {}", app.getName(), e);
            return "";
        }
    }

    /**
     * Generate payload for the given key, dispatching on the key's entity type
     * (single app / full registry / delta / VIP / SVIP) and timing each path.
     * Delta generation also bumps the version counters.
     */
    private Value generatePayload(Key key) {
        Stopwatch tracer = null;
        try {
            String payload;
            switch (key.getEntityType()) {
                case Application:
                    boolean isRemoteRegionRequested = key.hasRegions();

                    if (ALL_APPS.equals(key.getName())) {
                        if (isRemoteRegionRequested) {
                            tracer = serializeAllAppsWithRemoteRegionTimer.start();
                            payload = getPayLoad(key, registry.getApplicationsFromMultipleRegions(key.getRegions()));
                        } else {
                            tracer = serializeAllAppsTimer.start();
                            payload = getPayLoad(key, registry.getApplications());
                        }
                    } else if (ALL_APPS_DELTA.equals(key.getName())) {
                        if (isRemoteRegionRequested) {
                            tracer = serializeDeltaAppsWithRemoteRegionTimer.start();
                            versionDeltaWithRegions.incrementAndGet();
                            versionDeltaWithRegionsLegacy.incrementAndGet();
                            payload = getPayLoad(key,
                                    registry.getApplicationDeltasFromMultipleRegions(key.getRegions()));
                        } else {
                            tracer = serializeDeltaAppsTimer.start();
                            versionDelta.incrementAndGet();
                            versionDeltaLegacy.incrementAndGet();
                            payload = getPayLoad(key, registry.getApplicationDeltas());
                        }
                    } else {
                        tracer = serializeOneApptimer.start();
                        payload = getPayLoad(key, registry.getApplication(key.getName()));
                    }
                    break;
                case VIP:
                case SVIP:
                    tracer = serializeViptimer.start();
                    payload = getPayLoad(key, getApplicationsForVip(key, registry));
                    break;
                default:
                    logger.error("Unidentified entity type: {} found in the cache key.", key.getEntityType());
                    payload = "";
                    break;
            }
            return new Value(payload);
        } finally {
            if (tracer != null) {
                tracer.stop();
            }
        }
    }

    /**
     * Collects, from the full registry, the instances whose (secure) VIP address list
     * contains the key's name. An instance's VIP address field may hold several
     * comma-separated addresses; each is matched individually.
     */
    private static Applications getApplicationsForVip(Key key, AbstractInstanceRegistry registry) {
        logger.debug(
                "Retrieving applications from registry for key : {} {} {} {}",
                key.getEntityType(), key.getName(), key.getVersion(), key.getType());
        Applications toReturn = new Applications();
        Applications applications = registry.getApplications();
        for (Application application : applications.getRegisteredApplications()) {
            Application appToAdd = null;
            for (InstanceInfo instanceInfo : application.getInstances()) {
                String vipAddress;
                if (Key.EntityType.VIP.equals(key.getEntityType())) {
                    vipAddress = instanceInfo.getVIPAddress();
                } else if (Key.EntityType.SVIP.equals(key.getEntityType())) {
                    vipAddress = instanceInfo.getSecureVipAddress();
                } else {
                    // should not happen, but just in case.
                    continue;
                }

                if (null != vipAddress) {
                    String[] vipAddresses = vipAddress.split(",");
                    Arrays.sort(vipAddresses);
                    if (Arrays.binarySearch(vipAddresses, key.getName()) >= 0) {
                        if (null == appToAdd) {
                            appToAdd = new Application(application.getName());
                            toReturn.addApplication(appToAdd);
                        }
                        appToAdd.addInstance(instanceInfo);
                    }
                }
            }
        }
        toReturn.setAppsHashCode(toReturn.getReconcileHashCode());
        logger.debug(
                "Retrieved applications from registry for key : {} {} {} {}, reconcile hashcode: {}",
                key.getEntityType(), key.getName(), key.getVersion(), key.getType(),
                toReturn.getReconcileHashCode());
        return toReturn;
    }

    /**
     * The class that stores payload in both compressed and uncompressed form.
     *
     * The gzipped form is computed eagerly at construction; it is null for the
     * empty payload or if compression fails.
     */
    public class Value {
        private final String payload;
        private byte[] gzipped;

        public Value(String payload) {
            this.payload = payload;
            if (!EMPTY_PAYLOAD.equals(payload)) {
                Stopwatch tracer = compressPayloadTimer.start();
                try {
                    ByteArrayOutputStream bos = new ByteArrayOutputStream();
                    GZIPOutputStream out = new GZIPOutputStream(bos);
                    // NOTE(review): getBytes() uses the platform default charset;
                    // presumably UTF-8 is intended — confirm before changing.
                    byte[] rawBytes = payload.getBytes();
                    out.write(rawBytes);
                    // Finish creation of gzip file
                    out.finish();
                    out.close();
                    bos.close();
                    gzipped = bos.toByteArray();
                } catch (IOException e) {
                    // Compression failure is non-fatal; callers fall back to null.
                    gzipped = null;
                } finally {
                    if (tracer != null) {
                        tracer.stop();
                    }
                }
            } else {
                gzipped = null;
            }
        }

        public String getPayload() {
            return payload;
        }

        public byte[] getGzipped() {
            return gzipped;
        }
    }

}
| 6,964 |
0 | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka/registry | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka/registry/rule/AlwaysMatchInstanceStatusRule.java | package com.netflix.eureka.registry.rule;
import com.netflix.appinfo.InstanceInfo;
import com.netflix.eureka.lease.Lease;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* This rule matches always and returns the current status of the instance.
*
* Created by Nikos Michalakis on 7/13/16.
*/
public class AlwaysMatchInstanceStatusRule implements InstanceStatusOverrideRule {
private static final Logger logger = LoggerFactory.getLogger(AlwaysMatchInstanceStatusRule.class);
@Override
public StatusOverrideResult apply(InstanceInfo instanceInfo,
Lease<InstanceInfo> existingLease,
boolean isReplication) {
logger.debug("Returning the default instance status {} for instance {}", instanceInfo.getStatus(),
instanceInfo.getId());
return StatusOverrideResult.matchingStatus(instanceInfo.getStatus());
}
@Override
public String toString() {
return AlwaysMatchInstanceStatusRule.class.getName();
}
}
| 6,965 |
0 | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka/registry | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka/registry/rule/AsgEnabledRule.java | package com.netflix.eureka.registry.rule;
import com.netflix.appinfo.InstanceInfo;
import com.netflix.appinfo.InstanceInfo.InstanceStatus;
import com.netflix.eureka.aws.AsgClient;
import com.netflix.eureka.lease.Lease;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* This is a rule that checks if the ASG for an instance is enabled or not and if not then it brings the instance
* OUT_OF_SERVICE.
*
* Created by Nikos Michalakis on 7/14/16.
*/
public class AsgEnabledRule implements InstanceStatusOverrideRule {
    private static final Logger logger = LoggerFactory.getLogger(AsgEnabledRule.class);

    private final AsgClient asgClient;

    public AsgEnabledRule(AsgClient asgClient) {
        this.asgClient = asgClient;
    }

    /**
     * Matches only when the instance carries an ASG name: a disabled ASG forces
     * OUT_OF_SERVICE, an enabled one maps to UP. Without an ASG name the rule
     * does not match and the next rule in the chain decides.
     */
    @Override
    public StatusOverrideResult apply(InstanceInfo instanceInfo, Lease<InstanceInfo> existingLease, boolean isReplication) {
        if (instanceInfo.getASGName() == null) {
            return StatusOverrideResult.NO_MATCH;
        }
        boolean asgDisabled = !asgClient.isASGEnabled(instanceInfo);
        logger.debug("The ASG name is specified {} and the value is {}", instanceInfo.getASGName(), asgDisabled);
        return StatusOverrideResult.matchingStatus(
                asgDisabled ? InstanceStatus.OUT_OF_SERVICE : InstanceStatus.UP);
    }

    @Override
    public String toString() {
        return AsgEnabledRule.class.getName();
    }
}
| 6,966 |
0 | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka/registry | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka/registry/rule/LeaseExistsRule.java | package com.netflix.eureka.registry.rule;
import com.netflix.appinfo.InstanceInfo;
import com.netflix.eureka.lease.Lease;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* This rule matches if we have an existing lease for the instance that is UP or OUT_OF_SERVICE.
*
* Created by Nikos Michalakis on 7/13/16.
*/
public class LeaseExistsRule implements InstanceStatusOverrideRule {
    private static final Logger logger = LoggerFactory.getLogger(LeaseExistsRule.class);

    /**
     * For non-replication traffic only: if a lease already exists whose holder is
     * UP or OUT_OF_SERVICE, the server keeps that status rather than letting the
     * client's report override it. This is for backward compatibility until all
     * applications have ASG names; otherwise, while starting up, the client status
     * may override status replicated from other servers.
     */
    @Override
    public StatusOverrideResult apply(InstanceInfo instanceInfo,
                                      Lease<InstanceInfo> existingLease,
                                      boolean isReplication) {
        if (isReplication || existingLease == null) {
            return StatusOverrideResult.NO_MATCH;
        }

        InstanceInfo.InstanceStatus leaseStatus = existingLease.getHolder().getStatus();
        boolean serverWins = InstanceInfo.InstanceStatus.OUT_OF_SERVICE.equals(leaseStatus)
                || InstanceInfo.InstanceStatus.UP.equals(leaseStatus);
        if (!serverWins) {
            return StatusOverrideResult.NO_MATCH;
        }

        // Allow server to have its way when the status is UP or OUT_OF_SERVICE
        logger.debug("There is already an existing lease with status {} for instance {}",
                existingLease.getHolder().getStatus().name(),
                existingLease.getHolder().getId());
        return StatusOverrideResult.matchingStatus(existingLease.getHolder().getStatus());
    }

    @Override
    public String toString() {
        return LeaseExistsRule.class.getName();
    }
}
| 6,967 |
0 | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka/registry | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka/registry/rule/DownOrStartingRule.java | package com.netflix.eureka.registry.rule;
import com.netflix.appinfo.InstanceInfo;
import com.netflix.eureka.lease.Lease;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* This rule matches if the instance is DOWN or STARTING.
*
* Created by Nikos Michalakis on 7/13/16.
*/
public class DownOrStartingRule implements InstanceStatusOverrideRule {
private static final Logger logger = LoggerFactory.getLogger(DownOrStartingRule.class);
@Override
public StatusOverrideResult apply(InstanceInfo instanceInfo,
Lease<InstanceInfo> existingLease,
boolean isReplication) {
// ReplicationInstance is DOWN or STARTING - believe that, but when the instance says UP, question that
// The client instance sends STARTING or DOWN (because of heartbeat failures), then we accept what
// the client says. The same is the case with replica as well.
// The OUT_OF_SERVICE from the client or replica needs to be confirmed as well since the service may be
// currently in SERVICE
if ((!InstanceInfo.InstanceStatus.UP.equals(instanceInfo.getStatus()))
&& (!InstanceInfo.InstanceStatus.OUT_OF_SERVICE.equals(instanceInfo.getStatus()))) {
logger.debug("Trusting the instance status {} from replica or instance for instance {}",
instanceInfo.getStatus(), instanceInfo.getId());
return StatusOverrideResult.matchingStatus(instanceInfo.getStatus());
}
return StatusOverrideResult.NO_MATCH;
}
@Override
public String toString() {
return DownOrStartingRule.class.getName();
}
}
| 6,968 |
0 | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka/registry | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka/registry/rule/OverrideExistsRule.java | package com.netflix.eureka.registry.rule;
import com.netflix.appinfo.InstanceInfo;
import com.netflix.eureka.lease.Lease;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.Map;
/**
* This rule checks to see if we have overrides for an instance and if we do then we return those.
*
* Created by Nikos Michalakis on 7/13/16.
*/
public class OverrideExistsRule implements InstanceStatusOverrideRule {
    private static final Logger logger = LoggerFactory.getLogger(OverrideExistsRule.class);

    // Shared map of per-instance status overrides, keyed by instance id.
    // Made final: it is assigned once in the constructor and never reassigned.
    private final Map<String, InstanceInfo.InstanceStatus> statusOverrides;

    public OverrideExistsRule(Map<String, InstanceInfo.InstanceStatus> statusOverrides) {
        this.statusOverrides = statusOverrides;
    }

    /**
     * Matches when an explicit override exists for this instance id; the override
     * wins over anything else (e.g. ASG-derived status).
     *
     * @param instanceInfo  the instance whose status is being resolved
     * @param existingLease unused by this rule
     * @param isReplication unused by this rule
     * @return the overridden status if one is registered, otherwise NO_MATCH
     */
    @Override
    public StatusOverrideResult apply(InstanceInfo instanceInfo, Lease<InstanceInfo> existingLease, boolean isReplication) {
        InstanceInfo.InstanceStatus overridden = statusOverrides.get(instanceInfo.getId());
        // If there are instance specific overrides, then they win - otherwise the ASG status
        if (overridden != null) {
            logger.debug("The instance specific override for instance {} and the value is {}",
                    instanceInfo.getId(), overridden.name());
            return StatusOverrideResult.matchingStatus(overridden);
        }
        return StatusOverrideResult.NO_MATCH;
    }

    @Override
    public String toString() {
        return OverrideExistsRule.class.getName();
    }
}
| 6,969 |
0 | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka/registry | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka/registry/rule/FirstMatchWinsCompositeRule.java | package com.netflix.eureka.registry.rule;
import com.netflix.appinfo.InstanceInfo;
import com.netflix.eureka.lease.Lease;
import java.util.ArrayList;
import java.util.List;
/**
* This rule takes an ordered list of rules and returns the result of the first match or the
* result of the {@link AlwaysMatchInstanceStatusRule}.
*
* Created by Nikos Michalakis on 7/13/16.
*/
public class FirstMatchWinsCompositeRule implements InstanceStatusOverrideRule {

    private final InstanceStatusOverrideRule[] rules;
    private final InstanceStatusOverrideRule defaultRule;
    private final String compositeRuleName;

    /**
     * @param rules ordered rules; the first that matches wins. Defensively copied so
     *              callers cannot change the rule chain after construction.
     */
    public FirstMatchWinsCompositeRule(InstanceStatusOverrideRule... rules) {
        // Defensive copy: the varargs array is caller-owned and must not be
        // mutable from outside once this composite is built.
        this.rules = rules.clone();
        this.defaultRule = new AlwaysMatchInstanceStatusRule();

        // Let's build up and "cache" the rule name to be used by toString();
        List<String> ruleNames = new ArrayList<>(this.rules.length + 1);
        for (InstanceStatusOverrideRule rule : this.rules) {
            ruleNames.add(rule.toString());
        }
        ruleNames.add(defaultRule.toString());
        compositeRuleName = ruleNames.toString();
    }

    /**
     * Applies each rule in order and returns the first matching result; if none
     * match, falls back to {@link AlwaysMatchInstanceStatusRule}, which always matches.
     */
    @Override
    public StatusOverrideResult apply(InstanceInfo instanceInfo,
                                      Lease<InstanceInfo> existingLease,
                                      boolean isReplication) {
        for (InstanceStatusOverrideRule rule : this.rules) {
            StatusOverrideResult result = rule.apply(instanceInfo, existingLease, isReplication);
            if (result.matches()) {
                return result;
            }
        }
        return defaultRule.apply(instanceInfo, existingLease, isReplication);
    }

    @Override
    public String toString() {
        return this.compositeRuleName;
    }
}
| 6,970 |
0 | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka/registry | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka/registry/rule/InstanceStatusOverrideRule.java | package com.netflix.eureka.registry.rule;
import com.netflix.appinfo.InstanceInfo;
import com.netflix.eureka.lease.Lease;
import com.netflix.eureka.registry.AbstractInstanceRegistry;
/**
* A single rule that if matched it returns an instance status.
* The idea is to use an ordered list of such rules and pick the first result that matches.
*
* It is designed to be used by
* {@link AbstractInstanceRegistry#getOverriddenInstanceStatus(InstanceInfo, Lease, boolean)}
*
* Created by Nikos Michalakis on 7/13/16.
*/
public interface InstanceStatusOverrideRule {

    /**
     * Match this rule.
     *
     * @param instanceInfo The instance info whose status we care about.
     * @param existingLease Does the instance have an existing lease already? If so let's consider that.
     * @param isReplication When overriding consider if we are under a replication mode from other servers.
     * @return A result with whether we matched and what we propose the status to be overridden to.
     */
    StatusOverrideResult apply(final InstanceInfo instanceInfo,
                               final Lease<InstanceInfo> existingLease,
                               boolean isReplication);

}
| 6,971 |
0 | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka/registry | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka/registry/rule/StatusOverrideResult.java | package com.netflix.eureka.registry.rule;
import com.netflix.appinfo.InstanceInfo;
import com.netflix.eureka.registry.rule.InstanceStatusOverrideRule;
/**
* Container for a result computed by an {@link InstanceStatusOverrideRule}.
*
* Created by Nikos Michalakis on 7/13/16.
*/
public class StatusOverrideResult {

    // Shared "no match" singleton. Made final: a non-final public static field
    // could be reassigned by any code, silently breaking every rule comparison.
    public static final StatusOverrideResult NO_MATCH = new StatusOverrideResult(false, null);

    /**
     * Creates a matching result carrying the proposed override status.
     */
    public static StatusOverrideResult matchingStatus(InstanceInfo.InstanceStatus status) {
        return new StatusOverrideResult(true, status);
    }

    // Does the rule match?
    private final boolean matches;

    // The status computed by the rule.
    private final InstanceInfo.InstanceStatus status;

    private StatusOverrideResult(boolean matches, InstanceInfo.InstanceStatus status) {
        this.matches = matches;
        this.status = status;
    }

    public boolean matches() {
        return matches;
    }

    public InstanceInfo.InstanceStatus status() {
        return status;
    }
}
| 6,972 |
0 | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka/aws/Route53Binder.java | package com.netflix.eureka.aws;
import com.amazonaws.ClientConfiguration;
import com.amazonaws.auth.BasicAWSCredentials;
import com.amazonaws.auth.InstanceProfileCredentialsProvider;
import com.amazonaws.services.route53.AmazonRoute53Client;
import com.amazonaws.services.route53.model.*;
import com.netflix.appinfo.AmazonInfo;
import com.netflix.appinfo.ApplicationInfoManager;
import com.netflix.appinfo.InstanceInfo;
import com.netflix.discovery.EurekaClientConfig;
import com.netflix.eureka.EurekaServerConfig;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.annotation.PostConstruct;
import javax.annotation.PreDestroy;
import javax.inject.Inject;
import javax.inject.Singleton;
import java.net.MalformedURLException;
import java.net.URL;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Timer;
import java.util.TimerTask;
/**
* Route53 binder implementation. Will look for a free domain in the list of service url to bind itself to via Route53.
*/
@Singleton
public class Route53Binder implements AwsBinder {
private static final Logger logger = LoggerFactory
.getLogger(Route53Binder.class);
public static final String NULL_DOMAIN = "null";
private final EurekaServerConfig serverConfig;
private final EurekaClientConfig clientConfig;
private final ApplicationInfoManager applicationInfoManager;
/**
* the hostname to register under the Route53 CNAME
*/
private final String registrationHostname;
private final Timer timer;
private final AmazonRoute53Client amazonRoute53Client;
    @Inject
    public Route53Binder(EurekaServerConfig serverConfig,
                         EurekaClientConfig clientConfig,
                         ApplicationInfoManager applicationInfoManager) {
        // Derives the registration hostname from this instance's Amazon metadata
        // (public hostname, falling back to the local hostname).
        this(getRegistrationHostnameFromAmazonDataCenterInfo(applicationInfoManager),
                serverConfig,
                clientConfig,
                applicationInfoManager);
    }
    /**
     * @param registrationHostname the hostname to register under the Route53 CNAME
     */
    public Route53Binder(String registrationHostname, EurekaServerConfig serverConfig,
                         EurekaClientConfig clientConfig, ApplicationInfoManager applicationInfoManager) {
        this.registrationHostname = registrationHostname;
        this.serverConfig = serverConfig;
        this.clientConfig = clientConfig;
        this.applicationInfoManager = applicationInfoManager;
        // Daemon timer so the periodic re-bind task does not block JVM shutdown.
        this.timer = new Timer("Eureka-Route53Binder", true);
        this.amazonRoute53Client = getAmazonRoute53Client(serverConfig);
    }
private static String getRegistrationHostnameFromAmazonDataCenterInfo(ApplicationInfoManager applicationInfoManager) {
InstanceInfo myInfo = applicationInfoManager.getInfo();
AmazonInfo dataCenterInfo = (AmazonInfo) myInfo.getDataCenterInfo();
String ip = dataCenterInfo.get(AmazonInfo.MetaDataKey.publicHostname);
if (ip == null || ip.length() == 0) {
return dataCenterInfo.get(AmazonInfo.MetaDataKey.localHostname);
}
return ip;
}
@Override
@PostConstruct
public void start() {
try {
doBind();
timer.schedule(
new TimerTask() {
@Override
public void run() {
try {
doBind();
} catch (Throwable e) {
logger.error("Could not bind to Route53", e);
}
}
},
serverConfig.getRoute53BindingRetryIntervalMs(),
serverConfig.getRoute53BindingRetryIntervalMs());
} catch (InterruptedException e) {
throw new RuntimeException(e);
}
}
    /**
     * Scans the declared service-URL domains and binds this server's hostname to the
     * first free Route53 CNAME.
     *
     * A domain is considered "free" when it has no resource record set yet, or when
     * its record holds the NULL_DOMAIN placeholder. If any domain is already bound to
     * this server's hostname, the method returns immediately without changes. If no
     * free domain can be claimed, a warning is logged.
     */
    private void doBind() throws InterruptedException {
        List<ResourceRecordSetWithHostedZone> freeDomains = new ArrayList<>();
        List<String> domains = getDeclaredDomains();
        for (String domain : domains) {
            ResourceRecordSetWithHostedZone rrs = getResourceRecordSetWithHostedZone(domain);

            if (rrs != null) {
                if (rrs.getResourceRecordSet() == null) {
                    // No record yet for this domain: prepare a fresh CNAME record set.
                    ResourceRecordSet resourceRecordSet = new ResourceRecordSet();
                    resourceRecordSet.setName(domain);
                    resourceRecordSet.setType(RRType.CNAME);
                    resourceRecordSet.setTTL(serverConfig.getRoute53DomainTTL());
                    freeDomains.add(new ResourceRecordSetWithHostedZone(rrs.getHostedZone(), resourceRecordSet));
                } else if (NULL_DOMAIN.equals(rrs.getResourceRecordSet().getResourceRecords().get(0).getValue())) {
                    // Placeholder value means the domain is free to claim.
                    freeDomains.add(rrs);
                }
                // already registered
                if (hasValue(rrs, registrationHostname)) {
                    return;
                }
            }
        }

        // Try to claim free domains in order; the first successful claim wins.
        for (ResourceRecordSetWithHostedZone rrs : freeDomains) {
            if (createResourceRecordSet(rrs)) {
                logger.info("Bind {} to {}", registrationHostname, rrs.getResourceRecordSet().getName());
                return;
            }
        }

        logger.warn("Unable to find free domain in {}", domains);
    }
    /**
     * Attempts to claim a record set by UPSERTing this instance's hostname into it,
     * then re-reads the record after a short delay to detect a concurrent writer
     * (another server racing for the same free domain).
     *
     * @return true only if the record still holds our hostname after the re-read
     * @throws InterruptedException if interrupted during the retry/verification sleep
     */
    private boolean createResourceRecordSet(ResourceRecordSetWithHostedZone rrs) throws InterruptedException {
        rrs.getResourceRecordSet().setResourceRecords(Arrays.asList(new ResourceRecord(registrationHostname)));
        Change change = new Change(ChangeAction.UPSERT, rrs.getResourceRecordSet());
        if (executeChangeWithRetry(change, rrs.getHostedZone())) {
            // Give Route53 a moment before verifying our write was not overwritten.
            Thread.sleep(1000);
            // check change not overwritten
            ResourceRecordSet resourceRecordSet = getResourceRecordSet(rrs.getResourceRecordSet().getName(), rrs.getHostedZone());
            if (resourceRecordSet != null) {
                return resourceRecordSet.getResourceRecords().equals(rrs.getResourceRecordSet().getResourceRecords());
            }
        }
        return false;
    }
private List<String> toDomains(List<String> ec2Urls) {
List<String> domains = new ArrayList<>(ec2Urls.size());
for(String url : ec2Urls) {
try {
domains.add(extractDomain(url));
} catch(MalformedURLException e) {
logger.error("Invalid url {}", url, e);
}
}
return domains;
}
private String getMyZone() {
InstanceInfo info = applicationInfoManager.getInfo();
AmazonInfo amazonInfo = info != null ? (AmazonInfo) info.getDataCenterInfo() : null;
String zone = amazonInfo != null ? amazonInfo.get(AmazonInfo.MetaDataKey.availabilityZone) : null;
if (zone == null) {
throw new RuntimeException("Cannot extract availabilityZone");
}
return zone;
}
private List<String> getDeclaredDomains() {
final String myZone = getMyZone();
List<String> ec2Urls = clientConfig.getEurekaServerServiceUrls(myZone);
return toDomains(ec2Urls);
}
private boolean executeChangeWithRetry(Change change, HostedZone hostedZone) throws InterruptedException {
Throwable firstError = null;
for (int i = 0; i < serverConfig.getRoute53BindRebindRetries(); i++) {
try {
executeChange(change, hostedZone);
return true;
} catch (Throwable e) {
if (firstError == null) {
firstError = e;
}
Thread.sleep(1000);
}
}
if (firstError != null) {
logger.error("Cannot execute change {} {}", change, firstError, firstError);
}
return false;
}
private void executeChange(Change change, HostedZone hostedZone) {
logger.info("Execute change {} ", change);
ChangeResourceRecordSetsRequest changeResourceRecordSetsRequest = new ChangeResourceRecordSetsRequest();
changeResourceRecordSetsRequest.setHostedZoneId(hostedZone.getId());
ChangeBatch changeBatch = new ChangeBatch();
changeBatch.withChanges(change);
changeResourceRecordSetsRequest.setChangeBatch(changeBatch);
amazonRoute53Client.changeResourceRecordSets(changeResourceRecordSetsRequest);
}
private ResourceRecordSetWithHostedZone getResourceRecordSetWithHostedZone(String domain) {
HostedZone hostedZone = getHostedZone(domain);
if (hostedZone != null) {
return new ResourceRecordSetWithHostedZone(hostedZone, getResourceRecordSet(domain, hostedZone));
}
return null;
}
private ResourceRecordSet getResourceRecordSet(String domain, HostedZone hostedZone) {
ListResourceRecordSetsRequest request = new ListResourceRecordSetsRequest();
request.setMaxItems(String.valueOf(Integer.MAX_VALUE));
request.setHostedZoneId(hostedZone.getId());
ListResourceRecordSetsResult listResourceRecordSetsResult = amazonRoute53Client.listResourceRecordSets(request);
for(ResourceRecordSet rrs : listResourceRecordSetsResult.getResourceRecordSets()) {
if (rrs.getName().equals(domain)) {
return rrs;
}
}
return null;
}
private HostedZone getHostedZone(String domain) {
ListHostedZonesRequest listHostedZoneRequest = new ListHostedZonesRequest();
listHostedZoneRequest.setMaxItems(String.valueOf(Integer.MAX_VALUE));
ListHostedZonesResult listHostedZonesResult = amazonRoute53Client.listHostedZones(listHostedZoneRequest);
for(HostedZone hostedZone : listHostedZonesResult.getHostedZones()) {
if (domain.endsWith(hostedZone.getName())) {
return hostedZone;
}
}
return null;
}
    /**
     * Releases {@code domain} if (and only if) it currently points at this instance's
     * hostname, by parking the record back on the NULL_DOMAIN placeholder so another
     * server can claim it.
     *
     * @throws InterruptedException if interrupted while retrying the Route53 change
     */
    private void unbindFromDomain(String domain) throws InterruptedException {
        ResourceRecordSetWithHostedZone resourceRecordSetWithHostedZone = getResourceRecordSetWithHostedZone(domain);
        if (hasValue(resourceRecordSetWithHostedZone, registrationHostname)) {
            resourceRecordSetWithHostedZone.getResourceRecordSet().getResourceRecords().get(0).setValue(NULL_DOMAIN);
            executeChangeWithRetry(new Change(ChangeAction.UPSERT, resourceRecordSetWithHostedZone.getResourceRecordSet()), resourceRecordSetWithHostedZone.getHostedZone());
        }
    }
private String extractDomain(String url) throws MalformedURLException {
return new URL(url).getHost() + ".";
}
@Override
@PreDestroy
public void shutdown() {
timer.cancel();
for(String domain : getDeclaredDomains()) {
try {
unbindFromDomain(domain);
} catch (InterruptedException e) {
throw new RuntimeException(e);
}
}
amazonRoute53Client.shutdown();
}
private AmazonRoute53Client getAmazonRoute53Client(EurekaServerConfig serverConfig) {
String aWSAccessId = serverConfig.getAWSAccessId();
String aWSSecretKey = serverConfig.getAWSSecretKey();
ClientConfiguration clientConfiguration = new ClientConfiguration()
.withConnectionTimeout(serverConfig.getASGQueryTimeoutMs());
if (null != aWSAccessId && !"".equals(aWSAccessId)
&& null != aWSSecretKey && !"".equals(aWSSecretKey)) {
return new AmazonRoute53Client(
new BasicAWSCredentials(aWSAccessId, aWSSecretKey),
clientConfiguration);
} else {
return new AmazonRoute53Client(
new InstanceProfileCredentialsProvider(),
clientConfiguration);
}
}
private boolean hasValue(ResourceRecordSetWithHostedZone resourceRecordSetWithHostedZone, String ip) {
if (resourceRecordSetWithHostedZone != null && resourceRecordSetWithHostedZone.getResourceRecordSet() != null) {
for (ResourceRecord rr : resourceRecordSetWithHostedZone.getResourceRecordSet().getResourceRecords()) {
if (ip.equals(rr.getValue())) {
return true;
}
}
}
return false;
}
private class ResourceRecordSetWithHostedZone {
private final HostedZone hostedZone;
private final ResourceRecordSet resourceRecordSet;
public ResourceRecordSetWithHostedZone(HostedZone hostedZone, ResourceRecordSet resourceRecordSet) {
this.hostedZone = hostedZone;
this.resourceRecordSet = resourceRecordSet;
}
public HostedZone getHostedZone() {
return hostedZone;
}
public ResourceRecordSet getResourceRecordSet() {
return resourceRecordSet;
}
}
} | 6,973 |
0 | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka/aws/AwsBinder.java | package com.netflix.eureka.aws;
/**
 * Binds the Eureka server to an AWS-provided address resource such as an elastic IP
 * (EIP), a Route53 DNS record, or an elastic network interface (ENI).
 */
public interface AwsBinder {

    /**
     * Acquires/binds the address resource for this server instance.
     *
     * @throws Exception if the binding could not be established
     */
    void start() throws Exception;

    /**
     * Releases the address resource previously bound by {@link #start()}.
     *
     * @throws Exception if the resource could not be released cleanly
     */
    void shutdown() throws Exception;
}
0 | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka/aws/ElasticNetworkInterfaceBinder.java | package com.netflix.eureka.aws;
import com.amazonaws.auth.BasicAWSCredentials;
import com.amazonaws.auth.InstanceProfileCredentialsProvider;
import com.amazonaws.services.ec2.AmazonEC2;
import com.amazonaws.services.ec2.AmazonEC2Client;
import com.amazonaws.services.ec2.model.*;
import com.google.common.base.Function;
import com.google.common.base.Joiner;
import com.google.common.base.Splitter;
import com.google.common.collect.Lists;
import com.google.common.collect.Ordering;
import com.google.common.net.InetAddresses;
import com.netflix.appinfo.AmazonInfo;
import com.netflix.appinfo.ApplicationInfoManager;
import com.netflix.appinfo.InstanceInfo;
import com.netflix.discovery.EurekaClientConfig;
import com.netflix.discovery.endpoint.EndpointUtils;
import com.netflix.eureka.EurekaServerConfig;
import com.netflix.eureka.registry.PeerAwareInstanceRegistry;
import com.netflix.servo.monitor.Monitors;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.annotation.PostConstruct;
import javax.annotation.PreDestroy;
import javax.inject.Inject;
import java.net.MalformedURLException;
import java.net.URL;
import java.util.Collection;
import java.util.List;
import java.util.Timer;
import java.util.TimerTask;
/**
 * Amazon ENI binder for instances.
 *
 * Candidate ENI's discovery is done using the same mechanism as Elastic ip binder, via dns records or service urls.
 *
 * The dns records and the service urls should use the ENI private dns or private ip
 *
 * Dns record examples
 *  txt.us-east-1.eureka="us-east-1a.eureka" "us-east-1b.eureka"
 *  txt.us-east-1a.eureka="ip-172-31-y-y.ec2.internal"
 *  txt.us-east-1b.eureka="ip-172-31-x-x.ec2.internal"
 * where "ip-172-31-x-x.ec2.internal" is the ENI private dns
 *
 * Service url example:
 *  eureka.serviceUrl.us-east-1a=http://ip-172-31-x-x.ec2.internal:7001/eureka/v2/
 *
 * ENI Binding strategy should be configured via property like:
 *
 *  eureka.awsBindingStrategy=ENI
 *
 * If there are no available ENI's for the availability zone, it will not attach any already attached ENI
 */
public class ElasticNetworkInterfaceBinder implements AwsBinder {
    private static final Logger logger = LoggerFactory.getLogger(ElasticNetworkInterfaceBinder.class);
    // Pause between successive bind attempts.
    private static final int IP_BIND_SLEEP_TIME_MS = 1000;
    // Daemon timer that periodically re-checks/repairs the ENI binding.
    private static final Timer timer = new Timer("Eureka-ElasticNetworkInterfaceBinder", true);

    private final EurekaServerConfig serverConfig;
    private final EurekaClientConfig clientConfig;
    private final PeerAwareInstanceRegistry registry;
    private final ApplicationInfoManager applicationInfoManager;

    @Inject
    public ElasticNetworkInterfaceBinder(
            EurekaServerConfig serverConfig,
            EurekaClientConfig clientConfig,
            PeerAwareInstanceRegistry registry,
            ApplicationInfoManager applicationInfoManager) {
        this.serverConfig = serverConfig;
        this.clientConfig = clientConfig;
        this.registry = registry;
        this.applicationInfoManager = applicationInfoManager;
        try {
            Monitors.registerObject(this);
        } catch (Throwable e) {
            logger.warn("Cannot register the JMX monitor for the InstanceRegistry", e);
        }
    }

    /**
     * Tries to bind synchronously up to the configured retry count, then schedules a
     * background task that keeps checking/repairing the binding.
     */
    @PostConstruct
    public void start() {
        int retries = serverConfig.getEIPBindRebindRetries();
        for (int i = 0; i < retries; i++) {
            try {
                if (alreadyBound()) {
                    break;
                }
                bind();
            } catch (Throwable e) {
                logger.error("Cannot bind to IP", e);
                try {
                    Thread.sleep(IP_BIND_SLEEP_TIME_MS);
                } catch (InterruptedException e1) {
                    // Fix: restore the interrupt status before converting to unchecked.
                    Thread.currentThread().interrupt();
                    throw new RuntimeException(e1);
                }
            }
        }
        // Schedule a timer which periodically checks for IP binding.
        timer.schedule(new IPBindingTask(), serverConfig.getEIPBindingRetryIntervalMsWhenUnbound());
    }

    /**
     * Detaches the candidate ENI (if any) and stops the periodic binding task,
     * retrying detachment up to the configured count.
     */
    @PreDestroy
    public void shutdown() {
        timer.cancel();
        for (int i = 0; i < serverConfig.getEIPBindRebindRetries(); i++) {
            try {
                unbind();
                break;
            } catch (Exception e) {
                logger.warn("Cannot unbind the IP from the instance", e);
                try {
                    Thread.sleep(IP_BIND_SLEEP_TIME_MS);
                } catch (InterruptedException e1) {
                    // Fix: restore the interrupt status before converting to unchecked.
                    Thread.currentThread().interrupt();
                    throw new RuntimeException(e1);
                }
            }
        }
    }

    /**
     * Returns true when one of this instance's attached network interfaces already
     * carries a candidate private ip.
     */
    public boolean alreadyBound() throws MalformedURLException {
        InstanceInfo myInfo = applicationInfoManager.getInfo();
        String myInstanceId = ((AmazonInfo) myInfo.getDataCenterInfo()).get(AmazonInfo.MetaDataKey.instanceId);
        AmazonEC2 ec2Service = getEC2Service();
        List<InstanceNetworkInterface> instanceNetworkInterfaces = instanceData(myInstanceId, ec2Service).getNetworkInterfaces();
        List<String> candidateIPs = getCandidateIps();
        for (String ip : candidateIPs) {
            for (InstanceNetworkInterface ini : instanceNetworkInterfaces) {
                if (ip.equals(ini.getPrivateIpAddress())) {
                    logger.info("My instance {} seems to be already associated with the ip {}", myInstanceId, ip);
                    return true;
                }
            }
        }
        return false;
    }

    /**
     * Binds an ENI to the instance.
     *
     * The candidate ENI's are deduced in the same way the EIP binder works: via dns
     * records or via service urls, depending on configuration.
     *
     * It will try to attach the first ENI that is: available, for this subnet, and in
     * the list of candidate ENI's (preferring earlier entries of the candidate list).
     *
     * @throws MalformedURLException if a configured service url cannot be parsed
     */
    public void bind() throws MalformedURLException {
        // Fix: use the injected ApplicationInfoManager rather than the static
        // singleton accessor, consistent with every other method in this class.
        InstanceInfo myInfo = applicationInfoManager.getInfo();
        String myInstanceId = ((AmazonInfo) myInfo.getDataCenterInfo()).get(AmazonInfo.MetaDataKey.instanceId);
        String myZone = ((AmazonInfo) myInfo.getDataCenterInfo()).get(AmazonInfo.MetaDataKey.availabilityZone);

        final List<String> ips = getCandidateIps();
        // Prefer interfaces whose ip appears earliest in the candidate list.
        Ordering<NetworkInterface> ipsOrder = Ordering.natural().onResultOf(new Function<NetworkInterface, Integer>() {
            public Integer apply(NetworkInterface networkInterface) {
                return ips.indexOf(networkInterface.getPrivateIpAddress());
            }
        });

        AmazonEC2 ec2Service = getEC2Service();
        String subnetId = instanceData(myInstanceId, ec2Service).getSubnetId();
        DescribeNetworkInterfacesResult result = ec2Service
                .describeNetworkInterfaces(new DescribeNetworkInterfacesRequest()
                        .withFilters(new Filter("private-ip-address", ips))
                        .withFilters(new Filter("status", Lists.newArrayList("available")))
                        .withFilters(new Filter("subnet-id", Lists.newArrayList(subnetId)))
                );

        if (result.getNetworkInterfaces().isEmpty()) {
            logger.info("No ip is free to be associated with this instance. Candidate ips are: {} for zone: {}", ips, myZone);
        } else {
            NetworkInterface selected = ipsOrder.min(result.getNetworkInterfaces());
            ec2Service.attachNetworkInterface(
                    new AttachNetworkInterfaceRequest()
                            .withNetworkInterfaceId(selected.getNetworkInterfaceId())
                            .withDeviceIndex(1)
                            .withInstanceId(myInstanceId)
            );
        }
    }

    /**
     * Unbind the IP that this instance is associated with: detaches the first
     * attached interface whose private ip is in the candidate list.
     */
    public void unbind() throws Exception {
        InstanceInfo myInfo = applicationInfoManager.getInfo();
        String myInstanceId = ((AmazonInfo) myInfo.getDataCenterInfo()).get(AmazonInfo.MetaDataKey.instanceId);

        AmazonEC2 ec2 = getEC2Service();
        List<InstanceNetworkInterface> result = instanceData(myInstanceId, ec2).getNetworkInterfaces();
        List<String> ips = getCandidateIps();
        for (InstanceNetworkInterface networkInterface : result) {
            if (ips.contains(networkInterface.getPrivateIpAddress())) {
                String attachmentId = networkInterface.getAttachment().getAttachmentId();
                ec2.detachNetworkInterface(new DetachNetworkInterfaceRequest().withAttachmentId(attachmentId));
                break;
            }
        }
    }

    /** Fetches the EC2 instance description for the given instance id. */
    private Instance instanceData(String myInstanceId, AmazonEC2 ec2) {
        return ec2.describeInstances(new DescribeInstancesRequest().withInstanceIds(myInstanceId)).getReservations().get(0).getInstances().get(0);
    }

    /**
     * Based on shouldUseDnsForFetchingServiceUrls configuration, either retrieves
     * candidates from dns records or from configuration properties, and resolves
     * each entry to a private ip (either directly, or by decoding an EC2 internal
     * hostname of the form ip-a-b-c-d....).
     *
     * @throws MalformedURLException if a candidate url cannot be parsed
     * @throws RuntimeException if no candidates are configured for this zone
     * @throws IllegalArgumentException if a hostname cannot be decoded to an ip
     */
    public List<String> getCandidateIps() throws MalformedURLException {
        InstanceInfo myInfo = applicationInfoManager.getInfo();
        String myZone = ((AmazonInfo) myInfo.getDataCenterInfo()).get(AmazonInfo.MetaDataKey.availabilityZone);

        Collection<String> candidates = clientConfig.shouldUseDnsForFetchingServiceUrls()
                ? getIPsForZoneFromDNS(myZone)
                : getIPsForZoneFromConfig(myZone);

        if (candidates == null || candidates.isEmpty()) {
            throw new RuntimeException("Could not get any ips from the pool for zone :" + myZone);
        }
        List<String> ips = Lists.newArrayList();

        for (String candidate : candidates) {
            String host = new URL(candidate).getHost();
            if (InetAddresses.isInetAddress(host)) {
                ips.add(host);
            } else {
                // ip-172-31-55-172.ec2.internal -> ip-172-31-55-172
                String firstPartOfHost = Splitter.on(".").splitToList(host).get(0);
                // ip-172-31-55-172 -> [ip,172,31,55,172]
                List<String> hostParts = Splitter.on("-").splitToList(firstPartOfHost);
                // Fix: previously a malformed hostname caused a raw
                // IndexOutOfBoundsException from subList; fail with a clear message.
                if (hostParts.size() < 5) {
                    throw new IllegalArgumentException("Illegal internal hostname " + host);
                }
                // [172,31,55,172] -> 172.31.55.172
                String ip = Joiner.on(".").join(hostParts.subList(1, 5));
                if (InetAddresses.isInetAddress(ip)) {
                    ips.add(ip);
                } else {
                    throw new IllegalArgumentException("Illegal internal hostname " + host + " translated to '" + ip + "'");
                }
            }
        }
        return ips;
    }

    /** Candidate urls from static configuration for the given zone. */
    private Collection<String> getIPsForZoneFromConfig(String myZone) {
        return clientConfig.getEurekaServerServiceUrls(myZone);
    }

    /** Candidate urls discovered through the zone's DNS txt records. */
    private Collection<String> getIPsForZoneFromDNS(String myZone) {
        return EndpointUtils.getServiceUrlsFromDNS(
                clientConfig,
                myZone,
                true,
                new EndpointUtils.InstanceInfoBasedUrlRandomizer(applicationInfoManager.getInfo())
        );
    }

    /**
     * Builds the EC2 client: explicit credentials when configured, otherwise the
     * instance profile; endpoint derived from the configured region.
     */
    private AmazonEC2 getEC2Service() {
        String aWSAccessId = serverConfig.getAWSAccessId();
        String aWSSecretKey = serverConfig.getAWSSecretKey();

        AmazonEC2 ec2Service;
        if (null != aWSAccessId && !"".equals(aWSAccessId)
                && null != aWSSecretKey && !"".equals(aWSSecretKey)) {
            ec2Service = new AmazonEC2Client(new BasicAWSCredentials(aWSAccessId, aWSSecretKey));
        } else {
            ec2Service = new AmazonEC2Client(new InstanceProfileCredentialsProvider());
        }

        String region = clientConfig.getRegion();
        region = region.trim().toLowerCase();
        ec2Service.setEndpoint("ec2." + region + ".amazonaws.com");
        return ec2Service;
    }

    /**
     * Periodic task that re-checks the binding; when unbound it first re-syncs the
     * (possibly stale) registry from a peer, then binds, and finally reschedules
     * itself at the interval appropriate for the bound/unbound state.
     */
    private class IPBindingTask extends TimerTask {
        @Override
        public void run() {
            boolean alreadyBound = false;
            try {
                alreadyBound = alreadyBound();
                // If the ip is not bound, the registry could be stale. First sync up the registry from the
                // neighboring node before trying to bind the IP
                if (!alreadyBound) {
                    registry.clearRegistry();
                    int count = registry.syncUp();
                    registry.openForTraffic(applicationInfoManager, count);
                } else {
                    // An ip is already bound
                    return;
                }
                bind();
            } catch (Throwable e) {
                logger.error("Could not bind to IP", e);
            } finally {
                if (alreadyBound) {
                    timer.schedule(new IPBindingTask(), serverConfig.getEIPBindingRetryIntervalMs());
                } else {
                    timer.schedule(new IPBindingTask(), serverConfig.getEIPBindingRetryIntervalMsWhenUnbound());
                }
            }
        }
    }
}
| 6,975 |
0 | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka/aws/AwsBinderDelegate.java | package com.netflix.eureka.aws;
import com.netflix.appinfo.ApplicationInfoManager;
import com.netflix.discovery.EurekaClientConfig;
import com.netflix.eureka.EurekaServerConfig;
import com.netflix.eureka.registry.PeerAwareInstanceRegistry;
import javax.annotation.PostConstruct;
import javax.annotation.PreDestroy;
import javax.inject.Inject;
import javax.inject.Singleton;
@Singleton
public class AwsBinderDelegate implements AwsBinder {
private final AwsBinder delegate;
@Inject
public AwsBinderDelegate(EurekaServerConfig serverConfig,
EurekaClientConfig clientConfig,
PeerAwareInstanceRegistry registry,
ApplicationInfoManager applicationInfoManager) {
AwsBindingStrategy bindingStrategy = serverConfig.getBindingStrategy();
switch (bindingStrategy) {
case ROUTE53:
delegate = new Route53Binder(serverConfig, clientConfig, applicationInfoManager);
break;
case EIP:
delegate = new EIPManager(serverConfig, clientConfig, registry, applicationInfoManager);
break;
case ENI:
delegate = new ElasticNetworkInterfaceBinder(serverConfig, clientConfig, registry, applicationInfoManager);
break;
default:
throw new IllegalArgumentException("Unexpected BindingStrategy " + bindingStrategy);
}
}
@Override
@PostConstruct
public void start() {
try {
delegate.start();
} catch (Exception e) {
throw new RuntimeException(e);
}
}
@Override
@PreDestroy
public void shutdown() {
try {
delegate.shutdown();
} catch (Exception e) {
throw new RuntimeException(e);
}
}
} | 6,976 |
0 | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka/aws/AsgClient.java | package com.netflix.eureka.aws;
import com.netflix.appinfo.InstanceInfo;
/**
 * Client abstraction for querying and updating AWS AutoScaling group (ASG) state.
 */
public interface AsgClient {

    /** Returns whether the ASG associated with {@code instanceInfo} is enabled. */
    boolean isASGEnabled(InstanceInfo instanceInfo);

    /** Marks the named ASG as enabled or disabled. */
    void setStatus(String asgName, boolean enabled);
}
| 6,977 |
0 | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka/aws/EIPManager.java | /*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.eureka.aws;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.Timer;
import java.util.TimerTask;
import com.amazonaws.auth.BasicAWSCredentials;
import com.amazonaws.auth.InstanceProfileCredentialsProvider;
import com.amazonaws.services.ec2.AmazonEC2;
import com.amazonaws.services.ec2.AmazonEC2Client;
import com.amazonaws.services.ec2.model.Address;
import com.amazonaws.services.ec2.model.AssociateAddressRequest;
import com.amazonaws.services.ec2.model.DescribeAddressesRequest;
import com.amazonaws.services.ec2.model.DescribeAddressesResult;
import com.amazonaws.services.ec2.model.DisassociateAddressRequest;
import com.netflix.appinfo.AmazonInfo;
import com.netflix.appinfo.AmazonInfo.MetaDataKey;
import com.netflix.appinfo.ApplicationInfoManager;
import com.netflix.appinfo.DataCenterInfo.Name;
import com.netflix.appinfo.InstanceInfo;
import com.netflix.discovery.EurekaClientConfig;
import com.netflix.discovery.endpoint.EndpointUtils;
import com.netflix.eureka.EurekaServerConfig;
import com.netflix.eureka.registry.PeerAwareInstanceRegistry;
import com.netflix.servo.monitor.Monitors;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.annotation.PostConstruct;
import javax.annotation.PreDestroy;
import javax.inject.Inject;
import javax.inject.Singleton;
/**
* An AWS specific <em>elastic ip</em> binding utility for binding eureka
* servers for a well known <code>IP address</code>.
*
* <p>
* <em>Eureka</em> clients talk to <em>Eureka</em> servers bound with well known
* <code>IP addresses</code> since that is the most reliable mechanism to
* discover the <em>Eureka</em> servers. When Eureka servers come up they bind
* themselves to a well known <em>elastic ip</em>
* </p>
*
* <p>
* This binding mechanism gravitates towards one eureka server per zone for
* resilience. At least one elastic ip should be slotted for each eureka server in
* a zone. If more than eureka server is launched per zone and there are not
* enough elastic ips slotted, the server tries to pick a free EIP slotted for other
* zones and if it still cannot find a free EIP, waits and keeps trying.
* </p>
*
* @author Karthik Ranganathan, Greg Kim
*
*/
@Singleton
public class EIPManager implements AwsBinder {
    private static final Logger logger = LoggerFactory.getLogger(EIPManager.class);
    // us-east-1 public EC2 hostnames omit the region segment; see getEIPsFromServiceUrls.
    private static final String US_EAST_1 = "us-east-1";
    // Pause between successive bind/unbind attempts.
    private static final int EIP_BIND_SLEEP_TIME_MS = 1000;
    // Daemon timer that periodically re-checks/repairs the EIP binding.
    private static final Timer timer = new Timer("Eureka-EIPBinder", true);

    private final EurekaServerConfig serverConfig;
    private final EurekaClientConfig clientConfig;
    private final PeerAwareInstanceRegistry registry;
    private final ApplicationInfoManager applicationInfoManager;
    /**
     * @param serverConfig           supplies AWS credentials and EIP retry settings
     * @param clientConfig           supplies region/zone service urls (EIP pool source)
     * @param registry               the peer-aware instance registry
     * @param applicationInfoManager source of this instance's AWS metadata
     */
    @Inject
    public EIPManager(EurekaServerConfig serverConfig,
                      EurekaClientConfig clientConfig,
                      PeerAwareInstanceRegistry registry,
                      ApplicationInfoManager applicationInfoManager) {
        this.serverConfig = serverConfig;
        this.clientConfig = clientConfig;
        this.registry = registry;
        this.applicationInfoManager = applicationInfoManager;
        try {
            Monitors.registerObject(this);
        } catch (Throwable e) {
            // Monitoring is best-effort; never fail construction over JMX issues.
            logger.warn("Cannot register the JMX monitor for the InstanceRegistry", e);
        }
    }
@PostConstruct
public void start() {
try {
handleEIPBinding();
} catch (InterruptedException e) {
throw new RuntimeException(e);
}
}
@PreDestroy
public void shutdown() {
timer.cancel();
for (int i = 0; i < serverConfig.getEIPBindRebindRetries(); i++) {
try {
unbindEIP();
break;
} catch (Exception e) {
logger.warn("Cannot unbind the EIP from the instance");
try {
Thread.sleep(1000);
} catch (InterruptedException e1) {
throw new RuntimeException(e1);
}
}
}
}
    /**
     * Handles EIP binding process in AWS Cloud: tries to establish the binding
     * synchronously up to {@code getEIPBindRebindRetries()} times (pausing between
     * failed attempts), then schedules a background task that keeps checking and
     * repairing the binding.
     *
     * @throws InterruptedException if interrupted while pausing between attempts
     */
    private void handleEIPBinding() throws InterruptedException {
        int retries = serverConfig.getEIPBindRebindRetries();
        // Bind to EIP if needed
        for (int i = 0; i < retries; i++) {
            try {
                if (isEIPBound()) {
                    break;
                } else {
                    bindEIP();
                }
            } catch (Throwable e) {
                logger.error("Cannot bind to EIP", e);
                Thread.sleep(EIP_BIND_SLEEP_TIME_MS);
            }
        }
        // Schedule a timer which periodically checks for EIP binding.
        timer.schedule(new EIPBindingTask(), serverConfig.getEIPBindingRetryIntervalMsWhenUnbound());
    }
/**
* Checks if an EIP is already bound to the instance.
* @return true if an EIP is bound, false otherwise
*/
public boolean isEIPBound() {
InstanceInfo myInfo = applicationInfoManager.getInfo();
String myInstanceId = ((AmazonInfo) myInfo.getDataCenterInfo()).get(MetaDataKey.instanceId);
String myZone = ((AmazonInfo) myInfo.getDataCenterInfo()).get(MetaDataKey.availabilityZone);
String myPublicIP = ((AmazonInfo) myInfo.getDataCenterInfo()).get(MetaDataKey.publicIpv4);
Collection<String> candidateEIPs = getCandidateEIPs(myInstanceId, myZone);
for (String eipEntry : candidateEIPs) {
if (eipEntry.equals(myPublicIP)) {
logger.info("My instance {} seems to be already associated with the public ip {}",
myInstanceId, myPublicIP);
return true;
}
}
return false;
}
    /**
     * Checks if an EIP is bound and optionally binds the EIP.
     *
     * The list of EIPs are arranged with the EIPs allocated in the zone first
     * followed by other EIPs.
     *
     * If an EIP is already bound to this instance this method simply returns. Otherwise, this method tries to find
     * an unused EIP based on information from AWS. If it cannot find any unused EIP this method, it will be retried
     * for a specified interval.
     *
     * One of the following scenarios can happen here :
     *
     *  1) If the instance is already bound to an EIP as deemed by AWS, no action is taken.
     *  2) If an EIP is already bound to another instance as deemed by AWS, that EIP is skipped.
     *  3) If an EIP is not already bound to an instance and if this instance is not bound to an EIP, then
     *     the EIP is bound to this instance.
     */
    public void bindEIP() {
        InstanceInfo myInfo = applicationInfoManager.getInfo();
        String myInstanceId = ((AmazonInfo) myInfo.getDataCenterInfo()).get(MetaDataKey.instanceId);
        String myZone = ((AmazonInfo) myInfo.getDataCenterInfo()).get(MetaDataKey.availabilityZone);

        Collection<String> candidateEIPs = getCandidateEIPs(myInstanceId, myZone);

        AmazonEC2 ec2Service = getEC2Service();
        boolean isMyinstanceAssociatedWithEIP = false;
        Address selectedEIP = null;

        for (String eipEntry : candidateEIPs) {
            try {
                String associatedInstanceId;

                // Check with AWS, if this EIP is already been used by another instance
                DescribeAddressesRequest describeAddressRequest = new DescribeAddressesRequest().withPublicIps(eipEntry);
                DescribeAddressesResult result = ec2Service.describeAddresses(describeAddressRequest);
                if ((result.getAddresses() != null) && (!result.getAddresses().isEmpty())) {
                    Address eipAddress = result.getAddresses().get(0);

                    associatedInstanceId = eipAddress.getInstanceId();
                    // This EIP is not used by any other instance, hence mark it for selection if it is not
                    // already marked.
                    if (((associatedInstanceId == null) || (associatedInstanceId.isEmpty()))) {
                        if (selectedEIP == null) {
                            selectedEIP = eipAddress;
                        }
                    // NOTE: deliberate assignment-in-condition — records whether the
                    // matched EIP is already ours while also branching on it.
                    } else if (isMyinstanceAssociatedWithEIP = (associatedInstanceId.equals(myInstanceId))) {
                        // This EIP is associated with an instance, check if this is the same as the current instance.
                        // If it is the same, stop searching for an EIP as this instance is already associated with an
                        // EIP
                        selectedEIP = eipAddress;
                        break;
                    } else {
                        // The EIP is used by some other instance, hence skip it
                        logger.warn("The selected EIP {} is associated with another instance {} according to AWS," +
                                " hence skipping this", eipEntry, associatedInstanceId);
                    }
                }
            } catch (Throwable t) {
                logger.error("Failed to bind elastic IP: {} to {}", eipEntry, myInstanceId, t);
            }
        }
        if (null != selectedEIP) {
            String publicIp = selectedEIP.getPublicIp();
            // Only bind if the EIP is not already associated
            if (!isMyinstanceAssociatedWithEIP) {

                AssociateAddressRequest associateAddressRequest = new AssociateAddressRequest()
                        .withInstanceId(myInstanceId);

                // VPC addresses are associated by allocation id, EC2-Classic by public ip.
                String domain = selectedEIP.getDomain();
                if ("vpc".equals(domain)) {
                    associateAddressRequest.setAllocationId(selectedEIP.getAllocationId());
                } else {
                    associateAddressRequest.setPublicIp(publicIp);
                }

                ec2Service.associateAddress(associateAddressRequest);
                logger.info("\n\n\nAssociated {} running in zone: {} to elastic IP: {}", myInstanceId, myZone, publicIp);
            }
            logger.info("My instance {} seems to be already associated with the EIP {}", myInstanceId, publicIp);
        } else {
            logger.info("No EIP is free to be associated with this instance. Candidate EIPs are: {}", candidateEIPs);
        }
    }
    /**
     * Unbind the EIP that this instance is associated with.
     *
     * No-op when the instance has no Amazon data center info or no public ipv4
     * address. Dissociation uses the association id for VPC addresses and the
     * public ip for EC2-Classic.
     *
     * @throws RuntimeException wrapping any failure from the AWS dissociate call
     */
    public void unbindEIP() throws Exception {
        InstanceInfo myInfo = applicationInfoManager.getInfo();
        String myPublicIP = null;
        if (myInfo != null
                && myInfo.getDataCenterInfo().getName() == Name.Amazon) {
            myPublicIP = ((AmazonInfo) myInfo.getDataCenterInfo())
                    .get(MetaDataKey.publicIpv4);
            if (myPublicIP == null) {
                logger.info("Instance is not associated with an EIP. Will not try to unbind");
                return;
            }

            try {
                AmazonEC2 ec2Service = getEC2Service();
                DescribeAddressesRequest describeAddressRequest = new DescribeAddressesRequest()
                        .withPublicIps(myPublicIP);

                DescribeAddressesResult result = ec2Service.describeAddresses(describeAddressRequest);
                if ((result.getAddresses() != null) && (!result.getAddresses().isEmpty())) {
                    Address eipAddress = result.getAddresses().get(0);
                    DisassociateAddressRequest dissociateRequest = new DisassociateAddressRequest();
                    String domain = eipAddress.getDomain();
                    if ("vpc".equals(domain)) {
                        dissociateRequest.setAssociationId(eipAddress.getAssociationId());
                    } else {
                        dissociateRequest.setPublicIp(eipAddress.getPublicIp());
                    }

                    ec2Service.disassociateAddress(dissociateRequest);
                    logger.info("Dissociated the EIP {} from this instance", myPublicIP);
                }
            } catch (Throwable e) {
                throw new RuntimeException("Cannot dissociate address from this instance", e);
            }
        }
    }
/**
* Get the list of EIPs in the order of preference depending on instance zone.
*
* @param myInstanceId
* the instance id for this instance
* @param myZone
* the zone where this instance is in
* @return Collection containing the list of available EIPs
*/
public Collection<String> getCandidateEIPs(String myInstanceId, String myZone) {
if (myZone == null) {
myZone = "us-east-1d";
}
Collection<String> eipCandidates = clientConfig.shouldUseDnsForFetchingServiceUrls()
? getEIPsForZoneFromDNS(myZone)
: getEIPsForZoneFromConfig(myZone);
if (eipCandidates == null || eipCandidates.size() == 0) {
throw new RuntimeException("Could not get any elastic ips from the EIP pool for zone :" + myZone);
}
return eipCandidates;
}
/**
* Get the list of EIPs from the configuration.
*
* @param myZone
* - the zone in which the instance resides.
* @return collection of EIPs to choose from for binding.
*/
private Collection<String> getEIPsForZoneFromConfig(String myZone) {
List<String> ec2Urls = clientConfig.getEurekaServerServiceUrls(myZone);
return getEIPsFromServiceUrls(ec2Urls);
}
/**
* Get the list of EIPs from the ec2 urls.
*
* @param ec2Urls
* the ec2urls for which the EIP needs to be obtained.
* @return collection of EIPs.
*/
/**
 * Extracts the EIPs encoded in EC2 public hostnames of the form
 * {@code ...ec2-W-X-Y-Z[.region].compute...}.
 *
 * @param ec2Urls the ec2 urls for which the EIPs need to be obtained.
 * @return collection of EIPs (dotted-quad strings); never null.
 */
private Collection<String> getEIPsFromServiceUrls(List<String> ec2Urls) {
    List<String> returnedUrls = new ArrayList<>();
    String region = clientConfig.getRegion();
    // us-east-1 hostnames historically have no region segment before ".compute".
    String regionPhrase = "";
    if (!US_EAST_1.equals(region)) {
        regionPhrase = "." + region;
    }
    String eipSuffix = regionPhrase + ".compute";
    for (String cname : ec2Urls) {
        int beginIndex = cname.indexOf("ec2-");
        if (-1 < beginIndex) {
            // Search only after the "ec2-" marker so a spurious earlier match
            // cannot produce an inverted substring range.
            int endIndex = cname.indexOf(eipSuffix, beginIndex);
            if (endIndex < beginIndex + 4) {
                // "ec2-" is present but the expected ".compute" suffix is not;
                // previously this threw StringIndexOutOfBoundsException. Skip it.
                continue;
            }
            String eipStr = cname.substring(beginIndex + 4, endIndex);
            // The hostname encodes the IP with dashes, e.g. ec2-54-12-0-1 -> 54.12.0.1
            String eip = eipStr.replace('-', '.');
            returnedUrls.add(eip);
        }
        // Otherwise, if CNAME doesn't contain "ec2-", do nothing. Reasons include:
        // Systems without public addresses - purely attached to corp lan via AWS Direct Connect
        // Use of EC2 network adapters that are attached to an instance after startup
    }
    return returnedUrls;
}
/**
* Get the list of EIPS from the DNS.
*
* <p>
* This mechanism looks for the EIP pool in the zone the instance is in by
* looking up the DNS name <code>{zone}.{region}.{domainName}</code>. The
* zone is fetched from the {@link InstanceInfo} object;the region is picked
* up from the specified configuration
* {@link com.netflix.discovery.EurekaClientConfig#getRegion()};the domain name is picked up from
* the specified configuration {@link com.netflix.discovery.EurekaClientConfig#getEurekaServerDNSName()}
* with a "txt." prefix (see {@link com.netflix.discovery.endpoint.EndpointUtils
* #getZoneBasedDiscoveryUrlsFromRegion(com.netflix.discovery.EurekaClientConfig, String)}.
* </p>
*
* @param myZone
* the zone where this instance exist in.
* @return the collection of EIPs that exist in the zone this instance is
* in.
*/
private Collection<String> getEIPsForZoneFromDNS(String myZone) {
    // Resolve the zone's service URLs via DNS (randomized per this instance's
    // identity), then translate the EC2 hostnames into their EIPs.
    return getEIPsFromServiceUrls(EndpointUtils.getServiceUrlsFromDNS(
            clientConfig,
            myZone,
            true,
            new EndpointUtils.InstanceInfoBasedUrlRandomizer(applicationInfoManager.getInfo())));
}
/**
* Gets the EC2 service object to call AWS APIs.
*
* @return the EC2 service object to call AWS APIs.
*/
/**
 * Gets the EC2 service object to call AWS APIs.
 *
 * <p>Uses explicitly configured credentials when both access id and secret key
 * are set; otherwise falls back to the instance profile (IAM role).</p>
 *
 * @return the EC2 service object to call AWS APIs.
 */
private AmazonEC2 getEC2Service() {
    String aWSAccessId = serverConfig.getAWSAccessId();
    String aWSSecretKey = serverConfig.getAWSSecretKey();

    AmazonEC2 ec2Service;
    if (aWSAccessId != null && !aWSAccessId.isEmpty()
            && aWSSecretKey != null && !aWSSecretKey.isEmpty()) {
        ec2Service = new AmazonEC2Client(new BasicAWSCredentials(aWSAccessId, aWSSecretKey));
    } else {
        ec2Service = new AmazonEC2Client(new InstanceProfileCredentialsProvider());
    }

    // Use a locale-independent lower-casing: the default-locale toLowerCase()
    // can corrupt ASCII region names (e.g. Turkish dotless-i) in the endpoint.
    String region = clientConfig.getRegion();
    region = region.trim().toLowerCase(java.util.Locale.ROOT);
    ec2Service.setEndpoint("ec2." + region + ".amazonaws.com");
    return ec2Service;
}
/**
* An EIP binding timer task which constantly polls for EIP in the
* same zone and binds it to itself.If the EIP is taken away for some
* reason, this task tries to get the EIP back. Hence it is advised to take
* one EIP assignment per instance in a zone.
*/
private class EIPBindingTask extends TimerTask {
    @Override
    public void run() {
        boolean isEIPBound = false;
        try {
            isEIPBound = isEIPBound();
            // If the EIP is not bound, the registry could be stale. First sync up the registry from the
            // neighboring node before trying to bind the EIP
            if (!isEIPBound) {
                registry.clearRegistry();
                int count = registry.syncUp();
                registry.openForTraffic(applicationInfoManager, count);
            } else {
                // An EIP is already bound; nothing to do this round. Note that the
                // finally block below still runs, so the task is re-scheduled.
                return;
            }
            bindEIP();
        } catch (Throwable e) {
            // Best-effort: log and keep retrying on the schedule set up below.
            logger.error("Could not bind to EIP", e);
        } finally {
            // Always reschedule: poll at the normal interval while bound (to detect
            // the EIP being taken away), and retry faster while unbound.
            if (isEIPBound) {
                timer.schedule(new EIPBindingTask(), serverConfig.getEIPBindingRetryIntervalMs());
            } else {
                timer.schedule(new EIPBindingTask(), serverConfig.getEIPBindingRetryIntervalMsWhenUnbound());
            }
        }
    }
};
}
| 6,978 |
0 | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka/aws/AwsAsgUtil.java | /*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.eureka.aws;
import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.Timer;
import java.util.TimerTask;
import java.util.concurrent.Callable;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

import javax.inject.Inject;
import javax.inject.Singleton;

import com.amazonaws.ClientConfiguration;
import com.amazonaws.auth.BasicAWSCredentials;
import com.amazonaws.auth.BasicSessionCredentials;
import com.amazonaws.auth.InstanceProfileCredentialsProvider;
import com.amazonaws.services.autoscaling.AmazonAutoScaling;
import com.amazonaws.services.autoscaling.AmazonAutoScalingClient;
import com.amazonaws.services.autoscaling.model.AutoScalingGroup;
import com.amazonaws.services.autoscaling.model.DescribeAutoScalingGroupsRequest;
import com.amazonaws.services.autoscaling.model.DescribeAutoScalingGroupsResult;
import com.amazonaws.services.autoscaling.model.SuspendedProcess;
import com.amazonaws.services.securitytoken.AWSSecurityTokenService;
import com.amazonaws.services.securitytoken.AWSSecurityTokenServiceClient;
import com.amazonaws.services.securitytoken.model.AssumeRoleRequest;
import com.amazonaws.services.securitytoken.model.AssumeRoleResult;
import com.amazonaws.services.securitytoken.model.Credentials;
import com.google.common.base.Strings;
import com.google.common.cache.CacheBuilder;
import com.google.common.cache.CacheLoader;
import com.google.common.cache.LoadingCache;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;
import com.netflix.appinfo.AmazonInfo;
import com.netflix.appinfo.AmazonInfo.MetaDataKey;
import com.netflix.appinfo.ApplicationInfoManager;
import com.netflix.appinfo.DataCenterInfo;
import com.netflix.appinfo.InstanceInfo;
import com.netflix.discovery.EurekaClientConfig;
import com.netflix.discovery.shared.Application;
import com.netflix.discovery.shared.Applications;
import com.netflix.eureka.EurekaServerConfig;
import com.netflix.eureka.registry.InstanceRegistry;
import com.netflix.servo.annotations.DataSourceType;
import com.netflix.servo.monitor.Monitors;
import com.netflix.servo.monitor.Stopwatch;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * A utility class for querying and updating information about amazon
 * autoscaling groups using the AWS APIs.
 *
 * @author Karthik Ranganathan
 *
 */
@Singleton
public class AwsAsgUtil implements AsgClient {
    private static final Logger logger = LoggerFactory.getLogger(AwsAsgUtil.class);

    // Name of the ASG scaling process whose suspension marks the ASG as out of service.
    private static final String PROP_ADD_TO_LOAD_BALANCER = "AddToLoadBalancer";

    // AWS account id of this server, resolved once from its own instance metadata.
    private static final String accountId = getAccountId();

    // Cached STS session credentials per foreign account id. This map is read and
    // written both by caller threads (synchronous cache loads) and by the 1-10 thread
    // cache-reload executor below, so it must be thread-safe (was a plain HashMap,
    // which is a data race). A rare duplicate STS call under contention is harmless.
    private final Map<String, Credentials> stsCredentials = new ConcurrentHashMap<>();

    // Executor used for asynchronous refreshes of the ASG-enabled cache.
    private final ExecutorService cacheReloadExecutor = new ThreadPoolExecutor(
            1, 10, 60, TimeUnit.SECONDS, new LinkedBlockingQueue<Runnable>(),
            new ThreadFactory() {
                @Override
                public Thread newThread(Runnable r) {
                    Thread thread = new Thread(r, "Eureka-AWS-isASGEnabled");
                    thread.setDaemon(true);
                    return thread;
                }
            });

    private ListeningExecutorService listeningCacheReloadExecutor = MoreExecutors.listeningDecorator(cacheReloadExecutor);

    // Cache for the AWS ASG information
    private final Timer timer = new Timer("Eureka-ASGCacheRefresh", true);
    private final com.netflix.servo.monitor.Timer loadASGInfoTimer = Monitors.newTimer("Eureka-loadASGInfo");

    private final EurekaServerConfig serverConfig;
    private final EurekaClientConfig clientConfig;
    private final InstanceRegistry registry;

    // Maps (accountId, asgName) -> whether the ASG is enabled for traffic.
    private final LoadingCache<CacheKey, Boolean> asgCache;
    private final AmazonAutoScaling awsClient;

    @Inject
    public AwsAsgUtil(EurekaServerConfig serverConfig,
                      EurekaClientConfig clientConfig,
                      InstanceRegistry registry) {
        this.serverConfig = serverConfig;
        this.clientConfig = clientConfig;
        this.registry = registry;
        this.asgCache = CacheBuilder
                .newBuilder().initialCapacity(500)
                .expireAfterAccess(serverConfig.getASGCacheExpiryTimeoutMs(), TimeUnit.MILLISECONDS)
                .build(new CacheLoader<CacheKey, Boolean>() {
                    @Override
                    public Boolean load(CacheKey key) throws Exception {
                        return isASGEnabledinAWS(key.asgAccountId, key.asgName);
                    }

                    @Override
                    public ListenableFuture<Boolean> reload(final CacheKey key, Boolean oldValue) throws Exception {
                        // Reload asynchronously so cache readers never block on AWS calls.
                        return listeningCacheReloadExecutor.submit(new Callable<Boolean>() {
                            @Override
                            public Boolean call() throws Exception {
                                return load(key);
                            }
                        });
                    }
                });
        this.awsClient = getAmazonAutoScalingClient();
        this.awsClient.setEndpoint("autoscaling." + clientConfig.getRegion() + ".amazonaws.com");
        this.timer.schedule(getASGUpdateTask(),
                serverConfig.getASGUpdateIntervalMs(),
                serverConfig.getASGUpdateIntervalMs());

        try {
            Monitors.registerObject(this);
        } catch (Throwable e) {
            logger.warn("Cannot register the JMX monitor :", e);
        }
    }

    /**
     * Return the status of the ASG whether is enabled or disabled for service.
     * The value is picked up from the cache except the very first time.
     *
     * @param instanceInfo the instanceInfo for the lookup
     * @return true if enabled, false otherwise
     */
    public boolean isASGEnabled(InstanceInfo instanceInfo) {
        CacheKey cacheKey = new CacheKey(getAccountId(instanceInfo, accountId), instanceInfo.getASGName());
        Boolean result = asgCache.getIfPresent(cacheKey);
        if (result != null) {
            return result;
        } else {
            if (!serverConfig.shouldUseAwsAsgApi()) {
                // Disabled, cached values (if any) are still being returned if the caller makes
                // a decision to call the disabled client during some sort of transitioning
                // period, but no new values will be fetched while disabled.
                logger.info(("'{}' is not cached at the moment and won't be fetched because querying AWS ASGs "
                                + "has been disabled via the config, returning the fallback value."),
                        cacheKey);
                return true;
            }

            logger.info("Cache value for asg {} does not exist yet, async refreshing.", cacheKey.asgName);
            // Only do an async refresh if it does not yet exist. Do this to refrain from calling aws api too much
            asgCache.refresh(cacheKey);
            // Optimistic fallback while the refresh is in flight.
            return true;
        }
    }

    /**
     * Sets the status of the ASG.
     *
     * @param asgName The name of the ASG
     * @param enabled true to enable, false to disable
     */
    public void setStatus(String asgName, boolean enabled) {
        String asgAccountId = getASGAccount(asgName);
        asgCache.put(new CacheKey(asgAccountId, asgName), enabled);
    }

    /**
     * Check if the ASG is disabled. The amazon flag "AddToLoadBalancer" is
     * queried to figure out if it is or not.
     *
     * @param asgAccountId the account the ASG resides in (null or the local
     *        account id selects the same-account code path)
     * @param asgName
     *            - The name of the ASG for which the status needs to be queried
     * @return - true if the ASG is disabled, false otherwise
     */
    private boolean isAddToLoadBalancerSuspended(String asgAccountId, String asgName) {
        AutoScalingGroup asg;
        if(asgAccountId == null || asgAccountId.equals(accountId)) {
            asg = retrieveAutoScalingGroup(asgName);
        } else {
            asg = retrieveAutoScalingGroupCrossAccount(asgAccountId, asgName);
        }
        if (asg == null) {
            logger.warn("The ASG information for {} could not be found. So returning false.", asgName);
            return false;
        }
        return isAddToLoadBalancerSuspended(asg);
    }

    /**
     * Checks if the load balancer addition is disabled or not.
     *
     * @param asg
     *            - The ASG object for which the status needs to be checked
     * @return - true, if the load balancer addition is suspended, false
     *         otherwise.
     */
    private boolean isAddToLoadBalancerSuspended(AutoScalingGroup asg) {
        List<SuspendedProcess> suspendedProcesses = asg.getSuspendedProcesses();
        for (SuspendedProcess process : suspendedProcesses) {
            if (PROP_ADD_TO_LOAD_BALANCER.equals(process.getProcessName())) {
                return true;
            }
        }
        return false;
    }

    /**
     * Queries AWS to get the autoscaling information given the asgName.
     *
     * @param asgName
     *            - The name of the ASG.
     * @return - The auto scaling group information, or null when the name is
     *         blank or the ASG does not exist.
     */
    private AutoScalingGroup retrieveAutoScalingGroup(String asgName) {
        if (Strings.isNullOrEmpty(asgName)) {
            logger.warn("null asgName specified, not attempting to retrieve AutoScalingGroup from AWS");
            return null;
        }

        // You can pass one name or a list of names in the request
        DescribeAutoScalingGroupsRequest request = new DescribeAutoScalingGroupsRequest()
                .withAutoScalingGroupNames(asgName);
        DescribeAutoScalingGroupsResult result = awsClient
                .describeAutoScalingGroups(request);
        List<AutoScalingGroup> asgs = result.getAutoScalingGroups();
        if (asgs.isEmpty()) {
            return null;
        } else {
            return asgs.get(0);
        }
    }

    /**
     * Establishes a short-lived STS session by assuming the configured
     * cross-account role in the given account.
     *
     * @param asgAccount the foreign account id whose role should be assumed
     * @return temporary session credentials for that account
     */
    private Credentials initializeStsSession(String asgAccount) {
        AWSSecurityTokenService sts = new AWSSecurityTokenServiceClient(new InstanceProfileCredentialsProvider());
        String region = clientConfig.getRegion();
        if (!region.equals("us-east-1")) {
            sts.setEndpoint("sts." + region + ".amazonaws.com");
        }

        String roleName = serverConfig.getListAutoScalingGroupsRoleName();
        String roleArn = "arn:aws:iam::" + asgAccount + ":role/" + roleName;

        AssumeRoleResult assumeRoleResult = sts.assumeRole(new AssumeRoleRequest()
                .withRoleArn(roleArn)
                .withRoleSessionName("sts-session-" + asgAccount)
        );

        return assumeRoleResult.getCredentials();
    }

    /**
     * Queries AWS for the named ASG in a different account, assuming the
     * cross-account role and caching the resulting STS credentials until
     * they are about to expire.
     *
     * @param asgAccount the account id the ASG resides in
     * @param asgName the name of the ASG
     * @return the auto scaling group information, or null when it does not exist
     */
    private AutoScalingGroup retrieveAutoScalingGroupCrossAccount(String asgAccount, String asgName) {
        logger.debug("Getting cross account ASG for asgName: {}, asgAccount: {}", asgName, asgAccount);

        Credentials credentials = stsCredentials.get(asgAccount);

        if (credentials == null || credentials.getExpiration().getTime() < System.currentTimeMillis() + 1000) {
            // Missing or expiring within 1s: refresh the STS session. Concurrent
            // refreshes for the same account may duplicate work, which is benign.
            stsCredentials.put(asgAccount, initializeStsSession(asgAccount));
            credentials = stsCredentials.get(asgAccount);
        }

        ClientConfiguration clientConfiguration = new ClientConfiguration()
                .withConnectionTimeout(serverConfig.getASGQueryTimeoutMs());

        AmazonAutoScaling autoScalingClient = new AmazonAutoScalingClient(
                new BasicSessionCredentials(
                        credentials.getAccessKeyId(),
                        credentials.getSecretAccessKey(),
                        credentials.getSessionToken()
                ),
                clientConfiguration
        );

        String region = clientConfig.getRegion();
        if (!region.equals("us-east-1")) {
            autoScalingClient.setEndpoint("autoscaling." + region + ".amazonaws.com");
        }

        DescribeAutoScalingGroupsRequest request = new DescribeAutoScalingGroupsRequest()
                .withAutoScalingGroupNames(asgName);
        DescribeAutoScalingGroupsResult result = autoScalingClient.describeAutoScalingGroups(request);
        List<AutoScalingGroup> asgs = result.getAutoScalingGroups();
        if (asgs.isEmpty()) {
            return null;
        } else {
            return asgs.get(0);
        }
    }

    /**
     * Queries AWS to see if the load balancer flag is suspended.
     *
     * @param asgAccountid the accountId this asg resides in, if applicable (null will use the default accountId)
     * @param asgName the name of the asg
     * @return true, if the load balancer flag is not suspended, false otherwise.
     *         Falls back to TRUE when AWS cannot be queried.
     */
    private Boolean isASGEnabledinAWS(String asgAccountid, String asgName) {
        try {
            Stopwatch t = this.loadASGInfoTimer.start();
            try {
                return !isAddToLoadBalancerSuspended(asgAccountid, asgName);
            } finally {
                // Always stop the stopwatch, even when the AWS call throws
                // (previously the stopwatch leaked on exception).
                t.stop();
            }
        } catch (Throwable e) {
            logger.error("Could not get ASG information from AWS: ", e);
        }
        return Boolean.TRUE;
    }

    /**
     * Gets the number of elements in the ASG cache.
     *
     * @return the long value representing the number of elements in the ASG
     *         cache.
     */
    @com.netflix.servo.annotations.Monitor(name = "numOfElementsinASGCache",
            description = "Number of elements in the ASG Cache", type = DataSourceType.GAUGE)
    public long getNumberofElementsinASGCache() {
        return asgCache.size();
    }

    /**
     * Gets the number of ASG queries done in the period.
     *
     * @return the long value representing the number of ASG queries done in the
     *         period.
     */
    @com.netflix.servo.annotations.Monitor(name = "numOfASGQueries",
            description = "Number of queries made to AWS to retrieve ASG information", type = DataSourceType.COUNTER)
    public long getNumberofASGQueries() {
        return asgCache.stats().loadCount();
    }

    /**
     * Gets the number of ASG queries that failed because of some reason.
     *
     * @return the long value representing the number of ASG queries that failed
     *         because of some reason.
     */
    @com.netflix.servo.annotations.Monitor(name = "numOfASGQueryFailures",
            description = "Number of queries made to AWS to retrieve ASG information and that failed",
            type = DataSourceType.COUNTER)
    public long getNumberofASGQueryFailures() {
        return asgCache.stats().loadExceptionCount();
    }

    /**
     * Gets the task that updates the ASG information periodically.
     *
     * @return TimerTask that updates the ASG information periodically.
     */
    private TimerTask getASGUpdateTask() {
        return new TimerTask() {
            @Override
            public void run() {
                try {
                    if (!serverConfig.shouldUseAwsAsgApi()) {
                        // Disabled via the config, no-op.
                        return;
                    }

                    // First get the active ASG names
                    Set<CacheKey> cacheKeys = getCacheKeys();
                    if (logger.isDebugEnabled()) {
                        logger.debug("Trying to refresh the keys for {}", Arrays.toString(cacheKeys.toArray()));
                    }
                    for (CacheKey key : cacheKeys) {
                        try {
                            asgCache.refresh(key);
                        } catch (Throwable e) {
                            // One bad key must not abort refreshing the others.
                            logger.error("Error updating the ASG cache for {}", key, e);
                        }
                    }
                } catch (Throwable e) {
                    logger.error("Error updating the ASG cache", e);
                }
            }
        };
    }

    /**
     * Get the cacheKeys of all the ASG to which query AWS for.
     *
     * <p>
     * The names are obtained from the {@link com.netflix.eureka.registry.InstanceRegistry} which is then
     * used for querying the AWS.
     * </p>
     *
     * @return the set of ASG cacheKeys (asgName + accountId).
     */
    private Set<CacheKey> getCacheKeys() {
        Set<CacheKey> cacheKeys = new HashSet<>();
        Applications apps = registry.getApplicationsFromLocalRegionOnly();
        for (Application app : apps.getRegisteredApplications()) {
            for (InstanceInfo instanceInfo : app.getInstances()) {
                String localAccountId = getAccountId(instanceInfo, accountId);
                String asgName = instanceInfo.getASGName();
                if (asgName != null) {
                    CacheKey key = new CacheKey(localAccountId, asgName);
                    cacheKeys.add(key);
                }
            }
        }
        return cacheKeys;
    }

    /**
     * Get the AWS account id where an ASG is created.
     * Warning: This is expensive as it loops through all instances currently registered.
     *
     * @param asgName The name of the ASG
     * @return the account id
     */
    private String getASGAccount(String asgName) {
        Applications apps = registry.getApplicationsFromLocalRegionOnly();
        for (Application app : apps.getRegisteredApplications()) {
            for (InstanceInfo instanceInfo : app.getInstances()) {
                String thisAsgName = instanceInfo.getASGName();
                if (thisAsgName != null && thisAsgName.equals(asgName)) {
                    String localAccountId = getAccountId(instanceInfo, null);
                    if (localAccountId != null) {
                        return localAccountId;
                    }
                }
            }
        }

        logger.info("Couldn't get the ASG account for {}, using the default accountId instead", asgName);
        return accountId;
    }

    /**
     * Resolves the account id recorded in the instance's Amazon metadata,
     * falling back to the supplied id when not available.
     */
    private String getAccountId(InstanceInfo instanceInfo, String fallbackId) {
        String localAccountId = null;
        DataCenterInfo dataCenterInfo = instanceInfo.getDataCenterInfo();
        if (dataCenterInfo instanceof AmazonInfo) {
            localAccountId = ((AmazonInfo) dataCenterInfo).get(MetaDataKey.accountId);
        }
        return localAccountId == null ? fallbackId : localAccountId;
    }

    /**
     * Creates the auto-scaling client, preferring explicitly configured
     * credentials and falling back to the instance profile (IAM role).
     */
    private AmazonAutoScaling getAmazonAutoScalingClient() {
        String aWSAccessId = serverConfig.getAWSAccessId();
        String aWSSecretKey = serverConfig.getAWSSecretKey();
        ClientConfiguration clientConfiguration = new ClientConfiguration()
                .withConnectionTimeout(serverConfig.getASGQueryTimeoutMs());

        if (null != aWSAccessId && !"".equals(aWSAccessId) && null != aWSSecretKey && !"".equals(aWSSecretKey)) {
            return new AmazonAutoScalingClient(
                    new BasicAWSCredentials(aWSAccessId, aWSSecretKey),
                    clientConfiguration);
        } else {
            return new AmazonAutoScalingClient(
                    new InstanceProfileCredentialsProvider(),
                    clientConfiguration);
        }
    }

    /**
     * Reads this server's own account id from its Amazon instance metadata.
     */
    private static String getAccountId() {
        InstanceInfo myInfo = ApplicationInfoManager.getInstance().getInfo();
        return ((AmazonInfo) myInfo.getDataCenterInfo()).get(MetaDataKey.accountId);
    }

    /**
     * Immutable cache key: an ASG name qualified by the account it lives in.
     */
    private static class CacheKey {
        final String asgAccountId;
        final String asgName;

        CacheKey(String asgAccountId, String asgName) {
            this.asgAccountId = asgAccountId;
            this.asgName = asgName;
        }

        @Override
        public String toString() {
            return "CacheKey{" +
                    "asgName='" + asgName + '\'' +
                    ", asgAccountId='" + asgAccountId + '\'' +
                    '}';
        }

        @Override
        public boolean equals(Object o) {
            if (this == o) return true;
            if (!(o instanceof CacheKey)) return false;

            CacheKey cacheKey = (CacheKey) o;

            if (asgAccountId != null ? !asgAccountId.equals(cacheKey.asgAccountId) : cacheKey.asgAccountId != null)
                return false;
            if (asgName != null ? !asgName.equals(cacheKey.asgName) : cacheKey.asgName != null) return false;

            return true;
        }

        @Override
        public int hashCode() {
            int result = asgName != null ? asgName.hashCode() : 0;
            result = 31 * result + (asgAccountId != null ? asgAccountId.hashCode() : 0);
            return result;
        }
    }
}
| 6,979 |
0 | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka/aws/AwsBindingStrategy.java | package com.netflix.eureka.aws;
/**
 * The mechanism a Eureka server uses to bind its addressable identity on AWS:
 * an Elastic IP (EIP), a Route53 DNS record, or an Elastic Network Interface (ENI).
 */
public enum AwsBindingStrategy {
    EIP, ROUTE53, ENI
}
| 6,980 |
0 | Create_ds/eureka/eureka-test-utils/src/test/java/com/netflix/discovery | Create_ds/eureka/eureka-test-utils/src/test/java/com/netflix/discovery/util/InstanceInfoGeneratorTest.java | /*
* Copyright 2015 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.discovery.util;
import java.util.Iterator;
import com.netflix.appinfo.InstanceInfo;
import org.junit.Test;
import static org.hamcrest.CoreMatchers.equalTo;
import static org.hamcrest.CoreMatchers.is;
import static org.junit.Assert.assertThat;
/**
* @author Tomasz Bak
*/
public class InstanceInfoGeneratorTest {
@Test
public void testInstanceInfoStream() throws Exception {
Iterator<InstanceInfo> it = InstanceInfoGenerator.newBuilder(4, "app1", "app2").build().serviceIterator();
assertThat(it.next().getAppName(), is(equalTo("APP1")));
assertThat(it.next().getAppName(), is(equalTo("APP2")));
assertThat(it.next().getAppName(), is(equalTo("APP1")));
assertThat(it.next().getAppName(), is(equalTo("APP2")));
}
} | 6,981 |
0 | Create_ds/eureka/eureka-test-utils/src/test/java/com/netflix/discovery/shared | Create_ds/eureka/eureka-test-utils/src/test/java/com/netflix/discovery/shared/transport/SimpleEurekaHttpServerTest.java | /*
* Copyright 2015 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.discovery.shared.transport;
import java.net.URI;
import com.google.common.base.Preconditions;
import com.netflix.appinfo.EurekaAccept;
import com.netflix.discovery.converters.wrappers.CodecWrappers.JacksonJson;
import com.netflix.discovery.shared.resolver.DefaultEndpoint;
import com.netflix.discovery.shared.transport.jersey.JerseyEurekaHttpClientFactory;
import org.junit.After;
/**
* @author Tomasz Bak
*/
public class SimpleEurekaHttpServerTest extends EurekaHttpClientCompatibilityTestSuite {
private TransportClientFactory httpClientFactory;
private EurekaHttpClient eurekaHttpClient;
@Override
@After
public void tearDown() throws Exception {
httpClientFactory.shutdown();
super.tearDown();
}
@Override
protected EurekaHttpClient getEurekaHttpClient(URI serviceURI) {
Preconditions.checkState(eurekaHttpClient == null, "EurekaHttpClient has been already created");
httpClientFactory = JerseyEurekaHttpClientFactory.newBuilder()
.withClientName("test")
.withMaxConnectionsPerHost(10)
.withMaxTotalConnections(10)
.withDecoder(JacksonJson.class.getSimpleName(), EurekaAccept.full.name())
.withEncoder(JacksonJson.class.getSimpleName())
.build();
this.eurekaHttpClient = httpClientFactory.newClient(new DefaultEndpoint(serviceURI.toString()));
return eurekaHttpClient;
}
} | 6,982 |
0 | Create_ds/eureka/eureka-test-utils/src/main/java/com/netflix/discovery | Create_ds/eureka/eureka-test-utils/src/main/java/com/netflix/discovery/util/DiagnosticClient.java | package com.netflix.discovery.util;
import com.netflix.discovery.shared.Applications;
import com.netflix.eureka.DefaultEurekaServerConfig;
import com.netflix.eureka.EurekaServerConfig;
import com.netflix.eureka.transport.JerseyReplicationClient;
import com.netflix.eureka.resources.DefaultServerCodecs;
import static com.netflix.discovery.util.EurekaEntityFunctions.countInstances;
/**
* A tool for running diagnostic tasks against a discovery server. Currently limited to observing
* of consistency of delta updates.
*
* @author Tomasz Bak
*/
public class DiagnosticClient {

    /** Expected server-side delta refresh cadence; also the fetch interval here. */
    private static final long REFRESH_INTERVAL_MS = 30 * 1000;

    public static void main(String[] args) throws InterruptedException {
        String discoveryURL = args[0];
        long startTime = System.currentTimeMillis();
        EurekaServerConfig serverConfig = new DefaultEurekaServerConfig("eureka.");
        JerseyReplicationClient client = JerseyReplicationClient.createReplicationClient(
                serverConfig,
                new DefaultServerCodecs(serverConfig),
                discoveryURL
        );
        // Bootstrap with a full registry fetch; deltas are merged into this view.
        Applications applications = client.getApplications().getEntity();
        System.out.println("Applications count=" + applications.getRegisteredApplications().size());
        System.out.println("Instance count=" + countInstances(applications));
        while (true) {
            long delay = System.currentTimeMillis() - startTime;
            if (delay >= REFRESH_INTERVAL_MS) {
                System.out.println("Processing delay exceeds 30sec; we may be out of sync");
            } else {
                long waitTime = REFRESH_INTERVAL_MS - delay;
                System.out.println("Waiting " + waitTime / 1000 + "sec before next fetch...");
                // Bug fix: previously slept a fixed 15s regardless of the wait time
                // that was just announced, so fetches ran at twice the intended rate.
                Thread.sleep(waitTime);
            }
            startTime = System.currentTimeMillis();
            Applications delta = client.getDelta().getEntity();
            Applications merged = EurekaEntityFunctions.mergeApplications(applications, delta);
            // A hash-code match means the merged local view is consistent with the server.
            if (merged.getAppsHashCode().equals(delta.getAppsHashCode())) {
                System.out.println("Hash codes match: " + delta.getAppsHashCode() + "(delta count=" + countInstances(delta) + ')');
                applications = merged;
            } else {
                System.out.println("ERROR: hash codes do not match (" + delta.getAppsHashCode() + "(delta) != "
                                + merged.getAppsHashCode() + " (merged) != "
                                + applications.getAppsHashCode() + "(old apps)" +
                                "(delta count=" + countInstances(delta) + ')'
                );
                // Out of sync: recover by re-fetching the full registry.
                applications = client.getApplications().getEntity();
            }
        }
    }
}
| 6,983 |
0 | Create_ds/eureka/eureka-test-utils/src/main/java/com/netflix/discovery | Create_ds/eureka/eureka-test-utils/src/main/java/com/netflix/discovery/util/InstanceInfoGenerator.java | package com.netflix.discovery.util;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.NoSuchElementException;
import com.netflix.appinfo.AmazonInfo;
import com.netflix.appinfo.AmazonInfo.MetaDataKey;
import com.netflix.appinfo.InstanceInfo;
import com.netflix.appinfo.InstanceInfo.ActionType;
import com.netflix.appinfo.InstanceInfo.Builder;
import com.netflix.appinfo.InstanceInfo.InstanceStatus;
import com.netflix.appinfo.InstanceInfo.PortType;
import com.netflix.appinfo.LeaseInfo;
import com.netflix.discovery.shared.Application;
import com.netflix.discovery.shared.Applications;
import static com.netflix.discovery.util.EurekaEntityFunctions.mergeApplications;
import static com.netflix.discovery.util.EurekaEntityFunctions.toApplicationMap;
/**
* Test data generator.
*
* @author Tomasz Bak
*/
public class InstanceInfoGenerator {
// Lease renewal interval (seconds) stamped on every generated LeaseInfo.
public static final int RENEW_INTERVAL = 5;

// Total number of InstanceInfo records to produce, round-robined across appNames.
private final int instanceCount;
// Application names to cycle through when generating instances.
private final String[] appNames;
// Availability zone stamped on every instance (builder default: "us-east-1c").
private final String zone;
// When true, instance ids get an "_<appName>" suffix.
private final boolean taggedId;

// Iterator backing takeDelta(); lazily created on its first call.
private Iterator<InstanceInfo> currentIt;
// Cumulative view of everything handed out via takeDelta()/takeDeltaForDelete(),
// used to compute the apps hash code carried by each delta.
private Applications allApplications = new Applications();
// NOTE(review): these flags are copied from the builder; their use is in
// generateInstanceInfo(), which extends beyond this chunk — confirm there.
private final boolean withMetaData;
private final boolean includeAsg;
// When false, emulates older InstanceInfo payloads that lack the instanceId field.
private final boolean useInstanceId;
/**
 * Builds a generator from the supplied builder, applying a default
 * availability zone ("us-east-1c") when the builder left it unset.
 */
InstanceInfoGenerator(InstanceInfoGeneratorBuilder builder) {
    this.instanceCount = builder.instanceCount;
    this.appNames = builder.appNames;
    // Default the zone when the builder did not specify one.
    this.zone = builder.zone == null ? "us-east-1c" : builder.zone;
    this.taggedId = builder.taggedId;
    this.withMetaData = builder.includeMetaData;
    this.includeAsg = builder.includeAsg;
    this.useInstanceId = builder.useInstanceId;
}
/**
 * Returns the next {@code count} instances as an ADDED delta, folding them
 * into the running cumulative view so the delta carries the cumulative
 * apps hash code.
 */
public Applications takeDelta(int count) {
    if (currentIt == null) {
        // First call: start streaming instances and reset the cumulative view.
        currentIt = serviceIterator();
        allApplications = new Applications();
    }
    List<InstanceInfo> batch = new ArrayList<>();
    int remaining = count;
    while (remaining-- > 0) {
        InstanceInfo instance = currentIt.next();
        instance.setActionType(ActionType.ADDED);
        batch.add(instance);
    }
    Applications delta = EurekaEntityFunctions.toApplications(toApplicationMap(batch));
    allApplications = mergeApplications(allApplications, delta);
    delta.setAppsHashCode(allApplications.getAppsHashCode());
    return delta;
}
/**
 * Returns an iterator producing up to {@code instanceCount} instances,
 * cycling round-robin through the configured application names.
 */
public Iterator<InstanceInfo> serviceIterator() {
    return new Iterator<InstanceInfo>() {
        // Number of instances handed out so far.
        private int produced;
        // Per-application counter used as the instance id within that app.
        private final int[] perAppCounters = new int[appNames.length];
        // Index of the application receiving the next instance.
        private int appIndex;

        @Override
        public boolean hasNext() {
            return produced < instanceCount;
        }

        @Override
        public InstanceInfo next() {
            if (!hasNext()) {
                throw new NoSuchElementException("no more InstanceInfo elements");
            }
            InstanceInfo info = generateInstanceInfo(appIndex, perAppCounters[appIndex], useInstanceId, ActionType.ADDED);
            perAppCounters[appIndex] += 1;
            appIndex = (appIndex + 1) % appNames.length;
            produced += 1;
            return info;
        }

        @Override
        public void remove() {
            throw new IllegalStateException("method not supported");
        }
    };
}
/**
 * Materializes all generated instances into an {@link Applications} structure,
 * grouping instances by application name.
 */
public Applications toApplications() {
    Map<String, Application> appsByName = new HashMap<>();
    Iterator<InstanceInfo> it = serviceIterator();
    while (it.hasNext()) {
        InstanceInfo instanceInfo = it.next();
        // Create each application lazily on the first instance seen for it.
        Application instanceApp = appsByName.computeIfAbsent(instanceInfo.getAppName(), Application::new);
        instanceApp.addInstance(instanceInfo);
    }

    // Do not pass application list to the constructor, as it does not initialize properly Applications
    // data structure.
    Applications applications = new Applications();
    for (Application app : appsByName.values()) {
        applications.addApplication(app);
    }
    applications.shuffleInstances(false);
    applications.setAppsHashCode(applications.getReconcileHashCode());
    applications.setVersion(1L);

    return applications;
}
/**
 * Collects all generated instances into a list.
 */
public List<InstanceInfo> toInstanceList() {
    // Pre-size to the known number of instances the iterator will yield.
    List<InstanceInfo> instances = new ArrayList<>(instanceCount);
    for (Iterator<InstanceInfo> it = serviceIterator(); it.hasNext(); ) {
        instances.add(it.next());
    }
    return instances;
}
/**
 * Returns the first generated instance.
 */
public InstanceInfo first() {
    return toInstanceList().get(0);
}
/**
 * Returns the generated instance at position {@code idx}.
 */
public InstanceInfo take(int idx) {
    List<InstanceInfo> all = toInstanceList();
    return all.get(idx);
}
/**
 * Convenience factory: generates a single instance (with metadata) of a
 * single application and returns it.
 */
public static InstanceInfo takeOne() {
    InstanceInfoGenerator generator = newBuilder(1, 1).withMetaData(true).build();
    return generator.serviceIterator().next();
}
/**
 * Creates a builder for {@code instanceCount} instances spread over
 * {@code applicationCount} applications (see {@code InstanceInfoGeneratorBuilder}).
 */
public static InstanceInfoGeneratorBuilder newBuilder(int instanceCount, int applicationCount) {
    return new InstanceInfoGeneratorBuilder(instanceCount, applicationCount);
}
/**
 * Creates a builder for {@code instanceCount} instances spread over the
 * given application names.
 */
public static InstanceInfoGeneratorBuilder newBuilder(int instanceCount, String... appNames) {
    return new InstanceInfoGeneratorBuilder(instanceCount, appNames);
}
/**
 * Generates {@code instanceCount} instances marked DELETED and returns them
 * as a delta, folding them into the cumulative view so the delta carries the
 * cumulative apps hash code.
 */
public Applications takeDeltaForDelete(boolean useInstanceId, int instanceCount) {
    List<InstanceInfo> deleted = new ArrayList<>();
    for (int idx = 0; idx < instanceCount; idx++) {
        // Both the app index and the per-app instance id follow the loop index.
        deleted.add(generateInstanceInfo(idx, idx, useInstanceId, ActionType.DELETED));
    }
    Applications delete = EurekaEntityFunctions.toApplications(toApplicationMap(deleted));
    allApplications = mergeApplications(allApplications, delete);
    delete.setAppsHashCode(allApplications.getAppsHashCode());
    return delete;
}
    // useInstanceId to false to generate older InstanceInfo types that does not use instanceId field for instance id.
    /**
     * Builds one fully-populated test {@link InstanceInfo}: AWS data center metadata, lease info,
     * secure/unsecure ports and URLs, VIP addresses, and optional ASG name / custom metadata.
     *
     * @param appIndex      index into {@code appNames}; also used to derive IPs and AMI ids
     * @param appInstanceId per-application ordinal, used in host names and the instance id
     * @param useInstanceId when false, emulates legacy records without an explicit instanceId
     * @param actionType    action type stamped on the generated record
     */
    private InstanceInfo generateInstanceInfo(int appIndex, int appInstanceId, boolean useInstanceId, ActionType actionType) {
        String appName = appNames[appIndex];
        String hostName = "instance" + appInstanceId + '.' + appName + ".com";
        String privateHostname = "ip-10.0" + appIndex + "." + appInstanceId + ".compute.internal";
        String publicIp = "20.0." + appIndex + '.' + appInstanceId;
        String privateIp = "192.168." + appIndex + '.' + appInstanceId;
        String ipv6 = "::FFFF:" + publicIp;
        String instanceId = String.format("i-%04d%04d", appIndex, appInstanceId);
        if (taggedId) {
            // Tagged ids embed the application name for easier visual identification in tests.
            instanceId = instanceId + '_' + appName;
        }
        AmazonInfo dataCenterInfo = AmazonInfo.Builder.newBuilder()
                .addMetadata(MetaDataKey.accountId, "testAccountId")
                .addMetadata(MetaDataKey.amiId, String.format("ami-%04d%04d", appIndex, appInstanceId))
                .addMetadata(MetaDataKey.availabilityZone, zone)
                .addMetadata(MetaDataKey.instanceId, instanceId)
                .addMetadata(MetaDataKey.instanceType, "m2.xlarge")
                .addMetadata(MetaDataKey.localHostname, privateHostname)
                .addMetadata(MetaDataKey.localIpv4, privateIp)
                .addMetadata(MetaDataKey.publicHostname, hostName)
                .addMetadata(MetaDataKey.publicIpv4, publicIp)
                .addMetadata(MetaDataKey.ipv6, ipv6)
                .build();
        String unsecureURL = "http://" + hostName + ":8080";
        String secureURL = "https://" + hostName + ":8081";
        long now = System.currentTimeMillis();
        // Lease timestamps are all derived from 'now' with RENEW_INTERVAL offsets.
        LeaseInfo leaseInfo = LeaseInfo.Builder.newBuilder()
                .setDurationInSecs(3 * RENEW_INTERVAL)
                .setRenewalIntervalInSecs(RENEW_INTERVAL)
                .setServiceUpTimestamp(now - RENEW_INTERVAL)
                .setRegistrationTimestamp(now)
                .setEvictionTimestamp(now + 3 * RENEW_INTERVAL)
                .setRenewalTimestamp(now + RENEW_INTERVAL)
                .build();
        Builder builder = useInstanceId
                ? InstanceInfo.Builder.newBuilder().setInstanceId(instanceId)
                : InstanceInfo.Builder.newBuilder();
        builder
                .setActionType(actionType)
                .setAppGroupName(appName + "Group")
                .setAppName(appName)
                .setHostName(hostName)
                .setIPAddr(publicIp)
                .setPort(8080)
                .setSecurePort(8081)
                .enablePort(PortType.SECURE, true)
                .setHealthCheckUrls("/healthcheck", unsecureURL + "/healthcheck", secureURL + "/healthcheck")
                .setHomePageUrl("/homepage", unsecureURL + "/homepage")
                .setStatusPageUrl("/status", unsecureURL + "/status")
                .setLeaseInfo(leaseInfo)
                .setStatus(InstanceStatus.UP)
                .setVIPAddress(appName + ":8080")
                .setSecureVIPAddress(appName + ":8081")
                .setDataCenterInfo(dataCenterInfo)
                .setLastUpdatedTimestamp(System.currentTimeMillis() - 100)
                .setLastDirtyTimestamp(System.currentTimeMillis() - 100)
                .setIsCoordinatingDiscoveryServer(true)
                .enablePort(PortType.UNSECURE, true);
        if (includeAsg) {
            builder.setASGName(appName + "ASG");
        }
        if (withMetaData) {
            builder.add("appKey" + appIndex, Integer.toString(appInstanceId));
        }
        return builder.build();
    }
public static class InstanceInfoGeneratorBuilder {
private final int instanceCount;
private String[] appNames;
private boolean includeMetaData;
private boolean includeAsg = true;
private String zone;
private boolean taggedId;
private boolean useInstanceId = true;
public InstanceInfoGeneratorBuilder(int instanceCount, int applicationCount) {
this.instanceCount = instanceCount;
String[] appNames = new String[applicationCount];
for (int i = 0; i < appNames.length; i++) {
appNames[i] = "application" + i;
}
this.appNames = appNames;
}
public InstanceInfoGeneratorBuilder(int instanceCount, String... appNames) {
this.instanceCount = instanceCount;
this.appNames = appNames;
}
public InstanceInfoGeneratorBuilder withZone(String zone) {
this.zone = zone;
return this;
}
public InstanceInfoGeneratorBuilder withTaggedId(boolean taggedId) {
this.taggedId = taggedId;
return this;
}
public InstanceInfoGeneratorBuilder withMetaData(boolean includeMetaData) {
this.includeMetaData = includeMetaData;
return this;
}
public InstanceInfoGeneratorBuilder withAsg(boolean includeAsg) {
this.includeAsg = includeAsg;
return this;
}
public InstanceInfoGeneratorBuilder withUseInstanceId(boolean useInstanceId) {
this.useInstanceId = useInstanceId;
return this;
}
public InstanceInfoGenerator build() {
return new InstanceInfoGenerator(this);
}
}
}
| 6,984 |
0 | Create_ds/eureka/eureka-test-utils/src/main/java/com/netflix/discovery | Create_ds/eureka/eureka-test-utils/src/main/java/com/netflix/discovery/util/ApplicationFunctions.java | package com.netflix.discovery.util;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import com.netflix.appinfo.InstanceInfo;
import com.netflix.discovery.shared.Application;
import com.netflix.discovery.shared.Applications;
/**
 * Collection of functions operating on {@link Applications} and {@link Application} data
 * structures.
 *
 * @author Tomasz Bak
 * @deprecated Use instead {@link EurekaEntityFunctions}
 */
public final class ApplicationFunctions {

    private ApplicationFunctions() {
        // Static utility class; not instantiable.
    }

    /** Groups the given instances into {@link Application} buckets keyed by application name. */
    public static Map<String, Application> toApplicationMap(List<InstanceInfo> instances) {
        Map<String, Application> result = new HashMap<String, Application>();
        for (InstanceInfo instanceInfo : instances) {
            String name = instanceInfo.getAppName();
            Application app = result.get(name);
            if (app == null) {
                app = new Application(name);
                result.put(name, app);
            }
            app.addInstance(instanceInfo);
        }
        return result;
    }

    /** Wraps the given applications in an {@link Applications} container with refreshed meta data. */
    public static Applications toApplications(Map<String, Application> applicationMap) {
        Applications result = new Applications();
        for (Application app : applicationMap.values()) {
            result.addApplication(app);
        }
        return updateMeta(result);
    }

    /** Set of names of all applications registered in {@code applications}. */
    public static Set<String> applicationNames(Applications applications) {
        Set<String> names = new HashSet<>();
        for (Application app : applications.getRegisteredApplications()) {
            names.add(app.getName());
        }
        return names;
    }

    /** Shallow copy: a new {@link Application} sharing the same {@link InstanceInfo} objects. */
    public static Application copyOf(Application application) {
        Application copy = new Application(application.getName());
        for (InstanceInfo instanceInfo : application.getInstances()) {
            copy.addInstance(instanceInfo);
        }
        return copy;
    }

    /**
     * Merges {@code second} into a copy of {@code first}: ADDED/MODIFIED instances are (re)added,
     * DELETED instances are removed.
     *
     * @throws IllegalArgumentException if the two applications have different names
     */
    public static Application merge(Application first, Application second) {
        if (!first.getName().equals(second.getName())) {
            throw new IllegalArgumentException("Cannot merge applications with different names");
        }
        Application merged = copyOf(first);
        for (InstanceInfo update : second.getInstances()) {
            switch (update.getActionType()) {
                case ADDED:
                case MODIFIED:
                    merged.addInstance(update);
                    break;
                case DELETED:
                    merged.removeInstance(update);
            }
        }
        return merged;
    }

    /** Merges two {@link Applications} collections application-by-application. */
    public static Applications merge(Applications first, Applications second) {
        Set<String> namesInFirst = applicationNames(first);
        Set<String> namesInSecond = applicationNames(second);
        Set<String> union = new HashSet<>(namesInFirst);
        union.addAll(namesInSecond);

        Applications result = new Applications();
        for (String name : union) {
            boolean inFirst = namesInFirst.contains(name);
            boolean inSecond = namesInSecond.contains(name);
            if (inFirst && inSecond) {
                result.addApplication(merge(first.getRegisteredApplications(name), second.getRegisteredApplications(name)));
            } else if (inFirst) {
                result.addApplication(copyOf(first.getRegisteredApplications(name)));
            } else {
                result.addApplication(copyOf(second.getRegisteredApplications(name)));
            }
        }
        return updateMeta(result);
    }

    /** Stamps version and apps hash code; returns the same (mutated) object. */
    public static Applications updateMeta(Applications applications) {
        applications.setVersion(1L);
        applications.setAppsHashCode(applications.getReconcileHashCode());
        return applications;
    }

    /** Total number of instances across all registered applications. */
    public static int countInstances(Applications applications) {
        int total = 0;
        for (Application application : applications.getRegisteredApplications()) {
            total += application.getInstances().size();
        }
        return total;
    }
}
| 6,985 |
0 | Create_ds/eureka/eureka-test-utils/src/main/java/com/netflix/discovery/junit | Create_ds/eureka/eureka-test-utils/src/main/java/com/netflix/discovery/junit/resource/SimpleEurekaHttpServerResource.java | /*
* Copyright 2015 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.discovery.junit.resource;
import com.netflix.discovery.shared.transport.EurekaHttpClient;
import com.netflix.discovery.shared.transport.SimpleEurekaHttpServer;
import org.junit.rules.ExternalResource;
import static org.mockito.Mockito.mock;
/**
 * JUnit {@link ExternalResource} rule that runs a {@link SimpleEurekaHttpServer} backed by a
 * mock {@link EurekaHttpClient} for the duration of each test.
 *
 * @author Tomasz Bak
 */
public class SimpleEurekaHttpServerResource extends ExternalResource {
    // Mock request handler; tests stub expectations on it via getRequestHandler().
    private final EurekaHttpClient requestHandler = mock(EurekaHttpClient.class);
    private SimpleEurekaHttpServer eurekaHttpServer;
    /** Starts the embedded HTTP server, delegating all requests to the mock handler. */
    @Override
    protected void before() throws Throwable {
        eurekaHttpServer = new SimpleEurekaHttpServer(requestHandler);
    }
    /** Shuts the server down if it was successfully started. */
    @Override
    protected void after() {
        if (eurekaHttpServer != null) {
            eurekaHttpServer.shutdown();
        }
    }
    public EurekaHttpClient getRequestHandler() {
        return requestHandler;
    }
    public SimpleEurekaHttpServer getEurekaHttpServer() {
        return eurekaHttpServer;
    }
}
| 6,986 |
0 | Create_ds/eureka/eureka-test-utils/src/main/java/com/netflix/discovery/junit | Create_ds/eureka/eureka-test-utils/src/main/java/com/netflix/discovery/junit/resource/DiscoveryClientResource.java | package com.netflix.discovery.junit.resource;
import javax.ws.rs.core.UriBuilder;
import java.net.URI;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.UUID;
import java.util.concurrent.Callable;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import com.google.common.base.Preconditions;
import com.netflix.appinfo.ApplicationInfoManager;
import com.netflix.appinfo.DataCenterInfo;
import com.netflix.appinfo.EurekaInstanceConfig;
import com.netflix.appinfo.InstanceInfo;
import com.netflix.appinfo.LeaseInfo;
import com.netflix.appinfo.MyDataCenterInstanceConfig;
import com.netflix.config.ConfigurationManager;
import com.netflix.discovery.CacheRefreshedEvent;
import com.netflix.discovery.DefaultEurekaClientConfig;
import com.netflix.discovery.DiscoveryClient;
import com.netflix.discovery.DiscoveryManager;
import com.netflix.discovery.EurekaClient;
import com.netflix.discovery.EurekaClientConfig;
import com.netflix.discovery.shared.transport.SimpleEurekaHttpServer;
import com.netflix.discovery.shared.transport.jersey.Jersey1DiscoveryClientOptionalArgs;
import com.netflix.eventbus.impl.EventBusImpl;
import com.netflix.eventbus.spi.EventBus;
import com.netflix.eventbus.spi.InvalidSubscriberException;
import com.netflix.eventbus.spi.Subscribe;
import org.junit.rules.ExternalResource;
/**
 * JUnit rule for discovery client + collection of static methods for setting it up.
 */
public class DiscoveryClientResource extends ExternalResource {
    public static final String REMOTE_REGION = "myregion";
    public static final String REMOTE_ZONE = "myzone";
    public static final int CLIENT_REFRESH_RATE = 10;
    // Namespace prefix under which all test-scoped Eureka properties are bound.
    public static final String EUREKA_TEST_NAMESPACE = "eurekaTestNamespace.";
    // Every property bound through bindProperty() is remembered here so after() can clear it.
    private static final Set<String> SYSTEM_PROPERTY_TRACKER = new HashSet<>();
    private final boolean registrationEnabled;
    private final boolean registryFetchEnabled;
    private final InstanceInfo instance;
    private final SimpleEurekaHttpServer eurekaHttpServer;
    private final Callable<Integer> portResolverCallable;
    private final List<String> remoteRegions;
    private final String vipFetch;
    private final String userName;
    private final String password;
    private EventBus eventBus;
    private ApplicationInfoManager applicationManager;
    private EurekaClient client;
    // Resources created via fork(); torn down together with this resource in after().
    private final List<DiscoveryClientResource> forkedDiscoveryClientResources = new ArrayList<>();
    private ApplicationInfoManager applicationInfoManager;
    DiscoveryClientResource(DiscoveryClientRuleBuilder builder) {
        this.registrationEnabled = builder.registrationEnabled;
        this.registryFetchEnabled = builder.registryFetchEnabled;
        this.portResolverCallable = builder.portResolverCallable;
        this.eurekaHttpServer = builder.eurekaHttpServer;
        this.instance = builder.instance;
        this.remoteRegions = builder.remoteRegions;
        this.vipFetch = builder.vipFetch;
        this.userName = builder.userName;
        this.password = builder.password;
    }
    /** InstanceInfo of the (lazily created) application info manager. */
    public InstanceInfo getMyInstanceInfo() {
        return createApplicationManager().getInfo();
    }
    /** Event bus wired into the client; forces client creation on first access. */
    public EventBus getEventBus() {
        if (client == null) {
            getClient(); // Lazy initialization
        }
        return eventBus;
    }
    public ApplicationInfoManager getApplicationInfoManager() {
        return applicationInfoManager;
    }
    /**
     * Lazily creates and caches the {@link EurekaClient} from this rule's configuration,
     * also creating the event bus exposed via {@link #getEventBus()}.
     */
    public EurekaClient getClient() {
        if (client == null) {
            try {
                applicationInfoManager = createApplicationManager();
                EurekaClientConfig clientConfig = createEurekaClientConfig();
                Jersey1DiscoveryClientOptionalArgs optionalArgs = new Jersey1DiscoveryClientOptionalArgs();
                eventBus = new EventBusImpl();
                optionalArgs.setEventBus(eventBus);
                client = new DiscoveryClient(applicationInfoManager, clientConfig, optionalArgs);
            } catch (Exception e) {
                throw new RuntimeException(e);
            }
        }
        return client;
    }
    /**
     * Blocks until the client's local registry cache is refreshed or the timeout elapses.
     *
     * @return true if a {@link CacheRefreshedEvent} was observed within the timeout
     */
    public boolean awaitCacheUpdate(long timeout, TimeUnit unit) throws InterruptedException {
        final CountDownLatch latch = new CountDownLatch(1);
        Object eventListener = new Object() {
            @Subscribe
            public void consume(CacheRefreshedEvent event) {
                latch.countDown();
            }
        };
        try {
            getEventBus().registerSubscriber(eventListener);
        } catch (InvalidSubscriberException e) {
            throw new IllegalStateException("Unexpected error during subscriber registration", e);
        }
        try {
            return latch.await(timeout, unit);
        } finally {
            // Always detach the temporary listener, even if the wait timed out.
            getEventBus().unregisterSubscriber(eventListener);
        }
    }
    // Creates (once) the ApplicationInfoManager with a short 1s lease renewal interval.
    private ApplicationInfoManager createApplicationManager() {
        if (applicationManager == null) {
            EurekaInstanceConfig instanceConfig = new MyDataCenterInstanceConfig(EUREKA_TEST_NAMESPACE) {
                @Override
                public String getAppname() {
                    return "discoveryClientTest";
                }
                @Override
                public int getLeaseRenewalIntervalInSeconds() {
                    return 1;
                }
            };
            applicationManager = new ApplicationInfoManager(instanceConfig);
        }
        return applicationManager;
    }
    // Binds all test-scoped client properties, then builds the client config from that namespace.
    private EurekaClientConfig createEurekaClientConfig() throws Exception {
        // Cluster connectivity
        URI serviceURI;
        if (portResolverCallable != null) {
            serviceURI = new URI("http://localhost:" + portResolverCallable.call() + "/eureka/v2/");
        } else if (eurekaHttpServer != null) {
            serviceURI = eurekaHttpServer.getServiceURI();
        } else {
            throw new IllegalStateException("Either port or EurekaHttpServer must be configured");
        }
        if (userName != null) {
            serviceURI = UriBuilder.fromUri(serviceURI).userInfo(userName + ':' + password).build();
        }
        bindProperty(EUREKA_TEST_NAMESPACE + "serviceUrl.default", serviceURI.toString());
        if (remoteRegions != null && !remoteRegions.isEmpty()) {
            StringBuilder regions = new StringBuilder();
            for (String region : remoteRegions) {
                regions.append(',').append(region);
            }
            // substring(1) drops the leading comma inserted before the first region.
            bindProperty(EUREKA_TEST_NAMESPACE + "fetchRemoteRegionsRegistry", regions.substring(1));
        }
        // Registration
        bindProperty(EUREKA_TEST_NAMESPACE + "registration.enabled", Boolean.toString(registrationEnabled));
        bindProperty(EUREKA_TEST_NAMESPACE + "appinfo.initial.replicate.time", Integer.toString(0));
        bindProperty(EUREKA_TEST_NAMESPACE + "appinfo.replicate.interval", Integer.toString(1));
        // Registry fetch
        bindProperty(EUREKA_TEST_NAMESPACE + "shouldFetchRegistry", Boolean.toString(registryFetchEnabled));
        bindProperty(EUREKA_TEST_NAMESPACE + "client.refresh.interval", Integer.toString(1));
        if (vipFetch != null) {
            bindProperty(EUREKA_TEST_NAMESPACE + "registryRefreshSingleVipAddress", vipFetch);
        }
        return new DefaultEurekaClientConfig(EUREKA_TEST_NAMESPACE);
    }
    // Shuts down this client and all forked clients, then clears every property bound for the test.
    @Override
    protected void after() {
        if (client != null) {
            client.shutdown();
        }
        for (DiscoveryClientResource resource : forkedDiscoveryClientResources) {
            resource.after();
        }
        for (String property : SYSTEM_PROPERTY_TRACKER) {
            ConfigurationManager.getConfigInstance().clearProperty(property);
        }
        clearDiscoveryClientConfig();
    }
    /**
     * Returns a builder pre-populated with this resource's settings, whose build() also starts
     * the new resource and registers it for cleanup when this resource is torn down.
     * NOTE(review): remoteRegions may be null when it was never configured, which would make the
     * trailing toArray call throw NPE — confirm fork() is only used with remote regions set.
     */
    public DiscoveryClientRuleBuilder fork() {
        DiscoveryClientRuleBuilder builder = new DiscoveryClientRuleBuilder() {
            @Override
            public DiscoveryClientResource build() {
                DiscoveryClientResource clientResource = super.build();
                try {
                    clientResource.before();
                } catch (Throwable e) {
                    throw new IllegalStateException("Unexpected error during forking the client resource", e);
                }
                forkedDiscoveryClientResources.add(clientResource);
                return clientResource;
            }
        };
        return builder.withInstanceInfo(instance)
                .connectWith(eurekaHttpServer)
                .withPortResolver(portResolverCallable)
                .withRegistration(registrationEnabled)
                .withRegistryFetch(registryFetchEnabled)
                .withRemoteRegions(remoteRegions.toArray(new String[remoteRegions.size()]));
    }
    public static DiscoveryClientRuleBuilder newBuilder() {
        return new DiscoveryClientRuleBuilder();
    }
    /** Binds global (non-namespaced) discovery client properties pointing at the given server. */
    public static void setupDiscoveryClientConfig(int serverPort, String path) {
        ConfigurationManager.getConfigInstance().setProperty("eureka.shouldFetchRegistry", "true");
        ConfigurationManager.getConfigInstance().setProperty("eureka.responseCacheAutoExpirationInSeconds", "10");
        ConfigurationManager.getConfigInstance().setProperty("eureka.client.refresh.interval", CLIENT_REFRESH_RATE);
        ConfigurationManager.getConfigInstance().setProperty("eureka.registration.enabled", "false");
        ConfigurationManager.getConfigInstance().setProperty("eureka.fetchRemoteRegionsRegistry", REMOTE_REGION);
        ConfigurationManager.getConfigInstance().setProperty("eureka.myregion.availabilityZones", REMOTE_ZONE);
        ConfigurationManager.getConfigInstance().setProperty("eureka.serviceUrl.default",
                "http://localhost:" + serverPort + path);
    }
    /** Clears the properties set by {@link #setupDiscoveryClientConfig(int, String)}. */
    public static void clearDiscoveryClientConfig() {
        ConfigurationManager.getConfigInstance().clearProperty("eureka.client.refresh.interval");
        ConfigurationManager.getConfigInstance().clearProperty("eureka.registration.enabled");
        ConfigurationManager.getConfigInstance().clearProperty("eureka.fetchRemoteRegionsRegistry");
        ConfigurationManager.getConfigInstance().clearProperty("eureka.myregion.availabilityZones");
        ConfigurationManager.getConfigInstance().clearProperty("eureka.serviceUrl.default");
        ConfigurationManager.getConfigInstance().clearProperty("eureka.shouldEnforceFetchRegistryAtInit");
    }
    /** Creates a standalone EurekaClient for the given instance using the default client config. */
    public static EurekaClient setupDiscoveryClient(InstanceInfo clientInstanceInfo) {
        DefaultEurekaClientConfig config = new DefaultEurekaClientConfig();
        // setup config in advance, used in initialize converter
        ApplicationInfoManager applicationInfoManager = new ApplicationInfoManager(new MyDataCenterInstanceConfig(), clientInstanceInfo);
        DiscoveryManager.getInstance().setEurekaClientConfig(config);
        EurekaClient client = new DiscoveryClient(applicationInfoManager, config);
        return client;
    }
    /** Legacy-style setup that also initializes the singleton ApplicationInfoManager. */
    public static EurekaClient setupInjector(InstanceInfo clientInstanceInfo) {
        DefaultEurekaClientConfig config = new DefaultEurekaClientConfig();
        // setup config in advance, used in initialize converter
        DiscoveryManager.getInstance().setEurekaClientConfig(config);
        EurekaClient client = new DiscoveryClient(clientInstanceInfo, config);
        ApplicationInfoManager.getInstance().initComponent(new MyDataCenterInstanceConfig());
        return client;
    }
    /** Builder for a minimal InstanceInfo with a random app name and the given renewal interval. */
    public static InstanceInfo.Builder newInstanceInfoBuilder(int renewalIntervalInSecs) {
        InstanceInfo.Builder builder = InstanceInfo.Builder.newBuilder();
        builder.setIPAddr("10.10.101.00");
        builder.setHostName("Hosttt");
        builder.setAppName("EurekaTestApp-" + UUID.randomUUID());
        builder.setDataCenterInfo(new DataCenterInfo() {
            @Override
            public Name getName() {
                return Name.MyOwn;
            }
        });
        builder.setLeaseInfo(LeaseInfo.Builder.newBuilder().setRenewalIntervalInSecs(renewalIntervalInSecs).build());
        return builder;
    }
    // Sets a property and records it for automatic cleanup in after().
    private static void bindProperty(String propertyName, String value) {
        SYSTEM_PROPERTY_TRACKER.add(propertyName);
        ConfigurationManager.getConfigInstance().setProperty(propertyName, value);
    }
    /** Fluent builder for {@link DiscoveryClientResource}. */
    public static class DiscoveryClientRuleBuilder {
        private boolean registrationEnabled;
        private boolean registryFetchEnabled;
        private Callable<Integer> portResolverCallable;
        private InstanceInfo instance;
        private SimpleEurekaHttpServer eurekaHttpServer;
        private List<String> remoteRegions;
        private String vipFetch;
        private String userName;
        private String password;
        public DiscoveryClientRuleBuilder withInstanceInfo(InstanceInfo instance) {
            this.instance = instance;
            return this;
        }
        public DiscoveryClientRuleBuilder withRegistration(boolean enabled) {
            this.registrationEnabled = enabled;
            return this;
        }
        public DiscoveryClientRuleBuilder withRegistryFetch(boolean enabled) {
            this.registryFetchEnabled = enabled;
            return this;
        }
        public DiscoveryClientRuleBuilder withPortResolver(Callable<Integer> portResolverCallable) {
            this.portResolverCallable = portResolverCallable;
            return this;
        }
        public DiscoveryClientRuleBuilder connectWith(SimpleEurekaHttpServer eurekaHttpServer) {
            this.eurekaHttpServer = eurekaHttpServer;
            return this;
        }
        /** Accumulates regions across calls rather than replacing the previous set. */
        public DiscoveryClientRuleBuilder withRemoteRegions(String... remoteRegions) {
            if (this.remoteRegions == null) {
                this.remoteRegions = new ArrayList<>();
            }
            Collections.addAll(this.remoteRegions, remoteRegions);
            return this;
        }
        public DiscoveryClientRuleBuilder withVipFetch(String vipFetch) {
            this.vipFetch = vipFetch;
            return this;
        }
        /** Both credentials are mandatory when basic authentication is configured. */
        public DiscoveryClientRuleBuilder basicAuthentication(String userName, String password) {
            Preconditions.checkNotNull(userName, "HTTP basic authentication user name is null");
            Preconditions.checkNotNull(password, "HTTP basic authentication password is null");
            this.userName = userName;
            this.password = password;
            return this;
        }
        public DiscoveryClientResource build() {
            return new DiscoveryClientResource(this);
        }
    }
}
| 6,987 |
0 | Create_ds/eureka/eureka-test-utils/src/main/java/com/netflix/discovery/shared | Create_ds/eureka/eureka-test-utils/src/main/java/com/netflix/discovery/shared/transport/ClusterSampleData.java | package com.netflix.discovery.shared.transport;
import java.util.Iterator;
import com.netflix.appinfo.InstanceInfo;
import com.netflix.appinfo.InstanceInfo.InstanceStatus;
import com.netflix.discovery.util.InstanceInfoGenerator;
import com.netflix.eureka.EurekaServerConfig;
import com.netflix.eureka.registry.PeerAwareInstanceRegistryImpl.Action;
import com.netflix.eureka.cluster.protocol.ReplicationInstance;
import com.netflix.eureka.cluster.protocol.ReplicationInstanceResponse;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
/**
 * Collection of functions to create different kinds of configuration/data.
 *
 * @author Tomasz Bak
 */
public final class ClusterSampleData {
    public static final long REPLICATION_EXPIRY_TIME_MS = 100;
    public static final long RETRY_SLEEP_TIME_MS = 1;
    public static final long SERVER_UNAVAILABLE_SLEEP_TIME_MS = 1;
    public static final long EUREKA_NODES_UPDATE_INTERVAL_MS = 10;
    // Static utility class; not instantiable.
    private ClusterSampleData() {
    }
    /** Mocked {@link EurekaServerConfig} with small intervals/pool sizes suitable for fast tests. */
    public static EurekaServerConfig newEurekaServerConfig() {
        EurekaServerConfig config = mock(EurekaServerConfig.class);
        // Cluster management related
        when(config.getPeerEurekaNodesUpdateIntervalMs()).thenReturn((int) EUREKA_NODES_UPDATE_INTERVAL_MS);
        // Replication logic related
        when(config.shouldSyncWhenTimestampDiffers()).thenReturn(true);
        when(config.getMaxTimeForReplication()).thenReturn((int) REPLICATION_EXPIRY_TIME_MS);
        when(config.getMaxElementsInPeerReplicationPool()).thenReturn(10);
        when(config.getMaxElementsInStatusReplicationPool()).thenReturn(10);
        when(config.getMaxThreadsForPeerReplication()).thenReturn(1);
        when(config.getMaxThreadsForStatusReplication()).thenReturn(1);
        return config;
    }
    /**
     * Returns the {@code index}-th instance (0-based) of a fresh 10x10 generator.
     * The generator is re-run from the start on every call, so cost grows with {@code index}.
     */
    public static InstanceInfo newInstanceInfo(int index) {
        Iterator<InstanceInfo> instanceGenerator = InstanceInfoGenerator.newBuilder(10, 10)
                .withMetaData(true).build().serviceIterator();
        // Skip to the requested index
        for (int i = 0; i < index; i++) {
            instanceGenerator.next();
        }
        return instanceGenerator.next();
    }
    /** A Register replication item for a sample instance. */
    public static ReplicationInstance newReplicationInstance() {
        return newReplicationInstanceOf(Action.Register, newInstanceInfo(0));
    }
    /**
     * Builds a {@link ReplicationInstance} for the given action. The two String arguments after
     * the timestamp are status values (presumably overridden status and current status — confirm
     * against the ReplicationInstance constructor), and the InstanceInfo payload is attached only
     * for actions that replicate instance data (Register, Heartbeat).
     */
    public static ReplicationInstance newReplicationInstanceOf(Action action, InstanceInfo instance) {
        switch (action) {
            case Register:
                return new ReplicationInstance(
                        instance.getAppName(),
                        instance.getId(),
                        System.currentTimeMillis(),
                        null,
                        instance.getStatus().name(),
                        instance,
                        action
                );
            case Cancel:
                return new ReplicationInstance(
                        instance.getAppName(),
                        instance.getId(),
                        System.currentTimeMillis(),
                        null,
                        null,
                        null,
                        action
                );
            case Heartbeat:
                return new ReplicationInstance(
                        instance.getAppName(),
                        instance.getId(),
                        System.currentTimeMillis(),
                        InstanceStatus.OUT_OF_SERVICE.name(),
                        instance.getStatus().name(),
                        instance,
                        action
                );
            case StatusUpdate:
                return new ReplicationInstance(
                        instance.getAppName(),
                        instance.getId(),
                        System.currentTimeMillis(),
                        null,
                        InstanceStatus.OUT_OF_SERVICE.name(),
                        null,
                        action
                );
            case DeleteStatusOverride:
                return new ReplicationInstance(
                        instance.getAppName(),
                        instance.getId(),
                        System.currentTimeMillis(),
                        null,
                        InstanceStatus.UP.name(),
                        null,
                        action
                );
        }
        // Defensive: reached only if a new Action enum value is added without a case above.
        throw new IllegalStateException("Unexpected action " + action);
    }
    /** Response with HTTP 200 and, optionally, a sample InstanceInfo payload. */
    public static ReplicationInstanceResponse newReplicationInstanceResponse(boolean withInstanceInfo) {
        return new ReplicationInstanceResponse(200, withInstanceInfo ? newInstanceInfo(1) : null);
    }
}
| 6,988 |
0 | Create_ds/eureka/eureka-test-utils/src/main/java/com/netflix/discovery/shared | Create_ds/eureka/eureka-test-utils/src/main/java/com/netflix/discovery/shared/transport/EurekaHttpRequest.java | /*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.discovery.shared.transport;
import java.net.URI;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
/**
 * Immutable value object describing a single HTTP request seen by the test Eureka server:
 * its method, target URI and a snapshot of the request headers.
 */
public class EurekaHttpRequest {

    private final String requestMethod;
    private final URI requestURI;
    private final Map<String, String> headers;

    /**
     * @param headers copied defensively; later changes to the supplied map are not reflected here
     */
    public EurekaHttpRequest(String requestMethod, URI requestURI, Map<String, String> headers) {
        this.requestMethod = requestMethod;
        this.requestURI = requestURI;
        Map<String, String> snapshot = new HashMap<String, String>(headers);
        this.headers = Collections.unmodifiableMap(snapshot);
    }

    public String getRequestMethod() {
        return requestMethod;
    }

    public URI getRequestURI() {
        return requestURI;
    }

    /** @return unmodifiable view of the headers captured at construction time */
    public Map<String, String> getHeaders() {
        return headers;
    }
}
| 6,989 |
0 | Create_ds/eureka/eureka-test-utils/src/main/java/com/netflix/discovery/shared | Create_ds/eureka/eureka-test-utils/src/main/java/com/netflix/discovery/shared/transport/EurekaHttpClientCompatibilityTestSuite.java | /*
* Copyright 2015 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.discovery.shared.transport;
import javax.ws.rs.core.HttpHeaders;
import javax.ws.rs.core.UriBuilder;
import java.net.URI;
import java.util.List;
import java.util.concurrent.CopyOnWriteArrayList;
import com.netflix.appinfo.InstanceInfo;
import com.netflix.appinfo.InstanceInfo.InstanceStatus;
import com.netflix.discovery.shared.Applications;
import com.netflix.discovery.util.EurekaEntityComparators;
import com.netflix.discovery.util.InstanceInfoGenerator;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import static com.netflix.discovery.shared.transport.EurekaHttpResponse.anEurekaHttpResponse;
import static org.hamcrest.CoreMatchers.equalTo;
import static org.hamcrest.CoreMatchers.is;
import static org.hamcrest.CoreMatchers.notNullValue;
import static org.junit.Assert.assertThat;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
/**
* @author Tomasz Bak
*/
public abstract class EurekaHttpClientCompatibilityTestSuite {
    private static final String REMOTE_REGION = "us-east-1";
    // Mock server-side handler; each test stubs its expected interaction on it.
    private final EurekaHttpClient requestHandler = mock(EurekaHttpClient.class);
    // Requests observed by the embedded server, in arrival order (thread-safe list).
    private final List<EurekaHttpRequest> observedHttpRequests = new CopyOnWriteArrayList<>();
    // Records every HTTP request the server receives into observedHttpRequests.
    private final EurekaTransportEventListener transportEventListener = new EurekaTransportEventListener() {
        @Override
        public void onHttpRequest(EurekaHttpRequest request) {
            observedHttpRequests.add(request);
        }
    };
    private SimpleEurekaHttpServer httpServer;
    // Subclasses provide the concrete transport under test via getEurekaHttpClient(URI).
    protected EurekaHttpClientCompatibilityTestSuite() {
    }
    @Before
    public void setUp() throws Exception {
        // Start the embedded server; transportEventListener records every incoming request.
        httpServer = new SimpleEurekaHttpServer(requestHandler, transportEventListener);
    }
    @After
    public void tearDown() throws Exception {
        // Server is always created in setUp, so no null guard is needed here.
        httpServer.shutdown();
    }
    /** Creates the transport client under test, pointed at the given service URI. */
    protected abstract EurekaHttpClient getEurekaHttpClient(URI serviceURI);
    /** Client connected to this suite's embedded server. */
    protected EurekaHttpClient getEurekaHttpClient() {
        return getEurekaHttpClient(getHttpServer().getServiceURI());
    }
    /** Client whose service URI embeds HTTP basic authentication credentials. */
    protected EurekaHttpClient getEurekaClientWithBasicAuthentication(String userName, String password) {
        URI serviceURI = UriBuilder.fromUri(getHttpServer().getServiceURI()).userInfo(userName + ':' + password).build();
        return getEurekaHttpClient(serviceURI);
    }
    /** Embedded server started in {@code setUp}. */
    protected SimpleEurekaHttpServer getHttpServer() {
        return httpServer;
    }
    @Test
    public void testRegisterRequest() throws Exception {
        InstanceInfo instance = InstanceInfoGenerator.takeOne();
        // Stub the server-side handler to acknowledge registration with 204 No Content.
        when(requestHandler.register(instance)).thenReturn(EurekaHttpResponse.status(204));
        EurekaHttpResponse<Void> httpResponse = getEurekaHttpClient().register(instance);
        assertThat(httpResponse.getStatusCode(), is(equalTo(204)));
    }
    @Test
    public void testCancelRequest() throws Exception {
        InstanceInfo instance = InstanceInfoGenerator.takeOne();
        // Stub the handler to accept the lease cancellation with 200 OK.
        when(requestHandler.cancel(instance.getAppName(), instance.getId())).thenReturn(EurekaHttpResponse.status(200));
        EurekaHttpResponse<Void> httpResponse = getEurekaHttpClient().cancel(instance.getAppName(), instance.getId());
        assertThat(httpResponse.getStatusCode(), is(equalTo(200)));
    }
    @Test
    public void testHeartbeatRequest() throws Exception {
        InstanceInfo instance = InstanceInfoGenerator.takeOne();
        // The heartbeat response carries an updated InstanceInfo; verify it survives the round trip.
        InstanceInfo updated = new InstanceInfo.Builder(instance).setHostName("another.host").build();
        when(requestHandler.sendHeartBeat(instance.getAppName(), instance.getId(), null, null)).thenReturn(createResponse(updated));
        EurekaHttpResponse<InstanceInfo> httpResponse = getEurekaHttpClient().sendHeartBeat(instance.getAppName(), instance.getId(), instance, null);
        verifyResponseOkWithEntity(updated, httpResponse);
    }
    @Test
    public void testStatusUpdateRequest() throws Exception {
        InstanceInfo instance = InstanceInfoGenerator.takeOne();
        // Stub an accepted status override to OUT_OF_SERVICE.
        when(requestHandler.statusUpdate(instance.getAppName(), instance.getId(), InstanceStatus.OUT_OF_SERVICE, null))
                .thenReturn(EurekaHttpResponse.status(200));
        EurekaHttpResponse<Void> httpResponse = getEurekaHttpClient().statusUpdate(instance.getAppName(), instance.getId(), InstanceStatus.OUT_OF_SERVICE, instance);
        assertThat(httpResponse.getStatusCode(), is(equalTo(200)));
    }
    @Test
    public void testGetApplicationsRequest() throws Exception {
        // Full registry fetch must return the stubbed Applications payload intact.
        Applications apps = InstanceInfoGenerator.newBuilder(2, 1).build().toApplications();
        when(requestHandler.getApplications()).thenReturn(createResponse(apps));
        EurekaHttpResponse<Applications> httpResponse = getEurekaHttpClient().getApplications();
        verifyResponseOkWithEntity(apps, httpResponse);
    }
@Test
public void testGetApplicationsWithRemoteRegionRequest() throws Exception {
    // Full registry fetch scoped to a remote region.
    Applications expectedApps = InstanceInfoGenerator.newBuilder(2, 1).build().toApplications();
    when(requestHandler.getApplications(REMOTE_REGION)).thenReturn(createResponse(expectedApps));

    EurekaHttpResponse<Applications> response = getEurekaHttpClient().getApplications(REMOTE_REGION);

    verifyResponseOkWithEntity(expectedApps, response);
}
@Test
public void testGetDeltaRequest() throws Exception {
    // Incremental (delta) registry fetch with no region filter.
    Applications expectedDelta = InstanceInfoGenerator.newBuilder(2, 1).build().takeDelta(2);
    when(requestHandler.getDelta()).thenReturn(createResponse(expectedDelta));

    EurekaHttpResponse<Applications> response = getEurekaHttpClient().getDelta();

    verifyResponseOkWithEntity(expectedDelta, response);
}
@Test
public void testGetDeltaWithRemoteRegionRequest() throws Exception {
    // Incremental (delta) registry fetch scoped to a remote region.
    Applications expectedDelta = InstanceInfoGenerator.newBuilder(2, 1).build().takeDelta(2);
    when(requestHandler.getDelta(REMOTE_REGION)).thenReturn(createResponse(expectedDelta));

    EurekaHttpResponse<Applications> response = getEurekaHttpClient().getDelta(REMOTE_REGION);

    verifyResponseOkWithEntity(expectedDelta, response);
}
@Test
public void testGetInstanceRequest() throws Exception {
    // Lookup of a single instance by id only.
    InstanceInfo instanceInfo = InstanceInfoGenerator.takeOne();
    when(requestHandler.getInstance(instanceInfo.getId())).thenReturn(createResponse(instanceInfo));

    EurekaHttpResponse<InstanceInfo> response = getEurekaHttpClient().getInstance(instanceInfo.getId());

    verifyResponseOkWithEntity(instanceInfo, response);
}
@Test
public void testGetApplicationInstanceRequest() throws Exception {
    // Lookup of a single instance by application name and id.
    InstanceInfo instanceInfo = InstanceInfoGenerator.takeOne();
    when(requestHandler.getInstance(instanceInfo.getAppName(), instanceInfo.getId()))
            .thenReturn(createResponse(instanceInfo));

    EurekaHttpResponse<InstanceInfo> response =
            getEurekaHttpClient().getInstance(instanceInfo.getAppName(), instanceInfo.getId());

    verifyResponseOkWithEntity(instanceInfo, response);
}
@Test
public void testGetVipRequest() throws Exception {
    // Query by VIP address, taken from the first generated instance.
    Applications vipApps = InstanceInfoGenerator.newBuilder(1, 2).build().toApplications();
    String vipAddress = vipApps.getRegisteredApplications().get(0).getInstances().get(0).getVIPAddress();
    when(requestHandler.getVip(vipAddress)).thenReturn(createResponse(vipApps));

    EurekaHttpResponse<Applications> response = getEurekaHttpClient().getVip(vipAddress);

    verifyResponseOkWithEntity(vipApps, response);
}
@Test
public void testGetVipWithRemoteRegionRequest() throws Exception {
    // Query by VIP address, scoped to a remote region.
    Applications vipApps = InstanceInfoGenerator.newBuilder(1, 2).build().toApplications();
    String vipAddress = vipApps.getRegisteredApplications().get(0).getInstances().get(0).getVIPAddress();
    when(requestHandler.getVip(vipAddress, REMOTE_REGION)).thenReturn(createResponse(vipApps));

    EurekaHttpResponse<Applications> response = getEurekaHttpClient().getVip(vipAddress, REMOTE_REGION);

    verifyResponseOkWithEntity(vipApps, response);
}
@Test
public void testGetSecureVipRequest() throws Exception {
    // Query by secure VIP address, taken from the first generated instance.
    Applications vipApps = InstanceInfoGenerator.newBuilder(1, 2).build().toApplications();
    String secureVipAddress = vipApps.getRegisteredApplications().get(0).getInstances().get(0).getSecureVipAddress();
    when(requestHandler.getSecureVip(secureVipAddress)).thenReturn(createResponse(vipApps));

    EurekaHttpResponse<Applications> response = getEurekaHttpClient().getSecureVip(secureVipAddress);

    verifyResponseOkWithEntity(vipApps, response);
}
@Test
public void testGetSecureVipWithRemoteRegionRequest() throws Exception {
    // Query by secure VIP address, scoped to a remote region.
    Applications vipApps = InstanceInfoGenerator.newBuilder(1, 2).build().toApplications();
    String secureVipAddress = vipApps.getRegisteredApplications().get(0).getInstances().get(0).getSecureVipAddress();
    when(requestHandler.getSecureVip(secureVipAddress, REMOTE_REGION)).thenReturn(createResponse(vipApps));

    EurekaHttpResponse<Applications> response = getEurekaHttpClient().getSecureVip(secureVipAddress, REMOTE_REGION);

    verifyResponseOkWithEntity(vipApps, response);
}
@Test
public void testStatusUpdateDeleteRequest() throws Exception {
    // Removing a status override should be acknowledged with a plain 200.
    InstanceInfo instanceInfo = InstanceInfoGenerator.takeOne();
    when(requestHandler.deleteStatusOverride(instanceInfo.getAppName(), instanceInfo.getId(), null))
            .thenReturn(EurekaHttpResponse.status(200));

    EurekaHttpResponse<Void> response = getEurekaHttpClient()
            .deleteStatusOverride(instanceInfo.getAppName(), instanceInfo.getId(), instanceInfo);

    assertThat(response.getStatusCode(), is(equalTo(200)));
}
@Test
public void testBasicAuthentication() throws Exception {
    // A client built with credentials must attach an Authorization header to each request.
    InstanceInfo instanceInfo = InstanceInfoGenerator.takeOne();
    when(requestHandler.register(instanceInfo)).thenReturn(EurekaHttpResponse.status(204));

    EurekaHttpResponse<Void> response =
            getEurekaClientWithBasicAuthentication("myuser", "mypassword").register(instanceInfo);

    assertThat(response.getStatusCode(), is(equalTo(204)));
    boolean hasAuthHeader = observedHttpRequests.get(0).getHeaders().containsKey(HttpHeaders.AUTHORIZATION);
    assertThat(hasAuthHeader, is(true));
}
/** Asserts a 200 response whose body deep-equals the expected applications. */
private static void verifyResponseOkWithEntity(Applications original, EurekaHttpResponse<Applications> httpResponse) {
    assertThat(httpResponse.getStatusCode(), is(equalTo(200)));
    Applications entity = httpResponse.getEntity();
    assertThat(entity, is(notNullValue()));
    assertThat(EurekaEntityComparators.equal(entity, original), is(true));
}
/** Asserts a 200 response whose body deep-equals the expected instance. */
private static void verifyResponseOkWithEntity(InstanceInfo original, EurekaHttpResponse<InstanceInfo> httpResponse) {
    assertThat(httpResponse.getStatusCode(), is(equalTo(200)));
    InstanceInfo entity = httpResponse.getEntity();
    assertThat(entity, is(notNullValue()));
    assertThat(EurekaEntityComparators.equal(entity, original), is(true));
}
/** Builds a 200 response carrying the given entity with a JSON content type. */
private static <T> EurekaHttpResponse<T> createResponse(T entity) {
    return anEurekaHttpResponse(200, entity)
            .headers(HttpHeaders.CONTENT_TYPE, "application/json")
            .build();
}
}
| 6,990 |
0 | Create_ds/eureka/eureka-test-utils/src/main/java/com/netflix/discovery/shared | Create_ds/eureka/eureka-test-utils/src/main/java/com/netflix/discovery/shared/transport/SimpleEurekaHttpServer.java | /*
* Copyright 2015 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.discovery.shared.transport;
import javax.servlet.http.HttpServletResponse;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.OutputStream;
import java.net.InetSocketAddress;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.HashMap;
import java.util.Map;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import com.netflix.appinfo.InstanceInfo;
import com.netflix.appinfo.InstanceInfo.InstanceStatus;
import com.netflix.discovery.converters.wrappers.CodecWrappers;
import com.netflix.discovery.converters.wrappers.CodecWrappers.JacksonJson;
import com.netflix.discovery.converters.wrappers.DecoderWrapper;
import com.netflix.discovery.converters.wrappers.EncoderWrapper;
import com.netflix.discovery.shared.Applications;
import com.sun.net.httpserver.Headers;
import com.sun.net.httpserver.HttpExchange;
import com.sun.net.httpserver.HttpHandler;
import com.sun.net.httpserver.HttpServer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* HTTP server with Eureka compatible REST API that delegates client request to the provided {@link EurekaHttpClient}
* implementation. It is very lightweight implementation that can be used in unit test without incurring to much
* overhead.
*
* @author Tomasz Bak
*/
public class SimpleEurekaHttpServer {

    private static final Logger logger = LoggerFactory.getLogger(SimpleEurekaHttpServer.class);

    // Routing patterns compiled once up front. Pattern.compile is relatively expensive and
    // Pattern instances are immutable and thread-safe, so re-compiling them per request
    // (as the handlers previously did) is wasted work.
    private static final Pattern APPS_ROOT_PATTERN = Pattern.compile("/v2/apps[/]?");
    private static final Pattern APPS_DELTA_PATTERN = Pattern.compile("/v2/apps/delta[/]?");
    private static final Pattern APP_PATTERN = Pattern.compile("/v2/apps/([^/]+)(/)?");
    private static final Pattern APP_INSTANCE_PATTERN = Pattern.compile("/v2/apps/([^/]+)/([^/]+)");
    private static final Pattern APP_INSTANCE_STATUS_PATTERN = Pattern.compile("/v2/apps/([^/]+)/([^/]+)/status");
    private static final Pattern VIP_PATTERN = Pattern.compile("/v2/vips/([^/]+)");
    private static final Pattern SECURE_VIP_PATTERN = Pattern.compile("/v2/svips/([^/]+)");
    private static final Pattern INSTANCE_PATTERN = Pattern.compile("/v2/instances/([^/]+)");

    private final EurekaHttpClient requestHandler;
    private final EurekaTransportEventListener eventListener;

    private final HttpServer httpServer;

    private final EncoderWrapper encoder = CodecWrappers.getEncoder(JacksonJson.class);
    private final DecoderWrapper decoder = CodecWrappers.getDecoder(JacksonJson.class);

    /**
     * Creates and starts the server on an ephemeral port, with no request event listener.
     *
     * @param requestHandler delegate that services the decoded REST calls
     * @throws IOException if the underlying HTTP server cannot be created
     */
    public SimpleEurekaHttpServer(EurekaHttpClient requestHandler) throws IOException {
        this(requestHandler, null);
    }

    /**
     * Creates and starts the server on an ephemeral port.
     *
     * @param requestHandler delegate that services the decoded REST calls
     * @param eventListener  optional listener notified of every incoming request before dispatch (may be null)
     * @throws IOException if the underlying HTTP server cannot be created
     */
    public SimpleEurekaHttpServer(EurekaHttpClient requestHandler, EurekaTransportEventListener eventListener) throws IOException {
        this.requestHandler = requestHandler;
        this.eventListener = eventListener;
        this.httpServer = HttpServer.create(new InetSocketAddress(0), 1);
        httpServer.createContext("/v2", createEurekaV2Handle());
        httpServer.setExecutor(null); // handle requests on the server's own dispatch thread
        httpServer.start();
    }

    /** Stops the server immediately, without a grace period. */
    public void shutdown() {
        httpServer.stop(0);
    }

    /**
     * Returns the base service URI of the running server ({@code http://localhost:<port>/v2/}).
     */
    public URI getServiceURI() {
        try {
            return new URI("http://localhost:" + getServerPort() + "/v2/");
        } catch (URISyntaxException e) {
            throw new IllegalStateException("Cannot parse service URI", e);
        }
    }

    /** Returns the ephemeral port the server is bound to. */
    public int getServerPort() {
        return httpServer.getAddress().getPort();
    }

    /**
     * Builds the single handler that routes every /v2 request by method and path to the
     * matching handleXxx method. Unhandled exceptions are mapped to HTTP 500.
     */
    private HttpHandler createEurekaV2Handle() {
        return new HttpHandler() {
            @Override
            public void handle(HttpExchange httpExchange) throws IOException {
                if (eventListener != null) {
                    // Notify observers before dispatching, so they see every request attempt.
                    eventListener.onHttpRequest(mapToEurekaHttpRequest(httpExchange));
                }
                try {
                    String method = httpExchange.getRequestMethod();
                    String path = httpExchange.getRequestURI().getPath();
                    if (path.startsWith("/v2/apps")) {
                        if ("GET".equals(method)) {
                            handleAppsGET(httpExchange);
                        } else if ("POST".equals(method)) {
                            handleAppsPost(httpExchange);
                        } else if ("PUT".equals(method)) {
                            handleAppsPut(httpExchange);
                        } else if ("DELETE".equals(method)) {
                            handleAppsDelete(httpExchange);
                        } else {
                            httpExchange.sendResponseHeaders(HttpServletResponse.SC_NOT_FOUND, 0);
                        }
                    } else if (path.startsWith("/v2/vips")) {
                        handleVipsGET(httpExchange);
                    } else if (path.startsWith("/v2/svips")) {
                        handleSecureVipsGET(httpExchange);
                    } else if (path.startsWith("/v2/instances")) {
                        handleInstanceGET(httpExchange);
                    }
                    // NOTE(review): any other /v2 path falls through without a response being
                    // sent; preserved as-is since the tests only exercise the routes above.
                } catch (Exception e) {
                    logger.error("HttpServer error", e);
                    httpExchange.sendResponseHeaders(500, 0);
                }
                httpExchange.close();
            }
        };
    }

    /** GET /v2/apps[/...]: full registry, delta, or a single app instance. */
    private void handleAppsGET(HttpExchange httpExchange) throws IOException {
        EurekaHttpResponse<?> httpResponse;
        String path = httpExchange.getRequestURI().getPath();
        Matcher matcher;
        if (APPS_ROOT_PATTERN.matcher(path).matches()) {
            String regions = getQueryParam(httpExchange, "regions");
            httpResponse = regions == null ? requestHandler.getApplications() : requestHandler.getApplications(regions);
        } else if (APPS_DELTA_PATTERN.matcher(path).matches()) {
            String regions = getQueryParam(httpExchange, "regions");
            httpResponse = regions == null ? requestHandler.getDelta() : requestHandler.getDelta(regions);
        } else if ((matcher = APP_INSTANCE_PATTERN.matcher(path)).matches()) {
            httpResponse = requestHandler.getInstance(matcher.group(1), matcher.group(2));
        } else {
            httpExchange.sendResponseHeaders(HttpServletResponse.SC_NOT_FOUND, 0);
            return;
        }
        if (httpResponse == null) {
            // A null reply from the delegate (e.g. an unstubbed mock) maps to 404.
            httpResponse = EurekaHttpResponse.anEurekaHttpResponse(HttpServletResponse.SC_NOT_FOUND).build();
        }
        mapResponse(httpExchange, httpResponse);
    }

    /** POST /v2/apps/{appName}: instance registration. */
    private void handleAppsPost(HttpExchange httpExchange) throws IOException {
        EurekaHttpResponse<?> httpResponse;
        String path = httpExchange.getRequestURI().getPath();
        if (APP_PATTERN.matcher(path).matches()) {
            InstanceInfo instance = decoder.decode(httpExchange.getRequestBody(), InstanceInfo.class);
            httpResponse = requestHandler.register(instance);
        } else {
            httpExchange.sendResponseHeaders(HttpServletResponse.SC_NOT_FOUND, 0);
            return;
        }
        mapResponse(httpExchange, httpResponse);
    }

    /** PUT /v2/apps/{appName}/{id}[/status]: heartbeat or status override update. */
    private void handleAppsPut(HttpExchange httpExchange) throws IOException {
        EurekaHttpResponse<?> httpResponse;
        String path = httpExchange.getRequestURI().getPath();
        Matcher matcher;
        // The two patterns cannot both match: [^/]+ groups never span a '/', so the
        // .../status path only matches the second pattern.
        if ((matcher = APP_INSTANCE_PATTERN.matcher(path)).matches()) {
            String overriddenstatus = getQueryParam(httpExchange, "overriddenstatus");
            httpResponse = requestHandler.sendHeartBeat(
                    matcher.group(1), matcher.group(2), null,
                    overriddenstatus == null ? null : InstanceStatus.valueOf(overriddenstatus)
            );
        } else if ((matcher = APP_INSTANCE_STATUS_PATTERN.matcher(path)).matches()) {
            String newStatus = getQueryParam(httpExchange, "value");
            httpResponse = requestHandler.statusUpdate(
                    matcher.group(1), matcher.group(2),
                    newStatus == null ? null : InstanceStatus.valueOf(newStatus),
                    null
            );
        } else {
            httpExchange.sendResponseHeaders(HttpServletResponse.SC_NOT_FOUND, 0);
            return;
        }
        mapResponse(httpExchange, httpResponse);
    }

    /** DELETE /v2/apps/{appName}/{id}[/status]: cancellation or status override removal. */
    private void handleAppsDelete(HttpExchange httpExchange) throws IOException {
        EurekaHttpResponse<?> httpResponse;
        String path = httpExchange.getRequestURI().getPath();
        Matcher matcher;
        if ((matcher = APP_INSTANCE_PATTERN.matcher(path)).matches()) {
            httpResponse = requestHandler.cancel(matcher.group(1), matcher.group(2));
        } else if ((matcher = APP_INSTANCE_STATUS_PATTERN.matcher(path)).matches()) {
            httpResponse = requestHandler.deleteStatusOverride(matcher.group(1), matcher.group(2), null);
        } else {
            httpExchange.sendResponseHeaders(HttpServletResponse.SC_NOT_FOUND, 0);
            return;
        }
        mapResponse(httpExchange, httpResponse);
    }

    /** GET /v2/vips/{vipAddress}: applications registered under a VIP address. */
    private void handleVipsGET(HttpExchange httpExchange) throws IOException {
        Matcher matcher = VIP_PATTERN.matcher(httpExchange.getRequestURI().getPath());
        if (matcher.matches()) {
            String regions = getQueryParam(httpExchange, "regions");
            EurekaHttpResponse<Applications> httpResponse = regions == null
                    ? requestHandler.getVip(matcher.group(1))
                    : requestHandler.getVip(matcher.group(1), regions);
            mapResponse(httpExchange, httpResponse);
        } else {
            httpExchange.sendResponseHeaders(HttpServletResponse.SC_NOT_FOUND, 0);
        }
    }

    /** GET /v2/svips/{secureVipAddress}: applications registered under a secure VIP address. */
    private void handleSecureVipsGET(HttpExchange httpExchange) throws IOException {
        Matcher matcher = SECURE_VIP_PATTERN.matcher(httpExchange.getRequestURI().getPath());
        if (matcher.matches()) {
            String regions = getQueryParam(httpExchange, "regions");
            EurekaHttpResponse<Applications> httpResponse = regions == null
                    ? requestHandler.getSecureVip(matcher.group(1))
                    : requestHandler.getSecureVip(matcher.group(1), regions);
            mapResponse(httpExchange, httpResponse);
        } else {
            httpExchange.sendResponseHeaders(HttpServletResponse.SC_NOT_FOUND, 0);
        }
    }

    /** GET /v2/instances/{id}: lookup of a single instance by id. */
    private void handleInstanceGET(HttpExchange httpExchange) throws IOException {
        Matcher matcher = INSTANCE_PATTERN.matcher(httpExchange.getRequestURI().getPath());
        if (matcher.matches()) {
            mapResponse(httpExchange, requestHandler.getInstance(matcher.group(1)));
        } else {
            httpExchange.sendResponseHeaders(HttpServletResponse.SC_NOT_FOUND, 0);
        }
    }

    /** Captures method, URI and first-value-per-key headers of the exchange for event listeners. */
    private EurekaHttpRequest mapToEurekaHttpRequest(HttpExchange httpExchange) {
        Headers exchangeHeaders = httpExchange.getRequestHeaders();
        Map<String, String> headers = new HashMap<>();
        for (String key : exchangeHeaders.keySet()) {
            headers.put(key, exchangeHeaders.getFirst(key));
        }
        return new EurekaHttpRequest(httpExchange.getRequestMethod(), httpExchange.getRequestURI(), headers);
    }

    /**
     * Writes the delegate's {@link EurekaHttpResponse} back onto the exchange: headers first,
     * then status, then (for 2xx replies) the JSON-encoded entity body, if any.
     */
    private <T> void mapResponse(HttpExchange httpExchange, EurekaHttpResponse<T> response) throws IOException {
        // Add headers
        for (Map.Entry<String, String> headerEntry : response.getHeaders().entrySet()) {
            httpExchange.getResponseHeaders().add(headerEntry.getKey(), headerEntry.getValue());
        }

        // Non-2xx replies carry no body.
        if (response.getStatusCode() / 100 != 2) {
            httpExchange.sendResponseHeaders(response.getStatusCode(), 0);
            return;
        }

        // Prepare body, if any
        T entity = response.getEntity();
        byte[] body = null;
        if (entity != null) {
            ByteArrayOutputStream bos = new ByteArrayOutputStream();
            encoder.encode(entity, bos);
            body = bos.toByteArray();
        }

        // Set status and body length
        httpExchange.sendResponseHeaders(response.getStatusCode(), body == null ? 0 : body.length);

        // Send body; try-with-resources guarantees the stream is closed even if write fails.
        if (body != null) {
            try (OutputStream responseStream = httpExchange.getResponseBody()) {
                responseStream.write(body);
                responseStream.flush();
            }
        }
    }

    /**
     * Returns the raw value of the given query parameter, or null if absent.
     * Splits each pair on the first '=' only, so values that themselves contain
     * '=' (e.g. base64 padding) are no longer truncated.
     */
    private static String getQueryParam(HttpExchange httpExchange, String queryParam) {
        String query = httpExchange.getRequestURI().getQuery();
        if (query != null) {
            for (String part : query.split("&")) {
                String[] keyValue = part.split("=", 2);
                if (keyValue.length > 1 && keyValue[0].equals(queryParam)) {
                    return keyValue[1];
                }
            }
        }
        return null;
    }
}
| 6,991 |
0 | Create_ds/eureka/eureka-test-utils/src/main/java/com/netflix/discovery/shared | Create_ds/eureka/eureka-test-utils/src/main/java/com/netflix/discovery/shared/transport/EurekaTransportEventListener.java | /*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.discovery.shared.transport;
/**
*/
/**
 * Callback interface for observing HTTP requests received by a Eureka transport test server.
 * Implementations can record the requests a client actually sent (method, URI, headers).
 */
public interface EurekaTransportEventListener {
/**
 * Invoked for an incoming HTTP request.
 *
 * @param request method, URI and headers of the received request
 */
void onHttpRequest(EurekaHttpRequest request);
}
| 6,992 |
0 | Create_ds/universal-test-runner/tests-integ/test-projects/maven/src/test/java/com/amazonaws | Create_ds/universal-test-runner/tests-integ/test-projects/maven/src/test/java/com/amazonaws/testmaven/AppTest.java | package com.amazonaws.testmaven;
import static org.junit.Assert.assertTrue;
import org.junit.Test;
/** Trivial arithmetic smoke tests used to exercise the Maven test runner. */
public class AppTest {

    @Test
    public void test1() {
        final int sum = 1 + 1;
        assertTrue(sum == 2);
    }

    @Test
    public void test2() {
        final int sum = 2 + 2;
        assertTrue(sum == 4);
    }

    @Test
    public void test3() {
        final int sum = 3 + 3;
        assertTrue(sum == 6);
    }
}
| 6,993 |
0 | Create_ds/universal-test-runner/tests-integ/test-projects/maven/src/test/java/com/amazonaws | Create_ds/universal-test-runner/tests-integ/test-projects/maven/src/test/java/com/amazonaws/testmaven2/AppTest2.java | package com.amazonaws.testmaven2;
import static org.junit.Assert.assertTrue;
import org.junit.Test;
/** Second trivial test class, mirroring AppTest in a different package. */
public class AppTest2 {

    @Test
    public void test1() {
        final int sum = 1 + 1;
        assertTrue(sum == 2);
    }

    @Test
    public void test2() {
        final int sum = 2 + 2;
        assertTrue(sum == 4);
    }

    @Test
    public void test3() {
        final int sum = 3 + 3;
        assertTrue(sum == 6);
    }
}
| 6,994 |
0 | Create_ds/universal-test-runner/tests-integ/test-projects/maven/src/main/java/com/amazonaws | Create_ds/universal-test-runner/tests-integ/test-projects/maven/src/main/java/com/amazonaws/testmaven/App.java | package com.amazonaws.testmaven;
/**
* Hello world!
*
*/
public class App {
    /** Entry point: prints a greeting to standard output. */
    public static void main(String[] args) {
        final String greeting = "Hello World!";
        System.out.println(greeting);
    }
}
| 6,995 |
0 | Create_ds/universal-test-runner/tests-integ/test-projects/gradle/app/src/test/java | Create_ds/universal-test-runner/tests-integ/test-projects/gradle/app/src/test/java/gradle/AppTest.java | /*
* This Java source file was generated by the Gradle 'init' task.
*/
package gradle;
import org.junit.jupiter.api.Test;
import static org.junit.jupiter.api.Assertions.*;
/** Trivial arithmetic smoke tests used to exercise the Gradle test runner. */
class AppTest {

    @Test
    void test1() {
        final int sum = 1 + 1;
        assertTrue(sum == 2);
    }

    @Test
    void test2() {
        final int sum = 2 + 2;
        assertTrue(sum == 4);
    }

    @Test
    void test3() {
        final int sum = 3 + 3;
        assertTrue(sum == 6);
    }
}
| 6,996 |
0 | Create_ds/universal-test-runner/tests-integ/test-projects/gradle/app/src/main/java | Create_ds/universal-test-runner/tests-integ/test-projects/gradle/app/src/main/java/gradle/App.java | /*
* This Java source file was generated by the Gradle 'init' task.
*/
package gradle;
public class App {

    /** Returns the greeting printed by {@link #main(String[])}. */
    public String getGreeting() {
        return "Hello World!";
    }

    /** Entry point: prints the greeting to standard output. */
    public static void main(String[] args) {
        final App app = new App();
        System.out.println(app.getGreeting());
    }
}
| 6,997 |
0 | Create_ds/aws-sdk-go-v2/codegen/smithy-aws-go-codegen/src/test/java/software/amazon/smithy/aws/go/codegen | Create_ds/aws-sdk-go-v2/codegen/smithy-aws-go-codegen/src/test/java/software/amazon/smithy/aws/go/codegen/customization/TestUtils.java | /*
* Copyright 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
package software.amazon.smithy.aws.go.codegen.customization;
import java.nio.file.Path;
import software.amazon.smithy.codegen.core.CodegenException;
import software.amazon.smithy.go.codegen.GoSettings;
import software.amazon.smithy.go.codegen.integration.GoIntegration;
import software.amazon.smithy.model.Model;
import software.amazon.smithy.model.loader.ModelAssembler;
import software.amazon.smithy.model.node.Node;
import software.amazon.smithy.model.shapes.ShapeId;
import software.amazon.smithy.utils.IoUtils;
/** Static helpers for loading checked-in AWS service models in codegen customization tests. */
public class TestUtils {

    /** Directory (relative to the codegen module) holding the checked-in AWS service models. */
    public static final String AWS_MODELS_PATH_PREFIX = "../sdk-codegen/aws-models/";

    private TestUtils() {
        // Utility class; not instantiable.
    }

    /**
     * Loads one of the checked-in AWS service models as a JSON document node.
     *
     * @param modelFile file name relative to {@link #AWS_MODELS_PATH_PREFIX}
     * @return the parsed model document
     * @throws CodegenException if the file cannot be read or parsed
     */
    public static Node getAwsModel(String modelFile) {
        try {
            return Node.parseJsonWithComments(IoUtils.readUtf8File(Path.of(AWS_MODELS_PATH_PREFIX, modelFile)));
        } catch (Exception e) {
            throw new CodegenException(e);
        }
    }

    /**
     * Assembles the given model file and runs the supplied integration's model
     * pre-processing step against its (single expected) service shape.
     *
     * @param integration the integration under test
     * @param modelFile   model file name relative to {@link #AWS_MODELS_PATH_PREFIX}
     * @return the pre-processed model
     */
    public static Model preprocessModelIntegration(GoIntegration integration, String modelFile) {
        GoSettings settings = new GoSettings();
        Model model = new ModelAssembler()
                .addDocumentNode(getAwsModel(modelFile))
                .disableValidation()
                .putProperty(ModelAssembler.ALLOW_UNKNOWN_TRAITS, true)
                .assemble()
                .unwrap();
        // Fail with a descriptive error instead of a bare NoSuchElementException when the
        // model unexpectedly contains no service shape.
        ShapeId service = model.getServiceShapes().stream()
                .findFirst()
                .orElseThrow(() -> new CodegenException("No service shape found in model " + modelFile))
                .getId();
        settings.setService(service);
        return integration.preprocessModel(model, settings);
    }
}
| 6,998 |
0 | Create_ds/aws-sdk-go-v2/codegen/smithy-aws-go-codegen/src/test/java/software/amazon/smithy/aws/go/codegen | Create_ds/aws-sdk-go-v2/codegen/smithy-aws-go-codegen/src/test/java/software/amazon/smithy/aws/go/codegen/customization/S3HttpPathBucketFilterIntegrationTest.java | /*
* Copyright 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
package software.amazon.smithy.aws.go.codegen.customization;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import org.junit.jupiter.api.Test;
import software.amazon.smithy.model.Model;
import software.amazon.smithy.model.shapes.OperationShape;
import software.amazon.smithy.model.shapes.ShapeId;
import software.amazon.smithy.model.traits.HttpTrait;
public class S3HttpPathBucketFilterIntegrationTest {

    @Test
    public void test() {
        // Run the bucket-filter customization over the real S3 model.
        final Model preprocessed = TestUtils.preprocessModelIntegration(
                new S3HttpPathBucketFilterIntegration(),
                S3ModelUtils.SERVICE_S3_MODEL_FILE);

        final OperationShape deleteBucketWebsite = preprocessed.expectShape(
                ShapeId.from("com.amazonaws.s3#DeleteBucketWebsite"),
                OperationShape.class);
        final String requestUri = deleteBucketWebsite.expectTrait(HttpTrait.class)
                .getUri()
                .toString();

        // The customization strips the bucket label from the original "/{Bucket}?website" URI.
        assertFalse(requestUri.contains("{Bucket}"));
        assertEquals(requestUri, "/?website");
    }
}
| 6,999 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.