index int64 0 0 | repo_id stringlengths 9 205 | file_path stringlengths 31 246 | content stringlengths 1 12.2M | __index_level_0__ int64 0 10k |
|---|---|---|---|---|
0 | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka/registry/Key.java | package com.netflix.eureka.registry;
import com.netflix.appinfo.EurekaAccept;
import com.netflix.eureka.Version;
import javax.annotation.Nullable;
import java.util.Arrays;
public class Key {

    /** Wire format requested by the client for the cached payload. */
    public enum KeyType {
        JSON, XML
    }

    /**
     * An enum to define the entity that is stored in this cache for this key.
     */
    public enum EntityType {
        Application, VIP, SVIP
    }

    private final String entityName;
    private final String[] regions;
    private final KeyType requestType;
    private final Version requestVersion;
    private final String hashKey;
    private final EntityType entityType;
    private final EurekaAccept eurekaAccept;

    /** Convenience constructor for keys that are not scoped to any regions. */
    public Key(EntityType entityType, String entityName, KeyType type, Version v, EurekaAccept eurekaAccept) {
        this(entityType, entityName, type, v, eurekaAccept, null);
    }

    /**
     * Builds a cache key for the given entity/format/version/accept combination.
     * The identity string is pre-computed once; {@link #equals(Object)} and
     * {@link #hashCode()} both delegate to it.
     */
    public Key(EntityType entityType, String entityName, KeyType type, Version v, EurekaAccept eurekaAccept, @Nullable String[] regions) {
        this.regions = regions;
        this.entityType = entityType;
        this.entityName = entityName;
        this.requestType = type;
        this.requestVersion = v;
        this.eurekaAccept = eurekaAccept;
        StringBuilder identity = new StringBuilder()
                .append(this.entityType)
                .append(this.entityName)
                .append(null != this.regions ? Arrays.toString(this.regions) : "")
                .append(requestType.name())
                .append(requestVersion.name())
                .append(this.eurekaAccept.name());
        hashKey = identity.toString();
    }

    public String getName() {
        return entityName;
    }

    public String getHashKey() {
        return hashKey;
    }

    public KeyType getType() {
        return requestType;
    }

    public Version getVersion() {
        return requestVersion;
    }

    public EurekaAccept getEurekaAccept() {
        return eurekaAccept;
    }

    public EntityType getEntityType() {
        return entityType;
    }

    /** @return true when this key is scoped to at least one region. */
    public boolean hasRegions() {
        return regions != null && regions.length > 0;
    }

    public String[] getRegions() {
        return regions;
    }

    /** @return an equivalent key with the region scope stripped. */
    public Key cloneWithoutRegions() {
        return new Key(entityType, entityName, requestType, requestVersion, eurekaAccept);
    }

    @Override
    public int hashCode() {
        return getHashKey().hashCode();
    }

    @Override
    public boolean equals(Object other) {
        // Two keys are equal iff their pre-computed identity strings match.
        return (other instanceof Key) && getHashKey().equals(((Key) other).getHashKey());
    }

    /** Compact, human-readable rendering used for logging. */
    public String toStringCompact() {
        String rendered = "{name=" + entityName + ", type=" + entityType + ", format=" + requestType;
        if (regions != null) {
            rendered += ", regions=" + Arrays.toString(regions);
        }
        return rendered + '}';
    }
}
| 8,200 |
0 | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka/registry/AwsInstanceRegistry.java | /*
* Copyright 2015 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.eureka.registry;
import com.netflix.discovery.EurekaClient;
import com.netflix.discovery.EurekaClientConfig;
import com.netflix.eureka.EurekaServerConfig;
import com.netflix.eureka.aws.AwsAsgUtil;
import com.netflix.eureka.cluster.PeerEurekaNodes;
import com.netflix.eureka.registry.rule.AsgEnabledRule;
import com.netflix.eureka.registry.rule.DownOrStartingRule;
import com.netflix.eureka.registry.rule.FirstMatchWinsCompositeRule;
import com.netflix.eureka.registry.rule.InstanceStatusOverrideRule;
import com.netflix.eureka.registry.rule.LeaseExistsRule;
import com.netflix.eureka.registry.rule.OverrideExistsRule;
import com.netflix.eureka.resources.ServerCodecs;
import javax.inject.Inject;
import javax.inject.Singleton;
/**
* Override some methods with aws specific use cases.
*
* @author David Liu
*/
@Singleton
public class AwsInstanceRegistry extends PeerAwareInstanceRegistryImpl {

    /** ASG lookup utility; assigned during {@link #init(PeerEurekaNodes)}. */
    private AwsAsgUtil awsAsgUtil;

    /** AWS-aware composite status rule; assigned during {@link #init(PeerEurekaNodes)}. */
    private InstanceStatusOverrideRule instanceStatusOverrideRule;

    @Inject
    public AwsInstanceRegistry(EurekaServerConfig serverConfig,
                               EurekaClientConfig clientConfig,
                               ServerCodecs serverCodecs,
                               EurekaClient eurekaClient) {
        super(serverConfig, clientConfig, serverCodecs, eurekaClient);
    }

    @Override
    public void init(PeerEurekaNodes peerEurekaNodes) throws Exception {
        super.init(peerEurekaNodes);
        this.awsAsgUtil = new AwsAsgUtil(serverConfig, clientConfig, this);
        // Rule order matters: first check STARTING/DOWN, then explicit overrides,
        // then whether the instance's ASG is enabled, finally any existing lease.
        InstanceStatusOverrideRule downOrStarting = new DownOrStartingRule();
        InstanceStatusOverrideRule overrideExists = new OverrideExistsRule(overriddenInstanceStatusMap);
        InstanceStatusOverrideRule asgEnabled = new AsgEnabledRule(this.awsAsgUtil);
        InstanceStatusOverrideRule leaseExists = new LeaseExistsRule();
        this.instanceStatusOverrideRule = new FirstMatchWinsCompositeRule(
                downOrStarting, overrideExists, asgEnabled, leaseExists);
    }

    @Override
    protected InstanceStatusOverrideRule getInstanceInfoOverrideRule() {
        return this.instanceStatusOverrideRule;
    }

    public AwsAsgUtil getAwsAsgUtil() {
        return awsAsgUtil;
    }
}
| 8,201 |
0 | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka/registry/AbstractInstanceRegistry.java | /*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.eureka.registry;
import javax.annotation.Nullable;
import java.net.MalformedURLException;
import java.net.URL;
import java.util.AbstractQueue;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Random;
import java.util.Set;
import java.util.Timer;
import java.util.TimerTask;
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.atomic.AtomicReference;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import com.google.common.cache.CacheBuilder;
import com.netflix.appinfo.InstanceInfo;
import com.netflix.appinfo.InstanceInfo.ActionType;
import com.netflix.appinfo.InstanceInfo.InstanceStatus;
import com.netflix.appinfo.LeaseInfo;
import com.netflix.discovery.EurekaClientConfig;
import com.netflix.discovery.shared.Application;
import com.netflix.discovery.shared.Applications;
import com.netflix.discovery.shared.Pair;
import com.netflix.eureka.EurekaServerConfig;
import com.netflix.eureka.lease.Lease;
import com.netflix.eureka.registry.rule.InstanceStatusOverrideRule;
import com.netflix.eureka.resources.ServerCodecs;
import com.netflix.eureka.util.MeasuredRate;
import com.netflix.servo.annotations.DataSourceType;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import static com.netflix.eureka.util.EurekaMonitors.*;
/**
* Handles all registry requests from eureka clients.
*
* <p>
* Primary operations that are performed are the
* <em>Registers</em>, <em>Renewals</em>, <em>Cancels</em>, <em>Expirations</em>, and <em>Status Changes</em>. The
* registry also stores only the delta operations
* </p>
*
* @author Karthik Ranganathan
*
*/
public abstract class AbstractInstanceRegistry implements InstanceRegistry {
private static final Logger logger = LoggerFactory.getLogger(AbstractInstanceRegistry.class);
// Shared empty array, reused to avoid per-call allocation.
private static final String[] EMPTY_STR_ARRAY = new String[0];
// Primary store: appName -> (instanceId -> lease). Inner maps are ConcurrentHashMaps (see register()).
private final ConcurrentHashMap<String, Map<String, Lease<InstanceInfo>>> registry
= new ConcurrentHashMap<String, Map<String, Lease<InstanceInfo>>>();
// Remote-region name -> registry proxy; populated by initRemoteRegionRegistry().
protected Map<String, RemoteRegionRegistry> regionNameVSRemoteRegistry = new HashMap<String, RemoteRegionRegistry>();
// instanceId -> overridden status; entries auto-expire one hour after last access (Guava cache view).
protected final ConcurrentMap<String, InstanceStatus> overriddenInstanceStatusMap = CacheBuilder
.newBuilder().initialCapacity(500)
.expireAfterAccess(1, TimeUnit.HOURS)
.<String, InstanceStatus>build().asMap();
// CircularQueues here for debugging/statistics purposes only
private final CircularQueue<Pair<Long, String>> recentRegisteredQueue;
private final CircularQueue<Pair<Long, String>> recentCanceledQueue;
// Recent ADD/MODIFY/DELETE items; serves registry delta requests. Trimmed by the delta-retention task.
private ConcurrentLinkedQueue<RecentlyChangedItem> recentlyChangedQueue = new ConcurrentLinkedQueue<>();
// NOTE(review): per-instance mutators (register/cancel/statusUpdate) take the READ lock; the write
// lock is reserved for operations needing exclusive access over the whole registry.
private final ReentrantReadWriteLock readWriteLock = new ReentrantReadWriteLock();
private final Lock read = readWriteLock.readLock();
private final Lock write = readWriteLock.writeLock();
// Guards updates to expectedNumberOfClientsSendingRenews (see register()/internalCancel()).
protected final Object lock = new Object();
private Timer deltaRetentionTimer = new Timer("Eureka-DeltaRetentionTimer", true);
private Timer evictionTimer = new Timer("Eureka-EvictionTimer", true);
// Sliding one-minute rate of renewals (constructed with a 60s window in the constructor).
private final MeasuredRate renewsLastMin;
private final AtomicReference<EvictionTask> evictionTaskRef = new AtomicReference<>();
protected String[] allKnownRemoteRegions = EMPTY_STR_ARRAY;
// Self-preservation bookkeeping; volatile because they are read/written from multiple threads.
protected volatile int numberOfRenewsPerMinThreshold;
protected volatile int expectedNumberOfClientsSendingRenews;
protected final EurekaServerConfig serverConfig;
protected final EurekaClientConfig clientConfig;
protected final ServerCodecs serverCodecs;
protected volatile ResponseCache responseCache;
/**
 * Create a new, empty instance registry.
 *
 * <p>Wires the debug queues and the per-minute renewal meter, and schedules the
 * recurring delta-retention task at the interval configured via
 * {@code EurekaServerConfig#getDeltaRetentionTimerIntervalInMs()}.</p>
 */
protected AbstractInstanceRegistry(EurekaServerConfig serverConfig, EurekaClientConfig clientConfig, ServerCodecs serverCodecs) {
this.serverConfig = serverConfig;
this.clientConfig = clientConfig;
this.serverCodecs = serverCodecs;
this.recentCanceledQueue = new CircularQueue<Pair<Long, String>>(1000);
this.recentRegisteredQueue = new CircularQueue<Pair<Long, String>>(1000);
// One-minute sliding window for renewal-rate measurements.
this.renewsLastMin = new MeasuredRate(1000 * 60 * 1);
// Periodic maintenance of the recently-changed queue; presumably trims stale
// delta entries — task is defined elsewhere in this class (not in this view).
this.deltaRetentionTimer.schedule(getDeltaRetentionTask(),
serverConfig.getDeltaRetentionTimerIntervalInMs(),
serverConfig.getDeltaRetentionTimerIntervalInMs());
}
@Override
public synchronized void initializedResponseCache() {
    // Lazy, idempotent creation; synchronized so at most one cache is ever built.
    if (responseCache != null) {
        return;
    }
    responseCache = new ResponseCacheImpl(serverConfig, serverCodecs, this);
}
/**
 * Builds a RemoteRegionRegistry for every configured named remote-region URL and
 * records all region names in {@code allKnownRemoteRegions}.
 *
 * @throws MalformedURLException if any configured remote-region URL is invalid
 */
protected void initRemoteRegionRegistry() throws MalformedURLException {
    Map<String, String> namedRegionUrls = serverConfig.getRemoteRegionUrlsWithName();
    if (!namedRegionUrls.isEmpty()) {
        List<String> regionNames = new ArrayList<>(namedRegionUrls.size());
        for (Map.Entry<String, String> entry : namedRegionUrls.entrySet()) {
            String regionName = entry.getKey();
            RemoteRegionRegistry remoteRegionRegistry = new RemoteRegionRegistry(
                    serverConfig,
                    clientConfig,
                    serverCodecs,
                    regionName,
                    new URL(entry.getValue()));
            regionNameVSRemoteRegistry.put(regionName, remoteRegionRegistry);
            regionNames.add(regionName);
        }
        allKnownRemoteRegions = regionNames.toArray(new String[0]);
    }
    logger.info("Finished initializing remote region registries. All known remote regions: {}",
            (Object) allKnownRemoteRegions);
}
/**
 * @return the response cache created by {@link #initializedResponseCache()},
 *         or {@code null} if it has not been initialized yet.
 */
@Override
public ResponseCache getResponseCache() {
return responseCache;
}
/**
 * @return the number of leases currently held in the local registry, summed
 *         across all applications (remote regions are not included).
 */
public long getLocalRegistrySize() {
    long count = 0;
    for (Map<String, Lease<InstanceInfo>> leasesForApp : registry.values()) {
        count += leasesForApp.size();
    }
    return count;
}
/**
 * Completely clear the registry.
 *
 * <p>Clears overrides, the debug queues, the delta queue and the lease store.
 * NOTE(review): the clears are not atomic as a whole; concurrent registrations
 * may land between the individual clear() calls.</p>
 */
@Override
public void clearRegistry() {
overriddenInstanceStatusMap.clear();
recentCanceledQueue.clear();
recentRegisteredQueue.clear();
recentlyChangedQueue.clear();
registry.clear();
}
// for server info use
/**
 * Returns a point-in-time copy of the overridden-status map (instanceId -> status).
 * The returned map is detached: later changes to the live map are not reflected.
 */
@Override
public Map<String, InstanceStatus> overriddenInstanceStatusesSnapshot() {
return new HashMap<>(overriddenInstanceStatusMap);
}
/**
 * Registers a new instance with a given duration.
 *
 * <p>Takes the read lock: concurrent registrations of different instances may
 * proceed in parallel; exclusive operations use the write lock. If a lease
 * already exists with a newer dirty timestamp, the existing InstanceInfo is
 * kept instead of the incoming one.</p>
 *
 * @param registrant the instance to register (may be replaced by the existing
 *                   holder if the existing copy is dirtier)
 * @param leaseDuration lease duration in seconds for the new lease
 * @param isReplication true if this registration was replicated from a peer
 *
 * @see com.netflix.eureka.lease.LeaseManager#register(java.lang.Object, int, boolean)
 */
public void register(InstanceInfo registrant, int leaseDuration, boolean isReplication) {
read.lock();
try {
Map<String, Lease<InstanceInfo>> gMap = registry.get(registrant.getAppName());
REGISTER.increment(isReplication);
// Lazily create the per-app lease map; putIfAbsent resolves races between
// concurrent first registrations for the same app.
if (gMap == null) {
final ConcurrentHashMap<String, Lease<InstanceInfo>> gNewMap = new ConcurrentHashMap<String, Lease<InstanceInfo>>();
gMap = registry.putIfAbsent(registrant.getAppName(), gNewMap);
if (gMap == null) {
gMap = gNewMap;
}
}
Lease<InstanceInfo> existingLease = gMap.get(registrant.getId());
// Retain the last dirty timestamp without overwriting it, if there is already a lease
if (existingLease != null && (existingLease.getHolder() != null)) {
Long existingLastDirtyTimestamp = existingLease.getHolder().getLastDirtyTimestamp();
Long registrationLastDirtyTimestamp = registrant.getLastDirtyTimestamp();
logger.debug("Existing lease found (existing={}, provided={}", existingLastDirtyTimestamp, registrationLastDirtyTimestamp);
// this is a > instead of a >= because if the timestamps are equal, we still take the remote transmitted
// InstanceInfo instead of the server local copy.
// NOTE(review): the boxed Long comparison auto-unboxes; assumes neither
// timestamp is null here — TODO confirm against InstanceInfo contract.
if (existingLastDirtyTimestamp > registrationLastDirtyTimestamp) {
logger.warn("There is an existing lease and the existing lease's dirty timestamp {} is greater" +
" than the one that is being registered {}", existingLastDirtyTimestamp, registrationLastDirtyTimestamp);
logger.warn("Using the existing instanceInfo instead of the new instanceInfo as the registrant");
registrant = existingLease.getHolder();
}
} else {
// The lease does not exist and hence it is a new registration
synchronized (lock) {
if (this.expectedNumberOfClientsSendingRenews > 0) {
// Since the client wants to register it, increase the number of clients sending renews
this.expectedNumberOfClientsSendingRenews = this.expectedNumberOfClientsSendingRenews + 1;
updateRenewsPerMinThreshold();
}
}
logger.debug("No previous lease information found; it is new registration");
}
Lease<InstanceInfo> lease = new Lease<>(registrant, leaseDuration);
if (existingLease != null) {
// Preserve the original service-up time across re-registrations.
lease.setServiceUpTimestamp(existingLease.getServiceUpTimestamp());
}
gMap.put(registrant.getId(), lease);
recentRegisteredQueue.add(new Pair<Long, String>(
System.currentTimeMillis(),
registrant.getAppName() + "(" + registrant.getId() + ")"));
// This is where the initial state transfer of overridden status happens
if (!InstanceStatus.UNKNOWN.equals(registrant.getOverriddenStatus())) {
logger.debug("Found overridden status {} for instance {}. Checking to see if needs to be add to the "
+ "overrides", registrant.getOverriddenStatus(), registrant.getId());
if (!overriddenInstanceStatusMap.containsKey(registrant.getId())) {
logger.info("Not found overridden id {} and hence adding it", registrant.getId());
overriddenInstanceStatusMap.put(registrant.getId(), registrant.getOverriddenStatus());
}
}
InstanceStatus overriddenStatusFromMap = overriddenInstanceStatusMap.get(registrant.getId());
if (overriddenStatusFromMap != null) {
logger.info("Storing overridden status {} from map", overriddenStatusFromMap);
registrant.setOverriddenStatus(overriddenStatusFromMap);
}
// Set the status based on the overridden status rules
InstanceStatus overriddenInstanceStatus = getOverriddenInstanceStatus(registrant, existingLease, isReplication);
registrant.setStatusWithoutDirty(overriddenInstanceStatus);
// If the lease is registered with UP status, set lease service up timestamp
if (InstanceStatus.UP.equals(registrant.getStatus())) {
lease.serviceUp();
}
registrant.setActionType(ActionType.ADDED);
// Record the change so it appears in subsequent registry delta fetches.
recentlyChangedQueue.add(new RecentlyChangedItem(lease));
registrant.setLastUpdatedTimestamp();
invalidateCache(registrant.getAppName(), registrant.getVIPAddress(), registrant.getSecureVipAddress());
logger.info("Registered instance {}/{} with status {} (replication={})",
registrant.getAppName(), registrant.getId(), registrant.getStatus(), isReplication);
} finally {
read.unlock();
}
}
/**
 * Cancels the registration of an instance.
 *
 * <p>
 * This is normally invoked by a client when it shuts down informing the
 * server to remove the instance from traffic.
 * </p>
 *
 * <p>Delegates to {@link #internalCancel(String, String, boolean)}; subclasses
 * override this method (not internalCancel) to add peer replication.</p>
 *
 * @param appName the application name of the application.
 * @param id the unique identifier of the instance.
 * @param isReplication true if this is a replication event from other nodes, false
 * otherwise.
 * @return true if the instance was removed from the {@link AbstractInstanceRegistry} successfully, false otherwise.
 */
@Override
public boolean cancel(String appName, String id, boolean isReplication) {
return internalCancel(appName, id, isReplication);
}
/**
 * {@link #cancel(String, String, boolean)} method is overridden by {@link PeerAwareInstanceRegistry}, so each
 * cancel request is replicated to the peers. This is however not desired for expires which would be counted
 * in the remote peers as valid cancellations, so self preservation mode would not kick-in.
 *
 * @param appName the application name of the instance being cancelled
 * @param id the unique identifier of the instance being cancelled
 * @param isReplication true if this cancel was replicated from a peer
 * @return true if a lease existed and was cancelled (or no lease was found but
 *         bookkeeping succeeded — see below); false when the lease was absent
 */
protected boolean internalCancel(String appName, String id, boolean isReplication) {
read.lock();
try {
CANCEL.increment(isReplication);
Map<String, Lease<InstanceInfo>> gMap = registry.get(appName);
Lease<InstanceInfo> leaseToCancel = null;
if (gMap != null) {
leaseToCancel = gMap.remove(id);
}
// Recorded even if the lease did not exist, for debugging/statistics.
recentCanceledQueue.add(new Pair<Long, String>(System.currentTimeMillis(), appName + "(" + id + ")"));
InstanceStatus instanceStatus = overriddenInstanceStatusMap.remove(id);
if (instanceStatus != null) {
logger.debug("Removed instance id {} from the overridden map which has value {}", id, instanceStatus.name());
}
if (leaseToCancel == null) {
CANCEL_NOT_FOUND.increment(isReplication);
logger.warn("DS: Registry: cancel failed because Lease is not registered for: {}/{}", appName, id);
return false;
} else {
leaseToCancel.cancel();
InstanceInfo instanceInfo = leaseToCancel.getHolder();
String vip = null;
String svip = null;
if (instanceInfo != null) {
instanceInfo.setActionType(ActionType.DELETED);
// Record the deletion so it appears in subsequent registry delta fetches.
recentlyChangedQueue.add(new RecentlyChangedItem(leaseToCancel));
instanceInfo.setLastUpdatedTimestamp();
vip = instanceInfo.getVIPAddress();
svip = instanceInfo.getSecureVipAddress();
}
invalidateCache(appName, vip, svip);
logger.info("Cancelled instance {}/{} (replication={})", appName, id, isReplication);
}
} finally {
read.unlock();
}
// Done outside the read lock: adjusts self-preservation expectations.
synchronized (lock) {
if (this.expectedNumberOfClientsSendingRenews > 0) {
// Since the client wants to cancel it, reduce the number of clients to send renews.
this.expectedNumberOfClientsSendingRenews = this.expectedNumberOfClientsSendingRenews - 1;
updateRenewsPerMinThreshold();
}
}
return true;
}
/**
 * Marks the given instance of the given app name as renewed, and also marks whether it originated from
 * replication.
 *
 * <p>Returns false (forcing the client to re-register) either when no lease is
 * found or when the computed overridden status is UNKNOWN — e.g. after an
 * override was deleted.</p>
 *
 * @param appName the application name of the instance
 * @param id the unique identifier of the instance
 * @param isReplication true if this renewal was replicated from a peer
 * @return true if the lease was renewed; false if the client must re-register
 *
 * @see com.netflix.eureka.lease.LeaseManager#renew(java.lang.String, java.lang.String, boolean)
 */
public boolean renew(String appName, String id, boolean isReplication) {
RENEW.increment(isReplication);
Map<String, Lease<InstanceInfo>> gMap = registry.get(appName);
Lease<InstanceInfo> leaseToRenew = null;
if (gMap != null) {
leaseToRenew = gMap.get(id);
}
if (leaseToRenew == null) {
RENEW_NOT_FOUND.increment(isReplication);
logger.warn("DS: Registry: lease doesn't exist, registering resource: {} - {}", appName, id);
return false;
} else {
InstanceInfo instanceInfo = leaseToRenew.getHolder();
if (instanceInfo != null) {
// touchASGCache(instanceInfo.getASGName());
InstanceStatus overriddenInstanceStatus = this.getOverriddenInstanceStatus(
instanceInfo, leaseToRenew, isReplication);
if (overriddenInstanceStatus == InstanceStatus.UNKNOWN) {
logger.info("Instance status UNKNOWN possibly due to deleted override for instance {}"
+ "; re-register required", instanceInfo.getId());
RENEW_NOT_FOUND.increment(isReplication);
return false;
}
// Reconcile the stored status with the rule-derived overridden status.
if (!instanceInfo.getStatus().equals(overriddenInstanceStatus)) {
logger.info(
"The instance status {} is different from overridden instance status {} for instance {}. "
+ "Hence setting the status to overridden status", instanceInfo.getStatus().name(),
overriddenInstanceStatus.name(),
instanceInfo.getId());
instanceInfo.setStatusWithoutDirty(overriddenInstanceStatus);
}
}
// Count the renewal for self-preservation accounting, then extend the lease.
renewsLastMin.increment();
leaseToRenew.renew();
return true;
}
}
/**
 * @deprecated this is expensive, try not to use. See if you can use
 * {@link #storeOverriddenStatusIfRequired(String, String, InstanceStatus)} instead.
 *
 * Stores overridden status if it is not already there. This happens during
 * a reconciliation process during renewal requests.
 *
 * @param id the unique identifier of the instance.
 * @param overriddenStatus Overridden status if any.
 */
@Deprecated
@Override
public void storeOverriddenStatusIfRequired(String id, InstanceStatus overriddenStatus) {
    InstanceStatus recorded = overriddenInstanceStatusMap.get(id);
    if (overriddenStatus.equals(recorded)) {
        // Already stored with the same value; nothing to reconcile.
        return;
    }
    // We might not have the overridden status if the server got restarted -this will help us maintain
    // the overridden state from the replica
    logger.info(
            "Adding overridden status for instance id {} and the value is {}",
            id, overriddenStatus.name());
    overriddenInstanceStatusMap.put(id, overriddenStatus);
    List<InstanceInfo> matches = this.getInstancesById(id, false);
    if (matches != null && !matches.isEmpty()) {
        matches.get(0).setOverriddenStatus(overriddenStatus);
        logger.info(
                "Setting the overridden status for instance id {} and the value is {} ",
                id, overriddenStatus.name());
    }
}
/**
 * Stores overridden status if it is not already there. This happens during
 * a reconciliation process during renewal requests.
 *
 * @param appName the application name of the instance.
 * @param id the unique identifier of the instance.
 * @param overriddenStatus overridden status if any.
 */
@Override
public void storeOverriddenStatusIfRequired(String appName, String id, InstanceStatus overriddenStatus) {
    InstanceStatus instanceStatus = overriddenInstanceStatusMap.get(id);
    if ((instanceStatus == null) || (!overriddenStatus.equals(instanceStatus))) {
        // We might not have the overridden status if the server got
        // restarted -this will help us maintain the overridden state
        // from the replica
        logger.info("Adding overridden status for instance id {} and the value is {}",
                id, overriddenStatus.name());
        overriddenInstanceStatusMap.put(id, overriddenStatus);
        InstanceInfo instanceInfo = this.getInstanceByAppAndId(appName, id, false);
        // FIX: the instance may have been cancelled/evicted concurrently, in which
        // case the lookup returns null; previously this dereferenced it unconditionally
        // and threw a NullPointerException.
        if (instanceInfo != null) {
            instanceInfo.setOverriddenStatus(overriddenStatus);
            logger.info("Set the overridden status for instance (appname:{}, id:{}} and the value is {} ",
                    appName, id, overriddenStatus.name());
        } else {
            logger.warn("No instance found (appname:{}, id:{}) while storing overridden status {}",
                    appName, id, overriddenStatus.name());
        }
    }
}
/**
 * Updates the status of an instance. Normally happens to put an instance
 * between {@link InstanceStatus#OUT_OF_SERVICE} and
 * {@link InstanceStatus#UP} to put the instance in and out of traffic.
 *
 * <p>Also records the new status as an override (so it survives replica
 * restarts) and renews the lease as a side effect.</p>
 *
 * @param appName the application name of the instance.
 * @param id the unique identifier of the instance.
 * @param newStatus the new {@link InstanceStatus}.
 * @param lastDirtyTimestamp last timestamp when this instance information was updated.
 * @param isReplication true if this is a replication event from other nodes, false
 * otherwise.
 * @return true if the status was successfully updated, false otherwise.
 */
@Override
public boolean statusUpdate(String appName, String id,
InstanceStatus newStatus, String lastDirtyTimestamp,
boolean isReplication) {
read.lock();
try {
STATUS_UPDATE.increment(isReplication);
Map<String, Lease<InstanceInfo>> gMap = registry.get(appName);
Lease<InstanceInfo> lease = null;
if (gMap != null) {
lease = gMap.get(id);
}
if (lease == null) {
return false;
} else {
// A status update counts as activity: extend the lease.
lease.renew();
InstanceInfo info = lease.getHolder();
// Lease is always created with its instance info object.
// This log statement is provided as a safeguard, in case this invariant is violated.
if (info == null) {
logger.error("Found Lease without a holder for instance id {}", id);
}
if ((info != null) && !(info.getStatus().equals(newStatus))) {
// Mark service as UP if needed
if (InstanceStatus.UP.equals(newStatus)) {
lease.serviceUp();
}
// This is NAC overridden status
overriddenInstanceStatusMap.put(id, newStatus);
// Set it for transfer of overridden status to replica on
// replica start up
info.setOverriddenStatus(newStatus);
long replicaDirtyTimestamp = 0;
info.setStatusWithoutDirty(newStatus);
if (lastDirtyTimestamp != null) {
// NOTE(review): throws NumberFormatException if the caller passes a
// non-numeric timestamp string — TODO confirm callers always validate.
replicaDirtyTimestamp = Long.parseLong(lastDirtyTimestamp);
}
// If the replication's dirty timestamp is more than the existing one, just update
// it to the replica's.
if (replicaDirtyTimestamp > info.getLastDirtyTimestamp()) {
info.setLastDirtyTimestamp(replicaDirtyTimestamp);
}
info.setActionType(ActionType.MODIFIED);
recentlyChangedQueue.add(new RecentlyChangedItem(lease));
info.setLastUpdatedTimestamp();
invalidateCache(appName, info.getVIPAddress(), info.getSecureVipAddress());
}
return true;
}
} finally {
read.unlock();
}
}
/**
 * Removes status override for a give instance.
 *
 * <p>Clears the recorded override, resets the instance's overridden status to
 * UNKNOWN and applies {@code newStatus} as the effective status. Renews the
 * lease as a side effect.</p>
 *
 * @param appName the application name of the instance.
 * @param id the unique identifier of the instance.
 * @param newStatus the new {@link InstanceStatus}.
 * @param lastDirtyTimestamp last timestamp when this instance information was updated.
 * @param isReplication true if this is a replication event from other nodes, false
 * otherwise.
 * @return true if the status was successfully updated, false otherwise.
 */
@Override
public boolean deleteStatusOverride(String appName, String id,
InstanceStatus newStatus,
String lastDirtyTimestamp,
boolean isReplication) {
read.lock();
try {
STATUS_OVERRIDE_DELETE.increment(isReplication);
Map<String, Lease<InstanceInfo>> gMap = registry.get(appName);
Lease<InstanceInfo> lease = null;
if (gMap != null) {
lease = gMap.get(id);
}
if (lease == null) {
return false;
} else {
// An override removal counts as activity: extend the lease.
lease.renew();
InstanceInfo info = lease.getHolder();
// Lease is always created with its instance info object.
// This log statement is provided as a safeguard, in case this invariant is violated.
if (info == null) {
logger.error("Found Lease without a holder for instance id {}", id);
}
InstanceStatus currentOverride = overriddenInstanceStatusMap.remove(id);
if (currentOverride != null && info != null) {
info.setOverriddenStatus(InstanceStatus.UNKNOWN);
info.setStatusWithoutDirty(newStatus);
long replicaDirtyTimestamp = 0;
if (lastDirtyTimestamp != null) {
replicaDirtyTimestamp = Long.parseLong(lastDirtyTimestamp);
}
// If the replication's dirty timestamp is more than the existing one, just update
// it to the replica's.
if (replicaDirtyTimestamp > info.getLastDirtyTimestamp()) {
info.setLastDirtyTimestamp(replicaDirtyTimestamp);
}
info.setActionType(ActionType.MODIFIED);
recentlyChangedQueue.add(new RecentlyChangedItem(lease));
info.setLastUpdatedTimestamp();
invalidateCache(appName, info.getVIPAddress(), info.getSecureVipAddress());
}
return true;
}
} finally {
read.unlock();
}
}
/**
 * Evicts everything in the instance registry that has expired, if expiry is enabled.
 *
 * <p>Equivalent to {@code evict(0L)} — no additional lease slack.</p>
 *
 * @see com.netflix.eureka.lease.LeaseManager#evict()
 */
@Override
public void evict() {
    evict(0L);
}
/**
 * Evicts expired leases, capped so that self-preservation is respected.
 *
 * @param additionalLeaseMs extra slack (ms) added to each lease's expiry check,
 *                          compensating for GC pauses / clock drift
 */
public void evict(long additionalLeaseMs) {
logger.debug("Running the evict task");
if (!isLeaseExpirationEnabled()) {
logger.debug("DS: lease expiration is currently disabled.");
return;
}
// We collect first all expired items, to evict them in random order. For large eviction sets,
// if we do not that, we might wipe out whole apps before self preservation kicks in. By randomizing it,
// the impact should be evenly distributed across all applications.
List<Lease<InstanceInfo>> expiredLeases = new ArrayList<>();
for (Entry<String, Map<String, Lease<InstanceInfo>>> groupEntry : registry.entrySet()) {
Map<String, Lease<InstanceInfo>> leaseMap = groupEntry.getValue();
if (leaseMap != null) {
for (Entry<String, Lease<InstanceInfo>> leaseEntry : leaseMap.entrySet()) {
Lease<InstanceInfo> lease = leaseEntry.getValue();
if (lease.isExpired(additionalLeaseMs) && lease.getHolder() != null) {
expiredLeases.add(lease);
}
}
}
}
// To compensate for GC pauses or drifting local time, we need to use current registry size as a base for
// triggering self-preservation. Without that we would wipe out full registry.
int registrySize = (int) getLocalRegistrySize();
int registrySizeThreshold = (int) (registrySize * serverConfig.getRenewalPercentThreshold());
// Never evict below the renewal-percent threshold of the current registry size.
int evictionLimit = registrySize - registrySizeThreshold;
int toEvict = Math.min(expiredLeases.size(), evictionLimit);
if (toEvict > 0) {
logger.info("Evicting {} items (expired={}, evictionLimit={})", toEvict, expiredLeases.size(), evictionLimit);
Random random = new Random(System.currentTimeMillis());
for (int i = 0; i < toEvict; i++) {
// Pick a random item (Knuth shuffle algorithm)
int next = i + random.nextInt(expiredLeases.size() - i);
Collections.swap(expiredLeases, i, next);
Lease<InstanceInfo> lease = expiredLeases.get(i);
String appName = lease.getHolder().getAppName();
String id = lease.getHolder().getId();
EXPIRED.increment();
logger.warn("DS: Registry: expired lease for {}/{}", appName, id);
// internalCancel (not cancel) so expirations are NOT replicated to peers.
internalCancel(appName, id, false);
}
}
}
/**
 * Returns the given app that is in this instance only, falling back to other regions transparently only
 * if specified in this client configuration.
 *
 * @param appName the application name of the application
 * @return the application
 *
 * @see com.netflix.discovery.shared.LookupService#getApplication(java.lang.String)
 */
@Override
public Application getApplication(String appName) {
    // Remote-region fallback is on unless explicitly disabled in server config.
    return this.getApplication(appName, !serverConfig.disableTransparentFallbackToOtherRegion());
}
/**
 * Get application information.
 *
 * @param appName The name of the application
 * @param includeRemoteRegion true, if we need to include applications from remote regions
 * as indicated by the region {@link URL} by this property
 * {@link EurekaServerConfig#getRemoteRegionUrls()}, false otherwise
 * @return the application, or {@code null} when it is known neither locally nor
 *         (when requested) in any remote region
 */
@Override
public Application getApplication(String appName, boolean includeRemoteRegion) {
    Application app = null;
    Map<String, Lease<InstanceInfo>> leases = registry.get(appName);
    if (leases != null && leases.size() > 0) {
        for (Lease<InstanceInfo> lease : leases.values()) {
            // Created lazily on the first lease so an empty iteration yields null.
            if (app == null) {
                app = new Application(appName);
            }
            app.addInstance(decorateInstanceInfo(lease));
        }
    } else if (includeRemoteRegion) {
        // Not found locally: return the first remote region that knows the app.
        for (RemoteRegionRegistry remoteRegistry : this.regionNameVSRemoteRegistry.values()) {
            Application remoteApp = remoteRegistry.getApplication(appName);
            if (remoteApp != null) {
                return remoteApp;
            }
        }
    }
    return app;
}
/**
 * Get all applications in this instance registry, falling back to other regions if allowed in the Eureka config.
 *
 * @return the list of all known applications
 *
 * @see com.netflix.discovery.shared.LookupService#getApplications()
 */
public Applications getApplications() {
    // Behavior of falling back to remote region can be disabled via config.
    return serverConfig.disableTransparentFallbackToOtherRegion()
            ? getApplicationsFromLocalRegionOnly()
            : getApplicationsFromAllRemoteRegions();
}
/**
 * Returns applications including instances from all remote regions. <br/>
 * Same as calling {@link #getApplicationsFromMultipleRegions(String[])} with a <code>null</code> argument.
 *
 * @return applications spanning the local region plus every known remote region
 */
public Applications getApplicationsFromAllRemoteRegions() {
return getApplicationsFromMultipleRegions(allKnownRemoteRegions);
}
    /**
     * Returns applications including instances from local region only. <br/>
     * Same as calling {@link #getApplicationsFromMultipleRegions(String[])} with an empty array.
     */
    @Override
    public Applications getApplicationsFromLocalRegionOnly() {
        // An empty region array means "no remote regions" in the multi-region variant.
        return getApplicationsFromMultipleRegions(EMPTY_STR_ARRAY);
    }
    /**
     * This method will return applications with instances from all passed remote regions as well as the current region.
     * Thus, this gives a union view of instances from multiple regions. <br/>
     * The application instances for which this union will be done can be restricted to the names returned by
     * {@link EurekaServerConfig#getRemoteRegionAppWhitelist(String)} for every region. In case, there is no whitelist
     * defined for a region, this method will also look for a global whitelist by passing <code>null</code> to the
     * method {@link EurekaServerConfig#getRemoteRegionAppWhitelist(String)} <br/>
     * If you are not selectively requesting for a remote region, use {@link #getApplicationsFromAllRemoteRegions()}
     * or {@link #getApplicationsFromLocalRegionOnly()}
     *
     * @param remoteRegions The remote regions for which the instances are to be queried. The instances may be limited
     *                      by a whitelist as explained above. If <code>null</code> or empty no remote regions are
     *                      included.
     *
     * @return The applications with instances from the passed remote regions as well as local region. The instances
     * from remote regions can be only for certain whitelisted apps as explained above.
     */
    public Applications getApplicationsFromMultipleRegions(String[] remoteRegions) {
        boolean includeRemoteRegion = null != remoteRegions && remoteRegions.length != 0;
        logger.debug("Fetching applications registry with remote regions: {}, Regions argument {}",
                includeRemoteRegion, remoteRegions);
        // Separate cache-miss counters so remote-region reads can be monitored apart
        // from purely local ones.
        if (includeRemoteRegion) {
            GET_ALL_WITH_REMOTE_REGIONS_CACHE_MISS.increment();
        } else {
            GET_ALL_CACHE_MISS.increment();
        }
        Applications apps = new Applications();
        apps.setVersion(1L);
        // Pass 1: collect every locally registered application and its (decorated) instances.
        for (Entry<String, Map<String, Lease<InstanceInfo>>> entry : registry.entrySet()) {
            Application app = null;
            if (entry.getValue() != null) {
                for (Entry<String, Lease<InstanceInfo>> stringLeaseEntry : entry.getValue().entrySet()) {
                    Lease<InstanceInfo> lease = stringLeaseEntry.getValue();
                    if (app == null) {
                        // Created lazily so an empty lease map contributes nothing.
                        app = new Application(lease.getHolder().getAppName());
                    }
                    app.addInstance(decorateInstanceInfo(lease));
                }
            }
            if (app != null) {
                apps.addApplication(app);
            }
        }
        if (includeRemoteRegion) {
            // Pass 2: merge in instances from each requested remote region, subject to the
            // per-region (or global) application whitelist.
            for (String remoteRegion : remoteRegions) {
                RemoteRegionRegistry remoteRegistry = regionNameVSRemoteRegistry.get(remoteRegion);
                if (null != remoteRegistry) {
                    Applications remoteApps = remoteRegistry.getApplications();
                    for (Application application : remoteApps.getRegisteredApplications()) {
                        if (shouldFetchFromRemoteRegistry(application.getName(), remoteRegion)) {
                            logger.info("Application {} fetched from the remote region {}",
                                    application.getName(), remoteRegion);
                            // Append remote instances to any same-named local application.
                            Application appInstanceTillNow = apps.getRegisteredApplications(application.getName());
                            if (appInstanceTillNow == null) {
                                appInstanceTillNow = new Application(application.getName());
                                apps.addApplication(appInstanceTillNow);
                            }
                            for (InstanceInfo instanceInfo : application.getInstances()) {
                                appInstanceTillNow.addInstance(instanceInfo);
                            }
                        } else {
                            logger.debug("Application {} not fetched from the remote region {} as there exists a "
                                            + "whitelist and this app is not in the whitelist.",
                                    application.getName(), remoteRegion);
                        }
                    }
                } else {
                    logger.warn("No remote registry available for the remote region {}", remoteRegion);
                }
            }
        }
        apps.setAppsHashCode(apps.getReconcileHashCode());
        return apps;
    }
private boolean shouldFetchFromRemoteRegistry(String appName, String remoteRegion) {
Set<String> whiteList = serverConfig.getRemoteRegionAppWhitelist(remoteRegion);
if (null == whiteList) {
whiteList = serverConfig.getRemoteRegionAppWhitelist(null); // see global whitelist.
}
return null == whiteList || whiteList.contains(appName);
}
    /**
     * Get the registry information about all {@link Applications}.
     *
     * @param includeRemoteRegion true, if we need to include applications from remote regions
     *                            as indicated by the region {@link URL} by this property
     *                            {@link EurekaServerConfig#getRemoteRegionUrls()}, false otherwise
     * @return applications
     *
     * @deprecated Use {@link #getApplicationsFromMultipleRegions(String[])} instead. This method has a flawed behavior
     * of transparently falling back to a remote region if no instances for an app is available locally. The new
     * behavior is to explicitly specify if you need a remote region.
     */
    @Deprecated
    public Applications getApplications(boolean includeRemoteRegion) {
        GET_ALL_CACHE_MISS.increment();
        Applications apps = new Applications();
        apps.setVersion(1L);
        // Collect every locally registered application and its (decorated) instances.
        for (Entry<String, Map<String, Lease<InstanceInfo>>> entry : registry.entrySet()) {
            Application app = null;
            if (entry.getValue() != null) {
                for (Entry<String, Lease<InstanceInfo>> stringLeaseEntry : entry.getValue().entrySet()) {
                    Lease<InstanceInfo> lease = stringLeaseEntry.getValue();
                    if (app == null) {
                        // Created lazily so an empty lease map contributes nothing.
                        app = new Application(lease.getHolder().getAppName());
                    }
                    app.addInstance(decorateInstanceInfo(lease));
                }
            }
            if (app != null) {
                apps.addApplication(app);
            }
        }
        if (includeRemoteRegion) {
            // Deprecated fallback: adds remote applications only when an app of the same
            // name does not exist locally (no per-instance merging, unlike
            // getApplicationsFromMultipleRegions).
            for (RemoteRegionRegistry remoteRegistry : this.regionNameVSRemoteRegistry.values()) {
                Applications applications = remoteRegistry.getApplications();
                for (Application application : applications
                        .getRegisteredApplications()) {
                    Application appInLocalRegistry = apps
                            .getRegisteredApplications(application.getName());
                    if (appInLocalRegistry == null) {
                        apps.addApplication(application);
                    }
                }
            }
        }
        apps.setAppsHashCode(apps.getReconcileHashCode());
        return apps;
    }
    /**
     * Get the registry information about the delta changes. The deltas are
     * cached for a window specified by
     * {@link EurekaServerConfig#getRetentionTimeInMSInDeltaQueue()}. Subsequent
     * requests for delta information may return the same information and client
     * must make sure this does not adversely affect them.
     *
     * @return all application deltas.
     * @deprecated use {@link #getApplicationDeltasFromMultipleRegions(String[])} instead. This method has a
     * flawed behavior of transparently falling back to a remote region if no instances for an app is available locally.
     * The new behavior is to explicitly specify if you need a remote region.
     */
    @Deprecated
    public Applications getApplicationDeltas() {
        GET_ALL_CACHE_MISS_DELTA.increment();
        Applications apps = new Applications();
        apps.setVersion(responseCache.getVersionDelta().get());
        Map<String, Application> applicationInstancesMap = new HashMap<String, Application>();
        // The write lock keeps the recently-changed queue stable while it is being
        // drained into the delta payload.
        write.lock();
        try {
            Iterator<RecentlyChangedItem> iter = this.recentlyChangedQueue.iterator();
            logger.debug("The number of elements in the delta queue is : {}",
                    this.recentlyChangedQueue.size());
            // Group recently changed instances by application name.
            while (iter.hasNext()) {
                Lease<InstanceInfo> lease = iter.next().getLeaseInfo();
                InstanceInfo instanceInfo = lease.getHolder();
                logger.debug(
                        "The instance id {} is found with status {} and actiontype {}",
                        instanceInfo.getId(), instanceInfo.getStatus().name(), instanceInfo.getActionType().name());
                Application app = applicationInstancesMap.get(instanceInfo
                        .getAppName());
                if (app == null) {
                    app = new Application(instanceInfo.getAppName());
                    applicationInstancesMap.put(instanceInfo.getAppName(), app);
                    apps.addApplication(app);
                }
                // Copy so later lease mutations do not leak into the returned delta.
                app.addInstance(new InstanceInfo(decorateInstanceInfo(lease)));
            }
            boolean disableTransparentFallback = serverConfig.disableTransparentFallbackToOtherRegion();
            if (!disableTransparentFallback) {
                // Deprecated fallback: include remote deltas for apps unknown locally.
                Applications allAppsInLocalRegion = getApplications(false);
                for (RemoteRegionRegistry remoteRegistry : this.regionNameVSRemoteRegistry.values()) {
                    Applications applications = remoteRegistry.getApplicationDeltas();
                    for (Application application : applications.getRegisteredApplications()) {
                        Application appInLocalRegistry =
                                allAppsInLocalRegion.getRegisteredApplications(application.getName());
                        if (appInLocalRegistry == null) {
                            apps.addApplication(application);
                        }
                    }
                }
            }
            // The hash code is computed over the *full* registry view so clients can
            // reconcile their merged state against the server's.
            Applications allApps = getApplications(!disableTransparentFallback);
            apps.setAppsHashCode(allApps.getReconcileHashCode());
            return apps;
        } finally {
            write.unlock();
        }
    }
    /**
     * Gets the application delta also including instances from the passed remote regions, with the instances from the
     * local region. <br/>
     *
     * The remote regions from where the instances will be chosen can further be restricted if this application does not
     * appear in the whitelist specified for the region as returned by
     * {@link EurekaServerConfig#getRemoteRegionAppWhitelist(String)} for a region. In case, there is no whitelist
     * defined for a region, this method will also look for a global whitelist by passing <code>null</code> to the
     * method {@link EurekaServerConfig#getRemoteRegionAppWhitelist(String)} <br/>
     *
     * @param remoteRegions The remote regions for which the instances are to be queried. The instances may be limited
     *                      by a whitelist as explained above. If <code>null</code> all remote regions are included.
     *                      If empty list then no remote region is included.
     *
     * @return The delta with instances from the passed remote regions as well as local region. The instances
     * from remote regions can be further be restricted as explained above. <code>null</code> if the application does
     * not exist locally or in remote regions.
     */
    public Applications getApplicationDeltasFromMultipleRegions(String[] remoteRegions) {
        if (null == remoteRegions) {
            remoteRegions = allKnownRemoteRegions; // null means all remote regions.
        }
        boolean includeRemoteRegion = remoteRegions.length != 0;
        // Separate cache-miss counters so remote-region delta reads can be monitored
        // apart from purely local ones.
        if (includeRemoteRegion) {
            GET_ALL_WITH_REMOTE_REGIONS_CACHE_MISS_DELTA.increment();
        } else {
            GET_ALL_CACHE_MISS_DELTA.increment();
        }
        Applications apps = new Applications();
        apps.setVersion(responseCache.getVersionDeltaWithRegions().get());
        Map<String, Application> applicationInstancesMap = new HashMap<String, Application>();
        // The write lock keeps the recently-changed queue stable while it is being
        // drained into the delta payload.
        write.lock();
        try {
            Iterator<RecentlyChangedItem> iter = this.recentlyChangedQueue.iterator();
            logger.debug("The number of elements in the delta queue is :{}", this.recentlyChangedQueue.size());
            // Group recently changed local instances by application name.
            while (iter.hasNext()) {
                Lease<InstanceInfo> lease = iter.next().getLeaseInfo();
                InstanceInfo instanceInfo = lease.getHolder();
                logger.debug("The instance id {} is found with status {} and actiontype {}",
                        instanceInfo.getId(), instanceInfo.getStatus().name(), instanceInfo.getActionType().name());
                Application app = applicationInstancesMap.get(instanceInfo.getAppName());
                if (app == null) {
                    app = new Application(instanceInfo.getAppName());
                    applicationInstancesMap.put(instanceInfo.getAppName(), app);
                    apps.addApplication(app);
                }
                // Copy so later lease mutations do not leak into the returned delta.
                app.addInstance(new InstanceInfo(decorateInstanceInfo(lease)));
            }
            if (includeRemoteRegion) {
                // Merge deltas from each requested remote region, subject to the
                // per-region (or global) application whitelist.
                for (String remoteRegion : remoteRegions) {
                    RemoteRegionRegistry remoteRegistry = regionNameVSRemoteRegistry.get(remoteRegion);
                    if (null != remoteRegistry) {
                        Applications remoteAppsDelta = remoteRegistry.getApplicationDeltas();
                        if (null != remoteAppsDelta) {
                            for (Application application : remoteAppsDelta.getRegisteredApplications()) {
                                if (shouldFetchFromRemoteRegistry(application.getName(), remoteRegion)) {
                                    Application appInstanceTillNow =
                                            apps.getRegisteredApplications(application.getName());
                                    if (appInstanceTillNow == null) {
                                        appInstanceTillNow = new Application(application.getName());
                                        apps.addApplication(appInstanceTillNow);
                                    }
                                    for (InstanceInfo instanceInfo : application.getInstances()) {
                                        appInstanceTillNow.addInstance(new InstanceInfo(instanceInfo));
                                    }
                                }
                            }
                        }
                    }
                }
            }
            // The hash code is computed over the *full* multi-region view so clients can
            // reconcile their merged state against the server's.
            Applications allApps = getApplicationsFromMultipleRegions(remoteRegions);
            apps.setAppsHashCode(allApps.getReconcileHashCode());
            return apps;
        } finally {
            write.unlock();
        }
    }
    /**
     * Gets the {@link InstanceInfo} information.
     *
     * @param appName the application name for which the information is requested.
     * @param id the unique identifier of the instance.
     * @return the information about the instance.
     */
    @Override
    public InstanceInfo getInstanceByAppAndId(String appName, String id) {
        // Remote-region lookup is unconditionally enabled for this overload.
        return this.getInstanceByAppAndId(appName, id, true);
    }
/**
* Gets the {@link InstanceInfo} information.
*
* @param appName the application name for which the information is requested.
* @param id the unique identifier of the instance.
* @param includeRemoteRegions true, if we need to include applications from remote regions
* as indicated by the region {@link URL} by this property
* {@link EurekaServerConfig#getRemoteRegionUrls()}, false otherwise
* @return the information about the instance.
*/
@Override
public InstanceInfo getInstanceByAppAndId(String appName, String id, boolean includeRemoteRegions) {
Map<String, Lease<InstanceInfo>> leaseMap = registry.get(appName);
Lease<InstanceInfo> lease = null;
if (leaseMap != null) {
lease = leaseMap.get(id);
}
if (lease != null
&& (!isLeaseExpirationEnabled() || !lease.isExpired())) {
return decorateInstanceInfo(lease);
} else if (includeRemoteRegions) {
for (RemoteRegionRegistry remoteRegistry : this.regionNameVSRemoteRegistry.values()) {
Application application = remoteRegistry.getApplication(appName);
if (application != null) {
return application.getByInstanceId(id);
}
}
}
return null;
}
    /**
     * @deprecated Try {@link #getInstanceByAppAndId(String, String)} instead.
     *
     * Get all instances by ID, including automatically asking other regions if the ID is unknown.
     *
     * @see com.netflix.discovery.shared.LookupService#getInstancesById(String)
     */
    @Deprecated
    public List<InstanceInfo> getInstancesById(String id) {
        // Remote-region lookup is unconditionally enabled for this overload.
        return this.getInstancesById(id, true);
    }
/**
* @deprecated Try {@link #getInstanceByAppAndId(String, String, boolean)} instead.
*
* Get the list of instances by its unique id.
*
* @param id the unique id of the instance
* @param includeRemoteRegions true, if we need to include applications from remote regions
* as indicated by the region {@link URL} by this property
* {@link EurekaServerConfig#getRemoteRegionUrls()}, false otherwise
* @return list of InstanceInfo objects.
*/
@Deprecated
public List<InstanceInfo> getInstancesById(String id, boolean includeRemoteRegions) {
List<InstanceInfo> list = new ArrayList<>();
for (Iterator<Entry<String, Map<String, Lease<InstanceInfo>>>> iter =
registry.entrySet().iterator(); iter.hasNext(); ) {
Map<String, Lease<InstanceInfo>> leaseMap = iter.next().getValue();
if (leaseMap != null) {
Lease<InstanceInfo> lease = leaseMap.get(id);
if (lease == null || (isLeaseExpirationEnabled() && lease.isExpired())) {
continue;
}
if (list == Collections.EMPTY_LIST) {
list = new ArrayList<>();
}
list.add(decorateInstanceInfo(lease));
}
}
if (list.isEmpty() && includeRemoteRegions) {
for (RemoteRegionRegistry remoteRegistry : this.regionNameVSRemoteRegistry.values()) {
for (Application application : remoteRegistry.getApplications()
.getRegisteredApplications()) {
InstanceInfo instanceInfo = application.getByInstanceId(id);
if (instanceInfo != null) {
list.add(instanceInfo);
return list;
}
}
}
}
return list;
}
    /**
     * Enriches the {@link InstanceInfo} held by the given lease with lease-derived
     * timing data (registration/renewal/eviction/service-up timestamps) and marks it
     * as served by this (coordinating) discovery server.
     *
     * NOTE(review): this mutates the lease's holder in place via setLeaseInfo rather
     * than returning a copy, so the enriched LeaseInfo is visible to every other
     * reader of the same InstanceInfo.
     */
    private InstanceInfo decorateInstanceInfo(Lease<InstanceInfo> lease) {
        InstanceInfo info = lease.getHolder();
        // client app settings
        int renewalInterval = LeaseInfo.DEFAULT_LEASE_RENEWAL_INTERVAL;
        int leaseDuration = LeaseInfo.DEFAULT_LEASE_DURATION;
        // TODO: clean this up
        if (info.getLeaseInfo() != null) {
            // Preserve the client-advertised renewal cadence when available.
            renewalInterval = info.getLeaseInfo().getRenewalIntervalInSecs();
            leaseDuration = info.getLeaseInfo().getDurationInSecs();
        }
        info.setLeaseInfo(LeaseInfo.Builder.newBuilder()
                .setRegistrationTimestamp(lease.getRegistrationTimestamp())
                .setRenewalTimestamp(lease.getLastRenewalTimestamp())
                .setServiceUpTimestamp(lease.getServiceUpTimestamp())
                .setRenewalIntervalInSecs(renewalInterval)
                .setDurationInSecs(leaseDuration)
                .setEvictionTimestamp(lease.getEvictionTimestamp()).build());
        info.setIsCoordinatingDiscoveryServer();
        return info;
    }
    /**
     * Servo route; do not call.
     *
     * @return servo data
     */
    @com.netflix.servo.annotations.Monitor(name = "numOfRenewsInLastMin",
            description = "Number of total heartbeats received in the last minute", type = DataSourceType.GAUGE)
    @Override
    public long getNumOfRenewsInLastMin() {
        // Backed by a rolling per-minute counter of received heartbeats.
        return renewsLastMin.getCount();
    }
    /**
     * Gets the threshold for the renewals per minute.
     *
     * @return the integer representing the threshold for the renewals per
     *         minute.
     */
    @com.netflix.servo.annotations.Monitor(name = "numOfRenewsPerMinThreshold", type = DataSourceType.GAUGE)
    @Override
    public int getNumOfRenewsPerMinThreshold() {
        // Maintained by updateRenewsPerMinThreshold(); used by self-preservation checks.
        return numberOfRenewsPerMinThreshold;
    }
/**
* Get the N instances that are most recently registered.
*
* @return
*/
@Override
public List<Pair<Long, String>> getLastNRegisteredInstances() {
List<Pair<Long, String>> list = new ArrayList<>(recentRegisteredQueue);
Collections.reverse(list);
return list;
}
/**
* Get the N instances that have most recently canceled.
*
* @return
*/
@Override
public List<Pair<Long, String>> getLastNCanceledInstances() {
List<Pair<Long, String>> list = new ArrayList<>(recentCanceledQueue);
Collections.reverse(list);
return list;
}
    /**
     * Evicts all response-cache entries keyed by the app name or either of its
     * (optional) VIP addresses, so the next read regenerates the payload.
     */
    private void invalidateCache(String appName, @Nullable String vipAddress, @Nullable String secureVipAddress) {
        // invalidate cache
        responseCache.invalidate(appName, vipAddress, secureVipAddress);
    }
    /**
     * Recomputes the minimum renewals-per-minute expected before self-preservation
     * considers the registry healthy:
     * clients * (60 / renewal-interval-seconds) * renewal-percent-threshold.
     */
    protected void updateRenewsPerMinThreshold() {
        this.numberOfRenewsPerMinThreshold = (int) (this.expectedNumberOfClientsSendingRenews
                * (60.0 / serverConfig.getExpectedClientRenewalIntervalSeconds())
                * serverConfig.getRenewalPercentThreshold());
    }
private static final class RecentlyChangedItem {
private long lastUpdateTime;
private Lease<InstanceInfo> leaseInfo;
public RecentlyChangedItem(Lease<InstanceInfo> lease) {
this.leaseInfo = lease;
lastUpdateTime = System.currentTimeMillis();
}
public long getLastUpdateTime() {
return this.lastUpdateTime;
}
public Lease<InstanceInfo> getLeaseInfo() {
return this.leaseInfo;
}
}
    /**
     * Finishes registry initialization: starts the heartbeat-per-minute counter and
     * (re)schedules the periodic lease-eviction task at the configured interval,
     * cancelling any previously scheduled task first.
     */
    protected void postInit() {
        renewsLastMin.start();
        if (evictionTaskRef.get() != null) {
            // Replace rather than stack eviction tasks on re-initialization.
            evictionTaskRef.get().cancel();
        }
        evictionTaskRef.set(new EvictionTask());
        evictionTimer.schedule(evictionTaskRef.get(),
                serverConfig.getEvictionIntervalTimerInMs(),
                serverConfig.getEvictionIntervalTimerInMs());
    }
    /**
     * Perform all cleanup and shutdown operations.
     */
    @Override
    public void shutdown() {
        // Stop the periodic timers first, then the rolling counter and response cache.
        deltaRetentionTimer.cancel();
        evictionTimer.cancel();
        renewsLastMin.stop();
        responseCache.stop();
    }
    /**
     * Servo gauge: number of entries in the overridden-instance-status map.
     * NOTE(review): the method name's casing is misspelled but is kept as-is since it
     * may be referenced externally; the published metric name comes from the annotation.
     */
    @com.netflix.servo.annotations.Monitor(name = "numOfElementsinInstanceCache", description = "Number of overrides in the instance Cache", type = DataSourceType.GAUGE)
    public long getNumberofElementsininstanceCache() {
        return overriddenInstanceStatusMap.size();
    }
/* visible for testing */ class EvictionTask extends TimerTask {
private final AtomicLong lastExecutionNanosRef = new AtomicLong(0l);
@Override
public void run() {
try {
long compensationTimeMs = getCompensationTimeMs();
logger.info("Running the evict task with compensationTime {}ms", compensationTimeMs);
evict(compensationTimeMs);
} catch (Throwable e) {
logger.error("Could not run the evict task", e);
}
}
/**
* compute a compensation time defined as the actual time this task was executed since the prev iteration,
* vs the configured amount of time for execution. This is useful for cases where changes in time (due to
* clock skew or gc for example) causes the actual eviction task to execute later than the desired time
* according to the configured cycle.
*/
long getCompensationTimeMs() {
long currNanos = getCurrentTimeNano();
long lastNanos = lastExecutionNanosRef.getAndSet(currNanos);
if (lastNanos == 0l) {
return 0l;
}
long elapsedMs = TimeUnit.NANOSECONDS.toMillis(currNanos - lastNanos);
long compensationTime = elapsedMs - serverConfig.getEvictionIntervalTimerInMs();
return compensationTime <= 0l ? 0l : compensationTime;
}
long getCurrentTimeNano() { // for testing
return System.nanoTime();
}
}
/* visible for testing */ static class CircularQueue<E> extends AbstractQueue<E> {
private final ArrayBlockingQueue<E> delegate;
private final int capacity;
public CircularQueue(int capacity) {
this.capacity = capacity;
this.delegate = new ArrayBlockingQueue<>(capacity);
}
@Override
public Iterator<E> iterator() {
return delegate.iterator();
}
@Override
public int size() {
return delegate.size();
}
@Override
public boolean offer(E e) {
while (!delegate.offer(e)) {
delegate.poll();
}
return true;
}
@Override
public E poll() {
return delegate.poll();
}
@Override
public E peek() {
return delegate.peek();
}
@Override
public void clear() {
delegate.clear();
}
@Override
public Object[] toArray() {
return delegate.toArray();
}
}
    /**
     * @return The rule that will process the instance status override.
     *         Supplied by the concrete registry implementation.
     */
    protected abstract InstanceStatusOverrideRule getInstanceInfoOverrideRule();
    /**
     * Computes the effective status for a registering instance by applying the
     * implementation-supplied override rule to the instance, its existing lease
     * (if any), and whether the registration is a replication.
     */
    protected InstanceInfo.InstanceStatus getOverriddenInstanceStatus(InstanceInfo r,
                                                                    Lease<InstanceInfo> existingLease,
                                                                    boolean isReplication) {
        InstanceStatusOverrideRule rule = getInstanceInfoOverrideRule();
        logger.debug("Processing override status using rule: {}", rule);
        return rule.apply(r, existingLease, isReplication).status();
    }
private TimerTask getDeltaRetentionTask() {
return new TimerTask() {
@Override
public void run() {
Iterator<RecentlyChangedItem> it = recentlyChangedQueue.iterator();
while (it.hasNext()) {
if (it.next().getLastUpdateTime() <
System.currentTimeMillis() - serverConfig.getRetentionTimeInMSInDeltaQueue()) {
it.remove();
} else {
break;
}
}
}
};
}
}
package com.netflix.eureka.registry;
import com.netflix.appinfo.ApplicationInfoManager;
import com.netflix.appinfo.InstanceInfo;
import com.netflix.appinfo.InstanceInfo.InstanceStatus;
import com.netflix.discovery.shared.Application;
import com.netflix.discovery.shared.Applications;
import com.netflix.discovery.shared.LookupService;
import com.netflix.discovery.shared.Pair;
import com.netflix.eureka.lease.LeaseManager;
import java.util.List;
import java.util.Map;
/**
 * Server-side registry contract: combines lease management with lookup of
 * registered {@link InstanceInfo} records, optionally spanning remote regions.
 *
 * @author Tomasz Bak
 */
public interface InstanceRegistry extends LeaseManager<InstanceInfo>, LookupService<String> {

    // Opens the registry for traffic; count is presumably the expected number of
    // renewing clients — TODO confirm against implementations.
    void openForTraffic(ApplicationInfoManager applicationInfoManager, int count);

    // Releases registry resources (timers, caches).
    void shutdown();

    /** @deprecated use the appName-qualified overload below. */
    @Deprecated
    void storeOverriddenStatusIfRequired(String id, InstanceStatus overriddenStatus);

    void storeOverriddenStatusIfRequired(String appName, String id, InstanceStatus overriddenStatus);

    boolean statusUpdate(String appName, String id, InstanceStatus newStatus,
                         String lastDirtyTimestamp, boolean isReplication);

    boolean deleteStatusOverride(String appName, String id, InstanceStatus newStatus,
                                 String lastDirtyTimestamp, boolean isReplication);

    // Snapshot of the currently stored per-instance status overrides.
    Map<String, InstanceStatus> overriddenInstanceStatusesSnapshot();

    Applications getApplicationsFromLocalRegionOnly();

    List<Application> getSortedApplications();

    /**
     * Get application information.
     *
     * @param appName The name of the application
     * @param includeRemoteRegion true, if we need to include applications from remote regions
     *                            as indicated by the region {@link java.net.URL} by this property
     *                            {@link com.netflix.eureka.EurekaServerConfig#getRemoteRegionUrls()}, false otherwise
     * @return the application
     */
    Application getApplication(String appName, boolean includeRemoteRegion);

    /**
     * Gets the {@link InstanceInfo} information.
     *
     * @param appName the application name for which the information is requested.
     * @param id the unique identifier of the instance.
     * @return the information about the instance.
     */
    InstanceInfo getInstanceByAppAndId(String appName, String id);

    /**
     * Gets the {@link InstanceInfo} information.
     *
     * @param appName the application name for which the information is requested.
     * @param id the unique identifier of the instance.
     * @param includeRemoteRegions true, if we need to include applications from remote regions
     *                             as indicated by the region {@link java.net.URL} by this property
     *                             {@link com.netflix.eureka.EurekaServerConfig#getRemoteRegionUrls()}, false otherwise
     * @return the information about the instance.
     */
    InstanceInfo getInstanceByAppAndId(String appName, String id, boolean includeRemoteRegions);

    void clearRegistry();

    void initializedResponseCache();

    ResponseCache getResponseCache();

    long getNumOfRenewsInLastMin();

    int getNumOfRenewsPerMinThreshold();

    // NOTE(review): "Thresold" is misspelled but kept — renaming would break implementers.
    int isBelowRenewThresold();

    List<Pair<Long, String>> getLastNRegisteredInstances();

    List<Pair<Long, String>> getLastNCanceledInstances();

    /**
     * Checks whether lease expiration is enabled.
     * @return true if enabled
     */
    boolean isLeaseExpirationEnabled();

    boolean isSelfPreservationModeEnabled();
}
/*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.eureka.registry;
import javax.annotation.Nullable;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.util.Arrays;
import java.util.Collection;
import java.util.Date;
import java.util.List;
import java.util.TimerTask;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;
import java.util.zip.GZIPOutputStream;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Supplier;
import com.google.common.cache.CacheBuilder;
import com.google.common.cache.CacheLoader;
import com.google.common.cache.LoadingCache;
import com.google.common.cache.RemovalListener;
import com.google.common.cache.RemovalNotification;
import com.google.common.collect.Multimap;
import com.google.common.collect.Multimaps;
import com.netflix.appinfo.EurekaAccept;
import com.netflix.appinfo.InstanceInfo;
import com.netflix.discovery.converters.wrappers.EncoderWrapper;
import com.netflix.discovery.shared.Application;
import com.netflix.discovery.shared.Applications;
import com.netflix.eureka.EurekaServerConfig;
import com.netflix.eureka.Version;
import com.netflix.eureka.resources.CurrentRequestVersion;
import com.netflix.eureka.resources.ServerCodecs;
import com.netflix.servo.annotations.DataSourceType;
import com.netflix.servo.annotations.Monitor;
import com.netflix.servo.monitor.Monitors;
import com.netflix.servo.monitor.Stopwatch;
import com.netflix.servo.monitor.Timer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* The class that is responsible for caching registry information that will be
* queried by the clients.
*
* <p>
* The cache is maintained in compressed and non-compressed form for three
* categories of requests - all applications, delta changes and for individual
* applications. The compressed form is probably the most efficient in terms of
* network traffic especially when querying all applications.
*
* The cache also maintains separate pay load for <em>JSON</em> and <em>XML</em>
* formats and for multiple versions too.
* </p>
*
* @author Karthik Ranganathan, Greg Kim
*/
public class ResponseCacheImpl implements ResponseCache {

    // Cached enum arrays to avoid re-allocating via values() on every use.
    private static final Key.KeyType[] KEY_TYPE_VALUES = Key.KeyType.values();
    private static final Version[] VERSION_VALUES = Version.values();

    private static final Logger logger = LoggerFactory.getLogger(ResponseCacheImpl.class);

    // Well-known cache key names for the full-registry and delta payloads.
    public static final String ALL_APPS = "ALL_APPS";
    public static final String ALL_APPS_DELTA = "ALL_APPS_DELTA";

    // FIXME deprecated, here for backwards compatibility.
    private static final AtomicLong versionDeltaLegacy = new AtomicLong(0);
    private static final AtomicLong versionDeltaWithRegionsLegacy = new AtomicLong(0);

    // Sentinel meaning "cached, but there is nothing to return".
    private static final String EMPTY_PAYLOAD = "";

    // Daemon timer that refreshes the read-only cache from the read-write cache.
    private final java.util.Timer timer = new java.util.Timer("Eureka-CacheFillTimer", true);

    private final AtomicLong versionDelta = new AtomicLong(0);
    private final AtomicLong versionDeltaWithRegions = new AtomicLong(0);

    // Servo timers measuring serialization/compression cost per payload category.
    private final Timer serializeAllAppsTimer = Monitors.newTimer("serialize-all");
    private final Timer serializeDeltaAppsTimer = Monitors.newTimer("serialize-all-delta");
    private final Timer serializeAllAppsWithRemoteRegionTimer = Monitors.newTimer("serialize-all_remote_region");
    private final Timer serializeDeltaAppsWithRemoteRegionTimer = Monitors.newTimer("serialize-all-delta_remote_region");
    private final Timer serializeOneApptimer = Monitors.newTimer("serialize-one");
    private final Timer serializeViptimer = Monitors.newTimer("serialize-one-vip");
    private final Timer compressPayloadTimer = Monitors.newTimer("compress-payload");

    /**
     * This map holds mapping of keys without regions to a list of keys with region (provided by clients)
     * Since, during invalidation, triggered by a change in registry for local region, we do not know the regions
     * requested by clients, we use this mapping to get all the keys with regions to be invalidated.
     * If we do not do this, any cached user requests containing region keys will not be invalidated and will stick
     * around till expiry. Github issue: https://github.com/Netflix/eureka/issues/118
     */
    private final Multimap<Key, Key> regionSpecificKeys =
            Multimaps.newListMultimap(new ConcurrentHashMap<Key, Collection<Key>>(), new Supplier<List<Key>>() {
                @Override
                public List<Key> get() {
                    return new CopyOnWriteArrayList<Key>();
                }
            });

    // Two-level cache: a periodically refreshed read-only map in front of a
    // loading read-write cache that generates payloads on demand.
    private final ConcurrentMap<Key, Value> readOnlyCacheMap = new ConcurrentHashMap<Key, Value>();
    private final LoadingCache<Key, Value> readWriteCacheMap;
    private final boolean shouldUseReadOnlyResponseCache;
    private final AbstractInstanceRegistry registry;
    private final EurekaServerConfig serverConfig;
    private final ServerCodecs serverCodecs;
    /**
     * Builds the response cache: configures the auto-expiring read-write loading
     * cache (which also maintains the region-specific key index), optionally starts
     * the timer that refreshes the read-only front cache, and registers JMX monitors.
     */
    ResponseCacheImpl(EurekaServerConfig serverConfig, ServerCodecs serverCodecs, AbstractInstanceRegistry registry) {
        this.serverConfig = serverConfig;
        this.serverCodecs = serverCodecs;
        this.shouldUseReadOnlyResponseCache = serverConfig.shouldUseReadOnlyResponseCache();
        this.registry = registry;
        long responseCacheUpdateIntervalMs = serverConfig.getResponseCacheUpdateIntervalMs();
        this.readWriteCacheMap =
                CacheBuilder.newBuilder().initialCapacity(serverConfig.getInitialCapacityOfResponseCache())
                        .expireAfterWrite(serverConfig.getResponseCacheAutoExpirationInSeconds(), TimeUnit.SECONDS)
                        .removalListener(new RemovalListener<Key, Value>() {
                            @Override
                            public void onRemoval(RemovalNotification<Key, Value> notification) {
                                // Keep the regionless->regioned key index in sync on eviction.
                                Key removedKey = notification.getKey();
                                if (removedKey.hasRegions()) {
                                    Key cloneWithNoRegions = removedKey.cloneWithoutRegions();
                                    regionSpecificKeys.remove(cloneWithNoRegions, removedKey);
                                }
                            }
                        })
                        .build(new CacheLoader<Key, Value>() {
                            @Override
                            public Value load(Key key) throws Exception {
                                // Record region-carrying keys so later invalidations of the
                                // regionless key can find and evict them too.
                                if (key.hasRegions()) {
                                    Key cloneWithNoRegions = key.cloneWithoutRegions();
                                    regionSpecificKeys.put(cloneWithNoRegions, key);
                                }
                                Value value = generatePayload(key);
                                return value;
                            }
                        });
        if (shouldUseReadOnlyResponseCache) {
            // Align the first refresh to the next interval boundary, then repeat.
            timer.schedule(getCacheUpdateTask(),
                    new Date(((System.currentTimeMillis() / responseCacheUpdateIntervalMs) * responseCacheUpdateIntervalMs)
                            + responseCacheUpdateIntervalMs),
                    responseCacheUpdateIntervalMs);
        }
        try {
            Monitors.registerObject(this);
        } catch (Throwable e) {
            logger.warn("Cannot register the JMX monitor for the InstanceRegistry", e);
        }
    }
    /**
     * Builds the periodic task that refreshes the read-only cache: for every key
     * already present, reloads the value from the read-write cache and replaces
     * the read-only entry if it changed.
     */
    private TimerTask getCacheUpdateTask() {
        return new TimerTask() {
            @Override
            public void run() {
                logger.debug("Updating the client cache from response cache");
                for (Key key : readOnlyCacheMap.keySet()) {
                    if (logger.isDebugEnabled()) {
                        logger.debug("Updating the client cache from response cache for key : {} {} {} {}",
                                key.getEntityType(), key.getName(), key.getVersion(), key.getType());
                    }
                    try {
                        // Payload generation reads the request version from a thread-local;
                        // set it for this key and always clear it afterwards.
                        CurrentRequestVersion.set(key.getVersion());
                        Value cacheValue = readWriteCacheMap.get(key);
                        Value currentCacheValue = readOnlyCacheMap.get(key);
                        // Reference inequality detects a regenerated Value instance.
                        if (cacheValue != currentCacheValue) {
                            readOnlyCacheMap.put(key, cacheValue);
                        }
                    } catch (Throwable th) {
                        // One bad key must not stop the refresh of the remaining keys.
                        logger.error("Error while updating the client cache from response cache for key {}", key.toStringCompact(), th);
                    } finally {
                        CurrentRequestVersion.remove();
                    }
                }
            }
        };
    }
/**
 * Get the cached information about applications.
 *
 * <p>
 * If the cached information is not available it is generated on the first
 * request. After the first request, the information is then updated
 * periodically by a background thread.
 * </p>
 *
 * @param key the key for which the cached information needs to be obtained.
 * @return payload which contains information about the applications, or
 *         {@code null} when no payload exists (or it is the empty sentinel).
 */
public String get(final Key key) {
    // Delegates to the two-argument overload using the configured read-only-cache setting.
    return get(key, shouldUseReadOnlyResponseCache);
}
/**
 * Resolve the payload for {@code key}, optionally consulting the read-only cache.
 * The empty-payload sentinel is reported as a miss ({@code null}).
 */
@VisibleForTesting
String get(final Key key, boolean useReadOnlyCache) {
    final Value value = getValue(key, useReadOnlyCache);
    if (value == null) {
        return null;
    }
    final String body = value.getPayload();
    return body.equals(EMPTY_PAYLOAD) ? null : body;
}
/**
 * Get the compressed information about the applications.
 *
 * @param key the key for which the compressed cached information needs to be obtained.
 * @return GZIP-compressed payload bytes, or {@code null} when nothing is cached
 *         (or compression was unavailable for the entry).
 */
public byte[] getGZIP(Key key) {
    final Value cached = getValue(key, shouldUseReadOnlyResponseCache);
    return cached == null ? null : cached.getGzipped();
}
/**
 * Stops the background read-only-cache refresh timer and unregisters the JMX monitor.
 */
@Override
public void stop() {
    timer.cancel();
    Monitors.unregisterObject(this);
}
/**
 * Invalidate every cached payload variant that could contain the given application:
 * both accept flavors (full/compact), all key types and versions, the ALL_APPS and
 * ALL_APPS_DELTA aggregate entries, and the VIP/SVIP entries when addresses are given.
 *
 * @param appName          the application name of the application.
 * @param vipAddress       the application's VIP address, may be {@code null}.
 * @param secureVipAddress the application's secure VIP address, may be {@code null}.
 */
@Override
public void invalidate(String appName, @Nullable String vipAddress, @Nullable String secureVipAddress) {
    for (Key.KeyType keyType : KEY_TYPE_VALUES) {
        for (Version version : VERSION_VALUES) {
            invalidate(
                    new Key(Key.EntityType.Application, appName, keyType, version, EurekaAccept.full),
                    new Key(Key.EntityType.Application, appName, keyType, version, EurekaAccept.compact),
                    new Key(Key.EntityType.Application, ALL_APPS, keyType, version, EurekaAccept.full),
                    new Key(Key.EntityType.Application, ALL_APPS, keyType, version, EurekaAccept.compact),
                    new Key(Key.EntityType.Application, ALL_APPS_DELTA, keyType, version, EurekaAccept.full),
                    new Key(Key.EntityType.Application, ALL_APPS_DELTA, keyType, version, EurekaAccept.compact)
            );
            if (vipAddress != null) {
                invalidate(new Key(Key.EntityType.VIP, vipAddress, keyType, version, EurekaAccept.full));
            }
            if (secureVipAddress != null) {
                invalidate(new Key(Key.EntityType.SVIP, secureVipAddress, keyType, version, EurekaAccept.full));
            }
        }
    }
}
/**
 * Invalidate the cache information given the list of keys, including any
 * region-specific variants previously derived from each key.
 *
 * @param keys the list of keys for which the cache information needs to be invalidated.
 */
public void invalidate(Key... keys) {
    for (Key key : keys) {
        logger.debug("Invalidating the response cache key : {} {} {} {}, {}",
                key.getEntityType(), key.getName(), key.getVersion(), key.getType(), key.getEurekaAccept());
        readWriteCacheMap.invalidate(key);
        Collection<Key> keysWithRegions = regionSpecificKeys.get(key);
        if (null != keysWithRegions && !keysWithRegions.isEmpty()) {
            for (Key keyWithRegion : keysWithRegions) {
                // BUGFIX: log the region-specific key actually being invalidated
                // (previously this logged the base key's fields again).
                logger.debug("Invalidating the response cache key : {} {} {} {} {}",
                        keyWithRegion.getEntityType(), keyWithRegion.getName(), keyWithRegion.getVersion(),
                        keyWithRegion.getType(), keyWithRegion.getEurekaAccept());
                readWriteCacheMap.invalidate(keyWithRegion);
            }
        }
    }
}
/**
 * Gets the version number of the cached data.
 *
 * @return the version number of the cached data.
 */
@Override
public AtomicLong getVersionDelta() {
    return versionDelta;
}
/**
 * Gets the version number of the cached data with remote regions.
 *
 * @return the version number of the cached data with remote regions.
 */
@Override
public AtomicLong getVersionDeltaWithRegions() {
    return versionDeltaWithRegions;
}
/**
 * @deprecated use instance method {@link #getVersionDelta()}
 *
 * Gets the version number of the cached data.
 *
 * @return the version number of the cached data.
 */
@Deprecated
public static AtomicLong getVersionDeltaStatic() {
    return versionDeltaLegacy;
}
/**
 * @deprecated use instance method {@link #getVersionDeltaWithRegions()}
 *
 * Gets the version number of the cached data with remote regions.
 *
 * @return the version number of the cached data with remote regions.
 */
@Deprecated
public static AtomicLong getVersionDeltaWithRegionsLegacy() {
    return versionDeltaWithRegionsLegacy;
}
/**
 * Get the number of items in the response cache.
 *
 * @return int value representing the number of items in response cache
 *         (the read/write cache; the read-only map is a subset of it).
 */
@Monitor(name = "responseCacheSize", type = DataSourceType.GAUGE)
public int getCurrentSize() {
    return readWriteCacheMap.asMap().size();
}
/**
 * Get the payload in both compressed and uncompressed form.
 * When {@code useReadOnlyCache} is set, the read-only map is consulted first and
 * back-filled from the read/write cache on a miss. Any failure is logged and
 * reported as {@code null}.
 */
@VisibleForTesting
Value getValue(final Key key, boolean useReadOnlyCache) {
    try {
        if (!useReadOnlyCache) {
            return readWriteCacheMap.get(key);
        }
        Value cached = readOnlyCacheMap.get(key);
        if (cached == null) {
            cached = readWriteCacheMap.get(key);
            readOnlyCacheMap.put(key, cached);
        }
        return cached;
    } catch (Throwable t) {
        logger.error("Cannot get value for key : {}", key, t);
        return null;
    }
}
/**
 * Generate pay load with both JSON and XML formats for all applications.
 * Encoding failures are logged and surfaced as an empty string.
 */
private String getPayLoad(Key key, Applications apps) {
    final EncoderWrapper encoder = serverCodecs.getEncoder(key.getType(), key.getEurekaAccept());
    String encoded;
    try {
        encoded = encoder.encode(apps);
    } catch (Exception e) {
        logger.error("Failed to encode the payload for all apps", e);
        return "";
    }
    if (logger.isDebugEnabled()) {
        logger.debug("New application cache entry {} with apps hashcode {}", key.toStringCompact(), apps.getAppsHashCode());
    }
    return encoded;
}
/**
 * Generate pay load with both JSON and XML formats for a given application.
 * A {@code null} application yields the empty-payload sentinel; encoding
 * failures are logged and surfaced as an empty string.
 */
private String getPayLoad(Key key, Application app) {
    if (app == null) {
        return EMPTY_PAYLOAD;
    }
    final EncoderWrapper encoder = serverCodecs.getEncoder(key.getType(), key.getEurekaAccept());
    try {
        return encoder.encode(app);
    } catch (Exception e) {
        logger.error("Failed to encode the payload for application {}", app.getName(), e);
        return "";
    }
}
/*
 * Generate pay load for the given key.
 *
 * Dispatches on the key's entity type: Application keys produce the full registry
 * (ALL_APPS), the recent-changes delta (ALL_APPS_DELTA) or a single application;
 * VIP/SVIP keys produce the filtered view from getApplicationsForVip(). Serialization
 * timers and the delta version counters are updated as side effects.
 */
private Value generatePayload(Key key) {
    Stopwatch tracer = null;
    try {
        String payload;
        switch (key.getEntityType()) {
            case Application:
                boolean isRemoteRegionRequested = key.hasRegions();
                if (ALL_APPS.equals(key.getName())) {
                    if (isRemoteRegionRequested) {
                        tracer = serializeAllAppsWithRemoteRegionTimer.start();
                        payload = getPayLoad(key, registry.getApplicationsFromMultipleRegions(key.getRegions()));
                    } else {
                        tracer = serializeAllAppsTimer.start();
                        payload = getPayLoad(key, registry.getApplications());
                    }
                } else if (ALL_APPS_DELTA.equals(key.getName())) {
                    if (isRemoteRegionRequested) {
                        tracer = serializeDeltaAppsWithRemoteRegionTimer.start();
                        // Each delta generation bumps the version counters (instance + legacy static).
                        versionDeltaWithRegions.incrementAndGet();
                        versionDeltaWithRegionsLegacy.incrementAndGet();
                        payload = getPayLoad(key,
                                registry.getApplicationDeltasFromMultipleRegions(key.getRegions()));
                    } else {
                        tracer = serializeDeltaAppsTimer.start();
                        versionDelta.incrementAndGet();
                        versionDeltaLegacy.incrementAndGet();
                        payload = getPayLoad(key, registry.getApplicationDeltas());
                    }
                } else {
                    tracer = serializeOneApptimer.start();
                    payload = getPayLoad(key, registry.getApplication(key.getName()));
                }
                break;
            case VIP:
            case SVIP:
                tracer = serializeViptimer.start();
                payload = getPayLoad(key, getApplicationsForVip(key, registry));
                break;
            default:
                logger.error("Unidentified entity type: {} found in the cache key.", key.getEntityType());
                payload = "";
                break;
        }
        return new Value(payload);
    } finally {
        // tracer stays null when the default branch is hit or start() was never reached.
        if (tracer != null) {
            tracer.stop();
        }
    }
}
/**
 * Builds an {@link Applications} view containing only the instances whose VIP
 * (or secure VIP, depending on the key's entity type) comma-separated address
 * list includes the key's name. The result's apps hash code is set from its
 * reconcile hash code before returning.
 */
private static Applications getApplicationsForVip(Key key, AbstractInstanceRegistry registry) {
    logger.debug(
            "Retrieving applications from registry for key : {} {} {} {}",
            key.getEntityType(), key.getName(), key.getVersion(), key.getType());
    Applications toReturn = new Applications();
    Applications applications = registry.getApplications();
    for (Application application : applications.getRegisteredApplications()) {
        Application appToAdd = null;
        for (InstanceInfo instanceInfo : application.getInstances()) {
            String vipAddress;
            if (Key.EntityType.VIP.equals(key.getEntityType())) {
                vipAddress = instanceInfo.getVIPAddress();
            } else if (Key.EntityType.SVIP.equals(key.getEntityType())) {
                vipAddress = instanceInfo.getSecureVipAddress();
            } else {
                // should not happen, but just in case.
                continue;
            }
            if (vipAddress != null) {
                // Direct membership test over the address list; equivalent to (and
                // cheaper than) the previous per-instance sort + binary search.
                if (Arrays.asList(vipAddress.split(",")).contains(key.getName())) {
                    if (appToAdd == null) {
                        // Lazily create the application entry on the first matching instance.
                        appToAdd = new Application(application.getName());
                        toReturn.addApplication(appToAdd);
                    }
                    appToAdd.addInstance(instanceInfo);
                }
            }
        }
    }
    toReturn.setAppsHashCode(toReturn.getReconcileHashCode());
    logger.debug(
            "Retrieved applications from registry for key : {} {} {} {}, reconcile hashcode: {}",
            key.getEntityType(), key.getName(), key.getVersion(), key.getType(),
            toReturn.getReconcileHashCode());
    return toReturn;
}
/**
 * The class that stores payload in both compressed and uncompressed form.
 *
 * The GZIP bytes are computed eagerly at construction; when the payload is the
 * empty sentinel, or compression fails, {@link #getGzipped()} returns {@code null}.
 */
public class Value {
    private final String payload;
    private byte[] gzipped;

    public Value(String payload) {
        this.payload = payload;
        if (!EMPTY_PAYLOAD.equals(payload)) {
            Stopwatch tracer = compressPayloadTimer.start();
            // try-with-resources closes both streams even when write() throws;
            // the previous code leaked them on the exception path.
            try (ByteArrayOutputStream bos = new ByteArrayOutputStream();
                 GZIPOutputStream out = new GZIPOutputStream(bos)) {
                // NOTE(review): getBytes() uses the platform default charset —
                // presumably fine for the encoded payloads, but confirm before
                // switching to an explicit UTF-8.
                byte[] rawBytes = payload.getBytes();
                out.write(rawBytes);
                // finish() flushes the GZIP trailer so bos holds the complete stream.
                out.finish();
                gzipped = bos.toByteArray();
            } catch (IOException e) {
                gzipped = null;
            } finally {
                if (tracer != null) {
                    tracer.stop();
                }
            }
        } else {
            gzipped = null;
        }
    }

    public String getPayload() {
        return payload;
    }

    public byte[] getGzipped() {
        return gzipped;
    }
}
}
| 8,204 |
0 | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka/registry | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka/registry/rule/AlwaysMatchInstanceStatusRule.java | package com.netflix.eureka.registry.rule;
import com.netflix.appinfo.InstanceInfo;
import com.netflix.eureka.lease.Lease;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* This rule matches always and returns the current status of the instance.
*
* Created by Nikos Michalakis on 7/13/16.
*/
public class AlwaysMatchInstanceStatusRule implements InstanceStatusOverrideRule {
private static final Logger logger = LoggerFactory.getLogger(AlwaysMatchInstanceStatusRule.class);
@Override
public StatusOverrideResult apply(InstanceInfo instanceInfo,
Lease<InstanceInfo> existingLease,
boolean isReplication) {
logger.debug("Returning the default instance status {} for instance {}", instanceInfo.getStatus(),
instanceInfo.getId());
return StatusOverrideResult.matchingStatus(instanceInfo.getStatus());
}
@Override
public String toString() {
return AlwaysMatchInstanceStatusRule.class.getName();
}
}
| 8,205 |
0 | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka/registry | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka/registry/rule/AsgEnabledRule.java | package com.netflix.eureka.registry.rule;
import com.netflix.appinfo.InstanceInfo;
import com.netflix.appinfo.InstanceInfo.InstanceStatus;
import com.netflix.eureka.aws.AsgClient;
import com.netflix.eureka.lease.Lease;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Rule that maps an instance's ASG state to a status: when the instance belongs to
 * an ASG, a disabled ASG forces OUT_OF_SERVICE and an enabled one forces UP.
 * Instances without an ASG name are not matched by this rule.
 *
 * Created by Nikos Michalakis on 7/14/16.
 */
public class AsgEnabledRule implements InstanceStatusOverrideRule {
    private static final Logger logger = LoggerFactory.getLogger(AsgEnabledRule.class);

    private final AsgClient asgClient;

    public AsgEnabledRule(AsgClient asgClient) {
        this.asgClient = asgClient;
    }

    @Override
    public StatusOverrideResult apply(InstanceInfo instanceInfo, Lease<InstanceInfo> existingLease, boolean isReplication) {
        String asgName = instanceInfo.getASGName();
        if (asgName == null) {
            // No ASG associated with this instance: the rule has no opinion.
            return StatusOverrideResult.NO_MATCH;
        }
        boolean isASGDisabled = !asgClient.isASGEnabled(instanceInfo);
        logger.debug("The ASG name is specified {} and the value is {}", asgName, isASGDisabled);
        return StatusOverrideResult.matchingStatus(
                isASGDisabled ? InstanceStatus.OUT_OF_SERVICE : InstanceStatus.UP);
    }

    @Override
    public String toString() {
        return AsgEnabledRule.class.getName();
    }
}
| 8,206 |
0 | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka/registry | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka/registry/rule/LeaseExistsRule.java | package com.netflix.eureka.registry.rule;
import com.netflix.appinfo.InstanceInfo;
import com.netflix.eureka.lease.Lease;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Rule letting the server's recorded lease win over a direct client update when
 * the lease status is UP or OUT_OF_SERVICE. Replicated updates are never
 * overridden by this rule.
 *
 * Created by Nikos Michalakis on 7/13/16.
 */
public class LeaseExistsRule implements InstanceStatusOverrideRule {
    private static final Logger logger = LoggerFactory.getLogger(LeaseExistsRule.class);

    @Override
    public StatusOverrideResult apply(InstanceInfo instanceInfo,
                                      Lease<InstanceInfo> existingLease,
                                      boolean isReplication) {
        // This is for backward compatibility until all applications have ASG names:
        // while starting up, a client's status may otherwise override status
        // replicated from other servers.
        if (isReplication) {
            return StatusOverrideResult.NO_MATCH;
        }
        if (existingLease == null) {
            return StatusOverrideResult.NO_MATCH;
        }
        InstanceInfo.InstanceStatus existingStatus = existingLease.getHolder().getStatus();
        // Allow the server to have its way when the status is UP or OUT_OF_SERVICE.
        boolean serverWins = InstanceInfo.InstanceStatus.OUT_OF_SERVICE.equals(existingStatus)
                || InstanceInfo.InstanceStatus.UP.equals(existingStatus);
        if (serverWins) {
            logger.debug("There is already an existing lease with status {} for instance {}",
                    existingLease.getHolder().getStatus().name(),
                    existingLease.getHolder().getId());
            return StatusOverrideResult.matchingStatus(existingLease.getHolder().getStatus());
        }
        return StatusOverrideResult.NO_MATCH;
    }

    @Override
    public String toString() {
        return LeaseExistsRule.class.getName();
    }
}
| 8,207 |
0 | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka/registry | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka/registry/rule/DownOrStartingRule.java | package com.netflix.eureka.registry.rule;
import com.netflix.appinfo.InstanceInfo;
import com.netflix.eureka.lease.Lease;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* This rule matches if the instance is DOWN or STARTING.
*
* Created by Nikos Michalakis on 7/13/16.
*/
public class DownOrStartingRule implements InstanceStatusOverrideRule {
private static final Logger logger = LoggerFactory.getLogger(DownOrStartingRule.class);
@Override
public StatusOverrideResult apply(InstanceInfo instanceInfo,
Lease<InstanceInfo> existingLease,
boolean isReplication) {
// ReplicationInstance is DOWN or STARTING - believe that, but when the instance says UP, question that
// The client instance sends STARTING or DOWN (because of heartbeat failures), then we accept what
// the client says. The same is the case with replica as well.
// The OUT_OF_SERVICE from the client or replica needs to be confirmed as well since the service may be
// currently in SERVICE
if ((!InstanceInfo.InstanceStatus.UP.equals(instanceInfo.getStatus()))
&& (!InstanceInfo.InstanceStatus.OUT_OF_SERVICE.equals(instanceInfo.getStatus()))) {
logger.debug("Trusting the instance status {} from replica or instance for instance {}",
instanceInfo.getStatus(), instanceInfo.getId());
return StatusOverrideResult.matchingStatus(instanceInfo.getStatus());
}
return StatusOverrideResult.NO_MATCH;
}
@Override
public String toString() {
return DownOrStartingRule.class.getName();
}
}
| 8,208 |
0 | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka/registry | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka/registry/rule/OverrideExistsRule.java | package com.netflix.eureka.registry.rule;
import com.netflix.appinfo.InstanceInfo;
import com.netflix.eureka.lease.Lease;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.Map;
/**
 * This rule checks to see if we have overrides for an instance and if we do then we return those.
 *
 * Created by Nikos Michalakis on 7/13/16.
 */
public class OverrideExistsRule implements InstanceStatusOverrideRule {

    private static final Logger logger = LoggerFactory.getLogger(OverrideExistsRule.class);

    // Made final: assigned once in the constructor and never replaced (consistent
    // with the other rules' fields). The map contents remain shared and mutable.
    private final Map<String, InstanceInfo.InstanceStatus> statusOverrides;

    public OverrideExistsRule(Map<String, InstanceInfo.InstanceStatus> statusOverrides) {
        this.statusOverrides = statusOverrides;
    }

    @Override
    public StatusOverrideResult apply(InstanceInfo instanceInfo, Lease<InstanceInfo> existingLease, boolean isReplication) {
        InstanceInfo.InstanceStatus overridden = statusOverrides.get(instanceInfo.getId());
        // If there are instance specific overrides, then they win - otherwise the ASG status
        if (overridden != null) {
            logger.debug("The instance specific override for instance {} and the value is {}",
                    instanceInfo.getId(), overridden.name());
            return StatusOverrideResult.matchingStatus(overridden);
        }
        return StatusOverrideResult.NO_MATCH;
    }

    @Override
    public String toString() {
        return OverrideExistsRule.class.getName();
    }
}
| 8,209 |
0 | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka/registry | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka/registry/rule/FirstMatchWinsCompositeRule.java | package com.netflix.eureka.registry.rule;
import com.netflix.appinfo.InstanceInfo;
import com.netflix.eureka.lease.Lease;
import java.util.ArrayList;
import java.util.List;
/**
 * This rule takes an ordered list of rules and returns the result of the first match or the
 * result of the {@link AlwaysMatchInstanceStatusRule}.
 *
 * Created by Nikos Michalakis on 7/13/16.
 */
public class FirstMatchWinsCompositeRule implements InstanceStatusOverrideRule {

    private final InstanceStatusOverrideRule[] rules;
    private final InstanceStatusOverrideRule defaultRule;
    private final String compositeRuleName;

    public FirstMatchWinsCompositeRule(InstanceStatusOverrideRule... rules) {
        this.rules = rules;
        this.defaultRule = new AlwaysMatchInstanceStatusRule();
        // Pre-compute ("cache") the display name once for toString().
        List<String> ruleNames = new ArrayList<>(rules.length + 1);
        for (InstanceStatusOverrideRule rule : rules) {
            ruleNames.add(rule.toString());
        }
        ruleNames.add(defaultRule.toString());
        this.compositeRuleName = ruleNames.toString();
    }

    @Override
    public StatusOverrideResult apply(InstanceInfo instanceInfo,
                                      Lease<InstanceInfo> existingLease,
                                      boolean isReplication) {
        // First matching rule wins; the default rule always matches.
        for (InstanceStatusOverrideRule rule : rules) {
            StatusOverrideResult result = rule.apply(instanceInfo, existingLease, isReplication);
            if (result.matches()) {
                return result;
            }
        }
        return defaultRule.apply(instanceInfo, existingLease, isReplication);
    }

    @Override
    public String toString() {
        return compositeRuleName;
    }
}
| 8,210 |
0 | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka/registry | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka/registry/rule/InstanceStatusOverrideRule.java | package com.netflix.eureka.registry.rule;
import com.netflix.appinfo.InstanceInfo;
import com.netflix.eureka.lease.Lease;
import com.netflix.eureka.registry.AbstractInstanceRegistry;
/**
 * A single rule that if matched it returns an instance status.
 * The idea is to use an ordered list of such rules and pick the first result that matches.
 *
 * It is designed to be used by
 * {@link AbstractInstanceRegistry#getOverriddenInstanceStatus(InstanceInfo, Lease, boolean)}
 *
 * Created by Nikos Michalakis on 7/13/16.
 */
public interface InstanceStatusOverrideRule {

    /**
     * Match this rule.
     *
     * @param instanceInfo  The instance info whose status we care about.
     * @param existingLease Does the instance have an existing lease already? If so let's consider that.
     *                      May be {@code null} when no lease is recorded.
     * @param isReplication When overriding consider if we are under a replication mode from other servers.
     * @return A result with whether we matched and what we propose the status to be overridden to.
     */
    StatusOverrideResult apply(final InstanceInfo instanceInfo,
                               final Lease<InstanceInfo> existingLease,
                               boolean isReplication);
}
| 8,211 |
0 | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka/registry | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka/registry/rule/StatusOverrideResult.java | package com.netflix.eureka.registry.rule;
import com.netflix.appinfo.InstanceInfo;
import com.netflix.eureka.registry.rule.InstanceStatusOverrideRule;
/**
 * Container for a result computed by an {@link InstanceStatusOverrideRule}.
 *
 * Created by Nikos Michalakis on 7/13/16.
 */
public class StatusOverrideResult {

    // Made final: a shared public constant must not be reassignable.
    public static final StatusOverrideResult NO_MATCH = new StatusOverrideResult(false, null);

    public static StatusOverrideResult matchingStatus(InstanceInfo.InstanceStatus status) {
        return new StatusOverrideResult(true, status);
    }

    // Does the rule match?
    private final boolean matches;

    // The status computed by the rule.
    private final InstanceInfo.InstanceStatus status;

    private StatusOverrideResult(boolean matches, InstanceInfo.InstanceStatus status) {
        this.matches = matches;
        this.status = status;
    }

    /** @return whether the rule produced a decision. */
    public boolean matches() {
        return matches;
    }

    /** @return the proposed status; {@code null} when {@link #matches()} is false. */
    public InstanceInfo.InstanceStatus status() {
        return status;
    }
}
| 8,212 |
0 | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka/aws/Route53Binder.java | package com.netflix.eureka.aws;
import com.amazonaws.ClientConfiguration;
import com.amazonaws.auth.BasicAWSCredentials;
import com.amazonaws.auth.InstanceProfileCredentialsProvider;
import com.amazonaws.services.route53.AmazonRoute53Client;
import com.amazonaws.services.route53.model.*;
import com.netflix.appinfo.AmazonInfo;
import com.netflix.appinfo.ApplicationInfoManager;
import com.netflix.appinfo.InstanceInfo;
import com.netflix.discovery.EurekaClientConfig;
import com.netflix.eureka.EurekaServerConfig;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.annotation.PostConstruct;
import javax.annotation.PreDestroy;
import javax.inject.Inject;
import javax.inject.Singleton;
import java.net.MalformedURLException;
import java.net.URL;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Timer;
import java.util.TimerTask;
/**
* Route53 binder implementation. Will look for a free domain in the list of service url to bind itself to via Route53.
*/
@Singleton
public class Route53Binder implements AwsBinder {
private static final Logger logger = LoggerFactory
.getLogger(Route53Binder.class);
public static final String NULL_DOMAIN = "null";
private final EurekaServerConfig serverConfig;
private final EurekaClientConfig clientConfig;
private final ApplicationInfoManager applicationInfoManager;
/**
* the hostname to register under the Route53 CNAME
*/
private final String registrationHostname;
private final Timer timer;
private final AmazonRoute53Client amazonRoute53Client;
/**
 * DI constructor: derives the hostname to publish from the instance's Amazon
 * data center info (public hostname when present, otherwise local hostname).
 */
@Inject
public Route53Binder(EurekaServerConfig serverConfig,
                     EurekaClientConfig clientConfig,
                     ApplicationInfoManager applicationInfoManager) {
    this(getRegistrationHostnameFromAmazonDataCenterInfo(applicationInfoManager),
            serverConfig,
            clientConfig,
            applicationInfoManager);
}
/**
 * @param registrationHostname the hostname to register under the Route53 CNAME
 * @param serverConfig         server configuration (AWS credentials, retry settings).
 * @param clientConfig         client configuration used to discover the declared service URLs.
 * @param applicationInfoManager provides this instance's data center metadata.
 */
public Route53Binder(String registrationHostname, EurekaServerConfig serverConfig,
                     EurekaClientConfig clientConfig, ApplicationInfoManager applicationInfoManager) {
    this.registrationHostname = registrationHostname;
    this.serverConfig = serverConfig;
    this.clientConfig = clientConfig;
    this.applicationInfoManager = applicationInfoManager;
    // Daemon timer so a pending rebind never blocks JVM shutdown.
    this.timer = new Timer("Eureka-Route53Binder", true);
    this.amazonRoute53Client = getAmazonRoute53Client(serverConfig);
}
/**
 * Resolves the hostname to publish in the Route53 CNAME record: the instance's
 * public hostname when available, otherwise its local hostname.
 */
private static String getRegistrationHostnameFromAmazonDataCenterInfo(ApplicationInfoManager applicationInfoManager) {
    InstanceInfo myInfo = applicationInfoManager.getInfo();
    AmazonInfo dataCenterInfo = (AmazonInfo) myInfo.getDataCenterInfo();
    String publicHostname = dataCenterInfo.get(AmazonInfo.MetaDataKey.publicHostname);
    boolean noPublicHostname = publicHostname == null || publicHostname.isEmpty();
    return noPublicHostname
            ? dataCenterInfo.get(AmazonInfo.MetaDataKey.localHostname)
            : publicHostname;
}
/**
 * Performs an initial bind attempt and schedules periodic rebinding so a lost
 * CNAME (e.g. taken over after this server appeared dead) is re-acquired.
 */
@Override
@PostConstruct
public void start() {
    try {
        doBind();
        timer.schedule(
                new TimerTask() {
                    @Override
                    public void run() {
                        try {
                            doBind();
                        } catch (Throwable e) {
                            logger.error("Could not bind to Route53", e);
                        }
                    }
                },
                serverConfig.getRoute53BindingRetryIntervalMs(),
                serverConfig.getRoute53BindingRetryIntervalMs());
    } catch (InterruptedException e) {
        // Restore the interrupt flag before propagating so callers can still
        // observe the interruption (previously it was silently swallowed).
        Thread.currentThread().interrupt();
        throw new RuntimeException(e);
    }
}
/**
 * Scans the declared service-URL domains and binds this host to the first free one.
 * A domain is "free" when it has no CNAME record yet or its record holds the
 * NULL_DOMAIN placeholder. If any domain already points at this host, nothing is done.
 */
private void doBind() throws InterruptedException {
    List<ResourceRecordSetWithHostedZone> freeDomains = new ArrayList<>();
    List<String> domains = getDeclaredDomains();
    for(String domain : domains) {
        ResourceRecordSetWithHostedZone rrs = getResourceRecordSetWithHostedZone(domain);
        if (rrs != null) {
            if (rrs.getResourceRecordSet() == null) {
                // No record yet for this domain: prepare a fresh CNAME record set.
                ResourceRecordSet resourceRecordSet = new ResourceRecordSet();
                resourceRecordSet.setName(domain);
                resourceRecordSet.setType(RRType.CNAME);
                resourceRecordSet.setTTL(serverConfig.getRoute53DomainTTL());
                freeDomains.add(new ResourceRecordSetWithHostedZone(rrs.getHostedZone(), resourceRecordSet));
            } else if (NULL_DOMAIN.equals(rrs.getResourceRecordSet().getResourceRecords().get(0).getValue())) {
                // Record exists but holds the placeholder: it is claimable.
                freeDomains.add(rrs);
            }
            // already registered
            if (hasValue(rrs, registrationHostname)) {
                return;
            }
        }
    }
    for(ResourceRecordSetWithHostedZone rrs : freeDomains) {
        if (createResourceRecordSet(rrs)) {
            logger.info("Bind {} to {}" , registrationHostname, rrs.getResourceRecordSet().getName());
            return;
        }
    }
    logger.warn("Unable to find free domain in {}", domains);
}
/**
 * Writes this host into the record set via UPSERT, then re-reads the record after a
 * short delay to verify the claim was not overwritten by a concurrently binding server.
 *
 * @return true when the record now holds this host's value.
 */
private boolean createResourceRecordSet(ResourceRecordSetWithHostedZone rrs) throws InterruptedException {
    rrs.getResourceRecordSet().setResourceRecords(Arrays.asList(new ResourceRecord(registrationHostname)));
    Change change = new Change(ChangeAction.UPSERT, rrs.getResourceRecordSet());
    if (executeChangeWithRetry(change, rrs.getHostedZone())) {
        // Give Route53 a moment before verifying; another binder may race us.
        Thread.sleep(1000);
        // check change not overwritten
        ResourceRecordSet resourceRecordSet = getResourceRecordSet(rrs.getResourceRecordSet().getName(), rrs.getHostedZone());
        if (resourceRecordSet != null) {
            return resourceRecordSet.getResourceRecords().equals(rrs.getResourceRecordSet().getResourceRecords());
        }
    }
    return false;
}
/**
 * Maps each service URL to its Route53 domain (host plus trailing dot).
 * Malformed URLs are logged and skipped rather than failing the whole batch.
 */
private List<String> toDomains(List<String> ec2Urls) {
    List<String> result = new ArrayList<>(ec2Urls.size());
    for (String serviceUrl : ec2Urls) {
        try {
            result.add(extractDomain(serviceUrl));
        } catch (MalformedURLException e) {
            logger.error("Invalid url {}", serviceUrl, e);
        }
    }
    return result;
}
/**
 * Returns this instance's availability zone from its Amazon data center metadata,
 * failing fast when the metadata is unavailable.
 */
private String getMyZone() {
    InstanceInfo myInfo = applicationInfoManager.getInfo();
    if (myInfo != null) {
        AmazonInfo amazonInfo = (AmazonInfo) myInfo.getDataCenterInfo();
        if (amazonInfo != null) {
            String zone = amazonInfo.get(AmazonInfo.MetaDataKey.availabilityZone);
            if (zone != null) {
                return zone;
            }
        }
    }
    throw new RuntimeException("Cannot extract availabilityZone");
}
/**
 * Returns the Route53 domains derived from the Eureka service URLs declared
 * for this instance's availability zone.
 */
private List<String> getDeclaredDomains() {
    final String myZone = getMyZone();
    List<String> ec2Urls = clientConfig.getEurekaServerServiceUrls(myZone);
    return toDomains(ec2Urls);
}
/**
 * Executes the Route53 change, retrying up to the configured attempt count with a
 * one-second pause between attempts. Only the first failure is remembered and logged.
 *
 * @return true when any attempt succeeded.
 */
private boolean executeChangeWithRetry(Change change, HostedZone hostedZone) throws InterruptedException {
    Throwable firstError = null;
    for (int i = 0; i < serverConfig.getRoute53BindRebindRetries(); i++) {
        try {
            executeChange(change, hostedZone);
            return true;
        } catch (Throwable e) {
            if (firstError == null) {
                firstError = e;
            }
            Thread.sleep(1000);
        }
    }
    if (firstError != null) {
        // NOTE(review): firstError is passed twice — once for the second "{}" and once
        // as the throwable for the stack trace; the message part is redundant.
        logger.error("Cannot execute change {} {}", change, firstError, firstError);
    }
    return false;
}
/**
 * Submits a single change batch containing the given change to the hosted zone.
 */
private void executeChange(Change change, HostedZone hostedZone) {
    logger.info("Execute change {} ", change);
    ChangeBatch batch = new ChangeBatch();
    batch.withChanges(change);
    ChangeResourceRecordSetsRequest request = new ChangeResourceRecordSetsRequest();
    request.setHostedZoneId(hostedZone.getId());
    request.setChangeBatch(batch);
    amazonRoute53Client.changeResourceRecordSets(request);
}
/**
 * Pairs the hosted zone serving {@code domain} with the record set found for it,
 * or returns {@code null} when no hosted zone covers the domain. The contained
 * record set may itself be {@code null} when the domain has no record yet.
 */
private ResourceRecordSetWithHostedZone getResourceRecordSetWithHostedZone(String domain) {
    HostedZone zone = getHostedZone(domain);
    if (zone == null) {
        return null;
    }
    return new ResourceRecordSetWithHostedZone(zone, getResourceRecordSet(domain, zone));
}
/**
 * Finds the record set whose name exactly matches {@code domain} within the
 * hosted zone, or {@code null} when none exists.
 */
private ResourceRecordSet getResourceRecordSet(String domain, HostedZone hostedZone) {
    ListResourceRecordSetsRequest request = new ListResourceRecordSetsRequest();
    request.setMaxItems(String.valueOf(Integer.MAX_VALUE));
    request.setHostedZoneId(hostedZone.getId());
    ListResourceRecordSetsResult result = amazonRoute53Client.listResourceRecordSets(request);
    for (ResourceRecordSet recordSet : result.getResourceRecordSets()) {
        if (recordSet.getName().equals(domain)) {
            return recordSet;
        }
    }
    return null;
}
/**
 * Finds the first hosted zone whose name is a suffix of {@code domain},
 * or {@code null} when no zone covers it.
 */
private HostedZone getHostedZone(String domain) {
    ListHostedZonesRequest request = new ListHostedZonesRequest();
    request.setMaxItems(String.valueOf(Integer.MAX_VALUE));
    ListHostedZonesResult result = amazonRoute53Client.listHostedZones(request);
    for (HostedZone zone : result.getHostedZones()) {
        if (domain.endsWith(zone.getName())) {
            return zone;
        }
    }
    return null;
}
/**
 * Releases this host's claim on the given domain: when the CNAME currently points
 * at this host, its value is reset to the NULL_DOMAIN placeholder so another
 * server can bind to it. Domains pointing elsewhere are left untouched.
 */
private void unbindFromDomain(String domain) throws InterruptedException {
    ResourceRecordSetWithHostedZone resourceRecordSetWithHostedZone = getResourceRecordSetWithHostedZone(domain);
    if (hasValue(resourceRecordSetWithHostedZone, registrationHostname)) {
        resourceRecordSetWithHostedZone.getResourceRecordSet().getResourceRecords().get(0).setValue(NULL_DOMAIN);
        executeChangeWithRetry(new Change(ChangeAction.UPSERT, resourceRecordSetWithHostedZone.getResourceRecordSet()), resourceRecordSetWithHostedZone.getHostedZone());
    }
}
/**
 * Extracts the host part of a service URL and appends the trailing dot used by
 * fully-qualified Route53 record names.
 */
private String extractDomain(String url) throws MalformedURLException {
    URL parsed = new URL(url);
    return parsed.getHost() + ".";
}
/**
 * Cancels the rebind timer, unbinds this host from every declared domain
 * (resetting the CNAME to the NULL_DOMAIN placeholder), and shuts the
 * Route53 client down.
 */
@Override
@PreDestroy
public void shutdown() {
    timer.cancel();
    for (String domain : getDeclaredDomains()) {
        try {
            unbindFromDomain(domain);
        } catch (InterruptedException e) {
            // Restore the interrupt flag before propagating so callers can still
            // observe the interruption (previously it was silently swallowed).
            Thread.currentThread().interrupt();
            throw new RuntimeException(e);
        }
    }
    amazonRoute53Client.shutdown();
}
/**
 * Builds the Route53 client: explicit credentials when both access id and secret
 * key are configured, otherwise the EC2 instance-profile credentials provider.
 */
private AmazonRoute53Client getAmazonRoute53Client(EurekaServerConfig serverConfig) {
    ClientConfiguration clientConfiguration = new ClientConfiguration()
            .withConnectionTimeout(serverConfig.getASGQueryTimeoutMs());
    String accessId = serverConfig.getAWSAccessId();
    String secretKey = serverConfig.getAWSSecretKey();
    boolean hasExplicitCredentials = accessId != null && !accessId.isEmpty()
            && secretKey != null && !secretKey.isEmpty();
    if (hasExplicitCredentials) {
        return new AmazonRoute53Client(
                new BasicAWSCredentials(accessId, secretKey),
                clientConfiguration);
    }
    return new AmazonRoute53Client(
            new InstanceProfileCredentialsProvider(),
            clientConfiguration);
}
private boolean hasValue(ResourceRecordSetWithHostedZone resourceRecordSetWithHostedZone, String ip) {
if (resourceRecordSetWithHostedZone != null && resourceRecordSetWithHostedZone.getResourceRecordSet() != null) {
for (ResourceRecord rr : resourceRecordSetWithHostedZone.getResourceRecordSet().getResourceRecords()) {
if (ip.equals(rr.getValue())) {
return true;
}
}
}
return false;
}
private class ResourceRecordSetWithHostedZone {
private final HostedZone hostedZone;
private final ResourceRecordSet resourceRecordSet;
public ResourceRecordSetWithHostedZone(HostedZone hostedZone, ResourceRecordSet resourceRecordSet) {
this.hostedZone = hostedZone;
this.resourceRecordSet = resourceRecordSet;
}
public HostedZone getHostedZone() {
return hostedZone;
}
public ResourceRecordSet getResourceRecordSet() {
return resourceRecordSet;
}
}
} | 8,213 |
0 | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka/aws/AwsBinder.java | package com.netflix.eureka.aws;
/**
 * Binds the Eureka server to a well-known AWS endpoint — an EIP, a Route53
 * record, an ENI, or similar — so clients can find the server at a stable
 * address. Implementations: EIPManager, Route53Binder, ElasticNetworkInterfaceBinder.
 */
public interface AwsBinder {
    /**
     * Acquires the binding (and typically schedules periodic re-binding).
     *
     * @throws Exception if the binding cannot be established
     */
    void start() throws Exception;

    /**
     * Releases the binding and any underlying AWS client resources.
     *
     * @throws Exception if the binding cannot be released cleanly
     */
    void shutdown() throws Exception;
}
0 | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka/aws/ElasticNetworkInterfaceBinder.java | package com.netflix.eureka.aws;
import com.amazonaws.auth.BasicAWSCredentials;
import com.amazonaws.auth.InstanceProfileCredentialsProvider;
import com.amazonaws.services.ec2.AmazonEC2;
import com.amazonaws.services.ec2.AmazonEC2Client;
import com.amazonaws.services.ec2.model.*;
import com.google.common.base.Function;
import com.google.common.base.Joiner;
import com.google.common.base.Splitter;
import com.google.common.collect.Lists;
import com.google.common.collect.Ordering;
import com.google.common.net.InetAddresses;
import com.netflix.appinfo.AmazonInfo;
import com.netflix.appinfo.ApplicationInfoManager;
import com.netflix.appinfo.InstanceInfo;
import com.netflix.discovery.EurekaClientConfig;
import com.netflix.discovery.endpoint.EndpointUtils;
import com.netflix.eureka.EurekaServerConfig;
import com.netflix.eureka.registry.PeerAwareInstanceRegistry;
import com.netflix.servo.monitor.Monitors;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.annotation.PostConstruct;
import javax.annotation.PreDestroy;
import javax.inject.Inject;
import java.net.MalformedURLException;
import java.net.URL;
import java.util.Collection;
import java.util.List;
import java.util.Timer;
import java.util.TimerTask;
/**
* Amazon ENI binder for instances.
*
* Candidate ENI's discovery is done using the same mechanism as Elastic ip binder, via dns records or service urls.
*
* The dns records and the service urls should use the ENI private dns or private ip
*
* Dns record examples
* txt.us-east-1.eureka="us-east-1a.eureka" "us-east-1b.eureka"
* txt.us-east-1a.eureka="ip-172-31-y-y.ec2.internal"
* txt.us-east-1b.eureka="ip-172-31-x-x.ec2.internal"
* where "ip-172-31-x-x.ec2.internal" is the ENI private dns
*
* Service url example:
* eureka.serviceUrl.us-east-1a=http://ip-172-31-x-x.ec2.internal:7001/eureka/v2/
*
* ENI Binding strategy should be configured via property like:
*
* eureka.awsBindingStrategy=ENI
*
 * If there is no available (unattached) ENI for the availability zone, no ENI is attached;
 * ENIs that are already attached to another instance are never detached or reused.
*/
public class ElasticNetworkInterfaceBinder implements AwsBinder {
    private static final Logger logger = LoggerFactory.getLogger(ElasticNetworkInterfaceBinder.class);
    private static final int IP_BIND_SLEEP_TIME_MS = 1000;
    private static final Timer timer = new Timer("Eureka-ElasticNetworkInterfaceBinder", true);

    private final EurekaServerConfig serverConfig;
    private final EurekaClientConfig clientConfig;
    private final PeerAwareInstanceRegistry registry;
    private final ApplicationInfoManager applicationInfoManager;

    @Inject
    public ElasticNetworkInterfaceBinder(
            EurekaServerConfig serverConfig,
            EurekaClientConfig clientConfig,
            PeerAwareInstanceRegistry registry,
            ApplicationInfoManager applicationInfoManager) {
        this.serverConfig = serverConfig;
        this.clientConfig = clientConfig;
        this.registry = registry;
        this.applicationInfoManager = applicationInfoManager;
        try {
            Monitors.registerObject(this);
        } catch (Throwable e) {
            // Monitoring is best-effort; binding must not fail because JMX registration did.
            logger.warn("Cannot register the JMX monitor for the InstanceRegistry", e);
        }
    }

    /**
     * Tries to bind an ENI (with retries), then schedules the periodic re-binding task.
     */
    @PostConstruct
    public void start() {
        int retries = serverConfig.getEIPBindRebindRetries();
        for (int i = 0; i < retries; i++) {
            try {
                if (alreadyBound()) {
                    break;
                } else {
                    bind();
                }
            } catch (Throwable e) {
                logger.error("Cannot bind to IP", e);
                try {
                    Thread.sleep(IP_BIND_SLEEP_TIME_MS);
                } catch (InterruptedException e1) {
                    // Preserve the interrupt status for callers before aborting startup.
                    Thread.currentThread().interrupt();
                    throw new RuntimeException(e1);
                }
            }
        }
        // Schedule a timer which periodically checks for IP binding.
        timer.schedule(new IPBindingTask(), serverConfig.getEIPBindingRetryIntervalMsWhenUnbound());
    }

    /**
     * Cancels the re-binding task and detaches the bound ENI (with retries).
     */
    @PreDestroy
    public void shutdown() {
        timer.cancel();
        for (int i = 0; i < serverConfig.getEIPBindRebindRetries(); i++) {
            try {
                unbind();
                break;
            } catch (Exception e) {
                logger.warn("Cannot unbind the IP from the instance");
                try {
                    Thread.sleep(IP_BIND_SLEEP_TIME_MS);
                } catch (InterruptedException e1) {
                    // Preserve the interrupt status for callers before aborting shutdown.
                    Thread.currentThread().interrupt();
                    throw new RuntimeException(e1);
                }
            }
        }
    }

    /**
     * Checks whether one of the candidate private IPs is already attached to this
     * instance via one of its network interfaces.
     *
     * @return true if this instance already owns one of the candidate ENI IPs
     * @throws MalformedURLException if a configured service url cannot be parsed
     */
    public boolean alreadyBound() throws MalformedURLException {
        InstanceInfo myInfo = applicationInfoManager.getInfo();
        String myInstanceId = ((AmazonInfo) myInfo.getDataCenterInfo()).get(AmazonInfo.MetaDataKey.instanceId);
        AmazonEC2 ec2Service = getEC2Service();
        List<InstanceNetworkInterface> instanceNetworkInterfaces = instanceData(myInstanceId, ec2Service).getNetworkInterfaces();
        List<String> candidateIPs = getCandidateIps();
        for (String ip : candidateIPs) {
            for (InstanceNetworkInterface ini : instanceNetworkInterfaces) {
                if (ip.equals(ini.getPrivateIpAddress())) {
                    logger.info("My instance {} seems to be already associated with the ip {}", myInstanceId, ip);
                    return true;
                }
            }
        }
        return false;
    }

    /**
     * Binds an ENI to the instance.
     *
     * The candidate ENI's are deduced in the same way the EIP binder works: via dns records
     * or via service urls, depending on configuration.
     *
     * It will try to attach the first ENI that is:
     *   - available
     *   - for this subnet
     *   - in the list of candidate ENI's
     *
     * @throws MalformedURLException if a configured service url cannot be parsed
     */
    public void bind() throws MalformedURLException {
        // Use the injected ApplicationInfoManager, consistent with alreadyBound()/unbind(),
        // instead of the static singleton (which may not be the DI-wired instance).
        InstanceInfo myInfo = applicationInfoManager.getInfo();
        String myInstanceId = ((AmazonInfo) myInfo.getDataCenterInfo()).get(AmazonInfo.MetaDataKey.instanceId);
        String myZone = ((AmazonInfo) myInfo.getDataCenterInfo()).get(AmazonInfo.MetaDataKey.availabilityZone);

        final List<String> ips = getCandidateIps();

        // Prefer candidates in the order they are declared in configuration/DNS.
        Ordering<NetworkInterface> ipsOrder = Ordering.natural().onResultOf(new Function<NetworkInterface, Integer>() {
            public Integer apply(NetworkInterface networkInterface) {
                return ips.indexOf(networkInterface.getPrivateIpAddress());
            }
        });

        AmazonEC2 ec2Service = getEC2Service();
        String subnetId = instanceData(myInstanceId, ec2Service).getSubnetId();

        // Note: the varargs withFilters(...) APPENDS to the filter list, so all three
        // filters (candidate ip, available status, same subnet) apply together.
        DescribeNetworkInterfacesResult result = ec2Service
                .describeNetworkInterfaces(new DescribeNetworkInterfacesRequest()
                        .withFilters(new Filter("private-ip-address", ips))
                        .withFilters(new Filter("status", Lists.newArrayList("available")))
                        .withFilters(new Filter("subnet-id", Lists.newArrayList(subnetId)))
                );

        if (result.getNetworkInterfaces().isEmpty()) {
            logger.info("No ip is free to be associated with this instance. Candidate ips are: {} for zone: {}", ips, myZone);
        } else {
            NetworkInterface selected = ipsOrder.min(result.getNetworkInterfaces());
            ec2Service.attachNetworkInterface(
                    new AttachNetworkInterfaceRequest()
                            .withNetworkInterfaceId(selected.getNetworkInterfaceId())
                            .withDeviceIndex(1)
                            .withInstanceId(myInstanceId)
            );
        }
    }

    /**
     * Unbind the IP that this instance is associated with: detaches the first attached
     * network interface whose private IP is among the candidates.
     */
    public void unbind() throws Exception {
        InstanceInfo myInfo = applicationInfoManager.getInfo();
        String myInstanceId = ((AmazonInfo) myInfo.getDataCenterInfo()).get(AmazonInfo.MetaDataKey.instanceId);

        AmazonEC2 ec2 = getEC2Service();
        List<InstanceNetworkInterface> result = instanceData(myInstanceId, ec2).getNetworkInterfaces();
        List<String> ips = getCandidateIps();
        for (InstanceNetworkInterface networkInterface : result) {
            if (ips.contains(networkInterface.getPrivateIpAddress())) {
                String attachmentId = networkInterface.getAttachment().getAttachmentId();
                ec2.detachNetworkInterface(new DetachNetworkInterfaceRequest().withAttachmentId(attachmentId));
                break;
            }
        }
    }

    /**
     * Fetches the EC2 description of the given instance.
     * Assumes the instance id resolves to exactly one reservation/instance.
     */
    private Instance instanceData(String myInstanceId, AmazonEC2 ec2) {
        return ec2.describeInstances(new DescribeInstancesRequest().withInstanceIds(myInstanceId)).getReservations().get(0).getInstances().get(0);
    }

    /**
     * Based on shouldUseDnsForFetchingServiceUrls configuration, either retrieves
     * candidates from dns records or from configuration properties, and normalizes
     * each candidate to a bare private IPv4 address.
     *
     * @return the candidate private IPs for this instance's zone, in declared order
     * @throws MalformedURLException if a candidate url cannot be parsed
     */
    public List<String> getCandidateIps() throws MalformedURLException {
        InstanceInfo myInfo = applicationInfoManager.getInfo();
        String myZone = ((AmazonInfo) myInfo.getDataCenterInfo()).get(AmazonInfo.MetaDataKey.availabilityZone);

        Collection<String> candidates = clientConfig.shouldUseDnsForFetchingServiceUrls()
                ? getIPsForZoneFromDNS(myZone)
                : getIPsForZoneFromConfig(myZone);

        if (candidates == null || candidates.size() == 0) {
            throw new RuntimeException("Could not get any ips from the pool for zone :" + myZone);
        }
        List<String> ips = Lists.newArrayList();

        for (String candidate : candidates) {
            String host = new URL(candidate).getHost();
            if (InetAddresses.isInetAddress(host)) {
                ips.add(host);
            } else {
                // ip-172-31-55-172.ec2.internal -> ip-172-31-55-172
                String firstPartOfHost = Splitter.on(".").splitToList(host).get(0);
                // ip-172-31-55-172 -> [172,31,55,172]
                List<String> noIpPrefix = Splitter.on("-").splitToList(firstPartOfHost).subList(1, 5);
                // [172,31,55,172] -> 172.31.55.172
                String ip = Joiner.on(".").join(noIpPrefix);
                if (InetAddresses.isInetAddress(ip)) {
                    ips.add(ip);
                } else {
                    throw new IllegalArgumentException("Illegal internal hostname " + host + " translated to '" + ip + "'");
                }
            }
        }
        return ips;
    }

    // Candidate service urls taken from static configuration for the given zone.
    private Collection<String> getIPsForZoneFromConfig(String myZone) {
        return clientConfig.getEurekaServerServiceUrls(myZone);
    }

    // Candidate service urls resolved from DNS txt records for the given zone.
    private Collection<String> getIPsForZoneFromDNS(String myZone) {
        return EndpointUtils.getServiceUrlsFromDNS(
                clientConfig,
                myZone,
                true,
                new EndpointUtils.InstanceInfoBasedUrlRandomizer(applicationInfoManager.getInfo())
        );
    }

    /**
     * Builds the EC2 client, preferring explicitly configured credentials and
     * falling back to the instance profile, pinned to the configured region.
     */
    private AmazonEC2 getEC2Service() {
        String aWSAccessId = serverConfig.getAWSAccessId();
        String aWSSecretKey = serverConfig.getAWSSecretKey();

        AmazonEC2 ec2Service;
        if (null != aWSAccessId && !"".equals(aWSAccessId)
                && null != aWSSecretKey && !"".equals(aWSSecretKey)) {
            ec2Service = new AmazonEC2Client(new BasicAWSCredentials(aWSAccessId, aWSSecretKey));
        } else {
            ec2Service = new AmazonEC2Client(new InstanceProfileCredentialsProvider());
        }

        String region = clientConfig.getRegion();
        region = region.trim().toLowerCase();
        ec2Service.setEndpoint("ec2." + region + ".amazonaws.com");
        return ec2Service;
    }

    /**
     * Periodic task that re-checks the binding; when unbound it first re-syncs the
     * registry from peers (it could be stale) before attempting to bind again, and
     * reschedules itself with a shorter interval while unbound.
     */
    private class IPBindingTask extends TimerTask {
        @Override
        public void run() {
            boolean alreadyBound = false;
            try {
                alreadyBound = alreadyBound();
                // If the ip is not bound, the registry could be stale. First sync up the registry
                // from the neighboring node before trying to bind the IP.
                if (!alreadyBound) {
                    registry.clearRegistry();
                    int count = registry.syncUp();
                    registry.openForTraffic(applicationInfoManager, count);
                } else {
                    // An ip is already bound
                    return;
                }
                bind();
            } catch (Throwable e) {
                logger.error("Could not bind to IP", e);
            } finally {
                if (alreadyBound) {
                    timer.schedule(new IPBindingTask(), serverConfig.getEIPBindingRetryIntervalMs());
                } else {
                    timer.schedule(new IPBindingTask(), serverConfig.getEIPBindingRetryIntervalMsWhenUnbound());
                }
            }
        }
    }
}
| 8,215 |
0 | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka/aws/AwsBinderDelegate.java | package com.netflix.eureka.aws;
import com.netflix.appinfo.ApplicationInfoManager;
import com.netflix.discovery.EurekaClientConfig;
import com.netflix.eureka.EurekaServerConfig;
import com.netflix.eureka.registry.PeerAwareInstanceRegistry;
import javax.annotation.PostConstruct;
import javax.annotation.PreDestroy;
import javax.inject.Inject;
import javax.inject.Singleton;
@Singleton
public class AwsBinderDelegate implements AwsBinder {
private final AwsBinder delegate;
@Inject
public AwsBinderDelegate(EurekaServerConfig serverConfig,
EurekaClientConfig clientConfig,
PeerAwareInstanceRegistry registry,
ApplicationInfoManager applicationInfoManager) {
AwsBindingStrategy bindingStrategy = serverConfig.getBindingStrategy();
switch (bindingStrategy) {
case ROUTE53:
delegate = new Route53Binder(serverConfig, clientConfig, applicationInfoManager);
break;
case EIP:
delegate = new EIPManager(serverConfig, clientConfig, registry, applicationInfoManager);
break;
case ENI:
delegate = new ElasticNetworkInterfaceBinder(serverConfig, clientConfig, registry, applicationInfoManager);
break;
default:
throw new IllegalArgumentException("Unexpected BindingStrategy " + bindingStrategy);
}
}
@Override
@PostConstruct
public void start() {
try {
delegate.start();
} catch (Exception e) {
throw new RuntimeException(e);
}
}
@Override
@PreDestroy
public void shutdown() {
try {
delegate.shutdown();
} catch (Exception e) {
throw new RuntimeException(e);
}
}
} | 8,216 |
0 | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka/aws/AsgClient.java | package com.netflix.eureka.aws;
import com.netflix.appinfo.InstanceInfo;
/**
 * Client abstraction for querying and updating AWS autoscaling group (ASG)
 * state on behalf of Eureka.
 */
public interface AsgClient {
    /**
     * Returns whether the ASG associated with the given instance is enabled.
     * NOTE(review): "enabled" presumably refers to the ASG's traffic/load-balancer
     * state as interpreted by the implementation — confirm against AwsAsgUtil.
     */
    boolean isASGEnabled(InstanceInfo instanceInfo);

    /**
     * Marks the named ASG as enabled or disabled.
     */
    void setStatus(String asgName, boolean enabled);
}
| 8,217 |
0 | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka/aws/EIPManager.java | /*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.eureka.aws;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.Timer;
import java.util.TimerTask;
import com.amazonaws.auth.BasicAWSCredentials;
import com.amazonaws.auth.InstanceProfileCredentialsProvider;
import com.amazonaws.services.ec2.AmazonEC2;
import com.amazonaws.services.ec2.AmazonEC2Client;
import com.amazonaws.services.ec2.model.Address;
import com.amazonaws.services.ec2.model.AssociateAddressRequest;
import com.amazonaws.services.ec2.model.DescribeAddressesRequest;
import com.amazonaws.services.ec2.model.DescribeAddressesResult;
import com.amazonaws.services.ec2.model.DisassociateAddressRequest;
import com.netflix.appinfo.AmazonInfo;
import com.netflix.appinfo.AmazonInfo.MetaDataKey;
import com.netflix.appinfo.ApplicationInfoManager;
import com.netflix.appinfo.DataCenterInfo.Name;
import com.netflix.appinfo.InstanceInfo;
import com.netflix.discovery.EurekaClientConfig;
import com.netflix.discovery.endpoint.EndpointUtils;
import com.netflix.eureka.EurekaServerConfig;
import com.netflix.eureka.registry.PeerAwareInstanceRegistry;
import com.netflix.servo.monitor.Monitors;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.annotation.PostConstruct;
import javax.annotation.PreDestroy;
import javax.inject.Inject;
import javax.inject.Singleton;
/**
* An AWS specific <em>elastic ip</em> binding utility for binding eureka
* servers for a well known <code>IP address</code>.
*
* <p>
* <em>Eureka</em> clients talk to <em>Eureka</em> servers bound with well known
* <code>IP addresses</code> since that is the most reliable mechanism to
* discover the <em>Eureka</em> servers. When Eureka servers come up they bind
* themselves to a well known <em>elastic ip</em>
* </p>
*
* <p>
* This binding mechanism gravitates towards one eureka server per zone for
* resilience. At least one elastic ip should be slotted for each eureka server in
* a zone. If more than eureka server is launched per zone and there are not
* enough elastic ips slotted, the server tries to pick a free EIP slotted for other
* zones and if it still cannot find a free EIP, waits and keeps trying.
* </p>
*
* @author Karthik Ranganathan, Greg Kim
*
*/
@Singleton
public class EIPManager implements AwsBinder {
private static final Logger logger = LoggerFactory.getLogger(EIPManager.class);
private static final String US_EAST_1 = "us-east-1";
private static final int EIP_BIND_SLEEP_TIME_MS = 1000;
private static final Timer timer = new Timer("Eureka-EIPBinder", true);
private final EurekaServerConfig serverConfig;
private final EurekaClientConfig clientConfig;
private final PeerAwareInstanceRegistry registry;
private final ApplicationInfoManager applicationInfoManager;
@Inject
public EIPManager(EurekaServerConfig serverConfig,
EurekaClientConfig clientConfig,
PeerAwareInstanceRegistry registry,
ApplicationInfoManager applicationInfoManager) {
this.serverConfig = serverConfig;
this.clientConfig = clientConfig;
this.registry = registry;
this.applicationInfoManager = applicationInfoManager;
try {
Monitors.registerObject(this);
} catch (Throwable e) {
logger.warn("Cannot register the JMX monitor for the InstanceRegistry", e);
}
}
@PostConstruct
public void start() {
try {
handleEIPBinding();
} catch (InterruptedException e) {
throw new RuntimeException(e);
}
}
@PreDestroy
public void shutdown() {
timer.cancel();
for (int i = 0; i < serverConfig.getEIPBindRebindRetries(); i++) {
try {
unbindEIP();
break;
} catch (Exception e) {
logger.warn("Cannot unbind the EIP from the instance");
try {
Thread.sleep(1000);
} catch (InterruptedException e1) {
throw new RuntimeException(e1);
}
}
}
}
/**
* Handles EIP binding process in AWS Cloud.
*
* @throws InterruptedException
*/
private void handleEIPBinding() throws InterruptedException {
int retries = serverConfig.getEIPBindRebindRetries();
// Bind to EIP if needed
for (int i = 0; i < retries; i++) {
try {
if (isEIPBound()) {
break;
} else {
bindEIP();
}
} catch (Throwable e) {
logger.error("Cannot bind to EIP", e);
Thread.sleep(EIP_BIND_SLEEP_TIME_MS);
}
}
// Schedule a timer which periodically checks for EIP binding.
timer.schedule(new EIPBindingTask(), serverConfig.getEIPBindingRetryIntervalMsWhenUnbound());
}
/**
* Checks if an EIP is already bound to the instance.
* @return true if an EIP is bound, false otherwise
*/
public boolean isEIPBound() {
InstanceInfo myInfo = applicationInfoManager.getInfo();
String myInstanceId = ((AmazonInfo) myInfo.getDataCenterInfo()).get(MetaDataKey.instanceId);
String myZone = ((AmazonInfo) myInfo.getDataCenterInfo()).get(MetaDataKey.availabilityZone);
String myPublicIP = ((AmazonInfo) myInfo.getDataCenterInfo()).get(MetaDataKey.publicIpv4);
Collection<String> candidateEIPs = getCandidateEIPs(myInstanceId, myZone);
for (String eipEntry : candidateEIPs) {
if (eipEntry.equals(myPublicIP)) {
logger.info("My instance {} seems to be already associated with the public ip {}",
myInstanceId, myPublicIP);
return true;
}
}
return false;
}
/**
* Checks if an EIP is bound and optionally binds the EIP.
*
* The list of EIPs are arranged with the EIPs allocated in the zone first
* followed by other EIPs.
*
* If an EIP is already bound to this instance this method simply returns. Otherwise, this method tries to find
* an unused EIP based on information from AWS. If it cannot find any unused EIP this method, it will be retried
* for a specified interval.
*
* One of the following scenarios can happen here :
*
* 1) If the instance is already bound to an EIP as deemed by AWS, no action is taken.
* 2) If an EIP is already bound to another instance as deemed by AWS, that EIP is skipped.
* 3) If an EIP is not already bound to an instance and if this instance is not bound to an EIP, then
* the EIP is bound to this instance.
*/
public void bindEIP() {
InstanceInfo myInfo = applicationInfoManager.getInfo();
String myInstanceId = ((AmazonInfo) myInfo.getDataCenterInfo()).get(MetaDataKey.instanceId);
String myZone = ((AmazonInfo) myInfo.getDataCenterInfo()).get(MetaDataKey.availabilityZone);
Collection<String> candidateEIPs = getCandidateEIPs(myInstanceId, myZone);
AmazonEC2 ec2Service = getEC2Service();
boolean isMyinstanceAssociatedWithEIP = false;
Address selectedEIP = null;
for (String eipEntry : candidateEIPs) {
try {
String associatedInstanceId;
// Check with AWS, if this EIP is already been used by another instance
DescribeAddressesRequest describeAddressRequest = new DescribeAddressesRequest().withPublicIps(eipEntry);
DescribeAddressesResult result = ec2Service.describeAddresses(describeAddressRequest);
if ((result.getAddresses() != null) && (!result.getAddresses().isEmpty())) {
Address eipAddress = result.getAddresses().get(0);
associatedInstanceId = eipAddress.getInstanceId();
// This EIP is not used by any other instance, hence mark it for selection if it is not
// already marked.
if (((associatedInstanceId == null) || (associatedInstanceId.isEmpty()))) {
if (selectedEIP == null) {
selectedEIP = eipAddress;
}
} else if (isMyinstanceAssociatedWithEIP = (associatedInstanceId.equals(myInstanceId))) {
// This EIP is associated with an instance, check if this is the same as the current instance.
// If it is the same, stop searching for an EIP as this instance is already associated with an
// EIP
selectedEIP = eipAddress;
break;
} else {
// The EIP is used by some other instance, hence skip it
logger.warn("The selected EIP {} is associated with another instance {} according to AWS," +
" hence skipping this", eipEntry, associatedInstanceId);
}
}
} catch (Throwable t) {
logger.error("Failed to bind elastic IP: {} to {}", eipEntry, myInstanceId, t);
}
}
if (null != selectedEIP) {
String publicIp = selectedEIP.getPublicIp();
// Only bind if the EIP is not already associated
if (!isMyinstanceAssociatedWithEIP) {
AssociateAddressRequest associateAddressRequest = new AssociateAddressRequest()
.withInstanceId(myInstanceId);
String domain = selectedEIP.getDomain();
if ("vpc".equals(domain)) {
associateAddressRequest.setAllocationId(selectedEIP.getAllocationId());
} else {
associateAddressRequest.setPublicIp(publicIp);
}
ec2Service.associateAddress(associateAddressRequest);
logger.info("\n\n\nAssociated {} running in zone: {} to elastic IP: {}", myInstanceId, myZone, publicIp);
}
logger.info("My instance {} seems to be already associated with the EIP {}", myInstanceId, publicIp);
} else {
logger.info("No EIP is free to be associated with this instance. Candidate EIPs are: {}", candidateEIPs);
}
}
/**
* Unbind the EIP that this instance is associated with.
*/
public void unbindEIP() throws Exception {
InstanceInfo myInfo = applicationInfoManager.getInfo();
String myPublicIP = null;
if (myInfo != null
&& myInfo.getDataCenterInfo().getName() == Name.Amazon) {
myPublicIP = ((AmazonInfo) myInfo.getDataCenterInfo())
.get(MetaDataKey.publicIpv4);
if (myPublicIP == null) {
logger.info("Instance is not associated with an EIP. Will not try to unbind");
return;
}
try {
AmazonEC2 ec2Service = getEC2Service();
DescribeAddressesRequest describeAddressRequest = new DescribeAddressesRequest()
.withPublicIps(myPublicIP);
DescribeAddressesResult result = ec2Service.describeAddresses(describeAddressRequest);
if ((result.getAddresses() != null) && (!result.getAddresses().isEmpty())) {
Address eipAddress = result.getAddresses().get(0);
DisassociateAddressRequest dissociateRequest = new DisassociateAddressRequest();
String domain = eipAddress.getDomain();
if ("vpc".equals(domain)) {
dissociateRequest.setAssociationId(eipAddress.getAssociationId());
} else {
dissociateRequest.setPublicIp(eipAddress.getPublicIp());
}
ec2Service.disassociateAddress(dissociateRequest);
logger.info("Dissociated the EIP {} from this instance", myPublicIP);
}
} catch (Throwable e) {
throw new RuntimeException("Cannot dissociate address from this instance", e);
}
}
}
/**
* Get the list of EIPs in the order of preference depending on instance zone.
*
* @param myInstanceId
* the instance id for this instance
* @param myZone
* the zone where this instance is in
* @return Collection containing the list of available EIPs
*/
public Collection<String> getCandidateEIPs(String myInstanceId, String myZone) {
if (myZone == null) {
myZone = "us-east-1d";
}
Collection<String> eipCandidates = clientConfig.shouldUseDnsForFetchingServiceUrls()
? getEIPsForZoneFromDNS(myZone)
: getEIPsForZoneFromConfig(myZone);
if (eipCandidates == null || eipCandidates.size() == 0) {
throw new RuntimeException("Could not get any elastic ips from the EIP pool for zone :" + myZone);
}
return eipCandidates;
}
/**
* Get the list of EIPs from the configuration.
*
* @param myZone
* - the zone in which the instance resides.
* @return collection of EIPs to choose from for binding.
*/
private Collection<String> getEIPsForZoneFromConfig(String myZone) {
List<String> ec2Urls = clientConfig.getEurekaServerServiceUrls(myZone);
return getEIPsFromServiceUrls(ec2Urls);
}
/**
* Get the list of EIPs from the ec2 urls.
*
* @param ec2Urls
* the ec2urls for which the EIP needs to be obtained.
* @return collection of EIPs.
*/
private Collection<String> getEIPsFromServiceUrls(List<String> ec2Urls) {
List<String> returnedUrls = new ArrayList<>();
String region = clientConfig.getRegion();
String regionPhrase = "";
if (!US_EAST_1.equals(region)) {
regionPhrase = "." + region;
}
for (String cname : ec2Urls) {
int beginIndex = cname.indexOf("ec2-");
if (-1 < beginIndex) {
// CNAME contains "ec2-"
int endIndex = cname.indexOf(regionPhrase + ".compute");
String eipStr = cname.substring(beginIndex + 4, endIndex);
String eip = eipStr.replaceAll("\\-", ".");
returnedUrls.add(eip);
}
// Otherwise, if CNAME doesn't contain, do nothing.
// Handle case where there are no cnames containing "ec2-". Reasons include:
// Systems without public addresses - purely attached to corp lan via AWS Direct Connect
// Use of EC2 network adapters that are attached to an instance after startup
}
return returnedUrls;
}
/**
* Get the list of EIPS from the DNS.
*
* <p>
* This mechanism looks for the EIP pool in the zone the instance is in by
* looking up the DNS name <code>{zone}.{region}.{domainName}</code>. The
* zone is fetched from the {@link InstanceInfo} object;the region is picked
* up from the specified configuration
* {@link com.netflix.discovery.EurekaClientConfig#getRegion()};the domain name is picked up from
* the specified configuration {@link com.netflix.discovery.EurekaClientConfig#getEurekaServerDNSName()}
* with a "txt." prefix (see {@link com.netflix.discovery.endpoint.EndpointUtils
* #getZoneBasedDiscoveryUrlsFromRegion(com.netflix.discovery.EurekaClientConfig, String)}.
* </p>
*
* @param myZone
* the zone where this instance exist in.
* @return the collection of EIPs that exist in the zone this instance is
* in.
*/
private Collection<String> getEIPsForZoneFromDNS(String myZone) {
List<String> ec2Urls = EndpointUtils.getServiceUrlsFromDNS(
clientConfig,
myZone,
true,
new EndpointUtils.InstanceInfoBasedUrlRandomizer(applicationInfoManager.getInfo())
);
return getEIPsFromServiceUrls(ec2Urls);
}
/**
* Gets the EC2 service object to call AWS APIs.
*
* @return the EC2 service object to call AWS APIs.
*/
private AmazonEC2 getEC2Service() {
String aWSAccessId = serverConfig.getAWSAccessId();
String aWSSecretKey = serverConfig.getAWSSecretKey();
AmazonEC2 ec2Service;
if (null != aWSAccessId && !"".equals(aWSAccessId)
&& null != aWSSecretKey && !"".equals(aWSSecretKey)) {
ec2Service = new AmazonEC2Client(new BasicAWSCredentials(aWSAccessId, aWSSecretKey));
} else {
ec2Service = new AmazonEC2Client(new InstanceProfileCredentialsProvider());
}
String region = clientConfig.getRegion();
region = region.trim().toLowerCase();
ec2Service.setEndpoint("ec2." + region + ".amazonaws.com");
return ec2Service;
}
/**
* An EIP binding timer task which constantly polls for EIP in the
* same zone and binds it to itself.If the EIP is taken away for some
* reason, this task tries to get the EIP back. Hence it is advised to take
* one EIP assignment per instance in a zone.
*/
private class EIPBindingTask extends TimerTask {
@Override
public void run() {
boolean isEIPBound = false;
try {
isEIPBound = isEIPBound();
// If the EIP is not bound, the registry could be stale. First sync up the registry from the
// neighboring node before trying to bind the EIP
if (!isEIPBound) {
registry.clearRegistry();
int count = registry.syncUp();
registry.openForTraffic(applicationInfoManager, count);
} else {
// An EIP is already bound
return;
}
bindEIP();
} catch (Throwable e) {
logger.error("Could not bind to EIP", e);
} finally {
if (isEIPBound) {
timer.schedule(new EIPBindingTask(), serverConfig.getEIPBindingRetryIntervalMs());
} else {
timer.schedule(new EIPBindingTask(), serverConfig.getEIPBindingRetryIntervalMsWhenUnbound());
}
}
}
};
}
| 8,218 |
0 | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka/aws/AwsAsgUtil.java | /*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.eureka.aws;
import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.Timer;
import java.util.TimerTask;
import java.util.concurrent.Callable;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import javax.inject.Inject;
import javax.inject.Singleton;
import com.amazonaws.ClientConfiguration;
import com.amazonaws.auth.BasicAWSCredentials;
import com.amazonaws.auth.BasicSessionCredentials;
import com.amazonaws.auth.InstanceProfileCredentialsProvider;
import com.amazonaws.services.autoscaling.AmazonAutoScaling;
import com.amazonaws.services.autoscaling.AmazonAutoScalingClient;
import com.amazonaws.services.autoscaling.model.AutoScalingGroup;
import com.amazonaws.services.autoscaling.model.DescribeAutoScalingGroupsRequest;
import com.amazonaws.services.autoscaling.model.DescribeAutoScalingGroupsResult;
import com.amazonaws.services.autoscaling.model.SuspendedProcess;
import com.amazonaws.services.securitytoken.AWSSecurityTokenService;
import com.amazonaws.services.securitytoken.AWSSecurityTokenServiceClient;
import com.amazonaws.services.securitytoken.model.AssumeRoleRequest;
import com.amazonaws.services.securitytoken.model.AssumeRoleResult;
import com.amazonaws.services.securitytoken.model.Credentials;
import com.google.common.base.Strings;
import com.google.common.cache.CacheBuilder;
import com.google.common.cache.CacheLoader;
import com.google.common.cache.LoadingCache;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;
import com.netflix.appinfo.AmazonInfo;
import com.netflix.appinfo.AmazonInfo.MetaDataKey;
import com.netflix.appinfo.ApplicationInfoManager;
import com.netflix.appinfo.DataCenterInfo;
import com.netflix.appinfo.InstanceInfo;
import com.netflix.discovery.EurekaClientConfig;
import com.netflix.discovery.shared.Application;
import com.netflix.discovery.shared.Applications;
import com.netflix.eureka.EurekaServerConfig;
import com.netflix.eureka.registry.InstanceRegistry;
import com.netflix.servo.annotations.DataSourceType;
import com.netflix.servo.monitor.Monitors;
import com.netflix.servo.monitor.Stopwatch;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* A utility class for querying and updating information about amazon
* autoscaling groups using the AWS APIs.
*
* @author Karthik Ranganathan
*
*/
@Singleton
public class AwsAsgUtil implements AsgClient {
private static final Logger logger = LoggerFactory.getLogger(AwsAsgUtil.class);
private static final String PROP_ADD_TO_LOAD_BALANCER = "AddToLoadBalancer";
private static final String accountId = getAccountId();
private Map<String, Credentials> stsCredentials = new HashMap<>();
private final ExecutorService cacheReloadExecutor = new ThreadPoolExecutor(
1, 10, 60, TimeUnit.SECONDS, new LinkedBlockingQueue<Runnable>(),
new ThreadFactory() {
@Override
public Thread newThread(Runnable r) {
Thread thread = new Thread(r, "Eureka-AWS-isASGEnabled");
thread.setDaemon(true);
return thread;
}
});
private ListeningExecutorService listeningCacheReloadExecutor = MoreExecutors.listeningDecorator(cacheReloadExecutor);
// Cache for the AWS ASG information
private final Timer timer = new Timer("Eureka-ASGCacheRefresh", true);
private final com.netflix.servo.monitor.Timer loadASGInfoTimer = Monitors.newTimer("Eureka-loadASGInfo");
private final EurekaServerConfig serverConfig;
private final EurekaClientConfig clientConfig;
private final InstanceRegistry registry;
private final LoadingCache<CacheKey, Boolean> asgCache;
private final AmazonAutoScaling awsClient;
@Inject
public AwsAsgUtil(EurekaServerConfig serverConfig,
EurekaClientConfig clientConfig,
InstanceRegistry registry) {
this.serverConfig = serverConfig;
this.clientConfig = clientConfig;
this.registry = registry;
this.asgCache = CacheBuilder
.newBuilder().initialCapacity(500)
.expireAfterAccess(serverConfig.getASGCacheExpiryTimeoutMs(), TimeUnit.MILLISECONDS)
.build(new CacheLoader<CacheKey, Boolean>() {
@Override
public Boolean load(CacheKey key) throws Exception {
return isASGEnabledinAWS(key.asgAccountId, key.asgName);
}
@Override
public ListenableFuture<Boolean> reload(final CacheKey key, Boolean oldValue) throws Exception {
return listeningCacheReloadExecutor.submit(new Callable<Boolean>() {
@Override
public Boolean call() throws Exception {
return load(key);
}
});
}
});
this.awsClient = getAmazonAutoScalingClient();
this.awsClient.setEndpoint("autoscaling." + clientConfig.getRegion() + ".amazonaws.com");
this.timer.schedule(getASGUpdateTask(),
serverConfig.getASGUpdateIntervalMs(),
serverConfig.getASGUpdateIntervalMs());
try {
Monitors.registerObject(this);
} catch (Throwable e) {
logger.warn("Cannot register the JMX monitor :", e);
}
}
/**
* Return the status of the ASG whether is enabled or disabled for service.
* The value is picked up from the cache except the very first time.
*
* @param instanceInfo the instanceInfo for the lookup
* @return true if enabled, false otherwise
*/
public boolean isASGEnabled(InstanceInfo instanceInfo) {
CacheKey cacheKey = new CacheKey(getAccountId(instanceInfo, accountId), instanceInfo.getASGName());
Boolean result = asgCache.getIfPresent(cacheKey);
if (result != null) {
return result;
} else {
if (!serverConfig.shouldUseAwsAsgApi()) {
// Disabled, cached values (if any) are still being returned if the caller makes
// a decision to call the disabled client during some sort of transitioning
// period, but no new values will be fetched while disabled.
logger.info(("'{}' is not cached at the moment and won't be fetched because querying AWS ASGs "
+ "has been disabled via the config, returning the fallback value."),
cacheKey);
return true;
}
logger.info("Cache value for asg {} does not exist yet, async refreshing.", cacheKey.asgName);
// Only do an async refresh if it does not yet exist. Do this to refrain from calling aws api too much
asgCache.refresh(cacheKey);
return true;
}
}
/**
* Sets the status of the ASG.
*
* @param asgName The name of the ASG
* @param enabled true to enable, false to disable
*/
public void setStatus(String asgName, boolean enabled) {
String asgAccountId = getASGAccount(asgName);
asgCache.put(new CacheKey(asgAccountId, asgName), enabled);
}
/**
* Check if the ASG is disabled. The amazon flag "AddToLoadBalancer" is
* queried to figure out if it is or not.
*
* @param asgName
* - The name of the ASG for which the status needs to be queried
* @return - true if the ASG is disabled, false otherwise
*/
private boolean isAddToLoadBalancerSuspended(String asgAccountId, String asgName) {
AutoScalingGroup asg;
if(asgAccountId == null || asgAccountId.equals(accountId)) {
asg = retrieveAutoScalingGroup(asgName);
} else {
asg = retrieveAutoScalingGroupCrossAccount(asgAccountId, asgName);
}
if (asg == null) {
logger.warn("The ASG information for {} could not be found. So returning false.", asgName);
return false;
}
return isAddToLoadBalancerSuspended(asg);
}
/**
* Checks if the load balancer addition is disabled or not.
*
* @param asg
* - The ASG object for which the status needs to be checked
* @return - true, if the load balancer addition is suspended, false
* otherwise.
*/
private boolean isAddToLoadBalancerSuspended(AutoScalingGroup asg) {
List<SuspendedProcess> suspendedProcesses = asg.getSuspendedProcesses();
for (SuspendedProcess process : suspendedProcesses) {
if (PROP_ADD_TO_LOAD_BALANCER.equals(process.getProcessName())) {
return true;
}
}
return false;
}
/**
* Queries AWS to get the autoscaling information given the asgName.
*
* @param asgName
* - The name of the ASG.
* @return - The auto scaling group information.
*/
private AutoScalingGroup retrieveAutoScalingGroup(String asgName) {
if (Strings.isNullOrEmpty(asgName)) {
logger.warn("null asgName specified, not attempting to retrieve AutoScalingGroup from AWS");
return null;
}
// You can pass one name or a list of names in the request
DescribeAutoScalingGroupsRequest request = new DescribeAutoScalingGroupsRequest()
.withAutoScalingGroupNames(asgName);
DescribeAutoScalingGroupsResult result = awsClient
.describeAutoScalingGroups(request);
List<AutoScalingGroup> asgs = result.getAutoScalingGroups();
if (asgs.isEmpty()) {
return null;
} else {
return asgs.get(0);
}
}
private Credentials initializeStsSession(String asgAccount) {
AWSSecurityTokenService sts = new AWSSecurityTokenServiceClient(new InstanceProfileCredentialsProvider());
String region = clientConfig.getRegion();
if (!region.equals("us-east-1")) {
sts.setEndpoint("sts." + region + ".amazonaws.com");
}
String roleName = serverConfig.getListAutoScalingGroupsRoleName();
String roleArn = "arn:aws:iam::" + asgAccount + ":role/" + roleName;
AssumeRoleResult assumeRoleResult = sts.assumeRole(new AssumeRoleRequest()
.withRoleArn(roleArn)
.withRoleSessionName("sts-session-" + asgAccount)
);
return assumeRoleResult.getCredentials();
}
private AutoScalingGroup retrieveAutoScalingGroupCrossAccount(String asgAccount, String asgName) {
logger.debug("Getting cross account ASG for asgName: {}, asgAccount: {}", asgName, asgAccount);
Credentials credentials = stsCredentials.get(asgAccount);
if (credentials == null || credentials.getExpiration().getTime() < System.currentTimeMillis() + 1000) {
stsCredentials.put(asgAccount, initializeStsSession(asgAccount));
credentials = stsCredentials.get(asgAccount);
}
ClientConfiguration clientConfiguration = new ClientConfiguration()
.withConnectionTimeout(serverConfig.getASGQueryTimeoutMs());
AmazonAutoScaling autoScalingClient = new AmazonAutoScalingClient(
new BasicSessionCredentials(
credentials.getAccessKeyId(),
credentials.getSecretAccessKey(),
credentials.getSessionToken()
),
clientConfiguration
);
String region = clientConfig.getRegion();
if (!region.equals("us-east-1")) {
autoScalingClient.setEndpoint("autoscaling." + region + ".amazonaws.com");
}
DescribeAutoScalingGroupsRequest request = new DescribeAutoScalingGroupsRequest()
.withAutoScalingGroupNames(asgName);
DescribeAutoScalingGroupsResult result = autoScalingClient.describeAutoScalingGroups(request);
List<AutoScalingGroup> asgs = result.getAutoScalingGroups();
if (asgs.isEmpty()) {
return null;
} else {
return asgs.get(0);
}
}
/**
* Queries AWS to see if the load balancer flag is suspended.
*
* @param asgAccountid the accountId this asg resides in, if applicable (null will use the default accountId)
* @param asgName the name of the asg
* @return true, if the load balancer flag is not suspended, false otherwise.
*/
private Boolean isASGEnabledinAWS(String asgAccountid, String asgName) {
try {
Stopwatch t = this.loadASGInfoTimer.start();
boolean returnValue = !isAddToLoadBalancerSuspended(asgAccountid, asgName);
t.stop();
return returnValue;
} catch (Throwable e) {
logger.error("Could not get ASG information from AWS: ", e);
}
return Boolean.TRUE;
}
/**
* Gets the number of elements in the ASG cache.
*
* @return the long value representing the number of elements in the ASG
* cache.
*/
@com.netflix.servo.annotations.Monitor(name = "numOfElementsinASGCache",
description = "Number of elements in the ASG Cache", type = DataSourceType.GAUGE)
public long getNumberofElementsinASGCache() {
return asgCache.size();
}
/**
* Gets the number of ASG queries done in the period.
*
* @return the long value representing the number of ASG queries done in the
* period.
*/
@com.netflix.servo.annotations.Monitor(name = "numOfASGQueries",
description = "Number of queries made to AWS to retrieve ASG information", type = DataSourceType.COUNTER)
public long getNumberofASGQueries() {
return asgCache.stats().loadCount();
}
/**
* Gets the number of ASG queries that failed because of some reason.
*
* @return the long value representing the number of ASG queries that failed
* because of some reason.
*/
@com.netflix.servo.annotations.Monitor(name = "numOfASGQueryFailures",
description = "Number of queries made to AWS to retrieve ASG information and that failed",
type = DataSourceType.COUNTER)
public long getNumberofASGQueryFailures() {
return asgCache.stats().loadExceptionCount();
}
/**
* Gets the task that updates the ASG information periodically.
*
* @return TimerTask that updates the ASG information periodically.
*/
private TimerTask getASGUpdateTask() {
return new TimerTask() {
@Override
public void run() {
try {
if (!serverConfig.shouldUseAwsAsgApi()) {
// Disabled via the config, no-op.
return;
}
// First get the active ASG names
Set<CacheKey> cacheKeys = getCacheKeys();
if (logger.isDebugEnabled()) {
logger.debug("Trying to refresh the keys for {}", Arrays.toString(cacheKeys.toArray()));
}
for (CacheKey key : cacheKeys) {
try {
asgCache.refresh(key);
} catch (Throwable e) {
logger.error("Error updating the ASG cache for {}", key, e);
}
}
} catch (Throwable e) {
logger.error("Error updating the ASG cache", e);
}
}
};
}
/**
* Get the cacheKeys of all the ASG to which query AWS for.
*
* <p>
* The names are obtained from the {@link com.netflix.eureka.registry.InstanceRegistry} which is then
* used for querying the AWS.
* </p>
*
* @return the set of ASG cacheKeys (asgName + accountId).
*/
private Set<CacheKey> getCacheKeys() {
Set<CacheKey> cacheKeys = new HashSet<>();
Applications apps = registry.getApplicationsFromLocalRegionOnly();
for (Application app : apps.getRegisteredApplications()) {
for (InstanceInfo instanceInfo : app.getInstances()) {
String localAccountId = getAccountId(instanceInfo, accountId);
String asgName = instanceInfo.getASGName();
if (asgName != null) {
CacheKey key = new CacheKey(localAccountId, asgName);
cacheKeys.add(key);
}
}
}
return cacheKeys;
}
/**
* Get the AWS account id where an ASG is created.
* Warning: This is expensive as it loops through all instances currently registered.
*
* @param asgName The name of the ASG
* @return the account id
*/
private String getASGAccount(String asgName) {
Applications apps = registry.getApplicationsFromLocalRegionOnly();
for (Application app : apps.getRegisteredApplications()) {
for (InstanceInfo instanceInfo : app.getInstances()) {
String thisAsgName = instanceInfo.getASGName();
if (thisAsgName != null && thisAsgName.equals(asgName)) {
String localAccountId = getAccountId(instanceInfo, null);
if (localAccountId != null) {
return localAccountId;
}
}
}
}
logger.info("Couldn't get the ASG account for {}, using the default accountId instead", asgName);
return accountId;
}
private String getAccountId(InstanceInfo instanceInfo, String fallbackId) {
String localAccountId = null;
DataCenterInfo dataCenterInfo = instanceInfo.getDataCenterInfo();
if (dataCenterInfo instanceof AmazonInfo) {
localAccountId = ((AmazonInfo) dataCenterInfo).get(MetaDataKey.accountId);
}
return localAccountId == null ? fallbackId : localAccountId;
}
private AmazonAutoScaling getAmazonAutoScalingClient() {
String aWSAccessId = serverConfig.getAWSAccessId();
String aWSSecretKey = serverConfig.getAWSSecretKey();
ClientConfiguration clientConfiguration = new ClientConfiguration()
.withConnectionTimeout(serverConfig.getASGQueryTimeoutMs());
if (null != aWSAccessId && !"".equals(aWSAccessId) && null != aWSSecretKey && !"".equals(aWSSecretKey)) {
return new AmazonAutoScalingClient(
new BasicAWSCredentials(aWSAccessId, aWSSecretKey),
clientConfiguration);
} else {
return new AmazonAutoScalingClient(
new InstanceProfileCredentialsProvider(),
clientConfiguration);
}
}
private static String getAccountId() {
InstanceInfo myInfo = ApplicationInfoManager.getInstance().getInfo();
return ((AmazonInfo) myInfo.getDataCenterInfo()).get(MetaDataKey.accountId);
}
private static class CacheKey {
final String asgAccountId;
final String asgName;
CacheKey(String asgAccountId, String asgName) {
this.asgAccountId = asgAccountId;
this.asgName = asgName;
}
@Override
public String toString() {
return "CacheKey{" +
"asgName='" + asgName + '\'' +
", asgAccountId='" + asgAccountId + '\'' +
'}';
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (!(o instanceof CacheKey)) return false;
CacheKey cacheKey = (CacheKey) o;
if (asgAccountId != null ? !asgAccountId.equals(cacheKey.asgAccountId) : cacheKey.asgAccountId != null)
return false;
if (asgName != null ? !asgName.equals(cacheKey.asgName) : cacheKey.asgName != null) return false;
return true;
}
@Override
public int hashCode() {
int result = asgName != null ? asgName.hashCode() : 0;
result = 31 * result + (asgAccountId != null ? asgAccountId.hashCode() : 0);
return result;
}
}
}
| 8,219 |
0 | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka | Create_ds/eureka/eureka-core/src/main/java/com/netflix/eureka/aws/AwsBindingStrategy.java | package com.netflix.eureka.aws;
/**
 * Strategy used by a Eureka server on AWS to bind its public identity:
 * an Elastic IP, a Route53 DNS record, or a secondary ENI.
 */
public enum AwsBindingStrategy {
    EIP, ROUTE53, ENI
}
| 8,220 |
0 | Create_ds/eureka/eureka-test-utils/src/test/java/com/netflix/discovery | Create_ds/eureka/eureka-test-utils/src/test/java/com/netflix/discovery/util/InstanceInfoGeneratorTest.java | /*
* Copyright 2015 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.discovery.util;
import java.util.Iterator;
import com.netflix.appinfo.InstanceInfo;
import org.junit.Test;
import static org.hamcrest.CoreMatchers.equalTo;
import static org.hamcrest.CoreMatchers.is;
import static org.junit.Assert.assertThat;
/**
* @author Tomasz Bak
*/
public class InstanceInfoGeneratorTest {
@Test
public void testInstanceInfoStream() throws Exception {
Iterator<InstanceInfo> it = InstanceInfoGenerator.newBuilder(4, "app1", "app2").build().serviceIterator();
assertThat(it.next().getAppName(), is(equalTo("APP1")));
assertThat(it.next().getAppName(), is(equalTo("APP2")));
assertThat(it.next().getAppName(), is(equalTo("APP1")));
assertThat(it.next().getAppName(), is(equalTo("APP2")));
}
} | 8,221 |
0 | Create_ds/eureka/eureka-test-utils/src/test/java/com/netflix/discovery/shared | Create_ds/eureka/eureka-test-utils/src/test/java/com/netflix/discovery/shared/transport/SimpleEurekaHttpServerTest.java | /*
* Copyright 2015 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.discovery.shared.transport;
import java.net.URI;
import com.google.common.base.Preconditions;
import com.netflix.appinfo.EurekaAccept;
import com.netflix.discovery.converters.wrappers.CodecWrappers.JacksonJson;
import com.netflix.discovery.shared.resolver.DefaultEndpoint;
import com.netflix.discovery.shared.transport.jersey.JerseyEurekaHttpClientFactory;
import org.junit.After;
/**
* @author Tomasz Bak
*/
public class SimpleEurekaHttpServerTest extends EurekaHttpClientCompatibilityTestSuite {

    // Factory owns the underlying connection pool; shut down in tearDown.
    private TransportClientFactory httpClientFactory;

    // Lazily created once per test run; guarded by the Preconditions check below.
    private EurekaHttpClient eurekaHttpClient;

    @Override
    @After
    public void tearDown() throws Exception {
        // Release pooled connections before the base suite tears down its server.
        httpClientFactory.shutdown();
        super.tearDown();
    }

    @Override
    protected EurekaHttpClient getEurekaHttpClient(URI serviceURI) {
        // The base suite is expected to request the client exactly once.
        Preconditions.checkState(eurekaHttpClient == null, "EurekaHttpClient has been already created");
        // Jersey-based client configured with the Jackson JSON codec on both
        // the encode and decode paths (full, non-compressed Eureka payloads).
        httpClientFactory = JerseyEurekaHttpClientFactory.newBuilder()
                .withClientName("test")
                .withMaxConnectionsPerHost(10)
                .withMaxTotalConnections(10)
                .withDecoder(JacksonJson.class.getSimpleName(), EurekaAccept.full.name())
                .withEncoder(JacksonJson.class.getSimpleName())
                .build();
        this.eurekaHttpClient = httpClientFactory.newClient(new DefaultEndpoint(serviceURI.toString()));
        return eurekaHttpClient;
    }
}
0 | Create_ds/eureka/eureka-test-utils/src/main/java/com/netflix/discovery | Create_ds/eureka/eureka-test-utils/src/main/java/com/netflix/discovery/util/DiagnosticClient.java | package com.netflix.discovery.util;
import com.netflix.discovery.shared.Applications;
import com.netflix.eureka.DefaultEurekaServerConfig;
import com.netflix.eureka.EurekaServerConfig;
import com.netflix.eureka.transport.JerseyReplicationClient;
import com.netflix.eureka.resources.DefaultServerCodecs;
import static com.netflix.discovery.util.EurekaEntityFunctions.countInstances;
/**
* A tool for running diagnostic tasks against a discovery server. Currently limited to observing
* of consistency of delta updates.
*
* @author Tomasz Bak
*/
public class DiagnosticClient {

    /**
     * Entry point. Performs a full registry fetch from the discovery server given in
     * {@code args[0]}, then loops forever: roughly every 30 seconds it fetches a delta,
     * merges it into the local view and compares hash codes. On a mismatch it logs the
     * inconsistency and falls back to a fresh full fetch.
     *
     * @param args args[0] is the discovery server URL to diagnose
     * @throws InterruptedException if the inter-fetch sleep is interrupted
     */
    public static void main(String[] args) throws InterruptedException {
        String discoveryURL = args[0];
        long startTime = System.currentTimeMillis();

        EurekaServerConfig serverConfig = new DefaultEurekaServerConfig("eureka.");
        JerseyReplicationClient client = JerseyReplicationClient.createReplicationClient(
                serverConfig,
                new DefaultServerCodecs(serverConfig),
                discoveryURL
        );
        Applications applications = client.getApplications().getEntity();
        System.out.println("Applications count=" + applications.getRegisteredApplications().size());
        System.out.println("Instance count=" + countInstances(applications));

        while (true) {
            long delay = System.currentTimeMillis() - startTime;
            if (delay >= 30000) {
                System.out.println("Processing delay exceeds 30sec; we may be out of sync");
            } else {
                long waitTime = 30 * 1000 - delay;
                System.out.println("Waiting " + waitTime / 1000 + "sec before next fetch...");
                // Sleep the actual remaining time; the original slept a fixed 15s,
                // which contradicted the message above and drifted off the 30s cycle.
                Thread.sleep(waitTime);
            }
            startTime = System.currentTimeMillis();

            Applications delta = client.getDelta().getEntity();
            Applications merged = EurekaEntityFunctions.mergeApplications(applications, delta);
            if (merged.getAppsHashCode().equals(delta.getAppsHashCode())) {
                // Hash codes agree: the merged view matches the server's view.
                System.out.println("Hash codes match: " + delta.getAppsHashCode() + "(delta count=" + countInstances(delta) + ')');
                applications = merged;
            } else {
                // Inconsistent delta: log all three hash codes and resync with a full fetch.
                System.out.println("ERROR: hash codes do not match (" + delta.getAppsHashCode() + "(delta) != "
                                + merged.getAppsHashCode() + " (merged) != "
                                + applications.getAppsHashCode() + "(old apps)" +
                                "(delta count=" + countInstances(delta) + ')'
                );
                applications = client.getApplications().getEntity();
            }
        }
    }
}
| 8,223 |
0 | Create_ds/eureka/eureka-test-utils/src/main/java/com/netflix/discovery | Create_ds/eureka/eureka-test-utils/src/main/java/com/netflix/discovery/util/InstanceInfoGenerator.java | package com.netflix.discovery.util;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.NoSuchElementException;
import com.netflix.appinfo.AmazonInfo;
import com.netflix.appinfo.AmazonInfo.MetaDataKey;
import com.netflix.appinfo.InstanceInfo;
import com.netflix.appinfo.InstanceInfo.ActionType;
import com.netflix.appinfo.InstanceInfo.Builder;
import com.netflix.appinfo.InstanceInfo.InstanceStatus;
import com.netflix.appinfo.InstanceInfo.PortType;
import com.netflix.appinfo.LeaseInfo;
import com.netflix.discovery.shared.Application;
import com.netflix.discovery.shared.Applications;
import static com.netflix.discovery.util.EurekaEntityFunctions.mergeApplications;
import static com.netflix.discovery.util.EurekaEntityFunctions.toApplicationMap;
/**
* Test data generator.
*
* @author Tomasz Bak
*/
public class InstanceInfoGenerator {
public static final int RENEW_INTERVAL = 5;
private final int instanceCount;
private final String[] appNames;
private final String zone;
private final boolean taggedId;
private Iterator<InstanceInfo> currentIt;
private Applications allApplications = new Applications();
private final boolean withMetaData;
private final boolean includeAsg;
private final boolean useInstanceId;
InstanceInfoGenerator(InstanceInfoGeneratorBuilder builder) {
this.instanceCount = builder.instanceCount;
this.appNames = builder.appNames;
this.zone = builder.zone == null ? "us-east-1c" : builder.zone;
this.taggedId = builder.taggedId;
this.withMetaData = builder.includeMetaData;
this.includeAsg = builder.includeAsg;
this.useInstanceId = builder.useInstanceId;
}
public Applications takeDelta(int count) {
if (currentIt == null) {
currentIt = serviceIterator();
allApplications = new Applications();
}
List<InstanceInfo> instanceBatch = new ArrayList<>();
for (int i = 0; i < count; i++) {
InstanceInfo next = currentIt.next();
next.setActionType(ActionType.ADDED);
instanceBatch.add(next);
}
Applications nextBatch = EurekaEntityFunctions.toApplications(toApplicationMap(instanceBatch));
allApplications = mergeApplications(allApplications, nextBatch);
nextBatch.setAppsHashCode(allApplications.getAppsHashCode());
return nextBatch;
}
public Iterator<InstanceInfo> serviceIterator() {
return new Iterator<InstanceInfo>() {
private int returned;
private final int[] appInstanceIds = new int[appNames.length];
private int currentApp;
@Override
public boolean hasNext() {
return returned < instanceCount;
}
@Override
public InstanceInfo next() {
if (!hasNext()) {
throw new NoSuchElementException("no more InstanceInfo elements");
}
InstanceInfo toReturn = generateInstanceInfo(currentApp, appInstanceIds[currentApp], useInstanceId, ActionType.ADDED);
appInstanceIds[currentApp]++;
currentApp = (currentApp + 1) % appNames.length;
returned++;
return toReturn;
}
@Override
public void remove() {
throw new IllegalStateException("method not supported");
}
};
}
public Applications toApplications() {
Map<String, Application> appsByName = new HashMap<>();
Iterator<InstanceInfo> it = serviceIterator();
while (it.hasNext()) {
InstanceInfo instanceInfo = it.next();
Application instanceApp = appsByName.get(instanceInfo.getAppName());
if (instanceApp == null) {
instanceApp = new Application(instanceInfo.getAppName());
appsByName.put(instanceInfo.getAppName(), instanceApp);
}
instanceApp.addInstance(instanceInfo);
}
// Do not pass application list to the constructor, as it does not initialize properly Applications
// data structure.
Applications applications = new Applications();
for (Application app : appsByName.values()) {
applications.addApplication(app);
}
applications.shuffleInstances(false);
applications.setAppsHashCode(applications.getReconcileHashCode());
applications.setVersion(1L);
return applications;
}
public List<InstanceInfo> toInstanceList() {
List<InstanceInfo> result = new ArrayList<>(instanceCount);
Iterator<InstanceInfo> it = serviceIterator();
while (it.hasNext()) {
InstanceInfo instanceInfo = it.next();
result.add(instanceInfo);
}
return result;
}
public InstanceInfo first() {
return take(0);
}
public InstanceInfo take(int idx) {
return toInstanceList().get(idx);
}
public static InstanceInfo takeOne() {
return newBuilder(1, 1).withMetaData(true).build().serviceIterator().next();
}
public static InstanceInfoGeneratorBuilder newBuilder(int instanceCount, int applicationCount) {
return new InstanceInfoGeneratorBuilder(instanceCount, applicationCount);
}
public static InstanceInfoGeneratorBuilder newBuilder(int instanceCount, String... appNames) {
return new InstanceInfoGeneratorBuilder(instanceCount, appNames);
}
public Applications takeDeltaForDelete(boolean useInstanceId, int instanceCount) {
List<InstanceInfo> instanceInfoList = new ArrayList<>();
for (int i = 0; i < instanceCount; i ++) {
instanceInfoList.add(this.generateInstanceInfo(i, i, useInstanceId, ActionType.DELETED));
}
Applications delete = EurekaEntityFunctions.toApplications(toApplicationMap(instanceInfoList));
allApplications = mergeApplications(allApplications, delete);
delete.setAppsHashCode(allApplications.getAppsHashCode());
return delete;
}
// useInstanceId to false to generate older InstanceInfo types that does not use instanceId field for instance id.
private InstanceInfo generateInstanceInfo(int appIndex, int appInstanceId, boolean useInstanceId, ActionType actionType) {
String appName = appNames[appIndex];
String hostName = "instance" + appInstanceId + '.' + appName + ".com";
String privateHostname = "ip-10.0" + appIndex + "." + appInstanceId + ".compute.internal";
String publicIp = "20.0." + appIndex + '.' + appInstanceId;
String privateIp = "192.168." + appIndex + '.' + appInstanceId;
String ipv6 = "::FFFF:" + publicIp;
String instanceId = String.format("i-%04d%04d", appIndex, appInstanceId);
if (taggedId) {
instanceId = instanceId + '_' + appName;
}
AmazonInfo dataCenterInfo = AmazonInfo.Builder.newBuilder()
.addMetadata(MetaDataKey.accountId, "testAccountId")
.addMetadata(MetaDataKey.amiId, String.format("ami-%04d%04d", appIndex, appInstanceId))
.addMetadata(MetaDataKey.availabilityZone, zone)
.addMetadata(MetaDataKey.instanceId, instanceId)
.addMetadata(MetaDataKey.instanceType, "m2.xlarge")
.addMetadata(MetaDataKey.localHostname, privateHostname)
.addMetadata(MetaDataKey.localIpv4, privateIp)
.addMetadata(MetaDataKey.publicHostname, hostName)
.addMetadata(MetaDataKey.publicIpv4, publicIp)
.addMetadata(MetaDataKey.ipv6, ipv6)
.build();
String unsecureURL = "http://" + hostName + ":8080";
String secureURL = "https://" + hostName + ":8081";
long now = System.currentTimeMillis();
LeaseInfo leaseInfo = LeaseInfo.Builder.newBuilder()
.setDurationInSecs(3 * RENEW_INTERVAL)
.setRenewalIntervalInSecs(RENEW_INTERVAL)
.setServiceUpTimestamp(now - RENEW_INTERVAL)
.setRegistrationTimestamp(now)
.setEvictionTimestamp(now + 3 * RENEW_INTERVAL)
.setRenewalTimestamp(now + RENEW_INTERVAL)
.build();
Builder builder = useInstanceId
? InstanceInfo.Builder.newBuilder().setInstanceId(instanceId)
: InstanceInfo.Builder.newBuilder();
builder
.setActionType(actionType)
.setAppGroupName(appName + "Group")
.setAppName(appName)
.setHostName(hostName)
.setIPAddr(publicIp)
.setPort(8080)
.setSecurePort(8081)
.enablePort(PortType.SECURE, true)
.setHealthCheckUrls("/healthcheck", unsecureURL + "/healthcheck", secureURL + "/healthcheck")
.setHomePageUrl("/homepage", unsecureURL + "/homepage")
.setStatusPageUrl("/status", unsecureURL + "/status")
.setLeaseInfo(leaseInfo)
.setStatus(InstanceStatus.UP)
.setVIPAddress(appName + ":8080")
.setSecureVIPAddress(appName + ":8081")
.setDataCenterInfo(dataCenterInfo)
.setLastUpdatedTimestamp(System.currentTimeMillis() - 100)
.setLastDirtyTimestamp(System.currentTimeMillis() - 100)
.setIsCoordinatingDiscoveryServer(true)
.enablePort(PortType.UNSECURE, true);
if (includeAsg) {
builder.setASGName(appName + "ASG");
}
if (withMetaData) {
builder.add("appKey" + appIndex, Integer.toString(appInstanceId));
}
return builder.build();
}
public static class InstanceInfoGeneratorBuilder {
private final int instanceCount;
private String[] appNames;
private boolean includeMetaData;
private boolean includeAsg = true;
private String zone;
private boolean taggedId;
private boolean useInstanceId = true;
public InstanceInfoGeneratorBuilder(int instanceCount, int applicationCount) {
this.instanceCount = instanceCount;
String[] appNames = new String[applicationCount];
for (int i = 0; i < appNames.length; i++) {
appNames[i] = "application" + i;
}
this.appNames = appNames;
}
public InstanceInfoGeneratorBuilder(int instanceCount, String... appNames) {
this.instanceCount = instanceCount;
this.appNames = appNames;
}
public InstanceInfoGeneratorBuilder withZone(String zone) {
this.zone = zone;
return this;
}
public InstanceInfoGeneratorBuilder withTaggedId(boolean taggedId) {
this.taggedId = taggedId;
return this;
}
public InstanceInfoGeneratorBuilder withMetaData(boolean includeMetaData) {
this.includeMetaData = includeMetaData;
return this;
}
public InstanceInfoGeneratorBuilder withAsg(boolean includeAsg) {
this.includeAsg = includeAsg;
return this;
}
public InstanceInfoGeneratorBuilder withUseInstanceId(boolean useInstanceId) {
this.useInstanceId = useInstanceId;
return this;
}
public InstanceInfoGenerator build() {
return new InstanceInfoGenerator(this);
}
}
}
| 8,224 |
0 | Create_ds/eureka/eureka-test-utils/src/main/java/com/netflix/discovery | Create_ds/eureka/eureka-test-utils/src/main/java/com/netflix/discovery/util/ApplicationFunctions.java | package com.netflix.discovery.util;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import com.netflix.appinfo.InstanceInfo;
import com.netflix.discovery.shared.Application;
import com.netflix.discovery.shared.Applications;
/**
* Collection of functions operating on {@link Applications} and {@link Application} data
* structures.
*
* @author Tomasz Bak
* @deprecated Use instead {@link EurekaEntityFunctions}
*/
/**
 * Collection of functions operating on {@link Applications} and {@link Application} data
 * structures.
 *
 * @author Tomasz Bak
 * @deprecated Use instead {@link EurekaEntityFunctions}
 */
public final class ApplicationFunctions {

    private ApplicationFunctions() {
    }

    /**
     * Groups a flat list of instances into {@link Application} objects keyed by application name.
     */
    public static Map<String, Application> toApplicationMap(List<InstanceInfo> instances) {
        Map<String, Application> appsByName = new HashMap<String, Application>();
        for (InstanceInfo instanceInfo : instances) {
            String appName = instanceInfo.getAppName();
            Application app = appsByName.get(appName);
            if (app == null) {
                app = new Application(appName);
                appsByName.put(appName, app);
            }
            app.addInstance(instanceInfo);
        }
        return appsByName;
    }

    /**
     * Wraps the given applications into an {@link Applications} container, refreshing its
     * version and apps hash code meta data.
     */
    public static Applications toApplications(Map<String, Application> applicationMap) {
        Applications result = new Applications();
        for (Application app : applicationMap.values()) {
            result.addApplication(app);
        }
        return updateMeta(result);
    }

    /** Returns the set of application names registered in the given {@link Applications}. */
    public static Set<String> applicationNames(Applications applications) {
        Set<String> result = new HashSet<>();
        for (Application app : applications.getRegisteredApplications()) {
            result.add(app.getName());
        }
        return result;
    }

    /**
     * Returns a shallow copy of the given application; the {@link InstanceInfo} objects
     * themselves are shared with the original, not copied.
     */
    public static Application copyOf(Application application) {
        Application result = new Application(application.getName());
        for (InstanceInfo instanceInfo : application.getInstances()) {
            result.addInstance(instanceInfo);
        }
        return result;
    }

    /**
     * Merges two snapshots of the same application. Each instance of {@code second} is applied
     * to a copy of {@code first} according to its recorded action type: ADDED/MODIFIED
     * (re-)register the instance, DELETED removes it.
     *
     * @throws IllegalArgumentException if the two applications have different names
     */
    public static Application merge(Application first, Application second) {
        if (!first.getName().equals(second.getName())) {
            throw new IllegalArgumentException("Cannot merge applications with different names");
        }
        Application merged = copyOf(first);
        for (InstanceInfo instanceInfo : second.getInstances()) {
            switch (instanceInfo.getActionType()) {
                case ADDED:
                case MODIFIED:
                    merged.addInstance(instanceInfo);
                    break;
                case DELETED:
                    merged.removeInstance(instanceInfo);
            }
        }
        return merged;
    }

    /**
     * Merges two registry snapshots application by application, then refreshes the merged
     * container's meta data. Applications present in only one side are copied as-is.
     */
    public static Applications merge(Applications first, Applications second) {
        Set<String> firstNames = applicationNames(first);
        Set<String> secondNames = applicationNames(second);
        Set<String> allNames = new HashSet<>(firstNames);
        allNames.addAll(secondNames);

        Applications merged = new Applications();
        for (String appName : allNames) {
            boolean inFirst = firstNames.contains(appName);
            boolean inSecond = secondNames.contains(appName);
            Application resolved;
            if (inFirst && inSecond) {
                resolved = merge(first.getRegisteredApplications(appName), second.getRegisteredApplications(appName));
            } else if (inFirst) {
                resolved = copyOf(first.getRegisteredApplications(appName));
            } else {
                resolved = copyOf(second.getRegisteredApplications(appName));
            }
            merged.addApplication(resolved);
        }
        return updateMeta(merged);
    }

    /** Stamps the container with a fixed version and a freshly computed apps hash code. */
    public static Applications updateMeta(Applications applications) {
        applications.setVersion(1L);
        applications.setAppsHashCode(applications.getReconcileHashCode());
        return applications;
    }

    /** Returns the total number of instances across all registered applications. */
    public static int countInstances(Applications applications) {
        int total = 0;
        for (Application app : applications.getRegisteredApplications()) {
            total += app.getInstances().size();
        }
        return total;
    }
}
| 8,225 |
0 | Create_ds/eureka/eureka-test-utils/src/main/java/com/netflix/discovery/junit | Create_ds/eureka/eureka-test-utils/src/main/java/com/netflix/discovery/junit/resource/SimpleEurekaHttpServerResource.java | /*
* Copyright 2015 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.discovery.junit.resource;
import com.netflix.discovery.shared.transport.EurekaHttpClient;
import com.netflix.discovery.shared.transport.SimpleEurekaHttpServer;
import org.junit.rules.ExternalResource;
import static org.mockito.Mockito.mock;
/**
* @author Tomasz Bak
*/
public class SimpleEurekaHttpServerResource extends ExternalResource {

    // Mockito mock acting as the server's request handler; tests stub its
    // methods to script the responses the embedded server returns.
    private final EurekaHttpClient requestHandler = mock(EurekaHttpClient.class);

    // Created in before() and torn down in after(); null outside the rule's lifecycle.
    private SimpleEurekaHttpServer eurekaHttpServer;

    /** Starts an embedded Eureka HTTP server backed by the mocked request handler. */
    @Override
    protected void before() throws Throwable {
        eurekaHttpServer = new SimpleEurekaHttpServer(requestHandler);
    }

    /** Shuts the server down; the null check covers the case where before() failed. */
    @Override
    protected void after() {
        if (eurekaHttpServer != null) {
            eurekaHttpServer.shutdown();
        }
    }

    /** Returns the mocked handler so tests can program expectations on it. */
    public EurekaHttpClient getRequestHandler() {
        return requestHandler;
    }

    /** Returns the running server (valid only between before() and after()). */
    public SimpleEurekaHttpServer getEurekaHttpServer() {
        return eurekaHttpServer;
    }
}
| 8,226 |
0 | Create_ds/eureka/eureka-test-utils/src/main/java/com/netflix/discovery/junit | Create_ds/eureka/eureka-test-utils/src/main/java/com/netflix/discovery/junit/resource/DiscoveryClientResource.java | package com.netflix.discovery.junit.resource;
import javax.ws.rs.core.UriBuilder;
import java.net.URI;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.UUID;
import java.util.concurrent.Callable;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import com.google.common.base.Preconditions;
import com.netflix.appinfo.ApplicationInfoManager;
import com.netflix.appinfo.DataCenterInfo;
import com.netflix.appinfo.EurekaInstanceConfig;
import com.netflix.appinfo.InstanceInfo;
import com.netflix.appinfo.LeaseInfo;
import com.netflix.appinfo.MyDataCenterInstanceConfig;
import com.netflix.config.ConfigurationManager;
import com.netflix.discovery.CacheRefreshedEvent;
import com.netflix.discovery.DefaultEurekaClientConfig;
import com.netflix.discovery.DiscoveryClient;
import com.netflix.discovery.DiscoveryManager;
import com.netflix.discovery.EurekaClient;
import com.netflix.discovery.EurekaClientConfig;
import com.netflix.discovery.shared.transport.SimpleEurekaHttpServer;
import com.netflix.discovery.shared.transport.jersey.Jersey1DiscoveryClientOptionalArgs;
import com.netflix.eventbus.impl.EventBusImpl;
import com.netflix.eventbus.spi.EventBus;
import com.netflix.eventbus.spi.InvalidSubscriberException;
import com.netflix.eventbus.spi.Subscribe;
import org.junit.rules.ExternalResource;
/**
* JUnit rule for discovery client + collection of static methods for setting it up.
*/
/**
 * JUnit rule for discovery client + collection of static methods for setting it up.
 * <p>
 * Configuration is bound through Archaius {@link ConfigurationManager} under
 * {@link #EUREKA_TEST_NAMESPACE}; every bound property is tracked so {@link #after()}
 * can clear it again and tests do not leak configuration into each other.
 */
public class DiscoveryClientResource extends ExternalResource {
    public static final String REMOTE_REGION = "myregion";
    public static final String REMOTE_ZONE = "myzone";
    public static final int CLIENT_REFRESH_RATE = 10;
    public static final String EUREKA_TEST_NAMESPACE = "eurekaTestNamespace.";

    // Names of every property bound via bindProperty(), cleared in after().
    // NOTE(review): static and therefore shared across instances; concurrent use
    // of multiple resources would interfere — confirm tests run these serially.
    private static final Set<String> SYSTEM_PROPERTY_TRACKER = new HashSet<>();

    private final boolean registrationEnabled;
    private final boolean registryFetchEnabled;
    private final InstanceInfo instance;
    private final SimpleEurekaHttpServer eurekaHttpServer;
    private final Callable<Integer> portResolverCallable;
    private final List<String> remoteRegions; // may be null when never configured
    private final String vipFetch;
    private final String userName;
    private final String password;

    private EventBus eventBus;
    // NOTE(review): applicationManager (lazy cache used by createApplicationManager())
    // and applicationInfoManager (the one handed to the DiscoveryClient) look redundant;
    // kept as-is to preserve the existing lazy-init behavior.
    private ApplicationInfoManager applicationManager;
    private EurekaClient client;

    // Resources created through fork(); shut down together with this resource.
    private final List<DiscoveryClientResource> forkedDiscoveryClientResources = new ArrayList<>();
    private ApplicationInfoManager applicationInfoManager;

    DiscoveryClientResource(DiscoveryClientRuleBuilder builder) {
        this.registrationEnabled = builder.registrationEnabled;
        this.registryFetchEnabled = builder.registryFetchEnabled;
        this.portResolverCallable = builder.portResolverCallable;
        this.eurekaHttpServer = builder.eurekaHttpServer;
        this.instance = builder.instance;
        this.remoteRegions = builder.remoteRegions;
        this.vipFetch = builder.vipFetch;
        this.userName = builder.userName;
        this.password = builder.password;
    }

    /** Returns the {@link InstanceInfo} describing this (test) client instance. */
    public InstanceInfo getMyInstanceInfo() {
        return createApplicationManager().getInfo();
    }

    /** Returns the event bus wired into the client, creating the client lazily if needed. */
    public EventBus getEventBus() {
        if (client == null) {
            getClient(); // Lazy initialization
        }
        return eventBus;
    }

    public ApplicationInfoManager getApplicationInfoManager() {
        return applicationInfoManager;
    }

    /**
     * Lazily creates and returns the {@link EurekaClient}, binding all required
     * configuration properties on first use.
     */
    public EurekaClient getClient() {
        if (client == null) {
            try {
                applicationInfoManager = createApplicationManager();
                EurekaClientConfig clientConfig = createEurekaClientConfig();

                Jersey1DiscoveryClientOptionalArgs optionalArgs = new Jersey1DiscoveryClientOptionalArgs();
                eventBus = new EventBusImpl();
                optionalArgs.setEventBus(eventBus);

                client = new DiscoveryClient(applicationInfoManager, clientConfig, optionalArgs);
            } catch (Exception e) {
                throw new RuntimeException(e);
            }
        }
        return client;
    }

    /**
     * Blocks until the client publishes a {@link CacheRefreshedEvent} or the timeout elapses.
     *
     * @return {@code true} if a cache refresh was observed within the timeout
     */
    public boolean awaitCacheUpdate(long timeout, TimeUnit unit) throws InterruptedException {
        final CountDownLatch latch = new CountDownLatch(1);
        Object eventListener = new Object() {
            @Subscribe
            public void consume(CacheRefreshedEvent event) {
                latch.countDown();
            }
        };
        try {
            getEventBus().registerSubscriber(eventListener);
        } catch (InvalidSubscriberException e) {
            throw new IllegalStateException("Unexpected error during subscriber registration", e);
        }
        try {
            return latch.await(timeout, unit);
        } finally {
            getEventBus().unregisterSubscriber(eventListener);
        }
    }

    /** Lazily builds the {@link ApplicationInfoManager} with a fast (1s) lease renewal. */
    private ApplicationInfoManager createApplicationManager() {
        if (applicationManager == null) {
            EurekaInstanceConfig instanceConfig = new MyDataCenterInstanceConfig(EUREKA_TEST_NAMESPACE) {
                @Override
                public String getAppname() {
                    return "discoveryClientTest";
                }

                @Override
                public int getLeaseRenewalIntervalInSeconds() {
                    return 1;
                }
            };
            applicationManager = new ApplicationInfoManager(instanceConfig);
        }
        return applicationManager;
    }

    /**
     * Binds all client configuration properties (service URL, registration, registry fetch,
     * remote regions, optional VIP filter and basic auth) and returns the resulting config.
     */
    private EurekaClientConfig createEurekaClientConfig() throws Exception {
        // Cluster connectivity
        URI serviceURI;
        if (portResolverCallable != null) {
            serviceURI = new URI("http://localhost:" + portResolverCallable.call() + "/eureka/v2/");
        } else if (eurekaHttpServer != null) {
            serviceURI = eurekaHttpServer.getServiceURI();
        } else {
            throw new IllegalStateException("Either port or EurekaHttpServer must be configured");
        }
        if (userName != null) {
            // HTTP basic auth is carried in the URI user-info part.
            serviceURI = UriBuilder.fromUri(serviceURI).userInfo(userName + ':' + password).build();
        }
        bindProperty(EUREKA_TEST_NAMESPACE + "serviceUrl.default", serviceURI.toString());
        if (remoteRegions != null && !remoteRegions.isEmpty()) {
            StringBuilder regions = new StringBuilder();
            for (String region : remoteRegions) {
                regions.append(',').append(region);
            }
            bindProperty(EUREKA_TEST_NAMESPACE + "fetchRemoteRegionsRegistry", regions.substring(1));
        }

        // Registration
        bindProperty(EUREKA_TEST_NAMESPACE + "registration.enabled", Boolean.toString(registrationEnabled));
        bindProperty(EUREKA_TEST_NAMESPACE + "appinfo.initial.replicate.time", Integer.toString(0));
        bindProperty(EUREKA_TEST_NAMESPACE + "appinfo.replicate.interval", Integer.toString(1));

        // Registry fetch
        bindProperty(EUREKA_TEST_NAMESPACE + "shouldFetchRegistry", Boolean.toString(registryFetchEnabled));
        bindProperty(EUREKA_TEST_NAMESPACE + "client.refresh.interval", Integer.toString(1));
        if (vipFetch != null) {
            bindProperty(EUREKA_TEST_NAMESPACE + "registryRefreshSingleVipAddress", vipFetch);
        }

        return new DefaultEurekaClientConfig(EUREKA_TEST_NAMESPACE);
    }

    /** Shuts down this client, all forked resources, and clears every bound property. */
    @Override
    protected void after() {
        if (client != null) {
            client.shutdown();
        }
        for (DiscoveryClientResource resource : forkedDiscoveryClientResources) {
            resource.after();
        }
        for (String property : SYSTEM_PROPERTY_TRACKER) {
            ConfigurationManager.getConfigInstance().clearProperty(property);
        }
        clearDiscoveryClientConfig();
    }

    /**
     * Returns a builder pre-populated from this resource's settings; resources built from it
     * are started immediately and torn down together with this resource.
     * <p>
     * Note: {@code vipFetch} and basic auth credentials are intentionally not propagated,
     * matching the historical behavior.
     */
    public DiscoveryClientRuleBuilder fork() {
        DiscoveryClientRuleBuilder builder = new DiscoveryClientRuleBuilder() {
            @Override
            public DiscoveryClientResource build() {
                DiscoveryClientResource clientResource = super.build();
                try {
                    clientResource.before();
                } catch (Throwable e) {
                    throw new IllegalStateException("Unexpected error during forking the client resource", e);
                }
                forkedDiscoveryClientResources.add(clientResource);
                return clientResource;
            }
        };
        builder.withInstanceInfo(instance)
                .connectWith(eurekaHttpServer)
                .withPortResolver(portResolverCallable)
                .withRegistration(registrationEnabled)
                .withRegistryFetch(registryFetchEnabled);
        // Bug fix: remoteRegions is null unless withRemoteRegions() was ever called on the
        // originating builder; calling toArray() unconditionally threw an NPE in that case.
        if (remoteRegions != null) {
            builder.withRemoteRegions(remoteRegions.toArray(new String[remoteRegions.size()]));
        }
        return builder;
    }

    public static DiscoveryClientRuleBuilder newBuilder() {
        return new DiscoveryClientRuleBuilder();
    }

    /** Binds the legacy (un-namespaced) discovery client properties used by older tests. */
    public static void setupDiscoveryClientConfig(int serverPort, String path) {
        ConfigurationManager.getConfigInstance().setProperty("eureka.shouldFetchRegistry", "true");
        ConfigurationManager.getConfigInstance().setProperty("eureka.responseCacheAutoExpirationInSeconds", "10");
        ConfigurationManager.getConfigInstance().setProperty("eureka.client.refresh.interval", CLIENT_REFRESH_RATE);
        ConfigurationManager.getConfigInstance().setProperty("eureka.registration.enabled", "false");
        ConfigurationManager.getConfigInstance().setProperty("eureka.fetchRemoteRegionsRegistry", REMOTE_REGION);
        ConfigurationManager.getConfigInstance().setProperty("eureka.myregion.availabilityZones", REMOTE_ZONE);
        ConfigurationManager.getConfigInstance().setProperty("eureka.serviceUrl.default",
                "http://localhost:" + serverPort + path);
    }

    /** Clears the properties bound by {@link #setupDiscoveryClientConfig(int, String)}. */
    public static void clearDiscoveryClientConfig() {
        ConfigurationManager.getConfigInstance().clearProperty("eureka.client.refresh.interval");
        ConfigurationManager.getConfigInstance().clearProperty("eureka.registration.enabled");
        ConfigurationManager.getConfigInstance().clearProperty("eureka.fetchRemoteRegionsRegistry");
        ConfigurationManager.getConfigInstance().clearProperty("eureka.myregion.availabilityZones");
        ConfigurationManager.getConfigInstance().clearProperty("eureka.serviceUrl.default");
        ConfigurationManager.getConfigInstance().clearProperty("eureka.shouldEnforceFetchRegistryAtInit");
    }

    /** Creates a standalone client for the given instance using default config. */
    public static EurekaClient setupDiscoveryClient(InstanceInfo clientInstanceInfo) {
        DefaultEurekaClientConfig config = new DefaultEurekaClientConfig();
        // setup config in advance, used in initialize converter
        ApplicationInfoManager applicationInfoManager = new ApplicationInfoManager(new MyDataCenterInstanceConfig(), clientInstanceInfo);
        DiscoveryManager.getInstance().setEurekaClientConfig(config);
        EurekaClient client = new DiscoveryClient(applicationInfoManager, config);
        return client;
    }

    /** Creates a client via the deprecated singleton {@link ApplicationInfoManager} path. */
    public static EurekaClient setupInjector(InstanceInfo clientInstanceInfo) {
        DefaultEurekaClientConfig config = new DefaultEurekaClientConfig();
        // setup config in advance, used in initialize converter
        DiscoveryManager.getInstance().setEurekaClientConfig(config);
        EurekaClient client = new DiscoveryClient(clientInstanceInfo, config);
        ApplicationInfoManager.getInstance().initComponent(new MyDataCenterInstanceConfig());
        return client;
    }

    /** Builds a randomized test {@link InstanceInfo} with the given lease renewal interval. */
    public static InstanceInfo.Builder newInstanceInfoBuilder(int renewalIntervalInSecs) {
        InstanceInfo.Builder builder = InstanceInfo.Builder.newBuilder();
        builder.setIPAddr("10.10.101.00");
        builder.setHostName("Hosttt");
        builder.setAppName("EurekaTestApp-" + UUID.randomUUID());
        builder.setDataCenterInfo(new DataCenterInfo() {
            @Override
            public Name getName() {
                return Name.MyOwn;
            }
        });
        builder.setLeaseInfo(LeaseInfo.Builder.newBuilder().setRenewalIntervalInSecs(renewalIntervalInSecs).build());
        return builder;
    }

    /** Sets a property and records its name so {@link #after()} can clear it. */
    private static void bindProperty(String propertyName, String value) {
        SYSTEM_PROPERTY_TRACKER.add(propertyName);
        ConfigurationManager.getConfigInstance().setProperty(propertyName, value);
    }

    /** Fluent builder for {@link DiscoveryClientResource}. */
    public static class DiscoveryClientRuleBuilder {
        private boolean registrationEnabled;
        private boolean registryFetchEnabled;
        private Callable<Integer> portResolverCallable;
        private InstanceInfo instance;
        private SimpleEurekaHttpServer eurekaHttpServer;
        private List<String> remoteRegions;
        private String vipFetch;
        private String userName;
        private String password;

        public DiscoveryClientRuleBuilder withInstanceInfo(InstanceInfo instance) {
            this.instance = instance;
            return this;
        }

        public DiscoveryClientRuleBuilder withRegistration(boolean enabled) {
            this.registrationEnabled = enabled;
            return this;
        }

        public DiscoveryClientRuleBuilder withRegistryFetch(boolean enabled) {
            this.registryFetchEnabled = enabled;
            return this;
        }

        public DiscoveryClientRuleBuilder withPortResolver(Callable<Integer> portResolverCallable) {
            this.portResolverCallable = portResolverCallable;
            return this;
        }

        public DiscoveryClientRuleBuilder connectWith(SimpleEurekaHttpServer eurekaHttpServer) {
            this.eurekaHttpServer = eurekaHttpServer;
            return this;
        }

        public DiscoveryClientRuleBuilder withRemoteRegions(String... remoteRegions) {
            if (this.remoteRegions == null) {
                this.remoteRegions = new ArrayList<>();
            }
            Collections.addAll(this.remoteRegions, remoteRegions);
            return this;
        }

        public DiscoveryClientRuleBuilder withVipFetch(String vipFetch) {
            this.vipFetch = vipFetch;
            return this;
        }

        public DiscoveryClientRuleBuilder basicAuthentication(String userName, String password) {
            Preconditions.checkNotNull(userName, "HTTP basic authentication user name is null");
            Preconditions.checkNotNull(password, "HTTP basic authentication password is null");
            this.userName = userName;
            this.password = password;
            return this;
        }

        public DiscoveryClientResource build() {
            return new DiscoveryClientResource(this);
        }
    }
}
| 8,227 |
0 | Create_ds/eureka/eureka-test-utils/src/main/java/com/netflix/discovery/shared | Create_ds/eureka/eureka-test-utils/src/main/java/com/netflix/discovery/shared/transport/ClusterSampleData.java | package com.netflix.discovery.shared.transport;
import java.util.Iterator;
import com.netflix.appinfo.InstanceInfo;
import com.netflix.appinfo.InstanceInfo.InstanceStatus;
import com.netflix.discovery.util.InstanceInfoGenerator;
import com.netflix.eureka.EurekaServerConfig;
import com.netflix.eureka.registry.PeerAwareInstanceRegistryImpl.Action;
import com.netflix.eureka.cluster.protocol.ReplicationInstance;
import com.netflix.eureka.cluster.protocol.ReplicationInstanceResponse;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
/**
* Collection of functions to create different kinds of configuration/data.
*
* @author Tomasz Bak
*/
/**
 * Collection of functions to create different kinds of configuration/data.
 *
 * @author Tomasz Bak
 */
public final class ClusterSampleData {

    public static final long REPLICATION_EXPIRY_TIME_MS = 100;
    public static final long RETRY_SLEEP_TIME_MS = 1;
    public static final long SERVER_UNAVAILABLE_SLEEP_TIME_MS = 1;
    public static final long EUREKA_NODES_UPDATE_INTERVAL_MS = 10;

    private ClusterSampleData() {
    }

    /** Builds a mocked {@link EurekaServerConfig} with small, test-friendly values. */
    public static EurekaServerConfig newEurekaServerConfig() {
        EurekaServerConfig serverConfig = mock(EurekaServerConfig.class);

        // Cluster management related
        when(serverConfig.getPeerEurekaNodesUpdateIntervalMs()).thenReturn((int) EUREKA_NODES_UPDATE_INTERVAL_MS);

        // Replication logic related
        when(serverConfig.shouldSyncWhenTimestampDiffers()).thenReturn(true);
        when(serverConfig.getMaxTimeForReplication()).thenReturn((int) REPLICATION_EXPIRY_TIME_MS);
        when(serverConfig.getMaxElementsInPeerReplicationPool()).thenReturn(10);
        when(serverConfig.getMaxElementsInStatusReplicationPool()).thenReturn(10);
        when(serverConfig.getMaxThreadsForPeerReplication()).thenReturn(1);
        when(serverConfig.getMaxThreadsForStatusReplication()).thenReturn(1);

        return serverConfig;
    }

    /** Returns the instance at position {@code index} of a deterministic generator sequence. */
    public static InstanceInfo newInstanceInfo(int index) {
        Iterator<InstanceInfo> iterator = InstanceInfoGenerator.newBuilder(10, 10)
                .withMetaData(true).build().serviceIterator();
        InstanceInfo current = iterator.next();
        // Advance until the element at the requested (zero-based) index is reached.
        for (int skipped = 0; skipped < index; skipped++) {
            current = iterator.next();
        }
        return current;
    }

    /** Shorthand for a Register replication entry for the first sample instance. */
    public static ReplicationInstance newReplicationInstance() {
        return newReplicationInstanceOf(Action.Register, newInstanceInfo(0));
    }

    /**
     * Builds a {@link ReplicationInstance} for the given action. Only the overridden status,
     * reported status and instance payload vary by action; everything else is common.
     */
    public static ReplicationInstance newReplicationInstanceOf(Action action, InstanceInfo instance) {
        String overriddenStatus;
        String status;
        InstanceInfo payload;
        switch (action) {
            case Register:
                overriddenStatus = null;
                status = instance.getStatus().name();
                payload = instance;
                break;
            case Cancel:
                overriddenStatus = null;
                status = null;
                payload = null;
                break;
            case Heartbeat:
                overriddenStatus = InstanceStatus.OUT_OF_SERVICE.name();
                status = instance.getStatus().name();
                payload = instance;
                break;
            case StatusUpdate:
                overriddenStatus = null;
                status = InstanceStatus.OUT_OF_SERVICE.name();
                payload = null;
                break;
            case DeleteStatusOverride:
                overriddenStatus = null;
                status = InstanceStatus.UP.name();
                payload = null;
                break;
            default:
                throw new IllegalStateException("Unexpected action " + action);
        }
        return new ReplicationInstance(
                instance.getAppName(),
                instance.getId(),
                System.currentTimeMillis(),
                overriddenStatus,
                status,
                payload,
                action
        );
    }

    /** Builds an HTTP 200 replication response, optionally carrying a sample instance. */
    public static ReplicationInstanceResponse newReplicationInstanceResponse(boolean withInstanceInfo) {
        InstanceInfo responseEntity = withInstanceInfo ? newInstanceInfo(1) : null;
        return new ReplicationInstanceResponse(200, responseEntity);
    }
}
| 8,228 |
0 | Create_ds/eureka/eureka-test-utils/src/main/java/com/netflix/discovery/shared | Create_ds/eureka/eureka-test-utils/src/main/java/com/netflix/discovery/shared/transport/EurekaHttpRequest.java | /*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.discovery.shared.transport;
import java.net.URI;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
/**
*/
/**
 * Immutable value object describing a single HTTP request observed by the test
 * Eureka server: method, target URI, and a read-only snapshot of the headers.
 */
public class EurekaHttpRequest {

    private final String requestMethod;
    private final URI requestURI;
    private final Map<String, String> headers;

    /**
     * @param headers snapshotted defensively; later changes to the caller's map
     *                are not reflected here
     */
    public EurekaHttpRequest(String requestMethod, URI requestURI, Map<String, String> headers) {
        this.requestMethod = requestMethod;
        this.requestURI = requestURI;
        Map<String, String> headersCopy = new HashMap<String, String>(headers);
        this.headers = Collections.unmodifiableMap(headersCopy);
    }

    /** HTTP method of the request (e.g. {@code GET}, {@code POST}). */
    public String getRequestMethod() {
        return requestMethod;
    }

    /** Full URI the request was issued against. */
    public URI getRequestURI() {
        return requestURI;
    }

    /** Unmodifiable view of the request headers captured at construction time. */
    public Map<String, String> getHeaders() {
        return headers;
    }
}
| 8,229 |
0 | Create_ds/eureka/eureka-test-utils/src/main/java/com/netflix/discovery/shared | Create_ds/eureka/eureka-test-utils/src/main/java/com/netflix/discovery/shared/transport/EurekaHttpClientCompatibilityTestSuite.java | /*
* Copyright 2015 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.discovery.shared.transport;
import javax.ws.rs.core.HttpHeaders;
import javax.ws.rs.core.UriBuilder;
import java.net.URI;
import java.util.List;
import java.util.concurrent.CopyOnWriteArrayList;
import com.netflix.appinfo.InstanceInfo;
import com.netflix.appinfo.InstanceInfo.InstanceStatus;
import com.netflix.discovery.shared.Applications;
import com.netflix.discovery.util.EurekaEntityComparators;
import com.netflix.discovery.util.InstanceInfoGenerator;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import static com.netflix.discovery.shared.transport.EurekaHttpResponse.anEurekaHttpResponse;
import static org.hamcrest.CoreMatchers.equalTo;
import static org.hamcrest.CoreMatchers.is;
import static org.hamcrest.CoreMatchers.notNullValue;
import static org.junit.Assert.assertThat;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
/**
* @author Tomasz Bak
*/
public abstract class EurekaHttpClientCompatibilityTestSuite {
private static final String REMOTE_REGION = "us-east-1";
private final EurekaHttpClient requestHandler = mock(EurekaHttpClient.class);
private final List<EurekaHttpRequest> observedHttpRequests = new CopyOnWriteArrayList<>();
private final EurekaTransportEventListener transportEventListener = new EurekaTransportEventListener() {
@Override
public void onHttpRequest(EurekaHttpRequest request) {
observedHttpRequests.add(request);
}
};
private SimpleEurekaHttpServer httpServer;
protected EurekaHttpClientCompatibilityTestSuite() {
}
@Before
public void setUp() throws Exception {
httpServer = new SimpleEurekaHttpServer(requestHandler, transportEventListener);
}
@After
public void tearDown() throws Exception {
httpServer.shutdown();
}
protected abstract EurekaHttpClient getEurekaHttpClient(URI serviceURI);
protected EurekaHttpClient getEurekaHttpClient() {
return getEurekaHttpClient(getHttpServer().getServiceURI());
}
protected EurekaHttpClient getEurekaClientWithBasicAuthentication(String userName, String password) {
URI serviceURI = UriBuilder.fromUri(getHttpServer().getServiceURI()).userInfo(userName + ':' + password).build();
return getEurekaHttpClient(serviceURI);
}
protected SimpleEurekaHttpServer getHttpServer() {
return httpServer;
}
@Test
public void testRegisterRequest() throws Exception {
InstanceInfo instance = InstanceInfoGenerator.takeOne();
when(requestHandler.register(instance)).thenReturn(EurekaHttpResponse.status(204));
EurekaHttpResponse<Void> httpResponse = getEurekaHttpClient().register(instance);
assertThat(httpResponse.getStatusCode(), is(equalTo(204)));
}
@Test
public void testCancelRequest() throws Exception {
InstanceInfo instance = InstanceInfoGenerator.takeOne();
when(requestHandler.cancel(instance.getAppName(), instance.getId())).thenReturn(EurekaHttpResponse.status(200));
EurekaHttpResponse<Void> httpResponse = getEurekaHttpClient().cancel(instance.getAppName(), instance.getId());
assertThat(httpResponse.getStatusCode(), is(equalTo(200)));
}
@Test
public void testHeartbeatRequest() throws Exception {
InstanceInfo instance = InstanceInfoGenerator.takeOne();
InstanceInfo updated = new InstanceInfo.Builder(instance).setHostName("another.host").build();
when(requestHandler.sendHeartBeat(instance.getAppName(), instance.getId(), null, null)).thenReturn(createResponse(updated));
EurekaHttpResponse<InstanceInfo> httpResponse = getEurekaHttpClient().sendHeartBeat(instance.getAppName(), instance.getId(), instance, null);
verifyResponseOkWithEntity(updated, httpResponse);
}
@Test
public void testStatusUpdateRequest() throws Exception {
InstanceInfo instance = InstanceInfoGenerator.takeOne();
when(requestHandler.statusUpdate(instance.getAppName(), instance.getId(), InstanceStatus.OUT_OF_SERVICE, null))
.thenReturn(EurekaHttpResponse.status(200));
EurekaHttpResponse<Void> httpResponse = getEurekaHttpClient().statusUpdate(instance.getAppName(), instance.getId(), InstanceStatus.OUT_OF_SERVICE, instance);
assertThat(httpResponse.getStatusCode(), is(equalTo(200)));
}
@Test
public void testGetApplicationsRequest() throws Exception {
Applications apps = InstanceInfoGenerator.newBuilder(2, 1).build().toApplications();
when(requestHandler.getApplications()).thenReturn(createResponse(apps));
EurekaHttpResponse<Applications> httpResponse = getEurekaHttpClient().getApplications();
verifyResponseOkWithEntity(apps, httpResponse);
}
@Test
public void testGetApplicationsWithRemoteRegionRequest() throws Exception {
Applications apps = InstanceInfoGenerator.newBuilder(2, 1).build().toApplications();
when(requestHandler.getApplications(REMOTE_REGION)).thenReturn(createResponse(apps));
EurekaHttpResponse<Applications> httpResponse = getEurekaHttpClient().getApplications(REMOTE_REGION);
verifyResponseOkWithEntity(apps, httpResponse);
}
@Test
public void testGetDeltaRequest() throws Exception {
Applications delta = InstanceInfoGenerator.newBuilder(2, 1).build().takeDelta(2);
when(requestHandler.getDelta()).thenReturn(createResponse(delta));
EurekaHttpResponse<Applications> httpResponse = getEurekaHttpClient().getDelta();
verifyResponseOkWithEntity(delta, httpResponse);
}
@Test
public void testGetDeltaWithRemoteRegionRequest() throws Exception {
Applications delta = InstanceInfoGenerator.newBuilder(2, 1).build().takeDelta(2);
when(requestHandler.getDelta(REMOTE_REGION)).thenReturn(createResponse(delta));
EurekaHttpResponse<Applications> httpResponse = getEurekaHttpClient().getDelta(REMOTE_REGION);
verifyResponseOkWithEntity(delta, httpResponse);
}
@Test
public void testGetInstanceRequest() throws Exception {
InstanceInfo instance = InstanceInfoGenerator.takeOne();
when(requestHandler.getInstance(instance.getId())).thenReturn(createResponse(instance));
EurekaHttpResponse<InstanceInfo> httpResponse = getEurekaHttpClient().getInstance(instance.getId());
verifyResponseOkWithEntity(instance, httpResponse);
}
@Test
public void testGetApplicationInstanceRequest() throws Exception {
InstanceInfo instance = InstanceInfoGenerator.takeOne();
when(requestHandler.getInstance(instance.getAppName(), instance.getId())).thenReturn(createResponse(instance));
EurekaHttpResponse<InstanceInfo> httpResponse = getEurekaHttpClient().getInstance(instance.getAppName(), instance.getId());
verifyResponseOkWithEntity(instance, httpResponse);
}
@Test
public void testGetVipRequest() throws Exception {
Applications vipApps = InstanceInfoGenerator.newBuilder(1, 2).build().toApplications();
String vipAddress = vipApps.getRegisteredApplications().get(0).getInstances().get(0).getVIPAddress();
when(requestHandler.getVip(vipAddress)).thenReturn(createResponse(vipApps));
EurekaHttpResponse<Applications> httpResponse = getEurekaHttpClient().getVip(vipAddress);
verifyResponseOkWithEntity(vipApps, httpResponse);
}
@Test
public void testGetVipWithRemoteRegionRequest() throws Exception {
Applications vipApps = InstanceInfoGenerator.newBuilder(1, 2).build().toApplications();
String vipAddress = vipApps.getRegisteredApplications().get(0).getInstances().get(0).getVIPAddress();
when(requestHandler.getVip(vipAddress, REMOTE_REGION)).thenReturn(createResponse(vipApps));
EurekaHttpResponse<Applications> httpResponse = getEurekaHttpClient().getVip(vipAddress, REMOTE_REGION);
verifyResponseOkWithEntity(vipApps, httpResponse);
}
@Test
public void testGetSecureVipRequest() throws Exception {
Applications vipApps = InstanceInfoGenerator.newBuilder(1, 2).build().toApplications();
String secureVipAddress = vipApps.getRegisteredApplications().get(0).getInstances().get(0).getSecureVipAddress();
when(requestHandler.getSecureVip(secureVipAddress)).thenReturn(createResponse(vipApps));
EurekaHttpResponse<Applications> httpResponse = getEurekaHttpClient().getSecureVip(secureVipAddress);
verifyResponseOkWithEntity(vipApps, httpResponse);
}
@Test
public void testGetSecureVipWithRemoteRegionRequest() throws Exception {
Applications vipApps = InstanceInfoGenerator.newBuilder(1, 2).build().toApplications();
String secureVipAddress = vipApps.getRegisteredApplications().get(0).getInstances().get(0).getSecureVipAddress();
when(requestHandler.getSecureVip(secureVipAddress, REMOTE_REGION)).thenReturn(createResponse(vipApps));
EurekaHttpResponse<Applications> httpResponse = getEurekaHttpClient().getSecureVip(secureVipAddress, REMOTE_REGION);
verifyResponseOkWithEntity(vipApps, httpResponse);
}
@Test
public void testStatusUpdateDeleteRequest() throws Exception {
InstanceInfo instance = InstanceInfoGenerator.takeOne();
when(requestHandler.deleteStatusOverride(instance.getAppName(), instance.getId(), null))
.thenReturn(EurekaHttpResponse.status(200));
EurekaHttpResponse<Void> httpResponse = getEurekaHttpClient().deleteStatusOverride(instance.getAppName(), instance.getId(), instance);
assertThat(httpResponse.getStatusCode(), is(equalTo(200)));
}
@Test
public void testBasicAuthentication() throws Exception {
InstanceInfo instance = InstanceInfoGenerator.takeOne();
when(requestHandler.register(instance)).thenReturn(EurekaHttpResponse.status(204));
EurekaHttpResponse<Void> httpResponse = getEurekaClientWithBasicAuthentication("myuser", "mypassword").register(instance);
assertThat(httpResponse.getStatusCode(), is(equalTo(204)));
assertThat(observedHttpRequests.get(0).getHeaders().containsKey(HttpHeaders.AUTHORIZATION), is(true));
}
private static void verifyResponseOkWithEntity(Applications original, EurekaHttpResponse<Applications> httpResponse) {
assertThat(httpResponse.getStatusCode(), is(equalTo(200)));
assertThat(httpResponse.getEntity(), is(notNullValue()));
assertThat(EurekaEntityComparators.equal(httpResponse.getEntity(), original), is(true));
}
/** Asserts a 200 response whose InstanceInfo payload is structurally equal to the expected one. */
private static void verifyResponseOkWithEntity(InstanceInfo expected, EurekaHttpResponse<InstanceInfo> response) {
    assertThat(response.getStatusCode(), is(equalTo(200)));
    assertThat(response.getEntity(), is(notNullValue()));
    assertThat(EurekaEntityComparators.equal(response.getEntity(), expected), is(true));
}
/** Wraps the given entity in a 200 response that carries a JSON content type. */
private static <T> EurekaHttpResponse<T> createResponse(T entity) {
    return anEurekaHttpResponse(200, entity)
            .headers(HttpHeaders.CONTENT_TYPE, "application/json")
            .build();
}
}
| 8,230 |
/*
* Copyright 2015 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.discovery.shared.transport;
import javax.servlet.http.HttpServletResponse;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.OutputStream;
import java.net.InetSocketAddress;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.HashMap;
import java.util.Map;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import com.netflix.appinfo.InstanceInfo;
import com.netflix.appinfo.InstanceInfo.InstanceStatus;
import com.netflix.discovery.converters.wrappers.CodecWrappers;
import com.netflix.discovery.converters.wrappers.CodecWrappers.JacksonJson;
import com.netflix.discovery.converters.wrappers.DecoderWrapper;
import com.netflix.discovery.converters.wrappers.EncoderWrapper;
import com.netflix.discovery.shared.Applications;
import com.sun.net.httpserver.Headers;
import com.sun.net.httpserver.HttpExchange;
import com.sun.net.httpserver.HttpHandler;
import com.sun.net.httpserver.HttpServer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* HTTP server with Eureka compatible REST API that delegates client request to the provided {@link EurekaHttpClient}
* implementation. It is very lightweight implementation that can be used in unit test without incurring to much
* overhead.
*
* @author Tomasz Bak
*/
public class SimpleEurekaHttpServer {

    private static final Logger logger = LoggerFactory.getLogger(SimpleEurekaHttpServer.class);

    // Pattern instances are immutable and thread-safe; compile each route matcher
    // once per class instead of once per request.
    private static final Pattern APP_INSTANCE_PATH = Pattern.compile("/v2/apps/([^/]+)/([^/]+)");
    private static final Pattern APP_STATUS_PATH = Pattern.compile("/v2/apps/([^/]+)/([^/]+)/status");
    private static final Pattern APP_REGISTER_PATH = Pattern.compile("/v2/apps/([^/]+)(/)?");
    private static final Pattern VIP_PATH = Pattern.compile("/v2/vips/([^/]+)");
    private static final Pattern SECURE_VIP_PATH = Pattern.compile("/v2/svips/([^/]+)");
    private static final Pattern INSTANCE_PATH = Pattern.compile("/v2/instances/([^/]+)");

    /** Delegate that services every decoded REST call. */
    private final EurekaHttpClient requestHandler;
    /** Optional observer notified of each incoming request before dispatch; may be null. */
    private final EurekaTransportEventListener eventListener;
    private final HttpServer httpServer;

    private final EncoderWrapper encoder = CodecWrappers.getEncoder(JacksonJson.class);
    private final DecoderWrapper decoder = CodecWrappers.getDecoder(JacksonJson.class);

    public SimpleEurekaHttpServer(EurekaHttpClient requestHandler) throws IOException {
        this(requestHandler, null);
    }

    /**
     * Starts the server immediately on an ephemeral port.
     *
     * @param requestHandler delegate that services every decoded REST call
     * @param eventListener  optional request observer; may be null
     * @throws IOException if the underlying {@link HttpServer} cannot be created
     */
    public SimpleEurekaHttpServer(EurekaHttpClient requestHandler, EurekaTransportEventListener eventListener) throws IOException {
        this.requestHandler = requestHandler;
        this.eventListener = eventListener;
        // Port 0 asks the OS for any free port; a backlog of 1 is enough for unit tests.
        this.httpServer = HttpServer.create(new InetSocketAddress(0), 1);
        httpServer.createContext("/v2", createEurekaV2Handle());
        httpServer.setExecutor(null);
        httpServer.start();
    }

    /** Stops the server without a grace period. */
    public void shutdown() {
        httpServer.stop(0);
    }

    /** @return the base URI (http://localhost:port/v2/) clients should use. */
    public URI getServiceURI() {
        try {
            return new URI("http://localhost:" + getServerPort() + "/v2/");
        } catch (URISyntaxException e) {
            throw new IllegalStateException("Cannot parse service URI", e);
        }
    }

    /** @return the ephemeral port the server actually bound to. */
    public int getServerPort() {
        return httpServer.getAddress().getPort();
    }

    /** Builds the single handler that routes all /v2 requests by path prefix and method. */
    private HttpHandler createEurekaV2Handle() {
        return new HttpHandler() {
            @Override
            public void handle(HttpExchange httpExchange) throws IOException {
                if (eventListener != null) {
                    eventListener.onHttpRequest(mapToEurekaHttpRequest(httpExchange));
                }
                try {
                    String method = httpExchange.getRequestMethod();
                    String path = httpExchange.getRequestURI().getPath();
                    if (path.startsWith("/v2/apps")) {
                        if ("GET".equals(method)) {
                            handleAppsGET(httpExchange);
                        } else if ("POST".equals(method)) {
                            handleAppsPost(httpExchange);
                        } else if ("PUT".equals(method)) {
                            handleAppsPut(httpExchange);
                        } else if ("DELETE".equals(method)) {
                            handleAppsDelete(httpExchange);
                        } else {
                            httpExchange.sendResponseHeaders(HttpServletResponse.SC_NOT_FOUND, 0);
                        }
                    } else if (path.startsWith("/v2/vips")) {
                        handleVipsGET(httpExchange);
                    } else if (path.startsWith("/v2/svips")) {
                        handleSecureVipsGET(httpExchange);
                    } else if (path.startsWith("/v2/instances")) {
                        handleInstanceGET(httpExchange);
                    } else {
                        // FIX: an unrecognized /v2 path previously fell through and the
                        // exchange was closed without any response headers being sent;
                        // reply 404 so clients always get a well-formed HTTP response.
                        httpExchange.sendResponseHeaders(HttpServletResponse.SC_NOT_FOUND, 0);
                    }
                } catch (Exception e) {
                    logger.error("HttpServer error", e);
                    httpExchange.sendResponseHeaders(500, 0);
                }
                httpExchange.close();
            }
        };
    }

    /** GET /v2/apps[/], /v2/apps/delta[/], or /v2/apps/{app}/{id}. */
    private void handleAppsGET(HttpExchange httpExchange) throws IOException {
        EurekaHttpResponse<?> httpResponse;
        String path = httpExchange.getRequestURI().getPath();
        Matcher matcher;
        if (path.matches("/v2/apps[/]?")) {
            String regions = getQueryParam(httpExchange, "regions");
            httpResponse = regions == null ? requestHandler.getApplications() : requestHandler.getApplications(regions);
        } else if (path.matches("/v2/apps/delta[/]?")) {
            String regions = getQueryParam(httpExchange, "regions");
            httpResponse = regions == null ? requestHandler.getDelta() : requestHandler.getDelta(regions);
        } else if ((matcher = APP_INSTANCE_PATH.matcher(path)).matches()) {
            httpResponse = requestHandler.getInstance(matcher.group(1), matcher.group(2));
        } else {
            httpExchange.sendResponseHeaders(HttpServletResponse.SC_NOT_FOUND, 0);
            return;
        }
        if (httpResponse == null) {
            // A null reply from the delegate maps to 404 rather than NPE-ing below.
            httpResponse = EurekaHttpResponse.anEurekaHttpResponse(HttpServletResponse.SC_NOT_FOUND).build();
        }
        mapResponse(httpExchange, httpResponse);
    }

    /** POST /v2/apps/{app} — registration; the body is a JSON-encoded InstanceInfo. */
    private void handleAppsPost(HttpExchange httpExchange) throws IOException {
        EurekaHttpResponse<?> httpResponse;
        String path = httpExchange.getRequestURI().getPath();
        if (APP_REGISTER_PATH.matcher(path).matches()) {
            InstanceInfo instance = decoder.decode(httpExchange.getRequestBody(), InstanceInfo.class);
            httpResponse = requestHandler.register(instance);
        } else {
            httpExchange.sendResponseHeaders(HttpServletResponse.SC_NOT_FOUND, 0);
            return;
        }
        mapResponse(httpExchange, httpResponse);
    }

    /** PUT /v2/apps/{app}/{id} — heartbeat; PUT /v2/apps/{app}/{id}/status — status update. */
    private void handleAppsPut(HttpExchange httpExchange) throws IOException {
        EurekaHttpResponse<?> httpResponse;
        String path = httpExchange.getRequestURI().getPath();
        Matcher matcher;
        if ((matcher = APP_INSTANCE_PATH.matcher(path)).matches()) {
            String overriddenstatus = getQueryParam(httpExchange, "overriddenstatus");
            httpResponse = requestHandler.sendHeartBeat(
                    matcher.group(1), matcher.group(2), null,
                    overriddenstatus == null ? null : InstanceStatus.valueOf(overriddenstatus)
            );
        } else if ((matcher = APP_STATUS_PATH.matcher(path)).matches()) {
            String newStatus = getQueryParam(httpExchange, "value");
            httpResponse = requestHandler.statusUpdate(
                    matcher.group(1), matcher.group(2),
                    newStatus == null ? null : InstanceStatus.valueOf(newStatus),
                    null
            );
        } else {
            httpExchange.sendResponseHeaders(HttpServletResponse.SC_NOT_FOUND, 0);
            return;
        }
        mapResponse(httpExchange, httpResponse);
    }

    /** DELETE /v2/apps/{app}/{id} — cancel; DELETE /v2/apps/{app}/{id}/status — drop override. */
    private void handleAppsDelete(HttpExchange httpExchange) throws IOException {
        EurekaHttpResponse<?> httpResponse;
        String path = httpExchange.getRequestURI().getPath();
        Matcher matcher;
        if ((matcher = APP_INSTANCE_PATH.matcher(path)).matches()) {
            httpResponse = requestHandler.cancel(matcher.group(1), matcher.group(2));
        } else if ((matcher = APP_STATUS_PATH.matcher(path)).matches()) {
            httpResponse = requestHandler.deleteStatusOverride(matcher.group(1), matcher.group(2), null);
        } else {
            httpExchange.sendResponseHeaders(HttpServletResponse.SC_NOT_FOUND, 0);
            return;
        }
        mapResponse(httpExchange, httpResponse);
    }

    /** GET /v2/vips/{vipAddress}, optionally scoped by a "regions" query parameter. */
    private void handleVipsGET(HttpExchange httpExchange) throws IOException {
        Matcher matcher = VIP_PATH.matcher(httpExchange.getRequestURI().getPath());
        if (matcher.matches()) {
            String regions = getQueryParam(httpExchange, "regions");
            EurekaHttpResponse<Applications> httpResponse = regions == null
                    ? requestHandler.getVip(matcher.group(1))
                    : requestHandler.getVip(matcher.group(1), regions);
            mapResponse(httpExchange, httpResponse);
        } else {
            httpExchange.sendResponseHeaders(HttpServletResponse.SC_NOT_FOUND, 0);
        }
    }

    /** GET /v2/svips/{secureVipAddress}, optionally scoped by a "regions" query parameter. */
    private void handleSecureVipsGET(HttpExchange httpExchange) throws IOException {
        Matcher matcher = SECURE_VIP_PATH.matcher(httpExchange.getRequestURI().getPath());
        if (matcher.matches()) {
            String regions = getQueryParam(httpExchange, "regions");
            EurekaHttpResponse<Applications> httpResponse = regions == null
                    ? requestHandler.getSecureVip(matcher.group(1))
                    : requestHandler.getSecureVip(matcher.group(1), regions);
            mapResponse(httpExchange, httpResponse);
        } else {
            httpExchange.sendResponseHeaders(HttpServletResponse.SC_NOT_FOUND, 0);
        }
    }

    /** GET /v2/instances/{id}. */
    private void handleInstanceGET(HttpExchange httpExchange) throws IOException {
        Matcher matcher = INSTANCE_PATH.matcher(httpExchange.getRequestURI().getPath());
        if (matcher.matches()) {
            mapResponse(httpExchange, requestHandler.getInstance(matcher.group(1)));
        } else {
            httpExchange.sendResponseHeaders(HttpServletResponse.SC_NOT_FOUND, 0);
        }
    }

    /** Converts the exchange's method, URI, and first-value-per-header map into an event object. */
    private EurekaHttpRequest mapToEurekaHttpRequest(HttpExchange httpExchange) {
        Headers exchangeHeaders = httpExchange.getRequestHeaders();
        Map<String, String> headers = new HashMap<>();
        for (String key : exchangeHeaders.keySet()) {
            headers.put(key, exchangeHeaders.getFirst(key));
        }
        return new EurekaHttpRequest(httpExchange.getRequestMethod(), httpExchange.getRequestURI(), headers);
    }

    /** Writes the delegate's response (status, headers, JSON-encoded entity) to the exchange. */
    private <T> void mapResponse(HttpExchange httpExchange, EurekaHttpResponse<T> response) throws IOException {
        // Add headers
        for (Map.Entry<String, String> headerEntry : response.getHeaders().entrySet()) {
            httpExchange.getResponseHeaders().add(headerEntry.getKey(), headerEntry.getValue());
        }

        // Non-2xx responses carry no body.
        if (response.getStatusCode() / 100 != 2) {
            httpExchange.sendResponseHeaders(response.getStatusCode(), 0);
            return;
        }

        // Prepare body, if any
        T entity = response.getEntity();
        byte[] body = null;
        if (entity != null) {
            ByteArrayOutputStream bos = new ByteArrayOutputStream();
            encoder.encode(entity, bos);
            body = bos.toByteArray();
        }

        // Set status and body length
        httpExchange.sendResponseHeaders(response.getStatusCode(), body == null ? 0 : body.length);

        // Send body
        if (body != null) {
            OutputStream responseStream = httpExchange.getResponseBody();
            try {
                responseStream.write(body);
                responseStream.flush();
            } finally {
                responseStream.close();
            }
        }
    }

    /**
     * Returns the raw value of the given query parameter, or null when absent.
     * FIX: splits each key/value pair on the first '=' only, so values that
     * themselves contain '=' are no longer truncated.
     */
    private static String getQueryParam(HttpExchange httpExchange, String queryParam) {
        String query = httpExchange.getRequestURI().getQuery();
        if (query != null) {
            for (String part : query.split("&")) {
                String[] keyValue = part.split("=", 2);
                if (keyValue.length > 1 && keyValue[0].equals(queryParam)) {
                    return keyValue[1];
                }
            }
        }
        return null;
    }
}
| 8,231 |
/*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.discovery.shared.transport;
/**
 * Observer of Eureka transport traffic. {@link SimpleEurekaHttpServer} invokes this
 * listener for every incoming HTTP request, before the request is dispatched to its
 * delegate {@link EurekaHttpClient}.
 */
public interface EurekaTransportEventListener {
    /** Called once per incoming request with its HTTP method, URI, and headers. */
    void onHttpRequest(EurekaHttpRequest request);
}
| 8,232 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dubbo.backend.provider;
import org.springframework.context.support.ClassPathXmlApplicationContext;
import java.util.concurrent.TimeUnit;
import java.lang.InterruptedException;
public class Provider {

    /**
     * Bootstraps the Dubbo provider from its Spring XML descriptor and then
     * blocks forever so the exported service stays available.
     *
     * To get ipv6 address to work, add
     * System.setProperty("java.net.preferIPv6Addresses", "true");
     * before running your application.
     */
    public static void main(String[] args) throws Exception {
        ClassPathXmlApplicationContext applicationContext =
                new ClassPathXmlApplicationContext(new String[]{"META-INF/spring/dubbo-demo-provider.xml"});
        applicationContext.start();

        // Keep the JVM alive indefinitely; interrupts are deliberately swallowed
        // so the provider does not shut down.
        for (;;) {
            try {
                TimeUnit.MINUTES.sleep(1);
            } catch (InterruptedException ignored) {
                // intentional no-op
            }
        }
    }
}
| 8,233 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dubbo.backend.provider;
import org.apache.dubbo.backend.DemoService;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.TimeUnit;
import java.lang.InterruptedException;
public class DemoServiceImpl implements DemoService {

    /** Happy path: reports success and mirrors back any "extra-arg*" entries from the context. */
    @Override
    public Map<String, Object> hello(Map<String, Object> context) {
        Map<String, Object> response = new HashMap<String, Object>();
        response.put("body", "dubbo success\n");
        response.put("status", "200");
        for (Map.Entry<String, Object> entry : context.entrySet()) {
            String key = entry.getKey();
            Object value = entry.getValue();
            System.out.println("Key = " + key + ", Value = " + value);
            if (key.startsWith("extra-arg")) {
                response.put("Got-" + key, value);
            }
        }
        return response;
    }

    /** Always answers with a 503 failure payload. */
    @Override
    public Map<String, Object> fail(Map<String, Object> context) {
        Map<String, Object> response = new HashMap<String, Object>();
        response.put("body", "dubbo fail\n");
        response.put("status", "503");
        return response;
    }

    /** Sleeps half a second before failing, to exercise client-side timeouts. */
    @Override
    public Map<String, Object> timeout(Map<String, Object> context) {
        Map<String, Object> response = new HashMap<String, Object>();
        try {
            TimeUnit.MILLISECONDS.sleep(500);
        } catch (InterruptedException ignored) {
            // best-effort delay; an interrupt merely shortens the wait
        }
        response.put("body", "dubbo fail\n");
        response.put("status", "503");
        return response;
    }

    /** Returns a non-String status value on purpose, to test status-type handling. */
    @Override
    public Map<String, Object> badStatus(Map<String, Object> context) {
        Map<String, Object> response = new HashMap<String, Object>();
        response.put("body", "ok\n");
        response.put("status", 200);
        return response;
    }
}
| 8,234 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dubbo.backend;
import java.util.Map;
public interface DemoService {
    /**
     * Standard sample Tengine/Dubbo interface demo.
     *
     * @param context HTTP request information passed from the gateway
     * @return a {@code Map<String, Object>} translated into the HTTP response
     **/
    Map<String, Object> hello(Map<String, Object> context);

    /**
     * Test hook for a non-200 Dubbo response.
     *
     * @param context HTTP request information passed from the gateway
     * @return a {@code Map<String, Object>} translated into the HTTP response
     **/
    Map<String, Object> fail(Map<String, Object> context);

    /**
     * Test hook for a Dubbo response timeout.
     *
     * @param context HTTP request information passed from the gateway
     * @return a {@code Map<String, Object>} translated into the HTTP response
     **/
    Map<String, Object> timeout(Map<String, Object> context);

    /**
     * Test hook for a non-String status code in the response map.
     *
     * @param context HTTP request information passed from the gateway
     * @return a {@code Map<String, Object>} translated into the HTTP response
     **/
    Map<String, Object> badStatus(Map<String, Object> context);
}
| 8,235 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.fineract.cn.test.listener;
import com.google.gson.Gson;
import com.google.gson.GsonBuilder;
import org.apache.fineract.cn.lang.TenantContextHolder;
import org.junit.Assert;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.context.annotation.Configuration;
import org.springframework.test.context.ContextConfiguration;
import org.springframework.test.context.junit4.SpringJUnit4ClassRunner;
import org.springframework.test.context.support.AnnotationConfigContextLoader;
import java.util.Objects;
/**
* @author Myrle Krantz
*/
@RunWith(SpringJUnit4ClassRunner.class)
@ContextConfiguration(
    classes = {InMemoryEventRecorderTest.TestConfiguration.class},
    loader = AnnotationConfigContextLoader.class)
public class InMemoryEventRecorderTest {

  @SuppressWarnings("SpringJavaAutowiringInspection")
  @Autowired
  EventRecorder eventRecorder;

  /** After clear(), a previously recorded event must no longer be found. */
  @Test
  public void shouldNotFindEventAfterClear() throws InterruptedException {
    final String tenant = "x";
    final String line = "could not put humpty together again";
    final String lunch = "egg salad";
    generateEvent(tenant, line, lunch);

    eventRecorder.clear();

    TenantContextHolder.setIdentifier(tenant);
    final boolean found = eventRecorder.wait(line, new TestPayloadObject(lunch));
    TenantContextHolder.clear();
    Assert.assertFalse(found);
  }

  /** Waiting for an event that was never generated must time out. */
  @Test
  public void shouldNotFindEventWhichDidNotOccur() throws InterruptedException {
    final boolean found = eventRecorder.wait("bleblablub", new TestPayloadObject("ceasar dressing"));
    Assert.assertFalse(found);
  }

  /** An event recorded for tenant "y" must not be visible under tenant "x". */
  @Test
  public void shouldNotFindEventWhenEventWithWrongTenantOccurs() throws InterruptedException {
    final String line = "all the king's horses";
    final String dressing = "vinegar dressing";
    generateEvent("y", line, dressing);

    TenantContextHolder.setIdentifier("x");
    final boolean found = eventRecorder.wait(line, new TestPayloadObject(dressing));
    TenantContextHolder.clear();
    Assert.assertFalse(found);
  }

  /** An event with a different operation name must not match. */
  @Test
  public void shouldNotFindEventWhenEventWithWrongOperationOccurs() throws InterruptedException {
    final String tenant = "x";
    final String dressing = "italian dressing";
    generateEvent(tenant, "had a great fall", dressing);

    TenantContextHolder.setIdentifier(tenant);
    final boolean found = eventRecorder.wait("humpty dumpty", new TestPayloadObject(dressing));
    TenantContextHolder.clear();
    Assert.assertFalse(found);
  }

  /** An event with a non-equal payload must not match. */
  @Test
  public void shouldNotFindEventWhenEventWithWrongPayloadOccurs() throws InterruptedException {
    final String tenant = "x";
    final String line = "and all the king's men";
    generateEvent(tenant, line, "ranch dressing");

    TenantContextHolder.setIdentifier(tenant);
    final boolean found = eventRecorder.wait(line,
        new TestPayloadObject("blue cheese dressing"));
    TenantContextHolder.clear();
    Assert.assertFalse(found);
  }

  /** Tenant, operation, and payload all matching must yield a hit. */
  @Test
  public void shouldFindEventWhenEverythingMatches() throws InterruptedException {
    final String tenant = "x";
    final String line = "could not put humpty together again";
    final String lunch = "egg salad";
    generateEvent(tenant, line, lunch);

    TenantContextHolder.setIdentifier(tenant);
    final boolean yum = eventRecorder.wait(line, new TestPayloadObject(lunch));
    TenantContextHolder.clear();
    Assert.assertTrue(yum);
  }

  /** Individual events must remain findable among many recorded events. */
  @Test
  public void shouldFindEventWhenTwoAmongSeveral() throws InterruptedException {
    final String tenant = "x";
    final String operation = "tickle";
    final String payloadParamater = "funnyBone";
    for (int i = 0; i < 20; i++) {
      generateEvent(tenant, operation + i, payloadParamater + i);
    }

    TenantContextHolder.setIdentifier(tenant);
    boolean found = eventRecorder.wait("tickle15", new TestPayloadObject("funnyBone15"));
    TenantContextHolder.clear();
    Assert.assertTrue(found);

    TenantContextHolder.setIdentifier(tenant);
    found = eventRecorder.wait("tickle11", new TestPayloadObject("funnyBone11"));
    TenantContextHolder.clear();
    Assert.assertTrue(found);
  }

  /** Records an event for the given tenant/operation with a JSON-serialized payload. */
  private void generateEvent(
      final String tenant,
      final String operation,
      final String payloadParameter) {
    final Gson gson = new GsonBuilder().create();
    eventRecorder.event(tenant, operation,
        gson.toJson(new TestPayloadObject(payloadParameter)),
        TestPayloadObject.class);
  }

  @Configuration
  @EnableEventRecording(maxWait = 1L)
  static public class TestConfiguration {
    public TestConfiguration() {
    }
  }

  /**
   * Value-object payload used for event matching.
   * FIX: declared static — as a non-static inner class it carried a hidden reference
   * to the enclosing test instance, which is unnecessary here and is a documented
   * pitfall when Gson deserializes the class (the outer reference cannot be supplied).
   */
  private static class TestPayloadObject {
    private final String param;

    private TestPayloadObject(String param) {
      this.param = param;
    }

    @Override
    public boolean equals(Object o) {
      if (this == o)
        return true;
      if (!(o instanceof TestPayloadObject))
        return false;
      TestPayloadObject that = (TestPayloadObject) o;
      return Objects.equals(param, that.param);
    }

    @Override
    public int hashCode() {
      return Objects.hash(param);
    }
  }
}
| 8,236 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.fineract.cn.test.env;
import org.junit.Assert;
import org.junit.Test;
public class TestEnvironmentTest {

  private static final String UNIQUE_ID_PREFIX = "blah-";

  public TestEnvironmentTest() {
    super();
  }

  /** Every property the environment defines must end up in the JVM system properties. */
  @Test
  public void shouldPopulateAllProperties() throws Exception {
    final TestEnvironment environment = new TestEnvironment("fineract-cn-core");
    environment.populate();

    for (final Object propertyKey : environment.properties.keySet()) {
      Assert.assertNotNull(propertyKey + " not found.", System.getProperty(propertyKey.toString()));
    }
  }

  /** Consecutive identifiers share the prefix but must never collide. */
  @Test
  public void shouldGenerateUniqueId() {
    final TestEnvironment environment = new TestEnvironment("fineract-cn-core");

    final String first = environment.generateUniqueIdentifier(UNIQUE_ID_PREFIX);
    final String second = environment.generateUniqueIdentifier(UNIQUE_ID_PREFIX);

    Assert.assertNotEquals(first, second);
    Assert.assertTrue(first.startsWith(UNIQUE_ID_PREFIX));
    Assert.assertTrue(second.startsWith(UNIQUE_ID_PREFIX));
  }

  /** The suffix after the prefix must be padded to exactly the requested width. */
  @Test
  public void shouldGenerateZeroBufferedUniqueId() {
    final TestEnvironment environment = new TestEnvironment("fineract-cn-core");

    final String identifier = environment.generateUniqueIdentifier(UNIQUE_ID_PREFIX, 5);

    Assert.assertTrue(identifier.startsWith(UNIQUE_ID_PREFIX));
    Assert.assertEquals(identifier.length() - UNIQUE_ID_PREFIX.length(), 5);
  }
}
| 8,237 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.fineract.cn.test.domain;
import org.apache.fineract.cn.lang.DateConverter;
import org.junit.Assert;
import org.junit.Test;
import java.time.Clock;
import java.time.LocalDate;
import java.time.temporal.ChronoUnit;
/**
*
* @author Myrle Krantz
*/
public class DateStampCheckerTest {

  /** A 5-day checker must accept a stamp 5 days ahead and reject today's stamp. */
  @Test
  public void inTheFuture() throws Exception {
    final DateStampChecker fiveDayChecker = DateStampChecker.inTheFuture(5);

    final LocalDate today = LocalDate.now(Clock.systemUTC());
    final String inFiveDays = DateConverter.toIsoString(today.plus(5, ChronoUnit.DAYS));
    fiveDayChecker.assertCorrect(inFiveDays);

    final String todayStamp = DateConverter.toIsoString(today);
    Assert.assertFalse(fiveDayChecker.isCorrect(todayStamp));
  }
}
| 8,238 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.fineract.cn.test.domain;
import org.apache.fineract.cn.lang.DateConverter;
import org.junit.Assert;
import org.junit.Test;
import java.time.Clock;
import java.time.Duration;
import java.time.LocalDateTime;
import java.time.temporal.ChronoUnit;
/**
*
* @author Myrle Krantz
*/
public class TimeStampCheckerTest {
@Test
public void roughlyNow() throws Exception {
final TimeStampChecker checker = TimeStampChecker.roughlyNow();
final LocalDateTime now = LocalDateTime.now(Clock.systemUTC());
final String nowAsString = DateConverter.toIsoString(now);
checker.assertCorrect(nowAsString);
final LocalDateTime fiveSecondsAgo = now.minus(5, ChronoUnit.SECONDS);
final String fiveSecondsAgoAsString = DateConverter.toIsoString(fiveSecondsAgo);
Assert.assertFalse(checker.isCorrect(fiveSecondsAgoAsString));
}
@Test
public void inTheFuture() throws Exception {
final TimeStampChecker checker = TimeStampChecker.inTheFuture(Duration.ofMinutes(5));
final LocalDateTime now = LocalDateTime.now(Clock.systemUTC());
final LocalDateTime fiveMinutesFromNow = now.plus(5, ChronoUnit.MINUTES);
final String fiveMinutesFromNowAsString = DateConverter.toIsoString(fiveMinutesFromNow);
checker.assertCorrect(fiveMinutesFromNowAsString);
final String nowAsString = DateConverter.toIsoString(now);
Assert.assertFalse(checker.isCorrect(nowAsString));
}
@Test
public void allowSomeWiggleRoom() throws Exception {
final TimeStampChecker checker = TimeStampChecker.allowSomeWiggleRoom(Duration.ofSeconds(30));
final LocalDateTime now = LocalDateTime.now(Clock.systemUTC());
final LocalDateTime roughlyNow = now.minus(25, ChronoUnit.SECONDS);
final String roughlyNowAsString = DateConverter.toIsoString(roughlyNow);
checker.assertCorrect(roughlyNowAsString);
final LocalDateTime fiveMinutesFromNow = now.plus(5, ChronoUnit.MINUTES);
final String fiveMinutesFromNowAsString = DateConverter.toIsoString(fiveMinutesFromNow);
Assert.assertFalse(checker.isCorrect(fiveMinutesFromNowAsString));
}
@Test
public void justLocalDateTime() throws Exception {
final TimeStampChecker checker = TimeStampChecker.roughlyNow();
final LocalDateTime now = LocalDateTime.now(Clock.systemUTC());
checker.assertCorrect(now);
}
} | 8,239 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.junit.rules;
/**
* Use this to "decorate" a resource to ensure that it is
* initialized exactly once and de-initialized exactly once
* when used in a suite of multiple tests. This is mostly
* useful when creating test suites.
*
* Example:
*
* <pre>
* {@code
* @literal @ClassRule
* public static TestRule orderClassRules = RuleChain
* .outerRule(new RunExternalResourceOnce(testEnvironment))
* .around(new RunExternalResourceOnce(cassandraInitializer))
* .around(new RunExternalResourceOnce(mariaDBInitializer));
* }
* </pre>
*
* @author Myrle Krantz
*/
@SuppressWarnings("unused")
public class RunExternalResourceOnce extends ExternalResource {
private final ExternalResource decoratedResource;
private int callCount = 0;
public RunExternalResourceOnce(final ExternalResource decoratedResource) {
//I love to decorate. Don't you?
this.decoratedResource = decoratedResource;
}
@Override
protected void before() throws Throwable {
if (callCount == 0)
decoratedResource.before();
callCount++;
}
@Override
protected void after() {
callCount--;
if (callCount == 0)
decoratedResource.after();
}
} | 8,240 |
0 | Create_ds/fineract-cn-test/src/main/java/org/junit | Create_ds/fineract-cn-test/src/main/java/org/junit/rules/RunExternalResourceConditionally.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.junit.rules;
/**
* Use this to "decorate" a resource to ensure that it is
* initialized and de-initialized only if the condition is met.
*
* Example:
*
* <pre>
* {@code
* @literal @ClassRule
* public static final ExternalResource EUREKA_FOR_TEST = RunExternalResourceConditionally(
* new EurekaForTest()
* System.getProperty("external.infrastructure") == null);
* }
* </pre>
*
* @author Myrle Krantz
*/
@SuppressWarnings("unused")
public class RunExternalResourceConditionally extends ExternalResource {
  /** The resource to delegate to when the condition holds. */
  private final ExternalResource delegate;
  /** Fixed at construction time; gates both before() and after(). */
  private final boolean shouldRun;

  public RunExternalResourceConditionally(
      final ExternalResource decoratedResource,
      final boolean condition) {
    this.delegate = decoratedResource;
    this.shouldRun = condition;
  }

  @Override
  protected void before() throws Throwable {
    if (!shouldRun) {
      return;
    }
    delegate.before();
  }

  @Override
  protected void after() {
    if (!shouldRun) {
      return;
    }
    delegate.after();
  }
}
| 8,241 |
0 | Create_ds/fineract-cn-test/src/main/java/org/apache/fineract/cn/test | Create_ds/fineract-cn-test/src/main/java/org/apache/fineract/cn/test/listener/EventRecorderBeanDefiner.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.fineract.cn.test.listener;
import com.google.gson.GsonBuilder;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.support.AbstractBeanDefinition;
import org.springframework.beans.factory.support.BeanDefinitionBuilder;
import org.springframework.beans.factory.support.BeanDefinitionRegistry;
import org.springframework.context.annotation.ImportBeanDefinitionRegistrar;
import org.springframework.core.type.AnnotationMetadata;
import static org.springframework.beans.factory.config.BeanDefinition.SCOPE_SINGLETON;
/**
* @author Myrle Krantz
*/
class EventRecorderBeanDefiner implements ImportBeanDefinitionRegistrar {
  @Override
  public void registerBeanDefinitions(
      final AnnotationMetadata importingClassMetadata,
      final BeanDefinitionRegistry registry) {
    // Read the maxWait attribute off the importing @EnableEventRecording annotation.
    final Object maxWait = importingClassMetadata
        .getAnnotationAttributes(EnableEventRecording.class.getTypeName())
        .get("maxWait");

    // Register a singleton InMemoryEventRecorder wired with the wait limit,
    // a plain Gson instance, and a dedicated logger.
    final BeanDefinitionBuilder builder =
        BeanDefinitionBuilder.genericBeanDefinition(InMemoryEventRecorder.class);
    builder.addConstructorArgValue(maxWait);
    builder.addConstructorArgValue(new GsonBuilder().create());
    builder.addConstructorArgValue(LoggerFactory.getLogger("event-recorder-logger"));
    builder.setScope(SCOPE_SINGLETON);

    registry.registerBeanDefinition("eventRecorder", builder.getBeanDefinition());
  }
}
| 8,242 |
0 | Create_ds/fineract-cn-test/src/main/java/org/apache/fineract/cn/test | Create_ds/fineract-cn-test/src/main/java/org/apache/fineract/cn/test/listener/InMemoryEventRecorder.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.fineract.cn.test.listener;
import com.google.gson.Gson;
import java.util.Objects;
import org.apache.fineract.cn.lang.TenantContextHolder;
import org.slf4j.Logger;
import java.util.Date;
import java.util.LinkedList;
import java.util.List;
import java.util.concurrent.BlockingDeque;
import java.util.concurrent.LinkedBlockingDeque;
import java.util.concurrent.TimeUnit;
import java.util.function.Function;
/**
* @author Myrle Krantz
*/
/**
 * An {@link EventRecorder} backed by an in-memory {@link BlockingDeque}.
 * Incoming events are deserialized from JSON and enqueued; waiters poll the
 * deque until a matching event arrives or the configured wait limit elapses.
 * Non-matching events that a waiter removed are returned to the tail of the
 * deque when the wait completes (restoration order is the reverse of removal),
 * so other waiters can still observe them.
 */
class InMemoryEventRecorder implements EventRecorder {
// Maximum total time, in milliseconds, that waitForMatch will block.
private final long maxWait;
// Queue of events observed so far; shared between the listener and waiters.
private final BlockingDeque<EventRecord> blockingDeque;
// Used to deserialize event payloads from their JSON representation.
private final Gson gson;
private final Logger logger;
InMemoryEventRecorder(final long maxWait, final Gson gson, final Logger logger) {
this.maxWait = maxWait;
this.blockingDeque = new LinkedBlockingDeque<>();
this.gson = gson;
this.logger = logger;
}
/**
 * Waits for an event with the given operation whose payload equals
 * {@code payloadMatcher}. Implemented on top of {@link #waitForMatch} with an
 * equality-based matcher; toString is overridden so log messages show the
 * expected payload rather than the anonymous class name.
 */
@Override
public <T> boolean wait(final String operation, final T payloadMatcher) throws InterruptedException {
return waitForMatch(operation, new Function<T, Boolean>() {
@Override
public Boolean apply(Object x) {
return Objects.equals(x,payloadMatcher);
}
@Override
public String toString() {
return payloadMatcher.toString();
}
});
}
/**
 * Polls the deque until an event arrives whose tenant matches the current
 * TenantContextHolder tenant, whose operation equals {@code operation}, and
 * whose payload satisfies {@code payloadMatcher}.
 *
 * @return true if a matching event was observed within {@code maxWait}
 *         milliseconds; false once the wait limit has passed and the deque is
 *         empty.
 * @throws InterruptedException if interrupted while polling or while
 *         restoring removed events.
 */
@Override
public <T> boolean waitForMatch(String operation, Function<T, Boolean> payloadMatcher) throws InterruptedException {
final long startTime = new Date().getTime();
long waitedSoFar = 0;
// Cleanup re-enqueues every non-matching event we removed, whether we
// return normally or give up; see its close() below.
try (final Cleanup cleanup = new Cleanup()) {
boolean found = false;
while (!found) {
// Block only for the wait budget that remains (never negative).
final EventRecord event = this.blockingDeque.poll(Math.max(0, maxWait - waitedSoFar), TimeUnit.MILLISECONDS);
final long now = new Date().getTime();
waitedSoFar = now - startTime;
if (event != null) {
//noinspection unchecked
found = (TenantContextHolder.identifier().map(x -> x.equals(event.getTenant())).orElse(false) &&
(event.getOperation().equals(operation)) &&
payloadMatcher.apply((T)event.getPayloadObject()));
if (!found)
cleanup.addStep(() -> blockingDeque.putLast(event));
}
// Give up only when the budget is spent AND no queued events remain to
// be examined.
if ((waitedSoFar > maxWait) && blockingDeque.isEmpty()) {
logger.info("Waited {} milliseconds, and event {} with payload {} wasn't observed",
waitedSoFar, operation, payloadMatcher);
return false;
}
}
return true;
}
}
/**
 * Records an event: deserializes {@code payload} from JSON into {@code clazz}
 * and appends the resulting record to the deque.
 */
@Override
public <T> void event(final String tenant, final String operation, final String payload, final Class<T> clazz) {
final T payloadObject = gson.fromJson(payload, clazz);
this.blockingDeque.add(new EventRecord(tenant, operation, payloadObject));
}
/** Discards all recorded events. */
public void clear() {
this.blockingDeque.clear();
}
// A deferred action executed when a Cleanup instance is closed.
private interface Step {
void clean() throws InterruptedException;
}
/**
 * Collects deferred steps and runs them on close. Steps are inserted at the
 * front of the list, so they execute in reverse order of registration.
 */
private class Cleanup implements AutoCloseable {
private List<Step> steps = new LinkedList<>();
Cleanup() {
super();
}
@Override
public void close() throws InterruptedException {
cleanup();
}
// Registers a step to run first (LIFO ordering).
void addStep(final Step newFirstStep) {
steps.add(0, newFirstStep);
}
private void cleanup() throws InterruptedException {
for (final Step step : steps) {
step.clean();
}
}
}
}
| 8,243 |
0 | Create_ds/fineract-cn-test/src/main/java/org/apache/fineract/cn/test | Create_ds/fineract-cn-test/src/main/java/org/apache/fineract/cn/test/listener/EnableEventRecording.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.fineract.cn.test.listener;
import org.springframework.context.annotation.Import;
import java.lang.annotation.Documented;
import java.lang.annotation.ElementType;
import java.lang.annotation.Inherited;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
/**
 * Enables event recording for a test configuration class: imports
 * {@link EventRecorderBeanDefiner}, which registers a singleton
 * {@code eventRecorder} bean in the application context.
 */
@SuppressWarnings("WeakerAccess")
@Target(ElementType.TYPE)
@Retention(RetentionPolicy.RUNTIME)
@Documented
@Inherited
@Import({EventRecorderBeanDefiner.class})
public @interface EnableEventRecording {
/**
 * Maximum time, in milliseconds, that the recorder's wait methods block for
 * an expected event before giving up. Defaults to 30 seconds.
 */
long maxWait() default 30000L;
}
| 8,244 |
0 | Create_ds/fineract-cn-test/src/main/java/org/apache/fineract/cn/test | Create_ds/fineract-cn-test/src/main/java/org/apache/fineract/cn/test/listener/EventRecord.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.fineract.cn.test.listener;
/**
* @author Myrle Krantz
*/
/**
 * Immutable value object pairing a recorded event's tenant and operation
 * name with its deserialized payload.
 */
class EventRecord {
  private final String tenant;
  private final String operation;
  private final Object payloadObject;

  EventRecord(final String tenant, final String operation, final Object payloadObject) {
    super();
    this.tenant = tenant;
    this.operation = operation;
    this.payloadObject = payloadObject;
  }

  /** @return the tenant under which the event was emitted. */
  String getTenant() {
    return this.tenant;
  }

  /** @return the operation name of the recorded event. */
  String getOperation() {
    return this.operation;
  }

  /** @return the deserialized event payload. */
  Object getPayloadObject() {
    return this.payloadObject;
  }
}
| 8,245 |
0 | Create_ds/fineract-cn-test/src/main/java/org/apache/fineract/cn/test | Create_ds/fineract-cn-test/src/main/java/org/apache/fineract/cn/test/listener/EventRecorder.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.fineract.cn.test.listener;
import java.util.function.Function;
/**
 * Records events observed during a test and lets test code wait for an
 * expected event to arrive.
 */
@SuppressWarnings("WeakerAccess")
public interface EventRecorder {
/**
 * Records an event. The payload is provided as JSON and deserialized into
 * {@code clazz} before being stored.
 */
<T> void event(final String tenant, final String operation, final String payload, final Class<T> clazz);
/**
 * Waits for an event with the given operation whose payload equals
 * {@code payloadMatcher}.
 *
 * @return true if a matching event was observed within the recorder's wait
 *         limit, false otherwise.
 */
<T> boolean wait(final String operation, final T payloadMatcher) throws InterruptedException;
/**
 * Waits for an event with the given operation whose payload satisfies
 * {@code payloadMatcher}.
 *
 * @return true if a matching event was observed within the recorder's wait
 *         limit, false otherwise.
 */
<T> boolean waitForMatch(final String operation, final Function<T, Boolean> payloadMatcher) throws InterruptedException;
/** Discards all recorded events. */
void clear();
}
| 8,246 |
0 | Create_ds/fineract-cn-test/src/main/java/org/apache/fineract/cn/test | Create_ds/fineract-cn-test/src/main/java/org/apache/fineract/cn/test/fixture/TenantDataStoreTestContext.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.fineract.cn.test.fixture;
import org.apache.fineract.cn.test.env.TestEnvironment;
import org.apache.fineract.cn.lang.AutoTenantContext;
/**
* @author Myrle Krantz
*/
@SuppressWarnings({"WeakerAccess", "unused"})
public class TenantDataStoreTestContext implements AutoCloseable {
private final AutoTenantContext autoTenantContext;
private TenantDataStoreTestContext(final String tenantName, final DataStoreTenantInitializer[] dataStoreTenantInitializers) {
this.autoTenantContext = new AutoTenantContext(tenantName);
for (final DataStoreTenantInitializer dataStoreTenantInitializer : dataStoreTenantInitializers)
{
dataStoreTenantInitializer.initializeTenant(tenantName);
}
}
public static TenantDataStoreTestContext forDefinedTenantName(final String tenantName, final DataStoreTenantInitializer... dataStoreTenantInitializers)
{
return new TenantDataStoreTestContext(tenantName, dataStoreTenantInitializers);
}
public static TenantDataStoreTestContext forRandomTenantName(final DataStoreTenantInitializer... dataStoreTenantInitializers)
{
return new TenantDataStoreTestContext(TestEnvironment.getRandomTenantName(), dataStoreTenantInitializers);
}
@Override
public void close() {
autoTenantContext.close();
}
} | 8,247 |
0 | Create_ds/fineract-cn-test/src/main/java/org/apache/fineract/cn/test | Create_ds/fineract-cn-test/src/main/java/org/apache/fineract/cn/test/fixture/DataStoreTenantInitializer.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.fineract.cn.test.fixture;
import org.junit.rules.ExternalResource;
/**
 * Base class for data-store test fixtures: a JUnit {@link ExternalResource}
 * which starts a data store before a test run, can provision individual
 * tenants in it, and shuts it down afterwards.
 *
 * @author Myrle Krantz
 */
public abstract class DataStoreTenantInitializer extends ExternalResource {
/** Starts the data store; called once via {@link #before()}. */
protected abstract void initialize() throws Exception;
/** Provisions the given tenant in the (already initialized) data store. */
protected abstract void initializeTenant(final String tenantName);
/** Shuts the data store down; called once via {@link #after()}. */
protected abstract void finish();
@Override
protected void before() throws Exception {
initialize();
}
@Override
protected void after() {
finish();
}
}
| 8,248 |
0 | Create_ds/fineract-cn-test/src/main/java/org/apache/fineract/cn/test | Create_ds/fineract-cn-test/src/main/java/org/apache/fineract/cn/test/fixture/TenantDataStoreContextTestRule.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.fineract.cn.test.fixture;
import org.apache.fineract.cn.test.env.TestEnvironment;
import org.junit.rules.ExternalResource;
import org.springframework.util.Assert;
import javax.annotation.Nullable;
/**
* Use an instance of class as a @Classrule in component tests which require a tenant
* initialized in the data store(s) used by the service under test. It will generate a
* tenant name, create any necessary data structures, and set the tenant context for
* REST calls into the service.
*
* Example:
* <pre>
* {@code
* @literal @ClassRule
* public final static TenantDataStoreContextTestRule tenantDataStoreContext
* = TenantDataStoreContextTestRule.forRandomTenantName(cassandraInitializer, mariaDBInitializer);
* }
* </pre>
*
*
* @author Myrle Krantz
*/
@SuppressWarnings({"unused", "WeakerAccess"})
public class TenantDataStoreContextTestRule extends ExternalResource {
@Nullable
private String tenantName;
@Nullable
private TenantDataStoreTestContext tenantDataStoreTestContext;
private final boolean generateTenantName;
private final DataStoreTenantInitializer[] dataStoreTenantInitializers;
private TenantDataStoreContextTestRule(
final String tenantName,
final DataStoreTenantInitializer... dataStoreTenantInitializers) {
this.generateTenantName = false;
this.tenantName = tenantName;
this.dataStoreTenantInitializers = dataStoreTenantInitializers;
}
private TenantDataStoreContextTestRule(
final DataStoreTenantInitializer... dataStoreTenantInitializers) {
this.generateTenantName = true;
this.tenantName = null;
this.dataStoreTenantInitializers = dataStoreTenantInitializers;
}
public static TenantDataStoreContextTestRule forRandomTenantName(
final DataStoreTenantInitializer... dataStoreTenantInitializers)
{
return new TenantDataStoreContextTestRule(dataStoreTenantInitializers);
}
public static TenantDataStoreContextTestRule forDefinedTenantName(
final String tenantName,
final DataStoreTenantInitializer... dataStoreTenantInitializers)
{
return new TenantDataStoreContextTestRule(tenantName, dataStoreTenantInitializers);
}
@Override
public void before() {
if (generateTenantName) {
//Generate the tenantName in the before method rather than in the constructor.
//If this rule is used as a static variable in a test parent class, in the context of a
//test suite consisting of multiple test classes, the tenantName should
//be regenerated for each test class run. This has two advantages:
//1.) The database initialization isn't re-executed for the tenant. Re-executing database
// initialization fails, causing all tests after the first one to fail.
//2.) Each test class is executed in the context of a new tenant, thus mostly isolating
// each test class from any side-effects produced by the others.
tenantName = TestEnvironment.getRandomTenantName();
}
tenantDataStoreTestContext = TenantDataStoreTestContext.forDefinedTenantName(tenantName, dataStoreTenantInitializers);
}
@Override
public void after() {
Assert.notNull(tenantDataStoreTestContext);
tenantDataStoreTestContext.close();
}
public String getTenantName() {
return tenantName;
}
} | 8,249 |
0 | Create_ds/fineract-cn-test/src/main/java/org/apache/fineract/cn/test/fixture | Create_ds/fineract-cn-test/src/main/java/org/apache/fineract/cn/test/fixture/postgresql/PostgreSQLInitializer.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.fineract.cn.test.fixture.postgresql;
import com.opentable.db.postgres.embedded.EmbeddedPostgres;
import org.apache.fineract.cn.test.env.TestEnvironment;
import org.apache.fineract.cn.test.fixture.DataStoreTenantInitializer;
import java.sql.*;
import org.apache.fineract.cn.postgresql.util.JdbcUrlBuilder;
@SuppressWarnings({"WeakerAccess", "unused", "SqlNoDataSourceInspection", "SqlDialectInspection"})
public final class PostgreSQLInitializer extends DataStoreTenantInitializer {

  // When true, an externally managed PostgreSQL instance is used and no
  // embedded server is started or stopped by this initializer.
  private final boolean useExistingDB;
  // The embedded server; null when not running (or when an external DB is used).
  private static EmbeddedPostgres pg;

  /** Creates an initializer which starts and stops an embedded PostgreSQL server. */
  public PostgreSQLInitializer() {
    this(false);
  }

  /**
   * @param useExistingDB true to use an already-running external PostgreSQL
   *                      instance instead of starting an embedded one.
   */
  public PostgreSQLInitializer(final boolean useExistingDB) {
    super();
    this.useExistingDB = useExistingDB;
  }

  @Override
  public void initialize() throws Exception {
    PostgreSQLInitializer.setup(useExistingDB);
  }

  @Override
  public void initializeTenant(final String tenantName) {
    PostgreSQLInitializer.createDatabaseTenant(tenantName);
  }

  @Override
  public void finish() {
    // Only tear down a server this initializer started itself.
    if (!useExistingDB) {
      try {
        PostgreSQLInitializer.tearDown();
      } catch (final Exception e) {
        throw new RuntimeException(e);
      }
    }
  }

  /** Starts an embedded PostgreSQL server and creates the meta database. */
  public static void setup() throws Exception {
    setup(false);
  }

  /**
   * Starts an embedded PostgreSQL server and creates the meta database,
   * unless an external database is in use.
   */
  public static void setup(final boolean useExistingDB) throws Exception {
    if (!useExistingDB) {
      PostgreSQLInitializer.startEmbeddedPostgreSQL();
      PostgreSQLInitializer.createDatabaseSeshat();
    }
  }

  /** Stops the embedded server if one is running; safe to call repeatedly. */
  public static void tearDown() throws Exception {
    if (PostgreSQLInitializer.pg != null) {
      PostgreSQLInitializer.pg.close();
      PostgreSQLInitializer.pg = null;
    }
  }

  private static void startEmbeddedPostgreSQL() throws Exception {
    PostgreSQLInitializer.pg = EmbeddedPostgres.builder()
        .setPort(Integer.parseInt(TestEnvironment.POSTGRESQL_PORT_DEFAULT))
        .start();
    // Publish host/port so code that reads the standard system properties
    // finds the embedded server.
    System.setProperty(TestEnvironment.POSTGRESQL_HOST_PROPERTY, TestEnvironment.POSTGRESQL_HOST_DEFAULT);
    System.setProperty(TestEnvironment.POSTGRESQL_PORT_PROPERTY, TestEnvironment.POSTGRESQL_PORT_DEFAULT);
  }

  /**
   * Creates the meta database plus a playground database, then the
   * tenant-management table inside the meta database. SQLExceptions are
   * logged and swallowed so re-runs against a prepared database don't fail.
   */
  private static void createDatabaseSeshat() {
    try {
      Class.forName(System.getProperty(TestEnvironment.POSTGRESQL_DRIVER_CLASS_PROPERTY));
    } catch (ClassNotFoundException ex) {
      throw new IllegalArgumentException(ex.getMessage(), ex);
    }
    final String jdbcUrl = JdbcUrlBuilder
        .create(JdbcUrlBuilder.DatabaseType.POSTGRESQL)
        .host(System.getProperty(TestEnvironment.POSTGRESQL_HOST_PROPERTY))
        .port(System.getProperty(TestEnvironment.POSTGRESQL_PORT_PROPERTY))
        .instanceName(TestEnvironment.POSTGRESQL_DATABASE_NAME)
        .build();
    try (final Connection pgConnection = DriverManager.getConnection(jdbcUrl,
        System.getProperty(TestEnvironment.POSTGRESQL_USER_PROPERTY),
        System.getProperty(TestEnvironment.POSTGRESQL_PASSWORD_PROPERTY));
         final Statement createDbStatement = pgConnection.createStatement()) {
      pgConnection.setAutoCommit(true);
      // CREATE DATABASE cannot take bind parameters; the name comes from test
      // configuration, not untrusted input.
      createDbStatement.execute("CREATE DATABASE " + System.getProperty(TestEnvironment.POSTGRESQL_DATABASE_NAME_PROPERTY));
      createDbStatement.execute("CREATE DATABASE playground");
    } catch (final SQLException ex) {
      // Best effort: the databases may already exist from a previous run.
      ex.printStackTrace();
    }
    final String tenantJdbcUrl = JdbcUrlBuilder
        .create(JdbcUrlBuilder.DatabaseType.POSTGRESQL)
        .host(System.getProperty(TestEnvironment.POSTGRESQL_HOST_PROPERTY))
        .port(System.getProperty(TestEnvironment.POSTGRESQL_PORT_PROPERTY))
        .instanceName(System.getProperty(TestEnvironment.POSTGRESQL_DATABASE_NAME_PROPERTY))
        .build();
    try (
        final Connection metaDbConnection = DriverManager.getConnection(tenantJdbcUrl,
            System.getProperty(TestEnvironment.POSTGRESQL_USER_PROPERTY),
            System.getProperty(TestEnvironment.POSTGRESQL_PASSWORD_PROPERTY));
        final Statement metaStatement = metaDbConnection.createStatement()
    ) {
      metaDbConnection.setAutoCommit(true);
      // Tenant-management table; IF NOT EXISTS keeps repeated setup idempotent.
      metaStatement.execute("CREATE TABLE IF NOT EXISTS tenants (" +
          "  identifier    VARCHAR(32)  NOT NULL," +
          "  driver_class  VARCHAR(255) NOT NULL," +
          "  database_name VARCHAR(32)  NOT NULL," +
          "  host          VARCHAR(32)  NOT NULL," +
          "  port          VARCHAR(5)   NOT NULL," +
          "  a_user        VARCHAR(32)  NOT NULL," +
          "  pwd           VARCHAR(32)  NOT NULL," +
          "  PRIMARY KEY (identifier)" +
          ")");
    } catch (SQLException e) {
      e.printStackTrace();
    }
  }

  /**
   * Creates the database for the given tenant and registers its connection
   * info in the tenant-management table (if not already registered).
   */
  public static void createDatabaseTenant(final String identifier) {
    try {
      Class.forName(System.getProperty(TestEnvironment.POSTGRESQL_DRIVER_CLASS_PROPERTY));
    } catch (ClassNotFoundException ex) {
      throw new IllegalArgumentException(ex.getMessage(), ex);
    }
    final String jdbcUrl = JdbcUrlBuilder
        .create(JdbcUrlBuilder.DatabaseType.POSTGRESQL)
        .host(System.getProperty(TestEnvironment.POSTGRESQL_HOST_PROPERTY))
        .port(System.getProperty(TestEnvironment.POSTGRESQL_PORT_PROPERTY))
        .instanceName(TestEnvironment.POSTGRESQL_DATABASE_NAME_DEFAULT)
        .build();
    try (final Connection connection = DriverManager.getConnection(jdbcUrl,
        System.getProperty(TestEnvironment.POSTGRESQL_USER_PROPERTY),
        System.getProperty(TestEnvironment.POSTGRESQL_PASSWORD_PROPERTY))) {
      connection.setAutoCommit(true);
      try (final Statement statement = connection.createStatement()) {
        // CREATE DATABASE cannot take bind parameters; identifier comes from
        // test code, not untrusted input.
        statement.execute("CREATE DATABASE " + identifier);
      }
      // Register the tenant only if no row exists yet. Bug fix: the previous
      // "SELECT * ... WHERE identifier = ..." followed by
      // "next() && getInt(1) == 0" could never register an absent tenant,
      // because next() returns false when no row matches. A COUNT(*) query
      // always yields exactly one row, so the zero check works as intended.
      try (final PreparedStatement existsStatement =
               connection.prepareStatement("SELECT COUNT(*) FROM tenants WHERE identifier = ?")) {
        existsStatement.setString(1, identifier);
        try (final ResultSet resultSet = existsStatement.executeQuery()) {
          if (resultSet.next()
              && resultSet.getInt(1) == 0) {
            final PostgreSQLTenant postgreSQLTenant = new PostgreSQLTenant();
            postgreSQLTenant.setIdentifier(identifier);
            postgreSQLTenant.setDriverClass(System.getProperty(TestEnvironment.POSTGRESQL_DRIVER_CLASS_PROPERTY));
            postgreSQLTenant.setDatabaseName(identifier);
            postgreSQLTenant.setHost(System.getProperty(TestEnvironment.POSTGRESQL_HOST_PROPERTY));
            postgreSQLTenant.setPort(System.getProperty(TestEnvironment.POSTGRESQL_PORT_PROPERTY));
            postgreSQLTenant.setUser(System.getProperty(TestEnvironment.POSTGRESQL_USER_PROPERTY));
            postgreSQLTenant.setPassword(System.getProperty(TestEnvironment.POSTGRESQL_PASSWORD_PROPERTY));
            postgreSQLTenant.insert(connection);
          }
        }
      }
    } catch (final SQLException ex) {
      // Best effort: the database may already exist from a previous run.
      ex.printStackTrace();
    }
  }
}
| 8,250 |
0 | Create_ds/fineract-cn-test/src/main/java/org/apache/fineract/cn/test/fixture | Create_ds/fineract-cn-test/src/main/java/org/apache/fineract/cn/test/fixture/postgresql/PostgreSQLTenant.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.fineract.cn.test.fixture.postgresql;
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.SQLException;
/**
 * Row object for the "tenants" management table: holds one tenant's JDBC
 * connection information and knows how to insert itself into the table.
 */
@SuppressWarnings({"SqlNoDataSourceInspection", "SqlDialectInspection", "WeakerAccess"})
class PostgreSQLTenant {
  // 1-based JDBC parameter indices, matching the column order in INSERT_STMT.
  private static final int INDEX_IDENTIFIER = 1;
  private static final int INDEX_DRIVER_CLASS = 2;
  private static final int INDEX_DATABASE_NAME = 3;
  private static final int INDEX_HOST = 4;
  private static final int INDEX_PORT = 5;
  private static final int INDEX_USER = 6;
  private static final int INDEX_PASSWORD = 7;
  private static final String INSERT_STMT = " INSERT INTO " + "tenants" +
      " (identifier, driver_class, database_name, host, port, a_user, pwd) " +
      " values " +
      " (?, ?, ?, ?, ?, ?, ?) ";

  private String identifier;
  private String driverClass;
  private String databaseName;
  private String host;
  private String port;
  private String user;
  private String password;

  PostgreSQLTenant() {
    super();
  }

  /**
   * Inserts this tenant's row using a parameterized statement on the given
   * (caller-owned) connection.
   */
  void insert(final Connection connection) throws SQLException {
    try (final PreparedStatement stmt = connection.prepareStatement(PostgreSQLTenant.INSERT_STMT)) {
      stmt.setString(INDEX_IDENTIFIER, this.getIdentifier());
      stmt.setString(INDEX_DRIVER_CLASS, this.getDriverClass());
      stmt.setString(INDEX_DATABASE_NAME, this.getDatabaseName());
      stmt.setString(INDEX_HOST, this.getHost());
      stmt.setString(INDEX_PORT, this.getPort());
      stmt.setString(INDEX_USER, this.getUser());
      stmt.setString(INDEX_PASSWORD, this.getPassword());
      stmt.execute();
    }
  }

  String getIdentifier() {
    return this.identifier;
  }

  void setIdentifier(final String identifier) {
    this.identifier = identifier;
  }

  String getDriverClass() {
    return this.driverClass;
  }

  void setDriverClass(final String driverClass) {
    this.driverClass = driverClass;
  }

  String getDatabaseName() {
    return this.databaseName;
  }

  void setDatabaseName(final String databaseName) {
    this.databaseName = databaseName;
  }

  String getHost() {
    return this.host;
  }

  void setHost(final String host) {
    this.host = host;
  }

  String getPort() {
    return this.port;
  }

  void setPort(final String port) {
    this.port = port;
  }

  String getUser() {
    return this.user;
  }

  void setUser(final String user) {
    this.user = user;
  }

  String getPassword() {
    return this.password;
  }

  void setPassword(final String password) {
    this.password = password;
  }
}
| 8,251 |
0 | Create_ds/fineract-cn-test/src/main/java/org/apache/fineract/cn/test/fixture | Create_ds/fineract-cn-test/src/main/java/org/apache/fineract/cn/test/fixture/cassandra/CassandraTenant.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.fineract.cn.test.fixture.cassandra;
import com.datastax.driver.mapping.annotations.Column;
import com.datastax.driver.mapping.annotations.PartitionKey;
import com.datastax.driver.mapping.annotations.Table;

import java.util.Objects;
/**
 * Mapped entity for a row of the {@code tenants} meta table, which stores the
 * Cassandra connection settings for a single tenant. Equality is based solely
 * on the tenant identifier (the partition key).
 */
@SuppressWarnings({"WeakerAccess", "unused"})
@Table(name = "tenants")
public class CassandraTenant {

  @PartitionKey
  @Column(name = "identifier")
  private String identifier;
  @Column(name = "cluster_name")
  private String clusterName;
  @Column(name = "contact_points")
  private String contactPoints;
  @Column(name = "keyspace_name")
  private String keyspaceName;
  @Column(name = "replication_type")
  private String replicationType;
  @Column(name = "replicas")
  private String replicas;
  @Column(name = "name")
  private String name;
  @Column(name = "description")
  private String description;

  public CassandraTenant() {
    super();
  }

  public String getIdentifier() {
    return identifier;
  }

  public void setIdentifier(String identifier) {
    this.identifier = identifier;
  }

  public String getClusterName() {
    return clusterName;
  }

  public void setClusterName(String clusterName) {
    this.clusterName = clusterName;
  }

  public String getContactPoints() {
    return contactPoints;
  }

  public void setContactPoints(String contactPoints) {
    this.contactPoints = contactPoints;
  }

  public String getKeyspaceName() {
    return keyspaceName;
  }

  public void setKeyspaceName(String keyspaceName) {
    this.keyspaceName = keyspaceName;
  }

  public String getReplicationType() {
    return replicationType;
  }

  public void setReplicationType(String replicationType) {
    this.replicationType = replicationType;
  }

  public String getReplicas() {
    return replicas;
  }

  public void setReplicas(String replicas) {
    this.replicas = replicas;
  }

  public String getName() {
    return name;
  }

  public void setName(String name) {
    this.name = name;
  }

  public String getDescription() {
    return description;
  }

  public void setDescription(String description) {
    this.description = description;
  }

  @Override
  public boolean equals(Object o) {
    if (this == o) return true;
    if (o == null || getClass() != o.getClass()) return false;
    final CassandraTenant that = (CassandraTenant) o;
    // Objects.equals avoids the NullPointerException the previous
    // identifier.equals(...) threw when identifier was still unset.
    return Objects.equals(identifier, that.identifier);
  }

  @Override
  public int hashCode() {
    // Null-safe and consistent with equals.
    return Objects.hashCode(identifier);
  }
}
| 8,252 |
0 | Create_ds/fineract-cn-test/src/main/java/org/apache/fineract/cn/test/fixture | Create_ds/fineract-cn-test/src/main/java/org/apache/fineract/cn/test/fixture/cassandra/CassandraInitializer.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.fineract.cn.test.fixture.cassandra;
import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.DataType;
import com.datastax.driver.core.Session;
import com.datastax.driver.core.schemabuilder.SchemaBuilder;
import com.datastax.driver.mapping.Mapper;
import com.datastax.driver.mapping.MappingManager;
import org.apache.fineract.cn.test.env.TestEnvironment;
import org.apache.fineract.cn.test.fixture.DataStoreTenantInitializer;
import org.apache.fineract.cn.cassandra.util.ContactPointUtils;
import org.cassandraunit.utils.EmbeddedCassandraServerHelper;
import java.util.concurrent.TimeUnit;
@SuppressWarnings({"WeakerAccess", "unused"})
public final class CassandraInitializer extends DataStoreTenantInitializer {

  // When true, an externally managed Cassandra is used: no embedded server is
  // started and nothing is torn down in finish().
  private final boolean useExistingDB;
  private Cluster cluster;

  /** Defaults to managing an embedded Cassandra instance for the test run. */
  public CassandraInitializer() {
    this(false);
  }

  /**
   * @param useExistingDB true to connect to an already-running Cassandra
   *                      instead of starting (and later cleaning) an embedded one
   */
  public CassandraInitializer(boolean useExistingDB) {
    super();this.useExistingDB = useExistingDB;
  }

  /**
   * Connects to the cluster named by the test-environment system properties and,
   * unless an existing DB is used, starts embedded Cassandra and creates the
   * meta keyspace.
   */
  @Override
  public void initialize() throws Exception {
    final Cluster.Builder clusterBuilder = new Cluster.Builder()
        .withClusterName(System.getProperty(TestEnvironment.CASSANDRA_CLUSTER_NAME_PROPERTY));
    ContactPointUtils.process(clusterBuilder, System.getProperty(TestEnvironment.CASSANDRA_CONTACT_POINTS_PROPERTY));
    cluster = clusterBuilder.build();
    setup();
  }

  /** Creates the keyspace and command_source table for one tenant and registers it. */
  @Override
  public void initializeTenant(final String tenantName) {
    createKeyspaceTenant(tenantName);
  }

  /** Closes the cluster; also wipes the embedded Cassandra data unless an existing DB is used. */
  @Override
  public void finish() {
    if (cluster != null)
      cluster.close();
    if (!useExistingDB) this.tearDown();
  }

  private void setup() throws Exception {
    if (!useExistingDB) {
      startEmbeddedCassandra();
      createKeyspaceSeshat();
    }
  }

  private void tearDown() {
    EmbeddedCassandraServerHelper.cleanEmbeddedCassandra();
  }

  // 30s startup timeout for the embedded server.
  private void startEmbeddedCassandra() throws Exception {
    EmbeddedCassandraServerHelper.startEmbeddedCassandra(TimeUnit.SECONDS.toMillis(30L));
  }

  // Creates the meta keyspace (named by CASSANDRA_META_KEYSPACE_PROPERTY) and
  // its tenant-management table.
  private void createKeyspaceSeshat() {
    try (final Session session = cluster.connect()) {
      // create meta keyspace seshat
      session.execute("CREATE KEYSPACE " +
          System.getProperty(TestEnvironment.CASSANDRA_META_KEYSPACE_PROPERTY) +
          " WITH REPLICATION = {'class': 'SimpleStrategy', 'replication_factor': 1}");
      // create needed tenant management table
      final String createTenantsTable = SchemaBuilder.createTable(
          System.getProperty(TestEnvironment.CASSANDRA_META_KEYSPACE_PROPERTY), "tenants")
          .addPartitionKey("identifier", DataType.text())
          .addColumn("cluster_name", DataType.text())
          .addColumn("contact_points", DataType.text())
          .addColumn("keyspace_name", DataType.text())
          .addColumn("replication_type", DataType.text())
          .addColumn("replicas", DataType.text())
          .addColumn("name", DataType.text())
          .addColumn("description", DataType.text())
          .buildInternal();
      session.execute(createTenantsTable);
    }
  }

  // Creates the per-tenant keyspace and command_source table, then records the
  // tenant's connection info in the meta "tenants" table via the object mapper.
  private void createKeyspaceTenant(final String identifier) {
    try (final Session session = cluster.connect()) {
      // create tenant keyspace
      session.execute("CREATE KEYSPACE " + identifier
          + " WITH REPLICATION = {'class': 'SimpleStrategy', 'replication_factor': 1}");
      // create needed command source table for the new tenant
      final String createCommandSourceTable = SchemaBuilder.createTable(identifier, "command_source")
          .addPartitionKey("source", DataType.text())
          .addPartitionKey("bucket", DataType.text())
          .addClusteringColumn("created_on", DataType.timestamp())
          .addColumn("command", DataType.text())
          .addColumn("processed", DataType.cboolean())
          .addColumn("failed", DataType.cboolean())
          .addColumn("failure_message", DataType.text())
          .buildInternal();
      session.execute(createCommandSourceTable);
      // insert tenant connection info in management table
      session.execute("USE " + System.getProperty(TestEnvironment.CASSANDRA_META_KEYSPACE_PROPERTY));
      final MappingManager mappingManager = new MappingManager(session);
      final CassandraTenant cassandraTenant = new CassandraTenant();
      cassandraTenant.setIdentifier(identifier);
      cassandraTenant.setClusterName(System.getProperty(TestEnvironment.CASSANDRA_CLUSTER_NAME_PROPERTY));
      cassandraTenant.setContactPoints(System.getProperty(TestEnvironment.CASSANDRA_CONTACT_POINTS_PROPERTY));
      cassandraTenant.setKeyspaceName(identifier);
      cassandraTenant.setReplicationType("Simple");
      cassandraTenant.setReplicas("1");
      cassandraTenant.setName(identifier);
      // NOTE(review): description is intentionally left unset here — confirm callers don't rely on it.
      final Mapper<CassandraTenant> cassandraTenantMapper = mappingManager.mapper(CassandraTenant.class);
      cassandraTenantMapper.save(cassandraTenant);
    }
  }
}
| 8,253 |
0 | Create_ds/fineract-cn-test/src/main/java/org/apache/fineract/cn/test | Create_ds/fineract-cn-test/src/main/java/org/apache/fineract/cn/test/env/ExtraProperties.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.fineract.cn.test.env;
import java.util.Hashtable;
/**
 * A thread-safe String-to-String property table used to pass extra
 * configuration into a {@code TestEnvironment}, exposing a
 * {@link java.util.Properties}-style {@code setProperty} method.
 *
 * @author Myrle Krantz
 */
@SuppressWarnings("WeakerAccess")
public class ExtraProperties extends Hashtable<String, String> {

  // Hashtable is Serializable; declare an explicit serialVersionUID so
  // serialization compatibility does not depend on compiler-generated values.
  private static final long serialVersionUID = 1L;

  /**
   * Associates {@code value} with {@code key}.
   *
   * @return the previous value for {@code key}, or {@code null} if there was none
   */
  @SuppressWarnings("unused")
  public synchronized String setProperty (String key, String value ) {
    return put(key, value);
  }
}
| 8,254 |
0 | Create_ds/fineract-cn-test/src/main/java/org/apache/fineract/cn/test | Create_ds/fineract-cn-test/src/main/java/org/apache/fineract/cn/test/env/TestEnvironment.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.fineract.cn.test.env;
import org.apache.fineract.cn.lang.AutoTenantContext;
import org.apache.fineract.cn.lang.security.RsaKeyPairFactory;
import org.junit.rules.ExternalResource;
import org.springframework.util.Base64Utils;
import java.security.interfaces.RSAPrivateKey;
import java.security.interfaces.RSAPublicKey;
import java.util.Map;
import java.util.Properties;
import java.util.Random;
/**
 * JUnit {@link ExternalResource} rule which assembles the system properties and
 * RSA system key pair needed to run a fineract-cn microservice in a test.
 *
 * <p>Defaults point at an embedded Cassandra and a local PostgreSQL, and
 * disable discovery, cloud config, flyway, hystrix, and eureka-backed ribbon
 * so components run in isolation.</p>
 */
@SuppressWarnings({"WeakerAccess", "unused"})
public final class TestEnvironment extends ExternalResource {

  public static final String SPRING_APPLICATION_NAME_PROPERTY = "spring.application.name";
  public static final String SERVER_PORT_PROPERTY = "server.port";
  public static final String SERVER_PORT_DEFAULT = "9090";
  public static final String SERVER_CONTEXT_PATH_PROPERTY = "server.contextPath";
  public static final String SYSTEM_PUBLIC_KEY_TIMESTAMP_PROPERTY = "system.publicKey.timestamp";
  public static final String SYSTEM_PUBLIC_KEY_MODULUS_PROPERTY = "system.publicKey.modulus";
  public static final String SYSTEM_PUBLIC_KEY_EXPONENT_PROPERTY = "system.publicKey.exponent";
  public static final String SYSTEM_PRIVATE_KEY_MODULUS_PROPERTY = "system.privateKey.modulus";
  public static final String SYSTEM_PRIVATE_KEY_EXPONENT_PROPERTY = "system.privateKey.exponent";
  public static final String CASSANDRA_CLUSTER_NAME_PROPERTY = "cassandra.clusterName";
  public static final String CASSANDRA_CLUSTER_NAME_DEFAULT = "Test Cluster";
  public static final String CASSANDRA_CONTACT_POINTS_PROPERTY = "cassandra.contactPoints";
  public static final String CASSANDRA_CONTACT_POINTS_DEFAULT = "127.0.0.1:9142";
  public static final String CASSANDRA_META_KEYSPACE_PROPERTY = "cassandra.keyspace";
  public static final String CASSANDRA_META_KEYSPACE_DEFAULT = "seshat";
  public static final String CASSANDRA_CONSISTENCY_LEVEL_READ_PROPERTY = "cassandra.cl.read";
  public static final String CASSANDRA_CONSISTENCY_LEVEL_WRITE_PROPERTY = "cassandra.cl.write";
  public static final String CASSANDRA_CONSISTENCY_LEVEL_DELETE_PROPERTY = "cassandra.cl.delete";
  public static final String CASSANDRA_CONSISTENCY_LEVEL_DEFAULT = "ONE";
  public static final String POSTGRESQL_DRIVER_CLASS_PROPERTY = "postgresql.driverClass";
  public static final String POSTGRESQL_DRIVER_CLASS_DEFAULT = "org.postgresql.Driver";
  public static final String POSTGRESQL_DATABASE_NAME_PROPERTY = "postgresql.database";
  public static final String POSTGRESQL_DATABASE_NAME_DEFAULT = "seshat";
  public static final String POSTGRESQL_DATABASE_NAME = "postgres";
  public static final String POSTGRESQL_HOST_PROPERTY = "postgresql.host";
  public static final String POSTGRESQL_HOST_DEFAULT = "localhost";
  public static final String POSTGRESQL_PORT_PROPERTY = "postgresql.port";
  public static final String POSTGRESQL_PORT_DEFAULT = "5432";
  public static final String POSTGRESQL_USER_PROPERTY = "postgresql.user";
  public static final String POSTGRESQL_USER_DEFAULT = "postgres";
  public static final String POSTGRESQL_PASSWORD_PROPERTY = "postgresql.password";
  public static final String POSTGRESQL_PASSWORD_DEFAULT = "postgres";
  public static final String SPRING_CLOUD_DISCOVERY_ENABLED_PROPERTY = "spring.cloud.discovery.enabled";
  public static final String SPRING_CLOUD_DISCOVERY_ENABLED_DEFAULT = "false";
  public static final String SPRING_CLOUD_CONFIG_ENABLED_PROPERTY = "spring.cloud.config.enabled";
  public static final String SPRING_CLOUD_CONFIG_ENABLED_DEFAULT = "false";
  public static final String FLYWAY_ENABLED_PROPERTY = "flyway.enabled";
  public static final String FLYWAY_ENABLED_DEFAULT = "false";
  //Remove circuit breaker. We don't want a fallback when a call fails in a component test.
  public static final String HYSTRIX_ENABLED_PROPERTY = "feign.hystrix.enabled";
  public static final String HYSTRIX_ENABLED_DEFAULT = "false";
  public static final String RIBBON_USES_EUREKA_PROPERTY = "ribbon.eureka.enabled";
  public static final String RIBBON_USES_EUREKA_DEFAULT = "false";
  public static final String RIBBON_LIST_OF_SERVERS_PROPERTY = "ribbon.listOfServers";
  public static final String RIBBON_SERVER_DEFAULT = "localhost";

  /** Creates a tenant context for a freshly generated random tenant name. */
  public static AutoTenantContext createRandomTenantContext()
  {
    final String randomTenantName = getRandomTenantName();
    return new AutoTenantContext(randomTenantName);
  }

  /** @return a tenant name of the form "cleopatra" + non-negative random int */
  public static String getRandomTenantName() {
    // nextInt(bound) is guaranteed non-negative.  The previous
    // Math.abs(new Random().nextInt()) could still yield a negative suffix,
    // because Math.abs(Integer.MIN_VALUE) == Integer.MIN_VALUE.
    return "cleopatra" + new Random().nextInt(Integer.MAX_VALUE);
  }

  /** Base64-encodes a password the way the identity service expects it. */
  public static String encodePassword(final String password) {
    return Base64Utils.encodeToString(password.getBytes());
  }

  // Package-private so co-located test helpers can inspect the raw properties.
  Properties properties;
  private RsaKeyPairFactory.KeyPairHolder keyPairHolder;
  // Monotonic counter backing generateUniqueIdentifier.
  private int uniquenessSuffix = 0;

  public TestEnvironment(final String springApplicationName) {
    super();
    this.initialize(springApplicationName);
  }

  @Override
  protected void before() {
    // initialize test environment and populate default properties
    populate();
  }

  /** Merges the given extra properties into this environment; returns this for chaining. */
  public TestEnvironment addProperties(final ExtraProperties properties) {
    properties.entrySet().forEach(x -> setProperty(x.getKey(), x.getValue()));
    return this;
  }

  public String generateUniqueIdentifier(final String prefix) {
    return generateUniqueIdentifier(prefix, 1);
  }

  //prefix followed by a positive number.
  public String generateUniqueIdentifier(final String prefix, final int minimumDigitCount) {
    uniquenessSuffix++;
    final String format = String.format("%%0%dd", minimumDigitCount);
    return prefix + String.format(format, uniquenessSuffix);
  }

  public void setContextPath(final String contextPath) {
    this.properties.setProperty(SERVER_CONTEXT_PATH_PROPERTY, contextPath);
  }

  /** @return the base URI of the service under test, e.g. http://localhost:9090/foo */
  public String serverURI() {
    return "http://localhost:" +
        this.properties.getProperty(TestEnvironment.SERVER_PORT_PROPERTY) +
        this.properties.getProperty(TestEnvironment.SERVER_CONTEXT_PATH_PROPERTY);
  }

  public String getSystemKeyTimestamp() {
    return keyPairHolder.getTimestamp();
  }

  public RSAPublicKey getSystemPublicKey() {
    return keyPairHolder.publicKey();
  }

  public RSAPrivateKey getSystemPrivateKey() {
    return keyPairHolder.privateKey();
  }

  /** Replaces the generated system key pair and publishes the public half as properties. */
  public void setKeyPair(final String timestamp, final RSAPublicKey publicKey, final RSAPrivateKey privateKey)
  {
    this.keyPairHolder = new RsaKeyPairFactory.KeyPairHolder(timestamp, publicKey, privateKey);
    this.properties.setProperty(SYSTEM_PUBLIC_KEY_TIMESTAMP_PROPERTY, getSystemKeyTimestamp());
    this.properties.setProperty(SYSTEM_PUBLIC_KEY_MODULUS_PROPERTY, publicKey.getModulus().toString());
    this.properties.setProperty(SYSTEM_PUBLIC_KEY_EXPONENT_PROPERTY, publicKey.getPublicExponent().toString());
  }

  /** Publishes the private key as properties (needed by services that sign system tokens). */
  public void addSystemPrivateKeyToProperties()
  {
    setProperty(SYSTEM_PUBLIC_KEY_TIMESTAMP_PROPERTY, getSystemKeyTimestamp());
    setProperty(SYSTEM_PRIVATE_KEY_MODULUS_PROPERTY, getSystemPrivateKey().getModulus().toString());
    setProperty(SYSTEM_PRIVATE_KEY_EXPONENT_PROPERTY, getSystemPrivateKey().getPrivateExponent().toString());
  }

  public void setProperty(final String key, final String value) {
    this.properties.setProperty(key, value);
  }

  public String getProperty(final String key) {
    return this.properties.getProperty(key);
  }

  /** Copies all configured properties into the JVM-wide system properties. */
  public void populate() {
    System.getProperties().putAll(this.properties);
  }

  /** Exports all configured properties into the environment of a child process. */
  public void populateProcessEnvironment(final ProcessBuilder processBuilder) {
    properties.entrySet().forEach(entry -> populateVariable(processBuilder, entry));
  }

  private String populateVariable(final ProcessBuilder processBuilder, final Map.Entry<Object, Object> entry) {
    return processBuilder.environment().put(entry.getKey().toString(), entry.getValue().toString());
  }

  // Installs all defaults and generates a fresh system RSA key pair.
  private void initialize(final String springApplicationName) {
    this.properties = new Properties();
    this.properties.setProperty(SPRING_APPLICATION_NAME_PROPERTY, springApplicationName);
    this.properties.setProperty(SERVER_PORT_PROPERTY, SERVER_PORT_DEFAULT);
    // e.g. application name "identity-v1" maps to context path "/identity/v1".
    this.properties.setProperty(SERVER_CONTEXT_PATH_PROPERTY, "/" + springApplicationName.replace("-", "/"));
    this.properties.setProperty(CASSANDRA_CLUSTER_NAME_PROPERTY, CASSANDRA_CLUSTER_NAME_DEFAULT);
    this.properties.setProperty(CASSANDRA_CONTACT_POINTS_PROPERTY, CASSANDRA_CONTACT_POINTS_DEFAULT);
    this.properties.setProperty(CASSANDRA_META_KEYSPACE_PROPERTY, CASSANDRA_META_KEYSPACE_DEFAULT);
    this.properties.setProperty(CASSANDRA_CONSISTENCY_LEVEL_READ_PROPERTY, CASSANDRA_CONSISTENCY_LEVEL_DEFAULT);
    this.properties.setProperty(CASSANDRA_CONSISTENCY_LEVEL_WRITE_PROPERTY, CASSANDRA_CONSISTENCY_LEVEL_DEFAULT);
    this.properties.setProperty(CASSANDRA_CONSISTENCY_LEVEL_DELETE_PROPERTY, CASSANDRA_CONSISTENCY_LEVEL_DEFAULT);
    this.properties.setProperty(POSTGRESQL_DRIVER_CLASS_PROPERTY, POSTGRESQL_DRIVER_CLASS_DEFAULT);
    this.properties.setProperty(POSTGRESQL_DATABASE_NAME_PROPERTY, POSTGRESQL_DATABASE_NAME_DEFAULT);
    this.properties.setProperty(POSTGRESQL_HOST_PROPERTY, POSTGRESQL_HOST_DEFAULT);
    this.properties.setProperty(POSTGRESQL_PORT_PROPERTY, POSTGRESQL_PORT_DEFAULT);
    this.properties.setProperty(POSTGRESQL_USER_PROPERTY, POSTGRESQL_USER_DEFAULT);
    this.properties.setProperty(POSTGRESQL_PASSWORD_PROPERTY, POSTGRESQL_PASSWORD_DEFAULT);
    this.properties.setProperty(SPRING_CLOUD_DISCOVERY_ENABLED_PROPERTY, SPRING_CLOUD_DISCOVERY_ENABLED_DEFAULT);
    this.properties.setProperty(SPRING_CLOUD_CONFIG_ENABLED_PROPERTY, SPRING_CLOUD_CONFIG_ENABLED_DEFAULT);
    this.properties.setProperty(FLYWAY_ENABLED_PROPERTY, FLYWAY_ENABLED_DEFAULT);
    this.properties.setProperty(HYSTRIX_ENABLED_PROPERTY, HYSTRIX_ENABLED_DEFAULT);
    this.properties.setProperty(RIBBON_USES_EUREKA_PROPERTY, RIBBON_USES_EUREKA_DEFAULT);
    this.properties.setProperty(RIBBON_LIST_OF_SERVERS_PROPERTY, RIBBON_SERVER_DEFAULT + ":" + SERVER_PORT_DEFAULT);
    this.keyPairHolder = RsaKeyPairFactory.createKeyPair();
    this.properties.setProperty(SYSTEM_PUBLIC_KEY_TIMESTAMP_PROPERTY, this.keyPairHolder.getTimestamp());
    this.properties.setProperty(SYSTEM_PUBLIC_KEY_MODULUS_PROPERTY, this.keyPairHolder.publicKey().getModulus().toString());
    this.properties.setProperty(SYSTEM_PUBLIC_KEY_EXPONENT_PROPERTY, this.keyPairHolder.publicKey().getPublicExponent().toString());
  }
}
| 8,255 |
0 | Create_ds/fineract-cn-test/src/main/java/org/apache/fineract/cn/test | Create_ds/fineract-cn-test/src/main/java/org/apache/fineract/cn/test/domain/DateStampChecker.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.fineract.cn.test.domain;
import org.apache.fineract.cn.lang.DateConverter;
import org.junit.Assert;
import java.time.Clock;
import java.time.LocalDate;
import java.time.temporal.ChronoUnit;
/**
 * Support class for testing that a returned date stamp lies within an expected
 * distance (in days) of an expected date.
 *
 * @author Myrle Krantz
 */
@SuppressWarnings("WeakerAccess")
public class DateStampChecker {
  private final LocalDate expectedDateStamp;
  // Maximum allowed difference, in whole days, between parsed and expected date.
  private final int maximumDelta;

  /** Creates a checker expecting a date {@code offset} days from today (UTC), exactly. */
  public static DateStampChecker inTheFuture(final int offset)
  {
    return new DateStampChecker(LocalDate.now(Clock.systemUTC()).plusDays(offset), 0);
  }

  private DateStampChecker(final LocalDate expectedDateStamp, final int maximumDelta) {
    this.expectedDateStamp = expectedDateStamp;
    this.maximumDelta = maximumDelta;
  }

  /** Fails the test (with a descriptive message) unless {@code dateStamp} is within tolerance. */
  public void assertCorrect(final String dateStamp)
  {
    Assert.assertTrue("Delta from expected should have been less than " +
            maximumDelta + ". Timestamp string was " + dateStamp + ".",
        isCorrect(dateStamp));
  }

  /**
   * @param dateStamp an ISO-formatted date string
   * @return true if the parsed date is within {@code maximumDelta} days of the expected date
   */
  public boolean isCorrect(final String dateStamp) {
    final LocalDate parsedDateStamp = DateConverter.dateFromIsoString(dateStamp);
    // Compare the magnitude of the difference.  Without Math.abs, a date
    // *after* the expected date produced a negative delta and passed the check
    // no matter how far off it was (this also matches TimeStampChecker, which
    // already takes the absolute value).
    final long deltaFromExpected = Math.abs(parsedDateStamp.until(expectedDateStamp, ChronoUnit.DAYS));
    return deltaFromExpected <= maximumDelta;
  }
}
| 8,256 |
0 | Create_ds/fineract-cn-test/src/main/java/org/apache/fineract/cn/test | Create_ds/fineract-cn-test/src/main/java/org/apache/fineract/cn/test/domain/ValidationTestCase.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.fineract.cn.test.domain;
import javax.validation.ConstraintViolation;
import javax.validation.Validation;
import javax.validation.Validator;
import javax.validation.ValidatorFactory;
import java.util.Set;
import java.util.function.Consumer;
/**
 * Support class for testing correct validation of API domain objects.
 * A test case applies an adjustment to a known-valid object and states whether
 * the adjusted object should still pass bean validation.
 *
 * @author Myrle Krantz
 *
 * @param <T> the type of domain object being tested.
 */
@SuppressWarnings("WeakerAccess")
public class ValidationTestCase<T> {
  private final String description;
  private boolean valid = true;
  private Consumer<T> adjustment = x -> {};

  public ValidationTestCase(final String description)
  {
    this.description = description;
  }

  /**
   * Sets the mutation applied to the test subject before validation.
   *
   * @return this test case, for fluent chaining (previously declared with a
   *         raw {@code ValidationTestCase} return type, which lost the type
   *         parameter for chained calls)
   */
  public ValidationTestCase<T> adjustment(final Consumer<T> adjustment) {
    this.adjustment = adjustment;
    return this;
  }

  /**
   * Declares whether the adjusted subject is expected to validate.
   *
   * @return this test case, for fluent chaining
   */
  public ValidationTestCase<T> valid(final boolean newVal) {
    valid = newVal;
    return this;
  }

  public Consumer<T> getAdjustment() {
    return adjustment;
  }

  /** @return true when the subject's validation outcome matches the expectation. */
  public boolean check(final T testSubject) {
    final ValidatorFactory factory = Validation.buildDefaultValidatorFactory();
    final Validator validator = factory.getValidator();
    final Set<ConstraintViolation<T>> errors = validator.validate(testSubject);
    // A "valid" case expects zero violations; an "invalid" case expects at least one.
    return valid == errors.isEmpty();
  }

  @Override
  public String toString() {
    return "TestCase{" +
        "description='" + description + '\'' +
        '}';
  }

  /** Applies the configured adjustment to the given subject. */
  public void applyAdjustment(final T testSubject) {
    adjustment.accept(testSubject);
  }
}
| 8,257 |
0 | Create_ds/fineract-cn-test/src/main/java/org/apache/fineract/cn/test | Create_ds/fineract-cn-test/src/main/java/org/apache/fineract/cn/test/domain/TimeStampChecker.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.fineract.cn.test.domain;
import org.apache.fineract.cn.lang.DateConverter;
import org.junit.Assert;
import java.time.Clock;
import java.time.Duration;
import java.time.LocalDateTime;
import java.time.temporal.ChronoUnit;
/**
* Support class for testing that the correct time stamp is returned
*
* @author Myrle Krantz
*/
@SuppressWarnings("WeakerAccess")
public class TimeStampChecker {
private static final int DEFAULT_MAXIMUM_DELTA = 2;
private final LocalDateTime expectedTimeStamp;
private final Duration maximumDelta;
public static TimeStampChecker roughlyNow()
{
return new TimeStampChecker(LocalDateTime.now(Clock.systemUTC()), Duration.ofSeconds(DEFAULT_MAXIMUM_DELTA));
}
public static TimeStampChecker inTheFuture(final Duration offset)
{
return inTheFutureWithWiggleRoom(offset, Duration.ofSeconds(DEFAULT_MAXIMUM_DELTA));
}
public static TimeStampChecker inTheFutureWithWiggleRoom(final Duration offset, final Duration maximumDelta)
{
return new TimeStampChecker(LocalDateTime.now(Clock.systemUTC()).plus(offset), maximumDelta);
}
public static TimeStampChecker allowSomeWiggleRoom(final Duration maximumDelta)
{
return new TimeStampChecker(LocalDateTime.now(Clock.systemUTC()), maximumDelta);
}
private TimeStampChecker(final LocalDateTime expectedTimeStamp, final Duration maximumDelta) {
this.expectedTimeStamp = expectedTimeStamp;
this.maximumDelta = maximumDelta;
}
public void assertCorrect(final String timeStamp)
{
Assert.assertTrue("Delta from expected should have been less than " +
maximumDelta + ". Timestamp string was " + timeStamp + ".",
isCorrect(timeStamp));
}
public boolean isCorrect(final String timeStamp) {
final LocalDateTime parsedTimeStamp = DateConverter.fromIsoString(timeStamp);
return isCorrect(parsedTimeStamp);
}
public void assertCorrect(final LocalDateTime localDateTime)
{
Assert.assertTrue("Delta from expected should have been less than " +
maximumDelta + ". LocalDateTime was " + localDateTime + ".",
isCorrect(localDateTime));
}
public boolean isCorrect(final LocalDateTime localDateTime) {
final Duration deltaFromExpected = Duration.ofNanos(Math.abs(
localDateTime.until(expectedTimeStamp, ChronoUnit.NANOS)));
return deltaFromExpected.compareTo(maximumDelta) < 0;
}
} | 8,258 |
0 | Create_ds/fineract-cn-test/src/main/java/org/apache/fineract/cn/test | Create_ds/fineract-cn-test/src/main/java/org/apache/fineract/cn/test/domain/ValidationTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.fineract.cn.test.domain;
import org.junit.Assert;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
/**
 * Base class for parameterized tests that validate API domain objects.
 * A subclass supplies a known-valid prototype via
 * {@link #createValidTestSubject()}; each {@link ValidationTestCase} mutates
 * that prototype and declares whether the result should still validate.
 *
 * @author Myrle Krantz
 *
 * @param <T> the type of domain object being tested.
 */
@SuppressWarnings("unused")
@RunWith(Parameterized.class)
public abstract class ValidationTest<T> {
  private final ValidationTestCase<T> testCase;

  public ValidationTest(final ValidationTestCase<T> testCase)
  {
    this.testCase = testCase;
  }

  @Test()
  public void test(){
    final T subject = createValidTestSubject();
    testCase.applyAdjustment(subject);
    Assert.assertTrue(testCase.toString(), testCase.check(subject));
  }

  @SuppressWarnings("WeakerAccess")
  protected abstract T createValidTestSubject();
}
| 8,259 |
0 | Create_ds/universal-test-runner/tests-integ/test-projects/maven/src/test/java/com/amazonaws | Create_ds/universal-test-runner/tests-integ/test-projects/maven/src/test/java/com/amazonaws/testmaven/AppTest.java | package com.amazonaws.testmaven;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;

import org.junit.Test;
/** Unit tests for {@link App}. */
public class AppTest
{
    @Test
    public void test1()
    {
        // assertEquals reports expected vs. actual values on failure,
        // unlike assertTrue over a boolean expression.
        assertEquals( 2, 1 + 1 );
    }

    @Test
    public void test2()
    {
        assertEquals( 4, 2 + 2 );
    }

    @Test
    public void test3()
    {
        assertEquals( 6, 3 + 3 );
    }
}
| 8,260 |
0 | Create_ds/universal-test-runner/tests-integ/test-projects/maven/src/test/java/com/amazonaws | Create_ds/universal-test-runner/tests-integ/test-projects/maven/src/test/java/com/amazonaws/testmaven2/AppTest2.java | package com.amazonaws.testmaven2;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;

import org.junit.Test;
/** Unit tests for the testmaven2 module. */
public class AppTest2
{
    @Test
    public void test1()
    {
        // assertEquals reports expected vs. actual values on failure,
        // unlike assertTrue over a boolean expression.
        assertEquals( 2, 1 + 1 );
    }

    @Test
    public void test2()
    {
        assertEquals( 4, 2 + 2 );
    }

    @Test
    public void test3()
    {
        assertEquals( 6, 3 + 3 );
    }
}
| 8,261 |
0 | Create_ds/universal-test-runner/tests-integ/test-projects/maven/src/main/java/com/amazonaws | Create_ds/universal-test-runner/tests-integ/test-projects/maven/src/main/java/com/amazonaws/testmaven/App.java | package com.amazonaws.testmaven;
/**
* Hello world!
*
*/
/**
 * Minimal demo application: prints a greeting to standard out.
 */
public class App
{
    public static void main( String[] args )
    {
        final String greeting = "Hello World!";
        System.out.println( greeting );
    }
}
| 8,262 |
0 | Create_ds/universal-test-runner/tests-integ/test-projects/gradle/app/src/test/java | Create_ds/universal-test-runner/tests-integ/test-projects/gradle/app/src/test/java/gradle/AppTest.java | /*
* This Java source file was generated by the Gradle 'init' task.
*/
package gradle;
import org.junit.jupiter.api.Test;
import static org.junit.jupiter.api.Assertions.*;
/** Smoke tests for basic arithmetic, mirroring the Gradle 'init' template. */
class AppTest {

    @Test
    void test1() {
        final int sum = 1 + 1;
        assertTrue(sum == 2);
    }

    @Test
    void test2() {
        final int sum = 2 + 2;
        assertTrue(sum == 4);
    }

    @Test
    void test3() {
        final int sum = 3 + 3;
        assertTrue(sum == 6);
    }
}
| 8,263 |
0 | Create_ds/universal-test-runner/tests-integ/test-projects/gradle/app/src/main/java | Create_ds/universal-test-runner/tests-integ/test-projects/gradle/app/src/main/java/gradle/App.java | /*
* This Java source file was generated by the Gradle 'init' task.
*/
package gradle;
/**
 * Sample application generated by the Gradle 'init' task; prints a greeting.
 */
public class App {

    /** Returns the fixed greeting shown by this sample. */
    public String getGreeting() {
        return "Hello World!";
    }

    /** Entry point: prints the greeting to standard out. */
    public static void main(String[] args) {
        final App app = new App();
        System.out.println(app.getGreeting());
    }
}
| 8,264 |
0 | Create_ds/accumulo-bsp/src/test/java/org/apache/accumulo/core/client | Create_ds/accumulo-bsp/src/test/java/org/apache/accumulo/core/client/mapreduce/AccumuloInputFormatIT.java | /**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.accumulo.core.client.mapreduce;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.fail;
import java.io.IOException;
import java.nio.charset.Charset;
import org.apache.accumulo.bsp.AccumuloInputFormat;
import org.apache.accumulo.bsp.MapreduceWrapper;
import org.apache.accumulo.core.client.BatchWriter;
import org.apache.accumulo.core.client.BatchWriterConfig;
import org.apache.accumulo.core.client.Connector;
import org.apache.accumulo.core.client.mock.MockInstance;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Mutation;
import org.apache.accumulo.core.data.Value;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hama.bsp.BSP;
import org.apache.hama.bsp.BSPJob;
import org.apache.hama.bsp.BSPPeer;
import org.apache.hama.bsp.InputSplit;
import org.apache.hama.bsp.sync.SyncException;
import org.apache.hama.util.KeyValuePair;
import org.junit.Test;
/**
*
*/
/**
 * Integration test: writes 100 rows into a mock Accumulo table, reads them back
 * through the Hama-BSP {@link AccumuloInputFormat}, and verifies every record
 * arrives in order inside the BSP task.
 */
public class AccumuloInputFormatIT {

  /**
   * BSP task that consumes the input records and asserts their contents.
   * Row {@code i + 1} is expected to carry value {@code i} (both zero-padded
   * hex), matching the data written by {@link #testBSPInputFormat()}.
   */
  static class InputFormatTestBSP<M extends Writable> extends BSP<Key,Value,Key,Value,M> {
    // Previously seen key; each value must equal the row id of the prior record.
    Key key = null;
    // Number of records consumed; must reach exactly 100 by the end of the superstep.
    int count = 0;

    @Override
    public void bsp(BSPPeer<Key,Value,Key,Value,M> peer) throws IOException, SyncException, InterruptedException {
      // this method reads the next key value record from file
      KeyValuePair<Key,Value> pair;
      while ((pair = peer.readNext()) != null) {
        if (key != null) {
          // NOTE(review): new String(byte[]) uses the platform charset here — verify UTF-8 is intended.
          assertEquals(key.getRow().toString(), new String(pair.getValue().get()));
        }
        // Row ids are 1-based, values are 0-based — both zero-padded hex.
        assertEquals(pair.getKey().getRow(), new Text(String.format("%09x", count + 1)));
        assertEquals(new String(pair.getValue().get()), String.format("%09x", count));
        count++;
        // Defensive copy: the reader may reuse the Key instance on the next call.
        key = new Key(pair.getKey());
      }
      peer.sync();
      assertEquals(100, count);
    }
  }

  /**
   * Populates a mock instance, configures a BSP job that reads the table via
   * {@link AccumuloInputFormat}, and runs it to completion.
   */
  @Test
  public void testBSPInputFormat() throws Exception {
    MockInstance mockInstance = new MockInstance("testmapinstance");
    Connector c = mockInstance.getConnector("root", new byte[] {});
    // Start from a clean slate in case a prior run left the table behind.
    if (c.tableOperations().exists("testtable"))
      c.tableOperations().delete("testtable");
    c.tableOperations().create("testtable");
    BatchWriter bw = c.createBatchWriter("testtable", new BatchWriterConfig());
    // Row (i+1 in hex) -> value (i in hex); the BSP task re-derives this pattern.
    for (int i = 0; i < 100; i++) {
      Mutation m = new Mutation(new Text(String.format("%09x", i + 1)));
      m.put(new Text(), new Text(), new Value(String.format("%09x", i).getBytes()));
      bw.addMutation(m);
    }
    bw.close();
    BSPJob bspJob = new BSPJob();
    // The wrapped Job shares the BSPJob's Configuration, so the static
    // AccumuloInputFormat setters below configure the BSP job.
    Job job = MapreduceWrapper.wrappedJob(bspJob);
    bspJob.setInputFormat(AccumuloInputFormat.class);
    bspJob.setBspClass(InputFormatTestBSP.class);
    bspJob.setInputPath(new Path("test"));
    AccumuloInputFormat.setConnectorInfo(job, "root", "".getBytes(Charset.forName("UTF-8")));
    AccumuloInputFormat.setInputTableName(job, "testtable");
    AccumuloInputFormat.setMockInstance(job, "testmapinstance");
    AccumuloInputFormat input = new AccumuloInputFormat();
    InputSplit[] splits = input.getSplits(bspJob, 0);
    // NOTE(review): arguments are reversed — JUnit expects assertEquals(expected, actual).
    assertEquals(splits.length, 1);
    bspJob.setJar("target/integration-tests.jar");
    bspJob.setOutputPath(new Path("target/bsp-inputformat-test"));
    if (!bspJob.waitForCompletion(false))
      fail("Job not finished successfully");
  }
}
| 8,265 |
0 | Create_ds/accumulo-bsp/src/test/java/org/apache/accumulo/core/client | Create_ds/accumulo-bsp/src/test/java/org/apache/accumulo/core/client/mapreduce/AccumuloOutputFormatIT.java | /**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.accumulo.core.client.mapreduce;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.io.IOException;
import java.nio.charset.Charset;
import java.util.Iterator;
import java.util.Map.Entry;
import org.apache.accumulo.bsp.AccumuloInputFormat;
import org.apache.accumulo.bsp.AccumuloOutputFormat;
import org.apache.accumulo.bsp.MapreduceWrapper;
import org.apache.accumulo.core.client.BatchWriter;
import org.apache.accumulo.core.client.BatchWriterConfig;
import org.apache.accumulo.core.client.Connector;
import org.apache.accumulo.core.client.Scanner;
import org.apache.accumulo.core.client.mock.MockInstance;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Mutation;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.security.Authorizations;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hama.HamaConfiguration;
import org.apache.hama.bsp.BSP;
import org.apache.hama.bsp.BSPJob;
import org.apache.hama.bsp.BSPPeer;
import org.apache.hama.bsp.InputSplit;
import org.apache.hama.bsp.sync.SyncException;
import org.apache.hama.util.KeyValuePair;
import org.junit.Test;
/**
*
*/
/**
 * Integration test: reads 100 rows from one mock Accumulo table inside a BSP
 * job and, via the Hama-BSP {@link AccumuloOutputFormat}, writes the record
 * count into a second table, which is then verified with a scanner.
 */
public class AccumuloOutputFormatIT {

  /**
   * BSP task that validates each input record and, in {@link #cleanup}, emits
   * a single "total" row holding the number of records it consumed.
   */
  static class OutputFormatTestBSP<M extends Writable> extends BSP<Key,Value,Text,Mutation,M> {
    // Previously seen key; each value must equal the row id of the prior record.
    Key key = null;
    // Number of records consumed; written out as the "total" row in cleanup().
    int count = 0;

    @Override
    public void bsp(BSPPeer<Key,Value,Text,Mutation,M> peer) throws IOException, SyncException, InterruptedException {
      // this method reads the next key value record from file
      KeyValuePair<Key,Value> pair;
      while ((pair = peer.readNext()) != null) {
        if (key != null) {
          // NOTE(review): new String(byte[]) uses the platform charset here — verify UTF-8 is intended.
          assertEquals(key.getRow().toString(), new String(pair.getValue().get()));
        }
        // Row ids are 1-based, values are 0-based — both zero-padded hex.
        assertEquals(pair.getKey().getRow(), new Text(String.format("%09x", count + 1)));
        assertEquals(new String(pair.getValue().get()), String.format("%09x", count));
        count++;
        // Defensive copy: the reader may reuse the Key instance on the next call.
        key = new Key(pair.getKey());
      }
      peer.sync();
    }

    /** Emits the final record count to "testtable2" through the output format. */
    @Override
    public void cleanup(BSPPeer<Key,Value,Text,Mutation,M> peer) throws IOException {
      Mutation m = new Mutation("total");
      m.put("", "", Integer.toString(count));
      peer.write(new Text("testtable2"), m);
    }
  }

  /**
   * Populates the input table, runs the BSP job with Accumulo input and output
   * formats, then scans the output table and asserts a single "total" = 100 row.
   */
  @Test
  public void testBSPOutputFormat() throws Exception {
    MockInstance mockInstance = new MockInstance("testmrinstance");
    Connector c = mockInstance.getConnector("root", new byte[] {});
    // Start from a clean slate in case a prior run left the tables behind.
    if (c.tableOperations().exists("testtable1"))
      c.tableOperations().delete("testtable1");
    if (c.tableOperations().exists("testtable2"))
      c.tableOperations().delete("testtable2");
    c.tableOperations().create("testtable1");
    c.tableOperations().create("testtable2");
    BatchWriter bw = c.createBatchWriter("testtable1", new BatchWriterConfig());
    // Row (i+1 in hex) -> value (i in hex); the BSP task re-derives this pattern.
    for (int i = 0; i < 100; i++) {
      Mutation m = new Mutation(new Text(String.format("%09x", i + 1)));
      m.put(new Text(), new Text(), new Value(String.format("%09x", i).getBytes()));
      bw.addMutation(m);
    }
    bw.close();
    Configuration conf = new Configuration();
    BSPJob bspJob = new BSPJob(new HamaConfiguration(conf));
    bspJob.setJobName("Test Input Output");
    bspJob.setBspClass(OutputFormatTestBSP.class);
    bspJob.setInputFormat(AccumuloInputFormat.class);
    bspJob.setInputPath(new Path("test"));
    bspJob.setOutputFormat(AccumuloOutputFormat.class);
    bspJob.setJar("target/integration-tests.jar");
    bspJob.setOutputPath(new Path("target/bsp-outputformat-test"));
    bspJob.setOutputKeyClass(Text.class);
    bspJob.setOutputValueClass(Mutation.class);
    // The wrapped Job shares the BSPJob's Configuration, so the static
    // input/output-format setters below configure the BSP job.
    Job job = MapreduceWrapper.wrappedJob(bspJob);
    AccumuloInputFormat.setConnectorInfo(job, "root", "".getBytes(Charset.forName("UTF-8")));
    AccumuloInputFormat.setInputTableName(job, "testtable1");
    AccumuloInputFormat.setMockInstance(job, "testmrinstance");
    AccumuloOutputFormat.setConnectorInfo(job, "root", "".getBytes(Charset.forName("UTF-8")));
    AccumuloOutputFormat.setDefaultTableName(job, "testtable2");
    AccumuloOutputFormat.setMockInstance(job, "testmrinstance");
    AccumuloInputFormat input = new AccumuloInputFormat();
    InputSplit[] splits = input.getSplits(bspJob, 0);
    // NOTE(review): arguments are reversed — JUnit expects assertEquals(expected, actual).
    assertEquals(splits.length, 1);
    if (!bspJob.waitForCompletion(false))
      fail("Job not finished successfully");
    // Verify that cleanup() wrote exactly one "total" row with the full count.
    Scanner scanner = c.createScanner("testtable2", new Authorizations());
    Iterator<Entry<Key,Value>> iter = scanner.iterator();
    assertTrue(iter.hasNext());
    Entry<Key,Value> entry = iter.next();
    assertEquals("total", entry.getKey().getRow().toString());
    assertEquals(100, Integer.parseInt(new String(entry.getValue().get())));
    assertFalse(iter.hasNext());
  }
}
| 8,266 |
0 | Create_ds/accumulo-bsp/src/test/java/org/apache/accumulo/core/client | Create_ds/accumulo-bsp/src/test/java/org/apache/accumulo/core/client/mapreduce/AccumuloInputFormatTest.java | /**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.accumulo.core.client.mapreduce;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import java.io.IOException;
import java.util.List;
import java.util.Map;
import org.apache.accumulo.bsp.AccumuloInputFormat;
import org.apache.accumulo.bsp.MapreduceWrapper;
import org.apache.accumulo.core.client.IteratorSetting;
import org.apache.accumulo.core.iterators.user.RegExFilter;
import org.apache.accumulo.core.iterators.user.WholeRowIterator;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hama.bsp.BSPJob;
import org.junit.Test;
/**
*
*/
/**
 * Unit tests for configuring iterators on a BSP job through
 * {@link AccumuloInputFormat}'s static configurator methods, using
 * {@link MapreduceWrapper} to bridge {@link BSPJob} to the mapreduce API.
 */
public class AccumuloInputFormatTest {

  /** A single iterator set on the job must round-trip through getIterators(). */
  @Test
  public void testSetIterator() throws IOException {
    BSPJob bspJob = new BSPJob();
    Job job = MapreduceWrapper.wrappedJob(bspJob);
    AccumuloInputFormat.addIterator(job, new IteratorSetting(1, "WholeRow", "org.apache.accumulo.core.iterators.WholeRowIterator"));
    TaskAttemptContext context = MapreduceWrapper.wrappedTaskAttemptContext(bspJob);
    List<IteratorSetting> iterators = AccumuloInputFormat.getIterators(context);
    assertEquals(1, iterators.size());
    IteratorSetting iter = iterators.get(0);
    assertEquals(1, iter.getPriority());
    assertEquals("org.apache.accumulo.core.iterators.WholeRowIterator", iter.getIteratorClass());
    assertEquals("WholeRow", iter.getName());
    assertEquals(0, iter.getOptions().size());
  }

  /** Multiple iterators (with options) must round-trip in insertion order. */
  @Test
  public void testAddIterator() throws IOException {
    BSPJob bspJob = new BSPJob();
    Job job = MapreduceWrapper.wrappedJob(bspJob);
    AccumuloInputFormat.addIterator(job, new IteratorSetting(1, "WholeRow", WholeRowIterator.class));
    AccumuloInputFormat.addIterator(job, new IteratorSetting(2, "Versions", "org.apache.accumulo.core.iterators.VersioningIterator"));
    IteratorSetting iter = new IteratorSetting(3, "Count", "org.apache.accumulo.core.iterators.CountingIterator");
    iter.addOption("v1", "1");
    // Deliberately awkward option value: NUL byte plus escape-prone characters.
    iter.addOption("junk", "\0omg:!\\xyzzy");
    AccumuloInputFormat.addIterator(job, iter);
    TaskAttemptContext context = MapreduceWrapper.wrappedTaskAttemptContext(bspJob);
    List<IteratorSetting> list = AccumuloInputFormat.getIterators(context);
    // Check the list size. (assertEquals instead of assertTrue(x == y)
    // so a failure reports the actual size.)
    assertEquals(3, list.size());
    // Walk the list and make sure our settings are correct
    IteratorSetting setting = list.get(0);
    assertEquals(1, setting.getPriority());
    assertEquals("org.apache.accumulo.core.iterators.user.WholeRowIterator", setting.getIteratorClass());
    assertEquals("WholeRow", setting.getName());
    setting = list.get(1);
    assertEquals(2, setting.getPriority());
    assertEquals("org.apache.accumulo.core.iterators.VersioningIterator", setting.getIteratorClass());
    assertEquals("Versions", setting.getName());
    setting = list.get(2);
    assertEquals(3, setting.getPriority());
    assertEquals("org.apache.accumulo.core.iterators.CountingIterator", setting.getIteratorClass());
    assertEquals("Count", setting.getName());
    Map<String,String> iteratorOptions = setting.getOptions();
    assertEquals(2, iteratorOptions.size());
    assertTrue(iteratorOptions.containsKey("v1"));
    assertEquals("1", iteratorOptions.get("v1"));
    assertTrue(iteratorOptions.containsKey("junk"));
    assertEquals("\0omg:!\\xyzzy", iteratorOptions.get("junk"));
  }

  /**
   * Option keys and values containing the configuration delimiters (colons,
   * commas) must be encoded and decoded without loss.
   */
  @Test
  public void testIteratorOptionEncoding() throws IOException {
    BSPJob bspJob = new BSPJob();
    String key = "colon:delimited:key";
    String value = "comma,delimited,value";
    Job job = MapreduceWrapper.wrappedJob(bspJob);
    IteratorSetting someSetting = new IteratorSetting(1, "iterator", "Iterator.class");
    someSetting.addOption(key, value);
    AccumuloInputFormat.addIterator(job, someSetting);
    TaskAttemptContext context = MapreduceWrapper.wrappedTaskAttemptContext(bspJob);
    List<IteratorSetting> iters = AccumuloInputFormat.getIterators(context);
    assertEquals(1, iters.size());
    assertEquals("iterator", iters.get(0).getName());
    assertEquals("Iterator.class", iters.get(0).getIteratorClass());
    assertEquals(1, iters.get(0).getPriority());
    Map<String,String> opts = iters.get(0).getOptions();
    assertEquals(1, opts.size());
    assertTrue(opts.containsKey(key));
    assertEquals(value, opts.get(key));
    // Adding a second iterator must not disturb the first one's options.
    someSetting.addOption(key + "2", value);
    someSetting.setPriority(2);
    someSetting.setName("it2");
    AccumuloInputFormat.addIterator(job, someSetting);
    context = MapreduceWrapper.wrappedTaskAttemptContext(bspJob);
    iters = AccumuloInputFormat.getIterators(context);
    assertEquals(2, iters.size());
    assertEquals("iterator", iters.get(0).getName());
    assertEquals("Iterator.class", iters.get(0).getIteratorClass());
    assertEquals(1, iters.get(0).getPriority());
    opts = iters.get(0).getOptions();
    assertEquals(1, opts.size());
    assertTrue(opts.containsKey(key));
    assertEquals(value, opts.get(key));
    assertEquals("it2", iters.get(1).getName());
    assertEquals("Iterator.class", iters.get(1).getIteratorClass());
    assertEquals(2, iters.get(1).getPriority());
    opts = iters.get(1).getOptions();
    assertEquals(2, opts.size());
    assertTrue(opts.containsKey(key));
    assertEquals(value, opts.get(key));
    assertTrue(opts.containsKey(key + "2"));
    assertEquals(value, opts.get(key + "2"));
  }

  /** Three iterators must come back with priority, class, and name intact. */
  @Test
  public void testGetIteratorSettings() throws IOException {
    BSPJob bspJob = new BSPJob();
    Job job = MapreduceWrapper.wrappedJob(bspJob);
    AccumuloInputFormat.addIterator(job, new IteratorSetting(1, "WholeRow", "org.apache.accumulo.core.iterators.WholeRowIterator"));
    AccumuloInputFormat.addIterator(job, new IteratorSetting(2, "Versions", "org.apache.accumulo.core.iterators.VersioningIterator"));
    AccumuloInputFormat.addIterator(job, new IteratorSetting(3, "Count", "org.apache.accumulo.core.iterators.CountingIterator"));
    TaskAttemptContext context = MapreduceWrapper.wrappedTaskAttemptContext(bspJob);
    List<IteratorSetting> list = AccumuloInputFormat.getIterators(context);
    // Check the list size
    assertEquals(3, list.size());
    // Walk the list and make sure our settings are correct
    IteratorSetting setting = list.get(0);
    assertEquals(1, setting.getPriority());
    assertEquals("org.apache.accumulo.core.iterators.WholeRowIterator", setting.getIteratorClass());
    assertEquals("WholeRow", setting.getName());
    setting = list.get(1);
    assertEquals(2, setting.getPriority());
    assertEquals("org.apache.accumulo.core.iterators.VersioningIterator", setting.getIteratorClass());
    assertEquals("Versions", setting.getName());
    setting = list.get(2);
    assertEquals(3, setting.getPriority());
    assertEquals("org.apache.accumulo.core.iterators.CountingIterator", setting.getIteratorClass());
    assertEquals("Count", setting.getName());
  }

  /** An iterator name full of regex metacharacters must survive the round trip. */
  @Test
  public void testSetRegex() throws IOException {
    BSPJob bspJob = new BSPJob();
    Job job = MapreduceWrapper.wrappedJob(bspJob);
    String regex = ">\"*%<>\'\\";
    IteratorSetting is = new IteratorSetting(50, regex, RegExFilter.class);
    RegExFilter.setRegexs(is, regex, null, null, null, false);
    AccumuloInputFormat.addIterator(job, is);
    TaskAttemptContext context = MapreduceWrapper.wrappedTaskAttemptContext(bspJob);
    assertEquals(regex, AccumuloInputFormat.getIterators(context).get(0).getName());
  }
}
| 8,267 |
0 | Create_ds/accumulo-bsp/src/main/java/org/apache/accumulo | Create_ds/accumulo-bsp/src/main/java/org/apache/accumulo/bsp/MapreduceWrapper.java | /**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.accumulo.bsp;
import java.io.IOException;
import org.apache.accumulo.core.client.mapreduce.AccumuloInputFormat;
import org.apache.accumulo.core.client.mapreduce.AccumuloOutputFormat;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.TaskAttemptID;
import org.apache.hama.bsp.BSPJob;
/**
* <p>
* MapreduceWrapper class. Provides a wrapper to wrap {@link BSPJob} into the appropriate Hadoop type required by {@link AccumuloInputFormat} and
* {@link AccumuloOutputFormat} static configurator methods. Useful for reusing code to set the job's configuration and not using the expected Hadoop API.
* </p>
*/
/**
 * Adapters that expose a {@link BSPJob}'s {@link Configuration} through the
 * Hadoop mapreduce types ({@link Job}, {@link TaskAttemptContext}) expected by
 * the static configurator methods of {@link AccumuloInputFormat} and
 * {@link AccumuloOutputFormat}. Lets callers reuse the Accumulo configuration
 * helpers without going through the expected Hadoop API.
 */
public class MapreduceWrapper {

  /**
   * Exposes the given job's configuration on the read side, as required by the
   * protected static configuration getters of the Accumulo MapReduce classes.
   *
   * @param job
   *          the {@link BSPJob} instance to be wrapped
   * @return a {@link TaskAttemptContext} backed by the job's {@link Configuration}
   */
  public static TaskAttemptContext wrappedTaskAttemptContext(final BSPJob job) {
    return new TaskAttemptContext(job.getConfiguration(), new TaskAttemptID());
  }

  /**
   * Exposes the given job's configuration on the write side, as required by the
   * public static configuration setters of the Accumulo MapReduce classes.
   * Only {@link Job#getConfiguration()} is overridden, so the returned object
   * must not be used for anything other than editing the {@link BSPJob}'s
   * {@link Configuration}.
   *
   * @param job
   *          the {@link BSPJob} instance to be wrapped
   * @return a {@link Job} view over the same configuration
   */
  public static Job wrappedJob(final BSPJob job) {
    try {
      return new Job() {
        @Override
        public Configuration getConfiguration() {
          // Delegate straight to the wrapped BSP job's configuration.
          return job.getConfiguration();
        }
      };
    } catch (IOException e) {
      throw new RuntimeException(e);
    }
  }
}
| 8,268 |
0 | Create_ds/accumulo-bsp/src/main/java/org/apache/accumulo | Create_ds/accumulo-bsp/src/main/java/org/apache/accumulo/bsp/AccumuloInputFormat.java | /**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.accumulo.bsp;
import java.io.IOException;
import java.util.List;
import java.util.Map.Entry;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Value;
import org.apache.hama.bsp.BSPJob;
import org.apache.hama.bsp.InputFormat;
import org.apache.hama.bsp.InputSplit;
import org.apache.hama.bsp.RecordReader;
/**
* <p>
* AccumuloInputFormat class. To be used with Hama BSP.
* </p>
*
* @see BSPJob#setInputFormat(Class)
*/
/**
 * <p>
 * AccumuloInputFormat class. To be used with Hama BSP. Adapts the Accumulo
 * mapreduce input format to Hama's {@link InputFormat} contract.
 * </p>
 *
 * @see BSPJob#setInputFormat(Class)
 */
public class AccumuloInputFormat extends org.apache.accumulo.core.client.mapreduce.AccumuloInputFormat implements InputFormat<Key,Value> {

  /**
   * Bridges the inherited mapreduce {@code RecordReaderBase} to Hama's
   * {@link RecordReader} interface. Declared {@code static}: it never touches
   * the enclosing {@code AccumuloInputFormat} instance, so there is no need to
   * capture a hidden reference to it.
   */
  public static class BSPRecordReaderBase extends RecordReaderBase<Key,Value> implements RecordReader<Key,Value> {

    public BSPRecordReaderBase(InputSplit split, BSPJob job) throws IOException {
      this.initialize((BSPRangeInputSplit) split, MapreduceWrapper.wrappedTaskAttemptContext(job));
    }

    @Override
    public boolean nextKeyValue() throws IOException, InterruptedException {
      // Lazily create the holders: before the first read currentKey/currentValue
      // are still null, and next() would NPE when copying into them.
      if (currentKey == null) {
        currentKey = new Key();
      }
      if (currentValue == null) {
        currentValue = new Value(new byte[0]);
      }
      return next(currentKey, currentValue);
    }

    /** Returns a reusable key holder, creating one before the first record. */
    @Override
    public Key createKey() {
      if (currentKey == null) {
        return new Key();
      } else {
        return currentKey;
      }
    }

    /** Returns a reusable value holder, creating one before the first record. */
    @Override
    public Value createValue() {
      if (currentValue == null) {
        return new Value(new byte[0]);
      } else {
        return currentValue;
      }
    }

    /** Byte position is not tracked for Accumulo scans; always 0. */
    @Override
    public long getPos() throws IOException {
      return 0;
    }

    /**
     * Copies the next scanner entry into the supplied holders.
     *
     * @return true if a record was read, false when the scan is exhausted
     */
    @Override
    public boolean next(Key k, Value v) throws IOException {
      if (scannerIterator.hasNext()) {
        ++numKeysRead;
        Entry<Key,Value> entry = scannerIterator.next();
        currentKey = entry.getKey();
        currentValue = entry.getValue();
        k.set(currentKey);
        v.set(currentValue.get());
        return true;
      }
      return false;
    }
  }

  /** A {@link RangeInputSplit} that also satisfies Hama's {@link InputSplit}. */
  public static class BSPRangeInputSplit extends RangeInputSplit implements InputSplit {
    public BSPRangeInputSplit() {
      super();
    }

    public BSPRangeInputSplit(RangeInputSplit split) throws IOException {
      super(split);
    }
  }

  @Override
  public RecordReader<Key,Value> getRecordReader(InputSplit split, BSPJob job) throws IOException {
    return new BSPRecordReaderBase(split, job);
  }

  /**
   * Delegates split computation to the mapreduce superclass and rewraps each
   * split for Hama.
   *
   * @param numTasks
   *          task-count hint from Hama; unused — splits come from Accumulo's ranges
   */
  @Override
  public InputSplit[] getSplits(BSPJob job, int numTasks) throws IOException {
    List<org.apache.hadoop.mapreduce.InputSplit> splits = getSplits(MapreduceWrapper.wrappedTaskAttemptContext(job));
    InputSplit[] bspSplits = new BSPRangeInputSplit[splits.size()];
    for (int i = 0; i < splits.size(); i++) {
      bspSplits[i] = new BSPRangeInputSplit((RangeInputSplit) splits.get(i));
    }
    return bspSplits;
  }
}
| 8,269 |
0 | Create_ds/accumulo-bsp/src/main/java/org/apache/accumulo | Create_ds/accumulo-bsp/src/main/java/org/apache/accumulo/bsp/AccumuloOutputFormat.java | /**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.accumulo.bsp;
import java.io.IOException;
import org.apache.accumulo.core.client.AccumuloException;
import org.apache.accumulo.core.client.AccumuloSecurityException;
import org.apache.accumulo.core.data.Mutation;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.io.Text;
import org.apache.hama.bsp.BSPJob;
import org.apache.hama.bsp.OutputFormat;
import org.apache.hama.bsp.RecordWriter;
/**
* <p>
* AccumuloOutputFormat class. To be used with Hama BSP.
* </p>
*
* @see BSPJob#setOutputFormat(Class)
*/
/**
 * <p>
 * AccumuloOutputFormat class. To be used with Hama BSP. Adapts the Accumulo
 * mapreduce output format to Hama's {@link OutputFormat} contract.
 * </p>
 *
 * @see BSPJob#setOutputFormat(Class)
 */
public class AccumuloOutputFormat extends org.apache.accumulo.core.client.mapreduce.AccumuloOutputFormat implements OutputFormat<Text,Mutation> {

  /** Bridges the inherited {@code AccumuloRecordWriter} to Hama's {@link RecordWriter}. */
  protected static class BSPRecordWriter extends AccumuloRecordWriter implements RecordWriter<Text,Mutation> {
    // Kept only so close() can rebuild a TaskAttemptContext for the superclass.
    private final BSPJob job;

    BSPRecordWriter(BSPJob job) throws AccumuloException, AccumuloSecurityException, IOException {
      super(MapreduceWrapper.wrappedTaskAttemptContext(job));
      this.job = job;
    }

    @Override
    public void close() throws IOException {
      try {
        close(MapreduceWrapper.wrappedTaskAttemptContext(job));
      } catch (InterruptedException e) {
        // Restore the interrupt status before surfacing it as an IOException,
        // so callers can still observe that the thread was interrupted.
        Thread.currentThread().interrupt();
        throw new IOException(e);
      }
    }
  }

  @Override
  public void checkOutputSpecs(FileSystem fs, BSPJob job) throws IOException {
    checkOutputSpecs(MapreduceWrapper.wrappedTaskAttemptContext(job));
  }

  /**
   * Creates a writer that sends mutations to Accumulo.
   *
   * @param name
   *          unused; Accumulo routes output by table, not by file name
   */
  @Override
  public RecordWriter<Text,Mutation> getRecordWriter(FileSystem fs, BSPJob job, String name) throws IOException {
    try {
      return new BSPRecordWriter(job);
    } catch (IOException e) {
      // Already the declared exception type — rethrow rather than double-wrap.
      throw e;
    } catch (Exception e) {
      throw new IOException(e);
    }
  }
}
| 8,270 |
0 | Create_ds/react-native-square-reader-sdk/reader-sdk-react-native-quickstart/android/app/src/debug/java/com | Create_ds/react-native-square-reader-sdk/reader-sdk-react-native-quickstart/android/app/src/debug/java/com/rnreadersdksample/ReactNativeFlipper.java | /*
Copyright 2022 Square Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package com.rnreadersdksample;
import android.content.Context;
import com.facebook.flipper.android.AndroidFlipperClient;
import com.facebook.flipper.android.utils.FlipperUtils;
import com.facebook.flipper.core.FlipperClient;
import com.facebook.flipper.plugins.crashreporter.CrashReporterPlugin;
import com.facebook.flipper.plugins.databases.DatabasesFlipperPlugin;
import com.facebook.flipper.plugins.fresco.FrescoFlipperPlugin;
import com.facebook.flipper.plugins.inspector.DescriptorMapping;
import com.facebook.flipper.plugins.inspector.InspectorFlipperPlugin;
import com.facebook.flipper.plugins.network.FlipperOkhttpInterceptor;
import com.facebook.flipper.plugins.network.NetworkFlipperPlugin;
import com.facebook.flipper.plugins.react.ReactFlipperPlugin;
import com.facebook.flipper.plugins.sharedpreferences.SharedPreferencesFlipperPlugin;
import com.facebook.react.ReactInstanceManager;
import com.facebook.react.bridge.ReactContext;
import com.facebook.react.modules.network.NetworkingModule;
import okhttp3.OkHttpClient;
/**
 * Debug-build hook that wires the standard set of Flipper plugins into the
 * React Native instance.
 */
public class ReactNativeFlipper {

  /**
   * Initializes the Flipper client for this app, registering the inspector,
   * React, database, shared-preferences, crash-reporter, network, and Fresco
   * plugins. No-op when Flipper should not be enabled for this build.
   */
  public static void initializeFlipper(Context context, ReactInstanceManager reactInstanceManager) {
    if (!FlipperUtils.shouldEnableFlipper(context)) {
      return;
    }
    final FlipperClient client = AndroidFlipperClient.getInstance(context);
    client.addPlugin(new InspectorFlipperPlugin(context, DescriptorMapping.withDefaults()));
    client.addPlugin(new ReactFlipperPlugin());
    client.addPlugin(new DatabasesFlipperPlugin(context));
    client.addPlugin(new SharedPreferencesFlipperPlugin(context));
    client.addPlugin(CrashReporterPlugin.getInstance());

    // Route the app's OkHttp traffic through Flipper's network inspector.
    final NetworkFlipperPlugin networkPlugin = new NetworkFlipperPlugin();
    NetworkingModule.setCustomClientBuilder(
        builder -> builder.addNetworkInterceptor(new FlipperOkhttpInterceptor(networkPlugin)));
    client.addPlugin(networkPlugin);
    client.start();

    // The Fresco plugin needs ImagePipelineFactory, which only exists once all
    // native modules have been initialized.
    final ReactContext reactContext = reactInstanceManager.getCurrentReactContext();
    if (reactContext != null) {
      client.addPlugin(new FrescoFlipperPlugin());
    } else {
      reactInstanceManager.addReactInstanceEventListener(
          new ReactInstanceManager.ReactInstanceEventListener() {
            @Override
            public void onReactContextInitialized(ReactContext initializedContext) {
              // One-shot listener: unregister, then add the plugin on the
              // native-modules thread.
              reactInstanceManager.removeReactInstanceEventListener(this);
              initializedContext.runOnNativeModulesQueueThread(
                  () -> client.addPlugin(new FrescoFlipperPlugin()));
            }
          });
    }
  }
}
| 8,271 |
0 | Create_ds/react-native-square-reader-sdk/android/src/main/java/com/squareup/sdk/reader | Create_ds/react-native-square-reader-sdk/android/src/main/java/com/squareup/sdk/reader/react/ReaderSdkPackage.java | /*
Copyright 2022 Square Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package com.squareup.sdk.reader.react;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import com.facebook.react.ReactPackage;
import com.facebook.react.bridge.NativeModule;
import com.facebook.react.bridge.ReactApplicationContext;
import com.facebook.react.uimanager.ViewManager;
/** React package exposing the Square Reader SDK native modules to JavaScript. */
public class ReaderSdkPackage implements ReactPackage {

  /** Registers the four Reader SDK native modules with the React bridge. */
  @Override
  public List<NativeModule> createNativeModules(ReactApplicationContext reactContext) {
    final List<NativeModule> nativeModules = new ArrayList<>(4);
    nativeModules.add(new AuthorizationModule(reactContext));
    nativeModules.add(new CheckoutModule(reactContext));
    nativeModules.add(new ReaderSettingsModule(reactContext));
    nativeModules.add(new StoreCustomerCardModule(reactContext));
    return nativeModules;
  }

  /** This package contributes no native UI components. */
  @Override
  public List<ViewManager> createViewManagers(ReactApplicationContext reactContext) {
    return Collections.emptyList();
  }
}
| 8,272 |
0 | Create_ds/react-native-square-reader-sdk/android/src/main/java/com/squareup/sdk/reader | Create_ds/react-native-square-reader-sdk/android/src/main/java/com/squareup/sdk/reader/react/CheckoutModule.java | /*
Copyright 2022 Square Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package com.squareup.sdk.reader.react;
import android.app.Activity;
import android.os.Handler;
import android.os.Looper;
import com.facebook.react.bridge.Promise;
import com.facebook.react.bridge.ReactApplicationContext;
import com.facebook.react.bridge.ReactContextBaseJavaModule;
import com.facebook.react.bridge.ReactMethod;
import com.facebook.react.bridge.ReadableArray;
import com.facebook.react.bridge.ReadableMap;
import com.facebook.react.bridge.ReadableType;
import com.squareup.sdk.reader.ReaderSdk;
import com.squareup.sdk.reader.checkout.AdditionalPaymentType;
import com.squareup.sdk.reader.checkout.CheckoutActivityCallback;
import com.squareup.sdk.reader.checkout.CheckoutErrorCode;
import com.squareup.sdk.reader.checkout.CheckoutParameters;
import com.squareup.sdk.reader.checkout.CheckoutResult;
import com.squareup.sdk.reader.checkout.CurrencyCode;
import com.squareup.sdk.reader.checkout.Money;
import com.squareup.sdk.reader.checkout.TipSettings;
import com.squareup.sdk.reader.core.CallbackReference;
import com.squareup.sdk.reader.core.Result;
import com.squareup.sdk.reader.core.ResultError;
import com.squareup.sdk.reader.react.internal.converter.CheckoutResultConverter;
import com.squareup.sdk.reader.react.internal.ErrorHandlerUtils;
import com.squareup.sdk.reader.react.internal.ReaderSdkException;
import java.util.ArrayList;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Set;
/**
 * React Native module "RNReaderSDKCheckout": bridges the Reader SDK checkout
 * flow to JavaScript. It validates the JS-supplied parameter map, registers a
 * one-shot checkout callback, launches the native checkout activity on the
 * Android main looper, and settles the JS Promise with the result.
 */
class CheckoutModule extends ReactContextBaseJavaModule {
    // Define all the checkout debug codes and messages below
    // These error codes and messages **MUST** align with iOS error codes and javascript error codes
    // Search KEEP_IN_SYNC_CHECKOUT_ERROR to update all places
    // react native module debug error codes
    private static final String RN_CHECKOUT_ALREADY_IN_PROGRESS = "rn_checkout_already_in_progress";
    private static final String RN_CHECKOUT_INVALID_PARAMETER = "rn_checkout_invalid_parameter";
    // react native module debug messages
    private static final String RN_MESSAGE_CHECKOUT_ALREADY_IN_PROGRESS = "A checkout operation is already in progress. Ensure that the in-progress checkout is completed before calling startCheckoutAsync again.";
    private static final String RN_MESSAGE_CHECKOUT_INVALID_PARAMETER = "Invalid parameter found in checkout parameters.";
    // Non-null exactly while a checkout is in flight; doubles as the
    // "already in progress" guard. volatile because it is written from the JS
    // bridge thread (startCheckout) and from the SDK result callback.
    private volatile CallbackReference checkoutCallbackRef;
    // Used to hop onto the Android main thread before starting the checkout activity.
    private final Handler mainLooperHandler;
    // Converts native CheckoutResult objects into JS-consumable maps.
    private final CheckoutResultConverter checkoutResultConverter;

    public CheckoutModule(ReactApplicationContext reactContext) {
        super(reactContext);
        mainLooperHandler = new Handler(Looper.getMainLooper());
        checkoutResultConverter = new CheckoutResultConverter();
    }

    /** Name under which this module is exposed to JS (NativeModules.RNReaderSDKCheckout). */
    @Override
    public String getName() {
        return "RNReaderSDKCheckout";
    }

    /**
     * Starts the Reader SDK checkout flow.
     *
     * Rejects the promise with a USAGE_ERROR when the parameter map fails
     * validation or when another checkout is already in flight; otherwise the
     * promise is settled later by the checkout callback (resolved with the
     * converted CheckoutResult, or rejected with the serialized SDK error).
     */
    @ReactMethod
    public void startCheckout(ReadableMap jsCheckoutParameters, final Promise promise) {
        StringBuilder paramError = new StringBuilder();
        if (!validateJSCheckoutParams(jsCheckoutParameters, paramError)) {
            // paramError now names the first offending field.
            String paramErrorDebugMessage = String.format("%s %s", RN_MESSAGE_CHECKOUT_INVALID_PARAMETER, paramError.toString());
            String errorJsonMessage = ErrorHandlerUtils.createNativeModuleError(RN_CHECKOUT_INVALID_PARAMETER, paramErrorDebugMessage);
            promise.reject(ErrorHandlerUtils.USAGE_ERROR, new ReaderSdkException(errorJsonMessage));
            return;
        }
        if (checkoutCallbackRef != null) {
            // A previous checkout has not completed yet; refuse to start a second one.
            String errorJsonMessage = ErrorHandlerUtils.createNativeModuleError(RN_CHECKOUT_ALREADY_IN_PROGRESS, RN_MESSAGE_CHECKOUT_ALREADY_IN_PROGRESS);
            promise.reject(ErrorHandlerUtils.USAGE_ERROR, new ReaderSdkException(errorJsonMessage));
            return;
        }
        CheckoutActivityCallback checkoutCallback = new CheckoutActivityCallback() {
            @Override
            public void onResult(Result<CheckoutResult, ResultError<CheckoutErrorCode>> result) {
                // One-shot: release the SDK registration and clear the in-progress flag
                // before settling the promise.
                checkoutCallbackRef.clear();
                checkoutCallbackRef = null;
                if (result.isError()) {
                    ResultError<CheckoutErrorCode> error = result.getError();
                    String errorJsonMessage = ErrorHandlerUtils.serializeErrorToJson(error.getDebugCode(), error.getMessage(), error.getDebugMessage());
                    promise.reject(ErrorHandlerUtils.getErrorCode(error.getCode()), new ReaderSdkException(errorJsonMessage));
                    return;
                }
                CheckoutResult checkoutResult = result.getSuccessValue();
                promise.resolve(checkoutResultConverter.toJSObject(checkoutResult));
            }
        };
        checkoutCallbackRef = ReaderSdk.checkoutManager().addCheckoutActivityCallback(checkoutCallback);
        // amountMoney presence/shape was verified by validateJSCheckoutParams above.
        ReadableMap jsAmountMoney = jsCheckoutParameters.getMap("amountMoney");
        Money amountMoney = new Money(
                jsAmountMoney.getInt("amount"),
                // Fall back to the device's current currency when none is supplied.
                jsAmountMoney.hasKey("currencyCode") ? CurrencyCode.valueOf(jsAmountMoney.getString("currencyCode")) : CurrencyCode.current());
        CheckoutParameters.Builder checkoutParamsBuilder = CheckoutParameters.newBuilder(amountMoney);
        // Every remaining parameter is optional; only apply the ones present in the JS map.
        if (jsCheckoutParameters.hasKey("note")) {
            checkoutParamsBuilder.note(jsCheckoutParameters.getString("note"));
        }
        if (jsCheckoutParameters.hasKey("skipReceipt")) {
            checkoutParamsBuilder.skipReceipt(jsCheckoutParameters.getBoolean("skipReceipt"));
        }
        if (jsCheckoutParameters.hasKey("collectSignature")) {
            checkoutParamsBuilder.collectSignature(jsCheckoutParameters.getBoolean("collectSignature"));
        }
        if (jsCheckoutParameters.hasKey("allowSplitTender")) {
            checkoutParamsBuilder.allowSplitTender(jsCheckoutParameters.getBoolean("allowSplitTender"));
        }
        if (jsCheckoutParameters.hasKey("delayCapture")) {
            checkoutParamsBuilder.delayCapture(jsCheckoutParameters.getBoolean("delayCapture"));
        }
        if (jsCheckoutParameters.hasKey("tipSettings")) {
            TipSettings tipSettings = buildTipSettings(jsCheckoutParameters.getMap("tipSettings"));
            checkoutParamsBuilder.tipSettings(tipSettings);
        }
        if (jsCheckoutParameters.hasKey("additionalPaymentTypes")) {
            Set<AdditionalPaymentType> additionalPaymentTypes = buildAdditionalPaymentTypes(jsCheckoutParameters.getArray("additionalPaymentTypes"));
            checkoutParamsBuilder.additionalPaymentTypes(additionalPaymentTypes);
        }
        final CheckoutParameters checkoutParams = checkoutParamsBuilder.build();
        final Activity currentActivity = getCurrentActivity();
        // The SDK activity must be started from the main thread.
        // NOTE(review): currentActivity can be null if the host activity is gone —
        // presumably the SDK handles that; confirm before relying on it.
        mainLooperHandler.post(new Runnable() {
            @Override
            public void run() {
                ReaderSdk.checkoutManager().startCheckoutActivity(currentActivity, checkoutParams);
            }
        });
    }

    @Override
    public void onCatalystInstanceDestroy() {
        super.onCatalystInstanceDestroy();
        // clear the callback to avoid memory leaks when react native module is destroyed
        if (checkoutCallbackRef != null) {
            checkoutCallbackRef.clear();
        }
    }

    /**
     * Validates the JS checkout parameter map. Returns true when valid; on the
     * first failure appends a human-readable description of the offending field
     * to {@code paramError} and returns false.
     */
    static private boolean validateJSCheckoutParams(ReadableMap jsCheckoutParams, StringBuilder paramError) {
        // check types of all parameters
        if (!jsCheckoutParams.hasKey("amountMoney") || jsCheckoutParams.getType("amountMoney") != ReadableType.Map) {
            paramError.append("'amountMoney' is missing or not an object");
            return false;
        } else if (jsCheckoutParams.hasKey("skipReceipt") && jsCheckoutParams.getType("skipReceipt") != ReadableType.Boolean) {
            paramError.append("'skipReceipt' is not a boolean");
            return false;
        } else if (jsCheckoutParams.hasKey("collectSignature") && jsCheckoutParams.getType("collectSignature") != ReadableType.Boolean) {
            paramError.append("'collectSignature' is not a boolean");
            return false;
        } else if (jsCheckoutParams.hasKey("allowSplitTender") && jsCheckoutParams.getType("allowSplitTender") != ReadableType.Boolean) {
            paramError.append("'allowSplitTender' is not a boolean");
            return false;
        } else if (jsCheckoutParams.hasKey("delayCapture") && jsCheckoutParams.getType("delayCapture") != ReadableType.Boolean) {
            paramError.append("'delayCapture' is not a boolean");
            return false;
        } else if (jsCheckoutParams.hasKey("note") && jsCheckoutParams.getType("note") != ReadableType.String) {
            paramError.append("'note' is not a string");
            return false;
        } else if (jsCheckoutParams.hasKey("tipSettings") && jsCheckoutParams.getType("tipSettings") != ReadableType.Map) {
            paramError.append("'tipSettings' is not an object");
            return false;
        } else if (jsCheckoutParams.hasKey("additionalPaymentTypes") && jsCheckoutParams.getType("additionalPaymentTypes") != ReadableType.Array) {
            paramError.append("'additionalPaymentTypes' is not an array");
            return false;
        }
        // check amountMoney
        ReadableMap amountMoney = jsCheckoutParams.getMap("amountMoney");
        if (!amountMoney.hasKey("amount") || amountMoney.getType("amount") != ReadableType.Number) {
            paramError.append("'amount' is not an integer");
            return false;
        }
        if (amountMoney.hasKey("currencyCode") && amountMoney.getType("currencyCode") != ReadableType.String) {
            paramError.append("'currencyCode' is not a String");
            return false;
        }
        if (amountMoney.hasKey("currencyCode")) {
            // Verify the string names a real CurrencyCode before startCheckout calls valueOf.
            try {
                CurrencyCode.valueOf(amountMoney.getString("currencyCode"));
            } catch (IllegalArgumentException ex) {
                paramError.append("failed to parse 'currencyCode'");
                return false;
            }
        }
        if (jsCheckoutParams.hasKey("tipSettings")) {
            // check tipSettings
            ReadableMap tipSettings = jsCheckoutParams.getMap("tipSettings");
            if (tipSettings.hasKey("showCustomTipField") && tipSettings.getType("showCustomTipField") != ReadableType.Boolean) {
                paramError.append("'showCustomTipField' is not a boolean");
                return false;
            } else if (tipSettings.hasKey("showSeparateTipScreen") && tipSettings.getType("showSeparateTipScreen") != ReadableType.Boolean) {
                paramError.append("'showSeparateTipScreen' is not a boolean");
                return false;
            } else if (tipSettings.hasKey("tipPercentages") && tipSettings.getType("tipPercentages") != ReadableType.Array) {
                paramError.append("'tipPercentages' is not an array");
                return false;
            }
        }
        return true;
    }

    /**
     * Builds native TipSettings from the (already type-checked) JS map.
     * Only keys present in the map are applied to the builder.
     */
    static private TipSettings buildTipSettings(ReadableMap tipSettingsConfig) {
        TipSettings.Builder tipSettingsBuilder = TipSettings.newBuilder();
        if (tipSettingsConfig.hasKey("showCustomTipField")) {
            tipSettingsBuilder.showCustomTipField(tipSettingsConfig.getBoolean("showCustomTipField"));
        }
        if (tipSettingsConfig.hasKey("showSeparateTipScreen")) {
            tipSettingsBuilder.showSeparateTipScreen(tipSettingsConfig.getBoolean("showSeparateTipScreen"));
        }
        if (tipSettingsConfig.hasKey("tipPercentages")) {
            ReadableArray tipPercentages = tipSettingsConfig.getArray("tipPercentages");
            if (tipPercentages != null) {
                List<Integer> percentagesList = new ArrayList<>();
                for (int i = 0; i < tipPercentages.size(); i++) {
                    percentagesList.add(tipPercentages.getInt(i));
                }
                tipSettingsBuilder.tipPercentages(percentagesList);
            }
        }
        return tipSettingsBuilder.build();
    }

    /**
     * Maps JS payment-type names ("cash", "manual_card_entry", "other") to the
     * SDK enum, preserving order and de-duplicating via a LinkedHashSet.
     *
     * @throws RuntimeException on an unrecognized name (programming error on the JS side)
     */
    static private Set<AdditionalPaymentType> buildAdditionalPaymentTypes(ReadableArray additionalPaymentTypes) {
        Set<AdditionalPaymentType> types = new LinkedHashSet<>();
        if (additionalPaymentTypes != null) {
            for (int i = 0; i < additionalPaymentTypes.size(); i++) {
                String typeName = additionalPaymentTypes.getString(i);
                switch (typeName) {
                    case "cash":
                        types.add(AdditionalPaymentType.CASH);
                        break;
                    case "manual_card_entry":
                        types.add(AdditionalPaymentType.MANUAL_CARD_ENTRY);
                        break;
                    case "other":
                        types.add(AdditionalPaymentType.OTHER);
                        break;
                    default:
                        throw new RuntimeException("Unexpected payment type: " + typeName);
                }
            }
        }
        return types;
    }
}
| 8,273 |
0 | Create_ds/react-native-square-reader-sdk/android/src/main/java/com/squareup/sdk/reader | Create_ds/react-native-square-reader-sdk/android/src/main/java/com/squareup/sdk/reader/react/StoreCustomerCardModule.java | /*
Copyright 2022 Square Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package com.squareup.sdk.reader.react;
import android.app.Activity;
import android.os.Handler;
import android.os.Looper;
import com.facebook.react.bridge.Promise;
import com.facebook.react.bridge.ReactApplicationContext;
import com.facebook.react.bridge.ReactContextBaseJavaModule;
import com.facebook.react.bridge.ReactMethod;
import com.squareup.sdk.reader.ReaderSdk;
import com.squareup.sdk.reader.checkout.Card;
import com.squareup.sdk.reader.core.CallbackReference;
import com.squareup.sdk.reader.core.Result;
import com.squareup.sdk.reader.core.ResultError;
import com.squareup.sdk.reader.crm.StoreCardActivityCallback;
import com.squareup.sdk.reader.crm.StoreCustomerCardErrorCode;
import com.squareup.sdk.reader.react.internal.ErrorHandlerUtils;
import com.squareup.sdk.reader.react.internal.ReaderSdkException;
import com.squareup.sdk.reader.react.internal.converter.CardConverter;
/**
 * React Native module "RNReaderSDKStoreCustomerCard": bridges the Reader SDK
 * store-customer-card flow. Registers a one-shot callback, launches the native
 * store-card activity on the main looper, and settles the JS Promise with the
 * stored Card (converted to a JS object) or a serialized error.
 */
class StoreCustomerCardModule extends ReactContextBaseJavaModule {
    // Define all the store customer card debug codes and messages below
    // These error codes and messages **MUST** align with iOS error codes and javascript error codes
    // Search KEEP_IN_SYNC_STORE_CUSTOMER_CARD_ERROR to update all places
    // react native module debug error codes
    private static final String RN_STORE_CUSTOMER_CARD_ALREADY_IN_PROGRESS = "rn_store_customer_card_already_in_progress";
    // react native module debug messages
    private static final String RN_MESSAGE_STORE_CUSTOMER_CARD_ALREADY_IN_PROGRESS = "A store customer card operation is already in progress. Ensure that the in-progress store customer card is completed before calling startStoreCardAsync again.";
    // Non-null exactly while a store-card flow is in flight; doubles as the
    // "already in progress" guard. volatile: written from the JS bridge thread
    // and from the SDK result callback.
    private volatile CallbackReference storeCardCallbackRef;
    // Used to hop onto the Android main thread before starting the activity.
    private final Handler mainLooperHandler;
    // Converts native Card objects into JS-consumable maps.
    private final CardConverter cardConverter;

    public StoreCustomerCardModule(ReactApplicationContext reactContext) {
        super(reactContext);
        mainLooperHandler = new Handler(Looper.getMainLooper());
        cardConverter = new CardConverter();
    }

    /** Name under which this module is exposed to JS (NativeModules.RNReaderSDKStoreCustomerCard). */
    @Override
    public String getName() {
        return "RNReaderSDKStoreCustomerCard";
    }

    /**
     * Starts the store-card-on-file flow for the given Square customer id.
     * Rejects immediately with USAGE_ERROR when a flow is already in flight;
     * otherwise the promise is settled later by the SDK callback.
     */
    @ReactMethod
    public void startStoreCard(final String customerId, final Promise promise) {
        if (storeCardCallbackRef != null) {
            String errorJsonMessage = ErrorHandlerUtils.createNativeModuleError(RN_STORE_CUSTOMER_CARD_ALREADY_IN_PROGRESS, RN_MESSAGE_STORE_CUSTOMER_CARD_ALREADY_IN_PROGRESS);
            promise.reject(ErrorHandlerUtils.USAGE_ERROR, new ReaderSdkException(errorJsonMessage));
            return;
        }
        StoreCardActivityCallback storeCardActivityCallback = new StoreCardActivityCallback() {
            @Override
            public void onResult(Result<Card, ResultError<StoreCustomerCardErrorCode>> result) {
                // One-shot: release the SDK registration and clear the in-progress
                // flag before settling the promise.
                storeCardCallbackRef.clear();
                storeCardCallbackRef = null;
                if (result.isError()) {
                    ResultError<StoreCustomerCardErrorCode> error = result.getError();
                    String errorJsonMessage = ErrorHandlerUtils.serializeErrorToJson(error.getDebugCode(), error.getMessage(), error.getDebugMessage());
                    promise.reject(ErrorHandlerUtils.getErrorCode(error.getCode()), new ReaderSdkException(errorJsonMessage));
                    return;
                }
                Card card = result.getSuccessValue();
                promise.resolve(cardConverter.toJSObject(card));
            }
        };
        storeCardCallbackRef = ReaderSdk.customerCardManager().addStoreCardActivityCallback(storeCardActivityCallback);
        final Activity currentActivity = getCurrentActivity();
        // The SDK activity must be started from the main thread.
        mainLooperHandler.post(new Runnable() {
            @Override
            public void run() {
                ReaderSdk.customerCardManager().startStoreCardActivity(currentActivity, customerId);
            }
        });
    }

    @Override
    public void onCatalystInstanceDestroy() {
        super.onCatalystInstanceDestroy();
        // clear the callback to avoid memory leaks when react native module is destroyed
        if (storeCardCallbackRef != null) {
            storeCardCallbackRef.clear();
        }
    }
}
| 8,274 |
0 | Create_ds/react-native-square-reader-sdk/android/src/main/java/com/squareup/sdk/reader | Create_ds/react-native-square-reader-sdk/android/src/main/java/com/squareup/sdk/reader/react/ReaderSettingsModule.java | /*
Copyright 2022 Square Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package com.squareup.sdk.reader.react;
import android.app.Activity;
import android.os.Handler;
import android.os.Looper;
import com.facebook.react.bridge.Promise;
import com.facebook.react.bridge.ReactApplicationContext;
import com.facebook.react.bridge.ReactContextBaseJavaModule;
import com.facebook.react.bridge.ReactMethod;
import com.squareup.sdk.reader.ReaderSdk;
import com.squareup.sdk.reader.core.CallbackReference;
import com.squareup.sdk.reader.core.Result;
import com.squareup.sdk.reader.core.ResultError;
import com.squareup.sdk.reader.hardware.ReaderSettingsActivityCallback;
import com.squareup.sdk.reader.hardware.ReaderSettingsErrorCode;
import com.squareup.sdk.reader.react.internal.ErrorHandlerUtils;
import com.squareup.sdk.reader.react.internal.ReaderSdkException;
/**
 * React Native module "RNReaderSDKReaderSettings": bridges the Reader SDK
 * reader-settings screen. Registers a one-shot callback, launches the native
 * settings activity on the main looper, and resolves the JS Promise with null
 * on success (the screen has no result payload) or rejects with a serialized error.
 */
class ReaderSettingsModule extends ReactContextBaseJavaModule {
    // Define all the reader settings debug codes and messages below
    // These error codes and messages **MUST** align with iOS error codes and javascript error codes
    // Search KEEP_IN_SYNC_READER_SETTINGS_ERROR to update all places
    // react native module debug error codes
    private static final String RN_READER_SETTINGS_ALREADY_IN_PROGRESS = "rn_reader_settings_already_in_progress";
    // react native module debug messages
    private static final String RN_MESSAGE_READER_SETTINGS_ALREADY_IN_PROGRESS = "A reader settings operation is already in progress. Ensure that the in-progress reader settings is completed before calling startReaderSettingsAsync again.";
    // Non-null exactly while the settings screen is in flight; doubles as the
    // "already in progress" guard. volatile: written from the JS bridge thread
    // and from the SDK result callback.
    private volatile CallbackReference readerSettingCallbackRef;
    // Used to hop onto the Android main thread before starting the activity.
    private final Handler mainLooperHandler;

    public ReaderSettingsModule(ReactApplicationContext reactContext) {
        super(reactContext);
        mainLooperHandler = new Handler(Looper.getMainLooper());
    }

    /** Name under which this module is exposed to JS (NativeModules.RNReaderSDKReaderSettings). */
    @Override
    public String getName() {
        return "RNReaderSDKReaderSettings";
    }

    /**
     * Opens the Reader SDK settings activity. Rejects immediately with
     * USAGE_ERROR when a settings flow is already in flight; otherwise the
     * promise is settled later by the SDK callback.
     */
    @ReactMethod
    public void startReaderSettings(final Promise promise) {
        if (readerSettingCallbackRef != null) {
            String errorJsonMessage = ErrorHandlerUtils.createNativeModuleError(RN_READER_SETTINGS_ALREADY_IN_PROGRESS, RN_MESSAGE_READER_SETTINGS_ALREADY_IN_PROGRESS);
            promise.reject(ErrorHandlerUtils.USAGE_ERROR, new ReaderSdkException(errorJsonMessage));
            return;
        }
        ReaderSettingsActivityCallback readerSettingsCallback = new ReaderSettingsActivityCallback() {
            @Override
            public void onResult(Result<Void, ResultError<ReaderSettingsErrorCode>> result) {
                // One-shot: release the SDK registration and clear the in-progress
                // flag before settling the promise.
                readerSettingCallbackRef.clear();
                readerSettingCallbackRef = null;
                if (result.isError()) {
                    ResultError<ReaderSettingsErrorCode> error = result.getError();
                    String errorJsonMessage = ErrorHandlerUtils.serializeErrorToJson(error.getDebugCode(), error.getMessage(), error.getDebugMessage());
                    promise.reject(ErrorHandlerUtils.getErrorCode(error.getCode()), new ReaderSdkException(errorJsonMessage));
                    return;
                }
                // Success carries no payload for this flow.
                promise.resolve(null);
            }
        };
        readerSettingCallbackRef = ReaderSdk.readerManager()
                .addReaderSettingsActivityCallback(readerSettingsCallback);
        final Activity currentActivity = getCurrentActivity();
        // The SDK activity must be started from the main thread.
        mainLooperHandler.post(new Runnable() {
            @Override
            public void run() {
                ReaderSdk.readerManager().startReaderSettingsActivity(currentActivity);
            }
        });
    }

    @Override
    public void onCatalystInstanceDestroy() {
        super.onCatalystInstanceDestroy();
        // clear the callback to avoid memory leaks when react native module is destroyed
        if (readerSettingCallbackRef != null) {
            readerSettingCallbackRef.clear();
        }
    }
}
| 8,275 |
0 | Create_ds/react-native-square-reader-sdk/android/src/main/java/com/squareup/sdk/reader | Create_ds/react-native-square-reader-sdk/android/src/main/java/com/squareup/sdk/reader/react/AuthorizationModule.java | /*
Copyright 2022 Square Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package com.squareup.sdk.reader.react;
import android.os.Handler;
import android.os.Looper;
import com.facebook.react.bridge.Promise;
import com.facebook.react.bridge.ReactApplicationContext;
import com.facebook.react.bridge.ReactContextBaseJavaModule;
import com.facebook.react.bridge.ReactMethod;
import com.squareup.sdk.reader.ReaderSdk;
import com.squareup.sdk.reader.authorization.AuthorizeCallback;
import com.squareup.sdk.reader.authorization.AuthorizeErrorCode;
import com.squareup.sdk.reader.authorization.DeauthorizeCallback;
import com.squareup.sdk.reader.authorization.DeauthorizeErrorCode;
import com.squareup.sdk.reader.authorization.Location;
import com.squareup.sdk.reader.core.CallbackReference;
import com.squareup.sdk.reader.core.Result;
import com.squareup.sdk.reader.core.ResultError;
import com.squareup.sdk.reader.react.internal.ErrorHandlerUtils;
import com.squareup.sdk.reader.react.internal.converter.LocationConverter;
import com.squareup.sdk.reader.react.internal.ReaderSdkException;
/**
 * React Native module "RNReaderSDKAuthorization": bridges Reader SDK
 * authorization state and the authorize/deauthorize flows. Synchronous state
 * queries resolve immediately; authorize/deauthorize register one-shot
 * callbacks and settle their Promise when the SDK reports a result.
 */
class AuthorizationModule extends ReactContextBaseJavaModule {
    // Define all the authorization error debug codes and messages below
    // These error codes and messages **MUST** align with iOS error codes and javascript error codes
    // Search KEEP_IN_SYNC_AUTHORIZE_ERROR to update all places
    // react native module debug error codes
    private static final String RN_AUTH_LOCATION_NOT_AUTHORIZED = "rn_auth_location_not_authorized";
    // react native module debug messages
    private static final String RN_MESSAGE_AUTH_LOCATION_NOT_AUTHORIZED = "This device must be authorized with a Square location in order to get that location. Obtain an authorization code for a Square location from the mobile/authorization-code endpoint and then call authorizeAsync.";
    // Android only react native errors and messages
    private static final String RN_AUTHORIZE_ALREADY_IN_PROGRESS = "rn_authorize_already_in_progress";
    private static final String RN_DEAUTHORIZE_ALREADY_IN_PROGRESS = "rn_deauthorize_already_in_progress";
    private static final String RN_MESSAGE_AUTHORIZE_ALREADY_IN_PROGRESS = "Authorization is already in progress. Please wait for authorizeAsync to complete.";
    private static final String RN_MESSAGE_DEAUTHORIZE_ALREADY_IN_PROGRESS = "Deauthorization is already in progress. Please wait for deauthorizeAsync to complete.";
    // Each ref is non-null exactly while its flow is in flight; it doubles as
    // that flow's "already in progress" guard. volatile: written from the JS
    // bridge thread and from the SDK result callbacks.
    private volatile CallbackReference authorizeCallbackRef;
    private volatile CallbackReference deauthorizeCallbackRef;
    // Used to hop onto the Android main thread before calling into the SDK.
    private final Handler mainLooperHandler;

    public AuthorizationModule(ReactApplicationContext reactContext) {
        super(reactContext);
        mainLooperHandler = new Handler(Looper.getMainLooper());
    }

    /** Name under which this module is exposed to JS (NativeModules.RNReaderSDKAuthorization). */
    @Override
    public String getName() {
        return "RNReaderSDKAuthorization";
    }

    /** Resolves with whether the SDK is currently authorized (boolean). */
    @ReactMethod
    public void isAuthorized(Promise promise) {
        promise.resolve(ReaderSdk.authorizationManager().getAuthorizationState().isAuthorized());
    }

    /** Resolves with whether an SDK authorization is currently in progress (boolean). */
    @ReactMethod
    public void isAuthorizationInProgress(Promise promise) {
        promise.resolve(ReaderSdk.authorizationManager().getAuthorizationState().isAuthorizationInProgress());
    }

    /**
     * Resolves with the currently authorized Square location (as a JS object),
     * or rejects with USAGE_ERROR when the SDK is not authorized.
     */
    @ReactMethod
    public void authorizedLocation(Promise promise) {
        if (ReaderSdk.authorizationManager().getAuthorizationState().isAuthorized()) {
            LocationConverter locationConverter = new LocationConverter();
            promise.resolve(locationConverter.toJSObject(ReaderSdk.authorizationManager().getAuthorizationState().getAuthorizedLocation()));
        } else {
            String errorJsonMessage = ErrorHandlerUtils.createNativeModuleError(RN_AUTH_LOCATION_NOT_AUTHORIZED, RN_MESSAGE_AUTH_LOCATION_NOT_AUTHORIZED);
            promise.reject(ErrorHandlerUtils.USAGE_ERROR, new ReaderSdkException(errorJsonMessage));
        }
    }

    /**
     * Authorizes the SDK with the given mobile authorization code.
     * Rejects immediately with USAGE_ERROR when an authorize flow is already in
     * flight; otherwise the promise is settled later by the SDK callback
     * (resolved with the authorized Location, or rejected with the serialized error).
     */
    @ReactMethod
    public void authorize(final String authCode, final Promise promise) {
        if (authorizeCallbackRef != null) {
            String errorJsonMessage = ErrorHandlerUtils.createNativeModuleError(RN_AUTHORIZE_ALREADY_IN_PROGRESS, RN_MESSAGE_AUTHORIZE_ALREADY_IN_PROGRESS);
            promise.reject(ErrorHandlerUtils.USAGE_ERROR, new ReaderSdkException(errorJsonMessage));
            return;
        }
        AuthorizeCallback authCallback = new AuthorizeCallback() {
            @Override
            public void onResult(Result<Location, ResultError<AuthorizeErrorCode>> result) {
                // One-shot: release the SDK registration and clear the in-progress
                // flag before settling the promise.
                authorizeCallbackRef.clear();
                authorizeCallbackRef = null;
                if (result.isError()) {
                    ResultError<AuthorizeErrorCode> error = result.getError();
                    String errorJsonMessage = ErrorHandlerUtils.serializeErrorToJson(error.getDebugCode(), error.getMessage(), error.getDebugMessage());
                    promise.reject(ErrorHandlerUtils.getErrorCode(error.getCode()), new ReaderSdkException(errorJsonMessage));
                    return;
                }
                Location location = result.getSuccessValue();
                LocationConverter locationConverter = new LocationConverter();
                promise.resolve(locationConverter.toJSObject(location));
            }
        };
        authorizeCallbackRef = ReaderSdk.authorizationManager().addAuthorizeCallback(authCallback);
        // Run the SDK call on the main thread.
        mainLooperHandler.post(new Runnable() {
            @Override
            public void run() {
                ReaderSdk.authorizationManager().authorize(authCode);
            }
        });
    }

    /** Resolves with whether the SDK can currently be deauthorized (boolean). */
    @ReactMethod
    public void canDeauthorize(Promise promise) {
        promise.resolve(ReaderSdk.authorizationManager().getAuthorizationState().canDeauthorize());
    }

    /**
     * Deauthorizes the SDK. Rejects immediately with USAGE_ERROR when a
     * deauthorize flow is already in flight; otherwise the promise is settled
     * later by the SDK callback (resolved with null on success).
     */
    @ReactMethod
    public void deauthorize(final Promise promise) {
        if (deauthorizeCallbackRef != null) {
            String errorJsonMessage = ErrorHandlerUtils.createNativeModuleError(RN_DEAUTHORIZE_ALREADY_IN_PROGRESS, RN_MESSAGE_DEAUTHORIZE_ALREADY_IN_PROGRESS);
            promise.reject(ErrorHandlerUtils.USAGE_ERROR, new ReaderSdkException(errorJsonMessage));
            return;
        }
        DeauthorizeCallback deauthCallback = new DeauthorizeCallback() {
            @Override
            public void onResult(Result<Void, ResultError<DeauthorizeErrorCode>> result) {
                // One-shot: release the SDK registration and clear the in-progress
                // flag before settling the promise.
                deauthorizeCallbackRef.clear();
                deauthorizeCallbackRef = null;
                if (result.isError()) {
                    ResultError<DeauthorizeErrorCode> error = result.getError();
                    String errorJsonMessage = ErrorHandlerUtils.serializeErrorToJson(error.getDebugCode(), error.getMessage(), error.getDebugMessage());
                    promise.reject(ErrorHandlerUtils.getErrorCode(error.getCode()), new ReaderSdkException(errorJsonMessage));
                    return;
                }
                // Success carries no payload for this flow.
                promise.resolve(null);
            }
        };
        deauthorizeCallbackRef = ReaderSdk.authorizationManager().addDeauthorizeCallback(deauthCallback);
        // Run the SDK call on the main thread.
        mainLooperHandler.post(new Runnable() {
            @Override
            public void run() {
                ReaderSdk.authorizationManager().deauthorize();
            }
        });
    }

    @Override
    public void onCatalystInstanceDestroy() {
        super.onCatalystInstanceDestroy();
        // clear the callback to avoid memory leaks when react native module is destroyed
        if (authorizeCallbackRef != null) {
            authorizeCallbackRef.clear();
        }
        if (deauthorizeCallbackRef != null) {
            deauthorizeCallbackRef.clear();
        }
    }
}
| 8,276 |
0 | Create_ds/react-native-square-reader-sdk/android/src/main/java/com/squareup/sdk/reader/react | Create_ds/react-native-square-reader-sdk/android/src/main/java/com/squareup/sdk/reader/react/internal/ErrorHandlerUtils.java | /*
Copyright 2022 Square Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package com.squareup.sdk.reader.react.internal;
import com.squareup.sdk.reader.authorization.AuthorizeErrorCode;
import com.squareup.sdk.reader.checkout.CheckoutErrorCode;
import com.squareup.sdk.reader.core.ErrorCode;
import com.squareup.sdk.reader.crm.StoreCustomerCardErrorCode;
import com.squareup.sdk.reader.hardware.ReaderSettingsErrorCode;
import java.util.LinkedHashMap;
import java.util.Map;
import org.json.JSONException;
import org.json.JSONObject;
public class ErrorHandlerUtils {
// Define all the error codes and messages below
// These error codes and messages **MUST** align with iOS error codes and javascript error codes
// Usage error
public static final String USAGE_ERROR = "USAGE_ERROR";
private static final Map<AuthorizeErrorCode, String> authorizeErrorMap;
// Maps from each expected native SDK error code to the string error code that is
// surfaced to the JavaScript layer. USAGE_ERROR is intentionally absent from every
// map because usage errors are handled separately in getErrorCode.
private static final Map<CheckoutErrorCode, String> checkoutErrorMap;
private static final Map<ReaderSettingsErrorCode, String> readerSettingsErrorMap;
private static final Map<StoreCustomerCardErrorCode, String> storeCustomerCardErrorMap;
static {
    // Build Expected Error mappings. Iterating over values() with exhaustive
    // switches (throwing on any unhandled constant) makes initialization fail fast
    // if the Reader SDK introduces a new error code we have not mapped.
    authorizeErrorMap = new LinkedHashMap<>();
    for (AuthorizeErrorCode authorizeErrorCode : AuthorizeErrorCode.values()) {
        // Search KEEP_IN_SYNC_AUTHORIZE_ERROR to update all places
        switch (authorizeErrorCode) {
            case NO_NETWORK:
                authorizeErrorMap.put(AuthorizeErrorCode.NO_NETWORK, "AUTHORIZE_NO_NETWORK");
                break;
            case USAGE_ERROR:
                // Usage error is handled separately
                break;
            default:
                throw new RuntimeException("Unexpected auth error code: " + authorizeErrorCode.name());
        }
    }
    checkoutErrorMap = new LinkedHashMap<>();
    for (CheckoutErrorCode checkoutErrorCode : CheckoutErrorCode.values()) {
        // Search KEEP_IN_SYNC_CHECKOUT_ERROR to update all places
        switch (checkoutErrorCode) {
            case SDK_NOT_AUTHORIZED:
                checkoutErrorMap.put(CheckoutErrorCode.SDK_NOT_AUTHORIZED, "CHECKOUT_SDK_NOT_AUTHORIZED");
                break;
            case CANCELED:
                checkoutErrorMap.put(CheckoutErrorCode.CANCELED, "CHECKOUT_CANCELED");
                break;
            case USAGE_ERROR:
                // Usage error is handled separately
                break;
            default:
                throw new RuntimeException("Unexpected checkout error code: " + checkoutErrorCode.name());
        }
    }
    readerSettingsErrorMap = new LinkedHashMap<>();
    for (ReaderSettingsErrorCode readerSettingsErrorCode : ReaderSettingsErrorCode.values()) {
        // Search KEEP_IN_SYNC_READER_SETTINGS_ERROR to update all places
        switch (readerSettingsErrorCode) {
            case SDK_NOT_AUTHORIZED:
                readerSettingsErrorMap.put(ReaderSettingsErrorCode.SDK_NOT_AUTHORIZED, "READER_SETTINGS_SDK_NOT_AUTHORIZED");
                break;
            case USAGE_ERROR:
                // Usage error is handled separately
                break;
            default:
                throw new RuntimeException("Unexpected reader settings error code: " + readerSettingsErrorCode.name());
        }
    }
    storeCustomerCardErrorMap = new LinkedHashMap<>();
    for (StoreCustomerCardErrorCode storeCustomerCardErrorCode : StoreCustomerCardErrorCode.values()) {
        // Search KEEP_IN_SYNC_STORE_CUSTOMER_CARD_ERROR to update all places
        switch (storeCustomerCardErrorCode) {
            case CANCELED:
                storeCustomerCardErrorMap.put(StoreCustomerCardErrorCode.CANCELED, "STORE_CUSTOMER_CARD_CANCELED");
                break;
            case INVALID_CUSTOMER_ID:
                storeCustomerCardErrorMap.put(StoreCustomerCardErrorCode.INVALID_CUSTOMER_ID, "STORE_CUSTOMER_CARD_INVALID_CUSTOMER_ID");
                break;
            case SDK_NOT_AUTHORIZED:
                storeCustomerCardErrorMap.put(StoreCustomerCardErrorCode.SDK_NOT_AUTHORIZED, "STORE_CUSTOMER_CARD_SDK_NOT_AUTHORIZED");
                break;
            case NO_NETWORK:
                storeCustomerCardErrorMap.put(StoreCustomerCardErrorCode.NO_NETWORK, "STORE_CUSTOMER_CARD_NO_NETWORK");
                break;
            case USAGE_ERROR:
                // Usage error is handled separately
                break;
            default:
                // Bug fix: this message previously said "reader settings" (copy-paste error).
                throw new RuntimeException("Unexpected store customer card error code: " + storeCustomerCardErrorCode.name());
        }
    }
}
/**
 * Builds the serialized JSON error payload for an unexpected native-module failure,
 * pointing the app developer at the given native-module error code.
 */
public static String createNativeModuleError(String nativeModuleErrorCode, String debugMessage) {
    String userMessage = String.format(
        "Something went wrong. Please contact the developer of this application and provide them with this error code: %s",
        nativeModuleErrorCode);
    return serializeErrorToJson(nativeModuleErrorCode, userMessage, debugMessage);
}
/**
 * Serializes an error into the JSON shape consumed by the JS layer:
 * {"debugCode": ..., "message": ..., "debugMessage": ...}.
 * Falls back to a minimal valid JSON object if serialization itself fails.
 */
public static String serializeErrorToJson(String debugCode, String message, String debugMessage) {
    JSONObject errorData = new JSONObject();
    try {
        errorData.put("debugCode", debugCode);
        errorData.put("message", message);
        errorData.put("debugMessage", debugMessage);
    } catch (JSONException ex) {
        // Bug fix: the previous fallback used single quotes, which is not valid
        // JSON (RFC 8259 requires double-quoted strings) and could fail parsing
        // on the JS side.
        return "{\"message\": \"failed to serialize error\"}";
    }
    return errorData.toString();
}
/**
 * Translates a native SDK error code into the plugin's string error code.
 * Usage errors share a single generic code; all other codes are resolved via
 * the per-category maps built in the static initializer.
 */
public static String getErrorCode(ErrorCode nativeErrorCode) {
    if (nativeErrorCode.isUsageError()) {
        return USAGE_ERROR;
    }
    if (nativeErrorCode instanceof AuthorizeErrorCode) {
        return authorizeErrorMap.get((AuthorizeErrorCode) nativeErrorCode);
    }
    if (nativeErrorCode instanceof CheckoutErrorCode) {
        return checkoutErrorMap.get((CheckoutErrorCode) nativeErrorCode);
    }
    if (nativeErrorCode instanceof ReaderSettingsErrorCode) {
        return readerSettingsErrorMap.get((ReaderSettingsErrorCode) nativeErrorCode);
    }
    if (nativeErrorCode instanceof StoreCustomerCardErrorCode) {
        return storeCustomerCardErrorMap.get((StoreCustomerCardErrorCode) nativeErrorCode);
    }
    throw new RuntimeException("Unexpected error code: " + nativeErrorCode.toString());
}
}
| 8,277 |
0 | Create_ds/react-native-square-reader-sdk/android/src/main/java/com/squareup/sdk/reader/react | Create_ds/react-native-square-reader-sdk/android/src/main/java/com/squareup/sdk/reader/react/internal/ReaderSdkException.java | /*
Copyright 2022 Square Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package com.squareup.sdk.reader.react.internal;
/**
 * Checked exception thrown by the Reader SDK React Native bridge when a
 * request from the JS layer cannot be fulfilled.
 */
public class ReaderSdkException extends Exception {
    // Exception is Serializable; declare an explicit serialVersionUID so the
    // serialized form is stable across recompilations.
    private static final long serialVersionUID = 1L;

    public ReaderSdkException(String message) {
        super(message);
    }
}
| 8,278 |
0 | Create_ds/react-native-square-reader-sdk/android/src/main/java/com/squareup/sdk/reader/react | Create_ds/react-native-square-reader-sdk/android/src/main/java/com/squareup/sdk/reader/react/internal/DateFormatUtils.java | /*
Copyright 2022 Square Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package com.squareup.sdk.reader.react.internal;
import java.text.DateFormat;
import java.text.SimpleDateFormat;
import java.util.Date;
import java.util.Locale;
import java.util.TimeZone;
/** Formats dates as ISO 8601 timestamps in UTC for the JS layer. */
public class DateFormatUtils {
    // SimpleDateFormat is not thread-safe, so each thread gets its own instance.
    private static final ThreadLocal<DateFormat> ISO_8601 = new ThreadLocal<DateFormat>() {
        @Override protected DateFormat initialValue() {
            // Locale.US keeps digits/formatting locale-independent.
            DateFormat format = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ssZ", Locale.US);
            // Bug fix: without an explicit time zone the formatter used the
            // device's default zone, contradicting the method's UTC contract.
            format.setTimeZone(TimeZone.getTimeZone("UTC"));
            return format;
        }
    };

    /** Returns the given date formatted as e.g. "1970-01-01T00:00:00+0000" (UTC). */
    public static String formatISO8601UTC(Date date) {
        return ISO_8601.get().format(date);
    }
}
| 8,279 |
0 | Create_ds/react-native-square-reader-sdk/android/src/main/java/com/squareup/sdk/reader/react/internal | Create_ds/react-native-square-reader-sdk/android/src/main/java/com/squareup/sdk/reader/react/internal/converter/TenderConverter.java | /*
Copyright 2022 Square Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package com.squareup.sdk.reader.react.internal.converter;
import com.facebook.react.bridge.WritableMap;
import com.facebook.react.bridge.WritableNativeMap;
import com.squareup.sdk.reader.checkout.Tender;
import com.squareup.sdk.reader.react.internal.DateFormatUtils;
import java.util.HashMap;
import java.util.Map;
class TenderConverter {
    // Lookup from the native tender type to the string constant exposed to JS.
    private static final Map<Tender.Type, String> tenderTypeMap = new HashMap<>();
    static {
        for (Tender.Type tenderType : Tender.Type.values()) {
            final String jsTypeName;
            switch (tenderType) {
                case CARD:
                    jsTypeName = "card";
                    break;
                case CASH:
                    jsTypeName = "cash";
                    break;
                case OTHER:
                    jsTypeName = "other";
                    break;
                default:
                    // An unrecognized constant should never appear when the matching
                    // Reader SDK version is loaded with the plugin; degrade gracefully
                    // rather than break the plugin.
                    jsTypeName = "unknown";
                    break;
            }
            tenderTypeMap.put(tenderType, jsTypeName);
        }
    }

    private final MoneyConverter moneyConverter = new MoneyConverter();
    private final TenderCardDetailsConverter tenderCardDetailsConverter = new TenderCardDetailsConverter();
    private final TenderCashDetailsConverter tenderCashDetailsConverter = new TenderCashDetailsConverter();

    public TenderConverter() {
    }

    /** Converts a native Tender into a JS-friendly writable map. */
    public WritableMap toJSObject(Tender tender) {
        WritableMap tenderMap = new WritableNativeMap();
        tenderMap.putString("createdAt", DateFormatUtils.formatISO8601UTC(tender.getCreatedAt()));
        tenderMap.putMap("tipMoney", moneyConverter.toJSObject(tender.getTipMoney()));
        tenderMap.putMap("totalMoney", moneyConverter.toJSObject(tender.getTotalMoney()));
        Tender.Type type = tender.getType();
        tenderMap.putString("type", tenderTypeMap.get(type));
        // Card and cash tenders carry extra type-specific details.
        if (type == Tender.Type.CARD) {
            tenderMap.putString("tenderId", tender.getTenderId());
            tenderMap.putMap("cardDetails", tenderCardDetailsConverter.toJSObject(tender.getCardDetails()));
        } else if (type == Tender.Type.CASH) {
            tenderMap.putMap("cashDetails", tenderCashDetailsConverter.toJSObject(tender.getCashDetails()));
        }
        return tenderMap;
    }
}
| 8,280 |
0 | Create_ds/react-native-square-reader-sdk/android/src/main/java/com/squareup/sdk/reader/react/internal | Create_ds/react-native-square-reader-sdk/android/src/main/java/com/squareup/sdk/reader/react/internal/converter/TenderCashDetailsConverter.java | /*
Copyright 2022 Square Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package com.squareup.sdk.reader.react.internal.converter;
import com.facebook.react.bridge.WritableMap;
import com.facebook.react.bridge.WritableNativeMap;
import com.squareup.sdk.reader.checkout.TenderCashDetails;
class TenderCashDetailsConverter {
    private final MoneyConverter moneyConverter = new MoneyConverter();

    public TenderCashDetailsConverter() {
    }

    /** Converts native cash tender details into a JS-friendly writable map. */
    public WritableMap toJSObject(TenderCashDetails tenderCashDetails) {
        WritableMap cashMap = new WritableNativeMap();
        cashMap.putMap("buyerTenderedMoney", moneyConverter.toJSObject(tenderCashDetails.getBuyerTenderedMoney()));
        cashMap.putMap("changeBackMoney", moneyConverter.toJSObject(tenderCashDetails.getChangeBackMoney()));
        return cashMap;
    }
}
| 8,281 |
0 | Create_ds/react-native-square-reader-sdk/android/src/main/java/com/squareup/sdk/reader/react/internal | Create_ds/react-native-square-reader-sdk/android/src/main/java/com/squareup/sdk/reader/react/internal/converter/MoneyConverter.java | /*
Copyright 2022 Square Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package com.squareup.sdk.reader.react.internal.converter;
import com.facebook.react.bridge.WritableMap;
import com.facebook.react.bridge.WritableNativeMap;
import com.squareup.sdk.reader.checkout.Money;
class MoneyConverter {
    /**
     * Converts a native Money value into a JS-friendly writable map.
     * WritableMap has no long setter, so the long amount is stored as a double.
     */
    public WritableMap toJSObject(Money money) {
        WritableMap moneyMap = new WritableNativeMap();
        moneyMap.putDouble("amount", money.getAmount());
        moneyMap.putString("currencyCode", money.getCurrencyCode().name());
        return moneyMap;
    }
}
| 8,282 |
0 | Create_ds/react-native-square-reader-sdk/android/src/main/java/com/squareup/sdk/reader/react/internal | Create_ds/react-native-square-reader-sdk/android/src/main/java/com/squareup/sdk/reader/react/internal/converter/TenderCardDetailsConverter.java | /*
Copyright 2022 Square Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package com.squareup.sdk.reader.react.internal.converter;
import com.facebook.react.bridge.WritableMap;
import com.facebook.react.bridge.WritableNativeMap;
import com.squareup.sdk.reader.checkout.TenderCardDetails;
import java.util.HashMap;
import java.util.Map;
class TenderCardDetailsConverter {
    // Lookup from the native card entry method to the string constant exposed to JS.
    private static final Map<TenderCardDetails.EntryMethod, String> entryMethodStringMap = new HashMap<>();
    static {
        for (TenderCardDetails.EntryMethod entryMethod : TenderCardDetails.EntryMethod.values()) {
            final String methodName;
            switch (entryMethod) {
                case MANUALLY_ENTERED:
                    methodName = "MANUALLY_ENTERED";
                    break;
                case SWIPE:
                    methodName = "SWIPE";
                    break;
                case CHIP:
                    methodName = "CHIP";
                    break;
                case CONTACTLESS:
                    methodName = "CONTACTLESS";
                    break;
                default:
                    // An unrecognized constant should never appear when the matching
                    // Reader SDK version is loaded with the plugin; degrade gracefully
                    // rather than break the plugin.
                    methodName = "UNKNOWN";
                    break;
            }
            entryMethodStringMap.put(entryMethod, methodName);
        }
    }

    private final CardConverter cardConverter = new CardConverter();

    public TenderCardDetailsConverter() {
    }

    /** Converts native card tender details into a JS-friendly writable map. */
    public WritableMap toJSObject(TenderCardDetails tenderCardDetails) {
        WritableMap detailsMap = new WritableNativeMap();
        detailsMap.putString("entryMethod", entryMethodStringMap.get(tenderCardDetails.getEntryMethod()));
        detailsMap.putMap("card", cardConverter.toJSObject(tenderCardDetails.getCard()));
        return detailsMap;
    }
}
| 8,283 |
0 | Create_ds/react-native-square-reader-sdk/android/src/main/java/com/squareup/sdk/reader/react/internal | Create_ds/react-native-square-reader-sdk/android/src/main/java/com/squareup/sdk/reader/react/internal/converter/LocationConverter.java | /*
Copyright 2022 Square Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package com.squareup.sdk.reader.react.internal.converter;
import com.facebook.react.bridge.WritableMap;
import com.facebook.react.bridge.WritableNativeMap;
import com.squareup.sdk.reader.authorization.Location;
public final class LocationConverter {
    private final MoneyConverter moneyConverter = new MoneyConverter();

    public LocationConverter() {
    }

    /** Converts a native Location into a JS-friendly writable map. */
    public WritableMap toJSObject(Location location) {
        WritableMap locationMap = new WritableNativeMap();
        locationMap.putString("locationId", location.getLocationId());
        locationMap.putString("name", location.getName());
        locationMap.putString("businessName", location.getBusinessName());
        locationMap.putBoolean("isCardProcessingActivated", location.isCardProcessingActivated());
        locationMap.putMap("minimumCardPaymentAmountMoney", moneyConverter.toJSObject(location.getMinimumCardPaymentAmountMoney()));
        locationMap.putMap("maximumCardPaymentAmountMoney", moneyConverter.toJSObject(location.getMaximumCardPaymentAmountMoney()));
        locationMap.putString("currencyCode", location.getCurrencyCode().name());
        return locationMap;
    }
}
| 8,284 |
0 | Create_ds/react-native-square-reader-sdk/android/src/main/java/com/squareup/sdk/reader/react/internal | Create_ds/react-native-square-reader-sdk/android/src/main/java/com/squareup/sdk/reader/react/internal/converter/CardConverter.java | /*
Copyright 2022 Square Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package com.squareup.sdk.reader.react.internal.converter;
import com.facebook.react.bridge.WritableMap;
import com.facebook.react.bridge.WritableNativeMap;
import com.squareup.sdk.reader.checkout.Card;
import java.util.HashMap;
import java.util.Map;
public class CardConverter {
    // Lookup from the native card brand to the string constant exposed to JS.
    private static final Map<Card.Brand, String> brandStringMap = new HashMap<>();
    static {
        for (Card.Brand brand : Card.Brand.values()) {
            final String brandName;
            switch (brand) {
                case VISA:
                    brandName = "VISA";
                    break;
                case MASTERCARD:
                    brandName = "MASTERCARD";
                    break;
                case AMERICAN_EXPRESS:
                    brandName = "AMERICAN_EXPRESS";
                    break;
                case DISCOVER:
                    brandName = "DISCOVER";
                    break;
                case DISCOVER_DINERS:
                    brandName = "DISCOVER_DINERS";
                    break;
                case INTERAC:
                    brandName = "INTERAC";
                    break;
                case JCB:
                    brandName = "JCB";
                    break;
                case CHINA_UNIONPAY:
                    brandName = "CHINA_UNIONPAY";
                    break;
                case SQUARE_GIFT_CARD:
                    brandName = "SQUARE_GIFT_CARD";
                    break;
                case EFTPOS:
                    brandName = "EFTPOS";
                    break;
                case FELICA:
                    brandName = "FELICA";
                    break;
                case OTHER_BRAND:
                    brandName = "OTHER_BRAND";
                    break;
                default:
                    // An unrecognized constant should never appear when the matching
                    // Reader SDK version is loaded with the plugin; degrade gracefully
                    // rather than break the plugin.
                    brandName = "UNKNOWN";
                    break;
            }
            brandStringMap.put(brand, brandName);
        }
    }

    /**
     * Converts a native Card into a JS-friendly writable map.
     * Optional fields are omitted when null ("ignore if null").
     */
    public WritableMap toJSObject(Card card) {
        WritableMap cardMap = new WritableNativeMap();
        cardMap.putString("brand", brandStringMap.get(card.getBrand()));
        cardMap.putString("lastFourDigits", card.getLastFourDigits());
        if (card.getExpirationMonth() != null) {
            cardMap.putInt("expirationMonth", card.getExpirationMonth());
        }
        if (card.getExpirationYear() != null) {
            cardMap.putInt("expirationYear", card.getExpirationYear());
        }
        if (card.getId() != null) {
            cardMap.putString("id", card.getId());
        }
        if (card.getCardholderName() != null) {
            cardMap.putString("cardholderName", card.getCardholderName());
        }
        return cardMap;
    }
}
| 8,285 |
0 | Create_ds/react-native-square-reader-sdk/android/src/main/java/com/squareup/sdk/reader/react/internal | Create_ds/react-native-square-reader-sdk/android/src/main/java/com/squareup/sdk/reader/react/internal/converter/CheckoutResultConverter.java | /*
Copyright 2022 Square Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package com.squareup.sdk.reader.react.internal.converter;
import com.facebook.react.bridge.WritableArray;
import com.facebook.react.bridge.WritableMap;
import com.facebook.react.bridge.WritableNativeArray;
import com.facebook.react.bridge.WritableNativeMap;
import com.squareup.sdk.reader.checkout.CheckoutResult;
import com.squareup.sdk.reader.checkout.Money;
import com.squareup.sdk.reader.checkout.Tender;
import com.squareup.sdk.reader.react.internal.DateFormatUtils;
public class CheckoutResultConverter {
    private final MoneyConverter moneyConverter = new MoneyConverter();
    private final TenderConverter tenderConverter = new TenderConverter();

    public CheckoutResultConverter() {
    }

    /**
     * Converts a native CheckoutResult into a JS-friendly writable map.
     * Optional fields are omitted when null ("ignore if null").
     */
    public WritableMap toJSObject(CheckoutResult result) {
        WritableMap checkoutMap = new WritableNativeMap();
        String transactionId = result.getTransactionId();
        if (transactionId != null) {
            checkoutMap.putString("transactionId", transactionId);
        }
        checkoutMap.putString("transactionClientId", result.getTransactionClientId());
        checkoutMap.putString("locationId", result.getLocationId());
        checkoutMap.putString("createdAt", DateFormatUtils.formatISO8601UTC(result.getCreatedAt()));
        checkoutMap.putMap("totalMoney", moneyConverter.toJSObject(result.getTotalMoney()));
        checkoutMap.putMap("totalTipMoney", moneyConverter.toJSObject(result.getTotalTipMoney()));
        WritableArray tenderArray = new WritableNativeArray();
        for (Tender tender : result.getTenders()) {
            tenderArray.pushMap(tenderConverter.toJSObject(tender));
        }
        checkoutMap.putArray("tenders", tenderArray);
        return checkoutMap;
    }
}
| 8,286 |
0 | Create_ds/aws-sdk-go-v2/codegen/smithy-aws-go-codegen/src/test/java/software/amazon/smithy/aws/go/codegen | Create_ds/aws-sdk-go-v2/codegen/smithy-aws-go-codegen/src/test/java/software/amazon/smithy/aws/go/codegen/customization/TestUtils.java | /*
* Copyright 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
package software.amazon.smithy.aws.go.codegen.customization;
import java.nio.file.Path;
import software.amazon.smithy.codegen.core.CodegenException;
import software.amazon.smithy.go.codegen.GoSettings;
import software.amazon.smithy.go.codegen.integration.GoIntegration;
import software.amazon.smithy.model.Model;
import software.amazon.smithy.model.loader.ModelAssembler;
import software.amazon.smithy.model.node.Node;
import software.amazon.smithy.model.shapes.ShapeId;
import software.amazon.smithy.utils.IoUtils;
/** Shared helpers for customization integration tests that load AWS service models. */
public class TestUtils {
    /** Relative path from the codegen module to the checked-in AWS service models. */
    public static final String AWS_MODELS_PATH_PREFIX = "../sdk-codegen/aws-models/";

    /**
     * Loads and parses the named AWS model file as a JSON node.
     *
     * @param modelFile model file name relative to {@link #AWS_MODELS_PATH_PREFIX}
     * @return the parsed model document
     * @throws CodegenException if the file cannot be read or parsed
     */
    public static Node getAwsModel(String modelFile) {
        try {
            return Node.parseJsonWithComments(IoUtils.readUtf8File(Path.of(AWS_MODELS_PATH_PREFIX, modelFile)));
        } catch (Exception e) {
            throw new CodegenException(e);
        }
    }

    /**
     * Assembles the given model file and runs the integration's model
     * preprocessing step against it, targeting the model's first (and expected
     * only) service shape.
     */
    public static Model preprocessModelIntegration(GoIntegration integration, String modelFile) {
        GoSettings settings = new GoSettings();
        Model model = new ModelAssembler()
                .addDocumentNode(getAwsModel(modelFile))
                .disableValidation()
                .putProperty(ModelAssembler.ALLOW_UNKNOWN_TRAITS, true)
                .assemble()
                .unwrap();
        // Fail with a descriptive error instead of an unchecked Optional.get()
        // (bare NoSuchElementException) when the model defines no service shape.
        ShapeId service = model.getServiceShapes().stream()
                .findFirst()
                .orElseThrow(() -> new CodegenException("model " + modelFile + " defines no service shape"))
                .getId();
        settings.setService(service);
        return integration.preprocessModel(model, settings);
    }
}
| 8,287 |
0 | Create_ds/aws-sdk-go-v2/codegen/smithy-aws-go-codegen/src/test/java/software/amazon/smithy/aws/go/codegen | Create_ds/aws-sdk-go-v2/codegen/smithy-aws-go-codegen/src/test/java/software/amazon/smithy/aws/go/codegen/customization/S3HttpPathBucketFilterIntegrationTest.java | /*
* Copyright 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
package software.amazon.smithy.aws.go.codegen.customization;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import org.junit.jupiter.api.Test;
import software.amazon.smithy.model.Model;
import software.amazon.smithy.model.shapes.OperationShape;
import software.amazon.smithy.model.shapes.ShapeId;
import software.amazon.smithy.model.traits.HttpTrait;
/**
 * Verifies that the S3 customization strips the {Bucket} segment from
 * HTTP-path-bound operation URIs (e.g. DeleteBucketWebsite).
 */
public class S3HttpPathBucketFilterIntegrationTest {
    @Test
    public void test() {
        Model model = TestUtils.preprocessModelIntegration(
                new S3HttpPathBucketFilterIntegration(),
                S3ModelUtils.SERVICE_S3_MODEL_FILE);
        OperationShape operation = model.expectShape(
                ShapeId.from("com.amazonaws.s3#DeleteBucketWebsite"),
                OperationShape.class);
        String uri = operation.expectTrait(HttpTrait.class)
                .getUri().toString();
        // URI is originally: /{Bucket}?website
        assertFalse(uri.contains("{Bucket}"));
        // Bug fix: JUnit's assertEquals takes (expected, actual); the arguments
        // were previously swapped, which yields misleading failure messages.
        assertEquals("/?website", uri);
    }
}
| 8,288 |
0 | Create_ds/aws-sdk-go-v2/codegen/smithy-aws-go-codegen/src/main/java/software/amazon/smithy/aws/go | Create_ds/aws-sdk-go-v2/codegen/smithy-aws-go-codegen/src/main/java/software/amazon/smithy/aws/go/codegen/JsonRpcProtocolGenerator.java | /*
* Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
package software.amazon.smithy.aws.go.codegen;
import static software.amazon.smithy.aws.go.codegen.AwsProtocolUtils.handleDecodeError;
import static software.amazon.smithy.aws.go.codegen.AwsProtocolUtils.initializeJsonDecoder;
import static software.amazon.smithy.aws.go.codegen.AwsProtocolUtils.writeJsonErrorMessageCodeDeserializer;
import java.util.HashSet;
import java.util.Set;
import java.util.TreeSet;
import java.util.stream.Collectors;
import software.amazon.smithy.codegen.core.Symbol;
import software.amazon.smithy.go.codegen.CodegenUtils;
import software.amazon.smithy.go.codegen.GoWriter;
import software.amazon.smithy.go.codegen.SmithyGoDependency;
import software.amazon.smithy.go.codegen.integration.HttpRpcProtocolGenerator;
import software.amazon.smithy.go.codegen.integration.ProtocolGenerator;
import software.amazon.smithy.go.codegen.integration.ProtocolUtils;
import software.amazon.smithy.model.Model;
import software.amazon.smithy.model.knowledge.EventStreamInfo;
import software.amazon.smithy.model.shapes.MemberShape;
import software.amazon.smithy.model.shapes.OperationShape;
import software.amazon.smithy.model.shapes.ServiceShape;
import software.amazon.smithy.model.shapes.Shape;
import software.amazon.smithy.model.shapes.ShapeId;
import software.amazon.smithy.model.shapes.StructureShape;
import software.amazon.smithy.model.shapes.UnionShape;
import software.amazon.smithy.model.traits.ErrorTrait;
import software.amazon.smithy.model.traits.EventHeaderTrait;
import software.amazon.smithy.model.traits.EventPayloadTrait;
import software.amazon.smithy.go.codegen.endpoints.EndpointResolutionGenerator;
import software.amazon.smithy.go.codegen.endpoints.FnGenerator;
/**
* Handles generating the aws.rest-json protocol for services.
*
* @inheritDoc
* @see HttpRpcProtocolGenerator
*/
abstract class JsonRpcProtocolGenerator extends HttpRpcProtocolGenerator {
private final Set<ShapeId> generatedDocumentBodyShapeSerializers = new HashSet<>();
private final Set<ShapeId> generatedEventMessageSerializers = new HashSet<>();
private final Set<ShapeId> generatedDocumentBodyShapeDeserializers = new HashSet<>();
private final Set<ShapeId> generatedEventMessageDeserializers = new HashSet<>();
/**
* Creates an AWS JSON RPC protocol generator
*/
public JsonRpcProtocolGenerator() {
    // No additional state beyond the base HTTP RPC generator; the per-shape
    // guard sets above are initialized at field declaration.
    super();
}
/**
 * All AWS JSON RPC requests are POSTed to the service root; the operation is
 * selected via the X-Amz-Target header rather than the URI path.
 */
@Override
protected String getOperationPath(GenerationContext context, OperationShape operation) {
    return "/";
}
/**
 * Writes the generator's default request headers; for AWS JSON RPC this adds
 * the X-Amz-Target header of the form "ServiceName.OperationName".
 */
@Override
protected void writeDefaultHeaders(GenerationContext context, OperationShape operation, GoWriter writer) {
    super.writeDefaultHeaders(context, operation, writer);
    ServiceShape service = context.getService();
    String target = service.getId().getName(service) + "." + operation.getId().getName(service);
    writer.write("httpBindingEncoder.SetHeader(\"X-Amz-Target\").String($S)", target);
}
/**
 * Generates the Go code that serializes the operation input into the JSON
 * request body and attaches the encoded bytes to the HTTP request stream.
 */
@Override
protected void serializeInputDocument(GenerationContext context, OperationShape operation) {
    GoWriter writer = context.getWriter().get();
    // Stub synthetic inputs mean there never was an input modeled, always serialize
    // empty JSON object
    // as place holder.
    if (CodegenUtils.isStubSynthetic(ProtocolUtils.expectInput(context.getModel(), operation))) {
        writer.addUseImports(SmithyGoDependency.STRINGS);
        writer.openBlock("if request, err = request.SetStream(strings.NewReader(`{}`)); err != nil {",
                "}", () -> {
                    writer.write("return out, metadata, &smithy.SerializationError{Err: err}");
                });
        return;
    }
    StructureShape input = ProtocolUtils.expectInput(context.getModel(), operation);
    String functionName = ProtocolGenerator.getDocumentSerializerFunctionName(input, context.getService(),
            getProtocolName());
    // Emit: encode the input with the shape's document serializer, then set the
    // encoded bytes as the request body.
    writer.addUseImports(SmithyGoDependency.SMITHY_JSON);
    writer.write("jsonEncoder := smithyjson.NewEncoder()");
    writer.openBlock("if err := $L(input, jsonEncoder.Value); err != nil {", "}", functionName, () -> {
        writer.write("return out, metadata, &smithy.SerializationError{Err: err}");
    }).write("");
    writer.addUseImports(SmithyGoDependency.BYTES);
    writer.openBlock("if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {",
            "}", () -> {
                writer.write("return out, metadata, &smithy.SerializationError{Err: err}");
            });
}
/**
 * Generates a document-body serializer for each shape, exactly once; the guard
 * set allows this method to be invoked with overlapping shape closures without
 * emitting duplicate functions.
 */
@Override
protected void generateDocumentBodyShapeSerializers(GenerationContext context, Set<Shape> shapes) {
    JsonShapeSerVisitor serVisitor = JsonShapeSerVisitor.builder()
            .context(context)
            .build();
    for (Shape shape : shapes) {
        ShapeId shapeId = shape.toShapeId();
        if (generatedDocumentBodyShapeSerializers.contains(shapeId)) {
            continue;
        }
        shape.accept(serVisitor);
        generatedDocumentBodyShapeSerializers.add(shapeId);
    }
}
/**
 * Generates the Go code that JSON-decodes the HTTP response body and feeds the
 * resulting untyped document into the output shape's document deserializer.
 */
@Override
protected void deserializeOutputDocument(GenerationContext context, OperationShape operation) {
    GoWriter writer = context.getWriter().get();
    StructureShape output = ProtocolUtils.expectOutput(context.getModel(), operation);
    String functionName = ProtocolGenerator.getDocumentDeserializerFunctionName(output, context.getService(),
            getProtocolName());
    initializeJsonDecoder(writer, "response.Body");
    // Errors in the emitted code are returned with the "out, metadata, " prefix.
    AwsProtocolUtils.decodeJsonIntoInterface(writer, "out, metadata, ");
    writer.write("err = $L(&output, shape)", functionName);
    handleDecodeError(writer, "out, metadata, ");
}
/**
 * Generates a document-body deserializer for each shape, exactly once; the
 * guard set allows this method to be invoked with overlapping shape closures
 * without emitting duplicate functions.
 */
@Override
protected void generateDocumentBodyShapeDeserializers(GenerationContext context, Set<Shape> shapes) {
    JsonShapeDeserVisitor deserVisitor = JsonShapeDeserVisitor.builder()
            .context(context)
            .build();
    for (Shape shape : shapes) {
        ShapeId shapeId = shape.toShapeId();
        if (generatedDocumentBodyShapeDeserializers.contains(shapeId)) {
            continue;
        }
        shape.accept(deserVisitor);
        generatedDocumentBodyShapeDeserializers.add(shapeId);
    }
}
/**
 * Generates the Go deserializer for a modeled error shape: decodes the
 * buffered error body, populates the error struct via the shape's document
 * deserializer, and rewinds the body before returning the error value.
 */
@Override
protected void deserializeError(GenerationContext context, StructureShape shape) {
    GoWriter writer = context.getWriter().get();
    Symbol symbol = context.getSymbolProvider().toSymbol(shape);
    String functionName = ProtocolGenerator.getDocumentDeserializerFunctionName(shape, context.getService(),
            getProtocolName());
    initializeJsonDecoder(writer, "errorBody");
    AwsProtocolUtils.decodeJsonIntoInterface(writer, "");
    writer.write("output := &$T{}", symbol);
    writer.write("err := $L(&output, shape)", functionName);
    writer.write("");
    handleDecodeError(writer);
    // Rewind the buffered error body so subsequent consumers can re-read it.
    writer.write("errorBody.Seek(0, io.SeekStart)");
    writer.write("return output");
}
@Override
public void generateProtocolTests(GenerationContext context) {
    // Delegate HTTP protocol compliance test generation to the shared utility.
    AwsProtocolUtils.generateHttpProtocolTests(context);
}

@Override
protected void writeErrorMessageCodeDeserializer(GenerationContext context) {
    // JSON protocols share a common error code/message extraction strategy.
    writeJsonErrorMessageCodeDeserializer(context);
}
@Override
public void generateProtocolDocumentMarshalerUnmarshalDocument(GenerationContext context) {
JsonProtocolDocumentUtils.generateProtocolDocumentMarshalerUnmarshalDocument(context);
}
@Override
public void generateProtocolDocumentMarshalerMarshalDocument(GenerationContext context) {
JsonProtocolDocumentUtils.generateProtocolDocumentMarshalerMarshalDocument(context);
}
/** Delegates document unmarshaler "UnmarshalDocument" codegen to the shared JSON document utils. */
@Override
public void generateProtocolDocumentUnmarshalerUnmarshalDocument(GenerationContext context) {
    JsonProtocolDocumentUtils.generateProtocolDocumentUnmarshalerUnmarshalDocument(context);
}
/** Delegates document unmarshaler "MarshalDocument" codegen to the shared JSON document utils. */
@Override
public void generateProtocolDocumentUnmarshalerMarshalDocument(GenerationContext context) {
    JsonProtocolDocumentUtils.generateProtocolDocumentUnmarshalerMarshalDocument(context);
}
/** Generates the shared event stream reader/writer components for this protocol. */
@Override
public void generateEventStreamComponents(GenerationContext context) {
    AwsEventStreamUtils.generateEventStreamComponents(context);
}
/** Wires event stream setup into the operation serializer middleware using the HTTP binding encoder. */
@Override
protected void writeOperationSerializerMiddlewareEventStreamSetup(GenerationContext context, EventStreamInfo info) {
    AwsEventStreamUtils.writeOperationSerializerMiddlewareEventStreamSetup(context, info,
            "httpBindingEncoder");
}
/**
 * Generates serialization support for an event stream union: the stream
 * serializer, a message serializer per non-error event member, the document
 * serializers those messages need, and the initial-request serializers.
 */
@Override
protected void generateEventStreamSerializers(
        GenerationContext context,
        UnionShape eventUnion,
        Set<EventStreamInfo> eventStreamInfos) {
    Model model = context.getModel();
    AwsEventStreamUtils.generateEventStreamSerializer(context, eventUnion);
    // Only non-error members of the event union get message serializers.
    var memberShapes = eventUnion.members().stream()
            .filter(ms -> ms.getMemberTrait(model, ErrorTrait.class).isEmpty())
            .collect(Collectors.toCollection(TreeSet::new));
    // Shapes that still need plain document serializer functions generated.
    final var eventDocumentShapes = new TreeSet<Shape>();
    for (MemberShape member : memberShapes) {
        var targetShape = model.expectShape(member.getTarget());
        // Skip targets already handled in a previous pass.
        if (generatedEventMessageSerializers.contains(targetShape.toShapeId())) {
            continue;
        }
        AwsEventStreamUtils.generateEventMessageSerializer(context, targetShape, (ctx, payloadTarget, operand) -> {
            var functionName = ProtocolGenerator.getDocumentSerializerFunctionName(payloadTarget,
                    ctx.getService(), ctx.getProtocolName());
            AwsProtocolUtils.writeJsonEventMessageSerializerDelegator(ctx, functionName, operand,
                    getDocumentContentType());
        });
        generatedEventMessageSerializers.add(targetShape.toShapeId());
        // Members with eventHeader/eventPayload bindings only require document
        // serde for an explicit payload target that itself needs one.
        var hasBindings = targetShape.members().stream()
                .filter(ms -> ms.getTrait(EventHeaderTrait.class).isPresent()
                        || ms.getTrait(EventPayloadTrait.class).isPresent())
                .findAny();
        if (hasBindings.isPresent()) {
            var payload = targetShape.members().stream()
                    .filter(ms -> ms.getTrait(EventPayloadTrait.class).isPresent())
                    .map(ms -> model.expectShape(ms.getTarget()))
                    .filter(ProtocolUtils::requiresDocumentSerdeFunction)
                    .findAny();
            payload.ifPresent(eventDocumentShapes::add);
            continue;
        }
        eventDocumentShapes.add(targetShape);
    }
    // Include transitively required shapes, then emit their serializers.
    eventDocumentShapes.addAll(ProtocolUtils.resolveRequiredDocumentShapeSerde(model, eventDocumentShapes));
    generateDocumentBodyShapeSerializers(context, eventDocumentShapes);
    for (EventStreamInfo streamInfo : eventStreamInfos) {
        var inputShape = model.expectShape(streamInfo.getOperation().getInput().get());
        var functionName = ProtocolGenerator.getDocumentSerializerFunctionName(inputShape,
                context.getService(), context.getProtocolName());
        AwsEventStreamUtils.generateEventMessageRequestSerializer(context, inputShape,
                (ctx, payloadTarget, operand) -> {
                    AwsProtocolUtils.writeJsonEventMessageSerializerDelegator(ctx, functionName, operand,
                            getDocumentContentType());
                });
        // Initial-request members are serialized with the normal document
        // visitor, restricted to the initial message members.
        var initialMessageMembers = streamInfo.getInitialMessageMembers()
                .values();
        inputShape.accept(JsonShapeSerVisitor.builder()
                .context(context)
                .memberFilter(initialMessageMembers::contains)
                .serializerNameProvider((shape, serviceShape, proto) -> functionName)
                .build());
    }
}
/**
 * Generates deserialization support for an event stream union: the stream
 * deserializer, exception deserializers for error members, message
 * deserializers for the rest, plus the document deserializers they need.
 */
@Override
protected void generateEventStreamDeserializers(
        GenerationContext context,
        UnionShape eventUnion,
        Set<EventStreamInfo> eventStreamInfos) {
    var model = context.getModel();
    AwsEventStreamUtils.generateEventStreamDeserializer(context, eventUnion);
    AwsEventStreamUtils.generateEventStreamExceptionDeserializer(context, eventUnion,
            AwsProtocolUtils::writeJsonEventStreamUnknownExceptionDeserializer);
    // Shapes that still need plain document deserializer functions generated.
    final var eventDocumentShapes = new TreeSet<Shape>();
    for (MemberShape shape : eventUnion.members()) {
        var targetShape = model.expectShape(shape.getTarget());
        // Skip targets already handled in a previous pass.
        if (generatedEventMessageDeserializers.contains(targetShape.toShapeId())) {
            continue;
        }
        generatedEventMessageDeserializers.add(targetShape.toShapeId());
        if (shape.getMemberTrait(model, ErrorTrait.class).isPresent()) {
            // Error members deserialize into a modeled exception value and return it.
            AwsEventStreamUtils.generateEventMessageExceptionDeserializer(context, targetShape,
                    (ctx, payloadTarget) -> {
                        AwsProtocolUtils.initializeJsonEventMessageDeserializer(ctx);
                        var functionName = ProtocolGenerator.getDocumentDeserializerFunctionName(
                                payloadTarget, ctx.getService(), getProtocolName());
                        var ctxWriter = ctx.getWriter().get();
                        ctxWriter.write("v := &$T{}", ctx.getSymbolProvider().toSymbol(payloadTarget))
                                .openBlock("if err := $L(&v, shape); err != nil {", "}", functionName,
                                        () -> handleDecodeError(ctxWriter))
                                .write("return v");
                    });
            eventDocumentShapes.add(targetShape);
        } else {
            AwsEventStreamUtils.generateEventMessageDeserializer(context, targetShape,
                    (ctx, payloadTarget, operand) -> {
                        AwsProtocolUtils.initializeJsonEventMessageDeserializer(ctx);
                        var functionName = ProtocolGenerator.getDocumentDeserializerFunctionName(
                                payloadTarget, ctx.getService(), getProtocolName());
                        var ctxWriter = ctx.getWriter().get();
                        ctxWriter.openBlock("if err := $L(&$L, shape); err != nil {", "}", functionName, operand,
                                () -> handleDecodeError(ctxWriter))
                                .write("return nil");
                    });
            // Members with eventHeader/eventPayload bindings only require document
            // serde for an explicit payload target that itself needs one.
            var hasBindings = targetShape.members().stream()
                    .filter(ms -> ms.getTrait(EventHeaderTrait.class).isPresent()
                            || ms.getTrait(EventPayloadTrait.class).isPresent())
                    .findAny();
            if (hasBindings.isPresent()) {
                var payload = targetShape.members().stream()
                        .filter(ms -> ms.getTrait(EventPayloadTrait.class).isPresent())
                        .map(ms -> model.expectShape(ms.getTarget()))
                        .filter(ProtocolUtils::requiresDocumentSerdeFunction)
                        .findAny();
                payload.ifPresent(eventDocumentShapes::add);
                continue;
            }
            eventDocumentShapes.add(targetShape);
        }
    }
    // Include transitively required shapes, then emit their deserializers.
    eventDocumentShapes.addAll(ProtocolUtils.resolveRequiredDocumentShapeSerde(model, eventDocumentShapes));
    generateDocumentBodyShapeDeserializers(context, eventDocumentShapes);
    for (EventStreamInfo streamInfo : eventStreamInfos) {
        var outputShape = model.expectShape(streamInfo.getOperation().getOutput().get());
        var functionName = ProtocolGenerator.getDocumentDeserializerFunctionName(outputShape,
                context.getService(), context.getProtocolName());
        AwsEventStreamUtils.generateEventMessageRequestDeserializer(context, outputShape,
                (ctx, payloadTarget, operand) -> {
                    AwsProtocolUtils.initializeJsonEventMessageDeserializer(ctx, "nil,");
                    var ctxWriter = ctx.getWriter().get();
                    ctxWriter.openBlock("if err := $L(&$L, shape); err != nil {", "}", functionName, operand,
                            () -> handleDecodeError(ctxWriter, "nil,"))
                            .write("return v, nil");
                });
        // Initial-response members are deserialized with the normal document
        // visitor, restricted to the initial message members.
        var initialMessageMembers = streamInfo.getInitialMessageMembers()
                .values();
        outputShape.accept(JsonShapeDeserVisitor.builder()
                .context(context)
                .memberFilter(initialMessageMembers::contains)
                .deserializerNameProvider((shape, serviceShape, proto) -> functionName)
                .build());
    }
}
/**
 * Generates endpoint resolution codegen, delegating to the shared generator
 * configured with the AWS rules-function provider.
 */
@Override
public void generateEndpointResolution(GenerationContext context) {
    new EndpointResolutionGenerator(new AwsFnProvider()).generate(context);
}
}
| 8,289 |
0 | Create_ds/aws-sdk-go-v2/codegen/smithy-aws-go-codegen/src/main/java/software/amazon/smithy/aws/go | Create_ds/aws-sdk-go-v2/codegen/smithy-aws-go-codegen/src/main/java/software/amazon/smithy/aws/go/codegen/AwsSdkServiceId.java | /*
* Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
package software.amazon.smithy.aws.go.codegen;
import software.amazon.smithy.aws.traits.ServiceTrait;
import software.amazon.smithy.go.codegen.GoSettings;
import software.amazon.smithy.go.codegen.integration.GoIntegration;
import software.amazon.smithy.model.Model;
import software.amazon.smithy.model.shapes.ServiceShape;
/**
 * Integration that replaces the generated client's service ID with the
 * {@code sdkId} declared on the service's {@code aws.api#service} trait.
 */
public class AwsSdkServiceId implements GoIntegration {
    @Override
    public byte getOrder() {
        // Run late so this supersedes earlier service-id processing.
        return 127;
    }

    @Override
    public String processServiceId(GoSettings settings, Model model, String serviceId) {
        // The incoming serviceId is ignored; the trait's sdkId is authoritative.
        return settings.getService(model).expectTrait(ServiceTrait.class).getSdkId();
    }
}
| 8,290 |
0 | Create_ds/aws-sdk-go-v2/codegen/smithy-aws-go-codegen/src/main/java/software/amazon/smithy/aws/go | Create_ds/aws-sdk-go-v2/codegen/smithy-aws-go-codegen/src/main/java/software/amazon/smithy/aws/go/codegen/RequestResponseLogging.java | /*
* Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
package software.amazon.smithy.aws.go.codegen;
import java.util.List;
import software.amazon.smithy.codegen.core.Symbol;
import software.amazon.smithy.codegen.core.SymbolProvider;
import software.amazon.smithy.go.codegen.GoDelegator;
import software.amazon.smithy.go.codegen.GoSettings;
import software.amazon.smithy.go.codegen.SmithyGoDependency;
import software.amazon.smithy.go.codegen.SymbolUtils;
import software.amazon.smithy.go.codegen.integration.GoIntegration;
import software.amazon.smithy.go.codegen.integration.MiddlewareRegistrar;
import software.amazon.smithy.go.codegen.integration.RuntimeClientPlugin;
import software.amazon.smithy.model.Model;
import software.amazon.smithy.utils.ListUtils;
/**
 * Integration that wires the smithy-http RequestResponseLogger middleware into
 * every generated client, with each log toggle driven by the client's log-mode
 * option.
 */
public class RequestResponseLogging implements GoIntegration {
    // Name of the generated Go helper that registers the logging middleware.
    private final static String MIDDLEWARE_HELPER = "addRequestResponseLogging";

    @Override
    public byte getOrder() {
        // Run late relative to other integrations.
        return 127;
    }

    /**
     * Writes the helper function into the service package. The helper adds the
     * RequestResponseLogger to the deserialize step of the middleware stack.
     */
    @Override
    public void writeAdditionalFiles(
            GoSettings settings,
            Model model,
            SymbolProvider symbolProvider,
            GoDelegator goDelegator
    ) {
        goDelegator.useShapeWriter(settings.getService(model), writer -> {
            Symbol stackSymbol = SymbolUtils.createPointableSymbolBuilder("Stack", SmithyGoDependency.SMITHY_MIDDLEWARE)
                    .build();
            Symbol middlewareSymbol = SymbolUtils.createValueSymbolBuilder("RequestResponseLogger",
                    SmithyGoDependency.SMITHY_HTTP_TRANSPORT).build();
            writer.openBlock("func $L(stack $P, o Options) error {", "}", MIDDLEWARE_HELPER, stackSymbol, () -> {
                writer.openBlock("return stack.Deserialize.Add(&$T{", "}, middleware.After)", middlewareSymbol, () -> {
                    // Each field mirrors one bit of the client's log mode option.
                    writer.write("LogRequest: o.$L.IsRequest(),", AddAwsConfigFields.LOG_MODE_CONFIG_NAME);
                    writer.write("LogRequestWithBody: o.$L.IsRequestWithBody(),",
                            AddAwsConfigFields.LOG_MODE_CONFIG_NAME);
                    writer.write("LogResponse: o.$L.IsResponse(),", AddAwsConfigFields.LOG_MODE_CONFIG_NAME);
                    writer.write("LogResponseWithBody: o.$L.IsResponseWithBody(),",
                            AddAwsConfigFields.LOG_MODE_CONFIG_NAME);
                });
            });
        });
    }

    /**
     * Registers the helper so each operation's middleware stack calls it with
     * the resolved client options.
     */
    @Override
    public List<RuntimeClientPlugin> getClientPlugins() {
        return ListUtils.of(RuntimeClientPlugin.builder()
                .registerMiddleware(MiddlewareRegistrar.builder()
                        .resolvedFunction(SymbolUtils.createValueSymbolBuilder(MIDDLEWARE_HELPER).build())
                        .useClientOptions()
                        .build())
                .build());
    }
}
| 8,291 |
0 | Create_ds/aws-sdk-go-v2/codegen/smithy-aws-go-codegen/src/main/java/software/amazon/smithy/aws/go | Create_ds/aws-sdk-go-v2/codegen/smithy-aws-go-codegen/src/main/java/software/amazon/smithy/aws/go/codegen/XmlMemberDeserVisitor.java | package software.amazon.smithy.aws.go.codegen;
import software.amazon.smithy.codegen.core.CodegenException;
import software.amazon.smithy.codegen.core.Symbol;
import software.amazon.smithy.go.codegen.CodegenUtils;
import software.amazon.smithy.go.codegen.GoWriter;
import software.amazon.smithy.go.codegen.SmithyGoDependency;
import software.amazon.smithy.go.codegen.integration.ProtocolGenerator;
import software.amazon.smithy.go.codegen.integration.ProtocolGenerator.GenerationContext;
import software.amazon.smithy.go.codegen.integration.ProtocolUtils;
import software.amazon.smithy.go.codegen.knowledge.GoPointableIndex;
import software.amazon.smithy.model.shapes.BigDecimalShape;
import software.amazon.smithy.model.shapes.BigIntegerShape;
import software.amazon.smithy.model.shapes.BlobShape;
import software.amazon.smithy.model.shapes.BooleanShape;
import software.amazon.smithy.model.shapes.ByteShape;
import software.amazon.smithy.model.shapes.CollectionShape;
import software.amazon.smithy.model.shapes.DocumentShape;
import software.amazon.smithy.model.shapes.DoubleShape;
import software.amazon.smithy.model.shapes.FloatShape;
import software.amazon.smithy.model.shapes.IntegerShape;
import software.amazon.smithy.model.shapes.ListShape;
import software.amazon.smithy.model.shapes.LongShape;
import software.amazon.smithy.model.shapes.MapShape;
import software.amazon.smithy.model.shapes.MemberShape;
import software.amazon.smithy.model.shapes.OperationShape;
import software.amazon.smithy.model.shapes.ResourceShape;
import software.amazon.smithy.model.shapes.ServiceShape;
import software.amazon.smithy.model.shapes.SetShape;
import software.amazon.smithy.model.shapes.Shape;
import software.amazon.smithy.model.shapes.ShapeVisitor;
import software.amazon.smithy.model.shapes.ShortShape;
import software.amazon.smithy.model.shapes.StringShape;
import software.amazon.smithy.model.shapes.StructureShape;
import software.amazon.smithy.model.shapes.TimestampShape;
import software.amazon.smithy.model.shapes.UnionShape;
import software.amazon.smithy.model.traits.EnumTrait;
import software.amazon.smithy.model.traits.TimestampFormatTrait.Format;
import software.amazon.smithy.model.traits.XmlFlattenedTrait;
/**
 * Visitor to generate member values for aggregate types deserialized from documents.
 *
 * <p>Each visit method emits Go code (through the context's GoWriter) that
 * decodes the visited member from an XML token stream into the destination
 * expression {@code dataDest}. Aggregate shapes delegate to standalone
 * document deserializer functions generated elsewhere.
 */
public class XmlMemberDeserVisitor implements ShapeVisitor<Void> {
    private final GenerationContext context;
    // Member being deserialized; its traits drive attribute/flattened handling.
    private final MemberShape member;
    // Go expression the decoded value is assigned to, e.g. "sv.Field".
    private final String dataDest;
    private final Format timestampFormat;
    private final GoPointableIndex pointableIndex;
    // isXmlAttributeMember indicates if member is deserialized from the xml start elements attribute value.
    private final boolean isXmlAttributeMember;
    // True when the member carries @xmlFlattened (no wrapper element around entries).
    private final boolean isFlattened;

    public XmlMemberDeserVisitor(
            GenerationContext context,
            MemberShape member,
            String dataDest,
            Format timestampFormat,
            boolean isXmlAttributeMember
    ) {
        this.context = context;
        this.member = member;
        this.dataDest = dataDest;
        this.timestampFormat = timestampFormat;
        this.isXmlAttributeMember = isXmlAttributeMember;
        this.isFlattened = member.hasTrait(XmlFlattenedTrait.ID);
        this.pointableIndex = GoPointableIndex.of(context.getModel());
    }

    @Override
    public Void blobShape(BlobShape shape) {
        GoWriter writer = context.getWriter().get();
        // Blobs arrive as base64 text; capture the string, then decode it.
        writer.write("var data string");
        handleString(shape, () -> writer.write("data = xtv"));
        writer.addUseImports(SmithyGoDependency.BASE64);
        writer.write("$L, err = base64.StdEncoding.DecodeString(data)", dataDest);
        writer.write("if err != nil { return err }");
        return null;
    }

    @Override
    public Void booleanShape(BooleanShape shape) {
        GoWriter writer = context.getWriter().get();
        writer.addUseImports(SmithyGoDependency.FMT);
        consumeToken(shape);
        writer.openBlock("{", "}", () -> {
            writer.addUseImports(SmithyGoDependency.STRCONV);
            writer.write("xtv, err := strconv.ParseBool(string(val))");
            writer.openBlock("if err != nil {", "}", () -> {
                writer.write("return fmt.Errorf(\"expected $L to be of type *bool, got %T instead\", val)",
                        shape.getId().getName());
            });
            writer.write("$L = $L", dataDest, CodegenUtils.getAsPointerIfPointable(context.getModel(),
                    context.getWriter().get(), pointableIndex, member, "xtv"));
        });
        return null;
    }

    /**
     * Consumes a single token into the variable "val", returning on any error.
     * If member is an xmlAttributeMember, "attr" representing xml attribute value is in scope.
     */
    private void consumeToken(Shape shape) {
        GoWriter writer = context.getWriter().get();
        // if the member is a modeled as an xml attribute, we do not need to
        // get another token, instead use the attribute values from previously
        // decoded start element.
        if (isXmlAttributeMember) {
            writer.write("val := []byte(attr.Value)");
            return;
        }
        writer.write("val, err := decoder.Value()");
        writer.write("if err != nil { return err }");
        writer.write("if val == nil { break }");
    }

    @Override
    public Void byteShape(ByteShape shape) {
        // Smithy's byte shape represents a signed 8-bit int, which doesn't line up with Go's unsigned byte
        handleInteger(shape, CodegenUtils.getAsPointerIfPointable(context.getModel(), context.getWriter().get(),
                pointableIndex, member, "int8(i64)"));
        return null;
    }

    @Override
    public Void shortShape(ShortShape shape) {
        handleInteger(shape, CodegenUtils.getAsPointerIfPointable(context.getModel(), context.getWriter().get(),
                pointableIndex, member, "int16(i64)"));
        return null;
    }

    @Override
    public Void integerShape(IntegerShape shape) {
        handleInteger(shape, CodegenUtils.getAsPointerIfPointable(context.getModel(), context.getWriter().get(),
                pointableIndex, member, "int32(i64)"));
        return null;
    }

    @Override
    public Void longShape(LongShape shape) {
        handleInteger(shape, CodegenUtils.getAsPointerIfPointable(context.getModel(), context.getWriter().get(),
                pointableIndex, member, "i64"));
        return null;
    }

    /**
     * Deserializes a string representing number without a fractional value.
     * The 64-bit integer representation of the number is stored in the variable {@code i64}.
     *
     * @param shape The shape being deserialized.
     * @param cast A wrapping of {@code i64} to cast it to the proper type.
     */
    private void handleInteger(Shape shape, String cast) {
        GoWriter writer = context.getWriter().get();
        handleNumber(shape, () -> {
            writer.addUseImports(SmithyGoDependency.STRCONV);
            writer.write("i64, err := strconv.ParseInt(xtv, 10, 64)");
            writer.write("if err != nil { return err }");
            writer.write("$L = $L", dataDest, cast);
        });
    }

    /**
     * Deserializes a xml number string into a xml token.
     * The number token is stored under the variable {@code xtv}.
     *
     * @param shape The shape being deserialized.
     * @param r A runnable that runs after the value has been parsed, before the scope closes.
     */
    private void handleNumber(Shape shape, Runnable r) {
        GoWriter writer = context.getWriter().get();
        writer.addUseImports(SmithyGoDependency.FMT);
        consumeToken(shape);
        writer.openBlock("{", "}", () -> {
            writer.write("xtv := string(val)");
            r.run();
        });
    }

    @Override
    public Void floatShape(FloatShape shape) {
        handleFloat(shape, CodegenUtils.getAsPointerIfPointable(context.getModel(), context.getWriter().get(),
                pointableIndex, member, "float32(f64)"));
        return null;
    }

    @Override
    public Void doubleShape(DoubleShape shape) {
        handleFloat(shape, CodegenUtils.getAsPointerIfPointable(context.getModel(), context.getWriter().get(),
                pointableIndex, member, "f64"));
        return null;
    }

    /**
     * Deserializes a string representing number with a fractional value.
     * The 64-bit float representation of the number is stored in the variable {@code f64}.
     *
     * @param shape The shape being deserialized.
     * @param cast A wrapping of {@code f64} to cast it to the proper type.
     */
    private void handleFloat(Shape shape, String cast) {
        GoWriter writer = context.getWriter().get();
        handleNumber(shape, () -> {
            // NOTE(review): unlike handleInteger, this does not call
            // addUseImports(STRCONV) even though the emitted code uses
            // strconv.ParseFloat — confirm the import is ensured elsewhere.
            writer.write("f64, err := strconv.ParseFloat(xtv, 64)");
            writer.write("if err != nil { return err }");
            writer.write("$L = $L", dataDest, cast);
        });
    }

    @Override
    public Void stringShape(StringShape shape) {
        GoWriter writer = context.getWriter().get();
        Symbol symbol = context.getSymbolProvider().toSymbol(shape);
        if (shape.hasTrait(EnumTrait.class)) {
            // Enums convert the raw string into the generated enum type.
            handleString(shape, () -> writer.write("$L = $P(xtv)", dataDest, symbol));
        } else {
            handleString(shape, () -> writer.write("$L = $L", dataDest, CodegenUtils.getAsPointerIfPointable(
                    context.getModel(), context.getWriter().get(), pointableIndex, member, "xtv")));
        }
        return null;
    }

    /**
     * Deserializes a xml string into a xml token.
     * The string token is stored under the variable {@code xtv}.
     *
     * @param shape The shape being deserialized.
     * @param r A runnable that runs after the value has been parsed, before the scope closes.
     */
    private void handleString(Shape shape, Runnable r) {
        GoWriter writer = context.getWriter().get();
        writer.addUseImports(SmithyGoDependency.FMT);
        consumeToken(shape);
        writer.openBlock("{", "}", () -> {
            writer.write("xtv := string(val)");
            r.run();
        });
    }

    @Override
    public Void timestampShape(TimestampShape shape) {
        GoWriter writer = context.getWriter().get();
        writer.addUseImports(SmithyGoDependency.SMITHY_TIME);
        // Parsing strategy depends on the resolved timestamp format for this member.
        switch (timestampFormat) {
            case DATE_TIME:
                handleString(shape, () -> {
                    writer.write("t, err := smithytime.ParseDateTime(xtv)");
                    writer.write("if err != nil { return err }");
                    writer.write("$L = $L", dataDest, CodegenUtils.getAsPointerIfPointable(context.getModel(),
                            context.getWriter().get(), pointableIndex, member, "t"));
                });
                break;
            case HTTP_DATE:
                handleString(shape, () -> {
                    writer.write("t, err := smithytime.ParseHTTPDate(xtv)");
                    writer.write("if err != nil { return err }");
                    writer.write("$L = $L", dataDest, CodegenUtils.getAsPointerIfPointable(context.getModel(),
                            context.getWriter().get(), pointableIndex, member, "t"));
                });
                break;
            case EPOCH_SECONDS:
                writer.addUseImports(SmithyGoDependency.SMITHY_PTR);
                handleFloat(shape, CodegenUtils.getAsPointerIfPointable(context.getModel(), writer,
                        pointableIndex, member, "smithytime.ParseEpochSeconds(f64)"));
                break;
            default:
                throw new CodegenException(String.format("Unknown timestamp format %s", timestampFormat));
        }
        return null;
    }

    @Override
    public Void bigIntegerShape(BigIntegerShape shape) {
        // Fail instead of losing precision through Number.
        unsupportedShape(shape);
        return null;
    }

    @Override
    public Void bigDecimalShape(BigDecimalShape shape) {
        // Fail instead of losing precision through Number.
        unsupportedShape(shape);
        return null;
    }

    // Throws for shape types this protocol cannot represent in a document.
    private void unsupportedShape(Shape shape) {
        throw new CodegenException(String.format("Cannot deserialize shape type %s on protocol, shape: %s.",
                shape.getType(), shape.getId()));
    }

    @Override
    public Void operationShape(OperationShape shape) {
        throw new CodegenException("Operation shapes cannot be bound to documents.");
    }

    @Override
    public Void resourceShape(ResourceShape shape) {
        throw new CodegenException("Resource shapes cannot be bound to documents.");
    }

    @Override
    public Void serviceShape(ServiceShape shape) {
        throw new CodegenException("Service shapes cannot be bound to documents.");
    }

    @Override
    public Void memberShape(MemberShape shape) {
        throw new CodegenException("Member shapes cannot be bound to documents.");
    }

    @Override
    public Void documentShape(DocumentShape shape) {
        writeDelegateFunction(shape);
        return null;
    }

    @Override
    public Void structureShape(StructureShape shape) {
        writeDelegateFunction(shape);
        return null;
    }

    @Override
    public Void unionShape(UnionShape shape) {
        writeDelegateFunction(shape);
        return null;
    }

    @Override
    public Void listShape(ListShape shape) {
        return collectionShape(shape);
    }

    @Override
    public Void setShape(SetShape shape) {
        return collectionShape(shape);
    }

    // Flattened collections use the "Unwrapped" deserializer variant.
    private Void collectionShape(CollectionShape shape) {
        if (isFlattened) {
            writeUnwrappedDelegateFunction(shape);
        } else {
            writeDelegateFunction(shape);
        }
        return null;
    }

    @Override
    public Void mapShape(MapShape shape) {
        // Flattened maps use the "Unwrapped" deserializer variant.
        if (isFlattened) {
            writeUnwrappedDelegateFunction(shape);
        } else {
            writeDelegateFunction(shape);
        }
        return null;
    }

    // Emits a call to the shape's generated document deserializer, wrapping the
    // current decoder in a node decoder scoped to the current element.
    private void writeDelegateFunction(Shape shape) {
        String functionName = ProtocolGenerator.getDocumentDeserializerFunctionName(shape, context.getService(), context.getProtocolName());
        GoWriter writer = context.getWriter().get();
        writer.write("nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)");
        ProtocolUtils.writeDeserDelegateFunction(context, writer, member, dataDest, (destVar) -> {
            writer.openBlock("if err := $L(&$L, nodeDecoder); err != nil {", "}", functionName, destVar, () -> {
                writer.write("return err");
            });
        });
    }

    // Name of the deserializer variant for flattened (unwrapped) members.
    private String getUnwrappedDelegateFunctionName(Shape shape) {
        return ProtocolGenerator.getDocumentDeserializerFunctionName(shape, context.getService(), context.getProtocolName()) + "Unwrapped";
    }

    // Same as writeDelegateFunction, but targets the "Unwrapped" variant.
    private void writeUnwrappedDelegateFunction(Shape shape) {
        final String functionName = getUnwrappedDelegateFunctionName(shape);
        final GoWriter writer = context.getWriter().get();
        writer.write("nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)");
        ProtocolUtils.writeDeserDelegateFunction(context, writer, member, dataDest, (destVar) -> {
            writer.openBlock("if err := $L(&$L, nodeDecoder); err != nil {", "}", functionName, destVar, () -> {
                writer.write("return err");
            });
        });
    }
}
| 8,292 |
0 | Create_ds/aws-sdk-go-v2/codegen/smithy-aws-go-codegen/src/main/java/software/amazon/smithy/aws/go | Create_ds/aws-sdk-go-v2/codegen/smithy-aws-go-codegen/src/main/java/software/amazon/smithy/aws/go/codegen/AwsProtocolUtils.java | /*
* Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
package software.amazon.smithy.aws.go.codegen;
import java.util.Set;
import java.util.TreeSet;
import software.amazon.smithy.aws.go.codegen.customization.AwsCustomGoDependency;
import software.amazon.smithy.codegen.core.Symbol;
import software.amazon.smithy.go.codegen.GoWriter;
import software.amazon.smithy.go.codegen.SmithyGoDependency;
import software.amazon.smithy.go.codegen.SymbolUtils;
import software.amazon.smithy.go.codegen.integration.HttpProtocolTestGenerator;
import software.amazon.smithy.go.codegen.integration.HttpProtocolUnitTestGenerator;
import software.amazon.smithy.go.codegen.integration.HttpProtocolUnitTestGenerator.ConfigValue;
import software.amazon.smithy.go.codegen.integration.HttpProtocolUnitTestRequestGenerator;
import software.amazon.smithy.go.codegen.integration.HttpProtocolUnitTestResponseErrorGenerator;
import software.amazon.smithy.go.codegen.integration.HttpProtocolUnitTestResponseGenerator;
import software.amazon.smithy.go.codegen.integration.IdempotencyTokenMiddlewareGenerator;
import software.amazon.smithy.go.codegen.integration.ProtocolGenerator.GenerationContext;
import software.amazon.smithy.model.shapes.Shape;
import software.amazon.smithy.model.shapes.ShapeId;
import software.amazon.smithy.utils.SetUtils;
/**
* Utility methods for generating AWS protocols.
*/
final class AwsProtocolUtils {
    private AwsProtocolUtils() {
        // Static utility class; not instantiable.
    }
    /**
     * Generates HTTP protocol tests with all required AWS-specific configuration set.
     *
     * @param context The generation context.
     */
    static void generateHttpProtocolTests(GenerationContext context) {
        // Base client options shared by request, response, and error tests.
        Set<HttpProtocolUnitTestGenerator.ConfigValue> configValues = new TreeSet<>(SetUtils.of(
                HttpProtocolUnitTestGenerator.ConfigValue.builder()
                        .name(AddAwsConfigFields.REGION_CONFIG_NAME)
                        .value(writer -> writer.write("$S,", "us-west-2"))
                        .build(),
                // Endpoint resolver pointing the client at the local test server.
                HttpProtocolUnitTestGenerator.ConfigValue.builder()
                        .name(AddAwsConfigFields.ENDPOINT_RESOLVER_CONFIG_NAME)
                        .value(writer -> {
                            writer.addUseImports(AwsGoDependency.AWS_CORE);
                            writer.openBlock("$L(func(region string, options $L) (e aws.Endpoint, err error) {", "}),",
                                    EndpointGenerator.RESOLVER_FUNC_NAME, EndpointGenerator.RESOLVER_OPTIONS, () -> {
                                        writer.write("e.URL = serverURL");
                                        writer.write("e.SigningRegion = \"us-west-2\"");
                                        writer.write("return e, err");
                                    });
                        })
                        .build(),
                // Strip finalize (signing) and input-validation middleware so the
                // tests exercise serialization/deserialization only.
                HttpProtocolUnitTestGenerator.ConfigValue.builder()
                        .name("APIOptions")
                        .value(writer -> {
                            Symbol stackSymbol = SymbolUtils.createPointableSymbolBuilder("Stack",
                                    SmithyGoDependency.SMITHY_MIDDLEWARE).build();
                            writer.openBlock("[]func($P) error{", "},", stackSymbol, () -> {
                                writer.openBlock("func(s $P) error {", "},", stackSymbol, () -> {
                                    writer.write("s.Finalize.Clear()");
                                    writer.write("s.Initialize.Remove(`OperationInputValidation`)");
                                    writer.write("return nil");
                                });
                            });
                        })
                        .build()
        ));
        // TODO can this check be replaced with a lookup into the runtime plugins?
        if (IdempotencyTokenMiddlewareGenerator.hasOperationsWithIdempotencyToken(context.getModel(),
                context.getService())) {
            // Deterministic token provider so generated request bodies are stable.
            configValues.add(
                    HttpProtocolUnitTestGenerator.ConfigValue.builder()
                            .name(IdempotencyTokenMiddlewareGenerator.IDEMPOTENCY_CONFIG_NAME)
                            .value(writer -> {
                                writer.addUseImports(SmithyGoDependency.SMITHY_RAND);
                                writer.addUseImports(SmithyGoDependency.SMITHY_TESTING);
                                writer.write("smithyrand.NewUUIDIdempotencyToken(&smithytesting.ByteLoop{}),");
                            })
                            .build()
            );
        }
        // Request tests additionally use the protocol-test HTTP client.
        Set<ConfigValue> inputConfigValues = new TreeSet<>(configValues);
        inputConfigValues.add(HttpProtocolUnitTestGenerator.ConfigValue.builder()
                .name(AddAwsConfigFields.HTTP_CLIENT_CONFIG_NAME)
                .value(writer -> {
                    writer.addUseImports(AwsGoDependency.AWS_PROTOCOL_TEST_HTTP_CLIENT);
                    writer.write("protocoltesthttp.NewClient(),");
                })
                .build());
        // skip request compression tests, not yet implemented in the SDK
        Set<HttpProtocolUnitTestGenerator.SkipTest> inputSkipTests = new TreeSet<>(SetUtils.of(
                HttpProtocolUnitTestGenerator.SkipTest.builder()
                        .service(ShapeId.from("aws.protocoltests.json10#JsonRpc10"))
                        .operation(ShapeId.from("aws.protocoltests.json10#PutWithContentEncoding"))
                        .addTestName("SDKAppliedContentEncoding_awsJson1_0")
                        .addTestName("SDKAppendsGzipAndIgnoresHttpProvidedEncoding_awsJson1_0")
                        .build(),
                HttpProtocolUnitTestGenerator.SkipTest.builder()
                        .service(ShapeId.from("aws.protocoltests.json#JsonProtocol"))
                        .operation(ShapeId.from("aws.protocoltests.json#PutWithContentEncoding"))
                        .addTestName("SDKAppliedContentEncoding_awsJson1_1")
                        .addTestName("SDKAppendsGzipAndIgnoresHttpProvidedEncoding_awsJson1_1")
                        .build(),
                HttpProtocolUnitTestGenerator.SkipTest.builder()
                        .service(ShapeId.from("aws.protocoltests.query#AwsQuery"))
                        .operation(ShapeId.from("aws.protocoltests.query#PutWithContentEncoding"))
                        .addTestName("SDKAppliedContentEncoding_awsQuery")
                        .addTestName("SDKAppendsGzipAndIgnoresHttpProvidedEncoding_awsQuery")
                        .build(),
                HttpProtocolUnitTestGenerator.SkipTest.builder()
                        .service(ShapeId.from("aws.protocoltests.ec2#AwsEc2"))
                        .operation(ShapeId.from("aws.protocoltests.ec2#PutWithContentEncoding"))
                        .addTestName("SDKAppliedContentEncoding_ec2Query")
                        .addTestName("SDKAppendsGzipAndIgnoresHttpProvidedEncoding_ec2Query")
                        .build(),
                HttpProtocolUnitTestGenerator.SkipTest.builder()
                        .service(ShapeId.from("aws.protocoltests.restjson#RestJson"))
                        .operation(ShapeId.from("aws.protocoltests.restjson#PutWithContentEncoding"))
                        .addTestName("SDKAppliedContentEncoding_restJson1")
                        .addTestName("SDKAppendedGzipAfterProvidedEncoding_restJson1")
                        .build(),
                HttpProtocolUnitTestGenerator.SkipTest.builder()
                        .service(ShapeId.from("aws.protocoltests.restxml#RestXml"))
                        .operation(ShapeId.from("aws.protocoltests.restxml#PutWithContentEncoding"))
                        .addTestName("SDKAppliedContentEncoding_restXml")
                        .addTestName("SDKAppendedGzipAfterProvidedEncoding_restXml")
                        .build(),
                HttpProtocolUnitTestGenerator.SkipTest.builder()
                        .service(ShapeId.from("aws.protocoltests.restxml#RestXml"))
                        .operation(ShapeId.from("aws.protocoltests.restxml#HttpPayloadWithUnion"))
                        .addTestName("RestXmlHttpPayloadWithUnion")
                        .addTestName("RestXmlHttpPayloadWithUnsetUnion")
                        .build()
        ));
        Set<HttpProtocolUnitTestGenerator.SkipTest> outputSkipTests = new TreeSet<>(SetUtils.of(
                // REST-JSON optional (SHOULD) test cases
                HttpProtocolUnitTestGenerator.SkipTest.builder()
                        .service(ShapeId.from("aws.protocoltests.restjson#RestJson"))
                        .operation(ShapeId.from("aws.protocoltests.restjson#JsonMaps"))
                        .addTestName("RestJsonDeserializesDenseSetMapAndSkipsNull")
                        .build(),
                // REST-XML opinionated test - prefix headers as empty vs nil map
                HttpProtocolUnitTestGenerator.SkipTest.builder()
                        .service(ShapeId.from("aws.protocoltests.restxml#RestXml"))
                        .operation(ShapeId.from("aws.protocoltests.restxml#HttpPrefixHeaders"))
                        .addTestName("HttpPrefixHeadersAreNotPresent")
                        .build(),
                HttpProtocolUnitTestGenerator.SkipTest.builder()
                        .service(ShapeId.from("aws.protocoltests.restjson#RestJson"))
                        .operation(ShapeId.from("aws.protocoltests.restjson#JsonUnions"))
                        .addTestName("RestJsonDeserializeIgnoreType")
                        .build(),
                HttpProtocolUnitTestGenerator.SkipTest.builder()
                        .service(ShapeId.from("aws.protocoltests.json10#JsonRpc10"))
                        .operation(ShapeId.from("aws.protocoltests.json10#JsonUnions"))
                        .addTestName("AwsJson10DeserializeIgnoreType")
                        .build(),
                HttpProtocolUnitTestGenerator.SkipTest.builder()
                        .service(ShapeId.from("aws.protocoltests.json#JsonProtocol"))
                        .operation(ShapeId.from("aws.protocoltests.json#JsonUnions"))
                        .addTestName("AwsJson11DeserializeIgnoreType")
                        .build()
        ));
        // Run the shared generator with request/response/error builders configured above.
        new HttpProtocolTestGenerator(context,
                (HttpProtocolUnitTestRequestGenerator.Builder) new HttpProtocolUnitTestRequestGenerator
                        .Builder()
                        .settings(context.getSettings())
                        .addSkipTests(inputSkipTests)
                        .addClientConfigValues(inputConfigValues),
                (HttpProtocolUnitTestResponseGenerator.Builder) new HttpProtocolUnitTestResponseGenerator
                        .Builder()
                        .settings(context.getSettings())
                        .addSkipTests(outputSkipTests)
                        .addClientConfigValues(configValues),
                (HttpProtocolUnitTestResponseErrorGenerator.Builder) new HttpProtocolUnitTestResponseErrorGenerator
                        .Builder()
                        .settings(context.getSettings())
                        .addClientConfigValues(configValues)
        ).generateProtocolTests();
    }
/**
 * Writes Go code that resolves the error code and message for a JSON protocol
 * error response. The X-Amzn-ErrorType header is consulted first; the body is
 * then decoded and its error fields used only when the header was absent.
 *
 * <p>The emitted Go expects {@code response}, {@code errorBody},
 * {@code errorCode}, and {@code errorMessage} to be in scope at the call site.
 *
 * @param context generation context providing the writer
 */
public static void writeJsonErrorMessageCodeDeserializer(GenerationContext context) {
    GoWriter writer = context.getWriter().get();
    // The error code could be in the headers, even though for this protocol it should be in the body.
    writer.write("headerCode := response.Header.Get(\"X-Amzn-ErrorType\")");
    writer.write("if len(headerCode) != 0 { errorCode = restjson.SanitizeErrorCode(headerCode) }");
    writer.write("");
    // Set up the ring-buffered JSON decoder over the error body.
    initializeJsonDecoder(writer, "errorBody");
    writer.addUseImports(AwsGoDependency.AWS_REST_JSON_PROTOCOL);
    // This will check various body locations for the error code and error message
    writer.write("jsonCode, message, err := restjson.GetErrorInfo(decoder)");
    handleDecodeError(writer);
    writer.addUseImports(SmithyGoDependency.IO);
    // Reset the body in case it needs to be used for anything else.
    writer.write("errorBody.Seek(0, io.SeekStart)");
    // Only set the values if something was found so that we keep the default values.
    // The header version of the error wins out over either of the body fields.
    writer.write("if len(headerCode) == 0 && len(jsonCode) != 0 { errorCode = restjson.SanitizeErrorCode(jsonCode) }");
    writer.write("if len(message) != 0 { errorMessage = message }");
    writer.write("");
}
/**
 * Writes Go code that constructs an encoding/json decoder over the given body,
 * teeing all reads into a ring buffer so a snapshot of the payload is available
 * when pinpointing deserialization errors.
 *
 * @param writer       GoWriter to write code to
 * @param bodyLocation Go expression for the io.Reader holding the JSON body
 */
public static void initializeJsonDecoder(GoWriter writer, String bodyLocation) {
    // Back the decoder with a ring buffer so failures can include a snapshot.
    writer.addUseImports(SmithyGoDependency.SMITHY_IO);
    writer.write("var buff [1024]byte")
            .write("ringBuffer := smithyio.NewRingBuffer(buff[:])")
            .write("");
    writer.addUseImports(SmithyGoDependency.IO);
    writer.addUseImports(SmithyGoDependency.JSON);
    // decoder.UseNumber() preserves numeric precision instead of forcing float64.
    writer.write("body := io.TeeReader($L, ringBuffer)", bodyLocation)
            .write("decoder := json.NewDecoder(body)")
            .write("decoder.UseNumber()");
}
/**
 * Decodes JSON into {@code shape} with type {@code interface{}} using the
 * encoding/json decoder referenced by {@code decoder} in the emitted Go.
 *
 * @param writer            GoWriter to write code to
 * @param errorReturnExtras extra parameters to return if an error occurs
 */
public static void decodeJsonIntoInterface(GoWriter writer, String errorReturnExtras) {
    // io.EOF is tolerated so an empty body simply leaves shape nil.
    Runnable onDecodeFailure = () -> {
        wrapAsDeserializationError(writer);
        writer.write("return $Lerr", errorReturnExtras);
    };
    writer.write("var shape interface{}");
    writer.addUseImports(SmithyGoDependency.IO);
    writer.openBlock("if err := decoder.Decode(&shape); err != nil && err != io.EOF {", "}", onDecodeFailure);
    writer.write("");
}
/**
 * Wraps the Go error {@code err} in a smithy {@code DeserializationError} with a snapshot
 * of the ring buffer contents captured at the point of failure.
 *
 * <p>The emitted Go expects {@code err} and {@code ringBuffer} to be in scope,
 * and the bytes/io/fmt/smithy imports to have been registered by the caller.
 *
 * @param writer GoWriter to write code to
 */
private static void wrapAsDeserializationError(GoWriter writer) {
    writer.write("var snapshot bytes.Buffer");
    // Drain the ring buffer into the snapshot for the error report.
    writer.write("io.Copy(&snapshot, ringBuffer)");
    writer.openBlock("err = &smithy.DeserializationError {", "}", () -> {
        writer.write("Err: fmt.Errorf(\"failed to decode response body, %w\", err),");
        writer.write("Snapshot: snapshot.Bytes(),");
    });
}
/**
 * Writes a Go error check that wraps {@code err} as a smithy
 * DeserializationError (with a ring-buffer snapshot) and returns it along with
 * any extra return values.
 *
 * @param writer       GoWriter to write code to
 * @param returnExtras extra values to return before the error, e.g. {@code "out, metadata, "}
 */
public static void handleDecodeError(GoWriter writer, String returnExtras) {
    Runnable wrapAndReturn = () -> {
        writer.addUseImports(SmithyGoDependency.BYTES);
        writer.addUseImports(SmithyGoDependency.SMITHY);
        writer.addUseImports(SmithyGoDependency.IO);
        wrapAsDeserializationError(writer);
        writer.write("return $Lerr", returnExtras);
    };
    writer.openBlock("if err != nil {", "}", wrapAndReturn).write("");
}
/**
 * Convenience overload of {@link #handleDecodeError(GoWriter, String)} with no
 * extra return values.
 *
 * @param writer GoWriter to write code to
 */
public static void handleDecodeError(GoWriter writer) {
    handleDecodeError(writer, "");
}
/**
 * Writes the body of an event message serializer delegator: sets the content
 * type header on the event message, serializes the event value into a JSON
 * encoder via the named serializer function, and assigns the encoded bytes to
 * the message payload.
 *
 * @param ctx          generation context providing the writer
 * @param functionName name of the Go serializer function to delegate to
 * @param operand      Go expression for the event value being serialized
 * @param contentType  content type value to set on the message header
 */
public static void writeJsonEventMessageSerializerDelegator(
        GenerationContext ctx,
        String functionName,
        String operand,
        String contentType
) {
    var writer = ctx.getWriter().get();
    var headerValue = SymbolUtils.createValueSymbolBuilder("StringValue",
            AwsGoDependency.SERVICE_INTERNAL_EVENTSTREAM).build();
    var headerName = SymbolUtils.createValueSymbolBuilder("ContentTypeHeader",
            AwsGoDependency.SERVICE_INTERNAL_EVENTSTREAMAPI).build();
    var encoderFactory = SymbolUtils.createValueSymbolBuilder("NewEncoder",
            SmithyGoDependency.SMITHY_JSON).build();

    writer.write("msg.Headers.Set($T, $T($S))", headerName, headerValue, contentType);
    writer.write("jsonEncoder := $T()", encoderFactory);
    writer.openBlock("if err := $L($L, jsonEncoder.Value); err != nil {", "}", functionName, operand,
            () -> writer.write("return err"));
    writer.write("msg.Payload = jsonEncoder.Bytes()");
    writer.write("return nil");
}
/**
 * Convenience overload of
 * {@link #initializeJsonEventMessageDeserializer(GenerationContext, String)}
 * with no extra error return values.
 *
 * @param ctx generation context providing the writer
 */
public static void initializeJsonEventMessageDeserializer(GenerationContext ctx) {
    initializeJsonEventMessageDeserializer(ctx, "");
}
/**
 * Writes Go code that wraps an event message payload in a bytes.Reader,
 * initializes a ring-buffered JSON decoder over it, and decodes the payload
 * into an {@code interface{}} shape.
 *
 * @param ctx               generation context providing the writer
 * @param errorReturnExtras extra parameters to return if an error occurs
 */
public static void initializeJsonEventMessageDeserializer(GenerationContext ctx, String errorReturnExtras) {
    var writer = ctx.getWriter().get();
    var bytesReader = SymbolUtils.createValueSymbolBuilder(
            "NewReader", SmithyGoDependency.BYTES).build();
    writer.write("br := $T(msg.Payload)", bytesReader);
    initializeJsonDecoder(writer, "br");
    decodeJsonIntoInterface(writer, errorReturnExtras);
}
/**
 * Writes the deserializer body for an unknown event stream exception: decodes
 * the message payload as JSON, pulls any error code/message out of it via the
 * restjson helpers, and returns a generic smithy API error.
 *
 * <p>NOTE(review): the emitted Go relies on {@code msg} and
 * {@code exceptionType} being in scope at the call site — confirm against the
 * surrounding generated code.
 *
 * @param ctx generation context providing the writer
 */
public static void writeJsonEventStreamUnknownExceptionDeserializer(GenerationContext ctx) {
var writer = ctx.getWriter().get();
writer.write("br := $T(msg.Payload)", SymbolUtils.createValueSymbolBuilder("NewReader",
SmithyGoDependency.BYTES).build());
AwsProtocolUtils.initializeJsonDecoder(writer, "br");
// Precedence for the error code: exception type header, then body code,
// then the "UnknownError" fallback; message defaults to the code.
writer.write("""
code, message, err := $T(decoder)
if err != nil {
return err
}
errorCode := "UnknownError"
errorMessage := errorCode
if ev := exceptionType.String(); len(ev) > 0 {
errorCode = ev
} else if ev := code; len(ev) > 0 {
errorCode = ev
}
if ev := message; len(ev) > 0 {
errorMessage = ev
}
return &$T{
Code: errorCode,
Message: errorMessage,
}
""",
SymbolUtils.createValueSymbolBuilder("GetErrorInfo",
AwsGoDependency.AWS_REST_JSON_PROTOCOL).build(),
SymbolUtils.createValueSymbolBuilder("GenericAPIError", SmithyGoDependency.SMITHY).build());
}
}
| 8,293 |
0 | Create_ds/aws-sdk-go-v2/codegen/smithy-aws-go-codegen/src/main/java/software/amazon/smithy/aws/go | Create_ds/aws-sdk-go-v2/codegen/smithy-aws-go-codegen/src/main/java/software/amazon/smithy/aws/go/codegen/AwsRestXml.java | package software.amazon.smithy.aws.go.codegen;
import software.amazon.smithy.aws.traits.protocols.RestXmlTrait;
import software.amazon.smithy.model.shapes.ShapeId;
/**
* Handles generating the aws.rest-xml protocol for services.
*
* @inheritDoc
*
* @see RestXmlProtocolGenerator
*/
public final class AwsRestXml extends RestXmlProtocolGenerator {

    /** Identifies this generator as handling the aws.rest-xml protocol trait. */
    @Override
    public ShapeId getProtocol() {
        return RestXmlTrait.ID;
    }

    /** Content type used for rest-xml document payloads. */
    @Override
    protected String getDocumentContentType() {
        return "application/xml";
    }
}
| 8,294 |
0 | Create_ds/aws-sdk-go-v2/codegen/smithy-aws-go-codegen/src/main/java/software/amazon/smithy/aws/go | Create_ds/aws-sdk-go-v2/codegen/smithy-aws-go-codegen/src/main/java/software/amazon/smithy/aws/go/codegen/AwsEventStreamIntegration.java | package software.amazon.smithy.aws.go.codegen;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.TreeSet;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import software.amazon.smithy.go.codegen.GoEventStreamIndex;
import software.amazon.smithy.go.codegen.GoSettings;
import software.amazon.smithy.go.codegen.SmithyGoDependency;
import software.amazon.smithy.go.codegen.SymbolUtils;
import software.amazon.smithy.go.codegen.integration.ConfigFieldResolver;
import software.amazon.smithy.go.codegen.integration.GoIntegration;
import software.amazon.smithy.go.codegen.integration.MiddlewareRegistrar;
import software.amazon.smithy.go.codegen.integration.RuntimeClientPlugin;
import software.amazon.smithy.model.Model;
import software.amazon.smithy.model.knowledge.EventStreamIndex;
import software.amazon.smithy.model.shapes.OperationShape;
import software.amazon.smithy.model.shapes.ShapeId;
import software.amazon.smithy.utils.ListUtils;
public class AwsEventStreamIntegration implements GoIntegration {
    // Populated by processFinalizedModel: service shape ID -> operations on that
    // service that have an input and/or output event stream.
    private final Map<ShapeId, Collection<OperationShape>> serviceOperationMap = new HashMap<>();
    // Populated by processFinalizedModel: service shape ID -> operations with BOTH
    // an input and an output event stream (bidirectional streaming).
    private final Map<ShapeId, Collection<OperationShape>> minHttp2 = new HashMap<>();

    @Override
    public byte getOrder() {
        // NOTE(review): -127 presumably places this at one extreme of the integration
        // ordering — confirm against GoIntegration.getOrder() semantics.
        return -127;
    }

    /**
     * Scans the finalized model for event stream usage on the configured service
     * and records which operations stream (and which stream bidirectionally) for
     * later use in {@link #getClientPlugins()}.
     */
    @Override
    public void processFinalizedModel(
            GoSettings settings,
            Model model
    ) {
        var goEventStreamIndex = GoEventStreamIndex.of(model);
        var service = settings.getService();

        // Collect every operation participating in an input or output event stream.
        Collection<OperationShape> operationShapes = new HashSet<>();
        goEventStreamIndex.getInputEventStreams(service).ifPresent(shapeIdSetMap ->
                shapeIdSetMap.values().forEach(eventStreamInfos ->
                        eventStreamInfos.forEach(info -> operationShapes.add(info.getOperation()))));
        goEventStreamIndex.getOutputEventStreams(service).ifPresent(shapeIdSetMap ->
                shapeIdSetMap.values().forEach(eventStreamInfos ->
                        eventStreamInfos.forEach(info -> operationShapes.add(info.getOperation()))));
        if (!operationShapes.isEmpty()) {
            serviceOperationMap.put(service, operationShapes);
        }

        // Operations that stream in both directions are tracked separately; they
        // additionally require a minimum HTTP protocol version (see getClientPlugins).
        var biDirectional = new HashSet<OperationShape>();
        var streamIndex = EventStreamIndex.of(model);
        operationShapes.forEach(operationShape -> {
            if (streamIndex.getInputInfo(operationShape).isPresent() && streamIndex.getOutputInfo(operationShape).isPresent()) {
                biDirectional.add(operationShape);
            }
        });
        if (!biDirectional.isEmpty()) {
            minHttp2.put(service, biDirectional);
        }
    }

    /**
     * Builds the runtime client plugins for event streaming: a config field
     * resolver for services that use event streams, per-operation event stream
     * middleware, and an HTTP/2 minimum-protocol requirement for bidirectional
     * streaming operations.
     */
    @Override
    public List<RuntimeClientPlugin> getClientPlugins() {
        final List<RuntimeClientPlugin> plugins = new ArrayList<>();

        // For any service that uses event streams, resolve the event stream client
        // log mode during operation finalization.
        plugins.add(RuntimeClientPlugin.builder()
                .servicePredicate((model, serviceShape) -> serviceOperationMap.containsKey(serviceShape.toShapeId()))
                .addConfigFieldResolver(ConfigFieldResolver.builder()
                        .location(ConfigFieldResolver.Location.OPERATION)
                        .resolver(AwsEventStreamUtils.getEventStreamClientLogModeFinalizerSymbol())
                        .target(ConfigFieldResolver.Target.FINALIZATION)
                        .withOperationName(true)
                        .build())
                .build());

        // Register the event stream operation middleware for each streaming operation.
        serviceOperationMap.forEach((shapeId, operationShapes) -> operationShapes.forEach(operationShape ->
                plugins.add(RuntimeClientPlugin.builder()
                        .operationPredicate((model, service, operation) ->
                                service.getId().equals(shapeId) && operation.equals(operationShape))
                        .registerMiddleware(MiddlewareRegistrar.builder()
                                .resolvedFunction(
                                        AwsEventStreamUtils.getAddEventStreamOperationMiddlewareSymbol(
                                                operationShape))
                                .useClientOptions()
                                .build())
                        .build())));

        // Bidirectional streaming operations additionally require at least HTTP/2
        // (minimum protocol version 2.0).
        minHttp2.forEach((shapeId, operationShapes) -> operationShapes.forEach(operationShape ->
                plugins.add(RuntimeClientPlugin.builder()
                        .operationPredicate((model, service, operation) ->
                                service.getId().equals(shapeId) && operation.equals(operationShape))
                        .registerMiddleware(MiddlewareRegistrar.builder()
                                .resolvedFunction(SymbolUtils.createValueSymbolBuilder(
                                        "AddRequireMinimumProtocol",
                                        SmithyGoDependency.SMITHY_HTTP_TRANSPORT).build())
                                .functionArguments(ListUtils.of(
                                        SymbolUtils.createValueSymbolBuilder("2").build(),
                                        SymbolUtils.createValueSymbolBuilder("0").build()))
                                .build())
                        .build())));

        return plugins;
    }
}
| 8,295 |
0 | Create_ds/aws-sdk-go-v2/codegen/smithy-aws-go-codegen/src/main/java/software/amazon/smithy/aws/go | Create_ds/aws-sdk-go-v2/codegen/smithy-aws-go-codegen/src/main/java/software/amazon/smithy/aws/go/codegen/AwsClientUserAgent.java | /*
* Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
package software.amazon.smithy.aws.go.codegen;
import java.util.List;
import java.util.Locale;

import software.amazon.smithy.aws.traits.ServiceTrait;
import software.amazon.smithy.codegen.core.SymbolProvider;
import software.amazon.smithy.go.codegen.CodegenUtils;
import software.amazon.smithy.go.codegen.GoDelegator;
import software.amazon.smithy.go.codegen.GoSettings;
import software.amazon.smithy.go.codegen.SmithyGoDependency;
import software.amazon.smithy.go.codegen.SymbolUtils;
import software.amazon.smithy.go.codegen.integration.ConfigField;
import software.amazon.smithy.go.codegen.integration.GoIntegration;
import software.amazon.smithy.go.codegen.integration.MiddlewareRegistrar;
import software.amazon.smithy.go.codegen.integration.RuntimeClientPlugin;
import software.amazon.smithy.model.Model;
import software.amazon.smithy.utils.ListUtils;
public class AwsClientUserAgent implements GoIntegration {
    /** Name of the generated Go function that registers user-agent middleware. */
    public static final String MIDDLEWARE_RESOLVER = "addClientUserAgent";
    /** Name of the generated client config field holding the caller's application ID. */
    public static final String SDK_UA_APP_ID = "AppID";

    @Override
    public byte getOrder() {
        // NOTE(review): -49 positions this relative to other integrations; confirm
        // against GoIntegration.getOrder() ordering semantics.
        return -49;
    }

    /**
     * Writes the {@code addClientUserAgent} function into the service package.
     * The generated function records the service identifier and module version
     * in the SDK user agent, and appends the caller's application identifier
     * when one is configured.
     *
     * @param settings       code generation settings
     * @param model          the smithy model being generated
     * @param symbolProvider symbol provider (unused here, required by interface)
     * @param goDelegator    delegator used to obtain a writer for the service shape
     */
    @Override
    public void writeAdditionalFiles(
            GoSettings settings,
            Model model,
            SymbolProvider symbolProvider,
            GoDelegator goDelegator
    ) {
        ServiceTrait serviceTrait = settings.getService(model).expectTrait(ServiceTrait.class);
        // Normalize the SDK ID into a compact identifier, e.g. "Elastic Beanstalk"
        // -> "elasticbeanstalk". Locale.ROOT keeps the lowercasing stable regardless
        // of the build machine's default locale (e.g. Turkish dotless-i rules).
        String serviceId = serviceTrait.getSdkId().replace("-", "").replace(" ", "").toLowerCase(Locale.ROOT);
        goDelegator.useShapeWriter(settings.getService(model), writer -> {
            writer.openBlock("func $L(stack $P, options Options) error {", "}", MIDDLEWARE_RESOLVER, SymbolUtils.createPointableSymbolBuilder("Stack",
                    SmithyGoDependency.SMITHY_MIDDLEWARE).build(), () -> {
                // Always record the service/version pair in the SDK agent metadata.
                writer.write("if err := $T($T, $S, $T)(stack); err != nil { return err }",
                        SymbolUtils.createValueSymbolBuilder("AddSDKAgentKeyValue", AwsGoDependency.AWS_MIDDLEWARE)
                                .build(),
                        SymbolUtils.createValueSymbolBuilder("APIMetadata",
                                AwsGoDependency.AWS_MIDDLEWARE).build(),
                        serviceId,
                        SymbolUtils.createValueSymbolBuilder("goModuleVersion").build()
                );
                writer.write("");
                // Only append the application identifier when the caller supplied one.
                writer.openBlock("if len(options.AppID) > 0 {", "}", () -> {
                    writer.write("return $T($T, options.AppID)(stack)",
                            SymbolUtils.createValueSymbolBuilder("AddSDKAgentKey", AwsGoDependency.AWS_MIDDLEWARE)
                                    .build(),
                            SymbolUtils.createValueSymbolBuilder("ApplicationIdentifier",
                                    AwsGoDependency.AWS_MIDDLEWARE).build()
                    );
                });
                writer.write("");
                writer.write("return nil");
            });
        });
    }

    /**
     * Adds the {@code AppID} client config field that callers use to tag the
     * User-Agent header with an application-specific identifier.
     */
    @Override
    public List<RuntimeClientPlugin> getClientPlugins() {
        return ListUtils.of(
                RuntimeClientPlugin.builder()
                        .configFields(ListUtils.of(
                                ConfigField.builder()
                                        .name(SDK_UA_APP_ID)
                                        .type(SymbolUtils.createValueSymbolBuilder("string")
                                                .putProperty(SymbolUtils.GO_UNIVERSE_TYPE, true)
                                                .build())
                                        .documentation("The optional application specific identifier appended to the User-Agent header.")
                                        .build()
                        ))
                        .build()
        );
    }
}
| 8,296 |
0 | Create_ds/aws-sdk-go-v2/codegen/smithy-aws-go-codegen/src/main/java/software/amazon/smithy/aws/go | Create_ds/aws-sdk-go-v2/codegen/smithy-aws-go-codegen/src/main/java/software/amazon/smithy/aws/go/codegen/Ec2Query.java | package software.amazon.smithy.aws.go.codegen;
import java.util.Set;
import java.util.function.Consumer;
import software.amazon.smithy.aws.traits.protocols.Ec2QueryTrait;
import software.amazon.smithy.codegen.core.Symbol;
import software.amazon.smithy.go.codegen.GoWriter;
import software.amazon.smithy.go.codegen.SmithyGoDependency;
import software.amazon.smithy.go.codegen.SymbolUtils;
import software.amazon.smithy.model.shapes.OperationShape;
import software.amazon.smithy.model.shapes.Shape;
import software.amazon.smithy.model.shapes.ShapeId;
/**
* Handles generating the ec2 query protocol for services.
*
* @inheritDoc
* @see AwsQuery
*/
final class Ec2Query extends AwsQuery {
    /** Identifies this generator as handling the aws.ec2 query protocol trait. */
    @Override
    public ShapeId getProtocol() {
        return Ec2QueryTrait.ID;
    }

    /**
     * Generates serializers for document body shapes using the EC2-specific
     * visitor (flattens all collections, honors @ec2QueryName).
     */
    @Override
    protected void generateDocumentBodyShapeSerializers(GenerationContext context, Set<Shape> shapes) {
        Ec2QueryShapeSerVisitor visitor = new Ec2QueryShapeSerVisitor(context);
        shapes.forEach(shape -> shape.accept(visitor));
    }

    /**
     * Generates deserializers for document body shapes; EC2 query responses
     * are XML, so the shared XML deserialization visitor is reused.
     */
    @Override
    protected void generateDocumentBodyShapeDeserializers(GenerationContext context, Set<Shape> shapes) {
        XmlShapeDeserVisitor visitor = new XmlShapeDeserVisitor(context);
        shapes.forEach(shape -> shape.accept(visitor));
    }

    /**
     * Writes Go code that extracts the error code, message, and request ID from
     * an EC2 query error response body, then rewinds the body for later reads.
     */
    @Override
    protected void writeErrorMessageCodeDeserializer(GenerationContext context) {
        GoWriter writer = context.getWriter().get();
        writer.addUseImports(AwsGoDependency.AWS_EC2QUERY_PROTOCOL);
        writer.write("errorComponents, err := ec2query.GetErrorResponseComponents(errorBody)");
        writer.write("if err != nil { return err }");
        writer.insertTrailingNewline();
        writer.addUseImports(AwsGoDependency.AWS_MIDDLEWARE);
        // Surface the response's request ID in the operation metadata.
        writer.write("awsmiddleware.SetRequestIDMetadata(metadata, errorComponents.RequestID)");
        writer.insertTrailingNewline();
        // Only overwrite the defaults when the response actually carried values.
        writer.write("if len(errorComponents.Code) != 0 { errorCode = errorComponents.Code}");
        writer.write("if len(errorComponents.Message) != 0 { errorMessage = errorComponents.Message}");
        writer.insertTrailingNewline();
        // Rewind so the body can be re-read by the specific error deserializer.
        writer.write("errorBody.Seek(0, io.SeekStart)");
        writer.insertTrailingNewline();
    }

    @Override
    protected void unwrapOutputDocument(GenerationContext context, OperationShape shape) {
        // EC2 Query Protocol does not contain a document wrapper
    }

    /**
     * Writes Go code that descends through the {@code Errors} and {@code Error}
     * XML elements wrapping an EC2 query error before decoding it.
     */
    @Override
    protected void unwrapErrorElement(GenerationContext context) {
        Symbol wrapNodeDecoder = SymbolUtils.createValueSymbolBuilder("WrapNodeDecoder",
                SmithyGoDependency.SMITHY_XML).build();
        GoWriter writer = context.getWriter().get();
        Consumer<String> unwrapElement = (String element) -> {
            writer.write("t, err = decoder.GetElement($S)", element);
            XmlProtocolUtils.handleDecodeError(writer, "");
            writer.write("decoder = $T(decoder.Decoder, t)", wrapNodeDecoder);
        };
        unwrapElement.accept("Errors");
        unwrapElement.accept("Error");
    }
}
| 8,297 |
0 | Create_ds/aws-sdk-go-v2/codegen/smithy-aws-go-codegen/src/main/java/software/amazon/smithy/aws/go | Create_ds/aws-sdk-go-v2/codegen/smithy-aws-go-codegen/src/main/java/software/amazon/smithy/aws/go/codegen/FilterShapes.java | /*
* Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
package software.amazon.smithy.aws.go.codegen;
import java.util.Optional;
import java.util.Set;
import java.util.logging.Logger;
import java.util.stream.Collectors;
import software.amazon.smithy.go.codegen.GoSettings;
import software.amazon.smithy.go.codegen.integration.GoIntegration;
import software.amazon.smithy.model.Model;
import software.amazon.smithy.model.shapes.ShapeId;
import software.amazon.smithy.model.transform.ModelTransformer;
import software.amazon.smithy.utils.SetUtils;
/**
* Filters out certain shapes such as an operation.
*/
/**
 * Filters out certain shapes such as an operation.
 */
public final class FilterShapes implements GoIntegration {
    private static final Logger LOGGER = Logger.getLogger(FilterShapes.class.getName());

    // Shape IDs to strip from the model before code generation; currently empty,
    // so this integration is a no-op until IDs are added here.
    private static final Set<ShapeId> SHAPE_IDS = SetUtils.of();

    public FilterShapes() {
    }

    /**
     * Removes the configured shapes — and anything left unreferenced by their
     * removal — from the model prior to code generation.
     *
     * @param model    the smithy model to filter
     * @param settings code generation settings (unused; required by interface)
     * @return the filtered model, or the original model when nothing matched
     */
    @Override
    public Model preprocessModel(Model model, GoSettings settings) {
        var toRemove = SHAPE_IDS.stream()
                .map(model::getShape)
                // Ignore configured IDs that are not present in this model.
                .flatMap(Optional::stream)
                .collect(Collectors.toSet());
        if (toRemove.isEmpty()) {
            return model;
        }
        ModelTransformer transformer = ModelTransformer.create();
        return transformer.removeUnreferencedShapes(transformer.removeShapes(model, toRemove));
    }
}
| 8,298 |
0 | Create_ds/aws-sdk-go-v2/codegen/smithy-aws-go-codegen/src/main/java/software/amazon/smithy/aws/go | Create_ds/aws-sdk-go-v2/codegen/smithy-aws-go-codegen/src/main/java/software/amazon/smithy/aws/go/codegen/Ec2QueryShapeSerVisitor.java | package software.amazon.smithy.aws.go.codegen;
import java.util.Optional;
import java.util.function.Predicate;
import software.amazon.smithy.aws.traits.protocols.Ec2QueryNameTrait;
import software.amazon.smithy.go.codegen.integration.ProtocolGenerator.GenerationContext;
import software.amazon.smithy.model.shapes.MemberShape;
import software.amazon.smithy.model.shapes.ShapeType;
import software.amazon.smithy.model.traits.TimestampFormatTrait.Format;
import software.amazon.smithy.model.traits.XmlNameTrait;
import software.amazon.smithy.utils.StringUtils;
/**
* Visitor to generate serialization functions for shapes in EC2 Query protocol
* document bodies.
*
* This class uses the implementations provided by {@code QueryShapeSerVisitor} but with
* the following protocol specific customizations for ec2 query:
*
* <ul>
* <li>ec2 query flattens all lists, sets, and maps regardless of the {@code @xmlFlattened} trait.</li>
* <li>ec2 query respects the {@code @ec2QueryName} trait, then the {@code xmlName}
* trait value with the first letter capitalized.</li>
* </ul>
*
* Timestamps are serialized to {@link Format}.DATE_TIME by default.
*
* @see QueryShapeSerVisitor
*/
final class Ec2QueryShapeSerVisitor extends QueryShapeSerVisitor {
    public Ec2QueryShapeSerVisitor(GenerationContext context) {
        super(context);
    }

    public Ec2QueryShapeSerVisitor(GenerationContext context, Predicate<MemberShape> memberFilter) {
        super(context, memberFilter);
    }

    /**
     * Resolves the serialized member name for aws.ec2: the @ec2QueryName trait
     * wins outright; otherwise the @xmlName trait value (or the default value)
     * is used with its first letter capitalized.
     */
    @Override
    protected String getSerializedLocationName(MemberShape memberShape, String defaultValue) {
        return memberShape.getTrait(Ec2QueryNameTrait.class)
                .map(Ec2QueryNameTrait::getValue)
                .orElseGet(() -> StringUtils.capitalize(
                        memberShape.getTrait(XmlNameTrait.class)
                                .map(XmlNameTrait::getValue)
                                .orElse(defaultValue)));
    }

    /**
     * aws.ec2 flattens every list, set, and map regardless of @xmlFlattened.
     */
    @Override
    protected boolean isFlattened(GenerationContext context, MemberShape memberShape) {
        ShapeType targetType = context.getModel().expectShape(memberShape.getTarget()).getType();
        switch (targetType) {
            case LIST:
            case SET:
            case MAP:
                return true;
            default:
                return false;
        }
    }
}
| 8,299 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.