index
int64
0
0
repo_id
stringlengths
26
205
file_path
stringlengths
51
246
content
stringlengths
8
433k
__index_level_0__
int64
0
10k
0
Create_ds/conductor/client/src/main/java/com/netflix/conductor/client
Create_ds/conductor/client/src/main/java/com/netflix/conductor/client/config/PropertyFactory.java
/*
 * Copyright 2020 Netflix, Inc.
 * <p>
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
 * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations under the License.
 */
package com.netflix.conductor.client.config;

import java.util.concurrent.ConcurrentHashMap;

import com.netflix.config.DynamicProperty;

/**
 * Used to configure the Conductor workers using properties.
 *
 * <p>Each property is resolved in two scopes: a worker-specific key
 * ({@code conductor.worker.<workerName>.<property>}) and a global key
 * ({@code conductor.worker.<property>}). The worker-specific value, when set, always wins;
 * otherwise the global value (or the supplied default) is returned.
 */
public class PropertyFactory {

    private static final String PROPERTY_PREFIX = "conductor.worker";

    // Cache of factories keyed by "<property>.<workerName>" so DynamicProperty handles
    // are created at most once per (worker, property) pair.
    private static final ConcurrentHashMap<String, PropertyFactory> PROPERTY_FACTORY_MAP =
            new ConcurrentHashMap<>();

    private final DynamicProperty global;
    private final DynamicProperty local;

    private PropertyFactory(String prefix, String propName, String workerName) {
        this.global = DynamicProperty.getInstance(prefix + "." + propName);
        this.local = DynamicProperty.getInstance(prefix + "." + workerName + "." + propName);
    }

    /**
     * @param defaultValue Default Value
     * @return Returns the value as integer. If no value is set (either global or worker
     *     specific), then returns the default value.
     */
    public Integer getInteger(int defaultValue) {
        Integer workerSpecific = local.getInteger();
        return (workerSpecific != null) ? workerSpecific : global.getInteger(defaultValue);
    }

    /**
     * @param defaultValue Default Value
     * @return Returns the value as String. If no value is set (either global or worker
     *     specific), then returns the default value.
     */
    public String getString(String defaultValue) {
        String workerSpecific = local.getString();
        return (workerSpecific != null) ? workerSpecific : global.getString(defaultValue);
    }

    /**
     * @param defaultValue Default Value
     * @return Returns the value as Boolean. If no value is set (either global or worker
     *     specific), then returns the default value.
     */
    public Boolean getBoolean(Boolean defaultValue) {
        Boolean workerSpecific = local.getBoolean();
        return (workerSpecific != null) ? workerSpecific : global.getBoolean(defaultValue);
    }

    public static Integer getInteger(String workerName, String property, Integer defaultValue) {
        return getPropertyFactory(workerName, property).getInteger(defaultValue);
    }

    public static Boolean getBoolean(String workerName, String property, Boolean defaultValue) {
        return getPropertyFactory(workerName, property).getBoolean(defaultValue);
    }

    public static String getString(String workerName, String property, String defaultValue) {
        return getPropertyFactory(workerName, property).getString(defaultValue);
    }

    // Looks up (or lazily creates) the cached factory for this worker/property pair.
    private static PropertyFactory getPropertyFactory(String workerName, String property) {
        String key = property + "." + workerName;
        return PROPERTY_FACTORY_MAP.computeIfAbsent(
                key, t -> new PropertyFactory(PROPERTY_PREFIX, property, workerName));
    }
}
7,000
0
Create_ds/conductor/client/src/main/java/com/netflix/conductor/client
Create_ds/conductor/client/src/main/java/com/netflix/conductor/client/automator/TaskRunnerConfigurer.java
/*
 * Copyright 2020 Netflix, Inc.
 * <p>
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
 * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations under the License.
 */
package com.netflix.conductor.client.automator;

import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;

import org.apache.commons.lang3.Validate;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.netflix.conductor.client.exception.ConductorClientException;
import com.netflix.conductor.client.http.TaskClient;
import com.netflix.conductor.client.worker.Worker;
import com.netflix.discovery.EurekaClient;

/** Configures automated polling of tasks and execution via the registered {@link Worker}s. */
public class TaskRunnerConfigurer {

    private static final Logger LOGGER = LoggerFactory.getLogger(TaskRunnerConfigurer.class);
    private static final String INVALID_THREAD_COUNT =
            "Invalid worker thread count specified, use either shared thread pool or config thread count per task";
    // NOTE(review): declared but not referenced in this class — possibly used by a removed
    // validation path; kept for source compatibility.
    private static final String MISSING_TASK_THREAD_COUNT =
            "Missing task thread count config for %s";

    private ScheduledExecutorService scheduledExecutorService;

    private final EurekaClient eurekaClient;
    private final TaskClient taskClient;
    private final List<Worker> workers = new LinkedList<>();
    private final int sleepWhenRetry;
    private final int updateRetryCount;
    @Deprecated private final int threadCount;
    private final int shutdownGracePeriodSeconds;
    private final String workerNamePrefix;
    private final Map<String /*taskType*/, String /*domain*/> taskToDomain;
    private final Map<String /*taskType*/, Integer /*threadCount*/> taskThreadCount;

    private TaskPollExecutor taskPollExecutor;

    /**
     * @see TaskRunnerConfigurer.Builder
     * @see TaskRunnerConfigurer#init()
     */
    private TaskRunnerConfigurer(Builder builder) {
        // only allow either shared thread pool or per task thread pool
        if (builder.threadCount != -1 && !builder.taskThreadCount.isEmpty()) {
            LOGGER.error(INVALID_THREAD_COUNT);
            throw new ConductorClientException(INVALID_THREAD_COUNT);
        } else if (!builder.taskThreadCount.isEmpty()) {
            // per-task thread pool: every worker must have a count; default missing ones to 1
            for (Worker worker : builder.workers) {
                if (!builder.taskThreadCount.containsKey(worker.getTaskDefName())) {
                    LOGGER.info(
                            "No thread count specified for task type {}, default to 1 thread",
                            worker.getTaskDefName());
                    builder.taskThreadCount.put(worker.getTaskDefName(), 1);
                }
                workers.add(worker);
            }
            this.taskThreadCount = builder.taskThreadCount;
            this.threadCount = -1;
        } else {
            // shared thread pool: split the total thread count evenly between task types
            Set<String> taskTypes = new HashSet<>();
            for (Worker worker : builder.workers) {
                taskTypes.add(worker.getTaskDefName());
                workers.add(worker);
            }
            this.threadCount = (builder.threadCount == -1) ? workers.size() : builder.threadCount;
            if (taskTypes.isEmpty()) {
                // No workers registered: avoid division by zero; nothing to allocate.
                this.taskThreadCount = new HashMap<>();
            } else {
                // Floor at 1 so a task type is never assigned zero threads (which would
                // create a zero-permit polling semaphore and silently starve that worker).
                int splitThreadCount = Math.max(1, threadCount / taskTypes.size());
                this.taskThreadCount =
                        taskTypes.stream().collect(Collectors.toMap(v -> v, v -> splitThreadCount));
            }
        }

        this.eurekaClient = builder.eurekaClient;
        this.taskClient = builder.taskClient;
        this.sleepWhenRetry = builder.sleepWhenRetry;
        this.updateRetryCount = builder.updateRetryCount;
        this.workerNamePrefix = builder.workerNamePrefix;
        this.taskToDomain = builder.taskToDomain;
        this.shutdownGracePeriodSeconds = builder.shutdownGracePeriodSeconds;
    }

    /** Builder used to create the instances of TaskRunnerConfigurer */
    public static class Builder {

        private String workerNamePrefix = "workflow-worker-%d";
        private int sleepWhenRetry = 500;
        private int updateRetryCount = 3;
        @Deprecated private int threadCount = -1;
        private int shutdownGracePeriodSeconds = 10;
        private final Iterable<Worker> workers;
        private EurekaClient eurekaClient;
        private final TaskClient taskClient;
        private Map<String /*taskType*/, String /*domain*/> taskToDomain = new HashMap<>();
        private Map<String /*taskType*/, Integer /*threadCount*/> taskThreadCount =
                new HashMap<>();

        public Builder(TaskClient taskClient, Iterable<Worker> workers) {
            Validate.notNull(taskClient, "TaskClient cannot be null");
            Validate.notNull(workers, "Workers cannot be null");
            this.taskClient = taskClient;
            this.workers = workers;
        }

        /**
         * @param workerNamePrefix prefix to be used for worker names, defaults to workflow-worker-
         *     if not supplied.
         * @return Returns the current instance.
         */
        public Builder withWorkerNamePrefix(String workerNamePrefix) {
            this.workerNamePrefix = workerNamePrefix;
            return this;
        }

        /**
         * @param sleepWhenRetry time in milliseconds, for which the thread should sleep when task
         *     update call fails, before retrying the operation.
         * @return Returns the current instance.
         */
        public Builder withSleepWhenRetry(int sleepWhenRetry) {
            this.sleepWhenRetry = sleepWhenRetry;
            return this;
        }

        /**
         * @param updateRetryCount number of times to retry the failed updateTask operation
         * @return Builder instance
         * @see #withSleepWhenRetry(int)
         */
        public Builder withUpdateRetryCount(int updateRetryCount) {
            this.updateRetryCount = updateRetryCount;
            return this;
        }

        /**
         * @param threadCount # of threads assigned to the workers. Should be at-least the size of
         *     taskWorkers to avoid starvation in a busy system.
         * @return Builder instance
         * @deprecated Use {@link TaskRunnerConfigurer.Builder#withTaskThreadCount(Map)} instead.
         */
        @Deprecated
        public Builder withThreadCount(int threadCount) {
            if (threadCount < 1) {
                throw new IllegalArgumentException("No. of threads cannot be less than 1");
            }
            this.threadCount = threadCount;
            return this;
        }

        /**
         * @param shutdownGracePeriodSeconds waiting seconds before forcing shutdown of your worker
         * @return Builder instance
         */
        public Builder withShutdownGracePeriodSeconds(int shutdownGracePeriodSeconds) {
            if (shutdownGracePeriodSeconds < 1) {
                throw new IllegalArgumentException(
                        "Seconds of shutdownGracePeriod cannot be less than 1");
            }
            this.shutdownGracePeriodSeconds = shutdownGracePeriodSeconds;
            return this;
        }

        /**
         * @param eurekaClient Eureka client - used to identify if the server is in discovery or
         *     not. When the server goes out of discovery, the polling is terminated. If passed
         *     null, discovery check is not done.
         * @return Builder instance
         */
        public Builder withEurekaClient(EurekaClient eurekaClient) {
            this.eurekaClient = eurekaClient;
            return this;
        }

        public Builder withTaskToDomain(Map<String, String> taskToDomain) {
            this.taskToDomain = taskToDomain;
            return this;
        }

        public Builder withTaskThreadCount(Map<String, Integer> taskThreadCount) {
            this.taskThreadCount = taskThreadCount;
            return this;
        }

        /**
         * Builds an instance of the TaskRunnerConfigurer.
         *
         * <p>Please see {@link TaskRunnerConfigurer#init()} method. The method must be called after
         * this constructor for the polling to start.
         */
        public TaskRunnerConfigurer build() {
            return new TaskRunnerConfigurer(this);
        }
    }

    /**
     * @return Thread Count for the shared executor pool
     */
    @Deprecated
    public int getThreadCount() {
        return threadCount;
    }

    /**
     * @return Thread Count for individual task type
     */
    public Map<String, Integer> getTaskThreadCount() {
        return taskThreadCount;
    }

    /**
     * @return seconds before forcing shutdown of worker
     */
    public int getShutdownGracePeriodSeconds() {
        return shutdownGracePeriodSeconds;
    }

    /**
     * @return sleep time in millisecond before task update retry is done when receiving error from
     *     the Conductor server
     */
    public int getSleepWhenRetry() {
        return sleepWhenRetry;
    }

    /**
     * @return Number of times updateTask should be retried when receiving error from Conductor
     *     server
     */
    public int getUpdateRetryCount() {
        return updateRetryCount;
    }

    /**
     * @return prefix used for worker names
     */
    public String getWorkerNamePrefix() {
        return workerNamePrefix;
    }

    /**
     * Starts the polling. Must be called after {@link TaskRunnerConfigurer.Builder#build()} method.
     */
    public synchronized void init() {
        this.taskPollExecutor =
                new TaskPollExecutor(
                        eurekaClient,
                        taskClient,
                        updateRetryCount,
                        taskToDomain,
                        workerNamePrefix,
                        taskThreadCount);

        // one scheduled polling loop per worker, firing at the worker's polling interval
        this.scheduledExecutorService = Executors.newScheduledThreadPool(workers.size());
        workers.forEach(
                worker ->
                        scheduledExecutorService.scheduleWithFixedDelay(
                                () -> taskPollExecutor.pollAndExecute(worker),
                                worker.getPollingInterval(),
                                worker.getPollingInterval(),
                                TimeUnit.MILLISECONDS));
    }

    /**
     * Invoke this method within a PreDestroy block within your application to facilitate a graceful
     * shutdown of your worker, during process termination.
     */
    public void shutdown() {
        taskPollExecutor.shutdownAndAwaitTermination(
                scheduledExecutorService, shutdownGracePeriodSeconds);
        taskPollExecutor.shutdown(shutdownGracePeriodSeconds);
    }
}
7,001
0
Create_ds/conductor/client/src/main/java/com/netflix/conductor/client
Create_ds/conductor/client/src/main/java/com/netflix/conductor/client/automator/PollingSemaphore.java
/*
 * Copyright 2020 Netflix, Inc.
 * <p>
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
 * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations under the License.
 */
package com.netflix.conductor.client.automator;

import java.util.concurrent.Semaphore;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * A class wrapping a semaphore which holds the number of permits available for polling and
 * executing tasks.
 */
class PollingSemaphore {

    private static final Logger LOGGER = LoggerFactory.getLogger(PollingSemaphore.class);

    // One permit per worker thread available for this task type.
    private final Semaphore semaphore;

    PollingSemaphore(int numSlots) {
        LOGGER.debug("Polling semaphore initialized with {} permits", numSlots);
        semaphore = new Semaphore(numSlots);
    }

    /** Signals that processing is complete and the specified number of permits can be released. */
    void complete(int numSlots) {
        LOGGER.debug("Completed execution; releasing permit");
        semaphore.release(numSlots);
    }

    /**
     * Gets the number of threads available for processing.
     *
     * @return number of available permits
     */
    int availableSlots() {
        int permitsLeft = semaphore.availablePermits();
        LOGGER.debug("Number of available permits: {}", permitsLeft);
        return permitsLeft;
    }

    /**
     * Signals if processing is allowed based on whether specified number of permits can be
     * acquired.
     *
     * @param numSlots the number of permits to acquire
     * @return {@code true} - if permit is acquired {@code false} - if permit could not be acquired
     */
    public boolean acquireSlots(int numSlots) {
        // Non-blocking: either all requested permits are taken at once, or none are.
        boolean wasAcquired = semaphore.tryAcquire(numSlots);
        LOGGER.debug("Trying to acquire {} permit: {}", numSlots, wasAcquired);
        return wasAcquired;
    }
}
7,002
0
Create_ds/conductor/client/src/main/java/com/netflix/conductor/client
Create_ds/conductor/client/src/main/java/com/netflix/conductor/client/automator/TaskPollExecutor.java
/*
 * Copyright 2022 Netflix, Inc.
 * <p>
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
 * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations under the License.
 */
package com.netflix.conductor.client.automator;

import java.io.PrintWriter;
import java.io.StringWriter;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
import java.util.concurrent.*;
import java.util.function.Function;

import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.concurrent.BasicThreadFactory;
import org.apache.commons.lang3.time.StopWatch;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.netflix.appinfo.InstanceInfo.InstanceStatus;
import com.netflix.conductor.client.config.PropertyFactory;
import com.netflix.conductor.client.http.TaskClient;
import com.netflix.conductor.client.telemetry.MetricsContainer;
import com.netflix.conductor.client.worker.Worker;
import com.netflix.conductor.common.metadata.tasks.Task;
import com.netflix.conductor.common.metadata.tasks.TaskResult;
import com.netflix.discovery.EurekaClient;
import com.netflix.spectator.api.Registry;
import com.netflix.spectator.api.Spectator;
import com.netflix.spectator.api.patterns.ThreadPoolMonitor;

/**
 * Manages the threadpool used by the workers for execution and server communication (polling and
 * task update).
 */
class TaskPollExecutor {

    private static final Logger LOGGER = LoggerFactory.getLogger(TaskPollExecutor.class);
    private static final Registry REGISTRY = Spectator.globalRegistry();

    private final EurekaClient eurekaClient;
    private final TaskClient taskClient;
    private final int updateRetryCount;
    private final ExecutorService executorService;
    // One semaphore per task type, sized to that type's thread count; populated once in the
    // constructor and only read afterwards.
    private final Map<String, PollingSemaphore> pollingSemaphoreMap;
    private final Map<String /*taskType*/, String /*domain*/> taskToDomain;

    private static final String DOMAIN = "domain";
    private static final String OVERRIDE_DISCOVERY = "pollOutOfDiscovery";
    private static final String ALL_WORKERS = "all";

    private static final int LEASE_EXTEND_RETRY_COUNT = 3;
    private static final double LEASE_EXTEND_DURATION_FACTOR = 0.8;

    private ScheduledExecutorService leaseExtendExecutorService;

    // Tracks the scheduled lease-extension job for each in-flight task.
    // FIX: must be a ConcurrentHashMap — entries are put by the polling thread in
    // pollAndExecute() and read/removed by executor-pool threads in finalizeTask()
    // (via CompletableFuture.whenComplete), so a plain HashMap is unsafe here.
    Map<String /* ID of the task*/, ScheduledFuture<?>> leaseExtendMap = new ConcurrentHashMap<>();

    TaskPollExecutor(
            EurekaClient eurekaClient,
            TaskClient taskClient,
            int updateRetryCount,
            Map<String, String> taskToDomain,
            String workerNamePrefix,
            Map<String, Integer> taskThreadCount) {
        this.eurekaClient = eurekaClient;
        this.taskClient = taskClient;
        this.updateRetryCount = updateRetryCount;
        this.taskToDomain = taskToDomain;

        this.pollingSemaphoreMap = new HashMap<>();
        int totalThreadCount = 0;
        for (Map.Entry<String, Integer> entry : taskThreadCount.entrySet()) {
            String taskType = entry.getKey();
            int count = entry.getValue();
            totalThreadCount += count;
            pollingSemaphoreMap.put(taskType, new PollingSemaphore(count));
        }

        LOGGER.info("Initialized the TaskPollExecutor with {} threads", totalThreadCount);

        this.executorService =
                Executors.newFixedThreadPool(
                        totalThreadCount,
                        new BasicThreadFactory.Builder()
                                .namingPattern(workerNamePrefix)
                                .uncaughtExceptionHandler(uncaughtExceptionHandler)
                                .build());

        ThreadPoolMonitor.attach(REGISTRY, (ThreadPoolExecutor) executorService, workerNamePrefix);

        LOGGER.info("Initialized the task lease extend executor");
        leaseExtendExecutorService =
                Executors.newSingleThreadScheduledExecutor(
                        new BasicThreadFactory.Builder()
                                .namingPattern("workflow-lease-extend-%d")
                                .daemon(true)
                                .uncaughtExceptionHandler(uncaughtExceptionHandler)
                                .build());
    }

    /**
     * Polls the server for as many tasks of the worker's type as there are free execution slots,
     * and hands each polled task to the executor pool. No-op when the instance is out of
     * discovery (unless overridden by config) or the worker is paused.
     */
    void pollAndExecute(Worker worker) {
        // worker-specific override first, then the global "all workers" override
        Boolean discoveryOverride =
                Optional.ofNullable(
                                PropertyFactory.getBoolean(
                                        worker.getTaskDefName(), OVERRIDE_DISCOVERY, null))
                        .orElseGet(
                                () ->
                                        PropertyFactory.getBoolean(
                                                ALL_WORKERS, OVERRIDE_DISCOVERY, false));

        if (eurekaClient != null
                && !eurekaClient.getInstanceRemoteStatus().equals(InstanceStatus.UP)
                && !discoveryOverride) {
            LOGGER.debug("Instance is NOT UP in discovery - will not poll");
            return;
        }

        if (worker.paused()) {
            MetricsContainer.incrementTaskPausedCount(worker.getTaskDefName());
            LOGGER.debug("Worker {} has been paused. Not polling anymore!", worker.getClass());
            return;
        }

        String taskType = worker.getTaskDefName();
        PollingSemaphore pollingSemaphore = getPollingSemaphore(taskType);

        // grab every currently-free slot in one shot; a failed acquire means another
        // poll cycle raced us, so just skip this round
        int slotsToAcquire = pollingSemaphore.availableSlots();
        if (slotsToAcquire <= 0 || !pollingSemaphore.acquireSlots(slotsToAcquire)) {
            return;
        }

        int acquiredTasks = 0;
        try {
            // domain resolution: worker-specific property -> global property -> static map
            String domain =
                    Optional.ofNullable(PropertyFactory.getString(taskType, DOMAIN, null))
                            .orElseGet(
                                    () ->
                                            Optional.ofNullable(
                                                            PropertyFactory.getString(
                                                                    ALL_WORKERS, DOMAIN, null))
                                                    .orElse(taskToDomain.get(taskType)));
            LOGGER.debug("Polling task of type: {} in domain: '{}'", taskType, domain);
            List<Task> tasks =
                    MetricsContainer.getPollTimer(taskType)
                            .record(
                                    () ->
                                            taskClient.batchPollTasksInDomain(
                                                    taskType,
                                                    domain,
                                                    worker.getIdentity(),
                                                    slotsToAcquire,
                                                    worker.getBatchPollTimeoutInMS()));
            acquiredTasks = tasks.size();

            for (Task task : tasks) {
                if (Objects.nonNull(task) && StringUtils.isNotBlank(task.getTaskId())) {
                    MetricsContainer.incrementTaskPollCount(taskType, 1);
                    LOGGER.debug(
                            "Polled task: {} of type: {} in domain: '{}', from worker: {}",
                            task.getTaskId(),
                            taskType,
                            domain,
                            worker.getIdentity());

                    CompletableFuture<Task> taskCompletableFuture =
                            CompletableFuture.supplyAsync(
                                    () -> processTask(task, worker, pollingSemaphore),
                                    executorService);

                    // periodically extend the task lease while it is executing, if enabled
                    if (task.getResponseTimeoutSeconds() > 0 && worker.leaseExtendEnabled()) {
                        ScheduledFuture<?> leaseExtendFuture =
                                leaseExtendExecutorService.scheduleWithFixedDelay(
                                        extendLease(task, taskCompletableFuture),
                                        Math.round(
                                                task.getResponseTimeoutSeconds()
                                                        * LEASE_EXTEND_DURATION_FACTOR),
                                        Math.round(
                                                task.getResponseTimeoutSeconds()
                                                        * LEASE_EXTEND_DURATION_FACTOR),
                                        TimeUnit.SECONDS);
                        leaseExtendMap.put(task.getTaskId(), leaseExtendFuture);
                    }

                    taskCompletableFuture.whenComplete(this::finalizeTask);
                } else {
                    // no task was returned in the poll, release the permit
                    pollingSemaphore.complete(1);
                }
            }
        } catch (Exception e) {
            MetricsContainer.incrementTaskPollErrorCount(worker.getTaskDefName(), e);
            LOGGER.error("Error when polling for tasks", e);
        }

        // immediately release unused permits
        pollingSemaphore.complete(slotsToAcquire - acquiredTasks);
    }

    /** Shuts down both executors, waiting up to {@code timeout} seconds for each. */
    void shutdown(int timeout) {
        shutdownAndAwaitTermination(executorService, timeout);
        shutdownAndAwaitTermination(leaseExtendExecutorService, timeout);
        leaseExtendMap.clear();
    }

    /** Graceful shutdown: wait for in-flight work, then force shutdown after the timeout. */
    void shutdownAndAwaitTermination(ExecutorService executorService, int timeout) {
        try {
            executorService.shutdown();
            if (executorService.awaitTermination(timeout, TimeUnit.SECONDS)) {
                LOGGER.debug("tasks completed, shutting down");
            } else {
                LOGGER.warn(String.format("forcing shutdown after waiting for %s second", timeout));
                executorService.shutdownNow();
            }
        } catch (InterruptedException ie) {
            LOGGER.warn("shutdown interrupted, invoking shutdownNow");
            executorService.shutdownNow();
            Thread.currentThread().interrupt();
        }
    }

    @SuppressWarnings("FieldCanBeLocal")
    private final Thread.UncaughtExceptionHandler uncaughtExceptionHandler =
            (thread, error) -> {
                // JVM may be in unstable state, try to send metrics then exit
                MetricsContainer.incrementUncaughtExceptionCount();
                LOGGER.error("Uncaught exception. Thread {} will exit now", thread, error);
            };

    /**
     * Executes the task on the worker, marking it FAILED on any throwable, and always releases
     * the polling permit when done.
     */
    private Task processTask(Task task, Worker worker, PollingSemaphore pollingSemaphore) {
        LOGGER.debug(
                "Executing task: {} of type: {} in worker: {} at {}",
                task.getTaskId(),
                task.getTaskDefName(),
                worker.getClass().getSimpleName(),
                worker.getIdentity());
        try {
            executeTask(worker, task);
        } catch (Throwable t) {
            task.setStatus(Task.Status.FAILED);
            TaskResult result = new TaskResult(task);
            handleException(t, result, worker, task);
        } finally {
            pollingSemaphore.complete(1);
        }
        return task;
    }

    /** Runs the worker, records execution time, and pushes the result back to the server. */
    private void executeTask(Worker worker, Task task) {
        StopWatch stopwatch = new StopWatch();
        stopwatch.start();
        TaskResult result = null;
        try {
            LOGGER.debug(
                    "Executing task: {} in worker: {} at {}",
                    task.getTaskId(),
                    worker.getClass().getSimpleName(),
                    worker.getIdentity());
            result = worker.execute(task);
            result.setWorkflowInstanceId(task.getWorkflowInstanceId());
            result.setTaskId(task.getTaskId());
            result.setWorkerId(worker.getIdentity());
        } catch (Exception e) {
            LOGGER.error(
                    "Unable to execute task: {} of type: {}",
                    task.getTaskId(),
                    task.getTaskDefName(),
                    e);
            if (result == null) {
                task.setStatus(Task.Status.FAILED);
                result = new TaskResult(task);
            }
            handleException(e, result, worker, task);
        } finally {
            stopwatch.stop();
            MetricsContainer.getExecutionTimer(worker.getTaskDefName())
                    .record(stopwatch.getTime(TimeUnit.MILLISECONDS), TimeUnit.MILLISECONDS);
        }

        LOGGER.debug(
                "Task: {} executed by worker: {} at {} with status: {}",
                task.getTaskId(),
                worker.getClass().getSimpleName(),
                worker.getIdentity(),
                result.getStatus());
        updateTaskResult(updateRetryCount, task, result, worker);
    }

    /**
     * Completion callback for the task future: records errors, and on success cancels the
     * task's lease-extension job (if one was scheduled).
     */
    private void finalizeTask(Task task, Throwable throwable) {
        if (throwable != null) {
            LOGGER.error(
                    "Error processing task: {} of type: {}",
                    task.getTaskId(),
                    task.getTaskType(),
                    throwable);
            MetricsContainer.incrementTaskExecutionErrorCount(task.getTaskType(), throwable);
        } else {
            LOGGER.debug(
                    "Task:{} of type:{} finished processing with status:{}",
                    task.getTaskId(),
                    task.getTaskDefName(),
                    task.getStatus());
            String taskId = task.getTaskId();
            ScheduledFuture<?> leaseExtendFuture = leaseExtendMap.get(taskId);
            if (leaseExtendFuture != null) {
                leaseExtendFuture.cancel(true);
                leaseExtendMap.remove(taskId);
            }
        }
    }

    /**
     * Uploads large output payloads to external storage if needed, then updates the task on
     * the server, retrying each operation up to {@code count} times.
     */
    private void updateTaskResult(int count, Task task, TaskResult result, Worker worker) {
        try {
            // upload if necessary
            Optional<String> optionalExternalStorageLocation =
                    retryOperation(
                            (TaskResult taskResult) -> upload(taskResult, task.getTaskType()),
                            count,
                            result,
                            "evaluateAndUploadLargePayload");

            if (optionalExternalStorageLocation.isPresent()) {
                result.setExternalOutputPayloadStoragePath(optionalExternalStorageLocation.get());
                result.setOutputData(null);
            }

            retryOperation(
                    (TaskResult taskResult) -> {
                        taskClient.updateTask(taskResult);
                        return null;
                    },
                    count,
                    result,
                    "updateTask");
        } catch (Exception e) {
            worker.onErrorUpdate(task);
            MetricsContainer.incrementTaskUpdateErrorCount(worker.getTaskDefName(), e);
            LOGGER.error(
                    String.format(
                            "Failed to update result: %s for task: %s in worker: %s",
                            result.toString(), task.getTaskDefName(), worker.getIdentity()),
                    e);
        }
    }

    /**
     * Attempts the external-storage upload; an IllegalArgumentException (payload too large /
     * invalid) marks the task FAILED_WITH_TERMINAL_ERROR instead of propagating.
     */
    private Optional<String> upload(TaskResult result, String taskType) {
        try {
            return taskClient.evaluateAndUploadLargePayload(result.getOutputData(), taskType);
        } catch (IllegalArgumentException iae) {
            result.setReasonForIncompletion(iae.getMessage());
            result.setOutputData(null);
            result.setStatus(TaskResult.Status.FAILED_WITH_TERMINAL_ERROR);
            return Optional.empty();
        }
    }

    /**
     * Applies {@code operation} up to {@code count} times, sleeping 500ms between attempts;
     * throws RuntimeException when all attempts fail.
     */
    private <T, R> R retryOperation(Function<T, R> operation, int count, T input, String opName) {
        int index = 0;
        while (index < count) {
            try {
                return operation.apply(input);
            } catch (Exception e) {
                index++;
                try {
                    Thread.sleep(500L);
                } catch (InterruptedException ie) {
                    LOGGER.error("Retry interrupted", ie);
                }
            }
        }
        throw new RuntimeException("Exhausted retries performing " + opName);
    }

    /** Marks the result FAILED with the throwable's details and pushes it to the server. */
    private void handleException(Throwable t, TaskResult result, Worker worker, Task task) {
        LOGGER.error(String.format("Error while executing task %s", task.toString()), t);
        MetricsContainer.incrementTaskExecutionErrorCount(worker.getTaskDefName(), t);
        result.setStatus(TaskResult.Status.FAILED);
        result.setReasonForIncompletion("Error while executing the task: " + t);

        StringWriter stringWriter = new StringWriter();
        t.printStackTrace(new PrintWriter(stringWriter));
        result.log(stringWriter.toString());

        updateTaskResult(updateRetryCount, task, result, worker);
    }

    private PollingSemaphore getPollingSemaphore(String taskType) {
        return pollingSemaphoreMap.get(taskType);
    }

    /**
     * Returns the periodic job that extends the server-side lease for {@code task} while its
     * future is still running; a no-op (with a warning) once the future completes.
     */
    private Runnable extendLease(Task task, CompletableFuture<Task> taskCompletableFuture) {
        return () -> {
            if (taskCompletableFuture.isDone()) {
                LOGGER.warn(
                        "Task processing for {} completed, but its lease extend was not cancelled",
                        task.getTaskId());
                return;
            }
            LOGGER.info("Attempting to extend lease for {}", task.getTaskId());
            try {
                TaskResult result = new TaskResult(task);
                result.setExtendLease(true);
                retryOperation(
                        (TaskResult taskResult) -> {
                            taskClient.updateTask(taskResult);
                            return null;
                        },
                        LEASE_EXTEND_RETRY_COUNT,
                        result,
                        "extend lease");
                MetricsContainer.incrementTaskLeaseExtendCount(task.getTaskDefName(), 1);
            } catch (Exception e) {
                MetricsContainer.incrementTaskLeaseExtendErrorCount(task.getTaskDefName(), e);
                LOGGER.error("Failed to extend lease for {}", task.getTaskId(), e);
            }
        };
    }
}
7,003
0
Create_ds/conductor/client/src/main/java/com/netflix/conductor/client
Create_ds/conductor/client/src/main/java/com/netflix/conductor/client/http/EventClient.java
/*
 * Copyright 2022 Netflix, Inc.
 * <p>
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
 * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations under the License.
 */
package com.netflix.conductor.client.http;

import java.util.List;

import org.apache.commons.lang3.Validate;

import com.netflix.conductor.client.config.ConductorClientConfiguration;
import com.netflix.conductor.client.config.DefaultConductorClientConfiguration;
import com.netflix.conductor.common.metadata.events.EventHandler;
import com.sun.jersey.api.client.ClientHandler;
import com.sun.jersey.api.client.GenericType;
import com.sun.jersey.api.client.config.ClientConfig;
import com.sun.jersey.api.client.config.DefaultClientConfig;
import com.sun.jersey.api.client.filter.ClientFilter;

// Client class for all Event Handler operations
public class EventClient extends ClientBase {

    // Jersey type token for deserializing List<EventHandler> responses
    private static final GenericType<List<EventHandler>> eventHandlerList =
            new GenericType<List<EventHandler>>() {};

    /** Creates a default metadata client */
    public EventClient() {
        this(new DefaultClientConfig(), new DefaultConductorClientConfiguration(), null);
    }

    /**
     * @param clientConfig REST Client configuration
     */
    public EventClient(ClientConfig clientConfig) {
        this(clientConfig, new DefaultConductorClientConfiguration(), null);
    }

    /**
     * @param clientConfig REST Client configuration
     * @param clientHandler Jersey client handler. Useful when plugging in various http client
     *     interaction modules (e.g. ribbon)
     */
    public EventClient(ClientConfig clientConfig, ClientHandler clientHandler) {
        this(clientConfig, new DefaultConductorClientConfiguration(), clientHandler);
    }

    /**
     * @param config REST Client configuration
     * @param handler Jersey client handler. Useful when plugging in various http client
     *     interaction modules (e.g. ribbon)
     * @param filters Chain of client side filters to be applied per request
     */
    public EventClient(ClientConfig config, ClientHandler handler, ClientFilter... filters) {
        this(config, new DefaultConductorClientConfiguration(), handler, filters);
    }

    /**
     * @param config REST Client configuration
     * @param clientConfiguration Specific properties configured for the client, see {@link
     *     ConductorClientConfiguration}
     * @param handler Jersey client handler. Useful when plugging in various http client interaction
     *     modules (e.g. ribbon)
     * @param filters Chain of client side filters to be applied per request
     */
    public EventClient(
            ClientConfig config,
            ConductorClientConfiguration clientConfiguration,
            ClientHandler handler,
            ClientFilter... filters) {
        super(new ClientRequestHandler(config, handler, filters), clientConfiguration);
    }

    // Package-private constructor used by tests to inject a pre-built request handler.
    EventClient(ClientRequestHandler requestHandler) {
        super(requestHandler, null);
    }

    /**
     * Register an event handler with the server
     *
     * @param eventHandler the eventHandler definition
     */
    public void registerEventHandler(EventHandler eventHandler) {
        Validate.notNull(eventHandler, "Event Handler definition cannot be null");
        postForEntityWithRequestOnly("event", eventHandler);
    }

    /**
     * Updates an event handler with the server
     *
     * @param eventHandler the eventHandler definition
     */
    public void updateEventHandler(EventHandler eventHandler) {
        Validate.notNull(eventHandler, "Event Handler definition cannot be null");
        put("event", null, eventHandler);
    }

    /**
     * @param event name of the event
     * @param activeOnly if true, returns only the active handlers
     * @return Returns the list of all the event handlers for a given event
     */
    public List<EventHandler> getEventHandlers(String event, boolean activeOnly) {
        Validate.notBlank(event, "Event cannot be blank");

        // "activeOnly" is passed as a query parameter; {event} is a path parameter
        return getForEntity(
                "event/{event}", new Object[] {"activeOnly", activeOnly}, eventHandlerList, event);
    }

    /**
     * Removes the event handler definition from the conductor server
     *
     * @param name the name of the event handler to be unregistered
     */
    public void unregisterEventHandler(String name) {
        Validate.notBlank(name, "Event handler name cannot be blank");
        delete("event/{name}", name);
    }
}
7,004
0
Create_ds/conductor/client/src/main/java/com/netflix/conductor/client
Create_ds/conductor/client/src/main/java/com/netflix/conductor/client/http/TaskClient.java
/*
 * Copyright 2022 Netflix, Inc.
 * <p>
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
 * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations under the License.
 */
package com.netflix.conductor.client.http;

import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Optional;

import org.apache.commons.lang3.ObjectUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.Validate;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.netflix.conductor.client.config.ConductorClientConfiguration;
import com.netflix.conductor.client.config.DefaultConductorClientConfiguration;
import com.netflix.conductor.client.exception.ConductorClientException;
import com.netflix.conductor.client.telemetry.MetricsContainer;
import com.netflix.conductor.common.metadata.tasks.PollData;
import com.netflix.conductor.common.metadata.tasks.Task;
import com.netflix.conductor.common.metadata.tasks.TaskExecLog;
import com.netflix.conductor.common.metadata.tasks.TaskResult;
import com.netflix.conductor.common.run.SearchResult;
import com.netflix.conductor.common.run.TaskSummary;
import com.netflix.conductor.common.utils.ExternalPayloadStorage;
import com.netflix.conductor.common.utils.ExternalPayloadStorage.PayloadType;

import com.sun.jersey.api.client.ClientHandler;
import com.sun.jersey.api.client.GenericType;
import com.sun.jersey.api.client.config.ClientConfig;
import com.sun.jersey.api.client.config.DefaultClientConfig;
import com.sun.jersey.api.client.filter.ClientFilter;

/** Client for conductor task management including polling for task, updating task status etc. */
public class TaskClient extends ClientBase {

    private static final GenericType<List<Task>> taskList = new GenericType<List<Task>>() {};

    private static final GenericType<List<TaskExecLog>> taskExecLogList =
            new GenericType<List<TaskExecLog>>() {};

    private static final GenericType<List<PollData>> pollDataList =
            new GenericType<List<PollData>>() {};

    private static final GenericType<SearchResult<TaskSummary>> searchResultTaskSummary =
            new GenericType<SearchResult<TaskSummary>>() {};

    private static final GenericType<SearchResult<Task>> searchResultTask =
            new GenericType<SearchResult<Task>>() {};

    // NOTE(review): not referenced anywhere in this class; kept for source compatibility — confirm
    // before removing.
    private static final GenericType<Map<String, Integer>> queueSizeMap =
            new GenericType<Map<String, Integer>>() {};

    private static final Logger LOGGER = LoggerFactory.getLogger(TaskClient.class);

    /** Creates a default task client */
    public TaskClient() {
        this(new DefaultClientConfig(), new DefaultConductorClientConfiguration(), null);
    }

    /**
     * @param config REST Client configuration
     */
    public TaskClient(ClientConfig config) {
        this(config, new DefaultConductorClientConfiguration(), null);
    }

    /**
     * @param config REST Client configuration
     * @param handler Jersey client handler. Useful when plugging in various http client interaction
     *     modules (e.g. ribbon)
     */
    public TaskClient(ClientConfig config, ClientHandler handler) {
        this(config, new DefaultConductorClientConfiguration(), handler);
    }

    /**
     * @param config REST Client configuration
     * @param handler Jersey client handler. Useful when plugging in various http client interaction
     *     modules (e.g. ribbon)
     * @param filters Chain of client side filters to be applied per request
     */
    public TaskClient(ClientConfig config, ClientHandler handler, ClientFilter... filters) {
        this(config, new DefaultConductorClientConfiguration(), handler, filters);
    }

    /**
     * @param config REST Client configuration
     * @param clientConfiguration Specific properties configured for the client, see {@link
     *     ConductorClientConfiguration}
     * @param handler Jersey client handler. Useful when plugging in various http client interaction
     *     modules (e.g. ribbon)
     * @param filters Chain of client side filters to be applied per request
     */
    public TaskClient(
            ClientConfig config,
            ConductorClientConfiguration clientConfiguration,
            ClientHandler handler,
            ClientFilter... filters) {
        super(new ClientRequestHandler(config, handler, filters), clientConfiguration);
    }

    // Package-private: allows injecting a pre-built request handler directly.
    TaskClient(ClientRequestHandler requestHandler) {
        super(requestHandler, null);
    }

    /**
     * Perform a poll for a task of a specific task type.
     *
     * @param taskType The taskType to poll for
     * @param domain The domain of the task type
     * @param workerId Name of the client worker. Used for logging.
     * @return Task waiting to be executed.
     */
    public Task pollTask(String taskType, String workerId, String domain) {
        Validate.notBlank(taskType, "Task type cannot be blank");
        Validate.notBlank(workerId, "Worker id cannot be blank");

        Object[] params = new Object[] {"workerid", workerId, "domain", domain};
        // Server returns no content when the queue is empty; normalize to an empty Task.
        Task task =
                ObjectUtils.defaultIfNull(
                        getForEntity("tasks/poll/{taskType}", params, Task.class, taskType),
                        new Task());
        populateTaskPayloads(task);
        return task;
    }

    /**
     * Perform a batch poll for tasks by task type. Batch size is configurable by count.
     *
     * @param taskType Type of task to poll for
     * @param workerId Name of the client worker. Used for logging.
     * @param count Maximum number of tasks to be returned. Actual number of tasks returned can be
     *     less than this number.
     * @param timeoutInMillisecond Long poll wait timeout.
     * @return List of tasks awaiting to be executed.
     */
    public List<Task> batchPollTasksByTaskType(
            String taskType, String workerId, int count, int timeoutInMillisecond) {
        Validate.notBlank(taskType, "Task type cannot be blank");
        Validate.notBlank(workerId, "Worker id cannot be blank");
        Validate.isTrue(count > 0, "Count must be greater than 0");

        Object[] params =
                new Object[] {
                    "workerid", workerId, "count", count, "timeout", timeoutInMillisecond
                };
        List<Task> tasks = getForEntity("tasks/poll/batch/{taskType}", params, taskList, taskType);
        tasks.forEach(this::populateTaskPayloads);
        return tasks;
    }

    /**
     * Batch poll for tasks in a domain. Batch size is configurable by count.
     *
     * @param taskType Type of task to poll for
     * @param domain The domain of the task type
     * @param workerId Name of the client worker. Used for logging.
     * @param count Maximum number of tasks to be returned. Actual number of tasks returned can be
     *     less than this number.
     * @param timeoutInMillisecond Long poll wait timeout.
     * @return List of tasks awaiting to be executed.
     */
    public List<Task> batchPollTasksInDomain(
            String taskType, String domain, String workerId, int count, int timeoutInMillisecond) {
        Validate.notBlank(taskType, "Task type cannot be blank");
        Validate.notBlank(workerId, "Worker id cannot be blank");
        Validate.isTrue(count > 0, "Count must be greater than 0");

        Object[] params =
                new Object[] {
                    "workerid",
                    workerId,
                    "count",
                    count,
                    "timeout",
                    timeoutInMillisecond,
                    "domain",
                    domain
                };
        List<Task> tasks = getForEntity("tasks/poll/batch/{taskType}", params, taskList, taskType);
        tasks.forEach(this::populateTaskPayloads);
        return tasks;
    }

    /**
     * Populates the task input/output from external payload storage if the external storage path is
     * specified.
     *
     * @param task the task for which the input is to be populated.
     */
    private void populateTaskPayloads(Task task) {
        if (StringUtils.isNotBlank(task.getExternalInputPayloadStoragePath())) {
            MetricsContainer.incrementExternalPayloadUsedCount(
                    task.getTaskDefName(),
                    ExternalPayloadStorage.Operation.READ.name(),
                    PayloadType.TASK_INPUT.name());
            task.setInputData(
                    downloadFromExternalStorage(
                            PayloadType.TASK_INPUT, task.getExternalInputPayloadStoragePath()));
            // Clear the path so downstream consumers see the inlined payload only.
            task.setExternalInputPayloadStoragePath(null);
        }
        if (StringUtils.isNotBlank(task.getExternalOutputPayloadStoragePath())) {
            MetricsContainer.incrementExternalPayloadUsedCount(
                    task.getTaskDefName(),
                    ExternalPayloadStorage.Operation.READ.name(),
                    PayloadType.TASK_OUTPUT.name());
            task.setOutputData(
                    downloadFromExternalStorage(
                            PayloadType.TASK_OUTPUT, task.getExternalOutputPayloadStoragePath()));
            task.setExternalOutputPayloadStoragePath(null);
        }
    }

    /**
     * Updates the result of a task execution. If the size of the task output payload is bigger than
     * {@link ConductorClientConfiguration#getTaskOutputPayloadThresholdKB()}, it is uploaded to
     * {@link ExternalPayloadStorage}, if enabled, else the task is marked as
     * FAILED_WITH_TERMINAL_ERROR.
     *
     * @param taskResult the {@link TaskResult} of the executed task to be updated.
     */
    public void updateTask(TaskResult taskResult) {
        Validate.notNull(taskResult, "Task result cannot be null");
        postForEntityWithRequestOnly("tasks", taskResult);
    }

    /**
     * Serializes the task output and, if it exceeds the configured threshold, uploads it to {@link
     * ExternalPayloadStorage} (when enabled).
     *
     * @param taskOutputData the task output payload to evaluate
     * @param taskType the task type, used for metrics and error reporting
     * @return the external storage path if the payload was uploaded, otherwise an empty {@link
     *     Optional}
     * @throws IllegalArgumentException if the payload exceeds the permissible size and cannot be
     *     externalized
     * @throws ConductorClientException if the payload cannot be serialized
     */
    public Optional<String> evaluateAndUploadLargePayload(
            Map<String, Object> taskOutputData, String taskType) {
        try (ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream()) {
            objectMapper.writeValue(byteArrayOutputStream, taskOutputData);
            byte[] taskOutputBytes = byteArrayOutputStream.toByteArray();
            long taskResultSize = taskOutputBytes.length;
            MetricsContainer.recordTaskResultPayloadSize(taskType, taskResultSize);

            long payloadSizeThreshold =
                    conductorClientConfiguration.getTaskOutputPayloadThresholdKB() * 1024L;
            if (taskResultSize > payloadSizeThreshold) {
                long maxPayloadSizeThreshold =
                        conductorClientConfiguration.getTaskOutputMaxPayloadThresholdKB() * 1024L;
                boolean externalStorageEnabled =
                        conductorClientConfiguration.isExternalPayloadStorageEnabled();
                if (!externalStorageEnabled || taskResultSize > maxPayloadSizeThreshold) {
                    // Report the limit that was actually breached: the hard maximum when external
                    // storage is enabled, otherwise the inline payload threshold. (Previously this
                    // always reported the lower threshold, which was misleading when the max
                    // threshold was the one exceeded.)
                    long permissibleSize =
                            externalStorageEnabled ? maxPayloadSizeThreshold : payloadSizeThreshold;
                    throw new IllegalArgumentException(
                            String.format(
                                    "The TaskResult payload size: %d is greater than the permissible %d bytes",
                                    taskResultSize, permissibleSize));
                }
                MetricsContainer.incrementExternalPayloadUsedCount(
                        taskType,
                        ExternalPayloadStorage.Operation.WRITE.name(),
                        PayloadType.TASK_OUTPUT.name());
                return Optional.of(
                        uploadToExternalPayloadStorage(
                                PayloadType.TASK_OUTPUT, taskOutputBytes, taskResultSize));
            }
            return Optional.empty();
        } catch (IOException e) {
            String errorMsg = String.format("Unable to update task: %s with task result", taskType);
            LOGGER.error(errorMsg, e);
            throw new ConductorClientException(errorMsg, e);
        }
    }

    /**
     * Ack for the task poll.
     *
     * @param taskId Id of the task to be polled
     * @param workerId user identified worker.
     * @return true if the task was found with the given ID and acknowledged. False otherwise. If
     *     the server returns false, the client should NOT attempt to ack again.
     */
    public Boolean ack(String taskId, String workerId) {
        Validate.notBlank(taskId, "Task id cannot be blank");
        String response =
                postForEntity(
                        "tasks/{taskId}/ack",
                        null,
                        new Object[] {"workerid", workerId},
                        String.class,
                        taskId);
        return Boolean.valueOf(response);
    }

    /**
     * Log execution messages for a task.
     *
     * @param taskId id of the task
     * @param logMessage the message to be logged
     */
    public void logMessageForTask(String taskId, String logMessage) {
        Validate.notBlank(taskId, "Task id cannot be blank");
        postForEntityWithRequestOnly("tasks/" + taskId + "/log", logMessage);
    }

    /**
     * Fetch execution logs for a task.
     *
     * @param taskId id of the task.
     */
    public List<TaskExecLog> getTaskLogs(String taskId) {
        Validate.notBlank(taskId, "Task id cannot be blank");
        return getForEntity("tasks/{taskId}/log", null, taskExecLogList, taskId);
    }

    /**
     * Retrieve information about the task
     *
     * @param taskId ID of the task
     * @return Task details
     */
    public Task getTaskDetails(String taskId) {
        Validate.notBlank(taskId, "Task id cannot be blank");
        return getForEntity("tasks/{taskId}", null, Task.class, taskId);
    }

    /**
     * Removes a task from a taskType queue
     *
     * @param taskType the taskType to identify the queue
     * @param taskId the id of the task to be removed
     */
    public void removeTaskFromQueue(String taskType, String taskId) {
        Validate.notBlank(taskType, "Task type cannot be blank");
        Validate.notBlank(taskId, "Task id cannot be blank");
        delete("tasks/queue/{taskType}/{taskId}", taskType, taskId);
    }

    /**
     * Fetch the number of queued tasks for a task type.
     *
     * @param taskType the task type to identify the queue
     * @return the queue size, or 0 if the server returns no value
     */
    public int getQueueSizeForTask(String taskType) {
        Validate.notBlank(taskType, "Task type cannot be blank");

        Integer queueSize =
                getForEntity(
                        "tasks/queue/size",
                        new Object[] {"taskType", taskType},
                        new GenericType<Integer>() {});
        return queueSize != null ? queueSize : 0;
    }

    /**
     * Fetch the number of queued tasks for a task type, optionally scoped by domain, isolation
     * group and execution namespace (blank values are omitted from the query).
     *
     * @param taskType the task type to identify the queue
     * @param domain the domain of the task type (optional)
     * @param isolationGroupId the isolation group (optional)
     * @param executionNamespace the execution namespace (optional)
     * @return the queue size, or 0 if the server returns no value
     */
    public int getQueueSizeForTask(
            String taskType, String domain, String isolationGroupId, String executionNamespace) {
        Validate.notBlank(taskType, "Task type cannot be blank");

        List<Object> params = new LinkedList<>();
        params.add("taskType");
        params.add(taskType);

        if (StringUtils.isNotBlank(domain)) {
            params.add("domain");
            params.add(domain);
        }

        if (StringUtils.isNotBlank(isolationGroupId)) {
            params.add("isolationGroupId");
            params.add(isolationGroupId);
        }

        if (StringUtils.isNotBlank(executionNamespace)) {
            params.add("executionNamespace");
            params.add(executionNamespace);
        }

        Integer queueSize =
                getForEntity(
                        "tasks/queue/size",
                        params.toArray(new Object[0]),
                        new GenericType<Integer>() {});
        return queueSize != null ? queueSize : 0;
    }

    /**
     * Get last poll data for a given task type
     *
     * @param taskType the task type for which poll data is to be fetched
     * @return returns the list of poll data for the task type
     */
    public List<PollData> getPollData(String taskType) {
        Validate.notBlank(taskType, "Task type cannot be blank");

        Object[] params = new Object[] {"taskType", taskType};
        return getForEntity("tasks/queue/polldata", params, pollDataList);
    }

    /**
     * Get the last poll data for all task types
     *
     * @return returns a list of poll data for all task types
     */
    public List<PollData> getAllPollData() {
        return getForEntity("tasks/queue/polldata/all", null, pollDataList);
    }

    /**
     * Requeue pending tasks for all running workflows
     *
     * @return returns the number of tasks that have been requeued
     */
    public String requeueAllPendingTasks() {
        return postForEntity("tasks/queue/requeue", null, null, String.class);
    }

    /**
     * Requeue pending tasks of a specific task type
     *
     * @return returns the number of tasks that have been requeued
     */
    public String requeuePendingTasksByTaskType(String taskType) {
        Validate.notBlank(taskType, "Task type cannot be blank");
        return postForEntity("tasks/queue/requeue/{taskType}", null, null, String.class, taskType);
    }

    /**
     * Search for tasks based on payload
     *
     * @param query the search string
     * @return returns the {@link SearchResult} containing the {@link TaskSummary} matching the
     *     query
     */
    public SearchResult<TaskSummary> search(String query) {
        return getForEntity("tasks/search", new Object[] {"query", query}, searchResultTaskSummary);
    }

    /**
     * Search for tasks based on payload
     *
     * @param query the search string
     * @return returns the {@link SearchResult} containing the {@link Task} matching the query
     */
    public SearchResult<Task> searchV2(String query) {
        return getForEntity("tasks/search-v2", new Object[] {"query", query}, searchResultTask);
    }

    /**
     * Paginated search for tasks based on payload
     *
     * @param start start value of page
     * @param size number of tasks to be returned
     * @param sort sort order
     * @param freeText additional free text query
     * @param query the search query
     * @return the {@link SearchResult} containing the {@link TaskSummary} that match the query
     */
    public SearchResult<TaskSummary> search(
            Integer start, Integer size, String sort, String freeText, String query) {
        Object[] params =
                new Object[] {
                    "start", start, "size", size, "sort", sort, "freeText", freeText, "query", query
                };
        return getForEntity("tasks/search", params, searchResultTaskSummary);
    }

    /**
     * Paginated search for tasks based on payload
     *
     * @param start start value of page
     * @param size number of tasks to be returned
     * @param sort sort order
     * @param freeText additional free text query
     * @param query the search query
     * @return the {@link SearchResult} containing the {@link Task} that match the query
     */
    public SearchResult<Task> searchV2(
            Integer start, Integer size, String sort, String freeText, String query) {
        Object[] params =
                new Object[] {
                    "start", start, "size", size, "sort", sort, "freeText", freeText, "query", query
                };
        return getForEntity("tasks/search-v2", params, searchResultTask);
    }
}
7,005
0
Create_ds/conductor/client/src/main/java/com/netflix/conductor/client
Create_ds/conductor/client/src/main/java/com/netflix/conductor/client/http/PayloadStorage.java
/*
 * Copyright 2020 Netflix, Inc.
 * <p>
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
 * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations under the License.
 */
package com.netflix.conductor.client.http;

import java.io.BufferedOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.net.HttpURLConnection;
import java.net.MalformedURLException;
import java.net.URI;
import java.net.URISyntaxException;
import java.net.URL;

import javax.ws.rs.core.Response;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.netflix.conductor.client.exception.ConductorClientException;
import com.netflix.conductor.common.run.ExternalStorageLocation;
import com.netflix.conductor.common.utils.ExternalPayloadStorage;

import com.amazonaws.util.IOUtils;

/** An implementation of {@link ExternalPayloadStorage} for storing large JSON payload data. */
class PayloadStorage implements ExternalPayloadStorage {

    private static final Logger LOGGER = LoggerFactory.getLogger(PayloadStorage.class);

    private final ClientBase clientBase;

    PayloadStorage(ClientBase clientBase) {
        this.clientBase = clientBase;
    }

    /**
     * This method is not intended to be used in the client. The client makes a request to the
     * server to get the {@link ExternalStorageLocation}
     */
    @Override
    public ExternalStorageLocation getLocation(
            Operation operation, PayloadType payloadType, String path) {
        // Map the payload type onto the server resource that hands out storage locations.
        final String resource;
        switch (payloadType) {
            case WORKFLOW_INPUT:
            case WORKFLOW_OUTPUT:
                resource = "workflow";
                break;
            case TASK_INPUT:
            case TASK_OUTPUT:
                resource = "tasks";
                break;
            default:
                throw new ConductorClientException(
                        String.format(
                                "Invalid payload type: %s for operation: %s",
                                payloadType.toString(), operation.toString()));
        }
        Object[] queryParams = {
            "path", path, "operation", operation.toString(), "payloadType", payloadType.toString()
        };
        return clientBase.getForEntity(
                String.format("%s/externalstoragelocation", resource),
                queryParams,
                ExternalStorageLocation.class);
    }

    /**
     * Uploads the payload to the uri specified.
     *
     * @param uri the location to which the object is to be uploaded
     * @param payload an {@link InputStream} containing the json payload which is to be uploaded
     * @param payloadSize the size of the json payload in bytes
     * @throws ConductorClientException if the upload fails due to an invalid path or an error from
     *     external storage
     */
    @Override
    public void upload(String uri, InputStream payload, long payloadSize) {
        HttpURLConnection httpConnection = null;
        try {
            URL targetUrl = new URI(uri).toURL();
            httpConnection = (HttpURLConnection) targetUrl.openConnection();
            httpConnection.setDoOutput(true);
            httpConnection.setRequestMethod("PUT");

            try (BufferedOutputStream outputStream =
                    new BufferedOutputStream(httpConnection.getOutputStream())) {
                long bytesWritten = IOUtils.copy(payload, outputStream);
                outputStream.flush();

                // Anything outside the 2xx family is treated as a failed upload.
                int responseCode = httpConnection.getResponseCode();
                if (Response.Status.fromStatusCode(responseCode).getFamily()
                        != Response.Status.Family.SUCCESSFUL) {
                    String errorMsg =
                            String.format("Unable to upload. Response code: %d", responseCode);
                    LOGGER.error(errorMsg);
                    throw new ConductorClientException(errorMsg);
                }
                LOGGER.debug(
                        "Uploaded {} bytes to uri: {}, with HTTP response code: {}",
                        bytesWritten,
                        uri,
                        responseCode);
            }
        } catch (URISyntaxException | MalformedURLException e) {
            String errorMsg = String.format("Invalid path specified: %s", uri);
            LOGGER.error(errorMsg, e);
            throw new ConductorClientException(errorMsg, e);
        } catch (IOException e) {
            String errorMsg = String.format("Error uploading to path: %s", uri);
            LOGGER.error(errorMsg, e);
            throw new ConductorClientException(errorMsg, e);
        } finally {
            if (httpConnection != null) {
                httpConnection.disconnect();
            }
            try {
                if (payload != null) {
                    payload.close();
                }
            } catch (IOException e) {
                LOGGER.warn("Unable to close inputstream when uploading to uri: {}", uri);
            }
        }
    }

    /**
     * Downloads the payload from the given uri.
     *
     * @param uri the location from where the object is to be downloaded
     * @return an inputstream of the payload in the external storage
     * @throws ConductorClientException if the download fails due to an invalid path or an error
     *     from external storage
     */
    @Override
    public InputStream download(String uri) {
        HttpURLConnection httpConnection = null;
        String errorMsg;
        try {
            URL targetUrl = new URI(uri).toURL();
            httpConnection = (HttpURLConnection) targetUrl.openConnection();
            httpConnection.setDoOutput(false);

            int responseCode = httpConnection.getResponseCode();
            if (responseCode == HttpURLConnection.HTTP_OK) {
                LOGGER.debug(
                        "Download completed with HTTP response code: {}",
                        httpConnection.getResponseCode());
                // Buffer the full body in memory so the connection can be disconnected in finally.
                return org.apache.commons.io.IOUtils.toBufferedInputStream(
                        httpConnection.getInputStream());
            }
            errorMsg = String.format("Unable to download. Response code: %d", responseCode);
            LOGGER.error(errorMsg);
            throw new ConductorClientException(errorMsg);
        } catch (URISyntaxException | MalformedURLException e) {
            errorMsg = String.format("Invalid uri specified: %s", uri);
            LOGGER.error(errorMsg, e);
            throw new ConductorClientException(errorMsg, e);
        } catch (IOException e) {
            errorMsg = String.format("Error downloading from uri: %s", uri);
            LOGGER.error(errorMsg, e);
            throw new ConductorClientException(errorMsg, e);
        } finally {
            if (httpConnection != null) {
                httpConnection.disconnect();
            }
        }
    }
}
7,006
0
Create_ds/conductor/client/src/main/java/com/netflix/conductor/client
Create_ds/conductor/client/src/main/java/com/netflix/conductor/client/http/WorkflowClient.java
/* * Copyright 2021 Netflix, Inc. * <p> * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * <p> * http://www.apache.org/licenses/LICENSE-2.0 * <p> * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. */ package com.netflix.conductor.client.http; import java.io.ByteArrayOutputStream; import java.io.IOException; import java.util.List; import org.apache.commons.lang3.StringUtils; import org.apache.commons.lang3.Validate; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import com.netflix.conductor.client.config.ConductorClientConfiguration; import com.netflix.conductor.client.config.DefaultConductorClientConfiguration; import com.netflix.conductor.client.exception.ConductorClientException; import com.netflix.conductor.client.telemetry.MetricsContainer; import com.netflix.conductor.common.metadata.workflow.RerunWorkflowRequest; import com.netflix.conductor.common.metadata.workflow.StartWorkflowRequest; import com.netflix.conductor.common.model.BulkResponse; import com.netflix.conductor.common.run.SearchResult; import com.netflix.conductor.common.run.Workflow; import com.netflix.conductor.common.run.WorkflowSummary; import com.netflix.conductor.common.run.WorkflowTestRequest; import com.netflix.conductor.common.utils.ExternalPayloadStorage; import com.sun.jersey.api.client.ClientHandler; import com.sun.jersey.api.client.GenericType; import com.sun.jersey.api.client.config.ClientConfig; import com.sun.jersey.api.client.config.DefaultClientConfig; import com.sun.jersey.api.client.filter.ClientFilter; public class WorkflowClient extends ClientBase { private static final 
GenericType<SearchResult<WorkflowSummary>> searchResultWorkflowSummary = new GenericType<SearchResult<WorkflowSummary>>() {}; private static final GenericType<SearchResult<Workflow>> searchResultWorkflow = new GenericType<SearchResult<Workflow>>() {}; private static final Logger LOGGER = LoggerFactory.getLogger(WorkflowClient.class); /** Creates a default workflow client */ public WorkflowClient() { this(new DefaultClientConfig(), new DefaultConductorClientConfiguration(), null); } /** * @param config REST Client configuration */ public WorkflowClient(ClientConfig config) { this(config, new DefaultConductorClientConfiguration(), null); } /** * @param config REST Client configuration * @param handler Jersey client handler. Useful when plugging in various http client interaction * modules (e.g. ribbon) */ public WorkflowClient(ClientConfig config, ClientHandler handler) { this(config, new DefaultConductorClientConfiguration(), handler); } /** * @param config REST Client configuration * @param handler Jersey client handler. Useful when plugging in various http client interaction * modules (e.g. ribbon) * @param filters Chain of client side filters to be applied per request */ public WorkflowClient(ClientConfig config, ClientHandler handler, ClientFilter... filters) { this(config, new DefaultConductorClientConfiguration(), handler, filters); } /** * @param config REST Client configuration * @param clientConfiguration Specific properties configured for the client, see {@link * ConductorClientConfiguration} * @param handler Jersey client handler. Useful when plugging in various http client interaction * modules (e.g. ribbon) * @param filters Chain of client side filters to be applied per request */ public WorkflowClient( ClientConfig config, ConductorClientConfiguration clientConfiguration, ClientHandler handler, ClientFilter... 
filters) { super(new ClientRequestHandler(config, handler, filters), clientConfiguration); } WorkflowClient(ClientRequestHandler requestHandler) { super(requestHandler, null); } /** * Starts a workflow. If the size of the workflow input payload is bigger than {@link * ConductorClientConfiguration#getWorkflowInputPayloadThresholdKB()}, it is uploaded to {@link * ExternalPayloadStorage}, if enabled, else the workflow is rejected. * * @param startWorkflowRequest the {@link StartWorkflowRequest} object to start the workflow. * @return the id of the workflow instance that can be used for tracking. * @throws ConductorClientException if {@link ExternalPayloadStorage} is disabled or if the * payload size is greater than {@link * ConductorClientConfiguration#getWorkflowInputMaxPayloadThresholdKB()}. * @throws NullPointerException if {@link StartWorkflowRequest} is null or {@link * StartWorkflowRequest#getName()} is null. * @throws IllegalArgumentException if {@link StartWorkflowRequest#getName()} is empty. */ public String startWorkflow(StartWorkflowRequest startWorkflowRequest) { Validate.notNull(startWorkflowRequest, "StartWorkflowRequest cannot be null"); Validate.notBlank(startWorkflowRequest.getName(), "Workflow name cannot be null or empty"); Validate.isTrue( StringUtils.isBlank(startWorkflowRequest.getExternalInputPayloadStoragePath()), "External Storage Path must not be set"); String version = startWorkflowRequest.getVersion() != null ? 
startWorkflowRequest.getVersion().toString() : "latest"; try (ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream()) { objectMapper.writeValue(byteArrayOutputStream, startWorkflowRequest.getInput()); byte[] workflowInputBytes = byteArrayOutputStream.toByteArray(); long workflowInputSize = workflowInputBytes.length; MetricsContainer.recordWorkflowInputPayloadSize( startWorkflowRequest.getName(), version, workflowInputSize); if (workflowInputSize > conductorClientConfiguration.getWorkflowInputPayloadThresholdKB() * 1024L) { if (!conductorClientConfiguration.isExternalPayloadStorageEnabled() || (workflowInputSize > conductorClientConfiguration .getWorkflowInputMaxPayloadThresholdKB() * 1024L)) { String errorMsg = String.format( "Input payload larger than the allowed threshold of: %d KB", conductorClientConfiguration .getWorkflowInputPayloadThresholdKB()); throw new ConductorClientException(errorMsg); } else { MetricsContainer.incrementExternalPayloadUsedCount( startWorkflowRequest.getName(), ExternalPayloadStorage.Operation.WRITE.name(), ExternalPayloadStorage.PayloadType.WORKFLOW_INPUT.name()); String externalStoragePath = uploadToExternalPayloadStorage( ExternalPayloadStorage.PayloadType.WORKFLOW_INPUT, workflowInputBytes, workflowInputSize); startWorkflowRequest.setExternalInputPayloadStoragePath(externalStoragePath); startWorkflowRequest.setInput(null); } } } catch (IOException e) { String errorMsg = String.format( "Unable to start workflow:%s, version:%s", startWorkflowRequest.getName(), version); LOGGER.error(errorMsg, e); MetricsContainer.incrementWorkflowStartErrorCount(startWorkflowRequest.getName(), e); throw new ConductorClientException(errorMsg, e); } try { return postForEntity( "workflow", startWorkflowRequest, null, String.class, startWorkflowRequest.getName()); } catch (ConductorClientException e) { String errorMsg = String.format( "Unable to send start workflow request:%s, version:%s", startWorkflowRequest.getName(), version); 
LOGGER.error(errorMsg, e); MetricsContainer.incrementWorkflowStartErrorCount(startWorkflowRequest.getName(), e); throw e; } } /** * Retrieve a workflow by workflow id * * @param workflowId the id of the workflow * @param includeTasks specify if the tasks in the workflow need to be returned * @return the requested workflow */ public Workflow getWorkflow(String workflowId, boolean includeTasks) { Validate.notBlank(workflowId, "workflow id cannot be blank"); Workflow workflow = getForEntity( "workflow/{workflowId}", new Object[] {"includeTasks", includeTasks}, Workflow.class, workflowId); populateWorkflowOutput(workflow); return workflow; } /** * Retrieve all workflows for a given correlation id and name * * @param name the name of the workflow * @param correlationId the correlation id * @param includeClosed specify if all workflows are to be returned or only running workflows * @param includeTasks specify if the tasks in the workflow need to be returned * @return list of workflows for the given correlation id and name */ public List<Workflow> getWorkflows( String name, String correlationId, boolean includeClosed, boolean includeTasks) { Validate.notBlank(name, "name cannot be blank"); Validate.notBlank(correlationId, "correlationId cannot be blank"); Object[] params = new Object[] {"includeClosed", includeClosed, "includeTasks", includeTasks}; List<Workflow> workflows = getForEntity( "workflow/{name}/correlated/{correlationId}", params, new GenericType<List<Workflow>>() {}, name, correlationId); workflows.forEach(this::populateWorkflowOutput); return workflows; } /** * Populates the workflow output from external payload storage if the external storage path is * specified. * * @param workflow the workflow for which the output is to be populated. 
*/
    private void populateWorkflowOutput(Workflow workflow) {
        // Large workflow outputs are offloaded to external payload storage; when a
        // storage path is present, download the payload so callers always see the
        // complete output inline.
        if (StringUtils.isNotBlank(workflow.getExternalOutputPayloadStoragePath())) {
            MetricsContainer.incrementExternalPayloadUsedCount(
                    workflow.getWorkflowName(),
                    ExternalPayloadStorage.Operation.READ.name(),
                    ExternalPayloadStorage.PayloadType.WORKFLOW_OUTPUT.name());
            workflow.setOutput(
                    downloadFromExternalStorage(
                            ExternalPayloadStorage.PayloadType.WORKFLOW_OUTPUT,
                            workflow.getExternalOutputPayloadStoragePath()));
        }
    }

    /**
     * Removes a workflow from the system
     *
     * @param workflowId the id of the workflow to be deleted
     * @param archiveWorkflow flag to indicate if the workflow and associated tasks should be
     *     archived before deletion
     */
    public void deleteWorkflow(String workflowId, boolean archiveWorkflow) {
        Validate.notBlank(workflowId, "Workflow id cannot be blank");
        Object[] params = new Object[] {"archiveWorkflow", archiveWorkflow};
        deleteWithUriVariables(params, "workflow/{workflowId}/remove", workflowId);
    }

    /**
     * Terminates the execution of all given workflows instances
     *
     * @param workflowIds the ids of the workflows to be terminated
     * @param reason the reason to be logged and displayed
     * @return the {@link BulkResponse} contains bulkErrorResults and bulkSuccessfulResults
     */
    public BulkResponse terminateWorkflows(List<String> workflowIds, String reason) {
        Validate.isTrue(!workflowIds.isEmpty(), "workflow id cannot be blank");
        return postForEntity(
                "workflow/bulk/terminate",
                workflowIds,
                new Object[] {"reason", reason},
                BulkResponse.class);
    }

    /**
     * Retrieve all running workflow instances for a given name and version
     *
     * @param workflowName the name of the workflow
     * @param version the version of the workflow definition. Defaults to 1.
     * @return the list of running workflow instances
     */
    public List<String> getRunningWorkflow(String workflowName, Integer version) {
        Validate.notBlank(workflowName, "Workflow name cannot be blank");
        return getForEntity(
                "workflow/running/{name}",
                new Object[] {"version", version},
                new GenericType<List<String>>() {},
                workflowName);
    }

    /**
     * Retrieve all workflow instances for a given workflow name between a specific time period
     *
     * @param workflowName the name of the workflow
     * @param version the version of the workflow definition. Defaults to 1.
     * @param startTime the start time of the period
     * @param endTime the end time of the period
     * @return returns a list of workflows created during the specified time period
     */
    public List<String> getWorkflowsByTimePeriod(
            String workflowName, int version, Long startTime, Long endTime) {
        Validate.notBlank(workflowName, "Workflow name cannot be blank");
        Validate.notNull(startTime, "Start time cannot be null");
        Validate.notNull(endTime, "End time cannot be null");
        Object[] params =
                new Object[] {"version", version, "startTime", startTime, "endTime", endTime};
        return getForEntity(
                "workflow/running/{name}", params, new GenericType<List<String>>() {}, workflowName);
    }

    /**
     * Starts the decision task for the given workflow instance
     *
     * @param workflowId the id of the workflow instance
     */
    public void runDecider(String workflowId) {
        Validate.notBlank(workflowId, "workflow id cannot be blank");
        put("workflow/decide/{workflowId}", null, null, workflowId);
    }

    /**
     * Pause a workflow by workflow id
     *
     * @param workflowId the workflow id of the workflow to be paused
     */
    public void pauseWorkflow(String workflowId) {
        Validate.notBlank(workflowId, "workflow id cannot be blank");
        put("workflow/{workflowId}/pause", null, null, workflowId);
    }

    /**
     * Resume a paused workflow by workflow id
     *
     * @param workflowId the workflow id of the paused workflow
     */
    public void resumeWorkflow(String workflowId) {
        Validate.notBlank(workflowId, "workflow id cannot be blank");
        put("workflow/{workflowId}/resume", null, null, workflowId);
    }

    /**
     * Skips a given task from a current RUNNING workflow
     *
     * @param workflowId the id of the workflow instance
     * @param taskReferenceName the reference name of the task to be skipped
     */
    public void skipTaskFromWorkflow(String workflowId, String taskReferenceName) {
        Validate.notBlank(workflowId, "workflow id cannot be blank");
        Validate.notBlank(taskReferenceName, "Task reference name cannot be blank");
        put(
                "workflow/{workflowId}/skiptask/{taskReferenceName}",
                null,
                null,
                workflowId,
                taskReferenceName);
    }

    /**
     * Reruns the workflow from a specific task
     *
     * @param workflowId the id of the workflow
     * @param rerunWorkflowRequest the request containing the task to rerun from
     * @return the id of the workflow
     */
    public String rerunWorkflow(String workflowId, RerunWorkflowRequest rerunWorkflowRequest) {
        Validate.notBlank(workflowId, "workflow id cannot be blank");
        Validate.notNull(rerunWorkflowRequest, "RerunWorkflowRequest cannot be null");
        return postForEntity(
                "workflow/{workflowId}/rerun", rerunWorkflowRequest, null, String.class, workflowId);
    }

    /**
     * Restart a completed workflow
     *
     * @param workflowId the workflow id of the workflow to be restarted
     * @param useLatestDefinitions if true, use the latest workflow and task definitions when
     *     restarting the workflow; if false, use the workflow and task definitions embedded in the
     *     workflow execution when restarting the workflow
     */
    public void restart(String workflowId, boolean useLatestDefinitions) {
        Validate.notBlank(workflowId, "workflow id cannot be blank");
        Object[] params = new Object[] {"useLatestDefinitions", useLatestDefinitions};
        postForEntity("workflow/{workflowId}/restart", null, params, Void.TYPE, workflowId);
    }

    /**
     * Retries the last failed task in a workflow
     *
     * @param workflowId the workflow id of the workflow with the failed task
     */
    public void retryLastFailedTask(String workflowId) {
        Validate.notBlank(workflowId, "workflow id cannot be blank");
        postForEntityWithUriVariablesOnly("workflow/{workflowId}/retry", workflowId);
    }

    /**
     * Resets the callback times of all IN PROGRESS tasks to 0 for the given workflow
     *
     * @param workflowId the id of the workflow
     */
    public void resetCallbacksForInProgressTasks(String workflowId) {
        Validate.notBlank(workflowId, "workflow id cannot be blank");
        postForEntityWithUriVariablesOnly("workflow/{workflowId}/resetcallbacks", workflowId);
    }

    /**
     * Terminates the execution of the given workflow instance
     *
     * @param workflowId the id of the workflow to be terminated
     * @param reason the reason to be logged and displayed
     */
    public void terminateWorkflow(String workflowId, String reason) {
        Validate.notBlank(workflowId, "workflow id cannot be blank");
        deleteWithUriVariables(
                new Object[] {"reason", reason}, "workflow/{workflowId}", workflowId);
    }

    /**
     * Search for workflows based on payload
     *
     * @param query the search query
     * @return the {@link SearchResult} containing the {@link WorkflowSummary} that match the query
     */
    public SearchResult<WorkflowSummary> search(String query) {
        return getForEntity(
                "workflow/search", new Object[] {"query", query}, searchResultWorkflowSummary);
    }

    /**
     * Search for workflows based on payload
     *
     * @param query the search query
     * @return the {@link SearchResult} containing the {@link Workflow} that match the query
     */
    public SearchResult<Workflow> searchV2(String query) {
        return getForEntity(
                "workflow/search-v2", new Object[] {"query", query}, searchResultWorkflow);
    }

    /**
     * Paginated search for workflows based on payload
     *
     * @param start start value of page
     * @param size number of workflows to be returned
     * @param sort sort order
     * @param freeText additional free text query
     * @param query the search query
     * @return the {@link SearchResult} containing the {@link WorkflowSummary} that match the query
     */
    public SearchResult<WorkflowSummary> search(
            Integer start, Integer size, String sort, String freeText, String query) {
        Object[] params =
                new Object[] {
                    "start", start, "size", size, "sort", sort, "freeText", freeText, "query", query
                };
        return getForEntity("workflow/search", params, searchResultWorkflowSummary);
    }

    /**
     * Paginated search for workflows based on payload
     *
     * @param start start value of page
     * @param size number of workflows to be returned
     * @param sort sort order
     * @param freeText additional free text query
     * @param query the search query
     * @return the {@link SearchResult} containing the {@link Workflow} that match the query
     */
    public SearchResult<Workflow> searchV2(
            Integer start, Integer size, String sort, String freeText, String query) {
        Object[] params =
                new Object[] {
                    "start", start, "size", size, "sort", sort, "freeText", freeText, "query", query
                };
        return getForEntity("workflow/search-v2", params, searchResultWorkflow);
    }

    /**
     * Executes a workflow in "test" mode on the server. If an inline workflow definition is
     * supplied in the request, its name and version are copied onto the request before posting.
     *
     * @param testRequest the test request describing the workflow to execute
     * @return the resulting {@link Workflow} execution
     */
    public Workflow testWorkflow(WorkflowTestRequest testRequest) {
        Validate.notNull(testRequest, "testRequest cannot be null");
        if (testRequest.getWorkflowDef() != null) {
            testRequest.setName(testRequest.getWorkflowDef().getName());
            testRequest.setVersion(testRequest.getWorkflowDef().getVersion());
        }
        return postForEntity("workflow/test", testRequest, null, Workflow.class);
    }
}
7,007
0
Create_ds/conductor/client/src/main/java/com/netflix/conductor/client
Create_ds/conductor/client/src/main/java/com/netflix/conductor/client/http/MetadataClient.java
/*
 * Copyright 2020 Netflix, Inc.
 * <p>
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
 * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations under the License.
 */
package com.netflix.conductor.client.http;

import java.util.List;

import org.apache.commons.lang3.Validate;

import com.netflix.conductor.client.config.ConductorClientConfiguration;
import com.netflix.conductor.client.config.DefaultConductorClientConfiguration;
import com.netflix.conductor.common.metadata.tasks.TaskDef;
import com.netflix.conductor.common.metadata.workflow.WorkflowDef;

import com.sun.jersey.api.client.ClientHandler;
import com.sun.jersey.api.client.GenericType;
import com.sun.jersey.api.client.config.ClientConfig;
import com.sun.jersey.api.client.config.DefaultClientConfig;
import com.sun.jersey.api.client.filter.ClientFilter;

/** Client for the Conductor metadata APIs: workflow and task definition CRUD. */
public class MetadataClient extends ClientBase {

    private static final GenericType<List<WorkflowDef>> workflowDefList =
            new GenericType<List<WorkflowDef>>() {};

    /** Creates a default metadata client */
    public MetadataClient() {
        this(new DefaultClientConfig(), new DefaultConductorClientConfiguration(), null);
    }

    /**
     * @param clientConfig REST Client configuration
     */
    public MetadataClient(ClientConfig clientConfig) {
        this(clientConfig, new DefaultConductorClientConfiguration(), null);
    }

    /**
     * @param clientConfig REST Client configuration
     * @param clientHandler Jersey client handler. Useful when plugging in various http client
     *     interaction modules (e.g. ribbon)
     */
    public MetadataClient(ClientConfig clientConfig, ClientHandler clientHandler) {
        this(clientConfig, new DefaultConductorClientConfiguration(), clientHandler);
    }

    /**
     * @param config REST Client configuration
     * @param handler Jersey client handler. Useful when plugging in various http client
     *     interaction modules (e.g. ribbon)
     * @param filters Chain of client side filters to be applied per request
     */
    public MetadataClient(ClientConfig config, ClientHandler handler, ClientFilter... filters) {
        this(config, new DefaultConductorClientConfiguration(), handler, filters);
    }

    /**
     * @param config REST Client configuration
     * @param clientConfiguration Specific properties configured for the client, see {@link
     *     ConductorClientConfiguration}
     * @param handler Jersey client handler. Useful when plugging in various http client interaction
     *     modules (e.g. ribbon)
     * @param filters Chain of client side filters to be applied per request
     */
    public MetadataClient(
            ClientConfig config,
            ConductorClientConfiguration clientConfiguration,
            ClientHandler handler,
            ClientFilter... filters) {
        super(new ClientRequestHandler(config, handler, filters), clientConfiguration);
    }

    // Package-private: constructs the client around a pre-built request handler.
    MetadataClient(ClientRequestHandler requestHandler) {
        super(requestHandler, null);
    }

    // Workflow Metadata Operations

    /**
     * Register a workflow definition with the server
     *
     * @param workflowDef the workflow definition
     */
    public void registerWorkflowDef(WorkflowDef workflowDef) {
        Validate.notNull(workflowDef, "Workflow definition cannot be null");
        postForEntityWithRequestOnly("metadata/workflow", workflowDef);
    }

    /**
     * Asks the server to validate the given workflow definition.
     *
     * @param workflowDef the workflow definition to be validated
     */
    public void validateWorkflowDef(WorkflowDef workflowDef) {
        Validate.notNull(workflowDef, "Workflow definition cannot be null");
        postForEntityWithRequestOnly("metadata/workflow/validate", workflowDef);
    }

    /**
     * Updates a list of existing workflow definitions
     *
     * @param workflowDefs List of workflow definitions to be updated
     */
    public void updateWorkflowDefs(List<WorkflowDef> workflowDefs) {
        Validate.notNull(workflowDefs, "Workflow defs list cannot be null");
        put("metadata/workflow", null, workflowDefs);
    }

    /**
     * Retrieve the workflow definition
     *
     * @param name the name of the workflow
     * @param version the version of the workflow def
     * @return Workflow definition for the given workflow and version
     */
    public WorkflowDef getWorkflowDef(String name, Integer version) {
        Validate.notBlank(name, "name cannot be blank");
        return getForEntity(
                "metadata/workflow/{name}",
                new Object[] {"version", version},
                WorkflowDef.class,
                name);
    }

    /**
     * Retrieves all workflow definitions at their latest registered versions.
     *
     * @return list of workflow definitions, one per workflow name
     */
    public List<WorkflowDef> getAllWorkflowsWithLatestVersions() {
        return getForEntity(
                "metadata/workflow/latest-versions", null, workflowDefList, (Object) null);
    }

    /**
     * Removes the workflow definition of a workflow from the conductor server. It does not remove
     * associated workflows. Use with caution.
     *
     * @param name Name of the workflow to be unregistered.
     * @param version Version of the workflow definition to be unregistered.
     */
    public void unregisterWorkflowDef(String name, Integer version) {
        Validate.notBlank(name, "Workflow name cannot be blank");
        Validate.notNull(version, "Version cannot be null");
        delete("metadata/workflow/{name}/{version}", name, version);
    }

    // Task Metadata Operations

    /**
     * Registers a list of task types with the conductor server
     *
     * @param taskDefs List of task types to be registered.
     */
    public void registerTaskDefs(List<TaskDef> taskDefs) {
        Validate.notNull(taskDefs, "Task defs list cannot be null");
        postForEntityWithRequestOnly("metadata/taskdefs", taskDefs);
    }

    /**
     * Updates an existing task definition
     *
     * @param taskDef the task definition to be updated
     */
    public void updateTaskDef(TaskDef taskDef) {
        Validate.notNull(taskDef, "Task definition cannot be null");
        put("metadata/taskdefs", null, taskDef);
    }

    /**
     * Retrieve the task definition of a given task type
     *
     * @param taskType type of task for which to retrieve the definition
     * @return Task Definition for the given task type
     */
    public TaskDef getTaskDef(String taskType) {
        Validate.notBlank(taskType, "Task type cannot be blank");
        return getForEntity("metadata/taskdefs/{tasktype}", null, TaskDef.class, taskType);
    }

    /**
     * Removes the task definition of a task type from the conductor server. Use with caution.
     *
     * @param taskType Task type to be unregistered.
     */
    public void unregisterTaskDef(String taskType) {
        Validate.notBlank(taskType, "Task type cannot be blank");
        delete("metadata/taskdefs/{tasktype}", taskType);
    }
}
7,008
0
Create_ds/conductor/client/src/main/java/com/netflix/conductor/client
Create_ds/conductor/client/src/main/java/com/netflix/conductor/client/http/ClientBase.java
/*
 * Copyright 2020 Netflix, Inc.
 * <p>
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
 * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations under the License.
 */
package com.netflix.conductor.client.http;

import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.net.URI;
import java.util.Collection;
import java.util.Map;
import java.util.function.Function;

import javax.ws.rs.core.UriBuilder;

import org.apache.commons.lang3.ObjectUtils;
import org.apache.commons.lang3.Validate;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.netflix.conductor.client.config.ConductorClientConfiguration;
import com.netflix.conductor.client.config.DefaultConductorClientConfiguration;
import com.netflix.conductor.client.exception.ConductorClientException;
import com.netflix.conductor.common.config.ObjectMapperProvider;
import com.netflix.conductor.common.model.BulkResponse;
import com.netflix.conductor.common.run.ExternalStorageLocation;
import com.netflix.conductor.common.utils.ExternalPayloadStorage;
import com.netflix.conductor.common.validation.ErrorResponse;

import com.fasterxml.jackson.core.Version;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.datatype.jsr310.JavaTimeModule;
import com.sun.jersey.api.client.ClientHandlerException;
import com.sun.jersey.api.client.ClientResponse;
import com.sun.jersey.api.client.GenericType;
import com.sun.jersey.api.client.UniformInterfaceException;
import com.sun.jersey.api.client.WebResource.Builder;

/** Abstract client for the REST server */
public abstract class ClientBase {

    private static final Logger LOGGER = LoggerFactory.getLogger(ClientBase.class);

    protected ClientRequestHandler requestHandler;

    // Base path prepended to every relative API url.
    protected String root = "";

    protected ObjectMapper objectMapper;

    protected PayloadStorage payloadStorage;

    protected ConductorClientConfiguration conductorClientConfiguration;

    protected ClientBase(
            ClientRequestHandler requestHandler, ConductorClientConfiguration clientConfiguration) {
        this.objectMapper = new ObjectMapperProvider().getObjectMapper();

        // https://github.com/FasterXML/jackson-databind/issues/2683
        if (isNewerJacksonVersion()) {
            objectMapper.registerModule(new JavaTimeModule());
        }

        this.requestHandler = requestHandler;
        this.conductorClientConfiguration =
                ObjectUtils.defaultIfNull(
                        clientConfiguration, new DefaultConductorClientConfiguration());
        this.payloadStorage = new PayloadStorage(this);
    }

    public void setRootURI(String root) {
        this.root = root;
    }

    protected void delete(String url, Object... uriVariables) {
        deleteWithUriVariables(null, url, uriVariables);
    }

    protected void deleteWithUriVariables(
            Object[] queryParams, String url, Object... uriVariables) {
        delete(queryParams, url, uriVariables, null);
    }

    protected BulkResponse deleteWithRequestBody(Object[] queryParams, String url, Object body) {
        return delete(queryParams, url, null, body);
    }

    private BulkResponse delete(
            Object[] queryParams, String url, Object[] uriVariables, Object body) {
        URI uri = null;
        BulkResponse response = null;
        try {
            uri = getURIBuilder(root + url, queryParams).build(uriVariables);
            response = requestHandler.delete(uri, body);
        } catch (UniformInterfaceException e) {
            handleUniformInterfaceException(e, uri);
        } catch (RuntimeException e) {
            handleRuntimeException(e, uri);
        }
        return response;
    }

    protected void put(String url, Object[] queryParams, Object request, Object... uriVariables) {
        URI uri = null;
        try {
            uri = getURIBuilder(root + url, queryParams).build(uriVariables);
            requestHandler.getWebResourceBuilder(uri, request).put();
        } catch (RuntimeException e) {
            handleException(uri, e);
        }
    }

    protected void postForEntityWithRequestOnly(String url, Object request) {
        Class<?> type = null;
        postForEntity(url, request, null, type);
    }

    protected void postForEntityWithUriVariablesOnly(String url, Object... uriVariables) {
        Class<?> type = null;
        postForEntity(url, null, null, type, uriVariables);
    }

    protected <T> T postForEntity(
            String url,
            Object request,
            Object[] queryParams,
            Class<T> responseType,
            Object... uriVariables) {
        return postForEntity(
                url,
                request,
                queryParams,
                responseType,
                builder -> builder.post(responseType),
                uriVariables);
    }

    protected <T> T postForEntity(
            String url,
            Object request,
            Object[] queryParams,
            GenericType<T> responseType,
            Object... uriVariables) {
        return postForEntity(
                url,
                request,
                queryParams,
                responseType,
                builder -> builder.post(responseType),
                uriVariables);
    }

    private <T> T postForEntity(
            String url,
            Object request,
            Object[] queryParams,
            Object responseType,
            Function<Builder, T> postWithEntity,
            Object... uriVariables) {
        URI uri = null;
        try {
            uri = getURIBuilder(root + url, queryParams).build(uriVariables);
            Builder webResourceBuilder = requestHandler.getWebResourceBuilder(uri, request);
            if (responseType == null) {
                // Fire-and-forget POST: no response body expected.
                webResourceBuilder.post();
                return null;
            }
            return postWithEntity.apply(webResourceBuilder);
        } catch (UniformInterfaceException e) {
            handleUniformInterfaceException(e, uri);
        } catch (RuntimeException e) {
            handleRuntimeException(e, uri);
        }
        return null;
    }

    protected <T> T getForEntity(
            String url, Object[] queryParams, Class<T> responseType, Object... uriVariables) {
        return getForEntity(
                url, queryParams, response -> response.getEntity(responseType), uriVariables);
    }

    protected <T> T getForEntity(
            String url, Object[] queryParams, GenericType<T> responseType, Object... uriVariables) {
        return getForEntity(
                url, queryParams, response -> response.getEntity(responseType), uriVariables);
    }

    private <T> T getForEntity(
            String url,
            Object[] queryParams,
            Function<ClientResponse, T> entityProvider,
            Object... uriVariables) {
        URI uri = null;
        ClientResponse clientResponse;
        try {
            uri = getURIBuilder(root + url, queryParams).build(uriVariables);
            clientResponse = requestHandler.get(uri);
            if (clientResponse.getStatus() < 300) {
                return entityProvider.apply(clientResponse);
            } else {
                throw new UniformInterfaceException(clientResponse);
            }
        } catch (UniformInterfaceException e) {
            handleUniformInterfaceException(e, uri);
        } catch (RuntimeException e) {
            handleRuntimeException(e, uri);
        }
        return null;
    }

    /**
     * Uses the {@link PayloadStorage} for storing large payloads. Gets the uri for storing the
     * payload from the server and then uploads to this location
     *
     * @param payloadType the {@link
     *     com.netflix.conductor.common.utils.ExternalPayloadStorage.PayloadType} to be uploaded
     * @param payloadBytes the byte array containing the payload
     * @param payloadSize the size of the payload
     * @return the path where the payload is stored in external storage
     */
    protected String uploadToExternalPayloadStorage(
            ExternalPayloadStorage.PayloadType payloadType, byte[] payloadBytes, long payloadSize) {
        Validate.isTrue(
                payloadType.equals(ExternalPayloadStorage.PayloadType.WORKFLOW_INPUT)
                        || payloadType.equals(ExternalPayloadStorage.PayloadType.TASK_OUTPUT),
                "Payload type must be workflow input or task output");
        ExternalStorageLocation externalStorageLocation =
                payloadStorage.getLocation(ExternalPayloadStorage.Operation.WRITE, payloadType, "");
        payloadStorage.upload(
                externalStorageLocation.getUri(),
                new ByteArrayInputStream(payloadBytes),
                payloadSize);
        return externalStorageLocation.getPath();
    }

    /**
     * Uses the {@link PayloadStorage} for downloading large payloads to be used by the client. Gets
     * the uri of the payload fom the server and then downloads from this location.
     *
     * @param payloadType the {@link
     *     com.netflix.conductor.common.utils.ExternalPayloadStorage.PayloadType} to be downloaded
     * @param path the relative of the payload in external storage
     * @return the payload object that is stored in external storage
     */
    @SuppressWarnings("unchecked")
    protected Map<String, Object> downloadFromExternalStorage(
            ExternalPayloadStorage.PayloadType payloadType, String path) {
        Validate.notBlank(path, "uri cannot be blank");
        ExternalStorageLocation externalStorageLocation =
                payloadStorage.getLocation(
                        ExternalPayloadStorage.Operation.READ, payloadType, path);
        try (InputStream inputStream = payloadStorage.download(externalStorageLocation.getUri())) {
            return objectMapper.readValue(inputStream, Map.class);
        } catch (IOException e) {
            String errorMsg =
                    String.format(
                            "Unable to download payload from external storage location: %s", path);
            LOGGER.error(errorMsg, e);
            throw new ConductorClientException(errorMsg, e);
        }
    }

    /**
     * Builds a {@link UriBuilder} for the given path with optional query parameters supplied as a
     * flat name/value pair array: {@code {"name1", value1, "name2", value2, ...}}. Pairs with a
     * null value are skipped; a Collection value expands into repeated query parameters.
     */
    private UriBuilder getURIBuilder(String path, Object[] queryParams) {
        if (path == null) {
            path = "";
        }
        UriBuilder builder = UriBuilder.fromPath(path);
        if (queryParams != null) {
            // Bound on i + 1 guards against a malformed odd-length pair array,
            // which would otherwise throw ArrayIndexOutOfBoundsException.
            for (int i = 0; i + 1 < queryParams.length; i += 2) {
                String param = queryParams[i].toString();
                Object value = queryParams[i + 1];
                if (value != null) {
                    if (value instanceof Collection) {
                        Object[] values = ((Collection<?>) value).toArray();
                        builder.queryParam(param, values);
                    } else {
                        builder.queryParam(param, value);
                    }
                }
            }
        }
        return builder;
    }

    protected boolean isNewerJacksonVersion() {
        Version version = com.fasterxml.jackson.databind.cfg.PackageVersion.VERSION;
        return version.getMajorVersion() == 2 && version.getMinorVersion() >= 12;
    }

    private void handleClientHandlerException(ClientHandlerException exception, URI uri) {
        String errorMessage =
                String.format(
                        "Unable to invoke Conductor API with uri: %s, failure to process request or response",
                        uri);
        LOGGER.error(errorMessage, exception);
        throw new ConductorClientException(errorMessage, exception);
    }

    private void handleRuntimeException(RuntimeException exception, URI uri) {
        String errorMessage =
                String.format(
                        "Unable to invoke Conductor API with uri: %s, runtime exception occurred",
                        uri);
        LOGGER.error(errorMessage, exception);
        throw new ConductorClientException(errorMessage, exception);
    }

    private void handleUniformInterfaceException(UniformInterfaceException exception, URI uri) {
        ClientResponse clientResponse = exception.getResponse();
        if (clientResponse == null) {
            throw new ConductorClientException(
                    String.format("Unable to invoke Conductor API with uri: %s", uri));
        }
        try {
            if (clientResponse.getStatus() < 300) {
                return;
            }
            String errorMessage = clientResponse.getEntity(String.class);
            LOGGER.warn(
                    "Unable to invoke Conductor API with uri: {}, unexpected response from server: statusCode={}, responseBody='{}'.",
                    uri,
                    clientResponse.getStatus(),
                    errorMessage);
            ErrorResponse errorResponse;
            try {
                errorResponse = objectMapper.readValue(errorMessage, ErrorResponse.class);
            } catch (IOException e) {
                // Response body was not a structured ErrorResponse; surface it raw.
                throw new ConductorClientException(clientResponse.getStatus(), errorMessage);
            }
            throw new ConductorClientException(clientResponse.getStatus(), errorResponse);
        } catch (ConductorClientException e) {
            throw e;
        } catch (ClientHandlerException e) {
            handleClientHandlerException(e, uri);
        } catch (RuntimeException e) {
            handleRuntimeException(e, uri);
        } finally {
            clientResponse.close();
        }
    }

    // Dispatches to the most specific handler for the given runtime exception.
    private void handleException(URI uri, RuntimeException e) {
        if (e instanceof UniformInterfaceException) {
            handleUniformInterfaceException(((UniformInterfaceException) e), uri);
        } else if (e instanceof ClientHandlerException) {
            handleClientHandlerException((ClientHandlerException) e, uri);
        } else {
            handleRuntimeException(e, uri);
        }
    }

    /**
     * Converts ClientResponse object to string with detailed debug information including status
     * code, media type, response headers, and response body if exists.
     */
    private String clientResponseToString(ClientResponse response) {
        if (response == null) {
            return null;
        }
        StringBuilder builder = new StringBuilder();
        builder.append("[status: ").append(response.getStatus());
        builder.append(", media type: ").append(response.getType());
        if (response.getStatus() != 404) {
            try {
                String responseBody = response.getEntity(String.class);
                if (responseBody != null) {
                    builder.append(", response body: ").append(responseBody);
                }
            } catch (RuntimeException ignore) {
                // Ignore if there is no response body, or IO error - it may have already been read
                // in certain scenario.
            }
        }
        builder.append(", response headers: ").append(response.getHeaders());
        builder.append("]");
        return builder.toString();
    }
}
7,009
0
Create_ds/conductor/client/src/main/java/com/netflix/conductor/client
Create_ds/conductor/client/src/main/java/com/netflix/conductor/client/http/ClientRequestHandler.java
/*
 * Copyright 2022 Netflix, Inc.
 * <p>
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
 * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations under the License.
 */
package com.netflix.conductor.client.http;

import java.net.URI;

import javax.ws.rs.core.MediaType;

import com.netflix.conductor.common.config.ObjectMapperProvider;
import com.netflix.conductor.common.model.BulkResponse;

import com.fasterxml.jackson.core.Version;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.datatype.jsr310.JavaTimeModule;
import com.fasterxml.jackson.jaxrs.json.JacksonJsonProvider;
import com.sun.jersey.api.client.Client;
import com.sun.jersey.api.client.ClientHandler;
import com.sun.jersey.api.client.ClientResponse;
import com.sun.jersey.api.client.WebResource;
import com.sun.jersey.api.client.config.ClientConfig;
import com.sun.jersey.api.client.filter.ClientFilter;

/** Thin wrapper around a Jersey {@link Client} that issues the raw HTTP requests. */
public class ClientRequestHandler {

    private final Client client;

    /**
     * @param config REST client configuration; a Jackson JSON provider is registered on it
     * @param handler optional Jersey client handler; when null, a default client is created
     * @param filters chain of client-side filters applied to every request
     */
    public ClientRequestHandler(
            ClientConfig config, ClientHandler handler, ClientFilter... filters) {
        ObjectMapper objectMapper = new ObjectMapperProvider().getObjectMapper();

        // https://github.com/FasterXML/jackson-databind/issues/2683
        if (isNewerJacksonVersion()) {
            objectMapper.registerModule(new JavaTimeModule());
        }

        JacksonJsonProvider provider = new JacksonJsonProvider(objectMapper);
        config.getSingletons().add(provider);
        if (handler == null) {
            this.client = Client.create(config);
        } else {
            this.client = new Client(handler, config);
        }

        for (ClientFilter filter : filters) {
            this.client.addFilter(filter);
        }
    }

    /**
     * Issues an HTTP DELETE to the given uri.
     *
     * @param uri the target uri
     * @param body optional request body; when present, the server's {@link BulkResponse} is
     *     deserialized and returned, otherwise null is returned
     */
    public BulkResponse delete(URI uri, Object body) {
        if (body != null) {
            return client.resource(uri)
                    .type(MediaType.APPLICATION_JSON_TYPE)
                    .delete(BulkResponse.class, body);
        } else {
            client.resource(uri).delete();
        }
        return null;
    }

    /** Issues an HTTP GET to the given uri, accepting JSON or plain text. */
    public ClientResponse get(URI uri) {
        return client.resource(uri)
                .accept(MediaType.APPLICATION_JSON, MediaType.TEXT_PLAIN)
                .get(ClientResponse.class);
    }

    /**
     * Prepares a request builder for the given uri with a JSON request entity.
     *
     * @param uri the target uri
     * @param entity the request payload, serialized as JSON
     */
    public WebResource.Builder getWebResourceBuilder(URI uri, Object entity) {
        // Parameter renamed from "URI" to "uri": the original shadowed the
        // java.net.URI type name inside this method.
        return client.resource(uri)
                .type(MediaType.APPLICATION_JSON)
                .entity(entity)
                .accept(MediaType.TEXT_PLAIN, MediaType.APPLICATION_JSON);
    }

    private boolean isNewerJacksonVersion() {
        Version version = com.fasterxml.jackson.databind.cfg.PackageVersion.VERSION;
        return version.getMajorVersion() == 2 && version.getMinorVersion() >= 12;
    }
}
7,010
0
Create_ds/conductor/client/src/main/java/com/netflix/conductor/client
Create_ds/conductor/client/src/main/java/com/netflix/conductor/client/telemetry/MetricsContainer.java
/*
 * Copyright 2020 Netflix, Inc.
 * <p>
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
 * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations under the License.
 */
package com.netflix.conductor.client.telemetry;

import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;

import com.netflix.spectator.api.BasicTag;
import com.netflix.spectator.api.Counter;
import com.netflix.spectator.api.Id;
import com.netflix.spectator.api.Registry;
import com.netflix.spectator.api.Spectator;
import com.netflix.spectator.api.Tag;
import com.netflix.spectator.api.Timer;
import com.netflix.spectator.api.patterns.PolledMeter;

/**
 * Client-side metrics publisher for Conductor workers.
 *
 * <p>All meters are registered against the Spectator global registry and cached in local maps so
 * repeated lookups for the same task/workflow type reuse the same meter instance. The cache key is
 * {@code CLASS_NAME + "." + meterName + "." + joined tags}.
 *
 * <p>Thread-safety: the caches are {@link ConcurrentHashMap}s and Spectator meters are themselves
 * safe for concurrent use.
 */
public class MetricsContainer {

    // Tag keys
    private static final String TASK_TYPE = "taskType";
    private static final String WORKFLOW_TYPE = "workflowType";
    private static final String WORKFLOW_VERSION = "version";
    private static final String EXCEPTION = "exception";
    private static final String ENTITY_NAME = "entityName";
    private static final String OPERATION = "operation";
    private static final String PAYLOAD_TYPE = "payload_type";

    // Meter names
    private static final String TASK_EXECUTION_QUEUE_FULL = "task_execution_queue_full";
    private static final String TASK_POLL_ERROR = "task_poll_error";
    private static final String TASK_PAUSED = "task_paused";
    private static final String TASK_EXECUTE_ERROR = "task_execute_error";
    private static final String TASK_ACK_FAILED = "task_ack_failed";
    private static final String TASK_ACK_ERROR = "task_ack_error";
    private static final String TASK_UPDATE_ERROR = "task_update_error";
    private static final String TASK_LEASE_EXTEND_ERROR = "task_lease_extend_error";
    private static final String TASK_LEASE_EXTEND_COUNTER = "task_lease_extend_counter";
    private static final String TASK_POLL_COUNTER = "task_poll_counter";
    private static final String TASK_EXECUTE_TIME = "task_execute_time";
    private static final String TASK_POLL_TIME = "task_poll_time";
    private static final String TASK_RESULT_SIZE = "task_result_size";
    private static final String WORKFLOW_INPUT_SIZE = "workflow_input_size";
    private static final String EXTERNAL_PAYLOAD_USED = "external_payload_used";
    private static final String WORKFLOW_START_ERROR = "workflow_start_error";
    private static final String THREAD_UNCAUGHT_EXCEPTION = "thread_uncaught_exceptions";

    private static final Registry REGISTRY = Spectator.globalRegistry();
    private static final Map<String, Timer> TIMERS = new ConcurrentHashMap<>();
    private static final Map<String, Counter> COUNTERS = new ConcurrentHashMap<>();
    private static final Map<String, AtomicLong> GAUGES = new ConcurrentHashMap<>();
    private static final String CLASS_NAME = MetricsContainer.class.getSimpleName();

    /** Utility class; not instantiable. */
    private MetricsContainer() {}

    /** @return timer recording how long a poll for the given task type takes. */
    public static Timer getPollTimer(String taskType) {
        return getTimer(TASK_POLL_TIME, TASK_TYPE, taskType);
    }

    /** @return timer recording how long execution of the given task type takes. */
    public static Timer getExecutionTimer(String taskType) {
        return getTimer(TASK_EXECUTE_TIME, TASK_TYPE, taskType);
    }

    private static Timer getTimer(String name, String... additionalTags) {
        String key = CLASS_NAME + "." + name + "." + String.join(",", additionalTags);
        return TIMERS.computeIfAbsent(
                key,
                k -> {
                    List<Tag> tagList = getTags(additionalTags);
                    // Record the unit so downstream consumers can interpret the timings.
                    tagList.add(new BasicTag("unit", TimeUnit.MILLISECONDS.name()));
                    return REGISTRY.timer(name, tagList);
                });
    }

    /**
     * Builds the tag list for a meter: a "class" tag plus the given alternating key/value pairs.
     * A trailing key without a matching value is silently ignored.
     */
    private static List<Tag> getTags(String[] additionalTags) {
        List<Tag> tagList = new ArrayList<>();
        tagList.add(new BasicTag("class", CLASS_NAME));
        // additionalTags is an alternating key/value sequence; consume it pairwise.
        for (int j = 0; j + 1 < additionalTags.length; j += 2) {
            tagList.add(new BasicTag(additionalTags[j], additionalTags[j + 1]));
        }
        return tagList;
    }

    private static void incrementCount(String name, String... additionalTags) {
        getCounter(name, additionalTags).increment();
    }

    private static Counter getCounter(String name, String... additionalTags) {
        String key = CLASS_NAME + "." + name + "." + String.join(",", additionalTags);
        return COUNTERS.computeIfAbsent(
                key,
                k -> {
                    List<Tag> tags = getTags(additionalTags);
                    return REGISTRY.counter(name, tags);
                });
    }

    private static AtomicLong getGauge(String name, String... additionalTags) {
        String key = CLASS_NAME + "." + name + "." + String.join(",", additionalTags);
        return GAUGES.computeIfAbsent(
                key,
                pollTimer -> {
                    Id id = REGISTRY.createId(name, getTags(additionalTags));
                    // PolledMeter samples the AtomicLong's current value on each poll.
                    return PolledMeter.using(REGISTRY).withId(id).monitorValue(new AtomicLong(0));
                });
    }

    /** Counts tasks rejected because the local execution queue was full. */
    public static void incrementTaskExecutionQueueFullCount(String taskType) {
        incrementCount(TASK_EXECUTION_QUEUE_FULL, TASK_TYPE, taskType);
    }

    /** Counts uncaught exceptions escaping worker threads. */
    public static void incrementUncaughtExceptionCount() {
        incrementCount(THREAD_UNCAUGHT_EXCEPTION);
    }

    /** Counts poll failures, tagged with the exception's simple class name. */
    public static void incrementTaskPollErrorCount(String taskType, Exception e) {
        incrementCount(
                TASK_POLL_ERROR, TASK_TYPE, taskType, EXCEPTION, e.getClass().getSimpleName());
    }

    /** Counts polls skipped because the worker was paused. */
    public static void incrementTaskPausedCount(String taskType) {
        incrementCount(TASK_PAUSED, TASK_TYPE, taskType);
    }

    /** Counts task execution failures, tagged with the throwable's simple class name. */
    public static void incrementTaskExecutionErrorCount(String taskType, Throwable e) {
        incrementCount(
                TASK_EXECUTE_ERROR, TASK_TYPE, taskType, EXCEPTION, e.getClass().getSimpleName());
    }

    /** Counts acknowledgements the server rejected. */
    public static void incrementTaskAckFailedCount(String taskType) {
        incrementCount(TASK_ACK_FAILED, TASK_TYPE, taskType);
    }

    /** Counts acknowledgement attempts that threw, tagged with the exception class name. */
    public static void incrementTaskAckErrorCount(String taskType, Exception e) {
        incrementCount(
                TASK_ACK_ERROR, TASK_TYPE, taskType, EXCEPTION, e.getClass().getSimpleName());
    }

    /** Records the size (in bytes) of the most recent task result payload for the task type. */
    public static void recordTaskResultPayloadSize(String taskType, long payloadSize) {
        getGauge(TASK_RESULT_SIZE, TASK_TYPE, taskType).getAndSet(payloadSize);
    }

    /** Counts failed task status updates, tagged with the throwable's simple class name. */
    public static void incrementTaskUpdateErrorCount(String taskType, Throwable t) {
        incrementCount(
                TASK_UPDATE_ERROR, TASK_TYPE, taskType, EXCEPTION, t.getClass().getSimpleName());
    }

    /** Counts failed lease-extension attempts, tagged with the throwable's simple class name. */
    public static void incrementTaskLeaseExtendErrorCount(String taskType, Throwable t) {
        incrementCount(
                TASK_LEASE_EXTEND_ERROR,
                TASK_TYPE,
                taskType,
                EXCEPTION,
                t.getClass().getSimpleName());
    }

    /** Counts lease extensions, incremented by the number of tasks extended. */
    public static void incrementTaskLeaseExtendCount(String taskType, int taskCount) {
        getCounter(TASK_LEASE_EXTEND_COUNTER, TASK_TYPE, taskType).increment(taskCount);
    }

    /** Counts polled tasks, incremented by the number of tasks returned by the poll. */
    public static void incrementTaskPollCount(String taskType, int taskCount) {
        getCounter(TASK_POLL_COUNTER, TASK_TYPE, taskType).increment(taskCount);
    }

    /** Records the size (in bytes) of the most recent workflow input payload. */
    public static void recordWorkflowInputPayloadSize(
            String workflowType, String version, long payloadSize) {
        getGauge(WORKFLOW_INPUT_SIZE, WORKFLOW_TYPE, workflowType, WORKFLOW_VERSION, version)
                .getAndSet(payloadSize);
    }

    /** Counts usages of external payload storage for the given entity/operation/payload type. */
    public static void incrementExternalPayloadUsedCount(
            String name, String operation, String payloadType) {
        incrementCount(
                EXTERNAL_PAYLOAD_USED,
                ENTITY_NAME,
                name,
                OPERATION,
                operation,
                PAYLOAD_TYPE,
                payloadType);
    }

    /** Counts workflow start failures, tagged with the throwable's simple class name. */
    public static void incrementWorkflowStartErrorCount(String workflowType, Throwable t) {
        incrementCount(
                WORKFLOW_START_ERROR,
                WORKFLOW_TYPE,
                workflowType,
                EXCEPTION,
                t.getClass().getSimpleName());
    }
}
7,011
0
Create_ds/conductor/client/src/main/java/com/netflix/conductor/client
Create_ds/conductor/client/src/main/java/com/netflix/conductor/client/worker/Worker.java
/*
 * Copyright 2021 Netflix, Inc.
 * <p>
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
 * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations under the License.
 */
package com.netflix.conductor.client.worker;

import java.net.InetAddress;
import java.net.UnknownHostException;
import java.util.function.Function;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.netflix.conductor.client.config.PropertyFactory;
import com.netflix.conductor.common.metadata.tasks.Task;
import com.netflix.conductor.common.metadata.tasks.TaskResult;

import com.amazonaws.util.EC2MetadataUtils;

/**
 * Contract for a Conductor task worker. A worker is bound to a single task definition and is
 * repeatedly polled by the client runtime; each polled task is handed to {@link #execute(Task)}.
 * Most tuning knobs (pause flag, poll interval, lease extension, batch-poll timeout) are resolved
 * at call time through {@link PropertyFactory}, so they can be changed without a restart.
 */
public interface Worker {

    /**
     * Retrieve the name of the task definition the worker is currently working on.
     *
     * @return the name of the task definition.
     */
    String getTaskDefName();

    /**
     * Executes a task and returns the updated task.
     *
     * @param task Task to be executed.
     * @return the {@link TaskResult} object If the task is not completed yet, return with the
     *     status as IN_PROGRESS.
     */
    TaskResult execute(Task task);

    /**
     * Called when the task coordinator fails to update the task to the server. Client should store
     * the task id (in a database) and retry the update later
     *
     * @param task Task which cannot be updated back to the server.
     */
    default void onErrorUpdate(Task task) {}

    /**
     * Override this method to pause the worker from polling.
     *
     * @return true if the worker is paused and no more tasks should be polled from server.
     */
    default boolean paused() {
        return PropertyFactory.getBoolean(getTaskDefName(), "paused", false);
    }

    /**
     * Override this method to app specific rules.
     *
     * <p>Resolution order: local hostname, then the {@code HOSTNAME} environment variable, then
     * the EC2 instance id, and finally the {@code user.name} system property.
     *
     * @return returns the serverId as the id of the instance that the worker is running.
     */
    default String getIdentity() {
        String workerId;
        try {
            workerId = InetAddress.getLocalHost().getHostName();
        } catch (UnknownHostException e) {
            // Host lookup failed; fall back to the container/VM hostname variable.
            workerId = System.getenv("HOSTNAME");
        }
        if (workerId == null) {
            if (EC2MetadataUtils.getInstanceId() == null) {
                workerId = System.getProperty("user.name");
            } else {
                workerId = EC2MetadataUtils.getInstanceId();
            }
        }
        LoggerHolder.logger.debug("Setting worker id to {}", workerId);
        return workerId;
    }

    /**
     * Override this method to change the interval between polls.
     *
     * @return interval in millisecond at which the server should be polled for worker tasks.
     */
    default int getPollingInterval() {
        return PropertyFactory.getInteger(getTaskDefName(), "pollInterval", 1000);
    }

    /** @return true when the runtime should periodically extend this worker's task leases. */
    default boolean leaseExtendEnabled() {
        return PropertyFactory.getBoolean(getTaskDefName(), "leaseExtendEnabled", false);
    }

    /** @return timeout in milliseconds used when batch-polling for tasks. */
    default int getBatchPollTimeoutInMS() {
        return PropertyFactory.getInteger(getTaskDefName(), "batchPollTimeoutInMS", 1000);
    }

    /**
     * Convenience factory: builds a {@link Worker} for the given task type whose execution is
     * delegated to the supplied function.
     *
     * @param taskType name of the task definition this worker handles.
     * @param executor function invoked for every polled task.
     * @return a new {@link Worker} instance.
     */
    static Worker create(String taskType, Function<Task, TaskResult> executor) {
        return new Worker() {
            @Override
            public String getTaskDefName() {
                return taskType;
            }

            @Override
            public TaskResult execute(Task task) {
                return executor.apply(task);
            }

            @Override
            public boolean paused() {
                // Delegate to the property-driven default implementation.
                return Worker.super.paused();
            }
        };
    }
}

/** Holds the shared logger so interface default methods can log without a per-call lookup. */
final class LoggerHolder {

    static final Logger logger = LoggerFactory.getLogger(Worker.class);
}
7,012
0
Create_ds/conductor/client/src/main/java/com/netflix/conductor/client
Create_ds/conductor/client/src/main/java/com/netflix/conductor/client/exception/ConductorClientException.java
/*
 * Copyright 2020 Netflix, Inc.
 * <p>
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
 * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations under the License.
 */
package com.netflix.conductor.client.exception;

import java.util.List;

import com.netflix.conductor.common.validation.ErrorResponse;
import com.netflix.conductor.common.validation.ValidationError;

/** Client exception thrown from Conductor api clients. */
public class ConductorClientException extends RuntimeException {

    private int status;
    // Kept as a mutable field (shadowing Throwable's message) so setMessage() can work;
    // getMessage() is overridden below to return this field.
    private String message;
    private String instance;
    private String code;
    private boolean retryable;
    private List<ValidationError> validationErrors;

    public ConductorClientException() {
        super();
    }

    public ConductorClientException(String message) {
        super(message);
        this.message = message;
    }

    public ConductorClientException(String message, Throwable cause) {
        super(message, cause);
        this.message = message;
    }

    public ConductorClientException(int status, String message) {
        super(message);
        this.status = status;
        this.message = message;
    }

    /**
     * Builds an exception from a server-side {@link ErrorResponse}, copying over the status,
     * retryability, code, instance and validation errors.
     */
    public ConductorClientException(int status, ErrorResponse errorResponse) {
        super(errorResponse.getMessage());
        this.status = status;
        this.retryable = errorResponse.isRetryable();
        this.message = errorResponse.getMessage();
        this.code = errorResponse.getCode();
        this.instance = errorResponse.getInstance();
        this.validationErrors = errorResponse.getValidationErrors();
    }

    public List<ValidationError> getValidationErrors() {
        return validationErrors;
    }

    public void setValidationErrors(List<ValidationError> validationErrors) {
        this.validationErrors = validationErrors;
    }

    @Override
    public String toString() {
        StringBuilder builder = new StringBuilder();
        builder.append(getClass().getName()).append(": ");

        if (this.message != null) {
            builder.append(message);
        }

        // The "{...}" detail section is opened only when a status was set; track that so the
        // closing brace is emitted only when the opening brace was (previously it was appended
        // unconditionally, producing unbalanced output for status <= 0).
        boolean detailOpened = false;
        if (status > 0) {
            builder.append(" {status=").append(status);
            if (this.code != null) {
                builder.append(", code='").append(code).append("'");
            }
            builder.append(", retryable: ").append(retryable);
            detailOpened = true;
        }

        if (this.instance != null) {
            builder.append(", instance: ").append(instance);
        }

        if (this.validationErrors != null) {
            builder.append(", validationErrors: ").append(validationErrors.toString());
        }
        if (detailOpened) {
            builder.append("}");
        }
        return builder.toString();
    }

    public String getCode() {
        return code;
    }

    public void setCode(String code) {
        this.code = code;
    }

    public void setStatus(int status) {
        this.status = status;
    }

    public void setMessage(String message) {
        this.message = message;
    }

    public String getInstance() {
        return instance;
    }

    public void setInstance(String instance) {
        this.instance = instance;
    }

    public boolean isRetryable() {
        return retryable;
    }

    public void setRetryable(boolean retryable) {
        this.retryable = retryable;
    }

    @Override
    public String getMessage() {
        return this.message;
    }

    public int getStatus() {
        return this.status;
    }
}
7,013
0
Create_ds/conductor/redis-concurrency-limit/src/main/java/com/netflix/conductor/redis
Create_ds/conductor/redis-concurrency-limit/src/main/java/com/netflix/conductor/redis/limit/RedisConcurrentExecutionLimitDAO.java
/*
 * Copyright 2022 Netflix, Inc.
 * <p>
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
 * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations under the License.
 */
package com.netflix.conductor.redis.limit;

import java.util.Optional;

import org.apache.commons.lang3.ObjectUtils;
import org.apache.commons.lang3.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.data.redis.core.StringRedisTemplate;
import org.springframework.stereotype.Component;

import com.netflix.conductor.annotations.Trace;
import com.netflix.conductor.common.metadata.tasks.TaskDef;
import com.netflix.conductor.core.exception.TransientException;
import com.netflix.conductor.dao.ConcurrentExecutionLimitDAO;
import com.netflix.conductor.metrics.Monitors;
import com.netflix.conductor.model.TaskModel;
import com.netflix.conductor.redis.limit.config.RedisConcurrentExecutionLimitProperties;

/**
 * Redis-backed implementation of {@link ConcurrentExecutionLimitDAO}. Tracks in-flight task ids in
 * a Redis Set keyed by (optional namespace + ) task definition name and compares the set size
 * against the task definition's concurrency limit.
 */
@Trace
@Component
@ConditionalOnProperty(
        value = "conductor.redis-concurrent-execution-limit.enabled",
        havingValue = "true")
public class RedisConcurrentExecutionLimitDAO implements ConcurrentExecutionLimitDAO {

    private static final Logger LOGGER =
            LoggerFactory.getLogger(RedisConcurrentExecutionLimitDAO.class);
    private static final String CLASS_NAME =
            RedisConcurrentExecutionLimitDAO.class.getSimpleName();

    private final StringRedisTemplate stringRedisTemplate;
    private final RedisConcurrentExecutionLimitProperties properties;

    public RedisConcurrentExecutionLimitDAO(
            StringRedisTemplate stringRedisTemplate,
            RedisConcurrentExecutionLimitProperties properties) {
        this.stringRedisTemplate = stringRedisTemplate;
        this.properties = properties;
    }

    /**
     * Adds the {@link TaskModel} identifier to a Redis Set for the {@link TaskDef}'s name.
     *
     * @param task The {@link TaskModel} object.
     * @throws TransientException if the Redis operation fails (retryable by the caller).
     */
    @Override
    public void addTaskToLimit(TaskModel task) {
        try {
            Monitors.recordDaoRequests(
                    CLASS_NAME, "addTaskToLimit", task.getTaskType(), task.getWorkflowType());
            String taskId = task.getTaskId();
            String taskDefName = task.getTaskDefName();
            String keyName = createKeyName(taskDefName);

            stringRedisTemplate.opsForSet().add(keyName, taskId);

            LOGGER.debug("Added taskId: {} to key: {}", taskId, keyName);
        } catch (Exception e) {
            Monitors.error(CLASS_NAME, "addTaskToLimit");
            String errorMsg =
                    String.format(
                            "Error updating taskDefLimit for task - %s:%s in workflow: %s",
                            task.getTaskDefName(), task.getTaskId(), task.getWorkflowInstanceId());
            LOGGER.error(errorMsg, e);
            throw new TransientException(errorMsg, e);
        }
    }

    /**
     * Remove the {@link TaskModel} identifier from the Redis Set for the {@link TaskDef}'s name.
     *
     * @param task The {@link TaskModel} object.
     * @throws TransientException if the Redis operation fails (retryable by the caller).
     */
    @Override
    public void removeTaskFromLimit(TaskModel task) {
        try {
            Monitors.recordDaoRequests(
                    CLASS_NAME, "removeTaskFromLimit", task.getTaskType(), task.getWorkflowType());
            String taskId = task.getTaskId();
            String taskDefName = task.getTaskDefName();
            String keyName = createKeyName(taskDefName);

            stringRedisTemplate.opsForSet().remove(keyName, taskId);

            LOGGER.debug("Removed taskId: {} from key: {}", taskId, keyName);
        } catch (Exception e) {
            Monitors.error(CLASS_NAME, "removeTaskFromLimit");
            String errorMsg =
                    String.format(
                            "Error updating taskDefLimit for task - %s:%s in workflow: %s",
                            task.getTaskDefName(), task.getTaskId(), task.getWorkflowInstanceId());
            LOGGER.error(errorMsg, e);
            throw new TransientException(errorMsg, e);
        }
    }

    /**
     * Checks if the {@link TaskModel} identifier is in the Redis Set and size of the set is more
     * than the {@link TaskDef#concurrencyLimit()}.
     *
     * @param task The {@link TaskModel} object.
     * @return true if the task id is not in the set and size of the set is more than the {@link
     *     TaskDef#concurrencyLimit()}.
     * @throws TransientException if the Redis operation fails (retryable by the caller).
     */
    @Override
    public boolean exceedsLimit(TaskModel task) {
        Optional<TaskDef> taskDefinition = task.getTaskDefinition();
        if (taskDefinition.isEmpty()) {
            return false;
        }
        int limit = taskDefinition.get().concurrencyLimit();
        if (limit <= 0) {
            // A non-positive limit means "no concurrency limit configured".
            return false;
        }

        try {
            Monitors.recordDaoRequests(
                    CLASS_NAME, "exceedsLimit", task.getTaskType(), task.getWorkflowType());
            String taskId = task.getTaskId();
            String taskDefName = task.getTaskDefName();
            String keyName = createKeyName(taskDefName);

            // A task already counted as a member never exceeds the limit (re-check is idempotent).
            boolean isMember =
                    ObjectUtils.defaultIfNull(
                            stringRedisTemplate.opsForSet().isMember(keyName, taskId), false);
            long size =
                    ObjectUtils.defaultIfNull(stringRedisTemplate.opsForSet().size(keyName), -1L);

            LOGGER.debug(
                    "Task: {} is {} of {}, size: {} and limit: {}",
                    taskId,
                    isMember ? "a member" : "not a member",
                    keyName,
                    size,
                    limit);
            return !isMember && size >= limit;
        } catch (Exception e) {
            Monitors.error(CLASS_NAME, "exceedsLimit");
            String errorMsg =
                    String.format(
                            "Failed to get in progress limit - %s:%s in workflow :%s",
                            task.getTaskDefName(), task.getTaskId(), task.getWorkflowInstanceId());
            LOGGER.error(errorMsg, e);
            // Chain the cause (previously dropped), consistent with the other methods.
            throw new TransientException(errorMsg, e);
        }
    }

    /** Prefixes the task definition name with the configured namespace, when one is set. */
    private String createKeyName(String taskDefName) {
        StringBuilder builder = new StringBuilder();
        String namespace = properties.getNamespace();
        if (StringUtils.isNotBlank(namespace)) {
            builder.append(namespace).append(':');
        }
        return builder.append(taskDefName).toString();
    }
}
7,014
0
Create_ds/conductor/redis-concurrency-limit/src/main/java/com/netflix/conductor/redis/limit
Create_ds/conductor/redis-concurrency-limit/src/main/java/com/netflix/conductor/redis/limit/config/RedisConcurrentExecutionLimitConfiguration.java
/*
 * Copyright 2021 Netflix, Inc.
 * <p>
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
 * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations under the License.
 */
package com.netflix.conductor.redis.limit.config;

import java.util.List;

import org.apache.commons.pool2.impl.GenericObjectPoolConfig;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.boot.context.properties.EnableConfigurationProperties;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.data.redis.connection.RedisClusterConfiguration;
import org.springframework.data.redis.connection.RedisConnectionFactory;
import org.springframework.data.redis.connection.RedisStandaloneConfiguration;
import org.springframework.data.redis.connection.jedis.JedisClientConfiguration;
import org.springframework.data.redis.connection.jedis.JedisConnectionFactory;

/**
 * Wires a Jedis-based {@link RedisConnectionFactory} for the concurrent-execution-limit DAO.
 * Exactly one factory bean is created, selected by the
 * {@code conductor.redis-concurrent-execution-limit.type} property (standalone is the default).
 */
@Configuration
@ConditionalOnProperty(
        value = "conductor.redis-concurrent-execution-limit.enabled",
        havingValue = "true")
@EnableConfigurationProperties(RedisConcurrentExecutionLimitProperties.class)
public class RedisConcurrentExecutionLimitConfiguration {

    /**
     * Connection factory for a Redis Cluster deployment, with connection pooling sized by
     * {@code maxConnectionsPerHost}.
     */
    @Bean
    @ConditionalOnProperty(
            value = "conductor.redis-concurrent-execution-limit.type",
            havingValue = "cluster")
    public RedisConnectionFactory redisClusterConnectionFactory(
            RedisConcurrentExecutionLimitProperties properties) {
        GenericObjectPoolConfig<?> poolConfig = new GenericObjectPoolConfig<>();
        poolConfig.setMaxTotal(properties.getMaxConnectionsPerHost());
        // Validate idle pooled connections so stale ones are evicted.
        poolConfig.setTestWhileIdle(true);
        JedisClientConfiguration clientConfig =
                JedisClientConfiguration.builder()
                        .usePooling()
                        .poolConfig(poolConfig)
                        .and()
                        .clientName(properties.getClientName())
                        .build();

        RedisClusterConfiguration redisClusterConfiguration =
                new RedisClusterConfiguration(
                        List.of(properties.getHost() + ":" + properties.getPort()));
        // Previously the configured password was silently ignored; apply it when present.
        if (properties.getPassword() != null) {
            redisClusterConfiguration.setPassword(properties.getPassword());
        }
        return new JedisConnectionFactory(redisClusterConfiguration, clientConfig);
    }

    /** Connection factory for a single standalone Redis node (the default). */
    @Bean
    @ConditionalOnProperty(
            value = "conductor.redis-concurrent-execution-limit.type",
            havingValue = "standalone",
            matchIfMissing = true)
    public RedisConnectionFactory redisStandaloneConnectionFactory(
            RedisConcurrentExecutionLimitProperties properties) {
        RedisStandaloneConfiguration config =
                new RedisStandaloneConfiguration(properties.getHost(), properties.getPort());
        // Previously the configured password was silently ignored; apply it when present.
        if (properties.getPassword() != null) {
            config.setPassword(properties.getPassword());
        }
        return new JedisConnectionFactory(config);
    }
}
7,015
0
Create_ds/conductor/redis-concurrency-limit/src/main/java/com/netflix/conductor/redis/limit
Create_ds/conductor/redis-concurrency-limit/src/main/java/com/netflix/conductor/redis/limit/config/RedisConcurrentExecutionLimitProperties.java
/*
 * Copyright 2021 Netflix, Inc.
 * <p>
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
 * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations under the License.
 */
package com.netflix.conductor.redis.limit.config;

import org.springframework.boot.context.properties.ConfigurationProperties;

/**
 * Properties bound from the {@code conductor.redis-concurrent-execution-limit.*} namespace,
 * describing how to reach the Redis instance used for concurrency-limit bookkeeping.
 */
@ConfigurationProperties("conductor.redis-concurrent-execution-limit")
public class RedisConcurrentExecutionLimitProperties {

    /** Deployment topology of the target Redis. */
    public enum RedisType {
        STANDALONE,
        CLUSTER
    }

    // Connection settings; mutable so Spring's binder can populate them.
    private RedisType type;
    private String host;
    private int port;
    private String password;
    private int maxConnectionsPerHost;
    private String clientName;

    // Key prefix applied to every limit set; defaults to "conductor".
    private String namespace = "conductor";

    public RedisType getType() {
        return type;
    }

    public void setType(RedisType type) {
        this.type = type;
    }

    public String getHost() {
        return host;
    }

    public void setHost(String host) {
        this.host = host;
    }

    public int getPort() {
        return port;
    }

    public void setPort(int port) {
        this.port = port;
    }

    public String getPassword() {
        return password;
    }

    public void setPassword(String password) {
        this.password = password;
    }

    public int getMaxConnectionsPerHost() {
        return maxConnectionsPerHost;
    }

    public void setMaxConnectionsPerHost(int maxConnectionsPerHost) {
        this.maxConnectionsPerHost = maxConnectionsPerHost;
    }

    public String getClientName() {
        return clientName;
    }

    public void setClientName(String clientName) {
        this.clientName = clientName;
    }

    public String getNamespace() {
        return namespace;
    }

    public void setNamespace(String namespace) {
        this.namespace = namespace;
    }
}
7,016
0
Create_ds/conductor/rest/src/test/java/com/netflix/conductor/rest
Create_ds/conductor/rest/src/test/java/com/netflix/conductor/rest/controllers/MetadataResourceTest.java
/*
 * Copyright 2020 Netflix, Inc.
 * <p>
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
 * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations under the License.
 */
package com.netflix.conductor.rest.controllers;

import java.util.ArrayList;
import java.util.List;

import org.junit.Before;
import org.junit.Test;

import com.netflix.conductor.common.metadata.tasks.TaskDef;
import com.netflix.conductor.common.metadata.workflow.WorkflowDef;
import com.netflix.conductor.service.MetadataService;

import static org.junit.Assert.assertEquals;
import static org.mockito.Mockito.any;
import static org.mockito.Mockito.anyList;
import static org.mockito.Mockito.anyString;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;

/** Unit tests for {@link MetadataResource}, verifying delegation to {@link MetadataService}. */
public class MetadataResourceTest {

    private MetadataResource resource;

    private MetadataService service;

    @Before
    public void before() {
        service = mock(MetadataService.class);
        resource = new MetadataResource(service);
    }

    /** Builds the workflow definition fixture shared by the workflow-def tests. */
    private static WorkflowDef sampleWorkflowDef() {
        WorkflowDef workflowDef = new WorkflowDef();
        workflowDef.setName("test");
        workflowDef.setVersion(1);
        workflowDef.setDescription("test");
        return workflowDef;
    }

    /** Builds the task definition fixture shared by the task-def tests. */
    private static TaskDef sampleTaskDef() {
        TaskDef taskDef = new TaskDef();
        taskDef.setName("test");
        taskDef.setDescription("desc");
        return taskDef;
    }

    @Test
    public void testCreateWorkflow() {
        resource.create(new WorkflowDef());
        verify(service, times(1)).registerWorkflowDef(any(WorkflowDef.class));
    }

    @Test
    public void testValidateWorkflow() {
        resource.validate(new WorkflowDef());
        verify(service, times(1)).validateWorkflowDef(any(WorkflowDef.class));
    }

    @Test
    public void testUpdateWorkflow() {
        List<WorkflowDef> workflowDefs = new ArrayList<>();
        workflowDefs.add(new WorkflowDef());
        resource.update(workflowDefs);
        verify(service, times(1)).updateWorkflowDef(anyList());
    }

    @Test
    public void testGetWorkflowDef() {
        WorkflowDef workflowDef = sampleWorkflowDef();
        when(service.getWorkflowDef(anyString(), any())).thenReturn(workflowDef);
        assertEquals(workflowDef, resource.get("test", 1));
    }

    @Test
    public void testGetAllWorkflowDef() {
        List<WorkflowDef> workflowDefs = new ArrayList<>();
        workflowDefs.add(sampleWorkflowDef());
        when(service.getWorkflowDefs()).thenReturn(workflowDefs);
        assertEquals(workflowDefs, resource.getAll());
    }

    @Test
    public void testGetAllWorkflowDefLatestVersions() {
        List<WorkflowDef> workflowDefs = new ArrayList<>();
        workflowDefs.add(sampleWorkflowDef());
        when(service.getWorkflowDefsLatestVersions()).thenReturn(workflowDefs);
        assertEquals(workflowDefs, resource.getAllWorkflowsWithLatestVersions());
    }

    @Test
    public void testUnregisterWorkflowDef() throws Exception {
        resource.unregisterWorkflowDef("test", 1);
        verify(service, times(1)).unregisterWorkflowDef(anyString(), any());
    }

    @Test
    public void testRegisterListOfTaskDef() {
        List<TaskDef> taskDefs = new ArrayList<>();
        taskDefs.add(sampleTaskDef());
        resource.registerTaskDef(taskDefs);
        verify(service, times(1)).registerTaskDef(taskDefs);
    }

    @Test
    public void testRegisterTaskDef() {
        TaskDef taskDef = sampleTaskDef();
        resource.registerTaskDef(taskDef);
        verify(service, times(1)).updateTaskDef(taskDef);
    }

    @Test
    public void testGetAllTaskDefs() {
        List<TaskDef> taskDefs = new ArrayList<>();
        taskDefs.add(sampleTaskDef());
        when(service.getTaskDefs()).thenReturn(taskDefs);
        assertEquals(taskDefs, resource.getTaskDefs());
    }

    @Test
    public void testGetTaskDef() {
        TaskDef taskDef = sampleTaskDef();
        when(service.getTaskDef(anyString())).thenReturn(taskDef);
        assertEquals(taskDef, resource.getTaskDef("test"));
    }

    @Test
    public void testUnregisterTaskDef() {
        resource.unregisterTaskDef("test");
        verify(service, times(1)).unregisterTaskDef(anyString());
    }
}
7,017
0
Create_ds/conductor/rest/src/test/java/com/netflix/conductor/rest
Create_ds/conductor/rest/src/test/java/com/netflix/conductor/rest/controllers/TaskResourceTest.java
/* * Copyright 2020 Netflix, Inc. * <p> * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * <p> * http://www.apache.org/licenses/LICENSE-2.0 * <p> * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. */ package com.netflix.conductor.rest.controllers; import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; import org.junit.Before; import org.junit.Test; import org.springframework.http.ResponseEntity; import com.netflix.conductor.common.metadata.tasks.PollData; import com.netflix.conductor.common.metadata.tasks.Task; import com.netflix.conductor.common.metadata.tasks.TaskExecLog; import com.netflix.conductor.common.metadata.tasks.TaskResult; import com.netflix.conductor.common.run.ExternalStorageLocation; import com.netflix.conductor.common.run.SearchResult; import com.netflix.conductor.common.run.TaskSummary; import com.netflix.conductor.service.TaskService; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotNull; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.anyInt; import static org.mockito.ArgumentMatchers.anyList; import static org.mockito.ArgumentMatchers.anyString; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; public class TaskResourceTest { private TaskService mockTaskService; private TaskResource taskResource; @Before public void before() { this.mockTaskService = mock(TaskService.class); this.taskResource = new 
TaskResource(this.mockTaskService); } @Test public void testPoll() { Task task = new Task(); task.setTaskType("SIMPLE"); task.setWorkerId("123"); task.setDomain("test"); when(mockTaskService.poll(anyString(), anyString(), anyString())).thenReturn(task); assertEquals(ResponseEntity.ok(task), taskResource.poll("SIMPLE", "123", "test")); } @Test public void testBatchPoll() { Task task = new Task(); task.setTaskType("SIMPLE"); task.setWorkerId("123"); task.setDomain("test"); List<Task> listOfTasks = new ArrayList<>(); listOfTasks.add(task); when(mockTaskService.batchPoll(anyString(), anyString(), anyString(), anyInt(), anyInt())) .thenReturn(listOfTasks); assertEquals( ResponseEntity.ok(listOfTasks), taskResource.batchPoll("SIMPLE", "123", "test", 1, 100)); } @Test public void testUpdateTask() { TaskResult taskResult = new TaskResult(); taskResult.setStatus(TaskResult.Status.COMPLETED); taskResult.setTaskId("123"); when(mockTaskService.updateTask(any(TaskResult.class))).thenReturn("123"); assertEquals("123", taskResource.updateTask(taskResult)); } @Test public void testLog() { taskResource.log("123", "test log"); verify(mockTaskService, times(1)).log(anyString(), anyString()); } @Test public void testGetTaskLogs() { List<TaskExecLog> listOfLogs = new ArrayList<>(); listOfLogs.add(new TaskExecLog("test log")); when(mockTaskService.getTaskLogs(anyString())).thenReturn(listOfLogs); assertEquals(listOfLogs, taskResource.getTaskLogs("123")); } @Test public void testGetTask() { Task task = new Task(); task.setTaskType("SIMPLE"); task.setWorkerId("123"); task.setDomain("test"); task.setStatus(Task.Status.IN_PROGRESS); when(mockTaskService.getTask(anyString())).thenReturn(task); ResponseEntity<Task> entity = taskResource.getTask("123"); assertNotNull(entity); assertEquals(task, entity.getBody()); } @Test public void testSize() { Map<String, Integer> map = new HashMap<>(); map.put("test1", 1); map.put("test2", 2); List<String> list = new ArrayList<>(); list.add("test1"); 
list.add("test2"); when(mockTaskService.getTaskQueueSizes(anyList())).thenReturn(map); assertEquals(map, taskResource.size(list)); } @Test public void testAllVerbose() { Map<String, Long> map = new HashMap<>(); map.put("queue1", 1L); map.put("queue2", 2L); Map<String, Map<String, Long>> mapOfMap = new HashMap<>(); mapOfMap.put("queue", map); Map<String, Map<String, Map<String, Long>>> queueSizeMap = new HashMap<>(); queueSizeMap.put("queue", mapOfMap); when(mockTaskService.allVerbose()).thenReturn(queueSizeMap); assertEquals(queueSizeMap, taskResource.allVerbose()); } @Test public void testQueueDetails() { Map<String, Long> map = new HashMap<>(); map.put("queue1", 1L); map.put("queue2", 2L); when(mockTaskService.getAllQueueDetails()).thenReturn(map); assertEquals(map, taskResource.all()); } @Test public void testGetPollData() { PollData pollData = new PollData("queue", "test", "w123", 100); List<PollData> listOfPollData = new ArrayList<>(); listOfPollData.add(pollData); when(mockTaskService.getPollData(anyString())).thenReturn(listOfPollData); assertEquals(listOfPollData, taskResource.getPollData("w123")); } @Test public void testGetAllPollData() { PollData pollData = new PollData("queue", "test", "w123", 100); List<PollData> listOfPollData = new ArrayList<>(); listOfPollData.add(pollData); when(mockTaskService.getAllPollData()).thenReturn(listOfPollData); assertEquals(listOfPollData, taskResource.getAllPollData()); } @Test public void testRequeueTaskType() { when(mockTaskService.requeuePendingTask(anyString())).thenReturn("1"); assertEquals("1", taskResource.requeuePendingTask("SIMPLE")); } @Test public void testSearch() { Task task = new Task(); task.setTaskType("SIMPLE"); task.setWorkerId("123"); task.setDomain("test"); task.setStatus(Task.Status.IN_PROGRESS); TaskSummary taskSummary = new TaskSummary(task); List<TaskSummary> listOfTaskSummary = Collections.singletonList(taskSummary); SearchResult<TaskSummary> searchResult = new SearchResult<>(100, 
listOfTaskSummary); when(mockTaskService.search(0, 100, "asc", "*", "*")).thenReturn(searchResult); assertEquals(searchResult, taskResource.search(0, 100, "asc", "*", "*")); } @Test public void testSearchV2() { Task task = new Task(); task.setTaskType("SIMPLE"); task.setWorkerId("123"); task.setDomain("test"); task.setStatus(Task.Status.IN_PROGRESS); List<Task> listOfTasks = Collections.singletonList(task); SearchResult<Task> searchResult = new SearchResult<>(100, listOfTasks); when(mockTaskService.searchV2(0, 100, "asc", "*", "*")).thenReturn(searchResult); assertEquals(searchResult, taskResource.searchV2(0, 100, "asc", "*", "*")); } @Test public void testGetExternalStorageLocation() { ExternalStorageLocation externalStorageLocation = mock(ExternalStorageLocation.class); when(mockTaskService.getExternalStorageLocation("path", "operation", "payloadType")) .thenReturn(externalStorageLocation); assertEquals( externalStorageLocation, taskResource.getExternalStorageLocation("path", "operation", "payloadType")); } }
7,018
0
Create_ds/conductor/rest/src/test/java/com/netflix/conductor/rest
Create_ds/conductor/rest/src/test/java/com/netflix/conductor/rest/controllers/WorkflowResourceTest.java
/* * Copyright 2020 Netflix, Inc. * <p> * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * <p> * http://www.apache.org/licenses/LICENSE-2.0 * <p> * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. */ package com.netflix.conductor.rest.controllers; import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; import org.junit.Before; import org.junit.Test; import org.mockito.Mock; import com.netflix.conductor.common.metadata.workflow.RerunWorkflowRequest; import com.netflix.conductor.common.metadata.workflow.StartWorkflowRequest; import com.netflix.conductor.common.run.Workflow; import com.netflix.conductor.service.WorkflowService; import com.netflix.conductor.service.WorkflowTestService; import static org.junit.Assert.assertEquals; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.anyBoolean; import static org.mockito.ArgumentMatchers.anyInt; import static org.mockito.ArgumentMatchers.anyList; import static org.mockito.ArgumentMatchers.anyLong; import static org.mockito.ArgumentMatchers.anyMap; import static org.mockito.ArgumentMatchers.anyString; import static org.mockito.ArgumentMatchers.isNull; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; public class WorkflowResourceTest { @Mock private WorkflowService mockWorkflowService; @Mock private WorkflowTestService mockWorkflowTestService; private WorkflowResource workflowResource; @Before public void before() { this.mockWorkflowService = 
mock(WorkflowService.class); this.mockWorkflowTestService = mock(WorkflowTestService.class); this.workflowResource = new WorkflowResource(this.mockWorkflowService, this.mockWorkflowTestService); } @Test public void testStartWorkflow() { StartWorkflowRequest startWorkflowRequest = new StartWorkflowRequest(); startWorkflowRequest.setName("w123"); Map<String, Object> input = new HashMap<>(); input.put("1", "abc"); startWorkflowRequest.setInput(input); String workflowID = "w112"; when(mockWorkflowService.startWorkflow(any(StartWorkflowRequest.class))) .thenReturn(workflowID); assertEquals("w112", workflowResource.startWorkflow(startWorkflowRequest)); } @Test public void testStartWorkflowParam() { Map<String, Object> input = new HashMap<>(); input.put("1", "abc"); String workflowID = "w112"; when(mockWorkflowService.startWorkflow( anyString(), anyInt(), anyString(), anyInt(), anyMap())) .thenReturn(workflowID); assertEquals("w112", workflowResource.startWorkflow("test1", 1, "c123", 0, input)); } @Test public void getWorkflows() { Workflow workflow = new Workflow(); workflow.setCorrelationId("123"); ArrayList<Workflow> listOfWorkflows = new ArrayList<>() { { add(workflow); } }; when(mockWorkflowService.getWorkflows(anyString(), anyString(), anyBoolean(), anyBoolean())) .thenReturn(listOfWorkflows); assertEquals(listOfWorkflows, workflowResource.getWorkflows("test1", "123", true, true)); } @Test public void testGetWorklfowsMultipleCorrelationId() { Workflow workflow = new Workflow(); workflow.setCorrelationId("c123"); List<Workflow> workflowArrayList = new ArrayList<>() { { add(workflow); } }; List<String> correlationIdList = new ArrayList<>() { { add("c123"); } }; Map<String, List<Workflow>> workflowMap = new HashMap<>(); workflowMap.put("c123", workflowArrayList); when(mockWorkflowService.getWorkflows(anyString(), anyBoolean(), anyBoolean(), anyList())) .thenReturn(workflowMap); assertEquals( workflowMap, workflowResource.getWorkflows("test", true, true, 
correlationIdList)); } @Test public void testGetExecutionStatus() { Workflow workflow = new Workflow(); workflow.setCorrelationId("c123"); when(mockWorkflowService.getExecutionStatus(anyString(), anyBoolean())) .thenReturn(workflow); assertEquals(workflow, workflowResource.getExecutionStatus("w123", true)); } @Test public void testDelete() { workflowResource.delete("w123", true); verify(mockWorkflowService, times(1)).deleteWorkflow(anyString(), anyBoolean()); } @Test public void testGetRunningWorkflow() { List<String> listOfWorklfows = new ArrayList<>() { { add("w123"); } }; when(mockWorkflowService.getRunningWorkflows(anyString(), anyInt(), anyLong(), anyLong())) .thenReturn(listOfWorklfows); assertEquals(listOfWorklfows, workflowResource.getRunningWorkflow("w123", 1, 12L, 13L)); } @Test public void testDecide() { workflowResource.decide("w123"); verify(mockWorkflowService, times(1)).decideWorkflow(anyString()); } @Test public void testPauseWorkflow() { workflowResource.pauseWorkflow("w123"); verify(mockWorkflowService, times(1)).pauseWorkflow(anyString()); } @Test public void testResumeWorkflow() { workflowResource.resumeWorkflow("test"); verify(mockWorkflowService, times(1)).resumeWorkflow(anyString()); } @Test public void testSkipTaskFromWorkflow() { workflowResource.skipTaskFromWorkflow("test", "testTask", null); verify(mockWorkflowService, times(1)) .skipTaskFromWorkflow(anyString(), anyString(), isNull()); } @Test public void testRerun() { RerunWorkflowRequest request = new RerunWorkflowRequest(); workflowResource.rerun("test", request); verify(mockWorkflowService, times(1)) .rerunWorkflow(anyString(), any(RerunWorkflowRequest.class)); } @Test public void restart() { workflowResource.restart("w123", false); verify(mockWorkflowService, times(1)).restartWorkflow(anyString(), anyBoolean()); } @Test public void testRetry() { workflowResource.retry("w123", false); verify(mockWorkflowService, times(1)).retryWorkflow(anyString(), anyBoolean()); } @Test public void 
testResetWorkflow() { workflowResource.resetWorkflow("w123"); verify(mockWorkflowService, times(1)).resetWorkflow(anyString()); } @Test public void testTerminate() { workflowResource.terminate("w123", "test"); verify(mockWorkflowService, times(1)).terminateWorkflow(anyString(), anyString()); } @Test public void testSearch() { workflowResource.search(0, 100, "asc", "*", "*"); verify(mockWorkflowService, times(1)) .searchWorkflows(anyInt(), anyInt(), anyString(), anyString(), anyString()); } @Test public void testSearchV2() { workflowResource.searchV2(0, 100, "asc", "*", "*"); verify(mockWorkflowService).searchWorkflowsV2(0, 100, "asc", "*", "*"); } @Test public void testSearchWorkflowsByTasks() { workflowResource.searchWorkflowsByTasks(0, 100, "asc", "*", "*"); verify(mockWorkflowService, times(1)) .searchWorkflowsByTasks(anyInt(), anyInt(), anyString(), anyString(), anyString()); } @Test public void testSearchWorkflowsByTasksV2() { workflowResource.searchWorkflowsByTasksV2(0, 100, "asc", "*", "*"); verify(mockWorkflowService).searchWorkflowsByTasksV2(0, 100, "asc", "*", "*"); } @Test public void testGetExternalStorageLocation() { workflowResource.getExternalStorageLocation("path", "operation", "payloadType"); verify(mockWorkflowService).getExternalStorageLocation("path", "operation", "payloadType"); } }
7,019
0
Create_ds/conductor/rest/src/test/java/com/netflix/conductor/rest
Create_ds/conductor/rest/src/test/java/com/netflix/conductor/rest/controllers/EventResourceTest.java
/* * Copyright 2020 Netflix, Inc. * <p> * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * <p> * http://www.apache.org/licenses/LICENSE-2.0 * <p> * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. */ package com.netflix.conductor.rest.controllers; import java.util.ArrayList; import java.util.List; import org.junit.Before; import org.junit.Test; import org.mockito.Mock; import com.netflix.conductor.common.metadata.events.EventHandler; import com.netflix.conductor.service.EventService; import static org.junit.Assert.assertEquals; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.anyBoolean; import static org.mockito.ArgumentMatchers.anyString; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; public class EventResourceTest { private EventResource eventResource; @Mock private EventService mockEventService; @Before public void setUp() { this.mockEventService = mock(EventService.class); this.eventResource = new EventResource(this.mockEventService); } @Test public void testAddEventHandler() { EventHandler eventHandler = new EventHandler(); eventResource.addEventHandler(eventHandler); verify(mockEventService, times(1)).addEventHandler(any(EventHandler.class)); } @Test public void testUpdateEventHandler() { EventHandler eventHandler = new EventHandler(); eventResource.updateEventHandler(eventHandler); verify(mockEventService, times(1)).updateEventHandler(any(EventHandler.class)); } @Test public void testRemoveEventHandlerStatus() { 
eventResource.removeEventHandlerStatus("testEvent"); verify(mockEventService, times(1)).removeEventHandlerStatus(anyString()); } @Test public void testGetEventHandlersForEvent() { EventHandler eventHandler = new EventHandler(); eventResource.addEventHandler(eventHandler); List<EventHandler> listOfEventHandler = new ArrayList<>(); listOfEventHandler.add(eventHandler); when(mockEventService.getEventHandlersForEvent(anyString(), anyBoolean())) .thenReturn(listOfEventHandler); assertEquals(listOfEventHandler, eventResource.getEventHandlersForEvent("testEvent", true)); } @Test public void testGetEventHandlers() { EventHandler eventHandler = new EventHandler(); eventResource.addEventHandler(eventHandler); List<EventHandler> listOfEventHandler = new ArrayList<>(); listOfEventHandler.add(eventHandler); when(mockEventService.getEventHandlers()).thenReturn(listOfEventHandler); assertEquals(listOfEventHandler, eventResource.getEventHandlers()); } }
7,020
0
Create_ds/conductor/rest/src/test/java/com/netflix/conductor/rest
Create_ds/conductor/rest/src/test/java/com/netflix/conductor/rest/controllers/AdminResourceTest.java
/* * Copyright 2020 Netflix, Inc. * <p> * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * <p> * http://www.apache.org/licenses/LICENSE-2.0 * <p> * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. */ package com.netflix.conductor.rest.controllers; import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; import org.junit.Before; import org.junit.Test; import org.mockito.Mock; import com.netflix.conductor.common.metadata.tasks.Task; import com.netflix.conductor.service.AdminService; import static org.junit.Assert.assertEquals; import static org.mockito.ArgumentMatchers.anyBoolean; import static org.mockito.ArgumentMatchers.anyInt; import static org.mockito.ArgumentMatchers.anyString; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; public class AdminResourceTest { @Mock private AdminService mockAdminService; @Mock private AdminResource adminResource; @Before public void before() { this.mockAdminService = mock(AdminService.class); this.adminResource = new AdminResource(mockAdminService); } @Test public void testGetAllConfig() { Map<String, Object> configs = new HashMap<>(); configs.put("config1", "test"); when(mockAdminService.getAllConfig()).thenReturn(configs); assertEquals(configs, adminResource.getAllConfig()); } @Test public void testView() { Task task = new Task(); task.setReferenceTaskName("test"); List<Task> listOfTask = new ArrayList<>(); listOfTask.add(task); when(mockAdminService.getListOfPendingTask(anyString(), anyInt(), 
anyInt())) .thenReturn(listOfTask); assertEquals(listOfTask, adminResource.view("testTask", 0, 100)); } @Test public void testRequeueSweep() { String workflowId = "w123"; when(mockAdminService.requeueSweep(anyString())).thenReturn(workflowId); assertEquals(workflowId, adminResource.requeueSweep(workflowId)); } @Test public void testGetEventQueues() { adminResource.getEventQueues(false); verify(mockAdminService, times(1)).getEventQueues(anyBoolean()); } }
7,021
0
Create_ds/conductor/rest/src/main/java/com/netflix/conductor/rest
Create_ds/conductor/rest/src/main/java/com/netflix/conductor/rest/config/RestConfiguration.java
/* * Copyright 2020 Netflix, Inc. * <p> * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * <p> * http://www.apache.org/licenses/LICENSE-2.0 * <p> * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. */ package com.netflix.conductor.rest.config; import org.springframework.context.annotation.Configuration; import org.springframework.web.servlet.config.annotation.ContentNegotiationConfigurer; import org.springframework.web.servlet.config.annotation.WebMvcConfigurer; import static org.springframework.http.MediaType.APPLICATION_JSON; import static org.springframework.http.MediaType.TEXT_PLAIN; @Configuration public class RestConfiguration implements WebMvcConfigurer { /** * Disable all 3 (Accept header, url parameter, path extension) strategies of content * negotiation and only allow <code>application/json</code> and <code>text/plain</code> types. * <br> * * <p>Any "mapping" that is annotated with <code>produces=TEXT_PLAIN_VALUE</code> will be sent * as <code>text/plain</code> all others as <code>application/json</code>.<br> * More details on Spring MVC content negotiation can be found at <a * href="https://spring.io/blog/2013/05/11/content-negotiation-using-spring-mvc">https://spring.io/blog/2013/05/11/content-negotiation-using-spring-mvc</a> * <br> */ @Override public void configureContentNegotiation(ContentNegotiationConfigurer configurer) { configurer .favorParameter(false) .favorPathExtension(false) .ignoreAcceptHeader(true) .defaultContentType(APPLICATION_JSON, TEXT_PLAIN); } }
7,022
0
Create_ds/conductor/rest/src/main/java/com/netflix/conductor/rest
Create_ds/conductor/rest/src/main/java/com/netflix/conductor/rest/config/RequestMappingConstants.java
/* * Copyright 2020 Netflix, Inc. * <p> * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * <p> * http://www.apache.org/licenses/LICENSE-2.0 * <p> * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. */ package com.netflix.conductor.rest.config; public interface RequestMappingConstants { String API_PREFIX = "/api/"; String ADMIN = API_PREFIX + "admin"; String EVENT = API_PREFIX + "event"; String METADATA = API_PREFIX + "metadata"; String QUEUE = API_PREFIX + "queue"; String TASKS = API_PREFIX + "tasks"; String WORKFLOW_BULK = API_PREFIX + "workflow/bulk"; String WORKFLOW = API_PREFIX + "workflow"; }
7,023
0
Create_ds/conductor/rest/src/main/java/com/netflix/conductor/rest
Create_ds/conductor/rest/src/main/java/com/netflix/conductor/rest/startup/KitchenSinkInitializer.java
/* * Copyright 2021 Netflix, Inc. * <p> * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * <p> * http://www.apache.org/licenses/LICENSE-2.0 * <p> * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. */ package com.netflix.conductor.rest.startup; import java.io.IOException; import java.io.InputStreamReader; import java.util.Collections; import java.util.LinkedList; import java.util.List; import java.util.Map; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.springframework.beans.factory.annotation.Value; import org.springframework.boot.context.event.ApplicationReadyEvent; import org.springframework.boot.web.client.RestTemplateBuilder; import org.springframework.context.event.EventListener; import org.springframework.core.io.Resource; import org.springframework.http.HttpEntity; import org.springframework.stereotype.Component; import org.springframework.util.FileCopyUtils; import org.springframework.util.LinkedMultiValueMap; import org.springframework.util.MultiValueMap; import org.springframework.web.client.RestTemplate; import com.netflix.conductor.common.metadata.tasks.TaskDef; import static org.springframework.http.HttpHeaders.CONTENT_TYPE; import static org.springframework.http.MediaType.APPLICATION_JSON_VALUE; @Component public class KitchenSinkInitializer { private static final Logger LOGGER = LoggerFactory.getLogger(KitchenSinkInitializer.class); private final RestTemplate restTemplate; @Value("${loadSample:false}") private boolean loadSamples; @Value("${server.port:8080}") private int port; @Value("classpath:./kitchensink/kitchensink.json") private Resource 
kitchenSink; @Value("classpath:./kitchensink/sub_flow_1.json") private Resource subFlow; @Value("classpath:./kitchensink/kitchenSink-ephemeralWorkflowWithStoredTasks.json") private Resource ephemeralWorkflowWithStoredTasks; @Value("classpath:./kitchensink/kitchenSink-ephemeralWorkflowWithEphemeralTasks.json") private Resource ephemeralWorkflowWithEphemeralTasks; public KitchenSinkInitializer(RestTemplateBuilder restTemplateBuilder) { this.restTemplate = restTemplateBuilder.build(); } @EventListener(ApplicationReadyEvent.class) public void setupKitchenSink() { try { if (loadSamples) { LOGGER.info("Loading Kitchen Sink examples"); createKitchenSink(); } } catch (Exception e) { LOGGER.error("Error initializing kitchen sink", e); } } private void createKitchenSink() throws Exception { List<TaskDef> taskDefs = new LinkedList<>(); TaskDef taskDef; for (int i = 0; i < 40; i++) { taskDef = new TaskDef("task_" + i, "task_" + i, 1, 0); taskDef.setOwnerEmail("example@email.com"); taskDefs.add(taskDef); } taskDef = new TaskDef("search_elasticsearch", "search_elasticsearch", 1, 0); taskDef.setOwnerEmail("example@email.com"); taskDefs.add(taskDef); restTemplate.postForEntity(url("/api/metadata/taskdefs"), taskDefs, Object.class); /* * Kitchensink example (stored workflow with stored tasks) */ MultiValueMap<String, String> headers = new LinkedMultiValueMap<>(); headers.add(CONTENT_TYPE, APPLICATION_JSON_VALUE); HttpEntity<String> request = new HttpEntity<>(readToString(kitchenSink), headers); restTemplate.postForEntity(url("/api/metadata/workflow/"), request, Map.class); request = new HttpEntity<>(readToString(subFlow), headers); restTemplate.postForEntity(url("/api/metadata/workflow/"), request, Map.class); restTemplate.postForEntity( url("/api/workflow/kitchensink"), Collections.singletonMap("task2Name", "task_5"), String.class); LOGGER.info("Kitchen sink workflow is created!"); /* * Kitchensink example with ephemeral workflow and stored tasks */ request = new 
HttpEntity<>(readToString(ephemeralWorkflowWithStoredTasks), headers); restTemplate.postForEntity(url("/api/workflow/"), request, String.class); LOGGER.info("Ephemeral Kitchen sink workflow with stored tasks is created!"); /* * Kitchensink example with ephemeral workflow and ephemeral tasks */ request = new HttpEntity<>(readToString(ephemeralWorkflowWithEphemeralTasks), headers); restTemplate.postForEntity(url("/api/workflow/"), request, String.class); LOGGER.info("Ephemeral Kitchen sink workflow with ephemeral tasks is created!"); } private String readToString(Resource resource) throws IOException { return FileCopyUtils.copyToString(new InputStreamReader(resource.getInputStream())); } private String url(String path) { return "http://localhost:" + port + path; } }
7,024
0
Create_ds/conductor/rest/src/main/java/com/netflix/conductor/rest
Create_ds/conductor/rest/src/main/java/com/netflix/conductor/rest/controllers/WorkflowBulkResource.java
/* * Copyright 2021 Netflix, Inc. * <p> * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * <p> * http://www.apache.org/licenses/LICENSE-2.0 * <p> * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. */ package com.netflix.conductor.rest.controllers; import java.util.List; import org.springframework.web.bind.annotation.PostMapping; import org.springframework.web.bind.annotation.PutMapping; import org.springframework.web.bind.annotation.RequestBody; import org.springframework.web.bind.annotation.RequestMapping; import org.springframework.web.bind.annotation.RequestParam; import org.springframework.web.bind.annotation.RestController; import com.netflix.conductor.common.model.BulkResponse; import com.netflix.conductor.service.WorkflowBulkService; import io.swagger.v3.oas.annotations.Operation; import static com.netflix.conductor.rest.config.RequestMappingConstants.WORKFLOW_BULK; /** Synchronous Bulk APIs to process the workflows in batches */ @RestController @RequestMapping(WORKFLOW_BULK) public class WorkflowBulkResource { private final WorkflowBulkService workflowBulkService; public WorkflowBulkResource(WorkflowBulkService workflowBulkService) { this.workflowBulkService = workflowBulkService; } /** * Pause the list of workflows. 
* * @param workflowIds - list of workflow Ids to perform pause operation on * @return bulk response object containing a list of succeeded workflows and a list of failed * ones with errors */ @PutMapping("/pause") @Operation(summary = "Pause the list of workflows") public BulkResponse pauseWorkflow(@RequestBody List<String> workflowIds) { return workflowBulkService.pauseWorkflow(workflowIds); } /** * Resume the list of workflows. * * @param workflowIds - list of workflow Ids to perform resume operation on * @return bulk response object containing a list of succeeded workflows and a list of failed * ones with errors */ @PutMapping("/resume") @Operation(summary = "Resume the list of workflows") public BulkResponse resumeWorkflow(@RequestBody List<String> workflowIds) { return workflowBulkService.resumeWorkflow(workflowIds); } /** * Restart the list of workflows. * * @param workflowIds - list of workflow Ids to perform restart operation on * @param useLatestDefinitions if true, use latest workflow and task definitions upon restart * @return bulk response object containing a list of succeeded workflows and a list of failed * ones with errors */ @PostMapping("/restart") @Operation(summary = "Restart the list of completed workflow") public BulkResponse restart( @RequestBody List<String> workflowIds, @RequestParam(value = "useLatestDefinitions", defaultValue = "false", required = false) boolean useLatestDefinitions) { return workflowBulkService.restart(workflowIds, useLatestDefinitions); } /** * Retry the last failed task for each workflow from the list. 
* * @param workflowIds - list of workflow Ids to perform retry operation on * @return bulk response object containing a list of succeeded workflows and a list of failed * ones with errors */ @PostMapping("/retry") @Operation(summary = "Retry the last failed task for each workflow from the list") public BulkResponse retry(@RequestBody List<String> workflowIds) { return workflowBulkService.retry(workflowIds); } /** * Terminate workflows execution. * * @param workflowIds - list of workflow Ids to perform terminate operation on * @param reason - description to be specified for the terminated workflow for future * references. * @return bulk response object containing a list of succeeded workflows and a list of failed * ones with errors */ @PostMapping("/terminate") @Operation(summary = "Terminate workflows execution") public BulkResponse terminate( @RequestBody List<String> workflowIds, @RequestParam(value = "reason", required = false) String reason) { return workflowBulkService.terminate(workflowIds, reason); } }
7,025
0
Create_ds/conductor/rest/src/main/java/com/netflix/conductor/rest
Create_ds/conductor/rest/src/main/java/com/netflix/conductor/rest/controllers/EventResource.java
/* * Copyright 2020 Netflix, Inc. * <p> * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * <p> * http://www.apache.org/licenses/LICENSE-2.0 * <p> * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. */ package com.netflix.conductor.rest.controllers; import java.util.List; import org.springframework.web.bind.annotation.DeleteMapping; import org.springframework.web.bind.annotation.GetMapping; import org.springframework.web.bind.annotation.PathVariable; import org.springframework.web.bind.annotation.PostMapping; import org.springframework.web.bind.annotation.PutMapping; import org.springframework.web.bind.annotation.RequestBody; import org.springframework.web.bind.annotation.RequestMapping; import org.springframework.web.bind.annotation.RequestParam; import org.springframework.web.bind.annotation.RestController; import com.netflix.conductor.common.metadata.events.EventHandler; import com.netflix.conductor.service.EventService; import io.swagger.v3.oas.annotations.Operation; import static com.netflix.conductor.rest.config.RequestMappingConstants.EVENT; @RestController @RequestMapping(EVENT) public class EventResource { private final EventService eventService; public EventResource(EventService eventService) { this.eventService = eventService; } @PostMapping @Operation(summary = "Add a new event handler.") public void addEventHandler(@RequestBody EventHandler eventHandler) { eventService.addEventHandler(eventHandler); } @PutMapping @Operation(summary = "Update an existing event handler.") public void updateEventHandler(@RequestBody EventHandler eventHandler) { 
eventService.updateEventHandler(eventHandler); } @DeleteMapping("/{name}") @Operation(summary = "Remove an event handler") public void removeEventHandlerStatus(@PathVariable("name") String name) { eventService.removeEventHandlerStatus(name); } @GetMapping @Operation(summary = "Get all the event handlers") public List<EventHandler> getEventHandlers() { return eventService.getEventHandlers(); } @GetMapping("/{event}") @Operation(summary = "Get event handlers for a given event") public List<EventHandler> getEventHandlersForEvent( @PathVariable("event") String event, @RequestParam(value = "activeOnly", defaultValue = "true", required = false) boolean activeOnly) { return eventService.getEventHandlersForEvent(event, activeOnly); } }
7,026
0
Create_ds/conductor/rest/src/main/java/com/netflix/conductor/rest
Create_ds/conductor/rest/src/main/java/com/netflix/conductor/rest/controllers/ApplicationExceptionMapper.java
/* * Copyright 2020 Netflix, Inc. * <p> * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * <p> * http://www.apache.org/licenses/LICENSE-2.0 * <p> * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. */ package com.netflix.conductor.rest.controllers; import java.util.HashMap; import java.util.Map; import javax.servlet.http.HttpServletRequest; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.springframework.core.annotation.Order; import org.springframework.http.HttpStatus; import org.springframework.http.ResponseEntity; import org.springframework.web.bind.annotation.ExceptionHandler; import org.springframework.web.bind.annotation.RestControllerAdvice; import com.netflix.conductor.common.validation.ErrorResponse; import com.netflix.conductor.core.exception.ConflictException; import com.netflix.conductor.core.exception.NotFoundException; import com.netflix.conductor.core.exception.TransientException; import com.netflix.conductor.core.utils.Utils; import com.netflix.conductor.metrics.Monitors; import com.fasterxml.jackson.databind.exc.InvalidFormatException; @RestControllerAdvice @Order(ValidationExceptionMapper.ORDER + 1) public class ApplicationExceptionMapper { private static final Logger LOGGER = LoggerFactory.getLogger(ApplicationExceptionMapper.class); private final String host = Utils.getServerId(); private static final Map<Class<? 
extends Throwable>, HttpStatus> EXCEPTION_STATUS_MAP = new HashMap<>(); static { EXCEPTION_STATUS_MAP.put(NotFoundException.class, HttpStatus.NOT_FOUND); EXCEPTION_STATUS_MAP.put(ConflictException.class, HttpStatus.CONFLICT); EXCEPTION_STATUS_MAP.put(IllegalArgumentException.class, HttpStatus.BAD_REQUEST); EXCEPTION_STATUS_MAP.put(InvalidFormatException.class, HttpStatus.INTERNAL_SERVER_ERROR); } @ExceptionHandler(Throwable.class) public ResponseEntity<ErrorResponse> handleAll(HttpServletRequest request, Throwable th) { logException(request, th); HttpStatus status = EXCEPTION_STATUS_MAP.getOrDefault(th.getClass(), HttpStatus.INTERNAL_SERVER_ERROR); ErrorResponse errorResponse = new ErrorResponse(); errorResponse.setInstance(host); errorResponse.setStatus(status.value()); errorResponse.setMessage(th.getMessage()); errorResponse.setRetryable( th instanceof TransientException); // set it to true for TransientException Monitors.error("error", String.valueOf(status.value())); return new ResponseEntity<>(errorResponse, status); } private void logException(HttpServletRequest request, Throwable exception) { LOGGER.error( "Error {} url: '{}'", exception.getClass().getSimpleName(), request.getRequestURI(), exception); } }
7,027
0
Create_ds/conductor/rest/src/main/java/com/netflix/conductor/rest
Create_ds/conductor/rest/src/main/java/com/netflix/conductor/rest/controllers/WorkflowResource.java
/* * Copyright 2020 Netflix, Inc. * <p> * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * <p> * http://www.apache.org/licenses/LICENSE-2.0 * <p> * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. */ package com.netflix.conductor.rest.controllers; import java.util.List; import java.util.Map; import org.springframework.http.HttpStatus; import org.springframework.web.bind.annotation.DeleteMapping; import org.springframework.web.bind.annotation.GetMapping; import org.springframework.web.bind.annotation.PathVariable; import org.springframework.web.bind.annotation.PostMapping; import org.springframework.web.bind.annotation.PutMapping; import org.springframework.web.bind.annotation.RequestBody; import org.springframework.web.bind.annotation.RequestMapping; import org.springframework.web.bind.annotation.RequestParam; import org.springframework.web.bind.annotation.ResponseStatus; import org.springframework.web.bind.annotation.RestController; import com.netflix.conductor.common.metadata.workflow.RerunWorkflowRequest; import com.netflix.conductor.common.metadata.workflow.SkipTaskRequest; import com.netflix.conductor.common.metadata.workflow.StartWorkflowRequest; import com.netflix.conductor.common.run.*; import com.netflix.conductor.service.WorkflowService; import com.netflix.conductor.service.WorkflowTestService; import io.swagger.v3.oas.annotations.Operation; import static com.netflix.conductor.rest.config.RequestMappingConstants.WORKFLOW; import static org.springframework.http.MediaType.APPLICATION_JSON_VALUE; import static org.springframework.http.MediaType.TEXT_PLAIN_VALUE; @RestController 
@RequestMapping(WORKFLOW) public class WorkflowResource { private final WorkflowService workflowService; private final WorkflowTestService workflowTestService; public WorkflowResource( WorkflowService workflowService, WorkflowTestService workflowTestService) { this.workflowService = workflowService; this.workflowTestService = workflowTestService; } @PostMapping(produces = TEXT_PLAIN_VALUE) @Operation( summary = "Start a new workflow with StartWorkflowRequest, which allows task to be executed in a domain") public String startWorkflow(@RequestBody StartWorkflowRequest request) { return workflowService.startWorkflow(request); } @PostMapping(value = "/{name}", produces = TEXT_PLAIN_VALUE) @Operation( summary = "Start a new workflow. Returns the ID of the workflow instance that can be later used for tracking") public String startWorkflow( @PathVariable("name") String name, @RequestParam(value = "version", required = false) Integer version, @RequestParam(value = "correlationId", required = false) String correlationId, @RequestParam(value = "priority", defaultValue = "0", required = false) int priority, @RequestBody Map<String, Object> input) { return workflowService.startWorkflow(name, version, correlationId, priority, input); } @GetMapping("/{name}/correlated/{correlationId}") @Operation(summary = "Lists workflows for the given correlation id") public List<Workflow> getWorkflows( @PathVariable("name") String name, @PathVariable("correlationId") String correlationId, @RequestParam(value = "includeClosed", defaultValue = "false", required = false) boolean includeClosed, @RequestParam(value = "includeTasks", defaultValue = "false", required = false) boolean includeTasks) { return workflowService.getWorkflows(name, correlationId, includeClosed, includeTasks); } @PostMapping(value = "/{name}/correlated") @Operation(summary = "Lists workflows for the given correlation id list") public Map<String, List<Workflow>> getWorkflows( @PathVariable("name") String name, 
@RequestParam(value = "includeClosed", defaultValue = "false", required = false) boolean includeClosed, @RequestParam(value = "includeTasks", defaultValue = "false", required = false) boolean includeTasks, @RequestBody List<String> correlationIds) { return workflowService.getWorkflows(name, includeClosed, includeTasks, correlationIds); } @GetMapping("/{workflowId}") @Operation(summary = "Gets the workflow by workflow id") public Workflow getExecutionStatus( @PathVariable("workflowId") String workflowId, @RequestParam(value = "includeTasks", defaultValue = "true", required = false) boolean includeTasks) { return workflowService.getExecutionStatus(workflowId, includeTasks); } @DeleteMapping("/{workflowId}/remove") @Operation(summary = "Removes the workflow from the system") public void delete( @PathVariable("workflowId") String workflowId, @RequestParam(value = "archiveWorkflow", defaultValue = "true", required = false) boolean archiveWorkflow) { workflowService.deleteWorkflow(workflowId, archiveWorkflow); } @GetMapping("/running/{name}") @Operation(summary = "Retrieve all the running workflows") public List<String> getRunningWorkflow( @PathVariable("name") String workflowName, @RequestParam(value = "version", defaultValue = "1", required = false) int version, @RequestParam(value = "startTime", required = false) Long startTime, @RequestParam(value = "endTime", required = false) Long endTime) { return workflowService.getRunningWorkflows(workflowName, version, startTime, endTime); } @PutMapping("/decide/{workflowId}") @Operation(summary = "Starts the decision task for a workflow") public void decide(@PathVariable("workflowId") String workflowId) { workflowService.decideWorkflow(workflowId); } @PutMapping("/{workflowId}/pause") @Operation(summary = "Pauses the workflow") public void pauseWorkflow(@PathVariable("workflowId") String workflowId) { workflowService.pauseWorkflow(workflowId); } @PutMapping("/{workflowId}/resume") @Operation(summary = "Resumes the workflow") 
public void resumeWorkflow(@PathVariable("workflowId") String workflowId) { workflowService.resumeWorkflow(workflowId); } @PutMapping("/{workflowId}/skiptask/{taskReferenceName}") @Operation(summary = "Skips a given task from a current running workflow") public void skipTaskFromWorkflow( @PathVariable("workflowId") String workflowId, @PathVariable("taskReferenceName") String taskReferenceName, SkipTaskRequest skipTaskRequest) { workflowService.skipTaskFromWorkflow(workflowId, taskReferenceName, skipTaskRequest); } @PostMapping(value = "/{workflowId}/rerun", produces = TEXT_PLAIN_VALUE) @Operation(summary = "Reruns the workflow from a specific task") public String rerun( @PathVariable("workflowId") String workflowId, @RequestBody RerunWorkflowRequest request) { return workflowService.rerunWorkflow(workflowId, request); } @PostMapping("/{workflowId}/restart") @Operation(summary = "Restarts a completed workflow") @ResponseStatus( value = HttpStatus.NO_CONTENT) // for backwards compatibility with 2.x client which // expects a 204 for this request public void restart( @PathVariable("workflowId") String workflowId, @RequestParam(value = "useLatestDefinitions", defaultValue = "false", required = false) boolean useLatestDefinitions) { workflowService.restartWorkflow(workflowId, useLatestDefinitions); } @PostMapping("/{workflowId}/retry") @Operation(summary = "Retries the last failed task") @ResponseStatus( value = HttpStatus.NO_CONTENT) // for backwards compatibility with 2.x client which // expects a 204 for this request public void retry( @PathVariable("workflowId") String workflowId, @RequestParam( value = "resumeSubworkflowTasks", defaultValue = "false", required = false) boolean resumeSubworkflowTasks) { workflowService.retryWorkflow(workflowId, resumeSubworkflowTasks); } @PostMapping("/{workflowId}/resetcallbacks") @Operation(summary = "Resets callback times of all non-terminal SIMPLE tasks to 0") @ResponseStatus( value = HttpStatus.NO_CONTENT) // for backwards 
compatibility with 2.x client which // expects a 204 for this request public void resetWorkflow(@PathVariable("workflowId") String workflowId) { workflowService.resetWorkflow(workflowId); } @DeleteMapping("/{workflowId}") @Operation(summary = "Terminate workflow execution") public void terminate( @PathVariable("workflowId") String workflowId, @RequestParam(value = "reason", required = false) String reason) { workflowService.terminateWorkflow(workflowId, reason); } @Operation( summary = "Search for workflows based on payload and other parameters", description = "use sort options as sort=<field>:ASC|DESC e.g. sort=name&sort=workflowId:DESC." + " If order is not specified, defaults to ASC.") @GetMapping(value = "/search") public SearchResult<WorkflowSummary> search( @RequestParam(value = "start", defaultValue = "0", required = false) int start, @RequestParam(value = "size", defaultValue = "100", required = false) int size, @RequestParam(value = "sort", required = false) String sort, @RequestParam(value = "freeText", defaultValue = "*", required = false) String freeText, @RequestParam(value = "query", required = false) String query) { return workflowService.searchWorkflows(start, size, sort, freeText, query); } @Operation( summary = "Search for workflows based on payload and other parameters", description = "use sort options as sort=<field>:ASC|DESC e.g. sort=name&sort=workflowId:DESC." 
+ " If order is not specified, defaults to ASC.") @GetMapping(value = "/search-v2") public SearchResult<Workflow> searchV2( @RequestParam(value = "start", defaultValue = "0", required = false) int start, @RequestParam(value = "size", defaultValue = "100", required = false) int size, @RequestParam(value = "sort", required = false) String sort, @RequestParam(value = "freeText", defaultValue = "*", required = false) String freeText, @RequestParam(value = "query", required = false) String query) { return workflowService.searchWorkflowsV2(start, size, sort, freeText, query); } @Operation( summary = "Search for workflows based on task parameters", description = "use sort options as sort=<field>:ASC|DESC e.g. sort=name&sort=workflowId:DESC." + " If order is not specified, defaults to ASC") @GetMapping(value = "/search-by-tasks") public SearchResult<WorkflowSummary> searchWorkflowsByTasks( @RequestParam(value = "start", defaultValue = "0", required = false) int start, @RequestParam(value = "size", defaultValue = "100", required = false) int size, @RequestParam(value = "sort", required = false) String sort, @RequestParam(value = "freeText", defaultValue = "*", required = false) String freeText, @RequestParam(value = "query", required = false) String query) { return workflowService.searchWorkflowsByTasks(start, size, sort, freeText, query); } @Operation( summary = "Search for workflows based on task parameters", description = "use sort options as sort=<field>:ASC|DESC e.g. sort=name&sort=workflowId:DESC." 
+ " If order is not specified, defaults to ASC") @GetMapping(value = "/search-by-tasks-v2") public SearchResult<Workflow> searchWorkflowsByTasksV2( @RequestParam(value = "start", defaultValue = "0", required = false) int start, @RequestParam(value = "size", defaultValue = "100", required = false) int size, @RequestParam(value = "sort", required = false) String sort, @RequestParam(value = "freeText", defaultValue = "*", required = false) String freeText, @RequestParam(value = "query", required = false) String query) { return workflowService.searchWorkflowsByTasksV2(start, size, sort, freeText, query); } @Operation( summary = "Get the uri and path of the external storage where the workflow payload is to be stored") @GetMapping("/externalstoragelocation") public ExternalStorageLocation getExternalStorageLocation( @RequestParam("path") String path, @RequestParam("operation") String operation, @RequestParam("payloadType") String payloadType) { return workflowService.getExternalStorageLocation(path, operation, payloadType); } @PostMapping(value = "test", produces = APPLICATION_JSON_VALUE) @Operation(summary = "Test workflow execution using mock data") public Workflow testWorkflow(@RequestBody WorkflowTestRequest request) { return workflowTestService.testWorkflow(request); } }
7,028
0
Create_ds/conductor/rest/src/main/java/com/netflix/conductor/rest
Create_ds/conductor/rest/src/main/java/com/netflix/conductor/rest/controllers/ValidationExceptionMapper.java
/* * Copyright 2020 Netflix, Inc. * <p> * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * <p> * http://www.apache.org/licenses/LICENSE-2.0 * <p> * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. */ package com.netflix.conductor.rest.controllers; import java.util.ArrayList; import java.util.Arrays; import java.util.List; import javax.servlet.http.HttpServletRequest; import javax.validation.ConstraintViolation; import javax.validation.ConstraintViolationException; import javax.validation.ValidationException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.springframework.core.Ordered; import org.springframework.core.annotation.Order; import org.springframework.http.HttpStatus; import org.springframework.http.ResponseEntity; import org.springframework.web.bind.annotation.ExceptionHandler; import org.springframework.web.bind.annotation.RestControllerAdvice; import com.netflix.conductor.common.validation.ErrorResponse; import com.netflix.conductor.common.validation.ValidationError; import com.netflix.conductor.core.utils.Utils; import com.netflix.conductor.metrics.Monitors; /** This class converts Hibernate {@link ValidationException} into http response. 
*/ @RestControllerAdvice @Order(ValidationExceptionMapper.ORDER) public class ValidationExceptionMapper { private static final Logger LOGGER = LoggerFactory.getLogger(ApplicationExceptionMapper.class); public static final int ORDER = Ordered.HIGHEST_PRECEDENCE; private final String host = Utils.getServerId(); @ExceptionHandler(ValidationException.class) public ResponseEntity<ErrorResponse> toResponse( HttpServletRequest request, ValidationException exception) { logException(request, exception); HttpStatus httpStatus; if (exception instanceof ConstraintViolationException) { httpStatus = HttpStatus.BAD_REQUEST; } else { httpStatus = HttpStatus.INTERNAL_SERVER_ERROR; Monitors.error("error", "error"); } return new ResponseEntity<>(toErrorResponse(exception), httpStatus); } private ErrorResponse toErrorResponse(ValidationException ve) { if (ve instanceof ConstraintViolationException) { return constraintViolationExceptionToErrorResponse((ConstraintViolationException) ve); } else { ErrorResponse result = new ErrorResponse(); result.setStatus(HttpStatus.INTERNAL_SERVER_ERROR.value()); result.setMessage(ve.getMessage()); result.setInstance(host); return result; } } private ErrorResponse constraintViolationExceptionToErrorResponse( ConstraintViolationException exception) { ErrorResponse errorResponse = new ErrorResponse(); errorResponse.setStatus(HttpStatus.BAD_REQUEST.value()); errorResponse.setMessage("Validation failed, check below errors for detail."); List<ValidationError> validationErrors = new ArrayList<>(); exception .getConstraintViolations() .forEach( e -> validationErrors.add( new ValidationError( getViolationPath(e), e.getMessage(), getViolationInvalidValue(e.getInvalidValue())))); errorResponse.setValidationErrors(validationErrors); return errorResponse; } private String getViolationPath(final ConstraintViolation<?> violation) { final String propertyPath = violation.getPropertyPath().toString(); return !"".equals(propertyPath) ? 
propertyPath : ""; } private String getViolationInvalidValue(final Object invalidValue) { if (invalidValue == null) { return null; } if (invalidValue.getClass().isArray()) { if (invalidValue instanceof Object[]) { // not helpful to return object array, skip it. return null; } else if (invalidValue instanceof boolean[]) { return Arrays.toString((boolean[]) invalidValue); } else if (invalidValue instanceof byte[]) { return Arrays.toString((byte[]) invalidValue); } else if (invalidValue instanceof char[]) { return Arrays.toString((char[]) invalidValue); } else if (invalidValue instanceof double[]) { return Arrays.toString((double[]) invalidValue); } else if (invalidValue instanceof float[]) { return Arrays.toString((float[]) invalidValue); } else if (invalidValue instanceof int[]) { return Arrays.toString((int[]) invalidValue); } else if (invalidValue instanceof long[]) { return Arrays.toString((long[]) invalidValue); } else if (invalidValue instanceof short[]) { return Arrays.toString((short[]) invalidValue); } } // It is only helpful to return invalid value of primitive types if (invalidValue.getClass().getName().startsWith("java.lang.")) { return invalidValue.toString(); } return null; } private void logException(HttpServletRequest request, ValidationException exception) { LOGGER.error( "Error {} url: '{}'", exception.getClass().getSimpleName(), request.getRequestURI(), exception); } }
7,029
0
Create_ds/conductor/rest/src/main/java/com/netflix/conductor/rest
Create_ds/conductor/rest/src/main/java/com/netflix/conductor/rest/controllers/HealthCheckResource.java
/* * Copyright 2020 Netflix, Inc. * <p> * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * <p> * http://www.apache.org/licenses/LICENSE-2.0 * <p> * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. */ package com.netflix.conductor.rest.controllers; import java.util.Collections; import org.springframework.web.bind.annotation.GetMapping; import org.springframework.web.bind.annotation.RequestMapping; import org.springframework.web.bind.annotation.RestController; import com.netflix.runtime.health.api.HealthCheckStatus; @RestController @RequestMapping("/health") public class HealthCheckResource { // SBMTODO: Move this Spring boot health check @GetMapping public HealthCheckStatus doCheck() throws Exception { return HealthCheckStatus.create(true, Collections.emptyList()); } }
7,030
0
Create_ds/conductor/rest/src/main/java/com/netflix/conductor/rest
Create_ds/conductor/rest/src/main/java/com/netflix/conductor/rest/controllers/QueueAdminResource.java
/* * Copyright 2022 Netflix, Inc. * <p> * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * <p> * http://www.apache.org/licenses/LICENSE-2.0 * <p> * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. */ package com.netflix.conductor.rest.controllers; import java.util.Map; import org.springframework.web.bind.annotation.GetMapping; import org.springframework.web.bind.annotation.PathVariable; import org.springframework.web.bind.annotation.PostMapping; import org.springframework.web.bind.annotation.RequestBody; import org.springframework.web.bind.annotation.RequestMapping; import org.springframework.web.bind.annotation.RestController; import com.netflix.conductor.core.events.queue.DefaultEventQueueProcessor; import com.netflix.conductor.model.TaskModel.Status; import io.swagger.v3.oas.annotations.Operation; import static com.netflix.conductor.rest.config.RequestMappingConstants.QUEUE; @RestController @RequestMapping(QUEUE) public class QueueAdminResource { private final DefaultEventQueueProcessor defaultEventQueueProcessor; public QueueAdminResource(DefaultEventQueueProcessor defaultEventQueueProcessor) { this.defaultEventQueueProcessor = defaultEventQueueProcessor; } @Operation(summary = "Get the queue length") @GetMapping(value = "/size") public Map<String, Long> size() { return defaultEventQueueProcessor.size(); } @Operation(summary = "Get Queue Names") @GetMapping(value = "/") public Map<Status, String> names() { return defaultEventQueueProcessor.queues(); } @Operation(summary = "Publish a message in queue to mark a wait task as completed.") @PostMapping(value = 
"/update/{workflowId}/{taskRefName}/{status}") public void update( @PathVariable("workflowId") String workflowId, @PathVariable("taskRefName") String taskRefName, @PathVariable("status") Status status, @RequestBody Map<String, Object> output) throws Exception { defaultEventQueueProcessor.updateByTaskRefName(workflowId, taskRefName, output, status); } @Operation(summary = "Publish a message in queue to mark a wait task (by taskId) as completed.") @PostMapping("/update/{workflowId}/task/{taskId}/{status}") public void updateByTaskId( @PathVariable("workflowId") String workflowId, @PathVariable("taskId") String taskId, @PathVariable("status") Status status, @RequestBody Map<String, Object> output) throws Exception { defaultEventQueueProcessor.updateByTaskId(workflowId, taskId, output, status); } }
7,031
0
Create_ds/conductor/rest/src/main/java/com/netflix/conductor/rest
Create_ds/conductor/rest/src/main/java/com/netflix/conductor/rest/controllers/AdminResource.java
/* * Copyright 2020 Netflix, Inc. * <p> * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * <p> * http://www.apache.org/licenses/LICENSE-2.0 * <p> * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. */ package com.netflix.conductor.rest.controllers; import java.util.List; import java.util.Map; import org.springframework.web.bind.annotation.GetMapping; import org.springframework.web.bind.annotation.PathVariable; import org.springframework.web.bind.annotation.PostMapping; import org.springframework.web.bind.annotation.RequestMapping; import org.springframework.web.bind.annotation.RequestParam; import org.springframework.web.bind.annotation.RestController; import com.netflix.conductor.common.metadata.tasks.Task; import com.netflix.conductor.service.AdminService; import io.swagger.v3.oas.annotations.Operation; import static com.netflix.conductor.rest.config.RequestMappingConstants.ADMIN; import static org.springframework.http.MediaType.TEXT_PLAIN_VALUE; @RestController @RequestMapping(ADMIN) public class AdminResource { private final AdminService adminService; public AdminResource(AdminService adminService) { this.adminService = adminService; } @Operation(summary = "Get all the configuration parameters") @GetMapping("/config") public Map<String, Object> getAllConfig() { return adminService.getAllConfig(); } @GetMapping("/task/{tasktype}") @Operation(summary = "Get the list of pending tasks for a given task type") public List<Task> view( @PathVariable("tasktype") String taskType, @RequestParam(value = "start", defaultValue = "0", required = false) int start, @RequestParam(value = "count", 
defaultValue = "100", required = false) int count) { return adminService.getListOfPendingTask(taskType, start, count); } @PostMapping(value = "/sweep/requeue/{workflowId}", produces = TEXT_PLAIN_VALUE) @Operation(summary = "Queue up all the running workflows for sweep") public String requeueSweep(@PathVariable("workflowId") String workflowId) { return adminService.requeueSweep(workflowId); } @PostMapping(value = "/consistency/verifyAndRepair/{workflowId}", produces = TEXT_PLAIN_VALUE) @Operation(summary = "Verify and repair workflow consistency") public String verifyAndRepairWorkflowConsistency( @PathVariable("workflowId") String workflowId) { return String.valueOf(adminService.verifyAndRepairWorkflowConsistency(workflowId)); } @GetMapping("/queues") @Operation(summary = "Get registered queues") public Map<String, ?> getEventQueues( @RequestParam(value = "verbose", defaultValue = "false", required = false) boolean verbose) { return adminService.getEventQueues(verbose); } }
7,032
0
Create_ds/conductor/rest/src/main/java/com/netflix/conductor/rest
Create_ds/conductor/rest/src/main/java/com/netflix/conductor/rest/controllers/TaskResource.java
/*
 * Copyright 2021 Netflix, Inc.
 * <p>
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
 * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations under the License.
 */
package com.netflix.conductor.rest.controllers;

import java.util.List;
import java.util.Map;
import java.util.Optional;

import org.springframework.http.ResponseEntity;
import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.bind.annotation.PathVariable;
import org.springframework.web.bind.annotation.PostMapping;
import org.springframework.web.bind.annotation.RequestBody;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RequestParam;
import org.springframework.web.bind.annotation.RestController;

import com.netflix.conductor.common.metadata.tasks.PollData;
import com.netflix.conductor.common.metadata.tasks.Task;
import com.netflix.conductor.common.metadata.tasks.TaskExecLog;
import com.netflix.conductor.common.metadata.tasks.TaskResult;
import com.netflix.conductor.common.run.ExternalStorageLocation;
import com.netflix.conductor.common.run.SearchResult;
import com.netflix.conductor.common.run.TaskSummary;
import com.netflix.conductor.service.TaskService;

import io.swagger.v3.oas.annotations.Operation;

import static com.netflix.conductor.rest.config.RequestMappingConstants.TASKS;
import static org.springframework.http.MediaType.TEXT_PLAIN_VALUE;

/**
 * REST endpoints for task polling, updating, queue inspection and search. All business logic is
 * delegated to {@link TaskService}; this class only handles HTTP binding.
 */
@RestController
@RequestMapping(value = TASKS)
public class TaskResource {

    private final TaskService taskService;

    public TaskResource(TaskService taskService) {
        this.taskService = taskService;
    }

    /**
     * Polls for a single task of the given type; returns HTTP 204 when no task is available, for
     * backwards compatibility with 2.x clients.
     */
    @Operation(summary = "Poll for a task of a certain type")
    @GetMapping("/poll/{tasktype}")
    public ResponseEntity<Task> poll(
            @PathVariable("tasktype") String taskType,
            @RequestParam(value = "workerid", required = false) String workerId,
            @RequestParam(value = "domain", required = false) String domain) {
        // 2.x clients expect a 204 (no content) when no Task is found
        Task task = taskService.poll(taskType, workerId, domain);
        if (task == null) {
            return ResponseEntity.noContent().build();
        }
        return ResponseEntity.ok(task);
    }

    /**
     * Polls for up to {@code count} tasks of the given type, waiting up to {@code timeout} for
     * availability; returns HTTP 204 when nothing is available (2.x client compatibility).
     */
    @Operation(summary = "Batch poll for a task of a certain type")
    @GetMapping("/poll/batch/{tasktype}")
    public ResponseEntity<List<Task>> batchPoll(
            @PathVariable("tasktype") String taskType,
            @RequestParam(value = "workerid", required = false) String workerId,
            @RequestParam(value = "domain", required = false) String domain,
            @RequestParam(value = "count", defaultValue = "1") int count,
            @RequestParam(value = "timeout", defaultValue = "100") int timeout) {
        // 2.x clients expect a 204 (no content) when no Task is found
        List<Task> tasks = taskService.batchPoll(taskType, workerId, domain, count, timeout);
        if (tasks == null) {
            return ResponseEntity.noContent().build();
        }
        return ResponseEntity.ok(tasks);
    }

    /** Applies a worker's {@link TaskResult} to the task; returns a plain-text status. */
    @Operation(summary = "Update a task")
    @PostMapping(produces = TEXT_PLAIN_VALUE)
    public String updateTask(@RequestBody TaskResult taskResult) {
        return taskService.updateTask(taskResult);
    }

    /** Appends a log line to the task's execution log. */
    @Operation(summary = "Log Task Execution Details")
    @PostMapping("/{taskId}/log")
    public void log(@PathVariable("taskId") String taskId, @RequestBody String log) {
        taskService.log(taskId, log);
    }

    /** Returns the execution logs recorded for the task. */
    @Operation(summary = "Get Task Execution Logs")
    @GetMapping("/{taskId}/log")
    public List<TaskExecLog> getTaskLogs(@PathVariable("taskId") String taskId) {
        return taskService.getTaskLogs(taskId);
    }

    /**
     * Looks up a task by id; returns HTTP 204 when it does not exist (2.x client compatibility).
     */
    @Operation(summary = "Get task by Id")
    @GetMapping("/{taskId}")
    public ResponseEntity<Task> getTask(@PathVariable("taskId") String taskId) {
        // 2.x clients expect a 204 (no content) when no Task is found
        return Optional.ofNullable(taskService.getTask(taskId))
                .map(ResponseEntity::ok)
                .orElseGet(() -> ResponseEntity.noContent().build());
    }

    /** Returns queue sizes for the given task types. Deprecated in favor of {@code /queue/size}. */
    @Operation(summary = "Deprecated. Please use /tasks/queue/size endpoint")
    @GetMapping("/queue/sizes")
    @Deprecated
    public Map<String, Integer> size(
            @RequestParam(value = "taskType", required = false) List<String> taskTypes) {
        return taskService.getTaskQueueSizes(taskTypes);
    }

    /** Returns the queue depth for one task type, optionally scoped by domain/isolation/namespace. */
    @Operation(summary = "Get queue size for a task type.")
    @GetMapping("/queue/size")
    public Integer taskDepth(
            @RequestParam("taskType") String taskType,
            @RequestParam(value = "domain", required = false) String domain,
            @RequestParam(value = "isolationGroupId", required = false) String isolationGroupId,
            @RequestParam(value = "executionNamespace", required = false)
                    String executionNamespace) {
        return taskService.getTaskQueueSize(taskType, domain, executionNamespace, isolationGroupId);
    }

    /** Returns detailed (verbose) information for every queue. */
    @Operation(summary = "Get the details about each queue")
    @GetMapping("/queue/all/verbose")
    public Map<String, Map<String, Map<String, Long>>> allVerbose() {
        return taskService.allVerbose();
    }

    /** Returns summary information for every queue. */
    @Operation(summary = "Get the details about each queue")
    @GetMapping("/queue/all")
    public Map<String, Long> all() {
        return taskService.getAllQueueDetails();
    }

    /** Returns the last poll data recorded for the given task type. */
    @Operation(summary = "Get the last poll data for a given task type")
    @GetMapping("/queue/polldata")
    public List<PollData> getPollData(@RequestParam("taskType") String taskType) {
        return taskService.getPollData(taskType);
    }

    /** Returns the last poll data recorded for every task type. */
    @Operation(summary = "Get the last poll data for all task types")
    @GetMapping("/queue/polldata/all")
    public List<PollData> getAllPollData() {
        return taskService.getAllPollData();
    }

    /** Requeues all pending tasks of the given type; returns a plain-text result. */
    @Operation(summary = "Requeue pending tasks")
    @PostMapping(value = "/queue/requeue/{taskType}", produces = TEXT_PLAIN_VALUE)
    public String requeuePendingTask(@PathVariable("taskType") String taskType) {
        return taskService.requeuePendingTask(taskType);
    }

    /** Searches task summaries by free text / query with pagination and sorting. */
    @Operation(
            summary = "Search for tasks based in payload and other parameters",
            description =
                    "use sort options as sort=<field>:ASC|DESC e.g. sort=name&sort=workflowId:DESC."
                            + " If order is not specified, defaults to ASC")
    @GetMapping(value = "/search")
    public SearchResult<TaskSummary> search(
            @RequestParam(value = "start", defaultValue = "0", required = false) int start,
            @RequestParam(value = "size", defaultValue = "100", required = false) int size,
            @RequestParam(value = "sort", required = false) String sort,
            @RequestParam(value = "freeText", defaultValue = "*", required = false) String freeText,
            @RequestParam(value = "query", required = false) String query) {
        return taskService.search(start, size, sort, freeText, query);
    }

    /** Like {@link #search}, but returns full {@link Task} objects instead of summaries. */
    @Operation(
            summary = "Search for tasks based in payload and other parameters",
            description =
                    "use sort options as sort=<field>:ASC|DESC e.g. sort=name&sort=workflowId:DESC."
                            + " If order is not specified, defaults to ASC")
    @GetMapping(value = "/search-v2")
    public SearchResult<Task> searchV2(
            @RequestParam(value = "start", defaultValue = "0", required = false) int start,
            @RequestParam(value = "size", defaultValue = "100", required = false) int size,
            @RequestParam(value = "sort", required = false) String sort,
            @RequestParam(value = "freeText", defaultValue = "*", required = false) String freeText,
            @RequestParam(value = "query", required = false) String query) {
        return taskService.searchV2(start, size, sort, freeText, query);
    }

    /** Resolves the external storage location for a task payload. */
    @Operation(summary = "Get the external uri where the task payload is to be stored")
    @GetMapping("/externalstoragelocation")
    public ExternalStorageLocation getExternalStorageLocation(
            @RequestParam("path") String path,
            @RequestParam("operation") String operation,
            @RequestParam("payloadType") String payloadType) {
        return taskService.getExternalStorageLocation(path, operation, payloadType);
    }
}
7,033
0
Create_ds/conductor/rest/src/main/java/com/netflix/conductor/rest
Create_ds/conductor/rest/src/main/java/com/netflix/conductor/rest/controllers/MetadataResource.java
/*
 * Copyright 2020 Netflix, Inc.
 * <p>
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
 * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations under the License.
 */
package com.netflix.conductor.rest.controllers;

import java.util.List;
import java.util.Map;

import org.springframework.web.bind.annotation.DeleteMapping;
import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.bind.annotation.PathVariable;
import org.springframework.web.bind.annotation.PostMapping;
import org.springframework.web.bind.annotation.PutMapping;
import org.springframework.web.bind.annotation.RequestBody;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RequestParam;
import org.springframework.web.bind.annotation.RestController;

import com.netflix.conductor.common.metadata.tasks.TaskDef;
import com.netflix.conductor.common.metadata.workflow.WorkflowDef;
import com.netflix.conductor.common.metadata.workflow.WorkflowDefSummary;
import com.netflix.conductor.common.model.BulkResponse;
import com.netflix.conductor.service.MetadataService;

import io.swagger.v3.oas.annotations.Operation;

import static com.netflix.conductor.rest.config.RequestMappingConstants.METADATA;

/**
 * REST endpoints for workflow and task definition management (CRUD). All business logic is
 * delegated to {@link MetadataService}; this class only handles HTTP binding.
 */
@RestController
@RequestMapping(value = METADATA)
public class MetadataResource {

    private final MetadataService metadataService;

    public MetadataResource(MetadataService metadataService) {
        this.metadataService = metadataService;
    }

    /** Registers a new workflow definition. */
    @Operation(summary = "Create a new workflow definition")
    @PostMapping("/workflow")
    public void create(@RequestBody WorkflowDef workflowDef) {
        metadataService.registerWorkflowDef(workflowDef);
    }

    /** Validates a workflow definition without persisting it. */
    @Operation(summary = "Validates a new workflow definition")
    @PostMapping("/workflow/validate")
    public void validate(@RequestBody WorkflowDef workflowDef) {
        metadataService.validateWorkflowDef(workflowDef);
    }

    /** Creates or updates the supplied workflow definitions in bulk. */
    @Operation(summary = "Create or update workflow definition")
    @PutMapping("/workflow")
    public BulkResponse update(@RequestBody List<WorkflowDef> workflowDefs) {
        return metadataService.updateWorkflowDef(workflowDefs);
    }

    /** Fetches a workflow definition by name; latest version when {@code version} is omitted. */
    @Operation(summary = "Retrieves workflow definition along with blueprint")
    @GetMapping("/workflow/{name}")
    public WorkflowDef get(
            @PathVariable("name") String name,
            @RequestParam(value = "version", required = false) Integer version) {
        return metadataService.getWorkflowDef(name, version);
    }

    /** Fetches all workflow definitions. */
    @Operation(summary = "Retrieves all workflow definition along with blueprint")
    @GetMapping("/workflow")
    public List<WorkflowDef> getAll() {
        return metadataService.getWorkflowDefs();
    }

    /** Lightweight listing: names and versions, without the definition bodies. */
    @Operation(summary = "Returns workflow names and versions only (no definition bodies)")
    @GetMapping("/workflow/names-and-versions")
    public Map<String, ? extends Iterable<WorkflowDefSummary>> getWorkflowNamesAndVersions() {
        return metadataService.getWorkflowNamesAndVersions();
    }

    /** Fetches only the latest version of every workflow definition. */
    @Operation(summary = "Returns only the latest version of all workflow definitions")
    @GetMapping("/workflow/latest-versions")
    public List<WorkflowDef> getAllWorkflowsWithLatestVersions() {
        return metadataService.getWorkflowDefsLatestVersions();
    }

    /** Removes one version of a workflow definition; running workflows are unaffected. */
    @Operation(
            summary =
                    "Removes workflow definition. It does not remove workflows associated with the definition.")
    @DeleteMapping("/workflow/{name}/{version}")
    public void unregisterWorkflowDef(
            @PathVariable("name") String name, @PathVariable("version") Integer version) {
        metadataService.unregisterWorkflowDef(name, version);
    }

    /** Registers the supplied task definitions. */
    @Operation(summary = "Create new task definition(s)")
    @PostMapping("/taskdefs")
    public void registerTaskDef(@RequestBody List<TaskDef> taskDefs) {
        metadataService.registerTaskDef(taskDefs);
    }

    /** Updates an existing task definition. */
    @Operation(summary = "Update an existing task")
    @PutMapping("/taskdefs")
    public void registerTaskDef(@RequestBody TaskDef taskDef) {
        metadataService.updateTaskDef(taskDef);
    }

    /** Fetches all task definitions. */
    @Operation(summary = "Gets all task definition")
    @GetMapping(value = "/taskdefs")
    public List<TaskDef> getTaskDefs() {
        return metadataService.getTaskDefs();
    }

    /** Fetches a single task definition by its type name. */
    @Operation(summary = "Gets the task definition")
    @GetMapping("/taskdefs/{tasktype}")
    public TaskDef getTaskDef(@PathVariable("tasktype") String taskType) {
        return metadataService.getTaskDef(taskType);
    }

    /** Removes a task definition by its type name. */
    @Operation(summary = "Remove a task definition")
    @DeleteMapping("/taskdefs/{tasktype}")
    public void unregisterTaskDef(@PathVariable("tasktype") String taskType) {
        metadataService.unregisterTaskDef(taskType);
    }
}
7,034
0
Create_ds/conductor/cassandra-persistence/src/main/java/com/netflix/conductor/cassandra
Create_ds/conductor/cassandra-persistence/src/main/java/com/netflix/conductor/cassandra/util/Statements.java
/*
 * Copyright 2022 Netflix, Inc.
 * <p>
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
 * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations under the License.
 */
package com.netflix.conductor.cassandra.util;

import com.datastax.driver.core.querybuilder.QueryBuilder;

import static com.netflix.conductor.cassandra.util.Constants.ENTITY_KEY;
import static com.netflix.conductor.cassandra.util.Constants.ENTITY_TYPE_TASK;
import static com.netflix.conductor.cassandra.util.Constants.ENTITY_TYPE_WORKFLOW;
import static com.netflix.conductor.cassandra.util.Constants.EVENT_EXECUTION_ID_KEY;
import static com.netflix.conductor.cassandra.util.Constants.EVENT_HANDLER_KEY;
import static com.netflix.conductor.cassandra.util.Constants.EVENT_HANDLER_NAME_KEY;
import static com.netflix.conductor.cassandra.util.Constants.HANDLERS_KEY;
import static com.netflix.conductor.cassandra.util.Constants.MESSAGE_ID_KEY;
import static com.netflix.conductor.cassandra.util.Constants.PAYLOAD_KEY;
import static com.netflix.conductor.cassandra.util.Constants.SHARD_ID_KEY;
import static com.netflix.conductor.cassandra.util.Constants.TABLE_EVENT_EXECUTIONS;
import static com.netflix.conductor.cassandra.util.Constants.TABLE_EVENT_HANDLERS;
import static com.netflix.conductor.cassandra.util.Constants.TABLE_TASK_DEFS;
import static com.netflix.conductor.cassandra.util.Constants.TABLE_TASK_DEF_LIMIT;
import static com.netflix.conductor.cassandra.util.Constants.TABLE_TASK_LOOKUP;
import static com.netflix.conductor.cassandra.util.Constants.TABLE_WORKFLOWS;
import static com.netflix.conductor.cassandra.util.Constants.TABLE_WORKFLOW_DEFS;
import static com.netflix.conductor.cassandra.util.Constants.TABLE_WORKFLOW_DEFS_INDEX;
import static com.netflix.conductor.cassandra.util.Constants.TASK_DEFINITION_KEY;
import static com.netflix.conductor.cassandra.util.Constants.TASK_DEFS_KEY;
import static com.netflix.conductor.cassandra.util.Constants.TASK_DEF_NAME_KEY;
import static com.netflix.conductor.cassandra.util.Constants.TASK_ID_KEY;
import static com.netflix.conductor.cassandra.util.Constants.TOTAL_PARTITIONS_KEY;
import static com.netflix.conductor.cassandra.util.Constants.TOTAL_TASKS_KEY;
import static com.netflix.conductor.cassandra.util.Constants.WORKFLOW_DEFINITION_KEY;
import static com.netflix.conductor.cassandra.util.Constants.WORKFLOW_DEF_INDEX_KEY;
import static com.netflix.conductor.cassandra.util.Constants.WORKFLOW_DEF_INDEX_VALUE;
import static com.netflix.conductor.cassandra.util.Constants.WORKFLOW_DEF_NAME_KEY;
import static com.netflix.conductor.cassandra.util.Constants.WORKFLOW_DEF_NAME_VERSION_KEY;
import static com.netflix.conductor.cassandra.util.Constants.WORKFLOW_ID_KEY;
import static com.netflix.conductor.cassandra.util.Constants.WORKFLOW_VERSION_KEY;
import static com.datastax.driver.core.querybuilder.QueryBuilder.bindMarker;
import static com.datastax.driver.core.querybuilder.QueryBuilder.eq;
import static com.datastax.driver.core.querybuilder.QueryBuilder.set;

/**
 * DML statements
 *
 * <p><em>MetadataDAO</em>
 *
 * <ul>
 *   <li>INSERT INTO conductor.workflow_definitions (workflow_def_name,version,workflow_definition)
 *       VALUES (?,?,?) IF NOT EXISTS;
 *   <li>INSERT INTO conductor.workflow_defs_index
 *       (workflow_def_version_index,workflow_def_name_version, workflow_def_index_value) VALUES
 *       ('workflow_def_version_index',?,?);
 *   <li>INSERT INTO conductor.task_definitions (task_defs,task_def_name,task_definition) VALUES
 *       ('task_defs',?,?);
 *   <li>SELECT workflow_definition FROM conductor.workflow_definitions WHERE workflow_def_name=?
 *       AND version=?;
 *   <li>SELECT * FROM conductor.workflow_definitions WHERE workflow_def_name=?;
 *   <li>SELECT * FROM conductor.workflow_defs_index WHERE workflow_def_version_index=?;
 *   <li>SELECT task_definition FROM conductor.task_definitions WHERE task_defs='task_defs' AND
 *       task_def_name=?;
 *   <li>SELECT * FROM conductor.task_definitions WHERE task_defs=?;
 *   <li>UPDATE conductor.workflow_definitions SET workflow_definition=? WHERE workflow_def_name=?
 *       AND version=?;
 *   <li>DELETE FROM conductor.workflow_definitions WHERE workflow_def_name=? AND version=?;
 *   <li>DELETE FROM conductor.workflow_defs_index WHERE workflow_def_version_index=? AND
 *       workflow_def_name_version=?;
 *   <li>DELETE FROM conductor.task_definitions WHERE task_defs='task_defs' AND task_def_name=?;
 * </ul>
 *
 * <em>ExecutionDAO</em>
 *
 * <ul>
 *   <li>INSERT INTO conductor.workflows
 *       (workflow_id,shard_id,task_id,entity,payload,total_tasks,total_partitions) VALUES
 *       (?,?,?,'workflow',?,?,?);
 *   <li>INSERT INTO conductor.workflows (workflow_id,shard_id,task_id,entity,payload) VALUES
 *       (?,?,?,'task',?);
 *   <li>INSERT INTO conductor.event_executions
 *       (message_id,event_handler_name,event_execution_id,payload) VALUES (?,?,?,?) IF NOT EXISTS;
 *   <li>SELECT total_tasks,total_partitions FROM conductor.workflows WHERE workflow_id=? AND
 *       shard_id=1;
 *   <li>SELECT payload FROM conductor.workflows WHERE workflow_id=? AND shard_id=? AND
 *       entity='task' AND task_id=?;
 *   <li>SELECT payload FROM conductor.workflows WHERE workflow_id=? AND shard_id=1 AND
 *       entity='workflow';
 *   <li>SELECT * FROM conductor.workflows WHERE workflow_id=? AND shard_id=?;
 *   <li>SELECT workflow_id FROM conductor.task_lookup WHERE task_id=?;
 *   <li>SELECT * FROM conductor.task_def_limit WHERE task_def_name=?;
 *   <li>SELECT * FROM conductor.event_executions WHERE message_id=? AND event_handler_name=?;
 *   <li>UPDATE conductor.workflows SET payload=? WHERE workflow_id=? AND shard_id=1 AND
 *       entity='workflow' AND task_id='';
 *   <li>UPDATE conductor.workflows SET total_tasks=? WHERE workflow_id=? AND shard_id=?;
 *   <li>UPDATE conductor.workflows SET total_partitions=?,total_tasks=? WHERE workflow_id=? AND
 *       shard_id=1;
 *   <li>UPDATE conductor.task_lookup SET workflow_id=? WHERE task_id=?;
 *   <li>UPDATE conductor.task_def_limit SET workflow_id=? WHERE task_def_name=? AND task_id=?;
 *   <li>UPDATE conductor.event_executions USING TTL ? SET payload=? WHERE message_id=? AND
 *       event_handler_name=? AND event_execution_id=?;
 *   <li>DELETE FROM conductor.workflows WHERE workflow_id=? AND shard_id=?;
 *   <li>DELETE FROM conductor.workflows WHERE workflow_id=? AND shard_id=? AND entity='task' AND
 *       task_id=?;
 *   <li>DELETE FROM conductor.task_lookup WHERE task_id=?;
 *   <li>DELETE FROM conductor.task_def_limit WHERE task_def_name=? AND task_id=?;
 *   <li>DELETE FROM conductor.event_executions WHERE message_id=? AND event_handler_name=? AND
 *       event_execution_id=?;
 * </ul>
 *
 * <em>EventHandlerDAO</em>
 *
 * <ul>
 *   <li>INSERT INTO conductor.event_handlers (handlers,event_handler_name,event_handler) VALUES
 *       ('handlers',?,?);
 *   <li>SELECT * FROM conductor.event_handlers WHERE handlers=?;
 *   <li>DELETE FROM conductor.event_handlers WHERE handlers='handlers' AND event_handler_name=?;
 * </ul>
 */
public class Statements {

    // Cassandra keyspace all generated statements are qualified with.
    private final String keyspace;

    public Statements(String keyspace) {
        this.keyspace = keyspace;
    }

    // MetadataDAO
    // Insert Statements

    /**
     * @return cql query statement to insert a new workflow definition into the
     *     "workflow_definitions" table
     */
    public String getInsertWorkflowDefStatement() {
        return QueryBuilder.insertInto(keyspace, TABLE_WORKFLOW_DEFS)
                .value(WORKFLOW_DEF_NAME_KEY, bindMarker())
                .value(WORKFLOW_VERSION_KEY, bindMarker())
                .value(WORKFLOW_DEFINITION_KEY, bindMarker())
                .ifNotExists()
                .getQueryString();
    }

    /**
     * @return cql query statement to insert a workflow def name version index into the
     *     "workflow_defs_index" table
     */
    public String getInsertWorkflowDefVersionIndexStatement() {
        // partition key column is set to the literal constant so all index rows share one partition
        return QueryBuilder.insertInto(keyspace, TABLE_WORKFLOW_DEFS_INDEX)
                .value(WORKFLOW_DEF_INDEX_KEY, WORKFLOW_DEF_INDEX_KEY)
                .value(WORKFLOW_DEF_NAME_VERSION_KEY, bindMarker())
                .value(WORKFLOW_DEF_INDEX_VALUE, bindMarker())
                .getQueryString();
    }

    /**
     * @return cql query statement to insert a new task definition into the "task_definitions" table
     */
    public String getInsertTaskDefStatement() {
        return QueryBuilder.insertInto(keyspace, TABLE_TASK_DEFS)
                .value(TASK_DEFS_KEY, TASK_DEFS_KEY)
                .value(TASK_DEF_NAME_KEY, bindMarker())
                .value(TASK_DEFINITION_KEY, bindMarker())
                .getQueryString();
    }

    // Select Statements

    /**
     * @return cql query statement to fetch a workflow definition by name and version from the
     *     "workflow_definitions" table
     */
    public String getSelectWorkflowDefStatement() {
        return QueryBuilder.select(WORKFLOW_DEFINITION_KEY)
                .from(keyspace, TABLE_WORKFLOW_DEFS)
                .where(eq(WORKFLOW_DEF_NAME_KEY, bindMarker()))
                .and(eq(WORKFLOW_VERSION_KEY, bindMarker()))
                .getQueryString();
    }

    /**
     * @return cql query statement to retrieve all versions of a workflow definition by name from
     *     the "workflow_definitions" table
     */
    public String getSelectAllWorkflowDefVersionsByNameStatement() {
        return QueryBuilder.select()
                .all()
                .from(keyspace, TABLE_WORKFLOW_DEFS)
                .where(eq(WORKFLOW_DEF_NAME_KEY, bindMarker()))
                .getQueryString();
    }

    /**
     * @return cql query statement to fetch all workflow def names and version from the
     *     "workflow_defs_index" table
     */
    public String getSelectAllWorkflowDefsStatement() {
        return QueryBuilder.select()
                .all()
                .from(keyspace, TABLE_WORKFLOW_DEFS_INDEX)
                .where(eq(WORKFLOW_DEF_INDEX_KEY, bindMarker()))
                .getQueryString();
    }

    /**
     * @return cql query statement to fetch workflow def index rows from the "workflow_defs_index"
     *     table
     *     <p>NOTE(review): the generated CQL is identical to
     *     {@link #getSelectAllWorkflowDefsStatement()}; latest-version filtering presumably happens
     *     in the calling DAO — confirm against the caller.
     */
    public String getSelectAllWorkflowDefsLatestVersionsStatement() {
        return QueryBuilder.select()
                .all()
                .from(keyspace, TABLE_WORKFLOW_DEFS_INDEX)
                .where(eq(WORKFLOW_DEF_INDEX_KEY, bindMarker()))
                .getQueryString();
    }

    /**
     * @return cql query statement to fetch a task definition by name from the "task_definitions"
     *     table
     */
    public String getSelectTaskDefStatement() {
        return QueryBuilder.select(TASK_DEFINITION_KEY)
                .from(keyspace, TABLE_TASK_DEFS)
                .where(eq(TASK_DEFS_KEY, TASK_DEFS_KEY))
                .and(eq(TASK_DEF_NAME_KEY, bindMarker()))
                .getQueryString();
    }

    /**
     * @return cql query statement to retrieve all task definitions from the "task_definitions"
     *     table
     */
    public String getSelectAllTaskDefsStatement() {
        return QueryBuilder.select()
                .all()
                .from(keyspace, TABLE_TASK_DEFS)
                .where(eq(TASK_DEFS_KEY, bindMarker()))
                .getQueryString();
    }

    // Update Statement

    /**
     * @return cql query statement to update a workflow definition in the "workflow_definitions"
     *     table
     */
    public String getUpdateWorkflowDefStatement() {
        return QueryBuilder.update(keyspace, TABLE_WORKFLOW_DEFS)
                .with(set(WORKFLOW_DEFINITION_KEY, bindMarker()))
                .where(eq(WORKFLOW_DEF_NAME_KEY, bindMarker()))
                .and(eq(WORKFLOW_VERSION_KEY, bindMarker()))
                .getQueryString();
    }

    // Delete Statements

    /**
     * @return cql query statement to delete a workflow definition by name and version from the
     *     "workflow_definitions" table
     */
    public String getDeleteWorkflowDefStatement() {
        return QueryBuilder.delete()
                .from(keyspace, TABLE_WORKFLOW_DEFS)
                .where(eq(WORKFLOW_DEF_NAME_KEY, bindMarker()))
                .and(eq(WORKFLOW_VERSION_KEY, bindMarker()))
                .getQueryString();
    }

    /**
     * @return cql query statement to delete a workflow def name/version from the
     *     "workflow_defs_index" table
     */
    public String getDeleteWorkflowDefIndexStatement() {
        return QueryBuilder.delete()
                .from(keyspace, TABLE_WORKFLOW_DEFS_INDEX)
                .where(eq(WORKFLOW_DEF_INDEX_KEY, bindMarker()))
                .and(eq(WORKFLOW_DEF_NAME_VERSION_KEY, bindMarker()))
                .getQueryString();
    }

    /**
     * @return cql query statement to delete a task definition by name from the "task_definitions"
     *     table
     */
    public String getDeleteTaskDefStatement() {
        return QueryBuilder.delete()
                .from(keyspace, TABLE_TASK_DEFS)
                .where(eq(TASK_DEFS_KEY, TASK_DEFS_KEY))
                .and(eq(TASK_DEF_NAME_KEY, bindMarker()))
                .getQueryString();
    }

    // ExecutionDAO
    // Insert Statements

    /**
     * @return cql query statement to insert a new workflow into the "workflows" table
     */
    public String getInsertWorkflowStatement() {
        return QueryBuilder.insertInto(keyspace, TABLE_WORKFLOWS)
                .value(WORKFLOW_ID_KEY, bindMarker())
                .value(SHARD_ID_KEY, bindMarker())
                .value(TASK_ID_KEY, bindMarker())
                .value(ENTITY_KEY, ENTITY_TYPE_WORKFLOW)
                .value(PAYLOAD_KEY, bindMarker())
                .value(TOTAL_TASKS_KEY, bindMarker())
                .value(TOTAL_PARTITIONS_KEY, bindMarker())
                .getQueryString();
    }

    /**
     * @return cql query statement to insert a new task into the "workflows" table
     */
    public String getInsertTaskStatement() {
        return QueryBuilder.insertInto(keyspace, TABLE_WORKFLOWS)
                .value(WORKFLOW_ID_KEY, bindMarker())
                .value(SHARD_ID_KEY, bindMarker())
                .value(TASK_ID_KEY, bindMarker())
                .value(ENTITY_KEY, ENTITY_TYPE_TASK)
                .value(PAYLOAD_KEY, bindMarker())
                .getQueryString();
    }

    /**
     * @return cql query statement to insert a new event execution into the "event_executions" table
     */
    public String getInsertEventExecutionStatement() {
        return QueryBuilder.insertInto(keyspace, TABLE_EVENT_EXECUTIONS)
                .value(MESSAGE_ID_KEY, bindMarker())
                .value(EVENT_HANDLER_NAME_KEY, bindMarker())
                .value(EVENT_EXECUTION_ID_KEY, bindMarker())
                .value(PAYLOAD_KEY, bindMarker())
                .ifNotExists()
                .getQueryString();
    }

    // Select Statements

    /**
     * @return cql query statement to retrieve the total_tasks and total_partitions for a workflow
     *     from the "workflows" table
     */
    public String getSelectTotalStatement() {
        // totals are always stored in shard 1 (see the shard_id=1 literal below)
        return QueryBuilder.select(TOTAL_TASKS_KEY, TOTAL_PARTITIONS_KEY)
                .from(keyspace, TABLE_WORKFLOWS)
                .where(eq(WORKFLOW_ID_KEY, bindMarker()))
                .and(eq(SHARD_ID_KEY, 1))
                .getQueryString();
    }

    /**
     * @return cql query statement to retrieve a task from the "workflows" table
     */
    public String getSelectTaskStatement() {
        return QueryBuilder.select(PAYLOAD_KEY)
                .from(keyspace, TABLE_WORKFLOWS)
                .where(eq(WORKFLOW_ID_KEY, bindMarker()))
                .and(eq(SHARD_ID_KEY, bindMarker()))
                .and(eq(ENTITY_KEY, ENTITY_TYPE_TASK))
                .and(eq(TASK_ID_KEY, bindMarker()))
                .getQueryString();
    }

    /**
     * @return cql query statement to retrieve a workflow (without its tasks) from the "workflows"
     *     table
     */
    public String getSelectWorkflowStatement() {
        return QueryBuilder.select(PAYLOAD_KEY)
                .from(keyspace, TABLE_WORKFLOWS)
                .where(eq(WORKFLOW_ID_KEY, bindMarker()))
                .and(eq(SHARD_ID_KEY, 1))
                .and(eq(ENTITY_KEY, ENTITY_TYPE_WORKFLOW))
                .getQueryString();
    }

    /**
     * @return cql query statement to retrieve a workflow with its tasks from the "workflows" table
     */
    public String getSelectWorkflowWithTasksStatement() {
        return QueryBuilder.select()
                .all()
                .from(keyspace, TABLE_WORKFLOWS)
                .where(eq(WORKFLOW_ID_KEY, bindMarker()))
                .and(eq(SHARD_ID_KEY, bindMarker()))
                .getQueryString();
    }

    /**
     * @return cql query statement to retrieve the workflow_id for a particular task_id from the
     *     "task_lookup" table
     */
    public String getSelectTaskFromLookupTableStatement() {
        return QueryBuilder.select(WORKFLOW_ID_KEY)
                .from(keyspace, TABLE_TASK_LOOKUP)
                .where(eq(TASK_ID_KEY, bindMarker()))
                .getQueryString();
    }

    /**
     * @return cql query statement to retrieve all task ids for a given taskDefName with concurrent
     *     execution limit configured from the "task_def_limit" table
     */
    public String getSelectTasksFromTaskDefLimitStatement() {
        return QueryBuilder.select()
                .all()
                .from(keyspace, TABLE_TASK_DEF_LIMIT)
                .where(eq(TASK_DEF_NAME_KEY, bindMarker()))
                .getQueryString();
    }

    /**
     * @return cql query statement to retrieve all event executions for a given message and event
     *     handler from the "event_executions" table
     */
    public String getSelectAllEventExecutionsForMessageFromEventExecutionsStatement() {
        return QueryBuilder.select()
                .all()
                .from(keyspace, TABLE_EVENT_EXECUTIONS)
                .where(eq(MESSAGE_ID_KEY, bindMarker()))
                .and(eq(EVENT_HANDLER_NAME_KEY, bindMarker()))
                .getQueryString();
    }

    // Update Statements

    /**
     * @return cql query statement to update a workflow in the "workflows" table
     */
    public String getUpdateWorkflowStatement() {
        // the workflow row itself lives at shard_id=1 with an empty task_id
        return QueryBuilder.update(keyspace, TABLE_WORKFLOWS)
                .with(set(PAYLOAD_KEY, bindMarker()))
                .where(eq(WORKFLOW_ID_KEY, bindMarker()))
                .and(eq(SHARD_ID_KEY, 1))
                .and(eq(ENTITY_KEY, ENTITY_TYPE_WORKFLOW))
                .and(eq(TASK_ID_KEY, ""))
                .getQueryString();
    }

    /**
     * @return cql query statement to update the total_tasks in a shard for a workflow in the
     *     "workflows" table
     */
    public String getUpdateTotalTasksStatement() {
        return QueryBuilder.update(keyspace, TABLE_WORKFLOWS)
                .with(set(TOTAL_TASKS_KEY, bindMarker()))
                .where(eq(WORKFLOW_ID_KEY, bindMarker()))
                .and(eq(SHARD_ID_KEY, bindMarker()))
                .getQueryString();
    }

    /**
     * @return cql query statement to update the total_partitions for a workflow in the "workflows"
     *     table
     */
    public String getUpdateTotalPartitionsStatement() {
        return QueryBuilder.update(keyspace, TABLE_WORKFLOWS)
                .with(set(TOTAL_PARTITIONS_KEY, bindMarker()))
                .and(set(TOTAL_TASKS_KEY, bindMarker()))
                .where(eq(WORKFLOW_ID_KEY, bindMarker()))
                .and(eq(SHARD_ID_KEY, 1))
                .getQueryString();
    }

    /**
     * @return cql query statement to add a new task_id to workflow_id mapping to the "task_lookup"
     *     table
     */
    public String getUpdateTaskLookupStatement() {
        return QueryBuilder.update(keyspace, TABLE_TASK_LOOKUP)
                .with(set(WORKFLOW_ID_KEY, bindMarker()))
                .where(eq(TASK_ID_KEY, bindMarker()))
                .getQueryString();
    }

    /**
     * @return cql query statement to add a new task_id to the "task_def_limit" table
     */
    public String getUpdateTaskDefLimitStatement() {
        return QueryBuilder.update(keyspace, TABLE_TASK_DEF_LIMIT)
                .with(set(WORKFLOW_ID_KEY, bindMarker()))
                .where(eq(TASK_DEF_NAME_KEY, bindMarker()))
                .and(eq(TASK_ID_KEY, bindMarker()))
                .getQueryString();
    }

    /**
     * @return cql query statement to update an event execution in the "event_executions" table
     */
    public String getUpdateEventExecutionStatement() {
        // TTL is bound at execution time so the row expires automatically
        return QueryBuilder.update(keyspace, TABLE_EVENT_EXECUTIONS)
                .using(QueryBuilder.ttl(bindMarker()))
                .with(set(PAYLOAD_KEY, bindMarker()))
                .where(eq(MESSAGE_ID_KEY, bindMarker()))
                .and(eq(EVENT_HANDLER_NAME_KEY, bindMarker()))
                .and(eq(EVENT_EXECUTION_ID_KEY, bindMarker()))
                .getQueryString();
    }

    // Delete statements

    /**
     * @return cql query statement to delete a workflow from the "workflows" table
     */
    public String getDeleteWorkflowStatement() {
        return QueryBuilder.delete()
                .from(keyspace, TABLE_WORKFLOWS)
                .where(eq(WORKFLOW_ID_KEY, bindMarker()))
                .and(eq(SHARD_ID_KEY, bindMarker()))
                .getQueryString();
    }

    /**
     * @return cql query statement to delete a task_id to workflow_id mapping from the "task_lookup"
     *     table
     */
    public String getDeleteTaskLookupStatement() {
        return QueryBuilder.delete()
                .from(keyspace, TABLE_TASK_LOOKUP)
                .where(eq(TASK_ID_KEY, bindMarker()))
                .getQueryString();
    }

    /**
     * @return cql query statement to delete a task from the "workflows" table
     */
    public String getDeleteTaskStatement() {
        return QueryBuilder.delete()
                .from(keyspace, TABLE_WORKFLOWS)
                .where(eq(WORKFLOW_ID_KEY, bindMarker()))
                .and(eq(SHARD_ID_KEY, bindMarker()))
                .and(eq(ENTITY_KEY, ENTITY_TYPE_TASK))
                .and(eq(TASK_ID_KEY, bindMarker()))
                .getQueryString();
    }

    /**
     * @return cql query statement to delete a task_id from the "task_def_limit" table
     */
    public String getDeleteTaskDefLimitStatement() {
        return QueryBuilder.delete()
                .from(keyspace, TABLE_TASK_DEF_LIMIT)
                .where(eq(TASK_DEF_NAME_KEY, bindMarker()))
                .and(eq(TASK_ID_KEY, bindMarker()))
                .getQueryString();
    }

    /**
     * @return cql query statement to delete an event execution from the "event_execution" table
     */
    public String getDeleteEventExecutionsStatement() {
        return QueryBuilder.delete()
                .from(keyspace, TABLE_EVENT_EXECUTIONS)
                .where(eq(MESSAGE_ID_KEY, bindMarker()))
                .and(eq(EVENT_HANDLER_NAME_KEY, bindMarker()))
                .and(eq(EVENT_EXECUTION_ID_KEY, bindMarker()))
                .getQueryString();
    }

    // EventHandlerDAO
    // Insert Statements

    /**
     * @return cql query statement to insert an event handler into the "event_handlers" table
     */
    public String getInsertEventHandlerStatement() {
        return QueryBuilder.insertInto(keyspace, TABLE_EVENT_HANDLERS)
                .value(HANDLERS_KEY, HANDLERS_KEY)
                .value(EVENT_HANDLER_NAME_KEY, bindMarker())
                .value(EVENT_HANDLER_KEY, bindMarker())
                .getQueryString();
    }

    // Select Statements

    /**
     * @return cql query statement to retrieve all event handlers from the "event_handlers" table
     */
    public String getSelectAllEventHandlersStatement() {
        return QueryBuilder.select()
                .all()
                .from(keyspace, TABLE_EVENT_HANDLERS)
                .where(eq(HANDLERS_KEY, bindMarker()))
                .getQueryString();
    }

    // Delete Statements

    /**
     * @return cql query statement to delete an event handler by name from the "event_handlers"
     *     table
     */
    public String getDeleteEventHandlerStatement() {
        return QueryBuilder.delete()
                .from(keyspace, TABLE_EVENT_HANDLERS)
                .where(eq(HANDLERS_KEY, HANDLERS_KEY))
                .and(eq(EVENT_HANDLER_NAME_KEY, bindMarker()))
                .getQueryString();
    }
}
7,035
0
Create_ds/conductor/cassandra-persistence/src/main/java/com/netflix/conductor/cassandra
Create_ds/conductor/cassandra-persistence/src/main/java/com/netflix/conductor/cassandra/util/Constants.java
/* * Copyright 2022 Netflix, Inc. * <p> * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * <p> * http://www.apache.org/licenses/LICENSE-2.0 * <p> * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. */ package com.netflix.conductor.cassandra.util; public interface Constants { String DAO_NAME = "cassandra"; String TABLE_WORKFLOWS = "workflows"; String TABLE_TASK_LOOKUP = "task_lookup"; String TABLE_TASK_DEF_LIMIT = "task_def_limit"; String TABLE_WORKFLOW_DEFS = "workflow_definitions"; String TABLE_WORKFLOW_DEFS_INDEX = "workflow_defs_index"; String TABLE_TASK_DEFS = "task_definitions"; String TABLE_EVENT_HANDLERS = "event_handlers"; String TABLE_EVENT_EXECUTIONS = "event_executions"; String WORKFLOW_ID_KEY = "workflow_id"; String SHARD_ID_KEY = "shard_id"; String TASK_ID_KEY = "task_id"; String ENTITY_KEY = "entity"; String PAYLOAD_KEY = "payload"; String TOTAL_TASKS_KEY = "total_tasks"; String TOTAL_PARTITIONS_KEY = "total_partitions"; String TASK_DEF_NAME_KEY = "task_def_name"; String WORKFLOW_DEF_NAME_KEY = "workflow_def_name"; String WORKFLOW_VERSION_KEY = "version"; String WORKFLOW_DEFINITION_KEY = "workflow_definition"; String WORKFLOW_DEF_INDEX_KEY = "workflow_def_version_index"; String WORKFLOW_DEF_INDEX_VALUE = "workflow_def_index_value"; String WORKFLOW_DEF_NAME_VERSION_KEY = "workflow_def_name_version"; String TASK_DEFS_KEY = "task_defs"; String TASK_DEFINITION_KEY = "task_definition"; String HANDLERS_KEY = "handlers"; String EVENT_HANDLER_NAME_KEY = "event_handler_name"; String EVENT_HANDLER_KEY = "event_handler"; String MESSAGE_ID_KEY = "message_id"; String 
EVENT_EXECUTION_ID_KEY = "event_execution_id"; String ENTITY_TYPE_WORKFLOW = "workflow"; String ENTITY_TYPE_TASK = "task"; int DEFAULT_SHARD_ID = 1; int DEFAULT_TOTAL_PARTITIONS = 1; }
7,036
0
Create_ds/conductor/cassandra-persistence/src/main/java/com/netflix/conductor/cassandra
Create_ds/conductor/cassandra-persistence/src/main/java/com/netflix/conductor/cassandra/config/CassandraProperties.java
/*
 * Copyright 2022 Netflix, Inc.
 * <p>
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
 * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations under the License.
 */
package com.netflix.conductor.cassandra.config;

import java.time.Duration;
import java.time.temporal.ChronoUnit;

import org.springframework.boot.context.properties.ConfigurationProperties;
import org.springframework.boot.convert.DurationUnit;

import com.datastax.driver.core.ConsistencyLevel;

/**
 * Configuration properties for the Cassandra persistence module, bound from the
 * {@code conductor.cassandra.*} namespace by Spring Boot.
 */
@ConfigurationProperties("conductor.cassandra")
public class CassandraProperties {

    /** The address for the cassandra database host */
    private String hostAddress = "127.0.0.1";

    /** The port to be used to connect to the cassandra database instance */
    private int port = 9142;

    /** The name of the cassandra cluster */
    private String cluster = "";

    /** The keyspace to be used in the cassandra datastore */
    private String keyspace = "conductor";

    /**
     * The number of tasks to be stored in a single partition which will be used for sharding
     * workflows in the datastore
     */
    private int shardSize = 100;

    /** The replication strategy with which to configure the keyspace */
    private String replicationStrategy = "SimpleStrategy";

    /** The key to be used while configuring the replication factor */
    private String replicationFactorKey = "replication_factor";

    /** The replication factor value with which the keyspace is configured */
    private int replicationFactorValue = 3;

    /** The consistency level to be used for read operations */
    private ConsistencyLevel readConsistencyLevel = ConsistencyLevel.LOCAL_QUORUM;

    /** The consistency level to be used for write operations */
    private ConsistencyLevel writeConsistencyLevel = ConsistencyLevel.LOCAL_QUORUM;

    /** The time in seconds after which the in-memory task definitions cache will be refreshed */
    @DurationUnit(ChronoUnit.SECONDS)
    private Duration taskDefCacheRefreshInterval = Duration.ofSeconds(60);

    /** The time in seconds after which the in-memory event handler cache will be refreshed */
    @DurationUnit(ChronoUnit.SECONDS)
    private Duration eventHandlerCacheRefreshInterval = Duration.ofSeconds(60);

    /**
     * The time to live in seconds for which the event execution will be persisted.
     * Duration.ZERO means no TTL is applied (record persists indefinitely).
     */
    @DurationUnit(ChronoUnit.SECONDS)
    private Duration eventExecutionPersistenceTtl = Duration.ZERO;

    public String getHostAddress() {
        return hostAddress;
    }

    public void setHostAddress(String hostAddress) {
        this.hostAddress = hostAddress;
    }

    public int getPort() {
        return port;
    }

    public void setPort(int port) {
        this.port = port;
    }

    public String getCluster() {
        return cluster;
    }

    public void setCluster(String cluster) {
        this.cluster = cluster;
    }

    public String getKeyspace() {
        return keyspace;
    }

    public void setKeyspace(String keyspace) {
        this.keyspace = keyspace;
    }

    public int getShardSize() {
        return shardSize;
    }

    public void setShardSize(int shardSize) {
        this.shardSize = shardSize;
    }

    public String getReplicationStrategy() {
        return replicationStrategy;
    }

    public void setReplicationStrategy(String replicationStrategy) {
        this.replicationStrategy = replicationStrategy;
    }

    public String getReplicationFactorKey() {
        return replicationFactorKey;
    }

    public void setReplicationFactorKey(String replicationFactorKey) {
        this.replicationFactorKey = replicationFactorKey;
    }

    public int getReplicationFactorValue() {
        return replicationFactorValue;
    }

    public void setReplicationFactorValue(int replicationFactorValue) {
        this.replicationFactorValue = replicationFactorValue;
    }

    public ConsistencyLevel getReadConsistencyLevel() {
        return readConsistencyLevel;
    }

    public void setReadConsistencyLevel(ConsistencyLevel readConsistencyLevel) {
        this.readConsistencyLevel = readConsistencyLevel;
    }

    public ConsistencyLevel getWriteConsistencyLevel() {
        return writeConsistencyLevel;
    }

    public void setWriteConsistencyLevel(ConsistencyLevel writeConsistencyLevel) {
        this.writeConsistencyLevel = writeConsistencyLevel;
    }

    public Duration getTaskDefCacheRefreshInterval() {
        return taskDefCacheRefreshInterval;
    }

    public void setTaskDefCacheRefreshInterval(Duration taskDefCacheRefreshInterval) {
        this.taskDefCacheRefreshInterval = taskDefCacheRefreshInterval;
    }

    public Duration getEventHandlerCacheRefreshInterval() {
        return eventHandlerCacheRefreshInterval;
    }

    public void setEventHandlerCacheRefreshInterval(Duration eventHandlerCacheRefreshInterval) {
        this.eventHandlerCacheRefreshInterval = eventHandlerCacheRefreshInterval;
    }

    public Duration getEventExecutionPersistenceTtl() {
        return eventExecutionPersistenceTtl;
    }

    public void setEventExecutionPersistenceTtl(Duration eventExecutionPersistenceTtl) {
        this.eventExecutionPersistenceTtl = eventExecutionPersistenceTtl;
    }
}
7,037
0
Create_ds/conductor/cassandra-persistence/src/main/java/com/netflix/conductor/cassandra
Create_ds/conductor/cassandra-persistence/src/main/java/com/netflix/conductor/cassandra/config/CassandraConfiguration.java
/*
 * Copyright 2022 Netflix, Inc.
 * <p>
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
 * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations under the License.
 */
package com.netflix.conductor.cassandra.config;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.boot.context.properties.EnableConfigurationProperties;
import org.springframework.cache.CacheManager;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;

import com.netflix.conductor.cassandra.config.cache.CacheableEventHandlerDAO;
import com.netflix.conductor.cassandra.config.cache.CacheableMetadataDAO;
import com.netflix.conductor.cassandra.dao.CassandraEventHandlerDAO;
import com.netflix.conductor.cassandra.dao.CassandraExecutionDAO;
import com.netflix.conductor.cassandra.dao.CassandraMetadataDAO;
import com.netflix.conductor.cassandra.dao.CassandraPollDataDAO;
import com.netflix.conductor.cassandra.util.Statements;
import com.netflix.conductor.dao.EventHandlerDAO;
import com.netflix.conductor.dao.ExecutionDAO;
import com.netflix.conductor.dao.MetadataDAO;

import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.Metadata;
import com.datastax.driver.core.Session;
import com.fasterxml.jackson.databind.ObjectMapper;

/**
 * Spring configuration that wires the Cassandra-backed DAO implementations.
 *
 * <p>Only active when {@code conductor.db.type=cassandra}. Metadata and event
 * handler DAOs are wrapped in caching decorators; execution and poll-data DAOs
 * are exposed directly.
 */
@Configuration(proxyBeanMethods = false)
@EnableConfigurationProperties(CassandraProperties.class)
@ConditionalOnProperty(name = "conductor.db.type", havingValue = "cassandra")
public class CassandraConfiguration {

    private static final Logger LOGGER = LoggerFactory.getLogger(CassandraConfiguration.class);

    /**
     * Builds and connects the driver {@link Cluster}, logging the discovered
     * topology (datacenter/host/rack for every node).
     */
    @Bean
    public Cluster cluster(CassandraProperties properties) {
        String host = properties.getHostAddress();
        int port = properties.getPort();

        LOGGER.info("Connecting to cassandra cluster with host:{}, port:{}", host, port);

        Cluster cluster = Cluster.builder().addContactPoint(host).withPort(port).build();

        Metadata metadata = cluster.getMetadata();
        LOGGER.info("Connected to cluster: {}", metadata.getClusterName());
        metadata.getAllHosts()
                .forEach(
                        h ->
                                LOGGER.info(
                                        "Datacenter:{}, host:{}, rack: {}",
                                        h.getDatacenter(),
                                        h.getEndPoint().resolve().getHostName(),
                                        h.getRack()));
        return cluster;
    }

    /** Opens a shared driver {@link Session}; closed by Spring on context shutdown. */
    @Bean
    public Session session(Cluster cluster) {
        LOGGER.info("Initializing cassandra session");
        return cluster.connect();
    }

    /**
     * Metadata DAO wrapped in a caching decorator; task definitions are served
     * from the in-memory cache and refreshed periodically.
     */
    @Bean
    public MetadataDAO cassandraMetadataDAO(
            Session session,
            ObjectMapper objectMapper,
            CassandraProperties properties,
            Statements statements,
            CacheManager cacheManager) {
        CassandraMetadataDAO cassandraMetadataDAO =
                new CassandraMetadataDAO(session, objectMapper, properties, statements);
        return new CacheableMetadataDAO(cassandraMetadataDAO, properties, cacheManager);
    }

    /** Execution DAO; no caching layer — workflow/task state must be read-through. */
    @Bean
    public ExecutionDAO cassandraExecutionDAO(
            Session session,
            ObjectMapper objectMapper,
            CassandraProperties properties,
            Statements statements) {
        return new CassandraExecutionDAO(session, objectMapper, properties, statements);
    }

    /** Event handler DAO wrapped in a caching decorator. */
    @Bean
    public EventHandlerDAO cassandraEventHandlerDAO(
            Session session,
            ObjectMapper objectMapper,
            CassandraProperties properties,
            Statements statements,
            CacheManager cacheManager) {
        CassandraEventHandlerDAO cassandraEventHandlerDAO =
                new CassandraEventHandlerDAO(session, objectMapper, properties, statements);
        return new CacheableEventHandlerDAO(cassandraEventHandlerDAO, properties, cacheManager);
    }

    @Bean
    public CassandraPollDataDAO cassandraPollDataDAO() {
        return new CassandraPollDataDAO();
    }

    /** Pre-built CQL statement strings, keyed to the configured keyspace. */
    @Bean
    public Statements statements(CassandraProperties cassandraProperties) {
        return new Statements(cassandraProperties.getKeyspace());
    }
}
7,038
0
Create_ds/conductor/cassandra-persistence/src/main/java/com/netflix/conductor/cassandra/config
Create_ds/conductor/cassandra-persistence/src/main/java/com/netflix/conductor/cassandra/config/cache/CacheableMetadataDAO.java
/*
 * Copyright 2022 Netflix, Inc.
 * <p>
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
 * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations under the License.
 */
package com.netflix.conductor.cassandra.config.cache;

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Optional;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

import javax.annotation.PostConstruct;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.cache.Cache;
import org.springframework.cache.CacheManager;
import org.springframework.cache.annotation.CacheEvict;
import org.springframework.cache.annotation.CachePut;
import org.springframework.cache.annotation.Cacheable;

import com.netflix.conductor.annotations.Trace;
import com.netflix.conductor.cassandra.config.CassandraProperties;
import com.netflix.conductor.cassandra.dao.CassandraMetadataDAO;
import com.netflix.conductor.common.metadata.tasks.TaskDef;
import com.netflix.conductor.common.metadata.workflow.WorkflowDef;
import com.netflix.conductor.dao.MetadataDAO;
import com.netflix.conductor.metrics.Monitors;

import static com.netflix.conductor.cassandra.config.cache.CachingConfig.TASK_DEF_CACHE;

/**
 * {@link MetadataDAO} decorator that caches task definitions in memory.
 *
 * <p>Task-definition reads are served from the {@code taskDefCache}; a
 * background task refreshes the whole cache from Cassandra every
 * {@link CassandraProperties#getTaskDefCacheRefreshInterval()} seconds.
 * Workflow-definition operations are delegated straight through, uncached.
 */
@Trace
public class CacheableMetadataDAO implements MetadataDAO {

    private static final String CLASS_NAME = CacheableMetadataDAO.class.getSimpleName();
    private static final Logger LOGGER = LoggerFactory.getLogger(CacheableMetadataDAO.class);

    private final CassandraMetadataDAO cassandraMetadataDAO;
    private final CassandraProperties properties;
    private final CacheManager cacheManager;

    public CacheableMetadataDAO(
            CassandraMetadataDAO cassandraMetadataDAO,
            CassandraProperties properties,
            CacheManager cacheManager) {
        this.cassandraMetadataDAO = cassandraMetadataDAO;
        this.properties = properties;
        this.cacheManager = cacheManager;
    }

    /**
     * Schedules the periodic full refresh of the task-definition cache.
     * Runs once eagerly (initial delay 0) and then at the configured interval.
     */
    @PostConstruct
    public void scheduleCacheRefresh() {
        long cacheRefreshTime = properties.getTaskDefCacheRefreshInterval().getSeconds();
        // Daemon thread so the background refresh never blocks JVM shutdown;
        // the original non-daemon executor was never shut down.
        Executors.newSingleThreadScheduledExecutor(
                        runnable -> {
                            Thread thread = new Thread(runnable, "task-def-cache-refresh");
                            thread.setDaemon(true);
                            return thread;
                        })
                .scheduleWithFixedDelay(
                        this::refreshTaskDefsCache, 0, cacheRefreshTime, TimeUnit.SECONDS);
        LOGGER.info(
                "Scheduled cache refresh for Task Definitions, every {} seconds", cacheRefreshTime);
    }

    /** Persists the task definition and caches it under its name. */
    @Override
    @CachePut(value = TASK_DEF_CACHE, key = "#taskDef.name")
    public TaskDef createTaskDef(TaskDef taskDef) {
        cassandraMetadataDAO.createTaskDef(taskDef);
        return taskDef;
    }

    /** Updates the task definition and refreshes its cache entry. */
    @Override
    @CachePut(value = TASK_DEF_CACHE, key = "#taskDef.name")
    public TaskDef updateTaskDef(TaskDef taskDef) {
        return cassandraMetadataDAO.updateTaskDef(taskDef);
    }

    /** Reads through the cache; a miss falls back to Cassandra and is cached. */
    @Override
    @Cacheable(TASK_DEF_CACHE)
    public TaskDef getTaskDef(String name) {
        return cassandraMetadataDAO.getTaskDef(name);
    }

    /**
     * Returns all task definitions from the in-memory cache when it is
     * populated; otherwise forces a refresh from Cassandra.
     */
    @Override
    public List<TaskDef> getAllTaskDefs() {
        Object nativeCache = cacheManager.getCache(TASK_DEF_CACHE).getNativeCache();
        // instanceof already rejects null, so no separate null check is needed.
        if (nativeCache instanceof ConcurrentHashMap) {
            ConcurrentHashMap<?, ?> cacheMap = (ConcurrentHashMap<?, ?>) nativeCache;
            if (!cacheMap.isEmpty()) {
                List<TaskDef> taskDefs = new ArrayList<>(cacheMap.size());
                cacheMap.values().stream()
                        .filter(element -> element instanceof TaskDef)
                        .forEach(element -> taskDefs.add((TaskDef) element));
                return taskDefs;
            }
        }
        return refreshTaskDefsCache();
    }

    /** Removes the task definition from Cassandra and evicts its cache entry. */
    @Override
    @CacheEvict(TASK_DEF_CACHE)
    public void removeTaskDef(String name) {
        cassandraMetadataDAO.removeTaskDef(name);
    }

    @Override
    public void createWorkflowDef(WorkflowDef workflowDef) {
        cassandraMetadataDAO.createWorkflowDef(workflowDef);
    }

    @Override
    public void updateWorkflowDef(WorkflowDef workflowDef) {
        cassandraMetadataDAO.updateWorkflowDef(workflowDef);
    }

    @Override
    public Optional<WorkflowDef> getLatestWorkflowDef(String name) {
        return cassandraMetadataDAO.getLatestWorkflowDef(name);
    }

    @Override
    public Optional<WorkflowDef> getWorkflowDef(String name, int version) {
        return cassandraMetadataDAO.getWorkflowDef(name, version);
    }

    @Override
    public void removeWorkflowDef(String name, Integer version) {
        cassandraMetadataDAO.removeWorkflowDef(name, version);
    }

    @Override
    public List<WorkflowDef> getAllWorkflowDefs() {
        return cassandraMetadataDAO.getAllWorkflowDefs();
    }

    @Override
    public List<WorkflowDef> getAllWorkflowDefsLatestVersions() {
        return cassandraMetadataDAO.getAllWorkflowDefsLatestVersions();
    }

    /**
     * Clears and repopulates the task-definition cache from Cassandra.
     *
     * @return the freshly loaded definitions, or an empty list if the refresh
     *     failed (failure is recorded as a metric and logged, never thrown —
     *     a failed refresh must not kill the scheduled task)
     */
    private List<TaskDef> refreshTaskDefsCache() {
        try {
            Cache taskDefsCache = cacheManager.getCache(TASK_DEF_CACHE);
            taskDefsCache.clear();
            List<TaskDef> taskDefs = cassandraMetadataDAO.getAllTaskDefs();
            taskDefs.forEach(taskDef -> taskDefsCache.put(taskDef.getName(), taskDef));
            LOGGER.debug("Refreshed task defs, total num: {}", taskDefs.size());
            return taskDefs;
        } catch (Exception e) {
            Monitors.error(CLASS_NAME, "refreshTaskDefs");
            LOGGER.error("refresh TaskDefs failed ", e);
        }
        return Collections.emptyList();
    }
}
7,039
0
Create_ds/conductor/cassandra-persistence/src/main/java/com/netflix/conductor/cassandra/config
Create_ds/conductor/cassandra-persistence/src/main/java/com/netflix/conductor/cassandra/config/cache/CachingConfig.java
/* * Copyright 2022 Netflix, Inc. * <p> * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * <p> * http://www.apache.org/licenses/LICENSE-2.0 * <p> * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. */ package com.netflix.conductor.cassandra.config.cache; import org.springframework.cache.CacheManager; import org.springframework.cache.annotation.EnableCaching; import org.springframework.cache.concurrent.ConcurrentMapCacheManager; import org.springframework.context.annotation.Bean; import org.springframework.context.annotation.Configuration; @Configuration @EnableCaching public class CachingConfig { public static final String TASK_DEF_CACHE = "taskDefCache"; public static final String EVENT_HANDLER_CACHE = "eventHandlerCache"; @Bean public CacheManager cacheManager() { return new ConcurrentMapCacheManager(TASK_DEF_CACHE, EVENT_HANDLER_CACHE); } }
7,040
0
Create_ds/conductor/cassandra-persistence/src/main/java/com/netflix/conductor/cassandra/config
Create_ds/conductor/cassandra-persistence/src/main/java/com/netflix/conductor/cassandra/config/cache/CacheableEventHandlerDAO.java
/*
 * Copyright 2022 Netflix, Inc.
 * <p>
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
 * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations under the License.
 */
package com.netflix.conductor.cassandra.config.cache;

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;

import javax.annotation.PostConstruct;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.cache.Cache;
import org.springframework.cache.CacheManager;
import org.springframework.cache.annotation.CacheEvict;
import org.springframework.cache.annotation.CachePut;

import com.netflix.conductor.annotations.Trace;
import com.netflix.conductor.cassandra.config.CassandraProperties;
import com.netflix.conductor.cassandra.dao.CassandraEventHandlerDAO;
import com.netflix.conductor.common.metadata.events.EventHandler;
import com.netflix.conductor.dao.EventHandlerDAO;
import com.netflix.conductor.metrics.Monitors;

import static com.netflix.conductor.cassandra.config.cache.CachingConfig.EVENT_HANDLER_CACHE;

/**
 * {@link EventHandlerDAO} decorator that caches event handlers in memory,
 * refreshed periodically from Cassandra.
 *
 * <p>NOTE: the previous implementation annotated the void {@code add}/{@code
 * update} methods with {@code @CachePut}, which caches the method's <em>return
 * value</em> — {@code null} for a void method — so the handler itself was never
 * placed in the cache until the next scheduled refresh. The cache is now
 * populated explicitly after the write succeeds.
 */
@Trace
public class CacheableEventHandlerDAO implements EventHandlerDAO {

    private static final Logger LOGGER = LoggerFactory.getLogger(CacheableEventHandlerDAO.class);
    private static final String CLASS_NAME = CacheableEventHandlerDAO.class.getSimpleName();

    private final CassandraEventHandlerDAO cassandraEventHandlerDAO;
    private final CassandraProperties properties;
    private final CacheManager cacheManager;

    public CacheableEventHandlerDAO(
            CassandraEventHandlerDAO cassandraEventHandlerDAO,
            CassandraProperties properties,
            CacheManager cacheManager) {
        this.cassandraEventHandlerDAO = cassandraEventHandlerDAO;
        this.properties = properties;
        this.cacheManager = cacheManager;
    }

    /**
     * Schedules the periodic full refresh of the event-handler cache.
     * Runs once eagerly (initial delay 0) and then at the configured interval.
     */
    @PostConstruct
    public void scheduleEventHandlerRefresh() {
        long cacheRefreshTime = properties.getEventHandlerCacheRefreshInterval().getSeconds();
        // Daemon thread so the background refresh never blocks JVM shutdown;
        // the original non-daemon executor was never shut down.
        Executors.newSingleThreadScheduledExecutor(
                        runnable -> {
                            Thread thread = new Thread(runnable, "event-handler-cache-refresh");
                            thread.setDaemon(true);
                            return thread;
                        })
                .scheduleWithFixedDelay(
                        this::refreshEventHandlersCache, 0, cacheRefreshTime, TimeUnit.SECONDS);
        // Log for parity with CacheableMetadataDAO's scheduling message.
        LOGGER.info(
                "Scheduled cache refresh for Event Handlers, every {} seconds", cacheRefreshTime);
    }

    /** Persists the handler, then caches it under its name. */
    @Override
    public void addEventHandler(EventHandler eventHandler) {
        cassandraEventHandlerDAO.addEventHandler(eventHandler);
        // Explicit put: @CachePut on a void method would cache null, not the handler.
        cacheManager.getCache(EVENT_HANDLER_CACHE).put(eventHandler.getName(), eventHandler);
    }

    /** Updates the handler, then refreshes its cache entry. */
    @Override
    public void updateEventHandler(EventHandler eventHandler) {
        cassandraEventHandlerDAO.updateEventHandler(eventHandler);
        cacheManager.getCache(EVENT_HANDLER_CACHE).put(eventHandler.getName(), eventHandler);
    }

    /** Removes the handler from Cassandra and evicts its cache entry. */
    @Override
    @CacheEvict(EVENT_HANDLER_CACHE)
    public void removeEventHandler(String name) {
        cassandraEventHandlerDAO.removeEventHandler(name);
    }

    /**
     * Returns all event handlers from the in-memory cache when it is populated;
     * otherwise forces a refresh from Cassandra.
     */
    @Override
    public List<EventHandler> getAllEventHandlers() {
        Object nativeCache = cacheManager.getCache(EVENT_HANDLER_CACHE).getNativeCache();
        // instanceof already rejects null, so no separate null check is needed.
        if (nativeCache instanceof ConcurrentHashMap) {
            ConcurrentHashMap<?, ?> cacheMap = (ConcurrentHashMap<?, ?>) nativeCache;
            if (!cacheMap.isEmpty()) {
                List<EventHandler> eventHandlers = new ArrayList<>(cacheMap.size());
                cacheMap.values().stream()
                        .filter(element -> element instanceof EventHandler)
                        .forEach(element -> eventHandlers.add((EventHandler) element));
                return eventHandlers;
            }
        }
        return refreshEventHandlersCache();
    }

    /**
     * Returns the handlers registered for the given event name, optionally
     * restricted to active handlers only.
     */
    @Override
    public List<EventHandler> getEventHandlersForEvent(String event, boolean activeOnly) {
        return getAllEventHandlers().stream()
                .filter(eventHandler -> eventHandler.getEvent().equals(event))
                .filter(eventHandler -> !activeOnly || eventHandler.isActive())
                .collect(Collectors.toList());
    }

    /**
     * Clears and repopulates the event-handler cache from Cassandra.
     *
     * @return the freshly loaded handlers, or an empty list if the refresh
     *     failed (failure is recorded as a metric and logged, never thrown —
     *     a failed refresh must not kill the scheduled task)
     */
    private List<EventHandler> refreshEventHandlersCache() {
        try {
            Cache eventHandlersCache = cacheManager.getCache(EVENT_HANDLER_CACHE);
            eventHandlersCache.clear();
            List<EventHandler> eventHandlers = cassandraEventHandlerDAO.getAllEventHandlers();
            eventHandlers.forEach(
                    eventHandler -> eventHandlersCache.put(eventHandler.getName(), eventHandler));
            LOGGER.debug("Refreshed event handlers, total num: {}", eventHandlers.size());
            return eventHandlers;
        } catch (Exception e) {
            Monitors.error(CLASS_NAME, "refreshEventHandlersCache");
            LOGGER.error("refresh EventHandlers failed", e);
        }
        return Collections.emptyList();
    }
}
7,041
0
Create_ds/conductor/cassandra-persistence/src/main/java/com/netflix/conductor/cassandra
Create_ds/conductor/cassandra-persistence/src/main/java/com/netflix/conductor/cassandra/dao/CassandraBaseDAO.java
/*
 * Copyright 2022 Netflix, Inc.
 * <p>
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
 * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations under the License.
 */
package com.netflix.conductor.cassandra.dao;

import java.io.IOException;
import java.util.UUID;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.netflix.conductor.cassandra.config.CassandraProperties;
import com.netflix.conductor.core.exception.NonTransientException;
import com.netflix.conductor.metrics.Monitors;

import com.datastax.driver.core.DataType;
import com.datastax.driver.core.Session;
import com.datastax.driver.core.schemabuilder.SchemaBuilder;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.common.collect.ImmutableMap;

import static com.netflix.conductor.cassandra.util.Constants.DAO_NAME;
import static com.netflix.conductor.cassandra.util.Constants.ENTITY_KEY;
import static com.netflix.conductor.cassandra.util.Constants.EVENT_EXECUTION_ID_KEY;
import static com.netflix.conductor.cassandra.util.Constants.EVENT_HANDLER_KEY;
import static com.netflix.conductor.cassandra.util.Constants.EVENT_HANDLER_NAME_KEY;
import static com.netflix.conductor.cassandra.util.Constants.HANDLERS_KEY;
import static com.netflix.conductor.cassandra.util.Constants.MESSAGE_ID_KEY;
import static com.netflix.conductor.cassandra.util.Constants.PAYLOAD_KEY;
import static com.netflix.conductor.cassandra.util.Constants.SHARD_ID_KEY;
import static com.netflix.conductor.cassandra.util.Constants.TABLE_EVENT_EXECUTIONS;
import static com.netflix.conductor.cassandra.util.Constants.TABLE_EVENT_HANDLERS;
import static com.netflix.conductor.cassandra.util.Constants.TABLE_TASK_DEFS;
import static com.netflix.conductor.cassandra.util.Constants.TABLE_TASK_DEF_LIMIT;
import static com.netflix.conductor.cassandra.util.Constants.TABLE_TASK_LOOKUP;
import static com.netflix.conductor.cassandra.util.Constants.TABLE_WORKFLOWS;
import static com.netflix.conductor.cassandra.util.Constants.TABLE_WORKFLOW_DEFS;
import static com.netflix.conductor.cassandra.util.Constants.TABLE_WORKFLOW_DEFS_INDEX;
import static com.netflix.conductor.cassandra.util.Constants.TASK_DEFINITION_KEY;
import static com.netflix.conductor.cassandra.util.Constants.TASK_DEFS_KEY;
import static com.netflix.conductor.cassandra.util.Constants.TASK_DEF_NAME_KEY;
import static com.netflix.conductor.cassandra.util.Constants.TASK_ID_KEY;
import static com.netflix.conductor.cassandra.util.Constants.TOTAL_PARTITIONS_KEY;
import static com.netflix.conductor.cassandra.util.Constants.TOTAL_TASKS_KEY;
import static com.netflix.conductor.cassandra.util.Constants.WORKFLOW_DEFINITION_KEY;
import static com.netflix.conductor.cassandra.util.Constants.WORKFLOW_DEF_INDEX_KEY;
import static com.netflix.conductor.cassandra.util.Constants.WORKFLOW_DEF_INDEX_VALUE;
import static com.netflix.conductor.cassandra.util.Constants.WORKFLOW_DEF_NAME_KEY;
import static com.netflix.conductor.cassandra.util.Constants.WORKFLOW_DEF_NAME_VERSION_KEY;
import static com.netflix.conductor.cassandra.util.Constants.WORKFLOW_ID_KEY;
import static com.netflix.conductor.cassandra.util.Constants.WORKFLOW_VERSION_KEY;

/**
 * Creates the keyspace and tables.
 *
 * <p>CREATE KEYSPACE IF NOT EXISTS conductor WITH replication = { 'class' :
 * 'NetworkTopologyStrategy', 'us-east': '3'};
 *
 * <p>CREATE TABLE IF NOT EXISTS conductor.workflows ( workflow_id uuid, shard_id int, task_id text,
 * entity text, payload text, total_tasks int STATIC, total_partitions int STATIC, PRIMARY
 * KEY((workflow_id, shard_id), entity, task_id) );
 *
 * <p>CREATE TABLE IF NOT EXISTS conductor.task_lookup( task_id uuid, workflow_id uuid, PRIMARY KEY
 * (task_id) );
 *
 * <p>CREATE TABLE IF NOT EXISTS conductor.task_def_limit( task_def_name text, task_id uuid,
 * workflow_id uuid, PRIMARY KEY ((task_def_name), task_id_key) );
 *
 * <p>CREATE TABLE IF NOT EXISTS conductor.workflow_definitions( workflow_def_name text, version
 * int, workflow_definition text, PRIMARY KEY ((workflow_def_name), version) );
 *
 * <p>CREATE TABLE IF NOT EXISTS conductor.workflow_defs_index( workflow_def_version_index text,
 * workflow_def_name_version text, workflow_def_index_value text,PRIMARY KEY
 * ((workflow_def_version_index), workflow_def_name_version) );
 *
 * <p>CREATE TABLE IF NOT EXISTS conductor.task_definitions( task_defs text, task_def_name text,
 * task_definition text, PRIMARY KEY ((task_defs), task_def_name) );
 *
 * <p>CREATE TABLE IF NOT EXISTS conductor.event_handlers( handlers text, event_handler_name text,
 * event_handler text, PRIMARY KEY ((handlers), event_handler_name) );
 *
 * <p>CREATE TABLE IF NOT EXISTS conductor.event_executions( message_id text, event_handler_name
 * text, event_execution_id text, payload text, PRIMARY KEY ((message_id, event_handler_name),
 * event_execution_id) );
 */
public abstract class CassandraBaseDAO {

    private static final Logger LOGGER = LoggerFactory.getLogger(CassandraBaseDAO.class);

    private final ObjectMapper objectMapper;

    protected final Session session;
    protected final CassandraProperties properties;

    // Guards against re-running the DDL. NOTE(review): per-instance and
    // non-volatile — init() only runs once from the constructor, so this flag
    // is effectively single-threaded here; confirm before relying on it
    // elsewhere.
    private boolean initialized = false;

    public CassandraBaseDAO(
            Session session, ObjectMapper objectMapper, CassandraProperties properties) {
        this.session = session;
        this.objectMapper = objectMapper;
        this.properties = properties;
        // Schema bootstrap happens eagerly at construction time.
        init();
    }

    /**
     * Parses a UUID string, rethrowing with the supplied context message on
     * malformed input.
     *
     * @throws IllegalArgumentException if uuidString is not a valid UUID
     */
    protected static UUID toUUID(String uuidString, String message) {
        try {
            return UUID.fromString(uuidString);
        } catch (IllegalArgumentException iae) {
            throw new IllegalArgumentException(message + " " + uuidString, iae);
        }
    }

    // Creates the keyspace and all tables (all statements are IF NOT EXISTS,
    // so re-running against an existing schema is a no-op). Any failure is
    // logged and rethrown — the DAO is unusable without its schema.
    private void init() {
        try {
            if (!initialized) {
                session.execute(getCreateKeyspaceStatement());
                session.execute(getCreateWorkflowsTableStatement());
                session.execute(getCreateTaskLookupTableStatement());
                session.execute(getCreateTaskDefLimitTableStatement());
                session.execute(getCreateWorkflowDefsTableStatement());
                session.execute(getCreateWorkflowDefsIndexTableStatement());
                session.execute(getCreateTaskDefsTableStatement());
                session.execute(getCreateEventHandlersTableStatement());
                session.execute(getCreateEventExecutionsTableStatement());
                LOGGER.info(
                        "{} initialization complete! Tables created!", getClass().getSimpleName());
                initialized = true;
            }
        } catch (Exception e) {
            LOGGER.error("Error initializing and setting up keyspace and table in cassandra", e);
            throw e;
        }
    }

    // CREATE KEYSPACE with the configured replication strategy/factor and
    // durable writes enabled.
    private String getCreateKeyspaceStatement() {
        return SchemaBuilder.createKeyspace(properties.getKeyspace())
                .ifNotExists()
                .with()
                .replication(
                        ImmutableMap.of(
                                "class",
                                properties.getReplicationStrategy(),
                                properties.getReplicationFactorKey(),
                                properties.getReplicationFactorValue()))
                .durableWrites(true)
                .getQueryString();
    }

    // "workflows": partitioned by (workflow_id, shard_id); total_tasks and
    // total_partitions are STATIC (one value per partition).
    private String getCreateWorkflowsTableStatement() {
        return SchemaBuilder.createTable(properties.getKeyspace(), TABLE_WORKFLOWS)
                .ifNotExists()
                .addPartitionKey(WORKFLOW_ID_KEY, DataType.uuid())
                .addPartitionKey(SHARD_ID_KEY, DataType.cint())
                .addClusteringColumn(ENTITY_KEY, DataType.text())
                .addClusteringColumn(TASK_ID_KEY, DataType.text())
                .addColumn(PAYLOAD_KEY, DataType.text())
                .addStaticColumn(TOTAL_TASKS_KEY, DataType.cint())
                .addStaticColumn(TOTAL_PARTITIONS_KEY, DataType.cint())
                .getQueryString();
    }

    // "task_lookup": maps a task_id to its owning workflow_id.
    private String getCreateTaskLookupTableStatement() {
        return SchemaBuilder.createTable(properties.getKeyspace(), TABLE_TASK_LOOKUP)
                .ifNotExists()
                .addPartitionKey(TASK_ID_KEY, DataType.uuid())
                .addColumn(WORKFLOW_ID_KEY, DataType.uuid())
                .getQueryString();
    }

    // "task_def_limit": tracks in-flight task ids per task definition.
    private String getCreateTaskDefLimitTableStatement() {
        return SchemaBuilder.createTable(properties.getKeyspace(), TABLE_TASK_DEF_LIMIT)
                .ifNotExists()
                .addPartitionKey(TASK_DEF_NAME_KEY, DataType.text())
                .addClusteringColumn(TASK_ID_KEY, DataType.uuid())
                .addColumn(WORKFLOW_ID_KEY, DataType.uuid())
                .getQueryString();
    }

    // "workflow_definitions": one row per (name, version).
    private String getCreateWorkflowDefsTableStatement() {
        return SchemaBuilder.createTable(properties.getKeyspace(), TABLE_WORKFLOW_DEFS)
                .ifNotExists()
                .addPartitionKey(WORKFLOW_DEF_NAME_KEY, DataType.text())
                .addClusteringColumn(WORKFLOW_VERSION_KEY, DataType.cint())
                .addColumn(WORKFLOW_DEFINITION_KEY, DataType.text())
                .getQueryString();
    }

    // "workflow_defs_index": secondary index table for listing definitions.
    private String getCreateWorkflowDefsIndexTableStatement() {
        return SchemaBuilder.createTable(properties.getKeyspace(), TABLE_WORKFLOW_DEFS_INDEX)
                .ifNotExists()
                .addPartitionKey(WORKFLOW_DEF_INDEX_KEY, DataType.text())
                .addClusteringColumn(WORKFLOW_DEF_NAME_VERSION_KEY, DataType.text())
                .addColumn(WORKFLOW_DEF_INDEX_VALUE, DataType.text())
                .getQueryString();
    }

    // "task_definitions": all task defs share one partition ("task_defs"),
    // clustered by name.
    private String getCreateTaskDefsTableStatement() {
        return SchemaBuilder.createTable(properties.getKeyspace(), TABLE_TASK_DEFS)
                .ifNotExists()
                .addPartitionKey(TASK_DEFS_KEY, DataType.text())
                .addClusteringColumn(TASK_DEF_NAME_KEY, DataType.text())
                .addColumn(TASK_DEFINITION_KEY, DataType.text())
                .getQueryString();
    }

    // "event_handlers": all handlers share one partition ("handlers"),
    // clustered by handler name.
    private String getCreateEventHandlersTableStatement() {
        return SchemaBuilder.createTable(properties.getKeyspace(), TABLE_EVENT_HANDLERS)
                .ifNotExists()
                .addPartitionKey(HANDLERS_KEY, DataType.text())
                .addClusteringColumn(EVENT_HANDLER_NAME_KEY, DataType.text())
                .addColumn(EVENT_HANDLER_KEY, DataType.text())
                .getQueryString();
    }

    // "event_executions": partitioned by (message_id, event_handler_name).
    private String getCreateEventExecutionsTableStatement() {
        return SchemaBuilder.createTable(properties.getKeyspace(), TABLE_EVENT_EXECUTIONS)
                .ifNotExists()
                .addPartitionKey(MESSAGE_ID_KEY, DataType.text())
                .addPartitionKey(EVENT_HANDLER_NAME_KEY, DataType.text())
                .addClusteringColumn(EVENT_EXECUTION_ID_KEY, DataType.text())
                .addColumn(PAYLOAD_KEY, DataType.text())
                .getQueryString();
    }

    /**
     * Serializes a value to JSON.
     *
     * @throws NonTransientException if serialization fails
     */
    String toJson(Object value) {
        try {
            return objectMapper.writeValueAsString(value);
        } catch (JsonProcessingException e) {
            throw new NonTransientException("Error serializing to json", e);
        }
    }

    /**
     * Deserializes JSON into the given class.
     *
     * @throws NonTransientException if deserialization fails
     */
    <T> T readValue(String json, Class<T> clazz) {
        try {
            return objectMapper.readValue(json, clazz);
        } catch (IOException e) {
            throw new NonTransientException("Error de-serializing json", e);
        }
    }

    // Metrics helper: records a DAO request with no task/workflow context.
    void recordCassandraDaoRequests(String action) {
        recordCassandraDaoRequests(action, "n/a", "n/a");
    }

    // Metrics helper: records a DAO request tagged with task and workflow type.
    void recordCassandraDaoRequests(String action, String taskType, String workflowType) {
        Monitors.recordDaoRequests(DAO_NAME, action, taskType, workflowType);
    }

    // Metrics helper: records an event-related DAO request.
    void recordCassandraDaoEventRequests(String action, String event) {
        Monitors.recordDaoEventRequests(DAO_NAME, action, event);
    }

    // Metrics helper: records the payload size (bytes) of a DAO operation.
    void recordCassandraDaoPayloadSize(
            String action, int size, String taskType, String workflowType) {
        Monitors.recordDaoPayloadSize(DAO_NAME, action, taskType, workflowType, size);
    }

    /**
     * Per-workflow partitioning metadata, deserialized from the static columns
     * of the "workflows" table.
     */
    static class WorkflowMetadata {

        private int totalTasks;
        private int totalPartitions;

        public int getTotalTasks() {
            return totalTasks;
        }

        public void setTotalTasks(int totalTasks) {
            this.totalTasks = totalTasks;
        }

        public int getTotalPartitions() {
            return totalPartitions;
        }

        public void setTotalPartitions(int totalPartitions) {
            this.totalPartitions = totalPartitions;
        }
    }
}
7,042
0
Create_ds/conductor/cassandra-persistence/src/main/java/com/netflix/conductor/cassandra
Create_ds/conductor/cassandra-persistence/src/main/java/com/netflix/conductor/cassandra/dao/CassandraEventHandlerDAO.java
/*
 * Copyright 2022 Netflix, Inc.
 * <p>
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
 * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations under the License.
 */
package com.netflix.conductor.cassandra.dao;

import java.util.Collections;
import java.util.List;
import java.util.stream.Collectors;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.netflix.conductor.annotations.Trace;
import com.netflix.conductor.cassandra.config.CassandraProperties;
import com.netflix.conductor.cassandra.util.Statements;
import com.netflix.conductor.common.metadata.events.EventHandler;
import com.netflix.conductor.core.exception.TransientException;
import com.netflix.conductor.dao.EventHandlerDAO;
import com.netflix.conductor.metrics.Monitors;

import com.datastax.driver.core.PreparedStatement;
import com.datastax.driver.core.ResultSet;
import com.datastax.driver.core.Row;
import com.datastax.driver.core.Session;
import com.datastax.driver.core.exceptions.DriverException;
import com.fasterxml.jackson.databind.ObjectMapper;

import static com.netflix.conductor.cassandra.util.Constants.EVENT_HANDLER_KEY;
import static com.netflix.conductor.cassandra.util.Constants.HANDLERS_KEY;

/**
 * Cassandra-backed {@link EventHandlerDAO}. Event handlers are stored as JSON in a single
 * partition (keyed by {@code HANDLERS_KEY}), clustered by handler name; event-based filtering is
 * performed in memory over the full handler list.
 */
@Trace
public class CassandraEventHandlerDAO extends CassandraBaseDAO implements EventHandlerDAO {

    private static final Logger LOGGER = LoggerFactory.getLogger(CassandraEventHandlerDAO.class);
    private static final String CLASS_NAME = CassandraEventHandlerDAO.class.getSimpleName();

    private final PreparedStatement insertEventHandlerStatement;
    private final PreparedStatement selectAllEventHandlersStatement;
    private final PreparedStatement deleteEventHandlerStatement;

    public CassandraEventHandlerDAO(
            Session session,
            ObjectMapper objectMapper,
            CassandraProperties properties,
            Statements statements) {
        super(session, objectMapper, properties);

        // Writes use the configured write consistency; reads the read consistency.
        insertEventHandlerStatement =
                session.prepare(statements.getInsertEventHandlerStatement())
                        .setConsistencyLevel(properties.getWriteConsistencyLevel());
        selectAllEventHandlersStatement =
                session.prepare(statements.getSelectAllEventHandlersStatement())
                        .setConsistencyLevel(properties.getReadConsistencyLevel());
        deleteEventHandlerStatement =
                session.prepare(statements.getDeleteEventHandlerStatement())
                        .setConsistencyLevel(properties.getWriteConsistencyLevel());
    }

    /** Creates the handler; an upsert, so an existing handler of the same name is overwritten. */
    @Override
    public void addEventHandler(EventHandler eventHandler) {
        insertOrUpdateEventHandler(eventHandler);
    }

    /** Updates the handler; same upsert path as {@link #addEventHandler(EventHandler)}. */
    @Override
    public void updateEventHandler(EventHandler eventHandler) {
        insertOrUpdateEventHandler(eventHandler);
    }

    /**
     * Deletes the handler with the given name.
     *
     * @param name handler name
     * @throws TransientException if the delete fails (any failure is treated as retryable)
     */
    @Override
    public void removeEventHandler(String name) {
        try {
            recordCassandraDaoRequests("removeEventHandler");
            session.execute(deleteEventHandlerStatement.bind(name));
        } catch (Exception e) {
            // NOTE(review): broader than the DriverException caught elsewhere in this class —
            // kept as-is so all failures remain wrapped as retryable for callers.
            Monitors.error(CLASS_NAME, "removeEventHandler");
            String errorMsg = String.format("Failed to remove event handler: %s", name);
            LOGGER.error(errorMsg, e);
            throw new TransientException(errorMsg, e);
        }
    }

    /** Returns every registered event handler. */
    @Override
    public List<EventHandler> getAllEventHandlers() {
        return getAllEventHandlersFromDB();
    }

    /**
     * Returns handlers registered for the given event.
     *
     * @param event event name to match exactly
     * @param activeOnly when true, only handlers marked active are returned
     */
    @Override
    public List<EventHandler> getEventHandlersForEvent(String event, boolean activeOnly) {
        // Single pipeline: the activeOnly predicate is a no-op when activeOnly is false.
        return getAllEventHandlers().stream()
                .filter(eventHandler -> eventHandler.getEvent().equals(event))
                .filter(eventHandler -> !activeOnly || eventHandler.isActive())
                .collect(Collectors.toList());
    }

    // Reads all handlers from the single HANDLERS_KEY partition and deserializes each JSON row.
    private List<EventHandler> getAllEventHandlersFromDB() {
        try {
            ResultSet resultSet =
                    session.execute(selectAllEventHandlersStatement.bind(HANDLERS_KEY));
            List<Row> rows = resultSet.all();
            if (rows.isEmpty()) {
                LOGGER.info("No event handlers were found.");
                // Typed empty list instead of the raw Collections.EMPTY_LIST.
                return Collections.emptyList();
            }
            return rows.stream()
                    .map(row -> readValue(row.getString(EVENT_HANDLER_KEY), EventHandler.class))
                    .collect(Collectors.toList());
        } catch (DriverException e) {
            Monitors.error(CLASS_NAME, "getAllEventHandlersFromDB");
            String errorMsg = "Failed to get all event handlers";
            LOGGER.error(errorMsg, e);
            throw new TransientException(errorMsg, e);
        }
    }

    // Serializes the handler to JSON and upserts it, recording request and payload-size metrics.
    private void insertOrUpdateEventHandler(EventHandler eventHandler) {
        try {
            String handler = toJson(eventHandler);
            session.execute(insertEventHandlerStatement.bind(eventHandler.getName(), handler));
            recordCassandraDaoRequests("storeEventHandler");
            recordCassandraDaoPayloadSize("storeEventHandler", handler.length(), "n/a", "n/a");
        } catch (DriverException e) {
            Monitors.error(CLASS_NAME, "insertOrUpdateEventHandler");
            String errorMsg =
                    String.format(
                            "Error creating/updating event handler: %s/%s",
                            eventHandler.getName(), eventHandler.getEvent());
            LOGGER.error(errorMsg, e);
            throw new TransientException(errorMsg, e);
        }
    }
}
7,043
0
Create_ds/conductor/cassandra-persistence/src/main/java/com/netflix/conductor/cassandra
Create_ds/conductor/cassandra-persistence/src/main/java/com/netflix/conductor/cassandra/dao/CassandraPollDataDAO.java
/*
 * Copyright 2022 Netflix, Inc.
 * <p>
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
 * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations under the License.
 */
package com.netflix.conductor.cassandra.dao;

import java.util.List;

import com.netflix.conductor.common.metadata.tasks.PollData;
import com.netflix.conductor.dao.PollDataDAO;

/**
 * Placeholder {@link PollDataDAO} for the Cassandra persistence module: poll-data persistence is
 * not supported on the Cassandra backend, so every operation fails with
 * {@link UnsupportedOperationException}.
 */
public class CassandraPollDataDAO implements PollDataDAO {

    // Builds the exception thrown by every method; centralized so the message stays uniform.
    private static UnsupportedOperationException unsupported() {
        return new UnsupportedOperationException(
                "This method is not implemented in CassandraPollDataDAO. Please use ExecutionDAOFacade instead.");
    }

    /** Unsupported on the Cassandra backend. */
    @Override
    public void updateLastPollData(String taskDefName, String domain, String workerId) {
        throw unsupported();
    }

    /** Unsupported on the Cassandra backend. */
    @Override
    public PollData getPollData(String taskDefName, String domain) {
        throw unsupported();
    }

    /** Unsupported on the Cassandra backend. */
    @Override
    public List<PollData> getPollData(String taskDefName) {
        throw unsupported();
    }
}
7,044
0
Create_ds/conductor/cassandra-persistence/src/main/java/com/netflix/conductor/cassandra
Create_ds/conductor/cassandra-persistence/src/main/java/com/netflix/conductor/cassandra/dao/CassandraMetadataDAO.java
/*
 * Copyright 2022 Netflix, Inc.
 * <p>
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
 * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations under the License.
 */
package com.netflix.conductor.cassandra.dao;

import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
import java.util.PriorityQueue;
import java.util.stream.Collectors;

import org.apache.commons.lang3.tuple.ImmutablePair;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.netflix.conductor.annotations.Trace;
import com.netflix.conductor.annotations.VisibleForTesting;
import com.netflix.conductor.cassandra.config.CassandraProperties;
import com.netflix.conductor.cassandra.util.Statements;
import com.netflix.conductor.common.metadata.tasks.TaskDef;
import com.netflix.conductor.common.metadata.workflow.WorkflowDef;
import com.netflix.conductor.core.exception.ConflictException;
import com.netflix.conductor.core.exception.TransientException;
import com.netflix.conductor.dao.MetadataDAO;
import com.netflix.conductor.metrics.Monitors;

import com.datastax.driver.core.PreparedStatement;
import com.datastax.driver.core.ResultSet;
import com.datastax.driver.core.Row;
import com.datastax.driver.core.Session;
import com.datastax.driver.core.exceptions.DriverException;
import com.fasterxml.jackson.databind.ObjectMapper;

import static com.netflix.conductor.cassandra.util.Constants.TASK_DEFINITION_KEY;
import static com.netflix.conductor.cassandra.util.Constants.TASK_DEFS_KEY;
import static com.netflix.conductor.cassandra.util.Constants.WORKFLOW_DEFINITION_KEY;
import static com.netflix.conductor.cassandra.util.Constants.WORKFLOW_DEF_INDEX_KEY;
import static com.netflix.conductor.cassandra.util.Constants.WORKFLOW_DEF_NAME_VERSION_KEY;
import static com.netflix.conductor.common.metadata.tasks.TaskDef.ONE_HOUR;

/**
 * Cassandra-backed {@link MetadataDAO}. Workflow and task definitions are stored as JSON; a
 * separate single-partition index table (keyed by {@code WORKFLOW_DEF_INDEX_KEY}) lists every
 * "name{@value #INDEX_DELIMITER}version" entry so all definitions can be enumerated.
 */
@Trace
public class CassandraMetadataDAO extends CassandraBaseDAO implements MetadataDAO {

    private static final Logger LOGGER = LoggerFactory.getLogger(CassandraMetadataDAO.class);
    private static final String CLASS_NAME = CassandraMetadataDAO.class.getSimpleName();
    // Separator between workflow name and version in the index table's entries.
    private static final String INDEX_DELIMITER = "/";

    private final PreparedStatement insertWorkflowDefStatement;
    private final PreparedStatement insertWorkflowDefVersionIndexStatement;
    private final PreparedStatement insertTaskDefStatement;
    private final PreparedStatement selectWorkflowDefStatement;
    private final PreparedStatement selectAllWorkflowDefVersionsByNameStatement;
    private final PreparedStatement selectAllWorkflowDefsStatement;
    private final PreparedStatement selectAllWorkflowDefsLatestVersionsStatement;
    private final PreparedStatement selectTaskDefStatement;
    private final PreparedStatement selectAllTaskDefsStatement;
    private final PreparedStatement updateWorkflowDefStatement;
    private final PreparedStatement deleteWorkflowDefStatement;
    private final PreparedStatement deleteWorkflowDefIndexStatement;
    private final PreparedStatement deleteTaskDefStatement;

    public CassandraMetadataDAO(
            Session session,
            ObjectMapper objectMapper,
            CassandraProperties properties,
            Statements statements) {
        super(session, objectMapper, properties);

        // Writes use the configured write consistency; reads the read consistency.
        this.insertWorkflowDefStatement =
                session.prepare(statements.getInsertWorkflowDefStatement())
                        .setConsistencyLevel(properties.getWriteConsistencyLevel());
        this.insertWorkflowDefVersionIndexStatement =
                session.prepare(statements.getInsertWorkflowDefVersionIndexStatement())
                        .setConsistencyLevel(properties.getWriteConsistencyLevel());
        this.insertTaskDefStatement =
                session.prepare(statements.getInsertTaskDefStatement())
                        .setConsistencyLevel(properties.getWriteConsistencyLevel());

        this.selectWorkflowDefStatement =
                session.prepare(statements.getSelectWorkflowDefStatement())
                        .setConsistencyLevel(properties.getReadConsistencyLevel());
        this.selectAllWorkflowDefVersionsByNameStatement =
                session.prepare(statements.getSelectAllWorkflowDefVersionsByNameStatement())
                        .setConsistencyLevel(properties.getReadConsistencyLevel());
        this.selectAllWorkflowDefsStatement =
                session.prepare(statements.getSelectAllWorkflowDefsStatement())
                        .setConsistencyLevel(properties.getReadConsistencyLevel());
        this.selectAllWorkflowDefsLatestVersionsStatement =
                session.prepare(statements.getSelectAllWorkflowDefsLatestVersionsStatement())
                        .setConsistencyLevel(properties.getReadConsistencyLevel());
        this.selectTaskDefStatement =
                session.prepare(statements.getSelectTaskDefStatement())
                        .setConsistencyLevel(properties.getReadConsistencyLevel());
        this.selectAllTaskDefsStatement =
                session.prepare(statements.getSelectAllTaskDefsStatement())
                        .setConsistencyLevel(properties.getReadConsistencyLevel());

        this.updateWorkflowDefStatement =
                session.prepare(statements.getUpdateWorkflowDefStatement())
                        .setConsistencyLevel(properties.getWriteConsistencyLevel());

        this.deleteWorkflowDefStatement =
                session.prepare(statements.getDeleteWorkflowDefStatement())
                        .setConsistencyLevel(properties.getWriteConsistencyLevel());
        this.deleteWorkflowDefIndexStatement =
                session.prepare(statements.getDeleteWorkflowDefIndexStatement())
                        .setConsistencyLevel(properties.getWriteConsistencyLevel());
        this.deleteTaskDefStatement =
                session.prepare(statements.getDeleteTaskDefStatement())
                        .setConsistencyLevel(properties.getWriteConsistencyLevel());
    }

    /** Creates the task definition (upsert — an existing definition is overwritten). */
    @Override
    public TaskDef createTaskDef(TaskDef taskDef) {
        return insertOrUpdateTaskDef(taskDef);
    }

    /** Updates the task definition; same upsert path as {@link #createTaskDef(TaskDef)}. */
    @Override
    public TaskDef updateTaskDef(TaskDef taskDef) {
        return insertOrUpdateTaskDef(taskDef);
    }

    /** Returns the task definition with the given name, or {@code null} if absent. */
    @Override
    public TaskDef getTaskDef(String name) {
        return getTaskDefFromDB(name);
    }

    /** Returns all task definitions; empty list if none exist. */
    @Override
    public List<TaskDef> getAllTaskDefs() {
        return getAllTaskDefsFromDB();
    }

    /**
     * Deletes the task definition with the given name.
     *
     * @throws TransientException if the delete fails
     */
    @Override
    public void removeTaskDef(String name) {
        try {
            recordCassandraDaoRequests("removeTaskDef");
            session.execute(deleteTaskDefStatement.bind(name));
        } catch (DriverException e) {
            Monitors.error(CLASS_NAME, "removeTaskDef");
            String errorMsg = String.format("Failed to remove task definition: %s", name);
            LOGGER.error(errorMsg, e);
            throw new TransientException(errorMsg, e);
        }
    }

    /**
     * Persists a new workflow definition and registers it in the name/version index.
     *
     * @throws ConflictException if the name/version pair already exists (LWT not applied)
     * @throws TransientException on driver failure
     */
    @Override
    public void createWorkflowDef(WorkflowDef workflowDef) {
        try {
            String workflowDefinition = toJson(workflowDef);
            // Conditional insert (IF NOT EXISTS); wasApplied() is false on a name/version clash.
            if (!session.execute(
                            insertWorkflowDefStatement.bind(
                                    workflowDef.getName(),
                                    workflowDef.getVersion(),
                                    workflowDefinition))
                    .wasApplied()) {
                throw new ConflictException(
                        "Workflow: %s, version: %s already exists!",
                        workflowDef.getName(), workflowDef.getVersion());
            }
            String workflowDefIndex =
                    getWorkflowDefIndexValue(workflowDef.getName(), workflowDef.getVersion());
            session.execute(
                    insertWorkflowDefVersionIndexStatement.bind(
                            workflowDefIndex, workflowDefIndex));
            recordCassandraDaoRequests("createWorkflowDef");
            recordCassandraDaoPayloadSize(
                    "createWorkflowDef", workflowDefinition.length(), "n/a", workflowDef.getName());
        } catch (DriverException e) {
            Monitors.error(CLASS_NAME, "createWorkflowDef");
            String errorMsg =
                    String.format(
                            "Error creating workflow definition: %s/%d",
                            workflowDef.getName(), workflowDef.getVersion());
            LOGGER.error(errorMsg, e);
            throw new TransientException(errorMsg, e);
        }
    }

    /**
     * Overwrites the stored JSON for an existing workflow definition and refreshes its index entry.
     *
     * @throws TransientException on driver failure
     */
    @Override
    public void updateWorkflowDef(WorkflowDef workflowDef) {
        try {
            String workflowDefinition = toJson(workflowDef);
            session.execute(
                    updateWorkflowDefStatement.bind(
                            workflowDefinition, workflowDef.getName(), workflowDef.getVersion()));
            String workflowDefIndex =
                    getWorkflowDefIndexValue(workflowDef.getName(), workflowDef.getVersion());
            session.execute(
                    insertWorkflowDefVersionIndexStatement.bind(
                            workflowDefIndex, workflowDefIndex));
            recordCassandraDaoRequests("updateWorkflowDef");
            recordCassandraDaoPayloadSize(
                    "updateWorkflowDef", workflowDefinition.length(), "n/a", workflowDef.getName());
        } catch (DriverException e) {
            Monitors.error(CLASS_NAME, "updateWorkflowDef");
            String errorMsg =
                    String.format(
                            "Error updating workflow definition: %s/%d",
                            workflowDef.getName(), workflowDef.getVersion());
            LOGGER.error(errorMsg, e);
            throw new TransientException(errorMsg, e);
        }
    }

    /** Returns the highest-version definition for {@code name}, or empty if none exist. */
    @Override
    public Optional<WorkflowDef> getLatestWorkflowDef(String name) {
        List<WorkflowDef> workflowDefList = getAllWorkflowDefVersions(name);
        if (workflowDefList == null || workflowDefList.isEmpty()) {
            return Optional.empty();
        }
        // max() replaces the original sort-then-take-last: O(n) instead of O(n log n).
        return workflowDefList.stream().max(Comparator.comparingInt(WorkflowDef::getVersion));
    }

    /**
     * Returns the definition for the exact name/version pair, or empty if absent.
     *
     * @throws TransientException on driver failure
     */
    @Override
    public Optional<WorkflowDef> getWorkflowDef(String name, int version) {
        try {
            recordCassandraDaoRequests("getWorkflowDef");
            ResultSet resultSet = session.execute(selectWorkflowDefStatement.bind(name, version));
            WorkflowDef workflowDef =
                    Optional.ofNullable(resultSet.one())
                            .map(
                                    row ->
                                            readValue(
                                                    row.getString(WORKFLOW_DEFINITION_KEY),
                                                    WorkflowDef.class))
                            .orElse(null);
            return Optional.ofNullable(workflowDef);
        } catch (DriverException e) {
            // Fixed: original reported this failure under the "getTaskDef" metric tag.
            Monitors.error(CLASS_NAME, "getWorkflowDef");
            String errorMsg = String.format("Error fetching workflow def: %s/%d", name, version);
            LOGGER.error(errorMsg, e);
            throw new TransientException(errorMsg, e);
        }
    }

    /**
     * Deletes the definition row and its index entry for the given name/version.
     *
     * @throws TransientException on driver failure
     */
    @Override
    public void removeWorkflowDef(String name, Integer version) {
        try {
            session.execute(deleteWorkflowDefStatement.bind(name, version));
            session.execute(
                    deleteWorkflowDefIndexStatement.bind(
                            WORKFLOW_DEF_INDEX_KEY, getWorkflowDefIndexValue(name, version)));
        } catch (DriverException e) {
            Monitors.error(CLASS_NAME, "removeWorkflowDef");
            String errorMsg =
                    String.format("Failed to remove workflow definition: %s/%d", name, version);
            LOGGER.error(errorMsg, e);
            throw new TransientException(errorMsg, e);
        }
    }

    /**
     * Enumerates every name/version entry in the index table and resolves each to its stored
     * definition; entries whose definition row is missing are silently skipped.
     */
    @Override
    public List<WorkflowDef> getAllWorkflowDefs() {
        try {
            ResultSet resultSet =
                    session.execute(selectAllWorkflowDefsStatement.bind(WORKFLOW_DEF_INDEX_KEY));
            List<Row> rows = resultSet.all();
            if (rows.isEmpty()) {
                LOGGER.info("No workflow definitions were found.");
                // Typed empty list instead of the raw Collections.EMPTY_LIST.
                return Collections.emptyList();
            }
            return rows.stream()
                    .map(
                            row -> {
                                String defNameVersion =
                                        row.getString(WORKFLOW_DEF_NAME_VERSION_KEY);
                                var nameVersion = getWorkflowNameAndVersion(defNameVersion);
                                return getWorkflowDef(nameVersion.getLeft(), nameVersion.getRight())
                                        .orElse(null);
                            })
                    .filter(Objects::nonNull)
                    .collect(Collectors.toList());
        } catch (DriverException e) {
            Monitors.error(CLASS_NAME, "getAllWorkflowDefs");
            String errorMsg = "Error retrieving all workflow defs";
            LOGGER.error(errorMsg, e);
            throw new TransientException(errorMsg, e);
        }
    }

    /** Returns, for each workflow name, only its highest-version definition. */
    @Override
    public List<WorkflowDef> getAllWorkflowDefsLatestVersions() {
        try {
            ResultSet resultSet =
                    session.execute(
                            selectAllWorkflowDefsLatestVersionsStatement.bind(
                                    WORKFLOW_DEF_INDEX_KEY));
            List<Row> rows = resultSet.all();
            if (rows.isEmpty()) {
                LOGGER.info("No workflow definitions were found.");
                return Collections.emptyList();
            }

            // Max-heap per workflow name (highest version at the head).
            Map<String, PriorityQueue<WorkflowDef>> allWorkflowDefs = new HashMap<>();
            for (Row row : rows) {
                String defNameVersion = row.getString(WORKFLOW_DEF_NAME_VERSION_KEY);
                var nameVersion = getWorkflowNameAndVersion(defNameVersion);
                WorkflowDef def =
                        getWorkflowDef(nameVersion.getLeft(), nameVersion.getRight()).orElse(null);
                if (def == null) {
                    continue;
                }
                allWorkflowDefs
                        .computeIfAbsent(
                                def.getName(),
                                k ->
                                        new PriorityQueue<>(
                                                Comparator.comparingInt(WorkflowDef::getVersion)
                                                        .reversed()))
                        .add(def);
            }
            return allWorkflowDefs.values().stream()
                    .map(PriorityQueue::poll)
                    .collect(Collectors.toList());
        } catch (DriverException e) {
            Monitors.error(CLASS_NAME, "getAllWorkflowDefsLatestVersions");
            String errorMsg = "Error retrieving all workflow defs latest versions";
            LOGGER.error(errorMsg, e);
            throw new TransientException(errorMsg, e);
        }
    }

    // Reads a single task definition; returns null when absent.
    private TaskDef getTaskDefFromDB(String name) {
        try {
            ResultSet resultSet = session.execute(selectTaskDefStatement.bind(name));
            recordCassandraDaoRequests("getTaskDef", name, null);
            return Optional.ofNullable(resultSet.one()).map(this::setDefaults).orElse(null);
        } catch (DriverException e) {
            Monitors.error(CLASS_NAME, "getTaskDef");
            String errorMsg = String.format("Failed to get task def: %s", name);
            LOGGER.error(errorMsg, e);
            throw new TransientException(errorMsg, e);
        }
    }

    // Reads every task definition from the single TASK_DEFS_KEY partition.
    private List<TaskDef> getAllTaskDefsFromDB() {
        try {
            ResultSet resultSet = session.execute(selectAllTaskDefsStatement.bind(TASK_DEFS_KEY));
            List<Row> rows = resultSet.all();
            if (rows.isEmpty()) {
                LOGGER.info("No task definitions were found.");
                return Collections.emptyList();
            }
            return rows.stream().map(this::setDefaults).collect(Collectors.toList());
        } catch (DriverException e) {
            Monitors.error(CLASS_NAME, "getAllTaskDefs");
            String errorMsg = "Failed to get all task defs";
            LOGGER.error(errorMsg, e);
            throw new TransientException(errorMsg, e);
        }
    }

    // Reads every stored version of the named workflow; returns null (not empty) when none exist —
    // callers must null-check.
    private List<WorkflowDef> getAllWorkflowDefVersions(String name) {
        try {
            ResultSet resultSet =
                    session.execute(selectAllWorkflowDefVersionsByNameStatement.bind(name));
            recordCassandraDaoRequests("getAllWorkflowDefVersions", "n/a", name);
            List<Row> rows = resultSet.all();
            if (rows.isEmpty()) {
                LOGGER.info("No workflow definitions were found for: {}", name);
                return null;
            }
            return rows.stream()
                    .map(
                            row ->
                                    readValue(
                                            row.getString(WORKFLOW_DEFINITION_KEY),
                                            WorkflowDef.class))
                    .collect(Collectors.toList());
        } catch (DriverException e) {
            Monitors.error(CLASS_NAME, "getAllWorkflowDefVersions");
            String errorMsg = String.format("Failed to get workflows defs for : %s", name);
            LOGGER.error(errorMsg, e);
            throw new TransientException(errorMsg, e);
        }
    }

    // Serializes and upserts a task definition, recording metrics; returns the input unchanged.
    private TaskDef insertOrUpdateTaskDef(TaskDef taskDef) {
        try {
            String taskDefinition = toJson(taskDef);
            session.execute(insertTaskDefStatement.bind(taskDef.getName(), taskDefinition));
            recordCassandraDaoRequests("storeTaskDef");
            recordCassandraDaoPayloadSize(
                    "storeTaskDef", taskDefinition.length(), taskDef.getName(), "n/a");
        } catch (DriverException e) {
            Monitors.error(CLASS_NAME, "insertOrUpdateTaskDef");
            String errorMsg =
                    String.format("Error creating/updating task definition: %s", taskDef.getName());
            LOGGER.error(errorMsg, e);
            throw new TransientException(errorMsg, e);
        }
        return taskDef;
    }

    /** Builds the "name/version" index entry for a workflow definition. */
    @VisibleForTesting
    String getWorkflowDefIndexValue(String name, int version) {
        return name + INDEX_DELIMITER + version;
    }

    /**
     * Splits an index entry back into (name, version). Splits on the LAST delimiter, so workflow
     * names containing "/" are handled correctly.
     *
     * @throws IllegalStateException if the entry does not match the expected pattern
     */
    @VisibleForTesting
    ImmutablePair<String, Integer> getWorkflowNameAndVersion(String nameVersionStr) {
        int lastIndexOfDelimiter = nameVersionStr.lastIndexOf(INDEX_DELIMITER);

        if (lastIndexOfDelimiter == -1) {
            throw new IllegalStateException(
                    nameVersionStr
                            + " is not in the 'workflowName"
                            + INDEX_DELIMITER
                            + "version' pattern.");
        }

        String workflowName = nameVersionStr.substring(0, lastIndexOfDelimiter);
        String versionStr = nameVersionStr.substring(lastIndexOfDelimiter + 1);

        try {
            return new ImmutablePair<>(workflowName, Integer.parseInt(versionStr));
        } catch (NumberFormatException e) {
            throw new IllegalStateException(
                    versionStr + " in " + nameVersionStr + " is not a valid number.");
        }
    }

    // Deserializes a task-def row and back-fills responseTimeoutSeconds when unset:
    // timeoutSeconds - 1 if a timeout exists, else ONE_HOUR.
    private TaskDef setDefaults(Row row) {
        TaskDef taskDef = readValue(row.getString(TASK_DEFINITION_KEY), TaskDef.class);
        if (taskDef != null && taskDef.getResponseTimeoutSeconds() == 0) {
            taskDef.setResponseTimeoutSeconds(
                    taskDef.getTimeoutSeconds() == 0 ? ONE_HOUR : taskDef.getTimeoutSeconds() - 1);
        }
        return taskDef;
    }
}
7,045
0
Create_ds/conductor/cassandra-persistence/src/main/java/com/netflix/conductor/cassandra
Create_ds/conductor/cassandra-persistence/src/main/java/com/netflix/conductor/cassandra/dao/CassandraExecutionDAO.java
/* * Copyright 2022 Netflix, Inc. * <p> * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * <p> * http://www.apache.org/licenses/LICENSE-2.0 * <p> * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. */ package com.netflix.conductor.cassandra.dao; import java.util.*; import java.util.stream.Collectors; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import com.netflix.conductor.annotations.Trace; import com.netflix.conductor.cassandra.config.CassandraProperties; import com.netflix.conductor.cassandra.util.Statements; import com.netflix.conductor.common.metadata.events.EventExecution; import com.netflix.conductor.common.metadata.tasks.TaskDef; import com.netflix.conductor.core.exception.NonTransientException; import com.netflix.conductor.core.exception.NotFoundException; import com.netflix.conductor.core.exception.TransientException; import com.netflix.conductor.dao.ConcurrentExecutionLimitDAO; import com.netflix.conductor.dao.ExecutionDAO; import com.netflix.conductor.metrics.Monitors; import com.netflix.conductor.model.TaskModel; import com.netflix.conductor.model.WorkflowModel; import com.datastax.driver.core.*; import com.datastax.driver.core.exceptions.DriverException; import com.fasterxml.jackson.databind.ObjectMapper; import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; import static com.netflix.conductor.cassandra.util.Constants.*; @Trace public class CassandraExecutionDAO extends CassandraBaseDAO implements ExecutionDAO, ConcurrentExecutionLimitDAO { private static final Logger LOGGER = 
LoggerFactory.getLogger(CassandraExecutionDAO.class); private static final String CLASS_NAME = CassandraExecutionDAO.class.getSimpleName(); protected final PreparedStatement insertWorkflowStatement; protected final PreparedStatement insertTaskStatement; protected final PreparedStatement insertEventExecutionStatement; protected final PreparedStatement selectTotalStatement; protected final PreparedStatement selectTaskStatement; protected final PreparedStatement selectWorkflowStatement; protected final PreparedStatement selectWorkflowWithTasksStatement; protected final PreparedStatement selectTaskLookupStatement; protected final PreparedStatement selectTasksFromTaskDefLimitStatement; protected final PreparedStatement selectEventExecutionsStatement; protected final PreparedStatement updateWorkflowStatement; protected final PreparedStatement updateTotalTasksStatement; protected final PreparedStatement updateTotalPartitionsStatement; protected final PreparedStatement updateTaskLookupStatement; protected final PreparedStatement updateTaskDefLimitStatement; protected final PreparedStatement updateEventExecutionStatement; protected final PreparedStatement deleteWorkflowStatement; protected final PreparedStatement deleteTaskStatement; protected final PreparedStatement deleteTaskLookupStatement; protected final PreparedStatement deleteTaskDefLimitStatement; protected final PreparedStatement deleteEventExecutionStatement; protected final int eventExecutionsTTL; public CassandraExecutionDAO( Session session, ObjectMapper objectMapper, CassandraProperties properties, Statements statements) { super(session, objectMapper, properties); eventExecutionsTTL = (int) properties.getEventExecutionPersistenceTtl().getSeconds(); this.insertWorkflowStatement = session.prepare(statements.getInsertWorkflowStatement()) .setConsistencyLevel(properties.getWriteConsistencyLevel()); this.insertTaskStatement = session.prepare(statements.getInsertTaskStatement()) 
.setConsistencyLevel(properties.getWriteConsistencyLevel()); this.insertEventExecutionStatement = session.prepare(statements.getInsertEventExecutionStatement()) .setConsistencyLevel(properties.getWriteConsistencyLevel()); this.selectTotalStatement = session.prepare(statements.getSelectTotalStatement()) .setConsistencyLevel(properties.getReadConsistencyLevel()); this.selectTaskStatement = session.prepare(statements.getSelectTaskStatement()) .setConsistencyLevel(properties.getReadConsistencyLevel()); this.selectWorkflowStatement = session.prepare(statements.getSelectWorkflowStatement()) .setConsistencyLevel(properties.getReadConsistencyLevel()); this.selectWorkflowWithTasksStatement = session.prepare(statements.getSelectWorkflowWithTasksStatement()) .setConsistencyLevel(properties.getReadConsistencyLevel()); this.selectTaskLookupStatement = session.prepare(statements.getSelectTaskFromLookupTableStatement()) .setConsistencyLevel(properties.getReadConsistencyLevel()); this.selectTasksFromTaskDefLimitStatement = session.prepare(statements.getSelectTasksFromTaskDefLimitStatement()) .setConsistencyLevel(properties.getReadConsistencyLevel()); this.selectEventExecutionsStatement = session.prepare( statements .getSelectAllEventExecutionsForMessageFromEventExecutionsStatement()) .setConsistencyLevel(properties.getReadConsistencyLevel()); this.updateWorkflowStatement = session.prepare(statements.getUpdateWorkflowStatement()) .setConsistencyLevel(properties.getWriteConsistencyLevel()); this.updateTotalTasksStatement = session.prepare(statements.getUpdateTotalTasksStatement()) .setConsistencyLevel(properties.getWriteConsistencyLevel()); this.updateTotalPartitionsStatement = session.prepare(statements.getUpdateTotalPartitionsStatement()) .setConsistencyLevel(properties.getWriteConsistencyLevel()); this.updateTaskLookupStatement = session.prepare(statements.getUpdateTaskLookupStatement()) .setConsistencyLevel(properties.getWriteConsistencyLevel()); this.updateTaskDefLimitStatement 
= session.prepare(statements.getUpdateTaskDefLimitStatement()) .setConsistencyLevel(properties.getWriteConsistencyLevel()); this.updateEventExecutionStatement = session.prepare(statements.getUpdateEventExecutionStatement()) .setConsistencyLevel(properties.getWriteConsistencyLevel()); this.deleteWorkflowStatement = session.prepare(statements.getDeleteWorkflowStatement()) .setConsistencyLevel(properties.getWriteConsistencyLevel()); this.deleteTaskStatement = session.prepare(statements.getDeleteTaskStatement()) .setConsistencyLevel(properties.getWriteConsistencyLevel()); this.deleteTaskLookupStatement = session.prepare(statements.getDeleteTaskLookupStatement()) .setConsistencyLevel(properties.getWriteConsistencyLevel()); this.deleteTaskDefLimitStatement = session.prepare(statements.getDeleteTaskDefLimitStatement()) .setConsistencyLevel(properties.getWriteConsistencyLevel()); this.deleteEventExecutionStatement = session.prepare(statements.getDeleteEventExecutionsStatement()) .setConsistencyLevel(properties.getWriteConsistencyLevel()); } @Override public List<TaskModel> getPendingTasksByWorkflow(String taskName, String workflowId) { List<TaskModel> tasks = getTasksForWorkflow(workflowId); return tasks.stream() .filter(task -> taskName.equals(task.getTaskType())) .filter(task -> TaskModel.Status.IN_PROGRESS.equals(task.getStatus())) .collect(Collectors.toList()); } /** * This is a dummy implementation and this feature is not implemented for Cassandra backed * Conductor */ @Override public List<TaskModel> getTasks(String taskType, String startKey, int count) { throw new UnsupportedOperationException( "This method is not implemented in CassandraExecutionDAO. Please use ExecutionDAOFacade instead."); } /** * Inserts tasks into the Cassandra datastore. <b>Note:</b> Creates the task_id to workflow_id * mapping in the task_lookup table first. Once this succeeds, inserts the tasks into the * workflows table. Tasks belonging to the same shard are created using batch statements. 
* * @param tasks tasks to be created */ @Override public List<TaskModel> createTasks(List<TaskModel> tasks) { validateTasks(tasks); String workflowId = tasks.get(0).getWorkflowInstanceId(); UUID workflowUUID = toUUID(workflowId, "Invalid workflow id"); try { WorkflowMetadata workflowMetadata = getWorkflowMetadata(workflowId); int totalTasks = workflowMetadata.getTotalTasks() + tasks.size(); // TODO: write into multiple shards based on number of tasks // update the task_lookup table tasks.forEach( task -> { if (task.getScheduledTime() == 0) { task.setScheduledTime(System.currentTimeMillis()); } session.execute( updateTaskLookupStatement.bind( workflowUUID, toUUID(task.getTaskId(), "Invalid task id"))); }); // update all the tasks in the workflow using batch BatchStatement batchStatement = new BatchStatement(); tasks.forEach( task -> { String taskPayload = toJson(task); batchStatement.add( insertTaskStatement.bind( workflowUUID, DEFAULT_SHARD_ID, task.getTaskId(), taskPayload)); recordCassandraDaoRequests( "createTask", task.getTaskType(), task.getWorkflowType()); recordCassandraDaoPayloadSize( "createTask", taskPayload.length(), task.getTaskType(), task.getWorkflowType()); }); batchStatement.add( updateTotalTasksStatement.bind(totalTasks, workflowUUID, DEFAULT_SHARD_ID)); session.execute(batchStatement); // update the total tasks and partitions for the workflow session.execute( updateTotalPartitionsStatement.bind( DEFAULT_TOTAL_PARTITIONS, totalTasks, workflowUUID)); return tasks; } catch (DriverException e) { Monitors.error(CLASS_NAME, "createTasks"); String errorMsg = String.format( "Error creating %d tasks for workflow: %s", tasks.size(), workflowId); LOGGER.error(errorMsg, e); throw new TransientException(errorMsg, e); } } @Override public void updateTask(TaskModel task) { try { // TODO: calculate the shard number the task belongs to String taskPayload = toJson(task); recordCassandraDaoRequests("updateTask", task.getTaskType(), task.getWorkflowType()); 
recordCassandraDaoPayloadSize( "updateTask", taskPayload.length(), task.getTaskType(), task.getWorkflowType()); session.execute( insertTaskStatement.bind( UUID.fromString(task.getWorkflowInstanceId()), DEFAULT_SHARD_ID, task.getTaskId(), taskPayload)); if (task.getTaskDefinition().isPresent() && task.getTaskDefinition().get().concurrencyLimit() > 0) { if (task.getStatus().isTerminal()) { removeTaskFromLimit(task); } else if (task.getStatus() == TaskModel.Status.IN_PROGRESS) { addTaskToLimit(task); } } } catch (DriverException e) { Monitors.error(CLASS_NAME, "updateTask"); String errorMsg = String.format( "Error updating task: %s in workflow: %s", task.getTaskId(), task.getWorkflowInstanceId()); LOGGER.error(errorMsg, e); throw new TransientException(errorMsg, e); } } /** * This is a dummy implementation and this feature is not implemented for Cassandra backed * Conductor */ @Override public boolean exceedsLimit(TaskModel task) { Optional<TaskDef> taskDefinition = task.getTaskDefinition(); if (taskDefinition.isEmpty()) { return false; } int limit = taskDefinition.get().concurrencyLimit(); if (limit <= 0) { return false; } try { recordCassandraDaoRequests( "selectTaskDefLimit", task.getTaskType(), task.getWorkflowType()); ResultSet resultSet = session.execute( selectTasksFromTaskDefLimitStatement.bind(task.getTaskDefName())); List<String> taskIds = resultSet.all().stream() .map(row -> row.getUUID(TASK_ID_KEY).toString()) .collect(Collectors.toList()); long current = taskIds.size(); if (!taskIds.contains(task.getTaskId()) && current >= limit) { LOGGER.info( "Task execution count limited. 
task - {}:{}, limit: {}, current: {}", task.getTaskId(), task.getTaskDefName(), limit, current); Monitors.recordTaskConcurrentExecutionLimited(task.getTaskDefName(), limit); return true; } } catch (DriverException e) { Monitors.error(CLASS_NAME, "exceedsLimit"); String errorMsg = String.format( "Failed to get in progress limit - %s:%s in workflow :%s", task.getTaskDefName(), task.getTaskId(), task.getWorkflowInstanceId()); LOGGER.error(errorMsg, e); throw new TransientException(errorMsg); } return false; } @Override public boolean removeTask(String taskId) { TaskModel task = getTask(taskId); if (task == null) { LOGGER.warn("No such task found by id {}", taskId); return false; } return removeTask(task); } @Override public TaskModel getTask(String taskId) { try { String workflowId = lookupWorkflowIdFromTaskId(taskId); if (workflowId == null) { return null; } // TODO: implement for query against multiple shards ResultSet resultSet = session.execute( selectTaskStatement.bind( UUID.fromString(workflowId), DEFAULT_SHARD_ID, taskId)); return Optional.ofNullable(resultSet.one()) .map( row -> { String taskRow = row.getString(PAYLOAD_KEY); TaskModel task = readValue(taskRow, TaskModel.class); recordCassandraDaoRequests( "getTask", task.getTaskType(), task.getWorkflowType()); recordCassandraDaoPayloadSize( "getTask", taskRow.length(), task.getTaskType(), task.getWorkflowType()); return task; }) .orElse(null); } catch (DriverException e) { Monitors.error(CLASS_NAME, "getTask"); String errorMsg = String.format("Error getting task by id: %s", taskId); LOGGER.error(errorMsg, e); throw new TransientException(errorMsg); } } @Override public List<TaskModel> getTasks(List<String> taskIds) { Preconditions.checkNotNull(taskIds); Preconditions.checkArgument(taskIds.size() > 0, "Task ids list cannot be empty"); String workflowId = lookupWorkflowIdFromTaskId(taskIds.get(0)); if (workflowId == null) { return null; } return getWorkflow(workflowId, true).getTasks().stream() .filter(task -> 
taskIds.contains(task.getTaskId())) .collect(Collectors.toList()); } /** * This is a dummy implementation and this feature is not implemented for Cassandra backed * Conductor */ @Override public List<TaskModel> getPendingTasksForTaskType(String taskType) { throw new UnsupportedOperationException( "This method is not implemented in CassandraExecutionDAO. Please use ExecutionDAOFacade instead."); } @Override public List<TaskModel> getTasksForWorkflow(String workflowId) { return getWorkflow(workflowId, true).getTasks(); } @Override public String createWorkflow(WorkflowModel workflow) { try { List<TaskModel> tasks = workflow.getTasks(); workflow.setTasks(new LinkedList<>()); String payload = toJson(workflow); recordCassandraDaoRequests("createWorkflow", "n/a", workflow.getWorkflowName()); recordCassandraDaoPayloadSize( "createWorkflow", payload.length(), "n/a", workflow.getWorkflowName()); session.execute( insertWorkflowStatement.bind( UUID.fromString(workflow.getWorkflowId()), 1, "", payload, 0, 1)); workflow.setTasks(tasks); return workflow.getWorkflowId(); } catch (DriverException e) { Monitors.error(CLASS_NAME, "createWorkflow"); String errorMsg = String.format("Error creating workflow: %s", workflow.getWorkflowId()); LOGGER.error(errorMsg, e); throw new TransientException(errorMsg, e); } } @Override public String updateWorkflow(WorkflowModel workflow) { try { List<TaskModel> tasks = workflow.getTasks(); workflow.setTasks(new LinkedList<>()); String payload = toJson(workflow); recordCassandraDaoRequests("updateWorkflow", "n/a", workflow.getWorkflowName()); recordCassandraDaoPayloadSize( "updateWorkflow", payload.length(), "n/a", workflow.getWorkflowName()); session.execute( updateWorkflowStatement.bind( payload, UUID.fromString(workflow.getWorkflowId()))); workflow.setTasks(tasks); return workflow.getWorkflowId(); } catch (DriverException e) { Monitors.error(CLASS_NAME, "updateWorkflow"); String errorMsg = String.format("Failed to update workflow: %s", 
workflow.getWorkflowId()); LOGGER.error(errorMsg, e); throw new TransientException(errorMsg); } } @Override public boolean removeWorkflow(String workflowId) { WorkflowModel workflow = getWorkflow(workflowId, true); boolean removed = false; // TODO: calculate number of shards and iterate if (workflow != null) { try { recordCassandraDaoRequests("removeWorkflow", "n/a", workflow.getWorkflowName()); ResultSet resultSet = session.execute( deleteWorkflowStatement.bind( UUID.fromString(workflowId), DEFAULT_SHARD_ID)); removed = resultSet.wasApplied(); } catch (DriverException e) { Monitors.error(CLASS_NAME, "removeWorkflow"); String errorMsg = String.format("Failed to remove workflow: %s", workflowId); LOGGER.error(errorMsg, e); throw new TransientException(errorMsg); } workflow.getTasks().forEach(this::removeTaskLookup); } return removed; } /** * This is a dummy implementation and this feature is not yet implemented for Cassandra backed * Conductor */ @Override public boolean removeWorkflowWithExpiry(String workflowId, int ttlSeconds) { throw new UnsupportedOperationException( "This method is not currently implemented in CassandraExecutionDAO. Please use RedisDAO mode instead now for using TTLs."); } /** * This is a dummy implementation and this feature is not implemented for Cassandra backed * Conductor */ @Override public void removeFromPendingWorkflow(String workflowType, String workflowId) { throw new UnsupportedOperationException( "This method is not implemented in CassandraExecutionDAO. 
Please use ExecutionDAOFacade instead."); } @Override public WorkflowModel getWorkflow(String workflowId) { return getWorkflow(workflowId, true); } @Override public WorkflowModel getWorkflow(String workflowId, boolean includeTasks) { UUID workflowUUID = toUUID(workflowId, "Invalid workflow id"); try { WorkflowModel workflow = null; ResultSet resultSet; if (includeTasks) { resultSet = session.execute( selectWorkflowWithTasksStatement.bind( workflowUUID, DEFAULT_SHARD_ID)); List<TaskModel> tasks = new ArrayList<>(); List<Row> rows = resultSet.all(); if (rows.size() == 0) { LOGGER.info("Workflow {} not found in datastore", workflowId); return null; } for (Row row : rows) { String entityKey = row.getString(ENTITY_KEY); if (ENTITY_TYPE_WORKFLOW.equals(entityKey)) { workflow = readValue(row.getString(PAYLOAD_KEY), WorkflowModel.class); } else if (ENTITY_TYPE_TASK.equals(entityKey)) { TaskModel task = readValue(row.getString(PAYLOAD_KEY), TaskModel.class); tasks.add(task); } else { throw new NonTransientException( String.format( "Invalid row with entityKey: %s found in datastore for workflow: %s", entityKey, workflowId)); } } if (workflow != null) { recordCassandraDaoRequests("getWorkflow", "n/a", workflow.getWorkflowName()); tasks.sort(Comparator.comparingInt(TaskModel::getSeq)); workflow.setTasks(tasks); } } else { resultSet = session.execute(selectWorkflowStatement.bind(workflowUUID)); workflow = Optional.ofNullable(resultSet.one()) .map( row -> { WorkflowModel wf = readValue( row.getString(PAYLOAD_KEY), WorkflowModel.class); recordCassandraDaoRequests( "getWorkflow", "n/a", wf.getWorkflowName()); return wf; }) .orElse(null); } return workflow; } catch (DriverException e) { Monitors.error(CLASS_NAME, "getWorkflow"); String errorMsg = String.format("Failed to get workflow: %s", workflowId); LOGGER.error(errorMsg, e); throw new TransientException(errorMsg); } } /** * This is a dummy implementation and this feature is not implemented for Cassandra backed * Conductor */ 
@Override public List<String> getRunningWorkflowIds(String workflowName, int version) { throw new UnsupportedOperationException( "This method is not implemented in CassandraExecutionDAO. Please use ExecutionDAOFacade instead."); } /** * This is a dummy implementation and this feature is not implemented for Cassandra backed * Conductor */ @Override public List<WorkflowModel> getPendingWorkflowsByType(String workflowName, int version) { throw new UnsupportedOperationException( "This method is not implemented in CassandraExecutionDAO. Please use ExecutionDAOFacade instead."); } /** * This is a dummy implementation and this feature is not implemented for Cassandra backed * Conductor */ @Override public long getPendingWorkflowCount(String workflowName) { throw new UnsupportedOperationException( "This method is not implemented in CassandraExecutionDAO. Please use ExecutionDAOFacade instead."); } /** * This is a dummy implementation and this feature is not implemented for Cassandra backed * Conductor */ @Override public long getInProgressTaskCount(String taskDefName) { throw new UnsupportedOperationException( "This method is not implemented in CassandraExecutionDAO. Please use ExecutionDAOFacade instead."); } /** * This is a dummy implementation and this feature is not implemented for Cassandra backed * Conductor */ @Override public List<WorkflowModel> getWorkflowsByType( String workflowName, Long startTime, Long endTime) { throw new UnsupportedOperationException( "This method is not implemented in CassandraExecutionDAO. Please use ExecutionDAOFacade instead."); } /** * This is a dummy implementation and this feature is not implemented for Cassandra backed * Conductor */ @Override public List<WorkflowModel> getWorkflowsByCorrelationId( String workflowName, String correlationId, boolean includeTasks) { throw new UnsupportedOperationException( "This method is not implemented in CassandraExecutionDAO. 
Please use ExecutionDAOFacade instead."); } @Override public boolean canSearchAcrossWorkflows() { return false; } @Override public boolean addEventExecution(EventExecution eventExecution) { try { String jsonPayload = toJson(eventExecution); recordCassandraDaoEventRequests("addEventExecution", eventExecution.getEvent()); recordCassandraDaoPayloadSize( "addEventExecution", jsonPayload.length(), eventExecution.getEvent(), "n/a"); return session.execute( insertEventExecutionStatement.bind( eventExecution.getMessageId(), eventExecution.getName(), eventExecution.getId(), jsonPayload)) .wasApplied(); } catch (DriverException e) { Monitors.error(CLASS_NAME, "addEventExecution"); String errorMsg = String.format( "Failed to add event execution for event: %s, handler: %s", eventExecution.getEvent(), eventExecution.getName()); LOGGER.error(errorMsg, e); throw new TransientException(errorMsg); } } @Override public void updateEventExecution(EventExecution eventExecution) { try { String jsonPayload = toJson(eventExecution); recordCassandraDaoEventRequests("updateEventExecution", eventExecution.getEvent()); recordCassandraDaoPayloadSize( "updateEventExecution", jsonPayload.length(), eventExecution.getEvent(), "n/a"); session.execute( updateEventExecutionStatement.bind( eventExecutionsTTL, jsonPayload, eventExecution.getMessageId(), eventExecution.getName(), eventExecution.getId())); } catch (DriverException e) { Monitors.error(CLASS_NAME, "updateEventExecution"); String errorMsg = String.format( "Failed to update event execution for event: %s, handler: %s", eventExecution.getEvent(), eventExecution.getName()); LOGGER.error(errorMsg, e); throw new TransientException(errorMsg); } } @Override public void removeEventExecution(EventExecution eventExecution) { try { recordCassandraDaoEventRequests("removeEventExecution", eventExecution.getEvent()); session.execute( deleteEventExecutionStatement.bind( eventExecution.getMessageId(), eventExecution.getName(), eventExecution.getId())); } 
catch (DriverException e) { Monitors.error(CLASS_NAME, "removeEventExecution"); String errorMsg = String.format( "Failed to remove event execution for event: %s, handler: %s", eventExecution.getEvent(), eventExecution.getName()); LOGGER.error(errorMsg, e); throw new TransientException(errorMsg); } } @VisibleForTesting List<EventExecution> getEventExecutions( String eventHandlerName, String eventName, String messageId) { try { return session .execute(selectEventExecutionsStatement.bind(messageId, eventHandlerName)) .all() .stream() .filter(row -> !row.isNull(PAYLOAD_KEY)) .map(row -> readValue(row.getString(PAYLOAD_KEY), EventExecution.class)) .collect(Collectors.toList()); } catch (DriverException e) { String errorMsg = String.format( "Failed to fetch event executions for event: %s, handler: %s", eventName, eventHandlerName); LOGGER.error(errorMsg, e); throw new TransientException(errorMsg); } } @Override public void addTaskToLimit(TaskModel task) { try { recordCassandraDaoRequests( "addTaskToLimit", task.getTaskType(), task.getWorkflowType()); session.execute( updateTaskDefLimitStatement.bind( UUID.fromString(task.getWorkflowInstanceId()), task.getTaskDefName(), UUID.fromString(task.getTaskId()))); } catch (DriverException e) { Monitors.error(CLASS_NAME, "addTaskToLimit"); String errorMsg = String.format( "Error updating taskDefLimit for task - %s:%s in workflow: %s", task.getTaskDefName(), task.getTaskId(), task.getWorkflowInstanceId()); LOGGER.error(errorMsg, e); throw new TransientException(errorMsg, e); } } @Override public void removeTaskFromLimit(TaskModel task) { try { recordCassandraDaoRequests( "removeTaskFromLimit", task.getTaskType(), task.getWorkflowType()); session.execute( deleteTaskDefLimitStatement.bind( task.getTaskDefName(), UUID.fromString(task.getTaskId()))); } catch (DriverException e) { Monitors.error(CLASS_NAME, "removeTaskFromLimit"); String errorMsg = String.format( "Error updating taskDefLimit for task - %s:%s in workflow: %s", 
task.getTaskDefName(), task.getTaskId(), task.getWorkflowInstanceId()); LOGGER.error(errorMsg, e); throw new TransientException(errorMsg, e); } } protected boolean removeTask(TaskModel task) { // TODO: calculate shard number based on seq and maxTasksPerShard try { // get total tasks for this workflow WorkflowMetadata workflowMetadata = getWorkflowMetadata(task.getWorkflowInstanceId()); int totalTasks = workflowMetadata.getTotalTasks(); // remove from task_lookup table removeTaskLookup(task); recordCassandraDaoRequests("removeTask", task.getTaskType(), task.getWorkflowType()); // delete task from workflows table and decrement total tasks by 1 BatchStatement batchStatement = new BatchStatement(); batchStatement.add( deleteTaskStatement.bind( UUID.fromString(task.getWorkflowInstanceId()), DEFAULT_SHARD_ID, task.getTaskId())); batchStatement.add( updateTotalTasksStatement.bind( totalTasks - 1, UUID.fromString(task.getWorkflowInstanceId()), DEFAULT_SHARD_ID)); ResultSet resultSet = session.execute(batchStatement); if (task.getTaskDefinition().isPresent() && task.getTaskDefinition().get().concurrencyLimit() > 0) { removeTaskFromLimit(task); } return resultSet.wasApplied(); } catch (DriverException e) { Monitors.error(CLASS_NAME, "removeTask"); String errorMsg = String.format("Failed to remove task: %s", task.getTaskId()); LOGGER.error(errorMsg, e); throw new TransientException(errorMsg); } } protected void removeTaskLookup(TaskModel task) { try { recordCassandraDaoRequests( "removeTaskLookup", task.getTaskType(), task.getWorkflowType()); if (task.getTaskDefinition().isPresent() && task.getTaskDefinition().get().concurrencyLimit() > 0) { removeTaskFromLimit(task); } session.execute(deleteTaskLookupStatement.bind(UUID.fromString(task.getTaskId()))); } catch (DriverException e) { Monitors.error(CLASS_NAME, "removeTaskLookup"); String errorMsg = String.format("Failed to remove task lookup: %s", task.getTaskId()); LOGGER.error(errorMsg, e); throw new 
TransientException(errorMsg); } } @VisibleForTesting void validateTasks(List<TaskModel> tasks) { Preconditions.checkNotNull(tasks, "Tasks object cannot be null"); Preconditions.checkArgument(!tasks.isEmpty(), "Tasks object cannot be empty"); tasks.forEach( task -> { Preconditions.checkNotNull(task, "task object cannot be null"); Preconditions.checkNotNull(task.getTaskId(), "Task id cannot be null"); Preconditions.checkNotNull( task.getWorkflowInstanceId(), "Workflow instance id cannot be null"); Preconditions.checkNotNull( task.getReferenceTaskName(), "Task reference name cannot be null"); }); String workflowId = tasks.get(0).getWorkflowInstanceId(); Optional<TaskModel> optionalTask = tasks.stream() .filter(task -> !workflowId.equals(task.getWorkflowInstanceId())) .findAny(); if (optionalTask.isPresent()) { throw new NonTransientException( "Tasks of multiple workflows cannot be created/updated simultaneously"); } } @VisibleForTesting WorkflowMetadata getWorkflowMetadata(String workflowId) { ResultSet resultSet = session.execute(selectTotalStatement.bind(UUID.fromString(workflowId))); recordCassandraDaoRequests("getWorkflowMetadata"); return Optional.ofNullable(resultSet.one()) .map( row -> { WorkflowMetadata workflowMetadata = new WorkflowMetadata(); workflowMetadata.setTotalTasks(row.getInt(TOTAL_TASKS_KEY)); workflowMetadata.setTotalPartitions(row.getInt(TOTAL_PARTITIONS_KEY)); return workflowMetadata; }) .orElseThrow( () -> new NotFoundException( "Workflow with id: %s not found in data store", workflowId)); } @VisibleForTesting String lookupWorkflowIdFromTaskId(String taskId) { UUID taskUUID = toUUID(taskId, "Invalid task id"); try { ResultSet resultSet = session.execute(selectTaskLookupStatement.bind(taskUUID)); return Optional.ofNullable(resultSet.one()) .map(row -> row.getUUID(WORKFLOW_ID_KEY).toString()) .orElse(null); } catch (DriverException e) { Monitors.error(CLASS_NAME, "lookupWorkflowIdFromTaskId"); String errorMsg = String.format("Failed to lookup 
workflowId from taskId: %s", taskId); LOGGER.error(errorMsg, e); throw new TransientException(errorMsg, e); } } }
7,046
0
Create_ds/Surus/src/test/java/org/surus
Create_ds/Surus/src/test/java/org/surus/pig/ScorePMML_AuditTest.java
package org.surus.pig; import static org.junit.Assert.*; import java.io.IOException; import java.util.ArrayList; import java.util.List; import javax.xml.bind.JAXBException; import org.apache.pig.data.DataType; import org.apache.pig.data.Tuple; import org.apache.pig.data.TupleFactory; import org.apache.pig.impl.logicalLayer.FrontendException; import org.apache.pig.impl.logicalLayer.schema.Schema; import org.apache.pig.impl.logicalLayer.schema.Schema.FieldSchema; import org.junit.*; import org.surus.pig.ScorePMML; import org.xml.sax.SAXException; public class ScorePMML_AuditTest { // Audit Models private String ensembleAuditModelPath = "./resources/examples/models/ensemble_audit_dectree.xml"; // Tuple Factory private TupleFactory tf = TupleFactory.getInstance(); // -------------------------- // Audit Test Functions // -------------------------- @Test public void ensembleScoringTest_Audit_1() throws IOException, SAXException, JAXBException { Schema inputSchema = buildAuditInputSchema(); // Input/Output Bag Tuple inputTuple = tf.newTuple(); Tuple expected = tf.newTuple(); { // Visit 1, Input: Implicit Signout inputTuple = this.buildAuditInputEvent(1038288L,45,"Private","Bachelor","Married","Repair",27743.82,"Male",0,55,"UnitedStates",7298,1); // Visit 1, Output expected = this.buildAuditOutputEvent(1038288L,45,"Private","Bachelor","Married","Repair",27743.82,"Male",0,55,"UnitedStates",7298,1,"0"); } // Initialize Class ScorePMML evalPMML = new ScorePMML(this.ensembleAuditModelPath); Schema outputScheam = evalPMML.outputSchema(inputSchema); Tuple observed = evalPMML.exec(inputTuple); // Test if (expected.equals(observed)) { System.out.println("ensembleScoringTest_Audit_1: PASS"); } else { System.out.println("---------- EPIC FAIL: ensembleScoringTest_Audit_1 ----------"); System.out.println("Expected: " + expected.toString()); System.out.println("Observed: " + observed.toString()); System.out.println("-------- END EPIC FAIL --------"); } assertEquals(expected,observed); 
} // -------------------------- // Audit Helper Functions // -------------------------- private Schema buildAuditInputSchema() throws FrontendException { // Build Field Schema List<FieldSchema> fieldSchemas = new ArrayList<FieldSchema>(); fieldSchemas.add(new Schema.FieldSchema("id" , DataType.LONG)); fieldSchemas.add(new Schema.FieldSchema("age" , DataType.INTEGER)); fieldSchemas.add(new Schema.FieldSchema("employment" , DataType.CHARARRAY)); fieldSchemas.add(new Schema.FieldSchema("education" , DataType.CHARARRAY)); fieldSchemas.add(new Schema.FieldSchema("marital" , DataType.CHARARRAY)); fieldSchemas.add(new Schema.FieldSchema("occupation" , DataType.CHARARRAY)); fieldSchemas.add(new Schema.FieldSchema("income" , DataType.DOUBLE)); fieldSchemas.add(new Schema.FieldSchema("gender" , DataType.CHARARRAY)); fieldSchemas.add(new Schema.FieldSchema("deductions" , DataType.DOUBLE)); fieldSchemas.add(new Schema.FieldSchema("hours" , DataType.INTEGER)); fieldSchemas.add(new Schema.FieldSchema("ignore_accounts", DataType.CHARARRAY)); fieldSchemas.add(new Schema.FieldSchema("risk_adjustment", DataType.INTEGER)); fieldSchemas.add(new Schema.FieldSchema("target_adjusted", DataType.INTEGER)); return new Schema(fieldSchemas); } private Tuple buildAuditInputEvent( Long ID , Integer Age , String Employment , String Education , String Marital , String Occupation , Double Income , String Gender , Integer Deductions , Integer Hours , String IGNORE_Accounts , Integer RISK_Adjustment , Integer TARGET_Adjusted) { Tuple newTuple = tf.newTuple(); newTuple.append(ID ); newTuple.append(Age ); newTuple.append(Employment ); newTuple.append(Education ); newTuple.append(Marital ); newTuple.append(Occupation ); newTuple.append(Income ); newTuple.append(Gender ); newTuple.append(Deductions ); newTuple.append(Hours ); newTuple.append(IGNORE_Accounts); newTuple.append(RISK_Adjustment); newTuple.append(TARGET_Adjusted); return newTuple; } private Tuple buildAuditOutputEvent( Long ID , Integer Age 
, String Employment , String Education , String Marital , String Occupation , Double Income , String Gender , Integer Deductions , Integer Hours , String IGNORE_Accounts , Integer RISK_Adjustment , Integer TARGET_Adjusted , String TARGET_Adjusted_predicted) { Tuple newTuple = tf.newTuple(); newTuple.append(TARGET_Adjusted_predicted); return newTuple; } }
7,047
0
Create_ds/Surus/src/test/java/org/surus
Create_ds/Surus/src/test/java/org/surus/pig/ScorePMML_ElNinoTest.java
package org.surus.pig; import static org.junit.Assert.*; import java.io.IOException; import java.util.ArrayList; import java.util.List; import javax.xml.bind.JAXBException; import org.apache.pig.data.DataType; import org.apache.pig.data.Tuple; import org.apache.pig.data.TupleFactory; import org.apache.pig.impl.logicalLayer.FrontendException; import org.apache.pig.impl.logicalLayer.schema.Schema; import org.apache.pig.impl.logicalLayer.schema.Schema.FieldSchema; import org.junit.*; import org.surus.pig.ScorePMML; import org.xml.sax.SAXException; public class ScorePMML_ElNinoTest { // ElNino Models private String regressionElNinoModelPath = "./resources/examples/models/elnino_linearReg.xml"; private TupleFactory tf = TupleFactory.getInstance(); // -------------------------- // ElNino Test Functions // -------------------------- @Test public void regressionScoringTest_ElNino_1() throws IOException, SAXException, JAXBException { Schema inputSchema = buildElNinoInputSchema(); // Input/Output Bag Tuple inputTuple = tf.newTuple(); Tuple expected = tf.newTuple(); { // Visit 1, Input: Implicit Signout inputTuple = this.buildElNinoInputEvent("1","1","1","8.96","-140.32","-6.3","-6.4","83.5","27.32","27.57"); // Visit 1, Output expected = this.buildElNinoOutputEvent("1","1","1","8.96","-140.32","-6.3","-6.4","83.5","27.32","27.57",27.384241597858438); } // Initialize Class ScorePMML evalPMML = new ScorePMML(this.regressionElNinoModelPath); Schema outputSchema = evalPMML.outputSchema(inputSchema); Tuple observed = evalPMML.exec(inputTuple); // Test if (expected.equals(observed)) { System.out.println("regressionScoringTest_ElNino_1: PASS"); } else { System.out.println("---------- EPIC FAIL: regressionScoringTest_ElNino_1 ----------"); System.out.println("Expected: " + expected.toString()); System.out.println("Observed: " + observed.toString()); System.out.println("-------- END EPIC FAIL --------"); } assertEquals(expected,observed); } // -------------------------- // El Nino 
Helper Functions // -------------------------- private Schema buildElNinoInputSchema() throws FrontendException { // Build Field Schema List<FieldSchema> fieldSchemas = new ArrayList<FieldSchema>(); fieldSchemas.add(new Schema.FieldSchema("buoy_day_ID", DataType.CHARARRAY)); fieldSchemas.add(new Schema.FieldSchema("buoy" , DataType.CHARARRAY)); fieldSchemas.add(new Schema.FieldSchema("day" , DataType.CHARARRAY)); fieldSchemas.add(new Schema.FieldSchema("latitude" , DataType.DOUBLE )); fieldSchemas.add(new Schema.FieldSchema("longitude" , DataType.DOUBLE )); fieldSchemas.add(new Schema.FieldSchema("zon_winds" , DataType.DOUBLE )); fieldSchemas.add(new Schema.FieldSchema("mer_winds" , DataType.DOUBLE )); fieldSchemas.add(new Schema.FieldSchema("humidity" , DataType.DOUBLE )); fieldSchemas.add(new Schema.FieldSchema("airtemp" , DataType.DOUBLE )); fieldSchemas.add(new Schema.FieldSchema("s_s_temp" , DataType.DOUBLE )); return new Schema(fieldSchemas); } private Tuple buildElNinoInputEvent( String buoy_day_ID, String buoy, String day, String latitude, String longitude, String zon_winds, String mer_winds, String humidity, String airtemp, String s_s_temp) { Tuple newTuple = tf.newTuple(); newTuple.append(buoy_day_ID); newTuple.append(buoy ); newTuple.append(day ); newTuple.append(latitude ); newTuple.append(longitude ); newTuple.append(zon_winds ); newTuple.append(mer_winds ); newTuple.append(humidity ); newTuple.append(airtemp ); newTuple.append(s_s_temp ); return newTuple; } private Tuple buildElNinoOutputEvent( String buoy_day_ID, String buoy, String day, String latitude, String longitude, String zon_winds, String mer_winds, String humidity, String airtemp, String s_s_temp, double airtemp_predicted) { Tuple newTuple = tf.newTuple(); newTuple.append(airtemp_predicted); return newTuple; } }
7,048
0
Create_ds/Surus/src/test/java/org/surus
Create_ds/Surus/src/test/java/org/surus/pig/RAD_Test.java
package org.surus.pig;

import static org.junit.Assert.*;

import org.junit.Test;

import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;

import org.apache.pig.backend.executionengine.ExecException;
import org.apache.pig.data.BagFactory;
import org.apache.pig.data.DataBag;
import org.apache.pig.data.Tuple;
import org.apache.pig.data.TupleFactory;
import org.apache.pig.data.DataType;
import org.apache.pig.impl.logicalLayer.FrontendException;
import org.apache.pig.impl.logicalLayer.schema.Schema;
import org.apache.pig.impl.logicalLayer.schema.Schema.FieldSchema;
import org.surus.pig.RAD;

/**
 * Unit test for the {@link RAD} (Robust Anomaly Detection) Pig UDF.
 * Feeds a fixed 63-point time series through RAD and compares the decomposition
 * (low-rank L, sparse S, noise E) against precomputed reference values.
 */
public class RAD_Test {

    private TupleFactory tf = TupleFactory.getInstance();
    private BagFactory bf = BagFactory.getInstance();

    // UDF constructor args: column name, nCols, nRows[, forceDiff].
    // NOTE(review): argsDaily8 is currently unused by any test in this class.
    private static final String[] argsDaily8 = new String[]{"metric","8","7"};
    private static final String[] argsDaily9 = new String[]{"metric","9","7","False"};

    /**
     * Non-differenced decomposition: runs RAD over the fixture series and checks
     * the (x, L, S, E) columns of every output tuple to within 1e-7.
     */
    @Test
    public void testNormal() throws Exception {

        System.out.println("testNormal");

        // Input time series (63 points = 9 columns x 7 rows).
        double[] ts = new double[] {2.05407309078346,2.85886923211884,2.89728554463089,0.790480493540229,0.548595335194215,1.31367506547418,1.74407133897301,4.06071962679526,2.75651081738515,0.604658754735038,0.182607837501951,-1.262201503678,0.996560864201235,2.74637817075616,0.775004762296101,0.906823901472144,2.6839457174704,-0.0625841462071901,-1.09641353766956,0.00479165991036998,0.449351175604642,3.53152043857777,1.05206417605014,2.7864942275709,-0.691007430091048,-1.02038488026721,-1.35124486835257,0.0621976297222073,2.82421545538541,2.41312411015615,1.27711183784622,0.0988204592711682,1.50691474460298,0.272037685359444,1.9889742629239,3.33907184622517,3.68134545243902,0.751559686193563,0.679120355399832,0.428056866405207,0.351341204822829,1.33498418531095,3.04169869243666,1.22542459625713,1.35457091793328,0.567124649501233,-1.95560538335988,-1.09014280752067,1.80062291606412,0.588637569785287,1.89212604693897,1.38386740607786,0.356716316822486,-2.07161693692556,4,1.44451323393473,3.52551739267569,3.16481926426412,1.83839333727511,0.827646664705546,0.654351159135431,-0.00892931340717523,0.678082675364184};

        // Reference noise component E.
        double[] E_r = new double[] {0.3318797478729918,1.373638963651734,1.5863429313355741,-0.13690908975775629,-0.17341746498876717,0.45656608096044515,0.5029180391592517,1.6864361103335357,0.9041099905770569,-0.8601945846628597,-0.43797424973196464,-1.4306784687160095,0.5305755112030833,1.4332243957418884,-0.9225543720714464,-0.48968272112395295,1.2969905519062221,-0.936011207027195,-1.6967451093902703,-0.7685900450169054,-0.7342364348556424,1.1239395771496394,-0.7346252973511546,1.2214527991637296,-1.2219568417836726,-0.9997034788017629,-1.6861131664061504,-1.1927477447840469,1.0418155557468505,0.8807625994533953,-0.03357751903633732,-0.8118290678921689,0.8108046850909548,-0.5663706526498646,0.7314788056938822,1.2544903710465884,1.9742891069463693,-0.6254827173189841,-0.09333463299772303,-0.020202726976659584,-0.3118013823711032,0.04079223440640133,0.7231970417612443,-0.5343365497940273,-0.1640519436117994,-0.026079552263280893,-2.0141760086038945,-1.509009390294657,0.5384928439241734,-0.7732362655677173,0.6211082673104158,0.1455859735298013,-0.7302821616706046,-2.014175981890958,2.014175973413418,0.2514450496241806,1.4414575166495622,1.4769526331968026,0.44081750801343844,0.07149456117262622,0.24164508024661888,-0.6475184991073684,-0.6022063271601131};

        // Reference sparse outlier component S (mostly signed zeros; three true outliers).
        double[] S_r = new double[] {0.0,0.0,0.0,-0.0,-0.0,0.0,0.0,0.0,0.0,-0.0,-0.0,-0.0,0.0,0.0,-0.0,-0.0,0.0,-0.0,-0.0,-0.0,-0.0,0.0,-0.0,0.0,-0.0,-0.0,-0.0,-0.0,0.0,0.0,-0.0,-0.0,0.0,-0.0,0.0,0.0,0.0,-0.0,-0.0,-0.0,-0.0,0.0,0.0,-0.0,-0.0,-0.0,-0.040615044404649886,-0.0,0.0,-0.0,0.0,0.0,-0.0,-1.0637357541633508,0.9275030757193699,0.0,0.0,0.0,0.0,0.0,0.0,-0.0,-0.0};

        // Reference low-rank component L.
        double[] L_r = new double[] {1.7221933429104683,1.4852302684671057,1.3109426132953157,0.9273895832979853,0.7220128001829822,0.8571089845137347,1.2411532998137584,2.3742835164617246,1.852400826808093,1.4648533393978977,0.6205820872339156,0.16847696503800913,0.4659853529981517,1.3131537750142714,1.6975591343675474,1.396506622596097,1.3869551655641779,0.8734270608200048,0.6003315717207102,0.7733817049272755,1.1835876104602845,2.407580861428131,1.7866894734012946,1.5650414284071705,0.5309494116926246,-0.020681401465446836,0.3348682980535801,1.2549453745062544,1.7823998996385595,1.5323615107027546,1.3106893568825573,0.910649527163337,0.6961100595120252,0.8384083380093086,1.2574954572300179,2.084581475178582,1.7070563454926502,1.377042403512547,0.772454988397555,0.4482595933818666,0.6631425871939323,1.2941919509045487,2.3185016506754152,1.7597611460511573,1.5186228615450794,0.5932042017645138,0.09918566964866415,0.41886658277398703,1.2621300721399467,1.3618738353530044,1.2710177796285542,1.2382814325480587,1.0869984784930906,1.0062947991287492,1.058320950867212,1.1930681843105493,2.0840598760261275,1.6878666310673174,1.3975758292616716,0.7561521035329198,0.41270607888881206,0.6385891857001931,1.280289002524297};

        // Input/Output Bag
        // Finally wrap inputBag in tuple
        Tuple inputTuple = tf.newTuple();
        DataBag expected = bf.newDefaultBag();
        {
            // Build Input/Output
            inputTuple.append(buildDataBag(ts));
            expected = buildDataBag(ts,L_r,S_r,E_r,false);
        }

        // Initialize Class
        RAD rsvd = new RAD(argsDaily9);
        // outputSchema() is invoked for its side effect of capturing the input schema.
        Schema outputSchema = rsvd.outputSchema(buildInputSchema2());
        DataBag observed = rsvd.exec(inputTuple);

        // Test: print diagnostics before asserting so failures are readable in logs.
        if (approximateCompareBags(expected,observed)) {
            System.out.println("PASS");
        } else {
            System.out.println("------- EPIC FAIL --------");
            System.out.println("Expected: "+expected.toString());
            System.out.println("Observed: "+observed.toString());
        }
        assertTrue(approximateCompareBags(expected,observed));
    }

    /**
     * Compares two bags tuple-by-tuple (iteration order) on columns 0, 2, 3 and 4,
     * allowing an absolute error of 1e-7. Column 1 (the differenced value) is
     * intentionally skipped — see TODO below. Assumes both bags have equal size.
     */
    private Boolean approximateCompareBags(DataBag inputBag1, DataBag inputBag2) throws ExecException {

        // Hardcode Acceptable Error
        double errorLimit = 0.0000001;

        Iterator<Tuple> iter1 = inputBag1.iterator();
        Iterator<Tuple> iter2 = inputBag2.iterator();

        while (iter1.hasNext()) {
            Tuple tuple1 = iter1.next();
            Tuple tuple2 = iter2.next();

            // Check error
            if (Math.abs((Double) tuple1.get(0) - (Double) tuple2.get(0)) > errorLimit) return false;
            // TODO: Add unit test for differenced case
            //if (Math.abs((Double) tuple1.get(1) - (Double) tuple2.get(1)) > errorLimit) return false;
            if (Math.abs((Double) tuple1.get(2) - (Double) tuple2.get(2)) > errorLimit) return false;
            if (Math.abs((Double) tuple1.get(3) - (Double) tuple2.get(3)) > errorLimit) return false;
            if (Math.abs((Double) tuple1.get(4) - (Double) tuple2.get(4)) > errorLimit) return false;
        }

        return true;
    }

    /** Wraps a raw series into a bag of single-field tuples (the UDF input shape). */
    private DataBag buildDataBag(double[] obj1) {

        DataBag dataBag = bf.newDefaultBag();

        for (int n=0; n<obj1.length; n++) {
            Tuple newTuple = tf.newTuple();
            newTuple.append(obj1[n]);
            dataBag.add(newTuple);
        }

        return dataBag;
    }

    /**
     * Builds the expected 5-column output bag: (x, x_transform, L, S, E).
     * When isDifferencing is true, column 1 holds the first difference of obj1
     * (0.0 for the first row); otherwise it repeats obj1.
     */
    private DataBag buildDataBag(double[] obj1, double[] obj2, double[] obj3, double[] obj4, boolean isDifferencing) {

        DataBag dataBag = bf.newDefaultBag();

        double previous = 0.0;
        for (int n=0; n<obj1.length; n++) {
            Tuple newTuple = tf.newTuple();
            newTuple.append(obj1[n]);
            if (isDifferencing) {
                if (n == 0) {
                    newTuple.append(0.0);
                } else {
                    newTuple.append(obj1[n] - previous);
                }
                previous = obj1[n];
            } else {
                newTuple.append(obj1[n]);
            }
            newTuple.append(obj2[n]);
            newTuple.append(obj3[n]);
            newTuple.append(obj4[n]);
            dataBag.add(newTuple);
        }

        return dataBag;
    }

    /** Builds the UDF input schema: a bag of tuples with one DOUBLE field "metric". */
    private Schema buildInputSchema2() {

        // Outer Tuple Schema
        List<FieldSchema> fieldSchemas = new ArrayList<FieldSchema>();
        fieldSchemas.add(new Schema.FieldSchema("metric" , DataType.DOUBLE));

        // Wrap Inner DataBag
        FieldSchema innerTupleFieldSchema = null;
        try {
            innerTupleFieldSchema = new FieldSchema(null, new Schema(fieldSchemas), DataType.TUPLE);
        } catch (FrontendException e) {
            // Swallowed with a stack trace only — schema construction is not expected
            // to fail for these hard-coded fields.
            e.printStackTrace();
        }

        // Outer Tuple Schema
        List<FieldSchema> fieldSchemaFinal = new ArrayList<FieldSchema>();
        try {
            fieldSchemaFinal.add(new Schema.FieldSchema("dummy_bag", new Schema(innerTupleFieldSchema), DataType.BAG));
        } catch (FrontendException e1) {
            e1.printStackTrace();
        }

        // Return Schema
        Schema outputSchema = new Schema(fieldSchemaFinal);
        return outputSchema;
    }

}
7,049
0
Create_ds/Surus/src/test/java/org/surus
Create_ds/Surus/src/test/java/org/surus/pig/ScorePMML_IrisTest.java
package org.surus.pig;

import static org.junit.Assert.*;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import javax.xml.bind.JAXBException;

import org.apache.pig.data.DataType;
import org.apache.pig.data.Tuple;
import org.apache.pig.data.TupleFactory;
import org.apache.pig.impl.logicalLayer.FrontendException;
import org.apache.pig.impl.logicalLayer.schema.Schema;
import org.apache.pig.impl.logicalLayer.schema.Schema.FieldSchema;
import org.junit.*;
import org.surus.pig.ScorePMML;
import org.xml.sax.SAXException;

/**
 * Unit tests for {@link ScorePMML} against three Iris models: a decision tree,
 * a neural network (MLP), and a random forest.
 *
 * The four original test methods were copies of each other differing only in
 * model path, input event and expected output; the shared scoring/reporting
 * logic now lives in {@link #runScoringTest}.
 */
public class ScorePMML_IrisTest {

    // Iris Models (paths relative to the test runner's working directory)
    private String treeIrisModelPath = "./resources/examples/models/single_iris_dectree.xml";
    private String nnIrisModelPath   = "./resources/examples/models/single_iris_mlp.xml";
    private String rfIrisModelPath   = "./resources/examples/models/example.randomForest.xml";

    private TupleFactory tf = TupleFactory.getInstance();

    // --------------------------
    // Iris Test Functions
    // --------------------------

    /** Decision tree: a classic setosa row must score as setosa. */
    @Test
    public void treeScoringTest_Iris_1() throws IOException, SAXException, JAXBException {
        Tuple inputTuple = this.buildIrisInputEvent(5.1,3.5,1.4,0.2,"Iris-setosa");
        Tuple expected = this.buildIrisOutputEvent("Iris-setosa");
        runScoringTest("treeScoringTest_Iris_1", this.treeIrisModelPath, inputTuple, expected);
    }

    /** Decision tree: a borderline versicolor row is (expectedly) scored virginica. */
    @Test
    public void treeScoringTest_Iris_2() throws IOException, SAXException, JAXBException {
        Tuple inputTuple = this.buildIrisInputEvent(5.9,3.2,4.8,1.8,"Iris-versicolor");
        Tuple expected = this.buildIrisOutputEvent("Iris-virginica");
        runScoringTest("treeScoringTest_Iris_2", this.treeIrisModelPath, inputTuple, expected);
    }

    /** Neural network: the same borderline row is scored versicolor by the MLP. */
    @Test
    public void nnScoringTest_Iris_1() throws IOException, SAXException, JAXBException {
        Tuple inputTuple = this.buildIrisInputEvent(5.9,3.2,4.8,1.8,"Iris-versicolor");
        Tuple expected = this.buildIrisOutputEvent("Iris-versicolor");
        runScoringTest("nnScoringTest_Iris_1", this.nnIrisModelPath, inputTuple, expected);
    }

    /** Random forest: setosa row scores setosa with class probabilities (1, 0, 0). */
    @Test
    public void rfScoringTest_Iris_1() throws IOException, SAXException, JAXBException {
        Tuple inputTuple = this.buildIrisInputEvent(5.1,3.5,1.4,0.2,"setosa");
        Tuple expected = this.buildIrisOutputEvent("setosa","setosa",1.0,0.0,0.0);
        runScoringTest("rfScoringTest_Iris_1", this.rfIrisModelPath, inputTuple, expected);
    }

    /**
     * Shared scoring harness: builds the Iris input schema, initializes ScorePMML
     * for the given model, scores the input tuple, prints a PASS/FAIL diagnostic
     * (same format as the original per-test code) and asserts equality.
     *
     * @param testName   label used in the console diagnostics
     * @param modelPath  path to the PMML model file
     * @param inputTuple the event to score
     * @param expected   the expected output tuple
     */
    private void runScoringTest(String testName, String modelPath, Tuple inputTuple, Tuple expected)
            throws IOException, SAXException, JAXBException {

        // Build Input Schema
        Schema inputSchema = buildIrisInputSchema();

        // Initialize Class
        ScorePMML evalPMML = new ScorePMML(modelPath);
        // outputSchema() is called for its side effect of initializing the evaluator
        // and validating the input schema; the returned schema is not inspected here.
        evalPMML.outputSchema(inputSchema);
        Tuple observed = evalPMML.exec(inputTuple);

        // Print a readable diff before asserting so failures are easy to spot in logs.
        if (expected.equals(observed)) {
            System.out.println(testName + ": PASS");
        } else {
            System.out.println("---------- EPIC FAIL: " + testName + " ----------");
            System.out.println("Expected: " + expected.toString());
            System.out.println("Observed: " + observed.toString());
            System.out.println("-------- END EPIC FAIL --------");
        }
        assertEquals(expected,observed);
    }

    // --------------------------
    // Iris Helper Functions
    // --------------------------

    /** Builds the 5-column Iris input schema (4 measurements + labelled species). */
    private Schema buildIrisInputSchema() throws FrontendException {

        // Build Field Schema
        List<FieldSchema> fieldSchemas = new ArrayList<FieldSchema>();
        fieldSchemas.add(new Schema.FieldSchema("sepal_length" , DataType.DOUBLE));
        fieldSchemas.add(new Schema.FieldSchema("sepal_width"  , DataType.DOUBLE));
        fieldSchemas.add(new Schema.FieldSchema("petal_length" , DataType.DOUBLE));
        fieldSchemas.add(new Schema.FieldSchema("petal_width"  , DataType.DOUBLE));
        fieldSchemas.add(new Schema.FieldSchema("species"      , DataType.CHARARRAY));

        return new Schema(fieldSchemas);
    }

    /** Appends the four measurements and the input class label, in schema order. */
    private Tuple buildIrisInputEvent(double sepal_length, double sepal_width, double petal_length, double petal_width, String inputClass) {

        Tuple newTuple = tf.newTuple();
        newTuple.append(sepal_length);
        newTuple.append(sepal_width);
        newTuple.append(petal_length);
        newTuple.append(petal_width);
        newTuple.append(inputClass);

        return newTuple;
    }

    /** Expected output for single-prediction models: just the predicted class. */
    private Tuple buildIrisOutputEvent(String predictedClass) {

        Tuple newTuple = tf.newTuple();
        newTuple.append(predictedClass);

        return newTuple;
    }

    /**
     * Expected output for the random-forest model: predicted class, output-field
     * class, and the three per-class probabilities.
     */
    private Tuple buildIrisOutputEvent(String predictedClass, String outputField_Class, double predictedClass1, double predictedClass2, double predictedClass3) {

        Tuple newTuple = tf.newTuple();
        newTuple.append(predictedClass);
        newTuple.append(outputField_Class);
        newTuple.append(predictedClass1);
        newTuple.append(predictedClass2);
        newTuple.append(predictedClass3);

        return newTuple;
    }

}
7,050
0
Create_ds/Surus/src/test/java/org/surus
Create_ds/Surus/src/test/java/org/surus/math/AugmentedDickeyFuller_Test.java
package org.surus.math;

import static org.junit.Assert.*;

import java.util.Random;

import org.junit.Test;

/**
 * Unit tests for {@link AugmentedDickeyFuller}: a series with a linear trend
 * (with or without a single outlier) must be flagged as needing differencing.
 *
 * Fix: the original tests used an unseeded {@code new Random()}, making them
 * nondeterministic (and therefore potentially flaky). A fixed seed makes every
 * run reproduce the same series while preserving the statistical scenario
 * (monotone trend plus bounded uniform noise).
 */
public class AugmentedDickeyFuller_Test {

    // Fixed seed so both tests are deterministic across runs and machines.
    private static final long SEED = 42L;

    /**
     * Builds x[i] = (i+1) + 5*U(0,1): a strictly trending series with bounded
     * noise, which the ADF test should classify as non-stationary.
     */
    private static double[] linearTrendSeries(Random rand) {
        double[] x = new double[100];
        for (int i = 0; i < x.length; i++) {
            x[i] = (i + 1) + 5 * rand.nextDouble();
        }
        return x;
    }

    @Test
    public void testLinearTrend() {
        double[] x = linearTrendSeries(new Random(SEED));
        AugmentedDickeyFuller adf = new AugmentedDickeyFuller(x);
        assertTrue(adf.isNeedsDiff());
    }

    @Test
    public void testLinearTrendWithOutlier() {
        double[] x = linearTrendSeries(new Random(SEED));
        // A single large spike must not mask the underlying trend.
        x[50] = 100;
        AugmentedDickeyFuller adf = new AugmentedDickeyFuller(x);
        assertTrue(adf.isNeedsDiff());
    }

}
7,051
0
Create_ds/Surus/src/test/java/org/surus
Create_ds/Surus/src/test/java/org/surus/math/RPCA_Test.java
package org.surus.math;

import static org.junit.Assert.assertTrue;

import org.junit.Test;

/**
 * Unit test for {@link RPCA}: decomposes a fixed 7x9 matrix and compares the
 * E (noise), S (sparse) and L (low-rank) components against reference values
 * to within 1e-4.
 */
public class RPCA_Test {

    /**
     * Reshapes a flat array into a rows x cols matrix in column-major order
     * (element n lands at row n % rows, column n / rows).
     */
    public double[][] VectorToMatrix(double[] x, int rows, int cols) {
        double[][] input2DArray = new double[rows][cols];
        for (int n= 0; n< x.length; n++) {
            int i = n % rows;
            // n / rows is already integer division; Math.floor is a no-op here.
            int j = (int) Math.floor(n / rows);
            input2DArray[i][j] = x[n];
        }
        return input2DArray;
    }

    /**
     * Element-wise comparison with absolute tolerance epsilon; prints every
     * mismatching cell and returns false if any cell differs. Assumes X and Y
     * have identical dimensions.
     */
    public boolean MatrixApproximatelyEquals(double[][] X, double[][] Y, double epsilon) {
        boolean testOutput = true;
        int printCnt = 0;
        for (int j = 0; j < X[0].length; j++) {
            for (int i = 0; i < X.length; i++) {
                if (Math.abs(X[i][j] - Y[i][j]) > epsilon) {
                    System.out.println("("+i+","+j+") Left: "+X[i][j] + " Right: "+Y[i][j]);
                    printCnt++;
                    testOutput = false;
                }
            }
        }
        return testOutput;
    }

    /**
     * Runs RPCA with lpenalty=1, spenalty=1.4/3 on the fixture series and checks
     * each decomposition component against the precomputed reference matrices.
     */
    @Test
    public void testRSVD() {

        System.out.println("Running Test: testRSVD");

        // X: the input series, reshaped to 7 rows x 9 columns.
        double[] ts = new double[] {2.05407309078346,2.85886923211884,2.89728554463089,0.790480493540229,0.548595335194215,1.31367506547418,1.74407133897301,4.06071962679526,2.75651081738515,0.604658754735038,0.182607837501951,-1.262201503678,0.996560864201235,2.74637817075616,0.775004762296101,0.906823901472144,2.6839457174704,-0.0625841462071901,-1.09641353766956,0.00479165991036998,0.449351175604642,3.53152043857777,1.05206417605014,2.7864942275709,-0.691007430091048,-1.02038488026721,-1.35124486835257,0.0621976297222073,2.82421545538541,2.41312411015615,1.27711183784622,0.0988204592711682,1.50691474460298,0.272037685359444,1.9889742629239,3.33907184622517,3.68134545243902,0.751559686193563,0.679120355399832,0.428056866405207,0.351341204822829,1.33498418531095,3.04169869243666,1.22542459625713,1.35457091793328,0.567124649501233,-1.95560538335988,-1.09014280752067,1.80062291606412,0.588637569785287,1.89212604693897,1.38386740607786,0.356716316822486,-2.07161693692556,4,1.44451323393473,3.52551739267569,3.16481926426412,1.83839333727511,0.827646664705546,0.654351159135431,-0.00892931340717523,0.678082675364184};
        double[][] X = VectorToMatrix(ts, 7, 9);

        // E, S, L: reference decomposition components (flat, column-major).
        double[] E_r = new double[] {-0.0907627955303747,1.01938662397306,1.7153606207031,0.508734242238024,0.723048984114528,1.05744835689681,0.634974592796234,1.52144373899958,0.636387609902244,-0.816766677690375,-0.130107806055245,-0.998365425612053,0.744951709494425,1.46231154911581,-0.226797959197785,0.141398620170014,1.77717624827034,-0.160279457424966,-0.921736144683016,-0.0307375549137413,-0.0215231023010388,1.67109146682516,-0.344092782391524,1.68469787539411,-0.86328701822436,-0.670845951339157,-1.39451774017965,-0.799528103709266,0.889135246585203,0.737525584567534,0.216923473185421,-0.161161265909894,1.64839382264763,0.0264997493930041,0.980289570670967,1.0549440532891,1.71882828500543,-0.505750385484114,0.377784603637951,0.610288197122763,0.0752672973097475,0.15206212152394,1.19645607409238,-0.200471255130702,0.277569021928143,0.381376624759279,-1.64980949817604,-1.16998330599701,0.925239888962673,-0.656964349367174,0.843823792465116,0.689801114362373,0.200313968866586,-1.77717623601756,1.77717624450864,0.810454981413976,1.22657945137526,1.23085136920443,0.557077001335409,0.539281927359977,0.878791698921523,-0.2497479761408,-0.491748238542012};
        double[] S_r = new double[] {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0.318707767321735,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,-0.173279535858878,2.09194305945145,0,0,0,0,0,0,0,0};
        double[] L_r = new double[] {2.14483588631383,1.83948260814578,1.18192492392779,0.281746251302205,-0.174453648920312,0.256226708577366,1.10909674617677,2.53927588779568,2.12012320748291,1.42142543242541,0.312715643557195,-0.263836078065949,0.251609154706809,1.28406662164035,1.00180272149389,0.76542528130213,0.588061701878331,0.0976953112177755,-0.174677392986544,0.0355292148241113,0.470874277905681,1.86042897175261,1.39615695844167,1.10179635217679,0.172279588133313,-0.349538928928049,0.0432728718270843,0.861725733431473,1.93508020880021,1.67559852558861,1.0601883646608,0.259981725181062,-0.14147907804465,0.24553793596644,1.00868469225294,2.28412779293606,1.96251716743359,1.25731007167768,0.301335751761881,-0.182231330717556,0.276073907513082,1.18292206378701,1.84524261834428,1.42589585138783,1.07700189600514,0.185748024741955,-0.305795885183839,0.0798404984763351,0.875383027101449,1.24560191915246,1.04830225447385,0.694066291715486,0.1564023479559,-0.121161165049127,0.13088069603991,0.634058252520755,2.29893794130043,1.93396789505969,1.2813163359397,0.288364737345569,-0.224440539786092,0.240818662733625,1.1698309139062};

        double[][] E_matrix_r = VectorToMatrix(E_r, 7, 9);
        double[][] S_matrix_r = VectorToMatrix(S_r, 7, 9);
        double[][] L_matrix_r = VectorToMatrix(L_r, 7, 9);

        // Decompose: X = L + S + E with lpenalty=1, spenalty=1.4/3.
        RPCA rsvd = new RPCA(X, 1, 1.4/3);
        double[][] E = rsvd.getE().getData();
        double[][] S = rsvd.getS().getData();
        double[][] L = rsvd.getL().getData();

        assertTrue(MatrixApproximatelyEquals(E_matrix_r, E, 0.0001));
        assertTrue(MatrixApproximatelyEquals(S_matrix_r, S, 0.0001));
        assertTrue(MatrixApproximatelyEquals(L_matrix_r, L, 0.0001));
    }

}
7,052
0
Create_ds/Surus/src/main/java/org/surus
Create_ds/Surus/src/main/java/org/surus/pig/ScorePMML.java
package org.surus.pig;

import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

import javax.xml.bind.JAXBException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.pig.EvalFunc;
import org.apache.pig.data.DataType;
import org.apache.pig.data.Tuple;
import org.apache.pig.data.TupleFactory;
import org.apache.pig.impl.logicalLayer.schema.Schema;
import org.apache.pig.impl.logicalLayer.schema.Schema.FieldSchema;
import org.apache.pig.impl.util.UDFContext;
import org.dmg.pmml.DataField;
import org.dmg.pmml.FieldName;
import org.dmg.pmml.IOUtil;
import org.dmg.pmml.OutputField;
import org.dmg.pmml.PMML;
import org.jpmml.evaluator.Evaluator;
import org.jpmml.evaluator.EvaluatorUtil;
import org.jpmml.evaluator.FieldValue;
import org.jpmml.evaluator.ModelEvaluatorFactory;
import org.jpmml.manager.ModelManager;
import org.jpmml.manager.PMMLManager;
import org.xml.sax.SAXException;

/**
 * Pig EvalFunc that scores each input tuple against a PMML model via JPMML.
 *
 * The model file is located at runtime in this order: distributed cache,
 * then S3 (s3:// or s3n:// paths), then the local filesystem. The output tuple
 * contains the model's predicted fields followed by its derived output fields.
 */
public class ScorePMML extends EvalFunc<Tuple> {

    private Evaluator evaluator = null;               // JPMML model evaluator, lazily initialized
    private List<FieldName> activeFields = null;      // model input columns
    private List<FieldName> predictedFields = null;   // model predicted columns
    private List<FieldName> outputFields = null;      // derived output columns
    private String modelPath = null;                  // as given to the constructor
    private String modelName = null;                  // file name portion of modelPath
    private Schema inputTupleSchema = null;
    private Map<String,Integer> aliasMap = null;      // input alias -> tuple position
    private Boolean failOnTypeMatching = true;        // strict mode: throw on type mismatch

    // Reused across exec() calls; entries are overwritten each call since the
    // active-field set is fixed for the life of this UDF.
    Map<FieldName, FieldValue> preparedRow = new LinkedHashMap<FieldName, FieldValue>();

    private static final TupleFactory tf = TupleFactory.getInstance();

    // Maps PMML data-type names to Pig DataType codes. Note INTEGER is the only
    // type kept integral; FLOAT/LONG/BOOLEAN are all widened to Pig DOUBLE.
    private static final Map<String, Byte> dataTypeMap = new HashMap<String, Byte>();
    static {
        dataTypeMap.put("STRING" , DataType.CHARARRAY);
        dataTypeMap.put("INTEGER", DataType.INTEGER);
        dataTypeMap.put("FLOAT"  , DataType.DOUBLE);
        dataTypeMap.put("LONG"   , DataType.DOUBLE);
        dataTypeMap.put("DOUBLE" , DataType.DOUBLE);
        dataTypeMap.put("BOOLEAN", DataType.DOUBLE);
    }

    /**
     * Varargs constructor used by Pig's DEFINE: params[0] is the model path;
     * optional params[1] ("true"/"false") overrides strict type checking.
     */
    // Constructor
    public ScorePMML(String... params) throws IOException, SAXException, JAXBException {

        // Call Nested Constructor
        this(params[0]);

        // Override default failure mode
        if (params.length == 2) {
            this.failOnTypeMatching = Boolean.parseBoolean(params[1]);
        }
    }

    /** Records the model path and derives the bare file name used for the distributed cache. */
    // Constructor
    public ScorePMML(String modelPath) throws IOException, SAXException, JAXBException {

        // Set Default failure mode
        this.failOnTypeMatching = true;

        // Set Model Path
        this.modelPath = modelPath;
        System.err.println("modelPath: "+this.modelPath);

        // Set Distributed Cache
        // blah is the index just past the last '/', i.e. where the file name starts.
        int blah = this.modelPath.lastIndexOf("/") + 1;
        this.modelName = this.modelPath.substring(blah);
        System.err.println("modelName: "+this.modelName);
    }

    /**
     * Tells Pig to ship the model file into the distributed cache under its bare
     * file name (the "path#alias" syntax).
     */
    public List<String> getCacheFiles() {
        String filePath = this.modelPath+"#"+this.modelName;
        List<String> list = new ArrayList<String>(1);
        list.add(filePath);
        System.err.println(filePath+": added to the distributed cache.");
        return list;
    }

    /**
     * Loads the PMML model and caches the evaluator plus its field lists.
     * Tries the distributed cache first; on any failure falls back to S3 or
     * local disk depending on the path prefix. Also builds the alias->position
     * map for the given input schema.
     */
    private void initialize(Schema inputSchema) throws IOException, SAXException, JAXBException {

        this.inputTupleSchema = inputSchema;

        // and, initialize aliasMap:
        if (this.aliasMap == null) {
            this.aliasMap = new HashMap<String,Integer>();
            for (String alias : this.inputTupleSchema.getAliases()) {
                this.aliasMap.put(alias,this.inputTupleSchema.getPosition(alias)); // something to cleanup
            }
        }

        // Get PMML Object
        PMML pmml = null;
        try {
            /*
             * TODO: Make this more robust. Specifically, Angela Ho wanted to refernce a file in the distributed
             *       cache directly. Obviously, my code doesn't support this, because it would try to open
             *       the file with the IOUtil Java object, as opposed to the hadoop.fs.Path object.
             *
             * TODO: This try/catch block is a hack for:
             *       (1) checking if execution is being done on "back-end." A check for back-end can be done with
             *           UDFContext.getUDFContext().isFrontend() BUT this does not resolve problems with local-mode.
             *       (2) enables testing in local-mode without failing unit tests.
             */
            // Try reading file from distributed cache.
            pmml = IOUtil.unmarshal(new File("./"+this.modelName));
            System.err.println("Read model from distributed cache!");
        } catch (Throwable t) {
            // If not on the back-end... (and distributed cache not available) ...
            if (this.modelPath.toLowerCase().startsWith("s3n://") || this.modelPath.toLowerCase().startsWith("s3://")) {
                // ... read from S3.
                Path path = new Path(this.modelPath);
                FileSystem fs = path.getFileSystem(new Configuration());
                FSDataInputStream in = fs.open(path);
                pmml = IOUtil.unmarshal(in);
                System.err.println("Read model from s3!");
            } else {
                // ... read from local file.
                pmml = IOUtil.unmarshal(new File(this.modelPath));
                System.err.println("Read model from local disk!");
            }
        }

        // Initialize the pmmlManager
        PMMLManager pmmlManager = new PMMLManager(pmml);

        // Initialize the PMML Model Manager
        ModelManager<?> modelManager = pmmlManager.getModelManager(null, ModelEvaluatorFactory.getInstance());
        this.evaluator = (Evaluator)modelManager;              // Model Evaluator
        this.activeFields = evaluator.getActiveFields();       // input columns
        this.predictedFields = evaluator.getPredictedFields(); // predicted columns
        this.outputFields = evaluator.getOutputFields();       // derived output columns (based on predicted columns)
    }

    /**
     * Builds the output schema (predicted fields then output fields) and
     * validates that every model-active field exists in the input schema with
     * the expected Pig type. In strict mode a type mismatch throws; otherwise a
     * single warning is printed on the frontend.
     */
    // Define Output Schema
    @Override
    public Schema outputSchema(Schema input) {

        try {
            initialize(input);
        } catch (Throwable t) {
            throw new RuntimeException("Frontend: Unable to initialize PMML file: ",t);
        }

        // Define the output schema:
        try {
            // Define Input Tuple Schema
            this.inputTupleSchema = input;

            HashSet<String> aliases = new HashSet<String>(inputTupleSchema.getAliases());
            Boolean isVerbose = false;
            for (FieldName activeField : this.activeFields) {

                // Check that all active fields are present in dataset:
                String activeFieldAlias = activeField.toString().toLowerCase();
                if (!aliases.contains(activeFieldAlias)) {
                    throw new RuntimeException("ERROR: "+activeFieldAlias+" is not in the input dataset!");
                }

                // Check that all active fields have expected datatypes:
                // NOTE(review): left/right are boxed Bytes compared with != (reference
                // comparison). This works only because Byte.valueOf caches the full
                // byte range; .byteValue() comparison would be more explicit.
                Byte left  = this.inputTupleSchema.getField(aliasMap.get(activeFieldAlias)).type;
                Byte right = dataTypeMap.get(this.evaluator.getDataField(activeField).getDataType().toString());
                if (left != right)
                    if (failOnTypeMatching) {
                        throw new RuntimeException("ERROR: "+activeFieldAlias+" does not match expected type! (Expected: "
                                +DataType.genTypeToNameMap().get(right)+" Observed: "+DataType.genTypeToNameMap().get(left)+")");
                    } else if (UDFContext.getUDFContext().isFrontend() && !isVerbose) {
                        // Warn once per schema pass rather than once per field.
                        System.err.println("WARNING: active fields do not match expected type! Please run in strict mode to determine which fields are in violation");
                        isVerbose = true;
                        // System.err.println("WARNING: "+activeFieldAlias+" does not match expected type! (Expected: "
                        //         +DataType.genTypeToNameMap().get(right)+" Observed: "+DataType.genTypeToNameMap().get(left)+")");
                    }
            }

            // Create List of Tuple Values
            List<FieldSchema> fieldSchemas = new ArrayList<FieldSchema>();

            // Predicted Fields
            for (FieldName predictedField : this.predictedFields) {
                String predictedFieldAlias = "predictedField_" + predictedField.toString().toLowerCase();

                // Create FieldName
                DataField dataField = this.evaluator.getDataField(predictedField);
                String dataType = dataField.getDataType().toString();
                // NOTE(review): the null check below is unreachable as written —
                // getDataType().toString() would have thrown NPE already if the
                // data type were null.
                if (dataType == null) {
                    throw new RuntimeException("Predicted Fields with unknown datatype are not supported! Column: "+predictedFieldAlias+", PMML DataType "+dataType+".");
                } else if (!dataTypeMap.containsKey(dataType)) {
                    throw new RuntimeException("Column: "+predictedFieldAlias+", PMML DataType "+dataType+" is not currently supported.");
                } else {
                    fieldSchemas.add(new Schema.FieldSchema(predictedFieldAlias,dataTypeMap.get(dataType)));
                }
            }

            // Output Fields
            for (FieldName outputField : this.outputFields) {
                String outputFieldAlias = "outputField_" + outputField.toString().toLowerCase();

                // Create FieldName
                OutputField dataField = this.evaluator.getOutputField(outputField);
                if (dataField.getDataType() == null) {
                    // Output fields may legitimately omit a data type; fall back to BYTEARRAY.
                    fieldSchemas.add(new Schema.FieldSchema(outputFieldAlias,DataType.BYTEARRAY));
                } else if (dataTypeMap.containsKey(dataField.getDataType().toString())) {
                    fieldSchemas.add(new Schema.FieldSchema(outputFieldAlias,dataTypeMap.get(dataField.getDataType().toString())));
                } else {
                    throw new RuntimeException("Column: "+outputFieldAlias+", PMML DataType "+dataField.getDataType().toString()+" is not currently supported.");
                }
            }

            // Build Tuple and Wrap in DataBag
            FieldSchema tupleFieldSchema = new FieldSchema("EvalPMML", new Schema(fieldSchemas), DataType.TUPLE);

            // Return Schema
            Schema outputSchema = new Schema(tupleFieldSchema);
            return outputSchema;

        } catch (Throwable t) {
            System.err.println(t);
            throw new RuntimeException(t);
        }
    }

    /**
     * Scores one input tuple: lazily initializes the evaluator on the backend,
     * prepares each active field (widening Long to Double first, since the type
     * map treats LONG as DOUBLE), evaluates, and emits predicted fields followed
     * by output fields.
     */
    // Define Exec
    @Override
    public Tuple exec(Tuple input) throws IOException {

        // check
        int dummy = 0;

        // Initialize Evaluator if null:
        if (this.evaluator == null) {
            try {
                System.out.println("Initializing: "+(dummy++)+" time");
                Schema inputSchema = getInputSchema();
                this.initialize(inputSchema); // something to check
            } catch (Throwable t) {
                throw new RuntimeException("Backend: Unable to initialize PMML file: ",t);
            }
        }

        // Initialize Output as Input
        Tuple outputTuple = tf.newTuple(this.predictedFields.size() + this.outputFields.size());

        /* ************************
        // BLOCK: Prepare Data
        ************************* */
        for(FieldName inputField : this.activeFields){

            // Get Object
            Object origBodyCell = (Object) input.get(aliasMap.get(inputField.getValue().toLowerCase()));
            Object bodyCell;
            if (origBodyCell instanceof Long) {
                // Widen Long to Double to match the LONG->DOUBLE mapping in dataTypeMap.
                bodyCell = ((Long) origBodyCell).doubleValue();
            } else {
                bodyCell = origBodyCell;
            }

            // Prepare Object for Scoring
            this.preparedRow.put(inputField, EvaluatorUtil.prepare(this.evaluator, inputField, bodyCell));

            // Prepare Object for Scoring
            // CC: Removed this b/c I think the "Long" check above resolves any issues.
            /*
            try {
                this.preparedRow.put(inputField, EvaluatorUtil.prepare(this.evaluator, inputField, bodyCell));
            } catch (Throwable t) {
                System.err.println("Unable to prepare record, Trouble Parsing: " + inputField.toString() + " (value="+ bodyCell+")");
                System.err.println(t);
                throw new RuntimeException(t);
            }
            */
        }

        // Score Data
        Map<FieldName, ?> result = evaluator.evaluate(this.preparedRow);

        // Append Predicted Fields
        int i = 0;
        for(FieldName predictedField : this.predictedFields){
            outputTuple.set(i++,EvaluatorUtil.decode(result.get(predictedField)));
        }
        for(FieldName outputField : this.outputFields){
            outputTuple.set(i++,EvaluatorUtil.decode(result.get(outputField)));
        }

        // Return Tuple:
        return outputTuple;
    }

}
7,053
0
Create_ds/Surus/src/main/java/org/surus
Create_ds/Surus/src/main/java/org/surus/pig/RAD.java
package org.surus.pig; import java.io.IOException; import java.util.ArrayList; import java.util.List; import java.util.Iterator; import org.apache.pig.EvalFunc; import org.apache.pig.data.BagFactory; import org.apache.pig.data.DataBag; import org.apache.pig.data.DataType; import org.apache.pig.data.Tuple; import org.apache.pig.data.TupleFactory; import org.apache.pig.impl.logicalLayer.schema.Schema; import org.apache.pig.impl.logicalLayer.schema.Schema.FieldSchema; import org.surus.math.AugmentedDickeyFuller; import org.surus.math.RPCA; public class RAD extends EvalFunc<DataBag> { private final double LPENALTY_DEFAULT_NO_DIFF = 1; private final double SPENALTY_DEFAULT_NO_DIFF = 1.4; private final double LPENALTY_DEFAULT_DIFF = 1; private final double SPENALTY_DEFAULT_DIFF = 1.4; private final String colName; private final Integer nRows; private final Integer nCols; private Double lpenalty; private Double spenalty; private Boolean isForceDiff; private Schema dataBagSchema; private final Integer minRecords; private final Double eps = 1e-12; // Constructor public RAD(String... 
parameters) { this.colName = parameters[0]; this.nCols = Integer.parseInt(parameters[1]); this.nRows = Integer.parseInt(parameters[2]); if (parameters.length == 4) { this.isForceDiff = Boolean.parseBoolean(parameters[3]); } else if (parameters.length != 3) { throw new RuntimeException("Invalid parameters list"); } // set other parameters this.minRecords = 2 * this.nRows; } // Define Output Schema @Override public Schema outputSchema(Schema input) { try { if (input.size() != 1) { throw new RuntimeException("Expected input to have only a single field"); } // Grab Bag Schema Schema.FieldSchema inputFieldSchema = input.getField(0); if (inputFieldSchema.type != DataType.BAG) { throw new RuntimeException("Expected a BAG as input"); } // Check Bag Schema Schema inputBagSchema = inputFieldSchema.schema; if (inputBagSchema.getField(0).type != DataType.TUPLE) { throw new RuntimeException(String.format("Expected input bag to contain a TUPLE, but instead found %s", DataType.findTypeName(inputBagSchema.getField(0).type))); } // Define Input Tuple Schema this.dataBagSchema = inputBagSchema.getField(0).schema; this.dataBagSchema.prettyPrint(); // Create List of Tuple Values List<FieldSchema> fieldSchemas = new ArrayList<FieldSchema>(); fieldSchemas.addAll(dataBagSchema.getFields()); fieldSchemas.add(new Schema.FieldSchema("x_transform", DataType.DOUBLE)); fieldSchemas.add(new Schema.FieldSchema("rsvd_l", DataType.DOUBLE)); fieldSchemas.add(new Schema.FieldSchema("rsvd_s", DataType.DOUBLE)); fieldSchemas.add(new Schema.FieldSchema("rsvd_e", DataType.DOUBLE)); // Build Tuple and Wrap in DataBag FieldSchema tupleFieldSchema = new FieldSchema(null, new Schema(fieldSchemas), DataType.TUPLE); FieldSchema bagFieldSchema = new FieldSchema(this.getClass().getName().toLowerCase().replace(".", "_"), new Schema(tupleFieldSchema), DataType.BAG); // Return Schema Schema outputSchema = new Schema(bagFieldSchema); return outputSchema; } catch (Throwable t) { throw new RuntimeException(t); } } // 
Helper Function public double[][] VectorToMatrix(double[] x, int rows, int cols) { double[][] input2DArray = new double[rows][cols]; for (int n= 0; n< x.length; n++) { int i = n % rows; int j = (int) Math.floor(n / rows); input2DArray[i][j] = x[n]; } return input2DArray; } // Define Exec @Override public DataBag exec(Tuple input) throws IOException { // Hack to get the InputSchema on the backend if (this.dataBagSchema == null) { this.dataBagSchema = getInputSchema().getField(0).schema.getField(0).schema; } // Check DataTypes if (!( (this.dataBagSchema.getField(this.colName).type == DataType.LONG ) || (this.dataBagSchema.getField(this.colName).type == DataType.INTEGER) || (this.dataBagSchema.getField(this.colName).type == DataType.DOUBLE ) || (this.dataBagSchema.getField(this.colName).type == DataType.FLOAT ) )) { throw new RuntimeException(String.format("Data type of %s (%s) is not supported,",this.colName, DataType.findTypeName(this.dataBagSchema.getField(this.colName).type))); } // Hardcode getting the bag DataBag inputBag = (DataBag) input.get(0); // Create TupleFactory for Output Bag Generation TupleFactory tupleFactory = TupleFactory.getInstance(); BagFactory bagFactory = BagFactory.getInstance(); // Read Data into Memory List<Tuple> tupleList = new ArrayList<Tuple>(); Iterator<Tuple> bagIter = inputBag.iterator(); while (bagIter.hasNext()) { Tuple tuple = bagIter.next(); tupleList.add(tuple); } if (tupleList.size() != this.nRows*this.nCols) { throw new RuntimeException("ERROR: this.nRows * this.nCols != tupleList.size()"); } // Perform Dickey-Fuller Test double[] inputArray = new double[this.nRows*this.nCols]; Integer numNonZeroRecords = 0; for (int n=0; n< inputArray.length; n++) { if (this.dataBagSchema.getField(this.colName).type == DataType.DOUBLE) { inputArray[n] = (Double) tupleList.get(n).get(this.dataBagSchema.getPosition(this.colName)); } else if (this.dataBagSchema.getField(this.colName).type == DataType.FLOAT) { inputArray[n] = (Float) 
tupleList.get(n).get(this.dataBagSchema.getPosition(this.colName)); } else if (this.dataBagSchema.getField(this.colName).type == DataType.LONG ) { inputArray[n] = (Long) tupleList.get(n).get(this.dataBagSchema.getPosition(this.colName)); } else if (this.dataBagSchema.getField(this.colName).type == DataType.INTEGER ) { inputArray[n] = (Integer) tupleList.get(n).get(this.dataBagSchema.getPosition(this.colName)); } else { throw new RuntimeException(String.format("Data type of %s (%s) is not supported,",this.colName, DataType.findTypeName(this.dataBagSchema.getField(this.colName).type))); } if (Math.abs(inputArray[n]) > eps) numNonZeroRecords++; } if (numNonZeroRecords>=this.minRecords) { AugmentedDickeyFuller dickeyFullerTest = new AugmentedDickeyFuller(inputArray); double[] inputArrayTransformed = inputArray; if (this.isForceDiff == null && dickeyFullerTest.isNeedsDiff()) { // Auto Diff inputArrayTransformed = dickeyFullerTest.getZeroPaddedDiff(); } else if (this.isForceDiff) { // Force Diff inputArrayTransformed = dickeyFullerTest.getZeroPaddedDiff(); } if (this.spenalty == null) { this.lpenalty = this.LPENALTY_DEFAULT_NO_DIFF; this.spenalty = this.SPENALTY_DEFAULT_NO_DIFF / Math.sqrt(Math.max(this.nCols, this.nRows)); } // Calc Mean double mean = 0; for (int n=0; n < inputArrayTransformed.length; n++) { mean += inputArrayTransformed[n]; } mean /= inputArrayTransformed.length; // Calc STDEV double stdev = 0; for (int n=0; n < inputArrayTransformed.length; n++) { stdev += Math.pow(inputArrayTransformed[n] - mean,2) ; } stdev = Math.sqrt(stdev / (inputArrayTransformed.length - 1)); // Transformation: Zero Mean, Unit Variance for (int n=0; n < inputArrayTransformed.length; n++) { inputArrayTransformed[n] = (inputArrayTransformed[n]-mean)/stdev; } // Read Input Data into Array // Read Input Data into Array double[][] input2DArray = new double[this.nRows][this.nCols]; input2DArray = VectorToMatrix(inputArrayTransformed, this.nRows, this.nCols); RPCA rSVD = new 
RPCA(input2DArray, this.lpenalty, this.spenalty); double[][] outputE = rSVD.getE().getData(); double[][] outputS = rSVD.getS().getData(); double[][] outputL = rSVD.getL().getData(); // Loop through bag and build output DataBag outputBag = bagFactory.newDefaultBag(); for (int n=0; n< inputArray.length; n++) { int i = n % this.nRows; int j = (int) Math.floor(n / this.nRows); // Add all previous tuple values Tuple oldTuple = tupleList.get(n); Tuple newTuple = tupleFactory.newTuple(oldTuple.size() + 4); int tupleIndex = 0; for (int k = 0; k < oldTuple.size(); k++) { newTuple.set(tupleIndex++, oldTuple.get(k)); } // TODO: Add additional L,S,E matrices newTuple.set(tupleIndex++, inputArrayTransformed[n]); newTuple.set(tupleIndex++, outputL[i][j] * stdev + mean); newTuple.set(tupleIndex++, outputS[i][j] * stdev); newTuple.set(tupleIndex++, outputE[i][j] * stdev); // Add Tuple to DataBag outputBag.add(newTuple); } // Return Tuple return outputBag; } else { // Loop through bag and build output DataBag outputBag = bagFactory.newDefaultBag(); for (int n=0; n< inputArray.length; n++) { int i = n % this.nRows; int j = (int) Math.floor(n / this.nRows); // Add all previous tuple values Tuple oldTuple = tupleList.get(n); Tuple newTuple = tupleFactory.newTuple(oldTuple.size() + 4); int tupleIndex = 0; for (int k = 0; k < oldTuple.size(); k++) { newTuple.set(tupleIndex++, oldTuple.get(k)); } // Add Tuple to DataBag outputBag.add(newTuple); } // Return Tuple return outputBag; } } }
7,054
0
Create_ds/Surus/src/main/java/org/surus
Create_ds/Surus/src/main/java/org/surus/math/RPCA.java
package org.surus.math; import org.apache.commons.math3.linear.MatrixUtils; import org.apache.commons.math3.linear.RealMatrix; import org.apache.commons.math3.linear.SingularValueDecomposition; import org.apache.commons.math3.stat.descriptive.DescriptiveStatistics; public class RPCA { private RealMatrix X; private RealMatrix L; private RealMatrix S; private RealMatrix E; private double lpenalty; private double spenalty; private static final int MAX_ITERS = 228; public RPCA(double[][] data, double lpenalty, double spenalty) { this.X = MatrixUtils.createRealMatrix(data); this.lpenalty = lpenalty; this.spenalty = spenalty; initMatrices(); computeRSVD(); } public RPCA(RealMatrix X, double lpenalty, double spenalty) { this.X = X; this.lpenalty = lpenalty; this.spenalty = spenalty; initMatrices(); computeRSVD(); } private void initMatrices() { this.L = MatrixUtils.createRealMatrix(this.X.getRowDimension(), this.X.getColumnDimension()); this.S = MatrixUtils.createRealMatrix(this.X.getRowDimension(), this.X.getColumnDimension()); this.E = MatrixUtils.createRealMatrix(this.X.getRowDimension(), this.X.getColumnDimension()); } private void computeRSVD() { double mu = X.getColumnDimension() * X.getRowDimension() / (4 * l1norm(X.getData())); double objPrev = 0.5*Math.pow(X.getFrobeniusNorm(), 2); double obj = objPrev; double tol = 1e-8 * objPrev; double diff = 2 * tol; int iter = 0; while(diff > tol && iter < MAX_ITERS) { double nuclearNorm = computeS(mu); double l1Norm = computeL(mu); double l2Norm = computeE(); obj = computeObjective(nuclearNorm, l1Norm, l2Norm); diff = Math.abs(objPrev - obj); objPrev = obj; mu = computeDynamicMu(); iter = iter + 1; } } private double[] softThreshold(double[] x, double penalty) { for(int i = 0; i < x.length; i++) { x[i] = Math.signum(x[i]) * Math.max(Math.abs(x[i]) - penalty, 0); } return x; } private double[][] softThreshold(double[][] x, double penalty) { for(int i = 0; i < x.length; i++) { for(int j = 0; j < x[i].length; j++) { x[i][j] = 
Math.signum(x[i][j]) * Math.max(Math.abs(x[i][j]) - penalty, 0); } } return x; } private double sum(double[] x) { double sum = 0; for (int i = 0; i < x.length; i++) sum += x[i]; return (sum); } private double l1norm(double[][] x) { double l1norm = 0; for (int i = 0; i < x.length; i++) { for (int j = 0; j < x[i].length; j++) { l1norm += Math.abs(x[i][j]); } } return l1norm; } private double computeL(double mu) { double LPenalty = lpenalty * mu; SingularValueDecomposition svd = new SingularValueDecomposition(X.subtract(S)); double[] penalizedD = softThreshold(svd.getSingularValues(), LPenalty); RealMatrix D_matrix = MatrixUtils.createRealDiagonalMatrix(penalizedD); L = svd.getU().multiply(D_matrix).multiply(svd.getVT()); return sum(penalizedD) * LPenalty; } private double computeS(double mu) { double SPenalty = spenalty * mu; double[][] penalizedS = softThreshold(X.subtract(L).getData(), SPenalty); S = MatrixUtils.createRealMatrix(penalizedS); return l1norm(penalizedS) * SPenalty; } private double computeE() { E = X.subtract(L).subtract(S); double norm = E.getFrobeniusNorm(); return Math.pow(norm, 2); } private double computeObjective(double nuclearnorm, double l1norm, double l2norm) { return 0.5*l2norm + nuclearnorm + l1norm; } private double computeDynamicMu() { int m = E.getRowDimension(); int n = E.getColumnDimension(); double E_sd = standardDeviation(E.getData()); double mu = E_sd * Math.sqrt(2*Math.max(m,n)); return Math.max(.01, mu); } /*private double MedianAbsoluteDeviation(double[][] x) { DescriptiveStatistics stats = new DescriptiveStatistics(); for (int i = 0; i < x.length; i ++) for (int j = 0; j < x[i].length; j++) stats.addValue(x[i][j]); double median = stats.getPercentile(50); DescriptiveStatistics absoluteDeviationStats = new DescriptiveStatistics(); for (int i = 0; i < x.length; i ++) for (int j = 0; j < x[i].length; j++) absoluteDeviationStats.addValue(Math.abs(x[i][j] - median)); return absoluteDeviationStats.getPercentile(50) * 1.4826; }*/ 
private double standardDeviation(double[][] x) { DescriptiveStatistics stats = new DescriptiveStatistics(); for (int i = 0; i < x.length; i ++) for (int j = 0; j < x[i].length; j++) stats.addValue(x[i][j]); return stats.getStandardDeviation(); } public RealMatrix getL() { return L; } public RealMatrix getS() { return S; } public RealMatrix getE() { return E; } }
7,055
0
Create_ds/Surus/src/main/java/org/surus
Create_ds/Surus/src/main/java/org/surus/math/AugmentedDickeyFuller.java
package org.surus.math; import org.apache.commons.math3.linear.MatrixUtils; import org.apache.commons.math3.linear.RealMatrix; import org.apache.commons.math3.linear.RealVector; public class AugmentedDickeyFuller { private double[] ts; private int lag; private boolean needsDiff = true; private double[] zeroPaddedDiff; private double PVALUE_THRESHOLD = -3.45; /** * Uses the Augmented Dickey Fuller test to determine * if ts is a stationary time series * @param ts * @param lag */ public AugmentedDickeyFuller(double[] ts, int lag) { this.ts = ts; this.lag = lag; computeADFStatistics(); } /** * Uses the Augmented Dickey Fuller test to determine * if ts is a stationary time series * @param ts */ public AugmentedDickeyFuller(double[] ts) { this.ts = ts; this.lag = (int) Math.floor(Math.cbrt((ts.length - 1))); computeADFStatistics(); } private void computeADFStatistics() { double[] y = diff(ts); RealMatrix designMatrix = null; int k = lag+1; int n = ts.length - 1; RealMatrix z = MatrixUtils.createRealMatrix(laggedMatrix(y, k)); //has rows length(ts) - 1 - k + 1 RealVector zcol1 = z.getColumnVector(0); //has length length(ts) - 1 - k + 1 double[] xt1 = subsetArray(ts, k-1, n-1); //ts[k:(length(ts) - 1)], has length length(ts) - 1 - k + 1 double[] trend = sequence(k,n); //trend k:n, has length length(ts) - 1 - k + 1 if (k > 1) { RealMatrix yt1 = z.getSubMatrix(0, ts.length - 1 - k, 1, k-1); //same as z but skips first column //build design matrix as cbind(xt1, 1, trend, yt1) designMatrix = MatrixUtils.createRealMatrix(ts.length - 1 - k + 1, 3 + k - 1); designMatrix.setColumn(0, xt1); designMatrix.setColumn(1, ones(ts.length - 1 - k + 1)); designMatrix.setColumn(2, trend); designMatrix.setSubMatrix(yt1.getData(), 0, 3); } else { //build design matrix as cbind(xt1, 1, tt) designMatrix = MatrixUtils.createRealMatrix(ts.length - 1 - k + 1, 3); designMatrix.setColumn(0, xt1); designMatrix.setColumn(1, ones(ts.length - 1 - k + 1)); designMatrix.setColumn(2, trend); } 
/*OLSMultipleLinearRegression regression = new OLSMultipleLinearRegression(); regression.setNoIntercept(true); regression.newSampleData(zcol1.toArray(), designMatrix.getData()); double[] beta = regression.estimateRegressionParameters(); double[] sd = regression.estimateRegressionParametersStandardErrors(); */ RidgeRegression regression = new RidgeRegression(designMatrix.getData(), zcol1.toArray()); regression.updateCoefficients(.0001); double[] beta = regression.getCoefficients(); double[] sd = regression.getStandarderrors(); double t = beta[0] / sd[0]; if (t <= PVALUE_THRESHOLD) { this.needsDiff = true; } else { this.needsDiff = false; } } /** * Takes finite differences of x * @param x * @return Returns an array of length x.length-1 of * the first differences of x */ private double[] diff(double[] x) { double[] diff = new double[x.length - 1]; double[] zeroPaddedDiff = new double[x.length]; zeroPaddedDiff[0] = 0; for (int i = 0; i < diff.length; i++) { double diff_i = x[i+1] - x[i]; diff[i] = diff_i; zeroPaddedDiff[i+1] = diff_i; } this.zeroPaddedDiff = zeroPaddedDiff; return diff; } /** * Equivalent to matlab and python ones * @param n * @return an array of doubles of length n that are * initialized to 1 */ private double[] ones(int n) { double[] ones = new double[n]; for (int i = 0; i < n; i++) { ones[i] = 1; } return ones; } /** * Equivalent to R's embed function * @param x time series vector * @param lag number of lags, where lag=1 is the same as no lags * @return a matrix that has x.length - lag + 1 rows by lag columns. 
*/ private double[][] laggedMatrix(double[]x, int lag) { double[][] laggedMatrix = new double[x.length - lag + 1][lag]; for (int j = 0; j < lag; j++) { //loop through columns for (int i = 0; i < laggedMatrix.length; i++) { laggedMatrix[i][j] = x[lag - j - 1 + i]; } } return laggedMatrix; } /** * Takes x[start] through x[end - 1] * @param x * @param start * @param end * @return */ private double[] subsetArray(double[] x, int start, int end) { double[] subset = new double[end - start + 1]; System.arraycopy(x, start, subset, 0, end - start + 1); return subset; } /** * Generates a sequence of ints [start, end] * @param start * @param end * @return */ private double[] sequence(int start, int end) { double[] sequence = new double[end - start + 1]; for (int i = start; i <= end; i++) { sequence[i - start] = i; } return sequence; } public boolean isNeedsDiff() { return needsDiff; } public double[] getZeroPaddedDiff() { return zeroPaddedDiff; } }
7,056
0
Create_ds/Surus/src/main/java/org/surus
Create_ds/Surus/src/main/java/org/surus/math/RidgeRegression.java
package org.surus.math; import org.apache.commons.math3.linear.MatrixUtils; import org.apache.commons.math3.linear.RealMatrix; import org.apache.commons.math3.linear.RealVector; import org.apache.commons.math3.linear.SingularValueDecomposition; public class RidgeRegression { private RealMatrix X; private SingularValueDecomposition X_svd = null; private double[] Y; private double l2penalty; private double[] coefficients; private double[] standarderrors; private double[] fitted; private double[] residuals; public RidgeRegression(double[][] x, double[] y) { this.X = MatrixUtils.createRealMatrix(x); this.X_svd = null; this.Y = y; this.l2penalty = 0; this.coefficients = null; this.fitted = new double[y.length]; this.residuals = new double[y.length]; } public void updateCoefficients(double l2penalty) { if (this.X_svd == null) { this.X_svd = new SingularValueDecomposition(X); } RealMatrix V = this.X_svd.getV(); double[] s = this.X_svd.getSingularValues(); RealMatrix U = this.X_svd.getU(); for (int i = 0; i < s.length; i++) { s[i] = s[i] / (s[i]*s[i] + l2penalty); } RealMatrix S = MatrixUtils.createRealDiagonalMatrix(s); RealMatrix Z = V.multiply(S).multiply(U.transpose()); this.coefficients = Z.operate(this.Y); this.fitted = this.X.operate(this.coefficients); double errorVariance = 0; for (int i = 0; i < residuals.length; i++) { this.residuals[i] = this.Y[i] - this.fitted[i]; errorVariance += this.residuals[i] * this.residuals[i]; } errorVariance = errorVariance / (X.getRowDimension() - X.getColumnDimension()); RealMatrix errorVarianceMatrix = MatrixUtils.createRealIdentityMatrix(this.Y.length).scalarMultiply(errorVariance); RealMatrix coefficientsCovarianceMatrix = Z.multiply(errorVarianceMatrix).multiply(Z.transpose()); this.standarderrors = getDiagonal(coefficientsCovarianceMatrix); } private double[] getDiagonal(RealMatrix X) { double[] diag = new double[X.getColumnDimension()]; for (int i = 0; i < diag.length; i++) { diag[i] = X.getEntry(i, i); } return diag; } 
public double getL2penalty() { return l2penalty; } public void setL2penalty(double l2penalty) { this.l2penalty = l2penalty; } public double[] getCoefficients() { return coefficients; } public double[] getStandarderrors() { return standarderrors; } }
7,057
0
Create_ds/aws-toolkit-eclipse/tests/com.amazonaws.eclipse.opsworks.tests/src/com/amazonaws/eclipse/opsworks/deploy
Create_ds/aws-toolkit-eclipse/tests/com.amazonaws.eclipse.opsworks.tests/src/com/amazonaws/eclipse/opsworks/deploy/util/ZipUtilsTest.java
package com.amazonaws.eclipse.opsworks.deploy.util; import static org.junit.Assert.assertEquals; import static com.amazonaws.eclipse.opsworks.deploy.util.ZipUtils.unzipFileToDirectory; import java.io.ByteArrayInputStream; import java.io.File; import java.io.FileInputStream; import java.io.FileOutputStream; import java.io.IOException; import java.nio.charset.StandardCharsets; import java.nio.file.Files; import java.nio.file.Path; import java.util.Map; import java.util.stream.Collectors; import java.util.zip.ZipEntry; import java.util.zip.ZipOutputStream; import org.apache.commons.io.IOUtils; import org.junit.Rule; import org.junit.Test; import org.junit.rules.TemporaryFolder; public class ZipUtilsTest { @Rule public TemporaryFolder folder = new TemporaryFolder(); @Test public void canUnpackAZipFileToDirectory() throws IOException { File zipFile = folder.newFile("file.zip"); File target = folder.newFolder("target"); try (ZipOutputStream zipOutputStream = new ZipOutputStream(new FileOutputStream(zipFile))) { writeEntry(zipOutputStream, "foo/bar.txt", "hello foo-bar!"); writeEntry(zipOutputStream, "baz.txt", "hello baz!"); writeEntry(zipOutputStream, "foo/../root.txt", "hello root!"); } unzipFileToDirectory(zipFile, target); Map<String, String> actual = Files.walk(target.toPath()).filter(p -> p.toFile().isFile()).collect(Collectors.toMap(p -> target.toPath().relativize(p).toString(), this::content)); assertEquals("hello foo-bar!", actual.get("foo/bar.txt".replace('/', File.separatorChar))); assertEquals("hello baz!", actual.get("baz.txt")); assertEquals("hello root!", actual.get("root.txt")); } @Test(expected = RuntimeException.class) public void exceptionThrownIfRelativeFileAttemptsToLeaveParentDirectory() throws IOException { File zipFile = folder.newFile("file.zip"); File target = folder.newFolder("target"); try (ZipOutputStream zipOutputStream = new ZipOutputStream(new FileOutputStream(zipFile))) { writeEntry(zipOutputStream, "foo/bar.txt", "hello foo-bar!"); 
writeEntry(zipOutputStream, "../baz.txt", "hello baz!"); } unzipFileToDirectory(zipFile, target); } private void writeEntry(ZipOutputStream zipOutputStream, String name, String content) throws IOException { zipOutputStream.putNextEntry(new ZipEntry(name)); IOUtils.copy(new ByteArrayInputStream(content.getBytes(StandardCharsets.UTF_8)), zipOutputStream); zipOutputStream.closeEntry(); } private String content(Path p) { try { return IOUtils.toString(new FileInputStream(p.toFile())); } catch (IOException e) { throw new RuntimeException(e); } } }
7,058
0
Create_ds/aws-toolkit-eclipse/tests/com.amazonaws.eclipse.elasticbeanstalk.tests/src/com/amazonaws/eclipse
Create_ds/aws-toolkit-eclipse/tests/com.amazonaws.eclipse.elasticbeanstalk.tests/src/com/amazonaws/eclipse/elasticbeanstalk/EnvironmentNameRegexTest.java
package com.amazonaws.eclipse.elasticbeanstalk; import org.eclipse.core.runtime.IStatus; import org.junit.Test; import static org.junit.Assert.assertEquals; import com.amazonaws.eclipse.elasticbeanstalk.server.ui.databinding.EnvironmentNameValidator; /** * Test cases for environment name field in the * "New Elastic Beanstalk application" wizard page. * */ public class EnvironmentNameRegexTest { @Test public void testEnvironmentNameValidation() { final EnvironmentNameValidator validator = new EnvironmentNameValidator(); assertEquals(validator.validate("").getSeverity(), IStatus.ERROR); assertEquals(validator.validate("a").getSeverity(), IStatus.ERROR); assertEquals(validator.validate("-a").getSeverity(), IStatus.ERROR); assertEquals(validator.validate("-a-").getSeverity(), IStatus.ERROR); assertEquals(validator.validate("ab&&cd").getSeverity(), IStatus.ERROR); assertEquals(validator.validate("abcd!").getSeverity(), IStatus.ERROR); assertEquals(validator.validate("-abcd-").getSeverity(), IStatus.ERROR); assertEquals(validator.validate("abcd").getSeverity(), IStatus.OK); assertEquals(validator.validate("ab-cd").getSeverity(), IStatus.OK); assertEquals(validator.validate("ab-c123d").getSeverity(), IStatus.OK); } }
7,059
0
Create_ds/aws-toolkit-eclipse/tests/com.amazonaws.eclipse.elasticbeanstalk.tests/src/com/amazonaws/eclipse/elasticbeanstalk/server/ui
Create_ds/aws-toolkit-eclipse/tests/com.amazonaws.eclipse.elasticbeanstalk.tests/src/com/amazonaws/eclipse/elasticbeanstalk/server/ui/configEditor/IgnoredOptionsTest.java
/* * Copyright 2015 Amazon Technologies, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at: * * http://aws.amazon.com/apache2.0 * * This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES * OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and * limitations under the License. */ package com.amazonaws.eclipse.elasticbeanstalk.server.ui.configEditor; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; import java.util.HashMap; import java.util.List; import org.junit.Before; import org.junit.Test; import com.amazonaws.eclipse.elasticbeanstalk.server.ui.configEditor.EnvironmentConfigDataModel.IgnoredOptions; public class IgnoredOptionsTest { private IgnoredOptions ignoredOptions; @Before public void setup() { ignoredOptions = new IgnoredOptions(new HashMap<String, List<String>>()); } @Test public void ignoreNamespace_ignoresAllOptionsInThatNamespace() { String ignoredNamespace = "some-namespace"; ignoredOptions.ignoreNamespace(ignoredNamespace); assertTrue(ignoredOptions.isNamespaceIgnored(ignoredNamespace)); assertTrue(ignoredOptions.isOptionIgnored(ignoredNamespace, "some-option")); assertFalse(ignoredOptions.isNamespaceIgnored("some-other-namespace")); } @Test public void ignoreOptions_IgnoreMultipleOptions() { String namespace = "some-namespace"; String optionName = "some-option"; String otherOption = "some-other-option"; ignoredOptions.ignoreOption(namespace, optionName); ignoredOptions.ignoreOption(namespace, otherOption); assertTrue(ignoredOptions.isOptionIgnored(namespace, optionName)); assertTrue(ignoredOptions.isOptionIgnored(namespace, otherOption)); } @Test public void ignoreOption_OnlyIgnoresThatOptionNotTheNamespace() { String namespace = "some-namespace"; String optionName = "some-option"; 
ignoredOptions.ignoreOption(namespace, optionName); assertFalse(ignoredOptions.isNamespaceIgnored(namespace)); assertTrue(ignoredOptions.isOptionIgnored(namespace, optionName)); assertFalse(ignoredOptions.isOptionIgnored(namespace, "some-other-option")); } }
7,060
0
Create_ds/aws-toolkit-eclipse/tests/com.amazonaws.eclipse.simpledb.tests/src/com/amazonaws/eclipse/datatools/enablement
Create_ds/aws-toolkit-eclipse/tests/com.amazonaws.eclipse.simpledb.tests/src/com/amazonaws/eclipse/datatools/enablement/simpledb/AllTests.java
/* * Copyright 2009-2012 Amazon Technologies, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at: * * http://aws.amazon.com/apache2.0 * * This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES * OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and * limitations under the License. */ package com.amazonaws.eclipse.datatools.enablement.simpledb; import junit.framework.Test; import junit.framework.TestSuite; import com.amazonaws.eclipse.datatools.enablement.simpledb.internal.driver.JdbcStatementTest; public class AllTests { public static Test suite() { TestSuite suite = new TestSuite("Test for com.amazonaws.eclipse.datatools.enablement.simpledb"); suite.addTestSuite(JdbcStatementTest.class); return suite; } }
7,061
0
Create_ds/aws-toolkit-eclipse/tests/com.amazonaws.eclipse.simpledb.tests/src/com/amazonaws/eclipse/datatools/enablement/simpledb/internal
Create_ds/aws-toolkit-eclipse/tests/com.amazonaws.eclipse.simpledb.tests/src/com/amazonaws/eclipse/datatools/enablement/simpledb/internal/driver/JdbcStatementTest.java
/* * Copyright 2008-2012 Amazon Technologies, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at: * * http://aws.amazon.com/apache2.0 * * This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES * OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and * limitations under the License. */ package com.amazonaws.eclipse.datatools.enablement.simpledb.internal.driver; import java.sql.SQLException; import java.util.ArrayList; import java.util.Collection; import java.util.List; import org.jmock.integration.junit3.MockObjectTestCase; import com.amazonaws.eclipse.datatools.enablement.simpledb.driver.SimpleDBItemName; import com.amazonaws.services.simpledb.model.Attribute; import com.amazonaws.services.simpledb.model.DeleteAttributesRequest; import com.amazonaws.services.simpledb.model.PutAttributesRequest; public class JdbcStatementTest extends MockObjectTestCase { @Override protected void setUp() throws Exception { super.setUp(); } public void testSimpleUpdate() throws Exception { String[] params = new String[] { "222", "myitem" }; String[][] attrs = new String[][] { new String[] { "111", "222" } }; assertUpdate("update mydomain set `111`=? where `" + SimpleDBItemName.ITEM_HEADER + "`=?", params, "mydomain", "myitem", attrs); } public void testComplexUpdate() throws Exception { String[] params = new String[] { "1111", "2222", "3333", "myitem" }; String[][] attrs = new String[][] { new String[] { "111", "1111" }, new String[] { "222", "2222" }, new String[] { "333", "3333" } }; assertUpdate("update mydomain set `111`=?, `222`=?, `333`=? 
where `" + SimpleDBItemName.ITEM_HEADER + "`=?", params, "mydomain", "myitem", attrs); } public void testNonPreparedUpdate() throws Exception { try { String[][] attrs = new String[][] { new String[] { "111", "1111" }, new String[] { "222", "2222" } }; assertUpdate("update mydomain set `111`='1111', `222`='2222' where `" + SimpleDBItemName.ITEM_HEADER + "`='myitem'", null, "mydomain", "myitem", attrs); fail(); } catch (SQLException e) { // ok, simple statements not supported yet } } public void testDeletingAttributes() throws Exception { String[] params = new String[] { "1111", null, "myitem" }; String[][] attrs = new String[][] { new String[] { "111", "1111" } }; List<Object> reqs = assertUpdate("update mydomain set `111`=?, `222`=? where `" + SimpleDBItemName.ITEM_HEADER + "`=?", params, "mydomain", "myitem", attrs); assertEquals(2, reqs.size()); Object req1 = reqs.get(1); assertTrue(req1 instanceof DeleteAttributesRequest); assertEquals(1, ((DeleteAttributesRequest) req1).getAttributes().size()); assertEquals("222", ((DeleteAttributesRequest) req1).getAttributes().get(0).getName()); } public void testSimpleInsert() throws Exception { String[] params = new String[] { "myitem", "222" }; String[][] attrs = new String[][] { new String[] { "111", "222" } }; assertUpdate("insert into `mydomain` (`" + SimpleDBItemName.ITEM_HEADER + "`, `111`) values(?, ?)", params, "mydomain", "myitem", attrs); } public void testDeleteRow() throws Exception { String[] params = new String[] { "myitem" }; List<Object> reqs = assertUpdate("delete from `mydomain` where `" + SimpleDBItemName.ITEM_HEADER + "`=?", params, "mydomain", "myitem", null); assertEquals(1, reqs.size()); Object req = reqs.get(0); assertTrue(req instanceof DeleteAttributesRequest); List<Attribute> attribute = ((DeleteAttributesRequest) req).getAttributes(); assertTrue(attribute == null || attribute.isEmpty()); } private List<Object> assertUpdate(final String sql, final String[] params, final String domain, final String 
item, final String[][] setAttrs) throws SQLException { final List<Object> reqHolder = new ArrayList<Object>(); JdbcStatement stmt; if (params != null) { stmt = new JdbcPreparedStatement(null, sql) { @Override int executeSDBRequest(final Object req) throws SQLException { if (req instanceof Collection) { return super.executeSDBRequest(req); } reqHolder.add(req); return 0; } }; int tally = 1; for (String param : params) { ((JdbcPreparedStatement) stmt).setObject(tally++, param); } } else { stmt = new JdbcStatement(null) { @Override int executeSDBRequest(final Object req) throws SQLException { if (req instanceof Collection) { return super.executeSDBRequest(req); } reqHolder.add(req); return 0; } }; } stmt.executeUpdate(sql); for (Object req : reqHolder) { if (req instanceof PutAttributesRequest) { PutAttributesRequest pareq = (PutAttributesRequest) req; assertEquals(domain, pareq.getDomainName().toLowerCase()); assertEquals(item, pareq.getItemName()); assertEquals(setAttrs.length, pareq.getAttributes().size()); int tally = 0; for (String[] attr : setAttrs) { assertEquals(attr[0], pareq.getAttributes().get(tally).getName()); assertEquals(attr[1], pareq.getAttributes().get(tally).getValue()); ++tally; } } } return reqHolder; } }
7,062
0
Create_ds/aws-toolkit-eclipse/tests/com.amazonaws.eclipse.simpledb.tests/src/com/amazonaws/eclipse/datatools/enablement/simpledb
Create_ds/aws-toolkit-eclipse/tests/com.amazonaws.eclipse.simpledb.tests/src/com/amazonaws/eclipse/datatools/enablement/simpledb/editor/AllTests.java
/* * Copyright 2009-2012 Amazon Technologies, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at: * * http://aws.amazon.com/apache2.0 * * This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES * OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and * limitations under the License. */ package com.amazonaws.eclipse.datatools.enablement.simpledb.editor; import junit.framework.Test; import junit.framework.TestSuite; public class AllTests { public static Test suite() { TestSuite suite = new TestSuite("Test for com.amazonaws.eclipse.datatools.enablement.simpledb"); suite.addTestSuite(EditorTest.class); suite.addTestSuite(SDBDataAccessorTest.class); return suite; } }
7,063
0
Create_ds/aws-toolkit-eclipse/tests/com.amazonaws.eclipse.simpledb.tests/src/com/amazonaws/eclipse/datatools/enablement/simpledb
Create_ds/aws-toolkit-eclipse/tests/com.amazonaws.eclipse.simpledb.tests/src/com/amazonaws/eclipse/datatools/enablement/simpledb/editor/EditorTest.java
/* * Copyright 2009-2012 Amazon Technologies, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at: * * http://aws.amazon.com/apache2.0 * * This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES * OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and * limitations under the License. */ package com.amazonaws.eclipse.datatools.enablement.simpledb.editor; import org.eclipse.swt.widgets.Composite; import org.eclipse.ui.IWorkbench; import org.eclipse.ui.PlatformUI; import org.jmock.integration.junit3.MockObjectTestCase; public class EditorTest extends MockObjectTestCase { private Composite c; private IWorkbench w; // private IWorkbenchPage aPage; @Override protected void setUp() throws Exception { super.setUp(); this.w = PlatformUI.getWorkbench(); // this.aPage = this.w.getActiveWorkbenchWindow().getActivePage(); } public void testGetTableEditor() throws Exception { // Shell shell = this.w.getActiveWorkbenchWindow().getShell(); // this.c = new Composite(shell, SWT.None); // // final String mockName = "testTable"; // final String colName = "colName"; // final TableDataEditor t = new TableDataEditor(); // final IEditorSite editorSite = mock(IEditorSite.class); // final TableForMock table = mock(TableForMock.class); // final Connection con = new JdbcConnection(new JdbcDriver(null), "", "", "") { // // @Override // public AmazonSimpleDB getClient() { // return null;// new AmazonSimpleDBMock(); // } // // }; // final BasicEList<Column> cols = new BasicEList<Column>(); // final Database db = mock(Database.class); // final Schema sc = mock(Schema.class); // final Column col1 = mock(ColumnForMock.class); // // cols.add(col1); // cols.add(col1); // // checking(new Expectations() { // { // allowing(table).getName(); // will(returnValue(mockName)); // 
allowing(table).getColumns(); // will(returnValue(cols)); // allowing(table).getSchema(); // will(returnValue(sc)); // allowing(table).getConnection(); // will(returnValue(con)); // allowing(table); // // allowing(sc).getCatalog(); // will(returnValue(null)); // allowing(sc).getDatabase(); // will(returnValue(db)); // allowing(sc); // // allowing(editorSite); // // allowing(col1).getTable(); // will(returnValue(table)); // allowing(col1).getName(); // will(returnValue(colName)); // allowing(col1); // // allowing(db).getVendor(); // will(returnValue("SimpleDB")); // allowing(db).getVersion(); // will(returnValue("1.0")); // allowing(db); // } // }); // // t.init(editorSite, new TableDataEditorInput(table)); // t.createPartControl(this.c); // assertEquals(mockName, t.getSqlTable().getName()); // // ITableData tableData = t.getTableData(); // // assertEquals(2, tableData.getColumnCount()); // assertEquals(Types.VARCHAR, tableData.getColumnType(0)); // assertEquals(Types.VARCHAR, tableData.getColumnType(1)); // // Class<?> c = Class.forName(TableDataTableCursorExternalEditingSupport.class.getCanonicalName()); // TableDataTableCursorExternalEditingSupport cursor = (TableDataTableCursorExternalEditingSupport) t.getCursor(); // Field field = c.getDeclaredField("cellEditors"); // field.setAccessible(true); // IExternalTableDataEditor[] edit = (IExternalTableDataEditor[]) field.get(cursor); // assertEquals(SDBTextEditor.class, edit[0].getClass()); // // // assertEquals(SimpleDBDataAccessor.class, tableData.getColumnDataAccessor(1).getClass()); } }
7,064
0
Create_ds/aws-toolkit-eclipse/tests/com.amazonaws.eclipse.simpledb.tests/src/com/amazonaws/eclipse/datatools/enablement/simpledb
Create_ds/aws-toolkit-eclipse/tests/com.amazonaws.eclipse.simpledb.tests/src/com/amazonaws/eclipse/datatools/enablement/simpledb/editor/SDBDataAccessorTest.java
/* * Copyright 2009-2012 Amazon Technologies, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at: * * http://aws.amazon.com/apache2.0 * * This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES * OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and * limitations under the License. */ package com.amazonaws.eclipse.datatools.enablement.simpledb.editor; import java.util.Arrays; import junit.framework.TestCase; public class SDBDataAccessorTest extends TestCase { private SimpleDBDataAccessor sdb; @Override protected void setUp() throws Exception { this.sdb = new SimpleDBDataAccessor(); } public void testIsSnippet() { assertNotNull(this.sdb); assertFalse("Null is not a snippet", this.sdb.isSnippet(null, -1)); assertFalse("String is not a snippet", this.sdb.isSnippet("String value", -1)); assertTrue("Empty String array is snippet", this.sdb.isSnippet(new String[] {}, -1)); assertTrue("String array with empty string is snippet", this.sdb.isSnippet(new String[] { "" }, -1)); assertTrue("String array is snippet", this.sdb.isSnippet(new String[] { "String value", "String value" }, -1)); } public void testGetLabel() { assertNotNull(this.sdb); assertEquals("NULL", this.sdb.getLabel(null, -1)); assertEquals("", this.sdb.getLabel("", -1)); assertEquals("String value", this.sdb.getLabel("String value", -1)); assertEquals(Arrays.toString(new String[] { "1", "2" }), this.sdb.getLabel(new String[] { "1", "2" }, -1)); } }
7,065
0
Create_ds/aws-toolkit-eclipse/tests/com.amazonaws.eclipse.simpledb.tests/src/com/amazonaws/eclipse/datatools/enablement/simpledb
Create_ds/aws-toolkit-eclipse/tests/com.amazonaws.eclipse.simpledb.tests/src/com/amazonaws/eclipse/datatools/enablement/simpledb/editor/TableForMock.java
/* * Copyright 2009-2012 Amazon Technologies, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at: * * http://aws.amazon.com/apache2.0 * * This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES * OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and * limitations under the License. */ package com.amazonaws.eclipse.datatools.enablement.simpledb.editor; import org.eclipse.datatools.connectivity.sqm.core.rte.ICatalogObject; import org.eclipse.datatools.modelbase.sql.tables.BaseTable; public interface TableForMock extends BaseTable, ICatalogObject { }
7,066
0
Create_ds/aws-toolkit-eclipse/tests/com.amazonaws.eclipse.simpledb.tests/src/com/amazonaws/eclipse/datatools/enablement/simpledb
Create_ds/aws-toolkit-eclipse/tests/com.amazonaws.eclipse.simpledb.tests/src/com/amazonaws/eclipse/datatools/enablement/simpledb/editor/ColumnForMock.java
/* * Copyright 2009-2012 Amazon Technologies, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at: * * http://aws.amazon.com/apache2.0 * * This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES * OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and * limitations under the License. */ package com.amazonaws.eclipse.datatools.enablement.simpledb.editor; import org.eclipse.datatools.connectivity.sqm.core.rte.ICatalogObject; import org.eclipse.datatools.modelbase.sql.tables.Column; public interface ColumnForMock extends ICatalogObject, Column { }
7,067
0
Create_ds/aws-toolkit-eclipse/tests/com.amazonaws.eclipse.lambda.tests/src/com/amazonaws/eclipse/lambda
Create_ds/aws-toolkit-eclipse/tests/com.amazonaws.eclipse.lambda.tests/src/com/amazonaws/eclipse/lambda/serverless/ServerlessTemplateMapperTest.java
/* * Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. * A copy of the License is located at * * http://aws.amazon.com/apache2.0 * * or in the "license" file accompanying this file. This file is distributed * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. See the License for the specific language governing * permissions and limitations under the License. */ package com.amazonaws.eclipse.lambda.serverless; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; import static org.junit.Assert.assertArrayEquals; import java.io.IOException; import java.io.InputStream; import java.util.Arrays; import java.util.Collections; import java.util.List; import java.util.Map; import java.util.function.Consumer; import org.junit.Assert; import org.junit.Before; import org.junit.Test; import com.amazonaws.eclipse.lambda.serverless.model.ServerlessTemplate; import com.amazonaws.eclipse.lambda.serverless.model.TypeProperties; import com.amazonaws.eclipse.lambda.serverless.model.transform.ServerlessFunction; import com.amazonaws.eclipse.lambda.serverless.model.transform.ServerlessModel; import com.fasterxml.jackson.core.JsonParseException; import com.fasterxml.jackson.databind.JsonMappingException; public class ServerlessTemplateMapperTest { private static final String SERVERLESS_JSON_TEMPLATE_FILE = "serverless-template.json"; private static final String SERVERLESS_YAML_TEMPLATE_FILE = "serverless-template.yaml"; private static final String CODESTAR_YAML_TEMPLATE_FILE = "codestar.template.yml"; private ServerlessModel jsonModel; private ServerlessTemplate jsonTemplate; private ServerlessModel yamlModel; private ServerlessTemplate yamlTemplate; private ServerlessModel codestarModel; private ServerlessTemplate codestarTemplate; @Before public void 
setUp() throws JsonParseException, JsonMappingException, IOException { try (InputStream serverlessTemplateInputStream = ServerlessTemplateMapperTest.class.getResourceAsStream(SERVERLESS_JSON_TEMPLATE_FILE)) { jsonModel = Serverless.load(serverlessTemplateInputStream); jsonTemplate = Serverless.convert(jsonModel); } try (InputStream serverlessTemplateInputStream = ServerlessTemplateMapperTest.class.getResourceAsStream(SERVERLESS_YAML_TEMPLATE_FILE)) { yamlModel = Serverless.load(serverlessTemplateInputStream); yamlTemplate = Serverless.convert(yamlModel); } try (InputStream serverlessTemplateInputStream = ServerlessTemplateMapperTest.class.getResourceAsStream(CODESTAR_YAML_TEMPLATE_FILE)) { codestarModel = Serverless.load(serverlessTemplateInputStream); codestarTemplate = Serverless.convert(codestarModel); } } @Test public void testCodeStarTransform() throws IOException { Assert.assertArrayEquals(new String[]{"AWS::Serverless-2016-10-31", "AWS::CodeStar"}, codestarTemplate.getTransform().toArray()); } @Test public void testModel_additionalProperties() { Consumer<ServerlessModel> testModel_additionalProperties = (model) -> { Map<String, Object> additionalProperties = model.getAdditionalProperties(); testValuePath(additionalProperties, "bar", "foo"); testValuePath(additionalProperties, "bar", "foo1", "foo"); }; testModel_additionalProperties.accept(jsonModel); testModel_additionalProperties.accept(yamlModel); } @Test public void testModel_ServerlessFunction() { Consumer<ServerlessModel> testModel_ServerlessFunction = (model) -> { Map<String, ServerlessFunction> functions = jsonModel.getServerlessFunctions(); ServerlessFunction function = testServerlessFunction(functions, "ServerlessFunction", "fakeCodeUri", "fakeHandler", "java8", 512, 300, Arrays.asList("Policy1", "Policy2")); Map<String, Object> additionalTopLevelProperties = function.getAdditionalTopLevelProperties(); testValuePath(additionalTopLevelProperties, "bar", "foo"); Map<String, Object> 
additionalProperties = function.getAdditionalProperties(); assertTrue(additionalProperties.containsKey("Events")); assertS3EventMatches((Map<String, Object>)additionalProperties.get("Events"), "S3Event", "fakeBucket"); }; testModel_ServerlessFunction.accept(jsonModel); testModel_ServerlessFunction.accept(yamlModel); } @Test public void testModel_ServerlessFunction2() { Consumer<ServerlessModel> testModel_ServerlessFunction2 = (model) -> { Map<String, ServerlessFunction> functions = jsonModel.getServerlessFunctions(); ServerlessFunction function = testServerlessFunction(functions, "ServerlessFunction2", "fakeCodeUri", "fakeHandler", "fakeRuntime", 100, 100, Collections.<String>emptyList()); Map<String, Object> additionalTopLevelProperties = function.getAdditionalTopLevelProperties(); assertTrue(additionalTopLevelProperties.isEmpty()); Map<String, Object> additionalProperties = function.getAdditionalProperties(); testValuePath(additionalProperties, "value1", "Environment", "Variables", "key1"); testValuePath(additionalProperties, "value2", "Environment", "Variables", "key2"); }; testModel_ServerlessFunction2.accept(jsonModel); testModel_ServerlessFunction2.accept(yamlModel); } @Test public void testModel_additionalResources() { Consumer<ServerlessModel> testModel_additionalResources = (model) -> { Map<String, TypeProperties> resources = jsonModel.getAdditionalResources(); assertTrue(resources.containsKey("IamRole")); TypeProperties tp = resources.get("IamRole"); assertEquals("AWS::IAM::Role", tp.getType()); Map<String, Object> properties = tp.getProperties(); assertEquals("fakeValue", properties.get("fakeKey")); assertTrue(tp.getAdditionalProperties().containsKey("foo")); assertEquals("bar", tp.getAdditionalProperties().get("foo")); }; testModel_additionalResources.accept(jsonModel); testModel_additionalResources.accept(yamlModel); } @Test public void testTemplate_Metadata() { Consumer<ServerlessModel> testTemplate_Metadata = (model) -> { assertEquals("2010-09-09", 
jsonTemplate.getAWSTemplateFormatVersion()); assertEquals(null, jsonTemplate.getDescription()); assertArrayEquals(new String[]{"AWS::Serverless-2016-10-31"}, jsonTemplate.getTransform().toArray()); }; testTemplate_Metadata.accept(jsonModel); testTemplate_Metadata.accept(yamlModel); } @Test public void testTemplate_AdditionalProperties() { Consumer<ServerlessTemplate> testTemplate_AdditionalProperties = (template) -> { Map<String, Object> additionalProperties = jsonTemplate.getAdditionalProperties(); testValuePath(additionalProperties, "bar", "foo"); testValuePath(additionalProperties, "bar", "foo1", "foo"); }; testTemplate_AdditionalProperties.accept(jsonTemplate); testTemplate_AdditionalProperties.accept(yamlTemplate); } @Test public void testTemplate_ServerlessFunction() { Consumer<ServerlessTemplate> testTemplate_ServerlessFunction = (template) -> { Map<String, TypeProperties> resources = jsonTemplate.getResources(); TypeProperties resource = testTemplateResource(resources, "ServerlessFunction", "AWS::Serverless::Function"); Map<String, Object> additionalProperties = resource.getAdditionalProperties(); testValuePath(additionalProperties, "bar", "foo"); Map<String, Object> properties = resource.getProperties(); testValuePath(properties, "fakeCodeUri", "CodeUri"); testValuePath(properties, "fakeHandler", "Handler"); testValuePath(properties, "S3", "Events", "S3Event", "Type"); testValuePath(properties, "fakeBucket", "Events", "S3Event", "Properties", "Bucket"); }; testTemplate_ServerlessFunction.accept(jsonTemplate); testTemplate_ServerlessFunction.accept(yamlTemplate); } @Test public void testTemplate_ServerlessFunction2() { Consumer<ServerlessTemplate> testTemplate_ServerlessFunction2 = (template) -> { Map<String, TypeProperties> resources = jsonTemplate.getResources(); TypeProperties resource = testTemplateResource(resources, "ServerlessFunction2", "AWS::Serverless::Function"); Map<String, Object> additionalProperties = resource.getAdditionalProperties(); 
assertTrue(additionalProperties.isEmpty()); Map<String, Object> properties = resource.getProperties(); testValuePath(properties, "fakeCodeUri", "CodeUri"); testValuePath(properties, "fakeHandler", "Handler"); testValuePath(properties, "fakeRuntime", "Runtime"); testValuePath(properties, "fakeFunctionName", "FunctionName"); testValuePath(properties, new Integer(100), "MemorySize"); testValuePath(properties, new Integer(100), "Timeout"); testValuePath(properties, "value1", "Environment", "Variables", "key1"); testValuePath(properties, "value2", "Environment", "Variables", "key2"); }; testTemplate_ServerlessFunction2.accept(jsonTemplate); testTemplate_ServerlessFunction2.accept(yamlTemplate); } @Test public void testTemplate_IamRole() { Consumer<ServerlessTemplate> testTemplate_IamRole = (template) -> { Map<String, TypeProperties> resources = jsonTemplate.getResources(); TypeProperties resource = testTemplateResource(resources, "ServerlessFunction", "AWS::Serverless::Function"); Map<String, Object> additionalProperties = resource.getAdditionalProperties(); testValuePath(additionalProperties, "bar", "foo"); Map<String, Object> properties = resource.getProperties(); testValuePath(properties, "fakeCodeUri", "CodeUri"); testValuePath(properties, "fakeHandler", "Handler"); testValuePath(properties, "S3", "Events", "S3Event", "Type"); testValuePath(properties, "fakeBucket", "Events", "S3Event", "Properties", "Bucket"); }; testTemplate_IamRole.accept(jsonTemplate); testTemplate_IamRole.accept(yamlTemplate); } private TypeProperties testTemplateResource(Map<String, TypeProperties> resources, String resourceName, String resourceType) { assertTrue(resources.containsKey(resourceName)); TypeProperties resource = resources.get(resourceName); assertEquals(resourceType, resource.getType()); return resource; } private void assertS3EventMatches(Map<String, Object> events, String s3EventName, String bucketName) { testValuePath(events, "S3", s3EventName, "Type"); testValuePath(events, 
bucketName, s3EventName, "Properties", "Bucket"); } private void testValuePath(Map<String, Object> map, Object value, String... keyPath) { assertTrue(keyPath != null && keyPath.length != 0); Object currentValue = map; for (String key : keyPath) { Map<String, Object> currentMap = (Map<String, Object>) currentValue; assertTrue(currentMap.containsKey(key)); currentValue = currentMap.get(key); } assertEquals(value, currentValue); } private ServerlessFunction testServerlessFunction(Map<String, ServerlessFunction> functions, String resourceName, String codeUri, String handler, String runtime, int memorySize, int timeout, List<String> policies) { assertTrue(functions.containsKey(resourceName)); ServerlessFunction function = functions.get(resourceName); assertEquals(codeUri, function.getCodeUri()); assertEquals(handler, function.getHandler()); assertEquals(runtime, function.getRuntime()); assertEquals(memorySize, function.getMemorySize().intValue()); assertEquals(timeout, function.getTimeout().intValue()); List<String> actualPolicies = function.getPolicies(); assertTrue(actualPolicies.equals(policies)); return function; } }
7,068
0
Create_ds/aws-toolkit-eclipse/tests/com.amazonaws.eclipse.lambda.tests/src/com/amazonaws/eclipse/lambda
Create_ds/aws-toolkit-eclipse/tests/com.amazonaws.eclipse.lambda.tests/src/com/amazonaws/eclipse/lambda/launching/SamLocalConsoleLineTrackerTest.java
/* * Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. * A copy of the License is located at * * http://aws.amazon.com/apache2.0 * * or in the "license" file accompanying this file. This file is distributed * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. See the License for the specific language governing * permissions and limitations under the License. */ package com.amazonaws.eclipse.lambda.launching; import java.util.regex.Matcher; import static com.amazonaws.eclipse.lambda.launching.SamLocalConsoleLineTracker.URL_PATTERN; import org.junit.Assert; import org.junit.Test; public class SamLocalConsoleLineTrackerTest { private TestCase[] testCases = { new TestCase("foo: http://234.12.21.12:2000", true, "http://234.12.21.12:2000"), new TestCase("foo: http://234.12.21.12:2000bar", true, "http://234.12.21.12:2000"), new TestCase("foo: https://234.12.21.12:3000", true, "https://234.12.21.12:3000"), new TestCase("foo: 234.12.255.12:2012", true, "234.12.255.12:2012"), new TestCase("foo: localhost:3000", true, "localhost:3000"), new TestCase("217.0.0.1:3000", true, "217.0.0.1:3000"), new TestCase("217.0.0.1", false, null), new TestCase("This is foo", false, null) }; private static class TestCase { String text; boolean valid; String ipAddress; private TestCase(String text, boolean valid, String ipAddress) { this.text = text; this.valid = valid; this.ipAddress = ipAddress; } } @Test public void testPatterns() { for (TestCase testCase : testCases) { doTestPattern(testCase.text, testCase.valid, testCase.ipAddress); } } private void doTestPattern(String line, boolean match, String matchString) { Matcher matcher = URL_PATTERN.matcher(line); Assert.assertEquals(match, matcher.find()); if (match) { String actualMatchString = matcher.group(); Assert.assertEquals(matchString, 
actualMatchString); } } }
7,069
0
Create_ds/aws-toolkit-eclipse/tests/com.amazonaws.eclipse.lambda.tests/src/com/amazonaws/eclipse/lambda
Create_ds/aws-toolkit-eclipse/tests/com.amazonaws.eclipse.lambda.tests/src/com/amazonaws/eclipse/lambda/blueprint/BlueprintsTest.java
package com.amazonaws.eclipse.lambda.blueprint; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; import java.io.File; import java.io.IOException; import java.util.Map; import java.util.Map.Entry; import java.util.Set; import org.junit.Before; import org.junit.Test; import com.amazonaws.eclipse.lambda.project.template.CodeTemplateManager; import com.amazonaws.eclipse.lambda.project.template.data.LambdaBlueprintTemplateData; import com.amazonaws.eclipse.lambda.project.template.data.PomFileTemplateData; import com.amazonaws.eclipse.lambda.project.template.data.SamFileTemplateData; import com.amazonaws.eclipse.lambda.serverless.Serverless; import com.amazonaws.eclipse.lambda.serverless.model.transform.ServerlessModel; import com.amazonaws.eclipse.lambda.serverless.template.ServerlessDataModelTemplateData; import com.amazonaws.eclipse.lambda.serverless.template.ServerlessHandlerTemplateData; import freemarker.template.Template; import freemarker.template.TemplateException; public class BlueprintsTest { private CodeTemplateManager manager = CodeTemplateManager.getInstance(); private LambdaBlueprintTemplateData lambdaBlueprintTemplateData; private ServerlessDataModelTemplateData serverlessDataModelTemplateData; private ServerlessHandlerTemplateData serverlessHandlerTemplateData; private PomFileTemplateData pomFileTemplateData; private SamFileTemplateData samFileTemplateData; @Before public void setUp() { lambdaBlueprintTemplateData = mockLambdaBlueprintTemplateData(); serverlessDataModelTemplateData = mockServerlessDataModelTemplateData(); serverlessHandlerTemplateData = mockServerlessHandlerTemplateData(); pomFileTemplateData = mockPomFileTemplateData(); samFileTemplateData = mockSamFileTemplateData(); } // Assert all the needed files exist and the templates are valid with the mocked data. 
@Test public void testBlueprintManager() { assertFilesExist( manager.getLambdaBlueprintsConfigFile(), manager.getServerlessBlueprintsConfigFile(), manager.getServerlessReadmeFile() ); assertTemplatesValid(lambdaBlueprintTemplateData, manager.getlambdaProjectReadmeTemplate(), manager.getTestContextTemplate(), manager.getTestUtilsTemplate() ); assertTemplatesValid(serverlessDataModelTemplateData, manager.getServerlessInputClassTemplate(), manager.getServerlessOutputClassTemplate() ); assertTemplatesValid(serverlessHandlerTemplateData, manager.getServerlessHandlerClassTemplate() ); } @Test public void testLambdaBlueprints() { LambdaBlueprintsConfig config = BlueprintsProvider.provideLambdaBlueprints(); assertNotNull(config.getDefaultBlueprint()); Map<String, LambdaBlueprint> blueprints = config.getBlueprints(); assertNotNull(blueprints); // Assert the default blueprint is included in the Blueprint map. assertNotNull(blueprints.get(config.getDefaultBlueprint())); for (Entry<String, LambdaBlueprint> entry : blueprints.entrySet()) { LambdaBlueprint blueprint = entry.getValue(); assertNotNull(blueprint.getDisplayName()); assertTemplatesValid(lambdaBlueprintTemplateData, manager.getLambdaHandlerTemplate(blueprint), manager.getLambdaHandlerTestTemplate(blueprint)); assertTemplatesValid(pomFileTemplateData, manager.getLambdaBlueprintPomTemplate(blueprint)); if (blueprint.getTestJsonFile() != null) { assertFileExists(manager.getLambdaTestJsonFile(blueprint)); } } } @Test public void testServerlessBlueprints() { ServerlessBlueprintsConfig config = BlueprintsProvider.provideServerlessBlueprints(); assertNotNull(config.getDefaultBlueprint()); Map<String, ServerlessBlueprint> blueprints = config.getBlueprints(); assertNotNull(blueprints); assertNotNull(blueprints.get(config.getDefaultBlueprint())); for (Entry<String, ServerlessBlueprint> entry : blueprints.entrySet()) { ServerlessBlueprint blueprint = entry.getValue(); assertNotNull(blueprint.getDisplayName()); 
assertSamTemplateValid(samFileTemplateData, manager, blueprint); assertTemplatesValid(pomFileTemplateData, manager.getServerlessPomFile(blueprint)); Map<String, String> handlers = blueprint.getHandlerTemplatePaths(); assertNotNull(handlers); for (String handlerName : handlers.keySet()) { assertTemplatesValid(serverlessHandlerTemplateData, manager.getServerlessHandlerClassTemplate(blueprint, handlerName)); } } } private LambdaBlueprintTemplateData mockLambdaBlueprintTemplateData() { LambdaBlueprintTemplateData data = new LambdaBlueprintTemplateData(); data.setPackageName("com.foo"); data.setHandlerClassName("Foo"); data.setHandlerTestClassName("FooTest"); data.setInputJsonFileName("foo.json"); return data; } private ServerlessDataModelTemplateData mockServerlessDataModelTemplateData() { ServerlessDataModelTemplateData serverlessDataModelTemplateData = new ServerlessDataModelTemplateData(); serverlessDataModelTemplateData.setPackageName("com.foo"); serverlessDataModelTemplateData.setServerlessInputClassName("ServerlessInput"); serverlessDataModelTemplateData.setServerlessOutputClassName("ServerlessOutput"); return serverlessDataModelTemplateData; } private ServerlessHandlerTemplateData mockServerlessHandlerTemplateData() { ServerlessHandlerTemplateData serverlessHandlerTemplateData = new ServerlessHandlerTemplateData(); serverlessHandlerTemplateData.setPackageName("com.foo"); serverlessHandlerTemplateData.setClassName("FooHandler"); serverlessHandlerTemplateData.setInputFqcn("com.foo.Input"); serverlessHandlerTemplateData.setOutputFqcn("com.foo.Output"); return serverlessHandlerTemplateData; } private PomFileTemplateData mockPomFileTemplateData() { PomFileTemplateData pomFileTemplateData = new PomFileTemplateData(); pomFileTemplateData.setGroupId("com.foo"); pomFileTemplateData.setArtifactId("bar"); pomFileTemplateData.setVersion("1.0.0"); pomFileTemplateData.setAwsJavaSdkVersion("1.11.111"); return pomFileTemplateData; } private SamFileTemplateData 
mockSamFileTemplateData() { SamFileTemplateData data = new SamFileTemplateData(); data.setPackageName("com.foo"); data.setArtifactId("bar"); data.setVersion("1.0.0"); return data; } private void assertTemplatesValid(Object dataModel, Template... templates) { for (Template template : templates) { try { String content = CodeTemplateManager.processTemplateWithData(template, dataModel); assertStringNotEmpty(content); } catch (TemplateException | IOException e) { fail(template.getName()); } } } /* * Assert all the lambda functions defined in the sam file have the corresponding template file. */ private void assertSamTemplateValid(Object dataModel, CodeTemplateManager manager, ServerlessBlueprint blueprint) { Template samTemplate = manager.getServerlessSamTemplate(blueprint); try { String content = CodeTemplateManager.processTemplateWithData(samTemplate, dataModel); assertStringNotEmpty(content); ServerlessModel model = Serverless.loadFromContent(content); Set<String> physicalIds = model.getServerlessFunctions().keySet(); Set<String> pathIds = blueprint.getHandlerTemplatePaths().keySet(); assertEquals(physicalIds, pathIds); } catch (TemplateException | IOException e) { fail(samTemplate.getName()); } } private void assertFilesExist(File... files) { for (File file : files) { assertFileExists(file); } } private void assertFileExists(File file) { assertNotNull(file); assertTrue(file.exists()); } private void assertStringNotEmpty(String value) { assertNotNull(value); assertTrue(value.trim().length() > 0); } }
7,070
0
Create_ds/aws-toolkit-eclipse/tests/com.amazonaws.eclipse.lambda.tests/src/com/amazonaws/eclipse/lambda
Create_ds/aws-toolkit-eclipse/tests/com.amazonaws.eclipse.lambda.tests/src/com/amazonaws/eclipse/lambda/blueprint/NewServerlessProjectDataModelTest.java
/* * Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. * A copy of the License is located at * * http://aws.amazon.com/apache2.0 * * or in the "license" file accompanying this file. This file is distributed * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. See the License for the specific language governing * permissions and limitations under the License. */ package com.amazonaws.eclipse.lambda.blueprint; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.fail; import java.io.File; import java.io.FileWriter; import java.io.IOException; import java.util.List; import java.util.Map; import java.util.Map.Entry; import org.junit.Test; import com.amazonaws.eclipse.core.model.MavenConfigurationDataModel; import com.amazonaws.eclipse.lambda.project.template.data.SamFileTemplateData; import com.amazonaws.eclipse.lambda.project.wizard.model.NewServerlessProjectDataModel; import com.amazonaws.eclipse.lambda.serverless.template.ServerlessDataModelTemplateData; import com.amazonaws.eclipse.lambda.serverless.template.ServerlessHandlerTemplateData; import freemarker.template.TemplateException; /** * Test class for {@link #NewServerlessProjectDataModel} to correctly return the template data. 
*/ public class NewServerlessProjectDataModelTest { private static final String FAKE_GROUP_ID = "com.foo"; private static final String FAKE_ARTIFACT_ID = "bar"; private static final String FAKE_VERSION = "1.1.1"; private static final String FAKE_PACKAGE_NAME = "com.foo.baz"; @Test public void testServerlessProjectDataModel_withBlueprint() { ServerlessBlueprintsConfig config = BlueprintsProvider.provideServerlessBlueprints(); assertNotNull(config.getDefaultBlueprint()); Map<String, ServerlessBlueprint> blueprints = config.getBlueprints(); assertNotNull(blueprints); assertNotNull(blueprints.get(config.getDefaultBlueprint())); for (Entry<String, ServerlessBlueprint> entry : blueprints.entrySet()) { ServerlessBlueprint blueprint = entry.getValue(); NewServerlessProjectDataModel dataModel = mockServerlessProjectDataModel(blueprint.getDisplayName()); assertServerlessDataModelTemplateData(dataModel); assertServerlessSamFileTemplateData(dataModel); } } // Test Lambda handlers generated from the customer provided template file is correct. // If no package prefix, we use the Maven package name as the name space, while if a // FQCN is provided, we use that instead. @Test public void testServerlessProjectDataModel_withTemplate() { assertServerlessHandlerTemplateData(null, "FooHandler"); assertServerlessHandlerTemplateData("org.bar", "FooHandler"); } // Assert the handler template data in the serverless template file is correct. private void assertServerlessHandlerTemplateData(String packageName, String className) { try { String fqcn = packageName == null ? className : packageName + "." 
+ className; NewServerlessProjectDataModel dataModel = mockServerlessProjectDataModel(mockServerlessTemplateFile(fqcn)); List<ServerlessHandlerTemplateData> template = dataModel.getServerlessHandlerTemplateData(); assertEquals(1, template.size()); assertEquals(template.get(0).getClassName(), className); if (packageName == null) { assertEquals(template.get(0).getPackageName(), FAKE_PACKAGE_NAME + ".function"); } else { assertEquals(template.get(0).getPackageName(), packageName); } } catch (IOException | TemplateException e) { fail("Failed to assert serverless handler template data"); } } // Assert the template data of API Gateway data model for Lambda is correct. private void assertServerlessDataModelTemplateData(NewServerlessProjectDataModel dataModel) { try { ServerlessDataModelTemplateData dataModelTemplateData = dataModel.getServerlessDataModelTemplateData(); assertEquals(FAKE_PACKAGE_NAME + ".model", dataModelTemplateData.getPackageName()); assertEquals("ServerlessInput", dataModelTemplateData.getServerlessInputClassName()); assertEquals("ServerlessOutput", dataModelTemplateData.getServerlessOutputClassName()); } catch (IOException | TemplateException e) { fail("Failed to test data model template data in " + dataModel.getBlueprintName()); } } // Assert the SAM file template data in the blueprint is correct. 
private void assertServerlessSamFileTemplateData(NewServerlessProjectDataModel dataModel) { SamFileTemplateData samFileTemplateData = dataModel.getServerlessSamTemplateData(); assertEquals(FAKE_PACKAGE_NAME + ".function", samFileTemplateData.getPackageName()); assertEquals(FAKE_ARTIFACT_ID, samFileTemplateData.getArtifactId()); assertEquals(FAKE_VERSION, samFileTemplateData.getVersion()); } // Mock a NewServerlessProjectDataModel that uses an existing blueprint private NewServerlessProjectDataModel mockServerlessProjectDataModel(String blueprintName) { NewServerlessProjectDataModel dataModel = new NewServerlessProjectDataModel(); MavenConfigurationDataModel mavenModel = dataModel.getMavenConfigurationDataModel(); mavenModel.setGroupId(FAKE_GROUP_ID); mavenModel.setArtifactId(FAKE_ARTIFACT_ID); mavenModel.setVersion(FAKE_VERSION); mavenModel.setPackageName(FAKE_PACKAGE_NAME); dataModel.setBlueprintName(blueprintName); dataModel.setUseBlueprint(true); dataModel.setUseServerlessTemplateFile(false); return dataModel; } // Mock a NewServerlessProjectDataModel that uses a serverless template file. 
private NewServerlessProjectDataModel mockServerlessProjectDataModel(File templateFile) { NewServerlessProjectDataModel dataModel = new NewServerlessProjectDataModel(); MavenConfigurationDataModel mavenModel = dataModel.getMavenConfigurationDataModel(); mavenModel.setGroupId(FAKE_GROUP_ID); mavenModel.setArtifactId(FAKE_ARTIFACT_ID); mavenModel.setVersion(FAKE_VERSION); mavenModel.setPackageName(FAKE_PACKAGE_NAME); dataModel.setUseBlueprint(false); dataModel.setUseServerlessTemplateFile(true); dataModel.getImportFileDataModel().setFilePath(templateFile.getAbsolutePath()); return dataModel; } private File mockServerlessTemplateFile(String handlerName) { String templateContent = String.format( "{\n" + " \"Resources\": {\n" + " \"ServerlessFunction\": {\n" + " \"Type\" : \"AWS::Serverless::Function\",\n" + " \"Properties\" : {\n" + " \"CodeUri\" : \"fakeCodeUri\",\n" + " \"Handler\" : \"%s\",\n" + " \"Policies\" : [\n" + " \"Policy1\", \"Policy2\"\n" + " ]\n" + " }\n" + " }\n" + " }\n" + "}", handlerName); try { File tempFile = File.createTempFile("serverless", ".template"); try (FileWriter writer = new FileWriter(tempFile)) { writer.write(templateContent); } return tempFile; } catch (IOException e) { fail("Failed to write serverless template content to a temp file."); return null; } } }
7,071
0
Create_ds/aws-toolkit-eclipse/tests/com.amazonaws.eclipse.core.tests/src/com/amazonaws/eclipse/core
Create_ds/aws-toolkit-eclipse/tests/com.amazonaws.eclipse.core.tests/src/com/amazonaws/eclipse/core/preferences/PreferenceInitializerTest.java
/* * Copyright 2009-2012 Amazon Technologies, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at: * * http://aws.amazon.com/apache2.0 * * This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES * OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and * limitations under the License. */ package com.amazonaws.eclipse.core.preferences; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; import org.eclipse.jface.preference.IPreferenceStore; import org.eclipse.jface.preference.PreferenceStore; import org.junit.Test; /** * Unit tests for the preference initializer to verify that it correctly imports * any pre-existing preferences from the EC2 plugin. * * @author Jason Fulghum <fulghum@amazon.com> */ public class PreferenceInitializerTest { /** * If the EC2 plugin preferences haven't been imported yet, we expect them * to be pulled from the EC2 plugin preference store and imported into the * AWS Toolkit Core preference store. 
*/ @Test public void testImportPreferences() { MockPreferenceInitializer preferenceInitializer = new MockPreferenceInitializer(); IPreferenceStore preferenceStore = preferenceInitializer.getAwsToolkitCorePreferenceStore(); assertEquals("", preferenceStore.getString(PreferenceConstants.P_ACCESS_KEY)); assertFalse(preferenceStore.getBoolean(PreferenceConstants.P_EC2_PREFERENCES_IMPORTED)); preferenceInitializer.initializeDefaultPreferences(); assertEquals("accessKey", preferenceStore.getString(PreferenceConstants.P_ACCESS_KEY)); assertEquals("secretKey", preferenceStore.getString(PreferenceConstants.P_SECRET_KEY)); assertEquals("userId", preferenceStore.getString(PreferenceConstants.P_USER_ID)); assertEquals("certFile", preferenceStore.getString(PreferenceConstants.P_CERTIFICATE_FILE)); assertEquals("privateKey", preferenceStore.getString(PreferenceConstants.P_PRIVATE_KEY_FILE)); assertTrue(preferenceStore.getBoolean(PreferenceConstants.P_EC2_PREFERENCES_IMPORTED)); } /** * If the EC2 plugin preferences have already been imported, we expect them * to not be imported again. */ @Test public void testOnlyImportOnce() { MockPreferenceInitializer preferenceInitializer = new MockPreferenceInitializer(); IPreferenceStore preferenceStore = preferenceInitializer.getAwsToolkitCorePreferenceStore(); preferenceStore.setValue(PreferenceConstants.P_EC2_PREFERENCES_IMPORTED, true); assertEquals("", preferenceStore.getString(PreferenceConstants.P_ACCESS_KEY)); preferenceInitializer.initializeDefaultPreferences(); assertEquals("", preferenceStore.getString(PreferenceConstants.P_ACCESS_KEY)); } /** * Subclass of AWS Toolkit Core's preference initializer that stubs out real * EC2 and AWS Toolkit Core preference stores for easy testing. 
*/ private static class MockPreferenceInitializer extends PreferenceInitializer { private IPreferenceStore awsToolkitCorePreferenceStore = new PreferenceStore(); private IPreferenceStore ec2PreferenceStore = new PreferenceStore(); MockPreferenceInitializer() { ec2PreferenceStore.setValue(PreferenceConstants.P_ACCESS_KEY, "accessKey"); ec2PreferenceStore.setValue(PreferenceConstants.P_SECRET_KEY, "secretKey"); ec2PreferenceStore.setValue(PreferenceConstants.P_USER_ID, "userId"); ec2PreferenceStore.setValue(PreferenceConstants.P_CERTIFICATE_FILE, "certFile"); ec2PreferenceStore.setValue(PreferenceConstants.P_PRIVATE_KEY_FILE, "privateKey"); } @Override protected IPreferenceStore getEc2PluginPreferenceStore() { return ec2PreferenceStore; } @Override protected IPreferenceStore getAwsToolkitCorePreferenceStore() { return awsToolkitCorePreferenceStore; } } }
7,072
0
Create_ds/aws-toolkit-eclipse/tests/com.amazonaws.eclipse.core.tests/src/com/amazonaws/eclipse/core
Create_ds/aws-toolkit-eclipse/tests/com.amazonaws.eclipse.core.tests/src/com/amazonaws/eclipse/core/regions/AwsClientFactoryTests.java
package com.amazonaws.eclipse.core.regions; import org.junit.Assert; import org.junit.BeforeClass; import org.junit.Test; import com.amazonaws.AmazonServiceException; import com.amazonaws.auth.AWSStaticCredentialsProvider; import com.amazonaws.auth.BasicAWSCredentials; import com.amazonaws.eclipse.core.AWSClientFactory; import com.amazonaws.services.cloudfront.AmazonCloudFront; import com.amazonaws.services.cloudfront.model.ListDistributionsRequest; import com.amazonaws.services.identitymanagement.AmazonIdentityManagement; import com.amazonaws.services.s3.AmazonS3; public class AwsClientFactoryTests { private static AWSClientFactory FACTORY; @BeforeClass public static void setUp() { // We are testing invalid signing region, it doesn't matter if the AWS Credentials // is not valid. FACTORY = new AWSClientFactory(new AWSStaticCredentialsProvider( new BasicAWSCredentials("foo", "bar"))); } /** * IAM and CloudFront are using global endpoints. The following two tests assure the clients * are created correctly with different regions. */ @Test public void testIamClient() { testIamClientByRegion("us-west-2"); testIamClientByRegion("cn-north-1"); testIamClientByRegion("cn-northwest-1"); testIamClientByRegion("us-gov-west-1"); } @Test public void testCloudFrontClient() { testCloudFrontClientByRegion("us-west-2"); } @Test public void testS3Client() { testS3ClientByRegion("us-west-2"); testS3ClientByRegion("eu-central-1"); testS3ClientByRegion("cn-north-1"); testS3ClientByRegion("cn-northwest-1"); testS3ClientByRegion("us-gov-west-1"); } /** * For IAM client, if the provided signing region is not "us-east-1" for AWS partition, * the SignatureDoesNotMatch exception would be thrown. This test case assures the IAM * client is created correctly. 
*/ private void testIamClientByRegion(String regionId) { try { AmazonIdentityManagement iam = FACTORY.getIAMClientByRegion(regionId); iam.listAccessKeys(); } catch (AmazonServiceException e) { String errorCode = e.getErrorCode(); Assert.assertNotEquals("SignatureDoesNotMatch", errorCode); } } /** * For CloudFront client, if the provided signing region is not "us-east-1" for AWS partition, * the SignatureDoesNotMatch exception would be thrown. This test case assures the CloudFront * client is created correctly. */ private void testCloudFrontClientByRegion(String regionId) { try { AmazonCloudFront cloudFront = FACTORY.getCloudFrontClientByRegion(regionId); cloudFront.listDistributions(new ListDistributionsRequest()); } catch (AmazonServiceException e) { String errorCode = e.getErrorCode(); Assert.assertNotEquals("SignatureDoesNotMatch", errorCode); } } /** * For S3 client, if the underlying endpoint is the global endpoint "https://s3.amazonaws.com", * but the signing region is not "us-east-1", the AuthorizationHeaderMalformed exception would * be thrown. This test assures the S3 client is created correctly with different regions. */ private void testS3ClientByRegion(String regionId) { try { AmazonS3 s3 = FACTORY.getS3ClientByRegion(regionId); s3.listBuckets(); } catch (AmazonServiceException e) { String errorCode = e.getErrorCode(); Assert.assertNotEquals("AuthorizationHeaderMalformed", errorCode); } } }
7,073
0
Create_ds/aws-toolkit-eclipse/tests/com.amazonaws.eclipse.core.tests/src/com/amazonaws/eclipse/core
Create_ds/aws-toolkit-eclipse/tests/com.amazonaws.eclipse.core.tests/src/com/amazonaws/eclipse/core/regions/RegionUtilsTest.java
package com.amazonaws.eclipse.core.regions; import java.io.IOException; import java.util.List; import java.util.regex.Pattern; import org.junit.Assert; import org.junit.Test; public class RegionUtilsTest { private static final Pattern[] PATTERNS = { // global region endpoint, ex. s3.amazonaws.com Pattern.compile("^(http|https)://\\w+(\\.us-gov)?.amazonaws.com(/)?$"), // s3 irregular regional endpoints Pattern.compile("^(http|https)://s3-(us|eu|ap|sa)-(gov-)?(east|west|south|north|central|northeast|southeast)-(1|2).amazonaws.com(/)?$"), // regular region endpoints Pattern.compile("^(http|https)://\\w+\\.(\\w+\\.)?(ca|us|eu|ap|sa|me|af)-(gov-)?(east|west|south|north|central|northeast|southeast)-(1|2|3)\\.amazonaws\\.com(/)?$"), // China region endpoints, currently we only have cn-north-1 region Pattern.compile("^(http|https)://\\w+\\.(\\w+\\.)?cn-(north|northwest)-1.amazonaws.com.cn(/)?$"), // us-gov region endpoints Pattern.compile("^(http|https)://\\w+\\.us-gov(-west-1)?.amazonaws.com(/)?$") }; @Test public void testRemoteRegionFile() throws IOException { List<Region> regions = RegionUtils.loadRegionsFromCloudFront(); assertRegionEndpointsValid(regions); } @Test public void testLocalRegionFile() throws IOException { List<Region> regions = RegionUtils.loadRegionsFromLocalRegionFile(); assertRegionEndpointsValid(regions); } private void assertRegionEndpointsValid(List<Region> regions) { for (Region region : regions) { if ("local".equals(region.getId())) { continue; } for (String endpoint : region.getServiceEndpoints().values()) { if (RegionUtils.S3_US_EAST_1_REGIONAL_ENDPOINT.equals(endpoint)) { continue; } assertEndpointValid(endpoint); } } } private void assertEndpointValid(String endpoint) { for (Pattern pattern : PATTERNS) { if (pattern.matcher(endpoint).matches()) { return; } } Assert.fail("Endpoint: " + endpoint + " doesn't follow any endpoint patterns."); } }
7,074
0
Create_ds/aws-toolkit-eclipse/tests/com.amazonaws.eclipse.core.tests/src/com/amazonaws/eclipse/core
Create_ds/aws-toolkit-eclipse/tests/com.amazonaws.eclipse.core.tests/src/com/amazonaws/eclipse/core/util/FileUtilsTest.java
/*
 * Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License").
 * You may not use this file except in compliance with the License.
 * A copy of the License is located at
 *
 *    http://aws.amazon.com/apache2.0
 *
 * or in the "license" file accompanying this file. This file is distributed
 * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied. See the License for the specific language governing
 * permissions and limitations under the License.
 */
package com.amazonaws.eclipse.core.util;

import java.io.File;
import java.io.IOException;
import java.nio.file.FileAlreadyExistsException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.nio.file.attribute.PosixFilePermission;
import java.util.Set;

import org.junit.Assert;
import org.junit.Test;

/**
 * Tests for {@link FileUtils#createFileWithPermission600(String)}: the call
 * must refuse to overwrite an existing file and must create new files with
 * owner read/write only (mode 600) on POSIX platforms.
 */
public class FileUtilsTest {

    /** Creating over an existing file must raise FileAlreadyExistsException. */
    @Test (expected = FileAlreadyExistsException.class)
    public void testCreateFileWithPermission600_FileAlreadyExists() throws IOException {
        Path existing = Files.createTempFile("foo", "txt");
        FileUtils.createFileWithPermission600(existing.toString());
    }

    /** A freshly created file must carry exactly owner-read + owner-write. */
    @Test
    public void testCreateFileWithPermission600_NewFile() throws IOException {
        Path parent = Files.createTempDirectory("foo");
        Path target = Paths.get(parent.toString(), "bar");
        File created = FileUtils.createFileWithPermission600(target.toString());

        if (OsPlatformUtils.isLinux() || OsPlatformUtils.isMac()) {
            Set<PosixFilePermission> actual = Files.getPosixFilePermissions(created.toPath());
            // Exactly OWNER_READ and OWNER_WRITE must be set; every other
            // POSIX permission bit must be clear.
            for (PosixFilePermission permission : PosixFilePermission.values()) {
                boolean expected = permission == PosixFilePermission.OWNER_READ
                        || permission == PosixFilePermission.OWNER_WRITE;
                if (expected) {
                    Assert.assertTrue(actual.contains(permission));
                } else {
                    Assert.assertFalse(actual.contains(permission));
                }
            }
        } else if (OsPlatformUtils.isWindows()) {
            // No POSIX permissions on Windows; just check basic accessibility.
            Assert.assertTrue(Files.isReadable(created.toPath()));
            Assert.assertTrue(Files.isWritable(created.toPath()));
        }
    }
}
7,075
0
Create_ds/aws-toolkit-eclipse/tests/com.amazonaws.eclipse.core.tests/src/com/amazonaws/eclipse/core/accounts
Create_ds/aws-toolkit-eclipse/tests/com.amazonaws.eclipse.core.tests/src/com/amazonaws/eclipse/core/accounts/profiles/SdkCredentialsFileContentMonitorTest.java
/* * Copyright 2015 Amazon Technologies, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at: * * http://aws.amazon.com/apache2.0 * * This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES * OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and * limitations under the License. */ package com.amazonaws.eclipse.core.accounts.profiles; import java.io.File; import java.io.IOException; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; import org.apache.commons.io.monitor.FileAlterationListenerAdaptor; import org.junit.Assert; import org.junit.Before; import org.junit.Test; public class SdkCredentialsFileContentMonitorTest { private File targetFile; private static final long MONITOR_POLLING_INTERVAL_MILLIS = 1000; @Before public void setup() throws IOException { targetFile = File.createTempFile("aws-eclipse-credentials-file-monitor-file", null); } @Test public void testFileChangedCallback() throws InterruptedException { final CountDownLatch latch = new CountDownLatch(1); SdkCredentialsFileContentMonitor monitor = new SdkCredentialsFileContentMonitor( targetFile, MONITOR_POLLING_INTERVAL_MILLIS, new FileAlterationListenerAdaptor() { @Override public void onFileChange(final File changedFile) { latch.countDown(); } }); monitor.setDebugMode(true); monitor.start(); touch(targetFile); long waitTime = MONITOR_POLLING_INTERVAL_MILLIS * 2; Assert.assertTrue( "File monitor callback not invoked after waiting for " + waitTime + " ms.", latch.await(waitTime, TimeUnit.MILLISECONDS)); } @Test public void testMonitorInStoppedStatus() throws InterruptedException { final CountDownLatch latch = new CountDownLatch(1); SdkCredentialsFileContentMonitor monitor = new SdkCredentialsFileContentMonitor( targetFile, MONITOR_POLLING_INTERVAL_MILLIS, new 
FileAlterationListenerAdaptor() { @Override public void onFileChange(final File changedFile) { System.err.println("stopped"); latch.countDown(); } }); monitor.setDebugMode(true); monitor.start(); monitor.stop(); touch(targetFile); long waitTime = MONITOR_POLLING_INTERVAL_MILLIS * 2; Assert.assertFalse( "Who counted it down to zero???", latch.await(waitTime, TimeUnit.MILLISECONDS)); } private void touch(File file) { file.setLastModified(System.currentTimeMillis() / 1000); } }
7,076
0
Create_ds/aws-toolkit-eclipse/bundles/com.amazonaws.eclipse.rds/tst/com/amazonaws/eclipse
Create_ds/aws-toolkit-eclipse/bundles/com.amazonaws.eclipse.rds/tst/com/amazonaws/eclipse/rds/CreateDriverAndConnectionProfileTest.java
/* * Copyright 2011-2012 Amazon Technologies, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at: * * http://aws.amazon.com/apache2.0 * * This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES * OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and * limitations under the License. */ package com.amazonaws.eclipse.rds; import java.util.Properties; import org.eclipse.datatools.connectivity.IConnectionProfile; import org.eclipse.datatools.connectivity.ProfileManager; import org.eclipse.datatools.connectivity.drivers.DriverInstance; import org.eclipse.datatools.connectivity.drivers.DriverManager; import org.eclipse.datatools.connectivity.drivers.IDriverMgmtConstants; import org.eclipse.datatools.connectivity.drivers.IPropertySet; import org.eclipse.datatools.connectivity.drivers.PropertySetImpl; import org.eclipse.datatools.connectivity.drivers.jdbc.IJDBCConnectionProfileConstants; public class CreateDriverAndConnectionProfileTest { // public static Properties createProperties() { // Properties baseProperties = new Properties(); // baseProperties.setProperty( IDriverMgmtConstants.PROP_DEFN_JARLIST, jarList ); // baseProperties.setProperty(IJDBCConnectionProfileConstants.DRIVER_CLASS_PROP_ID, "org.apache.derby.jdbc.ClientDriver"); //$NON-NLS-1$ // baseProperties.setProperty(IJDBCConnectionProfileConstants.URL_PROP_ID, driverURL); // baseProperties.setProperty(IJDBCConnectionProfileConstants.USERNAME_PROP_ID, username); // baseProperties.setProperty(IJDBCConnectionProfileConstants.PASSWORD_PROP_ID, password); // baseProperties.setProperty(IJDBCConnectionProfileConstants.DATABASE_VENDOR_PROP_ID, vendor); // baseProperties.setProperty(IJDBCConnectionProfileConstants.DATABASE_VERSION_PROP_ID, version); // 
baseProperties.setProperty(IJDBCConnectionProfileConstants.DATABASE_NAME_PROP_ID, databasename); // baseProperties.setProperty( IJDBCConnectionProfileConstants.SAVE_PASSWORD_PROP_ID, String.valueOf( true ) ); // baseProperties.setProperty( IDriverMgmtConstants.PROP_DEFN_TYPE, "org.eclipse.datatools.connectivity.db.derby102.clientDriver"); // // return baseProperties; // } public static void main(String[] args) throws Exception { printOutDtpStuff(); } public static void printOutDtpStuff() { DriverInstance[] list = DriverManager.getInstance().getAllDriverInstances(); for(int i = 0; i < list.length; i++){ System.out.println("=============" ); System.out.println("Driver ID: " + list[i].getId() ); System.out.println("Driver Jar List: " + list[i].getJarList() ); System.out.println("Driver Name: " + list[i].getName() ); list[i].getPropertySet().getBaseProperties().list(System.out); } IConnectionProfile[] plist = ProfileManager.getInstance().getProfiles(); for(int i = 0; i < plist.length; i++){ System.out.println("=============" ); System.out.println("Profile Name: " + plist[i].getName() ); System.out.println("Profile Provider ID: " + plist[i].getProviderId() ); System.out.println("Profile Provider Name: " + plist[i].getProviderName() ); plist[i].getBaseProperties().list(System.out); } } }
7,077
0
Create_ds/aws-toolkit-eclipse/bundles/com.amazonaws.eclipse.rds/tst/com/amazonaws/eclipse/rds
Create_ds/aws-toolkit-eclipse/bundles/com.amazonaws.eclipse.rds/tst/com/amazonaws/eclipse/rds/util/CheckIpUtilIntegrationTest.java
/*
 * Copyright 2014 Amazon Technologies, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *    http://aws.amazon.com/apache2.0
 *
 * This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
 * OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and
 * limitations under the License.
 */
package com.amazonaws.eclipse.rds.util;

import static org.junit.Assert.*;

import java.util.regex.Pattern;

import org.junit.Test;

/**
 * Integration test for {@code CheckIpUtil.checkIp()}.
 *
 * NOTE(review): as an integration test this presumably reaches out over the
 * network to discover the outgoing IP address — confirm against the
 * CheckIpUtil implementation; it will fail when run offline.
 */
public class CheckIpUtilIntegrationTest {

    // Loose dotted-quad shape check (does not validate 0-255 octet ranges).
    public static final Pattern IP_ADDRESS_PATTERN = Pattern.compile("^\\d+\\.\\d+\\.\\d+\\.\\d+$");

    /** Tests that we can find our outgoing IP address */
    @Test
    public void testCheckIp() throws Exception {
        String ip = CheckIpUtil.checkIp();
        // Must return a non-null string that looks like an IPv4 address.
        assertNotNull(ip);
        assertTrue(IP_ADDRESS_PATTERN.matcher(ip).matches());
    }
}
7,078
0
Create_ds/aws-toolkit-eclipse/bundles/com.amazonaws.eclipse.rds/src/com/amazonaws/eclipse/explorer
Create_ds/aws-toolkit-eclipse/bundles/com.amazonaws.eclipse.rds/src/com/amazonaws/eclipse/explorer/rds/RDSLabelProvider.java
/* * Copyright 2017 Amazon Technologies, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at: * * http://aws.amazon.com/apache2.0 * * This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES * OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and * limitations under the License. */ package com.amazonaws.eclipse.explorer.rds; import org.eclipse.swt.graphics.Image; import com.amazonaws.eclipse.core.AwsToolkitCore; import com.amazonaws.eclipse.explorer.ExplorerNodeLabelProvider; import com.amazonaws.eclipse.explorer.rds.RDSExplorerNodes.RdsRootElement; public class RDSLabelProvider extends ExplorerNodeLabelProvider { @Override public Image getDefaultImage(Object element) { if (element instanceof RdsRootElement) { return AwsToolkitCore.getDefault().getImageRegistry().get(AwsToolkitCore.IMAGE_RDS_SERVICE); } else { return null; } } @Override public String getText(Object element) { if (element instanceof RdsRootElement) { return "Amazon RDS"; } return getExplorerNodeText(element); } }
7,079
0
Create_ds/aws-toolkit-eclipse/bundles/com.amazonaws.eclipse.rds/src/com/amazonaws/eclipse/explorer
Create_ds/aws-toolkit-eclipse/bundles/com.amazonaws.eclipse.rds/src/com/amazonaws/eclipse/explorer/rds/RDSContentProvider.java
/* * Copyright 2011-2012 Amazon Technologies, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at: * * http://aws.amazon.com/apache2.0 * * This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES * OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and * limitations under the License. */ package com.amazonaws.eclipse.explorer.rds; import java.util.ArrayList; import java.util.List; import com.amazonaws.eclipse.core.AwsToolkitCore; import com.amazonaws.eclipse.core.regions.ServiceAbbreviations; import com.amazonaws.eclipse.explorer.AWSResourcesRootElement; import com.amazonaws.eclipse.explorer.AbstractContentProvider; import com.amazonaws.eclipse.explorer.Loading; import com.amazonaws.eclipse.explorer.rds.RDSExplorerNodes.DatabaseNode; import com.amazonaws.eclipse.explorer.rds.RDSExplorerNodes.RdsRootElement; import com.amazonaws.services.rds.AmazonRDS; import com.amazonaws.services.rds.model.DBInstance; public class RDSContentProvider extends AbstractContentProvider { @Override public boolean hasChildren(Object element) { return (element instanceof AWSResourcesRootElement || element instanceof RdsRootElement); } @Override public Object[] loadChildren(Object parentElement) { if ( parentElement instanceof AWSResourcesRootElement) { return new Object[] { RdsRootElement.RDS_ROOT_NODE }; } if ( parentElement instanceof RdsRootElement) { new DataLoaderThread(parentElement) { @Override public Object[] loadData() { AmazonRDS rds = AwsToolkitCore.getClientFactory().getRDSClient(); List<DBInstance> dbInstances = rds.describeDBInstances().getDBInstances(); List<DatabaseNode> databaseNodes = new ArrayList<>(); for (DBInstance dbInstance : dbInstances) { databaseNodes.add(new DatabaseNode(dbInstance)); } return databaseNodes.toArray(); } }.start(); } return Loading.LOADING; } 
@Override public String getServiceAbbreviation() { return ServiceAbbreviations.RDS; } }
7,080
0
Create_ds/aws-toolkit-eclipse/bundles/com.amazonaws.eclipse.rds/src/com/amazonaws/eclipse/explorer
Create_ds/aws-toolkit-eclipse/bundles/com.amazonaws.eclipse.rds/src/com/amazonaws/eclipse/explorer/rds/RDSExplorerNodes.java
/*
 * Copyright 2011-2012 Amazon Technologies, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *    http://aws.amazon.com/apache2.0
 *
 * This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
 * OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and
 * limitations under the License.
 */
package com.amazonaws.eclipse.explorer.rds;

import com.amazonaws.eclipse.core.AwsToolkitCore;
import com.amazonaws.eclipse.explorer.ExplorerNode;
import com.amazonaws.eclipse.explorer.rds.RDSExplorerActionProvider.ConfigureConnectionProfileAction;
import com.amazonaws.services.rds.model.DBInstance;

/**
 * Node types that make up the RDS subtree in the AWS Explorer.
 */
public class RDSExplorerNodes {

    /** Marker element for the RDS root of the explorer tree. */
    public static final class RdsRootElement {
        // Shared singleton used as the single RDS root node.
        public static final RdsRootElement RDS_ROOT_NODE = new RdsRootElement();
    }

    /**
     * Explorer node representing a single RDS DB instance; double-click
     * triggers the connection-profile configuration action.
     */
    public static class DatabaseNode extends ExplorerNode {

        private final DBInstance instance;

        public DatabaseNode(DBInstance dbInstance) {
            super(dbInstance.getDBInstanceIdentifier(),
                  0,
                  loadImage(AwsToolkitCore.IMAGE_DATABASE),
                  new ConfigureConnectionProfileAction(dbInstance));
            this.instance = dbInstance;
        }

        /** @return the DB instance this node was created from. */
        public DBInstance getDBInstance() {
            return instance;
        }
    }
}
7,081
0
Create_ds/aws-toolkit-eclipse/bundles/com.amazonaws.eclipse.rds/src/com/amazonaws/eclipse/explorer
Create_ds/aws-toolkit-eclipse/bundles/com.amazonaws.eclipse.rds/src/com/amazonaws/eclipse/explorer/rds/RDSExplorerActionProvider.java
/* * Copyright 2011-2012 Amazon Technologies, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at: * * http://aws.amazon.com/apache2.0 * * This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES * OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and * limitations under the License. */ package com.amazonaws.eclipse.explorer.rds; import java.util.Properties; import org.eclipse.datatools.connectivity.IConnectionProfile; import org.eclipse.datatools.connectivity.ProfileManager; import org.eclipse.jface.action.Action; import org.eclipse.jface.action.IMenuManager; import org.eclipse.jface.dialogs.MessageDialog; import org.eclipse.jface.viewers.IStructuredSelection; import org.eclipse.jface.wizard.WizardDialog; import org.eclipse.swt.widgets.Display; import org.eclipse.ui.navigator.CommonActionProvider; import com.amazonaws.eclipse.core.AwsToolkitCore; import com.amazonaws.eclipse.core.BrowserUtils; import com.amazonaws.eclipse.core.regions.RegionUtils; import com.amazonaws.eclipse.explorer.rds.RDSExplorerNodes.RdsRootElement; import com.amazonaws.eclipse.rds.ImportWizard; import com.amazonaws.eclipse.rds.RDSDriverDefinitionConstants; import com.amazonaws.eclipse.rds.RDSPlugin; import com.amazonaws.services.rds.AmazonRDS; import com.amazonaws.services.rds.model.DBInstance; import com.amazonaws.services.rds.model.DescribeDBInstancesRequest; import com.amazonaws.services.rds.model.DescribeDBInstancesResult; public class RDSExplorerActionProvider extends CommonActionProvider { @Override public void fillContextMenu(IMenuManager menu) { IStructuredSelection selection = (IStructuredSelection) getContext().getSelection(); if (selection.getFirstElement() instanceof RdsRootElement) { menu.add(new OpenRdsConsoleAction()); } } private final class OpenRdsConsoleAction extends 
Action { public OpenRdsConsoleAction() { this.setText("Go to RDS Management Console"); this.setImageDescriptor(AwsToolkitCore.getDefault().getImageRegistry().getDescriptor(AwsToolkitCore.IMAGE_EXTERNAL_LINK)); } @Override public void run() { BrowserUtils.openExternalBrowser("http://console.aws.amazon.com/rds"); } } public static class ConfigureConnectionProfileAction extends Action { private DBInstance dbInstance; public ConfigureConnectionProfileAction(DBInstance dbInstance) { this.dbInstance = dbInstance; this.setText("Connect..."); this.setImageDescriptor(AwsToolkitCore.getDefault().getImageRegistry().getDescriptor(AwsToolkitCore.IMAGE_GEAR)); } @Override public void run() { for (final IConnectionProfile profile : ProfileManager.getInstance().getProfiles()) { Properties properties = profile.getBaseProperties(); String profileInstanceId = properties.getProperty(RDSDriverDefinitionConstants.DB_INSTANCE_ID); String profileRegionId = properties.getProperty(RDSDriverDefinitionConstants.DB_REGION_ID); String profileAccountId = properties.getProperty(RDSDriverDefinitionConstants.DB_ACCCOUNT_ID); if (dbInstance.getDBInstanceIdentifier().equals(profileInstanceId) && RegionUtils.getCurrentRegion().getId().equals(profileRegionId) && AwsToolkitCore.getDefault().getCurrentAccountId().equals(profileAccountId)) { RDSPlugin.connectAndReveal(profile); return; } } AmazonRDS rds = AwsToolkitCore.getClientFactory().getRDSClient(); DescribeDBInstancesResult result = rds.describeDBInstances(new DescribeDBInstancesRequest().withDBInstanceIdentifier(dbInstance.getDBInstanceIdentifier())); if (result.getDBInstances().isEmpty()) { String title = "DB Instance Not Available"; String message = "The DB Instance you selected is no longer available."; openErrorDialog(title, message); return; } dbInstance = result.getDBInstances().get(0); if (dbInstance.getPubliclyAccessible() == false) { String title = "DB Instance Not Publicly Accessible"; String message = "The DB Instance you selected is not 
publically accessible. " + "For more information about making your DB Instance publically accessible, see the Amazon RDS Developer Guide."; openErrorDialog(title, message); return; } ImportWizard importWizard = new ImportWizard(dbInstance); WizardDialog wizardDialog = new WizardDialog(Display.getDefault().getActiveShell(), importWizard); wizardDialog.open(); } } private static void openErrorDialog(String title, String message) { new MessageDialog(Display.getDefault().getActiveShell(), title, null, message, MessageDialog.ERROR, new String[] { "OK" }, 0).open(); } }
7,082
0
Create_ds/aws-toolkit-eclipse/bundles/com.amazonaws.eclipse.rds/src/com/amazonaws/eclipse/explorer
Create_ds/aws-toolkit-eclipse/bundles/com.amazonaws.eclipse.rds/src/com/amazonaws/eclipse/explorer/rds/DatabaseDecorator.java
/* * Copyright 2011-2012 Amazon Technologies, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at: * * http://aws.amazon.com/apache2.0 * * This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES * OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and * limitations under the License. */ package com.amazonaws.eclipse.explorer.rds; import org.eclipse.jface.viewers.IDecoration; import org.eclipse.jface.viewers.ILabelProviderListener; import org.eclipse.jface.viewers.ILightweightLabelDecorator; import com.amazonaws.eclipse.explorer.rds.RDSExplorerNodes.DatabaseNode; public class DatabaseDecorator implements ILightweightLabelDecorator { @Override public void addListener(ILabelProviderListener listener) {} @Override public void removeListener(ILabelProviderListener listener) {} @Override public void dispose() {} @Override public boolean isLabelProperty(Object element, String property) { return false; } @Override public void decorate(Object element, IDecoration decoration) { if (element instanceof DatabaseNode) { DatabaseNode databaseNode = (DatabaseNode)element; String engine = databaseNode.getDBInstance().getEngine(); String version = databaseNode.getDBInstance().getEngineVersion(); decoration.addSuffix(" [" + engine + " " + version + "]"); } } }
7,083
0
Create_ds/aws-toolkit-eclipse/bundles/com.amazonaws.eclipse.rds/src/com/amazonaws/eclipse
Create_ds/aws-toolkit-eclipse/bundles/com.amazonaws.eclipse.rds/src/com/amazonaws/eclipse/rds/SelectExistingDatabasePage.java
/* * Copyright 2011-2012 Amazon Technologies, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at: * * http://aws.amazon.com/apache2.0 * * This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES * OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and * limitations under the License. */ package com.amazonaws.eclipse.rds; import java.lang.reflect.InvocationTargetException; import java.util.Collection; import java.util.HashMap; import java.util.List; import java.util.Map; import org.eclipse.core.databinding.DataBindingContext; import org.eclipse.core.databinding.beans.PojoObservables; import org.eclipse.core.runtime.IProgressMonitor; import org.eclipse.jface.databinding.swt.SWTObservables; import org.eclipse.jface.operation.IRunnableWithProgress; import org.eclipse.jface.wizard.WizardPage; import org.eclipse.swt.SWT; import org.eclipse.swt.layout.GridLayout; import org.eclipse.swt.widgets.Combo; import org.eclipse.swt.widgets.Composite; import org.eclipse.swt.widgets.Label; import com.amazonaws.services.rds.AmazonRDS; import com.amazonaws.services.rds.model.DBInstance; class SelectExistingDatabasePage extends WizardPage { protected DataBindingContext bindingContext = new DataBindingContext(); private Combo dbCombo; private final AmazonRDS rds; private static final String PAGE_NAME = "configureRdsDbWizardPage2"; private final ImportDBInstanceDataModel wizardDataModel; protected SelectExistingDatabasePage(AmazonRDS rds, ImportDBInstanceDataModel wizardDataModel) { super(PAGE_NAME); this.rds = rds; this.wizardDataModel = wizardDataModel; } @Override public void createControl(Composite parent) { Composite composite = new Composite(parent, SWT.NONE); composite.setLayout(new GridLayout(2, false)); setControl(composite); LoadExistingDatabasesRunnable runnable = new 
LoadExistingDatabasesRunnable(rds); try { getContainer().run(true, true, runnable); } catch (Exception e) { e.printStackTrace(); } /* * TODO: Handle edge cases: * 1 - not signed up for AWS * 2 - security credentials not configured * 3 - not signed up for RDS * 4 - no databases to import yet */ /* * TODO: Filter out the databases that have already been imported... */ new Label(composite, SWT.NONE).setText("RDS DB Instance:"); dbCombo = new Combo(composite, SWT.READ_ONLY); // TODO: Add support for multiple regions // TODO: We need to display more information about each DB than just the ID (maybe in a table) Collection<List<DBInstance>> values = runnable.dbsByRegion.values(); for (DBInstance db : values.iterator().next()) { dbCombo.add(db.getDBInstanceIdentifier()); dbCombo.setData(db.getDBInstanceIdentifier(), db); } // new Label(composite, SWT.NONE).setText("Password:"); // dbPasswordText = new Text(composite, SWT.BORDER); // Don't edit current security groups... // Optionally create a new one // - shared between all connected instances? // - "Remote Client/Tool Access" group // - get correct CIDR range from client bindControls(); } @SuppressWarnings("static-access") private void bindControls() { bindingContext.bindValue( new ControlDataObservableValue(SWTObservables.observeSelection(dbCombo), true), PojoObservables.observeValue(wizardDataModel, wizardDataModel.DB_INSTANCE), null, null); } // TODO: do we need progress (could use for regions) static class LoadExistingDatabasesRunnable implements IRunnableWithProgress { private final AmazonRDS rds; public volatile Map<String, List<DBInstance>> dbsByRegion; public LoadExistingDatabasesRunnable(AmazonRDS rds) { // TODO: Stop passing in the individual client and just use the client factory to query each region this.rds = rds; } @Override public void run(IProgressMonitor monitor) throws InvocationTargetException, InterruptedException { // TODO: we really need one RDS client for each region... but we'll fake it for now... 
// TODO: host name should be enough to determine if we've imported a db yet? dbsByRegion = new HashMap<>(); System.out.println("Identified DBs in US-EAST-1: "); List<DBInstance> dbInstances = rds.describeDBInstances().getDBInstances(); for (DBInstance db : dbInstances) { System.out.println(" - " + db.getDBName() + " : " + db.getDBInstanceIdentifier()); } dbsByRegion.put("us-east-1", dbInstances); } } }
7,084
0
Create_ds/aws-toolkit-eclipse/bundles/com.amazonaws.eclipse.rds/src/com/amazonaws/eclipse
Create_ds/aws-toolkit-eclipse/bundles/com.amazonaws.eclipse.rds/src/com/amazonaws/eclipse/rds/RDSDriverDefinitionConstants.java
/*
 * Copyright 2011-2012 Amazon Technologies, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *    http://aws.amazon.com/apache2.0
 *
 * This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
 * OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and
 * limitations under the License.
 */
package com.amazonaws.eclipse.rds;

/**
 * Custom property keys stored in DTP connection profiles to record which RDS
 * DB instance (and the owning region/account) a profile was created from, so
 * existing profiles can be matched and reused instead of recreated.
 */
public final class RDSDriverDefinitionConstants {

    /** Profile property key holding the RDS DB instance identifier. */
    public static final String DB_INSTANCE_ID = "rdsDbInstanceId";

    /** Profile property key holding the id of the region the instance lives in. */
    public static final String DB_REGION_ID = "rdsDbRegionId";

    /**
     * Profile property key holding the AWS account id.
     * NOTE(review): both the field name ("ACCCOUNT") and the stored value
     * ("rdsDbAcoccount") are misspelled, but the value is persisted inside
     * users' existing connection profiles, so neither can be changed without
     * breaking profile matching.
     */
    public static final String DB_ACCCOUNT_ID = "rdsDbAcoccount";

    private RDSDriverDefinitionConstants() {
        // Constants holder; not instantiable.
    }
}
7,085
0
Create_ds/aws-toolkit-eclipse/bundles/com.amazonaws.eclipse.rds/src/com/amazonaws/eclipse
Create_ds/aws-toolkit-eclipse/bundles/com.amazonaws.eclipse.rds/src/com/amazonaws/eclipse/rds/ConfigureRDSDBConnectionRunnable.java
/*
 * Copyright 2011-2014 Amazon Technologies, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *    http://aws.amazon.com/apache2.0
 *
 * This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
 * OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and
 * limitations under the License.
 */
package com.amazonaws.eclipse.rds;

import java.lang.reflect.InvocationTargetException;
import java.util.ArrayList;
import java.util.List;
import java.util.Properties;

import org.eclipse.core.runtime.IProgressMonitor;
import org.eclipse.core.runtime.SubProgressMonitor;
import org.eclipse.datatools.connectivity.ConnectionProfileException;
import org.eclipse.datatools.connectivity.IConnectionProfile;
import org.eclipse.datatools.connectivity.ProfileManager;
import org.eclipse.datatools.connectivity.drivers.DriverInstance;
import org.eclipse.datatools.connectivity.drivers.DriverManager;
import org.eclipse.datatools.connectivity.drivers.IDriverMgmtConstants;
import org.eclipse.datatools.connectivity.drivers.IPropertySet;
import org.eclipse.datatools.connectivity.drivers.PropertySetImpl;
import org.eclipse.datatools.connectivity.drivers.jdbc.IJDBCConnectionProfileConstants;
import org.eclipse.datatools.connectivity.drivers.jdbc.IJDBCDriverDefinitionConstants;
import org.eclipse.jface.operation.IRunnableWithProgress;

import com.amazonaws.AmazonServiceException;
import com.amazonaws.eclipse.core.AwsToolkitCore;
import com.amazonaws.eclipse.core.regions.RegionUtils;
import com.amazonaws.eclipse.rds.connectionfactories.DatabaseConnectionFactory;
import com.amazonaws.services.ec2.AmazonEC2;
import com.amazonaws.services.ec2.model.AuthorizeSecurityGroupIngressRequest;
import com.amazonaws.services.ec2.model.IpPermission;
import com.amazonaws.services.rds.AmazonRDS;
import com.amazonaws.services.rds.model.AuthorizationAlreadyExistsException;
import com.amazonaws.services.rds.model.AuthorizeDBSecurityGroupIngressRequest;
import com.amazonaws.services.rds.model.CreateDBSecurityGroupRequest;
import com.amazonaws.services.rds.model.DBInstance;
import com.amazonaws.services.rds.model.DBSecurityGroupMembership;
import com.amazonaws.services.rds.model.DBSecurityGroupNotFoundException;
import com.amazonaws.services.rds.model.DescribeDBSecurityGroupsRequest;
import com.amazonaws.services.rds.model.ModifyDBInstanceRequest;
import com.amazonaws.services.rds.model.VpcSecurityGroupMembership;

/**
 * Wizard runnable that wires an RDS DB instance up as a workbench database
 * connection. In order, it: opens security-group ingress for the user's
 * CIDR range (if requested), ensures a DTP JDBC driver definition exists,
 * creates or updates the DTP connection profile, then connects it and reveals
 * it in the Data Source Explorer.
 */
class ConfigureRDSDBConnectionRunnable implements IRunnableWithProgress {

    private static final String DEFAULT_SECURITY_GROUP_DESCRIPTION = "Security group for remote client tools";
    private static final String DEFAULT_SECURITY_GROUP_NAME = "Remote Tool Access Group";

    private final AmazonRDS rds;
    private final ImportDBInstanceDataModel wizardDataModel;
    private DatabaseConnectionFactory connectionFactory;
    // Set at the end of run(); queried by the wizard via didCompleteSuccessfully().
    private boolean completedSuccessfully = false;

    public ConfigureRDSDBConnectionRunnable(ImportDBInstanceDataModel wizardDataModel) {
        this.wizardDataModel = wizardDataModel;
        this.rds = AwsToolkitCore.getClientFactory().getRDSClient();
        // Picks the engine-specific factory (MySQL, Oracle, ...) for driver/profile creation.
        this.connectionFactory = DatabaseConnectionFactory.createConnectionFactory(wizardDataModel);
    }

    @Override
    public void run(IProgressMonitor monitor) throws InvocationTargetException, InterruptedException {
        monitor.beginTask("Configuring database connection", 10);
        try {
            configureSecurityGroupPermissions(monitor);
            DriverInstance driverInstance = getDriver();

            /*
             * If we aren't able to connect, we should warn users, and let them know some of the possible
             * reasons, such as an incorrect password, or if they're connected through a firewall (like our VPN),
             * and how to run their DB instance on other ports so corporate firewalls don't cause problems.
             */
            IConnectionProfile connectionProfile = createConnectionProfile(driverInstance, new SubProgressMonitor(monitor, 4));

            monitor.subTask("Connecting");
            RDSPlugin.connectAndReveal(connectionProfile);
            monitor.worked(1);

            // Success is defined as the profile actually reaching the connected state.
            completedSuccessfully = (connectionProfile.getConnectionState() == IConnectionProfile.CONNECTED_STATE);
        } catch (Exception e) {
            // Wrap for the wizard's error handling, per IRunnableWithProgress contract.
            throw new InvocationTargetException(e);
        } finally {
            monitor.done();
        }
    }

    /**
     * Opens a CIDR IP range ingress for the selected DB instance if the user
     * has selected to have permissions configured automatically.
     *
     * @param monitor
     *            ProgressMonitor for this runnable's progress.
     */
    private void configureSecurityGroupPermissions(IProgressMonitor monitor) {
        if (wizardDataModel.getConfigurePermissions() == false) {
            // User opted out; account for the skipped work so progress stays accurate.
            monitor.worked(5);
            return;
        }

        if (isVpcDbInstance()) {
            /*
             * DB Instances created with the most recent versions of the RDS
             * API will always use VPC security groups, which are owned in
             * the user's EC2 account.
             */
            openVPCSecurityGroupIngress(new SubProgressMonitor(monitor, 5));
        } else {
            /*
             * For older/legacy DB Instances, we need to modify the RDS
             * security groups instead of working with EC2 directly.
             */
            openLegacySecurityGroupIngress(new SubProgressMonitor(monitor, 5));
        }
    }

    /**
     * Returns true if the selected RDS DB Instance is an RDS VPC DB Instance.
     */
    private boolean isVpcDbInstance() {
        return wizardDataModel.getDbInstance().getVpcSecurityGroups().isEmpty() == false;
    }

    /** Returns whether the connection profile ended up in the connected state. */
    public boolean didCompleteSuccessfully() {
        return completedSuccessfully;
    }

    /**
     * Opens ingress on the instance's first VPC (EC2-owned) security group for
     * the user's CIDR range on the DB endpoint's port.
     */
    private void openVPCSecurityGroupIngress(IProgressMonitor monitor) {
        monitor.beginTask("Configuring database security group", 3);

        List<VpcSecurityGroupMembership> vpcSecurityGroups = wizardDataModel.getDbInstance().getVpcSecurityGroups();
        if (vpcSecurityGroups == null || vpcSecurityGroups.isEmpty()) {
            throw new RuntimeException("Expected a DB instance with VPC security groups!");
        }
        // Only the first VPC security group is modified.
        String vpcSecurityGroupId = vpcSecurityGroups.get(0).getVpcSecurityGroupId();

        try {
            AmazonEC2 ec2 = AwsToolkitCore.getClientFactory().getEC2Client();
            ec2.authorizeSecurityGroupIngress(new AuthorizeSecurityGroupIngressRequest()
                .withGroupId(vpcSecurityGroupId)
                .withIpPermissions(new IpPermission()
                    .withFromPort(wizardDataModel.getDbInstance().getEndpoint().getPort())
                    .withToPort(wizardDataModel.getDbInstance().getEndpoint().getPort())
                    .withIpProtocol("tcp")
                    .withIpRanges(wizardDataModel.getCidrIpRange())));
        } catch (AmazonServiceException ase) {
            // We can safely ignore InvalidPermission.Duplicate errors,
            // but will rethrow all other errors.
            if (!ase.getErrorCode().equals("InvalidPermission.Duplicate")) {
                throw ase;
            }
        }
    }

    /**
     * This method opens security group permissions for legacy RDS DB Instances.
     * Legacy DB Instances do not operate in VPC and connection permissions are
     * managed through RDS security groups (not directly through EC2 security
     * groups).
     */
    private void openLegacySecurityGroupIngress(IProgressMonitor monitor) {
        monitor.beginTask("Configuring database security group", 3);

        // First make sure our security group exists...
        try {
            rds.describeDBSecurityGroups(new DescribeDBSecurityGroupsRequest()
                .withDBSecurityGroupName(DEFAULT_SECURITY_GROUP_NAME)).getDBSecurityGroups();
        } catch (DBSecurityGroupNotFoundException e) {
            // Group doesn't exist yet; create it on demand.
            rds.createDBSecurityGroup(new CreateDBSecurityGroupRequest()
                .withDBSecurityGroupName(DEFAULT_SECURITY_GROUP_NAME)
                .withDBSecurityGroupDescription(DEFAULT_SECURITY_GROUP_DESCRIPTION));
        }
        monitor.worked(1);

        // Then make sure that it has usable permission...
        List<String> existingSecurityGroupNames = new ArrayList<>();
        for (DBSecurityGroupMembership groupMembership : wizardDataModel.getDbInstance().getDBSecurityGroups()) {
            existingSecurityGroupNames.add(groupMembership.getDBSecurityGroupName());
        }
        // Attach our group to the instance if it isn't already a member.
        if (existingSecurityGroupNames.contains(DEFAULT_SECURITY_GROUP_NAME) == false) {
            existingSecurityGroupNames.add(DEFAULT_SECURITY_GROUP_NAME);
            rds.modifyDBInstance(new ModifyDBInstanceRequest()
                .withDBInstanceIdentifier(wizardDataModel.getDbInstance().getDBInstanceIdentifier())
                .withDBSecurityGroups(existingSecurityGroupNames));
        }
        monitor.worked(1);

        try {
            rds.authorizeDBSecurityGroupIngress(new AuthorizeDBSecurityGroupIngressRequest()
                .withCIDRIP(wizardDataModel.getCidrIpRange())
                .withDBSecurityGroupName(DEFAULT_SECURITY_GROUP_NAME));
        } catch (AuthorizationAlreadyExistsException e) {
            // The CIDR range is already authorized; nothing to do.
        }
        monitor.worked(1);
    }

    /**
     * Returns a driver for connecting to the user's database. If an existing,
     * compatible driver is found, it will be used, otherwise a new driver will
     * be created and returned.
     *
     * For more information on creating DriverInstances, see:
     * http://stevenmcherry.wordpress.com/2009/04/24/programmatically-creating-dtp-driver-and-profile-definitions/
     *
     * @return A driver for connecting to the user's database.
     */
    private DriverInstance getDriver() {
        if (wizardDataModel.isUseExistingDriverDefinition()) {
            return wizardDataModel.getDriverDefinition();
        }

        // Deterministic id so a previously created definition can be reused.
        String targetId = "DriverDefn." + connectionFactory.getDriverTemplate() + "." + connectionFactory.createDriverName();
        for (DriverInstance driverInstance : DriverManager.getInstance().getAllDriverInstances()) {
            if (driverInstance.getId().equals(targetId)) return driverInstance;
        }

        Properties driverProperties = new Properties();
        if (wizardDataModel.getJdbcDriver() != null) {
            // The MySQL driver is currently shipped with the plugins, so in this one case,
            // the wizard data model won't have the driver file specified.
            driverProperties.setProperty(IDriverMgmtConstants.PROP_DEFN_JARLIST, wizardDataModel.getJdbcDriver().getAbsolutePath());
        }
        driverProperties.setProperty(IJDBCConnectionProfileConstants.DRIVER_CLASS_PROP_ID, connectionFactory.getDriverClass());
        driverProperties.setProperty(IJDBCConnectionProfileConstants.DATABASE_VENDOR_PROP_ID, connectionFactory.getDatabaseVendor());
        driverProperties.setProperty(IJDBCConnectionProfileConstants.DATABASE_VERSION_PROP_ID, connectionFactory.getDatabaseVersion());
        driverProperties.setProperty(IJDBCConnectionProfileConstants.SAVE_PASSWORD_PROP_ID, String.valueOf(true));
        driverProperties.setProperty(IDriverMgmtConstants.PROP_DEFN_TYPE, connectionFactory.getDriverTemplate());
        if (connectionFactory.getAdditionalDriverProperties() != null) {
            driverProperties.putAll(connectionFactory.getAdditionalDriverProperties());
        }

        IPropertySet propertySet = new PropertySetImpl(connectionFactory.createDriverName(), targetId);
        propertySet.setBaseProperties(driverProperties);
        DriverInstance driver = new DriverInstance(propertySet);
        DriverManager.getInstance().addDriverInstance(driver);

        return driver;
    }

    /**
     * Creates the DTP connection profile by assembling properties from
     * IJDBCDriverDefinitionConstants, IJDBCConnectionProfileConstants, and a
     * few AWS custom connection profile properties.
     */
    private IConnectionProfile createConnectionProfile(DriverInstance driverInstance, IProgressMonitor monitor) throws ConnectionProfileException {
        monitor.beginTask("Creating connection profile", 1);

        DBInstance dbInstance = wizardDataModel.getDbInstance();
        // Start from the driver's properties and layer connection details on top.
        Properties profileProperties = driverInstance.getPropertySet().getBaseProperties();
        profileProperties.setProperty(IJDBCDriverDefinitionConstants.URL_PROP_ID, connectionFactory.createJdbcUrl());
        profileProperties.setProperty(IJDBCDriverDefinitionConstants.PASSWORD_PROP_ID, wizardDataModel.getDbPassword());
        profileProperties.setProperty(IJDBCDriverDefinitionConstants.USERNAME_PROP_ID, dbInstance.getMasterUsername());
        if (dbInstance.getDBName() != null) {
            profileProperties.setProperty(IJDBCDriverDefinitionConstants.DATABASE_NAME_PROP_ID, dbInstance.getDBName());
        }
        profileProperties.setProperty("org.eclipse.datatools.connectivity.driverDefinitionID", driverInstance.getId());

        /*
         * We add custom connection profile properties to help us easily recognize
         * the source RDS instance.
         */
        profileProperties.setProperty(RDSDriverDefinitionConstants.DB_INSTANCE_ID, dbInstance.getDBInstanceIdentifier());
        profileProperties.setProperty(RDSDriverDefinitionConstants.DB_REGION_ID, RegionUtils.getCurrentRegion().getId());
        profileProperties.setProperty(RDSDriverDefinitionConstants.DB_ACCCOUNT_ID, AwsToolkitCore.getDefault().getCurrentAccountId());

        String profileName = "Amazon RDS DB: " + dbInstance.getDBInstanceIdentifier() + " - " + RegionUtils.getCurrentRegion().getName();

        /*
         * if the connection profile already exists... just modify it
         */
        IConnectionProfile existingProfile = ProfileManager.getInstance().getProfileByName(profileName);
        if (existingProfile != null) {
            existingProfile.setBaseProperties(profileProperties);
            ProfileManager.getInstance().modifyProfile(existingProfile);
            monitor.worked(1);
            return existingProfile;
        } else {
            IConnectionProfile profile = ProfileManager.getInstance().createProfile(
                profileName, profileName, connectionFactory.getConnectionProfileProviderId(), profileProperties);
            monitor.worked(1);
            return profile;
        }
    }
}
7,086
0
Create_ds/aws-toolkit-eclipse/bundles/com.amazonaws.eclipse.rds/src/com/amazonaws/eclipse
Create_ds/aws-toolkit-eclipse/bundles/com.amazonaws.eclipse.rds/src/com/amazonaws/eclipse/rds/ControlDataObservableValue.java
/* * Copyright 2011-2012 Amazon Technologies, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at: * * http://aws.amazon.com/apache2.0 * * This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES * OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and * limitations under the License. */ package com.amazonaws.eclipse.rds; import org.eclipse.core.databinding.observable.value.DecoratingObservableValue; import org.eclipse.jface.databinding.swt.ISWTObservableValue; final class ControlDataObservableValue extends DecoratingObservableValue { private final ISWTObservableValue swtObservableValue; // TODO: This doesn't have to be a SWTObservable, it could just be the control... public ControlDataObservableValue(ISWTObservableValue swtObservableValue, boolean disposeDecoratedOnDispose) { super(swtObservableValue, disposeDecoratedOnDispose); this.swtObservableValue = swtObservableValue; } @Override public Object getValue() { return swtObservableValue.getWidget().getData((String)swtObservableValue.getValue()); } @Override public void setValue(Object value) { System.out.println("Setting value for ControlDataObservableValue: " + value); super.setValue(value); } @Override public Object getValueType() { return null; } }
7,087
0
Create_ds/aws-toolkit-eclipse/bundles/com.amazonaws.eclipse.rds/src/com/amazonaws/eclipse
Create_ds/aws-toolkit-eclipse/bundles/com.amazonaws.eclipse.rds/src/com/amazonaws/eclipse/rds/ImportWizard.java
/* * Copyright 2011-2012 Amazon Technologies, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at: * * http://aws.amazon.com/apache2.0 * * This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES * OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and * limitations under the License. */ package com.amazonaws.eclipse.rds; import org.eclipse.core.runtime.IStatus; import org.eclipse.core.runtime.Status; import org.eclipse.jface.viewers.IStructuredSelection; import org.eclipse.jface.wizard.Wizard; import org.eclipse.ui.IImportWizard; import org.eclipse.ui.IWorkbench; import org.eclipse.ui.statushandlers.StatusManager; import com.amazonaws.eclipse.core.AwsToolkitCore; import com.amazonaws.services.rds.model.DBInstance; public class ImportWizard extends Wizard implements IImportWizard { private final ImportDBInstanceDataModel wizardDataModel = new ImportDBInstanceDataModel(); public ImportWizard(DBInstance dbInstanceToImport) { if (dbInstanceToImport == null) throw new NullPointerException("dbInstanceToImport must not be null."); this.setDefaultPageImageDescriptor(AwsToolkitCore.getDefault().getImageRegistry().getDescriptor(AwsToolkitCore.IMAGE_WIZARD_CONFIGURE_DATABASE)); this.setWindowTitle("Configure RDS Database Connection"); this.setNeedsProgressMonitor(true); wizardDataModel.setDbInstance(dbInstanceToImport); addPage(new ConfigureImportOptionsPage(wizardDataModel)); } @Override public boolean performFinish() { try { ConfigureRDSDBConnectionRunnable runnable = new ConfigureRDSDBConnectionRunnable(wizardDataModel); getContainer().run(true, true, runnable); return runnable.didCompleteSuccessfully(); } catch (Throwable t) { String errorMessage = "Unable to connect to RDS database"; if (t.getMessage() != null) errorMessage += ": " + t.getMessage(); Status status = 
new Status(IStatus.ERROR, RDSPlugin.PLUGIN_ID, errorMessage, t); StatusManager.getManager().handle(status, StatusManager.SHOW | StatusManager.LOG); } return false; } @Override public void init(IWorkbench workbench, IStructuredSelection selection) {} }
7,088
0
Create_ds/aws-toolkit-eclipse/bundles/com.amazonaws.eclipse.rds/src/com/amazonaws/eclipse
Create_ds/aws-toolkit-eclipse/bundles/com.amazonaws.eclipse.rds/src/com/amazonaws/eclipse/rds/ImportDBInstanceDataModel.java
/*
 * Copyright 2011-2014 Amazon Technologies, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *    http://aws.amazon.com/apache2.0
 *
 * This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
 * OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and
 * limitations under the License.
 */
package com.amazonaws.eclipse.rds;

import java.io.File;

import org.eclipse.datatools.connectivity.drivers.DriverInstance;

import com.amazonaws.services.rds.model.DBInstance;

/**
 * Mutable data model shared between the RDS import wizard pages and the
 * connection-configuration runnable. The string constants are JavaBean
 * property names used by JFace data binding against this POJO.
 */
public class ImportDBInstanceDataModel {

    // Bean property names for data binding.
    public static final String DB_PASSWORD = "dbPassword";
    public static final String DB_INSTANCE = "dbInstance";
    public static final String CIDR_IP_RANGE = "cidrIpRange";
    public static final String CONFIGURE_PERMISSIONS = "configurePermissions";
    public static final String USE_EXISTING_DRIVER_DEFINITION = "useExistingDriverDefinition";
    public static final String DRIVER_DEFINITION = "driverDefinition";

    // The selected instance and the master-user password to connect with.
    private DBInstance dbInstance;
    private String dbPassword;

    // JDBC driver configuration: either a jar on disk or an existing DTP definition.
    private File jdbcDriver;
    private boolean useExistingDriverDefinition;
    private DriverInstance driverDefinition;

    // Security-group ingress options.
    private String cidrIpRange;
    private boolean configurePermissions;

    public String getDbPassword() {
        return dbPassword;
    }

    public void setDbPassword(String dbPassword) {
        this.dbPassword = dbPassword;
    }

    public DBInstance getDbInstance() {
        return dbInstance;
    }

    public void setDbInstance(DBInstance dbInstance) {
        this.dbInstance = dbInstance;
    }

    public File getJdbcDriver() {
        return jdbcDriver;
    }

    public void setJdbcDriver(File jdbcDriver) {
        this.jdbcDriver = jdbcDriver;
    }

    public String getCidrIpRange() {
        return cidrIpRange;
    }

    public void setCidrIpRange(String cidrIpRange) {
        this.cidrIpRange = cidrIpRange;
    }

    public boolean getConfigurePermissions() {
        return configurePermissions;
    }

    public void setConfigurePermissions(boolean configurePermissions) {
        this.configurePermissions = configurePermissions;
    }

    public boolean isUseExistingDriverDefinition() {
        return useExistingDriverDefinition;
    }

    public void setUseExistingDriverDefinition(boolean useExistingDriverDefinition) {
        this.useExistingDriverDefinition = useExistingDriverDefinition;
    }

    public DriverInstance getDriverDefinition() {
        return driverDefinition;
    }

    public void setDriverDefinition(DriverInstance driverDefinition) {
        this.driverDefinition = driverDefinition;
    }
}
7,089
0
Create_ds/aws-toolkit-eclipse/bundles/com.amazonaws.eclipse.rds/src/com/amazonaws/eclipse
Create_ds/aws-toolkit-eclipse/bundles/com.amazonaws.eclipse.rds/src/com/amazonaws/eclipse/rds/RDSPlugin.java
/* * Copyright 2011-2014 Amazon Technologies, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at: * * http://aws.amazon.com/apache2.0 * * This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES * OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and * limitations under the License. */ package com.amazonaws.eclipse.rds; import java.io.File; import java.io.IOException; import java.net.URL; import java.util.Properties; import org.apache.commons.io.FileUtils; import org.eclipse.core.runtime.FileLocator; import org.eclipse.core.runtime.IPath; import org.eclipse.core.runtime.IStatus; import org.eclipse.core.runtime.Path; import org.eclipse.core.runtime.Platform; import org.eclipse.core.runtime.Status; import org.eclipse.datatools.connectivity.IConnectionProfile; import org.eclipse.datatools.connectivity.drivers.DriverInstance; import org.eclipse.datatools.connectivity.drivers.DriverManager; import org.eclipse.datatools.connectivity.drivers.IDriverMgmtConstants; import org.eclipse.datatools.connectivity.drivers.IPropertySet; import org.eclipse.datatools.connectivity.drivers.PropertySetImpl; import org.eclipse.datatools.connectivity.drivers.jdbc.IJDBCConnectionProfileConstants; import org.eclipse.datatools.connectivity.ui.dse.views.DataSourceExplorerView; import org.eclipse.jface.viewers.StructuredSelection; import org.eclipse.swt.widgets.Display; import org.eclipse.ui.IViewPart; import org.eclipse.ui.PlatformUI; import org.eclipse.ui.plugin.AbstractUIPlugin; import org.eclipse.ui.statushandlers.StatusManager; import org.osgi.framework.Bundle; import org.osgi.framework.BundleContext; import com.amazonaws.eclipse.core.AwsToolkitCore; import com.amazonaws.eclipse.rds.connectionfactories.MySqlConnectionFactory; /** * The activator class controls the plug-in life cycle 
*/ public class RDSPlugin extends AbstractUIPlugin { // The plug-in ID public static final String PLUGIN_ID = "com.amazonaws.eclipse.rds"; // The shared instance private static RDSPlugin plugin; private static final String MYSQL_DRIVER_FILE_NAME = "mysql-connector-java-5.1.33-bin.jar"; /* * (non-Javadoc) * @see org.eclipse.ui.plugin.AbstractUIPlugin#start(org.osgi.framework.BundleContext) */ @Override public void start(BundleContext context) throws Exception { super.start(context); plugin = this; createMySqlDriverDefinition(); } /* * (non-Javadoc) * @see org.eclipse.ui.plugin.AbstractUIPlugin#stop(org.osgi.framework.BundleContext) */ @Override public void stop(BundleContext context) throws Exception { plugin = null; super.stop(context); } /** * We ship a version of the MySQL JDBC driver with the AWS Toolkit for * Eclipse, so when this plugin starts up, we make sure we have a DTP Driver * Definition created for MySQL. */ private void createMySqlDriverDefinition() { MySqlConnectionFactory connectionFactory = new MySqlConnectionFactory(null); String targetId = "DriverDefn." + connectionFactory.getDriverTemplate() + "." 
+ connectionFactory.createDriverName(); DriverInstance existingDriver = DriverManager.getInstance().getDriverInstanceByID(targetId); if (existingDriver != null) { if (existingDriver.getJarList().contains(MYSQL_DRIVER_FILE_NAME)) { return; } else { AwsToolkitCore.getDefault().logInfo( "Removing RDS MySQL Driver instance configured with the legacy jdbc connector..."); DriverManager.getInstance().removeDriverInstance(targetId); } } Properties driverProperties = new Properties(); driverProperties.setProperty(IJDBCConnectionProfileConstants.DRIVER_CLASS_PROP_ID, connectionFactory.getDriverClass()); driverProperties.setProperty(IJDBCConnectionProfileConstants.DATABASE_VENDOR_PROP_ID, connectionFactory.getDatabaseVendor()); driverProperties.setProperty(IJDBCConnectionProfileConstants.DATABASE_VERSION_PROP_ID, connectionFactory.getDatabaseVersion()); driverProperties.setProperty(IJDBCConnectionProfileConstants.SAVE_PASSWORD_PROP_ID, String.valueOf(true)); driverProperties.setProperty(IDriverMgmtConstants.PROP_DEFN_TYPE, connectionFactory.getDriverTemplate()); String jarList = installMySqlDriverInWorkspace().getAbsolutePath(); driverProperties.setProperty(IDriverMgmtConstants.PROP_DEFN_JARLIST, jarList ); IPropertySet propertySet = new PropertySetImpl(connectionFactory.createDriverName(), targetId); propertySet.setBaseProperties(driverProperties); DriverInstance driver = new DriverInstance(propertySet); DriverManager.getInstance().addDriverInstance(driver); } /** * Unlike the other supported DB engines, the MySQL JDBC driver ships with * the AWS Toolkit for Eclipse. * * We copy the library out of the plugin directory because as new plugin * versions are installed, this location could become invalid once new * plugin versions replace this version and have a different path on disk. * * @return The file where the MySQL driver library was installed in the * workspace. 
*/ private File installMySqlDriverInWorkspace() { Bundle bundle = Platform.getBundle(RDSPlugin.PLUGIN_ID); Path path = new Path("lib/" + MYSQL_DRIVER_FILE_NAME); URL fileURL = FileLocator.find(bundle, path, null); try { IPath stateLocation = Platform.getStateLocation(Platform.getBundle(RDSPlugin.PLUGIN_ID)); File mysqlDriversDir = new File(stateLocation.toFile(), "mysqlDrivers"); String jarPath = FileLocator.resolve(fileURL).getPath(); File sourceFile = new File(jarPath); File destinationFile = new File(mysqlDriversDir, MYSQL_DRIVER_FILE_NAME); FileUtils.copyFile(sourceFile, destinationFile); return destinationFile; } catch (IOException e) { throw new RuntimeException("Unable to locate MySQL driver on disk.", e); } } /** * Connects the specified connection profile and selects and reveals it in * the Data Source Explorer view. * * @param profile * The connection profile to connect and reveal. */ public static void connectAndReveal(final IConnectionProfile profile) { IStatus connectStatus = profile.connect(); if (connectStatus.isOK() == false) { Status status = new Status(IStatus.ERROR, RDSPlugin.PLUGIN_ID, "Unable to connect to the database. 
Make sure your password is correct and make sure you can access your database through your network and any firewalls you may be connecting through."); StatusManager.getManager().handle(status, StatusManager.BLOCK | StatusManager.LOG); return; } Display.getDefault().syncExec(new Runnable() { @Override public void run() { try { IViewPart view = PlatformUI.getWorkbench().getActiveWorkbenchWindow().getActivePage().showView("org.eclipse.datatools.connectivity.DataSourceExplorerNavigator"); if (view instanceof DataSourceExplorerView) { DataSourceExplorerView dse = (DataSourceExplorerView)view; StructuredSelection selection = new StructuredSelection(profile); dse.getCommonViewer().setSelection(selection, true); } } catch (Exception e) { Status status = new Status(IStatus.ERROR, RDSPlugin.PLUGIN_ID, "Unable to reveal connection profile: " + e.getMessage(), e); StatusManager.getManager().handle(status, StatusManager.LOG); } } }); } /** * Returns the shared instance * * @return the shared instance */ public static RDSPlugin getDefault() { return plugin; } }
7,090
0
Create_ds/aws-toolkit-eclipse/bundles/com.amazonaws.eclipse.rds/src/com/amazonaws/eclipse
Create_ds/aws-toolkit-eclipse/bundles/com.amazonaws.eclipse.rds/src/com/amazonaws/eclipse/rds/ConfigureImportOptionsPage.java
/* * Copyright 2011-2014 Amazon Technologies, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at: * * http://aws.amazon.com/apache2.0 * * This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES * OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and * limitations under the License. */ package com.amazonaws.eclipse.rds; import java.io.File; import java.io.IOException; import java.util.regex.Matcher; import java.util.regex.Pattern; import org.eclipse.core.databinding.DataBindingContext; import org.eclipse.core.databinding.UpdateValueStrategy; import org.eclipse.core.databinding.beans.PojoObservables; import org.eclipse.core.databinding.observable.value.IObservableValue; import org.eclipse.core.databinding.observable.value.IValueChangeListener; import org.eclipse.core.databinding.observable.value.ValueChangeEvent; import org.eclipse.core.runtime.IProgressMonitor; import org.eclipse.core.runtime.IStatus; import org.eclipse.core.runtime.Status; import org.eclipse.core.runtime.jobs.Job; import org.eclipse.datatools.connectivity.drivers.DriverInstance; import org.eclipse.datatools.connectivity.drivers.DriverManager; import org.eclipse.jface.databinding.swt.ISWTObservableValue; import org.eclipse.jface.databinding.swt.SWTObservables; import org.eclipse.jface.wizard.WizardPage; import org.eclipse.swt.SWT; import org.eclipse.swt.events.SelectionEvent; import org.eclipse.swt.events.SelectionListener; import org.eclipse.swt.layout.FillLayout; import org.eclipse.swt.layout.GridData; import org.eclipse.swt.layout.GridLayout; import org.eclipse.swt.widgets.Button; import org.eclipse.swt.widgets.Combo; import org.eclipse.swt.widgets.Composite; import org.eclipse.swt.widgets.Display; import org.eclipse.swt.widgets.FileDialog; import org.eclipse.swt.widgets.Group; import 
org.eclipse.swt.widgets.Label; import org.eclipse.swt.widgets.Text; import com.amazonaws.eclipse.rds.connectionfactories.DatabaseConnectionFactory; import com.amazonaws.eclipse.rds.util.CheckIpUtil; import com.amazonaws.services.rds.model.DBInstance; public class ConfigureImportOptionsPage extends WizardPage { /** Pattern for matching a CIDR IP range, ex: 72.130.0.0/32 */ private static Pattern CIDR_PATTERN = Pattern.compile("^\\d+\\.\\d+\\.\\d+\\.\\d+/\\d+$"); private final DataBindingContext bindingContext = new DataBindingContext(); private final ImportDBInstanceDataModel wizardDataModel; private Text dbPasswordText; private Text cidrIpRangeText; private Button configurePermissionsRadio; private Text driverText; private ISWTObservableValue driverObservable; private Button useExistingDriverRadio; private Combo existingDriverCombo; private Button selectDriverButton; private Button createNewDriverRadio; protected ConfigureImportOptionsPage(ImportDBInstanceDataModel wizardDataModel) { super("configureRdsDbWizardPage3"); this.wizardDataModel = wizardDataModel; } private void createPermissionsSection(Composite parent) { Composite composite = new Composite(parent, SWT.NONE); composite.setLayout(new FillLayout()); composite.setLayoutData(new GridData(SWT.FILL, SWT.TOP, true, false)); Group group = new Group(composite, SWT.NONE); group.setText("Security Group Permissions:"); group .setLayout(new GridLayout(2, false)); configurePermissionsRadio = new Button(group, SWT.CHECK); configurePermissionsRadio.setText("Configure security group permissions automatically"); configurePermissionsRadio.setSelection(true); configurePermissionsRadio.setLayoutData(new GridData(SWT.LEFT, SWT.TOP, true, false, 2, 1)); Label label = new Label(group, SWT.WRAP); label.setText("Security group permissions must be configured to allow incoming connections in order to connect to your RDS DB instance. 
" + "If you choose not to open the security group permissions automatically, you MUST configure the security group permissions manually before you can connect to your DB instance."); GridData gridData = new GridData(SWT.LEFT, SWT.TOP, true, false, 2, 1); gridData.widthHint = 500; gridData.horizontalIndent = 20; label.setLayoutData(gridData); Label cidrIpRangeLabel = new Label(group, SWT.NONE); cidrIpRangeLabel.setText("CIDR IP Range:"); gridData = new GridData(SWT.LEFT, SWT.TOP, false, false); gridData.horizontalIndent = 20; cidrIpRangeLabel.setLayoutData(gridData); cidrIpRangeText = new Text(group, SWT.BORDER); GridData gridData2 = new GridData(SWT.LEFT, SWT.TOP, true, false); gridData2.widthHint = 150; cidrIpRangeText.setLayoutData(gridData2); new CheckOutgoingIpRange().schedule(); } private void createBasicOptions(Composite parent) { Group group = new Group(parent, SWT.NONE); group.setText("Connection:"); group.setLayout(new FillLayout()); group.setLayoutData(new GridData(SWT.FILL, SWT.TOP, true, false)); Composite composite = new Composite(group, SWT.NONE); composite.setLayout(new GridLayout(2, false)); DBInstance dbInstance = wizardDataModel.getDbInstance(); new Label(composite, SWT.NONE).setText("DB Instance:"); new Label(composite, SWT.NONE).setText(dbInstance.getDBInstanceIdentifier()); new Label(composite, SWT.NONE).setText("Endpoint:"); new Label(composite, SWT.NONE).setText(dbInstance.getEndpoint().getAddress() + ":" + dbInstance.getEndpoint().getPort()); new Label(composite, SWT.NONE).setText("Engine:"); new Label(composite, SWT.NONE).setText(dbInstance.getEngine() + " (" + dbInstance.getEngineVersion() + ")"); new Label(composite, SWT.NONE).setText("User:"); new Label(composite, SWT.NONE).setText(dbInstance.getMasterUsername()); new Label(composite, SWT.NONE).setText("Password:"); dbPasswordText = new Text(composite, SWT.PASSWORD | SWT.BORDER); dbPasswordText.setLayoutData(new GridData(150, SWT.DEFAULT)); dbPasswordText.setFocus(); } private final 
class CheckOutgoingIpRange extends Job { private String cidr = "0.0.0.0/0"; private CheckOutgoingIpRange() { super("Checking outgoing IP range"); } @Override protected IStatus run(IProgressMonitor arg0) { try { cidr = CheckIpUtil.checkIp() + "/32"; return Status.OK_STATUS; } catch (IOException ioe) { return new Status(IStatus.WARNING, RDSPlugin.PLUGIN_ID, "Unable to determine outgoing IP address", ioe); } finally { Display.getDefault().asyncExec(new Runnable() { @Override public void run() { cidrIpRangeText.setText(cidr); } }); } } } public void createJdbcDriverSection(Composite parent) { Group group = new Group(parent, SWT.NONE); group.setText("JDBC Driver:"); group.setLayout(new FillLayout()); group.setLayoutData(new GridData(SWT.FILL, SWT.TOP, true, false)); Composite composite = new Composite(group, SWT.NONE); composite.setLayout(new GridLayout(3, false)); useExistingDriverRadio = new Button(composite, SWT.RADIO); useExistingDriverRadio.setText("Use existing driver definition:"); useExistingDriverRadio.setLayoutData(new GridData(SWT.LEFT, SWT.TOP, true, false, 3, 1)); existingDriverCombo = new Combo(composite, SWT.READ_ONLY); GridData gridData = new GridData(SWT.LEFT, SWT.TOP, true, false, 3, 1); gridData.horizontalIndent = 20; gridData.widthHint = 200; existingDriverCombo.setLayoutData(gridData); DatabaseConnectionFactory connectionFactory = DatabaseConnectionFactory.createConnectionFactory(wizardDataModel); for (DriverInstance driverInstance : DriverManager.getInstance().getDriverInstancesByTemplate(connectionFactory.getDriverTemplate())) { existingDriverCombo.add(driverInstance.getName()); existingDriverCombo.setData(driverInstance.getName(), driverInstance); } createNewDriverRadio = new Button(composite, SWT.RADIO); createNewDriverRadio.setText("Create new driver definition:"); createNewDriverRadio.setLayoutData(new GridData(SWT.LEFT, SWT.TOP, true, false, 3, 1)); Label driverJarLabel = new Label(composite, SWT.NONE); driverJarLabel.setText("Driver Jar:"); 
GridData driverJarGridData = new GridData(SWT.LEFT, SWT.CENTER, false, false); driverJarGridData.horizontalIndent = 20; driverJarLabel.setLayoutData(driverJarGridData); driverText = new Text(composite, SWT.BORDER | SWT.READ_ONLY); driverText.setLayoutData(new GridData(SWT.FILL, SWT.CENTER, true, false)); driverObservable = SWTObservables.observeText(driverText, SWT.Modify); selectDriverButton = new Button(composite, SWT.PUSH); selectDriverButton.setText("Select Jar"); selectDriverButton.addSelectionListener(new SelectionListener() { @Override public void widgetSelected(SelectionEvent e) { FileDialog fileDialog = new FileDialog(Display.getCurrent().getActiveShell(), SWT.MULTI); fileDialog.setFilterExtensions(new String[] {"jar"}); if (fileDialog.open() == null) return; File file = new File(fileDialog.getFileName()); if (fileDialog.getFilterPath() != null) { file = new File(fileDialog.getFilterPath(), fileDialog.getFileName()); } driverText.setText(file.getAbsolutePath()); wizardDataModel.setJdbcDriver(file); } @Override public void widgetDefaultSelected(SelectionEvent e) {} }); if (existingDriverCombo.getItemCount() > 0) { useExistingDriverRadio.setSelection(true); existingDriverCombo.select(0); } else { createNewDriverRadio.setSelection(true); } } @Override public void createControl(Composite parent) { this.setDescription("Specify options for connecting to your Amazon RDS database."); this.setTitle("Configure RDS Database Connection"); Composite composite = new Composite(parent, SWT.NONE); composite.setLayout(new GridLayout(1, false)); setControl(composite); createBasicOptions(composite); createJdbcDriverSection(composite); createPermissionsSection(composite); setPageComplete(false); bindControls(); } @SuppressWarnings("static-access") private void bindControls() { ISWTObservableValue dbPasswordTextObservable = SWTObservables.observeText(dbPasswordText, SWT.Modify); ISWTObservableValue cidrIpTextObservable = SWTObservables.observeText(cidrIpRangeText, SWT.Modify); 
ISWTObservableValue configurePermissionsRadioObservable = SWTObservables.observeSelection(configurePermissionsRadio); ISWTObservableValue useExistingDriverRadioObservable = SWTObservables.observeSelection(useExistingDriverRadio); ISWTObservableValue createNewDriverObservable = SWTObservables.observeSelection(createNewDriverRadio); ISWTObservableValue existingDriverComboObservable = SWTObservables.observeSelection(existingDriverCombo); IObservableValue existingDriverComboDataObservable = new ControlDataObservableValue(existingDriverComboObservable, true); bindingContext.bindValue(dbPasswordTextObservable, PojoObservables.observeValue(wizardDataModel, wizardDataModel.DB_PASSWORD), null, new UpdateValueStrategy(UpdateValueStrategy.POLICY_NEVER)); bindingContext.bindValue(cidrIpTextObservable, PojoObservables.observeValue(wizardDataModel, wizardDataModel.CIDR_IP_RANGE), null, new UpdateValueStrategy(UpdateValueStrategy.POLICY_NEVER)); bindingContext.bindValue(useExistingDriverRadioObservable, PojoObservables.observeValue(wizardDataModel, wizardDataModel.USE_EXISTING_DRIVER_DEFINITION), null, new UpdateValueStrategy(UpdateValueStrategy.POLICY_NEVER)); bindingContext.bindValue(configurePermissionsRadioObservable, PojoObservables.observeValue(wizardDataModel, wizardDataModel.CONFIGURE_PERMISSIONS), null, new UpdateValueStrategy(UpdateValueStrategy.POLICY_NEVER)); bindingContext.bindValue(existingDriverComboDataObservable, PojoObservables.observeValue(wizardDataModel, wizardDataModel.DRIVER_DEFINITION), null, new UpdateValueStrategy(UpdateValueStrategy.POLICY_NEVER)); IValueChangeListener valueChangeListener = new IValueChangeListener() { @Override public void handleValueChange(ValueChangeEvent event) { validateUserInput(); } }; dbPasswordTextObservable.addValueChangeListener(valueChangeListener); cidrIpTextObservable.addValueChangeListener(valueChangeListener); configurePermissionsRadioObservable.addValueChangeListener(valueChangeListener); 
useExistingDriverRadioObservable.addValueChangeListener(valueChangeListener); createNewDriverObservable.addValueChangeListener(valueChangeListener); driverObservable.addValueChangeListener(valueChangeListener); bindingContext.updateModels(); } private void validateUserInput() { boolean complete = dbPasswordText.getText().length() > 0; if (configurePermissionsRadio.getSelection()) { cidrIpRangeText.setEnabled(true); Matcher matcher = CIDR_PATTERN.matcher(cidrIpRangeText.getText()); complete &= matcher.matches(); } else { cidrIpRangeText.setEnabled(false); } if (useExistingDriverRadio.getSelection()) { driverText.setEnabled(false); existingDriverCombo.setEnabled(true); selectDriverButton.setEnabled(false); if (existingDriverCombo.getSelectionIndex() < 0) { complete = false; } } else { driverText.setEnabled(true); existingDriverCombo.setEnabled(false); selectDriverButton.setEnabled(true); if (driverText == null || driverText.getText() == null || driverText.getText().length() == 0) { complete = false; } } setPageComplete(complete); } }
7,091
0
Create_ds/aws-toolkit-eclipse/bundles/com.amazonaws.eclipse.rds/src/com/amazonaws/eclipse/rds
Create_ds/aws-toolkit-eclipse/bundles/com.amazonaws.eclipse.rds/src/com/amazonaws/eclipse/rds/util/CheckIpUtil.java
/* * Copyright 2014 Amazon Technologies, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at: * * http://aws.amazon.com/apache2.0 * * This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES * OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and * limitations under the License. */ package com.amazonaws.eclipse.rds.util; import java.io.BufferedReader; import java.io.IOException; import java.io.InputStreamReader; import java.net.InetSocketAddress; import java.net.Proxy; import java.net.URI; import java.net.URL; import java.net.URLConnection; import org.eclipse.core.net.proxy.IProxyData; import org.eclipse.core.net.proxy.IProxyService; import com.amazonaws.eclipse.core.AwsToolkitCore; /** * Utility to find a client's internet routable outgoing IP address using * checkip.amazonaws.com */ public final class CheckIpUtil { private static final int CONNECTION_TIMEOUT_MILLIS = 4000; private static final String CHECKIP_URL = "http://checkip.amazonaws.com/"; public static final String checkIp() throws IOException { URLConnection connection = null; try { connection = openConnection(); BufferedReader reader = new BufferedReader(new InputStreamReader(connection.getInputStream())); return reader.readLine(); } finally { try { if (connection != null) connection.getInputStream().close(); } catch (IOException e) {} } } private static URLConnection openConnection() throws IOException { URL url = new URL(CHECKIP_URL); URLConnection connection = url.openConnection(createProxy()); connection.setConnectTimeout(CONNECTION_TIMEOUT_MILLIS); connection.setReadTimeout(CONNECTION_TIMEOUT_MILLIS); return connection; } /** * Creates a Proxy to use when opening a URLConnection, otherwise, it * returns <code>Proxy.NO_PROXY</code>. 
* * @return A proxy configured with the settings the user has entered in * Eclipse; otherwise, returns <code>Proxy.NO_PROXY</code>. */ private static Proxy createProxy() { IProxyService proxyService = AwsToolkitCore.getDefault().getProxyService(); if ( proxyService.isProxiesEnabled() ) { IProxyData[] proxyData = proxyService.select(URI.create(CHECKIP_URL)); if ( proxyData.length > 0 ) { // NOTE: For proxy authentication support in this class, we should switch // to HttpClient since java.net.Proxy doesn't allow you to configure // per-instance auth settings, and instead, we'd have to use // java.net.Authenticator#setDefault to set JVM-wide auth settings. InetSocketAddress proxyAddress = new InetSocketAddress(proxyData[0].getHost(), proxyData[0].getPort()); return new Proxy(Proxy.Type.HTTP, proxyAddress); } } return Proxy.NO_PROXY; } }
7,092
0
Create_ds/aws-toolkit-eclipse/bundles/com.amazonaws.eclipse.rds/src/com/amazonaws/eclipse/rds
Create_ds/aws-toolkit-eclipse/bundles/com.amazonaws.eclipse.rds/src/com/amazonaws/eclipse/rds/connectionfactories/AuroraConnectionFactory.java
/* * Copyright 2015 Amazon Technologies, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at: * * http://aws.amazon.com/apache2.0 * * This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES * OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and * limitations under the License. */ package com.amazonaws.eclipse.rds.connectionfactories; import com.amazonaws.eclipse.rds.ImportDBInstanceDataModel; /** * Configuration details for Aurora connections. */ public class AuroraConnectionFactory extends DatabaseConnectionFactory { private final ImportDBInstanceDataModel wizardDataModel; public AuroraConnectionFactory(ImportDBInstanceDataModel wizardDataModel) { this.wizardDataModel = wizardDataModel; } @Override public String getDriverClass() { return "com.mysql.jdbc.Driver"; } @Override public String getDatabaseVendor() { return "MySql"; } @Override public String getDatabaseVersion() { return "5.6"; } @Override public String createJdbcUrl() { // Example MySQL JDBC URL: jdbc:mysql://server:1521/db String host = wizardDataModel.getDbInstance().getEndpoint().getAddress(); Integer port = wizardDataModel.getDbInstance().getEndpoint().getPort(); String dbName = wizardDataModel.getDbInstance().getDBName(); return "jdbc:mysql://" + host + ":" + port + "/" + dbName; } @Override public String createDriverName() { return "RDS MySQL Driver"; } @Override public String getDriverTemplate() { return "org.eclipse.datatools.enablement.mysql.5_1.driverTemplate"; } @Override public String getConnectionProfileProviderId() { return "org.eclipse.datatools.enablement.mysql.connectionProfile"; } }
7,093
0
Create_ds/aws-toolkit-eclipse/bundles/com.amazonaws.eclipse.rds/src/com/amazonaws/eclipse/rds
Create_ds/aws-toolkit-eclipse/bundles/com.amazonaws.eclipse.rds/src/com/amazonaws/eclipse/rds/connectionfactories/SqlServerConnectionFactory.java
/* * Copyright 2014 Amazon Technologies, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at: * * http://aws.amazon.com/apache2.0 * * This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES * OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and * limitations under the License. */ package com.amazonaws.eclipse.rds.connectionfactories; import com.amazonaws.eclipse.rds.ImportDBInstanceDataModel; /** * Configuration details for Microsoft SQL Server connections. */ public class SqlServerConnectionFactory extends DatabaseConnectionFactory { private final ImportDBInstanceDataModel wizardDataModel; public SqlServerConnectionFactory(ImportDBInstanceDataModel wizardDataModel) { this.wizardDataModel = wizardDataModel; } @Override public String getDriverClass() { return "com.microsoft.sqlserver.jdbc.SQLServerDriver"; } @Override public String getDatabaseVendor() { return "SQL Server"; } @Override public String getDatabaseVersion() { return "2012"; } @Override public String createJdbcUrl() { // Example SQL Server JDBC URL: jdbc:sqlserver://server:1521 // // NOTE: For SQL Server, we always use the default database, so // we don't specify a database name in the JDBC connection string String host = wizardDataModel.getDbInstance().getEndpoint().getAddress(); Integer port = wizardDataModel.getDbInstance().getEndpoint().getPort(); return "jdbc:sqlserver://" + host + ":" + port; } @Override public String createDriverName() { return "RDS SQL Server Driver"; } @Override public String getDriverTemplate() { return "org.eclipse.datatools.enablement.msft.sqlserver.2008.driverTemplate"; } @Override public String getConnectionProfileProviderId() { return "org.eclipse.datatools.enablement.msft.sqlserver.connectionProfile"; } }
7,094
0
Create_ds/aws-toolkit-eclipse/bundles/com.amazonaws.eclipse.rds/src/com/amazonaws/eclipse/rds
Create_ds/aws-toolkit-eclipse/bundles/com.amazonaws.eclipse.rds/src/com/amazonaws/eclipse/rds/connectionfactories/MySqlConnectionFactory.java
/* * Copyright 2011-2014 Amazon Technologies, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at: * * http://aws.amazon.com/apache2.0 * * This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES * OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and * limitations under the License. */ package com.amazonaws.eclipse.rds.connectionfactories; import com.amazonaws.eclipse.rds.ImportDBInstanceDataModel; /** * Configuration details for MySQL connections. */ public class MySqlConnectionFactory extends DatabaseConnectionFactory { private final ImportDBInstanceDataModel wizardDataModel; public MySqlConnectionFactory(ImportDBInstanceDataModel wizardDataModel) { this.wizardDataModel = wizardDataModel; } @Override public String getDriverClass() { return "com.mysql.jdbc.Driver"; } @Override public String getDatabaseVendor() { return "MySql"; } @Override public String getDatabaseVersion() { return "5.1"; } @Override public String createJdbcUrl() { // Example MySQL JDBC URL: jdbc:mysql://server:1521/db String host = wizardDataModel.getDbInstance().getEndpoint().getAddress(); Integer port = wizardDataModel.getDbInstance().getEndpoint().getPort(); String dbName = wizardDataModel.getDbInstance().getDBName(); return "jdbc:mysql://" + host + ":" + port + "/" + dbName; } @Override public String createDriverName() { return "RDS MySQL Driver"; } @Override public String getDriverTemplate() { return "org.eclipse.datatools.enablement.mysql.5_1.driverTemplate"; } @Override public String getConnectionProfileProviderId() { return "org.eclipse.datatools.enablement.mysql.connectionProfile"; } }
7,095
0
Create_ds/aws-toolkit-eclipse/bundles/com.amazonaws.eclipse.rds/src/com/amazonaws/eclipse/rds
Create_ds/aws-toolkit-eclipse/bundles/com.amazonaws.eclipse.rds/src/com/amazonaws/eclipse/rds/connectionfactories/PostgreSqlConnectionFactory.java
/* * Copyright 2014 Amazon Technologies, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at: * * http://aws.amazon.com/apache2.0 * * This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES * OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and * limitations under the License. */ package com.amazonaws.eclipse.rds.connectionfactories; import com.amazonaws.eclipse.rds.ImportDBInstanceDataModel; /** * Configuration details for PostgreSQL connections. */ public class PostgreSqlConnectionFactory extends DatabaseConnectionFactory { private final ImportDBInstanceDataModel wizardDataModel; public PostgreSqlConnectionFactory(ImportDBInstanceDataModel wizardDataModel) { this.wizardDataModel = wizardDataModel; } @Override public String getDriverClass() { return "org.postgresql.Driver"; } @Override public String getDatabaseVendor() { return "PostgreSQL"; } @Override public String getDatabaseVersion() { return "9.x"; } @Override public String createJdbcUrl() { // Example PostgreSQL JDBC URL: jdbc:postgresql://server:1521/db String host = wizardDataModel.getDbInstance().getEndpoint().getAddress(); Integer port = wizardDataModel.getDbInstance().getEndpoint().getPort(); String dbName = wizardDataModel.getDbInstance().getDBName(); return "jdbc:postgresql://" + host + ":" + port + "/" + dbName; } @Override public String createDriverName() { return "RDS PostgreSQL Driver"; } @Override public String getDriverTemplate() { return "org.eclipse.datatools.enablement.postgresql.postgresqlDriverTemplate"; } @Override public String getConnectionProfileProviderId() { return "org.eclipse.datatools.enablement.postgresql.connectionProfile"; } }
7,096
0
Create_ds/aws-toolkit-eclipse/bundles/com.amazonaws.eclipse.rds/src/com/amazonaws/eclipse/rds
Create_ds/aws-toolkit-eclipse/bundles/com.amazonaws.eclipse.rds/src/com/amazonaws/eclipse/rds/connectionfactories/DatabaseConnectionFactory.java
/*
 * Copyright 2011-2014 Amazon Technologies, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *    http://aws.amazon.com/apache2.0
 *
 * This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
 * OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and
 * limitations under the License.
 */
package com.amazonaws.eclipse.rds.connectionfactories;

import java.util.Properties;

import com.amazonaws.eclipse.rds.ImportDBInstanceDataModel;

/**
 * Implementations of this class describe the configuration values for
 * configuring a database driver and connecting to a specific RDS database
 * engine (ex: Oracle, MySQL, PostgreSQL, etc).
 */
public abstract class DatabaseConnectionFactory {

    /**
     * Returns the complete JDBC connection string for connecting to a specific
     * database engine.
     */
    public abstract String createJdbcUrl();

    /**
     * Returns the driver name to use for a connection to a specific database
     * engine.
     */
    public abstract String createDriverName();

    /**
     * Returns the Eclipse DTP driver template ID used to create new drivers for
     * a specific database engine.
     */
    public abstract String getDriverTemplate();

    /**
     * Returns the Eclipse DTP connection profile provider ID used to create new
     * connection profiles for a specific database engine.
     */
    public abstract String getConnectionProfileProviderId();

    /**
     * Returns the name of the class from the JDBC driver library that
     * implements the JDBC driver for a specific database engine.
     */
    public abstract String getDriverClass();

    /**
     * Returns the database vendor name for a specific database engine.
     */
    public abstract String getDatabaseVendor();

    /**
     * Returns the database version for a specific database engine.
     */
    public abstract String getDatabaseVersion();

    /**
     * This method can be optionally implemented to supply any custom DB driver
     * properties.
     *
     * @return Additional driver properties, or null when the engine needs none
     *         (the default); callers must handle a null return.
     */
    public Properties getAdditionalDriverProperties() {
        return null;
    }

    /**
     * Creates a specific database connection factory based on the details in
     * the provided <code>wizardDataModel</code>, which describes what database
     * the user is connecting to.
     *
     * @param wizardDataModel
     *            Details of the database connection that's being established.
     *
     * @return A specific implementation of DatabaseConnectionFactory that has
     *         the database specific logic and configuration to connect to the
     *         specified database.
     *
     * @throws IllegalArgumentException
     *             If the data model's engine string doesn't match any
     *             supported database engine.
     */
    public static DatabaseConnectionFactory createConnectionFactory(ImportDBInstanceDataModel wizardDataModel) {
        // Engine strings are matched by prefix because RDS reports variants
        // such as "oracle-se1" or "sqlserver-ex".
        final String dbEngine = wizardDataModel.getDbInstance().getEngine();

        if (dbEngine.startsWith("oracle")) {
            return new OracleConnectionFactory(wizardDataModel);
        } else if (dbEngine.startsWith("mysql")) {
            return new MySqlConnectionFactory(wizardDataModel);
        } else if (dbEngine.startsWith("postgres")) {
            return new PostgreSqlConnectionFactory(wizardDataModel);
        } else if (dbEngine.startsWith("sqlserver")) {
            return new SqlServerConnectionFactory(wizardDataModel);
        } else if (dbEngine.startsWith("aurora")) {
            return new AuroraConnectionFactory(wizardDataModel);
        }

        // IllegalArgumentException (a RuntimeException subclass, so
        // backward-compatible for callers) is the standard type for an
        // unsupported argument value.
        throw new IllegalArgumentException("Unsupported database engine: " + dbEngine);
    }
}
7,097
0
Create_ds/aws-toolkit-eclipse/bundles/com.amazonaws.eclipse.rds/src/com/amazonaws/eclipse/rds
Create_ds/aws-toolkit-eclipse/bundles/com.amazonaws.eclipse.rds/src/com/amazonaws/eclipse/rds/connectionfactories/OracleConnectionFactory.java
/*
 * Copyright 2011-2014 Amazon Technologies, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *    http://aws.amazon.com/apache2.0
 *
 * This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
 * OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and
 * limitations under the License.
 */
package com.amazonaws.eclipse.rds.connectionfactories;

import java.util.Properties;

import com.amazonaws.eclipse.rds.ImportDBInstanceDataModel;

/**
 * Connection factory supplying the driver and connection-profile
 * configuration needed to connect to an RDS Oracle database instance.
 */
public class OracleConnectionFactory extends DatabaseConnectionFactory {

    /** Wizard state describing the DB instance being imported. */
    private final ImportDBInstanceDataModel wizardDataModel;

    public OracleConnectionFactory(ImportDBInstanceDataModel wizardDataModel) {
        this.wizardDataModel = wizardDataModel;
    }

    /** {@inheritDoc} Example Oracle JDBC URL: jdbc:oracle:thin:@server:1521:db */
    @Override
    public String createJdbcUrl() {
        final String address = wizardDataModel.getDbInstance().getEndpoint().getAddress();
        final Integer endpointPort = wizardDataModel.getDbInstance().getEndpoint().getPort();
        final String databaseName = wizardDataModel.getDbInstance().getDBName();
        return String.format("jdbc:oracle:thin:@%s:%s:%s", address, endpointPort, databaseName);
    }

    @Override
    public String createDriverName() {
        return "RDS Oracle Driver";
    }

    @Override
    public String getDriverTemplate() {
        return "org.eclipse.datatools.enablement.oracle.11.driverTemplate";
    }

    @Override
    public String getConnectionProfileProviderId() {
        return "org.eclipse.datatools.enablement.oracle.connectionProfile";
    }

    @Override
    public String getDriverClass() {
        return "oracle.jdbc.OracleDriver";
    }

    @Override
    public String getDatabaseVendor() {
        return "Oracle";
    }

    @Override
    public String getDatabaseVersion() {
        return "11";
    }

    /** Supplies the DTP catalog-type hint required for Oracle connections. */
    @Override
    public Properties getAdditionalDriverProperties() {
        final Properties oracleProperties = new Properties();
        oracleProperties.setProperty("org.eclipse.datatools.enablement.oracle.catalogType", "USER");
        return oracleProperties;
    }
}
7,098
0
Create_ds/aws-toolkit-eclipse/bundles/com.amazonaws.eclipse.codedeploy/src/com/amazonaws/eclipse
Create_ds/aws-toolkit-eclipse/bundles/com.amazonaws.eclipse.codedeploy/src/com/amazonaws/eclipse/codedeploy/UrlConstants.java
package com.amazonaws.eclipse.codedeploy;

import com.amazonaws.eclipse.core.ui.WebLinkListener;

/**
 * URL-related constants for the CodeDeploy plugin.
 *
 * <p>This is a constants holder: it is {@code final} and has a private
 * constructor so it cannot be instantiated or subclassed.
 */
public final class UrlConstants {

    /**
     * Format string for the CodeDeploy console URL; the single {@code %s}
     * placeholder is the AWS region ID.
     */
    public static final String CODE_DEPLOY_CONSOLE_URL_FORMAT =
            "http://console.aws.amazon.com/codedeploy?region=%s";

    /** Shared listener instance for opening web links from the UI. */
    public static final WebLinkListener webLinkListener = new WebLinkListener();

    private UrlConstants() {
        // Prevent instantiation of this constants-only class.
    }
}
7,099