index
int64
0
0
repo_id
stringlengths
26
205
file_path
stringlengths
51
246
content
stringlengths
8
433k
__index_level_0__
int64
0
10k
0
Create_ds/conductor-community/persistence/mysql-persistence/src/main/java/com/netflix/conductor/mysql
Create_ds/conductor-community/persistence/mysql-persistence/src/main/java/com/netflix/conductor/mysql/dao/MySQLQueueDAO.java
/*
 * <p>
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
 * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations under the License.
 */
package com.netflix.conductor.mysql.dao;

import java.sql.Connection;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;

import javax.sql.DataSource;

import org.springframework.retry.support.RetryTemplate;

import com.netflix.conductor.core.events.queue.Message;
import com.netflix.conductor.dao.QueueDAO;
import com.netflix.conductor.mysql.util.Query;

import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Maps;
import com.google.common.util.concurrent.Uninterruptibles;

/**
 * MySQL-backed implementation of {@link QueueDAO}.
 *
 * <p>Messages live in the {@code queue_message} table; known queues are registered in the
 * {@code queue} table. A "popped" flag marks in-flight (delivered but not yet acknowledged)
 * messages; a background task periodically returns stale popped messages to the queue
 * (see {@link #processAllUnacks()}).
 */
public class MySQLQueueDAO extends MySQLBaseDAO implements QueueDAO {

    // Interval (ms) between runs of the unack sweeper, and also its initial delay.
    private static final Long UNACK_SCHEDULE_MS = 60_000L;

    public MySQLQueueDAO(
            RetryTemplate retryTemplate, ObjectMapper objectMapper, DataSource dataSource) {
        super(retryTemplate, objectMapper, dataSource);

        // Periodically un-pop messages whose ack deadline has passed.
        // NOTE(review): this executor is never shut down and uses a non-daemon thread, so it can
        // keep the JVM alive after the DAO is discarded; also `this::processAllUnacks` lets `this`
        // escape the constructor before construction completes — confirm whether the application
        // lifecycle makes this acceptable.
        Executors.newSingleThreadScheduledExecutor()
                .scheduleAtFixedRate(
                        this::processAllUnacks,
                        UNACK_SCHEDULE_MS,
                        UNACK_SCHEDULE_MS,
                        TimeUnit.MILLISECONDS);
        logger.debug(MySQLQueueDAO.class.getName() + " is ready to serve");
    }

    /** Pushes a message with default (0) priority. */
    @Override
    public void push(String queueName, String messageId, long offsetTimeInSecond) {
        push(queueName, messageId, 0, offsetTimeInSecond);
    }

    /** Pushes a single message (no payload) inside a transaction. */
    @Override
    public void push(String queueName, String messageId, int priority, long offsetTimeInSecond) {
        withTransaction(
                tx -> pushMessage(tx, queueName, messageId, null, priority, offsetTimeInSecond));
    }

    /** Pushes a batch of messages (id, payload, priority) in a single transaction, delay 0. */
    @Override
    public void push(String queueName, List<Message> messages) {
        withTransaction(
                tx ->
                        messages.forEach(
                                message ->
                                        pushMessage(
                                                tx,
                                                queueName,
                                                message.getId(),
                                                message.getPayload(),
                                                message.getPriority(),
                                                0)));
    }

    /** Pushes with default (0) priority only if the message is not already queued. */
    @Override
    public boolean pushIfNotExists(String queueName, String messageId, long offsetTimeInSecond) {
        return pushIfNotExists(queueName, messageId, 0, offsetTimeInSecond);
    }

    /**
     * Pushes the message only if it does not already exist in the queue.
     *
     * @return true if the message was inserted, false if it was already present.
     */
    @Override
    public boolean pushIfNotExists(
            String queueName, String messageId, int priority, long offsetTimeInSecond) {
        // Exists-check and insert run in the same (retried) transaction.
        return getWithRetriedTransactions(
                tx -> {
                    if (!existsMessage(tx, queueName, messageId)) {
                        pushMessage(tx, queueName, messageId, null, priority, offsetTimeInSecond);
                        return true;
                    }
                    return false;
                });
    }

    /** Pops up to {@code count} message ids; never returns null (empty list on failure). */
    @Override
    public List<String> pop(String queueName, int count, int timeout) {
        List<Message> messages =
                getWithTransactionWithOutErrorPropagation(
                        tx -> popMessages(tx, queueName, count, timeout));
        if (messages == null) {
            return new ArrayList<>();
        }
        return messages.stream().map(Message::getId).collect(Collectors.toList());
    }

    /** Same as {@link #pop} but returns the full messages (id, priority, payload). */
    @Override
    public List<Message> pollMessages(String queueName, int count, int timeout) {
        List<Message> messages =
                getWithTransactionWithOutErrorPropagation(
                        tx -> popMessages(tx, queueName, count, timeout));
        if (messages == null) {
            return new ArrayList<>();
        }
        return messages;
    }

    /** Deletes the message unconditionally (popped or not). */
    @Override
    public void remove(String queueName, String messageId) {
        withTransaction(tx -> removeMessage(tx, queueName, messageId));
    }

    /**
     * Returns the total number of rows for the queue.
     *
     * <p>NOTE(review): unlike {@link #queuesDetail()}, this count includes popped (in-flight)
     * messages — confirm whether callers expect only un-popped messages here.
     */
    @Override
    public int getSize(String queueName) {
        final String GET_QUEUE_SIZE = "SELECT COUNT(*) FROM queue_message WHERE queue_name = ?";
        return queryWithTransaction(
                GET_QUEUE_SIZE, q -> ((Long) q.addParameter(queueName).executeCount()).intValue());
    }

    /** Acknowledging a message simply deletes its row; returns true if a row was removed. */
    @Override
    public boolean ack(String queueName, String messageId) {
        return getWithRetriedTransactions(tx -> removeMessage(tx, queueName, messageId));
    }

    /**
     * Extends the ack deadline: re-schedules delivery {@code unackTimeout} ms from now.
     *
     * @param unackTimeout timeout in milliseconds; converted (integer division) to seconds.
     * @return true if exactly one row was updated.
     */
    @Override
    public boolean setUnackTimeout(String queueName, String messageId, long unackTimeout) {
        long updatedOffsetTimeInSecond = unackTimeout / 1000;

        final String UPDATE_UNACK_TIMEOUT =
                "UPDATE queue_message SET offset_time_seconds = ?, deliver_on = TIMESTAMPADD(SECOND, ?, CURRENT_TIMESTAMP) WHERE queue_name = ? AND message_id = ?";

        return queryWithTransaction(
                        UPDATE_UNACK_TIMEOUT,
                        q ->
                                q.addParameter(updatedOffsetTimeInSecond)
                                        .addParameter(updatedOffsetTimeInSecond)
                                        .addParameter(queueName)
                                        .addParameter(messageId)
                                        .executeUpdate())
                == 1;
    }

    /** Deletes every message in the queue. */
    @Override
    public void flush(String queueName) {
        final String FLUSH_QUEUE = "DELETE FROM queue_message WHERE queue_name = ?";
        executeWithTransaction(FLUSH_QUEUE, q -> q.addParameter(queueName).executeDelete());
    }

    /** Maps each registered queue name to its count of un-popped (available) messages. */
    @Override
    public Map<String, Long> queuesDetail() {
        final String GET_QUEUES_DETAIL =
                "SELECT queue_name, (SELECT count(*) FROM queue_message WHERE popped = false AND queue_name = q.queue_name) AS size FROM queue q";
        return queryWithTransaction(
                GET_QUEUES_DETAIL,
                q ->
                        q.executeAndFetch(
                                rs -> {
                                    Map<String, Long> detail = Maps.newHashMap();
                                    while (rs.next()) {
                                        String queueName = rs.getString("queue_name");
                                        Long size = rs.getLong("size");
                                        detail.put(queueName, size);
                                    }
                                    return detail;
                                }));
    }

    /**
     * Per-queue detail: queue name -> shard name -> {"size": available, "uacked": in-flight}.
     * Sharding is not implemented for MySQL, so everything is reported under a single shard "a".
     */
    @Override
    public Map<String, Map<String, Map<String, Long>>> queuesDetailVerbose() {
        // @formatter:off
        final String GET_QUEUES_DETAIL_VERBOSE =
                "SELECT queue_name, \n"
                        + "       (SELECT count(*) FROM queue_message WHERE popped = false AND queue_name = q.queue_name) AS size,\n"
                        + "       (SELECT count(*) FROM queue_message WHERE popped = true AND queue_name = q.queue_name) AS uacked \n"
                        + "FROM queue q";
        // @formatter:on

        return queryWithTransaction(
                GET_QUEUES_DETAIL_VERBOSE,
                q ->
                        q.executeAndFetch(
                                rs -> {
                                    Map<String, Map<String, Map<String, Long>>> result =
                                            Maps.newHashMap();
                                    while (rs.next()) {
                                        String queueName = rs.getString("queue_name");
                                        Long size = rs.getLong("size");
                                        Long queueUnacked = rs.getLong("uacked");
                                        result.put(
                                                queueName,
                                                ImmutableMap.of(
                                                        "a",
                                                        ImmutableMap.of(
                                                                // sharding not implemented,
                                                                // returning only
                                                                // one shard with all the
                                                                // info
                                                                "size",
                                                                size,
                                                                "uacked",
                                                                queueUnacked)));
                                    }
                                    return result;
                                }));
    }

    /**
     * Un-pop all un-acknowledged messages for all queues.
     *
     * <p>A message counts as un-acknowledged when it is popped and its deliver_on is more than
     * 60 seconds in the past. Invoked on the schedule set up in the constructor.
     *
     * @since 1.11.6
     */
    public void processAllUnacks() {
        logger.trace("processAllUnacks started");

        final String PROCESS_ALL_UNACKS =
                "UPDATE queue_message SET popped = false WHERE popped = true AND TIMESTAMPADD(SECOND,-60,CURRENT_TIMESTAMP) > deliver_on";
        executeWithTransaction(PROCESS_ALL_UNACKS, Query::executeUpdate);
    }

    /** Same as {@link #processAllUnacks()} but restricted to a single queue. */
    @Override
    public void processUnacks(String queueName) {
        final String PROCESS_UNACKS =
                "UPDATE queue_message SET popped = false WHERE queue_name = ? AND popped = true AND TIMESTAMPADD(SECOND,-60,CURRENT_TIMESTAMP) > deliver_on";
        executeWithTransaction(PROCESS_UNACKS, q -> q.addParameter(queueName).executeUpdate());
    }

    /** Resets the message's delay to zero, making it deliverable immediately. */
    @Override
    public boolean resetOffsetTime(String queueName, String messageId) {
        long offsetTimeInSecond = 0; // Reset to 0
        final String SET_OFFSET_TIME =
                "UPDATE queue_message SET offset_time_seconds = ?, deliver_on = TIMESTAMPADD(SECOND,?,CURRENT_TIMESTAMP) \n"
                        + "WHERE queue_name = ? AND message_id = ?";

        return queryWithTransaction(
                SET_OFFSET_TIME,
                q ->
                        q.addParameter(offsetTimeInSecond)
                                        .addParameter(offsetTimeInSecond)
                                        .addParameter(queueName)
                                        .addParameter(messageId)
                                        .executeUpdate()
                                == 1);
    }

    // True if a row exists for (queueName, messageId), popped or not.
    private boolean existsMessage(Connection connection, String queueName, String messageId) {
        final String EXISTS_MESSAGE =
                "SELECT EXISTS(SELECT 1 FROM queue_message WHERE queue_name = ? AND message_id = ?)";
        return query(
                connection,
                EXISTS_MESSAGE,
                q -> q.addParameter(queueName).addParameter(messageId).exists());
    }

    // Upsert: try UPDATE first (common case), fall back to INSERT ... ON DUPLICATE KEY UPDATE
    // to tolerate a concurrent insert racing between the two statements.
    // Also lazily registers the queue in the `queue` table.
    private void pushMessage(
            Connection connection,
            String queueName,
            String messageId,
            String payload,
            Integer priority,
            long offsetTimeInSecond) {

        createQueueIfNotExists(connection, queueName);

        String UPDATE_MESSAGE =
                "UPDATE queue_message SET payload=?, deliver_on=TIMESTAMPADD(SECOND,?,CURRENT_TIMESTAMP) WHERE queue_name = ? AND message_id = ?";
        int rowsUpdated =
                query(
                        connection,
                        UPDATE_MESSAGE,
                        q ->
                                q.addParameter(payload)
                                        .addParameter(offsetTimeInSecond)
                                        .addParameter(queueName)
                                        .addParameter(messageId)
                                        .executeUpdate());

        if (rowsUpdated == 0) {
            String PUSH_MESSAGE =
                    "INSERT INTO queue_message (deliver_on, queue_name, message_id, priority, offset_time_seconds, payload) VALUES (TIMESTAMPADD(SECOND,?,CURRENT_TIMESTAMP), ?, ?,?,?,?) ON DUPLICATE KEY UPDATE payload=VALUES(payload), deliver_on=VALUES(deliver_on)";
            execute(
                    connection,
                    PUSH_MESSAGE,
                    q ->
                            q.addParameter(offsetTimeInSecond)
                                    .addParameter(queueName)
                                    .addParameter(messageId)
                                    .addParameter(priority)
                                    .addParameter(offsetTimeInSecond)
                                    .addParameter(payload)
                                    .executeUpdate());
        }
    }

    // Deletes the row; returns true if something was deleted.
    private boolean removeMessage(Connection connection, String queueName, String messageId) {
        final String REMOVE_MESSAGE =
                "DELETE FROM queue_message WHERE queue_name = ? AND message_id = ?";
        return query(
                connection,
                REMOVE_MESSAGE,
                q -> q.addParameter(queueName).addParameter(messageId).executeDelete());
    }

    // Reads (without popping) up to `count` deliverable messages, highest priority first,
    // then oldest deliver_on/created_on. The 1ms TIMESTAMPADD fudge includes messages whose
    // deliver_on is effectively "now". Forces the combo_queue_message index.
    private List<Message> peekMessages(Connection connection, String queueName, int count) {
        if (count < 1) {
            return Collections.emptyList();
        }

        final String PEEK_MESSAGES =
                "SELECT message_id, priority, payload FROM queue_message use index(combo_queue_message) WHERE queue_name = ? AND popped = false AND deliver_on <= TIMESTAMPADD(MICROSECOND, 1000, CURRENT_TIMESTAMP) ORDER BY priority DESC, deliver_on, created_on LIMIT ?";

        return query(
                connection,
                PEEK_MESSAGES,
                p ->
                        p.addParameter(queueName)
                                .addParameter(count)
                                .executeAndFetch(
                                        rs -> {
                                            List<Message> results = new ArrayList<>();
                                            while (rs.next()) {
                                                Message m = new Message();
                                                m.setId(rs.getString("message_id"));
                                                m.setPriority(rs.getInt("priority"));
                                                m.setPayload(rs.getString("payload"));
                                                results.add(m);
                                            }
                                            return results;
                                        }));
    }

    // Peek-then-claim loop: polls (200ms sleep between attempts) until `count` messages are
    // available or `timeout` ms elapse, then marks each as popped. Only messages whose
    // popped flag we actually flip (result == 1) are returned, so two competing consumers
    // cannot both claim the same message.
    // NOTE(review): the sleep happens inside an open transaction/connection — confirm the
    // pool sizing tolerates connections being held for up to `timeout` ms.
    private List<Message> popMessages(
            Connection connection, String queueName, int count, int timeout) {
        long start = System.currentTimeMillis();
        List<Message> messages = peekMessages(connection, queueName, count);

        while (messages.size() < count && ((System.currentTimeMillis() - start) < timeout)) {
            Uninterruptibles.sleepUninterruptibly(200, TimeUnit.MILLISECONDS);
            messages = peekMessages(connection, queueName, count);
        }

        if (messages.isEmpty()) {
            return messages;
        }

        List<Message> poppedMessages = new ArrayList<>();
        for (Message message : messages) {
            final String POP_MESSAGE =
                    "UPDATE queue_message SET popped = true WHERE queue_name = ? AND message_id = ? AND popped = false";
            int result =
                    query(
                            connection,
                            POP_MESSAGE,
                            q ->
                                    q.addParameter(queueName)
                                            .addParameter(message.getId())
                                            .executeUpdate());
            if (result == 1) {
                poppedMessages.add(message);
            }
        }
        return poppedMessages;
    }

    // Registers the queue name if unseen; INSERT IGNORE makes the insert race-safe.
    private void createQueueIfNotExists(Connection connection, String queueName) {
        logger.trace("Creating new queue '{}'", queueName);
        final String EXISTS_QUEUE = "SELECT EXISTS(SELECT 1 FROM queue WHERE queue_name = ?)";
        boolean exists = query(connection, EXISTS_QUEUE, q -> q.addParameter(queueName).exists());
        if (!exists) {
            final String CREATE_QUEUE = "INSERT IGNORE INTO queue (queue_name) VALUES (?)";
            execute(connection, CREATE_QUEUE, q -> q.addParameter(queueName).executeUpdate());
        }
    }

    /** True if the message exists in the queue, whether popped or not. */
    @Override
    public boolean containsMessage(String queueName, String messageId) {
        final String EXISTS_QUEUE =
                "SELECT EXISTS(SELECT 1 FROM queue_message WHERE queue_name = ? AND message_id = ? )";
        return queryWithTransaction(
                EXISTS_QUEUE, q -> q.addParameter(queueName).addParameter(messageId).exists());
    }
}
8,200
0
Create_ds/conductor-community/persistence/mysql-persistence/src/main/java/com/netflix/conductor/mysql
Create_ds/conductor-community/persistence/mysql-persistence/src/main/java/com/netflix/conductor/mysql/dao/MySQLExecutionDAO.java
/* * <p> * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * <p> * http://www.apache.org/licenses/LICENSE-2.0 * <p> * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. */ package com.netflix.conductor.mysql.dao; import java.sql.Connection; import java.sql.SQLException; import java.text.SimpleDateFormat; import java.util.*; import java.util.stream.Collectors; import javax.sql.DataSource; import org.springframework.retry.support.RetryTemplate; import com.netflix.conductor.common.metadata.events.EventExecution; import com.netflix.conductor.common.metadata.tasks.PollData; import com.netflix.conductor.common.metadata.tasks.TaskDef; import com.netflix.conductor.core.exception.NonTransientException; import com.netflix.conductor.dao.ConcurrentExecutionLimitDAO; import com.netflix.conductor.dao.ExecutionDAO; import com.netflix.conductor.dao.PollDataDAO; import com.netflix.conductor.dao.RateLimitingDAO; import com.netflix.conductor.metrics.Monitors; import com.netflix.conductor.model.TaskModel; import com.netflix.conductor.model.WorkflowModel; import com.netflix.conductor.mysql.util.Query; import com.fasterxml.jackson.databind.ObjectMapper; import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; import com.google.common.collect.Lists; public class MySQLExecutionDAO extends MySQLBaseDAO implements ExecutionDAO, RateLimitingDAO, PollDataDAO, ConcurrentExecutionLimitDAO { public MySQLExecutionDAO( RetryTemplate retryTemplate, ObjectMapper objectMapper, DataSource dataSource) { super(retryTemplate, objectMapper, dataSource); } private static String 
dateStr(Long timeInMs) { Date date = new Date(timeInMs); return dateStr(date); } private static String dateStr(Date date) { SimpleDateFormat format = new SimpleDateFormat("yyyyMMdd"); return format.format(date); } @Override public List<TaskModel> getPendingTasksByWorkflow(String taskDefName, String workflowId) { // @formatter:off String GET_IN_PROGRESS_TASKS_FOR_WORKFLOW = "SELECT json_data FROM task_in_progress tip " + "INNER JOIN task t ON t.task_id = tip.task_id " + "WHERE task_def_name = ? AND workflow_id = ?"; // @formatter:on return queryWithTransaction( GET_IN_PROGRESS_TASKS_FOR_WORKFLOW, q -> q.addParameter(taskDefName) .addParameter(workflowId) .executeAndFetch(TaskModel.class)); } @Override public List<TaskModel> getTasks(String taskDefName, String startKey, int count) { List<TaskModel> tasks = new ArrayList<>(count); List<TaskModel> pendingTasks = getPendingTasksForTaskType(taskDefName); boolean startKeyFound = startKey == null; int found = 0; for (TaskModel pendingTask : pendingTasks) { if (!startKeyFound) { if (pendingTask.getTaskId().equals(startKey)) { startKeyFound = true; // noinspection ConstantConditions if (startKey != null) { continue; } } } if (startKeyFound && found < count) { tasks.add(pendingTask); found++; } } return tasks; } private static String taskKey(TaskModel task) { return task.getReferenceTaskName() + "_" + task.getRetryCount(); } @Override public List<TaskModel> createTasks(List<TaskModel> tasks) { List<TaskModel> created = Lists.newArrayListWithCapacity(tasks.size()); withTransaction( connection -> { for (TaskModel task : tasks) { validate(task); task.setScheduledTime(System.currentTimeMillis()); final String taskKey = taskKey(task); boolean scheduledTaskAdded = addScheduledTask(connection, task, taskKey); if (!scheduledTaskAdded) { logger.trace( "Task already scheduled, skipping the run " + task.getTaskId() + ", ref=" + task.getReferenceTaskName() + ", key=" + taskKey); continue; } insertOrUpdateTaskData(connection, task); 
addWorkflowToTaskMapping(connection, task); addTaskInProgress(connection, task); updateTask(connection, task); created.add(task); } }); return created; } @Override public void updateTask(TaskModel task) { withTransaction(connection -> updateTask(connection, task)); } /** * This is a dummy implementation and this feature is not for Mysql backed Conductor * * @param task: which needs to be evaluated whether it is rateLimited or not */ @Override public boolean exceedsRateLimitPerFrequency(TaskModel task, TaskDef taskDef) { return false; } @Override public boolean exceedsLimit(TaskModel task) { Optional<TaskDef> taskDefinition = task.getTaskDefinition(); if (taskDefinition.isEmpty()) { return false; } TaskDef taskDef = taskDefinition.get(); int limit = taskDef.concurrencyLimit(); if (limit <= 0) { return false; } long current = getInProgressTaskCount(task.getTaskDefName()); if (current >= limit) { Monitors.recordTaskConcurrentExecutionLimited(task.getTaskDefName(), limit); return true; } logger.info( "Task execution count for {}: limit={}, current={}", task.getTaskDefName(), limit, getInProgressTaskCount(task.getTaskDefName())); String taskId = task.getTaskId(); List<String> tasksInProgressInOrderOfArrival = findAllTasksInProgressInOrderOfArrival(task, limit); boolean rateLimited = !tasksInProgressInOrderOfArrival.contains(taskId); if (rateLimited) { logger.info( "Task execution count limited. 
{}, limit {}, current {}", task.getTaskDefName(), limit, getInProgressTaskCount(task.getTaskDefName())); Monitors.recordTaskConcurrentExecutionLimited(task.getTaskDefName(), limit); } return rateLimited; } @Override public boolean removeTask(String taskId) { TaskModel task = getTask(taskId); if (task == null) { logger.warn("No such task found by id {}", taskId); return false; } final String taskKey = taskKey(task); withTransaction( connection -> { removeScheduledTask(connection, task, taskKey); removeWorkflowToTaskMapping(connection, task); removeTaskInProgress(connection, task); removeTaskData(connection, task); }); return true; } @Override public TaskModel getTask(String taskId) { String GET_TASK = "SELECT json_data FROM task WHERE task_id = ?"; return queryWithTransaction( GET_TASK, q -> q.addParameter(taskId).executeAndFetchFirst(TaskModel.class)); } @Override public List<TaskModel> getTasks(List<String> taskIds) { if (taskIds.isEmpty()) { return Lists.newArrayList(); } return getWithRetriedTransactions(c -> getTasks(c, taskIds)); } @Override public List<TaskModel> getPendingTasksForTaskType(String taskName) { Preconditions.checkNotNull(taskName, "task name cannot be null"); // @formatter:off String GET_IN_PROGRESS_TASKS_FOR_TYPE = "SELECT json_data FROM task_in_progress tip " + "INNER JOIN task t ON t.task_id = tip.task_id " + "WHERE task_def_name = ?"; // @formatter:on return queryWithTransaction( GET_IN_PROGRESS_TASKS_FOR_TYPE, q -> q.addParameter(taskName).executeAndFetch(TaskModel.class)); } @Override public List<TaskModel> getTasksForWorkflow(String workflowId) { String GET_TASKS_FOR_WORKFLOW = "SELECT task_id FROM workflow_to_task WHERE workflow_id = ?"; return getWithRetriedTransactions( tx -> query( tx, GET_TASKS_FOR_WORKFLOW, q -> { List<String> taskIds = q.addParameter(workflowId) .executeScalarList(String.class); return getTasks(tx, taskIds); })); } @Override public String createWorkflow(WorkflowModel workflow) { return 
insertOrUpdateWorkflow(workflow, false); } @Override public String updateWorkflow(WorkflowModel workflow) { return insertOrUpdateWorkflow(workflow, true); } @Override public boolean removeWorkflow(String workflowId) { boolean removed = false; WorkflowModel workflow = getWorkflow(workflowId, true); if (workflow != null) { withTransaction( connection -> { removeWorkflowDefToWorkflowMapping(connection, workflow); removeWorkflow(connection, workflowId); removePendingWorkflow(connection, workflow.getWorkflowName(), workflowId); }); removed = true; for (TaskModel task : workflow.getTasks()) { if (!removeTask(task.getTaskId())) { removed = false; } } } return removed; } /** * This is a dummy implementation and this feature is not supported for MySQL backed Conductor */ @Override public boolean removeWorkflowWithExpiry(String workflowId, int ttlSeconds) { throw new UnsupportedOperationException( "This method is not implemented in MySQLExecutionDAO. Please use RedisDAO mode instead for using TTLs."); } @Override public void removeFromPendingWorkflow(String workflowType, String workflowId) { withTransaction(connection -> removePendingWorkflow(connection, workflowType, workflowId)); } @Override public WorkflowModel getWorkflow(String workflowId) { return getWorkflow(workflowId, true); } @Override public WorkflowModel getWorkflow(String workflowId, boolean includeTasks) { WorkflowModel workflow = getWithRetriedTransactions(tx -> readWorkflow(tx, workflowId)); if (workflow != null) { if (includeTasks) { List<TaskModel> tasks = getTasksForWorkflow(workflowId); tasks.sort(Comparator.comparingInt(TaskModel::getSeq)); workflow.setTasks(tasks); } } return workflow; } /** * @param workflowName name of the workflow * @param version the workflow version * @return list of workflow ids that are in RUNNING state <em>returns workflows of all versions * for the given workflow name</em> */ @Override public List<String> getRunningWorkflowIds(String workflowName, int version) { 
Preconditions.checkNotNull(workflowName, "workflowName cannot be null"); String GET_PENDING_WORKFLOW_IDS = "SELECT workflow_id FROM workflow_pending WHERE workflow_type = ?"; return queryWithTransaction( GET_PENDING_WORKFLOW_IDS, q -> q.addParameter(workflowName).executeScalarList(String.class)); } /** * @param workflowName Name of the workflow * @param version the workflow version * @return list of workflows that are in RUNNING state */ @Override public List<WorkflowModel> getPendingWorkflowsByType(String workflowName, int version) { Preconditions.checkNotNull(workflowName, "workflowName cannot be null"); return getRunningWorkflowIds(workflowName, version).stream() .map(this::getWorkflow) .filter(workflow -> workflow.getWorkflowVersion() == version) .collect(Collectors.toList()); } @Override public long getPendingWorkflowCount(String workflowName) { Preconditions.checkNotNull(workflowName, "workflowName cannot be null"); String GET_PENDING_WORKFLOW_COUNT = "SELECT COUNT(*) FROM workflow_pending WHERE workflow_type = ?"; return queryWithTransaction( GET_PENDING_WORKFLOW_COUNT, q -> q.addParameter(workflowName).executeCount()); } @Override public long getInProgressTaskCount(String taskDefName) { String GET_IN_PROGRESS_TASK_COUNT = "SELECT COUNT(*) FROM task_in_progress WHERE task_def_name = ? AND in_progress_status = true"; return queryWithTransaction( GET_IN_PROGRESS_TASK_COUNT, q -> q.addParameter(taskDefName).executeCount()); } @Override public List<WorkflowModel> getWorkflowsByType( String workflowName, Long startTime, Long endTime) { Preconditions.checkNotNull(workflowName, "workflowName cannot be null"); Preconditions.checkNotNull(startTime, "startTime cannot be null"); Preconditions.checkNotNull(endTime, "endTime cannot be null"); List<WorkflowModel> workflows = new LinkedList<>(); withTransaction( tx -> { // @formatter:off String GET_ALL_WORKFLOWS_FOR_WORKFLOW_DEF = "SELECT workflow_id FROM workflow_def_to_workflow " + "WHERE workflow_def = ? 
AND date_str BETWEEN ? AND ?"; // @formatter:on List<String> workflowIds = query( tx, GET_ALL_WORKFLOWS_FOR_WORKFLOW_DEF, q -> q.addParameter(workflowName) .addParameter(dateStr(startTime)) .addParameter(dateStr(endTime)) .executeScalarList(String.class)); workflowIds.forEach( workflowId -> { try { WorkflowModel wf = getWorkflow(workflowId); if (wf.getCreateTime() >= startTime && wf.getCreateTime() <= endTime) { workflows.add(wf); } } catch (Exception e) { logger.error( "Unable to load workflow id {} with name {}", workflowId, workflowName, e); } }); }); return workflows; } @Override public List<WorkflowModel> getWorkflowsByCorrelationId( String workflowName, String correlationId, boolean includeTasks) { Preconditions.checkNotNull(correlationId, "correlationId cannot be null"); String GET_WORKFLOWS_BY_CORRELATION_ID = "SELECT w.json_data FROM workflow w left join workflow_def_to_workflow wd on w.workflow_id = wd.workflow_id WHERE w.correlation_id = ? and wd.workflow_def = ?"; return queryWithTransaction( GET_WORKFLOWS_BY_CORRELATION_ID, q -> q.addParameter(correlationId) .addParameter(workflowName) .executeAndFetch(WorkflowModel.class)); } @Override public boolean canSearchAcrossWorkflows() { return true; } @Override public boolean addEventExecution(EventExecution eventExecution) { try { return getWithRetriedTransactions(tx -> insertEventExecution(tx, eventExecution)); } catch (Exception e) { throw new NonTransientException( "Unable to add event execution " + eventExecution.getId(), e); } } @Override public void removeEventExecution(EventExecution eventExecution) { try { withTransaction(tx -> removeEventExecution(tx, eventExecution)); } catch (Exception e) { throw new NonTransientException( "Unable to remove event execution " + eventExecution.getId(), e); } } @Override public void updateEventExecution(EventExecution eventExecution) { try { withTransaction(tx -> updateEventExecution(tx, eventExecution)); } catch (Exception e) { throw new NonTransientException( 
"Unable to update event execution " + eventExecution.getId(), e); } } public List<EventExecution> getEventExecutions( String eventHandlerName, String eventName, String messageId, int max) { try { List<EventExecution> executions = Lists.newLinkedList(); withTransaction( tx -> { for (int i = 0; i < max; i++) { String executionId = messageId + "_" + i; // see SimpleEventProcessor.handle to understand // how the // execution id is set EventExecution ee = readEventExecution( tx, eventHandlerName, eventName, messageId, executionId); if (ee == null) { break; } executions.add(ee); } }); return executions; } catch (Exception e) { String message = String.format( "Unable to get event executions for eventHandlerName=%s, eventName=%s, messageId=%s", eventHandlerName, eventName, messageId); throw new NonTransientException(message, e); } } @Override public void updateLastPollData(String taskDefName, String domain, String workerId) { Preconditions.checkNotNull(taskDefName, "taskDefName name cannot be null"); PollData pollData = new PollData(taskDefName, domain, workerId, System.currentTimeMillis()); String effectiveDomain = (domain == null) ? "DEFAULT" : domain; withTransaction(tx -> insertOrUpdatePollData(tx, pollData, effectiveDomain)); } @Override public PollData getPollData(String taskDefName, String domain) { Preconditions.checkNotNull(taskDefName, "taskDefName name cannot be null"); String effectiveDomain = (domain == null) ? 
"DEFAULT" : domain; return getWithRetriedTransactions(tx -> readPollData(tx, taskDefName, effectiveDomain)); } @Override public List<PollData> getPollData(String taskDefName) { Preconditions.checkNotNull(taskDefName, "taskDefName name cannot be null"); return readAllPollData(taskDefName); } @Override public List<PollData> getAllPollData() { try (Connection tx = dataSource.getConnection()) { boolean previousAutoCommitMode = tx.getAutoCommit(); tx.setAutoCommit(true); try { String GET_ALL_POLL_DATA = "SELECT json_data FROM poll_data ORDER BY queue_name"; return query(tx, GET_ALL_POLL_DATA, q -> q.executeAndFetch(PollData.class)); } catch (Throwable th) { throw new NonTransientException(th.getMessage(), th); } finally { tx.setAutoCommit(previousAutoCommitMode); } } catch (SQLException ex) { throw new NonTransientException(ex.getMessage(), ex); } } private List<TaskModel> getTasks(Connection connection, List<String> taskIds) { if (taskIds.isEmpty()) { return Lists.newArrayList(); } // Generate a formatted query string with a variable number of bind params based // on taskIds.size() final String GET_TASKS_FOR_IDS = String.format( "SELECT json_data FROM task WHERE task_id IN (%s) AND json_data IS NOT NULL", Query.generateInBindings(taskIds.size())); return query( connection, GET_TASKS_FOR_IDS, q -> q.addParameters(taskIds).executeAndFetch(TaskModel.class)); } private String insertOrUpdateWorkflow(WorkflowModel workflow, boolean update) { Preconditions.checkNotNull(workflow, "workflow object cannot be null"); boolean terminal = workflow.getStatus().isTerminal(); List<TaskModel> tasks = workflow.getTasks(); workflow.setTasks(Lists.newLinkedList()); withTransaction( tx -> { if (!update) { addWorkflow(tx, workflow); addWorkflowDefToWorkflowMapping(tx, workflow); } else { updateWorkflow(tx, workflow); } if (terminal) { removePendingWorkflow( tx, workflow.getWorkflowName(), workflow.getWorkflowId()); } else { addPendingWorkflow( tx, workflow.getWorkflowName(), 
workflow.getWorkflowId()); } }); workflow.setTasks(tasks); return workflow.getWorkflowId(); } private void updateTask(Connection connection, TaskModel task) { Optional<TaskDef> taskDefinition = task.getTaskDefinition(); if (taskDefinition.isPresent() && taskDefinition.get().concurrencyLimit() > 0) { boolean inProgress = task.getStatus() != null && task.getStatus().equals(TaskModel.Status.IN_PROGRESS); updateInProgressStatus(connection, task, inProgress); } insertOrUpdateTaskData(connection, task); if (task.getStatus() != null && task.getStatus().isTerminal()) { removeTaskInProgress(connection, task); } addWorkflowToTaskMapping(connection, task); } private WorkflowModel readWorkflow(Connection connection, String workflowId) { String GET_WORKFLOW = "SELECT json_data FROM workflow WHERE workflow_id = ?"; return query( connection, GET_WORKFLOW, q -> q.addParameter(workflowId).executeAndFetchFirst(WorkflowModel.class)); } private void addWorkflow(Connection connection, WorkflowModel workflow) { String INSERT_WORKFLOW = "INSERT INTO workflow (workflow_id, correlation_id, json_data) VALUES (?, ?, ?)"; execute( connection, INSERT_WORKFLOW, q -> q.addParameter(workflow.getWorkflowId()) .addParameter(workflow.getCorrelationId()) .addJsonParameter(workflow) .executeUpdate()); } private void updateWorkflow(Connection connection, WorkflowModel workflow) { String UPDATE_WORKFLOW = "UPDATE workflow SET json_data = ?, modified_on = CURRENT_TIMESTAMP WHERE workflow_id = ?"; execute( connection, UPDATE_WORKFLOW, q -> q.addJsonParameter(workflow) .addParameter(workflow.getWorkflowId()) .executeUpdate()); } private void removeWorkflow(Connection connection, String workflowId) { String REMOVE_WORKFLOW = "DELETE FROM workflow WHERE workflow_id = ?"; execute(connection, REMOVE_WORKFLOW, q -> q.addParameter(workflowId).executeDelete()); } private void addPendingWorkflow(Connection connection, String workflowType, String workflowId) { String EXISTS_PENDING_WORKFLOW = "SELECT EXISTS(SELECT 
1 FROM workflow_pending WHERE workflow_type = ? AND workflow_id = ?)"; boolean exists = query( connection, EXISTS_PENDING_WORKFLOW, q -> q.addParameter(workflowType).addParameter(workflowId).exists()); if (!exists) { String INSERT_PENDING_WORKFLOW = "INSERT IGNORE INTO workflow_pending (workflow_type, workflow_id) VALUES (?, ?)"; execute( connection, INSERT_PENDING_WORKFLOW, q -> q.addParameter(workflowType).addParameter(workflowId).executeUpdate()); } } private void removePendingWorkflow( Connection connection, String workflowType, String workflowId) { String REMOVE_PENDING_WORKFLOW = "DELETE FROM workflow_pending WHERE workflow_type = ? AND workflow_id = ?"; execute( connection, REMOVE_PENDING_WORKFLOW, q -> q.addParameter(workflowType).addParameter(workflowId).executeDelete()); } private void insertOrUpdateTaskData(Connection connection, TaskModel task) { /* * Most times the row will be updated so let's try the update first. This used to be an 'INSERT/ON DUPLICATE KEY update' sql statement. The problem with that * is that if we try the INSERT first, the sequence will be increased even if the ON DUPLICATE KEY happens. 
*/ String UPDATE_TASK = "UPDATE task SET json_data=?, modified_on=CURRENT_TIMESTAMP WHERE task_id=?"; int rowsUpdated = query( connection, UPDATE_TASK, q -> q.addJsonParameter(task) .addParameter(task.getTaskId()) .executeUpdate()); if (rowsUpdated == 0) { String INSERT_TASK = "INSERT INTO task (task_id, json_data, modified_on) VALUES (?, ?, CURRENT_TIMESTAMP) ON DUPLICATE KEY UPDATE json_data=VALUES(json_data), modified_on=VALUES(modified_on)"; execute( connection, INSERT_TASK, q -> q.addParameter(task.getTaskId()).addJsonParameter(task).executeUpdate()); } } private void removeTaskData(Connection connection, TaskModel task) { String REMOVE_TASK = "DELETE FROM task WHERE task_id = ?"; execute(connection, REMOVE_TASK, q -> q.addParameter(task.getTaskId()).executeDelete()); } private void addWorkflowToTaskMapping(Connection connection, TaskModel task) { String EXISTS_WORKFLOW_TO_TASK = "SELECT EXISTS(SELECT 1 FROM workflow_to_task WHERE workflow_id = ? AND task_id = ?)"; boolean exists = query( connection, EXISTS_WORKFLOW_TO_TASK, q -> q.addParameter(task.getWorkflowInstanceId()) .addParameter(task.getTaskId()) .exists()); if (!exists) { String INSERT_WORKFLOW_TO_TASK = "INSERT IGNORE INTO workflow_to_task (workflow_id, task_id) VALUES (?, ?)"; execute( connection, INSERT_WORKFLOW_TO_TASK, q -> q.addParameter(task.getWorkflowInstanceId()) .addParameter(task.getTaskId()) .executeUpdate()); } } private void removeWorkflowToTaskMapping(Connection connection, TaskModel task) { String REMOVE_WORKFLOW_TO_TASK = "DELETE FROM workflow_to_task WHERE workflow_id = ? 
AND task_id = ?"; execute( connection, REMOVE_WORKFLOW_TO_TASK, q -> q.addParameter(task.getWorkflowInstanceId()) .addParameter(task.getTaskId()) .executeDelete()); } private void addWorkflowDefToWorkflowMapping(Connection connection, WorkflowModel workflow) { String INSERT_WORKFLOW_DEF_TO_WORKFLOW = "INSERT INTO workflow_def_to_workflow (workflow_def, date_str, workflow_id) VALUES (?, ?, ?)"; execute( connection, INSERT_WORKFLOW_DEF_TO_WORKFLOW, q -> q.addParameter(workflow.getWorkflowName()) .addParameter(dateStr(workflow.getCreateTime())) .addParameter(workflow.getWorkflowId()) .executeUpdate()); } private void removeWorkflowDefToWorkflowMapping(Connection connection, WorkflowModel workflow) { String REMOVE_WORKFLOW_DEF_TO_WORKFLOW = "DELETE FROM workflow_def_to_workflow WHERE workflow_def = ? AND date_str = ? AND workflow_id = ?"; execute( connection, REMOVE_WORKFLOW_DEF_TO_WORKFLOW, q -> q.addParameter(workflow.getWorkflowName()) .addParameter(dateStr(workflow.getCreateTime())) .addParameter(workflow.getWorkflowId()) .executeUpdate()); } @VisibleForTesting boolean addScheduledTask(Connection connection, TaskModel task, String taskKey) { final String EXISTS_SCHEDULED_TASK = "SELECT EXISTS(SELECT 1 FROM task_scheduled where workflow_id = ? AND task_key = ?)"; boolean exists = query( connection, EXISTS_SCHEDULED_TASK, q -> q.addParameter(task.getWorkflowInstanceId()) .addParameter(taskKey) .exists()); if (!exists) { final String INSERT_IGNORE_SCHEDULED_TASK = "INSERT IGNORE INTO task_scheduled (workflow_id, task_key, task_id) VALUES (?, ?, ?)"; int count = query( connection, INSERT_IGNORE_SCHEDULED_TASK, q -> q.addParameter(task.getWorkflowInstanceId()) .addParameter(taskKey) .addParameter(task.getTaskId()) .executeUpdate()); return count > 0; } else { return false; } } private void removeScheduledTask(Connection connection, TaskModel task, String taskKey) { String REMOVE_SCHEDULED_TASK = "DELETE FROM task_scheduled WHERE workflow_id = ? 
AND task_key = ?"; execute( connection, REMOVE_SCHEDULED_TASK, q -> q.addParameter(task.getWorkflowInstanceId()) .addParameter(taskKey) .executeDelete()); } private void addTaskInProgress(Connection connection, TaskModel task) { String EXISTS_IN_PROGRESS_TASK = "SELECT EXISTS(SELECT 1 FROM task_in_progress WHERE task_def_name = ? AND task_id = ?)"; boolean exists = query( connection, EXISTS_IN_PROGRESS_TASK, q -> q.addParameter(task.getTaskDefName()) .addParameter(task.getTaskId()) .exists()); if (!exists) { String INSERT_IN_PROGRESS_TASK = "INSERT INTO task_in_progress (task_def_name, task_id, workflow_id) VALUES (?, ?, ?)"; execute( connection, INSERT_IN_PROGRESS_TASK, q -> q.addParameter(task.getTaskDefName()) .addParameter(task.getTaskId()) .addParameter(task.getWorkflowInstanceId()) .executeUpdate()); } } private void removeTaskInProgress(Connection connection, TaskModel task) { String REMOVE_IN_PROGRESS_TASK = "DELETE FROM task_in_progress WHERE task_def_name = ? AND task_id = ?"; execute( connection, REMOVE_IN_PROGRESS_TASK, q -> q.addParameter(task.getTaskDefName()) .addParameter(task.getTaskId()) .executeUpdate()); } private void updateInProgressStatus(Connection connection, TaskModel task, boolean inProgress) { String UPDATE_IN_PROGRESS_TASK_STATUS = "UPDATE task_in_progress SET in_progress_status = ?, modified_on = CURRENT_TIMESTAMP " + "WHERE task_def_name = ? 
AND task_id = ?"; execute( connection, UPDATE_IN_PROGRESS_TASK_STATUS, q -> q.addParameter(inProgress) .addParameter(task.getTaskDefName()) .addParameter(task.getTaskId()) .executeUpdate()); } private boolean insertEventExecution(Connection connection, EventExecution eventExecution) { String INSERT_EVENT_EXECUTION = "INSERT INTO event_execution (event_handler_name, event_name, message_id, execution_id, json_data) " + "VALUES (?, ?, ?, ?, ?)"; int count = query( connection, INSERT_EVENT_EXECUTION, q -> q.addParameter(eventExecution.getName()) .addParameter(eventExecution.getEvent()) .addParameter(eventExecution.getMessageId()) .addParameter(eventExecution.getId()) .addJsonParameter(eventExecution) .executeUpdate()); return count > 0; } private void updateEventExecution(Connection connection, EventExecution eventExecution) { // @formatter:off String UPDATE_EVENT_EXECUTION = "UPDATE event_execution SET " + "json_data = ?, " + "modified_on = CURRENT_TIMESTAMP " + "WHERE event_handler_name = ? " + "AND event_name = ? " + "AND message_id = ? " + "AND execution_id = ?"; // @formatter:on execute( connection, UPDATE_EVENT_EXECUTION, q -> q.addJsonParameter(eventExecution) .addParameter(eventExecution.getName()) .addParameter(eventExecution.getEvent()) .addParameter(eventExecution.getMessageId()) .addParameter(eventExecution.getId()) .executeUpdate()); } private void removeEventExecution(Connection connection, EventExecution eventExecution) { String REMOVE_EVENT_EXECUTION = "DELETE FROM event_execution " + "WHERE event_handler_name = ? " + "AND event_name = ? " + "AND message_id = ? 
" + "AND execution_id = ?"; execute( connection, REMOVE_EVENT_EXECUTION, q -> q.addParameter(eventExecution.getName()) .addParameter(eventExecution.getEvent()) .addParameter(eventExecution.getMessageId()) .addParameter(eventExecution.getId()) .executeUpdate()); } private EventExecution readEventExecution( Connection connection, String eventHandlerName, String eventName, String messageId, String executionId) { // @formatter:off String GET_EVENT_EXECUTION = "SELECT json_data FROM event_execution " + "WHERE event_handler_name = ? " + "AND event_name = ? " + "AND message_id = ? " + "AND execution_id = ?"; // @formatter:on return query( connection, GET_EVENT_EXECUTION, q -> q.addParameter(eventHandlerName) .addParameter(eventName) .addParameter(messageId) .addParameter(executionId) .executeAndFetchFirst(EventExecution.class)); } private void insertOrUpdatePollData(Connection connection, PollData pollData, String domain) { /* * Most times the row will be updated so let's try the update first. This used to be an 'INSERT/ON DUPLICATE KEY update' sql statement. The problem with that * is that if we try the INSERT first, the sequence will be increased even if the ON DUPLICATE KEY happens. Since polling happens *a lot*, the sequence can increase * dramatically even though it won't be used. */ String UPDATE_POLL_DATA = "UPDATE poll_data SET json_data=?, modified_on=CURRENT_TIMESTAMP WHERE queue_name=? 
AND domain=?"; int rowsUpdated = query( connection, UPDATE_POLL_DATA, q -> q.addJsonParameter(pollData) .addParameter(pollData.getQueueName()) .addParameter(domain) .executeUpdate()); if (rowsUpdated == 0) { String INSERT_POLL_DATA = "INSERT INTO poll_data (queue_name, domain, json_data, modified_on) VALUES (?, ?, ?, CURRENT_TIMESTAMP) ON DUPLICATE KEY UPDATE json_data=VALUES(json_data), modified_on=VALUES(modified_on)"; execute( connection, INSERT_POLL_DATA, q -> q.addParameter(pollData.getQueueName()) .addParameter(domain) .addJsonParameter(pollData) .executeUpdate()); } } private PollData readPollData(Connection connection, String queueName, String domain) { String GET_POLL_DATA = "SELECT json_data FROM poll_data WHERE queue_name = ? AND domain = ?"; return query( connection, GET_POLL_DATA, q -> q.addParameter(queueName) .addParameter(domain) .executeAndFetchFirst(PollData.class)); } private List<PollData> readAllPollData(String queueName) { String GET_ALL_POLL_DATA = "SELECT json_data FROM poll_data WHERE queue_name = ?"; return queryWithTransaction( GET_ALL_POLL_DATA, q -> q.addParameter(queueName).executeAndFetch(PollData.class)); } private List<String> findAllTasksInProgressInOrderOfArrival(TaskModel task, int limit) { String GET_IN_PROGRESS_TASKS_WITH_LIMIT = "SELECT task_id FROM task_in_progress WHERE task_def_name = ? ORDER BY created_on LIMIT ?"; return queryWithTransaction( GET_IN_PROGRESS_TASKS_WITH_LIMIT, q -> q.addParameter(task.getTaskDefName()) .addParameter(limit) .executeScalarList(String.class)); } private void validate(TaskModel task) { Preconditions.checkNotNull(task, "task object cannot be null"); Preconditions.checkNotNull(task.getTaskId(), "Task id cannot be null"); Preconditions.checkNotNull( task.getWorkflowInstanceId(), "Workflow instance id cannot be null"); Preconditions.checkNotNull( task.getReferenceTaskName(), "Task reference name cannot be null"); } }
8,201
0
Create_ds/conductor-community/persistence/mysql-persistence/src/main/java/com/netflix/conductor/mysql
Create_ds/conductor-community/persistence/mysql-persistence/src/main/java/com/netflix/conductor/mysql/dao/MySQLBaseDAO.java
/*
 * <p>
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
 * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations under the License.
 */
package com.netflix.conductor.mysql.dao;

import java.io.IOException;
import java.sql.Connection;
import java.sql.SQLException;
import java.time.Duration;
import java.time.Instant;
import java.util.Arrays;
import java.util.List;
import java.util.function.Consumer;

import javax.sql.DataSource;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.retry.support.RetryTemplate;

import com.netflix.conductor.core.exception.NonTransientException;
import com.netflix.conductor.mysql.util.*;

import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.common.collect.ImmutableList;

/**
 * Base class shared by the MySQL DAO implementations. Owns the {@link DataSource}, JSON
 * (de)serialization via {@link ObjectMapper}, and the transaction/retry plumbing
 * ({@link RetryTemplate}) that subclasses use through the protected helper methods below.
 */
public abstract class MySQLBaseDAO {

    // Stack frames from these classes are skipped when resolving the "calling method"
    // name used in trace-level transaction logging.
    private static final List<String> EXCLUDED_STACKTRACE_CLASS =
            ImmutableList.of(MySQLBaseDAO.class.getName(), Thread.class.getName());

    protected final Logger logger = LoggerFactory.getLogger(getClass());
    protected final ObjectMapper objectMapper;
    protected final DataSource dataSource;
    private final RetryTemplate retryTemplate;

    protected MySQLBaseDAO(
            RetryTemplate retryTemplate, ObjectMapper objectMapper, DataSource dataSource) {
        this.retryTemplate = retryTemplate;
        this.objectMapper = objectMapper;
        this.dataSource = dataSource;
    }

    /**
     * Lazily resolves the name of the first stack frame outside this class (and outside
     * {@link Thread}), so trace logs can attribute a transaction to its caller without paying
     * the stack-walk cost unless the log line is actually rendered.
     */
    protected final LazyToString getCallingMethod() {
        return new LazyToString(
                () ->
                        Arrays.stream(Thread.currentThread().getStackTrace())
                                .filter(
                                        ste ->
                                                !EXCLUDED_STACKTRACE_CLASS.contains(
                                                        ste.getClassName()))
                                .findFirst()
                                .map(StackTraceElement::getMethodName)
                                .orElseThrow(() -> new NullPointerException("Cannot find Caller")));
    }

    /** Serializes {@code value} to JSON; wraps serialization failures as NonTransientException. */
    protected String toJson(Object value) {
        try {
            return objectMapper.writeValueAsString(value);
        } catch (JsonProcessingException ex) {
            throw new NonTransientException(ex.getMessage(), ex);
        }
    }

    /** Deserializes {@code json} into {@code tClass}; wraps failures as NonTransientException. */
    protected <T> T readValue(String json, Class<T> tClass) {
        try {
            return objectMapper.readValue(json, tClass);
        } catch (IOException ex) {
            throw new NonTransientException(ex.getMessage(), ex);
        }
    }

    /** Generic-type variant of {@link #readValue(String, Class)}. */
    protected <T> T readValue(String json, TypeReference<T> typeReference) {
        try {
            return objectMapper.readValue(json, typeReference);
        } catch (IOException ex) {
            throw new NonTransientException(ex.getMessage(), ex);
        }
    }

    /**
     * Initialize a new transactional {@link Connection} from {@link #dataSource} and pass it to
     * {@literal function}.
     *
     * <p>Successful executions of {@literal function} will result in a commit and return of {@link
     * TransactionalFunction#apply(Connection)}.
     *
     * <p>If any {@link Throwable} thrown from {@code TransactionalFunction#apply(Connection)} will
     * result in a rollback of the transaction and will be wrapped in an {@link
     * NonTransientException} if it is not already one.
     *
     * <p>Generally this is used to wrap multiple {@link #execute(Connection, String,
     * ExecuteFunction)} or {@link #query(Connection, String, QueryFunction)} invocations that
     * produce some expected return value.
     *
     * @param function The function to apply with a new transactional {@link Connection}
     * @param <R> The return type.
     * @return The result of {@code TransactionalFunction#apply(Connection)}
     * @throws NonTransientException If any errors occur.
     */
    private <R> R getWithTransaction(final TransactionalFunction<R> function) {
        final Instant start = Instant.now();
        LazyToString callingMethod = getCallingMethod();
        logger.trace("{} : starting transaction", callingMethod);

        try (Connection tx = dataSource.getConnection()) {
            // Remember the pooled connection's autocommit mode so it can be restored before
            // the connection is returned to the pool.
            boolean previousAutoCommitMode = tx.getAutoCommit();
            tx.setAutoCommit(false);
            try {
                R result = function.apply(tx);
                tx.commit();
                return result;
            } catch (Throwable th) {
                tx.rollback();
                if (th instanceof NonTransientException) {
                    throw th;
                }
                throw new NonTransientException(th.getMessage(), th);
            } finally {
                tx.setAutoCommit(previousAutoCommitMode);
            }
        } catch (SQLException ex) {
            throw new NonTransientException(ex.getMessage(), ex);
        } finally {
            logger.trace(
                    "{} : took {}ms",
                    callingMethod,
                    Duration.between(start, Instant.now()).toMillis());
        }
    }

    /**
     * Runs {@code function} inside a transaction, retrying the whole transaction per the
     * injected {@link RetryTemplate}'s policy.
     *
     * <p>NOTE(review): failures are always re-wrapped in a new {@link NonTransientException},
     * even when the cause is already one — the extra wrapping layer may be intentional but
     * obscures the original exception; confirm before changing.
     */
    <R> R getWithRetriedTransactions(final TransactionalFunction<R> function) {
        try {
            return retryTemplate.execute(context -> getWithTransaction(function));
        } catch (Exception e) {
            throw new NonTransientException(e.getMessage(), e);
        }
    }

    /**
     * Like {@link #getWithTransaction(TransactionalFunction)} but swallows any failure thrown
     * by {@code function}: the transaction is rolled back, the message is logged at INFO, and
     * {@literal null} is returned instead of propagating the error. SQLExceptions from
     * acquiring/closing the connection still propagate as NonTransientException.
     */
    protected <R> R getWithTransactionWithOutErrorPropagation(TransactionalFunction<R> function) {
        Instant start = Instant.now();
        LazyToString callingMethod = getCallingMethod();
        logger.trace("{} : starting transaction", callingMethod);

        try (Connection tx = dataSource.getConnection()) {
            boolean previousAutoCommitMode = tx.getAutoCommit();
            tx.setAutoCommit(false);
            try {
                R result = function.apply(tx);
                tx.commit();
                return result;
            } catch (Throwable th) {
                tx.rollback();
                logger.info(th.getMessage());
                return null;
            } finally {
                tx.setAutoCommit(previousAutoCommitMode);
            }
        } catch (SQLException ex) {
            throw new NonTransientException(ex.getMessage(), ex);
        } finally {
            logger.trace(
                    "{} : took {}ms",
                    callingMethod,
                    Duration.between(start, Instant.now()).toMillis());
        }
    }

    /**
     * Wraps {@link #getWithRetriedTransactions(TransactionalFunction)} with no return value.
     *
     * <p>Generally this is used to wrap multiple {@link #execute(Connection, String,
     * ExecuteFunction)} or {@link #query(Connection, String, QueryFunction)} invocations that
     * produce no expected return value.
     *
     * @param consumer The {@link Consumer} callback to pass a transactional {@link Connection} to.
     * @throws NonTransientException If any errors occur.
     * @see #getWithRetriedTransactions(TransactionalFunction)
     */
    protected void withTransaction(Consumer<Connection> consumer) {
        getWithRetriedTransactions(
                connection -> {
                    consumer.accept(connection);
                    return null;
                });
    }

    /**
     * Initiate a new transaction and execute a {@link Query} within that context, then return the
     * results of {@literal function}.
     *
     * @param query The query string to prepare.
     * @param function The functional callback to pass a {@link Query} to.
     * @param <R> The expected return type of {@literal function}.
     * @return The results of applying {@literal function}.
     */
    protected <R> R queryWithTransaction(String query, QueryFunction<R> function) {
        return getWithRetriedTransactions(tx -> query(tx, query, function));
    }

    /**
     * Execute a {@link Query} within the context of a given transaction and return the results of
     * {@literal function}.
     *
     * @param tx The transactional {@link Connection} to use.
     * @param query The query string to prepare.
     * @param function The functional callback to pass a {@link Query} to.
     * @param <R> The expected return type of {@literal function}.
     * @return The results of applying {@literal function}.
     */
    protected <R> R query(Connection tx, String query, QueryFunction<R> function) {
        try (Query q = new Query(objectMapper, tx, query)) {
            return function.apply(q);
        } catch (SQLException ex) {
            throw new NonTransientException(ex.getMessage(), ex);
        }
    }

    /**
     * Execute a statement with no expected return value within a given transaction.
     *
     * @param tx The transactional {@link Connection} to use.
     * @param query The query string to prepare.
     * @param function The functional callback to pass a {@link Query} to.
     */
    protected void execute(Connection tx, String query, ExecuteFunction function) {
        try (Query q = new Query(objectMapper, tx, query)) {
            function.apply(q);
        } catch (SQLException ex) {
            throw new NonTransientException(ex.getMessage(), ex);
        }
    }

    /**
     * Instantiates a new transactional connection and invokes {@link #execute(Connection, String,
     * ExecuteFunction)}
     *
     * @param query The query string to prepare.
     * @param function The functional callback to pass a {@link Query} to.
     */
    protected void executeWithTransaction(String query, ExecuteFunction function) {
        withTransaction(tx -> execute(tx, query, function));
    }
}
8,202
0
Create_ds/conductor-community/persistence/mysql-persistence/src/main/java/com/netflix/conductor/mysql
Create_ds/conductor-community/persistence/mysql-persistence/src/main/java/com/netflix/conductor/mysql/dao/MySQLMetadataDAO.java
/*
 * <p>
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
 * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations under the License.
 */
package com.netflix.conductor.mysql.dao;

import java.sql.Connection;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

import javax.sql.DataSource;

import org.springframework.retry.support.RetryTemplate;

import com.netflix.conductor.common.metadata.events.EventHandler;
import com.netflix.conductor.common.metadata.tasks.TaskDef;
import com.netflix.conductor.common.metadata.workflow.WorkflowDef;
import com.netflix.conductor.core.exception.ConflictException;
import com.netflix.conductor.core.exception.NotFoundException;
import com.netflix.conductor.dao.EventHandlerDAO;
import com.netflix.conductor.dao.MetadataDAO;
import com.netflix.conductor.metrics.Monitors;
import com.netflix.conductor.mysql.config.MySQLProperties;

import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.common.base.Preconditions;

/**
 * MySQL-backed implementation of {@link MetadataDAO} and {@link EventHandlerDAO}: stores task
 * definitions, workflow definitions, and event handlers as JSON rows. Task defs are additionally
 * served from an in-memory cache refreshed on a background schedule.
 */
public class MySQLMetadataDAO extends MySQLBaseDAO implements MetadataDAO, EventHandlerDAO {

    // Read-mostly cache of task defs keyed by name; refreshed periodically from the DB.
    private final ConcurrentHashMap<String, TaskDef> taskDefCache = new ConcurrentHashMap<>();
    private static final String CLASS_NAME = MySQLMetadataDAO.class.getSimpleName();

    public MySQLMetadataDAO(
            RetryTemplate retryTemplate,
            ObjectMapper objectMapper,
            DataSource dataSource,
            MySQLProperties properties) {
        super(retryTemplate, objectMapper, dataSource);

        long cacheRefreshTime = properties.getTaskDefCacheRefreshInterval().getSeconds();
        // Background refresh keeps taskDefCache roughly in sync with the database.
        Executors.newSingleThreadScheduledExecutor()
                .scheduleWithFixedDelay(
                        this::refreshTaskDefs,
                        cacheRefreshTime,
                        cacheRefreshTime,
                        TimeUnit.SECONDS);
    }

    /** Validates and upserts the task def; returns the same instance. */
    @Override
    public TaskDef createTaskDef(TaskDef taskDef) {
        validate(taskDef);
        insertOrUpdateTaskDef(taskDef);
        return taskDef;
    }

    /** Same upsert path as {@link #createTaskDef(TaskDef)}. */
    @Override
    public TaskDef updateTaskDef(TaskDef taskDef) {
        validate(taskDef);
        insertOrUpdateTaskDef(taskDef);
        return taskDef;
    }

    /** Serves from {@link #taskDefCache}, falling back to the database on a miss. */
    @Override
    public TaskDef getTaskDef(String name) {
        Preconditions.checkNotNull(name, "TaskDef name cannot be null");
        TaskDef taskDef = taskDefCache.get(name);
        if (taskDef == null) {
            if (logger.isTraceEnabled()) {
                logger.trace("Cache miss: {}", name);
            }
            taskDef = getTaskDefFromDB(name);
        }

        return taskDef;
    }

    /** Always reads from the database (not the cache). */
    @Override
    public List<TaskDef> getAllTaskDefs() {
        return getWithRetriedTransactions(this::findAllTaskDefs);
    }

    /**
     * Deletes the task def row and evicts it from the cache.
     *
     * @throws NotFoundException when no row was deleted.
     */
    @Override
    public void removeTaskDef(String name) {
        final String DELETE_TASKDEF_QUERY = "DELETE FROM meta_task_def WHERE name = ?";

        executeWithTransaction(
                DELETE_TASKDEF_QUERY,
                q -> {
                    if (!q.addParameter(name).executeDelete()) {
                        throw new NotFoundException("No such task definition");
                    }

                    taskDefCache.remove(name);
                });
    }

    /**
     * Inserts a new workflow def version.
     *
     * @throws ConflictException if that (name, version) already exists.
     */
    @Override
    public void createWorkflowDef(WorkflowDef def) {
        validate(def);

        withTransaction(
                tx -> {
                    if (workflowExists(tx, def)) {
                        throw new ConflictException(
                                "Workflow with " + def.key() + " already exists!");
                    }

                    insertOrUpdateWorkflowDef(tx, def);
                });
    }

    /** Upserts the workflow def (insert when absent, update otherwise). */
    @Override
    public void updateWorkflowDef(WorkflowDef def) {
        validate(def);
        withTransaction(tx -> insertOrUpdateWorkflowDef(tx, def));
    }

    /** Returns the def whose version equals the stored latest_version marker, if any. */
    @Override
    public Optional<WorkflowDef> getLatestWorkflowDef(String name) {
        final String GET_LATEST_WORKFLOW_DEF_QUERY =
                "SELECT json_data FROM meta_workflow_def WHERE NAME = ? AND "
                        + "version = latest_version";

        return Optional.ofNullable(
                queryWithTransaction(
                        GET_LATEST_WORKFLOW_DEF_QUERY,
                        q -> q.addParameter(name).executeAndFetchFirst(WorkflowDef.class)));
    }

    /** Fetches one exact (name, version) workflow def, if present. */
    @Override
    public Optional<WorkflowDef> getWorkflowDef(String name, int version) {
        final String GET_WORKFLOW_DEF_QUERY =
                "SELECT json_data FROM meta_workflow_def WHERE NAME = ? AND version = ?";
        return Optional.ofNullable(
                queryWithTransaction(
                        GET_WORKFLOW_DEF_QUERY,
                        q ->
                                q.addParameter(name)
                                        .addParameter(version)
                                        .executeAndFetchFirst(WorkflowDef.class)));
    }

    /**
     * Deletes one (name, version), then repoints latest_version at the max remaining version.
     *
     * @throws NotFoundException when the row did not exist.
     */
    @Override
    public void removeWorkflowDef(String name, Integer version) {
        final String DELETE_WORKFLOW_QUERY =
                "DELETE from meta_workflow_def WHERE name = ? AND version = ?";

        withTransaction(
                tx -> {
                    // remove specified workflow
                    execute(
                            tx,
                            DELETE_WORKFLOW_QUERY,
                            q -> {
                                if (!q.addParameter(name).addParameter(version).executeDelete()) {
                                    throw new NotFoundException(
                                            String.format(
                                                    "No such workflow definition: %s version: %d",
                                                    name, version));
                                }
                            });
                    // reset latest version based on remaining rows for this workflow
                    Optional<Integer> maxVersion = getLatestVersion(tx, name);
                    maxVersion.ifPresent(newVersion -> updateLatestVersion(tx, name, newVersion));
                });
    }

    /** Lists the distinct workflow def names known to the store. */
    public List<String> findAll() {
        final String FIND_ALL_WORKFLOW_DEF_QUERY = "SELECT DISTINCT name FROM meta_workflow_def";
        return queryWithTransaction(
                FIND_ALL_WORKFLOW_DEF_QUERY, q -> q.executeAndFetch(String.class));
    }

    /** Every version of every workflow def, ordered by name then version. */
    @Override
    public List<WorkflowDef> getAllWorkflowDefs() {
        final String GET_ALL_WORKFLOW_DEF_QUERY =
                "SELECT json_data FROM meta_workflow_def ORDER BY name, version";

        return queryWithTransaction(
                GET_ALL_WORKFLOW_DEF_QUERY, q -> q.executeAndFetch(WorkflowDef.class));
    }

    /** Max-version row per name, computed via a correlated subquery (ignores latest_version). */
    @Override
    public List<WorkflowDef> getAllWorkflowDefsLatestVersions() {
        final String GET_ALL_WORKFLOW_DEF_LATEST_VERSIONS_QUERY =
                "SELECT json_data FROM meta_workflow_def wd WHERE wd.version = (SELECT MAX(version) FROM meta_workflow_def wd2 WHERE wd2.name = wd.name)";

        return queryWithTransaction(
                GET_ALL_WORKFLOW_DEF_LATEST_VERSIONS_QUERY,
                q -> q.executeAndFetch(WorkflowDef.class));
    }

    /** Rows whose version matches the stored latest_version marker. */
    public List<WorkflowDef> getAllLatest() {
        final String GET_ALL_LATEST_WORKFLOW_DEF_QUERY =
                "SELECT json_data FROM meta_workflow_def WHERE version = " + "latest_version";

        return queryWithTransaction(
                GET_ALL_LATEST_WORKFLOW_DEF_QUERY, q -> q.executeAndFetch(WorkflowDef.class));
    }

    /** Every version of one workflow def, ascending. */
    public List<WorkflowDef> getAllVersions(String name) {
        final String GET_ALL_VERSIONS_WORKFLOW_DEF_QUERY =
                "SELECT json_data FROM meta_workflow_def WHERE name = ? " + "ORDER BY version";

        return queryWithTransaction(
                GET_ALL_VERSIONS_WORKFLOW_DEF_QUERY,
                q -> q.addParameter(name).executeAndFetch(WorkflowDef.class));
    }

    /**
     * Inserts a new event handler.
     *
     * @throws ConflictException if one with the same name already exists.
     */
    @Override
    public void addEventHandler(EventHandler eventHandler) {
        Preconditions.checkNotNull(eventHandler.getName(), "EventHandler name cannot be null");

        final String INSERT_EVENT_HANDLER_QUERY =
                "INSERT INTO meta_event_handler (name, event, active, json_data) "
                        + "VALUES (?, ?, ?, ?)";

        withTransaction(
                tx -> {
                    if (getEventHandler(tx, eventHandler.getName()) != null) {
                        throw new ConflictException(
                                "EventHandler with name "
                                        + eventHandler.getName()
                                        + " already exists!");
                    }

                    execute(
                            tx,
                            INSERT_EVENT_HANDLER_QUERY,
                            q ->
                                    q.addParameter(eventHandler.getName())
                                            .addParameter(eventHandler.getEvent())
                                            .addParameter(eventHandler.isActive())
                                            .addJsonParameter(eventHandler)
                                            .executeUpdate());
                });
    }

    /**
     * Updates an existing event handler by name.
     *
     * @throws NotFoundException when no handler with that name exists.
     */
    @Override
    public void updateEventHandler(EventHandler eventHandler) {
        Preconditions.checkNotNull(eventHandler.getName(), "EventHandler name cannot be null");

        // @formatter:off
        final String UPDATE_EVENT_HANDLER_QUERY =
                "UPDATE meta_event_handler SET "
                        + "event = ?, active = ?, json_data = ?, "
                        + "modified_on = CURRENT_TIMESTAMP WHERE name = ?";
        // @formatter:on

        withTransaction(
                tx -> {
                    EventHandler existing = getEventHandler(tx, eventHandler.getName());
                    if (existing == null) {
                        throw new NotFoundException(
                                "EventHandler with name " + eventHandler.getName() + " not found!");
                    }

                    execute(
                            tx,
                            UPDATE_EVENT_HANDLER_QUERY,
                            q ->
                                    q.addParameter(eventHandler.getEvent())
                                            .addParameter(eventHandler.isActive())
                                            .addJsonParameter(eventHandler)
                                            .addParameter(eventHandler.getName())
                                            .executeUpdate());
                });
    }

    /**
     * Deletes an event handler by name.
     *
     * @throws NotFoundException when no handler with that name exists.
     */
    @Override
    public void removeEventHandler(String name) {
        final String DELETE_EVENT_HANDLER_QUERY = "DELETE FROM meta_event_handler WHERE name = ?";

        withTransaction(
                tx -> {
                    EventHandler existing = getEventHandler(tx, name);
                    if (existing == null) {
                        throw new NotFoundException(
                                "EventHandler with name " + name + " not found!");
                    }

                    execute(
                            tx,
                            DELETE_EVENT_HANDLER_QUERY,
                            q -> q.addParameter(name).executeDelete());
                });
    }

    /** Lists every stored event handler. */
    @Override
    public List<EventHandler> getAllEventHandlers() {
        final String READ_ALL_EVENT_HANDLER_QUERY = "SELECT json_data FROM meta_event_handler";
        return queryWithTransaction(
                READ_ALL_EVENT_HANDLER_QUERY, q -> q.executeAndFetch(EventHandler.class));
    }

    /** Handlers registered for {@code event}; filters out inactive ones when activeOnly is set. */
    @Override
    public List<EventHandler> getEventHandlersForEvent(String event, boolean activeOnly) {
        final String READ_ALL_EVENT_HANDLER_BY_EVENT_QUERY =
                "SELECT json_data FROM meta_event_handler WHERE event = ?";
        return queryWithTransaction(
                READ_ALL_EVENT_HANDLER_BY_EVENT_QUERY,
                q -> {
                    q.addParameter(event);
                    return q.executeAndFetch(
                            rs -> {
                                List<EventHandler> handlers = new ArrayList<>();
                                while (rs.next()) {
                                    EventHandler h = readValue(rs.getString(1), EventHandler.class);
                                    if (!activeOnly || h.isActive()) {
                                        handlers.add(h);
                                    }
                                }

                                return handlers;
                            });
                });
    }

    /**
     * Use {@link Preconditions} to check for required {@link TaskDef} fields, throwing a Runtime
     * exception if validations fail.
     *
     * @param taskDef The {@code TaskDef} to check.
     */
    private void validate(TaskDef taskDef) {
        Preconditions.checkNotNull(taskDef, "TaskDef object cannot be null");
        Preconditions.checkNotNull(taskDef.getName(), "TaskDef name cannot be null");
    }

    /**
     * Use {@link Preconditions} to check for required {@link WorkflowDef} fields, throwing a
     * Runtime exception if validations fail.
     *
     * @param def The {@code WorkflowDef} to check.
     */
    private void validate(WorkflowDef def) {
        Preconditions.checkNotNull(def, "WorkflowDef object cannot be null");
        Preconditions.checkNotNull(def.getName(), "WorkflowDef name cannot be null");
    }

    /**
     * Retrieve a {@link EventHandler} by {@literal name}.
     *
     * @param connection The {@link Connection} to use for queries.
     * @param name The {@code EventHandler} name to look for.
     * @return {@literal null} if nothing is found, otherwise the {@code EventHandler}.
     */
    private EventHandler getEventHandler(Connection connection, String name) {
        final String READ_ONE_EVENT_HANDLER_QUERY =
                "SELECT json_data FROM meta_event_handler WHERE name = ?";

        return query(
                connection,
                READ_ONE_EVENT_HANDLER_QUERY,
                q -> q.addParameter(name).executeAndFetchFirst(EventHandler.class));
    }

    /**
     * Check if a {@link WorkflowDef} with the same {@literal name} and {@literal version} already
     * exist.
     *
     * @param connection The {@link Connection} to use for queries.
     * @param def The {@code WorkflowDef} to check for.
     * @return {@literal true} if a {@code WorkflowDef} already exists with the same values.
     */
    private Boolean workflowExists(Connection connection, WorkflowDef def) {
        final String CHECK_WORKFLOW_DEF_EXISTS_QUERY =
                "SELECT COUNT(*) FROM meta_workflow_def WHERE name = ? AND " + "version = ?";

        return query(
                connection,
                CHECK_WORKFLOW_DEF_EXISTS_QUERY,
                q -> q.addParameter(def.getName()).addParameter(def.getVersion()).exists());
    }

    /**
     * Return the latest version that exists for the provided {@code name}.
     *
     * @param tx The {@link Connection} to use for queries.
     * @param name The {@code name} to check for.
     * @return {@code Optional.empty()} if no versions exist, otherwise the max {@link
     *     WorkflowDef#getVersion} found.
     */
    private Optional<Integer> getLatestVersion(Connection tx, String name) {
        final String GET_LATEST_WORKFLOW_DEF_VERSION =
                "SELECT max(version) AS version FROM meta_workflow_def WHERE " + "name = ?";

        Integer val =
                query(
                        tx,
                        GET_LATEST_WORKFLOW_DEF_VERSION,
                        q -> {
                            q.addParameter(name);
                            return q.executeAndFetch(
                                    rs -> {
                                        if (!rs.next()) {
                                            return null;
                                        }

                                        return rs.getInt(1);
                                    });
                        });

        return Optional.ofNullable(val);
    }

    /**
     * Update the latest version for the workflow with name {@code WorkflowDef} to the version
     * provided in {@literal version}.
     *
     * @param tx The {@link Connection} to use for queries.
     * @param name Workflow def name to update
     * @param version The new latest {@code version} value.
     */
    private void updateLatestVersion(Connection tx, String name, int version) {
        final String UPDATE_WORKFLOW_DEF_LATEST_VERSION_QUERY =
                "UPDATE meta_workflow_def SET latest_version = ? " + "WHERE name = ?";

        execute(
                tx,
                UPDATE_WORKFLOW_DEF_LATEST_VERSION_QUERY,
                q -> q.addParameter(version).addParameter(name).executeUpdate());
    }

    /**
     * Upserts one (name, version) workflow def row, then recomputes latest_version as the max of
     * the previously-stored latest and this def's version.
     */
    private void insertOrUpdateWorkflowDef(Connection tx, WorkflowDef def) {
        final String INSERT_WORKFLOW_DEF_QUERY =
                "INSERT INTO meta_workflow_def (name, version, json_data) VALUES (?," + " ?, ?)";

        Optional<Integer> version = getLatestVersion(tx, def.getName());
        if (!workflowExists(tx, def)) {
            execute(
                    tx,
                    INSERT_WORKFLOW_DEF_QUERY,
                    q ->
                            q.addParameter(def.getName())
                                    .addParameter(def.getVersion())
                                    .addJsonParameter(def)
                                    .executeUpdate());
        } else {
            // @formatter:off
            final String UPDATE_WORKFLOW_DEF_QUERY =
                    "UPDATE meta_workflow_def "
                            + "SET json_data = ?, modified_on = CURRENT_TIMESTAMP "
                            + "WHERE name = ? AND version = ?";
            // @formatter:on

            execute(
                    tx,
                    UPDATE_WORKFLOW_DEF_QUERY,
                    q ->
                            q.addJsonParameter(def)
                                    .addParameter(def.getName())
                                    .addParameter(def.getVersion())
                                    .executeUpdate());
        }
        int maxVersion = def.getVersion();
        if (version.isPresent() && version.get() > def.getVersion()) {
            maxVersion = version.get();
        }

        updateLatestVersion(tx, def.getName(), maxVersion);
    }

    /**
     * Query persistence for all defined {@link TaskDef} data, and cache it in {@link
     * #taskDefCache}.
     *
     * <p>NOTE(review): readers of taskDefCache do not synchronize, so a reader can observe the
     * window between clear() and putAll() as a cache miss (which falls back to the DB) —
     * presumably acceptable; confirm if strict cache consistency is required.
     */
    private void refreshTaskDefs() {
        try {
            withTransaction(
                    tx -> {
                        Map<String, TaskDef> map = new HashMap<>();
                        findAllTaskDefs(tx).forEach(taskDef -> map.put(taskDef.getName(), taskDef));
                        synchronized (taskDefCache) {
                            taskDefCache.clear();
                            taskDefCache.putAll(map);
                        }

                        if (logger.isTraceEnabled()) {
                            logger.trace("Refreshed {} TaskDefs", taskDefCache.size());
                        }
                    });
        } catch (Exception e) {
            Monitors.error(CLASS_NAME, "refreshTaskDefs");
            logger.error("refresh TaskDefs failed ", e);
        }
    }

    /**
     * Query persistence for all defined {@link TaskDef} data.
     *
     * @param tx The {@link Connection} to use for queries.
     * @return A new {@code List<TaskDef>} with all the {@code TaskDef} data that was retrieved.
     */
    private List<TaskDef> findAllTaskDefs(Connection tx) {
        final String READ_ALL_TASKDEF_QUERY = "SELECT json_data FROM meta_task_def";

        return query(tx, READ_ALL_TASKDEF_QUERY, q -> q.executeAndFetch(TaskDef.class));
    }

    /**
     * Explicitly retrieves a {@link TaskDef} from persistence, avoiding {@link #taskDefCache}.
     *
     * @param name The name of the {@code TaskDef} to query for.
     * @return {@literal null} if nothing is found, otherwise the {@code TaskDef}.
     */
    private TaskDef getTaskDefFromDB(String name) {
        final String READ_ONE_TASKDEF_QUERY = "SELECT json_data FROM meta_task_def WHERE name = ?";

        return queryWithTransaction(
                READ_ONE_TASKDEF_QUERY,
                q -> q.addParameter(name).executeAndFetchFirst(TaskDef.class));
    }

    /**
     * Upserts the task def (update-first to avoid burning auto-increment values on no-op
     * inserts), refreshes the cache entry, and returns the def's name.
     */
    private String insertOrUpdateTaskDef(TaskDef taskDef) {
        final String UPDATE_TASKDEF_QUERY =
                "UPDATE meta_task_def SET json_data = ?, modified_on = CURRENT_TIMESTAMP WHERE name = ?";

        final String INSERT_TASKDEF_QUERY =
                "INSERT INTO meta_task_def (name, json_data) VALUES (?, ?)";

        return getWithRetriedTransactions(
                tx -> {
                    execute(
                            tx,
                            UPDATE_TASKDEF_QUERY,
                            update -> {
                                int result =
                                        update.addJsonParameter(taskDef)
                                                .addParameter(taskDef.getName())
                                                .executeUpdate();
                                if (result == 0) {
                                    execute(
                                            tx,
                                            INSERT_TASKDEF_QUERY,
                                            insert ->
                                                    insert.addParameter(taskDef.getName())
                                                            .addJsonParameter(taskDef)
                                                            .executeUpdate());
                                }
                            });
                    taskDefCache.put(taskDef.getName(), taskDef);
                    return taskDef.getName();
                });
    }
}
8,203
0
Create_ds/conductor-community/task/kafka/src/test/java/com/netflix/conductor/core/execution
Create_ds/conductor-community/task/kafka/src/test/java/com/netflix/conductor/core/execution/mapper/KafkaPublishTaskMapperTest.java
/* * <p> * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * <p> * http://www.apache.org/licenses/LICENSE-2.0 * <p> * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. */ package com.netflix.conductor.core.execution.mapper; import java.util.HashMap; import java.util.List; import org.junit.Before; import org.junit.Rule; import org.junit.Test; import org.junit.rules.ExpectedException; import com.netflix.conductor.common.metadata.tasks.TaskDef; import com.netflix.conductor.common.metadata.tasks.TaskType; import com.netflix.conductor.common.metadata.workflow.WorkflowDef; import com.netflix.conductor.common.metadata.workflow.WorkflowTask; import com.netflix.conductor.core.utils.IDGenerator; import com.netflix.conductor.core.utils.ParametersUtils; import com.netflix.conductor.dao.MetadataDAO; import com.netflix.conductor.model.TaskModel; import com.netflix.conductor.model.WorkflowModel; import static org.junit.Assert.assertEquals; import static org.mockito.Mockito.mock; public class KafkaPublishTaskMapperTest { private IDGenerator idGenerator; private KafkaPublishTaskMapper kafkaTaskMapper; @Rule public ExpectedException expectedException = ExpectedException.none(); @Before public void setUp() { ParametersUtils parametersUtils = mock(ParametersUtils.class); MetadataDAO metadataDAO = mock(MetadataDAO.class); kafkaTaskMapper = new KafkaPublishTaskMapper(parametersUtils, metadataDAO); idGenerator = new IDGenerator(); } @Test public void getMappedTasks() { // Given WorkflowTask workflowTask = new WorkflowTask(); workflowTask.setName("kafka_task"); workflowTask.setType(TaskType.KAFKA_PUBLISH.name()); 
workflowTask.setTaskDefinition(new TaskDef("kafka_task")); String taskId = idGenerator.generate(); String retriedTaskId = idGenerator.generate(); WorkflowModel workflow = new WorkflowModel(); WorkflowDef workflowDef = new WorkflowDef(); workflow.setWorkflowDefinition(workflowDef); TaskMapperContext taskMapperContext = TaskMapperContext.newBuilder() .withWorkflowModel(workflow) .withTaskDefinition(new TaskDef()) .withWorkflowTask(workflowTask) .withTaskInput(new HashMap<>()) .withRetryCount(0) .withRetryTaskId(retriedTaskId) .withTaskId(taskId) .build(); // when List<TaskModel> mappedTasks = kafkaTaskMapper.getMappedTasks(taskMapperContext); // Then assertEquals(1, mappedTasks.size()); assertEquals(TaskType.KAFKA_PUBLISH.name(), mappedTasks.get(0).getTaskType()); } @Test public void getMappedTasks_WithoutTaskDef() { // Given WorkflowTask workflowTask = new WorkflowTask(); workflowTask.setName("kafka_task"); workflowTask.setType(TaskType.KAFKA_PUBLISH.name()); String taskId = idGenerator.generate(); String retriedTaskId = idGenerator.generate(); WorkflowModel workflow = new WorkflowModel(); WorkflowDef workflowDef = new WorkflowDef(); workflow.setWorkflowDefinition(workflowDef); TaskDef taskdefinition = new TaskDef(); String testExecutionNameSpace = "testExecutionNameSpace"; taskdefinition.setExecutionNameSpace(testExecutionNameSpace); String testIsolationGroupId = "testIsolationGroupId"; taskdefinition.setIsolationGroupId(testIsolationGroupId); TaskMapperContext taskMapperContext = TaskMapperContext.newBuilder() .withWorkflowModel(workflow) .withTaskDefinition(taskdefinition) .withWorkflowTask(workflowTask) .withTaskInput(new HashMap<>()) .withRetryCount(0) .withRetryTaskId(retriedTaskId) .withTaskId(taskId) .build(); // when List<TaskModel> mappedTasks = kafkaTaskMapper.getMappedTasks(taskMapperContext); // Then assertEquals(1, mappedTasks.size()); assertEquals(TaskType.KAFKA_PUBLISH.name(), mappedTasks.get(0).getTaskType()); assertEquals(testExecutionNameSpace, 
mappedTasks.get(0).getExecutionNameSpace()); assertEquals(testIsolationGroupId, mappedTasks.get(0).getIsolationGroupId()); } }
8,204
0
Create_ds/conductor-community/task/kafka/src/test/java/com/netflix/conductor/contribs/tasks
Create_ds/conductor-community/task/kafka/src/test/java/com/netflix/conductor/contribs/tasks/kafka/KafkaPublishTaskTest.java
/*
 * <p>
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
 * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations under the License.
 */
package com.netflix.conductor.contribs.tasks.kafka;

import java.time.Duration;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;

import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.common.serialization.IntegerSerializer;
import org.apache.kafka.common.serialization.LongSerializer;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.test.context.ContextConfiguration;
import org.springframework.test.context.junit4.SpringRunner;

import com.netflix.conductor.common.config.TestObjectMapperConfiguration;
import com.netflix.conductor.core.execution.WorkflowExecutor;
import com.netflix.conductor.model.TaskModel;
import com.netflix.conductor.model.WorkflowModel;

import com.fasterxml.jackson.databind.ObjectMapper;

import static org.junit.Assert.assertEquals;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

/**
 * Unit tests for {@link KafkaPublishTask}: input validation, publish success/failure paths,
 * async-complete handling, and key serialization.
 */
@SuppressWarnings({"unchecked", "rawtypes"})
@ContextConfiguration(classes = {TestObjectMapperConfiguration.class})
@RunWith(SpringRunner.class)
public class KafkaPublishTaskTest {

    @Autowired private ObjectMapper objectMapper;

    // Task input lacks the 'kafka_request' entry entirely -> validation fails the task.
    @Test
    public void missingRequest_Fail() {
        KafkaPublishTask kafkaPublishTask =
                new KafkaPublishTask(getKafkaProducerManager(), objectMapper);
        TaskModel task = new TaskModel();
        kafkaPublishTask.start(mock(WorkflowModel.class), task, mock(WorkflowExecutor.class));
        assertEquals(TaskModel.Status.FAILED, task.getStatus());
    }

    // Request has servers + topic but no value payload -> validation fails the task.
    @Test
    public void missingValue_Fail() {

        TaskModel task = new TaskModel();
        KafkaPublishTask.Input input = new KafkaPublishTask.Input();
        input.setBootStrapServers("localhost:9092");
        input.setTopic("testTopic");

        task.getInputData().put(KafkaPublishTask.REQUEST_PARAMETER_NAME, input);

        KafkaPublishTask kPublishTask =
                new KafkaPublishTask(getKafkaProducerManager(), objectMapper);
        kPublishTask.start(mock(WorkflowModel.class), task, mock(WorkflowExecutor.class));
        assertEquals(TaskModel.Status.FAILED, task.getStatus());
    }

    // Request has value + topic but no bootstrap servers -> validation fails the task.
    @Test
    public void missingBootStrapServers_Fail() {

        TaskModel task = new TaskModel();
        KafkaPublishTask.Input input = new KafkaPublishTask.Input();

        Map<String, Object> value = new HashMap<>();
        input.setValue(value);
        input.setTopic("testTopic");
        task.getInputData().put(KafkaPublishTask.REQUEST_PARAMETER_NAME, input);

        KafkaPublishTask kPublishTask =
                new KafkaPublishTask(getKafkaProducerManager(), objectMapper);
        kPublishTask.start(mock(WorkflowModel.class), task, mock(WorkflowExecutor.class));
        assertEquals(TaskModel.Status.FAILED, task.getStatus());
    }

    // The future returned by producer.send() throws ExecutionException on get() ->
    // task fails and the exception message is surfaced in reasonForIncompletion.
    @Test
    public void kafkaPublishExecutionException_Fail()
            throws ExecutionException, InterruptedException {

        TaskModel task = getTask();

        KafkaProducerManager producerManager = mock(KafkaProducerManager.class);
        KafkaPublishTask kafkaPublishTask = new KafkaPublishTask(producerManager, objectMapper);

        Producer producer = mock(Producer.class);

        when(producerManager.getProducer(any())).thenReturn(producer);
        Future publishingFuture = mock(Future.class);
        when(producer.send(any())).thenReturn(publishingFuture);

        ExecutionException executionException = mock(ExecutionException.class);

        when(executionException.getMessage()).thenReturn("Execution exception");
        when(publishingFuture.get()).thenThrow(executionException);

        kafkaPublishTask.start(mock(WorkflowModel.class), task, mock(WorkflowExecutor.class));
        assertEquals(TaskModel.Status.FAILED, task.getStatus());
        assertEquals(
                "Failed to invoke kafka task due to: Execution exception",
                task.getReasonForIncompletion());
    }

    // producer.send() itself throws a RuntimeException -> task fails with its message.
    @Test
    public void kafkaPublishUnknownException_Fail() {

        TaskModel task = getTask();

        KafkaProducerManager producerManager = mock(KafkaProducerManager.class);
        KafkaPublishTask kPublishTask = new KafkaPublishTask(producerManager, objectMapper);

        Producer producer = mock(Producer.class);

        when(producerManager.getProducer(any())).thenReturn(producer);
        when(producer.send(any())).thenThrow(new RuntimeException("Unknown exception"));

        kPublishTask.start(mock(WorkflowModel.class), task, mock(WorkflowExecutor.class));
        assertEquals(TaskModel.Status.FAILED, task.getStatus());
        assertEquals(
                "Failed to invoke kafka task due to: Unknown exception",
                task.getReasonForIncompletion());
    }

    // Happy path without asyncComplete -> task transitions to COMPLETED.
    @Test
    public void kafkaPublishSuccess_Completed() {

        TaskModel task = getTask();

        KafkaProducerManager producerManager = mock(KafkaProducerManager.class);
        KafkaPublishTask kPublishTask = new KafkaPublishTask(producerManager, objectMapper);

        Producer producer = mock(Producer.class);

        when(producerManager.getProducer(any())).thenReturn(producer);
        when(producer.send(any())).thenReturn(mock(Future.class));

        kPublishTask.start(mock(WorkflowModel.class), task, mock(WorkflowExecutor.class));
        assertEquals(TaskModel.Status.COMPLETED, task.getStatus());
    }

    // Happy path with asyncComplete=true -> task stays IN_PROGRESS awaiting external completion.
    @Test
    public void kafkaPublishSuccess_AsyncComplete() {

        TaskModel task = getTask();
        task.getInputData().put("asyncComplete", true);

        KafkaProducerManager producerManager = mock(KafkaProducerManager.class);
        KafkaPublishTask kPublishTask = new KafkaPublishTask(producerManager, objectMapper);

        Producer producer = mock(Producer.class);

        when(producerManager.getProducer(any())).thenReturn(producer);
        when(producer.send(any())).thenReturn(mock(Future.class));

        kPublishTask.start(mock(WorkflowModel.class), task, mock(WorkflowExecutor.class));
        assertEquals(TaskModel.Status.IN_PROGRESS, task.getStatus());
    }

    // Builds a TaskModel whose input passes all of KafkaPublishTask's validation checks.
    private TaskModel getTask() {
        TaskModel task = new TaskModel();
        KafkaPublishTask.Input input = new KafkaPublishTask.Input();
        input.setBootStrapServers("localhost:9092");

        Map<String, Object> value = new HashMap<>();

        value.put("input_key1", "value1");
        value.put("input_key2", 45.3d);

        input.setValue(value);
        input.setTopic("testTopic");
        task.getInputData().put(KafkaPublishTask.REQUEST_PARAMETER_NAME, input);
        return task;
    }

    // IntegerSerializer configured -> the string key is parsed to an Integer.
    @Test
    public void integerSerializer_integerObject() {
        KafkaPublishTask kPublishTask =
                new KafkaPublishTask(getKafkaProducerManager(), objectMapper);
        KafkaPublishTask.Input input = new KafkaPublishTask.Input();
        input.setKeySerializer(IntegerSerializer.class.getCanonicalName());
        input.setKey(String.valueOf(Integer.MAX_VALUE));
        assertEquals(kPublishTask.getKey(input), Integer.MAX_VALUE);
    }

    // LongSerializer configured -> the string key is parsed to a Long.
    @Test
    public void longSerializer_longObject() {
        KafkaPublishTask kPublishTask =
                new KafkaPublishTask(getKafkaProducerManager(), objectMapper);
        KafkaPublishTask.Input input = new KafkaPublishTask.Input();
        input.setKeySerializer(LongSerializer.class.getCanonicalName());
        input.setKey(String.valueOf(Long.MAX_VALUE));
        assertEquals(kPublishTask.getKey(input), Long.MAX_VALUE);
    }

    // Default (String) serializer -> the key is passed through as a String.
    @Test
    public void noSerializer_StringObject() {
        KafkaPublishTask kPublishTask =
                new KafkaPublishTask(getKafkaProducerManager(), objectMapper);
        KafkaPublishTask.Input input = new KafkaPublishTask.Input();
        input.setKey("testStringKey");
        assertEquals(kPublishTask.getKey(input), "testStringKey");
    }

    // NOTE(review): constructor argument semantics are not visible from this file —
    // presumably (requestTimeout, maxBlock, cacheSize, cacheExpiry); confirm against
    // KafkaProducerManager before relying on these values.
    private KafkaProducerManager getKafkaProducerManager() {
        return new KafkaProducerManager(
                Duration.ofMillis(100), Duration.ofMillis(500), 120000, Duration.ofMillis(10));
    }
}
8,205
0
Create_ds/conductor-community/task/kafka/src/test/java/com/netflix/conductor/contribs/tasks
Create_ds/conductor-community/task/kafka/src/test/java/com/netflix/conductor/contribs/tasks/kafka/KafkaProducerManagerTest.java
/* * <p> * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * <p> * http://www.apache.org/licenses/LICENSE-2.0 * <p> * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. */ package com.netflix.conductor.contribs.tasks.kafka; import java.time.Duration; import java.util.Properties; import org.apache.kafka.clients.producer.Producer; import org.apache.kafka.clients.producer.ProducerConfig; import org.apache.kafka.common.serialization.LongSerializer; import org.junit.Test; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotNull; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; public class KafkaProducerManagerTest { @Test public void testRequestTimeoutSetFromDefault() { KafkaProducerManager manager = new KafkaProducerManager( Duration.ofMillis(100), Duration.ofMillis(500), 10, Duration.ofMillis(120000)); KafkaPublishTask.Input input = getInput(); Properties props = manager.getProducerProperties(input); assertEquals(props.getProperty(ProducerConfig.REQUEST_TIMEOUT_MS_CONFIG), "100"); } @Test public void testRequestTimeoutSetFromInput() { KafkaProducerManager manager = new KafkaProducerManager( Duration.ofMillis(100), Duration.ofMillis(500), 10, Duration.ofMillis(120000)); KafkaPublishTask.Input input = getInput(); input.setRequestTimeoutMs(200); Properties props = manager.getProducerProperties(input); assertEquals(props.getProperty(ProducerConfig.REQUEST_TIMEOUT_MS_CONFIG), "200"); } @Test public void testRequestTimeoutSetFromConfig() { KafkaProducerManager manager = new 
KafkaProducerManager( Duration.ofMillis(150), Duration.ofMillis(500), 10, Duration.ofMillis(120000)); KafkaPublishTask.Input input = getInput(); Properties props = manager.getProducerProperties(input); assertEquals(props.getProperty(ProducerConfig.REQUEST_TIMEOUT_MS_CONFIG), "150"); } @SuppressWarnings("rawtypes") @Test(expected = RuntimeException.class) public void testExecutionException() { KafkaProducerManager manager = new KafkaProducerManager( Duration.ofMillis(150), Duration.ofMillis(500), 10, Duration.ofMillis(120000)); KafkaPublishTask.Input input = getInput(); Producer producer = manager.getProducer(input); assertNotNull(producer); } @SuppressWarnings("rawtypes") @Test public void testCacheInvalidation() { KafkaProducerManager manager = new KafkaProducerManager( Duration.ofMillis(150), Duration.ofMillis(500), 0, Duration.ofMillis(0)); KafkaPublishTask.Input input = getInput(); input.setBootStrapServers(""); Properties props = manager.getProducerProperties(input); Producer producerMock = mock(Producer.class); Producer producer = manager.getFromCache(props, () -> producerMock); assertNotNull(producer); verify(producerMock, times(1)).close(); } @Test public void testMaxBlockMsFromConfig() { KafkaProducerManager manager = new KafkaProducerManager( Duration.ofMillis(150), Duration.ofMillis(500), 10, Duration.ofMillis(120000)); KafkaPublishTask.Input input = getInput(); Properties props = manager.getProducerProperties(input); assertEquals(props.getProperty(ProducerConfig.MAX_BLOCK_MS_CONFIG), "500"); } @Test public void testMaxBlockMsFromInput() { KafkaProducerManager manager = new KafkaProducerManager( Duration.ofMillis(150), Duration.ofMillis(500), 10, Duration.ofMillis(120000)); KafkaPublishTask.Input input = getInput(); input.setMaxBlockMs(600); Properties props = manager.getProducerProperties(input); assertEquals(props.getProperty(ProducerConfig.MAX_BLOCK_MS_CONFIG), "600"); } private KafkaPublishTask.Input getInput() { KafkaPublishTask.Input input = new 
KafkaPublishTask.Input(); input.setTopic("testTopic"); input.setValue("TestMessage"); input.setKeySerializer(LongSerializer.class.getCanonicalName()); input.setBootStrapServers("servers"); return input; } }
8,206
0
Create_ds/conductor-community/task/kafka/src/main/java/com/netflix/conductor/core/execution
Create_ds/conductor-community/task/kafka/src/main/java/com/netflix/conductor/core/execution/mapper/KafkaPublishTaskMapper.java
/* * <p> * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * <p> * http://www.apache.org/licenses/LICENSE-2.0 * <p> * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. */ package com.netflix.conductor.core.execution.mapper; import java.util.Collections; import java.util.List; import java.util.Map; import java.util.Objects; import java.util.Optional; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.stereotype.Component; import com.netflix.conductor.common.metadata.tasks.TaskDef; import com.netflix.conductor.common.metadata.tasks.TaskType; import com.netflix.conductor.common.metadata.workflow.WorkflowDef; import com.netflix.conductor.common.metadata.workflow.WorkflowTask; import com.netflix.conductor.core.exception.TerminateWorkflowException; import com.netflix.conductor.core.utils.ParametersUtils; import com.netflix.conductor.dao.MetadataDAO; import com.netflix.conductor.model.TaskModel; import com.netflix.conductor.model.WorkflowModel; @Component public class KafkaPublishTaskMapper implements TaskMapper { public static final Logger LOGGER = LoggerFactory.getLogger(KafkaPublishTaskMapper.class); private final ParametersUtils parametersUtils; private final MetadataDAO metadataDAO; @Autowired public KafkaPublishTaskMapper(ParametersUtils parametersUtils, MetadataDAO metadataDAO) { this.parametersUtils = parametersUtils; this.metadataDAO = metadataDAO; } @Override public String getTaskType() { return TaskType.KAFKA_PUBLISH.name(); } /** * This method maps a {@link WorkflowTask} of type {@link 
TaskType#KAFKA_PUBLISH} to a {@link * TaskModel} in a {@link TaskModel.Status#SCHEDULED} state * * @param taskMapperContext: A wrapper class containing the {@link WorkflowTask}, {@link * WorkflowDef}, {@link WorkflowModel} and a string representation of the TaskId * @return a List with just one Kafka task * @throws TerminateWorkflowException In case if the task definition does not exist */ @Override public List<TaskModel> getMappedTasks(TaskMapperContext taskMapperContext) throws TerminateWorkflowException { LOGGER.debug("TaskMapperContext {} in KafkaPublishTaskMapper", taskMapperContext); WorkflowTask workflowTask = taskMapperContext.getWorkflowTask(); WorkflowModel workflowModel = taskMapperContext.getWorkflowModel(); String taskId = taskMapperContext.getTaskId(); int retryCount = taskMapperContext.getRetryCount(); TaskDef taskDefinition = Optional.ofNullable(taskMapperContext.getTaskDefinition()) .orElseGet(() -> metadataDAO.getTaskDef(workflowTask.getName())); Map<String, Object> input = parametersUtils.getTaskInputV2( workflowTask.getInputParameters(), workflowModel, taskId, taskDefinition); TaskModel kafkaPublishTask = taskMapperContext.createTaskModel(); kafkaPublishTask.setInputData(input); kafkaPublishTask.setStatus(TaskModel.Status.SCHEDULED); kafkaPublishTask.setRetryCount(retryCount); kafkaPublishTask.setCallbackAfterSeconds(workflowTask.getStartDelay()); if (Objects.nonNull(taskDefinition)) { kafkaPublishTask.setExecutionNameSpace(taskDefinition.getExecutionNameSpace()); kafkaPublishTask.setIsolationGroupId(taskDefinition.getIsolationGroupId()); kafkaPublishTask.setRateLimitPerFrequency(taskDefinition.getRateLimitPerFrequency()); kafkaPublishTask.setRateLimitFrequencyInSeconds( taskDefinition.getRateLimitFrequencyInSeconds()); } return Collections.singletonList(kafkaPublishTask); } }
8,207
0
Create_ds/conductor-community/task/kafka/src/main/java/com/netflix/conductor/contribs/tasks
Create_ds/conductor-community/task/kafka/src/main/java/com/netflix/conductor/contribs/tasks/kafka/KafkaPublishTask.java
/*
 * <p>
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
 * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations under the License.
 */
package com.netflix.conductor.contribs.tasks.kafka;

import java.nio.charset.StandardCharsets;
import java.time.Instant;
import java.util.HashMap;
import java.util.Map;
import java.util.Objects;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.stream.Collectors;

import org.apache.commons.lang3.StringUtils;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;
import org.apache.kafka.common.header.Header;
import org.apache.kafka.common.header.internals.RecordHeader;
import org.apache.kafka.common.serialization.IntegerSerializer;
import org.apache.kafka.common.serialization.LongSerializer;
import org.apache.kafka.common.serialization.StringSerializer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Component;

import com.netflix.conductor.core.execution.WorkflowExecutor;
import com.netflix.conductor.core.execution.tasks.WorkflowSystemTask;
import com.netflix.conductor.core.utils.Utils;
import com.netflix.conductor.model.TaskModel;
import com.netflix.conductor.model.WorkflowModel;

import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.common.annotations.VisibleForTesting;

import static com.netflix.conductor.common.metadata.tasks.TaskType.TASK_TYPE_KAFKA_PUBLISH;
@Component(TASK_TYPE_KAFKA_PUBLISH) public class KafkaPublishTask extends WorkflowSystemTask { private static final Logger LOGGER = LoggerFactory.getLogger(KafkaPublishTask.class); static final String REQUEST_PARAMETER_NAME = "kafka_request"; private static final String MISSING_REQUEST = "Missing Kafka request. Task input MUST have a '" + REQUEST_PARAMETER_NAME + "' key with KafkaTask.Input as value. See documentation for KafkaTask for required input parameters"; private static final String MISSING_BOOT_STRAP_SERVERS = "No boot strap servers specified"; private static final String MISSING_KAFKA_TOPIC = "Missing Kafka topic. See documentation for KafkaTask for required input parameters"; private static final String MISSING_KAFKA_VALUE = "Missing Kafka value. See documentation for KafkaTask for required input parameters"; private static final String FAILED_TO_INVOKE = "Failed to invoke kafka task due to: "; private final ObjectMapper objectMapper; private final String requestParameter; private final KafkaProducerManager producerManager; @Autowired public KafkaPublishTask(KafkaProducerManager clientManager, ObjectMapper objectMapper) { super(TASK_TYPE_KAFKA_PUBLISH); this.requestParameter = REQUEST_PARAMETER_NAME; this.producerManager = clientManager; this.objectMapper = objectMapper; LOGGER.info("KafkaTask initialized."); } @Override public void start(WorkflowModel workflow, TaskModel task, WorkflowExecutor executor) { long taskStartMillis = Instant.now().toEpochMilli(); task.setWorkerId(Utils.getServerId()); Object request = task.getInputData().get(requestParameter); if (Objects.isNull(request)) { markTaskAsFailed(task, MISSING_REQUEST); return; } Input input = objectMapper.convertValue(request, Input.class); if (StringUtils.isBlank(input.getBootStrapServers())) { markTaskAsFailed(task, MISSING_BOOT_STRAP_SERVERS); return; } if (StringUtils.isBlank(input.getTopic())) { markTaskAsFailed(task, MISSING_KAFKA_TOPIC); return; } if (Objects.isNull(input.getValue())) { 
markTaskAsFailed(task, MISSING_KAFKA_VALUE); return; } try { Future<RecordMetadata> recordMetaDataFuture = kafkaPublish(input); try { recordMetaDataFuture.get(); if (isAsyncComplete(task)) { task.setStatus(TaskModel.Status.IN_PROGRESS); } else { task.setStatus(TaskModel.Status.COMPLETED); } long timeTakenToCompleteTask = Instant.now().toEpochMilli() - taskStartMillis; LOGGER.debug("Published message {}, Time taken {}", input, timeTakenToCompleteTask); } catch (ExecutionException ec) { LOGGER.error( "Failed to invoke kafka task: {} - execution exception ", task.getTaskId(), ec); markTaskAsFailed(task, FAILED_TO_INVOKE + ec.getMessage()); } } catch (Exception e) { LOGGER.error( "Failed to invoke kafka task:{} for input {} - unknown exception", task.getTaskId(), input, e); markTaskAsFailed(task, FAILED_TO_INVOKE + e.getMessage()); } } private void markTaskAsFailed(TaskModel task, String reasonForIncompletion) { task.setReasonForIncompletion(reasonForIncompletion); task.setStatus(TaskModel.Status.FAILED); } /** * @param input Kafka Request * @return Future for execution. 
*/
    @SuppressWarnings({"unchecked", "rawtypes"})
    private Future<RecordMetadata> kafkaPublish(Input input) throws Exception {

        long startPublishingEpochMillis = Instant.now().toEpochMilli();

        // Producer is obtained from a cache keyed on the producer Properties; timing is
        // logged so slow producer creation can be spotted in the logs.
        Producer producer = producerManager.getProducer(input);

        long timeTakenToCreateProducer = Instant.now().toEpochMilli() - startPublishingEpochMillis;

        LOGGER.debug("Time taken getting producer {}", timeTakenToCreateProducer);

        Object key = getKey(input);

        // Copy the task-input headers onto the Kafka record.
        // NOTE(review): getBytes() uses the platform default charset here — presumably
        // UTF-8 is intended; confirm before relying on non-ASCII header values.
        Iterable<Header> headers =
                input.getHeaders().entrySet().stream()
                        .map(
                                header ->
                                        new RecordHeader(
                                                header.getKey(),
                                                String.valueOf(header.getValue()).getBytes()))
                        .collect(Collectors.toList());

        // Partition and timestamp are both null, so Kafka chooses the partition (by key)
        // and assigns the broker/producer timestamp. The value is serialized to a JSON
        // string via the object mapper.
        ProducerRecord rec =
                new ProducerRecord(
                        input.getTopic(),
                        null,
                        null,
                        key,
                        objectMapper.writeValueAsString(input.getValue()),
                        headers);

        Future send = producer.send(rec);

        long timeTakenToPublish = Instant.now().toEpochMilli() - startPublishingEpochMillis;

        LOGGER.debug("Time taken publishing {}", timeTakenToPublish);

        // The future is returned to the caller; send() is not awaited here.
        return send;
    }

    /**
     * Converts the raw input key to the type matching the configured key serializer.
     * Falls back to a String key for any serializer other than Long/Integer.
     */
    @VisibleForTesting
    Object getKey(Input input) {
        String keySerializer = input.getKeySerializer();

        if (LongSerializer.class.getCanonicalName().equals(keySerializer)) {
            return Long.parseLong(String.valueOf(input.getKey()));
        } else if (IntegerSerializer.class.getCanonicalName().equals(keySerializer)) {
            return Integer.parseInt(String.valueOf(input.getKey()));
        } else {
            return String.valueOf(input.getKey());
        }
    }

    // Synchronous execute path is a no-op: this task only runs asynchronously
    // (see isAsync()), so execute always reports "not done".
    @Override
    public boolean execute(WorkflowModel workflow, TaskModel task, WorkflowExecutor executor) {
        return false;
    }

    @Override
    public void cancel(WorkflowModel workflow, TaskModel task, WorkflowExecutor executor) {
        task.setStatus(TaskModel.Status.CANCELED);
    }

    @Override
    public boolean isAsync() {
        return true;
    }

    /**
     * Input payload for the kafka_publish task: broker address, topic, key/value,
     * optional timeouts, custom headers, and the key serializer class name
     * (defaults to StringSerializer).
     */
    public static class Input {

        public static final String STRING_SERIALIZER = StringSerializer.class.getCanonicalName();

        // Arbitrary record headers supplied by the workflow definition.
        private Map<String, Object> headers = new HashMap<>();

        private String bootStrapServers;

        private Object key;

        private Object value;

        // Optional overrides for producer request.timeout.ms / max.block.ms.
        private Integer requestTimeoutMs;

        private Integer maxBlockMs;

        private String topic;

        private String keySerializer = STRING_SERIALIZER;

        public Map<String, Object> getHeaders() {
            return headers;
        }

        public void setHeaders(Map<String, Object> headers) {
            this.headers = headers;
        }

        public String getBootStrapServers() {
            return bootStrapServers;
        }

        public void setBootStrapServers(String bootStrapServers) {
            this.bootStrapServers = bootStrapServers;
        }

        public Object getKey() {
            return key;
        }

        public void setKey(Object key) {
            this.key = key;
        }

        public Object getValue() {
            return value;
        }

        public void setValue(Object value) {
            this.value = value;
        }

        public Integer getRequestTimeoutMs() {
            return requestTimeoutMs;
        }

        public void setRequestTimeoutMs(Integer requestTimeoutMs) {
            this.requestTimeoutMs = requestTimeoutMs;
        }

        public String getTopic() {
            return topic;
        }

        public void setTopic(String topic) {
            this.topic = topic;
        }

        public String getKeySerializer() {
            return keySerializer;
        }

        public void setKeySerializer(String keySerializer) {
            this.keySerializer = keySerializer;
        }

        public Integer getMaxBlockMs() {
            return maxBlockMs;
        }

        public void setMaxBlockMs(Integer maxBlockMs) {
            this.maxBlockMs = maxBlockMs;
        }

        @Override
        public String toString() {
            return "Input{"
                    + "headers="
                    + headers
                    + ", bootStrapServers='"
                    + bootStrapServers
                    + '\''
                    + ", key="
                    + key
                    + ", value="
                    + value
                    + ", requestTimeoutMs="
                    + requestTimeoutMs
                    + ", maxBlockMs="
                    + maxBlockMs
                    + ", topic='"
                    + topic
                    + '\''
                    + ", keySerializer='"
                    + keySerializer
                    + '\''
                    + '}';
        }
    }
}
8,208
0
Create_ds/conductor-community/task/kafka/src/main/java/com/netflix/conductor/contribs/tasks
Create_ds/conductor-community/task/kafka/src/main/java/com/netflix/conductor/contribs/tasks/kafka/KafkaProducerManager.java
/*
 * <p>
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
 * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations under the License.
 */
package com.netflix.conductor.contribs.tasks.kafka;

import java.time.Duration;
import java.util.Objects;
import java.util.Properties;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.stereotype.Component;

import com.google.common.annotations.VisibleForTesting;
import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;
import com.google.common.cache.RemovalListener;

/**
 * Creates and caches Kafka {@link Producer} instances for the kafka-publish task.
 *
 * <p>Producers are cached by their full {@link Properties} configuration, so tasks that
 * target the same brokers with the same settings share one producer. Evicted producers
 * are closed by the removal listener to avoid leaking network resources.
 */
@SuppressWarnings("rawtypes")
@Component
public class KafkaProducerManager {

    private static final Logger LOGGER = LoggerFactory.getLogger(KafkaProducerManager.class);

    // Defaults (as millisecond strings) applied when the task input does not override them.
    private final String requestTimeoutConfig;
    private final String maxBlockMsConfig;

    // Cache key is the complete producer config; value is a live producer.
    private final Cache<Properties, Producer> kafkaProducerCache;

    private static final String STRING_SERIALIZER =
            "org.apache.kafka.common.serialization.StringSerializer";

    // Close producers when they are evicted (size limit or idle expiry) so sockets
    // and buffers are released promptly.
    private static final RemovalListener<Properties, Producer> LISTENER =
            notification -> {
                if (notification.getValue() != null) {
                    notification.getValue().close();
                    LOGGER.info("Closed producer for {}", notification.getKey());
                }
            };

    /**
     * @param requestTimeout default Kafka request timeout
     * @param maxBlock default max time send() may block
     * @param cacheSize maximum number of cached producers
     * @param cacheTime idle time after which a cached producer is evicted and closed
     */
    @Autowired
    public KafkaProducerManager(
            @Value("${conductor.tasks.kafka-publish.requestTimeout:100ms}") Duration requestTimeout,
            @Value("${conductor.tasks.kafka-publish.maxBlock:500ms}") Duration maxBlock,
            @Value("${conductor.tasks.kafka-publish.cacheSize:10}") int cacheSize,
            @Value("${conductor.tasks.kafka-publish.cacheTime:120000ms}") Duration cacheTime) {
        this.requestTimeoutConfig = String.valueOf(requestTimeout.toMillis());
        this.maxBlockMsConfig = String.valueOf(maxBlock.toMillis());
        this.kafkaProducerCache =
                CacheBuilder.newBuilder()
                        .removalListener(LISTENER)
                        .maximumSize(cacheSize)
                        .expireAfterAccess(cacheTime.toMillis(), TimeUnit.MILLISECONDS)
                        .build();
    }

    /**
     * Returns a producer configured for the given task input, creating and caching one
     * if no producer with an identical configuration exists yet.
     */
    public Producer getProducer(KafkaPublishTask.Input input) {
        Properties configProperties = getProducerProperties(input);
        return getFromCache(configProperties, () -> new KafkaProducer(configProperties));
    }

    /**
     * Cache lookup with loader.
     *
     * @throws RuntimeException wrapping the underlying producer-creation failure
     */
    @VisibleForTesting
    Producer getFromCache(Properties configProperties, Callable<Producer> createProducerCallable) {
        try {
            return kafkaProducerCache.get(configProperties, createProducerCallable);
        } catch (ExecutionException e) {
            // Unwrap the cache's ExecutionException so callers see the real cause of the
            // producer-creation failure instead of a doubly-wrapped exception.
            Throwable cause = e.getCause();
            throw new RuntimeException(cause != null ? cause : e);
        }
    }

    /**
     * Builds the producer configuration from the task input, falling back to the
     * manager-wide defaults for request timeout and max block time. Values are always
     * serialized with the String serializer (the task JSON-encodes the payload).
     */
    @VisibleForTesting
    Properties getProducerProperties(KafkaPublishTask.Input input) {

        Properties configProperties = new Properties();
        configProperties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, input.getBootStrapServers());

        configProperties.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, input.getKeySerializer());

        String requestTimeoutMs = requestTimeoutConfig;

        if (Objects.nonNull(input.getRequestTimeoutMs())) {
            requestTimeoutMs = String.valueOf(input.getRequestTimeoutMs());
        }

        String maxBlockMs = maxBlockMsConfig;

        if (Objects.nonNull(input.getMaxBlockMs())) {
            maxBlockMs = String.valueOf(input.getMaxBlockMs());
        }

        configProperties.put(ProducerConfig.REQUEST_TIMEOUT_MS_CONFIG, requestTimeoutMs);
        configProperties.put(ProducerConfig.MAX_BLOCK_MS_CONFIG, maxBlockMs);
        configProperties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, STRING_SERIALIZER);
        return configProperties;
    }
}
8,209
0
Create_ds/conductor-community/index/es7-persistence/src/test/java/com/netflix/conductor/es7/dao
Create_ds/conductor-community/index/es7-persistence/src/test/java/com/netflix/conductor/es7/dao/index/TestElasticSearchRestDAOV7.java
/*
 * <p>
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
 * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations under the License.
 */
package com.netflix.conductor.es7.dao.index;

import java.io.IOException;
import java.text.SimpleDateFormat;
import java.util.*;
import java.util.function.Supplier;

import org.joda.time.DateTime;
import org.junit.Test;

import com.netflix.conductor.common.metadata.events.EventExecution;
import com.netflix.conductor.common.metadata.events.EventHandler;
import com.netflix.conductor.common.metadata.tasks.TaskExecLog;
import com.netflix.conductor.common.run.TaskSummary;
import com.netflix.conductor.common.run.Workflow.WorkflowStatus;
import com.netflix.conductor.common.run.WorkflowSummary;
import com.netflix.conductor.core.events.queue.Message;
import com.netflix.conductor.es7.utils.TestUtils;

import com.google.common.collect.ImmutableMap;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;

/**
 * Integration tests for {@link ElasticSearchRestDAOV7} against a real Elasticsearch
 * container (started by the base class). Indexing is eventually consistent, so tests
 * poll with {@link #tryFindResults(Supplier, int)} / {@link #tryGetCount(Supplier, int)}
 * rather than asserting immediately after a write.
 */
public class TestElasticSearchRestDAOV7 extends ElasticSearchRestDaoBaseTest {

    // Year-month-week pattern used as the rolling suffix of log/message/event indices.
    // NOTE(review): SimpleDateFormat is not thread-safe; this shared static is fine only
    // because JUnit runs these tests single-threaded.
    private static final SimpleDateFormat SIMPLE_DATE_FORMAT = new SimpleDateFormat("yyyyMMWW");

    private static final String INDEX_PREFIX = "conductor";
    private static final String WORKFLOW_DOC_TYPE = "workflow";
    private static final String TASK_DOC_TYPE = "task";
    private static final String MSG_DOC_TYPE = "message";
    private static final String EVENT_DOC_TYPE = "event";
    private static final String LOG_DOC_TYPE = "task_log";

    // True when the given index exists on the cluster (HEAD /{index}).
    private boolean indexExists(final String index) throws IOException {
        return indexDAO.doesResourceExist("/" + index);
    }

    // True when the given mapping exists under the index.
    // NOTE(review): not referenced by any test in this file — appears unused.
    private boolean doesMappingExist(final String index, final String mappingName)
            throws IOException {
        return indexDAO.doesResourceExist("/" + index + "/_mapping/" + mappingName);
    }

    /** Verifies that setup() created all expected indices and index templates. */
    @Test
    public void assertInitialSetup() throws IOException {
        SIMPLE_DATE_FORMAT.setTimeZone(TimeZone.getTimeZone("GMT"));

        String workflowIndex = INDEX_PREFIX + "_" + WORKFLOW_DOC_TYPE;
        String taskIndex = INDEX_PREFIX + "_" + TASK_DOC_TYPE;

        // Rolling indices carry the current date-based suffix.
        String taskLogIndex =
                INDEX_PREFIX + "_" + LOG_DOC_TYPE + "_" + SIMPLE_DATE_FORMAT.format(new Date());
        String messageIndex =
                INDEX_PREFIX + "_" + MSG_DOC_TYPE + "_" + SIMPLE_DATE_FORMAT.format(new Date());
        String eventIndex =
                INDEX_PREFIX + "_" + EVENT_DOC_TYPE + "_" + SIMPLE_DATE_FORMAT.format(new Date());

        assertTrue("Index 'conductor_workflow' should exist", indexExists(workflowIndex));
        assertTrue("Index 'conductor_task' should exist", indexExists(taskIndex));

        assertTrue("Index '" + taskLogIndex + "' should exist", indexExists(taskLogIndex));
        assertTrue("Index '" + messageIndex + "' should exist", indexExists(messageIndex));
        assertTrue("Index '" + eventIndex + "' should exist", indexExists(eventIndex));

        assertTrue(
                "Index template for 'message' should exist",
                indexDAO.doesResourceExist("/_template/template_" + MSG_DOC_TYPE));
        assertTrue(
                "Index template for 'event' should exist",
                indexDAO.doesResourceExist("/_template/template_" + EVENT_DOC_TYPE));
        assertTrue(
                "Index template for 'task_log' should exist",
                indexDAO.doesResourceExist("/_template/template_" + LOG_DOC_TYPE));
    }

    @Test
    public void shouldIndexWorkflow() {
        WorkflowSummary workflowSummary =
                TestUtils.loadWorkflowSnapshot(objectMapper, "workflow_summary");
        indexDAO.indexWorkflow(workflowSummary);

        assertWorkflowSummary(workflowSummary.getWorkflowId(), workflowSummary);
    }

    @Test
    public void shouldIndexWorkflowAsync() throws Exception {
        WorkflowSummary workflowSummary =
                TestUtils.loadWorkflowSnapshot(objectMapper, "workflow_summary");
        indexDAO.asyncIndexWorkflow(workflowSummary).get();

        assertWorkflowSummary(workflowSummary.getWorkflowId(), workflowSummary);
    }

    @Test
    public void shouldRemoveWorkflow() {
        WorkflowSummary workflowSummary =
                TestUtils.loadWorkflowSnapshot(objectMapper, "workflow_summary");
        indexDAO.indexWorkflow(workflowSummary);

        // wait for workflow to be indexed
        List<String> workflows =
                tryFindResults(() -> searchWorkflows(workflowSummary.getWorkflowId()), 1);
        assertEquals(1, workflows.size());

        indexDAO.removeWorkflow(workflowSummary.getWorkflowId());

        workflows = tryFindResults(() -> searchWorkflows(workflowSummary.getWorkflowId()), 0);

        assertTrue("Workflow was not removed.", workflows.isEmpty());
    }

    @Test
    public void shouldAsyncRemoveWorkflow() throws Exception {
        WorkflowSummary workflowSummary =
                TestUtils.loadWorkflowSnapshot(objectMapper, "workflow_summary");
        indexDAO.indexWorkflow(workflowSummary);

        // wait for workflow to be indexed
        List<String> workflows =
                tryFindResults(() -> searchWorkflows(workflowSummary.getWorkflowId()), 1);
        assertEquals(1, workflows.size());

        indexDAO.asyncRemoveWorkflow(workflowSummary.getWorkflowId()).get();

        workflows = tryFindResults(() -> searchWorkflows(workflowSummary.getWorkflowId()), 0);

        assertTrue("Workflow was not removed.", workflows.isEmpty());
    }

    @Test
    public void shouldUpdateWorkflow() {
        WorkflowSummary workflowSummary =
                TestUtils.loadWorkflowSnapshot(objectMapper, "workflow_summary");
        indexDAO.indexWorkflow(workflowSummary);

        // Partial update of a single field, then verify the full document.
        indexDAO.updateWorkflow(
                workflowSummary.getWorkflowId(),
                new String[] {"status"},
                new Object[] {WorkflowStatus.COMPLETED});

        workflowSummary.setStatus(WorkflowStatus.COMPLETED);
        assertWorkflowSummary(workflowSummary.getWorkflowId(), workflowSummary);
    }

    @Test
    public void shouldAsyncUpdateWorkflow() throws Exception {
        WorkflowSummary workflowSummary =
                TestUtils.loadWorkflowSnapshot(objectMapper, "workflow_summary");
        indexDAO.indexWorkflow(workflowSummary);

        indexDAO.asyncUpdateWorkflow(
                        workflowSummary.getWorkflowId(),
                        new String[] {"status"},
                        new Object[] {WorkflowStatus.FAILED})
                .get();

        workflowSummary.setStatus(WorkflowStatus.FAILED);
        assertWorkflowSummary(workflowSummary.getWorkflowId(), workflowSummary);
    }

    @Test
    public void shouldIndexTask() {
        TaskSummary taskSummary = TestUtils.loadTaskSnapshot(objectMapper, "task_summary");
        indexDAO.indexTask(taskSummary);

        List<String> tasks = tryFindResults(() -> searchTasks(taskSummary));

        assertEquals(taskSummary.getTaskId(), tasks.get(0));
    }

    @Test
    public void shouldIndexTaskAsync() throws Exception {
        TaskSummary taskSummary = TestUtils.loadTaskSnapshot(objectMapper, "task_summary");
        indexDAO.asyncIndexTask(taskSummary).get();

        List<String> tasks = tryFindResults(() -> searchTasks(taskSummary));

        assertEquals(taskSummary.getTaskId(), tasks.get(0));
    }

    @Test
    public void shouldRemoveTask() {
        WorkflowSummary workflowSummary =
                TestUtils.loadWorkflowSnapshot(objectMapper, "workflow_summary");
        indexDAO.indexWorkflow(workflowSummary);

        // wait for workflow to be indexed
        tryFindResults(() -> searchWorkflows(workflowSummary.getWorkflowId()), 1);

        // Task removal requires the task to belong to an indexed workflow.
        TaskSummary taskSummary =
                TestUtils.loadTaskSnapshot(
                        objectMapper, "task_summary", workflowSummary.getWorkflowId());
        indexDAO.indexTask(taskSummary);

        // Wait for the task to be indexed
        List<String> tasks = tryFindResults(() -> searchTasks(taskSummary), 1);

        indexDAO.removeTask(workflowSummary.getWorkflowId(), taskSummary.getTaskId());

        tasks = tryFindResults(() -> searchTasks(taskSummary), 0);

        assertTrue("Task was not removed.", tasks.isEmpty());
    }

    @Test
    public void shouldAsyncRemoveTask() throws Exception {
        WorkflowSummary workflowSummary =
                TestUtils.loadWorkflowSnapshot(objectMapper, "workflow_summary");
        indexDAO.indexWorkflow(workflowSummary);

        // wait for workflow to be indexed
        tryFindResults(() -> searchWorkflows(workflowSummary.getWorkflowId()), 1);

        TaskSummary taskSummary =
                TestUtils.loadTaskSnapshot(
                        objectMapper, "task_summary", workflowSummary.getWorkflowId());
        indexDAO.indexTask(taskSummary);

        // Wait for the task to be indexed
        List<String> tasks = tryFindResults(() -> searchTasks(taskSummary), 1);

        indexDAO.asyncRemoveTask(workflowSummary.getWorkflowId(), taskSummary.getTaskId()).get();

        tasks = tryFindResults(() -> searchTasks(taskSummary), 0);

        assertTrue("Task was not removed.", tasks.isEmpty());
    }

    /** Removal must be a no-op when the task is not associated with the given workflow. */
    @Test
    public void shouldNotRemoveTaskWhenNotAssociatedWithWorkflow() {
        TaskSummary taskSummary = TestUtils.loadTaskSnapshot(objectMapper, "task_summary");
        indexDAO.indexTask(taskSummary);

        // Wait for the task to be indexed
        List<String> tasks = tryFindResults(() -> searchTasks(taskSummary), 1);

        indexDAO.removeTask("InvalidWorkflow", taskSummary.getTaskId());

        tasks = tryFindResults(() -> searchTasks(taskSummary), 0);

        assertFalse("Task was removed.", tasks.isEmpty());
    }

    @Test
    public void shouldNotAsyncRemoveTaskWhenNotAssociatedWithWorkflow() throws Exception {
        TaskSummary taskSummary = TestUtils.loadTaskSnapshot(objectMapper, "task_summary");
        indexDAO.indexTask(taskSummary);

        // Wait for the task to be indexed
        List<String> tasks = tryFindResults(() -> searchTasks(taskSummary), 1);

        indexDAO.asyncRemoveTask("InvalidWorkflow", taskSummary.getTaskId()).get();

        tasks = tryFindResults(() -> searchTasks(taskSummary), 0);

        assertFalse("Task was removed.", tasks.isEmpty());
    }

    @Test
    public void shouldAddTaskExecutionLogs() {
        List<TaskExecLog> logs = new ArrayList<>();
        String taskId = uuid();
        logs.add(createLog(taskId, "log1"));
        logs.add(createLog(taskId, "log2"));
        logs.add(createLog(taskId, "log3"));

        indexDAO.addTaskExecutionLogs(logs);

        List<TaskExecLog> indexedLogs =
                tryFindResults(() -> indexDAO.getTaskExecutionLogs(taskId), 3);

        assertEquals(3, indexedLogs.size());

        assertTrue("Not all logs was indexed", indexedLogs.containsAll(logs));
    }

    @Test
    public void shouldAddTaskExecutionLogsAsync() throws Exception {
        List<TaskExecLog> logs = new ArrayList<>();
        String taskId = uuid();
        logs.add(createLog(taskId, "log1"));
        logs.add(createLog(taskId, "log2"));
        logs.add(createLog(taskId, "log3"));

        indexDAO.asyncAddTaskExecutionLogs(logs).get();

        List<TaskExecLog> indexedLogs =
                tryFindResults(() -> indexDAO.getTaskExecutionLogs(taskId), 3);

        assertEquals(3, indexedLogs.size());

        assertTrue("Not all logs was indexed", indexedLogs.containsAll(logs));
    }

    @Test
    public void shouldAddMessage() {
        String queue = "queue";
        Message message1 = new Message(uuid(), "payload1", null);
        Message message2 = new Message(uuid(), "payload2", null);

        indexDAO.addMessage(queue, message1);
        indexDAO.addMessage(queue, message2);

        List<Message> indexedMessages = tryFindResults(() -> indexDAO.getMessages(queue), 2);

        assertEquals(2, indexedMessages.size());

        assertTrue(
                "Not all messages was indexed",
                indexedMessages.containsAll(Arrays.asList(message1, message2)));
    }

    @Test
    public void shouldAddEventExecution() {
        String event = "event";
        EventExecution execution1 = createEventExecution(event);
        EventExecution execution2 = createEventExecution(event);

        indexDAO.addEventExecution(execution1);
        indexDAO.addEventExecution(execution2);

        List<EventExecution> indexedExecutions =
                tryFindResults(() -> indexDAO.getEventExecutions(event), 2);

        assertEquals(2, indexedExecutions.size());

        assertTrue(
                "Not all event executions was indexed",
                indexedExecutions.containsAll(Arrays.asList(execution1, execution2)));
    }

    @Test
    public void shouldAsyncAddEventExecution() throws Exception {
        String event = "event2";
        EventExecution execution1 = createEventExecution(event);
        EventExecution execution2 = createEventExecution(event);

        indexDAO.asyncAddEventExecution(execution1).get();
        indexDAO.asyncAddEventExecution(execution2).get();

        List<EventExecution> indexedExecutions =
                tryFindResults(() -> indexDAO.getEventExecutions(event), 2);

        assertEquals(2, indexedExecutions.size());

        assertTrue(
                "Not all event executions was indexed",
                indexedExecutions.containsAll(Arrays.asList(execution1, execution2)));
    }

    /** The loaded template source must match the expected resource byte-for-byte. */
    @Test
    public void shouldAddIndexPrefixToIndexTemplate() throws Exception {
        String json = TestUtils.loadJsonResource("expected_template_task_log");

        String content = indexDAO.loadTypeMappingSource("/template_task_log.json");

        assertEquals(json, content);
    }

    /**
     * Only workflows updated within the [lastModifiedHoursAgoFrom, lastModifiedHoursAgoTo]
     * window should be returned: the 2h-old one is too old, the just-updated one too recent.
     */
    @Test
    public void shouldSearchRecentRunningWorkflows() throws Exception {
        WorkflowSummary oldWorkflow =
                TestUtils.loadWorkflowSnapshot(objectMapper, "workflow_summary");
        oldWorkflow.setStatus(WorkflowStatus.RUNNING);
        oldWorkflow.setUpdateTime(getFormattedTime(new DateTime().minusHours(2).toDate()));

        WorkflowSummary recentWorkflow =
                TestUtils.loadWorkflowSnapshot(objectMapper, "workflow_summary");
        recentWorkflow.setStatus(WorkflowStatus.RUNNING);
        recentWorkflow.setUpdateTime(getFormattedTime(new DateTime().minusHours(1).toDate()));

        WorkflowSummary tooRecentWorkflow =
                TestUtils.loadWorkflowSnapshot(objectMapper, "workflow_summary");
        tooRecentWorkflow.setStatus(WorkflowStatus.RUNNING);
        tooRecentWorkflow.setUpdateTime(getFormattedTime(new DateTime().toDate()));

        indexDAO.indexWorkflow(oldWorkflow);
        indexDAO.indexWorkflow(recentWorkflow);
        indexDAO.indexWorkflow(tooRecentWorkflow);

        // Give Elasticsearch time to refresh before searching.
        Thread.sleep(1000);

        List<String> ids = indexDAO.searchRecentRunningWorkflows(2, 1);

        assertEquals(1, ids.size());
        assertEquals(recentWorkflow.getWorkflowId(), ids.get(0));
    }

    @Test
    public void shouldCountWorkflows() {
        // 1100 documents exceeds the default 10k-less page sizes enough to exercise counting.
        int counts = 1100;
        for (int i = 0; i < counts; i++) {
            WorkflowSummary workflowSummary =
                    TestUtils.loadWorkflowSnapshot(objectMapper, "workflow_summary");
            indexDAO.indexWorkflow(workflowSummary);
        }

        // wait for workflow to be indexed
        long result = tryGetCount(() -> getWorkflowCount("template_workflow", "RUNNING"), counts);
        assertEquals(counts, result);
    }

    // Polls countFunction (up to 20 x 100ms) until it returns resultsCount;
    // returns the last observed value either way.
    private long tryGetCount(Supplier<Long> countFunction, int resultsCount) {
        long result = 0;
        for (int i = 0; i < 20; i++) {
            result = countFunction.get();
            if (result == resultsCount) {
                return result;
            }
            try {
                Thread.sleep(100);
            } catch (InterruptedException e) {
                throw new RuntimeException(e.getMessage(), e);
            }
        }
        return result;
    }

    // Get total workflow counts given the name and status
    private long getWorkflowCount(String workflowName, String status) {
        return indexDAO.getWorkflowCount(
                "status=\"" + status + "\" AND workflowType=\"" + workflowName + "\"", "*");
    }

    // Field-by-field comparison of the indexed workflow document against the summary.
    private void assertWorkflowSummary(String workflowId, WorkflowSummary summary) {
        assertEquals(summary.getWorkflowType(), indexDAO.get(workflowId, "workflowType"));
        assertEquals(String.valueOf(summary.getVersion()), indexDAO.get(workflowId, "version"));
        assertEquals(summary.getWorkflowId(), indexDAO.get(workflowId, "workflowId"));
        assertEquals(summary.getCorrelationId(), indexDAO.get(workflowId, "correlationId"));
        assertEquals(summary.getStartTime(), indexDAO.get(workflowId, "startTime"));
        assertEquals(summary.getUpdateTime(), indexDAO.get(workflowId, "updateTime"));
        assertEquals(summary.getEndTime(), indexDAO.get(workflowId, "endTime"));
        assertEquals(summary.getStatus().name(), indexDAO.get(workflowId, "status"));
        assertEquals(summary.getInput(), indexDAO.get(workflowId, "input"));
        assertEquals(summary.getOutput(), indexDAO.get(workflowId, "output"));
        assertEquals(
                summary.getReasonForIncompletion(),
                indexDAO.get(workflowId, "reasonForIncompletion"));
        assertEquals(
                String.valueOf(summary.getExecutionTime()),
                indexDAO.get(workflowId, "executionTime"));
        assertEquals(summary.getEvent(), indexDAO.get(workflowId, "event"));
        assertEquals(
                summary.getFailedReferenceTaskNames(),
                indexDAO.get(workflowId, "failedReferenceTaskNames"));
    }

    // Formats a Date as ISO-8601 with milliseconds in GMT (the format used by updateTime).
    private String getFormattedTime(Date time) {
        SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss.SSS'Z'");
        sdf.setTimeZone(TimeZone.getTimeZone("GMT"));
        return sdf.format(time);
    }

    private <T> List<T> tryFindResults(Supplier<List<T>> searchFunction) {
        return tryFindResults(searchFunction, 1);
    }

    // Polls searchFunction (up to 20 x 100ms) until it returns exactly resultsCount
    // results; returns the last result either way.
    private <T> List<T> tryFindResults(Supplier<List<T>> searchFunction, int resultsCount) {
        List<T> result = Collections.emptyList();
        for (int i = 0; i < 20; i++) {
            result = searchFunction.get();
            if (result.size() == resultsCount) {
                return result;
            }
            try {
                Thread.sleep(100);
            } catch (InterruptedException e) {
                throw new RuntimeException(e.getMessage(), e);
            }
        }
        return result;
    }

    private List<String> searchWorkflows(String workflowId) {
        return indexDAO.searchWorkflows(
                        "", "workflowId:\"" + workflowId + "\"", 0, 100, Collections.emptyList())
                .getResults();
    }

    private List<String> searchTasks(TaskSummary taskSummary) {
        return indexDAO.searchTasks(
                        "",
                        "workflowId:\"" + taskSummary.getWorkflowId() + "\"",
                        0,
                        100,
                        Collections.emptyList())
                .getResults();
    }

    private TaskExecLog createLog(String taskId, String log) {
        TaskExecLog taskExecLog = new TaskExecLog(log);
        taskExecLog.setTaskId(taskId);
        return taskExecLog;
    }

    private EventExecution createEventExecution(String event) {
        EventExecution execution = new EventExecution(uuid(), uuid());
        execution.setName("name");
        execution.setEvent(event);
        execution.setCreated(System.currentTimeMillis());
        execution.setStatus(EventExecution.Status.COMPLETED);
        execution.setAction(EventHandler.Action.Type.start_workflow);
        execution.setOutput(ImmutableMap.of("a", 1, "b", 2, "c", 3));
        return execution;
    }

    private String uuid() {
        return UUID.randomUUID().toString();
    }
}
8,210
0
Create_ds/conductor-community/index/es7-persistence/src/test/java/com/netflix/conductor/es7/dao
Create_ds/conductor-community/index/es7-persistence/src/test/java/com/netflix/conductor/es7/dao/index/ElasticSearchRestDaoBaseTest.java
/*
 * <p>
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
 * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations under the License.
 */
package com.netflix.conductor.es7.dao.index;

import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.Reader;

import org.apache.http.HttpHost;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.client.RestClientBuilder;
import org.junit.After;
import org.junit.Before;
import org.springframework.retry.support.RetryTemplate;

/**
 * Base class for {@link ElasticSearchRestDAOV7} integration tests: builds a REST client
 * against the Testcontainers Elasticsearch instance (started by {@link ElasticSearchTest}),
 * initializes the DAO before each test, and wipes all indices afterwards so tests are
 * isolated from each other.
 */
public abstract class ElasticSearchRestDaoBaseTest extends ElasticSearchTest {

    protected RestClient restClient;
    protected ElasticSearchRestDAOV7 indexDAO;

    /** Connects to the running container and creates/initializes the DAO under test. */
    @Before
    public void setup() throws Exception {
        // Container address is "host:port"; split it to build the HttpHost.
        String httpHostAddress = container.getHttpHostAddress();
        String host = httpHostAddress.split(":")[0];
        int port = Integer.parseInt(httpHostAddress.split(":")[1]);

        properties.setUrl("http://" + httpHostAddress);

        RestClientBuilder restClientBuilder = RestClient.builder(new HttpHost(host, port, "http"));
        restClient = restClientBuilder.build();

        indexDAO =
                new ElasticSearchRestDAOV7(
                        restClientBuilder, new RetryTemplate(), properties, objectMapper);
        indexDAO.setup();
    }

    /** Cleans up the cluster and closes the client after each test. */
    @After
    public void tearDown() throws Exception {
        deleteAllIndices();

        if (restClient != null) {
            restClient.close();
        }
    }

    /**
     * Lists all indices via the _cat API and deletes each one.
     *
     * <p>Readers are managed with try-with-resources so the response stream is always
     * closed, even when a DELETE request fails mid-loop (the original code leaked them).
     */
    private void deleteAllIndices() throws IOException {
        Response beforeResponse = restClient.performRequest(new Request("GET", "/_cat/indices"));

        try (Reader streamReader = new InputStreamReader(beforeResponse.getEntity().getContent());
                BufferedReader bufferedReader = new BufferedReader(streamReader)) {

            String line;
            while ((line = bufferedReader.readLine()) != null) {
                // _cat/indices rows are whitespace-separated; the index name is column 3.
                String[] fields = line.split("\\s");
                String endpoint = String.format("/%s", fields[2]);

                restClient.performRequest(new Request("DELETE", endpoint));
            }
        }
    }
}
8,211
0
Create_ds/conductor-community/index/es7-persistence/src/test/java/com/netflix/conductor/es7/dao
Create_ds/conductor-community/index/es7-persistence/src/test/java/com/netflix/conductor/es7/dao/index/TestElasticSearchRestDAOV7Batch.java
/*
 * <p>
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
 * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations under the License.
 */
package com.netflix.conductor.es7.dao.index;

import java.util.HashMap;
import java.util.concurrent.TimeUnit;

import org.junit.Test;
import org.springframework.test.context.TestPropertySource;

import com.netflix.conductor.common.metadata.tasks.Task.Status;
import com.netflix.conductor.common.run.SearchResult;
import com.netflix.conductor.common.run.TaskSummary;

import com.fasterxml.jackson.core.JsonProcessingException;

import static org.awaitility.Awaitility.await;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;

/**
 * Verifies bulk indexing: with indexBatchSize=2 a single indexTask call is buffered, and
 * the buffer is flushed to Elasticsearch once the second document arrives.
 */
@TestPropertySource(properties = "conductor.elasticsearch.indexBatchSize=2")
public class TestElasticSearchRestDAOV7Batch extends ElasticSearchRestDaoBaseTest {

    @Test
    public void indexTaskWithBatchSizeTwo() {
        String correlationId = "some-correlation-id";

        TaskSummary taskSummary = new TaskSummary();
        taskSummary.setTaskId("some-task-id");
        taskSummary.setWorkflowId("some-workflow-instance-id");
        taskSummary.setTaskType("some-task-type");
        taskSummary.setStatus(Status.FAILED);
        try {
            // Plain map instead of the original double-brace-initialized anonymous
            // HashMap subclass (which carries a hidden reference to the test instance).
            HashMap<String, Object> input = new HashMap<>();
            input.put("input_key", "input_value");
            taskSummary.setInput(objectMapper.writeValueAsString(input));
        } catch (JsonProcessingException e) {
            throw new RuntimeException(e);
        }
        taskSummary.setCorrelationId(correlationId);
        taskSummary.setTaskDefName("some-task-def-name");
        taskSummary.setReasonForIncompletion("some-failure-reason");

        // Two submissions fill the batch of 2 and trigger the flush.
        indexDAO.indexTask(taskSummary);
        indexDAO.indexTask(taskSummary);

        // Poll until the flushed documents become searchable.
        await().atMost(5, TimeUnit.SECONDS)
                .untilAsserted(
                        () -> {
                            SearchResult<String> result =
                                    indexDAO.searchTasks(
                                            "correlationId='" + correlationId + "'",
                                            "*",
                                            0,
                                            10000,
                                            null);

                            assertTrue(
                                    "should return 1 or more search results",
                                    result.getResults().size() > 0);
                            assertEquals(
                                    "taskId should match the indexed task",
                                    "some-task-id",
                                    result.getResults().get(0));
                        });
    }
}
8,212
0
Create_ds/conductor-community/index/es7-persistence/src/test/java/com/netflix/conductor/es7/dao
Create_ds/conductor-community/index/es7-persistence/src/test/java/com/netflix/conductor/es7/dao/index/TestBulkRequestBuilderWrapper.java
/* * <p> * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * <p> * http://www.apache.org/licenses/LICENSE-2.0 * <p> * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. */ package com.netflix.conductor.es7.dao.index; import org.elasticsearch.action.bulk.BulkRequestBuilder; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.update.UpdateRequest; import org.junit.Test; import org.mockito.Mockito; public class TestBulkRequestBuilderWrapper { BulkRequestBuilder builder = Mockito.mock(BulkRequestBuilder.class); BulkRequestBuilderWrapper wrapper = new BulkRequestBuilderWrapper(builder); @Test(expected = Exception.class) public void testAddNullUpdateRequest() { wrapper.add((UpdateRequest) null); } @Test(expected = Exception.class) public void testAddNullIndexRequest() { wrapper.add((IndexRequest) null); } @Test public void testBuilderCalls() { IndexRequest indexRequest = new IndexRequest(); UpdateRequest updateRequest = new UpdateRequest(); wrapper.add(indexRequest); wrapper.add(updateRequest); wrapper.numberOfActions(); wrapper.execute(); Mockito.verify(builder, Mockito.times(1)).add(indexRequest); Mockito.verify(builder, Mockito.times(1)).add(updateRequest); Mockito.verify(builder, Mockito.times(1)).numberOfActions(); Mockito.verify(builder, Mockito.times(1)).execute(); } }
8,213
0
Create_ds/conductor-community/index/es7-persistence/src/test/java/com/netflix/conductor/es7/dao
Create_ds/conductor-community/index/es7-persistence/src/test/java/com/netflix/conductor/es7/dao/index/ElasticSearchTest.java
/*
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
 * except in compliance with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software distributed under the
 * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
 * either express or implied. See the License for the specific language governing permissions
 * and limitations under the License.
 */
package com.netflix.conductor.es7.dao.index;

import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.runner.RunWith;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.test.context.ContextConfiguration;
import org.springframework.test.context.TestPropertySource;
import org.springframework.test.context.junit4.SpringRunner;
import org.testcontainers.elasticsearch.ElasticsearchContainer;
import org.testcontainers.utility.DockerImageName;

import com.netflix.conductor.common.config.TestObjectMapperConfiguration;
import com.netflix.conductor.es7.config.ElasticSearchProperties;

import com.fasterxml.jackson.databind.ObjectMapper;

/**
 * Base class for ES7 index DAO integration tests. Starts a disposable Elasticsearch
 * Testcontainers instance once per test class ({@link BeforeClass}/{@link AfterClass})
 * and wires a minimal Spring test context providing an {@link ObjectMapper} and
 * {@link ElasticSearchProperties}.
 */
@ContextConfiguration(
        classes = {TestObjectMapperConfiguration.class, ElasticSearchTest.TestConfiguration.class})
@RunWith(SpringRunner.class)
@TestPropertySource(
        properties = {"conductor.indexing.enabled=true", "conductor.elasticsearch.version=7"})
public abstract class ElasticSearchTest {

    /** Supplies default (unset) ES properties to the test context. */
    @Configuration
    static class TestConfiguration {

        @Bean
        public ElasticSearchProperties elasticSearchProperties() {
            return new ElasticSearchProperties();
        }
    }

    // Shared container for the whole test class; started/stopped in the class-level hooks below.
    protected static final ElasticsearchContainer container =
            new ElasticsearchContainer(
                    DockerImageName.parse("docker.elastic.co/elasticsearch/elasticsearch-oss")
                            .withTag("7.6.2")); // this should match the client version

    @Autowired protected ObjectMapper objectMapper;

    @Autowired protected ElasticSearchProperties properties;

    @BeforeClass
    public static void startServer() {
        container.start();
    }

    @AfterClass
    public static void stopServer() {
        container.stop();
    }
}
8,214
0
Create_ds/conductor-community/index/es7-persistence/src/test/java/com/netflix/conductor/es7/dao/query
Create_ds/conductor-community/index/es7-persistence/src/test/java/com/netflix/conductor/es7/dao/query/parser/TestGroupedExpression.java
/* * <p> * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * <p> * http://www.apache.org/licenses/LICENSE-2.0 * <p> * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. */ package com.netflix.conductor.es7.dao.query.parser; import org.junit.Test; /** * @author Viren */ public class TestGroupedExpression { @Test public void test() {} }
8,215
0
Create_ds/conductor-community/index/es7-persistence/src/test/java/com/netflix/conductor/es7/dao/query
Create_ds/conductor-community/index/es7-persistence/src/test/java/com/netflix/conductor/es7/dao/query/parser/TestExpression.java
/*
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
 * except in compliance with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software distributed under the
 * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
 * either express or implied. See the License for the specific language governing permissions
 * and limitations under the License.
 */
package com.netflix.conductor.es7.dao.query.parser;

import java.io.BufferedInputStream;
import java.io.ByteArrayInputStream;
import java.io.InputStream;

import org.junit.Test;

import com.netflix.conductor.es7.dao.query.parser.internal.AbstractParserTest;
import com.netflix.conductor.es7.dao.query.parser.internal.ConstValue;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;

/**
 * Tests for {@link Expression}: verifies that a free-text query string is parsed into the
 * expected binary-expression tree of name/operator/value triples and grouped sub-expressions.
 *
 * @author Viren
 */
public class TestExpression extends AbstractParserTest {

    /** Parses a query with AND/OR operators and a parenthesized group and walks the tree. */
    @Test
    public void test() throws Exception {
        String test =
                "type='IMAGE' AND subType ='sdp' AND (metadata.width > 50 OR metadata.height > 50)";
        InputStream is = new BufferedInputStream(new ByteArrayInputStream(test.getBytes()));
        Expression expr = new Expression(is);

        System.out.println(expr);

        // Root: type='IMAGE' AND <rest>
        assertTrue(expr.isBinaryExpr());
        assertNull(expr.getGroupedExpression());
        assertNotNull(expr.getNameValue());

        NameValue nv = expr.getNameValue();
        assertEquals("type", nv.getName().getName());
        assertEquals("=", nv.getOp().getOperator());
        // String constants are re-quoted with double quotes by the parser.
        assertEquals("\"IMAGE\"", nv.getValue().getValue());

        // subType = 'sdp'
        Expression rhs = expr.getRightHandSide();
        assertNotNull(rhs);
        assertTrue(rhs.isBinaryExpr());
        nv = rhs.getNameValue();
        assertNotNull(nv);
        assertNull(rhs.getGroupedExpression());
        assertEquals("subType", nv.getName().getName());
        assertEquals("=", nv.getOp().getOperator());
        assertEquals("\"sdp\"", nv.getValue().getValue());
        assertEquals("AND", rhs.getOperator().getOperator());

        // (metadata.width > 50 OR metadata.height > 50) -- a grouped, non-binary node
        rhs = rhs.getRightHandSide();
        assertNotNull(rhs);
        assertFalse(rhs.isBinaryExpr());
        GroupedExpression ge = rhs.getGroupedExpression();
        assertNotNull(ge);

        expr = ge.getExpression();
        assertNotNull(expr);
        assertTrue(expr.isBinaryExpr());
        nv = expr.getNameValue();
        assertNotNull(nv);
        assertEquals("metadata.width", nv.getName().getName());
        assertEquals(">", nv.getOp().getOperator());
        assertEquals("50", nv.getValue().getValue());
        assertEquals("OR", expr.getOperator().getOperator());

        rhs = expr.getRightHandSide();
        assertNotNull(rhs);
        assertFalse(rhs.isBinaryExpr());
        nv = rhs.getNameValue();
        assertNotNull(nv);
        assertEquals("metadata.height", nv.getName().getName());
        assertEquals(">", nv.getOp().getOperator());
        assertEquals("50", nv.getValue().getValue());
    }

    /** Parses the system constants {@code IS null} / {@code IS not null}. */
    @Test
    public void testWithSysConstants() throws Exception {
        String test = "type='IMAGE' AND subType ='sdp' AND description IS null";
        InputStream is = new BufferedInputStream(new ByteArrayInputStream(test.getBytes()));
        Expression expr = new Expression(is);

        System.out.println(expr);

        assertTrue(expr.isBinaryExpr());
        assertNull(expr.getGroupedExpression());
        assertNotNull(expr.getNameValue());

        NameValue nv = expr.getNameValue();
        assertEquals("type", nv.getName().getName());
        assertEquals("=", nv.getOp().getOperator());
        assertEquals("\"IMAGE\"", nv.getValue().getValue());

        Expression rhs = expr.getRightHandSide();
        assertNotNull(rhs);
        assertTrue(rhs.isBinaryExpr());
        nv = rhs.getNameValue();
        assertNotNull(nv);
        assertNull(rhs.getGroupedExpression());
        assertEquals("subType", nv.getName().getName());
        assertEquals("=", nv.getOp().getOperator());
        assertEquals("\"sdp\"", nv.getValue().getValue());
        assertEquals("AND", rhs.getOperator().getOperator());

        // description IS null -- terminal node carrying a system constant
        rhs = rhs.getRightHandSide();
        assertNotNull(rhs);
        assertFalse(rhs.isBinaryExpr());
        GroupedExpression ge = rhs.getGroupedExpression();
        assertNull(ge);
        nv = rhs.getNameValue();
        assertNotNull(nv);
        assertEquals("description", nv.getName().getName());
        assertEquals("IS", nv.getOp().getOperator());
        ConstValue cv = nv.getValue();
        assertNotNull(cv);
        // Fixed argument order: expected value first per the JUnit assertEquals contract.
        assertEquals(ConstValue.SystemConsts.NULL, cv.getSysConstant());

        test = "description IS not null";
        is = new BufferedInputStream(new ByteArrayInputStream(test.getBytes()));
        expr = new Expression(is);

        System.out.println(expr);

        nv = expr.getNameValue();
        assertNotNull(nv);
        assertEquals("description", nv.getName().getName());
        assertEquals("IS", nv.getOp().getOperator());
        cv = nv.getValue();
        assertNotNull(cv);
        // Fixed argument order: expected value first per the JUnit assertEquals contract.
        assertEquals(ConstValue.SystemConsts.NOT_NULL, cv.getSysConstant());
    }
}
8,216
0
Create_ds/conductor-community/index/es7-persistence/src/test/java/com/netflix/conductor/es7/dao/query/parser
Create_ds/conductor-community/index/es7-persistence/src/test/java/com/netflix/conductor/es7/dao/query/parser/internal/TestComparisonOp.java
/* * <p> * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * <p> * http://www.apache.org/licenses/LICENSE-2.0 * <p> * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. */ package com.netflix.conductor.es7.dao.query.parser.internal; import org.junit.Test; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotNull; /** * @author Viren */ public class TestComparisonOp extends AbstractParserTest { @Test public void test() throws Exception { String[] tests = new String[] {"<", ">", "=", "!=", "IN", "BETWEEN", "STARTS_WITH"}; for (String test : tests) { ComparisonOp name = new ComparisonOp(getInputStream(test)); String nameVal = name.getOperator(); assertNotNull(nameVal); assertEquals(test, nameVal); } } @Test(expected = ParserException.class) public void testInvalidOp() throws Exception { String test = "AND"; ComparisonOp name = new ComparisonOp(getInputStream(test)); String nameVal = name.getOperator(); assertNotNull(nameVal); assertEquals(test, nameVal); } }
8,217
0
Create_ds/conductor-community/index/es7-persistence/src/test/java/com/netflix/conductor/es7/dao/query/parser
Create_ds/conductor-community/index/es7-persistence/src/test/java/com/netflix/conductor/es7/dao/query/parser/internal/TestConstValue.java
/*
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
 * except in compliance with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software distributed under the
 * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
 * either express or implied. See the License for the specific language governing permissions
 * and limitations under the License.
 */
package com.netflix.conductor.es7.dao.query.parser.internal;

import java.util.List;

import org.junit.Test;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;

/**
 * Tests for {@link ConstValue} and friends ({@link Range}, {@link ListConst}): string,
 * numeric, system-constant, range and list literals used in search queries.
 *
 * @author Viren
 */
public class TestConstValue extends AbstractParserTest {

    /** Single- and double-quoted strings both parse to a double-quoted value. */
    @Test
    public void testStringConst() throws Exception {
        String test = "'string value'";
        String expected =
                test.replaceAll(
                        "'", "\""); // Quotes are removed but then the result is double quoted.
        ConstValue cv = new ConstValue(getInputStream(test));
        assertNotNull(cv.getValue());
        assertEquals(expected, cv.getValue());
        assertTrue(cv.getValue() instanceof String);

        test = "\"string value\"";
        cv = new ConstValue(getInputStream(test));
        assertNotNull(cv.getValue());
        assertEquals(expected, cv.getValue());
        assertTrue(cv.getValue() instanceof String);
    }

    /** {@code null} and {@code not null} parse to their system-constant markers. */
    @Test
    public void testSystemConst() throws Exception {
        String test = "null";
        ConstValue cv = new ConstValue(getInputStream(test));
        assertNotNull(cv.getValue());
        assertTrue(cv.getValue() instanceof String);
        // Fixed argument order: expected value first per the JUnit assertEquals contract.
        assertEquals(ConstValue.SystemConsts.NULL, cv.getSysConstant());

        // (A redundant duplicate assignment of "null" that was immediately overwritten
        // has been removed here.)
        test = "not null";
        cv = new ConstValue(getInputStream(test));
        assertNotNull(cv.getValue());
        assertEquals(ConstValue.SystemConsts.NOT_NULL, cv.getSysConstant());
    }

    /** An unterminated string literal must be rejected. */
    @Test(expected = ParserException.class)
    public void testInvalid() throws Exception {
        String test = "'string value";
        new ConstValue(getInputStream(test));
    }

    /** Numeric literals are kept as strings and passed through to Elasticsearch. */
    @Test
    public void testNumConst() throws Exception {
        String test = "12345.89";
        ConstValue cv = new ConstValue(getInputStream(test));
        assertNotNull(cv.getValue());
        assertTrue(
                cv.getValue()
                        instanceof
                        String); // Numeric values are stored as string as we are just passing thru
        // them to ES
        assertEquals(test, cv.getValue());
    }

    /** {@code low AND high} parses into a Range with both bounds captured. */
    @Test
    public void testRange() throws Exception {
        String test = "50 AND 100";
        Range range = new Range(getInputStream(test));
        assertEquals("50", range.getLow());
        assertEquals("100", range.getHigh());
    }

    /** A range missing its upper bound must be rejected. */
    @Test(expected = ParserException.class)
    public void testBadRange() throws Exception {
        String test = "50 AND";
        new Range(getInputStream(test));
    }

    /** List literals keep each element verbatim, including surrounding quotes. */
    @Test
    public void testArray() throws Exception {
        String test = "(1, 3, 'name', 'value2')";
        ListConst lc = new ListConst(getInputStream(test));
        List<Object> list = lc.getList();
        assertEquals(4, list.size());
        assertTrue(list.contains("1"));
        assertEquals("'value2'", list.get(3)); // Values are preserved as it is...
    }
}
8,218
0
Create_ds/conductor-community/index/es7-persistence/src/test/java/com/netflix/conductor/es7/dao/query/parser
Create_ds/conductor-community/index/es7-persistence/src/test/java/com/netflix/conductor/es7/dao/query/parser/internal/TestName.java
/* * <p> * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * <p> * http://www.apache.org/licenses/LICENSE-2.0 * <p> * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. */ package com.netflix.conductor.es7.dao.query.parser.internal; import org.junit.Test; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotNull; /** * @author Viren */ public class TestName extends AbstractParserTest { @Test public void test() throws Exception { String test = "metadata.en_US.lang "; Name name = new Name(getInputStream(test)); String nameVal = name.getName(); assertNotNull(nameVal); assertEquals(test.trim(), nameVal); } }
8,219
0
Create_ds/conductor-community/index/es7-persistence/src/test/java/com/netflix/conductor/es7/dao/query/parser
Create_ds/conductor-community/index/es7-persistence/src/test/java/com/netflix/conductor/es7/dao/query/parser/internal/AbstractParserTest.java
/* * <p> * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * <p> * http://www.apache.org/licenses/LICENSE-2.0 * <p> * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. */ package com.netflix.conductor.es7.dao.query.parser.internal; import java.io.BufferedInputStream; import java.io.ByteArrayInputStream; import java.io.InputStream; /** * @author Viren */ public abstract class AbstractParserTest { protected InputStream getInputStream(String expression) { return new BufferedInputStream(new ByteArrayInputStream(expression.getBytes())); } }
8,220
0
Create_ds/conductor-community/index/es7-persistence/src/test/java/com/netflix/conductor/es7/dao/query/parser
Create_ds/conductor-community/index/es7-persistence/src/test/java/com/netflix/conductor/es7/dao/query/parser/internal/TestBooleanOp.java
/* * <p> * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * <p> * http://www.apache.org/licenses/LICENSE-2.0 * <p> * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. */ package com.netflix.conductor.es7.dao.query.parser.internal; import org.junit.Test; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotNull; /** * @author Viren */ public class TestBooleanOp extends AbstractParserTest { @Test public void test() throws Exception { String[] tests = new String[] {"AND", "OR"}; for (String test : tests) { BooleanOp name = new BooleanOp(getInputStream(test)); String nameVal = name.getOperator(); assertNotNull(nameVal); assertEquals(test, nameVal); } } @Test(expected = ParserException.class) public void testInvalid() throws Exception { String test = "<"; BooleanOp name = new BooleanOp(getInputStream(test)); String nameVal = name.getOperator(); assertNotNull(nameVal); assertEquals(test, nameVal); } }
8,221
0
Create_ds/conductor-community/index/es7-persistence/src/test/java/com/netflix/conductor/es7
Create_ds/conductor-community/index/es7-persistence/src/test/java/com/netflix/conductor/es7/utils/TestUtils.java
/*
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
 * except in compliance with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software distributed under the
 * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
 * either express or implied. See the License for the specific language governing permissions
 * and limitations under the License.
 */
package com.netflix.conductor.es7.utils;

import java.nio.charset.StandardCharsets;

import com.netflix.conductor.common.run.TaskSummary;
import com.netflix.conductor.common.run.WorkflowSummary;
import com.netflix.conductor.core.utils.IDGenerator;

import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.common.io.Resources;

/**
 * Test fixtures: loads workflow/task summary snapshots from classpath JSON resources,
 * substituting a fresh (or caller-supplied) workflow id for the placeholder token.
 */
public class TestUtils {

    private static final String WORKFLOW_SCENARIO_EXTENSION = ".json";
    private static final String WORKFLOW_INSTANCE_ID_PLACEHOLDER = "WORKFLOW_INSTANCE_ID";

    /**
     * Loads a {@link WorkflowSummary} snapshot, replacing the workflow-id placeholder with a
     * newly generated id.
     *
     * @param objectMapper mapper used to deserialize the JSON resource
     * @param resourceFileName resource name without the ".json" extension
     * @throws RuntimeException wrapping any load/parse failure
     */
    public static WorkflowSummary loadWorkflowSnapshot(
            ObjectMapper objectMapper, String resourceFileName) {
        try {
            String content = loadJsonResource(resourceFileName);
            String workflowId = new IDGenerator().generate();
            content = content.replace(WORKFLOW_INSTANCE_ID_PLACEHOLDER, workflowId);

            return objectMapper.readValue(content, WorkflowSummary.class);
        } catch (Exception e) {
            throw new RuntimeException(e.getMessage(), e);
        }
    }

    /**
     * Loads a {@link TaskSummary} snapshot, replacing the workflow-id placeholder with a newly
     * generated id.
     */
    public static TaskSummary loadTaskSnapshot(ObjectMapper objectMapper, String resourceFileName) {
        try {
            String content = loadJsonResource(resourceFileName);
            String workflowId = new IDGenerator().generate();
            content = content.replace(WORKFLOW_INSTANCE_ID_PLACEHOLDER, workflowId);

            return objectMapper.readValue(content, TaskSummary.class);
        } catch (Exception e) {
            throw new RuntimeException(e.getMessage(), e);
        }
    }

    /**
     * Loads a {@link TaskSummary} snapshot bound to the given workflow id (used when the test
     * needs the task to reference an existing workflow).
     */
    public static TaskSummary loadTaskSnapshot(
            ObjectMapper objectMapper, String resourceFileName, String workflowId) {
        try {
            String content = loadJsonResource(resourceFileName);
            content = content.replace(WORKFLOW_INSTANCE_ID_PLACEHOLDER, workflowId);

            return objectMapper.readValue(content, TaskSummary.class);
        } catch (Exception e) {
            throw new RuntimeException(e.getMessage(), e);
        }
    }

    /**
     * Reads the classpath resource "/{resourceFileName}.json" as a UTF-8 string.
     *
     * <p>Uses {@link StandardCharsets#UTF_8} instead of the deprecated
     * {@code org.apache.commons.io.Charsets.UTF_8}.
     */
    public static String loadJsonResource(String resourceFileName) {
        try {
            return Resources.toString(
                    TestUtils.class.getResource(
                            "/" + resourceFileName + WORKFLOW_SCENARIO_EXTENSION),
                    StandardCharsets.UTF_8);
        } catch (Exception e) {
            throw new RuntimeException(e.getMessage(), e);
        }
    }
}
8,222
0
Create_ds/conductor-community/index/es7-persistence/src/main/java/com/netflix/conductor/es7
Create_ds/conductor-community/index/es7-persistence/src/main/java/com/netflix/conductor/es7/config/ElasticSearchConditions.java
/* * <p> * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * <p> * http://www.apache.org/licenses/LICENSE-2.0 * <p> * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. */ package com.netflix.conductor.es7.config; import org.springframework.boot.autoconfigure.condition.AllNestedConditions; import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty; public class ElasticSearchConditions { private ElasticSearchConditions() {} public static class ElasticSearchV7Enabled extends AllNestedConditions { ElasticSearchV7Enabled() { super(ConfigurationPhase.PARSE_CONFIGURATION); } @SuppressWarnings("unused") @ConditionalOnProperty( name = "conductor.indexing.enabled", havingValue = "true", matchIfMissing = true) static class enabledIndexing {} @SuppressWarnings("unused") @ConditionalOnProperty( name = "conductor.elasticsearch.version", havingValue = "7", matchIfMissing = true) static class enabledES7 {} } }
8,223
0
Create_ds/conductor-community/index/es7-persistence/src/main/java/com/netflix/conductor/es7
Create_ds/conductor-community/index/es7-persistence/src/main/java/com/netflix/conductor/es7/config/ElasticSearchProperties.java
/*
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
 * except in compliance with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software distributed under the
 * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
 * either express or implied. See the License for the specific language governing permissions
 * and limitations under the License.
 */
package com.netflix.conductor.es7.config;

import java.net.MalformedURLException;
import java.net.URL;
import java.time.Duration;
import java.time.temporal.ChronoUnit;
import java.util.Arrays;
import java.util.List;
import java.util.stream.Collectors;

import org.springframework.boot.context.properties.ConfigurationProperties;
import org.springframework.boot.convert.DurationUnit;

/** Configuration properties for the ES7 module, bound from {@code conductor.elasticsearch.*}. */
@ConfigurationProperties("conductor.elasticsearch")
public class ElasticSearchProperties {

    /**
     * The comma separated list of urls for the elasticsearch cluster. Format --
     * host1:port1,host2:port2
     */
    private String url = "localhost:9300";

    /** The index prefix to be used when creating indices */
    private String indexPrefix = "conductor";

    /** The color of the elasticserach cluster to wait for to confirm healthy status */
    private String clusterHealthColor = "green";

    /** The size of the batch to be used for bulk indexing in async mode */
    private int indexBatchSize = 1;

    /** The size of the queue used for holding async indexing tasks */
    private int asyncWorkerQueueSize = 100;

    /** The maximum number of threads allowed in the async pool */
    private int asyncMaxPoolSize = 12;

    /**
     * The time in seconds after which the async buffers will be flushed (if no activity) to prevent
     * data loss
     */
    @DurationUnit(ChronoUnit.SECONDS)
    private Duration asyncBufferFlushTimeout = Duration.ofSeconds(10);

    /** The number of shards that the index will be created with */
    private int indexShardCount = 5;

    /** The number of replicas that the index will be configured to have */
    private int indexReplicasCount = 1;

    /** The number of task log results that will be returned in the response */
    private int taskLogResultLimit = 10;

    /** The timeout in milliseconds used when requesting a connection from the connection manager */
    private int restClientConnectionRequestTimeout = -1;

    /** Used to control if index management is to be enabled or will be controlled externally */
    private boolean autoIndexManagementEnabled = true;

    /**
     * Document types are deprecated in ES6 and removed from ES7. This property can be used to
     * disable the use of specific document types with an override. This property is currently used
     * in ES6 module.
     *
     * <p><em>Note that this property will only take effect if {@link
     * ElasticSearchProperties#isAutoIndexManagementEnabled} is set to false and index management is
     * handled outside of this module.</em>
     */
    private String documentTypeOverride = "";

    /** Elasticsearch basic auth username */
    private String username;

    /** Elasticsearch basic auth password */
    private String password;

    public String getUrl() {
        return url;
    }

    public void setUrl(String url) {
        this.url = url;
    }

    public String getIndexPrefix() {
        return indexPrefix;
    }

    public void setIndexPrefix(String indexPrefix) {
        this.indexPrefix = indexPrefix;
    }

    public String getClusterHealthColor() {
        return clusterHealthColor;
    }

    public void setClusterHealthColor(String clusterHealthColor) {
        this.clusterHealthColor = clusterHealthColor;
    }

    public int getIndexBatchSize() {
        return indexBatchSize;
    }

    public void setIndexBatchSize(int indexBatchSize) {
        this.indexBatchSize = indexBatchSize;
    }

    public int getAsyncWorkerQueueSize() {
        return asyncWorkerQueueSize;
    }

    public void setAsyncWorkerQueueSize(int asyncWorkerQueueSize) {
        this.asyncWorkerQueueSize = asyncWorkerQueueSize;
    }

    public int getAsyncMaxPoolSize() {
        return asyncMaxPoolSize;
    }

    public void setAsyncMaxPoolSize(int asyncMaxPoolSize) {
        this.asyncMaxPoolSize = asyncMaxPoolSize;
    }

    public Duration getAsyncBufferFlushTimeout() {
        return asyncBufferFlushTimeout;
    }

    public void setAsyncBufferFlushTimeout(Duration asyncBufferFlushTimeout) {
        this.asyncBufferFlushTimeout = asyncBufferFlushTimeout;
    }

    public int getIndexShardCount() {
        return indexShardCount;
    }

    public void setIndexShardCount(int indexShardCount) {
        this.indexShardCount = indexShardCount;
    }

    public int getIndexReplicasCount() {
        return indexReplicasCount;
    }

    public void setIndexReplicasCount(int indexReplicasCount) {
        this.indexReplicasCount = indexReplicasCount;
    }

    public int getTaskLogResultLimit() {
        return taskLogResultLimit;
    }

    public void setTaskLogResultLimit(int taskLogResultLimit) {
        this.taskLogResultLimit = taskLogResultLimit;
    }

    public int getRestClientConnectionRequestTimeout() {
        return restClientConnectionRequestTimeout;
    }

    public void setRestClientConnectionRequestTimeout(int restClientConnectionRequestTimeout) {
        this.restClientConnectionRequestTimeout = restClientConnectionRequestTimeout;
    }

    public boolean isAutoIndexManagementEnabled() {
        return autoIndexManagementEnabled;
    }

    public void setAutoIndexManagementEnabled(boolean autoIndexManagementEnabled) {
        this.autoIndexManagementEnabled = autoIndexManagementEnabled;
    }

    public String getDocumentTypeOverride() {
        return documentTypeOverride;
    }

    public void setDocumentTypeOverride(String documentTypeOverride) {
        this.documentTypeOverride = documentTypeOverride;
    }

    public String getUsername() {
        return username;
    }

    public void setUsername(String username) {
        this.username = username;
    }

    public String getPassword() {
        return password;
    }

    public void setPassword(String password) {
        this.password = password;
    }

    /**
     * Converts the configured comma-separated cluster address into a list of URLs, prepending
     * "http://" to any host that does not already carry an explicit scheme.
     */
    public List<URL> toURLs() {
        String clusterAddress = getUrl();
        String[] hosts = clusterAddress.split(",");
        return Arrays.stream(hosts)
                .map(
                        host ->
                                (host.startsWith("http://") || host.startsWith("https://"))
                                        ? toURL(host)
                                        : toURL("http://" + host))
                .collect(Collectors.toList());
    }

    private URL toURL(String url) {
        try {
            return new URL(url);
        } catch (MalformedURLException e) {
            // Fixed: message previously lacked a space after the URL, and the original
            // exception was dropped; the cause is now preserved for diagnostics.
            throw new IllegalArgumentException(
                    url + " can not be converted to java.net.URL", e);
        }
    }
}
8,224
0
Create_ds/conductor-community/index/es7-persistence/src/main/java/com/netflix/conductor/es7
Create_ds/conductor-community/index/es7-persistence/src/main/java/com/netflix/conductor/es7/config/ElasticSearchV7Configuration.java
/*
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
 * except in compliance with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software distributed under the
 * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
 * either express or implied. See the License for the specific language governing permissions
 * and limitations under the License.
 */
package com.netflix.conductor.es7.config;

import java.net.URL;
import java.util.List;

import org.apache.http.HttpHost;
import org.apache.http.auth.AuthScope;
import org.apache.http.auth.UsernamePasswordCredentials;
import org.apache.http.client.CredentialsProvider;
import org.apache.http.impl.client.BasicCredentialsProvider;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.client.RestClientBuilder;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Qualifier;
import org.springframework.boot.context.properties.EnableConfigurationProperties;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Conditional;
import org.springframework.context.annotation.Configuration;
import org.springframework.context.annotation.Primary;
import org.springframework.retry.backoff.FixedBackOffPolicy;
import org.springframework.retry.support.RetryTemplate;

import com.netflix.conductor.dao.IndexDAO;
import com.netflix.conductor.es7.dao.index.ElasticSearchRestDAOV7;

import com.fasterxml.jackson.databind.ObjectMapper;

/** Spring wiring for the ES7 index DAO; active only when the V7 conditions hold. */
@Configuration(proxyBeanMethods = false)
@EnableConfigurationProperties(ElasticSearchProperties.class)
@Conditional(ElasticSearchConditions.ElasticSearchV7Enabled.class)
public class ElasticSearchV7Configuration {

    private static final Logger log = LoggerFactory.getLogger(ElasticSearchV7Configuration.class);

    /**
     * Low-level REST client built from the configured cluster URLs, honoring the optional
     * connection-request timeout.
     *
     * <p>NOTE(review): unlike {@link #restClientBuilder}, this bean does not apply the basic-auth
     * credentials — confirm whether that asymmetry is intentional before consolidating.
     */
    @Bean
    public RestClient restClient(ElasticSearchProperties properties) {
        RestClientBuilder restClientBuilder =
                RestClient.builder(convertToHttpHosts(properties.toURLs()));
        if (properties.getRestClientConnectionRequestTimeout() > 0) {
            restClientBuilder.setRequestConfigCallback(
                    requestConfigBuilder ->
                            requestConfigBuilder.setConnectionRequestTimeout(
                                    properties.getRestClientConnectionRequestTimeout()));
        }
        return restClientBuilder.build();
    }

    /**
     * Builder used by the index DAO; configures HTTP basic auth when both username and password
     * are set.
     */
    @Bean
    public RestClientBuilder restClientBuilder(ElasticSearchProperties properties) {
        RestClientBuilder builder = RestClient.builder(convertToHttpHosts(properties.toURLs()));

        if (properties.getUsername() != null && properties.getPassword() != null) {
            log.info(
                    "Configure ElasticSearch with BASIC authentication. User:{}",
                    properties.getUsername());
            final CredentialsProvider credentialsProvider = new BasicCredentialsProvider();
            credentialsProvider.setCredentials(
                    AuthScope.ANY,
                    new UsernamePasswordCredentials(
                            properties.getUsername(), properties.getPassword()));
            builder.setHttpClientConfigCallback(
                    httpClientBuilder ->
                            httpClientBuilder.setDefaultCredentialsProvider(credentialsProvider));
        } else {
            log.info("Configure ElasticSearch with no authentication.");
        }
        return builder;
    }

    /** The ES7-backed {@link IndexDAO}; primary because including this module implies using it. */
    @Primary // If you are including this project, it's assumed you want ES to be your indexing
    // mechanism
    @Bean
    public IndexDAO es7IndexDAO(
            RestClientBuilder restClientBuilder,
            @Qualifier("es7RetryTemplate") RetryTemplate retryTemplate,
            ElasticSearchProperties properties,
            ObjectMapper objectMapper) {
        // Removed an unused local that fetched properties.getUrl() without using it.
        return new ElasticSearchRestDAOV7(
                restClientBuilder, retryTemplate, properties, objectMapper);
    }

    /** Retry template with a fixed 1s back-off, used by the ES7 DAO for transient failures. */
    @Bean
    public RetryTemplate es7RetryTemplate() {
        RetryTemplate retryTemplate = new RetryTemplate();
        FixedBackOffPolicy fixedBackOffPolicy = new FixedBackOffPolicy();
        fixedBackOffPolicy.setBackOffPeriod(1000L);
        retryTemplate.setBackOffPolicy(fixedBackOffPolicy);
        return retryTemplate;
    }

    /** Maps each URL to an {@link HttpHost}, preserving host, port and scheme. */
    private HttpHost[] convertToHttpHosts(List<URL> hosts) {
        return hosts.stream()
                .map(host -> new HttpHost(host.getHost(), host.getPort(), host.getProtocol()))
                .toArray(HttpHost[]::new);
    }
}
8,225
0
Create_ds/conductor-community/index/es7-persistence/src/main/java/com/netflix/conductor/es7/dao
Create_ds/conductor-community/index/es7-persistence/src/main/java/com/netflix/conductor/es7/dao/index/BulkRequestWrapper.java
/* * <p> * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * <p> * http://www.apache.org/licenses/LICENSE-2.0 * <p> * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. */ package com.netflix.conductor.es7.dao.index; import java.util.Objects; import org.elasticsearch.action.bulk.BulkRequest; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.update.UpdateRequest; import org.springframework.lang.NonNull; /** Thread-safe wrapper for {@link BulkRequest}. */ class BulkRequestWrapper { private final BulkRequest bulkRequest; BulkRequestWrapper(@NonNull BulkRequest bulkRequest) { this.bulkRequest = Objects.requireNonNull(bulkRequest); } public void add(@NonNull UpdateRequest req) { synchronized (bulkRequest) { bulkRequest.add(Objects.requireNonNull(req)); } } public void add(@NonNull IndexRequest req) { synchronized (bulkRequest) { bulkRequest.add(Objects.requireNonNull(req)); } } BulkRequest get() { return bulkRequest; } int numberOfActions() { synchronized (bulkRequest) { return bulkRequest.numberOfActions(); } } }
8,226
0
Create_ds/conductor-community/index/es7-persistence/src/main/java/com/netflix/conductor/es7/dao
Create_ds/conductor-community/index/es7-persistence/src/main/java/com/netflix/conductor/es7/dao/index/ElasticSearchRestDAOV7.java
/* * <p> * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * <p> * http://www.apache.org/licenses/LICENSE-2.0 * <p> * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. */ package com.netflix.conductor.es7.dao.index; import java.io.IOException; import java.io.InputStream; import java.text.SimpleDateFormat; import java.time.Instant; import java.time.LocalDate; import java.util.*; import java.util.concurrent.*; import java.util.stream.Collectors; import java.util.stream.IntStream; import javax.annotation.PostConstruct; import javax.annotation.PreDestroy; import org.apache.commons.io.IOUtils; import org.apache.http.HttpEntity; import org.apache.http.HttpStatus; import org.apache.http.entity.ContentType; import org.apache.http.nio.entity.NByteArrayEntity; import org.apache.http.nio.entity.NStringEntity; import org.apache.http.util.EntityUtils; import org.elasticsearch.action.DocWriteResponse; import org.elasticsearch.action.bulk.BulkRequest; import org.elasticsearch.action.delete.DeleteRequest; import org.elasticsearch.action.delete.DeleteResponse; import org.elasticsearch.action.get.GetRequest; import org.elasticsearch.action.get.GetResponse; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.update.UpdateRequest; import org.elasticsearch.client.*; import org.elasticsearch.client.core.CountRequest; import org.elasticsearch.client.core.CountResponse; import org.elasticsearch.common.xcontent.XContentType; import 
org.elasticsearch.index.query.BoolQueryBuilder; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.SearchHits; import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.search.sort.FieldSortBuilder; import org.elasticsearch.search.sort.SortOrder; import org.joda.time.DateTime; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.springframework.retry.support.RetryTemplate; import com.netflix.conductor.annotations.Trace; import com.netflix.conductor.common.metadata.events.EventExecution; import com.netflix.conductor.common.metadata.tasks.TaskExecLog; import com.netflix.conductor.common.run.SearchResult; import com.netflix.conductor.common.run.TaskSummary; import com.netflix.conductor.common.run.WorkflowSummary; import com.netflix.conductor.core.events.queue.Message; import com.netflix.conductor.core.exception.NonTransientException; import com.netflix.conductor.core.exception.TransientException; import com.netflix.conductor.dao.IndexDAO; import com.netflix.conductor.es7.config.ElasticSearchProperties; import com.netflix.conductor.es7.dao.query.parser.internal.ParserException; import com.netflix.conductor.metrics.Monitors; import com.fasterxml.jackson.core.JsonProcessingException; import com.fasterxml.jackson.databind.JsonNode; import com.fasterxml.jackson.databind.ObjectMapper; import com.fasterxml.jackson.databind.node.ObjectNode; import com.fasterxml.jackson.databind.type.MapType; import com.fasterxml.jackson.databind.type.TypeFactory; @Trace public class ElasticSearchRestDAOV7 extends ElasticSearchBaseDAO implements IndexDAO { private static final Logger logger = LoggerFactory.getLogger(ElasticSearchRestDAOV7.class); private static final String CLASS_NAME = ElasticSearchRestDAOV7.class.getSimpleName(); private static final int CORE_POOL_SIZE = 6; private static final long KEEP_ALIVE_TIME = 1L; 
private static final String WORKFLOW_DOC_TYPE = "workflow"; private static final String TASK_DOC_TYPE = "task"; private static final String LOG_DOC_TYPE = "task_log"; private static final String EVENT_DOC_TYPE = "event"; private static final String MSG_DOC_TYPE = "message"; private static final TimeZone GMT = TimeZone.getTimeZone("GMT"); private static final SimpleDateFormat SIMPLE_DATE_FORMAT = new SimpleDateFormat("yyyyMMWW"); private @interface HttpMethod { String GET = "GET"; String POST = "POST"; String PUT = "PUT"; String HEAD = "HEAD"; } private static final String className = ElasticSearchRestDAOV7.class.getSimpleName(); private final String workflowIndexName; private final String taskIndexName; private final String eventIndexPrefix; private String eventIndexName; private final String messageIndexPrefix; private String messageIndexName; private String logIndexName; private final String logIndexPrefix; private final String clusterHealthColor; private final RestHighLevelClient elasticSearchClient; private final RestClient elasticSearchAdminClient; private final ExecutorService executorService; private final ExecutorService logExecutorService; private final ConcurrentHashMap<String, BulkRequests> bulkRequests; private final int indexBatchSize; private final int asyncBufferFlushTimeout; private final ElasticSearchProperties properties; private final RetryTemplate retryTemplate; static { SIMPLE_DATE_FORMAT.setTimeZone(GMT); } public ElasticSearchRestDAOV7( RestClientBuilder restClientBuilder, RetryTemplate retryTemplate, ElasticSearchProperties properties, ObjectMapper objectMapper) { this.objectMapper = objectMapper; this.elasticSearchAdminClient = restClientBuilder.build(); this.elasticSearchClient = new RestHighLevelClient(restClientBuilder); this.clusterHealthColor = properties.getClusterHealthColor(); this.bulkRequests = new ConcurrentHashMap<>(); this.indexBatchSize = properties.getIndexBatchSize(); this.asyncBufferFlushTimeout = (int) 
properties.getAsyncBufferFlushTimeout().getSeconds(); this.properties = properties; this.indexPrefix = properties.getIndexPrefix(); this.workflowIndexName = getIndexName(WORKFLOW_DOC_TYPE); this.taskIndexName = getIndexName(TASK_DOC_TYPE); this.logIndexPrefix = this.indexPrefix + "_" + LOG_DOC_TYPE; this.messageIndexPrefix = this.indexPrefix + "_" + MSG_DOC_TYPE; this.eventIndexPrefix = this.indexPrefix + "_" + EVENT_DOC_TYPE; int workerQueueSize = properties.getAsyncWorkerQueueSize(); int maximumPoolSize = properties.getAsyncMaxPoolSize(); // Set up a workerpool for performing async operations. this.executorService = new ThreadPoolExecutor( CORE_POOL_SIZE, maximumPoolSize, KEEP_ALIVE_TIME, TimeUnit.MINUTES, new LinkedBlockingQueue<>(workerQueueSize), (runnable, executor) -> { logger.warn( "Request {} to async dao discarded in executor {}", runnable, executor); Monitors.recordDiscardedIndexingCount("indexQueue"); }); // Set up a workerpool for performing async operations for task_logs, event_executions, // message int corePoolSize = 1; maximumPoolSize = 2; long keepAliveTime = 30L; this.logExecutorService = new ThreadPoolExecutor( corePoolSize, maximumPoolSize, keepAliveTime, TimeUnit.SECONDS, new LinkedBlockingQueue<>(workerQueueSize), (runnable, executor) -> { logger.warn( "Request {} to async log dao discarded in executor {}", runnable, executor); Monitors.recordDiscardedIndexingCount("logQueue"); }); Executors.newSingleThreadScheduledExecutor() .scheduleAtFixedRate(this::flushBulkRequests, 60, 30, TimeUnit.SECONDS); this.retryTemplate = retryTemplate; } @PreDestroy private void shutdown() { logger.info("Gracefully shutdown executor service"); shutdownExecutorService(logExecutorService); shutdownExecutorService(executorService); } private void shutdownExecutorService(ExecutorService execService) { try { execService.shutdown(); if (execService.awaitTermination(30, TimeUnit.SECONDS)) { logger.debug("tasks completed, shutting down"); } else { logger.warn("Forcing 
shutdown after waiting for 30 seconds"); execService.shutdownNow(); } } catch (InterruptedException ie) { logger.warn( "Shutdown interrupted, invoking shutdownNow on scheduledThreadPoolExecutor for delay queue"); execService.shutdownNow(); Thread.currentThread().interrupt(); } } @Override @PostConstruct public void setup() throws Exception { waitForHealthyCluster(); if (properties.isAutoIndexManagementEnabled()) { createIndexesTemplates(); createWorkflowIndex(); createTaskIndex(); } } private void createIndexesTemplates() { try { initIndexesTemplates(); updateIndexesNames(); Executors.newScheduledThreadPool(1) .scheduleAtFixedRate(this::updateIndexesNames, 0, 1, TimeUnit.HOURS); } catch (Exception e) { logger.error("Error creating index templates!", e); } } private void initIndexesTemplates() { initIndexTemplate(LOG_DOC_TYPE); initIndexTemplate(EVENT_DOC_TYPE); initIndexTemplate(MSG_DOC_TYPE); } /** Initializes the index with the required templates and mappings. */ private void initIndexTemplate(String type) { String template = "template_" + type; try { if (doesResourceNotExist("/_template/" + template)) { logger.info("Creating the index template '" + template + "'"); InputStream stream = ElasticSearchRestDAOV7.class.getResourceAsStream("/" + template + ".json"); byte[] templateSource = IOUtils.toByteArray(stream); HttpEntity entity = new NByteArrayEntity(templateSource, ContentType.APPLICATION_JSON); Request request = new Request(HttpMethod.PUT, "/_template/" + template); request.setEntity(entity); String test = IOUtils.toString( elasticSearchAdminClient .performRequest(request) .getEntity() .getContent()); } } catch (Exception e) { logger.error("Failed to init " + template, e); } } private void updateIndexesNames() { logIndexName = updateIndexName(LOG_DOC_TYPE); eventIndexName = updateIndexName(EVENT_DOC_TYPE); messageIndexName = updateIndexName(MSG_DOC_TYPE); } private String updateIndexName(String type) { String indexName = this.indexPrefix + "_" + type + "_" + 
SIMPLE_DATE_FORMAT.format(new Date()); try { addIndex(indexName); return indexName; } catch (IOException e) { logger.error("Failed to update log index name: {}", indexName, e); throw new NonTransientException(e.getMessage(), e); } } private void createWorkflowIndex() { String indexName = getIndexName(WORKFLOW_DOC_TYPE); try { addIndex(indexName, "/mappings_docType_workflow.json"); } catch (IOException e) { logger.error("Failed to initialize index '{}'", indexName, e); } } private void createTaskIndex() { String indexName = getIndexName(TASK_DOC_TYPE); try { addIndex(indexName, "/mappings_docType_task.json"); } catch (IOException e) { logger.error("Failed to initialize index '{}'", indexName, e); } } /** * Waits for the ES cluster to become green. * * @throws Exception If there is an issue connecting with the ES cluster. */ private void waitForHealthyCluster() throws Exception { Map<String, String> params = new HashMap<>(); params.put("wait_for_status", this.clusterHealthColor); params.put("timeout", "30s"); Request request = new Request("GET", "/_cluster/health"); request.addParameters(params); elasticSearchAdminClient.performRequest(request); } /** * Adds an index to elasticsearch if it does not exist. * * @param index The name of the index to create. * @param mappingFilename Index mapping filename * @throws IOException If an error occurred during requests to ES. 
*/ private void addIndex(String index, final String mappingFilename) throws IOException { logger.info("Adding index '{}'...", index); String resourcePath = "/" + index; if (doesResourceNotExist(resourcePath)) { try { ObjectNode setting = objectMapper.createObjectNode(); ObjectNode indexSetting = objectMapper.createObjectNode(); ObjectNode root = objectMapper.createObjectNode(); indexSetting.put("number_of_shards", properties.getIndexShardCount()); indexSetting.put("number_of_replicas", properties.getIndexReplicasCount()); JsonNode mappingNodeValue = objectMapper.readTree(loadTypeMappingSource(mappingFilename)); root.set("settings", indexSetting); root.set("mappings", mappingNodeValue); Request request = new Request(HttpMethod.PUT, resourcePath); request.setEntity( new NStringEntity( objectMapper.writeValueAsString(root), ContentType.APPLICATION_JSON)); elasticSearchAdminClient.performRequest(request); logger.info("Added '{}' index", index); } catch (ResponseException e) { boolean errorCreatingIndex = true; Response errorResponse = e.getResponse(); if (errorResponse.getStatusLine().getStatusCode() == HttpStatus.SC_BAD_REQUEST) { JsonNode root = objectMapper.readTree(EntityUtils.toString(errorResponse.getEntity())); String errorCode = root.get("error").get("type").asText(); if ("index_already_exists_exception".equals(errorCode)) { errorCreatingIndex = false; } } if (errorCreatingIndex) { throw e; } } } else { logger.info("Index '{}' already exists", index); } } /** * Adds an index to elasticsearch if it does not exist. * * @param index The name of the index to create. * @throws IOException If an error occurred during requests to ES. 
*/ private void addIndex(final String index) throws IOException { logger.info("Adding index '{}'...", index); String resourcePath = "/" + index; if (doesResourceNotExist(resourcePath)) { try { ObjectNode setting = objectMapper.createObjectNode(); ObjectNode indexSetting = objectMapper.createObjectNode(); indexSetting.put("number_of_shards", properties.getIndexShardCount()); indexSetting.put("number_of_replicas", properties.getIndexReplicasCount()); setting.set("settings", indexSetting); Request request = new Request(HttpMethod.PUT, resourcePath); request.setEntity( new NStringEntity(setting.toString(), ContentType.APPLICATION_JSON)); elasticSearchAdminClient.performRequest(request); logger.info("Added '{}' index", index); } catch (ResponseException e) { boolean errorCreatingIndex = true; Response errorResponse = e.getResponse(); if (errorResponse.getStatusLine().getStatusCode() == HttpStatus.SC_BAD_REQUEST) { JsonNode root = objectMapper.readTree(EntityUtils.toString(errorResponse.getEntity())); String errorCode = root.get("error").get("type").asText(); if ("index_already_exists_exception".equals(errorCode)) { errorCreatingIndex = false; } } if (errorCreatingIndex) { throw e; } } } else { logger.info("Index '{}' already exists", index); } } /** * Adds a mapping type to an index if it does not exist. * * @param index The name of the index. * @param mappingType The name of the mapping type. * @param mappingFilename The name of the mapping file to use to add the mapping if it does not * exist. * @throws IOException If an error occurred during requests to ES. 
*/
    private void addMappingToIndex(
            final String index, final String mappingType, final String mappingFilename)
            throws IOException {
        logger.info("Adding '{}' mapping to index '{}'...", mappingType, index);
        String resourcePath = "/" + index + "/_mapping";
        if (doesResourceNotExist(resourcePath)) {
            // NOTE(review): getBytes() uses the platform default charset — the mapping JSON is
            // likely intended to be UTF-8; confirm and consider StandardCharsets.UTF_8.
            HttpEntity entity =
                    new NByteArrayEntity(
                            loadTypeMappingSource(mappingFilename).getBytes(),
                            ContentType.APPLICATION_JSON);
            Request request = new Request(HttpMethod.PUT, resourcePath);
            request.setEntity(entity);
            elasticSearchAdminClient.performRequest(request);
            logger.info("Added '{}' mapping", mappingType);
        } else {
            logger.info("Mapping '{}' already exists", mappingType);
        }
    }

    /**
     * Determines whether a resource exists in ES. This issues a HEAD request to a particular path
     * and returns true if status 200; false otherwise.
     *
     * @param resourcePath The path of the resource to get.
     * @return True if it exists; false otherwise.
     * @throws IOException If an error occurred during requests to ES.
     */
    public boolean doesResourceExist(final String resourcePath) throws IOException {
        Request request = new Request(HttpMethod.HEAD, resourcePath);
        Response response = elasticSearchAdminClient.performRequest(request);
        return response.getStatusLine().getStatusCode() == HttpStatus.SC_OK;
    }

    /**
     * The inverse of doesResourceExist.
     *
     * @param resourcePath The path of the resource to check.
     * @return True if it does not exist; false otherwise.
     * @throws IOException If an error occurred during requests to ES.
*/ public boolean doesResourceNotExist(final String resourcePath) throws IOException { return !doesResourceExist(resourcePath); } @Override public void indexWorkflow(WorkflowSummary workflow) { try { long startTime = Instant.now().toEpochMilli(); String workflowId = workflow.getWorkflowId(); byte[] docBytes = objectMapper.writeValueAsBytes(workflow); IndexRequest request = new IndexRequest(workflowIndexName) .id(workflowId) .source(docBytes, XContentType.JSON); elasticSearchClient.index(request, RequestOptions.DEFAULT); long endTime = Instant.now().toEpochMilli(); logger.debug( "Time taken {} for indexing workflow: {}", endTime - startTime, workflowId); Monitors.recordESIndexTime("index_workflow", WORKFLOW_DOC_TYPE, endTime - startTime); Monitors.recordWorkerQueueSize( "indexQueue", ((ThreadPoolExecutor) executorService).getQueue().size()); } catch (Exception e) { Monitors.error(className, "indexWorkflow"); logger.error("Failed to index workflow: {}", workflow.getWorkflowId(), e); } } @Override public CompletableFuture<Void> asyncIndexWorkflow(WorkflowSummary workflow) { return CompletableFuture.runAsync(() -> indexWorkflow(workflow), executorService); } @Override public void indexTask(TaskSummary task) { try { long startTime = Instant.now().toEpochMilli(); String taskId = task.getTaskId(); indexObject(taskIndexName, TASK_DOC_TYPE, taskId, task); long endTime = Instant.now().toEpochMilli(); logger.debug( "Time taken {} for indexing task:{} in workflow: {}", endTime - startTime, taskId, task.getWorkflowId()); Monitors.recordESIndexTime("index_task", TASK_DOC_TYPE, endTime - startTime); Monitors.recordWorkerQueueSize( "indexQueue", ((ThreadPoolExecutor) executorService).getQueue().size()); } catch (Exception e) { logger.error("Failed to index task: {}", task.getTaskId(), e); } } @Override public CompletableFuture<Void> asyncIndexTask(TaskSummary task) { return CompletableFuture.runAsync(() -> indexTask(task), executorService); } @Override public void 
addTaskExecutionLogs(List<TaskExecLog> taskExecLogs) { if (taskExecLogs.isEmpty()) { return; } long startTime = Instant.now().toEpochMilli(); BulkRequest bulkRequest = new BulkRequest(); for (TaskExecLog log : taskExecLogs) { byte[] docBytes; try { docBytes = objectMapper.writeValueAsBytes(log); } catch (JsonProcessingException e) { logger.error("Failed to convert task log to JSON for task {}", log.getTaskId()); continue; } IndexRequest request = new IndexRequest(logIndexName); request.source(docBytes, XContentType.JSON); bulkRequest.add(request); } try { elasticSearchClient.bulk(bulkRequest, RequestOptions.DEFAULT); long endTime = Instant.now().toEpochMilli(); logger.debug("Time taken {} for indexing taskExecutionLogs", endTime - startTime); Monitors.recordESIndexTime( "index_task_execution_logs", LOG_DOC_TYPE, endTime - startTime); Monitors.recordWorkerQueueSize( "logQueue", ((ThreadPoolExecutor) logExecutorService).getQueue().size()); } catch (Exception e) { List<String> taskIds = taskExecLogs.stream().map(TaskExecLog::getTaskId).collect(Collectors.toList()); logger.error("Failed to index task execution logs for tasks: {}", taskIds, e); } } @Override public CompletableFuture<Void> asyncAddTaskExecutionLogs(List<TaskExecLog> logs) { return CompletableFuture.runAsync(() -> addTaskExecutionLogs(logs), logExecutorService); } @Override public List<TaskExecLog> getTaskExecutionLogs(String taskId) { try { BoolQueryBuilder query = boolQueryBuilder("taskId='" + taskId + "'", "*"); // Create the searchObjectIdsViaExpression source SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder(); searchSourceBuilder.query(query); searchSourceBuilder.sort(new FieldSortBuilder("createdTime").order(SortOrder.ASC)); searchSourceBuilder.size(properties.getTaskLogResultLimit()); // Generate the actual request to send to ES. 
SearchRequest searchRequest = new SearchRequest(logIndexPrefix + "*"); searchRequest.source(searchSourceBuilder); SearchResponse response = elasticSearchClient.search(searchRequest, RequestOptions.DEFAULT); return mapTaskExecLogsResponse(response); } catch (Exception e) { logger.error("Failed to get task execution logs for task: {}", taskId, e); } return null; } private List<TaskExecLog> mapTaskExecLogsResponse(SearchResponse response) throws IOException { SearchHit[] hits = response.getHits().getHits(); List<TaskExecLog> logs = new ArrayList<>(hits.length); for (SearchHit hit : hits) { String source = hit.getSourceAsString(); TaskExecLog tel = objectMapper.readValue(source, TaskExecLog.class); logs.add(tel); } return logs; } @Override public List<Message> getMessages(String queue) { try { BoolQueryBuilder query = boolQueryBuilder("queue='" + queue + "'", "*"); // Create the searchObjectIdsViaExpression source SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder(); searchSourceBuilder.query(query); searchSourceBuilder.sort(new FieldSortBuilder("created").order(SortOrder.ASC)); // Generate the actual request to send to ES. 
SearchRequest searchRequest = new SearchRequest(messageIndexPrefix + "*"); searchRequest.source(searchSourceBuilder); SearchResponse response = elasticSearchClient.search(searchRequest, RequestOptions.DEFAULT); return mapGetMessagesResponse(response); } catch (Exception e) { logger.error("Failed to get messages for queue: {}", queue, e); } return null; } private List<Message> mapGetMessagesResponse(SearchResponse response) throws IOException { SearchHit[] hits = response.getHits().getHits(); TypeFactory factory = TypeFactory.defaultInstance(); MapType type = factory.constructMapType(HashMap.class, String.class, String.class); List<Message> messages = new ArrayList<>(hits.length); for (SearchHit hit : hits) { String source = hit.getSourceAsString(); Map<String, String> mapSource = objectMapper.readValue(source, type); Message msg = new Message(mapSource.get("messageId"), mapSource.get("payload"), null); messages.add(msg); } return messages; } @Override public List<EventExecution> getEventExecutions(String event) { try { BoolQueryBuilder query = boolQueryBuilder("event='" + event + "'", "*"); // Create the searchObjectIdsViaExpression source SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder(); searchSourceBuilder.query(query); searchSourceBuilder.sort(new FieldSortBuilder("created").order(SortOrder.ASC)); // Generate the actual request to send to ES. 
SearchRequest searchRequest = new SearchRequest(eventIndexPrefix + "*"); searchRequest.source(searchSourceBuilder); SearchResponse response = elasticSearchClient.search(searchRequest, RequestOptions.DEFAULT); return mapEventExecutionsResponse(response); } catch (Exception e) { logger.error("Failed to get executions for event: {}", event, e); } return null; } private List<EventExecution> mapEventExecutionsResponse(SearchResponse response) throws IOException { SearchHit[] hits = response.getHits().getHits(); List<EventExecution> executions = new ArrayList<>(hits.length); for (SearchHit hit : hits) { String source = hit.getSourceAsString(); EventExecution tel = objectMapper.readValue(source, EventExecution.class); executions.add(tel); } return executions; } @Override public void addMessage(String queue, Message message) { try { long startTime = Instant.now().toEpochMilli(); Map<String, Object> doc = new HashMap<>(); doc.put("messageId", message.getId()); doc.put("payload", message.getPayload()); doc.put("queue", queue); doc.put("created", System.currentTimeMillis()); indexObject(messageIndexName, MSG_DOC_TYPE, doc); long endTime = Instant.now().toEpochMilli(); logger.debug( "Time taken {} for indexing message: {}", endTime - startTime, message.getId()); Monitors.recordESIndexTime("add_message", MSG_DOC_TYPE, endTime - startTime); } catch (Exception e) { logger.error("Failed to index message: {}", message.getId(), e); } } @Override public CompletableFuture<Void> asyncAddMessage(String queue, Message message) { return CompletableFuture.runAsync(() -> addMessage(queue, message), executorService); } @Override public void addEventExecution(EventExecution eventExecution) { try { long startTime = Instant.now().toEpochMilli(); String id = eventExecution.getName() + "." + eventExecution.getEvent() + "." + eventExecution.getMessageId() + "." 
+ eventExecution.getId(); indexObject(eventIndexName, EVENT_DOC_TYPE, id, eventExecution); long endTime = Instant.now().toEpochMilli(); logger.debug( "Time taken {} for indexing event execution: {}", endTime - startTime, eventExecution.getId()); Monitors.recordESIndexTime("add_event_execution", EVENT_DOC_TYPE, endTime - startTime); Monitors.recordWorkerQueueSize( "logQueue", ((ThreadPoolExecutor) logExecutorService).getQueue().size()); } catch (Exception e) { logger.error("Failed to index event execution: {}", eventExecution.getId(), e); } } @Override public CompletableFuture<Void> asyncAddEventExecution(EventExecution eventExecution) { return CompletableFuture.runAsync( () -> addEventExecution(eventExecution), logExecutorService); } @Override public SearchResult<String> searchWorkflows( String query, String freeText, int start, int count, List<String> sort) { try { return searchObjectIdsViaExpression( query, start, count, sort, freeText, WORKFLOW_DOC_TYPE); } catch (Exception e) { throw new NonTransientException(e.getMessage(), e); } } @Override public SearchResult<WorkflowSummary> searchWorkflowSummary( String query, String freeText, int start, int count, List<String> sort) { try { return searchObjectsViaExpression( query, start, count, sort, freeText, WORKFLOW_DOC_TYPE, false, WorkflowSummary.class); } catch (Exception e) { throw new TransientException(e.getMessage(), e); } } private <T> SearchResult<T> searchObjectsViaExpression( String structuredQuery, int start, int size, List<String> sortOptions, String freeTextQuery, String docType, boolean idOnly, Class<T> clazz) throws ParserException, IOException { QueryBuilder queryBuilder = boolQueryBuilder(structuredQuery, freeTextQuery); return searchObjects( getIndexName(docType), queryBuilder, start, size, sortOptions, idOnly, clazz); } @Override public SearchResult<String> searchTasks( String query, String freeText, int start, int count, List<String> sort) { try { return searchObjectIdsViaExpression(query, start, 
count, sort, freeText, TASK_DOC_TYPE); } catch (Exception e) { throw new NonTransientException(e.getMessage(), e); } } @Override public SearchResult<TaskSummary> searchTaskSummary( String query, String freeText, int start, int count, List<String> sort) { try { return searchObjectsViaExpression( query, start, count, sort, freeText, TASK_DOC_TYPE, false, TaskSummary.class); } catch (Exception e) { throw new TransientException(e.getMessage(), e); } } @Override public void removeWorkflow(String workflowId) { long startTime = Instant.now().toEpochMilli(); DeleteRequest request = new DeleteRequest(workflowIndexName, workflowId); try { DeleteResponse response = elasticSearchClient.delete(request, RequestOptions.DEFAULT); if (response.getResult() == DocWriteResponse.Result.NOT_FOUND) { logger.error("Index removal failed - document not found by id: {}", workflowId); } long endTime = Instant.now().toEpochMilli(); logger.debug( "Time taken {} for removing workflow: {}", endTime - startTime, workflowId); Monitors.recordESIndexTime("remove_workflow", WORKFLOW_DOC_TYPE, endTime - startTime); Monitors.recordWorkerQueueSize( "indexQueue", ((ThreadPoolExecutor) executorService).getQueue().size()); } catch (IOException e) { logger.error("Failed to remove workflow {} from index", workflowId, e); Monitors.error(className, "remove"); } } @Override public CompletableFuture<Void> asyncRemoveWorkflow(String workflowId) { return CompletableFuture.runAsync(() -> removeWorkflow(workflowId), executorService); } @Override public void updateWorkflow(String workflowInstanceId, String[] keys, Object[] values) { try { if (keys.length != values.length) { throw new NonTransientException("Number of keys and values do not match"); } long startTime = Instant.now().toEpochMilli(); UpdateRequest request = new UpdateRequest(workflowIndexName, workflowInstanceId); Map<String, Object> source = IntStream.range(0, keys.length) .boxed() .collect(Collectors.toMap(i -> keys[i], i -> values[i])); 
request.doc(source); logger.debug("Updating workflow {} with {}", workflowInstanceId, source); elasticSearchClient.update(request, RequestOptions.DEFAULT); long endTime = Instant.now().toEpochMilli(); logger.debug( "Time taken {} for updating workflow: {}", endTime - startTime, workflowInstanceId); Monitors.recordESIndexTime("update_workflow", WORKFLOW_DOC_TYPE, endTime - startTime); Monitors.recordWorkerQueueSize( "indexQueue", ((ThreadPoolExecutor) executorService).getQueue().size()); } catch (Exception e) { logger.error("Failed to update workflow {}", workflowInstanceId, e); Monitors.error(className, "update"); } } @Override public void removeTask(String workflowId, String taskId) { long startTime = Instant.now().toEpochMilli(); SearchResult<String> taskSearchResult = searchTasks( String.format("(taskId='%s') AND (workflowId='%s')", taskId, workflowId), "*", 0, 1, null); if (taskSearchResult.getTotalHits() == 0) { logger.error("Task: {} does not belong to workflow: {}", taskId, workflowId); Monitors.error(className, "removeTask"); return; } DeleteRequest request = new DeleteRequest(taskIndexName, taskId); try { DeleteResponse response = elasticSearchClient.delete(request, RequestOptions.DEFAULT); if (response.getResult() != DocWriteResponse.Result.DELETED) { logger.error("Index removal failed - task not found by id: {}", workflowId); Monitors.error(className, "removeTask"); return; } long endTime = Instant.now().toEpochMilli(); logger.debug( "Time taken {} for removing task:{} of workflow: {}", endTime - startTime, taskId, workflowId); Monitors.recordESIndexTime("remove_task", "", endTime - startTime); Monitors.recordWorkerQueueSize( "indexQueue", ((ThreadPoolExecutor) executorService).getQueue().size()); } catch (IOException e) { logger.error( "Failed to remove task {} of workflow: {} from index", taskId, workflowId, e); Monitors.error(className, "removeTask"); } } @Override public CompletableFuture<Void> asyncRemoveTask(String workflowId, String taskId) { 
return CompletableFuture.runAsync(() -> removeTask(workflowId, taskId), executorService); } @Override public void updateTask(String workflowId, String taskId, String[] keys, Object[] values) { try { if (keys.length != values.length) { throw new IllegalArgumentException("Number of keys and values do not match"); } long startTime = Instant.now().toEpochMilli(); UpdateRequest request = new UpdateRequest(taskIndexName, taskId); Map<String, Object> source = IntStream.range(0, keys.length) .boxed() .collect(Collectors.toMap(i -> keys[i], i -> values[i])); request.doc(source); logger.debug("Updating task: {} of workflow: {} with {}", taskId, workflowId, source); elasticSearchClient.update(request, RequestOptions.DEFAULT); long endTime = Instant.now().toEpochMilli(); logger.debug( "Time taken {} for updating task: {} of workflow: {}", endTime - startTime, taskId, workflowId); Monitors.recordESIndexTime("update_task", "", endTime - startTime); Monitors.recordWorkerQueueSize( "indexQueue", ((ThreadPoolExecutor) executorService).getQueue().size()); } catch (Exception e) { logger.error("Failed to update task: {} of workflow: {}", taskId, workflowId, e); Monitors.error(className, "update"); } } @Override public CompletableFuture<Void> asyncUpdateTask( String workflowId, String taskId, String[] keys, Object[] values) { return CompletableFuture.runAsync( () -> updateTask(workflowId, taskId, keys, values), executorService); } @Override public CompletableFuture<Void> asyncUpdateWorkflow( String workflowInstanceId, String[] keys, Object[] values) { return CompletableFuture.runAsync( () -> updateWorkflow(workflowInstanceId, keys, values), executorService); } @Override public String get(String workflowInstanceId, String fieldToGet) { GetRequest request = new GetRequest(workflowIndexName, workflowInstanceId); GetResponse response; try { response = elasticSearchClient.get(request, RequestOptions.DEFAULT); } catch (IOException e) { logger.error( "Unable to get Workflow: {} from 
ElasticSearch index: {}", workflowInstanceId, workflowIndexName, e); return null; } if (response.isExists()) { Map<String, Object> sourceAsMap = response.getSourceAsMap(); if (sourceAsMap.get(fieldToGet) != null) { return sourceAsMap.get(fieldToGet).toString(); } } logger.debug( "Unable to find Workflow: {} in ElasticSearch index: {}.", workflowInstanceId, workflowIndexName); return null; } private SearchResult<String> searchObjectIdsViaExpression( String structuredQuery, int start, int size, List<String> sortOptions, String freeTextQuery, String docType) throws ParserException, IOException { QueryBuilder queryBuilder = boolQueryBuilder(structuredQuery, freeTextQuery); return searchObjectIds(getIndexName(docType), queryBuilder, start, size, sortOptions); } private <T> SearchResult<T> searchObjectIdsViaExpression( String structuredQuery, int start, int size, List<String> sortOptions, String freeTextQuery, String docType, Class<T> clazz) throws ParserException, IOException { QueryBuilder queryBuilder = boolQueryBuilder(structuredQuery, freeTextQuery); return searchObjects( getIndexName(docType), queryBuilder, start, size, sortOptions, false, clazz); } private SearchResult<String> searchObjectIds( String indexName, QueryBuilder queryBuilder, int start, int size) throws IOException { return searchObjectIds(indexName, queryBuilder, start, size, null); } /** * Tries to find object ids for a given query in an index. * * @param indexName The name of the index. * @param queryBuilder The query to use for searching. * @param start The start to use. * @param size The total return size. * @param sortOptions A list of string options to sort in the form VALUE:ORDER; where ORDER is * optional and can be either ASC OR DESC. * @return The SearchResults which includes the count and IDs that were found. * @throws IOException If we cannot communicate with ES. 
*/ private SearchResult<String> searchObjectIds( String indexName, QueryBuilder queryBuilder, int start, int size, List<String> sortOptions) throws IOException { SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder(); searchSourceBuilder.query(queryBuilder); searchSourceBuilder.from(start); searchSourceBuilder.size(size); if (sortOptions != null && !sortOptions.isEmpty()) { for (String sortOption : sortOptions) { SortOrder order = SortOrder.ASC; String field = sortOption; int index = sortOption.indexOf(":"); if (index > 0) { field = sortOption.substring(0, index); order = SortOrder.valueOf(sortOption.substring(index + 1)); } searchSourceBuilder.sort(new FieldSortBuilder(field).order(order)); } } // Generate the actual request to send to ES. SearchRequest searchRequest = new SearchRequest(indexName); searchRequest.source(searchSourceBuilder); SearchResponse response = elasticSearchClient.search(searchRequest, RequestOptions.DEFAULT); List<String> result = new LinkedList<>(); response.getHits().forEach(hit -> result.add(hit.getId())); long count = response.getHits().getTotalHits().value; return new SearchResult<>(count, result); } private <T> SearchResult<T> searchObjects( String indexName, QueryBuilder queryBuilder, int start, int size, List<String> sortOptions, boolean idOnly, Class<T> clazz) throws IOException { SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder(); searchSourceBuilder.query(queryBuilder); searchSourceBuilder.from(start); searchSourceBuilder.size(size); if (idOnly) { searchSourceBuilder.fetchSource(false); } if (sortOptions != null && !sortOptions.isEmpty()) { for (String sortOption : sortOptions) { SortOrder order = SortOrder.ASC; String field = sortOption; int index = sortOption.indexOf(":"); if (index > 0) { field = sortOption.substring(0, index); order = SortOrder.valueOf(sortOption.substring(index + 1)); } searchSourceBuilder.sort(new FieldSortBuilder(field).order(order)); } } // Generate the actual request to send 
to ES. SearchRequest searchRequest = new SearchRequest(indexName); searchRequest.source(searchSourceBuilder); SearchResponse response = elasticSearchClient.search(searchRequest, RequestOptions.DEFAULT); return mapSearchResult(response, idOnly, clazz); } private <T> SearchResult<T> mapSearchResult( SearchResponse response, boolean idOnly, Class<T> clazz) { SearchHits searchHits = response.getHits(); long count = searchHits.getTotalHits().value; List<T> result; if (idOnly) { result = Arrays.stream(searchHits.getHits()) .map(hit -> clazz.cast(hit.getId())) .collect(Collectors.toList()); } else { result = Arrays.stream(searchHits.getHits()) .map( hit -> { try { return objectMapper.readValue( hit.getSourceAsString(), clazz); } catch (JsonProcessingException e) { logger.error( "Failed to de-serialize elasticsearch from source: {}", hit.getSourceAsString(), e); } return null; }) .collect(Collectors.toList()); } return new SearchResult<>(count, result); } @Override public List<String> searchArchivableWorkflows(String indexName, long archiveTtlDays) { QueryBuilder q = QueryBuilders.boolQuery() .must( QueryBuilders.rangeQuery("endTime") .lt(LocalDate.now().minusDays(archiveTtlDays).toString()) .gte( LocalDate.now() .minusDays(archiveTtlDays) .minusDays(1) .toString())) .should(QueryBuilders.termQuery("status", "COMPLETED")) .should(QueryBuilders.termQuery("status", "FAILED")) .should(QueryBuilders.termQuery("status", "TIMED_OUT")) .should(QueryBuilders.termQuery("status", "TERMINATED")) .mustNot(QueryBuilders.existsQuery("archived")) .minimumShouldMatch(1); SearchResult<String> workflowIds; try { workflowIds = searchObjectIds(indexName, q, 0, 1000); } catch (IOException e) { logger.error("Unable to communicate with ES to find archivable workflows", e); return Collections.emptyList(); } return workflowIds.getResults(); } @Override public long getWorkflowCount(String query, String freeText) { try { return getObjectCounts(query, freeText, WORKFLOW_DOC_TYPE); } catch (Exception 
e) { throw new NonTransientException(e.getMessage(), e); } } private long getObjectCounts(String structuredQuery, String freeTextQuery, String docType) throws ParserException, IOException { QueryBuilder queryBuilder = boolQueryBuilder(structuredQuery, freeTextQuery); String indexName = getIndexName(docType); CountRequest countRequest = new CountRequest(new String[] {indexName}, queryBuilder); CountResponse countResponse = elasticSearchClient.count(countRequest, RequestOptions.DEFAULT); return countResponse.getCount(); } public List<String> searchRecentRunningWorkflows( int lastModifiedHoursAgoFrom, int lastModifiedHoursAgoTo) { DateTime dateTime = new DateTime(); QueryBuilder q = QueryBuilders.boolQuery() .must( QueryBuilders.rangeQuery("updateTime") .gt(dateTime.minusHours(lastModifiedHoursAgoFrom))) .must( QueryBuilders.rangeQuery("updateTime") .lt(dateTime.minusHours(lastModifiedHoursAgoTo))) .must(QueryBuilders.termQuery("status", "RUNNING")); SearchResult<String> workflowIds; try { workflowIds = searchObjectIds( workflowIndexName, q, 0, 5000, Collections.singletonList("updateTime:ASC")); } catch (IOException e) { logger.error("Unable to communicate with ES to find recent running workflows", e); return Collections.emptyList(); } return workflowIds.getResults(); } private void indexObject(final String index, final String docType, final Object doc) { indexObject(index, docType, null, doc); } private void indexObject( final String index, final String docType, final String docId, final Object doc) { byte[] docBytes; try { docBytes = objectMapper.writeValueAsBytes(doc); } catch (JsonProcessingException e) { logger.error("Failed to convert {} '{}' to byte string", docType, docId); return; } IndexRequest request = new IndexRequest(index); request.id(docId).source(docBytes, XContentType.JSON); if (bulkRequests.get(docType) == null) { bulkRequests.put( docType, new BulkRequests(System.currentTimeMillis(), new BulkRequest())); } 
bulkRequests.get(docType).getBulkRequest().add(request); if (bulkRequests.get(docType).getBulkRequest().numberOfActions() >= this.indexBatchSize) { indexBulkRequest(docType); } } private synchronized void indexBulkRequest(String docType) { if (bulkRequests.get(docType).getBulkRequest() != null && bulkRequests.get(docType).getBulkRequest().numberOfActions() > 0) { synchronized (bulkRequests.get(docType).getBulkRequest()) { indexWithRetry( bulkRequests.get(docType).getBulkRequest().get(), "Bulk Indexing " + docType, docType); bulkRequests.put( docType, new BulkRequests(System.currentTimeMillis(), new BulkRequest())); } } } /** * Performs an index operation with a retry. * * @param request The index request that we want to perform. * @param operationDescription The type of operation that we are performing. */ private void indexWithRetry( final BulkRequest request, final String operationDescription, String docType) { try { long startTime = Instant.now().toEpochMilli(); retryTemplate.execute( context -> elasticSearchClient.bulk(request, RequestOptions.DEFAULT)); long endTime = Instant.now().toEpochMilli(); logger.debug( "Time taken {} for indexing object of type: {}", endTime - startTime, docType); Monitors.recordESIndexTime("index_object", docType, endTime - startTime); Monitors.recordWorkerQueueSize( "indexQueue", ((ThreadPoolExecutor) executorService).getQueue().size()); Monitors.recordWorkerQueueSize( "logQueue", ((ThreadPoolExecutor) logExecutorService).getQueue().size()); } catch (Exception e) { Monitors.error(className, "index"); logger.error("Failed to index {} for request type: {}", request, docType, e); } } /** * Flush the buffers if bulk requests have not been indexed for the past {@link * ElasticSearchProperties#getAsyncBufferFlushTimeout()} seconds This is to prevent data loss in * case the instance is terminated, while the buffer still holds documents to be indexed. 
*/ private void flushBulkRequests() { bulkRequests.entrySet().stream() .filter( entry -> (System.currentTimeMillis() - entry.getValue().getLastFlushTime()) >= asyncBufferFlushTimeout * 1000L) .filter( entry -> entry.getValue().getBulkRequest() != null && entry.getValue().getBulkRequest().numberOfActions() > 0) .forEach( entry -> { logger.debug( "Flushing bulk request buffer for type {}, size: {}", entry.getKey(), entry.getValue().getBulkRequest().numberOfActions()); indexBulkRequest(entry.getKey()); }); } private static class BulkRequests { private final long lastFlushTime; private final BulkRequestWrapper bulkRequest; long getLastFlushTime() { return lastFlushTime; } BulkRequestWrapper getBulkRequest() { return bulkRequest; } BulkRequests(long lastFlushTime, BulkRequest bulkRequest) { this.lastFlushTime = lastFlushTime; this.bulkRequest = new BulkRequestWrapper(bulkRequest); } } }
8,227
0
Create_ds/conductor-community/index/es7-persistence/src/main/java/com/netflix/conductor/es7/dao
Create_ds/conductor-community/index/es7-persistence/src/main/java/com/netflix/conductor/es7/dao/index/BulkRequestBuilderWrapper.java
/* * <p> * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * <p> * http://www.apache.org/licenses/LICENSE-2.0 * <p> * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. */ package com.netflix.conductor.es7.dao.index; import java.util.Objects; import org.elasticsearch.action.ActionFuture; import org.elasticsearch.action.bulk.BulkRequestBuilder; import org.elasticsearch.action.bulk.BulkResponse; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.update.UpdateRequest; import org.springframework.lang.NonNull; /** Thread-safe wrapper for {@link BulkRequestBuilder}. */ public class BulkRequestBuilderWrapper { private final BulkRequestBuilder bulkRequestBuilder; public BulkRequestBuilderWrapper(@NonNull BulkRequestBuilder bulkRequestBuilder) { this.bulkRequestBuilder = Objects.requireNonNull(bulkRequestBuilder); } public void add(@NonNull UpdateRequest req) { synchronized (bulkRequestBuilder) { bulkRequestBuilder.add(Objects.requireNonNull(req)); } } public void add(@NonNull IndexRequest req) { synchronized (bulkRequestBuilder) { bulkRequestBuilder.add(Objects.requireNonNull(req)); } } public int numberOfActions() { synchronized (bulkRequestBuilder) { return bulkRequestBuilder.numberOfActions(); } } public ActionFuture<BulkResponse> execute() { synchronized (bulkRequestBuilder) { return bulkRequestBuilder.execute(); } } }
8,228
0
Create_ds/conductor-community/index/es7-persistence/src/main/java/com/netflix/conductor/es7/dao
Create_ds/conductor-community/index/es7-persistence/src/main/java/com/netflix/conductor/es7/dao/index/ElasticSearchBaseDAO.java
/* * <p> * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * <p> * http://www.apache.org/licenses/LICENSE-2.0 * <p> * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. */ package com.netflix.conductor.es7.dao.index; import java.io.IOException; import java.util.ArrayList; import org.apache.commons.io.IOUtils; import org.apache.commons.lang3.StringUtils; import org.elasticsearch.index.query.BoolQueryBuilder; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.index.query.QueryStringQueryBuilder; import com.netflix.conductor.dao.IndexDAO; import com.netflix.conductor.es7.dao.query.parser.Expression; import com.netflix.conductor.es7.dao.query.parser.internal.ParserException; import com.fasterxml.jackson.core.JsonProcessingException; import com.fasterxml.jackson.databind.JsonNode; import com.fasterxml.jackson.databind.ObjectMapper; import com.fasterxml.jackson.databind.node.ObjectNode; abstract class ElasticSearchBaseDAO implements IndexDAO { String indexPrefix; ObjectMapper objectMapper; String loadTypeMappingSource(String path) throws IOException { return applyIndexPrefixToTemplate( IOUtils.toString(ElasticSearchBaseDAO.class.getResourceAsStream(path))); } private String applyIndexPrefixToTemplate(String text) throws JsonProcessingException { String indexPatternsFieldName = "index_patterns"; JsonNode root = objectMapper.readTree(text); if (root != null) { JsonNode indexPatternsNodeValue = root.get(indexPatternsFieldName); if (indexPatternsNodeValue != null && indexPatternsNodeValue.isArray()) { ArrayList<String> 
patternsWithPrefix = new ArrayList<>(); indexPatternsNodeValue.forEach( v -> { String patternText = v.asText(); StringBuilder sb = new StringBuilder(); if (patternText.startsWith("*")) { sb.append("*") .append(indexPrefix) .append("_") .append(patternText.substring(1)); } else { sb.append(indexPrefix).append("_").append(patternText); } patternsWithPrefix.add(sb.toString()); }); ((ObjectNode) root) .set(indexPatternsFieldName, objectMapper.valueToTree(patternsWithPrefix)); System.out.println( objectMapper.writerWithDefaultPrettyPrinter().writeValueAsString(root)); return objectMapper.writerWithDefaultPrettyPrinter().writeValueAsString(root); } } return text; } BoolQueryBuilder boolQueryBuilder(String expression, String queryString) throws ParserException { QueryBuilder queryBuilder = QueryBuilders.matchAllQuery(); if (StringUtils.isNotEmpty(expression)) { Expression exp = Expression.fromString(expression); queryBuilder = exp.getFilterBuilder(); } BoolQueryBuilder filterQuery = QueryBuilders.boolQuery().must(queryBuilder); QueryStringQueryBuilder stringQuery = QueryBuilders.queryStringQuery(queryString); return QueryBuilders.boolQuery().must(stringQuery).must(filterQuery); } protected String getIndexName(String documentType) { return indexPrefix + "_" + documentType; } }
8,229
0
Create_ds/conductor-community/index/es7-persistence/src/main/java/com/netflix/conductor/es7/dao/query
Create_ds/conductor-community/index/es7-persistence/src/main/java/com/netflix/conductor/es7/dao/query/parser/GroupedExpression.java
/* * <p> * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * <p> * http://www.apache.org/licenses/LICENSE-2.0 * <p> * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. */ package com.netflix.conductor.es7.dao.query.parser; import java.io.InputStream; import org.elasticsearch.index.query.QueryBuilder; import com.netflix.conductor.es7.dao.query.parser.internal.AbstractNode; import com.netflix.conductor.es7.dao.query.parser.internal.ParserException; /** * @author Viren */ public class GroupedExpression extends AbstractNode implements FilterProvider { private Expression expression; public GroupedExpression(InputStream is) throws ParserException { super(is); } @Override protected void _parse() throws Exception { byte[] peeked = read(1); assertExpected(peeked, "("); this.expression = new Expression(is); peeked = read(1); assertExpected(peeked, ")"); } @Override public String toString() { return "(" + expression + ")"; } /** * @return the expression */ public Expression getExpression() { return expression; } @Override public QueryBuilder getFilterBuilder() { return expression.getFilterBuilder(); } }
8,230
0
Create_ds/conductor-community/index/es7-persistence/src/main/java/com/netflix/conductor/es7/dao/query
Create_ds/conductor-community/index/es7-persistence/src/main/java/com/netflix/conductor/es7/dao/query/parser/FilterProvider.java
/*
 * <p>
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
 * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations under the License.
 */
package com.netflix.conductor.es7.dao.query.parser;

import org.elasticsearch.index.query.QueryBuilder;

/**
 * Implemented by parser nodes that can translate themselves into an Elasticsearch query.
 *
 * @author Viren
 */
public interface FilterProvider {

    /**
     * @return the Elasticsearch {@link QueryBuilder} equivalent of this node
     */
    QueryBuilder getFilterBuilder();
}
8,231
0
Create_ds/conductor-community/index/es7-persistence/src/main/java/com/netflix/conductor/es7/dao/query
Create_ds/conductor-community/index/es7-persistence/src/main/java/com/netflix/conductor/es7/dao/query/parser/NameValue.java
/* * <p> * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * <p> * http://www.apache.org/licenses/LICENSE-2.0 * <p> * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. */ package com.netflix.conductor.es7.dao.query.parser; import java.io.InputStream; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryBuilders; import com.netflix.conductor.es7.dao.query.parser.internal.AbstractNode; import com.netflix.conductor.es7.dao.query.parser.internal.ComparisonOp; import com.netflix.conductor.es7.dao.query.parser.internal.ComparisonOp.Operators; import com.netflix.conductor.es7.dao.query.parser.internal.ConstValue; import com.netflix.conductor.es7.dao.query.parser.internal.ListConst; import com.netflix.conductor.es7.dao.query.parser.internal.Name; import com.netflix.conductor.es7.dao.query.parser.internal.ParserException; import com.netflix.conductor.es7.dao.query.parser.internal.Range; /** * @author Viren * <pre> * Represents an expression of the form as below: * key OPR value * OPR is the comparison operator which could be on the following: * &gt;, &lt;, = , !=, IN, BETWEEN * </pre> */ public class NameValue extends AbstractNode implements FilterProvider { private Name name; private ComparisonOp op; private ConstValue value; private Range range; private ListConst valueList; public NameValue(InputStream is) throws ParserException { super(is); } @Override protected void _parse() throws Exception { this.name = new Name(is); this.op = new ComparisonOp(is); if (this.op.getOperator().equals(Operators.BETWEEN.value())) { this.range = new Range(is); } if 
(this.op.getOperator().equals(Operators.IN.value())) { this.valueList = new ListConst(is); } else { this.value = new ConstValue(is); } } @Override public String toString() { return "" + name + op + value; } /** * @return the name */ public Name getName() { return name; } /** * @return the op */ public ComparisonOp getOp() { return op; } /** * @return the value */ public ConstValue getValue() { return value; } @Override public QueryBuilder getFilterBuilder() { if (op.getOperator().equals(Operators.EQUALS.value())) { return QueryBuilders.queryStringQuery( name.getName() + ":" + value.getValue().toString()); } else if (op.getOperator().equals(Operators.BETWEEN.value())) { return QueryBuilders.rangeQuery(name.getName()) .from(range.getLow()) .to(range.getHigh()); } else if (op.getOperator().equals(Operators.IN.value())) { return QueryBuilders.termsQuery(name.getName(), valueList.getList()); } else if (op.getOperator().equals(Operators.NOT_EQUALS.value())) { return QueryBuilders.queryStringQuery( "NOT " + name.getName() + ":" + value.getValue().toString()); } else if (op.getOperator().equals(Operators.GREATER_THAN.value())) { return QueryBuilders.rangeQuery(name.getName()) .from(value.getValue()) .includeLower(false) .includeUpper(false); } else if (op.getOperator().equals(Operators.IS.value())) { if (value.getSysConstant().equals(ConstValue.SystemConsts.NULL)) { return QueryBuilders.boolQuery() .mustNot( QueryBuilders.boolQuery() .must(QueryBuilders.matchAllQuery()) .mustNot(QueryBuilders.existsQuery(name.getName()))); } else if (value.getSysConstant().equals(ConstValue.SystemConsts.NOT_NULL)) { return QueryBuilders.boolQuery() .mustNot( QueryBuilders.boolQuery() .must(QueryBuilders.matchAllQuery()) .must(QueryBuilders.existsQuery(name.getName()))); } } else if (op.getOperator().equals(Operators.LESS_THAN.value())) { return QueryBuilders.rangeQuery(name.getName()) .to(value.getValue()) .includeLower(false) .includeUpper(false); } else if 
(op.getOperator().equals(Operators.STARTS_WITH.value())) { return QueryBuilders.prefixQuery(name.getName(), value.getUnquotedValue()); } throw new IllegalStateException("Incorrect/unsupported operators"); } }
8,232
0
Create_ds/conductor-community/index/es7-persistence/src/main/java/com/netflix/conductor/es7/dao/query
Create_ds/conductor-community/index/es7-persistence/src/main/java/com/netflix/conductor/es7/dao/query/parser/Expression.java
/* * <p> * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * <p> * http://www.apache.org/licenses/LICENSE-2.0 * <p> * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. */ package com.netflix.conductor.es7.dao.query.parser; import java.io.BufferedInputStream; import java.io.ByteArrayInputStream; import java.io.InputStream; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryBuilders; import com.netflix.conductor.es7.dao.query.parser.internal.AbstractNode; import com.netflix.conductor.es7.dao.query.parser.internal.BooleanOp; import com.netflix.conductor.es7.dao.query.parser.internal.ParserException; /** * @author Viren */ public class Expression extends AbstractNode implements FilterProvider { private NameValue nameVal; private GroupedExpression ge; private BooleanOp op; private Expression rhs; public Expression(InputStream is) throws ParserException { super(is); } @Override protected void _parse() throws Exception { byte[] peeked = peek(1); if (peeked[0] == '(') { this.ge = new GroupedExpression(is); } else { this.nameVal = new NameValue(is); } peeked = peek(3); if (isBoolOpr(peeked)) { // we have an expression next this.op = new BooleanOp(is); this.rhs = new Expression(is); } } public boolean isBinaryExpr() { return this.op != null; } public BooleanOp getOperator() { return this.op; } public Expression getRightHandSide() { return this.rhs; } public boolean isNameValue() { return this.nameVal != null; } public NameValue getNameValue() { return this.nameVal; } public GroupedExpression getGroupedExpression() { return this.ge; } @Override public 
QueryBuilder getFilterBuilder() { QueryBuilder lhs = null; if (nameVal != null) { lhs = nameVal.getFilterBuilder(); } else { lhs = ge.getFilterBuilder(); } if (this.isBinaryExpr()) { QueryBuilder rhsFilter = rhs.getFilterBuilder(); if (this.op.isAnd()) { return QueryBuilders.boolQuery().must(lhs).must(rhsFilter); } else { return QueryBuilders.boolQuery().should(lhs).should(rhsFilter); } } else { return lhs; } } @Override public String toString() { if (isBinaryExpr()) { return "" + (nameVal == null ? ge : nameVal) + op + rhs; } else { return "" + (nameVal == null ? ge : nameVal); } } public static Expression fromString(String value) throws ParserException { return new Expression(new BufferedInputStream(new ByteArrayInputStream(value.getBytes()))); } }
8,233
0
Create_ds/conductor-community/index/es7-persistence/src/main/java/com/netflix/conductor/es7/dao/query/parser
Create_ds/conductor-community/index/es7-persistence/src/main/java/com/netflix/conductor/es7/dao/query/parser/internal/BooleanOp.java
/*
 * <p>
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
 * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations under the License.
 */
package com.netflix.conductor.es7.dao.query.parser.internal;

import java.io.InputStream;

/**
 * Parser node for the boolean operators {@code AND} and {@code OR}.
 *
 * @author Viren
 */
public class BooleanOp extends AbstractNode {

    private String value;

    public BooleanOp(InputStream is) throws ParserException {
        super(is);
    }

    @Override
    protected void _parse() throws Exception {
        // Peek ahead without consuming: the two operators have distinct prefixes.
        byte[] ahead = peek(3);
        if (ahead.length > 2 && ahead[0] == 'A' && ahead[1] == 'N' && ahead[2] == 'D') {
            this.value = "AND";
        } else if (ahead.length > 1 && ahead[0] == 'O' && ahead[1] == 'R') {
            this.value = "OR";
        } else {
            throw new ParserException("No valid boolean operator found...");
        }
        // Now actually consume exactly the operator text.
        read(this.value.length());
    }

    @Override
    public String toString() {
        return " " + value + " ";
    }

    /** @return the operator text, "AND" or "OR" */
    public String getOperator() {
        return value;
    }

    public boolean isAnd() {
        return "AND".equals(value);
    }

    public boolean isOr() {
        return "OR".equals(value);
    }
}
8,234
0
Create_ds/conductor-community/index/es7-persistence/src/main/java/com/netflix/conductor/es7/dao/query/parser
Create_ds/conductor-community/index/es7-persistence/src/main/java/com/netflix/conductor/es7/dao/query/parser/internal/ConstValue.java
/* * <p> * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * <p> * http://www.apache.org/licenses/LICENSE-2.0 * <p> * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. */ package com.netflix.conductor.es7.dao.query.parser.internal; import java.io.InputStream; /** * @author Viren Constant value can be: * <ol> * <li>List of values (a,b,c) * <li>Range of values (m AND n) * <li>A value (x) * <li>A value is either a string or a number * </ol> */ public class ConstValue extends AbstractNode { public static enum SystemConsts { NULL("null"), NOT_NULL("not null"); private String value; SystemConsts(String value) { this.value = value; } public String value() { return value; } } private static String QUOTE = "\""; private Object value; private SystemConsts sysConsts; public ConstValue(InputStream is) throws ParserException { super(is); } @Override protected void _parse() throws Exception { byte[] peeked = peek(4); String sp = new String(peeked).trim(); // Read a constant value (number or a string) if (peeked[0] == '"' || peeked[0] == '\'') { this.value = readString(is); } else if (sp.toLowerCase().startsWith("not")) { this.value = SystemConsts.NOT_NULL.value(); sysConsts = SystemConsts.NOT_NULL; read(SystemConsts.NOT_NULL.value().length()); } else if (sp.equalsIgnoreCase(SystemConsts.NULL.value())) { this.value = SystemConsts.NULL.value(); sysConsts = SystemConsts.NULL; read(SystemConsts.NULL.value().length()); } else { this.value = readNumber(is); } } private String readNumber(InputStream is) throws Exception { StringBuilder sb = new StringBuilder(); while (is.available() > 0) { is.mark(1); char c = 
(char) is.read(); if (!isNumeric(c)) { is.reset(); break; } else { sb.append(c); } } String numValue = sb.toString().trim(); return numValue; } /** * Reads an escaped string * * @throws Exception */ private String readString(InputStream is) throws Exception { char delim = (char) read(1)[0]; StringBuilder sb = new StringBuilder(); boolean valid = false; while (is.available() > 0) { char c = (char) is.read(); if (c == delim) { valid = true; break; } else if (c == '\\') { // read the next character as part of the value c = (char) is.read(); sb.append(c); } else { sb.append(c); } } if (!valid) { throw new ParserException( "String constant is not quoted with <" + delim + "> : " + sb.toString()); } return QUOTE + sb.toString() + QUOTE; } public Object getValue() { return value; } @Override public String toString() { return "" + value; } public String getUnquotedValue() { String result = toString(); if (result.length() >= 2 && result.startsWith(QUOTE) && result.endsWith(QUOTE)) { result = result.substring(1, result.length() - 1); } return result; } public boolean isSysConstant() { return this.sysConsts != null; } public SystemConsts getSysConstant() { return this.sysConsts; } }
8,235
0
Create_ds/conductor-community/index/es7-persistence/src/main/java/com/netflix/conductor/es7/dao/query/parser
Create_ds/conductor-community/index/es7-persistence/src/main/java/com/netflix/conductor/es7/dao/query/parser/internal/ParserException.java
/*
 * <p>
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
 * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations under the License.
 */
package com.netflix.conductor.es7.dao.query.parser.internal;

/**
 * Checked exception raised when a query expression cannot be parsed.
 *
 * @author Viren
 */
@SuppressWarnings("serial")
public class ParserException extends Exception {

    /** @param message description of the parse failure */
    public ParserException(String message) {
        super(message);
    }

    /**
     * @param message description of the parse failure
     * @param cause the underlying error that triggered it
     */
    public ParserException(String message, Throwable cause) {
        super(message, cause);
    }
}
8,236
0
Create_ds/conductor-community/index/es7-persistence/src/main/java/com/netflix/conductor/es7/dao/query/parser
Create_ds/conductor-community/index/es7-persistence/src/main/java/com/netflix/conductor/es7/dao/query/parser/internal/AbstractNode.java
/*
 * <p>
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
 * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations under the License.
 */
package com.netflix.conductor.es7.dao.query.parser.internal;

import java.io.InputStream;
import java.math.BigDecimal;
import java.util.HashSet;
import java.util.Set;
import java.util.regex.Pattern;

/**
 * Base class for all parse-tree nodes of the search query grammar. A node parses itself eagerly
 * from the supplied {@link InputStream} in its constructor via the subclass hook {@link #_parse()}.
 *
 * <p>The stream must support {@link InputStream#mark(int)}/{@link InputStream#reset()}, since
 * {@link #peek(int)} relies on mark/reset to look ahead without consuming bytes.
 *
 * @author Viren
 */
public abstract class AbstractNode {

    public static final Pattern WHITESPACE = Pattern.compile("\\s");

    /** Single-character comparison operators recognized by {@link #isComparisonOpr(byte[])}. */
    protected static Set<Character> comparisonOprs = new HashSet<Character>();

    static {
        comparisonOprs.add('>');
        comparisonOprs.add('<');
        comparisonOprs.add('=');
    }

    protected InputStream is;

    /**
     * Parses this node immediately from the given stream.
     *
     * @param is mark/reset-capable stream positioned at the start of this node
     * @throws ParserException if the stream content does not match this node's grammar
     */
    protected AbstractNode(InputStream is) throws ParserException {
        this.is = is;
        this.parse();
    }

    /** Returns true if {@code test} parses as a number (any value accepted by BigDecimal). */
    protected boolean isNumber(String test) {
        try {
            // If you can convert to a big decimal value, then it is a number.
            new BigDecimal(test);
            return true;
        } catch (NumberFormatException e) {
            // Ignore
        }
        return false;
    }

    /** Returns true if the buffer starts with a boolean operator token ("OR" or "AND"). */
    protected boolean isBoolOpr(byte[] buffer) {
        if (buffer.length > 1 && buffer[0] == 'O' && buffer[1] == 'R') {
            return true;
        } else if (buffer.length > 2 && buffer[0] == 'A' && buffer[1] == 'N' && buffer[2] == 'D') {
            return true;
        }
        return false;
    }

    /**
     * Returns true if the buffer starts with a comparison operator token ("IN", "!=", or one of
     * {@link #comparisonOprs}).
     */
    protected boolean isComparisonOpr(byte[] buffer) {
        // FIX: guard buffer.length before reading buffer[1]; the original threw
        // ArrayIndexOutOfBoundsException on a 1-byte buffer (sibling isBoolOpr already guards).
        if (buffer.length > 1 && buffer[0] == 'I' && buffer[1] == 'N') {
            return true;
        } else if (buffer.length > 1 && buffer[0] == '!' && buffer[1] == '=') {
            return true;
        } else {
            return comparisonOprs.contains((char) buffer[0]);
        }
    }

    /** Reads {@code length} bytes without consuming them (mark/reset based look-ahead). */
    protected byte[] peek(int length) throws Exception {
        return read(length, true);
    }

    /** Reads and consumes {@code length} bytes. */
    protected byte[] read(int length) throws Exception {
        return read(length, false);
    }

    /**
     * Reads a bare token: skips leading whitespace, then consumes characters until whitespace or a
     * comparison-operator character (=, >, <, !) is seen. Operator characters are left unread.
     */
    protected String readToken() throws Exception {
        skipWhitespace();
        StringBuilder sb = new StringBuilder();
        while (is.available() > 0) {
            char c = (char) peek(1)[0];
            if (c == ' ' || c == '\t' || c == '\n' || c == '\r') {
                is.skip(1);
                break;
            } else if (c == '=' || c == '>' || c == '<' || c == '!') {
                // do not skip
                break;
            }
            sb.append(c);
            is.skip(1);
        }
        return sb.toString().trim();
    }

    /** Returns true for characters that may appear in a numeric literal (digits, '-', 'e', '.'). */
    protected boolean isNumeric(char c) {
        if (c == '-' || c == 'e' || (c >= '0' && c <= '9') || c == '.') {
            return true;
        }
        return false;
    }

    protected void assertExpected(byte[] found, String expected) throws ParserException {
        assertExpected(new String(found), expected);
    }

    protected void assertExpected(String found, String expected) throws ParserException {
        if (!found.equals(expected)) {
            throw new ParserException("Expected " + expected + ", found " + found);
        }
    }

    protected void assertExpected(char found, char expected) throws ParserException {
        if (found != expected) {
            throw new ParserException("Expected " + expected + ", found " + found);
        }
    }

    /** Runs {@code consumer} for each index in [0, length), propagating checked exceptions. */
    protected static void efor(int length, FunctionThrowingException<Integer> consumer)
            throws Exception {
        for (int i = 0; i < length; i++) {
            consumer.accept(i);
        }
    }

    /** Subclass hook: parse this node's content from {@link #is}. */
    protected abstract void _parse() throws Exception;

    // Public stuff here
    private void parse() throws ParserException {
        // skip white spaces
        skipWhitespace();
        try {
            _parse();
        } catch (Exception e) {
            // FIX: removed leftover System.out.println debug trace; the exception itself
            // carries the failing node's context.
            if (!(e instanceof ParserException)) {
                throw new ParserException("Error parsing", e);
            } else {
                throw (ParserException) e;
            }
        }
        skipWhitespace();
    }

    // Private methods

    /** Core read: when {@code peekOnly}, the stream position is restored via mark/reset. */
    private byte[] read(int length, boolean peekOnly) throws Exception {
        byte[] buf = new byte[length];
        if (peekOnly) {
            is.mark(length);
        }
        efor(length, (Integer c) -> buf[c] = (byte) is.read());
        if (peekOnly) {
            is.reset();
        }
        return buf;
    }

    /** Consumes any run of spaces, tabs, and line breaks at the current position. */
    protected void skipWhitespace() throws ParserException {
        try {
            while (is.available() > 0) {
                byte c = peek(1)[0];
                if (c == ' ' || c == '\t' || c == '\n' || c == '\r') {
                    // skip
                    read(1);
                } else {
                    break;
                }
            }
        } catch (Exception e) {
            throw new ParserException(e.getMessage(), e);
        }
    }
}
8,237
0
Create_ds/conductor-community/index/es7-persistence/src/main/java/com/netflix/conductor/es7/dao/query/parser
Create_ds/conductor-community/index/es7-persistence/src/main/java/com/netflix/conductor/es7/dao/query/parser/internal/FunctionThrowingException.java
/*
 * <p>
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
 * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations under the License.
 */
package com.netflix.conductor.es7.dao.query.parser.internal;

/**
 * A {@link java.util.function.Consumer}-like functional interface whose body is allowed to throw a
 * checked {@link Exception}. Used by the parser's byte-reading loops so that stream I/O exceptions
 * can propagate to the caller.
 *
 * @param <T> type of the accepted argument
 * @author Viren
 */
@FunctionalInterface
public interface FunctionThrowingException<T> {

    /**
     * Performs this operation on the given argument.
     *
     * @param t the input argument
     * @throws Exception if the operation fails
     */
    void accept(T t) throws Exception;
}
8,238
0
Create_ds/conductor-community/index/es7-persistence/src/main/java/com/netflix/conductor/es7/dao/query/parser
Create_ds/conductor-community/index/es7-persistence/src/main/java/com/netflix/conductor/es7/dao/query/parser/internal/ListConst.java
/*
 * <p>
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
 * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations under the License.
 */
package com.netflix.conductor.es7.dao.query.parser.internal;

import java.io.InputStream;
import java.util.LinkedList;
import java.util.List;

/**
 * Parse-tree node for a parenthesized, comma-separated list of constants, e.g. {@code (a, b, c)}
 * as used with the {@code IN} operator.
 *
 * @author Viren List of constants
 */
public class ListConst extends AbstractNode {

    private List<Object> values;

    public ListConst(InputStream is) throws ParserException {
        super(is);
    }

    @Override
    protected void _parse() throws Exception {
        // The list must open with '('; readList() consumes up to and including the ')'.
        assertExpected(read(1), "(");
        this.values = readList();
    }

    /**
     * Consumes characters until the closing ')' is found, splitting elements on commas. Each
     * element is trimmed of surrounding whitespace.
     *
     * @throws ParserException if the stream ends before a ')' is seen
     */
    private List<Object> readList() throws Exception {
        List<Object> elements = new LinkedList<Object>();
        StringBuilder current = new StringBuilder();
        boolean closed = false;
        while (is.available() > 0) {
            char ch = (char) is.read();
            if (ch == ')') {
                closed = true;
                break;
            }
            if (ch == ',') {
                elements.add(current.toString().trim());
                current = new StringBuilder();
            } else {
                current.append(ch);
            }
        }
        // The final (or only) element is whatever accumulated since the last comma.
        elements.add(current.toString().trim());
        if (!closed) {
            throw new ParserException("Expected ')' but never encountered in the stream");
        }
        return elements;
    }

    /** Returns the parsed list elements, in order of appearance. */
    public List<Object> getList() {
        return values;
    }

    @Override
    public String toString() {
        return values.toString();
    }
}
8,239
0
Create_ds/conductor-community/index/es7-persistence/src/main/java/com/netflix/conductor/es7/dao/query/parser
Create_ds/conductor-community/index/es7-persistence/src/main/java/com/netflix/conductor/es7/dao/query/parser/internal/Range.java
/*
 * <p>
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
 * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations under the License.
 */
package com.netflix.conductor.es7.dao.query.parser.internal;

import java.io.InputStream;

/**
 * Parse-tree node for a numeric range of the form {@code <low> AND <high>}, the right-hand side of
 * a {@code BETWEEN} operator.
 *
 * @author Viren
 */
public class Range extends AbstractNode {

    private String low;
    private String high;

    public Range(InputStream is) throws ParserException {
        super(is);
    }

    @Override
    protected void _parse() throws Exception {
        this.low = readNumber(is);
        // FIX: the lower bound was never validated (only the upper bound was), so an input like
        // "AND 5" silently produced a range with an empty lower value.
        if (this.low == null || "".equals(this.low)) {
            throw new ParserException("Missing the lower range value...");
        }
        skipWhitespace();
        byte[] peeked = read(3);
        assertExpected(peeked, "AND");
        skipWhitespace();
        String num = readNumber(is);
        if (num == null || "".equals(num)) {
            throw new ParserException("Missing the upper range value...");
        }
        this.high = num;
    }

    /**
     * Consumes the longest run of numeric characters (per {@link #isNumeric(char)}) from the
     * stream; the first non-numeric character is left unread via mark/reset.
     */
    private String readNumber(InputStream is) throws Exception {
        StringBuilder sb = new StringBuilder();
        while (is.available() > 0) {
            is.mark(1);
            char c = (char) is.read();
            if (!isNumeric(c)) {
                is.reset();
                break;
            } else {
                sb.append(c);
            }
        }
        String numValue = sb.toString().trim();
        return numValue;
    }

    /**
     * @return the low
     */
    public String getLow() {
        return low;
    }

    /**
     * @return the high
     */
    public String getHigh() {
        return high;
    }

    @Override
    public String toString() {
        return low + " AND " + high;
    }
}
8,240
0
Create_ds/conductor-community/index/es7-persistence/src/main/java/com/netflix/conductor/es7/dao/query/parser
Create_ds/conductor-community/index/es7-persistence/src/main/java/com/netflix/conductor/es7/dao/query/parser/internal/Name.java
/*
 * <p>
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
 * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations under the License.
 */
package com.netflix.conductor.es7.dao.query.parser.internal;

import java.io.InputStream;

/**
 * Parse-tree node holding the name of the field a query condition applies to.
 *
 * @author Viren Represents the name of the field to be searched against.
 */
public class Name extends AbstractNode {

    private String value;

    public Name(InputStream is) throws ParserException {
        super(is);
    }

    @Override
    protected void _parse() throws Exception {
        // A field name is a single bare token (readToken stops at whitespace or an operator).
        value = readToken();
    }

    /** Returns the parsed field name. */
    public String getName() {
        return value;
    }

    @Override
    public String toString() {
        return value;
    }
}
8,241
0
Create_ds/conductor-community/index/es7-persistence/src/main/java/com/netflix/conductor/es7/dao/query/parser
Create_ds/conductor-community/index/es7-persistence/src/main/java/com/netflix/conductor/es7/dao/query/parser/internal/ComparisonOp.java
/*
 * <p>
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
 * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations under the License.
 */
package com.netflix.conductor.es7.dao.query.parser.internal;

import java.io.InputStream;

/**
 * Parse-tree node for a comparison operator token (=, &gt;, &lt;, !=, IN, IS, BETWEEN,
 * STARTS_WITH). The operator is recognized by peeking ahead in the stream and then consuming
 * exactly the matched token's length.
 *
 * @author Viren
 */
public class ComparisonOp extends AbstractNode {

    /** The set of operators this node can recognize, with their textual representations. */
    public enum Operators {
        BETWEEN("BETWEEN"),
        EQUALS("="),
        LESS_THAN("<"),
        GREATER_THAN(">"),
        IN("IN"),
        NOT_EQUALS("!="),
        IS("IS"),
        STARTS_WITH("STARTS_WITH");

        private final String value;

        Operators(String value) {
            this.value = value;
        }

        public String value() {
            return value;
        }
    }

    // Computes the longest operator text so _parse() knows how far to peek.
    // (Assigning the blank static final from a static initializer is legal even though the
    // initializer textually precedes the field declaration.)
    static {
        int max = 0;
        for (Operators op : Operators.values()) {
            max = Math.max(max, op.value().length());
        }
        maxOperatorLength = max;
    }

    private static final int maxOperatorLength;

    private static final int betweenLen = Operators.BETWEEN.value().length();

    private static final int startsWithLen = Operators.STARTS_WITH.value().length();

    // The matched operator's textual form (one of the Operators values above).
    private String value;

    public ComparisonOp(InputStream is) throws ParserException {
        super(is);
    }

    @Override
    protected void _parse() throws Exception {
        // Peek the maximum operator width once; branch order matters (e.g. "IN"/"IS" are
        // checked by their first two bytes before the longer keywords are compared).
        byte[] peeked = peek(maxOperatorLength);
        if (peeked[0] == '=' || peeked[0] == '>' || peeked[0] == '<') {
            this.value = new String(peeked, 0, 1);
        } else if (peeked[0] == 'I' && peeked[1] == 'N') {
            this.value = "IN";
        } else if (peeked[0] == 'I' && peeked[1] == 'S') {
            this.value = "IS";
        } else if (peeked[0] == '!' && peeked[1] == '=') {
            this.value = "!=";
        } else if (peeked.length >= betweenLen
                && peeked[0] == 'B'
                && peeked[1] == 'E'
                && peeked[2] == 'T'
                && peeked[3] == 'W'
                && peeked[4] == 'E'
                && peeked[5] == 'E'
                && peeked[6] == 'N') {
            this.value = Operators.BETWEEN.value();
        } else if (peeked.length == startsWithLen
                && new String(peeked).equals(Operators.STARTS_WITH.value())) {
            // NOTE(review): this branch compares the entire peeked buffer, so it only matches
            // when the remaining stream begins with exactly "STARTS_WITH" filling the buffer.
            this.value = Operators.STARTS_WITH.value();
        } else {
            throw new ParserException(
                    "Expecting an operator (=, >, <, !=, BETWEEN, IN, STARTS_WITH), but found none. Peeked=>"
                            + new String(peeked));
        }
        // Consume exactly the bytes of the matched operator; the rest of the peeked buffer
        // remains in the stream for the next node.
        read(this.value.length());
    }

    @Override
    public String toString() {
        return " " + value + " ";
    }

    /** Returns the matched operator text (e.g. "=", "IN", "BETWEEN"). */
    public String getOperator() {
        return value;
    }
}
8,242
0
Create_ds/conductor-community/community-server/src/main/java/com/netflix
Create_ds/conductor-community/community-server/src/main/java/com/netflix/conductor/Conductor.java
/*
 * <p>
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
 * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations under the License.
 */
package com.netflix.conductor;

import java.io.IOException;
import java.util.Properties;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.SpringBootApplication;
import org.springframework.boot.autoconfigure.jdbc.DataSourceAutoConfiguration;
import org.springframework.core.io.FileSystemResource;
import org.springframework.util.StringUtils;

// Prevents from the datasource beans to be loaded, AS they are needed only for specific databases.
// In case that SQL database is selected this class will be imported back in the appropriate
// database persistence module.
@SpringBootApplication(exclude = DataSourceAutoConfiguration.class)
public class Conductor {

    private static final Logger log = LoggerFactory.getLogger(Conductor.class);

    /**
     * Application entry point: loads any externally supplied configuration, then boots the Spring
     * application context.
     */
    public static void main(String[] args) throws IOException {
        loadExternalConfig();
        SpringApplication.run(Conductor.class, args);
    }

    /**
     * Reads properties from the location specified in <code>CONDUCTOR_CONFIG_FILE</code> and sets
     * them as system properties so they override the default properties.
     *
     * <p>Spring Boot property hierarchy is documented here,
     * https://docs.spring.io/spring-boot/docs/current/reference/html/spring-boot-features.html#boot-features-external-config
     *
     * @throws IOException if file can't be read.
     */
    private static void loadExternalConfig() throws IOException {
        String configFile = System.getProperty("CONDUCTOR_CONFIG_FILE");
        // FIX: StringUtils.isEmpty(Object) is deprecated since Spring 5.3;
        // hasLength(String) is the documented exact replacement for !isEmpty on strings.
        if (StringUtils.hasLength(configFile)) {
            FileSystemResource resource = new FileSystemResource(configFile);
            if (resource.exists()) {
                Properties properties = new Properties();
                properties.load(resource.getInputStream());
                properties.forEach(
                        (key, value) -> System.setProperty((String) key, (String) value));
                log.info("Loaded {} properties from {}", properties.size(), configFile);
            } else {
                log.warn("Ignoring {} since it does not exist", configFile);
            }
        }
    }
}
8,243
0
Create_ds/conductor-community/workflow-event-listener/src/test/java/com/netflix/conductor/test
Create_ds/conductor-community/workflow-event-listener/src/test/java/com/netflix/conductor/test/listener/WorkflowStatusPublisherIntegrationTest.java
/*
 * <p>
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
 * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations under the License.
 */
package com.netflix.conductor.test.listener;

import java.io.IOException;
import java.util.Collections;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;

import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.test.context.TestPropertySource;
import org.springframework.test.context.junit4.SpringRunner;

import com.netflix.conductor.common.metadata.tasks.Task;
import com.netflix.conductor.common.metadata.tasks.TaskDef;
import com.netflix.conductor.common.metadata.tasks.TaskResult;
import com.netflix.conductor.common.metadata.workflow.StartWorkflowRequest;
import com.netflix.conductor.common.metadata.workflow.WorkflowDef;
import com.netflix.conductor.common.metadata.workflow.WorkflowTask;
import com.netflix.conductor.common.run.Workflow;
import com.netflix.conductor.common.run.WorkflowSummary;
import com.netflix.conductor.core.events.queue.Message;
import com.netflix.conductor.dao.QueueDAO;
import com.netflix.conductor.service.ExecutionService;
import com.netflix.conductor.service.MetadataService;
import com.netflix.conductor.service.WorkflowService;

import com.fasterxml.jackson.databind.ObjectMapper;

import static com.netflix.conductor.common.metadata.tasks.Task.Status.COMPLETED;
import static org.junit.Assert.assertEquals;

/**
 * Integration test verifying that the queue_publisher workflow status listener publishes a
 * {@link WorkflowSummary} message to both the status queue (success/failure) and the finalize
 * queue when a workflow completes or is terminated.
 */
@RunWith(SpringRunner.class)
@SpringBootTest(
        properties = {
            "conductor.workflow-status-listener.type=queue_publisher",
            "conductor.workflow-status-listener.queue-publisher.successQueue=dummy",
            "conductor.workflow-status-listener.queue-publisher.failureQueue=dummy",
            "conductor.workflow-status-listener.queue-publisher.finalizeQueue=final"
        })
@TestPropertySource(locations = "classpath:application-integrationtest.properties")
public class WorkflowStatusPublisherIntegrationTest {

    private final String CALLBACK_QUEUE = "dummy";
    private final String FINALIZED_QUEUE = "final";
    private static final String LINEAR_WORKFLOW_T1_T2 = "junit_test_wf";
    private static final int WORKFLOW_VERSION = 1;
    private static final String INCOMPLETION_REASON = "test reason";
    private static final String DEFAULT_OWNER_EMAIL = "test@harness.com";

    @Autowired private ObjectMapper objectMapper;

    @Autowired QueueDAO queueDAO;

    @Autowired protected MetadataService metadataService;

    @Autowired protected ExecutionService workflowExecutionService;

    @Autowired protected WorkflowService workflowExecutor;

    /** Registers the single task definition the test workflows use. */
    @Before
    public void setUp() {
        TaskDef taskDef = new TaskDef();
        taskDef.setName("junit_task_1");
        taskDef.setTimeoutSeconds(120);
        taskDef.setResponseTimeoutSeconds(120);
        taskDef.setRetryCount(1);
        taskDef.setOwnerEmail(DEFAULT_OWNER_EMAIL);
        metadataService.registerTaskDef(Collections.singletonList(taskDef));
    }

    /** Terminates any workflows still running and drains all queues between tests. */
    @After
    public void cleanUp() {
        List<String> workflows =
                metadataService.getWorkflowDefs().stream()
                        .map(WorkflowDef::getName)
                        .collect(Collectors.toList());
        for (String wfName : workflows) {
            List<String> running =
                    workflowExecutionService.getRunningWorkflows(wfName, WORKFLOW_VERSION);
            for (String wfid : running) {
                workflowExecutor.terminateWorkflow(wfid, "cleanup");
            }
        }
        queueDAO.queuesDetail().keySet().forEach(queueDAO::flush);
    }

    @Test
    public void testListenerOnTerminatedWorkflow() throws IOException {
        String id =
                startOrLoadWorkflowExecution(
                        LINEAR_WORKFLOW_T1_T2, 1, "testWorkflowTerminatedListener", new HashMap<>());
        workflowExecutor.terminateWorkflow(id, INCOMPLETION_REASON);

        List<Message> callbackMessages = queueDAO.pollMessages(CALLBACK_QUEUE, 1, 200);
        queueDAO.ack(CALLBACK_QUEUE, callbackMessages.get(0).getId());

        WorkflowSummary payload =
                objectMapper.readValue(callbackMessages.get(0).getPayload(), WorkflowSummary.class);

        assertEquals(id, callbackMessages.get(0).getId());
        assertEquals(LINEAR_WORKFLOW_T1_T2, payload.getWorkflowType());
        assertEquals("testWorkflowTerminatedListener", payload.getCorrelationId());
        assertEquals(Workflow.WorkflowStatus.TERMINATED, payload.getStatus());
        assertEquals(INCOMPLETION_REASON, payload.getReasonForIncompletion());

        // check finalized queue
        callbackMessages = queueDAO.pollMessages(FINALIZED_QUEUE, 1, 200);
        // FIX: messages polled from the finalize queue were acked against CALLBACK_QUEUE
        // (copy-paste bug), leaving them unacked on FINALIZED_QUEUE.
        queueDAO.ack(FINALIZED_QUEUE, callbackMessages.get(0).getId());

        payload =
                objectMapper.readValue(callbackMessages.get(0).getPayload(), WorkflowSummary.class);

        assertEquals(id, callbackMessages.get(0).getId());
        assertEquals(LINEAR_WORKFLOW_T1_T2, payload.getWorkflowType());
        assertEquals("testWorkflowTerminatedListener", payload.getCorrelationId());
        assertEquals(Workflow.WorkflowStatus.TERMINATED, payload.getStatus());
        assertEquals(INCOMPLETION_REASON, payload.getReasonForIncompletion());
    }

    @Test
    public void testListenerOnCompletedWorkflow() throws IOException, InterruptedException {
        WorkflowDef workflowDef = new WorkflowDef();
        workflowDef.setName(LINEAR_WORKFLOW_T1_T2);
        workflowDef.setDescription(workflowDef.getName());
        workflowDef.setVersion(WORKFLOW_VERSION);
        workflowDef.setSchemaVersion(2);
        workflowDef.setOwnerEmail(DEFAULT_OWNER_EMAIL);
        workflowDef.setWorkflowStatusListenerEnabled(true);

        LinkedList<WorkflowTask> wftasks = new LinkedList<>();
        WorkflowTask wft1 = new WorkflowTask();
        wft1.setName("junit_task_1");
        wft1.setTaskReferenceName("t1");
        wftasks.add(wft1);
        workflowDef.setTasks(wftasks);

        metadataService.updateWorkflowDef(Collections.singletonList(workflowDef));

        String id =
                startOrLoadWorkflowExecution(
                        workflowDef.getName(), 1, "testWorkflowCompletedListener", new HashMap<>());

        List<Task> tasks = workflowExecutionService.getTasks("junit_task_1", null, 1);
        tasks.get(0).setStatus(COMPLETED);
        workflowExecutionService.updateTask(new TaskResult(tasks.get(0)));

        checkIfWorkflowIsCompleted(id);

        List<Message> callbackMessages = queueDAO.pollMessages(CALLBACK_QUEUE, 1, 200);
        queueDAO.ack(CALLBACK_QUEUE, callbackMessages.get(0).getId());

        WorkflowSummary payload =
                objectMapper.readValue(callbackMessages.get(0).getPayload(), WorkflowSummary.class);

        assertEquals(id, callbackMessages.get(0).getId());
        assertEquals(LINEAR_WORKFLOW_T1_T2, payload.getWorkflowType());
        assertEquals("testWorkflowCompletedListener", payload.getCorrelationId());
        assertEquals(Workflow.WorkflowStatus.COMPLETED, payload.getStatus());

        // check finalized queue
        callbackMessages = queueDAO.pollMessages(FINALIZED_QUEUE, 1, 200);
        // FIX: ack the finalize queue, not the callback queue (same copy-paste bug as above).
        queueDAO.ack(FINALIZED_QUEUE, callbackMessages.get(0).getId());

        payload =
                objectMapper.readValue(callbackMessages.get(0).getPayload(), WorkflowSummary.class);

        assertEquals(id, callbackMessages.get(0).getId());
        assertEquals(LINEAR_WORKFLOW_T1_T2, payload.getWorkflowType());
        assertEquals("testWorkflowCompletedListener", payload.getCorrelationId());
        assertEquals(Workflow.WorkflowStatus.COMPLETED, payload.getStatus());
    }

    /** Polls the execution status until COMPLETED, giving up after ~600ms. */
    @SuppressWarnings("BusyWait")
    private void checkIfWorkflowIsCompleted(String id) throws InterruptedException {
        int statusRetrieveAttempts = 0;
        while (workflowExecutor.getExecutionStatus(id, false).getStatus()
                != Workflow.WorkflowStatus.COMPLETED) {
            if (statusRetrieveAttempts > 5) {
                break;
            }
            Thread.sleep(100);
            statusRetrieveAttempts++;
        }
    }

    /** Starts a workflow execution with the given name/version/correlation id and input. */
    private String startOrLoadWorkflowExecution(
            String workflowName, int version, String correlationId, Map<String, Object> input) {
        StartWorkflowRequest startWorkflowInput = new StartWorkflowRequest();
        startWorkflowInput.setName(workflowName);
        startWorkflowInput.setVersion(version);
        startWorkflowInput.setCorrelationId(correlationId);
        startWorkflowInput.setInput(input);
        return workflowExecutor.startWorkflow(startWorkflowInput);
    }
}
8,244
0
Create_ds/conductor-community/workflow-event-listener/src/test/java/com/netflix/conductor/contribs
Create_ds/conductor-community/workflow-event-listener/src/test/java/com/netflix/conductor/contribs/listener/ArchivingWorkflowStatusListenerTest.java
/*
 * <p>
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
 * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations under the License.
 */
package com.netflix.conductor.contribs.listener;

import java.util.UUID;

import org.junit.Before;
import org.junit.Test;
import org.mockito.Mockito;

import com.netflix.conductor.common.metadata.workflow.WorkflowDef;
import com.netflix.conductor.contribs.listener.archive.ArchivingWorkflowStatusListener;
import com.netflix.conductor.core.dal.ExecutionDAOFacade;
import com.netflix.conductor.model.WorkflowModel;

import static org.mockito.Mockito.*;

/**
 * Unit tests for {@link ArchivingWorkflowStatusListener}: each lifecycle callback must archive the
 * workflow exactly once and perform no other DAO interaction.
 *
 * @author pavel.halabala
 */
public class ArchivingWorkflowStatusListenerTest {

    WorkflowModel workflow;
    ExecutionDAOFacade executionDAOFacade;
    ArchivingWorkflowStatusListener listener;

    /** Builds a minimal workflow model and a listener backed by a mocked DAO facade. */
    @Before
    public void before() {
        WorkflowDef def = new WorkflowDef();
        def.setName("name1");
        def.setVersion(1);

        workflow = new WorkflowModel();
        workflow.setWorkflowDefinition(def);
        workflow.setWorkflowId(UUID.randomUUID().toString());

        executionDAOFacade = Mockito.mock(ExecutionDAOFacade.class);
        listener = new ArchivingWorkflowStatusListener(executionDAOFacade);
    }

    @Test
    public void testArchiveOnWorkflowCompleted() {
        listener.onWorkflowCompleted(workflow);
        // Exactly one archival call, nothing else.
        verify(executionDAOFacade, times(1)).removeWorkflow(workflow.getWorkflowId(), true);
        verifyNoMoreInteractions(executionDAOFacade);
    }

    @Test
    public void testArchiveOnWorkflowTerminated() {
        listener.onWorkflowTerminated(workflow);
        // Exactly one archival call, nothing else.
        verify(executionDAOFacade, times(1)).removeWorkflow(workflow.getWorkflowId(), true);
        verifyNoMoreInteractions(executionDAOFacade);
    }
}
8,245
0
Create_ds/conductor-community/workflow-event-listener/src/main/java/com/netflix/conductor/contribs/listener
Create_ds/conductor-community/workflow-event-listener/src/main/java/com/netflix/conductor/contribs/listener/archive/ArchivingWorkflowStatusListener.java
/* * <p> * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * <p> * http://www.apache.org/licenses/LICENSE-2.0 * <p> * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. */ package com.netflix.conductor.contribs.listener.archive; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import com.netflix.conductor.core.dal.ExecutionDAOFacade; import com.netflix.conductor.core.listener.WorkflowStatusListener; import com.netflix.conductor.metrics.Monitors; import com.netflix.conductor.model.WorkflowModel; /** * Provides default implementation of workflow archiving immediately after workflow is completed or * terminated. 
* * @author pavel.halabala */ public class ArchivingWorkflowStatusListener implements WorkflowStatusListener { private static final Logger LOGGER = LoggerFactory.getLogger(ArchivingWorkflowStatusListener.class); private final ExecutionDAOFacade executionDAOFacade; public ArchivingWorkflowStatusListener(ExecutionDAOFacade executionDAOFacade) { this.executionDAOFacade = executionDAOFacade; } @Override public void onWorkflowCompleted(WorkflowModel workflow) { LOGGER.info("Archiving workflow {} on completion ", workflow.getWorkflowId()); this.executionDAOFacade.removeWorkflow(workflow.getWorkflowId(), true); Monitors.recordWorkflowArchived(workflow.getWorkflowName(), workflow.getStatus()); } @Override public void onWorkflowTerminated(WorkflowModel workflow) { LOGGER.info("Archiving workflow {} on termination", workflow.getWorkflowId()); this.executionDAOFacade.removeWorkflow(workflow.getWorkflowId(), true); Monitors.recordWorkflowArchived(workflow.getWorkflowName(), workflow.getStatus()); } }
8,246
0
Create_ds/conductor-community/workflow-event-listener/src/main/java/com/netflix/conductor/contribs/listener
Create_ds/conductor-community/workflow-event-listener/src/main/java/com/netflix/conductor/contribs/listener/archive/ArchivingWorkflowListenerProperties.java
/*
 * <p>
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
 * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations under the License.
 */
package com.netflix.conductor.contribs.listener.archive;

import java.time.Duration;
import java.time.temporal.ChronoUnit;

import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.context.properties.ConfigurationProperties;
import org.springframework.boot.convert.DurationUnit;
import org.springframework.core.env.Environment;

/** Configuration knobs for the workflow-archival status listener. */
@ConfigurationProperties("conductor.workflow-status-listener.archival")
public class ArchivingWorkflowListenerProperties {

    // Property keys read directly from the Environment (not bound fields) so the archival
    // delay can fall back to the app-wide async update delay.
    private static final String DELAY_SECONDS_KEY =
            "conductor.workflow-status-listener.archival.delaySeconds";
    private static final String FALLBACK_DELAY_KEY = "conductor.app.asyncUpdateDelaySeconds";

    private final Environment environment;

    @Autowired
    public ArchivingWorkflowListenerProperties(Environment environment) {
        this.environment = environment;
    }

    /**
     * The time to live in seconds for workflow archiving module. Currently, only
     * RedisExecutionDAO supports this
     */
    @DurationUnit(ChronoUnit.SECONDS)
    private Duration ttlDuration = Duration.ZERO;

    /** The number of threads to process the delay queue in workflow archival */
    private int delayQueueWorkerThreadCount = 5;

    public Duration getTtlDuration() {
        return ttlDuration;
    }

    public void setTtlDuration(Duration ttlDuration) {
        this.ttlDuration = ttlDuration;
    }

    public int getDelayQueueWorkerThreadCount() {
        return delayQueueWorkerThreadCount;
    }

    public void setDelayQueueWorkerThreadCount(int delayQueueWorkerThreadCount) {
        this.delayQueueWorkerThreadCount = delayQueueWorkerThreadCount;
    }

    /** The time to delay the archival of workflow */
    public int getWorkflowArchivalDelay() {
        Integer fallback = environment.getProperty(FALLBACK_DELAY_KEY, Integer.class, 60);
        return environment.getProperty(DELAY_SECONDS_KEY, Integer.class, fallback);
    }
}
8,247
0
Create_ds/conductor-community/workflow-event-listener/src/main/java/com/netflix/conductor/contribs/listener
Create_ds/conductor-community/workflow-event-listener/src/main/java/com/netflix/conductor/contribs/listener/archive/ArchivingWorkflowListenerConfiguration.java
/* * <p> * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * <p> * http://www.apache.org/licenses/LICENSE-2.0 * <p> * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. */ package com.netflix.conductor.contribs.listener.archive; import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty; import org.springframework.boot.context.properties.EnableConfigurationProperties; import org.springframework.context.annotation.Bean; import org.springframework.context.annotation.Configuration; import com.netflix.conductor.core.dal.ExecutionDAOFacade; import com.netflix.conductor.core.listener.WorkflowStatusListener; @Configuration @EnableConfigurationProperties(ArchivingWorkflowListenerProperties.class) @ConditionalOnProperty(name = "conductor.workflow-status-listener.type", havingValue = "archive") public class ArchivingWorkflowListenerConfiguration { @Bean public WorkflowStatusListener getWorkflowStatusListener( ExecutionDAOFacade executionDAOFacade, ArchivingWorkflowListenerProperties properties) { if (properties.getTtlDuration().getSeconds() > 0) { return new ArchivingWithTTLWorkflowStatusListener(executionDAOFacade, properties); } else { return new ArchivingWorkflowStatusListener(executionDAOFacade); } } }
8,248
0
Create_ds/conductor-community/workflow-event-listener/src/main/java/com/netflix/conductor/contribs/listener
Create_ds/conductor-community/workflow-event-listener/src/main/java/com/netflix/conductor/contribs/listener/archive/ArchivingWithTTLWorkflowStatusListener.java
/*
 * <p>
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
 * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations under the License.
 */
package com.netflix.conductor.contribs.listener.archive;

import java.util.concurrent.ScheduledThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

import javax.annotation.PreDestroy;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.netflix.conductor.core.dal.ExecutionDAOFacade;
import com.netflix.conductor.core.listener.WorkflowStatusListener;
import com.netflix.conductor.metrics.Monitors;
import com.netflix.conductor.model.WorkflowModel;

/**
 * Workflow status listener that removes a finished workflow from the primary store,
 * optionally after a configurable delay. Despite the name, TTL-based removal is no
 * longer supported (see constructor warning) — removal is immediate or delay-scheduled.
 */
public class ArchivingWithTTLWorkflowStatusListener implements WorkflowStatusListener {

    private static final Logger LOGGER =
            LoggerFactory.getLogger(ArchivingWithTTLWorkflowStatusListener.class);

    private final ExecutionDAOFacade executionDAOFacade;
    // NOTE(review): retained from configuration for interface stability but never read
    // after construction — TTL removal is unsupported (see the constructor warning).
    private final int archiveTTLSeconds;
    private final int delayArchiveSeconds;
    private final ScheduledThreadPoolExecutor scheduledThreadPoolExecutor;

    /**
     * @param executionDAOFacade facade used to remove workflows from the primary store
     * @param properties supplies TTL, archival delay and delay-queue worker thread count
     */
    public ArchivingWithTTLWorkflowStatusListener(
            ExecutionDAOFacade executionDAOFacade, ArchivingWorkflowListenerProperties properties) {
        this.executionDAOFacade = executionDAOFacade;
        this.archiveTTLSeconds = (int) properties.getTtlDuration().getSeconds();
        this.delayArchiveSeconds = properties.getWorkflowArchivalDelay();
        this.scheduledThreadPoolExecutor =
                new ScheduledThreadPoolExecutor(
                        properties.getDelayQueueWorkerThreadCount(),
                        // rejection handler: count and log discarded archival requests
                        (runnable, executor) -> {
                            LOGGER.warn(
                                    "Request {} to delay archiving index dropped in executor {}",
                                    runnable,
                                    executor);
                            Monitors.recordDiscardedArchivalCount();
                        });
        this.scheduledThreadPoolExecutor.setRemoveOnCancelPolicy(true);
        LOGGER.warn(
                "Workflow removal with TTL is no longer supported, "
                        + "when using this class, workflows will be removed immediately");
    }

    /**
     * Gracefully drains the delay queue on shutdown, forcing termination after
     * {@link #delayArchiveSeconds} seconds.
     */
    @PreDestroy
    public void shutdownExecutorService() {
        try {
            LOGGER.info("Gracefully shutdown executor service");
            scheduledThreadPoolExecutor.shutdown();
            if (scheduledThreadPoolExecutor.awaitTermination(
                    delayArchiveSeconds, TimeUnit.SECONDS)) {
                LOGGER.debug("tasks completed, shutting down");
            } else {
                LOGGER.warn("Forcing shutdown after waiting for {} seconds", delayArchiveSeconds);
                scheduledThreadPoolExecutor.shutdownNow();
            }
        } catch (InterruptedException ie) {
            LOGGER.warn(
                    "Shutdown interrupted, invoking shutdownNow on scheduledThreadPoolExecutor for delay queue");
            scheduledThreadPoolExecutor.shutdownNow();
            // preserve the interrupt status for callers further up the stack
            Thread.currentThread().interrupt();
        }
    }

    @Override
    public void onWorkflowCompleted(WorkflowModel workflow) {
        LOGGER.info("Archiving workflow {} on completion ", workflow.getWorkflowId());
        archive(workflow);
    }

    @Override
    public void onWorkflowTerminated(WorkflowModel workflow) {
        LOGGER.info("Archiving workflow {} on termination", workflow.getWorkflowId());
        archive(workflow);
    }

    /**
     * Shared archival path for both completion and termination: schedules removal when a
     * delay is configured, otherwise removes the workflow immediately and records the metric.
     */
    private void archive(WorkflowModel workflow) {
        if (delayArchiveSeconds > 0) {
            scheduledThreadPoolExecutor.schedule(
                    new DelayArchiveWorkflow(workflow, executionDAOFacade),
                    delayArchiveSeconds,
                    TimeUnit.SECONDS);
        } else {
            this.executionDAOFacade.removeWorkflow(workflow.getWorkflowId(), true);
            Monitors.recordWorkflowArchived(workflow.getWorkflowName(), workflow.getStatus());
        }
    }

    /** Deferred removal task; captures only the identifiers it needs from the workflow. */
    private class DelayArchiveWorkflow implements Runnable {

        private final String workflowId;
        private final String workflowName;
        private final WorkflowModel.Status status;
        private final ExecutionDAOFacade executionDAOFacade;

        DelayArchiveWorkflow(WorkflowModel workflow, ExecutionDAOFacade executionDAOFacade) {
            this.workflowId = workflow.getWorkflowId();
            this.workflowName = workflow.getWorkflowName();
            this.status = workflow.getStatus();
            this.executionDAOFacade = executionDAOFacade;
        }

        @Override
        public void run() {
            try {
                this.executionDAOFacade.removeWorkflow(workflowId, true);
                LOGGER.info("Archived workflow {}", workflowId);
                Monitors.recordWorkflowArchived(workflowName, status);
                Monitors.recordArchivalDelayQueueSize(
                        scheduledThreadPoolExecutor.getQueue().size());
            } catch (Exception e) {
                // swallow so one failed removal does not kill the scheduled task
                LOGGER.error("Unable to archive workflow: {}", workflowId, e);
            }
        }
    }
}
8,249
0
Create_ds/conductor-community/workflow-event-listener/src/main/java/com/netflix/conductor/contribs/listener
Create_ds/conductor-community/workflow-event-listener/src/main/java/com/netflix/conductor/contribs/listener/conductorqueue/ConductorQueueStatusPublisherConfiguration.java
/* * <p> * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * <p> * http://www.apache.org/licenses/LICENSE-2.0 * <p> * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. */ package com.netflix.conductor.contribs.listener.conductorqueue; import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty; import org.springframework.boot.context.properties.EnableConfigurationProperties; import org.springframework.context.annotation.Bean; import org.springframework.context.annotation.Configuration; import com.netflix.conductor.core.listener.WorkflowStatusListener; import com.netflix.conductor.dao.QueueDAO; import com.fasterxml.jackson.databind.ObjectMapper; @Configuration @EnableConfigurationProperties(ConductorQueueStatusPublisherProperties.class) @ConditionalOnProperty( name = "conductor.workflow-status-listener.type", havingValue = "queue_publisher") public class ConductorQueueStatusPublisherConfiguration { @Bean public WorkflowStatusListener getWorkflowStatusListener( QueueDAO queueDAO, ConductorQueueStatusPublisherProperties properties, ObjectMapper objectMapper) { return new ConductorQueueStatusPublisher(queueDAO, objectMapper, properties); } }
8,250
0
Create_ds/conductor-community/workflow-event-listener/src/main/java/com/netflix/conductor/contribs/listener
Create_ds/conductor-community/workflow-event-listener/src/main/java/com/netflix/conductor/contribs/listener/conductorqueue/ConductorQueueStatusPublisher.java
/*
 * <p>
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
 * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations under the License.
 */
package com.netflix.conductor.contribs.listener.conductorqueue;

import java.util.Collections;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.netflix.conductor.common.run.WorkflowSummary;
import com.netflix.conductor.core.events.queue.Message;
import com.netflix.conductor.core.listener.WorkflowStatusListener;
import com.netflix.conductor.dao.QueueDAO;
import com.netflix.conductor.model.WorkflowModel;

import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.ObjectMapper;

/**
 * Publishes a {@link Message} containing a {@link WorkflowSummary} to the underlying {@link
 * QueueDAO} implementation on a workflow completion, termination or finalization event.
 */
public class ConductorQueueStatusPublisher implements WorkflowStatusListener {

    private static final Logger LOGGER =
            LoggerFactory.getLogger(ConductorQueueStatusPublisher.class);

    private final QueueDAO queueDAO;
    private final ObjectMapper objectMapper;
    // destination queue names, one per lifecycle event
    private final String successStatusQueue;
    private final String failureStatusQueue;
    private final String finalizeStatusQueue;

    public ConductorQueueStatusPublisher(
            QueueDAO queueDAO,
            ObjectMapper objectMapper,
            ConductorQueueStatusPublisherProperties properties) {
        this.queueDAO = queueDAO;
        this.objectMapper = objectMapper;
        this.successStatusQueue = properties.getSuccessQueue();
        this.failureStatusQueue = properties.getFailureQueue();
        this.finalizeStatusQueue = properties.getFinalizeQueue();
    }

    @Override
    public void onWorkflowCompleted(WorkflowModel workflow) {
        LOGGER.info("Publishing callback of workflow {} on completion ", workflow.getWorkflowId());
        queueDAO.push(successStatusQueue, Collections.singletonList(workflowToMessage(workflow)));
    }

    @Override
    public void onWorkflowTerminated(WorkflowModel workflow) {
        LOGGER.info("Publishing callback of workflow {} on termination", workflow.getWorkflowId());
        queueDAO.push(failureStatusQueue, Collections.singletonList(workflowToMessage(workflow)));
    }

    @Override
    public void onWorkflowFinalized(WorkflowModel workflow) {
        LOGGER.info("Publishing callback of workflow {} on finalization", workflow.getWorkflowId());
        queueDAO.push(finalizeStatusQueue, Collections.singletonList(workflowToMessage(workflow)));
    }

    /**
     * Serializes the workflow summary to JSON and wraps it in a queue {@link Message} keyed
     * by the workflow id.
     *
     * @throws RuntimeException if the summary cannot be serialized (cause preserved)
     */
    private Message workflowToMessage(WorkflowModel workflowModel) {
        String jsonWfSummary;
        WorkflowSummary summary = new WorkflowSummary(workflowModel.toWorkflow());
        try {
            jsonWfSummary = objectMapper.writeValueAsString(summary);
        } catch (JsonProcessingException e) {
            // fix: pass the throwable as the trailing argument (no placeholder) so SLF4J
            // logs the full stack trace instead of formatting e into the message
            LOGGER.error("Failed to convert WorkflowSummary: {} to String", summary, e);
            throw new RuntimeException(e);
        }
        return new Message(workflowModel.getWorkflowId(), jsonWfSummary, null);
    }
}
8,251
0
Create_ds/conductor-community/workflow-event-listener/src/main/java/com/netflix/conductor/contribs/listener
Create_ds/conductor-community/workflow-event-listener/src/main/java/com/netflix/conductor/contribs/listener/conductorqueue/ConductorQueueStatusPublisherProperties.java
/*
 * <p>
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
 * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations under the License.
 */
package com.netflix.conductor.contribs.listener.conductorqueue;

import org.springframework.boot.context.properties.ConfigurationProperties;

/**
 * Queue names used by {@link ConductorQueueStatusPublisher} for each workflow lifecycle
 * event, bound from {@code conductor.workflow-status-listener.queue-publisher.*}.
 */
@ConfigurationProperties("conductor.workflow-status-listener.queue-publisher")
public class ConductorQueueStatusPublisherProperties {

    /** Destination for completed-workflow callbacks. */
    private String successQueue = "_callbackSuccessQueue";

    /** Destination for terminated-workflow callbacks. */
    private String failureQueue = "_callbackFailureQueue";

    /** Destination for finalized-workflow callbacks. */
    private String finalizeQueue = "_callbackFinalizeQueue";

    public String getSuccessQueue() {
        return successQueue;
    }

    public void setSuccessQueue(String successQueue) {
        this.successQueue = successQueue;
    }

    public String getFailureQueue() {
        return failureQueue;
    }

    public void setFailureQueue(String failureQueue) {
        this.failureQueue = failureQueue;
    }

    public String getFinalizeQueue() {
        return finalizeQueue;
    }

    public void setFinalizeQueue(String finalizeQueue) {
        this.finalizeQueue = finalizeQueue;
    }
}
8,252
0
Create_ds/conductor-community/event-queue/nats-streaming/src/main/java/com/netflix/conductor/contribs/queue
Create_ds/conductor-community/event-queue/nats-streaming/src/main/java/com/netflix/conductor/contribs/queue/stan/NATSStreamObservableQueue.java
/*
 * <p>
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
 * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations under the License.
 */
package com.netflix.conductor.contribs.queue.stan;

import java.util.UUID;

import org.apache.commons.lang3.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import io.nats.client.Connection;
import io.nats.streaming.*;
import rx.Scheduler;

/**
 * NATS Streaming (STAN) backed queue. Connection/subscription lifecycle (open, monitor,
 * reconnect) is driven by the {@link NATSAbstractQueue} base class; this class supplies
 * the STAN-specific connect, subscribe, publish and close operations.
 *
 * @author Oleksiy Lysak
 */
public class NATSStreamObservableQueue extends NATSAbstractQueue {

    private static final Logger LOGGER = LoggerFactory.getLogger(NATSStreamObservableQueue.class);

    // factory holds cluster id / client id / nats url; connections are created lazily
    private final StreamingConnectionFactory fact;
    private StreamingConnection conn;
    private Subscription subs;
    // durable subscriber name so the subscription survives client restarts
    private final String durableName;

    /**
     * Builds the STAN connection factory (with a random client id) and opens the queue.
     * {@code open()} in the base class connects and starts the reconnect monitor.
     */
    public NATSStreamObservableQueue(
            String clusterId,
            String natsUrl,
            String durableName,
            String queueURI,
            Scheduler scheduler) {
        super(queueURI, "nats_stream", scheduler);
        Options.Builder options = new Options.Builder();
        options.clusterId(clusterId);
        // random client id: STAN requires a unique client id per connection
        options.clientId(UUID.randomUUID().toString());
        options.natsUrl(natsUrl);
        this.fact = new StreamingConnectionFactory(options.build());
        this.durableName = durableName;
        open();
    }

    /** Connected only when the underlying core NATS connection reports CONNECTED. */
    @Override
    public boolean isConnected() {
        return (conn != null
                && conn.getNatsConnection() != null
                && Connection.Status.CONNECTED.equals(conn.getNatsConnection().getStatus()));
    }

    /**
     * Establishes the streaming connection; wraps any failure in a RuntimeException so the
     * base-class monitor can retry later.
     */
    @Override
    public void connect() {
        try {
            StreamingConnection temp = fact.createConnection();
            LOGGER.info("Successfully connected for " + queueURI);
            conn = temp;
        } catch (Exception e) {
            LOGGER.error("Unable to establish nats streaming connection for " + queueURI, e);
            throw new RuntimeException(e);
        }
    }

    /**
     * Creates a durable subscription — queue-group based when the URI contained a
     * {@code subject:queue} pair, plain pub/sub otherwise. Idempotent: a second call while
     * subscribed is a no-op. Failures are logged, not thrown; the monitor re-invokes this.
     */
    @Override
    public void subscribe() {
        // do nothing if already subscribed
        if (subs != null) {
            return;
        }
        try {
            ensureConnected();
            SubscriptionOptions subscriptionOptions =
                    new SubscriptionOptions.Builder().durableName(durableName).build();
            // Create subject/queue subscription if the queue has been provided
            if (StringUtils.isNotEmpty(queue)) {
                LOGGER.info(
                        "No subscription. Creating a queue subscription. subject={}, queue={}",
                        subject,
                        queue);
                subs =
                        conn.subscribe(
                                subject,
                                queue,
                                msg -> onMessage(msg.getSubject(), msg.getData()),
                                subscriptionOptions);
            } else {
                LOGGER.info(
                        "No subscription. Creating a pub/sub subscription. subject={}", subject);
                subs =
                        conn.subscribe(
                                subject,
                                msg -> onMessage(msg.getSubject(), msg.getData()),
                                subscriptionOptions);
            }
        } catch (Exception ex) {
            LOGGER.error(
                    "Subscription failed with " + ex.getMessage() + " for queueURI " + queueURI,
                    ex);
        }
    }

    /** Publishes raw bytes to the subject; requires a live connection. */
    @Override
    public void publish(String subject, byte[] data) throws Exception {
        ensureConnected();
        conn.publish(subject, data);
    }

    /**
     * Closes the subscription. close(true) — presumably unsubscribes the durable state as
     * well; NOTE(review): confirm against the STAN client Subscription.close(boolean) docs.
     */
    @Override
    public void closeSubs() {
        if (subs != null) {
            try {
                subs.close(true);
            } catch (Exception ex) {
                LOGGER.error("closeSubs failed with " + ex.getMessage() + " for " + queueURI, ex);
            }
            subs = null;
        }
    }

    /** Closes the streaming connection; errors are logged and the reference cleared. */
    @Override
    public void closeConn() {
        if (conn != null) {
            try {
                conn.close();
            } catch (Exception ex) {
                LOGGER.error("closeConn failed with " + ex.getMessage() + " for " + queueURI, ex);
            }
            conn = null;
        }
    }

    /** STAN durable subscriptions redeliver on their own, so conductor must not republish. */
    @Override
    public boolean rePublishIfNoAck() {
        return false;
    }
}
8,253
0
Create_ds/conductor-community/event-queue/nats-streaming/src/main/java/com/netflix/conductor/contribs/queue
Create_ds/conductor-community/event-queue/nats-streaming/src/main/java/com/netflix/conductor/contribs/queue/stan/NATSObservableQueue.java
/*
 * <p>
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
 * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations under the License.
 */
package com.netflix.conductor.contribs.queue.stan;

import org.apache.commons.lang3.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import io.nats.client.Connection;
import io.nats.client.Nats;
import io.nats.client.Subscription;
import rx.Scheduler;

/**
 * Core (non-streaming) NATS backed queue. Lifecycle management lives in
 * {@link NATSAbstractQueue}; this class provides the nats.java-specific operations.
 *
 * @author Oleksiy Lysak
 */
public class NATSObservableQueue extends NATSAbstractQueue {

    private static final Logger LOGGER = LoggerFactory.getLogger(NATSObservableQueue.class);

    private Subscription subs;
    private Connection conn;

    /** Connects to the default NATS url (Nats.connect() with no args) and opens the queue. */
    public NATSObservableQueue(String queueURI, Scheduler scheduler) {
        super(queueURI, "nats", scheduler);
        open();
    }

    @Override
    public boolean isConnected() {
        return (conn != null && Connection.Status.CONNECTED.equals(conn.getStatus()));
    }

    /**
     * Opens a connection; failures are wrapped so the base-class monitor can retry.
     */
    @Override
    public void connect() {
        try {
            Connection temp = Nats.connect();
            LOGGER.info("Successfully connected for " + queueURI);
            conn = temp;
        } catch (Exception e) {
            LOGGER.error("Unable to establish nats connection for " + queueURI, e);
            throw new RuntimeException(e);
        }
    }

    /**
     * Subscribes to the subject (queue-group variant when a queue name was parsed from the
     * URI). Idempotent; failures are logged and retried by the monitor.
     *
     * NOTE(review): the Dispatcher returned by conn.createDispatcher(...) is discarded and
     * the subscription is then made directly on the connection, which in nats.java creates a
     * synchronous subscription that nothing reads — so onMessage may never be invoked here.
     * The dispatcher should likely own the subscription (dispatcher.subscribe(...)). Verify
     * against the nats.java Dispatcher API for the version in use before changing.
     */
    @Override
    public void subscribe() {
        // do nothing if already subscribed
        if (subs != null) {
            return;
        }
        try {
            ensureConnected();
            // Create subject/queue subscription if the queue has been provided
            if (StringUtils.isNotEmpty(queue)) {
                LOGGER.info(
                        "No subscription. Creating a queue subscription. subject={}, queue={}",
                        subject,
                        queue);
                conn.createDispatcher(msg -> onMessage(msg.getSubject(), msg.getData()));
                subs = conn.subscribe(subject, queue);
            } else {
                LOGGER.info(
                        "No subscription. Creating a pub/sub subscription. subject={}", subject);
                conn.createDispatcher(msg -> onMessage(msg.getSubject(), msg.getData()));
                subs = conn.subscribe(subject);
            }
        } catch (Exception ex) {
            LOGGER.error(
                    "Subscription failed with " + ex.getMessage() + " for queueURI " + queueURI,
                    ex);
        }
    }

    /** Publishes raw bytes to the subject; requires a live connection. */
    @Override
    public void publish(String subject, byte[] data) throws Exception {
        ensureConnected();
        conn.publish(subject, data);
    }

    /** Unsubscribes and clears the subscription reference; errors are logged only. */
    @Override
    public void closeSubs() {
        if (subs != null) {
            try {
                subs.unsubscribe();
            } catch (Exception ex) {
                LOGGER.error("closeSubs failed with " + ex.getMessage() + " for " + queueURI, ex);
            }
            subs = null;
        }
    }

    /** Closes the connection and clears the reference; errors are logged only. */
    @Override
    public void closeConn() {
        if (conn != null) {
            try {
                conn.close();
            } catch (Exception ex) {
                LOGGER.error("closeConn failed with " + ex.getMessage() + " for " + queueURI, ex);
            }
            conn = null;
        }
    }
}
8,254
0
Create_ds/conductor-community/event-queue/nats-streaming/src/main/java/com/netflix/conductor/contribs/queue
Create_ds/conductor-community/event-queue/nats-streaming/src/main/java/com/netflix/conductor/contribs/queue/stan/NATSAbstractQueue.java
/*
 * <p>
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
 * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations under the License.
 */
package com.netflix.conductor.contribs.queue.stan;

import java.util.Collections;
import java.util.LinkedList;
import java.util.List;
import java.util.concurrent.Executors;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.netflix.conductor.core.events.queue.Message;
import com.netflix.conductor.core.events.queue.ObservableQueue;

import io.nats.client.NUID;
import rx.Observable;
import rx.Scheduler;

/**
 * Base class for NATS-backed {@link ObservableQueue}s. Owns the connection lifecycle:
 * open/close, a 500ms monitor that reconnects and re-subscribes on connection loss, and an
 * in-memory buffer of received messages that {@link #observe()} drains every 100ms.
 *
 * <p>Thread-safety: all lifecycle transitions (open/close/monitor/observe-subscribe) are
 * serialized through {@code mu}; received messages go through a thread-safe blocking queue.
 * Subclasses implement the transport-specific connect/subscribe/publish/close operations.
 *
 * @author Oleksiy Lysak
 */
public abstract class NATSAbstractQueue implements ObservableQueue {

    private static final Logger LOGGER = LoggerFactory.getLogger(NATSAbstractQueue.class);

    // buffer between the NATS callback thread and the rx drain loop in observe()
    protected LinkedBlockingQueue<Message> messages = new LinkedBlockingQueue<>();
    // guards all lifecycle state (conn/subs in subclasses, execs, isOpened, observable)
    protected final Lock mu = new ReentrantLock();
    private final String queueType;
    // runs monitor() every 500ms while the queue is open
    private ScheduledExecutorService execs;
    private final Scheduler scheduler;

    protected final String queueURI;
    protected final String subject;
    protected String queue;

    // Indicates that observe was called (Event Handler) and we must to re-initiate subscription
    // upon reconnection
    private boolean observable;
    private boolean isOpened;
    private volatile boolean running;

    /**
     * Parses the queue URI: {@code subject:queue} yields a queue-group subscription,
     * a bare subject yields plain pub/sub ({@code queue} stays null).
     */
    NATSAbstractQueue(String queueURI, String queueType, Scheduler scheduler) {
        this.queueURI = queueURI;
        this.queueType = queueType;
        this.scheduler = scheduler;

        // If queue specified (e.g. subject:queue) - split to subject & queue
        if (queueURI.contains(":")) {
            this.subject = queueURI.substring(0, queueURI.indexOf(':'));
            queue = queueURI.substring(queueURI.indexOf(':') + 1);
        } else {
            this.subject = queueURI;
            queue = null;
        }
        LOGGER.info(
                String.format(
                        "Initialized with queueURI=%s, subject=%s, queue=%s",
                        queueURI, subject, queue));
    }

    /**
     * Callback invoked by subclasses for each incoming NATS message; wraps the payload in a
     * conductor Message with a fresh NUID and buffers it for observe().
     */
    void onMessage(String subject, byte[] data) {
        String payload = new String(data);
        LOGGER.info(String.format("Received message for %s: %s", subject, payload));

        Message dstMsg = new Message();
        dstMsg.setId(NUID.nextGlobal());
        dstMsg.setPayload(payload);

        messages.add(dstMsg);
    }

    /**
     * Returns an Observable that polls the internal buffer every 100ms and emits whatever
     * has accumulated. Also marks the queue observable so the monitor re-subscribes after a
     * reconnect. Emits nothing while the component is not running.
     */
    @Override
    public Observable<Message> observe() {
        LOGGER.info("Observe invoked for queueURI " + queueURI);
        observable = true;

        mu.lock();
        try {
            subscribe();
        } finally {
            mu.unlock();
        }

        Observable.OnSubscribe<Message> onSubscribe =
                subscriber -> {
                    Observable<Long> interval =
                            Observable.interval(100, TimeUnit.MILLISECONDS, scheduler);
                    interval.flatMap(
                                    (Long x) -> {
                                        if (!isRunning()) {
                                            LOGGER.debug(
                                                    "Component stopped, skip listening for messages from NATS Queue");
                                            return Observable.from(Collections.emptyList());
                                        } else {
                                            List<Message> available = new LinkedList<>();
                                            messages.drainTo(available);

                                            if (!available.isEmpty()) {
                                                // build an "id=payload,..." summary purely for logging
                                                AtomicInteger count = new AtomicInteger(0);
                                                StringBuilder buffer = new StringBuilder();
                                                available.forEach(
                                                        msg -> {
                                                            buffer.append(msg.getId())
                                                                    .append("=")
                                                                    .append(msg.getPayload());
                                                            count.incrementAndGet();

                                                            if (count.get() < available.size()) {
                                                                buffer.append(",");
                                                            }
                                                        });
                                                LOGGER.info(
                                                        String.format(
                                                                "Batch from %s to conductor is %s",
                                                                subject, buffer.toString()));
                                            }
                                            return Observable.from(available);
                                        }
                                    })
                            .subscribe(subscriber::onNext, subscriber::onError);
                };
        return Observable.create(onSubscribe);
    }

    @Override
    public String getType() {
        return queueType;
    }

    @Override
    public String getName() {
        return queueURI;
    }

    @Override
    public String getURI() {
        return queueURI;
    }

    /** No explicit acking for plain NATS — nothing is ever reported as failed. */
    @Override
    public List<String> ack(List<Message> messages) {
        return Collections.emptyList();
    }

    /** Unack timeouts are not supported by this transport; intentionally a no-op. */
    @Override
    public void setUnackTimeout(Message message, long unackTimeout) {}

    /** Number of messages buffered locally, not server-side depth. */
    @Override
    public long size() {
        return messages.size();
    }

    /**
     * Publishes each message's payload to the subject. The first failure is rethrown as a
     * RuntimeException, so later messages in the batch are not attempted.
     */
    @Override
    public void publish(List<Message> messages) {
        messages.forEach(
                message -> {
                    try {
                        String payload = message.getPayload();
                        publish(subject, payload.getBytes());
                        LOGGER.info(String.format("Published message to %s: %s", subject, payload));
                    } catch (Exception ex) {
                        LOGGER.error(
                                "Failed to publish message "
                                        + message.getPayload()
                                        + " to "
                                        + subject,
                                ex);
                        throw new RuntimeException(ex);
                    }
                });
    }

    @Override
    public boolean rePublishIfNoAck() {
        return true;
    }

    /** Stops the monitor, closes subscription and connection, and marks the queue closed. */
    @Override
    public void close() {
        LOGGER.info("Closing connection for " + queueURI);
        mu.lock();
        try {
            if (execs != null) {
                execs.shutdownNow();
                execs = null;
            }
            closeSubs();
            closeConn();
            isOpened = false;
        } finally {
            mu.unlock();
        }
    }

    /**
     * Connects (best effort — a failure here is swallowed because the 500ms monitor will
     * keep retrying), re-subscribes if observe() was previously called, and starts the
     * monitor. Idempotent while open.
     */
    public void open() {
        // do nothing if not closed
        if (isOpened) {
            return;
        }

        mu.lock();
        try {
            try {
                connect();

                // Re-initiated subscription if existed
                if (observable) {
                    subscribe();
                }
            } catch (Exception ignore) {
                // deliberate: monitor() below retries the connection every 500ms
            }

            execs = Executors.newScheduledThreadPool(1);
            execs.scheduleAtFixedRate(this::monitor, 0, 500, TimeUnit.MILLISECONDS);
            isOpened = true;
        } finally {
            mu.unlock();
        }
    }

    /**
     * Periodic health check: when disconnected, tears down subscription + connection and
     * rebuilds both. Failures are logged and retried on the next tick.
     */
    private void monitor() {
        if (isConnected()) {
            return;
        }

        LOGGER.error("Monitor invoked for " + queueURI);
        mu.lock();
        try {
            closeSubs();
            closeConn();

            // Connect
            connect();

            // Re-initiated subscription if existed
            if (observable) {
                subscribe();
            }
        } catch (Exception ex) {
            LOGGER.error("Monitor failed with " + ex.getMessage() + " for " + queueURI, ex);
        } finally {
            mu.unlock();
        }
    }

    public boolean isClosed() {
        return !isOpened;
    }

    /** Guard used by subclasses before any operation that needs a live connection. */
    void ensureConnected() {
        if (!isConnected()) {
            throw new RuntimeException("No nats connection");
        }
    }

    @Override
    public void start() {
        LOGGER.info("Started listening to {}:{}", getClass().getSimpleName(), queueURI);
        running = true;
    }

    @Override
    public void stop() {
        LOGGER.info("Stopped listening to {}:{}", getClass().getSimpleName(), queueURI);
        running = false;
    }

    @Override
    public boolean isRunning() {
        return running;
    }

    // Transport-specific operations supplied by subclasses.
    abstract void connect();

    abstract boolean isConnected();

    abstract void publish(String subject, byte[] data) throws Exception;

    abstract void subscribe();

    abstract void closeSubs();

    abstract void closeConn();
}
8,255
0
Create_ds/conductor-community/event-queue/nats-streaming/src/main/java/com/netflix/conductor/contribs/queue/stan
Create_ds/conductor-community/event-queue/nats-streaming/src/main/java/com/netflix/conductor/contribs/queue/stan/config/NATSStreamConfiguration.java
/*
 * <p>
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
 * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations under the License.
 */
package com.netflix.conductor.contribs.queue.stan.config;

import java.util.HashMap;
import java.util.Map;

import org.apache.commons.lang3.StringUtils;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.boot.context.properties.EnableConfigurationProperties;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;

import com.netflix.conductor.contribs.queue.stan.NATSStreamObservableQueue;
import com.netflix.conductor.core.config.ConductorProperties;
import com.netflix.conductor.core.events.EventQueueProvider;
import com.netflix.conductor.core.events.queue.ObservableQueue;
import com.netflix.conductor.model.TaskModel;

import rx.Scheduler;

/**
 * Spring wiring for the NATS Streaming event queue, active when
 * {@code conductor.event-queues.nats-stream.enabled=true}.
 */
@Configuration
@EnableConfigurationProperties(NATSStreamProperties.class)
@ConditionalOnProperty(name = "conductor.event-queues.nats-stream.enabled", havingValue = "true")
public class NATSStreamConfiguration {

    @Bean
    public EventQueueProvider natsEventQueueProvider(
            NATSStreamProperties properties, Scheduler scheduler) {
        return new NATSStreamEventQueueProvider(properties, scheduler);
    }

    /**
     * Builds one notify queue per terminal task status (COMPLETED, FAILED) when NATS
     * Streaming is configured as the default event queue. The queue name is
     * {@code <prefix><STATUS>[:<queueGroup>]}, where the prefix falls back to
     * {@code <appId>_nats_stream_notify_<stack>} when no listener prefix is configured.
     */
    @ConditionalOnProperty(name = "conductor.default-event-queue.type", havingValue = "nats_stream")
    @Bean
    public Map<TaskModel.Status, ObservableQueue> getQueues(
            ConductorProperties conductorProperties,
            NATSStreamProperties properties,
            Scheduler scheduler) {
        String stack = "";
        if (conductorProperties.getStack() != null && conductorProperties.getStack().length() > 0) {
            stack = conductorProperties.getStack() + "_";
        }

        Map<TaskModel.Status, ObservableQueue> queueByStatus = new HashMap<>();
        for (TaskModel.Status status :
                new TaskModel.Status[] {TaskModel.Status.COMPLETED, TaskModel.Status.FAILED}) {
            String configuredPrefix = properties.getListenerQueuePrefix();
            String queuePrefix =
                    StringUtils.isBlank(configuredPrefix)
                            ? conductorProperties.getAppId() + "_nats_stream_notify_" + stack
                            : configuredPrefix;
            String queueName = queuePrefix + status.name() + getQueueGroup(properties);
            queueByStatus.put(
                    status,
                    new NATSStreamObservableQueue(
                            properties.getClusterId(),
                            properties.getUrl(),
                            properties.getDurableName(),
                            queueName,
                            scheduler));
        }
        return queueByStatus;
    }

    /** Returns ":group" for the configured default queue group, or "" when none is set. */
    private String getQueueGroup(final NATSStreamProperties properties) {
        String group = properties.getDefaultQueueGroup();
        if (group == null || group.isBlank()) {
            return "";
        }
        return ":" + group;
    }
}
8,256
0
Create_ds/conductor-community/event-queue/nats-streaming/src/main/java/com/netflix/conductor/contribs/queue/stan
Create_ds/conductor-community/event-queue/nats-streaming/src/main/java/com/netflix/conductor/contribs/queue/stan/config/NATSConfiguration.java
/* * <p> * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * <p> * http://www.apache.org/licenses/LICENSE-2.0 * <p> * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. */ package com.netflix.conductor.contribs.queue.stan.config; import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty; import org.springframework.context.annotation.Bean; import org.springframework.context.annotation.Configuration; import org.springframework.core.env.Environment; import com.netflix.conductor.core.events.EventQueueProvider; import rx.Scheduler; @Configuration @ConditionalOnProperty(name = "conductor.event-queues.nats.enabled", havingValue = "true") public class NATSConfiguration { @Bean public EventQueueProvider natsEventQueueProvider(Environment environment, Scheduler scheduler) { return new NATSEventQueueProvider(environment, scheduler); } }
8,257
0
Create_ds/conductor-community/event-queue/nats-streaming/src/main/java/com/netflix/conductor/contribs/queue/stan
Create_ds/conductor-community/event-queue/nats-streaming/src/main/java/com/netflix/conductor/contribs/queue/stan/config/NATSStreamProperties.java
/*
 * <p>
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
 * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations under the License.
 */
package com.netflix.conductor.contribs.queue.stan.config;

import org.springframework.boot.context.properties.ConfigurationProperties;

import io.nats.client.Options;

/**
 * Connection and naming settings for the NATS Streaming event queue, bound from
 * {@code conductor.event-queues.nats-stream.*}.
 */
@ConfigurationProperties("conductor.event-queues.nats-stream")
public class NATSStreamProperties {

    /** The cluster id of the STAN session */
    private String clusterId = "test-cluster";

    /** The durable subscriber name for the subscription */
    private String durableName = null;

    /** The NATS connection url */
    private String url = Options.DEFAULT_URL;

    /** The prefix to be used for the default listener queues */
    private String listenerQueuePrefix = "";

    /** WAIT tasks default queue group, to make subscription round-robin delivery to single sub */
    private String defaultQueueGroup = "wait-group";

    public String getClusterId() {
        return clusterId;
    }

    public void setClusterId(String clusterId) {
        this.clusterId = clusterId;
    }

    public String getDurableName() {
        return durableName;
    }

    public void setDurableName(String durableName) {
        this.durableName = durableName;
    }

    public String getUrl() {
        return url;
    }

    public void setUrl(String url) {
        this.url = url;
    }

    public String getListenerQueuePrefix() {
        return listenerQueuePrefix;
    }

    public void setListenerQueuePrefix(String listenerQueuePrefix) {
        this.listenerQueuePrefix = listenerQueuePrefix;
    }

    public String getDefaultQueueGroup() {
        return defaultQueueGroup;
    }

    public void setDefaultQueueGroup(String defaultQueueGroup) {
        this.defaultQueueGroup = defaultQueueGroup;
    }
}
8,258
0
Create_ds/conductor-community/event-queue/nats-streaming/src/main/java/com/netflix/conductor/contribs/queue/stan
Create_ds/conductor-community/event-queue/nats-streaming/src/main/java/com/netflix/conductor/contribs/queue/stan/config/NATSStreamEventQueueProvider.java
/*
 * <p>
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
 * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations under the License.
 */
package com.netflix.conductor.contribs.queue.stan.config;

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.lang.NonNull;

import com.netflix.conductor.contribs.queue.stan.NATSStreamObservableQueue;
import com.netflix.conductor.core.events.EventQueueProvider;
import com.netflix.conductor.core.events.queue.ObservableQueue;

import rx.Scheduler;

/**
 * {@link EventQueueProvider} that lazily creates and caches one
 * {@link NATSStreamObservableQueue} per queue URI, reopening a cached queue if it was
 * closed.
 *
 * @author Oleksiy Lysak
 */
public class NATSStreamEventQueueProvider implements EventQueueProvider {

    private static final Logger LOGGER =
            LoggerFactory.getLogger(NATSStreamEventQueueProvider.class);

    // cache of queues keyed by URI; ConcurrentHashMap makes getQueue safe for concurrent use
    protected final Map<String, NATSStreamObservableQueue> queues = new ConcurrentHashMap<>();

    private final String durableName;
    private final String clusterId;
    private final String natsUrl;
    private final Scheduler scheduler;

    public NATSStreamEventQueueProvider(NATSStreamProperties properties, Scheduler scheduler) {
        this.scheduler = scheduler;
        // Get NATS Streaming options
        clusterId = properties.getClusterId();
        durableName = properties.getDurableName();
        natsUrl = properties.getUrl();
        // fix: parameterized SLF4J logging instead of string concatenation
        LOGGER.info(
                "NATS Stream Event Queue Provider initialized with clusterId={}, natsUrl={}, durableName={}",
                clusterId,
                natsUrl,
                durableName);
    }

    @Override
    public String getQueueType() {
        return "nats_stream";
    }

    /**
     * Returns the cached queue for the URI, creating it on first use; a closed cached
     * queue is reopened before being returned.
     */
    @Override
    @NonNull
    public ObservableQueue getQueue(String queueURI) {
        NATSStreamObservableQueue queue =
                queues.computeIfAbsent(
                        queueURI,
                        q ->
                                new NATSStreamObservableQueue(
                                        clusterId, natsUrl, durableName, queueURI, scheduler));
        if (queue.isClosed()) {
            queue.open();
        }
        return queue;
    }
}
8,259
0
Create_ds/conductor-community/event-queue/nats-streaming/src/main/java/com/netflix/conductor/contribs/queue/stan
Create_ds/conductor-community/event-queue/nats-streaming/src/main/java/com/netflix/conductor/contribs/queue/stan/config/NATSEventQueueProvider.java
/* * <p> * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * <p> * http://www.apache.org/licenses/LICENSE-2.0 * <p> * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. */ package com.netflix.conductor.contribs.queue.stan.config; import java.util.Map; import java.util.concurrent.ConcurrentHashMap; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.springframework.core.env.Environment; import org.springframework.lang.NonNull; import com.netflix.conductor.contribs.queue.stan.NATSObservableQueue; import com.netflix.conductor.core.events.EventQueueProvider; import com.netflix.conductor.core.events.queue.ObservableQueue; import rx.Scheduler; /** * @author Oleksiy Lysak */ public class NATSEventQueueProvider implements EventQueueProvider { private static final Logger LOGGER = LoggerFactory.getLogger(NATSEventQueueProvider.class); protected Map<String, NATSObservableQueue> queues = new ConcurrentHashMap<>(); private final Scheduler scheduler; public NATSEventQueueProvider(Environment environment, Scheduler scheduler) { this.scheduler = scheduler; LOGGER.info("NATS Event Queue Provider initialized..."); } @Override public String getQueueType() { return "nats"; } @Override @NonNull public ObservableQueue getQueue(String queueURI) { NATSObservableQueue queue = queues.computeIfAbsent(queueURI, q -> new NATSObservableQueue(queueURI, scheduler)); if (queue.isClosed()) { queue.open(); } return queue; } }
8,260
0
Create_ds/conductor-community/event-queue/amqp/src/test/java/com/netflix/conductor/contribs/queue
Create_ds/conductor-community/event-queue/amqp/src/test/java/com/netflix/conductor/contribs/queue/amqp/AMQPSettingsTest.java
/* * <p> * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * <p> * http://www.apache.org/licenses/LICENSE-2.0 * <p> * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. */ package com.netflix.conductor.contribs.queue.amqp; import java.time.Duration; import org.junit.Before; import org.junit.Test; import com.netflix.conductor.contribs.queue.amqp.config.AMQPEventQueueProperties; import com.netflix.conductor.contribs.queue.amqp.util.AMQPSettings; import com.rabbitmq.client.AMQP.PROTOCOL; import com.rabbitmq.client.ConnectionFactory; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; public class AMQPSettingsTest { private AMQPEventQueueProperties properties; @Before public void setUp() { properties = mock(AMQPEventQueueProperties.class); when(properties.getBatchSize()).thenReturn(1); when(properties.getPollTimeDuration()).thenReturn(Duration.ofMillis(100)); when(properties.getHosts()).thenReturn(ConnectionFactory.DEFAULT_HOST); when(properties.getUsername()).thenReturn(ConnectionFactory.DEFAULT_USER); when(properties.getPassword()).thenReturn(ConnectionFactory.DEFAULT_PASS); when(properties.getVirtualHost()).thenReturn(ConnectionFactory.DEFAULT_VHOST); when(properties.getPort()).thenReturn(PROTOCOL.PORT); when(properties.getConnectionTimeoutInMilliSecs()).thenReturn(60000); when(properties.isUseNio()).thenReturn(false); when(properties.isDurable()).thenReturn(true); when(properties.isExclusive()).thenReturn(false); 
when(properties.isAutoDelete()).thenReturn(false); when(properties.getContentType()).thenReturn("application/json"); when(properties.getContentEncoding()).thenReturn("UTF-8"); when(properties.getExchangeType()).thenReturn("topic"); when(properties.getDeliveryMode()).thenReturn(2); when(properties.isUseExchange()).thenReturn(true); } @Test public void testAMQPSettings_exchange_fromuri_defaultconfig() { String exchangestring = "amqp_exchange:myExchangeName?exchangeType=topic&routingKey=test&deliveryMode=2"; AMQPSettings settings = new AMQPSettings(properties); settings.fromURI(exchangestring); assertEquals("topic", settings.getExchangeType()); assertEquals("test", settings.getRoutingKey()); assertEquals("myExchangeName", settings.getQueueOrExchangeName()); } @Test public void testAMQPSettings_queue_fromuri_defaultconfig() { String exchangestring = "amqp_queue:myQueueName?deliveryMode=2&durable=false&autoDelete=true&exclusive=true"; AMQPSettings settings = new AMQPSettings(properties); settings.fromURI(exchangestring); assertFalse(settings.isDurable()); assertTrue(settings.isExclusive()); assertTrue(settings.autoDelete()); assertEquals(2, settings.getDeliveryMode()); assertEquals("myQueueName", settings.getQueueOrExchangeName()); } @Test(expected = IllegalArgumentException.class) public void testAMQPSettings_exchange_fromuri_wrongdeliverymode() { String exchangestring = "amqp_exchange:myExchangeName?exchangeType=topic&routingKey=test&deliveryMode=3"; AMQPSettings settings = new AMQPSettings(properties); settings.fromURI(exchangestring); } }
8,261
0
Create_ds/conductor-community/event-queue/amqp/src/test/java/com/netflix/conductor/contribs/queue
Create_ds/conductor-community/event-queue/amqp/src/test/java/com/netflix/conductor/contribs/queue/amqp/AMQPObservableQueueTest.java
/* * <p> * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * <p> * http://www.apache.org/licenses/LICENSE-2.0 * <p> * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. */ package com.netflix.conductor.contribs.queue.amqp; import java.io.IOException; import java.time.Duration; import java.util.ArrayList; import java.util.Collections; import java.util.LinkedList; import java.util.List; import java.util.Random; import java.util.UUID; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; import org.apache.commons.lang3.RandomStringUtils; import org.apache.commons.lang3.StringUtils; import org.junit.Before; import org.junit.Test; import org.mockito.Mockito; import org.mockito.internal.stubbing.answers.DoesNothing; import org.mockito.stubbing.OngoingStubbing; import com.netflix.conductor.contribs.queue.amqp.config.AMQPEventQueueProperties; import com.netflix.conductor.contribs.queue.amqp.config.AMQPRetryPattern; import com.netflix.conductor.contribs.queue.amqp.util.AMQPConstants; import com.netflix.conductor.contribs.queue.amqp.util.AMQPSettings; import com.netflix.conductor.contribs.queue.amqp.util.RetryType; import com.netflix.conductor.core.events.queue.Message; import com.rabbitmq.client.AMQP; import com.rabbitmq.client.AMQP.PROTOCOL; import com.rabbitmq.client.AMQP.Queue.DeclareOk; import com.rabbitmq.client.Address; import com.rabbitmq.client.Channel; import com.rabbitmq.client.Connection; import com.rabbitmq.client.ConnectionFactory; import com.rabbitmq.client.Consumer; import com.rabbitmq.client.Envelope; import com.rabbitmq.client.GetResponse; import 
com.rabbitmq.client.impl.AMQImpl; import rx.Observable; import rx.observers.Subscribers; import rx.observers.TestSubscriber; import static org.junit.Assert.assertArrayEquals; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertTrue; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.anyBoolean; import static org.mockito.ArgumentMatchers.anyLong; import static org.mockito.ArgumentMatchers.anyMap; import static org.mockito.ArgumentMatchers.eq; import static org.mockito.Mockito.atLeast; import static org.mockito.Mockito.atLeastOnce; import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.doNothing; import static org.mockito.Mockito.doThrow; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; @SuppressWarnings({"rawtypes", "unchecked"}) public class AMQPObservableQueueTest { final int batchSize = 10; final int pollTimeMs = 500; Address[] addresses; AMQPEventQueueProperties properties; @Before public void setUp() { properties = mock(AMQPEventQueueProperties.class); when(properties.getBatchSize()).thenReturn(1); when(properties.getPollTimeDuration()).thenReturn(Duration.ofMillis(100)); when(properties.getHosts()).thenReturn(ConnectionFactory.DEFAULT_HOST); when(properties.getUsername()).thenReturn(ConnectionFactory.DEFAULT_USER); when(properties.getPassword()).thenReturn(ConnectionFactory.DEFAULT_PASS); when(properties.getVirtualHost()).thenReturn(ConnectionFactory.DEFAULT_VHOST); when(properties.getPort()).thenReturn(PROTOCOL.PORT); when(properties.getConnectionTimeoutInMilliSecs()).thenReturn(60000); when(properties.isUseNio()).thenReturn(false); when(properties.isDurable()).thenReturn(true); when(properties.isExclusive()).thenReturn(false); 
when(properties.isAutoDelete()).thenReturn(false); when(properties.getContentType()).thenReturn("application/json"); when(properties.getContentEncoding()).thenReturn("UTF-8"); when(properties.getExchangeType()).thenReturn("topic"); when(properties.getDeliveryMode()).thenReturn(2); when(properties.isUseExchange()).thenReturn(true); addresses = new Address[] {new Address("localhost", PROTOCOL.PORT)}; AMQPConnection.setAMQPConnection(null); } List<GetResponse> buildQueue(final Random random, final int bound) { final LinkedList<GetResponse> queue = new LinkedList(); for (int i = 0; i < bound; i++) { AMQP.BasicProperties props = mock(AMQP.BasicProperties.class); when(props.getMessageId()).thenReturn(UUID.randomUUID().toString()); Envelope envelope = mock(Envelope.class); when(envelope.getDeliveryTag()).thenReturn(random.nextLong()); GetResponse response = mock(GetResponse.class); when(response.getProps()).thenReturn(props); when(response.getEnvelope()).thenReturn(envelope); when(response.getBody()).thenReturn("{}".getBytes()); when(response.getMessageCount()).thenReturn(bound - i); queue.add(response); } return queue; } Channel mockBaseChannel() throws IOException, TimeoutException { Channel channel = mock(Channel.class); when(channel.isOpen()).thenReturn(Boolean.TRUE); /* * doAnswer(invocation -> { when(channel.isOpen()).thenReturn(Boolean.FALSE); * return DoesNothing.doesNothing(); }).when(channel).close(); */ return channel; } Channel mockChannelForQueue( Channel channel, boolean isWorking, boolean exists, String name, List<GetResponse> queue) throws IOException { // queueDeclarePassive final AMQImpl.Queue.DeclareOk queueDeclareOK = new AMQImpl.Queue.DeclareOk(name, queue.size(), 1); if (exists) { when(channel.queueDeclarePassive(eq(name))).thenReturn(queueDeclareOK); } else { when(channel.queueDeclarePassive(eq(name))) .thenThrow(new IOException("Queue " + name + " exists")); } // queueDeclare OngoingStubbing<DeclareOk> declareOkOngoingStubbing = 
when(channel.queueDeclare( eq(name), anyBoolean(), anyBoolean(), anyBoolean(), anyMap())) .thenReturn(queueDeclareOK); if (!isWorking) { declareOkOngoingStubbing.thenThrow( new IOException("Cannot declare queue " + name), new RuntimeException("Not working")); } // messageCount when(channel.messageCount(eq(name))).thenReturn((long) queue.size()); // basicGet OngoingStubbing<String> getResponseOngoingStubbing = Mockito.when(channel.basicConsume(eq(name), anyBoolean(), any(Consumer.class))) .thenReturn(name); if (!isWorking) { getResponseOngoingStubbing.thenThrow( new IOException("Not working"), new RuntimeException("Not working")); } // basicPublish if (isWorking) { doNothing() .when(channel) .basicPublish( eq(StringUtils.EMPTY), eq(name), any(AMQP.BasicProperties.class), any(byte[].class)); } else { doThrow(new IOException("Not working")) .when(channel) .basicPublish( eq(StringUtils.EMPTY), eq(name), any(AMQP.BasicProperties.class), any(byte[].class)); } return channel; } Channel mockChannelForExchange( Channel channel, boolean isWorking, boolean exists, String queueName, String name, String type, String routingKey, List<GetResponse> queue) throws IOException { // exchangeDeclarePassive final AMQImpl.Exchange.DeclareOk exchangeDeclareOK = new AMQImpl.Exchange.DeclareOk(); if (exists) { when(channel.exchangeDeclarePassive(eq(name))).thenReturn(exchangeDeclareOK); } else { when(channel.exchangeDeclarePassive(eq(name))) .thenThrow(new IOException("Exchange " + name + " exists")); } // exchangeDeclare OngoingStubbing<AMQP.Exchange.DeclareOk> declareOkOngoingStubbing = when(channel.exchangeDeclare( eq(name), eq(type), anyBoolean(), anyBoolean(), anyMap())) .thenReturn(exchangeDeclareOK); if (!isWorking) { declareOkOngoingStubbing.thenThrow( new IOException("Cannot declare exchange " + name + " of type " + type), new RuntimeException("Not working")); } // queueDeclarePassive final AMQImpl.Queue.DeclareOk queueDeclareOK = new AMQImpl.Queue.DeclareOk(queueName, 
queue.size(), 1); if (exists) { when(channel.queueDeclarePassive(eq(queueName))).thenReturn(queueDeclareOK); } else { when(channel.queueDeclarePassive(eq(queueName))) .thenThrow(new IOException("Queue " + queueName + " exists")); } // queueDeclare when(channel.queueDeclare( eq(queueName), anyBoolean(), anyBoolean(), anyBoolean(), anyMap())) .thenReturn(queueDeclareOK); // queueBind when(channel.queueBind(eq(queueName), eq(name), eq(routingKey))) .thenReturn(new AMQImpl.Queue.BindOk()); // messageCount when(channel.messageCount(eq(name))).thenReturn((long) queue.size()); // basicGet OngoingStubbing<String> getResponseOngoingStubbing = Mockito.when(channel.basicConsume(eq(queueName), anyBoolean(), any(Consumer.class))) .thenReturn(queueName); if (!isWorking) { getResponseOngoingStubbing.thenThrow( new IOException("Not working"), new RuntimeException("Not working")); } // basicPublish if (isWorking) { doNothing() .when(channel) .basicPublish( eq(name), eq(routingKey), any(AMQP.BasicProperties.class), any(byte[].class)); } else { doThrow(new IOException("Not working")) .when(channel) .basicPublish( eq(name), eq(routingKey), any(AMQP.BasicProperties.class), any(byte[].class)); } return channel; } Connection mockGoodConnection(Channel channel) throws IOException { Connection connection = mock(Connection.class); when(connection.createChannel()).thenReturn(channel); when(connection.isOpen()).thenReturn(Boolean.TRUE); /* * doAnswer(invocation -> { when(connection.isOpen()).thenReturn(Boolean.FALSE); * return DoesNothing.doesNothing(); }).when(connection).close(); */ return connection; } Connection mockBadConnection() throws IOException { Connection connection = mock(Connection.class); when(connection.createChannel()).thenThrow(new IOException("Can't create channel")); when(connection.isOpen()).thenReturn(Boolean.TRUE); doThrow(new IOException("Can't close connection")).when(connection).close(); return connection; } ConnectionFactory mockConnectionFactory(Connection 
connection) throws IOException, TimeoutException { ConnectionFactory connectionFactory = mock(ConnectionFactory.class); when(connectionFactory.newConnection(eq(addresses), Mockito.anyString())) .thenReturn(connection); return connectionFactory; } void runObserve( Channel channel, AMQPObservableQueue observableQueue, String queueName, boolean useWorkingChannel, int batchSize) throws IOException { final List<Message> found = new ArrayList<>(batchSize); TestSubscriber<Message> subscriber = TestSubscriber.create(Subscribers.create(found::add)); rx.Observable<Message> observable = observableQueue.observe().take(pollTimeMs * 2, TimeUnit.MILLISECONDS); assertNotNull(observable); observable.subscribe(subscriber); subscriber.awaitTerminalEvent(); subscriber.assertNoErrors(); subscriber.assertCompleted(); if (useWorkingChannel) { verify(channel, atLeast(1)) .basicConsume(eq(queueName), anyBoolean(), any(Consumer.class)); doNothing().when(channel).basicAck(anyLong(), eq(false)); doAnswer(DoesNothing.doesNothing()).when(channel).basicAck(anyLong(), eq(false)); observableQueue.ack(Collections.synchronizedList(found)); } else { assertNotNull(found); assertTrue(found.isEmpty()); } observableQueue.close(); } @Test public void testGetMessagesFromExistingExchangeWithDurableExclusiveAutoDeleteQueueConfiguration() throws IOException, TimeoutException { // Mock channel and connection Channel channel = mockBaseChannel(); Connection connection = mockGoodConnection(channel); testGetMessagesFromExchangeAndCustomConfigurationFromURI( channel, connection, true, true, true, true, true); } @Test public void testGetMessagesFromExistingExchangeWithDefaultConfiguration() throws IOException, TimeoutException { // Mock channel and connection Channel channel = mockBaseChannel(); Connection connection = mockGoodConnection(channel); testGetMessagesFromExchangeAndDefaultConfiguration(channel, connection, true, true); } @Test public void testPublishMessagesToNotExistingExchangeAndDefaultConfiguration() 
throws IOException, TimeoutException { // Mock channel and connection Channel channel = mockBaseChannel(); Connection connection = mockGoodConnection(channel); testPublishMessagesToExchangeAndDefaultConfiguration(channel, connection, false, true); } @Test public void testAck() throws IOException, TimeoutException { // Mock channel and connection Channel channel = mockBaseChannel(); Connection connection = mockGoodConnection(channel); final Random random = new Random(); final String name = RandomStringUtils.randomAlphabetic(30), type = "topic", routingKey = RandomStringUtils.randomAlphabetic(30); AMQPRetryPattern retrySettings = null; final AMQPSettings settings = new AMQPSettings(properties) .fromURI( "amqp_exchange:" + name + "?exchangeType=" + type + "&routingKey=" + routingKey); AMQPObservableQueue observableQueue = new AMQPObservableQueue( mockConnectionFactory(connection), addresses, true, settings, retrySettings, batchSize, pollTimeMs); List<Message> messages = new LinkedList<>(); Message msg = new Message(); msg.setId("0e3eef8f-ebb1-4244-9665-759ab5bdf433"); msg.setPayload("Payload"); msg.setReceipt("1"); messages.add(msg); List<String> failedMessages = observableQueue.ack(messages); assertNotNull(failedMessages); assertTrue(failedMessages.isEmpty()); } private void testGetMessagesFromExchangeAndDefaultConfiguration( Channel channel, Connection connection, boolean exists, boolean useWorkingChannel) throws IOException, TimeoutException { final Random random = new Random(); final String name = RandomStringUtils.randomAlphabetic(30), type = "topic", routingKey = RandomStringUtils.randomAlphabetic(30); final String queueName = String.format("bound_to_%s", name); final AMQPSettings settings = new AMQPSettings(properties) .fromURI( "amqp_exchange:" + name + "?exchangeType=" + type + "&routingKey=" + routingKey); assertTrue(settings.isDurable()); assertFalse(settings.isExclusive()); assertFalse(settings.autoDelete()); assertEquals(2, settings.getDeliveryMode()); 
assertEquals(name, settings.getQueueOrExchangeName()); assertEquals(type, settings.getExchangeType()); assertEquals(routingKey, settings.getRoutingKey()); assertEquals(queueName, settings.getExchangeBoundQueueName()); List<GetResponse> queue = buildQueue(random, batchSize); channel = mockChannelForExchange( channel, useWorkingChannel, exists, queueName, name, type, routingKey, queue); AMQPRetryPattern retrySettings = null; AMQPObservableQueue observableQueue = new AMQPObservableQueue( mockConnectionFactory(connection), addresses, true, settings, retrySettings, batchSize, pollTimeMs); assertArrayEquals(addresses, observableQueue.getAddresses()); assertEquals(AMQPConstants.AMQP_EXCHANGE_TYPE, observableQueue.getType()); assertEquals( AMQPConstants.AMQP_EXCHANGE_TYPE + ":" + name + "?exchangeType=" + type + "&routingKey=" + routingKey, observableQueue.getName()); assertEquals(name, observableQueue.getURI()); assertEquals(batchSize, observableQueue.getBatchSize()); assertEquals(pollTimeMs, observableQueue.getPollTimeInMS()); assertEquals(queue.size(), observableQueue.size()); runObserve(channel, observableQueue, queueName, useWorkingChannel, batchSize); if (useWorkingChannel) { verify(channel, atLeastOnce()) .exchangeDeclare( eq(name), eq(type), eq(settings.isDurable()), eq(settings.autoDelete()), eq(Collections.emptyMap())); verify(channel, atLeastOnce()) .queueDeclare( eq(queueName), eq(settings.isDurable()), eq(settings.isExclusive()), eq(settings.autoDelete()), anyMap()); verify(channel, atLeastOnce()).queueBind(eq(queueName), eq(name), eq(routingKey)); } } private void testGetMessagesFromExchangeAndCustomConfigurationFromURI( Channel channel, Connection connection, boolean exists, boolean useWorkingChannel, boolean durable, boolean exclusive, boolean autoDelete) throws IOException, TimeoutException { final Random random = new Random(); final String name = RandomStringUtils.randomAlphabetic(30), type = "topic", routingKey = RandomStringUtils.randomAlphabetic(30); 
final String queueName = String.format("bound_to_%s", name); final AMQPSettings settings = new AMQPSettings(properties) .fromURI( "amqp_exchange:" + name + "?exchangeType=" + type + "&bindQueueName=" + queueName + "&routingKey=" + routingKey + "&deliveryMode=2" + "&durable=" + durable + "&exclusive=" + exclusive + "&autoDelete=" + autoDelete); assertEquals(durable, settings.isDurable()); assertEquals(exclusive, settings.isExclusive()); assertEquals(autoDelete, settings.autoDelete()); assertEquals(2, settings.getDeliveryMode()); assertEquals(name, settings.getQueueOrExchangeName()); assertEquals(type, settings.getExchangeType()); assertEquals(queueName, settings.getExchangeBoundQueueName()); assertEquals(routingKey, settings.getRoutingKey()); List<GetResponse> queue = buildQueue(random, batchSize); channel = mockChannelForExchange( channel, useWorkingChannel, exists, queueName, name, type, routingKey, queue); AMQPRetryPattern retrySettings = null; AMQPObservableQueue observableQueue = new AMQPObservableQueue( mockConnectionFactory(connection), addresses, true, settings, retrySettings, batchSize, pollTimeMs); assertArrayEquals(addresses, observableQueue.getAddresses()); assertEquals(AMQPConstants.AMQP_EXCHANGE_TYPE, observableQueue.getType()); assertEquals( AMQPConstants.AMQP_EXCHANGE_TYPE + ":" + name + "?exchangeType=" + type + "&bindQueueName=" + queueName + "&routingKey=" + routingKey + "&deliveryMode=2" + "&durable=" + durable + "&exclusive=" + exclusive + "&autoDelete=" + autoDelete, observableQueue.getName()); assertEquals(name, observableQueue.getURI()); assertEquals(batchSize, observableQueue.getBatchSize()); assertEquals(pollTimeMs, observableQueue.getPollTimeInMS()); assertEquals(queue.size(), observableQueue.size()); runObserve(channel, observableQueue, queueName, useWorkingChannel, batchSize); if (useWorkingChannel) { verify(channel, atLeastOnce()) .exchangeDeclare( eq(name), eq(type), eq(settings.isDurable()), eq(settings.autoDelete()), 
eq(Collections.emptyMap())); verify(channel, atLeastOnce()) .queueDeclare( eq(queueName), eq(settings.isDurable()), eq(settings.isExclusive()), eq(settings.autoDelete()), anyMap()); verify(channel, atLeastOnce()).queueBind(eq(queueName), eq(name), eq(routingKey)); } } private void testPublishMessagesToExchangeAndDefaultConfiguration( Channel channel, Connection connection, boolean exists, boolean useWorkingChannel) throws IOException, TimeoutException { final Random random = new Random(); final String name = RandomStringUtils.randomAlphabetic(30), type = "topic", queueName = RandomStringUtils.randomAlphabetic(30), routingKey = RandomStringUtils.randomAlphabetic(30); final AMQPSettings settings = new AMQPSettings(properties) .fromURI( "amqp_exchange:" + name + "?exchangeType=" + type + "&routingKey=" + routingKey + "&deliveryMode=2&durable=true&exclusive=false&autoDelete=true"); assertTrue(settings.isDurable()); assertFalse(settings.isExclusive()); assertTrue(settings.autoDelete()); assertEquals(2, settings.getDeliveryMode()); assertEquals(name, settings.getQueueOrExchangeName()); assertEquals(type, settings.getExchangeType()); assertEquals(routingKey, settings.getRoutingKey()); List<GetResponse> queue = buildQueue(random, batchSize); channel = mockChannelForExchange( channel, useWorkingChannel, exists, queueName, name, type, routingKey, queue); AMQPRetryPattern retrySettings = null; AMQPObservableQueue observableQueue = new AMQPObservableQueue( mockConnectionFactory(connection), addresses, true, settings, retrySettings, batchSize, pollTimeMs); assertArrayEquals(addresses, observableQueue.getAddresses()); assertEquals(AMQPConstants.AMQP_EXCHANGE_TYPE, observableQueue.getType()); assertEquals( AMQPConstants.AMQP_EXCHANGE_TYPE + ":" + name + "?exchangeType=" + type + "&routingKey=" + routingKey + "&deliveryMode=2&durable=true&exclusive=false&autoDelete=true", observableQueue.getName()); assertEquals(name, observableQueue.getURI()); assertEquals(batchSize, 
observableQueue.getBatchSize()); assertEquals(pollTimeMs, observableQueue.getPollTimeInMS()); assertEquals(queue.size(), observableQueue.size()); List<Message> messages = new LinkedList<>(); Observable.range(0, batchSize) .forEach((Integer x) -> messages.add(new Message("" + x, "payload: " + x, null))); assertEquals(batchSize, messages.size()); observableQueue.publish(messages); if (useWorkingChannel) { verify(channel, times(batchSize)) .basicPublish( eq(name), eq(routingKey), any(AMQP.BasicProperties.class), any(byte[].class)); } } @Test public void testGetMessagesFromExistingQueueAndDefaultConfiguration() throws IOException, TimeoutException { // Mock channel and connection Channel channel = mockBaseChannel(); Connection connection = mockGoodConnection(channel); testGetMessagesFromQueueAndDefaultConfiguration(channel, connection, true, true); } @Test public void testGetMessagesFromNotExistingQueueAndDefaultConfiguration() throws IOException, TimeoutException { // Mock channel and connection Channel channel = mockBaseChannel(); Connection connection = mockGoodConnection(channel); testGetMessagesFromQueueAndDefaultConfiguration(channel, connection, false, true); } @Test public void testGetMessagesFromQueueWithBadChannel() throws IOException, TimeoutException { // Mock channel and connection Channel channel = mockBaseChannel(); Connection connection = mockGoodConnection(channel); testGetMessagesFromQueueAndDefaultConfiguration(channel, connection, true, false); } @Test(expected = RuntimeException.class) public void testPublishMessagesToQueueWithBadChannel() throws IOException, TimeoutException { // Mock channel and connection Channel channel = mockBaseChannel(); Connection connection = mockGoodConnection(channel); testPublishMessagesToQueueAndDefaultConfiguration(channel, connection, true, false); } @Test(expected = IllegalArgumentException.class) public void testAMQPObservalbleQueue_empty() throws IOException, TimeoutException { AMQPSettings settings = new 
AMQPSettings(properties).fromURI("amqp_queue:test"); AMQPRetryPattern retrySettings = null; AMQPObservableQueue observableQueue = new AMQPObservableQueue( null, addresses, false, settings, retrySettings, batchSize, pollTimeMs); } @Test(expected = IllegalArgumentException.class) public void testAMQPObservalbleQueue_addressEmpty() throws IOException, TimeoutException { AMQPSettings settings = new AMQPSettings(properties).fromURI("amqp_queue:test"); AMQPRetryPattern retrySettings = null; AMQPObservableQueue observableQueue = new AMQPObservableQueue( mockConnectionFactory(mockGoodConnection(mockBaseChannel())), null, false, settings, retrySettings, batchSize, pollTimeMs); } @Test(expected = IllegalArgumentException.class) public void testAMQPObservalbleQueue_settingsEmpty() throws IOException, TimeoutException { AMQPRetryPattern retrySettings = null; AMQPObservableQueue observableQueue = new AMQPObservableQueue( mockConnectionFactory(mockGoodConnection(mockBaseChannel())), addresses, false, null, retrySettings, batchSize, pollTimeMs); } @Test(expected = IllegalArgumentException.class) public void testAMQPObservalbleQueue_batchsizezero() throws IOException, TimeoutException { AMQPSettings settings = new AMQPSettings(properties).fromURI("amqp_queue:test"); AMQPRetryPattern retrySettings = null; AMQPObservableQueue observableQueue = new AMQPObservableQueue( mockConnectionFactory(mockGoodConnection(mockBaseChannel())), addresses, false, settings, retrySettings, 0, pollTimeMs); } @Test(expected = IllegalArgumentException.class) public void testAMQPObservalbleQueue_polltimezero() throws IOException, TimeoutException { AMQPSettings settings = new AMQPSettings(properties).fromURI("amqp_queue:test"); AMQPRetryPattern retrySettings = null; AMQPObservableQueue observableQueue = new AMQPObservableQueue( mockConnectionFactory(mockGoodConnection(mockBaseChannel())), addresses, false, settings, retrySettings, batchSize, 0); } @Test public void 
testclosetExistingQueueAndDefaultConfiguration() throws IOException, TimeoutException { // Mock channel and connection Channel channel = mockBaseChannel(); Connection connection = mockGoodConnection(channel); testGetMessagesFromQueueAndDefaultConfiguration_close(channel, connection, false, true); } private void testGetMessagesFromQueueAndDefaultConfiguration( Channel channel, Connection connection, boolean queueExists, boolean useWorkingChannel) throws IOException, TimeoutException { final Random random = new Random(); final String queueName = RandomStringUtils.randomAlphabetic(30); AMQPSettings settings = new AMQPSettings(properties).fromURI("amqp_queue:" + queueName); List<GetResponse> queue = buildQueue(random, batchSize); channel = mockChannelForQueue(channel, useWorkingChannel, queueExists, queueName, queue); AMQPRetryPattern retrySettings = null; AMQPObservableQueue observableQueue = new AMQPObservableQueue( mockConnectionFactory(connection), addresses, false, settings, retrySettings, batchSize, pollTimeMs); assertArrayEquals(addresses, observableQueue.getAddresses()); assertEquals(AMQPConstants.AMQP_QUEUE_TYPE, observableQueue.getType()); assertEquals(AMQPConstants.AMQP_QUEUE_TYPE + ":" + queueName, observableQueue.getName()); assertEquals(queueName, observableQueue.getURI()); assertEquals(batchSize, observableQueue.getBatchSize()); assertEquals(pollTimeMs, observableQueue.getPollTimeInMS()); assertEquals(queue.size(), observableQueue.size()); runObserve(channel, observableQueue, queueName, useWorkingChannel, batchSize); } private void testGetMessagesFromQueueAndDefaultConfiguration_close( Channel channel, Connection connection, boolean queueExists, boolean useWorkingChannel) throws IOException, TimeoutException { final Random random = new Random(); final String queueName = RandomStringUtils.randomAlphabetic(30); AMQPSettings settings = new AMQPSettings(properties).fromURI("amqp_queue:" + queueName); List<GetResponse> queue = buildQueue(random, batchSize); 
channel = mockChannelForQueue(channel, useWorkingChannel, queueExists, queueName, queue); AMQPRetryPattern retrySettings = null; AMQPObservableQueue observableQueue = new AMQPObservableQueue( mockConnectionFactory(connection), addresses, false, settings, retrySettings, batchSize, pollTimeMs); observableQueue.close(); assertArrayEquals(addresses, observableQueue.getAddresses()); assertEquals(AMQPConstants.AMQP_QUEUE_TYPE, observableQueue.getType()); assertEquals(AMQPConstants.AMQP_QUEUE_TYPE + ":" + queueName, observableQueue.getName()); assertEquals(queueName, observableQueue.getURI()); assertEquals(batchSize, observableQueue.getBatchSize()); assertEquals(pollTimeMs, observableQueue.getPollTimeInMS()); assertEquals(queue.size(), observableQueue.size()); } private void testPublishMessagesToQueueAndDefaultConfiguration( Channel channel, Connection connection, boolean queueExists, boolean useWorkingChannel) throws IOException, TimeoutException { final Random random = new Random(); final String queueName = RandomStringUtils.randomAlphabetic(30); final AMQPSettings settings = new AMQPSettings(properties) .fromURI( "amqp_queue:" + queueName + "?deliveryMode=2&durable=true&exclusive=false&autoDelete=true"); assertTrue(settings.isDurable()); assertFalse(settings.isExclusive()); assertTrue(settings.autoDelete()); assertEquals(2, settings.getDeliveryMode()); List<GetResponse> queue = buildQueue(random, batchSize); channel = mockChannelForQueue(channel, useWorkingChannel, queueExists, queueName, queue); AMQPRetryPattern retrySettings = new AMQPRetryPattern(3, 5, RetryType.REGULARINTERVALS); AMQPObservableQueue observableQueue = new AMQPObservableQueue( mockConnectionFactory(connection), addresses, false, settings, retrySettings, batchSize, pollTimeMs); assertArrayEquals(addresses, observableQueue.getAddresses()); assertEquals(AMQPConstants.AMQP_QUEUE_TYPE, observableQueue.getType()); assertEquals( AMQPConstants.AMQP_QUEUE_TYPE + ":" + queueName + 
"?deliveryMode=2&durable=true&exclusive=false&autoDelete=true", observableQueue.getName()); assertEquals(queueName, observableQueue.getURI()); assertEquals(batchSize, observableQueue.getBatchSize()); assertEquals(pollTimeMs, observableQueue.getPollTimeInMS()); assertEquals(queue.size(), observableQueue.size()); List<Message> messages = new LinkedList<>(); Observable.range(0, batchSize) .forEach((Integer x) -> messages.add(new Message("" + x, "payload: " + x, null))); assertEquals(batchSize, messages.size()); observableQueue.publish(messages); if (useWorkingChannel) { verify(channel, times(batchSize)) .basicPublish( eq(StringUtils.EMPTY), eq(queueName), any(AMQP.BasicProperties.class), any(byte[].class)); } } }
8,262
0
Create_ds/conductor-community/event-queue/amqp/src/test/java/com/netflix/conductor/contribs/queue
Create_ds/conductor-community/event-queue/amqp/src/test/java/com/netflix/conductor/contribs/queue/amqp/AMQPEventQueueProviderTest.java
/*
 * <p>
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
 * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations under the License.
 */
package com.netflix.conductor.contribs.queue.amqp;

import java.time.Duration;

import org.junit.Before;
import org.junit.Test;

import com.netflix.conductor.contribs.queue.amqp.config.AMQPEventQueueProperties;
import com.netflix.conductor.contribs.queue.amqp.config.AMQPEventQueueProvider;
import com.netflix.conductor.contribs.queue.amqp.util.AMQPConstants;
import com.netflix.conductor.core.events.queue.ObservableQueue;
import com.rabbitmq.client.AMQP.PROTOCOL;
import com.rabbitmq.client.ConnectionFactory;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

/**
 * Unit tests for {@link AMQPEventQueueProvider}: verifies that exchange- and queue-style URIs
 * produce an {@link ObservableQueue} whose name echoes the URI and whose type matches the prefix.
 */
public class AMQPEventQueueProviderTest {

    private AMQPEventQueueProperties properties;

    /** Stubs an {@link AMQPEventQueueProperties} mock with the client-library defaults. */
    @Before
    public void setUp() {
        properties = mock(AMQPEventQueueProperties.class);
        // Connection-level settings mirror the RabbitMQ client's own default constants.
        when(properties.getHosts()).thenReturn(ConnectionFactory.DEFAULT_HOST);
        when(properties.getPort()).thenReturn(PROTOCOL.PORT);
        when(properties.getUsername()).thenReturn(ConnectionFactory.DEFAULT_USER);
        when(properties.getPassword()).thenReturn(ConnectionFactory.DEFAULT_PASS);
        when(properties.getVirtualHost()).thenReturn(ConnectionFactory.DEFAULT_VHOST);
        when(properties.getConnectionTimeoutInMilliSecs()).thenReturn(60000);
        when(properties.isUseNio()).thenReturn(false);
        // Polling and queue/exchange declaration defaults consumed by the provider under test.
        when(properties.getBatchSize()).thenReturn(1);
        when(properties.getPollTimeDuration()).thenReturn(Duration.ofMillis(100));
        when(properties.isDurable()).thenReturn(true);
        when(properties.isExclusive()).thenReturn(false);
        when(properties.isAutoDelete()).thenReturn(false);
        when(properties.getContentType()).thenReturn("application/json");
        when(properties.getContentEncoding()).thenReturn("UTF-8");
        when(properties.getExchangeType()).thenReturn("topic");
        when(properties.getDeliveryMode()).thenReturn(2);
        when(properties.isUseExchange()).thenReturn(true);
    }

    @Test
    public void testAMQPEventQueueProvider_defaultconfig_exchange() {
        String uri =
                "amqp_exchange:myExchangeName?exchangeType=topic&routingKey=test&deliveryMode=2";
        AMQPEventQueueProvider provider =
                new AMQPEventQueueProvider(properties, "amqp_exchange", true);
        ObservableQueue queue = provider.getQueue(uri);
        assertNotNull(queue);
        // The full URI is reported back as the queue name; the type reflects the exchange flavor.
        assertEquals(uri, queue.getName());
        assertEquals(AMQPConstants.AMQP_EXCHANGE_TYPE, queue.getType());
    }

    @Test
    public void testAMQPEventQueueProvider_defaultconfig_queue() {
        String uri =
                "amqp_queue:myQueueName?deliveryMode=2&durable=false&autoDelete=true&exclusive=true";
        AMQPEventQueueProvider provider =
                new AMQPEventQueueProvider(properties, "amqp_queue", false);
        ObservableQueue queue = provider.getQueue(uri);
        assertNotNull(queue);
        // The full URI is reported back as the queue name; the type reflects the queue flavor.
        assertEquals(uri, queue.getName());
        assertEquals(AMQPConstants.AMQP_QUEUE_TYPE, queue.getType());
    }
}
8,263
0
Create_ds/conductor-community/event-queue/amqp/src/main/java/com/netflix/conductor/contribs/queue
Create_ds/conductor-community/event-queue/amqp/src/main/java/com/netflix/conductor/contribs/queue/amqp/AMQPConnection.java
/*
 * <p>
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
 * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations under the License.
 */
package com.netflix.conductor.contribs.queue.amqp;

import java.io.IOException;
import java.util.Arrays;
import java.util.HashSet;
import java.util.Iterator;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.TimeoutException;
import java.util.stream.Collectors;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.netflix.conductor.contribs.queue.amqp.config.AMQPRetryPattern;
import com.netflix.conductor.contribs.queue.amqp.util.AMQPConstants;
import com.netflix.conductor.contribs.queue.amqp.util.ConnectionType;

import com.rabbitmq.client.Address;
import com.rabbitmq.client.BlockedListener;
import com.rabbitmq.client.Channel;
import com.rabbitmq.client.Connection;
import com.rabbitmq.client.ConnectionFactory;
import com.rabbitmq.client.ShutdownListener;
import com.rabbitmq.client.ShutdownSignalException;

/**
 * Singleton holder for the RabbitMQ publisher/subscriber connections and their channel pools.
 *
 * <p>A single publisher connection and a single subscriber connection are lazily created and
 * re-created when found closed. Channels are pooled per {@link ConnectionType}; subscriber
 * channels are additionally reserved per queue/exchange name so acknowledgements always happen on
 * the channel that delivered the message.
 */
public class AMQPConnection {

    private static final Logger LOGGER = LoggerFactory.getLogger(AMQPConnection.class);

    private volatile Connection publisherConnection = null;
    private volatile Connection subscriberConnection = null;
    private ConnectionFactory factory = null;
    private Address[] addresses = null;
    private static AMQPConnection amqpConnection = null;
    private static final String PUBLISHER = "Publisher";
    private static final String SUBSCRIBER = "Subscriber";
    // Idle channels available for reuse, keyed by connection type.
    private static final Map<ConnectionType, Set<Channel>> availableChannelPool =
            new ConcurrentHashMap<ConnectionType, Set<Channel>>();
    // Channels pinned to a specific subscription ("<type>;<queueOrExchangeName>") so that acks
    // are issued on the same channel the message was consumed from.
    private static final Map<String, Channel> subscriberReservedChannelPool =
            new ConcurrentHashMap<String, Channel>();
    private static AMQPRetryPattern retrySettings = null;

    private AMQPConnection() {}

    private AMQPConnection(final ConnectionFactory factory, final Address[] address) {
        this.factory = factory;
        this.addresses = address;
    }

    /**
     * Returns the process-wide instance, creating it on first use. Note that the factory and
     * addresses are only taken from the first call; later calls refresh only the retry settings.
     */
    public static synchronized AMQPConnection getInstance(
            final ConnectionFactory factory,
            final Address[] address,
            final AMQPRetryPattern retrySettings) {
        if (AMQPConnection.amqpConnection == null) {
            AMQPConnection.amqpConnection = new AMQPConnection(factory, address);
        }
        AMQPConnection.retrySettings = retrySettings;
        return AMQPConnection.amqpConnection;
    }

    // Exposed for UT
    public static void setAMQPConnection(AMQPConnection amqpConnection) {
        AMQPConnection.amqpConnection = amqpConnection;
    }

    public Address[] getAddresses() {
        return addresses;
    }

    /**
     * Applies the configured retry pattern to a connect failure. Throws a {@link RuntimeException}
     * immediately when no retry pattern is configured, or once retries are exhausted; otherwise
     * returns so the caller can attempt the connection again.
     *
     * @param messagePrefix human-readable failure prefix, e.g. {@code "IO error while connecting to "}
     * @param e the failure being retried
     * @param retryIndex 1-based attempt counter passed to the retry pattern
     */
    private void retryOrThrow(final String messagePrefix, final Exception e, final int retryIndex) {
        final String target =
                Arrays.stream(addresses)
                        .map(address -> address.toString())
                        .collect(Collectors.joining(","));
        AMQPRetryPattern retry = retrySettings;
        if (retry == null) {
            final String error = messagePrefix + target;
            LOGGER.error(error, e);
            throw new RuntimeException(error, e);
        }
        try {
            retry.continueOrPropogate(e, retryIndex);
        } catch (Exception ex) {
            final String error = "Retries completed. " + messagePrefix + target;
            LOGGER.error(error, e);
            throw new RuntimeException(error, e);
        }
    }

    /**
     * Opens a new broker connection, retrying per the configured pattern. The client-provided
     * connection name is {@code $HOSTNAME-<connectionPrefix>} to ease broker-side debugging.
     */
    private Connection createConnection(String connectionPrefix) {
        int retryIndex = 1;
        while (true) {
            try {
                Connection connection =
                        factory.newConnection(
                                addresses, System.getenv("HOSTNAME") + "-" + connectionPrefix);
                if (connection == null || !connection.isOpen()) {
                    throw new RuntimeException("Failed to open connection");
                }
                connection.addShutdownListener(
                        new ShutdownListener() {
                            @Override
                            public void shutdownCompleted(ShutdownSignalException cause) {
                                LOGGER.error(
                                        "Received a shutdown exception for the connection {}. reason {} cause{}",
                                        connection.getClientProvidedName(),
                                        cause.getMessage(),
                                        cause);
                            }
                        });
                connection.addBlockedListener(
                        new BlockedListener() {
                            @Override
                            public void handleUnblocked() throws IOException {
                                LOGGER.info(
                                        "Connection {} is unblocked",
                                        connection.getClientProvidedName());
                            }

                            @Override
                            public void handleBlocked(String reason) throws IOException {
                                LOGGER.error(
                                        "Connection {} is blocked. reason: {}",
                                        connection.getClientProvidedName(),
                                        reason);
                            }
                        });
                return connection;
            } catch (final IOException e) {
                retryOrThrow("IO error while connecting to ", e, retryIndex);
                retryIndex++;
            } catch (final TimeoutException e) {
                retryOrThrow("Timeout while connecting to ", e, retryIndex);
                retryIndex++;
            }
        }
    }

    /**
     * Returns a channel for the given connection type. Subscriber channels are cached per
     * queue/exchange name so that message acknowledgement happens on the delivering channel;
     * publisher channels come straight from the shared pool.
     *
     * @param connectionType PUBLISHER or SUBSCRIBER
     * @param queueOrExchangeName used only to key the reserved subscriber channel cache
     */
    public Channel getOrCreateChannel(ConnectionType connectionType, String queueOrExchangeName)
            throws Exception {
        LOGGER.debug(
                "Accessing the channel for queueOrExchange {} with type {} ",
                queueOrExchangeName,
                connectionType);
        switch (connectionType) {
            case SUBSCRIBER:
                String subChnName = connectionType + ";" + queueOrExchangeName;
                if (subscriberReservedChannelPool.containsKey(subChnName)) {
                    Channel locChn = subscriberReservedChannelPool.get(subChnName);
                    if (locChn != null && locChn.isOpen()) {
                        return locChn;
                    }
                }
                synchronized (this) {
                    if (subscriberConnection == null || !subscriberConnection.isOpen()) {
                        subscriberConnection = createConnection(SUBSCRIBER);
                    }
                }
                Channel subChn = borrowChannel(connectionType, subscriberConnection);
                // Add the subscribed channels to Map to avoid messages being acknowledged on
                // different from the subscribed one
                subscriberReservedChannelPool.put(subChnName, subChn);
                return subChn;
            case PUBLISHER:
                synchronized (this) {
                    if (publisherConnection == null || !publisherConnection.isOpen()) {
                        publisherConnection = createConnection(PUBLISHER);
                    }
                }
                return borrowChannel(connectionType, publisherConnection);
            default:
                return null;
        }
    }

    /**
     * Creates a fresh channel on the given connection, retrying per the configured pattern.
     * Any failure (including a channel that opens closed) is retried identically, matching the
     * previous behavior where the IOException and generic Exception handlers were byte-identical.
     */
    private Channel getOrCreateChannel(ConnectionType connType, Connection rmqConnection) {
        int retryIndex = 1;
        while (true) {
            try {
                LOGGER.debug("Creating a channel for " + connType);
                Channel locChn = rmqConnection.createChannel();
                if (locChn == null || !locChn.isOpen()) {
                    throw new RuntimeException("Fail to open " + connType + " channel");
                }
                locChn.addShutdownListener(
                        cause -> {
                            LOGGER.error(
                                    connType + " Channel has been shutdown: {}",
                                    cause.getMessage(),
                                    cause);
                        });
                return locChn;
            } catch (final Exception e) {
                final String target =
                        Arrays.stream(addresses)
                                .map(address -> address.toString())
                                .collect(Collectors.joining(","));
                AMQPRetryPattern retry = retrySettings;
                if (retry == null) {
                    throw new RuntimeException(
                            "Cannot open " + connType + " channel on " + target, e);
                }
                try {
                    retry.continueOrPropogate(e, retryIndex);
                } catch (Exception ex) {
                    throw new RuntimeException(
                            "Retries completed. Cannot open " + connType + " channel on " + target,
                            e);
                }
                retryIndex++;
            }
        }
    }

    /**
     * Closes every pooled and reserved channel, then both connections, and resets all state so
     * subsequent calls lazily reconnect.
     */
    public void close() {
        LOGGER.info("Closing all connections and channels");
        try {
            closeChannelsInMap(ConnectionType.PUBLISHER);
            closeChannelsInMap(ConnectionType.SUBSCRIBER);
            // Reserved subscriber channels live outside the shared pool; close them explicitly
            // rather than relying on connection close to reap them.
            for (Channel channel : subscriberReservedChannelPool.values()) {
                closeChannel(channel);
            }
            closeConnection(publisherConnection);
            closeConnection(subscriberConnection);
        } finally {
            availableChannelPool.clear();
            // Previously left stale closed channels behind after close().
            subscriberReservedChannelPool.clear();
            publisherConnection = null;
            subscriberConnection = null;
        }
    }

    /** Closes and drops every pooled channel of the given connection type. */
    private void closeChannelsInMap(ConnectionType conType) {
        Set<Channel> channels = availableChannelPool.get(conType);
        if (channels != null && !channels.isEmpty()) {
            Iterator<Channel> itr = channels.iterator();
            while (itr.hasNext()) {
                Channel channel = itr.next();
                closeChannel(channel);
            }
            channels.clear();
        }
    }

    /** Best-effort close; logs and swallows failures so shutdown continues. */
    private void closeConnection(Connection connection) {
        if (connection == null || !connection.isOpen()) {
            LOGGER.warn("Connection is null or closed already. Not closing it again");
        } else {
            try {
                connection.close();
            } catch (Exception e) {
                LOGGER.warn("Fail to close connection: {}", e.getMessage(), e);
            }
        }
    }

    /** Best-effort close; logs and swallows failures so shutdown continues. */
    private void closeChannel(Channel channel) {
        if (channel == null || !channel.isOpen()) {
            LOGGER.warn("Channel is null or closed already. Not closing it again");
        } else {
            try {
                channel.close();
            } catch (Exception e) {
                LOGGER.warn("Fail to close channel: {}", e.getMessage(), e);
            }
        }
    }

    /**
     * Gets the channel for specified connectionType, reusing a pooled open channel when one
     * exists and creating a new one otherwise. Closed entries found in the pool are discarded.
     *
     * @param connectionType holds the multiple channels for different connection types for thread
     *     safe operation.
     * @param rmqConnection publisher or subscriber connection instance
     * @return channel instance
     * @throws Exception
     */
    private synchronized Channel borrowChannel(
            ConnectionType connectionType, Connection rmqConnection) throws Exception {
        Set<Channel> channels = availableChannelPool.get(connectionType);
        if (channels == null || channels.isEmpty()) {
            Channel channel = getOrCreateChannel(connectionType, rmqConnection);
            LOGGER.info(String.format(AMQPConstants.INFO_CHANNEL_CREATION_SUCCESS, connectionType));
            return channel;
        }
        Iterator<Channel> itr = channels.iterator();
        while (itr.hasNext()) {
            Channel channel = itr.next();
            // Defensive null check retained: older pool contents may include null markers.
            if (channel != null && channel.isOpen()) {
                itr.remove();
                LOGGER.info(
                        String.format(AMQPConstants.INFO_CHANNEL_BORROW_SUCCESS, connectionType));
                return channel;
            } else {
                itr.remove();
            }
        }
        Channel channel = getOrCreateChannel(connectionType, rmqConnection);
        LOGGER.info(String.format(AMQPConstants.INFO_CHANNEL_RESET_SUCCESS, connectionType));
        return channel;
    }

    /**
     * Returns the channel to connection pool for specified connectionType. A null or closed
     * channel is discarded instead of being pooled — the previous implementation inserted
     * {@code null} into the pool in that case, forcing {@link #borrowChannel} to filter dead
     * entries on every borrow.
     *
     * @param connectionType
     * @param channel
     * @throws Exception
     */
    public synchronized void returnChannel(ConnectionType connectionType, Channel channel)
            throws Exception {
        if (channel != null && channel.isOpen()) {
            availableChannelPool
                    .computeIfAbsent(connectionType, type -> new HashSet<Channel>())
                    .add(channel);
        }
        LOGGER.info(String.format(AMQPConstants.INFO_CHANNEL_RETURN_SUCCESS, connectionType));
    }
}
8,264
0
Create_ds/conductor-community/event-queue/amqp/src/main/java/com/netflix/conductor/contribs/queue
Create_ds/conductor-community/event-queue/amqp/src/main/java/com/netflix/conductor/contribs/queue/amqp/AMQPObservableQueue.java
/*
 * <p>
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
 * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations under the License.
 */
package com.netflix.conductor.contribs.queue.amqp;

import java.io.IOException;
import java.security.KeyManagementException;
import java.security.NoSuchAlgorithmException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.UUID;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;

import org.apache.commons.lang3.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.netflix.conductor.contribs.queue.amqp.config.AMQPEventQueueProperties;
import com.netflix.conductor.contribs.queue.amqp.config.AMQPRetryPattern;
import com.netflix.conductor.contribs.queue.amqp.util.AMQPConstants;
import com.netflix.conductor.contribs.queue.amqp.util.AMQPSettings;
import com.netflix.conductor.contribs.queue.amqp.util.ConnectionType;
import com.netflix.conductor.core.events.queue.Message;
import com.netflix.conductor.core.events.queue.ObservableQueue;
import com.netflix.conductor.metrics.Monitors;

import com.google.common.collect.Maps;
import com.rabbitmq.client.AMQP;
import com.rabbitmq.client.Address;
import com.rabbitmq.client.Channel;
import com.rabbitmq.client.ConnectionFactory;
import com.rabbitmq.client.Consumer;
import com.rabbitmq.client.DefaultConsumer;
import com.rabbitmq.client.Envelope;
import com.rabbitmq.client.GetResponse;

import rx.Observable;
import rx.Subscriber;

/**
 * {@link ObservableQueue} implementation backed by RabbitMQ (AMQP). Messages are consumed either
 * from a queue directly or via an exchange bound to a queue, and surfaced to Conductor through an
 * RxJava {@link Observable}. Two consumption modes are supported: sequential (messages buffered in
 * an in-memory queue and drained on a poll interval) and event-based (each delivery pushed straight
 * to the subscriber on its own thread).
 *
 * @author Ritu Parathody
 */
public class AMQPObservableQueue implements ObservableQueue {

    private static final Logger LOGGER = LoggerFactory.getLogger(AMQPObservableQueue.class);

    private final AMQPSettings settings;
    private final AMQPRetryPattern retrySettings;
    // RabbitMQ queue argument selecting the queue implementation (e.g. "classic", "quorum").
    private final String QUEUE_TYPE = "x-queue-type";
    private final int batchSize;
    private final boolean useExchange;
    private int pollTimeInMS;
    private AMQPConnection amqpConnection;

    // Buffer used in sequential-processing mode; filled by the consumer callback and drained
    // by the polling observable in observe().
    protected LinkedBlockingQueue<Message> messages = new LinkedBlockingQueue<>();
    private volatile boolean running;

    /**
     * @param factory RabbitMQ connection factory (required)
     * @param addresses broker addresses (required, non-empty)
     * @param useExchange true to publish/consume via an exchange, false for a plain queue
     * @param settings parsed queue/exchange settings (required)
     * @param retrySettings retry pattern applied to broker operations; may be null (no retries)
     * @param batchSize prefetch count / max unacked deliveries, must be &gt; 0
     * @param pollTimeInMS poll interval for sequential processing, must be &gt; 0
     */
    public AMQPObservableQueue(
            ConnectionFactory factory,
            Address[] addresses,
            boolean useExchange,
            AMQPSettings settings,
            AMQPRetryPattern retrySettings,
            int batchSize,
            int pollTimeInMS) {
        if (factory == null) {
            throw new IllegalArgumentException("Connection factory is undefined");
        }
        if (addresses == null || addresses.length == 0) {
            throw new IllegalArgumentException("Addresses are undefined");
        }
        if (settings == null) {
            throw new IllegalArgumentException("Settings are undefined");
        }
        if (batchSize <= 0) {
            throw new IllegalArgumentException("Batch size must be greater than 0");
        }
        if (pollTimeInMS <= 0) {
            throw new IllegalArgumentException("Poll time must be greater than 0 ms");
        }
        this.useExchange = useExchange;
        this.settings = settings;
        this.batchSize = batchSize;
        this.amqpConnection = AMQPConnection.getInstance(factory, addresses, retrySettings);
        this.retrySettings = retrySettings;
        this.setPollTimeInMS(pollTimeInMS);
    }

    /**
     * Creates the observable stream of messages. In sequential mode, messages buffered by the
     * AMQP consumer are drained on a fixed interval so they are processed one after the other;
     * otherwise each AMQP delivery is pushed to the subscriber as it arrives.
     */
    @Override
    public Observable<Message> observe() {
        Observable.OnSubscribe<Message> onSubscribe = null;
        // This will enabled the messages to be processed one after the other as per the
        // observable next behavior.
        if (settings.isSequentialProcessing()) {
            LOGGER.info("Subscribing for the message processing on schedule basis");
            receiveMessages();
            onSubscribe =
                    subscriber -> {
                        Observable<Long> interval =
                                Observable.interval(pollTimeInMS, TimeUnit.MILLISECONDS);
                        interval.flatMap(
                                        (Long x) -> {
                                            if (!isRunning()) {
                                                LOGGER.debug(
                                                        "Component stopped, skip listening for messages from RabbitMQ");
                                                return Observable.from(Collections.emptyList());
                                            } else {
                                                List<Message> available = new LinkedList<>();
                                                messages.drainTo(available);
                                                if (!available.isEmpty()) {
                                                    AtomicInteger count = new AtomicInteger(0);
                                                    StringBuilder buffer = new StringBuilder();
                                                    available.forEach(
                                                            msg -> {
                                                                buffer.append(msg.getId())
                                                                        .append("=")
                                                                        .append(msg.getPayload());
                                                                count.incrementAndGet();
                                                                if (count.get()
                                                                        < available.size()) {
                                                                    buffer.append(",");
                                                                }
                                                            });
                                                    LOGGER.info(
                                                            String.format(
                                                                    "Batch from %s to conductor is %s",
                                                                    settings
                                                                            .getQueueOrExchangeName(),
                                                                    buffer.toString()));
                                                }
                                                return Observable.from(available);
                                            }
                                        })
                                .subscribe(subscriber::onNext, subscriber::onError);
                    };
            LOGGER.info("Subscribed for the message processing on schedule basis");
        } else {
            onSubscribe =
                    subscriber -> {
                        LOGGER.info("Subscribing for the event based AMQP message processing");
                        receiveMessages(subscriber);
                        LOGGER.info("Subscribed for the event based AMQP message processing");
                    };
        }
        return Observable.create(onSubscribe);
    }

    @Override
    public String getType() {
        return useExchange ? AMQPConstants.AMQP_EXCHANGE_TYPE : AMQPConstants.AMQP_QUEUE_TYPE;
    }

    @Override
    public String getName() {
        return settings.getEventName();
    }

    @Override
    public String getURI() {
        return settings.getQueueOrExchangeName();
    }

    public int getBatchSize() {
        return batchSize;
    }

    public AMQPSettings getSettings() {
        return settings;
    }

    public Address[] getAddresses() {
        return amqpConnection.getAddresses();
    }

    /**
     * Acknowledges the given messages.
     *
     * @return receipts (delivery tags) of the messages that could NOT be acknowledged
     */
    public List<String> ack(List<Message> messages) {
        final List<String> failedMessages = new ArrayList<>();
        for (final Message message : messages) {
            try {
                ackMsg(message);
            } catch (final Exception e) {
                LOGGER.error("Cannot ACK message with delivery tag {}", message.getReceipt(), e);
                failedMessages.add(message.getReceipt());
            }
        }
        return failedMessages;
    }

    /**
     * Acknowledges a single message, retrying per {@link AMQPRetryPattern} when configured.
     * Propagates the last failure once retries are exhausted (or immediately when no retry
     * pattern is configured).
     */
    public void ackMsg(Message message) throws Exception {
        int retryIndex = 1;
        while (true) {
            try {
                LOGGER.info("ACK message with delivery tag {}", message.getReceipt());
                Channel chn =
                        amqpConnection.getOrCreateChannel(
                                ConnectionType.SUBSCRIBER, getSettings().getQueueOrExchangeName());
                // The receipt is the delivery tag assigned by RabbitMQ on delivery.
                chn.basicAck(Long.parseLong(message.getReceipt()), false);
                LOGGER.info("Ack'ed the message with delivery tag {}", message.getReceipt());
                break;
            } catch (final Exception e) {
                AMQPRetryPattern retry = retrySettings;
                if (retry == null) {
                    LOGGER.error(
                            "Cannot ACK message with delivery tag {}", message.getReceipt(), e);
                    throw e;
                }
                try {
                    retry.continueOrPropogate(e, retryIndex);
                } catch (Exception ex) {
                    LOGGER.error(
                            "Retries completed. Cannot ACK message with delivery tag {}",
                            message.getReceipt(),
                            e);
                    throw ex;
                }
                retryIndex++;
            }
        }
    }

    /**
     * Negatively acknowledges the given messages (without requeueing). Failures are logged but
     * never propagated to the caller.
     */
    @Override
    public void nack(List<Message> messages) {
        for (final Message message : messages) {
            int retryIndex = 1;
            while (true) {
                try {
                    LOGGER.info("NACK message with delivery tag {}", message.getReceipt());
                    Channel chn =
                            amqpConnection.getOrCreateChannel(
                                    ConnectionType.SUBSCRIBER,
                                    getSettings().getQueueOrExchangeName());
                    chn.basicNack(Long.parseLong(message.getReceipt()), false, false);
                    LOGGER.info("Nack'ed the message with delivery tag {}", message.getReceipt());
                    break;
                } catch (final Exception e) {
                    AMQPRetryPattern retry = retrySettings;
                    if (retry == null) {
                        LOGGER.error(
                                "Cannot NACK message with delivery tag {}",
                                message.getReceipt(),
                                e);
                        // Bug fix: without retry settings there is nothing to retry with; stop
                        // here instead of falling through to a NullPointerException on
                        // retry.continueOrPropogate below.
                        break;
                    }
                    try {
                        retry.continueOrPropogate(e, retryIndex);
                    } catch (Exception ex) {
                        LOGGER.error(
                                "Retries completed. Cannot NACK message with delivery tag {}",
                                message.getReceipt(),
                                e);
                        break;
                    }
                    retryIndex++;
                }
            }
        }
    }

    /** Builds AMQP message properties; generates IDs when the message lacks them. */
    private static AMQP.BasicProperties buildBasicProperties(
            final Message message, final AMQPSettings settings) {
        return new AMQP.BasicProperties.Builder()
                .messageId(
                        StringUtils.isEmpty(message.getId())
                                ? UUID.randomUUID().toString()
                                : message.getId())
                .correlationId(
                        StringUtils.isEmpty(message.getReceipt())
                                ? UUID.randomUUID().toString()
                                : message.getReceipt())
                .contentType(settings.getContentType())
                .contentEncoding(settings.getContentEncoding())
                .deliveryMode(settings.getDeliveryMode())
                .build();
    }

    /**
     * Publishes a single message, retrying per the configured retry pattern. Wraps terminal
     * failures in {@link RuntimeException}. The borrowed publisher channel is always returned.
     */
    private void publishMessage(Message message, String exchange, String routingKey) {
        Channel chn = null;
        int retryIndex = 1;
        while (true) {
            try {
                final String payload = message.getPayload();
                chn =
                        amqpConnection.getOrCreateChannel(
                                ConnectionType.PUBLISHER, getSettings().getQueueOrExchangeName());
                chn.basicPublish(
                        exchange,
                        routingKey,
                        buildBasicProperties(message, settings),
                        payload.getBytes(settings.getContentEncoding()));
                LOGGER.info(String.format("Published message to %s: %s", exchange, payload));
                break;
            } catch (Exception ex) {
                AMQPRetryPattern retry = retrySettings;
                if (retry == null) {
                    LOGGER.error(
                            "Failed to publish message {} to {}",
                            message.getPayload(),
                            exchange,
                            ex);
                    throw new RuntimeException(ex);
                }
                try {
                    retry.continueOrPropogate(ex, retryIndex);
                } catch (Exception e) {
                    LOGGER.error(
                            "Retries completed. Failed to publish message {} to {}",
                            message.getPayload(),
                            exchange,
                            ex);
                    throw new RuntimeException(ex);
                }
                retryIndex++;
            } finally {
                if (chn != null) {
                    try {
                        amqpConnection.returnChannel(ConnectionType.PUBLISHER, chn);
                    } catch (Exception e) {
                        LOGGER.error(
                                "Failed to return the channel of {}. {}",
                                ConnectionType.PUBLISHER,
                                e);
                    }
                }
            }
        }
    }

    /**
     * Publishes messages either to the configured exchange (with routing key) or directly to the
     * queue, declaring the destination first if needed.
     */
    @Override
    public void publish(List<Message> messages) {
        try {
            final String exchange, routingKey;
            if (useExchange) {
                // Use exchange + routing key for publishing
                getOrCreateExchange(
                        ConnectionType.PUBLISHER,
                        settings.getQueueOrExchangeName(),
                        settings.getExchangeType(),
                        settings.isDurable(),
                        settings.autoDelete(),
                        settings.getArguments());
                exchange = settings.getQueueOrExchangeName();
                routingKey = settings.getRoutingKey();
            } else {
                // Use queue for publishing
                final AMQP.Queue.DeclareOk declareOk =
                        getOrCreateQueue(
                                ConnectionType.PUBLISHER,
                                settings.getQueueOrExchangeName(),
                                settings.isDurable(),
                                settings.isExclusive(),
                                settings.autoDelete(),
                                settings.getArguments());
                exchange = StringUtils.EMPTY; // Empty exchange name for queue
                routingKey = declareOk.getQueue(); // Routing name is the name of queue
            }
            messages.forEach(message -> publishMessage(message, exchange, routingKey));
        } catch (final RuntimeException ex) {
            throw ex;
        } catch (final Exception ex) {
            LOGGER.error("Failed to publish messages: {}", ex.getMessage(), ex);
            throw new RuntimeException(ex);
        }
    }

    /** Unack timeouts are managed by RabbitMQ itself; not supported here. */
    @Override
    public void setUnackTimeout(Message message, long unackTimeout) {
        throw new UnsupportedOperationException();
    }

    /** Returns the number of messages currently in the broker-side queue. */
    @Override
    public long size() {
        Channel chn = null;
        try {
            chn =
                    amqpConnection.getOrCreateChannel(
                            ConnectionType.SUBSCRIBER, getSettings().getQueueOrExchangeName());
            return chn.messageCount(settings.getQueueOrExchangeName());
        } catch (final Exception e) {
            throw new RuntimeException(e);
        } finally {
            if (chn != null) {
                try {
                    amqpConnection.returnChannel(ConnectionType.SUBSCRIBER, chn);
                } catch (Exception e) {
                    // Fix: log instead of printStackTrace so the failure reaches the app logs.
                    LOGGER.error(
                            "Failed to return the channel of {}. {}",
                            ConnectionType.SUBSCRIBER,
                            e);
                }
            }
        }
    }

    @Override
    public void close() {
        amqpConnection.close();
    }

    @Override
    public void start() {
        LOGGER.info(
                "Started listening to {}:{}",
                getClass().getSimpleName(),
                settings.getQueueOrExchangeName());
        running = true;
    }

    @Override
    public void stop() {
        LOGGER.info(
                "Stopped listening to {}:{}",
                getClass().getSimpleName(),
                settings.getQueueOrExchangeName());
        running = false;
    }

    @Override
    public boolean isRunning() {
        return running;
    }

    /** Builds {@link AMQPObservableQueue} instances from {@link AMQPEventQueueProperties}. */
    public static class Builder {

        private final Address[] addresses;
        private final int batchSize;
        private final int pollTimeInMS;
        private final ConnectionFactory factory;
        private final AMQPEventQueueProperties properties;

        public Builder(AMQPEventQueueProperties properties) {
            this.properties = properties;
            this.addresses = buildAddressesFromHosts();
            this.factory = buildConnectionFactory();
            // messages polling settings
            this.batchSize = properties.getBatchSize();
            this.pollTimeInMS = (int) properties.getPollTimeDuration().toMillis();
        }

        private Address[] buildAddressesFromHosts() {
            // Read hosts from config
            final String hosts = properties.getHosts();
            if (StringUtils.isEmpty(hosts)) {
                throw new IllegalArgumentException("Hosts are undefined");
            }
            return Address.parseAddresses(hosts);
        }

        /** Validates connection properties and assembles a configured connection factory. */
        private ConnectionFactory buildConnectionFactory() {
            final ConnectionFactory factory = new ConnectionFactory();
            // Get rabbitmq username from config
            final String username = properties.getUsername();
            if (StringUtils.isEmpty(username)) {
                throw new IllegalArgumentException("Username is null or empty");
            } else {
                factory.setUsername(username);
            }
            // Get rabbitmq password from config
            final String password = properties.getPassword();
            if (StringUtils.isEmpty(password)) {
                throw new IllegalArgumentException("Password is null or empty");
            } else {
                factory.setPassword(password);
            }
            // Get vHost from config
            final String virtualHost = properties.getVirtualHost();
            if (StringUtils.isEmpty(virtualHost)) {
                throw new IllegalArgumentException("Virtual host is null or empty");
            } else {
                factory.setVirtualHost(virtualHost);
            }
            // Get server port from config
            final int port = properties.getPort();
            if (port <= 0) {
                throw new IllegalArgumentException("Port must be greater than 0");
            } else {
                factory.setPort(port);
            }
            final boolean useNio = properties.isUseNio();
            if (useNio) {
                factory.useNio();
            }
            final boolean useSslProtocol = properties.isUseSslProtocol();
            if (useSslProtocol) {
                try {
                    factory.useSslProtocol();
                } catch (NoSuchAlgorithmException | KeyManagementException e) {
                    throw new IllegalArgumentException("Invalid sslProtocol ", e);
                }
            }
            factory.setConnectionTimeout(properties.getConnectionTimeoutInMilliSecs());
            factory.setRequestedHeartbeat(properties.getRequestHeartbeatTimeoutInSecs());
            factory.setNetworkRecoveryInterval(properties.getNetworkRecoveryIntervalInMilliSecs());
            factory.setHandshakeTimeout(properties.getHandshakeTimeoutInMilliSecs());
            factory.setAutomaticRecoveryEnabled(true);
            factory.setTopologyRecoveryEnabled(true);
            factory.setRequestedChannelMax(properties.getMaxChannelCount());
            return factory;
        }

        public AMQPObservableQueue build(final boolean useExchange, final String queueURI) {
            final AMQPSettings settings = new AMQPSettings(properties).fromURI(queueURI);
            final AMQPRetryPattern retrySettings =
                    new AMQPRetryPattern(
                            properties.getLimit(), properties.getDuration(), properties.getType());
            return new AMQPObservableQueue(
                    factory,
                    addresses,
                    useExchange,
                    settings,
                    retrySettings,
                    batchSize,
                    pollTimeInMS);
        }
    }

    /** Declares the exchange using the configured settings. */
    private AMQP.Exchange.DeclareOk getOrCreateExchange(ConnectionType connectionType)
            throws Exception {
        return getOrCreateExchange(
                connectionType,
                settings.getQueueOrExchangeName(),
                settings.getExchangeType(),
                settings.isDurable(),
                settings.autoDelete(),
                settings.getArguments());
    }

    /**
     * Declares the exchange (idempotent on the broker side) and returns the declaration result.
     * The borrowed channel is always returned to the pool.
     */
    private AMQP.Exchange.DeclareOk getOrCreateExchange(
            ConnectionType connectionType,
            String name,
            final String type,
            final boolean isDurable,
            final boolean autoDelete,
            final Map<String, Object> arguments)
            throws Exception {
        if (StringUtils.isEmpty(name)) {
            throw new RuntimeException("Exchange name is undefined");
        }
        if (StringUtils.isEmpty(type)) {
            throw new RuntimeException("Exchange type is undefined");
        }
        Channel chn = null;
        try {
            LOGGER.debug("Creating exchange {} of type {}", name, type);
            chn =
                    amqpConnection.getOrCreateChannel(
                            connectionType, getSettings().getQueueOrExchangeName());
            return chn.exchangeDeclare(name, type, isDurable, autoDelete, arguments);
        } catch (final Exception e) {
            LOGGER.warn("Failed to create exchange {} of type {}", name, type, e);
            throw e;
        } finally {
            if (chn != null) {
                try {
                    amqpConnection.returnChannel(connectionType, chn);
                } catch (Exception e) {
                    LOGGER.error("Failed to return the channel of {}. {}", connectionType, e);
                }
            }
        }
    }

    /** Declares the queue using the configured settings. */
    private AMQP.Queue.DeclareOk getOrCreateQueue(ConnectionType connectionType) throws Exception {
        return getOrCreateQueue(
                connectionType,
                settings.getQueueOrExchangeName(),
                settings.isDurable(),
                settings.isExclusive(),
                settings.autoDelete(),
                settings.getArguments());
    }

    /**
     * Declares the queue (idempotent on the broker side), forcing the configured x-queue-type,
     * and returns the declaration result. The borrowed channel is always returned to the pool.
     */
    private AMQP.Queue.DeclareOk getOrCreateQueue(
            ConnectionType connectionType,
            final String name,
            final boolean isDurable,
            final boolean isExclusive,
            final boolean autoDelete,
            final Map<String, Object> arguments)
            throws Exception {
        if (StringUtils.isEmpty(name)) {
            throw new RuntimeException("Queue name is undefined");
        }
        arguments.put(QUEUE_TYPE, settings.getQueueType());
        Channel chn = null;
        try {
            LOGGER.debug("Creating queue {}", name);
            chn =
                    amqpConnection.getOrCreateChannel(
                            connectionType, getSettings().getQueueOrExchangeName());
            return chn.queueDeclare(name, isDurable, isExclusive, autoDelete, arguments);
        } catch (final Exception e) {
            LOGGER.warn("Failed to create queue {}", name, e);
            throw e;
        } finally {
            if (chn != null) {
                try {
                    amqpConnection.returnChannel(connectionType, chn);
                } catch (Exception e) {
                    LOGGER.error("Failed to return the channel of {}. {}", connectionType, e);
                }
            }
        }
    }

    /** Converts a RabbitMQ delivery into a Conductor {@link Message}; null for a null response. */
    private static Message asMessage(AMQPSettings settings, GetResponse response) throws Exception {
        if (response == null) {
            return null;
        }
        final Message message = new Message();
        message.setId(response.getProps().getMessageId());
        message.setPayload(new String(response.getBody(), settings.getContentEncoding()));
        message.setReceipt(String.valueOf(response.getEnvelope().getDeliveryTag()));
        return message;
    }

    /**
     * Registers a consumer that buffers deliveries into the in-memory queue (sequential mode).
     * Deliveries are consumed without auto-ack; acking happens via {@link #ack(List)}.
     */
    private void receiveMessagesFromQueue(String queueName) throws Exception {
        LOGGER.debug("Accessing channel for queue {}", queueName);
        Consumer consumer =
                new DefaultConsumer(
                        amqpConnection.getOrCreateChannel(
                                ConnectionType.SUBSCRIBER,
                                getSettings().getQueueOrExchangeName())) {

                    @Override
                    public void handleDelivery(
                            final String consumerTag,
                            final Envelope envelope,
                            final AMQP.BasicProperties properties,
                            final byte[] body)
                            throws IOException {
                        try {
                            Message message =
                                    asMessage(
                                            settings,
                                            new GetResponse(
                                                    envelope, properties, body, Integer.MAX_VALUE));
                            if (message != null) {
                                if (LOGGER.isDebugEnabled()) {
                                    LOGGER.debug(
                                            "Got message with ID {} and receipt {}",
                                            message.getId(),
                                            message.getReceipt());
                                }
                                messages.add(message);
                                LOGGER.info("receiveMessagesFromQueue- End method {}", messages);
                            }
                        } catch (InterruptedException e) {
                            LOGGER.error(
                                    "Issue in handling the mesages for the subscriber with consumer tag {}. {}",
                                    consumerTag,
                                    e);
                            Thread.currentThread().interrupt();
                        } catch (Exception e) {
                            LOGGER.error(
                                    "Issue in handling the mesages for the subscriber with consumer tag {}. {}",
                                    consumerTag,
                                    e);
                        }
                    }

                    public void handleCancel(String consumerTag) throws IOException {
                        LOGGER.error(
                                "Recieved a consumer cancel notification for subscriber {}",
                                consumerTag);
                    }
                };
        amqpConnection
                .getOrCreateChannel(
                        ConnectionType.SUBSCRIBER, getSettings().getQueueOrExchangeName())
                .basicConsume(queueName, false, consumer);
        Monitors.recordEventQueueMessagesProcessed(getType(), queueName, messages.size());
    }

    /**
     * Registers a consumer that pushes each delivery straight to the subscriber on a dedicated
     * thread (event-based mode). Concurrency is bounded by the prefetch count, not a pool.
     */
    private void receiveMessagesFromQueue(String queueName, Subscriber<? super Message> subscriber)
            throws Exception {
        LOGGER.debug("Accessing channel for queue {}", queueName);
        Consumer consumer =
                new DefaultConsumer(
                        amqpConnection.getOrCreateChannel(
                                ConnectionType.SUBSCRIBER,
                                getSettings().getQueueOrExchangeName())) {

                    @Override
                    public void handleDelivery(
                            final String consumerTag,
                            final Envelope envelope,
                            final AMQP.BasicProperties properties,
                            final byte[] body)
                            throws IOException {
                        try {
                            Message message =
                                    asMessage(
                                            settings,
                                            new GetResponse(
                                                    envelope, properties, body, Integer.MAX_VALUE));
                            if (message == null) {
                                return;
                            }
                            LOGGER.info(
                                    "Got message with ID {} and receipt {}",
                                    message.getId(),
                                    message.getReceipt());
                            LOGGER.debug("Message content {}", message);
                            // Not using thread-pool here as the number of concurrent threads are
                            // controlled
                            // by the number of messages delivery using pre-fetch count in RabbitMQ
                            Thread newThread =
                                    new Thread(
                                            () -> {
                                                LOGGER.info(
                                                        "Spawning a new thread for message with ID {}",
                                                        message.getId());
                                                subscriber.onNext(message);
                                            });
                            newThread.start();
                        } catch (InterruptedException e) {
                            LOGGER.error(
                                    "Issue in handling the mesages for the subscriber with consumer tag {}. {}",
                                    consumerTag,
                                    e);
                            Thread.currentThread().interrupt();
                        } catch (Exception e) {
                            LOGGER.error(
                                    "Issue in handling the mesages for the subscriber with consumer tag {}. {}",
                                    consumerTag,
                                    e);
                        }
                    }

                    public void handleCancel(String consumerTag) throws IOException {
                        LOGGER.error(
                                "Recieved a consumer cancel notification for subscriber {}",
                                consumerTag);
                    }
                };
        amqpConnection
                .getOrCreateChannel(
                        ConnectionType.SUBSCRIBER, getSettings().getQueueOrExchangeName())
                .basicConsume(queueName, false, consumer);
    }

    /** Sets up exchange/queue topology and starts buffering deliveries (sequential mode). */
    protected void receiveMessages() {
        try {
            amqpConnection
                    .getOrCreateChannel(
                            ConnectionType.SUBSCRIBER, getSettings().getQueueOrExchangeName())
                    .basicQos(batchSize);
            String queueName;
            if (useExchange) {
                // Consume messages from an exchange
                getOrCreateExchange(ConnectionType.SUBSCRIBER);
                /*
                 * Create queue if not present based on the settings provided in the queue URI
                 * or configuration properties. Sample URI format:
                 * amqp_exchange:myExchange?bindQueueName=myQueue&exchangeType=topic&routingKey=myRoutingKey&exclusive
                 * =false&autoDelete=false&durable=true Default settings if not provided in the
                 * queue URI or properties: isDurable: true, autoDelete: false, isExclusive:
                 * false The same settings are currently used during creation of exchange as
                 * well as queue. TODO: This can be enhanced further to get the settings
                 * separately for exchange and queue from the URI
                 */
                final AMQP.Queue.DeclareOk declareOk =
                        getOrCreateQueue(
                                ConnectionType.SUBSCRIBER,
                                settings.getExchangeBoundQueueName(),
                                settings.isDurable(),
                                settings.isExclusive(),
                                settings.autoDelete(),
                                Maps.newHashMap());
                // Bind the declared queue to exchange
                queueName = declareOk.getQueue();
                amqpConnection
                        .getOrCreateChannel(
                                ConnectionType.SUBSCRIBER,
                                getSettings().getQueueOrExchangeName())
                        .queueBind(
                                queueName,
                                settings.getQueueOrExchangeName(),
                                settings.getRoutingKey());
            } else {
                // Consume messages from a queue
                queueName = getOrCreateQueue(ConnectionType.SUBSCRIBER).getQueue();
            }
            // Consume messages
            LOGGER.info("Consuming from queue {}", queueName);
            receiveMessagesFromQueue(queueName);
        } catch (Exception exception) {
            LOGGER.error("Exception while getting messages from RabbitMQ", exception);
            Monitors.recordObservableQMessageReceivedErrors(getType());
        }
    }

    /** Sets up exchange/queue topology and starts event-based delivery to the subscriber. */
    protected void receiveMessages(Subscriber<? super Message> subscriber) {
        try {
            amqpConnection
                    .getOrCreateChannel(
                            ConnectionType.SUBSCRIBER, getSettings().getQueueOrExchangeName())
                    .basicQos(batchSize);
            String queueName;
            if (useExchange) {
                // Consume messages from an exchange
                getOrCreateExchange(ConnectionType.SUBSCRIBER);
                /*
                 * Create queue if not present based on the settings provided in the queue URI
                 * or configuration properties. Sample URI format:
                 * amqp_exchange:myExchange?bindQueueName=myQueue&exchangeType=topic&routingKey=myRoutingKey&exclusive
                 * =false&autoDelete=false&durable=true Default settings if not provided in the
                 * queue URI or properties: isDurable: true, autoDelete: false, isExclusive:
                 * false The same settings are currently used during creation of exchange as
                 * well as queue. TODO: This can be enhanced further to get the settings
                 * separately for exchange and queue from the URI
                 */
                final AMQP.Queue.DeclareOk declareOk =
                        getOrCreateQueue(
                                ConnectionType.SUBSCRIBER,
                                settings.getExchangeBoundQueueName(),
                                settings.isDurable(),
                                settings.isExclusive(),
                                settings.autoDelete(),
                                Maps.newHashMap());
                // Bind the declared queue to exchange
                queueName = declareOk.getQueue();
                amqpConnection
                        .getOrCreateChannel(
                                ConnectionType.SUBSCRIBER, settings.getQueueOrExchangeName())
                        .queueBind(
                                queueName,
                                settings.getQueueOrExchangeName(),
                                settings.getRoutingKey());
            } else {
                // Consume messages from a queue
                queueName = getOrCreateQueue(ConnectionType.SUBSCRIBER).getQueue();
            }
            // Consume messages
            LOGGER.info("Consuming from queue {}", queueName);
            receiveMessagesFromQueue(queueName, subscriber);
        } catch (Exception exception) {
            LOGGER.error("Exception while getting messages from RabbitMQ", exception);
            Monitors.recordObservableQMessageReceivedErrors(getType());
        }
    }

    public int getPollTimeInMS() {
        return pollTimeInMS;
    }

    public void setPollTimeInMS(int pollTimeInMS) {
        this.pollTimeInMS = pollTimeInMS;
    }
}
8,265
0
Create_ds/conductor-community/event-queue/amqp/src/main/java/com/netflix/conductor/contribs/queue/amqp
Create_ds/conductor-community/event-queue/amqp/src/main/java/com/netflix/conductor/contribs/queue/amqp/util/AMQPSettings.java
/*
 * <p>
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
 * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations under the License.
 */
package com.netflix.conductor.contribs.queue.amqp.util;

import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;
import java.util.Objects;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import org.apache.commons.lang3.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.netflix.conductor.contribs.queue.amqp.config.AMQPEventQueueProperties;

import static com.netflix.conductor.contribs.queue.amqp.util.AMQPConfigurations.*;

/**
 * Mutable holder of the AMQP queue/exchange settings used by the event queue. Values are first
 * seeded from {@link AMQPEventQueueProperties} and can then be overridden per-queue by parsing
 * a queue URI via {@link #fromURI(String)}.
 *
 * @author Ritu Parathody
 */
public class AMQPSettings {

    // Matches "amqp_queue:NAME?params", "amqp_exchange:NAME?params" or a bare "NAME?params".
    // Group "name" is the queue/exchange name, group "params" the raw query string.
    private static final Pattern URI_PATTERN =
            Pattern.compile(
                    "^(?:amqp\\_(queue|exchange))?\\:?(?<name>[^\\?]+)\\??(?<params>.*)$",
                    Pattern.CASE_INSENSITIVE);

    private String queueOrExchangeName;
    // The full queue URI this settings object was parsed from; used as the event name.
    private String eventName;
    private String exchangeType;
    private String exchangeBoundQueueName;
    private String queueType;
    private String routingKey;
    private final String contentEncoding;
    private final String contentType;
    private boolean durable;
    private boolean exclusive;
    private boolean autoDelete;
    private boolean sequentialProcessing;
    private int deliveryMode;

    // Extra queue arguments (e.g. x-max-priority) passed to queueDeclare.
    private final Map<String, Object> arguments = new HashMap<>();

    private static final Logger LOGGER = LoggerFactory.getLogger(AMQPSettings.class);

    /**
     * Seeds all settings from the application-level properties; URI parsing may override them.
     *
     * @param properties the configured AMQP event-queue defaults
     */
    public AMQPSettings(final AMQPEventQueueProperties properties) {
        // Initialize with a default values
        durable = properties.isDurable();
        exclusive = properties.isExclusive();
        autoDelete = properties.isAutoDelete();
        contentType = properties.getContentType();
        contentEncoding = properties.getContentEncoding();
        exchangeType = properties.getExchangeType();
        routingKey = StringUtils.EMPTY;
        queueType = properties.getQueueType();
        sequentialProcessing = properties.isSequentialMsgProcessing();
        // Set common settings for publishing and consuming
        setDeliveryMode(properties.getDeliveryMode());
    }

    public final boolean isDurable() {
        return durable;
    }

    public final boolean isExclusive() {
        return exclusive;
    }

    public final boolean autoDelete() {
        return autoDelete;
    }

    public final Map<String, Object> getArguments() {
        return arguments;
    }

    public final String getContentEncoding() {
        return contentEncoding;
    }

    /**
     * Use queue for publishing
     *
     * @param queueName the name of queue
     */
    public void setQueue(String queueName) {
        if (StringUtils.isEmpty(queueName)) {
            throw new IllegalArgumentException("Queue name for publishing is undefined");
        }
        this.queueOrExchangeName = queueName;
    }

    public String getQueueOrExchangeName() {
        return queueOrExchangeName;
    }

    /**
     * Name of the queue bound to the exchange; defaults to "bound_to_&lt;exchange&gt;" when no
     * explicit bindQueueName was supplied in the URI.
     */
    public String getExchangeBoundQueueName() {
        if (StringUtils.isEmpty(exchangeBoundQueueName)) {
            return String.format("bound_to_%s", queueOrExchangeName);
        }
        return exchangeBoundQueueName;
    }

    public String getExchangeType() {
        return exchangeType;
    }

    public String getRoutingKey() {
        return routingKey;
    }

    public int getDeliveryMode() {
        return deliveryMode;
    }

    /**
     * @param deliveryMode 1 (non-persistent) or 2 (persistent); any other value is rejected
     * @return this, for chaining
     */
    public AMQPSettings setDeliveryMode(int deliveryMode) {
        if (deliveryMode != 1 && deliveryMode != 2) {
            throw new IllegalArgumentException("Delivery mode must be 1 or 2");
        }
        this.deliveryMode = deliveryMode;
        return this;
    }

    public String getContentType() {
        return contentType;
    }

    /**
     * Complete settings from the queue URI.
     *
     * <p><u>Example for queue:</u>
     *
     * <pre>
     * amqp_queue:myQueue?deliveryMode=1&autoDelete=true&exclusive=true
     * </pre>
     *
     * <u>Example for exchange:</u>
     *
     * <pre>
     * amqp_exchange:myExchange?bindQueueName=myQueue&exchangeType=topic&routingKey=myRoutingKey&exclusive=true
     * </pre>
     *
     * @param queueURI
     * @return
     */
    public final AMQPSettings fromURI(final String queueURI) {
        final Matcher matcher = URI_PATTERN.matcher(queueURI);
        if (!matcher.matches()) {
            throw new IllegalArgumentException("Queue URI doesn't matches the expected regexp");
        }

        // Set name of queue or exchange from group "name"
        LOGGER.info("Queue URI:{}", queueURI);
        queueOrExchangeName = matcher.group("name");
        eventName = queueURI;
        if (matcher.groupCount() > 1) {
            final String queryParams = matcher.group("params");
            if (StringUtils.isNotEmpty(queryParams)) {
                // Handle parameters: split on '&' (tolerating surrounding whitespace), then each
                // key=value pair; unknown keys are silently ignored.
                Arrays.stream(queryParams.split("\\s*\\&\\s*"))
                        .forEach(
                                param -> {
                                    final String[] kv = param.split("\\s*=\\s*");
                                    if (kv.length == 2) {
                                        if (kv[0].equalsIgnoreCase(
                                                String.valueOf(PARAM_EXCHANGE_TYPE))) {
                                            String value = kv[1];
                                            if (StringUtils.isEmpty(value)) {
                                                throw new IllegalArgumentException(
                                                        "The provided exchange type is empty");
                                            }
                                            exchangeType = value;
                                        }
                                        if (kv[0].equalsIgnoreCase(
                                                (String.valueOf(PARAM_QUEUE_NAME)))) {
                                            exchangeBoundQueueName = kv[1];
                                        }
                                        if (kv[0].equalsIgnoreCase(
                                                (String.valueOf(PARAM_ROUTING_KEY)))) {
                                            String value = kv[1];
                                            if (StringUtils.isEmpty(value)) {
                                                throw new IllegalArgumentException(
                                                        "The provided routing key is empty");
                                            }
                                            routingKey = value;
                                        }
                                        if (kv[0].equalsIgnoreCase(
                                                (String.valueOf(PARAM_DURABLE)))) {
                                            durable = Boolean.parseBoolean(kv[1]);
                                        }
                                        if (kv[0].equalsIgnoreCase(
                                                (String.valueOf(PARAM_EXCLUSIVE)))) {
                                            exclusive = Boolean.parseBoolean(kv[1]);
                                        }
                                        if (kv[0].equalsIgnoreCase(
                                                (String.valueOf(PARAM_AUTO_DELETE)))) {
                                            autoDelete = Boolean.parseBoolean(kv[1]);
                                        }
                                        if (kv[0].equalsIgnoreCase(
                                                (String.valueOf(PARAM_DELIVERY_MODE)))) {
                                            setDeliveryMode(Integer.parseInt(kv[1]));
                                        }
                                        if (kv[0].equalsIgnoreCase(
                                                (String.valueOf(PARAM_MAX_PRIORITY)))) {
                                            arguments.put(
                                                    "x-max-priority", Integer.valueOf(kv[1]));
                                        }
                                    }
                                });
            }
        }
        return this;
    }

    @Override
    public boolean equals(Object obj) {
        if (this == obj) return true;
        if (!(obj instanceof AMQPSettings)) return false;
        AMQPSettings other = (AMQPSettings) obj;
        return Objects.equals(arguments, other.arguments)
                && autoDelete == other.autoDelete
                && Objects.equals(contentEncoding, other.contentEncoding)
                && Objects.equals(contentType, other.contentType)
                && deliveryMode == other.deliveryMode
                && durable == other.durable
                && Objects.equals(eventName, other.eventName)
                && Objects.equals(exchangeType, other.exchangeType)
                && exclusive == other.exclusive
                && Objects.equals(queueOrExchangeName, other.queueOrExchangeName)
                && Objects.equals(exchangeBoundQueueName, other.exchangeBoundQueueName)
                && Objects.equals(queueType, other.queueType)
                && Objects.equals(routingKey, other.routingKey)
                && sequentialProcessing == other.sequentialProcessing;
    }

    @Override
    public int hashCode() {
        return Objects.hash(
                arguments,
                autoDelete,
                contentEncoding,
                contentType,
                deliveryMode,
                durable,
                eventName,
                exchangeType,
                exclusive,
                queueOrExchangeName,
                exchangeBoundQueueName,
                queueType,
                routingKey,
                sequentialProcessing);
    }

    @Override
    public String toString() {
        return "AMQPSettings [queueOrExchangeName="
                + queueOrExchangeName
                + ", eventName="
                + eventName
                + ", exchangeType="
                + exchangeType
                + ", exchangeQueueName="
                + exchangeBoundQueueName
                + ", queueType="
                + queueType
                + ", routingKey="
                + routingKey
                + ", contentEncoding="
                + contentEncoding
                + ", contentType="
                + contentType
                + ", durable="
                + durable
                + ", exclusive="
                + exclusive
                + ", autoDelete="
                + autoDelete
                + ", sequentialProcessing="
                + sequentialProcessing
                + ", deliveryMode="
                + deliveryMode
                + ", arguments="
                + arguments
                + "]";
    }

    public String getEventName() {
        return eventName;
    }

    /**
     * @return the queueType
     */
    public String getQueueType() {
        return queueType;
    }

    /**
     * @return the sequentialProcessing
     */
    public boolean isSequentialProcessing() {
        return sequentialProcessing;
    }
}
8,266
0
Create_ds/conductor-community/event-queue/amqp/src/main/java/com/netflix/conductor/contribs/queue/amqp
Create_ds/conductor-community/event-queue/amqp/src/main/java/com/netflix/conductor/contribs/queue/amqp/util/AMQPConstants.java
/*
 * <p>
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
 * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations under the License.
 */
package com.netflix.conductor.contribs.queue.amqp.util;

/**
 * Constants and defaults shared by the AMQP event-queue implementation.
 *
 * <p>Fix: all constants are now {@code final} (they were mutable {@code public static} fields,
 * which any caller could reassign at runtime), and a private constructor prevents instantiation
 * of this utility class.
 *
 * @author Ritu Parathody
 */
public class AMQPConstants {

    private AMQPConstants() {
        // utility class; no instances
    }

    /** this when set will create a rabbitmq queue */
    public static final String AMQP_QUEUE_TYPE = "amqp_queue";

    /** this when set will create a rabbitmq exchange */
    public static final String AMQP_EXCHANGE_TYPE = "amqp_exchange";

    public static final String PROPERTY_KEY_TEMPLATE = "conductor.event-queues.amqp.%s";

    /** default content type for the message read from rabbitmq */
    public static final String DEFAULT_CONTENT_TYPE = "application/json";

    /** default encoding for the message read from rabbitmq */
    public static final String DEFAULT_CONTENT_ENCODING = "UTF-8";

    /** default rabbitmq exchange type */
    public static final String DEFAULT_EXCHANGE_TYPE = "topic";

    /**
     * default rabbitmq durability When set to true the queues are persisted to the disk.
     *
     * <p>{@see <a href="https://www.rabbitmq.com/queues.html">RabbitMQ</a>}.
     */
    public static final boolean DEFAULT_DURABLE = true;

    /**
     * default rabbitmq exclusivity When set to true the queues can be only used by one connection.
     *
     * <p>{@see <a href="https://www.rabbitmq.com/queues.html">RabbitMQ</a>}.
     */
    public static final boolean DEFAULT_EXCLUSIVE = false;

    /**
     * default rabbitmq auto delete When set to true the queues will be deleted when the last
     * consumer is cancelled
     *
     * <p>{@see <a href="https://www.rabbitmq.com/queues.html">RabbitMQ</a>}.
     */
    public static final boolean DEFAULT_AUTO_DELETE = false;

    /**
     * default rabbitmq delivery mode This is a property of the message When set to 1 the will be
     * non persistent and 2 will be persistent {@see <a
     * href="https://www.rabbitmq.com/releases/rabbitmq-java-client/v3.5.4/rabbitmq-java-client-javadoc-3.5.4/com/rabbitmq/client/MessageProperties.html>
     * Message Properties</a>}.
     */
    public static final int DEFAULT_DELIVERY_MODE = 2;

    /**
     * default rabbitmq delivery mode This is a property of the channel limit to get the number of
     * unacknowledged messages. {@see <a
     * href="https://www.rabbitmq.com/consumer-prefetch.html>Consumer Prefetch</a>}.
     */
    public static final int DEFAULT_BATCH_SIZE = 1;

    /**
     * default rabbitmq delivery mode This is a property of the amqp implementation which sets the
     * polling time to drain the in-memory queue.
     */
    public static final int DEFAULT_POLL_TIME_MS = 100;

    // info channel messages.
    public static final String INFO_CHANNEL_BORROW_SUCCESS =
            "Borrowed the channel object from the channel pool for "
                    + "the connection type [%s]";
    public static final String INFO_CHANNEL_RETURN_SUCCESS =
            "Returned the borrowed channel object to the pool for "
                    + "the connection type [%s]";
    public static final String INFO_CHANNEL_CREATION_SUCCESS =
            "Channels are not available in the pool. Created a"
                    + " channel for the connection type [%s]";
    public static final String INFO_CHANNEL_RESET_SUCCESS =
            "No proper channels available in the pool. Created a "
                    + "channel for the connection type [%s]";
}
8,267
0
Create_ds/conductor-community/event-queue/amqp/src/main/java/com/netflix/conductor/contribs/queue/amqp
Create_ds/conductor-community/event-queue/amqp/src/main/java/com/netflix/conductor/contribs/queue/amqp/util/ConnectionType.java
/* * <p> * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * <p> * http://www.apache.org/licenses/LICENSE-2.0 * <p> * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. */ package com.netflix.conductor.contribs.queue.amqp.util; public enum ConnectionType { PUBLISHER, SUBSCRIBER }
8,268
0
Create_ds/conductor-community/event-queue/amqp/src/main/java/com/netflix/conductor/contribs/queue/amqp
Create_ds/conductor-community/event-queue/amqp/src/main/java/com/netflix/conductor/contribs/queue/amqp/util/AMQPConfigurations.java
/* * <p> * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * <p> * http://www.apache.org/licenses/LICENSE-2.0 * <p> * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. */ package com.netflix.conductor.contribs.queue.amqp.util; /** * @author Ritu Parathody */ public enum AMQPConfigurations { // queue exchange settings PARAM_EXCHANGE_TYPE("exchangeType"), PARAM_QUEUE_NAME("bindQueueName"), PARAM_ROUTING_KEY("routingKey"), PARAM_DELIVERY_MODE("deliveryMode"), PARAM_DURABLE("durable"), PARAM_EXCLUSIVE("exclusive"), PARAM_AUTO_DELETE("autoDelete"), PARAM_MAX_PRIORITY("maxPriority"); String propertyName; AMQPConfigurations(String propertyName) { this.propertyName = propertyName; } @Override public String toString() { return propertyName; } }
8,269
0
Create_ds/conductor-community/event-queue/amqp/src/main/java/com/netflix/conductor/contribs/queue/amqp
Create_ds/conductor-community/event-queue/amqp/src/main/java/com/netflix/conductor/contribs/queue/amqp/util/RetryType.java
/* * <p> * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * <p> * http://www.apache.org/licenses/LICENSE-2.0 * <p> * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. */ package com.netflix.conductor.contribs.queue.amqp.util; /** RetryType holds the retry type */ public enum RetryType { REGULARINTERVALS, EXPONENTIALBACKOFF, INCREMENTALINTERVALS }
8,270
0
Create_ds/conductor-community/event-queue/amqp/src/main/java/com/netflix/conductor/contribs/queue/amqp
Create_ds/conductor-community/event-queue/amqp/src/main/java/com/netflix/conductor/contribs/queue/amqp/config/AMQPEventQueueConfiguration.java
/*
 * <p>
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
 * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations under the License.
 */
package com.netflix.conductor.contribs.queue.amqp.config;

import java.util.HashMap;
import java.util.Map;

import org.apache.commons.lang3.StringUtils;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.boot.context.properties.EnableConfigurationProperties;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;

import com.netflix.conductor.contribs.queue.amqp.AMQPObservableQueue.Builder;
import com.netflix.conductor.core.config.ConductorProperties;
import com.netflix.conductor.core.events.EventQueueProvider;
import com.netflix.conductor.core.events.queue.ObservableQueue;
import com.netflix.conductor.model.TaskModel.Status;

/**
 * Spring configuration that registers the AMQP event-queue providers. Only active when
 * {@code conductor.event-queues.amqp.enabled=true}.
 */
@Configuration(proxyBeanMethods = false)
@EnableConfigurationProperties(AMQPEventQueueProperties.class)
@ConditionalOnProperty(name = "conductor.event-queues.amqp.enabled", havingValue = "true")
public class AMQPEventQueueConfiguration {

    // Discriminator between queue-backed and exchange-backed event queues.
    // NOTE(review): these string values duplicate AMQPConstants.AMQP_QUEUE_TYPE /
    // AMQP_EXCHANGE_TYPE — keep the two definitions in sync.
    private enum QUEUE_TYPE {
        AMQP_QUEUE("amqp_queue"),
        AMQP_EXCHANGE("amqp_exchange");

        private final String type;

        QUEUE_TYPE(String type) {
            this.type = type;
        }

        public String getType() {
            return type;
        }
    }

    /** Provider for queue-based ({@code amqp_queue}) event queues. */
    @Bean
    public EventQueueProvider amqpEventQueueProvider(AMQPEventQueueProperties properties) {
        return new AMQPEventQueueProvider(properties, QUEUE_TYPE.AMQP_QUEUE.getType(), false);
    }

    /** Provider for exchange-based ({@code amqp_exchange}) event queues. */
    @Bean
    public EventQueueProvider amqpExchangeEventQueueProvider(
            AMQPEventQueueProperties properties) {
        return new AMQPEventQueueProvider(properties, QUEUE_TYPE.AMQP_EXCHANGE.getType(), true);
    }

    /**
     * Builds the default status-notification queues, one per terminal task status (COMPLETED,
     * FAILED), when AMQP is the default event-queue type.
     *
     * <p>Each queue is named {@code <prefix><STATUS>} where the prefix is either the configured
     * listener queue prefix or {@code <appId>_amqp_notify_<stack>_} when no prefix is set.
     *
     * @param conductorProperties source of the app id and stack name used in default queue names
     * @param properties AMQP connection/queue settings passed through to the queue builder
     * @return map from task status to the queue that receives its notifications
     */
    @ConditionalOnProperty(name = "conductor.default-event-queue.type", havingValue = "amqp")
    @Bean
    public Map<Status, ObservableQueue> getQueues(
            ConductorProperties conductorProperties, AMQPEventQueueProperties properties) {
        String stack = "";
        if (conductorProperties.getStack() != null && conductorProperties.getStack().length() > 0) {
            stack = conductorProperties.getStack() + "_";
        }
        final boolean useExchange = properties.isUseExchange();
        Status[] statuses = new Status[] {Status.COMPLETED, Status.FAILED};
        Map<Status, ObservableQueue> queues = new HashMap<>();
        for (Status status : statuses) {
            String queuePrefix =
                    StringUtils.isBlank(properties.getListenerQueuePrefix())
                            ? conductorProperties.getAppId() + "_amqp_notify_" + stack
                            : properties.getListenerQueuePrefix();
            String queueName = queuePrefix + status.name();
            final ObservableQueue queue = new Builder(properties).build(useExchange, queueName);
            queues.put(status, queue);
        }
        return queues;
    }
}
8,271
0
Create_ds/conductor-community/event-queue/amqp/src/main/java/com/netflix/conductor/contribs/queue/amqp
Create_ds/conductor-community/event-queue/amqp/src/main/java/com/netflix/conductor/contribs/queue/amqp/config/AMQPEventQueueProvider.java
/* * <p> * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * <p> * http://www.apache.org/licenses/LICENSE-2.0 * <p> * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. */ package com.netflix.conductor.contribs.queue.amqp.config; import java.util.Map; import java.util.concurrent.ConcurrentHashMap; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.springframework.lang.NonNull; import com.netflix.conductor.contribs.queue.amqp.AMQPObservableQueue; import com.netflix.conductor.contribs.queue.amqp.AMQPObservableQueue.Builder; import com.netflix.conductor.core.events.EventQueueProvider; import com.netflix.conductor.core.events.queue.ObservableQueue; /** * @author Ritu Parathody */ public class AMQPEventQueueProvider implements EventQueueProvider { private static final Logger LOGGER = LoggerFactory.getLogger(AMQPEventQueueProvider.class); protected Map<String, AMQPObservableQueue> queues = new ConcurrentHashMap<>(); private final boolean useExchange; private final AMQPEventQueueProperties properties; private final String queueType; public AMQPEventQueueProvider( AMQPEventQueueProperties properties, String queueType, boolean useExchange) { this.properties = properties; this.queueType = queueType; this.useExchange = useExchange; } @Override public String getQueueType() { return queueType; } @Override @NonNull public ObservableQueue getQueue(String queueURI) { if (LOGGER.isInfoEnabled()) { LOGGER.info("Retrieve queue with URI {}", queueURI); } // Build the queue with the inner Builder class of AMQPObservableQueue return queues.computeIfAbsent(queueURI, q -> new 
Builder(properties).build(useExchange, q)); } }
8,272
0
Create_ds/conductor-community/event-queue/amqp/src/main/java/com/netflix/conductor/contribs/queue/amqp
Create_ds/conductor-community/event-queue/amqp/src/main/java/com/netflix/conductor/contribs/queue/amqp/config/AMQPEventQueueProperties.java
/*
 * <p>
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
 * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations under the License.
 */
package com.netflix.conductor.contribs.queue.amqp.config;

import java.time.Duration;

import org.springframework.boot.context.properties.ConfigurationProperties;

import com.netflix.conductor.contribs.queue.amqp.util.RetryType;
import com.rabbitmq.client.AMQP.PROTOCOL;
import com.rabbitmq.client.ConnectionFactory;

/**
 * Configuration properties for the AMQP event queue, bound from the
 * {@code conductor.event-queues.amqp.*} namespace.
 *
 * <p>NOTE(review): the bean property for the retry strategy is {@code type} (getType/setType)
 * even though the backing field is {@code retryType} — configuration files must use
 * {@code conductor.event-queues.amqp.type}.
 */
@ConfigurationProperties("conductor.event-queues.amqp")
public class AMQPEventQueueProperties {

    // Polling / batching
    private int batchSize = 1;
    private Duration pollTimeDuration = Duration.ofMillis(100);

    // Broker connection settings; defaults come from the RabbitMQ client library.
    private String hosts = ConnectionFactory.DEFAULT_HOST;
    private String username = ConnectionFactory.DEFAULT_USER;
    private String password = ConnectionFactory.DEFAULT_PASS;
    private String virtualHost = ConnectionFactory.DEFAULT_VHOST;
    private int port = PROTOCOL.PORT;
    private int connectionTimeoutInMilliSecs = 180000;
    private int networkRecoveryIntervalInMilliSecs = 5000;
    private int requestHeartbeatTimeoutInSecs = 30;
    private int handshakeTimeoutInMilliSecs = 180000;
    private int maxChannelCount = 5000;

    // Retry policy: max attempts, base wait (ms), and spacing strategy.
    private int limit = 50;
    private int duration = 1000;
    private RetryType retryType = RetryType.REGULARINTERVALS;

    public int getLimit() {
        return limit;
    }

    public void setLimit(int limit) {
        this.limit = limit;
    }

    public int getDuration() {
        return duration;
    }

    public void setDuration(int duration) {
        this.duration = duration;
    }

    /** Returns the retry spacing strategy (bound as property {@code type}). */
    public RetryType getType() {
        return retryType;
    }

    public void setType(RetryType type) {
        this.retryType = type;
    }

    public int getConnectionTimeoutInMilliSecs() {
        return connectionTimeoutInMilliSecs;
    }

    public void setConnectionTimeoutInMilliSecs(int connectionTimeoutInMilliSecs) {
        this.connectionTimeoutInMilliSecs = connectionTimeoutInMilliSecs;
    }

    public int getHandshakeTimeoutInMilliSecs() {
        return handshakeTimeoutInMilliSecs;
    }

    public void setHandshakeTimeoutInMilliSecs(int handshakeTimeoutInMilliSecs) {
        this.handshakeTimeoutInMilliSecs = handshakeTimeoutInMilliSecs;
    }

    public int getMaxChannelCount() {
        return maxChannelCount;
    }

    public void setMaxChannelCount(int maxChannelCount) {
        this.maxChannelCount = maxChannelCount;
    }

    // Queue/exchange declaration and message settings. (Declared mid-class in the original
    // source; kept here to leave the member order unchanged.)
    private boolean useNio = false;
    private boolean durable = true;
    private boolean exclusive = false;
    private boolean autoDelete = false;
    private String contentType = "application/json";
    private String contentEncoding = "UTF-8";
    private String exchangeType = "topic";
    private String queueType = "classic";
    private boolean sequentialMsgProcessing = true;
    private int deliveryMode = 2;
    private boolean useExchange = true;
    private String listenerQueuePrefix = "";
    private boolean useSslProtocol = false;

    public int getBatchSize() {
        return batchSize;
    }

    public void setBatchSize(int batchSize) {
        this.batchSize = batchSize;
    }

    public Duration getPollTimeDuration() {
        return pollTimeDuration;
    }

    public void setPollTimeDuration(Duration pollTimeDuration) {
        this.pollTimeDuration = pollTimeDuration;
    }

    public String getHosts() {
        return hosts;
    }

    public void setHosts(String hosts) {
        this.hosts = hosts;
    }

    public String getUsername() {
        return username;
    }

    public void setUsername(String username) {
        this.username = username;
    }

    public String getPassword() {
        return password;
    }

    public void setPassword(String password) {
        this.password = password;
    }

    public String getVirtualHost() {
        return virtualHost;
    }

    public void setVirtualHost(String virtualHost) {
        this.virtualHost = virtualHost;
    }

    public int getPort() {
        return port;
    }

    public void setPort(int port) {
        this.port = port;
    }

    public boolean isUseNio() {
        return useNio;
    }

    public void setUseNio(boolean useNio) {
        this.useNio = useNio;
    }

    public boolean isDurable() {
        return durable;
    }

    public void setDurable(boolean durable) {
        this.durable = durable;
    }

    public boolean isExclusive() {
        return exclusive;
    }

    public void setExclusive(boolean exclusive) {
        this.exclusive = exclusive;
    }

    public boolean isAutoDelete() {
        return autoDelete;
    }

    public void setAutoDelete(boolean autoDelete) {
        this.autoDelete = autoDelete;
    }

    public String getContentType() {
        return contentType;
    }

    public void setContentType(String contentType) {
        this.contentType = contentType;
    }

    public String getContentEncoding() {
        return contentEncoding;
    }

    public void setContentEncoding(String contentEncoding) {
        this.contentEncoding = contentEncoding;
    }

    public String getExchangeType() {
        return exchangeType;
    }

    public void setExchangeType(String exchangeType) {
        this.exchangeType = exchangeType;
    }

    public int getDeliveryMode() {
        return deliveryMode;
    }

    public void setDeliveryMode(int deliveryMode) {
        this.deliveryMode = deliveryMode;
    }

    public boolean isUseExchange() {
        return useExchange;
    }

    public void setUseExchange(boolean useExchange) {
        this.useExchange = useExchange;
    }

    public String getListenerQueuePrefix() {
        return listenerQueuePrefix;
    }

    public void setListenerQueuePrefix(String listenerQueuePrefix) {
        this.listenerQueuePrefix = listenerQueuePrefix;
    }

    public String getQueueType() {
        return queueType;
    }

    public boolean isUseSslProtocol() {
        return useSslProtocol;
    }

    public void setUseSslProtocol(boolean useSslProtocol) {
        this.useSslProtocol = useSslProtocol;
    }

    /**
     * @param queueType Supports two queue types, 'classic' and 'quorum'. Classic will be
     *     deprecated in 2022 and its usage discouraged by the RabbitMQ community, so a plain
     *     String (not an enum) is used here to hold the different values.
     */
    public void setQueueType(String queueType) {
        this.queueType = queueType;
    }

    /**
     * @return the sequentialMsgProcessing flag
     */
    public boolean isSequentialMsgProcessing() {
        return sequentialMsgProcessing;
    }

    /**
     * @param sequentialMsgProcessing the sequentialMsgProcessing to set. Supports sequential and
     *     parallel message processing capabilities. In parallel message processing, the number of
     *     threads is controlled by batch size. No thread control or execution framework is
     *     required here as threads are limited and short-lived.
     */
    public void setSequentialMsgProcessing(boolean sequentialMsgProcessing) {
        this.sequentialMsgProcessing = sequentialMsgProcessing;
    }

    public int getNetworkRecoveryIntervalInMilliSecs() {
        return networkRecoveryIntervalInMilliSecs;
    }

    public void setNetworkRecoveryIntervalInMilliSecs(int networkRecoveryIntervalInMilliSecs) {
        this.networkRecoveryIntervalInMilliSecs = networkRecoveryIntervalInMilliSecs;
    }

    public int getRequestHeartbeatTimeoutInSecs() {
        return requestHeartbeatTimeoutInSecs;
    }

    public void setRequestHeartbeatTimeoutInSecs(int requestHeartbeatTimeoutInSecs) {
        this.requestHeartbeatTimeoutInSecs = requestHeartbeatTimeoutInSecs;
    }
}
8,273
0
Create_ds/conductor-community/event-queue/amqp/src/main/java/com/netflix/conductor/contribs/queue/amqp
Create_ds/conductor-community/event-queue/amqp/src/main/java/com/netflix/conductor/contribs/queue/amqp/config/AMQPRetryPattern.java
/* * <p> * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * <p> * http://www.apache.org/licenses/LICENSE-2.0 * <p> * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. */ package com.netflix.conductor.contribs.queue.amqp.config; import com.netflix.conductor.contribs.queue.amqp.util.RetryType; public class AMQPRetryPattern { private int limit = 50; private int duration = 1000; private RetryType type = RetryType.REGULARINTERVALS; public AMQPRetryPattern() {} public AMQPRetryPattern(int limit, int duration, RetryType type) { this.limit = limit; this.duration = duration; this.type = type; } /** * This gets executed if the retry index is within the allowed limits, otherwise exception will * be thrown. * * @throws Exception */ public void continueOrPropogate(Exception ex, int retryIndex) throws Exception { if (retryIndex > limit) { throw ex; } // Regular Intervals is the default long waitDuration = duration; if (type == RetryType.INCREMENTALINTERVALS) { waitDuration = duration * retryIndex; } else if (type == RetryType.EXPONENTIALBACKOFF) { waitDuration = (long) Math.pow(2, retryIndex) * duration; } try { Thread.sleep(waitDuration); } catch (InterruptedException ignored) { Thread.currentThread().interrupt(); } } }
8,274
0
Create_ds/conductor-community/event-queue/nats/src/main/java/com/netflix/conductor/contribs/queue
Create_ds/conductor-community/event-queue/nats/src/main/java/com/netflix/conductor/contribs/queue/nats/JsmMessage.java
/*
 * <p>
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
 * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations under the License.
 */
package com.netflix.conductor.contribs.queue.nats;

import com.netflix.conductor.core.events.queue.Message;

/**
 * Conductor {@link Message} that carries the originating NATS JetStream message alongside the
 * payload, so consumers can acknowledge it (see {@code JetStreamObservableQueue#ack}).
 *
 * @author andrey.stelmashenko@gmail.com
 */
public class JsmMessage extends Message {

    // The raw JetStream message; retained solely to call ack() on it later.
    private io.nats.client.Message jsmMsg;

    public io.nats.client.Message getJsmMsg() {
        return jsmMsg;
    }

    public void setJsmMsg(io.nats.client.Message jsmMsg) {
        this.jsmMsg = jsmMsg;
    }
}
8,275
0
Create_ds/conductor-community/event-queue/nats/src/main/java/com/netflix/conductor/contribs/queue
Create_ds/conductor-community/event-queue/nats/src/main/java/com/netflix/conductor/contribs/queue/nats/NATSObservableQueue.java
/*
 * <p>
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
 * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations under the License.
 */
package com.netflix.conductor.contribs.queue.nats;

import org.apache.commons.lang3.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import io.nats.client.Connection;
import io.nats.client.Nats;
import io.nats.client.Subscription;
import rx.Scheduler;

/**
 * NATS (core, non-JetStream) implementation of the abstract queue. Manages a single connection
 * and a single subject (optionally queue-group) subscription.
 *
 * @author Oleksiy Lysak
 */
public class NATSObservableQueue extends NATSAbstractQueue {

    private static final Logger LOGGER = LoggerFactory.getLogger(NATSObservableQueue.class);

    private Subscription subs;
    private Connection conn;

    public NATSObservableQueue(String queueURI, Scheduler scheduler) {
        super(queueURI, "nats", scheduler);
        // NOTE(review): open() is invoked from the constructor; if NATSAbstractQueue.open()
        // touches overridable state this runs before the subclass is fully initialized — verify.
        open();
    }

    @Override
    public boolean isConnected() {
        return (conn != null && Connection.Status.CONNECTED.equals(conn.getStatus()));
    }

    /** Connects to the default NATS server; wraps any failure in a RuntimeException. */
    @Override
    public void connect() {
        try {
            Connection temp = Nats.connect();
            LOGGER.info("Successfully connected for " + queueURI);
            conn = temp;
        } catch (Exception e) {
            LOGGER.error("Unable to establish nats connection for " + queueURI, e);
            throw new RuntimeException(e);
        }
    }

    /**
     * Creates the subject subscription (queue-group when {@code queue} is set). Idempotent:
     * returns immediately if a subscription already exists. Failures are logged, not thrown.
     *
     * <p>NOTE(review): the Dispatcher created below is discarded — its onMessage handler is never
     * bound to the subscription, and conn.subscribe(...) returns a synchronous Subscription that
     * is never polled here. In the jnats API, async handlers are normally attached via
     * Dispatcher.subscribe(...). Verify against the jnats version in use that messages actually
     * reach onMessage.
     */
    @Override
    public void subscribe() {
        // do nothing if already subscribed
        if (subs != null) {
            return;
        }
        try {
            ensureConnected();
            // Create subject/queue subscription if the queue has been provided
            if (StringUtils.isNotEmpty(queue)) {
                LOGGER.info(
                        "No subscription. Creating a queue subscription. subject={}, queue={}",
                        subject,
                        queue);
                conn.createDispatcher(msg -> onMessage(msg.getSubject(), msg.getData()));
                subs = conn.subscribe(subject, queue);
            } else {
                LOGGER.info(
                        "No subscription. Creating a pub/sub subscription. subject={}", subject);
                conn.createDispatcher(msg -> onMessage(msg.getSubject(), msg.getData()));
                subs = conn.subscribe(subject);
            }
        } catch (Exception ex) {
            LOGGER.error(
                    "Subscription failed with " + ex.getMessage() + " for queueURI " + queueURI,
                    ex);
        }
    }

    @Override
    public void publish(String subject, byte[] data) throws Exception {
        ensureConnected();
        conn.publish(subject, data);
    }

    /** Unsubscribes and clears the subscription; errors are logged and swallowed. */
    @Override
    public void closeSubs() {
        if (subs != null) {
            try {
                subs.unsubscribe();
            } catch (Exception ex) {
                LOGGER.error("closeSubs failed with " + ex.getMessage() + " for " + queueURI, ex);
            }
            subs = null;
        }
    }

    /** Closes and clears the connection; errors are logged and swallowed. */
    @Override
    public void closeConn() {
        if (conn != null) {
            try {
                conn.close();
            } catch (Exception ex) {
                LOGGER.error("closeConn failed with " + ex.getMessage() + " for " + queueURI, ex);
            }
            conn = null;
        }
    }
}
8,276
0
Create_ds/conductor-community/event-queue/nats/src/main/java/com/netflix/conductor/contribs/queue
Create_ds/conductor-community/event-queue/nats/src/main/java/com/netflix/conductor/contribs/queue/nats/NatsException.java
/* * <p> * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * <p> * http://www.apache.org/licenses/LICENSE-2.0 * <p> * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. */ package com.netflix.conductor.contribs.queue.nats; public class NatsException extends RuntimeException { public NatsException() { super(); } public NatsException(String message) { super(message); } public NatsException(String message, Throwable cause) { super(message, cause); } public NatsException(Throwable cause) { super(cause); } protected NatsException( String message, Throwable cause, boolean enableSuppression, boolean writableStackTrace) { super(message, cause, enableSuppression, writableStackTrace); } }
8,277
0
Create_ds/conductor-community/event-queue/nats/src/main/java/com/netflix/conductor/contribs/queue
Create_ds/conductor-community/event-queue/nats/src/main/java/com/netflix/conductor/contribs/queue/nats/JetStreamObservableQueue.java
/*
 * <p>
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
 * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations under the License.
 */
package com.netflix.conductor.contribs.queue.nats;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.netflix.conductor.contribs.queue.nats.config.JetStreamProperties;
import com.netflix.conductor.core.events.queue.Message;
import com.netflix.conductor.core.events.queue.ObservableQueue;

import io.nats.client.Connection;
import io.nats.client.ConnectionListener;
import io.nats.client.JetStream;
import io.nats.client.JetStreamApiException;
import io.nats.client.JetStreamManagement;
import io.nats.client.JetStreamSubscription;
import io.nats.client.Nats;
import io.nats.client.Options;
import io.nats.client.PushSubscribeOptions;
import io.nats.client.api.RetentionPolicy;
import io.nats.client.api.StorageType;
import io.nats.client.api.StreamConfiguration;
import io.nats.client.api.StreamInfo;
import rx.Observable;
import rx.Scheduler;

/**
 * {@link ObservableQueue} backed by NATS JetStream. Incoming messages are buffered in-memory by a
 * push subscription and drained in batches by an rx polling interval. The queue URI may be either
 * {@code subject} or {@code subject:queueGroup}.
 *
 * @author andrey.stelmashenko@gmail.com
 */
public class JetStreamObservableQueue implements ObservableQueue {

    private static final Logger LOG = LoggerFactory.getLogger(JetStreamObservableQueue.class);

    // Buffer between the NATS dispatcher thread (producer) and the rx poller (consumer).
    private final LinkedBlockingQueue<Message> messages = new LinkedBlockingQueue<>();
    // Guards the connect sequence in start().
    private final Lock mu = new ReentrantLock();
    private final String queueType;
    private final String subject;
    private final String queueUri;
    private final JetStreamProperties properties;
    private final Scheduler scheduler;
    private final AtomicBoolean running = new AtomicBoolean(false);
    private Connection nc;
    private JetStreamSubscription sub;
    private Observable<Long> interval;
    private final String queueGroup;

    /**
     * @param properties JetStream connection/stream settings
     * @param queueType type label reported by {@link #getType()}
     * @param queueUri {@code subject} or {@code subject:queueGroup}
     * @param scheduler rx scheduler used for the polling interval
     */
    public JetStreamObservableQueue(
            JetStreamProperties properties, String queueType, String queueUri, Scheduler scheduler) {
        LOG.debug("JSM obs queue create, qtype={}, quri={}", queueType, queueUri);
        this.queueUri = queueUri;
        // If queue specified (e.g. subject:queue) - split to subject & queue
        if (queueUri.contains(":")) {
            this.subject = queueUri.substring(0, queueUri.indexOf(':'));
            queueGroup = queueUri.substring(queueUri.indexOf(':') + 1);
        } else {
            this.subject = queueUri;
            queueGroup = null;
        }
        this.queueType = queueType;
        this.properties = properties;
        this.scheduler = scheduler;
    }

    @Override
    public Observable<Message> observe() {
        return Observable.create(getOnSubscribe());
    }

    // Polls the in-memory buffer on a fixed interval and emits drained batches to the subscriber.
    private Observable.OnSubscribe<Message> getOnSubscribe() {
        return subscriber -> {
            interval =
                    Observable.interval(
                            properties.getPollTimeDuration().toMillis(),
                            TimeUnit.MILLISECONDS,
                            scheduler);
            interval.flatMap(
                            (Long x) -> {
                                if (!this.isRunning()) {
                                    LOG.debug(
                                            "Component stopped, skip listening for messages from JSM Queue '{}'",
                                            subject);
                                    return Observable.from(Collections.emptyList());
                                } else {
                                    List<Message> available = new ArrayList<>();
                                    messages.drainTo(available);
                                    if (!available.isEmpty()) {
                                        LOG.debug(
                                                "Processing JSM queue '{}' batch messages count={}",
                                                subject,
                                                available.size());
                                    }
                                    return Observable.from(available);
                                }
                            })
                    .subscribe(subscriber::onNext, subscriber::onError);
        };
    }

    @Override
    public String getType() {
        return queueType;
    }

    @Override
    public String getName() {
        return queueUri;
    }

    @Override
    public String getURI() {
        return getName();
    }

    /**
     * Acks each JetStream message; always returns an empty list (no per-message failures are
     * reported). Messages must be {@link JsmMessage} instances.
     */
    @Override
    public List<String> ack(List<Message> messages) {
        messages.forEach(m -> ((JsmMessage) m).getJsmMsg().ack());
        return Collections.emptyList();
    }

    /**
     * Publishes each payload to this queue's subject over a short-lived connection opened per
     * call.
     */
    @Override
    public void publish(List<Message> messages) {
        try (Connection conn = Nats.connect(properties.getUrl())) {
            JetStream js = conn.jetStream();
            for (Message msg : messages) {
                js.publish(subject, msg.getPayload().getBytes());
            }
        } catch (IOException | JetStreamApiException e) {
            throw new NatsException("Failed to publish to jsm", e);
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            throw new NatsException("Failed to publish to jsm", e);
        }
    }

    @Override
    public void setUnackTimeout(Message message, long unackTimeout) {
        // do nothing, not supported
    }

    /**
     * Pending-message count from the consumer, or 0 on lookup failure.
     *
     * <p>NOTE(review): {@code sub} is null until a subscription is established — calling this
     * before start()/subscribe completes would NPE. Verify callers.
     */
    @Override
    public long size() {
        try {
            return sub.getConsumerInfo().getNumPending();
        } catch (IOException | JetStreamApiException e) {
            LOG.warn("Failed to get stream '{}' info", subject);
        }
        return 0;
    }

    @Override
    public void start() {
        mu.lock();
        try {
            natsConnect();
        } finally {
            mu.unlock();
        }
    }

    /**
     * Stops polling and closes the NATS connection.
     *
     * <p>NOTE(review): {@code Observable.unsubscribeOn(scheduler)} only designates the scheduler
     * used when an unsubscription happens — it does not itself unsubscribe anything, and
     * {@code interval} is null if stop() is called before observe(). Verify intended behavior.
     */
    @Override
    public void stop() {
        interval.unsubscribeOn(scheduler);
        try {
            if (nc != null) {
                nc.close();
            }
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            LOG.error("Failed to close Nats connection", e);
        }
        running.set(false);
    }

    @Override
    public boolean isRunning() {
        return this.running.get();
    }

    // Connects asynchronously with unlimited reconnects; (re)subscribes on every
    // CONNECTED/RECONNECTED event. No-op if already running.
    private void natsConnect() {
        if (running.get()) {
            return;
        }
        LOG.info("Starting JSM observable, name={}", queueUri);
        try {
            Nats.connectAsynchronously(
                    new Options.Builder()
                            .connectionListener(
                                    (conn, type) -> {
                                        LOG.info("Connection to JSM updated: {}", type);
                                        this.nc = conn;
                                        subscribeOnce(conn, type);
                                    })
                            .server(properties.getUrl())
                            .maxReconnects(-1)
                            .build(),
                    true);
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            throw new NatsException("Failed to connect to JSM", e);
        }
    }

    // Idempotently ensures a work-queue stream named after the subject exists; add failures are
    // logged, not thrown.
    private void createStream(Connection nc) {
        JetStreamManagement jsm;
        try {
            jsm = nc.jetStreamManagement();
        } catch (IOException e) {
            throw new NatsException("Failed to get jsm management", e);
        }
        StreamConfiguration streamConfig =
                StreamConfiguration.builder()
                        .name(subject)
                        .retentionPolicy(RetentionPolicy.WorkQueue)
                        .storageType(StorageType.get(properties.getStreamStorageType()))
                        .build();
        try {
            StreamInfo streamInfo = jsm.addStream(streamConfig);
            LOG.debug("Create stream, info: {}", streamInfo);
        } catch (IOException | JetStreamApiException e) {
            LOG.error("Failed to add stream: " + streamConfig, e);
        }
    }

    // Creates the stream and subscription on (re)connect events only.
    private void subscribeOnce(Connection nc, ConnectionListener.Events type) {
        if (type.equals(ConnectionListener.Events.CONNECTED)
                || type.equals(ConnectionListener.Events.RECONNECTED)) {
            createStream(nc);
            subscribe(nc);
        }
    }

    // Durable push subscription with manual acks; received messages are buffered into `messages`.
    // Marks the queue running only on success; failures are logged, not thrown.
    private void subscribe(Connection nc) {
        try {
            JetStream js = nc.jetStream();
            PushSubscribeOptions pso =
                    PushSubscribeOptions.builder().durable(properties.getDurableName()).build();
            LOG.debug("Subscribing jsm, subject={}, options={}", subject, pso);
            sub =
                    js.subscribe(
                            subject,
                            queueGroup,
                            nc.createDispatcher(),
                            msg -> {
                                var message = new JsmMessage();
                                message.setJsmMsg(msg);
                                message.setId(msg.getSID());
                                message.setPayload(new String(msg.getData()));
                                messages.add(message);
                            },
                            /*autoAck*/ false,
                            pso);
            LOG.debug("Subscribed successfully {}", sub.getConsumerInfo());
            this.running.set(true);
        } catch (IOException | JetStreamApiException e) {
            LOG.error("Failed to subscribe", e);
        }
    }
}
8,278
0
Create_ds/conductor-community/event-queue/nats/src/main/java/com/netflix/conductor/contribs/queue
Create_ds/conductor-community/event-queue/nats/src/main/java/com/netflix/conductor/contribs/queue/nats/NATSAbstractQueue.java
/*
 * <p>
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
 * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations under the License.
 */
package com.netflix.conductor.contribs.queue.nats;

import java.util.Collections;
import java.util.LinkedList;
import java.util.List;
import java.util.concurrent.Executors;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.netflix.conductor.core.events.queue.Message;
import com.netflix.conductor.core.events.queue.ObservableQueue;

import io.nats.client.NUID;
import rx.Observable;
import rx.Scheduler;

/**
 * Base class for NATS-backed {@link ObservableQueue} implementations. Handles the common
 * subject/queue-group URI parsing, in-memory buffering of received messages, the Rx polling loop,
 * and a periodic monitor task that reconnects (and re-subscribes) when the connection drops.
 * Subclasses provide the concrete connect/publish/subscribe/close operations.
 *
 * <p>All logging uses SLF4J parameterized messages (deferred formatting) instead of
 * {@code String.format}/concatenation.
 *
 * @author Oleksiy Lysak
 */
public abstract class NATSAbstractQueue implements ObservableQueue {
    private static final Logger LOGGER = LoggerFactory.getLogger(NATSAbstractQueue.class);

    /** Buffer between the NATS callback thread and the Rx polling loop. */
    protected LinkedBlockingQueue<Message> messages = new LinkedBlockingQueue<>();

    /** Guards connection/subscription lifecycle transitions. */
    protected final Lock mu = new ReentrantLock();

    private final String queueType;
    private ScheduledExecutorService execs;
    private final Scheduler scheduler;

    protected final String queueURI;
    protected final String subject;
    protected String queue;

    // Indicates that observe was called (Event Handler) and we must to re-initiate subscription
    // upon reconnection
    private boolean observable;
    private boolean isOpened;
    private volatile boolean running;

    NATSAbstractQueue(String queueURI, String queueType, Scheduler scheduler) {
        this.queueURI = queueURI;
        this.queueType = queueType;
        this.scheduler = scheduler;
        // If queue specified (e.g. subject:queue) - split to subject & queue
        if (queueURI.contains(":")) {
            this.subject = queueURI.substring(0, queueURI.indexOf(':'));
            queue = queueURI.substring(queueURI.indexOf(':') + 1);
        } else {
            this.subject = queueURI;
            queue = null;
        }
        LOGGER.info(
                "Initialized with queueURI={}, subject={}, queue={}", queueURI, subject, queue);
    }

    /** Callback invoked by subclasses when a raw NATS message arrives; buffers it for the poller. */
    void onMessage(String subject, byte[] data) {
        String payload = new String(data);
        LOGGER.info("Received message for {}: {}", subject, payload);
        Message dstMsg = new Message();
        dstMsg.setId(NUID.nextGlobal());
        dstMsg.setPayload(payload);
        messages.add(dstMsg);
    }

    @Override
    public Observable<Message> observe() {
        LOGGER.info("Observe invoked for queueURI {}", queueURI);
        observable = true;
        mu.lock();
        try {
            subscribe();
        } finally {
            mu.unlock();
        }
        Observable.OnSubscribe<Message> onSubscribe =
                subscriber -> {
                    Observable<Long> interval =
                            Observable.interval(100, TimeUnit.MILLISECONDS, scheduler);
                    interval.flatMap(
                                    (Long x) -> {
                                        if (!isRunning()) {
                                            LOGGER.debug(
                                                    "Component stopped, skip listening for messages from NATS Queue");
                                            return Observable.from(Collections.emptyList());
                                        } else {
                                            List<Message> available = new LinkedList<>();
                                            messages.drainTo(available);
                                            if (!available.isEmpty()) {
                                                // Render the batch as "id=payload,..." for tracing.
                                                AtomicInteger count = new AtomicInteger(0);
                                                StringBuilder buffer = new StringBuilder();
                                                available.forEach(
                                                        msg -> {
                                                            buffer.append(msg.getId())
                                                                    .append("=")
                                                                    .append(msg.getPayload());
                                                            count.incrementAndGet();
                                                            if (count.get() < available.size()) {
                                                                buffer.append(",");
                                                            }
                                                        });
                                                LOGGER.info(
                                                        "Batch from {} to conductor is {}",
                                                        subject,
                                                        buffer);
                                            }
                                            return Observable.from(available);
                                        }
                                    })
                            .subscribe(subscriber::onNext, subscriber::onError);
                };
        return Observable.create(onSubscribe);
    }

    @Override
    public String getType() {
        return queueType;
    }

    @Override
    public String getName() {
        return queueURI;
    }

    @Override
    public String getURI() {
        return queueURI;
    }

    /** Plain NATS has no explicit ack; nothing to report as failed. */
    @Override
    public List<String> ack(List<Message> messages) {
        return Collections.emptyList();
    }

    @Override
    public void setUnackTimeout(Message message, long unackTimeout) {}

    @Override
    public long size() {
        return messages.size();
    }

    @Override
    public void publish(List<Message> messages) {
        messages.forEach(
                message -> {
                    try {
                        String payload = message.getPayload();
                        publish(subject, payload.getBytes());
                        LOGGER.info("Published message to {}: {}", subject, payload);
                    } catch (Exception ex) {
                        LOGGER.error(
                                "Failed to publish message {} to {}",
                                message.getPayload(),
                                subject,
                                ex);
                        throw new RuntimeException(ex);
                    }
                });
    }

    @Override
    public boolean rePublishIfNoAck() {
        return true;
    }

    @Override
    public void close() {
        LOGGER.info("Closing connection for {}", queueURI);
        mu.lock();
        try {
            if (execs != null) {
                execs.shutdownNow();
                execs = null;
            }
            closeSubs();
            closeConn();
            isOpened = false;
        } finally {
            mu.unlock();
        }
    }

    /** Opens the connection (best-effort) and starts the reconnect monitor. Idempotent. */
    public void open() {
        // do nothing if not closed
        if (isOpened) {
            return;
        }
        mu.lock();
        try {
            try {
                connect();
                // Re-initiated subscription if existed
                if (observable) {
                    subscribe();
                }
            } catch (Exception ignore) {
                // Best-effort: the monitor task scheduled below retries the connection.
            }
            execs = Executors.newScheduledThreadPool(1);
            execs.scheduleAtFixedRate(this::monitor, 0, 500, TimeUnit.MILLISECONDS);
            isOpened = true;
        } finally {
            mu.unlock();
        }
    }

    /** Periodic task: tears down and rebuilds the connection if it was lost. */
    private void monitor() {
        if (isConnected()) {
            return;
        }
        LOGGER.error("Monitor invoked for {}", queueURI);
        mu.lock();
        try {
            closeSubs();
            closeConn();
            // Connect
            connect();
            // Re-initiated subscription if existed
            if (observable) {
                subscribe();
            }
        } catch (Exception ex) {
            LOGGER.error("Monitor failed with {} for {}", ex.getMessage(), queueURI, ex);
        } finally {
            mu.unlock();
        }
    }

    public boolean isClosed() {
        return !isOpened;
    }

    /** @throws RuntimeException when there is no live NATS connection */
    void ensureConnected() {
        if (!isConnected()) {
            throw new RuntimeException("No nats connection");
        }
    }

    @Override
    public void start() {
        LOGGER.info("Started listening to {}:{}", getClass().getSimpleName(), queueURI);
        running = true;
    }

    @Override
    public void stop() {
        LOGGER.info("Stopped listening to {}:{}", getClass().getSimpleName(), queueURI);
        running = false;
    }

    @Override
    public boolean isRunning() {
        return running;
    }

    abstract void connect();

    abstract boolean isConnected();

    abstract void publish(String subject, byte[] data) throws Exception;

    abstract void subscribe();

    abstract void closeSubs();

    abstract void closeConn();
}
8,279
0
Create_ds/conductor-community/event-queue/nats/src/main/java/com/netflix/conductor/contribs/queue/nats
Create_ds/conductor-community/event-queue/nats/src/main/java/com/netflix/conductor/contribs/queue/nats/config/JetStreamProperties.java
/*
 * <p>
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
 * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations under the License.
 */
package com.netflix.conductor.contribs.queue.nats.config;

import java.time.Duration;

import org.springframework.boot.context.properties.ConfigurationProperties;

import io.nats.client.Options;

/**
 * Configuration properties for the NATS JetStream event queue, bound from the
 * {@code conductor.event-queues.jsm.*} namespace. Fields are grouped with their accessors.
 *
 * @author andrey.stelmashenko@gmail.com
 */
@ConfigurationProperties("conductor.event-queues.jsm")
public class JetStreamProperties {

    /** Optional prefix for listener queue names; empty means a generated prefix is used. */
    private String listenerQueuePrefix = "";

    public String getListenerQueuePrefix() {
        return listenerQueuePrefix;
    }

    public void setListenerQueuePrefix(String listenerQueuePrefix) {
        this.listenerQueuePrefix = listenerQueuePrefix;
    }

    /** The durable subscriber name for the subscription. */
    private String durableName = "defaultQueue";

    public String getDurableName() {
        return durableName;
    }

    public void setDurableName(String durableName) {
        this.durableName = durableName;
    }

    /** Storage backing for the JetStream stream (e.g. "file" or "memory"). */
    private String streamStorageType = "file";

    public String getStreamStorageType() {
        return streamStorageType;
    }

    public void setStreamStorageType(String streamStorageType) {
        this.streamStorageType = streamStorageType;
    }

    /** The NATS connection url. */
    private String url = Options.DEFAULT_URL;

    public String getUrl() {
        return url;
    }

    public void setUrl(String url) {
        this.url = url;
    }

    /** How often the queue buffer is polled for new messages. */
    private Duration pollTimeDuration = Duration.ofMillis(100);

    public Duration getPollTimeDuration() {
        return pollTimeDuration;
    }

    public void setPollTimeDuration(Duration pollTimeDuration) {
        this.pollTimeDuration = pollTimeDuration;
    }

    /** WAIT tasks default queue group, to make subscription round-robin delivery to single sub. */
    private String defaultQueueGroup = "wait-group";

    public String getDefaultQueueGroup() {
        return defaultQueueGroup;
    }

    public void setDefaultQueueGroup(String defaultQueueGroup) {
        this.defaultQueueGroup = defaultQueueGroup;
    }
}
8,280
0
Create_ds/conductor-community/event-queue/nats/src/main/java/com/netflix/conductor/contribs/queue/nats
Create_ds/conductor-community/event-queue/nats/src/main/java/com/netflix/conductor/contribs/queue/nats/config/NATSConfiguration.java
/* * <p> * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * <p> * http://www.apache.org/licenses/LICENSE-2.0 * <p> * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. */ package com.netflix.conductor.contribs.queue.nats.config; import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty; import org.springframework.context.annotation.Bean; import org.springframework.context.annotation.Configuration; import org.springframework.core.env.Environment; import com.netflix.conductor.core.events.EventQueueProvider; import rx.Scheduler; @Configuration @ConditionalOnProperty(name = "conductor.event-queues.nats.enabled", havingValue = "true") public class NATSConfiguration { @Bean public EventQueueProvider natsEventQueueProvider(Environment environment, Scheduler scheduler) { return new NATSEventQueueProvider(environment, scheduler); } }
8,281
0
Create_ds/conductor-community/event-queue/nats/src/main/java/com/netflix/conductor/contribs/queue/nats
Create_ds/conductor-community/event-queue/nats/src/main/java/com/netflix/conductor/contribs/queue/nats/config/JetStreamConfiguration.java
/*
 * <p>
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
 * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations under the License.
 */
package com.netflix.conductor.contribs.queue.nats.config;

import java.util.EnumMap;
import java.util.Map;

import org.apache.commons.lang3.StringUtils;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.boot.context.properties.EnableConfigurationProperties;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;

import com.netflix.conductor.core.config.ConductorProperties;
import com.netflix.conductor.core.events.EventQueueProvider;
import com.netflix.conductor.core.events.queue.ObservableQueue;
import com.netflix.conductor.model.TaskModel;

import rx.Scheduler;

/**
 * Spring configuration for the JetStream event queue. Only active when
 * {@code conductor.event-queues.jsm.enabled=true}.
 *
 * @author andrey.stelmashenko@gmail.com
 */
@Configuration
@EnableConfigurationProperties(JetStreamProperties.class)
@ConditionalOnProperty(name = "conductor.event-queues.jsm.enabled", havingValue = "true")
public class JetStreamConfiguration {

    @Bean
    public EventQueueProvider jsmEventQueueProvider(
            JetStreamProperties properties, Scheduler scheduler) {
        return new JetStreamEventQueueProvider(properties, scheduler);
    }

    /**
     * When JSM is the default event-queue type, builds one status-notification queue per terminal
     * task status (COMPLETED, FAILED). Queue names are
     * {@code <prefix><STATUS>[:<queue-group>]}, where the prefix falls back to
     * {@code <appId>_jsm_notify_<stack>_} when no listener-queue prefix is configured.
     */
    @ConditionalOnProperty(name = "conductor.default-event-queue.type", havingValue = "jsm")
    @Bean
    public Map<TaskModel.Status, ObservableQueue> getQueues(
            JetStreamEventQueueProvider provider,
            ConductorProperties conductorProperties,
            JetStreamProperties properties) {
        String stack = "";
        // Consistent with getQueueGroup below: use StringUtils (already imported) for the
        // null/empty check. isNotEmpty == (s != null && s.length() > 0).
        if (StringUtils.isNotEmpty(conductorProperties.getStack())) {
            stack = conductorProperties.getStack() + "_";
        }
        TaskModel.Status[] statuses =
                new TaskModel.Status[] {TaskModel.Status.COMPLETED, TaskModel.Status.FAILED};
        Map<TaskModel.Status, ObservableQueue> queues = new EnumMap<>(TaskModel.Status.class);
        for (TaskModel.Status status : statuses) {
            String queuePrefix =
                    StringUtils.isBlank(properties.getListenerQueuePrefix())
                            ? conductorProperties.getAppId() + "_jsm_notify_" + stack
                            : properties.getListenerQueuePrefix();
            String queueName = queuePrefix + status.name() + getQueueGroup(properties);
            ObservableQueue queue = provider.getQueue(queueName);
            queues.put(status, queue);
        }
        return queues;
    }

    /** Returns the ":group" suffix for queue URIs, or "" when no default group is configured. */
    private String getQueueGroup(final JetStreamProperties properties) {
        // StringUtils.isBlank covers both the null and the blank case of the original check.
        if (StringUtils.isBlank(properties.getDefaultQueueGroup())) {
            return "";
        }
        return ":" + properties.getDefaultQueueGroup();
    }
}
8,282
0
Create_ds/conductor-community/event-queue/nats/src/main/java/com/netflix/conductor/contribs/queue/nats
Create_ds/conductor-community/event-queue/nats/src/main/java/com/netflix/conductor/contribs/queue/nats/config/JetStreamEventQueueProvider.java
/*
 * <p>
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
 * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations under the License.
 */
package com.netflix.conductor.contribs.queue.nats.config;

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.lang.NonNull;

import com.netflix.conductor.contribs.queue.nats.JetStreamObservableQueue;
import com.netflix.conductor.core.events.EventQueueProvider;
import com.netflix.conductor.core.events.queue.ObservableQueue;

import rx.Scheduler;

/**
 * {@link EventQueueProvider} for NATS JetStream queues. Queues are created lazily and cached per
 * URI, so repeated lookups for the same URI return the same {@link ObservableQueue} instance.
 *
 * @author andrey.stelmashenko@gmail.com
 */
public class JetStreamEventQueueProvider implements EventQueueProvider {
    public static final String QUEUE_TYPE = "jsm";
    private static final Logger LOG = LoggerFactory.getLogger(JetStreamEventQueueProvider.class);

    private final Map<String, ObservableQueue> queues = new ConcurrentHashMap<>();
    private final JetStreamProperties properties;
    private final Scheduler scheduler;

    public JetStreamEventQueueProvider(JetStreamProperties properties, Scheduler scheduler) {
        LOG.info("NATS Event Queue Provider initialized...");
        this.properties = properties;
        this.scheduler = scheduler;
    }

    @Override
    public String getQueueType() {
        return QUEUE_TYPE;
    }

    @Override
    @NonNull
    public ObservableQueue getQueue(String queueURI) throws IllegalArgumentException {
        LOG.debug("Getting obs queue, quri={}", queueURI);
        // Atomically create-or-reuse the queue for this URI.
        return queues.computeIfAbsent(queueURI, this::createQueue);
    }

    /** Builds a fresh JetStream-backed queue for the given URI. */
    private ObservableQueue createQueue(String uri) {
        return new JetStreamObservableQueue(properties, getQueueType(), uri, scheduler);
    }
}
8,283
0
Create_ds/conductor-community/event-queue/nats/src/main/java/com/netflix/conductor/contribs/queue/nats
Create_ds/conductor-community/event-queue/nats/src/main/java/com/netflix/conductor/contribs/queue/nats/config/NATSEventQueueProvider.java
/*
 * <p>
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
 * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations under the License.
 */
package com.netflix.conductor.contribs.queue.nats.config;

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.core.env.Environment;
import org.springframework.lang.NonNull;

import com.netflix.conductor.contribs.queue.nats.NATSObservableQueue;
import com.netflix.conductor.core.events.EventQueueProvider;
import com.netflix.conductor.core.events.queue.ObservableQueue;

import rx.Scheduler;

/**
 * {@link EventQueueProvider} for plain NATS queues. Queues are cached per URI; a cached queue that
 * has been closed is re-opened before being handed back to the caller.
 *
 * @author Oleksiy Lysak
 */
public class NATSEventQueueProvider implements EventQueueProvider {
    private static final Logger LOGGER = LoggerFactory.getLogger(NATSEventQueueProvider.class);

    protected Map<String, NATSObservableQueue> queues = new ConcurrentHashMap<>();
    private final Scheduler scheduler;

    public NATSEventQueueProvider(Environment environment, Scheduler scheduler) {
        this.scheduler = scheduler;
        LOGGER.info("NATS Event Queue Provider initialized...");
    }

    @Override
    public String getQueueType() {
        return "nats";
    }

    @Override
    @NonNull
    public ObservableQueue getQueue(String queueURI) {
        // Create-or-reuse, then make sure the returned queue is open.
        NATSObservableQueue natsQueue =
                queues.computeIfAbsent(
                        queueURI, uri -> new NATSObservableQueue(uri, scheduler));
        if (natsQueue.isClosed()) {
            natsQueue.open();
        }
        return natsQueue;
    }
}
8,284
0
Create_ds/conductor-community/test-util/src/test/java/com/netflix
Create_ds/conductor-community/test-util/src/test/java/com/netflix/conductor/ConductorTestApp.java
/* * <p> * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * <p> * http://www.apache.org/licenses/LICENSE-2.0 * <p> * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. */ package com.netflix.conductor; import java.io.IOException; import org.springframework.boot.SpringApplication; import org.springframework.boot.autoconfigure.SpringBootApplication; import org.springframework.boot.autoconfigure.jdbc.DataSourceAutoConfiguration; /** Copy of com.netflix.conductor.Conductor for use by @SpringBootTest in AbstractSpecification. */ // Prevents from the datasource beans to be loaded, AS they are needed only for specific databases. // In case that SQL database is selected this class will be imported back in the appropriate // database persistence module. @SpringBootApplication(exclude = DataSourceAutoConfiguration.class) public class ConductorTestApp { public static void main(String[] args) throws IOException { SpringApplication.run(ConductorTestApp.class, args); } }
8,285
0
Create_ds/conductor-community/test-util/src/test/java/com/netflix/conductor/test
Create_ds/conductor-community/test-util/src/test/java/com/netflix/conductor/test/integration/AbstractEndToEndTest.java
/*
 * <p>
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
 * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations under the License.
 */
package com.netflix.conductor.test.integration;

import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.io.Reader;
import java.util.Arrays;
import java.util.HashMap;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.Optional;

import org.apache.http.HttpHost;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.client.RestClientBuilder;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.test.context.TestPropertySource;
import org.testcontainers.elasticsearch.ElasticsearchContainer;
import org.testcontainers.utility.DockerImageName;

import com.netflix.conductor.common.metadata.events.EventHandler;
import com.netflix.conductor.common.metadata.tasks.TaskDef;
import com.netflix.conductor.common.metadata.tasks.TaskType;
import com.netflix.conductor.common.metadata.workflow.WorkflowDef;
import com.netflix.conductor.common.metadata.workflow.WorkflowTask;
import com.netflix.conductor.common.run.Workflow;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;

/**
 * Base class for end-to-end integration tests. Spins up a singleton Elasticsearch testcontainer
 * (shared across all subclasses of this test within one JVM), exercises ephemeral-workflow and
 * event-handler round-trips, and leaves the actual transport (HTTP, gRPC, ...) to subclasses via
 * the abstract start/get/register hooks at the bottom.
 */
@TestPropertySource(
        properties = {"conductor.indexing.enabled=true", "conductor.elasticsearch.version=6"})
public abstract class AbstractEndToEndTest {

    private static final Logger log = LoggerFactory.getLogger(AbstractEndToEndTest.class);

    private static final String TASK_DEFINITION_PREFIX = "task_";
    private static final String DEFAULT_DESCRIPTION = "description";
    // Represents null value deserialized from the redis in memory db
    private static final String DEFAULT_NULL_VALUE = "null";
    protected static final String DEFAULT_EMAIL_ADDRESS = "test@harness.com";

    private static final ElasticsearchContainer container =
            new ElasticsearchContainer(
                    DockerImageName.parse("docker.elastic.co/elasticsearch/elasticsearch-oss")
                            .withTag("6.8.12")); // this should match the client version

    // Low-level ES REST client used for per-class cleanup; built in initializeEs().
    private static RestClient restClient;

    // Initialization happens in a static block so the container is initialized
    // only once for all the sub-class tests in a CI environment
    // container is stopped when JVM exits
    // https://www.testcontainers.org/test_framework_integration/manual_lifecycle_control/#singleton-containers
    static {
        container.start();
        String httpHostAddress = container.getHttpHostAddress();
        // The Conductor app under test reads the ES url from this system property.
        System.setProperty("conductor.elasticsearch.url", "http://" + httpHostAddress);
        log.info("Initialized Elasticsearch {}", container.getContainerId());
    }

    /** Builds the REST client pointed at the containerized ES instance. */
    @BeforeClass
    public static void initializeEs() {
        String httpHostAddress = container.getHttpHostAddress();
        String host = httpHostAddress.split(":")[0];
        int port = Integer.parseInt(httpHostAddress.split(":")[1]);

        RestClientBuilder restClientBuilder = RestClient.builder(new HttpHost(host, port, "http"));
        restClient = restClientBuilder.build();
    }

    /** Deletes every ES index after the test class so subclasses start from a clean slate. */
    @AfterClass
    public static void cleanupEs() throws Exception {
        // deletes all indices
        Response beforeResponse = restClient.performRequest(new Request("GET", "/_cat/indices"));
        Reader streamReader = new InputStreamReader(beforeResponse.getEntity().getContent());
        BufferedReader bufferedReader = new BufferedReader(streamReader);

        String line;
        while ((line = bufferedReader.readLine()) != null) {
            // _cat/indices is whitespace-delimited; field 2 is the index name.
            String[] fields = line.split("\\s");
            String endpoint = String.format("/%s", fields[2]);
            restClient.performRequest(new Request("DELETE", endpoint));
        }
        if (restClient != null) {
            restClient.close();
        }
    }

    /** Ephemeral workflow whose tasks reference pre-registered (stored) task definitions. */
    @Test
    public void testEphemeralWorkflowsWithStoredTasks() {
        String workflowExecutionName = "testEphemeralWorkflow";

        createAndRegisterTaskDefinitions("storedTaskDef", 5);
        WorkflowDef workflowDefinition = createWorkflowDefinition(workflowExecutionName);
        WorkflowTask workflowTask1 = createWorkflowTask("storedTaskDef1");
        WorkflowTask workflowTask2 = createWorkflowTask("storedTaskDef2");
        workflowDefinition.getTasks().addAll(Arrays.asList(workflowTask1, workflowTask2));

        String workflowId = startWorkflow(workflowExecutionName, workflowDefinition);
        assertNotNull(workflowId);

        Workflow workflow = getWorkflow(workflowId, true);
        WorkflowDef ephemeralWorkflow = workflow.getWorkflowDefinition();
        assertNotNull(ephemeralWorkflow);
        // The definition embedded in the execution must round-trip unchanged.
        assertEquals(workflowDefinition, ephemeralWorkflow);
    }

    /** Ephemeral workflow whose task definitions are themselves inlined (not registered). */
    @Test
    public void testEphemeralWorkflowsWithEphemeralTasks() {
        String workflowExecutionName = "ephemeralWorkflowWithEphemeralTasks";

        WorkflowDef workflowDefinition = createWorkflowDefinition(workflowExecutionName);
        WorkflowTask workflowTask1 = createWorkflowTask("ephemeralTask1");
        TaskDef taskDefinition1 = createTaskDefinition("ephemeralTaskDef1");
        workflowTask1.setTaskDefinition(taskDefinition1);
        WorkflowTask workflowTask2 = createWorkflowTask("ephemeralTask2");
        TaskDef taskDefinition2 = createTaskDefinition("ephemeralTaskDef2");
        workflowTask2.setTaskDefinition(taskDefinition2);
        workflowDefinition.getTasks().addAll(Arrays.asList(workflowTask1, workflowTask2));

        String workflowId = startWorkflow(workflowExecutionName, workflowDefinition);
        assertNotNull(workflowId);

        Workflow workflow = getWorkflow(workflowId, true);
        WorkflowDef ephemeralWorkflow = workflow.getWorkflowDefinition();
        assertNotNull(ephemeralWorkflow);
        assertEquals(workflowDefinition, ephemeralWorkflow);

        List<WorkflowTask> ephemeralTasks = ephemeralWorkflow.getTasks();
        assertEquals(2, ephemeralTasks.size());
        // Every task must carry its inlined definition through the round-trip.
        for (WorkflowTask ephemeralTask : ephemeralTasks) {
            assertNotNull(ephemeralTask.getTaskDefinition());
        }
    }

    /** Mix of inlined (ephemeral) and pre-registered (stored) task definitions. */
    @Test
    public void testEphemeralWorkflowsWithEphemeralAndStoredTasks() {
        createAndRegisterTaskDefinitions("storedTask", 1);

        WorkflowDef workflowDefinition =
                createWorkflowDefinition("testEphemeralWorkflowsWithEphemeralAndStoredTasks");

        WorkflowTask workflowTask1 = createWorkflowTask("ephemeralTask1");
        TaskDef taskDefinition1 = createTaskDefinition("ephemeralTaskDef1");
        workflowTask1.setTaskDefinition(taskDefinition1);

        WorkflowTask workflowTask2 = createWorkflowTask("storedTask0");

        workflowDefinition.getTasks().add(workflowTask1);
        workflowDefinition.getTasks().add(workflowTask2);

        String workflowExecutionName = "ephemeralWorkflowWithEphemeralAndStoredTasks";

        String workflowId = startWorkflow(workflowExecutionName, workflowDefinition);
        assertNotNull(workflowId);

        Workflow workflow = getWorkflow(workflowId, true);
        WorkflowDef ephemeralWorkflow = workflow.getWorkflowDefinition();
        assertNotNull(ephemeralWorkflow);
        assertEquals(workflowDefinition, ephemeralWorkflow);

        TaskDef storedTaskDefinition = getTaskDefinition("storedTask0");
        List<WorkflowTask> tasks = ephemeralWorkflow.getTasks();
        assertEquals(2, tasks.size());
        assertEquals(workflowTask1, tasks.get(0));
        // The stored task's definition is resolved from the registry, not inlined.
        TaskDef currentStoredTaskDefinition = tasks.get(1).getTaskDefinition();
        assertNotNull(currentStoredTaskDefinition);
        assertEquals(storedTaskDefinition, currentStoredTaskDefinition);
    }

    /** Registers a complete_task event handler and verifies it can be read back by event name. */
    @Test
    public void testEventHandler() {
        String eventName = "conductor:test_workflow:complete_task_with_event";
        EventHandler eventHandler = new EventHandler();
        eventHandler.setName("test_complete_task_event");
        EventHandler.Action completeTaskAction = new EventHandler.Action();
        completeTaskAction.setAction(EventHandler.Action.Type.complete_task);
        completeTaskAction.setComplete_task(new EventHandler.TaskDetails());
        completeTaskAction.getComplete_task().setTaskRefName("test_task");
        completeTaskAction.getComplete_task().setWorkflowId("test_id");
        completeTaskAction.getComplete_task().setOutput(new HashMap<>());
        eventHandler.getActions().add(completeTaskAction);
        eventHandler.setEvent(eventName);
        eventHandler.setActive(true);
        registerEventHandler(eventHandler);

        Iterator<EventHandler> it = getEventHandlers(eventName, true);
        EventHandler result = it.next();
        // Exactly one handler should be registered under this event name.
        assertFalse(it.hasNext());
        assertEquals(eventHandler.getName(), result.getName());
    }

    /**
     * Builds a SIMPLE workflow task with every optional string field set to the sentinel
     * DEFAULT_NULL_VALUE ("null") so serialized/deserialized definitions compare equal.
     */
    protected WorkflowTask createWorkflowTask(String name) {
        WorkflowTask workflowTask = new WorkflowTask();
        workflowTask.setName(name);
        workflowTask.setWorkflowTaskType(TaskType.SIMPLE);
        workflowTask.setTaskReferenceName(name);
        workflowTask.setDescription(getDefaultDescription(name));
        workflowTask.setDynamicTaskNameParam(DEFAULT_NULL_VALUE);
        workflowTask.setCaseValueParam(DEFAULT_NULL_VALUE);
        workflowTask.setCaseExpression(DEFAULT_NULL_VALUE);
        workflowTask.setDynamicForkTasksParam(DEFAULT_NULL_VALUE);
        workflowTask.setDynamicForkTasksInputParamName(DEFAULT_NULL_VALUE);
        workflowTask.setSink(DEFAULT_NULL_VALUE);
        workflowTask.setEvaluatorType(DEFAULT_NULL_VALUE);
        workflowTask.setExpression(DEFAULT_NULL_VALUE);
        return workflowTask;
    }

    /** Minimal task definition carrying only a name. */
    protected TaskDef createTaskDefinition(String name) {
        TaskDef taskDefinition = new TaskDef();
        taskDefinition.setName(name);
        return taskDefinition;
    }

    /** Minimal workflow definition with default description/failure-workflow/owner fields. */
    protected WorkflowDef createWorkflowDefinition(String workflowName) {
        WorkflowDef workflowDefinition = new WorkflowDef();
        workflowDefinition.setName(workflowName);
        workflowDefinition.setDescription(getDefaultDescription(workflowName));
        workflowDefinition.setFailureWorkflow(DEFAULT_NULL_VALUE);
        workflowDefinition.setOwnerEmail(DEFAULT_EMAIL_ADDRESS);
        return workflowDefinition;
    }

    /**
     * Creates and registers {@code numberOfTaskDefinitions} task definitions named
     * {@code <prefix>0..n-1} (falling back to TASK_DEFINITION_PREFIX when prefix is null),
     * each with 3 retries and 60s timeouts, RETRY timeout policy.
     */
    protected List<TaskDef> createAndRegisterTaskDefinitions(
            String prefixTaskDefinition, int numberOfTaskDefinitions) {
        String prefix = Optional.ofNullable(prefixTaskDefinition).orElse(TASK_DEFINITION_PREFIX);
        List<TaskDef> definitions = new LinkedList<>();
        for (int i = 0; i < numberOfTaskDefinitions; i++) {
            TaskDef def =
                    new TaskDef(
                            prefix + i,
                            "task " + i + DEFAULT_DESCRIPTION,
                            DEFAULT_EMAIL_ADDRESS,
                            3,
                            60,
                            60);
            def.setTimeoutPolicy(TaskDef.TimeoutPolicy.RETRY);
            definitions.add(def);
        }
        this.registerTaskDefinitions(definitions);
        return definitions;
    }

    private String getDefaultDescription(String nameResource) {
        return nameResource + " " + DEFAULT_DESCRIPTION;
    }

    // Transport-specific hooks implemented by HTTP/gRPC subclasses.

    protected abstract String startWorkflow(
            String workflowExecutionName, WorkflowDef workflowDefinition);

    protected abstract Workflow getWorkflow(String workflowId, boolean includeTasks);

    protected abstract TaskDef getTaskDefinition(String taskName);

    protected abstract void registerTaskDefinitions(List<TaskDef> taskDefinitionList);

    protected abstract void registerWorkflowDefinition(WorkflowDef workflowDefinition);

    protected abstract void registerEventHandler(EventHandler eventHandler);

    protected abstract Iterator<EventHandler> getEventHandlers(String event, boolean activeOnly);
}
8,286
0
Create_ds/conductor-community/test-util/src/test/java/com/netflix/conductor/test/integration
Create_ds/conductor-community/test-util/src/test/java/com/netflix/conductor/test/integration/grpc/AbstractGrpcEndToEndTest.java
/*
 * <p>
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
 * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations under the License.
 */
package com.netflix.conductor.test.integration.grpc;

import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;

import org.junit.Test;
import org.junit.runner.RunWith;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.test.context.TestPropertySource;
import org.springframework.test.context.junit4.SpringRunner;

import com.netflix.conductor.client.grpc.EventClient;
import com.netflix.conductor.client.grpc.MetadataClient;
import com.netflix.conductor.client.grpc.TaskClient;
import com.netflix.conductor.client.grpc.WorkflowClient;
import com.netflix.conductor.common.metadata.events.EventHandler;
import com.netflix.conductor.common.metadata.tasks.Task;
import com.netflix.conductor.common.metadata.tasks.Task.Status;
import com.netflix.conductor.common.metadata.tasks.TaskDef;
import com.netflix.conductor.common.metadata.tasks.TaskDef.TimeoutPolicy;
import com.netflix.conductor.common.metadata.tasks.TaskResult;
import com.netflix.conductor.common.metadata.workflow.StartWorkflowRequest;
import com.netflix.conductor.common.metadata.workflow.WorkflowDef;
import com.netflix.conductor.common.metadata.workflow.WorkflowTask;
import com.netflix.conductor.common.run.SearchResult;
import com.netflix.conductor.common.run.TaskSummary;
import com.netflix.conductor.common.run.Workflow;
import com.netflix.conductor.common.run.Workflow.WorkflowStatus;
import com.netflix.conductor.common.run.WorkflowSummary;
import com.netflix.conductor.test.integration.AbstractEndToEndTest;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;

/**
 * Base class for gRPC end-to-end integration tests. Boots an embedded Conductor server with the
 * gRPC endpoint enabled on port 8092 and exercises the full client surface (metadata, workflow,
 * task and event clients) against it.
 *
 * <p>Concrete subclasses are expected to initialize the four static client fields before the
 * tests run (e.g. in a {@code @BeforeClass} method).
 */
@RunWith(SpringRunner.class)
@SpringBootTest(
        properties = {"conductor.grpc-server.enabled=true", "conductor.grpc-server.port=8092"})
@TestPropertySource(locations = "classpath:application-integrationtest.properties")
public abstract class AbstractGrpcEndToEndTest extends AbstractEndToEndTest {

    // gRPC clients; populated by the concrete subclass before any test executes.
    protected static TaskClient taskClient;
    protected static WorkflowClient workflowClient;
    protected static MetadataClient metadataClient;
    protected static EventClient eventClient;

    /** Starts a workflow by name, passing the full (possibly ephemeral) workflow definition. */
    @Override
    protected String startWorkflow(String workflowExecutionName, WorkflowDef workflowDefinition) {
        StartWorkflowRequest workflowRequest =
                new StartWorkflowRequest()
                        .withName(workflowExecutionName)
                        .withWorkflowDef(workflowDefinition);
        return workflowClient.startWorkflow(workflowRequest);
    }

    /** Fetches a workflow execution, optionally including its task list. */
    @Override
    protected Workflow getWorkflow(String workflowId, boolean includeTasks) {
        return workflowClient.getWorkflow(workflowId, includeTasks);
    }

    /** Looks up a registered task definition by name via the metadata client. */
    @Override
    protected TaskDef getTaskDefinition(String taskName) {
        return metadataClient.getTaskDef(taskName);
    }

    /** Registers the given task definitions with the server. */
    @Override
    protected void registerTaskDefinitions(List<TaskDef> taskDefinitionList) {
        metadataClient.registerTaskDefs(taskDefinitionList);
    }

    /** Registers the given workflow definition with the server. */
    @Override
    protected void registerWorkflowDefinition(WorkflowDef workflowDefinition) {
        metadataClient.registerWorkflowDef(workflowDefinition);
    }

    /** Registers the given event handler with the server. */
    @Override
    protected void registerEventHandler(EventHandler eventHandler) {
        eventClient.registerEventHandler(eventHandler);
    }

    /** Returns the event handlers registered for the given event name. */
    @Override
    protected Iterator<EventHandler> getEventHandlers(String event, boolean activeOnly) {
        return eventClient.getEventHandlers(event, activeOnly);
    }

    /**
     * Single end-to-end scenario covering the whole gRPC client surface: task/workflow metadata
     * registration, workflow start, task polling and completion, search (v1/v2, basic/advanced),
     * and workflow terminate/restart. The assertions depend on the exact execution order, so the
     * statements below must not be reordered.
     *
     * @throws Exception on any client or interruption failure
     */
    @Test
    public void testAll() throws Exception {
        assertNotNull(taskClient);

        // Register five simple task definitions t0..t4 and verify they can be read back.
        List<TaskDef> defs = new LinkedList<>();
        for (int i = 0; i < 5; i++) {
            TaskDef def = new TaskDef("t" + i, "task " + i, DEFAULT_EMAIL_ADDRESS, 3, 60, 60);
            def.setTimeoutPolicy(TimeoutPolicy.RETRY);
            defs.add(def);
        }
        metadataClient.registerTaskDefs(defs);

        for (int i = 0; i < 5; i++) {
            final String taskName = "t" + i;
            TaskDef def = metadataClient.getTaskDef(taskName);
            assertNotNull(def);
            assertEquals(taskName, def.getName());
        }

        // Register a two-task workflow definition and verify round-trip equality.
        WorkflowDef def = createWorkflowDefinition("test");
        WorkflowTask t0 = createWorkflowTask("t0");
        WorkflowTask t1 = createWorkflowTask("t1");
        def.getTasks().add(t0);
        def.getTasks().add(t1);
        metadataClient.registerWorkflowDef(def);
        WorkflowDef found = metadataClient.getWorkflowDef(def.getName(), null);
        assertNotNull(found);
        assertEquals(def, found);

        // Start an execution; without tasks first, then with tasks (t0 should be scheduled).
        String correlationId = "test_corr_id";
        StartWorkflowRequest startWf = new StartWorkflowRequest();
        startWf.setName(def.getName());
        startWf.setCorrelationId(correlationId);
        String workflowId = workflowClient.startWorkflow(startWf);
        assertNotNull(workflowId);

        Workflow workflow = workflowClient.getWorkflow(workflowId, false);
        assertEquals(0, workflow.getTasks().size());
        assertEquals(workflowId, workflow.getWorkflowId());

        workflow = workflowClient.getWorkflow(workflowId, true);
        assertNotNull(workflow);
        assertEquals(WorkflowStatus.RUNNING, workflow.getStatus());
        assertEquals(1, workflow.getTasks().size());
        assertEquals(t0.getTaskReferenceName(), workflow.getTasks().get(0).getReferenceTaskName());
        assertEquals(workflowId, workflow.getWorkflowId());

        List<String> runningIds =
                workflowClient.getRunningWorkflow(def.getName(), def.getVersion());
        assertNotNull(runningIds);
        assertEquals(1, runningIds.size());
        assertEquals(workflowId, runningIds.get(0));

        // Polling an unknown task type yields nothing; polling t0 yields the scheduled task.
        List<Task> polled =
                taskClient.batchPollTasksByTaskType("non existing task", "test", 1, 100);
        assertNotNull(polled);
        assertEquals(0, polled.size());

        polled = taskClient.batchPollTasksByTaskType(t0.getName(), "test", 1, 100);
        assertNotNull(polled);
        assertEquals(1, polled.size());
        assertEquals(t0.getName(), polled.get(0).getTaskDefName());

        // Complete t0; the workflow should advance and schedule t1.
        Task task = polled.get(0);
        task.getOutputData().put("key1", "value1");
        task.setStatus(Status.COMPLETED);
        taskClient.updateTask(new TaskResult(task));

        polled = taskClient.batchPollTasksByTaskType(t0.getName(), "test", 1, 100);
        assertNotNull(polled);
        assertTrue(polled.toString(), polled.isEmpty());

        workflow = workflowClient.getWorkflow(workflowId, true);
        assertNotNull(workflow);
        assertEquals(WorkflowStatus.RUNNING, workflow.getStatus());
        assertEquals(2, workflow.getTasks().size());
        assertEquals(t0.getTaskReferenceName(), workflow.getTasks().get(0).getReferenceTaskName());
        assertEquals(t1.getTaskReferenceName(), workflow.getTasks().get(1).getReferenceTaskName());
        assertEquals(Status.COMPLETED, workflow.getTasks().get(0).getStatus());
        assertEquals(Status.SCHEDULED, workflow.getTasks().get(1).getStatus());

        Task taskById = taskClient.getTaskDetails(task.getTaskId());
        assertNotNull(taskById);
        assertEquals(task.getTaskId(), taskById.getTaskId());

        // Give the search index time to catch up before querying it.
        Thread.sleep(1000);

        SearchResult<WorkflowSummary> searchResult =
                workflowClient.search("workflowType='" + def.getName() + "'");
        assertNotNull(searchResult);
        assertEquals(1, searchResult.getTotalHits());
        assertEquals(workflow.getWorkflowId(), searchResult.getResults().get(0).getWorkflowId());

        SearchResult<Workflow> searchResultV2 =
                workflowClient.searchV2("workflowType='" + def.getName() + "'");
        assertNotNull(searchResultV2);
        assertEquals(1, searchResultV2.getTotalHits());
        assertEquals(workflow.getWorkflowId(), searchResultV2.getResults().get(0).getWorkflowId());

        SearchResult<WorkflowSummary> searchResultAdvanced =
                workflowClient.search(0, 1, null, null, "workflowType='" + def.getName() + "'");
        assertNotNull(searchResultAdvanced);
        assertEquals(1, searchResultAdvanced.getTotalHits());
        assertEquals(
                workflow.getWorkflowId(),
                searchResultAdvanced.getResults().get(0).getWorkflowId());

        SearchResult<Workflow> searchResultV2Advanced =
                workflowClient.searchV2(0, 1, null, null, "workflowType='" + def.getName() + "'");
        assertNotNull(searchResultV2Advanced);
        assertEquals(1, searchResultV2Advanced.getTotalHits());
        assertEquals(
                workflow.getWorkflowId(),
                searchResultV2Advanced.getResults().get(0).getWorkflowId());

        SearchResult<TaskSummary> taskSearchResult =
                taskClient.search("taskType='" + t0.getName() + "'");
        assertNotNull(taskSearchResult);
        // FIX: previously asserted searchResultV2Advanced.getTotalHits() (copy-paste); the hit
        // count of the task search result just obtained is what this step must verify.
        assertEquals(1, taskSearchResult.getTotalHits());
        assertEquals(t0.getName(), taskSearchResult.getResults().get(0).getTaskDefName());

        SearchResult<TaskSummary> taskSearchResultAdvanced =
                taskClient.search(0, 1, null, null, "taskType='" + t0.getName() + "'");
        assertNotNull(taskSearchResultAdvanced);
        assertEquals(1, taskSearchResultAdvanced.getTotalHits());
        assertEquals(t0.getName(), taskSearchResultAdvanced.getResults().get(0).getTaskDefName());

        SearchResult<Task> taskSearchResultV2 =
                taskClient.searchV2("taskType='" + t0.getName() + "'");
        assertNotNull(taskSearchResultV2);
        // FIX: previously asserted searchResultV2Advanced.getTotalHits() (copy-paste); verify
        // the v2 task search result instead.
        assertEquals(1, taskSearchResultV2.getTotalHits());
        assertEquals(
                t0.getTaskReferenceName(),
                taskSearchResultV2.getResults().get(0).getReferenceTaskName());

        SearchResult<Task> taskSearchResultV2Advanced =
                taskClient.searchV2(0, 1, null, null, "taskType='" + t0.getName() + "'");
        assertNotNull(taskSearchResultV2Advanced);
        assertEquals(1, taskSearchResultV2Advanced.getTotalHits());
        assertEquals(
                t0.getTaskReferenceName(),
                taskSearchResultV2Advanced.getResults().get(0).getReferenceTaskName());

        // Terminate the run, then restart it; it should be RUNNING again with t0 rescheduled.
        workflowClient.terminateWorkflow(workflowId, "terminate reason");
        workflow = workflowClient.getWorkflow(workflowId, true);
        assertNotNull(workflow);
        assertEquals(WorkflowStatus.TERMINATED, workflow.getStatus());

        workflowClient.restart(workflowId, false);
        workflow = workflowClient.getWorkflow(workflowId, true);
        assertNotNull(workflow);
        assertEquals(WorkflowStatus.RUNNING, workflow.getStatus());
        assertEquals(1, workflow.getTasks().size());
    }
}
8,287
0
Create_ds/conductor-community/test-util/src/test/java/com/netflix/conductor/common
Create_ds/conductor-community/test-util/src/test/java/com/netflix/conductor/common/config/TestObjectMapperConfiguration.java
/*
 * <p>
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
 * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations under the License.
 */
package com.netflix.conductor.common.config;

import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;

import com.fasterxml.jackson.databind.ObjectMapper;

/**
 * Spring test configuration exposing the standard Conductor {@link ObjectMapper} as a bean, so
 * tests can inject the same mapper configuration the server uses.
 */
@Configuration
public class TestObjectMapperConfiguration {

    /** Builds the Conductor-configured {@link ObjectMapper} via the shared provider. */
    @Bean
    public ObjectMapper testObjectMapper() {
        ObjectMapperProvider provider = new ObjectMapperProvider();
        return provider.getObjectMapper();
    }
}
8,288
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/RedshiftNotification.java
/*
 * Copyright (c) 2003, PostgreSQL Global Development Group
 * See the LICENSE file in the project root for more information.
 */

package com.amazon.redshift;

/**
 * This interface defines the public Redshift extension for Notifications.
 *
 * <p>Implementations carry a notification delivered by the backend: the notification's name, the
 * process id of the notifying backend, and an optional payload parameter.
 */
public interface RedshiftNotification {

  /**
   * Returns name of this notification.
   *
   * @return name of this notification
   * @since 7.3
   */
  String getName();

  /**
   * Returns the process id of the backend process making this notification.
   *
   * @return process id of the backend process making this notification
   * @since 7.3
   */
  int getPID();

  /**
   * Returns additional information from the notifying process. This feature has only been
   * implemented in server versions 9.0 and later, so previous versions will always return an empty
   * String.
   *
   * @return additional information from the notifying process
   * @since 8.0
   */
  String getParameter();
}
8,289
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/RedshiftProperty.java
/* * Copyright (c) 2004, PostgreSQL Global Development Group * See the LICENSE file in the project root for more information. */ package com.amazon.redshift; import com.amazon.redshift.util.GT; import com.amazon.redshift.util.RedshiftException; import com.amazon.redshift.util.RedshiftState; import java.sql.Connection; import java.sql.DriverPropertyInfo; import java.util.HashMap; import java.util.Map; import java.util.Properties; import java.util.Set; import java.util.Collections; import java.util.HashSet; import java.util.Arrays; /** * All connection parameters that can be either set in JDBC URL, in Driver properties or in * datasource setters. */ public enum RedshiftProperty { /** * When using the V3 protocol the driver monitors changes in certain server configuration * parameters that should not be touched by end users. The {@code client_encoding} setting is set * by the driver and should not be altered. If the driver detects a change it will abort the * connection. */ ALLOW_ENCODING_CHANGES( "allowencodingchanges", "true", "Allow for changes in client_encoding"), /** * The application name. */ APPLICATION_NAME( "applicationname", null, "Name of the Application"), /** * Assume the server is at least that version. */ ASSUME_MIN_SERVER_VERSION( "assumeminserverversion", null, "Assume the server is at least that version"), /** * The authentication profile referring to connection props as JSON in the coral service. */ AUTH_PROFILE( "authprofile", null, "Authentication profile having connection props as JSON"), /** * Specifies what the driver should do if a query fails. In {@code autosave=always} mode, JDBC driver sets a savepoint before each query, * and rolls back to that savepoint in case of failure. In {@code autosave=never} mode (default), no savepoint dance is made ever. 
* In {@code autosave=conservative} mode, savepoint is set for each query, however the rollback is done only for rare cases * like 'cached statement cannot change return type' or 'statement XXX is not valid' so JDBC driver rollsback and retries */ AUTOSAVE( "autosave", "never", "Specifies what the driver should do if a query fails. In autosave=always mode, JDBC driver sets a savepoint before each query, " + "and rolls back to that savepoint in case of failure. In autosave=never mode (default), no savepoint dance is made ever. " + "In autosave=conservative mode, safepoint is set for each query, however the rollback is done only for rare cases" + " like 'cached statement cannot change return type' or 'statement XXX is not valid' so JDBC driver rollsback and retries", false, new String[] {"always", "never", "conservative"}), /** * Use binary format for sending and receiving data if possible. */ BINARY_TRANSFER( "binarytransfer", "true", "Use binary format for sending and receiving data if possible"), /** * Comma separated list of types to disable binary transfer. Either OID numbers or names. * Overrides values in the driver default set and values set with binaryTransferEnable. */ BINARY_TRANSFER_DISABLE( "binarytransferdisable", "", "Comma separated list of types to disable binary transfer. Either OID numbers or names. Overrides values in the driver default set and values set with binaryTransferEnable."), /** * Comma separated list of types to enable binary transfer. Either OID numbers or names */ BINARY_TRANSFER_ENABLE( "binarytransferenable", "", "Comma separated list of types to enable binary transfer. Either OID numbers or names"), /** * Cancel command is sent out of band over its own connection, so cancel message can itself get * stuck. * This property controls "connect timeout" and "socket timeout" used for cancel commands. * The timeout is specified in seconds. Default value is 10 seconds. 
*/ CANCEL_SIGNAL_TIMEOUT( "cancelsignaltimeout", "10", "The timeout that is used for sending cancel command."), /** * Determine whether SAVEPOINTS used in AUTOSAVE will be released per query or not */ CLEANUP_SAVEPOINTS( "cleanupsavepoints", "false", "Determine whether SAVEPOINTS used in AUTOSAVE will be released per query or not", false, new String[] {"true", "false"}), /** * <p>The compression method used.</p> */ COMPRESSION( "compression", "lz4:1", "The compression method used for wire protocol communication between Redshift server and the client/driver"), /** * <p>The timeout value used for socket connect operations. If connecting to the server takes longer * than this value, the connection is broken.</p> * * <p>The timeout is specified in seconds and a value of zero means that it is disabled.</p> */ CONNECT_TIMEOUT( "connecttimeout", "10", "The timeout value used for socket connect operations."), /** * <p>Configures the session level timezone to one of the two logical values LOCAL or SERVER</p> */ CONNECTION_TIMEZONE( "connectiontimezone", "LOCAL", "Configures the session level timezone to one of the two logical values LOCAL or SERVER"), /** * Specify the schema (or several schema separated by commas) to be set in the search-path. This schema will be used to resolve * unqualified object names used in statements over this connection. */ CURRENT_SCHEMA( "currentschema", null, "Specify the schema (or several schema separated by commas) to be set in the search-path"), /** * Specifies the maximum number of fields to be cached per connection. A value of {@code 0} disables the cache. */ DATABASE_METADATA_CACHE_FIELDS( "databasemetadatacachefields", "65536", "Specifies the maximum number of fields to be cached per connection. A value of {@code 0} disables the cache."), /** * Specifies the maximum size (in megabytes) of fields to be cached per connection. A value of {@code 0} disables the cache. 
*/ DATABASE_METADATA_CACHE_FIELDS_MIB( "databasemetadatacachefieldsmib", "5", "Specifies the maximum size (in megabytes) of fields to be cached per connection. A value of {@code 0} disables the cache."), /** * Returns metadata for the connected database only. * Application is ready to accept metadata from all databases, * can set the value of this parameter "false". * Default value is "true" means application gets metadata from single * databases. */ DATABASE_METADATA_CURRENT_DB_ONLY( "databasemetadatacurrentdbonly", "true", "Control the behavior of metadata API to return data from all accessible databases or only from connected database"), /** * Default parameter for {@link java.sql.Statement#getFetchSize()}. A value of {@code 0} means * that need fetch all rows at once */ DEFAULT_ROW_FETCH_SIZE( "defaultrowfetchsize", "0", "Positive number of rows that should be fetched from the database when more rows are needed for ResultSet by each fetch iteration"), /** * Added for backward compatibility. * */ BLOCKING_ROWS_MODE( "blockingrowsmode", "0", "Positive number of rows that should be fetched from the database when more rows are needed for ResultSet by each fetch iteration"), /** * Enable optimization that disables column name sanitiser. */ DISABLE_COLUMN_SANITISER( "disablecolumnsanitiser", "false", "Enable optimization that disables column name sanitiser"), /** * This option specifies whether the driver submits a new database query when using the * Connection.isValid() method to determine whether the database connection is active. * * true: The driver does not submit a query when using Connection.isValid() to * determine whether the database connection is active. This may cause the driver * to incorrectly identify the database connection as active if the database server * has shut down unexpectedly. * * false: The driver submits a query when using Connection.isValid() to * determine whether the database connection is active. 
* When using the V3 protocol the driver monitors changes in certain server configuration * parameters that should not be touched by end users. The {@code client_encoding} setting is set * by the driver and should not be altered. If the driver detects a change it will abort the * connection. */ DISABLE_ISVALID_QUERY( "disableisvalidquery", "false", "Disable isValid query"), /** * The Redshift fetch rows using a ring buffer on a separate thread. * */ ENABLE_FETCH_RING_BUFFER("enablefetchringbuffer", "true", "The Redshift fetch rows using a ring buffer on a separate thread"), /** * Use generated statement name cursor for prepared statements. * */ ENABLE_GENERATED_NAME_FOR_PREPARED_STATEMENT("enablegeneratedname", "true", "The Redshift uses generated statement name and portal name"), /** * "true" means driver supports multiple SQL commands (semicolon separated) in a Statement object. * "false" means driver throws an exception when see multiple SQL commands. * Default value is "true". */ ENABLE_MULTI_SQL_SUPPORT( "enablemultisqlsupport", "true", "Control the behavior of semicolon separated SQL commands in a Statement"), /** * The statement cache enable/disable. * */ ENABLE_STATEMENT_CACHE("enablestatementcache", "false", "The Redshift statement cache using SQL as key"), /** * Specifies how the driver transforms JDBC escape call syntax into underlying SQL, for invoking procedures or functions. (backend &gt;= 11) * In {@code escapeSyntaxCallMode=select} mode (the default), the driver always uses a SELECT statement (allowing function invocation only). * In {@code escapeSyntaxCallMode=callIfNoReturn} mode, the driver uses a CALL statement (allowing procedure invocation) if there is no return parameter specified, otherwise the driver uses a SELECT statement. * In {@code escapeSyntaxCallMode=call} mode, the driver always uses a CALL statement (allowing procedure invocation only). 
*/ ESCAPE_SYNTAX_CALL_MODE( "escapesyntaxcallmode", "call", "Specifies how the driver transforms JDBC escape call syntax into underlying SQL, for invoking procedures or functions. (backend >= 11)" + "In escapeSyntaxCallMode=select mode (the default), the driver always uses a SELECT statement (allowing function invocation only)." + "In escapeSyntaxCallMode=callIfNoReturn mode, the driver uses a CALL statement (allowing procedure invocation) if there is no return parameter specified, otherwise the driver uses a SELECT statement." + "In escapeSyntaxCallMode=call mode, the driver always uses a CALL statement (allowing procedure invocation only).", false, new String[] {"select", "callIfNoReturn", "call"}), /** * Specifies size of buffer during fetching result set. Can be specified as specified size or * percent of heap memory. */ FETCH_RING_BUFFER_SIZE( "fetchringbuffersize", "1G", "Specifies size of ring buffer during fetching result set. Can be specified as specified size or percent of heap memory."), /** * Force one of * <ul> * <li>SSPI (Windows transparent single-sign-on)</li> * <li>GSSAPI (Kerberos, via JSSE)</li> * </ul> * to be used when the server requests Kerberos or SSPI authentication. */ GSS_LIB( "gsslib", "auto", "Force SSSPI or GSSAPI", false, new String[] {"auto", "sspi", "gssapi"}), /** * Enable mode to filter out the names of database objects for which the current user has no privileges * granted from appearing in the DatabaseMetaData returned by the driver. */ HIDE_UNPRIVILEGED_OBJECTS( "hideunprivilegedobjects", "false", "Enable hiding of database objects for which the current user has no privileges granted from the DatabaseMetaData"), HOST_RECHECK_SECONDS( "hostrecheckseconds", "10", "Specifies period (seconds) after which the host status is checked again in case it has changed"), /** * The JDBC INI file name. */ INI_FILE("inifile", null, "The JDBC INI file. Easy to configure connection properties."), /** * The JDBC INI file section name. 
* Section name to use for connection configuration. */ INI_SECTION("inisection", null, "The JDBC INI file section name."), /** * Specifies the name of the JAAS system or application login configuration. */ JAAS_APPLICATION_NAME( "jaasapplicationname", null, "Specifies the name of the JAAS system or application login configuration."), /** * Flag to enable/disable obtaining a GSS credential via JAAS login before authenticating. * Useful if setting system property javax.security.auth.useSubjectCredsOnly=false * or using native GSS with system property sun.security.jgss.native=true */ JAAS_LOGIN( "jaaslogin", "true", "Login with JAAS before doing GSSAPI authentication"), /** * The Kerberos service name to use when authenticating with GSSAPI. This is equivalent to libpq's * PGKRBSRVNAME environment variable. */ KERBEROS_SERVER_NAME( "kerberosservername", null, "The Kerberos service name to use when authenticating with GSSAPI."), LOAD_BALANCE_HOSTS( "loadbalancehosts", "false", "If disabled hosts are connected in the given order. If enabled hosts are chosen randomly from the set of suitable candidates"), LOG_PATH( "logpath", null, "File Path output of the Logger"), MAX_LOG_FILE_SIZE( "maxlogfilesize", null, "Maximum single log file size"), MAX_LOG_FILE_COUNT( "maxlogfilecount", null, "Maximum number of log files"), /** * Added for backward compatibility. */ LOG_LEVEL( "loglevel", null, "Log level of the driver", false, new String[] {"OFF", "FATAL", "ERROR", "WARNING", "INFO", "FUNCTION", "DEBUG", "TRACE"}), /** * Added for backward compatibility. */ DSI_LOG_LEVEL( "dsiloglevel", null, "Log level of the driver", false, new String[] {"OFF", "FATAL", "ERROR", "WARNING", "INFO", "FUNCTION", "DEBUG", "TRACE"}), /** * Specify how long to wait for establishment of a database connection. The timeout is specified * in seconds. 
*/ LOGIN_TIMEOUT( "logintimeout", "0", "Specify how long to wait for establishment of a database connection."), /** * Whether to include full server error detail in exception messages. */ LOG_SERVER_ERROR_DETAIL( "logservererrordetail", "true", "Include full server error detail in exception messages. If disabled then only the error itself will be included."), /** * When connections that are not explicitly closed are garbage collected, log the stacktrace from * the opening of the connection to trace the leak source. */ LOG_UNCLOSED_CONNECTIONS( "logunclosedconnections", "false", "When connections that are not explicitly closed are garbage collected, log the stacktrace from the opening of the connection to trace the leak source"), /** * Specifies size of buffer during fetching result set. Can be specified as specified size or * percent of heap memory. */ MAX_RESULT_BUFFER( "maxresultbuffer", null, "Specifies size of buffer during fetching result set. Can be specified as specified size or percent of heap memory."), /** * Specify 'options' connection initialization parameter. * The value of this parameter may contain spaces and other special characters or their URL representation. */ OPTIONS( "options", null, "Specify 'options' connection initialization parameter."), /** * * Override Schema Pattern Type used in getTables() */ OVERRIDE_SCHEMA_PATTERN_TYPE( "overrideschemapatterntype", null, "Override the type of query used in getTables calls"), /** * Password to use when authenticating. */ PASSWORD( "password", null, "Password to use when authenticating.", false), /** * Password to use when authenticating. It's an alias for the Password. */ PWD( "pwd", null, "Password to use when authenticating.", false), /** * Set the query group on a connection. */ QUERY_GROUP( "querygroup", null, "Assign a query to a queue at runtime by assigning your query to the appropriate query group"), /** * Database name to connect to (may be specified directly in the JDBC URL). 
*/ DBNAME( "dbname", null, "Database name to connect to (may be specified directly in the JDBC URL)", true), /** * Hostname of the Redshift server (may be specified directly in the JDBC URL). */ HOST( "host", null, "Hostname of the Redshift server (may be specified directly in the JDBC URL)", false), /** * Port of the Redshift server (may be specified directly in the JDBC URL). */ PORT( "port", null, "Port of the Redshift server (may be specified directly in the JDBC URL)"), /** * <p>Specifies which mode is used to execute queries to database: simple means ('Q' execute, no parse, no bind, text mode only), * extended means always use bind/execute messages, extendedForPrepared means extended for prepared statements only, * extendedCacheEverything means use extended protocol and try cache every statement (including Statement.execute(String sql)) in a query cache.</p> * * <p>This mode is meant for debugging purposes and/or for cases when extended protocol cannot be used (e.g. logical replication protocol)</p> */ PREFER_QUERY_MODE( "preferquerymode", "extended", "Specifies which mode is used to execute queries to database: simple means ('Q' execute, no parse, no bind, text mode only), " + "extended means always use bind/execute messages, extendedForPrepared means extended for prepared statements only, " + "extendedCacheEverything means use extended protocol and try cache every statement (including Statement.execute(String sql)) in a query cache.", false, new String[] {"extended", "extendedForPrepared", "extendedCacheEverything", "simple"}), /** * Specifies the maximum number of entries in cache of prepared statements. A value of {@code 0} * disables the cache. */ PREPARED_STATEMENT_CACHE_QUERIES( "preparedstatementcachequeries", "256", "Specifies the maximum number of entries in per-connection cache of prepared statements. A value of {@code 0} disables the cache."), /** * Specifies the maximum size (in megabytes) of the prepared statement cache. 
A value of {@code 0} * disables the cache. */ PREPARED_STATEMENT_CACHE_SIZE_MIB( "preparedstatementcachesizemib", "5", "Specifies the maximum size (in megabytes) of a per-connection prepared statement cache. A value of {@code 0} disables the cache."), /** * Sets the default threshold for enabling server-side prepare. A value of {@code -1} stands for * forceBinary */ PREPARE_THRESHOLD( "preparethreshold", "5", "Statement prepare threshold. A value of {@code -1} stands for forceBinary"), /** * Force use of a particular protocol version when connecting, if set, disables protocol version * fallback. */ PROTOCOL_VERSION( "protocolversion", null, "Force use of a particular protocol version when connecting, currently only version 3 is supported.", false, new String[] {"3"}), /** * Certain database versions perform a silent rollback instead of commit in case the transaction was in a failed state. */ RAISE_EXCEPTION_ON_SILENT_ROLLBACK( "raiseexceptiononsilentrollback", "false", "Certain database versions perform a silent rollback instead of commit in case the transaction was in a failed state"), /** * Puts this connection in read-only mode. */ READ_ONLY( "readonly", "false", "Puts this connection in read-only mode"), /** * Connection parameter to control behavior when * {@link Connection#setReadOnly(boolean)} is set to {@code true}. */ READ_ONLY_MODE( "readonlymode", "always", // transaction "Controls the behavior when a connection is set to be read only, one of 'ignore', 'transaction', or 'always' " + "When 'ignore', setting readOnly has no effect. " + "When 'transaction' setting readOnly to 'true' will cause transactions to BEGIN READ ONLY if autocommit is 'false'. " + "When 'always' setting readOnly to 'true' will set the session to READ ONLY if autoCommit is 'true' " + "and the transaction to BEGIN READ ONLY if autocommit is 'false'.", false, new String[] {"ignore", "transaction", "always"}), /** * Socket read buffer size (SO_RECVBUF). 
A value of {@code -1}, which is the default, means system * default. */ RECEIVE_BUFFER_SIZE( "receivebuffersize", "-1", "Socket read buffer size"), /** * <p>Connection parameter passed in the startup message. This parameter accepts two values; "true" * and "database". Passing "true" tells the backend to go into walsender mode, wherein a small set * of replication commands can be issued instead of SQL statements. Only the simple query protocol * can be used in walsender mode. Passing "database" as the value instructs walsender to connect * to the database specified in the dbname parameter, which will allow the connection to be used * for logical replication from that database.</p> * <p>Parameter should be use together with {@link RedshiftProperty#ASSUME_MIN_SERVER_VERSION} with * parameter &gt;= 9.4 (backend &gt;= 9.4)</p> */ REPLICATION( "replication", null, "Connection parameter passed in startup message, one of 'true' or 'database' " + "Passing 'true' tells the backend to go into walsender mode, " + "wherein a small set of replication commands can be issued instead of SQL statements. " + "Only the simple query protocol can be used in walsender mode. " + "Passing 'database' as the value instructs walsender to connect " + "to the database specified in the dbname parameter, " + "which will allow the connection to be used for logical replication " + "from that database. " + "(backend >= 9.4)"), /** * Configure optimization to enable batch insert re-writing. */ REWRITE_BATCHED_INSERTS( "rewritebatchedinserts", "false", "Enable optimization to rewrite and collapse compatible INSERT statements that are batched."), /** * Configure optimization to batch insert size re-writing. * This must be power of 2. */ REWRITE_BATCHED_INSERTS_SIZE( "rewritebatchedinsertssize", "128", "Enable optimization size to rewrite and collapse compatible INSERT statements that are batched. This must be power of 2"), /** * Socket write buffer size (SO_SNDBUF). 
A value of {@code -1}, which is the default, means system * default. */ SEND_BUFFER_SIZE( "sendbuffersize", "-1", "Socket write buffer size"), /** * Socket factory used to create socket. A null value, which is the default, means system default. */ SOCKET_FACTORY( "socketfactory", null, "Specify a socket factory for socket creation"), /** * The String argument to give to the constructor of the Socket Factory. * @deprecated use {@code ..Factory(Properties)} constructor. */ @Deprecated SOCKET_FACTORY_ARG( "socketfactoryarg", null, "Argument forwarded to constructor of SocketFactory class."), /** * The timeout value used for socket read operations. If reading from the server takes longer than * this value, the connection is closed. This can be used as both a brute force global query * timeout and a method of detecting network problems. The timeout is specified in seconds and a * value of zero means that it is disabled. */ SOCKET_TIMEOUT( "sockettimeout", "0", "The timeout value used for socket read operations."), /** * Control use of SSL: empty or {@code true} values imply {@code sslmode==verify-full} */ SSL( "ssl", "true", "Control use of SSL (any non-null value causes SSL to be required)"), /** * File containing the SSL Certificate. Default will be the file {@code redshift.crt} in {@code * $HOME/.redshift} (*nix) or {@code %APPDATA%\redshift} (windows). */ SSL_CERT( "sslcert", null, "The location of the client's SSL certificate"), /** * Classname of the SSL Factory to use (instance of {@code javax.net.ssl.SSLSocketFactory}). */ SSL_FACTORY( "sslfactory", null, "Provide a SSLSocketFactory class when using SSL."), /** * The String argument to give to the constructor of the SSL Factory. * @deprecated use {@code ..Factory(Properties)} constructor. */ @Deprecated SSL_FACTORY_ARG( "sslfactoryarg", null, "Argument forwarded to constructor of SSLSocketFactory class."), /** * Classname of the SSL HostnameVerifier to use (instance of {@code * javax.net.ssl.HostnameVerifier}). 
*/ SSL_HOSTNAME_VERIFIER( "sslhostnameverifier", null, "A class, implementing javax.net.ssl.HostnameVerifier that can verify the server"), /** * File containing the SSL Key. Default will be the file {@code redshift.pk8} in {@code * $HOME/.redshift} (*nix) or {@code %APPDATA%\redshift} (windows). */ SSL_KEY( "sslkey", null, "The location of the client's PKCS#8 SSL key"), /** * Parameter governing the use of SSL. The allowed values are {@code disable}, {@code allow}, * {@code prefer}, {@code require}, {@code verify-ca}, {@code verify-full}. * If {@code ssl} property is empty or set to {@code true} it implies {@code verify-full}. * Default mode is "require" */ SSL_MODE( "sslmode", null, "Parameter governing the use of SSL", false, new String[] {"disable", "allow", "prefer", "require", "verify-ca", "verify-full"}), /** * Added for backward compatibility. */ AUTH_MECH( "authmech", null, "Parameter governing the use of SSL. Alias for sslMode", false, new String[] {"disable", "allow", "prefer", "require", "verify-ca", "verify-full"}), /** * The SSL password to use in the default CallbackHandler. */ SSL_PASSWORD( "sslpassword", null, "The password for the client's ssl key (ignored if sslpasswordcallback is set)"), /** * The classname instantiating {@code javax.security.auth.callback.CallbackHandler} to use. */ SSL_PASSWORD_CALLBACK( "sslpasswordcallback", null, "A class, implementing javax.security.auth.callback.CallbackHandler that can handle PassworCallback for the ssl password."), /** * File containing the root certificate when validating server ({@code sslmode} = {@code * verify-ca} or {@code verify-full}). Default will be the file {@code root.crt} in {@code * $HOME/.redshift} (*nix) or {@code %APPDATA%\redshift} (windows). */ SSL_ROOT_CERT( "sslrootcert", null, "The location of the root certificate for authenticating the server."), /** * The SSL Truststore path key. 
*/ SSL_TRUSTSTORE_PATH_KEY( "ssltruststorepath", null, "The SSL Truststore path key."), /** * The SSL Truststore path key. */ SSL_TRUSTSTORE_PWD_KEY( "ssltruststore ", null, "The SSL Truststore password key."), /** * Specifies the name of the SSPI service class that forms the service class part of the SPN. The * default, {@code REDSHIFT}, is almost always correct. */ SSPI_SERVICE_CLASS( "sspiserviceclass", "REDSHIFT", "The Windows SSPI service class for SPN"), /** * Bind String to either {@code unspecified} or {@code varchar}. Default is {@code varchar} for * 8.0+ backends. */ STRING_TYPE( "stringtype", "unspecified", "The type to bind String parameters as (usually 'varchar', 'unspecified' allows implicit casting to other types)", false, new String[] {"unspecified", "varchar"}), TARGET_SERVER_TYPE( "targetservertype", "any", "Specifies what kind of server to connect", false, new String [] {"any", "primary", "master", "slave", "secondary", "preferSlave", "preferSecondary"}), /** * Enable or disable TCP keep-alive. The default is {@code true}. */ TCP_KEEP_ALIVE( "tcpkeepalive", "true", "Enable or disable TCP keep-alive. The default is {@code true}."), /** * Specifies the length to return for types of unknown length. */ UNKNOWN_LENGTH( "unknownlength", Integer.toString(Integer.MAX_VALUE), "Specifies the length to return for types of unknown length"), /** * Username to connect to the database as. */ USER( "user", null, "Username to connect to the database as.", true), /** * Username to connect to the database as. It's an alias for the USER. */ UID( "uid", null, "Username to connect to the database as.", true), /** * Use SPNEGO in SSPI authentication requests. */ USE_SPNEGO( "usespnego", "false", "Use SPNEGO in SSPI authentication requests"), // IAM properties /** * The name of the Redshift cluster to connect to. * Used only by JDBC driver internally only. 
*/ CLUSTER_IDENTIFIER("clusterid", null, "The name of the Redshift cluster to connect to"), /** * The length of time (in seconds) until the temporary IAM credentials expire. */ IAM_DURATION("iamduration", null, "The length of time (in seconds) until the temporary IAM credentials expire."), /** * The IAM access key id for the IAM user or role. */ IAM_ACCESS_KEY_ID("accesskeyid", null, "The IAM access key id for the IAM user or role"), /** * Indicates whether use IAM authentication. * Used only by JDBC driver internally only. */ IAM_AUTH("iamauth", "false", "Indicates whether use IAM authentication"), /** * Disable IAM credentials cache. * Enable cache gives protection against throttling API calls. * Default value is false. */ IAM_DISABLE_CACHE("iamdisablecache", "false", "Indicates to disable credential cache. Enable cache gives protection against throttling API calls"), /** * Indicates the identity namespace to be used for connection. * It is used by Redshift server to decide which IdC instance to use. */ IDC_IDENTITY_NAMESPACE("identity_namespace", null, "Indicates the identity namespace which helps to decide which IdC instance to use in Redshift server"), /** * Indicates the token type to be used for connection. * It is an optional param used in the CommonCredentialsProvider plugin to allow the client to set token type */ TOKEN_TYPE("token_type", null, "Indicates the optional token type to be used for connection"), /** * Indicates the display name to be used for the client that is using IdC browser auth plugin. */ IDC_CLIENT_DISPLAY_NAME("idc_client_display_name", "Amazon Redshift JDBC driver", "Indicates the display name to be used for the client that is using IdC browser auth plugin"), /** * The AWS region where the cluster is located. * Used only by JDBC driver internally only. */ AWS_REGION("region", null, "The AWS region where the cluster is located"), /** * The Redshift endpoint url. * Used only AWS internal team. 
*/ ENDPOINT_URL("endpointurl", null, "The Redshift endpoint url"), /** * The STS endpoint url. */ STS_ENDPOINT_URL("stsendpointurl", null, "The STS endpoint url"), /** * The AWS profile name for credentials. */ AWS_PROFILE("profile", null, "The AWS profile name for credentials"), /** * The IAM secret key for the IAM user or role. */ IAM_SECRET_ACCESS_KEY("secretaccesskey", null, "The IAM secret key for the IAM user or role"), /** * The IAM security token for an IAM user or role. */ IAM_SESSION_TOKEN("sessiontoken", null, "The IAM security token for an IAM user or role"), /** * The fully qualified class path for a class that implements AWSCredentialsProvider. */ CREDENTIALS_PROVIDER("plugin_name", null, "The fully qualified class path for a class that implements AWSCredentialsProvider"), /** * Indicates whether the user should be created if not exists. */ USER_AUTOCREATE("autocreate", null, "Indicates whether the user should be created if not exists"), /** * The database user name. */ DB_USER("dbuser", null, "The database user name"), /** * A comma delimited database group names. */ DB_GROUPS("dbgroups", null, "A comma delimited database group names"), /** * Regex for filtering out dbGroups. */ DB_GROUPS_FILTER("dbgroupsfilter", null, "Regex for filtering out dbGroups from final result"), /** * Forces database group names to be lower case. */ FORCE_LOWERCASE("forcelowercase", null, "Forces database group names to be lower case"), /** * Use the IDP Groups in the Redshift. * This is supported by new GetClusterCredentialsV2 API. * Default value is false for backward compatibility, which uses * STS API and GetClusterCredentials for user federation and explictily * specifying DbGroups in connection. */ GROUP_FEDERATION("groupfederation", "false", "Use the IDP Groups in the Redshift"), /** * The Oauth access token for an idp connection. 
*/ WEB_IDENTITY_TOKEN("webidentitytoken", null, "The Oauth access token for an idp connection"), /** * The name of the Redshift Native Auth Provider. */ PROVIDER_NAME("providername", null, "The name of the Redshift Native Auth Provider"), /** * Set true when end point host is for serverless. * Driver auto detect from the given host. * For NLB, it won't so user can set explicitly. * Default value is false. */ IS_SERVERLESS("isserverless", "false", "Redshift end-point is serverless or provisional."), /** * The account ID of the serverless. * Driver auto detect from the given host. * For NLB, it won't so user can set explicitly. * Default value is null. * */ SERVERLESS_ACCT_ID("serverlessacctid", null, "The account ID of the serverless"), /** * The work group of the serverless. * Driver auto detect from the given host. * For NLB, it won't so user can set explicitly. * Default value is null. * */ SERVERLESS_WORK_GROUP("serverlessworkgroup", null, "The work group of the serverless"), ; private final String name; private final String defaultValue; private final boolean required; private final String description; private final String[] choices; private final boolean deprecated; private static final Set<String> publicProperties = Collections.unmodifiableSet( new HashSet<>(Arrays.asList ( "AccessKeyID" ,"AllowDBUserOverride" ,"App_ID" ,"App_Name" ,"ApplicationName" ,"AuthProfile" ,"AutoCreate" ,"Client_ID" ,"Client_Secret" ,"client_protocol_version" ,"ClusterID" ,"connectTimeout" ,"databaseMetadataCurrentDbOnly" ,"DbUser" ,"DbGroups" ,"DBNAME" ,"defaultRowFetchSize" ,"DisableIsValidQuery" ,"enableFetchReadAndProcessBuffers" ,"enableFetchRingBuffer" ,"enableMultiSqlSupport" ,"endpointUrl" ,"fetchRingBufferSize" ,"ForceLowercase" ,"groupFederation" ,"HOST" ,"IAMDisableCache" ,"IAMDuration" ,"IdP_Host" ,"IdP_Port" ,"IdP_Tenant" ,"IdP_Response_Timeout" ,"IniFile" ,"IniSection" ,"isServerless" ,"Login_URL" ,"loginTimeout" ,"loginToRp" ,"LogLevel" ,"LogPath" 
,"OverrideSchemaPatternType" ,"Partner_SPID" ,"Password" ,"Plugin_Name" ,"PORT" ,"Preferred_Role" ,"Profile" ,"PWD" ,"queryGroup" ,"readOnly" ,"Region" ,"reWriteBatchedInserts" ,"reWriteBatchedInsertsSize" ,"roleArn" ,"roleSessionName" ,"scope" ,"SecretAccessKey" ,"SessionToken" ,"serverlessAcctId" ,"serverlessWorkGroup" ,"socketFactory" ,"socketTimeout" ,"SSL" ,"SSL_Insecure" ,"SSLCert" ,"SSLFactory" ,"SSLKey" ,"SSLMode" ,"SSLPassword" ,"SSLRootCert" ,"StsEndpointUrl" ,"tcpKeepAlive" ,"UID" ,"User" ,"Username" ,"webIdentityToken" ) )); RedshiftProperty(String name, String defaultValue, String description) { this(name, defaultValue, description, false); } RedshiftProperty(String name, String defaultValue, String description, boolean required) { this(name, defaultValue, description, required, (String[]) null); } RedshiftProperty(String name, String defaultValue, String description, boolean required, String[] choices) { this.name = name; this.defaultValue = defaultValue; this.required = required; this.description = description; this.choices = choices; try { this.deprecated = RedshiftProperty.class.getField(name()).getAnnotation(Deprecated.class) != null; } catch (NoSuchFieldException e) { throw new RuntimeException(e); } } private static final Map<String, RedshiftProperty> PROPS_BY_NAME = new HashMap<String, RedshiftProperty>(); static { for (RedshiftProperty prop : RedshiftProperty.values()) { if (PROPS_BY_NAME.put(prop.getName(), prop) != null) { throw new IllegalStateException("Duplicate RedshiftProperty name: " + prop.getName()); } } } /** * Returns the name of the connection parameter. The name is the key that must be used in JDBC URL * or in Driver properties * * @return the name of the connection parameter */ public String getName() { return name; } /** * Returns the default value for this connection parameter. 
* * @return the default value for this connection parameter or null */ public String getDefaultValue() { return defaultValue; } /** * Returns whether this parameter is required. * * @return whether this parameter is required */ public boolean isRequired() { return required; } /** * Returns the description for this connection parameter. * * @return the description for this connection parameter */ public String getDescription() { return description; } /** * Returns the available values for this connection parameter. * * @return the available values for this connection parameter or null */ public String[] getChoices() { return choices; } /** * Returns whether this connection parameter is deprecated. * * @return whether this connection parameter is deprecated */ public boolean isDeprecated() { return deprecated; } /** * Returns the value of the connection parameters according to the given {@code Properties} or the * default value. * * @param properties properties to take actual value from * @return evaluated value for this connection parameter */ public String get(Properties properties) { return properties.getProperty(name, defaultValue); } /** * Set the value for this connection parameter in the given {@code Properties}. * * @param properties properties in which the value should be set * @param value value for this connection parameter */ public void set(Properties properties, String value) { if (value == null) { properties.remove(name); } else { properties.setProperty(name, value); } } /** * Return the boolean value for this connection parameter in the given {@code Properties}. * * @param properties properties to take actual value from * @return evaluated value for this connection parameter converted to boolean */ public boolean getBoolean(Properties properties) { return Boolean.valueOf(get(properties)); } /** * Return the int value for this connection parameter in the given {@code Properties}. 
Prefer the * use of {@link #getInt(Properties)} anywhere you can throw an {@link java.sql.SQLException}. * * @param properties properties to take actual value from * @return evaluated value for this connection parameter converted to int * @throws NumberFormatException if it cannot be converted to int. */ public int getIntNoCheck(Properties properties) { String value = get(properties); return Integer.parseInt(value); } /** * Return the int value for this connection parameter in the given {@code Properties}. * * @param properties properties to take actual value from * @return evaluated value for this connection parameter converted to int * @throws RedshiftException if it cannot be converted to int. */ public int getInt(Properties properties) throws RedshiftException { String value = get(properties); try { return Integer.parseInt(value); } catch (NumberFormatException nfe) { throw new RedshiftException(GT.tr("{0} parameter value must be an integer but was: {1}", getName(), value), RedshiftState.INVALID_PARAMETER_VALUE, nfe); } } /** * Return the {@code Integer} value for this connection parameter in the given {@code Properties}. * * @param properties properties to take actual value from * @return evaluated value for this connection parameter converted to Integer or null * @throws RedshiftException if unable to parse property as integer */ public Integer getInteger(Properties properties) throws RedshiftException { String value = get(properties); if (value == null) { return null; } try { return Integer.parseInt(value); } catch (NumberFormatException nfe) { throw new RedshiftException(GT.tr("{0} parameter value must be an integer but was: {1}", getName(), value), RedshiftState.INVALID_PARAMETER_VALUE, nfe); } } /** * Set the boolean value for this connection parameter in the given {@code Properties}. 
* * @param properties properties in which the value should be set * @param value boolean value for this connection parameter */ public void set(Properties properties, boolean value) { properties.setProperty(name, Boolean.toString(value)); } /** * Set the int value for this connection parameter in the given {@code Properties}. * * @param properties properties in which the value should be set * @param value int value for this connection parameter */ public void set(Properties properties, int value) { properties.setProperty(name, Integer.toString(value)); } /** * Test whether this property is present in the given {@code Properties}. * * @param properties set of properties to check current in * @return true if the parameter is specified in the given properties */ public boolean isPresent(Properties properties) { return getSetString(properties) != null; } /** * Convert this connection parameter and the value read from the given {@code Properties} into a * {@code DriverPropertyInfo}. * * @param properties properties to take actual value from * @return a DriverPropertyInfo representing this connection parameter */ public DriverPropertyInfo toDriverPropertyInfo(Properties properties) { DriverPropertyInfo propertyInfo = new DriverPropertyInfo(name, get(properties)); propertyInfo.required = required; propertyInfo.description = description; propertyInfo.choices = choices; return propertyInfo; } public static RedshiftProperty forName(String name) { return PROPS_BY_NAME.get(name); } /** * Return the property if exists but avoiding the default. Allowing the caller to detect the lack * of a property. * * @param properties properties bundle * @return the value of a set property */ public String getSetString(Properties properties) { Object o = properties.get(name); if (o instanceof String) { return (String) o; } return null; } /** * Return the public property * * @return the value of a set property */ public static Set<String> getPublicProperties() { return publicProperties; } }
8,290
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/RedshiftConnection.java
/*
 * Copyright (c) 2003, PostgreSQL Global Development Group
 * See the LICENSE file in the project root for more information.
 */

package com.amazon.redshift;

import com.amazon.redshift.copy.CopyManager;
import com.amazon.redshift.fastpath.Fastpath;
import com.amazon.redshift.jdbc.AutoSave;
import com.amazon.redshift.jdbc.PreferQueryMode;
import com.amazon.redshift.largeobject.LargeObjectManager;
import com.amazon.redshift.replication.RedshiftReplicationConnection;
import com.amazon.redshift.util.RedshiftObject;

import java.sql.Array;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.Map;

/**
 * This interface defines the public Redshift extensions to java.sql.Connection. All Connections
 * returned by the Redshift driver implement RedshiftConnection.
 */
public interface RedshiftConnection {

  /**
   * Creates an {@link Array} wrapping <i>elements</i>. This is similar to
   * {@link java.sql.Connection#createArrayOf(String, Object[])}, but also
   * provides support for primitive arrays.
   *
   * @param typeName
   *          The SQL name of the type to map the <i>elements</i> to.
   *          Must not be {@code null}.
   * @param elements
   *          The array of objects to map. A {@code null} value will result in
   *          an {@link Array} representing {@code null}.
   * @return An {@link Array} wrapping <i>elements</i>.
   * @throws SQLException
   *           If for some reason the array cannot be created.
   * @see java.sql.Connection#createArrayOf(String, Object[])
   */
  Array createArrayOf(String typeName, Object elements) throws SQLException;

  /**
   * This method returns any notifications that have been received since the last call to this
   * method. Returns null if there have been no notifications.
   *
   * @return notifications that have been received
   * @throws SQLException if something wrong happens
   * @since 7.3
   */
  RedshiftNotification[] getNotifications() throws SQLException;

  /**
   * Returns the driver's count of bytes read from the underlying connection stream
   * (Redshift extension).
   *
   * <p>NOTE(review): description inferred from the method name only — confirm the exact
   * semantics (cumulative vs. per-query, nullability) against the implementing class.</p>
   *
   * @return bytes read from the stream
   */
  Long getBytesReadFromStream();

  /**
   * This method returns any notifications that have been received since the last call to this
   * method. Returns null if there have been no notifications. A timeout can be specified so the
   * driver waits for notifications.
   *
   * @param timeoutMillis when 0, blocks forever. when &gt; 0, blocks up to the specified number of millis
   *        or until at least one notification has been received. If more than one notification is
   *        about to be received, these will be returned in one batch.
   * @return notifications that have been received
   * @throws SQLException if something wrong happens
   * @since 43
   */
  RedshiftNotification[] getNotifications(int timeoutMillis) throws SQLException;

  /**
   * This returns the COPY API for the current connection.
   *
   * @return COPY API for the current connection
   * @throws SQLException if something wrong happens
   * @since 8.4
   */
  CopyManager getCopyAPI() throws SQLException;

  /**
   * This returns the LargeObject API for the current connection.
   *
   * @return LargeObject API for the current connection
   * @throws SQLException if something wrong happens
   * @since 7.3
   */
  LargeObjectManager getLargeObjectAPI() throws SQLException;

  /**
   * This returns the Fastpath API for the current connection.
   *
   * @return Fastpath API for the current connection
   * @throws SQLException if something wrong happens
   * @since 7.3
   * @deprecated This API is somewhat obsolete, as one may achieve similar performance
   *             and greater functionality by setting up a prepared statement to define
   *             the function call. Then, executing the statement with binary transmission of parameters
   *             and results substitutes for a fast-path function call.
   */
  @Deprecated
  Fastpath getFastpathAPI() throws SQLException;

  /**
   * This allows client code to add a handler for one of com.amazon.redshift's more unique data types. It
   * is approximately equivalent to <code>addDataType(type, Class.forName(name))</code>.
   *
   * @param type JDBC type name
   * @param className class name
   * @throws RuntimeException if the type cannot be registered (class not found, etc).
   * @deprecated As of 8.0, replaced by {@link #addDataType(String, Class)}. This deprecated method
   *             does not work correctly for registering classes that cannot be directly loaded by
   *             the JDBC driver's classloader.
   */
  @Deprecated
  void addDataType(String type, String className);

  /**
   * <p>This allows client code to add a handler for one of com.amazon.redshift's more unique data types.</p>
   *
   * <p><b>NOTE:</b> This is not part of JDBC, but an extension.</p>
   *
   * <p>The best way to use this is as follows:</p>
   *
   * <pre>
   * ...
   * ((com.amazon.redshift.RedshiftConnection)myconn).addDataType("mytype", my.class.name.class);
   * ...
   * </pre>
   *
   * <p>where myconn is an open Connection to com.amazon.redshift.</p>
   *
   * <p>The handling class must extend com.amazon.redshift.util.RedshiftObject</p>
   *
   * @param type the Redshift type to register
   * @param klass the class implementing the Java representation of the type; this class must
   *        implement {@link com.amazon.redshift.util.RedshiftObject}).
   * @throws SQLException if <code>klass</code> does not implement
   *         {@link com.amazon.redshift.util.RedshiftObject}).
   * @see com.amazon.redshift.util.RedshiftObject
   * @since 8.0
   */
  void addDataType(String type, Class<? extends RedshiftObject> klass) throws SQLException;

  /**
   * Set the default statement reuse threshold before enabling server-side prepare. See
   * {@link com.amazon.redshift.RedshiftStatement#setPrepareThreshold(int)} for details.
   *
   * @param threshold the new threshold
   * @since build 302
   */
  void setPrepareThreshold(int threshold);

  /**
   * Get the default server-side prepare reuse threshold for statements created from this
   * connection.
   *
   * @return the current threshold
   * @since build 302
   */
  int getPrepareThreshold();

  /**
   * Set the default fetch size for statements created from this connection.
   *
   * @param fetchSize new default fetch size
   * @throws SQLException if specified negative <code>fetchSize</code> parameter
   * @see Statement#setFetchSize(int)
   */
  void setDefaultFetchSize(int fetchSize) throws SQLException;

  /**
   * Get the default fetch size for statements created from this connection.
   *
   * @return current state for default fetch size
   * @see RedshiftProperty#DEFAULT_ROW_FETCH_SIZE
   * @see Statement#getFetchSize()
   */
  int getDefaultFetchSize();

  /**
   * Return the process ID (PID) of the backend server process handling this connection.
   *
   * @return PID of backend server process.
   */
  int getBackendPID();

  /**
   * Sends a query cancellation for this connection.
   *
   * @throws SQLException if there are problems cancelling the query
   */
  void cancelQuery() throws SQLException;

  /**
   * Return the given string suitably quoted to be used as an identifier in an SQL statement string.
   * Quotes are added only if necessary (i.e., if the string contains non-identifier characters or
   * would be case-folded). Embedded quotes are properly doubled.
   *
   * @param identifier input identifier
   * @return the escaped identifier
   * @throws SQLException if something goes wrong
   */
  String escapeIdentifier(String identifier) throws SQLException;

  /**
   * Return the given string suitably quoted to be used as a string literal in an SQL statement
   * string. Embedded single-quotes and backslashes are properly doubled. Note that quote_literal
   * returns null on null input.
   *
   * @param literal input literal
   * @return the quoted literal
   * @throws SQLException if something goes wrong
   */
  String escapeLiteral(String literal) throws SQLException;

  /**
   * <p>Returns the query mode for this connection.</p>
   *
   * <p>When running in simple query mode, certain features are not available: callable statements,
   * partial result set fetch, bytea type, etc.</p>
   * <p>The list of supported features is subject to change.</p>
   *
   * @return the preferred query mode
   * @see PreferQueryMode
   */
  PreferQueryMode getPreferQueryMode();

  /**
   * Connection configuration regarding automatic per-query savepoints.
   *
   * @return connection configuration regarding automatic per-query savepoints
   * @see RedshiftProperty#AUTOSAVE
   */
  AutoSave getAutosave();

  /**
   * Configures if connection should use automatic savepoints.
   *
   * @param autoSave connection configuration regarding automatic per-query savepoints
   * @see RedshiftProperty#AUTOSAVE
   */
  void setAutosave(AutoSave autoSave);

  /**
   * @return replication API for the current connection
   */
  RedshiftReplicationConnection getReplicationAPI();

  /**
   * <p>Returns the current values of all parameters reported by the server.</p>
   *
   * <p>Redshift reports values for a subset of parameters (GUCs) to the client
   * at connect-time, then sends update messages whenever the values change
   * during a session. PgJDBC records the latest values and exposes it to client
   * applications via <code>getParameterStatuses()</code>.</p>
   *
   * <p>PgJDBC exposes individual accessors for some of these parameters as
   * listed below. They are more backwards-compatible and should be preferred
   * where possible.</p>
   *
   * <p>Not all parameters are reported, only those marked
   * <code>GUC_REPORT</code> in the source code. The <code>pg_settings</code>
   * view does not expose information about which parameters are reportable.
   * PgJDBC's map will only contain the parameters the server reports values
   * for, so you cannot use this method as a substitute for running a
   * <code>SHOW paramname;</code> or <code>SELECT current_setting('paramname');</code>
   * query for arbitrary parameters.</p>
   *
   * <p>Parameter names are <i>case-insensitive</i> and <i>case-preserving</i>
   * in this map, like in Redshift itself. So <code>DateStyle</code> and
   * <code>datestyle</code> are the same key.</p>
   *
   * <p>
   * As of PostgreSQL 11 the reportable parameter list, and related PgJDBC
   * interfaces or accessors, are:
   * </p>
   *
   * <ul>
   * <li>
   * <code>application_name</code> -
   * {@link java.sql.Connection#getClientInfo()},
   * {@link java.sql.Connection#setClientInfo(java.util.Properties)}
   * and <code>ApplicationName</code> connection property.
   * </li>
   * <li>
   * <code>client_encoding</code> - PgJDBC always sets this to <code>UTF8</code>.
   * See <code>allowEncodingChanges</code> connection property.
   * </li>
   * <li><code>DateStyle</code> - PgJDBC requires this to always be set to <code>ISO</code></li>
   * <li><code>standard_conforming_strings</code> - indirectly via {@link #escapeLiteral(String)}</li>
   * <li>
   * <code>TimeZone</code> - set from JDK timezone see {@link java.util.TimeZone#getDefault()}
   * and {@link java.util.TimeZone#setDefault(TimeZone)}
   * </li>
   * <li><code>integer_datetimes</code></li>
   * <li><code>IntervalStyle</code></li>
   * <li><code>server_encoding</code></li>
   * <li><code>server_version</code></li>
   * <li><code>is_superuser</code> </li>
   * <li><code>session_authorization</code></li>
   * </ul>
   *
   * <p>Note that some PgJDBC operations will change server parameters
   * automatically.</p>
   *
   * @return unmodifiable map of case-insensitive parameter names to parameter values
   * @since 42.2.6
   */
  Map<String,String> getParameterStatuses();

  /**
   * Shorthand for getParameterStatuses().get(...) .
   *
   * @param parameterName case-insensitive parameter name
   * @return parameter value if defined, or null if no parameter known
   * @see #getParameterStatuses
   * @since 42.2.6
   */
  String getParameterStatus(String parameterName);
}
8,291
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/NativeTokenHolder.java
package com.amazon.redshift;

import com.amazon.redshift.plugin.utils.RequestUtils;

import java.util.Date;

/**
 * Holder for a native (plugin-acquired) access token and its expiration time.
 *
 * <p>Instances are created via the {@link #newInstance} factories. When no explicit
 * expiration is supplied, a default lifetime of {@link #DEFAULT_LIFETIME_MS} from the
 * moment of construction is assumed.</p>
 *
 * <p>The {@code refresh} flag distinguishes a freshly acquired token ({@code true})
 * from one served out of a cache ({@code false}); it is mutable via
 * {@link #setRefresh(boolean)}.</p>
 */
public class NativeTokenHolder
{
  /** Default token lifetime (15 minutes) applied when no expiration is provided. */
  private static final long DEFAULT_LIFETIME_MS = 15 * 60 * 1000L;

  /** The raw access token value. Protected so subclasses can manage it. */
  protected String m_accessToken;

  /** Absolute expiration time of the token; never reassigned after construction. */
  private final Date m_expiration;

  // true means newly added, false means from cache.
  private boolean refresh;

  /**
   * Creates a holder whose expiration defaults to now + {@link #DEFAULT_LIFETIME_MS}.
   *
   * @param accessToken the access token value
   */
  protected NativeTokenHolder(String accessToken)
  {
    this(accessToken, new Date(System.currentTimeMillis() + DEFAULT_LIFETIME_MS));
  }

  /**
   * Creates a holder with an explicit expiration.
   *
   * @param accessToken the access token value
   * @param expiration  absolute time at which the token expires
   */
  protected NativeTokenHolder(String accessToken, Date expiration)
  {
    this.m_accessToken = accessToken;
    this.m_expiration = expiration;
  }

  /**
   * Factory: token with the default lifetime.
   *
   * @param accessToken the access token value
   * @return a new holder
   */
  public static NativeTokenHolder newInstance(String accessToken)
  {
    return new NativeTokenHolder(accessToken);
  }

  /**
   * Factory: token with an explicit expiration.
   *
   * @param accessToken the access token value
   * @param expiration  absolute time at which the token expires
   * @return a new holder
   */
  public static NativeTokenHolder newInstance(String accessToken, Date expiration)
  {
    return new NativeTokenHolder(accessToken, expiration);
  }

  /**
   * @return {@code true} if the token's expiration has been reached, as judged by
   *         {@link RequestUtils#isCredentialExpired(Date)}
   */
  public boolean isExpired()
  {
    return RequestUtils.isCredentialExpired(m_expiration);
  }

  /** @return the raw access token value */
  public String getAccessToken()
  {
    return m_accessToken;
  }

  /** @return the absolute expiration time of this token */
  public Date getExpiration()
  {
    return m_expiration;
  }

  /**
   * Marks whether this token was freshly acquired ({@code true}) or came from a
   * cache ({@code false}).
   *
   * @param flag the refresh state
   */
  public void setRefresh(boolean flag)
  {
    refresh = flag;
  }

  /** @return {@code true} if this token was freshly acquired rather than cached */
  public boolean isRefresh()
  {
    return refresh;
  }
}
8,292
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/Driver.java
/*
 * Copyright (c) 2003, PostgreSQL Global Development Group
 * See the LICENSE file in the project root for more information.
 */

package com.amazon.redshift;

import com.amazon.redshift.jdbc.RedshiftConnectionImpl;
import com.amazon.redshift.logger.LogLevel;
import com.amazon.redshift.logger.RedshiftLogger;
import com.amazon.redshift.util.DriverInfo;
import com.amazon.redshift.util.ExpressionProperties;
import com.amazon.redshift.util.GT;
import com.amazon.redshift.util.HostSpec;
import com.amazon.redshift.util.IniFile;
import com.amazon.redshift.util.RedshiftException;
import com.amazon.redshift.util.RedshiftState;
import com.amazon.redshift.util.SharedTimer;
import com.amazon.redshift.util.URLCoder;
import com.amazon.redshift.util.RedshiftProperties;

import javax.naming.Context;
import javax.naming.NamingException;
import javax.naming.directory.Attribute;
import javax.naming.directory.Attributes;
import javax.naming.directory.InitialDirContext;

import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.net.InetAddress;
import java.net.URL;
import java.security.AccessController;
import java.security.PrivilegedActionException;
import java.security.PrivilegedExceptionAction;
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.DriverPropertyInfo;
import java.sql.SQLException;
import java.sql.SQLFeatureNotSupportedException;
import java.util.ArrayList;
import java.util.Enumeration;
import java.util.Map;
import java.util.Properties;
import java.util.Set;
import java.util.concurrent.TimeUnit;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

/**
 * <p>The Java SQL framework allows for multiple database drivers. Each driver should supply a class
 * that implements the Driver interface</p>
 *
 * <p>The DriverManager will try to load as many drivers as it can find and then for any given
 * connection request, it will ask each driver in turn to try to connect to the target URL.</p>
 *
 * <p>It is strongly recommended that each Driver class should be small and standalone so that the
 * Driver class can be loaded and queried without bringing in vast quantities of supporting code.</p>
 *
 * <p>When a Driver class is loaded, it should create an instance of itself and register it with the
 * DriverManager. This means that a user can load and register a driver by doing
 * Class.forName("foo.bah.Driver")</p>
 *
 * @see com.amazon.redshift.RedshiftConnection
 * @see java.sql.Driver
 */
public class Driver implements java.sql.Driver {

  // The driver instance registered with DriverManager; null when deregistered.
  private static Driver registeredDriver;
  private static SharedTimer sharedTimer = new SharedTimer();
  // Default Redshift port used when the URL omits one.
  private static final String DEFAULT_PORT = "5439";
  private static final String URL_PREFIX = "jdbc:redshift:";
  // Matches "[iam:]//host[:port][/database][?;args]" after the URL prefix is stripped.
  private static final Pattern URL_PATTERN = Pattern.compile("(iam:)?//([^:/?]+)(:([^/?]*))?(/([^?;]*))?([?;](.*))?");
  // Provisioned-cluster endpoint: <cluster-id>.<acct-hash>.<region>.redshift[-dev].amazonaws.com
  private static final Pattern HOST_PATTERN =
      Pattern.compile("(.+)\\.(.+)\\.(.+).redshift(-dev)?\\.amazonaws\\.com(.)*");
//  private static final Pattern SERVERLESS_HOST_PATTERN =
//      Pattern.compile("(.+)\\.(.+).redshift-serverless(-dev)?\\.amazonaws\\.com(.)*");
  // Serverless endpoint: <workgroup>.<account-id>.<region>.redshift-serverless[-dev].amazonaws.com
  private static final Pattern SERVERLESS_WORKGROUP_HOST_PATTERN =
      Pattern.compile("(.+)\\.(.+)\\.(.+).redshift-serverless(-dev)?\\.amazonaws\\.com(.)*");

  // Process-wide driver logger, lazily created in getLogger().
  // NOTE(review): assignment is not synchronized; concurrent first connections could
  // each create a logger instance — confirm whether this race is acceptable.
  private static RedshiftLogger logger;

  private static final String DEFAULT_INI_FILE = "rsjdbc.ini";
  private static final String DEFAULT_DRIVER_SECTION = "DRIVER";

  static {
    try {
      // moved the registerDriver from the constructor to here
      // because some clients call the driver themselves (I know, as
      // my early jdbc work did - and that was based on other examples).
      // Placing it here, means that the driver is registered once only.
      register();
    } catch (SQLException e) {
      throw new ExceptionInInitializerError(e);
    }
  }

  // Helper to retrieve default properties from classloader resource
  // properties files.
  private Properties defaultProperties;

  // Lazily loads (and caches) driverconfig.properties defaults, with elevated privileges.
  private synchronized Properties getDefaultProperties() throws IOException {
    if (defaultProperties != null) {
      return defaultProperties;
    }

    // Make sure we load properties with the maximum possible privileges.
    try {
      defaultProperties =
          AccessController.doPrivileged(new PrivilegedExceptionAction<Properties>() {
            public Properties run() throws IOException {
              return loadDefaultProperties();
            }
          });
    } catch (PrivilegedActionException e) {
      throw (IOException) e.getException();
    }

    return defaultProperties;
  }

  // Builds the default property set: the OS user name plus every
  // com/amazon/redshift/driverconfig.properties resource on the classpath,
  // earlier classpath entries taking precedence over later ones.
  private Properties loadDefaultProperties() throws IOException {
    Properties merged = new RedshiftProperties();

    try {
      RedshiftProperty.USER.set(merged, System.getProperty("user.name"));
    } catch (SecurityException se) {
      // We're just trying to set a default, so if we can't
      // it's not a big deal.
    }

    // If we are loaded by the bootstrap classloader, getClassLoader()
    // may return null. In that case, try to fall back to the system
    // classloader.
    //
    // We should not need to catch SecurityException here as we are
    // accessing either our own classloader, or the system classloader
    // when our classloader is null. The ClassLoader javadoc claims
    // neither case can throw SecurityException.
    ClassLoader cl = getClass().getClassLoader();
    if (cl == null) {
      cl = ClassLoader.getSystemClassLoader();
    }

    if (cl == null) {
      return merged; // Give up on finding defaults.
    }

    // When loading the driver config files we don't want settings found
    // in later files in the classpath to override settings specified in
    // earlier files. To do this we've got to read the returned
    // Enumeration into temporary storage.
    ArrayList<URL> urls = new ArrayList<URL>();
    Enumeration<URL> urlEnum = cl.getResources("com/amazon/redshift/driverconfig.properties");
    while (urlEnum.hasMoreElements()) {
      urls.add(urlEnum.nextElement());
    }

    // Iterate in reverse so that earlier classpath entries are loaded last and win.
    for (int i = urls.size() - 1; i >= 0; i--) {
      URL url = urls.get(i);
      InputStream is = url.openStream();
      merged.load(is);
      is.close();
    }

    return merged;
  }

  /**
   * <p>Try to make a database connection to the given URL. The driver should return "null" if it
   * realizes it is the wrong kind of driver to connect to the given URL. This will be common, as
   * when the JDBC driverManager is asked to connect to a given URL, it passes the URL to each
   * loaded driver in turn.</p>
   *
   * <p>The driver should raise an SQLException if it is the right driver to connect to the given URL,
   * but has trouble connecting to the database.</p>
   *
   * <p>The java.util.Properties argument can be used to pass arbitrary string tag/value pairs as
   * connection arguments.</p>
   *
   * <ul>
   * <li>user - (required) The user to connect as</li>
   * <li>password - (optional) The password for the user</li>
   * <li>ssl -(optional) Use SSL when connecting to the server</li>
   * <li>readOnly - (optional) Set connection to read-only by default</li>
   * <li>charSet - (optional) The character set to be used for converting to/from
   * the database to unicode. If multibyte is enabled on the server then the character set of the
   * database is used as the default, otherwise the jvm character encoding is used as the default.
   * This value is only used when connecting to a 7.2 or older server.</li>
   * <li>loglevel - (optional) Enable logging of messages from the driver. The value is an integer
   * from 0 to 2 where: OFF = 0, INFO =1, DEBUG = 2 The output is sent to
   * DriverManager.getPrintWriter() if set, otherwise it is sent to System.out.</li>
   * <li>compatible - (optional) This is used to toggle between different functionality
   * as it changes across different releases of the jdbc driver code. The values here are versions
   * of the jdbc client and not server versions. For example in 7.1 get/setBytes worked on
   * LargeObject values, in 7.2 these methods were changed to work on bytea values. This change in
   * functionality could be disabled by setting the compatible level to be "7.1", in which case the
   * driver will revert to the 7.1 functionality.</li>
   * </ul>
   *
   * <p>Normally, at least "user" and "password" properties should be included in the properties. For a
   * list of supported character encoding , see
   * http://java.sun.com/products/jdk/1.2/docs/guide/internat/encoding.doc.html Note that you will
   * probably want to have set up the Postgres database itself to use the same encoding, with the
   * {@code -E <encoding>} argument to createdb.</p>
   *
   * <p>Our protocol takes the forms:</p>
   *
   * <pre>
   *  jdbc:redshift://host:port/database?param1=val1&amp;...
   * </pre>
   *
   * @param url the URL of the database to connect to
   * @param info a list of arbitrary tag/value pairs as connection arguments
   * @return a connection to the URL or null if it isn't us
   * @exception SQLException if a database access error occurs or the url is
   *            {@code null}
   * @see java.sql.Driver#connect
   */
  @Override
  public Connection connect(String url, Properties info) throws SQLException {
    if (url == null) {
      throw new SQLException("url is null");
    }
    // get defaults
    Properties defaults;

    if (!url.startsWith(URL_PREFIX)) {
      return null;
    }
    try {
      defaults = getDefaultProperties();
    } catch (IOException ioe) {
      throw new RedshiftException(GT.tr("Error loading default settings from driverconfig.properties"),
          RedshiftState.UNEXPECTED_ERROR, ioe);
    }

    // override defaults with provided properties
    RedshiftProperties props = new RedshiftProperties(info, defaults);

    // parse URL and add more properties
    if ((props = parseURL(url, props)) == null) {
      return null;
    }

    // Read INI file, if one is configured or discoverable (see getJdbcIniFile()).
    String iniFileName = getJdbcIniFile(props);
    if (iniFileName != null)
      props = readJdbcIniFile(iniFileName, props);

    RedshiftLogger connLogger = null;

    try {
      // Setup java.util.logging.Logger using connection properties.
      connLogger = getLogger(props);

      if(RedshiftLogger.isEnable()) {
        StackTraceElement[] stacktrace = Thread.currentThread().getStackTrace();
        // Mask passwords/secrets before the URL is written to the log.
        String temp = RedshiftLogger.maskSecureInfoInUrl(url);

        logger.log(LogLevel.DEBUG, "===================================");
        logger.log(LogLevel.DEBUG, "Connecting with URL: {0}", temp);

        connLogger.log(LogLevel.DEBUG, "===================================");
        connLogger.logFunction(true, temp, RedshiftLogger.maskSecureInfoInProps(props));
        connLogger.log(LogLevel.DEBUG, "Connecting with URL: {0}", temp);
        if(iniFileName != null) {
          connLogger.log(LogLevel.DEBUG, "JDBC INI FileName {0}", iniFileName);
//          connLogger.log(LogLevel.DEBUG, "After merging JDBC INI FileName props:" + props);
        }
        connLogger.log(LogLevel.DEBUG, "Caller stack[{0}]: {1}",
            Thread.currentThread().getName(), stacktrace[stacktrace.length-1].toString());

/*        String useProxyStr = System.getProperty("http.useProxy");
        String proxyHost = System.getProperty("https.proxyHost");
        String proxyPort = System.getProperty("https.proxyPort");

        connLogger.logDebug(
            String.format("useProxy: %s proxyHost: %s proxyPort:%s"
                    , useProxyStr, proxyHost, proxyPort)); */
      }

      // Enforce login timeout, if specified, by running the connection
      // attempt in a separate thread. If we hit the timeout without the
      // connection completing, we abandon the connection attempt in
      // the calling thread, but the separate thread will keep trying.
      // Eventually, the separate thread will either fail or complete
      // the connection; at that point we clean up the connection if
      // we managed to establish one after all. See ConnectThread for
      // more details.
      long timeout = timeout(props);
      if (timeout <= 0) {
        // No timeout configured: connect synchronously on the calling thread.
        Connection conn = makeConnection(url, props, connLogger);
        if(RedshiftLogger.isEnable())
          connLogger.logFunction(false, conn);
        return conn;
      }

      ConnectThread ct = new ConnectThread(url, props, connLogger);
      Thread thread = new Thread(ct, "Redshift JDBC driver connection thread");
      thread.setDaemon(true); // Don't prevent the VM from shutting down
      thread.start();
      Connection conn = ct.getResult(timeout);
      if(RedshiftLogger.isEnable())
        connLogger.logFunction(false, conn);
      return conn;
    } catch (RedshiftException ex1) {
      if(RedshiftLogger.isEnable())
        connLogger.logError(ex1);

      // re-throw the exception, otherwise it will be caught next, and a
      // com.amazon.redshift.unusual error will be returned instead.
      throw ex1.getSQLException();
    } catch (java.security.AccessControlException ace) {
      if(RedshiftLogger.isEnable())
        connLogger.logError(ace);

      throw new RedshiftException(
          GT.tr(
              "Your security policy has prevented the connection from being attempted.  You probably need to grant the connect java.net.SocketPermission to the database server host and port that you wish to connect to."),
          RedshiftState.UNEXPECTED_ERROR, ace);
    } catch (Exception ex2) {
      if(RedshiftLogger.isEnable())
        connLogger.logError(ex2);

      throw new RedshiftException(
          GT.tr(
              "Something unusual has occurred to cause the driver to fail. Please report this exception:" + ex2.getMessage()),
          RedshiftState.UNEXPECTED_ERROR, ex2);
    }
  }

  /**
   * <p>Setup java.util.logging.Logger using connection properties.</p>
   *
   * <p>Also lazily creates the process-wide driver logger on first use. The log level is
   * taken from {@code LOG_LEVEL} first, falling back to the {@code DSI_LOG_LEVEL} alias.</p>
   *
   * @param props Connection Properties
   * @return a per-connection logger configured from the given properties
   */
  private RedshiftLogger getLogger(final Properties props) {
    final String alias1LogLevel = RedshiftProperty.LOG_LEVEL.get(props);
    final String alias2LogLevel = RedshiftProperty.DSI_LOG_LEVEL.get(props);
    final String driverLogLevel = (alias1LogLevel != null)
                                    ? LogLevel.getLogLevel(alias1LogLevel).toString()
                                    : (alias2LogLevel != null)
                                        ? LogLevel.getLogLevel(alias2LogLevel).toString()
                                        : null;

    // LOG_PATH may contain property expressions; resolve against connection and system props.
    ExpressionProperties exprProps = new ExpressionProperties(props, System.getProperties());
    final String logPath = RedshiftProperty.LOG_PATH.get(exprProps);
    final String driverLogFile = RedshiftLogger.getLogFileUsingPath(driverLogLevel, logPath);
    String maxLogFileSize = RedshiftProperty.MAX_LOG_FILE_SIZE.get(exprProps);
    String maxLogFileCount = RedshiftProperty.MAX_LOG_FILE_COUNT.get(exprProps);

    // Driver logger
    if (logger == null)
      logger = new RedshiftLogger(driverLogFile, driverLogLevel, true, maxLogFileSize, maxLogFileCount);

    RedshiftLogger connLogger = new RedshiftLogger(driverLogFile, driverLogLevel, false, maxLogFileSize, maxLogFileCount);

    return connLogger;
  }

  /**
   * Perform a connect in a separate thread; supports getting the results from the original thread
   * while enforcing a login timeout.
   */
  private static class ConnectThread implements Runnable {
    ConnectThread(String url, RedshiftProperties props, RedshiftLogger connLogger) {
      this.url = url;
      this.props = props;
      this.connLogger = connLogger;
    }

    public void run() {
      Connection conn;
      Throwable error;

      try {
        conn = makeConnection(url, props, connLogger);
        error = null;
      } catch (Throwable t) {
        conn = null;
        error = t;
      }

      synchronized (this) {
        if (abandoned) {
          // The caller already timed out; close the late connection rather than leak it.
          if (conn != null) {
            try {
              conn.close();
            } catch (SQLException e) {
              // NOTE(review): close failure is deliberately ignored — there is no caller
              // left to report it to. Confirm whether it should at least be logged.
            }
          }
        } else {
          result = conn;
          resultException = error;
          notify();
        }
      }
    }

    /**
     * Get the connection result from this (assumed running) thread. If the timeout is reached
     * without a result being available, a SQLException is thrown.
     *
     * @param timeout timeout in milliseconds
     * @return the new connection, if successful
     * @throws SQLException if a connection error occurs or the timeout is reached
     */
    public Connection getResult(long timeout) throws SQLException {
      long expiry = TimeUnit.NANOSECONDS.toMillis(System.nanoTime()) + timeout;
      synchronized (this) {
        while (true) {
          if (result != null) {
            return result;
          }

          if (resultException != null) {
            if (resultException instanceof SQLException) {
              resultException.fillInStackTrace();
              throw (SQLException) resultException;
            } else {
              throw new RedshiftException(
                  GT.tr(
                      "Something unusual has occurred to cause the driver to fail. Please report this exception."),
                  RedshiftState.UNEXPECTED_ERROR, resultException);
            }
          }

          long delay = expiry - TimeUnit.NANOSECONDS.toMillis(System.nanoTime());
          if (delay <= 0) {
            // Timed out: mark abandoned so run() closes any connection it completes later.
            abandoned = true;
            throw new RedshiftException(GT.tr("Connection attempt timed out."),
                RedshiftState.CONNECTION_UNABLE_TO_CONNECT);
          }

          try {
            wait(delay);
          } catch (InterruptedException ie) {

            // reset the interrupt flag
            Thread.currentThread().interrupt();
            abandoned = true;

            // throw an unchecked exception which will hopefully not be ignored by the calling code
            throw new RuntimeException(GT.tr("Interrupted while attempting to connect."));
          }
        }
      }
    }

    private final String url;
    private final RedshiftProperties props;
    private Connection result;
    private Throwable resultException;
    private boolean abandoned;
    private RedshiftLogger connLogger;
  }

  /**
   * Create a connection from URL and properties. Always does the connection work in the current
   * thread without enforcing a timeout, regardless of any timeout specified in the properties.
   *
   * @param url the original URL
   * @param props the parsed/defaulted connection properties
   * @return a new connection
   * @throws SQLException if the connection could not be made
   */
  private static Connection makeConnection(String url, RedshiftProperties props, RedshiftLogger logger) throws SQLException {
    return new RedshiftConnectionImpl(hostSpecs(props), user(props), database(props), props, url, logger);
  }

  /**
   * Returns true if the driver thinks it can open a connection to the given URL. Typically, drivers
   * will return true if they understand the subprotocol specified in the URL and false if they
   * don't. Our protocols start with jdbc:redshift:
   *
   * @param url the URL of the driver
   * @return true if this driver accepts the given URL
   * @see java.sql.Driver#acceptsURL
   */
  @Override
  public boolean acceptsURL(String url) throws RedshiftException {
    return parseURL(url, null) != null;
  }

  /**
   * <p>The getPropertyInfo method is intended to allow a generic GUI tool to discover what properties
   * it should prompt a human for in order to get enough information to connect to a database.</p>
   *
   * <p>Note that depending on the values the human has supplied so far, additional values may become
   * necessary, so it may be necessary to iterate through several calls to getPropertyInfo</p>
   *
   * @param url the Url of the database to connect to
   * @param info a proposed list of tag/value pairs that will be sent on connect open.
   * @return An array of DriverPropertyInfo objects describing possible properties. This array may
   *         be an empty array if no properties are required
   * @see java.sql.Driver#getPropertyInfo
   */
  @Override
  public DriverPropertyInfo[] getPropertyInfo(String url, Properties info) throws RedshiftException {
    RedshiftProperties copy = new RedshiftProperties(info);
    RedshiftProperties parse = parseURL(url, copy);
    if (parse != null) {
      copy = parse;
    }

    RedshiftProperty[] knownProperties = RedshiftProperty.values();
    DriverPropertyInfo[] props = new DriverPropertyInfo[knownProperties.length];
    for (int i = 0; i < props.length; ++i) {
      props[i] = knownProperties[i].toDriverPropertyInfo(copy);
    }

    return props;
  }

  @Override
  public int getMajorVersion() {
    return com.amazon.redshift.util.DriverInfo.MAJOR_VERSION;
  }

  @Override
  public int getMinorVersion() {
    return com.amazon.redshift.util.DriverInfo.MINOR_VERSION;
  }

  /**
   * Returns the server version series of this driver and the specific build number.
   *
   * @return JDBC driver version
   * @deprecated use {@link #getMajorVersion()} and {@link #getMinorVersion()} instead
   */
  @Deprecated
  public static String getVersion() {
    return DriverInfo.DRIVER_FULL_NAME;
  }

  /**
   * <p>Report whether the driver is a genuine JDBC compliant driver. A driver may only report "true"
   * here if it passes the JDBC compliance tests, otherwise it is required to return false. JDBC
   * compliance requires full support for the JDBC API and full support for SQL 92 Entry Level.</p>
   *
   * <p>For Redshift, this is not yet possible, as we are not SQL92 compliant (yet).</p>
   */
  @Override
  public boolean jdbcCompliant() {
    return false;
  }

  /**
   * Constructs a new DriverURL, splitting the specified URL into its component parts.
   * URL formats:
   * 1. jdbc:redshift://endpoint:port/database
   * 2. jdbc:redshift:iam://endpoint:port/database
   * 3. jdbc:redshift:iam://cluster-id:region/database
   *
   * if endpoint contains "redshift-serverless" then it's
   * serverless end point.
   *
   * @param url JDBC URL to parse
   * @param defaults Default properties
   * @return Properties with elements added from the url
   */
  public static RedshiftProperties parseURL(String url, RedshiftProperties defaults) throws RedshiftException {
    // NOTE: when defaults is non-null it is mutated and returned; callers rely on this.
    RedshiftProperties urlProps = defaults == null ? new RedshiftProperties() : defaults;

    String urlServer = url;
    String urlArgs = "";
    boolean iamAuth = false;

    // Split off the query/args part at the first '?' or, failing that, the first ';'.
    int qPos = url.indexOf('?');
    if (qPos != -1) {
      urlServer = url.substring(0, qPos);
      urlArgs = url.substring(qPos + 1);
    } else {
      qPos = url.indexOf(';');
      if (qPos != -1) {
        urlServer = url.substring(0, qPos);
        urlArgs = url.substring(qPos + 1);
      }
    }

    if (!urlServer.startsWith(URL_PREFIX)) {
      return null;
    }
    urlServer = urlServer.substring(URL_PREFIX.length());

    if (urlServer.startsWith("iam:")) {
      String subname = urlServer;

      // Parse the IAM URL
      Matcher matcher = URL_PATTERN.matcher(subname);
      if (!matcher.matches()) {
        // Host is a required value.
        return null;
      }

      iamAuth = matcher.group(1) != null; // This must be true
      String host = matcher.group(2);
      String port = matcher.group(4);
      String schema = matcher.group(6);
      String queryString = matcher.group(8);

      urlProps.setProperty(RedshiftProperty.IAM_AUTH.getName(), String.valueOf(iamAuth));

      if (null != port && !port.matches("\\d*")) {
        // This is new cluster_id:region type url.
        urlProps.setProperty(RedshiftProperty.CLUSTER_IDENTIFIER.getName(), host);
        urlProps.setProperty(RedshiftProperty.AWS_REGION.getName(), port);
      } else {
        urlProps.setProperty(RedshiftProperty.HOST.getName(), host);
        if (null == port || port.isEmpty()) {
          port = DEFAULT_PORT;
        }
        urlProps.setProperty(RedshiftProperty.PORT.getName(), port);
        // Try to infer cluster id / region / serverless settings from the host name.
        urlProps = parseHostName(urlProps, host);
      }

      if (null != schema) {
        urlProps.setProperty(RedshiftProperty.DBNAME.getName(), URLCoder.decode(schema));
      }

      if (queryString != null)
        urlArgs = queryString;
    } // IAM
    else {
      urlProps.setProperty(RedshiftProperty.IAM_AUTH.getName(), String.valueOf(iamAuth));

      if (urlServer.startsWith("//")) {
        urlServer = urlServer.substring(2);
        int slash = urlServer.indexOf('/');
        if (slash == -1) {
          return null;
        }
        urlProps.setProperty(RedshiftProperty.DBNAME.getName(), URLCoder.decode(urlServer.substring(slash + 1)));

        // Multiple comma-separated host[:port] addresses are collected into
        // parallel comma-separated HOST and PORT property strings.
        String[] addresses = urlServer.substring(0, slash).split(",");
        StringBuilder hosts = new StringBuilder();
        StringBuilder ports = new StringBuilder();
        for (String address : addresses) {
          int portIdx = address.lastIndexOf(':');
          // address.lastIndexOf(']') guards IPv6 literals like [::1] with no port.
          if (portIdx != -1 && address.lastIndexOf(']') < portIdx) {
            String portStr = address.substring(portIdx + 1);
            try {
              int port = Integer.parseInt(portStr);
              if (port < 1 || port > 65535) {
                return null;
              }
            } catch (NumberFormatException ignore) {
              return null;
            }
            ports.append(portStr);
            hosts.append(address.subSequence(0, portIdx));
          } else {
            ports.append(DEFAULT_PORT);
            hosts.append(address);
          }
          ports.append(',');
          hosts.append(',');
        }
        // Drop the trailing commas appended in the loop above.
        ports.setLength(ports.length() - 1);
        hosts.setLength(hosts.length() - 1);
        urlProps.setProperty(RedshiftProperty.PORT.getName(), ports.toString());
        urlProps.setProperty(RedshiftProperty.HOST.getName(), hosts.toString());
      } else {
        /*
         if there are no defaults set or any one of PORT, HOST, DBNAME not set
         then set it to default
        */
        if (defaults == null || !defaults.containsKey(RedshiftProperty.PORT.getName())) {
          urlProps.setProperty(RedshiftProperty.PORT.getName(), DEFAULT_PORT);
        }
        if (defaults == null || !defaults.containsKey(RedshiftProperty.HOST.getName())) {
          urlProps.setProperty(RedshiftProperty.HOST.getName(), "localhost");
        }
        if (defaults == null || !defaults.containsKey(RedshiftProperty.DBNAME.getName())) {
          urlProps.setProperty(RedshiftProperty.DBNAME.getName(), URLCoder.decode(urlServer));
        }
      }
    }

    // parse the args part of the url
    String[] args = urlArgs.split("[;&]");
    for (String token : args) {
      if (token.isEmpty()) {
        continue;
      }
      int pos = token.indexOf('=');
      if (pos == -1) {
        urlProps.setProperty(token, "");
      } else {
        urlProps.setProperty(token.substring(0, pos), URLCoder.decode(token.substring(pos + 1)));
      }
    }

    urlProps = detectRegionAutomatically(urlProps);

    return urlProps;
  }

  /**
   * Parses the hostname to get connection properties
   * @param urlProps the redshift properties collection
   * @param host the hostname to parse.
   * @return the filled in redshift properties collection
   */
  public static RedshiftProperties parseHostName(RedshiftProperties urlProps, String host) {
    // Try first provision cluster endpoint host format.
    // Trying to infer clusterID and region,
    // ClusterID and region can be overridden by parameters.
    Matcher m = HOST_PATTERN.matcher(host);
    if (m.matches()) {
      urlProps.setProperty(RedshiftProperty.CLUSTER_IDENTIFIER.getName(), m.group(1));
      urlProps.setProperty(RedshiftProperty.AWS_REGION.getName(), m.group(3));
    }
    else {
      Matcher m2;

      // Try serverless endpoint host format with WorkGroup
      m2 = SERVERLESS_WORKGROUP_HOST_PATTERN.matcher(host);
      if (m2.matches()) {
        String awsRegion = RedshiftConnectionImpl.getOptionalConnSetting(RedshiftProperty.AWS_REGION.getName(), urlProps);

        String workGroup = m2.group(1);
        String acctId = m2.group(2);
        String region = m2.group(3);

//        urlProps.setProperty(RedshiftProperty.CLUSTER_IDENTIFIER.getName(), m.group(1));
        // An explicitly configured region wins over the one inferred from the host.
        if(awsRegion == null || awsRegion.length() == 0)
          urlProps.setProperty(RedshiftProperty.AWS_REGION.getName(), region);

        //urlProps.setProperty(RedshiftProperty.IS_SERVERLESS.getName(),"true");
        urlProps.setProperty(RedshiftProperty.SERVERLESS_ACCT_ID.getName(),acctId);
        urlProps.setProperty(RedshiftProperty.SERVERLESS_WORK_GROUP.getName(),workGroup);
      } // with workgroup
    }

    return urlProps;
  }

  /**
   * Determines the region automatically if not provided by user as part of jdbc url or additional properties
   * We do not need to do this for non-IAM url as driver does not use the region parameter for non-IAM endpoints
   * @param urlProps the redshift properties collection
   * @return the redshift properties collection with region if it was missing earlier and connection is iam auth
   */
  private static RedshiftProperties detectRegionAutomatically(RedshiftProperties urlProps) {
    if(null == urlProps.getProperty(RedshiftProperty.AWS_REGION.getName())
        && urlProps.getProperty(RedshiftProperty.IAM_AUTH.getName()).equalsIgnoreCase("true")) {
      //fetch region using jndi-dns from cname endpoint
      try {
        String cnameHost = urlProps.getProperty(RedshiftProperty.HOST.getName());
        Properties env = new Properties();
        env.put(Context.INITIAL_CONTEXT_FACTORY, "com.sun.jndi.dns.DnsContextFactory");
        InitialDirContext idc = new InitialDirContext(env);
        Attributes attrs = idc.getAttributes(cnameHost);
        Attribute attr = attrs.get("CNAME");
        String fqdn = attr.get().toString();
        // Re-parse the resolved canonical host to pick up cluster id / region.
        urlProps = parseHostName(urlProps, fqdn);
      } catch (Exception ex) {
        // Best-effort lookup: a missing CNAME (or any JNDI failure) simply leaves
        // the region unset rather than failing the connection attempt.
        if(RedshiftLogger.isEnable()) {
          logger.logInfo("No CNAME detected for URL");
        }
      }
    }
    return urlProps;
  }

  /**
   *
   * @param props the connection properties.
   * @return the address portion of the URL
   */
  public static HostSpec[] hostSpecs(Properties props) {
    HostSpec[] hostSpecs = null;
    String hostProp = props.getProperty(RedshiftProperty.HOST.getName());
    String portProp = props.getProperty(RedshiftProperty.PORT.getName());

    if (hostProp != null && portProp != null) {
      // HOST and PORT hold parallel comma-separated lists built by parseURL().
      String[] hosts = hostProp.split(",");
      String[] ports = portProp.split(",");
      hostSpecs = new HostSpec[hosts.length];
      for (int i = 0; i < hostSpecs.length; ++i) {
        hostSpecs[i] = new HostSpec(hosts[i], Integer.parseInt(ports[i]));
      }
    }

    return hostSpecs;
  }

  /**
   * @return the username of the URL
   */
  private static String user(Properties props) {
    // USER takes precedence; UID is accepted as a fallback alias.
    String user = props.getProperty(RedshiftProperty.USER.getName());

    if(user == null)
      user = props.getProperty(RedshiftProperty.UID.getName(), "");

    return user;
  }

  /**
   * @return the database name of the URL
   */
  private static String database(Properties props) {
    return props.getProperty(RedshiftProperty.DBNAME.getName(), "");
  }

  /**
   * @return the timeout from the URL, in milliseconds
   */
  private static long timeout(Properties props) {
    String timeout = RedshiftProperty.LOGIN_TIMEOUT.get(props);
    if (timeout != null) {
      try {
        // Property is expressed in (possibly fractional) seconds.
        return (long) (Float.parseFloat(timeout) * 1000);
      } catch (NumberFormatException e) {
        // Ignore the error.
      }
    }
    return (long) DriverManager.getLoginTimeout() * 1000;
  }

  /**
   * This method was added in v6.5, and simply throws an SQLException for an unimplemented method. I
   * decided to do it this way while implementing the JDBC2 extensions to JDBC, as it should help
   * keep the overall driver size down. It now requires the call Class and the function name to help
   * when the driver is used with closed software that don't report the stack trace
   *
   * @param callClass the call Class
   * @param functionName the name of the unimplemented function with the type of its arguments
   * @return PSQLException with a localized message giving the complete description of the
   *         unimplemented function
   */
  public static SQLFeatureNotSupportedException notImplemented(Class<?> callClass, String functionName) {
    return new SQLFeatureNotSupportedException(
        GT.tr("Method {0} is not yet implemented.", callClass.getName() + "." + functionName),
        RedshiftState.NOT_IMPLEMENTED.getState());
  }

  //JCP! if mvn.project.property.redshift.jdbc.spec >= "JDBC4.1"
  @Override
  public java.util.logging.Logger getParentLogger() throws SQLFeatureNotSupportedException {
    // java.util.logging.logger is not used in Redshift JDBC
    throw new SQLFeatureNotSupportedException ("java.util.logging is not used");
  }
  //JCP! endif

  public static SharedTimer getSharedTimer() {
    return sharedTimer;
  }

  /**
   * Register the driver against {@link DriverManager}. This is done automatically when the class is
   * loaded. Dropping the driver from DriverManager's list is possible using {@link #deregister()}
   * method.
   *
   * @throws IllegalStateException if the driver is already registered
   * @throws SQLException if registering the driver fails
   */
  public static void register() throws SQLException {
    if (isRegistered()) {
      throw new IllegalStateException(
          "Driver is already registered. It can only be registered once.");
    }
    Driver registeredDriver = new Driver();
    DriverManager.registerDriver(registeredDriver);
    Driver.registeredDriver = registeredDriver;
  }

  /**
   * According to JDBC specification, this driver is registered against {@link DriverManager} when
   * the class is loaded. To avoid leaks, this method allow unregistering the driver so that the
   * class can be gc'ed if necessary.
   *
   * @throws IllegalStateException if the driver is not registered
   * @throws SQLException if deregistering the driver fails
   */
  public static void deregister() throws SQLException {
    if (!isRegistered()) {
      throw new IllegalStateException(
          "Driver is not registered (or it has not been registered using Driver.register() method)");
    }
    DriverManager.deregisterDriver(registeredDriver);
    registeredDriver = null;
  }

  /**
   * @return {@code true} if the driver is registered against {@link DriverManager}
   */
  public static boolean isRegistered() {
    return registeredDriver != null;
  }

  /**
   * Get JDBC INI file, if any exist.
   *
   * Default file name is rsjdbc.ini.
   *
   * The file location is in the following search order in the driver:
   *
   * 1. IniFile as connection parameter either in URL or in connection property. IniFile must be full path including file name
   * 2. The environment variable such as AMAZON_REDSHIFT_JDBC_INI_FILE with full path with any name of the file user wants
   * 3. The directory from where the driver jar get loaded
   * 4. The user home directory
   * 5. The temp directory of the system
   *
   * @param props connection properties to read the IniFile setting from
   * @return file name if exist otherwise null.
   * @throws RedshiftException if an explicitly configured INI file does not exist
   */
  private String getJdbcIniFile(Properties props) throws RedshiftException {
    // 1. Get file name from URL/property
    String fileName = RedshiftConnectionImpl.getOptionalConnSetting(RedshiftProperty.INI_FILE.getName(), props);;

    if (!isFileExist(fileName, true)) {
      // 2. Get file name from environment variable
      fileName = System.getenv("AMAZON_REDSHIFT_JDBC_INI_FILE");

      if (!isFileExist(fileName, true)) {
        // 3. Get Driver Jar file location
        String filePath = null;

        if((Driver.class.getProtectionDomain().getCodeSource() != null)
            && (Driver.class.getProtectionDomain().getCodeSource().getLocation() != null))
          filePath = Driver.class.getProtectionDomain().getCodeSource().getLocation().getPath();

        if(filePath != null)
          filePath = filePath.substring(0,filePath.lastIndexOf("/") );

        fileName = getIniFullFileName(filePath);

        if(!isFileExist(fileName, false)) {
          // 4. Get USER directory
          filePath = System.getProperty("user.home");
          fileName = getIniFullFileName(filePath);

          if(!isFileExist(fileName, false)) {
            // 5. Get temp directory
            filePath = System.getProperty("java.io.tmpdir");
            fileName = getIniFullFileName(filePath);

            if(!isFileExist(fileName, false))
              fileName = null;
          }
        }
      }
    }

    return fileName;
  }

  // Joins a directory path with the default INI file name; returns null for
  // a null/empty path.
  private String getIniFullFileName(String filePath) {
    String fileName = null;

    if(filePath != null
        && filePath.length() > 0) {
      fileName = filePath + File.separator + DEFAULT_INI_FILE;
    }

    return fileName;
  }

  /**
   * Checks whether the given file exists.
   *
   * @param fileName full file name including path
   * @param fileMustExist when true, a configured-but-missing file is an error
   * @return true if the file exists, false otherwise
   * @throws RedshiftException if fileMustExist is true and the file does not exist
   */
  private boolean isFileExist(String fileName, boolean fileMustExist) throws RedshiftException {
    boolean fileExist = false;

    if (fileName != null && fileName.length() > 0) {
      File file = new File(fileName);
      if(!file.exists()) {
        if (fileMustExist) {
          throw new RedshiftException(
              GT.tr("JDBC INI file doesn't exist: ") + fileName,
              RedshiftState.UNEXPECTED_ERROR);
        }
      }
      else
        fileExist = true;
    }

    return fileExist;
  }

  /**
   * Read JDBC INI file and load properties.
   *
   * The driver loads connection properties as follows:
   * 1. Load default properties values as in the code
   * 2. Load [DRIVER] section properties from the INI file, if exist
   * 3. Load custom section properties from the INI file, if *IniSection* provided in the connection
   * 4. Load properties from connection property object given in the getConnection() call.
   * 5. Load properties from URL
   *
   * @param fileName full path of the INI file to read
   * @param props properties accumulated so far; these override INI values
   * @return the merged property set
   * @throws RedshiftException if the file cannot be read or a user-specified section is missing
   */
  private RedshiftProperties readJdbcIniFile(String fileName, RedshiftProperties props)
              throws RedshiftException {
    String connectionSectionName = RedshiftConnectionImpl.getOptionalConnSetting(RedshiftProperty.INI_SECTION.getName(), props);;
    String driverSectionName = DEFAULT_DRIVER_SECTION;

    try {
      IniFile iniFile = new IniFile(fileName);
      Map<String, String> kv;
      Properties driverSectionProps = null;
      Properties connectionSectionProps = null;

      // Load properties from DRIVER section
      kv = iniFile.getAllKeyVals(driverSectionName);

      if (kv != null) {
        driverSectionProps = new Properties();
        driverSectionProps.putAll(kv);
      }

      // Load properties from IniSection provided by user
      if (connectionSectionName != null) {
        kv = iniFile.getAllKeyVals(connectionSectionName);
        if(kv != null) {
          connectionSectionProps = new Properties();
          connectionSectionProps.putAll(kv);
        }
        else {
          throw new RedshiftException(
              GT.tr("User specified section " + connectionSectionName + " not found in the JDBC INI file " + fileName),
              RedshiftState.UNEXPECTED_ERROR);
        }
      }

      if (driverSectionProps != null
            || connectionSectionProps != null) {
        // Get default properties from original props
        RedshiftProperties iniProps = new RedshiftProperties(props);

        // Add driver section props
        if (driverSectionProps != null) {
          iniProps.putAll(driverSectionProps);
        }

        // Add IniSection props
        if (connectionSectionProps != null) {
          iniProps.putAll(connectionSectionProps);
        }

        // URL and user connection props override INI pros
        iniProps.putAll(props);

        props = iniProps;
      }
    } catch (IOException e) {
      throw new RedshiftException(
          GT.tr("Error loading JDBC INI file: ") + fileName,
          RedshiftState.UNEXPECTED_ERROR, e);
    }

    return props;
  }
}
8,293
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/RedshiftStatement.java
/*
 * Copyright (c) 2003, PostgreSQL Global Development Group
 * See the LICENSE file in the project root for more information.
 */

package com.amazon.redshift;

import java.sql.SQLException;

/**
 * This interface defines the public Redshift extensions to java.sql.Statement. All Statements
 * constructed by the Redshift driver implement RedshiftStatement.
 */
public interface RedshiftStatement {
  // We can't use Long.MAX_VALUE or Long.MIN_VALUE for java.sql.date
  // because this would break the 'normalization contract' of the
  // java.sql.Date API.
  // The following values are the nearest MAX/MIN values with hour,
  // minute, second, millisecond set to 0 - this is used for
  // -infinity / infinity representation in Java.
  long DATE_POSITIVE_INFINITY = 9223372036825200000L;
  long DATE_NEGATIVE_INFINITY = -9223372036832400000L;
  long DATE_POSITIVE_SMALLER_INFINITY = 185543533774800000L;
  long DATE_NEGATIVE_SMALLER_INFINITY = -185543533774800000L;

  /**
   * Returns the last inserted/updated oid.
   *
   * @return OID of last insert
   * @throws SQLException if something goes wrong
   * @since 7.3
   */
  long getLastOID() throws SQLException;

  /**
   * Turn on the use of prepared statements in the server (server side prepared statements are
   * unrelated to jdbc PreparedStatements). As of build 302, this method is equivalent to
   * <code>setPrepareThreshold(1)</code>.
   *
   * @param flag use server prepare
   * @throws SQLException if something goes wrong
   * @since 7.3
   * @deprecated As of build 302, replaced by {@link #setPrepareThreshold(int)}
   */
  @Deprecated
  void setUseServerPrepare(boolean flag) throws SQLException;

  /**
   * Checks if this statement will be executed as a server-prepared statement. A return value of
   * <code>true</code> indicates that the next execution of the statement will be done as a
   * server-prepared statement, assuming the underlying protocol supports it.
   *
   * @return true if the next reuse of this statement will use a server-prepared statement
   */
  boolean isUseServerPrepare();

  /**
   * <p>Sets the reuse threshold for using server-prepared statements.</p>
   *
   * <p>If <code>threshold</code> is a non-zero value N, the Nth and subsequent reuses of a
   * PreparedStatement will use server-side prepare.</p>
   *
   * <p>If <code>threshold</code> is zero, server-side prepare will not be used.</p>
   *
   * <p>The reuse threshold is only used by PreparedStatement and CallableStatement objects; it is
   * ignored for plain Statements.</p>
   *
   * @param threshold the new threshold for this statement
   * @throws SQLException if an exception occurs while changing the threshold
   * @since build 302
   */
  void setPrepareThreshold(int threshold) throws SQLException;

  /**
   * Gets the server-side prepare reuse threshold in use for this statement.
   *
   * @return the current threshold
   * @see #setPrepareThreshold(int)
   * @since build 302
   */
  int getPrepareThreshold();
}
8,294
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/CredentialsHolder.java
package com.amazon.redshift;

import java.util.Date;

import com.amazon.redshift.plugin.utils.RequestUtils;

import com.amazonaws.auth.AWSCredentials;
import com.amazonaws.auth.AWSSessionCredentials;

/**
 * Wraps an {@link AWSCredentials} instance together with an expiration timestamp and optional
 * IAM metadata, so cached credentials can be checked for staleness before reuse.
 */
public class CredentialsHolder implements AWSCredentials {
  /** Default lifetime applied when no explicit expiration is supplied: 15 minutes. */
  private static final long DEFAULT_EXPIRATION_MS = 15 * 60 * 1000L;

  protected AWSCredentials m_credentials;
  private Date m_expiration;
  private IamMetadata m_metadata;

  // true means newly added, false means it came from the cache.
  private boolean refresh;

  protected CredentialsHolder(AWSCredentials credentials) {
    this(credentials, new Date(System.currentTimeMillis() + DEFAULT_EXPIRATION_MS));
  }

  protected CredentialsHolder(AWSCredentials credentials, Date expiration) {
    this.m_credentials = credentials;
    if (credentials instanceof CredentialsHolder) {
      // Unwrap a nested holder so its metadata and expiration are not lost when re-wrapping.
      CredentialsHolder h = (CredentialsHolder) credentials;
      this.m_metadata = h.getMetadata();
      this.m_expiration = h.getExpiration();
    } else {
      this.m_expiration = expiration;
    }
  }

  /**
   * Creates a holder for the given credentials, choosing the session-aware subclass when the
   * credentials carry a session token.
   *
   * @param credentials credentials to wrap
   * @return a CredentialsHolder (or session-aware variant) around the given credentials
   */
  public static CredentialsHolder newInstance(AWSCredentials credentials) {
    if (credentials instanceof AWSSessionCredentials) {
      return new SessionCredentialsHolder(credentials);
    }
    return new CredentialsHolder(credentials);
  }

  /**
   * Creates a holder with an explicit expiration, choosing the session-aware subclass when the
   * credentials carry a session token.
   *
   * @param credentials credentials to wrap
   * @param expiration when the credentials expire
   * @return a CredentialsHolder (or session-aware variant) around the given credentials
   */
  public static CredentialsHolder newInstance(AWSCredentials credentials, Date expiration) {
    if (credentials instanceof AWSSessionCredentials) {
      return new SessionCredentialsHolder(credentials, expiration);
    }
    return new CredentialsHolder(credentials, expiration);
  }

  /**
   * @return The AWS Access Key ID for this credentials object.
   */
  @Override
  public String getAWSAccessKeyId() {
    return m_credentials.getAWSAccessKeyId();
  }

  @Override
  public String getAWSSecretKey() {
    return m_credentials.getAWSSecretKey();
  }

  /** @return true when the wrapped credentials have passed their expiration time. */
  public boolean isExpired() {
    return RequestUtils.isCredentialExpired(m_expiration);
  }

  public Date getExpiration() {
    return m_expiration;
  }

  /** @return a defensive copy of the metadata, or null if none is set. */
  public IamMetadata getMetadata() {
    if (m_metadata == null) {
      return null;
    }
    return m_metadata.clone();
  }

  /** @return the live metadata instance (no copy), or null if none is set. */
  public IamMetadata getThisMetadata() {
    if (m_metadata == null) {
      return null;
    }
    return m_metadata;
  }

  public void setRefresh(boolean flag) {
    refresh = flag;
  }

  public boolean isRefresh() {
    return refresh;
  }

  public void setMetadata(IamMetadata metadata) {
    this.m_metadata = metadata;
  }

  /** Holder variant that also exposes the AWS session token. */
  private static final class SessionCredentialsHolder extends CredentialsHolder
      implements AWSSessionCredentials {
    protected SessionCredentialsHolder(AWSCredentials credentials) {
      super(credentials);
    }

    protected SessionCredentialsHolder(AWSCredentials credentials, Date expiration) {
      super(credentials, expiration);
    }

    @Override
    public String getSessionToken() {
      return ((AWSSessionCredentials) m_credentials).getSessionToken();
    }
  }

  /** IAM-related settings gathered from connection properties and/or the SAML response. */
  public static final class IamMetadata implements Cloneable {
    private Boolean autoCreate;

    /** Connection string setting. */
    private String dbUser;

    /** Value from SAML assertion. */
    private String samlDbUser;

    /** Connection profile setting. */
    private String profileDbUser;

    private String dbGroups;

    /**
     * Property set by the datasource. We extract its value from SAML response.
     * If it's true, the dbUser in SAML response overwrites the dbUser passed in connection
     * string.
     */
    private boolean allowDbUserOverride = false;

    /** Forces the passed in dbGroups setting to be lower case. */
    private boolean forceLowercase = false;

    public Boolean getAutoCreate() {
      return autoCreate;
    }

    public String getDbUser() {
      return dbUser;
    }

    public void setDbUser(String dbUser) {
      this.dbUser = dbUser;
    }

    public void setAutoCreate(Boolean autoCreate) {
      this.autoCreate = autoCreate;
    }

    public String getSamlDbUser() {
      return samlDbUser;
    }

    public void setSamlDbUser(String dbUser) {
      this.samlDbUser = dbUser;
    }

    public String getProfileDbUser() {
      return profileDbUser;
    }

    public void setProfileDbUser(String dbUser) {
      this.profileDbUser = dbUser;
    }

    public String getDbGroups() {
      return dbGroups;
    }

    public void setDbGroups(String dbGroups) {
      this.dbGroups = dbGroups;
    }

    public boolean getForceLowercase() {
      return forceLowercase;
    }

    public void setForceLowercase(boolean forceLowercase) {
      this.forceLowercase = forceLowercase;
    }

    public boolean getAllowDbUserOverride() {
      return allowDbUserOverride;
    }

    public void setAllowDbUserOverride(boolean allowDbUserOverride) {
      this.allowDbUserOverride = allowDbUserOverride;
    }

    @Override
    public IamMetadata clone() {
      try {
        return (IamMetadata) super.clone();
      } catch (CloneNotSupportedException e) {
        // Cannot happen: this class is final and implements Cloneable.
        return null;
      }
    }
  }
}
8,295
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/RedshiftResultSetMetaData.java
/* * Copyright (c) 2003, PostgreSQL Global Development Group * See the LICENSE file in the project root for more information. */ package com.amazon.redshift; import com.amazon.redshift.core.Field; import java.sql.SQLException; public interface RedshiftResultSetMetaData { /** * Returns the underlying column name of a query result, or "" if it is unable to be determined. * * @param column column position (1-based) * @return underlying column name of a query result * @throws SQLException if something wrong happens * @since 8.0 */ String getBaseColumnName(int column) throws SQLException; /** * Returns the underlying table name of query result, or "" if it is unable to be determined. * * @param column column position (1-based) * @return underlying table name of query result * @throws SQLException if something wrong happens * @since 8.0 */ String getBaseTableName(int column) throws SQLException; /** * Returns the underlying schema name of query result, or "" if it is unable to be determined. * * @param column column position (1-based) * @return underlying schema name of query result * @throws SQLException if something wrong happens * @since 8.0 */ String getBaseSchemaName(int column) throws SQLException; /** * Is a column Text or Binary? * * @param column column position (1-based) * @return 0 if column data foramt is TEXT, or 1 if BINARY * @throws SQLException if something wrong happens * @see Field#BINARY_FORMAT * @see Field#TEXT_FORMAT * @since 9.4 */ int getFormat(int column) throws SQLException; }
8,296
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/INativePlugin.java
package com.amazon.redshift; import com.amazon.redshift.logger.RedshiftLogger; import com.amazon.redshift.util.RedshiftException; public interface INativePlugin { void addParameter(String key, String value); void setLogger(RedshiftLogger log); String getPluginSpecificCacheKey(); String getIdpToken() throws RedshiftException; String getCacheKey(); NativeTokenHolder getCredentials() throws RedshiftException; void refresh() throws RedshiftException; }
8,297
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/RedshiftRefCursorResultSet.java
/* * Copyright (c) 2003, PostgreSQL Global Development Group * See the LICENSE file in the project root for more information. */ package com.amazon.redshift; /** * A ref cursor based result set. * * @deprecated As of 8.0, this interface is only present for backwards- compatibility purposes. New * code should call getString() on the ResultSet that contains the refcursor to obtain * the underlying cursor name. */ @Deprecated public interface RedshiftRefCursorResultSet { /** * @return the name of the cursor. * @deprecated As of 8.0, replaced with calling getString() on the ResultSet that this ResultSet * was obtained from. */ @Deprecated String getRefCursor(); }
8,298
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/IPlugin.java
package com.amazon.redshift; import com.amazon.redshift.logger.RedshiftLogger; import com.amazonaws.auth.AWSCredentialsProvider; public interface IPlugin extends AWSCredentialsProvider { void addParameter(String key, String value); void setLogger(RedshiftLogger log); String getPluginSpecificCacheKey(); void setGroupFederation(boolean groupFederation); String getIdpToken(); String getCacheKey(); int getSubType(); }
8,299