repo stringclasses 1k
values | file_url stringlengths 96 373 | file_path stringlengths 11 294 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 6
values | commit_sha stringclasses 1k
values | retrieved_at stringdate 2026-01-04 14:45:56 2026-01-04 18:30:23 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/redis-persistence/src/test/java/com/netflix/conductor/redis/config/utils/RedisQueuesShardingStrategyProviderTest.java | redis-persistence/src/test/java/com/netflix/conductor/redis/config/utils/RedisQueuesShardingStrategyProviderTest.java | /*
* Copyright 2020 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.redis.config.utils;
import java.util.Collections;
import org.junit.Test;
import com.netflix.conductor.redis.config.RedisProperties;
import com.netflix.conductor.redis.dynoqueue.RedisQueuesShardingStrategyProvider;
import com.netflix.dyno.queues.Message;
import com.netflix.dyno.queues.ShardSupplier;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
public class RedisQueuesShardingStrategyProviderTest {

    /** LocalOnlyStrategy must always route to the supplier's current shard. */
    @Test
    public void testStrategy() {
        ShardSupplier supplier = mock(ShardSupplier.class);
        when(supplier.getCurrentShard()).thenReturn("current");
        RedisQueuesShardingStrategyProvider.LocalOnlyStrategy strategy =
                new RedisQueuesShardingStrategyProvider.LocalOnlyStrategy(supplier);
        assertEquals(
                "current", strategy.getNextShard(Collections.emptyList(), new Message("a", "b")));
    }

    /** The provider must hand out LocalOnlyStrategy when "localOnly" is configured. */
    @Test
    public void testProvider() {
        ShardSupplier supplier = mock(ShardSupplier.class);
        RedisProperties props = mock(RedisProperties.class);
        when(props.getQueueShardingStrategy()).thenReturn("localOnly");
        RedisQueuesShardingStrategyProvider provider =
                new RedisQueuesShardingStrategyProvider(supplier, props);
        assertTrue(provider.get() instanceof RedisQueuesShardingStrategyProvider.LocalOnlyStrategy);
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/redis-persistence/src/main/java/com/netflix/conductor/redis/dao/RedisPollDataDAO.java | redis-persistence/src/main/java/com/netflix/conductor/redis/dao/RedisPollDataDAO.java | /*
* Copyright 2022 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.redis.dao;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import org.apache.commons.lang3.StringUtils;
import org.springframework.context.annotation.Conditional;
import org.springframework.stereotype.Component;
import com.netflix.conductor.common.metadata.tasks.PollData;
import com.netflix.conductor.core.config.ConductorProperties;
import com.netflix.conductor.dao.PollDataDAO;
import com.netflix.conductor.redis.config.AnyRedisCondition;
import com.netflix.conductor.redis.config.RedisProperties;
import com.netflix.conductor.redis.jedis.JedisProxy;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.common.base.Preconditions;
/**
 * {@link PollDataDAO} backed by Redis. Poll data lives in one hash per task-definition
 * queue; the hash field is the domain, with "DEFAULT" used when no domain is given.
 */
@Component
@Conditional(AnyRedisCondition.class)
public class RedisPollDataDAO extends BaseDynoDAO implements PollDataDAO {

    private static final String POLL_DATA = "POLL_DATA";

    public RedisPollDataDAO(
            JedisProxy jedisProxy,
            ObjectMapper objectMapper,
            ConductorProperties conductorProperties,
            RedisProperties properties) {
        super(jedisProxy, objectMapper, conductorProperties, properties);
    }

    /** Records the latest poll for the (taskDefName, domain) pair by the given worker. */
    @Override
    public void updateLastPollData(String taskDefName, String domain, String workerId) {
        Preconditions.checkNotNull(taskDefName, "taskDefName name cannot be null");
        PollData pollData = new PollData(taskDefName, domain, workerId, System.currentTimeMillis());
        String hashKey = nsKey(POLL_DATA, pollData.getQueueName());
        String hashField = domain == null ? "DEFAULT" : domain;
        String json = toJson(pollData);
        recordRedisDaoRequests("updatePollData");
        recordRedisDaoPayloadSize("updatePollData", json.length(), "n/a", "n/a");
        jedisProxy.hset(hashKey, hashField, json);
    }

    /** Returns the last poll data for the (taskDefName, domain) pair, or null if none. */
    @Override
    public PollData getPollData(String taskDefName, String domain) {
        Preconditions.checkNotNull(taskDefName, "taskDefName name cannot be null");
        String hashKey = nsKey(POLL_DATA, taskDefName);
        String hashField = domain == null ? "DEFAULT" : domain;
        String json = jedisProxy.hget(hashKey, hashField);
        recordRedisDaoRequests("getPollData");
        recordRedisDaoPayloadSize("getPollData", StringUtils.length(json), "n/a", "n/a");
        return StringUtils.isNotBlank(json) ? readValue(json, PollData.class) : null;
    }

    /** Returns the poll data for every domain of the given task definition. */
    @Override
    public List<PollData> getPollData(String taskDefName) {
        Preconditions.checkNotNull(taskDefName, "taskDefName name cannot be null");
        Map<String, String> domainToJson = jedisProxy.hgetAll(nsKey(POLL_DATA, taskDefName));
        List<PollData> result = new ArrayList<>();
        if (domainToJson != null) {
            for (String json : domainToJson.values()) {
                result.add(readValue(json, PollData.class));
                recordRedisDaoRequests("getPollData");
                recordRedisDaoPayloadSize("getPollData", json.length(), "n/a", "n/a");
            }
        }
        return result;
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/redis-persistence/src/main/java/com/netflix/conductor/redis/dao/RedisExecutionDAO.java | redis-persistence/src/main/java/com/netflix/conductor/redis/dao/RedisExecutionDAO.java | /*
* Copyright 2022 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.redis.dao;
import java.text.SimpleDateFormat;
import java.util.*;
import java.util.stream.Collectors;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.context.annotation.Conditional;
import org.springframework.stereotype.Component;
import com.netflix.conductor.common.metadata.events.EventExecution;
import com.netflix.conductor.common.metadata.tasks.TaskDef;
import com.netflix.conductor.core.config.ConductorProperties;
import com.netflix.conductor.core.exception.TransientException;
import com.netflix.conductor.dao.ConcurrentExecutionLimitDAO;
import com.netflix.conductor.dao.ExecutionDAO;
import com.netflix.conductor.metrics.Monitors;
import com.netflix.conductor.model.TaskModel;
import com.netflix.conductor.model.WorkflowModel;
import com.netflix.conductor.redis.config.AnyRedisCondition;
import com.netflix.conductor.redis.config.RedisProperties;
import com.netflix.conductor.redis.jedis.JedisProxy;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
@Component
@Conditional(AnyRedisCondition.class)
public class RedisExecutionDAO extends BaseDynoDAO
implements ExecutionDAO, ConcurrentExecutionLimitDAO {
public static final Logger LOGGER = LoggerFactory.getLogger(RedisExecutionDAO.class);
// Keys Families
private static final String TASK_LIMIT_BUCKET = "TASK_LIMIT_BUCKET";
private static final String IN_PROGRESS_TASKS = "IN_PROGRESS_TASKS";
private static final String TASKS_IN_PROGRESS_STATUS =
"TASKS_IN_PROGRESS_STATUS"; // Tasks which are in IN_PROGRESS status.
private static final String WORKFLOW_TO_TASKS = "WORKFLOW_TO_TASKS";
private static final String SCHEDULED_TASKS = "SCHEDULED_TASKS";
private static final String TASK = "TASK";
private static final String WORKFLOW = "WORKFLOW";
private static final String PENDING_WORKFLOWS = "PENDING_WORKFLOWS";
private static final String WORKFLOW_DEF_TO_WORKFLOWS = "WORKFLOW_DEF_TO_WORKFLOWS";
private static final String CORR_ID_TO_WORKFLOWS = "CORR_ID_TO_WORKFLOWS";
private static final String EVENT_EXECUTION = "EVENT_EXECUTION";
private final int ttlEventExecutionSeconds;
/** Wires the DAO with its Redis proxy, JSON mapper, and configuration. */
public RedisExecutionDAO(
        JedisProxy jedisProxy,
        ObjectMapper objectMapper,
        ConductorProperties conductorProperties,
        RedisProperties properties) {
    super(jedisProxy, objectMapper, conductorProperties, properties);
    // TTL (seconds) applied to EVENT_EXECUTION hashes; values <= 0 disable expiry.
    ttlEventExecutionSeconds = (int) properties.getEventExecutionPersistenceTTL().getSeconds();
}
/** Formats an epoch-millis timestamp as yyyyMMdd (default time zone). */
private static String dateStr(Long timeInMs) {
    return dateStr(new Date(timeInMs));
}
/** Formats a Date as yyyyMMdd; a fresh formatter per call keeps this thread-safe. */
private static String dateStr(Date date) {
    return new SimpleDateFormat("yyyyMMdd").format(date);
}
/**
 * Returns the yyyyMMdd strings for each day stepped from startdatems up to enddatems.
 * NOTE(review): the walk advances in whole days from the start *instant*, so whether
 * the last calendar day is included depends on the time-of-day of both bounds —
 * confirm this is intended before changing the loop.
 */
private static List<String> dateStrBetweenDates(Long startdatems, Long enddatems) {
    List<String> dates = new ArrayList<>();
    Calendar calendar = new GregorianCalendar();
    Date startdate = new Date(startdatems);
    Date enddate = new Date(enddatems);
    calendar.setTime(startdate);
    // Inclusive of the end instant itself.
    while (calendar.getTime().before(enddate) || calendar.getTime().equals(enddate)) {
        Date result = calendar.getTime();
        dates.add(dateStr(result));
        calendar.add(Calendar.DATE, 1);
    }
    return dates;
}
/** Returns the pending tasks of the given type that belong to the given workflow. */
@Override
public List<TaskModel> getPendingTasksByWorkflow(String taskName, String workflowId) {
    // Narrow all pending tasks of this type down to the requested workflow instance.
    return getPendingTasksForTaskType(taskName).stream()
            .filter(task -> task.getWorkflowInstanceId().equals(workflowId))
            .collect(Collectors.toList());
}
/**
 * Pages over the pending tasks of a task type.
 *
 * @param taskDefName task definition name whose pending tasks are scanned
 * @param startKey task id after which results begin; null starts from the first task
 * @param count maximum number of tasks returned
 * @return up to {@code count} pending tasks following {@code startKey}
 */
@Override
public List<TaskModel> getTasks(String taskDefName, String startKey, int count) {
    List<TaskModel> tasks = new LinkedList<>();
    List<TaskModel> pendingTasks = getPendingTasksForTaskType(taskDefName);
    // A null startKey means "no anchor": collect from the beginning.
    boolean startKeyFound = startKey == null;
    int foundcount = 0;
    for (TaskModel pendingTask : pendingTasks) {
        if (!startKeyFound) {
            if (pendingTask.getTaskId().equals(startKey)) {
                startKeyFound = true;
                // Skip the anchor task itself; results start after it.
                if (startKey != null) {
                    continue;
                }
            }
        }
        if (startKeyFound && foundcount < count) {
            tasks.add(pendingTask);
            foundcount++;
        }
    }
    return tasks;
}
/**
 * Schedules the given tasks, de-duplicating on the workflow's SCHEDULED_TASKS hash:
 * a task whose (referenceTaskName + retryCount) field already exists is skipped.
 *
 * @param tasks tasks to create
 * @return the subset of tasks that were actually created
 */
@Override
public List<TaskModel> createTasks(List<TaskModel> tasks) {
    List<TaskModel> tasksCreated = new LinkedList<>();
    for (TaskModel task : tasks) {
        validate(task);
        recordRedisDaoRequests("createTask", task.getTaskType(), task.getWorkflowType());
        // Dedupe key: reference task name concatenated with retry count (no separator).
        String taskKey = task.getReferenceTaskName() + "" + task.getRetryCount();
        Long added =
                jedisProxy.hset(
                        nsKey(SCHEDULED_TASKS, task.getWorkflowInstanceId()),
                        taskKey,
                        task.getTaskId());
        // hset returns 0 when the field already existed -> already scheduled, skip.
        if (added < 1) {
            LOGGER.debug(
                    "Task already scheduled, skipping the run "
                            + task.getTaskId()
                            + ", ref="
                            + task.getReferenceTaskName()
                            + ", key="
                            + taskKey);
            continue;
        }
        // Stamp the scheduled time for freshly scheduled, non-terminal tasks.
        if (task.getStatus() != null
                && !task.getStatus().isTerminal()
                && task.getScheduledTime() == 0) {
            task.setScheduledTime(System.currentTimeMillis());
        }
        correlateTaskToWorkflowInDS(task.getTaskId(), task.getWorkflowInstanceId());
        LOGGER.debug(
                "Scheduled task added to WORKFLOW_TO_TASKS workflowId: {}, taskId: {}, taskType: {} during createTasks",
                task.getWorkflowInstanceId(),
                task.getTaskId(),
                task.getTaskType());
        String inProgressTaskKey = nsKey(IN_PROGRESS_TASKS, task.getTaskDefName());
        jedisProxy.sadd(inProgressTaskKey, task.getTaskId());
        LOGGER.debug(
                "Scheduled task added to IN_PROGRESS_TASKS with inProgressTaskKey: {}, workflowId: {}, taskId: {}, taskType: {} during createTasks",
                inProgressTaskKey,
                task.getWorkflowInstanceId(),
                task.getTaskId(),
                task.getTaskType());
        // Persist the payload and maintain the concurrency-limit sets.
        updateTask(task);
        tasksCreated.add(task);
    }
    return tasksCreated;
}
/**
 * Persists the task payload and maintains the bookkeeping sets: TASKS_IN_PROGRESS_STATUS
 * (concurrency-limited task defs), TASK_LIMIT_BUCKET (rate-limit sorted set),
 * IN_PROGRESS_TASKS (pending tasks per def), and WORKFLOW_TO_TASKS (task index).
 *
 * @param task the task to persist; its taskId is the storage key
 */
@Override
public void updateTask(TaskModel task) {
    Optional<TaskDef> taskDefinition = task.getTaskDefinition();
    // Concurrency-limit bookkeeping applies only when the task def declares a limit.
    if (taskDefinition.isPresent() && taskDefinition.get().concurrencyLimit() > 0) {
        String tasksInProgressKey = nsKey(TASKS_IN_PROGRESS_STATUS, task.getTaskDefName());
        if (task.getStatus() != null && task.getStatus().equals(TaskModel.Status.IN_PROGRESS)) {
            jedisProxy.sadd(tasksInProgressKey, task.getTaskId());
            // BUGFIX: previously logged nsKey(..., taskDefName, taskId), which is not the
            // key actually written above; log the real key.
            LOGGER.debug(
                    "Workflow Task added to TASKS_IN_PROGRESS_STATUS with tasksInProgressKey: {}, workflowId: {}, taskId: {}, taskType: {}, taskStatus: {} during updateTask",
                    tasksInProgressKey,
                    task.getWorkflowInstanceId(),
                    task.getTaskId(),
                    task.getTaskType(),
                    task.getStatus().name());
        } else {
            jedisProxy.srem(tasksInProgressKey, task.getTaskId());
            LOGGER.debug(
                    "Workflow Task removed from TASKS_IN_PROGRESS_STATUS with tasksInProgressKey: {}, workflowId: {}, taskId: {}, taskType: {}, taskStatus: {} during updateTask",
                    tasksInProgressKey,
                    task.getWorkflowInstanceId(),
                    task.getTaskId(),
                    task.getTaskType(),
                    task.getStatus().name());
            // Non-IN_PROGRESS tasks also vacate the rate-limit bucket.
            String key = nsKey(TASK_LIMIT_BUCKET, task.getTaskDefName());
            jedisProxy.zrem(key, task.getTaskId());
            LOGGER.debug(
                    "Workflow Task removed from TASK_LIMIT_BUCKET with taskLimitBucketKey: {}, workflowId: {}, taskId: {}, taskType: {}, taskStatus: {} during updateTask",
                    key,
                    task.getWorkflowInstanceId(),
                    task.getTaskId(),
                    task.getTaskType(),
                    task.getStatus().name());
        }
    }
    String payload = toJson(task);
    recordRedisDaoPayloadSize(
            "updateTask",
            payload.length(),
            taskDefinition.map(TaskDef::getName).orElse("n/a"),
            task.getWorkflowType());
    recordRedisDaoRequests("updateTask", task.getTaskType(), task.getWorkflowType());
    jedisProxy.set(nsKey(TASK, task.getTaskId()), payload);
    LOGGER.debug(
            "Workflow task payload saved to TASK with taskKey: {}, workflowId: {}, taskId: {}, taskType: {} during updateTask",
            nsKey(TASK, task.getTaskId()),
            task.getWorkflowInstanceId(),
            task.getTaskId(),
            task.getTaskType());
    // Terminal tasks leave the pending set for their task definition.
    if (task.getStatus() != null && task.getStatus().isTerminal()) {
        String inProgressTasksKey = nsKey(IN_PROGRESS_TASKS, task.getTaskDefName());
        jedisProxy.srem(inProgressTasksKey, task.getTaskId());
        // BUGFIX: this log previously claimed the key family was TASKS_IN_PROGRESS_STATUS,
        // but the removal above targets IN_PROGRESS_TASKS.
        LOGGER.debug(
                "Workflow Task removed from IN_PROGRESS_TASKS with inProgressTasksKey: {}, workflowId: {}, taskId: {}, taskType: {}, taskStatus: {} during updateTask",
                inProgressTasksKey,
                task.getWorkflowInstanceId(),
                task.getTaskId(),
                task.getTaskType(),
                task.getStatus().name());
    }
    // Ensure the workflow->task mapping exists (covers tasks updated outside createTasks).
    Set<String> taskIds =
            jedisProxy.smembers(nsKey(WORKFLOW_TO_TASKS, task.getWorkflowInstanceId()));
    if (!taskIds.contains(task.getTaskId())) {
        correlateTaskToWorkflowInDS(task.getTaskId(), task.getWorkflowInstanceId());
    }
}
/**
 * Returns true when the task must wait because its task definition's concurrency
 * limit is reached; also maintains the TASK_LIMIT_BUCKET sorted set (scored by
 * arrival time) and cleans up stale bucket entries.
 */
@Override
public boolean exceedsLimit(TaskModel task) {
    Optional<TaskDef> taskDefinition = task.getTaskDefinition();
    if (taskDefinition.isEmpty()) {
        return false;
    }
    int limit = taskDefinition.get().concurrencyLimit();
    if (limit <= 0) {
        // No limit configured -> never rate limited.
        return false;
    }
    long current = getInProgressTaskCount(task.getTaskDefName());
    if (current >= limit) {
        LOGGER.info(
                "Task execution count limited. task - {}:{}, limit: {}, current: {}",
                task.getTaskId(),
                task.getTaskDefName(),
                limit,
                current);
        Monitors.recordTaskConcurrentExecutionLimited(task.getTaskDefName(), limit);
        return true;
    }
    String rateLimitKey = nsKey(TASK_LIMIT_BUCKET, task.getTaskDefName());
    double score = System.currentTimeMillis();
    String taskId = task.getTaskId();
    // NX add: keep the task's original arrival score if it is already queued.
    jedisProxy.zaddnx(rateLimitKey, score, taskId);
    recordRedisDaoRequests("checkTaskRateLimiting", task.getTaskType(), task.getWorkflowType());
    // NOTE(review): the count passed here is Integer.MAX_VALUE, so `ids` contains every
    // queued id in [0, score+1] and `rateLimited` can rarely (if ever) be true —
    // confirm whether the count argument was meant to be `limit`.
    Set<String> ids = jedisProxy.zrangeByScore(rateLimitKey, 0, score + 1, Integer.MAX_VALUE);
    boolean rateLimited = !ids.contains(taskId);
    if (rateLimited) {
        LOGGER.info(
                "Task execution count limited. task - {}:{}, limit: {}, current: {}",
                task.getTaskId(),
                task.getTaskDefName(),
                limit,
                current);
        String inProgressKey = nsKey(TASKS_IN_PROGRESS_STATUS, task.getTaskDefName());
        // Cleanup any items that are still present in the rate limit bucket but not in progress
        // anymore!
        ids.stream()
                .filter(id -> !jedisProxy.sismember(inProgressKey, id))
                .forEach(id2 -> jedisProxy.zrem(rateLimitKey, id2));
        Monitors.recordTaskRateLimited(task.getTaskDefName(), limit);
    }
    return rateLimited;
}
/**
 * Removes every secondary-index entry for the task: scheduled hash field,
 * in-progress sets, workflow->tasks set, and rate-limit bucket.
 */
private void removeTaskMappings(TaskModel task) {
    // Same dedupe key format used by createTasks: refName + retryCount, no separator.
    String taskKey = task.getReferenceTaskName() + "" + task.getRetryCount();
    jedisProxy.hdel(nsKey(SCHEDULED_TASKS, task.getWorkflowInstanceId()), taskKey);
    jedisProxy.srem(nsKey(IN_PROGRESS_TASKS, task.getTaskDefName()), task.getTaskId());
    jedisProxy.srem(nsKey(WORKFLOW_TO_TASKS, task.getWorkflowInstanceId()), task.getTaskId());
    jedisProxy.srem(nsKey(TASKS_IN_PROGRESS_STATUS, task.getTaskDefName()), task.getTaskId());
    jedisProxy.zrem(nsKey(TASK_LIMIT_BUCKET, task.getTaskDefName()), task.getTaskId());
}
/**
 * Same as {@link #removeTaskMappings(TaskModel)} except the WORKFLOW_TO_TASKS entry is
 * deliberately left in place — the caller expires that key as a whole instead.
 */
private void removeTaskMappingsWithExpiry(TaskModel task) {
    String taskKey = task.getReferenceTaskName() + "" + task.getRetryCount();
    jedisProxy.hdel(nsKey(SCHEDULED_TASKS, task.getWorkflowInstanceId()), taskKey);
    jedisProxy.srem(nsKey(IN_PROGRESS_TASKS, task.getTaskDefName()), task.getTaskId());
    jedisProxy.srem(nsKey(TASKS_IN_PROGRESS_STATUS, task.getTaskDefName()), task.getTaskId());
    jedisProxy.zrem(nsKey(TASK_LIMIT_BUCKET, task.getTaskDefName()), task.getTaskId());
}
/**
 * Deletes a task's payload and all of its index entries.
 *
 * @return true if the task existed and was removed, false otherwise
 */
@Override
public boolean removeTask(String taskId) {
    TaskModel existing = getTask(taskId);
    if (existing != null) {
        removeTaskMappings(existing);
        jedisProxy.del(nsKey(TASK, existing.getTaskId()));
        recordRedisDaoRequests("removeTask", existing.getTaskType(), existing.getWorkflowType());
        return true;
    }
    LOGGER.warn("No such task found by id {}", taskId);
    return false;
}
/**
 * Like {@link #removeTask(String)} but the task payload is left to expire via TTL
 * instead of being deleted immediately.
 *
 * @return true if the task existed, false otherwise
 */
private boolean removeTaskWithExpiry(String taskId, int ttlSeconds) {
    TaskModel task = getTask(taskId);
    if (task == null) {
        LOGGER.warn("No such task found by id {}", taskId);
        return false;
    }
    removeTaskMappingsWithExpiry(task);
    jedisProxy.expire(nsKey(TASK, task.getTaskId()), ttlSeconds);
    recordRedisDaoRequests("removeTask", task.getTaskType(), task.getWorkflowType());
    return true;
}
/**
 * Loads a single task by id.
 *
 * @param taskId the id of the task (must not be null)
 * @return the task, or null if no task with that id exists
 */
@Override
public TaskModel getTask(String taskId) {
    Preconditions.checkNotNull(taskId, "taskId cannot be null");
    return Optional.ofNullable(jedisProxy.get(nsKey(TASK, taskId)))
            .map(
                    json -> {
                        TaskModel task = readValue(json, TaskModel.class);
                        recordRedisDaoRequests(
                                "getTask", task.getTaskType(), task.getWorkflowType());
                        // Use the raw stored JSON's length; re-serializing via toJson(task)
                        // was wasteful and could differ from the actual stored payload.
                        // (Matches the metric used by getTasks(List).)
                        recordRedisDaoPayloadSize(
                                "getTask",
                                json.length(),
                                task.getTaskType(),
                                task.getWorkflowType());
                        return task;
                    })
            .orElse(null);
}
/**
 * Loads the given task ids, silently skipping ids with no stored payload.
 *
 * @param taskIds ids to fetch
 * @return the tasks found, in input order (missing ids omitted)
 */
@Override
public List<TaskModel> getTasks(List<String> taskIds) {
    List<TaskModel> tasks = new ArrayList<>();
    for (String taskId : taskIds) {
        String jsonString = jedisProxy.get(nsKey(TASK, taskId));
        if (jsonString == null) {
            continue; // no payload stored for this id
        }
        TaskModel task = readValue(jsonString, TaskModel.class);
        recordRedisDaoRequests("getTask", task.getTaskType(), task.getWorkflowType());
        recordRedisDaoPayloadSize(
                "getTask", jsonString.length(), task.getTaskType(), task.getWorkflowType());
        tasks.add(task);
    }
    return tasks;
}
/** Loads every task belonging to the given workflow via the WORKFLOW_TO_TASKS index. */
@Override
public List<TaskModel> getTasksForWorkflow(String workflowId) {
    Preconditions.checkNotNull(workflowId, "workflowId cannot be null");
    recordRedisDaoRequests("getTasksForWorkflow");
    Set<String> memberTaskIds = jedisProxy.smembers(nsKey(WORKFLOW_TO_TASKS, workflowId));
    return getTasks(new ArrayList<>(memberTaskIds));
}
/** Loads every non-terminal task of the given type via the IN_PROGRESS_TASKS set. */
@Override
public List<TaskModel> getPendingTasksForTaskType(String taskName) {
    Preconditions.checkNotNull(taskName, "task name cannot be null");
    recordRedisDaoRequests("getPendingTasksForTaskType");
    Set<String> pendingTaskIds = jedisProxy.smembers(nsKey(IN_PROGRESS_TASKS, taskName));
    return getTasks(new ArrayList<>(pendingTaskIds));
}
/** Persists a new workflow; its index entries are created as part of the insert. */
@Override
public String createWorkflow(WorkflowModel workflow) {
    return insertOrUpdateWorkflow(workflow, false);
}
/** Updates an existing workflow; index entries are assumed to already exist. */
@Override
public String updateWorkflow(WorkflowModel workflow) {
    return insertOrUpdateWorkflow(workflow, true);
}
/**
 * Deletes a workflow, its index entries, and all of its tasks.
 *
 * @return true if the workflow existed and was removed, false otherwise
 */
@Override
public boolean removeWorkflow(String workflowId) {
    WorkflowModel workflow = getWorkflow(workflowId, true);
    if (workflow != null) {
        recordRedisDaoRequests("removeWorkflow");
        // Remove from lists
        String key =
                nsKey(
                        WORKFLOW_DEF_TO_WORKFLOWS,
                        workflow.getWorkflowName(),
                        dateStr(workflow.getCreateTime()));
        jedisProxy.srem(key, workflowId);
        jedisProxy.srem(nsKey(CORR_ID_TO_WORKFLOWS, workflow.getCorrelationId()), workflowId);
        jedisProxy.srem(nsKey(PENDING_WORKFLOWS, workflow.getWorkflowName()), workflowId);
        // Remove the object
        jedisProxy.del(nsKey(WORKFLOW, workflowId));
        // Cascade: each task's payload and index entries are deleted too.
        for (TaskModel task : workflow.getTasks()) {
            removeTask(task.getTaskId());
        }
        return true;
    }
    return false;
}
/**
 * Archival variant of {@code removeWorkflow}: index entries are removed immediately,
 * but the workflow/task payloads (and the WORKFLOW_TO_TASKS key) are left to expire
 * after {@code ttlSeconds} instead of being deleted.
 *
 * @return true if the workflow existed, false otherwise
 */
public boolean removeWorkflowWithExpiry(String workflowId, int ttlSeconds) {
    WorkflowModel workflow = getWorkflow(workflowId, true);
    if (workflow != null) {
        recordRedisDaoRequests("removeWorkflow");
        // Remove from lists
        String key =
                nsKey(
                        WORKFLOW_DEF_TO_WORKFLOWS,
                        workflow.getWorkflowName(),
                        dateStr(workflow.getCreateTime()));
        jedisProxy.srem(key, workflowId);
        jedisProxy.srem(nsKey(CORR_ID_TO_WORKFLOWS, workflow.getCorrelationId()), workflowId);
        jedisProxy.srem(nsKey(PENDING_WORKFLOWS, workflow.getWorkflowName()), workflowId);
        // Remove the object
        jedisProxy.expire(nsKey(WORKFLOW, workflowId), ttlSeconds);
        for (TaskModel task : workflow.getTasks()) {
            removeTaskWithExpiry(task.getTaskId(), ttlSeconds);
        }
        jedisProxy.expire(nsKey(WORKFLOW_TO_TASKS, workflowId), ttlSeconds);
        return true;
    }
    return false;
}
/** Drops the workflow's scheduled-task hash and removes it from the pending set. */
@Override
public void removeFromPendingWorkflow(String workflowType, String workflowId) {
    recordRedisDaoRequests("removePendingWorkflow");
    jedisProxy.del(nsKey(SCHEDULED_TASKS, workflowId));
    jedisProxy.srem(nsKey(PENDING_WORKFLOWS, workflowType), workflowId);
}
/** Loads a workflow including its tasks. Returns null if it does not exist. */
@Override
public WorkflowModel getWorkflow(String workflowId) {
    return getWorkflow(workflowId, true);
}
/**
 * Loads a workflow by id.
 *
 * @param workflowId workflow instance id
 * @param includeTasks when true, the workflow's tasks are loaded and sorted by sequence
 * @return the workflow, or null if no such workflow exists
 */
@Override
public WorkflowModel getWorkflow(String workflowId, boolean includeTasks) {
    String json = jedisProxy.get(nsKey(WORKFLOW, workflowId));
    WorkflowModel workflow = null;
    if (json != null) {
        workflow = readValue(json, WorkflowModel.class);
        recordRedisDaoRequests("getWorkflow", "n/a", workflow.getWorkflowName());
        recordRedisDaoPayloadSize(
                "getWorkflow", json.length(), "n/a", workflow.getWorkflowName());
        if (includeTasks) {
            List<TaskModel> tasks = getTasksForWorkflow(workflowId);
            // Tasks come from an unordered set; restore execution order by seq.
            tasks.sort(Comparator.comparingInt(TaskModel::getSeq));
            workflow.setTasks(tasks);
        }
    }
    return workflow;
}
/**
 * Returns the ids of all workflows of the given name that are currently pending
 * (i.e. in RUNNING state). Note: the {@code version} argument is ignored — ids
 * for every version of the workflow are returned.
 *
 * @param workflowName name of the workflow
 * @param version the workflow version (unused)
 * @return ids of pending workflows across all versions
 */
@Override
public List<String> getRunningWorkflowIds(String workflowName, int version) {
    Preconditions.checkNotNull(workflowName, "workflowName cannot be null");
    recordRedisDaoRequests("getRunningWorkflowsByName");
    return new LinkedList<>(jedisProxy.smembers(nsKey(PENDING_WORKFLOWS, workflowName)));
}
/**
 * Loads the pending (RUNNING) workflows of the given name that match the version.
 *
 * @param workflowName name of the workflow
 * @param version the workflow version to filter on
 * @return list of matching workflows that are in RUNNING state
 */
@Override
public List<WorkflowModel> getPendingWorkflowsByType(String workflowName, int version) {
    Preconditions.checkNotNull(workflowName, "workflowName cannot be null");
    List<String> workflowIds = getRunningWorkflowIds(workflowName, version);
    return workflowIds.stream()
            .map(this::getWorkflow)
            // ROBUSTNESS: a pending id may reference a workflow that was deleted or
            // expired; previously this caused an NPE on getWorkflowVersion().
            .filter(Objects::nonNull)
            .filter(workflow -> workflow.getWorkflowVersion() == version)
            .collect(Collectors.toList());
}
/**
 * Loads all workflows of the given name created between startTime and endTime
 * (epoch millis, inclusive), using the per-day WORKFLOW_DEF_TO_WORKFLOWS index.
 * Workflows that fail to load are logged and skipped.
 */
@Override
public List<WorkflowModel> getWorkflowsByType(
        String workflowName, Long startTime, Long endTime) {
    Preconditions.checkNotNull(workflowName, "workflowName cannot be null");
    Preconditions.checkNotNull(startTime, "startTime cannot be null");
    Preconditions.checkNotNull(endTime, "endTime cannot be null");
    List<WorkflowModel> workflows = new LinkedList<>();
    // Get all date strings between start and end
    List<String> dateStrs = dateStrBetweenDates(startTime, endTime);
    dateStrs.forEach(
            dateStr -> {
                String key = nsKey(WORKFLOW_DEF_TO_WORKFLOWS, workflowName, dateStr);
                jedisProxy
                        .smembers(key)
                        .forEach(
                                workflowId -> {
                                    try {
                                        WorkflowModel workflow = getWorkflow(workflowId);
                                        // Day-bucket membership is coarse; re-check the
                                        // exact creation-time bounds.
                                        if (workflow.getCreateTime() >= startTime
                                                && workflow.getCreateTime() <= endTime) {
                                            workflows.add(workflow);
                                        }
                                    } catch (Exception e) {
                                        LOGGER.error(
                                                "Failed to get workflow: {}", workflowId, e);
                                    }
                                });
            });
    return workflows;
}
/**
 * Not supported by this DAO; correlation-id lookups are served elsewhere.
 *
 * @throws UnsupportedOperationException always
 */
@Override
public List<WorkflowModel> getWorkflowsByCorrelationId(
        String workflowName, String correlationId, boolean includeTasks) {
    throw new UnsupportedOperationException(
            "This method is not implemented in RedisExecutionDAO. Please use ExecutionDAOFacade instead.");
}
/** The Redis store cannot search across workflows; callers must use an index DAO. */
@Override
public boolean canSearchAcrossWorkflows() {
    return false;
}
/**
 * Inserts a new workflow/ updates an existing workflow in the datastore. Additionally, if a
 * workflow is in terminal state, it is removed from the set of pending workflows.
 *
 * <p>The task list is detached before serialization (tasks are stored separately)
 * and re-attached before returning, leaving the caller's object unchanged.
 *
 * @param workflow the workflow instance
 * @param update flag to identify if update or create operation
 * @return the workflowId
 */
private String insertOrUpdateWorkflow(WorkflowModel workflow, boolean update) {
    Preconditions.checkNotNull(workflow, "workflow object cannot be null");
    // Detach tasks so the serialized workflow payload excludes them.
    List<TaskModel> tasks = workflow.getTasks();
    workflow.setTasks(new LinkedList<>());
    String payload = toJson(workflow);
    // Store the workflow object
    jedisProxy.set(nsKey(WORKFLOW, workflow.getWorkflowId()), payload);
    recordRedisDaoRequests("storeWorkflow", "n/a", workflow.getWorkflowName());
    recordRedisDaoPayloadSize(
            "storeWorkflow", payload.length(), "n/a", workflow.getWorkflowName());
    if (!update) {
        // Add to list of workflows for a workflowdef
        String key =
                nsKey(
                        WORKFLOW_DEF_TO_WORKFLOWS,
                        workflow.getWorkflowName(),
                        dateStr(workflow.getCreateTime()));
        jedisProxy.sadd(key, workflow.getWorkflowId());
        if (workflow.getCorrelationId() != null) {
            // Add to list of workflows for a correlationId
            jedisProxy.sadd(
                    nsKey(CORR_ID_TO_WORKFLOWS, workflow.getCorrelationId()),
                    workflow.getWorkflowId());
        }
    }
    // Add or remove from the pending workflows
    if (workflow.getStatus().isTerminal()) {
        jedisProxy.srem(
                nsKey(PENDING_WORKFLOWS, workflow.getWorkflowName()), workflow.getWorkflowId());
    } else {
        jedisProxy.sadd(
                nsKey(PENDING_WORKFLOWS, workflow.getWorkflowName()), workflow.getWorkflowId());
    }
    // Re-attach the caller's task list.
    workflow.setTasks(tasks);
    return workflow.getWorkflowId();
}
/**
 * Stores the correlation of a task to the workflow instance in the datastore
 * (adds the task id to the workflow's WORKFLOW_TO_TASKS set).
 *
 * @param taskId the taskId to be correlated
 * @param workflowInstanceId the workflowId to which the tasks belongs to
 */
@VisibleForTesting
void correlateTaskToWorkflowInDS(String taskId, String workflowInstanceId) {
    String workflowToTaskKey = nsKey(WORKFLOW_TO_TASKS, workflowInstanceId);
    jedisProxy.sadd(workflowToTaskKey, taskId);
    LOGGER.debug(
            "Task mapped in WORKFLOW_TO_TASKS with workflowToTaskKey: {}, workflowId: {}, taskId: {}",
            workflowToTaskKey,
            workflowInstanceId,
            taskId);
}
/** Returns the number of workflows of the given name in the pending set. */
public long getPendingWorkflowCount(String workflowName) {
    recordRedisDaoRequests("getPendingWorkflowCount");
    return jedisProxy.scard(nsKey(PENDING_WORKFLOWS, workflowName));
}
/** Returns the number of tasks of the given definition in IN_PROGRESS status. */
@Override
public long getInProgressTaskCount(String taskDefName) {
    recordRedisDaoRequests("getInProgressTaskCount");
    return jedisProxy.scard(nsKey(TASKS_IN_PROGRESS_STATUS, taskDefName));
}
/**
 * Adds an event execution entry, only if one does not already exist (hsetnx),
 * and applies the configured TTL to the hash when enabled.
 *
 * @return true if a new entry was created, false if it already existed
 * @throws TransientException if the write fails
 */
@Override
public boolean addEventExecution(EventExecution eventExecution) {
    try {
        String key =
                nsKey(
                        EVENT_EXECUTION,
                        eventExecution.getName(),
                        eventExecution.getEvent(),
                        eventExecution.getMessageId());
        String json = objectMapper.writeValueAsString(eventExecution);
        recordRedisDaoEventRequests("addEventExecution", eventExecution.getEvent());
        recordRedisDaoPayloadSize(
                "addEventExecution", json.length(), eventExecution.getEvent(), "n/a");
        // hsetnx returns 1 only when the field did not exist before.
        boolean added = jedisProxy.hsetnx(key, eventExecution.getId(), json) == 1L;
        if (ttlEventExecutionSeconds > 0) {
            jedisProxy.expire(key, ttlEventExecutionSeconds);
        }
        return added;
    } catch (Exception e) {
        throw new TransientException(
                "Unable to add event execution for " + eventExecution.getId(), e);
    }
}
/**
 * Overwrites the stored event execution entry (unconditional hset).
 *
 * @throws TransientException if the write fails
 */
@Override
public void updateEventExecution(EventExecution eventExecution) {
    try {
        String key =
                nsKey(
                        EVENT_EXECUTION,
                        eventExecution.getName(),
                        eventExecution.getEvent(),
                        eventExecution.getMessageId());
        String json = objectMapper.writeValueAsString(eventExecution);
        LOGGER.info("updating event execution {}", key);
        jedisProxy.hset(key, eventExecution.getId(), json);
        recordRedisDaoEventRequests("updateEventExecution", eventExecution.getEvent());
        recordRedisDaoPayloadSize(
                "updateEventExecution", json.length(), eventExecution.getEvent(), "n/a");
    } catch (Exception e) {
        throw new TransientException(
                "Unable to update event execution for " + eventExecution.getId(), e);
    }
}
/**
 * Deletes the stored event execution entry from its hash.
 *
 * @throws TransientException if the delete fails
 */
@Override
public void removeEventExecution(EventExecution eventExecution) {
    try {
        String key =
                nsKey(
                        EVENT_EXECUTION,
                        eventExecution.getName(),
                        eventExecution.getEvent(),
                        eventExecution.getMessageId());
        LOGGER.info("removing event execution {}", key);
        jedisProxy.hdel(key, eventExecution.getId());
        recordRedisDaoEventRequests("removeEventExecution", eventExecution.getEvent());
    } catch (Exception e) {
        throw new TransientException(
                "Unable to remove event execution for " + eventExecution.getId(), e);
    }
}
/**
 * Loads up to {@code max} event executions for the given handler/event/message.
 * Fields are probed as "messageId_0", "messageId_1", ... and the scan stops at the
 * first missing index — so the numbering is assumed to be gap-free.
 *
 * @throws TransientException if a read or deserialization fails
 */
public List<EventExecution> getEventExecutions(
        String eventHandlerName, String eventName, String messageId, int max) {
    try {
        String key = nsKey(EVENT_EXECUTION, eventHandlerName, eventName, messageId);
        LOGGER.info("getting event execution {}", key);
        List<EventExecution> executions = new LinkedList<>();
        for (int i = 0; i < max; i++) {
            String field = messageId + "_" + i;
            String value = jedisProxy.hget(key, field);
            if (value == null) {
                // First gap terminates the scan.
                break;
            }
            recordRedisDaoEventRequests("getEventExecution", eventHandlerName);
            recordRedisDaoPayloadSize(
                    "getEventExecution", value.length(), eventHandlerName, "n/a");
            EventExecution eventExecution = objectMapper.readValue(value, EventExecution.class);
            executions.add(eventExecution);
        }
        return executions;
    } catch (Exception e) {
        throw new TransientException(
                "Unable to get event executions for " + eventHandlerName, e);
    }
}
private void validate(TaskModel task) {
try {
Preconditions.checkNotNull(task, "task object cannot be null");
Preconditions.checkNotNull(task.getTaskId(), "Task id cannot be null");
Preconditions.checkNotNull(
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | true |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/redis-persistence/src/main/java/com/netflix/conductor/redis/dao/RedisRateLimitingDAO.java | redis-persistence/src/main/java/com/netflix/conductor/redis/dao/RedisRateLimitingDAO.java | /*
* Copyright 2022 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.redis.dao;
import java.util.Optional;
import org.apache.commons.lang3.tuple.ImmutablePair;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.context.annotation.Conditional;
import org.springframework.stereotype.Component;
import com.netflix.conductor.common.metadata.tasks.TaskDef;
import com.netflix.conductor.core.config.ConductorProperties;
import com.netflix.conductor.dao.RateLimitingDAO;
import com.netflix.conductor.metrics.Monitors;
import com.netflix.conductor.model.TaskModel;
import com.netflix.conductor.redis.config.AnyRedisCondition;
import com.netflix.conductor.redis.config.RedisProperties;
import com.netflix.conductor.redis.jedis.JedisProxy;
import com.fasterxml.jackson.databind.ObjectMapper;
@Component
@Conditional(AnyRedisCondition.class)
public class RedisRateLimitingDAO extends BaseDynoDAO implements RateLimitingDAO {

    private static final Logger LOGGER = LoggerFactory.getLogger(RedisRateLimitingDAO.class);

    // Key family under which per-task-def sorted sets of execution timestamps are stored.
    private static final String TASK_RATE_LIMIT_BUCKET = "TASK_RATE_LIMIT_BUCKET";

    public RedisRateLimitingDAO(
            JedisProxy jedisProxy,
            ObjectMapper objectMapper,
            ConductorProperties conductorProperties,
            RedisProperties properties) {
        super(jedisProxy, objectMapper, conductorProperties, properties);
    }

    /**
     * Evaluates whether the task is rate limited, preferring the limits on the {@link TaskDef}
     * (when non-null) over the limits carried by the {@link TaskModel} itself.
     *
     * <p>The rate limiting is implemented with a Redis sorted set per task definition name:
     *
     * <ul>
     *   <li>Entries older than the frequency window are pruned via {@link
     *       JedisProxy#zremrangeByScore(String, String, String)} so the subsequent count is cheap
     *   <li>The current count of executions inside the window (now minus {@code
     *       rateLimitFrequencyInSeconds}) is computed with {@link JedisProxy#zcount(String, double,
     *       double)}
     *   <li>If the count is below {@code rateLimitPerFrequency}, the current timestamp is added to
     *       the bucket and a TTL of one window is (re)set on the key
     * </ul>
     *
     * @param task the task to evaluate; its rate-limit fields are used when {@code taskDef} is null
     * @param taskDef the task definition whose rate-limit settings take precedence; may be null
     * @return true if the task exceeds the configured rate limit, false otherwise (including when
     *     no rate limit is configured, i.e. either value is 0 or less)
     */
    @Override
    public boolean exceedsRateLimitPerFrequency(TaskModel task, TaskDef taskDef) {
        // Prefer the definition's limits when a TaskDef is supplied; otherwise fall back to the
        // values carried on the task instance.
        ImmutablePair<Integer, Integer> rateLimitPair =
                Optional.ofNullable(taskDef)
                        .map(
                                definition ->
                                        new ImmutablePair<>(
                                                definition.getRateLimitPerFrequency(),
                                                definition.getRateLimitFrequencyInSeconds()))
                        .orElse(
                                new ImmutablePair<>(
                                        task.getRateLimitPerFrequency(),
                                        task.getRateLimitFrequencyInSeconds()));

        int rateLimitPerFrequency = rateLimitPair.getLeft();
        int rateLimitFrequencyInSeconds = rateLimitPair.getRight();
        if (rateLimitPerFrequency <= 0 || rateLimitFrequencyInSeconds <= 0) {
            // A non-positive limit or window means rate limiting is disabled for this task.
            LOGGER.debug(
                    "Rate limit not applied to the Task: {} either rateLimitPerFrequency: {} or rateLimitFrequencyInSeconds: {} is 0 or less",
                    task,
                    rateLimitPerFrequency,
                    rateLimitFrequencyInSeconds);
            return false;
        } else {
            LOGGER.debug(
                    "Evaluating rate limiting for TaskId: {} with TaskDefinition of: {} with rateLimitPerFrequency: {} and rateLimitFrequencyInSeconds: {}",
                    task.getTaskId(),
                    task.getTaskDefName(),
                    rateLimitPerFrequency,
                    rateLimitFrequencyInSeconds);
            long currentTimeEpochMillis = System.currentTimeMillis();
            long currentTimeEpochMinusRateLimitBucket =
                    currentTimeEpochMillis - (rateLimitFrequencyInSeconds * 1000L);
            String key = nsKey(TASK_RATE_LIMIT_BUCKET, task.getTaskDefName());
            // Prune entries outside the window so zcount below only sees in-window executions.
            jedisProxy.zremrangeByScore(
                    key, "-inf", String.valueOf(currentTimeEpochMinusRateLimitBucket));
            int currentBucketCount =
                    Math.toIntExact(
                            jedisProxy.zcount(
                                    key,
                                    currentTimeEpochMinusRateLimitBucket,
                                    currentTimeEpochMillis));
            if (currentBucketCount < rateLimitPerFrequency) {
                jedisProxy.zadd(
                        key, currentTimeEpochMillis, String.valueOf(currentTimeEpochMillis));
                // Refresh the TTL so the bucket self-expires one window after the last execution.
                jedisProxy.expire(key, rateLimitFrequencyInSeconds);
                LOGGER.info(
                        "TaskId: {} with TaskDefinition of: {} has rateLimitPerFrequency: {} and rateLimitFrequencyInSeconds: {} within the rate limit with current count {}",
                        task.getTaskId(),
                        task.getTaskDefName(),
                        rateLimitPerFrequency,
                        rateLimitFrequencyInSeconds,
                        ++currentBucketCount);
                return false;
            } else {
                LOGGER.info(
                        "TaskId: {} with TaskDefinition of: {} has rateLimitPerFrequency: {} and rateLimitFrequencyInSeconds: {} is out of bounds of rate limit with current count {}",
                        task.getTaskId(),
                        task.getTaskDefName(),
                        rateLimitPerFrequency,
                        rateLimitFrequencyInSeconds,
                        currentBucketCount);
                // FIX: record the rate-limited metric only when the task is actually rejected.
                // Previously this was recorded on the accepted (within-limit) path, inverting the
                // metric's meaning.
                Monitors.recordTaskRateLimited(task.getTaskDefName(), rateLimitPerFrequency);
                return true;
            }
        }
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/redis-persistence/src/main/java/com/netflix/conductor/redis/dao/BaseDynoDAO.java | redis-persistence/src/main/java/com/netflix/conductor/redis/dao/BaseDynoDAO.java | /*
* Copyright 2022 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.redis.dao;
import java.io.IOException;
import org.apache.commons.lang3.StringUtils;
import com.netflix.conductor.core.config.ConductorProperties;
import com.netflix.conductor.metrics.Monitors;
import com.netflix.conductor.redis.config.RedisProperties;
import com.netflix.conductor.redis.jedis.JedisProxy;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.ObjectMapper;
public class BaseDynoDAO {

    private static final String NAMESPACE_SEP = ".";
    private static final String DAO_NAME = "redis";
    private final RedisProperties properties;
    private final ConductorProperties conductorProperties;
    protected JedisProxy jedisProxy;
    protected ObjectMapper objectMapper;

    protected BaseDynoDAO(
            JedisProxy jedisProxy,
            ObjectMapper objectMapper,
            ConductorProperties conductorProperties,
            RedisProperties properties) {
        this.jedisProxy = jedisProxy;
        this.objectMapper = objectMapper;
        this.conductorProperties = conductorProperties;
        this.properties = properties;
    }

    /**
     * Builds a dot-separated, namespaced Redis key from the configured workflow namespace prefix,
     * stack, keyspace domain (each included only when non-blank) followed by the given values.
     */
    String nsKey(String... nsValues) {
        StringBuilder key = new StringBuilder();
        appendIfNotBlank(key, properties.getWorkflowNamespacePrefix());
        appendIfNotBlank(key, conductorProperties.getStack());
        appendIfNotBlank(key, properties.getKeyspaceDomain());
        for (String segment : nsValues) {
            key.append(segment).append(NAMESPACE_SEP);
        }
        // Every appended segment carries a trailing separator; drop the final one.
        return StringUtils.removeEnd(key.toString(), NAMESPACE_SEP);
    }

    // Appends "segment." to the builder, skipping blank/null segments.
    private static void appendIfNotBlank(StringBuilder key, String segment) {
        if (StringUtils.isNotBlank(segment)) {
            key.append(segment).append(NAMESPACE_SEP);
        }
    }

    public JedisProxy getDyno() {
        return jedisProxy;
    }

    /** Serializes the given object to a JSON string, wrapping any failure as unchecked. */
    String toJson(Object value) {
        try {
            return objectMapper.writeValueAsString(value);
        } catch (JsonProcessingException jpe) {
            throw new RuntimeException(jpe);
        }
    }

    /** Deserializes the given JSON string into an instance of {@code clazz}. */
    <T> T readValue(String json, Class<T> clazz) {
        try {
            return objectMapper.readValue(json, clazz);
        } catch (IOException ioe) {
            throw new RuntimeException(ioe);
        }
    }

    /** Records a DAO request metric with no task/workflow type context. */
    void recordRedisDaoRequests(String action) {
        recordRedisDaoRequests(action, "n/a", "n/a");
    }

    void recordRedisDaoRequests(String action, String taskType, String workflowType) {
        Monitors.recordDaoRequests(DAO_NAME, action, taskType, workflowType);
    }

    void recordRedisDaoEventRequests(String action, String event) {
        Monitors.recordDaoEventRequests(DAO_NAME, action, event);
    }

    /** Records the payload size metric; blank task/workflow types are normalized to "". */
    void recordRedisDaoPayloadSize(String action, int size, String taskType, String workflowType) {
        Monitors.recordDaoPayloadSize(
                DAO_NAME,
                action,
                StringUtils.defaultIfBlank(taskType, ""),
                StringUtils.defaultIfBlank(workflowType, ""),
                size);
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/redis-persistence/src/main/java/com/netflix/conductor/redis/dao/RedisMetadataDAO.java | redis-persistence/src/main/java/com/netflix/conductor/redis/dao/RedisMetadataDAO.java | /*
* Copyright 2022 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.redis.dao;
import java.util.ArrayList;
import java.util.Comparator;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import org.apache.commons.lang3.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.context.annotation.Conditional;
import org.springframework.stereotype.Component;
import com.netflix.conductor.common.metadata.tasks.TaskDef;
import com.netflix.conductor.common.metadata.workflow.WorkflowDef;
import com.netflix.conductor.core.config.ConductorProperties;
import com.netflix.conductor.core.exception.ConflictException;
import com.netflix.conductor.core.exception.NotFoundException;
import com.netflix.conductor.dao.MetadataDAO;
import com.netflix.conductor.metrics.Monitors;
import com.netflix.conductor.redis.config.AnyRedisCondition;
import com.netflix.conductor.redis.config.RedisProperties;
import com.netflix.conductor.redis.jedis.JedisProxy;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.common.base.Preconditions;
import static com.netflix.conductor.common.metadata.tasks.TaskDef.ONE_HOUR;
@Component
@Conditional(AnyRedisCondition.class)
public class RedisMetadataDAO extends BaseDynoDAO implements MetadataDAO {

    private static final Logger LOGGER = LoggerFactory.getLogger(RedisMetadataDAO.class);

    // Keys Families
    private static final String ALL_TASK_DEFS = "TASK_DEFS";
    private static final String WORKFLOW_DEF_NAMES = "WORKFLOW_DEF_NAMES";
    private static final String WORKFLOW_DEF = "WORKFLOW_DEF";
    private static final String LATEST = "latest";
    private static final String className = RedisMetadataDAO.class.getSimpleName();

    // FIX: written by the background refresh thread and read by request threads; volatile
    // guarantees readers see the fully-populated replacement map (the map itself is never
    // mutated after publication — refreshTaskDefs swaps in a new instance).
    private volatile Map<String, TaskDef> taskDefCache = new HashMap<>();

    public RedisMetadataDAO(
            JedisProxy jedisProxy,
            ObjectMapper objectMapper,
            ConductorProperties conductorProperties,
            RedisProperties properties) {
        super(jedisProxy, objectMapper, conductorProperties, properties);
        refreshTaskDefs();
        long cacheRefreshTime = properties.getTaskDefCacheRefreshInterval().getSeconds();
        // FIX: use a named daemon thread so the refresh loop never blocks JVM shutdown
        // (the executor is intentionally never shut down for the life of the DAO).
        Executors.newSingleThreadScheduledExecutor(
                        runnable -> {
                            Thread thread = new Thread(runnable, "redis-taskdef-cache-refresh");
                            thread.setDaemon(true);
                            return thread;
                        })
                .scheduleWithFixedDelay(
                        this::refreshTaskDefs,
                        cacheRefreshTime,
                        cacheRefreshTime,
                        TimeUnit.SECONDS);
    }

    @Override
    public TaskDef createTaskDef(TaskDef taskDef) {
        return insertOrUpdateTaskDef(taskDef);
    }

    @Override
    public TaskDef updateTaskDef(TaskDef taskDef) {
        return insertOrUpdateTaskDef(taskDef);
    }

    /** Upserts the task definition into the single shared hash and refreshes the local cache. */
    private TaskDef insertOrUpdateTaskDef(TaskDef taskDef) {
        // Store all task def in under one key
        String payload = toJson(taskDef);
        jedisProxy.hset(nsKey(ALL_TASK_DEFS), taskDef.getName(), payload);
        recordRedisDaoRequests("storeTaskDef");
        recordRedisDaoPayloadSize("storeTaskDef", payload.length(), taskDef.getName(), "n/a");
        refreshTaskDefs();
        return taskDef;
    }

    /** Rebuilds the local task-def cache from Redis; failures are logged, never propagated. */
    private void refreshTaskDefs() {
        try {
            Map<String, TaskDef> map = new HashMap<>();
            getAllTaskDefs().forEach(taskDef -> map.put(taskDef.getName(), taskDef));
            // Publish the fully-built map atomically via the volatile field.
            this.taskDefCache = map;
            LOGGER.debug("Refreshed task defs {}", map.size());
        } catch (Exception e) {
            Monitors.error(className, "refreshTaskDefs");
            LOGGER.error("refresh TaskDefs failed ", e);
        }
    }

    /** Returns the task definition from the cache, falling back to Redis on a miss. */
    @Override
    public TaskDef getTaskDef(String name) {
        return Optional.ofNullable(taskDefCache.get(name)).orElseGet(() -> getTaskDefFromDB(name));
    }

    private TaskDef getTaskDefFromDB(String name) {
        Preconditions.checkNotNull(name, "TaskDef name cannot be null");
        TaskDef taskDef = null;
        String taskDefJsonStr = jedisProxy.hget(nsKey(ALL_TASK_DEFS), name);
        if (taskDefJsonStr != null) {
            taskDef = readValue(taskDefJsonStr, TaskDef.class);
            recordRedisDaoRequests("getTaskDef");
            recordRedisDaoPayloadSize(
                    "getTaskDef", taskDefJsonStr.length(), taskDef.getName(), "n/a");
        }
        setDefaults(taskDef);
        return taskDef;
    }

    // Backfills responseTimeoutSeconds for legacy definitions that never set it.
    private void setDefaults(TaskDef taskDef) {
        if (taskDef != null && taskDef.getResponseTimeoutSeconds() == 0) {
            taskDef.setResponseTimeoutSeconds(
                    taskDef.getTimeoutSeconds() == 0 ? ONE_HOUR : taskDef.getTimeoutSeconds() - 1);
        }
    }

    @Override
    public List<TaskDef> getAllTaskDefs() {
        List<TaskDef> allTaskDefs = new LinkedList<>();
        recordRedisDaoRequests("getAllTaskDefs");
        Map<String, String> taskDefs = jedisProxy.hgetAll(nsKey(ALL_TASK_DEFS));
        int size = 0;
        if (taskDefs.size() > 0) {
            for (String taskDefJsonStr : taskDefs.values()) {
                if (taskDefJsonStr != null) {
                    TaskDef taskDef = readValue(taskDefJsonStr, TaskDef.class);
                    setDefaults(taskDef);
                    allTaskDefs.add(taskDef);
                    size += taskDefJsonStr.length();
                }
            }
            recordRedisDaoPayloadSize("getAllTaskDefs", size, "n/a", "n/a");
        }
        return allTaskDefs;
    }

    /**
     * Removes the named task definition.
     *
     * @throws NotFoundException if no definition with that name exists
     */
    @Override
    public void removeTaskDef(String name) {
        Preconditions.checkNotNull(name, "TaskDef name cannot be null");
        Long result = jedisProxy.hdel(nsKey(ALL_TASK_DEFS), name);
        if (!result.equals(1L)) {
            throw new NotFoundException("Cannot remove the task - no such task definition");
        }
        recordRedisDaoRequests("removeTaskDef");
        refreshTaskDefs();
    }

    /**
     * Creates a new workflow definition.
     *
     * @throws ConflictException if a definition with the same name and version already exists
     */
    @Override
    public void createWorkflowDef(WorkflowDef def) {
        if (jedisProxy.hexists(
                nsKey(WORKFLOW_DEF, def.getName()), String.valueOf(def.getVersion()))) {
            throw new ConflictException("Workflow with %s already exists!", def.key());
        }
        _createOrUpdate(def);
    }

    @Override
    public void updateWorkflowDef(WorkflowDef def) {
        _createOrUpdate(def);
    }

    @Override
    /*
     * @param name Name of the workflow definition
     * @return Latest version of workflow definition
     * @see WorkflowDef
     */
    public Optional<WorkflowDef> getLatestWorkflowDef(String name) {
        Preconditions.checkNotNull(name, "WorkflowDef name cannot be null");
        WorkflowDef workflowDef = null;
        Optional<Integer> optionalMaxVersion = getWorkflowMaxVersion(name);
        if (optionalMaxVersion.isPresent()) {
            String latestdata =
                    jedisProxy.hget(nsKey(WORKFLOW_DEF, name), optionalMaxVersion.get().toString());
            if (latestdata != null) {
                workflowDef = readValue(latestdata, WorkflowDef.class);
            }
        }
        return Optional.ofNullable(workflowDef);
    }

    // Highest numeric version stored for the workflow; the "latest" alias key is excluded.
    private Optional<Integer> getWorkflowMaxVersion(String workflowName) {
        return jedisProxy.hkeys(nsKey(WORKFLOW_DEF, workflowName)).stream()
                .filter(key -> !key.equals(LATEST))
                .map(Integer::valueOf)
                .max(Comparator.naturalOrder());
    }

    public List<WorkflowDef> getAllVersions(String name) {
        Preconditions.checkNotNull(name, "WorkflowDef name cannot be null");
        List<WorkflowDef> workflows = new LinkedList<>();
        recordRedisDaoRequests("getAllWorkflowDefsByName");
        Map<String, String> workflowDefs = jedisProxy.hgetAll(nsKey(WORKFLOW_DEF, name));
        int size = 0;
        for (String key : workflowDefs.keySet()) {
            if (key.equals(LATEST)) {
                continue;
            }
            String workflowDef = workflowDefs.get(key);
            workflows.add(readValue(workflowDef, WorkflowDef.class));
            size += workflowDef.length();
        }
        recordRedisDaoPayloadSize("getAllWorkflowDefsByName", size, "n/a", name);
        return workflows;
    }

    @Override
    public Optional<WorkflowDef> getWorkflowDef(String name, int version) {
        Preconditions.checkNotNull(name, "WorkflowDef name cannot be null");
        WorkflowDef def = null;
        recordRedisDaoRequests("getWorkflowDef");
        String workflowDefJsonString =
                jedisProxy.hget(nsKey(WORKFLOW_DEF, name), String.valueOf(version));
        if (workflowDefJsonString != null) {
            def = readValue(workflowDefJsonString, WorkflowDef.class);
            recordRedisDaoPayloadSize(
                    "getWorkflowDef", workflowDefJsonString.length(), "n/a", name);
        }
        return Optional.ofNullable(def);
    }

    /**
     * Removes a single version of a workflow definition; when it was the last remaining version,
     * the workflow name is removed from the name index as well.
     *
     * @throws NotFoundException if the name/version pair does not exist
     */
    @Override
    public void removeWorkflowDef(String name, Integer version) {
        Preconditions.checkArgument(
                StringUtils.isNotBlank(name), "WorkflowDef name cannot be null");
        Preconditions.checkNotNull(version, "Input version cannot be null");
        Long result = jedisProxy.hdel(nsKey(WORKFLOW_DEF, name), String.valueOf(version));
        if (!result.equals(1L)) {
            throw new NotFoundException(
                    "Cannot remove the workflow - no such workflow" + " definition: %s version: %d",
                    name, version);
        }
        // check if there are any more versions remaining if not delete the
        // workflow name
        Optional<Integer> optionMaxVersion = getWorkflowMaxVersion(name);
        // delete workflow name
        if (optionMaxVersion.isEmpty()) {
            jedisProxy.srem(nsKey(WORKFLOW_DEF_NAMES), name);
        }
        recordRedisDaoRequests("removeWorkflowDef");
    }

    public List<String> findAll() {
        Set<String> wfNames = jedisProxy.smembers(nsKey(WORKFLOW_DEF_NAMES));
        return new ArrayList<>(wfNames);
    }

    @Override
    public List<WorkflowDef> getAllWorkflowDefs() {
        List<WorkflowDef> workflows = new LinkedList<>();
        // Get all from WORKFLOW_DEF_NAMES
        recordRedisDaoRequests("getAllWorkflowDefs");
        Set<String> wfNames = jedisProxy.smembers(nsKey(WORKFLOW_DEF_NAMES));
        int size = 0;
        for (String wfName : wfNames) {
            Map<String, String> workflowDefs = jedisProxy.hgetAll(nsKey(WORKFLOW_DEF, wfName));
            for (String key : workflowDefs.keySet()) {
                if (key.equals(LATEST)) {
                    continue;
                }
                String workflowDef = workflowDefs.get(key);
                workflows.add(readValue(workflowDef, WorkflowDef.class));
                size += workflowDef.length();
            }
        }
        recordRedisDaoPayloadSize("getAllWorkflowDefs", size, "n/a", "n/a");
        return workflows;
    }

    @Override
    public List<WorkflowDef> getAllWorkflowDefsLatestVersions() {
        List<WorkflowDef> workflows = new LinkedList<>();
        // Get all definitions latest versions from WORKFLOW_DEF_NAMES
        recordRedisDaoRequests("getAllWorkflowLatestVersionsDefs");
        Set<String> wfNames = jedisProxy.smembers(nsKey(WORKFLOW_DEF_NAMES));
        int size = 0;
        for (String wfName : wfNames) {
            WorkflowDef def = getLatestWorkflowDef(wfName).orElse(null);
            if (def != null) {
                workflows.add(def);
                size += def.toString().length();
            }
        }
        recordRedisDaoPayloadSize("getAllWorkflowLatestVersionsDefs", size, "n/a", "n/a");
        return workflows;
    }

    // Writes the versioned definition hash entry and adds the name to the name index.
    private void _createOrUpdate(WorkflowDef workflowDef) {
        // First set the workflow def
        jedisProxy.hset(
                nsKey(WORKFLOW_DEF, workflowDef.getName()),
                String.valueOf(workflowDef.getVersion()),
                toJson(workflowDef));
        jedisProxy.sadd(nsKey(WORKFLOW_DEF_NAMES), workflowDef.getName());
        recordRedisDaoRequests("storeWorkflowDef", "n/a", workflowDef.getName());
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/redis-persistence/src/main/java/com/netflix/conductor/redis/dao/RedisEventHandlerDAO.java | redis-persistence/src/main/java/com/netflix/conductor/redis/dao/RedisEventHandlerDAO.java | /*
* Copyright 2022 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.redis.dao;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Set;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.context.annotation.Conditional;
import org.springframework.stereotype.Component;
import com.netflix.conductor.common.metadata.events.EventHandler;
import com.netflix.conductor.core.config.ConductorProperties;
import com.netflix.conductor.core.exception.ConflictException;
import com.netflix.conductor.core.exception.NotFoundException;
import com.netflix.conductor.core.exception.TransientException;
import com.netflix.conductor.dao.EventHandlerDAO;
import com.netflix.conductor.redis.config.AnyRedisCondition;
import com.netflix.conductor.redis.config.RedisProperties;
import com.netflix.conductor.redis.jedis.JedisProxy;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.common.base.Preconditions;
@Component
@Conditional(AnyRedisCondition.class)
public class RedisEventHandlerDAO extends BaseDynoDAO implements EventHandlerDAO {

    private static final Logger LOGGER = LoggerFactory.getLogger(RedisEventHandlerDAO.class);

    // Hash of handler-name -> handler JSON, plus a per-event set index of handler names.
    private static final String EVENT_HANDLERS = "EVENT_HANDLERS";
    private static final String EVENT_HANDLERS_BY_EVENT = "EVENT_HANDLERS_BY_EVENT";

    public RedisEventHandlerDAO(
            JedisProxy jedisProxy,
            ObjectMapper objectMapper,
            ConductorProperties conductorProperties,
            RedisProperties properties) {
        super(jedisProxy, objectMapper, conductorProperties, properties);
    }

    /**
     * Stores a new event handler.
     *
     * @throws ConflictException if a handler with the same name already exists
     */
    @Override
    public void addEventHandler(EventHandler eventHandler) {
        Preconditions.checkNotNull(eventHandler.getName(), "Missing Name");
        if (getEventHandler(eventHandler.getName()) != null) {
            throw new ConflictException(
                    "EventHandler with name %s already exists!", eventHandler.getName());
        }
        // NOTE(review): the index is written before the handler hash entry; if the hset fails,
        // a dangling index entry remains (tolerated by getEventHandlersForEvent's null guard).
        index(eventHandler);
        jedisProxy.hset(nsKey(EVENT_HANDLERS), eventHandler.getName(), toJson(eventHandler));
        recordRedisDaoRequests("addEventHandler");
    }

    /**
     * Updates an existing handler; when the event it listens to changed, the old event index
     * entry is removed before re-indexing.
     *
     * @throws NotFoundException if no handler with that name exists
     */
    @Override
    public void updateEventHandler(EventHandler eventHandler) {
        Preconditions.checkNotNull(eventHandler.getName(), "Missing Name");
        EventHandler existing = getEventHandler(eventHandler.getName());
        if (existing == null) {
            throw new NotFoundException(
                    "EventHandler with name %s not found!", eventHandler.getName());
        }
        if (!existing.getEvent().equals(eventHandler.getEvent())) {
            removeIndex(existing);
        }
        index(eventHandler);
        jedisProxy.hset(nsKey(EVENT_HANDLERS), eventHandler.getName(), toJson(eventHandler));
        recordRedisDaoRequests("updateEventHandler");
    }

    /**
     * Removes the named handler and its event index entry.
     *
     * @throws NotFoundException if no handler with that name exists
     */
    @Override
    public void removeEventHandler(String name) {
        EventHandler existing = getEventHandler(name);
        if (existing == null) {
            throw new NotFoundException("EventHandler with name %s not found!", name);
        }
        jedisProxy.hdel(nsKey(EVENT_HANDLERS), name);
        recordRedisDaoRequests("removeEventHandler");
        removeIndex(existing);
    }

    @Override
    public List<EventHandler> getAllEventHandlers() {
        Map<String, String> all = jedisProxy.hgetAll(nsKey(EVENT_HANDLERS));
        List<EventHandler> handlers = new LinkedList<>();
        all.forEach(
                (key, json) -> {
                    EventHandler eventHandler = readValue(json, EventHandler.class);
                    handlers.add(eventHandler);
                });
        recordRedisDaoRequests("getAllEventHandlers");
        return handlers;
    }

    private void index(EventHandler eventHandler) {
        String event = eventHandler.getEvent();
        String key = nsKey(EVENT_HANDLERS_BY_EVENT, event);
        jedisProxy.sadd(key, eventHandler.getName());
    }

    private void removeIndex(EventHandler eventHandler) {
        String event = eventHandler.getEvent();
        String key = nsKey(EVENT_HANDLERS_BY_EVENT, event);
        jedisProxy.srem(key, eventHandler.getName());
    }

    /**
     * Returns the handlers indexed for the given event, optionally filtered to active ones.
     * Stale index entries (names whose handler has since been deleted) are skipped.
     */
    @Override
    public List<EventHandler> getEventHandlersForEvent(String event, boolean activeOnly) {
        String key = nsKey(EVENT_HANDLERS_BY_EVENT, event);
        Set<String> names = jedisProxy.smembers(key);
        List<EventHandler> handlers = new LinkedList<>();
        for (String name : names) {
            EventHandler eventHandler = getEventHandler(name);
            // FIX: getEventHandler returns null (it never throws NotFoundException), so the
            // previous catch block was dead code and a stale index entry caused an NPE here.
            if (eventHandler == null) {
                LOGGER.info("No matching event handler found for event: {}", event);
                continue;
            }
            recordRedisDaoEventRequests("getEventHandler", event);
            if (eventHandler.getEvent().equals(event)
                    && (!activeOnly || eventHandler.isActive())) {
                handlers.add(eventHandler);
            }
        }
        return handlers;
    }

    /** Reads a handler by name; returns null when absent. */
    private EventHandler getEventHandler(String name) {
        EventHandler eventHandler = null;
        String json;
        try {
            json = jedisProxy.hget(nsKey(EVENT_HANDLERS), name);
        } catch (Exception e) {
            throw new TransientException("Unable to get event handler named " + name, e);
        }
        if (json != null) {
            eventHandler = readValue(json, EventHandler.class);
        }
        return eventHandler;
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/redis-persistence/src/main/java/com/netflix/conductor/redis/dao/DynoQueueDAO.java | redis-persistence/src/main/java/com/netflix/conductor/redis/dao/DynoQueueDAO.java | /*
* Copyright 2022 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.redis.dao;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;
import org.springframework.context.annotation.Conditional;
import org.springframework.stereotype.Component;
import com.netflix.conductor.dao.QueueDAO;
import com.netflix.conductor.redis.config.AnyRedisCondition;
import com.netflix.dyno.queues.DynoQueue;
import com.netflix.dyno.queues.Message;
import com.netflix.dyno.queues.redis.RedisQueues;
@Component
@Conditional(AnyRedisCondition.class)
public class DynoQueueDAO implements QueueDAO {

    private final RedisQueues queues;

    public DynoQueueDAO(RedisQueues queues) {
        this.queues = queues;
    }

    // Builds a dyno-queues Message with the given delay; priority is applied only when it is
    // within the valid [0, 99] range (shared by the single-message push variants).
    private static Message buildMessage(
            String id, String payload, int priority, long offsetTimeInSecond) {
        Message msg = new Message(id, payload);
        msg.setTimeout(offsetTimeInSecond, TimeUnit.SECONDS);
        if (priority >= 0 && priority <= 99) {
            msg.setPriority(priority);
        }
        return msg;
    }

    @Override
    public void push(String queueName, String id, long offsetTimeInSecond) {
        push(queueName, id, -1, offsetTimeInSecond);
    }

    @Override
    public void push(String queueName, String id, int priority, long offsetTimeInSecond) {
        queues.get(queueName)
                .push(
                        Collections.singletonList(
                                buildMessage(id, null, priority, offsetTimeInSecond)));
    }

    @Override
    public void push(
            String queueName, List<com.netflix.conductor.core.events.queue.Message> messages) {
        List<Message> msgs =
                messages.stream()
                        .map(
                                msg -> {
                                    Message m = new Message(msg.getId(), msg.getPayload());
                                    // NOTE(review): this variant only applies strictly-positive
                                    // priorities (no upper bound), unlike the single-message
                                    // variants' [0, 99] check — preserved as-is.
                                    if (msg.getPriority() > 0) {
                                        m.setPriority(msg.getPriority());
                                    }
                                    return m;
                                })
                        .collect(Collectors.toList());
        queues.get(queueName).push(msgs);
    }

    @Override
    public boolean pushIfNotExists(String queueName, String id, long offsetTimeInSecond) {
        return pushIfNotExists(queueName, id, -1, offsetTimeInSecond);
    }

    /** Pushes the message only when no message with the same id is already queued. */
    @Override
    public boolean pushIfNotExists(
            String queueName, String id, int priority, long offsetTimeInSecond) {
        DynoQueue queue = queues.get(queueName);
        if (queue.get(id) != null) {
            return false;
        }
        queue.push(Collections.singletonList(buildMessage(id, null, priority, offsetTimeInSecond)));
        return true;
    }

    @Override
    public List<String> pop(String queueName, int count, int timeout) {
        List<Message> msg = queues.get(queueName).pop(count, timeout, TimeUnit.MILLISECONDS);
        return msg.stream().map(Message::getId).collect(Collectors.toList());
    }

    /** Pops up to {@code count} messages, converting them to Conductor queue messages. */
    @Override
    public List<com.netflix.conductor.core.events.queue.Message> pollMessages(
            String queueName, int count, int timeout) {
        List<Message> msgs = queues.get(queueName).pop(count, timeout, TimeUnit.MILLISECONDS);
        return msgs.stream()
                .map(
                        msg ->
                                new com.netflix.conductor.core.events.queue.Message(
                                        msg.getId(), msg.getPayload(), null, msg.getPriority()))
                .collect(Collectors.toList());
    }

    @Override
    public void remove(String queueName, String messageId) {
        queues.get(queueName).remove(messageId);
    }

    @Override
    public int getSize(String queueName) {
        return (int) queues.get(queueName).size();
    }

    @Override
    public boolean ack(String queueName, String messageId) {
        return queues.get(queueName).ack(messageId);
    }

    @Override
    public boolean setUnackTimeout(String queueName, String messageId, long timeout) {
        return queues.get(queueName).setUnackTimeout(messageId, timeout);
    }

    @Override
    public void flush(String queueName) {
        DynoQueue queue = queues.get(queueName);
        if (queue != null) {
            queue.clear();
        }
    }

    @Override
    public Map<String, Long> queuesDetail() {
        return queues.queues().stream()
                .collect(Collectors.toMap(DynoQueue::getName, DynoQueue::size));
    }

    @Override
    public Map<String, Map<String, Map<String, Long>>> queuesDetailVerbose() {
        return queues.queues().stream()
                .collect(Collectors.toMap(DynoQueue::getName, DynoQueue::shardSizes));
    }

    /** Triggers redelivery processing of unacknowledged messages for the queue. */
    public void processUnacks(String queueName) {
        queues.get(queueName).processUnacks();
    }

    /** Clears any delivery delay on the message so it becomes immediately available. */
    @Override
    public boolean resetOffsetTime(String queueName, String id) {
        DynoQueue queue = queues.get(queueName);
        return queue.setTimeout(id, 0);
    }

    @Override
    public boolean containsMessage(String queueName, String messageId) {
        DynoQueue queue = queues.get(queueName);
        Message message = queue.get(messageId);
        return Objects.nonNull(message);
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/redis-persistence/src/main/java/com/netflix/conductor/redis/dynoqueue/ConfigurationHostSupplier.java | redis-persistence/src/main/java/com/netflix/conductor/redis/dynoqueue/ConfigurationHostSupplier.java | /*
* Copyright 2022 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.redis.dynoqueue;
import java.util.Arrays;
import java.util.List;
import java.util.stream.Collectors;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.netflix.conductor.redis.config.RedisProperties;
import com.netflix.dyno.connectionpool.Host;
import com.netflix.dyno.connectionpool.HostBuilder;
import com.netflix.dyno.connectionpool.HostSupplier;
public class ConfigurationHostSupplier implements HostSupplier {

    private static final Logger log = LoggerFactory.getLogger(ConfigurationHostSupplier.class);

    private final RedisProperties properties;

    public ConfigurationHostSupplier(RedisProperties properties) {
        this.properties = properties;
    }

    @Override
    public List<Host> getHosts() {
        return parseHostsFromConfig();
    }

    /**
     * Reads the semicolon-separated host list from the configuration.
     *
     * @return the parsed hosts
     * @throws RuntimeException if 'conductor.redis.hosts' is not set
     */
    private List<Host> parseHostsFromConfig() {
        String hosts = properties.getHosts();
        if (hosts == null) {
            String message =
                    "Missing dynomite/redis hosts. Ensure 'conductor.redis.hosts' has been set in the supplied configuration.";
            log.error(message);
            throw new RuntimeException(message);
        }
        return parseHostsFrom(hosts);
    }

    /** Parses a list of the form {@code host:port:rack[:password][;host:port:rack...]}. */
    private List<Host> parseHostsFrom(String hostConfig) {
        List<String> hostConfigs = Arrays.asList(hostConfig.split(";"));
        return hostConfigs.stream().map(this::parseHost).collect(Collectors.toList());
    }

    /** Parses a single {@code host:port:rack[:password]} entry into a {@link Host}. */
    private Host parseHost(String hostConfigEntry) {
        String[] hostConfigValues = hostConfigEntry.split(":");
        // Fail fast with a descriptive message instead of surfacing an
        // ArrayIndexOutOfBoundsException on malformed entries.
        if (hostConfigValues.length < 3) {
            String message =
                    "Invalid dynomite/redis host entry '"
                            + hostConfigEntry
                            + "'. Expected format host:port:rack[:password].";
            log.error(message);
            throw new RuntimeException(message);
        }
        String host = hostConfigValues[0];
        int port = Integer.parseInt(hostConfigValues[1]);
        String rack = hostConfigValues[2];
        HostBuilder builder =
                new HostBuilder()
                        .setHostname(host)
                        .setPort(port)
                        .setRack(rack)
                        .setStatus(Host.Status.Up);
        if (hostConfigValues.length >= 4) {
            // Optional fourth component is the password. NOTE(review): a password
            // containing ':' would be truncated by the split — confirm unsupported.
            builder.setPassword(hostConfigValues[3]);
        }
        return builder.createHost();
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/redis-persistence/src/main/java/com/netflix/conductor/redis/dynoqueue/RedisQueuesShardingStrategyProvider.java | redis-persistence/src/main/java/com/netflix/conductor/redis/dynoqueue/RedisQueuesShardingStrategyProvider.java | /*
* Copyright 2022 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.redis.dynoqueue;
import java.util.List;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.netflix.conductor.redis.config.RedisProperties;
import com.netflix.dyno.queues.Message;
import com.netflix.dyno.queues.ShardSupplier;
import com.netflix.dyno.queues.redis.sharding.RoundRobinStrategy;
import com.netflix.dyno.queues.redis.sharding.ShardingStrategy;
public class RedisQueuesShardingStrategyProvider {

    public static final String LOCAL_ONLY_STRATEGY = "localOnly";
    public static final String ROUND_ROBIN_STRATEGY = "roundRobin";

    private static final Logger LOGGER =
            LoggerFactory.getLogger(RedisQueuesShardingStrategyProvider.class);

    private final ShardSupplier shardSupplier;
    private final RedisProperties properties;

    public RedisQueuesShardingStrategyProvider(
            ShardSupplier shardSupplier, RedisProperties properties) {
        this.shardSupplier = shardSupplier;
        this.properties = properties;
    }

    /**
     * Selects the queue sharding strategy from the configured property.
     *
     * @return {@link LocalOnlyStrategy} when the property equals {@value #LOCAL_ONLY_STRATEGY},
     *     otherwise a {@link RoundRobinStrategy}
     */
    public ShardingStrategy get() {
        String shardingStrat = properties.getQueueShardingStrategy();
        // Constant-first comparison avoids an NPE if the property value is null.
        if (LOCAL_ONLY_STRATEGY.equals(shardingStrat)) {
            LOGGER.info(
                    "Using {} sharding strategy for queues",
                    LocalOnlyStrategy.class.getSimpleName());
            return new LocalOnlyStrategy(shardSupplier);
        } else {
            LOGGER.info(
                    "Using {} sharding strategy for queues",
                    RoundRobinStrategy.class.getSimpleName());
            return new RoundRobinStrategy();
        }
    }

    /** Strategy that always routes messages to this node's own shard. */
    public static final class LocalOnlyStrategy implements ShardingStrategy {

        private static final Logger LOGGER = LoggerFactory.getLogger(LocalOnlyStrategy.class);

        private final ShardSupplier shardSupplier;

        public LocalOnlyStrategy(ShardSupplier shardSupplier) {
            this.shardSupplier = shardSupplier;
        }

        @Override
        public String getNextShard(List<String> allShards, Message message) {
            LOGGER.debug(
                    "Always using {} shard out of {}", shardSupplier.getCurrentShard(), allShards);
            return shardSupplier.getCurrentShard();
        }
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/redis-persistence/src/main/java/com/netflix/conductor/redis/dynoqueue/LocalhostHostSupplier.java | redis-persistence/src/main/java/com/netflix/conductor/redis/dynoqueue/LocalhostHostSupplier.java | /*
* Copyright 2022 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.redis.dynoqueue;
import java.util.List;
import com.netflix.conductor.redis.config.RedisProperties;
import com.netflix.dyno.connectionpool.Host;
import com.netflix.dyno.connectionpool.HostBuilder;
import com.netflix.dyno.connectionpool.HostSupplier;
import com.google.common.collect.Lists;
/** Host supplier that always reports a single localhost entry. */
public class LocalhostHostSupplier implements HostSupplier {

    private final RedisProperties properties;

    public LocalhostHostSupplier(RedisProperties properties) {
        this.properties = properties;
    }

    /** Returns one localhost host placed in the configured availability zone. */
    @Override
    public List<Host> getHosts() {
        HostBuilder localhost =
                new HostBuilder()
                        .setHostname("localhost")
                        .setIpAddress("0")
                        .setRack(properties.getAvailabilityZone())
                        .setStatus(Host.Status.Up);
        return Lists.newArrayList(localhost.createHost());
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/redis-persistence/src/main/java/com/netflix/conductor/redis/jedis/JedisMock.java | redis-persistence/src/main/java/com/netflix/conductor/redis/jedis/JedisMock.java | /*
* Copyright 2020 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.redis.jedis;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import org.rarefiedredis.redis.IRedisClient;
import org.rarefiedredis.redis.IRedisSortedSet.ZsetPair;
import org.rarefiedredis.redis.RedisMock;
import redis.clients.jedis.Jedis;
import redis.clients.jedis.ScanParams;
import redis.clients.jedis.ScanResult;
import redis.clients.jedis.Tuple;
import redis.clients.jedis.exceptions.JedisException;
import redis.clients.jedis.params.ZAddParams;
public class JedisMock extends Jedis {
private final IRedisClient redis;
public JedisMock() {
super("");
this.redis = new RedisMock();
}
private Set<Tuple> toTupleSet(Set<ZsetPair> pairs) {
Set<Tuple> set = new HashSet<>();
for (ZsetPair pair : pairs) {
set.add(new Tuple(pair.member, pair.score));
}
return set;
}
@Override
public String set(final String key, String value) {
try {
return redis.set(key, value);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public String get(final String key) {
try {
return redis.get(key);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Boolean exists(final String key) {
try {
return redis.exists(key);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Long del(final String... keys) {
try {
return redis.del(keys);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Long del(String key) {
try {
return redis.del(key);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public String type(final String key) {
try {
return redis.type(key);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Long expire(final String key, final int seconds) {
try {
return redis.expire(key, seconds) ? 1L : 0L;
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Long expireAt(final String key, final long unixTime) {
try {
return redis.expireat(key, unixTime) ? 1L : 0L;
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Long ttl(final String key) {
try {
return redis.ttl(key);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Long move(final String key, final int dbIndex) {
try {
return redis.move(key, dbIndex);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public String getSet(final String key, final String value) {
try {
return redis.getset(key, value);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public List<String> mget(final String... keys) {
try {
String[] mget = redis.mget(keys);
List<String> lst = new ArrayList<>(mget.length);
for (String get : mget) {
lst.add(get);
}
return lst;
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Long setnx(final String key, final String value) {
try {
return redis.setnx(key, value);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public String setex(final String key, final int seconds, final String value) {
try {
return redis.setex(key, seconds, value);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public String mset(final String... keysvalues) {
try {
return redis.mset(keysvalues);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Long msetnx(final String... keysvalues) {
try {
return redis.msetnx(keysvalues) ? 1L : 0L;
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Long decrBy(final String key, final long integer) {
try {
return redis.decrby(key, integer);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Long decr(final String key) {
try {
return redis.decr(key);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Long incrBy(final String key, final long integer) {
try {
return redis.incrby(key, integer);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Double incrByFloat(final String key, final double value) {
try {
return Double.parseDouble(redis.incrbyfloat(key, value));
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Long incr(final String key) {
try {
return redis.incr(key);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Long append(final String key, final String value) {
try {
return redis.append(key, value);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public String substr(final String key, final int start, final int end) {
try {
return redis.getrange(key, start, end);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Long hset(final String key, final String field, final String value) {
try {
return redis.hset(key, field, value) ? 1L : 0L;
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public String hget(final String key, final String field) {
try {
return redis.hget(key, field);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Long hsetnx(final String key, final String field, final String value) {
try {
return redis.hsetnx(key, field, value) ? 1L : 0L;
} catch (Exception e) {
throw new JedisException(e);
}
}
    @Override
    public String hmset(final String key, final Map<String, String> hash) {
        try {
            // The mock client API takes the first field/value pair as explicit
            // arguments and the remaining pairs as a flat varargs array, so the
            // first map entry is peeled off and the rest are packed into 'args'.
            String field = null, value = null;
            String[] args = new String[(hash.size() - 1) * 2];
            int idx = 0;
            for (String f : hash.keySet()) {
                if (field == null) {
                    // First entry becomes the explicit pair.
                    field = f;
                    value = hash.get(f);
                    continue;
                }
                args[idx] = f;
                args[idx + 1] = hash.get(f);
                idx += 2;
            }
            // NOTE(review): an empty map makes the array size negative above and
            // throws before reaching the client — confirm callers never pass one.
            return redis.hmset(key, field, value, args);
        } catch (Exception e) {
            throw new JedisException(e);
        }
    }
@Override
public List<String> hmget(final String key, final String... fields) {
try {
String field = fields[0];
String[] f = new String[fields.length - 1];
for (int idx = 1; idx < fields.length; ++idx) {
f[idx - 1] = fields[idx];
}
return redis.hmget(key, field, f);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Long hincrBy(final String key, final String field, final long value) {
try {
return redis.hincrby(key, field, value);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Double hincrByFloat(final String key, final String field, final double value) {
try {
return Double.parseDouble(redis.hincrbyfloat(key, field, value));
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Boolean hexists(final String key, final String field) {
try {
return redis.hexists(key, field);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Long hdel(final String key, final String... fields) {
try {
String field = fields[0];
String[] f = new String[fields.length - 1];
for (int idx = 1; idx < fields.length; ++idx) {
f[idx - 1] = fields[idx];
}
return redis.hdel(key, field, f);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Long hlen(final String key) {
try {
return redis.hlen(key);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Set<String> hkeys(final String key) {
try {
return redis.hkeys(key);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public List<String> hvals(final String key) {
try {
return redis.hvals(key);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Map<String, String> hgetAll(final String key) {
try {
return redis.hgetall(key);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Long rpush(final String key, final String... strings) {
try {
String element = strings[0];
String[] elements = new String[strings.length - 1];
for (int idx = 1; idx < strings.length; ++idx) {
elements[idx - 1] = strings[idx];
}
return redis.rpush(key, element, elements);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Long lpush(final String key, final String... strings) {
try {
String element = strings[0];
String[] elements = new String[strings.length - 1];
for (int idx = 1; idx < strings.length; ++idx) {
elements[idx - 1] = strings[idx];
}
return redis.lpush(key, element, elements);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Long llen(final String key) {
try {
return redis.llen(key);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public List<String> lrange(final String key, final long start, final long end) {
try {
return redis.lrange(key, start, end);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public String ltrim(final String key, final long start, final long end) {
try {
return redis.ltrim(key, start, end);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public String lindex(final String key, final long index) {
try {
return redis.lindex(key, index);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public String lset(final String key, final long index, final String value) {
try {
return redis.lset(key, index, value);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Long lrem(final String key, final long count, final String value) {
try {
return redis.lrem(key, count, value);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public String lpop(final String key) {
try {
return redis.lpop(key);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public String rpop(final String key) {
try {
return redis.rpop(key);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public String rpoplpush(final String srckey, final String dstkey) {
try {
return redis.rpoplpush(srckey, dstkey);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Long sadd(final String key, final String... members) {
try {
String member = members[0];
String[] m = new String[members.length - 1];
for (int idx = 1; idx < members.length; ++idx) {
m[idx - 1] = members[idx];
}
return redis.sadd(key, member, m);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Set<String> smembers(final String key) {
try {
return redis.smembers(key);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Long srem(final String key, final String... members) {
try {
String member = members[0];
String[] m = new String[members.length - 1];
for (int idx = 1; idx < members.length; ++idx) {
m[idx - 1] = members[idx];
}
return redis.srem(key, member, m);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public String spop(final String key) {
try {
return redis.spop(key);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Long smove(final String srckey, final String dstkey, final String member) {
try {
return redis.smove(srckey, dstkey, member) ? 1L : 0L;
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Long scard(final String key) {
try {
return redis.scard(key);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Boolean sismember(final String key, final String member) {
try {
return redis.sismember(key, member);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Set<String> sinter(final String... keys) {
try {
String key = keys[0];
String[] k = new String[keys.length - 1];
for (int idx = 0; idx < keys.length; ++idx) {
k[idx - 1] = keys[idx];
}
return redis.sinter(key, k);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Long sinterstore(final String dstkey, final String... keys) {
try {
String key = keys[0];
String[] k = new String[keys.length - 1];
for (int idx = 0; idx < keys.length; ++idx) {
k[idx - 1] = keys[idx];
}
return redis.sinterstore(dstkey, key, k);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Set<String> sunion(final String... keys) {
try {
String key = keys[0];
String[] k = new String[keys.length - 1];
for (int idx = 0; idx < keys.length; ++idx) {
k[idx - 1] = keys[idx];
}
return redis.sunion(key, k);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Long sunionstore(final String dstkey, final String... keys) {
try {
String key = keys[0];
String[] k = new String[keys.length - 1];
for (int idx = 0; idx < keys.length; ++idx) {
k[idx - 1] = keys[idx];
}
return redis.sunionstore(dstkey, key, k);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Set<String> sdiff(final String... keys) {
try {
String key = keys[0];
String[] k = new String[keys.length - 1];
for (int idx = 0; idx < keys.length; ++idx) {
k[idx - 1] = keys[idx];
}
return redis.sdiff(key, k);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Long sdiffstore(final String dstkey, final String... keys) {
try {
String key = keys[0];
String[] k = new String[keys.length - 1];
for (int idx = 0; idx < keys.length; ++idx) {
k[idx - 1] = keys[idx];
}
return redis.sdiffstore(dstkey, key, k);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public String srandmember(final String key) {
try {
return redis.srandmember(key);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public List<String> srandmember(final String key, final int count) {
try {
return redis.srandmember(key, count);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Long zadd(final String key, final double score, final String member) {
try {
return redis.zadd(key, new ZsetPair(member, score));
} catch (Exception e) {
throw new JedisException(e);
}
}
    @Override
    public Long zadd(String key, double score, String member, ZAddParams params) {
        try {
            // Only the XX flag is emulated: update only if the member already
            // has a score. NOTE(review): NX/CH and other ZAddParams flags are
            // ignored by this mock — confirm tests do not depend on them.
            if (params.getParam("xx") != null) {
                Double existing = redis.zscore(key, member);
                if (existing == null) {
                    return 0L;
                }
                return redis.zadd(key, new ZsetPair(member, score));
            } else {
                return redis.zadd(key, new ZsetPair(member, score));
            }
        } catch (Exception e) {
            throw new JedisException(e);
        }
    }
@Override
public Long zadd(final String key, final Map<String, Double> scoreMembers) {
try {
Double score = null;
String member = null;
List<ZsetPair> scoresmembers = new ArrayList<>((scoreMembers.size() - 1) * 2);
for (String m : scoreMembers.keySet()) {
if (m == null) {
member = m;
score = scoreMembers.get(m);
continue;
}
scoresmembers.add(new ZsetPair(m, scoreMembers.get(m)));
}
return redis.zadd(
key, new ZsetPair(member, score), (ZsetPair[]) scoresmembers.toArray());
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Set<String> zrange(final String key, final long start, final long end) {
try {
return ZsetPair.members(redis.zrange(key, start, end));
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Long zrem(final String key, final String... members) {
try {
String member = members[0];
String[] ms = new String[members.length - 1];
for (int idx = 1; idx < members.length; ++idx) {
ms[idx - 1] = members[idx];
}
return redis.zrem(key, member, ms);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Double zincrby(final String key, final double score, final String member) {
try {
return Double.parseDouble(redis.zincrby(key, score, member));
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Long zrank(final String key, final String member) {
try {
return redis.zrank(key, member);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Long zrevrank(final String key, final String member) {
try {
return redis.zrevrank(key, member);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Set<String> zrevrange(final String key, final long start, final long end) {
try {
return ZsetPair.members(redis.zrevrange(key, start, end));
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Set<Tuple> zrangeWithScores(final String key, final long start, final long end) {
try {
return toTupleSet(redis.zrange(key, start, end, "withscores"));
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Set<Tuple> zrevrangeWithScores(final String key, final long start, final long end) {
try {
return toTupleSet(redis.zrevrange(key, start, end, "withscores"));
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Long zcard(final String key) {
try {
return redis.zcard(key);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Double zscore(final String key, final String member) {
try {
return redis.zscore(key, member);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public String watch(final String... keys) {
try {
for (String key : keys) {
redis.watch(key);
}
return "OK";
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Long zcount(final String key, final double min, final double max) {
try {
return redis.zcount(key, min, max);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Long zcount(final String key, final String min, final String max) {
try {
return redis.zcount(key, Double.parseDouble(min), Double.parseDouble(max));
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Set<String> zrangeByScore(final String key, final double min, final double max) {
try {
return ZsetPair.members(
redis.zrangebyscore(key, String.valueOf(min), String.valueOf(max)));
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Set<String> zrangeByScore(final String key, final String min, final String max) {
try {
return ZsetPair.members(redis.zrangebyscore(key, min, max));
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Set<String> zrangeByScore(
final String key,
final double min,
final double max,
final int offset,
final int count) {
try {
return ZsetPair.members(
redis.zrangebyscore(
key,
String.valueOf(min),
String.valueOf(max),
"limit",
String.valueOf(offset),
String.valueOf(count)));
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Set<String> zrangeByScore(
final String key,
final String min,
final String max,
final int offset,
final int count) {
try {
return ZsetPair.members(
redis.zrangebyscore(
key, min, max, "limit", String.valueOf(offset), String.valueOf(count)));
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Set<Tuple> zrangeByScoreWithScores(
final String key, final double min, final double max) {
try {
return toTupleSet(
redis.zrangebyscore(
key, String.valueOf(min), String.valueOf(max), "withscores"));
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Set<Tuple> zrangeByScoreWithScores(
final String key, final String min, final String max) {
try {
return toTupleSet(redis.zrangebyscore(key, min, max, "withscores"));
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Set<Tuple> zrangeByScoreWithScores(
final String key,
final double min,
final double max,
final int offset,
final int count) {
try {
return toTupleSet(
redis.zrangebyscore(
key,
String.valueOf(min),
String.valueOf(max),
"limit",
String.valueOf(offset),
String.valueOf(count),
"withscores"));
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Set<Tuple> zrangeByScoreWithScores(
final String key,
final String min,
final String max,
final int offset,
final int count) {
try {
return toTupleSet(
redis.zrangebyscore(
key,
min,
max,
"limit",
String.valueOf(offset),
String.valueOf(count),
"withscores"));
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Set<String> zrevrangeByScore(final String key, final double max, final double min) {
try {
return ZsetPair.members(
redis.zrevrangebyscore(key, String.valueOf(max), String.valueOf(min)));
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Set<String> zrevrangeByScore(final String key, final String max, final String min) {
try {
return ZsetPair.members(redis.zrevrangebyscore(key, max, min));
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Set<String> zrevrangeByScore(
final String key,
final double max,
final double min,
final int offset,
final int count) {
try {
return ZsetPair.members(
redis.zrevrangebyscore(
key,
String.valueOf(max),
String.valueOf(min),
"limit",
String.valueOf(offset),
String.valueOf(count)));
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Set<Tuple> zrevrangeByScoreWithScores(
final String key, final double max, final double min) {
try {
return toTupleSet(
redis.zrevrangebyscore(
key, String.valueOf(max), String.valueOf(min), "withscores"));
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Set<Tuple> zrevrangeByScoreWithScores(
final String key,
final double max,
final double min,
final int offset,
final int count) {
try {
return toTupleSet(
redis.zrevrangebyscore(
key,
String.valueOf(max),
String.valueOf(min),
"limit",
String.valueOf(offset),
String.valueOf(count),
"withscores"));
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Set<Tuple> zrevrangeByScoreWithScores(
final String key,
final String max,
final String min,
final int offset,
final int count) {
try {
return toTupleSet(
redis.zrevrangebyscore(
key,
max,
min,
"limit",
String.valueOf(offset),
String.valueOf(count),
"withscores"));
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Set<String> zrevrangeByScore(
final String key,
final String max,
final String min,
final int offset,
final int count) {
try {
return ZsetPair.members(
redis.zrevrangebyscore(
key, max, min, "limit", String.valueOf(offset), String.valueOf(count)));
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Set<Tuple> zrevrangeByScoreWithScores(
final String key, final String max, final String min) {
try {
return toTupleSet(redis.zrevrangebyscore(key, max, min, "withscores"));
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Long zremrangeByRank(final String key, final long start, final long end) {
try {
return redis.zremrangebyrank(key, start, end);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Long zremrangeByScore(final String key, final double start, final double end) {
try {
return redis.zremrangebyscore(key, String.valueOf(start), String.valueOf(end));
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Long zremrangeByScore(final String key, final String start, final String end) {
try {
return redis.zremrangebyscore(key, start, end);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Long zunionstore(final String dstkey, final String... sets) {
try {
return redis.zunionstore(dstkey, sets.length, sets);
} catch (Exception e) {
throw new JedisException(e);
}
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | true |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/redis-persistence/src/main/java/com/netflix/conductor/redis/jedis/JedisSentinel.java | redis-persistence/src/main/java/com/netflix/conductor/redis/jedis/JedisSentinel.java | /*
* Copyright 2020 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.redis.jedis;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import redis.clients.jedis.BitPosParams;
import redis.clients.jedis.GeoCoordinate;
import redis.clients.jedis.GeoRadiusResponse;
import redis.clients.jedis.GeoUnit;
import redis.clients.jedis.Jedis;
import redis.clients.jedis.JedisPoolAbstract;
import redis.clients.jedis.ListPosition;
import redis.clients.jedis.ScanParams;
import redis.clients.jedis.ScanResult;
import redis.clients.jedis.SortingParams;
import redis.clients.jedis.StreamConsumersInfo;
import redis.clients.jedis.StreamEntry;
import redis.clients.jedis.StreamEntryID;
import redis.clients.jedis.StreamGroupInfo;
import redis.clients.jedis.StreamInfo;
import redis.clients.jedis.StreamPendingEntry;
import redis.clients.jedis.Tuple;
import redis.clients.jedis.commands.JedisCommands;
import redis.clients.jedis.params.GeoRadiusParam;
import redis.clients.jedis.params.SetParams;
import redis.clients.jedis.params.ZAddParams;
import redis.clients.jedis.params.ZIncrByParams;
public class JedisSentinel implements JedisCommands {
private final JedisPoolAbstract jedisPool;
public JedisSentinel(JedisPoolAbstract jedisPool) {
this.jedisPool = jedisPool;
}
@Override
public String set(String key, String value) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.set(key, value);
}
}
@Override
public String set(String key, String value, SetParams params) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.set(key, value, params);
}
}
@Override
public String get(String key) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.get(key);
}
}
@Override
public Boolean exists(String key) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.exists(key);
}
}
@Override
public Long persist(String key) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.persist(key);
}
}
@Override
public String type(String key) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.type(key);
}
}
@Override
public byte[] dump(String key) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.dump(key);
}
}
@Override
public String restore(String key, int ttl, byte[] serializedValue) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.restore(key, ttl, serializedValue);
}
}
@Override
public String restoreReplace(String key, int ttl, byte[] serializedValue) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.restoreReplace(key, ttl, serializedValue);
}
}
@Override
public Long expire(String key, int seconds) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.expire(key, seconds);
}
}
@Override
public Long pexpire(String key, long milliseconds) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.pexpire(key, milliseconds);
}
}
@Override
public Long expireAt(String key, long unixTime) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.expireAt(key, unixTime);
}
}
@Override
public Long pexpireAt(String key, long millisecondsTimestamp) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.pexpireAt(key, millisecondsTimestamp);
}
}
@Override
public Long ttl(String key) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.ttl(key);
}
}
@Override
public Long pttl(String key) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.pttl(key);
}
}
@Override
public Long touch(String key) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.touch(key);
}
}
@Override
public Boolean setbit(String key, long offset, boolean value) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.setbit(key, offset, value);
}
}
@Override
public Boolean setbit(String key, long offset, String value) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.setbit(key, offset, value);
}
}
@Override
public Boolean getbit(String key, long offset) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.getbit(key, offset);
}
}
@Override
public Long setrange(String key, long offset, String value) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.setrange(key, offset, value);
}
}
@Override
public String getrange(String key, long startOffset, long endOffset) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.getrange(key, startOffset, endOffset);
}
}
@Override
public String getSet(String key, String value) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.getSet(key, value);
}
}
@Override
public Long setnx(String key, String value) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.setnx(key, value);
}
}
@Override
public String setex(String key, int seconds, String value) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.setex(key, seconds, value);
}
}
@Override
public String psetex(String key, long milliseconds, String value) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.psetex(key, milliseconds, value);
}
}
@Override
public Long decrBy(String key, long integer) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.decrBy(key, integer);
}
}
@Override
public Long decr(String key) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.decr(key);
}
}
@Override
public Long incrBy(String key, long integer) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.incrBy(key, integer);
}
}
@Override
public Double incrByFloat(String key, double value) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.incrByFloat(key, value);
}
}
@Override
public Long incr(String key) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.incr(key);
}
}
@Override
public Long append(String key, String value) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.append(key, value);
}
}
@Override
public String substr(String key, int start, int end) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.substr(key, start, end);
}
}
@Override
public Long hset(String key, String field, String value) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.hset(key, field, value);
}
}
@Override
public Long hset(String key, Map<String, String> hash) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.hset(key, hash);
}
}
@Override
public String hget(String key, String field) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.hget(key, field);
}
}
@Override
public Long hsetnx(String key, String field, String value) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.hsetnx(key, field, value);
}
}
@Override
public String hmset(String key, Map<String, String> hash) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.hmset(key, hash);
}
}
@Override
public List<String> hmget(String key, String... fields) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.hmget(key, fields);
}
}
@Override
public Long hincrBy(String key, String field, long value) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.hincrBy(key, field, value);
}
}
@Override
public Double hincrByFloat(String key, String field, double value) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.hincrByFloat(key, field, value);
}
}
@Override
public Boolean hexists(String key, String field) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.hexists(key, field);
}
}
@Override
public Long hdel(String key, String... field) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.hdel(key, field);
}
}
@Override
public Long hlen(String key) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.hlen(key);
}
}
@Override
public Set<String> hkeys(String key) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.hkeys(key);
}
}
@Override
public List<String> hvals(String key) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.hvals(key);
}
}
@Override
public Map<String, String> hgetAll(String key) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.hgetAll(key);
}
}
@Override
public Long rpush(String key, String... string) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.rpush(key, string);
}
}
@Override
public Long lpush(String key, String... string) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.lpush(key, string);
}
}
@Override
public Long llen(String key) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.llen(key);
}
}
@Override
public List<String> lrange(String key, long start, long end) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.lrange(key, start, end);
}
}
@Override
public String ltrim(String key, long start, long end) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.ltrim(key, start, end);
}
}
@Override
public String lindex(String key, long index) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.lindex(key, index);
}
}
@Override
public String lset(String key, long index, String value) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.lset(key, index, value);
}
}
@Override
public Long lrem(String key, long count, String value) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.lrem(key, count, value);
}
}
@Override
public String lpop(String key) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.lpop(key);
}
}
@Override
public String rpop(String key) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.rpop(key);
}
}
@Override
public Long sadd(String key, String... member) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.sadd(key, member);
}
}
@Override
public Set<String> smembers(String key) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.smembers(key);
}
}
@Override
public Long srem(String key, String... member) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.srem(key, member);
}
}
@Override
public String spop(String key) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.spop(key);
}
}
@Override
public Set<String> spop(String key, long count) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.spop(key, count);
}
}
@Override
public Long scard(String key) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.scard(key);
}
}
@Override
public Boolean sismember(String key, String member) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.sismember(key, member);
}
}
@Override
public String srandmember(String key) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.srandmember(key);
}
}
@Override
public List<String> srandmember(String key, int count) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.srandmember(key, count);
}
}
@Override
public Long strlen(String key) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.strlen(key);
}
}
@Override
public Long zadd(String key, double score, String member) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.zadd(key, score, member);
}
}
@Override
public Long zadd(String key, double score, String member, ZAddParams params) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.zadd(key, score, member, params);
}
}
@Override
public Long zadd(String key, Map<String, Double> scoreMembers) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.zadd(key, scoreMembers);
}
}
@Override
public Long zadd(String key, Map<String, Double> scoreMembers, ZAddParams params) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.zadd(key, scoreMembers, params);
}
}
@Override
public Set<String> zrange(String key, long start, long end) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.zrange(key, start, end);
}
}
@Override
public Long zrem(String key, String... member) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.zrem(key, member);
}
}
@Override
public Double zincrby(String key, double score, String member) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.zincrby(key, score, member);
}
}
@Override
public Double zincrby(String key, double score, String member, ZIncrByParams params) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.zincrby(key, score, member, params);
}
}
@Override
public Long zrank(String key, String member) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.zrank(key, member);
}
}
@Override
public Long zrevrank(String key, String member) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.zrevrank(key, member);
}
}
@Override
public Set<String> zrevrange(String key, long start, long end) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.zrevrange(key, start, end);
}
}
@Override
public Set<Tuple> zrangeWithScores(String key, long start, long end) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.zrangeWithScores(key, start, end);
}
}
@Override
public Set<Tuple> zrevrangeWithScores(String key, long start, long end) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.zrevrangeWithScores(key, start, end);
}
}
@Override
public Long zcard(String key) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.zcard(key);
}
}
@Override
public Double zscore(String key, String member) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.zscore(key, member);
}
}
@Override
public Tuple zpopmax(String key) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.zpopmax(key);
}
}
@Override
public Set<Tuple> zpopmax(String key, int count) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.zpopmax(key, count);
}
}
@Override
public Tuple zpopmin(String key) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.zpopmin(key);
}
}
@Override
public Set<Tuple> zpopmin(String key, int count) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.zpopmin(key, count);
}
}
@Override
public List<String> sort(String key) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.sort(key);
}
}
@Override
public List<String> sort(String key, SortingParams sortingParameters) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.sort(key, sortingParameters);
}
}
@Override
public Long zcount(String key, double min, double max) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.zcount(key, min, max);
}
}
@Override
public Long zcount(String key, String min, String max) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.zcount(key, min, max);
}
}
@Override
public Set<String> zrangeByScore(String key, double min, double max) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.zrangeByScore(key, min, max);
}
}
@Override
public Set<String> zrangeByScore(String key, String min, String max) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.zrangeByScore(key, min, max);
}
}
@Override
public Set<String> zrevrangeByScore(String key, double max, double min) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.zrevrangeByScore(key, max, min);
}
}
@Override
public Set<String> zrangeByScore(String key, double min, double max, int offset, int count) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.zrangeByScore(key, min, max, offset, count);
}
}
@Override
public Set<String> zrevrangeByScore(String key, String max, String min) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.zrevrangeByScore(key, max, min);
}
}
@Override
public Set<String> zrangeByScore(String key, String min, String max, int offset, int count) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.zrangeByScore(key, min, max, offset, count);
}
}
@Override
public Set<String> zrevrangeByScore(String key, double max, double min, int offset, int count) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.zrevrangeByScore(key, max, min, offset, count);
}
}
@Override
public Set<Tuple> zrangeByScoreWithScores(String key, double min, double max) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.zrangeByScoreWithScores(key, min, max);
}
}
@Override
public Set<Tuple> zrevrangeByScoreWithScores(String key, double max, double min) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.zrevrangeByScoreWithScores(key, max, min);
}
}
@Override
public Set<Tuple> zrangeByScoreWithScores(
String key, double min, double max, int offset, int count) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.zrangeByScoreWithScores(key, min, max, offset, count);
}
}
@Override
public Set<String> zrevrangeByScore(String key, String max, String min, int offset, int count) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.zrevrangeByScore(key, max, min, offset, count);
}
}
@Override
public Set<Tuple> zrangeByScoreWithScores(String key, String min, String max) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.zrangeByScoreWithScores(key, min, max);
}
}
@Override
public Set<Tuple> zrevrangeByScoreWithScores(String key, String max, String min) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.zrevrangeByScoreWithScores(key, max, min);
}
}
@Override
public Set<Tuple> zrangeByScoreWithScores(
String key, String min, String max, int offset, int count) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.zrangeByScoreWithScores(key, min, max, offset, count);
}
}
@Override
public Set<Tuple> zrevrangeByScoreWithScores(
String key, double max, double min, int offset, int count) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.zrevrangeByScoreWithScores(key, max, min, offset, count);
}
}
@Override
public Set<Tuple> zrevrangeByScoreWithScores(
String key, String max, String min, int offset, int count) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.zrevrangeByScoreWithScores(key, max, min, offset, count);
}
}
@Override
public Long zremrangeByRank(String key, long start, long end) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.zremrangeByRank(key, start, end);
}
}
@Override
public Long zremrangeByScore(String key, double start, double end) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.zremrangeByScore(key, start, end);
}
}
@Override
public Long zremrangeByScore(String key, String start, String end) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.zremrangeByScore(key, start, end);
}
}
@Override
public Long zlexcount(String key, String min, String max) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.zlexcount(key, min, max);
}
}
@Override
public Set<String> zrangeByLex(String key, String min, String max) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.zrangeByLex(key, min, max);
}
}
@Override
public Set<String> zrangeByLex(String key, String min, String max, int offset, int count) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.zrangeByLex(key, min, max, offset, count);
}
}
@Override
public Set<String> zrevrangeByLex(String key, String max, String min) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.zrevrangeByLex(key, max, min);
}
}
@Override
public Set<String> zrevrangeByLex(String key, String max, String min, int offset, int count) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.zrevrangeByLex(key, max, min, offset, count);
}
}
@Override
public Long zremrangeByLex(String key, String min, String max) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.zremrangeByLex(key, min, max);
}
}
@Override
public Long linsert(String key, ListPosition where, String pivot, String value) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.linsert(key, where, pivot, value);
}
}
@Override
public Long lpushx(String key, String... string) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.lpushx(key, string);
}
}
@Override
public Long rpushx(String key, String... string) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.rpushx(key, string);
}
}
@Override
public List<String> blpop(int timeout, String key) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.blpop(timeout, key);
}
}
@Override
public List<String> brpop(int timeout, String key) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.brpop(timeout, key);
}
}
@Override
public Long del(String key) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.del(key);
}
}
@Override
public Long unlink(String key) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.unlink(key);
}
}
@Override
public String echo(String string) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.echo(string);
}
}
@Override
public Long move(String key, int dbIndex) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.move(key, dbIndex);
}
}
@Override
public Long bitcount(String key) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.bitcount(key);
}
}
@Override
public Long bitcount(String key, long start, long end) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.bitcount(key, start, end);
}
}
@Override
public Long bitpos(String key, boolean value) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.bitpos(key, value);
}
}
@Override
public Long bitpos(String key, boolean value, BitPosParams params) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.bitpos(key, value, params);
}
}
@Override
public ScanResult<Entry<String, String>> hscan(String key, String cursor) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.hscan(key, cursor);
}
}
@Override
public ScanResult<Entry<String, String>> hscan(String key, String cursor, ScanParams params) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.hscan(key, cursor, params);
}
}
@Override
public ScanResult<String> sscan(String key, String cursor) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.sscan(key, cursor);
}
}
@Override
public ScanResult<String> sscan(String key, String cursor, ScanParams params) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.sscan(key, cursor, params);
}
}
@Override
public ScanResult<Tuple> zscan(String key, String cursor) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.zscan(key, cursor);
}
}
@Override
public ScanResult<Tuple> zscan(String key, String cursor, ScanParams params) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.zscan(key, cursor, params);
}
}
@Override
public Long pfadd(String key, String... elements) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.pfadd(key, elements);
}
}
@Override
public long pfcount(String key) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.pfcount(key);
}
}
@Override
public Long geoadd(String key, double longitude, double latitude, String member) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.geoadd(key, longitude, latitude, member);
}
}
@Override
public Long geoadd(String key, Map<String, GeoCoordinate> memberCoordinateMap) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.geoadd(key, memberCoordinateMap);
}
}
@Override
public Double geodist(String key, String member1, String member2) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.geodist(key, member1, member2);
}
}
@Override
public Double geodist(String key, String member1, String member2, GeoUnit unit) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.geodist(key, member1, member2, unit);
}
}
@Override
public List<String> geohash(String key, String... members) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.geohash(key, members);
}
}
@Override
public List<GeoCoordinate> geopos(String key, String... members) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.geopos(key, members);
}
}
@Override
public List<GeoRadiusResponse> georadius(
String key, double longitude, double latitude, double radius, GeoUnit unit) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.georadius(key, longitude, latitude, radius, unit);
}
}
@Override
public List<GeoRadiusResponse> georadiusReadonly(
String key, double longitude, double latitude, double radius, GeoUnit unit) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.georadiusReadonly(key, longitude, latitude, radius, unit);
}
}
@Override
public List<GeoRadiusResponse> georadius(
String key,
double longitude,
double latitude,
double radius,
GeoUnit unit,
GeoRadiusParam param) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.georadius(key, longitude, latitude, radius, unit, param);
}
}
@Override
public List<GeoRadiusResponse> georadiusReadonly(
String key,
double longitude,
double latitude,
double radius,
GeoUnit unit,
GeoRadiusParam param) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.georadiusReadonly(key, longitude, latitude, radius, unit, param);
}
}
@Override
public List<GeoRadiusResponse> georadiusByMember(
String key, String member, double radius, GeoUnit unit) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.georadiusByMember(key, member, radius, unit);
}
}
@Override
public List<GeoRadiusResponse> georadiusByMemberReadonly(
String key, String member, double radius, GeoUnit unit) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.georadiusByMemberReadonly(key, member, radius, unit);
}
}
@Override
public List<GeoRadiusResponse> georadiusByMember(
String key, String member, double radius, GeoUnit unit, GeoRadiusParam param) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.georadiusByMember(key, member, radius, unit, param);
}
}
@Override
public List<GeoRadiusResponse> georadiusByMemberReadonly(
String key, String member, double radius, GeoUnit unit, GeoRadiusParam param) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.georadiusByMemberReadonly(key, member, radius, unit, param);
}
}
@Override
public List<Long> bitfield(String key, String... arguments) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.bitfield(key, arguments);
}
}
@Override
public List<Long> bitfieldReadonly(String key, String... arguments) {
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | true |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/redis-persistence/src/main/java/com/netflix/conductor/redis/jedis/JedisProxy.java | redis-persistence/src/main/java/com/netflix/conductor/redis/jedis/JedisProxy.java | /*
* Copyright 2020 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.redis.jedis;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Optional;
import java.util.Set;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Qualifier;
import org.springframework.context.annotation.Conditional;
import org.springframework.stereotype.Component;
import com.netflix.conductor.redis.config.AnyRedisCondition;
import redis.clients.jedis.ScanParams;
import redis.clients.jedis.ScanResult;
import redis.clients.jedis.Tuple;
import redis.clients.jedis.commands.JedisCommands;
import redis.clients.jedis.params.ZAddParams;
import static com.netflix.conductor.redis.config.RedisCommonConfiguration.DEFAULT_CLIENT_INJECTION_NAME;
/** Proxy for the {@link JedisCommands} object. */
@Component
@Conditional(AnyRedisCondition.class)
public class JedisProxy {
private static final Logger LOGGER = LoggerFactory.getLogger(JedisProxy.class);
protected JedisCommands jedisCommands;
public JedisProxy(@Qualifier(DEFAULT_CLIENT_INJECTION_NAME) JedisCommands jedisCommands) {
this.jedisCommands = jedisCommands;
}
public Set<String> zrange(String key, long start, long end) {
return jedisCommands.zrange(key, start, end);
}
public Set<Tuple> zrangeByScoreWithScores(String key, double maxScore, int count) {
return jedisCommands.zrangeByScoreWithScores(key, 0, maxScore, 0, count);
}
public Set<String> zrangeByScore(String key, double maxScore, int count) {
return jedisCommands.zrangeByScore(key, 0, maxScore, 0, count);
}
public Set<String> zrangeByScore(String key, double minScore, double maxScore, int count) {
return jedisCommands.zrangeByScore(key, minScore, maxScore, 0, count);
}
public ScanResult<Tuple> zscan(String key, int cursor) {
return jedisCommands.zscan(key, "" + cursor);
}
public String get(String key) {
return jedisCommands.get(key);
}
public Long zcard(String key) {
return jedisCommands.zcard(key);
}
public Long del(String key) {
return jedisCommands.del(key);
}
public Long zrem(String key, String member) {
return jedisCommands.zrem(key, member);
}
public long zremrangeByScore(String key, String start, String end) {
return jedisCommands.zremrangeByScore(key, start, end);
}
public long zcount(String key, double min, double max) {
return jedisCommands.zcount(key, min, max);
}
public String set(String key, String value) {
return jedisCommands.set(key, value);
}
public Long setnx(String key, String value) {
return jedisCommands.setnx(key, value);
}
public Long zadd(String key, double score, String member) {
return jedisCommands.zadd(key, score, member);
}
public Long zaddnx(String key, double score, String member) {
ZAddParams params = ZAddParams.zAddParams().nx();
return jedisCommands.zadd(key, score, member, params);
}
public Long hset(String key, String field, String value) {
return jedisCommands.hset(key, field, value);
}
public Long hsetnx(String key, String field, String value) {
return jedisCommands.hsetnx(key, field, value);
}
public Long hlen(String key) {
return jedisCommands.hlen(key);
}
public String hget(String key, String field) {
return jedisCommands.hget(key, field);
}
public Optional<String> optionalHget(String key, String field) {
return Optional.ofNullable(jedisCommands.hget(key, field));
}
public Map<String, String> hscan(String key, int count) {
Map<String, String> m = new HashMap<>();
int cursor = 0;
do {
ScanResult<Entry<String, String>> scanResult = jedisCommands.hscan(key, "" + cursor);
cursor = Integer.parseInt(scanResult.getCursor());
for (Entry<String, String> r : scanResult.getResult()) {
m.put(r.getKey(), r.getValue());
}
if (m.size() > count) {
break;
}
} while (cursor > 0);
return m;
}
public Map<String, String> hgetAll(String key) {
Map<String, String> m = new HashMap<>();
int cursor = 0;
do {
ScanResult<Entry<String, String>> scanResult = jedisCommands.hscan(key, "" + cursor);
cursor = Integer.parseInt(scanResult.getCursor());
for (Entry<String, String> r : scanResult.getResult()) {
m.put(r.getKey(), r.getValue());
}
} while (cursor > 0);
return m;
}
public List<String> hvals(String key) {
LOGGER.trace("hvals {}", key);
return jedisCommands.hvals(key);
}
public Set<String> hkeys(String key) {
LOGGER.trace("hkeys {}", key);
Set<String> keys = new HashSet<>();
int cursor = 0;
do {
ScanResult<Entry<String, String>> sr = jedisCommands.hscan(key, "" + cursor);
cursor = Integer.parseInt(sr.getCursor());
List<Entry<String, String>> result = sr.getResult();
for (Entry<String, String> e : result) {
keys.add(e.getKey());
}
} while (cursor > 0);
return keys;
}
public Long hdel(String key, String... fields) {
LOGGER.trace("hdel {} {}", key, fields[0]);
return jedisCommands.hdel(key, fields);
}
public Long expire(String key, int seconds) {
return jedisCommands.expire(key, seconds);
}
public Boolean hexists(String key, String field) {
return jedisCommands.hexists(key, field);
}
public Long sadd(String key, String value) {
LOGGER.trace("sadd {} {}", key, value);
return jedisCommands.sadd(key, value);
}
public Long srem(String key, String member) {
LOGGER.trace("srem {} {}", key, member);
return jedisCommands.srem(key, member);
}
public boolean sismember(String key, String member) {
return jedisCommands.sismember(key, member);
}
public Set<String> smembers(String key) {
LOGGER.trace("smembers {}", key);
Set<String> r = new HashSet<>();
int cursor = 0;
ScanParams sp = new ScanParams();
sp.count(50);
do {
ScanResult<String> scanResult = jedisCommands.sscan(key, "" + cursor, sp);
cursor = Integer.parseInt(scanResult.getCursor());
r.addAll(scanResult.getResult());
} while (cursor > 0);
return r;
}
public Long scard(String key) {
return jedisCommands.scard(key);
}
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/redis-persistence/src/main/java/com/netflix/conductor/redis/jedis/JedisCluster.java | redis-persistence/src/main/java/com/netflix/conductor/redis/jedis/JedisCluster.java | /*
* Copyright 2020 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.redis.jedis;
import java.nio.charset.StandardCharsets;
import java.util.AbstractMap;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import java.util.stream.Collectors;
import redis.clients.jedis.BitPosParams;
import redis.clients.jedis.GeoCoordinate;
import redis.clients.jedis.GeoRadiusResponse;
import redis.clients.jedis.GeoUnit;
import redis.clients.jedis.ListPosition;
import redis.clients.jedis.ScanParams;
import redis.clients.jedis.ScanResult;
import redis.clients.jedis.SortingParams;
import redis.clients.jedis.StreamConsumersInfo;
import redis.clients.jedis.StreamEntry;
import redis.clients.jedis.StreamEntryID;
import redis.clients.jedis.StreamGroupInfo;
import redis.clients.jedis.StreamInfo;
import redis.clients.jedis.StreamPendingEntry;
import redis.clients.jedis.Tuple;
import redis.clients.jedis.commands.JedisCommands;
import redis.clients.jedis.params.GeoRadiusParam;
import redis.clients.jedis.params.SetParams;
import redis.clients.jedis.params.ZAddParams;
import redis.clients.jedis.params.ZIncrByParams;
/**
 * A {@link JedisCommands} implementation that delegates every operation to a
 * {@link redis.clients.jedis.JedisCluster} instance.
 *
 * <p>Almost all methods are straight pass-throughs. A few commands are not
 * available through this adapter and throw {@link UnsupportedOperationException}
 * ({@code restoreReplace}, {@code move}, {@code bitpos}); the XINFO
 * introspection calls currently return {@code null}.
 *
 * <p>Fix: the scan variants and {@code hincrByFloat} previously converted
 * between String and byte[] with the platform default charset
 * ({@code String.getBytes()} / {@code new String(byte[])}), making behavior
 * platform-dependent. They now use {@link StandardCharsets#UTF_8} explicitly.
 */
public class JedisCluster implements JedisCommands {

    private final redis.clients.jedis.JedisCluster jedisCluster;

    public JedisCluster(redis.clients.jedis.JedisCluster jedisCluster) {
        this.jedisCluster = jedisCluster;
    }

    @Override
    public String set(String key, String value) {
        return jedisCluster.set(key, value);
    }

    @Override
    public String set(String key, String value, SetParams params) {
        return jedisCluster.set(key, value, params);
    }

    @Override
    public String get(String key) {
        return jedisCluster.get(key);
    }

    @Override
    public Boolean exists(String key) {
        return jedisCluster.exists(key);
    }

    @Override
    public Long persist(String key) {
        return jedisCluster.persist(key);
    }

    @Override
    public String type(String key) {
        return jedisCluster.type(key);
    }

    @Override
    public byte[] dump(String key) {
        return jedisCluster.dump(key);
    }

    @Override
    public String restore(String key, int ttl, byte[] serializedValue) {
        return jedisCluster.restore(key, ttl, serializedValue);
    }

    @Override
    public String restoreReplace(String key, int ttl, byte[] serializedValue) {
        // Not supported by this adapter.
        throw new UnsupportedOperationException();
    }

    @Override
    public Long expire(String key, int seconds) {
        return jedisCluster.expire(key, seconds);
    }

    @Override
    public Long pexpire(String key, long milliseconds) {
        return jedisCluster.pexpire(key, milliseconds);
    }

    @Override
    public Long expireAt(String key, long unixTime) {
        return jedisCluster.expireAt(key, unixTime);
    }

    @Override
    public Long pexpireAt(String key, long millisecondsTimestamp) {
        return jedisCluster.pexpireAt(key, millisecondsTimestamp);
    }

    @Override
    public Long ttl(String key) {
        return jedisCluster.ttl(key);
    }

    @Override
    public Long pttl(String key) {
        return jedisCluster.pttl(key);
    }

    @Override
    public Long touch(String key) {
        return jedisCluster.touch(key);
    }

    @Override
    public Boolean setbit(String key, long offset, boolean value) {
        return jedisCluster.setbit(key, offset, value);
    }

    @Override
    public Boolean setbit(String key, long offset, String value) {
        return jedisCluster.setbit(key, offset, value);
    }

    @Override
    public Boolean getbit(String key, long offset) {
        return jedisCluster.getbit(key, offset);
    }

    @Override
    public Long setrange(String key, long offset, String value) {
        return jedisCluster.setrange(key, offset, value);
    }

    @Override
    public String getrange(String key, long startOffset, long endOffset) {
        return jedisCluster.getrange(key, startOffset, endOffset);
    }

    @Override
    public String getSet(String key, String value) {
        return jedisCluster.getSet(key, value);
    }

    @Override
    public Long setnx(String key, String value) {
        return jedisCluster.setnx(key, value);
    }

    @Override
    public String setex(String key, int seconds, String value) {
        return jedisCluster.setex(key, seconds, value);
    }

    @Override
    public String psetex(String key, long milliseconds, String value) {
        return jedisCluster.psetex(key, milliseconds, value);
    }

    @Override
    public Long decrBy(String key, long integer) {
        return jedisCluster.decrBy(key, integer);
    }

    @Override
    public Long decr(String key) {
        return jedisCluster.decr(key);
    }

    @Override
    public Long incrBy(String key, long integer) {
        return jedisCluster.incrBy(key, integer);
    }

    @Override
    public Double incrByFloat(String key, double value) {
        return jedisCluster.incrByFloat(key, value);
    }

    @Override
    public Long incr(String key) {
        return jedisCluster.incr(key);
    }

    @Override
    public Long append(String key, String value) {
        return jedisCluster.append(key, value);
    }

    @Override
    public String substr(String key, int start, int end) {
        return jedisCluster.substr(key, start, end);
    }

    @Override
    public Long hset(String key, String field, String value) {
        return jedisCluster.hset(key, field, value);
    }

    @Override
    public Long hset(String key, Map<String, String> hash) {
        return jedisCluster.hset(key, hash);
    }

    @Override
    public String hget(String key, String field) {
        return jedisCluster.hget(key, field);
    }

    @Override
    public Long hsetnx(String key, String field, String value) {
        return jedisCluster.hsetnx(key, field, value);
    }

    @Override
    public String hmset(String key, Map<String, String> hash) {
        return jedisCluster.hmset(key, hash);
    }

    @Override
    public List<String> hmget(String key, String... fields) {
        return jedisCluster.hmget(key, fields);
    }

    @Override
    public Long hincrBy(String key, String field, long value) {
        return jedisCluster.hincrBy(key, field, value);
    }

    @Override
    public Double hincrByFloat(String key, String field, double value) {
        // Routed through the byte[] overload; encode explicitly as UTF-8 instead
        // of relying on the platform default charset.
        return jedisCluster.hincrByFloat(
                key.getBytes(StandardCharsets.UTF_8),
                field.getBytes(StandardCharsets.UTF_8),
                value);
    }

    @Override
    public Boolean hexists(String key, String field) {
        return jedisCluster.hexists(key, field);
    }

    @Override
    public Long hdel(String key, String... field) {
        return jedisCluster.hdel(key, field);
    }

    @Override
    public Long hlen(String key) {
        return jedisCluster.hlen(key);
    }

    @Override
    public Set<String> hkeys(String key) {
        return jedisCluster.hkeys(key);
    }

    @Override
    public List<String> hvals(String key) {
        return jedisCluster.hvals(key);
    }

    @Override
    public Map<String, String> hgetAll(String key) {
        return jedisCluster.hgetAll(key);
    }

    @Override
    public Long rpush(String key, String... string) {
        return jedisCluster.rpush(key, string);
    }

    @Override
    public Long lpush(String key, String... string) {
        return jedisCluster.lpush(key, string);
    }

    @Override
    public Long llen(String key) {
        return jedisCluster.llen(key);
    }

    @Override
    public List<String> lrange(String key, long start, long end) {
        return jedisCluster.lrange(key, start, end);
    }

    @Override
    public String ltrim(String key, long start, long end) {
        return jedisCluster.ltrim(key, start, end);
    }

    @Override
    public String lindex(String key, long index) {
        return jedisCluster.lindex(key, index);
    }

    @Override
    public String lset(String key, long index, String value) {
        return jedisCluster.lset(key, index, value);
    }

    @Override
    public Long lrem(String key, long count, String value) {
        return jedisCluster.lrem(key, count, value);
    }

    @Override
    public String lpop(String key) {
        return jedisCluster.lpop(key);
    }

    @Override
    public String rpop(String key) {
        return jedisCluster.rpop(key);
    }

    @Override
    public Long sadd(String key, String... member) {
        return jedisCluster.sadd(key, member);
    }

    @Override
    public Set<String> smembers(String key) {
        return jedisCluster.smembers(key);
    }

    @Override
    public Long srem(String key, String... member) {
        return jedisCluster.srem(key, member);
    }

    @Override
    public String spop(String key) {
        return jedisCluster.spop(key);
    }

    @Override
    public Set<String> spop(String key, long count) {
        return jedisCluster.spop(key, count);
    }

    @Override
    public Long scard(String key) {
        return jedisCluster.scard(key);
    }

    @Override
    public Boolean sismember(String key, String member) {
        return jedisCluster.sismember(key, member);
    }

    @Override
    public String srandmember(String key) {
        return jedisCluster.srandmember(key);
    }

    @Override
    public List<String> srandmember(String key, int count) {
        return jedisCluster.srandmember(key, count);
    }

    @Override
    public Long strlen(String key) {
        return jedisCluster.strlen(key);
    }

    @Override
    public Long zadd(String key, double score, String member) {
        return jedisCluster.zadd(key, score, member);
    }

    @Override
    public Long zadd(String key, double score, String member, ZAddParams params) {
        return jedisCluster.zadd(key, score, member, params);
    }

    @Override
    public Long zadd(String key, Map<String, Double> scoreMembers) {
        return jedisCluster.zadd(key, scoreMembers);
    }

    @Override
    public Long zadd(String key, Map<String, Double> scoreMembers, ZAddParams params) {
        return jedisCluster.zadd(key, scoreMembers, params);
    }

    @Override
    public Set<String> zrange(String key, long start, long end) {
        return jedisCluster.zrange(key, start, end);
    }

    @Override
    public Long zrem(String key, String... member) {
        return jedisCluster.zrem(key, member);
    }

    @Override
    public Double zincrby(String key, double score, String member) {
        return jedisCluster.zincrby(key, score, member);
    }

    @Override
    public Double zincrby(String key, double score, String member, ZIncrByParams params) {
        return jedisCluster.zincrby(key, score, member, params);
    }

    @Override
    public Long zrank(String key, String member) {
        return jedisCluster.zrank(key, member);
    }

    @Override
    public Long zrevrank(String key, String member) {
        return jedisCluster.zrevrank(key, member);
    }

    @Override
    public Set<String> zrevrange(String key, long start, long end) {
        return jedisCluster.zrevrange(key, start, end);
    }

    @Override
    public Set<Tuple> zrangeWithScores(String key, long start, long end) {
        return jedisCluster.zrangeWithScores(key, start, end);
    }

    @Override
    public Set<Tuple> zrevrangeWithScores(String key, long start, long end) {
        return jedisCluster.zrevrangeWithScores(key, start, end);
    }

    @Override
    public Long zcard(String key) {
        return jedisCluster.zcard(key);
    }

    @Override
    public Double zscore(String key, String member) {
        return jedisCluster.zscore(key, member);
    }

    @Override
    public Tuple zpopmax(String key) {
        return jedisCluster.zpopmax(key);
    }

    @Override
    public Set<Tuple> zpopmax(String key, int count) {
        return jedisCluster.zpopmax(key, count);
    }

    @Override
    public Tuple zpopmin(String key) {
        return jedisCluster.zpopmin(key);
    }

    @Override
    public Set<Tuple> zpopmin(String key, int count) {
        return jedisCluster.zpopmin(key, count);
    }

    @Override
    public List<String> sort(String key) {
        return jedisCluster.sort(key);
    }

    @Override
    public List<String> sort(String key, SortingParams sortingParameters) {
        return jedisCluster.sort(key, sortingParameters);
    }

    @Override
    public Long zcount(String key, double min, double max) {
        return jedisCluster.zcount(key, min, max);
    }

    @Override
    public Long zcount(String key, String min, String max) {
        return jedisCluster.zcount(key, min, max);
    }

    @Override
    public Set<String> zrangeByScore(String key, double min, double max) {
        return jedisCluster.zrangeByScore(key, min, max);
    }

    @Override
    public Set<String> zrangeByScore(String key, String min, String max) {
        return jedisCluster.zrangeByScore(key, min, max);
    }

    @Override
    public Set<String> zrevrangeByScore(String key, double max, double min) {
        return jedisCluster.zrevrangeByScore(key, max, min);
    }

    @Override
    public Set<String> zrangeByScore(String key, double min, double max, int offset, int count) {
        return jedisCluster.zrangeByScore(key, min, max, offset, count);
    }

    @Override
    public Set<String> zrevrangeByScore(String key, String max, String min) {
        return jedisCluster.zrevrangeByScore(key, max, min);
    }

    @Override
    public Set<String> zrangeByScore(String key, String min, String max, int offset, int count) {
        return jedisCluster.zrangeByScore(key, min, max, offset, count);
    }

    @Override
    public Set<String> zrevrangeByScore(String key, double max, double min, int offset, int count) {
        return jedisCluster.zrevrangeByScore(key, max, min, offset, count);
    }

    @Override
    public Set<Tuple> zrangeByScoreWithScores(String key, double min, double max) {
        return jedisCluster.zrangeByScoreWithScores(key, min, max);
    }

    @Override
    public Set<Tuple> zrevrangeByScoreWithScores(String key, double max, double min) {
        return jedisCluster.zrevrangeByScoreWithScores(key, max, min);
    }

    @Override
    public Set<Tuple> zrangeByScoreWithScores(
            String key, double min, double max, int offset, int count) {
        return jedisCluster.zrangeByScoreWithScores(key, min, max, offset, count);
    }

    @Override
    public Set<String> zrevrangeByScore(String key, String max, String min, int offset, int count) {
        return jedisCluster.zrevrangeByScore(key, max, min, offset, count);
    }

    @Override
    public Set<Tuple> zrangeByScoreWithScores(String key, String min, String max) {
        return jedisCluster.zrangeByScoreWithScores(key, min, max);
    }

    @Override
    public Set<Tuple> zrevrangeByScoreWithScores(String key, String max, String min) {
        return jedisCluster.zrevrangeByScoreWithScores(key, max, min);
    }

    @Override
    public Set<Tuple> zrangeByScoreWithScores(
            String key, String min, String max, int offset, int count) {
        return jedisCluster.zrangeByScoreWithScores(key, min, max, offset, count);
    }

    @Override
    public Set<Tuple> zrevrangeByScoreWithScores(
            String key, double max, double min, int offset, int count) {
        return jedisCluster.zrevrangeByScoreWithScores(key, max, min, offset, count);
    }

    @Override
    public Set<Tuple> zrevrangeByScoreWithScores(
            String key, String max, String min, int offset, int count) {
        return jedisCluster.zrevrangeByScoreWithScores(key, max, min, offset, count);
    }

    @Override
    public Long zremrangeByRank(String key, long start, long end) {
        return jedisCluster.zremrangeByRank(key, start, end);
    }

    @Override
    public Long zremrangeByScore(String key, double start, double end) {
        return jedisCluster.zremrangeByScore(key, start, end);
    }

    @Override
    public Long zremrangeByScore(String key, String start, String end) {
        return jedisCluster.zremrangeByScore(key, start, end);
    }

    @Override
    public Long zlexcount(String key, String min, String max) {
        return jedisCluster.zlexcount(key, min, max);
    }

    @Override
    public Set<String> zrangeByLex(String key, String min, String max) {
        return jedisCluster.zrangeByLex(key, min, max);
    }

    @Override
    public Set<String> zrangeByLex(String key, String min, String max, int offset, int count) {
        return jedisCluster.zrangeByLex(key, min, max, offset, count);
    }

    @Override
    public Set<String> zrevrangeByLex(String key, String max, String min) {
        return jedisCluster.zrevrangeByLex(key, max, min);
    }

    @Override
    public Set<String> zrevrangeByLex(String key, String max, String min, int offset, int count) {
        return jedisCluster.zrevrangeByLex(key, max, min, offset, count);
    }

    @Override
    public Long zremrangeByLex(String key, String min, String max) {
        return jedisCluster.zremrangeByLex(key, min, max);
    }

    @Override
    public Long linsert(String key, ListPosition where, String pivot, String value) {
        return jedisCluster.linsert(key, where, pivot, value);
    }

    @Override
    public Long lpushx(String key, String... string) {
        return jedisCluster.lpushx(key, string);
    }

    @Override
    public Long rpushx(String key, String... string) {
        return jedisCluster.rpushx(key, string);
    }

    @Override
    public List<String> blpop(int timeout, String key) {
        return jedisCluster.blpop(timeout, key);
    }

    @Override
    public List<String> brpop(int timeout, String key) {
        return jedisCluster.brpop(timeout, key);
    }

    @Override
    public Long del(String key) {
        return jedisCluster.del(key);
    }

    @Override
    public Long unlink(String key) {
        return jedisCluster.unlink(key);
    }

    @Override
    public String echo(String string) {
        return jedisCluster.echo(string);
    }

    @Override
    public Long move(String key, int dbIndex) {
        // MOVE targets a numbered database, which does not exist in cluster mode.
        throw new UnsupportedOperationException();
    }

    @Override
    public Long bitcount(String key) {
        return jedisCluster.bitcount(key);
    }

    @Override
    public Long bitcount(String key, long start, long end) {
        return jedisCluster.bitcount(key, start, end);
    }

    @Override
    public Long bitpos(String key, boolean value) {
        // Not supported by this adapter.
        throw new UnsupportedOperationException();
    }

    @Override
    public Long bitpos(String key, boolean value, BitPosParams params) {
        // Not supported by this adapter.
        throw new UnsupportedOperationException();
    }

    @Override
    public ScanResult<Entry<String, String>> hscan(String key, String cursor) {
        return jedisCluster.hscan(key, cursor);
    }

    @Override
    public ScanResult<Map.Entry<String, String>> hscan(
            String key, String cursor, ScanParams params) {
        // The params-aware HSCAN is only exposed on the byte[] API; convert with an
        // explicit UTF-8 charset rather than the platform default, then map the
        // byte[] entries back to Strings.
        ScanResult<Map.Entry<byte[], byte[]>> scanResult =
                jedisCluster.hscan(
                        key.getBytes(StandardCharsets.UTF_8),
                        cursor.getBytes(StandardCharsets.UTF_8),
                        params);
        List<Map.Entry<String, String>> results =
                scanResult.getResult().stream()
                        .map(
                                entry ->
                                        new AbstractMap.SimpleEntry<>(
                                                new String(
                                                        entry.getKey(), StandardCharsets.UTF_8),
                                                new String(
                                                        entry.getValue(),
                                                        StandardCharsets.UTF_8)))
                        .collect(Collectors.toList());
        return new ScanResult<>(scanResult.getCursorAsBytes(), results);
    }

    @Override
    public ScanResult<String> sscan(String key, String cursor) {
        return jedisCluster.sscan(key, cursor);
    }

    @Override
    public ScanResult<String> sscan(String key, String cursor, ScanParams params) {
        // Same byte[]-API detour as hscan above, with explicit UTF-8 conversion.
        ScanResult<byte[]> scanResult =
                jedisCluster.sscan(
                        key.getBytes(StandardCharsets.UTF_8),
                        cursor.getBytes(StandardCharsets.UTF_8),
                        params);
        List<String> results =
                scanResult.getResult().stream()
                        .map(bytes -> new String(bytes, StandardCharsets.UTF_8))
                        .collect(Collectors.toList());
        return new ScanResult<>(scanResult.getCursorAsBytes(), results);
    }

    @Override
    public ScanResult<Tuple> zscan(String key, String cursor) {
        return jedisCluster.zscan(key, cursor);
    }

    @Override
    public ScanResult<Tuple> zscan(String key, String cursor, ScanParams params) {
        // Byte[]-API detour with explicit UTF-8 conversion; Tuple results need no
        // re-mapping.
        return jedisCluster.zscan(
                key.getBytes(StandardCharsets.UTF_8),
                cursor.getBytes(StandardCharsets.UTF_8),
                params);
    }

    @Override
    public Long pfadd(String key, String... elements) {
        return jedisCluster.pfadd(key, elements);
    }

    @Override
    public long pfcount(String key) {
        return jedisCluster.pfcount(key);
    }

    @Override
    public Long geoadd(String key, double longitude, double latitude, String member) {
        return jedisCluster.geoadd(key, longitude, latitude, member);
    }

    @Override
    public Long geoadd(String key, Map<String, GeoCoordinate> memberCoordinateMap) {
        return jedisCluster.geoadd(key, memberCoordinateMap);
    }

    @Override
    public Double geodist(String key, String member1, String member2) {
        return jedisCluster.geodist(key, member1, member2);
    }

    @Override
    public Double geodist(String key, String member1, String member2, GeoUnit unit) {
        return jedisCluster.geodist(key, member1, member2, unit);
    }

    @Override
    public List<String> geohash(String key, String... members) {
        return jedisCluster.geohash(key, members);
    }

    @Override
    public List<GeoCoordinate> geopos(String key, String... members) {
        return jedisCluster.geopos(key, members);
    }

    @Override
    public List<GeoRadiusResponse> georadius(
            String key, double longitude, double latitude, double radius, GeoUnit unit) {
        return jedisCluster.georadius(key, longitude, latitude, radius, unit);
    }

    @Override
    public List<GeoRadiusResponse> georadiusReadonly(
            String key, double longitude, double latitude, double radius, GeoUnit unit) {
        return jedisCluster.georadiusReadonly(key, longitude, latitude, radius, unit);
    }

    @Override
    public List<GeoRadiusResponse> georadius(
            String key,
            double longitude,
            double latitude,
            double radius,
            GeoUnit unit,
            GeoRadiusParam param) {
        return jedisCluster.georadius(key, longitude, latitude, radius, unit, param);
    }

    @Override
    public List<GeoRadiusResponse> georadiusReadonly(
            String key,
            double longitude,
            double latitude,
            double radius,
            GeoUnit unit,
            GeoRadiusParam param) {
        return jedisCluster.georadiusReadonly(key, longitude, latitude, radius, unit, param);
    }

    @Override
    public List<GeoRadiusResponse> georadiusByMember(
            String key, String member, double radius, GeoUnit unit) {
        return jedisCluster.georadiusByMember(key, member, radius, unit);
    }

    @Override
    public List<GeoRadiusResponse> georadiusByMemberReadonly(
            String key, String member, double radius, GeoUnit unit) {
        return jedisCluster.georadiusByMemberReadonly(key, member, radius, unit);
    }

    @Override
    public List<GeoRadiusResponse> georadiusByMember(
            String key, String member, double radius, GeoUnit unit, GeoRadiusParam param) {
        return jedisCluster.georadiusByMember(key, member, radius, unit, param);
    }

    @Override
    public List<GeoRadiusResponse> georadiusByMemberReadonly(
            String key, String member, double radius, GeoUnit unit, GeoRadiusParam param) {
        return jedisCluster.georadiusByMemberReadonly(key, member, radius, unit, param);
    }

    @Override
    public List<Long> bitfield(String key, String... arguments) {
        return jedisCluster.bitfield(key, arguments);
    }

    @Override
    public List<Long> bitfieldReadonly(String key, String... arguments) {
        return jedisCluster.bitfieldReadonly(key, arguments);
    }

    @Override
    public Long hstrlen(String key, String field) {
        return jedisCluster.hstrlen(key, field);
    }

    @Override
    public StreamEntryID xadd(String key, StreamEntryID id, Map<String, String> hash) {
        return jedisCluster.xadd(key, id, hash);
    }

    @Override
    public StreamEntryID xadd(
            String key,
            StreamEntryID id,
            Map<String, String> hash,
            long maxLen,
            boolean approximateLength) {
        return jedisCluster.xadd(key, id, hash, maxLen, approximateLength);
    }

    @Override
    public Long xlen(String key) {
        return jedisCluster.xlen(key);
    }

    @Override
    public List<StreamEntry> xrange(String key, StreamEntryID start, StreamEntryID end, int count) {
        return jedisCluster.xrange(key, start, end, count);
    }

    @Override
    public List<StreamEntry> xrevrange(
            String key, StreamEntryID end, StreamEntryID start, int count) {
        return jedisCluster.xrevrange(key, end, start, count);
    }

    @Override
    public long xack(String key, String group, StreamEntryID... ids) {
        return jedisCluster.xack(key, group, ids);
    }

    @Override
    public String xgroupCreate(String key, String groupname, StreamEntryID id, boolean makeStream) {
        return jedisCluster.xgroupCreate(key, groupname, id, makeStream);
    }

    @Override
    public String xgroupSetID(String key, String groupname, StreamEntryID id) {
        return jedisCluster.xgroupSetID(key, groupname, id);
    }

    @Override
    public long xgroupDestroy(String key, String groupname) {
        return jedisCluster.xgroupDestroy(key, groupname);
    }

    @Override
    public Long xgroupDelConsumer(String key, String groupname, String consumername) {
        return jedisCluster.xgroupDelConsumer(key, groupname, consumername);
    }

    @Override
    public List<StreamPendingEntry> xpending(
            String key,
            String groupname,
            StreamEntryID start,
            StreamEntryID end,
            int count,
            String consumername) {
        return jedisCluster.xpending(key, groupname, start, end, count, consumername);
    }

    @Override
    public long xdel(String key, StreamEntryID... ids) {
        return jedisCluster.xdel(key, ids);
    }

    @Override
    public long xtrim(String key, long maxLen, boolean approximate) {
        return jedisCluster.xtrim(key, maxLen, approximate);
    }

    @Override
    public List<StreamEntry> xclaim(
            String key,
            String group,
            String consumername,
            long minIdleTime,
            long newIdleTime,
            int retries,
            boolean force,
            StreamEntryID... ids) {
        return jedisCluster.xclaim(
                key, group, consumername, minIdleTime, newIdleTime, retries, force, ids);
    }

    // NOTE(review): the XINFO methods below deliberately return null (not
    // implemented). Callers must null-check; consider throwing
    // UnsupportedOperationException like the other unimplemented commands.
    @Override
    public StreamInfo xinfoStream(String key) {
        return null;
    }

    @Override
    public List<StreamGroupInfo> xinfoGroup(String key) {
        return null;
    }

    @Override
    public List<StreamConsumersInfo> xinfoConsumers(String key, String group) {
        return null;
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/redis-persistence/src/main/java/com/netflix/conductor/redis/jedis/JedisStandalone.java | redis-persistence/src/main/java/com/netflix/conductor/redis/jedis/JedisStandalone.java | /*
* Copyright 2020 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.redis.jedis;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.function.Function;
import redis.clients.jedis.BitPosParams;
import redis.clients.jedis.GeoCoordinate;
import redis.clients.jedis.GeoRadiusResponse;
import redis.clients.jedis.GeoUnit;
import redis.clients.jedis.Jedis;
import redis.clients.jedis.JedisPool;
import redis.clients.jedis.ListPosition;
import redis.clients.jedis.ScanParams;
import redis.clients.jedis.ScanResult;
import redis.clients.jedis.SortingParams;
import redis.clients.jedis.StreamConsumersInfo;
import redis.clients.jedis.StreamEntry;
import redis.clients.jedis.StreamEntryID;
import redis.clients.jedis.StreamGroupInfo;
import redis.clients.jedis.StreamInfo;
import redis.clients.jedis.StreamPendingEntry;
import redis.clients.jedis.Tuple;
import redis.clients.jedis.commands.JedisCommands;
import redis.clients.jedis.params.GeoRadiusParam;
import redis.clients.jedis.params.SetParams;
import redis.clients.jedis.params.ZAddParams;
import redis.clients.jedis.params.ZIncrByParams;
/** A {@link JedisCommands} implementation that delegates to {@link JedisPool}. */
public class JedisStandalone implements JedisCommands {
// Connection pool from which each command borrows a short-lived Jedis handle.
private final JedisPool jedisPool;

public JedisStandalone(JedisPool jedisPool) {
    this.jedisPool = jedisPool;
}
/**
 * Borrows a connection from the pool, runs the given operation against it, and
 * returns the connection to the pool (via try-with-resources) when done.
 */
private <R> R executeInJedis(Function<Jedis, R> operation) {
    try (Jedis connection = jedisPool.getResource()) {
        return operation.apply(connection);
    }
}
// --- Key/string and hash commands -------------------------------------------
// Each method is a one-line pass-through: it borrows a pooled connection via
// executeInJedis and delegates to the equivalent Jedis command.
@Override
public String set(String key, String value) {
    return executeInJedis(jedis -> jedis.set(key, value));
}

@Override
public String set(String key, String value, SetParams params) {
    return executeInJedis(jedis -> jedis.set(key, value, params));
}

@Override
public String get(String key) {
    return executeInJedis(jedis -> jedis.get(key));
}

@Override
public Boolean exists(String key) {
    return executeInJedis(jedis -> jedis.exists(key));
}

@Override
public Long persist(String key) {
    return executeInJedis(jedis -> jedis.persist(key));
}

@Override
public String type(String key) {
    return executeInJedis(jedis -> jedis.type(key));
}

@Override
public byte[] dump(String key) {
    return executeInJedis(jedis -> jedis.dump(key));
}

@Override
public String restore(String key, int ttl, byte[] serializedValue) {
    return executeInJedis(jedis -> jedis.restore(key, ttl, serializedValue));
}

@Override
public String restoreReplace(String key, int ttl, byte[] serializedValue) {
    return executeInJedis(jedis -> jedis.restoreReplace(key, ttl, serializedValue));
}

@Override
public Long expire(String key, int seconds) {
    return executeInJedis(jedis -> jedis.expire(key, seconds));
}

@Override
public Long pexpire(String key, long milliseconds) {
    return executeInJedis(jedis -> jedis.pexpire(key, milliseconds));
}

@Override
public Long expireAt(String key, long unixTime) {
    return executeInJedis(jedis -> jedis.expireAt(key, unixTime));
}

@Override
public Long pexpireAt(String key, long millisecondsTimestamp) {
    return executeInJedis(jedis -> jedis.pexpireAt(key, millisecondsTimestamp));
}

@Override
public Long ttl(String key) {
    return executeInJedis(jedis -> jedis.ttl(key));
}

@Override
public Long pttl(String key) {
    return executeInJedis(jedis -> jedis.pttl(key));
}

@Override
public Long touch(String key) {
    return executeInJedis(jedis -> jedis.touch(key));
}

@Override
public Boolean setbit(String key, long offset, boolean value) {
    return executeInJedis(jedis -> jedis.setbit(key, offset, value));
}

@Override
public Boolean setbit(String key, long offset, String value) {
    return executeInJedis(jedis -> jedis.setbit(key, offset, value));
}

@Override
public Boolean getbit(String key, long offset) {
    return executeInJedis(jedis -> jedis.getbit(key, offset));
}

@Override
public Long setrange(String key, long offset, String value) {
    return executeInJedis(jedis -> jedis.setrange(key, offset, value));
}

@Override
public String getrange(String key, long startOffset, long endOffset) {
    return executeInJedis(jedis -> jedis.getrange(key, startOffset, endOffset));
}

@Override
public String getSet(String key, String value) {
    return executeInJedis(jedis -> jedis.getSet(key, value));
}

@Override
public Long setnx(String key, String value) {
    return executeInJedis(jedis -> jedis.setnx(key, value));
}

@Override
public String setex(String key, int seconds, String value) {
    return executeInJedis(jedis -> jedis.setex(key, seconds, value));
}

@Override
public String psetex(String key, long milliseconds, String value) {
    return executeInJedis(jedis -> jedis.psetex(key, milliseconds, value));
}

@Override
public Long decrBy(String key, long decrement) {
    return executeInJedis(jedis -> jedis.decrBy(key, decrement));
}

@Override
public Long decr(String key) {
    return executeInJedis(jedis -> jedis.decr(key));
}

@Override
public Long incrBy(String key, long increment) {
    return executeInJedis(jedis -> jedis.incrBy(key, increment));
}

@Override
public Double incrByFloat(String key, double increment) {
    return executeInJedis(jedis -> jedis.incrByFloat(key, increment));
}

@Override
public Long incr(String key) {
    return executeInJedis(jedis -> jedis.incr(key));
}

@Override
public Long append(String key, String value) {
    return executeInJedis(jedis -> jedis.append(key, value));
}

@Override
public String substr(String key, int start, int end) {
    return executeInJedis(jedis -> jedis.substr(key, start, end));
}

@Override
public Long hset(String key, String field, String value) {
    return executeInJedis(jedis -> jedis.hset(key, field, value));
}

@Override
public Long hset(String key, Map<String, String> hash) {
    return executeInJedis(jedis -> jedis.hset(key, hash));
}

@Override
public String hget(String key, String field) {
    return executeInJedis(jedis -> jedis.hget(key, field));
}

@Override
public Long hsetnx(String key, String field, String value) {
    return executeInJedis(jedis -> jedis.hsetnx(key, field, value));
}

@Override
public String hmset(String key, Map<String, String> hash) {
    return executeInJedis(jedis -> jedis.hmset(key, hash));
}

@Override
public List<String> hmget(String key, String... fields) {
    return executeInJedis(jedis -> jedis.hmget(key, fields));
}

@Override
public Long hincrBy(String key, String field, long value) {
    return executeInJedis(jedis -> jedis.hincrBy(key, field, value));
}

@Override
public Double hincrByFloat(String key, String field, double value) {
    return executeInJedis(jedis -> jedis.hincrByFloat(key, field, value));
}

@Override
public Boolean hexists(String key, String field) {
    return executeInJedis(jedis -> jedis.hexists(key, field));
}

@Override
public Long hdel(String key, String... field) {
    return executeInJedis(jedis -> jedis.hdel(key, field));
}

@Override
public Long hlen(String key) {
    return executeInJedis(jedis -> jedis.hlen(key));
}

@Override
public Set<String> hkeys(String key) {
    return executeInJedis(jedis -> jedis.hkeys(key));
}

@Override
public List<String> hvals(String key) {
    return executeInJedis(jedis -> jedis.hvals(key));
}

@Override
public Map<String, String> hgetAll(String key) {
    return executeInJedis(jedis -> jedis.hgetAll(key));
}
/**
 * Appends the given values to the tail of the list stored at {@code key} and
 * returns the new list length.
 *
 * <p>Fix: the previous implementation called {@code jedis.rpush(key)} without
 * forwarding the {@code string} varargs, so the values were silently dropped
 * and nothing was ever appended.
 */
@Override
public Long rpush(String key, String... string) {
    return executeInJedis(jedis -> jedis.rpush(key, string));
}
// ---------------------------------------------------------------------------
// List and set commands: straight pass-throughs to the pooled Jedis client.
// ---------------------------------------------------------------------------
@Override
public Long lpush(String key, String... string) {
    return executeInJedis(client -> client.lpush(key, string));
}

@Override
public Long llen(String key) {
    return executeInJedis(client -> client.llen(key));
}

@Override
public List<String> lrange(String key, long start, long stop) {
    return executeInJedis(client -> client.lrange(key, start, stop));
}

@Override
public String ltrim(String key, long start, long stop) {
    return executeInJedis(client -> client.ltrim(key, start, stop));
}

@Override
public String lindex(String key, long index) {
    return executeInJedis(client -> client.lindex(key, index));
}

@Override
public String lset(String key, long index, String value) {
    return executeInJedis(client -> client.lset(key, index, value));
}

@Override
public Long lrem(String key, long count, String value) {
    return executeInJedis(client -> client.lrem(key, count, value));
}

@Override
public String lpop(String key) {
    return executeInJedis(client -> client.lpop(key));
}

@Override
public String rpop(String key) {
    return executeInJedis(client -> client.rpop(key));
}

@Override
public Long sadd(String key, String... member) {
    return executeInJedis(client -> client.sadd(key, member));
}

@Override
public Set<String> smembers(String key) {
    return executeInJedis(client -> client.smembers(key));
}

@Override
public Long srem(String key, String... member) {
    return executeInJedis(client -> client.srem(key, member));
}

@Override
public String spop(String key) {
    return executeInJedis(client -> client.spop(key));
}

@Override
public Set<String> spop(String key, long count) {
    return executeInJedis(client -> client.spop(key, count));
}

@Override
public Long scard(String key) {
    return executeInJedis(client -> client.scard(key));
}

@Override
public Boolean sismember(String key, String member) {
    return executeInJedis(client -> client.sismember(key, member));
}

@Override
public String srandmember(String key) {
    return executeInJedis(client -> client.srandmember(key));
}

@Override
public List<String> srandmember(String key, int count) {
    return executeInJedis(client -> client.srandmember(key, count));
}

@Override
public Long strlen(String key) {
    return executeInJedis(client -> client.strlen(key));
}
// ---------------------------------------------------------------------------
// Sorted-set basics and SORT: delegations to the underlying Jedis client.
// ---------------------------------------------------------------------------
@Override
public Long zadd(String key, double score, String member) {
    return executeInJedis(client -> client.zadd(key, score, member));
}

@Override
public Long zadd(String key, double score, String member, ZAddParams params) {
    return executeInJedis(client -> client.zadd(key, score, member, params));
}

@Override
public Long zadd(String key, Map<String, Double> scoreMembers) {
    return executeInJedis(client -> client.zadd(key, scoreMembers));
}

@Override
public Long zadd(String key, Map<String, Double> scoreMembers, ZAddParams params) {
    return executeInJedis(client -> client.zadd(key, scoreMembers, params));
}

@Override
public Set<String> zrange(String key, long start, long stop) {
    return executeInJedis(client -> client.zrange(key, start, stop));
}

@Override
public Long zrem(String key, String... members) {
    return executeInJedis(client -> client.zrem(key, members));
}

@Override
public Double zincrby(String key, double increment, String member) {
    return executeInJedis(client -> client.zincrby(key, increment, member));
}

@Override
public Double zincrby(String key, double increment, String member, ZIncrByParams params) {
    return executeInJedis(client -> client.zincrby(key, increment, member, params));
}

@Override
public Long zrank(String key, String member) {
    return executeInJedis(client -> client.zrank(key, member));
}

@Override
public Long zrevrank(String key, String member) {
    return executeInJedis(client -> client.zrevrank(key, member));
}

@Override
public Set<String> zrevrange(String key, long start, long stop) {
    return executeInJedis(client -> client.zrevrange(key, start, stop));
}

@Override
public Set<Tuple> zrangeWithScores(String key, long start, long stop) {
    return executeInJedis(client -> client.zrangeWithScores(key, start, stop));
}

@Override
public Set<Tuple> zrevrangeWithScores(String key, long start, long stop) {
    return executeInJedis(client -> client.zrevrangeWithScores(key, start, stop));
}

@Override
public Long zcard(String key) {
    return executeInJedis(client -> client.zcard(key));
}

@Override
public Double zscore(String key, String member) {
    return executeInJedis(client -> client.zscore(key, member));
}

@Override
public Tuple zpopmax(String key) {
    return executeInJedis(client -> client.zpopmax(key));
}

@Override
public Set<Tuple> zpopmax(String key, int count) {
    return executeInJedis(client -> client.zpopmax(key, count));
}

@Override
public Tuple zpopmin(String key) {
    return executeInJedis(client -> client.zpopmin(key));
}

@Override
public Set<Tuple> zpopmin(String key, int count) {
    return executeInJedis(client -> client.zpopmin(key, count));
}

@Override
public List<String> sort(String key) {
    return executeInJedis(client -> client.sort(key));
}

@Override
public List<String> sort(String key, SortingParams sortingParameters) {
    return executeInJedis(client -> client.sort(key, sortingParameters));
}
// ---------------------------------------------------------------------------
// Sorted-set range queries (by score and by lexicographic order). Note the
// Jedis convention: the "rev" variants take (max, min) in that order.
// ---------------------------------------------------------------------------
@Override
public Long zcount(String key, double min, double max) {
    return executeInJedis(client -> client.zcount(key, min, max));
}

@Override
public Long zcount(String key, String min, String max) {
    return executeInJedis(client -> client.zcount(key, min, max));
}

@Override
public Set<String> zrangeByScore(String key, double min, double max) {
    return executeInJedis(client -> client.zrangeByScore(key, min, max));
}

@Override
public Set<String> zrangeByScore(String key, String min, String max) {
    return executeInJedis(client -> client.zrangeByScore(key, min, max));
}

@Override
public Set<String> zrevrangeByScore(String key, double max, double min) {
    return executeInJedis(client -> client.zrevrangeByScore(key, max, min));
}

@Override
public Set<String> zrangeByScore(String key, double min, double max, int offset, int count) {
    return executeInJedis(client -> client.zrangeByScore(key, min, max, offset, count));
}

@Override
public Set<String> zrevrangeByScore(String key, String max, String min) {
    return executeInJedis(client -> client.zrevrangeByScore(key, max, min));
}

@Override
public Set<String> zrangeByScore(String key, String min, String max, int offset, int count) {
    return executeInJedis(client -> client.zrangeByScore(key, min, max, offset, count));
}

@Override
public Set<String> zrevrangeByScore(String key, double max, double min, int offset, int count) {
    return executeInJedis(client -> client.zrevrangeByScore(key, max, min, offset, count));
}

@Override
public Set<Tuple> zrangeByScoreWithScores(String key, double min, double max) {
    return executeInJedis(client -> client.zrangeByScoreWithScores(key, min, max));
}

@Override
public Set<Tuple> zrevrangeByScoreWithScores(String key, double max, double min) {
    return executeInJedis(client -> client.zrevrangeByScoreWithScores(key, max, min));
}

@Override
public Set<Tuple> zrangeByScoreWithScores(
        String key, double min, double max, int offset, int count) {
    return executeInJedis(client -> client.zrangeByScoreWithScores(key, min, max, offset, count));
}

@Override
public Set<String> zrevrangeByScore(String key, String max, String min, int offset, int count) {
    return executeInJedis(client -> client.zrevrangeByScore(key, max, min, offset, count));
}

@Override
public Set<Tuple> zrangeByScoreWithScores(String key, String min, String max) {
    return executeInJedis(client -> client.zrangeByScoreWithScores(key, min, max));
}

@Override
public Set<Tuple> zrevrangeByScoreWithScores(String key, String max, String min) {
    return executeInJedis(client -> client.zrevrangeByScoreWithScores(key, max, min));
}

@Override
public Set<Tuple> zrangeByScoreWithScores(
        String key, String min, String max, int offset, int count) {
    return executeInJedis(client -> client.zrangeByScoreWithScores(key, min, max, offset, count));
}

@Override
public Set<Tuple> zrevrangeByScoreWithScores(
        String key, double max, double min, int offset, int count) {
    return executeInJedis(
            client -> client.zrevrangeByScoreWithScores(key, max, min, offset, count));
}

@Override
public Set<Tuple> zrevrangeByScoreWithScores(
        String key, String max, String min, int offset, int count) {
    return executeInJedis(
            client -> client.zrevrangeByScoreWithScores(key, max, min, offset, count));
}

@Override
public Long zremrangeByRank(String key, long start, long stop) {
    return executeInJedis(client -> client.zremrangeByRank(key, start, stop));
}

@Override
public Long zremrangeByScore(String key, double min, double max) {
    return executeInJedis(client -> client.zremrangeByScore(key, min, max));
}

@Override
public Long zremrangeByScore(String key, String min, String max) {
    return executeInJedis(client -> client.zremrangeByScore(key, min, max));
}

@Override
public Long zlexcount(String key, String min, String max) {
    return executeInJedis(client -> client.zlexcount(key, min, max));
}

@Override
public Set<String> zrangeByLex(String key, String min, String max) {
    return executeInJedis(client -> client.zrangeByLex(key, min, max));
}

@Override
public Set<String> zrangeByLex(String key, String min, String max, int offset, int count) {
    return executeInJedis(client -> client.zrangeByLex(key, min, max, offset, count));
}

@Override
public Set<String> zrevrangeByLex(String key, String max, String min) {
    return executeInJedis(client -> client.zrevrangeByLex(key, max, min));
}

@Override
public Set<String> zrevrangeByLex(String key, String max, String min, int offset, int count) {
    return executeInJedis(client -> client.zrevrangeByLex(key, max, min, offset, count));
}

@Override
public Long zremrangeByLex(String key, String min, String max) {
    return executeInJedis(client -> client.zremrangeByLex(key, min, max));
}
// ---------------------------------------------------------------------------
// Conditional pushes, blocking pops, key management and bit operations.
// ---------------------------------------------------------------------------
@Override
public Long linsert(String key, ListPosition where, String pivot, String value) {
    return executeInJedis(client -> client.linsert(key, where, pivot, value));
}

@Override
public Long lpushx(String key, String... string) {
    return executeInJedis(client -> client.lpushx(key, string));
}

@Override
public Long rpushx(String key, String... string) {
    return executeInJedis(client -> client.rpushx(key, string));
}

@Override
public List<String> blpop(int timeout, String key) {
    return executeInJedis(client -> client.blpop(timeout, key));
}

@Override
public List<String> brpop(int timeout, String key) {
    return executeInJedis(client -> client.brpop(timeout, key));
}

@Override
public Long del(String key) {
    return executeInJedis(client -> client.del(key));
}

@Override
public Long unlink(String key) {
    return executeInJedis(client -> client.unlink(key));
}

@Override
public String echo(String string) {
    return executeInJedis(client -> client.echo(string));
}

@Override
public Long move(String key, int dbIndex) {
    return executeInJedis(client -> client.move(key, dbIndex));
}

@Override
public Long bitcount(String key) {
    return executeInJedis(client -> client.bitcount(key));
}

@Override
public Long bitcount(String key, long start, long end) {
    return executeInJedis(client -> client.bitcount(key, start, end));
}

@Override
public Long bitpos(String key, boolean value) {
    return executeInJedis(client -> client.bitpos(key, value));
}

@Override
public Long bitpos(String key, boolean value, BitPosParams params) {
    return executeInJedis(client -> client.bitpos(key, value, params));
}
// ---------------------------------------------------------------------------
// Incremental scans (HSCAN / SSCAN / ZSCAN) and HyperLogLog commands.
// ---------------------------------------------------------------------------
@Override
public ScanResult<Map.Entry<String, String>> hscan(String key, String cursor) {
    return executeInJedis(client -> client.hscan(key, cursor));
}

@Override
public ScanResult<Map.Entry<String, String>> hscan(
        String key, String cursor, ScanParams params) {
    return executeInJedis(client -> client.hscan(key, cursor, params));
}

@Override
public ScanResult<String> sscan(String key, String cursor) {
    return executeInJedis(client -> client.sscan(key, cursor));
}

@Override
public ScanResult<Tuple> zscan(String key, String cursor) {
    return executeInJedis(client -> client.zscan(key, cursor));
}

@Override
public ScanResult<Tuple> zscan(String key, String cursor, ScanParams params) {
    return executeInJedis(client -> client.zscan(key, cursor, params));
}

@Override
public ScanResult<String> sscan(String key, String cursor, ScanParams params) {
    return executeInJedis(client -> client.sscan(key, cursor, params));
}

@Override
public Long pfadd(String key, String... elements) {
    return executeInJedis(client -> client.pfadd(key, elements));
}

@Override
public long pfcount(String key) {
    return executeInJedis(client -> client.pfcount(key));
}
// ---------------------------------------------------------------------------
// Geo commands, BITFIELD and HSTRLEN: pass-throughs to the Jedis client.
// ---------------------------------------------------------------------------
@Override
public Long geoadd(String key, double longitude, double latitude, String member) {
    return executeInJedis(client -> client.geoadd(key, longitude, latitude, member));
}

@Override
public Long geoadd(String key, Map<String, GeoCoordinate> memberCoordinateMap) {
    return executeInJedis(client -> client.geoadd(key, memberCoordinateMap));
}

@Override
public Double geodist(String key, String member1, String member2) {
    return executeInJedis(client -> client.geodist(key, member1, member2));
}

@Override
public Double geodist(String key, String member1, String member2, GeoUnit unit) {
    return executeInJedis(client -> client.geodist(key, member1, member2, unit));
}

@Override
public List<String> geohash(String key, String... members) {
    return executeInJedis(client -> client.geohash(key, members));
}

@Override
public List<GeoCoordinate> geopos(String key, String... members) {
    return executeInJedis(client -> client.geopos(key, members));
}

@Override
public List<GeoRadiusResponse> georadius(
        String key, double longitude, double latitude, double radius, GeoUnit unit) {
    return executeInJedis(client -> client.georadius(key, longitude, latitude, radius, unit));
}

@Override
public List<GeoRadiusResponse> georadiusReadonly(
        String key, double longitude, double latitude, double radius, GeoUnit unit) {
    return executeInJedis(
            client -> client.georadiusReadonly(key, longitude, latitude, radius, unit));
}

@Override
public List<GeoRadiusResponse> georadius(
        String key,
        double longitude,
        double latitude,
        double radius,
        GeoUnit unit,
        GeoRadiusParam param) {
    return executeInJedis(
            client -> client.georadius(key, longitude, latitude, radius, unit, param));
}

@Override
public List<GeoRadiusResponse> georadiusReadonly(
        String key,
        double longitude,
        double latitude,
        double radius,
        GeoUnit unit,
        GeoRadiusParam param) {
    return executeInJedis(
            client -> client.georadiusReadonly(key, longitude, latitude, radius, unit, param));
}

@Override
public List<GeoRadiusResponse> georadiusByMember(
        String key, String member, double radius, GeoUnit unit) {
    return executeInJedis(client -> client.georadiusByMember(key, member, radius, unit));
}

@Override
public List<GeoRadiusResponse> georadiusByMemberReadonly(
        String key, String member, double radius, GeoUnit unit) {
    return executeInJedis(client -> client.georadiusByMemberReadonly(key, member, radius, unit));
}

@Override
public List<GeoRadiusResponse> georadiusByMember(
        String key, String member, double radius, GeoUnit unit, GeoRadiusParam param) {
    return executeInJedis(client -> client.georadiusByMember(key, member, radius, unit, param));
}

@Override
public List<GeoRadiusResponse> georadiusByMemberReadonly(
        String key, String member, double radius, GeoUnit unit, GeoRadiusParam param) {
    return executeInJedis(
            client -> client.georadiusByMemberReadonly(key, member, radius, unit, param));
}

@Override
public List<Long> bitfield(String key, String... arguments) {
    return executeInJedis(client -> client.bitfield(key, arguments));
}

@Override
public List<Long> bitfieldReadonly(String key, String... arguments) {
    return executeInJedis(client -> client.bitfieldReadonly(key, arguments));
}

@Override
public Long hstrlen(String key, String field) {
    return executeInJedis(client -> client.hstrlen(key, field));
}
@Override
public StreamEntryID xadd(String key, StreamEntryID id, Map<String, String> hash) {
return executeInJedis(jedis -> jedis.xadd(key, id, hash));
}
@Override
public StreamEntryID xadd(
String key,
StreamEntryID id,
Map<String, String> hash,
long maxLen,
boolean approximateLength) {
return executeInJedis(jedis -> jedis.xadd(key, id, hash, maxLen, approximateLength));
}
@Override
public Long xlen(String key) {
return executeInJedis(jedis -> jedis.xlen(key));
}
@Override
public List<StreamEntry> xrange(String key, StreamEntryID start, StreamEntryID end, int count) {
return executeInJedis(jedis -> jedis.xrange(key, start, end, count));
}
@Override
public List<StreamEntry> xrevrange(
String key, StreamEntryID end, StreamEntryID start, int count) {
return executeInJedis(jedis -> jedis.xrevrange(key, end, start, count));
}
@Override
public long xack(String key, String group, StreamEntryID... ids) {
return executeInJedis(jedis -> jedis.xack(key, group, ids));
}
@Override
public String xgroupCreate(String key, String groupname, StreamEntryID id, boolean makeStream) {
return executeInJedis(jedis -> jedis.xgroupCreate(key, groupname, id, makeStream));
}
@Override
public String xgroupSetID(String key, String groupname, StreamEntryID id) {
return executeInJedis(jedis -> jedis.xgroupSetID(key, groupname, id));
}
@Override
public long xgroupDestroy(String key, String groupname) {
return executeInJedis(jedis -> jedis.xgroupDestroy(key, groupname));
}
@Override
public Long xgroupDelConsumer(String key, String groupname, String consumername) {
return executeInJedis(jedis -> jedis.hsetnx(key, groupname, consumername));
}
// ---------------------------------------------------------------------------
// Stream inspection and maintenance (XPENDING/XDEL/XTRIM/XCLAIM/XINFO ...).
// ---------------------------------------------------------------------------
@Override
public List<StreamPendingEntry> xpending(
        String key,
        String groupname,
        StreamEntryID start,
        StreamEntryID end,
        int count,
        String consumername) {
    return executeInJedis(
            client -> client.xpending(key, groupname, start, end, count, consumername));
}

@Override
public long xdel(String key, StreamEntryID... ids) {
    return executeInJedis(client -> client.xdel(key, ids));
}

@Override
public long xtrim(String key, long maxLen, boolean approximate) {
    return executeInJedis(client -> client.xtrim(key, maxLen, approximate));
}

@Override
public List<StreamEntry> xclaim(
        String key,
        String group,
        String consumername,
        long minIdleTime,
        long newIdleTime,
        int retries,
        boolean force,
        StreamEntryID... ids) {
    return executeInJedis(
            client ->
                    client.xclaim(
                            key,
                            group,
                            consumername,
                            minIdleTime,
                            newIdleTime,
                            retries,
                            force,
                            ids));
}

@Override
public StreamInfo xinfoStream(String key) {
    return executeInJedis(client -> client.xinfoStream(key));
}

@Override
public List<StreamGroupInfo> xinfoGroup(String key) {
    return executeInJedis(client -> client.xinfoGroup(key));
}

@Override
public List<StreamConsumersInfo> xinfoConsumers(String key, String group) {
    return executeInJedis(client -> client.xinfoConsumers(key, group));
}
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/redis-persistence/src/main/java/com/netflix/conductor/redis/config/AnyRedisCondition.java | redis-persistence/src/main/java/com/netflix/conductor/redis/config/AnyRedisCondition.java | /*
* Copyright 2020 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.redis.config;
import org.springframework.boot.autoconfigure.condition.AnyNestedCondition;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
/**
 * Spring composite condition that matches when {@code conductor.db.type} is set to ANY of the
 * Redis-backed store flavors (dynomite, in-memory redis, redis cluster, redis sentinel, or
 * standalone redis). Used to gate shared Redis configuration beans so they are only registered
 * when one of these backends is selected.
 */
public class AnyRedisCondition extends AnyNestedCondition {
    public AnyRedisCondition() {
        // Evaluate while @Configuration classes are still being parsed, so the condition can
        // gate whole configuration classes rather than individual bean methods.
        super(ConfigurationPhase.PARSE_CONFIGURATION);
    }

    // Each nested member class contributes one alternative to the OR; a member "matches"
    // when its @ConditionalOnProperty holds.
    @ConditionalOnProperty(name = "conductor.db.type", havingValue = "dynomite")
    static class DynomiteClusterCondition {}

    @ConditionalOnProperty(name = "conductor.db.type", havingValue = "memory")
    static class InMemoryRedisCondition {}

    @ConditionalOnProperty(name = "conductor.db.type", havingValue = "redis_cluster")
    static class RedisClusterConfiguration {}

    @ConditionalOnProperty(name = "conductor.db.type", havingValue = "redis_sentinel")
    static class RedisSentinelConfiguration {}

    @ConditionalOnProperty(name = "conductor.db.type", havingValue = "redis_standalone")
    static class RedisStandaloneConfiguration {}
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/redis-persistence/src/main/java/com/netflix/conductor/redis/config/JedisCommandsConfigurer.java | redis-persistence/src/main/java/com/netflix/conductor/redis/config/JedisCommandsConfigurer.java | /*
* Copyright 2020 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.redis.config;
import org.springframework.context.annotation.Bean;
import com.netflix.conductor.core.config.ConductorProperties;
import com.netflix.conductor.redis.dynoqueue.ConfigurationHostSupplier;
import com.netflix.dyno.connectionpool.HostSupplier;
import com.netflix.dyno.connectionpool.TokenMapSupplier;
import redis.clients.jedis.commands.JedisCommands;
import static com.netflix.conductor.redis.config.RedisCommonConfiguration.DEFAULT_CLIENT_INJECTION_NAME;
import static com.netflix.conductor.redis.config.RedisCommonConfiguration.READ_CLIENT_INJECTION_NAME;
/**
 * Template base class for the Redis-flavor configurations. Subclasses implement
 * {@link #createJedisCommands} for a specific backend (dynomite, cluster, sentinel, ...);
 * this class wires the common beans: a host supplier plus a default and a read
 * {@link JedisCommands} client.
 */
abstract class JedisCommandsConfigurer {

    /** Supplies the Redis/Dynomite host list parsed from {@link RedisProperties}. */
    @Bean
    public HostSupplier hostSupplier(RedisProperties properties) {
        return new ConfigurationHostSupplier(properties);
    }

    /** Primary (read/write) client bean. */
    @Bean(name = DEFAULT_CLIENT_INJECTION_NAME)
    public JedisCommands jedisCommands(
            RedisProperties properties,
            ConductorProperties conductorProperties,
            HostSupplier hostSupplier,
            TokenMapSupplier tokenMapSupplier) {
        return createJedisCommands(properties, conductorProperties, hostSupplier, tokenMapSupplier);
    }

    // NOTE(review): the read client is created the same way as the default client here;
    // subclasses that need a distinct non-quorum/read path rely on createJedisCommands
    // behavior — confirm against the concrete configurations.
    @Bean(name = READ_CLIENT_INJECTION_NAME)
    public JedisCommands readJedisCommands(
            RedisProperties properties,
            ConductorProperties conductorProperties,
            HostSupplier hostSupplier,
            TokenMapSupplier tokenMapSupplier) {
        return createJedisCommands(properties, conductorProperties, hostSupplier, tokenMapSupplier);
    }

    /** Backend-specific factory for the underlying Jedis client. */
    protected abstract JedisCommands createJedisCommands(
            RedisProperties properties,
            ConductorProperties conductorProperties,
            HostSupplier hostSupplier,
            TokenMapSupplier tokenMapSupplier);
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/redis-persistence/src/main/java/com/netflix/conductor/redis/config/RedisProperties.java | redis-persistence/src/main/java/com/netflix/conductor/redis/config/RedisProperties.java | /*
* Copyright 2021 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.redis.config;
import java.time.Duration;
import java.time.temporal.ChronoUnit;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.context.properties.ConfigurationProperties;
import org.springframework.boot.convert.DurationUnit;
import com.netflix.conductor.core.config.ConductorProperties;
import com.netflix.conductor.redis.dynoqueue.RedisQueuesShardingStrategyProvider;
import com.netflix.dyno.connectionpool.RetryPolicy.RetryPolicyFactory;
import com.netflix.dyno.connectionpool.impl.RetryNTimes;
import com.netflix.dyno.connectionpool.impl.RunOnce;
/**
 * Type-safe binding for all {@code conductor.redis.*} configuration, covering Dynomite/Redis
 * topology, connection-pool tuning, queue sharding and cache TTLs. Also derives composite
 * values such as the dyno-queues key prefix and the connection retry policy.
 */
@ConfigurationProperties("conductor.redis")
public class RedisProperties {

    // Needed to build the queue prefix, which incorporates the conductor "stack" name.
    private final ConductorProperties conductorProperties;

    @Autowired
    public RedisProperties(ConductorProperties conductorProperties) {
        this.conductorProperties = conductorProperties;
    }

    /**
     * Data center region. If hosting on Amazon the value is something like us-east-1, us-west-2
     * etc.
     */
    private String dataCenterRegion = "us-east-1";

    /**
     * Local rack / availability zone. For AWS deployments, the value is something like us-east-1a,
     * etc.
     */
    private String availabilityZone = "us-east-1c";

    /** The name of the redis / dynomite cluster */
    private String clusterName = "";

    /** Dynomite Cluster details. Format is host:port:rack separated by semicolon */
    private String hosts = null;

    /** The prefix used to prepend workflow data in redis */
    private String workflowNamespacePrefix = null;

    /** The prefix used to prepend keys for queues in redis */
    private String queueNamespacePrefix = null;

    /**
     * The domain name to be used in the key prefix for logical separation of workflow data and
     * queues in a shared redis setup
     */
    private String keyspaceDomain = null;

    /**
     * The maximum number of connections that can be managed by the connection pool on a given
     * instance
     */
    private int maxConnectionsPerHost = 10;

    /**
     * The maximum amount of time to wait for a connection to become available from the connection
     * pool
     */
    private Duration maxTimeoutWhenExhausted = Duration.ofMillis(800);

    /** The maximum retry attempts to use with this connection pool */
    private int maxRetryAttempts = 0;

    /** The read connection port to be used for connecting to dyno-queues */
    private int queuesNonQuorumPort = 22122;

    /** The sharding strategy to be used for the dyno queue configuration */
    private String queueShardingStrategy = RedisQueuesShardingStrategyProvider.ROUND_ROBIN_STRATEGY;

    /** The time in seconds after which the in-memory task definitions cache will be refreshed */
    @DurationUnit(ChronoUnit.SECONDS)
    private Duration taskDefCacheRefreshInterval = Duration.ofSeconds(60);

    /** The time to live in seconds for which the event execution will be persisted */
    @DurationUnit(ChronoUnit.SECONDS)
    private Duration eventExecutionPersistenceTTL = Duration.ofSeconds(60);

    // Maximum number of idle connections to be maintained
    private int maxIdleConnections = 8;

    // Minimum number of idle connections to be maintained
    private int minIdleConnections = 5;

    // Minimum time (ms) a connection may sit idle before it is eligible for eviction.
    private long minEvictableIdleTimeMillis = 1800000;

    // Interval (ms) between idle-connection evictor runs; negative disables the evictor.
    private long timeBetweenEvictionRunsMillis = -1L;

    // Whether idle connections are validated by the evictor thread.
    private boolean testWhileIdle = false;

    // Number of connections examined per evictor run.
    private int numTestsPerEvictionRun = 3;

    // Redis logical database index to SELECT on connect.
    private int database = 0;

    // Optional AUTH username (Redis 6+ ACLs); null means no username.
    private String username = null;

    // Whether to connect over TLS.
    private boolean ssl = false;

    // Optional CLIENT SETNAME value for connections.
    private String clientName = null;

    public int getNumTestsPerEvictionRun() {
        return numTestsPerEvictionRun;
    }

    public void setNumTestsPerEvictionRun(int numTestsPerEvictionRun) {
        this.numTestsPerEvictionRun = numTestsPerEvictionRun;
    }

    public boolean isTestWhileIdle() {
        return testWhileIdle;
    }

    public void setTestWhileIdle(boolean testWhileIdle) {
        this.testWhileIdle = testWhileIdle;
    }

    public long getMinEvictableIdleTimeMillis() {
        return minEvictableIdleTimeMillis;
    }

    public void setMinEvictableIdleTimeMillis(long minEvictableIdleTimeMillis) {
        this.minEvictableIdleTimeMillis = minEvictableIdleTimeMillis;
    }

    public long getTimeBetweenEvictionRunsMillis() {
        return timeBetweenEvictionRunsMillis;
    }

    public void setTimeBetweenEvictionRunsMillis(long timeBetweenEvictionRunsMillis) {
        this.timeBetweenEvictionRunsMillis = timeBetweenEvictionRunsMillis;
    }

    public int getMinIdleConnections() {
        return minIdleConnections;
    }

    public void setMinIdleConnections(int minIdleConnections) {
        this.minIdleConnections = minIdleConnections;
    }

    public int getMaxIdleConnections() {
        return maxIdleConnections;
    }

    public void setMaxIdleConnections(int maxIdleConnections) {
        this.maxIdleConnections = maxIdleConnections;
    }

    public String getDataCenterRegion() {
        return dataCenterRegion;
    }

    public void setDataCenterRegion(String dataCenterRegion) {
        this.dataCenterRegion = dataCenterRegion;
    }

    public String getAvailabilityZone() {
        return availabilityZone;
    }

    public void setAvailabilityZone(String availabilityZone) {
        this.availabilityZone = availabilityZone;
    }

    public String getClusterName() {
        return clusterName;
    }

    public void setClusterName(String clusterName) {
        this.clusterName = clusterName;
    }

    public String getHosts() {
        return hosts;
    }

    public void setHosts(String hosts) {
        this.hosts = hosts;
    }

    public String getWorkflowNamespacePrefix() {
        return workflowNamespacePrefix;
    }

    public void setWorkflowNamespacePrefix(String workflowNamespacePrefix) {
        this.workflowNamespacePrefix = workflowNamespacePrefix;
    }

    public String getQueueNamespacePrefix() {
        return queueNamespacePrefix;
    }

    public void setQueueNamespacePrefix(String queueNamespacePrefix) {
        this.queueNamespacePrefix = queueNamespacePrefix;
    }

    public String getKeyspaceDomain() {
        return keyspaceDomain;
    }

    public void setKeyspaceDomain(String keyspaceDomain) {
        this.keyspaceDomain = keyspaceDomain;
    }

    public int getMaxConnectionsPerHost() {
        return maxConnectionsPerHost;
    }

    public void setMaxConnectionsPerHost(int maxConnectionsPerHost) {
        this.maxConnectionsPerHost = maxConnectionsPerHost;
    }

    public Duration getMaxTimeoutWhenExhausted() {
        return maxTimeoutWhenExhausted;
    }

    public void setMaxTimeoutWhenExhausted(Duration maxTimeoutWhenExhausted) {
        this.maxTimeoutWhenExhausted = maxTimeoutWhenExhausted;
    }

    public int getMaxRetryAttempts() {
        return maxRetryAttempts;
    }

    public void setMaxRetryAttempts(int maxRetryAttempts) {
        this.maxRetryAttempts = maxRetryAttempts;
    }

    public int getQueuesNonQuorumPort() {
        return queuesNonQuorumPort;
    }

    public void setQueuesNonQuorumPort(int queuesNonQuorumPort) {
        this.queuesNonQuorumPort = queuesNonQuorumPort;
    }

    public String getQueueShardingStrategy() {
        return queueShardingStrategy;
    }

    public void setQueueShardingStrategy(String queueShardingStrategy) {
        this.queueShardingStrategy = queueShardingStrategy;
    }

    public Duration getTaskDefCacheRefreshInterval() {
        return taskDefCacheRefreshInterval;
    }

    public void setTaskDefCacheRefreshInterval(Duration taskDefCacheRefreshInterval) {
        this.taskDefCacheRefreshInterval = taskDefCacheRefreshInterval;
    }

    public Duration getEventExecutionPersistenceTTL() {
        return eventExecutionPersistenceTTL;
    }

    public void setEventExecutionPersistenceTTL(Duration eventExecutionPersistenceTTL) {
        this.eventExecutionPersistenceTTL = eventExecutionPersistenceTTL;
    }

    /**
     * Builds the dyno-queues key prefix: {@code <queueNamespacePrefix>.<stack>} plus
     * {@code .<keyspaceDomain>} when a domain is configured.
     * NOTE(review): if queueNamespacePrefix is unset this yields a literal "null." prefix —
     * callers appear to rely on it always being configured; confirm.
     */
    public String getQueuePrefix() {
        String prefix = getQueueNamespacePrefix() + "." + conductorProperties.getStack();
        if (getKeyspaceDomain() != null) {
            prefix = prefix + "." + getKeyspaceDomain();
        }
        return prefix;
    }

    /**
     * Retry policy for the Dyno connection pool: run-once when no retries are configured,
     * otherwise retry N times (without backoff randomization).
     */
    public RetryPolicyFactory getConnectionRetryPolicy() {
        if (getMaxRetryAttempts() == 0) {
            return RunOnce::new;
        } else {
            return () -> new RetryNTimes(maxRetryAttempts, false);
        }
    }

    public int getDatabase() {
        return database;
    }

    public void setDatabase(int database) {
        this.database = database;
    }

    public String getUsername() {
        return username;
    }

    public void setUsername(String username) {
        this.username = username;
    }

    public boolean isSsl() {
        return ssl;
    }

    public void setSsl(boolean ssl) {
        this.ssl = ssl;
    }

    public String getClientName() {
        return clientName;
    }

    public void setClientName(String clientName) {
        this.clientName = clientName;
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/redis-persistence/src/main/java/com/netflix/conductor/redis/config/DynomiteClusterConfiguration.java | redis-persistence/src/main/java/com/netflix/conductor/redis/config/DynomiteClusterConfiguration.java | /*
* Copyright 2021 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.redis.config;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.context.annotation.Configuration;
import com.netflix.conductor.core.config.ConductorProperties;
import com.netflix.dyno.connectionpool.HostSupplier;
import com.netflix.dyno.connectionpool.TokenMapSupplier;
import com.netflix.dyno.connectionpool.impl.ConnectionPoolConfigurationImpl;
import com.netflix.dyno.jedis.DynoJedisClient;
import redis.clients.jedis.commands.JedisCommands;
@Configuration(proxyBeanMethods = false)
@ConditionalOnProperty(name = "conductor.db.type", havingValue = "dynomite")
public class DynomiteClusterConfiguration extends JedisCommandsConfigurer {
protected JedisCommands createJedisCommands(
RedisProperties properties,
ConductorProperties conductorProperties,
HostSupplier hostSupplier,
TokenMapSupplier tokenMapSupplier) {
ConnectionPoolConfigurationImpl connectionPoolConfiguration =
new ConnectionPoolConfigurationImpl(properties.getClusterName())
.withTokenSupplier(tokenMapSupplier)
.setLocalRack(properties.getAvailabilityZone())
.setLocalDataCenter(properties.getDataCenterRegion())
.setSocketTimeout(0)
.setConnectTimeout(0)
.setMaxConnsPerHost(properties.getMaxConnectionsPerHost())
.setMaxTimeoutWhenExhausted(
(int) properties.getMaxTimeoutWhenExhausted().toMillis())
.setRetryPolicyFactory(properties.getConnectionRetryPolicy());
return new DynoJedisClient.Builder()
.withHostSupplier(hostSupplier)
.withApplicationName(conductorProperties.getAppId())
.withDynomiteClusterName(properties.getClusterName())
.withCPConfig(connectionPoolConfiguration)
.build();
}
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/redis-persistence/src/main/java/com/netflix/conductor/redis/config/RedisCommonConfiguration.java | redis-persistence/src/main/java/com/netflix/conductor/redis/config/RedisCommonConfiguration.java | /*
* Copyright 2020 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.redis.config;
import java.util.ArrayList;
import java.util.List;
import java.util.Set;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Qualifier;
import org.springframework.boot.context.properties.EnableConfigurationProperties;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Conditional;
import org.springframework.context.annotation.Configuration;
import com.netflix.conductor.redis.dynoqueue.RedisQueuesShardingStrategyProvider;
import com.netflix.dyno.connectionpool.Host;
import com.netflix.dyno.connectionpool.HostSupplier;
import com.netflix.dyno.connectionpool.TokenMapSupplier;
import com.netflix.dyno.connectionpool.impl.lb.HostToken;
import com.netflix.dyno.connectionpool.impl.utils.CollectionUtils;
import com.netflix.dyno.queues.ShardSupplier;
import com.netflix.dyno.queues.redis.RedisQueues;
import com.netflix.dyno.queues.redis.sharding.ShardingStrategy;
import com.netflix.dyno.queues.shard.DynoShardSupplier;
import com.google.inject.ProvisionException;
import redis.clients.jedis.commands.JedisCommands;
@Configuration(proxyBeanMethods = false)
@EnableConfigurationProperties(RedisProperties.class)
@Conditional(AnyRedisCondition.class)
public class RedisCommonConfiguration {

    public static final String DEFAULT_CLIENT_INJECTION_NAME = "DefaultJedisCommands";
    public static final String READ_CLIENT_INJECTION_NAME = "ReadJedisCommands";

    private static final Logger LOGGER = LoggerFactory.getLogger(RedisCommonConfiguration.class);

    /**
     * Supplies the dyno-queues shard based on the configured region and availability zone.
     *
     * @throws ProvisionException if no availability zone is configured
     */
    @Bean
    public ShardSupplier shardSupplier(HostSupplier hostSupplier, RedisProperties properties) {
        if (properties.getAvailabilityZone() == null) {
            // NOTE(review): ProvisionException is a Guice type; an IllegalStateException would be
            // more natural in Spring code. Kept as-is to avoid changing the thrown type.
            throw new ProvisionException(
                    "Availability zone is not defined. Ensure Configuration.getAvailabilityZone() returns a non-null "
                            + "and non-empty value.");
        }
        // e.g. region "us-east-1" + zone "us-east-1c" -> local rack suffix "c"
        String localDC =
                properties.getAvailabilityZone().replaceAll(properties.getDataCenterRegion(), "");
        return new DynoShardSupplier(hostSupplier, properties.getDataCenterRegion(), localDC);
    }

    /** Simple token map that assigns descending tokens (size..1) over the active hosts. */
    @Bean
    public TokenMapSupplier tokenMapSupplier() {
        final List<HostToken> hostTokens = new ArrayList<>();
        return new TokenMapSupplier() {
            @Override
            public List<HostToken> getTokens(Set<Host> activeHosts) {
                // NOTE(review): hostTokens is never cleared, so repeated calls accumulate
                // duplicate entries — confirm this is only invoked once per host set.
                long i = activeHosts.size();
                for (Host host : activeHosts) {
                    HostToken hostToken = new HostToken(i, host);
                    hostTokens.add(hostToken);
                    i--;
                }
                return hostTokens;
            }

            @Override
            public HostToken getTokenForHost(Host host, Set<Host> activeHosts) {
                return CollectionUtils.find(
                        hostTokens, token -> token.getHost().compareTo(host) == 0);
            }
        };
    }

    @Bean
    public ShardingStrategy shardingStrategy(
            ShardSupplier shardSupplier, RedisProperties properties) {
        return new RedisQueuesShardingStrategyProvider(shardSupplier, properties).get();
    }

    /**
     * Creates the {@link RedisQueues} instance backing the dyno-queues based queue DAO, with
     * 60s unack and poll intervals.
     */
    @Bean
    public RedisQueues redisQueues(
            @Qualifier(DEFAULT_CLIENT_INJECTION_NAME) JedisCommands jedisCommands,
            @Qualifier(READ_CLIENT_INJECTION_NAME) JedisCommands jedisCommandsRead,
            ShardSupplier shardSupplier,
            RedisProperties properties,
            ShardingStrategy shardingStrategy) {
        RedisQueues queues =
                new RedisQueues(
                        jedisCommands,
                        jedisCommandsRead,
                        properties.getQueuePrefix(),
                        shardSupplier,
                        60_000,
                        60_000,
                        shardingStrategy);
        // Parameterized SLF4J logging instead of string concatenation.
        LOGGER.info("DynoQueueDAO initialized with prefix {}!", properties.getQueuePrefix());
        return queues;
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/redis-persistence/src/main/java/com/netflix/conductor/redis/config/RedisClusterConfiguration.java | redis-persistence/src/main/java/com/netflix/conductor/redis/config/RedisClusterConfiguration.java | /*
* Copyright 2020 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.redis.config;
import java.util.List;
import java.util.Set;
import java.util.stream.Collectors;
import org.apache.commons.pool2.impl.GenericObjectPoolConfig;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.context.annotation.Configuration;
import com.netflix.conductor.core.config.ConductorProperties;
import com.netflix.conductor.redis.jedis.JedisCluster;
import com.netflix.dyno.connectionpool.Host;
import com.netflix.dyno.connectionpool.HostSupplier;
import com.netflix.dyno.connectionpool.TokenMapSupplier;
import redis.clients.jedis.HostAndPort;
import redis.clients.jedis.Protocol;
import redis.clients.jedis.commands.JedisCommands;
@Configuration(proxyBeanMethods = false)
@ConditionalOnProperty(name = "conductor.db.type", havingValue = "redis_cluster")
public class RedisClusterConfiguration extends JedisCommandsConfigurer {

    // Fixed: previously created with JedisCommandsConfigurer.class (copy-paste), which
    // mis-attributed this class' log lines.
    private static final Logger log = LoggerFactory.getLogger(RedisClusterConfiguration.class);

    // Same as redis.clients.jedis.BinaryJedisCluster
    protected static final int DEFAULT_MAX_ATTEMPTS = 5;

    /**
     * Builds a cluster-mode Jedis client, choosing the constructor that matches the configured
     * credentials: username+password, password only, or unauthenticated. The password of the
     * first supplied host is used for the whole cluster.
     */
    @Override
    protected JedisCommands createJedisCommands(
            RedisProperties properties,
            ConductorProperties conductorProperties,
            HostSupplier hostSupplier,
            TokenMapSupplier tokenMapSupplier) {
        GenericObjectPoolConfig<?> genericObjectPoolConfig = new GenericObjectPoolConfig<>();
        genericObjectPoolConfig.setMaxTotal(properties.getMaxConnectionsPerHost());
        Set<HostAndPort> hosts =
                hostSupplier.getHosts().stream()
                        .map(h -> new HostAndPort(h.getHostName(), h.getPort()))
                        .collect(Collectors.toSet());
        String password = getPassword(hostSupplier.getHosts());

        if (properties.getUsername() != null && password != null) {
            log.info("Connecting to Redis Cluster with user AUTH");
            return new JedisCluster(
                    new redis.clients.jedis.JedisCluster(
                            hosts,
                            Protocol.DEFAULT_TIMEOUT,
                            Protocol.DEFAULT_TIMEOUT,
                            DEFAULT_MAX_ATTEMPTS,
                            properties.getUsername(),
                            password,
                            properties.getClientName(),
                            genericObjectPoolConfig,
                            properties.isSsl()));
        } else if (password != null) {
            log.info("Connecting to Redis Cluster with AUTH");
            return new JedisCluster(
                    new redis.clients.jedis.JedisCluster(
                            hosts,
                            Protocol.DEFAULT_TIMEOUT,
                            Protocol.DEFAULT_TIMEOUT,
                            DEFAULT_MAX_ATTEMPTS,
                            password,
                            properties.getClientName(),
                            genericObjectPoolConfig,
                            properties.isSsl()));
        } else {
            return new JedisCluster(
                    new redis.clients.jedis.JedisCluster(hosts, genericObjectPoolConfig));
        }
    }

    /** Returns the password of the first host, or null when no hosts are configured. */
    private String getPassword(List<Host> hosts) {
        return hosts.isEmpty() ? null : hosts.get(0).getPassword();
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/redis-persistence/src/main/java/com/netflix/conductor/redis/config/RedisStandaloneConfiguration.java | redis-persistence/src/main/java/com/netflix/conductor/redis/config/RedisStandaloneConfiguration.java | /*
* Copyright 2020 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.redis.config;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.context.annotation.Configuration;
import com.netflix.conductor.core.config.ConductorProperties;
import com.netflix.conductor.redis.jedis.JedisStandalone;
import com.netflix.dyno.connectionpool.Host;
import com.netflix.dyno.connectionpool.HostSupplier;
import com.netflix.dyno.connectionpool.TokenMapSupplier;
import redis.clients.jedis.JedisPool;
import redis.clients.jedis.JedisPoolConfig;
import redis.clients.jedis.Protocol;
import redis.clients.jedis.commands.JedisCommands;
@Configuration(proxyBeanMethods = false)
@ConditionalOnProperty(name = "conductor.db.type", havingValue = "redis_standalone")
public class RedisStandaloneConfiguration extends JedisCommandsConfigurer {

    // Fixed: previously created with RedisSentinelConfiguration.class (copy-paste), which
    // mis-attributed this class' log lines.
    private static final Logger log = LoggerFactory.getLogger(RedisStandaloneConfiguration.class);

    /** Creates a standalone Jedis client pooled against the first supplied host. */
    @Override
    protected JedisCommands createJedisCommands(
            RedisProperties properties,
            ConductorProperties conductorProperties,
            HostSupplier hostSupplier,
            TokenMapSupplier tokenMapSupplier) {
        JedisPoolConfig config = new JedisPoolConfig();
        config.setMinIdle(2);
        config.setMaxTotal(properties.getMaxConnectionsPerHost());
        log.info("Starting conductor server using redis_standalone.");
        Host host = hostSupplier.getHosts().get(0);
        return new JedisStandalone(getJedisPool(config, host, properties));
    }

    /**
     * Builds the {@link JedisPool}, choosing the constructor that matches the configured
     * credentials: username+password, password only, or unauthenticated.
     */
    private JedisPool getJedisPool(JedisPoolConfig config, Host host, RedisProperties properties) {
        if (properties.getUsername() != null && host.getPassword() != null) {
            // Message now distinguishes user AUTH from password-only AUTH, matching
            // RedisClusterConfiguration (both branches previously logged the same text).
            log.info("Connecting to Redis Standalone with user AUTH");
            return new JedisPool(
                    config,
                    host.getHostName(),
                    host.getPort(),
                    Protocol.DEFAULT_TIMEOUT,
                    properties.getUsername(),
                    host.getPassword(),
                    properties.getDatabase(),
                    properties.isSsl());
        } else if (host.getPassword() != null) {
            log.info("Connecting to Redis Standalone with AUTH");
            return new JedisPool(
                    config,
                    host.getHostName(),
                    host.getPort(),
                    Protocol.DEFAULT_TIMEOUT,
                    host.getPassword(),
                    properties.getDatabase(),
                    properties.isSsl());
        } else {
            return new JedisPool(config, host.getHostName(), host.getPort(), properties.isSsl());
        }
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/redis-persistence/src/main/java/com/netflix/conductor/redis/config/RedisSentinelConfiguration.java | redis-persistence/src/main/java/com/netflix/conductor/redis/config/RedisSentinelConfiguration.java | /*
* Copyright 2020 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.redis.config;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import org.apache.commons.pool2.impl.GenericObjectPoolConfig;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.context.annotation.Configuration;
import com.netflix.conductor.core.config.ConductorProperties;
import com.netflix.conductor.redis.jedis.JedisSentinel;
import com.netflix.dyno.connectionpool.Host;
import com.netflix.dyno.connectionpool.HostSupplier;
import com.netflix.dyno.connectionpool.TokenMapSupplier;
import redis.clients.jedis.JedisSentinelPool;
import redis.clients.jedis.Protocol;
import redis.clients.jedis.commands.JedisCommands;
@Configuration(proxyBeanMethods = false)
@ConditionalOnProperty(name = "conductor.db.type", havingValue = "redis_sentinel")
public class RedisSentinelConfiguration extends JedisCommandsConfigurer {

    private static final Logger log = LoggerFactory.getLogger(RedisSentinelConfiguration.class);

    /**
     * Builds a sentinel-backed Jedis client. The password of the first configured host is used
     * both as the Redis password and as the sentinel password. The constructor variant is chosen
     * by the configured credentials: username+password, password only, or unauthenticated.
     */
    @Override
    protected JedisCommands createJedisCommands(
            RedisProperties properties,
            ConductorProperties conductorProperties,
            HostSupplier hostSupplier,
            TokenMapSupplier tokenMapSupplier) {
        GenericObjectPoolConfig<?> poolConfig = new GenericObjectPoolConfig<>();
        poolConfig.setMinIdle(properties.getMinIdleConnections());
        poolConfig.setMaxIdle(properties.getMaxIdleConnections());
        poolConfig.setMaxTotal(properties.getMaxConnectionsPerHost());
        poolConfig.setTestWhileIdle(properties.isTestWhileIdle());
        poolConfig.setMinEvictableIdleTimeMillis(properties.getMinEvictableIdleTimeMillis());
        poolConfig.setTimeBetweenEvictionRunsMillis(
                properties.getTimeBetweenEvictionRunsMillis());
        poolConfig.setNumTestsPerEvictionRun(properties.getNumTestsPerEvictionRun());
        log.info(
                "Starting conductor server using redis_sentinel and cluster "
                        + properties.getClusterName());

        // "host:port" entries for every sentinel node.
        Set<String> sentinels = new HashSet<>();
        hostSupplier
                .getHosts()
                .forEach(host -> sentinels.add(host.getHostName() + ":" + host.getPort()));

        String password = getPassword(hostSupplier.getHosts());
        if (properties.getUsername() != null && password != null) {
            return new JedisSentinel(
                    new JedisSentinelPool(
                            properties.getClusterName(),
                            sentinels,
                            poolConfig,
                            Protocol.DEFAULT_TIMEOUT,
                            Protocol.DEFAULT_TIMEOUT,
                            properties.getUsername(),
                            password,
                            properties.getDatabase(),
                            null,
                            Protocol.DEFAULT_TIMEOUT,
                            Protocol.DEFAULT_TIMEOUT,
                            properties.getUsername(),
                            password,
                            null));
        }
        if (password != null) {
            return new JedisSentinel(
                    new JedisSentinelPool(
                            properties.getClusterName(),
                            sentinels,
                            poolConfig,
                            Protocol.DEFAULT_TIMEOUT,
                            Protocol.DEFAULT_TIMEOUT,
                            password,
                            properties.getDatabase(),
                            null,
                            Protocol.DEFAULT_TIMEOUT,
                            Protocol.DEFAULT_TIMEOUT,
                            password,
                            null));
        }
        return new JedisSentinel(
                new JedisSentinelPool(properties.getClusterName(), sentinels, poolConfig));
    }

    /** Returns the password of the first host, or null when no hosts are configured. */
    private String getPassword(List<Host> hosts) {
        if (hosts.isEmpty()) {
            return null;
        }
        return hosts.get(0).getPassword();
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/redis-persistence/src/main/java/com/netflix/conductor/redis/config/InMemoryRedisConfiguration.java | redis-persistence/src/main/java/com/netflix/conductor/redis/config/InMemoryRedisConfiguration.java | /*
* Copyright 2020 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.redis.config;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import com.netflix.conductor.redis.dynoqueue.LocalhostHostSupplier;
import com.netflix.conductor.redis.jedis.JedisMock;
import com.netflix.dyno.connectionpool.HostSupplier;
import static com.netflix.conductor.redis.config.RedisCommonConfiguration.DEFAULT_CLIENT_INJECTION_NAME;
import static com.netflix.conductor.redis.config.RedisCommonConfiguration.READ_CLIENT_INJECTION_NAME;
@Configuration(proxyBeanMethods = false)
@ConditionalOnProperty(name = "conductor.db.type", havingValue = "memory")
public class InMemoryRedisConfiguration {
    /** Host supplier that points at localhost, matching the in-memory Jedis mock below. */
    @Bean
    public HostSupplier hostSupplier(RedisProperties properties) {
        return new LocalhostHostSupplier(properties);
    }
    /** Single in-memory Jedis mock registered under both the default and read client names. */
    @Bean(name = {DEFAULT_CLIENT_INJECTION_NAME, READ_CLIENT_INJECTION_NAME})
    public JedisMock jedisMock() {
        return new JedisMock();
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/sqlite-persistence/src/test/java/com/netflix/conductor/sqlite/dao/SqliteIndexDAOTest.java | sqlite-persistence/src/test/java/com/netflix/conductor/sqlite/dao/SqliteIndexDAOTest.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.sqlite.dao;
import java.sql.Connection;
import java.sql.SQLException;
import java.sql.Timestamp;
import java.time.Instant;
import java.time.format.DateTimeFormatter;
import java.time.temporal.TemporalAccessor;
import java.util.*;
import javax.sql.DataSource;
import org.flywaydb.core.Flyway;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Qualifier;
import org.springframework.boot.autoconfigure.flyway.FlywayAutoConfiguration;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.test.context.ContextConfiguration;
import org.springframework.test.context.TestPropertySource;
import org.springframework.test.context.junit4.SpringRunner;
import com.netflix.conductor.common.config.TestObjectMapperConfiguration;
import com.netflix.conductor.common.metadata.tasks.Task;
import com.netflix.conductor.common.metadata.tasks.TaskExecLog;
import com.netflix.conductor.common.run.SearchResult;
import com.netflix.conductor.common.run.TaskSummary;
import com.netflix.conductor.common.run.Workflow;
import com.netflix.conductor.common.run.WorkflowSummary;
import com.netflix.conductor.sqlite.config.SqliteConfiguration;
import com.netflix.conductor.sqlite.util.Query;
import com.fasterxml.jackson.databind.ObjectMapper;
import static org.junit.Assert.assertEquals;
@ContextConfiguration(
classes = {
TestObjectMapperConfiguration.class,
SqliteConfiguration.class,
FlywayAutoConfiguration.class
})
@RunWith(SpringRunner.class)
@TestPropertySource(
properties = {
"conductor.app.asyncIndexingEnabled=false",
"conductor.elasticsearch.version=0",
"conductor.indexing.type=sqlite",
"conductor.db.type=sqlite",
"spring.flyway.clean-disabled=false"
})
@SpringBootTest
public class SqliteIndexDAOTest {
@Autowired private SqliteIndexDAO indexDAO;
@Autowired private ObjectMapper objectMapper;
@Qualifier("dataSource")
@Autowired
private DataSource dataSource;
@Autowired Flyway flyway;
// clean the database between tests.
    // Reset the schema before each test: drop everything, then re-run the Flyway migrations so
    // every test starts from an empty, freshly migrated database (requires
    // spring.flyway.clean-disabled=false, set in @TestPropertySource above the class).
    @Before
    public void before() {
        flyway.clean();
        flyway.migrate();
    }
private WorkflowSummary getMockWorkflowSummary(String id) {
WorkflowSummary wfs = new WorkflowSummary();
wfs.setWorkflowId(id);
wfs.setCorrelationId("correlation-id");
wfs.setWorkflowType("workflow-type");
wfs.setStartTime("2023-02-07T08:42:45Z");
wfs.setUpdateTime("2023-02-07T08:43:45Z");
wfs.setStatus(Workflow.WorkflowStatus.COMPLETED);
return wfs;
}
private TaskSummary getMockTaskSummary(String taskId) {
TaskSummary ts = new TaskSummary();
ts.setTaskId(taskId);
ts.setTaskType("task-type1");
ts.setTaskDefName("task-def-name1");
ts.setStatus(Task.Status.COMPLETED);
ts.setStartTime("2023-02-07T09:41:45Z");
ts.setUpdateTime("2023-02-07T09:42:45Z");
ts.setWorkflowType("workflow-type");
return ts;
}
private TaskExecLog getMockTaskExecutionLog(String taskId, long createdTime, String log) {
TaskExecLog tse = new TaskExecLog();
tse.setTaskId(taskId);
tse.setLog(log);
tse.setCreatedTime(createdTime);
return tse;
}
    /**
     * Asserts that exactly one workflow_index row exists for {@code wfs} and that its id,
     * correlation id, type, start time, and status columns match the summary.
     */
    private void compareWorkflowSummary(WorkflowSummary wfs) throws SQLException {
        // NOTE(review): query built by string formatting — acceptable in test-only code with
        // fixed ids, but would be SQL injection in production code.
        List<Map<String, Object>> result =
                queryDb(
                        String.format(
                                "SELECT * FROM workflow_index WHERE workflow_id = '%s'",
                                wfs.getWorkflowId()));
        assertEquals("Wrong number of rows returned", 1, result.size());
        assertEquals(
                "Workflow id does not match",
                wfs.getWorkflowId(),
                result.get(0).get("workflow_id"));
        assertEquals(
                "Correlation id does not match",
                wfs.getCorrelationId(),
                result.get(0).get("correlation_id"));
        assertEquals(
                "Workflow type does not match",
                wfs.getWorkflowType(),
                result.get(0).get("workflow_type"));
        // The summary stores ISO-8601 instant strings; convert to java.sql.Timestamp's string
        // form, which presumably matches how the index stores start_time — the assertion below
        // relies on that.
        TemporalAccessor ta = DateTimeFormatter.ISO_INSTANT.parse(wfs.getStartTime());
        Timestamp startTime = Timestamp.from(Instant.from(ta));
        assertEquals(
                "Start time does not match", startTime.toString(), result.get(0).get("start_time"));
        assertEquals(
                "Status does not match", wfs.getStatus().toString(), result.get(0).get("status"));
    }
private List<Map<String, Object>> queryDb(String query) throws SQLException {
try (Connection c = dataSource.getConnection()) {
try (Query q = new Query(objectMapper, c, query)) {
return q.executeAndFetchMap();
}
}
}
    /**
     * Asserts that exactly one task_index row exists for {@code ts} and that its id, type,
     * def name, start/update times, status, and workflow type columns match the summary.
     */
    private void compareTaskSummary(TaskSummary ts) throws SQLException {
        // NOTE(review): string-formatted SQL — fine for test-only code with fixed ids.
        List<Map<String, Object>> result =
                queryDb(
                        String.format(
                                "SELECT * FROM task_index WHERE task_id = '%s'", ts.getTaskId()));
        assertEquals("Wrong number of rows returned", 1, result.size());
        assertEquals("Task id does not match", ts.getTaskId(), result.get(0).get("task_id"));
        assertEquals("Task type does not match", ts.getTaskType(), result.get(0).get("task_type"));
        assertEquals(
                "Task def name does not match",
                ts.getTaskDefName(),
                result.get(0).get("task_def_name"));
        // ISO-8601 instant strings from the summary are compared via Timestamp's string form,
        // which presumably matches the column format written by the index.
        TemporalAccessor startTa = DateTimeFormatter.ISO_INSTANT.parse(ts.getStartTime());
        Timestamp startTime = Timestamp.from(Instant.from(startTa));
        assertEquals(
                "Start time does not match", startTime.toString(), result.get(0).get("start_time"));
        TemporalAccessor updateTa = DateTimeFormatter.ISO_INSTANT.parse(ts.getUpdateTime());
        Timestamp updateTime = Timestamp.from(Instant.from(updateTa));
        assertEquals(
                "Update time does not match",
                updateTime.toString(),
                result.get(0).get("update_time"));
        assertEquals(
                "Status does not match", ts.getStatus().toString(), result.get(0).get("status"));
        assertEquals(
                "Workflow type does not match",
                // NOTE(review): getWorkflowType() appears to already be a String (it is set from
                // a string literal in the fixture), so .toString() looks redundant — confirm.
                ts.getWorkflowType().toString(),
                result.get(0).get("workflow_type"));
    }
@Test
public void testIndexNewWorkflow() throws SQLException {
WorkflowSummary wfs = getMockWorkflowSummary("workflow-id-new");
indexDAO.indexWorkflow(wfs);
compareWorkflowSummary(wfs);
}
@Test
public void testIndexExistingWorkflow() throws SQLException {
WorkflowSummary wfs = getMockWorkflowSummary("workflow-id-existing");
indexDAO.indexWorkflow(wfs);
compareWorkflowSummary(wfs);
wfs.setStatus(Workflow.WorkflowStatus.FAILED);
wfs.setUpdateTime("2023-02-07T08:44:45Z");
indexDAO.indexWorkflow(wfs);
compareWorkflowSummary(wfs);
}
@Test
public void testWhenWorkflowIsIndexedOutOfOrderOnlyLatestIsIndexed() throws SQLException {
WorkflowSummary firstWorkflowUpdate =
getMockWorkflowSummary("workflow-id-existing-no-index");
firstWorkflowUpdate.setUpdateTime("2023-02-07T08:42:45Z");
WorkflowSummary secondWorkflowUpdateSummary =
getMockWorkflowSummary("workflow-id-existing-no-index");
secondWorkflowUpdateSummary.setUpdateTime("2023-02-07T08:43:45Z");
secondWorkflowUpdateSummary.setStatus(Workflow.WorkflowStatus.FAILED);
indexDAO.indexWorkflow(secondWorkflowUpdateSummary);
compareWorkflowSummary(secondWorkflowUpdateSummary);
indexDAO.indexWorkflow(firstWorkflowUpdate);
compareWorkflowSummary(secondWorkflowUpdateSummary);
}
@Test
public void testWhenWorkflowUpdatesHaveTheSameUpdateTimeTheLastIsIndexed() throws SQLException {
WorkflowSummary firstWorkflowUpdate =
getMockWorkflowSummary("workflow-id-existing-same-time-index");
firstWorkflowUpdate.setUpdateTime("2023-02-07T08:42:45Z");
WorkflowSummary secondWorkflowUpdateSummary =
getMockWorkflowSummary("workflow-id-existing-same-time-index");
secondWorkflowUpdateSummary.setUpdateTime("2023-02-07T08:42:45Z");
secondWorkflowUpdateSummary.setStatus(Workflow.WorkflowStatus.FAILED);
indexDAO.indexWorkflow(firstWorkflowUpdate);
compareWorkflowSummary(firstWorkflowUpdate);
indexDAO.indexWorkflow(secondWorkflowUpdateSummary);
compareWorkflowSummary(secondWorkflowUpdateSummary);
}
@Test
public void testIndexNewTask() throws SQLException {
TaskSummary ts = getMockTaskSummary("task-id-new");
indexDAO.indexTask(ts);
compareTaskSummary(ts);
}
@Test
public void testIndexExistingTask() throws SQLException {
TaskSummary ts = getMockTaskSummary("task-id-existing");
indexDAO.indexTask(ts);
compareTaskSummary(ts);
ts.setUpdateTime("2023-02-07T09:43:45Z");
ts.setStatus(Task.Status.FAILED);
indexDAO.indexTask(ts);
compareTaskSummary(ts);
}
@Test
public void testWhenTaskIsIndexedOutOfOrderOnlyLatestIsIndexed() throws SQLException {
TaskSummary firstTaskState = getMockTaskSummary("task-id-exiting-no-update");
firstTaskState.setUpdateTime("2023-02-07T09:41:45Z");
firstTaskState.setStatus(Task.Status.FAILED);
TaskSummary secondTaskState = getMockTaskSummary("task-id-exiting-no-update");
secondTaskState.setUpdateTime("2023-02-07T09:42:45Z");
indexDAO.indexTask(secondTaskState);
compareTaskSummary(secondTaskState);
indexDAO.indexTask(firstTaskState);
compareTaskSummary(secondTaskState);
}
@Test
public void testWhenTaskUpdatesHaveTheSameUpdateTimeTheLastIsIndexed() throws SQLException {
TaskSummary firstTaskState = getMockTaskSummary("task-id-exiting-same-time-update");
firstTaskState.setUpdateTime("2023-02-07T09:42:45Z");
firstTaskState.setStatus(Task.Status.FAILED);
TaskSummary secondTaskState = getMockTaskSummary("task-id-exiting-same-time-update");
secondTaskState.setUpdateTime("2023-02-07T09:42:45Z");
indexDAO.indexTask(firstTaskState);
compareTaskSummary(firstTaskState);
indexDAO.indexTask(secondTaskState);
compareTaskSummary(secondTaskState);
}
@Test
public void testAddTaskExecutionLogs() throws SQLException {
List<TaskExecLog> logs = new ArrayList<>();
String taskId = UUID.randomUUID().toString();
logs.add(getMockTaskExecutionLog(taskId, 1675845986000L, "Log 1"));
logs.add(getMockTaskExecutionLog(taskId, 1675845987000L, "Log 2"));
indexDAO.addTaskExecutionLogs(logs);
List<Map<String, Object>> records =
queryDb("SELECT * FROM task_execution_logs ORDER BY created_time ASC");
assertEquals("Wrong number of logs returned", 2, records.size());
assertEquals(logs.get(0).getLog(), records.get(0).get("log"));
assertEquals(1675845986000L, records.get(0).get("created_time"));
assertEquals(logs.get(1).getLog(), records.get(1).get("log"));
assertEquals(1675845987000L, records.get(1).get("created_time"));
}
@Test
public void testSearchWorkflowSummary() {
WorkflowSummary wfs = getMockWorkflowSummary("workflow-id");
indexDAO.indexWorkflow(wfs);
String query = String.format("workflowId=\"%s\"", wfs.getWorkflowId());
SearchResult<WorkflowSummary> results =
indexDAO.searchWorkflowSummary(query, "*", 0, 15, new ArrayList());
assertEquals("No results returned", 1, results.getResults().size());
assertEquals(
"Wrong workflow returned",
wfs.getWorkflowId(),
results.getResults().get(0).getWorkflowId());
}
@Test
public void testFullTextSearchWorkflowSummary() {
WorkflowSummary wfs = getMockWorkflowSummary("workflow-id");
indexDAO.indexWorkflow(wfs);
String freeText = "notworkflow-id";
SearchResult<WorkflowSummary> results =
indexDAO.searchWorkflowSummary("", freeText, 0, 15, new ArrayList());
assertEquals("Wrong number of results returned", 0, results.getResults().size());
freeText = "workflow-id";
results = indexDAO.searchWorkflowSummary("", freeText, 0, 15, new ArrayList());
assertEquals("No results returned", 1, results.getResults().size());
assertEquals(
"Wrong workflow returned",
wfs.getWorkflowId(),
results.getResults().get(0).getWorkflowId());
}
    // JSON free-text search is not working yet; this test stays disabled until it is fixed.
// @Test
// public void testJsonSearchWorkflowSummary() {
// WorkflowSummary wfs = getMockWorkflowSummary("workflow-id-summary");
// wfs.setVersion(3);
//
// indexDAO.indexWorkflow(wfs);
//
// String freeText = "{\"correlationId\":\"not-the-id\"}";
// SearchResult<WorkflowSummary> results =
// indexDAO.searchWorkflowSummary("", freeText, 0, 15, new ArrayList());
// assertEquals("Wrong number of results returned", 0, results.getResults().size());
//
// freeText = "{\"correlationId\":\"correlation-id\", \"version\":3}";
// results = indexDAO.searchWorkflowSummary("", freeText, 0, 15, new ArrayList());
// assertEquals("No results returned", 1, results.getResults().size());
// assertEquals(
// "Wrong workflow returned",
// wfs.getWorkflowId(),
// results.getResults().get(0).getWorkflowId());
// }
@Test
public void testSearchWorkflowSummaryPagination() {
for (int i = 0; i < 5; i++) {
WorkflowSummary wfs = getMockWorkflowSummary("workflow-id-pagination-" + i);
indexDAO.indexWorkflow(wfs);
}
List<String> orderBy = Arrays.asList(new String[] {"workflowId:DESC"});
SearchResult<WorkflowSummary> results =
indexDAO.searchWorkflowSummary("", "workflow-id-pagination", 0, 2, orderBy);
assertEquals("Wrong totalHits returned", 5, results.getTotalHits());
assertEquals("Wrong number of results returned", 2, results.getResults().size());
assertEquals(
"Results returned in wrong order",
"workflow-id-pagination-4",
results.getResults().get(0).getWorkflowId());
assertEquals(
"Results returned in wrong order",
"workflow-id-pagination-3",
results.getResults().get(1).getWorkflowId());
results = indexDAO.searchWorkflowSummary("", "*", 2, 2, orderBy);
assertEquals("Wrong totalHits returned", 5, results.getTotalHits());
assertEquals("Wrong number of results returned", 2, results.getResults().size());
assertEquals(
"Results returned in wrong order",
"workflow-id-pagination-2",
results.getResults().get(0).getWorkflowId());
assertEquals(
"Results returned in wrong order",
"workflow-id-pagination-1",
results.getResults().get(1).getWorkflowId());
results = indexDAO.searchWorkflowSummary("", "*", 4, 2, orderBy);
assertEquals("Wrong totalHits returned", 5, results.getTotalHits());
assertEquals("Wrong number of results returned", 1, results.getResults().size());
assertEquals(
"Results returned in wrong order",
"workflow-id-pagination-0",
results.getResults().get(0).getWorkflowId());
}
@Test
public void testSearchTaskSummary() {
TaskSummary ts = getMockTaskSummary("task-id");
indexDAO.indexTask(ts);
String query = String.format("taskId=\"%s\"", ts.getTaskId());
SearchResult<TaskSummary> results =
indexDAO.searchTaskSummary(query, "*", 0, 15, new ArrayList());
assertEquals("No results returned", 1, results.getResults().size());
assertEquals(
"Wrong task returned", ts.getTaskId(), results.getResults().get(0).getTaskId());
}
@Test
public void testSearchTaskSummaryPagination() {
for (int i = 0; i < 5; i++) {
TaskSummary ts = getMockTaskSummary("task-id-pagination-" + i);
indexDAO.indexTask(ts);
}
List<String> orderBy = Arrays.asList(new String[] {"taskId:DESC"});
SearchResult<TaskSummary> results = indexDAO.searchTaskSummary("", "*", 0, 2, orderBy);
assertEquals("Wrong totalHits returned", 5, results.getTotalHits());
assertEquals("Wrong number of results returned", 2, results.getResults().size());
assertEquals(
"Results returned in wrong order",
"task-id-pagination-4",
results.getResults().get(0).getTaskId());
assertEquals(
"Results returned in wrong order",
"task-id-pagination-3",
results.getResults().get(1).getTaskId());
results = indexDAO.searchTaskSummary("", "*", 2, 2, orderBy);
assertEquals("Wrong totalHits returned", 5, results.getTotalHits());
assertEquals("Wrong number of results returned", 2, results.getResults().size());
assertEquals(
"Results returned in wrong order",
"task-id-pagination-2",
results.getResults().get(0).getTaskId());
assertEquals(
"Results returned in wrong order",
"task-id-pagination-1",
results.getResults().get(1).getTaskId());
results = indexDAO.searchTaskSummary("", "*", 4, 2, orderBy);
assertEquals("Wrong totalHits returned", 5, results.getTotalHits());
assertEquals("Wrong number of results returned", 1, results.getResults().size());
assertEquals(
"Results returned in wrong order",
"task-id-pagination-0",
results.getResults().get(0).getTaskId());
}
@Test
public void testGetTaskExecutionLogs() throws SQLException {
List<TaskExecLog> logs = new ArrayList<>();
String taskId = UUID.randomUUID().toString();
logs.add(getMockTaskExecutionLog(taskId, new Date(1675845986000L).getTime(), "Log 1"));
logs.add(getMockTaskExecutionLog(taskId, new Date(1675845987000L).getTime(), "Log 2"));
indexDAO.addTaskExecutionLogs(logs);
List<TaskExecLog> records = indexDAO.getTaskExecutionLogs(logs.get(0).getTaskId());
assertEquals("Wrong number of logs returned", 2, records.size());
assertEquals(logs.get(0).getLog(), records.get(0).getLog());
assertEquals(logs.get(0).getCreatedTime(), 1675845986000L);
assertEquals(logs.get(1).getLog(), records.get(1).getLog());
assertEquals(logs.get(1).getCreatedTime(), 1675845987000L);
}
@Test
public void testRemoveWorkflow() throws SQLException {
String workflowId = UUID.randomUUID().toString();
WorkflowSummary wfs = getMockWorkflowSummary(workflowId);
indexDAO.indexWorkflow(wfs);
List<Map<String, Object>> workflow_records =
queryDb("SELECT * FROM workflow_index WHERE workflow_id = '" + workflowId + "'");
assertEquals("Workflow index record was not created", 1, workflow_records.size());
indexDAO.removeWorkflow(workflowId);
workflow_records =
queryDb("SELECT * FROM workflow_index WHERE workflow_id = '" + workflowId + "'");
assertEquals("Workflow index record was not deleted", 0, workflow_records.size());
}
@Test
public void testRemoveTask() throws SQLException {
String workflowId = UUID.randomUUID().toString();
String taskId = UUID.randomUUID().toString();
TaskSummary ts = getMockTaskSummary(taskId);
indexDAO.indexTask(ts);
List<TaskExecLog> logs = new ArrayList<>();
logs.add(getMockTaskExecutionLog(taskId, new Date(1675845986000L).getTime(), "Log 1"));
logs.add(getMockTaskExecutionLog(taskId, new Date(1675845987000L).getTime(), "Log 2"));
indexDAO.addTaskExecutionLogs(logs);
List<Map<String, Object>> task_records =
queryDb("SELECT * FROM task_index WHERE task_id = '" + taskId + "'");
assertEquals("Task index record was not created", 1, task_records.size());
List<Map<String, Object>> log_records =
queryDb("SELECT * FROM task_execution_logs WHERE task_id = '" + taskId + "'");
assertEquals("Task execution logs were not created", 2, log_records.size());
indexDAO.removeTask(workflowId, taskId);
task_records = queryDb("SELECT * FROM task_index WHERE task_id = '" + taskId + "'");
assertEquals("Task index record was not deleted", 0, task_records.size());
log_records = queryDb("SELECT * FROM task_execution_logs WHERE task_id = '" + taskId + "'");
assertEquals("Task execution logs were not deleted", 0, log_records.size());
}
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/sqlite-persistence/src/test/java/com/netflix/conductor/sqlite/dao/SqliteQueueDAOTest.java | sqlite-persistence/src/test/java/com/netflix/conductor/sqlite/dao/SqliteQueueDAOTest.java | /*
* Copyright 2025 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.sqlite.dao;
import java.sql.Connection;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
import javax.sql.DataSource;
import org.flywaydb.core.Flyway;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.TestName;
import org.junit.runner.RunWith;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Qualifier;
import org.springframework.boot.autoconfigure.flyway.FlywayAutoConfiguration;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.test.context.ContextConfiguration;
import org.springframework.test.context.junit4.SpringRunner;
import com.netflix.conductor.common.config.TestObjectMapperConfiguration;
import com.netflix.conductor.core.events.queue.Message;
import com.netflix.conductor.sqlite.config.SqliteConfiguration;
import com.netflix.conductor.sqlite.util.Query;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.common.collect.ImmutableList;
import static org.junit.Assert.*;
import static org.junit.Assert.assertNotNull;
@ContextConfiguration(
classes = {
TestObjectMapperConfiguration.class,
SqliteConfiguration.class,
FlywayAutoConfiguration.class
})
@RunWith(SpringRunner.class)
@SpringBootTest(properties = "spring.flyway.clean-disabled=false")
public class SqliteQueueDAOTest {
private static final Logger LOGGER = LoggerFactory.getLogger(SqliteQueueDAOTest.class);
@Autowired private SqliteQueueDAO queueDAO;
@Qualifier("dataSource")
@Autowired
private DataSource dataSource;
@Autowired private ObjectMapper objectMapper;
@Rule public TestName name = new TestName();
@Autowired Flyway flyway;
@Before
public void before() {
try (Connection conn = dataSource.getConnection()) {
conn.setAutoCommit(true);
String[] stmts = new String[] {"delete from queue;", "delete from queue_message;"};
for (String stmt : stmts) {
conn.prepareStatement(stmt).executeUpdate();
}
} catch (Exception e) {
e.printStackTrace();
throw new RuntimeException(e);
}
}
@Test
public void complexQueueTest() {
String queueName = "TestQueue";
long offsetTimeInSecond = 0;
for (int i = 0; i < 10; i++) {
String messageId = "msg" + i;
queueDAO.push(queueName, messageId, offsetTimeInSecond);
}
int size = queueDAO.getSize(queueName);
assertEquals(10, size);
Map<String, Long> details = queueDAO.queuesDetail();
assertEquals(1, details.size());
assertEquals(10L, details.get(queueName).longValue());
for (int i = 0; i < 10; i++) {
String messageId = "msg" + i;
queueDAO.pushIfNotExists(queueName, messageId, offsetTimeInSecond);
}
List<String> popped = queueDAO.pop(queueName, 10, 100);
assertNotNull(popped);
assertEquals(10, popped.size());
Map<String, Map<String, Map<String, Long>>> verbose = queueDAO.queuesDetailVerbose();
assertEquals(1, verbose.size());
long shardSize = verbose.get(queueName).get("a").get("size");
long unackedSize = verbose.get(queueName).get("a").get("uacked");
assertEquals(0, shardSize);
assertEquals(10, unackedSize);
popped.forEach(messageId -> queueDAO.ack(queueName, messageId));
verbose = queueDAO.queuesDetailVerbose();
assertEquals(1, verbose.size());
shardSize = verbose.get(queueName).get("a").get("size");
unackedSize = verbose.get(queueName).get("a").get("uacked");
assertEquals(0, shardSize);
assertEquals(0, unackedSize);
popped = queueDAO.pop(queueName, 10, 100);
assertNotNull(popped);
assertEquals(0, popped.size());
for (int i = 0; i < 10; i++) {
String messageId = "msg" + i;
queueDAO.pushIfNotExists(queueName, messageId, offsetTimeInSecond);
}
size = queueDAO.getSize(queueName);
assertEquals(10, size);
for (int i = 0; i < 10; i++) {
String messageId = "msg" + i;
assertTrue(queueDAO.containsMessage(queueName, messageId));
queueDAO.remove(queueName, messageId);
}
size = queueDAO.getSize(queueName);
assertEquals(0, size);
for (int i = 0; i < 10; i++) {
String messageId = "msg" + i;
queueDAO.pushIfNotExists(queueName, messageId, offsetTimeInSecond);
}
queueDAO.flush(queueName);
size = queueDAO.getSize(queueName);
assertEquals(0, size);
}
/**
* Test fix for https://github.com/Netflix/conductor/issues/399
*
* @since 1.8.2-rc5
*/
@Test
public void pollMessagesTest() {
final List<Message> messages = new ArrayList<>();
final String queueName = "issue399_testQueue";
final int totalSize = 10;
for (int i = 0; i < totalSize; i++) {
String payload = "{\"id\": " + i + ", \"msg\":\"test " + i + "\"}";
Message m = new Message("testmsg-" + i, payload, "");
if (i % 2 == 0) {
// Set priority on message with pair id
m.setPriority(99 - i);
}
messages.add(m);
}
// Populate the queue with our test message batch
queueDAO.push(queueName, ImmutableList.copyOf(messages));
// Assert that all messages were persisted and no extras are in there
assertEquals("Queue size mismatch", totalSize, queueDAO.getSize(queueName));
List<Message> zeroPoll = queueDAO.pollMessages(queueName, 0, 10_000);
assertTrue("Zero poll should be empty", zeroPoll.isEmpty());
final int firstPollSize = 3;
List<Message> firstPoll = queueDAO.pollMessages(queueName, firstPollSize, 10_000);
assertNotNull("First poll was null", firstPoll);
assertFalse("First poll was empty", firstPoll.isEmpty());
assertEquals("First poll size mismatch", firstPollSize, firstPoll.size());
final int secondPollSize = 4;
List<Message> secondPoll = queueDAO.pollMessages(queueName, secondPollSize, 10_000);
assertNotNull("Second poll was null", secondPoll);
assertFalse("Second poll was empty", secondPoll.isEmpty());
assertEquals("Second poll size mismatch", secondPollSize, secondPoll.size());
// Assert that the total queue size hasn't changed
assertEquals(
"Total queue size should have remained the same",
totalSize,
queueDAO.getSize(queueName));
// Assert that our un-popped messages match our expected size
final long expectedSize = totalSize - firstPollSize - secondPollSize;
try (Connection c = dataSource.getConnection()) {
String UNPOPPED =
"SELECT COUNT(*) FROM queue_message WHERE queue_name = ? AND popped = false";
try (Query q = new Query(objectMapper, c, UNPOPPED)) {
long count = q.addParameter(queueName).executeCount();
assertEquals("Remaining queue size mismatch", expectedSize, count);
}
} catch (Exception ex) {
fail(ex.getMessage());
}
}
/** Test fix for https://github.com/Netflix/conductor/issues/1892 */
@Test
public void containsMessageTest() {
String queueName = "TestQueue";
long offsetTimeInSecond = 0;
for (int i = 0; i < 10; i++) {
String messageId = "msg" + i;
queueDAO.push(queueName, messageId, offsetTimeInSecond);
}
int size = queueDAO.getSize(queueName);
assertEquals(10, size);
for (int i = 0; i < 10; i++) {
String messageId = "msg" + i;
assertTrue(queueDAO.containsMessage(queueName, messageId));
queueDAO.remove(queueName, messageId);
}
for (int i = 0; i < 10; i++) {
String messageId = "msg" + i;
assertFalse(queueDAO.containsMessage(queueName, messageId));
}
}
/**
* Test fix for https://github.com/Netflix/conductor/issues/448
*
* @since 1.8.2-rc5
*/
@Test
public void pollDeferredMessagesTest() throws InterruptedException {
final List<Message> messages = new ArrayList<>();
final String queueName = "issue448_testQueue";
final int totalSize = 10;
for (int i = 0; i < totalSize; i++) {
int offset = 0;
if (i < 5) {
offset = 0;
} else if (i == 6 || i == 7) {
// Purposefully skipping id:5 to test out of order deliveries
// Set id:6 and id:7 for a 2s delay to be picked up in the second polling batch
offset = 5;
} else {
// Set all other queue messages to have enough of a delay that they won't
// accidentally
// be picked up.
offset = 10_000 + i;
}
String payload = "{\"id\": " + i + ",\"offset_time_seconds\":" + offset + "}";
Message m = new Message("testmsg-" + i, payload, "");
messages.add(m);
queueDAO.push(queueName, "testmsg-" + i, offset);
}
// Assert that all messages were persisted and no extras are in there
assertEquals("Queue size mismatch", totalSize, queueDAO.getSize(queueName));
final int firstPollSize = 4;
List<Message> firstPoll = queueDAO.pollMessages(queueName, firstPollSize, 100);
assertNotNull("First poll was null", firstPoll);
assertFalse("First poll was empty", firstPoll.isEmpty());
assertEquals("First poll size mismatch", firstPollSize, firstPoll.size());
List<String> firstPollMessageIds =
messages.stream()
.map(Message::getId)
.collect(Collectors.toList())
.subList(0, firstPollSize + 1);
for (int i = 0; i < firstPollSize; i++) {
String actual = firstPoll.get(i).getId();
assertTrue("Unexpected Id: " + actual, firstPollMessageIds.contains(actual));
}
final int secondPollSize = 3;
// Sleep a bit to get the next batch of messages
LOGGER.info("Sleeping for second poll...");
Thread.sleep(5_000);
// Poll for many more messages than expected
List<Message> secondPoll = queueDAO.pollMessages(queueName, secondPollSize + 10, 100);
assertNotNull("Second poll was null", secondPoll);
assertFalse("Second poll was empty", secondPoll.isEmpty());
assertEquals("Second poll size mismatch", secondPollSize, secondPoll.size());
List<String> expectedIds = Arrays.asList("testmsg-4", "testmsg-6", "testmsg-7");
for (int i = 0; i < secondPollSize; i++) {
String actual = secondPoll.get(i).getId();
assertTrue("Unexpected Id: " + actual, expectedIds.contains(actual));
}
// Assert that the total queue size hasn't changed
assertEquals(
"Total queue size should have remained the same",
totalSize,
queueDAO.getSize(queueName));
// Assert that our un-popped messages match our expected size
final long expectedSize = totalSize - firstPollSize - secondPollSize;
try (Connection c = dataSource.getConnection()) {
String UNPOPPED =
"SELECT COUNT(*) FROM queue_message WHERE queue_name = ? AND popped = false";
try (Query q = new Query(objectMapper, c, UNPOPPED)) {
long count = q.addParameter(queueName).executeCount();
assertEquals("Remaining queue size mismatch", expectedSize, count);
}
} catch (Exception ex) {
fail(ex.getMessage());
}
}
// @Test
public void processUnacksTest() {
processUnacks(
() -> {
// Process unacks
queueDAO.processUnacks("process_unacks_test");
},
"process_unacks_test");
}
// @Test
public void processAllUnacksTest() {
processUnacks(
() -> {
// Process all unacks
queueDAO.processAllUnacks();
},
"process_unacks_test");
}
private void processUnacks(Runnable unack, String queueName) {
// Count of messages in the queue(s)
final int count = 10;
// Number of messages to process acks for
final int unackedCount = 4;
// A secondary queue to make sure we don't accidentally process other queues
final String otherQueueName = "process_unacks_test_other_queue";
// Create testing queue with some messages (but not all) that will be popped/acked.
for (int i = 0; i < count; i++) {
int offset = 0;
if (i >= unackedCount) {
offset = 1_000_000;
}
queueDAO.push(queueName, "unack-" + i, offset);
}
// Create a second queue to make sure that unacks don't occur for it
for (int i = 0; i < count; i++) {
queueDAO.push(otherQueueName, "other-" + i, 0);
}
// Poll for first batch of messages (should be equal to unackedCount)
List<Message> polled = queueDAO.pollMessages(queueName, 100, 10_000);
assertNotNull(polled);
assertFalse(polled.isEmpty());
assertEquals(unackedCount, polled.size());
// Poll messages from the other queue so we know they don't get unacked later
queueDAO.pollMessages(otherQueueName, 100, 10_000);
// Ack one of the polled messages
assertTrue(queueDAO.ack(queueName, "unack-1"));
// Should have one less un-acked popped message in the queue
Long uacked = queueDAO.queuesDetailVerbose().get(queueName).get("a").get("uacked");
assertNotNull(uacked);
assertEquals(uacked.longValue(), unackedCount - 1);
unack.run();
// Check uacks for both queues after processing
Map<String, Map<String, Map<String, Long>>> details = queueDAO.queuesDetailVerbose();
uacked = details.get(queueName).get("a").get("uacked");
assertNotNull(uacked);
assertEquals(
"The messages that were polled should be unacked still",
uacked.longValue(),
unackedCount - 1);
Long otherUacked = details.get(otherQueueName).get("a").get("uacked");
assertNotNull(otherUacked);
assertEquals(
"Other queue should have all unacked messages", otherUacked.longValue(), count);
Long size = queueDAO.queuesDetail().get(queueName);
assertNotNull(size);
assertEquals(size.longValue(), count - unackedCount);
}
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/sqlite-persistence/src/test/java/com/netflix/conductor/sqlite/dao/SqlitePollDataTest.java | sqlite-persistence/src/test/java/com/netflix/conductor/sqlite/dao/SqlitePollDataTest.java | /*
* Copyright 2025 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.sqlite.dao;
import java.sql.Connection;
import java.sql.SQLException;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
import javax.sql.DataSource;
import org.flywaydb.core.Flyway;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Qualifier;
import org.springframework.boot.autoconfigure.flyway.FlywayAutoConfiguration;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.test.context.ContextConfiguration;
import org.springframework.test.context.TestPropertySource;
import org.springframework.test.context.junit4.SpringRunner;
import com.netflix.conductor.common.config.TestObjectMapperConfiguration;
import com.netflix.conductor.common.metadata.tasks.PollData;
import com.netflix.conductor.dao.PollDataDAO;
import com.netflix.conductor.sqlite.config.SqliteConfiguration;
import com.netflix.conductor.sqlite.util.Query;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import static org.junit.Assert.*;
import static org.junit.Assert.assertTrue;
@ContextConfiguration(
classes = {
TestObjectMapperConfiguration.class,
SqliteConfiguration.class,
FlywayAutoConfiguration.class
})
@RunWith(SpringRunner.class)
@TestPropertySource(
properties = {
"conductor.app.asyncIndexingEnabled=false",
"conductor.elasticsearch.version=0",
"conductor.indexing.type=sqlite",
"spring.flyway.clean-disabled=false"
})
@SpringBootTest
/**
 * Integration tests for the SQLite-backed {@link PollDataDAO}: last-poll-data
 * persistence, NULL-domain handling (stored as "DEFAULT"), and lookups by
 * task, domain, and across all queues.
 */
public class SqlitePollDataTest {
    @Autowired private PollDataDAO pollDataDAO;
    @Autowired private ObjectMapper objectMapper;
    @Qualifier("dataSource")
    @Autowired
    private DataSource dataSource;
    @Autowired Flyway flyway;
    // clean the database between tests.
    @Before
    public void before() {
        try (Connection conn = dataSource.getConnection()) {
            conn.setAutoCommit(true);
            conn.prepareStatement("delete from poll_data").executeUpdate();
        } catch (Exception e) {
            // Rethrow with the cause preserved; printStackTrace() was redundant noise.
            throw new RuntimeException(e);
        }
    }
    /** Runs an arbitrary SQL query and returns each row as a column-name -> value map. */
    private List<Map<String, Object>> queryDb(String query) throws SQLException {
        try (Connection c = dataSource.getConnection()) {
            try (Query q = new Query(objectMapper, c, query)) {
                return q.executeAndFetchMap();
            }
        }
    }
    @Test
    public void updateLastPollDataTest() throws SQLException, JsonProcessingException {
        pollDataDAO.updateLastPollData("dummy-task", "dummy-domain", "dummy-worker-id");
        List<Map<String, Object>> records =
                queryDb("SELECT * FROM poll_data WHERE queue_name = 'dummy-task'");
        assertEquals("More than one poll data records returned", 1, records.size());
        assertEquals("Wrong domain set", "dummy-domain", records.get(0).get("domain"));
        // The worker id is stored inside the serialized PollData JSON payload.
        JsonNode jsonData = objectMapper.readTree(records.get(0).get("json_data").toString());
        assertEquals(
                "Poll data is incorrect", "dummy-worker-id", jsonData.get("workerId").asText());
    }
    @Test
    public void updateLastPollDataNullDomainTest() throws SQLException, JsonProcessingException {
        // A null domain is persisted under the sentinel value "DEFAULT".
        pollDataDAO.updateLastPollData("dummy-task", null, "dummy-worker-id");
        List<Map<String, Object>> records =
                queryDb("SELECT * FROM poll_data WHERE queue_name = 'dummy-task'");
        assertEquals("More than one poll data records returned", 1, records.size());
        assertEquals("Wrong domain set", "DEFAULT", records.get(0).get("domain"));
        JsonNode jsonData = objectMapper.readTree(records.get(0).get("json_data").toString());
        assertEquals(
                "Poll data is incorrect", "dummy-worker-id", jsonData.get("workerId").asText());
    }
    @Test
    public void getPollDataByDomainTest() {
        pollDataDAO.updateLastPollData("dummy-task", "dummy-domain", "dummy-worker-id");
        PollData pollData = pollDataDAO.getPollData("dummy-task", "dummy-domain");
        assertEquals("dummy-task", pollData.getQueueName());
        assertEquals("dummy-domain", pollData.getDomain());
        assertEquals("dummy-worker-id", pollData.getWorkerId());
    }
    @Test
    public void getPollDataByNullDomainTest() {
        // Stored as "DEFAULT" internally, but read back as a null domain.
        pollDataDAO.updateLastPollData("dummy-task", null, "dummy-worker-id");
        PollData pollData = pollDataDAO.getPollData("dummy-task", null);
        assertEquals("dummy-task", pollData.getQueueName());
        assertNull(pollData.getDomain());
        assertEquals("dummy-worker-id", pollData.getWorkerId());
    }
    @Test
    public void getPollDataByTaskTest() {
        pollDataDAO.updateLastPollData("dummy-task1", "domain1", "dummy-worker-id1");
        pollDataDAO.updateLastPollData("dummy-task1", "domain2", "dummy-worker-id2");
        pollDataDAO.updateLastPollData("dummy-task1", null, "dummy-worker-id3");
        pollDataDAO.updateLastPollData("dummy-task2", "domain2", "dummy-worker-id4");
        // Only the three dummy-task1 entries should come back.
        List<PollData> pollData = pollDataDAO.getPollData("dummy-task1");
        assertEquals("Wrong number of records returned", 3, pollData.size());
        // Method references instead of x -> x.getX() lambdas.
        List<String> queueNames =
                pollData.stream().map(PollData::getQueueName).collect(Collectors.toList());
        assertEquals(3, Collections.frequency(queueNames, "dummy-task1"));
        List<String> domains =
                pollData.stream().map(PollData::getDomain).collect(Collectors.toList());
        assertTrue(domains.contains("domain1"));
        assertTrue(domains.contains("domain2"));
        assertTrue(domains.contains(null));
        List<String> workerIds =
                pollData.stream().map(PollData::getWorkerId).collect(Collectors.toList());
        assertTrue(workerIds.contains("dummy-worker-id1"));
        assertTrue(workerIds.contains("dummy-worker-id2"));
        assertTrue(workerIds.contains("dummy-worker-id3"));
    }
    @Test
    public void getAllPollDataTest() {
        pollDataDAO.updateLastPollData("dummy-task1", "domain1", "dummy-worker-id1");
        pollDataDAO.updateLastPollData("dummy-task1", "domain2", "dummy-worker-id2");
        pollDataDAO.updateLastPollData("dummy-task1", null, "dummy-worker-id3");
        pollDataDAO.updateLastPollData("dummy-task2", "domain2", "dummy-worker-id4");
        // All four entries across both queues should come back.
        List<PollData> pollData = pollDataDAO.getAllPollData();
        assertEquals("Wrong number of records returned", 4, pollData.size());
        // Method references instead of x -> x.getX() lambdas.
        List<String> queueNames =
                pollData.stream().map(PollData::getQueueName).collect(Collectors.toList());
        assertEquals(3, Collections.frequency(queueNames, "dummy-task1"));
        assertEquals(1, Collections.frequency(queueNames, "dummy-task2"));
        List<String> domains =
                pollData.stream().map(PollData::getDomain).collect(Collectors.toList());
        assertEquals(1, Collections.frequency(domains, "domain1"));
        assertEquals(2, Collections.frequency(domains, "domain2"));
        assertEquals(1, Collections.frequency(domains, null));
        List<String> workerIds =
                pollData.stream().map(PollData::getWorkerId).collect(Collectors.toList());
        assertTrue(workerIds.contains("dummy-worker-id1"));
        assertTrue(workerIds.contains("dummy-worker-id2"));
        assertTrue(workerIds.contains("dummy-worker-id3"));
        assertTrue(workerIds.contains("dummy-worker-id4"));
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/sqlite-persistence/src/test/java/com/netflix/conductor/sqlite/dao/SqliteMetadataDAOTest.java | sqlite-persistence/src/test/java/com/netflix/conductor/sqlite/dao/SqliteMetadataDAOTest.java | /*
* Copyright 2025 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.sqlite.dao;
import java.util.*;
import java.util.function.Function;
import java.util.stream.Collectors;
import org.apache.commons.lang3.builder.EqualsBuilder;
import org.flywaydb.core.Flyway;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.TestName;
import org.junit.runner.RunWith;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.autoconfigure.flyway.FlywayAutoConfiguration;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.test.context.ContextConfiguration;
import org.springframework.test.context.junit4.SpringRunner;
import com.netflix.conductor.common.config.TestObjectMapperConfiguration;
import com.netflix.conductor.common.metadata.events.EventHandler;
import com.netflix.conductor.common.metadata.tasks.TaskDef;
import com.netflix.conductor.common.metadata.workflow.WorkflowDef;
import com.netflix.conductor.core.exception.NonTransientException;
import com.netflix.conductor.sqlite.config.SqliteConfiguration;
import com.netflix.conductor.sqlite.dao.metadata.SqliteMetadataDAO;
import static org.junit.Assert.*;
import static org.junit.Assert.assertEquals;
@ContextConfiguration(
classes = {
TestObjectMapperConfiguration.class,
SqliteConfiguration.class,
FlywayAutoConfiguration.class
})
@RunWith(SpringRunner.class)
@SpringBootTest(properties = "spring.flyway.clean-disabled=false")
public class SqliteMetadataDAOTest {
@Autowired private SqliteMetadataDAO metadataDAO;
@Rule public TestName name = new TestName();
@Autowired private Flyway flyway;
    // Re-apply Flyway migrations before each test so the schema is present and current.
    @Before
    public void before() {
        flyway.migrate();
    }
@Test
public void testDuplicateWorkflowDef() {
WorkflowDef def = new WorkflowDef();
def.setName("testDuplicate");
def.setVersion(1);
metadataDAO.createWorkflowDef(def);
NonTransientException applicationException =
assertThrows(NonTransientException.class, () -> metadataDAO.createWorkflowDef(def));
assertEquals(
"Workflow with testDuplicate.1 already exists!", applicationException.getMessage());
}
@Test
public void testRemoveNotExistingWorkflowDef() {
NonTransientException applicationException =
assertThrows(
NonTransientException.class,
() -> metadataDAO.removeWorkflowDef("test", 1));
assertEquals(
"No such workflow definition: test version: 1", applicationException.getMessage());
}
    // Full lifecycle of workflow definitions: create, list, fetch by version,
    // latest-version resolution, update, and out-of-order version removal.
    @Test
    public void testWorkflowDefOperations() {
        // Create version 1 of "test" with every audit field populated.
        WorkflowDef def = new WorkflowDef();
        def.setName("test");
        def.setVersion(1);
        def.setDescription("description");
        def.setCreatedBy("unit_test");
        def.setCreateTime(1L);
        def.setOwnerApp("ownerApp");
        def.setUpdatedBy("unit_test2");
        def.setUpdateTime(2L);
        metadataDAO.createWorkflowDef(def);
        List<WorkflowDef> all = metadataDAO.getAllWorkflowDefs();
        assertNotNull(all);
        assertEquals(1, all.size());
        assertEquals("test", all.get(0).getName());
        assertEquals(1, all.get(0).getVersion());
        // Round-trip check: every field must survive persistence.
        WorkflowDef found = metadataDAO.getWorkflowDef("test", 1).get();
        assertTrue(EqualsBuilder.reflectionEquals(def, found));
        // Add version 3 (skipping 2 on purpose) and verify it becomes the latest.
        def.setVersion(3);
        metadataDAO.createWorkflowDef(def);
        all = metadataDAO.getAllWorkflowDefs();
        assertNotNull(all);
        assertEquals(2, all.size());
        assertEquals("test", all.get(0).getName());
        assertEquals(1, all.get(0).getVersion());
        found = metadataDAO.getLatestWorkflowDef(def.getName()).get();
        assertEquals(def.getName(), found.getName());
        assertEquals(def.getVersion(), found.getVersion());
        assertEquals(3, found.getVersion());
        // getAllLatest returns one entry per name, at its highest version.
        all = metadataDAO.getAllLatest();
        assertNotNull(all);
        assertEquals(1, all.size());
        assertEquals("test", all.get(0).getName());
        assertEquals(3, all.get(0).getVersion());
        // getAllVersions returns every version of the name, in ascending order.
        all = metadataDAO.getAllVersions(def.getName());
        assertNotNull(all);
        assertEquals(2, all.size());
        assertEquals("test", all.get(0).getName());
        assertEquals("test", all.get(1).getName());
        assertEquals(1, all.get(0).getVersion());
        assertEquals(3, all.get(1).getVersion());
        // Update mutates in place without creating a new version.
        def.setDescription("updated");
        metadataDAO.updateWorkflowDef(def);
        found = metadataDAO.getWorkflowDef(def.getName(), def.getVersion()).get();
        assertEquals(def.getDescription(), found.getDescription());
        List<String> allnames = metadataDAO.findAll();
        assertNotNull(allnames);
        assertEquals(1, allnames.size());
        assertEquals(def.getName(), allnames.get(0));
        // Inserting version 2 after 3 must not change which version is "latest".
        def.setVersion(2);
        metadataDAO.createWorkflowDef(def);
        found = metadataDAO.getLatestWorkflowDef(def.getName()).get();
        assertEquals(def.getName(), found.getName());
        assertEquals(3, found.getVersion());
        // Removing the latest version (3) makes version 2 the new latest.
        metadataDAO.removeWorkflowDef("test", 3);
        Optional<WorkflowDef> deleted = metadataDAO.getWorkflowDef("test", 3);
        assertFalse(deleted.isPresent());
        found = metadataDAO.getLatestWorkflowDef(def.getName()).get();
        assertEquals(def.getName(), found.getName());
        assertEquals(2, found.getVersion());
        // Removing an older version (1) leaves the latest unchanged.
        metadataDAO.removeWorkflowDef("test", 1);
        deleted = metadataDAO.getWorkflowDef("test", 1);
        assertFalse(deleted.isPresent());
        found = metadataDAO.getLatestWorkflowDef(def.getName()).get();
        assertEquals(def.getName(), found.getName());
        assertEquals(2, found.getVersion());
    }
@Test
public void testTaskDefOperations() {
    // Build a fully-populated task definition so the round-trip comparison
    // below exercises every persisted field.
    TaskDef taskDef = new TaskDef("taskA");
    taskDef.setDescription("description");
    taskDef.setCreatedBy("unit_test");
    taskDef.setCreateTime(1L);
    taskDef.setInputKeys(Arrays.asList("a", "b", "c"));
    taskDef.setOutputKeys(Arrays.asList("01", "o2"));
    taskDef.setOwnerApp("ownerApp");
    taskDef.setRetryCount(3);
    taskDef.setRetryDelaySeconds(100);
    taskDef.setRetryLogic(TaskDef.RetryLogic.FIXED);
    taskDef.setTimeoutPolicy(TaskDef.TimeoutPolicy.ALERT_ONLY);
    taskDef.setUpdatedBy("unit_test2");
    taskDef.setUpdateTime(2L);
    taskDef.setRateLimitFrequencyInSeconds(1);
    taskDef.setRateLimitPerFrequency(1);

    // Create, then read back and compare field-by-field.
    metadataDAO.createTaskDef(taskDef);
    TaskDef stored = metadataDAO.getTaskDef(taskDef.getName());
    assertTrue(EqualsBuilder.reflectionEquals(taskDef, stored));

    // Update the description and verify the change is persisted.
    taskDef.setDescription("updated description");
    metadataDAO.updateTaskDef(taskDef);
    stored = metadataDAO.getTaskDef(taskDef.getName());
    assertTrue(EqualsBuilder.reflectionEquals(taskDef, stored));
    assertEquals("updated description", stored.getDescription());

    // Add nine more definitions: taskA0 .. taskA8.
    for (int i = 0; i < 9; i++) {
        metadataDAO.createTaskDef(new TaskDef("taskA" + i));
    }

    // All ten definitions come back, with unique names.
    List<TaskDef> allDefs = metadataDAO.getAllTaskDefs();
    assertNotNull(allDefs);
    assertEquals(10, allDefs.size());
    Set<String> names = allDefs.stream().map(TaskDef::getName).collect(Collectors.toSet());
    assertEquals(10, names.size());

    // Lexicographically, "taskA" sorts before "taskA0".."taskA8".
    List<String> ordered = names.stream().sorted().collect(Collectors.toList());
    assertEquals(taskDef.getName(), ordered.get(0));
    for (int i = 0; i < 9; i++) {
        assertEquals(taskDef.getName() + i, ordered.get(i + 1));
    }

    // Remove the nine extras; only the original definition remains.
    for (int i = 0; i < 9; i++) {
        metadataDAO.removeTaskDef(taskDef.getName() + i);
    }
    allDefs = metadataDAO.getAllTaskDefs();
    assertNotNull(allDefs);
    assertEquals(1, allDefs.size());
    assertEquals(taskDef.getName(), allDefs.get(0).getName());
}
@Test
public void testRemoveNotExistingTaskDef() {
    // Deleting a definition that was never registered must fail loudly.
    String missingName = "test" + UUID.randomUUID().toString();
    NonTransientException thrown =
            assertThrows(
                    NonTransientException.class,
                    () -> metadataDAO.removeTaskDef(missingName));
    assertEquals("No such task definition", thrown.getMessage());
}
@Test
public void testEventHandlers() {
    final String event1 = "SQS::arn:account090:sqstest1";
    final String event2 = "SQS::arn:account090:sqstest2";

    // Register an inactive handler for event1 that starts "workflow_x".
    EventHandler handler = new EventHandler();
    handler.setName(UUID.randomUUID().toString());
    handler.setActive(false);
    EventHandler.Action startAction = new EventHandler.Action();
    startAction.setAction(EventHandler.Action.Type.start_workflow);
    startAction.setStart_workflow(new EventHandler.StartWorkflow());
    startAction.getStart_workflow().setName("workflow_x");
    handler.getActions().add(startAction);
    handler.setEvent(event1);
    metadataDAO.addEventHandler(handler);

    List<EventHandler> handlers = metadataDAO.getAllEventHandlers();
    assertNotNull(handlers);
    assertEquals(1, handlers.size());
    assertEquals(handler.getName(), handlers.get(0).getName());
    assertEquals(handler.getEvent(), handlers.get(0).getEvent());

    // Active-only lookup must exclude the handler while it is inactive.
    List<EventHandler> activeForEvent = metadataDAO.getEventHandlersForEvent(event1, true);
    assertNotNull(activeForEvent);
    assertEquals(0, activeForEvent.size());

    // Activate the handler and move it to event2.
    handler.setActive(true);
    handler.setEvent(event2);
    metadataDAO.updateEventHandler(handler);

    handlers = metadataDAO.getAllEventHandlers();
    assertNotNull(handlers);
    assertEquals(1, handlers.size());

    // Nothing active is registered for event1 anymore ...
    activeForEvent = metadataDAO.getEventHandlersForEvent(event1, true);
    assertNotNull(activeForEvent);
    assertEquals(0, activeForEvent.size());

    // ... and exactly one active handler is registered for event2.
    activeForEvent = metadataDAO.getEventHandlersForEvent(event2, true);
    assertNotNull(activeForEvent);
    assertEquals(1, activeForEvent.size());
}
@Test
public void testGetAllWorkflowDefsLatestVersions() {
    // test1 gets a single version.
    WorkflowDef def = new WorkflowDef();
    def.setName("test1");
    def.setVersion(1);
    def.setDescription("description");
    def.setCreatedBy("unit_test");
    def.setCreateTime(1L);
    def.setOwnerApp("ownerApp");
    def.setUpdatedBy("unit_test2");
    def.setUpdateTime(2L);
    metadataDAO.createWorkflowDef(def);

    // test2 gets versions 1 and 2.
    def.setName("test2");
    metadataDAO.createWorkflowDef(def);
    def.setVersion(2);
    metadataDAO.createWorkflowDef(def);

    // test3 gets versions 1, 2 and 3.
    def.setName("test3");
    def.setVersion(1);
    metadataDAO.createWorkflowDef(def);
    def.setVersion(2);
    metadataDAO.createWorkflowDef(def);
    def.setVersion(3);
    metadataDAO.createWorkflowDef(def);

    // Results are not guaranteed to be ordered by name, so key them by name
    // before checking that each definition reports its highest version.
    Map<String, WorkflowDef> latestByName =
            metadataDAO.getAllWorkflowDefsLatestVersions().stream()
                    .collect(Collectors.toMap(WorkflowDef::getName, Function.identity()));
    assertNotNull(latestByName);
    // NOTE(review): expected size is 4 although only 3 names are created here —
    // presumably a definition left behind by another test in this class is
    // counted as well; confirm against the test lifecycle.
    assertEquals(4, latestByName.size());
    assertEquals(1, latestByName.get("test1").getVersion());
    assertEquals(2, latestByName.get("test2").getVersion());
    assertEquals(3, latestByName.get("test3").getVersion());
}
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/sqlite-persistence/src/test/java/com/netflix/conductor/sqlite/dao/SqliteExecutionDAOTest.java | sqlite-persistence/src/test/java/com/netflix/conductor/sqlite/dao/SqliteExecutionDAOTest.java | /*
* Copyright 2025 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.sqlite.dao;
import java.util.List;
import org.flywaydb.core.Flyway;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.Mockito;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.autoconfigure.flyway.FlywayAutoConfiguration;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.test.context.ContextConfiguration;
import org.springframework.test.context.junit4.SpringRunner;
import com.netflix.conductor.common.config.TestObjectMapperConfiguration;
import com.netflix.conductor.common.metadata.workflow.WorkflowDef;
import com.netflix.conductor.dao.ExecutionDAO;
import com.netflix.conductor.dao.ExecutionDAOTest;
import com.netflix.conductor.model.WorkflowModel;
import com.netflix.conductor.sqlite.config.SqliteConfiguration;
import com.google.common.collect.Iterables;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
@ContextConfiguration(
        classes = {
            TestObjectMapperConfiguration.class,
            SqliteConfiguration.class,
            FlywayAutoConfiguration.class
        })
@RunWith(SpringRunner.class)
@SpringBootTest(properties = "spring.flyway.clean-disabled=false")
public class SqliteExecutionDAOTest extends ExecutionDAOTest {

    @Autowired private SqliteExecutionDAO executionDAO;

    @Autowired Flyway flyway;

    /** Apply any pending migrations so every test runs against the full schema. */
    @Before
    public void before() {
        flyway.migrate();
    }

    @Test
    public void testPendingByCorrelationId() {
        WorkflowDef workflowDef = new WorkflowDef();
        workflowDef.setName("pending_count_correlation_jtest");

        WorkflowModel workflow = createTestWorkflow();
        workflow.setWorkflowDefinition(workflowDef);
        generateWorkflows(workflow, 10);

        // All ten generated workflows share correlation id "corr001".
        List<WorkflowModel> matched =
                getExecutionDAO()
                        .getWorkflowsByCorrelationId(
                                "pending_count_correlation_jtest", "corr001", true);
        assertNotNull(matched);
        assertEquals(10, matched.size());
    }

    @Test
    public void testRemoveWorkflow() {
        WorkflowDef workflowDef = new WorkflowDef();
        workflowDef.setName("workflow");

        WorkflowModel workflow = createTestWorkflow();
        workflow.setWorkflowDefinition(workflowDef);

        List<String> workflowIds = generateWorkflows(workflow, 1);
        assertEquals(1, getExecutionDAO().getPendingWorkflowCount("workflow"));

        // Removing the only workflow drops the pending count back to zero.
        for (String workflowId : workflowIds) {
            getExecutionDAO().removeWorkflow(workflowId);
        }
        assertEquals(0, getExecutionDAO().getPendingWorkflowCount("workflow"));
    }

    @Test
    public void testRemoveWorkflowWithExpiry() {
        WorkflowDef workflowDef = new WorkflowDef();
        workflowDef.setName("workflow");

        WorkflowModel workflow = createTestWorkflow();
        workflow.setWorkflowDefinition(workflowDef);

        List<String> workflowIds = generateWorkflows(workflow, 1);

        final ExecutionDAO spiedDao = Mockito.spy(getExecutionDAO());
        assertEquals(1, spiedDao.getPendingWorkflowCount("workflow"));

        // Schedule removal with a 1-second TTL and wait (up to 10s) for the
        // delayed delete to actually be invoked.
        workflowIds.forEach(workflowId -> spiedDao.removeWorkflowWithExpiry(workflowId, 1));
        Mockito.verify(spiedDao, Mockito.timeout(10 * 1000))
                .removeWorkflow(Iterables.getLast(workflowIds));
    }

    @Override
    public ExecutionDAO getExecutionDAO() {
        return executionDAO;
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/sqlite-persistence/src/main/java/com/netflix/conductor/sqlite/dao/SqlitePollDataDAO.java | sqlite-persistence/src/main/java/com/netflix/conductor/sqlite/dao/SqlitePollDataDAO.java | /*
* Copyright 2025 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.sqlite.dao;
import java.sql.Connection;
import java.sql.SQLException;
import java.util.List;
import javax.sql.DataSource;
import org.springframework.retry.support.RetryTemplate;
import com.netflix.conductor.common.metadata.tasks.PollData;
import com.netflix.conductor.core.exception.NonTransientException;
import com.netflix.conductor.dao.PollDataDAO;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.common.base.Preconditions;
/**
 * SQLite implementation of {@link PollDataDAO}. Last-poll information is stored as
 * JSON in the {@code poll_data} table, keyed by (queue_name, domain); a null domain
 * is mapped to the synthetic row key "DEFAULT".
 */
public class SqlitePollDataDAO extends SqliteBaseDAO implements PollDataDAO {

    public SqlitePollDataDAO(
            RetryTemplate retryTemplate, ObjectMapper objectMapper, DataSource dataSource) {
        super(retryTemplate, objectMapper, dataSource);
    }

    /**
     * Records that {@code workerId} polled {@code taskDefName} just now.
     *
     * <p>Note that the serialized {@link PollData} keeps the original (possibly null)
     * domain, while the row key uses the "DEFAULT" substitute.
     */
    @Override
    public void updateLastPollData(String taskDefName, String domain, String workerId) {
        Preconditions.checkNotNull(taskDefName, "taskDefName name cannot be null");
        String effectiveDomain = domain == null ? "DEFAULT" : domain;
        PollData pollData = new PollData(taskDefName, domain, workerId, System.currentTimeMillis());
        withTransaction(tx -> insertOrUpdatePollData(tx, pollData, effectiveDomain));
    }

    /** Returns the last recorded poll for the task/domain pair, or null if none exists. */
    @Override
    public PollData getPollData(String taskDefName, String domain) {
        Preconditions.checkNotNull(taskDefName, "taskDefName name cannot be null");
        String effectiveDomain = (domain == null) ? "DEFAULT" : domain;
        return getWithRetriedTransactions(tx -> readPollData(tx, taskDefName, effectiveDomain));
    }

    /** Returns the poll data recorded for every domain of the given task definition. */
    @Override
    public List<PollData> getPollData(String taskDefName) {
        Preconditions.checkNotNull(taskDefName, "taskDefName name cannot be null");
        return readAllPollData(taskDefName);
    }

    /**
     * Returns poll data across all queues, ordered by queue name.
     *
     * <p>Runs on a dedicated auto-commit connection instead of the usual transaction
     * helpers; the connection's original auto-commit mode is restored before it is
     * returned to the pool.
     */
    @Override
    public List<PollData> getAllPollData() {
        try (Connection tx = dataSource.getConnection()) {
            boolean previousAutoCommitMode = tx.getAutoCommit();
            tx.setAutoCommit(true);
            try {
                String GET_ALL_POLL_DATA = "SELECT json_data FROM poll_data ORDER BY queue_name";
                return query(tx, GET_ALL_POLL_DATA, q -> q.executeAndFetch(PollData.class));
            } catch (Throwable th) {
                throw new NonTransientException(th.getMessage(), th);
            } finally {
                // Restore the pooled connection's original mode.
                tx.setAutoCommit(previousAutoCommitMode);
            }
        } catch (SQLException ex) {
            throw new NonTransientException(ex.getMessage(), ex);
        }
    }

    // Upsert for a poll_data row. UPDATE is attempted first (the common case); the
    // INSERT .. ON CONFLICT fallback only runs when no row was updated — see the
    // rationale below.
    private void insertOrUpdatePollData(Connection connection, PollData pollData, String domain) {
        try {
            /*
             * Most times the row will be updated so let's try the update first. This used to be an 'INSERT/ON CONFLICT do update' sql statement. The problem with that
             * is that if we try the INSERT first, the sequence will be increased even if the ON CONFLICT happens. Since polling happens *a lot*, the sequence can increase
             * dramatically even though it won't be used.
             */
            String UPDATE_POLL_DATA =
                    "UPDATE poll_data SET json_data=?, modified_on=CURRENT_TIMESTAMP WHERE queue_name=? AND domain=?";
            int rowsUpdated =
                    query(
                            connection,
                            UPDATE_POLL_DATA,
                            q ->
                                    q.addJsonParameter(pollData)
                                            .addParameter(pollData.getQueueName())
                                            .addParameter(domain)
                                            .executeUpdate());

            if (rowsUpdated == 0) {
                String INSERT_POLL_DATA =
                        "INSERT INTO poll_data (queue_name, domain, json_data, modified_on) VALUES (?, ?, ?, CURRENT_TIMESTAMP) ON CONFLICT (queue_name,domain) DO UPDATE SET json_data=excluded.json_data, modified_on=excluded.modified_on";
                execute(
                        connection,
                        INSERT_POLL_DATA,
                        q ->
                                q.addParameter(pollData.getQueueName())
                                        .addParameter(domain)
                                        .addJsonParameter(pollData)
                                        .executeUpdate());
            }
        } catch (NonTransientException e) {
            // Writes that would move lastPollTime backwards are rejected with this
            // specific message (presumably by a DB-side check — not visible here)
            // and are deliberately ignored; anything else is re-thrown.
            if (!e.getMessage().startsWith("ERROR: lastPollTime cannot be set to a lower value")) {
                throw e;
            }
        }
    }

    /** Reads one poll_data row for (queueName, domain); null if absent. */
    private PollData readPollData(Connection connection, String queueName, String domain) {
        String GET_POLL_DATA =
                "SELECT json_data FROM poll_data WHERE queue_name = ? AND domain = ?";
        return query(
                connection,
                GET_POLL_DATA,
                q ->
                        q.addParameter(queueName)
                                .addParameter(domain)
                                .executeAndFetchFirst(PollData.class));
    }

    /** Reads all poll_data rows (every domain) for the given queue name. */
    private List<PollData> readAllPollData(String queueName) {
        String GET_ALL_POLL_DATA = "SELECT json_data FROM poll_data WHERE queue_name = ?";
        return queryWithTransaction(
                GET_ALL_POLL_DATA, q -> q.addParameter(queueName).executeAndFetch(PollData.class));
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/sqlite-persistence/src/main/java/com/netflix/conductor/sqlite/dao/SqliteIndexDAO.java | sqlite-persistence/src/main/java/com/netflix/conductor/sqlite/dao/SqliteIndexDAO.java | /*
* Copyright 2025 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.sqlite.dao;
import java.sql.Timestamp;
import java.time.Instant;
import java.time.format.DateTimeFormatter;
import java.time.temporal.TemporalAccessor;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.*;
import javax.sql.DataSource;
import org.springframework.retry.support.RetryTemplate;
import com.netflix.conductor.common.metadata.events.EventExecution;
import com.netflix.conductor.common.metadata.tasks.TaskExecLog;
import com.netflix.conductor.common.run.SearchResult;
import com.netflix.conductor.common.run.TaskSummary;
import com.netflix.conductor.common.run.WorkflowSummary;
import com.netflix.conductor.core.events.queue.Message;
import com.netflix.conductor.dao.IndexDAO;
import com.netflix.conductor.metrics.Monitors;
import com.netflix.conductor.sqlite.config.SqliteProperties;
import com.netflix.conductor.sqlite.util.SqliteIndexQueryBuilder;
import com.fasterxml.jackson.databind.ObjectMapper;
/**
 * SQLite-backed {@link IndexDAO}.
 *
 * <p>Workflow and task summaries are upserted into the {@code workflow_index} and
 * {@code task_index} tables and searched via SQL assembled by
 * {@link SqliteIndexQueryBuilder}; task execution logs live in
 * {@code task_execution_logs}. Several operations of the {@link IndexDAO} contract
 * (event executions, messages, async indexing, archival search) are not supported
 * by this implementation and are logged no-ops.
 */
public class SqliteIndexDAO extends SqliteBaseDAO implements IndexDAO {

    private final SqliteProperties properties;

    /** Pool used by the async remove operations; overflowing requests are dropped. */
    private final ExecutorService executorService;

    private static final int CORE_POOL_SIZE = 6;
    private static final long KEEP_ALIVE_TIME = 1L;

    /** When true, an upsert only rewrites an existing row if the status changed. */
    private final boolean onlyIndexOnStatusChange;

    public SqliteIndexDAO(
            RetryTemplate retryTemplate,
            ObjectMapper objectMapper,
            DataSource dataSource,
            SqliteProperties properties) {
        super(retryTemplate, objectMapper, dataSource);
        this.properties = properties;
        this.onlyIndexOnStatusChange = properties.getOnlyIndexOnStatusChange();

        int maximumPoolSize = properties.getAsyncMaxPoolSize();
        int workerQueueSize = properties.getAsyncWorkerQueueSize();

        // Bounded worker pool for async operations: when the queue is full the
        // task is discarded (and counted) instead of blocking the caller.
        this.executorService =
                new ThreadPoolExecutor(
                        CORE_POOL_SIZE,
                        maximumPoolSize,
                        KEEP_ALIVE_TIME,
                        TimeUnit.MINUTES,
                        new LinkedBlockingQueue<>(workerQueueSize),
                        (runnable, executor) -> {
                            logger.warn(
                                    "Request {} to async dao discarded in executor {}",
                                    runnable,
                                    executor);
                            Monitors.recordDiscardedIndexingCount("indexQueue");
                        });
    }

    /**
     * Upserts a workflow summary. An existing row is only overwritten when the
     * incoming update time is equal or newer (and, when configured, only when the
     * status actually changed).
     */
    @Override
    public void indexWorkflow(WorkflowSummary workflow) {
        String INSERT_WORKFLOW_INDEX_SQL =
                "INSERT INTO workflow_index (workflow_id, correlation_id, workflow_type, start_time, update_time, status, json_data) "
                        + " VALUES (?, ?, ?, ?, ?, ?, ?) ON CONFLICT (workflow_id) "
                        + " DO UPDATE SET correlation_id = excluded.correlation_id, workflow_type = excluded.workflow_type, "
                        + " start_time = excluded.start_time, status = excluded.status, json_data = excluded.json_data, "
                        + " update_time = excluded.update_time "
                        + " WHERE excluded.update_time >= workflow_index.update_time";

        if (onlyIndexOnStatusChange) {
            INSERT_WORKFLOW_INDEX_SQL += " AND workflow_index.status != excluded.status";
        }

        // Summary timestamps are ISO-8601 instants; convert to SQL timestamps.
        TemporalAccessor updateTa = DateTimeFormatter.ISO_INSTANT.parse(workflow.getUpdateTime());
        Timestamp updateTime = Timestamp.from(Instant.from(updateTa));

        TemporalAccessor ta = DateTimeFormatter.ISO_INSTANT.parse(workflow.getStartTime());
        Timestamp startTime = Timestamp.from(Instant.from(ta));

        int rowsUpdated =
                queryWithTransaction(
                        INSERT_WORKFLOW_INDEX_SQL,
                        q ->
                                q.addParameter(workflow.getWorkflowId())
                                        .addParameter(workflow.getCorrelationId())
                                        .addParameter(workflow.getWorkflowType())
                                        .addParameter(startTime.toString())
                                        .addParameter(updateTime.toString())
                                        .addParameter(workflow.getStatus().toString())
                                        .addJsonParameter(workflow)
                                        .executeUpdate());
        logger.debug("Sqlite index workflow rows updated: {}", rowsUpdated);
    }

    /**
     * Searches the workflow index; runs the page query plus a separate COUNT query
     * to produce the total-hits figure.
     */
    @Override
    public SearchResult<WorkflowSummary> searchWorkflowSummary(
            String query, String freeText, int start, int count, List<String> sort) {
        SqliteIndexQueryBuilder queryBuilder =
                new SqliteIndexQueryBuilder(
                        "workflow_index", query, freeText, start, count, sort, properties);

        List<WorkflowSummary> results =
                queryWithTransaction(
                        queryBuilder.getQuery(),
                        q -> {
                            queryBuilder.addParameters(q);
                            queryBuilder.addPagingParameters(q);
                            return q.executeAndFetch(WorkflowSummary.class);
                        });

        List<String> totalHitResults =
                queryWithTransaction(
                        queryBuilder.getCountQuery(),
                        q -> {
                            queryBuilder.addParameters(q);
                            return q.executeAndFetch(String.class);
                        });
        int totalHits = Integer.parseInt(totalHitResults.get(0));
        return new SearchResult<>(totalHits, results);
    }

    /**
     * Upserts a task summary; like {@link #indexWorkflow} the row is only replaced
     * by an equal-or-newer update (and optionally only on a status change).
     */
    @Override
    public void indexTask(TaskSummary task) {
        String INSERT_TASK_INDEX_SQL =
                "INSERT INTO task_index (task_id, task_type, task_def_name, status, start_time, update_time, workflow_type, json_data)"
                        + "VALUES (?, ?, ?, ?, ?, ?, ?, ?) ON CONFLICT (task_id) "
                        + "DO UPDATE SET task_type = excluded.task_type, task_def_name = excluded.task_def_name, "
                        + "status = excluded.status, update_time = excluded.update_time, json_data = excluded.json_data "
                        + "WHERE excluded.update_time >= task_index.update_time";

        if (onlyIndexOnStatusChange) {
            INSERT_TASK_INDEX_SQL += " AND task_index.status != excluded.status";
        }

        TemporalAccessor updateTa = DateTimeFormatter.ISO_INSTANT.parse(task.getUpdateTime());
        Timestamp updateTime = Timestamp.from(Instant.from(updateTa));

        TemporalAccessor startTa = DateTimeFormatter.ISO_INSTANT.parse(task.getStartTime());
        Timestamp startTime = Timestamp.from(Instant.from(startTa));

        int rowsUpdated =
                queryWithTransaction(
                        INSERT_TASK_INDEX_SQL,
                        q ->
                                q.addParameter(task.getTaskId())
                                        .addParameter(task.getTaskType())
                                        .addParameter(task.getTaskDefName())
                                        .addParameter(task.getStatus().toString())
                                        .addParameter(startTime.toString())
                                        .addParameter(updateTime.toString())
                                        .addParameter(task.getWorkflowType())
                                        .addJsonParameter(task)
                                        .executeUpdate());
        logger.debug("Sqlite index task rows updated: {}", rowsUpdated);
    }

    /** Searches the task index; see {@link #searchWorkflowSummary} for the pattern. */
    @Override
    public SearchResult<TaskSummary> searchTaskSummary(
            String query, String freeText, int start, int count, List<String> sort) {
        SqliteIndexQueryBuilder queryBuilder =
                new SqliteIndexQueryBuilder(
                        "task_index", query, freeText, start, count, sort, properties);

        List<TaskSummary> results =
                queryWithTransaction(
                        queryBuilder.getQuery(),
                        q -> {
                            queryBuilder.addParameters(q);
                            queryBuilder.addPagingParameters(q);
                            return q.executeAndFetch(TaskSummary.class);
                        });

        List<String> totalHitResults =
                queryWithTransaction(
                        queryBuilder.getCountQuery(),
                        q -> {
                            queryBuilder.addParameters(q);
                            return q.executeAndFetch(String.class);
                        });
        int totalHits = Integer.parseInt(totalHitResults.get(0));
        return new SearchResult<>(totalHits, results);
    }

    /** Inserts each log row individually (one transaction per log entry). */
    @Override
    public void addTaskExecutionLogs(List<TaskExecLog> logs) {
        String INSERT_LOG =
                "INSERT INTO task_execution_logs (task_id, created_time, log) VALUES (?, ?, ?)";
        for (TaskExecLog log : logs) {
            queryWithTransaction(
                    INSERT_LOG,
                    q ->
                            q.addParameter(log.getTaskId())
                                    .addParameter(new Timestamp(log.getCreatedTime()))
                                    .addParameter(log.getLog())
                                    .executeUpdate());
        }
    }

    /** Returns the execution logs of a task, oldest first. */
    @Override
    public List<TaskExecLog> getTaskExecutionLogs(String taskId) {
        return queryWithTransaction(
                "SELECT log, task_id, created_time FROM task_execution_logs WHERE task_id = ? ORDER BY created_time ASC",
                q ->
                        q.addParameter(taskId)
                                .executeAndFetch(
                                        rs -> {
                                            List<TaskExecLog> result = new ArrayList<>();
                                            while (rs.next()) {
                                                TaskExecLog log = new TaskExecLog();
                                                log.setLog(rs.getString("log"));
                                                log.setTaskId(rs.getString("task_id"));
                                                log.setCreatedTime(
                                                        rs.getTimestamp("created_time").getTime());
                                                result.add(log);
                                            }
                                            return result;
                                        }));
    }

    /** No-op: no additional setup is performed by this implementation. */
    @Override
    public void setup() {}

    /** Not supported for SQLite indexing; returns an already-completed future. */
    @Override
    public CompletableFuture<Void> asyncIndexWorkflow(WorkflowSummary workflow) {
        logger.info("asyncIndexWorkflow is not supported for Sqlite indexing");
        return CompletableFuture.completedFuture(null);
    }

    /** Not supported for SQLite indexing; returns an already-completed future. */
    @Override
    public CompletableFuture<Void> asyncIndexTask(TaskSummary task) {
        logger.info("asyncIndexTask is not supported for Sqlite indexing");
        return CompletableFuture.completedFuture(null);
    }

    /** Not supported; use {@link #searchWorkflowSummary} instead. Returns null. */
    @Override
    public SearchResult<String> searchWorkflows(
            String query, String freeText, int start, int count, List<String> sort) {
        logger.info("searchWorkflows is not supported for Sqlite indexing");
        return null;
    }

    /** Not supported; use {@link #searchTaskSummary} instead. Returns null. */
    @Override
    public SearchResult<String> searchTasks(
            String query, String freeText, int start, int count, List<String> sort) {
        logger.info("searchTasks is not supported for Sqlite indexing");
        return null;
    }

    /** Deletes the workflow's row from the workflow index. */
    @Override
    public void removeWorkflow(String workflowId) {
        String REMOVE_WORKFLOW_SQL = "DELETE FROM workflow_index WHERE workflow_id = ?";
        queryWithTransaction(REMOVE_WORKFLOW_SQL, q -> q.addParameter(workflowId).executeUpdate());
    }

    /** Runs {@link #removeWorkflow} on the internal worker pool. */
    @Override
    public CompletableFuture<Void> asyncRemoveWorkflow(String workflowId) {
        return CompletableFuture.runAsync(() -> removeWorkflow(workflowId), executorService);
    }

    /** Not supported for SQLite indexing; logged no-op. */
    @Override
    public void updateWorkflow(String workflowInstanceId, String[] keys, Object[] values) {
        logger.info("updateWorkflow is not supported for Sqlite indexing");
    }

    /** Not supported for SQLite indexing; returns an already-completed future. */
    @Override
    public CompletableFuture<Void> asyncUpdateWorkflow(
            String workflowInstanceId, String[] keys, Object[] values) {
        logger.info("asyncUpdateWorkflow is not supported for Sqlite indexing");
        return CompletableFuture.completedFuture(null);
    }

    /**
     * Deletes the task's index row and its execution logs.
     *
     * <p>Both deletes now run on the single connection supplied by
     * {@code withTransaction}. The previous code called
     * {@code queryWithTransaction(...)} here, which ignored that connection and
     * opened a second, independent transaction per statement — losing atomicity
     * between the two deletes and risking a write-lock conflict on SQLite.
     */
    @Override
    public void removeTask(String workflowId, String taskId) {
        String REMOVE_TASK_SQL = "DELETE FROM task_index WHERE task_id = ?";
        String REMOVE_TASK_EXECUTION_SQL = "DELETE FROM task_execution_logs WHERE task_id =?";

        withTransaction(
                connection -> {
                    query(
                            connection,
                            REMOVE_TASK_SQL,
                            q -> q.addParameter(taskId).executeUpdate());
                    query(
                            connection,
                            REMOVE_TASK_EXECUTION_SQL,
                            q -> q.addParameter(taskId).executeUpdate());
                });
    }

    /** Runs {@link #removeTask} on the internal worker pool. */
    @Override
    public CompletableFuture<Void> asyncRemoveTask(String workflowId, String taskId) {
        return CompletableFuture.runAsync(() -> removeTask(workflowId, taskId), executorService);
    }

    /** Not supported for SQLite indexing; logged no-op. */
    @Override
    public void updateTask(String workflowId, String taskId, String[] keys, Object[] values) {
        logger.info("updateTask is not supported for Sqlite indexing");
    }

    /** Not supported for SQLite indexing; returns an already-completed future. */
    @Override
    public CompletableFuture<Void> asyncUpdateTask(
            String workflowId, String taskId, String[] keys, Object[] values) {
        logger.info("asyncUpdateTask is not supported for Sqlite indexing");
        return CompletableFuture.completedFuture(null);
    }

    /** Not supported for SQLite indexing; returns null. */
    @Override
    public String get(String workflowInstanceId, String key) {
        logger.info("get is not supported for Sqlite indexing");
        return null;
    }

    /** Not supported for SQLite indexing; returns an already-completed future. */
    @Override
    public CompletableFuture<Void> asyncAddTaskExecutionLogs(List<TaskExecLog> logs) {
        logger.info("asyncAddTaskExecutionLogs is not supported for Sqlite indexing");
        return CompletableFuture.completedFuture(null);
    }

    /** Not supported for SQLite indexing; logged no-op. */
    @Override
    public void addEventExecution(EventExecution eventExecution) {
        logger.info("addEventExecution is not supported for Sqlite indexing");
    }

    /** Not supported for SQLite indexing; returns null. */
    @Override
    public List<EventExecution> getEventExecutions(String event) {
        logger.info("getEventExecutions is not supported for Sqlite indexing");
        return null;
    }

    /** Not supported for SQLite indexing; returns an already-completed future. */
    @Override
    public CompletableFuture<Void> asyncAddEventExecution(EventExecution eventExecution) {
        logger.info("asyncAddEventExecution is not supported for Sqlite indexing");
        return CompletableFuture.completedFuture(null);
    }

    /** Not supported for SQLite indexing; logged no-op. */
    @Override
    public void addMessage(String queue, Message msg) {
        logger.info("addMessage is not supported for Sqlite indexing");
    }

    /** Not supported for SQLite indexing; returns an already-completed future. */
    @Override
    public CompletableFuture<Void> asyncAddMessage(String queue, Message message) {
        logger.info("asyncAddMessage is not supported for Sqlite indexing");
        return CompletableFuture.completedFuture(null);
    }

    /** Not supported for SQLite indexing; returns null. */
    @Override
    public List<Message> getMessages(String queue) {
        logger.info("getMessages is not supported for Sqlite indexing");
        return null;
    }

    /** Not supported for SQLite indexing; returns null. */
    @Override
    public List<String> searchArchivableWorkflows(String indexName, long archiveTtlDays) {
        logger.info("searchArchivableWorkflows is not supported for Sqlite indexing");
        return null;
    }

    /** Not supported for SQLite indexing; always returns 0. */
    public long getWorkflowCount(String query, String freeText) {
        logger.info("getWorkflowCount is not supported for Sqlite indexing");
        return 0;
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/sqlite-persistence/src/main/java/com/netflix/conductor/sqlite/dao/SqliteQueueDAO.java | sqlite-persistence/src/main/java/com/netflix/conductor/sqlite/dao/SqliteQueueDAO.java | /*
* Copyright 2025 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.sqlite.dao;
import java.sql.Connection;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;
import javax.sql.DataSource;
import org.springframework.retry.support.RetryTemplate;
import com.netflix.conductor.core.events.queue.Message;
import com.netflix.conductor.dao.QueueDAO;
import com.netflix.conductor.sqlite.config.SqliteProperties;
import com.netflix.conductor.sqlite.util.ExecutorsUtil;
import com.netflix.conductor.sqlite.util.Query;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Maps;
import com.google.common.util.concurrent.Uninterruptibles;
import jakarta.annotation.PreDestroy;
public class SqliteQueueDAO extends SqliteBaseDAO implements QueueDAO {
private static final Long UNACK_SCHEDULE_MS = 60_000L;
private final ScheduledExecutorService scheduledExecutorService;
/**
 * Creates the DAO and starts a single-threaded scheduler that runs
 * {@code processAllUnacks} (defined later in this class) every 60 seconds, with a
 * 60-second initial delay.
 *
 * <p>NOTE(review): the {@code properties} parameter is accepted but never used in
 * this constructor — confirm whether it is needed.
 */
public SqliteQueueDAO(
        RetryTemplate retryTemplate,
        ObjectMapper objectMapper,
        DataSource dataSource,
        SqliteProperties properties) {
    super(retryTemplate, objectMapper, dataSource);

    this.scheduledExecutorService =
            Executors.newSingleThreadScheduledExecutor(
                    ExecutorsUtil.newNamedThreadFactory("sqlite-queue-"));
    // Periodic background sweep; both the initial delay and the period are
    // UNACK_SCHEDULE_MS (60s).
    this.scheduledExecutorService.scheduleAtFixedRate(
            this::processAllUnacks,
            UNACK_SCHEDULE_MS,
            UNACK_SCHEDULE_MS,
            TimeUnit.MILLISECONDS);
    logger.debug("{} is ready to serve", SqliteQueueDAO.class.getName());
}
/**
 * Stops the background scheduler on bean destruction: waits up to 30 seconds for a
 * graceful shutdown, then forces it. If the wait itself is interrupted, the
 * executor is force-stopped and the thread's interrupt flag is restored.
 */
@PreDestroy
public void destroy() {
    try {
        this.scheduledExecutorService.shutdown();
        if (scheduledExecutorService.awaitTermination(30, TimeUnit.SECONDS)) {
            logger.debug("tasks completed, shutting down");
        } else {
            logger.warn("Forcing shutdown after waiting for 30 seconds");
            scheduledExecutorService.shutdownNow();
        }
    } catch (InterruptedException ie) {
        logger.warn(
                "Shutdown interrupted, invoking shutdownNow on scheduledExecutorService for processAllUnacks",
                ie);
        scheduledExecutorService.shutdownNow();
        // Preserve the interrupt status for callers further up the stack.
        Thread.currentThread().interrupt();
    }
}
/** Enqueues a message with default (0) priority after the given delay in seconds. */
@Override
public void push(String queueName, String id, long offsetTimeInSecond) {
    push(queueName, id, 0, offsetTimeInSecond);
}
/** Enqueues a message (no payload) with the given priority and delivery delay. */
@Override
public void push(String queueName, String id, int priority, long offsetTimeInSecond) {
    withTransaction(tx -> pushMessage(tx, queueName, id, null, priority, offsetTimeInSecond));
}
/**
 * Persists a whole batch of messages inside a single transaction; each message is
 * enqueued with its own payload and priority and a zero delivery delay.
 */
@Override
public void push(String queueName, List<Message> messages) {
    withTransaction(
            tx -> {
                for (Message message : messages) {
                    pushMessage(
                            tx,
                            queueName,
                            message.getId(),
                            message.getPayload(),
                            message.getPriority(),
                            0);
                }
            });
}
/** Same as {@link #pushIfNotExists(String, String, int, long)} with priority 0. */
@Override
public boolean pushIfNotExists(String queueName, String id, long offsetTimeInSecond) {
    return pushIfNotExists(queueName, id, 0, offsetTimeInSecond);
}
/**
 * Check-and-insert inside a retried transaction: the message is only enqueued when
 * no message with this id is already present on the queue.
 *
 * @return true if the message was enqueued; false if it already existed.
 */
@Override
public boolean pushIfNotExists(
        String queueName, String id, int priority, long offsetTimeInSecond) {
    return getWithRetriedTransactions(
            tx -> {
                if (existsMessage(tx, queueName, id)) {
                    return false;
                }
                pushMessage(tx, queueName, id, null, priority, offsetTimeInSecond);
                return true;
            });
}
/** Polls up to {@code count} messages and returns only their ids. */
@Override
public List<String> pop(String queueName, int count, int timeout) {
    List<String> ids = new ArrayList<>();
    for (Message message : pollMessages(queueName, count, timeout)) {
        ids.add(message.getId());
    }
    return ids;
}
/**
 * Polls up to {@code count} messages from the queue.
 *
 * <p>With a timeout below 1 ms, a single pop attempt is made. Otherwise, pop
 * attempts repeat (sleeping 100 ms between rounds) until enough messages have been
 * collected or the timeout elapses. A null slice from the transaction helper
 * signals a transaction conflict; in that case whatever was popped so far is
 * returned rather than failing the poll.
 */
@Override
public List<Message> pollMessages(String queueName, int count, int timeout) {
    if (timeout < 1) {
        // Single non-blocking attempt; normalize a conflict (null) to empty.
        List<Message> messages =
                getWithTransactionWithOutErrorPropagation(
                        tx -> popMessages(tx, queueName, count, timeout));
        if (messages == null) {
            return new ArrayList<>();
        }
        return messages;
    }

    long start = System.currentTimeMillis();
    final List<Message> messages = new ArrayList<>();

    while (true) {
        // Ask only for the remainder still needed to reach `count`.
        List<Message> messagesSlice =
                getWithTransactionWithOutErrorPropagation(
                        tx -> popMessages(tx, queueName, count - messages.size(), timeout));
        if (messagesSlice == null) {
            logger.warn(
                    "Unable to poll {} messages from {} due to tx conflict, only {} popped",
                    count,
                    queueName,
                    messages.size());
            // conflict could have happened, returned messages popped so far
            return messages;
        }
        messages.addAll(messagesSlice);

        // Done when we have enough messages or the deadline has passed.
        if (messages.size() >= count || ((System.currentTimeMillis() - start) > timeout)) {
            return messages;
        }
        Uninterruptibles.sleepUninterruptibly(100, TimeUnit.MILLISECONDS);
    }
}
/** Deletes the message from the queue (see removeMessage, defined later in this class). */
@Override
public void remove(String queueName, String messageId) {
    withTransaction(tx -> removeMessage(tx, queueName, messageId));
}
/** Returns the total number of messages on the queue (COUNT(*), popped or not). */
@Override
public int getSize(String queueName) {
    final String GET_QUEUE_SIZE = "SELECT COUNT(*) FROM queue_message WHERE queue_name = ?";
    return queryWithTransaction(
            GET_QUEUE_SIZE,
            q -> {
                Long total = (Long) q.addParameter(queueName).executeCount();
                return total.intValue();
            });
}
@Override
public boolean ack(String queueName, String messageId) {
return getWithRetriedTransactions(tx -> removeMessage(tx, queueName, messageId));
}
    /**
     * Postpones redelivery of a popped message by {@code unackTimeout} milliseconds.
     *
     * @return true if exactly one row was updated
     */
    @Override
    public boolean setUnackTimeout(String queueName, String messageId, long unackTimeout) {
        long updatedOffsetTimeInSecond = unackTimeout / 1000;
        // NOTE(review): deliver_on is bound to the raw seconds value here, unlike
        // pushMessage/resetOffsetTime which bind a datetime(...) expression — confirm
        // the deliver_on comparisons elsewhere still behave as intended.
        final String UPDATE_UNACK_TIMEOUT =
                "UPDATE queue_message SET offset_time_seconds = ?, deliver_on = ? WHERE queue_name = ? AND message_id = ?";
        return queryWithTransaction(
                UPDATE_UNACK_TIMEOUT,
                q ->
                        q.addParameter(updatedOffsetTimeInSecond)
                                .addParameter(updatedOffsetTimeInSecond)
                                .addParameter(queueName)
                                .addParameter(messageId)
                                .executeUpdate())
                == 1;
    }
@Override
public void flush(String queueName) {
final String FLUSH_QUEUE = "DELETE FROM queue_message WHERE queue_name = ?";
executeWithTransaction(FLUSH_QUEUE, q -> q.addParameter(queueName).executeDelete());
}
@Override
public Map<String, Long> queuesDetail() {
final String GET_QUEUES_DETAIL =
"SELECT queue_name, (SELECT count(*) FROM queue_message WHERE popped = false AND queue_name = q.queue_name) AS size FROM queue q";
return queryWithTransaction(
GET_QUEUES_DETAIL,
q ->
q.executeAndFetch(
rs -> {
Map<String, Long> detail = Maps.newHashMap();
while (rs.next()) {
String queueName = rs.getString("queue_name");
Long size = rs.getLong("size");
detail.put(queueName, size);
}
return detail;
}));
}
    /**
     * Returns per-queue size and unacked counts in the nested map layout expected by the
     * QueueDAO contract: queueName -> shard -> {"size": n, "uacked": m}.
     */
    @Override
    public Map<String, Map<String, Map<String, Long>>> queuesDetailVerbose() {
        // @formatter:off
        final String GET_QUEUES_DETAIL_VERBOSE =
                "SELECT queue_name, \n"
                        + "       (SELECT count(*) FROM queue_message WHERE popped = false AND queue_name = q.queue_name) AS size,\n"
                        + "       (SELECT count(*) FROM queue_message WHERE popped = true AND queue_name = q.queue_name) AS uacked \n"
                        + "FROM queue q";
        // @formatter:on
        return queryWithTransaction(
                GET_QUEUES_DETAIL_VERBOSE,
                q ->
                        q.executeAndFetch(
                                rs -> {
                                    Map<String, Map<String, Map<String, Long>>> result =
                                            Maps.newHashMap();
                                    while (rs.next()) {
                                        String queueName = rs.getString("queue_name");
                                        Long size = rs.getLong("size");
                                        Long queueUnacked = rs.getLong("uacked");
                                        result.put(
                                                queueName,
                                                ImmutableMap.of(
                                                        "a",
                                                        ImmutableMap
                                                                .of( // sharding not implemented,
                                                                     // returning only
                                                                     // one shard with all the
                                                                     // info
                                                                        "size",
                                                                        size,
                                                                        "uacked",
                                                                        queueUnacked)));
                                    }
                                    return result;
                                }));
    }
public void processAllUnacks() {
logger.trace("processAllUnacks started");
getWithRetriedTransactions(
tx -> {
String LOCK_TASKS =
"SELECT queue_name, message_id FROM queue_message WHERE popped = true AND (deliver_on '+60 seconds') < datetime(current_timestamp) limit 1000";
List<QueueMessage> messages =
query(
tx,
LOCK_TASKS,
p ->
p.executeAndFetch(
rs -> {
List<QueueMessage> results =
new ArrayList<QueueMessage>();
while (rs.next()) {
QueueMessage qm = new QueueMessage();
qm.queueName =
rs.getString("queue_name");
qm.messageId =
rs.getString("message_id");
results.add(qm);
}
return results;
}));
if (messages.size() == 0) {
return 0;
}
Map<String, List<String>> queueMessageMap = new HashMap<String, List<String>>();
for (QueueMessage qm : messages) {
if (!queueMessageMap.containsKey(qm.queueName)) {
queueMessageMap.put(qm.queueName, new ArrayList<String>());
}
queueMessageMap.get(qm.queueName).add(qm.messageId);
}
int totalUnacked = 0;
for (String queueName : queueMessageMap.keySet()) {
Integer unacked = 0;
try {
final List<String> msgIds = queueMessageMap.get(queueName);
final String UPDATE_POPPED =
String.format(
"UPDATE queue_message SET popped = false WHERE queue_name = ? and message_id IN (%s)",
Query.generateInBindings(msgIds.size()));
unacked =
query(
tx,
UPDATE_POPPED,
q ->
q.addParameter(queueName)
.addParameters(msgIds)
.executeUpdate());
} catch (Exception e) {
e.printStackTrace();
}
totalUnacked += unacked;
logger.debug("Unacked {} messages from all queues", unacked);
}
if (totalUnacked > 0) {
logger.debug("Unacked {} messages from all queues", totalUnacked);
}
return totalUnacked;
});
}
@Override
public void processUnacks(String queueName) {
final String PROCESS_UNACKS =
"UPDATE queue_message SET popped = false WHERE queue_name = ? AND popped = true AND datetime(current_timestamp - ('60 seconds')) > deliver_on";
executeWithTransaction(PROCESS_UNACKS, q -> q.addParameter(queueName).executeUpdate());
}
@Override
public boolean resetOffsetTime(String queueName, String id) {
long offsetTimeInSecond = 0; // Reset to 0
final String SET_OFFSET_TIME =
"UPDATE queue_message SET offset_time_seconds = ?, deliver_on = datetime(CURRENT_TIMESTAMP, '+' || ? || ' seconds')"
+ "WHERE queue_name = ? AND message_id = ?";
return queryWithTransaction(
SET_OFFSET_TIME,
q ->
q.addParameter(offsetTimeInSecond)
.addParameter(offsetTimeInSecond)
.addParameter(queueName)
.addParameter(id)
.executeUpdate()
== 1);
}
private boolean existsMessage(Connection connection, String queueName, String messageId) {
final String EXISTS_MESSAGE =
"SELECT EXISTS(SELECT 1 FROM queue_message WHERE queue_name = ? AND message_id = ?)";
return query(
connection,
EXISTS_MESSAGE,
q -> q.addParameter(queueName).addParameter(messageId).exists());
}
    /**
     * Inserts or refreshes a message row. Tries an UPDATE first (the common case); only
     * when no row was touched does it fall back to an upserting INSERT. The queue row is
     * created on demand.
     */
    private void pushMessage(
            Connection connection,
            String queueName,
            String messageId,
            String payload,
            Integer priority,
            long offsetTimeInSecond) {
        createQueueIfNotExists(connection, queueName);
        // deliver_on is computed server-side from the offset in seconds.
        String UPDATE_MESSAGE =
                "UPDATE queue_message SET payload=?, deliver_on=datetime(CURRENT_TIMESTAMP, '+' || ? || ' seconds') WHERE queue_name = ? AND message_id = ?";
        int rowsUpdated =
                query(
                        connection,
                        UPDATE_MESSAGE,
                        q ->
                                q.addParameter(payload)
                                        .addParameter(offsetTimeInSecond)
                                        .addParameter(queueName)
                                        .addParameter(messageId)
                                        .executeUpdate());
        if (rowsUpdated == 0) {
            // No existing row: insert, with ON CONFLICT guarding a concurrent insert.
            String PUSH_MESSAGE =
                    "INSERT INTO queue_message (deliver_on, queue_name, message_id, priority, offset_time_seconds, payload) VALUES (datetime(CURRENT_TIMESTAMP, '+' || ? || ' seconds'), ?,?,?,?,?) ON CONFLICT (queue_name,message_id) DO UPDATE SET payload=excluded.payload, deliver_on=excluded.deliver_on";
            execute(
                    connection,
                    PUSH_MESSAGE,
                    q ->
                            q.addParameter(offsetTimeInSecond)
                                    .addParameter(queueName)
                                    .addParameter(messageId)
                                    .addParameter(priority)
                                    .addParameter(offsetTimeInSecond)
                                    .addParameter(payload)
                                    .executeUpdate());
        }
    }
private boolean removeMessage(Connection connection, String queueName, String messageId) {
final String REMOVE_MESSAGE =
"DELETE FROM queue_message WHERE queue_name = ? AND message_id = ?";
return query(
connection,
REMOVE_MESSAGE,
q -> q.addParameter(queueName).addParameter(messageId).executeDelete());
}
    /**
     * Marks up to {@code count} deliverable messages as popped and returns them,
     * highest priority first, then earliest deliver_on/created_on. Uses a single
     * UPDATE ... RETURNING so selection and marking are atomic.
     */
    private List<Message> popMessages(
            Connection connection, String queueName, int count, int timeout) {
        String POP_QUERY =
                "UPDATE queue_message SET popped = true WHERE message_id IN ("
                        + "SELECT message_id FROM queue_message WHERE queue_name = ? AND popped = false AND "
                        + "deliver_on <= datetime(CURRENT_TIMESTAMP, '+1 seconds') "
                        + "ORDER BY priority DESC, deliver_on, created_on LIMIT ?"
                        + ") RETURNING message_id, priority, payload";
        return query(
                connection,
                POP_QUERY,
                p ->
                        p.addParameter(queueName)
                                .addParameter(count)
                                .executeAndFetch(
                                        rs -> {
                                            List<Message> results = new ArrayList<>();
                                            while (rs.next()) {
                                                Message m = new Message();
                                                m.setId(rs.getString("message_id"));
                                                m.setPriority(rs.getInt("priority"));
                                                m.setPayload(rs.getString("payload"));
                                                results.add(m);
                                            }
                                            return results;
                                        }));
    }
@Override
public boolean containsMessage(String queueName, String messageId) {
return getWithRetriedTransactions(tx -> existsMessage(tx, queueName, messageId));
}
    /**
     * Ensures a row for {@code queueName} exists in the queue table. The INSERT also
     * carries ON CONFLICT DO NOTHING so a concurrent creation cannot fail.
     */
    private void createQueueIfNotExists(Connection connection, String queueName) {
        // Note: this trace fires on every call, even when the queue already exists.
        logger.trace("Creating new queue '{}'", queueName);
        final String EXISTS_QUEUE = "SELECT EXISTS(SELECT 1 FROM queue WHERE queue_name = ?)";
        boolean exists = query(connection, EXISTS_QUEUE, q -> q.addParameter(queueName).exists());
        if (!exists) {
            final String CREATE_QUEUE =
                    "INSERT INTO queue (queue_name) VALUES (?) ON CONFLICT (queue_name) DO NOTHING";
            execute(connection, CREATE_QUEUE, q -> q.addParameter(queueName).executeUpdate());
        }
    }
private class QueueMessage {
public String queueName;
public String messageId;
}
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/sqlite-persistence/src/main/java/com/netflix/conductor/sqlite/dao/SqliteExecutionDAO.java | sqlite-persistence/src/main/java/com/netflix/conductor/sqlite/dao/SqliteExecutionDAO.java | /*
* Copyright 2025 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.sqlite.dao;
import java.sql.Connection;
import java.sql.Date;
import java.text.SimpleDateFormat;
import java.util.*;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;
import javax.sql.DataSource;
import org.springframework.retry.support.RetryTemplate;
import com.netflix.conductor.common.metadata.events.EventExecution;
import com.netflix.conductor.common.metadata.tasks.TaskDef;
import com.netflix.conductor.core.exception.NonTransientException;
import com.netflix.conductor.dao.ConcurrentExecutionLimitDAO;
import com.netflix.conductor.dao.ExecutionDAO;
import com.netflix.conductor.dao.RateLimitingDAO;
import com.netflix.conductor.metrics.Monitors;
import com.netflix.conductor.model.TaskModel;
import com.netflix.conductor.model.WorkflowModel;
import com.netflix.conductor.sqlite.util.ExecutorsUtil;
import com.netflix.conductor.sqlite.util.Query;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
import jakarta.annotation.PreDestroy;
public class SqliteExecutionDAO extends SqliteBaseDAO
implements ExecutionDAO, RateLimitingDAO, ConcurrentExecutionLimitDAO {
private final ScheduledExecutorService scheduledExecutorService;
    /**
     * Creates the DAO and a single-threaded scheduler used by
     * {@link #removeWorkflowWithExpiry(String, int)} for delayed workflow removal.
     */
    public SqliteExecutionDAO(
            RetryTemplate retryTemplate, ObjectMapper objectMapper, DataSource dataSource) {
        super(retryTemplate, objectMapper, dataSource);
        this.scheduledExecutorService =
                Executors.newSingleThreadScheduledExecutor(
                        ExecutorsUtil.newNamedThreadFactory("sqlite-execution-"));
    }
private static String dateStr(Long timeInMs) {
Date date = new Date(timeInMs);
return dateStr(date);
}
private static String dateStr(Date date) {
SimpleDateFormat format = new SimpleDateFormat("yyyyMMdd");
return format.format(date);
}
    /**
     * Gracefully shuts down the expiry scheduler: waits up to 30 seconds for queued
     * removals to finish, then forces shutdown. Restores the interrupt flag if the
     * wait is interrupted.
     */
    @PreDestroy
    public void destroy() {
        try {
            this.scheduledExecutorService.shutdown();
            if (scheduledExecutorService.awaitTermination(30, TimeUnit.SECONDS)) {
                logger.debug("tasks completed, shutting down");
            } else {
                logger.warn("Forcing shutdown after waiting for 30 seconds");
                scheduledExecutorService.shutdownNow();
            }
        } catch (InterruptedException ie) {
            logger.warn(
                    "Shutdown interrupted, invoking shutdownNow on scheduledExecutorService for removeWorkflowWithExpiry",
                    ie);
            scheduledExecutorService.shutdownNow();
            // Re-interrupt so callers further up still see the interruption.
            Thread.currentThread().interrupt();
        }
    }
@Override
public List<TaskModel> getPendingTasksByWorkflow(String taskDefName, String workflowId) {
// @formatter:off
String GET_IN_PROGRESS_TASKS_FOR_WORKFLOW =
"SELECT json_data FROM task_in_progress tip "
+ "INNER JOIN task t ON t.task_id = tip.task_id "
+ "WHERE task_def_name = ? AND workflow_id = ?";
// @formatter:on
return queryWithTransaction(
GET_IN_PROGRESS_TASKS_FOR_WORKFLOW,
q ->
q.addParameter(taskDefName)
.addParameter(workflowId)
.executeAndFetch(TaskModel.class));
}
@Override
public List<TaskModel> getTasks(String taskDefName, String startKey, int count) {
List<TaskModel> tasks = new ArrayList<>(count);
List<TaskModel> pendingTasks = getPendingTasksForTaskType(taskDefName);
boolean startKeyFound = startKey == null;
int found = 0;
for (TaskModel pendingTask : pendingTasks) {
if (!startKeyFound) {
if (pendingTask.getTaskId().equals(startKey)) {
startKeyFound = true;
// noinspection ConstantConditions
if (startKey != null) {
continue;
}
}
}
if (startKeyFound && found < count) {
tasks.add(pendingTask);
found++;
}
}
return tasks;
}
private static String taskKey(TaskModel task) {
return task.getReferenceTaskName() + "_" + task.getRetryCount();
}
    /**
     * Persists the given tasks inside one transaction, skipping any task whose
     * (workflowId, taskKey) pair was already scheduled — scheduling acts as the
     * dedup gate before the task data, mappings and in-progress rows are written.
     *
     * @return only the tasks that were actually created (duplicates are omitted)
     */
    @Override
    public List<TaskModel> createTasks(List<TaskModel> tasks) {
        List<TaskModel> created = Lists.newArrayListWithCapacity(tasks.size());
        withTransaction(
                connection -> {
                    for (TaskModel task : tasks) {
                        validate(task);
                        task.setScheduledTime(System.currentTimeMillis());
                        final String taskKey = taskKey(task);
                        // The scheduled-task insert is the dedup check: if it did not
                        // insert, this task was created before and must be skipped.
                        boolean scheduledTaskAdded = addScheduledTask(connection, task, taskKey);
                        if (!scheduledTaskAdded) {
                            logger.trace(
                                    "Task already scheduled, skipping the run "
                                            + task.getTaskId()
                                            + ", ref="
                                            + task.getReferenceTaskName()
                                            + ", key="
                                            + taskKey);
                            continue;
                        }
                        insertOrUpdateTaskData(connection, task);
                        addWorkflowToTaskMapping(connection, task);
                        addTaskInProgress(connection, task);
                        updateTask(connection, task);
                        created.add(task);
                    }
                });
        return created;
    }
@Override
public void updateTask(TaskModel task) {
withTransaction(connection -> updateTask(connection, task));
}
/**
* This is a dummy implementation and this feature is not for sqlite backed Conductor
*
* @param task: which needs to be evaluated whether it is rateLimited or not
*/
@Override
public boolean exceedsRateLimitPerFrequency(TaskModel task, TaskDef taskDef) {
return false;
}
@Override
public boolean exceedsLimit(TaskModel task) {
Optional<TaskDef> taskDefinition = task.getTaskDefinition();
if (taskDefinition.isEmpty()) {
return false;
}
TaskDef taskDef = taskDefinition.get();
int limit = taskDef.concurrencyLimit();
if (limit <= 0) {
return false;
}
long current = getInProgressTaskCount(task.getTaskDefName());
if (current >= limit) {
Monitors.recordTaskConcurrentExecutionLimited(task.getTaskDefName(), limit);
return true;
}
logger.info(
"Task execution count for {}: limit={}, current={}",
task.getTaskDefName(),
limit,
getInProgressTaskCount(task.getTaskDefName()));
String taskId = task.getTaskId();
List<String> tasksInProgressInOrderOfArrival =
findAllTasksInProgressInOrderOfArrival(task, limit);
boolean rateLimited = !tasksInProgressInOrderOfArrival.contains(taskId);
if (rateLimited) {
logger.info(
"Task execution count limited. {}, limit {}, current {}",
task.getTaskDefName(),
limit,
getInProgressTaskCount(task.getTaskDefName()));
Monitors.recordTaskConcurrentExecutionLimited(task.getTaskDefName(), limit);
}
return rateLimited;
}
    /**
     * Removes a task and all of its bookkeeping rows (schedule, workflow mapping,
     * in-progress marker, task data) in one transaction.
     *
     * @return false when no task with the given id exists
     */
    @Override
    public boolean removeTask(String taskId) {
        TaskModel task = getTask(taskId);
        if (task == null) {
            logger.warn("No such task found by id {}", taskId);
            return false;
        }
        final String taskKey = taskKey(task);
        withTransaction(
                connection -> {
                    removeScheduledTask(connection, task, taskKey);
                    removeWorkflowToTaskMapping(connection, task);
                    removeTaskInProgress(connection, task);
                    removeTaskData(connection, task);
                });
        return true;
    }
@Override
public TaskModel getTask(String taskId) {
String GET_TASK = "SELECT json_data FROM task WHERE task_id = ?";
return queryWithTransaction(
GET_TASK, q -> q.addParameter(taskId).executeAndFetchFirst(TaskModel.class));
}
@Override
public List<TaskModel> getTasks(List<String> taskIds) {
if (taskIds.isEmpty()) {
return Lists.newArrayList();
}
return getWithRetriedTransactions(c -> getTasks(c, taskIds));
}
@Override
public List<TaskModel> getPendingTasksForTaskType(String taskName) {
Preconditions.checkNotNull(taskName, "task name cannot be null");
// @formatter:off
String GET_IN_PROGRESS_TASKS_FOR_TYPE =
"SELECT json_data FROM task_in_progress tip "
+ "INNER JOIN task t ON t.task_id = tip.task_id "
+ "WHERE task_def_name = ?";
// @formatter:on
return queryWithTransaction(
GET_IN_PROGRESS_TASKS_FOR_TYPE,
q -> q.addParameter(taskName).executeAndFetch(TaskModel.class));
}
    /**
     * Loads all tasks mapped to a workflow: first the task ids from workflow_to_task,
     * then the task bodies, both inside the same transaction.
     */
    @Override
    public List<TaskModel> getTasksForWorkflow(String workflowId) {
        String GET_TASKS_FOR_WORKFLOW =
                "SELECT task_id FROM workflow_to_task WHERE workflow_id = ?";
        return getWithRetriedTransactions(
                tx ->
                        query(
                                tx,
                                GET_TASKS_FOR_WORKFLOW,
                                q -> {
                                    List<String> taskIds =
                                            q.addParameter(workflowId)
                                                    .executeScalarList(String.class);
                                    // Resolve ids to task bodies on the same connection.
                                    return getTasks(tx, taskIds);
                                }));
    }
@Override
public String createWorkflow(WorkflowModel workflow) {
return insertOrUpdateWorkflow(workflow, false);
}
@Override
public String updateWorkflow(WorkflowModel workflow) {
return insertOrUpdateWorkflow(workflow, true);
}
    /**
     * Removes a workflow, its def/pending bookkeeping rows, and then each of its tasks.
     *
     * @return true only when the workflow existed AND every task removal succeeded
     */
    @Override
    public boolean removeWorkflow(String workflowId) {
        boolean removed = false;
        WorkflowModel workflow = getWorkflow(workflowId, true);
        if (workflow != null) {
            withTransaction(
                    connection -> {
                        removeWorkflowDefToWorkflowMapping(connection, workflow);
                        removeWorkflow(connection, workflowId);
                        removePendingWorkflow(connection, workflow.getWorkflowName(), workflowId);
                    });
            removed = true;
            // Task removal runs in separate transactions; any failure flips the result.
            for (TaskModel task : workflow.getTasks()) {
                if (!removeTask(task.getTaskId())) {
                    removed = false;
                }
            }
        }
        return removed;
    }
/** Scheduled executor based implementation. */
@Override
public boolean removeWorkflowWithExpiry(String workflowId, int ttlSeconds) {
scheduledExecutorService.schedule(
() -> {
try {
removeWorkflow(workflowId);
} catch (Throwable e) {
logger.warn("Unable to remove workflow: {} with expiry", workflowId, e);
}
},
ttlSeconds,
TimeUnit.SECONDS);
return true;
}
@Override
public void removeFromPendingWorkflow(String workflowType, String workflowId) {
withTransaction(connection -> removePendingWorkflow(connection, workflowType, workflowId));
}
@Override
public WorkflowModel getWorkflow(String workflowId) {
return getWorkflow(workflowId, true);
}
@Override
public WorkflowModel getWorkflow(String workflowId, boolean includeTasks) {
WorkflowModel workflow = getWithRetriedTransactions(tx -> readWorkflow(tx, workflowId));
if (workflow != null) {
if (includeTasks) {
List<TaskModel> tasks = getTasksForWorkflow(workflowId);
tasks.sort(Comparator.comparingInt(TaskModel::getSeq));
workflow.setTasks(tasks);
}
}
return workflow;
}
    /**
     * @param workflowName name of the workflow
     * @param version the workflow version
     * @return list of workflow ids that are in RUNNING state <em>returns workflows of all versions
     *     for the given workflow name</em>
     */
    @Override
    public List<String> getRunningWorkflowIds(String workflowName, int version) {
        Preconditions.checkNotNull(workflowName, "workflowName cannot be null");
        // Note: the version parameter is intentionally not used here; the pending table
        // is keyed by workflow type only (see the javadoc above). Callers that need
        // version filtering do it themselves (e.g. getPendingWorkflowsByType).
        String GET_PENDING_WORKFLOW_IDS =
                "SELECT workflow_id FROM workflow_pending WHERE workflow_type = ?";
        return queryWithTransaction(
                GET_PENDING_WORKFLOW_IDS,
                q -> q.addParameter(workflowName).executeScalarList(String.class));
    }
/**
* @param workflowName Name of the workflow
* @param version the workflow version
* @return list of workflows that are in RUNNING state
*/
@Override
public List<WorkflowModel> getPendingWorkflowsByType(String workflowName, int version) {
Preconditions.checkNotNull(workflowName, "workflowName cannot be null");
return getRunningWorkflowIds(workflowName, version).stream()
.map(this::getWorkflow)
.filter(workflow -> workflow.getWorkflowVersion() == version)
.collect(Collectors.toList());
}
@Override
public long getPendingWorkflowCount(String workflowName) {
Preconditions.checkNotNull(workflowName, "workflowName cannot be null");
String GET_PENDING_WORKFLOW_COUNT =
"SELECT COUNT(*) FROM workflow_pending WHERE workflow_type = ?";
return queryWithTransaction(
GET_PENDING_WORKFLOW_COUNT, q -> q.addParameter(workflowName).executeCount());
}
@Override
public long getInProgressTaskCount(String taskDefName) {
String GET_IN_PROGRESS_TASK_COUNT =
"SELECT COUNT(*) FROM task_in_progress WHERE task_def_name = ? AND in_progress_status = true";
return queryWithTransaction(
GET_IN_PROGRESS_TASK_COUNT, q -> q.addParameter(taskDefName).executeCount());
}
    /**
     * Returns workflows of the given definition created between {@code startTime} and
     * {@code endTime} (epoch millis, inclusive). Candidates are pre-filtered by the
     * coarse per-day date_str index, then filtered precisely on createTime; workflows
     * that fail to load are logged and skipped rather than failing the whole call.
     */
    @Override
    public List<WorkflowModel> getWorkflowsByType(
            String workflowName, Long startTime, Long endTime) {
        Preconditions.checkNotNull(workflowName, "workflowName cannot be null");
        Preconditions.checkNotNull(startTime, "startTime cannot be null");
        Preconditions.checkNotNull(endTime, "endTime cannot be null");
        List<WorkflowModel> workflows = new LinkedList<>();
        withTransaction(
                tx -> {
                    // @formatter:off
                    String GET_ALL_WORKFLOWS_FOR_WORKFLOW_DEF =
                            "SELECT workflow_id FROM workflow_def_to_workflow "
                                    + "WHERE workflow_def = ? AND date_str BETWEEN ? AND ?";
                    // @formatter:on
                    List<String> workflowIds =
                            query(
                                    tx,
                                    GET_ALL_WORKFLOWS_FOR_WORKFLOW_DEF,
                                    q ->
                                            q.addParameter(workflowName)
                                                    .addParameter(dateStr(startTime))
                                                    .addParameter(dateStr(endTime))
                                                    .executeScalarList(String.class));
                    workflowIds.forEach(
                            workflowId -> {
                                try {
                                    WorkflowModel wf = getWorkflow(workflowId);
                                    // date_str is day-granular; apply the exact bounds here.
                                    if (wf.getCreateTime() >= startTime
                                            && wf.getCreateTime() <= endTime) {
                                        workflows.add(wf);
                                    }
                                } catch (Exception e) {
                                    logger.error(
                                            "Unable to load workflow id {} with name {}",
                                            workflowId,
                                            workflowName,
                                            e);
                                }
                            });
                });
        return workflows;
    }
@Override
public List<WorkflowModel> getWorkflowsByCorrelationId(
String workflowName, String correlationId, boolean includeTasks) {
Preconditions.checkNotNull(correlationId, "correlationId cannot be null");
String GET_WORKFLOWS_BY_CORRELATION_ID =
"SELECT w.json_data FROM workflow w left join workflow_def_to_workflow wd on w.workflow_id = wd.workflow_id WHERE w.correlation_id = ? and wd.workflow_def = ?";
return queryWithTransaction(
GET_WORKFLOWS_BY_CORRELATION_ID,
q ->
q.addParameter(correlationId)
.addParameter(workflowName)
.executeAndFetch(WorkflowModel.class));
}
@Override
public boolean canSearchAcrossWorkflows() {
return true;
}
@Override
public boolean addEventExecution(EventExecution eventExecution) {
try {
return getWithRetriedTransactions(tx -> insertEventExecution(tx, eventExecution));
} catch (Exception e) {
throw new NonTransientException(
"Unable to add event execution " + eventExecution.getId(), e);
}
}
@Override
public void removeEventExecution(EventExecution eventExecution) {
try {
withTransaction(tx -> removeEventExecution(tx, eventExecution));
} catch (Exception e) {
throw new NonTransientException(
"Unable to remove event execution " + eventExecution.getId(), e);
}
}
@Override
public void updateEventExecution(EventExecution eventExecution) {
try {
withTransaction(tx -> updateEventExecution(tx, eventExecution));
} catch (Exception e) {
throw new NonTransientException(
"Unable to update event execution " + eventExecution.getId(), e);
}
}
    /**
     * Reads up to {@code max} executions of one event message. Execution ids are derived
     * as "&lt;messageId&gt;_&lt;i&gt;" for increasing i and reading stops at the first
     * missing id, so the result is the contiguous prefix of stored executions.
     */
    public List<EventExecution> getEventExecutions(
            String eventHandlerName, String eventName, String messageId, int max) {
        try {
            List<EventExecution> executions = Lists.newLinkedList();
            withTransaction(
                    tx -> {
                        for (int i = 0; i < max; i++) {
                            String executionId =
                                    messageId + "_"
                                            + i; // see SimpleEventProcessor.handle to understand
                            // how the
                            // execution id is set
                            EventExecution ee =
                                    readEventExecution(
                                            tx,
                                            eventHandlerName,
                                            eventName,
                                            messageId,
                                            executionId);
                            if (ee == null) {
                                // Ids are assigned contiguously; the first gap ends the scan.
                                break;
                            }
                            executions.add(ee);
                        }
                    });
            return executions;
        } catch (Exception e) {
            String message =
                    String.format(
                            "Unable to get event executions for eventHandlerName=%s, eventName=%s, messageId=%s",
                            eventHandlerName, eventName, messageId);
            throw new NonTransientException(message, e);
        }
    }
private List<TaskModel> getTasks(Connection connection, List<String> taskIds) {
if (taskIds.isEmpty()) {
return Lists.newArrayList();
}
// Generate a formatted query string with a variable number of bind params based
// on taskIds.size()
final String GET_TASKS_FOR_IDS =
String.format(
"SELECT json_data FROM task WHERE task_id IN (%s) AND json_data IS NOT NULL",
Query.generateInBindings(taskIds.size()));
return query(
connection,
GET_TASKS_FOR_IDS,
q -> q.addParameters(taskIds).executeAndFetch(TaskModel.class));
}
    /**
     * Shared insert/update path for workflows. The task list is detached before
     * serialization (tasks are stored separately) and re-attached afterwards; terminal
     * workflows are removed from the pending set, non-terminal ones are added to it.
     *
     * @return the workflow id
     */
    private String insertOrUpdateWorkflow(WorkflowModel workflow, boolean update) {
        Preconditions.checkNotNull(workflow, "workflow object cannot be null");
        boolean terminal = workflow.getStatus().isTerminal();
        // Strip tasks so the serialized workflow row does not duplicate task data.
        List<TaskModel> tasks = workflow.getTasks();
        workflow.setTasks(Lists.newLinkedList());
        withTransaction(
                tx -> {
                    if (!update) {
                        addWorkflow(tx, workflow);
                        addWorkflowDefToWorkflowMapping(tx, workflow);
                    } else {
                        updateWorkflow(tx, workflow);
                    }
                    if (terminal) {
                        removePendingWorkflow(
                                tx, workflow.getWorkflowName(), workflow.getWorkflowId());
                    } else {
                        addPendingWorkflow(
                                tx, workflow.getWorkflowName(), workflow.getWorkflowId());
                    }
                });
        // Restore the caller's task list on the in-memory object.
        workflow.setTasks(tasks);
        return workflow.getWorkflowId();
    }
    /**
     * Persists a task's state on an existing connection: maintains the in-progress flag
     * for concurrency-limited task types, upserts the task body, clears the in-progress
     * row for terminal tasks, and ensures the workflow-to-task mapping exists.
     */
    private void updateTask(Connection connection, TaskModel task) {
        Optional<TaskDef> taskDefinition = task.getTaskDefinition();
        // Only concurrency-limited task types track the in_progress_status flag.
        if (taskDefinition.isPresent() && taskDefinition.get().concurrencyLimit() > 0) {
            boolean inProgress =
                    task.getStatus() != null
                            && task.getStatus().equals(TaskModel.Status.IN_PROGRESS);
            updateInProgressStatus(connection, task, inProgress);
        }
        insertOrUpdateTaskData(connection, task);
        if (task.getStatus() != null && task.getStatus().isTerminal()) {
            removeTaskInProgress(connection, task);
        }
        addWorkflowToTaskMapping(connection, task);
    }
private WorkflowModel readWorkflow(Connection connection, String workflowId) {
String GET_WORKFLOW = "SELECT json_data FROM workflow WHERE workflow_id = ?";
return query(
connection,
GET_WORKFLOW,
q -> q.addParameter(workflowId).executeAndFetchFirst(WorkflowModel.class));
}
private void addWorkflow(Connection connection, WorkflowModel workflow) {
String INSERT_WORKFLOW =
"INSERT INTO workflow (workflow_id, correlation_id, json_data) VALUES (?, ?, ?)";
execute(
connection,
INSERT_WORKFLOW,
q ->
q.addParameter(workflow.getWorkflowId())
.addParameter(workflow.getCorrelationId())
.addJsonParameter(workflow)
.executeUpdate());
}
private void updateWorkflow(Connection connection, WorkflowModel workflow) {
String UPDATE_WORKFLOW =
"UPDATE workflow SET json_data = ?, modified_on = CURRENT_TIMESTAMP WHERE workflow_id = ?";
execute(
connection,
UPDATE_WORKFLOW,
q ->
q.addJsonParameter(workflow)
.addParameter(workflow.getWorkflowId())
.executeUpdate());
}
private void removeWorkflow(Connection connection, String workflowId) {
String REMOVE_WORKFLOW = "DELETE FROM workflow WHERE workflow_id = ?";
execute(connection, REMOVE_WORKFLOW, q -> q.addParameter(workflowId).executeDelete());
}
private void addPendingWorkflow(Connection connection, String workflowType, String workflowId) {
String EXISTS_PENDING_WORKFLOW =
"SELECT EXISTS(SELECT 1 FROM workflow_pending WHERE workflow_type = ? AND workflow_id = ?)";
boolean exists =
query(
connection,
EXISTS_PENDING_WORKFLOW,
q -> q.addParameter(workflowType).addParameter(workflowId).exists());
if (!exists) {
String INSERT_PENDING_WORKFLOW =
"INSERT INTO workflow_pending (workflow_type, workflow_id) VALUES (?, ?) ON CONFLICT (workflow_type,workflow_id) DO NOTHING";
execute(
connection,
INSERT_PENDING_WORKFLOW,
q -> q.addParameter(workflowType).addParameter(workflowId).executeUpdate());
}
}
private void removePendingWorkflow(
Connection connection, String workflowType, String workflowId) {
String REMOVE_PENDING_WORKFLOW =
"DELETE FROM workflow_pending WHERE workflow_type = ? AND workflow_id = ?";
execute(
connection,
REMOVE_PENDING_WORKFLOW,
q -> q.addParameter(workflowType).addParameter(workflowId).executeDelete());
}
    /**
     * Upserts a task's serialized body. UPDATE is attempted first (the common case);
     * only a missed update falls through to the conflict-safe INSERT.
     */
    private void insertOrUpdateTaskData(Connection connection, TaskModel task) {
        /*
         * Most times the row will be updated so let's try the update first. This used to be an 'INSERT/ON CONFLICT do update' sql statement. The problem with that
         * is that if we try the INSERT first, the sequence will be increased even if the ON CONFLICT happens.
         */
        String UPDATE_TASK =
                "UPDATE task SET json_data=?, modified_on=CURRENT_TIMESTAMP WHERE task_id=?";
        int rowsUpdated =
                query(
                        connection,
                        UPDATE_TASK,
                        q ->
                                q.addJsonParameter(task)
                                        .addParameter(task.getTaskId())
                                        .executeUpdate());
        if (rowsUpdated == 0) {
            // Row absent: insert; ON CONFLICT still updates if a racer inserted first.
            String INSERT_TASK =
                    "INSERT INTO task (task_id, json_data, modified_on) VALUES (?, ?, CURRENT_TIMESTAMP) ON CONFLICT (task_id) DO UPDATE SET json_data=excluded.json_data, modified_on=excluded.modified_on";
            execute(
                    connection,
                    INSERT_TASK,
                    q -> q.addParameter(task.getTaskId()).addJsonParameter(task).executeUpdate());
        }
    }
private void removeTaskData(Connection connection, TaskModel task) {
String REMOVE_TASK = "DELETE FROM task WHERE task_id = ?";
execute(connection, REMOVE_TASK, q -> q.addParameter(task.getTaskId()).executeDelete());
}
private void addWorkflowToTaskMapping(Connection connection, TaskModel task) {
String EXISTS_WORKFLOW_TO_TASK =
"SELECT EXISTS(SELECT 1 FROM workflow_to_task WHERE workflow_id = ? AND task_id = ?)";
boolean exists =
query(
connection,
EXISTS_WORKFLOW_TO_TASK,
q ->
q.addParameter(task.getWorkflowInstanceId())
.addParameter(task.getTaskId())
.exists());
if (!exists) {
String INSERT_WORKFLOW_TO_TASK =
"INSERT INTO workflow_to_task (workflow_id, task_id) VALUES (?, ?) ON CONFLICT (workflow_id,task_id) DO NOTHING";
execute(
connection,
INSERT_WORKFLOW_TO_TASK,
q ->
q.addParameter(task.getWorkflowInstanceId())
.addParameter(task.getTaskId())
.executeUpdate());
}
}
private void removeWorkflowToTaskMapping(Connection connection, TaskModel task) {
String REMOVE_WORKFLOW_TO_TASK =
"DELETE FROM workflow_to_task WHERE workflow_id = ? AND task_id = ?";
execute(
connection,
REMOVE_WORKFLOW_TO_TASK,
q ->
q.addParameter(task.getWorkflowInstanceId())
.addParameter(task.getTaskId())
.executeDelete());
}
private void addWorkflowDefToWorkflowMapping(Connection connection, WorkflowModel workflow) {
String INSERT_WORKFLOW_DEF_TO_WORKFLOW =
"INSERT INTO workflow_def_to_workflow (workflow_def, date_str, workflow_id) VALUES (?, ?, ?)";
execute(
connection,
INSERT_WORKFLOW_DEF_TO_WORKFLOW,
q ->
q.addParameter(workflow.getWorkflowName())
.addParameter(dateStr(workflow.getCreateTime()))
.addParameter(workflow.getWorkflowId())
.executeUpdate());
}
private void removeWorkflowDefToWorkflowMapping(Connection connection, WorkflowModel workflow) {
String REMOVE_WORKFLOW_DEF_TO_WORKFLOW =
"DELETE FROM workflow_def_to_workflow WHERE workflow_def = ? AND date_str = ? AND workflow_id = ?";
execute(
connection,
REMOVE_WORKFLOW_DEF_TO_WORKFLOW,
q ->
q.addParameter(workflow.getWorkflowName())
.addParameter(dateStr(workflow.getCreateTime()))
.addParameter(workflow.getWorkflowId())
.executeUpdate());
}
@VisibleForTesting
boolean addScheduledTask(Connection connection, TaskModel task, String taskKey) {
final String EXISTS_SCHEDULED_TASK =
"SELECT EXISTS(SELECT 1 FROM task_scheduled where workflow_id = ? AND task_key = ?)";
boolean exists =
query(
connection,
EXISTS_SCHEDULED_TASK,
q ->
q.addParameter(task.getWorkflowInstanceId())
.addParameter(taskKey)
.exists());
if (!exists) {
final String INSERT_IGNORE_SCHEDULED_TASK =
"INSERT INTO task_scheduled (workflow_id, task_key, task_id) VALUES (?, ?, ?) ON CONFLICT (workflow_id,task_key) DO NOTHING";
int count =
query(
connection,
INSERT_IGNORE_SCHEDULED_TASK,
q ->
q.addParameter(task.getWorkflowInstanceId())
.addParameter(taskKey)
.addParameter(task.getTaskId())
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | true |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/sqlite-persistence/src/main/java/com/netflix/conductor/sqlite/dao/SqliteBaseDAO.java | sqlite-persistence/src/main/java/com/netflix/conductor/sqlite/dao/SqliteBaseDAO.java | /*
* Copyright 2025 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.sqlite.dao;
import java.io.IOException;
import java.sql.Connection;
import java.sql.SQLException;
import java.sql.Statement;
import java.time.Duration;
import java.time.Instant;
import java.util.Arrays;
import java.util.List;
import java.util.function.Consumer;
import javax.sql.DataSource;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.retry.support.RetryTemplate;
import com.netflix.conductor.core.exception.NonTransientException;
import com.netflix.conductor.sqlite.util.*;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.ObjectMapper;
/**
 * Base class for the SQLite-backed DAO implementations. Supplies Jackson-based JSON
 * (de)serialization helpers and JDBC transaction management on top of the injected
 * {@link DataSource}.
 *
 * <p>Every transaction runs with auto-commit disabled, {@code TRANSACTION_SERIALIZABLE}
 * isolation, and SQLite foreign-key enforcement enabled via {@code PRAGMA foreign_keys = ON}.
 */
public abstract class SqliteBaseDAO {
    // Classes whose stack frames are skipped when resolving the calling method for trace logs.
    private static final List<String> EXCLUDED_STACKTRACE_CLASS =
            List.of(SqliteBaseDAO.class.getName(), Thread.class.getName());
    protected final Logger logger = LoggerFactory.getLogger(getClass());
    protected final ObjectMapper objectMapper;
    protected final DataSource dataSource;
    private final RetryTemplate retryTemplate;
    protected SqliteBaseDAO(
            RetryTemplate retryTemplate, ObjectMapper objectMapper, DataSource dataSource) {
        this.retryTemplate = retryTemplate;
        this.objectMapper = objectMapper;
        this.dataSource = dataSource;
    }
    /**
     * Serializes {@code value} to a JSON string.
     *
     * @throws NonTransientException if serialization fails
     */
    protected String toJson(Object value) {
        try {
            return objectMapper.writeValueAsString(value);
        } catch (JsonProcessingException ex) {
            throw new NonTransientException(ex.getMessage(), ex);
        }
    }
    /**
     * Deserializes {@code json} into an instance of {@code tClass}.
     *
     * @throws NonTransientException if deserialization fails
     */
    protected <T> T readValue(String json, Class<T> tClass) {
        try {
            return objectMapper.readValue(json, tClass);
        } catch (IOException ex) {
            throw new NonTransientException(ex.getMessage(), ex);
        }
    }
    /**
     * Deserializes {@code json} into the generic type described by {@code typeReference}.
     *
     * @throws NonTransientException if deserialization fails
     */
    protected <T> T readValue(String json, TypeReference<T> typeReference) {
        try {
            return objectMapper.readValue(json, typeReference);
        } catch (IOException ex) {
            throw new NonTransientException(ex.getMessage(), ex);
        }
    }
    /**
     * Runs {@code function} inside a single JDBC transaction and returns its result.
     *
     * <p>Commits on success; rolls back on any throwable. A {@link NonTransientException} thrown
     * by {@code function} is propagated as-is; any other throwable is wrapped in one. The
     * connection's previous auto-commit mode is restored before the connection is closed.
     */
    private <R> R getWithTransaction(final TransactionalFunction<R> function) {
        final Instant start = Instant.now();
        LazyToString callingMethod = getCallingMethod();
        logger.trace("{} : starting transaction", callingMethod);
        try (Connection tx = dataSource.getConnection()) {
            boolean previousAutoCommitMode = tx.getAutoCommit();
            tx.setAutoCommit(false);
            tx.setTransactionIsolation(Connection.TRANSACTION_SERIALIZABLE);
            try {
                // Enable foreign keys for SQLite
                try (Statement stmt = tx.createStatement()) {
                    stmt.execute("PRAGMA foreign_keys = ON");
                }
                R result = function.apply(tx);
                tx.commit();
                return result;
            } catch (Throwable th) {
                tx.rollback();
                // Preserve NonTransientException unchanged so callers can distinguish it.
                if (th instanceof NonTransientException) {
                    throw th;
                }
                throw new NonTransientException(th.getMessage(), th);
            } finally {
                tx.setAutoCommit(previousAutoCommitMode);
            }
        } catch (SQLException ex) {
            throw new NonTransientException(ex.getMessage(), ex);
        } finally {
            logger.trace(
                    "{} : took {}ms",
                    callingMethod,
                    Duration.between(start, Instant.now()).toMillis());
        }
    }
    /**
     * Runs {@code function} in a transaction, retrying per the configured {@link RetryTemplate}.
     * Any failure surviving the retries is rethrown wrapped in {@link NonTransientException}.
     */
    protected <R> R getWithRetriedTransactions(final TransactionalFunction<R> function) {
        try {
            return retryTemplate.execute(context -> getWithTransaction(function));
        } catch (Exception e) {
            throw new NonTransientException(e.getMessage(), e);
        }
    }
    /**
     * Like {@link #getWithTransaction(TransactionalFunction)}, but deliberately does not
     * propagate failures raised by {@code function}: the transaction is rolled back, the message
     * is logged at INFO level, and {@code null} is returned. SQL errors while obtaining or
     * closing the connection are still rethrown as {@link NonTransientException}.
     */
    protected <R> R getWithTransactionWithOutErrorPropagation(TransactionalFunction<R> function) {
        Instant start = Instant.now();
        LazyToString callingMethod = getCallingMethod();
        logger.trace("{} : starting transaction", callingMethod);
        try (Connection tx = dataSource.getConnection()) {
            boolean previousAutoCommitMode = tx.getAutoCommit();
            tx.setAutoCommit(false);
            tx.setTransactionIsolation(Connection.TRANSACTION_SERIALIZABLE);
            try {
                // Enable foreign keys for SQLite
                try (Statement stmt = tx.createStatement()) {
                    stmt.execute("PRAGMA foreign_keys = ON");
                }
                R result = function.apply(tx);
                tx.commit();
                return result;
            } catch (Throwable th) {
                tx.rollback();
                // Swallow the error by design: log and signal failure with a null result.
                logger.info(th.getMessage());
                return null;
            } finally {
                tx.setAutoCommit(previousAutoCommitMode);
            }
        } catch (SQLException ex) {
            throw new NonTransientException(ex.getMessage(), ex);
        } finally {
            logger.trace(
                    "{} : took {}ms",
                    callingMethod,
                    Duration.between(start, Instant.now()).toMillis());
        }
    }
    /** Runs {@code consumer} inside a retried transaction that produces no result. */
    protected void withTransaction(Consumer<Connection> consumer) {
        getWithRetriedTransactions(
                connection -> {
                    consumer.accept(connection);
                    return null;
                });
    }
    /** Prepares {@code query} inside a retried transaction and returns {@code function}'s result. */
    protected <R> R queryWithTransaction(String query, QueryFunction<R> function) {
        return getWithRetriedTransactions(tx -> query(tx, query, function));
    }
    /**
     * Prepares {@code query} on the given connection and applies {@code function} to it.
     *
     * @throws NonTransientException if the statement cannot be prepared or executed
     */
    protected <R> R query(Connection tx, String query, QueryFunction<R> function) {
        try (Query q = new Query(objectMapper, tx, query)) {
            return function.apply(q);
        } catch (SQLException ex) {
            throw new NonTransientException(ex.getMessage(), ex);
        }
    }
    /**
     * Prepares {@code query} on the given connection and executes it for side effects only.
     *
     * @throws NonTransientException if the statement cannot be prepared or executed
     */
    protected void execute(Connection tx, String query, ExecuteFunction function) {
        try (Query q = new Query(objectMapper, tx, query)) {
            function.apply(q);
        } catch (SQLException ex) {
            throw new NonTransientException(ex.getMessage(), ex);
        }
    }
    /** Executes {@code query} for its side effects inside a retried transaction. */
    protected void executeWithTransaction(String query, ExecuteFunction function) {
        withTransaction(tx -> execute(tx, query, function));
    }
    /**
     * Lazily resolves the name of the first method on the current stack that does not belong to
     * {@link #EXCLUDED_STACKTRACE_CLASS}; used only for trace-level transaction logging.
     */
    protected final LazyToString getCallingMethod() {
        return new LazyToString(
                () ->
                        Arrays.stream(Thread.currentThread().getStackTrace())
                                .filter(
                                        ste ->
                                                !EXCLUDED_STACKTRACE_CLASS.contains(
                                                        ste.getClassName()))
                                .findFirst()
                                .map(StackTraceElement::getMethodName)
                                .orElseThrow(() -> new NullPointerException("Cannot find Caller")));
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/sqlite-persistence/src/main/java/com/netflix/conductor/sqlite/dao/metadata/SqliteEventHandlerMetadataDAO.java | sqlite-persistence/src/main/java/com/netflix/conductor/sqlite/dao/metadata/SqliteEventHandlerMetadataDAO.java | /*
* Copyright 2025 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.sqlite.dao.metadata;
import java.sql.Connection;
import java.util.ArrayList;
import java.util.List;
import javax.sql.DataSource;
import org.springframework.retry.support.RetryTemplate;
import com.netflix.conductor.common.metadata.events.EventHandler;
import com.netflix.conductor.core.exception.ConflictException;
import com.netflix.conductor.core.exception.NotFoundException;
import com.netflix.conductor.sqlite.dao.SqliteBaseDAO;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.common.base.Preconditions;
/**
 * SQLite DAO for {@link EventHandler} metadata kept in the {@code meta_event_handler} table.
 * Each handler is stored as JSON alongside its name, event, and active flag.
 */
public class SqliteEventHandlerMetadataDAO extends SqliteBaseDAO {
    public SqliteEventHandlerMetadataDAO(
            RetryTemplate retryTemplate, ObjectMapper objectMapper, DataSource dataSource) {
        super(retryTemplate, objectMapper, dataSource);
    }
    /**
     * Persists a new event handler.
     *
     * @throws ConflictException if a handler with the same name already exists
     */
    public void addEventHandler(EventHandler eventHandler) {
        Preconditions.checkNotNull(eventHandler.getName(), "EventHandler name cannot be null");
        final String INSERT_EVENT_HANDLER_QUERY =
                "INSERT INTO meta_event_handler (name, event, active, json_data) "
                        + "VALUES (?, ?, ?, ?)";
        withTransaction(
                connection -> {
                    // Uniqueness check runs in the same transaction as the insert.
                    EventHandler existing = getEventHandler(connection, eventHandler.getName());
                    if (existing != null) {
                        throw new ConflictException(
                                "EventHandler with name "
                                        + eventHandler.getName()
                                        + " already exists!");
                    }
                    execute(
                            connection,
                            INSERT_EVENT_HANDLER_QUERY,
                            stmt ->
                                    stmt.addParameter(eventHandler.getName())
                                            .addParameter(eventHandler.getEvent())
                                            .addParameter(eventHandler.isActive())
                                            .addJsonParameter(eventHandler)
                                            .executeUpdate());
                });
    }
    /**
     * Updates an existing event handler in place.
     *
     * @throws NotFoundException if no handler with the given name exists
     */
    public void updateEventHandler(EventHandler eventHandler) {
        Preconditions.checkNotNull(eventHandler.getName(), "EventHandler name cannot be null")
                ;
        final String UPDATE_EVENT_HANDLER_QUERY =
                "UPDATE meta_event_handler SET "
                        + "event = ?, active = ?, json_data = ?, "
                        + "modified_on = CURRENT_TIMESTAMP WHERE name = ?";
        withTransaction(
                connection -> {
                    if (getEventHandler(connection, eventHandler.getName()) == null) {
                        throw new NotFoundException(
                                "EventHandler with name " + eventHandler.getName() + " not found!");
                    }
                    execute(
                            connection,
                            UPDATE_EVENT_HANDLER_QUERY,
                            stmt ->
                                    stmt.addParameter(eventHandler.getEvent())
                                            .addParameter(eventHandler.isActive())
                                            .addJsonParameter(eventHandler)
                                            .addParameter(eventHandler.getName())
                                            .executeUpdate());
                });
    }
    /**
     * Deletes the handler with the given name.
     *
     * @throws NotFoundException if no handler with the given name exists
     */
    public void removeEventHandler(String name) {
        final String DELETE_EVENT_HANDLER_QUERY = "DELETE FROM meta_event_handler WHERE name = ?";
        withTransaction(
                connection -> {
                    if (getEventHandler(connection, name) == null) {
                        throw new NotFoundException(
                                "EventHandler with name " + name + " not found!");
                    }
                    execute(
                            connection,
                            DELETE_EVENT_HANDLER_QUERY,
                            stmt -> stmt.addParameter(name).executeDelete());
                });
    }
    /**
     * Fetches all handlers registered for {@code event}. When {@code activeOnly} is set, the
     * inactive handlers are filtered out in memory after the rows are read.
     */
    public List<EventHandler> getEventHandlersForEvent(String event, boolean activeOnly) {
        final String READ_ALL_EVENT_HANDLER_BY_EVENT_QUERY =
                "SELECT json_data FROM meta_event_handler WHERE event = ?";
        return queryWithTransaction(
                READ_ALL_EVENT_HANDLER_BY_EVENT_QUERY,
                stmt ->
                        stmt.addParameter(event)
                                .executeAndFetch(
                                        rs -> {
                                            List<EventHandler> matches = new ArrayList<>();
                                            while (rs.next()) {
                                                EventHandler handler =
                                                        readValue(
                                                                rs.getString(1),
                                                                EventHandler.class);
                                                if (activeOnly && !handler.isActive()) {
                                                    continue;
                                                }
                                                matches.add(handler);
                                            }
                                            return matches;
                                        }));
    }
    /** Returns every stored event handler, regardless of event or active flag. */
    public List<EventHandler> getAllEventHandlers() {
        final String READ_ALL_EVENT_HANDLER_QUERY = "SELECT json_data FROM meta_event_handler";
        return queryWithTransaction(
                READ_ALL_EVENT_HANDLER_QUERY, stmt -> stmt.executeAndFetch(EventHandler.class));
    }
    /** Loads a single handler by name on the given connection; {@code null} when absent. */
    private EventHandler getEventHandler(Connection connection, String name) {
        final String READ_ONE_EVENT_HANDLER_QUERY =
                "SELECT json_data FROM meta_event_handler WHERE name = ?";
        return query(
                connection,
                READ_ONE_EVENT_HANDLER_QUERY,
                stmt -> stmt.addParameter(name).executeAndFetchFirst(EventHandler.class));
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/sqlite-persistence/src/main/java/com/netflix/conductor/sqlite/dao/metadata/SqliteMetadataDAO.java | sqlite-persistence/src/main/java/com/netflix/conductor/sqlite/dao/metadata/SqliteMetadataDAO.java | /*
* Copyright 2025 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.sqlite.dao.metadata;
import java.util.List;
import java.util.Optional;
import com.netflix.conductor.common.metadata.events.EventHandler;
import com.netflix.conductor.common.metadata.tasks.TaskDef;
import com.netflix.conductor.common.metadata.workflow.WorkflowDef;
import com.netflix.conductor.dao.EventHandlerDAO;
import com.netflix.conductor.dao.MetadataDAO;
import lombok.RequiredArgsConstructor;
/**
 * Facade that implements {@link MetadataDAO} and {@link EventHandlerDAO} by delegating every
 * call to the task-, workflow-, and event-handler-specific SQLite DAOs. Contains no logic of
 * its own; the constructor is generated by Lombok's {@code @RequiredArgsConstructor}.
 */
@RequiredArgsConstructor
public class SqliteMetadataDAO implements MetadataDAO, EventHandlerDAO {
    private final SqliteTaskMetadataDAO taskMetadataDAO;
    private final SqliteWorkflowMetadataDAO workflowMetadataDAO;
    private final SqliteEventHandlerMetadataDAO eventHandlerMetadataDAO;
    // --- TaskDef operations: delegated to taskMetadataDAO ---
    @Override
    public TaskDef createTaskDef(TaskDef taskDef) {
        return taskMetadataDAO.createTaskDef(taskDef);
    }
    @Override
    public TaskDef updateTaskDef(TaskDef taskDef) {
        return taskMetadataDAO.updateTaskDef(taskDef);
    }
    @Override
    public TaskDef getTaskDef(String name) {
        return taskMetadataDAO.getTaskDef(name);
    }
    @Override
    public List<TaskDef> getAllTaskDefs() {
        return taskMetadataDAO.getAllTaskDefs();
    }
    @Override
    public void removeTaskDef(String name) {
        taskMetadataDAO.removeTaskDef(name);
    }
    // --- WorkflowDef operations: delegated to workflowMetadataDAO ---
    @Override
    public void createWorkflowDef(WorkflowDef def) {
        workflowMetadataDAO.createWorkflowDef(def);
    }
    @Override
    public void updateWorkflowDef(WorkflowDef def) {
        workflowMetadataDAO.updateWorkflowDef(def);
    }
    @Override
    public Optional<WorkflowDef> getLatestWorkflowDef(String name) {
        return workflowMetadataDAO.getLatestWorkflowDef(name);
    }
    @Override
    public Optional<WorkflowDef> getWorkflowDef(String name, int version) {
        return workflowMetadataDAO.getWorkflowDef(name, version);
    }
    @Override
    public void removeWorkflowDef(String name, Integer version) {
        workflowMetadataDAO.removeWorkflowDef(name, version);
    }
    @Override
    public List<WorkflowDef> getAllWorkflowDefs() {
        return workflowMetadataDAO.getAllWorkflowDefs();
    }
    @Override
    public List<WorkflowDef> getAllWorkflowDefsLatestVersions() {
        return workflowMetadataDAO.getAllWorkflowDefsLatestVersions();
    }
    // --- EventHandler operations: delegated to eventHandlerMetadataDAO ---
    @Override
    public void addEventHandler(EventHandler eventHandler) {
        eventHandlerMetadataDAO.addEventHandler(eventHandler);
    }
    @Override
    public void updateEventHandler(EventHandler eventHandler) {
        eventHandlerMetadataDAO.updateEventHandler(eventHandler);
    }
    @Override
    public void removeEventHandler(String name) {
        eventHandlerMetadataDAO.removeEventHandler(name);
    }
    @Override
    public List<EventHandler> getAllEventHandlers() {
        return eventHandlerMetadataDAO.getAllEventHandlers();
    }
    @Override
    public List<EventHandler> getEventHandlersForEvent(String event, boolean activeOnly) {
        return eventHandlerMetadataDAO.getEventHandlersForEvent(event, activeOnly);
    }
    // Extra workflow queries not part of the MetadataDAO interface.
    /** Returns the distinct names of all stored workflow definitions. */
    public List<String> findAll() {
        return workflowMetadataDAO.findAll();
    }
    /** Returns the latest-version definition of every workflow. */
    public List<WorkflowDef> getAllLatest() {
        return workflowMetadataDAO.getAllLatest();
    }
    /** Returns every stored version of the named workflow, ordered by version. */
    public List<WorkflowDef> getAllVersions(String name) {
        return workflowMetadataDAO.getAllVersions(name);
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/sqlite-persistence/src/main/java/com/netflix/conductor/sqlite/dao/metadata/SqliteTaskMetadataDAO.java | sqlite-persistence/src/main/java/com/netflix/conductor/sqlite/dao/metadata/SqliteTaskMetadataDAO.java | /*
* Copyright 2025 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.sqlite.dao.metadata;
import java.sql.Connection;
import java.util.List;
import javax.sql.DataSource;
import org.springframework.retry.support.RetryTemplate;
import com.netflix.conductor.common.metadata.tasks.TaskDef;
import com.netflix.conductor.core.exception.NotFoundException;
import com.netflix.conductor.sqlite.dao.SqliteBaseDAO;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.common.base.Preconditions;
/**
 * SQLite DAO for {@link TaskDef} metadata stored as JSON in the {@code meta_task_def} table,
 * keyed by task name.
 */
public class SqliteTaskMetadataDAO extends SqliteBaseDAO {
    public SqliteTaskMetadataDAO(
            RetryTemplate retryTemplate, ObjectMapper objectMapper, DataSource dataSource) {
        super(retryTemplate, objectMapper, dataSource);
    }
    /** Creates (or overwrites) the definition and returns it unchanged. */
    public TaskDef createTaskDef(TaskDef taskDef) {
        validate(taskDef);
        insertOrUpdateTaskDef(taskDef);
        return taskDef;
    }
    /** Updates (or creates) the definition and returns it unchanged. */
    public TaskDef updateTaskDef(TaskDef taskDef) {
        validate(taskDef);
        insertOrUpdateTaskDef(taskDef);
        return taskDef;
    }
    /** Returns the definition with the given name, or {@code null} when it does not exist. */
    public TaskDef getTaskDef(String name) {
        Preconditions.checkNotNull(name, "TaskDef name cannot be null");
        return getTaskDefFromDB(name);
    }
    /** Returns every stored task definition. */
    public List<TaskDef> getAllTaskDefs() {
        return getWithRetriedTransactions(connection -> findAllTaskDefs(connection));
    }
    /**
     * Deletes the definition with the given name.
     *
     * @throws NotFoundException if no such definition exists
     */
    public void removeTaskDef(String name) {
        final String DELETE_TASKDEF_QUERY = "DELETE FROM meta_task_def WHERE name = ?";
        executeWithTransaction(
                DELETE_TASKDEF_QUERY,
                stmt -> {
                    boolean deleted = stmt.addParameter(name).executeDelete();
                    if (!deleted) {
                        throw new NotFoundException("No such task definition");
                    }
                });
    }
    /** Rejects null definitions and definitions without a name. */
    private void validate(TaskDef taskDef) {
        Preconditions.checkNotNull(taskDef, "TaskDef object cannot be null");
        Preconditions.checkNotNull(taskDef.getName(), "TaskDef name cannot be null");
    }
    /** Reads a single definition by name; {@code null} when absent. */
    private TaskDef getTaskDefFromDB(String name) {
        final String READ_ONE_TASKDEF_QUERY = "SELECT json_data FROM meta_task_def WHERE name = ?";
        return queryWithTransaction(
                READ_ONE_TASKDEF_QUERY,
                stmt -> stmt.addParameter(name).executeAndFetchFirst(TaskDef.class));
    }
    /**
     * Upserts a definition: attempts an UPDATE first and falls back to an INSERT when no row was
     * touched. Both statements run within one retried transaction. Returns the task name.
     */
    private String insertOrUpdateTaskDef(TaskDef taskDef) {
        final String INSERT_TASKDEF_QUERY =
                "INSERT INTO meta_task_def (name, json_data) VALUES (?, ?)";
        final String UPDATE_TASKDEF_QUERY =
                "UPDATE meta_task_def SET json_data = ?, modified_on = CURRENT_TIMESTAMP WHERE name = ?";
        return getWithRetriedTransactions(
                connection -> {
                    execute(
                            connection,
                            UPDATE_TASKDEF_QUERY,
                            update -> {
                                int touched =
                                        update.addJsonParameter(taskDef)
                                                .addParameter(taskDef.getName())
                                                .executeUpdate();
                                if (touched == 0) {
                                    execute(
                                            connection,
                                            INSERT_TASKDEF_QUERY,
                                            insert ->
                                                    insert.addParameter(taskDef.getName())
                                                            .addJsonParameter(taskDef)
                                                            .executeUpdate());
                                }
                            });
                    return taskDef.getName();
                });
    }
    /** Fetches every stored definition using the supplied connection. */
    private List<TaskDef> findAllTaskDefs(Connection connection) {
        final String READ_ALL_TASKDEF_QUERY = "SELECT json_data FROM meta_task_def";
        return query(
                connection, READ_ALL_TASKDEF_QUERY, stmt -> stmt.executeAndFetch(TaskDef.class));
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/sqlite-persistence/src/main/java/com/netflix/conductor/sqlite/dao/metadata/SqliteWorkflowMetadataDAO.java | sqlite-persistence/src/main/java/com/netflix/conductor/sqlite/dao/metadata/SqliteWorkflowMetadataDAO.java | /*
* Copyright 2025 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.sqlite.dao.metadata;
import java.sql.Connection;
import java.util.List;
import java.util.Optional;
import javax.sql.DataSource;
import org.springframework.retry.support.RetryTemplate;
import com.netflix.conductor.common.metadata.workflow.WorkflowDef;
import com.netflix.conductor.core.exception.ConflictException;
import com.netflix.conductor.core.exception.NotFoundException;
import com.netflix.conductor.sqlite.dao.SqliteBaseDAO;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.common.base.Preconditions;
/**
 * SQLite DAO for versioned {@link WorkflowDef} metadata stored in the {@code meta_workflow_def}
 * table. Each (name, version) pair maps to one JSON row; the table's {@code latest_version}
 * column tracks the highest version per workflow name and is maintained by this class.
 */
public class SqliteWorkflowMetadataDAO extends SqliteBaseDAO {
    public SqliteWorkflowMetadataDAO(
            RetryTemplate retryTemplate, ObjectMapper objectMapper, DataSource dataSource) {
        super(retryTemplate, objectMapper, dataSource);
    }
    /**
     * Persists a new workflow definition.
     *
     * @throws ConflictException if a definition with the same name and version already exists
     */
    public void createWorkflowDef(WorkflowDef def) {
        validate(def);
        withTransaction(
                tx -> {
                    if (workflowExists(tx, def)) {
                        throw new ConflictException(
                                "Workflow with " + def.key() + " already exists!");
                    }
                    insertOrUpdateWorkflowDef(tx, def);
                });
    }
    /** Creates or overwrites the definition for the given name and version. */
    public void updateWorkflowDef(WorkflowDef def) {
        validate(def);
        withTransaction(tx -> insertOrUpdateWorkflowDef(tx, def));
    }
    /** Returns the definition whose version equals the stored {@code latest_version}, if any. */
    public Optional<WorkflowDef> getLatestWorkflowDef(String name) {
        final String GET_LATEST_WORKFLOW_DEF_QUERY =
                "SELECT json_data FROM meta_workflow_def WHERE NAME = ? AND "
                        + "version = latest_version";
        return Optional.ofNullable(
                queryWithTransaction(
                        GET_LATEST_WORKFLOW_DEF_QUERY,
                        q -> q.addParameter(name).executeAndFetchFirst(WorkflowDef.class)));
    }
    /** Returns the definition for an exact (name, version) pair, if present. */
    public Optional<WorkflowDef> getWorkflowDef(String name, int version) {
        final String GET_WORKFLOW_DEF_QUERY =
                "SELECT json_data FROM meta_workflow_def WHERE NAME = ? AND version = ?";
        return Optional.ofNullable(
                queryWithTransaction(
                        GET_WORKFLOW_DEF_QUERY,
                        q ->
                                q.addParameter(name)
                                        .addParameter(version)
                                        .executeAndFetchFirst(WorkflowDef.class)));
    }
    /**
     * Deletes one (name, version) row and recomputes {@code latest_version} from the rows that
     * remain for the workflow.
     *
     * @throws NotFoundException if the (name, version) pair does not exist
     */
    public void removeWorkflowDef(String name, Integer version) {
        final String DELETE_WORKFLOW_QUERY =
                "DELETE from meta_workflow_def WHERE name = ? AND version = ?";
        withTransaction(
                tx -> {
                    // remove specified workflow
                    execute(
                            tx,
                            DELETE_WORKFLOW_QUERY,
                            q -> {
                                if (!q.addParameter(name).addParameter(version).executeDelete()) {
                                    throw new NotFoundException(
                                            String.format(
                                                    "No such workflow definition: %s version: %d",
                                                    name, version));
                                }
                            });
                    // reset latest version based on remaining rows for this workflow;
                    // empty Optional means the last version was removed and no update is needed
                    Optional<Integer> maxVersion = getLatestVersion(tx, name);
                    maxVersion.ifPresent(newVersion -> updateLatestVersion(tx, name, newVersion));
                });
    }
    /** Returns every stored definition, all versions included, ordered by name then version. */
    public List<WorkflowDef> getAllWorkflowDefs() {
        final String GET_ALL_WORKFLOW_DEF_QUERY =
                "SELECT json_data FROM meta_workflow_def ORDER BY name, version";
        return queryWithTransaction(
                GET_ALL_WORKFLOW_DEF_QUERY, q -> q.executeAndFetch(WorkflowDef.class));
    }
    /** Returns the highest-version definition of every workflow, computed via a subquery. */
    public List<WorkflowDef> getAllWorkflowDefsLatestVersions() {
        final String GET_ALL_WORKFLOW_DEF_LATEST_VERSIONS_QUERY =
                "SELECT json_data FROM meta_workflow_def wd WHERE wd.version = (SELECT MAX(version) FROM meta_workflow_def wd2 WHERE wd2.name = wd.name)";
        return queryWithTransaction(
                GET_ALL_WORKFLOW_DEF_LATEST_VERSIONS_QUERY,
                q -> q.executeAndFetch(WorkflowDef.class));
    }
    /** Returns the distinct names of all stored workflow definitions. */
    public List<String> findAll() {
        final String FIND_ALL_WORKFLOW_DEF_QUERY = "SELECT DISTINCT name FROM meta_workflow_def";
        return queryWithTransaction(
                FIND_ALL_WORKFLOW_DEF_QUERY, q -> q.executeAndFetch(String.class));
    }
    /** Returns the definition matching {@code latest_version} for every workflow name. */
    public List<WorkflowDef> getAllLatest() {
        final String GET_ALL_LATEST_WORKFLOW_DEF_QUERY =
                "SELECT json_data FROM meta_workflow_def WHERE version = " + "latest_version";
        return queryWithTransaction(
                GET_ALL_LATEST_WORKFLOW_DEF_QUERY, q -> q.executeAndFetch(WorkflowDef.class));
    }
    /** Returns every stored version of the named workflow, ordered by version. */
    public List<WorkflowDef> getAllVersions(String name) {
        final String GET_ALL_VERSIONS_WORKFLOW_DEF_QUERY =
                "SELECT json_data FROM meta_workflow_def WHERE name = ? " + "ORDER BY version";
        return queryWithTransaction(
                GET_ALL_VERSIONS_WORKFLOW_DEF_QUERY,
                q -> q.addParameter(name).executeAndFetch(WorkflowDef.class));
    }
    /** True when a row already exists for the definition's (name, version) pair. */
    private Boolean workflowExists(Connection connection, WorkflowDef def) {
        final String CHECK_WORKFLOW_DEF_EXISTS_QUERY =
                "SELECT COUNT(*) FROM meta_workflow_def WHERE name = ? AND " + "version = ?";
        return query(
                connection,
                CHECK_WORKFLOW_DEF_EXISTS_QUERY,
                q -> q.addParameter(def.getName()).addParameter(def.getVersion()).exists());
    }
    /**
     * Inserts or updates the (name, version) row for {@code def}, then bumps the workflow's
     * {@code latest_version} to the maximum of the stored versions and {@code def}'s version.
     */
    private void insertOrUpdateWorkflowDef(Connection tx, WorkflowDef def) {
        final String INSERT_WORKFLOW_DEF_QUERY =
                "INSERT INTO meta_workflow_def (name, version, json_data) VALUES (?," + " ?, ?)";
        Optional<Integer> version = getLatestVersion(tx, def.getName());
        if (!workflowExists(tx, def)) {
            execute(
                    tx,
                    INSERT_WORKFLOW_DEF_QUERY,
                    q ->
                            q.addParameter(def.getName())
                                    .addParameter(def.getVersion())
                                    .addJsonParameter(def)
                                    .executeUpdate());
        } else {
            // @formatter:off
            final String UPDATE_WORKFLOW_DEF_QUERY =
                    "UPDATE meta_workflow_def "
                            + "SET json_data = ?, modified_on = CURRENT_TIMESTAMP "
                            + "WHERE name = ? AND version = ?";
            // @formatter:on
            execute(
                    tx,
                    UPDATE_WORKFLOW_DEF_QUERY,
                    q ->
                            q.addJsonParameter(def)
                                    .addParameter(def.getName())
                                    .addParameter(def.getVersion())
                                    .executeUpdate());
        }
        int maxVersion = def.getVersion();
        if (version.isPresent() && version.get() > def.getVersion()) {
            maxVersion = version.get();
        }
        updateLatestVersion(tx, def.getName(), maxVersion);
    }
    /**
     * Returns the highest stored version for {@code name}, or {@link Optional#empty()} when the
     * workflow has no rows at all.
     */
    private Optional<Integer> getLatestVersion(Connection tx, String name) {
        final String GET_LATEST_WORKFLOW_DEF_VERSION =
                "SELECT max(version) AS version FROM meta_workflow_def WHERE " + "name = ?";
        Integer val =
                query(
                        tx,
                        GET_LATEST_WORKFLOW_DEF_VERSION,
                        q -> {
                            q.addParameter(name);
                            return q.executeAndFetch(
                                    rs -> {
                                        if (!rs.next()) {
                                            return null;
                                        }
                                        int version = rs.getInt(1);
                                        // max(version) yields SQL NULL when no rows match, and
                                        // ResultSet.getInt maps NULL to 0 — check wasNull() so a
                                        // missing workflow does not report a phantom version 0.
                                        return rs.wasNull() ? null : version;
                                    });
                        });
        return Optional.ofNullable(val);
    }
    /** Sets {@code latest_version} on every row of the named workflow. */
    private void updateLatestVersion(Connection tx, String name, int version) {
        final String UPDATE_WORKFLOW_DEF_LATEST_VERSION_QUERY =
                "UPDATE meta_workflow_def SET latest_version = ? " + "WHERE name = ?";
        execute(
                tx,
                UPDATE_WORKFLOW_DEF_LATEST_VERSION_QUERY,
                q -> q.addParameter(version).addParameter(name).executeUpdate());
    }
    /** Rejects null definitions and definitions without a name. */
    private void validate(WorkflowDef def) {
        Preconditions.checkNotNull(def, "WorkflowDef object cannot be null");
        Preconditions.checkNotNull(def.getName(), "WorkflowDef name cannot be null");
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/sqlite-persistence/src/main/java/com/netflix/conductor/sqlite/util/ExecuteFunction.java | sqlite-persistence/src/main/java/com/netflix/conductor/sqlite/util/ExecuteFunction.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.sqlite.util;
import java.sql.SQLException;
/**
 * Functional interface for {@link Query} executions with no expected result.
 *
 * @author mustafa
 */
@FunctionalInterface
public interface ExecuteFunction {
    /**
     * Executes the given query for its side effects only.
     *
     * @param query the prepared {@link Query} to run
     * @throws SQLException if the underlying JDBC operation fails
     */
    void apply(Query query) throws SQLException;
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/sqlite-persistence/src/main/java/com/netflix/conductor/sqlite/util/LazyToString.java | sqlite-persistence/src/main/java/com/netflix/conductor/sqlite/util/LazyToString.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.sqlite.util;
import java.util.function.Supplier;
/** Functional class to support the lazy execution of a String result. */
public class LazyToString {
private final Supplier<String> supplier;
/**
* @param supplier Supplier to execute when {@link #toString()} is called.
*/
public LazyToString(Supplier<String> supplier) {
this.supplier = supplier;
}
@Override
public String toString() {
return supplier.get();
}
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/sqlite-persistence/src/main/java/com/netflix/conductor/sqlite/util/QueryFunction.java | sqlite-persistence/src/main/java/com/netflix/conductor/sqlite/util/QueryFunction.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.sqlite.util;
import java.sql.SQLException;
/**
 * Functional interface for {@link Query} executions that return results.
 *
 * @author mustafa
 */
@FunctionalInterface
public interface QueryFunction<R> {
    /**
     * Executes the given query and produces a result.
     *
     * @param query the prepared {@link Query} to run
     * @return the value produced from the query execution
     * @throws SQLException if the underlying JDBC operation fails
     */
    R apply(Query query) throws SQLException;
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/sqlite-persistence/src/main/java/com/netflix/conductor/sqlite/util/QueueStats.java | sqlite-persistence/src/main/java/com/netflix/conductor/sqlite/util/QueueStats.java | /*
* Copyright 2024 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.sqlite.util;
/**
 * Mutable snapshot of per-queue statistics: the queue depth and the time of the next delivery.
 */
public class QueueStats {
    // Nullable: remains null until populated via setDepth.
    private Integer depth;
    // Next delivery time; units/epoch are defined by the producer of this value.
    private long nextDelivery;
    public void setDepth(Integer depth) {
        this.depth = depth;
    }
    public Integer getDepth() {
        return depth;
    }
    public void setNextDelivery(long nextDelivery) {
        this.nextDelivery = nextDelivery;
    }
    public long getNextDelivery() {
        return nextDelivery;
    }
    /** Compact human-readable form, e.g. {@code {nextDelivery: 123 depth: 4}}. */
    @Override
    public String toString() {
        return "{nextDelivery: " + nextDelivery + " depth: " + depth + "}";
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/sqlite-persistence/src/main/java/com/netflix/conductor/sqlite/util/TransactionalFunction.java | sqlite-persistence/src/main/java/com/netflix/conductor/sqlite/util/TransactionalFunction.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.sqlite.util;
import java.sql.Connection;
import java.sql.SQLException;
/**
 * Functional interface for operations within a transactional context.
 *
 * @author mustafa
 */
@FunctionalInterface
public interface TransactionalFunction<R> {
    /**
     * Runs an operation against the supplied transactional connection.
     *
     * @param tx the JDBC connection participating in the enclosing transaction
     * @return the result of the operation
     * @throws SQLException if the underlying JDBC operation fails
     */
    R apply(Connection tx) throws SQLException;
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/sqlite-persistence/src/main/java/com/netflix/conductor/sqlite/util/ResultSetHandler.java | sqlite-persistence/src/main/java/com/netflix/conductor/sqlite/util/ResultSetHandler.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.sqlite.util;
import java.sql.ResultSet;
import java.sql.SQLException;
/**
 * Functional interface for {@link Query#executeAndFetch(ResultSetHandler)}.
 *
 * @param <R> the type of value extracted from the {@link ResultSet}
 * @author mustafa
 */
@FunctionalInterface
public interface ResultSetHandler<R> {

    /**
     * Extracts a value from the given result set.
     *
     * @param resultSet the result set to read; it is opened and closed by the caller, so
     *     implementations must not close it
     * @return the extracted value
     * @throws SQLException if a database access error occurs
     */
    R apply(ResultSet resultSet) throws SQLException;
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/sqlite-persistence/src/main/java/com/netflix/conductor/sqlite/util/Query.java | sqlite-persistence/src/main/java/com/netflix/conductor/sqlite/util/Query.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.sqlite.util;
import java.io.IOException;
import java.sql.*;
import java.sql.Date;
import java.util.*;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.commons.lang3.math.NumberUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.netflix.conductor.core.exception.NonTransientException;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.ObjectMapper;
/**
 * Represents a {@link PreparedStatement} that is wrapped with convenience methods and utilities.
 *
 * <p>This class simulates a parameter building pattern and all {@literal addParameter(*)} methods
 * must be called in the proper order of their expected binding sequence.
 *
 * @author mustafa
 */
public class Query implements AutoCloseable {

    private final Logger logger = LoggerFactory.getLogger(getClass());

    /** The {@link ObjectMapper} instance to use for serializing/deserializing JSON. */
    protected final ObjectMapper objectMapper;

    /** The initial supplied query String that was used to prepare {@link #statement}. */
    private final String rawQuery;

    /**
     * Parameter index for the {@code ResultSet#set*(*)} methods, gets incremented every time a
     * parameter is added to the {@code PreparedStatement} {@link #statement}.
     */
    private final AtomicInteger index = new AtomicInteger(1);

    /** The {@link PreparedStatement} that will be managed and executed by this class. */
    private final PreparedStatement statement;

    private final Connection connection;

    /**
     * Prepares {@literal query} against the given connection.
     *
     * @param objectMapper mapper used by the JSON (de)serialization helpers.
     * @param connection the connection to prepare against; this class never closes it.
     * @param query the SQL text with {@literal ?} binding placeholders.
     * @throws NonTransientException if the statement cannot be prepared.
     */
    public Query(ObjectMapper objectMapper, Connection connection, String query) {
        this.rawQuery = query;
        this.objectMapper = objectMapper;
        this.connection = connection;

        try {
            this.statement = connection.prepareStatement(query);
        } catch (SQLException ex) {
            throw new NonTransientException(
                    "Cannot prepare statement for query: " + ex.getMessage(), ex);
        }
    }

    /**
     * Generate a String with {@literal count} number of '?' placeholders for {@link
     * PreparedStatement} queries.
     *
     * @param count The number of '?' chars to generate.
     * @return a comma delimited string of {@literal count} '?' binding placeholders.
     */
    public static String generateInBindings(int count) {
        String[] questions = new String[count];
        for (int i = 0; i < count; i++) {
            questions[i] = "?";
        }
        return String.join(", ", questions);
    }

    // ---- Single-value binding helpers; each binds at the next sequential index. ----

    public Query addParameter(final String value) {
        return addParameterInternal((ps, idx) -> ps.setString(idx, value));
    }

    public Query addParameter(final List<String> value) throws SQLException {
        // Binds the list as a SQL ARRAY of VARCHAR via the driver.
        String[] valueStringArray = value.toArray(new String[0]);
        Array valueArray = this.connection.createArrayOf("VARCHAR", valueStringArray);
        return addParameterInternal((ps, idx) -> ps.setArray(idx, valueArray));
    }

    public Query addParameter(final int value) {
        return addParameterInternal((ps, idx) -> ps.setInt(idx, value));
    }

    public Query addParameter(final boolean value) {
        return addParameterInternal(((ps, idx) -> ps.setBoolean(idx, value)));
    }

    public Query addParameter(final long value) {
        return addParameterInternal((ps, idx) -> ps.setLong(idx, value));
    }

    public Query addParameter(final double value) {
        return addParameterInternal((ps, idx) -> ps.setDouble(idx, value));
    }

    public Query addParameter(Date date) {
        return addParameterInternal((ps, idx) -> ps.setDate(idx, date));
    }

    public Query addParameter(Timestamp timestamp) {
        return addParameterInternal((ps, idx) -> ps.setTimestamp(idx, timestamp));
    }

    /**
     * Serializes {@literal value} to a JSON string for persistence.
     *
     * @param value The value to serialize.
     * @return {@literal this}
     */
    public Query addJsonParameter(Object value) {
        return addParameter(toJson(value));
    }

    /**
     * Bind the given {@link java.util.Date} to the PreparedStatement as a {@link Date}.
     *
     * @param date The {@literal java.util.Date} to bind.
     * @return {@literal this}
     */
    public Query addDateParameter(java.util.Date date) {
        return addParameter(new Date(date.getTime()));
    }

    /**
     * Bind the given {@link java.util.Date} to the PreparedStatement as a {@link Timestamp}.
     *
     * @param date The {@literal java.util.Date} to bind.
     * @return {@literal this}
     */
    public Query addTimestampParameter(java.util.Date date) {
        return addParameter(new Timestamp(date.getTime()));
    }

    /**
     * Bind the given epoch millis to the PreparedStatement as a {@link Timestamp}.
     *
     * @param epochMillis The epoch ms to create a new {@literal Timestamp} from.
     * @return {@literal this}
     */
    public Query addTimestampParameter(long epochMillis) {
        return addParameter(new Timestamp(epochMillis));
    }

    /**
     * Add a collection of primitive values at once, in the order of the collection.
     *
     * @param values The values to bind to the prepared statement.
     * @return {@literal this}
     * @throws IllegalArgumentException If a non-primitive/unsupported type is encountered in the
     *     collection.
     * @see #addParameters(Object...)
     */
    public Query addParameters(Collection<?> values) {
        // Wildcard instead of a raw type: accepts any Collection without unchecked warnings.
        return addParameters(values.toArray());
    }

    /**
     * Add many primitive values at once.
     *
     * @param values The values to bind to the prepared statement.
     * @return {@literal this}
     * @throws IllegalArgumentException If a non-primitive/unsupported type is encountered.
     */
    public Query addParameters(Object... values) {
        for (Object v : values) {
            if (v instanceof String) {
                addParameter((String) v);
            } else if (v instanceof Integer) {
                addParameter((Integer) v);
            } else if (v instanceof Long) {
                addParameter((Long) v);
            } else if (v instanceof Double) {
                addParameter((Double) v);
            } else if (v instanceof Boolean) {
                addParameter((Boolean) v);
            } else if (v instanceof Date) {
                addParameter((Date) v);
            } else if (v instanceof Timestamp) {
                addParameter((Timestamp) v);
            } else {
                throw new IllegalArgumentException(
                        "Type "
                                + v.getClass().getName()
                                + " is not supported by automatic property assignment");
            }
        }
        return this;
    }

    /**
     * Utility method for evaluating the prepared statement as a query to check the existence of a
     * record using a numeric count or boolean return value.
     *
     * <p>The {@link #rawQuery} provided must result in a {@link Number} or {@link Boolean} result.
     *
     * @return {@literal true} If a count query returned more than 0 or an exists query returns
     *     {@literal true}.
     * @throws NonTransientException If an unexpected return type cannot be evaluated to a {@code
     *     Boolean} result.
     */
    public boolean exists() {
        Object val = executeScalar();
        if (null == val) {
            return false;
        }
        if (val instanceof Number) {
            return convertLong(val) > 0;
        }
        if (val instanceof Boolean) {
            return (Boolean) val;
        }
        if (val instanceof String) {
            return convertBoolean(val);
        }
        throw new NonTransientException(
                "Expected a Numeric or Boolean scalar return value from the query, received "
                        + val.getClass().getName());
    }

    /**
     * Convenience method for executing delete statements.
     *
     * @return {@literal true} if the statement affected 1 or more rows.
     * @see #executeUpdate()
     */
    public boolean executeDelete() {
        int count = executeUpdate();
        if (count > 1) {
            logger.trace("Removed {} row(s) for query {}", count, rawQuery);
        }
        return count > 0;
    }

    /**
     * Convenience method for executing statements that return a single numeric value, typically
     * {@literal SELECT COUNT...} style queries.
     *
     * @return The result of the query as a {@literal long}.
     */
    public long executeCount() {
        return executeScalar(Long.class);
    }

    /**
     * @return The result of {@link PreparedStatement#executeUpdate()}
     */
    public int executeUpdate() {
        try {
            Long start = null;
            if (logger.isTraceEnabled()) {
                start = System.currentTimeMillis();
            }

            final int val = this.statement.executeUpdate();

            if (null != start && logger.isTraceEnabled()) {
                long end = System.currentTimeMillis();
                logger.trace("[{}ms] {}: {}", (end - start), val, rawQuery);
            }

            return val;
        } catch (SQLException ex) {
            throw new NonTransientException(ex.getMessage(), ex);
        }
    }

    /**
     * Execute a query from the PreparedStatement and return the ResultSet.
     *
     * <p><em>NOTE:</em> The returned ResultSet must be closed/managed by the calling methods.
     *
     * @return {@link PreparedStatement#executeQuery()}
     * @throws NonTransientException If any SQL errors occur.
     */
    public ResultSet executeQuery() {
        Long start = null;
        if (logger.isTraceEnabled()) {
            start = System.currentTimeMillis();
        }

        try {
            return this.statement.executeQuery();
        } catch (SQLException ex) {
            throw new NonTransientException(ex.getMessage(), ex);
        } finally {
            if (null != start && logger.isTraceEnabled()) {
                long end = System.currentTimeMillis();
                logger.trace("[{}ms] {}", (end - start), rawQuery);
            }
        }
    }

    /**
     * @return The single result of the query as an Object.
     */
    public Object executeScalar() {
        try (ResultSet rs = executeQuery()) {
            if (!rs.next()) {
                return null;
            }
            return rs.getObject(1);
        } catch (SQLException ex) {
            throw new NonTransientException(ex.getMessage(), ex);
        }
    }

    /**
     * Execute the PreparedStatement and return a single 'primitive' value from the ResultSet.
     *
     * @param returnType The type to return.
     * @param <V> The type parameter to return a List of.
     * @return A single result from the execution of the statement, as a type of {@literal
     *     returnType}.
     * @throws NonTransientException {@literal returnType} is unsupported, cannot be cast to from
     *     the result, or any SQL errors occur.
     */
    public <V> V executeScalar(Class<V> returnType) {
        try (ResultSet rs = executeQuery()) {
            if (!rs.next()) {
                // No row: return a zero/false default for numeric/boolean types, null otherwise.
                Object value = null;
                if (Integer.class == returnType) {
                    value = 0;
                } else if (Long.class == returnType) {
                    value = 0L;
                } else if (Boolean.class == returnType) {
                    value = false;
                }
                return returnType.cast(value);
            } else {
                return getScalarFromResultSet(rs, returnType);
            }
        } catch (SQLException ex) {
            throw new NonTransientException(ex.getMessage(), ex);
        }
    }

    /**
     * Execute the PreparedStatement and return a List of 'primitive' values from the ResultSet.
     *
     * @param returnType The type Class return a List of.
     * @param <V> The type parameter to return a List of.
     * @return A {@code List<returnType>}.
     * @throws NonTransientException {@literal returnType} is unsupported, cannot be cast to from
     *     the result, or any SQL errors occur.
     */
    public <V> List<V> executeScalarList(Class<V> returnType) {
        try (ResultSet rs = executeQuery()) {
            List<V> values = new ArrayList<>();
            while (rs.next()) {
                values.add(getScalarFromResultSet(rs, returnType));
            }
            return values;
        } catch (SQLException ex) {
            throw new NonTransientException(ex.getMessage(), ex);
        }
    }

    /**
     * Execute the statement and return only the first record from the result set.
     *
     * @param returnType The Class to return.
     * @param <V> The type parameter.
     * @return An instance of {@literal <V>} from the result set.
     */
    public <V> V executeAndFetchFirst(Class<V> returnType) {
        Object o = executeScalar();
        if (null == o) {
            return null;
        }
        return convert(o, returnType);
    }

    /**
     * Execute the PreparedStatement and return a List of {@literal returnType} values from the
     * ResultSet.
     *
     * @param returnType The type Class return a List of.
     * @param <V> The type parameter to return a List of.
     * @return A {@code List<returnType>}.
     * @throws NonTransientException {@literal returnType} is unsupported, cannot be cast to from
     *     the result, or any SQL errors occur.
     */
    public <V> List<V> executeAndFetch(Class<V> returnType) {
        try (ResultSet rs = executeQuery()) {
            List<V> list = new ArrayList<>();
            while (rs.next()) {
                list.add(convert(rs.getObject(1), returnType));
            }
            return list;
        } catch (SQLException ex) {
            throw new NonTransientException(ex.getMessage(), ex);
        }
    }

    /**
     * Execute the PreparedStatement and return a List of {@literal Map} values from the ResultSet.
     *
     * @return A {@code List<Map>} of column label to value, one Map per row.
     * @throws NonTransientException if any SQL errors occur.
     */
    public List<Map<String, Object>> executeAndFetchMap() {
        // Fixed javadoc: this method never throws SQLException — it is caught and rethrown
        // as NonTransientException below.
        try (ResultSet rs = executeQuery()) {
            List<Map<String, Object>> result = new ArrayList<>();
            ResultSetMetaData metadata = rs.getMetaData();
            int columnCount = metadata.getColumnCount();
            while (rs.next()) {
                HashMap<String, Object> row = new HashMap<>();
                for (int i = 1; i <= columnCount; i++) {
                    row.put(metadata.getColumnLabel(i), rs.getObject(i));
                }
                result.add(row);
            }
            return result;
        } catch (SQLException ex) {
            throw new NonTransientException(ex.getMessage(), ex);
        }
    }

    /**
     * Execute the query and pass the {@link ResultSet} to the given handler.
     *
     * @param handler The {@link ResultSetHandler} to execute.
     * @param <V> The return type of this method.
     * @return The results of {@link ResultSetHandler#apply(ResultSet)}.
     */
    public <V> V executeAndFetch(ResultSetHandler<V> handler) {
        try (ResultSet rs = executeQuery()) {
            return handler.apply(rs);
        } catch (SQLException ex) {
            throw new NonTransientException(ex.getMessage(), ex);
        }
    }

    /** Closes the managed {@link PreparedStatement}; never closes the {@link Connection}. */
    @Override
    public void close() {
        try {
            if (null != statement && !statement.isClosed()) {
                statement.close();
            }
        } catch (SQLException ex) {
            logger.warn("Error closing prepared statement: {}", ex.getMessage());
        }
    }

    /** Applies {@literal setter} at the next sequential bind index and returns {@literal this}. */
    protected final Query addParameterInternal(InternalParameterSetter setter) {
        int index = getAndIncrementIndex();
        try {
            setter.apply(this.statement, index);
            return this;
        } catch (SQLException ex) {
            throw new NonTransientException("Could not apply bind parameter at index " + index, ex);
        }
    }

    /** Reads column 1 of the current row as {@literal returnType}; NPE if the value is SQL NULL. */
    protected <V> V getScalarFromResultSet(ResultSet rs, Class<V> returnType) throws SQLException {
        Object value = null;

        if (Integer.class == returnType) {
            value = rs.getInt(1);
        } else if (Long.class == returnType) {
            value = rs.getLong(1);
        } else if (String.class == returnType) {
            value = rs.getString(1);
        } else if (Boolean.class == returnType) {
            value = rs.getBoolean(1);
        } else if (Double.class == returnType) {
            value = rs.getDouble(1);
        } else if (Date.class == returnType) {
            value = rs.getDate(1);
        } else if (Timestamp.class == returnType) {
            value = rs.getTimestamp(1);
        } else {
            value = rs.getObject(1);
        }

        if (null == value) {
            throw new NullPointerException(
                    "Cannot get value from ResultSet of type " + returnType.getName());
        }

        return returnType.cast(value);
    }

    /** Coerces {@literal value} to {@literal returnType}, falling back to JSON for Strings. */
    protected <V> V convert(Object value, Class<V> returnType) {
        if (Boolean.class == returnType) {
            return returnType.cast(convertBoolean(value));
        } else if (Integer.class == returnType) {
            return returnType.cast(convertInt(value));
        } else if (Long.class == returnType) {
            return returnType.cast(convertLong(value));
        } else if (Double.class == returnType) {
            return returnType.cast(convertDouble(value));
        } else if (String.class == returnType) {
            return returnType.cast(convertString(value));
        } else if (value instanceof String) {
            return fromJson((String) value, returnType);
        }

        final String vName = value.getClass().getName();
        final String rName = returnType.getName();
        throw new NonTransientException("Cannot convert type " + vName + " to " + rName);
    }

    protected Integer convertInt(Object value) {
        if (null == value) {
            return null;
        }

        if (value instanceof Integer) {
            return (Integer) value;
        }

        if (value instanceof Number) {
            return ((Number) value).intValue();
        }

        return NumberUtils.toInt(value.toString());
    }

    protected Double convertDouble(Object value) {
        if (null == value) {
            return null;
        }

        if (value instanceof Double) {
            return (Double) value;
        }

        if (value instanceof Number) {
            return ((Number) value).doubleValue();
        }

        return NumberUtils.toDouble(value.toString());
    }

    protected Long convertLong(Object value) {
        if (null == value) {
            return null;
        }

        if (value instanceof Long) {
            return (Long) value;
        }

        if (value instanceof Number) {
            return ((Number) value).longValue();
        }

        return NumberUtils.toLong(value.toString());
    }

    protected String convertString(Object value) {
        if (null == value) {
            return null;
        }

        if (value instanceof String) {
            return (String) value;
        }

        return value.toString().trim();
    }

    /** Lenient boolean coercion: numbers are {@literal != 0}; Y/YES/TRUE/T/1 strings are true. */
    protected Boolean convertBoolean(Object value) {
        if (null == value) {
            return null;
        }

        if (value instanceof Boolean) {
            return (Boolean) value;
        }

        if (value instanceof Number) {
            return ((Number) value).intValue() != 0;
        }

        String text = value.toString().trim();
        return "Y".equalsIgnoreCase(text)
                || "YES".equalsIgnoreCase(text)
                || "TRUE".equalsIgnoreCase(text)
                || "T".equalsIgnoreCase(text)
                || "1".equalsIgnoreCase(text);
    }

    protected String toJson(Object value) {
        if (null == value) {
            return null;
        }

        try {
            return objectMapper.writeValueAsString(value);
        } catch (JsonProcessingException ex) {
            throw new NonTransientException(ex.getMessage(), ex);
        }
    }

    protected <V> V fromJson(String value, Class<V> returnType) {
        if (null == value) {
            return null;
        }

        try {
            return objectMapper.readValue(value, returnType);
        } catch (IOException ex) {
            throw new NonTransientException(
                    "Could not convert JSON '" + value + "' to " + returnType.getName(), ex);
        }
    }

    protected final int getIndex() {
        return index.get();
    }

    protected final int getAndIncrementIndex() {
        return index.getAndIncrement();
    }

    @FunctionalInterface
    private interface InternalParameterSetter {

        void apply(PreparedStatement ps, int idx) throws SQLException;
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/sqlite-persistence/src/main/java/com/netflix/conductor/sqlite/util/ExecutorsUtil.java | sqlite-persistence/src/main/java/com/netflix/conductor/sqlite/util/ExecutorsUtil.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.sqlite.util;
import java.util.concurrent.Executors;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.atomic.AtomicInteger;
public class ExecutorsUtil {
private ExecutorsUtil() {}
public static ThreadFactory newNamedThreadFactory(final String threadNamePrefix) {
return new ThreadFactory() {
private final AtomicInteger counter = new AtomicInteger();
@SuppressWarnings("NullableProblems")
@Override
public Thread newThread(Runnable r) {
Thread thread = Executors.defaultThreadFactory().newThread(r);
thread.setName(threadNamePrefix + counter.getAndIncrement());
return thread;
}
};
}
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/sqlite-persistence/src/main/java/com/netflix/conductor/sqlite/util/SqliteIndexQueryBuilder.java | sqlite-persistence/src/main/java/com/netflix/conductor/sqlite/util/SqliteIndexQueryBuilder.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.sqlite.util;
import java.sql.SQLException;
import java.time.Instant;
import java.time.ZoneOffset;
import java.time.ZonedDateTime;
import java.time.format.DateTimeFormatter;
import java.util.*;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import java.util.stream.Collectors;
import org.apache.commons.lang3.StringUtils;
import com.netflix.conductor.sqlite.config.SqliteProperties;
/**
 * Builds parameterized SELECT / COUNT queries for the SQLite index tables from a structured
 * query string, an optional free-text term, paging values, and sort fields.
 */
public class SqliteIndexQueryBuilder {

    private final String table;
    private final String freeText;
    private final int start;
    private final int count;
    private final List<String> sort;
    private final List<Condition> conditions = new ArrayList<>();
    // NOTE(review): these flags are set unconditionally and never read in this class —
    // confirm whether they should gate the MATCH/JSON_CONTAINS fragments below.
    private boolean allowJsonQueries;
    private boolean allowFullTextQueries;

    // Whitelist of columns a caller may filter/sort on; anything else is silently dropped.
    private static final String[] VALID_FIELDS = {
        "workflow_id",
        "correlation_id",
        "workflow_type",
        "start_time",
        "status",
        "task_id",
        "task_type",
        "task_def_name",
        "update_time",
        "json_data"
    };

    private static final String[] VALID_SORT_ORDER = {"ASC", "DESC"};

    /** One parsed predicate of the structured query (attribute, operator, bind values). */
    private static class Condition {

        // Compiled once: the regex is constant, so avoid re-compiling per Condition instance.
        private static final Pattern CONDITION_PATTERN =
                Pattern.compile("([a-zA-Z]+)\\s?(=|>|<|IN)\\s?(.*)");

        private String attribute;
        private String operator;
        private List<String> values;

        public Condition() {}

        /**
         * Parses a single {@code attr OP value} fragment.
         *
         * @throws IllegalArgumentException if the fragment does not match the expected shape.
         */
        public Condition(String query) {
            Matcher conditionMatcher = CONDITION_PATTERN.matcher(query);
            if (conditionMatcher.find()) {
                String[] valueArr = conditionMatcher.group(3).replaceAll("[\"()]", "").split(",");
                ArrayList<String> values = new ArrayList<>(Arrays.asList(valueArr));
                this.attribute = camelToSnake(conditionMatcher.group(1));
                this.values = values;
                this.operator = getOperator(conditionMatcher.group(2));
                // Time columns are bound as ISO timestamps, so convert epoch millis up front.
                // NOTE(review): only the first value is converted — an IN over time columns
                // would leave later values as raw millis; confirm intended.
                if (this.attribute.endsWith("_time")) {
                    values.set(0, millisToUtc(values.get(0)));
                }
            } else {
                throw new IllegalArgumentException("Incorrectly formatted query string: " + query);
            }
        }

        /** Renders this condition as a parameterized SQL fragment (all values bound via '?'). */
        public String getQueryFragment() {
            if (operator.equals("IN")) {
                // Create proper IN clause for SQLite
                return attribute
                        + " IN ("
                        + String.join(",", Collections.nCopies(values.size(), "?"))
                        + ")";
            } else if (operator.equals("MATCH")) {
                // SQLite FTS5 full-text search
                return "json_data MATCH ?";
            } else if (operator.equals("JSON_CONTAINS")) {
                // SQLite JSON1 extension query
                return "json_extract(json_data, ?) IS NOT NULL";
            } else if (operator.equals("LIKE")) {
                return "lower(" + attribute + ") LIKE ?";
            } else {
                if (attribute.endsWith("_time")) {
                    return attribute + " " + operator + " datetime(?)";
                } else {
                    return attribute + " " + operator + " ?";
                }
            }
        }

        // A single-value IN degrades to plain equality.
        private String getOperator(String op) {
            if (op.equals("IN") && values.size() == 1) {
                return "=";
            }
            return op;
        }

        /** Binds this condition's value(s) to {@literal q} in fragment order. */
        public void addParameter(Query q) throws SQLException {
            if (values.size() > 1) {
                // For IN clause, add each value separately
                for (String value : values) {
                    q.addParameter(value);
                }
            } else {
                q.addParameter(values.get(0));
            }
        }

        private String millisToUtc(String millis) {
            Long startTimeMilli = Long.parseLong(millis);
            ZonedDateTime startDate =
                    ZonedDateTime.ofInstant(Instant.ofEpochMilli(startTimeMilli), ZoneOffset.UTC);
            return DateTimeFormatter.ISO_DATE_TIME.format(startDate);
        }

        // Only whitelisted columns participate in generated SQL (injection guard).
        private boolean isValid() {
            return Arrays.asList(VALID_FIELDS).contains(attribute);
        }

        public void setAttribute(String attribute) {
            this.attribute = attribute;
        }

        public void setOperator(String operator) {
            this.operator = operator;
        }

        public void setValues(List<String> values) {
            this.values = values;
        }
    }

    /**
     * @param table the index table to select from
     * @param query structured query of {@code attr OP value} fragments joined by " AND "
     * @param freeText free-text term applied as a LIKE over json_data ("*" means no filter)
     * @param start paging offset
     * @param count paging limit
     * @param sort "field:ORDER" entries; invalid fields/orders are dropped
     * @param properties NOTE(review): currently unused here — confirm whether the full-text/JSON
     *     flags were meant to come from it.
     */
    public SqliteIndexQueryBuilder(
            String table,
            String query,
            String freeText,
            int start,
            int count,
            List<String> sort,
            SqliteProperties properties) {
        this.table = table;
        this.freeText = freeText;
        this.start = start;
        this.count = count;
        this.sort = sort;
        this.allowFullTextQueries = true;
        this.allowJsonQueries = true;
        this.parseQuery(query);
        this.parseFreeText(freeText);
    }

    /** Returns the paged SELECT; bind with {@link #addParameters} then {@link #addPagingParameters}. */
    public String getQuery() {
        return "SELECT json_data FROM " + table + buildWhereClause() + getSort() + " LIMIT ? OFFSET ?";
    }

    /** Returns the matching COUNT(*) query; bind with {@link #addParameters} only. */
    public String getCountQuery() {
        return "SELECT COUNT(*) FROM " + table + buildWhereClause();
    }

    // Shared WHERE construction for getQuery/getCountQuery (was duplicated in both).
    private String buildWhereClause() {
        List<String> fragments =
                conditions.stream()
                        .filter(Condition::isValid)
                        .map(Condition::getQueryFragment)
                        .collect(Collectors.toList());
        if (fragments.isEmpty()) {
            return "";
        }
        return " WHERE " + String.join(" AND ", fragments);
    }

    /** Binds every valid condition's values, in the same order the fragments were rendered. */
    public void addParameters(Query q) throws SQLException {
        for (Condition condition : conditions) {
            if (condition.isValid()) {
                condition.addParameter(q);
            }
        }
    }

    /** Binds LIMIT then OFFSET, matching the placeholder order in {@link #getQuery()}. */
    public void addPagingParameters(Query q) throws SQLException {
        q.addParameter(count);
        q.addParameter(start);
    }

    private void parseQuery(String query) {
        if (!StringUtils.isEmpty(query)) {
            for (String s : query.split(" AND ")) {
                conditions.add(new Condition(s));
            }
            // Deterministic fragment order so generated SQL (and bind order) is stable.
            Collections.sort(conditions, Comparator.comparing(Condition::getQueryFragment));
        }
    }

    private void parseFreeText(String freeText) {
        if (!StringUtils.isEmpty(freeText) && !freeText.equals("*")) {
            Condition cond = new Condition();
            cond.setAttribute("json_data");
            cond.setOperator("LIKE");
            String[] values = {freeText};
            cond.setValues(
                    Arrays.stream(values)
                            .map(v -> "%" + v.toLowerCase() + "%")
                            .collect(Collectors.toList()));
            conditions.add(cond);
        }
    }

    // Renders " ORDER BY ..." from whitelisted "field:ORDER" entries, or "" if none survive.
    private String getSort() {
        ArrayList<String> sortConds = new ArrayList<>();
        for (String s : sort) {
            String[] splitCond = s.split(":");
            if (splitCond.length == 2) {
                String attribute = camelToSnake(splitCond[0]);
                String order = splitCond[1].toUpperCase();
                if (Arrays.asList(VALID_FIELDS).contains(attribute)
                        && Arrays.asList(VALID_SORT_ORDER).contains(order)) {
                    sortConds.add(attribute + " " + order);
                }
            }
        }
        if (sortConds.size() > 0) {
            return " ORDER BY " + String.join(", ", sortConds);
        }
        return "";
    }

    private static String camelToSnake(String camel) {
        return camel.replaceAll("\\B([A-Z])", "_$1").toLowerCase();
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/sqlite-persistence/src/main/java/com/netflix/conductor/sqlite/config/SqliteConfiguration.java | sqlite-persistence/src/main/java/com/netflix/conductor/sqlite/config/SqliteConfiguration.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.sqlite.config;
import java.sql.SQLException;
import java.util.Optional;
import javax.sql.DataSource;
import org.flywaydb.core.Flyway;
import org.flywaydb.core.api.configuration.FluentConfiguration;
import org.springframework.beans.factory.annotation.Qualifier;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.boot.autoconfigure.jdbc.DataSourceAutoConfiguration;
import org.springframework.boot.context.properties.ConfigurationProperties;
import org.springframework.boot.context.properties.EnableConfigurationProperties;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.context.annotation.DependsOn;
import org.springframework.context.annotation.Import;
import org.springframework.retry.RetryContext;
import org.springframework.retry.backoff.NoBackOffPolicy;
import org.springframework.retry.policy.SimpleRetryPolicy;
import org.springframework.retry.support.RetryTemplate;
import com.netflix.conductor.core.sync.Lock;
import com.netflix.conductor.core.sync.local.LocalOnlyLock;
import com.netflix.conductor.sqlite.dao.*;
import com.netflix.conductor.sqlite.dao.metadata.SqliteEventHandlerMetadataDAO;
import com.netflix.conductor.sqlite.dao.metadata.SqliteMetadataDAO;
import com.netflix.conductor.sqlite.dao.metadata.SqliteTaskMetadataDAO;
import com.netflix.conductor.sqlite.dao.metadata.SqliteWorkflowMetadataDAO;
import com.fasterxml.jackson.databind.ObjectMapper;
import jakarta.annotation.PostConstruct;
@Configuration(proxyBeanMethods = false)
@EnableConfigurationProperties(SqliteProperties.class)
@ConditionalOnProperty(name = "conductor.db.type", havingValue = "sqlite")
@Import(DataSourceAutoConfiguration.class)
@ConfigurationProperties(prefix = "conductor.sqlite")
public class SqliteConfiguration {
// Shared DataSource injected by Spring; every DAO bean below is built on it.
DataSource dataSource;

private final SqliteProperties properties;

public SqliteConfiguration(DataSource dataSource, SqliteProperties properties) {
    this.dataSource = dataSource;
    this.properties = properties;
}

// Configures Flyway for the primary SQLite DB; all DAO beans @DependsOn this so the
// schema is migrated before any of them is used.
// NOTE(review): combining @Bean(initMethod = "migrate") with @PostConstruct is unusual —
// @PostConstruct on a @Bean factory method can cause the method to also run during the
// configuration class's own init, outside the bean lifecycle; confirm the intent
// (initMethod alone should be enough to trigger migration).
@Bean(initMethod = "migrate")
@PostConstruct
public Flyway flywayForPrimaryDb() {
    FluentConfiguration config =
            Flyway.configure()
                    .dataSource(dataSource) // SQLite doesn't need username/password
                    .locations("classpath:db/migration_sqlite") // Location of migration files
                    .sqlMigrationPrefix("V") // V1, V2, etc.
                    .sqlMigrationSeparator("__") // V1__description
                    .mixed(true) // Allow mixed migrations (both versioned and repeatable)
                    .validateOnMigrate(true)
                    // NOTE(review): cleanDisabled(false) keeps Flyway's destructive `clean`
                    // command enabled — verify this is intended outside of tests.
                    .cleanDisabled(false);
    // NOTE(review): new Flyway(config) compiles, but config.load() is the conventional
    // builder terminal — confirm equivalence for the Flyway version in use.
    return new Flyway(config);
}

// Facade metadata DAO composed from the three specialized metadata DAOs below.
@Bean
@DependsOn({"flywayForPrimaryDb"})
public SqliteMetadataDAO sqliteMetadataDAO(
        SqliteTaskMetadataDAO taskMetadataDAO,
        SqliteWorkflowMetadataDAO workflowMetadataDAO,
        SqliteEventHandlerMetadataDAO eventHandlerMetadataDAO) {
    return new SqliteMetadataDAO(taskMetadataDAO, workflowMetadataDAO, eventHandlerMetadataDAO);
}

@Bean
@DependsOn({"flywayForPrimaryDb"})
public SqliteEventHandlerMetadataDAO sqliteEventHandlerMetadataDAO(
        @Qualifier("sqliteRetryTemplate") RetryTemplate retryTemplate,
        ObjectMapper objectMapper) {
    return new SqliteEventHandlerMetadataDAO(retryTemplate, objectMapper, dataSource);
}

@Bean
@DependsOn({"flywayForPrimaryDb"})
public SqliteWorkflowMetadataDAO sqliteWorkflowMetadataDAO(
        @Qualifier("sqliteRetryTemplate") RetryTemplate retryTemplate,
        ObjectMapper objectMapper) {
    return new SqliteWorkflowMetadataDAO(retryTemplate, objectMapper, dataSource);
}

@Bean
@DependsOn({"flywayForPrimaryDb"})
public SqliteTaskMetadataDAO sqliteTaskMetadataDAO(
        @Qualifier("sqliteRetryTemplate") RetryTemplate retryTemplate,
        ObjectMapper objectMapper) {
    return new SqliteTaskMetadataDAO(retryTemplate, objectMapper, dataSource);
}

@Bean
@DependsOn({"flywayForPrimaryDb"})
public SqliteExecutionDAO sqliteExecutionDAO(
        @Qualifier("sqliteRetryTemplate") RetryTemplate retryTemplate,
        ObjectMapper objectMapper) {
    return new SqliteExecutionDAO(retryTemplate, objectMapper, dataSource);
}

@Bean
@DependsOn({"flywayForPrimaryDb"})
public SqlitePollDataDAO sqlitePollDataDAO(
        @Qualifier("sqliteRetryTemplate") RetryTemplate retryTemplate,
        ObjectMapper objectMapper) {
    return new SqlitePollDataDAO(retryTemplate, objectMapper, dataSource);
}

@Bean
@DependsOn({"flywayForPrimaryDb"})
public SqliteQueueDAO sqliteQueueDAO(
        @Qualifier("sqliteRetryTemplate") RetryTemplate retryTemplate,
        ObjectMapper objectMapper,
        SqliteProperties properties) {
    return new SqliteQueueDAO(retryTemplate, objectMapper, dataSource, properties);
}

// Index DAO only when SQLite is also the indexing backend.
@Bean
@DependsOn({"flywayForPrimaryDb"})
@ConditionalOnProperty(name = "conductor.indexing.type", havingValue = "sqlite")
public SqliteIndexDAO sqliteIndexDAO(
        @Qualifier("sqliteRetryTemplate") RetryTemplate retryTemplate,
        ObjectMapper objectMapper,
        SqliteProperties properties) {
    return new SqliteIndexDAO(retryTemplate, objectMapper, dataSource, properties);
}

// NOTE(review): this bean ignores both of its parameters and returns an in-process
// LocalOnlyLock, which is safe only for single-node deployments — confirm that is the
// intended semantics for workflow-execution-lock.type=sqlite.
@Bean
@DependsOn({"flywayForPrimaryDb"})
@ConditionalOnProperty(name = "conductor.workflow-execution-lock.type", havingValue = "sqlite")
public Lock sqliteLockDAO(
        @Qualifier("sqliteRetryTemplate") RetryTemplate retryTemplate,
        ObjectMapper objectMapper) {
    return new LocalOnlyLock();
}
@Bean
public RetryTemplate sqliteRetryTemplate(SqliteProperties properties) {
SimpleRetryPolicy retryPolicy = new CustomRetryPolicy();
retryPolicy.setMaxAttempts(3);
RetryTemplate retryTemplate = new RetryTemplate();
retryTemplate.setRetryPolicy(retryPolicy);
retryTemplate.setBackOffPolicy(new NoBackOffPolicy());
return retryTemplate;
}
public static class CustomRetryPolicy extends SimpleRetryPolicy {
private static final String ER_LOCK_DEADLOCK = "40P01";
private static final String ER_SERIALIZATION_FAILURE = "40001";
@Override
public boolean canRetry(final RetryContext context) {
final Optional<Throwable> lastThrowable =
Optional.ofNullable(context.getLastThrowable());
return lastThrowable
.map(throwable -> super.canRetry(context) && isDeadLockError(throwable))
.orElseGet(() -> super.canRetry(context));
}
private boolean isDeadLockError(Throwable throwable) {
SQLException sqlException = findCauseSQLException(throwable);
if (sqlException == null) {
return false;
}
return ER_LOCK_DEADLOCK.equals(sqlException.getSQLState())
|| ER_SERIALIZATION_FAILURE.equals(sqlException.getSQLState());
}
private SQLException findCauseSQLException(Throwable throwable) {
Throwable causeException = throwable;
while (null != causeException && !(causeException instanceof SQLException)) {
causeException = causeException.getCause();
}
return (SQLException) causeException;
}
}
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/sqlite-persistence/src/main/java/com/netflix/conductor/sqlite/config/SqliteProperties.java | sqlite-persistence/src/main/java/com/netflix/conductor/sqlite/config/SqliteProperties.java | /*
* Copyright 2025 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.sqlite.config;
import java.time.Duration;
import org.springframework.boot.context.properties.ConfigurationProperties;
@ConfigurationProperties("conductor.sqlite")
public class SqliteProperties {

    /** The time (in seconds) after which the in-memory task definitions cache will be refreshed */
    private Duration taskDefCacheRefreshInterval = Duration.ofSeconds(60);

    /** Maximum retry attempts for deadlock/serialization failures. */
    private Integer deadlockRetryMax = 3;

    /** When true, index workflows only on status transitions. */
    private boolean onlyIndexOnStatusChange = false;

    /** Maximum pool size for the async indexing executor. */
    private Integer asyncMaxPoolSize = 10;

    /** Work-queue capacity for the async indexing executor. */
    private Integer asyncWorkerQueueSize = 10;

    public Duration getTaskDefCacheRefreshInterval() {
        return taskDefCacheRefreshInterval;
    }

    public void setTaskDefCacheRefreshInterval(Duration taskDefCacheRefreshInterval) {
        this.taskDefCacheRefreshInterval = taskDefCacheRefreshInterval;
    }

    public Integer getDeadlockRetryMax() {
        return deadlockRetryMax;
    }

    public void setDeadlockRetryMax(Integer deadlockRetryMax) {
        this.deadlockRetryMax = deadlockRetryMax;
    }

    public int getAsyncMaxPoolSize() {
        return asyncMaxPoolSize;
    }

    // Fix: setters were missing for the three properties below, so Spring Boot's JavaBean
    // binding could never populate them from configuration — they were stuck at their
    // defaults regardless of what the user set under conductor.sqlite.*.
    public void setAsyncMaxPoolSize(Integer asyncMaxPoolSize) {
        this.asyncMaxPoolSize = asyncMaxPoolSize;
    }

    public int getAsyncWorkerQueueSize() {
        return asyncWorkerQueueSize;
    }

    public void setAsyncWorkerQueueSize(Integer asyncWorkerQueueSize) {
        this.asyncWorkerQueueSize = asyncWorkerQueueSize;
    }

    public boolean getOnlyIndexOnStatusChange() {
        return onlyIndexOnStatusChange;
    }

    public void setOnlyIndexOnStatusChange(boolean onlyIndexOnStatusChange) {
        this.onlyIndexOnStatusChange = onlyIndexOnStatusChange;
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/server-lite/src/main/java/org/conductoross/conductor/Conductor.java | server-lite/src/main/java/org/conductoross/conductor/Conductor.java | /*
* Copyright 2021 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package org.conductoross.conductor;
import java.io.IOException;
import java.util.Properties;
import org.apache.commons.lang3.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.SpringBootApplication;
import org.springframework.boot.autoconfigure.jdbc.DataSourceAutoConfiguration;
import org.springframework.context.annotation.ComponentScan;
import org.springframework.context.annotation.FilterType;
import org.springframework.core.io.FileSystemResource;
import com.netflix.conductor.rest.config.RestConfiguration;
// Prevents the datasource beans from being loaded, as they are needed only for specific databases.
// In case an SQL database is selected, this class will be imported back in the appropriate
// database persistence module.
@SpringBootApplication(exclude = {DataSourceAutoConfiguration.class})
@ComponentScan(
        basePackages = {"com.netflix.conductor", "io.orkes.conductor", "org.conductoross"},
        excludeFilters =
                @ComponentScan.Filter(
                        type = FilterType.ASSIGNABLE_TYPE,
                        classes = {RestConfiguration.class}))
public class Conductor {

    private static final Logger log = LoggerFactory.getLogger(Conductor.class);

    public static void main(String[] args) throws IOException {
        loadExternalConfig();
        SpringApplication.run(Conductor.class, args);
    }

    /**
     * Reads properties from the location specified in <code>CONDUCTOR_CONFIG_FILE</code> and sets
     * them as system properties so they override the default properties.
     *
     * <p>Fix: the lookup key was <code>CONDUCTOR_CONFIG_FILE2</code>, contradicting this very
     * Javadoc — users setting the documented <code>CONDUCTOR_CONFIG_FILE</code> variable were
     * silently ignored. The key now matches the documented name; the system property takes
     * precedence over the environment variable.
     *
     * <p>Spring Boot property hierarchy is documented here,
     * https://docs.spring.io/spring-boot/docs/current/reference/html/spring-boot-features.html#boot-features-external-config
     *
     * @throws IOException if file can't be read.
     */
    private static void loadExternalConfig() throws IOException {
        String configFile = System.getProperty("CONDUCTOR_CONFIG_FILE");
        if (StringUtils.isBlank(configFile)) {
            configFile = System.getenv("CONDUCTOR_CONFIG_FILE");
        }
        if (StringUtils.isBlank(configFile)) {
            // No external configuration requested; run with the packaged defaults.
            return;
        }
        log.info("Loading {}", configFile);
        FileSystemResource resource = new FileSystemResource(configFile);
        if (!resource.exists()) {
            log.warn("Ignoring {} since it does not exist", configFile);
            return;
        }
        Properties properties = new Properties();
        properties.load(resource.getInputStream());
        properties.forEach((key, value) -> System.setProperty((String) key, (String) value));
        log.info("Loaded {} properties from {}", properties.size(), configFile);
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/server-lite/src/main/java/org/conductoross/conductor/RestConfiguration.java | server-lite/src/main/java/org/conductoross/conductor/RestConfiguration.java | /*
* Copyright 2025 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package org.conductoross.conductor;
import org.springframework.context.annotation.Configuration;
import org.springframework.core.Ordered;
import org.springframework.web.servlet.config.annotation.ContentNegotiationConfigurer;
import org.springframework.web.servlet.config.annotation.InterceptorRegistry;
import org.springframework.web.servlet.config.annotation.ResourceHandlerRegistry;
import org.springframework.web.servlet.config.annotation.WebMvcConfigurer;
import lombok.extern.slf4j.Slf4j;
import static org.springframework.http.MediaType.APPLICATION_JSON;
import static org.springframework.http.MediaType.APPLICATION_OCTET_STREAM;
import static org.springframework.http.MediaType.TEXT_PLAIN;
@Configuration
@Slf4j
public class RestConfiguration implements WebMvcConfigurer {

    /** May be null if the SPA interceptor bean is absent; MVC hooks below no-op in that case. */
    private final SpaInterceptor spaInterceptor;

    public RestConfiguration(SpaInterceptor spaInterceptor) {
        this.spaInterceptor = spaInterceptor;
        log.info("spaInterceptor: {}", spaInterceptor);
    }

    /** Force JSON (then plain text, then octet-stream) and ignore the client's Accept header. */
    @Override
    public void configureContentNegotiation(ContentNegotiationConfigurer configurer) {
        configurer
                .favorParameter(false)
                .favorPathExtension(false)
                .ignoreAcceptHeader(true)
                .defaultContentType(APPLICATION_JSON, TEXT_PLAIN, APPLICATION_OCTET_STREAM);
    }

    /** Route everything except API/infra endpoints through the SPA forwarding interceptor. */
    @Override
    public void addInterceptors(InterceptorRegistry registry) {
        if (spaInterceptor == null) {
            return;
        }
        registry.addInterceptor(spaInterceptor)
                .excludePathPatterns(
                        "/api/**",
                        "/actuator/**",
                        "/health/**",
                        "/v3/api-docs",
                        "/v3/api-docs/**",
                        "/swagger-ui/**")
                .order(Ordered.HIGHEST_PRECEDENCE);
    }

    /** Expose the bundled UI assets from the classpath under /static/ui/. */
    @Override
    public void addResourceHandlers(ResourceHandlerRegistry registry) {
        if (spaInterceptor == null) {
            return;
        }
        log.info("Serving static resources");
        registry.addResourceHandler("/static/ui/**")
                .addResourceLocations("classpath:/static/ui/");
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/server-lite/src/main/java/org/conductoross/conductor/SpaInterceptor.java | server-lite/src/main/java/org/conductoross/conductor/SpaInterceptor.java | /*
* Copyright 2025 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package org.conductoross.conductor;
import org.springframework.stereotype.Component;
import org.springframework.web.servlet.HandlerInterceptor;
import jakarta.servlet.http.HttpServletRequest;
import jakarta.servlet.http.HttpServletResponse;
import lombok.extern.slf4j.Slf4j;
@Component
@Slf4j
public class SpaInterceptor implements HandlerInterceptor {

    public SpaInterceptor() {
        log.info("Serving UI on /");
    }

    /**
     * Forwards "page" requests to the SPA entry point (index.html) while letting API, health,
     * docs, error and asset requests pass through to their normal handlers.
     */
    @Override
    public boolean preHandle(
            HttpServletRequest request, HttpServletResponse response, Object handler)
            throws Exception {
        String uri = request.getRequestURI();
        log.debug("Service SPA page {}", uri);
        if (isPassThrough(uri)) {
            return true;
        }
        // Everything else is a client-side route: serve the SPA shell instead.
        request.getRequestDispatcher("/index.html").forward(request, response);
        return false;
    }

    /**
     * Paths the SPA must not intercept: the API, health check, api-docs, the error page, and
     * anything containing a dot (treated as a static resource).
     */
    private boolean isPassThrough(String path) {
        return path.startsWith("/api/")
                || "/health".equals(path)
                || "/api-docs".equals(path)
                || "/error".equals(path)
                || path.indexOf('.') >= 0;
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/grpc/src/test/java/com/netflix/conductor/grpc/TestProtoMapper.java | grpc/src/test/java/com/netflix/conductor/grpc/TestProtoMapper.java | /*
* Copyright 2023 Conductor authors
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.grpc;
import com.netflix.conductor.common.metadata.workflow.WorkflowTask;
import com.netflix.conductor.proto.WorkflowTaskPb;
import org.junit.Test;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNull;
public class TestProtoMapper {

    private final ProtoMapper mapper = ProtoMapper.INSTANCE;

    /** Model -> proto: an unset retryCount becomes -1 on the wire; explicit values pass through. */
    @Test
    public void workflowTaskToProto() {
        final WorkflowTask unsetRetries = new WorkflowTask();
        final WorkflowTask oneRetry = new WorkflowTask();
        oneRetry.setRetryCount(1);
        final WorkflowTask zeroRetries = new WorkflowTask();
        zeroRetries.setRetryCount(0);
        assertEquals(-1, mapper.toProto(unsetRetries).getRetryCount());
        assertEquals(1, mapper.toProto(oneRetry).getRetryCount());
        assertEquals(0, mapper.toProto(zeroRetries).getRetryCount());
    }

    /** Proto -> model: the proto default maps to 0, and -1 on the wire maps back to null. */
    @Test
    public void workflowTaskFromProto() {
        final WorkflowTaskPb.WorkflowTask protoDefault =
                WorkflowTaskPb.WorkflowTask.newBuilder().build();
        final WorkflowTaskPb.WorkflowTask protoOneRetry =
                WorkflowTaskPb.WorkflowTask.newBuilder().setRetryCount(1).build();
        final WorkflowTaskPb.WorkflowTask protoMinusOne =
                WorkflowTaskPb.WorkflowTask.newBuilder().setRetryCount(-1).build();
        assertEquals(Integer.valueOf(0), mapper.fromProto(protoDefault).getRetryCount());
        assertEquals(1, mapper.fromProto(protoOneRetry).getRetryCount().intValue());
        assertNull(mapper.fromProto(protoMinusOne).getRetryCount());
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/grpc/src/main/java/com/netflix/conductor/grpc/AbstractProtoMapper.java | grpc/src/main/java/com/netflix/conductor/grpc/AbstractProtoMapper.java | package com.netflix.conductor.grpc;
import com.google.protobuf.Any;
import com.google.protobuf.Value;
import com.netflix.conductor.common.metadata.SchemaDef;
import com.netflix.conductor.common.metadata.events.EventExecution;
import com.netflix.conductor.common.metadata.events.EventHandler;
import com.netflix.conductor.common.metadata.tasks.ExecutionMetadata;
import com.netflix.conductor.common.metadata.tasks.PollData;
import com.netflix.conductor.common.metadata.tasks.Task;
import com.netflix.conductor.common.metadata.tasks.TaskDef;
import com.netflix.conductor.common.metadata.tasks.TaskExecLog;
import com.netflix.conductor.common.metadata.tasks.TaskResult;
import com.netflix.conductor.common.metadata.workflow.CacheConfig;
import com.netflix.conductor.common.metadata.workflow.DynamicForkJoinTask;
import com.netflix.conductor.common.metadata.workflow.DynamicForkJoinTaskList;
import com.netflix.conductor.common.metadata.workflow.RateLimitConfig;
import com.netflix.conductor.common.metadata.workflow.RerunWorkflowRequest;
import com.netflix.conductor.common.metadata.workflow.SkipTaskRequest;
import com.netflix.conductor.common.metadata.workflow.StartWorkflowRequest;
import com.netflix.conductor.common.metadata.workflow.StateChangeEvent;
import com.netflix.conductor.common.metadata.workflow.SubWorkflowParams;
import com.netflix.conductor.common.metadata.workflow.UpgradeWorkflowRequest;
import com.netflix.conductor.common.metadata.workflow.WorkflowDef;
import com.netflix.conductor.common.metadata.workflow.WorkflowDefSummary;
import com.netflix.conductor.common.metadata.workflow.WorkflowTask;
import com.netflix.conductor.common.run.TaskSummary;
import com.netflix.conductor.common.run.Workflow;
import com.netflix.conductor.common.run.WorkflowSummary;
import com.netflix.conductor.proto.CacheConfigPb;
import com.netflix.conductor.proto.DynamicForkJoinTaskListPb;
import com.netflix.conductor.proto.DynamicForkJoinTaskPb;
import com.netflix.conductor.proto.EventExecutionPb;
import com.netflix.conductor.proto.EventHandlerPb;
import com.netflix.conductor.proto.ExecutionMetadataPb;
import com.netflix.conductor.proto.PollDataPb;
import com.netflix.conductor.proto.RateLimitConfigPb;
import com.netflix.conductor.proto.RerunWorkflowRequestPb;
import com.netflix.conductor.proto.SchemaDefPb;
import com.netflix.conductor.proto.SkipTaskRequestPb;
import com.netflix.conductor.proto.StartWorkflowRequestPb;
import com.netflix.conductor.proto.StateChangeEventPb;
import com.netflix.conductor.proto.SubWorkflowParamsPb;
import com.netflix.conductor.proto.TaskDefPb;
import com.netflix.conductor.proto.TaskExecLogPb;
import com.netflix.conductor.proto.TaskPb;
import com.netflix.conductor.proto.TaskResultPb;
import com.netflix.conductor.proto.TaskSummaryPb;
import com.netflix.conductor.proto.UpgradeWorkflowRequestPb;
import com.netflix.conductor.proto.WorkflowDefPb;
import com.netflix.conductor.proto.WorkflowDefSummaryPb;
import com.netflix.conductor.proto.WorkflowPb;
import com.netflix.conductor.proto.WorkflowSummaryPb;
import com.netflix.conductor.proto.WorkflowTaskPb;
import jakarta.annotation.Generated;
import java.lang.IllegalArgumentException;
import java.lang.Object;
import java.lang.String;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
@Generated("com.netflix.conductor.annotationsprocessor.protogen")
public abstract class AbstractProtoMapper {
    // Maps the model CacheConfig to its protobuf form; null fields are left unset.
    public CacheConfigPb.CacheConfig toProto(CacheConfig from) {
        CacheConfigPb.CacheConfig.Builder to = CacheConfigPb.CacheConfig.newBuilder();
        if (from.getKey() != null) {
            to.setKey( from.getKey() );
        }
        to.setTtlInSecond( from.getTtlInSecond() );
        return to.build();
    }
    // Maps the protobuf CacheConfig back to the model type (proto defaults copied as-is).
    public CacheConfig fromProto(CacheConfigPb.CacheConfig from) {
        CacheConfig to = new CacheConfig();
        to.setKey( from.getKey() );
        to.setTtlInSecond( from.getTtlInSecond() );
        return to;
    }
    // Maps a DynamicForkJoinTask to proto; null fields are left unset and input-map values go
    // through the mapper's generic Object -> Value conversion.
    public DynamicForkJoinTaskPb.DynamicForkJoinTask toProto(DynamicForkJoinTask from) {
        DynamicForkJoinTaskPb.DynamicForkJoinTask.Builder to = DynamicForkJoinTaskPb.DynamicForkJoinTask.newBuilder();
        if (from.getTaskName() != null) {
            to.setTaskName( from.getTaskName() );
        }
        if (from.getWorkflowName() != null) {
            to.setWorkflowName( from.getWorkflowName() );
        }
        if (from.getReferenceName() != null) {
            to.setReferenceName( from.getReferenceName() );
        }
        for (Map.Entry<String, Object> pair : from.getInput().entrySet()) {
            to.putInput( pair.getKey(), toProto( pair.getValue() ) );
        }
        if (from.getType() != null) {
            to.setType( from.getType() );
        }
        return to.build();
    }
    // Maps the proto DynamicForkJoinTask back to the model; Value entries are converted back
    // to plain objects via the generic Value -> Object conversion.
    public DynamicForkJoinTask fromProto(DynamicForkJoinTaskPb.DynamicForkJoinTask from) {
        DynamicForkJoinTask to = new DynamicForkJoinTask();
        to.setTaskName( from.getTaskName() );
        to.setWorkflowName( from.getWorkflowName() );
        to.setReferenceName( from.getReferenceName() );
        Map<String, Object> inputMap = new HashMap<String, Object>();
        for (Map.Entry<String, Value> pair : from.getInputMap().entrySet()) {
            inputMap.put( pair.getKey(), fromProto( pair.getValue() ) );
        }
        to.setInput(inputMap);
        to.setType( from.getType() );
        return to;
    }
    // Maps the task-list wrapper to proto by converting each contained task.
    public DynamicForkJoinTaskListPb.DynamicForkJoinTaskList toProto(DynamicForkJoinTaskList from) {
        DynamicForkJoinTaskListPb.DynamicForkJoinTaskList.Builder to = DynamicForkJoinTaskListPb.DynamicForkJoinTaskList.newBuilder();
        for (DynamicForkJoinTask elem : from.getDynamicTasks()) {
            to.addDynamicTasks( toProto(elem) );
        }
        return to.build();
    }
    // Maps the proto task-list back to the model, collecting into a mutable ArrayList.
    public DynamicForkJoinTaskList fromProto(
            DynamicForkJoinTaskListPb.DynamicForkJoinTaskList from) {
        DynamicForkJoinTaskList to = new DynamicForkJoinTaskList();
        to.setDynamicTasks( from.getDynamicTasksList().stream().map(this::fromProto).collect(Collectors.toCollection(ArrayList::new)) );
        return to;
    }
    // Maps an EventExecution to proto; null fields are left unset, enums are translated via
    // their dedicated toProto overloads, and output values use the generic Object -> Value
    // conversion.
    public EventExecutionPb.EventExecution toProto(EventExecution from) {
        EventExecutionPb.EventExecution.Builder to = EventExecutionPb.EventExecution.newBuilder();
        if (from.getId() != null) {
            to.setId( from.getId() );
        }
        if (from.getMessageId() != null) {
            to.setMessageId( from.getMessageId() );
        }
        if (from.getName() != null) {
            to.setName( from.getName() );
        }
        if (from.getEvent() != null) {
            to.setEvent( from.getEvent() );
        }
        to.setCreated( from.getCreated() );
        if (from.getStatus() != null) {
            to.setStatus( toProto( from.getStatus() ) );
        }
        if (from.getAction() != null) {
            to.setAction( toProto( from.getAction() ) );
        }
        for (Map.Entry<String, Object> pair : from.getOutput().entrySet()) {
            to.putOutput( pair.getKey(), toProto( pair.getValue() ) );
        }
        return to.build();
    }
    // Maps the proto EventExecution back to the model; output Values are converted back to
    // plain objects.
    public EventExecution fromProto(EventExecutionPb.EventExecution from) {
        EventExecution to = new EventExecution();
        to.setId( from.getId() );
        to.setMessageId( from.getMessageId() );
        to.setName( from.getName() );
        to.setEvent( from.getEvent() );
        to.setCreated( from.getCreated() );
        to.setStatus( fromProto( from.getStatus() ) );
        to.setAction( fromProto( from.getAction() ) );
        Map<String, Object> outputMap = new HashMap<String, Object>();
        for (Map.Entry<String, Value> pair : from.getOutputMap().entrySet()) {
            outputMap.put( pair.getKey(), fromProto( pair.getValue() ) );
        }
        to.setOutput(outputMap);
        return to;
    }
    // One-to-one enum translation model -> proto; unknown constants fail fast.
    public EventExecutionPb.EventExecution.Status toProto(EventExecution.Status from) {
        EventExecutionPb.EventExecution.Status to;
        switch (from) {
            case IN_PROGRESS: to = EventExecutionPb.EventExecution.Status.IN_PROGRESS; break;
            case COMPLETED: to = EventExecutionPb.EventExecution.Status.COMPLETED; break;
            case FAILED: to = EventExecutionPb.EventExecution.Status.FAILED; break;
            case SKIPPED: to = EventExecutionPb.EventExecution.Status.SKIPPED; break;
            default: throw new IllegalArgumentException("Unexpected enum constant: " + from);
        }
        return to;
    }
    // One-to-one enum translation proto -> model; unknown constants fail fast.
    public EventExecution.Status fromProto(EventExecutionPb.EventExecution.Status from) {
        EventExecution.Status to;
        switch (from) {
            case IN_PROGRESS: to = EventExecution.Status.IN_PROGRESS; break;
            case COMPLETED: to = EventExecution.Status.COMPLETED; break;
            case FAILED: to = EventExecution.Status.FAILED; break;
            case SKIPPED: to = EventExecution.Status.SKIPPED; break;
            default: throw new IllegalArgumentException("Unexpected enum constant: " + from);
        }
        return to;
    }
    // Maps an EventHandler to proto; null fields are left unset and each action is converted
    // via its own toProto overload.
    public EventHandlerPb.EventHandler toProto(EventHandler from) {
        EventHandlerPb.EventHandler.Builder to = EventHandlerPb.EventHandler.newBuilder();
        if (from.getName() != null) {
            to.setName( from.getName() );
        }
        if (from.getEvent() != null) {
            to.setEvent( from.getEvent() );
        }
        if (from.getCondition() != null) {
            to.setCondition( from.getCondition() );
        }
        for (EventHandler.Action elem : from.getActions()) {
            to.addActions( toProto(elem) );
        }
        to.setActive( from.isActive() );
        if (from.getEvaluatorType() != null) {
            to.setEvaluatorType( from.getEvaluatorType() );
        }
        return to.build();
    }
    // Maps the proto EventHandler back to the model, collecting actions into a mutable list.
    public EventHandler fromProto(EventHandlerPb.EventHandler from) {
        EventHandler to = new EventHandler();
        to.setName( from.getName() );
        to.setEvent( from.getEvent() );
        to.setCondition( from.getCondition() );
        to.setActions( from.getActionsList().stream().map(this::fromProto).collect(Collectors.toCollection(ArrayList::new)) );
        to.setActive( from.getActive() );
        to.setEvaluatorType( from.getEvaluatorType() );
        return to;
    }
    // Maps the UpdateWorkflowVariables action payload to proto; variables use the generic
    // Object -> Value conversion and the nullable appendArray flag is only set when present.
    public EventHandlerPb.EventHandler.UpdateWorkflowVariables toProto(
            EventHandler.UpdateWorkflowVariables from) {
        EventHandlerPb.EventHandler.UpdateWorkflowVariables.Builder to = EventHandlerPb.EventHandler.UpdateWorkflowVariables.newBuilder();
        if (from.getWorkflowId() != null) {
            to.setWorkflowId( from.getWorkflowId() );
        }
        for (Map.Entry<String, Object> pair : from.getVariables().entrySet()) {
            to.putVariables( pair.getKey(), toProto( pair.getValue() ) );
        }
        if (from.isAppendArray() != null) {
            to.setAppendArray( from.isAppendArray() );
        }
        return to.build();
    }
    // Maps the proto UpdateWorkflowVariables payload back to the model.
    public EventHandler.UpdateWorkflowVariables fromProto(
            EventHandlerPb.EventHandler.UpdateWorkflowVariables from) {
        EventHandler.UpdateWorkflowVariables to = new EventHandler.UpdateWorkflowVariables();
        to.setWorkflowId( from.getWorkflowId() );
        Map<String, Object> variablesMap = new HashMap<String, Object>();
        for (Map.Entry<String, Value> pair : from.getVariablesMap().entrySet()) {
            variablesMap.put( pair.getKey(), fromProto( pair.getValue() ) );
        }
        to.setVariables(variablesMap);
        to.setAppendArray( from.getAppendArray() );
        return to;
    }
    // Maps the TerminateWorkflow action payload to proto; null fields are left unset.
    public EventHandlerPb.EventHandler.TerminateWorkflow toProto(
            EventHandler.TerminateWorkflow from) {
        EventHandlerPb.EventHandler.TerminateWorkflow.Builder to = EventHandlerPb.EventHandler.TerminateWorkflow.newBuilder();
        if (from.getWorkflowId() != null) {
            to.setWorkflowId( from.getWorkflowId() );
        }
        if (from.getTerminationReason() != null) {
            to.setTerminationReason( from.getTerminationReason() );
        }
        return to.build();
    }
    // Maps the proto TerminateWorkflow payload back to the model.
    public EventHandler.TerminateWorkflow fromProto(
            EventHandlerPb.EventHandler.TerminateWorkflow from) {
        EventHandler.TerminateWorkflow to = new EventHandler.TerminateWorkflow();
        to.setWorkflowId( from.getWorkflowId() );
        to.setTerminationReason( from.getTerminationReason() );
        return to;
    }
    // Maps the StartWorkflow action payload to proto; null fields are left unset, input values
    // use the generic Object -> Value conversion, and the task-to-domain map is copied directly.
    public EventHandlerPb.EventHandler.StartWorkflow toProto(EventHandler.StartWorkflow from) {
        EventHandlerPb.EventHandler.StartWorkflow.Builder to = EventHandlerPb.EventHandler.StartWorkflow.newBuilder();
        if (from.getName() != null) {
            to.setName( from.getName() );
        }
        if (from.getVersion() != null) {
            to.setVersion( from.getVersion() );
        }
        if (from.getCorrelationId() != null) {
            to.setCorrelationId( from.getCorrelationId() );
        }
        for (Map.Entry<String, Object> pair : from.getInput().entrySet()) {
            to.putInput( pair.getKey(), toProto( pair.getValue() ) );
        }
        if (from.getInputMessage() != null) {
            to.setInputMessage( toProto( from.getInputMessage() ) );
        }
        to.putAllTaskToDomain( from.getTaskToDomain() );
        return to.build();
    }
    // Maps the proto StartWorkflow payload back to the model; the optional inputMessage is
    // only copied when the proto field is present.
    public EventHandler.StartWorkflow fromProto(EventHandlerPb.EventHandler.StartWorkflow from) {
        EventHandler.StartWorkflow to = new EventHandler.StartWorkflow();
        to.setName( from.getName() );
        to.setVersion( from.getVersion() );
        to.setCorrelationId( from.getCorrelationId() );
        Map<String, Object> inputMap = new HashMap<String, Object>();
        for (Map.Entry<String, Value> pair : from.getInputMap().entrySet()) {
            inputMap.put( pair.getKey(), fromProto( pair.getValue() ) );
        }
        to.setInput(inputMap);
        if (from.hasInputMessage()) {
            to.setInputMessage( fromProto( from.getInputMessage() ) );
        }
        to.setTaskToDomain( from.getTaskToDomainMap() );
        return to;
    }
    // Maps the TaskDetails action payload to proto; null fields are left unset and output
    // values use the generic Object -> Value conversion.
    public EventHandlerPb.EventHandler.TaskDetails toProto(EventHandler.TaskDetails from) {
        EventHandlerPb.EventHandler.TaskDetails.Builder to = EventHandlerPb.EventHandler.TaskDetails.newBuilder();
        if (from.getWorkflowId() != null) {
            to.setWorkflowId( from.getWorkflowId() );
        }
        if (from.getTaskRefName() != null) {
            to.setTaskRefName( from.getTaskRefName() );
        }
        for (Map.Entry<String, Object> pair : from.getOutput().entrySet()) {
            to.putOutput( pair.getKey(), toProto( pair.getValue() ) );
        }
        if (from.getOutputMessage() != null) {
            to.setOutputMessage( toProto( from.getOutputMessage() ) );
        }
        if (from.getTaskId() != null) {
            to.setTaskId( from.getTaskId() );
        }
        return to.build();
    }
    // Maps the proto TaskDetails payload back to the model; the optional outputMessage is only
    // copied when the proto field is present.
    public EventHandler.TaskDetails fromProto(EventHandlerPb.EventHandler.TaskDetails from) {
        EventHandler.TaskDetails to = new EventHandler.TaskDetails();
        to.setWorkflowId( from.getWorkflowId() );
        to.setTaskRefName( from.getTaskRefName() );
        Map<String, Object> outputMap = new HashMap<String, Object>();
        for (Map.Entry<String, Value> pair : from.getOutputMap().entrySet()) {
            outputMap.put( pair.getKey(), fromProto( pair.getValue() ) );
        }
        to.setOutput(outputMap);
        if (from.hasOutputMessage()) {
            to.setOutputMessage( fromProto( from.getOutputMessage() ) );
        }
        to.setTaskId( from.getTaskId() );
        return to;
    }
    // Maps an EventHandler.Action to proto; only the non-null action payload(s) are set, each
    // via its dedicated toProto overload.
    public EventHandlerPb.EventHandler.Action toProto(EventHandler.Action from) {
        EventHandlerPb.EventHandler.Action.Builder to = EventHandlerPb.EventHandler.Action.newBuilder();
        if (from.getAction() != null) {
            to.setAction( toProto( from.getAction() ) );
        }
        if (from.getStart_workflow() != null) {
            to.setStartWorkflow( toProto( from.getStart_workflow() ) );
        }
        if (from.getComplete_task() != null) {
            to.setCompleteTask( toProto( from.getComplete_task() ) );
        }
        if (from.getFail_task() != null) {
            to.setFailTask( toProto( from.getFail_task() ) );
        }
        to.setExpandInlineJson( from.isExpandInlineJSON() );
        if (from.getTerminate_workflow() != null) {
            to.setTerminateWorkflow( toProto( from.getTerminate_workflow() ) );
        }
        if (from.getUpdate_workflow_variables() != null) {
            to.setUpdateWorkflowVariables( toProto( from.getUpdate_workflow_variables() ) );
        }
        return to.build();
    }
    // Maps the proto Action back to the model; optional payloads are only copied when the
    // corresponding proto field is present.
    public EventHandler.Action fromProto(EventHandlerPb.EventHandler.Action from) {
        EventHandler.Action to = new EventHandler.Action();
        to.setAction( fromProto( from.getAction() ) );
        if (from.hasStartWorkflow()) {
            to.setStart_workflow( fromProto( from.getStartWorkflow() ) );
        }
        if (from.hasCompleteTask()) {
            to.setComplete_task( fromProto( from.getCompleteTask() ) );
        }
        if (from.hasFailTask()) {
            to.setFail_task( fromProto( from.getFailTask() ) );
        }
        to.setExpandInlineJSON( from.getExpandInlineJson() );
        if (from.hasTerminateWorkflow()) {
            to.setTerminate_workflow( fromProto( from.getTerminateWorkflow() ) );
        }
        if (from.hasUpdateWorkflowVariables()) {
            to.setUpdate_workflow_variables( fromProto( from.getUpdateWorkflowVariables() ) );
        }
        return to;
    }
    /** Converts the POJO action-type enum to its protobuf counterpart; throws {@code IllegalArgumentException} for an unmapped constant. */
    public EventHandlerPb.EventHandler.Action.Type toProto(EventHandler.Action.Type from) {
        EventHandlerPb.EventHandler.Action.Type to;
        switch (from) {
            case start_workflow: to = EventHandlerPb.EventHandler.Action.Type.START_WORKFLOW; break;
            case complete_task: to = EventHandlerPb.EventHandler.Action.Type.COMPLETE_TASK; break;
            case fail_task: to = EventHandlerPb.EventHandler.Action.Type.FAIL_TASK; break;
            case terminate_workflow: to = EventHandlerPb.EventHandler.Action.Type.TERMINATE_WORKFLOW; break;
            case update_workflow_variables: to = EventHandlerPb.EventHandler.Action.Type.UPDATE_WORKFLOW_VARIABLES; break;
            default: throw new IllegalArgumentException("Unexpected enum constant: " + from);
        }
        return to;
    }
    /** Converts the protobuf action-type enum to the POJO constant; throws {@code IllegalArgumentException} for an unmapped value (e.g. UNRECOGNIZED). */
    public EventHandler.Action.Type fromProto(EventHandlerPb.EventHandler.Action.Type from) {
        EventHandler.Action.Type to;
        switch (from) {
            case START_WORKFLOW: to = EventHandler.Action.Type.start_workflow; break;
            case COMPLETE_TASK: to = EventHandler.Action.Type.complete_task; break;
            case FAIL_TASK: to = EventHandler.Action.Type.fail_task; break;
            case TERMINATE_WORKFLOW: to = EventHandler.Action.Type.terminate_workflow; break;
            case UPDATE_WORKFLOW_VARIABLES: to = EventHandler.Action.Type.update_workflow_variables; break;
            default: throw new IllegalArgumentException("Unexpected enum constant: " + from);
        }
        return to;
    }
    /** Maps an {@code ExecutionMetadata} POJO to protobuf; null boxed timestamps/latencies are skipped, and each additional-context value goes through {@code toProto(Object)}. */
    public ExecutionMetadataPb.ExecutionMetadata toProto(ExecutionMetadata from) {
        ExecutionMetadataPb.ExecutionMetadata.Builder to = ExecutionMetadataPb.ExecutionMetadata.newBuilder();
        if (from.getServerSendTime() != null) {
            to.setServerSendTime( from.getServerSendTime() );
        }
        if (from.getClientReceiveTime() != null) {
            to.setClientReceiveTime( from.getClientReceiveTime() );
        }
        if (from.getExecutionStartTime() != null) {
            to.setExecutionStartTime( from.getExecutionStartTime() );
        }
        if (from.getExecutionEndTime() != null) {
            to.setExecutionEndTime( from.getExecutionEndTime() );
        }
        if (from.getClientSendTime() != null) {
            to.setClientSendTime( from.getClientSendTime() );
        }
        if (from.getPollNetworkLatency() != null) {
            to.setPollNetworkLatency( from.getPollNetworkLatency() );
        }
        if (from.getUpdateNetworkLatency() != null) {
            to.setUpdateNetworkLatency( from.getUpdateNetworkLatency() );
        }
        for (Map.Entry<String, Object> pair : from.getAdditionalContext().entrySet()) {
            to.putAdditionalContext( pair.getKey(), toProto( pair.getValue() ) );
        }
        return to.build();
    }
    /** Maps a protobuf {@code ExecutionMetadata} back to the POJO, rebuilding the additional-context map with {@code fromProto(Value)} values. */
    public ExecutionMetadata fromProto(ExecutionMetadataPb.ExecutionMetadata from) {
        ExecutionMetadata to = new ExecutionMetadata();
        to.setServerSendTime( from.getServerSendTime() );
        to.setClientReceiveTime( from.getClientReceiveTime() );
        to.setExecutionStartTime( from.getExecutionStartTime() );
        to.setExecutionEndTime( from.getExecutionEndTime() );
        to.setClientSendTime( from.getClientSendTime() );
        to.setPollNetworkLatency( from.getPollNetworkLatency() );
        to.setUpdateNetworkLatency( from.getUpdateNetworkLatency() );
        Map<String, Object> additionalContextMap = new HashMap<String, Object>();
        for (Map.Entry<String, Value> pair : from.getAdditionalContextMap().entrySet()) {
            additionalContextMap.put( pair.getKey(), fromProto( pair.getValue() ) );
        }
        to.setAdditionalContext(additionalContextMap);
        return to;
    }
    /** Maps a {@code PollData} POJO to protobuf; null string fields are left unset. */
    public PollDataPb.PollData toProto(PollData from) {
        PollDataPb.PollData.Builder to = PollDataPb.PollData.newBuilder();
        if (from.getQueueName() != null) {
            to.setQueueName( from.getQueueName() );
        }
        if (from.getDomain() != null) {
            to.setDomain( from.getDomain() );
        }
        if (from.getWorkerId() != null) {
            to.setWorkerId( from.getWorkerId() );
        }
        to.setLastPollTime( from.getLastPollTime() );
        return to.build();
    }
    /** Maps a protobuf {@code PollData} back to the POJO (straight field copy). */
    public PollData fromProto(PollDataPb.PollData from) {
        PollData to = new PollData();
        to.setQueueName( from.getQueueName() );
        to.setDomain( from.getDomain() );
        to.setWorkerId( from.getWorkerId() );
        to.setLastPollTime( from.getLastPollTime() );
        return to;
    }
    /** Maps a {@code RateLimitConfig} POJO to protobuf; null key/policy are left unset. */
    public RateLimitConfigPb.RateLimitConfig toProto(RateLimitConfig from) {
        RateLimitConfigPb.RateLimitConfig.Builder to = RateLimitConfigPb.RateLimitConfig.newBuilder();
        if (from.getRateLimitKey() != null) {
            to.setRateLimitKey( from.getRateLimitKey() );
        }
        to.setConcurrentExecLimit( from.getConcurrentExecLimit() );
        if (from.getPolicy() != null) {
            to.setPolicy( toProto( from.getPolicy() ) );
        }
        return to.build();
    }
    /** Maps a protobuf {@code RateLimitConfig} back to the POJO, converting the policy enum. */
    public RateLimitConfig fromProto(RateLimitConfigPb.RateLimitConfig from) {
        RateLimitConfig to = new RateLimitConfig();
        to.setRateLimitKey( from.getRateLimitKey() );
        to.setConcurrentExecLimit( from.getConcurrentExecLimit() );
        to.setPolicy( fromProto( from.getPolicy() ) );
        return to;
    }
    /** Converts the POJO rate-limit policy enum to protobuf; throws {@code IllegalArgumentException} for an unmapped constant. */
    public RateLimitConfigPb.RateLimitConfig.RateLimitPolicy toProto(
            RateLimitConfig.RateLimitPolicy from) {
        RateLimitConfigPb.RateLimitConfig.RateLimitPolicy to;
        switch (from) {
            case QUEUE: to = RateLimitConfigPb.RateLimitConfig.RateLimitPolicy.QUEUE; break;
            case REJECT: to = RateLimitConfigPb.RateLimitConfig.RateLimitPolicy.REJECT; break;
            default: throw new IllegalArgumentException("Unexpected enum constant: " + from);
        }
        return to;
    }
    /** Converts the protobuf rate-limit policy enum to the POJO constant; throws {@code IllegalArgumentException} for an unmapped value. */
    public RateLimitConfig.RateLimitPolicy fromProto(
            RateLimitConfigPb.RateLimitConfig.RateLimitPolicy from) {
        RateLimitConfig.RateLimitPolicy to;
        switch (from) {
            case QUEUE: to = RateLimitConfig.RateLimitPolicy.QUEUE; break;
            case REJECT: to = RateLimitConfig.RateLimitPolicy.REJECT; break;
            default: throw new IllegalArgumentException("Unexpected enum constant: " + from);
        }
        return to;
    }
    /** Maps a {@code RerunWorkflowRequest} POJO to protobuf; workflow/task input values go through {@code toProto(Object)}. */
    public RerunWorkflowRequestPb.RerunWorkflowRequest toProto(RerunWorkflowRequest from) {
        RerunWorkflowRequestPb.RerunWorkflowRequest.Builder to = RerunWorkflowRequestPb.RerunWorkflowRequest.newBuilder();
        if (from.getReRunFromWorkflowId() != null) {
            to.setReRunFromWorkflowId( from.getReRunFromWorkflowId() );
        }
        for (Map.Entry<String, Object> pair : from.getWorkflowInput().entrySet()) {
            to.putWorkflowInput( pair.getKey(), toProto( pair.getValue() ) );
        }
        if (from.getReRunFromTaskId() != null) {
            to.setReRunFromTaskId( from.getReRunFromTaskId() );
        }
        for (Map.Entry<String, Object> pair : from.getTaskInput().entrySet()) {
            to.putTaskInput( pair.getKey(), toProto( pair.getValue() ) );
        }
        if (from.getCorrelationId() != null) {
            to.setCorrelationId( from.getCorrelationId() );
        }
        return to.build();
    }
    /** Maps a protobuf {@code RerunWorkflowRequest} back to the POJO, rebuilding the input maps from {@code Value} entries. */
    public RerunWorkflowRequest fromProto(RerunWorkflowRequestPb.RerunWorkflowRequest from) {
        RerunWorkflowRequest to = new RerunWorkflowRequest();
        to.setReRunFromWorkflowId( from.getReRunFromWorkflowId() );
        Map<String, Object> workflowInputMap = new HashMap<String, Object>();
        for (Map.Entry<String, Value> pair : from.getWorkflowInputMap().entrySet()) {
            workflowInputMap.put( pair.getKey(), fromProto( pair.getValue() ) );
        }
        to.setWorkflowInput(workflowInputMap);
        to.setReRunFromTaskId( from.getReRunFromTaskId() );
        Map<String, Object> taskInputMap = new HashMap<String, Object>();
        for (Map.Entry<String, Value> pair : from.getTaskInputMap().entrySet()) {
            taskInputMap.put( pair.getKey(), fromProto( pair.getValue() ) );
        }
        to.setTaskInput(taskInputMap);
        to.setCorrelationId( from.getCorrelationId() );
        return to;
    }
    /** Maps a {@code SchemaDef} POJO to protobuf; null name/type are left unset. */
    public SchemaDefPb.SchemaDef toProto(SchemaDef from) {
        SchemaDefPb.SchemaDef.Builder to = SchemaDefPb.SchemaDef.newBuilder();
        if (from.getName() != null) {
            to.setName( from.getName() );
        }
        to.setVersion( from.getVersion() );
        if (from.getType() != null) {
            to.setType( toProto( from.getType() ) );
        }
        return to.build();
    }
    /** Maps a protobuf {@code SchemaDef} back to the POJO, converting the type enum. */
    public SchemaDef fromProto(SchemaDefPb.SchemaDef from) {
        SchemaDef to = new SchemaDef();
        to.setName( from.getName() );
        to.setVersion( from.getVersion() );
        to.setType( fromProto( from.getType() ) );
        return to;
    }
    /** Converts the POJO schema-type enum to protobuf; throws {@code IllegalArgumentException} for an unmapped constant. */
    public SchemaDefPb.SchemaDef.Type toProto(SchemaDef.Type from) {
        SchemaDefPb.SchemaDef.Type to;
        switch (from) {
            case JSON: to = SchemaDefPb.SchemaDef.Type.JSON; break;
            case AVRO: to = SchemaDefPb.SchemaDef.Type.AVRO; break;
            case PROTOBUF: to = SchemaDefPb.SchemaDef.Type.PROTOBUF; break;
            default: throw new IllegalArgumentException("Unexpected enum constant: " + from);
        }
        return to;
    }
    /** Converts the protobuf schema-type enum to the POJO constant; throws {@code IllegalArgumentException} for an unmapped value. */
    public SchemaDef.Type fromProto(SchemaDefPb.SchemaDef.Type from) {
        SchemaDef.Type to;
        switch (from) {
            case JSON: to = SchemaDef.Type.JSON; break;
            case AVRO: to = SchemaDef.Type.AVRO; break;
            case PROTOBUF: to = SchemaDef.Type.PROTOBUF; break;
            default: throw new IllegalArgumentException("Unexpected enum constant: " + from);
        }
        return to;
    }
    /** Maps a protobuf {@code SkipTaskRequest} back to the POJO; input/output maps are rebuilt and optional messages copied only when present. */
    public SkipTaskRequest fromProto(SkipTaskRequestPb.SkipTaskRequest from) {
        SkipTaskRequest to = new SkipTaskRequest();
        Map<String, Object> taskInputMap = new HashMap<String, Object>();
        for (Map.Entry<String, Value> pair : from.getTaskInputMap().entrySet()) {
            taskInputMap.put( pair.getKey(), fromProto( pair.getValue() ) );
        }
        to.setTaskInput(taskInputMap);
        Map<String, Object> taskOutputMap = new HashMap<String, Object>();
        for (Map.Entry<String, Value> pair : from.getTaskOutputMap().entrySet()) {
            taskOutputMap.put( pair.getKey(), fromProto( pair.getValue() ) );
        }
        to.setTaskOutput(taskOutputMap);
        if (from.hasTaskInputMessage()) {
            to.setTaskInputMessage( fromProto( from.getTaskInputMessage() ) );
        }
        if (from.hasTaskOutputMessage()) {
            to.setTaskOutputMessage( fromProto( from.getTaskOutputMessage() ) );
        }
        return to;
    }
    /** Maps a {@code StartWorkflowRequest} POJO to protobuf; null boxed/reference fields are skipped and input values go through {@code toProto(Object)}. */
    public StartWorkflowRequestPb.StartWorkflowRequest toProto(StartWorkflowRequest from) {
        StartWorkflowRequestPb.StartWorkflowRequest.Builder to = StartWorkflowRequestPb.StartWorkflowRequest.newBuilder();
        if (from.getName() != null) {
            to.setName( from.getName() );
        }
        if (from.getVersion() != null) {
            to.setVersion( from.getVersion() );
        }
        if (from.getCorrelationId() != null) {
            to.setCorrelationId( from.getCorrelationId() );
        }
        for (Map.Entry<String, Object> pair : from.getInput().entrySet()) {
            to.putInput( pair.getKey(), toProto( pair.getValue() ) );
        }
        to.putAllTaskToDomain( from.getTaskToDomain() );
        if (from.getWorkflowDef() != null) {
            to.setWorkflowDef( toProto( from.getWorkflowDef() ) );
        }
        if (from.getExternalInputPayloadStoragePath() != null) {
            to.setExternalInputPayloadStoragePath( from.getExternalInputPayloadStoragePath() );
        }
        if (from.getPriority() != null) {
            to.setPriority( from.getPriority() );
        }
        if (from.getCreatedBy() != null) {
            to.setCreatedBy( from.getCreatedBy() );
        }
        return to.build();
    }
    /** Maps a protobuf {@code StartWorkflowRequest} back to the POJO; the embedded workflow definition is copied only when present. */
    public StartWorkflowRequest fromProto(StartWorkflowRequestPb.StartWorkflowRequest from) {
        StartWorkflowRequest to = new StartWorkflowRequest();
        to.setName( from.getName() );
        to.setVersion( from.getVersion() );
        to.setCorrelationId( from.getCorrelationId() );
        Map<String, Object> inputMap = new HashMap<String, Object>();
        for (Map.Entry<String, Value> pair : from.getInputMap().entrySet()) {
            inputMap.put( pair.getKey(), fromProto( pair.getValue() ) );
        }
        to.setInput(inputMap);
        to.setTaskToDomain( from.getTaskToDomainMap() );
        if (from.hasWorkflowDef()) {
            to.setWorkflowDef( fromProto( from.getWorkflowDef() ) );
        }
        to.setExternalInputPayloadStoragePath( from.getExternalInputPayloadStoragePath() );
        to.setPriority( from.getPriority() );
        to.setCreatedBy( from.getCreatedBy() );
        return to;
    }
    /** Maps a {@code StateChangeEvent} POJO to protobuf; payload values go through {@code toProto(Object)}. */
    public StateChangeEventPb.StateChangeEvent toProto(StateChangeEvent from) {
        StateChangeEventPb.StateChangeEvent.Builder to = StateChangeEventPb.StateChangeEvent.newBuilder();
        if (from.getType() != null) {
            to.setType( from.getType() );
        }
        for (Map.Entry<String, Object> pair : from.getPayload().entrySet()) {
            to.putPayload( pair.getKey(), toProto( pair.getValue() ) );
        }
        return to.build();
    }
    /** Maps a protobuf {@code StateChangeEvent} back to the POJO, rebuilding the payload map from {@code Value} entries. */
    public StateChangeEvent fromProto(StateChangeEventPb.StateChangeEvent from) {
        StateChangeEvent to = new StateChangeEvent();
        to.setType( from.getType() );
        Map<String, Object> payloadMap = new HashMap<String, Object>();
        for (Map.Entry<String, Value> pair : from.getPayloadMap().entrySet()) {
            payloadMap.put( pair.getKey(), fromProto( pair.getValue() ) );
        }
        to.setPayload(payloadMap);
        return to;
    }
    /** Maps a {@code SubWorkflowParams} POJO to protobuf; null name/version/definition are left unset. */
    public SubWorkflowParamsPb.SubWorkflowParams toProto(SubWorkflowParams from) {
        SubWorkflowParamsPb.SubWorkflowParams.Builder to = SubWorkflowParamsPb.SubWorkflowParams.newBuilder();
        if (from.getName() != null) {
            to.setName( from.getName() );
        }
        if (from.getVersion() != null) {
            to.setVersion( from.getVersion() );
        }
        to.putAllTaskToDomain( from.getTaskToDomain() );
        if (from.getWorkflowDefinition() != null) {
            to.setWorkflowDefinition( toProto( from.getWorkflowDefinition() ) );
        }
        return to.build();
    }
public SubWorkflowParams fromProto(SubWorkflowParamsPb.SubWorkflowParams from) {
SubWorkflowParams to = new SubWorkflowParams();
to.setName( from.getName() );
to.setVersion( from.getVersion() );
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | true |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/grpc/src/main/java/com/netflix/conductor/grpc/ProtoMapper.java | grpc/src/main/java/com/netflix/conductor/grpc/ProtoMapper.java | /*
* Copyright 2023 Conductor authors
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.grpc;
import com.google.protobuf.Any;
import com.google.protobuf.ListValue;
import com.google.protobuf.NullValue;
import com.google.protobuf.Struct;
import com.google.protobuf.Value;
import com.netflix.conductor.common.metadata.workflow.WorkflowTask;
import com.netflix.conductor.proto.WorkflowTaskPb;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
/**
* ProtoMapper implements conversion code between the internal models
* used by Conductor (POJOs) and their corresponding equivalents in
* the exposed Protocol Buffers interface.
*
* The vast majority of the mapping logic is implemented in the autogenerated
* {@link AbstractProtoMapper} class. This class only implements the custom
* logic for objects that need to be special cased in the API.
*/
public final class ProtoMapper extends AbstractProtoMapper {
    /** Shared singleton; the class is stateless so one instance suffices. */
    public static final ProtoMapper INSTANCE = new ProtoMapper();

    /** Sentinel encoded on the wire for a {@link WorkflowTask} whose retryCount is null. */
    private static final int NO_RETRY_VALUE = -1;

    private ProtoMapper() {}

    /**
     * Convert an {@link Object} instance into its equivalent {@link Value}
     * ProtoBuf object.
     *
     * The {@link Value} ProtoBuf message is a variant type that can define any
     * value representable as a native JSON type. Consequently, this method expects
     * the given {@link Object} instance to be a Java object instance of JSON-native
     * value, namely: null, {@link Boolean}, {@link Number}, {@link String},
     * {@link Map}, {@link List}.
     *
     * Any other values will cause an exception to be thrown.
     * See {@link ProtoMapper#fromProto(Value)} for the reverse mapping.
     *
     * @param val a Java object that can be represented natively in JSON
     * @return an instance of a {@link Value} ProtoBuf message
     * @throws ClassCastException if {@code val} cannot be represented as a JSON value
     */
    @Override
    @SuppressWarnings("unchecked")
    public Value toProto(Object val) {
        Value.Builder builder = Value.newBuilder();

        if (val == null) {
            builder.setNullValue(NullValue.NULL_VALUE);
        } else if (val instanceof Boolean) {
            builder.setBoolValue((Boolean) val);
        } else if (val instanceof Number) {
            // JSON has a single number type, so Integer/Long/Float/Double all map to
            // Value.number_value. The previous implementation accepted only Double and
            // threw ClassCastException for other numeric types, even though POJO maps
            // commonly contain Integers/Longs.
            // NOTE(review): long values above 2^53 lose precision here, exactly as they
            // would in plain JSON.
            builder.setNumberValue(((Number) val).doubleValue());
        } else if (val instanceof String) {
            builder.setStringValue((String) val);
        } else if (val instanceof Map) {
            Map<String, Object> map = (Map<String, Object>) val;
            Struct.Builder struct = Struct.newBuilder();
            for (Map.Entry<String, Object> pair : map.entrySet()) {
                struct.putFields(pair.getKey(), toProto(pair.getValue()));
            }
            builder.setStructValue(struct.build());
        } else if (val instanceof List) {
            ListValue.Builder list = ListValue.newBuilder();
            for (Object obj : (List<Object>) val) {
                list.addValues(toProto(obj));
            }
            builder.setListValue(list.build());
        } else {
            throw new ClassCastException("cannot map to Value type: " + val);
        }
        return builder.build();
    }

    /**
     * Convert a ProtoBuf {@link Value} message into its native Java object
     * equivalent.
     *
     * See {@link ProtoMapper#toProto(Object)} for the reverse mapping and the
     * possible values that can be returned from this method.
     *
     * @param any an instance of a ProtoBuf {@link Value} message
     * @return a native Java object representing the value (numbers come back as {@link Double})
     * @throws ClassCastException if the {@link Value} has no kind set
     */
    @Override
    public Object fromProto(Value any) {
        switch (any.getKindCase()) {
            case NULL_VALUE:
                return null;
            case BOOL_VALUE:
                return any.getBoolValue();
            case NUMBER_VALUE:
                return any.getNumberValue();
            case STRING_VALUE:
                return any.getStringValue();
            case STRUCT_VALUE:
                Struct struct = any.getStructValue();
                Map<String, Object> map = new HashMap<>();
                for (Map.Entry<String, Value> pair : struct.getFieldsMap().entrySet()) {
                    map.put(pair.getKey(), fromProto(pair.getValue()));
                }
                return map;
            case LIST_VALUE:
                List<Object> list = new ArrayList<>();
                for (Value val : any.getListValue().getValuesList()) {
                    list.add(fromProto(val));
                }
                return list;
            default:
                throw new ClassCastException("unset Value element: " + any);
        }
    }

    /**
     * Convert a WorkflowTaskList message wrapper into a {@link List} instance
     * with its contents.
     *
     * @param list an instance of a ProtoBuf message
     * @return a list with the contents of the message
     */
    @Override
    public List<WorkflowTask> fromProto(WorkflowTaskPb.WorkflowTask.WorkflowTaskList list) {
        return list.getTasksList().stream().map(this::fromProto).collect(Collectors.toList());
    }

    /** Encodes a null retryCount as {@link #NO_RETRY_VALUE} on top of the generated mapping. */
    @Override
    public WorkflowTaskPb.WorkflowTask toProto(final WorkflowTask from) {
        final WorkflowTaskPb.WorkflowTask.Builder to =
                WorkflowTaskPb.WorkflowTask.newBuilder(super.toProto(from));
        if (from.getRetryCount() == null) {
            to.setRetryCount(NO_RETRY_VALUE);
        }
        return to.build();
    }

    /** Decodes {@link #NO_RETRY_VALUE} back into a null retryCount on top of the generated mapping. */
    @Override
    public WorkflowTask fromProto(final WorkflowTaskPb.WorkflowTask from) {
        final WorkflowTask workflowTask = super.fromProto(from);
        if (from.getRetryCount() == NO_RETRY_VALUE) {
            workflowTask.setRetryCount(null);
        }
        return workflowTask;
    }

    /**
     * Convert a list of {@link WorkflowTask} instances into a ProtoBuf wrapper object.
     *
     * @param list a list of {@link WorkflowTask} instances
     * @return a ProtoBuf message wrapping the contents of the list
     */
    @Override
    public WorkflowTaskPb.WorkflowTask.WorkflowTaskList toProto(List<WorkflowTask> list) {
        return WorkflowTaskPb.WorkflowTask.WorkflowTaskList.newBuilder()
                .addAllTasks(list.stream().map(this::toProto)::iterator)
                .build();
    }

    /** {@link Any} payloads are passed through untouched in both directions. */
    @Override
    public Any toProto(Any in) {
        return in;
    }

    @Override
    public Any fromProto(Any in) {
        return in;
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/common-persistence/src/test/java/com/netflix/conductor/dao/ExecutionDAOTest.java | common-persistence/src/test/java/com/netflix/conductor/dao/ExecutionDAOTest.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.dao;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.UUID;
import java.util.stream.Collectors;
import org.apache.commons.lang3.builder.EqualsBuilder;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.ExpectedException;
import com.netflix.conductor.common.metadata.tasks.TaskDef;
import com.netflix.conductor.common.metadata.workflow.WorkflowDef;
import com.netflix.conductor.common.metadata.workflow.WorkflowTask;
import com.netflix.conductor.core.exception.NonTransientException;
import com.netflix.conductor.model.TaskModel;
import com.netflix.conductor.model.WorkflowModel;
import static org.junit.Assert.*;
public abstract class ExecutionDAOTest {
protected abstract ExecutionDAO getExecutionDAO();
protected ConcurrentExecutionLimitDAO getConcurrentExecutionLimitDAO() {
return (ConcurrentExecutionLimitDAO) getExecutionDAO();
}
@Rule public ExpectedException expectedException = ExpectedException.none();
@Test
public void testTaskExceedsLimit() {
TaskDef taskDefinition = new TaskDef();
taskDefinition.setName("task100");
taskDefinition.setConcurrentExecLimit(1);
WorkflowTask workflowTask = new WorkflowTask();
workflowTask.setName("task1");
workflowTask.setTaskDefinition(taskDefinition);
workflowTask.setTaskDefinition(taskDefinition);
List<TaskModel> tasks = new LinkedList<>();
for (int i = 0; i < 15; i++) {
TaskModel task = new TaskModel();
task.setScheduledTime(1L);
task.setSeq(i + 1);
task.setTaskId("t_" + i);
task.setWorkflowInstanceId("workflow_" + i);
task.setReferenceTaskName("task1");
task.setTaskDefName("task100");
tasks.add(task);
task.setStatus(TaskModel.Status.SCHEDULED);
task.setWorkflowTask(workflowTask);
}
getExecutionDAO().createTasks(tasks);
assertFalse(getConcurrentExecutionLimitDAO().exceedsLimit(tasks.get(0)));
tasks.get(0).setStatus(TaskModel.Status.IN_PROGRESS);
getExecutionDAO().updateTask(tasks.get(0));
for (TaskModel task : tasks) {
assertTrue(getConcurrentExecutionLimitDAO().exceedsLimit(task));
}
}
@Test
public void testCreateTaskException() {
TaskModel task = new TaskModel();
task.setScheduledTime(1L);
task.setSeq(1);
task.setTaskId(UUID.randomUUID().toString());
task.setTaskDefName("task1");
expectedException.expect(NonTransientException.class);
expectedException.expectMessage("Workflow instance id cannot be null");
getExecutionDAO().createTasks(Collections.singletonList(task));
task.setWorkflowInstanceId(UUID.randomUUID().toString());
expectedException.expect(NonTransientException.class);
expectedException.expectMessage("Task reference name cannot be null");
getExecutionDAO().createTasks(Collections.singletonList(task));
}
@Test
public void testCreateTaskException2() {
TaskModel task = new TaskModel();
task.setScheduledTime(1L);
task.setSeq(1);
task.setTaskId(UUID.randomUUID().toString());
task.setTaskDefName("task1");
task.setWorkflowInstanceId(UUID.randomUUID().toString());
expectedException.expect(NonTransientException.class);
expectedException.expectMessage("Task reference name cannot be null");
getExecutionDAO().createTasks(Collections.singletonList(task));
}
@Test
public void testTaskCreateDups() {
List<TaskModel> tasks = new LinkedList<>();
String workflowId = UUID.randomUUID().toString();
for (int i = 0; i < 3; i++) {
TaskModel task = new TaskModel();
task.setScheduledTime(1L);
task.setSeq(i + 1);
task.setTaskId(workflowId + "_t" + i);
task.setReferenceTaskName("t" + i);
task.setRetryCount(0);
task.setWorkflowInstanceId(workflowId);
task.setTaskDefName("task" + i);
task.setStatus(TaskModel.Status.IN_PROGRESS);
tasks.add(task);
}
// Let's insert a retried task
TaskModel task = new TaskModel();
task.setScheduledTime(1L);
task.setSeq(1);
task.setTaskId(workflowId + "_t" + 2);
task.setReferenceTaskName("t" + 2);
task.setRetryCount(1);
task.setWorkflowInstanceId(workflowId);
task.setTaskDefName("task" + 2);
task.setStatus(TaskModel.Status.IN_PROGRESS);
tasks.add(task);
// Duplicate task!
task = new TaskModel();
task.setScheduledTime(1L);
task.setSeq(1);
task.setTaskId(workflowId + "_t" + 1);
task.setReferenceTaskName("t" + 1);
task.setRetryCount(0);
task.setWorkflowInstanceId(workflowId);
task.setTaskDefName("task" + 1);
task.setStatus(TaskModel.Status.IN_PROGRESS);
tasks.add(task);
List<TaskModel> created = getExecutionDAO().createTasks(tasks);
assertEquals(tasks.size() - 1, created.size()); // 1 less
Set<String> srcIds =
tasks.stream()
.map(t -> t.getReferenceTaskName() + "." + t.getRetryCount())
.collect(Collectors.toSet());
Set<String> createdIds =
created.stream()
.map(t -> t.getReferenceTaskName() + "." + t.getRetryCount())
.collect(Collectors.toSet());
assertEquals(srcIds, createdIds);
List<TaskModel> pending = getExecutionDAO().getPendingTasksByWorkflow("task0", workflowId);
assertNotNull(pending);
assertEquals(1, pending.size());
assertTrue(EqualsBuilder.reflectionEquals(tasks.get(0), pending.get(0)));
List<TaskModel> found = getExecutionDAO().getTasks(tasks.get(0).getTaskDefName(), null, 1);
assertNotNull(found);
assertEquals(1, found.size());
assertTrue(EqualsBuilder.reflectionEquals(tasks.get(0), found.get(0)));
}
@Test
public void testTaskOps() {
List<TaskModel> tasks = new LinkedList<>();
String workflowId = UUID.randomUUID().toString();
for (int i = 0; i < 3; i++) {
TaskModel task = new TaskModel();
task.setScheduledTime(1L);
task.setSeq(1);
task.setTaskId(workflowId + "_t" + i);
task.setReferenceTaskName("testTaskOps" + i);
task.setRetryCount(0);
task.setWorkflowInstanceId(workflowId);
task.setTaskDefName("testTaskOps" + i);
task.setStatus(TaskModel.Status.IN_PROGRESS);
tasks.add(task);
}
for (int i = 0; i < 3; i++) {
TaskModel task = new TaskModel();
task.setScheduledTime(1L);
task.setSeq(1);
task.setTaskId("x" + workflowId + "_t" + i);
task.setReferenceTaskName("testTaskOps" + i);
task.setRetryCount(0);
task.setWorkflowInstanceId("x" + workflowId);
task.setTaskDefName("testTaskOps" + i);
task.setStatus(TaskModel.Status.IN_PROGRESS);
getExecutionDAO().createTasks(Collections.singletonList(task));
}
List<TaskModel> created = getExecutionDAO().createTasks(tasks);
assertEquals(tasks.size(), created.size());
List<TaskModel> pending =
getExecutionDAO().getPendingTasksForTaskType(tasks.get(0).getTaskDefName());
assertNotNull(pending);
assertEquals(2, pending.size());
// Pending list can come in any order. finding the one we are looking for and then
// comparing
TaskModel matching =
pending.stream()
.filter(task -> task.getTaskId().equals(tasks.get(0).getTaskId()))
.findAny()
.get();
assertTrue(EqualsBuilder.reflectionEquals(matching, tasks.get(0)));
for (int i = 0; i < 3; i++) {
TaskModel found = getExecutionDAO().getTask(workflowId + "_t" + i);
assertNotNull(found);
found.getOutputData().put("updated", true);
found.setStatus(TaskModel.Status.COMPLETED);
getExecutionDAO().updateTask(found);
}
List<String> taskIds =
tasks.stream().map(TaskModel::getTaskId).collect(Collectors.toList());
List<TaskModel> found = getExecutionDAO().getTasks(taskIds);
assertEquals(taskIds.size(), found.size());
found.forEach(
task -> {
assertTrue(task.getOutputData().containsKey("updated"));
assertEquals(true, task.getOutputData().get("updated"));
boolean removed = getExecutionDAO().removeTask(task.getTaskId());
assertTrue(removed);
});
found = getExecutionDAO().getTasks(taskIds);
assertTrue(found.isEmpty());
}
@Test
public void testPending() {
WorkflowDef def = new WorkflowDef();
def.setName("pending_count_test");
WorkflowModel workflow = createTestWorkflow();
workflow.setWorkflowDefinition(def);
List<String> workflowIds = generateWorkflows(workflow, 10);
long count = getExecutionDAO().getPendingWorkflowCount(def.getName());
assertEquals(10, count);
for (int i = 0; i < 10; i++) {
getExecutionDAO().removeFromPendingWorkflow(def.getName(), workflowIds.get(i));
}
count = getExecutionDAO().getPendingWorkflowCount(def.getName());
assertEquals(0, count);
}
@Test
public void complexExecutionTest() {
WorkflowModel workflow = createTestWorkflow();
int numTasks = workflow.getTasks().size();
String workflowId = getExecutionDAO().createWorkflow(workflow);
assertEquals(workflow.getWorkflowId(), workflowId);
List<TaskModel> created = getExecutionDAO().createTasks(workflow.getTasks());
assertEquals(workflow.getTasks().size(), created.size());
WorkflowModel workflowWithTasks =
getExecutionDAO().getWorkflow(workflow.getWorkflowId(), true);
assertEquals(workflowId, workflowWithTasks.getWorkflowId());
assertEquals(numTasks, workflowWithTasks.getTasks().size());
WorkflowModel found = getExecutionDAO().getWorkflow(workflowId, false);
assertTrue(found.getTasks().isEmpty());
workflow.getTasks().clear();
assertEquals(workflow, found);
workflow.getInput().put("updated", true);
getExecutionDAO().updateWorkflow(workflow);
found = getExecutionDAO().getWorkflow(workflowId);
assertNotNull(found);
assertTrue(found.getInput().containsKey("updated"));
assertEquals(true, found.getInput().get("updated"));
List<String> running =
getExecutionDAO()
.getRunningWorkflowIds(
workflow.getWorkflowName(), workflow.getWorkflowVersion());
assertNotNull(running);
assertTrue(running.isEmpty());
workflow.setStatus(WorkflowModel.Status.RUNNING);
getExecutionDAO().updateWorkflow(workflow);
running =
getExecutionDAO()
.getRunningWorkflowIds(
workflow.getWorkflowName(), workflow.getWorkflowVersion());
assertNotNull(running);
assertEquals(1, running.size());
assertEquals(workflow.getWorkflowId(), running.get(0));
List<WorkflowModel> pending =
getExecutionDAO()
.getPendingWorkflowsByType(
workflow.getWorkflowName(), workflow.getWorkflowVersion());
assertNotNull(pending);
assertEquals(1, pending.size());
assertEquals(3, pending.get(0).getTasks().size());
pending.get(0).getTasks().clear();
assertEquals(workflow, pending.get(0));
workflow.setStatus(WorkflowModel.Status.COMPLETED);
getExecutionDAO().updateWorkflow(workflow);
running =
getExecutionDAO()
.getRunningWorkflowIds(
workflow.getWorkflowName(), workflow.getWorkflowVersion());
assertNotNull(running);
assertTrue(running.isEmpty());
List<WorkflowModel> bytime =
getExecutionDAO()
.getWorkflowsByType(
workflow.getWorkflowName(),
System.currentTimeMillis(),
System.currentTimeMillis() + 100);
assertNotNull(bytime);
assertTrue(bytime.isEmpty());
bytime =
getExecutionDAO()
.getWorkflowsByType(
workflow.getWorkflowName(),
workflow.getCreateTime() - 10,
workflow.getCreateTime() + 10);
assertNotNull(bytime);
assertEquals(1, bytime.size());
}
protected WorkflowModel createTestWorkflow() {
WorkflowDef def = new WorkflowDef();
def.setName("Junit Workflow");
def.setVersion(3);
def.setSchemaVersion(2);
WorkflowModel workflow = new WorkflowModel();
workflow.setWorkflowDefinition(def);
workflow.setCorrelationId("correlationX");
workflow.setCreatedBy("junit_tester");
workflow.setEndTime(200L);
Map<String, Object> input = new HashMap<>();
input.put("param1", "param1 value");
input.put("param2", 100);
workflow.setInput(input);
Map<String, Object> output = new HashMap<>();
output.put("ouput1", "output 1 value");
output.put("op2", 300);
workflow.setOutput(output);
workflow.setOwnerApp("workflow");
workflow.setParentWorkflowId("parentWorkflowId");
workflow.setParentWorkflowTaskId("parentWFTaskId");
workflow.setReasonForIncompletion("missing recipe");
workflow.setReRunFromWorkflowId("re-run from id1");
workflow.setCreateTime(90L);
workflow.setStatus(WorkflowModel.Status.FAILED);
workflow.setWorkflowId(UUID.randomUUID().toString());
List<TaskModel> tasks = new LinkedList<>();
TaskModel task = new TaskModel();
task.setScheduledTime(1L);
task.setSeq(1);
task.setTaskId(UUID.randomUUID().toString());
task.setReferenceTaskName("t1");
task.setWorkflowInstanceId(workflow.getWorkflowId());
task.setTaskDefName("task1");
TaskModel task2 = new TaskModel();
task2.setScheduledTime(2L);
task2.setSeq(2);
task2.setTaskId(UUID.randomUUID().toString());
task2.setReferenceTaskName("t2");
task2.setWorkflowInstanceId(workflow.getWorkflowId());
task2.setTaskDefName("task2");
TaskModel task3 = new TaskModel();
task3.setScheduledTime(2L);
task3.setSeq(3);
task3.setTaskId(UUID.randomUUID().toString());
task3.setReferenceTaskName("t3");
task3.setWorkflowInstanceId(workflow.getWorkflowId());
task3.setTaskDefName("task3");
tasks.add(task);
tasks.add(task2);
tasks.add(task3);
workflow.setTasks(tasks);
workflow.setUpdatedBy("junit_tester");
workflow.setUpdatedTime(800L);
return workflow;
}
protected List<String> generateWorkflows(WorkflowModel base, int count) {
List<String> workflowIds = new ArrayList<>();
for (int i = 0; i < count; i++) {
String workflowId = UUID.randomUUID().toString();
base.setWorkflowId(workflowId);
base.setCorrelationId("corr001");
base.setStatus(WorkflowModel.Status.RUNNING);
getExecutionDAO().createWorkflow(base);
workflowIds.add(workflowId);
}
return workflowIds;
}
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/common-persistence/src/test/java/com/netflix/conductor/dao/TestBase.java | common-persistence/src/test/java/com/netflix/conductor/dao/TestBase.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.dao;
/** Intentionally empty base class for persistence DAO tests; it defines no shared behavior. */
public class TestBase {}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/grpc-server/src/test/java/com/netflix/conductor/grpc/server/service/HealthServiceImplTest.java | grpc-server/src/test/java/com/netflix/conductor/grpc/server/service/HealthServiceImplTest.java | /*
* Copyright 2020 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.grpc.server.service;
/**
 * Placeholder for {@code HealthServiceImpl} tests.
 *
 * <p>The original in-process gRPC health-check tests are commented out below pending the
 * SBMTODO migration to the Spring Boot health-check mechanism; they are retained verbatim
 * for reference.
 */
public class HealthServiceImplTest {
    // SBMTODO: Move this Spring boot health check
    //    @Rule
    //    public final GrpcCleanupRule grpcCleanup = new GrpcCleanupRule();
    //
    //    @Rule
    //    public ExpectedException thrown = ExpectedException.none();
    //
    //    @Test
    //    public void healthServing() throws Exception {
    //        // Generate a unique in-process server name.
    //        String serverName = InProcessServerBuilder.generateName();
    //        HealthCheckAggregator hca = mock(HealthCheckAggregator.class);
    //        CompletableFuture<HealthCheckStatus> hcsf = mock(CompletableFuture.class);
    //        HealthCheckStatus hcs = mock(HealthCheckStatus.class);
    //        when(hcs.isHealthy()).thenReturn(true);
    //        when(hcsf.get()).thenReturn(hcs);
    //        when(hca.check()).thenReturn(hcsf);
    //        HealthServiceImpl healthyService = new HealthServiceImpl(hca);
    //
    //        addService(serverName, healthyService);
    //        HealthGrpc.HealthBlockingStub blockingStub = HealthGrpc.newBlockingStub(
    //                // Create a client channel and register for automatic graceful shutdown.
    //
    // grpcCleanup.register(InProcessChannelBuilder.forName(serverName).directExecutor().build()));
    //
    //
    //        HealthCheckResponse reply =
    //                blockingStub.check(HealthCheckRequest.newBuilder().build());
    //
    //        assertEquals(HealthCheckResponse.ServingStatus.SERVING, reply.getStatus());
    //    }
    //
    //    @Test
    //    public void healthNotServing() throws Exception {
    //        // Generate a unique in-process server name.
    //        String serverName = InProcessServerBuilder.generateName();
    //        HealthCheckAggregator hca = mock(HealthCheckAggregator.class);
    //        CompletableFuture<HealthCheckStatus> hcsf = mock(CompletableFuture.class);
    //        HealthCheckStatus hcs = mock(HealthCheckStatus.class);
    //        when(hcs.isHealthy()).thenReturn(false);
    //        when(hcsf.get()).thenReturn(hcs);
    //        when(hca.check()).thenReturn(hcsf);
    //        HealthServiceImpl healthyService = new HealthServiceImpl(hca);
    //
    //        addService(serverName, healthyService);
    //        HealthGrpc.HealthBlockingStub blockingStub = HealthGrpc.newBlockingStub(
    //                // Create a client channel and register for automatic graceful shutdown.
    //
    // grpcCleanup.register(InProcessChannelBuilder.forName(serverName).directExecutor().build()));
    //
    //
    //        HealthCheckResponse reply =
    //                blockingStub.check(HealthCheckRequest.newBuilder().build());
    //
    //        assertEquals(HealthCheckResponse.ServingStatus.NOT_SERVING, reply.getStatus());
    //    }
    //
    //    @Test
    //    public void healthException() throws Exception {
    //        // Generate a unique in-process server name.
    //        String serverName = InProcessServerBuilder.generateName();
    //        HealthCheckAggregator hca = mock(HealthCheckAggregator.class);
    //        CompletableFuture<HealthCheckStatus> hcsf = mock(CompletableFuture.class);
    //        when(hcsf.get()).thenThrow(InterruptedException.class);
    //        when(hca.check()).thenReturn(hcsf);
    //        HealthServiceImpl healthyService = new HealthServiceImpl(hca);
    //
    //        addService(serverName, healthyService);
    //        HealthGrpc.HealthBlockingStub blockingStub = HealthGrpc.newBlockingStub(
    //                // Create a client channel and register for automatic graceful shutdown.
    //
    // grpcCleanup.register(InProcessChannelBuilder.forName(serverName).directExecutor().build()));
    //
    //        thrown.expect(StatusRuntimeException.class);
    //        thrown.expect(hasProperty("status", is(Status.INTERNAL)));
    //        blockingStub.check(HealthCheckRequest.newBuilder().build());
    //
    //    }
    //
    //    private void addService(String name, BindableService service) throws Exception {
    //        // Create a server, add service, start, and register for automatic graceful shutdown.
    //        grpcCleanup.register(InProcessServerBuilder
    //                .forName(name).directExecutor().addService(service).build().start());
    //    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/grpc-server/src/test/java/com/netflix/conductor/grpc/server/service/WorkflowServiceImplTest.java | grpc-server/src/test/java/com/netflix/conductor/grpc/server/service/WorkflowServiceImplTest.java | /*
* Copyright 2020 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.grpc.server.service;
import java.util.Collections;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;
import org.junit.Before;
import org.junit.Test;
import org.mockito.Mock;
import com.netflix.conductor.common.run.SearchResult;
import com.netflix.conductor.common.run.Workflow;
import com.netflix.conductor.common.run.WorkflowSummary;
import com.netflix.conductor.grpc.SearchPb;
import com.netflix.conductor.grpc.WorkflowServicePb;
import com.netflix.conductor.proto.WorkflowPb;
import com.netflix.conductor.proto.WorkflowSummaryPb;
import com.netflix.conductor.service.WorkflowService;
import io.grpc.stub.StreamObserver;
import static org.junit.Assert.assertEquals;
import static org.mockito.Mockito.*;
import static org.mockito.MockitoAnnotations.initMocks;
public class WorkflowServiceImplTest {
private static final String WORKFLOW_ID = "anyWorkflowId";
private static final Boolean RESUME_SUBWORKFLOW_TASKS = true;
@Mock private WorkflowService workflowService;
private WorkflowServiceImpl workflowServiceImpl;
@Before
public void init() {
initMocks(this);
workflowServiceImpl = new WorkflowServiceImpl(workflowService, 5000);
}
@SuppressWarnings("unchecked")
@Test
public void givenWorkflowIdWhenRetryWorkflowThenRetriedSuccessfully() {
// Given
WorkflowServicePb.RetryWorkflowRequest req =
WorkflowServicePb.RetryWorkflowRequest.newBuilder()
.setWorkflowId(WORKFLOW_ID)
.setResumeSubworkflowTasks(true)
.build();
// When
workflowServiceImpl.retryWorkflow(req, mock(StreamObserver.class));
// Then
verify(workflowService).retryWorkflow(WORKFLOW_ID, RESUME_SUBWORKFLOW_TASKS);
}
@Test
public void searchExceptionTest() throws InterruptedException {
CountDownLatch streamAlive = new CountDownLatch(1);
AtomicReference<Throwable> throwable = new AtomicReference<>();
SearchPb.Request req =
SearchPb.Request.newBuilder()
.setStart(1)
.setSize(50000)
.setSort("strings")
.setQuery("")
.setFreeText("")
.build();
StreamObserver<WorkflowServicePb.WorkflowSummarySearchResult> streamObserver =
new StreamObserver<>() {
@Override
public void onNext(WorkflowServicePb.WorkflowSummarySearchResult value) {}
@Override
public void onError(Throwable t) {
throwable.set(t);
streamAlive.countDown();
}
@Override
public void onCompleted() {
streamAlive.countDown();
}
};
workflowServiceImpl.search(req, streamObserver);
streamAlive.await(10, TimeUnit.MILLISECONDS);
assertEquals(
"INVALID_ARGUMENT: Cannot return more than 5000 results",
throwable.get().getMessage());
}
@Test
public void searchV2ExceptionTest() throws InterruptedException {
CountDownLatch streamAlive = new CountDownLatch(1);
AtomicReference<Throwable> throwable = new AtomicReference<>();
SearchPb.Request req =
SearchPb.Request.newBuilder()
.setStart(1)
.setSize(50000)
.setSort("strings")
.setQuery("")
.setFreeText("")
.build();
StreamObserver<WorkflowServicePb.WorkflowSearchResult> streamObserver =
new StreamObserver<>() {
@Override
public void onNext(WorkflowServicePb.WorkflowSearchResult value) {}
@Override
public void onError(Throwable t) {
throwable.set(t);
streamAlive.countDown();
}
@Override
public void onCompleted() {
streamAlive.countDown();
}
};
workflowServiceImpl.searchV2(req, streamObserver);
streamAlive.await(10, TimeUnit.MILLISECONDS);
assertEquals(
"INVALID_ARGUMENT: Cannot return more than 5000 results",
throwable.get().getMessage());
}
@Test
public void searchTest() throws InterruptedException {
CountDownLatch streamAlive = new CountDownLatch(1);
AtomicReference<WorkflowServicePb.WorkflowSummarySearchResult> result =
new AtomicReference<>();
SearchPb.Request req =
SearchPb.Request.newBuilder()
.setStart(1)
.setSize(1)
.setSort("strings")
.setQuery("")
.setFreeText("")
.build();
StreamObserver<WorkflowServicePb.WorkflowSummarySearchResult> streamObserver =
new StreamObserver<>() {
@Override
public void onNext(WorkflowServicePb.WorkflowSummarySearchResult value) {
result.set(value);
}
@Override
public void onError(Throwable t) {
streamAlive.countDown();
}
@Override
public void onCompleted() {
streamAlive.countDown();
}
};
WorkflowSummary workflow = new WorkflowSummary();
SearchResult<WorkflowSummary> searchResult = new SearchResult<>();
searchResult.setTotalHits(1);
searchResult.setResults(Collections.singletonList(workflow));
when(workflowService.searchWorkflows(
anyInt(), anyInt(), anyList(), anyString(), anyString()))
.thenReturn(searchResult);
workflowServiceImpl.search(req, streamObserver);
streamAlive.await(10, TimeUnit.MILLISECONDS);
WorkflowServicePb.WorkflowSummarySearchResult workflowSearchResult = result.get();
assertEquals(1, workflowSearchResult.getTotalHits());
assertEquals(
WorkflowSummaryPb.WorkflowSummary.newBuilder().build(),
workflowSearchResult.getResultsList().get(0));
}
@Test
public void searchByTasksTest() throws InterruptedException {
CountDownLatch streamAlive = new CountDownLatch(1);
AtomicReference<WorkflowServicePb.WorkflowSummarySearchResult> result =
new AtomicReference<>();
SearchPb.Request req =
SearchPb.Request.newBuilder()
.setStart(1)
.setSize(1)
.setSort("strings")
.setQuery("")
.setFreeText("")
.build();
StreamObserver<WorkflowServicePb.WorkflowSummarySearchResult> streamObserver =
new StreamObserver<>() {
@Override
public void onNext(WorkflowServicePb.WorkflowSummarySearchResult value) {
result.set(value);
}
@Override
public void onError(Throwable t) {
streamAlive.countDown();
}
@Override
public void onCompleted() {
streamAlive.countDown();
}
};
WorkflowSummary workflow = new WorkflowSummary();
SearchResult<WorkflowSummary> searchResult = new SearchResult<>();
searchResult.setTotalHits(1);
searchResult.setResults(Collections.singletonList(workflow));
when(workflowService.searchWorkflowsByTasks(
anyInt(), anyInt(), anyList(), anyString(), anyString()))
.thenReturn(searchResult);
workflowServiceImpl.searchByTasks(req, streamObserver);
streamAlive.await(10, TimeUnit.MILLISECONDS);
WorkflowServicePb.WorkflowSummarySearchResult workflowSearchResult = result.get();
assertEquals(1, workflowSearchResult.getTotalHits());
assertEquals(
WorkflowSummaryPb.WorkflowSummary.newBuilder().build(),
workflowSearchResult.getResultsList().get(0));
}
@Test
public void searchV2Test() throws InterruptedException {
CountDownLatch streamAlive = new CountDownLatch(1);
AtomicReference<WorkflowServicePb.WorkflowSearchResult> result = new AtomicReference<>();
SearchPb.Request req =
SearchPb.Request.newBuilder()
.setStart(1)
.setSize(1)
.setSort("strings")
.setQuery("")
.setFreeText("")
.build();
StreamObserver<WorkflowServicePb.WorkflowSearchResult> streamObserver =
new StreamObserver<>() {
@Override
public void onNext(WorkflowServicePb.WorkflowSearchResult value) {
result.set(value);
}
@Override
public void onError(Throwable t) {
streamAlive.countDown();
}
@Override
public void onCompleted() {
streamAlive.countDown();
}
};
Workflow workflow = new Workflow();
SearchResult<Workflow> searchResult = new SearchResult<>();
searchResult.setTotalHits(1);
searchResult.setResults(Collections.singletonList(workflow));
when(workflowService.searchWorkflowsV2(1, 1, Collections.singletonList("strings"), "*", ""))
.thenReturn(searchResult);
workflowServiceImpl.searchV2(req, streamObserver);
streamAlive.await(10, TimeUnit.MILLISECONDS);
WorkflowServicePb.WorkflowSearchResult workflowSearchResult = result.get();
assertEquals(1, workflowSearchResult.getTotalHits());
assertEquals(
WorkflowPb.Workflow.newBuilder().build(),
workflowSearchResult.getResultsList().get(0));
}
@Test
public void searchByTasksV2Test() throws InterruptedException {
CountDownLatch streamAlive = new CountDownLatch(1);
AtomicReference<WorkflowServicePb.WorkflowSearchResult> result = new AtomicReference<>();
SearchPb.Request req =
SearchPb.Request.newBuilder()
.setStart(1)
.setSize(1)
.setSort("strings")
.setQuery("")
.setFreeText("")
.build();
StreamObserver<WorkflowServicePb.WorkflowSearchResult> streamObserver =
new StreamObserver<>() {
@Override
public void onNext(WorkflowServicePb.WorkflowSearchResult value) {
result.set(value);
}
@Override
public void onError(Throwable t) {
streamAlive.countDown();
}
@Override
public void onCompleted() {
streamAlive.countDown();
}
};
Workflow workflow = new Workflow();
SearchResult<Workflow> searchResult = new SearchResult<>();
searchResult.setTotalHits(1);
searchResult.setResults(Collections.singletonList(workflow));
when(workflowService.searchWorkflowsByTasksV2(
1, 1, Collections.singletonList("strings"), "*", ""))
.thenReturn(searchResult);
workflowServiceImpl.searchByTasksV2(req, streamObserver);
streamAlive.await(10, TimeUnit.MILLISECONDS);
WorkflowServicePb.WorkflowSearchResult workflowSearchResult = result.get();
assertEquals(1, workflowSearchResult.getTotalHits());
assertEquals(
WorkflowPb.Workflow.newBuilder().build(),
workflowSearchResult.getResultsList().get(0));
}
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/grpc-server/src/test/java/com/netflix/conductor/grpc/server/service/TaskServiceImplTest.java | grpc-server/src/test/java/com/netflix/conductor/grpc/server/service/TaskServiceImplTest.java | /*
* Copyright 2020 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.grpc.server.service;
import java.util.Collections;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;
import org.junit.Before;
import org.junit.Test;
import org.mockito.Mock;
import com.netflix.conductor.common.metadata.tasks.Task;
import com.netflix.conductor.common.run.SearchResult;
import com.netflix.conductor.common.run.TaskSummary;
import com.netflix.conductor.grpc.SearchPb;
import com.netflix.conductor.grpc.TaskServicePb;
import com.netflix.conductor.proto.TaskPb;
import com.netflix.conductor.proto.TaskSummaryPb;
import com.netflix.conductor.service.ExecutionService;
import com.netflix.conductor.service.TaskService;
import io.grpc.stub.StreamObserver;
import static org.junit.Assert.assertEquals;
import static org.mockito.Mockito.*;
import static org.mockito.MockitoAnnotations.initMocks;
public class TaskServiceImplTest {
@Mock private TaskService taskService;
@Mock private ExecutionService executionService;
private TaskServiceImpl taskServiceImpl;
@Before
public void init() {
initMocks(this);
taskServiceImpl = new TaskServiceImpl(executionService, taskService, 5000);
}
@Test
public void searchExceptionTest() throws InterruptedException {
CountDownLatch streamAlive = new CountDownLatch(1);
AtomicReference<Throwable> throwable = new AtomicReference<>();
SearchPb.Request req =
SearchPb.Request.newBuilder()
.setStart(1)
.setSize(50000)
.setSort("strings")
.setQuery("")
.setFreeText("*")
.build();
StreamObserver<TaskServicePb.TaskSummarySearchResult> streamObserver =
new StreamObserver<>() {
@Override
public void onNext(TaskServicePb.TaskSummarySearchResult value) {}
@Override
public void onError(Throwable t) {
throwable.set(t);
streamAlive.countDown();
}
@Override
public void onCompleted() {
streamAlive.countDown();
}
};
taskServiceImpl.search(req, streamObserver);
streamAlive.await(10, TimeUnit.MILLISECONDS);
assertEquals(
"INVALID_ARGUMENT: Cannot return more than 5000 results",
throwable.get().getMessage());
}
@Test
public void searchV2ExceptionTest() throws InterruptedException {
CountDownLatch streamAlive = new CountDownLatch(1);
AtomicReference<Throwable> throwable = new AtomicReference<>();
SearchPb.Request req =
SearchPb.Request.newBuilder()
.setStart(1)
.setSize(50000)
.setSort("strings")
.setQuery("")
.setFreeText("*")
.build();
StreamObserver<TaskServicePb.TaskSearchResult> streamObserver =
new StreamObserver<>() {
@Override
public void onNext(TaskServicePb.TaskSearchResult value) {}
@Override
public void onError(Throwable t) {
throwable.set(t);
streamAlive.countDown();
}
@Override
public void onCompleted() {
streamAlive.countDown();
}
};
taskServiceImpl.searchV2(req, streamObserver);
streamAlive.await(10, TimeUnit.MILLISECONDS);
assertEquals(
"INVALID_ARGUMENT: Cannot return more than 5000 results",
throwable.get().getMessage());
}
@Test
public void searchTest() throws InterruptedException {
CountDownLatch streamAlive = new CountDownLatch(1);
AtomicReference<TaskServicePb.TaskSummarySearchResult> result = new AtomicReference<>();
SearchPb.Request req =
SearchPb.Request.newBuilder()
.setStart(1)
.setSize(1)
.setSort("strings")
.setQuery("")
.setFreeText("*")
.build();
StreamObserver<TaskServicePb.TaskSummarySearchResult> streamObserver =
new StreamObserver<>() {
@Override
public void onNext(TaskServicePb.TaskSummarySearchResult value) {
result.set(value);
}
@Override
public void onError(Throwable t) {
streamAlive.countDown();
}
@Override
public void onCompleted() {
streamAlive.countDown();
}
};
TaskSummary taskSummary = new TaskSummary();
SearchResult<TaskSummary> searchResult = new SearchResult<>();
searchResult.setTotalHits(1);
searchResult.setResults(Collections.singletonList(taskSummary));
when(taskService.search(1, 1, "strings", "*", "")).thenReturn(searchResult);
taskServiceImpl.search(req, streamObserver);
streamAlive.await(10, TimeUnit.MILLISECONDS);
TaskServicePb.TaskSummarySearchResult taskSummarySearchResult = result.get();
assertEquals(1, taskSummarySearchResult.getTotalHits());
assertEquals(
TaskSummaryPb.TaskSummary.newBuilder().build(),
taskSummarySearchResult.getResultsList().get(0));
}
@Test
public void searchV2Test() throws InterruptedException {
CountDownLatch streamAlive = new CountDownLatch(1);
AtomicReference<TaskServicePb.TaskSearchResult> result = new AtomicReference<>();
SearchPb.Request req =
SearchPb.Request.newBuilder()
.setStart(1)
.setSize(1)
.setSort("strings")
.setQuery("")
.setFreeText("*")
.build();
StreamObserver<TaskServicePb.TaskSearchResult> streamObserver =
new StreamObserver<>() {
@Override
public void onNext(TaskServicePb.TaskSearchResult value) {
result.set(value);
}
@Override
public void onError(Throwable t) {
streamAlive.countDown();
}
@Override
public void onCompleted() {
streamAlive.countDown();
}
};
Task task = new Task();
SearchResult<Task> searchResult = new SearchResult<>();
searchResult.setTotalHits(1);
searchResult.setResults(Collections.singletonList(task));
when(taskService.searchV2(1, 1, "strings", "*", "")).thenReturn(searchResult);
taskServiceImpl.searchV2(req, streamObserver);
streamAlive.await(10, TimeUnit.MILLISECONDS);
TaskServicePb.TaskSearchResult taskSearchResult = result.get();
assertEquals(1, taskSearchResult.getTotalHits());
assertEquals(
TaskPb.Task.newBuilder().setCallbackFromWorker(true).build(),
taskSearchResult.getResultsList().get(0));
}
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/grpc-server/src/main/java/com/netflix/conductor/grpc/server/GRPCServer.java | grpc-server/src/main/java/com/netflix/conductor/grpc/server/GRPCServer.java | /*
* Copyright 2020 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.grpc.server;
import java.io.IOException;
import java.util.List;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import io.grpc.BindableService;
import io.grpc.Server;
import io.grpc.ServerBuilder;
import jakarta.annotation.PostConstruct;
import jakarta.annotation.PreDestroy;
/**
 * Lifecycle wrapper around a gRPC {@link Server}: builds it from the supplied services,
 * starts it after construction and shuts it down when the container is destroyed.
 */
public class GRPCServer {

    private static final Logger LOGGER = LoggerFactory.getLogger(GRPCServer.class);

    private final Server server;

    /**
     * Builds (but does not start) a gRPC server on the given port, registering every
     * supplied service implementation.
     */
    public GRPCServer(int port, List<BindableService> services) {
        ServerBuilder<?> builder = ServerBuilder.forPort(port);
        services.forEach(builder::addService);
        server = builder.build();
    }

    /** Starts the server; invoked by the container after construction. */
    @PostConstruct
    public void start() throws IOException {
        server.start();
        // SLF4J parameterized logging instead of eager string concatenation.
        LOGGER.info("grpc: Server started, listening on {}", server.getPort());
    }

    /** Initiates an orderly shutdown; invoked by the container on context close. */
    @PreDestroy
    public void stop() {
        if (server != null) {
            LOGGER.info("grpc: server shutting down");
            server.shutdown();
        }
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/grpc-server/src/main/java/com/netflix/conductor/grpc/server/GrpcConfiguration.java | grpc-server/src/main/java/com/netflix/conductor/grpc/server/GrpcConfiguration.java | /*
* Copyright 2020 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.grpc.server;
import java.util.ArrayList;
import java.util.List;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.boot.context.properties.EnableConfigurationProperties;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import io.grpc.BindableService;
import io.grpc.protobuf.services.ProtoReflectionService;
@Configuration
@ConditionalOnProperty(name = "conductor.grpc-server.enabled", havingValue = "true")
@EnableConfigurationProperties(GRPCServerProperties.class)
public class GrpcConfiguration {

    /**
     * Creates the embedded gRPC server bean.
     *
     * <p>The injected service list is defensively copied before the optional reflection
     * service is appended, so the Spring-managed collection itself is never mutated
     * (the previous code added directly to the injected list).
     */
    @Bean
    public GRPCServer grpcServer(
            List<BindableService> bindableServices, // all gRPC service implementations
            GRPCServerProperties grpcServerProperties) {
        List<BindableService> services = new ArrayList<>(bindableServices);
        if (grpcServerProperties.isReflectionEnabled()) {
            services.add(ProtoReflectionService.newInstance());
        }
        return new GRPCServer(grpcServerProperties.getPort(), services);
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/grpc-server/src/main/java/com/netflix/conductor/grpc/server/GRPCServerProperties.java | grpc-server/src/main/java/com/netflix/conductor/grpc/server/GRPCServerProperties.java | /*
* Copyright 2020 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.grpc.server;
import org.springframework.boot.context.properties.ConfigurationProperties;
@ConfigurationProperties("conductor.grpc-server")
public class GRPCServerProperties {
    /** The port at which the gRPC server will serve requests (default 8090). */
    private int port = 8090;

    /** Enables the Protobuf reflection service on the server (default true). */
    private boolean reflectionEnabled = true;

    public int getPort() {
        return port;
    }

    public void setPort(int port) {
        this.port = port;
    }

    public boolean isReflectionEnabled() {
        return reflectionEnabled;
    }

    public void setReflectionEnabled(boolean reflectionEnabled) {
        this.reflectionEnabled = reflectionEnabled;
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/grpc-server/src/main/java/com/netflix/conductor/grpc/server/service/WorkflowServiceImpl.java | grpc-server/src/main/java/com/netflix/conductor/grpc/server/service/WorkflowServiceImpl.java | /*
* Copyright 2020 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.grpc.server.service;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.stereotype.Service;
import com.netflix.conductor.common.metadata.workflow.SkipTaskRequest;
import com.netflix.conductor.common.metadata.workflow.StartWorkflowRequest;
import com.netflix.conductor.common.run.SearchResult;
import com.netflix.conductor.common.run.Workflow;
import com.netflix.conductor.common.run.WorkflowSummary;
import com.netflix.conductor.core.exception.NotFoundException;
import com.netflix.conductor.grpc.ProtoMapper;
import com.netflix.conductor.grpc.SearchPb;
import com.netflix.conductor.grpc.WorkflowServiceGrpc;
import com.netflix.conductor.grpc.WorkflowServicePb;
import com.netflix.conductor.proto.RerunWorkflowRequestPb;
import com.netflix.conductor.proto.StartWorkflowRequestPb;
import com.netflix.conductor.proto.WorkflowPb;
import com.netflix.conductor.service.WorkflowService;
import io.grpc.Status;
import io.grpc.stub.StreamObserver;
@Service("grpcWorkflowService")
public class WorkflowServiceImpl extends WorkflowServiceGrpc.WorkflowServiceImplBase {
private static final Logger LOGGER = LoggerFactory.getLogger(TaskServiceImpl.class);
private static final ProtoMapper PROTO_MAPPER = ProtoMapper.INSTANCE;
private static final GRPCHelper GRPC_HELPER = new GRPCHelper(LOGGER);
private final WorkflowService workflowService;
private final int maxSearchSize;
public WorkflowServiceImpl(
WorkflowService workflowService,
@Value("${workflow.max.search.size:5000}") int maxSearchSize) {
this.workflowService = workflowService;
this.maxSearchSize = maxSearchSize;
}
@Override
public void startWorkflow(
StartWorkflowRequestPb.StartWorkflowRequest pbRequest,
StreamObserver<WorkflowServicePb.StartWorkflowResponse> response) {
// TODO: better handling of optional 'version'
final StartWorkflowRequest request = PROTO_MAPPER.fromProto(pbRequest);
try {
String id =
workflowService.startWorkflow(
pbRequest.getName(),
GRPC_HELPER.optional(request.getVersion()),
request.getCorrelationId(),
request.getPriority(),
request.getInput(),
request.getExternalInputPayloadStoragePath(),
request.getTaskToDomain(),
request.getWorkflowDef());
response.onNext(
WorkflowServicePb.StartWorkflowResponse.newBuilder().setWorkflowId(id).build());
response.onCompleted();
} catch (NotFoundException nfe) {
response.onError(
Status.NOT_FOUND
.withDescription("No such workflow found by name=" + request.getName())
.asRuntimeException());
} catch (Exception e) {
GRPC_HELPER.onError(response, e);
}
}
@Override
public void getWorkflows(
WorkflowServicePb.GetWorkflowsRequest req,
StreamObserver<WorkflowServicePb.GetWorkflowsResponse> response) {
final String name = req.getName();
final boolean includeClosed = req.getIncludeClosed();
final boolean includeTasks = req.getIncludeTasks();
WorkflowServicePb.GetWorkflowsResponse.Builder builder =
WorkflowServicePb.GetWorkflowsResponse.newBuilder();
for (String correlationId : req.getCorrelationIdList()) {
List<Workflow> workflows =
workflowService.getWorkflows(name, correlationId, includeClosed, includeTasks);
builder.putWorkflowsById(
correlationId,
WorkflowServicePb.GetWorkflowsResponse.Workflows.newBuilder()
.addAllWorkflows(
workflows.stream().map(PROTO_MAPPER::toProto)::iterator)
.build());
}
response.onNext(builder.build());
response.onCompleted();
}
@Override
public void getWorkflowStatus(
WorkflowServicePb.GetWorkflowStatusRequest req,
StreamObserver<WorkflowPb.Workflow> response) {
try {
Workflow workflow =
workflowService.getExecutionStatus(req.getWorkflowId(), req.getIncludeTasks());
response.onNext(PROTO_MAPPER.toProto(workflow));
response.onCompleted();
} catch (Exception e) {
GRPC_HELPER.onError(response, e);
}
}
@Override
public void removeWorkflow(
WorkflowServicePb.RemoveWorkflowRequest req,
StreamObserver<WorkflowServicePb.RemoveWorkflowResponse> response) {
try {
workflowService.deleteWorkflow(req.getWorkflodId(), req.getArchiveWorkflow());
response.onNext(WorkflowServicePb.RemoveWorkflowResponse.getDefaultInstance());
response.onCompleted();
} catch (Exception e) {
GRPC_HELPER.onError(response, e);
}
}
@Override
public void getRunningWorkflows(
WorkflowServicePb.GetRunningWorkflowsRequest req,
StreamObserver<WorkflowServicePb.GetRunningWorkflowsResponse> response) {
try {
List<String> workflowIds =
workflowService.getRunningWorkflows(
req.getName(), req.getVersion(), req.getStartTime(), req.getEndTime());
response.onNext(
WorkflowServicePb.GetRunningWorkflowsResponse.newBuilder()
.addAllWorkflowIds(workflowIds)
.build());
response.onCompleted();
} catch (Exception e) {
GRPC_HELPER.onError(response, e);
}
}
@Override
public void decideWorkflow(
WorkflowServicePb.DecideWorkflowRequest req,
StreamObserver<WorkflowServicePb.DecideWorkflowResponse> response) {
try {
workflowService.decideWorkflow(req.getWorkflowId());
response.onNext(WorkflowServicePb.DecideWorkflowResponse.getDefaultInstance());
response.onCompleted();
} catch (Exception e) {
GRPC_HELPER.onError(response, e);
}
}
@Override
public void pauseWorkflow(
WorkflowServicePb.PauseWorkflowRequest req,
StreamObserver<WorkflowServicePb.PauseWorkflowResponse> response) {
try {
workflowService.pauseWorkflow(req.getWorkflowId());
response.onNext(WorkflowServicePb.PauseWorkflowResponse.getDefaultInstance());
response.onCompleted();
} catch (Exception e) {
GRPC_HELPER.onError(response, e);
}
}
@Override
public void resumeWorkflow(
WorkflowServicePb.ResumeWorkflowRequest req,
StreamObserver<WorkflowServicePb.ResumeWorkflowResponse> response) {
try {
workflowService.resumeWorkflow(req.getWorkflowId());
response.onNext(WorkflowServicePb.ResumeWorkflowResponse.getDefaultInstance());
response.onCompleted();
} catch (Exception e) {
GRPC_HELPER.onError(response, e);
}
}
@Override
public void skipTaskFromWorkflow(
WorkflowServicePb.SkipTaskRequest req,
StreamObserver<WorkflowServicePb.SkipTaskResponse> response) {
try {
SkipTaskRequest skipTask = PROTO_MAPPER.fromProto(req.getRequest());
workflowService.skipTaskFromWorkflow(
req.getWorkflowId(), req.getTaskReferenceName(), skipTask);
response.onNext(WorkflowServicePb.SkipTaskResponse.getDefaultInstance());
response.onCompleted();
} catch (Exception e) {
GRPC_HELPER.onError(response, e);
}
}
@Override
public void rerunWorkflow(
RerunWorkflowRequestPb.RerunWorkflowRequest req,
StreamObserver<WorkflowServicePb.RerunWorkflowResponse> response) {
try {
String id =
workflowService.rerunWorkflow(
req.getReRunFromWorkflowId(), PROTO_MAPPER.fromProto(req));
response.onNext(
WorkflowServicePb.RerunWorkflowResponse.newBuilder().setWorkflowId(id).build());
response.onCompleted();
} catch (Exception e) {
GRPC_HELPER.onError(response, e);
}
}
@Override
public void restartWorkflow(
WorkflowServicePb.RestartWorkflowRequest req,
StreamObserver<WorkflowServicePb.RestartWorkflowResponse> response) {
try {
workflowService.restartWorkflow(req.getWorkflowId(), req.getUseLatestDefinitions());
response.onNext(WorkflowServicePb.RestartWorkflowResponse.getDefaultInstance());
response.onCompleted();
} catch (Exception e) {
GRPC_HELPER.onError(response, e);
}
}
@Override
public void retryWorkflow(
WorkflowServicePb.RetryWorkflowRequest req,
StreamObserver<WorkflowServicePb.RetryWorkflowResponse> response) {
try {
workflowService.retryWorkflow(req.getWorkflowId(), req.getResumeSubworkflowTasks());
response.onNext(WorkflowServicePb.RetryWorkflowResponse.getDefaultInstance());
response.onCompleted();
} catch (Exception e) {
GRPC_HELPER.onError(response, e);
}
}
@Override
public void resetWorkflowCallbacks(
WorkflowServicePb.ResetWorkflowCallbacksRequest req,
StreamObserver<WorkflowServicePb.ResetWorkflowCallbacksResponse> response) {
try {
workflowService.resetWorkflow(req.getWorkflowId());
response.onNext(WorkflowServicePb.ResetWorkflowCallbacksResponse.getDefaultInstance());
response.onCompleted();
} catch (Exception e) {
GRPC_HELPER.onError(response, e);
}
}
@Override
public void terminateWorkflow(
WorkflowServicePb.TerminateWorkflowRequest req,
StreamObserver<WorkflowServicePb.TerminateWorkflowResponse> response) {
try {
workflowService.terminateWorkflow(req.getWorkflowId(), req.getReason());
response.onNext(WorkflowServicePb.TerminateWorkflowResponse.getDefaultInstance());
response.onCompleted();
} catch (Exception e) {
GRPC_HELPER.onError(response, e);
}
}
private void doSearch(
boolean searchByTask,
SearchPb.Request req,
StreamObserver<WorkflowServicePb.WorkflowSummarySearchResult> response) {
final int start = req.getStart();
final int size = GRPC_HELPER.optionalOr(req.getSize(), maxSearchSize);
final List<String> sort = convertSort(req.getSort());
final String freeText = GRPC_HELPER.optionalOr(req.getFreeText(), "*");
final String query = req.getQuery();
if (size > maxSearchSize) {
response.onError(
Status.INVALID_ARGUMENT
.withDescription(
"Cannot return more than " + maxSearchSize + " results")
.asRuntimeException());
return;
}
SearchResult<WorkflowSummary> search;
if (searchByTask) {
search = workflowService.searchWorkflowsByTasks(start, size, sort, freeText, query);
} else {
search = workflowService.searchWorkflows(start, size, sort, freeText, query);
}
response.onNext(
WorkflowServicePb.WorkflowSummarySearchResult.newBuilder()
.setTotalHits(search.getTotalHits())
.addAllResults(
search.getResults().stream().map(PROTO_MAPPER::toProto)::iterator)
.build());
response.onCompleted();
}
private void doSearchV2(
boolean searchByTask,
SearchPb.Request req,
StreamObserver<WorkflowServicePb.WorkflowSearchResult> response) {
final int start = req.getStart();
final int size = GRPC_HELPER.optionalOr(req.getSize(), maxSearchSize);
final List<String> sort = convertSort(req.getSort());
final String freeText = GRPC_HELPER.optionalOr(req.getFreeText(), "*");
final String query = req.getQuery();
if (size > maxSearchSize) {
response.onError(
Status.INVALID_ARGUMENT
.withDescription(
"Cannot return more than " + maxSearchSize + " results")
.asRuntimeException());
return;
}
SearchResult<Workflow> search;
if (searchByTask) {
search = workflowService.searchWorkflowsByTasksV2(start, size, sort, freeText, query);
} else {
search = workflowService.searchWorkflowsV2(start, size, sort, freeText, query);
}
response.onNext(
WorkflowServicePb.WorkflowSearchResult.newBuilder()
.setTotalHits(search.getTotalHits())
.addAllResults(
search.getResults().stream().map(PROTO_MAPPER::toProto)::iterator)
.build());
response.onCompleted();
}
private List<String> convertSort(String sortStr) {
List<String> list = new ArrayList<>();
if (sortStr != null && sortStr.length() != 0) {
list = Arrays.asList(sortStr.split("\\|"));
}
return list;
}
@Override
public void search(
SearchPb.Request request,
StreamObserver<WorkflowServicePb.WorkflowSummarySearchResult> responseObserver) {
doSearch(false, request, responseObserver);
}
@Override
public void searchByTasks(
SearchPb.Request request,
StreamObserver<WorkflowServicePb.WorkflowSummarySearchResult> responseObserver) {
doSearch(true, request, responseObserver);
}
@Override
public void searchV2(
SearchPb.Request request,
StreamObserver<WorkflowServicePb.WorkflowSearchResult> responseObserver) {
doSearchV2(false, request, responseObserver);
}
@Override
public void searchByTasksV2(
SearchPb.Request request,
StreamObserver<WorkflowServicePb.WorkflowSearchResult> responseObserver) {
doSearchV2(true, request, responseObserver);
}
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/grpc-server/src/main/java/com/netflix/conductor/grpc/server/service/MetadataServiceImpl.java | grpc-server/src/main/java/com/netflix/conductor/grpc/server/service/MetadataServiceImpl.java | /*
* Copyright 2020 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.grpc.server.service;
import java.util.List;
import java.util.stream.Collectors;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.stereotype.Service;
import com.netflix.conductor.common.metadata.tasks.TaskDef;
import com.netflix.conductor.common.metadata.workflow.WorkflowDef;
import com.netflix.conductor.core.exception.NotFoundException;
import com.netflix.conductor.grpc.MetadataServiceGrpc;
import com.netflix.conductor.grpc.MetadataServicePb;
import com.netflix.conductor.grpc.ProtoMapper;
import com.netflix.conductor.proto.TaskDefPb;
import com.netflix.conductor.proto.WorkflowDefPb;
import com.netflix.conductor.service.MetadataService;
import io.grpc.Status;
import io.grpc.stub.StreamObserver;
@Service("grpcMetadataService")
public class MetadataServiceImpl extends MetadataServiceGrpc.MetadataServiceImplBase {

    private static final Logger LOGGER = LoggerFactory.getLogger(MetadataServiceImpl.class);
    private static final ProtoMapper PROTO_MAPPER = ProtoMapper.INSTANCE;
    private static final GRPCHelper GRPC_HELPER = new GRPCHelper(LOGGER);

    private final MetadataService service;

    public MetadataServiceImpl(MetadataService service) {
        this.service = service;
    }

    /** Registers a single workflow definition. */
    @Override
    public void createWorkflow(
            MetadataServicePb.CreateWorkflowRequest req,
            StreamObserver<MetadataServicePb.CreateWorkflowResponse> response) {
        final WorkflowDef def = PROTO_MAPPER.fromProto(req.getWorkflow());
        service.registerWorkflowDef(def);
        response.onNext(MetadataServicePb.CreateWorkflowResponse.getDefaultInstance());
        response.onCompleted();
    }

    /** Validates a workflow definition without registering it. */
    @Override
    public void validateWorkflow(
            MetadataServicePb.ValidateWorkflowRequest req,
            StreamObserver<MetadataServicePb.ValidateWorkflowResponse> response) {
        final WorkflowDef def = PROTO_MAPPER.fromProto(req.getWorkflow());
        service.validateWorkflowDef(def);
        response.onNext(MetadataServicePb.ValidateWorkflowResponse.getDefaultInstance());
        response.onCompleted();
    }

    /** Bulk-updates a list of workflow definitions. */
    @Override
    public void updateWorkflows(
            MetadataServicePb.UpdateWorkflowsRequest req,
            StreamObserver<MetadataServicePb.UpdateWorkflowsResponse> response) {
        final List<WorkflowDef> defs =
                req.getDefsList().stream()
                        .map(PROTO_MAPPER::fromProto)
                        .collect(Collectors.toList());
        service.updateWorkflowDef(defs);
        response.onNext(MetadataServicePb.UpdateWorkflowsResponse.getDefaultInstance());
        response.onCompleted();
    }

    /** Looks up a workflow definition by name and optional version. */
    @Override
    public void getWorkflow(
            MetadataServicePb.GetWorkflowRequest req,
            StreamObserver<MetadataServicePb.GetWorkflowResponse> response) {
        try {
            final WorkflowDef def =
                    service.getWorkflowDef(req.getName(), GRPC_HELPER.optional(req.getVersion()));
            final WorkflowDefPb.WorkflowDef asProto = PROTO_MAPPER.toProto(def);
            response.onNext(
                    MetadataServicePb.GetWorkflowResponse.newBuilder()
                            .setWorkflow(asProto)
                            .build());
            response.onCompleted();
        } catch (NotFoundException e) {
            // TODO replace this with gRPC exception interceptor.
            response.onError(
                    Status.NOT_FOUND
                            .withDescription("No such workflow found by name=" + req.getName())
                            .asRuntimeException());
        }
    }

    /** Registers a batch of task definitions. */
    @Override
    public void createTasks(
            MetadataServicePb.CreateTasksRequest req,
            StreamObserver<MetadataServicePb.CreateTasksResponse> response) {
        final List<TaskDef> defs =
                req.getDefsList().stream()
                        .map(PROTO_MAPPER::fromProto)
                        .collect(Collectors.toList());
        service.registerTaskDef(defs);
        response.onNext(MetadataServicePb.CreateTasksResponse.getDefaultInstance());
        response.onCompleted();
    }

    /** Updates an existing task definition. */
    @Override
    public void updateTask(
            MetadataServicePb.UpdateTaskRequest req,
            StreamObserver<MetadataServicePb.UpdateTaskResponse> response) {
        final TaskDef def = PROTO_MAPPER.fromProto(req.getTask());
        service.updateTaskDef(def);
        response.onNext(MetadataServicePb.UpdateTaskResponse.getDefaultInstance());
        response.onCompleted();
    }

    /** Fetches a task definition by its type, replying NOT_FOUND if unknown. */
    @Override
    public void getTask(
            MetadataServicePb.GetTaskRequest req,
            StreamObserver<MetadataServicePb.GetTaskResponse> response) {
        final TaskDef def = service.getTaskDef(req.getTaskType());
        if (def == null) {
            response.onError(
                    Status.NOT_FOUND
                            .withDescription(
                                    "No such TaskDef found by taskType=" + req.getTaskType())
                            .asRuntimeException());
            return;
        }
        final TaskDefPb.TaskDef asProto = PROTO_MAPPER.toProto(def);
        response.onNext(MetadataServicePb.GetTaskResponse.newBuilder().setTask(asProto).build());
        response.onCompleted();
    }

    /** Unregisters a task definition by its type. */
    @Override
    public void deleteTask(
            MetadataServicePb.DeleteTaskRequest req,
            StreamObserver<MetadataServicePb.DeleteTaskResponse> response) {
        service.unregisterTaskDef(req.getTaskType());
        response.onNext(MetadataServicePb.DeleteTaskResponse.getDefaultInstance());
        response.onCompleted();
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/grpc-server/src/main/java/com/netflix/conductor/grpc/server/service/HealthServiceImpl.java | grpc-server/src/main/java/com/netflix/conductor/grpc/server/service/HealthServiceImpl.java | /*
* Copyright 2020 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.grpc.server.service;
import org.springframework.stereotype.Service;
import io.grpc.health.v1.HealthCheckRequest;
import io.grpc.health.v1.HealthCheckResponse;
import io.grpc.health.v1.HealthGrpc;
import io.grpc.stub.StreamObserver;
@Service("grpcHealthService")
public class HealthServiceImpl extends HealthGrpc.HealthImplBase {

    // SBMTODO: migrate this to the Spring Boot health check.

    /** Always reports SERVING; no dependency probing is performed. */
    @Override
    public void check(
            HealthCheckRequest request, StreamObserver<HealthCheckResponse> responseObserver) {
        HealthCheckResponse serving =
                HealthCheckResponse.newBuilder()
                        .setStatus(HealthCheckResponse.ServingStatus.SERVING)
                        .build();
        responseObserver.onNext(serving);
        responseObserver.onCompleted();
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/grpc-server/src/main/java/com/netflix/conductor/grpc/server/service/GRPCHelper.java | grpc-server/src/main/java/com/netflix/conductor/grpc/server/service/GRPCHelper.java | /*
* Copyright 2020 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.grpc.server.service;
import java.util.Arrays;
import org.apache.commons.lang3.exception.ExceptionUtils;
import org.slf4j.Logger;
import com.google.rpc.DebugInfo;
import io.grpc.Metadata;
import io.grpc.Status;
import io.grpc.StatusException;
import io.grpc.stub.StreamObserver;
import jakarta.annotation.Nonnull;
import static io.grpc.protobuf.ProtoUtils.metadataMarshaller;
public class GRPCHelper {

    private final Logger logger;

    /**
     * Trailer key carrying a serialized {@link DebugInfo} payload. The Java gRPC API
     * offers no first-class way to attach 'grpc-status-details-bin' to a {@link
     * Status}, so this key lets us hand-craft the trailer the same way the Go and
     * C++ server implementations serialize it; their clients can then read it back
     * through their 'details' accessors.
     */
    private static final Metadata.Key<DebugInfo> STATUS_DETAILS_KEY =
            Metadata.Key.of(
                    "grpc-status-details-bin", metadataMarshaller(DebugInfo.getDefaultInstance()));

    public GRPCHelper(Logger log) {
        this.logger = log;
    }

    /**
     * Wraps an internal Conductor exception into a {@link StatusException} with an
     * INTERNAL status.
     *
     * <p>gRPC reports errors to clients as HTTP/2 trailers: 'grpc-status' (a {@link
     * com.google.rpc.Code}), 'grpc-message' (the description), and optionally
     * 'grpc-status-details-bin' (a serialized ProtoBuf describing the failure). The
     * first two are produced automatically from the {@link Status}; the third has no
     * setter in the Java API, so it is attached manually via {@link
     * Status#asException(Metadata)} using {@link #STATUS_DETAILS_KEY}. We use
     * Google's conventional {@link DebugInfo} message since we are surfacing an
     * unhandled Java exception.
     *
     * @param t the exception to convert
     * @return a {@link StatusException} whose trailers fully describe the failure
     */
    private StatusException throwableToStatusException(Throwable t) {
        Metadata trailers = new Metadata();
        DebugInfo debugInfo =
                DebugInfo.newBuilder()
                        .addAllStackEntries(Arrays.asList(ExceptionUtils.getStackFrames(t)))
                        .setDetail(ExceptionUtils.getMessage(t))
                        .build();
        trailers.put(STATUS_DETAILS_KEY, debugInfo);
        return Status.INTERNAL.withDescription(t.getMessage()).withCause(t).asException(trailers);
    }

    /** Logs the throwable and terminates the stream with an INTERNAL status. */
    void onError(StreamObserver<?> response, Throwable t) {
        logger.error("internal exception during GRPC request", t);
        response.onError(throwableToStatusException(t));
    }

    /**
     * Maps a ProtoBuf string field to Conductor's "optional argument" convention.
     *
     * <p>ProtoBuf string fields are never null; an empty string means "missing".
     * Conductor's internal APIs expect missing values as null instead.
     *
     * @param str a string taken from a ProtoBuf object
     * @return the string itself, or null if it was empty
     */
    String optional(@Nonnull String str) {
        if (str.isEmpty()) {
            return null;
        }
        return str;
    }

    /**
     * Returns {@code defaults} when the given ProtoBuf string field is "missing"
     * (i.e. empty), otherwise the string itself.
     *
     * @param str the input string
     * @param defaults fallback when {@code str} is empty
     * @return {@code str} if non-empty; {@code defaults} otherwise
     */
    String optionalOr(@Nonnull String str, String defaults) {
        if (str.isEmpty()) {
            return defaults;
        }
        return str;
    }

    /**
     * Maps a ProtoBuf integer field to Conductor's "optional argument" convention.
     *
     * <p>ProtoBuf integer fields are never null; a zero value means "missing".
     * Conductor's internal APIs expect missing values as null instead.
     *
     * @param i an Integer taken from a ProtoBuf object
     * @return the Integer itself, or null if it was zero
     */
    Integer optional(@Nonnull Integer i) {
        if (i == 0) {
            return null;
        }
        return i;
    }

    /**
     * Returns {@code defaults} when the given ProtoBuf integer field is "missing"
     * (i.e. zero), otherwise the Integer itself.
     *
     * @param i the input Integer
     * @param defaults fallback when {@code i} is zero
     * @return {@code i} if non-zero; {@code defaults} otherwise
     */
    Integer optionalOr(@Nonnull Integer i, int defaults) {
        if (i == 0) {
            return defaults;
        }
        return i;
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/grpc-server/src/main/java/com/netflix/conductor/grpc/server/service/EventServiceImpl.java | grpc-server/src/main/java/com/netflix/conductor/grpc/server/service/EventServiceImpl.java | /*
* Copyright 2020 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.grpc.server.service;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.stereotype.Service;
import com.netflix.conductor.grpc.EventServiceGrpc;
import com.netflix.conductor.grpc.EventServicePb;
import com.netflix.conductor.grpc.ProtoMapper;
import com.netflix.conductor.proto.EventHandlerPb;
import com.netflix.conductor.service.MetadataService;
import io.grpc.stub.StreamObserver;
@Service("grpcEventService")
public class EventServiceImpl extends EventServiceGrpc.EventServiceImplBase {

    private static final Logger LOGGER = LoggerFactory.getLogger(EventServiceImpl.class);
    private static final ProtoMapper PROTO_MAPPER = ProtoMapper.INSTANCE;

    private final MetadataService metadataService;

    public EventServiceImpl(MetadataService metadataService) {
        this.metadataService = metadataService;
    }

    /** Registers a new event handler. */
    @Override
    public void addEventHandler(
            EventServicePb.AddEventHandlerRequest req,
            StreamObserver<EventServicePb.AddEventHandlerResponse> response) {
        var handler = PROTO_MAPPER.fromProto(req.getHandler());
        metadataService.addEventHandler(handler);
        response.onNext(EventServicePb.AddEventHandlerResponse.getDefaultInstance());
        response.onCompleted();
    }

    /** Updates an existing event handler. */
    @Override
    public void updateEventHandler(
            EventServicePb.UpdateEventHandlerRequest req,
            StreamObserver<EventServicePb.UpdateEventHandlerResponse> response) {
        var handler = PROTO_MAPPER.fromProto(req.getHandler());
        metadataService.updateEventHandler(handler);
        response.onNext(EventServicePb.UpdateEventHandlerResponse.getDefaultInstance());
        response.onCompleted();
    }

    /** Removes an event handler by name. */
    @Override
    public void removeEventHandler(
            EventServicePb.RemoveEventHandlerRequest req,
            StreamObserver<EventServicePb.RemoveEventHandlerResponse> response) {
        metadataService.removeEventHandlerStatus(req.getName());
        response.onNext(EventServicePb.RemoveEventHandlerResponse.getDefaultInstance());
        response.onCompleted();
    }

    /** Streams all registered event handlers, one message per handler. */
    @Override
    public void getEventHandlers(
            EventServicePb.GetEventHandlersRequest req,
            StreamObserver<EventHandlerPb.EventHandler> response) {
        metadataService
                .getAllEventHandlers()
                .forEach(handler -> response.onNext(PROTO_MAPPER.toProto(handler)));
        response.onCompleted();
    }

    /** Streams the handlers registered for a given event, optionally active-only. */
    @Override
    public void getEventHandlersForEvent(
            EventServicePb.GetEventHandlersForEventRequest req,
            StreamObserver<EventHandlerPb.EventHandler> response) {
        metadataService
                .getEventHandlersForEvent(req.getEvent(), req.getActiveOnly())
                .forEach(handler -> response.onNext(PROTO_MAPPER.toProto(handler)));
        response.onCompleted();
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/grpc-server/src/main/java/com/netflix/conductor/grpc/server/service/TaskServiceImpl.java | grpc-server/src/main/java/com/netflix/conductor/grpc/server/service/TaskServiceImpl.java | /*
* Copyright 2020 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.grpc.server.service;
import java.util.List;
import java.util.Map;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.stereotype.Service;
import com.netflix.conductor.common.metadata.tasks.Task;
import com.netflix.conductor.common.metadata.tasks.TaskExecLog;
import com.netflix.conductor.common.metadata.tasks.TaskResult;
import com.netflix.conductor.common.run.SearchResult;
import com.netflix.conductor.common.run.TaskSummary;
import com.netflix.conductor.grpc.ProtoMapper;
import com.netflix.conductor.grpc.SearchPb;
import com.netflix.conductor.grpc.TaskServiceGrpc;
import com.netflix.conductor.grpc.TaskServicePb;
import com.netflix.conductor.proto.TaskPb;
import com.netflix.conductor.service.ExecutionService;
import com.netflix.conductor.service.TaskService;
import io.grpc.Status;
import io.grpc.stub.StreamObserver;
@Service("grpcTaskService")
public class TaskServiceImpl extends TaskServiceGrpc.TaskServiceImplBase {
private static final Logger LOGGER = LoggerFactory.getLogger(TaskServiceImpl.class);
private static final ProtoMapper PROTO_MAPPER = ProtoMapper.INSTANCE;
private static final GRPCHelper GRPC_HELPER = new GRPCHelper(LOGGER);
private static final int POLL_TIMEOUT_MS = 100;
private static final int MAX_POLL_TIMEOUT_MS = 5000;
private final TaskService taskService;
private final int maxSearchSize;
private final ExecutionService executionService;
    /**
     * Wires the gRPC task service to Conductor's core services.
     *
     * @param executionService used for the low-level single-task {@code poll} RPC
     * @param taskService used for all other task operations (batch poll, updates, logs, queues)
     * @param maxSearchSize upper bound for search page sizes, from
     *     'workflow.max.search.size' (default 5000)
     */
    public TaskServiceImpl(
            ExecutionService executionService,
            TaskService taskService,
            @Value("${workflow.max.search.size:5000}") int maxSearchSize) {
        this.executionService = executionService;
        this.taskService = taskService;
        this.maxSearchSize = maxSearchSize;
    }
@Override
public void poll(
TaskServicePb.PollRequest req, StreamObserver<TaskServicePb.PollResponse> response) {
try {
List<Task> tasks =
executionService.poll(
req.getTaskType(),
req.getWorkerId(),
GRPC_HELPER.optional(req.getDomain()),
1,
POLL_TIMEOUT_MS);
if (!tasks.isEmpty()) {
TaskPb.Task t = PROTO_MAPPER.toProto(tasks.get(0));
response.onNext(TaskServicePb.PollResponse.newBuilder().setTask(t).build());
}
response.onCompleted();
} catch (Exception e) {
GRPC_HELPER.onError(response, e);
}
}
@Override
public void batchPoll(
TaskServicePb.BatchPollRequest req, StreamObserver<TaskPb.Task> response) {
final int count = GRPC_HELPER.optionalOr(req.getCount(), 1);
final int timeout = GRPC_HELPER.optionalOr(req.getTimeout(), POLL_TIMEOUT_MS);
if (timeout > MAX_POLL_TIMEOUT_MS) {
response.onError(
Status.INVALID_ARGUMENT
.withDescription(
"longpoll timeout cannot be longer than "
+ MAX_POLL_TIMEOUT_MS
+ "ms")
.asRuntimeException());
return;
}
try {
List<Task> polledTasks =
taskService.batchPoll(
req.getTaskType(),
req.getWorkerId(),
GRPC_HELPER.optional(req.getDomain()),
count,
timeout);
LOGGER.info("polled tasks: " + polledTasks);
polledTasks.stream().map(PROTO_MAPPER::toProto).forEach(response::onNext);
response.onCompleted();
} catch (Exception e) {
GRPC_HELPER.onError(response, e);
}
}
@Override
public void updateTask(
TaskServicePb.UpdateTaskRequest req,
StreamObserver<TaskServicePb.UpdateTaskResponse> response) {
try {
TaskResult task = PROTO_MAPPER.fromProto(req.getResult());
taskService.updateTask(task);
response.onNext(
TaskServicePb.UpdateTaskResponse.newBuilder()
.setTaskId(task.getTaskId())
.build());
response.onCompleted();
} catch (Exception e) {
GRPC_HELPER.onError(response, e);
}
}
@Override
public void addLog(
TaskServicePb.AddLogRequest req,
StreamObserver<TaskServicePb.AddLogResponse> response) {
taskService.log(req.getTaskId(), req.getLog());
response.onNext(TaskServicePb.AddLogResponse.getDefaultInstance());
response.onCompleted();
}
@Override
public void getTaskLogs(
TaskServicePb.GetTaskLogsRequest req,
StreamObserver<TaskServicePb.GetTaskLogsResponse> response) {
List<TaskExecLog> logs = taskService.getTaskLogs(req.getTaskId());
response.onNext(
TaskServicePb.GetTaskLogsResponse.newBuilder()
.addAllLogs(logs.stream().map(PROTO_MAPPER::toProto)::iterator)
.build());
response.onCompleted();
}
@Override
public void getTask(
        TaskServicePb.GetTaskRequest req,
        StreamObserver<TaskServicePb.GetTaskResponse> response) {
    // Fetches a single task by id; NOT_FOUND when no such task exists.
    try {
        final Task found = taskService.getTask(req.getTaskId());
        if (found == null) {
            response.onError(
                    Status.NOT_FOUND
                            .withDescription("No such task found by id=" + req.getTaskId())
                            .asRuntimeException());
            return;
        }
        response.onNext(
                TaskServicePb.GetTaskResponse.newBuilder()
                        .setTask(PROTO_MAPPER.toProto(found))
                        .build());
        response.onCompleted();
    } catch (Exception e) {
        GRPC_HELPER.onError(response, e);
    }
}
@Override
public void getQueueSizesForTasks(
        TaskServicePb.QueueSizesRequest req,
        StreamObserver<TaskServicePb.QueueSizesResponse> response) {
    // Reports the pending-queue depth for each requested task type.
    Map<String, Integer> queueSizes = taskService.getTaskQueueSizes(req.getTaskTypesList());
    TaskServicePb.QueueSizesResponse reply =
            TaskServicePb.QueueSizesResponse.newBuilder()
                    .putAllQueueForTask(queueSizes)
                    .build();
    response.onNext(reply);
    response.onCompleted();
}
@Override
public void getQueueInfo(
        TaskServicePb.QueueInfoRequest req,
        StreamObserver<TaskServicePb.QueueInfoResponse> response) {
    // Returns queue name -> size for every queue known to the task service.
    Map<String, Long> details = taskService.getAllQueueDetails();
    TaskServicePb.QueueInfoResponse reply =
            TaskServicePb.QueueInfoResponse.newBuilder().putAllQueues(details).build();
    response.onNext(reply);
    response.onCompleted();
}
@Override
public void getQueueAllInfo(
        TaskServicePb.QueueAllInfoRequest req,
        StreamObserver<TaskServicePb.QueueAllInfoResponse> response) {
    // Verbose view: queue name -> shard name -> {"size": n, "uacked": n}.
    Map<String, Map<String, Map<String, Long>>> verbose = taskService.allVerbose();

    TaskServicePb.QueueAllInfoResponse.Builder reply =
            TaskServicePb.QueueAllInfoResponse.newBuilder();
    verbose.forEach(
            (queueName, shards) -> {
                TaskServicePb.QueueAllInfoResponse.QueueInfo.Builder queueInfo =
                        TaskServicePb.QueueAllInfoResponse.QueueInfo.newBuilder();
                shards.forEach(
                        (shardName, shardData) -> {
                            // FIXME: make shardData an actual type
                            // shardData is an immutable map with predefined keys, so we
                            // can always access 'size' and 'uacked'. It would be better
                            // if it were actually a POJO.
                            queueInfo.putShards(
                                    shardName,
                                    TaskServicePb.QueueAllInfoResponse.ShardInfo.newBuilder()
                                            .setSize(shardData.get("size"))
                                            .setUacked(shardData.get("uacked"))
                                            .build());
                        });
                reply.putQueues(queueName, queueInfo.build());
            });

    response.onNext(reply.build());
    response.onCompleted();
}
@Override
public void search(
        SearchPb.Request req, StreamObserver<TaskServicePb.TaskSummarySearchResult> response) {
    // Task-summary search; page size defaults to (and is capped at) maxSearchSize.
    final int start = req.getStart();
    final int size = GRPC_HELPER.optionalOr(req.getSize(), maxSearchSize);
    final String sort = req.getSort();
    final String freeText = GRPC_HELPER.optionalOr(req.getFreeText(), "*");
    final String query = req.getQuery();

    if (size > maxSearchSize) {
        response.onError(
                Status.INVALID_ARGUMENT
                        .withDescription(
                                "Cannot return more than " + maxSearchSize + " results")
                        .asRuntimeException());
        return;
    }

    SearchResult<TaskSummary> result = taskService.search(start, size, sort, freeText, query);
    TaskServicePb.TaskSummarySearchResult.Builder reply =
            TaskServicePb.TaskSummarySearchResult.newBuilder()
                    .setTotalHits(result.getTotalHits());
    result.getResults().forEach(summary -> reply.addResults(PROTO_MAPPER.toProto(summary)));
    response.onNext(reply.build());
    response.onCompleted();
}
@Override
public void searchV2(
        SearchPb.Request req, StreamObserver<TaskServicePb.TaskSearchResult> response) {
    // Full-task search; page size defaults to (and is capped at) maxSearchSize.
    final int start = req.getStart();
    final int size = GRPC_HELPER.optionalOr(req.getSize(), maxSearchSize);
    final String sort = req.getSort();
    final String freeText = GRPC_HELPER.optionalOr(req.getFreeText(), "*");
    final String query = req.getQuery();

    if (size > maxSearchSize) {
        response.onError(
                Status.INVALID_ARGUMENT
                        .withDescription(
                                "Cannot return more than " + maxSearchSize + " results")
                        .asRuntimeException());
        return;
    }

    SearchResult<Task> result = taskService.searchV2(start, size, sort, freeText, query);
    TaskServicePb.TaskSearchResult.Builder reply =
            TaskServicePb.TaskSearchResult.newBuilder().setTotalHits(result.getTotalHits());
    result.getResults().forEach(task -> reply.addResults(PROTO_MAPPER.toProto(task)));
    response.onNext(reply.build());
    response.onCompleted();
}
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/test-util/src/test/java/com/netflix/conductor/ConductorTestApp.java | test-util/src/test/java/com/netflix/conductor/ConductorTestApp.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor;
import java.io.IOException;
import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.SpringBootApplication;
import org.springframework.boot.autoconfigure.jdbc.DataSourceAutoConfiguration;
/** Copy of com.netflix.conductor.Conductor for use by @SpringBootTest in AbstractSpecification. */
// Prevents from the datasource beans to be loaded, AS they are needed only for specific databases.
// In case that SQL database is selected this class will be imported back in the appropriate
// database persistence module.
@SpringBootApplication(exclude = DataSourceAutoConfiguration.class)
public class ConductorTestApp {

    public static void main(String[] args) throws IOException {
        // Equivalent to SpringApplication.run(ConductorTestApp.class, args).
        new SpringApplication(ConductorTestApp.class).run(args);
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/test-util/src/test/java/com/netflix/conductor/test/integration/AbstractEndToEndTest.java | test-util/src/test/java/com/netflix/conductor/test/integration/AbstractEndToEndTest.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.test.integration;
import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.io.Reader;
import java.util.Arrays;
import java.util.HashMap;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.Optional;
import org.apache.http.HttpHost;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.client.RestClientBuilder;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.test.context.TestPropertySource;
import org.testcontainers.elasticsearch.ElasticsearchContainer;
import org.testcontainers.utility.DockerImageName;
import com.netflix.conductor.common.metadata.events.EventHandler;
import com.netflix.conductor.common.metadata.tasks.TaskDef;
import com.netflix.conductor.common.metadata.tasks.TaskType;
import com.netflix.conductor.common.metadata.workflow.WorkflowDef;
import com.netflix.conductor.common.metadata.workflow.WorkflowTask;
import com.netflix.conductor.common.run.Workflow;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
@TestPropertySource(
        properties = {"conductor.indexing.enabled=true", "conductor.elasticsearch.version=7"})
public abstract class AbstractEndToEndTest {

    private static final Logger log = LoggerFactory.getLogger(AbstractEndToEndTest.class);

    private static final String TASK_DEFINITION_PREFIX = "task_";
    private static final String DEFAULT_DESCRIPTION = "description";
    // Represents null value deserialized from the redis in memory db
    private static final String DEFAULT_NULL_VALUE = "null";
    protected static final String DEFAULT_EMAIL_ADDRESS = "test@harness.com";

    private static final ElasticsearchContainer container =
            new ElasticsearchContainer(
                    DockerImageName.parse("elasticsearch")
                            .withTag("7.17.11")); // this should match the client version

    private static RestClient restClient;

    // Initialization happens in a static block so the container is initialized
    // only once for all the sub-class tests in a CI environment
    // container is stopped when JVM exits
    // https://www.testcontainers.org/test_framework_integration/manual_lifecycle_control/#singleton-containers
    static {
        container.start();
        String httpHostAddress = container.getHttpHostAddress();
        System.setProperty("conductor.elasticsearch.url", "http://" + httpHostAddress);
        log.info("Initialized Elasticsearch {}", container.getContainerId());
    }

    /** Builds the low-level REST client pointed at the singleton ES container. */
    @BeforeClass
    public static void initializeEs() {
        String httpHostAddress = container.getHttpHostAddress();
        String host = httpHostAddress.split(":")[0];
        int port = Integer.parseInt(httpHostAddress.split(":")[1]);

        RestClientBuilder restClientBuilder = RestClient.builder(new HttpHost(host, port, "http"));
        restClient = restClientBuilder.build();
    }

    /**
     * Deletes every index so subsequent test classes start against a clean cluster, then closes
     * the REST client. Fixes the original version, which never closed the readers over the
     * {@code _cat/indices} response and null-checked {@code restClient} only after using it.
     */
    @AfterClass
    public static void cleanupEs() throws Exception {
        if (restClient == null) {
            return; // initializeEs never ran; nothing to clean up
        }
        // try-with-resources closes the client (and the readers) even if a DELETE fails
        try (RestClient client = restClient) {
            // deletes all indices
            Response beforeResponse = client.performRequest(new Request("GET", "/_cat/indices"));
            try (Reader streamReader =
                            new InputStreamReader(beforeResponse.getEntity().getContent());
                    BufferedReader bufferedReader = new BufferedReader(streamReader)) {
                String line;
                while ((line = bufferedReader.readLine()) != null) {
                    // The index name is the third whitespace-separated column of _cat output.
                    String[] fields = line.split("\\s");
                    String endpoint = String.format("/%s", fields[2]);
                    client.performRequest(new Request("DELETE", endpoint));
                }
            }
        }
    }

    /** An ephemeral workflow referencing task definitions stored in metadata should run as-is. */
    @Test
    public void testEphemeralWorkflowsWithStoredTasks() {
        String workflowExecutionName = "testEphemeralWorkflow";

        createAndRegisterTaskDefinitions("storedTaskDef", 5);
        WorkflowDef workflowDefinition = createWorkflowDefinition(workflowExecutionName);
        WorkflowTask workflowTask1 = createWorkflowTask("storedTaskDef1");
        WorkflowTask workflowTask2 = createWorkflowTask("storedTaskDef2");
        workflowDefinition.getTasks().addAll(Arrays.asList(workflowTask1, workflowTask2));

        String workflowId = startWorkflow(workflowExecutionName, workflowDefinition);
        assertNotNull(workflowId);

        Workflow workflow = getWorkflow(workflowId, true);
        WorkflowDef ephemeralWorkflow = workflow.getWorkflowDefinition();
        assertNotNull(ephemeralWorkflow);
        assertEquals(workflowDefinition, ephemeralWorkflow);
    }

    /** An ephemeral workflow may also carry fully inline (ephemeral) task definitions. */
    @Test
    public void testEphemeralWorkflowsWithEphemeralTasks() {
        String workflowExecutionName = "ephemeralWorkflowWithEphemeralTasks";

        WorkflowDef workflowDefinition = createWorkflowDefinition(workflowExecutionName);
        WorkflowTask workflowTask1 = createWorkflowTask("ephemeralTask1");
        TaskDef taskDefinition1 = createTaskDefinition("ephemeralTaskDef1");
        workflowTask1.setTaskDefinition(taskDefinition1);

        WorkflowTask workflowTask2 = createWorkflowTask("ephemeralTask2");
        TaskDef taskDefinition2 = createTaskDefinition("ephemeralTaskDef2");
        workflowTask2.setTaskDefinition(taskDefinition2);

        workflowDefinition.getTasks().addAll(Arrays.asList(workflowTask1, workflowTask2));

        String workflowId = startWorkflow(workflowExecutionName, workflowDefinition);
        assertNotNull(workflowId);

        Workflow workflow = getWorkflow(workflowId, true);
        WorkflowDef ephemeralWorkflow = workflow.getWorkflowDefinition();
        assertNotNull(ephemeralWorkflow);
        assertEquals(workflowDefinition, ephemeralWorkflow);

        // Every task carried in the ephemeral definition must retain its inline TaskDef.
        List<WorkflowTask> ephemeralTasks = ephemeralWorkflow.getTasks();
        assertEquals(2, ephemeralTasks.size());
        for (WorkflowTask ephemeralTask : ephemeralTasks) {
            assertNotNull(ephemeralTask.getTaskDefinition());
        }
    }

    /** Mixing inline task definitions with registered ones within one ephemeral workflow. */
    @Test
    public void testEphemeralWorkflowsWithEphemeralAndStoredTasks() {
        createAndRegisterTaskDefinitions("storedTask", 1);

        WorkflowDef workflowDefinition =
                createWorkflowDefinition("testEphemeralWorkflowsWithEphemeralAndStoredTasks");

        WorkflowTask workflowTask1 = createWorkflowTask("ephemeralTask1");
        TaskDef taskDefinition1 = createTaskDefinition("ephemeralTaskDef1");
        workflowTask1.setTaskDefinition(taskDefinition1);

        WorkflowTask workflowTask2 = createWorkflowTask("storedTask0");

        workflowDefinition.getTasks().add(workflowTask1);
        workflowDefinition.getTasks().add(workflowTask2);

        String workflowExecutionName = "ephemeralWorkflowWithEphemeralAndStoredTasks";

        String workflowId = startWorkflow(workflowExecutionName, workflowDefinition);
        assertNotNull(workflowId);

        Workflow workflow = getWorkflow(workflowId, true);
        WorkflowDef ephemeralWorkflow = workflow.getWorkflowDefinition();
        assertNotNull(ephemeralWorkflow);
        assertEquals(workflowDefinition, ephemeralWorkflow);

        TaskDef storedTaskDefinition = getTaskDefinition("storedTask0");
        List<WorkflowTask> tasks = ephemeralWorkflow.getTasks();
        assertEquals(2, tasks.size());
        assertEquals(workflowTask1, tasks.get(0));
        // The stored task reference must have been resolved to the registered definition.
        TaskDef currentStoredTaskDefinition = tasks.get(1).getTaskDefinition();
        assertNotNull(currentStoredTaskDefinition);
        assertEquals(storedTaskDefinition, currentStoredTaskDefinition);
    }

    /** Registers a complete_task event handler and verifies it round-trips by name. */
    @Test
    public void testEventHandler() {
        String eventName = "conductor:test_workflow:complete_task_with_event";
        EventHandler eventHandler = new EventHandler();
        eventHandler.setName("test_complete_task_event");
        EventHandler.Action completeTaskAction = new EventHandler.Action();
        completeTaskAction.setAction(EventHandler.Action.Type.complete_task);
        completeTaskAction.setComplete_task(new EventHandler.TaskDetails());
        completeTaskAction.getComplete_task().setTaskRefName("test_task");
        completeTaskAction.getComplete_task().setWorkflowId("test_id");
        completeTaskAction.getComplete_task().setOutput(new HashMap<>());
        eventHandler.getActions().add(completeTaskAction);
        eventHandler.setEvent(eventName);
        eventHandler.setActive(true);

        registerEventHandler(eventHandler);

        Iterator<EventHandler> it = getEventHandlers(eventName, true);
        EventHandler result = it.next();
        assertFalse(it.hasNext());
        assertEquals(eventHandler.getName(), result.getName());
    }

    /**
     * Builds a SIMPLE workflow task with every optional field set to the sentinel value that the
     * in-memory redis DB deserializes nulls to, so equality checks against round-tripped
     * definitions succeed.
     */
    protected WorkflowTask createWorkflowTask(String name) {
        WorkflowTask workflowTask = new WorkflowTask();
        workflowTask.setName(name);
        workflowTask.setWorkflowTaskType(TaskType.SIMPLE);
        workflowTask.setTaskReferenceName(name);
        workflowTask.setDescription(getDefaultDescription(name));
        workflowTask.setDynamicTaskNameParam(DEFAULT_NULL_VALUE);
        workflowTask.setCaseValueParam(DEFAULT_NULL_VALUE);
        workflowTask.setCaseExpression(DEFAULT_NULL_VALUE);
        workflowTask.setDynamicForkTasksParam(DEFAULT_NULL_VALUE);
        workflowTask.setDynamicForkTasksInputParamName(DEFAULT_NULL_VALUE);
        workflowTask.setSink(DEFAULT_NULL_VALUE);
        workflowTask.setEvaluatorType(DEFAULT_NULL_VALUE);
        workflowTask.setExpression(DEFAULT_NULL_VALUE);
        return workflowTask;
    }

    /** Builds a minimal task definition carrying only a name. */
    protected TaskDef createTaskDefinition(String name) {
        TaskDef taskDefinition = new TaskDef();
        taskDefinition.setName(name);
        return taskDefinition;
    }

    /** Builds an empty workflow definition with the standard test description/owner. */
    protected WorkflowDef createWorkflowDefinition(String workflowName) {
        WorkflowDef workflowDefinition = new WorkflowDef();
        workflowDefinition.setName(workflowName);
        workflowDefinition.setDescription(getDefaultDescription(workflowName));
        workflowDefinition.setFailureWorkflow(DEFAULT_NULL_VALUE);
        workflowDefinition.setOwnerEmail(DEFAULT_EMAIL_ADDRESS);
        return workflowDefinition;
    }

    /**
     * Creates {@code numberOfTaskDefinitions} task definitions named {@code prefix0..N-1} (using
     * {@link #TASK_DEFINITION_PREFIX} when the prefix is null), registers them via the concrete
     * client, and returns them.
     */
    protected List<TaskDef> createAndRegisterTaskDefinitions(
            String prefixTaskDefinition, int numberOfTaskDefinitions) {
        String prefix = Optional.ofNullable(prefixTaskDefinition).orElse(TASK_DEFINITION_PREFIX);
        List<TaskDef> definitions = new LinkedList<>();
        for (int i = 0; i < numberOfTaskDefinitions; i++) {
            TaskDef def =
                    new TaskDef(
                            prefix + i,
                            "task " + i + DEFAULT_DESCRIPTION,
                            DEFAULT_EMAIL_ADDRESS,
                            3,
                            60,
                            60);
            def.setTimeoutPolicy(TaskDef.TimeoutPolicy.RETRY);
            definitions.add(def);
        }
        this.registerTaskDefinitions(definitions);
        return definitions;
    }

    private String getDefaultDescription(String nameResource) {
        return nameResource + " " + DEFAULT_DESCRIPTION;
    }

    // Transport-specific operations supplied by HTTP/gRPC subclasses.
    protected abstract String startWorkflow(
            String workflowExecutionName, WorkflowDef workflowDefinition);

    protected abstract Workflow getWorkflow(String workflowId, boolean includeTasks);

    protected abstract TaskDef getTaskDefinition(String taskName);

    protected abstract void registerTaskDefinitions(List<TaskDef> taskDefinitionList);

    protected abstract void registerWorkflowDefinition(WorkflowDef workflowDefinition);

    protected abstract void registerEventHandler(EventHandler eventHandler);

    protected abstract Iterator<EventHandler> getEventHandlers(String event, boolean activeOnly);
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/test-util/src/test/java/com/netflix/conductor/test/integration/grpc/AbstractGrpcEndToEndTest.java | test-util/src/test/java/com/netflix/conductor/test/integration/grpc/AbstractGrpcEndToEndTest.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.test.integration.grpc;
import java.util.*;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.test.context.TestPropertySource;
import org.springframework.test.context.junit4.SpringRunner;
import com.netflix.conductor.ConductorTestApp;
import com.netflix.conductor.client.grpc.EventClient;
import com.netflix.conductor.client.grpc.MetadataClient;
import com.netflix.conductor.client.grpc.TaskClient;
import com.netflix.conductor.client.grpc.WorkflowClient;
import com.netflix.conductor.common.metadata.events.EventHandler;
import com.netflix.conductor.common.metadata.tasks.Task;
import com.netflix.conductor.common.metadata.tasks.Task.Status;
import com.netflix.conductor.common.metadata.tasks.TaskDef;
import com.netflix.conductor.common.metadata.tasks.TaskDef.TimeoutPolicy;
import com.netflix.conductor.common.metadata.tasks.TaskResult;
import com.netflix.conductor.common.metadata.workflow.StartWorkflowRequest;
import com.netflix.conductor.common.metadata.workflow.WorkflowDef;
import com.netflix.conductor.common.metadata.workflow.WorkflowTask;
import com.netflix.conductor.common.run.SearchResult;
import com.netflix.conductor.common.run.TaskSummary;
import com.netflix.conductor.common.run.Workflow;
import com.netflix.conductor.common.run.Workflow.WorkflowStatus;
import com.netflix.conductor.common.run.WorkflowSummary;
import com.netflix.conductor.test.integration.AbstractEndToEndTest;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
@RunWith(SpringRunner.class)
@SpringBootTest(
classes = ConductorTestApp.class,
properties = {"conductor.grpc-server.enabled=true", "conductor.grpc-server.port=8092"})
@TestPropertySource(locations = "classpath:application-integrationtest.properties")
public abstract class AbstractGrpcEndToEndTest extends AbstractEndToEndTest {
protected static TaskClient taskClient;
protected static WorkflowClient workflowClient;
protected static MetadataClient metadataClient;
protected static EventClient eventClient;
@Override
protected String startWorkflow(String workflowExecutionName, WorkflowDef workflowDefinition) {
StartWorkflowRequest workflowRequest =
new StartWorkflowRequest()
.withName(workflowExecutionName)
.withWorkflowDef(workflowDefinition);
return workflowClient.startWorkflow(workflowRequest);
}
@Override
protected Workflow getWorkflow(String workflowId, boolean includeTasks) {
return workflowClient.getWorkflow(workflowId, includeTasks);
}
@Override
protected TaskDef getTaskDefinition(String taskName) {
return metadataClient.getTaskDef(taskName);
}
@Override
protected void registerTaskDefinitions(List<TaskDef> taskDefinitionList) {
metadataClient.registerTaskDefs(taskDefinitionList);
}
@Override
protected void registerWorkflowDefinition(WorkflowDef workflowDefinition) {
metadataClient.registerWorkflowDef(workflowDefinition);
}
@Override
protected void registerEventHandler(EventHandler eventHandler) {
eventClient.registerEventHandler(eventHandler);
}
@Override
protected Iterator<EventHandler> getEventHandlers(String event, boolean activeOnly) {
return eventClient.getEventHandlers(event, activeOnly);
}
@Test
public void testAll() throws Exception {
assertNotNull(taskClient);
List<TaskDef> defs = new LinkedList<>();
for (int i = 0; i < 5; i++) {
TaskDef def = new TaskDef("t" + i, "task " + i, DEFAULT_EMAIL_ADDRESS, 3, 60, 60);
def.setTimeoutPolicy(TimeoutPolicy.RETRY);
defs.add(def);
}
metadataClient.registerTaskDefs(defs);
for (int i = 0; i < 5; i++) {
final String taskName = "t" + i;
TaskDef def = metadataClient.getTaskDef(taskName);
assertNotNull(def);
assertEquals(taskName, def.getName());
}
WorkflowDef def = createWorkflowDefinition("test" + UUID.randomUUID());
WorkflowTask t0 = createWorkflowTask("t0");
WorkflowTask t1 = createWorkflowTask("t1");
def.getTasks().add(t0);
def.getTasks().add(t1);
metadataClient.registerWorkflowDef(def);
WorkflowDef found = metadataClient.getWorkflowDef(def.getName(), null);
assertNotNull(found);
assertEquals(def, found);
String correlationId = "test_corr_id";
StartWorkflowRequest startWf = new StartWorkflowRequest();
startWf.setName(def.getName());
startWf.setCorrelationId(correlationId);
String workflowId = workflowClient.startWorkflow(startWf);
assertNotNull(workflowId);
Workflow workflow = workflowClient.getWorkflow(workflowId, false);
assertEquals(0, workflow.getTasks().size());
assertEquals(workflowId, workflow.getWorkflowId());
workflow = workflowClient.getWorkflow(workflowId, true);
assertNotNull(workflow);
assertEquals(WorkflowStatus.RUNNING, workflow.getStatus());
assertEquals(1, workflow.getTasks().size());
assertEquals(t0.getTaskReferenceName(), workflow.getTasks().get(0).getReferenceTaskName());
assertEquals(workflowId, workflow.getWorkflowId());
List<String> runningIds =
workflowClient.getRunningWorkflow(def.getName(), def.getVersion());
assertNotNull(runningIds);
assertEquals(1, runningIds.size());
assertEquals(workflowId, runningIds.get(0));
List<Task> polled =
taskClient.batchPollTasksByTaskType("non existing task", "test", 1, 100);
assertNotNull(polled);
assertEquals(0, polled.size());
polled = taskClient.batchPollTasksByTaskType(t0.getName(), "test", 1, 100);
assertNotNull(polled);
assertEquals(1, polled.size());
assertEquals(t0.getName(), polled.get(0).getTaskDefName());
Task task = polled.get(0);
task.getOutputData().put("key1", "value1");
task.setStatus(Status.COMPLETED);
taskClient.updateTask(new TaskResult(task));
polled = taskClient.batchPollTasksByTaskType(t0.getName(), "test", 1, 100);
assertNotNull(polled);
assertTrue(polled.toString(), polled.isEmpty());
workflow = workflowClient.getWorkflow(workflowId, true);
assertNotNull(workflow);
assertEquals(WorkflowStatus.RUNNING, workflow.getStatus());
assertEquals(2, workflow.getTasks().size());
assertEquals(t0.getTaskReferenceName(), workflow.getTasks().get(0).getReferenceTaskName());
assertEquals(t1.getTaskReferenceName(), workflow.getTasks().get(1).getReferenceTaskName());
assertEquals(Status.COMPLETED, workflow.getTasks().get(0).getStatus());
assertEquals(Status.SCHEDULED, workflow.getTasks().get(1).getStatus());
Task taskById = taskClient.getTaskDetails(task.getTaskId());
assertNotNull(taskById);
assertEquals(task.getTaskId(), taskById.getTaskId());
Thread.sleep(1000);
SearchResult<WorkflowSummary> searchResult =
workflowClient.search("workflowType='" + def.getName() + "'");
assertNotNull(searchResult);
assertEquals(1, searchResult.getTotalHits());
assertEquals(workflow.getWorkflowId(), searchResult.getResults().get(0).getWorkflowId());
SearchResult<Workflow> searchResultV2 =
workflowClient.searchV2("workflowType='" + def.getName() + "'");
assertNotNull(searchResultV2);
assertEquals(1, searchResultV2.getTotalHits());
assertEquals(workflow.getWorkflowId(), searchResultV2.getResults().get(0).getWorkflowId());
SearchResult<WorkflowSummary> searchResultAdvanced =
workflowClient.search(0, 1, null, null, "workflowType='" + def.getName() + "'");
assertNotNull(searchResultAdvanced);
assertEquals(1, searchResultAdvanced.getTotalHits());
assertEquals(
workflow.getWorkflowId(), searchResultAdvanced.getResults().get(0).getWorkflowId());
SearchResult<Workflow> searchResultV2Advanced =
workflowClient.searchV2(0, 1, null, null, "workflowType='" + def.getName() + "'");
assertNotNull(searchResultV2Advanced);
assertEquals(1, searchResultV2Advanced.getTotalHits());
assertEquals(
workflow.getWorkflowId(),
searchResultV2Advanced.getResults().get(0).getWorkflowId());
SearchResult<TaskSummary> taskSearchResult =
taskClient.search("taskType='" + t0.getName() + "'");
assertNotNull(taskSearchResult);
assertEquals(1, searchResultV2Advanced.getTotalHits());
assertEquals(t0.getName(), taskSearchResult.getResults().get(0).getTaskDefName());
SearchResult<TaskSummary> taskSearchResultAdvanced =
taskClient.search(0, 1, null, null, "taskType='" + t0.getName() + "'");
assertNotNull(taskSearchResultAdvanced);
assertEquals(1, taskSearchResultAdvanced.getTotalHits());
assertEquals(t0.getName(), taskSearchResultAdvanced.getResults().get(0).getTaskDefName());
SearchResult<Task> taskSearchResultV2 =
taskClient.searchV2("taskType='" + t0.getName() + "'");
assertNotNull(taskSearchResultV2);
assertEquals(1, searchResultV2Advanced.getTotalHits());
assertEquals(
t0.getTaskReferenceName(),
taskSearchResultV2.getResults().get(0).getReferenceTaskName());
SearchResult<Task> taskSearchResultV2Advanced =
taskClient.searchV2(0, 1, null, null, "taskType='" + t0.getName() + "'");
assertNotNull(taskSearchResultV2Advanced);
assertEquals(1, taskSearchResultV2Advanced.getTotalHits());
assertEquals(
t0.getTaskReferenceName(),
taskSearchResultV2Advanced.getResults().get(0).getReferenceTaskName());
workflowClient.terminateWorkflow(workflowId, "terminate reason");
workflow = workflowClient.getWorkflow(workflowId, true);
assertNotNull(workflow);
assertEquals(WorkflowStatus.TERMINATED, workflow.getStatus());
workflowClient.restart(workflowId, false);
workflow = workflowClient.getWorkflow(workflowId, true);
assertNotNull(workflow);
assertEquals(WorkflowStatus.RUNNING, workflow.getStatus());
assertEquals(1, workflow.getTasks().size());
}
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/test-util/src/test/java/com/netflix/conductor/common/config/TestObjectMapperConfiguration.java | test-util/src/test/java/com/netflix/conductor/common/config/TestObjectMapperConfiguration.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.common.config;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import com.fasterxml.jackson.databind.ObjectMapper;
/** Supplies the standard Conductor {@link ObjectMapper} for tests that need them. */
@Configuration
public class TestObjectMapperConfiguration {

    /** Exposes the standard Conductor {@link ObjectMapper} as a bean for tests. */
    @Bean
    public ObjectMapper testObjectMapper() {
        final ObjectMapperProvider provider = new ObjectMapperProvider();
        return provider.getObjectMapper();
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/core/src/test/java/com/netflix/conductor/TestUtils.java | core/src/test/java/com/netflix/conductor/TestUtils.java | /*
* Copyright 2020 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor;
import java.util.HashSet;
import java.util.Set;
import java.util.stream.Collectors;
import jakarta.validation.ConstraintViolation;
public class TestUtils {

    /**
     * Extracts the message of each constraint violation.
     *
     * <p>Collects directly into a {@link HashSet} instead of the original
     * presize-then-{@code addAll(collect(toList()))} round trip; duplicates collapse either way.
     *
     * @param constraintViolations violations reported by the validator
     * @return the distinct violation messages
     */
    public static Set<String> getConstraintViolationMessages(
            Set<ConstraintViolation<?>> constraintViolations) {
        return constraintViolations.stream()
                .map(ConstraintViolation::getMessage)
                .collect(Collectors.toCollection(HashSet::new));
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/core/src/test/java/com/netflix/conductor/dao/PollDataDAOTest.java | core/src/test/java/com/netflix/conductor/dao/PollDataDAOTest.java | /*
* Copyright 2020 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.dao;
import java.util.List;
import org.junit.Test;
import com.netflix.conductor.common.metadata.tasks.PollData;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
public abstract class PollDataDAOTest {

    /** Supplies the DAO implementation under test. */
    protected abstract PollDataDAO getPollDataDAO();

    /**
     * Verifies last-poll bookkeeping with and without a domain, listing all entries for a task
     * type, and absence for an unknown domain.
     *
     * <p>JUnit's {@code assertEquals(expected, actual)} argument order is now respected so
     * failure messages read correctly (the original had the arguments reversed throughout).
     */
    @Test
    public void testPollData() {
        // Domain-less poll record.
        getPollDataDAO().updateLastPollData("taskDef", null, "workerId1");
        PollData pollData = getPollDataDAO().getPollData("taskDef", null);
        assertNotNull(pollData);
        assertTrue(pollData.getLastPollTime() > 0);
        assertEquals("taskDef", pollData.getQueueName());
        assertNull(pollData.getDomain());
        assertEquals("workerId1", pollData.getWorkerId());

        // Domain-qualified poll record for the same task type.
        getPollDataDAO().updateLastPollData("taskDef", "domain1", "workerId1");
        pollData = getPollDataDAO().getPollData("taskDef", "domain1");
        assertNotNull(pollData);
        assertTrue(pollData.getLastPollTime() > 0);
        assertEquals("taskDef", pollData.getQueueName());
        assertEquals("domain1", pollData.getDomain());
        assertEquals("workerId1", pollData.getWorkerId());

        // Both records are listed for the task type.
        List<PollData> pData = getPollDataDAO().getPollData("taskDef");
        assertEquals(2, pData.size());

        // Unknown domain yields no record.
        pollData = getPollDataDAO().getPollData("taskDef", "domain2");
        assertNull(pollData);
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/core/src/test/java/com/netflix/conductor/dao/ExecutionDAOTest.java | core/src/test/java/com/netflix/conductor/dao/ExecutionDAOTest.java | /*
* Copyright 2022 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.dao;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.UUID;
import java.util.stream.Collectors;
import org.apache.commons.lang3.builder.EqualsBuilder;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.ExpectedException;
import com.netflix.conductor.common.metadata.tasks.TaskDef;
import com.netflix.conductor.common.metadata.workflow.WorkflowDef;
import com.netflix.conductor.common.metadata.workflow.WorkflowTask;
import com.netflix.conductor.model.TaskModel;
import com.netflix.conductor.model.WorkflowModel;
import static org.junit.Assert.*;
public abstract class ExecutionDAOTest {
protected abstract ExecutionDAO getExecutionDAO();
protected ConcurrentExecutionLimitDAO getConcurrentExecutionLimitDAO() {
return (ConcurrentExecutionLimitDAO) getExecutionDAO();
}
@Rule public ExpectedException expectedException = ExpectedException.none();
@Test
public void testTaskExceedsLimit() {
TaskDef taskDefinition = new TaskDef();
taskDefinition.setName("task1");
taskDefinition.setConcurrentExecLimit(1);
WorkflowTask workflowTask = new WorkflowTask();
workflowTask.setName("task1");
workflowTask.setTaskDefinition(taskDefinition);
workflowTask.setTaskDefinition(taskDefinition);
List<TaskModel> tasks = new LinkedList<>();
for (int i = 0; i < 15; i++) {
TaskModel task = new TaskModel();
task.setScheduledTime(1L);
task.setSeq(i + 1);
task.setTaskId("t_" + i);
task.setWorkflowInstanceId("workflow_" + i);
task.setReferenceTaskName("task1");
task.setTaskDefName("task1");
tasks.add(task);
task.setStatus(TaskModel.Status.SCHEDULED);
task.setWorkflowTask(workflowTask);
}
getExecutionDAO().createTasks(tasks);
assertFalse(getConcurrentExecutionLimitDAO().exceedsLimit(tasks.get(0)));
tasks.get(0).setStatus(TaskModel.Status.IN_PROGRESS);
getExecutionDAO().updateTask(tasks.get(0));
for (TaskModel task : tasks) {
assertTrue(getConcurrentExecutionLimitDAO().exceedsLimit(task));
}
}
@Test
public void testCreateTaskException() {
TaskModel task = new TaskModel();
task.setScheduledTime(1L);
task.setSeq(1);
task.setTaskId(UUID.randomUUID().toString());
task.setTaskDefName("task1");
expectedException.expect(IllegalArgumentException.class);
expectedException.expectMessage("Workflow instance id cannot be null");
getExecutionDAO().createTasks(List.of(task));
task.setWorkflowInstanceId(UUID.randomUUID().toString());
expectedException.expect(IllegalArgumentException.class);
expectedException.expectMessage("Task reference name cannot be null");
getExecutionDAO().createTasks(List.of(task));
}
@Test
public void testCreateTaskException2() {
TaskModel task = new TaskModel();
task.setScheduledTime(1L);
task.setSeq(1);
task.setTaskId(UUID.randomUUID().toString());
task.setTaskDefName("task1");
task.setWorkflowInstanceId(UUID.randomUUID().toString());
expectedException.expect(IllegalArgumentException.class);
expectedException.expectMessage("Task reference name cannot be null");
getExecutionDAO().createTasks(Collections.singletonList(task));
}
@Test
public void testTaskCreateDups() {
List<TaskModel> tasks = new LinkedList<>();
String workflowId = UUID.randomUUID().toString();
for (int i = 0; i < 3; i++) {
TaskModel task = new TaskModel();
task.setScheduledTime(1L);
task.setSeq(i + 1);
task.setTaskId(workflowId + "_t" + i);
task.setReferenceTaskName("t" + i);
task.setRetryCount(0);
task.setWorkflowInstanceId(workflowId);
task.setTaskDefName("task" + i);
task.setStatus(TaskModel.Status.IN_PROGRESS);
tasks.add(task);
}
// Let's insert a retried task
TaskModel task = new TaskModel();
task.setScheduledTime(1L);
task.setSeq(1);
task.setTaskId(workflowId + "_t" + 2);
task.setReferenceTaskName("t" + 2);
task.setRetryCount(1);
task.setWorkflowInstanceId(workflowId);
task.setTaskDefName("task" + 2);
task.setStatus(TaskModel.Status.IN_PROGRESS);
tasks.add(task);
// Duplicate task!
task = new TaskModel();
task.setScheduledTime(1L);
task.setSeq(1);
task.setTaskId(workflowId + "_t" + 1);
task.setReferenceTaskName("t" + 1);
task.setRetryCount(0);
task.setWorkflowInstanceId(workflowId);
task.setTaskDefName("task" + 1);
task.setStatus(TaskModel.Status.IN_PROGRESS);
tasks.add(task);
List<TaskModel> created = getExecutionDAO().createTasks(tasks);
assertEquals(tasks.size() - 1, created.size()); // 1 less
Set<String> srcIds =
tasks.stream()
.map(t -> t.getReferenceTaskName() + "." + t.getRetryCount())
.collect(Collectors.toSet());
Set<String> createdIds =
created.stream()
.map(t -> t.getReferenceTaskName() + "." + t.getRetryCount())
.collect(Collectors.toSet());
assertEquals(srcIds, createdIds);
List<TaskModel> pending = getExecutionDAO().getPendingTasksByWorkflow("task0", workflowId);
assertNotNull(pending);
assertEquals(1, pending.size());
assertTrue(EqualsBuilder.reflectionEquals(tasks.get(0), pending.get(0)));
List<TaskModel> found = getExecutionDAO().getTasks(tasks.get(0).getTaskDefName(), null, 1);
assertNotNull(found);
assertEquals(1, found.size());
assertTrue(EqualsBuilder.reflectionEquals(tasks.get(0), found.get(0)));
}
@Test
public void testTaskOps() {
List<TaskModel> tasks = new LinkedList<>();
String workflowId = UUID.randomUUID().toString();
for (int i = 0; i < 3; i++) {
TaskModel task = new TaskModel();
task.setScheduledTime(1L);
task.setSeq(1);
task.setTaskId(workflowId + "_t" + i);
task.setReferenceTaskName("testTaskOps" + i);
task.setRetryCount(0);
task.setWorkflowInstanceId(workflowId);
task.setTaskDefName("testTaskOps" + i);
task.setStatus(TaskModel.Status.IN_PROGRESS);
tasks.add(task);
}
for (int i = 0; i < 3; i++) {
TaskModel task = new TaskModel();
task.setScheduledTime(1L);
task.setSeq(1);
task.setTaskId("x" + workflowId + "_t" + i);
task.setReferenceTaskName("testTaskOps" + i);
task.setRetryCount(0);
task.setWorkflowInstanceId("x" + workflowId);
task.setTaskDefName("testTaskOps" + i);
task.setStatus(TaskModel.Status.IN_PROGRESS);
getExecutionDAO().createTasks(Collections.singletonList(task));
}
List<TaskModel> created = getExecutionDAO().createTasks(tasks);
assertEquals(tasks.size(), created.size());
List<TaskModel> pending =
getExecutionDAO().getPendingTasksForTaskType(tasks.get(0).getTaskDefName());
assertNotNull(pending);
assertEquals(2, pending.size());
// Pending list can come in any order. finding the one we are looking for and then
// comparing
TaskModel matching =
pending.stream()
.filter(task -> task.getTaskId().equals(tasks.get(0).getTaskId()))
.findAny()
.get();
assertTrue(EqualsBuilder.reflectionEquals(matching, tasks.get(0)));
for (int i = 0; i < 3; i++) {
TaskModel found = getExecutionDAO().getTask(workflowId + "_t" + i);
assertNotNull(found);
found.addOutput("updated", true);
found.setStatus(TaskModel.Status.COMPLETED);
getExecutionDAO().updateTask(found);
}
List<String> taskIds =
tasks.stream().map(TaskModel::getTaskId).collect(Collectors.toList());
List<TaskModel> found = getExecutionDAO().getTasks(taskIds);
assertEquals(taskIds.size(), found.size());
found.forEach(
task -> {
assertTrue(task.getOutputData().containsKey("updated"));
assertEquals(true, task.getOutputData().get("updated"));
boolean removed = getExecutionDAO().removeTask(task.getTaskId());
assertTrue(removed);
});
found = getExecutionDAO().getTasks(taskIds);
assertTrue(found.isEmpty());
}
@Test
public void testPending() {
WorkflowDef def = new WorkflowDef();
def.setName("pending_count_test");
WorkflowModel workflow = createTestWorkflow();
workflow.setWorkflowDefinition(def);
List<String> workflowIds = generateWorkflows(workflow, 10);
long count = getExecutionDAO().getPendingWorkflowCount(def.getName());
assertEquals(10, count);
for (int i = 0; i < 10; i++) {
getExecutionDAO().removeFromPendingWorkflow(def.getName(), workflowIds.get(i));
}
count = getExecutionDAO().getPendingWorkflowCount(def.getName());
assertEquals(0, count);
}
@Test
public void complexExecutionTest() {
WorkflowModel workflow = createTestWorkflow();
int numTasks = workflow.getTasks().size();
String workflowId = getExecutionDAO().createWorkflow(workflow);
assertEquals(workflow.getWorkflowId(), workflowId);
List<TaskModel> created = getExecutionDAO().createTasks(workflow.getTasks());
assertEquals(workflow.getTasks().size(), created.size());
WorkflowModel workflowWithTasks =
getExecutionDAO().getWorkflow(workflow.getWorkflowId(), true);
assertEquals(workflowId, workflowWithTasks.getWorkflowId());
assertEquals(numTasks, workflowWithTasks.getTasks().size());
WorkflowModel found = getExecutionDAO().getWorkflow(workflowId, false);
assertTrue(found.getTasks().isEmpty());
workflow.getTasks().clear();
assertEquals(workflow, found);
workflow.getInput().put("updated", true);
getExecutionDAO().updateWorkflow(workflow);
found = getExecutionDAO().getWorkflow(workflowId);
assertNotNull(found);
assertTrue(found.getInput().containsKey("updated"));
assertEquals(true, found.getInput().get("updated"));
List<String> running =
getExecutionDAO()
.getRunningWorkflowIds(
workflow.getWorkflowName(), workflow.getWorkflowVersion());
assertNotNull(running);
assertTrue(running.isEmpty());
workflow.setStatus(WorkflowModel.Status.RUNNING);
getExecutionDAO().updateWorkflow(workflow);
running =
getExecutionDAO()
.getRunningWorkflowIds(
workflow.getWorkflowName(), workflow.getWorkflowVersion());
assertNotNull(running);
assertEquals(1, running.size());
assertEquals(workflow.getWorkflowId(), running.get(0));
List<WorkflowModel> pending =
getExecutionDAO()
.getPendingWorkflowsByType(
workflow.getWorkflowName(), workflow.getWorkflowVersion());
assertNotNull(pending);
assertEquals(1, pending.size());
assertEquals(3, pending.get(0).getTasks().size());
pending.get(0).getTasks().clear();
assertEquals(workflow, pending.get(0));
workflow.setStatus(WorkflowModel.Status.COMPLETED);
getExecutionDAO().updateWorkflow(workflow);
running =
getExecutionDAO()
.getRunningWorkflowIds(
workflow.getWorkflowName(), workflow.getWorkflowVersion());
assertNotNull(running);
assertTrue(running.isEmpty());
List<WorkflowModel> bytime =
getExecutionDAO()
.getWorkflowsByType(
workflow.getWorkflowName(),
System.currentTimeMillis(),
System.currentTimeMillis() + 100);
assertNotNull(bytime);
assertTrue(bytime.isEmpty());
bytime =
getExecutionDAO()
.getWorkflowsByType(
workflow.getWorkflowName(),
workflow.getCreateTime() - 10,
workflow.getCreateTime() + 10);
assertNotNull(bytime);
assertEquals(1, bytime.size());
}
protected WorkflowModel createTestWorkflow() {
WorkflowDef def = new WorkflowDef();
def.setName("Junit Workflow");
def.setVersion(3);
def.setSchemaVersion(2);
WorkflowModel workflow = new WorkflowModel();
workflow.setWorkflowDefinition(def);
workflow.setCorrelationId("correlationX");
workflow.setCreatedBy("junit_tester");
workflow.setEndTime(200L);
Map<String, Object> input = new HashMap<>();
input.put("param1", "param1 value");
input.put("param2", 100);
workflow.setInput(input);
Map<String, Object> output = new HashMap<>();
output.put("ouput1", "output 1 value");
output.put("op2", 300);
workflow.setOutput(output);
workflow.setOwnerApp("workflow");
workflow.setParentWorkflowId("parentWorkflowId");
workflow.setParentWorkflowTaskId("parentWFTaskId");
workflow.setReasonForIncompletion("missing recipe");
workflow.setReRunFromWorkflowId("re-run from id1");
workflow.setCreateTime(90L);
workflow.setStatus(WorkflowModel.Status.FAILED);
workflow.setWorkflowId(UUID.randomUUID().toString());
List<TaskModel> tasks = new LinkedList<>();
TaskModel task = new TaskModel();
task.setScheduledTime(1L);
task.setSeq(1);
task.setTaskId(UUID.randomUUID().toString());
task.setReferenceTaskName("t1");
task.setWorkflowInstanceId(workflow.getWorkflowId());
task.setTaskDefName("task1");
TaskModel task2 = new TaskModel();
task2.setScheduledTime(2L);
task2.setSeq(2);
task2.setTaskId(UUID.randomUUID().toString());
task2.setReferenceTaskName("t2");
task2.setWorkflowInstanceId(workflow.getWorkflowId());
task2.setTaskDefName("task2");
TaskModel task3 = new TaskModel();
task3.setScheduledTime(2L);
task3.setSeq(3);
task3.setTaskId(UUID.randomUUID().toString());
task3.setReferenceTaskName("t3");
task3.setWorkflowInstanceId(workflow.getWorkflowId());
task3.setTaskDefName("task3");
tasks.add(task);
tasks.add(task2);
tasks.add(task3);
workflow.setTasks(tasks);
workflow.setUpdatedBy("junit_tester");
workflow.setUpdatedTime(800L);
return workflow;
}
protected List<String> generateWorkflows(WorkflowModel base, int count) {
List<String> workflowIds = new ArrayList<>();
for (int i = 0; i < count; i++) {
String workflowId = UUID.randomUUID().toString();
base.setWorkflowId(workflowId);
base.setCorrelationId("corr001");
base.setStatus(WorkflowModel.Status.RUNNING);
getExecutionDAO().createWorkflow(base);
workflowIds.add(workflowId);
}
return workflowIds;
}
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/core/src/test/java/com/netflix/conductor/service/WorkflowServiceTest.java | core/src/test/java/com/netflix/conductor/service/WorkflowServiceTest.java | /*
* Copyright 2021 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.service;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.autoconfigure.EnableAutoConfiguration;
import org.springframework.boot.test.context.TestConfiguration;
import org.springframework.context.annotation.Bean;
import org.springframework.test.context.junit4.SpringRunner;
import com.netflix.conductor.common.metadata.workflow.RerunWorkflowRequest;
import com.netflix.conductor.common.metadata.workflow.SkipTaskRequest;
import com.netflix.conductor.common.metadata.workflow.WorkflowDef;
import com.netflix.conductor.common.run.SearchResult;
import com.netflix.conductor.common.run.Workflow;
import com.netflix.conductor.common.run.WorkflowSummary;
import com.netflix.conductor.core.exception.NotFoundException;
import com.netflix.conductor.core.execution.WorkflowExecutor;
import jakarta.validation.ConstraintViolationException;
import static com.netflix.conductor.TestUtils.getConstraintViolationMessages;
import static org.junit.Assert.*;
import static org.mockito.ArgumentMatchers.*;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
@SuppressWarnings("SpringJavaAutowiredMembersInspection")
@RunWith(SpringRunner.class)
@EnableAutoConfiguration
/**
 * Unit tests for {@link WorkflowServiceImpl}: bean-validation constraints on the service API, and
 * verification that calls are delegated to the mocked {@link WorkflowExecutor} /
 * {@link ExecutionService}.
 */
public class WorkflowServiceTest {

    @TestConfiguration
    static class TestWorkflowConfiguration {

        @Bean
        public WorkflowExecutor workflowExecutor() {
            return mock(WorkflowExecutor.class);
        }

        @Bean
        public ExecutionService executionService() {
            return mock(ExecutionService.class);
        }

        @Bean
        public MetadataService metadataService() {
            return mock(MetadataServiceImpl.class);
        }

        @Bean
        public WorkflowService workflowService(
                WorkflowExecutor workflowExecutor,
                ExecutionService executionService,
                MetadataService metadataService) {
            return new WorkflowServiceImpl(workflowExecutor, executionService, metadataService);
        }
    }

    @Autowired private WorkflowExecutor workflowExecutor;

    @Autowired private ExecutionService executionService;

    @Autowired private MetadataService metadataService;

    @Autowired private WorkflowService workflowService;

    @Test(expected = ConstraintViolationException.class)
    public void testStartWorkflowNull() {
        try {
            workflowService.startWorkflow(null);
        } catch (ConstraintViolationException ex) {
            assertEquals(1, ex.getConstraintViolations().size());
            Set<String> messages = getConstraintViolationMessages(ex.getConstraintViolations());
            assertTrue(messages.contains("StartWorkflowRequest cannot be null"));
            throw ex;
        }
    }

    @Test(expected = ConstraintViolationException.class)
    public void testGetWorkflowsNoName() {
        try {
            workflowService.getWorkflows("", "c123", true, true);
        } catch (ConstraintViolationException ex) {
            assertEquals(1, ex.getConstraintViolations().size());
            Set<String> messages = getConstraintViolationMessages(ex.getConstraintViolations());
            assertTrue(messages.contains("Workflow name cannot be null or empty"));
            throw ex;
        }
    }

    // FIX: method name typo "Worklfows" -> "Workflows" (safe: JUnit discovers by annotation).
    @Test
    public void testGetWorkflowsSingleCorrelationId() {
        Workflow workflow = new Workflow();
        workflow.setCorrelationId("c123");

        List<Workflow> workflowArrayList = Collections.singletonList(workflow);

        when(executionService.getWorkflowInstances(
                        anyString(), anyString(), anyBoolean(), anyBoolean()))
                .thenReturn(workflowArrayList);
        assertEquals(workflowArrayList, workflowService.getWorkflows("test", "c123", true, true));
    }

    // FIX: method name typo "Worklfows" -> "Workflows".
    @Test
    public void testGetWorkflowsMultipleCorrelationId() {
        Workflow workflow = new Workflow();
        workflow.setCorrelationId("c123");

        List<Workflow> workflowArrayList = Collections.singletonList(workflow);

        List<String> correlationIdList = Collections.singletonList("c123");

        Map<String, List<Workflow>> workflowMap = new HashMap<>();
        workflowMap.put("c123", workflowArrayList);

        when(executionService.getWorkflowInstances(
                        anyString(), anyString(), anyBoolean(), anyBoolean()))
                .thenReturn(workflowArrayList);
        assertEquals(
                workflowMap, workflowService.getWorkflows("test", true, true, correlationIdList));
    }

    @Test
    public void testGetExecutionStatus() {
        Workflow workflow = new Workflow();
        workflow.setCorrelationId("c123");

        when(executionService.getExecutionStatus(anyString(), anyBoolean())).thenReturn(workflow);
        assertEquals(workflow, workflowService.getExecutionStatus("w123", true));
    }

    @Test(expected = ConstraintViolationException.class)
    public void testGetExecutionStatusNoWorkflowId() {
        try {
            workflowService.getExecutionStatus("", true);
        } catch (ConstraintViolationException ex) {
            assertEquals(1, ex.getConstraintViolations().size());
            Set<String> messages = getConstraintViolationMessages(ex.getConstraintViolations());
            assertTrue(messages.contains("WorkflowId cannot be null or empty."));
            throw ex;
        }
    }

    @Test(expected = NotFoundException.class)
    public void testNotFoundExceptionGetExecutionStatus() {
        when(executionService.getExecutionStatus(anyString(), anyBoolean())).thenReturn(null);
        workflowService.getExecutionStatus("w123", true);
    }

    @Test
    public void testDeleteWorkflow() {
        workflowService.deleteWorkflow("w123", false);
        verify(executionService, times(1)).removeWorkflow(anyString(), eq(false));
    }

    @Test(expected = ConstraintViolationException.class)
    public void testInvalidDeleteWorkflow() {
        try {
            workflowService.deleteWorkflow(null, false);
        } catch (ConstraintViolationException ex) {
            assertEquals(1, ex.getConstraintViolations().size());
            Set<String> messages = getConstraintViolationMessages(ex.getConstraintViolations());
            assertTrue(messages.contains("WorkflowId cannot be null or empty."));
            throw ex;
        }
    }

    @Test
    public void testArchiveWorkflow() {
        workflowService.deleteWorkflow("w123", true);
        verify(executionService, times(1)).removeWorkflow(anyString(), eq(true));
    }

    @Test(expected = ConstraintViolationException.class)
    public void testInvalidArchiveWorkflow() {
        try {
            workflowService.deleteWorkflow(null, true);
        } catch (ConstraintViolationException ex) {
            assertEquals(1, ex.getConstraintViolations().size());
            Set<String> messages = getConstraintViolationMessages(ex.getConstraintViolations());
            assertTrue(messages.contains("WorkflowId cannot be null or empty."));
            throw ex;
        }
    }

    @Test(expected = ConstraintViolationException.class)
    public void testInvalidPauseWorkflow() {
        try {
            workflowService.pauseWorkflow(null);
        } catch (ConstraintViolationException ex) {
            assertEquals(1, ex.getConstraintViolations().size());
            Set<String> messages = getConstraintViolationMessages(ex.getConstraintViolations());
            assertTrue(messages.contains("WorkflowId cannot be null or empty."));
            throw ex;
        }
    }

    @Test(expected = ConstraintViolationException.class)
    public void testInvalidResumeWorkflow() {
        try {
            workflowService.resumeWorkflow(null);
        } catch (ConstraintViolationException ex) {
            assertEquals(1, ex.getConstraintViolations().size());
            Set<String> messages = getConstraintViolationMessages(ex.getConstraintViolations());
            assertTrue(messages.contains("WorkflowId cannot be null or empty."));
            throw ex;
        }
    }

    @Test(expected = ConstraintViolationException.class)
    public void testInvalidSkipTaskFromWorkflow() {
        try {
            SkipTaskRequest skipTaskRequest = new SkipTaskRequest();
            workflowService.skipTaskFromWorkflow(null, null, skipTaskRequest);
        } catch (ConstraintViolationException ex) {
            assertEquals(2, ex.getConstraintViolations().size());
            Set<String> messages = getConstraintViolationMessages(ex.getConstraintViolations());
            assertTrue(messages.contains("WorkflowId name cannot be null or empty."));
            assertTrue(messages.contains("TaskReferenceName cannot be null or empty."));
            throw ex;
        }
    }

    @Test(expected = ConstraintViolationException.class)
    public void testInvalidWorkflowNameGetRunningWorkflows() {
        try {
            workflowService.getRunningWorkflows(null, 123, null, null);
        } catch (ConstraintViolationException ex) {
            assertEquals(1, ex.getConstraintViolations().size());
            Set<String> messages = getConstraintViolationMessages(ex.getConstraintViolations());
            assertTrue(messages.contains("Workflow name cannot be null or empty."));
            throw ex;
        }
    }

    @Test
    public void testGetRunningWorkflowsTime() {
        // With start/end times supplied, the time-bounded executor query is used.
        workflowService.getRunningWorkflows("test", 1, 100L, 120L);
        verify(workflowExecutor, times(1))
                .getWorkflows(anyString(), anyInt(), anyLong(), anyLong());
    }

    @Test
    public void testGetRunningWorkflows() {
        // Without times, the plain running-ids query is used.
        workflowService.getRunningWorkflows("test", 1, null, null);
        verify(workflowExecutor, times(1)).getRunningWorkflowIds(anyString(), anyInt());
    }

    @Test
    public void testDecideWorkflow() {
        workflowService.decideWorkflow("test");
        verify(workflowExecutor, times(1)).decide(anyString());
    }

    @Test
    public void testPauseWorkflow() {
        workflowService.pauseWorkflow("test");
        verify(workflowExecutor, times(1)).pauseWorkflow(anyString());
    }

    @Test
    public void testResumeWorkflow() {
        workflowService.resumeWorkflow("test");
        verify(workflowExecutor, times(1)).resumeWorkflow(anyString());
    }

    @Test
    public void testSkipTaskFromWorkflow() {
        workflowService.skipTaskFromWorkflow("test", "testTask", null);
        verify(workflowExecutor, times(1)).skipTaskFromWorkflow(anyString(), anyString(), isNull());
    }

    @Test
    public void testRerunWorkflow() {
        RerunWorkflowRequest request = new RerunWorkflowRequest();
        workflowService.rerunWorkflow("test", request);
        verify(workflowExecutor, times(1)).rerun(any(RerunWorkflowRequest.class));
    }

    @Test(expected = ConstraintViolationException.class)
    public void testRerunWorkflowNull() {
        try {
            workflowService.rerunWorkflow(null, null);
        } catch (ConstraintViolationException ex) {
            assertEquals(2, ex.getConstraintViolations().size());
            Set<String> messages = getConstraintViolationMessages(ex.getConstraintViolations());
            assertTrue(messages.contains("WorkflowId cannot be null or empty."));
            assertTrue(messages.contains("RerunWorkflowRequest cannot be null."));
            throw ex;
        }
    }

    @Test(expected = ConstraintViolationException.class)
    public void testRestartWorkflowNull() {
        try {
            workflowService.restartWorkflow(null, false);
        } catch (ConstraintViolationException ex) {
            assertEquals(1, ex.getConstraintViolations().size());
            Set<String> messages = getConstraintViolationMessages(ex.getConstraintViolations());
            assertTrue(messages.contains("WorkflowId cannot be null or empty."));
            throw ex;
        }
    }

    @Test(expected = ConstraintViolationException.class)
    public void testRetryWorkflowNull() {
        try {
            workflowService.retryWorkflow(null, false);
        } catch (ConstraintViolationException ex) {
            assertEquals(1, ex.getConstraintViolations().size());
            Set<String> messages = getConstraintViolationMessages(ex.getConstraintViolations());
            assertTrue(messages.contains("WorkflowId cannot be null or empty."));
            throw ex;
        }
    }

    @Test(expected = ConstraintViolationException.class)
    public void testResetWorkflowNull() {
        try {
            workflowService.resetWorkflow(null);
        } catch (ConstraintViolationException ex) {
            assertEquals(1, ex.getConstraintViolations().size());
            Set<String> messages = getConstraintViolationMessages(ex.getConstraintViolations());
            assertTrue(messages.contains("WorkflowId cannot be null or empty."));
            throw ex;
        }
    }

    @Test(expected = ConstraintViolationException.class)
    public void testTerminateWorkflowNull() {
        try {
            workflowService.terminateWorkflow(null, null);
        } catch (ConstraintViolationException ex) {
            assertEquals(1, ex.getConstraintViolations().size());
            Set<String> messages = getConstraintViolationMessages(ex.getConstraintViolations());
            assertTrue(messages.contains("WorkflowId cannot be null or empty."));
            throw ex;
        }
    }

    @Test
    public void testRerunWorkflowReturnWorkflowId() {
        RerunWorkflowRequest request = new RerunWorkflowRequest();
        String workflowId = "w123";
        when(workflowExecutor.rerun(any(RerunWorkflowRequest.class))).thenReturn(workflowId);
        assertEquals(workflowId, workflowService.rerunWorkflow("test", request));
    }

    @Test
    public void testRestartWorkflow() {
        workflowService.restartWorkflow("w123", false);
        verify(workflowExecutor, times(1)).restart(anyString(), anyBoolean());
    }

    @Test
    public void testRetryWorkflow() {
        workflowService.retryWorkflow("w123", false);
        verify(workflowExecutor, times(1)).retry(anyString(), anyBoolean());
    }

    @Test
    public void testResetWorkflow() {
        workflowService.resetWorkflow("w123");
        verify(workflowExecutor, times(1)).resetCallbacksForWorkflow(anyString());
    }

    @Test
    public void testTerminateWorkflow() {
        workflowService.terminateWorkflow("w123", "test");
        verify(workflowExecutor, times(1)).terminateWorkflow(anyString(), anyString());
    }

    @Test
    public void testSearchWorkflows() {
        Workflow workflow = new Workflow();
        WorkflowDef def = new WorkflowDef();
        def.setName("name");
        def.setVersion(1);
        workflow.setWorkflowDefinition(def);
        workflow.setCorrelationId("c123");

        WorkflowSummary workflowSummary = new WorkflowSummary(workflow);
        List<WorkflowSummary> listOfWorkflowSummary = Collections.singletonList(workflowSummary);
        SearchResult<WorkflowSummary> searchResult = new SearchResult<>(100, listOfWorkflowSummary);

        when(executionService.search("*", "*", 0, 100, Collections.singletonList("asc")))
                .thenReturn(searchResult);
        assertEquals(searchResult, workflowService.searchWorkflows(0, 100, "asc", "*", "*"));
        assertEquals(
                searchResult,
                workflowService.searchWorkflows(
                        0, 100, Collections.singletonList("asc"), "*", "*"));
    }

    @Test
    public void testSearchWorkflowsV2() {
        Workflow workflow = new Workflow();
        workflow.setCorrelationId("c123");

        List<Workflow> listOfWorkflow = Collections.singletonList(workflow);
        SearchResult<Workflow> searchResult = new SearchResult<>(1, listOfWorkflow);

        when(executionService.searchV2("*", "*", 0, 100, Collections.singletonList("asc")))
                .thenReturn(searchResult);
        assertEquals(searchResult, workflowService.searchWorkflowsV2(0, 100, "asc", "*", "*"));
        assertEquals(
                searchResult,
                workflowService.searchWorkflowsV2(
                        0, 100, Collections.singletonList("asc"), "*", "*"));
    }

    @Test
    public void testInvalidSizeSearchWorkflows() {
        ConstraintViolationException ex =
                assertThrows(
                        ConstraintViolationException.class,
                        () -> workflowService.searchWorkflows(0, 6000, "asc", "*", "*"));
        assertEquals(1, ex.getConstraintViolations().size());
        Set<String> messages = getConstraintViolationMessages(ex.getConstraintViolations());
        assertTrue(
                messages.contains(
                        "Cannot return more than 5000 workflows. Please use pagination."));
    }

    @Test
    public void testInvalidSizeSearchWorkflowsV2() {
        ConstraintViolationException ex =
                assertThrows(
                        ConstraintViolationException.class,
                        () -> workflowService.searchWorkflowsV2(0, 6000, "asc", "*", "*"));
        assertEquals(1, ex.getConstraintViolations().size());
        Set<String> messages = getConstraintViolationMessages(ex.getConstraintViolations());
        assertTrue(
                messages.contains(
                        "Cannot return more than 5000 workflows. Please use pagination."));
    }

    @Test
    public void testSearchWorkflowsByTasks() {
        Workflow workflow = new Workflow();
        WorkflowDef def = new WorkflowDef();
        def.setName("name");
        def.setVersion(1);
        workflow.setWorkflowDefinition(def);
        workflow.setCorrelationId("c123");

        WorkflowSummary workflowSummary = new WorkflowSummary(workflow);
        List<WorkflowSummary> listOfWorkflowSummary = Collections.singletonList(workflowSummary);
        SearchResult<WorkflowSummary> searchResult = new SearchResult<>(100, listOfWorkflowSummary);

        when(executionService.searchWorkflowByTasks(
                        "*", "*", 0, 100, Collections.singletonList("asc")))
                .thenReturn(searchResult);
        assertEquals(searchResult, workflowService.searchWorkflowsByTasks(0, 100, "asc", "*", "*"));
        assertEquals(
                searchResult,
                workflowService.searchWorkflowsByTasks(
                        0, 100, Collections.singletonList("asc"), "*", "*"));
    }

    @Test
    public void testSearchWorkflowsByTasksV2() {
        Workflow workflow = new Workflow();
        workflow.setCorrelationId("c123");

        List<Workflow> listOfWorkflow = Collections.singletonList(workflow);
        SearchResult<Workflow> searchResult = new SearchResult<>(1, listOfWorkflow);

        when(executionService.searchWorkflowByTasksV2(
                        "*", "*", 0, 100, Collections.singletonList("asc")))
                .thenReturn(searchResult);
        assertEquals(
                searchResult, workflowService.searchWorkflowsByTasksV2(0, 100, "asc", "*", "*"));
        assertEquals(
                searchResult,
                workflowService.searchWorkflowsByTasksV2(
                        0, 100, Collections.singletonList("asc"), "*", "*"));
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/core/src/test/java/com/netflix/conductor/service/WorkflowBulkServiceTest.java | core/src/test/java/com/netflix/conductor/service/WorkflowBulkServiceTest.java | /*
* Copyright 2020 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.service;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Set;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.autoconfigure.EnableAutoConfiguration;
import org.springframework.boot.test.context.TestConfiguration;
import org.springframework.context.annotation.Bean;
import org.springframework.test.context.junit4.SpringRunner;
import com.netflix.conductor.core.execution.WorkflowExecutor;
import jakarta.validation.ConstraintViolationException;
import static com.netflix.conductor.TestUtils.getConstraintViolationMessages;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.verify;
/**
 * Validation tests for {@code WorkflowBulkService}: every bulk operation must reject a null
 * workflow-id list (and an oversized list) with a {@link ConstraintViolationException} carrying
 * the expected message, raised by the method-validation proxy Spring wraps around the bean.
 */
@SuppressWarnings("SpringJavaAutowiredMembersInspection")
@RunWith(SpringRunner.class)
@EnableAutoConfiguration
public class WorkflowBulkServiceTest {
@TestConfiguration
static class TestWorkflowBulkConfiguration {
@Bean
WorkflowExecutor workflowExecutor() {
return mock(WorkflowExecutor.class);
}
@Bean
WorkflowService workflowService() {
return mock(WorkflowService.class);
}
@Bean
public WorkflowBulkService workflowBulkService(
WorkflowExecutor workflowExecutor, WorkflowService workflowService) {
return new WorkflowBulkServiceImpl(workflowExecutor, workflowService);
}
}
@Autowired private WorkflowExecutor workflowExecutor;
@Autowired private WorkflowBulkService workflowBulkService;
/**
 * Asserts that {@code ex} carries exactly one constraint violation whose message equals
 * {@code expectedMessage}. Shared by all the negative tests below.
 */
private static void assertSingleViolation(
ConstraintViolationException ex, String expectedMessage) {
assertEquals(1, ex.getConstraintViolations().size());
Set<String> messages = getConstraintViolationMessages(ex.getConstraintViolations());
assertTrue(messages.contains(expectedMessage));
}
@Test(expected = ConstraintViolationException.class)
public void testPauseWorkflowNull() {
try {
workflowBulkService.pauseWorkflow(null);
} catch (ConstraintViolationException ex) {
assertSingleViolation(ex, "WorkflowIds list cannot be null.");
throw ex;
}
}
@Test(expected = ConstraintViolationException.class)
public void testPauseWorkflowWithInvalidListSize() {
try {
// 1002 ids — one more than the 1000-workflow bulk limit. Presize for all elements.
List<String> list = new ArrayList<>(1002);
for (int i = 0; i < 1002; i++) {
list.add("test");
}
workflowBulkService.pauseWorkflow(list);
} catch (ConstraintViolationException ex) {
assertSingleViolation(
ex, "Cannot process more than 1000 workflows. Please use multiple requests.");
throw ex;
}
}
@Test(expected = ConstraintViolationException.class)
public void testResumeWorkflowNull() {
try {
workflowBulkService.resumeWorkflow(null);
} catch (ConstraintViolationException ex) {
assertSingleViolation(ex, "WorkflowIds list cannot be null.");
throw ex;
}
}
@Test(expected = ConstraintViolationException.class)
public void testRestartWorkflowNull() {
try {
workflowBulkService.restart(null, false);
} catch (ConstraintViolationException ex) {
assertSingleViolation(ex, "WorkflowIds list cannot be null.");
throw ex;
}
}
@Test(expected = ConstraintViolationException.class)
public void testRetryWorkflowNull() {
try {
workflowBulkService.retry(null);
} catch (ConstraintViolationException ex) {
assertSingleViolation(ex, "WorkflowIds list cannot be null.");
throw ex;
}
}
@Test
public void testRetryWorkflowSuccessful() {
// When
workflowBulkService.retry(Collections.singletonList("anyId"));
// Then: delegates straight to the executor, without resuming subworkflow tasks.
verify(workflowExecutor).retry("anyId", false);
}
@Test(expected = ConstraintViolationException.class)
public void testTerminateNull() {
try {
workflowBulkService.terminate(null, null);
} catch (ConstraintViolationException ex) {
assertSingleViolation(ex, "WorkflowIds list cannot be null.");
throw ex;
}
}
@Test(expected = ConstraintViolationException.class)
public void testDeleteWorkflowNull() {
try {
workflowBulkService.deleteWorkflow(null, false);
} catch (ConstraintViolationException ex) {
assertSingleViolation(ex, "WorkflowIds list cannot be null.");
throw ex;
}
}
@Test(expected = ConstraintViolationException.class)
public void testTerminateRemoveNull() {
try {
workflowBulkService.terminateRemove(null, null, false);
} catch (ConstraintViolationException ex) {
assertSingleViolation(ex, "WorkflowIds list cannot be null.");
throw ex;
}
}
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/core/src/test/java/com/netflix/conductor/service/EventServiceTest.java | core/src/test/java/com/netflix/conductor/service/EventServiceTest.java | /*
* Copyright 2020 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.service;
import java.util.Set;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.autoconfigure.EnableAutoConfiguration;
import org.springframework.boot.test.context.TestConfiguration;
import org.springframework.context.annotation.Bean;
import org.springframework.test.context.junit4.SpringRunner;
import com.netflix.conductor.core.events.EventQueues;
import jakarta.validation.ConstraintViolationException;
import static com.netflix.conductor.TestUtils.getConstraintViolationMessages;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import static org.mockito.Mockito.mock;
/**
 * Validation tests for {@code EventService}: null arguments to each operation must be rejected
 * by the method-validation proxy with a {@link ConstraintViolationException} carrying the
 * expected message.
 */
@SuppressWarnings("SpringJavaAutowiredMembersInspection")
@RunWith(SpringRunner.class)
@EnableAutoConfiguration
public class EventServiceTest {
@TestConfiguration
static class TestEventConfiguration {
@Bean
public EventService eventService() {
MetadataService metadataService = mock(MetadataService.class);
EventQueues eventQueues = mock(EventQueues.class);
return new EventServiceImpl(metadataService, eventQueues);
}
}
@Autowired private EventService eventService;
/**
 * Asserts that {@code ex} carries exactly one constraint violation whose message equals
 * {@code expectedMessage}. Shared by all the negative tests below.
 */
private static void assertSingleViolation(
ConstraintViolationException ex, String expectedMessage) {
assertEquals(1, ex.getConstraintViolations().size());
Set<String> messages = getConstraintViolationMessages(ex.getConstraintViolations());
assertTrue(messages.contains(expectedMessage));
}
@Test(expected = ConstraintViolationException.class)
public void testAddEventHandler() {
try {
eventService.addEventHandler(null);
} catch (ConstraintViolationException ex) {
assertSingleViolation(ex, "EventHandler cannot be null.");
throw ex;
}
fail("eventService.addEventHandler did not throw ConstraintViolationException !");
}
@Test(expected = ConstraintViolationException.class)
public void testUpdateEventHandler() {
try {
eventService.updateEventHandler(null);
} catch (ConstraintViolationException ex) {
assertSingleViolation(ex, "EventHandler cannot be null.");
throw ex;
}
fail("eventService.updateEventHandler did not throw ConstraintViolationException !");
}
@Test(expected = ConstraintViolationException.class)
public void testRemoveEventHandlerStatus() {
try {
eventService.removeEventHandlerStatus(null);
} catch (ConstraintViolationException ex) {
assertSingleViolation(ex, "EventHandler name cannot be null or empty.");
throw ex;
}
fail("eventService.removeEventHandlerStatus did not throw ConstraintViolationException !");
}
@Test(expected = ConstraintViolationException.class)
public void testGetEventHandlersForEvent() {
try {
eventService.getEventHandlersForEvent(null, false);
} catch (ConstraintViolationException ex) {
assertSingleViolation(ex, "Event cannot be null or empty.");
throw ex;
}
fail("eventService.getEventHandlersForEvent did not throw ConstraintViolationException !");
}
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/core/src/test/java/com/netflix/conductor/service/MetadataServiceTest.java | core/src/test/java/com/netflix/conductor/service/MetadataServiceTest.java | /*
* Copyright 2020 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.service;
import java.util.*;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.stubbing.Answer;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.autoconfigure.EnableAutoConfiguration;
import org.springframework.boot.test.context.TestConfiguration;
import org.springframework.context.annotation.Bean;
import org.springframework.test.context.TestPropertySource;
import org.springframework.test.context.junit4.SpringRunner;
import com.netflix.conductor.common.metadata.events.EventHandler;
import com.netflix.conductor.common.metadata.tasks.TaskDef;
import com.netflix.conductor.common.metadata.workflow.WorkflowDef;
import com.netflix.conductor.common.metadata.workflow.WorkflowDefSummary;
import com.netflix.conductor.common.metadata.workflow.WorkflowTask;
import com.netflix.conductor.common.model.BulkResponse;
import com.netflix.conductor.core.config.ConductorProperties;
import com.netflix.conductor.core.exception.NotFoundException;
import com.netflix.conductor.dao.EventHandlerDAO;
import com.netflix.conductor.dao.MetadataDAO;
import jakarta.validation.ConstraintViolationException;
import static com.netflix.conductor.TestUtils.getConstraintViolationMessages;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
/**
 * Tests for {@code MetadataService}: validation of task-definition and workflow-definition
 * registration/update, event-handler validation, and workflow name/version listing. The service
 * bean is wrapped by Spring method validation, so invalid arguments surface as
 * {@link ConstraintViolationException} with the exact messages asserted below.
 */
@SuppressWarnings("SpringJavaAutowiredMembersInspection")
@RunWith(SpringRunner.class)
@TestPropertySource(properties = "conductor.app.workflow.name-validation.enabled=true")
@EnableAutoConfiguration
public class MetadataServiceTest {
@TestConfiguration
static class TestMetadataConfiguration {
@Bean
public MetadataDAO metadataDAO() {
return mock(MetadataDAO.class);
}
@Bean
public ConductorProperties properties() {
ConductorProperties properties = mock(ConductorProperties.class);
// ownerEmail is mandatory so the "ownerEmail cannot be empty" violations fire.
when(properties.isOwnerEmailMandatory()).thenReturn(true);
return properties;
}
@Bean
public MetadataService metadataService(
MetadataDAO metadataDAO, ConductorProperties properties) {
EventHandlerDAO eventHandlerDAO = mock(EventHandlerDAO.class);
// In-memory store backing the DAO mock so create/update/get of task defs round-trip.
Map<String, TaskDef> taskDefinitions = new HashMap<>();
when(metadataDAO.getAllWorkflowDefs()).thenReturn(mockWorkflowDefs());
Answer<TaskDef> upsertTaskDef =
(invocation) -> {
TaskDef argument = invocation.getArgument(0, TaskDef.class);
taskDefinitions.put(argument.getName(), argument);
return argument;
};
when(metadataDAO.createTaskDef(any(TaskDef.class))).then(upsertTaskDef);
when(metadataDAO.updateTaskDef(any(TaskDef.class))).then(upsertTaskDef);
when(metadataDAO.getTaskDef(any()))
.then(
invocation ->
taskDefinitions.get(invocation.getArgument(0, String.class)));
return new MetadataServiceImpl(metadataDAO, eventHandlerDAO, properties);
}
// Returns list of workflowDefs in reverse version order (5..1); the service is expected
// to present them sorted ascending in getWorkflowNamesAndVersions().
private List<WorkflowDef> mockWorkflowDefs() {
List<WorkflowDef> retval = new ArrayList<>();
for (int i = 5; i > 0; i--) {
WorkflowDef def = new WorkflowDef();
def.setCreateTime(new Date().getTime());
def.setVersion(i);
def.setName("test_workflow_def");
retval.add(def);
}
return retval;
}
}
@Autowired private MetadataDAO metadataDAO;
@Autowired private MetadataService metadataService;
@Test(expected = ConstraintViolationException.class)
public void testRegisterTaskDefNoName() {
TaskDef taskDef = new TaskDef();
try {
metadataService.registerTaskDef(Collections.singletonList(taskDef));
} catch (ConstraintViolationException ex) {
assertEquals(2, ex.getConstraintViolations().size());
Set<String> messages = getConstraintViolationMessages(ex.getConstraintViolations());
assertTrue(messages.contains("TaskDef name cannot be null or empty"));
assertTrue(messages.contains("ownerEmail cannot be empty"));
throw ex;
}
fail("metadataService.registerTaskDef did not throw ConstraintViolationException !");
}
@Test(expected = ConstraintViolationException.class)
public void testRegisterTaskDefNull() {
try {
metadataService.registerTaskDef(null);
} catch (ConstraintViolationException ex) {
assertEquals(1, ex.getConstraintViolations().size());
Set<String> messages = getConstraintViolationMessages(ex.getConstraintViolations());
assertTrue(messages.contains("TaskDefList cannot be empty or null"));
throw ex;
}
fail("metadataService.registerTaskDef did not throw ConstraintViolationException !");
}
@Test(expected = ConstraintViolationException.class)
public void testRegisterTaskDefNoResponseTimeout() {
try {
TaskDef taskDef = new TaskDef();
taskDef.setName("somename");
taskDef.setOwnerEmail("sample@test.com");
taskDef.setResponseTimeoutSeconds(0);
metadataService.registerTaskDef(Collections.singletonList(taskDef));
} catch (ConstraintViolationException ex) {
assertEquals(1, ex.getConstraintViolations().size());
Set<String> messages = getConstraintViolationMessages(ex.getConstraintViolations());
assertTrue(
messages.contains(
"TaskDef responseTimeoutSeconds: 0 should be minimum 1 second"));
throw ex;
}
fail("metadataService.registerTaskDef did not throw ConstraintViolationException !");
}
@Test(expected = ConstraintViolationException.class)
public void testUpdateTaskDefNameNull() {
try {
TaskDef taskDef = new TaskDef();
metadataService.updateTaskDef(taskDef);
} catch (ConstraintViolationException ex) {
assertEquals(2, ex.getConstraintViolations().size());
Set<String> messages = getConstraintViolationMessages(ex.getConstraintViolations());
assertTrue(messages.contains("TaskDef name cannot be null or empty"));
assertTrue(messages.contains("ownerEmail cannot be empty"));
throw ex;
}
fail("metadataService.updateTaskDef did not throw ConstraintViolationException !");
}
@Test(expected = ConstraintViolationException.class)
public void testUpdateTaskDefNull() {
try {
metadataService.updateTaskDef(null);
} catch (ConstraintViolationException ex) {
assertEquals(1, ex.getConstraintViolations().size());
Set<String> messages = getConstraintViolationMessages(ex.getConstraintViolations());
assertTrue(messages.contains("TaskDef cannot be null"));
throw ex;
}
fail("metadataService.updateTaskDef did not throw ConstraintViolationException !");
}
// Updating a task def that was never registered must raise NotFoundException.
@Test(expected = NotFoundException.class)
public void testUpdateTaskDefNotExisting() {
TaskDef taskDef = new TaskDef();
taskDef.setName("test");
taskDef.setOwnerEmail("sample@test.com");
metadataService.updateTaskDef(taskDef);
}
// NOTE(review): this test is byte-identical to testUpdateTaskDefNotExisting above and does
// not actually simulate a DAO exception; consider stubbing metadataDAO.updateTaskDef to
// throw, or removing the duplicate.
@Test(expected = NotFoundException.class)
public void testUpdateTaskDefDaoException() {
TaskDef taskDef = new TaskDef();
taskDef.setName("test");
taskDef.setOwnerEmail("sample@test.com");
metadataService.updateTaskDef(taskDef);
}
@Test
public void testRegisterTaskDef() {
TaskDef taskDef = new TaskDef();
taskDef.setName("somename");
taskDef.setOwnerEmail("sample@test.com");
taskDef.setResponseTimeoutSeconds(60 * 60);
metadataService.registerTaskDef(Collections.singletonList(taskDef));
verify(metadataDAO, times(1)).createTaskDef(any(TaskDef.class));
}
@Test
public void testUpdateTask() {
String taskDefName = "another-task";
TaskDef taskDef = new TaskDef();
taskDef.setName(taskDefName);
taskDef.setOwnerEmail("sample@test.com");
taskDef.setRetryCount(1);
metadataService.registerTaskDef(Collections.singletonList(taskDef));
TaskDef before = metadataService.getTaskDef(taskDefName);
// Mutate fields a client must not be able to overwrite (createdBy/createTime) plus one
// it may (retryCount); the update must apply retryCount but preserve createTime.
taskDef.setRetryCount(2);
taskDef.setCreatedBy("someone-else");
taskDef.setCreateTime(1000L);
metadataService.updateTaskDef(taskDef);
verify(metadataDAO, times(1)).updateTaskDef(any(TaskDef.class));
TaskDef after = metadataService.getTaskDef(taskDefName);
assertEquals(2, after.getRetryCount());
assertEquals(before.getCreateTime(), after.getCreateTime());
}
@Test(expected = ConstraintViolationException.class)
public void testUpdateWorkflowDefNull() {
try {
List<WorkflowDef> workflowDefList = null;
metadataService.updateWorkflowDef(workflowDefList);
} catch (ConstraintViolationException ex) {
assertEquals(1, ex.getConstraintViolations().size());
Set<String> messages = getConstraintViolationMessages(ex.getConstraintViolations());
assertTrue(messages.contains("WorkflowDef list name cannot be null or empty"));
throw ex;
}
fail("metadataService.updateWorkflowDef did not throw ConstraintViolationException !");
}
@Test(expected = ConstraintViolationException.class)
public void testUpdateWorkflowDefEmptyList() {
try {
List<WorkflowDef> workflowDefList = new ArrayList<>();
metadataService.updateWorkflowDef(workflowDefList);
} catch (ConstraintViolationException ex) {
assertEquals(1, ex.getConstraintViolations().size());
Set<String> messages = getConstraintViolationMessages(ex.getConstraintViolations());
assertTrue(messages.contains("WorkflowDefList is empty"));
throw ex;
}
fail("metadataService.updateWorkflowDef did not throw ConstraintViolationException !");
}
@Test(expected = ConstraintViolationException.class)
public void testUpdateWorkflowDefWithNullWorkflowDef() {
try {
List<WorkflowDef> workflowDefList = new ArrayList<>();
workflowDefList.add(null);
metadataService.updateWorkflowDef(workflowDefList);
} catch (ConstraintViolationException ex) {
assertEquals(1, ex.getConstraintViolations().size());
Set<String> messages = getConstraintViolationMessages(ex.getConstraintViolations());
assertTrue(messages.contains("WorkflowDef cannot be null"));
throw ex;
}
fail("metadataService.updateWorkflowDef did not throw ConstraintViolationException !");
}
@Test(expected = ConstraintViolationException.class)
public void testUpdateWorkflowDefWithEmptyWorkflowDefName() {
try {
List<WorkflowDef> workflowDefList = new ArrayList<>();
WorkflowDef workflowDef = new WorkflowDef();
workflowDef.setName(null);
workflowDef.setOwnerEmail(null);
workflowDefList.add(workflowDef);
metadataService.updateWorkflowDef(workflowDefList);
} catch (ConstraintViolationException ex) {
assertEquals(3, ex.getConstraintViolations().size());
Set<String> messages = getConstraintViolationMessages(ex.getConstraintViolations());
assertTrue(messages.contains("WorkflowDef name cannot be null or empty"));
assertTrue(messages.contains("WorkflowTask list cannot be empty"));
assertTrue(messages.contains("ownerEmail cannot be empty"));
throw ex;
}
fail("metadataService.updateWorkflowDef did not throw ConstraintViolationException !");
}
@Test
public void testUpdateWorkflowDef() {
WorkflowDef workflowDef = new WorkflowDef();
workflowDef.setName("somename");
workflowDef.setOwnerEmail("sample@test.com");
List<WorkflowTask> tasks = new ArrayList<>();
WorkflowTask workflowTask = new WorkflowTask();
workflowTask.setTaskReferenceName("hello");
workflowTask.setName("hello");
tasks.add(workflowTask);
workflowDef.setTasks(tasks);
metadataService.updateWorkflowDef(Collections.singletonList(workflowDef));
verify(metadataDAO, times(1)).updateWorkflowDef(workflowDef);
}
// A DECISION task with a syntactically invalid caseExpression must be rejected.
@Test(expected = ConstraintViolationException.class)
public void testUpdateWorkflowDefWithCaseExpression() {
WorkflowDef workflowDef = new WorkflowDef();
workflowDef.setName("somename");
workflowDef.setOwnerEmail("sample@test.com");
List<WorkflowTask> tasks = new ArrayList<>();
WorkflowTask workflowTask = new WorkflowTask();
workflowTask.setTaskReferenceName("hello");
workflowTask.setName("hello");
workflowTask.setType("DECISION");
WorkflowTask caseTask = new WorkflowTask();
caseTask.setTaskReferenceName("casetrue");
caseTask.setName("casetrue");
List<WorkflowTask> caseTaskList = new ArrayList<>();
caseTaskList.add(caseTask);
Map<String, List<WorkflowTask>> decisionCases = new HashMap<>();
decisionCases.put("true", caseTaskList);
workflowTask.setDecisionCases(decisionCases);
workflowTask.setCaseExpression("1 >0abcd");
tasks.add(workflowTask);
workflowDef.setTasks(tasks);
metadataService.updateWorkflowDef(Collections.singletonList(workflowDef));
}
// A SWITCH task with a syntactically invalid javascript expression must be rejected.
@Test(expected = ConstraintViolationException.class)
public void testUpdateWorkflowDefWithJavascriptEvaluator() {
WorkflowDef workflowDef = new WorkflowDef();
workflowDef.setName("somename");
workflowDef.setOwnerEmail("sample@test.com");
List<WorkflowTask> tasks = new ArrayList<>();
WorkflowTask workflowTask = new WorkflowTask();
workflowTask.setTaskReferenceName("hello");
workflowTask.setName("hello");
workflowTask.setType("SWITCH");
workflowTask.setEvaluatorType("javascript");
workflowTask.setExpression("1>abcd");
WorkflowTask caseTask = new WorkflowTask();
caseTask.setTaskReferenceName("casetrue");
caseTask.setName("casetrue");
List<WorkflowTask> caseTaskList = new ArrayList<>();
caseTaskList.add(caseTask);
Map<String, List<WorkflowTask>> decisionCases = new HashMap<>();
decisionCases.put("true", caseTaskList);
workflowTask.setDecisionCases(decisionCases);
tasks.add(workflowTask);
workflowDef.setTasks(tasks);
metadataService.updateWorkflowDef(Collections.singletonList(workflowDef));
}
@Test(expected = ConstraintViolationException.class)
public void testRegisterWorkflowDefNoName() {
try {
WorkflowDef workflowDef = new WorkflowDef();
metadataService.registerWorkflowDef(workflowDef);
} catch (ConstraintViolationException ex) {
assertEquals(3, ex.getConstraintViolations().size());
Set<String> messages = getConstraintViolationMessages(ex.getConstraintViolations());
assertTrue(messages.contains("WorkflowDef name cannot be null or empty"));
assertTrue(messages.contains("WorkflowTask list cannot be empty"));
assertTrue(messages.contains("ownerEmail cannot be empty"));
throw ex;
}
fail("metadataService.registerWorkflowDef did not throw ConstraintViolationException !");
}
@Test(expected = ConstraintViolationException.class)
public void testValidateWorkflowDefNoName() {
try {
WorkflowDef workflowDef = new WorkflowDef();
metadataService.validateWorkflowDef(workflowDef);
} catch (ConstraintViolationException ex) {
assertEquals(3, ex.getConstraintViolations().size());
Set<String> messages = getConstraintViolationMessages(ex.getConstraintViolations());
assertTrue(messages.contains("WorkflowDef name cannot be null or empty"));
assertTrue(messages.contains("WorkflowTask list cannot be empty"));
assertTrue(messages.contains("ownerEmail cannot be empty"));
throw ex;
}
fail("metadataService.validateWorkflowDef did not throw ConstraintViolationException !");
}
@Test(expected = ConstraintViolationException.class)
public void testRegisterWorkflowDefInvalidName() {
try {
WorkflowDef workflowDef = new WorkflowDef();
workflowDef.setName("invalid:name");
workflowDef.setOwnerEmail("inavlid-email");
metadataService.registerWorkflowDef(workflowDef);
} catch (ConstraintViolationException ex) {
assertEquals(2, ex.getConstraintViolations().size());
Set<String> messages = getConstraintViolationMessages(ex.getConstraintViolations());
assertTrue(messages.contains("WorkflowTask list cannot be empty"));
assertTrue(
messages.contains(
"Invalid name 'invalid:name'. Allowed characters are alphanumeric, underscores, spaces, hyphens, and special characters like <, >, {, }, #"));
throw ex;
}
fail("metadataService.registerWorkflowDef did not throw ConstraintViolationException !");
}
@Test(expected = ConstraintViolationException.class)
public void testValidateWorkflowDefInvalidName() {
try {
WorkflowDef workflowDef = new WorkflowDef();
workflowDef.setName("invalid:name");
workflowDef.setOwnerEmail("inavlid-email");
metadataService.validateWorkflowDef(workflowDef);
} catch (ConstraintViolationException ex) {
assertEquals(2, ex.getConstraintViolations().size());
Set<String> messages = getConstraintViolationMessages(ex.getConstraintViolations());
assertTrue(messages.contains("WorkflowTask list cannot be empty"));
assertTrue(
messages.contains(
"Invalid name 'invalid:name'. Allowed characters are alphanumeric, underscores, spaces, hyphens, and special characters like <, >, {, }, #"));
throw ex;
}
fail("metadataService.validateWorkflowDef did not throw ConstraintViolationException !");
}
@Test
public void testRegisterWorkflowDef() {
WorkflowDef workflowDef = new WorkflowDef();
workflowDef.setName("somename");
workflowDef.setSchemaVersion(2);
workflowDef.setOwnerEmail("sample@test.com");
List<WorkflowTask> tasks = new ArrayList<>();
WorkflowTask workflowTask = new WorkflowTask();
workflowTask.setTaskReferenceName("hello");
workflowTask.setName("hello");
tasks.add(workflowTask);
workflowDef.setTasks(tasks);
metadataService.registerWorkflowDef(workflowDef);
verify(metadataDAO, times(1)).createWorkflowDef(workflowDef);
assertEquals(2, workflowDef.getSchemaVersion());
}
@Test
public void testValidateWorkflowDef() {
WorkflowDef workflowDef = new WorkflowDef();
workflowDef.setName("somename");
workflowDef.setSchemaVersion(2);
workflowDef.setOwnerEmail("sample@test.com");
List<WorkflowTask> tasks = new ArrayList<>();
WorkflowTask workflowTask = new WorkflowTask();
workflowTask.setTaskReferenceName("hello");
workflowTask.setName("hello");
tasks.add(workflowTask);
workflowDef.setTasks(tasks);
metadataService.validateWorkflowDef(workflowDef);
// NOTE(review): verifying createWorkflowDef after a *validate* call looks suspect —
// validation should not persist. Confirm against MetadataServiceImpl.validateWorkflowDef.
verify(metadataDAO, times(1)).createWorkflowDef(workflowDef);
assertEquals(2, workflowDef.getSchemaVersion());
}
@Test(expected = ConstraintViolationException.class)
public void testUnregisterWorkflowDefNoName() {
try {
metadataService.unregisterWorkflowDef("", null);
} catch (ConstraintViolationException ex) {
assertEquals(2, ex.getConstraintViolations().size());
Set<String> messages = getConstraintViolationMessages(ex.getConstraintViolations());
assertTrue(messages.contains("Workflow name cannot be null or empty"));
assertTrue(messages.contains("Version cannot be null"));
throw ex;
}
fail("metadataService.unregisterWorkflowDef did not throw ConstraintViolationException !");
}
@Test
public void testUnregisterWorkflowDef() {
metadataService.unregisterWorkflowDef("somename", 111);
verify(metadataDAO, times(1)).removeWorkflowDef("somename", 111);
}
@Test(expected = ConstraintViolationException.class)
public void testValidateEventNull() {
try {
metadataService.addEventHandler(null);
} catch (ConstraintViolationException ex) {
assertEquals(1, ex.getConstraintViolations().size());
Set<String> messages = getConstraintViolationMessages(ex.getConstraintViolations());
assertTrue(messages.contains("EventHandler cannot be null"));
throw ex;
}
fail("metadataService.addEventHandler did not throw ConstraintViolationException !");
}
@Test(expected = ConstraintViolationException.class)
public void testValidateEventNoEvent() {
try {
EventHandler eventHandler = new EventHandler();
metadataService.addEventHandler(eventHandler);
} catch (ConstraintViolationException ex) {
assertEquals(3, ex.getConstraintViolations().size());
Set<String> messages = getConstraintViolationMessages(ex.getConstraintViolations());
assertTrue(messages.contains("Missing event handler name"));
assertTrue(messages.contains("Missing event location"));
assertTrue(
messages.contains("No actions specified. Please specify at-least one action"));
throw ex;
}
fail("metadataService.addEventHandler did not throw ConstraintViolationException !");
}
// Versions 5..1 are stored in reverse order; the listing must come back ascending 1..5.
@Test
public void testWorkflowNamesAndVersions() {
Map<String, ? extends Iterable<WorkflowDefSummary>> namesAndVersions =
metadataService.getWorkflowNamesAndVersions();
Iterator<WorkflowDefSummary> versions =
namesAndVersions.get("test_workflow_def").iterator();
for (int i = 1; i <= 5; i++) {
WorkflowDefSummary ver = versions.next();
assertEquals(i, ver.getVersion());
assertNotNull(ver.getCreateTime());
assertEquals("test_workflow_def", ver.getName());
}
}
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/core/src/test/java/com/netflix/conductor/service/ExecutionServiceTest.java | core/src/test/java/com/netflix/conductor/service/ExecutionServiceTest.java | /*
* Copyright 2022 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.service;
import java.time.Duration;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.Mock;
import org.springframework.test.context.junit4.SpringRunner;
import com.netflix.conductor.common.metadata.tasks.Task;
import com.netflix.conductor.common.metadata.workflow.WorkflowDef;
import com.netflix.conductor.common.run.SearchResult;
import com.netflix.conductor.common.run.TaskSummary;
import com.netflix.conductor.common.run.Workflow;
import com.netflix.conductor.common.run.WorkflowSummary;
import com.netflix.conductor.common.utils.ExternalPayloadStorage;
import com.netflix.conductor.core.config.ConductorProperties;
import com.netflix.conductor.core.dal.ExecutionDAOFacade;
import com.netflix.conductor.core.execution.WorkflowExecutor;
import com.netflix.conductor.core.execution.tasks.SystemTaskRegistry;
import com.netflix.conductor.core.listener.TaskStatusListener;
import com.netflix.conductor.dao.QueueDAO;
import static junit.framework.TestCase.assertEquals;
import static org.mockito.Mockito.when;
@RunWith(SpringRunner.class)
public class ExecutionServiceTest {
@Mock private WorkflowExecutor workflowExecutor;
@Mock private ExecutionDAOFacade executionDAOFacade;
@Mock private QueueDAO queueDAO;
@Mock private ConductorProperties conductorProperties;
@Mock private ExternalPayloadStorage externalPayloadStorage;
@Mock private SystemTaskRegistry systemTaskRegistry;
@Mock private TaskStatusListener taskStatusListener;
private ExecutionService executionService;
private Workflow workflow1;
private Workflow workflow2;
private Task taskWorkflow1;
private Task taskWorkflow2;
private final List<String> sort = Collections.singletonList("Sort");
@Before
public void setup() {
when(conductorProperties.getTaskExecutionPostponeDuration())
.thenReturn(Duration.ofSeconds(60));
executionService =
new ExecutionService(
workflowExecutor,
executionDAOFacade,
queueDAO,
conductorProperties,
externalPayloadStorage,
systemTaskRegistry,
taskStatusListener);
WorkflowDef workflowDef = new WorkflowDef();
workflow1 = new Workflow();
workflow1.setWorkflowId("wf1");
workflow1.setWorkflowDefinition(workflowDef);
workflow2 = new Workflow();
workflow2.setWorkflowId("wf2");
workflow2.setWorkflowDefinition(workflowDef);
taskWorkflow1 = new Task();
taskWorkflow1.setTaskId("task1");
taskWorkflow1.setWorkflowInstanceId("wf1");
taskWorkflow2 = new Task();
taskWorkflow2.setTaskId("task2");
taskWorkflow2.setWorkflowInstanceId("wf2");
}
@Test
public void workflowSearchTest() {
when(executionDAOFacade.searchWorkflowSummary("query", "*", 0, 2, sort))
.thenReturn(
new SearchResult<>(
2,
Arrays.asList(
new WorkflowSummary(workflow1),
new WorkflowSummary(workflow2))));
when(executionDAOFacade.getWorkflow(workflow1.getWorkflowId(), false))
.thenReturn(workflow1);
when(executionDAOFacade.getWorkflow(workflow2.getWorkflowId(), false))
.thenReturn(workflow2);
SearchResult<WorkflowSummary> searchResult =
executionService.search("query", "*", 0, 2, sort);
assertEquals(2, searchResult.getTotalHits());
assertEquals(2, searchResult.getResults().size());
assertEquals(workflow1.getWorkflowId(), searchResult.getResults().get(0).getWorkflowId());
assertEquals(workflow2.getWorkflowId(), searchResult.getResults().get(1).getWorkflowId());
}
@Test
public void workflowSearchV2Test() {
when(executionDAOFacade.searchWorkflows("query", "*", 0, 2, sort))
.thenReturn(
new SearchResult<>(
2,
Arrays.asList(
workflow1.getWorkflowId(), workflow2.getWorkflowId())));
when(executionDAOFacade.getWorkflow(workflow1.getWorkflowId(), false))
.thenReturn(workflow1);
when(executionDAOFacade.getWorkflow(workflow2.getWorkflowId(), false))
.thenReturn(workflow2);
SearchResult<Workflow> searchResult = executionService.searchV2("query", "*", 0, 2, sort);
assertEquals(2, searchResult.getTotalHits());
assertEquals(Arrays.asList(workflow1, workflow2), searchResult.getResults());
}
@Test
public void workflowSearchV2ExceptionTest() {
when(executionDAOFacade.searchWorkflows("query", "*", 0, 2, sort))
.thenReturn(
new SearchResult<>(
2,
Arrays.asList(
workflow1.getWorkflowId(), workflow2.getWorkflowId())));
when(executionDAOFacade.getWorkflow(workflow1.getWorkflowId(), false))
.thenReturn(workflow1);
when(executionDAOFacade.getWorkflow(workflow2.getWorkflowId(), false))
.thenThrow(new RuntimeException());
SearchResult<Workflow> searchResult = executionService.searchV2("query", "*", 0, 2, sort);
assertEquals(1, searchResult.getTotalHits());
assertEquals(Collections.singletonList(workflow1), searchResult.getResults());
}
@Test
public void workflowSearchByTasksTest() {
when(executionDAOFacade.searchTaskSummary("query", "*", 0, 2, sort))
.thenReturn(
new SearchResult<>(
2,
Arrays.asList(
new TaskSummary(taskWorkflow1),
new TaskSummary(taskWorkflow2))));
when(executionDAOFacade.getWorkflow(workflow1.getWorkflowId(), false))
.thenReturn(workflow1);
when(executionDAOFacade.getWorkflow(workflow2.getWorkflowId(), false))
.thenReturn(workflow2);
SearchResult<WorkflowSummary> searchResult =
executionService.searchWorkflowByTasks("query", "*", 0, 2, sort);
assertEquals(2, searchResult.getTotalHits());
assertEquals(2, searchResult.getResults().size());
assertEquals(workflow1.getWorkflowId(), searchResult.getResults().get(0).getWorkflowId());
assertEquals(workflow2.getWorkflowId(), searchResult.getResults().get(1).getWorkflowId());
}
@Test
public void workflowSearchByTasksExceptionTest() {
when(executionDAOFacade.searchTaskSummary("query", "*", 0, 2, sort))
.thenReturn(
new SearchResult<>(
2,
Arrays.asList(
new TaskSummary(taskWorkflow1),
new TaskSummary(taskWorkflow2))));
when(executionDAOFacade.getWorkflow(workflow1.getWorkflowId(), false))
.thenReturn(workflow1);
when(executionDAOFacade.getTask(workflow2.getWorkflowId()))
.thenThrow(new RuntimeException());
SearchResult<WorkflowSummary> searchResult =
executionService.searchWorkflowByTasks("query", "*", 0, 2, sort);
assertEquals(1, searchResult.getTotalHits());
assertEquals(1, searchResult.getResults().size());
assertEquals(workflow1.getWorkflowId(), searchResult.getResults().get(0).getWorkflowId());
}
@Test
public void workflowSearchByTasksV2Test() {
when(executionDAOFacade.searchTasks("query", "*", 0, 2, sort))
.thenReturn(
new SearchResult<>(
2,
Arrays.asList(
taskWorkflow1.getTaskId(), taskWorkflow2.getTaskId())));
when(executionDAOFacade.getTask(taskWorkflow1.getTaskId())).thenReturn(taskWorkflow1);
when(executionDAOFacade.getTask(taskWorkflow2.getTaskId())).thenReturn(taskWorkflow2);
when(executionDAOFacade.getWorkflow(workflow1.getWorkflowId(), false))
.thenReturn(workflow1);
when(executionDAOFacade.getWorkflow(workflow2.getWorkflowId(), false))
.thenReturn(workflow2);
SearchResult<Workflow> searchResult =
executionService.searchWorkflowByTasksV2("query", "*", 0, 2, sort);
assertEquals(2, searchResult.getTotalHits());
assertEquals(Arrays.asList(workflow1, workflow2), searchResult.getResults());
}
@Test
public void workflowSearchByTasksV2ExceptionTest() {
when(executionDAOFacade.searchTasks("query", "*", 0, 2, sort))
.thenReturn(
new SearchResult<>(
2,
Arrays.asList(
taskWorkflow1.getTaskId(), taskWorkflow2.getTaskId())));
when(executionDAOFacade.getTask(taskWorkflow1.getTaskId())).thenReturn(taskWorkflow1);
when(executionDAOFacade.getTask(taskWorkflow2.getTaskId()))
.thenThrow(new RuntimeException());
when(executionDAOFacade.getWorkflow(workflow1.getWorkflowId(), false))
.thenReturn(workflow1);
SearchResult<Workflow> searchResult =
executionService.searchWorkflowByTasksV2("query", "*", 0, 2, sort);
assertEquals(1, searchResult.getTotalHits());
assertEquals(Collections.singletonList(workflow1), searchResult.getResults());
}
@Test
public void TaskSearchTest() {
List<TaskSummary> taskList =
Arrays.asList(new TaskSummary(taskWorkflow1), new TaskSummary(taskWorkflow2));
when(executionDAOFacade.searchTaskSummary("query", "*", 0, 2, sort))
.thenReturn(new SearchResult<>(2, taskList));
SearchResult<TaskSummary> searchResult =
executionService.getSearchTasks("query", "*", 0, 2, "Sort");
assertEquals(2, searchResult.getTotalHits());
assertEquals(2, searchResult.getResults().size());
assertEquals(taskWorkflow1.getTaskId(), searchResult.getResults().get(0).getTaskId());
assertEquals(taskWorkflow2.getTaskId(), searchResult.getResults().get(1).getTaskId());
}
@Test
public void TaskSearchV2Test() {
when(executionDAOFacade.searchTasks("query", "*", 0, 2, sort))
.thenReturn(
new SearchResult<>(
2,
Arrays.asList(
taskWorkflow1.getTaskId(), taskWorkflow2.getTaskId())));
when(executionDAOFacade.getTask(taskWorkflow1.getTaskId())).thenReturn(taskWorkflow1);
when(executionDAOFacade.getTask(taskWorkflow2.getTaskId())).thenReturn(taskWorkflow2);
SearchResult<Task> searchResult =
executionService.getSearchTasksV2("query", "*", 0, 2, "Sort");
assertEquals(2, searchResult.getTotalHits());
assertEquals(Arrays.asList(taskWorkflow1, taskWorkflow2), searchResult.getResults());
}
@Test
public void TaskSearchV2ExceptionTest() {
when(executionDAOFacade.searchTasks("query", "*", 0, 2, sort))
.thenReturn(
new SearchResult<>(
2,
Arrays.asList(
taskWorkflow1.getTaskId(), taskWorkflow2.getTaskId())));
when(executionDAOFacade.getTask(taskWorkflow1.getTaskId())).thenReturn(taskWorkflow1);
when(executionDAOFacade.getTask(taskWorkflow2.getTaskId()))
.thenThrow(new RuntimeException());
SearchResult<Task> searchResult =
executionService.getSearchTasksV2("query", "*", 0, 2, "Sort");
assertEquals(1, searchResult.getTotalHits());
assertEquals(Collections.singletonList(taskWorkflow1), searchResult.getResults());
}
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/core/src/test/java/com/netflix/conductor/service/TaskServiceTest.java | core/src/test/java/com/netflix/conductor/service/TaskServiceTest.java | /*
* Copyright 2020 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.service;
import java.util.List;
import java.util.Set;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.autoconfigure.EnableAutoConfiguration;
import org.springframework.boot.test.context.TestConfiguration;
import org.springframework.context.annotation.Bean;
import org.springframework.test.context.junit4.SpringRunner;
import com.netflix.conductor.common.metadata.tasks.Task;
import com.netflix.conductor.common.metadata.tasks.TaskResult;
import com.netflix.conductor.common.run.SearchResult;
import com.netflix.conductor.common.run.TaskSummary;
import com.netflix.conductor.dao.QueueDAO;
import jakarta.validation.ConstraintViolationException;
import static com.netflix.conductor.TestUtils.getConstraintViolationMessages;
import static org.junit.Assert.*;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
@SuppressWarnings("SpringJavaAutowiredMembersInspection")
@RunWith(SpringRunner.class)
@EnableAutoConfiguration
public class TaskServiceTest {

    /** Supplies a mocked ExecutionService and a real TaskServiceImpl wired against it. */
    @TestConfiguration
    static class TestTaskConfiguration {

        @Bean
        public ExecutionService executionService() {
            return mock(ExecutionService.class);
        }

        @Bean
        public TaskService taskService(ExecutionService executionService) {
            QueueDAO queueDAO = mock(QueueDAO.class);
            return new TaskServiceImpl(executionService, queueDAO);
        }
    }

    @Autowired private TaskService taskService;
    @Autowired private ExecutionService executionService;

    /**
     * Verifies that {@code e} carries exactly the given violation messages, then rethrows it so
     * the enclosing {@code @Test(expected = ConstraintViolationException.class)} still passes.
     */
    private static void assertViolationsAndRethrow(
            ConstraintViolationException e, String... expectedMessages) {
        assertEquals(expectedMessages.length, e.getConstraintViolations().size());
        Set<String> actualMessages = getConstraintViolationMessages(e.getConstraintViolations());
        for (String expected : expectedMessages) {
            assertTrue(actualMessages.contains(expected));
        }
        throw e;
    }

    @Test(expected = ConstraintViolationException.class)
    public void testPoll() {
        try {
            taskService.poll(null, null, null);
        } catch (ConstraintViolationException e) {
            assertViolationsAndRethrow(e, "TaskType cannot be null or empty.");
        }
    }

    @Test(expected = ConstraintViolationException.class)
    public void testBatchPoll() {
        try {
            taskService.batchPoll(null, null, null, null, null);
        } catch (ConstraintViolationException e) {
            assertViolationsAndRethrow(e, "TaskType cannot be null or empty.");
        }
    }

    @Test(expected = ConstraintViolationException.class)
    public void testGetTasks() {
        try {
            taskService.getTasks(null, null, null);
        } catch (ConstraintViolationException e) {
            assertViolationsAndRethrow(e, "TaskType cannot be null or empty.");
        }
    }

    @Test(expected = ConstraintViolationException.class)
    public void testGetPendingTaskForWorkflow() {
        try {
            taskService.getPendingTaskForWorkflow(null, null);
        } catch (ConstraintViolationException e) {
            assertViolationsAndRethrow(
                    e,
                    "WorkflowId cannot be null or empty.",
                    "TaskReferenceName cannot be null or empty.");
        }
    }

    @Test(expected = ConstraintViolationException.class)
    public void testUpdateTask() {
        try {
            taskService.updateTask(null);
        } catch (ConstraintViolationException e) {
            assertViolationsAndRethrow(e, "TaskResult cannot be null or empty.");
        }
    }

    @Test(expected = ConstraintViolationException.class)
    public void testUpdateTaskInValid() {
        try {
            TaskResult taskResult = new TaskResult();
            taskService.updateTask(taskResult);
        } catch (ConstraintViolationException e) {
            assertViolationsAndRethrow(
                    e,
                    "Workflow Id cannot be null or empty",
                    "Task ID cannot be null or empty");
        }
    }

    @Test(expected = ConstraintViolationException.class)
    public void testAckTaskReceived() {
        try {
            taskService.ackTaskReceived(null, null);
        } catch (ConstraintViolationException e) {
            assertViolationsAndRethrow(e, "TaskId cannot be null or empty.");
        }
    }

    /** A missing worker id is permitted; only the task id is mandatory. */
    @Test
    public void testAckTaskReceivedMissingWorkerId() {
        String ack = taskService.ackTaskReceived("abc", null);
        assertNotNull(ack);
    }

    @Test(expected = ConstraintViolationException.class)
    public void testLog() {
        try {
            taskService.log(null, null);
        } catch (ConstraintViolationException e) {
            assertViolationsAndRethrow(e, "TaskId cannot be null or empty.");
        }
    }

    @Test(expected = ConstraintViolationException.class)
    public void testGetTaskLogs() {
        try {
            taskService.getTaskLogs(null);
        } catch (ConstraintViolationException e) {
            assertViolationsAndRethrow(e, "TaskId cannot be null or empty.");
        }
    }

    @Test(expected = ConstraintViolationException.class)
    public void testGetTask() {
        try {
            taskService.getTask(null);
        } catch (ConstraintViolationException e) {
            assertViolationsAndRethrow(e, "TaskId cannot be null or empty.");
        }
    }

    @Test(expected = ConstraintViolationException.class)
    public void testRemoveTaskFromQueue() {
        try {
            taskService.removeTaskFromQueue(null, null);
        } catch (ConstraintViolationException e) {
            assertViolationsAndRethrow(
                    e,
                    "TaskId cannot be null or empty.",
                    "TaskType cannot be null or empty.");
        }
    }

    @Test(expected = ConstraintViolationException.class)
    public void testGetPollData() {
        try {
            taskService.getPollData(null);
        } catch (ConstraintViolationException e) {
            assertViolationsAndRethrow(e, "TaskType cannot be null or empty.");
        }
    }

    @Test(expected = ConstraintViolationException.class)
    public void testRequeuePendingTask() {
        try {
            taskService.requeuePendingTask(null);
        } catch (ConstraintViolationException e) {
            assertViolationsAndRethrow(e, "TaskType cannot be null or empty.");
        }
    }

    /** search() delegates to ExecutionService.getSearchTasks and returns its result as-is. */
    @Test
    public void testSearch() {
        SearchResult<TaskSummary> expectedResult =
                new SearchResult<>(2, List.of(mock(TaskSummary.class), mock(TaskSummary.class)));
        when(executionService.getSearchTasks("query", "*", 0, 2, "Sort"))
                .thenReturn(expectedResult);
        assertEquals(expectedResult, taskService.search(0, 2, "Sort", "*", "query"));
    }

    /** searchV2() delegates to ExecutionService.getSearchTasksV2 and returns its result as-is. */
    @Test
    public void testSearchV2() {
        SearchResult<Task> expectedResult =
                new SearchResult<>(2, List.of(mock(Task.class), mock(Task.class)));
        when(executionService.getSearchTasksV2("query", "*", 0, 2, "Sort"))
                .thenReturn(expectedResult);
        assertEquals(expectedResult, taskService.searchV2(0, 2, "Sort", "*", "query"));
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/core/src/test/java/com/netflix/conductor/metrics/WorkflowMonitorTest.java | core/src/test/java/com/netflix/conductor/metrics/WorkflowMonitorTest.java | /*
* Copyright 2022 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.metrics;
import java.util.List;
import java.util.Map;
import java.util.Set;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.Mock;
import org.springframework.test.context.junit4.SpringRunner;
import com.netflix.conductor.common.metadata.workflow.WorkflowDef;
import com.netflix.conductor.core.dal.ExecutionDAOFacade;
import com.netflix.conductor.dao.QueueDAO;
import com.netflix.conductor.service.MetadataService;
@RunWith(SpringRunner.class)
public class WorkflowMonitorTest {

    @Mock private MetadataService metadataService;
    @Mock private QueueDAO queueDAO;
    @Mock private ExecutionDAOFacade executionDAOFacade;

    private WorkflowMonitor workflowMonitor;

    @Before
    public void beforeEach() {
        // 1000 = refresh period; no workflow names are excluded from monitoring.
        workflowMonitor =
                new WorkflowMonitor(metadataService, queueDAO, executionDAOFacade, 1000, Set.of());
    }

    /** Builds a workflow definition with the given name, version and owner application. */
    private WorkflowDef makeDef(String name, int version, String ownerApp) {
        WorkflowDef definition = new WorkflowDef();
        definition.setName(name);
        definition.setVersion(version);
        definition.setOwnerApp(ownerApp);
        return definition;
    }

    @Test
    public void testPendingWorkflowDataMap() {
        List<WorkflowDef> definitions =
                List.of(
                        makeDef("test1", 1, null),
                        makeDef("test1", 2, "name1"),
                        makeDef("test2", 1, "first"),
                        makeDef("test2", 2, "mid"),
                        makeDef("test2", 3, "last"));

        Map<String, String> mapping =
                workflowMonitor.getPendingWorkflowToOwnerAppMap(definitions);

        // One entry per workflow name, mapped to the owner app of the last version listed.
        Assert.assertEquals(2, mapping.size());
        Assert.assertTrue(mapping.containsKey("test1"));
        Assert.assertTrue(mapping.containsKey("test2"));
        Assert.assertEquals("name1", mapping.get("test1"));
        Assert.assertEquals("last", mapping.get("test2"));
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/core/src/test/java/com/netflix/conductor/validations/WorkflowDefConstraintTest.java | core/src/test/java/com/netflix/conductor/validations/WorkflowDefConstraintTest.java | /*
* Copyright 2020 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.validations;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import org.mockito.Mockito;
import com.netflix.conductor.common.metadata.tasks.TaskDef;
import com.netflix.conductor.common.metadata.tasks.TaskType;
import com.netflix.conductor.common.metadata.workflow.WorkflowDef;
import com.netflix.conductor.common.metadata.workflow.WorkflowTask;
import com.netflix.conductor.dao.MetadataDAO;
import jakarta.validation.ConstraintViolation;
import jakarta.validation.Validation;
import jakarta.validation.Validator;
import jakarta.validation.ValidatorFactory;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.mockito.ArgumentMatchers.anyString;
import static org.mockito.Mockito.when;
public class WorkflowDefConstraintTest {

    // Single factory/validator pair for the whole class; the factory is closed in close().
    // Individual tests must NOT create their own factories (they leak) or reassign these fields.
    private static Validator validator;
    private static ValidatorFactory validatorFactory;
    private MetadataDAO mockMetadataDao;

    @BeforeClass
    public static void init() {
        validatorFactory = Validation.buildDefaultValidatorFactory();
        validator = validatorFactory.getValidator();
    }

    @AfterClass
    public static void close() {
        validatorFactory.close();
    }

    @Before
    public void setUp() {
        // Every task-definition lookup succeeds by default.
        mockMetadataDao = Mockito.mock(MetadataDAO.class);
        when(mockMetadataDao.getTaskDef(anyString())).thenReturn(new TaskDef());
        ValidationContext.initialize(mockMetadataDao);
    }

    /** A TaskDef with a null name violates two constraints. */
    @Test
    public void testWorkflowTaskName() {
        TaskDef taskDef = new TaskDef(); // name is null
        // Use the shared validator instead of building (and leaking) a new factory per test.
        Set<ConstraintViolation<Object>> result = validator.validate(taskDef);
        assertEquals(2, result.size());
    }

    /** A well-formed single-task workflow produces no violations. */
    @Test
    public void testWorkflowTaskSimple() {
        WorkflowDef workflowDef = new WorkflowDef();
        workflowDef.setName("sampleWorkflow");
        workflowDef.setDescription("Sample workflow def");
        workflowDef.setOwnerEmail("sample@test.com");
        workflowDef.setVersion(2);
        WorkflowTask workflowTask_1 = new WorkflowTask();
        workflowTask_1.setName("task_1");
        workflowTask_1.setTaskReferenceName("task_1");
        workflowTask_1.setType(TaskType.TASK_TYPE_SIMPLE);
        Map<String, Object> inputParam = new HashMap<>();
        inputParam.put("fileLocation", "${workflow.input.fileLocation}");
        workflowTask_1.setInputParameters(inputParam);
        List<WorkflowTask> tasks = new ArrayList<>();
        tasks.add(workflowTask_1);
        workflowDef.setTasks(tasks);
        Set<ConstraintViolation<WorkflowDef>> result = validator.validate(workflowDef);
        assertEquals(0, result.size());
    }

    /** An input parameter referencing an unknown task reference name is rejected. */
    @Test
    public void testWorkflowTaskInvalidInputParam() {
        WorkflowDef workflowDef = new WorkflowDef();
        workflowDef.setName("sampleWorkflow");
        workflowDef.setDescription("Sample workflow def");
        workflowDef.setOwnerEmail("sample@test.com");
        workflowDef.setVersion(2);
        WorkflowTask workflowTask_1 = new WorkflowTask();
        workflowTask_1.setName("task_1");
        workflowTask_1.setTaskReferenceName("task_1");
        workflowTask_1.setType(TaskType.TASK_TYPE_SIMPLE);
        Map<String, Object> inputParam = new HashMap<>();
        // "work" is not a task reference name defined in this workflow.
        inputParam.put("fileLocation", "${work.input.fileLocation}");
        workflowTask_1.setInputParameters(inputParam);
        List<WorkflowTask> tasks = new ArrayList<>();
        tasks.add(workflowTask_1);
        workflowDef.setTasks(tasks);
        Set<ConstraintViolation<WorkflowDef>> result = validator.validate(workflowDef);
        assertEquals(1, result.size());
        // assertEquals takes (expected, actual); the original had them reversed.
        assertEquals(
                "taskReferenceName: work for given task: task_1 input value: fileLocation of input parameter: ${work.input.fileLocation} is not defined in workflow definition.",
                result.iterator().next().getMessage());
    }

    /** Duplicate task reference names yield a uniqueness violation plus two dangling references. */
    @Test
    public void testWorkflowTaskReferenceNameNotUnique() {
        WorkflowDef workflowDef = new WorkflowDef();
        workflowDef.setName("sampleWorkflow");
        workflowDef.setDescription("Sample workflow def");
        workflowDef.setOwnerEmail("sample@test.com");
        workflowDef.setVersion(2);
        WorkflowTask workflowTask_1 = new WorkflowTask();
        workflowTask_1.setName("task_1");
        workflowTask_1.setTaskReferenceName("task_1");
        workflowTask_1.setType(TaskType.TASK_TYPE_SIMPLE);
        Map<String, Object> inputParam = new HashMap<>();
        inputParam.put("fileLocation", "${task_2.input.fileLocation}");
        workflowTask_1.setInputParameters(inputParam);
        WorkflowTask workflowTask_2 = new WorkflowTask();
        workflowTask_2.setName("task_2");
        // Deliberately the same reference name as workflowTask_1.
        workflowTask_2.setTaskReferenceName("task_1");
        workflowTask_2.setType(TaskType.TASK_TYPE_SIMPLE);
        workflowTask_2.setInputParameters(inputParam);
        List<WorkflowTask> tasks = new ArrayList<>();
        tasks.add(workflowTask_1);
        tasks.add(workflowTask_2);
        workflowDef.setTasks(tasks);
        Set<ConstraintViolation<WorkflowDef>> result = validator.validate(workflowDef);
        assertEquals(3, result.size());
        List<String> validationErrors = new ArrayList<>();
        result.forEach(e -> validationErrors.add(e.getMessage()));
        assertTrue(
                validationErrors.contains(
                        "taskReferenceName: task_2 for given task: task_2 input value: fileLocation of input parameter: ${task_2.input.fileLocation} is not defined in workflow definition."));
        assertTrue(
                validationErrors.contains(
                        "taskReferenceName: task_2 for given task: task_1 input value: fileLocation of input parameter: ${task_2.input.fileLocation} is not defined in workflow definition."));
        assertTrue(
                validationErrors.contains(
                        "taskReferenceName: task_1 should be unique across tasks for a given workflowDefinition: sampleWorkflow"));
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/core/src/test/java/com/netflix/conductor/validations/WorkflowTaskTypeConstraintTest.java | core/src/test/java/com/netflix/conductor/validations/WorkflowTaskTypeConstraintTest.java | /*
* Copyright 2020 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.validations;
import java.lang.reflect.Method;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import org.mockito.Mockito;
import com.netflix.conductor.common.metadata.tasks.TaskDef;
import com.netflix.conductor.common.metadata.tasks.TaskType;
import com.netflix.conductor.common.metadata.workflow.WorkflowTask;
import com.netflix.conductor.core.execution.tasks.Terminate;
import com.netflix.conductor.dao.MetadataDAO;
import jakarta.validation.ConstraintViolation;
import jakarta.validation.Validation;
import jakarta.validation.Validator;
import jakarta.validation.ValidatorFactory;
import jakarta.validation.executable.ExecutableValidator;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.mockito.ArgumentMatchers.anyString;
import static org.mockito.Mockito.when;
public class WorkflowTaskTypeConstraintTest {
private static Validator validator;
private static ValidatorFactory validatorFactory;
private MetadataDAO mockMetadataDao;
@BeforeClass
public static void init() {
validatorFactory = Validation.buildDefaultValidatorFactory();
validator = validatorFactory.getValidator();
}
@AfterClass
public static void close() {
validatorFactory.close();
}
@Before
public void setUp() {
mockMetadataDao = Mockito.mock(MetadataDAO.class);
ValidationContext.initialize(mockMetadataDao);
}
@Test
public void testWorkflowTaskMissingReferenceName() {
WorkflowTask workflowTask = createSampleWorkflowTask();
workflowTask.setDynamicForkTasksParam("taskList");
workflowTask.setDynamicForkTasksInputParamName("ForkTaskInputParam");
workflowTask.setTaskReferenceName(null);
Set<ConstraintViolation<Object>> result = validator.validate(workflowTask);
assertEquals(1, result.size());
assertEquals(
result.iterator().next().getMessage(),
"WorkflowTask taskReferenceName name cannot be empty or null");
}
@Test
public void testWorkflowTaskTestSetType() throws NoSuchMethodException {
WorkflowTask workflowTask = createSampleWorkflowTask();
Method method = WorkflowTask.class.getMethod("setType", String.class);
Object[] parameterValues = {""};
ExecutableValidator executableValidator = validator.forExecutables();
Set<ConstraintViolation<Object>> result =
executableValidator.validateParameters(workflowTask, method, parameterValues);
assertEquals(1, result.size());
assertEquals(
result.iterator().next().getMessage(), "WorkTask type cannot be null or empty");
}
@Test
public void testWorkflowTaskTypeEvent() {
WorkflowTask workflowTask = createSampleWorkflowTask();
workflowTask.setType("EVENT");
when(mockMetadataDao.getTaskDef(anyString())).thenReturn(new TaskDef());
Set<ConstraintViolation<WorkflowTask>> result = validator.validate(workflowTask);
assertEquals(1, result.size());
assertEquals(
result.iterator().next().getMessage(),
"sink field is required for taskType: EVENT taskName: encode");
}
@Test
public void testWorkflowTaskTypeDynamic() {
WorkflowTask workflowTask = createSampleWorkflowTask();
workflowTask.setType("DYNAMIC");
when(mockMetadataDao.getTaskDef(anyString())).thenReturn(new TaskDef());
Set<ConstraintViolation<WorkflowTask>> result = validator.validate(workflowTask);
assertEquals(1, result.size());
assertEquals(
result.iterator().next().getMessage(),
"dynamicTaskNameParam field is required for taskType: DYNAMIC taskName: encode");
}
@Test
public void testWorkflowTaskTypeDecision() {
WorkflowTask workflowTask = createSampleWorkflowTask();
workflowTask.setType("DECISION");
when(mockMetadataDao.getTaskDef(anyString())).thenReturn(new TaskDef());
Set<ConstraintViolation<WorkflowTask>> result = validator.validate(workflowTask);
assertEquals(2, result.size());
List<String> validationErrors = new ArrayList<>();
result.forEach(e -> validationErrors.add(e.getMessage()));
assertTrue(
validationErrors.contains(
"decisionCases should have atleast one task for taskType: DECISION taskName: encode"));
assertTrue(
validationErrors.contains(
"caseValueParam or caseExpression field is required for taskType: DECISION taskName: encode"));
}
/** DO_WHILE tasks must declare both loopCondition and loopOver. */
@Test
public void testWorkflowTaskTypeDoWhile() {
    WorkflowTask workflowTask = createSampleWorkflowTask();
    workflowTask.setType("DO_WHILE");
    when(mockMetadataDao.getTaskDef(anyString())).thenReturn(new TaskDef());
    // Reuse the shared helper instead of re-collecting violation messages by hand.
    List<String> validationErrors = getErrorMessages(workflowTask);
    assertEquals(2, validationErrors.size());
    assertTrue(
            validationErrors.contains(
                    "loopCondition field is required for taskType: DO_WHILE taskName: encode"));
    assertTrue(
            validationErrors.contains(
                    "loopOver field is required for taskType: DO_WHILE taskName: encode"));
}
/** WAIT accepts either 'duration' or 'until', but not both. */
@Test
public void testWorkflowTaskTypeWait() {
    WorkflowTask workflowTask = createSampleWorkflowTask();
    workflowTask.setType("WAIT");
    // Bare WAIT task is valid.
    assertEquals(0, getErrorMessages(workflowTask).size());
    // Supplying both timing inputs must be rejected.
    workflowTask.setInputParameters(Map.of("duration", "10s", "until", "2022-04-16"));
    when(mockMetadataDao.getTaskDef(anyString())).thenReturn(new TaskDef());
    List<String> validationErrors = getErrorMessages(workflowTask);
    assertEquals(1, validationErrors.size());
    assertTrue(
            validationErrors.contains(
                    "Both 'duration' and 'until' specified. Please provide only one input"));
}
/** With a caseExpression set, only the missing decisionCases violation remains. */
@Test
public void testWorkflowTaskTypeDecisionWithCaseParam() {
    WorkflowTask workflowTask = createSampleWorkflowTask();
    workflowTask.setType("DECISION");
    workflowTask.setCaseExpression("$.valueCheck == null ? 'true': 'false'");
    when(mockMetadataDao.getTaskDef(anyString())).thenReturn(new TaskDef());
    // Reuse the shared helper instead of re-collecting violation messages by hand.
    List<String> validationErrors = getErrorMessages(workflowTask);
    assertEquals(1, validationErrors.size());
    assertTrue(
            validationErrors.contains(
                    "decisionCases should have atleast one task for taskType: DECISION taskName: encode"));
}
/** FORK_JOIN_DYNAMIC without any fork params reports both missing fields. */
@Test
public void testWorkflowTaskTypeForJoinDynamic() {
    WorkflowTask workflowTask = createSampleWorkflowTask();
    workflowTask.setType("FORK_JOIN_DYNAMIC");
    when(mockMetadataDao.getTaskDef(anyString())).thenReturn(new TaskDef());
    // Reuse the shared helper instead of re-collecting violation messages by hand.
    List<String> validationErrors = getErrorMessages(workflowTask);
    assertEquals(2, validationErrors.size());
    assertTrue(
            validationErrors.contains(
                    "dynamicForkTasksInputParamName field is required for taskType: FORK_JOIN_DYNAMIC taskName: encode"));
    assertTrue(
            validationErrors.contains(
                    "dynamicForkTasksParam field is required for taskType: FORK_JOIN_DYNAMIC taskName: encode"));
}
/** Legacy dynamicForkJoinTasksParam alone is a valid FORK_JOIN_DYNAMIC configuration. */
@Test
public void testWorkflowTaskTypeForJoinDynamicLegacy() {
    when(mockMetadataDao.getTaskDef(anyString())).thenReturn(new TaskDef());
    WorkflowTask forkTask = createSampleWorkflowTask();
    forkTask.setType("FORK_JOIN_DYNAMIC");
    forkTask.setDynamicForkJoinTasksParam("taskList");
    Set<ConstraintViolation<WorkflowTask>> violations = validator.validate(forkTask);
    assertEquals(0, violations.size());
}
/** Mixing the legacy param with dynamicForkTasksInputParamName is rejected. */
@Test
public void testWorkflowTaskTypeForJoinDynamicWithForJoinTaskParam() {
    WorkflowTask workflowTask = createSampleWorkflowTask();
    workflowTask.setType("FORK_JOIN_DYNAMIC");
    workflowTask.setDynamicForkJoinTasksParam("taskList");
    workflowTask.setDynamicForkTasksInputParamName("ForkTaskInputParam");
    when(mockMetadataDao.getTaskDef(anyString())).thenReturn(new TaskDef());
    // Reuse the shared helper instead of re-collecting violation messages by hand.
    List<String> validationErrors = getErrorMessages(workflowTask);
    assertEquals(1, validationErrors.size());
    assertTrue(
            validationErrors.contains(
                    "dynamicForkJoinTasksParam or combination of dynamicForkTasksInputParamName and dynamicForkTasksParam cam be used for taskType: FORK_JOIN_DYNAMIC taskName: encode"));
}
/** dynamicForkTasksParam plus dynamicForkTasksInputParamName together are valid. */
@Test
public void testWorkflowTaskTypeForJoinDynamicValid() {
    when(mockMetadataDao.getTaskDef(anyString())).thenReturn(new TaskDef());
    WorkflowTask forkTask = createSampleWorkflowTask();
    forkTask.setType("FORK_JOIN_DYNAMIC");
    forkTask.setDynamicForkTasksParam("ForkTasksParam");
    forkTask.setDynamicForkTasksInputParamName("ForkTaskInputParam");
    Set<ConstraintViolation<WorkflowTask>> violations = validator.validate(forkTask);
    assertEquals(0, violations.size());
}
/** Supplying the legacy param AND both new params is rejected as ambiguous. */
@Test
public void testWorkflowTaskTypeForJoinDynamicWithForJoinTaskParamAndInputTaskParam() {
    WorkflowTask workflowTask = createSampleWorkflowTask();
    workflowTask.setType("FORK_JOIN_DYNAMIC");
    workflowTask.setDynamicForkJoinTasksParam("taskList");
    workflowTask.setDynamicForkTasksInputParamName("ForkTaskInputParam");
    workflowTask.setDynamicForkTasksParam("ForkTasksParam");
    when(mockMetadataDao.getTaskDef(anyString())).thenReturn(new TaskDef());
    // Reuse the shared helper instead of re-collecting violation messages by hand.
    List<String> validationErrors = getErrorMessages(workflowTask);
    assertEquals(1, validationErrors.size());
    assertTrue(
            validationErrors.contains(
                    "dynamicForkJoinTasksParam or combination of dynamicForkTasksInputParamName and dynamicForkTasksParam cam be used for taskType: FORK_JOIN_DYNAMIC taskName: encode"));
}
/** An HTTP task with an inline http_request input passes validation. */
@Test
public void testWorkflowTaskTypeHTTP() {
    when(mockMetadataDao.getTaskDef(anyString())).thenReturn(new TaskDef());
    WorkflowTask httpTask = createSampleWorkflowTask();
    httpTask.setType("HTTP");
    httpTask.getInputParameters().put("http_request", "http://www.netflix.com");
    Set<ConstraintViolation<WorkflowTask>> violations = validator.validate(httpTask);
    assertEquals(0, violations.size());
}
/** HTTP tasks without http_request anywhere must fail validation. */
@Test
public void testWorkflowTaskTypeHTTPWithHttpParamMissing() {
    WorkflowTask workflowTask = createSampleWorkflowTask();
    workflowTask.setType("HTTP");
    when(mockMetadataDao.getTaskDef(anyString())).thenReturn(new TaskDef());
    // Reuse the shared helper instead of re-collecting violation messages by hand.
    List<String> validationErrors = getErrorMessages(workflowTask);
    assertEquals(1, validationErrors.size());
    assertTrue(
            validationErrors.contains(
                    "inputParameters.http_request field is required for taskType: HTTP taskName: encode"));
}
/** http_request supplied via the TaskDef input template also satisfies validation. */
@Test
public void testWorkflowTaskTypeHTTPWithHttpParamInTaskDef() {
    TaskDef taskDef = new TaskDef();
    taskDef.setName("encode");
    taskDef.getInputTemplate().put("http_request", "http://www.netflix.com");
    when(mockMetadataDao.getTaskDef(anyString())).thenReturn(taskDef);
    WorkflowTask httpTask = createSampleWorkflowTask();
    httpTask.setType("HTTP");
    Set<ConstraintViolation<WorkflowTask>> violations = validator.validate(httpTask);
    assertEquals(0, violations.size());
}
/** http_request present in both the task and the TaskDef template is still valid. */
@Test
public void testWorkflowTaskTypeHTTPWithHttpParamInTaskDefAndWorkflowTask() {
    TaskDef taskDef = new TaskDef();
    taskDef.setName("encode");
    taskDef.getInputTemplate().put("http_request", "http://www.netflix.com");
    when(mockMetadataDao.getTaskDef(anyString())).thenReturn(taskDef);
    WorkflowTask httpTask = createSampleWorkflowTask();
    httpTask.setType("HTTP");
    httpTask.getInputParameters().put("http_request", "http://www.netflix.com");
    Set<ConstraintViolation<WorkflowTask>> violations = validator.validate(httpTask);
    assertEquals(0, violations.size());
}
/** FORK_JOIN tasks require at least one forked task. */
@Test
public void testWorkflowTaskTypeFork() {
    WorkflowTask workflowTask = createSampleWorkflowTask();
    workflowTask.setType("FORK_JOIN");
    when(mockMetadataDao.getTaskDef(anyString())).thenReturn(new TaskDef());
    // Reuse the shared helper instead of re-collecting violation messages by hand.
    List<String> validationErrors = getErrorMessages(workflowTask);
    assertEquals(1, validationErrors.size());
    assertTrue(
            validationErrors.contains(
                    "forkTasks should have atleast one task for taskType: FORK_JOIN taskName: encode"));
}
/** SUB_WORKFLOW tasks must carry a subWorkflowParam. */
@Test
public void testWorkflowTaskTypeSubworkflowMissingSubworkflowParam() {
    WorkflowTask workflowTask = createSampleWorkflowTask();
    workflowTask.setType("SUB_WORKFLOW");
    // Reuse the shared helper instead of re-collecting violation messages by hand.
    List<String> validationErrors = getErrorMessages(workflowTask);
    assertEquals(1, validationErrors.size());
    assertTrue(
            validationErrors.contains(
                    "subWorkflowParam field is required for taskType: SUB_WORKFLOW taskName: encode"));
}
/** TERMINATE requires a terminationStatus input parameter. */
@Test
public void testWorkflowTaskTypeTerminateWithoutTerminationStatus() {
    WorkflowTask terminateTask = createSampleWorkflowTask();
    terminateTask.setType(TaskType.TASK_TYPE_TERMINATE);
    terminateTask.setName("terminate_task");
    terminateTask.setInputParameters(
            Collections.singletonMap(
                    Terminate.getTerminationWorkflowOutputParameter(), "blah"));
    List<String> errors = getErrorMessages(terminateTask);
    assertEquals(1, errors.size());
    assertEquals(
            "terminate task must have an terminationStatus parameter and must be set to COMPLETED or FAILED, taskName: terminate_task",
            errors.get(0));
}
/** TERMINATE rejects terminationStatus values other than COMPLETED/FAILED. */
@Test
public void testWorkflowTaskTypeTerminateWithInvalidStatus() {
    WorkflowTask terminateTask = createSampleWorkflowTask();
    terminateTask.setType(TaskType.TASK_TYPE_TERMINATE);
    terminateTask.setName("terminate_task");
    terminateTask.setInputParameters(
            Collections.singletonMap(Terminate.getTerminationStatusParameter(), "blah"));
    List<String> errors = getErrorMessages(terminateTask);
    assertEquals(1, errors.size());
    assertEquals(
            "terminate task must have an terminationStatus parameter and must be set to COMPLETED or FAILED, taskName: terminate_task",
            errors.get(0));
}
/** TERMINATE tasks cannot be marked optional. */
@Test
public void testWorkflowTaskTypeTerminateOptional() {
    WorkflowTask terminateTask = createSampleWorkflowTask();
    terminateTask.setType(TaskType.TASK_TYPE_TERMINATE);
    terminateTask.setName("terminate_task");
    terminateTask.setOptional(true);
    terminateTask.setInputParameters(
            Collections.singletonMap(Terminate.getTerminationStatusParameter(), "COMPLETED"));
    List<String> errors = getErrorMessages(terminateTask);
    assertEquals(1, errors.size());
    assertEquals(
            "terminate task cannot be optional, taskName: terminate_task", errors.get(0));
}
/** A TERMINATE task with a COMPLETED terminationStatus is valid. */
@Test
public void testWorkflowTaskTypeTerminateValid() {
    WorkflowTask terminateTask = createSampleWorkflowTask();
    terminateTask.setType(TaskType.TASK_TYPE_TERMINATE);
    terminateTask.setName("terminate_task");
    terminateTask.setInputParameters(
            Collections.singletonMap(Terminate.getTerminationStatusParameter(), "COMPLETED"));
    List<String> errors = getErrorMessages(terminateTask);
    assertEquals(0, errors.size());
}
/** A KAFKA_PUBLISH task with an inline kafka_request input passes validation. */
@Test
public void testWorkflowTaskTypeKafkaPublish() {
    when(mockMetadataDao.getTaskDef(anyString())).thenReturn(new TaskDef());
    WorkflowTask kafkaTask = createSampleWorkflowTask();
    kafkaTask.setType("KAFKA_PUBLISH");
    kafkaTask.getInputParameters().put("kafka_request", "testInput");
    Set<ConstraintViolation<WorkflowTask>> violations = validator.validate(kafkaTask);
    assertEquals(0, violations.size());
}
/** KAFKA_PUBLISH tasks without kafka_request anywhere must fail validation. */
@Test
public void testWorkflowTaskTypeKafkaPublishWithRequestParamMissing() {
    WorkflowTask workflowTask = createSampleWorkflowTask();
    workflowTask.setType("KAFKA_PUBLISH");
    when(mockMetadataDao.getTaskDef(anyString())).thenReturn(new TaskDef());
    // Reuse the shared helper instead of re-collecting violation messages by hand.
    List<String> validationErrors = getErrorMessages(workflowTask);
    assertEquals(1, validationErrors.size());
    assertTrue(
            validationErrors.contains(
                    "inputParameters.kafka_request field is required for taskType: KAFKA_PUBLISH taskName: encode"));
}
/** kafka_request supplied via the TaskDef input template also satisfies validation. */
@Test
public void testWorkflowTaskTypeKafkaPublishWithKafkaParamInTaskDef() {
    TaskDef taskDef = new TaskDef();
    taskDef.setName("encode");
    taskDef.getInputTemplate().put("kafka_request", "test_kafka_request");
    when(mockMetadataDao.getTaskDef(anyString())).thenReturn(taskDef);
    WorkflowTask kafkaTask = createSampleWorkflowTask();
    kafkaTask.setType("KAFKA_PUBLISH");
    Set<ConstraintViolation<WorkflowTask>> violations = validator.validate(kafkaTask);
    assertEquals(0, violations.size());
}
/** kafka_request in both the task and the TaskDef template is still valid. */
@Test
public void testWorkflowTaskTypeKafkaPublishWithRequestParamInTaskDefAndWorkflowTask() {
    TaskDef taskDef = new TaskDef();
    taskDef.setName("encode");
    taskDef.getInputTemplate().put("kafka_request", "test Kafka Request");
    when(mockMetadataDao.getTaskDef(anyString())).thenReturn(taskDef);
    WorkflowTask kafkaTask = createSampleWorkflowTask();
    kafkaTask.setType("KAFKA_PUBLISH");
    kafkaTask.getInputParameters().put("kafka_request", "http://www.netflix.com");
    Set<ConstraintViolation<WorkflowTask>> violations = validator.validate(kafkaTask);
    assertEquals(0, violations.size());
}
/** A JSON_JQ_TRANSFORM task with an inline queryExpression passes validation. */
@Test
public void testWorkflowTaskTypeJSONJQTransform() {
    when(mockMetadataDao.getTaskDef(anyString())).thenReturn(new TaskDef());
    WorkflowTask jqTask = createSampleWorkflowTask();
    jqTask.setType("JSON_JQ_TRANSFORM");
    jqTask.getInputParameters().put("queryExpression", ".");
    Set<ConstraintViolation<WorkflowTask>> violations = validator.validate(jqTask);
    assertEquals(0, violations.size());
}
/** JSON_JQ_TRANSFORM tasks without queryExpression anywhere must fail validation. */
@Test
public void testWorkflowTaskTypeJSONJQTransformWithQueryParamMissing() {
    WorkflowTask workflowTask = createSampleWorkflowTask();
    workflowTask.setType("JSON_JQ_TRANSFORM");
    when(mockMetadataDao.getTaskDef(anyString())).thenReturn(new TaskDef());
    // Reuse the shared helper instead of re-collecting violation messages by hand.
    List<String> validationErrors = getErrorMessages(workflowTask);
    assertEquals(1, validationErrors.size());
    assertTrue(
            validationErrors.contains(
                    "inputParameters.queryExpression field is required for taskType: JSON_JQ_TRANSFORM taskName: encode"));
}
/** queryExpression supplied via the TaskDef input template also satisfies validation. */
@Test
public void testWorkflowTaskTypeJSONJQTransformWithQueryParamInTaskDef() {
    TaskDef taskDef = new TaskDef();
    taskDef.setName("encode");
    taskDef.getInputTemplate().put("queryExpression", ".");
    when(mockMetadataDao.getTaskDef(anyString())).thenReturn(taskDef);
    WorkflowTask jqTask = createSampleWorkflowTask();
    jqTask.setType("JSON_JQ_TRANSFORM");
    Set<ConstraintViolation<WorkflowTask>> violations = validator.validate(jqTask);
    assertEquals(0, violations.size());
}
/** Runs bean validation on the task and returns just the violation messages. */
private List<String> getErrorMessages(WorkflowTask workflowTask) {
    List<String> messages = new ArrayList<>();
    for (ConstraintViolation<WorkflowTask> violation : validator.validate(workflowTask)) {
        messages.add(violation.getMessage());
    }
    return messages;
}
/** Builds the minimal "encode" FORK_JOIN_DYNAMIC task used as the base fixture. */
private WorkflowTask createSampleWorkflowTask() {
    WorkflowTask task = new WorkflowTask();
    task.setName("encode");
    task.setTaskReferenceName("encode");
    task.setType("FORK_JOIN_DYNAMIC");
    // Must stay mutable: several tests add entries via getInputParameters().put(...).
    Map<String, Object> inputParameters = new HashMap<>();
    inputParameters.put("fileLocation", "${workflow.input.fileLocation}");
    task.setInputParameters(inputParameters);
    return task;
}
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/core/src/test/java/com/netflix/conductor/core/storage/DummyPayloadStorageTest.java | core/src/test/java/com/netflix/conductor/core/storage/DummyPayloadStorageTest.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.core.storage;
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.UnsupportedEncodingException;
import java.nio.charset.StandardCharsets;
import java.util.Map;
import org.apache.commons.io.IOUtils;
import org.junit.Before;
import org.junit.Test;
import com.netflix.conductor.common.run.ExternalStorageLocation;
import com.netflix.conductor.common.utils.ExternalPayloadStorage;
import com.fasterxml.jackson.databind.ObjectMapper;
import static com.netflix.conductor.common.utils.ExternalPayloadStorage.PayloadType;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
/** Tests for {@link DummyPayloadStorage}: upload in setup, then exercise download paths. */
public class DummyPayloadStorageTest {

    private static final String TEST_STORAGE_PATH = "test-storage";

    // NOTE(review): the trailing comma makes this invalid strict JSON, so Jackson
    // rejects it and testDownloadForValidPath ends up in its IOException branch.
    public static final String MOCK_PAYLOAD = "{\n" + "\"output\": \"TEST_OUTPUT\",\n" + "}\n";

    private DummyPayloadStorage dummyPayloadStorage;
    private ExternalStorageLocation location;
    private ObjectMapper objectMapper;

    /** Uploads MOCK_PAYLOAD into a fresh dummy storage so each test starts from a known state. */
    @Before
    public void setup() {
        dummyPayloadStorage = new DummyPayloadStorage();
        objectMapper = new ObjectMapper();
        location =
                dummyPayloadStorage.getLocation(
                        ExternalPayloadStorage.Operation.WRITE,
                        PayloadType.TASK_OUTPUT,
                        TEST_STORAGE_PATH);
        // StandardCharsets.UTF_8 cannot fail, unlike getBytes("UTF-8") which forced a
        // checked UnsupportedEncodingException that was previously swallowed silently.
        byte[] payloadBytes = MOCK_PAYLOAD.getBytes(StandardCharsets.UTF_8);
        dummyPayloadStorage.upload(
                location.getPath(),
                new ByteArrayInputStream(payloadBytes),
                payloadBytes.length);
    }

    @Test
    public void testGetLocationNotNull() {
        assertNotNull(location);
    }

    /** Reading back the uploaded payload; parse failures must surface as IOException. */
    @Test
    public void testDownloadForValidPath() {
        try (InputStream inputStream = dummyPayloadStorage.download(location.getPath())) {
            Map<String, Object> payload =
                    objectMapper.readValue(
                            IOUtils.toString(inputStream, StandardCharsets.UTF_8), Map.class);
            assertTrue(payload.containsKey("output"));
            // JUnit convention: expected value first, actual second (was swapped).
            assertEquals("TEST_OUTPUT", payload.get("output"));
        } catch (Exception e) {
            assertTrue(e instanceof IOException);
        }
    }

    /** Unknown paths yield a null stream rather than an exception. */
    @Test
    public void testDownloadForInvalidPath() {
        InputStream inputStream = dummyPayloadStorage.download("testPath");
        assertNull(inputStream);
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/core/src/test/java/com/netflix/conductor/core/dal/ExecutionDAOFacadeTest.java | core/src/test/java/com/netflix/conductor/core/dal/ExecutionDAOFacadeTest.java | /*
* Copyright 2022 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.core.dal;
import java.io.InputStream;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.UUID;
import org.apache.commons.io.IOUtils;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.test.context.ContextConfiguration;
import org.springframework.test.context.junit4.SpringRunner;
import com.netflix.conductor.common.config.TestObjectMapperConfiguration;
import com.netflix.conductor.common.metadata.events.EventExecution;
import com.netflix.conductor.common.run.SearchResult;
import com.netflix.conductor.common.run.Workflow;
import com.netflix.conductor.common.utils.ExternalPayloadStorage;
import com.netflix.conductor.core.config.ConductorProperties;
import com.netflix.conductor.core.exception.TerminateWorkflowException;
import com.netflix.conductor.core.execution.TestDeciderService;
import com.netflix.conductor.core.utils.ExternalPayloadStorageUtils;
import com.netflix.conductor.dao.*;
import com.netflix.conductor.model.TaskModel;
import com.netflix.conductor.model.WorkflowModel;
import com.fasterxml.jackson.databind.ObjectMapper;
import static org.junit.Assert.*;
import static org.mockito.ArgumentMatchers.*;
import static org.mockito.Mockito.*;
@ContextConfiguration(classes = {TestObjectMapperConfiguration.class})
@RunWith(SpringRunner.class)
/** Tests for {@link ExecutionDAOFacade} with all DAOs mocked. */
public class ExecutionDAOFacadeTest {

    private ExecutionDAO executionDAO;
    private IndexDAO indexDAO;
    private ExecutionDAOFacade executionDAOFacade;
    private ExternalPayloadStorageUtils externalPayloadStorageUtils;

    @Autowired private ObjectMapper objectMapper;

    /** Wires the facade with mocks; async indexing and event-execution indexing enabled. */
    @Before
    public void setUp() {
        executionDAO = mock(ExecutionDAO.class);
        QueueDAO queueDAO = mock(QueueDAO.class);
        indexDAO = mock(IndexDAO.class);
        externalPayloadStorageUtils = mock(ExternalPayloadStorageUtils.class);
        RateLimitingDAO rateLimitingDao = mock(RateLimitingDAO.class);
        ConcurrentExecutionLimitDAO concurrentExecutionLimitDAO =
                mock(ConcurrentExecutionLimitDAO.class);
        PollDataDAO pollDataDAO = mock(PollDataDAO.class);
        ConductorProperties properties = mock(ConductorProperties.class);
        when(properties.isEventExecutionIndexingEnabled()).thenReturn(true);
        when(properties.isAsyncIndexingEnabled()).thenReturn(true);
        executionDAOFacade =
                new ExecutionDAOFacade(
                        executionDAO,
                        queueDAO,
                        indexDAO,
                        rateLimitingDao,
                        concurrentExecutionLimitDAO,
                        pollDataDAO,
                        objectMapper,
                        properties,
                        externalPayloadStorageUtils);
    }

    /** Workflow found in the primary store: the index is never consulted. */
    @Test
    public void testGetWorkflow() throws Exception {
        when(executionDAO.getWorkflow(any(), anyBoolean())).thenReturn(new WorkflowModel());
        Workflow workflow = executionDAOFacade.getWorkflow("workflowId", true);
        assertNotNull(workflow);
        verify(indexDAO, never()).get(any(), any());
    }

    /** Falls back to the index when the primary store returns null. */
    @Test
    public void testGetWorkflowModel() throws Exception {
        when(executionDAO.getWorkflow(any(), anyBoolean())).thenReturn(new WorkflowModel());
        WorkflowModel workflowModel = executionDAOFacade.getWorkflowModel("workflowId", true);
        assertNotNull(workflowModel);
        verify(indexDAO, never()).get(any(), any());
        when(executionDAO.getWorkflow(any(), anyBoolean())).thenReturn(null);
        // try-with-resources: the resource stream was previously never closed.
        String jsonString;
        try (InputStream stream =
                ExecutionDAOFacadeTest.class.getResourceAsStream("/test.json")) {
            jsonString = new String(IOUtils.toByteArray(stream));
        }
        when(indexDAO.get(any(), any())).thenReturn(jsonString);
        workflowModel = executionDAOFacade.getWorkflowModel("workflowId", true);
        assertNotNull(workflowModel);
        verify(indexDAO, times(1)).get(any(), any());
    }

    /** Correlation-id lookup goes to the index only when the DAO cannot search. */
    @Test
    public void testGetWorkflowsByCorrelationId() {
        when(executionDAO.canSearchAcrossWorkflows()).thenReturn(true);
        when(executionDAO.getWorkflowsByCorrelationId(any(), any(), anyBoolean()))
                .thenReturn(Collections.singletonList(new WorkflowModel()));
        List<Workflow> workflows =
                executionDAOFacade.getWorkflowsByCorrelationId(
                        "workflowName", "correlationId", true);
        assertNotNull(workflows);
        assertEquals(1, workflows.size());
        verify(indexDAO, never())
                .searchWorkflows(anyString(), anyString(), anyInt(), anyInt(), any());
        when(executionDAO.canSearchAcrossWorkflows()).thenReturn(false);
        List<String> workflowIds = new ArrayList<>();
        workflowIds.add("workflowId");
        SearchResult<String> searchResult = new SearchResult<>();
        searchResult.setResults(workflowIds);
        when(indexDAO.searchWorkflows(anyString(), anyString(), anyInt(), anyInt(), any()))
                .thenReturn(searchResult);
        when(executionDAO.getWorkflow("workflowId", true)).thenReturn(new WorkflowModel());
        workflows =
                executionDAOFacade.getWorkflowsByCorrelationId(
                        "workflowName", "correlationId", true);
        assertNotNull(workflows);
        assertEquals(1, workflows.size());
    }

    /** Removing without archival deletes from the store and async-removes from the index. */
    @Test
    public void testRemoveWorkflow() {
        WorkflowModel workflow = new WorkflowModel();
        workflow.setWorkflowId("workflowId");
        workflow.setStatus(WorkflowModel.Status.COMPLETED);
        TaskModel task = new TaskModel();
        task.setTaskId("taskId");
        workflow.setTasks(Collections.singletonList(task));
        when(executionDAO.getWorkflow(anyString(), anyBoolean())).thenReturn(workflow);
        executionDAOFacade.removeWorkflow("workflowId", false);
        verify(executionDAO, times(1)).removeWorkflow(anyString());
        verify(executionDAO, never()).removeTask(anyString());
        verify(indexDAO, never()).updateWorkflow(anyString(), any(), any());
        verify(indexDAO, never()).updateTask(anyString(), anyString(), any(), any());
        verify(indexDAO, times(1)).asyncRemoveWorkflow(anyString());
        verify(indexDAO, times(1)).asyncRemoveTask(anyString(), anyString());
    }

    /** Removing with archival updates the index instead of removing from it. */
    @Test
    public void testArchiveWorkflow() throws Exception {
        // try-with-resources: the resource stream was previously never closed.
        WorkflowModel workflow;
        try (InputStream stream =
                TestDeciderService.class.getResourceAsStream("/completed.json")) {
            workflow = objectMapper.readValue(stream, WorkflowModel.class);
        }
        when(executionDAO.getWorkflow(anyString(), anyBoolean())).thenReturn(workflow);
        executionDAOFacade.removeWorkflow("workflowId", true);
        verify(executionDAO, times(1)).removeWorkflow(anyString());
        verify(executionDAO, never()).removeTask(anyString());
        verify(indexDAO, times(1)).updateWorkflow(anyString(), any(), any());
        verify(indexDAO, times(15)).updateTask(anyString(), anyString(), any(), any());
        verify(indexDAO, never()).removeWorkflow(anyString());
        verify(indexDAO, never()).removeTask(anyString(), anyString());
    }

    /** Event executions are only async-indexed when the store accepted them. */
    @Test
    public void testAddEventExecution() {
        when(executionDAO.addEventExecution(any())).thenReturn(false);
        boolean added = executionDAOFacade.addEventExecution(new EventExecution());
        assertFalse(added);
        verify(indexDAO, never()).addEventExecution(any());
        when(executionDAO.addEventExecution(any())).thenReturn(true);
        added = executionDAOFacade.addEventExecution(new EventExecution());
        assertTrue(added);
        verify(indexDAO, times(1)).asyncAddEventExecution(any());
    }

    /** A payload-upload failure during updateTask propagates TerminateWorkflowException. */
    @Test(expected = TerminateWorkflowException.class)
    public void testUpdateTaskThrowsTerminateWorkflowException() {
        TaskModel task = new TaskModel();
        task.setScheduledTime(1L);
        task.setSeq(1);
        task.setTaskId(UUID.randomUUID().toString());
        task.setTaskDefName("task1");
        doThrow(new TerminateWorkflowException("failed"))
                .when(externalPayloadStorageUtils)
                .verifyAndUpload(task, ExternalPayloadStorage.PayloadType.TASK_OUTPUT);
        executionDAOFacade.updateTask(task);
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/core/src/test/java/com/netflix/conductor/core/metadata/MetadataMapperServiceTest.java | core/src/test/java/com/netflix/conductor/core/metadata/MetadataMapperServiceTest.java | /*
* Copyright 2022 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.core.metadata;
import java.util.List;
import java.util.Optional;
import java.util.Set;
import org.junit.After;
import org.junit.Assert;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.autoconfigure.EnableAutoConfiguration;
import org.springframework.boot.test.context.TestConfiguration;
import org.springframework.context.annotation.Bean;
import org.springframework.test.context.junit4.SpringRunner;
import com.netflix.conductor.common.metadata.tasks.TaskDef;
import com.netflix.conductor.common.metadata.tasks.TaskType;
import com.netflix.conductor.common.metadata.workflow.SubWorkflowParams;
import com.netflix.conductor.common.metadata.workflow.WorkflowDef;
import com.netflix.conductor.common.metadata.workflow.WorkflowTask;
import com.netflix.conductor.core.exception.NotFoundException;
import com.netflix.conductor.core.exception.TerminateWorkflowException;
import com.netflix.conductor.dao.MetadataDAO;
import jakarta.validation.ConstraintViolationException;
import static com.netflix.conductor.TestUtils.getConstraintViolationMessages;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.reset;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.verifyNoInteractions;
import static org.mockito.Mockito.verifyNoMoreInteractions;
import static org.mockito.Mockito.when;
@SuppressWarnings("SpringJavaAutowiredMembersInspection")
@RunWith(SpringRunner.class)
@EnableAutoConfiguration
public class MetadataMapperServiceTest {
// Test-scoped Spring wiring: exposes a mocked MetadataDAO and the service under
// test built on top of it. Bean method names are part of the wiring contract.
@TestConfiguration
static class TestMetadataMapperServiceConfiguration {

    // Mocked DAO so individual tests can stub task/workflow definition lookups.
    @Bean
    public MetadataDAO metadataDAO() {
        return mock(MetadataDAO.class);
    }

    // Service under test, wired against the mocked DAO above.
    @Bean
    public MetadataMapperService metadataMapperService(MetadataDAO metadataDAO) {
        return new MetadataMapperService(metadataDAO);
    }
}
@Autowired private MetadataDAO metadataDAO;
@Autowired private MetadataMapperService metadataMapperService;
// Reset stubbings/interactions after each test: the mock bean is shared
// across tests via the cached Spring test context.
@After
public void cleanUp() {
    reset(metadataDAO);
}
/** A task without an embedded definition gets its TaskDef fetched from the DAO. */
@Test
public void testMetadataPopulationOnSimpleTask() {
    String taskName = "task1";
    when(metadataDAO.getTaskDef(taskName)).thenReturn(createTaskDefinition(taskName));

    WorkflowDef workflowDef = createWorkflowDefinition("testMetadataPopulation");
    workflowDef.setTasks(List.of(createWorkflowTask(taskName)));

    metadataMapperService.populateTaskDefinitions(workflowDef);

    assertEquals(1, workflowDef.getTasks().size());
    assertNotNull(workflowDef.getTasks().get(0).getTaskDefinition());
    verify(metadataDAO).getTaskDef(taskName);
}
/** A task that already embeds its definition must not trigger a DAO lookup. */
@Test
public void testNoMetadataPopulationOnEmbeddedTaskDefinition() {
    String taskName = "task2";
    WorkflowTask task = createWorkflowTask(taskName);
    task.setTaskDefinition(createTaskDefinition(taskName));

    WorkflowDef workflowDef = createWorkflowDefinition("testMetadataPopulation");
    workflowDef.setTasks(List.of(task));

    metadataMapperService.populateTaskDefinitions(workflowDef);

    assertEquals(1, workflowDef.getTasks().size());
    assertNotNull(workflowDef.getTasks().get(0).getTaskDefinition());
    verifyNoInteractions(metadataDAO);
}
/** Only tasks lacking an embedded definition should hit the DAO. */
@Test
public void testMetadataPopulationOnlyOnNecessaryWorkflowTasks() {
    String embeddedTaskName = "task4";
    String lookupTaskName = "task5";

    // The same TaskDef instance serves both the embedded task and the DAO stub.
    TaskDef sharedDefinition = createTaskDefinition(embeddedTaskName);
    WorkflowTask embeddedTask = createWorkflowTask(embeddedTaskName);
    embeddedTask.setTaskDefinition(sharedDefinition);
    WorkflowTask lookupTask = createWorkflowTask(lookupTaskName);

    when(metadataDAO.getTaskDef(lookupTaskName)).thenReturn(sharedDefinition);

    WorkflowDef workflowDef = createWorkflowDefinition("testMetadataPopulation");
    workflowDef.setTasks(List.of(embeddedTask, lookupTask));

    metadataMapperService.populateTaskDefinitions(workflowDef);

    List<WorkflowTask> tasks = workflowDef.getTasks();
    assertEquals(2, tasks.size());
    assertNotNull(tasks.get(0).getTaskDefinition());
    assertNotNull(tasks.get(1).getTaskDefinition());
    verify(metadataDAO).getTaskDef(lookupTaskName);
    verifyNoMoreInteractions(metadataDAO);
}
@Test
public void testMetadataPopulationMissingDefinitions() {
    WorkflowTask firstTask = createWorkflowTask("task4");
    WorkflowTask secondTask = createWorkflowTask("task5");

    WorkflowDef workflowDef = createWorkflowDefinition("testMetadataPopulation");
    workflowDef.setTasks(List.of(firstTask, secondTask));

    // Only the first task has a stored definition; the second lookup returns null.
    when(metadataDAO.getTaskDef("task4")).thenReturn(createTaskDefinition("task4"));
    when(metadataDAO.getTaskDef("task5")).thenReturn(null);

    try {
        metadataMapperService.populateTaskDefinitions(workflowDef);
    } catch (NotFoundException nfe) {
        // A missing definition must be defaulted, not rejected.
        fail("Missing TaskDefinitions are not defaulted");
    }
}
@Test
public void testVersionPopulationForSubworkflowTaskIfVersionIsNotAvailable() {
    String nameTaskDefinition = "taskSubworkflow6";
    String workflowDefinitionName = "subworkflow";
    int version = 3;

    // Fix: create the sub-workflow definition with the looked-up name variable.
    // The previous version passed the literal string "workflowDefinitionName",
    // which only worked because the mock below ignores the definition's name.
    WorkflowDef subWorkflowDefinition = createWorkflowDefinition(workflowDefinitionName);
    subWorkflowDefinition.setVersion(version);

    WorkflowTask workflowTask = createWorkflowTask(nameTaskDefinition);
    workflowTask.setWorkflowTaskType(TaskType.SUB_WORKFLOW);
    SubWorkflowParams subWorkflowParams = new SubWorkflowParams();
    subWorkflowParams.setName(workflowDefinitionName);
    // No version set on purpose: the service must fill it from the latest definition.
    workflowTask.setSubWorkflowParam(subWorkflowParams);

    WorkflowDef workflowDefinition = createWorkflowDefinition("testMetadataPopulation");
    workflowDefinition.setTasks(List.of(workflowTask));

    when(metadataDAO.getLatestWorkflowDef(workflowDefinitionName))
            .thenReturn(Optional.of(subWorkflowDefinition));

    metadataMapperService.populateTaskDefinitions(workflowDefinition);

    assertEquals(1, workflowDefinition.getTasks().size());
    List<WorkflowTask> workflowTasks = workflowDefinition.getTasks();
    SubWorkflowParams params = workflowTasks.get(0).getSubWorkflowParam();
    assertEquals(workflowDefinitionName, params.getName());
    assertEquals(version, params.getVersion().intValue());

    verify(metadataDAO).getLatestWorkflowDef(workflowDefinitionName);
    verify(metadataDAO).getTaskDef(nameTaskDefinition);
    verifyNoMoreInteractions(metadataDAO);
}
@Test
public void testNoVersionPopulationForSubworkflowTaskIfAvailable() {
    // When the sub-workflow version is pinned explicitly, no definition lookup happens.
    String taskName = "taskSubworkflow7";
    String subWorkflowName = "subworkflow";
    Integer pinnedVersion = 2;

    SubWorkflowParams params = new SubWorkflowParams();
    params.setName(subWorkflowName);
    params.setVersion(pinnedVersion);

    WorkflowTask subWorkflowTask = createWorkflowTask(taskName);
    subWorkflowTask.setWorkflowTaskType(TaskType.SUB_WORKFLOW);
    subWorkflowTask.setSubWorkflowParam(params);

    WorkflowDef workflowDef = createWorkflowDefinition("testMetadataPopulation");
    workflowDef.setTasks(List.of(subWorkflowTask));

    metadataMapperService.populateTaskDefinitions(workflowDef);

    assertEquals(1, workflowDef.getTasks().size());
    SubWorkflowParams populated = workflowDef.getTasks().get(0).getSubWorkflowParam();
    assertEquals(subWorkflowName, populated.getName());
    assertEquals(pinnedVersion, populated.getVersion());

    // Only the task-definition lookup is expected — never getLatestWorkflowDef.
    verify(metadataDAO).getTaskDef(taskName);
    verifyNoMoreInteractions(metadataDAO);
}
@Test(expected = TerminateWorkflowException.class)
public void testExceptionWhenWorkflowDefinitionNotAvailable() {
    String nameTaskDefinition = "taskSubworkflow8";
    String workflowDefinitionName = "subworkflow";

    WorkflowTask workflowTask = createWorkflowTask(nameTaskDefinition);
    workflowTask.setWorkflowTaskType(TaskType.SUB_WORKFLOW);
    SubWorkflowParams subWorkflowParams = new SubWorkflowParams();
    subWorkflowParams.setName(workflowDefinitionName);
    workflowTask.setSubWorkflowParam(subWorkflowParams);

    WorkflowDef workflowDefinition = createWorkflowDefinition("testMetadataPopulation");
    workflowDefinition.setTasks(List.of(workflowTask));

    // No latest definition exists for the sub-workflow name.
    when(metadataDAO.getLatestWorkflowDef(workflowDefinitionName)).thenReturn(Optional.empty());

    // Must throw TerminateWorkflowException (asserted via @Test(expected = ...)).
    // Fix: the verify() that used to follow this call was unreachable dead code —
    // the call always throws here, so it has been removed.
    metadataMapperService.populateTaskDefinitions(workflowDefinition);
}
// NOTE(review): this test mixes two failure models. It expects an
// IllegalArgumentException from lookupWorkflowDefinition(null, 0), yet also
// catches ConstraintViolationException and asserts a message ("WorkflowIds
// list cannot be null.") that appears copy-pasted from an unrelated
// bulk-workflow test — confirm which exception the service actually throws
// for a null name and remove the dead branch.
@Test(expected = IllegalArgumentException.class)
public void testLookupWorkflowDefinition() {
    try {
        String workflowName = "test";
        // Happy path: an explicitly versioned definition is resolvable.
        when(metadataDAO.getWorkflowDef(workflowName, 0))
                .thenReturn(Optional.of(new WorkflowDef()));
        Optional<WorkflowDef> optionalWorkflowDef =
                metadataMapperService.lookupWorkflowDefinition(workflowName, 0);
        assertTrue(optionalWorkflowDef.isPresent());
        // Null name is expected to fail — presumably with IllegalArgumentException
        // (per the annotation above); verify against the service implementation.
        metadataMapperService.lookupWorkflowDefinition(null, 0);
    } catch (ConstraintViolationException ex) {
        Assert.assertEquals(1, ex.getConstraintViolations().size());
        Set<String> messages = getConstraintViolationMessages(ex.getConstraintViolations());
        assertTrue(messages.contains("WorkflowIds list cannot be null."));
    }
}
@Test(expected = IllegalArgumentException.class)
public void testLookupLatestWorkflowDefinition() {
    String workflowName = "test";
    when(metadataDAO.getLatestWorkflowDef(workflowName))
            .thenReturn(Optional.of(new WorkflowDef()));

    // Happy path: the latest definition for a known name is resolvable.
    assertTrue(metadataMapperService.lookupLatestWorkflowDefinition(workflowName).isPresent());

    // A null workflow name must be rejected with IllegalArgumentException.
    metadataMapperService.lookupLatestWorkflowDefinition(null);
}
@Test
public void testShouldNotPopulateTaskDefinition() {
    // A task with a blank name has nothing to look up.
    assertFalse(metadataMapperService.shouldPopulateTaskDefinition(createWorkflowTask("")));
}
@Test
public void testShouldPopulateTaskDefinition() {
    // A named task without an embedded definition requires population.
    assertTrue(metadataMapperService.shouldPopulateTaskDefinition(createWorkflowTask("test")));
}
@Test
public void testMetadataPopulationOnSimpleTaskDefMissing() {
    String taskName = "task1";
    // Metadata store has no definition for this SIMPLE task.
    when(metadataDAO.getTaskDef(taskName)).thenReturn(null);

    WorkflowDef workflowDef = createWorkflowDefinition("testMetadataPopulation");
    workflowDef.setTasks(List.of(createWorkflowTask(taskName)));

    metadataMapperService.populateTaskDefinitions(workflowDef);

    // The missing stored definition is defaulted rather than left null.
    assertEquals(1, workflowDef.getTasks().size());
    assertNotNull(workflowDef.getTasks().get(0).getTaskDefinition());
}
/** Builds a minimal workflow definition with only the name populated. */
private WorkflowDef createWorkflowDefinition(String name) {
    WorkflowDef definition = new WorkflowDef();
    definition.setName(name);
    return definition;
}
/** Builds a SIMPLE-typed workflow task with the given name. */
private WorkflowTask createWorkflowTask(String name) {
    WorkflowTask task = new WorkflowTask();
    task.setName(name);
    task.setType(TaskType.SIMPLE.name());
    return task;
}
/** Builds a task definition carrying only the given name. */
private TaskDef createTaskDefinition(String name) {
    return new TaskDef(name);
}
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/core/src/test/java/com/netflix/conductor/core/execution/TestDeciderService.java | core/src/test/java/com/netflix/conductor/core/execution/TestDeciderService.java | /*
* Copyright 2022 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.core.execution;
import java.io.IOException;
import java.io.InputStream;
import java.time.Duration;
import java.util.*;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import java.util.function.Function;
import java.util.stream.Collectors;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.ExpectedException;
import org.junit.runner.RunWith;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Qualifier;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.ComponentScan;
import org.springframework.context.annotation.Configuration;
import org.springframework.test.context.ContextConfiguration;
import org.springframework.test.context.junit4.SpringRunner;
import com.netflix.conductor.common.config.TestObjectMapperConfiguration;
import com.netflix.conductor.common.metadata.tasks.TaskDef;
import com.netflix.conductor.common.metadata.tasks.TaskDef.TimeoutPolicy;
import com.netflix.conductor.common.metadata.tasks.TaskType;
import com.netflix.conductor.common.metadata.workflow.SubWorkflowParams;
import com.netflix.conductor.common.metadata.workflow.WorkflowDef;
import com.netflix.conductor.common.metadata.workflow.WorkflowTask;
import com.netflix.conductor.common.utils.TaskUtils;
import com.netflix.conductor.core.exception.TerminateWorkflowException;
import com.netflix.conductor.core.execution.DeciderService.DeciderOutcome;
import com.netflix.conductor.core.execution.mapper.TaskMapper;
import com.netflix.conductor.core.execution.tasks.SubWorkflow;
import com.netflix.conductor.core.execution.tasks.SystemTaskRegistry;
import com.netflix.conductor.core.execution.tasks.WorkflowSystemTask;
import com.netflix.conductor.core.utils.ExternalPayloadStorageUtils;
import com.netflix.conductor.core.utils.IDGenerator;
import com.netflix.conductor.core.utils.ParametersUtils;
import com.netflix.conductor.dao.MetadataDAO;
import com.netflix.conductor.model.TaskModel;
import com.netflix.conductor.model.WorkflowModel;
import com.fasterxml.jackson.databind.ObjectMapper;
import io.micrometer.core.instrument.Counter;
import io.micrometer.core.instrument.MeterRegistry;
import io.micrometer.core.instrument.simple.SimpleMeterRegistry;
import static com.netflix.conductor.common.metadata.tasks.TaskType.*;
import static org.junit.Assert.*;
import static org.mockito.ArgumentMatchers.*;
import static org.mockito.Mockito.*;
@ContextConfiguration(
classes = {TestObjectMapperConfiguration.class, TestDeciderService.TestConfiguration.class})
@RunWith(SpringRunner.class)
public class TestDeciderService {
@Configuration
@ComponentScan(basePackageClasses = TaskMapper.class) // loads all TaskMapper beans
public static class TestConfiguration {

    /** Real SubWorkflow system task registered under its canonical task-type name. */
    @Bean(TASK_TYPE_SUB_WORKFLOW)
    public SubWorkflow subWorkflow(ObjectMapper objectMapper) {
        return new SubWorkflow(objectMapper);
    }

    /** Stub system task whose completion is always driven asynchronously. */
    @Bean("asyncCompleteSystemTask")
    public WorkflowSystemTaskStub asyncCompleteSystemTask() {
        return new WorkflowSystemTaskStub("asyncCompleteSystemTask") {
            @Override
            public boolean isAsyncComplete(TaskModel task) {
                return true;
            }
        };
    }

    /** Registry over every WorkflowSystemTask bean in this context. */
    @Bean
    public SystemTaskRegistry systemTaskRegistry(Set<WorkflowSystemTask> tasks) {
        return new SystemTaskRegistry(tasks);
    }

    /** Mockito mock so tests can stub metadata lookups. */
    @Bean
    public MetadataDAO mockMetadataDAO() {
        return mock(MetadataDAO.class);
    }

    /** Indexes every discovered TaskMapper by the task type it handles. */
    @Bean
    public Map<String, TaskMapper> taskMapperMap(Collection<TaskMapper> taskMappers) {
        return taskMappers.stream()
                .collect(Collectors.toMap(TaskMapper::getTaskType, mapper -> mapper));
    }

    @Bean
    public ParametersUtils parametersUtils(ObjectMapper mapper) {
        return new ParametersUtils(mapper);
    }

    @Bean
    public IDGenerator idGenerator() {
        return new IDGenerator();
    }
}
// Service under test; rebuilt before every test in setup().
private DeciderService deciderService;
// Mocked payload-storage helper passed into the DeciderService constructor.
private ExternalPayloadStorageUtils externalPayloadStorageUtils;
// Shared registry created once in init(); used by timeout tests.
private static MeterRegistry registry;
@Autowired private ObjectMapper objectMapper;
@Autowired private SystemTaskRegistry systemTaskRegistry;
@Autowired
@Qualifier("taskMapperMap")
private Map<String, TaskMapper> taskMappers;
@Autowired private ParametersUtils parametersUtils;
// Mock bean provided by TestConfiguration.mockMetadataDAO(); stubbed per test.
@Autowired private MetadataDAO metadataDAO;
// Used by retry tests to assert on TerminateWorkflowException mid-method.
@Rule public ExpectedException exception = ExpectedException.none();
@BeforeClass
public static void init() {
    // Single in-memory registry shared by all tests in this class.
    registry = new SimpleMeterRegistry();
}
@Before
public void setup() {
    // Fresh payload-storage mock for every test.
    externalPayloadStorageUtils = mock(ExternalPayloadStorageUtils.class);

    // Default stubbing: every task name resolves to an empty TaskDef and every
    // workflow name resolves to a minimal "latest" definition.
    TaskDef defaultTaskDef = new TaskDef();
    when(metadataDAO.getTaskDef(any())).thenReturn(defaultTaskDef);

    WorkflowDef latestWorkflowDef = new WorkflowDef();
    latestWorkflowDef.setName("TestDeciderService");
    latestWorkflowDef.setVersion(1);
    when(metadataDAO.getLatestWorkflowDef(any())).thenReturn(Optional.of(latestWorkflowDef));

    deciderService =
            new DeciderService(
                    new IDGenerator(),
                    parametersUtils,
                    metadataDAO,
                    externalPayloadStorageUtils,
                    systemTaskRegistry,
                    taskMappers,
                    Duration.ofMinutes(60));
}
@Test
public void testGetTaskInputV2() {
    WorkflowModel workflow = createDefaultWorkflow();
    workflow.getWorkflowDefinition().setSchemaVersion(2);

    Map<String, Object> inputParams = new HashMap<>();
    inputParams.put("workflowInputParam", "${workflow.input.requestId}");
    inputParams.put("taskOutputParam", "${task2.output.location}");
    inputParams.put("taskOutputParam2", "${task2.output.locationBad}");
    inputParams.put("taskOutputParam3", "${task3.output.location}");
    inputParams.put("constParam", "Some String value");
    inputParams.put("nullValue", null);
    inputParams.put("task2Status", "${task2.status}");
    inputParams.put("channelMap", "${workflow.input.channelMapping}");

    Map<String, Object> resolved =
            parametersUtils.getTaskInput(inputParams, workflow, null, null);

    assertNotNull(resolved);
    // Every declared key is present, even when its expression resolved to null.
    for (String key :
            List.of(
                    "workflowInputParam",
                    "taskOutputParam",
                    "taskOutputParam2",
                    "taskOutputParam3")) {
        assertTrue(resolved.containsKey(key));
    }
    assertNull(resolved.get("taskOutputParam2"));
    assertNotNull(resolved.get("channelMap"));
    assertEquals(5, resolved.get("channelMap"));
    assertEquals("request id 001", resolved.get("workflowInputParam"));
    assertEquals("http://location", resolved.get("taskOutputParam"));
    assertNull(resolved.get("taskOutputParam3"));
    assertNull(resolved.get("nullValue"));
    assertEquals(
            workflow.getTasks().get(0).getStatus().name(),
            resolved.get("task2Status")); // task2 and task3 are the tasks respectively
}
@Test
public void testGetTaskInputV2Partial() {
    WorkflowModel workflow = createDefaultWorkflow();
    // Fix: save and restore the EC2_INSTANCE system property — the previous
    // version set it and never cleaned up, leaking JVM-global state into any
    // test that runs afterwards in the same process.
    String previousEc2Instance = System.getProperty("EC2_INSTANCE");
    System.setProperty("EC2_INSTANCE", "i-123abcdef990");
    try {
        workflow.getWorkflowDefinition().setSchemaVersion(2);

        Map<String, Object> inputParams = new HashMap<>();
        inputParams.put("workflowInputParam", "${workflow.input.requestId}");
        inputParams.put("workfowOutputParam", "${workflow.output.name}");
        inputParams.put("taskOutputParam", "${task2.output.location}");
        inputParams.put("taskOutputParam2", "${task2.output.locationBad}");
        inputParams.put("taskOutputParam3", "${task3.output.location}");
        inputParams.put("constParam", "Some String value &");
        inputParams.put("partial", "${task2.output.location}/something?host=${EC2_INSTANCE}");
        inputParams.put("jsonPathExtracted", "${workflow.output.names[*].year}");
        inputParams.put("secondName", "${workflow.output.names[1].name}");
        inputParams.put(
                "concatenatedName",
                "The Band is: ${workflow.output.names[1].name}-\t${EC2_INSTANCE}");

        TaskDef taskDef = new TaskDef();
        taskDef.getInputTemplate().put("opname", "${workflow.output.name}");
        List<Object> listParams = new LinkedList<>();
        List<Object> listParams2 = new LinkedList<>();
        listParams2.add("${workflow.input.requestId}-10-${EC2_INSTANCE}");
        listParams.add(listParams2);
        Map<String, Object> map = new HashMap<>();
        map.put("name", "${workflow.output.names[0].name}");
        map.put("hasAwards", "${workflow.input.hasAwards}");
        listParams.add(map);
        taskDef.getInputTemplate().put("listValues", listParams);

        Map<String, Object> taskInput =
                parametersUtils.getTaskInput(inputParams, workflow, taskDef, null);
        assertNotNull(taskInput);
        assertTrue(taskInput.containsKey("workflowInputParam"));
        assertTrue(taskInput.containsKey("taskOutputParam"));
        assertTrue(taskInput.containsKey("taskOutputParam2"));
        assertTrue(taskInput.containsKey("taskOutputParam3"));
        assertNull(taskInput.get("taskOutputParam2"));

        // JSONPath extraction over structured workflow output.
        assertNotNull(taskInput.get("jsonPathExtracted"));
        assertTrue(taskInput.get("jsonPathExtracted") instanceof List);
        assertNotNull(taskInput.get("secondName"));
        assertTrue(taskInput.get("secondName") instanceof String);
        assertEquals("The Doors", taskInput.get("secondName"));
        assertEquals(
                "The Band is: The Doors-\ti-123abcdef990", taskInput.get("concatenatedName"));

        assertEquals("request id 001", taskInput.get("workflowInputParam"));
        assertEquals("http://location", taskInput.get("taskOutputParam"));
        assertNull(taskInput.get("taskOutputParam3"));

        // Partial substitution: expression mixed with literal text and a system property.
        assertNotNull(taskInput.get("partial"));
        assertEquals("http://location/something?host=i-123abcdef990", taskInput.get("partial"));
    } finally {
        if (previousEc2Instance == null) {
            System.clearProperty("EC2_INSTANCE");
        } else {
            System.setProperty("EC2_INSTANCE", previousEc2Instance);
        }
    }
}
@SuppressWarnings("unchecked")
@Test
public void testGetTaskInput() {
    Map<String, Object> inputParams = new HashMap<>();
    inputParams.put("workflowInputParam", "${workflow.input.requestId}");
    inputParams.put("taskOutputParam", "${task2.output.location}");

    // A nested JSON structure containing expressions at different depths.
    Map<String, Object> person = new HashMap<>();
    person.put("name", "person name");
    person.put("city", "New York");
    person.put("phone", 2120001234);
    person.put("status", "${task2.output.isPersonActive}");

    Map<String, Object> employment = new HashMap<>();
    employment.put("employer", "City Of New York");
    employment.put("color", "purple");
    employment.put("requestId", "${workflow.input.requestId}");

    List<Map<String, Object>> complexJson = new LinkedList<>();
    complexJson.add(person);
    complexJson.add(employment);
    inputParams.put("complexJson", complexJson);

    WorkflowDef def = new WorkflowDef();
    def.setName("testGetTaskInput");
    def.setSchemaVersion(2);

    WorkflowModel workflow = new WorkflowModel();
    workflow.setWorkflowDefinition(def);
    workflow.getInput().put("requestId", "request id 001");

    TaskModel task = new TaskModel();
    task.setReferenceTaskName("task2");
    task.addOutput("location", "http://location");
    task.addOutput("isPersonActive", true);
    workflow.getTasks().add(task);

    Map<String, Object> taskInput =
            parametersUtils.getTaskInput(inputParams, workflow, null, null);

    assertNotNull(taskInput);
    assertTrue(taskInput.containsKey("workflowInputParam"));
    assertTrue(taskInput.containsKey("taskOutputParam"));
    assertEquals("request id 001", taskInput.get("workflowInputParam"));
    assertEquals("http://location", taskInput.get("taskOutputParam"));

    Object resolvedJson = taskInput.get("complexJson");
    assertNotNull(resolvedJson);
    assertTrue(resolvedJson instanceof List);
    assertEquals(2, ((List<Map<String, Object>>) resolvedJson).size());
}
@Test
public void testGetTaskInputV1() {
    // Schema v1 uses bare dotted paths rather than ${...} expressions.
    Map<String, Object> inputParams = new HashMap<>();
    inputParams.put("workflowInputParam", "workflow.input.requestId");
    inputParams.put("taskOutputParam", "task2.output.location");

    WorkflowDef def = new WorkflowDef();
    def.setSchemaVersion(1);

    WorkflowModel workflow = new WorkflowModel();
    workflow.setWorkflowDefinition(def);
    workflow.getInput().put("requestId", "request id 001");

    TaskModel task = new TaskModel();
    task.setReferenceTaskName("task2");
    task.addOutput("location", "http://location");
    task.addOutput("isPersonActive", true);
    workflow.getTasks().add(task);

    Map<String, Object> taskInput =
            parametersUtils.getTaskInput(inputParams, workflow, null, null);

    assertNotNull(taskInput);
    assertTrue(taskInput.containsKey("workflowInputParam"));
    assertTrue(taskInput.containsKey("taskOutputParam"));
    assertEquals("request id 001", taskInput.get("workflowInputParam"));
    assertEquals("http://location", taskInput.get("taskOutputParam"));
}
@Test
public void testGetTaskInputV2WithInputTemplate() {
    // Task-def input template supplies defaults that workflow input can override.
    TaskDef def = new TaskDef();
    Map<String, Object> inputTemplate = new HashMap<>();
    inputTemplate.put("url", "https://some_url:7004");
    inputTemplate.put("default_url", "https://default_url:7004");
    inputTemplate.put("someKey", "someValue");
    def.getInputTemplate().putAll(inputTemplate);

    Map<String, Object> workflowInput = new HashMap<>();
    workflowInput.put("some_new_url", "https://some_new_url:7004");
    workflowInput.put("workflow_input_url", "https://workflow_input_url:7004");
    workflowInput.put("some_other_key", "some_other_value");

    WorkflowDef workflowDef = new WorkflowDef();
    workflowDef.setName("testGetTaskInputV2WithInputTemplate");
    workflowDef.setVersion(1);

    WorkflowModel workflow = new WorkflowModel();
    workflow.setWorkflowDefinition(workflowDef);
    workflow.setInput(workflowInput);

    WorkflowTask workflowTask = new WorkflowTask();
    workflowTask.getInputParameters().put("url", "${workflow.input.some_new_url}");
    workflowTask
            .getInputParameters()
            .put("workflow_input_url", "${workflow.input.workflow_input_url}");
    workflowTask.getInputParameters().put("someKey", "${workflow.input.someKey}");
    workflowTask.getInputParameters().put("someOtherKey", "${workflow.input.some_other_key}");
    workflowTask
            .getInputParameters()
            .put("someNowhereToBeFoundKey", "${workflow.input.some_ne_key}");

    Map<String, Object> taskInput =
            parametersUtils.getTaskInputV2(
                    workflowTask.getInputParameters(), workflow, null, def);

    assertTrue(taskInput.containsKey("url"));
    assertTrue(taskInput.containsKey("default_url"));
    // Fix: assertEquals takes (expected, actual) — the previous version had the
    // arguments reversed on these three assertions, producing misleading
    // "expected X but was Y" failure messages.
    assertEquals("https://some_new_url:7004", taskInput.get("url"));
    assertEquals("https://default_url:7004", taskInput.get("default_url"));
    assertEquals("https://workflow_input_url:7004", taskInput.get("workflow_input_url"));
    assertEquals("some_other_value", taskInput.get("someOtherKey"));
    assertEquals("someValue", taskInput.get("someKey"));
    // An unresolvable expression maps to null.
    assertNull(taskInput.get("someNowhereToBeFoundKey"));
}
@Test
public void testGetNextTask() {
    WorkflowDef def = createNestedWorkflow();

    WorkflowTask firstTask = def.getTasks().get(0);
    assertNotNull(firstTask);
    assertEquals("fork1", firstTask.getTaskReferenceName());

    // Table of simple ref -> next-ref transitions in the nested workflow.
    Map<String, String> expectedNext = new LinkedHashMap<>();
    expectedNext.put("fork1", "join1");
    expectedNext.put("fork2", "join2");
    expectedNext.put("t2", "t4");
    expectedNext.put("t4", "join2");
    expectedNext.put("t6", "t9");
    expectedNext.put("join2", "join1");
    expectedNext.put("join1", "t5");
    expectedNext.put("sw1", "join1");
    expectedNext.put("t9", "join2");
    for (Map.Entry<String, String> transition : expectedNext.entrySet()) {
        WorkflowTask next = def.getNextTask(transition.getKey());
        assertNotNull("no next task after " + transition.getKey(), next);
        assertEquals(transition.getValue(), next.getTaskReferenceName());
    }

    // Tasks must also be resolvable directly by reference name.
    WorkflowTask fork2 = def.getTaskByRefName("fork2");
    assertNotNull(fork2);
    assertEquals("fork2", fork2.getTaskReferenceName());
    WorkflowTask t2 = def.getTaskByRefName("t2");
    assertNotNull(t2);
    assertEquals("t2", t2.getTaskReferenceName());

    // t3 is followed by a decision task.
    WorkflowTask afterT3 = def.getNextTask("t3");
    assertNotNull(afterT3);
    assertEquals(DECISION.name(), afterT3.getType());
    assertEquals("d1", afterT3.getTaskReferenceName());
}
@Test
public void testCaseStatement() {
    WorkflowModel workflow = new WorkflowModel();
    workflow.setWorkflowDefinition(createConditionalWF());
    workflow.setCreateTime(0L);
    workflow.setWorkflowId("a");
    workflow.setCorrelationId("b");
    workflow.setStatus(WorkflowModel.Status.RUNNING);

    DeciderOutcome outcome = deciderService.decide(workflow);
    List<TaskModel> scheduled = outcome.tasksToBeScheduled;

    // Two tasks come back: one already IN_PROGRESS, one newly SCHEDULED.
    assertNotNull(scheduled);
    assertEquals(2, scheduled.size());
    assertEquals(TaskModel.Status.IN_PROGRESS, scheduled.get(0).getStatus());
    assertEquals(TaskModel.Status.SCHEDULED, scheduled.get(1).getStatus());
}
@Test
public void testGetTaskByRef() {
    WorkflowModel workflow = new WorkflowModel();

    // Three executions of the same reference name, in sequence order.
    TaskModel.Status[] statuses = {
        TaskModel.Status.TIMED_OUT, TaskModel.Status.FAILED, TaskModel.Status.COMPLETED
    };
    for (int seq = 0; seq < statuses.length; seq++) {
        TaskModel attempt = new TaskModel();
        attempt.setReferenceTaskName("ref");
        attempt.setSeq(seq);
        attempt.setStatus(statuses[seq]);
        workflow.getTasks().add(attempt);
    }

    // The lookup must return the latest (highest-seq) execution.
    TaskModel found = workflow.getTaskByRefName("ref");
    assertNotNull(found);
    assertEquals(TaskModel.Status.COMPLETED, found.getStatus());
    assertEquals(2, found.getSeq());
}
@Test
public void testTaskTimeout() {
    // NOTE: the previous version created a "task_timeout" Counter from the shared
    // registry and captured its count without ever asserting on either — that
    // dead code has been removed. The local flag was also renamed: it used to be
    // called "exception", shadowing the @Rule ExpectedException field.
    TaskDef taskType = new TaskDef();
    taskType.setName("test");
    taskType.setTimeoutPolicy(TimeoutPolicy.RETRY);
    taskType.setTimeoutSeconds(1);

    TaskModel task = new TaskModel();
    task.setTaskType(taskType.getName());
    task.setStartTime(System.currentTimeMillis() - 2_000); // 2 seconds ago!
    task.setStatus(TaskModel.Status.IN_PROGRESS);
    deciderService.checkTaskTimeout(taskType, task);

    // RETRY policy: the task is marked timed out with a reason.
    assertEquals(TaskModel.Status.TIMED_OUT, task.getStatus());
    assertNotNull(task.getReasonForIncompletion());

    // ALERT_ONLY policy: nothing happens to the task.
    taskType.setTimeoutPolicy(TimeoutPolicy.ALERT_ONLY);
    task.setStatus(TaskModel.Status.IN_PROGRESS);
    task.setReasonForIncompletion(null);
    deciderService.checkTaskTimeout(taskType, task);
    assertEquals(TaskModel.Status.IN_PROGRESS, task.getStatus());
    assertNull(task.getReasonForIncompletion());

    // TIME_OUT_WF policy: the task times out and the workflow is terminated.
    boolean terminated = false;
    taskType.setTimeoutPolicy(TimeoutPolicy.TIME_OUT_WF);
    task.setStatus(TaskModel.Status.IN_PROGRESS);
    task.setReasonForIncompletion(null);
    try {
        deciderService.checkTaskTimeout(taskType, task);
    } catch (TerminateWorkflowException tw) {
        terminated = true;
    }
    assertTrue(terminated);
    assertEquals(TaskModel.Status.TIMED_OUT, task.getStatus());
    assertNotNull(task.getReasonForIncompletion());

    // Null task definition: the check is a no-op.
    taskType.setTimeoutPolicy(TimeoutPolicy.TIME_OUT_WF);
    task.setStatus(TaskModel.Status.IN_PROGRESS);
    task.setReasonForIncompletion(null);
    deciderService.checkTaskTimeout(null, task); // this will be a no-op
    assertEquals(TaskModel.Status.IN_PROGRESS, task.getStatus());
    assertNull(task.getReasonForIncompletion());
}
@Test
public void testCheckTaskPollTimeout() {
    // NOTE: the previous version created a "task_timeout" Counter and captured
    // its count without ever asserting on either — that dead code has been removed.
    TaskDef taskType = new TaskDef();
    taskType.setName("test");
    taskType.setTimeoutPolicy(TimeoutPolicy.RETRY);
    taskType.setPollTimeoutSeconds(1);

    TaskModel task = new TaskModel();
    task.setTaskType(taskType.getName());
    task.setScheduledTime(System.currentTimeMillis() - 2_000);
    task.setStatus(TaskModel.Status.SCHEDULED);
    deciderService.checkTaskPollTimeout(taskType, task);

    // Scheduled 2s ago with a 1s poll timeout: the task has poll-timed out.
    assertEquals(TaskModel.Status.TIMED_OUT, task.getStatus());
    assertNotNull(task.getReasonForIncompletion());

    // Freshly scheduled task: no timeout.
    task.setScheduledTime(System.currentTimeMillis());
    task.setReasonForIncompletion(null);
    task.setStatus(TaskModel.Status.SCHEDULED);
    deciderService.checkTaskPollTimeout(taskType, task);
    assertEquals(TaskModel.Status.SCHEDULED, task.getStatus());
    assertNull(task.getReasonForIncompletion());
}
@SuppressWarnings("unchecked")
@Test
public void testConcurrentTaskInputCalc() throws InterruptedException {
    // Template with expressions resolved concurrently by 10 workers, each with
    // its own workflow input; resolved values must not bleed between threads.
    TaskDef def = new TaskDef();
    Map<String, Object> inputMap = new HashMap<>();
    inputMap.put("path", "${workflow.input.inputLocation}");
    inputMap.put("type", "${workflow.input.sourceType}");
    inputMap.put("channelMapping", "${workflow.input.channelMapping}");
    List<Map<String, Object>> input = new LinkedList<>();
    input.add(inputMap);
    Map<String, Object> body = new HashMap<>();
    body.put("input", input);
    def.getInputTemplate().putAll(body);

    ExecutorService executorService = Executors.newFixedThreadPool(10);
    final int[] result = new int[10];
    CountDownLatch latch = new CountDownLatch(10);

    for (int i = 0; i < 10; i++) {
        final int x = i;
        executorService.submit(
                () -> {
                    try {
                        Map<String, Object> workflowInput = new HashMap<>();
                        workflowInput.put("outputLocation", "baggins://outputlocation/" + x);
                        workflowInput.put("inputLocation", "baggins://inputlocation/" + x);
                        workflowInput.put("sourceType", "MuxedSource");
                        workflowInput.put("channelMapping", x);

                        WorkflowDef workflowDef = new WorkflowDef();
                        workflowDef.setName("testConcurrentTaskInputCalc");
                        workflowDef.setVersion(1);

                        WorkflowModel workflow = new WorkflowModel();
                        workflow.setWorkflowDefinition(workflowDef);
                        workflow.setInput(workflowInput);

                        Map<String, Object> taskInput =
                                parametersUtils.getTaskInputV2(
                                        new HashMap<>(), workflow, null, def);

                        Object reqInputObj = taskInput.get("input");
                        assertNotNull(reqInputObj);
                        assertTrue(reqInputObj instanceof List);
                        List<Map<String, Object>> reqInput =
                                (List<Map<String, Object>>) reqInputObj;

                        Object cmObj = reqInput.get(0).get("channelMapping");
                        assertNotNull(cmObj);
                        if (!(cmObj instanceof Number)) {
                            result[x] = -1;
                        } else {
                            result[x] = ((Number) cmObj).intValue();
                        }
                    } catch (Exception e) {
                        e.printStackTrace();
                    } finally {
                        // Fix: count down in finally — previously a worker that threw
                        // (including an AssertionError, which the catch above does not
                        // cover) skipped countDown(), forcing the full one-minute
                        // latch timeout before any failure surfaced.
                        latch.countDown();
                    }
                });
    }

    latch.await(1, TimeUnit.MINUTES);
    if (latch.getCount() > 0) {
        fail(
                "Executions did not complete in a minute. Something wrong with the build server?");
    }
    executorService.shutdownNow();

    // Each worker must have seen exactly its own channelMapping value.
    for (int i = 0; i < result.length; i++) {
        assertEquals(i, result[i]);
    }
}
@SuppressWarnings("unchecked")
@Test
public void testTaskRetry() {
    // Build a v2 workflow whose task input references ${CPEWF_TASK_ID}, so we
    // can verify the retried task re-evaluates that expression with its NEW id.
    WorkflowModel workflow = createDefaultWorkflow();
    workflow.getWorkflowDefinition().setSchemaVersion(2);
    Map<String, Object> inputParams = new HashMap<>();
    inputParams.put("workflowInputParam", "${workflow.input.requestId}");
    inputParams.put("taskOutputParam", "${task2.output.location}");
    inputParams.put("constParam", "Some String value");
    inputParams.put("nullValue", null);
    inputParams.put("task2Status", "${task2.status}");
    inputParams.put("null", null);
    inputParams.put("task_id", "${CPEWF_TASK_ID}");
    // Nested map to confirm the substitution also happens below the top level.
    Map<String, Object> env = new HashMap<>();
    env.put("env_task_id", "${CPEWF_TASK_ID}");
    inputParams.put("env", env);
    Map<String, Object> taskInput =
            parametersUtils.getTaskInput(inputParams, workflow, null, "t1");
    // A FAILED task with id "t1" that is eligible for retry.
    TaskModel task = new TaskModel();
    task.getInputData().putAll(taskInput);
    task.setStatus(TaskModel.Status.FAILED);
    task.setTaskId("t1");
    TaskDef taskDef = new TaskDef();
    WorkflowTask workflowTask = new WorkflowTask();
    workflowTask.getInputParameters().put("task_id", "${CPEWF_TASK_ID}");
    workflowTask.getInputParameters().put("env", env);
    Optional<TaskModel> task2 = deciderService.retry(taskDef, workflowTask, task, workflow);
    // The original task keeps its own id in its already-evaluated input...
    assertEquals("t1", task.getInputData().get("task_id"));
    assertEquals(
            "t1", ((Map<String, Object>) task.getInputData().get("env")).get("env_task_id"));
    // ...while the retried task gets a fresh id, re-substituted at both levels.
    assertNotSame(task.getTaskId(), task2.get().getTaskId());
    assertEquals(task2.get().getTaskId(), task2.get().getInputData().get("task_id"));
    assertEquals(
            task2.get().getTaskId(),
            ((Map<String, Object>) task2.get().getInputData().get("env")).get("env_task_id"));
    // FAILED_WITH_TERMINAL_ERROR must not be retried: the decider terminates the
    // workflow instead. The ExpectedException rule is armed only now, so the
    // assertions above are unaffected — keep this ordering.
    TaskModel task3 = new TaskModel();
    task3.getInputData().putAll(taskInput);
    task3.setStatus(TaskModel.Status.FAILED_WITH_TERMINAL_ERROR);
    task3.setTaskId("t1");
    when(metadataDAO.getWorkflowDef(anyString(), anyInt()))
            .thenReturn(Optional.of(new WorkflowDef()));
    exception.expect(TerminateWorkflowException.class);
    deciderService.retry(taskDef, workflowTask, task3, workflow);
}
@SuppressWarnings("unchecked")
@Test
public void testWorkflowTaskRetry() {
    WorkflowModel workflow = createDefaultWorkflow();
    workflow.getWorkflowDefinition().setSchemaVersion(2);

    // Input parameters exercising workflow-input, task-output, constant, null
    // and CPEWF_TASK_ID substitution.
    Map<String, Object> inputParams = new HashMap<>();
    inputParams.put("workflowInputParam", "${workflow.input.requestId}");
    inputParams.put("taskOutputParam", "${task2.output.location}");
    inputParams.put("constParam", "Some String value");
    inputParams.put("nullValue", null);
    inputParams.put("task2Status", "${task2.status}");
    inputParams.put("null", null);
    inputParams.put("task_id", "${CPEWF_TASK_ID}");
    Map<String, Object> env = new HashMap<>();
    env.put("env_task_id", "${CPEWF_TASK_ID}");
    inputParams.put("env", env);
    Map<String, Object> taskInput =
            parametersUtils.getTaskInput(inputParams, workflow, null, "t1");
    // Create a first failed task
    TaskModel task = new TaskModel();
    task.getInputData().putAll(taskInput);
    task.setStatus(TaskModel.Status.FAILED);
    task.setTaskId("t1");
    TaskDef taskDef = new TaskDef();
    assertEquals(3, taskDef.getRetryCount());
    WorkflowTask workflowTask = new WorkflowTask();
    workflowTask.getInputParameters().put("task_id", "${CPEWF_TASK_ID}");
    workflowTask.getInputParameters().put("env", env);
    // Workflow-task-level retry count (1) overrides the TaskDef default (3).
    workflowTask.setRetryCount(1);
    // Retry the failed task and assert that a new one has been created
    Optional<TaskModel> task2 = deciderService.retry(taskDef, workflowTask, task, workflow);
    assertEquals("t1", task.getInputData().get("task_id"));
    assertEquals(
            "t1", ((Map<String, Object>) task.getInputData().get("env")).get("env_task_id"));
    // The retried task has a fresh id which is re-resolved into its own input.
    assertNotSame(task.getTaskId(), task2.get().getTaskId());
    assertEquals(task2.get().getTaskId(), task2.get().getInputData().get("task_id"));
    assertEquals(
            task2.get().getTaskId(),
            ((Map<String, Object>) task2.get().getInputData().get("env")).get("env_task_id"));
    // Set the retried task to FAILED, retry it again and assert that the workflow failed
    task2.get().setStatus(TaskModel.Status.FAILED);
    // NOTE(review): once exception.expect(...) is armed, the ExpectedException
    // rule passes as soon as retry(...) throws — the two assertions below are
    // never executed (dead code). Confirm intent and consider restructuring
    // with assertThrows so the workflow-status assertion actually runs.
    exception.expect(TerminateWorkflowException.class);
    final Optional<TaskModel> task3 =
            deciderService.retry(taskDef, workflowTask, task2.get(), workflow);
    assertFalse(task3.isPresent());
    assertEquals(WorkflowModel.Status.FAILED, workflow.getStatus());
}
@Test
public void testLinearBackoff() {
WorkflowModel workflow = createDefaultWorkflow();
TaskModel task = new TaskModel();
task.setStatus(TaskModel.Status.FAILED);
task.setTaskId("t1");
TaskDef taskDef = new TaskDef();
taskDef.setRetryDelaySeconds(60);
taskDef.setRetryLogic(TaskDef.RetryLogic.LINEAR_BACKOFF);
taskDef.setBackoffScaleFactor(2);
WorkflowTask workflowTask = new WorkflowTask();
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | true |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/core/src/test/java/com/netflix/conductor/core/execution/WorkflowSystemTaskStub.java | core/src/test/java/com/netflix/conductor/core/execution/WorkflowSystemTaskStub.java | /*
* Copyright 2022 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.core.execution;
import com.netflix.conductor.core.execution.tasks.WorkflowSystemTask;
import com.netflix.conductor.model.TaskModel;
import com.netflix.conductor.model.WorkflowModel;
/**
 * Test double for {@code WorkflowSystemTask} that records whether {@link #start}
 * was invoked and immediately completes the task.
 */
public class WorkflowSystemTaskStub extends WorkflowSystemTask {

    // Flipped to true the first time start(...) is called.
    private boolean startInvoked;

    public WorkflowSystemTaskStub(String taskType) {
        super(taskType);
    }

    /** Marks the stub as started, completes the task, then delegates to the base class. */
    @Override
    public void start(WorkflowModel workflow, TaskModel task, WorkflowExecutor executor) {
        this.startInvoked = true;
        task.setStatus(TaskModel.Status.COMPLETED);
        super.start(workflow, task, executor);
    }

    /** Returns {@code true} once {@link #start} has been called. */
    public boolean isStarted() {
        return startInvoked;
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/core/src/test/java/com/netflix/conductor/core/execution/TestWorkflowDef.java | core/src/test/java/com/netflix/conductor/core/execution/TestWorkflowDef.java | /*
* Copyright 2020 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.core.execution;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import org.junit.Test;
import com.netflix.conductor.common.metadata.tasks.TaskType;
import com.netflix.conductor.common.metadata.workflow.WorkflowDef;
import com.netflix.conductor.common.metadata.workflow.WorkflowTask;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
/** Unit tests for {@link WorkflowDef#containsType} and {@link WorkflowDef#getNextTask}. */
public class TestWorkflowDef {

    @Test
    public void testContainsType() {
        WorkflowDef def = createDecisionWorkflowDef();

        assertTrue(def.containsType(TaskType.SIMPLE.name()));
        assertTrue(def.containsType(TaskType.DECISION.name()));
        assertFalse(def.containsType(TaskType.DO_WHILE.name()));
    }

    @Test
    public void testGetNextTask_Decision() {
        WorkflowDef def = createDecisionWorkflowDef();

        // Sequential flow at the top level.
        WorkflowTask next = def.getNextTask("simple_task_1");
        assertNotNull(next);
        assertEquals("simple_task_2", next.getTaskReferenceName());

        next = def.getNextTask("simple_task_2");
        assertNotNull(next);
        assertEquals("decision_task_1", next.getTaskReferenceName());

        // After the decision task itself, flow continues past all of its cases.
        next = def.getNextTask("decision_task_1");
        assertNotNull(next);
        assertEquals("simple_task_3", next.getTaskReferenceName());

        // Within a case, tasks follow one another; the last one exits the decision.
        next = def.getNextTask("case_1_task_1");
        assertNotNull(next);
        assertEquals("case_1_task_2", next.getTaskReferenceName());

        next = def.getNextTask("case_1_task_2");
        assertNotNull(next);
        assertEquals("simple_task_3", next.getTaskReferenceName());

        // The nested decision's default branch also exits to the outer flow.
        next = def.getNextTask("case3_def_task");
        assertNotNull(next);
        assertEquals("simple_task_3", next.getTaskReferenceName());

        // Tasks in the nested decision's case follow one another.
        next = def.getNextTask("case31_task_1");
        assertNotNull(next);
        assertEquals("case_31_task_2", next.getTaskReferenceName());
    }

    @Test
    public void testGetNextTask_Conditional() {
        String COND_TASK_WF = "COND_TASK_WF";
        List<WorkflowTask> workflowTasks = new ArrayList<>(10);
        for (int i = 0; i < 10; i++) {
            workflowTasks.add(createWorkflowTask("junit_task_" + i));
        }

        WorkflowDef workflowDef = new WorkflowDef();
        workflowDef.setName(COND_TASK_WF);
        workflowDef.setDescription(COND_TASK_WF);

        // Inner decision nested inside case "c1" of the outer decision.
        WorkflowTask subCaseTask = new WorkflowTask();
        subCaseTask.setType(TaskType.DECISION.name());
        subCaseTask.setCaseValueParam("case2");
        subCaseTask.setName("case2");
        subCaseTask.setTaskReferenceName("case2");
        Map<String, List<WorkflowTask>> dcx = new HashMap<>();
        dcx.put("sc1", workflowTasks.subList(4, 5));
        dcx.put("sc2", workflowTasks.subList(5, 7));
        subCaseTask.setDecisionCases(dcx);

        // Outer decision: case "c1" sandwiches the nested decision between two
        // simple tasks; case "c2" has a single task.
        WorkflowTask caseTask = new WorkflowTask();
        caseTask.setType(TaskType.DECISION.name());
        caseTask.setCaseValueParam("case");
        caseTask.setName("case");
        caseTask.setTaskReferenceName("case");
        Map<String, List<WorkflowTask>> dc = new HashMap<>();
        dc.put("c1", Arrays.asList(workflowTasks.get(0), subCaseTask, workflowTasks.get(1)));
        dc.put("c2", Collections.singletonList(workflowTasks.get(3)));
        caseTask.setDecisionCases(dc);

        workflowDef.getTasks().add(caseTask);
        workflowDef.getTasks().addAll(workflowTasks.subList(8, 9));

        WorkflowTask nextTask = workflowDef.getNextTask("case");
        assertEquals("junit_task_8", nextTask.getTaskReferenceName());

        // The last top-level task has no successor.
        nextTask = workflowDef.getNextTask("junit_task_8");
        assertNull(nextTask);

        nextTask = workflowDef.getNextTask("junit_task_0");
        assertNotNull(nextTask);
        assertEquals("case2", nextTask.getTaskReferenceName());

        nextTask = workflowDef.getNextTask("case2");
        assertNotNull(nextTask);
        assertEquals("junit_task_1", nextTask.getTaskReferenceName());
    }

    /**
     * Builds the workflow shared by {@link #testContainsType} and
     * {@link #testGetNextTask_Decision}: two simple tasks, a DECISION task with
     * three cases (Case3 containing a nested decision), then a final simple task.
     * Extracted to remove the previously duplicated construction code.
     */
    private WorkflowDef createDecisionWorkflowDef() {
        WorkflowDef def = new WorkflowDef();
        def.setName("test_workflow");
        def.setVersion(1);
        def.setSchemaVersion(2);
        def.getTasks().add(createWorkflowTask("simple_task_1"));
        def.getTasks().add(createWorkflowTask("simple_task_2"));

        WorkflowTask decision = createWorkflowTask("decision_task_1");
        def.getTasks().add(decision);
        decision.setType(TaskType.DECISION.name());
        decision.getDecisionCases()
                .put(
                        "Case1",
                        Arrays.asList(
                                createWorkflowTask("case_1_task_1"),
                                createWorkflowTask("case_1_task_2")));
        decision.getDecisionCases()
                .put(
                        "Case2",
                        Arrays.asList(
                                createWorkflowTask("case_2_task_1"),
                                createWorkflowTask("case_2_task_2")));
        decision.getDecisionCases()
                .put(
                        "Case3",
                        Collections.singletonList(
                                deciderTask(
                                        "decision_task_2",
                                        toMap("Case31", "case31_task_1", "case_31_task_2"),
                                        Collections.singletonList("case3_def_task"))));

        def.getTasks().add(createWorkflowTask("simple_task_3"));
        return def;
    }

    /** Creates a simple task whose name and reference name are both {@code name}. */
    private WorkflowTask createWorkflowTask(String name) {
        WorkflowTask task = new WorkflowTask();
        task.setName(name);
        task.setTaskReferenceName(name);
        return task;
    }

    /**
     * Creates a DECISION task with the given case branches (case value -> task
     * reference names) and a default branch built from {@code defaultTasks}.
     */
    private WorkflowTask deciderTask(
            String name, Map<String, List<String>> decisions, List<String> defaultTasks) {
        WorkflowTask task = createWorkflowTask(name);
        task.setType(TaskType.DECISION.name());
        decisions.forEach(
                (key, value) -> {
                    List<WorkflowTask> tasks = new LinkedList<>();
                    value.forEach(taskName -> tasks.add(createWorkflowTask(taskName)));
                    task.getDecisionCases().put(key, tasks);
                });
        List<WorkflowTask> tasks = new LinkedList<>();
        defaultTasks.forEach(defaultTask -> tasks.add(createWorkflowTask(defaultTask)));
        task.setDefaultCase(tasks);
        return task;
    }

    /** Builds a single-entry map from {@code key} to the list of {@code values}. */
    private Map<String, List<String>> toMap(String key, String... values) {
        Map<String, List<String>> map = new HashMap<>();
        List<String> vals = Arrays.asList(values);
        map.put(key, vals);
        return map;
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/core/src/test/java/com/netflix/conductor/core/execution/TestWorkflowExecutor.java | core/src/test/java/com/netflix/conductor/core/execution/TestWorkflowExecutor.java | /*
* Copyright 2022 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.core.execution;
import java.time.Duration;
import java.util.*;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.stream.Collectors;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.ArgumentCaptor;
import org.mockito.stubbing.Answer;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.support.DefaultListableBeanFactory;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.ComponentScan;
import org.springframework.context.annotation.Configuration;
import org.springframework.test.context.ContextConfiguration;
import org.springframework.test.context.junit4.SpringRunner;
import com.netflix.conductor.common.config.TestObjectMapperConfiguration;
import com.netflix.conductor.common.metadata.tasks.PollData;
import com.netflix.conductor.common.metadata.tasks.TaskDef;
import com.netflix.conductor.common.metadata.tasks.TaskResult;
import com.netflix.conductor.common.metadata.tasks.TaskType;
import com.netflix.conductor.common.metadata.workflow.RerunWorkflowRequest;
import com.netflix.conductor.common.metadata.workflow.WorkflowDef;
import com.netflix.conductor.common.metadata.workflow.WorkflowTask;
import com.netflix.conductor.common.utils.ExternalPayloadStorage;
import com.netflix.conductor.core.config.ConductorProperties;
import com.netflix.conductor.core.dal.ExecutionDAOFacade;
import com.netflix.conductor.core.exception.ConflictException;
import com.netflix.conductor.core.exception.NotFoundException;
import com.netflix.conductor.core.exception.TerminateWorkflowException;
import com.netflix.conductor.core.execution.evaluators.Evaluator;
import com.netflix.conductor.core.execution.mapper.*;
import com.netflix.conductor.core.execution.tasks.*;
import com.netflix.conductor.core.listener.TaskStatusListener;
import com.netflix.conductor.core.listener.WorkflowStatusListener;
import com.netflix.conductor.core.metadata.MetadataMapperService;
import com.netflix.conductor.core.utils.ExternalPayloadStorageUtils;
import com.netflix.conductor.core.utils.IDGenerator;
import com.netflix.conductor.core.utils.ParametersUtils;
import com.netflix.conductor.dao.MetadataDAO;
import com.netflix.conductor.dao.QueueDAO;
import com.netflix.conductor.model.TaskModel;
import com.netflix.conductor.model.WorkflowModel;
import com.netflix.conductor.service.ExecutionLockService;
import com.fasterxml.jackson.databind.ObjectMapper;
import static com.netflix.conductor.common.metadata.tasks.TaskType.*;
import static java.util.Comparator.comparingInt;
import static java.util.stream.Collectors.groupingBy;
import static java.util.stream.Collectors.maxBy;
import static org.junit.Assert.*;
import static org.mockito.ArgumentMatchers.*;
import static org.mockito.Mockito.*;
@ContextConfiguration(
classes = {
TestObjectMapperConfiguration.class,
TestWorkflowExecutor.TestConfiguration.class
})
@RunWith(SpringRunner.class)
public class TestWorkflowExecutor {
// System under test, constructed manually in init() from the mocks below.
private WorkflowExecutorOps workflowExecutor;
// Mocked collaborators, all created fresh per test in init().
private ExecutionDAOFacade executionDAOFacade;
private MetadataDAO metadataDAO;
private QueueDAO queueDAO;
private WorkflowStatusListener workflowStatusListener;
private TaskStatusListener taskStatusListener;
private ExecutionLockService executionLockService;
private ExternalPayloadStorageUtils externalPayloadStorageUtils;
/**
 * Spring configuration supplying the system-task beans used by these tests:
 * real SubWorkflow/Lambda/Wait tasks plus stubbed HTTP/HTTP2/JSON_JQ_TRANSFORM
 * tasks whose start() invocations can be observed.
 */
@Configuration
@ComponentScan(basePackageClasses = {Evaluator.class}) // load all Evaluator beans.
public static class TestConfiguration {

    @Bean(TASK_TYPE_SUB_WORKFLOW)
    public SubWorkflow subWorkflow(ObjectMapper objectMapper) {
        return new SubWorkflow(objectMapper);
    }

    @Bean(TASK_TYPE_LAMBDA)
    public Lambda lambda() {
        return new Lambda();
    }

    @Bean(TASK_TYPE_WAIT)
    public Wait waitBean() {
        return new Wait();
    }

    // Stub that reports itself as async (so it is queued, not started inline).
    @Bean("HTTP")
    public WorkflowSystemTask http() {
        return new WorkflowSystemTaskStub("HTTP") {
            @Override
            public boolean isAsync() {
                return true;
            }
        };
    }

    // Stub with the default (non-async) behavior, so it is started inline.
    @Bean("HTTP2")
    public WorkflowSystemTask http2() {
        return new WorkflowSystemTaskStub("HTTP2");
    }

    // Non-async stub whose start() completes the task without delegating.
    @Bean(TASK_TYPE_JSON_JQ_TRANSFORM)
    public WorkflowSystemTask jsonBean() {
        return new WorkflowSystemTaskStub("JSON_JQ_TRANSFORM") {
            @Override
            public boolean isAsync() {
                return false;
            }

            @Override
            public void start(
                    WorkflowModel workflow, TaskModel task, WorkflowExecutor executor) {
                task.setStatus(TaskModel.Status.COMPLETED);
            }
        };
    }

    @Bean
    public SystemTaskRegistry systemTaskRegistry(Set<WorkflowSystemTask> tasks) {
        return new SystemTaskRegistry(tasks);
    }
}
// Beans provided by the Spring test context declared in this class.
@Autowired private ObjectMapper objectMapper;
@Autowired private SystemTaskRegistry systemTaskRegistry;
@Autowired private DefaultListableBeanFactory beanFactory;
@Autowired private Map<String, Evaluator> evaluators;

/**
 * Builds a real {@code WorkflowExecutorOps} wired with mocked DAOs, listeners
 * and lock service, a real DeciderService with the full task-mapper set, and
 * stubbed ConductorProperties timeouts. Recreated before every test.
 */
@Before
public void init() {
    executionDAOFacade = mock(ExecutionDAOFacade.class);
    metadataDAO = mock(MetadataDAO.class);
    queueDAO = mock(QueueDAO.class);
    workflowStatusListener = mock(WorkflowStatusListener.class);
    taskStatusListener = mock(TaskStatusListener.class);
    externalPayloadStorageUtils = mock(ExternalPayloadStorageUtils.class);
    executionLockService = mock(ExecutionLockService.class);
    ParametersUtils parametersUtils = new ParametersUtils(objectMapper);
    IDGenerator idGenerator = new IDGenerator();

    // One mapper per supported task type, mirroring the production wiring.
    Map<String, TaskMapper> taskMappers = new HashMap<>();
    taskMappers.put(DECISION.name(), new DecisionTaskMapper());
    taskMappers.put(SWITCH.name(), new SwitchTaskMapper(evaluators));
    taskMappers.put(DYNAMIC.name(), new DynamicTaskMapper(parametersUtils, metadataDAO));
    taskMappers.put(FORK_JOIN.name(), new ForkJoinTaskMapper());
    taskMappers.put(JOIN.name(), new JoinTaskMapper());
    taskMappers.put(
            FORK_JOIN_DYNAMIC.name(),
            new ForkJoinDynamicTaskMapper(
                    idGenerator,
                    parametersUtils,
                    objectMapper,
                    metadataDAO,
                    mock(SystemTaskRegistry.class)));
    taskMappers.put(
            USER_DEFINED.name(), new UserDefinedTaskMapper(parametersUtils, metadataDAO));
    taskMappers.put(SIMPLE.name(), new SimpleTaskMapper(parametersUtils));
    taskMappers.put(
            SUB_WORKFLOW.name(), new SubWorkflowTaskMapper(parametersUtils, metadataDAO));
    taskMappers.put(EVENT.name(), new EventTaskMapper(parametersUtils));
    taskMappers.put(WAIT.name(), new WaitTaskMapper(parametersUtils));
    taskMappers.put(HTTP.name(), new HTTPTaskMapper(parametersUtils, metadataDAO));
    taskMappers.put(LAMBDA.name(), new LambdaTaskMapper(parametersUtils, metadataDAO));
    taskMappers.put(INLINE.name(), new InlineTaskMapper(parametersUtils, metadataDAO));

    DeciderService deciderService =
            new DeciderService(
                    idGenerator,
                    parametersUtils,
                    metadataDAO,
                    externalPayloadStorageUtils,
                    systemTaskRegistry,
                    taskMappers,
                    Duration.ofMinutes(60));
    MetadataMapperService metadataMapperService = new MetadataMapperService(metadataDAO);

    // Stub only the timeouts the executor reads during these tests.
    ConductorProperties properties = mock(ConductorProperties.class);
    when(properties.getActiveWorkerLastPollTimeout()).thenReturn(Duration.ofSeconds(100));
    when(properties.getTaskExecutionPostponeDuration()).thenReturn(Duration.ofSeconds(60));
    when(properties.getWorkflowOffsetTimeout()).thenReturn(Duration.ofSeconds(30));

    workflowExecutor =
            new WorkflowExecutorOps(
                    deciderService,
                    metadataDAO,
                    queueDAO,
                    metadataMapperService,
                    workflowStatusListener,
                    taskStatusListener,
                    executionDAOFacade,
                    properties,
                    executionLockService,
                    systemTaskRegistry,
                    parametersUtils,
                    idGenerator);
}
@Test
public void testScheduleTask() {
    IDGenerator idGenerator = new IDGenerator();
    WorkflowSystemTaskStub httpTask = beanFactory.getBean("HTTP", WorkflowSystemTaskStub.class);
    WorkflowSystemTaskStub http2Task =
            beanFactory.getBean("HTTP2", WorkflowSystemTaskStub.class);

    WorkflowModel workflow = new WorkflowModel();
    workflow.setWorkflowId("1");
    WorkflowDef workflowDef = new WorkflowDef();
    workflowDef.setName("1");
    workflowDef.setVersion(1);
    workflow.setWorkflowDefinition(workflowDef);

    List<TaskModel> tasks = new LinkedList<>();

    WorkflowTask taskToSchedule = new WorkflowTask();
    taskToSchedule.setWorkflowTaskType(TaskType.USER_DEFINED);
    taskToSchedule.setType("HTTP");

    WorkflowTask taskToSchedule2 = new WorkflowTask();
    taskToSchedule2.setWorkflowTaskType(TaskType.USER_DEFINED);
    taskToSchedule2.setType("HTTP2");

    WorkflowTask wait = new WorkflowTask();
    wait.setWorkflowTaskType(TaskType.WAIT);
    wait.setType("WAIT");
    wait.setTaskReferenceName("wait");

    // task1: async HTTP system task, SCHEDULED -> expected to be queued.
    TaskModel task1 = new TaskModel();
    task1.setTaskType(taskToSchedule.getType());
    task1.setTaskDefName(taskToSchedule.getName());
    task1.setReferenceTaskName(taskToSchedule.getTaskReferenceName());
    task1.setWorkflowInstanceId(workflow.getWorkflowId());
    task1.setCorrelationId(workflow.getCorrelationId());
    task1.setScheduledTime(System.currentTimeMillis());
    task1.setTaskId(idGenerator.generate());
    task1.setInputData(new HashMap<>());
    task1.setStatus(TaskModel.Status.SCHEDULED);
    task1.setRetryCount(0);
    task1.setCallbackAfterSeconds(taskToSchedule.getStartDelay());
    task1.setWorkflowTask(taskToSchedule);

    // task2: WAIT task already IN_PROGRESS -> expected to be queued.
    TaskModel task2 = new TaskModel();
    task2.setTaskType(TASK_TYPE_WAIT);
    task2.setTaskDefName(taskToSchedule.getName());
    task2.setReferenceTaskName(taskToSchedule.getTaskReferenceName());
    task2.setWorkflowInstanceId(workflow.getWorkflowId());
    task2.setCorrelationId(workflow.getCorrelationId());
    task2.setScheduledTime(System.currentTimeMillis());
    task2.setInputData(new HashMap<>());
    task2.setTaskId(idGenerator.generate());
    task2.setStatus(TaskModel.Status.IN_PROGRESS);
    task2.setWorkflowTask(taskToSchedule);

    // task3: HTTP2, a non-async system task -> expected to be started inline.
    TaskModel task3 = new TaskModel();
    task3.setTaskType(taskToSchedule2.getType());
    task3.setTaskDefName(taskToSchedule.getName());
    task3.setReferenceTaskName(taskToSchedule.getTaskReferenceName());
    task3.setWorkflowInstanceId(workflow.getWorkflowId());
    task3.setCorrelationId(workflow.getCorrelationId());
    task3.setScheduledTime(System.currentTimeMillis());
    task3.setTaskId(idGenerator.generate());
    task3.setInputData(new HashMap<>());
    task3.setStatus(TaskModel.Status.SCHEDULED);
    task3.setRetryCount(0);
    task3.setCallbackAfterSeconds(taskToSchedule.getStartDelay());
    task3.setWorkflowTask(taskToSchedule);

    tasks.add(task1);
    tasks.add(task2);
    tasks.add(task3);

    when(executionDAOFacade.createTasks(tasks)).thenReturn(tasks);

    // Count tasks started inline (updateTask is invoked for those).
    AtomicInteger startedTaskCount = new AtomicInteger(0);
    doAnswer(
                    invocation -> {
                        startedTaskCount.incrementAndGet();
                        return null;
                    })
            .when(executionDAOFacade)
            .updateTask(any());

    // Count tasks pushed to the queue. (Removed the previously unused
    // queueName local; parameterized the raw Answer type.)
    AtomicInteger queuedTaskCount = new AtomicInteger(0);
    final Answer<Void> answer =
            invocation -> {
                queuedTaskCount.incrementAndGet();
                return null;
            };
    doAnswer(answer).when(queueDAO).push(any(), any(), anyLong());
    doAnswer(answer).when(queueDAO).push(any(), any(), anyInt(), anyLong());

    boolean stateChanged = workflowExecutor.scheduleTask(workflow, tasks);

    // HTTP2 is not async, so it is started inline; the async HTTP task and the
    // WAIT task are queued instead.
    assertEquals(1, startedTaskCount.get());
    assertEquals(2, queuedTaskCount.get());
    assertTrue(stateChanged);
    assertFalse(httpTask.isStarted());
    assertTrue(http2Task.isStarted());
}
/**
 * When persisting the scheduled tasks fails, scheduleTask must surface a
 * TerminateWorkflowException to the caller.
 */
@Test(expected = TerminateWorkflowException.class)
public void testScheduleTaskFailure() {
    WorkflowModel workflow = new WorkflowModel();
    workflow.setWorkflowId("wid_01");

    TaskModel simpleTask = new TaskModel();
    simpleTask.setTaskType(TaskType.TASK_TYPE_SIMPLE);
    simpleTask.setTaskDefName("task_1");
    simpleTask.setReferenceTaskName("task_1");
    simpleTask.setWorkflowInstanceId(workflow.getWorkflowId());
    simpleTask.setTaskId("tid_01");
    simpleTask.setStatus(TaskModel.Status.SCHEDULED);
    simpleTask.setRetryCount(0);

    List<TaskModel> tasksToCreate = new ArrayList<>();
    tasksToCreate.add(simpleTask);

    // Simulate the persistence layer blowing up on task creation.
    when(executionDAOFacade.createTasks(tasksToCreate)).thenThrow(new RuntimeException());
    workflowExecutor.scheduleTask(workflow, tasksToCreate);
}
/** Simulate Queue push failures and assert that scheduleTask doesn't throw an exception. */
@Test
public void testQueueFailuresDuringScheduleTask() {
    WorkflowDef workflowDef = new WorkflowDef();
    workflowDef.setName("wid");
    workflowDef.setVersion(1);

    WorkflowModel workflow = new WorkflowModel();
    workflow.setWorkflowId("wid_01");
    workflow.setWorkflowDefinition(workflowDef);

    TaskModel scheduledTask = new TaskModel();
    scheduledTask.setTaskType(TaskType.TASK_TYPE_SIMPLE);
    scheduledTask.setTaskDefName("task_1");
    scheduledTask.setReferenceTaskName("task_1");
    scheduledTask.setWorkflowInstanceId(workflow.getWorkflowId());
    scheduledTask.setTaskId("tid_01");
    scheduledTask.setStatus(TaskModel.Status.SCHEDULED);
    scheduledTask.setRetryCount(0);

    List<TaskModel> tasksToCreate = new ArrayList<>();
    tasksToCreate.add(scheduledTask);

    // Task creation succeeds, but every queue push explodes.
    when(executionDAOFacade.createTasks(tasksToCreate)).thenReturn(tasksToCreate);
    doThrow(new RuntimeException())
            .when(queueDAO)
            .push(anyString(), anyString(), anyInt(), anyLong());

    // The failure is swallowed and reported as "no state change".
    assertFalse(workflowExecutor.scheduleTask(workflow, tasksToCreate));
}
// Collections.emptyMap() is type-safe, unlike the raw Collections.EMPTY_MAP,
// so the @SuppressWarnings("unchecked") previously on this method is no longer
// needed.
@Test
public void testCompleteWorkflow() {
    WorkflowDef def = new WorkflowDef();
    def.setName("test");

    WorkflowModel workflow = new WorkflowModel();
    workflow.setWorkflowDefinition(def);
    workflow.setWorkflowId("1");
    workflow.setStatus(WorkflowModel.Status.RUNNING);
    workflow.setOwnerApp("junit_test");
    workflow.setCreateTime(10L);
    workflow.setEndTime(100L);
    workflow.setOutput(Collections.emptyMap());

    when(executionDAOFacade.getWorkflowModel(anyString(), anyBoolean())).thenReturn(workflow);

    // Count persistence-layer interactions triggered by completion.
    AtomicInteger updateWorkflowCalledCounter = new AtomicInteger(0);
    doAnswer(
                    invocation -> {
                        updateWorkflowCalledCounter.incrementAndGet();
                        return null;
                    })
            .when(executionDAOFacade)
            .updateWorkflow(any());

    AtomicInteger updateTasksCalledCounter = new AtomicInteger(0);
    doAnswer(
                    invocation -> {
                        updateTasksCalledCounter.incrementAndGet();
                        return null;
                    })
            .when(executionDAOFacade)
            .updateTasks(any());

    AtomicInteger removeQueueEntryCalledCounter = new AtomicInteger(0);
    doAnswer(
                    invocation -> {
                        removeQueueEntryCalledCounter.incrementAndGet();
                        return null;
                    })
            .when(queueDAO)
            .remove(anyString(), anyString());

    workflowExecutor.completeWorkflow(workflow);
    assertEquals(WorkflowModel.Status.COMPLETED, workflow.getStatus());
    assertEquals(1, updateWorkflowCalledCounter.get());
    assertEquals(0, updateTasksCalledCounter.get());
    assertEquals(0, removeQueueEntryCalledCounter.get());
    verify(workflowStatusListener, times(1))
            .onWorkflowCompletedIfEnabled(any(WorkflowModel.class));
    verify(workflowStatusListener, times(0))
            .onWorkflowFinalizedIfEnabled(any(WorkflowModel.class));

    // Completing again with the status listener enabled notifies it once more.
    def.setWorkflowStatusListenerEnabled(true);
    workflow.setStatus(WorkflowModel.Status.RUNNING);
    workflowExecutor.completeWorkflow(workflow);
    verify(workflowStatusListener, times(2))
            .onWorkflowCompletedIfEnabled(any(WorkflowModel.class));
    verify(workflowStatusListener, times(0))
            .onWorkflowFinalizedIfEnabled(any(WorkflowModel.class));
}
// Collections.emptyMap() is type-safe, unlike the raw Collections.EMPTY_MAP,
// so the @SuppressWarnings("unchecked") previously on this method is no longer
// needed.
@Test
public void testTerminateWorkflow() {
    WorkflowDef def = new WorkflowDef();
    def.setName("test");

    WorkflowModel workflow = new WorkflowModel();
    workflow.setWorkflowDefinition(def);
    workflow.setWorkflowId("1");
    workflow.setStatus(WorkflowModel.Status.RUNNING);
    workflow.setOwnerApp("junit_test");
    workflow.setCreateTime(10L);
    workflow.setEndTime(100L);
    workflow.setOutput(Collections.emptyMap());

    when(executionDAOFacade.getWorkflowModel(anyString(), anyBoolean())).thenReturn(workflow);

    // Count persistence/queue interactions triggered by termination.
    AtomicInteger updateWorkflowCalledCounter = new AtomicInteger(0);
    doAnswer(
                    invocation -> {
                        updateWorkflowCalledCounter.incrementAndGet();
                        return null;
                    })
            .when(executionDAOFacade)
            .updateWorkflow(any());

    AtomicInteger updateTasksCalledCounter = new AtomicInteger(0);
    doAnswer(
                    invocation -> {
                        updateTasksCalledCounter.incrementAndGet();
                        return null;
                    })
            .when(executionDAOFacade)
            .updateTasks(any());

    AtomicInteger removeQueueEntryCalledCounter = new AtomicInteger(0);
    doAnswer(
                    invocation -> {
                        removeQueueEntryCalledCounter.incrementAndGet();
                        return null;
                    })
            .when(queueDAO)
            .remove(anyString(), anyString());

    workflowExecutor.terminateWorkflow("workflowId", "reason");
    assertEquals(WorkflowModel.Status.TERMINATED, workflow.getStatus());
    assertEquals(1, updateWorkflowCalledCounter.get());
    assertEquals(1, removeQueueEntryCalledCounter.get());
    verify(workflowStatusListener, times(1))
            .onWorkflowTerminatedIfEnabled(any(WorkflowModel.class));
    verify(workflowStatusListener, times(1))
            .onWorkflowFinalizedIfEnabled(any(WorkflowModel.class));

    // A subsequent completion with the listener enabled notifies completion
    // once, without re-finalizing.
    def.setWorkflowStatusListenerEnabled(true);
    workflow.setStatus(WorkflowModel.Status.RUNNING);
    workflowExecutor.completeWorkflow(workflow);
    verify(workflowStatusListener, times(1))
            .onWorkflowCompletedIfEnabled(any(WorkflowModel.class));
    verify(workflowStatusListener, times(1))
            .onWorkflowFinalizedIfEnabled(any(WorkflowModel.class));
}
/**
 * Termination must proceed (and notify the listener) even when uploading the
 * workflow output to external payload storage throws.
 */
@Test
public void testUploadOutputFailuresDuringTerminateWorkflow() {
    WorkflowDef def = new WorkflowDef();
    def.setName("test");
    def.setWorkflowStatusListenerEnabled(true);

    WorkflowModel workflow = new WorkflowModel();
    workflow.setWorkflowDefinition(def);
    workflow.setWorkflowId("1");
    workflow.setStatus(WorkflowModel.Status.RUNNING);
    workflow.setOwnerApp("junit_test");
    workflow.setCreateTime(10L);
    workflow.setEndTime(100L);
    // Type-safe replacement for the raw Collections.EMPTY_MAP constant.
    workflow.setOutput(Collections.emptyMap());

    List<TaskModel> tasks = new LinkedList<>();
    TaskModel task = new TaskModel();
    task.setScheduledTime(1L);
    task.setSeq(1);
    task.setTaskId(UUID.randomUUID().toString());
    task.setReferenceTaskName("t1");
    task.setWorkflowInstanceId(workflow.getWorkflowId());
    task.setTaskDefName("task1");
    task.setStatus(TaskModel.Status.IN_PROGRESS);
    tasks.add(task);
    workflow.setTasks(tasks);

    when(executionDAOFacade.getWorkflowModel(anyString(), anyBoolean())).thenReturn(workflow);

    AtomicInteger updateWorkflowCalledCounter = new AtomicInteger(0);
    doAnswer(
                    invocation -> {
                        updateWorkflowCalledCounter.incrementAndGet();
                        return null;
                    })
            .when(executionDAOFacade)
            .updateWorkflow(any());

    // Simulate external payload storage failing during output upload.
    doThrow(new RuntimeException("any exception"))
            .when(externalPayloadStorageUtils)
            .verifyAndUpload(workflow, ExternalPayloadStorage.PayloadType.WORKFLOW_OUTPUT);

    workflowExecutor.terminateWorkflow(workflow.getWorkflowId(), "reason");
    assertEquals(WorkflowModel.Status.TERMINATED, workflow.getStatus());
    assertEquals(1, updateWorkflowCalledCounter.get());
    verify(workflowStatusListener, times(1))
            .onWorkflowTerminatedIfEnabled(any(WorkflowModel.class));
}
// Collections.emptyMap() is type-safe, unlike the raw Collections.EMPTY_MAP,
// so the @SuppressWarnings("unchecked") previously on this method is no longer
// needed.
@Test
public void testQueueExceptionsIgnoredDuringTerminateWorkflow() {
    WorkflowDef def = new WorkflowDef();
    def.setName("test");
    def.setWorkflowStatusListenerEnabled(true);

    WorkflowModel workflow = new WorkflowModel();
    workflow.setWorkflowDefinition(def);
    workflow.setWorkflowId("1");
    workflow.setStatus(WorkflowModel.Status.RUNNING);
    workflow.setOwnerApp("junit_test");
    workflow.setCreateTime(10L);
    workflow.setEndTime(100L);
    workflow.setOutput(Collections.emptyMap());

    when(executionDAOFacade.getWorkflowModel(anyString(), anyBoolean())).thenReturn(workflow);

    AtomicInteger updateWorkflowCalledCounter = new AtomicInteger(0);
    doAnswer(
                    invocation -> {
                        updateWorkflowCalledCounter.incrementAndGet();
                        return null;
                    })
            .when(executionDAOFacade)
            .updateWorkflow(any());

    AtomicInteger updateTasksCalledCounter = new AtomicInteger(0);
    doAnswer(
                    invocation -> {
                        updateTasksCalledCounter.incrementAndGet();
                        return null;
                    })
            .when(executionDAOFacade)
            .updateTasks(any());

    // Queue removal failures must not prevent workflow termination.
    doThrow(new RuntimeException()).when(queueDAO).remove(anyString(), anyString());

    workflowExecutor.terminateWorkflow("workflowId", "reason");
    assertEquals(WorkflowModel.Status.TERMINATED, workflow.getStatus());
    assertEquals(1, updateWorkflowCalledCounter.get());
    verify(workflowStatusListener, times(1))
            .onWorkflowTerminatedIfEnabled(any(WorkflowModel.class));
}
    /**
     * Verifies restarting a workflow, first against its original (stored) definition and then —
     * after a newer definition version is published — with {@code useLatestDefinitions=true}.
     * In both cases the workflow must flip back to RUNNING, clear its end/retry timestamps, and
     * be re-created via {@code executionDAOFacade.createWorkflow} with the expected definition.
     */
    @Test
    public void testRestartWorkflow() {
        WorkflowTask workflowTask = new WorkflowTask();
        workflowTask.setName("test_task");
        workflowTask.setTaskReferenceName("task_ref");
        WorkflowDef workflowDef = new WorkflowDef();
        workflowDef.setName("testDef");
        workflowDef.setVersion(1);
        workflowDef.setRestartable(true);
        workflowDef.getTasks().add(workflowTask);
        // Two failed executions of the same task, as left behind by a failed run.
        TaskModel task_1 = new TaskModel();
        task_1.setTaskId(UUID.randomUUID().toString());
        task_1.setSeq(1);
        task_1.setStatus(TaskModel.Status.FAILED);
        task_1.setTaskDefName(workflowTask.getName());
        task_1.setReferenceTaskName(workflowTask.getTaskReferenceName());
        TaskModel task_2 = new TaskModel();
        task_2.setTaskId(UUID.randomUUID().toString());
        task_2.setSeq(2);
        task_2.setStatus(TaskModel.Status.FAILED);
        task_2.setTaskDefName(workflowTask.getName());
        task_2.setReferenceTaskName(workflowTask.getTaskReferenceName());
        WorkflowModel workflow = new WorkflowModel();
        workflow.setWorkflowDefinition(workflowDef);
        workflow.setWorkflowId("test-workflow-id");
        workflow.getTasks().addAll(Arrays.asList(task_1, task_2));
        workflow.setStatus(WorkflowModel.Status.FAILED);
        workflow.setEndTime(500);
        workflow.setLastRetriedTime(100);
        when(executionDAOFacade.getWorkflowModel(anyString(), anyBoolean())).thenReturn(workflow);
        doNothing().when(executionDAOFacade).removeTask(any());
        when(metadataDAO.getWorkflowDef(workflow.getWorkflowName(), workflow.getWorkflowVersion()))
                .thenReturn(Optional.of(workflowDef));
        when(metadataDAO.getTaskDef(workflowTask.getName())).thenReturn(new TaskDef());
        when(executionDAOFacade.updateWorkflow(any())).thenReturn("");
        // Restart with useLatestDefinitions=false: the stored definition must be used.
        workflowExecutor.restart(workflow.getWorkflowId(), false);
        assertEquals(WorkflowModel.Status.FAILED, workflow.getPreviousStatus());
        assertEquals(WorkflowModel.Status.RUNNING, workflow.getStatus());
        assertEquals(0, workflow.getEndTime());
        assertEquals(0, workflow.getLastRetriedTime());
        // The latest-definition lookup must not have happened in this mode.
        verify(metadataDAO, never()).getLatestWorkflowDef(any());
        ArgumentCaptor<WorkflowModel> argumentCaptor = ArgumentCaptor.forClass(WorkflowModel.class);
        verify(executionDAOFacade, times(1)).createWorkflow(argumentCaptor.capture());
        assertEquals(
                workflow.getWorkflowId(), argumentCaptor.getAllValues().get(0).getWorkflowId());
        assertEquals(
                workflow.getWorkflowDefinition(),
                argumentCaptor.getAllValues().get(0).getWorkflowDefinition());
        // add a new version of the workflow definition and restart with latest
        workflow.setStatus(WorkflowModel.Status.COMPLETED);
        workflow.setEndTime(500);
        workflow.setLastRetriedTime(100);
        workflowDef = new WorkflowDef();
        workflowDef.setName("testDef");
        workflowDef.setVersion(2);
        workflowDef.setRestartable(true);
        workflowDef.getTasks().addAll(Collections.singletonList(workflowTask));
        when(metadataDAO.getLatestWorkflowDef(workflow.getWorkflowName()))
                .thenReturn(Optional.of(workflowDef));
        // Restart with useLatestDefinitions=true: the v2 definition must be picked up.
        workflowExecutor.restart(workflow.getWorkflowId(), true);
        assertEquals(WorkflowModel.Status.COMPLETED, workflow.getPreviousStatus());
        assertEquals(WorkflowModel.Status.RUNNING, workflow.getStatus());
        assertEquals(0, workflow.getEndTime());
        assertEquals(0, workflow.getLastRetriedTime());
        verify(metadataDAO, times(1)).getLatestWorkflowDef(anyString());
        argumentCaptor = ArgumentCaptor.forClass(WorkflowModel.class);
        verify(executionDAOFacade, times(2)).createWorkflow(argumentCaptor.capture());
        assertEquals(
                workflow.getWorkflowId(), argumentCaptor.getAllValues().get(1).getWorkflowId());
        assertEquals(workflowDef, argumentCaptor.getAllValues().get(1).getWorkflowDefinition());
    }
@Test(expected = NotFoundException.class)
public void testRetryNonTerminalWorkflow() {
WorkflowModel workflow = new WorkflowModel();
workflow.setWorkflowId("testRetryNonTerminalWorkflow");
workflow.setStatus(WorkflowModel.Status.RUNNING);
when(executionDAOFacade.getWorkflowModel(anyString(), anyBoolean())).thenReturn(workflow);
workflowExecutor.retry(workflow.getWorkflowId(), false);
}
@Test(expected = ConflictException.class)
public void testRetryWorkflowNoTasks() {
WorkflowModel workflow = new WorkflowModel();
workflow.setWorkflowId("ApplicationException");
workflow.setStatus(WorkflowModel.Status.FAILED);
workflow.setTasks(Collections.emptyList());
when(executionDAOFacade.getWorkflowModel(anyString(), anyBoolean())).thenReturn(workflow);
workflowExecutor.retry(workflow.getWorkflowId(), false);
}
@Test(expected = ConflictException.class)
public void testRetryWorkflowNoFailedTasks() {
// setup
WorkflowModel workflow = new WorkflowModel();
workflow.setWorkflowId("testRetryWorkflowId");
WorkflowDef workflowDef = new WorkflowDef();
workflowDef.setName("testRetryWorkflowId");
workflowDef.setVersion(1);
workflow.setWorkflowDefinition(workflowDef);
workflow.setOwnerApp("junit_testRetryWorkflowId");
workflow.setCreateTime(10L);
workflow.setEndTime(100L);
//noinspection unchecked
workflow.setOutput(Collections.EMPTY_MAP);
workflow.setStatus(WorkflowModel.Status.FAILED);
// add 2 failed task in 2 forks and 1 cancelled in the 3rd fork
TaskModel task_1_1 = new TaskModel();
task_1_1.setTaskId(UUID.randomUUID().toString());
task_1_1.setSeq(1);
task_1_1.setRetryCount(0);
task_1_1.setTaskType(TaskType.SIMPLE.toString());
task_1_1.setStatus(TaskModel.Status.FAILED);
task_1_1.setTaskDefName("task1");
task_1_1.setReferenceTaskName("task1_ref1");
TaskModel task_1_2 = new TaskModel();
task_1_2.setTaskId(UUID.randomUUID().toString());
task_1_2.setSeq(2);
task_1_2.setRetryCount(1);
task_1_2.setTaskType(TaskType.SIMPLE.toString());
task_1_2.setStatus(TaskModel.Status.COMPLETED);
task_1_2.setTaskDefName("task1");
task_1_2.setReferenceTaskName("task1_ref1");
workflow.getTasks().addAll(Arrays.asList(task_1_1, task_1_2));
// end of setup
// when:
when(executionDAOFacade.getWorkflowModel(anyString(), anyBoolean())).thenReturn(workflow);
when(metadataDAO.getWorkflowDef(anyString(), anyInt()))
.thenReturn(Optional.of(new WorkflowDef()));
workflowExecutor.retry(workflow.getWorkflowId(), false);
}
@Test
public void testRetryWorkflow() {
// setup
WorkflowModel workflow = new WorkflowModel();
workflow.setWorkflowId("testRetryWorkflowId");
WorkflowDef workflowDef = new WorkflowDef();
workflowDef.setName("testRetryWorkflowId");
workflowDef.setVersion(1);
workflow.setWorkflowDefinition(workflowDef);
workflow.setOwnerApp("junit_testRetryWorkflowId");
workflow.setCreateTime(10L);
workflow.setEndTime(100L);
//noinspection unchecked
workflow.setOutput(Collections.EMPTY_MAP);
workflow.setStatus(WorkflowModel.Status.FAILED);
AtomicInteger updateWorkflowCalledCounter = new AtomicInteger(0);
doAnswer(
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | true |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/core/src/test/java/com/netflix/conductor/core/execution/TestDeciderOutcomes.java | core/src/test/java/com/netflix/conductor/core/execution/TestDeciderOutcomes.java | /*
* Copyright 2022 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.core.execution;
import java.io.InputStream;
import java.time.Duration;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.ComponentScan;
import org.springframework.context.annotation.Configuration;
import org.springframework.core.io.ClassPathResource;
import org.springframework.test.context.ContextConfiguration;
import org.springframework.test.context.junit4.SpringRunner;
import org.springframework.util.unit.DataSize;
import com.netflix.conductor.common.config.TestObjectMapperConfiguration;
import com.netflix.conductor.common.metadata.tasks.TaskDef;
import com.netflix.conductor.common.metadata.tasks.TaskType;
import com.netflix.conductor.common.metadata.workflow.WorkflowDef;
import com.netflix.conductor.common.metadata.workflow.WorkflowTask;
import com.netflix.conductor.core.config.ConductorProperties;
import com.netflix.conductor.core.execution.DeciderService.DeciderOutcome;
import com.netflix.conductor.core.execution.evaluators.Evaluator;
import com.netflix.conductor.core.execution.mapper.DecisionTaskMapper;
import com.netflix.conductor.core.execution.mapper.DynamicTaskMapper;
import com.netflix.conductor.core.execution.mapper.EventTaskMapper;
import com.netflix.conductor.core.execution.mapper.ForkJoinDynamicTaskMapper;
import com.netflix.conductor.core.execution.mapper.ForkJoinTaskMapper;
import com.netflix.conductor.core.execution.mapper.HTTPTaskMapper;
import com.netflix.conductor.core.execution.mapper.JoinTaskMapper;
import com.netflix.conductor.core.execution.mapper.SimpleTaskMapper;
import com.netflix.conductor.core.execution.mapper.SubWorkflowTaskMapper;
import com.netflix.conductor.core.execution.mapper.SwitchTaskMapper;
import com.netflix.conductor.core.execution.mapper.TaskMapper;
import com.netflix.conductor.core.execution.mapper.UserDefinedTaskMapper;
import com.netflix.conductor.core.execution.mapper.WaitTaskMapper;
import com.netflix.conductor.core.execution.tasks.Decision;
import com.netflix.conductor.core.execution.tasks.Join;
import com.netflix.conductor.core.execution.tasks.Switch;
import com.netflix.conductor.core.execution.tasks.SystemTaskRegistry;
import com.netflix.conductor.core.execution.tasks.WorkflowSystemTask;
import com.netflix.conductor.core.utils.ExternalPayloadStorageUtils;
import com.netflix.conductor.core.utils.IDGenerator;
import com.netflix.conductor.core.utils.ParametersUtils;
import com.netflix.conductor.dao.MetadataDAO;
import com.netflix.conductor.model.TaskModel;
import com.netflix.conductor.model.WorkflowModel;
import com.fasterxml.jackson.databind.ObjectMapper;
import static com.netflix.conductor.common.metadata.tasks.TaskType.DECISION;
import static com.netflix.conductor.common.metadata.tasks.TaskType.DYNAMIC;
import static com.netflix.conductor.common.metadata.tasks.TaskType.EVENT;
import static com.netflix.conductor.common.metadata.tasks.TaskType.FORK_JOIN;
import static com.netflix.conductor.common.metadata.tasks.TaskType.FORK_JOIN_DYNAMIC;
import static com.netflix.conductor.common.metadata.tasks.TaskType.HTTP;
import static com.netflix.conductor.common.metadata.tasks.TaskType.JOIN;
import static com.netflix.conductor.common.metadata.tasks.TaskType.SIMPLE;
import static com.netflix.conductor.common.metadata.tasks.TaskType.SUB_WORKFLOW;
import static com.netflix.conductor.common.metadata.tasks.TaskType.SWITCH;
import static com.netflix.conductor.common.metadata.tasks.TaskType.TASK_TYPE_DECISION;
import static com.netflix.conductor.common.metadata.tasks.TaskType.TASK_TYPE_FORK;
import static com.netflix.conductor.common.metadata.tasks.TaskType.TASK_TYPE_JOIN;
import static com.netflix.conductor.common.metadata.tasks.TaskType.TASK_TYPE_SWITCH;
import static com.netflix.conductor.common.metadata.tasks.TaskType.USER_DEFINED;
import static com.netflix.conductor.common.metadata.tasks.TaskType.WAIT;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNotSame;
import static org.junit.Assert.assertTrue;
import static org.mockito.ArgumentMatchers.anyString;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
@ContextConfiguration(
classes = {
TestObjectMapperConfiguration.class,
TestDeciderOutcomes.TestConfiguration.class
})
@RunWith(SpringRunner.class)
public class TestDeciderOutcomes {
private DeciderService deciderService;
@Autowired private Map<String, Evaluator> evaluators;
@Autowired private ObjectMapper objectMapper;
@Autowired private SystemTaskRegistry systemTaskRegistry;
    /**
     * Spring wiring for this test: registers the DECISION, SWITCH and JOIN system tasks under
     * their task-type constants as bean names, scans in all {@link Evaluator} beans, and exposes
     * a {@link SystemTaskRegistry} built from every {@link WorkflowSystemTask} bean.
     */
    @Configuration
    @ComponentScan(basePackageClasses = {Evaluator.class}) // load all Evaluator beans.
    public static class TestConfiguration {
        // Bean name is the task-type constant (TASK_TYPE_DECISION).
        @Bean(TASK_TYPE_DECISION)
        public Decision decision() {
            return new Decision();
        }
        // Bean name is the task-type constant (TASK_TYPE_SWITCH).
        @Bean(TASK_TYPE_SWITCH)
        public Switch switchTask() {
            return new Switch();
        }
        // Bean name is the task-type constant (TASK_TYPE_JOIN).
        @Bean(TASK_TYPE_JOIN)
        public Join join() {
            return new Join(new ConductorProperties());
        }
        // Aggregates all WorkflowSystemTask beans declared above into a single registry.
        @Bean
        public SystemTaskRegistry systemTaskRegistry(Set<WorkflowSystemTask> tasks) {
            return new SystemTaskRegistry(tasks);
        }
    }
    /**
     * Builds a real {@link DeciderService} backed by mocks: a MetadataDAO that returns a
     * single-retry TaskDef for any task name, mocked payload-storage utilities and properties,
     * and the full set of task mappers wired by task type.
     */
    @Before
    public void init() {
        MetadataDAO metadataDAO = mock(MetadataDAO.class);
        // Replaces the autowired registry with a mock for the mappers/decider under test.
        systemTaskRegistry = mock(SystemTaskRegistry.class);
        ExternalPayloadStorageUtils externalPayloadStorageUtils =
                mock(ExternalPayloadStorageUtils.class);
        ConductorProperties properties = mock(ConductorProperties.class);
        when(properties.getTaskInputPayloadSizeThreshold()).thenReturn(DataSize.ofKilobytes(10L));
        when(properties.getMaxTaskInputPayloadSizeThreshold())
                .thenReturn(DataSize.ofKilobytes(10240L));
        // Any task-def lookup returns this: 1 retry, 1-hour response timeout.
        TaskDef taskDef = new TaskDef();
        taskDef.setRetryCount(1);
        taskDef.setName("mockTaskDef");
        taskDef.setResponseTimeoutSeconds(60 * 60);
        when(metadataDAO.getTaskDef(anyString())).thenReturn(taskDef);
        ParametersUtils parametersUtils = new ParametersUtils(objectMapper);
        // One mapper per task type, keyed by the TaskType enum name.
        Map<String, TaskMapper> taskMappers = new HashMap<>();
        taskMappers.put(DECISION.name(), new DecisionTaskMapper());
        taskMappers.put(SWITCH.name(), new SwitchTaskMapper(evaluators));
        taskMappers.put(DYNAMIC.name(), new DynamicTaskMapper(parametersUtils, metadataDAO));
        taskMappers.put(FORK_JOIN.name(), new ForkJoinTaskMapper());
        taskMappers.put(JOIN.name(), new JoinTaskMapper());
        taskMappers.put(
                FORK_JOIN_DYNAMIC.name(),
                new ForkJoinDynamicTaskMapper(
                        new IDGenerator(),
                        parametersUtils,
                        objectMapper,
                        metadataDAO,
                        systemTaskRegistry));
        taskMappers.put(
                USER_DEFINED.name(), new UserDefinedTaskMapper(parametersUtils, metadataDAO));
        taskMappers.put(SIMPLE.name(), new SimpleTaskMapper(parametersUtils));
        taskMappers.put(
                SUB_WORKFLOW.name(), new SubWorkflowTaskMapper(parametersUtils, metadataDAO));
        taskMappers.put(EVENT.name(), new EventTaskMapper(parametersUtils));
        taskMappers.put(WAIT.name(), new WaitTaskMapper(parametersUtils));
        taskMappers.put(HTTP.name(), new HTTPTaskMapper(parametersUtils, metadataDAO));
        this.deciderService =
                new DeciderService(
                        new IDGenerator(),
                        parametersUtils,
                        metadataDAO,
                        externalPayloadStorageUtils,
                        systemTaskRegistry,
                        taskMappers,
                        Duration.ofMinutes(60));
    }
@Test
public void testWorkflowWithNoTasks() throws Exception {
InputStream stream = new ClassPathResource("./conditional_flow.json").getInputStream();
WorkflowDef def = objectMapper.readValue(stream, WorkflowDef.class);
assertNotNull(def);
WorkflowModel workflow = new WorkflowModel();
workflow.setWorkflowDefinition(def);
workflow.setCreateTime(0L);
workflow.getInput().put("param1", "nested");
workflow.getInput().put("param2", "one");
DeciderOutcome outcome = deciderService.decide(workflow);
assertNotNull(outcome);
assertFalse(outcome.isComplete);
assertTrue(outcome.tasksToBeUpdated.isEmpty());
assertEquals(3, outcome.tasksToBeScheduled.size());
outcome.tasksToBeScheduled.forEach(t -> t.setStatus(TaskModel.Status.COMPLETED));
workflow.getTasks().addAll(outcome.tasksToBeScheduled);
outcome = deciderService.decide(workflow);
assertFalse(outcome.isComplete);
assertEquals(outcome.tasksToBeUpdated.toString(), 3, outcome.tasksToBeUpdated.size());
assertEquals(2, outcome.tasksToBeScheduled.size());
assertEquals("DECISION", outcome.tasksToBeScheduled.get(0).getTaskDefName());
}
@Test
public void testWorkflowWithNoTasksWithSwitch() throws Exception {
InputStream stream =
new ClassPathResource("./conditional_flow_with_switch.json").getInputStream();
WorkflowDef def = objectMapper.readValue(stream, WorkflowDef.class);
assertNotNull(def);
WorkflowModel workflow = new WorkflowModel();
workflow.setWorkflowDefinition(def);
workflow.setCreateTime(0L);
workflow.getInput().put("param1", "nested");
workflow.getInput().put("param2", "one");
DeciderOutcome outcome = deciderService.decide(workflow);
assertNotNull(outcome);
assertFalse(outcome.isComplete);
assertTrue(outcome.tasksToBeUpdated.isEmpty());
assertEquals(3, outcome.tasksToBeScheduled.size());
outcome.tasksToBeScheduled.forEach(t -> t.setStatus(TaskModel.Status.COMPLETED));
workflow.getTasks().addAll(outcome.tasksToBeScheduled);
outcome = deciderService.decide(workflow);
assertFalse(outcome.isComplete);
assertEquals(outcome.tasksToBeUpdated.toString(), 3, outcome.tasksToBeUpdated.size());
assertEquals(2, outcome.tasksToBeScheduled.size());
assertEquals("SWITCH", outcome.tasksToBeScheduled.get(0).getTaskDefName());
}
    /**
     * Verifies retry scheduling by the decider in two scenarios: (1) a simple user task that
     * fails and is rescheduled with a fresh task id, the old id recorded as retriedTaskId, and
     * ${CPEWF_TASK_ID}/workflow-input parameters re-resolved for the retry; (2) the same
     * behavior for a task spawned inside a FORK_JOIN_DYNAMIC, including its forked input map.
     */
    @Test
    public void testRetries() {
        WorkflowDef def = new WorkflowDef();
        def.setName("test");
        WorkflowTask workflowTask = new WorkflowTask();
        workflowTask.setName("test_task");
        workflowTask.setType("USER_TASK");
        workflowTask.setTaskReferenceName("t0");
        // ${CPEWF_TASK_ID} should resolve to the scheduled task's own id.
        workflowTask.getInputParameters().put("taskId", "${CPEWF_TASK_ID}");
        workflowTask.getInputParameters().put("requestId", "${workflow.input.requestId}");
        workflowTask.setTaskDefinition(new TaskDef("test_task"));
        def.getTasks().add(workflowTask);
        def.setSchemaVersion(2);
        WorkflowModel workflow = new WorkflowModel();
        workflow.setWorkflowDefinition(def);
        workflow.getInput().put("requestId", 123);
        workflow.setCreateTime(System.currentTimeMillis());
        DeciderOutcome outcome = deciderService.decide(workflow);
        assertNotNull(outcome);
        assertEquals(1, outcome.tasksToBeScheduled.size());
        assertEquals(
                workflowTask.getTaskReferenceName(),
                outcome.tasksToBeScheduled.get(0).getReferenceTaskName());
        String task1Id = outcome.tasksToBeScheduled.get(0).getTaskId();
        // Input parameters resolved against the scheduled task / workflow input.
        assertEquals(task1Id, outcome.tasksToBeScheduled.get(0).getInputData().get("taskId"));
        assertEquals(123, outcome.tasksToBeScheduled.get(0).getInputData().get("requestId"));
        // Fail the task and re-decide: a retry should be scheduled.
        outcome.tasksToBeScheduled.get(0).setStatus(TaskModel.Status.FAILED);
        workflow.getTasks().addAll(outcome.tasksToBeScheduled);
        outcome = deciderService.decide(workflow);
        assertNotNull(outcome);
        assertEquals(1, outcome.tasksToBeUpdated.size());
        assertEquals(1, outcome.tasksToBeScheduled.size());
        // The failed attempt is updated; the retry gets a NEW id but points back at the old one.
        assertEquals(task1Id, outcome.tasksToBeUpdated.get(0).getTaskId());
        assertNotSame(task1Id, outcome.tasksToBeScheduled.get(0).getTaskId());
        assertEquals(
                outcome.tasksToBeScheduled.get(0).getTaskId(),
                outcome.tasksToBeScheduled.get(0).getInputData().get("taskId"));
        assertEquals(task1Id, outcome.tasksToBeScheduled.get(0).getRetriedTaskId());
        assertEquals(123, outcome.tasksToBeScheduled.get(0).getInputData().get("requestId"));
        // Scenario 2: the same retry behavior inside a dynamic fork.
        WorkflowTask fork = new WorkflowTask();
        fork.setName("fork0");
        fork.setWorkflowTaskType(TaskType.FORK_JOIN_DYNAMIC);
        fork.setTaskReferenceName("fork0");
        fork.setDynamicForkTasksInputParamName("forkedInputs");
        fork.setDynamicForkTasksParam("forks");
        fork.getInputParameters().put("forks", "${workflow.input.forks}");
        fork.getInputParameters().put("forkedInputs", "${workflow.input.forkedInputs}");
        WorkflowTask join = new WorkflowTask();
        join.setName("join0");
        join.setType("JOIN");
        join.setTaskReferenceName("join0");
        def.getTasks().clear();
        def.getTasks().add(fork);
        def.getTasks().add(join);
        // Build a single forked SIMPLE task ("f0") plus its per-task input map.
        List<WorkflowTask> forks = new LinkedList<>();
        Map<String, Map<String, Object>> forkedInputs = new HashMap<>();
        for (int i = 0; i < 1; i++) {
            WorkflowTask wft = new WorkflowTask();
            wft.setName("f" + i);
            wft.setTaskReferenceName("f" + i);
            wft.setWorkflowTaskType(TaskType.SIMPLE);
            wft.getInputParameters().put("requestId", "${workflow.input.requestId}");
            wft.getInputParameters().put("taskId", "${CPEWF_TASK_ID}");
            wft.setTaskDefinition(new TaskDef("f" + i));
            forks.add(wft);
            Map<String, Object> input = new HashMap<>();
            input.put("k", "v");
            input.put("k1", 1);
            forkedInputs.put(wft.getTaskReferenceName(), input);
        }
        workflow = new WorkflowModel();
        workflow.setWorkflowDefinition(def);
        workflow.getInput().put("requestId", 123);
        workflow.setCreateTime(System.currentTimeMillis());
        workflow.getInput().put("forks", forks);
        workflow.getInput().put("forkedInputs", forkedInputs);
        outcome = deciderService.decide(workflow);
        assertNotNull(outcome);
        // Fork + forked task + join are scheduled together.
        assertEquals(3, outcome.tasksToBeScheduled.size());
        assertEquals(0, outcome.tasksToBeUpdated.size());
        assertEquals("v", outcome.tasksToBeScheduled.get(1).getInputData().get("k"));
        assertEquals(1, outcome.tasksToBeScheduled.get(1).getInputData().get("k1"));
        assertEquals(
                outcome.tasksToBeScheduled.get(1).getTaskId(),
                outcome.tasksToBeScheduled.get(1).getInputData().get("taskId"));
        task1Id = outcome.tasksToBeScheduled.get(1).getTaskId();
        // Fail the forked task and re-decide.
        outcome.tasksToBeScheduled.get(1).setStatus(TaskModel.Status.FAILED);
        for (TaskModel taskToBeScheduled : outcome.tasksToBeScheduled) {
            taskToBeScheduled.setUpdateTime(System.currentTimeMillis());
        }
        workflow.getTasks().addAll(outcome.tasksToBeScheduled);
        outcome = deciderService.decide(workflow);
        assertTrue(
                outcome.tasksToBeScheduled.stream()
                        .anyMatch(task1 -> task1.getReferenceTaskName().equals("f0")));
        Optional<TaskModel> optionalTask =
                outcome.tasksToBeScheduled.stream()
                        .filter(t -> t.getReferenceTaskName().equals("f0"))
                        .findFirst();
        assertTrue(optionalTask.isPresent());
        TaskModel task = optionalTask.get();
        // The retried forked task keeps its forked input and links to the failed attempt.
        assertEquals("v", task.getInputData().get("k"));
        assertEquals(1, task.getInputData().get("k1"));
        assertEquals(task.getTaskId(), task.getInputData().get("taskId"));
        assertNotSame(task1Id, task.getTaskId());
        assertEquals(task1Id, task.getRetriedTaskId());
    }
    /**
     * Verifies handling of an optional task: it is retried like a normal task while retries
     * remain (the mock TaskDef allows 1 retry; the loop re-decides repeatedly), and once
     * exhausted the failure is converted to COMPLETED_WITH_ERRORS so the workflow proceeds to
     * the next task instead of failing.
     */
    @Test
    public void testOptional() {
        WorkflowDef def = new WorkflowDef();
        def.setName("test");
        WorkflowTask task1 = new WorkflowTask();
        task1.setName("task0");
        task1.setType("SIMPLE");
        task1.setTaskReferenceName("t0");
        task1.getInputParameters().put("taskId", "${CPEWF_TASK_ID}");
        task1.setOptional(true);
        task1.setTaskDefinition(new TaskDef("task0"));
        WorkflowTask task2 = new WorkflowTask();
        task2.setName("task1");
        task2.setType("SIMPLE");
        task2.setTaskReferenceName("t1");
        task2.setTaskDefinition(new TaskDef("task1"));
        def.getTasks().add(task1);
        def.getTasks().add(task2);
        def.setSchemaVersion(2);
        WorkflowModel workflow = new WorkflowModel();
        workflow.setWorkflowDefinition(def);
        workflow.setCreateTime(System.currentTimeMillis());
        DeciderOutcome outcome = deciderService.decide(workflow);
        assertNotNull(outcome);
        assertEquals(1, outcome.tasksToBeScheduled.size());
        assertEquals(
                task1.getTaskReferenceName(),
                outcome.tasksToBeScheduled.get(0).getReferenceTaskName());
        // Fail the optional task repeatedly; each decide() schedules a retry of t0
        // with an incremented retry count, while the failed attempt stays FAILED.
        for (int i = 0; i < 3; i++) {
            String task1Id = outcome.tasksToBeScheduled.get(0).getTaskId();
            assertEquals(task1Id, outcome.tasksToBeScheduled.get(0).getInputData().get("taskId"));
            workflow.getTasks().clear();
            workflow.getTasks().addAll(outcome.tasksToBeScheduled);
            workflow.getTasks().get(0).setStatus(TaskModel.Status.FAILED);
            outcome = deciderService.decide(workflow);
            assertNotNull(outcome);
            assertEquals(1, outcome.tasksToBeUpdated.size());
            assertEquals(1, outcome.tasksToBeScheduled.size());
            assertEquals(TaskModel.Status.FAILED, workflow.getTasks().get(0).getStatus());
            assertEquals(task1Id, outcome.tasksToBeUpdated.get(0).getTaskId());
            assertEquals(
                    task1.getTaskReferenceName(),
                    outcome.tasksToBeScheduled.get(0).getReferenceTaskName());
            assertEquals(i + 1, outcome.tasksToBeScheduled.get(0).getRetryCount());
        }
        // Final failure: the optional task ends as COMPLETED_WITH_ERRORS and t1 is scheduled.
        String task1Id = outcome.tasksToBeScheduled.get(0).getTaskId();
        workflow.getTasks().clear();
        workflow.getTasks().addAll(outcome.tasksToBeScheduled);
        workflow.getTasks().get(0).setStatus(TaskModel.Status.FAILED);
        outcome = deciderService.decide(workflow);
        assertNotNull(outcome);
        assertEquals(1, outcome.tasksToBeUpdated.size());
        assertEquals(1, outcome.tasksToBeScheduled.size());
        assertEquals(
                TaskModel.Status.COMPLETED_WITH_ERRORS, workflow.getTasks().get(0).getStatus());
        assertEquals(task1Id, outcome.tasksToBeUpdated.get(0).getTaskId());
        assertEquals(
                task2.getTaskReferenceName(),
                outcome.tasksToBeScheduled.get(0).getReferenceTaskName());
    }
    /**
     * Verifies handling of a permissive task: like {@code testOptional} it is retried while
     * retries remain, but when exhausted the task stays FAILED (not COMPLETED_WITH_ERRORS)
     * while the workflow still moves on and schedules the next task, t1.
     */
    @Test
    public void testPermissive() {
        WorkflowDef def = new WorkflowDef();
        def.setName("test-permissive");
        WorkflowTask task1 = new WorkflowTask();
        task1.setName("task0");
        task1.setPermissive(true);
        task1.setTaskReferenceName("t0");
        task1.getInputParameters().put("taskId", "${CPEWF_TASK_ID}");
        task1.setTaskDefinition(new TaskDef("task0"));
        WorkflowTask task2 = new WorkflowTask();
        task2.setName("task1");
        task2.setPermissive(true);
        task2.setTaskReferenceName("t1");
        task2.setTaskDefinition(new TaskDef("task1"));
        def.getTasks().add(task1);
        def.getTasks().add(task2);
        def.setSchemaVersion(2);
        WorkflowModel workflow = new WorkflowModel();
        workflow.setWorkflowDefinition(def);
        workflow.setCreateTime(System.currentTimeMillis());
        DeciderOutcome outcome = deciderService.decide(workflow);
        assertNotNull(outcome);
        assertEquals(1, outcome.tasksToBeScheduled.size());
        assertEquals(
                task1.getTaskReferenceName(),
                outcome.tasksToBeScheduled.get(0).getReferenceTaskName());
        // Fail the permissive task repeatedly; each decide() schedules a retry of t0
        // with an incremented retry count.
        for (int i = 0; i < 3; i++) {
            String task1Id = outcome.tasksToBeScheduled.get(0).getTaskId();
            assertEquals(task1Id, outcome.tasksToBeScheduled.get(0).getInputData().get("taskId"));
            workflow.getTasks().clear();
            workflow.getTasks().addAll(outcome.tasksToBeScheduled);
            workflow.getTasks().get(0).setStatus(TaskModel.Status.FAILED);
            outcome = deciderService.decide(workflow);
            assertNotNull(outcome);
            assertEquals(1, outcome.tasksToBeUpdated.size());
            assertEquals(1, outcome.tasksToBeScheduled.size());
            assertEquals(TaskModel.Status.FAILED, workflow.getTasks().get(0).getStatus());
            assertEquals(task1Id, outcome.tasksToBeUpdated.get(0).getTaskId());
            assertEquals(
                    task1.getTaskReferenceName(),
                    outcome.tasksToBeScheduled.get(0).getReferenceTaskName());
            assertEquals(i + 1, outcome.tasksToBeScheduled.get(0).getRetryCount());
        }
        // Final failure: unlike an optional task, the permissive task remains FAILED,
        // but the next task t1 is still scheduled.
        String task1Id = outcome.tasksToBeScheduled.get(0).getTaskId();
        workflow.getTasks().clear();
        workflow.getTasks().addAll(outcome.tasksToBeScheduled);
        workflow.getTasks().get(0).setStatus(TaskModel.Status.FAILED);
        outcome = deciderService.decide(workflow);
        assertNotNull(outcome);
        assertEquals(1, outcome.tasksToBeUpdated.size());
        assertEquals(1, outcome.tasksToBeScheduled.size());
        assertEquals(TaskModel.Status.FAILED, workflow.getTasks().get(0).getStatus());
        assertEquals(task1Id, outcome.tasksToBeUpdated.get(0).getTaskId());
        assertEquals(
                task2.getTaskReferenceName(),
                outcome.tasksToBeScheduled.get(0).getReferenceTaskName());
    }
@Test
public void testOptionalWithDynamicFork() {
WorkflowDef def = new WorkflowDef();
def.setName("test");
WorkflowTask task1 = new WorkflowTask();
task1.setName("fork0");
task1.setWorkflowTaskType(TaskType.FORK_JOIN_DYNAMIC);
task1.setTaskReferenceName("fork0");
task1.setDynamicForkTasksInputParamName("forkedInputs");
task1.setDynamicForkTasksParam("forks");
task1.getInputParameters().put("forks", "${workflow.input.forks}");
task1.getInputParameters().put("forkedInputs", "${workflow.input.forkedInputs}");
WorkflowTask task2 = new WorkflowTask();
task2.setName("join0");
task2.setType("JOIN");
task2.setTaskReferenceName("join0");
def.getTasks().add(task1);
def.getTasks().add(task2);
def.setSchemaVersion(2);
WorkflowModel workflow = new WorkflowModel();
workflow.setWorkflowDefinition(def);
List<WorkflowTask> forks = new LinkedList<>();
Map<String, Map<String, Object>> forkedInputs = new HashMap<>();
for (int i = 0; i < 3; i++) {
WorkflowTask workflowTask = new WorkflowTask();
workflowTask.setName("f" + i);
workflowTask.getInputParameters().put("joinOn", new ArrayList<>());
workflowTask.setTaskReferenceName("f" + i);
workflowTask.setWorkflowTaskType(TaskType.SIMPLE);
workflowTask.setOptional(true);
workflowTask.setTaskDefinition(new TaskDef("f" + i));
forks.add(workflowTask);
forkedInputs.put(workflowTask.getTaskReferenceName(), new HashMap<>());
}
workflow.getInput().put("forks", forks);
workflow.getInput().put("forkedInputs", forkedInputs);
workflow.setCreateTime(System.currentTimeMillis());
DeciderOutcome outcome = deciderService.decide(workflow);
assertNotNull(outcome);
assertEquals(5, outcome.tasksToBeScheduled.size());
assertEquals(0, outcome.tasksToBeUpdated.size());
assertEquals(TASK_TYPE_FORK, outcome.tasksToBeScheduled.get(0).getTaskType());
assertEquals(TaskModel.Status.COMPLETED, outcome.tasksToBeScheduled.get(0).getStatus());
for (int retryCount = 0; retryCount < 3; retryCount++) {
for (TaskModel taskToBeScheduled : outcome.tasksToBeScheduled) {
if (taskToBeScheduled.getTaskDefName().equals("join0")) {
assertEquals(TaskModel.Status.IN_PROGRESS, taskToBeScheduled.getStatus());
} else if (taskToBeScheduled.getTaskType().matches("(f0|f1|f2)")) {
assertEquals(TaskModel.Status.SCHEDULED, taskToBeScheduled.getStatus());
taskToBeScheduled.setStatus(TaskModel.Status.FAILED);
}
taskToBeScheduled.setUpdateTime(System.currentTimeMillis());
}
workflow.getTasks().addAll(outcome.tasksToBeScheduled);
outcome = deciderService.decide(workflow);
assertNotNull(outcome);
}
assertEquals("f0", outcome.tasksToBeScheduled.get(0).getTaskType());
for (int i = 0; i < 3; i++) {
assertEquals(TaskModel.Status.FAILED, outcome.tasksToBeUpdated.get(i).getStatus());
assertEquals("f" + (i), outcome.tasksToBeUpdated.get(i).getTaskDefName());
}
assertEquals(TaskModel.Status.SCHEDULED, outcome.tasksToBeScheduled.get(0).getStatus());
System.out.println(outcome.tasksToBeScheduled.get(0));
new Join(new ConductorProperties())
.execute(workflow, outcome.tasksToBeScheduled.get(0), null);
assertEquals(TaskModel.Status.COMPLETED, outcome.tasksToBeScheduled.get(0).getStatus());
}
    /**
     * Verifies DECISION case routing via the JavaScript case expression: a missing Id routes to
     * the default branch ('bad input'), location == 'usa' routes to 'even' regardless of Id
     * parity, and otherwise an odd Id routes to 'odd'. Each decide() schedules the decision
     * task itself plus the first task of the selected branch, and records the evaluated case in
     * the decision task's "caseOutput".
     */
    @Test
    public void testDecisionCases() {
        WorkflowDef def = new WorkflowDef();
        def.setName("test");
        // Branch tasks: one per decision case, plus the default branch.
        WorkflowTask even = new WorkflowTask();
        even.setName("even");
        even.setType("SIMPLE");
        even.setTaskReferenceName("even");
        even.setTaskDefinition(new TaskDef("even"));
        WorkflowTask odd = new WorkflowTask();
        odd.setName("odd");
        odd.setType("SIMPLE");
        odd.setTaskReferenceName("odd");
        odd.setTaskDefinition(new TaskDef("odd"));
        WorkflowTask defaultt = new WorkflowTask();
        defaultt.setName("defaultt");
        defaultt.setType("SIMPLE");
        defaultt.setTaskReferenceName("defaultt");
        defaultt.setTaskDefinition(new TaskDef("defaultt"));
        WorkflowTask decide = new WorkflowTask();
        decide.setName("decide");
        decide.setWorkflowTaskType(TaskType.DECISION);
        decide.setTaskReferenceName("d0");
        decide.getInputParameters().put("Id", "${workflow.input.Id}");
        decide.getInputParameters().put("location", "${workflow.input.location}");
        decide.setCaseExpression(
                "if ($.Id == null) 'bad input'; else if ( ($.Id != null && $.Id % 2 == 0) || $.location == 'usa') 'even'; else 'odd'; ");
        decide.getDecisionCases().put("even", Collections.singletonList(even));
        decide.getDecisionCases().put("odd", Collections.singletonList(odd));
        decide.setDefaultCase(Collections.singletonList(defaultt));
        def.getTasks().add(decide);
        def.setSchemaVersion(2);
        WorkflowModel workflow = new WorkflowModel();
        workflow.setWorkflowDefinition(def);
        workflow.setCreateTime(System.currentTimeMillis());
        // No Id in the workflow input -> expression yields 'bad input' -> default branch.
        DeciderOutcome outcome = deciderService.decide(workflow);
        assertNotNull(outcome);
        assertEquals(2, outcome.tasksToBeScheduled.size());
        assertEquals(
                decide.getTaskReferenceName(),
                outcome.tasksToBeScheduled.get(0).getReferenceTaskName());
        assertEquals(
                defaultt.getTaskReferenceName(),
                outcome.tasksToBeScheduled.get(1).getReferenceTaskName()); // default
        assertEquals(
                Collections.singletonList("bad input"),
                outcome.tasksToBeScheduled.get(0).getOutputData().get("caseOutput"));
        // Odd Id, but location 'usa' still selects the 'even' branch.
        workflow.getInput().put("Id", 9);
        workflow.getInput().put("location", "usa");
        outcome = deciderService.decide(workflow);
        assertEquals(2, outcome.tasksToBeScheduled.size());
        assertEquals(
                decide.getTaskReferenceName(),
                outcome.tasksToBeScheduled.get(0).getReferenceTaskName());
        assertEquals(
                even.getTaskReferenceName(),
                outcome.tasksToBeScheduled
                        .get(1)
                        .getReferenceTaskName()); // even because of location == usa
        assertEquals(
                Collections.singletonList("even"),
                outcome.tasksToBeScheduled.get(0).getOutputData().get("caseOutput"));
        // Odd Id and non-usa location -> 'odd' branch.
        workflow.getInput().put("Id", 9);
        workflow.getInput().put("location", "canada");
        outcome = deciderService.decide(workflow);
        assertEquals(2, outcome.tasksToBeScheduled.size());
        assertEquals(
                decide.getTaskReferenceName(),
                outcome.tasksToBeScheduled.get(0).getReferenceTaskName());
        assertEquals(
                odd.getTaskReferenceName(),
                outcome.tasksToBeScheduled.get(1).getReferenceTaskName()); // odd
        assertEquals(
                Collections.singletonList("odd"),
                outcome.tasksToBeScheduled.get(0).getOutputData().get("caseOutput"));
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/core/src/test/java/com/netflix/conductor/core/execution/tasks/TestSubWorkflow.java | core/src/test/java/com/netflix/conductor/core/execution/tasks/TestSubWorkflow.java | /*
* Copyright 2022 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.core.execution.tasks;
import java.util.HashMap;
import java.util.Map;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.test.context.ContextConfiguration;
import org.springframework.test.context.junit4.SpringRunner;
import com.netflix.conductor.common.config.TestObjectMapperConfiguration;
import com.netflix.conductor.common.metadata.workflow.WorkflowDef;
import com.netflix.conductor.core.exception.NonTransientException;
import com.netflix.conductor.core.exception.TransientException;
import com.netflix.conductor.core.execution.StartWorkflowInput;
import com.netflix.conductor.core.execution.WorkflowExecutor;
import com.netflix.conductor.model.TaskModel;
import com.netflix.conductor.model.WorkflowModel;
import com.fasterxml.jackson.databind.ObjectMapper;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import static org.mockito.ArgumentMatchers.anyString;
import static org.mockito.ArgumentMatchers.eq;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
/**
 * Unit tests for the SUB_WORKFLOW system task ({@code SubWorkflow}): starting a child
 * workflow, reflecting the child's status back onto the task, and cancelling it.
 *
 * <p>The {@code WorkflowExecutor} is mocked. Stubbing of
 * {@code workflowExecutor.startWorkflow(...)} relies on {@code StartWorkflowInput}
 * equality, so every test builds the exact input instance it expects the task to
 * construct internally; if the task builds a different input, the stub does not match
 * and the assertion on the sub-workflow id fails.
 */
@ContextConfiguration(classes = {TestObjectMapperConfiguration.class})
@RunWith(SpringRunner.class)
public class TestSubWorkflow {
    // Mocked executor; each test stubs startWorkflow/getWorkflow as needed.
    private WorkflowExecutor workflowExecutor;
    // System task under test, created fresh per test in setup().
    private SubWorkflow subWorkflow;
    @Autowired private ObjectMapper objectMapper;
    @Before
    public void setup() {
        workflowExecutor = mock(WorkflowExecutor.class);
        subWorkflow = new SubWorkflow(objectMapper);
    }
    // start(): records the child workflow id on the task and maps the child's status
    // onto the task status (RUNNING -> IN_PROGRESS, TERMINATED -> CANCELED,
    // COMPLETED -> COMPLETED).
    @Test
    public void testStartSubWorkflow() {
        WorkflowDef workflowDef = new WorkflowDef();
        WorkflowModel workflowInstance = new WorkflowModel();
        workflowInstance.setWorkflowDefinition(workflowDef);
        TaskModel task = new TaskModel();
        task.setOutputData(new HashMap<>());
        Map<String, Object> inputData = new HashMap<>();
        inputData.put("subWorkflowName", "UnitWorkFlow");
        inputData.put("subWorkflowVersion", 3);
        task.setInputData(inputData);
        String workflowId = "workflow_1";
        WorkflowModel workflow = new WorkflowModel();
        workflow.setWorkflowId(workflowId);
        // Expected input the task should build; stub matches on equality.
        StartWorkflowInput startWorkflowInput = new StartWorkflowInput();
        startWorkflowInput.setName("UnitWorkFlow");
        startWorkflowInput.setVersion(3);
        startWorkflowInput.setWorkflowInput(inputData);
        startWorkflowInput.setTaskToDomain(workflowInstance.getTaskToDomain());
        when(workflowExecutor.startWorkflow(startWorkflowInput)).thenReturn(workflowId);
        when(workflowExecutor.getWorkflow(anyString(), eq(false))).thenReturn(workflow);
        // RUNNING child -> task stays IN_PROGRESS.
        workflow.setStatus(WorkflowModel.Status.RUNNING);
        subWorkflow.start(workflowInstance, task, workflowExecutor);
        assertEquals("workflow_1", task.getSubWorkflowId());
        assertEquals(TaskModel.Status.IN_PROGRESS, task.getStatus());
        // TERMINATED child -> task is CANCELED.
        workflow.setStatus(WorkflowModel.Status.TERMINATED);
        subWorkflow.start(workflowInstance, task, workflowExecutor);
        assertEquals("workflow_1", task.getSubWorkflowId());
        assertEquals(TaskModel.Status.CANCELED, task.getStatus());
        // COMPLETED child -> task is COMPLETED.
        workflow.setStatus(WorkflowModel.Status.COMPLETED);
        subWorkflow.start(workflowInstance, task, workflowExecutor);
        assertEquals("workflow_1", task.getSubWorkflowId());
        assertEquals(TaskModel.Status.COMPLETED, task.getStatus());
    }
    // start(): a TransientException (e.g. queue/DAO hiccup) must leave the task
    // untouched (still SCHEDULED, no sub-workflow id, no output) so it can be retried.
    @Test
    public void testStartSubWorkflowQueueFailure() {
        WorkflowDef workflowDef = new WorkflowDef();
        WorkflowModel workflowInstance = new WorkflowModel();
        workflowInstance.setWorkflowDefinition(workflowDef);
        TaskModel task = new TaskModel();
        task.setOutputData(new HashMap<>());
        task.setStatus(TaskModel.Status.SCHEDULED);
        Map<String, Object> inputData = new HashMap<>();
        inputData.put("subWorkflowName", "UnitWorkFlow");
        inputData.put("subWorkflowVersion", 3);
        task.setInputData(inputData);
        StartWorkflowInput startWorkflowInput = new StartWorkflowInput();
        startWorkflowInput.setName("UnitWorkFlow");
        startWorkflowInput.setVersion(3);
        startWorkflowInput.setWorkflowInput(inputData);
        startWorkflowInput.setTaskToDomain(workflowInstance.getTaskToDomain());
        when(workflowExecutor.startWorkflow(startWorkflowInput))
                .thenThrow(new TransientException("QueueDAO failure"));
        subWorkflow.start(workflowInstance, task, workflowExecutor);
        assertNull("subWorkflowId should be null", task.getSubWorkflowId());
        assertEquals(TaskModel.Status.SCHEDULED, task.getStatus());
        assertTrue("Output data should be empty", task.getOutputData().isEmpty());
    }
    // start(): a NonTransientException must fail the task permanently and surface the
    // failure reason; no sub-workflow id is recorded.
    @Test
    public void testStartSubWorkflowStartError() {
        WorkflowDef workflowDef = new WorkflowDef();
        WorkflowModel workflowInstance = new WorkflowModel();
        workflowInstance.setWorkflowDefinition(workflowDef);
        TaskModel task = new TaskModel();
        task.setOutputData(new HashMap<>());
        task.setStatus(TaskModel.Status.SCHEDULED);
        Map<String, Object> inputData = new HashMap<>();
        inputData.put("subWorkflowName", "UnitWorkFlow");
        inputData.put("subWorkflowVersion", 3);
        task.setInputData(inputData);
        StartWorkflowInput startWorkflowInput = new StartWorkflowInput();
        startWorkflowInput.setName("UnitWorkFlow");
        startWorkflowInput.setVersion(3);
        startWorkflowInput.setWorkflowInput(inputData);
        startWorkflowInput.setTaskToDomain(workflowInstance.getTaskToDomain());
        String failureReason = "non transient failure";
        when(workflowExecutor.startWorkflow(startWorkflowInput))
                .thenThrow(new NonTransientException(failureReason));
        subWorkflow.start(workflowInstance, task, workflowExecutor);
        assertNull("subWorkflowId should be null", task.getSubWorkflowId());
        assertEquals(TaskModel.Status.FAILED, task.getStatus());
        assertEquals(failureReason, task.getReasonForIncompletion());
        assertTrue("Output data should be empty", task.getOutputData().isEmpty());
    }
    // start(): when "workflowInput" is present but EMPTY, the stub expects the child to
    // be started with the full task input instead — i.e. an empty workflowInput map is
    // ignored (behavior pinned by this stub matching; see SubWorkflow for the logic).
    @Test
    public void testStartSubWorkflowWithEmptyWorkflowInput() {
        WorkflowDef workflowDef = new WorkflowDef();
        WorkflowModel workflowInstance = new WorkflowModel();
        workflowInstance.setWorkflowDefinition(workflowDef);
        TaskModel task = new TaskModel();
        task.setOutputData(new HashMap<>());
        Map<String, Object> inputData = new HashMap<>();
        inputData.put("subWorkflowName", "UnitWorkFlow");
        inputData.put("subWorkflowVersion", 3);
        Map<String, Object> workflowInput = new HashMap<>();
        inputData.put("workflowInput", workflowInput);
        task.setInputData(inputData);
        StartWorkflowInput startWorkflowInput = new StartWorkflowInput();
        startWorkflowInput.setName("UnitWorkFlow");
        startWorkflowInput.setVersion(3);
        // Note: expects the WHOLE task input, not the empty "workflowInput" map.
        startWorkflowInput.setWorkflowInput(inputData);
        startWorkflowInput.setTaskToDomain(workflowInstance.getTaskToDomain());
        when(workflowExecutor.startWorkflow(startWorkflowInput)).thenReturn("workflow_1");
        subWorkflow.start(workflowInstance, task, workflowExecutor);
        assertEquals("workflow_1", task.getSubWorkflowId());
    }
    // start(): a non-empty "workflowInput" map is passed to the child workflow as-is.
    @Test
    public void testStartSubWorkflowWithWorkflowInput() {
        WorkflowDef workflowDef = new WorkflowDef();
        WorkflowModel workflowInstance = new WorkflowModel();
        workflowInstance.setWorkflowDefinition(workflowDef);
        TaskModel task = new TaskModel();
        task.setOutputData(new HashMap<>());
        Map<String, Object> inputData = new HashMap<>();
        inputData.put("subWorkflowName", "UnitWorkFlow");
        inputData.put("subWorkflowVersion", 3);
        Map<String, Object> workflowInput = new HashMap<>();
        workflowInput.put("test", "value");
        inputData.put("workflowInput", workflowInput);
        task.setInputData(inputData);
        StartWorkflowInput startWorkflowInput = new StartWorkflowInput();
        startWorkflowInput.setName("UnitWorkFlow");
        startWorkflowInput.setVersion(3);
        startWorkflowInput.setWorkflowInput(workflowInput);
        startWorkflowInput.setTaskToDomain(workflowInstance.getTaskToDomain());
        when(workflowExecutor.startWorkflow(startWorkflowInput)).thenReturn("workflow_1");
        subWorkflow.start(workflowInstance, task, workflowExecutor);
        assertEquals("workflow_1", task.getSubWorkflowId());
    }
    // start(): "subWorkflowTaskToDomain" from the task input is forwarded to the child
    // workflow's task-to-domain mapping.
    @Test
    public void testStartSubWorkflowTaskToDomain() {
        WorkflowDef workflowDef = new WorkflowDef();
        WorkflowModel workflowInstance = new WorkflowModel();
        workflowInstance.setWorkflowDefinition(workflowDef);
        Map<String, String> taskToDomain =
                new HashMap<>() {
                    {
                        put("*", "unittest");
                    }
                };
        TaskModel task = new TaskModel();
        task.setOutputData(new HashMap<>());
        Map<String, Object> inputData = new HashMap<>();
        inputData.put("subWorkflowName", "UnitWorkFlow");
        inputData.put("subWorkflowVersion", 2);
        inputData.put("subWorkflowTaskToDomain", taskToDomain);
        task.setInputData(inputData);
        StartWorkflowInput startWorkflowInput = new StartWorkflowInput();
        startWorkflowInput.setName("UnitWorkFlow");
        startWorkflowInput.setVersion(2);
        startWorkflowInput.setWorkflowInput(inputData);
        startWorkflowInput.setTaskToDomain(taskToDomain);
        when(workflowExecutor.startWorkflow(startWorkflowInput)).thenReturn("workflow_1");
        subWorkflow.start(workflowInstance, task, workflowExecutor);
        assertEquals("workflow_1", task.getSubWorkflowId());
    }
    // execute(): without a recorded sub-workflow id there is nothing to poll -> false.
    @Test
    public void testExecuteSubWorkflowWithoutId() {
        WorkflowDef workflowDef = new WorkflowDef();
        WorkflowModel workflowInstance = new WorkflowModel();
        workflowInstance.setWorkflowDefinition(workflowDef);
        TaskModel task = new TaskModel();
        task.setOutputData(new HashMap<>());
        Map<String, Object> inputData = new HashMap<>();
        inputData.put("subWorkflowName", "UnitWorkFlow");
        inputData.put("subWorkflowVersion", 2);
        task.setInputData(inputData);
        StartWorkflowInput startWorkflowInput = new StartWorkflowInput();
        startWorkflowInput.setName("UnitWorkFlow");
        startWorkflowInput.setVersion(2);
        startWorkflowInput.setWorkflowInput(inputData);
        startWorkflowInput.setTaskToDomain(workflowInstance.getTaskToDomain());
        when(workflowExecutor.startWorkflow(startWorkflowInput)).thenReturn("workflow_1");
        assertFalse(subWorkflow.execute(workflowInstance, task, workflowExecutor));
    }
    // execute(): maps each child workflow status to the expected task outcome:
    // RUNNING/PAUSED -> no change (false); COMPLETED -> COMPLETED; FAILED -> FAILED;
    // TIMED_OUT -> TIMED_OUT; TERMINATED -> CANCELED. Terminal child states also
    // propagate the child's reasonForIncompletion into the task's reason.
    @Test
    public void testExecuteWorkflowStatus() {
        WorkflowDef workflowDef = new WorkflowDef();
        WorkflowModel workflowInstance = new WorkflowModel();
        WorkflowModel subWorkflowInstance = new WorkflowModel();
        workflowInstance.setWorkflowDefinition(workflowDef);
        Map<String, String> taskToDomain =
                new HashMap<>() {
                    {
                        put("*", "unittest");
                    }
                };
        TaskModel task = new TaskModel();
        Map<String, Object> outputData = new HashMap<>();
        task.setOutputData(outputData);
        task.setSubWorkflowId("sub-workflow-id");
        Map<String, Object> inputData = new HashMap<>();
        inputData.put("subWorkflowName", "UnitWorkFlow");
        inputData.put("subWorkflowVersion", 2);
        inputData.put("subWorkflowTaskToDomain", taskToDomain);
        task.setInputData(inputData);
        StartWorkflowInput startWorkflowInput = new StartWorkflowInput();
        startWorkflowInput.setName("UnitWorkFlow");
        startWorkflowInput.setVersion(2);
        startWorkflowInput.setWorkflowInput(inputData);
        startWorkflowInput.setTaskToDomain(taskToDomain);
        when(workflowExecutor.startWorkflow(startWorkflowInput)).thenReturn("workflow_1");
        when(workflowExecutor.getWorkflow(eq("sub-workflow-id"), eq(false)))
                .thenReturn(subWorkflowInstance);
        // Non-terminal child states: task is left untouched and execute returns false.
        subWorkflowInstance.setStatus(WorkflowModel.Status.RUNNING);
        assertFalse(subWorkflow.execute(workflowInstance, task, workflowExecutor));
        assertNull(task.getStatus());
        assertNull(task.getReasonForIncompletion());
        subWorkflowInstance.setStatus(WorkflowModel.Status.PAUSED);
        assertFalse(subWorkflow.execute(workflowInstance, task, workflowExecutor));
        assertNull(task.getStatus());
        assertNull(task.getReasonForIncompletion());
        subWorkflowInstance.setStatus(WorkflowModel.Status.COMPLETED);
        assertTrue(subWorkflow.execute(workflowInstance, task, workflowExecutor));
        assertEquals(TaskModel.Status.COMPLETED, task.getStatus());
        assertNull(task.getReasonForIncompletion());
        subWorkflowInstance.setStatus(WorkflowModel.Status.FAILED);
        subWorkflowInstance.setReasonForIncompletion("unit1");
        assertTrue(subWorkflow.execute(workflowInstance, task, workflowExecutor));
        assertEquals(TaskModel.Status.FAILED, task.getStatus());
        assertTrue(task.getReasonForIncompletion().contains("unit1"));
        subWorkflowInstance.setStatus(WorkflowModel.Status.TIMED_OUT);
        subWorkflowInstance.setReasonForIncompletion("unit2");
        assertTrue(subWorkflow.execute(workflowInstance, task, workflowExecutor));
        assertEquals(TaskModel.Status.TIMED_OUT, task.getStatus());
        assertTrue(task.getReasonForIncompletion().contains("unit2"));
        subWorkflowInstance.setStatus(WorkflowModel.Status.TERMINATED);
        subWorkflowInstance.setReasonForIncompletion("unit3");
        assertTrue(subWorkflow.execute(workflowInstance, task, workflowExecutor));
        assertEquals(TaskModel.Status.CANCELED, task.getStatus());
        assertTrue(task.getReasonForIncompletion().contains("unit3"));
    }
    // cancel(): when a sub-workflow id is present, the child workflow is terminated.
    @Test
    public void testCancelWithWorkflowId() {
        WorkflowDef workflowDef = new WorkflowDef();
        WorkflowModel workflowInstance = new WorkflowModel();
        WorkflowModel subWorkflowInstance = new WorkflowModel();
        workflowInstance.setWorkflowDefinition(workflowDef);
        TaskModel task = new TaskModel();
        task.setSubWorkflowId("sub-workflow-id");
        Map<String, Object> inputData = new HashMap<>();
        inputData.put("subWorkflowName", "UnitWorkFlow");
        inputData.put("subWorkflowVersion", 2);
        task.setInputData(inputData);
        StartWorkflowInput startWorkflowInput = new StartWorkflowInput();
        startWorkflowInput.setName("UnitWorkFlow");
        startWorkflowInput.setVersion(2);
        startWorkflowInput.setWorkflowInput(inputData);
        startWorkflowInput.setTaskToDomain(workflowInstance.getTaskToDomain());
        when(workflowExecutor.startWorkflow(startWorkflowInput)).thenReturn("workflow_1");
        when(workflowExecutor.getWorkflow(eq("sub-workflow-id"), eq(true)))
                .thenReturn(subWorkflowInstance);
        workflowInstance.setStatus(WorkflowModel.Status.TIMED_OUT);
        subWorkflow.cancel(workflowInstance, task, workflowExecutor);
        assertEquals(WorkflowModel.Status.TERMINATED, subWorkflowInstance.getStatus());
    }
    // cancel(): without a sub-workflow id, cancel is a no-op — the (untouched) child
    // instance keeps its default RUNNING status.
    @Test
    public void testCancelWithoutWorkflowId() {
        WorkflowDef workflowDef = new WorkflowDef();
        WorkflowModel workflowInstance = new WorkflowModel();
        WorkflowModel subWorkflowInstance = new WorkflowModel();
        workflowInstance.setWorkflowDefinition(workflowDef);
        TaskModel task = new TaskModel();
        Map<String, Object> outputData = new HashMap<>();
        task.setOutputData(outputData);
        Map<String, Object> inputData = new HashMap<>();
        inputData.put("subWorkflowName", "UnitWorkFlow");
        inputData.put("subWorkflowVersion", 2);
        task.setInputData(inputData);
        StartWorkflowInput startWorkflowInput = new StartWorkflowInput();
        startWorkflowInput.setName("UnitWorkFlow");
        startWorkflowInput.setVersion(2);
        startWorkflowInput.setWorkflowInput(inputData);
        startWorkflowInput.setTaskToDomain(workflowInstance.getTaskToDomain());
        when(workflowExecutor.startWorkflow(startWorkflowInput)).thenReturn("workflow_1");
        when(workflowExecutor.getWorkflow(eq("sub-workflow-id"), eq(false)))
                .thenReturn(subWorkflowInstance);
        subWorkflow.cancel(workflowInstance, task, workflowExecutor);
        assertEquals(WorkflowModel.Status.RUNNING, subWorkflowInstance.getStatus());
    }
    // SUB_WORKFLOW is a synchronous system task.
    @Test
    public void testIsAsync() {
        assertFalse(subWorkflow.isAsync());
    }
    // start(): an inline "subWorkflowDefinition" overrides the name lookup — the child
    // is started with the embedded definition (and its name), not "subWorkflowName".
    @Test
    public void testStartSubWorkflowWithSubWorkflowDefinition() {
        WorkflowDef workflowDef = new WorkflowDef();
        WorkflowModel workflowInstance = new WorkflowModel();
        workflowInstance.setWorkflowDefinition(workflowDef);
        WorkflowDef subWorkflowDef = new WorkflowDef();
        subWorkflowDef.setName("subWorkflow_1");
        TaskModel task = new TaskModel();
        task.setOutputData(new HashMap<>());
        Map<String, Object> inputData = new HashMap<>();
        inputData.put("subWorkflowName", "UnitWorkFlow");
        inputData.put("subWorkflowVersion", 2);
        inputData.put("subWorkflowDefinition", subWorkflowDef);
        task.setInputData(inputData);
        StartWorkflowInput startWorkflowInput = new StartWorkflowInput();
        startWorkflowInput.setName("subWorkflow_1");
        startWorkflowInput.setVersion(2);
        startWorkflowInput.setWorkflowInput(inputData);
        startWorkflowInput.setWorkflowDefinition(subWorkflowDef);
        startWorkflowInput.setTaskToDomain(workflowInstance.getTaskToDomain());
        when(workflowExecutor.startWorkflow(startWorkflowInput)).thenReturn("workflow_1");
        subWorkflow.start(workflowInstance, task, workflowExecutor);
        assertEquals("workflow_1", task.getSubWorkflowId());
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/core/src/test/java/com/netflix/conductor/core/execution/tasks/DoWhileTest.java | core/src/test/java/com/netflix/conductor/core/execution/tasks/DoWhileTest.java | /*
* Copyright 2024 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.core.execution.tasks;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.stream.Collectors;
import org.junit.Before;
import org.junit.Test;
import org.mockito.ArgumentCaptor;
import org.mockito.Mock;
import org.mockito.MockitoAnnotations;
import com.netflix.conductor.common.metadata.workflow.WorkflowTask;
import com.netflix.conductor.core.dal.ExecutionDAOFacade;
import com.netflix.conductor.core.utils.ParametersUtils;
import com.netflix.conductor.model.TaskModel;
import com.netflix.conductor.model.WorkflowModel;
import com.fasterxml.jackson.databind.ObjectMapper;
import static org.junit.Assert.*;
import static org.mockito.ArgumentMatchers.anyString;
import static org.mockito.Mockito.*;
/**
 * Unit tests for {@code DoWhile#removeIterations}, which prunes persisted task records
 * from old loop iterations so that only the most recent {@code keepLastN} iterations
 * (and the DO_WHILE task itself) are retained. Removal is observed through a mocked
 * {@code ExecutionDAOFacade}; tests verify which task IDs are passed to
 * {@code removeTask}.
 */
public class DoWhileTest {
    // Mocked persistence facade; all assertions verify interactions with removeTask().
    @Mock private ExecutionDAOFacade executionDAOFacade;
    private ParametersUtils parametersUtils;
    private DoWhile doWhile;
    @Before
    public void setup() {
        MockitoAnnotations.openMocks(this);
        parametersUtils = new ParametersUtils(new ObjectMapper());
        doWhile = new DoWhile(parametersUtils, executionDAOFacade);
    }
    @Test
    public void testRemoveIterations_WithKeepLastN_RemovesOldIterations() {
        // Create workflow with 10 iterations, keep last 3
        WorkflowModel workflow = createWorkflowWithIterations(10, 3);
        TaskModel doWhileTask = getDoWhileTask(workflow);
        doWhileTask.setIteration(10);
        // Execute removal
        doWhile.removeIterations(workflow, doWhileTask, 3);
        // Should remove 7 iterations * 3 tasks = 21 tasks
        verify(executionDAOFacade, times(21)).removeTask(anyString());
    }
    @Test
    public void testRemoveIterations_BelowThreshold_RemovesNothing() {
        // Create workflow with 3 iterations, keep last 5
        WorkflowModel workflow = createWorkflowWithIterations(3, 3);
        TaskModel doWhileTask = getDoWhileTask(workflow);
        doWhileTask.setIteration(3);
        // Execute removal
        doWhile.removeIterations(workflow, doWhileTask, 5);
        // Should not remove anything (iteration 3 <= keepLastN 5)
        verify(executionDAOFacade, never()).removeTask(anyString());
    }
    @Test
    public void testRemoveIterations_ExactBoundary_RemovesNothing() {
        // Create workflow with 5 iterations, keep last 5
        WorkflowModel workflow = createWorkflowWithIterations(5, 3);
        TaskModel doWhileTask = getDoWhileTask(workflow);
        doWhileTask.setIteration(5);
        // Execute removal
        doWhile.removeIterations(workflow, doWhileTask, 5);
        // Should not remove anything (iteration 5 == keepLastN 5)
        verify(executionDAOFacade, never()).removeTask(anyString());
    }
    @Test
    public void testRemoveIterations_FirstIteration_RemovesNothing() {
        // Create workflow with 1 iteration
        WorkflowModel workflow = createWorkflowWithIterations(1, 3);
        TaskModel doWhileTask = getDoWhileTask(workflow);
        doWhileTask.setIteration(1);
        // Execute removal
        doWhile.removeIterations(workflow, doWhileTask, 3);
        // Should not remove anything (no old iterations yet)
        verify(executionDAOFacade, never()).removeTask(anyString());
    }
    @Test
    public void testRemoveIterations_KeepLastOne_RemovesAllButLast() {
        // Create workflow with 10 iterations, keep last 1
        WorkflowModel workflow = createWorkflowWithIterations(10, 3);
        TaskModel doWhileTask = getDoWhileTask(workflow);
        doWhileTask.setIteration(10);
        // Execute removal
        doWhile.removeIterations(workflow, doWhileTask, 1);
        // Should remove 9 iterations * 3 tasks = 27 tasks
        verify(executionDAOFacade, times(27)).removeTask(anyString());
    }
    // The DO_WHILE task is the loop owner and must never be pruned, regardless of
    // which iterations are removed.
    @Test
    public void testRemoveIterations_DoesNotRemoveDoWhileTaskItself() {
        // Create workflow with 5 iterations
        WorkflowModel workflow = createWorkflowWithIterations(5, 3);
        TaskModel doWhileTask = getDoWhileTask(workflow);
        doWhileTask.setIteration(5);
        ArgumentCaptor<String> taskIdCaptor = ArgumentCaptor.forClass(String.class);
        // Execute removal
        doWhile.removeIterations(workflow, doWhileTask, 2);
        // Capture all removed task IDs
        verify(executionDAOFacade, atLeastOnce()).removeTask(taskIdCaptor.capture());
        List<String> removedTaskIds = taskIdCaptor.getAllValues();
        // Verify DO_WHILE task itself was not removed
        assertFalse(
                "DO_WHILE task should not be removed",
                removedTaskIds.contains(doWhileTask.getTaskId()));
    }
    // Only tasks whose iteration falls outside the keepLastN window may be removed;
    // tasks from the retained (most recent) iterations must survive.
    @Test
    public void testRemoveIterations_OnlyRemovesTasksFromOldIterations() {
        // Create workflow with 5 iterations, keep last 2
        WorkflowModel workflow = createWorkflowWithIterations(5, 3);
        TaskModel doWhileTask = getDoWhileTask(workflow);
        doWhileTask.setIteration(5);
        ArgumentCaptor<String> taskIdCaptor = ArgumentCaptor.forClass(String.class);
        // Execute removal
        doWhile.removeIterations(workflow, doWhileTask, 2);
        // Capture all removed task IDs
        verify(executionDAOFacade, times(9)).removeTask(taskIdCaptor.capture()); // 3 iterations * 3
        // tasks
        List<String> removedTaskIds = taskIdCaptor.getAllValues();
        // Get tasks that should remain (iterations 4, 5)
        List<TaskModel> remainingTasks =
                workflow.getTasks().stream()
                        .filter(t -> t.getIteration() >= 4)
                        .collect(Collectors.toList());
        // Verify no remaining tasks were removed
        for (TaskModel task : remainingTasks) {
            assertFalse(
                    "Task from iteration "
                            + task.getIteration()
                            + " should not be removed: "
                            + task.getReferenceTaskName(),
                    removedTaskIds.contains(task.getTaskId()));
        }
        // Verify old tasks were removed (iterations 1, 2, 3)
        List<TaskModel> oldTasks =
                workflow.getTasks().stream()
                        .filter(t -> t.getIteration() <= 3)
                        .filter(
                                t ->
                                        !t.getReferenceTaskName()
                                                .equals(doWhileTask.getReferenceTaskName()))
                        .collect(Collectors.toList());
        assertEquals("Should have 9 old tasks", 9, oldTasks.size());
        for (TaskModel task : oldTasks) {
            assertTrue(
                    "Task from iteration "
                            + task.getIteration()
                            + " should be removed: "
                            + task.getReferenceTaskName(),
                    removedTaskIds.contains(task.getTaskId()));
        }
    }
    // Pruning is best-effort: a DAO failure on one task must not abort removal of the
    // remaining tasks (and must not propagate out of removeIterations).
    @Test
    public void testRemoveIterations_ContinuesOnDaoFailure() {
        // Create workflow with 3 iterations
        WorkflowModel workflow = createWorkflowWithIterations(3, 3);
        TaskModel doWhileTask = getDoWhileTask(workflow);
        doWhileTask.setIteration(3);
        // Get first task to simulate failure
        TaskModel firstTask =
                workflow.getTasks().stream()
                        .filter(t -> t.getIteration() == 1)
                        .filter(
                                t ->
                                        !t.getReferenceTaskName()
                                                .equals(doWhileTask.getReferenceTaskName()))
                        .findFirst()
                        .orElseThrow();
        // Simulate failure on first task removal
        doThrow(new RuntimeException("Database error"))
                .when(executionDAOFacade)
                .removeTask(firstTask.getTaskId());
        // Execute removal - should not throw exception
        doWhile.removeIterations(workflow, doWhileTask, 2);
        // Should still attempt to remove all old iteration tasks (3 tasks from iteration 1)
        verify(executionDAOFacade, times(3)).removeTask(anyString());
    }
    @Test
    public void testRemoveIterations_VerifiesCorrectTaskIdsRemoved() {
        // Create workflow with specific task IDs
        WorkflowModel workflow = createWorkflowWithIterations(4, 2);
        TaskModel doWhileTask = getDoWhileTask(workflow);
        doWhileTask.setIteration(4);
        ArgumentCaptor<String> taskIdCaptor = ArgumentCaptor.forClass(String.class);
        // Execute removal (should remove iterations 1, 2)
        doWhile.removeIterations(workflow, doWhileTask, 2);
        verify(executionDAOFacade, times(4)).removeTask(taskIdCaptor.capture()); // 2 iterations * 2
        // tasks
        List<String> removedTaskIds = taskIdCaptor.getAllValues();
        // Get expected task IDs (from iterations 1 and 2)
        Set<String> expectedRemovedIds =
                workflow.getTasks().stream()
                        .filter(t -> t.getIteration() <= 2)
                        .filter(
                                t ->
                                        !t.getReferenceTaskName()
                                                .equals(doWhileTask.getReferenceTaskName()))
                        .map(TaskModel::getTaskId)
                        .collect(Collectors.toSet());
        assertEquals("Should remove correct number of tasks", 4, expectedRemovedIds.size());
        assertEquals(
                "Should remove exact expected tasks",
                expectedRemovedIds,
                Set.copyOf(removedTaskIds));
    }
    @Test
    public void testRemoveIterations_HandlesEmptyWorkflow() {
        // Create empty workflow
        WorkflowModel workflow = new WorkflowModel();
        workflow.setTasks(new ArrayList<>());
        TaskModel doWhileTask = createDoWhileTask();
        doWhileTask.setIteration(5);
        workflow.getTasks().add(doWhileTask);
        // Execute removal - should handle gracefully
        doWhile.removeIterations(workflow, doWhileTask, 3);
        // Should not attempt to remove anything
        verify(executionDAOFacade, never()).removeTask(anyString());
    }
    @Test
    public void testRemoveIterations_WithMultipleTasksPerIteration() {
        // Create workflow with 5 tasks per iteration
        WorkflowModel workflow = createWorkflowWithIterations(5, 5);
        TaskModel doWhileTask = getDoWhileTask(workflow);
        doWhileTask.setIteration(5);
        // Execute removal (keep last 2 iterations)
        doWhile.removeIterations(workflow, doWhileTask, 2);
        // Should remove 3 iterations * 5 tasks = 15 tasks
        verify(executionDAOFacade, times(15)).removeTask(anyString());
    }
    // Helper methods
    // Builds a workflow fixture containing one DO_WHILE task plus, for each iteration
    // 1..iterations, tasksPerIteration COMPLETED SIMPLE tasks with deterministic IDs
    // ("task-<iteration>-<n>") and iteration-suffixed reference names.
    private WorkflowModel createWorkflowWithIterations(int iterations, int tasksPerIteration) {
        WorkflowModel workflow = new WorkflowModel();
        workflow.setWorkflowId("test-workflow-" + System.currentTimeMillis());
        List<TaskModel> allTasks = new ArrayList<>();
        // Create DO_WHILE task
        TaskModel doWhileTask = createDoWhileTask();
        allTasks.add(doWhileTask);
        // Create tasks for each iteration
        for (int iteration = 1; iteration <= iterations; iteration++) {
            for (int taskNum = 1; taskNum <= tasksPerIteration; taskNum++) {
                TaskModel task = new TaskModel();
                task.setTaskId("task-" + iteration + "-" + taskNum);
                task.setReferenceTaskName("loopTask" + taskNum + "__" + iteration);
                task.setIteration(iteration);
                task.setTaskType("SIMPLE");
                task.setStatus(TaskModel.Status.COMPLETED);
                WorkflowTask workflowTask = new WorkflowTask();
                workflowTask.setTaskReferenceName("loopTask" + taskNum);
                task.setWorkflowTask(workflowTask);
                allTasks.add(task);
            }
        }
        workflow.setTasks(allTasks);
        return workflow;
    }
    // Builds the DO_WHILE fixture task: an IN_PROGRESS task whose definition loops
    // over 5 tasks and carries a keepLastN=3 input parameter.
    private TaskModel createDoWhileTask() {
        TaskModel doWhileTask = new TaskModel();
        doWhileTask.setTaskId("do-while-task");
        doWhileTask.setReferenceTaskName("doWhileTask");
        doWhileTask.setTaskType("DO_WHILE");
        doWhileTask.setStatus(TaskModel.Status.IN_PROGRESS);
        // Create workflow task with loopOver definition
        WorkflowTask workflowTask = new WorkflowTask();
        workflowTask.setTaskReferenceName("doWhileTask");
        workflowTask.setType("DO_WHILE");
        // Add loop over tasks
        List<WorkflowTask> loopOverTasks = new ArrayList<>();
        for (int i = 1; i <= 5; i++) {
            WorkflowTask loopTask = new WorkflowTask();
            loopTask.setTaskReferenceName("loopTask" + i);
            loopOverTasks.add(loopTask);
        }
        workflowTask.setLoopOver(loopOverTasks);
        // Set input parameters with keepLastN
        Map<String, Object> inputParams = new HashMap<>();
        inputParams.put("keepLastN", 3);
        workflowTask.setInputParameters(inputParams);
        doWhileTask.setWorkflowTask(workflowTask);
        return doWhileTask;
    }
    // Finds the single DO_WHILE task in a fixture workflow; fails the test if absent.
    private TaskModel getDoWhileTask(WorkflowModel workflow) {
        return workflow.getTasks().stream()
                .filter(t -> "DO_WHILE".equals(t.getTaskType()))
                .findFirst()
                .orElseThrow(() -> new IllegalStateException("No DO_WHILE task found"));
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/core/src/test/java/com/netflix/conductor/core/execution/tasks/TestJoin.java | core/src/test/java/com/netflix/conductor/core/execution/tasks/TestJoin.java | /*
* Copyright 2024 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.core.execution.tasks;
import java.util.List;
import java.util.stream.Collectors;
import org.apache.commons.lang3.tuple.Pair;
import org.junit.Test;
import com.netflix.conductor.common.metadata.workflow.WorkflowTask;
import com.netflix.conductor.core.config.ConductorProperties;
import com.netflix.conductor.core.execution.WorkflowExecutor;
import com.netflix.conductor.model.TaskModel;
import com.netflix.conductor.model.WorkflowModel;
import static org.junit.Assert.*;
import static org.mockito.Mockito.mock;
public class TestJoin {
private final ConductorProperties properties = new ConductorProperties();
private final WorkflowExecutor executor = mock(WorkflowExecutor.class);
private TaskModel createTask(
String referenceName,
TaskModel.Status status,
boolean isOptional,
boolean isPermissive) {
TaskModel task = new TaskModel();
task.setStatus(status);
task.setReferenceTaskName(referenceName);
WorkflowTask workflowTask = new WorkflowTask();
workflowTask.setOptional(isOptional);
workflowTask.setPermissive(isPermissive);
task.setWorkflowTask(workflowTask);
return task;
}
private Pair<WorkflowModel, TaskModel> createJoinWorkflow(
List<TaskModel> tasks, String... extraTaskRefNames) {
WorkflowModel workflow = new WorkflowModel();
var join = new TaskModel();
join.setReferenceTaskName("join");
var taskRefNames =
tasks.stream().map(TaskModel::getReferenceTaskName).collect(Collectors.toList());
taskRefNames.addAll(List.of(extraTaskRefNames));
join.getInputData().put("joinOn", taskRefNames);
workflow.getTasks().addAll(tasks);
workflow.getTasks().add(join);
return Pair.of(workflow, join);
}
@Test
public void testShouldNotMarkJoinAsCompletedWithErrorsWhenNotDone() {
var task1 = createTask("task1", TaskModel.Status.COMPLETED_WITH_ERRORS, true, false);
// task2 is not scheduled yet, so the join is not completed
var wfJoinPair = createJoinWorkflow(List.of(task1), "task2");
var join = new Join(properties);
var result = join.execute(wfJoinPair.getLeft(), wfJoinPair.getRight(), executor);
assertFalse(result);
}
@Test
public void testJoinCompletesSuccessfullyWhenAllTasksSucceed() {
var task1 = createTask("task1", TaskModel.Status.COMPLETED, false, false);
var task2 = createTask("task2", TaskModel.Status.COMPLETED, false, false);
var wfJoinPair = createJoinWorkflow(List.of(task1, task2));
var join = new Join(properties);
var result = join.execute(wfJoinPair.getLeft(), wfJoinPair.getRight(), executor);
assertTrue("Join task should execute successfully when all tasks succeed", result);
assertEquals(
"Join task status should be COMPLETED when all tasks succeed",
TaskModel.Status.COMPLETED,
wfJoinPair.getRight().getStatus());
}
@Test
public void testJoinWaitsWhenAnyTaskIsNotTerminal() {
var task1 = createTask("task1", TaskModel.Status.IN_PROGRESS, false, false);
var task2 = createTask("task2", TaskModel.Status.COMPLETED, false, false);
var wfJoinPair = createJoinWorkflow(List.of(task1, task2));
var join = new Join(properties);
var result = join.execute(wfJoinPair.getLeft(), wfJoinPair.getRight(), executor);
assertFalse("Join task should wait when any task is not in terminal state", result);
}
/** A failed mandatory task fails the join, even when an optional task merely erred. */
@Test
public void testJoinFailsWhenMandatoryTaskFails() {
    // Mandatory task that failed outright.
    var mandatoryFailed = createTask("task1", TaskModel.Status.FAILED, false, false);
    // Optional task that completed with errors (tolerated on its own).
    var optionalErrored =
            createTask("task2", TaskModel.Status.COMPLETED_WITH_ERRORS, true, false);
    var wfAndJoin = createJoinWorkflow(List.of(mandatoryFailed, optionalErrored));
    var joinTask = new Join(properties);
    boolean executed = joinTask.execute(wfAndJoin.getLeft(), wfAndJoin.getRight(), executor);
    assertTrue("Join task should be executed when a mandatory task fails", executed);
    assertEquals(
            "Join task status should be FAILED when a mandatory task fails",
            TaskModel.Status.FAILED,
            wfAndJoin.getRight().getStatus());
}
/** Only an optional task erring downgrades the join to COMPLETED_WITH_ERRORS, not FAILED. */
@Test
public void testJoinCompletesWithErrorsWhenOnlyOptionalTasksFail() {
    // Mandatory task succeeds.
    var mandatoryOk = createTask("task1", TaskModel.Status.COMPLETED, false, false);
    // Optional task completes with errors.
    var optionalErrored =
            createTask("task2", TaskModel.Status.COMPLETED_WITH_ERRORS, true, false);
    var wfAndJoin = createJoinWorkflow(List.of(mandatoryOk, optionalErrored));
    var joinTask = new Join(properties);
    boolean executed = joinTask.execute(wfAndJoin.getLeft(), wfAndJoin.getRight(), executor);
    assertTrue("Join task should be executed when only optional tasks fail", executed);
    assertEquals(
            "Join task status should be COMPLETED_WITH_ERRORS when only optional tasks fail",
            TaskModel.Status.COMPLETED_WITH_ERRORS,
            wfAndJoin.getRight().getStatus());
}
/** The join's reasonForIncompletion must contain the failure reasons of all failed tasks. */
@Test
public void testJoinAggregatesFailureReasonsCorrectly() {
    var firstFailed = createTask("task1", TaskModel.Status.FAILED, false, false);
    firstFailed.setReasonForIncompletion("Task1 failed");
    var secondFailed = createTask("task2", TaskModel.Status.FAILED, false, false);
    secondFailed.setReasonForIncompletion("Task2 failed");
    var wfAndJoin = createJoinWorkflow(List.of(firstFailed, secondFailed));
    boolean executed =
            new Join(properties).execute(wfAndJoin.getLeft(), wfAndJoin.getRight(), executor);
    assertTrue("Join task should be executed when tasks fail", executed);
    assertEquals(
            "Join task status should be FAILED when tasks fail",
            TaskModel.Status.FAILED,
            wfAndJoin.getRight().getStatus());
    // Both individual reasons must appear in the aggregated message.
    var aggregatedReason = wfAndJoin.getRight().getReasonForIncompletion();
    assertTrue(
            "Join task reason for incompletion should aggregate failure reasons",
            aggregatedReason.contains("Task1 failed")
                    && aggregatedReason.contains("Task2 failed"));
}
/**
 * A permissive task failure must not short-circuit the join: the join keeps waiting for the
 * remaining tasks and only fails once all of them have reached a terminal state.
 */
@Test
public void testJoinWaitsForAllTasksBeforeFailingDueToPermissiveTaskFailure() {
    // Permissive task that has already failed.
    var permissiveFailed = createTask("task1", TaskModel.Status.FAILED, false, true);
    // Non-permissive task that is not terminal yet but will eventually succeed.
    var pending = createTask("task2", TaskModel.Status.IN_PROGRESS, false, false);
    var wfAndJoin = createJoinWorkflow(List.of(permissiveFailed, pending));
    var joinTask = new Join(properties);
    // First pass: task2 is still running, so the join must not conclude.
    boolean executed = joinTask.execute(wfAndJoin.getLeft(), wfAndJoin.getRight(), executor);
    assertFalse("Join task should wait as not all tasks are terminal", executed);
    // task2 reaches a terminal state; the second pass may now act on the permissive failure.
    pending.setStatus(TaskModel.Status.COMPLETED);
    executed = joinTask.execute(wfAndJoin.getLeft(), wfAndJoin.getRight(), executor);
    assertTrue("Join task should proceed as now all tasks are terminal", executed);
    assertEquals(
            "Join task should be marked as FAILED due to permissive task failure",
            TaskModel.Status.FAILED,
            wfAndJoin.getRight().getStatus());
}
/** Below the postpone threshold the evaluation offset is always zero. */
@Test
public void testEvaluationOffsetWhenPollCountIsBelowThreshold() {
    var joinTask = new Join(properties);
    var task = createTask("join1", TaskModel.Status.COMPLETED, false, false);
    task.setPollCount(properties.getSystemTaskPostponeThreshold() - 1);
    var offset = joinTask.getEvaluationOffset(task, 30L);
    assertEquals(0L, (long) offset.orElseThrow());
}
/** Past the postpone threshold the offset grows with the poll count and saturates at maxOffset. */
@Test
public void testEvaluationOffsetWhenPollCountIsAboveThreshold() {
    final var maxOffset = 30L;
    var joinTask = new Join(properties);
    var task = createTask("join1", TaskModel.Status.COMPLETED, false, false);
    // Just past the threshold: offset of one second.
    task.setPollCount(properties.getSystemTaskPostponeThreshold() + 1);
    var offset = joinTask.getEvaluationOffset(task, maxOffset);
    assertEquals(1L, (long) offset.orElseThrow());
    // Ten polls past the threshold: exponential in EVALUATION_OFFSET_BASE.
    task.setPollCount(properties.getSystemTaskPostponeThreshold() + 10);
    offset = joinTask.getEvaluationOffset(task, maxOffset);
    assertEquals(
            (long) Math.pow(Join.EVALUATION_OFFSET_BASE, 10), (long) offset.orElseThrow());
    // Far past the threshold: capped at maxOffset.
    task.setPollCount(properties.getSystemTaskPostponeThreshold() + 40);
    offset = joinTask.getEvaluationOffset(task, maxOffset);
    assertEquals(maxOffset, (long) offset.orElseThrow());
}
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/core/src/test/java/com/netflix/conductor/core/execution/tasks/EventQueueResolutionTest.java | core/src/test/java/com/netflix/conductor/core/execution/tasks/EventQueueResolutionTest.java | /*
* Copyright 2022 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.core.execution.tasks;
import java.util.HashMap;
import java.util.Map;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.test.context.ContextConfiguration;
import org.springframework.test.context.junit4.SpringRunner;
import com.netflix.conductor.common.config.TestObjectMapperConfiguration;
import com.netflix.conductor.common.metadata.tasks.TaskType;
import com.netflix.conductor.common.metadata.workflow.WorkflowDef;
import com.netflix.conductor.core.events.EventQueueProvider;
import com.netflix.conductor.core.events.EventQueues;
import com.netflix.conductor.core.events.MockQueueProvider;
import com.netflix.conductor.core.events.queue.ObservableQueue;
import com.netflix.conductor.core.utils.ParametersUtils;
import com.netflix.conductor.model.TaskModel;
import com.netflix.conductor.model.WorkflowModel;
import com.fasterxml.jackson.databind.ObjectMapper;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
/**
* Tests the {@link Event#computeQueueName(WorkflowModel, TaskModel)} and {@link
* Event#getQueue(String, String)} methods with a real {@link ParametersUtils} object.
*/
@ContextConfiguration(classes = {TestObjectMapperConfiguration.class})
@RunWith(SpringRunner.class)
public class EventQueueResolutionTest {
// Definition used by testDynamicSinks; its name "testWorkflow" appears in resolved queue names.
private WorkflowDef testWorkflowDefinition;
// Maps a sink prefix ("sqs", "conductor") to its queue provider.
private EventQueues eventQueues;
// Real (non-mock) ParametersUtils so ${task.output.*} expressions are actually evaluated.
private ParametersUtils parametersUtils;
@Autowired private ObjectMapper objectMapper;
@Before
public void setup() {
// Register mock providers for the two sink types exercised below; no real queues are needed.
Map<String, EventQueueProvider> providers = new HashMap<>();
providers.put("sqs", new MockQueueProvider("sqs"));
providers.put("conductor", new MockQueueProvider("conductor"));
parametersUtils = new ParametersUtils(objectMapper);
eventQueues = new EventQueues(providers, parametersUtils);
testWorkflowDefinition = new WorkflowDef();
testWorkflowDefinition.setName("testWorkflow");
testWorkflowDefinition.setVersion(2);
}
/**
 * Resolves the EVENT task's "sink" input through every supported form: a static
 * "type:name" value, ${task.output.*} expressions, and the bare "conductor" default.
 */
@Test
public void testSinkParam() {
String sink = "sqs:queue_name";
WorkflowDef def = new WorkflowDef();
def.setName("wf0");
WorkflowModel workflow = new WorkflowModel();
workflow.setWorkflowDefinition(def);
// Two upstream tasks whose outputs feed the ${tX.output.q} expressions below.
TaskModel task1 = new TaskModel();
task1.setReferenceTaskName("t1");
task1.addOutput("q", "t1_queue");
workflow.getTasks().add(task1);
TaskModel task2 = new TaskModel();
task2.setReferenceTaskName("t2");
task2.addOutput("q", "task2_queue");
workflow.getTasks().add(task2);
// The EVENT task under test; its "sink" input is rewritten for each case.
TaskModel task = new TaskModel();
task.setReferenceTaskName("event");
task.getInputData().put("sink", sink);
task.setTaskType(TaskType.EVENT.name());
workflow.getTasks().add(task);
Event event = new Event(eventQueues, parametersUtils, objectMapper);
// Case 1: static "sqs:queue_name" -> queue "queue_name" of type "sqs".
String queueName = event.computeQueueName(workflow, task);
ObservableQueue queue = event.getQueue(queueName, task.getTaskId());
// Message-first overload: on failure, the task's reasonForIncompletion is shown.
assertNotNull(task.getReasonForIncompletion(), queue);
assertEquals("queue_name", queue.getName());
assertEquals("sqs", queue.getType());
// Case 2: sink expression referencing t1's output -> "t1_queue".
sink = "sqs:${t1.output.q}";
task.getInputData().put("sink", sink);
queueName = event.computeQueueName(workflow, task);
queue = event.getQueue(queueName, task.getTaskId());
assertNotNull(queue);
assertEquals("t1_queue", queue.getName());
assertEquals("sqs", queue.getType());
// Case 3: sink expression referencing t2's output -> "task2_queue".
sink = "sqs:${t2.output.q}";
task.getInputData().put("sink", sink);
queueName = event.computeQueueName(workflow, task);
queue = event.getQueue(queueName, task.getTaskId());
assertNotNull(queue);
assertEquals("task2_queue", queue.getName());
assertEquals("sqs", queue.getType());
// Case 4: bare "conductor" resolves to "<workflowName>:<taskRefName>".
sink = "conductor";
task.getInputData().put("sink", sink);
queueName = event.computeQueueName(workflow, task);
queue = event.getQueue(queueName, task.getTaskId());
assertNotNull(queue);
assertEquals(
workflow.getWorkflowName() + ":" + task.getReferenceTaskName(), queue.getName());
assertEquals("conductor", queue.getType());
// Case 5: another static sqs queue name, used as-is.
sink = "sqs:static_value";
task.getInputData().put("sink", sink);
queueName = event.computeQueueName(workflow, task);
queue = event.getQueue(queueName, task.getTaskId());
assertNotNull(queue);
assertEquals("static_value", queue.getName());
assertEquals("sqs", queue.getType());
}
/**
 * Verifies dynamic sink resolution: "conductor:<name>" is prefixed with the workflow name,
 * bare "conductor" falls back to "<workflowName>:<taskRefName>", and "sqs:<name>" is used
 * as-is. The task must remain IN_PROGRESS throughout.
 */
@Test
public void testDynamicSinks() {
Event event = new Event(eventQueues, parametersUtils, objectMapper);
WorkflowModel workflow = new WorkflowModel();
workflow.setWorkflowDefinition(testWorkflowDefinition);
TaskModel task = new TaskModel();
task.setReferenceTaskName("task0");
task.setTaskId("task_id_0");
task.setStatus(TaskModel.Status.IN_PROGRESS);
task.getInputData().put("sink", "conductor:some_arbitary_queue");
String queueName = event.computeQueueName(workflow, task);
ObservableQueue queue = event.getQueue(queueName, task.getTaskId());
// Queue resolution must not flip the task out of IN_PROGRESS.
assertEquals(TaskModel.Status.IN_PROGRESS, task.getStatus());
assertNotNull(queue);
assertEquals("testWorkflow:some_arbitary_queue", queue.getName());
assertEquals("testWorkflow:some_arbitary_queue", queue.getURI());
assertEquals("conductor", queue.getType());
task.getInputData().put("sink", "conductor");
queueName = event.computeQueueName(workflow, task);
queue = event.getQueue(queueName, task.getTaskId());
assertEquals(
"not in progress: " + task.getReasonForIncompletion(),
TaskModel.Status.IN_PROGRESS,
task.getStatus());
assertNotNull(queue);
assertEquals("testWorkflow:task0", queue.getName());
task.getInputData().put("sink", "sqs:my_sqs_queue_name");
queueName = event.computeQueueName(workflow, task);
queue = event.getQueue(queueName, task.getTaskId());
assertEquals(
"not in progress: " + task.getReasonForIncompletion(),
TaskModel.Status.IN_PROGRESS,
task.getStatus());
assertNotNull(queue);
assertEquals("my_sqs_queue_name", queue.getName());
assertEquals("sqs", queue.getType());
}
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/core/src/test/java/com/netflix/conductor/core/execution/tasks/TestNoop.java | core/src/test/java/com/netflix/conductor/core/execution/tasks/TestNoop.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.core.execution.tasks;
import org.junit.Test;
import com.netflix.conductor.core.execution.WorkflowExecutor;
import com.netflix.conductor.model.TaskModel;
import com.netflix.conductor.model.WorkflowModel;
import static org.junit.Assert.*;
import static org.mockito.Mockito.*;
public class TestNoop {

    private final WorkflowExecutor workflowExecutor = mock(WorkflowExecutor.class);

    /** Executing the no-op system task does nothing except mark the task COMPLETED. */
    @Test
    public void should_do_nothing() {
        Noop noop = new Noop();
        WorkflowModel workflow = new WorkflowModel();
        TaskModel task = new TaskModel();

        noop.execute(workflow, task, workflowExecutor);

        assertEquals(TaskModel.Status.COMPLETED, task.getStatus());
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/core/src/test/java/com/netflix/conductor/core/execution/tasks/TestSystemTaskWorkerCoordinator.java | core/src/test/java/com/netflix/conductor/core/execution/tasks/TestSystemTaskWorkerCoordinator.java | /*
* Copyright 2021 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.core.execution.tasks;
import java.time.Duration;
import java.util.Collections;
import org.junit.Before;
import org.junit.Test;
import com.netflix.conductor.core.config.ConductorProperties;
import static org.junit.Assert.assertTrue;
import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
public class TestSystemTaskWorkerCoordinator {

    private static final String TEST_QUEUE = "test";
    private static final String EXECUTION_NAMESPACE_CONSTANT = "@exeNS";

    private SystemTaskWorker workerMock;
    private ConductorProperties propertiesMock;

    @Before
    public void setUp() {
        workerMock = mock(SystemTaskWorker.class);
        propertiesMock = mock(ConductorProperties.class);
        // Default stubbing: fast poll interval, empty execution namespace.
        when(propertiesMock.getSystemTaskWorkerPollInterval()).thenReturn(Duration.ofMillis(50));
        when(propertiesMock.getSystemTaskWorkerExecutionNamespace()).thenReturn("");
    }

    /** A task whose queue name embeds the configured namespace is claimed by this coordinator. */
    @Test
    public void testIsFromCoordinatorExecutionNameSpace() {
        // Override the default: this coordinator now runs in the "exeNS" namespace.
        doReturn("exeNS").when(propertiesMock).getSystemTaskWorkerExecutionNamespace();
        SystemTaskWorkerCoordinator coordinator =
                new SystemTaskWorkerCoordinator(
                        workerMock, propertiesMock, Collections.emptySet());
        assertTrue(
                coordinator.isFromCoordinatorExecutionNameSpace(
                        new TaskWithExecutionNamespace()));
    }

    /** Test-only system task registered under the namespaced queue "test@exeNS". */
    static class TaskWithExecutionNamespace extends WorkflowSystemTask {
        public TaskWithExecutionNamespace() {
            super(TEST_QUEUE + EXECUTION_NAMESPACE_CONSTANT);
        }
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.