repo stringclasses 1k
values | file_url stringlengths 96 373 | file_path stringlengths 11 294 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 6
values | commit_sha stringclasses 1k
values | retrieved_at stringdate 2026-01-04 14:45:56 2026-01-04 18:30:23 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/core/src/main/java/com/netflix/conductor/core/execution/tasks/Event.java | core/src/main/java/com/netflix/conductor/core/execution/tasks/Event.java | /*
* Copyright 2022 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.core.execution.tasks;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.stereotype.Component;
import com.netflix.conductor.annotations.VisibleForTesting;
import com.netflix.conductor.core.events.EventQueues;
import com.netflix.conductor.core.events.queue.Message;
import com.netflix.conductor.core.events.queue.ObservableQueue;
import com.netflix.conductor.core.exception.NonTransientException;
import com.netflix.conductor.core.execution.WorkflowExecutor;
import com.netflix.conductor.core.utils.ParametersUtils;
import com.netflix.conductor.model.TaskModel;
import com.netflix.conductor.model.WorkflowModel;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.ObjectMapper;
import static com.netflix.conductor.common.metadata.tasks.TaskType.TASK_TYPE_EVENT;
@Component(TASK_TYPE_EVENT)
public class Event extends WorkflowSystemTask {

    private static final Logger LOGGER = LoggerFactory.getLogger(Event.class);

    public static final String NAME = "EVENT";

    // Output key under which the resolved destination queue name is stored by start().
    private static final String EVENT_PRODUCED = "event_produced";

    private final ObjectMapper objectMapper;
    private final ParametersUtils parametersUtils;
    private final EventQueues eventQueues;

    public Event(
            EventQueues eventQueues, ParametersUtils parametersUtils, ObjectMapper objectMapper) {
        super(TASK_TYPE_EVENT);
        this.parametersUtils = parametersUtils;
        this.eventQueues = eventQueues;
        this.objectMapper = objectMapper;
    }

    /**
     * Prepares the event payload and resolves the destination queue name.
     *
     * <p>The payload is the task input enriched with workflow context so that consumers can
     * correlate the event. On success the task moves to IN_PROGRESS and the resolved queue
     * name is stored in the task output under {@code event_produced}; on failure the task is
     * FAILED with the error message.
     *
     * @param workflow workflow for which the task is being started
     * @param task the EVENT task instance
     * @param workflowExecutor workflow executor (unused here)
     */
    @Override
    public void start(WorkflowModel workflow, TaskModel task, WorkflowExecutor workflowExecutor) {
        Map<String, Object> payload = new HashMap<>(task.getInputData());
        payload.put("workflowInstanceId", workflow.getWorkflowId());
        payload.put("workflowType", workflow.getWorkflowName());
        payload.put("workflowVersion", workflow.getWorkflowVersion());
        payload.put("correlationId", workflow.getCorrelationId());
        payload.put("taskToDomain", workflow.getTaskToDomain());
        task.setStatus(TaskModel.Status.IN_PROGRESS);
        task.addOutput(payload);
        try {
            // Resolve the sink expression up front; execute() reads it back from the output.
            task.addOutput(EVENT_PRODUCED, computeQueueName(workflow, task));
        } catch (Exception e) {
            task.setStatus(TaskModel.Status.FAILED);
            task.setReasonForIncompletion(e.getMessage());
            LOGGER.error(
                    "Error executing task: {}, workflow: {}",
                    task.getTaskId(),
                    workflow.getWorkflowId(),
                    e);
        }
    }

    /**
     * Publishes the task's output payload to the queue resolved during {@link #start}.
     *
     * @return true if the execution changed the task status (task completed), false otherwise
     *     (asyncComplete tasks stay IN_PROGRESS; failures set FAILED but return false)
     */
    @Override
    public boolean execute(
            WorkflowModel workflow, TaskModel task, WorkflowExecutor workflowExecutor) {
        try {
            String queueName = (String) task.getOutputData().get(EVENT_PRODUCED);
            ObservableQueue queue = getQueue(queueName, task.getTaskId());
            Message message = getPopulatedMessage(task);
            queue.publish(List.of(message));
            LOGGER.debug("Published message:{} to queue:{}", message.getId(), queue.getName());
            if (!isAsyncComplete(task)) {
                task.setStatus(TaskModel.Status.COMPLETED);
                return true;
            }
        } catch (JsonProcessingException jpe) {
            task.setStatus(TaskModel.Status.FAILED);
            task.setReasonForIncompletion("Error serializing JSON payload: " + jpe.getMessage());
            // Pass the exception as the last argument so the stack trace is logged.
            LOGGER.error(
                    "Error serializing JSON payload for task: {}, workflow: {}",
                    task.getTaskId(),
                    workflow.getWorkflowId(),
                    jpe);
        } catch (Exception e) {
            task.setStatus(TaskModel.Status.FAILED);
            task.setReasonForIncompletion(e.getMessage());
            LOGGER.error(
                    "Error executing task: {}, workflow: {}",
                    task.getTaskId(),
                    workflow.getWorkflowId(),
                    e);
        }
        return false;
    }

    @Override
    public void cancel(WorkflowModel workflow, TaskModel task, WorkflowExecutor workflowExecutor) {
        Message message = new Message(task.getTaskId(), null, task.getTaskId());
        String queueName = computeQueueName(workflow, task);
        ObservableQueue queue = getQueue(queueName, task.getTaskId());
        queue.ack(List.of(message));
    }

    /**
     * Resolves the destination queue name from the task's {@code sink} input parameter.
     *
     * <p>A sink of exactly "conductor" maps to {@code conductor:<workflowName>:<taskRefName>};
     * a sink starting with "conductor:" maps to {@code conductor:<workflowName>:<suffix>};
     * anything else starting with "conductor" is rejected. Other sinks are used verbatim.
     *
     * @throws IllegalStateException if the sink is missing or unsupported
     */
    @VisibleForTesting
    String computeQueueName(WorkflowModel workflow, TaskModel task) {
        String sinkValueRaw = (String) task.getInputData().get("sink");
        Map<String, Object> input = new HashMap<>();
        input.put("sink", sinkValueRaw);
        Map<String, Object> replaced =
                parametersUtils.getTaskInputV2(input, workflow, task.getTaskId(), null);
        String sinkValue = (String) replaced.get("sink");
        if (sinkValue == null) {
            // Fail fast with a clear message instead of an NPE from startsWith below.
            throw new IllegalStateException(
                    "No 'sink' specified in input for task: " + task.getTaskId());
        }
        String queueName = sinkValue;
        if (sinkValue.startsWith("conductor")) {
            if ("conductor".equals(sinkValue)) {
                queueName =
                        sinkValue
                                + ":"
                                + workflow.getWorkflowName()
                                + ":"
                                + task.getReferenceTaskName();
            } else if (sinkValue.startsWith("conductor:")) {
                // Strip only the leading "conductor:" prefix; a regex replaceAll would also
                // mangle any later occurrence of the literal inside the queue name.
                queueName =
                        "conductor:"
                                + workflow.getWorkflowName()
                                + ":"
                                + sinkValue.substring("conductor:".length());
            } else {
                throw new IllegalStateException(
                        "Invalid / Unsupported sink specified: " + sinkValue);
            }
        }
        return queueName;
    }

    /**
     * Looks up the {@link ObservableQueue} for the given queue name.
     *
     * @throws IllegalStateException if the queue name is invalid (cause preserved)
     * @throws NonTransientException for any other lookup failure
     */
    @VisibleForTesting
    ObservableQueue getQueue(String queueName, String taskId) {
        try {
            return eventQueues.getQueue(queueName);
        } catch (IllegalArgumentException e) {
            // Preserve the cause so the original failure is not lost.
            throw new IllegalStateException(
                    "Error loading queue:"
                            + queueName
                            + ", for task:"
                            + taskId
                            + ", error: "
                            + e.getMessage(),
                    e);
        } catch (Exception e) {
            throw new NonTransientException("Unable to find queue name for task " + taskId);
        }
    }

    /** Serializes the task output as JSON into a queue {@link Message} keyed by the task id. */
    Message getPopulatedMessage(TaskModel task) throws JsonProcessingException {
        String payloadJson = objectMapper.writeValueAsString(task.getOutputData());
        return new Message(task.getTaskId(), payloadJson, task.getTaskId());
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/core/src/main/java/com/netflix/conductor/core/execution/tasks/StartWorkflow.java | core/src/main/java/com/netflix/conductor/core/execution/tasks/StartWorkflow.java | /*
* Copyright 2022 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.core.execution.tasks;
import java.util.HashMap;
import java.util.Map;
import org.apache.commons.lang3.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.stereotype.Component;
import com.netflix.conductor.common.metadata.workflow.StartWorkflowRequest;
import com.netflix.conductor.core.exception.TransientException;
import com.netflix.conductor.core.execution.StartWorkflowInput;
import com.netflix.conductor.core.execution.WorkflowExecutor;
import com.netflix.conductor.model.TaskModel;
import com.netflix.conductor.model.WorkflowModel;
import com.fasterxml.jackson.databind.ObjectMapper;
import jakarta.validation.Validator;
import static com.netflix.conductor.common.metadata.tasks.TaskType.TASK_TYPE_START_WORKFLOW;
import static com.netflix.conductor.model.TaskModel.Status.COMPLETED;
import static com.netflix.conductor.model.TaskModel.Status.FAILED;
@Component(TASK_TYPE_START_WORKFLOW)
public class StartWorkflow extends WorkflowSystemTask {

    private static final Logger LOGGER = LoggerFactory.getLogger(StartWorkflow.class);

    private static final String WORKFLOW_ID = "workflowId";
    private static final String START_WORKFLOW_PARAMETER = "startWorkflow";

    private final ObjectMapper objectMapper;
    private final Validator validator;

    public StartWorkflow(ObjectMapper objectMapper, Validator validator) {
        super(TASK_TYPE_START_WORKFLOW);
        this.objectMapper = objectMapper;
        this.validator = validator;
    }

    /**
     * Starts a child workflow described by the {@code startWorkflow} input parameter.
     *
     * <p>On success the new workflow id is placed in the task output under {@code workflowId}
     * and the task is COMPLETED. On a transient backend error the task status is left
     * unchanged so the start can be retried; on any other error the task is FAILED.
     *
     * @param workflow the workflow triggering the start
     * @param taskModel the START_WORKFLOW task instance
     * @param workflowExecutor executor used to start the child workflow
     */
    @Override
    public void start(
            WorkflowModel workflow, TaskModel taskModel, WorkflowExecutor workflowExecutor) {
        StartWorkflowRequest request = getRequest(taskModel);
        if (request == null) {
            // getRequest already marked the task FAILED with a reason.
            return;
        }

        // Inherit the parent's task-to-domain mapping when the request doesn't specify one.
        if (request.getTaskToDomain() == null || request.getTaskToDomain().isEmpty()) {
            Map<String, String> workflowTaskToDomainMap = workflow.getTaskToDomain();
            if (workflowTaskToDomainMap != null) {
                request.setTaskToDomain(new HashMap<>(workflowTaskToDomainMap));
            }
        }

        // set the correlation id of starter workflow, if its empty in the StartWorkflowRequest
        request.setCorrelationId(
                StringUtils.defaultIfBlank(
                        request.getCorrelationId(), workflow.getCorrelationId()));

        try {
            String workflowId = startWorkflow(request, workflow.getWorkflowId(), workflowExecutor);
            taskModel.addOutput(WORKFLOW_ID, workflowId);
            taskModel.setStatus(COMPLETED);
        } catch (TransientException te) {
            // Leave the task status unchanged so the system task is polled and retried.
            // Include the exception so the transient failure is diagnosable from the logs.
            LOGGER.info(
                    "A transient backend error happened when task {} in {} tried to start workflow {}.",
                    taskModel.getTaskId(),
                    workflow.toShortString(),
                    request.getName(),
                    te);
        } catch (Exception ae) {
            taskModel.setStatus(FAILED);
            taskModel.setReasonForIncompletion(ae.getMessage());
            LOGGER.error(
                    "Error starting workflow: {} from workflow: {}",
                    request.getName(),
                    workflow.toShortString(),
                    ae);
        }
    }

    /**
     * Deserializes and validates the {@code startWorkflow} input parameter.
     *
     * @return the parsed request, or null if it is missing or invalid (in which case the task
     *     is marked FAILED with an explanatory reason)
     */
    private StartWorkflowRequest getRequest(TaskModel taskModel) {
        Map<String, Object> taskInput = taskModel.getInputData();

        StartWorkflowRequest startWorkflowRequest = null;

        if (taskInput.get(START_WORKFLOW_PARAMETER) == null) {
            taskModel.setStatus(FAILED);
            taskModel.setReasonForIncompletion(
                    "Missing '" + START_WORKFLOW_PARAMETER + "' in input data.");
        } else {
            try {
                startWorkflowRequest =
                        objectMapper.convertValue(
                                taskInput.get(START_WORKFLOW_PARAMETER),
                                StartWorkflowRequest.class);

                // Bean-validate the request; collect every violation into the failure reason.
                var violations = validator.validate(startWorkflowRequest);
                if (!violations.isEmpty()) {
                    StringBuilder reasonForIncompletion =
                            new StringBuilder(START_WORKFLOW_PARAMETER)
                                    .append(" validation failed. ");
                    for (var violation : violations) {
                        reasonForIncompletion
                                .append("'")
                                .append(violation.getPropertyPath().toString())
                                .append("' -> ")
                                .append(violation.getMessage())
                                .append(". ");
                    }
                    taskModel.setStatus(FAILED);
                    taskModel.setReasonForIncompletion(reasonForIncompletion.toString());
                    startWorkflowRequest = null;
                }
            } catch (IllegalArgumentException e) {
                LOGGER.error("Error reading StartWorkflowRequest for {}", taskModel, e);
                taskModel.setStatus(FAILED);
                taskModel.setReasonForIncompletion(
                        "Error reading StartWorkflowRequest. " + e.getMessage());
            }
        }

        return startWorkflowRequest;
    }

    /** Starts the child workflow, recording the triggering workflow id on the input. */
    private String startWorkflow(
            StartWorkflowRequest request, String workflowId, WorkflowExecutor workflowExecutor) {
        StartWorkflowInput input = new StartWorkflowInput(request);
        input.setTriggeringWorkflowId(workflowId);
        return workflowExecutor.startWorkflow(input);
    }

    @Override
    public boolean isAsync() {
        return true;
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/core/src/main/java/com/netflix/conductor/core/execution/tasks/Noop.java | core/src/main/java/com/netflix/conductor/core/execution/tasks/Noop.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.core.execution.tasks;
import org.springframework.stereotype.Component;
import com.netflix.conductor.core.execution.WorkflowExecutor;
import com.netflix.conductor.model.TaskModel;
import com.netflix.conductor.model.WorkflowModel;
import static com.netflix.conductor.common.metadata.tasks.TaskType.TASK_TYPE_NOOP;
@Component(TASK_TYPE_NOOP)
public class Noop extends WorkflowSystemTask {

    public Noop() {
        super(TASK_TYPE_NOOP);
    }

    /**
     * Completes immediately without performing any work.
     *
     * @param workflow workflow in which the task runs (unused)
     * @param task the NOOP task instance; marked COMPLETED
     * @param workflowExecutor workflow executor (unused)
     * @return always true — the task status was changed
     */
    @Override
    public boolean execute(
            WorkflowModel workflow, TaskModel task, WorkflowExecutor workflowExecutor) {
        task.setStatus(TaskModel.Status.COMPLETED);
        return true;
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/core/src/main/java/com/netflix/conductor/core/execution/tasks/SetVariable.java | core/src/main/java/com/netflix/conductor/core/execution/tasks/SetVariable.java | /*
* Copyright 2022 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.core.execution.tasks;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Map;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.stereotype.Component;
import com.netflix.conductor.core.config.ConductorProperties;
import com.netflix.conductor.core.dal.ExecutionDAOFacade;
import com.netflix.conductor.core.exception.NonTransientException;
import com.netflix.conductor.core.execution.WorkflowExecutor;
import com.netflix.conductor.model.TaskModel;
import com.netflix.conductor.model.WorkflowModel;
import com.fasterxml.jackson.databind.ObjectMapper;
import static com.netflix.conductor.common.metadata.tasks.TaskType.TASK_TYPE_SET_VARIABLE;
@Component(TASK_TYPE_SET_VARIABLE)
public class SetVariable extends WorkflowSystemTask {

    private static final Logger LOGGER = LoggerFactory.getLogger(SetVariable.class);

    private final ConductorProperties properties;
    private final ObjectMapper objectMapper;
    private final ExecutionDAOFacade executionDAOFacade;

    public SetVariable(
            ConductorProperties properties,
            ObjectMapper objectMapper,
            ExecutionDAOFacade executionDAOFacade) {
        super(TASK_TYPE_SET_VARIABLE);
        this.properties = properties;
        this.objectMapper = objectMapper;
        this.executionDAOFacade = executionDAOFacade;
    }

    /**
     * Checks that the JSON-serialized size of the workflow variables stays under the configured
     * threshold.
     *
     * @param workflow workflow whose variables are being validated
     * @param task task on which a failure reason is recorded when the limit is exceeded
     * @param variables the candidate variables map (already including this task's updates)
     * @return true if the payload is within the limit, false otherwise
     * @throws NonTransientException if the variables cannot be serialized at all
     */
    private boolean validateVariablesSize(
            WorkflowModel workflow, TaskModel task, Map<String, Object> variables) {
        String workflowId = workflow.getWorkflowId();
        long maxThreshold = properties.getMaxWorkflowVariablesPayloadSizeThreshold().toKilobytes();

        try {
            // writeValueAsBytes avoids the intermediate ByteArrayOutputStream.
            long payloadSize = this.objectMapper.writeValueAsBytes(variables).length;

            if (payloadSize > maxThreshold * 1024) {
                String errorMsg =
                        String.format(
                                "The variables payload size: %d of workflow: %s is greater than the permissible limit: %d kilobytes",
                                payloadSize, workflowId, maxThreshold);
                LOGGER.error(errorMsg);
                task.setReasonForIncompletion(errorMsg);
                return false;
            }
            return true;
        } catch (IOException e) {
            LOGGER.error(
                    "Unable to validate variables payload size of workflow: {}", workflowId, e);
            throw new NonTransientException(
                    "Unable to validate variables payload size of workflow: " + workflowId, e);
        }
    }

    /**
     * Copies the task's input parameters into the workflow variables, enforcing the configured
     * payload-size limit.
     *
     * <p>If the limit would be exceeded, all changes are rolled back and the task is failed
     * with a terminal error; otherwise the task completes and the workflow is persisted.
     *
     * @return always true — the task status was changed
     */
    @Override
    public boolean execute(WorkflowModel workflow, TaskModel task, WorkflowExecutor provider) {
        Map<String, Object> variables = workflow.getVariables();
        Map<String, Object> input = task.getInputData();
        String taskId = task.getTaskId();

        if (input != null && !input.isEmpty()) {
            // Track what we changed so the update can be rolled back on size violation.
            List<String> newKeys = new ArrayList<>();
            Map<String, Object> previousValues = new HashMap<>();
            input.forEach(
                    (key, value) -> {
                        if (variables.containsKey(key)) {
                            previousValues.put(key, variables.get(key));
                        } else {
                            newKeys.add(key);
                        }
                        variables.put(key, value);
                        LOGGER.debug("Task: {} setting value for variable: {}", taskId, key);
                    });

            if (!validateVariablesSize(workflow, task, variables)) {
                // Roll back: restore overwritten values and drop newly added keys.
                variables.putAll(previousValues);
                newKeys.forEach(variables::remove);
                task.setStatus(TaskModel.Status.FAILED_WITH_TERMINAL_ERROR);
                return true;
            }
        }

        task.setStatus(TaskModel.Status.COMPLETED);
        executionDAOFacade.updateWorkflow(workflow);
        return true;
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/core/src/main/java/com/netflix/conductor/core/execution/tasks/WorkflowSystemTask.java | core/src/main/java/com/netflix/conductor/core/execution/tasks/WorkflowSystemTask.java | /*
* Copyright 2022 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.core.execution.tasks;
import java.util.Optional;
import com.netflix.conductor.common.metadata.workflow.WorkflowTask;
import com.netflix.conductor.core.execution.WorkflowExecutor;
import com.netflix.conductor.model.TaskModel;
import com.netflix.conductor.model.WorkflowModel;
public abstract class WorkflowSystemTask {

    private final String taskType;

    public WorkflowSystemTask(String taskType) {
        this.taskType = taskType;
    }

    /**
     * Starts task execution; invoked exactly once, while the task is still SCHEDULED.
     *
     * @param workflow workflow the task belongs to
     * @param task the task instance being started
     * @param workflowExecutor workflow executor
     */
    public void start(WorkflowModel workflow, TaskModel task, WorkflowExecutor workflowExecutor) {
        // Intentionally a no-op; concrete system tasks override as needed.
    }

    /**
     * Executes the task; invoked after {@link #start(WorkflowModel, TaskModel,
     * WorkflowExecutor)} while the task is in a non-terminal state, possibly multiple times.
     *
     * @param workflow workflow the task belongs to
     * @param task the task instance being executed
     * @param workflowExecutor workflow executor
     * @return true when this call changed the task status, false otherwise
     */
    public boolean execute(
            WorkflowModel workflow, TaskModel task, WorkflowExecutor workflowExecutor) {
        return false;
    }

    /**
     * Cancels task execution.
     *
     * @param workflow workflow the task belongs to
     * @param task the task instance being cancelled
     * @param workflowExecutor workflow executor
     */
    public void cancel(WorkflowModel workflow, TaskModel task, WorkflowExecutor workflowExecutor) {}

    /**
     * Returns the number of seconds by which the next evaluation of this task should be
     * postponed, or {@code Optional.empty()} (the default) when no postponement is needed.
     * Implementations may override to tailor the offset to the task's behavior.
     *
     * @param taskModel task model
     * @param maxOffset the maximum recommended offset value
     * @return the evaluation offset in seconds, or empty for no postponement
     */
    public Optional<Long> getEvaluationOffset(TaskModel taskModel, long maxOffset) {
        return Optional.empty();
    }

    /**
     * @return True if the task is supposed to be started asynchronously using internal queues.
     */
    public boolean isAsync() {
        return false;
    }

    /**
     * @return True to keep task in 'IN_PROGRESS' state, and 'COMPLETE' later by an external
     *     message.
     */
    public boolean isAsyncComplete(TaskModel task) {
        // An explicit "asyncComplete" input parameter wins over the task definition.
        if (!task.getInputData().containsKey("asyncComplete")) {
            return Optional.ofNullable(task.getWorkflowTask())
                    .map(WorkflowTask::isAsyncComplete)
                    .orElse(false);
        }
        Object flag = task.getInputData().get("asyncComplete");
        return flag != null && (Boolean) flag;
    }

    /**
     * @return name of the system task
     */
    public String getTaskType() {
        return taskType;
    }

    /**
     * Whether workflow retrieval should also load the workflow's tasks. Defaults to true; some
     * implementations (e.g. sub-workflows) can return false for a significant performance gain
     * when the tasks are not needed.
     *
     * @return true when tasks must be loaded along with the workflow
     */
    public boolean isTaskRetrievalRequired() {
        return true;
    }

    @Override
    public String toString() {
        return taskType;
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/core/src/main/java/com/netflix/conductor/core/execution/tasks/Lambda.java | core/src/main/java/com/netflix/conductor/core/execution/tasks/Lambda.java | /*
* Copyright 2022 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.core.execution.tasks;
import java.util.Map;
import org.apache.commons.lang3.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.stereotype.Component;
import com.netflix.conductor.core.events.ScriptEvaluator;
import com.netflix.conductor.core.execution.WorkflowExecutor;
import com.netflix.conductor.model.TaskModel;
import com.netflix.conductor.model.WorkflowModel;
import static com.netflix.conductor.common.metadata.tasks.TaskType.TASK_TYPE_LAMBDA;
/**
* @author X-Ultra
* <p>Task that enables execute Lambda script at workflow execution, For example,
* <pre>
* ...
* {
* "tasks": [
* {
* "name": "LAMBDA",
* "taskReferenceName": "lambda_test",
* "type": "LAMBDA",
* "inputParameters": {
* "input": "${workflow.input}",
* "scriptExpression": "if ($.input.a==1){return {testvalue: true}} else{return {testvalue: false} }"
* }
* }
* ]
* }
* ...
* </pre>
* then to use task output, e.g. <code>script_test.output.testvalue</code>
* @deprecated {@link Lambda} is deprecated. Use {@link Inline} task for inline expression
* evaluation. Also see ${@link com.netflix.conductor.common.metadata.workflow.WorkflowTask})
*/
@Deprecated
@Component(TASK_TYPE_LAMBDA)
public class Lambda extends WorkflowSystemTask {

    private static final Logger LOGGER = LoggerFactory.getLogger(Lambda.class);

    private static final String QUERY_EXPRESSION_PARAMETER = "scriptExpression";

    public static final String NAME = "LAMBDA";

    public Lambda() {
        super(TASK_TYPE_LAMBDA);
    }

    /**
     * Evaluates the {@code scriptExpression} input parameter against the task input and stores
     * the result in the task output under {@code result}. The task is COMPLETED on success and
     * FAILED when the expression is blank or evaluation throws.
     *
     * @return always true — the task status was changed
     */
    @Override
    public boolean execute(
            WorkflowModel workflow, TaskModel task, WorkflowExecutor workflowExecutor) {
        Map<String, Object> taskInput = task.getInputData();
        try {
            String expression = (String) taskInput.get(QUERY_EXPRESSION_PARAMETER);
            if (StringUtils.isBlank(expression)) {
                LOGGER.error("Empty {} in Lambda task. ", QUERY_EXPRESSION_PARAMETER);
                task.setReasonForIncompletion(
                        "Empty '"
                                + QUERY_EXPRESSION_PARAMETER
                                + "' in Lambda task's input parameters. A non-empty String value must be provided.");
                task.setStatus(TaskModel.Status.FAILED);
            } else {
                // Wrap the expression in a function so "return" works at the top level.
                String scriptExpressionBuilder =
                        "function scriptFun(){" + expression + "} scriptFun();";
                LOGGER.debug(
                        "scriptExpressionBuilder: {}, task: {}",
                        scriptExpressionBuilder,
                        task.getTaskId());
                task.addOutput("result", ScriptEvaluator.eval(scriptExpressionBuilder, taskInput));
                task.setStatus(TaskModel.Status.COMPLETED);
            }
        } catch (Exception e) {
            LOGGER.error(
                    "Failed to execute Lambda Task: {} in workflow: {}",
                    task.getTaskId(),
                    workflow.getWorkflowId(),
                    e);
            task.setStatus(TaskModel.Status.FAILED);
            task.setReasonForIncompletion(e.getMessage());
            task.addOutput(
                    "error", e.getCause() != null ? e.getCause().getMessage() : e.getMessage());
        }
        return true;
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/core/src/main/java/com/netflix/conductor/core/execution/tasks/SystemTaskWorkerCoordinator.java | core/src/main/java/com/netflix/conductor/core/execution/tasks/SystemTaskWorkerCoordinator.java | /*
* Copyright 2022 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.core.execution.tasks;
import java.util.Set;
import org.apache.commons.lang3.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Qualifier;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.boot.context.event.ApplicationReadyEvent;
import org.springframework.context.event.EventListener;
import org.springframework.stereotype.Component;
import com.netflix.conductor.annotations.VisibleForTesting;
import com.netflix.conductor.core.config.ConductorProperties;
import com.netflix.conductor.core.utils.QueueUtils;
import static com.netflix.conductor.core.execution.tasks.SystemTaskRegistry.ASYNC_SYSTEM_TASKS_QUALIFIER;
@Component
@ConditionalOnProperty(
        name = "conductor.system-task-workers.enabled",
        havingValue = "true",
        matchIfMissing = true)
public class SystemTaskWorkerCoordinator {

    private static final Logger LOGGER = LoggerFactory.getLogger(SystemTaskWorkerCoordinator.class);

    private final SystemTaskWorker systemTaskWorker;
    private final String executionNameSpace;
    private final Set<WorkflowSystemTask> asyncSystemTasks;

    public SystemTaskWorkerCoordinator(
            SystemTaskWorker systemTaskWorker,
            ConductorProperties properties,
            @Qualifier(ASYNC_SYSTEM_TASKS_QUALIFIER) Set<WorkflowSystemTask> asyncSystemTasks) {
        this.systemTaskWorker = systemTaskWorker;
        this.asyncSystemTasks = asyncSystemTasks;
        this.executionNameSpace = properties.getSystemTaskWorkerExecutionNamespace();
    }

    /**
     * Once the application is ready, starts polling for every async system task that belongs
     * to this coordinator's execution namespace.
     */
    @EventListener(ApplicationReadyEvent.class)
    public void initSystemTaskExecutor() {
        for (WorkflowSystemTask systemTask : this.asyncSystemTasks) {
            if (isFromCoordinatorExecutionNameSpace(systemTask)) {
                this.systemTaskWorker.startPolling(systemTask);
            }
        }
        LOGGER.info(
                "{} initialized with {} async tasks",
                SystemTaskWorkerCoordinator.class.getSimpleName(),
                this.asyncSystemTasks.size());
    }

    /** Whether the task's queue execution namespace matches this coordinator's namespace. */
    @VisibleForTesting
    boolean isFromCoordinatorExecutionNameSpace(WorkflowSystemTask systemTask) {
        return StringUtils.equals(
                QueueUtils.getExecutionNameSpace(systemTask.getTaskType()), executionNameSpace);
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/core/src/main/java/com/netflix/conductor/core/execution/tasks/Human.java | core/src/main/java/com/netflix/conductor/core/execution/tasks/Human.java | /*
* Copyright 2022 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.core.execution.tasks;
import org.springframework.stereotype.Component;
import com.netflix.conductor.core.execution.WorkflowExecutor;
import com.netflix.conductor.model.TaskModel;
import com.netflix.conductor.model.WorkflowModel;
import static com.netflix.conductor.common.metadata.tasks.TaskType.TASK_TYPE_HUMAN;
import static com.netflix.conductor.model.TaskModel.Status.IN_PROGRESS;
@Component(TASK_TYPE_HUMAN)
public class Human extends WorkflowSystemTask {

    public Human() {
        super(TASK_TYPE_HUMAN);
    }

    /**
     * Parks the task in IN_PROGRESS; a HUMAN task is completed later by an external actor.
     */
    @Override
    public void start(WorkflowModel workflow, TaskModel task, WorkflowExecutor workflowExecutor) {
        task.setStatus(IN_PROGRESS);
    }

    /** Marks the task CANCELED when the workflow is cancelled or terminated. */
    @Override
    public void cancel(WorkflowModel workflow, TaskModel task, WorkflowExecutor workflowExecutor) {
        task.setStatus(TaskModel.Status.CANCELED);
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/core/src/main/java/com/netflix/conductor/core/execution/tasks/DoWhile.java | core/src/main/java/com/netflix/conductor/core/execution/tasks/DoWhile.java | /*
* Copyright 2022 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.core.execution.tasks;
import java.util.*;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.stereotype.Component;
import com.netflix.conductor.annotations.VisibleForTesting;
import com.netflix.conductor.common.metadata.tasks.TaskDef;
import com.netflix.conductor.common.metadata.workflow.WorkflowTask;
import com.netflix.conductor.common.utils.TaskUtils;
import com.netflix.conductor.core.dal.ExecutionDAOFacade;
import com.netflix.conductor.core.events.ScriptEvaluator;
import com.netflix.conductor.core.execution.WorkflowExecutor;
import com.netflix.conductor.core.utils.ParametersUtils;
import com.netflix.conductor.model.TaskModel;
import com.netflix.conductor.model.WorkflowModel;
import static com.netflix.conductor.common.metadata.tasks.TaskType.TASK_TYPE_DO_WHILE;
/**
 * DO_WHILE system task: repeatedly executes the tasks listed in the workflow task's
 * {@code loopOver} section, re-evaluating {@code loopCondition} after each completed
 * iteration until it evaluates to false.
 *
 * <p>Outputs of each iteration's tasks are stored in this task's output map keyed by the
 * iteration number. The optional {@code keepLastN} input parameter bounds how many
 * iterations of output (and persisted loop tasks) are retained.
 */
@Component(TASK_TYPE_DO_WHILE)
public class DoWhile extends WorkflowSystemTask {

    private static final Logger LOGGER = LoggerFactory.getLogger(DoWhile.class);

    // Used to resolve the loopCondition input bindings against workflow/task state.
    private final ParametersUtils parametersUtils;
    // Used to physically delete old-iteration tasks when keepLastN pruning kicks in.
    private final ExecutionDAOFacade executionDAOFacade;

    public DoWhile(ParametersUtils parametersUtils, ExecutionDAOFacade executionDAOFacade) {
        super(TASK_TYPE_DO_WHILE);
        this.parametersUtils = parametersUtils;
        this.executionDAOFacade = executionDAOFacade;
    }

    @Override
    public void cancel(WorkflowModel workflow, TaskModel task, WorkflowExecutor executor) {
        task.setStatus(TaskModel.Status.CANCELED);
    }

    /**
     * Evaluates the state of the current iteration and either schedules the first/next
     * iteration, marks the DO_WHILE task terminal, or leaves it untouched while the
     * iteration is still running.
     *
     * @param workflow the workflow containing this DO_WHILE task and its loop tasks
     * @param doWhileTaskModel the DO_WHILE task model being executed
     * @param workflowExecutor executor used to schedule the next iteration's tasks
     * @return true if the task state (status, iteration or output) changed and must be
     *     persisted; false if nothing changed this pass
     */
    @Override
    public boolean execute(
            WorkflowModel workflow, TaskModel doWhileTaskModel, WorkflowExecutor workflowExecutor) {
        boolean hasFailures = false;
        StringBuilder failureReason = new StringBuilder();
        Map<String, Object> output = new HashMap<>();
        /*
         * Get the latest set of tasks (the ones that have the highest retry count). We don't want to evaluate any tasks
         * that have already failed if there is a more current one (a later retry count).
         */
        Map<String, TaskModel> relevantTasks = new LinkedHashMap<>();
        TaskModel relevantTask;
        for (TaskModel t : workflow.getTasks()) {
            // Keep only tasks that (a) belong to this loop's body, (b) are not the DO_WHILE
            // task itself, and (c) belong to the current iteration.
            if (doWhileTaskModel
                            .getWorkflowTask()
                            .has(TaskUtils.removeIterationFromTaskRefName(t.getReferenceTaskName()))
                    && !doWhileTaskModel.getReferenceTaskName().equals(t.getReferenceTaskName())
                    && doWhileTaskModel.getIteration() == t.getIteration()) {
                relevantTask = relevantTasks.get(t.getReferenceTaskName());
                if (relevantTask == null || t.getRetryCount() > relevantTask.getRetryCount()) {
                    relevantTasks.put(t.getReferenceTaskName(), t);
                }
            }
        }
        Collection<TaskModel> loopOverTasks = relevantTasks.values();
        if (LOGGER.isDebugEnabled()) {
            LOGGER.debug(
                    "Workflow {} waiting for tasks {} to complete iteration {}",
                    workflow.getWorkflowId(),
                    loopOverTasks.stream()
                            .map(TaskModel::getReferenceTaskName)
                            .collect(Collectors.toList()),
                    doWhileTaskModel.getIteration());
        }
        // if the loopOverTasks collection is empty, no tasks inside the loop have been scheduled.
        // so schedule it and exit the method.
        if (loopOverTasks.isEmpty()) {
            // Iteration numbering is 1-based: the first pass starts at iteration 1.
            doWhileTaskModel.setIteration(1);
            doWhileTaskModel.addOutput("iteration", doWhileTaskModel.getIteration());
            return scheduleNextIteration(doWhileTaskModel, workflow, workflowExecutor);
        }
        for (TaskModel loopOverTask : loopOverTasks) {
            TaskModel.Status taskStatus = loopOverTask.getStatus();
            hasFailures = !taskStatus.isSuccessful();
            if (hasFailures) {
                failureReason.append(loopOverTask.getReasonForIncompletion()).append(" ");
            }
            // Collect each loop task's output under its iteration-stripped reference name.
            output.put(
                    TaskUtils.removeIterationFromTaskRefName(loopOverTask.getReferenceTaskName()),
                    loopOverTask.getOutputData());
            if (hasFailures) {
                break;
            }
        }
        // Store this iteration's collected outputs keyed by the iteration number.
        doWhileTaskModel.addOutput(String.valueOf(doWhileTaskModel.getIteration()), output);
        Optional<Integer> keepLastN =
                Optional.ofNullable(doWhileTaskModel.getWorkflowTask().getInputParameters())
                        .map(parameters -> parameters.get("keepLastN"))
                        .map(value -> (Integer) value);
        if (keepLastN.isPresent() && doWhileTaskModel.getIteration() > keepLastN.get()) {
            Integer iteration = doWhileTaskModel.getIteration();
            // NOTE(review): output keys pruned here are "0".."iteration-keepLastN-2", while
            // removeIterations() deletes tasks with iteration <= iteration-keepLastN — the two
            // retention windows appear to differ by one/two; confirm this asymmetry is intended.
            IntStream.range(0, iteration - keepLastN.get() - 1)
                    .mapToObj(Integer::toString)
                    .forEach(doWhileTaskModel::removeOutput);
            // Remove old iteration tasks from the database
            removeIterations(workflow, doWhileTaskModel, keepLastN.get());
        }
        if (hasFailures) {
            LOGGER.debug(
                    "Task {} failed in {} iteration",
                    doWhileTaskModel.getTaskId(),
                    doWhileTaskModel.getIteration() + 1);
            return markTaskFailure(
                    doWhileTaskModel, TaskModel.Status.FAILED, failureReason.toString());
        }
        if (!isIterationComplete(doWhileTaskModel, relevantTasks)) {
            // current iteration is not complete (all tasks inside the loop are not terminal)
            return false;
        }
        // if we are here, the iteration is complete, and we need to check if there is a next
        // iteration by evaluating the loopCondition
        boolean shouldContinue;
        try {
            shouldContinue = evaluateCondition(workflow, doWhileTaskModel);
            LOGGER.debug(
                    "Task {} condition evaluated to {}",
                    doWhileTaskModel.getTaskId(),
                    shouldContinue);
            if (shouldContinue) {
                doWhileTaskModel.setIteration(doWhileTaskModel.getIteration() + 1);
                doWhileTaskModel.addOutput("iteration", doWhileTaskModel.getIteration());
                return scheduleNextIteration(doWhileTaskModel, workflow, workflowExecutor);
            } else {
                LOGGER.debug(
                        "Task {} took {} iterations to complete",
                        doWhileTaskModel.getTaskId(),
                        doWhileTaskModel.getIteration() + 1);
                return markTaskSuccess(doWhileTaskModel);
            }
        } catch (Exception e) {
            // A broken loopCondition is unrecoverable by retry: fail terminally.
            String message =
                    String.format(
                            "Unable to evaluate condition %s, exception %s",
                            doWhileTaskModel.getWorkflowTask().getLoopCondition(), e.getMessage());
            LOGGER.error(message);
            return markTaskFailure(
                    doWhileTaskModel, TaskModel.Status.FAILED_WITH_TERMINAL_ERROR, message);
        }
    }

    /**
     * Removes old iterations from the workflow to prevent database bloat. This method identifies
     * and deletes tasks from iterations that exceed the keepLastN retention policy.
     *
     * @param workflow The workflow model containing all tasks
     * @param doWhileTaskModel The DO_WHILE task model
     * @param keepLastN Number of most recent iterations to keep
     */
    @VisibleForTesting
    void removeIterations(WorkflowModel workflow, TaskModel doWhileTaskModel, int keepLastN) {
        int currentIteration = doWhileTaskModel.getIteration();
        // Calculate which iterations should be removed (all iterations before currentIteration -
        // keepLastN)
        int iterationsToRemove = currentIteration - keepLastN;
        if (iterationsToRemove <= 0) {
            // Nothing to remove yet
            return;
        }
        LOGGER.debug(
                "Removing iterations 1 to {} for DO_WHILE task {} (keeping last {} iterations)",
                iterationsToRemove,
                doWhileTaskModel.getReferenceTaskName(),
                keepLastN);
        // Find and remove tasks from old iterations
        List<TaskModel> tasksToRemove =
                workflow.getTasks().stream()
                        .filter(
                                task -> {
                                    // Check if this task belongs to the DO_WHILE loop
                                    String taskRefWithoutIteration =
                                            TaskUtils.removeIterationFromTaskRefName(
                                                    task.getReferenceTaskName());
                                    boolean belongsToLoop =
                                            doWhileTaskModel
                                                            .getWorkflowTask()
                                                            .has(taskRefWithoutIteration)
                                                    && !doWhileTaskModel
                                                            .getReferenceTaskName()
                                                            .equals(task.getReferenceTaskName());
                                    // Check if this task is from an old iteration that should be
                                    // removed
                                    boolean isOldIteration =
                                            task.getIteration() <= iterationsToRemove;
                                    return belongsToLoop && isOldIteration;
                                })
                        .collect(Collectors.toList());
        // Remove each task from the database
        for (TaskModel taskToRemove : tasksToRemove) {
            try {
                LOGGER.debug(
                        "Removing task {} (iteration {}) from workflow {}",
                        taskToRemove.getReferenceTaskName(),
                        taskToRemove.getIteration(),
                        workflow.getWorkflowId());
                executionDAOFacade.removeTask(taskToRemove.getTaskId());
            } catch (Exception e) {
                LOGGER.error(
                        "Failed to remove task {} (iteration {}) from workflow {}",
                        taskToRemove.getReferenceTaskName(),
                        taskToRemove.getIteration(),
                        workflow.getWorkflowId(),
                        e);
                // Continue with other tasks even if one fails
            }
        }
        LOGGER.info(
                "Removed {} tasks from {} old iterations for DO_WHILE task {} in workflow {}",
                tasksToRemove.size(),
                iterationsToRemove,
                doWhileTaskModel.getReferenceTaskName(),
                workflow.getWorkflowId());
    }

    /**
     * Check if all tasks in the current iteration have reached terminal state.
     *
     * @param doWhileTaskModel The {@link TaskModel} of DO_WHILE.
     * @param referenceNameToModel Map of taskReferenceName to {@link TaskModel}.
     * @return true if all tasks in DO_WHILE.loopOver are in <code>referenceNameToModel</code> and
     *     reached terminal state.
     */
    private boolean isIterationComplete(
            TaskModel doWhileTaskModel, Map<String, TaskModel> referenceNameToModel) {
        List<WorkflowTask> workflowTasksInsideDoWhile =
                doWhileTaskModel.getWorkflowTask().getLoopOver();
        int iteration = doWhileTaskModel.getIteration();
        boolean allTasksTerminal = true;
        for (WorkflowTask workflowTaskInsideDoWhile : workflowTasksInsideDoWhile) {
            // Loop task refs are stored with the iteration suffix appended.
            String taskReferenceName =
                    TaskUtils.appendIteration(
                            workflowTaskInsideDoWhile.getTaskReferenceName(), iteration);
            if (referenceNameToModel.containsKey(taskReferenceName)) {
                TaskModel taskModel = referenceNameToModel.get(taskReferenceName);
                if (!taskModel.getStatus().isTerminal()) {
                    allTasksTerminal = false;
                    break;
                }
            } else {
                // A declared loop task that has not been scheduled yet also blocks completion.
                allTasksTerminal = false;
                break;
            }
        }
        if (!allTasksTerminal) {
            // Cases where tasks directly inside loop over are not completed.
            // loopOver -> [task1 -> COMPLETED, task2 -> IN_PROGRESS]
            return false;
        }
        // Check all the tasks in referenceNameToModel are completed or not. These are set of tasks
        // which are not directly inside loopOver tasks, but they are under hierarchy
        // loopOver -> [decisionTask -> COMPLETED [ task1 -> COMPLETED, task2 -> IN_PROGRESS]]
        return referenceNameToModel.values().stream()
                .noneMatch(taskModel -> !taskModel.getStatus().isTerminal());
    }

    /** Schedules the loop-body tasks for the DO_WHILE task's current iteration. */
    boolean scheduleNextIteration(
            TaskModel doWhileTaskModel, WorkflowModel workflow, WorkflowExecutor workflowExecutor) {
        LOGGER.debug(
                "Scheduling loop tasks for task {} as condition {} evaluated to true",
                doWhileTaskModel.getTaskId(),
                doWhileTaskModel.getWorkflowTask().getLoopCondition());
        workflowExecutor.scheduleNextIteration(doWhileTaskModel, workflow);
        return true; // Return true even though status not changed. Iteration has to be updated in
        // execution DAO.
    }

    /** Marks the DO_WHILE task failed with the given status and reason; always returns true. */
    boolean markTaskFailure(TaskModel taskModel, TaskModel.Status status, String failureReason) {
        LOGGER.error("Marking task {} failed with error.", taskModel.getTaskId());
        taskModel.setReasonForIncompletion(failureReason);
        taskModel.setStatus(status);
        return true;
    }

    /** Marks the DO_WHILE task COMPLETED; always returns true (state changed). */
    boolean markTaskSuccess(TaskModel taskModel) {
        LOGGER.debug(
                "Task {} took {} iterations to complete",
                taskModel.getTaskId(),
                taskModel.getIteration() + 1);
        taskModel.setStatus(TaskModel.Status.COMPLETED);
        return true;
    }

    /**
     * Evaluates the loopCondition script against the resolved task input plus the outputs of the
     * DO_WHILE task itself and every task inside the loop (keyed by iteration-stripped ref name).
     *
     * @return the boolean result of the condition; false if no condition is configured
     */
    @VisibleForTesting
    boolean evaluateCondition(WorkflowModel workflow, TaskModel task) {
        TaskDef taskDefinition = task.getTaskDefinition().orElse(null);
        // Use paramUtils to compute the task input
        Map<String, Object> conditionInput =
                parametersUtils.getTaskInputV2(
                        task.getWorkflowTask().getInputParameters(),
                        workflow,
                        task.getTaskId(),
                        taskDefinition);
        conditionInput.put(task.getReferenceTaskName(), task.getOutputData());
        List<TaskModel> loopOver =
                workflow.getTasks().stream()
                        .filter(
                                t ->
                                        (task.getWorkflowTask()
                                                        .has(
                                                                TaskUtils
                                                                        .removeIterationFromTaskRefName(
                                                                                t
                                                                                        .getReferenceTaskName()))
                                                && !task.getReferenceTaskName()
                                                        .equals(t.getReferenceTaskName())))
                        .collect(Collectors.toList());
        for (TaskModel loopOverTask : loopOver) {
            conditionInput.put(
                    TaskUtils.removeIterationFromTaskRefName(loopOverTask.getReferenceTaskName()),
                    loopOverTask.getOutputData());
        }
        String condition = task.getWorkflowTask().getLoopCondition();
        boolean result = false;
        if (condition != null) {
            LOGGER.debug("Condition: {} is being evaluated", condition);
            // Evaluate the expression by using the Nashorn based script evaluator
            result = ScriptEvaluator.evalBool(condition, conditionInput);
        }
        return result;
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/core/src/main/java/com/netflix/conductor/core/execution/tasks/Wait.java | core/src/main/java/com/netflix/conductor/core/execution/tasks/Wait.java | /*
* Copyright 2022 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.core.execution.tasks;
import org.springframework.stereotype.Component;
import com.netflix.conductor.core.execution.WorkflowExecutor;
import com.netflix.conductor.model.TaskModel;
import com.netflix.conductor.model.WorkflowModel;
import static com.netflix.conductor.common.metadata.tasks.TaskType.TASK_TYPE_WAIT;
import static com.netflix.conductor.model.TaskModel.Status.*;
/**
 * WAIT system task: pauses the workflow either indefinitely (until completed through the
 * external task update API) or until a deadline derived from the task's wait timeout.
 */
@Component(TASK_TYPE_WAIT)
public class Wait extends WorkflowSystemTask {

    /** Input parameter key for a relative wait duration. */
    public static final String DURATION_INPUT = "duration";

    /** Input parameter key for an absolute date/time to wait until. */
    public static final String UNTIL_INPUT = "until";

    public Wait() {
        super(TASK_TYPE_WAIT);
    }

    /** Moves the task to IN_PROGRESS; it stays there until the deadline or an external update. */
    @Override
    public void start(WorkflowModel workflow, TaskModel task, WorkflowExecutor executor) {
        task.setStatus(IN_PROGRESS);
    }

    /** Marks the task CANCELED when the workflow is terminated or restarted. */
    @Override
    public void cancel(WorkflowModel workflow, TaskModel task, WorkflowExecutor workflowExecutor) {
        task.setStatus(CANCELED);
    }

    /**
     * Completes the task once its wait deadline has passed.
     *
     * @return true if the task transitioned to COMPLETED; false while still waiting. A wait
     *     timeout of 0 means "no deadline" — completion must come from an external update.
     */
    @Override
    public boolean execute(
            WorkflowModel workflow, TaskModel task, WorkflowExecutor workflowExecutor) {
        long timeOut = task.getWaitTimeout();
        if (timeOut == 0) {
            // No deadline configured: wait indefinitely.
            return false;
        }
        if (System.currentTimeMillis() > timeOut) {
            task.setStatus(COMPLETED);
            return true;
        }
        return false;
    }

    /** WAIT is evaluated asynchronously by the system task workers. */
    @Override
    public boolean isAsync() {
        return true;
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/core/src/main/java/com/netflix/conductor/core/execution/tasks/SystemTaskWorker.java | core/src/main/java/com/netflix/conductor/core/execution/tasks/SystemTaskWorker.java | /*
* Copyright 2022 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.core.execution.tasks;
import java.util.List;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import org.apache.commons.lang3.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.stereotype.Component;
import com.netflix.conductor.annotations.VisibleForTesting;
import com.netflix.conductor.core.LifecycleAwareComponent;
import com.netflix.conductor.core.config.ConductorProperties;
import com.netflix.conductor.core.execution.AsyncSystemTaskExecutor;
import com.netflix.conductor.core.utils.QueueUtils;
import com.netflix.conductor.core.utils.SemaphoreUtil;
import com.netflix.conductor.dao.QueueDAO;
import com.netflix.conductor.metrics.Monitors;
import com.netflix.conductor.service.ExecutionService;
/**
 * The worker that polls and executes an async system task.
 *
 * <p>Each polled queue gets an {@link ExecutionConfig} (thread pool + semaphore); a shared
 * default config serves non-isolated queues while isolated queues get their own, created
 * lazily. Slot accounting via {@code SemaphoreUtil} bounds in-flight task executions.
 */
@Component
@ConditionalOnProperty(
        name = "conductor.system-task-workers.enabled",
        havingValue = "true",
        matchIfMissing = true)
public class SystemTaskWorker extends LifecycleAwareComponent {

    private static final Logger LOGGER = LoggerFactory.getLogger(SystemTaskWorker.class);

    // Delay (ms) between successive polls of each queue.
    private final long pollInterval;
    private final QueueDAO queueDAO;

    // Shared execution config for all non-isolated queues.
    ExecutionConfig defaultExecutionConfig;

    private final AsyncSystemTaskExecutor asyncSystemTaskExecutor;
    private final ConductorProperties properties;
    private final ExecutionService executionService;
    // Timeout (ms) passed to QueueDAO.pop for each poll.
    private final int queuePopTimeout;

    // Lazily-populated per-queue configs for isolated queues, keyed by queue name.
    ConcurrentHashMap<String, ExecutionConfig> queueExecutionConfigMap = new ConcurrentHashMap<>();

    public SystemTaskWorker(
            QueueDAO queueDAO,
            AsyncSystemTaskExecutor asyncSystemTaskExecutor,
            ConductorProperties properties,
            ExecutionService executionService) {
        this.properties = properties;
        int threadCount = properties.getSystemTaskWorkerThreadCount();
        this.defaultExecutionConfig = new ExecutionConfig(threadCount, "system-task-worker-%d");
        this.asyncSystemTaskExecutor = asyncSystemTaskExecutor;
        this.queueDAO = queueDAO;
        this.pollInterval = properties.getSystemTaskWorkerPollInterval().toMillis();
        this.executionService = executionService;
        this.queuePopTimeout = (int) properties.getSystemTaskQueuePopTimeout().toMillis();
        LOGGER.info("SystemTaskWorker initialized with {} threads", threadCount);
    }

    /** Starts polling the given system task's default queue (named after its task type). */
    public void startPolling(WorkflowSystemTask systemTask) {
        startPolling(systemTask, systemTask.getTaskType());
    }

    /**
     * Starts a dedicated scheduler that polls {@code queueName} for the given system task at the
     * configured poll interval.
     *
     * <p>NOTE(review): the single-thread scheduled executor created here is never retained or
     * shut down — confirm lifecycle shutdown is handled elsewhere (or acceptable at JVM exit).
     */
    public void startPolling(WorkflowSystemTask systemTask, String queueName) {
        Executors.newSingleThreadScheduledExecutor()
                .scheduleWithFixedDelay(
                        () -> this.pollAndExecute(systemTask, queueName),
                        1000,
                        pollInterval,
                        TimeUnit.MILLISECONDS);
        LOGGER.info("Started listening for task: {} in queue: {}", systemTask, queueName);
    }

    /**
     * Single poll cycle: acquire up to N semaphore slots, pop up to N task ids from the queue,
     * dispatch each to the executor, and release slots for anything not dispatched. Slots held
     * by dispatched tasks are released when their execution completes.
     */
    void pollAndExecute(WorkflowSystemTask systemTask, String queueName) {
        if (!isRunning()) {
            // Component paused/stopped (LifecycleAwareComponent): skip this cycle.
            LOGGER.debug(
                    "{} stopped. Not polling for task: {}", getClass().getSimpleName(), systemTask);
            return;
        }
        ExecutionConfig executionConfig = getExecutionConfig(queueName);
        SemaphoreUtil semaphoreUtil = executionConfig.getSemaphoreUtil();
        ExecutorService executorService = executionConfig.getExecutorService();
        String taskName = QueueUtils.getTaskType(queueName);
        final int systemTaskMaxPollCount = properties.getSystemTaskMaxPollCount();
        // Clamp the configured max poll count to [1, threadCount]; out-of-range values fall
        // back to the worker thread count.
        int maxSystemTasksToAcquire =
                (systemTaskMaxPollCount < 1
                                || systemTaskMaxPollCount
                                        > properties.getSystemTaskWorkerThreadCount())
                        ? properties.getSystemTaskWorkerThreadCount()
                        : systemTaskMaxPollCount;
        int messagesToAcquire = Math.min(semaphoreUtil.availableSlots(), maxSystemTasksToAcquire);
        try {
            if (messagesToAcquire <= 0 || !semaphoreUtil.acquireSlots(messagesToAcquire)) {
                // no available slots, do not poll
                Monitors.recordSystemTaskWorkerPollingLimited(queueName);
                return;
            }
            LOGGER.debug("Polling queue: {} with {} slots acquired", queueName, messagesToAcquire);
            List<String> polledTaskIds =
                    queueDAO.pop(queueName, messagesToAcquire, queuePopTimeout);
            Monitors.recordTaskPoll(queueName);
            LOGGER.debug("Polling queue:{}, got {} tasks", queueName, polledTaskIds.size());
            if (polledTaskIds.size() > 0) {
                // Immediately release unused slots when number of messages acquired is less than
                // acquired slots
                if (polledTaskIds.size() < messagesToAcquire) {
                    semaphoreUtil.completeProcessing(messagesToAcquire - polledTaskIds.size());
                }
                for (String taskId : polledTaskIds) {
                    if (StringUtils.isNotBlank(taskId)) {
                        LOGGER.debug(
                                "Task: {} from queue: {} being sent to the workflow executor",
                                taskId,
                                queueName);
                        Monitors.recordTaskPollCount(queueName, 1);
                        executionService.ackTaskReceived(taskId);
                        CompletableFuture<Void> taskCompletableFuture =
                                CompletableFuture.runAsync(
                                        () -> asyncSystemTaskExecutor.execute(systemTask, taskId),
                                        executorService);
                        // release permit after processing is complete
                        taskCompletableFuture.whenComplete(
                                (r, e) -> semaphoreUtil.completeProcessing(1));
                    } else {
                        // Blank id: nothing to execute; free its slot right away.
                        semaphoreUtil.completeProcessing(1);
                    }
                }
            } else {
                // no task polled, release permit
                semaphoreUtil.completeProcessing(messagesToAcquire);
            }
        } catch (Exception e) {
            // release the permit if exception is thrown during polling, because the thread would
            // not be busy
            semaphoreUtil.completeProcessing(messagesToAcquire);
            Monitors.recordTaskPollError(taskName, e.getClass().getSimpleName());
            LOGGER.error("Error polling system task in queue:{}", queueName, e);
        }
    }

    /**
     * Returns the execution config for the queue: the shared default for regular queues, or a
     * lazily-created dedicated config for isolated queues.
     */
    @VisibleForTesting
    ExecutionConfig getExecutionConfig(String taskQueue) {
        if (!QueueUtils.isIsolatedQueue(taskQueue)) {
            return this.defaultExecutionConfig;
        }
        return queueExecutionConfigMap.computeIfAbsent(
                taskQueue, __ -> this.createExecutionConfig());
    }

    /** Builds a dedicated config (own thread pool) for an isolated queue. */
    private ExecutionConfig createExecutionConfig() {
        int threadCount = properties.getIsolatedSystemTaskWorkerThreadCount();
        String threadNameFormat = "isolated-system-task-worker-%d";
        return new ExecutionConfig(threadCount, threadNameFormat);
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/core/src/main/java/com/netflix/conductor/core/execution/tasks/ExclusiveJoin.java | core/src/main/java/com/netflix/conductor/core/execution/tasks/ExclusiveJoin.java | /*
* Copyright 2022 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.core.execution.tasks;
import java.util.List;
import java.util.stream.Collectors;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.stereotype.Component;
import com.netflix.conductor.common.utils.TaskUtils;
import com.netflix.conductor.core.execution.WorkflowExecutor;
import com.netflix.conductor.model.TaskModel;
import com.netflix.conductor.model.WorkflowModel;
import static com.netflix.conductor.common.metadata.tasks.TaskType.TASK_TYPE_EXCLUSIVE_JOIN;
/**
 * EXCLUSIVE_JOIN system task: resolves to the output of the first scheduled, non-skipped task
 * found in the "joinOn" input list (iteration-suffixed when inside a loop). If none of those
 * tasks was scheduled, it falls back to the "defaultExclusiveJoinTask" list.
 */
@Component(TASK_TYPE_EXCLUSIVE_JOIN)
public class ExclusiveJoin extends WorkflowSystemTask {

    private static final Logger LOGGER = LoggerFactory.getLogger(ExclusiveJoin.class);

    // Input key holding the fallback task references used when no joinOn task was scheduled.
    private static final String DEFAULT_EXCLUSIVE_JOIN_TASKS = "defaultExclusiveJoinTask";

    public ExclusiveJoin() {
        super(TASK_TYPE_EXCLUSIVE_JOIN);
    }

    /**
     * Evaluates the join.
     *
     * @return true once the join resolved (task set to COMPLETED with the chosen task's output,
     *     or FAILED with the accumulated failure reasons); false while no candidate task has
     *     reached a terminal state yet.
     */
    @Override
    @SuppressWarnings("unchecked")
    public boolean execute(
            WorkflowModel workflow, TaskModel task, WorkflowExecutor workflowExecutor) {
        boolean foundExlusiveJoinOnTask = false;
        boolean hasFailures = false;
        StringBuilder failureReason = new StringBuilder();
        TaskModel.Status taskStatus;
        List<String> joinOn = (List<String>) task.getInputData().get("joinOn");
        if (task.isLoopOverTask()) {
            // If exclusive join is part of loop over task, wait for specific iteration to get
            // complete
            joinOn =
                    joinOn.stream()
                            .map(name -> TaskUtils.appendIteration(name, task.getIteration()))
                            .collect(Collectors.toList());
        }
        TaskModel exclusiveTask = null;
        for (String joinOnRef : joinOn) {
            LOGGER.debug("Exclusive Join On Task {} ", joinOnRef);
            exclusiveTask = workflow.getTaskByRefName(joinOnRef);
            if (exclusiveTask == null || exclusiveTask.getStatus() == TaskModel.Status.SKIPPED) {
                LOGGER.debug("The task {} is either not scheduled or skipped.", joinOnRef);
                continue;
            }
            taskStatus = exclusiveTask.getStatus();
            foundExlusiveJoinOnTask = taskStatus.isTerminal();
            // A permissive task's failure only fails the join once every joinOn task is terminal.
            // NOTE(review): getTaskByRefName may return null for unscheduled refs, which would
            // NPE inside allMatch/filter below — confirm all joinOn refs are scheduled whenever
            // the permissive path is taken.
            hasFailures =
                    !taskStatus.isSuccessful()
                            && (!exclusiveTask.getWorkflowTask().isPermissive()
                                    || joinOn.stream()
                                            .map(workflow::getTaskByRefName)
                                            .allMatch(t -> t.getStatus().isTerminal()));
            if (hasFailures) {
                final String failureReasons =
                        joinOn.stream()
                                .map(workflow::getTaskByRefName)
                                .filter(t -> !t.getStatus().isSuccessful())
                                .map(TaskModel::getReasonForIncompletion)
                                .collect(Collectors.joining(" "));
                failureReason.append(failureReasons);
            }
            // Only the first scheduled, non-skipped joinOn task is evaluated.
            break;
        }
        if (!foundExlusiveJoinOnTask) {
            // Fall back to the default exclusive join task list.
            List<String> defaultExclusiveJoinTasks =
                    (List<String>) task.getInputData().get(DEFAULT_EXCLUSIVE_JOIN_TASKS);
            LOGGER.info(
                    "Could not perform exclusive on Join Task(s). Performing now on default exclusive join task(s) {}, workflow: {}",
                    defaultExclusiveJoinTasks,
                    workflow.getWorkflowId());
            if (defaultExclusiveJoinTasks != null && !defaultExclusiveJoinTasks.isEmpty()) {
                for (String defaultExclusiveJoinTask : defaultExclusiveJoinTasks) {
                    // Pick the first task that we should join on and break.
                    exclusiveTask = workflow.getTaskByRefName(defaultExclusiveJoinTask);
                    if (exclusiveTask == null
                            || exclusiveTask.getStatus() == TaskModel.Status.SKIPPED) {
                        LOGGER.debug(
                                "The task {} is either not scheduled or skipped.",
                                defaultExclusiveJoinTask);
                        continue;
                    }
                    taskStatus = exclusiveTask.getStatus();
                    foundExlusiveJoinOnTask = taskStatus.isTerminal();
                    hasFailures = !taskStatus.isSuccessful();
                    if (hasFailures) {
                        failureReason.append(exclusiveTask.getReasonForIncompletion()).append(" ");
                    }
                    break;
                }
            } else {
                LOGGER.debug(
                        "Could not evaluate last tasks output. Verify the task configuration in the workflow definition.");
            }
        }
        LOGGER.debug(
                "Status of flags: foundExlusiveJoinOnTask: {}, hasFailures {}",
                foundExlusiveJoinOnTask,
                hasFailures);
        if (foundExlusiveJoinOnTask || hasFailures) {
            if (hasFailures) {
                task.setReasonForIncompletion(failureReason.toString());
                task.setStatus(TaskModel.Status.FAILED);
            } else {
                // Join resolved: surface the chosen task's output as this task's output.
                task.setOutputData(exclusiveTask.getOutputData());
                task.setStatus(TaskModel.Status.COMPLETED);
            }
            LOGGER.debug("Task: {} status is: {}", task.getTaskId(), task.getStatus());
            return true;
        }
        return false;
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/core/src/main/java/com/netflix/conductor/core/execution/tasks/Switch.java | core/src/main/java/com/netflix/conductor/core/execution/tasks/Switch.java | /*
* Copyright 2022 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.core.execution.tasks;
import org.springframework.stereotype.Component;
import com.netflix.conductor.core.execution.WorkflowExecutor;
import com.netflix.conductor.model.TaskModel;
import com.netflix.conductor.model.WorkflowModel;
import static com.netflix.conductor.common.metadata.tasks.TaskType.TASK_TYPE_SWITCH;
/**
 * {@link Switch} task is a replacement for the now deprecated {@link Decision} task. Branch
 * selection is performed elsewhere during scheduling; the task itself simply completes.
 */
@Component(TASK_TYPE_SWITCH)
public class Switch extends WorkflowSystemTask {

    public Switch() {
        super(TASK_TYPE_SWITCH);
    }

    /** Always completes immediately; returns true to persist the status change. */
    @Override
    public boolean execute(
            WorkflowModel workflow, TaskModel task, WorkflowExecutor workflowExecutor) {
        task.setStatus(TaskModel.Status.COMPLETED);
        return true;
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/core/src/main/java/com/netflix/conductor/core/execution/tasks/Decision.java | core/src/main/java/com/netflix/conductor/core/execution/tasks/Decision.java | /*
* Copyright 2022 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.core.execution.tasks;
import org.springframework.stereotype.Component;
import com.netflix.conductor.core.execution.WorkflowExecutor;
import com.netflix.conductor.model.TaskModel;
import com.netflix.conductor.model.WorkflowModel;
import static com.netflix.conductor.common.metadata.tasks.TaskType.TASK_TYPE_DECISION;
/**
 * @deprecated {@link Decision} is deprecated. Use {@link Switch} task for condition evaluation
 *     using the extensible evaluation framework. Also see ${@link
 *     com.netflix.conductor.common.metadata.workflow.WorkflowTask}).
 */
@Deprecated
@Component(TASK_TYPE_DECISION)
public class Decision extends WorkflowSystemTask {

    public Decision() {
        super(TASK_TYPE_DECISION);
    }

    /** Always completes immediately; case evaluation happens during scheduling. */
    @Override
    public boolean execute(
            WorkflowModel workflow, TaskModel task, WorkflowExecutor workflowExecutor) {
        task.setStatus(TaskModel.Status.COMPLETED);
        return true;
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/core/src/main/java/com/netflix/conductor/core/execution/evaluators/ConsoleBridge.java | core/src/main/java/com/netflix/conductor/core/execution/evaluators/ConsoleBridge.java | /*
* Copyright 2025 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.core.execution.evaluators;
import java.util.ArrayList;
import java.util.List;
import com.netflix.conductor.common.metadata.tasks.TaskExecLog;
/**
 * Bridges script-engine console calls (log/info/error) into {@link TaskExecLog} entries tagged
 * with the owning task id, so script output can be surfaced as task execution logs.
 */
public class ConsoleBridge {

    private final String taskId;
    private final List<TaskExecLog> collected = new ArrayList<>();

    /** @param taskId id stamped onto every log entry produced by this bridge */
    public ConsoleBridge(String taskId) {
        this.taskId = taskId;
    }

    /** Records an error-level message. */
    public void error(Object message) {
        append("[Error]", message);
    }

    /** Records an info-level message. */
    public void info(Object message) {
        append("[Info]", message);
    }

    /** Records a plain log message. */
    public void log(Object message) {
        append("[Log]", message);
    }

    // Formats the entry as: <level> "<message>" and stamps it with the task id.
    private void append(String level, Object message) {
        TaskExecLog entry = new TaskExecLog(level + " \"" + message + "\"");
        entry.setTaskId(taskId);
        collected.add(entry);
    }

    /** @return all entries recorded so far (live list, insertion order) */
    public List<TaskExecLog> logs() {
        return collected;
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/core/src/main/java/com/netflix/conductor/core/execution/evaluators/GraalJSEvaluator.java | core/src/main/java/com/netflix/conductor/core/execution/evaluators/GraalJSEvaluator.java | /*
* Copyright 2025 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.core.execution.evaluators;
import java.util.HashMap;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.stereotype.Component;
import com.netflix.conductor.common.config.ObjectMapperProvider;
import com.netflix.conductor.core.events.ScriptEvaluator;
import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.ObjectMapper;
/**
 * GraalJS evaluator - an alias for the JavaScript evaluator using the GraalJS engine. Allows
 * explicit specification of "graaljs" as the evaluator type while remaining backward compatible
 * with "javascript".
 */
@Component(GraalJSEvaluator.NAME)
public class GraalJSEvaluator implements Evaluator {

    public static final String NAME = "graaljs";

    private static final Logger LOGGER = LoggerFactory.getLogger(GraalJSEvaluator.class);

    private final ObjectMapper objectMapper = new ObjectMapperProvider().getObjectMapper();

    /**
     * Evaluates the given expression against the input with the GraalJS engine.
     *
     * <p>The input is round-tripped through JSON first to detach it from any live polyglot
     * context (same PolyglotMap workaround as {@code JavascriptEvaluator}); if that copy
     * fails, evaluation proceeds with an empty map.
     */
    @Override
    public Object evaluate(String expression, Object input) {
        LOGGER.debug("GraalJS evaluator -- expression: {}", expression);
        Object detachedInput = new HashMap<>();
        try {
            String json = objectMapper.writeValueAsString(input);
            detachedInput = objectMapper.readValue(json, new TypeReference<>() {});
        } catch (Exception e) {
            LOGGER.error("Error making a deep copy of input: {}", expression, e);
        }
        Object result = ScriptEvaluator.eval(expression, detachedInput);
        LOGGER.debug("GraalJS evaluator -- result: {}", result);
        return result;
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/core/src/main/java/com/netflix/conductor/core/execution/evaluators/JavascriptEvaluator.java | core/src/main/java/com/netflix/conductor/core/execution/evaluators/JavascriptEvaluator.java | /*
* Copyright 2025 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.core.execution.evaluators;
import java.util.HashMap;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.stereotype.Component;
import com.netflix.conductor.common.config.ObjectMapperProvider;
import com.netflix.conductor.core.events.ScriptEvaluator;
import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.ObjectMapper;
@Component(JavascriptEvaluator.NAME)
public class JavascriptEvaluator implements Evaluator {

    public static final String NAME = "javascript";

    private static final Logger LOGGER = LoggerFactory.getLogger(JavascriptEvaluator.class);

    private final ObjectMapper objectMapper = new ObjectMapperProvider().getObjectMapper();

    /**
     * Evaluates a JavaScript expression against the input using the GraalJS engine.
     *
     * <p>The input is deep-copied via a JSON round trip before evaluation. Without this, an
     * expression such as {@code $.myParam = {"a":"b"}} stores a GraalVM PolyglotMap inside the
     * input map; that map cannot be read once its context is closed, which previously made a
     * workflow with an INLINE task undecidable (exception in TaskModelProtoMapper on
     * {@code to.setInputData(convertToJsonMap(from.getInputData()))}). If the copy fails, the
     * error is logged and evaluation proceeds with an empty map.
     */
    @Override
    public Object evaluate(String expression, Object input) {
        LOGGER.debug("Javascript evaluator -- expression: {}", expression);
        Object detachedInput = new HashMap<>();
        try {
            String json = objectMapper.writeValueAsString(input);
            detachedInput = objectMapper.readValue(json, new TypeReference<>() {});
        } catch (Exception e) {
            LOGGER.error("Error making a deep copy of input: {}", expression, e);
        }
        Object result = ScriptEvaluator.eval(expression, detachedInput);
        LOGGER.debug("Javascript evaluator -- result: {}", result);
        return result;
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/core/src/main/java/com/netflix/conductor/core/execution/evaluators/Evaluator.java | core/src/main/java/com/netflix/conductor/core/execution/evaluators/Evaluator.java | /*
* Copyright 2022 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.core.execution.evaluators;
/**
 * Contract for expression evaluators (e.g. javascript, python, value-param) used by tasks such
 * as SWITCH and INLINE. Implementations are looked up by their Spring component name.
 */
public interface Evaluator {
    /**
     * Evaluate the expression using the inputs provided, if required. Evaluation of the expression
     * depends on the type of the evaluator.
     *
     * @param expression Expression to be evaluated.
     * @param input Input object to the evaluator to help evaluate the expression.
     * @return Return the evaluation result.
     */
    Object evaluate(String expression, Object input);
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/core/src/main/java/com/netflix/conductor/core/execution/evaluators/PythonEvaluator.java | core/src/main/java/com/netflix/conductor/core/execution/evaluators/PythonEvaluator.java | /*
* Copyright 2024 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.core.execution.evaluators;
import java.util.Map;
import org.graalvm.polyglot.Context;
import org.graalvm.polyglot.Value;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.stereotype.Component;
import com.netflix.conductor.core.exception.TerminateWorkflowException;
@Component(PythonEvaluator.NAME)
public class PythonEvaluator implements Evaluator {

    public static final String NAME = "python";

    private static final Logger LOGGER = LoggerFactory.getLogger(PythonEvaluator.class);

    /**
     * Evaluates a (possibly multi-line) Python expression against the input map using the
     * GraalVM polyglot engine.
     *
     * <p>Each input-map entry is exposed as a Python global; the expression is wrapped in a
     * generated {@code evaluate()} function so multi-line statements work, and the function's
     * return value is captured in a {@code result} binding and returned as a Java object.
     *
     * @param expression Python source to evaluate; its last executed {@code return} provides
     *     the result.
     * @param input expected to be a {@code Map<String, Object>}; any other type yields
     *     {@code null}.
     * @return the evaluation result converted to a Java object, or {@code null} if the input
     *     is not a map.
     * @throws TerminateWorkflowException if the script fails to parse or execute.
     */
    @Override
    public Object evaluate(String expression, Object input) {
        try (Context context = Context.newBuilder("python").allowAllAccess(true).build()) {
            if (!(input instanceof Map)) {
                return null;
            }
            @SuppressWarnings("unchecked")
            Map<String, Object> inputMap = (Map<String, Object>) input;

            // Expose inputs as variables in the GraalVM context
            for (Map.Entry<String, Object> entry : inputMap.entrySet()) {
                context.getBindings("python").putMember(entry.getKey(), entry.getValue());
            }

            // Wrap the expression in a function to handle multi-line statements.
            StringBuilder script = new StringBuilder("def evaluate():\n");
            // Declare the bindings as globals inside the wrapper so the expression can read
            // and reassign them. An empty "global" statement is a Python syntax error, so the
            // line is only emitted when there are bindings. (The previous trailing-comma
            // trimming via setLength() corrupted the script when the input map was empty.)
            if (!inputMap.isEmpty()) {
                script.append("    global ")
                        .append(String.join(", ", inputMap.keySet()))
                        .append("\n");
            }
            for (String line : expression.split("\n")) {
                script.append("    ").append(line).append("\n");
            }
            // Call the function and capture its result in a module-level binding.
            script.append("\nresult = evaluate()");

            context.eval("python", script.toString());
            Value result = context.getBindings("python").getMember("result");
            return result.as(Object.class);
        } catch (Exception e) {
            LOGGER.error("Error evaluating expression: {}", e.getMessage(), e);
            throw new TerminateWorkflowException(e.getMessage());
        }
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/core/src/main/java/com/netflix/conductor/core/execution/evaluators/ValueParamEvaluator.java | core/src/main/java/com/netflix/conductor/core/execution/evaluators/ValueParamEvaluator.java | /*
* Copyright 2022 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.core.execution.evaluators;
import java.util.Map;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.stereotype.Component;
import com.netflix.conductor.core.exception.TerminateWorkflowException;
@Component(ValueParamEvaluator.NAME)
public class ValueParamEvaluator implements Evaluator {

    public static final String NAME = "value-param";

    private static final Logger LOGGER = LoggerFactory.getLogger(ValueParamEvaluator.class);

    /**
     * Treats the expression as a key and looks it up directly in the input map.
     *
     * @param expression the key to read from the input map.
     * @param input must be a {@code Map<String, Object>} (a JSON object).
     * @return the value stored under {@code expression}, possibly {@code null}.
     * @throws TerminateWorkflowException if the input is not a map.
     */
    @SuppressWarnings("unchecked")
    @Override
    public Object evaluate(String expression, Object input) {
        LOGGER.debug("ValueParam evaluator -- evaluating: {}", expression);
        if (!(input instanceof Map)) {
            String errorMsg = String.format("Input has to be a JSON object: %s", input.getClass());
            LOGGER.error(errorMsg);
            throw new TerminateWorkflowException(errorMsg);
        }
        Object result = ((Map<String, Object>) input).get(expression);
        LOGGER.debug("ValueParam evaluator -- result: {}", result);
        return result;
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/core/src/main/java/com/netflix/conductor/core/execution/mapper/TerminateTaskMapper.java | core/src/main/java/com/netflix/conductor/core/execution/mapper/TerminateTaskMapper.java | /*
* Copyright 2022 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.core.execution.mapper;
import java.util.List;
import java.util.Map;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.stereotype.Component;
import com.netflix.conductor.common.metadata.tasks.TaskType;
import com.netflix.conductor.core.utils.ParametersUtils;
import com.netflix.conductor.model.TaskModel;
import com.netflix.conductor.model.WorkflowModel;
import static com.netflix.conductor.common.metadata.tasks.TaskType.TASK_TYPE_TERMINATE;
/**
 * Maps a workflow task of type TERMINATE to a single IN_PROGRESS {@link TaskModel}, resolving
 * the task's input parameters against the current workflow state.
 */
@Component
public class TerminateTaskMapper implements TaskMapper {

    public static final Logger logger = LoggerFactory.getLogger(TerminateTaskMapper.class);

    private final ParametersUtils parametersUtils;

    public TerminateTaskMapper(ParametersUtils parametersUtils) {
        this.parametersUtils = parametersUtils;
    }

    @Override
    public String getTaskType() {
        return TaskType.TERMINATE.name();
    }

    @Override
    public List<TaskModel> getMappedTasks(TaskMapperContext taskMapperContext) {
        logger.debug("TaskMapperContext {} in TerminateTaskMapper", taskMapperContext);
        WorkflowModel workflow = taskMapperContext.getWorkflowModel();
        String taskId = taskMapperContext.getTaskId();

        // Resolve ${...} references in the task's declared input against the workflow state.
        Map<String, Object> resolvedInput =
                parametersUtils.getTaskInputV2(
                        taskMapperContext.getWorkflowTask().getInputParameters(),
                        workflow,
                        taskId,
                        null);

        TaskModel terminateTask = taskMapperContext.createTaskModel();
        terminateTask.setTaskType(TASK_TYPE_TERMINATE);
        terminateTask.setStartTime(System.currentTimeMillis());
        terminateTask.setInputData(resolvedInput);
        terminateTask.setStatus(TaskModel.Status.IN_PROGRESS);
        return List.of(terminateTask);
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/core/src/main/java/com/netflix/conductor/core/execution/mapper/TaskMapper.java | core/src/main/java/com/netflix/conductor/core/execution/mapper/TaskMapper.java | /*
* Copyright 2022 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.core.execution.mapper;
import java.util.List;
import com.netflix.conductor.core.exception.TerminateWorkflowException;
import com.netflix.conductor.model.TaskModel;
/**
 * Maps a workflow task definition to the list of {@link TaskModel} instances that should be
 * scheduled for it. One implementation exists per task type, keyed by {@link #getTaskType()}.
 */
public interface TaskMapper {

    /** Returns the task type this mapper handles (a {@code TaskType} name). */
    String getTaskType();

    /**
     * Produces the tasks to schedule for the given mapping context.
     *
     * @throws TerminateWorkflowException if the task definition is invalid in a way that must
     *     terminate the workflow.
     */
    List<TaskModel> getMappedTasks(TaskMapperContext taskMapperContext)
            throws TerminateWorkflowException;
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/core/src/main/java/com/netflix/conductor/core/execution/mapper/HumanTaskMapper.java | core/src/main/java/com/netflix/conductor/core/execution/mapper/HumanTaskMapper.java | /*
* Copyright 2022 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.core.execution.mapper;
import java.util.List;
import java.util.Map;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.stereotype.Component;
import com.netflix.conductor.common.metadata.tasks.TaskType;
import com.netflix.conductor.common.metadata.workflow.WorkflowTask;
import com.netflix.conductor.core.execution.tasks.Human;
import com.netflix.conductor.core.utils.ParametersUtils;
import com.netflix.conductor.model.TaskModel;
import com.netflix.conductor.model.WorkflowModel;
import static com.netflix.conductor.common.metadata.tasks.TaskType.TASK_TYPE_HUMAN;
/**
 * An implementation of {@link TaskMapper} to map a {@link WorkflowTask} of type {@link
 * TaskType#HUMAN} to a {@link TaskModel} of type {@link Human} with {@link
 * TaskModel.Status#IN_PROGRESS}
 */
@Component
public class HumanTaskMapper implements TaskMapper {

    public static final Logger LOGGER = LoggerFactory.getLogger(HumanTaskMapper.class);

    private final ParametersUtils parametersUtils;

    public HumanTaskMapper(ParametersUtils parametersUtils) {
        this.parametersUtils = parametersUtils;
    }

    @Override
    public String getTaskType() {
        return TaskType.HUMAN.name();
    }

    @Override
    public List<TaskModel> getMappedTasks(TaskMapperContext taskMapperContext) {
        LOGGER.debug("TaskMapperContext {} in HumanTaskMapper", taskMapperContext);
        WorkflowModel workflow = taskMapperContext.getWorkflowModel();
        String taskId = taskMapperContext.getTaskId();

        // Resolve the declared input parameters against the current workflow state.
        Map<String, Object> resolvedInput =
                parametersUtils.getTaskInputV2(
                        taskMapperContext.getWorkflowTask().getInputParameters(),
                        workflow,
                        taskId,
                        null);

        TaskModel humanTask = taskMapperContext.createTaskModel();
        humanTask.setTaskType(TASK_TYPE_HUMAN);
        humanTask.setInputData(resolvedInput);
        humanTask.setStartTime(System.currentTimeMillis());
        humanTask.setStatus(TaskModel.Status.IN_PROGRESS);
        return List.of(humanTask);
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/core/src/main/java/com/netflix/conductor/core/execution/mapper/NoopTaskMapper.java | core/src/main/java/com/netflix/conductor/core/execution/mapper/NoopTaskMapper.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.core.execution.mapper;
import java.util.List;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.stereotype.Component;
import com.netflix.conductor.common.metadata.tasks.TaskType;
import com.netflix.conductor.model.TaskModel;
import static com.netflix.conductor.common.metadata.tasks.TaskType.*;
/** Maps a NOOP workflow task to a single IN_PROGRESS {@link TaskModel} with no input. */
@Component
public class NoopTaskMapper implements TaskMapper {

    public static final Logger logger = LoggerFactory.getLogger(NoopTaskMapper.class);

    @Override
    public String getTaskType() {
        return TaskType.NOOP.name();
    }

    @Override
    public List<TaskModel> getMappedTasks(TaskMapperContext taskMapperContext) {
        logger.debug("TaskMapperContext {} in NoopTaskMapper", taskMapperContext);
        TaskModel noopTask = taskMapperContext.createTaskModel();
        noopTask.setTaskType(TASK_TYPE_NOOP);
        noopTask.setStartTime(System.currentTimeMillis());
        noopTask.setStatus(TaskModel.Status.IN_PROGRESS);
        return List.of(noopTask);
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/core/src/main/java/com/netflix/conductor/core/execution/mapper/SwitchTaskMapper.java | core/src/main/java/com/netflix/conductor/core/execution/mapper/SwitchTaskMapper.java | /*
* Copyright 2022 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.core.execution.mapper;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.stereotype.Component;
import com.netflix.conductor.common.metadata.tasks.TaskType;
import com.netflix.conductor.common.metadata.workflow.WorkflowDef;
import com.netflix.conductor.common.metadata.workflow.WorkflowTask;
import com.netflix.conductor.core.exception.TerminateWorkflowException;
import com.netflix.conductor.core.execution.evaluators.Evaluator;
import com.netflix.conductor.model.TaskModel;
import com.netflix.conductor.model.WorkflowModel;
/**
 * An implementation of {@link TaskMapper} to map a {@link WorkflowTask} of type {@link
 * TaskType#SWITCH} to a List {@link TaskModel} starting with Task of type {@link TaskType#SWITCH}
 * which is marked as IN_PROGRESS, followed by the list of {@link TaskModel} based on the case
 * expression evaluation in the Switch task.
 */
@Component
public class SwitchTaskMapper implements TaskMapper {

    private static final Logger LOGGER = LoggerFactory.getLogger(SwitchTaskMapper.class);

    private final Map<String, Evaluator> evaluators;

    public SwitchTaskMapper(Map<String, Evaluator> evaluators) {
        this.evaluators = evaluators;
    }

    @Override
    public String getTaskType() {
        return TaskType.SWITCH.name();
    }

    /**
     * This method gets the list of tasks that need to scheduled when the task to scheduled is of
     * type {@link TaskType#SWITCH}.
     *
     * @param taskMapperContext: A wrapper class containing the {@link WorkflowTask}, {@link
     *     WorkflowDef}, {@link WorkflowModel} and a string representation of the TaskId
     * @return List of tasks in the following order:
     *     <ul>
     *       <li>{@link TaskType#SWITCH} with {@link TaskModel.Status#IN_PROGRESS}
     *       <li>List of tasks based on the evaluation of {@link WorkflowTask#getEvaluatorType()}
     *           and {@link WorkflowTask#getExpression()} are scheduled.
     *       <li>In the case of no matching {@link WorkflowTask#getEvaluatorType()}, workflow will
     *           be terminated with error message. In case of no matching result after the
     *           evaluation of the {@link WorkflowTask#getExpression()}, the {@link
     *           WorkflowTask#getDefaultCase()} Tasks are scheduled.
     *     </ul>
     */
    @Override
    public List<TaskModel> getMappedTasks(TaskMapperContext taskMapperContext) {
        LOGGER.debug("TaskMapperContext {} in SwitchTaskMapper", taskMapperContext);
        List<TaskModel> tasksToBeScheduled = new LinkedList<>();
        WorkflowTask workflowTask = taskMapperContext.getWorkflowTask();
        WorkflowModel workflowModel = taskMapperContext.getWorkflowModel();
        Map<String, Object> taskInput = taskMapperContext.getTaskInput();
        int retryCount = taskMapperContext.getRetryCount();

        // Look up the evaluator for the declared expression type; a missing evaluator is a
        // definition error and terminates the workflow.
        String evaluatorType = workflowTask.getEvaluatorType();
        Evaluator evaluator = evaluators.get(evaluatorType);
        if (evaluator == null) {
            String errorMsg = String.format("No evaluator registered for type: %s", evaluatorType);
            LOGGER.error(errorMsg);
            throw new TerminateWorkflowException(errorMsg);
        }

        String evalResult;
        try {
            // String-concatenate so a null evaluation result becomes the literal "null".
            evalResult = "" + evaluator.evaluate(workflowTask.getExpression(), taskInput);
        } catch (Exception exception) {
            // Evaluation failed: schedule only the SWITCH task itself, marked FAILED with the
            // evaluation error, so the decider can surface the failure.
            TaskModel failedTask = newSwitchTask(taskMapperContext, taskInput);
            failedTask.setStatus(TaskModel.Status.FAILED);
            failedTask.setReasonForIncompletion(exception.getMessage());
            tasksToBeScheduled.add(failedTask);
            return tasksToBeScheduled;
        }

        TaskModel switchTask = newSwitchTask(taskMapperContext, taskInput);
        switchTask.getInputData().put("case", evalResult);
        switchTask.addOutput("evaluationResult", List.of(evalResult));
        switchTask.addOutput("selectedCase", evalResult);
        switchTask.setStatus(TaskModel.Status.IN_PROGRESS);
        tasksToBeScheduled.add(switchTask);

        // Pick the branch matching the evaluated result; if there is no (non-empty) match,
        // fall back to the default case.
        List<WorkflowTask> selectedTasks = workflowTask.getDecisionCases().get(evalResult);
        if (selectedTasks == null || selectedTasks.isEmpty()) {
            selectedTasks = workflowTask.getDefaultCase();
        }

        // Once there are selected tasks that need to proceed as part of the switch, get the
        // next task to be scheduled by using the decider service.
        if (selectedTasks != null && !selectedTasks.isEmpty()) {
            // Schedule only the first task of the branch; the decider schedules the rest.
            WorkflowTask selectedTask = selectedTasks.get(0);
            // TODO break out this recursive call using function composition of what needs to be
            // done and then walk back the condition tree
            List<TaskModel> caseTasks =
                    taskMapperContext
                            .getDeciderService()
                            .getTasksToBeScheduled(
                                    workflowModel,
                                    selectedTask,
                                    retryCount,
                                    taskMapperContext.getRetryTaskId());
            tasksToBeScheduled.addAll(caseTasks);
            switchTask.getInputData().put("hasChildren", "true");
        }
        return tasksToBeScheduled;
    }

    /**
     * Builds the SWITCH task shell shared by the success and failure paths: type, definition
     * name, copied input and start time set; status left to the caller.
     */
    private TaskModel newSwitchTask(
            TaskMapperContext taskMapperContext, Map<String, Object> taskInput) {
        TaskModel switchTask = taskMapperContext.createTaskModel();
        switchTask.setTaskType(TaskType.TASK_TYPE_SWITCH);
        switchTask.setTaskDefName(TaskType.TASK_TYPE_SWITCH);
        switchTask.getInputData().putAll(taskInput);
        switchTask.setStartTime(System.currentTimeMillis());
        return switchTask;
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/core/src/main/java/com/netflix/conductor/core/execution/mapper/SubWorkflowTaskMapper.java | core/src/main/java/com/netflix/conductor/core/execution/mapper/SubWorkflowTaskMapper.java | /*
* Copyright 2022 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.core.execution.mapper;
import java.util.*;
import org.apache.commons.lang3.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.stereotype.Component;
import com.netflix.conductor.annotations.VisibleForTesting;
import com.netflix.conductor.common.metadata.tasks.TaskType;
import com.netflix.conductor.common.metadata.workflow.SubWorkflowParams;
import com.netflix.conductor.common.metadata.workflow.WorkflowDef;
import com.netflix.conductor.common.metadata.workflow.WorkflowTask;
import com.netflix.conductor.core.exception.TerminateWorkflowException;
import com.netflix.conductor.core.utils.ParametersUtils;
import com.netflix.conductor.dao.MetadataDAO;
import com.netflix.conductor.model.TaskModel;
import com.netflix.conductor.model.WorkflowModel;
import static com.netflix.conductor.common.metadata.tasks.TaskType.TASK_TYPE_SUB_WORKFLOW;
@Component
public class SubWorkflowTaskMapper implements TaskMapper {
private static final Logger LOGGER = LoggerFactory.getLogger(SubWorkflowTaskMapper.class);
private final ParametersUtils parametersUtils;
private final MetadataDAO metadataDAO;
public SubWorkflowTaskMapper(ParametersUtils parametersUtils, MetadataDAO metadataDAO) {
this.parametersUtils = parametersUtils;
this.metadataDAO = metadataDAO;
}
@Override
public String getTaskType() {
return TaskType.SUB_WORKFLOW.name();
}
@SuppressWarnings("rawtypes")
@Override
public List<TaskModel> getMappedTasks(TaskMapperContext taskMapperContext) {
LOGGER.debug("TaskMapperContext {} in SubWorkflowTaskMapper", taskMapperContext);
WorkflowTask workflowTask = taskMapperContext.getWorkflowTask();
WorkflowModel workflowModel = taskMapperContext.getWorkflowModel();
String taskId = taskMapperContext.getTaskId();
// Check if there are sub workflow parameters, if not throw an exception, cannot initiate a
// sub-workflow without workflow params
SubWorkflowParams subWorkflowParams = getSubWorkflowParams(workflowTask);
Map<String, Object> resolvedParams =
getSubWorkflowInputParameters(workflowModel, subWorkflowParams);
String subWorkflowName = resolvedParams.get("name").toString();
Integer subWorkflowVersion = getSubWorkflowVersion(resolvedParams, subWorkflowName);
Object subWorkflowDefinition = resolvedParams.get("workflowDefinition");
Map subWorkflowTaskToDomain = null;
Object uncheckedTaskToDomain = resolvedParams.get("taskToDomain");
if (uncheckedTaskToDomain instanceof Map) {
subWorkflowTaskToDomain = (Map) uncheckedTaskToDomain;
}
TaskModel subWorkflowTask = taskMapperContext.createTaskModel();
subWorkflowTask.setTaskType(TASK_TYPE_SUB_WORKFLOW);
subWorkflowTask.addInput("subWorkflowName", subWorkflowName);
subWorkflowTask.addInput("priority", resolvedParams.get("priority"));
subWorkflowTask.addInput("subWorkflowVersion", subWorkflowVersion);
subWorkflowTask.addInput("subWorkflowTaskToDomain", subWorkflowTaskToDomain);
subWorkflowTask.addInput("subWorkflowDefinition", subWorkflowDefinition);
subWorkflowTask.addInput("workflowInput", taskMapperContext.getTaskInput());
subWorkflowTask.setStatus(TaskModel.Status.SCHEDULED);
subWorkflowTask.setCallbackAfterSeconds(workflowTask.getStartDelay());
if (subWorkflowParams.getPriority() != null
&& !StringUtils.isEmpty(subWorkflowParams.getPriority().toString())) {
int priority = Integer.parseInt(subWorkflowParams.getPriority().toString());
subWorkflowTask.setWorkflowPriority(priority);
}
LOGGER.debug("SubWorkflowTask {} created to be Scheduled", subWorkflowTask);
return List.of(subWorkflowTask);
}
@VisibleForTesting
SubWorkflowParams getSubWorkflowParams(WorkflowTask workflowTask) {
return Optional.ofNullable(workflowTask.getSubWorkflowParam())
.orElseThrow(
() -> {
String reason =
String.format(
"Task %s is defined as sub-workflow and is missing subWorkflowParams. "
+ "Please check the workflow definition",
workflowTask.getName());
LOGGER.error(reason);
return new TerminateWorkflowException(reason);
});
}
private Map<String, Object> getSubWorkflowInputParameters(
WorkflowModel workflowModel, SubWorkflowParams subWorkflowParams) {
Map<String, Object> params = new HashMap<>();
params.put("name", subWorkflowParams.getName());
params.put("priority", subWorkflowParams.getPriority());
Integer version = subWorkflowParams.getVersion();
if (version != null) {
params.put("version", version);
}
Map<String, String> taskToDomain = subWorkflowParams.getTaskToDomain();
if (taskToDomain != null) {
params.put("taskToDomain", taskToDomain);
}
params = parametersUtils.getTaskInputV2(params, workflowModel, null, null);
// do not resolve params inside subworkflow definition
Object subWorkflowDefinition = subWorkflowParams.getWorkflowDefinition();
if (subWorkflowDefinition != null) {
params.put("workflowDefinition", subWorkflowDefinition);
}
return params;
}
    /**
     * Determines the version of the sub-workflow to start: the resolved "version"
     * parameter when present, otherwise the latest registered version of the named
     * workflow definition.
     *
     * @param resolvedParams the already-resolved sub-workflow input parameters
     * @param subWorkflowName the name of the sub-workflow definition
     * @return the version to use
     * @throws TerminateWorkflowException if no version was given and no workflow
     *     definition is registered under {@code subWorkflowName}
     */
    private Integer getSubWorkflowVersion(
            Map<String, Object> resolvedParams, String subWorkflowName) {
        // Integer.parseInt throws NumberFormatException if a non-numeric version was
        // resolved; that exception propagates to the caller unchanged.
        return Optional.ofNullable(resolvedParams.get("version"))
                .map(Object::toString)
                .map(Integer::parseInt)
                .orElseGet(
                        () ->
                                metadataDAO
                                        .getLatestWorkflowDef(subWorkflowName)
                                        .map(WorkflowDef::getVersion)
                                        .orElseThrow(
                                                () -> {
                                                    String reason =
                                                            String.format(
                                                                    "The Task %s defined as a sub-workflow has no workflow definition available ",
                                                                    subWorkflowName);
                                                    LOGGER.error(reason);
                                                    return new TerminateWorkflowException(reason);
                                                }));
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/core/src/main/java/com/netflix/conductor/core/execution/mapper/SimpleTaskMapper.java | core/src/main/java/com/netflix/conductor/core/execution/mapper/SimpleTaskMapper.java | /*
* Copyright 2022 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.core.execution.mapper;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.stereotype.Component;
import com.netflix.conductor.common.metadata.tasks.TaskDef;
import com.netflix.conductor.common.metadata.tasks.TaskType;
import com.netflix.conductor.common.metadata.workflow.WorkflowDef;
import com.netflix.conductor.common.metadata.workflow.WorkflowTask;
import com.netflix.conductor.core.exception.TerminateWorkflowException;
import com.netflix.conductor.core.utils.ParametersUtils;
import com.netflix.conductor.model.TaskModel;
import com.netflix.conductor.model.WorkflowModel;
/**
* An implementation of {@link TaskMapper} to map a {@link WorkflowTask} of type {@link
* TaskType#SIMPLE} to a {@link TaskModel} with status {@link TaskModel.Status#SCHEDULED}.
 * <b>NOTE:</b> There is no task type defined for simple tasks.
*/
@Component
public class SimpleTaskMapper implements TaskMapper {

    public static final Logger LOGGER = LoggerFactory.getLogger(SimpleTaskMapper.class);

    private final ParametersUtils parametersUtils;

    public SimpleTaskMapper(ParametersUtils parametersUtils) {
        this.parametersUtils = parametersUtils;
    }

    @Override
    public String getTaskType() {
        return TaskType.SIMPLE.name();
    }

    /**
     * Maps a SIMPLE {@link WorkflowTask} to a single {@link TaskModel} in the
     * {@link TaskModel.Status#SCHEDULED} state.
     *
     * @param taskMapperContext wrapper holding the {@link WorkflowTask}, the {@link
     *     WorkflowModel} and the generated task id
     * @return a singleton list containing the scheduled simple task
     * @throws TerminateWorkflowException if the task does not carry a task definition
     */
    @Override
    public List<TaskModel> getMappedTasks(TaskMapperContext taskMapperContext)
            throws TerminateWorkflowException {
        LOGGER.debug("TaskMapperContext {} in SimpleTaskMapper", taskMapperContext);

        WorkflowTask workflowTask = taskMapperContext.getWorkflowTask();
        TaskDef taskDefinition = workflowTask.getTaskDefinition();
        if (taskDefinition == null) {
            throw new TerminateWorkflowException(
                    String.format(
                            "Invalid task. Task %s does not have a definition",
                            workflowTask.getName()));
        }

        Map<String, Object> taskInput =
                parametersUtils.getTaskInput(
                        workflowTask.getInputParameters(),
                        taskMapperContext.getWorkflowModel(),
                        taskDefinition,
                        taskMapperContext.getTaskId());

        TaskModel simpleTask = taskMapperContext.createTaskModel();
        // A SIMPLE task's type is its own task name.
        simpleTask.setTaskType(workflowTask.getName());
        simpleTask.setStatus(TaskModel.Status.SCHEDULED);
        simpleTask.setInputData(taskInput);
        simpleTask.setStartDelayInSeconds(workflowTask.getStartDelay());
        simpleTask.setCallbackAfterSeconds(workflowTask.getStartDelay());
        simpleTask.setRetryCount(taskMapperContext.getRetryCount());
        simpleTask.setRetriedTaskId(taskMapperContext.getRetryTaskId());
        simpleTask.setResponseTimeoutSeconds(taskDefinition.getResponseTimeoutSeconds());
        simpleTask.setRateLimitPerFrequency(taskDefinition.getRateLimitPerFrequency());
        simpleTask.setRateLimitFrequencyInSeconds(taskDefinition.getRateLimitFrequencyInSeconds());
        return List.of(simpleTask);
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/core/src/main/java/com/netflix/conductor/core/execution/mapper/UserDefinedTaskMapper.java | core/src/main/java/com/netflix/conductor/core/execution/mapper/UserDefinedTaskMapper.java | /*
* Copyright 2022 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.core.execution.mapper;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.stereotype.Component;
import com.netflix.conductor.common.metadata.tasks.TaskDef;
import com.netflix.conductor.common.metadata.tasks.TaskType;
import com.netflix.conductor.common.metadata.workflow.WorkflowDef;
import com.netflix.conductor.common.metadata.workflow.WorkflowTask;
import com.netflix.conductor.core.exception.TerminateWorkflowException;
import com.netflix.conductor.core.utils.ParametersUtils;
import com.netflix.conductor.dao.MetadataDAO;
import com.netflix.conductor.model.TaskModel;
import com.netflix.conductor.model.WorkflowModel;
/**
* An implementation of {@link TaskMapper} to map a {@link WorkflowTask} of type {@link
* TaskType#USER_DEFINED} to a {@link TaskModel} of type {@link TaskType#USER_DEFINED} with {@link
* TaskModel.Status#SCHEDULED}
*/
@Component
public class UserDefinedTaskMapper implements TaskMapper {

    public static final Logger LOGGER = LoggerFactory.getLogger(UserDefinedTaskMapper.class);

    private final ParametersUtils parametersUtils;
    private final MetadataDAO metadataDAO;

    public UserDefinedTaskMapper(ParametersUtils parametersUtils, MetadataDAO metadataDAO) {
        this.parametersUtils = parametersUtils;
        this.metadataDAO = metadataDAO;
    }

    @Override
    public String getTaskType() {
        return TaskType.USER_DEFINED.name();
    }

    /**
     * Maps a USER_DEFINED {@link WorkflowTask} to a single {@link TaskModel} in the
     * {@link TaskModel.Status#SCHEDULED} state.
     *
     * @param taskMapperContext wrapper holding the {@link WorkflowTask}, the {@link
     *     WorkflowModel} and the generated task id
     * @return a singleton list with the scheduled user-defined task
     * @throws TerminateWorkflowException if no task definition is found, neither embedded in
     *     the workflow definition nor registered in the metadata store
     */
    @Override
    public List<TaskModel> getMappedTasks(TaskMapperContext taskMapperContext)
            throws TerminateWorkflowException {
        LOGGER.debug("TaskMapperContext {} in UserDefinedTaskMapper", taskMapperContext);

        WorkflowTask workflowTask = taskMapperContext.getWorkflowTask();

        // Prefer the definition embedded in the workflow; fall back to the registered one.
        TaskDef taskDefinition = taskMapperContext.getTaskDefinition();
        if (taskDefinition == null) {
            taskDefinition = metadataDAO.getTaskDef(workflowTask.getName());
        }
        if (taskDefinition == null) {
            throw new TerminateWorkflowException(
                    String.format(
                            "Invalid task specified. Cannot find task by name %s in the task definitions",
                            workflowTask.getName()));
        }

        Map<String, Object> taskInput =
                parametersUtils.getTaskInputV2(
                        workflowTask.getInputParameters(),
                        taskMapperContext.getWorkflowModel(),
                        taskMapperContext.getTaskId(),
                        taskDefinition);

        TaskModel userDefinedTask = taskMapperContext.createTaskModel();
        userDefinedTask.setInputData(taskInput);
        userDefinedTask.setStatus(TaskModel.Status.SCHEDULED);
        userDefinedTask.setRetryCount(taskMapperContext.getRetryCount());
        userDefinedTask.setCallbackAfterSeconds(workflowTask.getStartDelay());
        userDefinedTask.setRateLimitPerFrequency(taskDefinition.getRateLimitPerFrequency());
        userDefinedTask.setRateLimitFrequencyInSeconds(
                taskDefinition.getRateLimitFrequencyInSeconds());
        return List.of(userDefinedTask);
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/core/src/main/java/com/netflix/conductor/core/execution/mapper/ExclusiveJoinTaskMapper.java | core/src/main/java/com/netflix/conductor/core/execution/mapper/ExclusiveJoinTaskMapper.java | /*
* Copyright 2022 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.core.execution.mapper;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.stereotype.Component;
import com.netflix.conductor.common.metadata.tasks.TaskType;
import com.netflix.conductor.common.metadata.workflow.WorkflowTask;
import com.netflix.conductor.model.TaskModel;
@Component
public class ExclusiveJoinTaskMapper implements TaskMapper {

    public static final Logger LOGGER = LoggerFactory.getLogger(ExclusiveJoinTaskMapper.class);

    @Override
    public String getTaskType() {
        return TaskType.EXCLUSIVE_JOIN.name();
    }

    /**
     * Maps an EXCLUSIVE_JOIN {@link WorkflowTask} to a single {@link TaskModel} of type
     * {@link TaskType#TASK_TYPE_EXCLUSIVE_JOIN} in the IN_PROGRESS state. The task input
     * carries the join targets and, when configured, the default exclusive-join task.
     */
    @Override
    public List<TaskModel> getMappedTasks(TaskMapperContext taskMapperContext) {
        LOGGER.debug("TaskMapperContext {} in ExclusiveJoinTaskMapper", taskMapperContext);

        WorkflowTask workflowTask = taskMapperContext.getWorkflowTask();

        Map<String, Object> joinInput = new HashMap<>();
        joinInput.put("joinOn", workflowTask.getJoinOn());
        Object defaultExclusiveJoinTask = workflowTask.getDefaultExclusiveJoinTask();
        if (defaultExclusiveJoinTask != null) {
            joinInput.put("defaultExclusiveJoinTask", defaultExclusiveJoinTask);
        }

        TaskModel joinTask = taskMapperContext.createTaskModel();
        joinTask.setTaskType(TaskType.TASK_TYPE_EXCLUSIVE_JOIN);
        joinTask.setTaskDefName(TaskType.TASK_TYPE_EXCLUSIVE_JOIN);
        joinTask.setStartTime(System.currentTimeMillis());
        joinTask.setInputData(joinInput);
        joinTask.setStatus(TaskModel.Status.IN_PROGRESS);
        return List.of(joinTask);
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/core/src/main/java/com/netflix/conductor/core/execution/mapper/InlineTaskMapper.java | core/src/main/java/com/netflix/conductor/core/execution/mapper/InlineTaskMapper.java | /*
* Copyright 2022 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.core.execution.mapper;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.stereotype.Component;
import com.netflix.conductor.common.metadata.tasks.TaskDef;
import com.netflix.conductor.common.metadata.tasks.TaskType;
import com.netflix.conductor.common.metadata.workflow.WorkflowTask;
import com.netflix.conductor.core.utils.ParametersUtils;
import com.netflix.conductor.dao.MetadataDAO;
import com.netflix.conductor.model.TaskModel;
import com.netflix.conductor.model.WorkflowModel;
/**
 * An implementation of {@link TaskMapper} to map a {@link WorkflowTask} of type {@link
 * TaskType#INLINE} to a single {@link TaskModel} of type {@link TaskType#TASK_TYPE_INLINE},
 * marked as IN_PROGRESS, whose input carries the resolved parameters (including the inline
 * expression) to be evaluated by the Inline system task.
 */
@Component
public class InlineTaskMapper implements TaskMapper {

    public static final Logger LOGGER = LoggerFactory.getLogger(InlineTaskMapper.class);

    private final ParametersUtils parametersUtils;
    private final MetadataDAO metadataDAO;

    public InlineTaskMapper(ParametersUtils parametersUtils, MetadataDAO metadataDAO) {
        this.parametersUtils = parametersUtils;
        this.metadataDAO = metadataDAO;
    }

    @Override
    public String getTaskType() {
        return TaskType.INLINE.name();
    }

    /**
     * Maps an INLINE {@link WorkflowTask} to a single {@link TaskModel} of type
     * {@link TaskType#TASK_TYPE_INLINE} in the IN_PROGRESS state.
     *
     * @param taskMapperContext wrapper holding the {@link WorkflowTask}, the {@link
     *     WorkflowModel} and the generated task id
     * @return a singleton list with the in-progress inline task
     */
    @Override
    public List<TaskModel> getMappedTasks(TaskMapperContext taskMapperContext) {
        LOGGER.debug("TaskMapperContext {} in InlineTaskMapper", taskMapperContext);

        WorkflowTask workflowTask = taskMapperContext.getWorkflowTask();
        WorkflowModel workflowModel = taskMapperContext.getWorkflowModel();
        String taskId = taskMapperContext.getTaskId();

        // Prefer the definition embedded in the workflow; fall back to the registered
        // one. May still be null if the task was never registered.
        TaskDef taskDefinition =
                Optional.ofNullable(taskMapperContext.getTaskDefinition())
                        .orElseGet(() -> metadataDAO.getTaskDef(workflowTask.getName()));

        Map<String, Object> taskInput =
                parametersUtils.getTaskInputV2(
                        taskMapperContext.getWorkflowTask().getInputParameters(),
                        workflowModel,
                        taskId,
                        taskDefinition);

        TaskModel inlineTask = taskMapperContext.createTaskModel();
        inlineTask.setTaskType(TaskType.TASK_TYPE_INLINE);
        inlineTask.setStartTime(System.currentTimeMillis());
        inlineTask.setInputData(taskInput);
        // Fix: read the isolation group from the resolved task definition (which may have
        // been loaded from the metadata store above) rather than only from the embedded
        // one. The previous check ignored DAO-fetched definitions; this matches the
        // behavior of KafkaPublishTaskMapper.
        if (Objects.nonNull(taskDefinition)) {
            inlineTask.setIsolationGroupId(taskDefinition.getIsolationGroupId());
        }
        inlineTask.setStatus(TaskModel.Status.IN_PROGRESS);
        return List.of(inlineTask);
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/core/src/main/java/com/netflix/conductor/core/execution/mapper/JsonJQTransformTaskMapper.java | core/src/main/java/com/netflix/conductor/core/execution/mapper/JsonJQTransformTaskMapper.java | /*
* Copyright 2022 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.core.execution.mapper;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.stereotype.Component;
import com.netflix.conductor.common.metadata.tasks.TaskDef;
import com.netflix.conductor.common.metadata.tasks.TaskType;
import com.netflix.conductor.common.metadata.workflow.WorkflowTask;
import com.netflix.conductor.core.utils.ParametersUtils;
import com.netflix.conductor.dao.MetadataDAO;
import com.netflix.conductor.model.TaskModel;
import com.netflix.conductor.model.WorkflowModel;
@Component
public class JsonJQTransformTaskMapper implements TaskMapper {

    public static final Logger LOGGER = LoggerFactory.getLogger(JsonJQTransformTaskMapper.class);

    private final ParametersUtils parametersUtils;
    private final MetadataDAO metadataDAO;

    public JsonJQTransformTaskMapper(ParametersUtils parametersUtils, MetadataDAO metadataDAO) {
        this.parametersUtils = parametersUtils;
        this.metadataDAO = metadataDAO;
    }

    @Override
    public String getTaskType() {
        return TaskType.JSON_JQ_TRANSFORM.name();
    }

    /**
     * Maps a JSON_JQ_TRANSFORM {@link WorkflowTask} to a single {@link TaskModel} in the
     * IN_PROGRESS state, carrying the resolved input (including the JQ expression).
     *
     * @param taskMapperContext wrapper holding the {@link WorkflowTask}, the {@link
     *     WorkflowModel} and the generated task id
     * @return a singleton list with the in-progress JQ transform task
     */
    @Override
    public List<TaskModel> getMappedTasks(TaskMapperContext taskMapperContext) {
        LOGGER.debug("TaskMapperContext {} in JsonJQTransformTaskMapper", taskMapperContext);

        WorkflowTask workflowTask = taskMapperContext.getWorkflowTask();
        WorkflowModel workflowModel = taskMapperContext.getWorkflowModel();
        String taskId = taskMapperContext.getTaskId();

        // Prefer the definition embedded in the workflow; fall back to the registered
        // one. May still be null if the task was never registered.
        TaskDef taskDefinition =
                Optional.ofNullable(taskMapperContext.getTaskDefinition())
                        .orElseGet(() -> metadataDAO.getTaskDef(workflowTask.getName()));

        Map<String, Object> taskInput =
                parametersUtils.getTaskInputV2(
                        workflowTask.getInputParameters(), workflowModel, taskId, taskDefinition);

        TaskModel jsonJQTransformTask = taskMapperContext.createTaskModel();
        jsonJQTransformTask.setStartTime(System.currentTimeMillis());
        jsonJQTransformTask.setInputData(taskInput);
        // Fix: read the isolation group from the resolved task definition (which may have
        // been loaded from the metadata store above) rather than only from the embedded
        // one. The previous check ignored DAO-fetched definitions; this matches the
        // behavior of KafkaPublishTaskMapper.
        if (Objects.nonNull(taskDefinition)) {
            jsonJQTransformTask.setIsolationGroupId(taskDefinition.getIsolationGroupId());
        }
        jsonJQTransformTask.setStatus(TaskModel.Status.IN_PROGRESS);
        return List.of(jsonJQTransformTask);
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/core/src/main/java/com/netflix/conductor/core/execution/mapper/StartWorkflowTaskMapper.java | core/src/main/java/com/netflix/conductor/core/execution/mapper/StartWorkflowTaskMapper.java | /*
* Copyright 2022 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.core.execution.mapper;
import java.util.List;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.stereotype.Component;
import com.netflix.conductor.common.metadata.workflow.WorkflowTask;
import com.netflix.conductor.core.exception.TerminateWorkflowException;
import com.netflix.conductor.model.TaskModel;
import static com.netflix.conductor.common.metadata.tasks.TaskType.START_WORKFLOW;
import static com.netflix.conductor.common.metadata.tasks.TaskType.TASK_TYPE_START_WORKFLOW;
@Component
public class StartWorkflowTaskMapper implements TaskMapper {

    private static final Logger LOGGER = LoggerFactory.getLogger(StartWorkflowTaskMapper.class);

    @Override
    public String getTaskType() {
        return START_WORKFLOW.name();
    }

    /**
     * Maps a START_WORKFLOW {@link WorkflowTask} to a single {@link TaskModel} of type
     * {@link com.netflix.conductor.common.metadata.tasks.TaskType#TASK_TYPE_START_WORKFLOW}
     * in the SCHEDULED state, forwarding the already-resolved task input unchanged.
     */
    @Override
    public List<TaskModel> getMappedTasks(TaskMapperContext taskMapperContext)
            throws TerminateWorkflowException {
        TaskModel startWorkflowTask = taskMapperContext.createTaskModel();
        startWorkflowTask.setTaskType(TASK_TYPE_START_WORKFLOW);
        startWorkflowTask.addInput(taskMapperContext.getTaskInput());
        startWorkflowTask.setStatus(TaskModel.Status.SCHEDULED);
        startWorkflowTask.setCallbackAfterSeconds(
                taskMapperContext.getWorkflowTask().getStartDelay());
        LOGGER.debug("{} created", startWorkflowTask);
        return List.of(startWorkflowTask);
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/core/src/main/java/com/netflix/conductor/core/execution/mapper/KafkaPublishTaskMapper.java | core/src/main/java/com/netflix/conductor/core/execution/mapper/KafkaPublishTaskMapper.java | /*
* Copyright 2022 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.core.execution.mapper;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.stereotype.Component;
import com.netflix.conductor.common.metadata.tasks.TaskDef;
import com.netflix.conductor.common.metadata.tasks.TaskType;
import com.netflix.conductor.common.metadata.workflow.WorkflowDef;
import com.netflix.conductor.common.metadata.workflow.WorkflowTask;
import com.netflix.conductor.core.exception.TerminateWorkflowException;
import com.netflix.conductor.core.utils.ParametersUtils;
import com.netflix.conductor.dao.MetadataDAO;
import com.netflix.conductor.model.TaskModel;
import com.netflix.conductor.model.WorkflowModel;
@Component
public class KafkaPublishTaskMapper implements TaskMapper {

    public static final Logger LOGGER = LoggerFactory.getLogger(KafkaPublishTaskMapper.class);

    private final ParametersUtils parametersUtils;
    private final MetadataDAO metadataDAO;

    public KafkaPublishTaskMapper(ParametersUtils parametersUtils, MetadataDAO metadataDAO) {
        this.parametersUtils = parametersUtils;
        this.metadataDAO = metadataDAO;
    }

    @Override
    public String getTaskType() {
        return TaskType.KAFKA_PUBLISH.name();
    }

    /**
     * This method maps a {@link WorkflowTask} of type {@link TaskType#KAFKA_PUBLISH} to a {@link
     * TaskModel} in a {@link TaskModel.Status#SCHEDULED} state
     *
     * @param taskMapperContext: A wrapper class containing the {@link WorkflowTask}, {@link
     *     WorkflowDef}, {@link WorkflowModel} and a string representation of the TaskId
     * @return a List with just one Kafka task
     * @throws TerminateWorkflowException In case if the task definition does not exist
     */
    @Override
    public List<TaskModel> getMappedTasks(TaskMapperContext taskMapperContext)
            throws TerminateWorkflowException {
        LOGGER.debug("TaskMapperContext {} in KafkaPublishTaskMapper", taskMapperContext);

        WorkflowTask workflowTask = taskMapperContext.getWorkflowTask();
        WorkflowModel workflowModel = taskMapperContext.getWorkflowModel();
        String taskId = taskMapperContext.getTaskId();
        int retryCount = taskMapperContext.getRetryCount();

        // Prefer the definition embedded in the workflow; fall back to the registered
        // one. May still be null if the task was never registered.
        TaskDef taskDefinition =
                Optional.ofNullable(taskMapperContext.getTaskDefinition())
                        .orElseGet(() -> metadataDAO.getTaskDef(workflowTask.getName()));

        Map<String, Object> input =
                parametersUtils.getTaskInputV2(
                        workflowTask.getInputParameters(), workflowModel, taskId, taskDefinition);

        TaskModel kafkaPublishTask = taskMapperContext.createTaskModel();
        kafkaPublishTask.setInputData(input);
        kafkaPublishTask.setStatus(TaskModel.Status.SCHEDULED);
        kafkaPublishTask.setRetryCount(retryCount);
        kafkaPublishTask.setCallbackAfterSeconds(workflowTask.getStartDelay());
        // The definition-derived attributes are only applied when a definition exists;
        // a KAFKA_PUBLISH task without a registered definition is still schedulable.
        if (Objects.nonNull(taskDefinition)) {
            kafkaPublishTask.setExecutionNameSpace(taskDefinition.getExecutionNameSpace());
            kafkaPublishTask.setIsolationGroupId(taskDefinition.getIsolationGroupId());
            kafkaPublishTask.setRateLimitPerFrequency(taskDefinition.getRateLimitPerFrequency());
            kafkaPublishTask.setRateLimitFrequencyInSeconds(
                    taskDefinition.getRateLimitFrequencyInSeconds());
        }
        // List.of for consistency with the other task mappers in this package
        // (previously Collections.singletonList; both are unmodifiable).
        return List.of(kafkaPublishTask);
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/core/src/main/java/com/netflix/conductor/core/execution/mapper/EventTaskMapper.java | core/src/main/java/com/netflix/conductor/core/execution/mapper/EventTaskMapper.java | /*
* Copyright 2022 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.core.execution.mapper;
import java.util.List;
import java.util.Map;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.stereotype.Component;
import com.netflix.conductor.common.metadata.tasks.TaskType;
import com.netflix.conductor.common.metadata.workflow.WorkflowTask;
import com.netflix.conductor.core.utils.ParametersUtils;
import com.netflix.conductor.model.TaskModel;
import com.netflix.conductor.model.WorkflowModel;
import static com.netflix.conductor.common.metadata.tasks.TaskType.TASK_TYPE_EVENT;
@Component
public class EventTaskMapper implements TaskMapper {

    public static final Logger LOGGER = LoggerFactory.getLogger(EventTaskMapper.class);

    private final ParametersUtils parametersUtils;

    public EventTaskMapper(ParametersUtils parametersUtils) {
        this.parametersUtils = parametersUtils;
    }

    @Override
    public String getTaskType() {
        return TaskType.EVENT.name();
    }

    /**
     * Maps an EVENT {@link WorkflowTask} to a single {@link TaskModel} of type
     * {@link com.netflix.conductor.common.metadata.tasks.TaskType#TASK_TYPE_EVENT}
     * in the SCHEDULED state.
     */
    @Override
    public List<TaskModel> getMappedTasks(TaskMapperContext taskMapperContext) {
        LOGGER.debug("TaskMapperContext {} in EventTaskMapper", taskMapperContext);

        WorkflowTask workflowTask = taskMapperContext.getWorkflowTask();

        // Expose the sink and async-complete flag as ordinary input parameters so any
        // expressions they contain are resolved along with the rest of the input.
        workflowTask.getInputParameters().put("sink", workflowTask.getSink());
        workflowTask.getInputParameters().put("asyncComplete", workflowTask.isAsyncComplete());

        Map<String, Object> resolvedInput =
                parametersUtils.getTaskInputV2(
                        workflowTask.getInputParameters(),
                        taskMapperContext.getWorkflowModel(),
                        taskMapperContext.getTaskId(),
                        null);
        String resolvedSink = (String) resolvedInput.get("sink");
        Boolean resolvedAsyncComplete = (Boolean) resolvedInput.get("asyncComplete");

        TaskModel eventTask = taskMapperContext.createTaskModel();
        eventTask.setTaskType(TASK_TYPE_EVENT);
        eventTask.setStatus(TaskModel.Status.SCHEDULED);
        eventTask.setInputData(resolvedInput);
        // Re-apply the resolved values to the task's input map, as the original did.
        eventTask.getInputData().put("sink", resolvedSink);
        eventTask.getInputData().put("asyncComplete", resolvedAsyncComplete);
        return List.of(eventTask);
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/core/src/main/java/com/netflix/conductor/core/execution/mapper/ForkJoinTaskMapper.java | core/src/main/java/com/netflix/conductor/core/execution/mapper/ForkJoinTaskMapper.java | /*
* Copyright 2022 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.core.execution.mapper;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.stereotype.Component;
import com.netflix.conductor.common.metadata.tasks.TaskType;
import com.netflix.conductor.common.metadata.workflow.WorkflowDef;
import com.netflix.conductor.common.metadata.workflow.WorkflowTask;
import com.netflix.conductor.core.exception.TerminateWorkflowException;
import com.netflix.conductor.model.TaskModel;
import com.netflix.conductor.model.WorkflowModel;
/**
* An implementation of {@link TaskMapper} to map a {@link WorkflowTask} of type {@link
* TaskType#FORK_JOIN} to a LinkedList of {@link TaskModel} beginning with a completed {@link
* TaskType#TASK_TYPE_FORK}, followed by the user defined fork tasks
*/
@Component
public class ForkJoinTaskMapper implements TaskMapper {

    public static final Logger LOGGER = LoggerFactory.getLogger(ForkJoinTaskMapper.class);

    @Override
    public String getTaskType() {
        return TaskType.FORK_JOIN.name();
    }

    /**
     * This method gets the list of tasks that need to scheduled when the task to scheduled is of
     * type {@link TaskType#FORK_JOIN}.
     *
     * @param taskMapperContext: A wrapper class containing the {@link WorkflowTask}, {@link
     *     WorkflowDef}, {@link WorkflowModel} and a string representation of the TaskId
     * @return List of tasks in the following order:
     *     <ul>
     *       <li>{@link TaskType#TASK_TYPE_FORK} with {@link TaskModel.Status#COMPLETED}
     *       <li>Might be any kind of task, but in most cases is a UserDefinedTask with {@link
     *           TaskModel.Status#SCHEDULED}
     *     </ul>
     *
     * @throws TerminateWorkflowException When the task after {@link TaskType#FORK_JOIN} is not a
     *     {@link TaskType#JOIN}
     */
    @Override
    public List<TaskModel> getMappedTasks(TaskMapperContext taskMapperContext)
            throws TerminateWorkflowException {
        LOGGER.debug("TaskMapperContext {} in ForkJoinTaskMapper", taskMapperContext);
        WorkflowTask workflowTask = taskMapperContext.getWorkflowTask();
        Map<String, Object> taskInput = taskMapperContext.getTaskInput();
        WorkflowModel workflowModel = taskMapperContext.getWorkflowModel();
        int retryCount = taskMapperContext.getRetryCount();
        List<TaskModel> tasksToBeScheduled = new LinkedList<>();
        // The FORK marker task does no work of its own: it is created already COMPLETED
        // with identical start/end timestamps and only records the fork point.
        TaskModel forkTask = taskMapperContext.createTaskModel();
        forkTask.setTaskType(TaskType.TASK_TYPE_FORK);
        forkTask.setTaskDefName(TaskType.TASK_TYPE_FORK);
        long epochMillis = System.currentTimeMillis();
        forkTask.setStartTime(epochMillis);
        forkTask.setEndTime(epochMillis);
        forkTask.setInputData(taskInput);
        forkTask.setStatus(TaskModel.Status.COMPLETED);
        if (Objects.nonNull(taskMapperContext.getTaskDefinition())) {
            forkTask.setIsolationGroupId(
                    taskMapperContext.getTaskDefinition().getIsolationGroupId());
        }
        tasksToBeScheduled.add(forkTask);
        // Schedule only the FIRST task of each parallel branch; the decider schedules
        // the rest of each branch as it progresses.
        List<List<WorkflowTask>> forkTasks = workflowTask.getForkTasks();
        for (List<WorkflowTask> wfts : forkTasks) {
            WorkflowTask wft = wfts.get(0);
            List<TaskModel> tasks2 =
                    taskMapperContext
                            .getDeciderService()
                            .getTasksToBeScheduled(workflowModel, wft, retryCount);
            tasksToBeScheduled.addAll(tasks2);
        }
        // A FORK_JOIN must be immediately followed by a JOIN in the workflow definition;
        // anything else is a definition error that terminates the workflow.
        WorkflowTask joinWorkflowTask =
                workflowModel
                        .getWorkflowDefinition()
                        .getNextTask(workflowTask.getTaskReferenceName());
        if (joinWorkflowTask == null || !joinWorkflowTask.getType().equals(TaskType.JOIN.name())) {
            throw new TerminateWorkflowException(
                    "Fork task definition is not followed by a join task. Check the blueprint");
        }
        // The JOIN is scheduled together with the branch heads so the decider can track
        // branch completion from the start.
        List<TaskModel> joinTask =
                taskMapperContext
                        .getDeciderService()
                        .getTasksToBeScheduled(workflowModel, joinWorkflowTask, retryCount);
        tasksToBeScheduled.addAll(joinTask);
        return tasksToBeScheduled;
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/core/src/main/java/com/netflix/conductor/core/execution/mapper/DoWhileTaskMapper.java | core/src/main/java/com/netflix/conductor/core/execution/mapper/DoWhileTaskMapper.java | /*
* Copyright 2022 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.core.execution.mapper;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.stereotype.Component;
import com.netflix.conductor.common.metadata.tasks.TaskDef;
import com.netflix.conductor.common.metadata.tasks.TaskType;
import com.netflix.conductor.common.metadata.workflow.WorkflowDef;
import com.netflix.conductor.common.metadata.workflow.WorkflowTask;
import com.netflix.conductor.core.utils.ParametersUtils;
import com.netflix.conductor.dao.MetadataDAO;
import com.netflix.conductor.model.TaskModel;
import com.netflix.conductor.model.WorkflowModel;
/**
 * {@link TaskMapper} implementation that maps a {@link WorkflowTask} of type {@link
 * TaskType#DO_WHILE} to a {@link TaskModel} of type {@link TaskType#DO_WHILE}.
 */
@Component
public class DoWhileTaskMapper implements TaskMapper {

    private static final Logger LOGGER = LoggerFactory.getLogger(DoWhileTaskMapper.class);

    private final MetadataDAO metadataDAO;
    private final ParametersUtils parametersUtils;

    public DoWhileTaskMapper(MetadataDAO metadataDAO, ParametersUtils parametersUtils) {
        this.metadataDAO = metadataDAO;
        this.parametersUtils = parametersUtils;
    }

    @Override
    public String getTaskType() {
        return TaskType.DO_WHILE.name();
    }

    /**
     * Maps a DO_WHILE workflow task to a single {@link TaskModel} of type {@link
     * TaskType#DO_WHILE} in {@link TaskModel.Status#IN_PROGRESS}, unless the loop task has
     * already reached a terminal state.
     *
     * @param taskMapperContext wrapper holding the {@link WorkflowTask}, {@link WorkflowDef},
     *     {@link WorkflowModel} and the task id
     * @return a single-element list with the DO_WHILE task, or an empty list when the loop task
     *     is already terminal
     */
    @Override
    public List<TaskModel> getMappedTasks(TaskMapperContext taskMapperContext) {
        LOGGER.debug("TaskMapperContext {} in DoWhileTaskMapper", taskMapperContext);

        WorkflowTask workflowTask = taskMapperContext.getWorkflowTask();
        WorkflowModel workflowModel = taskMapperContext.getWorkflowModel();

        TaskModel existing = workflowModel.getTaskByRefName(workflowTask.getTaskReferenceName());
        if (existing != null && existing.getStatus().isTerminal()) {
            // The loop task already finished; nothing to schedule again.
            return List.of();
        }

        // Prefer the definition supplied by the context, then the metadata store,
        // and finally fall back to an empty definition.
        TaskDef taskDefinition = taskMapperContext.getTaskDefinition();
        if (taskDefinition == null) {
            taskDefinition = metadataDAO.getTaskDef(workflowTask.getName());
        }
        if (taskDefinition == null) {
            taskDefinition = new TaskDef();
        }

        TaskModel doWhileTask = taskMapperContext.createTaskModel();
        doWhileTask.setTaskType(TaskType.TASK_TYPE_DO_WHILE);
        doWhileTask.setStatus(TaskModel.Status.IN_PROGRESS);
        doWhileTask.setStartTime(System.currentTimeMillis());
        doWhileTask.setRateLimitPerFrequency(taskDefinition.getRateLimitPerFrequency());
        doWhileTask.setRateLimitFrequencyInSeconds(taskDefinition.getRateLimitFrequencyInSeconds());
        doWhileTask.setRetryCount(taskMapperContext.getRetryCount());
        doWhileTask.setInputData(
                parametersUtils.getTaskInputV2(
                        workflowTask.getInputParameters(),
                        workflowModel,
                        doWhileTask.getTaskId(),
                        taskDefinition));
        return List.of(doWhileTask);
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/core/src/main/java/com/netflix/conductor/core/execution/mapper/TaskMapperContext.java | core/src/main/java/com/netflix/conductor/core/execution/mapper/TaskMapperContext.java | /*
* Copyright 2022 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.core.execution.mapper;
import java.util.Map;
import java.util.Objects;
import com.netflix.conductor.common.metadata.tasks.TaskDef;
import com.netflix.conductor.common.metadata.workflow.WorkflowDef;
import com.netflix.conductor.common.metadata.workflow.WorkflowTask;
import com.netflix.conductor.core.execution.DeciderService;
import com.netflix.conductor.model.TaskModel;
import com.netflix.conductor.model.WorkflowModel;
/** Business Object class used for interaction between the DeciderService and Different Mappers */
public class TaskMapperContext {

    private final WorkflowModel workflowModel;
    private final TaskDef taskDefinition;
    private final WorkflowTask workflowTask;
    private final Map<String, Object> taskInput;
    private final int retryCount;
    private final String retryTaskId;
    private final String taskId;
    private final DeciderService deciderService;

    private TaskMapperContext(Builder builder) {
        workflowModel = builder.workflowModel;
        taskDefinition = builder.taskDefinition;
        workflowTask = builder.workflowTask;
        taskInput = builder.taskInput;
        retryCount = builder.retryCount;
        retryTaskId = builder.retryTaskId;
        taskId = builder.taskId;
        deciderService = builder.deciderService;
    }

    /** @return a new, empty builder */
    public static Builder newBuilder() {
        return new Builder();
    }

    /**
     * Creates a builder pre-populated with the values of an existing context.
     *
     * @param copy the context to copy values from
     * @return a builder initialized with {@code copy}'s values
     */
    public static Builder newBuilder(TaskMapperContext copy) {
        Builder builder = new Builder();
        builder.workflowModel = copy.getWorkflowModel();
        builder.taskDefinition = copy.getTaskDefinition();
        builder.workflowTask = copy.getWorkflowTask();
        builder.taskInput = copy.getTaskInput();
        builder.retryCount = copy.getRetryCount();
        builder.retryTaskId = copy.getRetryTaskId();
        builder.taskId = copy.getTaskId();
        builder.deciderService = copy.getDeciderService();
        return builder;
    }

    /** @return the workflow definition of the wrapped {@link WorkflowModel} */
    public WorkflowDef getWorkflowDefinition() {
        return workflowModel.getWorkflowDefinition();
    }

    public WorkflowModel getWorkflowModel() {
        return workflowModel;
    }

    public TaskDef getTaskDefinition() {
        return taskDefinition;
    }

    public WorkflowTask getWorkflowTask() {
        return workflowTask;
    }

    public int getRetryCount() {
        return retryCount;
    }

    public String getRetryTaskId() {
        return retryTaskId;
    }

    public String getTaskId() {
        return taskId;
    }

    public Map<String, Object> getTaskInput() {
        return taskInput;
    }

    public DeciderService getDeciderService() {
        return deciderService;
    }

    /**
     * Creates a new {@link TaskModel} populated from this context: reference name, workflow
     * id/type, correlation id, scheduled time, task id, workflow task, and priority.
     *
     * @return a freshly-populated {@link TaskModel}
     */
    public TaskModel createTaskModel() {
        TaskModel taskModel = new TaskModel();
        taskModel.setReferenceTaskName(workflowTask.getTaskReferenceName());
        taskModel.setWorkflowInstanceId(workflowModel.getWorkflowId());
        taskModel.setWorkflowType(workflowModel.getWorkflowName());
        taskModel.setCorrelationId(workflowModel.getCorrelationId());
        taskModel.setScheduledTime(System.currentTimeMillis());

        taskModel.setTaskId(taskId);
        taskModel.setWorkflowTask(workflowTask);
        taskModel.setWorkflowPriority(workflowModel.getPriority());

        // the following properties are overridden by some TaskMapper implementations
        taskModel.setTaskType(workflowTask.getType());
        taskModel.setTaskDefName(workflowTask.getName());
        return taskModel;
    }

    @Override
    public String toString() {
        return "TaskMapperContext{"
                + "workflowDefinition="
                + getWorkflowDefinition()
                + ", workflowModel="
                + workflowModel
                + ", workflowTask="
                + workflowTask
                + ", taskInput="
                + taskInput
                + ", retryCount="
                + retryCount
                + ", retryTaskId='"
                + retryTaskId
                + '\''
                + ", taskId='"
                + taskId
                + '\''
                + '}';
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) {
            return true;
        }
        if (!(o instanceof TaskMapperContext)) {
            return false;
        }

        TaskMapperContext that = (TaskMapperContext) o;
        // retryTaskId is the only nullable field compared; Objects.equals handles it.
        return getRetryCount() == that.getRetryCount()
                && getWorkflowDefinition().equals(that.getWorkflowDefinition())
                && getWorkflowModel().equals(that.getWorkflowModel())
                && getWorkflowTask().equals(that.getWorkflowTask())
                && getTaskInput().equals(that.getTaskInput())
                && Objects.equals(getRetryTaskId(), that.getRetryTaskId())
                && getTaskId().equals(that.getTaskId());
    }

    @Override
    public int hashCode() {
        return Objects.hash(
                getWorkflowDefinition(),
                getWorkflowModel(),
                getWorkflowTask(),
                getTaskInput(),
                getRetryCount(),
                getRetryTaskId(),
                getTaskId());
    }

    /** {@code TaskMapperContext} builder static inner class. */
    public static final class Builder {

        private WorkflowModel workflowModel;
        private TaskDef taskDefinition;
        private WorkflowTask workflowTask;
        private Map<String, Object> taskInput;
        private int retryCount;
        private String retryTaskId;
        private String taskId;
        private DeciderService deciderService;

        private Builder() {}

        /**
         * Sets the {@code workflowModel}.
         *
         * @param val the {@code workflowModel} to set
         * @return a reference to this Builder
         */
        public Builder withWorkflowModel(WorkflowModel val) {
            workflowModel = val;
            return this;
        }

        /**
         * Sets the {@code taskDefinition}.
         *
         * @param val the {@code taskDefinition} to set
         * @return a reference to this Builder
         */
        public Builder withTaskDefinition(TaskDef val) {
            taskDefinition = val;
            return this;
        }

        /**
         * Sets the {@code workflowTask}.
         *
         * @param val the {@code workflowTask} to set
         * @return a reference to this Builder
         */
        public Builder withWorkflowTask(WorkflowTask val) {
            workflowTask = val;
            return this;
        }

        /**
         * Sets the {@code taskInput}.
         *
         * @param val the {@code taskInput} to set
         * @return a reference to this Builder
         */
        public Builder withTaskInput(Map<String, Object> val) {
            taskInput = val;
            return this;
        }

        /**
         * Sets the {@code retryCount}.
         *
         * @param val the {@code retryCount} to set
         * @return a reference to this Builder
         */
        public Builder withRetryCount(int val) {
            retryCount = val;
            return this;
        }

        /**
         * Sets the {@code retryTaskId}.
         *
         * @param val the {@code retryTaskId} to set
         * @return a reference to this Builder
         */
        public Builder withRetryTaskId(String val) {
            retryTaskId = val;
            return this;
        }

        /**
         * Sets the {@code taskId}.
         *
         * @param val the {@code taskId} to set
         * @return a reference to this Builder
         */
        public Builder withTaskId(String val) {
            taskId = val;
            return this;
        }

        /**
         * Sets the {@code deciderService}.
         *
         * @param val the {@code deciderService} to set
         * @return a reference to this Builder
         */
        public Builder withDeciderService(DeciderService val) {
            deciderService = val;
            return this;
        }

        /**
         * Returns a {@code TaskMapperContext} built from the parameters previously set.
         *
         * @return a {@code TaskMapperContext} built with parameters of this {@code
         *     TaskMapperContext.Builder}
         */
        public TaskMapperContext build() {
            return new TaskMapperContext(this);
        }
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/core/src/main/java/com/netflix/conductor/core/execution/mapper/DynamicTaskMapper.java | core/src/main/java/com/netflix/conductor/core/execution/mapper/DynamicTaskMapper.java | /*
* Copyright 2022 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.core.execution.mapper;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.stereotype.Component;
import com.netflix.conductor.annotations.VisibleForTesting;
import com.netflix.conductor.common.metadata.tasks.TaskDef;
import com.netflix.conductor.common.metadata.tasks.TaskType;
import com.netflix.conductor.common.metadata.workflow.WorkflowDef;
import com.netflix.conductor.common.metadata.workflow.WorkflowTask;
import com.netflix.conductor.core.exception.TerminateWorkflowException;
import com.netflix.conductor.core.utils.ParametersUtils;
import com.netflix.conductor.dao.MetadataDAO;
import com.netflix.conductor.model.TaskModel;
import com.netflix.conductor.model.WorkflowModel;
/**
 * {@link TaskMapper} implementation that maps a {@link WorkflowTask} of type {@link
 * TaskType#DYNAMIC} to a {@link TaskModel}, using the task definition derived from the dynamic
 * task name held in {@link WorkflowTask#getInputParameters()}.
 */
@Component
public class DynamicTaskMapper implements TaskMapper {

    private static final Logger LOGGER = LoggerFactory.getLogger(DynamicTaskMapper.class);

    private final ParametersUtils parametersUtils;
    private final MetadataDAO metadataDAO;

    public DynamicTaskMapper(ParametersUtils parametersUtils, MetadataDAO metadataDAO) {
        this.parametersUtils = parametersUtils;
        this.metadataDAO = metadataDAO;
    }

    @Override
    public String getTaskType() {
        return TaskType.DYNAMIC.name();
    }

    /**
     * Maps a dynamic task to a {@link TaskModel} based on the input params.
     *
     * @param taskMapperContext wrapper holding the {@link WorkflowTask}, {@link WorkflowDef},
     *     {@link WorkflowModel} and the task id
     * @return a single-element {@link List} containing a {@link TaskModel} in {@link
     *     TaskModel.Status#SCHEDULED}
     */
    @Override
    public List<TaskModel> getMappedTasks(TaskMapperContext taskMapperContext)
            throws TerminateWorkflowException {
        LOGGER.debug("TaskMapperContext {} in DynamicTaskMapper", taskMapperContext);

        WorkflowTask workflowTask = taskMapperContext.getWorkflowTask();
        WorkflowModel workflowModel = taskMapperContext.getWorkflowModel();

        // Resolve the concrete task name from the input and mutate the WorkflowTask.
        // The mutation MUST happen before createTaskModel() below so that the created
        // TaskModel reflects the resolved name and definition.
        String taskName =
                getDynamicTaskName(
                        taskMapperContext.getTaskInput(), workflowTask.getDynamicTaskNameParam());
        workflowTask.setName(taskName);
        TaskDef taskDefinition = getDynamicTaskDefinition(workflowTask);
        workflowTask.setTaskDefinition(taskDefinition);

        Map<String, Object> input =
                parametersUtils.getTaskInput(
                        workflowTask.getInputParameters(),
                        workflowModel,
                        taskDefinition,
                        taskMapperContext.getTaskId());

        TaskModel dynamicTask = taskMapperContext.createTaskModel();
        dynamicTask.setStartDelayInSeconds(workflowTask.getStartDelay());
        dynamicTask.setInputData(input);
        dynamicTask.setStatus(TaskModel.Status.SCHEDULED);
        dynamicTask.setRetryCount(taskMapperContext.getRetryCount());
        dynamicTask.setCallbackAfterSeconds(workflowTask.getStartDelay());
        dynamicTask.setResponseTimeoutSeconds(taskDefinition.getResponseTimeoutSeconds());
        dynamicTask.setTaskType(taskName);
        dynamicTask.setRetriedTaskId(taskMapperContext.getRetryTaskId());
        dynamicTask.setWorkflowPriority(workflowModel.getPriority());
        return Collections.singletonList(dynamicTask);
    }

    /**
     * Looks up the dynamic task name in the input params.
     *
     * @param taskInput map containing the input parameters, including the mapping between the
     *     dynamic task name param and the actual dynamic task name
     * @param taskNameParam the key used to look up the dynamic task name
     * @return the name of the dynamic task
     * @throws TerminateWorkflowException when the input parameters carry no value for the dynamic
     *     task name
     */
    @VisibleForTesting
    String getDynamicTaskName(Map<String, Object> taskInput, String taskNameParam)
            throws TerminateWorkflowException {
        Object resolvedName = taskInput.get(taskNameParam);
        if (resolvedName == null) {
            String reason =
                    String.format(
                            "Cannot map a dynamic task based on the parameter and input. "
                                    + "Parameter= %s, input= %s",
                            taskNameParam, taskInput);
            throw new TerminateWorkflowException(reason);
        }
        return String.valueOf(resolvedName);
    }

    /**
     * Fetches the {@link TaskDef} for a {@link WorkflowTask}: first the inline definition, then
     * the metadata store by task name.
     *
     * @param workflowTask the {@link WorkflowTask} whose name is used to look up the {@link
     *     TaskDef}
     * @return the task definition
     * @throws TerminateWorkflowException when no definition is available from either source
     */
    @VisibleForTesting
    TaskDef getDynamicTaskDefinition(WorkflowTask workflowTask)
            throws TerminateWorkflowException { // TODO this is a common pattern in code base can
        // be moved to DAO
        TaskDef inlineDefinition = workflowTask.getTaskDefinition();
        if (inlineDefinition != null) {
            return inlineDefinition;
        }
        TaskDef storedDefinition = metadataDAO.getTaskDef(workflowTask.getName());
        if (storedDefinition != null) {
            return storedDefinition;
        }
        String reason =
                String.format(
                        "Invalid task specified. Cannot find task by name %s in the task definitions",
                        workflowTask.getName());
        throw new TerminateWorkflowException(reason);
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/core/src/main/java/com/netflix/conductor/core/execution/mapper/HTTPTaskMapper.java | core/src/main/java/com/netflix/conductor/core/execution/mapper/HTTPTaskMapper.java | /*
* Copyright 2022 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.core.execution.mapper;
import java.util.*;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.stereotype.Component;
import com.netflix.conductor.common.metadata.tasks.TaskDef;
import com.netflix.conductor.common.metadata.tasks.TaskType;
import com.netflix.conductor.common.metadata.workflow.WorkflowDef;
import com.netflix.conductor.common.metadata.workflow.WorkflowTask;
import com.netflix.conductor.core.exception.TerminateWorkflowException;
import com.netflix.conductor.core.utils.ParametersUtils;
import com.netflix.conductor.dao.MetadataDAO;
import com.netflix.conductor.model.TaskModel;
import com.netflix.conductor.model.WorkflowModel;
/**
 * {@link TaskMapper} implementation that maps a {@link WorkflowTask} of type {@link
 * TaskType#HTTP} to a {@link TaskModel} of type {@link TaskType#HTTP} with {@link
 * TaskModel.Status#SCHEDULED}.
 */
@Component
public class HTTPTaskMapper implements TaskMapper {

    private static final Logger LOGGER = LoggerFactory.getLogger(HTTPTaskMapper.class);

    private final ParametersUtils parametersUtils;
    private final MetadataDAO metadataDAO;

    public HTTPTaskMapper(ParametersUtils parametersUtils, MetadataDAO metadataDAO) {
        this.parametersUtils = parametersUtils;
        this.metadataDAO = metadataDAO;
    }

    @Override
    public String getTaskType() {
        return TaskType.HTTP.name();
    }

    /**
     * Maps a {@link WorkflowTask} of type {@link TaskType#HTTP} to a {@link TaskModel} in a
     * {@link TaskModel.Status#SCHEDULED} state.
     *
     * @param taskMapperContext wrapper holding the {@link WorkflowTask}, {@link WorkflowDef},
     *     {@link WorkflowModel} and the task id
     * @return a list with just one HTTP task
     * @throws TerminateWorkflowException in case the task definition does not exist
     */
    @Override
    public List<TaskModel> getMappedTasks(TaskMapperContext taskMapperContext)
            throws TerminateWorkflowException {
        LOGGER.debug("TaskMapperContext {} in HTTPTaskMapper", taskMapperContext);

        WorkflowTask workflowTask = taskMapperContext.getWorkflowTask();
        // Surface the async-complete flag through the task input parameters.
        workflowTask.getInputParameters().put("asyncComplete", workflowTask.isAsyncComplete());

        WorkflowModel workflowModel = taskMapperContext.getWorkflowModel();

        // Definition from the context takes precedence; fall back to the metadata store.
        TaskDef taskDefinition = taskMapperContext.getTaskDefinition();
        if (taskDefinition == null) {
            taskDefinition = metadataDAO.getTaskDef(workflowTask.getName());
        }

        Map<String, Object> evaluatedInput =
                parametersUtils.getTaskInputV2(
                        workflowTask.getInputParameters(),
                        workflowModel,
                        taskMapperContext.getTaskId(),
                        taskDefinition);
        Boolean asyncComplete = (Boolean) evaluatedInput.get("asyncComplete");

        TaskModel httpTask = taskMapperContext.createTaskModel();
        httpTask.setInputData(evaluatedInput);
        httpTask.getInputData().put("asyncComplete", asyncComplete);
        httpTask.setStatus(TaskModel.Status.SCHEDULED);
        httpTask.setRetryCount(taskMapperContext.getRetryCount());
        httpTask.setCallbackAfterSeconds(workflowTask.getStartDelay());
        if (taskDefinition != null) {
            httpTask.setRateLimitPerFrequency(taskDefinition.getRateLimitPerFrequency());
            httpTask.setRateLimitFrequencyInSeconds(
                    taskDefinition.getRateLimitFrequencyInSeconds());
            httpTask.setIsolationGroupId(taskDefinition.getIsolationGroupId());
            httpTask.setExecutionNameSpace(taskDefinition.getExecutionNameSpace());
        }
        return List.of(httpTask);
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/core/src/main/java/com/netflix/conductor/core/execution/mapper/LambdaTaskMapper.java | core/src/main/java/com/netflix/conductor/core/execution/mapper/LambdaTaskMapper.java | /*
* Copyright 2022 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.core.execution.mapper;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.stereotype.Component;
import com.netflix.conductor.common.metadata.tasks.TaskDef;
import com.netflix.conductor.common.metadata.tasks.TaskType;
import com.netflix.conductor.common.metadata.workflow.WorkflowTask;
import com.netflix.conductor.core.utils.ParametersUtils;
import com.netflix.conductor.dao.MetadataDAO;
import com.netflix.conductor.model.TaskModel;
import com.netflix.conductor.model.WorkflowModel;
/**
 * @author x-ultra
 * @deprecated {@link com.netflix.conductor.core.execution.tasks.Lambda} is also deprecated. Use
 *     {@link com.netflix.conductor.core.execution.tasks.Inline} and so ${@link InlineTaskMapper}
 *     will be used as a result.
 */
@Deprecated
@Component
public class LambdaTaskMapper implements TaskMapper {

    public static final Logger LOGGER = LoggerFactory.getLogger(LambdaTaskMapper.class);

    private final ParametersUtils parametersUtils;
    private final MetadataDAO metadataDAO;

    public LambdaTaskMapper(ParametersUtils parametersUtils, MetadataDAO metadataDAO) {
        this.parametersUtils = parametersUtils;
        this.metadataDAO = metadataDAO;
    }

    @Override
    public String getTaskType() {
        return TaskType.LAMBDA.name();
    }

    /**
     * Maps a LAMBDA workflow task to a single {@link TaskModel} of type {@link
     * TaskType#TASK_TYPE_LAMBDA} in {@link TaskModel.Status#IN_PROGRESS}.
     */
    @Override
    public List<TaskModel> getMappedTasks(TaskMapperContext taskMapperContext) {
        LOGGER.debug("TaskMapperContext {} in LambdaTaskMapper", taskMapperContext);

        WorkflowTask workflowTask = taskMapperContext.getWorkflowTask();
        WorkflowModel workflowModel = taskMapperContext.getWorkflowModel();

        // Definition from the context takes precedence; fall back to the metadata store.
        TaskDef taskDefinition = taskMapperContext.getTaskDefinition();
        if (taskDefinition == null) {
            taskDefinition = metadataDAO.getTaskDef(workflowTask.getName());
        }

        Map<String, Object> evaluatedInput =
                parametersUtils.getTaskInputV2(
                        workflowTask.getInputParameters(),
                        workflowModel,
                        taskMapperContext.getTaskId(),
                        taskDefinition);

        TaskModel lambdaTask = taskMapperContext.createTaskModel();
        lambdaTask.setTaskType(TaskType.TASK_TYPE_LAMBDA);
        lambdaTask.setStartTime(System.currentTimeMillis());
        lambdaTask.setInputData(evaluatedInput);
        lambdaTask.setStatus(TaskModel.Status.IN_PROGRESS);
        return List.of(lambdaTask);
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/core/src/main/java/com/netflix/conductor/core/execution/mapper/SetVariableTaskMapper.java | core/src/main/java/com/netflix/conductor/core/execution/mapper/SetVariableTaskMapper.java | /*
* Copyright 2022 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.core.execution.mapper;
import java.util.List;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.stereotype.Component;
import com.netflix.conductor.common.metadata.tasks.TaskType;
import com.netflix.conductor.core.exception.TerminateWorkflowException;
import com.netflix.conductor.model.TaskModel;
@Component
public class SetVariableTaskMapper implements TaskMapper {

    public static final Logger LOGGER = LoggerFactory.getLogger(SetVariableTaskMapper.class);

    @Override
    public String getTaskType() {
        return TaskType.SET_VARIABLE.name();
    }

    /**
     * Maps a SET_VARIABLE workflow task to a single {@link TaskModel} in {@link
     * TaskModel.Status#IN_PROGRESS}, carrying the evaluated task input.
     */
    @Override
    public List<TaskModel> getMappedTasks(TaskMapperContext taskMapperContext)
            throws TerminateWorkflowException {
        LOGGER.debug("TaskMapperContext {} in SetVariableMapper", taskMapperContext);

        TaskModel setVariableTask = taskMapperContext.createTaskModel();
        setVariableTask.setStartTime(System.currentTimeMillis());
        setVariableTask.setInputData(taskMapperContext.getTaskInput());
        setVariableTask.setStatus(TaskModel.Status.IN_PROGRESS);
        return List.of(setVariableTask);
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/core/src/main/java/com/netflix/conductor/core/execution/mapper/ForkJoinDynamicTaskMapper.java | core/src/main/java/com/netflix/conductor/core/execution/mapper/ForkJoinDynamicTaskMapper.java | /*
* Copyright 2022 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.core.execution.mapper;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.stream.Collectors;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.tuple.ImmutablePair;
import org.apache.commons.lang3.tuple.Pair;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.stereotype.Component;
import com.netflix.conductor.annotations.VisibleForTesting;
import com.netflix.conductor.common.metadata.tasks.TaskType;
import com.netflix.conductor.common.metadata.workflow.DynamicForkJoinTaskList;
import com.netflix.conductor.common.metadata.workflow.SubWorkflowParams;
import com.netflix.conductor.common.metadata.workflow.WorkflowDef;
import com.netflix.conductor.common.metadata.workflow.WorkflowTask;
import com.netflix.conductor.core.exception.TerminateWorkflowException;
import com.netflix.conductor.core.execution.tasks.SystemTaskRegistry;
import com.netflix.conductor.core.utils.IDGenerator;
import com.netflix.conductor.core.utils.ParametersUtils;
import com.netflix.conductor.dao.MetadataDAO;
import com.netflix.conductor.model.TaskModel;
import com.netflix.conductor.model.WorkflowModel;
import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.ObjectMapper;
import static com.netflix.conductor.common.metadata.tasks.TaskType.SUB_WORKFLOW;
import static com.netflix.conductor.common.metadata.tasks.TaskType.TASK_TYPE_SIMPLE;
/**
* An implementation of {@link TaskMapper} to map a {@link WorkflowTask} of type {@link
* TaskType#FORK_JOIN_DYNAMIC} to a LinkedList of {@link TaskModel} beginning with a {@link
* TaskType#TASK_TYPE_FORK}, followed by the user defined dynamic tasks and a {@link TaskType#JOIN}
* at the end
*/
@Component
public class ForkJoinDynamicTaskMapper implements TaskMapper {
    public static final Logger LOGGER = LoggerFactory.getLogger(ForkJoinDynamicTaskMapper.class);
    // Generates task ids for the JOIN task created in createJoinTask().
    private final IDGenerator idGenerator;
    // Resolves ${...} expressions in the fork task's input parameters.
    private final ParametersUtils parametersUtils;
    // Used to deserialize the dynamic fork task definitions supplied in the task input.
    private final ObjectMapper objectMapper;
    // Looks up task definitions for dynamically forked tasks that don't carry one inline.
    private final MetadataDAO metadataDAO;
    // Used by the simplified fork syntax to detect system task types (see getDynamicTasksSimple).
    private final SystemTaskRegistry systemTaskRegistry;
    // Jackson type token for deserializing a list of WorkflowTask from the task input payload.
    private static final TypeReference<List<WorkflowTask>> ListOfWorkflowTasks =
            new TypeReference<>() {};
    public ForkJoinDynamicTaskMapper(
            IDGenerator idGenerator,
            ParametersUtils parametersUtils,
            ObjectMapper objectMapper,
            MetadataDAO metadataDAO,
            SystemTaskRegistry systemTaskRegistry) {
        this.idGenerator = idGenerator;
        this.parametersUtils = parametersUtils;
        this.objectMapper = objectMapper;
        this.metadataDAO = metadataDAO;
        this.systemTaskRegistry = systemTaskRegistry;
    }
@Override
public String getTaskType() {
return TaskType.FORK_JOIN_DYNAMIC.name();
}
/**
* This method gets the list of tasks that need to scheduled when the task to scheduled is of
* type {@link TaskType#FORK_JOIN_DYNAMIC}. Creates a Fork Task, followed by the Dynamic tasks
* and a final JOIN task.
*
* <p>The definitions of the dynamic forks that need to be scheduled are available in the {@link
* WorkflowTask#getInputParameters()} which are accessed using the {@link
* TaskMapperContext#getWorkflowTask()}. The dynamic fork task definitions are referred by a key
* value either by {@link WorkflowTask#getDynamicForkTasksParam()} or by {@link
* WorkflowTask#getDynamicForkJoinTasksParam()} When creating the list of tasks to be scheduled
* a set of preconditions are validated:
*
* <ul>
* <li>If the input parameter representing the Dynamic fork tasks is available as part of
* {@link WorkflowTask#getDynamicForkTasksParam()} then the input for the dynamic task is
* validated to be a map by using {@link WorkflowTask#getDynamicForkTasksInputParamName()}
* <li>If the input parameter representing the Dynamic fork tasks is available as part of
* {@link WorkflowTask#getDynamicForkJoinTasksParam()} then the input for the dynamic
* tasks is available in the payload of the tasks definition.
* <li>A check is performed that the next following task in the {@link WorkflowDef} is a
* {@link TaskType#JOIN}
* </ul>
*
* @param taskMapperContext: A wrapper class containing the {@link WorkflowTask}, {@link
* WorkflowDef}, {@link WorkflowModel} and a string representation of the TaskId
* @return List of tasks in the following order:
* <ul>
* <li>{@link TaskType#TASK_TYPE_FORK} with {@link TaskModel.Status#COMPLETED}
* <li>Might be any kind of task, but this is most cases is a UserDefinedTask with {@link
* TaskModel.Status#SCHEDULED}
* <li>{@link TaskType#JOIN} with {@link TaskModel.Status#IN_PROGRESS}
* </ul>
*
* @throws TerminateWorkflowException In case of:
* <ul>
* <li>When the task after {@link TaskType#FORK_JOIN_DYNAMIC} is not a {@link
* TaskType#JOIN}
* <li>When the input parameters for the dynamic tasks are not of type {@link Map}
* </ul>
*/
    @Override
    public List<TaskModel> getMappedTasks(TaskMapperContext taskMapperContext)
            throws TerminateWorkflowException {
        LOGGER.debug("TaskMapperContext {} in ForkJoinDynamicTaskMapper", taskMapperContext);
        WorkflowTask workflowTask = taskMapperContext.getWorkflowTask();
        WorkflowModel workflowModel = taskMapperContext.getWorkflowModel();
        int retryCount = taskMapperContext.getRetryCount();
        // Resolve ${...} expressions in this task's input parameters against the workflow state.
        Map<String, Object> input =
                parametersUtils.getTaskInput(
                        workflowTask.getInputParameters(), workflowModel, null, null);
        List<TaskModel> mappedTasks = new LinkedList<>();
        // Get the list of dynamic tasks and the input for the tasks.
        // The "simple" shorthand syntax (forkTaskName/forkTaskInputs) is tried first; when it
        // does not apply (returns null) fall back to dynamicForkTasksParam, then to the legacy
        // dynamicForkJoinTasksParam.
        Pair<List<WorkflowTask>, Map<String, Map<String, Object>>> workflowTasksAndInputPair =
                getDynamicTasksSimple(workflowTask, input);
        if (workflowTasksAndInputPair == null) {
            workflowTasksAndInputPair =
                    Optional.ofNullable(workflowTask.getDynamicForkTasksParam())
                            .map(
                                    dynamicForkTaskParam ->
                                            getDynamicForkTasksAndInput(
                                                    workflowTask,
                                                    workflowModel,
                                                    dynamicForkTaskParam,
                                                    input))
                            .orElseGet(
                                    () ->
                                            getDynamicForkJoinTasksAndInput(
                                                    workflowTask, workflowModel, input));
        }
        List<WorkflowTask> dynForkTasks = workflowTasksAndInputPair.getLeft();
        Map<String, Map<String, Object>> tasksInput = workflowTasksAndInputPair.getRight();
        // Create Fork Task which needs to be followed by the dynamic tasks
        TaskModel forkDynamicTask = createDynamicForkTask(taskMapperContext, dynForkTasks);
        forkDynamicTask.getInputData().putAll(taskMapperContext.getTaskInput());
        mappedTasks.add(forkDynamicTask);
        // Check whether a task with this fork's reference name is already present in the
        // workflow (e.g. the fork was scheduled before). If so, the forked children are NOT
        // re-scheduled below — only the fork task and the trailing join are (re)emitted.
        Optional<TaskModel> exists =
                workflowModel.getTasks().stream()
                        .filter(
                                task ->
                                        task.getReferenceTaskName()
                                                .equals(
                                                        taskMapperContext
                                                                .getWorkflowTask()
                                                                .getTaskReferenceName()))
                        .findAny();
        List<String> joinOnTaskRefs = new LinkedList<>();
        // Add each dynamic task to the mapped tasks and also get the last dynamic task in the list,
        // which indicates that the following task after that needs to be a join task
        if (!exists.isPresent()) {
            // Add each dynamic task to the mapped tasks and also get the last dynamic task in the
            // list,
            // which indicates that the following task after that needs to be a join task
            for (WorkflowTask dynForkTask : dynForkTasks) {
                // composition: delegate to the decider so nested constructs (sub-workflows,
                // system tasks, ...) are expanded the same way as statically defined tasks
                List<TaskModel> forkedTasks =
                        taskMapperContext
                                .getDeciderService()
                                .getTasksToBeScheduled(workflowModel, dynForkTask, retryCount);
                if (forkedTasks == null || forkedTasks.isEmpty()) {
                    // An empty result typically means the reference name collides with a task
                    // already running/completed in this workflow; look it up for the message.
                    Optional<String> existingTaskRefName =
                            workflowModel.getTasks().stream()
                                    .filter(
                                            runningTask ->
                                                    runningTask
                                                                    .getStatus()
                                                                    .equals(
                                                                            TaskModel.Status
                                                                                    .IN_PROGRESS)
                                                            || runningTask.getStatus().isTerminal())
                                    .map(TaskModel::getReferenceTaskName)
                                    .filter(
                                            refTaskName ->
                                                    refTaskName.equals(
                                                            dynForkTask.getTaskReferenceName()))
                                    .findAny();
                    // Construct an informative error message
                    String terminateMessage =
                            "No dynamic tasks could be created for the Workflow: "
                                    + workflowModel.toShortString()
                                    + ", Dynamic Fork Task: "
                                    + dynForkTask;
                    if (existingTaskRefName.isPresent()) {
                        terminateMessage +=
                                " attempted to create a duplicate task reference name: "
                                        + existingTaskRefName.get();
                    }
                    throw new TerminateWorkflowException(terminateMessage);
                }
                // Merge the per-task input (keyed by reference name) into each scheduled task.
                for (TaskModel forkedTask : forkedTasks) {
                    try {
                        Map<String, Object> forkedTaskInput =
                                tasksInput.get(forkedTask.getReferenceTaskName());
                        if (forkedTask.getInputData() == null) {
                            forkedTask.setInputData(new HashMap<>());
                        }
                        if (forkedTaskInput == null) {
                            forkedTaskInput = new HashMap<>();
                        }
                        forkedTask.getInputData().putAll(forkedTaskInput);
                    } catch (Exception e) {
                        String reason =
                                String.format(
                                        "Tasks could not be dynamically forked due to invalid input: %s",
                                        e.getMessage());
                        throw new TerminateWorkflowException(reason);
                    }
                }
                mappedTasks.addAll(forkedTasks);
                // Get the last of the dynamic tasks so that the join can be performed once this
                // task is
                // done
                TaskModel last = forkedTasks.get(forkedTasks.size() - 1);
                joinOnTaskRefs.add(last.getReferenceTaskName());
            }
        }
        // From the workflow definition get the next task and make sure that it is a JOIN task.
        // The dynamic fork tasks need to be followed by a join task
        WorkflowTask joinWorkflowTask =
                workflowModel
                        .getWorkflowDefinition()
                        .getNextTask(workflowTask.getTaskReferenceName());
        if (joinWorkflowTask == null || !joinWorkflowTask.getType().equals(TaskType.JOIN.name())) {
            throw new TerminateWorkflowException(
                    "Dynamic join definition is not followed by a join task. Check the workflow definition.");
        }
        // Create Join task
        HashMap<String, Object> joinInput = new HashMap<>();
        joinInput.put("joinOn", joinOnTaskRefs);
        TaskModel joinTask = createJoinTask(workflowModel, joinWorkflowTask, joinInput);
        mappedTasks.add(joinTask);
        return mappedTasks;
    }
/**
* This method creates a FORK task and adds the list of dynamic fork tasks keyed by
* "forkedTaskDefs" and their names keyed by "forkedTasks" into {@link TaskModel#getInputData()}
*
* @param taskMapperContext: The {@link TaskMapperContext} which wraps workflowTask, workflowDef
* and workflowModel
* @param dynForkTasks: The list of dynamic forked tasks, the reference names of these tasks
* will be added to the forkDynamicTask
* @return A new instance of {@link TaskModel} representing a {@link TaskType#TASK_TYPE_FORK}
*/
@VisibleForTesting
TaskModel createDynamicForkTask(
TaskMapperContext taskMapperContext, List<WorkflowTask> dynForkTasks) {
TaskModel forkDynamicTask = taskMapperContext.createTaskModel();
forkDynamicTask.setTaskType(TaskType.TASK_TYPE_FORK);
forkDynamicTask.setTaskDefName(TaskType.TASK_TYPE_FORK);
forkDynamicTask.setStartTime(System.currentTimeMillis());
forkDynamicTask.setEndTime(System.currentTimeMillis());
List<String> forkedTaskNames =
dynForkTasks.stream()
.map(WorkflowTask::getTaskReferenceName)
.collect(Collectors.toList());
forkDynamicTask.getInputData().put("forkedTasks", forkedTaskNames);
forkDynamicTask
.getInputData()
.put(
"forkedTaskDefs",
dynForkTasks); // TODO: Remove this parameter in the later releases
forkDynamicTask.setStatus(TaskModel.Status.COMPLETED);
return forkDynamicTask;
}
/**
* This method creates a JOIN task that is used in the {@link
* this#getMappedTasks(TaskMapperContext)} at the end to add a join task to be scheduled after
* all the fork tasks
*
* @param workflowModel: A instance of the {@link WorkflowModel} which represents the workflow
* being executed.
* @param joinWorkflowTask: A instance of {@link WorkflowTask} which is of type {@link
* TaskType#JOIN}
* @param joinInput: The input which is set in the {@link TaskModel#setInputData(Map)}
* @return a new instance of {@link TaskModel} representing a {@link TaskType#JOIN}
*/
@VisibleForTesting
TaskModel createJoinTask(
WorkflowModel workflowModel,
WorkflowTask joinWorkflowTask,
HashMap<String, Object> joinInput) {
TaskModel joinTask = new TaskModel();
joinTask.setTaskType(TaskType.TASK_TYPE_JOIN);
joinTask.setTaskDefName(TaskType.TASK_TYPE_JOIN);
joinTask.setReferenceTaskName(joinWorkflowTask.getTaskReferenceName());
joinTask.setWorkflowInstanceId(workflowModel.getWorkflowId());
joinTask.setWorkflowType(workflowModel.getWorkflowName());
joinTask.setCorrelationId(workflowModel.getCorrelationId());
joinTask.setScheduledTime(System.currentTimeMillis());
joinTask.setStartTime(System.currentTimeMillis());
joinTask.setInputData(joinInput);
joinTask.setTaskId(idGenerator.generate());
joinTask.setStatus(TaskModel.Status.IN_PROGRESS);
joinTask.setWorkflowTask(joinWorkflowTask);
joinTask.setWorkflowPriority(workflowModel.getPriority());
return joinTask;
}
/**
* This method is used to get the List of dynamic workflow tasks and their input based on the
* {@link WorkflowTask#getDynamicForkTasksParam()}
*
* @param workflowTask: The Task of type FORK_JOIN_DYNAMIC that needs to scheduled, which has
* the input parameters
* @param workflowModel: The instance of the {@link WorkflowModel} which represents the workflow
* being executed.
* @param dynamicForkTaskParam: The key representing the dynamic fork join json payload which is
* available in {@link WorkflowTask#getInputParameters()}
* @return a {@link Pair} representing the list of dynamic fork tasks in {@link Pair#getLeft()}
* and the input for the dynamic fork tasks in {@link Pair#getRight()}
* @throws TerminateWorkflowException : In case of input parameters of the dynamic fork tasks
* not represented as {@link Map}
*/
@SuppressWarnings("unchecked")
@VisibleForTesting
Pair<List<WorkflowTask>, Map<String, Map<String, Object>>> getDynamicForkTasksAndInput(
WorkflowTask workflowTask,
WorkflowModel workflowModel,
String dynamicForkTaskParam,
Map<String, Object> input)
throws TerminateWorkflowException {
List<WorkflowTask> dynamicForkWorkflowTasks =
getDynamicForkWorkflowTasks(dynamicForkTaskParam, input);
if (dynamicForkWorkflowTasks == null) {
dynamicForkWorkflowTasks = new ArrayList<>();
}
for (WorkflowTask dynamicForkWorkflowTask : dynamicForkWorkflowTasks) {
if ((dynamicForkWorkflowTask.getTaskDefinition() == null)
&& StringUtils.isNotBlank(dynamicForkWorkflowTask.getName())) {
dynamicForkWorkflowTask.setTaskDefinition(
metadataDAO.getTaskDef(dynamicForkWorkflowTask.getName()));
}
}
Object dynamicForkTasksInput = input.get(workflowTask.getDynamicForkTasksInputParamName());
if (!(dynamicForkTasksInput instanceof Map)) {
throw new TerminateWorkflowException(
"Input to the dynamically forked tasks is not a map -> expecting a map of K,V but found "
+ dynamicForkTasksInput);
}
return new ImmutablePair<>(
dynamicForkWorkflowTasks, (Map<String, Map<String, Object>>) dynamicForkTasksInput);
}
private List<WorkflowTask> getDynamicForkWorkflowTasks(
String dynamicForkTaskParam, Map<String, Object> input) {
Object dynamicForkTasksJson = input.get(dynamicForkTaskParam);
try {
List<WorkflowTask> tasks =
objectMapper.convertValue(dynamicForkTasksJson, ListOfWorkflowTasks);
for (var task : tasks) {
if (task.getTaskReferenceName() == null) {
throw new RuntimeException(
"One of the tasks had a null/missing taskReferenceName");
}
}
return tasks;
} catch (Exception e) {
LOGGER.warn("IllegalArgumentException in getDynamicForkTasksAndInput", e);
throw new TerminateWorkflowException(
String.format(
"Input '%s' is invalid. Cannot deserialize a list of Workflow Tasks from '%s'",
dynamicForkTaskParam, dynamicForkTasksJson));
}
}
Pair<List<WorkflowTask>, Map<String, Map<String, Object>>> getDynamicTasksSimple(
WorkflowTask workflowTask, Map<String, Object> input)
throws TerminateWorkflowException {
String forkSubWorkflowName = (String) input.get("forkTaskWorkflow");
String forkSubWorkflowVersionStr = (String) input.get("forkTaskWorkflowVersion");
Integer forkSubWorkflowVersion = null;
try {
forkSubWorkflowVersion = Integer.parseInt(forkSubWorkflowVersionStr);
} catch (NumberFormatException nfe) {
}
String forkTaskType = (String) input.get("forkTaskType");
String forkTaskName = (String) input.get("forkTaskName");
if (forkTaskType != null
&& (systemTaskRegistry.isSystemTask(forkTaskType))
&& forkTaskName == null) {
forkTaskName = forkTaskType;
}
if (forkTaskName == null) {
forkTaskName = workflowTask.getTaskReferenceName();
// or we can ban using just forkTaskWorkflow without forkTaskName
}
if (forkTaskType == null) {
forkTaskType = TASK_TYPE_SIMPLE;
}
// This should be a list
Object forkTaskInputs = input.get("forkTaskInputs");
if (forkTaskInputs == null || !(forkTaskInputs instanceof List)) {
LOGGER.warn(
"fork_task_name is present but the inputs are NOT a list is empty {}",
forkTaskInputs);
return null;
}
List<Object> inputs = (List<Object>) forkTaskInputs;
List<WorkflowTask> dynamicForkWorkflowTasks = new ArrayList<>(inputs.size());
Map<String, Map<String, Object>> dynamicForkTasksInput = new HashMap<>();
int i = 0;
for (Object forkTaskInput : inputs) {
WorkflowTask forkTask = null;
if (forkSubWorkflowName != null) {
forkTask =
generateSubWorkflowWorkflowTask(
forkSubWorkflowName, forkSubWorkflowVersion, forkTaskInput);
forkTask.setTaskReferenceName("_" + forkTaskName + "_" + i);
} else {
forkTask = generateWorkflowTask(forkTaskName, forkTaskType, forkTaskInput);
forkTask.setTaskReferenceName("_" + forkTaskName + "_" + i);
}
forkTask.getInputParameters().put("__index", i++);
if (workflowTask.isOptional()) {
forkTask.setOptional(true);
}
dynamicForkWorkflowTasks.add(forkTask);
dynamicForkTasksInput.put(
forkTask.getTaskReferenceName(), forkTask.getInputParameters());
}
return new ImmutablePair<>(dynamicForkWorkflowTasks, dynamicForkTasksInput);
}
private WorkflowTask generateWorkflowTask(
String forkTaskName, String forkTaskType, Object forkTaskInput) {
WorkflowTask forkTask = new WorkflowTask();
try {
forkTask = objectMapper.convertValue(forkTaskInput, WorkflowTask.class);
} catch (Exception ignored) {
}
forkTask.setName(forkTaskName);
forkTask.setType(forkTaskType);
Map<String, Object> inputParameters = new HashMap<>();
if (forkTaskInput instanceof Map) {
inputParameters.putAll((Map<? extends String, ?>) forkTaskInput);
} else {
inputParameters.put("input", forkTaskInput);
}
forkTask.setInputParameters(inputParameters);
forkTask.setTaskDefinition(metadataDAO.getTaskDef(forkTaskName));
return forkTask;
}
private WorkflowTask generateSubWorkflowWorkflowTask(
String name, Integer version, Object forkTaskInput) {
WorkflowTask forkTask = new WorkflowTask();
try {
forkTask = objectMapper.convertValue(forkTaskInput, WorkflowTask.class);
} catch (Exception ignored) {
}
forkTask.setName(name);
forkTask.setType(SUB_WORKFLOW.toString());
Map<String, Object> inputParameters = new HashMap<>();
SubWorkflowParams subWorkflowParams = new SubWorkflowParams();
subWorkflowParams.setName(name);
subWorkflowParams.setVersion(version);
forkTask.setSubWorkflowParam(subWorkflowParams);
if (forkTaskInput instanceof Map) {
inputParameters.putAll((Map<? extends String, ?>) forkTaskInput);
Map<? extends String, ?> forkTaskInputMap = (Map<? extends String, ?>) forkTaskInput;
subWorkflowParams.setTaskToDomain(
(Map<String, String>) forkTaskInputMap.get("taskToDomain"));
} else {
inputParameters.put("input", forkTaskInput);
}
forkTask.setInputParameters(inputParameters);
return forkTask;
}
/**
* This method is used to get the List of dynamic workflow tasks and their input based on the
* {@link WorkflowTask#getDynamicForkJoinTasksParam()}
*
* <p><b>NOTE:</b> This method is kept for legacy reasons, new workflows should use the {@link
* #getDynamicForkTasksAndInput}
*
* @param workflowTask: The Task of type FORK_JOIN_DYNAMIC that needs to scheduled, which has
* the input parameters
* @param workflowModel: The instance of the {@link WorkflowModel} which represents the workflow
* being executed.
* @return {@link Pair} representing the list of dynamic fork tasks in {@link Pair#getLeft()}
* and the input for the dynamic fork tasks in {@link Pair#getRight()}
* @throws TerminateWorkflowException : In case of the {@link WorkflowTask#getInputParameters()}
* does not have a payload that contains the list of the dynamic tasks
*/
@VisibleForTesting
Pair<List<WorkflowTask>, Map<String, Map<String, Object>>> getDynamicForkJoinTasksAndInput(
WorkflowTask workflowTask, WorkflowModel workflowModel, Map<String, Object> input)
throws TerminateWorkflowException {
String dynamicForkJoinTaskParam = workflowTask.getDynamicForkJoinTasksParam();
Object paramValue = input.get(dynamicForkJoinTaskParam);
DynamicForkJoinTaskList dynamicForkJoinTaskList =
objectMapper.convertValue(paramValue, DynamicForkJoinTaskList.class);
if (dynamicForkJoinTaskList == null) {
String reason =
String.format(
"Dynamic tasks could not be created. The value of %s from task's input %s has no dynamic tasks to be scheduled",
dynamicForkJoinTaskParam, input);
LOGGER.error(reason);
throw new TerminateWorkflowException(reason);
}
Map<String, Map<String, Object>> dynamicForkJoinTasksInput = new HashMap<>();
List<WorkflowTask> dynamicForkJoinWorkflowTasks =
dynamicForkJoinTaskList.getDynamicTasks().stream()
.peek(
dynamicForkJoinTask ->
dynamicForkJoinTasksInput.put(
dynamicForkJoinTask.getReferenceName(),
dynamicForkJoinTask
.getInput())) // TODO create a custom pair
// collector
.map(
dynamicForkJoinTask -> {
WorkflowTask dynamicForkJoinWorkflowTask = new WorkflowTask();
dynamicForkJoinWorkflowTask.setTaskReferenceName(
dynamicForkJoinTask.getReferenceName());
dynamicForkJoinWorkflowTask.setName(
dynamicForkJoinTask.getTaskName());
dynamicForkJoinWorkflowTask.setType(
dynamicForkJoinTask.getType());
if (dynamicForkJoinWorkflowTask.getTaskDefinition() == null
&& StringUtils.isNotBlank(
dynamicForkJoinWorkflowTask.getName())) {
dynamicForkJoinWorkflowTask.setTaskDefinition(
metadataDAO.getTaskDef(
dynamicForkJoinTask.getTaskName()));
}
return dynamicForkJoinWorkflowTask;
})
.collect(Collectors.toCollection(LinkedList::new));
return new ImmutablePair<>(dynamicForkJoinWorkflowTasks, dynamicForkJoinTasksInput);
}
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/core/src/main/java/com/netflix/conductor/core/execution/mapper/WaitTaskMapper.java | core/src/main/java/com/netflix/conductor/core/execution/mapper/WaitTaskMapper.java | /*
* Copyright 2022 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.core.execution.mapper;
import java.text.ParseException;
import java.time.Duration;
import java.util.*;
import org.apache.commons.lang3.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.stereotype.Component;
import com.netflix.conductor.common.metadata.tasks.TaskType;
import com.netflix.conductor.common.metadata.workflow.WorkflowTask;
import com.netflix.conductor.core.execution.tasks.Wait;
import com.netflix.conductor.core.utils.ParametersUtils;
import com.netflix.conductor.model.TaskModel;
import com.netflix.conductor.model.WorkflowModel;
import static com.netflix.conductor.common.metadata.tasks.TaskType.TASK_TYPE_WAIT;
import static com.netflix.conductor.core.execution.tasks.Wait.DURATION_INPUT;
import static com.netflix.conductor.core.execution.tasks.Wait.UNTIL_INPUT;
import static com.netflix.conductor.core.utils.DateTimeUtils.parseDate;
import static com.netflix.conductor.core.utils.DateTimeUtils.parseDuration;
import static com.netflix.conductor.model.TaskModel.Status.FAILED_WITH_TERMINAL_ERROR;
/**
* An implementation of {@link TaskMapper} to map a {@link WorkflowTask} of type {@link
* TaskType#WAIT} to a {@link TaskModel} of type {@link Wait} with {@link
* TaskModel.Status#IN_PROGRESS}
*/
@Component
public class WaitTaskMapper implements TaskMapper {

    public static final Logger LOGGER = LoggerFactory.getLogger(WaitTaskMapper.class);

    // Resolves ${...} expressions in the WAIT task's input parameters.
    private final ParametersUtils parametersUtils;

    public WaitTaskMapper(ParametersUtils parametersUtils) {
        this.parametersUtils = parametersUtils;
    }

    /** Returns the workflow-task type handled by this mapper: {@code WAIT}. */
    @Override
    public String getTaskType() {
        return TaskType.WAIT.name();
    }

    /**
     * Maps a WAIT workflow task to a single IN_PROGRESS task model whose callback time is derived
     * from the optional {@code duration} or {@code until} inputs (see {@link
     * #setCallbackAfter(TaskModel)}).
     *
     * @param taskMapperContext wrapper for the workflow task, workflow model and task id
     * @return a single-element list containing the mapped WAIT task
     */
    @Override
    public List<TaskModel> getMappedTasks(TaskMapperContext taskMapperContext) {
        LOGGER.debug("TaskMapperContext {} in WaitTaskMapper", taskMapperContext);
        WorkflowModel workflowModel = taskMapperContext.getWorkflowModel();
        String taskId = taskMapperContext.getTaskId();
        Map<String, Object> waitTaskInput =
                parametersUtils.getTaskInputV2(
                        taskMapperContext.getWorkflowTask().getInputParameters(),
                        workflowModel,
                        taskId,
                        null);
        TaskModel waitTask = taskMapperContext.createTaskModel();
        waitTask.setTaskType(TASK_TYPE_WAIT);
        waitTask.setInputData(waitTaskInput);
        waitTask.setStartTime(System.currentTimeMillis());
        waitTask.setStatus(TaskModel.Status.IN_PROGRESS);
        if (Objects.nonNull(taskMapperContext.getTaskDefinition())) {
            waitTask.setIsolationGroupId(
                    taskMapperContext.getTaskDefinition().getIsolationGroupId());
        }
        setCallbackAfter(waitTask);
        return List.of(waitTask);
    }

    /**
     * Derives the task's callback delay and wait timeout from its input.
     *
     * <ul>
     *   <li>{@code duration} — wait for a relative duration
     *   <li>{@code until} — wait until an absolute date/time
     *   <li>neither — wait indefinitely (until an external signal completes the task)
     * </ul>
     *
     * <p>Providing both inputs, or an unparseable value, marks the task
     * FAILED_WITH_TERMINAL_ERROR instead of throwing.
     */
    void setCallbackAfter(TaskModel task) {
        String duration =
                Optional.ofNullable(task.getInputData().get(DURATION_INPUT)).orElse("").toString();
        String until =
                Optional.ofNullable(task.getInputData().get(UNTIL_INPUT)).orElse("").toString();
        if (StringUtils.isNotBlank(duration) && StringUtils.isNotBlank(until)) {
            task.setReasonForIncompletion(
                    "Both 'duration' and 'until' specified. Please provide only one input");
            task.setStatus(FAILED_WITH_TERMINAL_ERROR);
            return;
        }
        if (StringUtils.isNotBlank(duration)) {
            try {
                Duration timeDuration = parseDuration(duration);
                long seconds = timeDuration.getSeconds();
                task.setWaitTimeout(System.currentTimeMillis() + (seconds * 1000));
                task.setCallbackAfterSeconds(seconds);
            } catch (RuntimeException e) {
                // Fixed: an unparseable duration previously propagated out of the mapper;
                // fail the task terminally, consistent with the 'until' branch below.
                task.setReasonForIncompletion(
                        "Invalid/Unsupported Wait duration format. Provided: " + duration);
                task.setStatus(FAILED_WITH_TERMINAL_ERROR);
            }
        } else if (StringUtils.isNotBlank(until)) {
            try {
                Date expiryDate = parseDate(until);
                long timeInMS = expiryDate.getTime();
                long now = System.currentTimeMillis();
                // Clamp past dates to "fire immediately".
                long seconds = Math.max(0, (timeInMS - now) / 1000);
                task.setCallbackAfterSeconds(seconds);
                task.setWaitTimeout(timeInMS);
            } catch (ParseException parseException) {
                task.setReasonForIncompletion(
                        "Invalid/Unsupported Wait Until format. Provided: " + until);
                task.setStatus(FAILED_WITH_TERMINAL_ERROR);
            }
        } else {
            // If there is no time duration specified then the WAIT task should wait forever
            task.setCallbackAfterSeconds(Integer.MAX_VALUE);
        }
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/core/src/main/java/com/netflix/conductor/core/execution/mapper/JoinTaskMapper.java | core/src/main/java/com/netflix/conductor/core/execution/mapper/JoinTaskMapper.java | /*
* Copyright 2021 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.core.execution.mapper;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.stereotype.Component;
import com.netflix.conductor.common.metadata.tasks.TaskType;
import com.netflix.conductor.common.metadata.workflow.WorkflowDef;
import com.netflix.conductor.common.metadata.workflow.WorkflowTask;
import com.netflix.conductor.model.TaskModel;
import com.netflix.conductor.model.WorkflowModel;
/**
* An implementation of {@link TaskMapper} to map a {@link WorkflowTask} of type {@link
* TaskType#JOIN} to a {@link TaskModel} of type {@link TaskType#JOIN}
*/
@Component
public class JoinTaskMapper implements TaskMapper {

    public static final Logger LOGGER = LoggerFactory.getLogger(JoinTaskMapper.class);

    /** Returns the workflow-task type handled by this mapper: {@code JOIN}. */
    @Override
    public String getTaskType() {
        return TaskType.JOIN.name();
    }

    /**
     * Maps a JOIN workflow task to a single JOIN task model in {@link
     * TaskModel.Status#IN_PROGRESS}. The reference names the join waits on are placed in the
     * task's input under the key {@code "joinOn"}.
     *
     * @param taskMapperContext wrapper for the workflow task, workflow model and task id
     * @return a single-element list containing the mapped JOIN task
     */
    @Override
    public List<TaskModel> getMappedTasks(TaskMapperContext taskMapperContext) {
        LOGGER.debug("TaskMapperContext {} in JoinTaskMapper", taskMapperContext);
        WorkflowTask joinWorkflowTask = taskMapperContext.getWorkflowTask();

        Map<String, Object> inputData = new HashMap<>();
        inputData.put("joinOn", joinWorkflowTask.getJoinOn());

        TaskModel mappedJoinTask = taskMapperContext.createTaskModel();
        mappedJoinTask.setTaskType(TaskType.TASK_TYPE_JOIN);
        mappedJoinTask.setTaskDefName(TaskType.TASK_TYPE_JOIN);
        mappedJoinTask.setStartTime(System.currentTimeMillis());
        mappedJoinTask.setInputData(inputData);
        mappedJoinTask.setStatus(TaskModel.Status.IN_PROGRESS);

        if (Objects.nonNull(taskMapperContext.getTaskDefinition())) {
            mappedJoinTask.setIsolationGroupId(
                    taskMapperContext.getTaskDefinition().getIsolationGroupId());
        }
        return List.of(mappedJoinTask);
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/core/src/main/java/com/netflix/conductor/core/execution/mapper/DecisionTaskMapper.java | core/src/main/java/com/netflix/conductor/core/execution/mapper/DecisionTaskMapper.java | /*
* Copyright 2022 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.core.execution.mapper;
import java.util.Collections;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import org.apache.commons.lang3.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.stereotype.Component;
import com.netflix.conductor.annotations.VisibleForTesting;
import com.netflix.conductor.common.metadata.tasks.TaskType;
import com.netflix.conductor.common.metadata.workflow.WorkflowDef;
import com.netflix.conductor.common.metadata.workflow.WorkflowTask;
import com.netflix.conductor.core.events.ScriptEvaluator;
import com.netflix.conductor.core.exception.TerminateWorkflowException;
import com.netflix.conductor.model.TaskModel;
import com.netflix.conductor.model.WorkflowModel;
/**
 * An implementation of {@link TaskMapper} to map a {@link WorkflowTask} of type {@link
 * TaskType#DECISION} to a List {@link TaskModel} starting with Task of type {@link
 * TaskType#DECISION} which is marked as IN_PROGRESS, followed by the list of {@link TaskModel}
 * based on the case expression evaluation in the Decision task.
 *
 * @deprecated {@link com.netflix.conductor.core.execution.tasks.Decision} is also deprecated. Use
 *     {@link com.netflix.conductor.core.execution.tasks.Switch} instead, so {@link
 *     SwitchTaskMapper} will be used as a result.
 */
@Deprecated
@Component
public class DecisionTaskMapper implements TaskMapper {
    private static final Logger LOGGER = LoggerFactory.getLogger(DecisionTaskMapper.class);
    @Override
    public String getTaskType() {
        return TaskType.DECISION.name();
    }
    /**
     * This method gets the list of tasks that need to be scheduled when the task to be scheduled
     * is of type {@link TaskType#DECISION}.
     *
     * @param taskMapperContext: A wrapper class containing the {@link WorkflowTask}, {@link
     *     WorkflowDef}, {@link WorkflowModel} and a string representation of the TaskId
     * @return List of tasks in the following order:
     *     <ul>
     *       <li>{@link TaskType#DECISION} with {@link TaskModel.Status#IN_PROGRESS}
     *       <li>List of task based on the evaluation of {@link WorkflowTask#getCaseExpression()}
     *           are scheduled.
     *       <li>In case of no matching result after the evaluation of the {@link
     *           WorkflowTask#getCaseExpression()}, the {@link WorkflowTask#getDefaultCase()} Tasks
     *           are scheduled.
     *     </ul>
     */
    @Override
    public List<TaskModel> getMappedTasks(TaskMapperContext taskMapperContext) {
        LOGGER.debug("TaskMapperContext {} in DecisionTaskMapper", taskMapperContext);
        List<TaskModel> tasksToBeScheduled = new LinkedList<>();
        WorkflowTask workflowTask = taskMapperContext.getWorkflowTask();
        WorkflowModel workflowModel = taskMapperContext.getWorkflowModel();
        Map<String, Object> taskInput = taskMapperContext.getTaskInput();
        int retryCount = taskMapperContext.getRetryCount();
        // get the expression to be evaluated
        String caseValue = getEvaluatedCaseValue(workflowTask, taskInput);
        // NOTE(review): the evaluated case value is stored twice on the DECISION task - as the
        // "case" input and as a single-element "caseOutput" list; downstream consumers appear to
        // read both, so both are kept.
        TaskModel decisionTask = taskMapperContext.createTaskModel();
        decisionTask.setTaskType(TaskType.TASK_TYPE_DECISION);
        decisionTask.setTaskDefName(TaskType.TASK_TYPE_DECISION);
        decisionTask.addInput("case", caseValue);
        decisionTask.addOutput("caseOutput", Collections.singletonList(caseValue));
        decisionTask.setStartTime(System.currentTimeMillis());
        decisionTask.setStatus(TaskModel.Status.IN_PROGRESS);
        tasksToBeScheduled.add(decisionTask);
        // get the list of tasks based on the decision
        List<WorkflowTask> selectedTasks = workflowTask.getDecisionCases().get(caseValue);
        // if the tasks returned are empty based on evaluated case value, then get the default case
        // if there is one
        if (selectedTasks == null || selectedTasks.isEmpty()) {
            selectedTasks = workflowTask.getDefaultCase();
        }
        // once there are selected tasks that need to proceeded as part of the decision, get the
        // next task to be scheduled by using the decider service
        if (selectedTasks != null && !selectedTasks.isEmpty()) {
            WorkflowTask selectedTask =
                    selectedTasks.get(0); // Schedule the first task to be executed...
            // TODO break out this recursive call using function composition of what needs to be
            // done and then walk back the condition tree
            List<TaskModel> caseTasks =
                    taskMapperContext
                            .getDeciderService()
                            .getTasksToBeScheduled(
                                    workflowModel,
                                    selectedTask,
                                    retryCount,
                                    taskMapperContext.getRetryTaskId());
            tasksToBeScheduled.addAll(caseTasks);
            // marker input consumed when executing the DECISION task
            decisionTask.addInput("hasChildren", "true");
        }
        return tasksToBeScheduled;
    }
    /**
     * This method evaluates the case expression of a decision task and returns a string
     * representation of the evaluated result.
     *
     * @param workflowTask: The decision task that has the case expression to be evaluated.
     * @param taskInput: the input which has the values that will be used in evaluating the case
     *     expression.
     * @return A String representation of the evaluated result; the literal string {@code "null"}
     *     when the expression evaluates to null.
     * @throws TerminateWorkflowException if the script evaluation fails for any reason.
     */
    @VisibleForTesting
    String getEvaluatedCaseValue(WorkflowTask workflowTask, Map<String, Object> taskInput) {
        String expression = workflowTask.getCaseExpression();
        String caseValue;
        if (StringUtils.isNotBlank(expression)) {
            LOGGER.debug("Case being evaluated using decision expression: {}", expression);
            try {
                // Evaluate the expression by using the GraalJS based script evaluator
                Object returnValue = ScriptEvaluator.eval(expression, taskInput);
                caseValue = (returnValue == null) ? "null" : returnValue.toString();
            } catch (Exception e) {
                String errorMsg = String.format("Error while evaluating script: %s", expression);
                LOGGER.error(errorMsg, e);
                throw new TerminateWorkflowException(errorMsg);
            }
        } else { // In case of no case expression, get the caseValueParam and treat it as a string
            // representation of caseValue
            LOGGER.debug(
                    "No Expression available on the decision task, case value being assigned as param name");
            String paramName = workflowTask.getCaseValueParam();
            // string concatenation deliberately maps a missing param to the string "null"
            caseValue = "" + taskInput.get(paramName);
        }
        return caseValue;
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/core/src/main/java/com/netflix/conductor/core/utils/QueueUtils.java | core/src/main/java/com/netflix/conductor/core/utils/QueueUtils.java | /*
* Copyright 2022 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.core.utils;
import org.apache.commons.lang3.StringUtils;
import com.netflix.conductor.common.metadata.tasks.Task;
import com.netflix.conductor.model.TaskModel;
public class QueueUtils {
    public static final String DOMAIN_SEPARATOR = ":";
    private static final String ISOLATION_SEPARATOR = "-";
    private static final String EXECUTION_NAME_SPACE_SEPARATOR = "@";

    /** Builds the queue name for the given {@link TaskModel}. */
    public static String getQueueName(TaskModel taskModel) {
        return getQueueName(
                taskModel.getTaskType(),
                taskModel.getDomain(),
                taskModel.getIsolationGroupId(),
                taskModel.getExecutionNameSpace());
    }

    /** Builds the queue name for the given {@link Task}. */
    public static String getQueueName(Task task) {
        return getQueueName(
                task.getTaskType(),
                task.getDomain(),
                task.getIsolationGroupId(),
                task.getExecutionNameSpace());
    }

    /**
     * Creates a queue name string using <code>taskType</code>, <code>domain</code>, <code>
     * isolationGroupId</code> and <code>executionNamespace</code>. Components that are {@code
     * null} are omitted along with their separators.
     *
     * @return domain:taskType@executionNamespace-isolationGroupId
     */
    public static String getQueueName(
            String taskType, String domain, String isolationGroupId, String executionNamespace) {
        String name = (domain == null) ? taskType : domain + DOMAIN_SEPARATOR + taskType;
        if (executionNamespace != null) {
            name = name + EXECUTION_NAME_SPACE_SEPARATOR + executionNamespace;
        }
        if (isolationGroupId != null) {
            name = name + ISOLATION_SEPARATOR + isolationGroupId;
        }
        return name;
    }

    /**
     * Strips the leading {@code domain:} prefix from the queue name; returns the input unchanged
     * when no domain separator is present.
     */
    public static String getQueueNameWithoutDomain(String queueName) {
        int separatorIndex = queueName.indexOf(DOMAIN_SEPARATOR);
        return queueName.substring(separatorIndex + 1);
    }

    /** Extracts the execution namespace component, or an empty string when absent. */
    public static String getExecutionNameSpace(String queueName) {
        if (!StringUtils.contains(queueName, EXECUTION_NAME_SPACE_SEPARATOR)) {
            return StringUtils.EMPTY;
        }
        if (StringUtils.contains(queueName, ISOLATION_SEPARATOR)) {
            // namespace sits between '@' and the isolation suffix separator
            return StringUtils.substringBetween(
                    queueName, EXECUTION_NAME_SPACE_SEPARATOR, ISOLATION_SEPARATOR);
        }
        return StringUtils.substringAfter(queueName, EXECUTION_NAME_SPACE_SEPARATOR);
    }

    /** Returns {@code true} when the queue name carries a non-blank isolation group suffix. */
    public static boolean isIsolatedQueue(String queue) {
        String isolationGroup = StringUtils.substringAfter(queue, ISOLATION_SEPARATOR);
        return StringUtils.isNotBlank(isolationGroup);
    }

    /**
     * Extracts the task type from a queue name by trimming the optional {@code domain:} prefix and
     * the optional {@code @namespace} / {@code -isolationGroup} suffixes.
     */
    public static String getTaskType(String queue) {
        if (StringUtils.isBlank(queue)) {
            return StringUtils.EMPTY;
        }
        // indexOf yields -1 when no domain prefix exists, so +1 lands on index 0 either way
        int start = StringUtils.indexOf(queue, DOMAIN_SEPARATOR) + 1;
        int end = StringUtils.indexOf(queue, EXECUTION_NAME_SPACE_SEPARATOR);
        if (end < 0) {
            end = StringUtils.lastIndexOf(queue, ISOLATION_SEPARATOR);
        }
        if (end < 0) {
            end = queue.length();
        }
        return StringUtils.substring(queue, start, end);
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/core/src/main/java/com/netflix/conductor/core/utils/SemaphoreUtil.java | core/src/main/java/com/netflix/conductor/core/utils/SemaphoreUtil.java | /*
* Copyright 2022 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.core.utils;
import java.util.concurrent.Semaphore;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/** A class wrapping a semaphore which holds the number of permits available for processing. */
public class SemaphoreUtil {
    private static final Logger LOGGER = LoggerFactory.getLogger(SemaphoreUtil.class);
    private final Semaphore permits;

    /**
     * Creates the utility with the given total number of processing permits.
     *
     * @param numSlots the total number of permits available
     */
    public SemaphoreUtil(int numSlots) {
        LOGGER.debug("Semaphore util initialized with {} permits", numSlots);
        permits = new Semaphore(numSlots);
    }

    /**
     * Signals if processing is allowed based on whether specified number of permits can be
     * acquired.
     *
     * @param numSlots the number of permits to acquire
     * @return {@code true} - if permit is acquired {@code false} - if permit could not be acquired
     */
    public boolean acquireSlots(int numSlots) {
        final boolean acquired = permits.tryAcquire(numSlots);
        LOGGER.trace("Trying to acquire {} permit: {}", numSlots, acquired);
        return acquired;
    }

    /** Signals that processing is complete and the specified number of permits can be released. */
    public void completeProcessing(int numSlots) {
        LOGGER.trace("Completed execution; releasing permit");
        permits.release(numSlots);
    }

    /**
     * Gets the number of slots available for processing.
     *
     * @return number of available permits
     */
    public int availableSlots() {
        final int free = permits.availablePermits();
        LOGGER.trace("Number of available permits: {}", free);
        return free;
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/core/src/main/java/com/netflix/conductor/core/utils/ExternalPayloadStorageUtils.java | core/src/main/java/com/netflix/conductor/core/utils/ExternalPayloadStorageUtils.java | /*
* Copyright 2022 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.core.utils;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.InputStream;
import java.nio.charset.StandardCharsets;
import java.util.HashMap;
import java.util.Map;
import org.apache.commons.io.IOUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.stereotype.Component;
import com.netflix.conductor.annotations.VisibleForTesting;
import com.netflix.conductor.common.run.ExternalStorageLocation;
import com.netflix.conductor.common.utils.ExternalPayloadStorage;
import com.netflix.conductor.common.utils.ExternalPayloadStorage.PayloadType;
import com.netflix.conductor.core.config.ConductorProperties;
import com.netflix.conductor.core.exception.NonTransientException;
import com.netflix.conductor.core.exception.TerminateWorkflowException;
import com.netflix.conductor.core.exception.TransientException;
import com.netflix.conductor.metrics.Monitors;
import com.netflix.conductor.model.TaskModel;
import com.netflix.conductor.model.WorkflowModel;
import com.fasterxml.jackson.databind.ObjectMapper;
/** Provides utility functions to upload and download payloads to {@link ExternalPayloadStorage} */
@Component
public class ExternalPayloadStorageUtils {
    private static final Logger LOGGER = LoggerFactory.getLogger(ExternalPayloadStorageUtils.class);
    // backing store for payloads that exceed the configured size thresholds
    private final ExternalPayloadStorage externalPayloadStorage;
    // source of the per-payload-type soft and hard size thresholds
    private final ConductorProperties properties;
    private final ObjectMapper objectMapper;
    public ExternalPayloadStorageUtils(
            ExternalPayloadStorage externalPayloadStorage,
            ConductorProperties properties,
            ObjectMapper objectMapper) {
        this.externalPayloadStorage = externalPayloadStorage;
        this.properties = properties;
        this.objectMapper = objectMapper;
    }
    /**
     * Download the payload from the given path.
     *
     * @param path the relative path of the payload in the {@link ExternalPayloadStorage}
     * @return the payload object
     * @throws NonTransientException in case of JSON parsing errors or download errors
     */
    @SuppressWarnings("unchecked")
    public Map<String, Object> downloadPayload(String path) {
        try (InputStream inputStream = externalPayloadStorage.download(path)) {
            return objectMapper.readValue(
                    IOUtils.toString(inputStream, StandardCharsets.UTF_8), Map.class);
        } catch (TransientException te) {
            // transient failures propagate unchanged so callers can retry
            throw te;
        } catch (Exception e) {
            LOGGER.error("Unable to download payload from external storage path: {}", path, e);
            throw new NonTransientException(
                    "Unable to download payload from external storage path: " + path, e);
        }
    }
    /**
     * Verify the payload size and upload to external storage if necessary.
     *
     * <p>If the serialized payload exceeds the hard limit the task/workflow is failed; if it only
     * exceeds the soft threshold, the payload is uploaded and the entity is externalized to point
     * at the storage path.
     *
     * @param entity the task or workflow for which the payload is to be verified and uploaded
     * @param payloadType the {@link PayloadType} of the payload
     * @param <T> {@link TaskModel} or {@link WorkflowModel}
     * @throws NonTransientException in case of JSON parsing errors or upload errors
     * @throws TerminateWorkflowException if the payload size is bigger than permissible limit as
     *     per {@link ConductorProperties}
     */
    public <T> void verifyAndUpload(T entity, PayloadType payloadType) {
        if (!shouldUpload(entity, payloadType)) return;
        // thresholds are configured in kilobytes; converted to bytes below before comparing
        long threshold = 0L;
        long maxThreshold = 0L;
        Map<String, Object> payload = new HashMap<>();
        String workflowId = "";
        // resolve the thresholds, the payload map and the owning workflow id for this payload type
        switch (payloadType) {
            case TASK_INPUT:
                threshold = properties.getTaskInputPayloadSizeThreshold().toKilobytes();
                maxThreshold = properties.getMaxTaskInputPayloadSizeThreshold().toKilobytes();
                payload = ((TaskModel) entity).getInputData();
                workflowId = ((TaskModel) entity).getWorkflowInstanceId();
                break;
            case TASK_OUTPUT:
                threshold = properties.getTaskOutputPayloadSizeThreshold().toKilobytes();
                maxThreshold = properties.getMaxTaskOutputPayloadSizeThreshold().toKilobytes();
                payload = ((TaskModel) entity).getOutputData();
                workflowId = ((TaskModel) entity).getWorkflowInstanceId();
                break;
            case WORKFLOW_INPUT:
                threshold = properties.getWorkflowInputPayloadSizeThreshold().toKilobytes();
                maxThreshold = properties.getMaxWorkflowInputPayloadSizeThreshold().toKilobytes();
                payload = ((WorkflowModel) entity).getInput();
                workflowId = ((WorkflowModel) entity).getWorkflowId();
                break;
            case WORKFLOW_OUTPUT:
                threshold = properties.getWorkflowOutputPayloadSizeThreshold().toKilobytes();
                maxThreshold = properties.getMaxWorkflowOutputPayloadSizeThreshold().toKilobytes();
                payload = ((WorkflowModel) entity).getOutput();
                workflowId = ((WorkflowModel) entity).getWorkflowId();
                break;
        }
        try (ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream()) {
            // size check is performed on the serialized JSON form of the payload
            objectMapper.writeValue(byteArrayOutputStream, payload);
            byte[] payloadBytes = byteArrayOutputStream.toByteArray();
            long payloadSize = payloadBytes.length;
            final long maxThresholdInBytes = maxThreshold * 1024;
            if (payloadSize > maxThresholdInBytes) {
                // hard limit exceeded: fail the task or terminate the workflow
                if (entity instanceof TaskModel) {
                    String errorMsg =
                            String.format(
                                    "The payload size: %d of task: %s in workflow: %s is greater than the permissible limit: %d bytes",
                                    payloadSize,
                                    ((TaskModel) entity).getTaskId(),
                                    ((TaskModel) entity).getWorkflowInstanceId(),
                                    maxThresholdInBytes);
                    failTask(((TaskModel) entity), payloadType, errorMsg);
                } else {
                    String errorMsg =
                            String.format(
                                    "The payload size: %d of workflow: %s is greater than the permissible limit: %d bytes",
                                    payloadSize,
                                    ((WorkflowModel) entity).getWorkflowId(),
                                    maxThresholdInBytes);
                    failWorkflow(((WorkflowModel) entity), payloadType, errorMsg);
                }
            } else if (payloadSize > threshold * 1024) {
                // soft threshold exceeded: upload the payload and replace it on the entity with
                // the external storage path
                String externalInputPayloadStoragePath, externalOutputPayloadStoragePath;
                switch (payloadType) {
                    case TASK_INPUT:
                        externalInputPayloadStoragePath =
                                uploadHelper(payloadBytes, payloadSize, PayloadType.TASK_INPUT);
                        ((TaskModel) entity).externalizeInput(externalInputPayloadStoragePath);
                        Monitors.recordExternalPayloadStorageUsage(
                                ((TaskModel) entity).getTaskDefName(),
                                ExternalPayloadStorage.Operation.WRITE.toString(),
                                PayloadType.TASK_INPUT.toString());
                        break;
                    case TASK_OUTPUT:
                        externalOutputPayloadStoragePath =
                                uploadHelper(payloadBytes, payloadSize, PayloadType.TASK_OUTPUT);
                        ((TaskModel) entity).externalizeOutput(externalOutputPayloadStoragePath);
                        Monitors.recordExternalPayloadStorageUsage(
                                ((TaskModel) entity).getTaskDefName(),
                                ExternalPayloadStorage.Operation.WRITE.toString(),
                                PayloadType.TASK_OUTPUT.toString());
                        break;
                    case WORKFLOW_INPUT:
                        externalInputPayloadStoragePath =
                                uploadHelper(payloadBytes, payloadSize, PayloadType.WORKFLOW_INPUT);
                        ((WorkflowModel) entity).externalizeInput(externalInputPayloadStoragePath);
                        Monitors.recordExternalPayloadStorageUsage(
                                ((WorkflowModel) entity).getWorkflowName(),
                                ExternalPayloadStorage.Operation.WRITE.toString(),
                                PayloadType.WORKFLOW_INPUT.toString());
                        break;
                    case WORKFLOW_OUTPUT:
                        externalOutputPayloadStoragePath =
                                uploadHelper(
                                        payloadBytes, payloadSize, PayloadType.WORKFLOW_OUTPUT);
                        ((WorkflowModel) entity)
                                .externalizeOutput(externalOutputPayloadStoragePath);
                        Monitors.recordExternalPayloadStorageUsage(
                                ((WorkflowModel) entity).getWorkflowName(),
                                ExternalPayloadStorage.Operation.WRITE.toString(),
                                PayloadType.WORKFLOW_OUTPUT.toString());
                        break;
                }
            }
        } catch (TransientException | TerminateWorkflowException te) {
            throw te;
        } catch (Exception e) {
            LOGGER.error(
                    "Unable to upload payload to external storage for workflow: {}", workflowId, e);
            throw new NonTransientException(
                    "Unable to upload payload to external storage for workflow: " + workflowId, e);
        }
    }
    /**
     * Obtains a write location from the external storage and uploads the payload bytes there.
     *
     * @return the relative storage path at which the payload was uploaded
     */
    @VisibleForTesting
    String uploadHelper(
            byte[] payloadBytes, long payloadSize, ExternalPayloadStorage.PayloadType payloadType) {
        ExternalStorageLocation location =
                externalPayloadStorage.getLocation(
                        ExternalPayloadStorage.Operation.WRITE, payloadType, "", payloadBytes);
        externalPayloadStorage.upload(
                location.getPath(), new ByteArrayInputStream(payloadBytes), payloadSize);
        return location.getPath();
    }
    /**
     * Marks the task FAILED_WITH_TERMINAL_ERROR with the given reason and clears the oversized
     * payload so it is not persisted.
     */
    @VisibleForTesting
    void failTask(TaskModel task, PayloadType payloadType, String errorMsg) {
        LOGGER.error(errorMsg);
        task.setReasonForIncompletion(errorMsg);
        task.setStatus(TaskModel.Status.FAILED_WITH_TERMINAL_ERROR);
        if (payloadType == PayloadType.TASK_INPUT) {
            task.setInputData(new HashMap<>());
        } else {
            task.setOutputData(new HashMap<>());
        }
    }
    /**
     * Clears the oversized workflow payload and terminates the workflow by throwing {@link
     * TerminateWorkflowException}.
     *
     * @throws TerminateWorkflowException always
     */
    @VisibleForTesting
    void failWorkflow(WorkflowModel workflow, PayloadType payloadType, String errorMsg) {
        LOGGER.error(errorMsg);
        if (payloadType == PayloadType.WORKFLOW_INPUT) {
            workflow.setInput(new HashMap<>());
        } else {
            workflow.setOutput(new HashMap<>());
        }
        throw new TerminateWorkflowException(errorMsg);
    }
    // Skips the size check/upload entirely when the raw (non-externalized) payload is empty.
    @VisibleForTesting
    <T> boolean shouldUpload(T entity, PayloadType payloadType) {
        if (entity instanceof TaskModel) {
            TaskModel taskModel = (TaskModel) entity;
            if (payloadType == PayloadType.TASK_INPUT) {
                return !taskModel.getRawInputData().isEmpty();
            } else {
                return !taskModel.getRawOutputData().isEmpty();
            }
        } else {
            WorkflowModel workflowModel = (WorkflowModel) entity;
            if (payloadType == PayloadType.WORKFLOW_INPUT) {
                return !workflowModel.getRawInput().isEmpty();
            } else {
                return !workflowModel.getRawOutput().isEmpty();
            }
        }
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/core/src/main/java/com/netflix/conductor/core/utils/ParametersUtils.java | core/src/main/java/com/netflix/conductor/core/utils/ParametersUtils.java | /*
* Copyright 2022 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.core.utils;
import java.io.IOException;
import java.util.Collections;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Objects;
import java.util.regex.Pattern;
import org.apache.commons.lang3.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.stereotype.Component;
import com.netflix.conductor.common.metadata.tasks.TaskDef;
import com.netflix.conductor.common.metadata.workflow.WorkflowDef;
import com.netflix.conductor.common.utils.EnvUtils;
import com.netflix.conductor.common.utils.TaskUtils;
import com.netflix.conductor.model.TaskModel;
import com.netflix.conductor.model.WorkflowModel;
import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.jayway.jsonpath.Configuration;
import com.jayway.jsonpath.DocumentContext;
import com.jayway.jsonpath.JsonPath;
import com.jayway.jsonpath.Option;
/** Used to parse and resolve the JSONPath bindings in the workflow and task definitions. */
@Component
public class ParametersUtils {
    private static final Logger LOGGER = LoggerFactory.getLogger(ParametersUtils.class);
    // Matches ${...} placeholders (including ones with nested '{'/'}') while skipping occurrences
    // escaped as $${...} (the negative lookbehind); the $$ escape itself is unwound at the end of
    // replaceVariables().
    private static final Pattern PATTERN =
            Pattern.compile(
                    "(?=(?<!\\$)\\$\\{)(?:(?=.*?\\{(?!.*?\\1)(.*\\}(?!.*\\2).*))(?=.*?\\}(?!.*?\\2)(.*)).)+?.*?(?=\\1)[^{]*(?=\\2$)",
                    Pattern.DOTALL);
    private final ObjectMapper objectMapper;
    // target type used for JSON round-trip deep cloning of parameter maps
    private final TypeReference<Map<String, Object>> map = new TypeReference<>() {};
    public ParametersUtils(ObjectMapper objectMapper) {
        this.objectMapper = objectMapper;
    }
    /**
     * Resolves the task input parameters against the workflow state, dispatching on the workflow
     * definition's schema version (v2 uses JSONPath; v1 uses the legacy dotted-path format).
     *
     * @param inputParams the raw (unresolved) input parameter map from the task definition
     * @param workflow the workflow providing the resolution context
     * @param taskDefinition the task definition whose inputTemplate supplies defaults (may be null)
     * @param taskId the id of the task being resolved (used by system parameter expansion)
     * @return the resolved task input map
     */
    public Map<String, Object> getTaskInput(
            Map<String, Object> inputParams,
            WorkflowModel workflow,
            TaskDef taskDefinition,
            String taskId) {
        if (workflow.getWorkflowDefinition().getSchemaVersion() > 1) {
            return getTaskInputV2(inputParams, workflow, taskId, taskDefinition);
        }
        return getTaskInputV1(workflow, inputParams);
    }
    /**
     * Resolves task input for schema version 2+: builds a JSON document holding the workflow's
     * state and every executed task's state, then evaluates all ${...} JSONPath expressions in
     * {@code input} against it.
     *
     * @param input the raw input parameter map (may be null)
     * @param workflow the workflow providing workflow/task state
     * @param taskId the id of the task being resolved
     * @param taskDefinition supplies inputTemplate defaults for absent/null keys (may be null)
     * @return the resolved task input map
     */
    public Map<String, Object> getTaskInputV2(
            Map<String, Object> input,
            WorkflowModel workflow,
            String taskId,
            TaskDef taskDefinition) {
        Map<String, Object> inputParams;
        if (input != null) {
            inputParams = clone(input);
        } else {
            inputParams = new HashMap<>();
        }
        // template values only fill keys the input does not already define
        if (taskDefinition != null && taskDefinition.getInputTemplate() != null) {
            clone(taskDefinition.getInputTemplate()).forEach(inputParams::putIfAbsent);
        }
        // document that JSONPath expressions are evaluated against:
        // { "workflow": {...}, "<taskRefName>": {...}, ... }
        Map<String, Map<String, Object>> inputMap = new HashMap<>();
        Map<String, Object> workflowParams = new HashMap<>();
        workflowParams.put("input", workflow.getInput());
        workflowParams.put("output", workflow.getOutput());
        workflowParams.put("status", workflow.getStatus());
        workflowParams.put("workflowId", workflow.getWorkflowId());
        workflowParams.put("parentWorkflowId", workflow.getParentWorkflowId());
        workflowParams.put("parentWorkflowTaskId", workflow.getParentWorkflowTaskId());
        workflowParams.put("workflowType", workflow.getWorkflowName());
        workflowParams.put("version", workflow.getWorkflowVersion());
        workflowParams.put("correlationId", workflow.getCorrelationId());
        workflowParams.put("reasonForIncompletion", workflow.getReasonForIncompletion());
        workflowParams.put("schemaVersion", workflow.getWorkflowDefinition().getSchemaVersion());
        workflowParams.put("variables", workflow.getVariables());
        inputMap.put("workflow", workflowParams);
        // For new workflow being started the list of tasks will be empty
        workflow.getTasks().stream()
                .map(TaskModel::getReferenceTaskName)
                .map(workflow::getTaskByRefName)
                .forEach(
                        task -> {
                            Map<String, Object> taskParams = new HashMap<>();
                            taskParams.put("input", task.getInputData());
                            taskParams.put("output", task.getOutputData());
                            taskParams.put("taskType", task.getTaskType());
                            if (task.getStatus() != null) {
                                taskParams.put("status", task.getStatus().toString());
                            }
                            taskParams.put("referenceTaskName", task.getReferenceTaskName());
                            taskParams.put("retryCount", task.getRetryCount());
                            taskParams.put("correlationId", task.getCorrelationId());
                            taskParams.put("pollCount", task.getPollCount());
                            taskParams.put("taskDefName", task.getTaskDefName());
                            taskParams.put("scheduledTime", task.getScheduledTime());
                            taskParams.put("startTime", task.getStartTime());
                            taskParams.put("endTime", task.getEndTime());
                            taskParams.put("workflowInstanceId", task.getWorkflowInstanceId());
                            taskParams.put("taskId", task.getTaskId());
                            taskParams.put(
                                    "reasonForIncompletion", task.getReasonForIncompletion());
                            taskParams.put("callbackAfterSeconds", task.getCallbackAfterSeconds());
                            taskParams.put("workerId", task.getWorkerId());
                            taskParams.put("iteration", task.getIteration());
                            // loop-over tasks are keyed by their base ref name (iteration stripped)
                            inputMap.put(
                                    task.isLoopOverTask()
                                            ? TaskUtils.removeIterationFromTaskRefName(
                                                    task.getReferenceTaskName())
                                            : task.getReferenceTaskName(),
                                    taskParams);
                        });
        // SUPPRESS_EXCEPTIONS makes unresolved JSONPath expressions read as null instead of
        // throwing
        Configuration option =
                Configuration.defaultConfiguration().addOptions(Option.SUPPRESS_EXCEPTIONS);
        DocumentContext documentContext = JsonPath.parse(inputMap, option);
        Map<String, Object> replacedTaskInput = replace(inputParams, documentContext, taskId);
        if (taskDefinition != null && taskDefinition.getInputTemplate() != null) {
            // If input for a given key resolves to null, try replacing it with one from
            // inputTemplate, if it exists.
            replacedTaskInput.replaceAll(
                    (key, value) ->
                            (value == null) ? taskDefinition.getInputTemplate().get(key) : value);
        }
        return replacedTaskInput;
    }
    // deep clone using json - POJO
    private Map<String, Object> clone(Map<String, Object> inputTemplate) {
        try {
            byte[] bytes = objectMapper.writeValueAsBytes(inputTemplate);
            return objectMapper.readValue(bytes, map);
        } catch (IOException e) {
            throw new RuntimeException("Unable to clone input params", e);
        }
    }
    /**
     * Resolves all ${...} expressions in {@code input} against the given JSON document.
     *
     * @param input the map containing expressions to resolve
     * @param json either a JSON string or an already-parsed document
     * @return a new map with all expressions resolved
     */
    public Map<String, Object> replace(Map<String, Object> input, Object json) {
        Object doc;
        if (json instanceof String) {
            doc = JsonPath.parse(json.toString());
        } else {
            doc = json;
        }
        Configuration option =
                Configuration.defaultConfiguration().addOptions(Option.SUPPRESS_EXCEPTIONS);
        DocumentContext documentContext = JsonPath.parse(doc, option);
        return replace(input, documentContext, null);
    }
    /**
     * Resolves a single parameter string against an empty document; only environment/system
     * parameters can produce values here.
     */
    public Object replace(String paramString) {
        Configuration option =
                Configuration.defaultConfiguration().addOptions(Option.SUPPRESS_EXCEPTIONS);
        DocumentContext documentContext = JsonPath.parse(Collections.emptyMap(), option);
        return replaceVariables(paramString, documentContext, null);
    }
    // Recursively walks the input map, resolving expressions in string values and descending into
    // nested maps and lists; non-string leaves pass through unchanged.
    @SuppressWarnings("unchecked")
    private Map<String, Object> replace(
            Map<String, Object> input, DocumentContext documentContext, String taskId) {
        Map<String, Object> result = new HashMap<>();
        for (Entry<String, Object> e : input.entrySet()) {
            Object newValue;
            Object value = e.getValue();
            if (value instanceof String) {
                newValue = replaceVariables(value.toString(), documentContext, taskId);
            } else if (value instanceof Map) {
                // recursive call
                newValue = replace((Map<String, Object>) value, documentContext, taskId);
            } else if (value instanceof List) {
                newValue = replaceList((List<?>) value, taskId, documentContext);
            } else {
                newValue = value;
            }
            result.put(e.getKey(), newValue);
        }
        return result;
    }
    // List counterpart of replace(Map, ...): resolves each element, recursing into nested
    // maps/lists.
    @SuppressWarnings("unchecked")
    private Object replaceList(List<?> values, String taskId, DocumentContext io) {
        List<Object> replacedList = new LinkedList<>();
        for (Object listVal : values) {
            if (listVal instanceof String) {
                Object replaced = replaceVariables(listVal.toString(), io, taskId);
                replacedList.add(replaced);
            } else if (listVal instanceof Map) {
                Object replaced = replace((Map<String, Object>) listVal, io, taskId);
                replacedList.add(replaced);
            } else if (listVal instanceof List) {
                Object replaced = replaceList((List<?>) listVal, taskId, io);
                replacedList.add(replaced);
            } else {
                replacedList.add(listVal);
            }
        }
        return replacedList;
    }
    private Object replaceVariables(
            String paramString, DocumentContext documentContext, String taskId) {
        return replaceVariables(paramString, documentContext, taskId, 0);
    }
    /**
     * Resolves every ${...} placeholder in {@code paramString}. Nested placeholders are resolved
     * inner-first via the recursive call with depth + 1. When the whole string is exactly one
     * placeholder at depth 0, the resolved object is returned as-is (preserving its JSON type);
     * otherwise resolved values are stringified into the surrounding text. Finally, "$${" escape
     * sequences are unescaped to "${".
     */
    private Object replaceVariables(
            String paramString, DocumentContext documentContext, String taskId, int depth) {
        var matcher = PATTERN.matcher(paramString);
        var replacements = new LinkedList<Replacement>();
        while (matcher.find()) {
            var start = matcher.start();
            var end = matcher.end();
            var match = paramString.substring(start, end);
            // strip the "${" prefix and "}" suffix, then resolve any nested placeholders first
            String paramPath = match.substring(2, match.length() - 1);
            paramPath = replaceVariables(paramPath, documentContext, taskId, depth + 1).toString();
            // if the paramPath is blank, meaning no value in between ${ and }
            // like ${}, ${ } etc, set the value to empty string
            if (StringUtils.isBlank(paramPath)) {
                replacements.add(new Replacement("", start, end));
                continue;
            }
            if (EnvUtils.isEnvironmentVariable(paramPath)) {
                String sysValue = EnvUtils.getSystemParametersValue(paramPath, taskId);
                if (sysValue != null) {
                    replacements.add(new Replacement(sysValue, start, end));
                }
            } else {
                try {
                    replacements.add(new Replacement(documentContext.read(paramPath), start, end));
                } catch (Exception e) {
                    LOGGER.warn(
                            "Error reading documentContext for paramPath: {}. Exception: {}",
                            paramPath,
                            e);
                    replacements.add(new Replacement(null, start, end));
                }
            }
        }
        // whole-string single placeholder at the top level: return the raw resolved object so
        // non-string types (maps, lists, numbers, null) survive
        if (replacements.size() == 1
                && replacements.getFirst().getStartIndex() == 0
                && replacements.getFirst().getEndIndex() == paramString.length()
                && depth == 0) {
            return replacements.get(0).getReplacement();
        }
        Collections.sort(replacements);
        var builder = new StringBuilder(paramString);
        // splice right-to-left so earlier replacement offsets remain valid
        for (int i = replacements.size() - 1; i >= 0; i--) {
            var replacement = replacements.get(i);
            builder.replace(
                    replacement.getStartIndex(),
                    replacement.getEndIndex(),
                    Objects.toString(replacement.getReplacement()));
        }
        return builder.toString().replaceAll("\\$\\$\\{", "\\${");
    }
    @Deprecated
    // Workflow schema version 1 is deprecated and new workflows should be using version 2
    private Map<String, Object> getTaskInputV1(
            WorkflowModel workflow, Map<String, Object> inputParams) {
        Map<String, Object> input = new HashMap<>();
        if (inputParams == null) {
            return input;
        }
        Map<String, Object> workflowInput = workflow.getInput();
        inputParams.forEach(
                (paramName, value) -> {
                    // legacy format: "<source>.<input|output>.<name>"
                    String paramPath = "" + value;
                    String[] paramPathComponents = paramPath.split("\\.");
                    Utils.checkArgument(
                            paramPathComponents.length == 3,
                            "Invalid input expression for "
                                    + paramName
                                    + ", paramPathComponents.size="
                                    + paramPathComponents.length
                                    + ", expression="
                                    + paramPath);
                    String source = paramPathComponents[0]; // workflow, or task reference name
                    String type = paramPathComponents[1]; // input/output
                    String name = paramPathComponents[2]; // name of the parameter
                    if ("workflow".equals(source)) {
                        input.put(paramName, workflowInput.get(name));
                    } else {
                        TaskModel task = workflow.getTaskByRefName(source);
                        if (task != null) {
                            if ("input".equals(type)) {
                                input.put(paramName, task.getInputData().get(name));
                            } else {
                                input.put(paramName, task.getOutputData().get(name));
                            }
                        }
                    }
                });
        return input;
    }
    /**
     * Applies the workflow definition's inputTemplate defaults to the given workflow input,
     * filling only keys the input does not already define.
     *
     * @return the same {@code inputParams} map, augmented with template defaults
     */
    public Map<String, Object> getWorkflowInput(
            WorkflowDef workflowDef, Map<String, Object> inputParams) {
        if (workflowDef != null && workflowDef.getInputTemplate() != null) {
            clone(workflowDef.getInputTemplate()).forEach(inputParams::putIfAbsent);
        }
        return inputParams;
    }
    /**
     * Holds one resolved value together with the half-open [startIndex, endIndex) span it replaces
     * in the original parameter string; orders by start index for the right-to-left splice.
     */
    private static class Replacement implements Comparable<Replacement> {
        private final int startIndex;
        private final int endIndex;
        private final Object replacement;
        public Replacement(Object replacement, int startIndex, int endIndex) {
            this.replacement = replacement;
            this.startIndex = startIndex;
            this.endIndex = endIndex;
        }
        public Object getReplacement() {
            return replacement;
        }
        public int getStartIndex() {
            return startIndex;
        }
        public int getEndIndex() {
            return endIndex;
        }
        @Override
        public int compareTo(Replacement o) {
            return Long.compare(startIndex, o.startIndex);
        }
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/core/src/main/java/com/netflix/conductor/core/utils/IDGenerator.java | core/src/main/java/com/netflix/conductor/core/utils/IDGenerator.java | /*
* Copyright 2022 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.core.utils;
import java.util.UUID;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.stereotype.Component;
@Component
@ConditionalOnProperty(
name = "conductor.id.generator",
havingValue = "default",
matchIfMissing = true)
/**
* ID Generator used by Conductor Note on overriding the ID Generator: The default ID generator uses
* UUID v4 as the ID format. By overriding this class it is possible to use different scheme for ID
* generation. However, this is not normal and should only be done after very careful consideration.
*
* <p>Please note, if you use Cassandra persistence, the schema uses UUID as the column type and the
* IDs have to be valid UUIDs supported by Cassandra.
*/
public class IDGenerator {

    public IDGenerator() {}

    /**
     * Generates a fresh identifier.
     *
     * @return a random (version 4) UUID rendered in its canonical string form
     */
    public String generate() {
        UUID uuid = UUID.randomUUID();
        return uuid.toString();
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/core/src/main/java/com/netflix/conductor/core/utils/Utils.java | core/src/main/java/com/netflix/conductor/core/utils/Utils.java | /*
* Copyright 2022 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.core.utils;
import java.net.InetAddress;
import java.net.UnknownHostException;
import java.util.*;
import org.apache.commons.lang3.StringUtils;
import com.netflix.conductor.core.exception.TransientException;
public class Utils {

    public static final String DECIDER_QUEUE = "_deciderQueue";

    /** Static utility holder; not meant to be instantiated. */
    private Utils() {}

    /**
     * ID of the server. Can be host name, IP address or any other meaningful identifier
     *
     * @return host name resolved for the instance, "unknown" if resolution fails
     */
    public static String getServerId() {
        try {
            // Note: this is the plain host name (getHostName), not the canonical/FQDN form.
            return InetAddress.getLocalHost().getHostName();
        } catch (UnknownHostException e) {
            return "unknown";
        }
    }

    /**
     * Split string with "|" as delimiter.
     *
     * @param inputStr Input string
     * @return List of String; an empty list when the input is null or blank
     */
    public static List<String> convertStringToList(String inputStr) {
        List<String> list = new ArrayList<>();
        if (StringUtils.isNotBlank(inputStr)) {
            // Arrays.asList returns a fixed-size view over the split result.
            list = Arrays.asList(inputStr.split("\\|"));
        }
        return list;
    }

    /**
     * Ensures the truth of an condition involving one or more parameters to the calling method.
     *
     * @param condition a boolean expression
     * @param errorMessage The exception message use if the input condition is not valid
     * @throws IllegalArgumentException if input condition is not valid.
     */
    public static void checkArgument(boolean condition, String errorMessage) {
        if (!condition) {
            throw new IllegalArgumentException(errorMessage);
        }
    }

    /**
     * This method checks if the object is null.
     *
     * @param object input of type {@link Object}.
     * @param errorMessage The exception message use if the object is null.
     * @throws NullPointerException if input object is null.
     */
    public static void checkNotNull(Object object, String errorMessage) {
        if (object == null) {
            throw new NullPointerException(errorMessage);
        }
    }

    /**
     * Used to determine if the exception is thrown due to a transient failure and the operation is
     * expected to succeed upon retrying.
     *
     * @param throwable the exception that is thrown
     * @return true - if the exception is a transient failure
     *     <p>false - if the exception is non-transient
     */
    public static boolean isTransientException(Throwable throwable) {
        if (throwable != null) {
            return throwable instanceof TransientException;
        }
        // A null throwable is reported as transient — presumably so retry loops treat
        // "no exception info" as retryable; verify against callers before changing.
        return true;
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/core/src/main/java/com/netflix/conductor/core/utils/JsonUtils.java | core/src/main/java/com/netflix/conductor/core/utils/JsonUtils.java | /*
* Copyright 2022 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.core.utils;
import java.util.List;
import java.util.Map;
import org.springframework.stereotype.Component;
import com.fasterxml.jackson.databind.ObjectMapper;
/** This class contains utility functions for parsing/expanding JSON. */
@SuppressWarnings("unchecked")
@Component
public class JsonUtils {

    private final ObjectMapper objectMapper;

    public JsonUtils(ObjectMapper objectMapper) {
        this.objectMapper = objectMapper;
    }

    /**
     * Expands a JSON object into a java object
     *
     * @param input the object to be expanded
     * @return the expanded object containing java types like {@link Map} and {@link List}
     */
    public Object expand(Object input) {
        if (input instanceof List) {
            expandList((List<Object>) input);
            return input;
        } else if (input instanceof Map) {
            expandMap((Map<String, Object>) input);
            return input;
        } else if (input instanceof String) {
            return getJson((String) input);
        } else {
            return input;
        }
    }

    /**
     * Recursively expands JSON strings contained in the list, in place.
     *
     * <p>Fix: the previous implementation assigned the parsed value to the for-each loop
     * variable, which silently discarded the expansion — string elements of lists were never
     * actually replaced (unlike {@link #expandMap(Map)}, which uses {@code entry.setValue}).
     * Assumes the list is mutable, which holds for Jackson-deserialized payloads.
     */
    private void expandList(List<Object> input) {
        for (int i = 0; i < input.size(); i++) {
            Object value = input.get(i);
            if (value instanceof String) {
                if (isJsonString(value.toString())) {
                    // Write the parsed value back so the expansion is retained.
                    input.set(i, getJson(value.toString()));
                }
            } else if (value instanceof Map) {
                expandMap((Map<String, Object>) value);
            } else if (value instanceof List) {
                expandList((List<Object>) value);
            }
        }
    }

    /** Recursively expands JSON strings contained in the map's values, in place. */
    private void expandMap(Map<String, Object> input) {
        for (Map.Entry<String, Object> entry : input.entrySet()) {
            Object value = entry.getValue();
            if (value instanceof String) {
                if (isJsonString(value.toString())) {
                    entry.setValue(getJson(value.toString()));
                }
            } else if (value instanceof Map) {
                expandMap((Map<String, Object>) value);
            } else if (value instanceof List) {
                expandList((List<Object>) value);
            }
        }
    }

    /**
     * Used to obtain a JSONified object from a string
     *
     * @param jsonAsString the json object represented in string form
     * @return the JSONified object representation if the input is a valid json string if the input
     *     is not a valid json string, it will be returned as-is and no exception is thrown
     */
    private Object getJson(String jsonAsString) {
        try {
            return objectMapper.readValue(jsonAsString, Object.class);
        } catch (Exception e) {
            return jsonAsString;
        }
    }

    /** Cheap structural check: only strings starting with '{' or '[' are parsed as JSON. */
    private boolean isJsonString(String jsonAsString) {
        jsonAsString = jsonAsString.trim();
        return jsonAsString.startsWith("{") || jsonAsString.startsWith("[");
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/core/src/main/java/com/netflix/conductor/core/utils/DateTimeUtils.java | core/src/main/java/com/netflix/conductor/core/utils/DateTimeUtils.java | /*
* Copyright 2022 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.core.utils;
import java.text.ParseException;
import java.time.Duration;
import java.util.Date;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.apache.commons.lang3.time.DateUtils;
public class DateTimeUtils {

    private static final String[] DATE_PATTERNS =
            new String[] {"yyyy-MM-dd HH:mm", "yyyy-MM-dd HH:mm z", "yyyy-MM-dd"};

    // Optional day/hour/minute/second components, each a count followed by a unit word.
    private static final Pattern DURATION_PATTERN =
            Pattern.compile(
                    """
                    \\s*(?:(\\d+)\\s*(?:days?|d))?\
                    \\s*(?:(\\d+)\\s*(?:hours?|hrs?|h))?\
                    \\s*(?:(\\d+)\\s*(?:minutes?|mins?|m))?\
                    \\s*(?:(\\d+)\\s*(?:seconds?|secs?|s))?\
                    \\s*""",
                    Pattern.CASE_INSENSITIVE);

    /** Static utility holder; not meant to be instantiated. */
    private DateTimeUtils() {}

    /**
     * Parses a human-readable duration such as {@code "1d 2h 3m 4s"} or {@code "90 minutes"}.
     *
     * <p>NOTE(review): because every component is optional, a blank string matches the pattern and
     * yields {@link Duration#ZERO} rather than throwing — confirm callers rely on this before
     * tightening.
     *
     * @param text duration text; components may be omitted but must appear in d/h/m/s order
     * @return the parsed duration
     * @throws IllegalArgumentException if the text does not match the duration grammar
     */
    public static Duration parseDuration(String text) {
        Matcher m = DURATION_PATTERN.matcher(text);
        if (!m.matches()) throw new IllegalArgumentException("Not valid duration: " + text);

        // Matcher.start(group) is -1 when the optional group did not participate in the match.
        int days = (m.start(1) == -1 ? 0 : Integer.parseInt(m.group(1)));
        int hours = (m.start(2) == -1 ? 0 : Integer.parseInt(m.group(2)));
        int mins = (m.start(3) == -1 ? 0 : Integer.parseInt(m.group(3)));
        int secs = (m.start(4) == -1 ? 0 : Integer.parseInt(m.group(4)));
        // 86400L forces long arithmetic: the previous int multiply overflowed for large day
        // counts (days > ~24855) before Duration.ofSeconds ever saw the value.
        return Duration.ofSeconds(days * 86400L + (hours * 60L + mins) * 60L + secs);
    }

    /**
     * Parses a date using the supported patterns: {@code yyyy-MM-dd HH:mm},
     * {@code yyyy-MM-dd HH:mm z}, or {@code yyyy-MM-dd}.
     *
     * @param date date text in one of the supported formats
     * @return the parsed {@link Date}
     * @throws ParseException if the text matches none of the patterns
     */
    public static Date parseDate(String date) throws ParseException {
        return DateUtils.parseDate(date, DATE_PATTERNS);
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/core/src/main/java/com/netflix/conductor/core/events/ScriptEvaluator.java | core/src/main/java/com/netflix/conductor/core/events/ScriptEvaluator.java | /*
* Copyright 2025 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.core.events;
import java.time.Duration;
import java.util.*;
import java.util.concurrent.*;
import org.graalvm.polyglot.*;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.netflix.conductor.core.exception.NonTransientException;
import com.netflix.conductor.core.exception.TerminateWorkflowException;
import com.netflix.conductor.core.execution.evaluators.ConsoleBridge;
/**
 * Evaluates JavaScript snippets on the GraalVM polyglot engine with a hard wall-clock timeout.
 *
 * <p>Configuration is process-wide and applied once, either explicitly via {@link #initialize}
 * or lazily from environment variables via {@link #initializeWithDefaults()}. A fixed-size pool
 * of reusable {@link Context} instances can optionally be enabled; otherwise a fresh context is
 * created and closed for every evaluation.
 */
public class ScriptEvaluator {

    private static final Logger LOGGER = LoggerFactory.getLogger(ScriptEvaluator.class);

    // Defaults used when neither initialize(...) nor environment overrides supply values.
    private static final int DEFAULT_MAX_EXECUTION_SECONDS = 4;
    private static final int DEFAULT_CONTEXT_POOL_SIZE = 10;
    private static final boolean DEFAULT_CONTEXT_POOL_ENABLED = false;

    // Static mutable configuration; written only inside the synchronized initialize methods and
    // guarded against re-initialization by the `initialized` flag.
    private static Duration maxExecutionTimeSeconds;
    private static ExecutorService executorService;
    private static BlockingQueue<ScriptExecutionContext> contextPool;
    private static boolean contextPoolEnabled;
    private static boolean initialized = false;

    private ScriptEvaluator() {}

    /**
     * Initialize the script evaluator with configuration. This should be called once at startup.
     *
     * @param maxSeconds Maximum execution time in seconds (default: 4)
     * @param contextPoolSize Size of the context pool (default: 10)
     * @param poolEnabled Whether to enable context pooling (default: false)
     * @param executor ExecutorService for script execution
     */
    public static synchronized void initialize(
            int maxSeconds, int contextPoolSize, boolean poolEnabled, ExecutorService executor) {
        if (initialized) {
            LOGGER.warn("ScriptEvaluator already initialized, skipping re-initialization");
            return;
        }

        maxExecutionTimeSeconds = Duration.ofSeconds(maxSeconds);
        // Fall back to an unbounded cached thread pool when no executor is supplied.
        executorService = executor != null ? executor : Executors.newCachedThreadPool();
        contextPoolEnabled = poolEnabled;

        if (!contextPoolEnabled) {
            LOGGER.warn(
                    "Script execution context pool is disabled. Each script execution will create a new context.");
            contextPool = null;
        } else {
            contextPool = new LinkedBlockingQueue<>(contextPoolSize);
            // Pre-fill the pool; the queue capacity equals the loop count, so offer() succeeds.
            for (int i = 0; i < contextPoolSize; i++) {
                Context context = createNewContext();
                contextPool.offer(new ScriptExecutionContext(context));
            }
            LOGGER.info(
                    "Script execution context pool initialized with {} contexts", contextPoolSize);
        }

        initialized = true;
    }

    /** Initialize with default values from environment variables or defaults. */
    public static synchronized void initializeWithDefaults() {
        if (initialized) {
            return;
        }

        int maxSeconds =
                Integer.parseInt(
                        getEnv(
                                "CONDUCTOR_SCRIPT_MAX_EXECUTION_SECONDS",
                                String.valueOf(DEFAULT_MAX_EXECUTION_SECONDS)));
        int poolSize =
                Integer.parseInt(
                        getEnv(
                                "CONDUCTOR_SCRIPT_CONTEXT_POOL_SIZE",
                                String.valueOf(DEFAULT_CONTEXT_POOL_SIZE)));
        boolean poolEnabled =
                Boolean.parseBoolean(
                        getEnv(
                                "CONDUCTOR_SCRIPT_CONTEXT_POOL_ENABLED",
                                String.valueOf(DEFAULT_CONTEXT_POOL_ENABLED)));

        initialize(maxSeconds, poolSize, poolEnabled, null);
    }

    /** Reads an environment variable, falling back to the supplied default when unset. */
    private static String getEnv(String name, String defaultValue) {
        String value = System.getenv(name);
        return value != null ? value : defaultValue;
    }

    /** Lazily initializes with defaults on the first eval; the initialize methods are
     * synchronized, so a racing double call is harmless. */
    private static void ensureInitialized() {
        if (!initialized) {
            initializeWithDefaults();
        }
    }

    /** Builds a JS polyglot context. NOTE(review): HostAccess.ALL exposes all host methods to
     * the script — scripts are assumed trusted; confirm before evaluating external input. */
    private static Context createNewContext() {
        return Context.newBuilder("js")
                .allowHostAccess(HostAccess.ALL)
                .option("engine.WarnInterpreterOnly", "false")
                .build();
    }

    /**
     * Evaluates the script with the help of input provided but converts the result to a boolean
     * value.
     *
     * @param script Script to be evaluated.
     * @param input Input parameters.
     * @return True or False based on the result of the evaluated expression.
     */
    public static Boolean evalBool(String script, Object input) {
        return toBoolean(eval(script, input));
    }

    /**
     * Evaluates the script with the help of input provided.
     *
     * @param script Script to be evaluated.
     * @param input Input parameters.
     * @return Generic object, the result of the evaluated expression.
     */
    public static Object eval(String script, Object input) {
        return eval(script, input, null);
    }

    /**
     * Evaluates the script with the help of input provided.
     *
     * <p>The script runs on the configured executor and is bounded by the configured timeout.
     * The input is exposed to the script as {@code $}, and the optional console bridge as
     * {@code console}.
     *
     * @param script Script to be evaluated.
     * @param input Input parameters.
     * @param console ConsoleBridge that can be used to get the calls to console.log() and others.
     * @return Generic object, the result of the evaluated expression.
     * @throws NonTransientException on timeout or interruption
     * @throws TerminateWorkflowException when the script itself raises an evaluation error
     */
    public static Object eval(String script, Object input, ConsoleBridge console) {
        ensureInitialized();

        if (contextPoolEnabled) {
            // Context pool implementation
            ScriptExecutionContext scriptContext = null;
            try {
                // Blocks until a pooled context is available.
                scriptContext = contextPool.take();
                final ScriptExecutionContext finalScriptContext = scriptContext;
                finalScriptContext.prepareBindings(input, console);
                Future<Value> futureResult =
                        executorService.submit(
                                () -> finalScriptContext.getContext().eval("js", script));
                Value value =
                        futureResult.get(maxExecutionTimeSeconds.getSeconds(), TimeUnit.SECONDS);
                return getObject(value);
            } catch (TimeoutException e) {
                // Cancel the still-running script inside the context before reporting failure.
                // NOTE(review): the interrupted context is still returned to the pool by the
                // finally block below — confirm it remains usable after Context.interrupt.
                if (scriptContext != null) {
                    interrupt(scriptContext.getContext());
                }
                throw new NonTransientException(
                        String.format(
                                "Script not evaluated within %d seconds, interrupted.",
                                maxExecutionTimeSeconds.getSeconds()));
            } catch (ExecutionException ee) {
                // Rethrows as TerminateWorkflowException; the return is unreachable.
                handlePolyglotException(ee);
                return null;
            } catch (InterruptedException ie) {
                // Restore the interrupt flag before surfacing the failure.
                Thread.currentThread().interrupt();
                throw new NonTransientException("Script execution interrupted: " + ie.getMessage());
            } finally {
                // Always scrub bindings and hand the context back; close it if the pool is full.
                if (scriptContext != null) {
                    scriptContext.clearBindings();
                    if (!contextPool.offer(scriptContext)) {
                        scriptContext.getContext().close();
                        LOGGER.warn(
                                "ScriptExecutionContext pool is full, context closed and not returned to pool.");
                    }
                }
            }
        } else {
            // No context pool - create new context for each execution
            try (Context context = createNewContext()) {
                final Value jsBindings = context.getBindings("js");
                jsBindings.putMember("$", input);
                if (console != null) {
                    jsBindings.putMember("console", console);
                }
                final Future<Value> futureResult =
                        executorService.submit(() -> context.eval("js", script));
                Value value =
                        futureResult.get(maxExecutionTimeSeconds.getSeconds(), TimeUnit.SECONDS);
                return getObject(value);
            } catch (TimeoutException e) {
                // The context is closed by try-with-resources; no explicit interrupt here.
                throw new NonTransientException(
                        String.format(
                                "Script not evaluated within %d seconds, interrupted.",
                                maxExecutionTimeSeconds.getSeconds()));
            } catch (ExecutionException ee) {
                handlePolyglotException(ee);
                return null;
            } catch (InterruptedException ie) {
                Thread.currentThread().interrupt();
                throw new NonTransientException("Script execution interrupted: " + ie.getMessage());
            }
        }
    }

    /**
     * Unwraps a script-side failure and rethrows it as {@link TerminateWorkflowException},
     * including the failing source line when GraalVM provides one. Always throws.
     */
    private static void handlePolyglotException(ExecutionException ee) {
        if (ee.getCause() instanceof PolyglotException pe) {
            SourceSection sourceSection = pe.getSourceLocation();
            if (sourceSection == null) {
                throw new TerminateWorkflowException(
                        "Error evaluating the script `" + pe.getMessage() + "`");
            } else {
                throw new TerminateWorkflowException(
                        "Error evaluating the script `"
                                + pe.getMessage()
                                + "` at line "
                                + sourceSection.getStartLine());
            }
        }
        throw new TerminateWorkflowException("Error evaluating the script " + ee.getMessage());
    }

    /**
     * Converts a polyglot {@link Value} into plain Java types (null, Boolean, String, numbers,
     * List, Map), recursing into arrays, hashes and object members.
     *
     * <p>NOTE(review): a number that fits none of int/long/double falls through to the map
     * conversion below — confirm that is the intended handling.
     */
    private static Object getObject(Value value) {
        if (value.isNull()) return null;
        if (value.isBoolean()) return value.asBoolean();
        if (value.isString()) return value.asString();
        if (value.isNumber()) {
            // Prefer the narrowest Java numeric type the value fits in.
            if (value.fitsInInt()) return value.asInt();
            if (value.fitsInLong()) return value.asLong();
            if (value.fitsInDouble()) return value.asDouble();
        }
        if (value.hasArrayElements()) {
            List<Object> items = new ArrayList<>();
            for (int i = 0; i < value.getArraySize(); i++) {
                items.add(getObject(value.getArrayElement(i)));
            }
            return items;
        }
        // Convert map
        Map<Object, Object> output = new HashMap<>();
        if (value.hasHashEntries()) {
            // JS Map-like values expose hash entries; iterate keys and convert recursively.
            Value keys = value.getHashKeysIterator();
            while (keys.hasIteratorNextElement()) {
                Value key = keys.getIteratorNextElement();
                output.put(getObject(key), getObject(value.getHashValue(key)));
            }
        } else {
            // Plain JS objects expose their properties as members.
            for (String key : value.getMemberKeys()) {
                output.put(key, getObject(value.getMember(key)));
            }
        }
        return output;
    }

    /** Requests immediate cancellation of whatever the context is executing. */
    private static void interrupt(Context context) {
        try {
            // Duration.ZERO: do not wait for the interrupt to be acknowledged.
            context.interrupt(Duration.ZERO);
        } catch (TimeoutException ignored) {
            // Expected when interrupting
        }
    }

    /**
     * Converts a generic object into boolean value. Checks if the Object is of type Boolean and
     * returns the value of the Boolean object. Checks if the Object is of type Number and returns
     * True if the value is greater than 0.
     *
     * @param input Generic object that will be inspected to return a boolean value.
     * @return True or False based on the input provided.
     */
    public static Boolean toBoolean(Object input) {
        if (input instanceof Boolean) {
            return ((Boolean) input);
        } else if (input instanceof Number) {
            return ((Number) input).doubleValue() > 0;
        }
        return false;
    }

    /** Script execution context holder for context pooling. */
    private static class ScriptExecutionContext {
        private final Context context;
        // Cached JS binding scope of the wrapped context; reused across evaluations.
        private final Value bindings;

        public ScriptExecutionContext(Context context) {
            this.context = context;
            this.bindings = context.getBindings("js");
        }

        public Context getContext() {
            return context;
        }

        /** Installs the script input as {@code $} and, when present, the console bridge. */
        public void prepareBindings(Object input, Object console) {
            bindings.putMember("$", input);
            if (console != null) {
                bindings.putMember("console", console);
            }
        }

        /** Removes per-evaluation bindings so state does not leak between pooled uses. */
        public void clearBindings() {
            bindings.removeMember("$");
            bindings.removeMember("console");
        }
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/core/src/main/java/com/netflix/conductor/core/events/SimpleActionProcessor.java | core/src/main/java/com/netflix/conductor/core/events/SimpleActionProcessor.java | /*
* Copyright 2020 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.core.events;
import java.util.*;
import java.util.stream.Collectors;
import org.apache.commons.lang3.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.stereotype.Component;
import org.springframework.util.CollectionUtils;
import com.netflix.conductor.common.metadata.events.EventHandler.Action;
import com.netflix.conductor.common.metadata.events.EventHandler.StartWorkflow;
import com.netflix.conductor.common.metadata.events.EventHandler.TaskDetails;
import com.netflix.conductor.common.metadata.tasks.TaskResult;
import com.netflix.conductor.common.utils.TaskUtils;
import com.netflix.conductor.core.execution.StartWorkflowInput;
import com.netflix.conductor.core.execution.WorkflowExecutor;
import com.netflix.conductor.core.utils.JsonUtils;
import com.netflix.conductor.core.utils.ParametersUtils;
import com.netflix.conductor.metrics.Monitors;
import com.netflix.conductor.model.TaskModel;
import com.netflix.conductor.model.WorkflowModel;
/**
 * Action Processor subscribes to the Event Actions queue and processes the actions (e.g. start
 * workflow, complete task, fail task) declared on event handlers.
 */
@Component
public class SimpleActionProcessor implements ActionProcessor {

    private static final Logger LOGGER = LoggerFactory.getLogger(SimpleActionProcessor.class);

    private final WorkflowExecutor workflowExecutor;
    private final ParametersUtils parametersUtils;
    private final JsonUtils jsonUtils;

    public SimpleActionProcessor(
            WorkflowExecutor workflowExecutor,
            ParametersUtils parametersUtils,
            JsonUtils jsonUtils) {
        this.workflowExecutor = workflowExecutor;
        this.parametersUtils = parametersUtils;
        this.jsonUtils = jsonUtils;
    }

    /**
     * Dispatches a single event-handler action against the incoming event payload.
     *
     * @param action the configured action (start_workflow / complete_task / fail_task)
     * @param payloadObject the raw event payload; expanded in place if the action requests
     *     inline-JSON expansion
     * @param event the event name the action was triggered by
     * @param messageId id of the queue message that carried the event
     * @return the action's output map (workflow id, replaced task fields, or an "error" entry)
     * @throws UnsupportedOperationException for action types this processor does not handle
     */
    public Map<String, Object> execute(
            Action action, Object payloadObject, String event, String messageId) {

        LOGGER.debug(
                "Executing action: {} for event: {} with messageId:{}",
                action.getAction(),
                event,
                messageId);

        Object jsonObject = payloadObject;
        if (action.isExpandInlineJSON()) {
            // Parse JSON strings embedded in the payload into proper maps/lists first.
            jsonObject = jsonUtils.expand(payloadObject);
        }

        switch (action.getAction()) {
            case start_workflow:
                return startWorkflow(action, jsonObject, event, messageId);
            case complete_task:
                return completeTask(
                        action,
                        jsonObject,
                        action.getComplete_task(),
                        TaskModel.Status.COMPLETED,
                        event,
                        messageId);
            case fail_task:
                return completeTask(
                        action,
                        jsonObject,
                        action.getFail_task(),
                        TaskModel.Status.FAILED,
                        event,
                        messageId);
            default:
                break;
        }
        throw new UnsupportedOperationException(
                "Action not supported " + action.getAction() + " for event " + event);
    }

    /**
     * Transitions a task to the given terminal status using values resolved from the event
     * payload.
     *
     * <p>The task is located by taskId when present, otherwise by workflowId + task reference
     * name. Lookup failures are reported via an "error" entry in the returned map (not thrown);
     * failures while updating the task are recorded and rethrown.
     */
    private Map<String, Object> completeTask(
            Action action,
            Object payload,
            TaskDetails taskDetails,
            TaskModel.Status status,
            String event,
            String messageId) {

        // Build the template of values to resolve against the payload (e.g. ${...} expressions).
        Map<String, Object> input = new HashMap<>();
        input.put("workflowId", taskDetails.getWorkflowId());
        input.put("taskId", taskDetails.getTaskId());
        input.put("taskRefName", taskDetails.getTaskRefName());
        input.putAll(taskDetails.getOutput());

        Map<String, Object> replaced = parametersUtils.replace(input, payload);
        String workflowId = (String) replaced.get("workflowId");
        String taskId = (String) replaced.get("taskId");
        String taskRefName = (String) replaced.get("taskRefName");

        TaskModel taskModel = null;
        if (StringUtils.isNotEmpty(taskId)) {
            // Direct lookup by task id takes precedence over workflow + reference name.
            taskModel = workflowExecutor.getTask(taskId);
        } else if (StringUtils.isNotEmpty(workflowId) && StringUtils.isNotEmpty(taskRefName)) {
            WorkflowModel workflow = workflowExecutor.getWorkflow(workflowId, true);
            if (workflow == null) {
                replaced.put("error", "No workflow found with ID: " + workflowId);
                return replaced;
            }
            taskModel = workflow.getTaskByRefName(taskRefName);
            // Task can be loopover task.In such case find corresponding task and update
            List<TaskModel> loopOverTaskList =
                    workflow.getTasks().stream()
                            .filter(
                                    t ->
                                            TaskUtils.removeIterationFromTaskRefName(
                                                            t.getReferenceTaskName())
                                                    .equals(taskRefName))
                            .collect(Collectors.toList());
            if (!loopOverTaskList.isEmpty()) {
                // Find loopover task with the highest iteration value
                taskModel =
                        loopOverTaskList.stream()
                                .sorted(Comparator.comparingInt(TaskModel::getIteration).reversed())
                                .findFirst()
                                .get();
            }
        }

        if (taskModel == null) {
            replaced.put(
                    "error",
                    "No task found with taskId: "
                            + taskId
                            + ", reference name: "
                            + taskRefName
                            + ", workflowId: "
                            + workflowId);
            return replaced;
        }

        // Stamp the resolved values and event provenance onto the task before updating it.
        taskModel.setStatus(status);
        taskModel.setOutputData(replaced);
        taskModel.setOutputMessage(taskDetails.getOutputMessage());
        taskModel.addOutput("conductor.event.messageId", messageId);
        taskModel.addOutput("conductor.event.name", event);

        try {
            workflowExecutor.updateTask(new TaskResult(taskModel.toTask()));
            LOGGER.debug(
                    "Updated task: {} in workflow:{} with status: {} for event: {} for message:{}",
                    taskId,
                    workflowId,
                    status,
                    event,
                    messageId);
        } catch (RuntimeException e) {
            Monitors.recordEventActionError(
                    action.getAction().name(), taskModel.getTaskType(), event);
            LOGGER.error(
                    "Error updating task: {} in workflow: {} in action: {} for event: {} for message: {}",
                    taskDetails.getTaskRefName(),
                    taskDetails.getWorkflowId(),
                    action.getAction(),
                    event,
                    messageId,
                    e);
            // Record the error for the caller, then propagate so the event can be retried.
            replaced.put("error", e.getMessage());
            throw e;
        }
        return replaced;
    }

    /**
     * Starts a workflow as configured on the action, with input resolved from the event payload.
     *
     * <p>Correlation id and taskToDomain may come either from the action definition or from the
     * payload itself; event provenance is injected into the workflow input. Start failures are
     * recorded in the output map and rethrown.
     */
    private Map<String, Object> startWorkflow(
            Action action, Object payload, String event, String messageId) {
        StartWorkflow params = action.getStart_workflow();
        Map<String, Object> output = new HashMap<>();
        try {
            Map<String, Object> inputParams = params.getInput();
            Map<String, Object> workflowInput = parametersUtils.replace(inputParams, payload);

            Map<String, Object> paramsMap = new HashMap<>();
            // extracting taskToDomain map from the event payload
            paramsMap.put("taskToDomain", "${taskToDomain}");
            Optional.ofNullable(params.getCorrelationId())
                    .ifPresent(value -> paramsMap.put("correlationId", value));
            Map<String, Object> replaced = parametersUtils.replace(paramsMap, payload);

            // if taskToDomain is absent from event handler definition, and taskDomain Map is passed
            // as a part of payload
            // then assign payload taskToDomain map to the new workflow instance
            // NOTE(review): unchecked cast — assumes the payload's taskToDomain resolves to a
            // Map<String, String>; verify against event producers.
            final Map<String, String> taskToDomain =
                    params.getTaskToDomain() != null
                            ? params.getTaskToDomain()
                            : (Map<String, String>) replaced.get("taskToDomain");

            workflowInput.put("conductor.event.messageId", messageId);
            workflowInput.put("conductor.event.name", event);

            StartWorkflowInput startWorkflowInput = new StartWorkflowInput();
            startWorkflowInput.setName(params.getName());
            startWorkflowInput.setVersion(params.getVersion());
            // Payload-resolved correlation id wins over the one on the action definition.
            startWorkflowInput.setCorrelationId(
                    Optional.ofNullable(replaced.get("correlationId"))
                            .map(Object::toString)
                            .orElse(params.getCorrelationId()));
            startWorkflowInput.setWorkflowInput(workflowInput);
            startWorkflowInput.setEvent(event);
            if (!CollectionUtils.isEmpty(taskToDomain)) {
                startWorkflowInput.setTaskToDomain(taskToDomain);
            }

            String workflowId = workflowExecutor.startWorkflow(startWorkflowInput);

            output.put("workflowId", workflowId);
            LOGGER.debug(
                    "Started workflow: {}/{}/{} for event: {} for message:{}",
                    params.getName(),
                    params.getVersion(),
                    workflowId,
                    event,
                    messageId);

        } catch (RuntimeException e) {
            Monitors.recordEventActionError(action.getAction().name(), params.getName(), event);
            LOGGER.error(
                    "Error starting workflow: {}, version: {}, for event: {} for message: {}",
                    params.getName(),
                    params.getVersion(),
                    event,
                    messageId,
                    e);
            // Record the error for the caller, then propagate so the event can be retried.
            output.put("error", e.getMessage());
            throw e;
        }
        return output;
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/core/src/main/java/com/netflix/conductor/core/events/DefaultEventProcessor.java | core/src/main/java/com/netflix/conductor/core/events/DefaultEventProcessor.java | /*
* Copyright 2022 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.core.events;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.ThreadFactory;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.concurrent.BasicThreadFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Qualifier;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.retry.support.RetryTemplate;
import org.springframework.stereotype.Component;
import org.springframework.util.CollectionUtils;
import com.netflix.conductor.common.metadata.events.EventExecution;
import com.netflix.conductor.common.metadata.events.EventExecution.Status;
import com.netflix.conductor.common.metadata.events.EventHandler;
import com.netflix.conductor.common.metadata.events.EventHandler.Action;
import com.netflix.conductor.core.config.ConductorProperties;
import com.netflix.conductor.core.events.queue.Message;
import com.netflix.conductor.core.events.queue.ObservableQueue;
import com.netflix.conductor.core.exception.TransientException;
import com.netflix.conductor.core.execution.evaluators.Evaluator;
import com.netflix.conductor.core.utils.JsonUtils;
import com.netflix.conductor.metrics.Monitors;
import com.netflix.conductor.service.ExecutionService;
import com.netflix.conductor.service.MetadataService;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.spotify.futures.CompletableFutures;
import static com.netflix.conductor.core.utils.Utils.isTransientException;
/**
 * Event Processor is used to dispatch actions configured in the event handlers, based on incoming
 * events to the event queues.
 *
 * <p><code>Set conductor.default-event-processor.enabled=false</code> to disable event processing.
 */
@Component
@ConditionalOnProperty(
        name = "conductor.default-event-processor.enabled",
        havingValue = "true",
        matchIfMissing = true)
public class DefaultEventProcessor {

    private static final Logger LOGGER = LoggerFactory.getLogger(DefaultEventProcessor.class);

    private final MetadataService metadataService;
    private final ExecutionService executionService;
    private final ActionProcessor actionProcessor;

    // Fixed-size pool on which event handler actions execute asynchronously.
    // NOTE(review): never shut down explicitly — presumably relies on JVM exit; confirm
    // whether a @PreDestroy hook is needed for clean application shutdown.
    private final ExecutorService eventActionExecutorService;
    private final ObjectMapper objectMapper;
    private final JsonUtils jsonUtils;
    private final boolean isEventMessageIndexingEnabled;
    private final Map<String, Evaluator> evaluators;
    private final RetryTemplate retryTemplate;

    public DefaultEventProcessor(
            ExecutionService executionService,
            MetadataService metadataService,
            ActionProcessor actionProcessor,
            JsonUtils jsonUtils,
            ConductorProperties properties,
            ObjectMapper objectMapper,
            Map<String, Evaluator> evaluators,
            @Qualifier("onTransientErrorRetryTemplate") RetryTemplate retryTemplate) {
        this.executionService = executionService;
        this.metadataService = metadataService;
        this.actionProcessor = actionProcessor;
        this.objectMapper = objectMapper;
        this.jsonUtils = jsonUtils;
        this.evaluators = evaluators;
        this.retryTemplate = retryTemplate;

        if (properties.getEventProcessorThreadCount() <= 0) {
            throw new IllegalStateException(
                    "Cannot set event processor thread count to <=0. To disable event "
                            + "processing, set conductor.default-event-processor.enabled=false.");
        }
        ThreadFactory threadFactory =
                new BasicThreadFactory.Builder()
                        .namingPattern("event-action-executor-thread-%d")
                        .build();
        eventActionExecutorService =
                Executors.newFixedThreadPool(
                        properties.getEventProcessorThreadCount(), threadFactory);
        this.isEventMessageIndexingEnabled = properties.isEventMessageIndexingEnabled();
        LOGGER.info("Event Processing is ENABLED");
    }

    /**
     * Handles a single message received on an event queue: indexes the message (if enabled),
     * executes all event handlers registered for the queue's event, then acks, re-publishes
     * (on transient failure, or for queues without an unack timeout) or nacks the message.
     *
     * @param queue the queue the message arrived on
     * @param msg the received message
     */
    public void handle(ObservableQueue queue, Message msg) {
        List<EventExecution> transientFailures = null;
        boolean executionFailed = false;
        try {
            if (isEventMessageIndexingEnabled) {
                executionService.addMessage(queue.getName(), msg);
            }
            String event = queue.getType() + ":" + queue.getName();
            LOGGER.debug("Evaluating message: {} for event: {}", msg.getId(), event);
            transientFailures = executeEvent(event, msg);
        } catch (Exception e) {
            executionFailed = true;
            LOGGER.error("Error handling message: {} on queue:{}", msg, queue.getName(), e);
            Monitors.recordEventQueueMessagesError(queue.getType(), queue.getName());
        } finally {
            if (!executionFailed && CollectionUtils.isEmpty(transientFailures)) {
                queue.ack(Collections.singletonList(msg));
                LOGGER.debug("Message: {} acked on queue: {}", msg.getId(), queue.getName());
            } else if (queue.rePublishIfNoAck() || !CollectionUtils.isEmpty(transientFailures)) {
                // re-submit this message to the queue, to be retried later
                // This is needed for queues with no unack timeout, since messages are removed
                // from the queue
                queue.publish(Collections.singletonList(msg));
                LOGGER.debug("Message: {} published to queue: {}", msg.getId(), queue.getName());
            } else {
                queue.nack(Collections.singletonList(msg));
                LOGGER.debug("Message: {} nacked on queue: {}", msg.getId(), queue.getName());
            }
            Monitors.recordEventQueueMessagesHandled(queue.getType(), queue.getName());
        }
    }

    /**
     * Executes all the actions configured on all the event handlers triggered by the {@link
     * Message} on the queue. If any of the actions on an event handler fails due to a transient
     * failure, the execution is not persisted such that it can be retried.
     *
     * @param event the fully qualified event name ("queueType:queueName")
     * @param msg the message that triggered the event
     * @return a list of {@link EventExecution} that failed due to transient failures.
     */
    protected List<EventExecution> executeEvent(String event, Message msg) throws Exception {
        List<EventHandler> eventHandlerList;
        List<EventExecution> transientFailures = new ArrayList<>();
        try {
            eventHandlerList = metadataService.getEventHandlersForEvent(event, true);
        } catch (TransientException transientException) {
            // could not even fetch the handlers; mark the whole message for retry
            transientFailures.add(new EventExecution(event, msg.getId()));
            return transientFailures;
        }
        Object payloadObject = getPayloadObject(msg.getPayload());
        for (EventHandler eventHandler : eventHandlerList) {
            String condition = eventHandler.getCondition();
            String evaluatorType = eventHandler.getEvaluatorType();
            // Set default to true so that if condition is not specified, it falls through
            // to process the event.
            boolean success = true;
            if (StringUtils.isNotEmpty(condition) && evaluators.get(evaluatorType) != null) {
                // handler declared an evaluator type that is registered: use it
                Object result =
                        evaluators
                                .get(evaluatorType)
                                .evaluate(condition, jsonUtils.expand(payloadObject));
                success = ScriptEvaluator.toBoolean(result);
            } else if (StringUtils.isNotEmpty(condition)) {
                // no (or unregistered) evaluator type: fall back to the script evaluator
                LOGGER.debug("Checking condition: {} for event: {}", condition, event);
                success = ScriptEvaluator.evalBool(condition, jsonUtils.expand(payloadObject));
            }

            if (!success) {
                // record a SKIPPED execution so the skip decision is auditable
                String id = msg.getId() + "_" + 0;
                EventExecution eventExecution = new EventExecution(id, msg.getId());
                eventExecution.setCreated(System.currentTimeMillis());
                eventExecution.setEvent(eventHandler.getEvent());
                eventExecution.setName(eventHandler.getName());
                eventExecution.setStatus(Status.SKIPPED);
                eventExecution.getOutput().put("msg", msg.getPayload());
                eventExecution.getOutput().put("condition", condition);
                executionService.addEventExecution(eventExecution);
                LOGGER.debug(
                        "Condition: {} not successful for event: {} with payload: {}",
                        condition,
                        eventHandler.getEvent(),
                        msg.getPayload());
                continue;
            }

            CompletableFuture<List<EventExecution>> future =
                    executeActionsForEventHandler(eventHandler, msg);
            future.whenComplete(
                            (result, error) -> {
                                // FIX: "result" is null when the future completed exceptionally;
                                // the previous unconditional result.forEach(..) threw an NPE
                                // inside the callback in that case. The .get() below still
                                // propagates the original failure to the caller.
                                if (result == null) {
                                    return;
                                }
                                result.forEach(
                                        eventExecution -> {
                                            if (error != null
                                                    || eventExecution.getStatus()
                                                            == Status.IN_PROGRESS) {
                                                transientFailures.add(eventExecution);
                                            } else {
                                                executionService.updateEventExecution(
                                                        eventExecution);
                                            }
                                        });
                            })
                    .get();
        }
        return processTransientFailures(transientFailures);
    }

    /**
     * Remove the event executions which failed temporarily.
     *
     * @param eventExecutions The event executions which failed with a transient error.
     * @return The event executions which failed with a transient error.
     */
    protected List<EventExecution> processTransientFailures(List<EventExecution> eventExecutions) {
        eventExecutions.forEach(executionService::removeEventExecution);
        return eventExecutions;
    }

    /**
     * @param eventHandler the {@link EventHandler} for which the actions are to be executed
     * @param msg the {@link Message} that triggered the event
     * @return a {@link CompletableFuture} holding a list of {@link EventExecution}s for the {@link
     *     Action}s executed in the event handler
     */
    protected CompletableFuture<List<EventExecution>> executeActionsForEventHandler(
            EventHandler eventHandler, Message msg) {
        List<CompletableFuture<EventExecution>> futuresList = new ArrayList<>();
        int i = 0;
        for (Action action : eventHandler.getActions()) {
            // one EventExecution per action; suffix keeps ids unique within the message
            String id = msg.getId() + "_" + i++;
            EventExecution eventExecution = new EventExecution(id, msg.getId());
            eventExecution.setCreated(System.currentTimeMillis());
            eventExecution.setEvent(eventHandler.getEvent());
            eventExecution.setName(eventHandler.getName());
            eventExecution.setAction(action.getAction());
            eventExecution.setStatus(Status.IN_PROGRESS);
            // addEventExecution returns false on duplicate id -> dedupes redelivered messages
            if (executionService.addEventExecution(eventExecution)) {
                futuresList.add(
                        CompletableFuture.supplyAsync(
                                () ->
                                        execute(
                                                eventExecution,
                                                action,
                                                getPayloadObject(msg.getPayload())),
                                eventActionExecutorService));
            } else {
                LOGGER.warn("Duplicate delivery/execution of message: {}", msg.getId());
            }
        }
        return CompletableFutures.allAsList(futuresList);
    }

    /**
     * @param eventExecution the instance of {@link EventExecution}
     * @param action the {@link Action} to be executed for the event
     * @param payload the {@link Message#getPayload()}
     * @return the event execution updated with execution output, if the execution is
     *     completed/failed with non-transient error the input event execution, if the execution
     *     failed due to transient error
     */
    protected EventExecution execute(EventExecution eventExecution, Action action, Object payload) {
        try {
            LOGGER.debug(
                    "Executing action: {} for event: {} with messageId: {} with payload: {}",
                    action.getAction(),
                    eventExecution.getId(),
                    eventExecution.getMessageId(),
                    payload);
            // TODO: Switch to @Retryable annotation on SimpleActionProcessor.execute()
            Map<String, Object> output =
                    retryTemplate.execute(
                            context ->
                                    actionProcessor.execute(
                                            action,
                                            payload,
                                            eventExecution.getEvent(),
                                            eventExecution.getMessageId()));
            if (output != null) {
                eventExecution.getOutput().putAll(output);
            }
            eventExecution.setStatus(Status.COMPLETED);
            Monitors.recordEventExecutionSuccess(
                    eventExecution.getEvent(),
                    eventExecution.getName(),
                    eventExecution.getAction().name());
        } catch (RuntimeException e) {
            LOGGER.error(
                    "Error executing action: {} for event: {} with messageId: {}",
                    action.getAction(),
                    eventExecution.getEvent(),
                    eventExecution.getMessageId(),
                    e);
            if (!isTransientException(e)) {
                // not a transient error, fail the event execution
                eventExecution.setStatus(Status.FAILED);
                eventExecution.getOutput().put("exception", e.getMessage());
                Monitors.recordEventExecutionError(
                        eventExecution.getEvent(),
                        eventExecution.getName(),
                        eventExecution.getAction().name(),
                        e.getClass().getSimpleName());
            }
            // transient errors leave the execution IN_PROGRESS so the caller retries it
        }
        return eventExecution;
    }

    /**
     * Parses the raw message payload as JSON; if parsing fails, the raw string is returned
     * unchanged (payloads are not required to be JSON).
     */
    private Object getPayloadObject(String payload) {
        Object payloadObject = null;
        if (payload != null) {
            try {
                payloadObject = objectMapper.readValue(payload, Object.class);
            } catch (Exception e) {
                payloadObject = payload;
            }
        }
        return payloadObject;
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/core/src/main/java/com/netflix/conductor/core/events/DefaultEventQueueManager.java | core/src/main/java/com/netflix/conductor/core/events/DefaultEventQueueManager.java | /*
* Copyright 2022 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.core.events;
import java.util.*;
import java.util.concurrent.ConcurrentHashMap;
import java.util.stream.Collectors;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.context.Lifecycle;
import org.springframework.scheduling.annotation.Scheduled;
import org.springframework.stereotype.Component;
import com.netflix.conductor.common.metadata.events.EventHandler;
import com.netflix.conductor.core.LifecycleAwareComponent;
import com.netflix.conductor.core.events.queue.DefaultEventQueueProcessor;
import com.netflix.conductor.core.events.queue.Message;
import com.netflix.conductor.core.events.queue.ObservableQueue;
import com.netflix.conductor.dao.EventHandlerDAO;
import com.netflix.conductor.metrics.Monitors;
import com.netflix.conductor.model.TaskModel.Status;
/**
 * Manages the event queues registered in the system and sets up listeners for these.
 *
 * <p>Manages the lifecycle of -
 *
 * <ul>
 *   <li>Queues registered with event handlers
 *   <li>Default event queues that Conductor listens on
 * </ul>
 *
 * @see DefaultEventQueueProcessor
 */
@Component
@ConditionalOnProperty(
        name = "conductor.default-event-processor.enabled",
        havingValue = "true",
        matchIfMissing = true)
public class DefaultEventQueueManager extends LifecycleAwareComponent implements EventQueueManager {

    private static final Logger LOGGER = LoggerFactory.getLogger(DefaultEventQueueManager.class);

    private final EventHandlerDAO eventHandlerDAO;
    private final EventQueues eventQueues;
    private final DefaultEventProcessor defaultEventProcessor;
    // event name -> queue currently listened on; reconciled by refreshEventQueues()
    private final Map<String, ObservableQueue> eventToQueueMap = new ConcurrentHashMap<>();
    private final Map<Status, ObservableQueue> defaultQueues;

    public DefaultEventQueueManager(
            Map<Status, ObservableQueue> defaultQueues,
            EventHandlerDAO eventHandlerDAO,
            EventQueues eventQueues,
            DefaultEventProcessor defaultEventProcessor) {
        this.defaultQueues = defaultQueues;
        this.eventHandlerDAO = eventHandlerDAO;
        this.eventQueues = eventQueues;
        this.defaultEventProcessor = defaultEventProcessor;
    }

    /**
     * @return Returns a map of queues which are active. Key is event name and value is queue URI
     */
    @Override
    public Map<String, String> getQueues() {
        Map<String, String> queues = new HashMap<>();
        eventToQueueMap.forEach((key, value) -> queues.put(key, value.getName()));
        return queues;
    }

    /**
     * @return a map keyed by event name; the value maps the queue name to its current depth
     */
    @Override
    public Map<String, Map<String, Long>> getQueueSizes() {
        Map<String, Map<String, Long>> queues = new HashMap<>();
        eventToQueueMap.forEach(
                (key, value) -> {
                    Map<String, Long> size = new HashMap<>();
                    size.put(value.getName(), value.size());
                    queues.put(key, size);
                });
        return queues;
    }

    /** Starts listening on all registered event queues and the default status queues. */
    @Override
    public void doStart() {
        eventToQueueMap.forEach(
                (event, queue) -> {
                    LOGGER.info("Start listening for events: {}", event);
                    queue.start();
                });
        defaultQueues.forEach(
                (status, queue) -> {
                    LOGGER.info(
                            "Start listening on default queue {} for status {}",
                            queue.getName(),
                            status);
                    queue.start();
                });
    }

    /** Stops listening on all registered event queues and the default status queues. */
    @Override
    public void doStop() {
        eventToQueueMap.forEach(
                (event, queue) -> {
                    LOGGER.info("Stop listening for events: {}", event);
                    queue.stop();
                });
        defaultQueues.forEach(
                (status, queue) -> {
                    // FIX: the arguments were swapped relative to the placeholders — "status"
                    // filled the "default queue {}" slot and vice versa (compare with doStart).
                    LOGGER.info(
                            "Stop listening on default queue {} for status {}",
                            queue.getName(),
                            status);
                    queue.stop();
                });
    }

    /**
     * Periodically reconciles the set of listened queues with the set of active event handlers:
     * starts listening on queues for newly active events, stops and removes queues whose events
     * no longer have an active handler, and publishes queue-depth metrics.
     */
    @Scheduled(fixedDelay = 60_000)
    public void refreshEventQueues() {
        try {
            Set<String> events =
                    eventHandlerDAO.getAllEventHandlers().stream()
                            .filter(EventHandler::isActive)
                            .map(EventHandler::getEvent)
                            .collect(Collectors.toSet());

            List<ObservableQueue> createdQueues = new LinkedList<>();
            events.forEach(
                    event ->
                            eventToQueueMap.computeIfAbsent(
                                    event,
                                    s -> {
                                        ObservableQueue q = eventQueues.getQueue(event);
                                        createdQueues.add(q);
                                        return q;
                                    }));

            // start listening on all of the created queues
            createdQueues.stream()
                    .filter(Objects::nonNull)
                    .peek(Lifecycle::start)
                    .forEach(this::listen);

            // stop and drop queues whose event no longer has an active handler
            Set<String> removed = new HashSet<>(eventToQueueMap.keySet());
            removed.removeAll(events);
            removed.forEach(
                    key -> {
                        ObservableQueue queue = eventToQueueMap.remove(key);
                        try {
                            queue.stop();
                        } catch (Exception e) {
                            LOGGER.error("Failed to stop queue: " + queue, e);
                        }
                    });

            Map<String, Map<String, Long>> eventToQueueSize = getQueueSizes();
            eventToQueueSize.forEach(
                    (event, queueMap) -> {
                        Map.Entry<String, Long> queueSize = queueMap.entrySet().iterator().next();
                        Monitors.recordEventQueueDepth(queueSize.getKey(), queueSize.getValue());
                    });

            LOGGER.debug("Event queues: {}", eventToQueueMap.keySet());
            LOGGER.debug("Stored queue: {}", events);
            LOGGER.debug("Removed queue: {}", removed);
        } catch (Exception e) {
            Monitors.error(getClass().getSimpleName(), "refresh");
            LOGGER.error("refresh event queues failed", e);
        }
    }

    /** Routes every message observed on the queue to the event processor. */
    private void listen(ObservableQueue queue) {
        queue.observe().subscribe((Message msg) -> defaultEventProcessor.handle(queue, msg));
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/core/src/main/java/com/netflix/conductor/core/events/ActionProcessor.java | core/src/main/java/com/netflix/conductor/core/events/ActionProcessor.java | /*
* Copyright 2022 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.core.events;
import java.util.Map;
import com.netflix.conductor.common.metadata.events.EventHandler;
/**
 * Executes the actions configured on an {@link EventHandler} when an event message is received.
 */
public interface ActionProcessor {

    /**
     * Executes the given event handler action for an incoming event message.
     *
     * @param action the {@link EventHandler.Action} to execute
     * @param payloadObject the payload of the triggering message (deserialized JSON when the
     *     payload parses as JSON, otherwise the raw string — see the caller's payload handling)
     * @param event the fully qualified event name, e.g. "queueType:queueName"
     * @param messageId the id of the message that triggered the event
     * @return the output produced by executing the action; merged into the event execution's
     *     output by the caller
     */
    Map<String, Object> execute(
            EventHandler.Action action, Object payloadObject, String event, String messageId);
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/core/src/main/java/com/netflix/conductor/core/events/EventQueueProvider.java | core/src/main/java/com/netflix/conductor/core/events/EventQueueProvider.java | /*
* Copyright 2022 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.core.events;
import org.springframework.lang.NonNull;
import com.netflix.conductor.core.events.queue.ObservableQueue;
/** Factory/registry abstraction for creating {@link ObservableQueue} instances of one queue type. */
public interface EventQueueProvider {

    /**
     * @return the queue type served by this provider (the scheme part of event URIs of the form
     *     {@code type:queueURI})
     */
    String getQueueType();

    /**
     * Creates or reads the {@link ObservableQueue} for the given <code>queueURI</code>.
     *
     * @param queueURI The URI of the queue.
     * @return The {@link ObservableQueue} implementation for the <code>queueURI</code>.
     * @throws IllegalArgumentException thrown when an {@link ObservableQueue} can not be created
     *     for the <code>queueURI</code>.
     */
    @NonNull
    ObservableQueue getQueue(String queueURI) throws IllegalArgumentException;
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/core/src/main/java/com/netflix/conductor/core/events/EventQueueManager.java | core/src/main/java/com/netflix/conductor/core/events/EventQueueManager.java | /*
* Copyright 2022 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.core.events;
import java.util.Map;
/** Read-only view of the event queues currently managed/listened on by the system. */
public interface EventQueueManager {

    /**
     * @return a map of the active event queues; key is the event name and value is the queue name
     */
    Map<String, String> getQueues();

    /**
     * @return a map keyed by event name whose value maps the queue name to its current depth
     */
    Map<String, Map<String, Long>> getQueueSizes();
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/core/src/main/java/com/netflix/conductor/core/events/EventQueues.java | core/src/main/java/com/netflix/conductor/core/events/EventQueues.java | /*
* Copyright 2022 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.core.events;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Qualifier;
import org.springframework.lang.NonNull;
import org.springframework.stereotype.Component;
import com.netflix.conductor.core.events.queue.ObservableQueue;
import com.netflix.conductor.core.utils.ParametersUtils;
/** Holders for internal event queues */
@Component
public class EventQueues {

    public static final String EVENT_QUEUE_PROVIDERS_QUALIFIER = "EventQueueProviders";

    private static final Logger LOGGER = LoggerFactory.getLogger(EventQueues.class);

    private final ParametersUtils parametersUtils;
    private final Map<String, EventQueueProvider> providers;

    public EventQueues(
            @Qualifier(EVENT_QUEUE_PROVIDERS_QUALIFIER) Map<String, EventQueueProvider> providers,
            ParametersUtils parametersUtils) {
        this.providers = providers;
        this.parametersUtils = parametersUtils;
    }

    /** Returns the fully qualified class names of all registered queue providers. */
    public List<String> getProviders() {
        return providers.values().stream()
                .map(provider -> provider.getClass().getName())
                .collect(Collectors.toList());
    }

    /**
     * Resolves an event expression of the form {@code type:queueURI} to its queue.
     *
     * @param eventType the event expression; parameter placeholders are substituted first
     * @return the {@link ObservableQueue} for the expression
     * @throws IllegalArgumentException if the expression has no {@code type:} prefix or the type
     *     has no registered provider
     */
    @NonNull
    public ObservableQueue getQueue(String eventType) {
        String event = parametersUtils.replace(eventType).toString();
        int separatorIndex = event.indexOf(':');
        // guard: the expression must contain a "type:" prefix
        if (separatorIndex == -1) {
            throw new IllegalArgumentException("Illegal event " + event);
        }
        String type = event.substring(0, separatorIndex);
        String queueURI = event.substring(separatorIndex + 1);
        EventQueueProvider provider = providers.get(type);
        // guard: the type must map to a registered provider
        if (provider == null) {
            throw new IllegalArgumentException("Unknown queue type " + type);
        }
        return provider.getQueue(queueURI);
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/core/src/main/java/com/netflix/conductor/core/events/queue/DefaultEventQueueProcessor.java | core/src/main/java/com/netflix/conductor/core/events/queue/DefaultEventQueueProcessor.java | /*
* Copyright 2022 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.core.events.queue;
import java.util.*;
import org.apache.commons.lang3.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.stereotype.Component;
import com.netflix.conductor.common.metadata.tasks.Task;
import com.netflix.conductor.common.metadata.tasks.TaskResult;
import com.netflix.conductor.common.utils.TaskUtils;
import com.netflix.conductor.core.exception.NotFoundException;
import com.netflix.conductor.core.execution.WorkflowExecutor;
import com.netflix.conductor.model.TaskModel;
import com.netflix.conductor.model.TaskModel.Status;
import com.netflix.conductor.model.WorkflowModel;
import com.fasterxml.jackson.core.JsonParseException;
import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import static com.netflix.conductor.common.metadata.tasks.TaskType.TASK_TYPE_WAIT;
/**
 * Monitors and processes messages on the default event queues that Conductor listens on.
 *
 * <p>The default event queue type is controlled using the property: <code>
 * conductor.default-event-queue.type</code>
 */
@Component
@ConditionalOnProperty(
        name = "conductor.default-event-queue-processor.enabled",
        havingValue = "true",
        matchIfMissing = true)
public class DefaultEventQueueProcessor {

    private static final Logger LOGGER = LoggerFactory.getLogger(DefaultEventQueueProcessor.class);

    // task status -> queue whose messages move the referenced task to that status
    private final Map<Status, ObservableQueue> queues;
    private final WorkflowExecutor workflowExecutor;
    private static final TypeReference<Map<String, Object>> _mapType = new TypeReference<>() {};
    private final ObjectMapper objectMapper;

    public DefaultEventQueueProcessor(
            Map<Status, ObservableQueue> queues,
            WorkflowExecutor workflowExecutor,
            ObjectMapper objectMapper) {
        this.queues = queues;
        this.workflowExecutor = workflowExecutor;
        this.objectMapper = objectMapper;
        queues.forEach(this::startMonitor);
        // FIX: queues.entrySet().size() was a roundabout way of saying queues.size()
        LOGGER.info("DefaultEventQueueProcessor initialized with {} queues", queues.size());
    }

    /**
     * Subscribes to the queue; for each message, locates the target task — by taskId, by
     * taskRefName, or (fallback) the single in-flight WAIT task — in the workflow named by the
     * message's "externalId" payload, and updates it to the queue's status. Malformed messages
     * are acked and dropped so they do not block the queue.
     */
    private void startMonitor(Status status, ObservableQueue queue) {
        queue.observe()
                .subscribe(
                        (Message msg) -> {
                            try {
                                LOGGER.debug("Got message {}", msg.getPayload());
                                String payload = msg.getPayload();
                                JsonNode payloadJSON = objectMapper.readTree(payload);
                                // externalId is itself a JSON string embedded in the payload
                                String externalId = getValue("externalId", payloadJSON);
                                if (externalId == null || "".equals(externalId)) {
                                    LOGGER.error("No external Id found in the payload {}", payload);
                                    queue.ack(Collections.singletonList(msg));
                                    return;
                                }

                                JsonNode json = objectMapper.readTree(externalId);
                                String workflowId = getValue("workflowId", json);
                                String taskRefName = getValue("taskRefName", json);
                                String taskId = getValue("taskId", json);
                                if (workflowId == null || "".equals(workflowId)) {
                                    // This is a bad message, we cannot process it
                                    LOGGER.error(
                                            "No workflow id found in the message. {}", payload);
                                    queue.ack(Collections.singletonList(msg));
                                    return;
                                }
                                WorkflowModel workflow =
                                        workflowExecutor.getWorkflow(workflowId, true);
                                Optional<TaskModel> optionalTaskModel;
                                if (StringUtils.isNotEmpty(taskId)) {
                                    // explicit task id: match the non-terminal task with that id
                                    optionalTaskModel =
                                            workflow.getTasks().stream()
                                                    .filter(
                                                            task ->
                                                                    !task.getStatus().isTerminal()
                                                                            && task.getTaskId()
                                                                                    .equals(taskId))
                                                    .findFirst();
                                } else if (StringUtils.isEmpty(taskRefName)) {
                                    LOGGER.error(
                                            "No taskRefName found in the message. If there is only one WAIT task, will mark it as completed. {}",
                                            payload);
                                    optionalTaskModel =
                                            workflow.getTasks().stream()
                                                    .filter(
                                                            task ->
                                                                    !task.getStatus().isTerminal()
                                                                            && task.getTaskType()
                                                                                    .equals(
                                                                                            TASK_TYPE_WAIT))
                                                    .findFirst();
                                } else {
                                    // match by reference name, ignoring loop-iteration suffixes
                                    optionalTaskModel =
                                            workflow.getTasks().stream()
                                                    .filter(
                                                            task ->
                                                                    !task.getStatus().isTerminal()
                                                                            && TaskUtils
                                                                                    .removeIterationFromTaskRefName(
                                                                                            task
                                                                                                    .getReferenceTaskName())
                                                                                    .equals(
                                                                                            taskRefName))
                                                    .findFirst();
                                }
                                if (optionalTaskModel.isEmpty()) {
                                    LOGGER.error(
                                            "No matching tasks found to be marked as completed for workflow {}, taskRefName {}, taskId {}",
                                            workflowId,
                                            taskRefName,
                                            taskId);
                                    queue.ack(Collections.singletonList(msg));
                                    return;
                                }

                                Task task = optionalTaskModel.get().toTask();
                                task.setStatus(TaskModel.mapToTaskStatus(status));
                                // merge the full message payload into the task's output
                                task.getOutputData()
                                        .putAll(objectMapper.convertValue(payloadJSON, _mapType));
                                workflowExecutor.updateTask(new TaskResult(task));

                                List<String> failures = queue.ack(Collections.singletonList(msg));
                                if (!failures.isEmpty()) {
                                    LOGGER.error("Not able to ack the messages {}", failures);
                                }
                            } catch (JsonParseException e) {
                                LOGGER.error("Bad message? : {} ", msg, e);
                                queue.ack(Collections.singletonList(msg));
                            } catch (NotFoundException nfe) {
                                // FIX: the previous log line dropped the exception entirely
                                LOGGER.error(
                                        "Workflow ID specified is not valid for this environment",
                                        nfe);
                                queue.ack(Collections.singletonList(msg));
                            } catch (Exception e) {
                                LOGGER.error("Error processing message: {}", msg, e);
                            }
                        },
                        (Throwable t) -> LOGGER.error(t.getMessage(), t));
        // FIX: parameterized logging instead of string concatenation (same output)
        LOGGER.info("QueueListener::STARTED...listening for {}", queue.getName());
    }

    /** Extracts the text value of {@code fieldName} anywhere in the JSON tree, or null. */
    private String getValue(String fieldName, JsonNode json) {
        JsonNode node = json.findValue(fieldName);
        if (node == null) {
            return null;
        }
        return node.textValue();
    }

    /**
     * @return the current depth of each default queue, keyed by queue name
     */
    public Map<String, Long> size() {
        Map<String, Long> size = new HashMap<>();
        queues.forEach((key, queue) -> size.put(queue.getName(), queue.size()));
        return size;
    }

    /**
     * @return the URI of each default queue, keyed by the task status it serves
     */
    public Map<Status, String> queues() {
        Map<Status, String> size = new HashMap<>();
        queues.forEach((key, queue) -> size.put(key, queue.getURI()));
        return size;
    }

    /**
     * Publishes an update message that targets a task by its reference name.
     *
     * @param workflowId the workflow containing the task
     * @param taskRefName the task's reference name
     * @param output output to merge into the task
     * @param status the status to move the task to (must have a configured queue)
     */
    public void updateByTaskRefName(
            String workflowId, String taskRefName, Map<String, Object> output, Status status)
            throws Exception {
        Map<String, Object> externalIdMap = new HashMap<>();
        externalIdMap.put("workflowId", workflowId);
        externalIdMap.put("taskRefName", taskRefName);
        update(externalIdMap, output, status);
    }

    /**
     * Publishes an update message that targets a task by its task id.
     *
     * @param workflowId the workflow containing the task
     * @param taskId the task's id
     * @param output output to merge into the task
     * @param status the status to move the task to (must have a configured queue)
     */
    public void updateByTaskId(
            String workflowId, String taskId, Map<String, Object> output, Status status)
            throws Exception {
        Map<String, Object> externalIdMap = new HashMap<>();
        externalIdMap.put("workflowId", workflowId);
        externalIdMap.put("taskId", taskId);
        update(externalIdMap, output, status);
    }

    /** Serializes the update as a message and publishes it to the queue for {@code status}. */
    private void update(
            Map<String, Object> externalIdMap, Map<String, Object> output, Status status)
            throws Exception {
        Map<String, Object> outputMap = new HashMap<>();
        outputMap.put("externalId", objectMapper.writeValueAsString(externalIdMap));
        outputMap.putAll(output);
        Message msg =
                new Message(
                        UUID.randomUUID().toString(),
                        objectMapper.writeValueAsString(outputMap),
                        null);
        ObservableQueue queue = queues.get(status);
        if (queue == null) {
            throw new IllegalArgumentException(
                    "There is no queue for handling " + status.toString() + " status");
        }
        queue.publish(Collections.singletonList(msg));
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/core/src/main/java/com/netflix/conductor/core/events/queue/Message.java | core/src/main/java/com/netflix/conductor/core/events/queue/Message.java | /*
* Copyright 2022 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.core.events.queue;
import java.util.Objects;
/**
 * A message flowing through an event queue: an id, an opaque payload, an optional
 * broker receipt (used for acknowledgement), and a priority between 0 and 99.
 */
public class Message {

    private String payload;
    private String id;
    private String receipt;
    // Priority in [0, 99]; higher-priority messages are delivered first. Defaults to 0.
    private int priority;

    public Message() {}

    /**
     * Creates a message with the default priority (0).
     *
     * @param id unique message id
     * @param payload message body
     * @param receipt broker receipt used to acknowledge the message
     */
    public Message(String id, String payload, String receipt) {
        this.payload = payload;
        this.id = id;
        this.receipt = receipt;
    }

    /**
     * Creates a message with an explicit priority.
     *
     * @param id unique message id
     * @param payload message body
     * @param receipt broker receipt used to acknowledge the message
     * @param priority priority between 0 and 99 (higher is delivered first)
     */
    public Message(String id, String payload, String receipt, int priority) {
        this.payload = payload;
        this.id = id;
        this.receipt = receipt;
        this.priority = priority;
    }

    /**
     * @return the payload
     */
    public String getPayload() {
        return payload;
    }

    /**
     * @param payload the payload to set
     */
    public void setPayload(String payload) {
        this.payload = payload;
    }

    /**
     * @return the id
     */
    public String getId() {
        return id;
    }

    /**
     * @param id the id to set
     */
    public void setId(String id) {
        this.id = id;
    }

    /**
     * @return Receipt attached to the message
     */
    public String getReceipt() {
        return receipt;
    }

    /**
     * @param receipt Receipt attached to the message
     */
    public void setReceipt(String receipt) {
        this.receipt = receipt;
    }

    /**
     * Gets the message priority
     *
     * @return priority of message.
     */
    public int getPriority() {
        return priority;
    }

    /**
     * Sets the message priority (between 0 and 99). Higher priority message is retrieved ahead of
     * lower priority ones.
     *
     * @param priority the priority of message (between 0 and 99)
     */
    public void setPriority(int priority) {
        this.priority = priority;
    }

    @Override
    public String toString() {
        return id;
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) {
            return true;
        }
        if (o == null || getClass() != o.getClass()) {
            return false;
        }
        Message message = (Message) o;
        // Compare the primitive directly; Objects.equals(int, int) would autobox both
        // operands into Integers on every call for no benefit.
        return priority == message.priority
                && Objects.equals(payload, message.payload)
                && Objects.equals(id, message.id)
                && Objects.equals(receipt, message.receipt);
    }

    @Override
    public int hashCode() {
        return Objects.hash(payload, id, receipt, priority);
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/core/src/main/java/com/netflix/conductor/core/events/queue/ObservableQueue.java | core/src/main/java/com/netflix/conductor/core/events/queue/ObservableQueue.java | /*
* Copyright 2022 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.core.events.queue;
import java.util.List;
import org.springframework.context.Lifecycle;
import rx.Observable;
public interface ObservableQueue extends Lifecycle {

    /**
     * Opens a stream of incoming messages from this queue.
     *
     * @return An observable for the given queue
     */
    Observable<Message> observe();

    /**
     * @return Type of the queue (e.g. the provider prefix such as "conductor", "sqs", ...)
     */
    String getType();

    /**
     * @return Name of the queue
     */
    String getName();

    /**
     * @return URI identifier for the queue.
     */
    String getURI();

    /**
     * Acknowledges (removes) the given messages from the queue.
     *
     * @param messages to be ack'ed
     * @return the id of the ones which could not be ack'ed
     */
    List<String> ack(List<Message> messages);

    /**
     * Negatively acknowledges messages so they become visible again. Default is a no-op
     * for queue implementations that do not support nack.
     *
     * @param messages to be Nack'ed
     */
    default void nack(List<Message> messages) {}

    /**
     * @param messages Messages to be published
     */
    void publish(List<Message> messages);

    /**
     * Used to determine if the queue supports unack/visibility timeout such that the messages will
     * re-appear on the queue after a specific period and are available to be picked up again and
     * retried.
     *
     * @return - false if the queue message need not be re-published to the queue for retriability -
     *     true if the message must be re-published to the queue for retriability
     */
    default boolean rePublishIfNoAck() {
        return false;
    }

    /**
     * Extend the lease of the unacknowledged message for longer period.
     *
     * @param message Message for which the timeout has to be changed
     * @param unackTimeout timeout in milliseconds for which the unack lease should be extended.
     *     (replaces the current value with this value)
     */
    void setUnackTimeout(Message message, long unackTimeout);

    /**
     * @return Size of the queue - no. messages pending. Note: Depending upon the implementation,
     *     this can be an approximation
     */
    long size();

    /** Used to close queue instance prior to remove from queues */
    default void close() {}
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/core/src/main/java/com/netflix/conductor/core/events/queue/ConductorObservableQueue.java | core/src/main/java/com/netflix/conductor/core/events/queue/ConductorObservableQueue.java | /*
* Copyright 2022 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.core.events.queue;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.netflix.conductor.core.config.ConductorProperties;
import com.netflix.conductor.dao.QueueDAO;
import com.netflix.conductor.metrics.Monitors;
import rx.Observable;
import rx.Observable.OnSubscribe;
import rx.Scheduler;
/**
 * An {@link ObservableQueue} implementation using the underlying {@link QueueDAO} implementation.
 */
public class ConductorObservableQueue implements ObservableQueue {

    private static final Logger LOGGER = LoggerFactory.getLogger(ConductorObservableQueue.class);

    private static final String QUEUE_TYPE = "conductor";

    private final String queueName;
    private final QueueDAO queueDAO;
    // Interval between successive polls of the underlying QueueDAO, in milliseconds.
    private final long pollTimeMS;
    // Long-poll timeout handed to QueueDAO.pollMessages, in milliseconds.
    private final int longPollTimeout;
    // Maximum number of messages fetched per poll.
    private final int pollCount;
    private final Scheduler scheduler;
    private volatile boolean running;

    ConductorObservableQueue(
            String queueName,
            QueueDAO queueDAO,
            ConductorProperties properties,
            Scheduler scheduler) {
        this.queueName = queueName;
        this.queueDAO = queueDAO;
        this.pollTimeMS = properties.getEventQueuePollInterval().toMillis();
        this.pollCount = properties.getEventQueuePollCount();
        this.longPollTimeout = (int) properties.getEventQueueLongPollTimeout().toMillis();
        this.scheduler = scheduler;
    }

    @Override
    public Observable<Message> observe() {
        OnSubscribe<Message> subscriber = getOnSubscribe();
        return Observable.create(subscriber);
    }

    @Override
    public List<String> ack(List<Message> messages) {
        // NOTE(review): the ObservableQueue contract says this should return the ids that
        // could NOT be ack'ed, but this implementation acks each message and returns every
        // id regardless of the queueDAO.ack(...) outcome — confirm whether callers depend
        // on this before changing it.
        for (Message msg : messages) {
            queueDAO.ack(queueName, msg.getId());
        }
        return messages.stream().map(Message::getId).collect(Collectors.toList());
    }

    // @Override was missing here in the original even though this implements
    // ObservableQueue.setUnackTimeout; added for consistency with the other methods.
    @Override
    public void setUnackTimeout(Message message, long unackTimeout) {
        queueDAO.setUnackTimeout(queueName, message.getId(), unackTimeout);
    }

    @Override
    public void publish(List<Message> messages) {
        queueDAO.push(queueName, messages);
    }

    @Override
    public long size() {
        return queueDAO.getSize(queueName);
    }

    @Override
    public String getType() {
        return QUEUE_TYPE;
    }

    @Override
    public String getName() {
        return queueName;
    }

    @Override
    public String getURI() {
        return queueName;
    }

    /**
     * Polls the underlying QueueDAO once, recording metrics for the number of messages
     * received. Exceptions are logged and counted; an empty list is returned on failure.
     */
    private List<Message> receiveMessages() {
        try {
            List<Message> messages = queueDAO.pollMessages(queueName, pollCount, longPollTimeout);
            Monitors.recordEventQueueMessagesProcessed(QUEUE_TYPE, queueName, messages.size());
            Monitors.recordEventQueuePollSize(queueName, messages.size());
            return messages;
        } catch (Exception exception) {
            LOGGER.error("Exception while getting messages from queueDAO", exception);
            Monitors.recordObservableQMessageReceivedErrors(QUEUE_TYPE);
        }
        return new ArrayList<>();
    }

    /**
     * Builds the subscription logic: on a fixed interval, poll for messages (unless the
     * component has been stopped) and emit each one to the subscriber.
     */
    private OnSubscribe<Message> getOnSubscribe() {
        return subscriber -> {
            Observable<Long> interval =
                    Observable.interval(pollTimeMS, TimeUnit.MILLISECONDS, scheduler);
            interval.flatMap(
                            (Long x) -> {
                                if (!isRunning()) {
                                    LOGGER.debug(
                                            "Component stopped, skip listening for messages from Conductor Queue");
                                    return Observable.from(Collections.emptyList());
                                }
                                List<Message> messages = receiveMessages();
                                return Observable.from(messages);
                            })
                    .subscribe(subscriber::onNext, subscriber::onError);
        };
    }

    @Override
    public void start() {
        LOGGER.info("Started listening to {}:{}", getClass().getSimpleName(), queueName);
        running = true;
    }

    @Override
    public void stop() {
        LOGGER.info("Stopped listening to {}:{}", getClass().getSimpleName(), queueName);
        running = false;
    }

    @Override
    public boolean isRunning() {
        return running;
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/core/src/main/java/com/netflix/conductor/core/events/queue/ConductorEventQueueProvider.java | core/src/main/java/com/netflix/conductor/core/events/queue/ConductorEventQueueProvider.java | /*
* Copyright 2022 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.core.events.queue;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.lang.NonNull;
import org.springframework.stereotype.Component;
import com.netflix.conductor.core.config.ConductorProperties;
import com.netflix.conductor.core.events.EventQueueProvider;
import com.netflix.conductor.dao.QueueDAO;
import rx.Scheduler;
/**
* Default provider for {@link com.netflix.conductor.core.events.queue.ObservableQueue} that listens
* on the <i>conductor</i> queue prefix.
*
* <p><code>Set conductor.event-queues.default.enabled=false</code> to disable the default queue.
*
* @see ConductorObservableQueue
*/
@Component
@ConditionalOnProperty(
        name = "conductor.event-queues.default.enabled",
        havingValue = "true",
        matchIfMissing = true)
public class ConductorEventQueueProvider implements EventQueueProvider {

    private static final Logger LOGGER = LoggerFactory.getLogger(ConductorEventQueueProvider.class);

    /** Cache of queues already materialized, keyed by queue URI. */
    private final Map<String, ObservableQueue> queues = new ConcurrentHashMap<>();

    private final QueueDAO queueDAO;
    private final ConductorProperties properties;
    private final Scheduler scheduler;

    public ConductorEventQueueProvider(
            QueueDAO queueDAO, ConductorProperties properties, Scheduler scheduler) {
        this.queueDAO = queueDAO;
        this.properties = properties;
        this.scheduler = scheduler;
    }

    @Override
    public String getQueueType() {
        return "conductor";
    }

    /**
     * Returns the queue for the given URI, lazily creating and caching a
     * {@link ConductorObservableQueue} on first request.
     */
    @Override
    @NonNull
    public ObservableQueue getQueue(String queueURI) {
        return queues.computeIfAbsent(
                queueURI,
                uri -> new ConductorObservableQueue(uri, queueDAO, properties, scheduler));
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/core/src/main/java/com/netflix/conductor/core/reconciliation/WorkflowRepairService.java | core/src/main/java/com/netflix/conductor/core/reconciliation/WorkflowRepairService.java | /*
* Copyright 2022 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.core.reconciliation;
import java.util.Optional;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.function.Predicate;
import org.apache.commons.lang3.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.stereotype.Service;
import com.netflix.conductor.annotations.VisibleForTesting;
import com.netflix.conductor.common.metadata.tasks.TaskType;
import com.netflix.conductor.core.config.ConductorProperties;
import com.netflix.conductor.core.exception.NotFoundException;
import com.netflix.conductor.core.execution.tasks.SystemTaskRegistry;
import com.netflix.conductor.core.execution.tasks.WorkflowSystemTask;
import com.netflix.conductor.core.utils.QueueUtils;
import com.netflix.conductor.core.utils.Utils;
import com.netflix.conductor.dao.ExecutionDAO;
import com.netflix.conductor.dao.QueueDAO;
import com.netflix.conductor.metrics.Monitors;
import com.netflix.conductor.model.TaskModel;
import com.netflix.conductor.model.WorkflowModel;
/**
* A helper service that tries to keep ExecutionDAO and QueueDAO in sync, based on the task or
* workflow state.
*
* <p>This service expects that the underlying Queueing layer implements {@link
* QueueDAO#containsMessage(String, String)} method. This can be controlled with <code>
* conductor.workflow-repair-service.enabled</code> property.
*/
@Service
@ConditionalOnProperty(name = "conductor.workflow-repair-service.enabled", havingValue = "true")
public class WorkflowRepairService {

    private static final Logger LOGGER = LoggerFactory.getLogger(WorkflowRepairService.class);

    private final ExecutionDAO executionDAO;
    private final QueueDAO queueDAO;
    private final ConductorProperties properties;
    // Made final for consistency with the other constructor-injected collaborators;
    // it is only ever assigned in the constructor.
    private final SystemTaskRegistry systemTaskRegistry;

    /*
    For system task -> Verify the task isAsync() and not isAsyncComplete() or isAsyncComplete() in SCHEDULED state,
    and in SCHEDULED or IN_PROGRESS state. (Example: SUB_WORKFLOW tasks in SCHEDULED state)
    For simple task -> Verify the task is in SCHEDULED state.
    */
    private final Predicate<TaskModel> isTaskRepairable =
            task -> {
                if (systemTaskRegistry.isSystemTask(task.getTaskType())) { // If system task
                    WorkflowSystemTask workflowSystemTask =
                            systemTaskRegistry.get(task.getTaskType());
                    return workflowSystemTask.isAsync()
                            && (!workflowSystemTask.isAsyncComplete(task)
                                    || (workflowSystemTask.isAsyncComplete(task)
                                            && task.getStatus() == TaskModel.Status.SCHEDULED))
                            && (task.getStatus() == TaskModel.Status.IN_PROGRESS
                                    || task.getStatus() == TaskModel.Status.SCHEDULED);
                } else { // Else if simple task
                    return task.getStatus() == TaskModel.Status.SCHEDULED;
                }
            };

    public WorkflowRepairService(
            ExecutionDAO executionDAO,
            QueueDAO queueDAO,
            ConductorProperties properties,
            SystemTaskRegistry systemTaskRegistry) {
        this.executionDAO = executionDAO;
        this.queueDAO = queueDAO;
        this.properties = properties;
        this.systemTaskRegistry = systemTaskRegistry;
        LOGGER.info("WorkflowRepairService Initialized");
    }

    /**
     * Verify and repair if the workflowId exists in deciderQueue, and then if each scheduled task
     * has relevant message in the queue.
     *
     * @param workflowId id of the workflow to check
     * @param includeTasks when true, also verify/repair each task of the workflow
     * @return true if the workflow (or, with includeTasks, its LAST inspected task) was repaired;
     *     note that the task loop overwrites the flag per task rather than OR-ing results
     */
    public boolean verifyAndRepairWorkflow(String workflowId, boolean includeTasks) {
        WorkflowModel workflow = executionDAO.getWorkflow(workflowId, includeTasks);
        AtomicBoolean repaired = new AtomicBoolean(false);
        repaired.set(verifyAndRepairDeciderQueue(workflow));
        if (includeTasks) {
            workflow.getTasks().forEach(task -> repaired.set(verifyAndRepairTask(task)));
        }
        return repaired.get();
    }

    /**
     * Verify and repair tasks in a workflow.
     *
     * @throws NotFoundException if no workflow exists with the given id
     */
    public void verifyAndRepairWorkflowTasks(String workflowId) {
        WorkflowModel workflow =
                Optional.ofNullable(executionDAO.getWorkflow(workflowId, true))
                        .orElseThrow(
                                () ->
                                        new NotFoundException(
                                                "Could not find workflow: " + workflowId));
        verifyAndRepairWorkflowTasks(workflow);
    }

    /** Verify and repair tasks in a workflow. */
    public void verifyAndRepairWorkflowTasks(WorkflowModel workflow) {
        workflow.getTasks().forEach(this::verifyAndRepairTask);
        // repair the parent workflow if needed
        verifyAndRepairWorkflow(workflow.getParentWorkflowId());
    }

    /**
     * Verify and fix if Workflow decider queue contains this workflowId.
     *
     * @return true - if the workflow was queued for repair
     */
    private boolean verifyAndRepairDeciderQueue(WorkflowModel workflow) {
        if (!workflow.getStatus().isTerminal()) {
            return verifyAndRepairWorkflow(workflow.getWorkflowId());
        }
        return false;
    }

    /**
     * Verify if ExecutionDAO and QueueDAO agree for the provided task.
     *
     * @param task the task to be repaired
     * @return true - if the task was queued for repair
     */
    @VisibleForTesting
    boolean verifyAndRepairTask(TaskModel task) {
        if (isTaskRepairable.test(task)) {
            // Ensure QueueDAO contains this taskId
            String taskQueueName = QueueUtils.getQueueName(task);
            if (!queueDAO.containsMessage(taskQueueName, task.getTaskId())) {
                queueDAO.push(taskQueueName, task.getTaskId(), task.getCallbackAfterSeconds());
                LOGGER.info(
                        "Task {} in workflow {} re-queued for repairs",
                        task.getTaskId(),
                        task.getWorkflowInstanceId());
                Monitors.recordQueueMessageRepushFromRepairService(task.getTaskDefName());
                return true;
            }
        } else if (task.getTaskType().equals(TaskType.TASK_TYPE_SUB_WORKFLOW)
                && task.getStatus() == TaskModel.Status.IN_PROGRESS) {
            // An IN_PROGRESS sub-workflow task whose sub-workflow already reached a terminal
            // state is out of sync; propagate the terminal state to the parent task.
            WorkflowModel subWorkflow = executionDAO.getWorkflow(task.getSubWorkflowId(), false);
            if (subWorkflow.getStatus().isTerminal()) {
                LOGGER.info(
                        "Repairing sub workflow task {} for sub workflow {} in workflow {}",
                        task.getTaskId(),
                        task.getSubWorkflowId(),
                        task.getWorkflowInstanceId());
                repairSubWorkflowTask(task, subWorkflow);
                return true;
            }
        }
        return false;
    }

    /**
     * Re-queues the workflow on the decider queue when the queue has lost it.
     *
     * @return true if the workflow id was pushed back onto the decider queue
     */
    private boolean verifyAndRepairWorkflow(String workflowId) {
        if (StringUtils.isNotEmpty(workflowId)) {
            String queueName = Utils.DECIDER_QUEUE;
            if (!queueDAO.containsMessage(queueName, workflowId)) {
                queueDAO.push(
                        queueName, workflowId, properties.getWorkflowOffsetTimeout().getSeconds());
                LOGGER.info("Workflow {} re-queued for repairs", workflowId);
                Monitors.recordQueueMessageRepushFromRepairService(queueName);
                return true;
            }
            return false;
        }
        return false;
    }

    /**
     * Maps the terminal sub-workflow status onto the parent SUB_WORKFLOW task, copies the
     * sub-workflow output, and persists the task.
     */
    private void repairSubWorkflowTask(TaskModel task, WorkflowModel subWorkflow) {
        switch (subWorkflow.getStatus()) {
            case COMPLETED:
                task.setStatus(TaskModel.Status.COMPLETED);
                break;
            case FAILED:
                task.setStatus(TaskModel.Status.FAILED);
                break;
            case TERMINATED:
                task.setStatus(TaskModel.Status.CANCELED);
                break;
            case TIMED_OUT:
                task.setStatus(TaskModel.Status.TIMED_OUT);
                break;
        }
        task.addOutput(subWorkflow.getOutput());
        executionDAO.updateTask(task);
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/core/src/main/java/com/netflix/conductor/core/reconciliation/WorkflowSweeper.java | core/src/main/java/com/netflix/conductor/core/reconciliation/WorkflowSweeper.java | /*
* Copyright 2022 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.core.reconciliation;
import java.time.Instant;
import java.util.Optional;
import java.util.Random;
import java.util.concurrent.CompletableFuture;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.scheduling.annotation.Async;
import org.springframework.stereotype.Component;
import com.netflix.conductor.annotations.VisibleForTesting;
import com.netflix.conductor.common.metadata.tasks.TaskDef;
import com.netflix.conductor.common.metadata.tasks.TaskType;
import com.netflix.conductor.core.WorkflowContext;
import com.netflix.conductor.core.config.ConductorProperties;
import com.netflix.conductor.core.dal.ExecutionDAOFacade;
import com.netflix.conductor.core.exception.NotFoundException;
import com.netflix.conductor.core.execution.WorkflowExecutor;
import com.netflix.conductor.dao.QueueDAO;
import com.netflix.conductor.metrics.Monitors;
import com.netflix.conductor.model.TaskModel;
import com.netflix.conductor.model.TaskModel.Status;
import com.netflix.conductor.model.WorkflowModel;
import com.netflix.conductor.service.ExecutionLockService;
import static com.netflix.conductor.core.config.SchedulerConfiguration.SWEEPER_EXECUTOR_NAME;
import static com.netflix.conductor.core.utils.Utils.DECIDER_QUEUE;
@Component
public class WorkflowSweeper {

    private static final Logger LOGGER = LoggerFactory.getLogger(WorkflowSweeper.class);

    private final ConductorProperties properties;
    private final WorkflowExecutor workflowExecutor;
    // May be null when the repair service is disabled (it is injected as an Optional).
    private final WorkflowRepairService workflowRepairService;
    private final QueueDAO queueDAO;
    private final ExecutionDAOFacade executionDAOFacade;
    private final ExecutionLockService executionLockService;

    private static final String CLASS_NAME = WorkflowSweeper.class.getSimpleName();

    public WorkflowSweeper(
            WorkflowExecutor workflowExecutor,
            Optional<WorkflowRepairService> workflowRepairService,
            ConductorProperties properties,
            QueueDAO queueDAO,
            ExecutionDAOFacade executionDAOFacade,
            ExecutionLockService executionLockService) {
        this.properties = properties;
        this.queueDAO = queueDAO;
        this.workflowExecutor = workflowExecutor;
        this.executionDAOFacade = executionDAOFacade;
        this.workflowRepairService = workflowRepairService.orElse(null);
        this.executionLockService = executionLockService;
        LOGGER.info("WorkflowSweeper initialized.");
    }

    /** Asynchronous wrapper around {@link #sweep(String)} run on the sweeper executor. */
    @Async(SWEEPER_EXECUTOR_NAME)
    public CompletableFuture<Void> sweepAsync(String workflowId) {
        sweep(workflowId);
        return CompletableFuture.completedFuture(null);
    }

    /**
     * Runs one sweep iteration for the given workflow: optionally repairs its tasks, runs the
     * decider, removes terminal workflows from the decider queue, and otherwise postpones
     * (unacks) the workflow with a jittered timeout so it is re-swept later.
     */
    public void sweep(String workflowId) {
        WorkflowContext workflowContext = new WorkflowContext(properties.getAppId());
        WorkflowContext.set(workflowContext);
        WorkflowModel workflow = null;
        try {
            if (!executionLockService.acquireLock(workflowId)) {
                // NOTE(review): returning here still runs the finally block, so releaseLock
                // is invoked even though the lock was never acquired — confirm
                // ExecutionLockService.releaseLock is safe to call in that case.
                return;
            }
            workflow = executionDAOFacade.getWorkflowModel(workflowId, true);
            LOGGER.debug("Running sweeper for workflow {}", workflowId);
            if (workflowRepairService != null) {
                // Verify and repair tasks in the workflow.
                workflowRepairService.verifyAndRepairWorkflowTasks(workflow);
            }
            long decideStartTime = System.currentTimeMillis();
            workflow = workflowExecutor.decide(workflow.getWorkflowId());
            Monitors.recordWorkflowDecisionTime(System.currentTimeMillis() - decideStartTime);
            if (workflow != null && workflow.getStatus().isTerminal()) {
                // Terminal workflows need no further sweeping.
                queueDAO.remove(DECIDER_QUEUE, workflowId);
                return;
            }
        } catch (NotFoundException nfe) {
            queueDAO.remove(DECIDER_QUEUE, workflowId);
            LOGGER.info(
                    "Workflow NOT found for id:{}. Removed it from decider queue", workflowId, nfe);
            return;
        } catch (Exception e) {
            Monitors.error(CLASS_NAME, "sweep");
            LOGGER.error("Error running sweep for " + workflowId, e);
        } finally {
            executionLockService.releaseLock(workflowId);
        }
        long workflowOffsetTimeout =
                workflowOffsetWithJitter(properties.getWorkflowOffsetTimeout().getSeconds());
        if (workflow != null) {
            long startTime = Instant.now().toEpochMilli();
            unack(workflow, workflowOffsetTimeout);
            long endTime = Instant.now().toEpochMilli();
            Monitors.recordUnackTime(workflow.getWorkflowName(), endTime - startTime);
        } else {
            LOGGER.warn(
                    "Workflow with {} id can not be found. Attempting to unack using the id",
                    workflowId);
            queueDAO.setUnackTimeout(DECIDER_QUEUE, workflowId, workflowOffsetTimeout * 1000);
        }
    }

    /**
     * Computes how long to postpone the workflow on the decider queue based on the state of its
     * first IN_PROGRESS or SCHEDULED task, then extends the unack timeout accordingly.
     *
     * @param workflowModel workflow whose tasks drive the postpone duration
     * @param workflowOffsetTimeout fallback postpone duration (seconds), already jittered
     */
    @VisibleForTesting
    void unack(WorkflowModel workflowModel, long workflowOffsetTimeout) {
        long postponeDurationSeconds = 0;
        for (TaskModel taskModel : workflowModel.getTasks()) {
            if (taskModel.getStatus() == Status.IN_PROGRESS) {
                if (taskModel.getTaskType().equals(TaskType.TASK_TYPE_WAIT)) {
                    if (taskModel.getWaitTimeout() == 0) {
                        postponeDurationSeconds = workflowOffsetTimeout;
                    } else {
                        // Postpone until the wait timeout elapses (never negative).
                        long deltaInSeconds =
                                (taskModel.getWaitTimeout() - System.currentTimeMillis()) / 1000;
                        postponeDurationSeconds = (deltaInSeconds > 0) ? deltaInSeconds : 0;
                    }
                } else if (taskModel.getTaskType().equals(TaskType.TASK_TYPE_HUMAN)) {
                    postponeDurationSeconds = workflowOffsetTimeout;
                } else {
                    // +1 so the sweep lands just after the response timeout expires.
                    postponeDurationSeconds =
                            (taskModel.getResponseTimeoutSeconds() != 0)
                                    ? taskModel.getResponseTimeoutSeconds() + 1
                                    : workflowOffsetTimeout;
                }
                // Cap the postpone duration for IN_PROGRESS tasks.
                if (postponeDurationSeconds
                        > properties.getMaxPostponeDurationSeconds().getSeconds()) {
                    postponeDurationSeconds =
                            properties.getMaxPostponeDurationSeconds().getSeconds();
                }
                break;
            } else if (taskModel.getStatus() == Status.SCHEDULED) {
                Optional<TaskDef> taskDefinition = taskModel.getTaskDefinition();
                if (taskDefinition.isPresent()) {
                    TaskDef taskDef = taskDefinition.get();
                    if (taskDef.getPollTimeoutSeconds() != null
                            && taskDef.getPollTimeoutSeconds() != 0) {
                        postponeDurationSeconds = taskDef.getPollTimeoutSeconds() + 1;
                    } else {
                        postponeDurationSeconds =
                                (workflowModel.getWorkflowDefinition().getTimeoutSeconds() != 0)
                                        ? workflowModel.getWorkflowDefinition().getTimeoutSeconds()
                                                + 1
                                        : workflowOffsetTimeout;
                    }
                } else {
                    postponeDurationSeconds =
                            (workflowModel.getWorkflowDefinition().getTimeoutSeconds() != 0)
                                    ? workflowModel.getWorkflowDefinition().getTimeoutSeconds() + 1
                                    : workflowOffsetTimeout;
                }
                break;
            }
        }
        queueDAO.setUnackTimeout(
                DECIDER_QUEUE, workflowModel.getWorkflowId(), postponeDurationSeconds * 1000);
    }

    /**
     * jitter will be +- (1/3) workflowOffsetTimeout for example, if workflowOffsetTimeout is 45
     * seconds, this function returns values between [30-60] seconds
     *
     * @param workflowOffsetTimeout base timeout in seconds
     * @return the base timeout plus a uniform jitter in [-timeout/3, +timeout/3]
     */
    @VisibleForTesting
    long workflowOffsetWithJitter(long workflowOffsetTimeout) {
        long range = workflowOffsetTimeout / 3;
        // ThreadLocalRandom avoids allocating (and seeding) a new Random on every sweep.
        long jitter =
                java.util.concurrent.ThreadLocalRandom.current().nextInt((int) (2 * range + 1))
                        - range;
        return workflowOffsetTimeout + jitter;
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/core/src/main/java/com/netflix/conductor/core/reconciliation/WorkflowReconciler.java | core/src/main/java/com/netflix/conductor/core/reconciliation/WorkflowReconciler.java | /*
* Copyright 2022 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.core.reconciliation;
import java.util.List;
import java.util.concurrent.CompletableFuture;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.scheduling.annotation.Scheduled;
import org.springframework.stereotype.Component;
import com.netflix.conductor.core.LifecycleAwareComponent;
import com.netflix.conductor.core.config.ConductorProperties;
import com.netflix.conductor.dao.QueueDAO;
import com.netflix.conductor.metrics.Monitors;
import static com.netflix.conductor.core.utils.Utils.DECIDER_QUEUE;
/**
* Periodically polls all running workflows in the system and evaluates them for timeouts and/or
* maintain consistency.
*/
@Component
@ConditionalOnProperty(
        name = "conductor.workflow-reconciler.enabled",
        havingValue = "true",
        matchIfMissing = true)
public class WorkflowReconciler extends LifecycleAwareComponent {

    private static final Logger LOGGER = LoggerFactory.getLogger(WorkflowReconciler.class);

    private final WorkflowSweeper workflowSweeper;
    private final QueueDAO queueDAO;
    /** Number of workflow ids popped from the decider queue per sweep cycle. */
    private final int sweeperThreadCount;
    /** Poll timeout (milliseconds) when popping workflow ids from the decider queue. */
    private final int sweeperWorkflowPollTimeout;

    public WorkflowReconciler(
            WorkflowSweeper workflowSweeper, QueueDAO queueDAO, ConductorProperties properties) {
        this.workflowSweeper = workflowSweeper;
        this.queueDAO = queueDAO;
        this.sweeperThreadCount = properties.getSweeperThreadCount();
        this.sweeperWorkflowPollTimeout =
                (int) properties.getSweeperWorkflowPollTimeout().toMillis();
        LOGGER.info(
                "WorkflowReconciler initialized with {} sweeper threads",
                properties.getSweeperThreadCount());
    }

    /**
     * Periodically pops a batch of workflow ids from the decider queue, sweeps them all
     * asynchronously, waits for completion, and records the remaining queue depth.
     */
    @Scheduled(
            fixedDelayString = "${conductor.sweep-frequency.millis:500}",
            initialDelayString = "${conductor.sweep-frequency.millis:500}")
    public void pollAndSweep() {
        try {
            // Guard clause: do nothing while the component is stopped.
            if (!isRunning()) {
                LOGGER.debug("Component stopped, skip workflow sweep");
                return;
            }
            List<String> workflowIds =
                    queueDAO.pop(DECIDER_QUEUE, sweeperThreadCount, sweeperWorkflowPollTimeout);
            if (workflowIds != null) {
                // wait for all workflow ids to be "swept"
                CompletableFuture<?>[] sweeps =
                        workflowIds.stream()
                                .map(workflowSweeper::sweepAsync)
                                .toArray(CompletableFuture[]::new);
                CompletableFuture.allOf(sweeps).get();
                LOGGER.debug(
                        "Sweeper processed {} from the decider queue",
                        String.join(",", workflowIds));
            }
            // NOTE: Disabling the sweeper implicitly disables this metric.
            recordQueueDepth();
        } catch (Exception e) {
            Monitors.error(WorkflowReconciler.class.getSimpleName(), "poll");
            LOGGER.error("Error when polling for workflows", e);
            if (e instanceof InterruptedException) {
                // Restore interrupted state...
                Thread.currentThread().interrupt();
            }
        }
    }

    /** Publishes the current decider-queue depth as a gauge metric. */
    private void recordQueueDepth() {
        int currentQueueSize = queueDAO.getSize(DECIDER_QUEUE);
        Monitors.recordGauge(DECIDER_QUEUE, currentQueueSize);
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/core/src/main/java/com/netflix/conductor/core/config/ConductorProperties.java | core/src/main/java/com/netflix/conductor/core/config/ConductorProperties.java | /*
* Copyright 2021 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.core.config;
import java.time.Duration;
import java.time.temporal.ChronoUnit;
import java.util.HashMap;
import java.util.Map;
import java.util.Properties;
import org.springframework.boot.context.properties.ConfigurationProperties;
import org.springframework.boot.convert.DataSizeUnit;
import org.springframework.boot.convert.DurationUnit;
import org.springframework.util.unit.DataSize;
import org.springframework.util.unit.DataUnit;
import com.netflix.conductor.model.TaskModel;
/**
 * Typed configuration for the {@code conductor.app.*} property namespace. Fields are bound by
 * Spring Boot; {@link org.springframework.boot.convert.DurationUnit} / {@link
 * org.springframework.boot.convert.DataSizeUnit} annotations define the unit assumed when a bare
 * number is supplied for a {@link Duration} / {@link DataSize} property.
 */
@ConfigurationProperties("conductor.app")
public class ConductorProperties {
    /**
     * Name of the stack within which the app is running. e.g. devint, testing, staging, prod etc.
     */
    private String stack = "test";
    /** The id with which the app has been registered. */
    private String appId = "conductor";
    /** The maximum number of threads to be allocated to the executor service threadpool. */
    private int executorServiceMaxThreadCount = 50;
    /** The timeout duration to set when a workflow is pushed to the decider queue. */
    @DurationUnit(ChronoUnit.SECONDS)
    private Duration workflowOffsetTimeout = Duration.ofSeconds(30);
    /**
     * The maximum timeout duration to set when a workflow with running task is pushed to the
     * decider queue.
     */
    @DurationUnit(ChronoUnit.SECONDS)
    private Duration maxPostponeDurationSeconds = Duration.ofSeconds(3600);
    /** The number of threads to use to do background sweep on active workflows. */
    private int sweeperThreadCount = Runtime.getRuntime().availableProcessors() * 2;
    /** The timeout (in milliseconds) for the polling of workflows to be swept. */
    private Duration sweeperWorkflowPollTimeout = Duration.ofMillis(2000);
    /** The number of threads to configure the threadpool in the event processor. */
    private int eventProcessorThreadCount = 2;
    /** Used to enable/disable the indexing of messages within event payloads. */
    private boolean eventMessageIndexingEnabled = true;
    /** Used to enable/disable the indexing of event execution results. */
    private boolean eventExecutionIndexingEnabled = true;
    /** Used to enable/disable the workflow execution lock. */
    private boolean workflowExecutionLockEnabled = false;
    /** The time (in milliseconds) for which the lock is leased for. */
    private Duration lockLeaseTime = Duration.ofMillis(60000);
    /**
     * The time (in milliseconds) for which the thread will block in an attempt to acquire the lock.
     */
    private Duration lockTimeToTry = Duration.ofMillis(500);
    /**
     * The time (in seconds) that is used to consider if a worker is actively polling for a task.
     */
    @DurationUnit(ChronoUnit.SECONDS)
    private Duration activeWorkerLastPollTimeout = Duration.ofSeconds(10);
    /**
     * The time (in seconds) for which a task execution will be postponed if being rate limited or
     * concurrent execution limited.
     */
    @DurationUnit(ChronoUnit.SECONDS)
    private Duration taskExecutionPostponeDuration = Duration.ofSeconds(60);
    /** Used to enable/disable the indexing of tasks. */
    private boolean taskIndexingEnabled = true;
    /** Used to enable/disable the indexing of task execution logs. */
    private boolean taskExecLogIndexingEnabled = true;
    /** Used to enable/disable asynchronous indexing to elasticsearch. */
    private boolean asyncIndexingEnabled = false;
    /** The number of threads to be used within the threadpool for system task workers. */
    private int systemTaskWorkerThreadCount = Runtime.getRuntime().availableProcessors() * 2;
    /**
     * The max number of tasks to be polled in one batch by the system task workers. Defaults to
     * the value of {@code systemTaskWorkerThreadCount} at construction time.
     */
    private int systemTaskMaxPollCount = systemTaskWorkerThreadCount;
    /**
     * The interval (in seconds) after which a system task will be checked by the system task worker
     * for completion.
     */
    @DurationUnit(ChronoUnit.SECONDS)
    private Duration systemTaskWorkerCallbackDuration = Duration.ofSeconds(30);
    /**
     * The interval (in milliseconds) at which system task queues will be polled by the system task
     * workers.
     */
    private Duration systemTaskWorkerPollInterval = Duration.ofMillis(50);
    /** The namespace for the system task workers to provide instance level isolation. */
    private String systemTaskWorkerExecutionNamespace = "";
    /**
     * The number of threads to be used within the threadpool for system task workers in each
     * isolation group.
     */
    private int isolatedSystemTaskWorkerThreadCount = 1;
    /**
     * The duration of workflow execution which qualifies a workflow as a short-running workflow
     * when async indexing to elasticsearch is enabled.
     */
    @DurationUnit(ChronoUnit.SECONDS)
    private Duration asyncUpdateShortRunningWorkflowDuration = Duration.ofSeconds(30);
    /**
     * The delay with which short-running workflows will be updated in the elasticsearch index when
     * async indexing is enabled.
     */
    @DurationUnit(ChronoUnit.SECONDS)
    private Duration asyncUpdateDelay = Duration.ofSeconds(60);
    /**
     * Used to control the validation for owner email field as mandatory within workflow and task
     * definitions.
     */
    private boolean ownerEmailMandatory = true;
    /**
     * The number of threads to be used in Scheduler used for polling events from multiple event
     * queues. By default, a thread count equal to the number of CPU cores is chosen.
     */
    private int eventQueueSchedulerPollThreadCount = Runtime.getRuntime().availableProcessors();
    /** The time interval (in milliseconds) at which the default event queues will be polled. */
    private Duration eventQueuePollInterval = Duration.ofMillis(100);
    /** The number of messages to be polled from a default event queue in a single operation. */
    private int eventQueuePollCount = 10;
    /** The timeout (in milliseconds) for the poll operation on the default event queue. */
    private Duration eventQueueLongPollTimeout = Duration.ofMillis(1000);
    /**
     * The threshold of the workflow input payload size in KB beyond which the payload will be
     * stored in {@link com.netflix.conductor.common.utils.ExternalPayloadStorage}.
     */
    @DataSizeUnit(DataUnit.KILOBYTES)
    private DataSize workflowInputPayloadSizeThreshold = DataSize.ofKilobytes(5120L);
    /**
     * The maximum threshold of the workflow input payload size in KB beyond which input will be
     * rejected and the workflow will be marked as FAILED.
     */
    @DataSizeUnit(DataUnit.KILOBYTES)
    private DataSize maxWorkflowInputPayloadSizeThreshold = DataSize.ofKilobytes(10240L);
    /**
     * The threshold of the workflow output payload size in KB beyond which the payload will be
     * stored in {@link com.netflix.conductor.common.utils.ExternalPayloadStorage}.
     */
    @DataSizeUnit(DataUnit.KILOBYTES)
    private DataSize workflowOutputPayloadSizeThreshold = DataSize.ofKilobytes(5120L);
    /**
     * The maximum threshold of the workflow output payload size in KB beyond which output will be
     * rejected and the workflow will be marked as FAILED.
     */
    @DataSizeUnit(DataUnit.KILOBYTES)
    private DataSize maxWorkflowOutputPayloadSizeThreshold = DataSize.ofKilobytes(10240L);
    /**
     * The threshold of the task input payload size in KB beyond which the payload will be stored in
     * {@link com.netflix.conductor.common.utils.ExternalPayloadStorage}.
     */
    @DataSizeUnit(DataUnit.KILOBYTES)
    private DataSize taskInputPayloadSizeThreshold = DataSize.ofKilobytes(3072L);
    /**
     * The maximum threshold of the task input payload size in KB beyond which the task input will
     * be rejected and the task will be marked as FAILED_WITH_TERMINAL_ERROR.
     */
    @DataSizeUnit(DataUnit.KILOBYTES)
    private DataSize maxTaskInputPayloadSizeThreshold = DataSize.ofKilobytes(10240L);
    /**
     * The threshold of the task output payload size in KB beyond which the payload will be stored
     * in {@link com.netflix.conductor.common.utils.ExternalPayloadStorage}.
     */
    @DataSizeUnit(DataUnit.KILOBYTES)
    private DataSize taskOutputPayloadSizeThreshold = DataSize.ofKilobytes(3072L);
    /**
     * The maximum threshold of the task output payload size in KB beyond which the task output will
     * be rejected and the task will be marked as FAILED_WITH_TERMINAL_ERROR.
     */
    @DataSizeUnit(DataUnit.KILOBYTES)
    private DataSize maxTaskOutputPayloadSizeThreshold = DataSize.ofKilobytes(10240L);
    /**
     * The maximum threshold of the workflow variables payload size in KB beyond which the task
     * changes will be rejected and the task will be marked as FAILED_WITH_TERMINAL_ERROR.
     */
    @DataSizeUnit(DataUnit.KILOBYTES)
    private DataSize maxWorkflowVariablesPayloadSizeThreshold = DataSize.ofKilobytes(256L);
    /** Used to limit the size of task execution logs. */
    private int taskExecLogSizeLimit = 10;
    /**
     * This property defines the number of poll counts (executions) after which SystemTasks
     * implementing getEvaluationOffset should begin postponing the next execution.
     *
     * @see
     *     com.netflix.conductor.core.execution.tasks.WorkflowSystemTask#getEvaluationOffset(TaskModel,
     *     long)
     * @see com.netflix.conductor.core.execution.tasks.Join#getEvaluationOffset(TaskModel, long)
     */
    private int systemTaskPostponeThreshold = 200;
    /**
     * Timeout used by {@link com.netflix.conductor.core.execution.tasks.SystemTaskWorker} when
     * polling, i.e.: call to {@link com.netflix.conductor.dao.QueueDAO#pop(String, int, int)}.
     */
    @DurationUnit(ChronoUnit.MILLIS)
    private Duration systemTaskQueuePopTimeout = Duration.ofMillis(100);
    public String getStack() {
        return stack;
    }
    public void setStack(String stack) {
        this.stack = stack;
    }
    public String getAppId() {
        return appId;
    }
    public void setAppId(String appId) {
        this.appId = appId;
    }
    public int getExecutorServiceMaxThreadCount() {
        return executorServiceMaxThreadCount;
    }
    public void setExecutorServiceMaxThreadCount(int executorServiceMaxThreadCount) {
        this.executorServiceMaxThreadCount = executorServiceMaxThreadCount;
    }
    public Duration getWorkflowOffsetTimeout() {
        return workflowOffsetTimeout;
    }
    public void setWorkflowOffsetTimeout(Duration workflowOffsetTimeout) {
        this.workflowOffsetTimeout = workflowOffsetTimeout;
    }
    public Duration getMaxPostponeDurationSeconds() {
        return maxPostponeDurationSeconds;
    }
    public void setMaxPostponeDurationSeconds(Duration maxPostponeDurationSeconds) {
        this.maxPostponeDurationSeconds = maxPostponeDurationSeconds;
    }
    public int getSweeperThreadCount() {
        return sweeperThreadCount;
    }
    public void setSweeperThreadCount(int sweeperThreadCount) {
        this.sweeperThreadCount = sweeperThreadCount;
    }
    public Duration getSweeperWorkflowPollTimeout() {
        return sweeperWorkflowPollTimeout;
    }
    public void setSweeperWorkflowPollTimeout(Duration sweeperWorkflowPollTimeout) {
        this.sweeperWorkflowPollTimeout = sweeperWorkflowPollTimeout;
    }
    public int getEventProcessorThreadCount() {
        return eventProcessorThreadCount;
    }
    public void setEventProcessorThreadCount(int eventProcessorThreadCount) {
        this.eventProcessorThreadCount = eventProcessorThreadCount;
    }
    public boolean isEventMessageIndexingEnabled() {
        return eventMessageIndexingEnabled;
    }
    public void setEventMessageIndexingEnabled(boolean eventMessageIndexingEnabled) {
        this.eventMessageIndexingEnabled = eventMessageIndexingEnabled;
    }
    public boolean isEventExecutionIndexingEnabled() {
        return eventExecutionIndexingEnabled;
    }
    public void setEventExecutionIndexingEnabled(boolean eventExecutionIndexingEnabled) {
        this.eventExecutionIndexingEnabled = eventExecutionIndexingEnabled;
    }
    public boolean isWorkflowExecutionLockEnabled() {
        return workflowExecutionLockEnabled;
    }
    public void setWorkflowExecutionLockEnabled(boolean workflowExecutionLockEnabled) {
        this.workflowExecutionLockEnabled = workflowExecutionLockEnabled;
    }
    public Duration getLockLeaseTime() {
        return lockLeaseTime;
    }
    public void setLockLeaseTime(Duration lockLeaseTime) {
        this.lockLeaseTime = lockLeaseTime;
    }
    public Duration getLockTimeToTry() {
        return lockTimeToTry;
    }
    public void setLockTimeToTry(Duration lockTimeToTry) {
        this.lockTimeToTry = lockTimeToTry;
    }
    public Duration getActiveWorkerLastPollTimeout() {
        return activeWorkerLastPollTimeout;
    }
    public void setActiveWorkerLastPollTimeout(Duration activeWorkerLastPollTimeout) {
        this.activeWorkerLastPollTimeout = activeWorkerLastPollTimeout;
    }
    public Duration getTaskExecutionPostponeDuration() {
        return taskExecutionPostponeDuration;
    }
    public void setTaskExecutionPostponeDuration(Duration taskExecutionPostponeDuration) {
        this.taskExecutionPostponeDuration = taskExecutionPostponeDuration;
    }
    public boolean isTaskExecLogIndexingEnabled() {
        return taskExecLogIndexingEnabled;
    }
    public void setTaskExecLogIndexingEnabled(boolean taskExecLogIndexingEnabled) {
        this.taskExecLogIndexingEnabled = taskExecLogIndexingEnabled;
    }
    public boolean isTaskIndexingEnabled() {
        return taskIndexingEnabled;
    }
    public void setTaskIndexingEnabled(boolean taskIndexingEnabled) {
        this.taskIndexingEnabled = taskIndexingEnabled;
    }
    public boolean isAsyncIndexingEnabled() {
        return asyncIndexingEnabled;
    }
    public void setAsyncIndexingEnabled(boolean asyncIndexingEnabled) {
        this.asyncIndexingEnabled = asyncIndexingEnabled;
    }
    public int getSystemTaskWorkerThreadCount() {
        return systemTaskWorkerThreadCount;
    }
    public void setSystemTaskWorkerThreadCount(int systemTaskWorkerThreadCount) {
        this.systemTaskWorkerThreadCount = systemTaskWorkerThreadCount;
    }
    public int getSystemTaskMaxPollCount() {
        return systemTaskMaxPollCount;
    }
    public void setSystemTaskMaxPollCount(int systemTaskMaxPollCount) {
        this.systemTaskMaxPollCount = systemTaskMaxPollCount;
    }
    public Duration getSystemTaskWorkerCallbackDuration() {
        return systemTaskWorkerCallbackDuration;
    }
    public void setSystemTaskWorkerCallbackDuration(Duration systemTaskWorkerCallbackDuration) {
        this.systemTaskWorkerCallbackDuration = systemTaskWorkerCallbackDuration;
    }
    public Duration getSystemTaskWorkerPollInterval() {
        return systemTaskWorkerPollInterval;
    }
    public void setSystemTaskWorkerPollInterval(Duration systemTaskWorkerPollInterval) {
        this.systemTaskWorkerPollInterval = systemTaskWorkerPollInterval;
    }
    public String getSystemTaskWorkerExecutionNamespace() {
        return systemTaskWorkerExecutionNamespace;
    }
    public void setSystemTaskWorkerExecutionNamespace(String systemTaskWorkerExecutionNamespace) {
        this.systemTaskWorkerExecutionNamespace = systemTaskWorkerExecutionNamespace;
    }
    public int getIsolatedSystemTaskWorkerThreadCount() {
        return isolatedSystemTaskWorkerThreadCount;
    }
    public void setIsolatedSystemTaskWorkerThreadCount(int isolatedSystemTaskWorkerThreadCount) {
        this.isolatedSystemTaskWorkerThreadCount = isolatedSystemTaskWorkerThreadCount;
    }
    public Duration getAsyncUpdateShortRunningWorkflowDuration() {
        return asyncUpdateShortRunningWorkflowDuration;
    }
    public void setAsyncUpdateShortRunningWorkflowDuration(
            Duration asyncUpdateShortRunningWorkflowDuration) {
        this.asyncUpdateShortRunningWorkflowDuration = asyncUpdateShortRunningWorkflowDuration;
    }
    public Duration getAsyncUpdateDelay() {
        return asyncUpdateDelay;
    }
    public void setAsyncUpdateDelay(Duration asyncUpdateDelay) {
        this.asyncUpdateDelay = asyncUpdateDelay;
    }
    public boolean isOwnerEmailMandatory() {
        return ownerEmailMandatory;
    }
    public void setOwnerEmailMandatory(boolean ownerEmailMandatory) {
        this.ownerEmailMandatory = ownerEmailMandatory;
    }
    public int getEventQueueSchedulerPollThreadCount() {
        return eventQueueSchedulerPollThreadCount;
    }
    public void setEventQueueSchedulerPollThreadCount(int eventQueueSchedulerPollThreadCount) {
        this.eventQueueSchedulerPollThreadCount = eventQueueSchedulerPollThreadCount;
    }
    public Duration getEventQueuePollInterval() {
        return eventQueuePollInterval;
    }
    public void setEventQueuePollInterval(Duration eventQueuePollInterval) {
        this.eventQueuePollInterval = eventQueuePollInterval;
    }
    public int getEventQueuePollCount() {
        return eventQueuePollCount;
    }
    public void setEventQueuePollCount(int eventQueuePollCount) {
        this.eventQueuePollCount = eventQueuePollCount;
    }
    public Duration getEventQueueLongPollTimeout() {
        return eventQueueLongPollTimeout;
    }
    public void setEventQueueLongPollTimeout(Duration eventQueueLongPollTimeout) {
        this.eventQueueLongPollTimeout = eventQueueLongPollTimeout;
    }
    public DataSize getWorkflowInputPayloadSizeThreshold() {
        return workflowInputPayloadSizeThreshold;
    }
    public void setWorkflowInputPayloadSizeThreshold(DataSize workflowInputPayloadSizeThreshold) {
        this.workflowInputPayloadSizeThreshold = workflowInputPayloadSizeThreshold;
    }
    public DataSize getMaxWorkflowInputPayloadSizeThreshold() {
        return maxWorkflowInputPayloadSizeThreshold;
    }
    public void setMaxWorkflowInputPayloadSizeThreshold(
            DataSize maxWorkflowInputPayloadSizeThreshold) {
        this.maxWorkflowInputPayloadSizeThreshold = maxWorkflowInputPayloadSizeThreshold;
    }
    public DataSize getWorkflowOutputPayloadSizeThreshold() {
        return workflowOutputPayloadSizeThreshold;
    }
    public void setWorkflowOutputPayloadSizeThreshold(DataSize workflowOutputPayloadSizeThreshold) {
        this.workflowOutputPayloadSizeThreshold = workflowOutputPayloadSizeThreshold;
    }
    public DataSize getMaxWorkflowOutputPayloadSizeThreshold() {
        return maxWorkflowOutputPayloadSizeThreshold;
    }
    public void setMaxWorkflowOutputPayloadSizeThreshold(
            DataSize maxWorkflowOutputPayloadSizeThreshold) {
        this.maxWorkflowOutputPayloadSizeThreshold = maxWorkflowOutputPayloadSizeThreshold;
    }
    public DataSize getTaskInputPayloadSizeThreshold() {
        return taskInputPayloadSizeThreshold;
    }
    public void setTaskInputPayloadSizeThreshold(DataSize taskInputPayloadSizeThreshold) {
        this.taskInputPayloadSizeThreshold = taskInputPayloadSizeThreshold;
    }
    public DataSize getMaxTaskInputPayloadSizeThreshold() {
        return maxTaskInputPayloadSizeThreshold;
    }
    public void setMaxTaskInputPayloadSizeThreshold(DataSize maxTaskInputPayloadSizeThreshold) {
        this.maxTaskInputPayloadSizeThreshold = maxTaskInputPayloadSizeThreshold;
    }
    public DataSize getTaskOutputPayloadSizeThreshold() {
        return taskOutputPayloadSizeThreshold;
    }
    public void setTaskOutputPayloadSizeThreshold(DataSize taskOutputPayloadSizeThreshold) {
        this.taskOutputPayloadSizeThreshold = taskOutputPayloadSizeThreshold;
    }
    public DataSize getMaxTaskOutputPayloadSizeThreshold() {
        return maxTaskOutputPayloadSizeThreshold;
    }
    public void setMaxTaskOutputPayloadSizeThreshold(DataSize maxTaskOutputPayloadSizeThreshold) {
        this.maxTaskOutputPayloadSizeThreshold = maxTaskOutputPayloadSizeThreshold;
    }
    public DataSize getMaxWorkflowVariablesPayloadSizeThreshold() {
        return maxWorkflowVariablesPayloadSizeThreshold;
    }
    public void setMaxWorkflowVariablesPayloadSizeThreshold(
            DataSize maxWorkflowVariablesPayloadSizeThreshold) {
        this.maxWorkflowVariablesPayloadSizeThreshold = maxWorkflowVariablesPayloadSizeThreshold;
    }
    public int getTaskExecLogSizeLimit() {
        return taskExecLogSizeLimit;
    }
    public void setTaskExecLogSizeLimit(int taskExecLogSizeLimit) {
        this.taskExecLogSizeLimit = taskExecLogSizeLimit;
    }
    /**
     * @return a copy of the current JVM system properties ({@link System#getProperties()}) as a
     *     map. NOTE(review): despite the method name, this does NOT return the typed {@code
     *     conductor.app.*} fields held by this class — only whatever is present in the JVM system
     *     properties at call time.
     */
    public Map<String, Object> getAll() {
        Map<String, Object> map = new HashMap<>();
        Properties props = System.getProperties();
        props.forEach((key, value) -> map.put(key.toString(), value));
        return map;
    }
    public void setSystemTaskPostponeThreshold(int systemTaskPostponeThreshold) {
        this.systemTaskPostponeThreshold = systemTaskPostponeThreshold;
    }
    public int getSystemTaskPostponeThreshold() {
        return systemTaskPostponeThreshold;
    }
    public Duration getSystemTaskQueuePopTimeout() {
        return systemTaskQueuePopTimeout;
    }
    public void setSystemTaskQueuePopTimeout(Duration systemTaskQueuePopTimeout) {
        this.systemTaskQueuePopTimeout = systemTaskQueuePopTimeout;
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/core/src/main/java/com/netflix/conductor/core/config/SchedulerConfiguration.java | core/src/main/java/com/netflix/conductor/core/config/SchedulerConfiguration.java | /*
* Copyright 2021 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.core.config;
import java.util.concurrent.Executor;
import java.util.concurrent.Executors;
import java.util.concurrent.ThreadFactory;
import org.apache.commons.lang3.concurrent.BasicThreadFactory;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.scheduling.annotation.EnableAsync;
import org.springframework.scheduling.annotation.EnableScheduling;
import org.springframework.scheduling.annotation.SchedulingConfigurer;
import org.springframework.scheduling.concurrent.ThreadPoolTaskScheduler;
import org.springframework.scheduling.config.ScheduledTaskRegistrar;
import rx.Scheduler;
import rx.schedulers.Schedulers;
@Configuration(proxyBeanMethods = false)
@EnableScheduling
@EnableAsync
public class SchedulerConfiguration implements SchedulingConfigurer {
    public static final String SWEEPER_EXECUTOR_NAME = "WorkflowSweeperExecutor";
    /**
     * Rx {@link Scheduler} backed by a fixed-size pool, sized by {@code
     * ConductorProperties#getEventQueueSchedulerPollThreadCount()}.
     *
     * <p>Used by some {@link com.netflix.conductor.core.events.queue.ObservableQueue}
     * implementations.
     *
     * @see com.netflix.conductor.core.events.queue.ConductorObservableQueue
     */
    @Bean
    public Scheduler scheduler(ConductorProperties properties) {
        ThreadFactory factory =
                new BasicThreadFactory.Builder()
                        .namingPattern("event-queue-poll-scheduler-thread-%d")
                        .build();
        int poolSize = properties.getEventQueueSchedulerPollThreadCount();
        Executor pollExecutor = Executors.newFixedThreadPool(poolSize, factory);
        return Schedulers.from(pollExecutor);
    }
    /**
     * Fixed-size executor backing the workflow sweeper. Fails fast at startup when the configured
     * thread count is not positive.
     */
    @Bean(SWEEPER_EXECUTOR_NAME)
    public Executor sweeperExecutor(ConductorProperties properties) {
        int threadCount = properties.getSweeperThreadCount();
        if (threadCount <= 0) {
            throw new IllegalStateException(
                    "conductor.app.sweeper-thread-count must be greater than 0.");
        }
        ThreadFactory factory =
                new BasicThreadFactory.Builder().namingPattern("sweeper-thread-%d").build();
        return Executors.newFixedThreadPool(threadCount, factory);
    }
    /** Registers a small dedicated scheduler for the application's {@code @Scheduled} jobs. */
    @Override
    public void configureTasks(ScheduledTaskRegistrar taskRegistrar) {
        ThreadPoolTaskScheduler taskScheduler = new ThreadPoolTaskScheduler();
        taskScheduler.setPoolSize(3); // equal to the number of scheduled jobs
        taskScheduler.setThreadNamePrefix("scheduled-task-pool-");
        taskScheduler.initialize();
        taskRegistrar.setTaskScheduler(taskScheduler);
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/core/src/main/java/com/netflix/conductor/core/config/ConductorCoreConfiguration.java | core/src/main/java/com/netflix/conductor/core/config/ConductorCoreConfiguration.java | /*
* Copyright 2021 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.core.config;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.ThreadFactory;
import java.util.stream.Collectors;
import org.apache.commons.lang3.concurrent.BasicThreadFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Qualifier;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.boot.context.properties.EnableConfigurationProperties;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.retry.support.RetryTemplate;
import com.netflix.conductor.common.utils.ExternalPayloadStorage;
import com.netflix.conductor.core.events.EventQueueProvider;
import com.netflix.conductor.core.exception.TransientException;
import com.netflix.conductor.core.execution.mapper.TaskMapper;
import com.netflix.conductor.core.execution.tasks.WorkflowSystemTask;
import com.netflix.conductor.core.listener.TaskStatusListener;
import com.netflix.conductor.core.listener.TaskStatusListenerStub;
import com.netflix.conductor.core.listener.WorkflowStatusListener;
import com.netflix.conductor.core.listener.WorkflowStatusListenerStub;
import com.netflix.conductor.core.storage.DummyPayloadStorage;
import com.netflix.conductor.core.sync.Lock;
import com.netflix.conductor.core.sync.noop.NoopLock;
import static com.netflix.conductor.core.events.EventQueues.EVENT_QUEUE_PROVIDERS_QUALIFIER;
import static com.netflix.conductor.core.execution.tasks.SystemTaskRegistry.ASYNC_SYSTEM_TASKS_QUALIFIER;
import static java.util.function.Function.identity;
@Configuration(proxyBeanMethods = false)
@EnableConfigurationProperties(ConductorProperties.class)
public class ConductorCoreConfiguration {
    private static final Logger LOGGER = LoggerFactory.getLogger(ConductorCoreConfiguration.class);
    /** No-op execution lock; active unless another lock type is configured. */
    @ConditionalOnProperty(
            name = "conductor.workflow-execution-lock.type",
            havingValue = "noop_lock",
            matchIfMissing = true)
    @Bean
    public Lock provideLock() {
        return new NoopLock();
    }
    /** Dummy external payload storage; active unless another storage type is configured. */
    @ConditionalOnProperty(
            name = "conductor.external-payload-storage.type",
            havingValue = "dummy",
            matchIfMissing = true)
    @Bean
    public ExternalPayloadStorage dummyExternalPayloadStorage() {
        LOGGER.info("Initialized dummy payload storage!");
        return new DummyPayloadStorage();
    }
    /** Stub workflow status listener; active unless another listener type is configured. */
    @ConditionalOnProperty(
            name = "conductor.workflow-status-listener.type",
            havingValue = "stub",
            matchIfMissing = true)
    @Bean
    public WorkflowStatusListener workflowStatusListener() {
        return new WorkflowStatusListenerStub();
    }
    /** Stub task status listener; active unless another listener type is configured. */
    @ConditionalOnProperty(
            name = "conductor.task-status-listener.type",
            havingValue = "stub",
            matchIfMissing = true)
    @Bean
    public TaskStatusListener taskStatusListener() {
        return new TaskStatusListenerStub();
    }
    /** Shared fixed-size pool of daemon worker threads, sized from the app configuration. */
    @Bean
    public ExecutorService executorService(ConductorProperties conductorProperties) {
        ThreadFactory factory =
                new BasicThreadFactory.Builder()
                        .namingPattern("conductor-worker-%d")
                        .daemon(true)
                        .build();
        int maxThreads = conductorProperties.getExecutorServiceMaxThreadCount();
        return Executors.newFixedThreadPool(maxThreads, factory);
    }
    /** Indexes the discovered {@link TaskMapper} beans by the task type each one handles. */
    @Bean
    @Qualifier("taskMappersByTaskType")
    public Map<String, TaskMapper> getTaskMappers(List<TaskMapper> taskMappers) {
        return taskMappers.stream().collect(Collectors.toMap(TaskMapper::getTaskType, identity()));
    }
    /** The subset of registered system tasks that run asynchronously. */
    @Bean
    @Qualifier(ASYNC_SYSTEM_TASKS_QUALIFIER)
    public Set<WorkflowSystemTask> asyncSystemTasks(Set<WorkflowSystemTask> allSystemTasks) {
        Stream<WorkflowSystemTask> asyncTasks =
                allSystemTasks.stream().filter(WorkflowSystemTask::isAsync);
        return asyncTasks.collect(Collectors.toUnmodifiableSet());
    }
    /** Indexes the discovered {@link EventQueueProvider} beans by their queue type. */
    @Bean
    @Qualifier(EVENT_QUEUE_PROVIDERS_QUALIFIER)
    public Map<String, EventQueueProvider> getEventQueueProviders(
            List<EventQueueProvider> eventQueueProviders) {
        return eventQueueProviders.stream()
                .collect(Collectors.toMap(EventQueueProvider::getQueueType, identity()));
    }
    /** Retry policy for transient failures: up to 3 immediate attempts, no backoff. */
    @Bean
    public RetryTemplate onTransientErrorRetryTemplate() {
        return RetryTemplate.builder()
                .maxAttempts(3)
                .retryOn(TransientException.class)
                .noBackoff()
                .build();
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/core/src/main/java/com/netflix/conductor/core/listener/TaskStatusListener.java | core/src/main/java/com/netflix/conductor/core/listener/TaskStatusListener.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.core.listener;
import com.netflix.conductor.model.TaskModel;
/**
 * Listener for the Task status change. All methods have default (no-op) implementations so that an
 * implementation can choose to override only the subset of Task statuses it is interested in.
 */
public interface TaskStatusListener {
    /** Called when a task has been scheduled. */
    default void onTaskScheduled(TaskModel task) {}
    /** Called when a task has moved to in-progress. */
    default void onTaskInProgress(TaskModel task) {}
    /** Called when a task has been canceled. */
    default void onTaskCanceled(TaskModel task) {}
    /** Called when a task has failed. */
    default void onTaskFailed(TaskModel task) {}
    /** Called when a task has failed with a terminal (non-retryable) error. */
    default void onTaskFailedWithTerminalError(TaskModel task) {}
    /** Called when a task has completed successfully. */
    default void onTaskCompleted(TaskModel task) {}
    /** Called when a task has completed with errors. */
    default void onTaskCompletedWithErrors(TaskModel task) {}
    /** Called when a task has timed out. */
    default void onTaskTimedOut(TaskModel task) {}
    /** Called when a task has been skipped. */
    default void onTaskSkipped(TaskModel task) {}
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/core/src/main/java/com/netflix/conductor/core/listener/WorkflowStatusListener.java | core/src/main/java/com/netflix/conductor/core/listener/WorkflowStatusListener.java | /*
* Copyright 2022 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.core.listener;
import java.util.Locale;

import com.netflix.conductor.model.WorkflowModel;

import com.fasterxml.jackson.annotation.JsonValue;
/** Listener for the completed and terminated workflows */
public interface WorkflowStatusListener {
    /** Lifecycle event types; serialized to JSON as their lowercase names. */
    enum WorkflowEventType {
        STARTED,
        RERAN,
        RETRIED,
        PAUSED,
        RESUMED,
        RESTARTED,
        COMPLETED,
        TERMINATED,
        FINALIZED;
        @JsonValue // Ensures correct JSON serialization
        @Override
        public String toString() {
            // Locale.ROOT: keep the JSON value locale-independent (e.g. avoid the Turkish
            // dotless-i when lowercasing names such as RETRIED under a Turkish default locale).
            return name().toLowerCase(Locale.ROOT);
        }
    }
    /**
     * @return true when the workflow's definition has opted in to status listener callbacks.
     */
    private static boolean statusListenerEnabled(WorkflowModel workflow) {
        return workflow.getWorkflowDefinition().isWorkflowStatusListenerEnabled();
    }
    default void onWorkflowCompletedIfEnabled(WorkflowModel workflow) {
        if (statusListenerEnabled(workflow)) {
            onWorkflowCompleted(workflow);
        }
    }
    default void onWorkflowTerminatedIfEnabled(WorkflowModel workflow) {
        if (statusListenerEnabled(workflow)) {
            onWorkflowTerminated(workflow);
        }
    }
    default void onWorkflowFinalizedIfEnabled(WorkflowModel workflow) {
        if (statusListenerEnabled(workflow)) {
            onWorkflowFinalized(workflow);
        }
    }
    default void onWorkflowStartedIfEnabled(WorkflowModel workflow) {
        if (statusListenerEnabled(workflow)) {
            onWorkflowStarted(workflow);
        }
    }
    default void onWorkflowRestartedIfEnabled(WorkflowModel workflow) {
        if (statusListenerEnabled(workflow)) {
            onWorkflowRestarted(workflow);
        }
    }
    default void onWorkflowRerunIfEnabled(WorkflowModel workflow) {
        if (statusListenerEnabled(workflow)) {
            onWorkflowRerun(workflow);
        }
    }
    default void onWorkflowRetriedIfEnabled(WorkflowModel workflow) {
        if (statusListenerEnabled(workflow)) {
            onWorkflowRetried(workflow);
        }
    }
    default void onWorkflowPausedIfEnabled(WorkflowModel workflow) {
        if (statusListenerEnabled(workflow)) {
            onWorkflowPaused(workflow);
        }
    }
    default void onWorkflowResumedIfEnabled(WorkflowModel workflow) {
        if (statusListenerEnabled(workflow)) {
            onWorkflowResumed(workflow);
        }
    }
    void onWorkflowCompleted(WorkflowModel workflow);
    void onWorkflowTerminated(WorkflowModel workflow);
    default void onWorkflowFinalized(WorkflowModel workflow) {}
    default void onWorkflowStarted(WorkflowModel workflow) {}
    default void onWorkflowRestarted(WorkflowModel workflow) {}
    default void onWorkflowRerun(WorkflowModel workflow) {}
    default void onWorkflowPaused(WorkflowModel workflow) {}
    default void onWorkflowResumed(WorkflowModel workflow) {}
    default void onWorkflowRetried(WorkflowModel workflow) {}
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/core/src/main/java/com/netflix/conductor/core/listener/TaskStatusListenerStub.java | core/src/main/java/com/netflix/conductor/core/listener/TaskStatusListenerStub.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.core.listener;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.netflix.conductor.model.TaskModel;
/**
 * Default {@link TaskStatusListener} implementation: every callback simply emits a debug-level
 * log line identifying the task and takes no further action.
 */
public class TaskStatusListenerStub implements TaskStatusListener {

    private static final Logger log = LoggerFactory.getLogger(TaskStatusListenerStub.class);

    @Override
    public void onTaskScheduled(TaskModel task) {
        log.debug("Task {} is scheduled", task.getTaskId());
    }

    @Override
    public void onTaskCanceled(TaskModel task) {
        log.debug("Task {} is canceled", task.getTaskId());
    }

    @Override
    public void onTaskCompleted(TaskModel task) {
        log.debug("Task {} is completed", task.getTaskId());
    }

    @Override
    public void onTaskCompletedWithErrors(TaskModel task) {
        log.debug("Task {} is completed with errors", task.getTaskId());
    }

    @Override
    public void onTaskFailed(TaskModel task) {
        log.debug("Task {} is failed", task.getTaskId());
    }

    @Override
    public void onTaskFailedWithTerminalError(TaskModel task) {
        log.debug("Task {} is failed with terminal error", task.getTaskId());
    }

    @Override
    public void onTaskInProgress(TaskModel task) {
        log.debug("Task {} is in-progress", task.getTaskId());
    }

    @Override
    public void onTaskSkipped(TaskModel task) {
        log.debug("Task {} is skipped", task.getTaskId());
    }

    @Override
    public void onTaskTimedOut(TaskModel task) {
        log.debug("Task {} is timed out", task.getTaskId());
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/core/src/main/java/com/netflix/conductor/core/listener/WorkflowStatusListenerStub.java | core/src/main/java/com/netflix/conductor/core/listener/WorkflowStatusListenerStub.java | /*
* Copyright 2022 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.core.listener;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.netflix.conductor.model.WorkflowModel;
/**
 * Default {@link WorkflowStatusListener} implementation: each callback simply emits a debug-level
 * log line identifying the workflow and takes no further action.
 */
public class WorkflowStatusListenerStub implements WorkflowStatusListener {

    private static final Logger log = LoggerFactory.getLogger(WorkflowStatusListenerStub.class);

    @Override
    public void onWorkflowCompleted(WorkflowModel workflow) {
        log.debug("Workflow {} is completed", workflow.getWorkflowId());
    }

    @Override
    public void onWorkflowTerminated(WorkflowModel workflow) {
        log.debug("Workflow {} is terminated", workflow.getWorkflowId());
    }

    @Override
    public void onWorkflowFinalized(WorkflowModel workflow) {
        log.debug("Workflow {} is finalized", workflow.getWorkflowId());
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/core/src/main/java/com/netflix/conductor/core/sync/Lock.java | core/src/main/java/com/netflix/conductor/core/sync/Lock.java | /*
* Copyright 2022 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.core.sync;
import java.util.concurrent.TimeUnit;
/**
 * Interface implemented by a distributed lock client.
 *
 * <p>A typical usage:
 *
 * <pre>
 * if (acquireLock(workflowId, 5, TimeUnit.MILLISECONDS)) {
 *     try {
 *         // load and execute workflow....
 *         ExecutionDAO.updateWorkflow(workflow); // use optimistic locking
 *     } finally {
 *         releaseLock(workflowId);
 *     }
 * }
 * </pre>
 */
public interface Lock {
    /**
     * Acquires a re-entrant lock on lockId, blocks indefinitely on lockId until it succeeds
     *
     * @param lockId resource to lock on
     */
    void acquireLock(String lockId);
    /**
     * Acquires a re-entrant lock on lockId, blocks for timeToTry duration before giving up
     *
     * @param lockId resource to lock on
     * @param timeToTry blocks up to timeToTry duration in attempt to acquire the lock
     * @param unit time unit
     * @return true, if successfully acquired
     */
    boolean acquireLock(String lockId, long timeToTry, TimeUnit unit);
    /**
     * Acquires a re-entrant lock on lockId with provided leaseTime duration. Blocks for timeToTry
     * duration before giving up
     *
     * @param lockId resource to lock on
     * @param timeToTry blocks up to timeToTry duration in attempt to acquire the lock
     * @param leaseTime Lock lease expiration duration.
     * @param unit time unit applied to both timeToTry and leaseTime
     * @return true, if successfully acquired
     */
    boolean acquireLock(String lockId, long timeToTry, long leaseTime, TimeUnit unit);
    /**
     * Release a previously acquired lock
     *
     * @param lockId resource to lock on
     */
    void releaseLock(String lockId);
    /**
     * Explicitly cleanup lock resources, if releasing it wouldn't do so.
     *
     * @param lockId resource to lock on
     */
    void deleteLock(String lockId);
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/core/src/main/java/com/netflix/conductor/core/sync/noop/NoopLock.java | core/src/main/java/com/netflix/conductor/core/sync/noop/NoopLock.java | /*
* Copyright 2022 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.core.sync.noop;
import java.util.concurrent.TimeUnit;
import com.netflix.conductor.core.sync.Lock;
/**
 * A {@link Lock} that performs no locking at all: every acquire reports immediate success and
 * release/delete do nothing. Presumably intended for deployments where workflow execution
 * locking is not required.
 */
public class NoopLock implements Lock {
    /** No-op; returns immediately without acquiring anything. */
    @Override
    public void acquireLock(String lockId) {}
    /** Always reports success without blocking. */
    @Override
    public boolean acquireLock(String lockId, long timeToTry, TimeUnit unit) {
        return true;
    }
    /** Always reports success without blocking; the lease time is ignored. */
    @Override
    public boolean acquireLock(String lockId, long timeToTry, long leaseTime, TimeUnit unit) {
        return true;
    }
    /** No-op. */
    @Override
    public void releaseLock(String lockId) {}
    /** No-op. */
    @Override
    public void deleteLock(String lockId) {}
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/core/src/main/java/com/netflix/conductor/core/sync/local/LocalOnlyLock.java | core/src/main/java/com/netflix/conductor/core/sync/local/LocalOnlyLock.java | /*
* Copyright 2020 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.core.sync.local;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.ReentrantLock;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.netflix.conductor.annotations.VisibleForTesting;
import com.netflix.conductor.core.sync.Lock;
import com.github.benmanes.caffeine.cache.CacheLoader;
import com.github.benmanes.caffeine.cache.Caffeine;
import com.github.benmanes.caffeine.cache.LoadingCache;
/**
 * A {@link Lock} whose state lives entirely in static fields of the current JVM, so it provides
 * mutual exclusion only between threads of a single process (hence "local only").
 *
 * <p>Locks are fair {@link ReentrantLock}s created lazily per lockId. When a lease time is
 * supplied, a single-threaded scheduler deletes the lock once the lease expires.
 */
public class LocalOnlyLock implements Lock {

    private static final Logger LOGGER = LoggerFactory.getLogger(LocalOnlyLock.class);

    // Lazily creates a fair re-entrant lock the first time a lockId is requested.
    private static final CacheLoader<String, ReentrantLock> LOADER = key -> new ReentrantLock(true);
    // Pending lease-expiration jobs, keyed by lockId.
    private static final ConcurrentHashMap<String, ScheduledFuture<?>> SCHEDULEDFUTURES =
            new ConcurrentHashMap<>();
    private static final LoadingCache<String, ReentrantLock> LOCKIDTOSEMAPHOREMAP =
            Caffeine.newBuilder().build(LOADER);
    private static final ThreadGroup THREAD_GROUP = new ThreadGroup("LocalOnlyLock-scheduler");
    private static final ThreadFactory THREAD_FACTORY =
            runnable -> {
                Thread thread = new Thread(THREAD_GROUP, runnable);
                // Daemon thread: a pending lease-expiration job must not prevent JVM shutdown.
                thread.setDaemon(true);
                return thread;
            };
    private static final ScheduledExecutorService SCHEDULER =
            Executors.newScheduledThreadPool(1, THREAD_FACTORY);

    /** Blocks indefinitely until the in-process lock for lockId is acquired. */
    @Override
    public void acquireLock(String lockId) {
        LOGGER.trace("Locking {}", lockId);
        LOCKIDTOSEMAPHOREMAP.get(lockId).lock();
    }

    /**
     * Tries to acquire the lock, waiting up to timeToTry.
     *
     * @return true if the lock was acquired within the timeout
     */
    @Override
    public boolean acquireLock(String lockId, long timeToTry, TimeUnit unit) {
        try {
            LOGGER.trace("Locking {} with timeout {} {}", lockId, timeToTry, unit);
            return LOCKIDTOSEMAPHOREMAP.get(lockId).tryLock(timeToTry, unit);
        } catch (InterruptedException e) {
            // Restore the interrupt flag before propagating.
            Thread.currentThread().interrupt();
            throw new RuntimeException(e);
        }
    }

    /**
     * Tries to acquire the lock and, on success, schedules automatic deletion once the lease
     * expires. Any stale expiration job from a previous lease of the same lockId is cancelled so
     * it cannot delete the lock out from under the new holder.
     */
    @Override
    public boolean acquireLock(String lockId, long timeToTry, long leaseTime, TimeUnit unit) {
        LOGGER.trace(
                "Locking {} with timeout {} {} for {} {}",
                lockId,
                timeToTry,
                unit,
                leaseTime,
                unit);
        if (acquireLock(lockId, timeToTry, unit)) {
            LOGGER.trace("Releasing {} automatically after {} {}", lockId, leaseTime, unit);
            SCHEDULEDFUTURES.compute(
                    lockId,
                    (key, existingFuture) -> {
                        if (existingFuture != null) {
                            // Bug fix: previously the old future was overwritten but left
                            // scheduled, so it could still fire and delete the new lease early.
                            existingFuture.cancel(false);
                        }
                        return SCHEDULER.schedule(() -> deleteLock(key), leaseTime, unit);
                    });
            return true;
        }
        return false;
    }

    // Cancels and forgets the lease-expiration job for lockId, if one is still pending.
    private void removeLeaseExpirationJob(String lockId) {
        ScheduledFuture<?> schedFuture = SCHEDULEDFUTURES.get(lockId);
        if (schedFuture != null && schedFuture.cancel(false)) {
            SCHEDULEDFUTURES.remove(lockId);
            LOGGER.trace("lockId {} removed from lease expiration job", lockId);
        }
    }

    /** Releases the lock; releasing a lock that is unknown or not held is silently ignored. */
    @Override
    public void releaseLock(String lockId) {
        // Synchronized to prevent race condition between semaphore check and actual release
        synchronized (LOCKIDTOSEMAPHOREMAP) {
            if (LOCKIDTOSEMAPHOREMAP.getIfPresent(lockId) == null) {
                return;
            }
            LOGGER.trace("Releasing {}", lockId);
            try {
                LOCKIDTOSEMAPHOREMAP.get(lockId).unlock();
            } catch (IllegalMonitorStateException e) {
                // Releasing a lock without holding it can cause this exception, which can be
                // ignored.
                // This matches the behavior of RedisLock implementation.
            }
            removeLeaseExpirationJob(lockId);
        }
    }

    /** Drops the lock object itself from the cache. */
    @Override
    public void deleteLock(String lockId) {
        LOGGER.trace("Deleting {}", lockId);
        LOCKIDTOSEMAPHOREMAP.invalidate(lockId);
    }

    @VisibleForTesting
    LoadingCache<String, ReentrantLock> cache() {
        return LOCKIDTOSEMAPHOREMAP;
    }

    @VisibleForTesting
    ConcurrentHashMap<String, ScheduledFuture<?>> scheduledFutures() {
        return SCHEDULEDFUTURES;
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/core/src/main/java/com/netflix/conductor/core/sync/local/LocalOnlyLockConfiguration.java | core/src/main/java/com/netflix/conductor/core/sync/local/LocalOnlyLockConfiguration.java | /*
* Copyright 2020 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.core.sync.local;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import com.netflix.conductor.core.sync.Lock;
/**
 * Registers the {@link LocalOnlyLock} bean when the execution-lock type property
 * {@code conductor.workflow-execution-lock.type} is set to {@code local_only}.
 */
@Configuration
@ConditionalOnProperty(name = "conductor.workflow-execution-lock.type", havingValue = "local_only")
public class LocalOnlyLockConfiguration {
    /** @return a new in-process {@link LocalOnlyLock} as the application's {@link Lock}. */
    @Bean
    public Lock provideLock() {
        return new LocalOnlyLock();
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/server/src/test/java/com/netflix/conductor/common/config/ConductorObjectMapperTest.java | server/src/test/java/com/netflix/conductor/common/config/ConductorObjectMapperTest.java | /*
* Copyright 2021 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.common.config;
import java.io.IOException;
import java.io.StringWriter;
import java.util.HashMap;
import java.util.Map;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.test.context.TestPropertySource;
import org.springframework.test.context.junit4.SpringRunner;
import com.netflix.conductor.common.metadata.workflow.WorkflowDef;
import com.netflix.conductor.common.run.Workflow;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.protobuf.Any;
import com.google.protobuf.Struct;
import com.google.protobuf.Value;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
/**
 * Tests the customized {@link ObjectMapper} that is used by {@link com.netflix.conductor.Conductor}
 * application.
 */
@SpringBootTest(webEnvironment = SpringBootTest.WebEnvironment.NONE)
@RunWith(SpringRunner.class)
@TestPropertySource(properties = "conductor.queue.type=")
public class ConductorObjectMapperTest {

    @Autowired ObjectMapper objectMapper;

    /** A protobuf {@code Any} payload must survive a JSON round trip through the mapper. */
    @Test
    public void testSimpleMapping() throws IOException {
        assertTrue(objectMapper.canSerialize(Any.class));
        Struct payload =
                Struct.newBuilder()
                        .putFields(
                                "some-key", Value.newBuilder().setStringValue("some-value").build())
                        .build();
        Any packed = Any.pack(payload);
        StringWriter writer = new StringWriter();
        objectMapper.writer().writeValue(writer, packed);
        Any decoded = objectMapper.reader().forType(Any.class).readValue(writer.toString());
        assertEquals(packed.getTypeUrl(), decoded.getTypeUrl());
        Struct roundTripped = decoded.unpack(Struct.class);
        assertTrue(roundTripped.containsFields("some-key"));
        assertEquals(
                payload.getFieldsOrThrow("some-key").getStringValue(),
                roundTripped.getFieldsOrThrow("some-key").getStringValue());
    }

    /** Null map values must be written out as JSON null rather than dropped. */
    @Test
    public void testNullOnWrite() throws JsonProcessingException {
        Map<String, Object> payload = new HashMap<>();
        payload.put("someKey", null);
        payload.put("someId", "abc123");
        String json = objectMapper.writeValueAsString(payload);
        assertTrue(json.contains("null"));
    }

    /** Workflow round trip: null output values are preserved, null input becomes non-null. */
    @Test
    public void testWorkflowSerDe() throws IOException {
        WorkflowDef def = new WorkflowDef();
        def.setName("testDef");
        def.setVersion(2);
        Workflow original = new Workflow();
        original.setWorkflowDefinition(def);
        original.setWorkflowId("test-workflow-id");
        original.setStatus(Workflow.WorkflowStatus.RUNNING);
        original.setStartTime(10L);
        original.setInput(null);
        Map<String, Object> output = new HashMap<>();
        output.put("someKey", null);
        output.put("someId", "abc123");
        original.setOutput(output);
        String json = objectMapper.writeValueAsString(original);
        Workflow decoded = objectMapper.readValue(json, Workflow.class);
        assertTrue(decoded.getOutput().containsKey("someKey"));
        assertNull(decoded.getOutput().get("someKey"));
        assertNotNull(decoded.getInput());
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/server/src/main/java/com/netflix/conductor/Conductor.java | server/src/main/java/com/netflix/conductor/Conductor.java | /*
* Copyright 2021 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor;
import java.io.IOException;
import java.io.InputStream;
import java.util.Properties;
import org.apache.commons.lang3.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.SpringBootApplication;
import org.springframework.boot.autoconfigure.jdbc.DataSourceAutoConfiguration;
import org.springframework.context.annotation.ComponentScan;
import org.springframework.core.io.FileSystemResource;
// Prevents from the datasource beans to be loaded, AS they are needed only for specific databases.
// In case that SQL database is selected this class will be imported back in the appropriate
// database persistence module.
@SpringBootApplication(exclude = DataSourceAutoConfiguration.class)
@ComponentScan(basePackages = {"com.netflix.conductor", "io.orkes.conductor"})
public class Conductor {

    private static final Logger log = LoggerFactory.getLogger(Conductor.class);

    /** Application entry point: loads the optional external config file, then boots Spring. */
    public static void main(String[] args) throws IOException {
        loadExternalConfig();
        SpringApplication.run(Conductor.class, args);
    }

    /**
     * Reads properties from the location specified in <code>CONDUCTOR_CONFIG_FILE</code> and sets
     * them as system properties so they override the default properties.
     *
     * <p>Spring Boot property hierarchy is documented here,
     * https://docs.spring.io/spring-boot/docs/current/reference/html/spring-boot-features.html#boot-features-external-config
     *
     * @throws IOException if file can't be read.
     */
    private static void loadExternalConfig() throws IOException {
        // The system property takes precedence over the environment variable.
        String configFile = System.getProperty("CONDUCTOR_CONFIG_FILE");
        if (StringUtils.isBlank(configFile)) {
            configFile = System.getenv("CONDUCTOR_CONFIG_FILE");
        }
        if (StringUtils.isNotBlank(configFile)) {
            log.info("Loading {}", configFile);
            FileSystemResource resource = new FileSystemResource(configFile);
            if (resource.exists()) {
                Properties properties = new Properties();
                // Bug fix: Properties.load does not close the stream it is given, so close it
                // explicitly via try-with-resources to avoid leaking the file handle.
                try (InputStream inputStream = resource.getInputStream()) {
                    properties.load(inputStream);
                }
                properties.forEach(
                        (key, value) -> System.setProperty((String) key, (String) value));
                log.info("Loaded {} properties from {}", properties.size(), configFile);
            } else {
                log.warn("Ignoring {} since it does not exist", configFile);
            }
        }
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/rest/src/test/java/com/netflix/conductor/rest/controllers/MetadataResourceTest.java | rest/src/test/java/com/netflix/conductor/rest/controllers/MetadataResourceTest.java | /*
* Copyright 2020 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.rest.controllers;
import java.util.ArrayList;
import java.util.List;
import org.junit.Before;
import org.junit.Test;
import com.netflix.conductor.common.metadata.tasks.TaskDef;
import com.netflix.conductor.common.metadata.workflow.WorkflowDef;
import com.netflix.conductor.service.MetadataService;
import static org.junit.Assert.assertEquals;
import static org.mockito.Mockito.any;
import static org.mockito.Mockito.anyList;
import static org.mockito.Mockito.anyString;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
public class MetadataResourceTest {

    private MetadataResource resource;
    private MetadataService metadataService;

    @Before
    public void before() {
        this.metadataService = mock(MetadataService.class);
        this.resource = new MetadataResource(this.metadataService);
    }

    /** Builds the workflow-definition fixture (name "test", version 1) used by the read tests. */
    private static WorkflowDef sampleWorkflowDef() {
        WorkflowDef def = new WorkflowDef();
        def.setName("test");
        def.setVersion(1);
        def.setDescription("test");
        return def;
    }

    /** Builds the task-definition fixture (name "test") used by the task-def tests. */
    private static TaskDef sampleTaskDef() {
        TaskDef def = new TaskDef();
        def.setName("test");
        def.setDescription("desc");
        return def;
    }

    @Test
    public void testCreateWorkflow() {
        resource.create(new WorkflowDef());
        verify(metadataService, times(1)).registerWorkflowDef(any(WorkflowDef.class));
    }

    @Test
    public void testValidateWorkflow() {
        resource.validate(new WorkflowDef());
        verify(metadataService, times(1)).validateWorkflowDef(any(WorkflowDef.class));
    }

    @Test
    public void testUpdateWorkflow() {
        List<WorkflowDef> workflowDefs = new ArrayList<>();
        workflowDefs.add(new WorkflowDef());
        resource.update(workflowDefs);
        verify(metadataService, times(1)).updateWorkflowDef(anyList());
    }

    @Test
    public void testGetWorkflowDef() {
        WorkflowDef def = sampleWorkflowDef();
        when(metadataService.getWorkflowDef(anyString(), any())).thenReturn(def);
        assertEquals(def, resource.get("test", 1));
    }

    @Test
    public void testGetAllWorkflowDef() {
        List<WorkflowDef> workflowDefs = new ArrayList<>();
        workflowDefs.add(sampleWorkflowDef());
        when(metadataService.getWorkflowDefs()).thenReturn(workflowDefs);
        assertEquals(workflowDefs, resource.getAll());
    }

    @Test
    public void testGetAllWorkflowDefLatestVersions() {
        List<WorkflowDef> workflowDefs = new ArrayList<>();
        workflowDefs.add(sampleWorkflowDef());
        when(metadataService.getWorkflowDefsLatestVersions()).thenReturn(workflowDefs);
        assertEquals(workflowDefs, resource.getAllWorkflowsWithLatestVersions());
    }

    @Test
    public void testUnregisterWorkflowDef() throws Exception {
        resource.unregisterWorkflowDef("test", 1);
        verify(metadataService, times(1)).unregisterWorkflowDef(anyString(), any());
    }

    @Test
    public void testRegisterListOfTaskDef() {
        List<TaskDef> taskDefs = new ArrayList<>();
        taskDefs.add(sampleTaskDef());
        resource.registerTaskDef(taskDefs);
        verify(metadataService, times(1)).registerTaskDef(taskDefs);
    }

    @Test
    public void testRegisterTaskDef() {
        TaskDef taskDef = sampleTaskDef();
        resource.registerTaskDef(taskDef);
        verify(metadataService, times(1)).updateTaskDef(taskDef);
    }

    @Test
    public void testGetAllTaskDefs() {
        List<TaskDef> taskDefs = new ArrayList<>();
        taskDefs.add(sampleTaskDef());
        when(metadataService.getTaskDefs()).thenReturn(taskDefs);
        assertEquals(taskDefs, resource.getTaskDefs());
    }

    @Test
    public void testGetTaskDef() {
        TaskDef taskDef = sampleTaskDef();
        when(metadataService.getTaskDef(anyString())).thenReturn(taskDef);
        assertEquals(taskDef, resource.getTaskDef("test"));
    }

    @Test
    public void testUnregisterTaskDef() {
        resource.unregisterTaskDef("test");
        verify(metadataService, times(1)).unregisterTaskDef(anyString());
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/rest/src/test/java/com/netflix/conductor/rest/controllers/TaskResourceTest.java | rest/src/test/java/com/netflix/conductor/rest/controllers/TaskResourceTest.java | /*
* Copyright 2020 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.rest.controllers;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.junit.Before;
import org.junit.Test;
import org.springframework.http.ResponseEntity;
import com.netflix.conductor.common.metadata.tasks.PollData;
import com.netflix.conductor.common.metadata.tasks.Task;
import com.netflix.conductor.common.metadata.tasks.TaskExecLog;
import com.netflix.conductor.common.metadata.tasks.TaskResult;
import com.netflix.conductor.common.run.ExternalStorageLocation;
import com.netflix.conductor.common.run.SearchResult;
import com.netflix.conductor.common.run.TaskSummary;
import com.netflix.conductor.model.TaskModel;
import com.netflix.conductor.service.TaskService;
import com.netflix.conductor.service.WorkflowService;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.ArgumentMatchers.anyInt;
import static org.mockito.ArgumentMatchers.anyList;
import static org.mockito.ArgumentMatchers.anyString;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
public class TaskResourceTest {
private TaskService mockTaskService;
private TaskResource taskResource;
private WorkflowService workflowService;
@Before
public void before() {
this.mockTaskService = mock(TaskService.class);
this.workflowService = mock(WorkflowService.class);
this.taskResource = new TaskResource(this.mockTaskService, this.workflowService);
}
@Test
public void testPoll() {
Task task = new Task();
task.setTaskType("SIMPLE");
task.setWorkerId("123");
task.setDomain("test");
when(mockTaskService.poll(anyString(), anyString(), anyString())).thenReturn(task);
assertEquals(ResponseEntity.ok(task), taskResource.poll("SIMPLE", "123", "test"));
}
@Test
public void testBatchPoll() {
Task task = new Task();
task.setTaskType("SIMPLE");
task.setWorkerId("123");
task.setDomain("test");
List<Task> listOfTasks = new ArrayList<>();
listOfTasks.add(task);
when(mockTaskService.batchPoll(anyString(), anyString(), anyString(), anyInt(), anyInt()))
.thenReturn(listOfTasks);
assertEquals(
ResponseEntity.ok(listOfTasks),
taskResource.batchPoll("SIMPLE", "123", "test", 1, 100));
}
@Test
public void testUpdateTask() {
TaskResult taskResult = new TaskResult();
taskResult.setStatus(TaskResult.Status.COMPLETED);
taskResult.setTaskId("123");
TaskModel taskModel = new TaskModel();
taskModel.setTaskId("123");
when(mockTaskService.updateTask(any(TaskResult.class))).thenReturn(taskModel);
assertEquals("123", taskResource.updateTask(taskResult));
}
@Test
public void testLog() {
taskResource.log("123", "test log");
verify(mockTaskService, times(1)).log(anyString(), anyString());
}
@Test
public void testGetTaskLogs() {
List<TaskExecLog> listOfLogs = new ArrayList<>();
listOfLogs.add(new TaskExecLog("test log"));
when(mockTaskService.getTaskLogs(anyString())).thenReturn(listOfLogs);
assertEquals(ResponseEntity.ok(listOfLogs), taskResource.getTaskLogs("123"));
}
@Test
public void testGetTask() {
Task task = new Task();
task.setTaskType("SIMPLE");
task.setWorkerId("123");
task.setDomain("test");
task.setStatus(Task.Status.IN_PROGRESS);
when(mockTaskService.getTask(anyString())).thenReturn(task);
ResponseEntity<Task> entity = taskResource.getTask("123");
assertNotNull(entity);
assertEquals(task, entity.getBody());
}
@Test
public void testSize() {
Map<String, Integer> map = new HashMap<>();
map.put("test1", 1);
map.put("test2", 2);
List<String> list = new ArrayList<>();
list.add("test1");
list.add("test2");
when(mockTaskService.getTaskQueueSizes(anyList())).thenReturn(map);
assertEquals(map, taskResource.size(list));
}
@Test
public void testAllVerbose() {
Map<String, Long> map = new HashMap<>();
map.put("queue1", 1L);
map.put("queue2", 2L);
Map<String, Map<String, Long>> mapOfMap = new HashMap<>();
mapOfMap.put("queue", map);
Map<String, Map<String, Map<String, Long>>> queueSizeMap = new HashMap<>();
queueSizeMap.put("queue", mapOfMap);
when(mockTaskService.allVerbose()).thenReturn(queueSizeMap);
assertEquals(queueSizeMap, taskResource.allVerbose());
}
@Test
public void testQueueDetails() {
Map<String, Long> map = new HashMap<>();
map.put("queue1", 1L);
map.put("queue2", 2L);
when(mockTaskService.getAllQueueDetails()).thenReturn(map);
assertEquals(map, taskResource.all());
}
@Test
public void testGetPollData() {
PollData pollData = new PollData("queue", "test", "w123", 100);
List<PollData> listOfPollData = new ArrayList<>();
listOfPollData.add(pollData);
when(mockTaskService.getPollData(anyString())).thenReturn(listOfPollData);
assertEquals(listOfPollData, taskResource.getPollData("w123"));
}
@Test
public void testGetAllPollData() {
PollData pollData = new PollData("queue", "test", "w123", 100);
List<PollData> listOfPollData = new ArrayList<>();
listOfPollData.add(pollData);
when(mockTaskService.getAllPollData()).thenReturn(listOfPollData);
assertEquals(listOfPollData, taskResource.getAllPollData());
}
@Test
public void testRequeueTaskType() {
    // The service reports the number of requeued tasks as a string; verify it is
    // relayed unchanged for the given task type.
    when(mockTaskService.requeuePendingTask(anyString())).thenReturn("1");
    String requeuedCount = taskResource.requeuePendingTask("SIMPLE");
    assertEquals("1", requeuedCount);
}
@Test
public void testSearch() {
    // Assemble a one-summary search result from a sample in-progress task.
    Task sample = new Task();
    sample.setTaskType("SIMPLE");
    sample.setWorkerId("123");
    sample.setDomain("test");
    sample.setStatus(Task.Status.IN_PROGRESS);
    List<TaskSummary> summaries = Collections.singletonList(new TaskSummary(sample));
    SearchResult<TaskSummary> expected = new SearchResult<>(100, summaries);
    // Stub with the exact paging/sort/query arguments the resource will forward.
    when(mockTaskService.search(0, 100, "asc", "*", "*")).thenReturn(expected);
    assertEquals(expected, taskResource.search(0, 100, "asc", "*", "*"));
}
@Test
public void testSearchV2() {
    // V2 search returns full Task objects rather than summaries.
    Task sample = new Task();
    sample.setTaskType("SIMPLE");
    sample.setWorkerId("123");
    sample.setDomain("test");
    sample.setStatus(Task.Status.IN_PROGRESS);
    SearchResult<Task> expected =
            new SearchResult<>(100, Collections.singletonList(sample));
    // Stub with the exact paging/sort/query arguments the resource will forward.
    when(mockTaskService.searchV2(0, 100, "asc", "*", "*")).thenReturn(expected);
    assertEquals(expected, taskResource.searchV2(0, 100, "asc", "*", "*"));
}
@Test
public void testGetExternalStorageLocation() {
    // A mocked location is sufficient: the test only checks delegation.
    ExternalStorageLocation expected = mock(ExternalStorageLocation.class);
    when(mockTaskService.getExternalStorageLocation("path", "operation", "payloadType"))
            .thenReturn(expected);
    ExternalStorageLocation actual =
            taskResource.getExternalStorageLocation("path", "operation", "payloadType");
    assertEquals(expected, actual);
}
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/rest/src/test/java/com/netflix/conductor/rest/controllers/WorkflowResourceTest.java | rest/src/test/java/com/netflix/conductor/rest/controllers/WorkflowResourceTest.java | /*
* Copyright 2020 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.rest.controllers;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.junit.Before;
import org.junit.Test;
import org.mockito.Mock;
import com.netflix.conductor.common.metadata.workflow.RerunWorkflowRequest;
import com.netflix.conductor.common.metadata.workflow.StartWorkflowRequest;
import com.netflix.conductor.common.run.Workflow;
import com.netflix.conductor.service.WorkflowService;
import com.netflix.conductor.service.WorkflowTestService;
import static org.junit.Assert.assertEquals;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.ArgumentMatchers.anyBoolean;
import static org.mockito.ArgumentMatchers.anyInt;
import static org.mockito.ArgumentMatchers.anyList;
import static org.mockito.ArgumentMatchers.anyLong;
import static org.mockito.ArgumentMatchers.anyMap;
import static org.mockito.ArgumentMatchers.anyString;
import static org.mockito.ArgumentMatchers.isNull;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
/**
 * Unit tests for {@link WorkflowResource}: each test stubs or verifies the backing
 * {@link WorkflowService} to confirm the controller is a thin delegation layer.
 *
 * <p>Fixes applied: identifier typos ("Worklfows" -> "Workflows"), consistent
 * "test"-prefixed method names, and removal of the double-brace-initialization
 * anti-pattern (anonymous ArrayList subclasses that capture the enclosing test
 * instance) in favor of {@code List.of}.
 */
public class WorkflowResourceTest {
    @Mock private WorkflowService mockWorkflowService;
    @Mock private WorkflowTestService mockWorkflowTestService;
    private WorkflowResource workflowResource;

    @Before
    public void before() {
        // Mocks are created manually (no Mockito runner is configured).
        this.mockWorkflowService = mock(WorkflowService.class);
        this.mockWorkflowTestService = mock(WorkflowTestService.class);
        this.workflowResource =
                new WorkflowResource(this.mockWorkflowService, this.mockWorkflowTestService);
    }

    @Test
    public void testStartWorkflow() {
        StartWorkflowRequest startWorkflowRequest = new StartWorkflowRequest();
        startWorkflowRequest.setName("w123");
        Map<String, Object> input = new HashMap<>();
        input.put("1", "abc");
        startWorkflowRequest.setInput(input);
        String workflowID = "w112";
        when(mockWorkflowService.startWorkflow(any(StartWorkflowRequest.class)))
                .thenReturn(workflowID);
        assertEquals("w112", workflowResource.startWorkflow(startWorkflowRequest));
    }

    @Test
    public void testStartWorkflowParam() {
        Map<String, Object> input = new HashMap<>();
        input.put("1", "abc");
        String workflowID = "w112";
        when(mockWorkflowService.startWorkflow(
                        anyString(), anyInt(), anyString(), anyInt(), anyMap()))
                .thenReturn(workflowID);
        assertEquals("w112", workflowResource.startWorkflow("test1", 1, "c123", 0, input));
    }

    @Test
    public void testGetWorkflows() {
        Workflow workflow = new Workflow();
        workflow.setCorrelationId("123");
        // List.of replaces the former double-brace-initialized ArrayList.
        List<Workflow> listOfWorkflows = List.of(workflow);
        when(mockWorkflowService.getWorkflows(anyString(), anyString(), anyBoolean(), anyBoolean()))
                .thenReturn(listOfWorkflows);
        assertEquals(listOfWorkflows, workflowResource.getWorkflows("test1", "123", true, true));
    }

    @Test
    public void testGetWorkflowsMultipleCorrelationId() {
        Workflow workflow = new Workflow();
        workflow.setCorrelationId("c123");
        List<Workflow> workflowList = List.of(workflow);
        List<String> correlationIdList = List.of("c123");
        Map<String, List<Workflow>> workflowMap = new HashMap<>();
        workflowMap.put("c123", workflowList);
        when(mockWorkflowService.getWorkflows(anyString(), anyBoolean(), anyBoolean(), anyList()))
                .thenReturn(workflowMap);
        assertEquals(
                workflowMap, workflowResource.getWorkflows("test", true, true, correlationIdList));
    }

    @Test
    public void testGetExecutionStatus() {
        Workflow workflow = new Workflow();
        workflow.setCorrelationId("c123");
        when(mockWorkflowService.getExecutionStatus(anyString(), anyBoolean()))
                .thenReturn(workflow);
        assertEquals(workflow, workflowResource.getExecutionStatus("w123", true));
    }

    @Test
    public void testDelete() {
        workflowResource.delete("w123", true);
        verify(mockWorkflowService, times(1)).deleteWorkflow(anyString(), anyBoolean());
    }

    @Test
    public void testGetRunningWorkflow() {
        List<String> listOfWorkflows = List.of("w123");
        when(mockWorkflowService.getRunningWorkflows(anyString(), anyInt(), anyLong(), anyLong()))
                .thenReturn(listOfWorkflows);
        assertEquals(listOfWorkflows, workflowResource.getRunningWorkflow("w123", 1, 12L, 13L));
    }

    @Test
    public void testDecide() {
        workflowResource.decide("w123");
        verify(mockWorkflowService, times(1)).decideWorkflow(anyString());
    }

    @Test
    public void testPauseWorkflow() {
        workflowResource.pauseWorkflow("w123");
        verify(mockWorkflowService, times(1)).pauseWorkflow(anyString());
    }

    @Test
    public void testResumeWorkflow() {
        workflowResource.resumeWorkflow("test");
        verify(mockWorkflowService, times(1)).resumeWorkflow(anyString());
    }

    @Test
    public void testSkipTaskFromWorkflow() {
        // A null SkipTaskRequest body is a legal call; verify it is forwarded as null.
        workflowResource.skipTaskFromWorkflow("test", "testTask", null);
        verify(mockWorkflowService, times(1))
                .skipTaskFromWorkflow(anyString(), anyString(), isNull());
    }

    @Test
    public void testRerun() {
        RerunWorkflowRequest request = new RerunWorkflowRequest();
        workflowResource.rerun("test", request);
        verify(mockWorkflowService, times(1))
                .rerunWorkflow(anyString(), any(RerunWorkflowRequest.class));
    }

    @Test
    public void testRestart() {
        workflowResource.restart("w123", false);
        verify(mockWorkflowService, times(1)).restartWorkflow(anyString(), anyBoolean());
    }

    @Test
    public void testRetry() {
        workflowResource.retry("w123", false);
        verify(mockWorkflowService, times(1)).retryWorkflow(anyString(), anyBoolean());
    }

    @Test
    public void testResetWorkflow() {
        workflowResource.resetWorkflow("w123");
        verify(mockWorkflowService, times(1)).resetWorkflow(anyString());
    }

    @Test
    public void testTerminate() {
        workflowResource.terminate("w123", "test");
        verify(mockWorkflowService, times(1)).terminateWorkflow(anyString(), anyString());
    }

    @Test
    public void testTerminateRemove() {
        workflowResource.terminateRemove("w123", "test", false);
        verify(mockWorkflowService, times(1))
                .terminateRemove(anyString(), anyString(), anyBoolean());
    }

    @Test
    public void testSearch() {
        workflowResource.search(0, 100, "asc", "*", "*");
        verify(mockWorkflowService, times(1))
                .searchWorkflows(anyInt(), anyInt(), anyString(), anyString(), anyString());
    }

    @Test
    public void testSearchV2() {
        workflowResource.searchV2(0, 100, "asc", "*", "*");
        verify(mockWorkflowService).searchWorkflowsV2(0, 100, "asc", "*", "*");
    }

    @Test
    public void testSearchWorkflowsByTasks() {
        workflowResource.searchWorkflowsByTasks(0, 100, "asc", "*", "*");
        verify(mockWorkflowService, times(1))
                .searchWorkflowsByTasks(anyInt(), anyInt(), anyString(), anyString(), anyString());
    }

    @Test
    public void testSearchWorkflowsByTasksV2() {
        workflowResource.searchWorkflowsByTasksV2(0, 100, "asc", "*", "*");
        verify(mockWorkflowService).searchWorkflowsByTasksV2(0, 100, "asc", "*", "*");
    }

    @Test
    public void testGetExternalStorageLocation() {
        workflowResource.getExternalStorageLocation("path", "operation", "payloadType");
        verify(mockWorkflowService).getExternalStorageLocation("path", "operation", "payloadType");
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/rest/src/test/java/com/netflix/conductor/rest/controllers/ApplicationExceptionMapperTest.java | rest/src/test/java/com/netflix/conductor/rest/controllers/ApplicationExceptionMapperTest.java | /*
* Copyright 2024 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.rest.controllers;
import java.util.Collections;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.mockito.MockedStatic;
import org.mockito.Mockito;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.http.MediaType;
import org.springframework.test.web.servlet.MockMvc;
import org.springframework.test.web.servlet.request.MockMvcRequestBuilders;
import org.springframework.test.web.servlet.setup.MockMvcBuilders;
import com.netflix.conductor.model.TaskModel;
import com.fasterxml.jackson.databind.ObjectMapper;
import static org.mockito.Mockito.*;
import static org.springframework.test.web.servlet.result.MockMvcResultHandlers.print;
import static org.springframework.test.web.servlet.result.MockMvcResultMatchers.*;
// Tests that ApplicationExceptionMapper turns an uncaught controller exception into a
// 5xx response AND logs it. LoggerFactory is statically mocked so the mapper's
// private static logger can be observed.
//
// NOTE(review): this relies on ApplicationExceptionMapper NOT having been class-loaded
// before the static mock is installed in before(); its static logger field is captured
// at class-initialization time. Test-execution order could break this — confirm before
// restructuring.
public class ApplicationExceptionMapperTest {
// Mocked controller used purely as a source of exceptions.
private QueueAdminResource queueAdminResource;
private MockMvc mockMvc;
// Static mock of the LoggerFactory entry point; installed per-test and closed in after().
private static MockedStatic<LoggerFactory> mockLoggerFactory;
// The logger handed to the mapper so logging calls can be verified.
private static final Logger logger = mock(Logger.class);
@Before
public void before() {
// Install the static mock BEFORE the mapper class initializes its logger field.
mockLoggerFactory = Mockito.mockStatic(LoggerFactory.class);
when(LoggerFactory.getLogger(ApplicationExceptionMapper.class)).thenReturn(logger);
this.queueAdminResource = mock(QueueAdminResource.class);
// Standalone MockMvc with the mapper registered as controller advice.
this.mockMvc =
MockMvcBuilders.standaloneSetup(this.queueAdminResource)
.setControllerAdvice(new ApplicationExceptionMapper())
.build();
}
@After
public void after() {
// MockedStatic must be closed or it leaks into other tests on the same thread.
mockLoggerFactory.close();
}
@Test
public void testException() throws Exception {
var exception = new Exception();
// pick a method that raises a generic exception
doThrow(exception).when(this.queueAdminResource).update(any(), any(), any(), any());
// verify we do send an error response
this.mockMvc
.perform(
MockMvcRequestBuilders.post(
"/api/queue/update/workflowId/taskRefName/{status}",
TaskModel.Status.SKIPPED)
.contentType(MediaType.APPLICATION_JSON)
.content(
new ObjectMapper()
.writeValueAsString(Collections.emptyMap())))
.andDo(print())
.andExpect(status().is5xxServerError());
// verify the error was logged
verify(logger)
.error(
"Error {} url: '{}'",
"Exception",
"/api/queue/update/workflowId/taskRefName/SKIPPED",
exception);
verifyNoMoreInteractions(logger);
}
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/rest/src/test/java/com/netflix/conductor/rest/controllers/AdminResourceTest.java | rest/src/test/java/com/netflix/conductor/rest/controllers/AdminResourceTest.java | /*
* Copyright 2020 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.rest.controllers;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.junit.Before;
import org.junit.Test;
import org.mockito.Mock;
import com.netflix.conductor.common.metadata.tasks.Task;
import com.netflix.conductor.service.AdminService;
import static org.junit.Assert.assertEquals;
import static org.mockito.ArgumentMatchers.anyBoolean;
import static org.mockito.ArgumentMatchers.anyInt;
import static org.mockito.ArgumentMatchers.anyString;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
/**
 * Unit tests for {@link AdminResource}, verifying it delegates to {@link AdminService}.
 *
 * <p>Fix: the class under test ({@code adminResource}) was annotated {@code @Mock},
 * which was misleading — it is a real instance built in {@link #before()}. (The
 * annotations were also inert, since no Mockito runner/rule is configured; the mock
 * is created manually.)
 */
public class AdminResourceTest {

    private AdminService mockAdminService;
    // Real subject under test, constructed with the mocked service.
    private AdminResource adminResource;

    @Before
    public void before() {
        this.mockAdminService = mock(AdminService.class);
        this.adminResource = new AdminResource(mockAdminService);
    }

    @Test
    public void testGetAllConfig() {
        Map<String, Object> configs = new HashMap<>();
        configs.put("config1", "test");
        when(mockAdminService.getAllConfig()).thenReturn(configs);
        assertEquals(configs, adminResource.getAllConfig());
    }

    @Test
    public void testView() {
        Task task = new Task();
        task.setReferenceTaskName("test");
        List<Task> listOfTask = new ArrayList<>();
        listOfTask.add(task);
        when(mockAdminService.getListOfPendingTask(anyString(), anyInt(), anyInt()))
                .thenReturn(listOfTask);
        assertEquals(listOfTask, adminResource.view("testTask", 0, 100));
    }

    @Test
    public void testRequeueSweep() {
        String workflowId = "w123";
        when(mockAdminService.requeueSweep(anyString())).thenReturn(workflowId);
        assertEquals(workflowId, adminResource.requeueSweep(workflowId));
    }

    @Test
    public void testGetEventQueues() {
        adminResource.getEventQueues(false);
        verify(mockAdminService, times(1)).getEventQueues(anyBoolean());
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/rest/src/test/java/com/netflix/conductor/rest/controllers/EventResourceTest.java | rest/src/test/java/com/netflix/conductor/rest/controllers/EventResourceTest.java | /*
* Copyright 2020 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.rest.controllers;
import java.util.ArrayList;
import java.util.List;
import org.junit.Before;
import org.junit.Test;
import org.mockito.Mock;
import com.netflix.conductor.common.metadata.events.EventHandler;
import com.netflix.conductor.service.EventService;
import static org.junit.Assert.assertEquals;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.ArgumentMatchers.anyBoolean;
import static org.mockito.ArgumentMatchers.anyString;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
/**
 * Unit tests for {@link EventResource}, verifying delegation to {@link EventService}.
 *
 * <p>Fix: the getter tests previously invoked {@code eventResource.addEventHandler(...)}
 * before stubbing, which had no bearing on the assertions (the returned lists come
 * entirely from the stub) and obscured the tests' intent; those calls are removed.
 */
public class EventResourceTest {

    private EventResource eventResource;
    @Mock private EventService mockEventService;

    @Before
    public void setUp() {
        // Mock is created manually (no Mockito runner is configured).
        this.mockEventService = mock(EventService.class);
        this.eventResource = new EventResource(this.mockEventService);
    }

    @Test
    public void testAddEventHandler() {
        EventHandler eventHandler = new EventHandler();
        eventResource.addEventHandler(eventHandler);
        verify(mockEventService, times(1)).addEventHandler(any(EventHandler.class));
    }

    @Test
    public void testUpdateEventHandler() {
        EventHandler eventHandler = new EventHandler();
        eventResource.updateEventHandler(eventHandler);
        verify(mockEventService, times(1)).updateEventHandler(any(EventHandler.class));
    }

    @Test
    public void testRemoveEventHandlerStatus() {
        eventResource.removeEventHandlerStatus("testEvent");
        verify(mockEventService, times(1)).removeEventHandlerStatus(anyString());
    }

    @Test
    public void testGetEventHandlersForEvent() {
        List<EventHandler> listOfEventHandler = new ArrayList<>();
        listOfEventHandler.add(new EventHandler());
        when(mockEventService.getEventHandlersForEvent(anyString(), anyBoolean()))
                .thenReturn(listOfEventHandler);
        assertEquals(listOfEventHandler, eventResource.getEventHandlersForEvent("testEvent", true));
    }

    @Test
    public void testGetEventHandlers() {
        List<EventHandler> listOfEventHandler = new ArrayList<>();
        listOfEventHandler.add(new EventHandler());
        when(mockEventService.getEventHandlers()).thenReturn(listOfEventHandler);
        assertEquals(listOfEventHandler, eventResource.getEventHandlers());
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/rest/src/main/java/com/netflix/conductor/rest/controllers/ValidationExceptionMapper.java | rest/src/main/java/com/netflix/conductor/rest/controllers/ValidationExceptionMapper.java | /*
* Copyright 2020 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.rest.controllers;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.core.Ordered;
import org.springframework.core.annotation.Order;
import org.springframework.http.HttpStatus;
import org.springframework.http.ResponseEntity;
import org.springframework.web.bind.annotation.ExceptionHandler;
import org.springframework.web.bind.annotation.RestControllerAdvice;
import com.netflix.conductor.common.validation.ErrorResponse;
import com.netflix.conductor.common.validation.ValidationError;
import com.netflix.conductor.core.utils.Utils;
import com.netflix.conductor.metrics.Monitors;
import jakarta.servlet.http.HttpServletRequest;
import jakarta.validation.ConstraintViolation;
import jakarta.validation.ConstraintViolationException;
import jakarta.validation.ValidationException;
/**
 * Maps Jakarta Bean Validation {@link ValidationException}s raised by controllers into
 * structured HTTP {@link ErrorResponse}s.
 *
 * <p>{@link ConstraintViolationException}s (invalid client input) become 400 responses
 * carrying per-field {@link ValidationError} details; any other validation failure is
 * treated as unexpected and returned as a 500.
 */
@RestControllerAdvice
@Order(ValidationExceptionMapper.ORDER)
public class ValidationExceptionMapper {

    // Bug fix: the logger was previously obtained for ApplicationExceptionMapper.class
    // (copy-paste error), so validation failures were logged under the wrong category.
    private static final Logger LOGGER = LoggerFactory.getLogger(ValidationExceptionMapper.class);

    /** Highest precedence so this advice handles validation errors before generic mappers. */
    public static final int ORDER = Ordered.HIGHEST_PRECEDENCE;

    // Server id echoed back in ErrorResponse.instance to identify the failing node.
    private final String host = Utils.getServerId();

    /**
     * Converts a {@link ValidationException} into an HTTP error response.
     *
     * @param request the request that triggered the exception (used for logging)
     * @param exception the validation failure
     * @return 400 with field-level details for constraint violations, 500 otherwise
     */
    @ExceptionHandler(ValidationException.class)
    public ResponseEntity<ErrorResponse> toResponse(
            HttpServletRequest request, ValidationException exception) {
        logException(request, exception);
        HttpStatus httpStatus;
        if (exception instanceof ConstraintViolationException) {
            httpStatus = HttpStatus.BAD_REQUEST;
        } else {
            // Non-constraint validation failures are unexpected; record a metric.
            httpStatus = HttpStatus.INTERNAL_SERVER_ERROR;
            Monitors.error("error", "error");
        }
        return new ResponseEntity<>(toErrorResponse(exception), httpStatus);
    }

    // Dispatches to the detailed converter for constraint violations; everything else
    // becomes a bare 500 ErrorResponse carrying the exception message.
    private ErrorResponse toErrorResponse(ValidationException ve) {
        if (ve instanceof ConstraintViolationException) {
            return constraintViolationExceptionToErrorResponse((ConstraintViolationException) ve);
        } else {
            ErrorResponse result = new ErrorResponse();
            result.setStatus(HttpStatus.INTERNAL_SERVER_ERROR.value());
            result.setMessage(ve.getMessage());
            result.setInstance(host);
            return result;
        }
    }

    // Flattens each constraint violation into a ValidationError (path, message, value).
    private ErrorResponse constraintViolationExceptionToErrorResponse(
            ConstraintViolationException exception) {
        ErrorResponse errorResponse = new ErrorResponse();
        errorResponse.setStatus(HttpStatus.BAD_REQUEST.value());
        errorResponse.setMessage("Validation failed, check below errors for detail.");
        List<ValidationError> validationErrors = new ArrayList<>();
        exception
                .getConstraintViolations()
                .forEach(
                        e ->
                                validationErrors.add(
                                        new ValidationError(
                                                getViolationPath(e),
                                                e.getMessage(),
                                                getViolationInvalidValue(e.getInvalidValue()))));
        errorResponse.setValidationErrors(validationErrors);
        return errorResponse;
    }

    private String getViolationPath(final ConstraintViolation<?> violation) {
        // The property path already renders as "" when absent, so the previous
        // ("".equals(path) ? "" : path) ternary was a no-op and has been removed.
        return violation.getPropertyPath().toString();
    }

    // Renders the invalid value for primitive-ish types only; arbitrary objects and
    // object arrays are skipped because their toString is rarely meaningful to clients.
    private String getViolationInvalidValue(final Object invalidValue) {
        if (invalidValue == null) {
            return null;
        }
        if (invalidValue.getClass().isArray()) {
            if (invalidValue instanceof Object[]) {
                // not helpful to return object array, skip it.
                return null;
            } else if (invalidValue instanceof boolean[]) {
                return Arrays.toString((boolean[]) invalidValue);
            } else if (invalidValue instanceof byte[]) {
                return Arrays.toString((byte[]) invalidValue);
            } else if (invalidValue instanceof char[]) {
                return Arrays.toString((char[]) invalidValue);
            } else if (invalidValue instanceof double[]) {
                return Arrays.toString((double[]) invalidValue);
            } else if (invalidValue instanceof float[]) {
                return Arrays.toString((float[]) invalidValue);
            } else if (invalidValue instanceof int[]) {
                return Arrays.toString((int[]) invalidValue);
            } else if (invalidValue instanceof long[]) {
                return Arrays.toString((long[]) invalidValue);
            } else if (invalidValue instanceof short[]) {
                return Arrays.toString((short[]) invalidValue);
            }
        }
        // It is only helpful to return invalid value of primitive types
        if (invalidValue.getClass().getName().startsWith("java.lang.")) {
            return invalidValue.toString();
        }
        return null;
    }

    // Logs the failure with the request URI so it can be correlated with access logs.
    private void logException(HttpServletRequest request, ValidationException exception) {
        LOGGER.error(
                "Error {} url: '{}'",
                exception.getClass().getSimpleName(),
                request.getRequestURI(),
                exception);
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/rest/src/main/java/com/netflix/conductor/rest/controllers/AdminResource.java | rest/src/main/java/com/netflix/conductor/rest/controllers/AdminResource.java | /*
* Copyright 2020 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.rest.controllers;
import java.util.List;
import java.util.Map;
import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.bind.annotation.PathVariable;
import org.springframework.web.bind.annotation.PostMapping;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RequestParam;
import org.springframework.web.bind.annotation.RestController;
import com.netflix.conductor.common.metadata.tasks.Task;
import com.netflix.conductor.service.AdminService;
import io.swagger.v3.oas.annotations.Operation;
import static com.netflix.conductor.rest.config.RequestMappingConstants.ADMIN;
import static org.springframework.http.MediaType.TEXT_PLAIN_VALUE;
// REST endpoints for administrative operations: inspecting server configuration,
// viewing pending tasks, requeueing workflows for sweep, repairing workflow
// consistency, and listing registered event queues. All work is delegated to
// AdminService.
@RestController
@RequestMapping(ADMIN)
public class AdminResource {
private final AdminService adminService;
public AdminResource(AdminService adminService) {
this.adminService = adminService;
}
// Returns every configuration parameter known to the server as key/value pairs.
@Operation(summary = "Get all the configuration parameters")
@GetMapping("/config")
public Map<String, Object> getAllConfig() {
return adminService.getAllConfig();
}
// Pages through pending tasks of the given type; "start"/"count" default to 0/100.
@GetMapping("/task/{tasktype}")
@Operation(summary = "Get the list of pending tasks for a given task type")
public List<Task> view(
@PathVariable("tasktype") String taskType,
@RequestParam(value = "start", defaultValue = "0", required = false) int start,
@RequestParam(value = "count", defaultValue = "100", required = false) int count) {
return adminService.getListOfPendingTask(taskType, start, count);
}
// Requeues the workflow for the sweeper; the response body is plain text.
@PostMapping(value = "/sweep/requeue/{workflowId}", produces = TEXT_PLAIN_VALUE)
@Operation(summary = "Queue up all the running workflows for sweep")
public String requeueSweep(@PathVariable("workflowId") String workflowId) {
return adminService.requeueSweep(workflowId);
}
// Runs a consistency check/repair and returns the boolean outcome as plain text.
@PostMapping(value = "/consistency/verifyAndRepair/{workflowId}", produces = TEXT_PLAIN_VALUE)
@Operation(summary = "Verify and repair workflow consistency")
public String verifyAndRepairWorkflowConsistency(
@PathVariable("workflowId") String workflowId) {
return String.valueOf(adminService.verifyAndRepairWorkflowConsistency(workflowId));
}
// Lists registered event queues; "verbose" includes extra detail per queue.
@GetMapping("/queues")
@Operation(summary = "Get registered queues")
public Map<String, ?> getEventQueues(
@RequestParam(value = "verbose", defaultValue = "false", required = false)
boolean verbose) {
return adminService.getEventQueues(verbose);
}
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/rest/src/main/java/com/netflix/conductor/rest/controllers/QueueAdminResource.java | rest/src/main/java/com/netflix/conductor/rest/controllers/QueueAdminResource.java | /*
* Copyright 2022 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.rest.controllers;
import java.util.Map;
import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.bind.annotation.PathVariable;
import org.springframework.web.bind.annotation.PostMapping;
import org.springframework.web.bind.annotation.RequestBody;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RestController;
import com.netflix.conductor.core.events.queue.DefaultEventQueueProcessor;
import com.netflix.conductor.model.TaskModel.Status;
import io.swagger.v3.oas.annotations.Operation;
import static com.netflix.conductor.rest.config.RequestMappingConstants.QUEUE;
// REST endpoints for administering event queues: reading queue sizes/names and
// publishing messages that mark WAIT tasks as completed. All operations delegate
// to DefaultEventQueueProcessor.
@RestController
@RequestMapping(QUEUE)
public class QueueAdminResource {
private final DefaultEventQueueProcessor defaultEventQueueProcessor;
public QueueAdminResource(DefaultEventQueueProcessor defaultEventQueueProcessor) {
this.defaultEventQueueProcessor = defaultEventQueueProcessor;
}
// Returns the current depth of each queue, keyed by queue name.
@Operation(summary = "Get the queue length")
@GetMapping(value = "/size")
public Map<String, Long> size() {
return defaultEventQueueProcessor.size();
}
// Returns the queue name registered for each task status.
@Operation(summary = "Get Queue Names")
@GetMapping(value = "/")
public Map<Status, String> names() {
return defaultEventQueueProcessor.queues();
}
// Completes a wait task identified by its reference name; "output" becomes the
// task's output payload. Checked exceptions from the processor are propagated.
@Operation(summary = "Publish a message in queue to mark a wait task as completed.")
@PostMapping(value = "/update/{workflowId}/{taskRefName}/{status}")
public void update(
@PathVariable("workflowId") String workflowId,
@PathVariable("taskRefName") String taskRefName,
@PathVariable("status") Status status,
@RequestBody Map<String, Object> output)
throws Exception {
defaultEventQueueProcessor.updateByTaskRefName(workflowId, taskRefName, output, status);
}
// Same as update(), but the wait task is addressed by its task id.
@Operation(summary = "Publish a message in queue to mark a wait task (by taskId) as completed.")
@PostMapping("/update/{workflowId}/task/{taskId}/{status}")
public void updateByTaskId(
@PathVariable("workflowId") String workflowId,
@PathVariable("taskId") String taskId,
@PathVariable("status") Status status,
@RequestBody Map<String, Object> output)
throws Exception {
defaultEventQueueProcessor.updateByTaskId(workflowId, taskId, output, status);
}
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/rest/src/main/java/com/netflix/conductor/rest/controllers/HealthCheckResource.java | rest/src/main/java/com/netflix/conductor/rest/controllers/HealthCheckResource.java | /*
* Copyright 2020 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.rest.controllers;
import java.util.Collections;
import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RestController;
import com.netflix.runtime.health.api.HealthCheckStatus;
/** Simple liveness endpoint that always reports the service as healthy. */
@RestController
@RequestMapping("/health")
public class HealthCheckResource {

    // SBMTODO: Move this Spring boot health check

    /**
     * Returns a healthy status with no individual indicator results.
     *
     * <p>The original declaration carried {@code throws Exception}, but the body cannot throw a
     * checked exception; the throws clause was dropped (a compatible narrowing of the contract).
     *
     * @return a healthy {@link HealthCheckStatus} with an empty indicator list
     */
    @GetMapping
    public HealthCheckStatus doCheck() {
        return HealthCheckStatus.create(true, Collections.emptyList());
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/rest/src/main/java/com/netflix/conductor/rest/controllers/EventResource.java | rest/src/main/java/com/netflix/conductor/rest/controllers/EventResource.java | /*
* Copyright 2020 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.rest.controllers;
import java.util.List;
import org.springframework.web.bind.annotation.DeleteMapping;
import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.bind.annotation.PathVariable;
import org.springframework.web.bind.annotation.PostMapping;
import org.springframework.web.bind.annotation.PutMapping;
import org.springframework.web.bind.annotation.RequestBody;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RequestParam;
import org.springframework.web.bind.annotation.RestController;
import com.netflix.conductor.common.metadata.events.EventHandler;
import com.netflix.conductor.service.EventService;
import io.swagger.v3.oas.annotations.Operation;
import static com.netflix.conductor.rest.config.RequestMappingConstants.EVENT;
/** REST endpoints for CRUD operations on {@link EventHandler} definitions. */
@RestController
@RequestMapping(EVENT)
public class EventResource {

    private final EventService eventService;

    /** Injects the service that performs all event-handler persistence and lookups. */
    public EventResource(EventService eventService) {
        this.eventService = eventService;
    }

    /** Registers a brand-new event handler. */
    @PostMapping
    @Operation(summary = "Add a new event handler.")
    public void addEventHandler(@RequestBody EventHandler eventHandler) {
        eventService.addEventHandler(eventHandler);
    }

    /** Replaces an existing event handler definition. */
    @PutMapping
    @Operation(summary = "Update an existing event handler.")
    public void updateEventHandler(@RequestBody EventHandler eventHandler) {
        eventService.updateEventHandler(eventHandler);
    }

    /**
     * Removes the event handler with the given name.
     *
     * @param name name of the handler to remove
     */
    @DeleteMapping("/{name}")
    @Operation(summary = "Remove an event handler")
    public void removeEventHandlerStatus(@PathVariable("name") String name) {
        eventService.removeEventHandlerStatus(name);
    }

    /** Lists every registered event handler. */
    @GetMapping
    @Operation(summary = "Get all the event handlers")
    public List<EventHandler> getEventHandlers() {
        return eventService.getEventHandlers();
    }

    /**
     * Lists the handlers registered for a specific event.
     *
     * @param event the event name to look up
     * @param activeOnly when {@code true} (the default), only active handlers are returned
     */
    @GetMapping("/{event}")
    @Operation(summary = "Get event handlers for a given event")
    public List<EventHandler> getEventHandlersForEvent(
            @PathVariable("event") String event,
            @RequestParam(value = "activeOnly", defaultValue = "true", required = false)
                    boolean activeOnly) {
        return eventService.getEventHandlersForEvent(event, activeOnly);
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/rest/src/main/java/com/netflix/conductor/rest/controllers/MetadataResource.java | rest/src/main/java/com/netflix/conductor/rest/controllers/MetadataResource.java | /*
* Copyright 2020 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.rest.controllers;
import java.util.List;
import java.util.Map;
import org.springframework.web.bind.annotation.DeleteMapping;
import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.bind.annotation.PathVariable;
import org.springframework.web.bind.annotation.PostMapping;
import org.springframework.web.bind.annotation.PutMapping;
import org.springframework.web.bind.annotation.RequestBody;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RequestParam;
import org.springframework.web.bind.annotation.RestController;
import com.netflix.conductor.common.metadata.tasks.TaskDef;
import com.netflix.conductor.common.metadata.workflow.WorkflowDef;
import com.netflix.conductor.common.metadata.workflow.WorkflowDefSummary;
import com.netflix.conductor.common.model.BulkResponse;
import com.netflix.conductor.service.MetadataService;
import io.swagger.v3.oas.annotations.Operation;
import static com.netflix.conductor.rest.config.RequestMappingConstants.METADATA;
/** REST endpoints for managing workflow and task definitions (metadata CRUD). */
@RestController
@RequestMapping(value = METADATA)
public class MetadataResource {

    private final MetadataService metadataService;

    /** Injects the service that performs all metadata persistence and validation. */
    public MetadataResource(MetadataService metadataService) {
        this.metadataService = metadataService;
    }

    /** Registers a brand-new workflow definition. */
    @PostMapping("/workflow")
    @Operation(summary = "Create a new workflow definition")
    public void create(@RequestBody WorkflowDef workflowDef) {
        metadataService.registerWorkflowDef(workflowDef);
    }

    /** Validates a workflow definition without persisting it. */
    @PostMapping("/workflow/validate")
    @Operation(summary = "Validates a new workflow definition")
    public void validate(@RequestBody WorkflowDef workflowDef) {
        metadataService.validateWorkflowDef(workflowDef);
    }

    /**
     * Creates or updates multiple workflow definitions in one call.
     *
     * @return per-definition success/failure results
     */
    @PutMapping("/workflow")
    @Operation(summary = "Create or update workflow definition")
    public BulkResponse<String> update(@RequestBody List<WorkflowDef> workflowDefs) {
        return metadataService.updateWorkflowDef(workflowDefs);
    }

    /**
     * Fetches a workflow definition.
     *
     * @param version specific version to fetch; when omitted the service resolves the default
     */
    @Operation(summary = "Retrieves workflow definition along with blueprint")
    @GetMapping("/workflow/{name}")
    public WorkflowDef get(
            @PathVariable("name") String name,
            @RequestParam(value = "version", required = false) Integer version) {
        return metadataService.getWorkflowDef(name, version);
    }

    /** Fetches every workflow definition (all versions). */
    @Operation(summary = "Retrieves all workflow definition along with blueprint")
    @GetMapping("/workflow")
    public List<WorkflowDef> getAll() {
        return metadataService.getWorkflowDefs();
    }

    /** Lightweight listing: names/versions only, no definition bodies. */
    @Operation(summary = "Returns workflow names and versions only (no definition bodies)")
    @GetMapping("/workflow/names-and-versions")
    public Map<String, ? extends Iterable<WorkflowDefSummary>> getWorkflowNamesAndVersions() {
        return metadataService.getWorkflowNamesAndVersions();
    }

    /** Fetches only the latest version of each workflow definition. */
    @Operation(summary = "Returns only the latest version of all workflow definitions")
    @GetMapping("/workflow/latest-versions")
    public List<WorkflowDef> getAllWorkflowsWithLatestVersions() {
        return metadataService.getWorkflowDefsLatestVersions();
    }

    /**
     * Unregisters one version of a workflow definition; running/archived workflow executions
     * created from it are left untouched.
     */
    @DeleteMapping("/workflow/{name}/{version}")
    @Operation(
            summary =
                    "Removes workflow definition. It does not remove workflows associated with the definition.")
    public void unregisterWorkflowDef(
            @PathVariable("name") String name, @PathVariable("version") Integer version) {
        metadataService.unregisterWorkflowDef(name, version);
    }

    /** Registers one or more new task definitions. */
    @PostMapping("/taskdefs")
    @Operation(summary = "Create new task definition(s)")
    public void registerTaskDef(@RequestBody List<TaskDef> taskDefs) {
        metadataService.registerTaskDef(taskDefs);
    }

    // NOTE(review): despite sharing the name "registerTaskDef" with the POST overload above, this
    // endpoint performs an UPDATE (it delegates to updateTaskDef). The misleading Java method name
    // is kept as-is because renaming it would change the class's public interface.
    @PutMapping("/taskdefs")
    @Operation(summary = "Update an existing task")
    public void registerTaskDef(@RequestBody TaskDef taskDef) {
        metadataService.updateTaskDef(taskDef);
    }

    /** Fetches every task definition. */
    @GetMapping(value = "/taskdefs")
    @Operation(summary = "Gets all task definition")
    public List<TaskDef> getTaskDefs() {
        return metadataService.getTaskDefs();
    }

    /** Fetches a single task definition by its task type. */
    @GetMapping("/taskdefs/{tasktype}")
    @Operation(summary = "Gets the task definition")
    public TaskDef getTaskDef(@PathVariable("tasktype") String taskType) {
        return metadataService.getTaskDef(taskType);
    }

    /** Removes the task definition with the given task type. */
    @DeleteMapping("/taskdefs/{tasktype}")
    @Operation(summary = "Remove a task definition")
    public void unregisterTaskDef(@PathVariable("tasktype") String taskType) {
        metadataService.unregisterTaskDef(taskType);
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/rest/src/main/java/com/netflix/conductor/rest/controllers/WorkflowResource.java | rest/src/main/java/com/netflix/conductor/rest/controllers/WorkflowResource.java | /*
* Copyright 2020 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.rest.controllers;
import java.util.List;
import java.util.Map;
import org.springframework.http.HttpStatus;
import org.springframework.web.bind.annotation.DeleteMapping;
import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.bind.annotation.PathVariable;
import org.springframework.web.bind.annotation.PostMapping;
import org.springframework.web.bind.annotation.PutMapping;
import org.springframework.web.bind.annotation.RequestBody;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RequestParam;
import org.springframework.web.bind.annotation.ResponseStatus;
import org.springframework.web.bind.annotation.RestController;
import com.netflix.conductor.common.metadata.workflow.RerunWorkflowRequest;
import com.netflix.conductor.common.metadata.workflow.SkipTaskRequest;
import com.netflix.conductor.common.metadata.workflow.StartWorkflowRequest;
import com.netflix.conductor.common.run.*;
import com.netflix.conductor.service.WorkflowService;
import com.netflix.conductor.service.WorkflowTestService;
import io.swagger.v3.oas.annotations.Operation;
import static com.netflix.conductor.rest.config.RequestMappingConstants.WORKFLOW;
import static org.springframework.http.MediaType.APPLICATION_JSON_VALUE;
import static org.springframework.http.MediaType.TEXT_PLAIN_VALUE;
/**
 * REST endpoints for workflow lifecycle operations: start, query, pause/resume, retry/restart,
 * terminate/delete, search, and test execution. All business logic is delegated to {@link
 * WorkflowService} / {@link WorkflowTestService}.
 */
@RestController
@RequestMapping(WORKFLOW)
public class WorkflowResource {

    private final WorkflowService workflowService;
    private final WorkflowTestService workflowTestService;

    /** Injects the services this controller delegates to. */
    public WorkflowResource(
            WorkflowService workflowService, WorkflowTestService workflowTestService) {
        this.workflowService = workflowService;
        this.workflowTestService = workflowTestService;
    }

    /** Starts a workflow from a full request object; returns the new workflow id as plain text. */
    @PostMapping(produces = TEXT_PLAIN_VALUE)
    @Operation(
            summary =
                    "Start a new workflow with StartWorkflowRequest, which allows task to be executed in a domain")
    public String startWorkflow(@RequestBody StartWorkflowRequest request) {
        return workflowService.startWorkflow(request);
    }

    /**
     * Starts a workflow by name with the request body used directly as workflow input.
     *
     * @param version definition version; service resolves the default when omitted
     * @param priority execution priority, defaults to 0
     * @return the new workflow instance id (plain text)
     */
    @PostMapping(value = "/{name}", produces = TEXT_PLAIN_VALUE)
    @Operation(
            summary =
                    "Start a new workflow. Returns the ID of the workflow instance that can be later used for tracking")
    public String startWorkflow(
            @PathVariable("name") String name,
            @RequestParam(value = "version", required = false) Integer version,
            @RequestParam(value = "correlationId", required = false) String correlationId,
            @RequestParam(value = "priority", defaultValue = "0", required = false) int priority,
            @RequestBody Map<String, Object> input) {
        return workflowService.startWorkflow(name, version, correlationId, priority, input);
    }

    /** Lists workflows of the given name that share a single correlation id. */
    @GetMapping("/{name}/correlated/{correlationId}")
    @Operation(summary = "Lists workflows for the given correlation id")
    public List<Workflow> getWorkflows(
            @PathVariable("name") String name,
            @PathVariable("correlationId") String correlationId,
            @RequestParam(value = "includeClosed", defaultValue = "false", required = false)
                    boolean includeClosed,
            @RequestParam(value = "includeTasks", defaultValue = "false", required = false)
                    boolean includeTasks) {
        return workflowService.getWorkflows(name, correlationId, includeClosed, includeTasks);
    }

    /** Batch variant: maps each correlation id in the body to its matching workflows. */
    @PostMapping(value = "/{name}/correlated")
    @Operation(summary = "Lists workflows for the given correlation id list")
    public Map<String, List<Workflow>> getWorkflows(
            @PathVariable("name") String name,
            @RequestParam(value = "includeClosed", defaultValue = "false", required = false)
                    boolean includeClosed,
            @RequestParam(value = "includeTasks", defaultValue = "false", required = false)
                    boolean includeTasks,
            @RequestBody List<String> correlationIds) {
        return workflowService.getWorkflows(name, includeClosed, includeTasks, correlationIds);
    }

    /** Fetches a single workflow execution; tasks are included by default. */
    @GetMapping("/{workflowId}")
    @Operation(summary = "Gets the workflow by workflow id")
    public Workflow getExecutionStatus(
            @PathVariable("workflowId") String workflowId,
            @RequestParam(value = "includeTasks", defaultValue = "true", required = false)
                    boolean includeTasks) {
        return workflowService.getExecutionStatus(workflowId, includeTasks);
    }

    /**
     * Deletes a workflow execution.
     *
     * @param archiveWorkflow when {@code true} (the default) the workflow is archived rather than
     *     hard-deleted
     */
    @DeleteMapping("/{workflowId}/remove")
    @Operation(summary = "Removes the workflow from the system")
    public void delete(
            @PathVariable("workflowId") String workflowId,
            @RequestParam(value = "archiveWorkflow", defaultValue = "true", required = false)
                    boolean archiveWorkflow) {
        workflowService.deleteWorkflow(workflowId, archiveWorkflow);
    }

    /**
     * Lists ids of currently running workflows of the given name/version, optionally restricted to
     * a start-time window.
     */
    @GetMapping("/running/{name}")
    @Operation(summary = "Retrieve all the running workflows")
    public List<String> getRunningWorkflow(
            @PathVariable("name") String workflowName,
            @RequestParam(value = "version", defaultValue = "1", required = false) int version,
            @RequestParam(value = "startTime", required = false) Long startTime,
            @RequestParam(value = "endTime", required = false) Long endTime) {
        return workflowService.getRunningWorkflows(workflowName, version, startTime, endTime);
    }

    /** Forces an evaluation (decide) pass over the workflow's state. */
    @PutMapping("/decide/{workflowId}")
    @Operation(summary = "Starts the decision task for a workflow")
    public void decide(@PathVariable("workflowId") String workflowId) {
        workflowService.decideWorkflow(workflowId);
    }

    /** Pauses a running workflow. */
    @PutMapping("/{workflowId}/pause")
    @Operation(summary = "Pauses the workflow")
    public void pauseWorkflow(@PathVariable("workflowId") String workflowId) {
        workflowService.pauseWorkflow(workflowId);
    }

    /** Resumes a paused workflow. */
    @PutMapping("/{workflowId}/resume")
    @Operation(summary = "Resumes the workflow")
    public void resumeWorkflow(@PathVariable("workflowId") String workflowId) {
        workflowService.resumeWorkflow(workflowId);
    }

    // NOTE(review): skipTaskRequest has no @RequestBody annotation, so Spring binds it from query
    // parameters (model-attribute binding) rather than the request body — presumably kept this way
    // for client backward compatibility; confirm before changing.
    @PutMapping("/{workflowId}/skiptask/{taskReferenceName}")
    @Operation(summary = "Skips a given task from a current running workflow")
    public void skipTaskFromWorkflow(
            @PathVariable("workflowId") String workflowId,
            @PathVariable("taskReferenceName") String taskReferenceName,
            SkipTaskRequest skipTaskRequest) {
        workflowService.skipTaskFromWorkflow(workflowId, taskReferenceName, skipTaskRequest);
    }

    /** Reruns the workflow from the task specified in the request; returns the workflow id. */
    @PostMapping(value = "/{workflowId}/rerun", produces = TEXT_PLAIN_VALUE)
    @Operation(summary = "Reruns the workflow from a specific task")
    public String rerun(
            @PathVariable("workflowId") String workflowId,
            @RequestBody RerunWorkflowRequest request) {
        return workflowService.rerunWorkflow(workflowId, request);
    }

    /**
     * Restarts a completed workflow from the beginning.
     *
     * @param useLatestDefinitions when {@code true}, re-resolves workflow/task definitions to
     *     their latest versions on restart
     */
    @PostMapping("/{workflowId}/restart")
    @Operation(summary = "Restarts a completed workflow")
    @ResponseStatus(
            value = HttpStatus.NO_CONTENT) // for backwards compatibility with 2.x client which
    // expects a 204 for this request
    public void restart(
            @PathVariable("workflowId") String workflowId,
            @RequestParam(value = "useLatestDefinitions", defaultValue = "false", required = false)
                    boolean useLatestDefinitions) {
        workflowService.restartWorkflow(workflowId, useLatestDefinitions);
    }

    /** Retries the workflow from its last failed task. */
    @PostMapping("/{workflowId}/retry")
    @Operation(summary = "Retries the last failed task")
    @ResponseStatus(
            value = HttpStatus.NO_CONTENT) // for backwards compatibility with 2.x client which
    // expects a 204 for this request
    public void retry(
            @PathVariable("workflowId") String workflowId,
            @RequestParam(
                            value = "resumeSubworkflowTasks",
                            defaultValue = "false",
                            required = false)
                    boolean resumeSubworkflowTasks) {
        workflowService.retryWorkflow(workflowId, resumeSubworkflowTasks);
    }

    /** Resets callback times of all non-terminal SIMPLE tasks to zero. */
    @PostMapping("/{workflowId}/resetcallbacks")
    @Operation(summary = "Resets callback times of all non-terminal SIMPLE tasks to 0")
    @ResponseStatus(
            value = HttpStatus.NO_CONTENT) // for backwards compatibility with 2.x client which
    // expects a 204 for this request
    public void resetWorkflow(@PathVariable("workflowId") String workflowId) {
        workflowService.resetWorkflow(workflowId);
    }

    /** Terminates a running workflow, recording an optional reason. */
    @DeleteMapping("/{workflowId}")
    @Operation(summary = "Terminate workflow execution")
    public void terminate(
            @PathVariable("workflowId") String workflowId,
            @RequestParam(value = "reason", required = false) String reason) {
        workflowService.terminateWorkflow(workflowId, reason);
    }

    /** Terminates the workflow and then removes (or archives) it in one call. */
    @DeleteMapping("/{workflowId}/terminate-remove")
    @Operation(summary = "Terminate workflow execution and remove the workflow from the system")
    public void terminateRemove(
            @PathVariable("workflowId") String workflowId,
            @RequestParam(value = "reason", required = false) String reason,
            @RequestParam(value = "archiveWorkflow", defaultValue = "true", required = false)
                    boolean archiveWorkflow) {
        workflowService.terminateRemove(workflowId, reason, archiveWorkflow);
    }

    /** Searches workflows, returning summaries. */
    @Operation(
            summary = "Search for workflows based on payload and other parameters",
            description =
                    "use sort options as sort=<field>:ASC|DESC e.g. sort=name&sort=workflowId:DESC."
                            + " If order is not specified, defaults to ASC.")
    @GetMapping(value = "/search")
    public SearchResult<WorkflowSummary> search(
            @RequestParam(value = "start", defaultValue = "0", required = false) int start,
            @RequestParam(value = "size", defaultValue = "100", required = false) int size,
            @RequestParam(value = "sort", required = false) String sort,
            @RequestParam(value = "freeText", defaultValue = "*", required = false) String freeText,
            @RequestParam(value = "query", required = false) String query) {
        return workflowService.searchWorkflows(start, size, sort, freeText, query);
    }

    /** Searches workflows, returning full {@link Workflow} objects. */
    @Operation(
            summary = "Search for workflows based on payload and other parameters",
            description =
                    "use sort options as sort=<field>:ASC|DESC e.g. sort=name&sort=workflowId:DESC."
                            + " If order is not specified, defaults to ASC.")
    @GetMapping(value = "/search-v2")
    public SearchResult<Workflow> searchV2(
            @RequestParam(value = "start", defaultValue = "0", required = false) int start,
            @RequestParam(value = "size", defaultValue = "100", required = false) int size,
            @RequestParam(value = "sort", required = false) String sort,
            @RequestParam(value = "freeText", defaultValue = "*", required = false) String freeText,
            @RequestParam(value = "query", required = false) String query) {
        return workflowService.searchWorkflowsV2(start, size, sort, freeText, query);
    }

    /** Searches workflows by task attributes, returning summaries. */
    @Operation(
            summary = "Search for workflows based on task parameters",
            description =
                    "use sort options as sort=<field>:ASC|DESC e.g. sort=name&sort=workflowId:DESC."
                            + " If order is not specified, defaults to ASC")
    @GetMapping(value = "/search-by-tasks")
    public SearchResult<WorkflowSummary> searchWorkflowsByTasks(
            @RequestParam(value = "start", defaultValue = "0", required = false) int start,
            @RequestParam(value = "size", defaultValue = "100", required = false) int size,
            @RequestParam(value = "sort", required = false) String sort,
            @RequestParam(value = "freeText", defaultValue = "*", required = false) String freeText,
            @RequestParam(value = "query", required = false) String query) {
        return workflowService.searchWorkflowsByTasks(start, size, sort, freeText, query);
    }

    /** Searches workflows by task attributes, returning full {@link Workflow} objects. */
    @Operation(
            summary = "Search for workflows based on task parameters",
            description =
                    "use sort options as sort=<field>:ASC|DESC e.g. sort=name&sort=workflowId:DESC."
                            + " If order is not specified, defaults to ASC")
    @GetMapping(value = "/search-by-tasks-v2")
    public SearchResult<Workflow> searchWorkflowsByTasksV2(
            @RequestParam(value = "start", defaultValue = "0", required = false) int start,
            @RequestParam(value = "size", defaultValue = "100", required = false) int size,
            @RequestParam(value = "sort", required = false) String sort,
            @RequestParam(value = "freeText", defaultValue = "*", required = false) String freeText,
            @RequestParam(value = "query", required = false) String query) {
        return workflowService.searchWorkflowsByTasksV2(start, size, sort, freeText, query);
    }

    /**
     * Resolves the external-storage location for workflow payloads. Two paths are mapped; the
     * hyphenated one is the current convention, the other is kept for compatibility.
     */
    @Operation(
            summary =
                    "Get the uri and path of the external storage where the workflow payload is to be stored")
    @GetMapping({"/externalstoragelocation", "external-storage-location"})
    public ExternalStorageLocation getExternalStorageLocation(
            @RequestParam("path") String path,
            @RequestParam("operation") String operation,
            @RequestParam("payloadType") String payloadType) {
        return workflowService.getExternalStorageLocation(path, operation, payloadType);
    }

    /** Executes a workflow against mock task data without touching real workers. */
    @PostMapping(value = "test", produces = APPLICATION_JSON_VALUE)
    @Operation(summary = "Test workflow execution using mock data")
    public Workflow testWorkflow(@RequestBody WorkflowTestRequest request) {
        return workflowTestService.testWorkflow(request);
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/rest/src/main/java/com/netflix/conductor/rest/controllers/WorkflowBulkResource.java | rest/src/main/java/com/netflix/conductor/rest/controllers/WorkflowBulkResource.java | /*
* Copyright 2021 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.rest.controllers;
import java.util.List;
import org.springframework.web.bind.annotation.DeleteMapping;
import org.springframework.web.bind.annotation.PostMapping;
import org.springframework.web.bind.annotation.PutMapping;
import org.springframework.web.bind.annotation.RequestBody;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RequestParam;
import org.springframework.web.bind.annotation.RestController;
import com.netflix.conductor.common.model.BulkResponse;
import com.netflix.conductor.model.WorkflowModel;
import com.netflix.conductor.service.WorkflowBulkService;
import io.swagger.v3.oas.annotations.Operation;
import static com.netflix.conductor.rest.config.RequestMappingConstants.WORKFLOW_BULK;
/** Synchronous Bulk APIs to process the workflows in batches */
/** Synchronous Bulk APIs to process the workflows in batches */
@RestController
@RequestMapping(WORKFLOW_BULK)
public class WorkflowBulkResource {

    private final WorkflowBulkService workflowBulkService;

    /** Injects the service that performs all bulk workflow operations. */
    public WorkflowBulkResource(WorkflowBulkService workflowBulkService) {
        this.workflowBulkService = workflowBulkService;
    }

    /**
     * Pause the list of workflows.
     *
     * @param workflowIds - list of workflow Ids to perform pause operation on
     * @return bulk response object containing a list of succeeded workflows and a list of failed
     *     ones with errors
     */
    @PutMapping("/pause")
    @Operation(summary = "Pause the list of workflows")
    public BulkResponse<String> pauseWorkflow(@RequestBody List<String> workflowIds) {
        return workflowBulkService.pauseWorkflow(workflowIds);
    }

    /**
     * Resume the list of workflows.
     *
     * @param workflowIds - list of workflow Ids to perform resume operation on
     * @return bulk response object containing a list of succeeded workflows and a list of failed
     *     ones with errors
     */
    @PutMapping("/resume")
    @Operation(summary = "Resume the list of workflows")
    public BulkResponse<String> resumeWorkflow(@RequestBody List<String> workflowIds) {
        return workflowBulkService.resumeWorkflow(workflowIds);
    }

    /**
     * Restart the list of workflows.
     *
     * @param workflowIds - list of workflow Ids to perform restart operation on
     * @param useLatestDefinitions if true, use latest workflow and task definitions upon restart
     * @return bulk response object containing a list of succeeded workflows and a list of failed
     *     ones with errors
     */
    @PostMapping("/restart")
    @Operation(summary = "Restart the list of completed workflow")
    public BulkResponse<String> restart(
            @RequestBody List<String> workflowIds,
            @RequestParam(value = "useLatestDefinitions", defaultValue = "false", required = false)
                    boolean useLatestDefinitions) {
        return workflowBulkService.restart(workflowIds, useLatestDefinitions);
    }

    /**
     * Retry the last failed task for each workflow from the list.
     *
     * @param workflowIds - list of workflow Ids to perform retry operation on
     * @return bulk response object containing a list of succeeded workflows and a list of failed
     *     ones with errors
     */
    @PostMapping("/retry")
    @Operation(summary = "Retry the last failed task for each workflow from the list")
    public BulkResponse<String> retry(@RequestBody List<String> workflowIds) {
        return workflowBulkService.retry(workflowIds);
    }

    /**
     * Terminate workflows execution.
     *
     * @param workflowIds - list of workflow Ids to perform terminate operation on
     * @param reason - description to be specified for the terminated workflow for future
     *     references.
     * @return bulk response object containing a list of succeeded workflows and a list of failed
     *     ones with errors
     */
    @PostMapping("/terminate")
    @Operation(summary = "Terminate workflows execution")
    public BulkResponse<String> terminate(
            @RequestBody List<String> workflowIds,
            @RequestParam(value = "reason", required = false) String reason) {
        return workflowBulkService.terminate(workflowIds, reason);
    }

    /**
     * Delete the list of workflows.
     *
     * @param workflowIds - list of workflow Ids to be deleted
     * @param archiveWorkflow when true (the default), archive the workflow instead of hard-deleting
     * @return bulk response object containing a list of successfully deleted workflows
     */
    @DeleteMapping("/remove")
    @Operation(summary = "Remove the list of workflows from the system")
    public BulkResponse<String> deleteWorkflow(
            @RequestBody List<String> workflowIds,
            @RequestParam(value = "archiveWorkflow", defaultValue = "true", required = false)
                    boolean archiveWorkflow) {
        return workflowBulkService.deleteWorkflow(workflowIds, archiveWorkflow);
    }

    /**
     * Terminate then delete the list of workflows.
     *
     * @param workflowIds - list of workflow Ids to be deleted
     * @param archiveWorkflow when true (the default), archive the workflow instead of hard-deleting
     * @param reason - description to be specified for the terminated workflows
     * @return bulk response object containing a list of successfully deleted workflows
     */
    @DeleteMapping("/terminate-remove")
    @Operation(summary = "Terminate then remove the list of workflows from the system")
    public BulkResponse<String> terminateRemove(
            @RequestBody List<String> workflowIds,
            @RequestParam(value = "archiveWorkflow", defaultValue = "true", required = false)
                    boolean archiveWorkflow,
            @RequestParam(value = "reason", required = false) String reason) {
        return workflowBulkService.terminateRemove(workflowIds, reason, archiveWorkflow);
    }

    /**
     * Search workflows for given list of workflows.
     *
     * @param workflowIds - list of workflow Ids to be searched
     * @param includeTasks when true (the default), include each workflow's tasks in the response
     * @return bulk response object containing a list of workflows
     */
    @PostMapping("/search")
    @Operation(summary = "Fetch the workflows for the given list of workflow ids")
    public BulkResponse<WorkflowModel> searchWorkflow(
            @RequestBody List<String> workflowIds,
            @RequestParam(value = "includeTasks", defaultValue = "true", required = false)
                    boolean includeTasks) {
        return workflowBulkService.searchWorkflow(workflowIds, includeTasks);
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/rest/src/main/java/com/netflix/conductor/rest/controllers/TaskResource.java | rest/src/main/java/com/netflix/conductor/rest/controllers/TaskResource.java | /*
* Copyright 2021 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.rest.controllers;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import org.springframework.http.ResponseEntity;
import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.bind.annotation.PathVariable;
import org.springframework.web.bind.annotation.PostMapping;
import org.springframework.web.bind.annotation.RequestBody;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RequestParam;
import org.springframework.web.bind.annotation.RestController;
import com.netflix.conductor.common.metadata.tasks.PollData;
import com.netflix.conductor.common.metadata.tasks.Task;
import com.netflix.conductor.common.metadata.tasks.TaskExecLog;
import com.netflix.conductor.common.metadata.tasks.TaskResult;
import com.netflix.conductor.common.run.ExternalStorageLocation;
import com.netflix.conductor.common.run.SearchResult;
import com.netflix.conductor.common.run.TaskSummary;
import com.netflix.conductor.common.run.Workflow;
import com.netflix.conductor.core.exception.NotFoundException;
import com.netflix.conductor.model.TaskModel;
import com.netflix.conductor.service.TaskService;
import com.netflix.conductor.service.WorkflowService;
import io.swagger.v3.oas.annotations.Operation;
import jakarta.validation.Valid;
import static com.netflix.conductor.rest.config.RequestMappingConstants.TASKS;
import static org.springframework.http.MediaType.APPLICATION_JSON_VALUE;
import static org.springframework.http.MediaType.TEXT_PLAIN_VALUE;
@RestController
@RequestMapping(value = TASKS)
public class TaskResource {
private final TaskService taskService;
private final WorkflowService workflowService;
    /** Injects the task and workflow services this controller delegates to. */
    public TaskResource(TaskService taskService, WorkflowService workflowService) {
        this.taskService = taskService;
        this.workflowService = workflowService;
    }
    /**
     * Polls for a single pending task of the given type, optionally scoped to a worker id and a
     * task domain. Responds with 204 (no body) when no task is available.
     */
    @GetMapping("/poll/{tasktype}")
    @Operation(summary = "Poll for a task of a certain type")
    public ResponseEntity<Task> poll(
            @PathVariable("tasktype") String taskType,
            @RequestParam(value = "workerid", required = false) String workerId,
            @RequestParam(value = "domain", required = false) String domain) {
        // for backwards compatibility with 2.x client which expects a 204 when no Task is found
        return Optional.ofNullable(taskService.poll(taskType, workerId, domain))
                .map(ResponseEntity::ok)
                .orElse(ResponseEntity.noContent().build());
    }
    /**
     * Polls for up to {@code count} pending tasks of the given type, waiting up to {@code timeout}
     * (forwarded to the service) before returning. Always returns 200 with a (possibly empty)
     * list.
     *
     * @param count maximum number of tasks to return, defaults to 1
     * @param timeout poll timeout forwarded to the service, defaults to 100
     */
    @GetMapping("/poll/batch/{tasktype}")
    @Operation(summary = "Batch poll for a task of a certain type")
    public ResponseEntity<List<Task>> batchPoll(
            @PathVariable("tasktype") String taskType,
            @RequestParam(value = "workerid", required = false) String workerId,
            @RequestParam(value = "domain", required = false) String domain,
            @RequestParam(value = "count", defaultValue = "1") int count,
            @RequestParam(value = "timeout", defaultValue = "100") int timeout) {
        List<Task> tasks = taskService.batchPoll(taskType, workerId, domain, count, timeout);
        // Return empty list instead of 204 to avoid NPE in client libraries
        return ResponseEntity.ok(tasks != null ? tasks : List.of());
    }
@PostMapping(produces = TEXT_PLAIN_VALUE)
@Operation(summary = "Update a task")
public String updateTask(@RequestBody TaskResult taskResult) {
taskService.updateTask(taskResult);
return taskResult.getTaskId();
}
@PostMapping("/update-v2")
@Operation(summary = "Update a task and return the next available task to be processed")
public ResponseEntity<Task> updateTaskV2(@RequestBody @Valid TaskResult taskResult) {
TaskModel updatedTask = taskService.updateTask(taskResult);
if (updatedTask == null) {
return ResponseEntity.noContent().build();
}
String taskType = updatedTask.getTaskType();
String domain = updatedTask.getDomain();
return poll(taskType, taskResult.getWorkerId(), domain);
}
@PostMapping(value = "/{workflowId}/{taskRefName}/{status}", produces = TEXT_PLAIN_VALUE)
@Operation(summary = "Update a task By Ref Name")
public String updateTask(
@PathVariable("workflowId") String workflowId,
@PathVariable("taskRefName") String taskRefName,
@PathVariable("status") TaskResult.Status status,
@RequestParam(value = "workerid", required = false) String workerId,
@RequestBody Map<String, Object> output) {
return taskService.updateTask(workflowId, taskRefName, status, workerId, output);
}
@PostMapping(
value = "/{workflowId}/{taskRefName}/{status}/sync",
produces = APPLICATION_JSON_VALUE)
@Operation(summary = "Update a task By Ref Name synchronously and return the updated workflow")
public Workflow updateTaskSync(
@PathVariable("workflowId") String workflowId,
@PathVariable("taskRefName") String taskRefName,
@PathVariable("status") TaskResult.Status status,
@RequestParam(value = "workerid", required = false) String workerId,
@RequestBody Map<String, Object> output) {
Task pending = taskService.getPendingTaskForWorkflow(workflowId, taskRefName);
if (pending == null) {
throw new NotFoundException(
String.format(
"Found no running task %s of workflow %s to update",
taskRefName, workflowId));
}
TaskResult taskResult = new TaskResult(pending);
taskResult.setStatus(status);
taskResult.getOutputData().putAll(output);
taskResult.setWorkerId(workerId);
taskService.updateTask(taskResult);
return workflowService.getExecutionStatus(pending.getWorkflowInstanceId(), true);
}
@PostMapping("/{taskId}/log")
@Operation(summary = "Log Task Execution Details")
public void log(@PathVariable("taskId") String taskId, @RequestBody String log) {
taskService.log(taskId, log);
}
@GetMapping("/{taskId}/log")
@Operation(summary = "Get Task Execution Logs")
public ResponseEntity<List<TaskExecLog>> getTaskLogs(@PathVariable("taskId") String taskId) {
return Optional.ofNullable(taskService.getTaskLogs(taskId))
.filter(logs -> !logs.isEmpty())
.map(ResponseEntity::ok)
.orElseThrow(
() -> new NotFoundException("Task logs not found for taskId: %s", taskId));
}
@GetMapping("/{taskId}")
@Operation(summary = "Get task by Id")
public ResponseEntity<Task> getTask(@PathVariable("taskId") String taskId) {
// for backwards compatibility with 2.x client which expects a 204 when no Task is found
return Optional.ofNullable(taskService.getTask(taskId))
.map(ResponseEntity::ok)
.orElseThrow(() -> new NotFoundException("Task not found for taskId: %s", taskId));
}
@GetMapping("/queue/sizes")
@Operation(summary = "Deprecated. Please use /tasks/queue/size endpoint")
@Deprecated
public Map<String, Integer> size(
@RequestParam(value = "taskType", required = false) List<String> taskTypes) {
return taskService.getTaskQueueSizes(taskTypes);
}
@GetMapping("/queue/size")
@Operation(summary = "Get queue size for a task type.")
public Integer taskDepth(
@RequestParam("taskType") String taskType,
@RequestParam(value = "domain", required = false) String domain,
@RequestParam(value = "isolationGroupId", required = false) String isolationGroupId,
@RequestParam(value = "executionNamespace", required = false)
String executionNamespace) {
return taskService.getTaskQueueSize(taskType, domain, executionNamespace, isolationGroupId);
}
@GetMapping("/queue/all/verbose")
@Operation(summary = "Get the details about each queue")
public Map<String, Map<String, Map<String, Long>>> allVerbose() {
return taskService.allVerbose();
}
@GetMapping("/queue/all")
@Operation(summary = "Get the details about each queue")
public Map<String, Long> all() {
return taskService.getAllQueueDetails();
}
@GetMapping("/queue/polldata")
@Operation(summary = "Get the last poll data for a given task type")
public List<PollData> getPollData(@RequestParam("taskType") String taskType) {
return taskService.getPollData(taskType);
}
@GetMapping("/queue/polldata/all")
@Operation(summary = "Get the last poll data for all task types")
public List<PollData> getAllPollData() {
return taskService.getAllPollData();
}
@PostMapping(value = "/queue/requeue/{taskType}", produces = TEXT_PLAIN_VALUE)
@Operation(summary = "Requeue pending tasks")
public String requeuePendingTask(@PathVariable("taskType") String taskType) {
return taskService.requeuePendingTask(taskType);
}
@Operation(
summary = "Search for tasks based in payload and other parameters",
description =
"use sort options as sort=<field>:ASC|DESC e.g. sort=name&sort=workflowId:DESC."
+ " If order is not specified, defaults to ASC")
@GetMapping(value = "/search")
public SearchResult<TaskSummary> search(
@RequestParam(value = "start", defaultValue = "0", required = false) int start,
@RequestParam(value = "size", defaultValue = "100", required = false) int size,
@RequestParam(value = "sort", required = false) String sort,
@RequestParam(value = "freeText", defaultValue = "*", required = false) String freeText,
@RequestParam(value = "query", required = false) String query) {
return taskService.search(start, size, sort, freeText, query);
}
@Operation(
summary = "Search for tasks based in payload and other parameters",
description =
"use sort options as sort=<field>:ASC|DESC e.g. sort=name&sort=workflowId:DESC."
+ " If order is not specified, defaults to ASC")
@GetMapping(value = "/search-v2")
public SearchResult<Task> searchV2(
@RequestParam(value = "start", defaultValue = "0", required = false) int start,
@RequestParam(value = "size", defaultValue = "100", required = false) int size,
@RequestParam(value = "sort", required = false) String sort,
@RequestParam(value = "freeText", defaultValue = "*", required = false) String freeText,
@RequestParam(value = "query", required = false) String query) {
return taskService.searchV2(start, size, sort, freeText, query);
}
@Operation(summary = "Get the external uri where the task payload is to be stored")
@GetMapping({"/externalstoragelocation", "external-storage-location"})
public ExternalStorageLocation getExternalStorageLocation(
@RequestParam("path") String path,
@RequestParam("operation") String operation,
@RequestParam("payloadType") String payloadType) {
return taskService.getExternalStorageLocation(path, operation, payloadType);
}
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/rest/src/main/java/com/netflix/conductor/rest/controllers/ApplicationExceptionMapper.java | rest/src/main/java/com/netflix/conductor/rest/controllers/ApplicationExceptionMapper.java | /*
* Copyright 2020 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.rest.controllers;
import java.util.HashMap;
import java.util.Map;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.core.annotation.Order;
import org.springframework.http.HttpStatus;
import org.springframework.http.ResponseEntity;
import org.springframework.web.bind.annotation.ExceptionHandler;
import org.springframework.web.bind.annotation.RestControllerAdvice;
import org.springframework.web.servlet.resource.NoResourceFoundException;
import com.netflix.conductor.common.validation.ErrorResponse;
import com.netflix.conductor.core.exception.ConflictException;
import com.netflix.conductor.core.exception.NotFoundException;
import com.netflix.conductor.core.exception.TransientException;
import com.netflix.conductor.core.utils.Utils;
import com.netflix.conductor.metrics.Monitors;
import com.fasterxml.jackson.databind.exc.InvalidFormatException;
import jakarta.servlet.http.HttpServletRequest;
@RestControllerAdvice
@Order(ValidationExceptionMapper.ORDER + 1)
public class ApplicationExceptionMapper {
private static final Logger LOGGER = LoggerFactory.getLogger(ApplicationExceptionMapper.class);
private final String host = Utils.getServerId();
private static final Map<Class<? extends Throwable>, HttpStatus> EXCEPTION_STATUS_MAP =
new HashMap<>();
static {
EXCEPTION_STATUS_MAP.put(NotFoundException.class, HttpStatus.NOT_FOUND);
EXCEPTION_STATUS_MAP.put(ConflictException.class, HttpStatus.CONFLICT);
EXCEPTION_STATUS_MAP.put(IllegalArgumentException.class, HttpStatus.BAD_REQUEST);
EXCEPTION_STATUS_MAP.put(InvalidFormatException.class, HttpStatus.INTERNAL_SERVER_ERROR);
EXCEPTION_STATUS_MAP.put(NoResourceFoundException.class, HttpStatus.NOT_FOUND);
}
@ExceptionHandler(Throwable.class)
public ResponseEntity<ErrorResponse> handleAll(HttpServletRequest request, Throwable th) {
logException(request, th);
HttpStatus status =
EXCEPTION_STATUS_MAP.getOrDefault(th.getClass(), HttpStatus.INTERNAL_SERVER_ERROR);
ErrorResponse errorResponse = new ErrorResponse();
errorResponse.setInstance(host);
errorResponse.setStatus(status.value());
errorResponse.setMessage(th.getMessage());
errorResponse.setRetryable(
th instanceof TransientException); // set it to true for TransientException
Monitors.error("error", String.valueOf(status.value()));
return new ResponseEntity<>(errorResponse, status);
}
private void logException(HttpServletRequest request, Throwable exception) {
LOGGER.error(
"Error {} url: '{}'",
exception.getClass().getSimpleName(),
request.getRequestURI(),
exception);
}
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/rest/src/main/java/com/netflix/conductor/rest/config/RequestMappingConstants.java | rest/src/main/java/com/netflix/conductor/rest/config/RequestMappingConstants.java | /*
* Copyright 2020 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.rest.config;
/**
 * Base URI paths for the Conductor REST controllers. Each controller references one of these in
 * its class-level {@code @RequestMapping}.
 *
 * <p>NOTE(review): this is the "constant interface" pattern, generally discouraged in favor of a
 * final class with a private constructor; changing it now would break any class that
 * {@code implements} it, so it is documented rather than altered.
 */
public interface RequestMappingConstants {
    /** Common prefix shared by every API endpoint. */
    String API_PREFIX = "/api/";
    String ADMIN = API_PREFIX + "admin";
    String EVENT = API_PREFIX + "event";
    String METADATA = API_PREFIX + "metadata";
    String QUEUE = API_PREFIX + "queue";
    String TASKS = API_PREFIX + "tasks";
    /** Note: the bulk path must stay listed before/independent of {@link #WORKFLOW} mappings. */
    String WORKFLOW_BULK = API_PREFIX + "workflow/bulk";
    String WORKFLOW = API_PREFIX + "workflow";
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/rest/src/main/java/com/netflix/conductor/rest/config/RestConfiguration.java | rest/src/main/java/com/netflix/conductor/rest/config/RestConfiguration.java | /*
* Copyright 2020 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.rest.config;
import org.springframework.context.annotation.Configuration;
import org.springframework.web.servlet.config.annotation.ContentNegotiationConfigurer;
import org.springframework.web.servlet.config.annotation.WebMvcConfigurer;
import static org.springframework.http.MediaType.APPLICATION_JSON;
import static org.springframework.http.MediaType.TEXT_PLAIN;
@Configuration
public class RestConfiguration implements WebMvcConfigurer {

    /**
     * Pins content negotiation down to exactly two media types.
     *
     * <p>Every negotiation strategy — query parameter, path extension and the {@code Accept}
     * header — is switched off, so a response is always {@code application/json} unless its
     * handler explicitly declares {@code produces = TEXT_PLAIN_VALUE}, in which case it is sent
     * as {@code text/plain}. Background on Spring MVC content negotiation: <a
     * href="https://spring.io/blog/2013/05/11/content-negotiation-using-spring-mvc">https://spring.io/blog/2013/05/11/content-negotiation-using-spring-mvc</a>
     */
    @Override
    public void configureContentNegotiation(ContentNegotiationConfigurer configurer) {
        // Each call returns the same configurer; written as separate statements for clarity.
        configurer.favorParameter(false);
        configurer.favorPathExtension(false);
        configurer.ignoreAcceptHeader(true);
        configurer.defaultContentType(APPLICATION_JSON, TEXT_PLAIN);
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.