repo stringlengths 5 92 | file_url stringlengths 80 287 | file_path stringlengths 5 197 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 15:37:27 2026-01-04 17:58:21 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
Dynflow/dynflow | https://github.com/Dynflow/dynflow/blob/e14dcdfe57cec99cc78b2f6ab521f00809b1408a/lib/dynflow/world.rb | lib/dynflow/world.rb | # -*- coding: utf-8 -*-
# frozen_string_literal: true
require 'dynflow/world/invalidation'
module Dynflow
# rubocop:disable Metrics/ClassLength
class World
include Algebrick::TypeCheck
include Algebrick::Matching
include Invalidation
attr_reader :id, :config, :client_dispatcher, :executor_dispatcher, :executor, :connector,
:transaction_adapter, :logger_adapter, :coordinator,
:persistence, :action_classes, :subscription_index,
:middleware, :auto_rescue, :clock, :meta, :delayed_executor, :auto_validity_check, :validity_check_timeout, :throttle_limiter,
:termination_timeout, :terminated, :dead_letter_handler, :execution_plan_cleaner
def initialize(config)
@config = Config::ForWorld.new(config, self)
# Set the telemetry instance as soon as possible
Dynflow::Telemetry.set_adapter @config.telemetry_adapter
Dynflow::Telemetry.register_metrics!
@id = SecureRandom.uuid
@logger_adapter = @config.logger_adapter
@clock = spawn_and_wait(Clock, 'clock', logger)
@config.validate
@transaction_adapter = @config.transaction_adapter
@persistence = Persistence.new(self, @config.persistence_adapter,
:backup_deleted_plans => @config.backup_deleted_plans,
:backup_dir => @config.backup_dir)
@coordinator = Coordinator.new(@config.coordinator_adapter)
if @config.executor
@executor = Executors::Parallel.new(self,
executor_class: @config.executor,
heartbeat_interval: @config.executor_heartbeat_interval,
queues_options: @config.queues)
end
@action_classes = @config.action_classes
@auto_rescue = @config.auto_rescue
@exit_on_terminate = Concurrent::AtomicBoolean.new(@config.exit_on_terminate)
@connector = @config.connector
@middleware = Middleware::World.new
@middleware.use Middleware::Common::Transaction if @transaction_adapter
@client_dispatcher = spawn_and_wait(Dispatcher::ClientDispatcher, "client-dispatcher", self, @config.ping_cache_age)
@dead_letter_handler = spawn_and_wait(DeadLetterSilencer, 'default_dead_letter_handler', @config.silent_dead_letter_matchers)
@auto_validity_check = @config.auto_validity_check
@validity_check_timeout = @config.validity_check_timeout
@throttle_limiter = @config.throttle_limiter
@terminated = Concurrent::Promises.resolvable_event
@termination_timeout = @config.termination_timeout
calculate_subscription_index
if executor
@executor_dispatcher = spawn_and_wait(Dispatcher::ExecutorDispatcher, "executor-dispatcher", self, @config.executor_semaphore)
executor.initialized.wait
end
update_register
perform_validity_checks if auto_validity_check
@termination_barrier = Mutex.new
@before_termination_hooks = Queue.new
if @config.auto_terminate
at_exit do
@exit_on_terminate.make_false # make sure we don't terminate twice
self.terminate.wait
end
end
post_initialization
end
# Performs steps once the executor is ready and invalidation of previous worlds is finished.
# Needs to be idempotent, as it can be called several times (especially when auto_validity_check
# is false, as it should be called after the `perform_validity_checks` method)
def post_initialization
@delayed_executor ||= try_spawn(:delayed_executor, Coordinator::DelayedExecutorLock)
@execution_plan_cleaner ||= try_spawn(:execution_plan_cleaner, Coordinator::ExecutionPlanCleanerLock)
update_register
@delayed_executor.start if auto_validity_check && @delayed_executor && !@delayed_executor.started?
self.auto_execute if @config.auto_execute
end
# Registers a hook to be invoked during world shutdown, before the
# internal components are terminated.
def before_termination(&block)
  @before_termination_hooks.push(block)
end
def update_register
@meta ||= @config.meta
@meta['queues'] = @config.queues if @executor
@meta['delayed_executor'] = true if @delayed_executor
@meta['execution_plan_cleaner'] = true if @execution_plan_cleaner
@meta['last_seen'] = Dynflow::Dispatcher::ClientDispatcher::PingCache.format_time
if @already_registered
coordinator.update_record(registered_world)
else
coordinator.register_world(registered_world)
@already_registered = true
end
end
# Builds the coordinator record describing this world: an executor
# record when an executor is present, a client record otherwise.
def registered_world
  record_class = executor ? Coordinator::ExecutorWorld : Coordinator::ClientWorld
  record_class.new(self)
end
def logger
logger_adapter.dynflow_logger
end
def action_logger
logger_adapter.action_logger
end
# Returns the action classes subscribed to the given action class, or an
# empty array when there are none.
#
# Uses Hash#fetch with a default instead of has_key? + [] — a single lookup
# that also never triggers the (frozen) subscription index's mutating
# default proc for missing keys.
def subscribed_actions(action_class)
  @subscription_index.fetch(action_class, [])
end
# reload actions classes, intended only for devel
def reload!
  # TODO what happens with newly loaded classes
  refreshed = []
  @action_classes.each do |klass|
    begin
      refreshed << Utils.constantize(klass.to_s)
    rescue NameError
      # the class no longer exists -- drop it from the list
    end
  end
  @action_classes = refreshed
  middleware.clear_cache!
  calculate_subscription_index
end
TriggerResult = Algebrick.type do
# Returned by #trigger when planning fails.
PlaningFailed = type { fields! execution_plan_id: String, error: Exception }
# Returned by #trigger when planning is successful, #future will resolve after
# ExecutionPlan is executed.
Triggered = type { fields! execution_plan_id: String, future: Concurrent::Promises::ResolvableFuture }
Scheduled = type { fields! execution_plan_id: String }
variants PlaningFailed, Triggered, Scheduled
end
module TriggerResult
def planned?
match self, PlaningFailed => false, Triggered => true, Scheduled => false
end
def triggered?
match self, PlaningFailed => false, Triggered => true, Scheduled => false
end
def scheduled?
match self, PlaningFailed => false, Triggered => false, Scheduled => true
end
def id
execution_plan_id
end
end
module Triggered
def finished
future
end
end
# @return [TriggerResult]
# blocks until action_class is planned
# if no arguments given, the plan is expected to be returned by a block
def trigger(action_class = nil, *args, &block)
  execution_plan =
    if action_class
      plan(action_class, *args)
    else
      raise 'Neither action_class nor a block given' if block.nil?
      block.call(self)
    end
  if execution_plan.state == :planned
    accepted = execute(execution_plan.id, Concurrent::Promises.resolvable_future)
    Triggered[execution_plan.id, accepted]
  else
    PlaningFailed[execution_plan.id, execution_plan.errors.first.exception]
  end
end
def delay(action_class, delay_options, *args)
delay_with_options(action_class: action_class, args: args, delay_options: delay_options)
end
def delay_with_options(action_class:, args:, delay_options:, id: nil, caller_action: nil)
raise 'No action_class given' if action_class.nil?
execution_plan = ExecutionPlan.new(self, id)
execution_plan.delay(caller_action, action_class, delay_options, *args)
Scheduled[execution_plan.id]
end
def chain(plan_uuids, action_class, *args)
plan_uuids = [plan_uuids] unless plan_uuids.is_a? Array
result = delay_with_options(action_class: action_class, args: args, delay_options: { frozen: true })
plan_uuids.each do |plan_uuid|
persistence.chain_execution_plan(plan_uuid, result.execution_plan_id)
end
persistence.set_delayed_plan_frozen(result.execution_plan_id, false)
result
end
def plan_elsewhere(action_class, *args)
execution_plan = ExecutionPlan.new(self, nil)
execution_plan.delay(nil, action_class, {}, *args)
plan_request(execution_plan.id)
Scheduled[execution_plan.id]
end
def plan(action_class, *args)
plan_with_options(action_class: action_class, args: args)
end
def plan_with_options(action_class:, args:, id: nil, caller_action: nil)
ExecutionPlan.new(self, id).tap do |execution_plan|
coordinator.acquire(Coordinator::PlanningLock.new(self, execution_plan.id)) do
execution_plan.prepare(action_class, caller_action: caller_action)
execution_plan.plan(*args)
end
end
end
# @return [Concurrent::Promises::ResolvableFuture] containing execution_plan when finished
# raises when ExecutionPlan is not accepted for execution
def execute(execution_plan_id, done = Concurrent::Promises.resolvable_future)
publish_request(Dispatcher::Execution[execution_plan_id], done, true)
end
def event(execution_plan_id, step_id, event, done = Concurrent::Promises.resolvable_future, optional: false)
publish_request(Dispatcher::Event[execution_plan_id, step_id, event, nil, optional], done, false)
end
def plan_event(execution_plan_id, step_id, event, time, accepted = Concurrent::Promises.resolvable_future, optional: false)
publish_request(Dispatcher::Event[execution_plan_id, step_id, event, time, optional], accepted, false)
end
def plan_request(execution_plan_id, done = Concurrent::Promises.resolvable_future)
publish_request(Dispatcher::Planning[execution_plan_id], done, false)
end
def ping(world_id, timeout, done = Concurrent::Promises.resolvable_future)
publish_request(Dispatcher::Ping[world_id, true], done, false, timeout)
end
def ping_without_cache(world_id, timeout, done = Concurrent::Promises.resolvable_future)
publish_request(Dispatcher::Ping[world_id, false], done, false, timeout)
end
def get_execution_status(world_id, execution_plan_id, timeout, done = Concurrent::Promises.resolvable_future)
publish_request(Dispatcher::Status[world_id, execution_plan_id], done, false, timeout)
end
def halt(execution_plan_id, accepted = Concurrent::Promises.resolvable_future)
coordinator.acquire(Coordinator::ExecutionInhibitionLock.new(execution_plan_id))
publish_request(Dispatcher::Halt[execution_plan_id], accepted, false)
end
# Sends +request+ to the client dispatcher and returns +done+, the future
# that will eventually carry the response.
#
# @param request [Dispatcher::Request] the envelope to publish
# @param done [Concurrent::Promises::ResolvableFuture] resolved when the request finishes
# @param wait_for_accepted [true, false] when true, block until the dispatcher
#   confirms it accepted the request
# @param timeout [Numeric, nil] optional timeout forwarded to the dispatcher
def publish_request(request, done, wait_for_accepted, timeout = nil)
  accepted = Concurrent::Promises.resolvable_future
  # If the dispatcher refuses the request, propagate the failure to `done`.
  accepted.rescue do |reason|
    done.reject reason if reason
  end
  client_dispatcher.ask([:publish_request, done, request, timeout], accepted)
  accepted.wait if wait_for_accepted
  done
rescue => e
  # NOTE(review): on a local failure this rejects `accepted` and returns the
  # result of `reject` rather than `done` -- confirm no caller relies on the
  # return value on this path.
  accepted.reject e
end
def terminate(future = Concurrent::Promises.resolvable_future)
start_termination.tangle(future)
future
end
def terminating?
defined?(@terminating)
end
# 24119 - ensure delayed executor is preserved after invalidation
# executes plans that are planned/paused and haven't reported any error yet (usually when no executor
# was available by the time of planning or terminating)
#
# Guarded by the auto-execute coordinator lock so only one world performs
# this at a time; returns [] when another world already holds the lock.
#
# @return [Array] results of triggering execution for each eligible plan
def auto_execute
  coordinator.acquire(Coordinator::AutoExecuteLock.new(self)) do
    planned_execution_plans =
      self.persistence.find_execution_plans filters: { 'state' => %w(planned paused), 'result' => (ExecutionPlan.results - [:error]).map(&:to_s) }
    planned_execution_plans.map do |ep|
      # Skip plans some executor already runs (an execution lock exists).
      if coordinator.find_locks(Dynflow::Coordinator::ExecutionLock.unique_filter(ep.id)).empty?
        execute(ep.id)
      end
    end.compact
  end
rescue Coordinator::LockError => e
  # Fixed typo in the log message ("aquired" -> "acquired").
  logger.info "auto-executor lock already acquired: #{e.message}"
  []
end
# Tries to spawn an optional world-level component (e.g. delayed executor or
# execution plan cleaner) configured under +what+, optionally guarding the
# spawn with a coordinator lock so only one world runs the component.
#
# @param what [Symbol] name of the @config attribute holding the component
# @param lock_class [Class, nil] coordinator lock class to acquire first
# @return [Object, nil] the spawned component, or nil when this world has no
#   executor, the component is not configured, or the lock is already taken
def try_spawn(what, lock_class = nil)
  object = nil
  return nil if !executor || (object = @config.public_send(what)).nil?
  coordinator.acquire(lock_class.new(self)) if lock_class
  object.spawn.wait
  object
rescue Coordinator::LockError
  nil
end
private
# Builds (once) the future chain performing the world's orderly shutdown:
# before-termination hooks, delayed executor, throttle limiter, executor,
# client dispatcher, connector, clock, and finally the coordinator record.
# Returns the memoized @terminating future; safe to call repeatedly.
def start_termination
  @termination_barrier.synchronize do
    return @terminating if @terminating
    # NOTE(review): termination_future is a fresh local variable here, so the
    # ||= always assigns -- looks like a refactoring leftover; confirm.
    termination_future ||= Concurrent::Promises.future do
      begin
        run_before_termination_hooks
        if delayed_executor
          logger.info "start terminating delayed_executor..."
          delayed_executor.terminate.wait(termination_timeout)
        end
        logger.info "start terminating throttle_limiter..."
        throttle_limiter.terminate.wait(termination_timeout)
        terminate_executor
        logger.info "start terminating client dispatcher..."
        client_dispatcher_terminated = Concurrent::Promises.resolvable_future
        client_dispatcher.ask([:start_termination, client_dispatcher_terminated])
        client_dispatcher_terminated.wait(termination_timeout)
        logger.info "stop listening for new events..."
        connector.stop_listening(self, termination_timeout)
        if @clock
          logger.info "start terminating clock..."
          clock.ask(:terminate!).wait(termination_timeout)
        end
        begin
          # Losing the coordinator record at shutdown is tolerable; ignore.
          coordinator.delete_world(registered_world, true)
        rescue Dynflow::Errors::FatalPersistenceError => e
          nil
        end
        @terminated.resolve
        true
      rescue => e
        logger.fatal(e)
      end
    end
    @terminating = Concurrent::Promises.future do
      termination_future.wait(termination_timeout)
    end.on_resolution do
      # NOTE(review): @terminated.resolve may run a second time here after the
      # inner future already resolved it -- confirm the resolvable event
      # tolerates repeated resolution.
      @terminated.resolve
      Thread.new do
        logger.info 'World terminated, exiting.'
        Kernel.exit if @exit_on_terminate.true?
      end
    end
  end
end
# Builds a frozen index mapping each subscribed-to action class to the list
# of action classes subscribing to it, based on each class's `subscribe`
# declaration.
def calculate_subscription_index
  @subscription_index =
    action_classes.each_with_object(Hash.new { |h, k| h[k] = [] }) do |klass, index|
      next unless klass.subscribe
      # `subscribe` may return a single class/name or a list of them.
      Array(klass.subscribe).each do |subscribed_class|
        index[Utils.constantize(subscribed_class.to_s)] << klass
      end
    end.tap { |o| o.freeze }
end
# Runs the queued before-termination hooks one at a time, each inside its
# own future so a single hook cannot block shutdown forever: a hook that
# does not finish within termination_timeout is logged and abandoned.
def run_before_termination_hooks
  until @before_termination_hooks.empty?
    hook_run = Concurrent::Promises.future do
      begin
        @before_termination_hooks.pop.call
      rescue => e
        # A failing hook is logged but must not abort the shutdown sequence.
        logger.error e
      end
    end
    logger.error "timeout running before_termination_hook" unless hook_run.wait(termination_timeout)
  end
end
# Spawns the given actor class under +name+ and blocks until it reports
# successful initialization, then hands the actor back.
def spawn_and_wait(klass, name, *args)
  initialized = Concurrent::Promises.resolvable_future
  actor = klass.spawn(name: name, args: args, initialized: initialized)
  initialized.wait
  actor
end
# Shuts down the executor side of the world: stop accepting new work from
# the connector, terminate the executor itself, then its dispatcher. Each
# phase waits up to termination_timeout. No-op for client-only worlds.
def terminate_executor
  return unless executor
  connector.stop_receiving_new_work(self, termination_timeout)
  logger.info "start terminating executor..."
  executor.terminate.wait(termination_timeout)
  logger.info "start terminating executor dispatcher..."
  executor_dispatcher_terminated = Concurrent::Promises.resolvable_future
  executor_dispatcher.ask([:start_termination, executor_dispatcher_terminated])
  executor_dispatcher_terminated.wait(termination_timeout)
end
end
# rubocop:enable Metrics/ClassLength
end
| ruby | MIT | e14dcdfe57cec99cc78b2f6ab521f00809b1408a | 2026-01-04T17:50:16.326730Z | false |
Dynflow/dynflow | https://github.com/Dynflow/dynflow/blob/e14dcdfe57cec99cc78b2f6ab521f00809b1408a/lib/dynflow/utils.rb | lib/dynflow/utils.rb | # frozen_string_literal: true
module Dynflow
module Utils
require 'dynflow/utils/indifferent_hash'
require 'dynflow/utils/priority_queue'
# Ensures +hash+ contains no keys outside +valid_keys+ (both symbol and
# string forms are accepted); raises ArgumentError otherwise.
#
# @return [Hash] the validated hash, unchanged
def self.validate_keys!(hash, *valid_keys)
  valid_keys.flatten!
  allowed = valid_keys + valid_keys.map(&:to_s)
  extra = hash.keys - allowed
  unless extra.empty?
    raise ArgumentError, "Unexpected options #{extra.inspect}. "\
                         "Valid keys are: #{valid_keys.map(&:inspect).join(', ')}"
  end
  hash
end
# Converts top-level keys of +hash+ to symbols; delegates to the hash's own
# #symbolize_keys when available (e.g. ActiveSupport hashes).
def self.symbolize_keys(hash)
  return hash.symbolize_keys if hash.respond_to?(:symbolize_keys)
  hash.each_with_object({}) do |(key, value), result|
    result[key.to_sym] = value
  end
end
# Converts top-level keys of +hash+ to strings; delegates to the hash's own
# #stringify_keys when available (e.g. ActiveSupport hashes).
def self.stringify_keys(hash)
  return hash.stringify_keys if hash.respond_to?(:stringify_keys)
  hash.each_with_object({}) do |(key, value), result|
    result[key.to_s] = value
  end
end
# Inspired by ActiveSupport::Inflector
#
# Resolves a (possibly namespaced) constant name such as "Foo::Bar" to the
# constant itself, delegating to String#constantize when ActiveSupport is
# loaded. Unlike plain Object.const_get, nested lookups refuse constants
# that would only be found via Object's ancestors rather than the named
# scope.
#
# @param string [String] constant name, optionally prefixed with '::'
# @return [Object] the resolved constant
# @raise [NameError] when the constant cannot be resolved
def self.constantize(string)
  return string.constantize if string.respond_to?(:constantize)
  names = string.split('::')
  # Trigger a built-in NameError exception including the ill-formed constant in the message.
  Object.const_get(string) if names.empty?
  # Remove the first blank element in case of '::ClassName' notation.
  names.shift if names.size > 1 && names.first.empty?
  names.inject(Object) do |constant, name|
    if constant == Object
      constant.const_get(name)
    else
      candidate = constant.const_get(name)
      # Accept the candidate when the current scope owns it directly, or
      # when there is no same-named top-level constant it could have come from.
      next candidate if constant.const_defined?(name, false)
      next candidate unless Object.const_defined?(name)
      # Go down the ancestors to check if it is owned directly. The check
      # stops when we reach Object or the end of ancestors tree.
      constant = constant.ancestors.inject do |const, ancestor|
        break const if ancestor == Object
        break ancestor if ancestor.const_defined?(name, false)
        const
      end
      # owner is in Object, so raise
      constant.const_get(name, false)
    end
  end
end
# Wraps +hash+ so it can be read with both string and symbol keys. Prefers
# ActiveSupport's HashWithIndifferentAccess when it is loaded, otherwise
# falls back to Dynflow's own IndifferentHash.
def self.indifferent_hash(hash)
  if defined? ::HashWithIndifferentAccess
    # the users already have it: lets give them what they are used to
    return ::HashWithIndifferentAccess.new(hash)
  end
  hash.is_a?(IndifferentHash) ? hash : IndifferentHash.new(hash)
end
end
end
| ruby | MIT | e14dcdfe57cec99cc78b2f6ab521f00809b1408a | 2026-01-04T17:50:16.326730Z | false |
Dynflow/dynflow | https://github.com/Dynflow/dynflow/blob/e14dcdfe57cec99cc78b2f6ab521f00809b1408a/lib/dynflow/actors.rb | lib/dynflow/actors.rb | # frozen_string_literal: true
module Dynflow
module Actors
require 'dynflow/actors/execution_plan_cleaner'
end
end
| ruby | MIT | e14dcdfe57cec99cc78b2f6ab521f00809b1408a | 2026-01-04T17:50:16.326730Z | false |
Dynflow/dynflow | https://github.com/Dynflow/dynflow/blob/e14dcdfe57cec99cc78b2f6ab521f00809b1408a/lib/dynflow/web_console.rb | lib/dynflow/web_console.rb | # frozen_string_literal: true
require 'dynflow/web'
warn %{"require 'dynflow/web_console'" is deprecated, use "require 'dynflow/web'" instead}
Dynflow::WebConsole = Dynflow::Web
| ruby | MIT | e14dcdfe57cec99cc78b2f6ab521f00809b1408a | 2026-01-04T17:50:16.326730Z | false |
Dynflow/dynflow | https://github.com/Dynflow/dynflow/blob/e14dcdfe57cec99cc78b2f6ab521f00809b1408a/lib/dynflow/serializer.rb | lib/dynflow/serializer.rb | # frozen_string_literal: true
require 'algebrick/serializer'
module Dynflow
def self.serializer
@serializer ||= Serializer.new
end
class Serializer < Algebrick::Serializer
ARBITRARY_TYPE_KEY = :class
MARSHAL_KEY = :marshaled
# Deserializes +data+; arrays are deserialized element-wise, anything else
# is handed to Algebrick::Serializer#load.
def load(data, options = {})
  return super unless data.is_a?(::Array)
  data.map { |v| load(v) }
end
# Serializes +object+; arrays are serialized element-wise, anything else
# is handed to Algebrick::Serializer#dump.
def dump(object, options = {})
  return super unless object.is_a?(::Array)
  object.map { |v| dump(v) }
end
protected
# Reverses generate_other: turns a serialized hash back into the object it
# represents. Handles, in order: Marshal-dumped payloads, serialized Time
# values, and arbitrary classes responding to .from_hash. Anything else is
# returned untouched.
def parse_other(other, options = {})
  if Hash === other
    # Keys may be symbols or strings depending on the storage backend.
    if (marshal_value = other[MARSHAL_KEY] || other[MARSHAL_KEY.to_s])
      # NOTE(review): Marshal.load on stored data -- safe only as long as the
      # persistence backend is trusted.
      return Marshal.load(Base64.strict_decode64(marshal_value))
    end
    if (type_name = other[ARBITRARY_TYPE_KEY] || other[ARBITRARY_TYPE_KEY.to_s])
      if type_name == 'Time' && (time_str = other['value'])
        return Serializable.send(:string_to_time, time_str)
      end
      # Unknown class names are tolerated: fall through and return the raw hash.
      type = Utils.constantize(type_name) rescue nil
      if type && type.respond_to?(:from_hash)
        return type.from_hash other
      end
    end
  end
  return other
end
# Serializes objects Algebrick doesn't know how to handle into a hash form
# parse_other can restore: prefers the object's own to_h/to_hash,
# special-cases Time as a formatted string, and falls back to a
# Base64-encoded Marshal dump tagged with the class name.
def generate_other(object, options = {})
  hash = case
         when object.respond_to?(:to_h)
           object.to_h
         when object.respond_to?(:to_hash)
           object.to_hash
         when object.is_a?(Time) && !options[:marshaled_time]
           { ARBITRARY_TYPE_KEY => 'Time', 'value' => object.utc.strftime(Serializable::TIME_FORMAT) }
         else
           { ARBITRARY_TYPE_KEY => object.class.to_s,
             MARSHAL_KEY => Base64.strict_encode64(Marshal.dump(object)) }
         end
  # A to_h/to_hash result must carry the type key itself, otherwise it could
  # not be restored -- fail loudly instead of storing unusable data.
  raise "Missing #{ARBITRARY_TYPE_KEY} key in #{hash.inspect}" unless hash.key?(ARBITRARY_TYPE_KEY)
  hash
end
end
end
| ruby | MIT | e14dcdfe57cec99cc78b2f6ab521f00809b1408a | 2026-01-04T17:50:16.326730Z | false |
Dynflow/dynflow | https://github.com/Dynflow/dynflow/blob/e14dcdfe57cec99cc78b2f6ab521f00809b1408a/lib/dynflow/round_robin.rb | lib/dynflow/round_robin.rb | # frozen_string_literal: true
module Dynflow
# A simple round-robin scheduling implementation used at various
# places in Dynflow
class RoundRobin
  def initialize
    @data = []
    @cursor = 0
  end

  # Appends an item to the rotation; returns self for chaining.
  def add(item)
    @data << item
    self
  end

  # Removes an item from the rotation; returns self for chaining.
  def delete(item)
    @data.delete(item)
    self
  end

  # Yields the next item in the rotation, wrapping around at the end.
  # Returns nil when the rotation is empty.
  def next
    @cursor = 0 unless @cursor < @data.size
    current = @data[@cursor]
    @cursor += 1
    current
  end

  def empty?
    @data.empty?
  end

  # the `add` and `delete` methods should be preferred, but
  # sometimes the list of things to iterate though can not be owned
  # by the round robin object itself
  attr_writer :data
end
end
| ruby | MIT | e14dcdfe57cec99cc78b2f6ab521f00809b1408a | 2026-01-04T17:50:16.326730Z | false |
Dynflow/dynflow | https://github.com/Dynflow/dynflow/blob/e14dcdfe57cec99cc78b2f6ab521f00809b1408a/lib/dynflow/execution_plan.rb | lib/dynflow/execution_plan.rb | # frozen_string_literal: true
require 'securerandom'
module Dynflow
# rubocop:disable Metrics/ClassLength
# TODO extract planning logic to an extra class ExecutionPlanner
class ExecutionPlan < Serializable
# a fallback object representing a plan with some corrupted data,
# preventing to load the whole plan properly, this can be used for presenting
# at least some data and not running into internal server errors
class InvalidPlan
  attr_reader :exception, :id, :label, :state,
              :started_at, :ended_at,
              :execution_time, :real_time, :execution_history

  def initialize(exception, id, label, state,
                 started_at = nil, ended_at = nil,
                 execution_time = nil, real_time = nil, execution_history = nil)
    @exception = exception
    @id = id
    @state = state
    @started_at = started_at
    @ended_at = ended_at
    @execution_time = execution_time
    @real_time = real_time
    # present readable placeholders where the data could not be loaded
    @label = label || 'N/A'
    @execution_history = execution_history || []
  end

  # Invalid plans always report themselves as not valid.
  def valid?
    false
  end

  # The result cannot be computed for a corrupted plan.
  def result
    'N/A'
  end
end
include Algebrick::TypeCheck
include Stateful
require 'dynflow/execution_plan/steps'
require 'dynflow/execution_plan/output_reference'
require 'dynflow/execution_plan/dependency_graph'
attr_reader :id, :world, :label,
:root_plan_step, :steps, :run_flow, :finalize_flow,
:started_at, :ended_at, :execution_time, :real_time, :execution_history
# All states an execution plan can be in.
def self.states
  @states ||= %i[pending scheduled planning planned running paused stopped]
end
require 'dynflow/execution_plan/hooks'
def self.results
@results ||= [:pending, :success, :warning, :error, :cancelled]
end
def self.state_transitions
@state_transitions ||= { pending: [:stopped, :scheduled, :planning],
scheduled: [:planning, :stopped],
planning: [:planned, :stopped],
planned: [:running, :stopped],
running: [:paused, :stopped],
paused: [:running, :stopped],
stopped: [] }
end
# all params with default values are part of *private* api
def initialize(world,
id = nil,
label = nil,
state = :pending,
root_plan_step = nil,
run_flow = Flows::Concurrence.new([]),
finalize_flow = Flows::Sequence.new([]),
steps = {},
started_at = nil,
ended_at = nil,
execution_time = nil,
real_time = 0.0,
execution_history = ExecutionHistory.new)
id ||= SecureRandom.uuid
@id = Type! id, String
@world = Type! world, World
@label = Type! label, String, NilClass
self.state = state
@run_flow = Type! run_flow, Flows::Abstract
@finalize_flow = Type! finalize_flow, Flows::Abstract
@root_plan_step = root_plan_step
@started_at = Type! started_at, Time, NilClass
@ended_at = Type! ended_at, Time, NilClass
@execution_time = Type! execution_time, Numeric, NilClass
@real_time = Type! real_time, Numeric
@execution_history = Type! execution_history, ExecutionHistory
steps.all? do |k, v|
Type! k, Integer
Type! v, Steps::Abstract
end
@steps = steps
end
def valid?
true
end
def logger
@world.logger
end
# @param state [Symbol] representing the new state
# @param history_notice [Symbol|string|false] should a note to execution_history be added as well?
# Possible values:
# - :auto (default) - the history notice will be added based on the new state
# - string - custom history notice is added
# - false - don't add any notice
def update_state(state, history_notice: :auto)
hooks_to_run = [state]
original = self.state
case self.state = state
when :planning
@started_at = Time.now.utc
when :stopped
@ended_at = Time.now.utc
@real_time = @ended_at - @started_at unless @started_at.nil?
@execution_time = compute_execution_time
key = failure? ? :failure : :success
Dynflow::Telemetry.with_instance do |t|
t.increment_counter(:dynflow_finished_execution_plans, 1,
telemetry_common_options.merge(:result => key.to_s))
end
hooks_to_run << key
world.persistence.delete_delayed_plans(:execution_plan_uuid => id) if delay_record && original == :scheduled
unlock_all_singleton_locks!
unlock_execution_inhibition_lock!
when :paused
unlock_all_singleton_locks!
else
# ignore
end
logger.debug format('%13s %s %9s >> %9s',
'ExecutionPlan', id, original, state)
add_history_notice(history_notice)
self.save
toggle_telemetry_state original == :pending ? nil : original.to_s,
self.state == :stopped ? nil : self.state.to_s
hooks_to_run.each { |kind| run_hooks kind }
end
def run_hooks(state)
records = persistence.load_actions_attributes(@id, [:id, :class]).select do |action|
Utils.constantize(action[:class])
.execution_plan_hooks
.on(state).any?
end
action_ids = records.compact.map { |record| record[:id] }
return if action_ids.empty?
persistence.load_actions(self, action_ids).each do |action|
world.middleware.execute(:hook, action, self) do
action.class.execution_plan_hooks.run(self, action, state)
end
end
end
# Aggregated result of the whole plan derived from its steps' states:
# :cancelled and :error dominate, skipped steps degrade the result to
# :warning, all-success means :success, anything else is still :pending.
def result
  states = steps.values.map(&:state)
  if states.include?(:cancelled)
    :cancelled
  elsif states.include?(:error)
    :error
  elsif states.any? { |state| [:skipping, :skipped].include?(state) }
    :warning
  elsif states.all? { |state| state == :success }
    :success
  else
    :pending
  end
end
def error?
result == :error
end
def failure?
[:error, :warning, :cancelled].include?(result)
end
def error_in_plan?
steps_in_state(:error).any? { |step| step.is_a? Steps::PlanStep }
end
def errors
steps.values.map(&:error).compact
end
def rescue_strategy
rescue_strategy = entry_action.rescue_strategy || Action::Rescue::Skip
Type! rescue_strategy, Action::Rescue::Strategy
end
def sub_plans
persistence.find_execution_plans(filters: { 'caller_execution_plan_id' => self.id })
end
def sub_plans_count
persistence.find_execution_plan_counts(filters: { 'caller_execution_plan_id' => self.id })
end
def prepare_for_rescue
case rescue_strategy
when Action::Rescue::Pause
:paused
when Action::Rescue::Fail
:stopped
when Action::Rescue::Skip
failed_steps.each { |step| self.skip(step) }
:running
else
:paused
end
end
def plan_steps
steps_of_type(Dynflow::ExecutionPlan::Steps::PlanStep)
end
def run_steps
steps_of_type(Dynflow::ExecutionPlan::Steps::RunStep)
end
def finalize_steps
steps_of_type(Dynflow::ExecutionPlan::Steps::FinalizeStep)
end
def failed_steps
steps_in_state(:error)
end
# All steps whose state is one of the given +states+.
def steps_in_state(*states)
  steps.values.select { |step| states.include?(step.state) }
end
# Hands out sequential action ids, starting from 1.
def generate_action_id
  @last_action_id = (@last_action_id || 0) + 1
end
# Hands out sequential step ids, starting from 1.
def generate_step_id
  @last_step_id = (@last_step_id || 0) + 1
end
def delay(caller_action, action_class, delay_options, *args)
save
@root_plan_step = add_scheduling_step(action_class, caller_action)
run_hooks(:pending)
serializer = root_plan_step.delay(delay_options, args)
delayed_plan = DelayedPlan.new(@world,
id,
delay_options[:start_at],
delay_options.fetch(:start_before, nil),
serializer,
delay_options[:frozen] || false)
persistence.save_delayed_plan(delayed_plan)
ensure
update_state(error? ? :stopped : :scheduled)
end
def delay_record
@delay_record ||= persistence.load_delayed_plan(id)
end
def prepare(action_class, options = {})
options = options.dup
caller_action = Type! options.delete(:caller_action), Dynflow::Action, NilClass
raise "Unexpected options #{options.keys.inspect}" unless options.empty?
save
@root_plan_step = add_plan_step(action_class, caller_action)
step = @root_plan_step.save
run_hooks(:pending)
step
end
def plan(*args)
update_state(:planning)
world.middleware.execute(:plan_phase, root_plan_step.action_class, self) do
with_planning_scope do
root_action = root_plan_step.execute(self, nil, false, *args)
@label = root_action.label
if @dependency_graph.unresolved?
raise "Some dependencies were not resolved: #{@dependency_graph.inspect}"
end
end
end
if @run_flow.size == 1
@run_flow = @run_flow.sub_flows.first
end
steps.values.each(&:save)
update_state(error? ? :stopped : :planned)
end
# sends the cancel event to all currently running and cancellable steps.
# if the plan is just scheduled, it cancels it (and returns an one-item
# array with the future value of the cancel result)
def cancel(force = false)
if state == :scheduled
[Concurrent::Promises.resolvable_future.tap { |f| f.fulfill delay_record.cancel }]
else
event = force ? ::Dynflow::Action::Cancellable::Abort : ::Dynflow::Action::Cancellable::Cancel
steps_to_cancel.map do |step|
world.event(id, step.id, event)
end
end
end
def cancellable?
return true if state == :scheduled
return false unless state == :running
steps_to_cancel.any?
end
def steps_to_cancel
steps_in_state(:running, :suspended).find_all do |step|
step.action(self).is_a?(::Dynflow::Action::Cancellable)
end
end
def skip(step)
steps_to_skip = steps_to_skip(step).each(&:mark_to_skip)
self.save
return steps_to_skip
end
# All the steps that need to get skipped when wanting to skip the step
# includes the step itself, all steps dependent on it (even transitively)
# FIND maybe move to persistence to let adapter to do it effectively?
# @return [Array<Steps::Abstract>]
def steps_to_skip(step)
  # Steps whose action declares this step among its required steps.
  dependent_steps = steps.values.find_all do |s|
    next if s.is_a? Steps::PlanStep
    action = persistence.load_action(s)
    action.required_step_ids.include?(step.id)
  end
  # Recurse so transitive dependents are collected as well.
  steps_to_skip = dependent_steps.map do |dependent_step|
    steps_to_skip(dependent_step)
  end.flatten
  steps_to_skip << step
  # Skipping a run step also skips its associated finalize step, if any.
  if step.is_a? Steps::RunStep
    finalize_step_id = persistence.load_action(step).finalize_step_id
    steps_to_skip << steps[finalize_step_id] if finalize_step_id
  end
  return steps_to_skip.uniq
end
# @api private
def steps_of_type(type)
steps.values.find_all { |step| step.is_a?(type) }
end
def current_run_flow
@run_flow_stack.last
end
# @api private
def with_planning_scope(&block)
@run_flow_stack = []
@dependency_graph = DependencyGraph.new
switch_flow(run_flow, &block)
ensure
@run_flow_stack = nil
@dependency_graph = nil
end
# @api private
# Switches the flow type (Sequence, Concurrence) to be used within the block.
def switch_flow(new_flow, &block)
@run_flow_stack << new_flow
return block.call
ensure
@run_flow_stack.pop
current_run_flow.add_and_resolve(@dependency_graph, new_flow) if current_run_flow
end
def add_scheduling_step(action_class, caller_action = nil)
add_step(Steps::PlanStep, action_class, generate_action_id, :scheduling).tap do |step|
step.initialize_action(caller_action)
end
end
def add_plan_step(action_class, caller_action = nil)
add_step(Steps::PlanStep, action_class, generate_action_id).tap do |step|
# TODO: to be removed and preferred by the caller_action
if caller_action && caller_action.execution_plan_id == self.id
@steps[caller_action.plan_step_id].children << step.id
end
step.initialize_action(caller_action)
end
end
def add_run_step(action)
add_step(Steps::RunStep, action.class, action.id).tap do |step|
step.update_from_action(action)
@dependency_graph.add_dependencies(step, action)
current_run_flow.add_and_resolve(@dependency_graph, Flows::Atom.new(step.id))
end
end
def add_finalize_step(action)
add_step(Steps::FinalizeStep, action.class, action.id).tap do |step|
step.update_from_action(action)
finalize_flow << Flows::Atom.new(step.id)
end
end
def self.load_flow(flow_hash)
if flow_hash.is_a? Hash
Flows::Abstract.from_hash(flow_hash)
else
Flows::Abstract.decode(flow_hash)
end
end
def to_hash
recursive_to_hash id: id,
class: self.class.to_s,
label: label,
state: state,
result: result,
root_plan_step_id: root_plan_step && root_plan_step.id,
run_flow: run_flow.encode,
finalize_flow: finalize_flow.encode,
step_ids: steps.map { |id, _| id },
started_at: time_to_str(started_at),
ended_at: time_to_str(ended_at),
execution_time: execution_time,
real_time: real_time,
execution_history: execution_history.to_hash
end
def save
persistence.save_execution_plan(self)
end
def self.new_from_hash(hash, world)
check_class_matching hash
execution_plan_id = hash[:id]
steps = steps_from_hash(hash[:step_ids], execution_plan_id, world)
self.new(world,
execution_plan_id,
hash[:label],
hash[:state],
steps[hash[:root_plan_step_id]],
load_flow(hash[:run_flow]),
load_flow(hash[:finalize_flow]),
steps,
string_to_time(hash[:started_at]),
string_to_time(hash[:ended_at]),
hash[:execution_time].to_f,
hash[:real_time].to_f,
ExecutionHistory.new_from_hash(hash[:execution_history]))
rescue => plan_exception
begin
world.logger.error("Could not load execution plan #{execution_plan_id}")
world.logger.error(plan_exception)
InvalidPlan.new(plan_exception, execution_plan_id,
hash[:label],
hash[:state],
string_to_time(hash[:started_at]),
string_to_time(hash[:ended_at]),
hash[:execution_time].to_f,
hash[:real_time].to_f,
ExecutionHistory.new_from_hash(hash[:execution_history]))
rescue => invalid_plan_exception
world.logger.error("Could not even load a fallback execution plan for #{execution_plan_id}")
world.logger.error(invalid_plan_exception)
InvalidPlan.new(invalid_plan_exception, execution_plan_id,
hash[:label],
hash[:state])
end
end
# Sums the execution time over all steps of the plan; steps that have
# not run yet (nil execution_time) count as zero.
# (reduce(0) { ... } replaced with the idiomatic Enumerable#sum.)
def compute_execution_time
  steps.values.sum { |step| step.execution_time || 0 }
end
# @return [0..1] the percentage of the progress. See Action::Progress for more
#   info. Steps are weighted by their progress_weight; a plan with no
#   weighted steps counts as fully done.
def progress
  return 0 if [:pending, :planning, :scheduled].include?(state)

  flow_step_ids = run_flow.all_step_ids + finalize_flow.all_step_ids
  done = 0.0
  total = 0
  flow_step_ids.each do |step_id|
    step = steps[step_id]
    done += step.progress_done * step.progress_weight
    total += step.progress_weight
  end
  total > 0 ? (done / total) : 1
end
# Memoized Present-phase action of the root plan step (the action that
# triggered the whole execution plan).
def entry_action
  @entry_action ||= root_plan_step.action(self)
end
# @return [Array<Action>] all actions of the plan in Present phase:
#   the entry action followed by everything it planned, directly or
#   transitively. Memoized.
# (Removed a redundant `begin...end` wrapper around a single expression.)
def actions
  @actions ||= [entry_action] + entry_action.all_planned_actions
end
# Id of the execution plan that spawned this plan as a sub-plan,
# delegated to the entry action (nil for top-level plans).
def caller_execution_plan_id
  entry_action.caller_execution_plan_id
end

private

# Shortcut to the world's persistence layer.
def persistence
  world.persistence
end
# Instantiates a step of +step_class+ bound to the given action class
# and id, registers it in the plan's step index and returns it.
# NOTE(review): the nil positional argument corresponds to a
# not-yet-present slot in the step constructor -- confirm against
# Steps::Abstract#initialize when changing.
def add_step(step_class, action_class, action_id, state = :pending)
  step_class.new(self.id,
                 self.generate_step_id,
                 state,
                 action_class,
                 action_id,
                 nil,
                 world).tap do |new_step|
    @steps[new_step.id] = new_step
  end
end
# Loads the persisted steps of the plan and returns them as a Hash
# keyed by integer step id, preserving the order given by +step_ids+.
# @raise [Errors::DataConsistencyError] when a referenced step is missing
def self.steps_from_hash(step_ids, execution_plan_id, world)
  loaded = world.persistence.load_steps(execution_plan_id, world)
  by_id = loaded.to_h { |step| [step.id.to_i, step] }
  # Rebuild in step_ids order so the plan keeps its original step ordering.
  step_ids.each_with_object({}) do |raw_id, ordered|
    key = raw_id.to_i
    step = by_id[key]
    raise Errors::DataConsistencyError, "Could not load steps for execution plan #{execution_plan_id}" if step.nil?
    ordered[key] = step
  end
end
# Releases all singleton-action locks owned by this execution plan.
def unlock_all_singleton_locks!
  release_locks_with_class!(Dynflow::Coordinator::SingletonActionLock)
end

# Releases all execution-inhibition locks owned by this execution plan.
def unlock_execution_inhibition_lock!
  release_locks_with_class!(Dynflow::Coordinator::ExecutionInhibitionLock)
end

# Shared implementation for the two unlock methods above (they were
# copy-pasted duplicates): finds every coordinator lock of +lock_class+
# owned by this plan and releases it.
def release_locks_with_class!(lock_class)
  filter = { :owner_id => 'execution-plan:' + id,
             :class => lock_class.to_s }
  world.coordinator.find_locks(filter).each do |lock|
    world.coordinator.release(lock)
  end
end
# Moves the plan between telemetry gauge buckets on a state change:
# decrements the active-plans gauge for the old state and increments it
# for the new one. Nil states (entering/leaving tracking) skip the
# corresponding gauge update.
def toggle_telemetry_state(original, new)
  return if original == new
  # Lazily derive the label used as the :action tag.
  @label = root_plan_step.action_class if @label.nil?
  Dynflow::Telemetry.with_instance do |t|
    t.set_gauge(:dynflow_active_execution_plans, '-1',
                telemetry_common_options.merge(:state => original)) unless original.nil?
    t.set_gauge(:dynflow_active_execution_plans, '+1',
                telemetry_common_options.merge(:state => new)) unless new.nil?
  end
end

# Tags attached to every telemetry metric reported for this plan.
def telemetry_common_options
  { :world => @world.id, :action => @label }
end
# Records a notice into the plan's execution history. The :auto
# placeholder is translated according to the current plan state; nil
# (or :auto in a state with no mapping) records nothing.
def add_history_notice(history_notice)
  resolved =
    if history_notice == :auto
      { :running => 'start execution',
        :paused => 'pause execution',
        :stopped => 'finish execution',
        :scheduled => 'delay' }[state]
    else
      history_notice
    end
  execution_history.add(resolved, @world.id) if resolved
end
private_class_method :steps_from_hash
end
# rubocop:enable Metrics/ClassLength
end
| ruby | MIT | e14dcdfe57cec99cc78b2f6ab521f00809b1408a | 2026-01-04T17:50:16.326730Z | false |
Dynflow/dynflow | https://github.com/Dynflow/dynflow/blob/e14dcdfe57cec99cc78b2f6ab521f00809b1408a/lib/dynflow/delayed_executors.rb | lib/dynflow/delayed_executors.rb | # frozen_string_literal: true
module Dynflow
  # Namespace for executors responsible for starting execution plans
  # whose execution was delayed (scheduled) to a later point in time.
  module DelayedExecutors
    require 'dynflow/delayed_executors/abstract'
    require 'dynflow/delayed_executors/abstract_core'
    require 'dynflow/delayed_executors/polling'
  end
end
| ruby | MIT | e14dcdfe57cec99cc78b2f6ab521f00809b1408a | 2026-01-04T17:50:16.326730Z | false |
Dynflow/dynflow | https://github.com/Dynflow/dynflow/blob/e14dcdfe57cec99cc78b2f6ab521f00809b1408a/lib/dynflow/executors.rb | lib/dynflow/executors.rb | # frozen_string_literal: true
module Dynflow
  module Executors
    require 'dynflow/executors/parallel'

    class << self
      # Every time we run a code that can be defined outside of Dynflow,
      # we should wrap it with this method, and we can ensure here to do
      # necessary cleanup, such as cleaning ActiveRecord connections
      def run_user_code
        # Here we cover a case where the connection was already checked out from
        # the pool and had opened transactions. In that case, we should leave the
        # cleanup to the other runtime unit which opened the transaction. If the
        # connection was checked out or there are no opened transactions, we can
        # safely perform the cleanup.
        no_previously_opened_transactions = active_record_open_transactions.zero?
        yield
      ensure
        # Return AR connections to the pool only when this block owned the
        # work; always clear Logging's mapped diagnostic context if present.
        ::ActiveRecord::Base.clear_active_connections! if no_previously_opened_transactions && active_record_connected?
        ::Logging.mdc.clear if defined? ::Logging
      end

      private

      # Number of transactions open on this thread's AR connection;
      # 0 when AR is absent or no connection is checked out.
      def active_record_open_transactions
        active_record_active_connection&.open_transactions || 0
      end

      def active_record_active_connection
        return unless defined?(::ActiveRecord) && ::ActiveRecord::Base.connected?
        # #active_connection? returns the connection if already established or nil
        ::ActiveRecord::Base.connection_pool.active_connection?
      end

      def active_record_connected?
        !!active_record_active_connection
      end
    end
  end
end
| ruby | MIT | e14dcdfe57cec99cc78b2f6ab521f00809b1408a | 2026-01-04T17:50:16.326730Z | false |
Dynflow/dynflow | https://github.com/Dynflow/dynflow/blob/e14dcdfe57cec99cc78b2f6ab521f00809b1408a/lib/dynflow/actor.rb | lib/dynflow/actor.rb | # frozen_string_literal: true
module Dynflow
# Opt-in flag: include full origin backtraces in actor error logs when
# DYNFLOW_FULL_BACKTRACE is set to 1/y/yes.
FULL_BACKTRACE = %w[1 y yes].include?((ENV['DYNFLOW_FULL_BACKTRACE'] || '').downcase)
# Optional cap on the number of kept backtrace lines (from
# DYNFLOW_BACKTRACE_LIMIT); nil means unlimited.
BACKTRACE_LIMIT = begin
  limit = ENV['DYNFLOW_BACKTRACE_LIMIT'].to_i
  limit.zero? ? nil : limit
end

# Makes an actor treat incoming messages of the form
# [method_name, *args] as method calls on itself.
module MethodicActor
  def on_message(message)
    method, *args = message
    self.send(method, *args)
  end
end
# Extend the Concurrent::Actor::Envelope to include information about the
# origin of the message, so a failure on the receiving side can be traced
# back to the code that sent it.
module EnvelopeBacktraceExtension
  # Backtrace captured at the time the envelope was created.
  attr_reader :origin_backtrace

  def initialize(*args)
    super
    @origin_backtrace = caller + Actor::BacktraceCollector.current_actor_backtrace
  end

  # Fixed the malformed representation: the original emitted a stray '>'
  # right after object_id, closing the inspect string prematurely.
  def inspect
    "#<#{self.class.name}:#{object_id} @message=#{@message.inspect}, @sender=#{@sender.inspect}, @address=#{@address.inspect}>"
  end
end
Concurrent::Actor::Envelope.prepend(EnvelopeBacktraceExtension)
# Common parent for all the Dynflow actors defining some defaults
# that we preffer here.
class Actor < Concurrent::Actor::Context
# Logging helper: when the logged message is an exception, render its
# message, class and the filtered full backtrace (including the actor
# origin trace) instead of the default single-line form.
module LogWithFullBacktrace
  def log(level, message = nil, &block)
    if message.is_a? Exception
      error = message
      backtrace = Actor::BacktraceCollector.full_backtrace(error.backtrace)
      log(level, format("%s (%s)\n%s", error.message, error.class, backtrace.join("\n")))
    else
      super
    end
  end
end

# Variant of Concurrent's SetResults behaviour that, when FULL_BACKTRACE
# is enabled, restores the envelope's origin backtrace around message
# processing so failures carry the sender's trace.
class SetResultsWithOriginLogging < Concurrent::Actor::Behaviour::SetResults
  include LogWithFullBacktrace

  def on_envelope(envelope)
    if FULL_BACKTRACE
      Actor::BacktraceCollector.with_backtrace(envelope.origin_backtrace) { super }
    else
      super
    end
  end
end
# Keeps track of the logical (cross-actor) backtrace for the current
# thread and compacts backtraces for logging: framework noise is
# collapsed into placeholder lines and the result can be capped by
# BACKTRACE_LIMIT.
class BacktraceCollector
  CONCURRENT_RUBY_LINE = '[ concurrent-ruby ]'
  SIDEKIQ_LINE = '[ sidekiq ]'

  class << self
    # Runs the block with +backtrace+ recorded as the actor backtrace of
    # the current thread, restoring the previous value afterwards.
    def with_backtrace(backtrace)
      saved = Thread.current[:_dynflow_actor_backtrace]
      Thread.current[:_dynflow_actor_backtrace] = backtrace
      yield
    ensure
      Thread.current[:_dynflow_actor_backtrace] = saved
    end

    def current_actor_backtrace
      Thread.current[:_dynflow_actor_backtrace] || []
    end

    # Combines +backtrace+ with the current actor backtrace and filters it.
    def full_backtrace(backtrace)
      filter_backtrace(Array(backtrace) + current_actor_backtrace)
    end

    private

    # Replaces framework-internal lines with placeholder markers.
    def filter_line(line)
      case line
      when /concurrent-ruby|gems\/logging|actor\.rb/
        CONCURRENT_RUBY_LINE
      when /lib\/sidekiq/
        SIDEKIQ_LINE
      else
        line
      end
    end

    # Collapses runs of identical (placeholder) lines and applies the
    # optional BACKTRACE_LIMIT cap, noting how many lines were omitted.
    def filter_backtrace(backtrace)
      filtered = backtrace.map { |line| filter_line(line) }
                          .chunk_while { |left, right| left == right }
                          .map(&:first)
      return filtered unless BACKTRACE_LIMIT

      total = filtered.count
      shortened = filtered.take(BACKTRACE_LIMIT)
      shortened << "[ backtrace omitted #{total - BACKTRACE_LIMIT} lines ]" if shortened.count < total
      shortened
    end
  end
end
include LogWithFullBacktrace
include MethodicActor

# Behaviour that watches for polite asking for termination
# and calls corresponding method on the context to do so
class PoliteTermination < Concurrent::Actor::Behaviour::Abstract
  def on_envelope(envelope)
    message, terminated_future = envelope
    if :start_termination == message
      context.start_termination(terminated_future)
      envelope.future.fulfill true if !envelope.future.nil?
      Concurrent::Actor::Behaviour::MESSAGE_PROCESSED
    else
      pass envelope
    end
  end
end

include Algebrick::Matching

# Remembers the future to fulfill once termination finishes.
def start_termination(future)
  @terminated = future
end

# Fulfills the termination future and asks the actor to terminate itself.
def finish_termination
  @terminated.fulfill(true)
  reference.tell(:terminate!)
end

# True once start_termination has been requested.
def terminating?
  !!@terminated
end

# Standard concurrent-ruby behaviour stack, extended with origin-aware
# result logging and the PoliteTermination behaviour above.
def behaviour_definition
  [*Concurrent::Actor::Behaviour.base(:just_log),
   Concurrent::Actor::Behaviour::Buffer,
   [SetResultsWithOriginLogging, :just_log],
   Concurrent::Actor::Behaviour::Awaits,
   PoliteTermination,
   Concurrent::Actor::Behaviour::ExecutesContext,
   Concurrent::Actor::Behaviour::ErrorsOnUnknownMessage]
end
end
end
| ruby | MIT | e14dcdfe57cec99cc78b2f6ab521f00809b1408a | 2026-01-04T17:50:16.326730Z | false |
Dynflow/dynflow | https://github.com/Dynflow/dynflow/blob/e14dcdfe57cec99cc78b2f6ab521f00809b1408a/lib/dynflow/dispatcher.rb | lib/dynflow/dispatcher.rb | # frozen_string_literal: true
module Dynflow
  module Dispatcher
    # Messages one world can send to another.
    Request = Algebrick.type do
      # Deliver an event to a running step; +optional+ events are dropped
      # instead of failing when the plan is not being executed.
      Event = type do
        fields! execution_plan_id: String,
                step_id: Integer,
                event: Object,
                time: type { variants Time, NilClass },
                optional: Algebrick::Types::Boolean
      end
      # Ask an executor to run the given execution plan.
      Execution = type do
        fields! execution_plan_id: String
      end
      Planning = type do
        fields! execution_plan_id: String
      end
      # Liveness check; +use_cache+ allows answering from the ping cache.
      Ping = type do
        fields! receiver_id: String,
                use_cache: type { variants TrueClass, FalseClass }
      end
      Status = type do
        fields! receiver_id: String,
                execution_plan_id: type { variants String, NilClass }
      end
      Halt = type do
        fields! execution_plan_id: String
      end

      variants Event, Execution, Ping, Status, Planning, Halt
    end

    # Possible replies to a Request.
    Response = Algebrick.type do
      variants Accepted = atom,
               Failed = type { fields! error: String },
               Done = atom,
               Pong = atom,
               ExecutionStatus = type { fields! execution_status: Hash }
    end

    # Addressed wrapper routed between worlds by the connector.
    Envelope = Algebrick.type do
      fields! request_id: String,
              sender_id: String,
              receiver_id: type { variants String, AnyExecutor = atom, UnknownWorld = atom },
              message: type { variants Request, Response }
    end

    module Envelope
      # Builds a reply envelope addressed back to this envelope's sender.
      def build_response_envelope(response_message, sender)
        Envelope[self.request_id,
                 sender.id,
                 self.sender_id,
                 response_message]
      end
    end

    module Event
      # The event payload can be an arbitrary object: it is Marshal-dumped
      # and Base64-encoded for transport. NOTE: Marshal.load below is safe
      # only because envelopes travel over the trusted inter-world
      # connector -- never feed it untrusted input.
      def to_hash
        super.update event: Base64.strict_encode64(Marshal.dump(event))
      end

      def self.product_from_hash(hash)
        super(hash.merge 'event' => Marshal.load(Base64.strict_decode64(hash.fetch('event'))))
      end
    end
  end
end
require 'dynflow/dispatcher/abstract'
require 'dynflow/dispatcher/client_dispatcher'
require 'dynflow/dispatcher/executor_dispatcher'
| ruby | MIT | e14dcdfe57cec99cc78b2f6ab521f00809b1408a | 2026-01-04T17:50:16.326730Z | false |
Dynflow/dynflow | https://github.com/Dynflow/dynflow/blob/e14dcdfe57cec99cc78b2f6ab521f00809b1408a/lib/dynflow/persistence_adapters.rb | lib/dynflow/persistence_adapters.rb | # frozen_string_literal: true
module Dynflow
  # Pluggable storage backends for execution plans, steps and actions.
  module PersistenceAdapters
    require 'dynflow/persistence_adapters/abstract'
    require 'dynflow/persistence_adapters/sequel'
  end
end
| ruby | MIT | e14dcdfe57cec99cc78b2f6ab521f00809b1408a | 2026-01-04T17:50:16.326730Z | false |
Dynflow/dynflow | https://github.com/Dynflow/dynflow/blob/e14dcdfe57cec99cc78b2f6ab521f00809b1408a/lib/dynflow/config.rb | lib/dynflow/config.rb | # frozen_string_literal: true
require 'socket'
module Dynflow
class Config
include Algebrick::TypeCheck
# Declares a configuration attribute on the Config class.
#
# For attribute +name+ this defines:
# * +validate_<name>!+ -- type-checks a value against +types+ (no-op
#   when no types were given),
# * a reader returning an explicitly assigned value if any, otherwise
#   the +default+ block itself (the Proc is resolved lazily with
#   world/config by ForWorld#method_missing),
# * a plain attribute writer.
def self.config_attr(name, *types, &default)
  self.send(:define_method, "validate_#{name}!") do |value|
    Type! value, *types unless types.empty?
  end
  self.send(:define_method, name) do
    var_name = "@#{name}"
    if instance_variable_defined?(var_name)
      return instance_variable_get(var_name)
    else
      return default
    end
  end
  self.send(:attr_writer, name)
end
# World-bound view of a Config: resolves each configuration attribute
# lazily (calling Proc defaults with the world), validates it and caches
# the result, so every attribute is computed at most once per world.
class ForWorld
  attr_reader :world, :config

  def initialize(config, world)
    @config = config
    @world = world
    @cache = {}
  end

  def validate
    @config.validate(self)
  end

  def queues
    @queues ||= @config.queues.finalized_config(self)
  end

  # Any other attribute is delegated to the underlying Config: Proc
  # values are resolved with (world, self), validated via the matching
  # validate_<name>! method and cached.
  def method_missing(name)
    return @cache[name] if @cache.key?(name)
    value = @config.send(name)
    value = value.call(@world, self) if value.is_a? Proc
    validation_method = "validate_#{name}!"
    @config.send(validation_method, value) if @config.respond_to?(validation_method)
    @cache[name] = value
  end

  # Added alongside method_missing (it was missing): keeps respond_to?
  # consistent with the delegation above.
  def respond_to_missing?(name, include_private = false)
    @config.respond_to?(name, include_private) || super
  end
end
# Declarative configuration of the worker queues: starts with a
# :default queue and lets additional queues be registered before the
# world starts.
class QueuesConfig
  attr_reader :queues

  def initialize
    @queues = { :default => {} }
  end

  # Add a new queue to the configuration
  #
  # @param [Hash] queue_options
  # @option queue_options :pool_size The amount of workers available for the queue.
  #   By default, it uses global pool_size config option.
  def add(name, queue_options = {})
    Utils.validate_keys!(queue_options, :pool_size)
    key = name.to_sym
    raise ArgumentError, "Queue #{key} is already defined" if @queues.key?(key)
    @queues[key] = queue_options
  end

  # Fills in the world-wide default pool size for queues that did not
  # specify their own and returns the final queues hash.
  def finalized_config(config_for_world)
    @queues.each_value do |queue_options|
      queue_options[:pool_size] ||= config_for_world.pool_size
    end
    @queues
  end
end
# Memoized queue configuration builder; see QueuesConfig#add.
def queues
  @queues ||= QueuesConfig.new
end
# --- World-wide configuration attributes --------------------------------
# Declared via config_attr(name, *types) { default }; Proc defaults are
# resolved lazily with (world, config) by ForWorld.

config_attr :logger_adapter, LoggerAdapters::Abstract do
  LoggerAdapters::Simple.new
end

config_attr :transaction_adapter, TransactionAdapters::Abstract do
  TransactionAdapters::None.new
end

# In-memory SQLite by default -- suitable for tests and experiments.
config_attr :persistence_adapter, PersistenceAdapters::Abstract do
  PersistenceAdapters::Sequel.new('sqlite:/')
end

config_attr :coordinator_adapter, CoordinatorAdapters::Abstract do |world|
  CoordinatorAdapters::Sequel.new(world)
end

# Default worker pool size per queue.
config_attr :pool_size, Integer do
  5
end

config_attr :executor do |world, config|
  Executors::Parallel::Core
end

# Executor implementations are restricted to the known executor cores.
def validate_executor!(value)
  accepted_executors = [Executors::Parallel::Core]
  accepted_executors << Executors::Sidekiq::Core if defined? Executors::Sidekiq::Core
  if value && !accepted_executors.include?(value)
    raise ArgumentError, "Executor #{value} is expected to be one of #{accepted_executors.inspect}"
  end
end

config_attr :executor_semaphore, Semaphores::Abstract, FalseClass do |world, config|
  Semaphores::Dummy.new
end

# Seconds between executor heartbeats written to the coordinator.
config_attr :executor_heartbeat_interval, Integer do
  15
end

# How long (seconds) a ping response may be answered from cache.
config_attr :ping_cache_age, Integer do
  60
end

config_attr :connector, Connectors::Abstract do |world|
  Connectors::Direct.new(world)
end

# Attempt automatic rescue of failed execution plans.
config_attr :auto_rescue, Algebrick::Types::Boolean do
  true
end

# Validity check is on by default only for worlds with an executor.
config_attr :auto_validity_check, Algebrick::Types::Boolean do |world, config|
  !!config.executor
end

config_attr :validity_check_timeout, Numeric do
  30
end

config_attr :exit_on_terminate, Algebrick::Types::Boolean do
  true
end

config_attr :auto_terminate, Algebrick::Types::Boolean do
  true
end

config_attr :termination_timeout, Numeric do
  60
end

config_attr :auto_execute, Algebrick::Types::Boolean do
  true
end

config_attr :silent_dead_letter_matchers, Array do
  # By default suppress dead letters sent by Clock
  [
    DeadLetterSilencer::Matcher.new(::Dynflow::Clock)
  ]
end

config_attr :delayed_executor, DelayedExecutors::Abstract, NilClass do |world|
  options = { :poll_interval => 15,
              :time_source => -> { Time.now.utc } }
  DelayedExecutors::Polling.new(world, options)
end

config_attr :throttle_limiter, ::Dynflow::ThrottleLimiter do |world|
  ::Dynflow::ThrottleLimiter.new(world)
end

config_attr :execution_plan_cleaner, ::Dynflow::Actors::ExecutionPlanCleaner, NilClass do |world|
  nil
end

config_attr :action_classes do
  Action.all_children
end

# Identification of the world's process, stored in the coordinator.
config_attr :meta do |world, config|
  { 'hostname' => Socket.gethostname, 'pid' => Process.pid }
end

config_attr :backup_deleted_plans, Algebrick::Types::Boolean do
  false
end

config_attr :backup_dir, String, NilClass do
  './backup'
end

config_attr :telemetry_adapter, ::Dynflow::TelemetryAdapters::Abstract do |world|
  ::Dynflow::TelemetryAdapters::Dummy.new
end
# Warns when the ActiveRecord connection pool looks too small compared
# to the Dynflow worker pool (rule of thumb: AR pool >= half the
# Dynflow pool size).
def validate(config_for_world)
  if defined? ::ActiveRecord::Base
    begin
      # NOTE(review): reaches into the pool's @size ivar -- fragile
      # across ActiveRecord versions; confirm when bumping AR.
      ar_pool_size = ::ActiveRecord::Base.connection_pool.instance_variable_get(:@size)
      if (config_for_world.pool_size / 2.0) > ar_pool_size
        config_for_world.world.logger.warn 'Consider increasing ActiveRecord::Base.connection_pool size, ' +
                                           "it's #{ar_pool_size} but there is #{config_for_world.pool_size} " +
                                           'threads in Dynflow pool.'
      end
    rescue ActiveRecord::ConnectionNotEstablished
      # If in tests or in an environment where ActiveRecord doesn't have a
      # real DB connection, we want to skip AR configuration altogether
    end
  end
end
end
end
| ruby | MIT | e14dcdfe57cec99cc78b2f6ab521f00809b1408a | 2026-01-04T17:50:16.326730Z | false |
Dynflow/dynflow | https://github.com/Dynflow/dynflow/blob/e14dcdfe57cec99cc78b2f6ab521f00809b1408a/lib/dynflow/telemetry.rb | lib/dynflow/telemetry.rb | # frozen_string_literal: true
require 'dynflow/telemetry_adapters/abstract'
require 'dynflow/telemetry_adapters/dummy'
require 'dynflow/telemetry_adapters/statsd'
module Dynflow
# Facade holding the process-wide telemetry adapter.
class Telemetry
  class << self
    attr_reader :instance

    # Configures the adapter to use for telemetry
    #
    # @param [TelemetryAdapters::Abstract] adapter the adapter to use
    def set_adapter(adapter)
      @instance = adapter
    end

    # Passes the block into the current telemetry adapter's
    # {TelemetryAdapters::Abstract#with_instance} method
    def with_instance(&block)
      @instance.with_instance(&block)
    end

    def measure(name, tags = {}, &block)
      @instance.measure name, tags, &block
    end

    # Registers the metrics to be collected
    # NOTE(review): the @registered guard is not synchronized; it is
    # presumably called during single-threaded boot -- confirm if
    # invoked from elsewhere.
    # @return [void]
    def register_metrics!
      return if @registered
      @registered = true
      with_instance do |t|
        # Worker related
        t.add_gauge :dynflow_active_workers, 'The number of currently busy workers',
                    [:queue, :world]
        t.add_counter :dynflow_worker_events, 'The number of processed events',
                      [:queue, :world, :worker]
        # Execution plan related
        t.add_gauge :dynflow_active_execution_plans, 'The number of active execution plans',
                    [:action, :world, :state]
        t.add_gauge :dynflow_queue_size, 'Number of items in queue',
                    [:queue, :world]
        t.add_counter :dynflow_finished_execution_plans, 'The number of execution plans',
                      [:action, :world, :result]
        # Step related
        # TODO: Configure buckets in a sane manner
        t.add_histogram :dynflow_step_real_time, 'The time between the start end end of the step',
                        [:action, :phase]
        t.add_histogram :dynflow_step_execution_time, 'The time spent executing a step',
                        [:action, :phase]
        # Connector related
        t.add_counter :dynflow_connector_envelopes, 'The number of envelopes handled by a connector',
                      [:world, :direction]
        # Persistence related
        t.add_histogram :dynflow_persistence, 'The time spent communicating with the database',
                        [:world, :method]
      end
    end
  end
end
end
| ruby | MIT | e14dcdfe57cec99cc78b2f6ab521f00809b1408a | 2026-01-04T17:50:16.326730Z | false |
Dynflow/dynflow | https://github.com/Dynflow/dynflow/blob/e14dcdfe57cec99cc78b2f6ab521f00809b1408a/lib/dynflow/web.rb | lib/dynflow/web.rb | # frozen_string_literal: true
require 'dynflow'
require 'pp'
require 'sinatra/base'
require 'yaml'
module Dynflow
module Web
# Builds a Rack application serving the Dynflow console.
# The block is instance-exec'd in the context of the Sinatra app,
# typically to configure access to the world.
def self.setup(&block)
  console = Sinatra.new(Web::Console) { instance_exec(&block) }
  Rack::Builder.app do
    run Rack::URLMap.new('/' => console)
  end
end
# Absolute path to +sub_dir+ under the gem's top-level web/ directory.
# (Dropped a no-op File.join around a single argument and replaced the
# legacy expand_path(..., __FILE__) idiom with __dir__.)
def self.web_dir(sub_dir)
  web_dir = File.expand_path('../../web', __dir__)
  File.join(web_dir, sub_dir)
end
require 'dynflow/web/filtering_helpers'
require 'dynflow/web/world_helpers'
require 'dynflow/web/console_helpers'
require 'dynflow/web/console'
end
end
| ruby | MIT | e14dcdfe57cec99cc78b2f6ab521f00809b1408a | 2026-01-04T17:50:16.326730Z | false |
Dynflow/dynflow | https://github.com/Dynflow/dynflow/blob/e14dcdfe57cec99cc78b2f6ab521f00809b1408a/lib/dynflow/semaphores.rb | lib/dynflow/semaphores.rb | # frozen_string_literal: true
module Dynflow
  # Semaphore implementations used to throttle executor workload.
  module Semaphores
    require 'dynflow/semaphores/abstract'
    require 'dynflow/semaphores/stateful'
    require 'dynflow/semaphores/aggregating'
    require 'dynflow/semaphores/dummy'
  end
end
| ruby | MIT | e14dcdfe57cec99cc78b2f6ab521f00809b1408a | 2026-01-04T17:50:16.326730Z | false |
Dynflow/dynflow | https://github.com/Dynflow/dynflow/blob/e14dcdfe57cec99cc78b2f6ab521f00809b1408a/lib/dynflow/connectors.rb | lib/dynflow/connectors.rb | # frozen_string_literal: true
module Dynflow
  # Transports for passing dispatcher envelopes between worlds.
  module Connectors
    require 'dynflow/connectors/abstract'
    require 'dynflow/connectors/direct'
    require 'dynflow/connectors/database'
  end
end
| ruby | MIT | e14dcdfe57cec99cc78b2f6ab521f00809b1408a | 2026-01-04T17:50:16.326730Z | false |
Dynflow/dynflow | https://github.com/Dynflow/dynflow/blob/e14dcdfe57cec99cc78b2f6ab521f00809b1408a/lib/dynflow/serializers.rb | lib/dynflow/serializers.rb | # frozen_string_literal: true
module Dynflow
  # Serializers for the arguments of delayed (scheduled) actions.
  module Serializers
    require 'dynflow/serializers/abstract'
    require 'dynflow/serializers/noop'
  end
end
| ruby | MIT | e14dcdfe57cec99cc78b2f6ab521f00809b1408a | 2026-01-04T17:50:16.326730Z | false |
Dynflow/dynflow | https://github.com/Dynflow/dynflow/blob/e14dcdfe57cec99cc78b2f6ab521f00809b1408a/lib/dynflow/logger_adapters.rb | lib/dynflow/logger_adapters.rb | # frozen_string_literal: true
module Dynflow
  # Adapters connecting Dynflow logging to various logger backends.
  module LoggerAdapters
    require 'dynflow/logger_adapters/formatters'
    require 'dynflow/logger_adapters/abstract'
    require 'dynflow/logger_adapters/simple'
    require 'dynflow/logger_adapters/delegator'
  end
end
| ruby | MIT | e14dcdfe57cec99cc78b2f6ab521f00809b1408a | 2026-01-04T17:50:16.326730Z | false |
Dynflow/dynflow | https://github.com/Dynflow/dynflow/blob/e14dcdfe57cec99cc78b2f6ab521f00809b1408a/lib/dynflow/dead_letter_silencer.rb | lib/dynflow/dead_letter_silencer.rb | # frozen_string_literal: true
module Dynflow
# Dead-letter handler that drops (silences) letters matching any of the
# configured matchers and falls back to the default logging otherwise.
class DeadLetterSilencer < Concurrent::Actor::DefaultDeadLetterHandler
  def initialize(matchers)
    @matchers = Type! matchers, Array
  end

  def should_drop?(dead_letter)
    @matchers.any? { |matcher| matcher.match? dead_letter }
  end

  def on_message(dead_letter)
    super unless should_drop?(dead_letter)
  end

  private

  # Declarative description of dead letters to silence, matching on the
  # sender's actor class, the message and the receiver's actor class.
  # Each criterion can be the Any atom (always matches), a Proc
  # (predicate) or a plain value compared with ==.
  # (The +private+ above affects methods only; the Matcher constant
  # remains publicly reachable.)
  class Matcher
    Any = Algebrick.atom

    def initialize(from, message = Any, to = Any)
      @from = from
      @message = message
      @to = to
    end

    def match?(dead_letter)
      # Letters whose sender is not an actor cannot match.
      return unless dead_letter.sender.respond_to?(:actor_class)
      evaluate(dead_letter.sender.actor_class, @from) &&
        evaluate(dead_letter.message, @message) &&
        evaluate(dead_letter.address.actor_class, @to)
    end

    private

    def evaluate(thing, condition)
      case condition
      when Any
        true
      when Proc
        condition.call(thing)
      else
        condition == thing
      end
    end
  end
end
end
| ruby | MIT | e14dcdfe57cec99cc78b2f6ab521f00809b1408a | 2026-01-04T17:50:16.326730Z | false |
Dynflow/dynflow | https://github.com/Dynflow/dynflow/blob/e14dcdfe57cec99cc78b2f6ab521f00809b1408a/lib/dynflow/transaction_adapters.rb | lib/dynflow/transaction_adapters.rb | # frozen_string_literal: true
module Dynflow
  # Adapters wrapping the planning phase in database transactions.
  module TransactionAdapters
    require 'dynflow/transaction_adapters/abstract'
    require 'dynflow/transaction_adapters/none'
    require 'dynflow/transaction_adapters/active_record'
  end
end
| ruby | MIT | e14dcdfe57cec99cc78b2f6ab521f00809b1408a | 2026-01-04T17:50:16.326730Z | false |
Dynflow/dynflow | https://github.com/Dynflow/dynflow/blob/e14dcdfe57cec99cc78b2f6ab521f00809b1408a/lib/dynflow/action.rb | lib/dynflow/action.rb | # frozen_string_literal: true
module Dynflow
# rubocop:disable Metrics/ClassLength
class Action < Serializable
OutputReference = ExecutionPlan::OutputReference
include Algebrick::TypeCheck
include Algebrick::Matching
require 'dynflow/action/format'
extend Action::Format
require 'dynflow/action/progress'
include Action::Progress
require 'dynflow/action/rescue'
include Action::Rescue
require 'dynflow/action/suspended'
require 'dynflow/action/missing'
require 'dynflow/action/polling'
require 'dynflow/action/cancellable'
require 'dynflow/action/singleton'
require 'dynflow/action/with_sub_plans'
require 'dynflow/action/with_bulk_sub_plans'
require 'dynflow/action/with_polling_sub_plans'
require 'dynflow/action/v2'
def self.all_children
children.values.inject(children.values) do |children, child|
children + child.all_children
end
end
# Registers subclasses in the children index and propagates the
# execution-plan hooks down the inheritance chain.
def self.inherited(child)
  children[child.name] = child
  child.inherit_execution_plan_hooks(execution_plan_hooks.dup)
  super child
end

# Registry of direct subclasses, keyed by class name.
def self.children
  @children ||= {}
end

# Per-class middleware register.
def self.middleware
  @middleware ||= Middleware::Register.new
end

# Per-class register of execution-plan lifecycle hooks.
def self.execution_plan_hooks
  @execution_plan_hooks ||= ExecutionPlan::Hooks::Register.new
end

def self.inherit_execution_plan_hooks(hooks)
  @execution_plan_hooks = hooks
end

# FIND define subscriptions in world independent on action's classes,
# limited only by in/output formats
# @return [nil, Class] a child of Action
def self.subscribe
  nil
end
# Sentinel objects signaled from action bodies to the executor.
ERROR = Object.new
SUSPEND = Object.new
Skip = Algebrick.atom

# Phase of an action's life cycle: executable phases (Plan, Run,
# Finalize) plus Present -- the read-only view used for introspection.
Phase = Algebrick.type do
  Executable = type do
    variants Plan = atom,
             Run = atom,
             Finalize = atom
  end
  variants Executable, Present = atom
end

module Executable
  # Maps the phase to the name of the method executing it.
  def execute_method_name
    match self,
          (on Plan, :execute_plan),
          (on Run, :execute_run),
          (on Finalize, :execute_finalize)
  end
end

module Phase
  # E.g. "Run" instead of the fully qualified constant name.
  def to_s_humanized
    to_s.split('::').last
  end
end

# Event scheduled via plan_event to be dispatched later.
DelayedEvent = Algebrick.type do
  fields! execution_plan_id: String,
          step_id: Integer,
          event: Object,
          time: type { variants Time, NilClass },
          optional: Algebrick::Types::Boolean
end

# Resolves an action class by name, falling back to a generated
# Action::Missing placeholder when the class no longer exists.
def self.constantize(action_name)
  super action_name
rescue NameError
  Action::Missing.generate(action_name)
end

attr_reader :world, :phase, :execution_plan_id, :id, :input,
            :plan_step_id, :run_step_id, :finalize_step_id,
            :caller_execution_plan_id, :caller_action_id,
            :pending_output_chunks

middleware.use Action::Progress::Calculate
# @param attributes [Hash] phase, backing step, ids and serialized
#   input/output of the action
# @param world [World]
# Input is always deserialized; output and the pending output chunk
# buffer exist only in the phases that can read or produce them.
def initialize(attributes, world)
  Type! attributes, Hash
  @phase = Type! attributes.fetch(:phase), Phase
  @world = Type! world, World
  @step = Type! attributes.fetch(:step, nil), ExecutionPlan::Steps::Abstract, NilClass
  raise ArgumentError, 'Step reference missing' if phase?(Executable) && @step.nil?
  @execution_plan_id = Type! attributes.fetch(:execution_plan_id), String
  @id = Type! attributes.fetch(:id), Integer
  @plan_step_id = Type! attributes.fetch(:plan_step_id), Integer
  @run_step_id = Type! attributes.fetch(:run_step_id), Integer, NilClass
  @finalize_step_id = Type! attributes.fetch(:finalize_step_id), Integer, NilClass
  @execution_plan = Type!(attributes.fetch(:execution_plan), ExecutionPlan) if phase? Present
  @caller_execution_plan_id = Type!(attributes.fetch(:caller_execution_plan_id, nil), String, NilClass)
  @caller_action_id = Type!(attributes.fetch(:caller_action_id, nil), Integer, NilClass)
  # Input is mandatory in Run/Finalize/Present, optional while planning.
  getter = ->key, required do
    required ? attributes.fetch(key) : attributes.fetch(key, {})
  end
  @input = OutputReference.deserialize getter.(:input, phase?(Run, Finalize, Present))
  @output = OutputReference.deserialize getter.(:output, false) if phase? Run, Finalize, Present
  @pending_output_chunks = [] if phase? Run, Finalize
end
# True when the current phase matches any of +phases+.
def phase?(*phases)
  Match? phase, *phases
end

# @raise [TypeError] unless the current phase is one of +phases+
def phase!(*phases)
  phase?(*phases) or
    raise TypeError, "Wrong phase #{phase}, required #{phases}"
end

# Human-readable identifier of the action, e.g. for telemetry tagging.
def label
  self.class.name
end

# Replaces the input (Plan phase only); keys become indifferent.
def input=(hash)
  Type! hash, Hash
  phase! Plan
  @input = Utils.indifferent_hash(hash)
end

# Replaces the output (Run phase only); keys become indifferent.
def output=(hash)
  Type! hash, Hash
  phase! Run
  @output = Utils.indifferent_hash(hash)
end
# In executable/present phases returns the action output; in the Plan
# phase returns the OutputReference created by plan_self (raising when
# plan_self has not run yet).
def output
  return @output unless phase? Plan
  @output_reference or raise 'plan_self has to be invoked before being able to reference the output'
end
# Buffers a chunk of incremental output to be persisted later.
def output_chunk(chunk, kind: nil, timestamp: Time.now)
  @pending_output_chunks << { chunk: chunk, kind: kind, timestamp: timestamp }
end

# Already-persisted output chunks, lazily loaded from persistence.
def stored_output_chunks
  @output_chunks ||= world.persistence.load_output_chunks(@execution_plan_id, @id)
end

# Discards both buffered and persisted output chunks.
def drop_output_chunks!
  @pending_output_chunks = []
  @output_chunks = []
  world.persistence.delete_output_chunks(@execution_plan_id, @id)
end
# @return [Action, nil] the Present-phase action that planned this one
#   (possibly from another execution plan), or nil when there is no
#   caller. The result is memoized.
# Fixes an inverted guard: the code returned nil when a caller id WAS
# present, and otherwise tried to load the caller with a nil id.
def caller_action
  phase! Present
  return nil if @caller_action_id.nil?
  return @caller_action if @caller_action
  caller_execution_plan = if @caller_execution_plan_id.nil?
                            execution_plan
                          else
                            world.persistence.load_execution_plan(@caller_execution_plan_id)
                          end
  @caller_action = world.persistence.load_action_for_presentation(caller_execution_plan, @caller_action_id)
end
# Wires the planning context: the plan being built, the action that
# triggered this one and whether it came through a subscription.
def set_plan_context(execution_plan, triggering_action, from_subscription)
  phase! Plan
  @execution_plan = Type! execution_plan, ExecutionPlan
  @triggering_action = Type! triggering_action, Action, NilClass
  @from_subscription = Type! from_subscription, TrueClass, FalseClass
end

# action that caused this action to be planned. Available only in planning phase
def triggering_action
  phase! Plan
  @triggering_action
end

# True when this action was planned through a subscription.
def from_subscription?
  phase! Plan
  @from_subscription
end

def execution_plan
  phase! Plan, Present
  @execution_plan
end

def action_logger
  world.action_logger
end

# Present-phase step of the Plan phase.
def plan_step
  phase! Present
  execution_plan.steps.fetch(plan_step_id)
end
# @param [Class] filter_class return only actions which are kind of `filter_class`
# @return [Array<Action>] of directly planned actions by this action,
#   returned actions are in Present phase
def planned_actions(filter = Action)
  phase! Present
  plan_step
    .planned_steps(execution_plan)
    .map { |s| s.action(execution_plan) }
    .select { |a| a.is_a?(filter) }
end

# @param [Class] filter_class return only actions which are kind of `filter_class`
# @return [Array<Action>] of all (including indirectly) planned actions by this action,
#   returned actions are in Present phase
def all_planned_actions(filter_class = Action)
  phase! Present
  mine = planned_actions
  (mine + mine.reduce([]) { |arr, action| arr + action.all_planned_actions })
    .select { |a| a.is_a?(filter_class) }
end

# Present-phase step of the Run phase, if the action has one.
def run_step
  phase! Present
  execution_plan.steps.fetch(run_step_id) if run_step_id
end

# Present-phase step of the Finalize phase, if the action has one.
def finalize_step
  phase! Present
  execution_plan.steps.fetch(finalize_step_id) if finalize_step_id
end

# The three steps of the action; run/finalize entries may be nil.
def steps
  [plan_step, run_step, finalize_step]
end
# Serialized form of the action; output is included only in phases
# where it exists.
def to_hash
  recursive_to_hash(
    { class: self.class.name,
      execution_plan_id: execution_plan_id,
      id: id,
      plan_step_id: plan_step_id,
      run_step_id: run_step_id,
      finalize_step_id: finalize_step_id,
      caller_execution_plan_id: caller_execution_plan_id,
      caller_action_id: caller_action_id,
      input: input },
    if phase? Run, Finalize, Present
      { output: output }
    end
  )
end

# State of the step backing the current phase.
def state
  raise "state data not available" if @step.nil?
  @step.state
end

# @override to define more descriptive state information for the
# action: used in Dynflow console
def humanized_state
  state.to_s
end

# Error recorded on the step backing the current phase.
def error
  raise "error data not available" if @step.nil?
  @step.error
end

# Dispatches to execute_plan/execute_run/execute_finalize based on the
# current (executable) phase.
def execute(*args)
  phase! Executable
  self.send phase.execute_method_name, *args
end
# @api private
# @return [Array<Integer>] - ids of steps referenced from action
def required_step_ids(input = self.input)
  collected = []
  visit = lambda do |node|
    case node
    when Hash
      node.each_value { |child| visit.call(child) }
    when Array
      node.each { |child| visit.call(child) }
    when ExecutionPlan::OutputReference
      collected << node.step_id
    end
    # anything else holds no hidden references
    collected
  end
  visit.call(input)
end
# Executes the delay phase: runs the :delay middleware chain and stores
# the serializer produced by #delay after serializing the arguments.
def execute_delay(delay_options, *args)
  with_error_handling(true) do
    world.middleware.execute(:delay, self, delay_options, *args) do |*new_args|
      @serializer = delay(*new_args).tap do |serializer|
        serializer.perform_serialization!
      end
    end
  end
end
# Serializer holding the delayed-plan arguments; only available after
# execute_delay has run.
def serializer
  @serializer or raise "The action must be delayed in order to access the serializer"
end
# Overridden by singleton actions that hold a coordinator lock.
def holds_singleton_lock?
  false
end

# @override define what pool should the action be run in. The
# queue defined here will also be used as the default queue for
# all the steps planned under this action, unless overrided by sub-action
def queue
end

# Plan an +event+ to be send to the action defined by +action+, what defaults to be self.
# if +time+ is not passed, event is sent as soon as possible.
def plan_event(event, time = nil, execution_plan_id: self.execution_plan_id, step_id: self.run_step_id, optional: false)
  # A numeric +time+ is interpreted as a delay in seconds from now.
  time = @world.clock.current_time + time if time.is_a?(Numeric)
  delayed_events << DelayedEvent[execution_plan_id, step_id, event, time, optional]
end

# Events accumulated by plan_event.
def delayed_events
  @delayed_events ||= []
end
protected
def state=(state)
phase! Executable
@world.logger.debug format('%13s %s:%2d %9s >> %9s in phase %8s %s',
'Step', execution_plan_id, @step.id,
self.state, state,
phase.to_s_humanized, self.class)
@step.state = state
end
# If this save returns an integer, it means it was an update. The number
# represents the number of updated records. If it is 0, then the step was in
# an unexpected state and couldn't be updated.
# +conditions+ restrict the update (e.g. to an expected set of states);
# execute_run uses this to guard against double execution.
def save_state(conditions = {})
phase! Executable
@step.save(conditions)
end
# @override to customize how arguments of a delayed execution are
# serialized; defaults to a pass-through (Noop) serializer.
def delay(delay_options, *args)
Serializers::Noop.new(args)
end
# @override to implement the action's *Plan phase* behaviour.
# By default it plans itself and expects input-hash.
# Use #plan_self and #plan_action methods to plan actions.
# It can use DB in this phase.
# @return [self]
def plan(*args)
if from_subscription?
# if the action is triggered by subscription, by default use the
# input of parent action.
# should be replaced by referencing the input from input format
plan_self(input.merge(triggering_action.input))
else
# in this case, the action was triggered by plan_action. Use
# the argument specified there.
plan_self(*args)
end
self
end
# Add this method to implement the action's *Run phase* behaviour.
# It should not use DB in this phase.
def run(event = nil)
# just a rdoc placeholder
end
# removed so that respond_to?(:run) stays false unless a subclass defines
# the method — plan_self only adds a run step when the action responds to :run
remove_method :run
# Add this method to implement the action's *Finalize phase* behaviour.
# It can use DB in this phase.
def finalize
# just a rdoc placeholder
end
# removed for the same reason as :run — plan_self checks respond_to?(:finalize)
remove_method :finalize
# Whether the #run implementation takes an event argument (non-zero
# arity) and can therefore receive events while suspended.
def run_accepts_events?
  !method(:run).arity.zero?
end
# Builds an action from its persisted hash form: drops a nil :output and
# maps the persisted :execution_plan_uuid key to :execution_plan_id.
def self.new_from_hash(hash, world)
  hash.delete(:output) if hash[:output].nil?
  uuid = hash[:execution_plan_uuid]
  hash[:execution_plan_id] = uuid unless uuid.nil?
  new(hash, world)
end
private
# DSL for plan phase
# Plans the steps created inside +block+ into a concurrent flow
# (they may run in parallel).
def concurrence(&block)
phase! Plan
@execution_plan.switch_flow(Flows::Concurrence.new([]), &block)
end
# Plans the steps created inside +block+ into a sequential flow
# (they run one after another).
def sequence(&block)
phase! Plan
@execution_plan.switch_flow(Flows::Sequence.new([]), &block)
end
# Plans this action itself: merges +input+ into the action's input, and
# adds a run step (when the action defines #run) and a finalize step
# (when it defines #finalize) to the execution plan.
def plan_self(input = {})
phase! Plan
self.input.update input
if self.respond_to?(:run)
run_step = @execution_plan.add_run_step(self)
@run_step_id = run_step.id
@output_reference = OutputReference.new(@execution_plan.id, run_step.id, id)
end
if self.respond_to?(:finalize)
finalize_step = @execution_plan.add_finalize_step(self)
@finalize_step_id = finalize_step.id
end
return self # to stay consistent with plan_action
end
# Plans a sub-action of class +action_class+, passing +args+ to its #plan.
# @return [Action] the planned sub-action
def plan_action(action_class, *args)
phase! Plan
@execution_plan.add_plan_step(action_class, self).execute(@execution_plan, self, false, *args)
end
# DSL for run phase
# Memoized handle used to resume / send events to this action once it is
# suspended.
def suspended_action
phase! Run
@suspended_action ||= Action::Suspended.new(self)
end
# Suspends the run step: yields the suspended-action handle to +block+
# (if given) and unwinds out of #run by throwing SUSPEND.
def suspend(&block)
phase! Run
block.call suspended_action if block
throw SUSPEND, SUSPEND
end
# DSL to terminate action execution and set it to error
# Records +error+ on the step and aborts by throwing ERROR, which is
# caught in with_error_handling.
def error!(error)
phase! Executable
set_error(error)
throw ERROR
end
# Wraps a phase execution: catches ERROR throws and exceptions, records
# them via set_error, and advances the step state on success
# (scheduling -> pending, running -> success, skipping -> skipped).
# Exceptions that are neither StandardError nor ScriptError are re-raised
# even after being recorded. When +propagate_error+ is truthy, an :error
# outcome is re-raised to the caller (used during planning).
def with_error_handling(propagate_error = nil, &block)
raise "wrong state #{self.state}" unless [:scheduling, :skipping, :running].include?(self.state)
begin
catch(ERROR) { block.call }
rescue Exception => error
set_error(error)
# reraise low-level exceptions
raise error unless Type? error, StandardError, ScriptError
end
case self.state
when :scheduling
self.state = :pending
when :running
self.state = :success
when :skipping
self.state = :skipped
when :suspended, :error
# Do nothing
else
raise "wrong state #{self.state}"
end
if propagate_error && self.state == :error
raise(@step.error.exception)
end
end
# Marks the step as failed: logs +error+ (Exception or String), flips the
# state to :error and stores a serializable Error object on the step.
def set_error(error)
phase! Executable
Type! error, Exception, String
action_logger.error error
self.state = :error
@step.error = ExecutionPlan::Steps::Error.new(error)
end
# Drives the *Plan phase*: runs #plan inside a concurrence flow (through
# the :plan middleware), then plans any actions subscribed to this
# action's class in parallel with it, and finally verifies the input is
# serializable.
def execute_plan(*args)
phase! Plan
self.state = :running
save_state
# when the error occurred inside the planning, catch that
# before getting out of the planning phase
with_error_handling(!root_action?) do
concurrence do
world.middleware.execute(:plan, self, *args) do |*new_args|
plan(*new_args)
end
end
subscribed_actions = world.subscribed_actions(self.class)
if subscribed_actions.any?
# we encapsulate the flow for this action into a concurrence and
# add the subscribed flows to it as well.
trigger_flow = @execution_plan.current_run_flow.sub_flows.pop
@execution_plan.switch_flow(Flows::Concurrence.new([trigger_flow].compact)) do
subscribed_actions.each do |action_class|
new_plan_step = @execution_plan.add_plan_step(action_class, self)
new_plan_step.execute(@execution_plan, self, true, *args)
end
end
end
check_serializable :input
end
end
# TODO: This is getting out of hand, refactoring needed
# Drives the *Run phase*, covering first execution, re-execution after
# error/skip, and delivery of an +event+ to a suspended run.
def execute_run(event)
phase! Run
@world.logger.debug format('%13s %s:%2d got event %s',
'Step', execution_plan_id, @step.id, event) if event
case
when state == :running
raise NotImplementedError, 'recovery after restart is not implemented'
when [:pending, :error, :skipping, :suspended].include?(state)
if event && state != :suspended
raise 'event can be processed only when in suspended state'
end
old_state = self.state
self.state = :running unless self.state == :skipping
# conditional save: succeeds only when the persisted state is still one
# of the expected ones, guarding against duplicate execution
saved = save_state(:state => %w(pending error skipping suspended))
if saved.kind_of?(Integer) && !saved.positive?
# The step was already in a state we're trying to transition to, most
# likely we were about to execute it for the second time after first
# execution was forcefully interrupted.
# Set error and return to prevent the step from being executed twice
set_error "Could not transition step from #{old_state} to #{self.state}, step already in #{self.state}."
return
end
@input = OutputReference.dereference @input, world.persistence
with_error_handling do
event = Skip if state == :skipping
# we run the Skip event only when the run accepts events
if event != Skip || run_accepts_events?
result = catch(SUSPEND) do
world.middleware.execute(:run, self, *[event].compact) do |*args|
run(*args)
end
end
self.state = :suspended if result == SUSPEND
end
check_serializable :output
end
else
raise "wrong state #{state} when event:#{event}"
end
end
# Drives the *Finalize phase*: dereferences output references in the
# input, then runs #finalize through the :finalize middleware.
def execute_finalize
phase! Finalize
@input = OutputReference.dereference @input, world.persistence
self.state = :running
save_state
with_error_handling do
world.middleware.execute(:finalize, self) do
finalize
end
end
end
# Verifies that the action's input or output can be serialized
# (recursive_to_hash raises otherwise); on failure the value is replaced
# with a `not_serializable` marker before the error is re-raised, so the
# broken value is never persisted as-is.
def check_serializable(what)
Match! what, :input, :output
value = send what
recursive_to_hash value # it raises when not serializable
rescue => e
value.replace not_serializable: true
raise e
end
# True when this action is the root of its execution plan.
# In the planning phase a non-nil @triggering_action marks a non-root
# action. After planning, the action is root when it has no caller
# action, or when the caller lives in a *different* execution plan
# (i.e. @caller_execution_plan_id is set) — that means this action
# created a new plan tracked as a sub-plan of the caller's plan.
def root_action?
  return false unless @triggering_action.nil?
  @caller_action_id.nil? || @caller_execution_plan_id
end
# @return [true, false] whether a singleton lock for this action class is
# currently held in the coordinator. Always false for classes that do not
# include Dynflow::Action::Singleton.
def self.singleton_locked?(world)
if self.ancestors.include? ::Dynflow::Action::Singleton
lock_class = ::Dynflow::Coordinator::SingletonActionLock
world.coordinator.find_locks(lock_class.unique_filter(self.name)).any?
else
false
end
end
end
# rubocop:enable Metrics/ClassLength
end
| ruby | MIT | e14dcdfe57cec99cc78b2f6ab521f00809b1408a | 2026-01-04T17:50:16.326730Z | false |
Dynflow/dynflow | https://github.com/Dynflow/dynflow/blob/e14dcdfe57cec99cc78b2f6ab521f00809b1408a/lib/dynflow/execution_history.rb | lib/dynflow/execution_history.rb | # frozen_string_literal: true
module Dynflow
# Append-only log of notable events in an execution plan's life, each
# entry carrying a unix timestamp, an event name and (optionally) the id
# of the world where it happened.
class ExecutionHistory
include Algebrick::TypeCheck
include Enumerable
# Algebrick value type for a single history entry.
Event = Algebrick.type do
fields! time: Integer,
name: String,
world_id: type { variants String, NilClass }
end
module Event
# Human-readable form, e.g. "2020-01-01 00:00:00 UTC: start @ world-id".
def inspect
["#{Time.at(time).utc}: #{name}", world_id].compact.join(' @ ')
end
end
attr_reader :events
# @param events [Array<Event>, nil] pre-existing, type-checked entries
def initialize(events = [])
@events = (events || []).each { |e| Type! e, Event }
end
def each(&block)
@events.each(&block)
end
# Appends an event named +name+ stamped with the current time.
def add(name, world_id = nil)
@events << Event[Time.now.to_i, name, world_id]
end
def to_hash
@events.map(&:to_hash)
end
def inspect
"ExecutionHistory: #{@events.inspect}"
end
def self.new_from_hash(value)
value ||= [] # for compatibility with tasks before the
# introduction of execution history
self.new(value.map { |hash| Event[hash] })
end
end
end
| ruby | MIT | e14dcdfe57cec99cc78b2f6ab521f00809b1408a | 2026-01-04T17:50:16.326730Z | false |
Dynflow/dynflow | https://github.com/Dynflow/dynflow/blob/e14dcdfe57cec99cc78b2f6ab521f00809b1408a/lib/dynflow/stateful.rb | lib/dynflow/stateful.rb | # frozen_string_literal: true
module Dynflow
  # Mixin providing a minimal, validated state machine. Including classes
  # declare the allowed +states+ and legal +state_transitions+ at the
  # class level; instances then get a transition-checked +state=+ writer.
  module Stateful
    def self.included(base)
      base.extend ClassMethods
    end

    module ClassMethods
      # @return [Array<Symbol>] all states the object can ever be in
      def states
        raise NotImplementedError
      end

      # @return [Hash{Symbol=>Array<Symbol>}] allowed follow-up states
      def state_transitions
        raise NotImplementedError
      end
    end

    def states
      self.class.states
    end

    def state_transitions
      self.class.state_transitions
    end

    attr_reader :state

    # Checked writer: only transitions listed in +state_transitions+ pass.
    def state=(state)
      set_state state, false
    end

    # Sets the state; +skip_transition_check+ bypasses the transition
    # table (while still validating that the state itself is known).
    # String states matching a known state's name are coerced to symbols.
    def set_state(state, skip_transition_check)
      if state.is_a?(String) && states.map(&:to_s).include?(state)
        state = state.to_sym
      end
      raise "unknown state #{state}" unless states.include? state
      allowed = self.state.nil? ||
                skip_transition_check ||
                state_transitions.fetch(self.state).include?(state)
      raise "invalid state transition #{self.state} >> #{state} in #{self}" unless allowed
      @state = state
    end
  end
end
| ruby | MIT | e14dcdfe57cec99cc78b2f6ab521f00809b1408a | 2026-01-04T17:50:16.326730Z | false |
Dynflow/dynflow | https://github.com/Dynflow/dynflow/blob/e14dcdfe57cec99cc78b2f6ab521f00809b1408a/lib/dynflow/testing.rb | lib/dynflow/testing.rb | # frozen_string_literal: true
module Dynflow
# Helpers for testing Dynflow actions in isolation: dummy stand-ins for
# the world / executor / execution plan plus assertion and factory mixins.
module Testing
extend Algebrick::TypeCheck
# Logger used by the test doubles; defaults to a simple file logger.
def self.logger_adapter
@logger_adapter || LoggerAdapters::Simple.new('test.log', 1)
end
def self.logger_adapter=(adapter)
Type! adapter, LoggerAdapters::Abstract
@logger_adapter = adapter
end
# Monotonic counter for generating unique ids in the dummies.
def self.get_id
@last_id ||= 0
@last_id += 1
end
require 'dynflow/testing/mimic'
require 'dynflow/testing/managed_clock'
require 'dynflow/testing/dummy_coordinator'
require 'dynflow/testing/dummy_world'
require 'dynflow/testing/dummy_executor'
require 'dynflow/testing/dummy_execution_plan'
require 'dynflow/testing/dummy_step'
require 'dynflow/testing/dummy_planned_action'
require 'dynflow/testing/in_thread_executor'
require 'dynflow/testing/in_thread_world'
require 'dynflow/testing/assertions'
require 'dynflow/testing/factories'
include Assertions
include Factories
end
end
| ruby | MIT | e14dcdfe57cec99cc78b2f6ab521f00809b1408a | 2026-01-04T17:50:16.326730Z | false |
Dynflow/dynflow | https://github.com/Dynflow/dynflow/blob/e14dcdfe57cec99cc78b2f6ab521f00809b1408a/lib/dynflow/watchers/memory_consumption_watcher.rb | lib/dynflow/watchers/memory_consumption_watcher.rb | # frozen_string_literal: true
require 'get_process_mem'
module Dynflow
module Watchers
# Periodically polls the process's memory usage via the world's clock and
# terminates the world once +memory_limit+ (in bytes) is exceeded.
class MemoryConsumptionWatcher
attr_reader :memory_limit, :world
# @param world [Dynflow::World] world whose clock is used and which is
#   terminated on limit breach
# @param memory_limit [Integer] limit in bytes
# @option options [Integer] :polling_interval seconds between checks (default 60)
# @option options [#bytes] :memory_info_provider source of the current usage
# @option options [Proc] :memory_checked_callback called (current, limit) when under the limit
# @option options [Proc] :memory_limit_exceeded_callback called (current, limit) before termination
# @option options [Integer] :initial_wait delay before the first check
def initialize(world, memory_limit, options)
@memory_limit = memory_limit
@world = world
@polling_interval = options[:polling_interval] || 60
@memory_info_provider = options[:memory_info_provider] || GetProcessMem.new
@memory_checked_callback = options[:memory_checked_callback]
@memory_limit_exceeded_callback = options[:memory_limit_exceeded_callback]
set_timer options[:initial_wait] || @polling_interval
end
# Clock callback: compares current usage to the limit and either
# terminates the world or schedules the next check.
def check_memory_state
current_memory = @memory_info_provider.bytes
if current_memory > @memory_limit
@memory_limit_exceeded_callback.call(current_memory, @memory_limit) if @memory_limit_exceeded_callback
# terminate the world and stop polling
world.terminate
else
@memory_checked_callback.call(current_memory, @memory_limit) if @memory_checked_callback
# memory is under the limit - keep waiting
set_timer
end
end
# Schedules the next check_memory_state call on the world's clock.
def set_timer(interval = @polling_interval)
@world.clock.ping(self, interval, nil, :check_memory_state)
end
end
end
end
| ruby | MIT | e14dcdfe57cec99cc78b2f6ab521f00809b1408a | 2026-01-04T17:50:16.326730Z | false |
Dynflow/dynflow | https://github.com/Dynflow/dynflow/blob/e14dcdfe57cec99cc78b2f6ab521f00809b1408a/lib/dynflow/serializers/noop.rb | lib/dynflow/serializers/noop.rb | # frozen_string_literal: true
module Dynflow
module Serializers
# Pass-through serializer: stores arguments unchanged. Only appropriate
# when the arguments are already plain, serializable values.
class Noop < Abstract
def serialize(arg)
arg
end
def deserialize(arg)
arg
end
end
end
end
| ruby | MIT | e14dcdfe57cec99cc78b2f6ab521f00809b1408a | 2026-01-04T17:50:16.326730Z | false |
Dynflow/dynflow | https://github.com/Dynflow/dynflow/blob/e14dcdfe57cec99cc78b2f6ab521f00809b1408a/lib/dynflow/serializers/abstract.rb | lib/dynflow/serializers/abstract.rb | # frozen_string_literal: true
module Dynflow
  module Serializers
    # @abstract Base class for argument (de)serialization, used by
    # {DelayedPlan} to store the arguments that will be passed to a
    # {Dynflow::Action}'s #plan once the delayed plan fires. Subclasses
    # implement {#serialize} / {#deserialize} for a single argument; the
    # perform_* methods map them over the whole argument list.
    class Abstract
      attr_reader :args, :serialized_args

      # @param args [Array, nil] plain arguments
      # @param serialized_args [Array, nil] arguments in serialized form
      def initialize(args, serialized_args = nil)
        @args = args
        @serialized_args = serialized_args
      end

      # @return [Array] the plain (deserialized) arguments
      # @raise [RuntimeError] when only the serialized form is available
      def args!
        raise "@args not set" if @args.nil?
        @args
      end

      # @return [Array] the serialized arguments
      # @raise [RuntimeError] when only the plain form is available
      def serialized_args!
        raise "@serialized_args not set" if @serialized_args.nil?
        @serialized_args
      end

      # Serializes every plain argument and memoizes the result.
      # @return [Array] the serialized arguments
      def perform_serialization!
        @serialized_args = args!.map { |argument| serialize(argument) }
      end

      # Deserializes every serialized argument and memoizes the result.
      # @return [Array] the plain arguments
      def perform_deserialization!
        @args = serialized_args!.map { |argument| deserialize(argument) }
      end

      # @abstract turn one argument into its serialized form
      def serialize(arg)
        raise NotImplementedError
      end

      # @abstract turn one serialized argument back into its plain form
      def deserialize(arg)
        raise NotImplementedError
      end
    end
  end
end
| ruby | MIT | e14dcdfe57cec99cc78b2f6ab521f00809b1408a | 2026-01-04T17:50:16.326730Z | false |
Dynflow/dynflow | https://github.com/Dynflow/dynflow/blob/e14dcdfe57cec99cc78b2f6ab521f00809b1408a/lib/dynflow/persistence_adapters/sequel.rb | lib/dynflow/persistence_adapters/sequel.rb | # frozen_string_literal: true
require 'sequel'
require 'msgpack'
require 'fileutils'
require 'csv'
# rubocop:disable Metrics/ClassLength
module Dynflow
module PersistenceAdapters
Sequel.extension :migration
Sequel.database_timezone = :utc
class Sequel < Abstract
include Algebrick::TypeCheck
include Algebrick::Matching
MAX_RETRIES = 10
RETRY_DELAY = 1
attr_reader :db
def pagination?
true
end
def filtering_by
META_DATA.fetch :execution_plan
end
def ordering_by
META_DATA.fetch :execution_plan
end
META_DATA = { execution_plan: %w(label state result started_at ended_at real_time execution_time root_plan_step_id class),
action: %w(caller_execution_plan_id caller_action_id class plan_step_id run_step_id finalize_step_id),
step: %w(state started_at ended_at real_time execution_time action_id progress_done progress_weight
class action_class execution_plan_uuid queue),
envelope: %w(receiver_id),
coordinator_record: %w(id owner_id class),
delayed: %w(execution_plan_uuid start_at start_before args_serializer frozen),
output_chunk: %w(execution_plan_uuid action_id kind timestamp),
execution_plan_dependency: %w(execution_plan_uuid blocked_by_uuid) }
SERIALIZABLE_COLUMNS = { action: %w(input output),
delayed: %w(serialized_args),
execution_plan: %w(run_flow finalize_flow execution_history step_ids),
step: %w(error children),
output_chunk: %w(chunk) }
def initialize(config)
migrate = true
config = config.dup
@additional_responsibilities = { coordinator: true, connector: true }
if config.is_a?(Hash)
@additional_responsibilities.merge!(config.delete(:additional_responsibilities)) if config.key?(:additional_responsibilities)
migrate = config.fetch(:migrate, true)
end
@db = initialize_db config
migrate_db if migrate
end
def transaction(&block)
db.transaction(&block)
end
def find_execution_plans(options = {})
table_name = :execution_plan
options[:order_by] ||= :started_at
data_set = filter(table_name,
order(table_name,
paginate(table(table_name), options),
options),
options[:filters])
records = with_retry { data_set.all }
records.map { |record| execution_plan_column_map(load_data(record, table_name)) }
end
def find_execution_plan_counts(options = {})
with_retry { filter(:execution_plan, table(:execution_plan), options[:filters]).count }
end
def find_execution_plan_counts_after(timestamp, options = {})
with_retry { filter(:execution_plan, table(:execution_plan), options[:filters]).filter(::Sequel.lit('ended_at >= ?', timestamp)).count }
end
def find_execution_plan_statuses(options)
plans = with_retry do
filter(:execution_plan, table(:execution_plan), options[:filters])
.select(:uuid, :state, :result)
end
plans.each_with_object({}) do |current, acc|
uuid = current.delete(:uuid)
acc[uuid] = current
end
end
def delete_execution_plans(filters, batch_size = 1000, backup_dir = nil)
count = 0
with_retry do
filter(:execution_plan, table(:execution_plan), filters).each_slice(batch_size) do |plans|
uuids = plans.map { |p| p.fetch(:uuid) }
@db.transaction do
table(:delayed).where(execution_plan_uuid: uuids).delete
steps = table(:step).where(execution_plan_uuid: uuids)
backup_to_csv(:step, steps, backup_dir, 'steps.csv') if backup_dir
steps.delete
output_chunks = table(:output_chunk).where(execution_plan_uuid: uuids).delete
actions = table(:action).where(execution_plan_uuid: uuids)
backup_to_csv(:action, actions, backup_dir, 'actions.csv') if backup_dir
actions.delete
execution_plans = table(:execution_plan).where(uuid: uuids)
backup_to_csv(:execution_plan, execution_plans, backup_dir, 'execution_plans.csv') if backup_dir
count += execution_plans.delete
end
end
return count
end
end
def load_execution_plan(execution_plan_id)
execution_plan_column_map(load :execution_plan, uuid: execution_plan_id)
end
def save_execution_plan(execution_plan_id, value)
save :execution_plan, { uuid: execution_plan_id }, value, with_data: false
end
def delete_delayed_plans(filters, batch_size = 1000)
count = 0
with_retry do
filter(:delayed, table(:delayed), filters).each_slice(batch_size) do |plans|
uuids = plans.map { |p| p.fetch(:execution_plan_uuid) }
@db.transaction do
count += table(:delayed).where(execution_plan_uuid: uuids).delete
end
end
end
count
end
def find_old_execution_plans(age)
table_name = :execution_plan
records = with_retry do
table(table_name)
.where(::Sequel.lit('ended_at <= ? AND state = ?', age, 'stopped'))
.all
end
records.map { |plan| execution_plan_column_map(load_data plan, table_name) }
end
def find_execution_plan_dependencies(execution_plan_id)
table(:execution_plan_dependency)
.where(execution_plan_uuid: execution_plan_id)
.select_map(:blocked_by_uuid)
end
def find_blocked_execution_plans(execution_plan_id)
table(:execution_plan_dependency)
.where(blocked_by_uuid: execution_plan_id)
.select_map(:execution_plan_uuid)
end
def find_ready_delayed_plans(time)
table_name = :delayed
# Subquery to find delayed plans that have at least one non-stopped dependency
plans_with_unfinished_deps = table(:execution_plan_dependency)
.join(TABLES[:execution_plan], uuid: :blocked_by_uuid)
.where(::Sequel.~(state: 'stopped'))
.select(:execution_plan_uuid)
records = with_retry do
table(table_name)
.where(::Sequel.lit('start_at IS NULL OR (start_at <= ? OR (start_before IS NOT NULL AND start_before <= ?))', time, time))
.where(:frozen => false)
.exclude(execution_plan_uuid: plans_with_unfinished_deps)
.order_by(:start_at)
.all
end
records.map { |plan| load_data(plan, table_name) }
end
def load_delayed_plan(execution_plan_id)
load :delayed, execution_plan_uuid: execution_plan_id
rescue KeyError
return nil
end
def save_delayed_plan(execution_plan_id, value)
save :delayed, { execution_plan_uuid: execution_plan_id }, value, with_data: false
end
def chain_execution_plan(first, second)
save :execution_plan_dependency, {}, { execution_plan_uuid: second, blocked_by_uuid: first }, with_data: false
end
def load_step(execution_plan_id, step_id)
load :step, execution_plan_uuid: execution_plan_id, id: step_id
end
def load_steps(execution_plan_id)
load_records :step, execution_plan_uuid: execution_plan_id
end
def save_step(execution_plan_id, step_id, value, update_conditions = {})
save :step, { execution_plan_uuid: execution_plan_id, id: step_id }, value,
with_data: false, update_conditions: update_conditions
end
def load_action(execution_plan_id, action_id)
load :action, execution_plan_uuid: execution_plan_id, id: action_id
end
def load_actions(execution_plan_id, action_ids)
load_records :action, { execution_plan_uuid: execution_plan_id, id: action_ids }
end
def load_actions_attributes(execution_plan_id, attributes)
load_records :action, { execution_plan_uuid: execution_plan_id }, attributes
end
def save_action(execution_plan_id, action_id, value)
save :action, { execution_plan_uuid: execution_plan_id, id: action_id }, value, with_data: false
end
def save_output_chunks(execution_plan_id, action_id, chunks)
chunks.each do |chunk|
chunk[:execution_plan_uuid] = execution_plan_id
chunk[:action_id] = action_id
save :output_chunk, {}, chunk, with_data: false
end
end
def load_output_chunks(execution_plan_id, action_id)
load_records :output_chunk, { execution_plan_uuid: execution_plan_id, action_id: action_id }, [:timestamp, :kind, :chunk]
end
def delete_output_chunks(execution_plan_id, action_id)
with_retry do
filter(:output_chunk, table(:output_chunk), { execution_plan_uuid: execution_plan_id, action_id: action_id }).delete
end
end
def connector_feature!
unless @additional_responsibilities[:connector]
raise "The sequel persistence adapter connector feature used but not enabled in additional_features"
end
end
def save_envelope(data)
connector_feature!
save :envelope, {}, data
end
def pull_envelopes(receiver_id)
connector_feature!
with_retry do
db.transaction do
data_set = table(:envelope).where(receiver_id: receiver_id).all
envelopes = data_set.map { |record| load_data(record) }
table(:envelope).where(id: data_set.map { |d| d[:id] }).delete
return envelopes
end
end
end
def push_envelope(envelope)
connector_feature!
with_retry { table(:envelope).insert(prepare_record(:envelope, envelope)) }
end
def prune_envelopes(receiver_ids)
connector_feature!
with_retry { table(:envelope).where(receiver_id: receiver_ids).delete }
end
def prune_undeliverable_envelopes
connector_feature!
with_retry { table(:envelope).where(receiver_id: table(:coordinator_record).select(:id)).invert.delete }
end
def coordinator_feature!
unless @additional_responsibilities[:coordinator]
raise "The sequel persistence adapter coordinator feature used but not enabled in additional_features"
end
end
def insert_coordinator_record(value)
coordinator_feature!
save :coordinator_record, {}, value
end
def update_coordinator_record(class_name, record_id, value)
coordinator_feature!
save :coordinator_record, { class: class_name, :id => record_id }, value
end
def delete_coordinator_record(class_name, record_id)
coordinator_feature!
with_retry { table(:coordinator_record).where(class: class_name, id: record_id).delete }
end
def find_coordinator_records(options)
coordinator_feature!
options = options.dup
filters = (options[:filters] || {}).dup
exclude_owner_id = filters.delete(:exclude_owner_id)
data_set = filter(:coordinator_record, table(:coordinator_record), filters)
if exclude_owner_id
data_set = data_set.exclude(:owner_id => exclude_owner_id)
end
with_retry do
data_set.all.map { |record| load_data(record) }
end
end
def to_hash
{ execution_plans: table(:execution_plan).all.to_a,
steps: table(:step).all.to_a,
actions: table(:action).all.to_a,
envelopes: table(:envelope).all.to_a }
end
def migrate_db
::Sequel::Migrator.run(db, self.class.migrations_path, table: 'dynflow_schema_info')
end
def abort_if_pending_migrations!
::Sequel::Migrator.check_current(db, self.class.migrations_path, table: 'dynflow_schema_info')
end
private
TABLES = { execution_plan: :dynflow_execution_plans,
action: :dynflow_actions,
step: :dynflow_steps,
envelope: :dynflow_envelopes,
coordinator_record: :dynflow_coordinator_records,
delayed: :dynflow_delayed_plans,
output_chunk: :dynflow_output_chunks,
execution_plan_dependency: :dynflow_execution_plan_dependencies }
def table(which)
db[TABLES.fetch(which)]
end
def initialize_db(db_path)
logger = Logger.new($stderr) if ENV['DYNFLOW_SQL_LOG']
::Sequel.connect db_path, logger: logger
end
def self.migrations_path
File.expand_path('../sequel_migrations', __FILE__)
end
def prepare_record(table_name, value, base = {}, with_data = true)
record = base.dup
has_data_column = table(table_name).columns.include?(:data)
if with_data && has_data_column
record[:data] = dump_data(value)
else
if has_data_column
record[:data] = nil
else
record.delete(:data)
end
record.merge! serialize_columns(table_name, value)
end
record.merge! extract_metadata(table_name, value)
record.each { |k, v| record[k] = v.to_s if v.is_a? Symbol }
record
end
def serialize_columns(table_name, record)
record.reduce({}) do |acc, (key, value)|
if SERIALIZABLE_COLUMNS.fetch(table_name, []).include?(key.to_s)
acc.merge(key.to_sym => dump_data(value))
else
acc
end
end
end
# Upserts +value+ into table +what+ identified by +condition+; deletes the
# row instead when +value+ is nil. Only changed columns are written
# (prune_unchanged); +update_conditions+ further restrict the UPDATE
# (used for optimistic state checks, e.g. save_step).
# @return [Integer] number of updated rows when an existing row was
#   updated, otherwise +value+
def save(what, condition, value, with_data: true, update_conditions: {})
table = table(what)
existing_record = with_retry { table.first condition } unless condition.empty?
if value
record = prepare_record(what, value, (existing_record || condition), with_data)
if existing_record
record = prune_unchanged(what, existing_record, record)
return value if record.empty?
condition = update_conditions.merge(condition)
return with_retry { table.where(condition).update(record) }
else
with_retry { table.insert record }
end
else
existing_record and with_retry { table.where(condition).delete }
end
value
end
def load_record(what, condition)
table = table(what)
if (record = with_retry { table.first(Utils.symbolize_keys(condition)) })
load_data(record, what)
else
raise KeyError, "searching: #{what} by: #{condition.inspect}"
end
end
def prune_unchanged(what, object, record)
record = record.dup
table(what).columns.each do |column|
record.delete(column) if object[column] == record[column]
end
record
end
alias_method :load, :load_record
def load_records(what, condition, keys = nil)
table = table(what)
records = with_retry do
filtered = table.filter(Utils.symbolize_keys(condition))
# Filter out requested columns which the table doesn't have, load data just in case
unless keys.nil?
columns = table.columns & keys
columns |= [:data] if table.columns.include?(:data)
filtered = filtered.select(*columns)
end
filtered.all
end
records = records.map { |record| load_data(record, what) }
return records if keys.nil?
records.map do |record|
keys.reduce({}) do |acc, key|
acc.merge(key => record[key])
end
end
end
# Decodes a DB row into an indifferent-access hash. Two layouts exist:
# legacy rows keep the whole payload MessagePack-encoded in :data; newer
# rows (where :data is nil) keep plain columns plus per-column MessagePack
# blobs for the columns listed in SERIALIZABLE_COLUMNS for +what+.
def load_data(record, what = nil)
hash = if record[:data].nil?
SERIALIZABLE_COLUMNS.fetch(what, []).each do |key|
key = key.to_sym
record[key] = MessagePack.unpack(record[key].to_s) unless record[key].nil?
end
record
else
MessagePack.unpack(record[:data].to_s)
end
Utils.indifferent_hash(hash)
end
def ensure_backup_dir(backup_dir)
FileUtils.mkdir_p(backup_dir) unless File.directory?(backup_dir)
end
def backup_to_csv(table_name, dataset, backup_dir, file_name)
ensure_backup_dir(backup_dir)
csv_file = File.join(backup_dir, file_name)
appending = File.exist?(csv_file)
columns = dataset.columns
File.open(csv_file, 'a') do |csv|
csv << columns.to_csv unless appending
dataset.each do |row|
values = columns.map do |col|
value = row[col]
value = value.unpack('H*').first if value && SERIALIZABLE_COLUMNS.fetch(table_name, []).include?(col.to_s)
value
end
csv << values.to_csv
end
end
dataset
end
def delete(what, condition)
with_retry { table(what).where(Utils.symbolize_keys(condition)).delete }
end
def extract_metadata(what, value)
meta_keys = META_DATA.fetch(what) - SERIALIZABLE_COLUMNS.fetch(what, [])
value = Utils.indifferent_hash(value)
meta_keys.inject({}) { |h, k| h.update k.to_sym => value[k] }
end
def dump_data(value)
return if value.nil?
packed = MessagePack.pack(Type!(value, Hash, Array, Integer, String))
::Sequel.blob(packed)
end
# Applies pagination to +data_set+: options[:page] (0-based) selects the
# page, options[:per_page] its size. Both are coerced with Integer() and
# therefore raise on malformed values; specifying :page without
# :per_page is an ArgumentError. Without :page the data set is returned
# unchanged.
def paginate(data_set, options)
  page = options[:page] && Integer(options[:page])
  per_page = options[:per_page] && Integer(options[:per_page])
  return data_set unless page
  raise ArgumentError, "page specified without per_page attribute" unless per_page
  data_set.limit per_page, per_page * page
end
# Applies ordering from options[:order_by] (must be a known META_DATA
# column of table +what+) to +data_set+; options[:desc] reverses it.
# A blank :order_by leaves the data set unordered.
def order(what, data_set, options)
order_by = (options[:order_by]).to_s
return data_set if order_by.empty?
unless META_DATA.fetch(what).include? order_by
raise ArgumentError, "unknown column #{order_by.inspect}"
end
order_by = order_by.to_sym
data_set.order_by options[:desc] ? ::Sequel.desc(order_by) : order_by
end
# Applies +filters+ (column => allowed value(s)) to +data_set+ for table
# +what+. For execution plans a few virtual filters are supported:
# 'caller_execution_plan_id' / 'caller_action_id' join against the action
# table, and 'delayed' joins against the delayed-plan table. Unknown
# filter keys raise ArgumentError.
def filter(what, data_set, filters)
  Type! filters, NilClass, Hash
  return data_set if filters.nil?
  filters = filters.each.with_object({}) { |(k, v), hash| hash[k.to_s] = v }
  unknown = filters.keys - META_DATA.fetch(what)
  if what == :execution_plan
    unknown -= %w[uuid caller_execution_plan_id caller_action_id delayed]
    if filters.key?('caller_action_id') && !filters.key?('caller_execution_plan_id')
      raise ArgumentError, "caller_action_id given but caller_execution_plan_id missing"
    end
    if filters.key?('caller_execution_plan_id')
      data_set = data_set.join_table(:inner, TABLES[:action], :execution_plan_uuid => :uuid)
                         .select_all(TABLES[:execution_plan]).distinct
    end
    if filters.key?('delayed')
      filters.delete('delayed')
      data_set = data_set.join_table(:inner, TABLES[:delayed], :execution_plan_uuid => :uuid)
                         .select_all(TABLES[:execution_plan]).distinct
    end
  end
  unless unknown.empty?
    # fixed typo in the error message ("unkown" -> "unknown")
    raise ArgumentError, "unknown columns: #{unknown.inspect}"
  end
  data_set.where Utils.symbolize_keys(filters)
end
# Runs the block, retrying up to MAX_RETRIES times (sleeping RETRY_DELAY
# between attempts) on database connection/disconnection errors.
# Exhausted retries raise FatalPersistenceError; any other exception is
# wrapped in PersistenceError.
def with_retry
attempts = 0
begin
yield
rescue ::Sequel::DatabaseConnectionError, ::Sequel::DatabaseDisconnectError => e
attempts += 1
log(:error, e)
if attempts > MAX_RETRIES
log(:error, "The number of MAX_RETRIES exceeded")
raise Errors::FatalPersistenceError.delegate(e)
else
log(:error, "Persistence retry no. #{attempts}")
sleep RETRY_DELAY
retry
end
rescue Exception => e
raise Errors::PersistenceError.delegate(e)
end
end
# Maps the DB column :uuid onto the :id key expected by the in-memory
# representation; returns the (mutated) plan hash.
def execution_plan_column_map(plan)
  uuid = plan[:uuid]
  plan[:id] = uuid unless uuid.nil?
  plan
end
end
end
end
# rubocop:enable Metrics/ClassLength
| ruby | MIT | e14dcdfe57cec99cc78b2f6ab521f00809b1408a | 2026-01-04T17:50:16.326730Z | false |
Dynflow/dynflow | https://github.com/Dynflow/dynflow/blob/e14dcdfe57cec99cc78b2f6ab521f00809b1408a/lib/dynflow/persistence_adapters/abstract.rb | lib/dynflow/persistence_adapters/abstract.rb | # frozen_string_literal: true
module Dynflow
module PersistenceAdapters
class Abstract
# The logger is set by the world when used inside it
attr_accessor :logger
# Adopts the world's logger unless a logger was already set explicitly.
def register_world(world)
@logger ||= world.logger
end
# Logs +message+ at +level+; silently a no-op when no logger is attached.
def log(level, message)
logger.send(level, message) if logger
end
def pagination?
false
end
def transaction
raise NotImplementedError
end
def filtering_by
[]
end
def ordering_by
[]
end
# @option options [Integer] page index of the page (starting at 0)
# @option options [Integer] per_page the number of the items on page
# @option options [Symbol] order_by name of the column to use for ordering
# @option options [true, false] desc set to true if order should be descending
# @option options [Hash{ String => Object,Array<object> }] filters hash represents
# set of allowed values for a given key representing column
def find_execution_plans(options = {})
raise NotImplementedError
end
# @option options [Hash{ String => Object,Array<object> }] filters hash represents
# set of allowed values for a given key representing column
# NOTE(review): this default implementation calls filter(what, filters)
# with two arguments, while e.g. the Sequel adapter defines
# filter(what, data_set, filters) with three — presumably concrete
# adapters override this method entirely; confirm before relying on it.
def find_execution_plan_counts(options = {})
filter(:execution_plan, options[:filters]).count
end
def find_execution_plan_counts_after(timestamp, options = {})
raise NotImplementedError
end
def find_execution_plan_statuses(options)
raise NotImplementedError
end
# @param filters [Hash{ String => Object }] filters to determine
# what to delete
# @param batch_size the size of the chunks to iterate over when
# performing the deletion
# @param backup_dir where the backup of deleted plans will be created.
# Set to nil for no backup
def delete_execution_plans(filters, batch_size = 1000, backup_dir = nil)
raise NotImplementedError
end
def load_execution_plan(execution_plan_id)
raise NotImplementedError
end
def save_execution_plan(execution_plan_id, value)
raise NotImplementedError
end
def find_execution_plan_dependencies(execution_plan_id)
raise NotImplementedError
end
def find_blocked_execution_plans(execution_plan_id)
raise NotImplementedError
end
def find_ready_delayed_plans(options = {})
raise NotImplementedError
end
def delete_delayed_plans(filters, batch_size = 1000)
raise NotImplementedError
end
def load_delayed_plan(execution_plan_id)
raise NotImplementedError
end
def save_delayed_plan(execution_plan_id, value)
raise NotImplementedError
end
def load_step(execution_plan_id, step_id)
raise NotImplementedError
end
def save_step(execution_plan_id, step_id, value)
raise NotImplementedError
end
def load_action(execution_plan_id, action_id)
raise NotImplementedError
end
def load_actions_attributes(execution_plan_id, attributes)
raise NotImplementedError
end
def load_actions(execution_plan_id, action_ids)
raise NotImplementedError
end
def save_action(execution_plan_id, action_id, value)
raise NotImplementedError
end
def save_output_chunks(execution_plan_id, action_id, chunks)
raise NotImplementedError
end
def load_output_chunks(execution_plan_id, action_id)
raise NotImplementedError
end
def delete_output_chunks(execution_plan_id, action_id)
raise NotImplementedError
end
# for debug purposes
def to_hash
raise NotImplementedError
end
def pull_envelopes(receiver_id)
raise NotImplementedError
end
def push_envelope(envelope)
raise NotImplementedError
end
def prune_envelopes(receiver_ids)
raise NotImplementedError
end
def prune_undeliverable_envelopes
raise NotImplementedError
end
def migrate_db
raise NotImplementedError
end
def abort_if_pending_migrations!
raise NotImplementedError
end
end
end
end
| ruby | MIT | e14dcdfe57cec99cc78b2f6ab521f00809b1408a | 2026-01-04T17:50:16.326730Z | false |
Dynflow/dynflow | https://github.com/Dynflow/dynflow/blob/e14dcdfe57cec99cc78b2f6ab521f00809b1408a/lib/dynflow/persistence_adapters/sequel_migrations/002_incremental_progress.rb | lib/dynflow/persistence_adapters/sequel_migrations/002_incremental_progress.rb | # frozen_string_literal: true
Sequel.migration do
change do
alter_table(:dynflow_steps) do
add_column :progress_done, Float
add_column :progress_weight, Float
end
end
end
| ruby | MIT | e14dcdfe57cec99cc78b2f6ab521f00809b1408a | 2026-01-04T17:50:16.326730Z | false |
Dynflow/dynflow | https://github.com/Dynflow/dynflow/blob/e14dcdfe57cec99cc78b2f6ab521f00809b1408a/lib/dynflow/persistence_adapters/sequel_migrations/018_add_uuid_column.rb | lib/dynflow/persistence_adapters/sequel_migrations/018_add_uuid_column.rb | # frozen_string_literal: true
helper = Module.new do
def to_uuid(table_name, column_name)
set_column_type(table_name, column_name, :uuid, :using => "#{column_name}::uuid")
end
def from_uuid(table_name, column_name)
set_column_type table_name, column_name, String, primary_key: true, size: 36, fixed: true
end
def with_foreign_key_recreation(&block)
# Drop the foreign key constraints so we can change the column type
alter_table :dynflow_actions do
drop_foreign_key [:execution_plan_uuid]
end
alter_table :dynflow_steps do
drop_foreign_key [:execution_plan_uuid]
drop_foreign_key [:execution_plan_uuid, :action_id], :name => :dynflow_steps_execution_plan_uuid_fkey1
end
alter_table :dynflow_delayed_plans do
drop_foreign_key [:execution_plan_uuid]
end
block.call
# Recreat the foreign key constraints as they were before
alter_table :dynflow_actions do
add_foreign_key [:execution_plan_uuid], :dynflow_execution_plans
end
alter_table :dynflow_steps do
add_foreign_key [:execution_plan_uuid], :dynflow_execution_plans
add_foreign_key [:execution_plan_uuid, :action_id], :dynflow_actions,
:name => :dynflow_steps_execution_plan_uuid_fkey1
end
alter_table :dynflow_delayed_plans do
add_foreign_key [:execution_plan_uuid], :dynflow_execution_plans,
:name => :dynflow_scheduled_plans_execution_plan_uuid_fkey
end
end
end
Sequel.migration do
up do
if database_type.to_s.include?('postgres')
Sequel::Postgres::Database.include helper
with_foreign_key_recreation do
to_uuid :dynflow_execution_plans, :uuid
to_uuid :dynflow_actions, :execution_plan_uuid
to_uuid :dynflow_steps, :execution_plan_uuid
to_uuid :dynflow_delayed_plans, :execution_plan_uuid
end
end
end
down do
if database_type.to_s.include?('postgres')
Sequel::Postgres::Database.include helper
with_foreign_key_recreation do
from_uuid :dynflow_execution_plans, :uuid
from_uuid :dynflow_actions, :execution_plan_uuid
from_uuid :dynflow_steps, :execution_plan_uuid
from_uuid :dynflow_delayed_plans, :execution_plan_uuid
end
end
end
end
| ruby | MIT | e14dcdfe57cec99cc78b2f6ab521f00809b1408a | 2026-01-04T17:50:16.326730Z | false |
Dynflow/dynflow | https://github.com/Dynflow/dynflow/blob/e14dcdfe57cec99cc78b2f6ab521f00809b1408a/lib/dynflow/persistence_adapters/sequel_migrations/msgpack_migration_helper.rb | lib/dynflow/persistence_adapters/sequel_migrations/msgpack_migration_helper.rb | # frozen_string_literal: true
require 'json'
require 'msgpack'
class MsgpackMigrationHelper
def initialize(tables)
@tables = tables
end
def up(migration)
@tables.each do |table, columns|
new_columns = columns.map { |c| "#{c}_blob" }
migrate_table migration, table, columns, new_columns, File do |data|
::Sequel.blob(MessagePack.pack(JSON.parse(data)))
end
end
end
def down(migration)
@tables.each do |table, columns|
new_columns = columns.map { |c| c + '_text' }
migrate_table migration, table, columns, new_columns, String do |data|
JSON.dump(MessagePack.unpack(data))
end
end
end
private
def migrate_table(migration, table, from_names, to_names, new_type)
migration.alter_table(table) do
to_names.each do |new|
add_column new, new_type
end
end
relevant_columns = table_pkeys(table) | from_names
migration.from(table).select(*relevant_columns).each do |row|
update = from_names.zip(to_names).reduce({}) do |acc, (from, to)|
row[from].nil? ? acc : acc.merge(to => yield(row[from]))
end
next if update.empty?
migration.from(table).where(conditions_for_row(table, row)).update(update)
end
from_names.zip(to_names).each do |old, new|
migration.alter_table(table) do
drop_column old
end
if migration.database_type == :mysql
type = new_type == File ? 'blob' : 'mediumtext'
run "ALTER TABLE #{table} CHANGE COLUMN `#{new}` `#{old}` #{type};"
else
migration.rename_column table, new, old
end
end
end
def conditions_for_row(table, row)
row.slice(*table_pkeys(table))
end
def table_pkeys(table)
case table
when :dynflow_execution_plans
[:uuid]
when :dynflow_actions, :dynflow_steps
[:execution_plan_uuid, :id]
when :dynflow_coordinator_records
[:id, :class]
when :dynflow_delayed_plans
[:execution_plan_uuid]
when :dynflow_envelopes
[:id]
when :dynflow_output_chunks
[:id]
else
raise "Unknown table '#{table}'"
end
end
end
| ruby | MIT | e14dcdfe57cec99cc78b2f6ab521f00809b1408a | 2026-01-04T17:50:16.326730Z | false |
Dynflow/dynflow | https://github.com/Dynflow/dynflow/blob/e14dcdfe57cec99cc78b2f6ab521f00809b1408a/lib/dynflow/persistence_adapters/sequel_migrations/021_create_output_chunks.rb | lib/dynflow/persistence_adapters/sequel_migrations/021_create_output_chunks.rb | # frozen_string_literal: true
Sequel.migration do
up do
type = database_type
create_table(:dynflow_output_chunks) do
primary_key :id
column_properties = if type.to_s.include?('postgres')
{ type: :uuid }
else
{ type: String, size: 36, fixed: true, null: false }
end
foreign_key :execution_plan_uuid, :dynflow_execution_plans, **column_properties
index :execution_plan_uuid
column :action_id, Integer, null: false
foreign_key [:execution_plan_uuid, :action_id], :dynflow_actions,
name: :dynflow_output_chunks_execution_plan_uuid_fkey1
index [:execution_plan_uuid, :action_id]
column :chunk, String, text: true
column :kind, String
column :timestamp, Time, null: false
end
end
down do
drop_table(:dynflow_output_chunks)
end
end
| ruby | MIT | e14dcdfe57cec99cc78b2f6ab521f00809b1408a | 2026-01-04T17:50:16.326730Z | false |
Dynflow/dynflow | https://github.com/Dynflow/dynflow/blob/e14dcdfe57cec99cc78b2f6ab521f00809b1408a/lib/dynflow/persistence_adapters/sequel_migrations/005_envelopes.rb | lib/dynflow/persistence_adapters/sequel_migrations/005_envelopes.rb | # frozen_string_literal: true
Sequel.migration do
change do
create_table(:dynflow_envelopes) do
primary_key :id
# we don't add a foreign key to worlds here as there might be an envelope created for the world
# while the world gets terminated, and it would mess the whole thing up:
# error on the world deletion because some envelopes arrived in the meantime
# we still do our best to remove the envelopes if we can
column :receiver_id, String, size: 36, fixed: true
index :receiver_id
column :data, String, text: true
end
end
end
| ruby | MIT | e14dcdfe57cec99cc78b2f6ab521f00809b1408a | 2026-01-04T17:50:16.326730Z | false |
Dynflow/dynflow | https://github.com/Dynflow/dynflow/blob/e14dcdfe57cec99cc78b2f6ab521f00809b1408a/lib/dynflow/persistence_adapters/sequel_migrations/012_add_delayed_plans_serialized_args.rb | lib/dynflow/persistence_adapters/sequel_migrations/012_add_delayed_plans_serialized_args.rb | # frozen_string_literal: true
Sequel.migration do
change do
alter_table(:dynflow_delayed_plans) do
long_text_type = @db.database_type == :mysql ? :mediumtext : String
add_column :serialized_args, long_text_type
end
end
end
| ruby | MIT | e14dcdfe57cec99cc78b2f6ab521f00809b1408a | 2026-01-04T17:50:16.326730Z | false |
Dynflow/dynflow | https://github.com/Dynflow/dynflow/blob/e14dcdfe57cec99cc78b2f6ab521f00809b1408a/lib/dynflow/persistence_adapters/sequel_migrations/009_fix_mysql_data_length.rb | lib/dynflow/persistence_adapters/sequel_migrations/009_fix_mysql_data_length.rb | # frozen_string_literal: true
Sequel.migration do
affected_tables = [:dynflow_actions, :dynflow_coordinator_records, :dynflow_delayed_plans,
:dynflow_envelopes, :dynflow_execution_plans]
up do
affected_tables.each do |table|
alter_table(table) do
if @db.database_type == :mysql
set_column_type :data, :mediumtext
end
end
end
end
down do
affected_tables.each do |table|
alter_table(table) do
if @db.database_type == :mysql
set_column_type :data, :text
end
end
end
end
end
| ruby | MIT | e14dcdfe57cec99cc78b2f6ab521f00809b1408a | 2026-01-04T17:50:16.326730Z | false |
Dynflow/dynflow | https://github.com/Dynflow/dynflow/blob/e14dcdfe57cec99cc78b2f6ab521f00809b1408a/lib/dynflow/persistence_adapters/sequel_migrations/022_store_flows_as_msgpack.rb | lib/dynflow/persistence_adapters/sequel_migrations/022_store_flows_as_msgpack.rb | # frozen_string_literal: true
require_relative 'msgpack_migration_helper'
Sequel.migration do
helper = MsgpackMigrationHelper.new({
:dynflow_actions => [:data, :input, :output],
:dynflow_coordinator_records => [:data],
:dynflow_delayed_plans => [:serialized_args, :data],
:dynflow_envelopes => [:data],
:dynflow_execution_plans => [:run_flow, :finalize_flow, :execution_history, :step_ids],
:dynflow_steps => [:error, :children],
:dynflow_output_chunks => [:chunk]
})
up do
helper.up(self)
end
down do
helper.down(self)
end
end
| ruby | MIT | e14dcdfe57cec99cc78b2f6ab521f00809b1408a | 2026-01-04T17:50:16.326730Z | false |
Dynflow/dynflow | https://github.com/Dynflow/dynflow/blob/e14dcdfe57cec99cc78b2f6ab521f00809b1408a/lib/dynflow/persistence_adapters/sequel_migrations/003_parent_action.rb | lib/dynflow/persistence_adapters/sequel_migrations/003_parent_action.rb | # frozen_string_literal: true
Sequel.migration do
change do
alter_table(:dynflow_actions) do
add_column :caller_execution_plan_id, String, fixed: true, size: 36
add_column :caller_action_id, Integer
add_index [:caller_execution_plan_id, :caller_action_id]
end
end
end
| ruby | MIT | e14dcdfe57cec99cc78b2f6ab521f00809b1408a | 2026-01-04T17:50:16.326730Z | false |
Dynflow/dynflow | https://github.com/Dynflow/dynflow/blob/e14dcdfe57cec99cc78b2f6ab521f00809b1408a/lib/dynflow/persistence_adapters/sequel_migrations/023_sqlite_workarounds.rb | lib/dynflow/persistence_adapters/sequel_migrations/023_sqlite_workarounds.rb | # frozen_string_literal: true
tables = [:dynflow_actions, :dynflow_delayed_plans, :dynflow_steps, :dynflow_output_chunks]
Sequel.migration do
up do
if database_type == :sqlite && Gem::Version.new(SQLite3::SQLITE_VERSION) <= Gem::Version.new('3.7.17')
tables.each do |table|
alter_table(table) { drop_foreign_key [:execution_plan_uuid] }
end
end
end
down do
if database_type == :sqlite && Gem::Version.new(SQLite3::SQLITE_VERSION) <= Gem::Version.new('3.7.17')
tables.each do |table|
alter_table(table) { add_foreign_key [:execution_plan_uuid], :dynflow_execution_plans }
end
end
end
end
| ruby | MIT | e14dcdfe57cec99cc78b2f6ab521f00809b1408a | 2026-01-04T17:50:16.326730Z | false |
Dynflow/dynflow | https://github.com/Dynflow/dynflow/blob/e14dcdfe57cec99cc78b2f6ab521f00809b1408a/lib/dynflow/persistence_adapters/sequel_migrations/016_add_step_queue.rb | lib/dynflow/persistence_adapters/sequel_migrations/016_add_step_queue.rb | # frozen_string_literal: true
Sequel.migration do
change do
alter_table(:dynflow_steps) do
add_column :queue, String
end
end
end
| ruby | MIT | e14dcdfe57cec99cc78b2f6ab521f00809b1408a | 2026-01-04T17:50:16.326730Z | false |
Dynflow/dynflow | https://github.com/Dynflow/dynflow/blob/e14dcdfe57cec99cc78b2f6ab521f00809b1408a/lib/dynflow/persistence_adapters/sequel_migrations/004_coordinator_records.rb | lib/dynflow/persistence_adapters/sequel_migrations/004_coordinator_records.rb | # frozen_string_literal: true
Sequel.migration do
change do
create_table(:dynflow_coordinator_records) do
column :id, String, size: 100
column :class, String, size: 100
primary_key [:id, :class]
index :class
column :owner_id, String
index :owner_id
column :data, String, text: true
end
end
end
| ruby | MIT | e14dcdfe57cec99cc78b2f6ab521f00809b1408a | 2026-01-04T17:50:16.326730Z | false |
Dynflow/dynflow | https://github.com/Dynflow/dynflow/blob/e14dcdfe57cec99cc78b2f6ab521f00809b1408a/lib/dynflow/persistence_adapters/sequel_migrations/020_drop_duplicate_indices.rb | lib/dynflow/persistence_adapters/sequel_migrations/020_drop_duplicate_indices.rb | # frozen_string_literal: true
Sequel.migration do
up do
alter_table(:dynflow_actions) do
drop_index [:execution_plan_uuid, :id]
end
alter_table(:dynflow_execution_plans) do
drop_index :uuid
end
alter_table(:dynflow_steps) do
drop_index [:execution_plan_uuid, :id]
end
end
down do
alter_table(:dynflow_actions) do
add_index [:execution_plan_uuid, :id], :unique => true
end
alter_table(:dynflow_execution_plans) do
add_index :uuid, :unique => true
end
alter_table(:dynflow_steps) do
add_index [:execution_plan_uuid, :id], :unique => true
end
end
end
| ruby | MIT | e14dcdfe57cec99cc78b2f6ab521f00809b1408a | 2026-01-04T17:50:16.326730Z | false |
Dynflow/dynflow | https://github.com/Dynflow/dynflow/blob/e14dcdfe57cec99cc78b2f6ab521f00809b1408a/lib/dynflow/persistence_adapters/sequel_migrations/008_rename_scheduled_plans_to_delayed_plans.rb | lib/dynflow/persistence_adapters/sequel_migrations/008_rename_scheduled_plans_to_delayed_plans.rb | # frozen_string_literal: true
Sequel.migration do
change do
rename_table(:dynflow_scheduled_plans, :dynflow_delayed_plans)
end
end
| ruby | MIT | e14dcdfe57cec99cc78b2f6ab521f00809b1408a | 2026-01-04T17:50:16.326730Z | false |
Dynflow/dynflow | https://github.com/Dynflow/dynflow/blob/e14dcdfe57cec99cc78b2f6ab521f00809b1408a/lib/dynflow/persistence_adapters/sequel_migrations/013_add_action_columns.rb | lib/dynflow/persistence_adapters/sequel_migrations/013_add_action_columns.rb | # frozen_string_literal: true
Sequel.migration do
change do
alter_table(:dynflow_actions) do
long_text_type = @db.database_type == :mysql ? :mediumtext : String
add_column :class, String
add_column :input, long_text_type
add_column :output, long_text_type
# These could be removed in the future because an action can have at most one of each
# and each belongs to an action
add_column :plan_step_id, Integer
add_column :run_step_id, Integer
add_column :finalize_step_id, Integer
end
end
end
| ruby | MIT | e14dcdfe57cec99cc78b2f6ab521f00809b1408a | 2026-01-04T17:50:16.326730Z | false |
Dynflow/dynflow | https://github.com/Dynflow/dynflow/blob/e14dcdfe57cec99cc78b2f6ab521f00809b1408a/lib/dynflow/persistence_adapters/sequel_migrations/017_add_delayed_plan_frozen.rb | lib/dynflow/persistence_adapters/sequel_migrations/017_add_delayed_plan_frozen.rb | # frozen_string_literal: true
Sequel.migration do
change do
alter_table(:dynflow_delayed_plans) do
add_column :frozen, :boolean
end
self[:dynflow_delayed_plans].update(:frozen => false)
end
end
| ruby | MIT | e14dcdfe57cec99cc78b2f6ab521f00809b1408a | 2026-01-04T17:50:16.326730Z | false |
Dynflow/dynflow | https://github.com/Dynflow/dynflow/blob/e14dcdfe57cec99cc78b2f6ab521f00809b1408a/lib/dynflow/persistence_adapters/sequel_migrations/019_update_mysql_time_precision.rb | lib/dynflow/persistence_adapters/sequel_migrations/019_update_mysql_time_precision.rb | # frozen_string_literal: true
Sequel.migration do
up do
alter_table(:dynflow_execution_plans) do
if @db.database_type == :mysql
set_column_type :started_at, 'datetime(3)'
set_column_type :ended_at, 'datetime(3)'
end
end
alter_table(:dynflow_steps) do
if @db.database_type == :mysql
set_column_type :started_at, 'datetime(3)'
set_column_type :ended_at, 'datetime(3)'
end
end
alter_table(:dynflow_delayed_plans) do
if @db.database_type == :mysql
set_column_type :start_at, 'datetime(3)'
set_column_type :start_before, 'datetime(3)'
end
end
end
down do
alter_table(:dynflow_steps) do
if @db.database_type == :mysql
set_column_type :started_at, Time
set_column_type :ended_at, Time
end
end
alter_table(:dynflow_steps) do
if @db.database_type == :mysql
set_column_type :started_at, Time
set_column_type :ended_at, Time
end
end
alter_table(:dynflow_delayed_plans) do
if @db.database_type == :mysql
set_column_type :start_at, Time
set_column_type :start_before, Time
end
end
end
end
| ruby | MIT | e14dcdfe57cec99cc78b2f6ab521f00809b1408a | 2026-01-04T17:50:16.326730Z | false |
Dynflow/dynflow | https://github.com/Dynflow/dynflow/blob/e14dcdfe57cec99cc78b2f6ab521f00809b1408a/lib/dynflow/persistence_adapters/sequel_migrations/011_placeholder.rb | lib/dynflow/persistence_adapters/sequel_migrations/011_placeholder.rb | # frozen_string_literal: true
Sequel.migration do
# Placeholder for 011_add_uuid_column.rb - it was readded in
# 018_add_uuid_column.rb with fixed check for postgresql
end
| ruby | MIT | e14dcdfe57cec99cc78b2f6ab521f00809b1408a | 2026-01-04T17:50:16.326730Z | false |
Dynflow/dynflow | https://github.com/Dynflow/dynflow/blob/e14dcdfe57cec99cc78b2f6ab521f00809b1408a/lib/dynflow/persistence_adapters/sequel_migrations/015_add_execution_plan_columns.rb | lib/dynflow/persistence_adapters/sequel_migrations/015_add_execution_plan_columns.rb | # frozen_string_literal: true
Sequel.migration do
change do
alter_table(:dynflow_execution_plans) do
long_text_type = @db.database_type == :mysql ? :mediumtext : String
add_column :class, String
add_column :run_flow, long_text_type
add_column :finalize_flow, long_text_type
add_column :execution_history, long_text_type
# These could be removed in the future because an action can have at most one of each
# and each belongs to an action
add_column :root_plan_step_id, Integer
add_column :step_ids, long_text_type
end
end
end
| ruby | MIT | e14dcdfe57cec99cc78b2f6ab521f00809b1408a | 2026-01-04T17:50:16.326730Z | false |
Dynflow/dynflow | https://github.com/Dynflow/dynflow/blob/e14dcdfe57cec99cc78b2f6ab521f00809b1408a/lib/dynflow/persistence_adapters/sequel_migrations/007_future_execution.rb | lib/dynflow/persistence_adapters/sequel_migrations/007_future_execution.rb | # frozen_string_literal: true
Sequel.migration do
change do
create_table(:dynflow_scheduled_plans) do
foreign_key :execution_plan_uuid, :dynflow_execution_plans, type: String, size: 36, fixed: true
index :execution_plan_uuid
column :start_at, Time
index :start_at
column :start_before, Time
column :data, String, text: true
column :args_serializer, String
end
end
end
| ruby | MIT | e14dcdfe57cec99cc78b2f6ab521f00809b1408a | 2026-01-04T17:50:16.326730Z | false |
Dynflow/dynflow | https://github.com/Dynflow/dynflow/blob/e14dcdfe57cec99cc78b2f6ab521f00809b1408a/lib/dynflow/persistence_adapters/sequel_migrations/006_fix_data_length.rb | lib/dynflow/persistence_adapters/sequel_migrations/006_fix_data_length.rb | # frozen_string_literal: true
Sequel.migration do
up do
alter_table(:dynflow_steps) do
if @db.database_type == :mysql
set_column_type :data, :mediumtext
end
end
end
down do
alter_table(:dynflow_steps) do
if @db.database_type == :mysql
set_column_type :data, :text
end
end
end
end
| ruby | MIT | e14dcdfe57cec99cc78b2f6ab521f00809b1408a | 2026-01-04T17:50:16.326730Z | false |
Dynflow/dynflow | https://github.com/Dynflow/dynflow/blob/e14dcdfe57cec99cc78b2f6ab521f00809b1408a/lib/dynflow/persistence_adapters/sequel_migrations/010_add_execution_plans_label.rb | lib/dynflow/persistence_adapters/sequel_migrations/010_add_execution_plans_label.rb | # frozen_string_literal: true
Sequel.migration do
change do
alter_table(:dynflow_execution_plans) do
add_column :label, String
end
end
end
| ruby | MIT | e14dcdfe57cec99cc78b2f6ab521f00809b1408a | 2026-01-04T17:50:16.326730Z | false |
Dynflow/dynflow | https://github.com/Dynflow/dynflow/blob/e14dcdfe57cec99cc78b2f6ab521f00809b1408a/lib/dynflow/persistence_adapters/sequel_migrations/014_add_step_columns.rb | lib/dynflow/persistence_adapters/sequel_migrations/014_add_step_columns.rb | # frozen_string_literal: true
Sequel.migration do
change do
alter_table(:dynflow_steps) do
add_column :class, String
add_column :error, @db.database_type == :mysql ? :mediumtext : String
# These could be removed in the future because an action can have at most one of each
# and each belongs to an action
add_column :action_class, String
add_column :children, String
end
end
end
| ruby | MIT | e14dcdfe57cec99cc78b2f6ab521f00809b1408a | 2026-01-04T17:50:16.326730Z | false |
Dynflow/dynflow | https://github.com/Dynflow/dynflow/blob/e14dcdfe57cec99cc78b2f6ab521f00809b1408a/lib/dynflow/persistence_adapters/sequel_migrations/001_initial.rb | lib/dynflow/persistence_adapters/sequel_migrations/001_initial.rb | # frozen_string_literal: true
Sequel.migration do
up do
create_table(:dynflow_execution_plans) do
column :uuid, String, primary_key: true, size: 36, fixed: true
index :uuid, :unique => true
column :data, String, text: true
column :state, String
column :result, String
column :started_at, Time
column :ended_at, Time
column :real_time, Float
column :execution_time, Float
end
create_table(:dynflow_actions) do
foreign_key :execution_plan_uuid, :dynflow_execution_plans, type: String, size: 36, fixed: true
index :execution_plan_uuid
column :id, Integer
primary_key [:execution_plan_uuid, :id]
index [:execution_plan_uuid, :id], :unique => true
column :data, String, text: true
end
create_table(:dynflow_steps) do
foreign_key :execution_plan_uuid, :dynflow_execution_plans, type: String, size: 36, fixed: true
index :execution_plan_uuid
column :id, Integer
primary_key [:execution_plan_uuid, :id]
index [:execution_plan_uuid, :id], :unique => true
column :action_id, Integer
foreign_key [:execution_plan_uuid, :action_id], :dynflow_actions,
name: :dynflow_steps_execution_plan_uuid_fkey1
index [:execution_plan_uuid, :action_id]
column :data, String, text: true
column :state, String
column :started_at, Time
column :ended_at, Time
column :real_time, Float
column :execution_time, Float
end
end
down do
drop_table(:dynflow_steps)
drop_table(:dynflow_actions)
drop_table(:dynflow_execution_plans)
end
end
| ruby | MIT | e14dcdfe57cec99cc78b2f6ab521f00809b1408a | 2026-01-04T17:50:16.326730Z | false |
Dynflow/dynflow | https://github.com/Dynflow/dynflow/blob/e14dcdfe57cec99cc78b2f6ab521f00809b1408a/lib/dynflow/persistence_adapters/sequel_migrations/024_store_execution_plan_data_as_msgpack.rb | lib/dynflow/persistence_adapters/sequel_migrations/024_store_execution_plan_data_as_msgpack.rb | # frozen_string_literal: true
require_relative 'msgpack_migration_helper'
Sequel.migration do
helper = MsgpackMigrationHelper.new({
:dynflow_execution_plans => [:data],
:dynflow_steps => [:data]
})
up do
helper.up(self)
end
down do
helper.down(self)
end
end
| ruby | MIT | e14dcdfe57cec99cc78b2f6ab521f00809b1408a | 2026-01-04T17:50:16.326730Z | false |
Dynflow/dynflow | https://github.com/Dynflow/dynflow/blob/e14dcdfe57cec99cc78b2f6ab521f00809b1408a/lib/dynflow/persistence_adapters/sequel_migrations/025_create_execution_plan_dependencies.rb | lib/dynflow/persistence_adapters/sequel_migrations/025_create_execution_plan_dependencies.rb | # frozen_string_literal: true
Sequel.migration do
up do
type = database_type
create_table(:dynflow_execution_plan_dependencies) do
column_properties = if type.to_s.include?('postgres')
{ type: :uuid }
else
{ type: String, size: 36, fixed: true, null: false }
end
foreign_key :execution_plan_uuid, :dynflow_execution_plans, on_delete: :cascade, **column_properties
foreign_key :blocked_by_uuid, :dynflow_execution_plans, on_delete: :cascade, **column_properties
index :blocked_by_uuid
index :execution_plan_uuid
end
end
down do
drop_table(:dynflow_execution_plan_dependencies)
end
end
| ruby | MIT | e14dcdfe57cec99cc78b2f6ab521f00809b1408a | 2026-01-04T17:50:16.326730Z | false |
Dynflow/dynflow | https://github.com/Dynflow/dynflow/blob/e14dcdfe57cec99cc78b2f6ab521f00809b1408a/lib/dynflow/dispatcher/client_dispatcher.rb | lib/dynflow/dispatcher/client_dispatcher.rb | # frozen_string_literal: true
module Dynflow
module Dispatcher
class ClientDispatcher < Abstract
TrackedRequest = Algebrick.type do
fields! id: String, request: Request,
accepted: Concurrent::Promises::ResolvableFuture, finished: Concurrent::Promises::ResolvableFuture
end
module TrackedRequest
def accept!
accepted.fulfill true unless accepted.resolved?
self
end
def fail!(error)
accepted.reject error unless accepted.resolved?
finished.reject error
self
end
def success!(resolve_to)
accepted.fulfill true unless accepted.resolved?
finished.fulfill(resolve_to)
self
end
end
# Class used for reducing the number of sent Pings among worlds.
# World's coordinator record include the time when was the world
# seen for the last time. This class can be used to query this
# information and determine whether the record is "fresh enough"
# or whether the Ping really needs to be sent.
class PingCache
# Format string used for formating and parsing times
TIME_FORMAT = '%Y-%m-%d %H:%M:%S.%L'
DEFAULT_MAX_AGE = 60
# Formats time into a string
#
# @param time [Time] the time to format
# @return [String] the formatted time
def self.format_time(time = Time.now)
time.strftime(TIME_FORMAT)
end
# Parses time from a string
#
# @param time [String] the time string to parse
# @return [Time] the parsed time
def self.load_time(time)
Time.strptime(time, TIME_FORMAT)
end
# @param world [World] the world to which the PingCache belongs
def initialize(world, max_age = DEFAULT_MAX_AGE)
@world = world
@max_age = max_age
@executor = {}
end
# Records when was the world seen into the world's coordinator record
#
# @param id [String] Id of the world to be added to the cache
# @param time [Time] Time when was the world last seen
def add_record(id, time = Time.now)
record = find_world id
@executor[id] ||= record.data[:class] == 'Dynflow::Coordinator::ExecutorWorld'
record.data[:meta].update(:last_seen => self.class.format_time(time))
@world.coordinator.update_record(record)
end
# Looks into the cache whether the world has an executor
#
# @param id [String] Id of the world
# @return [TrueClass] if the world has an executor
# @return [FalseClass] if the world is a client world
# @return [NilClass] if unknown
def executor?(id)
@executor[id]
end
# Loads the coordinator record from the database and checks whether the world
# was last seen within the time limit
#
# @param id [String] Id of the world to be checked
# @return [TrueClass] if the world was last seen within the limit
# @return [FalseClass] if the world was last seen after the limit passed
def fresh_record?(id)
record = find_world(id)
return false if record.nil?
@executor[id] = record.data[:class] == 'Dynflow::Coordinator::ExecutorWorld'
time = self.class.load_time(record.data[:meta][:last_seen])
time >= Time.now - @max_age
end
private
def find_world(id)
@world.coordinator.find_records(:id => id,
:class => ['Dynflow::Coordinator::ExecutorWorld', 'Dynflow::Coordinator::ClientWorld']).first
end
end
attr_reader :ping_cache
def initialize(world, ping_cache_age)
@world = Type! world, World
@last_id_suffix = 0
@tracked_requests = {}
@terminated = nil
@ping_cache = PingCache.new world, ping_cache_age
end
def publish_request(future, request, timeout)
with_ping_request_caching(request, future) do
track_request(future, request, timeout) do |tracked_request|
dispatch_request(request, @world.id, tracked_request.id)
end
end
end
def timeout(request_id)
resolve_tracked_request(request_id, Dynflow::Error.new("Request timeout"))
end
def start_termination(*args)
super
@tracked_requests.values.each { |tracked_request| tracked_request.fail!(Dynflow::Error.new('Dispatcher terminated')) }
@tracked_requests.clear
finish_termination
end
def dispatch_request(request, client_world_id, request_id)
ignore_unknown = false
executor_id = match request,
(on ~Execution | ~Planning do |execution|
AnyExecutor
end),
(on ~Event do |event|
ignore_unknown = event.optional
find_executor(event.execution_plan_id)
end),
(on ~Halt do |event|
executor = find_executor(event.execution_plan_id)
executor == Dispatcher::UnknownWorld ? AnyExecutor : executor
end),
(on Ping.(~any, ~any) | Status.(~any, ~any) do |receiver_id, _|
receiver_id
end)
envelope = Envelope[request_id, client_world_id, executor_id, request]
if Dispatcher::UnknownWorld === envelope.receiver_id
raise Dynflow::Error, "Could not find an executor for #{envelope}" unless ignore_unknown
message = "Could not find an executor for optional #{envelope}, discarding."
log(Logger::DEBUG, message)
return respond(envelope, Failed[message])
end
connector.send(envelope).value!
rescue => e
log(Logger::ERROR, e)
respond(envelope, Failed[e.message]) if envelope
end
def dispatch_response(envelope)
return unless @tracked_requests.key?(envelope.request_id)
match envelope.message,
(on ~Accepted do
@tracked_requests[envelope.request_id].accept!
end),
(on ~Failed do |msg|
resolve_tracked_request(envelope.request_id, Dynflow::Error.new(msg.error))
end),
(on Done do
resolve_tracked_request(envelope.request_id)
end),
(on Pong do
add_ping_cache_record(envelope.sender_id)
resolve_tracked_request(envelope.request_id)
end),
(on ExecutionStatus.(~any) do |steps|
@tracked_requests.delete(envelope.request_id).success! steps
end)
end
# Records when was the world with provided id last seen using a PingCache
#
# @param id [String] Id of the world
# @see PingCache#add_record
def add_ping_cache_record(id)
log Logger::DEBUG, "adding ping cache record for #{id}"
@ping_cache.add_record id
end
private
def find_executor(execution_plan_id)
execution_lock = @world.coordinator.find_locks(class: Coordinator::ExecutionLock.name,
id: "execution-plan:#{execution_plan_id}").first
if execution_lock
execution_lock.world_id
else
Dispatcher::UnknownWorld
end
rescue => e
log(Logger::ERROR, e)
Dispatcher::UnknownWorld
end
def track_request(finished, request, timeout)
id_suffix = @last_id_suffix += 1
id = "#{@world.id}-#{id_suffix}"
tracked_request = TrackedRequest[id, request, Concurrent::Promises.resolvable_future, finished]
@tracked_requests[id] = tracked_request
@world.clock.ping(self, timeout, [:timeout, id]) if timeout
yield tracked_request
rescue Dynflow::Error => e
resolve_tracked_request(tracked_request.id, e)
log(Logger::ERROR, e)
end
def reset_tracked_request(tracked_request)
if tracked_request.finished.resolved?
raise Dynflow::Error.new('Can not reset resolved tracked request')
end
unless tracked_request.accepted.resolved?
tracked_request.accept! # otherwise nobody would set the accept future
end
future = Concurrent::Promises.resolvable_future
@tracked_requests[tracked_request.id] = TrackedRequest[tracked_request.id, tracked_request.request, future, tracked_request.finished]
end
def resolve_tracked_request(id, error = nil)
return unless @tracked_requests.key?(id)
if error
@tracked_requests.delete(id).fail! error
else
tracked_request = @tracked_requests[id]
resolve_to = match tracked_request.request,
(on Execution.(execution_plan_id: ~any) do |uuid|
@world.persistence.load_execution_plan(uuid)
end),
(on Event | Ping | Halt do
true
end)
@tracked_requests.delete(id).success! resolve_to
end
end
# Tries to reduce the number of sent Ping requests by first looking into a cache. If the
# destination world is an executor world, the result is resolved solely from the cache.
# For client worlds the Ping might be sent if the cache record is stale.
#
# @param request [Dynflow::Dispatcher::Request] the request to send
# @param future [Concurrent::Future] the future to fulfill if the world was seen recently
# @return [Concurrent::Future] the future tracking the request
def with_ping_request_caching(request, future)
return yield unless request.is_a?(Dynflow::Dispatcher::Ping)
return yield unless request.use_cache
if @ping_cache.fresh_record?(request.receiver_id)
future.fulfill(true)
else
if @ping_cache.executor?(request.receiver_id)
future.reject false
else
yield
end
end
end
end
end
end
| ruby | MIT | e14dcdfe57cec99cc78b2f6ab521f00809b1408a | 2026-01-04T17:50:16.326730Z | false |
Dynflow/dynflow | https://github.com/Dynflow/dynflow/blob/e14dcdfe57cec99cc78b2f6ab521f00809b1408a/lib/dynflow/dispatcher/executor_dispatcher.rb | lib/dynflow/dispatcher/executor_dispatcher.rb | # frozen_string_literal: true
module Dynflow
module Dispatcher
class ExecutorDispatcher < Abstract
def initialize(world, semaphore)
@world = Type! world, World
@current_futures = Set.new
end
def handle_request(envelope)
match(envelope.message,
on(Planning) { perform_planning(envelope, envelope.message) },
on(Execution) { perform_execution(envelope, envelope.message) },
on(Event) { perform_event(envelope, envelope.message) },
on(Status) { get_execution_status(envelope, envelope.message) },
on(Halt) { halt_execution_plan(envelope, envelope.message) })
end
protected
def perform_planning(envelope, planning)
@world.executor.plan(planning.execution_plan_id)
respond(envelope, Accepted)
rescue Dynflow::Error => e
respond(envelope, Failed[e.message])
end
def perform_execution(envelope, execution)
allocate_executor(execution.execution_plan_id, envelope.sender_id, envelope.request_id)
execution_lock = Coordinator::ExecutionLock.new(@world, execution.execution_plan_id, envelope.sender_id, envelope.request_id)
future = on_finish do |f|
f.then do |plan|
when_done(plan, envelope, execution, execution_lock)
end.rescue do |reason|
@world.coordinator.release(execution_lock)
respond(envelope, Failed[reason.to_s])
end
end
@world.executor.execute(execution.execution_plan_id, future)
respond(envelope, Accepted)
rescue Dynflow::Error => e
future.reject(e) if future && !future.resolved?
respond(envelope, Failed[e.message])
end
def when_done(plan, envelope, execution, execution_lock)
if plan.state == :running
@world.invalidate_execution_lock(execution_lock)
else
@world.coordinator.release(execution_lock)
respond(envelope, Done)
end
end
def halt_execution_plan(envelope, execution_plan_id)
@world.executor.halt execution_plan_id
respond(envelope, Done)
end
def perform_event(envelope, event_request)
future = on_finish do |f|
f.then do
respond(envelope, Done)
end.rescue do |reason|
respond(envelope, Failed[reason.to_s])
end
end
if event_request.time.nil? || event_request.time < Time.now
@world.executor.event(envelope.request_id, event_request.execution_plan_id, event_request.step_id, event_request.event, future,
optional: event_request.optional)
else
@world.clock.ping(
@world.executor,
event_request.time,
Director::Event[envelope.request_id, event_request.execution_plan_id, event_request.step_id, event_request.event, Concurrent::Promises.resolvable_future,
event_request.optional],
:delayed_event
)
# resolves the future right away - currently we do not wait for the clock ping
future.fulfill true
end
rescue Dynflow::Error => e
future.reject(e) if future && !future.resolved?
end
def start_termination(*args)
super
if @current_futures.empty?
reference.tell(:finish_termination)
else
Concurrent::Promises.zip_futures(*@current_futures).then { reference.tell(:finish_termination) }
end
end
def get_execution_status(envelope, envelope_message)
items = @world.executor.execution_status envelope_message.execution_plan_id
respond(envelope, ExecutionStatus[execution_status: items])
end
private
def allocate_executor(execution_plan_id, client_world_id, request_id)
execution_lock = Coordinator::ExecutionLock.new(@world, execution_plan_id, client_world_id, request_id)
@world.coordinator.acquire(execution_lock)
end
def on_finish
raise "Dispatcher terminating: no new work can be started" if terminating?
future = Concurrent::Promises.resolvable_future
callbacks_future = (yield future).rescue { |reason| @world.logger.error("Unexpected fail on future #{reason}") }
# we track currently running futures to make sure to not
# terminate until the execution is finished (including
# cleaning of locks etc)
@current_futures << callbacks_future
callbacks_future.on_resolution! { reference.tell([:finish_execution, callbacks_future]) }
return future
end
def finish_execution(future)
@current_futures.delete(future)
end
end
end
end
| ruby | MIT | e14dcdfe57cec99cc78b2f6ab521f00809b1408a | 2026-01-04T17:50:16.326730Z | false |
Dynflow/dynflow | https://github.com/Dynflow/dynflow/blob/e14dcdfe57cec99cc78b2f6ab521f00809b1408a/lib/dynflow/dispatcher/abstract.rb | lib/dynflow/dispatcher/abstract.rb | # frozen_string_literal: true
module Dynflow
module Dispatcher
class Abstract < Actor
def connector
@world.connector
end
def respond(request_envelope, response)
response_envelope = request_envelope.build_response_envelope(response, @world)
connector.send(response_envelope)
end
end
end
end
| ruby | MIT | e14dcdfe57cec99cc78b2f6ab521f00809b1408a | 2026-01-04T17:50:16.326730Z | false |
Dynflow/dynflow | https://github.com/Dynflow/dynflow/blob/e14dcdfe57cec99cc78b2f6ab521f00809b1408a/lib/dynflow/coordinator_adapters/sequel.rb | lib/dynflow/coordinator_adapters/sequel.rb | # frozen_string_literal: true
module Dynflow
module CoordinatorAdapters
class Sequel < Abstract
def initialize(world)
super
@sequel_adapter = world.persistence.adapter
Type! @sequel_adapter, PersistenceAdapters::Sequel
end
def create_record(record)
@sequel_adapter.insert_coordinator_record(record.to_hash)
rescue Errors::PersistenceError => e
if e.cause.is_a? ::Sequel::UniqueConstraintViolation
raise Coordinator::DuplicateRecordError.new(record)
else
raise e
end
end
def update_record(record)
@sequel_adapter.update_coordinator_record(record.class.name, record.id, record.to_hash)
end
def delete_record(record)
@sequel_adapter.delete_coordinator_record(record.class.name, record.id)
end
def find_records(filter_options)
@sequel_adapter.find_coordinator_records(filters: filter_options)
end
def find_execution_plans(filter_options)
@sequel_adapter.find_execution_plans(filters: filter_options)
end
end
end
end
| ruby | MIT | e14dcdfe57cec99cc78b2f6ab521f00809b1408a | 2026-01-04T17:50:16.326730Z | false |
Dynflow/dynflow | https://github.com/Dynflow/dynflow/blob/e14dcdfe57cec99cc78b2f6ab521f00809b1408a/lib/dynflow/coordinator_adapters/abstract.rb | lib/dynflow/coordinator_adapters/abstract.rb | # frozen_string_literal: true
module Dynflow
module CoordinatorAdapters
class Abstract
include Algebrick::TypeCheck
def initialize(world)
Type! world, World
@world = world
end
def create_record(record)
raise NotImplementedError
end
def update_record(record)
raise NotImplementedError
end
def delete_record(record)
raise NotImplementedError
end
def find_records(record)
raise NotImplementedError
end
def find_execution_plans(filter_options)
raise NotImplementedError
end
end
end
end
| ruby | MIT | e14dcdfe57cec99cc78b2f6ab521f00809b1408a | 2026-01-04T17:50:16.326730Z | false |
Dynflow/dynflow | https://github.com/Dynflow/dynflow/blob/e14dcdfe57cec99cc78b2f6ab521f00809b1408a/lib/dynflow/middleware/world.rb | lib/dynflow/middleware/world.rb | # frozen_string_literal: true
module Dynflow
class Middleware::World
include Algebrick::TypeCheck
def initialize
@register = Middleware::Register.new
clear_cache!
end
def use(*args)
clear_cache!
@register.use(*args)
end
def execute(method, action_or_class, *args, &block)
Match! method, :delay, :plan, :run, :finalize, :plan_phase, :finalize_phase, :present, :hook
if Child? action_or_class, Dynflow::Action
action = nil
action_class = action_or_class
elsif Type? action_or_class, Dynflow::Action
action = action_or_class
action_class = action.class
else
Algebrick::TypeCheck.error action_or_class, 'is not instance or child class', Dynflow::Action
end
classes = middleware_classes(action_class)
stack = Middleware::Stack.build(classes, method, action, &block)
stack.call(*args)
end
def clear_cache!
@middleware_classes_cache = {}
end
private
def cumulate_register(action_class, register = Middleware::Register.new)
register.merge!(@register)
unless action_class == Dynflow::Action
cumulate_register(action_class.superclass, register)
end
register.merge!(action_class.middleware)
return register
end
def middleware_classes(action_class)
unless @middleware_classes_cache.key?(action_class)
register = cumulate_register(action_class)
resolver = Dynflow::Middleware::Resolver.new(register)
@middleware_classes_cache[action_class] = resolver.result
end
return @middleware_classes_cache[action_class]
end
end
end
| ruby | MIT | e14dcdfe57cec99cc78b2f6ab521f00809b1408a | 2026-01-04T17:50:16.326730Z | false |
Dynflow/dynflow | https://github.com/Dynflow/dynflow/blob/e14dcdfe57cec99cc78b2f6ab521f00809b1408a/lib/dynflow/middleware/stack.rb | lib/dynflow/middleware/stack.rb | # frozen_string_literal: true
module Dynflow
class Middleware::Stack
include Algebrick::TypeCheck
attr_reader :action, :middleware_class, :middleware
def self.build(middleware_classes, method, action, &block)
middleware_classes.reverse_each.reduce(block) do |stack, klass|
Middleware::Stack.new(stack, klass, method, action)
end
end
def initialize(next_stack, middleware_class, method, action)
@middleware_class = Child! middleware_class, Middleware
@middleware = middleware_class.new self
@action = Type! action, Dynflow::Action, NilClass
@method = Match! method, :delay, :plan, :run, :finalize, :plan_phase, :finalize_phase, :present, :hook
@next_stack = Type! next_stack, Middleware::Stack, Proc
end
def call(*args)
@middleware.send @method, *args
end
def pass(*args)
@next_stack.call(*args)
end
end
end
| ruby | MIT | e14dcdfe57cec99cc78b2f6ab521f00809b1408a | 2026-01-04T17:50:16.326730Z | false |
Dynflow/dynflow | https://github.com/Dynflow/dynflow/blob/e14dcdfe57cec99cc78b2f6ab521f00809b1408a/lib/dynflow/middleware/resolver.rb | lib/dynflow/middleware/resolver.rb | # frozen_string_literal: true
require 'tsort'
module Dynflow
class Middleware::Resolver
include TSort
include Algebrick::TypeCheck
def initialize(register)
@register = Type! register, Middleware::Register
end
def result
@result ||= begin
@deps = normalize_rules(@register.rules)
self.tsort
end
end
private
# Takes eliminate :replace and :before rules.
# Returns hash, that maps middleware classes to their dependencies
def normalize_rules(rules)
deps = Hash.new { |h, k| h[k] = [] }
substitutions = {}
# replace before with after on oposite direction and build the
# substitutions dictionary
rules.each do |middleware_class, middleware_rules|
deps[middleware_class].concat(middleware_rules[:after])
middleware_rules[:before].each do |dependent_class|
deps[dependent_class] << middleware_class
end
middleware_rules[:replace].each do |replaced|
substitutions[replaced] = middleware_class
end
end
# replace the middleware to be substituted
substitutions.each do |old, new|
deps[new].concat(deps[old])
deps.delete(old)
end
# ignore deps, that are not present in the stack
deps.each do |middleware_class, middleware_deps|
middleware_deps.reject! { |dep| !deps.has_key?(dep) }
end
return deps.delete_if { |klass, _| klass.nil? }
end
def tsort_each_node(&block)
@deps.each_key(&block)
end
def tsort_each_child(node, &block)
@deps.fetch(node).each(&block)
end
end
end
| ruby | MIT | e14dcdfe57cec99cc78b2f6ab521f00809b1408a | 2026-01-04T17:50:16.326730Z | false |
Dynflow/dynflow | https://github.com/Dynflow/dynflow/blob/e14dcdfe57cec99cc78b2f6ab521f00809b1408a/lib/dynflow/middleware/register.rb | lib/dynflow/middleware/register.rb | # frozen_string_literal: true
module Dynflow
class Middleware::Register
include Algebrick::TypeCheck
attr_reader :rules
def initialize
@rules = Hash.new do |h, k|
h[k] = { before: [],
after: [],
replace: [] }
end
end
def use(middleware_class, options = {})
unknown_options = options.keys - [:before, :after, :replace]
if unknown_options.any?
raise ArgumentError, "Unexpected options: #{unknown_options}"
end
@rules[middleware_class].merge!(options) do |key, old, new|
old + Array(new)
end
end
def do_not_use(middleware_class)
use nil, :replace => middleware_class
end
def merge!(register)
Type! register, Middleware::Register
register.rules.each do |klass, rules|
use(klass, rules)
end
end
end
end
| ruby | MIT | e14dcdfe57cec99cc78b2f6ab521f00809b1408a | 2026-01-04T17:50:16.326730Z | false |
Dynflow/dynflow | https://github.com/Dynflow/dynflow/blob/e14dcdfe57cec99cc78b2f6ab521f00809b1408a/lib/dynflow/middleware/common/transaction.rb | lib/dynflow/middleware/common/transaction.rb | # frozen_string_literal: true
module Dynflow
module Middleware::Common
class Transaction < Middleware
def plan_phase(execution_plan)
rollback_on_error(execution_plan)
end
def finalize_phase(execution_plan)
rollback_on_error(execution_plan)
end
private
def rollback_on_error(execution_plan)
execution_plan.world.transaction_adapter.transaction do
pass(execution_plan)
if execution_plan.error?
execution_plan.world.transaction_adapter.rollback
end
end
end
end
end
end
| ruby | MIT | e14dcdfe57cec99cc78b2f6ab521f00809b1408a | 2026-01-04T17:50:16.326730Z | false |
Dynflow/dynflow | https://github.com/Dynflow/dynflow/blob/e14dcdfe57cec99cc78b2f6ab521f00809b1408a/lib/dynflow/middleware/common/singleton.rb | lib/dynflow/middleware/common/singleton.rb | # frozen_string_literal: true
module Dynflow
module Middleware::Common
class Singleton < Middleware
# Each action tries to acquire its own lock before the action's #plan starts
def plan(*args)
action.singleton_lock!
pass(*args)
end
# At the start of #run we try to acquire action's lock unless it already holds it
# At the end the action tries to unlock its own lock if the execution plan has no
# finalize phase
def run(*args)
action.singleton_lock! unless action.holds_singleton_lock?
pass(*args)
end
end
end
end
| ruby | MIT | e14dcdfe57cec99cc78b2f6ab521f00809b1408a | 2026-01-04T17:50:16.326730Z | false |
Dynflow/dynflow | https://github.com/Dynflow/dynflow/blob/e14dcdfe57cec99cc78b2f6ab521f00809b1408a/lib/dynflow/flows/registry.rb | lib/dynflow/flows/registry.rb | # frozen_string_literal: true
module Dynflow
module Flows
class Registry
class IdentifierTaken < ArgumentError; end
class UnknownIdentifier < ArgumentError; end
class << self
def register!(klass, identifier)
if (found = serialization_map[identifier])
raise IdentifierTaken, "Error setting up mapping #{identifier} to #{klass}, it already maps to #{found}"
else
serialization_map.update(identifier => klass)
end
end
def encode(klass)
klass = klass.class unless klass.is_a?(Class)
serialization_map.invert[klass] || raise(UnknownIdentifier, "Could not find mapping for #{klass}")
end
def decode(identifier)
serialization_map[identifier] || raise(UnknownIdentifier, "Could not find mapping for #{identifier}")
end
def serialization_map
@serialization_map ||= {}
end
end
end
end
end
| ruby | MIT | e14dcdfe57cec99cc78b2f6ab521f00809b1408a | 2026-01-04T17:50:16.326730Z | false |
Dynflow/dynflow | https://github.com/Dynflow/dynflow/blob/e14dcdfe57cec99cc78b2f6ab521f00809b1408a/lib/dynflow/flows/sequence.rb | lib/dynflow/flows/sequence.rb | # frozen_string_literal: true
module Dynflow
module Flows
class Sequence < AbstractComposed
protected
def add_to_sequence(satisfying_flows, dependent_flow)
# the flows are already in sequence, we don't need to do anything extra
self << dependent_flow
end
end
Registry.register!(Sequence, 'S')
end
end
| ruby | MIT | e14dcdfe57cec99cc78b2f6ab521f00809b1408a | 2026-01-04T17:50:16.326730Z | false |
Dynflow/dynflow | https://github.com/Dynflow/dynflow/blob/e14dcdfe57cec99cc78b2f6ab521f00809b1408a/lib/dynflow/flows/atom.rb | lib/dynflow/flows/atom.rb | # frozen_string_literal: true
module Dynflow
module Flows
class Atom < Abstract
attr_reader :step_id
def encode
step_id
end
def initialize(step_id)
@step_id = Type! step_id, Integer
end
def size
1
end
def all_step_ids
[step_id]
end
def flatten!
# nothing to do
end
protected
def self.new_from_hash(hash)
check_class_matching hash
new(hash[:step_id])
end
end
end
end
| ruby | MIT | e14dcdfe57cec99cc78b2f6ab521f00809b1408a | 2026-01-04T17:50:16.326730Z | false |
Dynflow/dynflow | https://github.com/Dynflow/dynflow/blob/e14dcdfe57cec99cc78b2f6ab521f00809b1408a/lib/dynflow/flows/concurrence.rb | lib/dynflow/flows/concurrence.rb | # frozen_string_literal: true
module Dynflow
module Flows
class Concurrence < AbstractComposed
protected
def add_to_sequence(satisfying_flows, dependent_flow)
if satisfying_flows.empty?
self.sub_flows << dependent_flow
return
end
extracted_flow = extract_flows(satisfying_flows)
sequence = Sequence.new([extracted_flow])
self.sub_flows << sequence
sequence << dependent_flow
end
def extract_flows(extracted_sub_flows)
extracted_sub_flows.each do |sub_flow|
self.sub_flows.delete(sub_flow)
end
return Concurrence.new(extracted_sub_flows)
end
end
Registry.register!(Concurrence, 'C')
end
end
| ruby | MIT | e14dcdfe57cec99cc78b2f6ab521f00809b1408a | 2026-01-04T17:50:16.326730Z | false |
Dynflow/dynflow | https://github.com/Dynflow/dynflow/blob/e14dcdfe57cec99cc78b2f6ab521f00809b1408a/lib/dynflow/flows/abstract.rb | lib/dynflow/flows/abstract.rb | # frozen_string_literal: true
module Dynflow
module Flows
class Abstract < Serializable
include Algebrick::TypeCheck
def initialize
raise 'cannot instantiate Flows::Abstract'
end
def to_hash
{ :class => self.class.name }
end
def empty?
self.size == 0
end
def size
raise NotImplementedError
end
def includes_step?(step_id)
self.all_step_ids.any? { |s| s == step_id }
end
def all_step_ids
raise NotImplementedError
end
def flatten!
raise NotImplementedError
end
def self.new_from_hash(hash)
check_class_matching hash
new(hash[:flows].map { |flow_hash| from_hash(flow_hash) })
end
def self.decode(data)
if data.is_a? Integer
Flows::Atom.new(data)
else
kind, *subflows = data
Registry.decode(kind).new(subflows.map { |subflow| self.decode(subflow) })
end
end
end
end
end
| ruby | MIT | e14dcdfe57cec99cc78b2f6ab521f00809b1408a | 2026-01-04T17:50:16.326730Z | false |
Dynflow/dynflow | https://github.com/Dynflow/dynflow/blob/e14dcdfe57cec99cc78b2f6ab521f00809b1408a/lib/dynflow/flows/abstract_composed.rb | lib/dynflow/flows/abstract_composed.rb | # frozen_string_literal: true
module Dynflow
module Flows
class AbstractComposed < Abstract
attr_reader :flows
def initialize(flows)
Type! flows, Array
flows.all? { |f| Type! f, Abstract }
@flows = flows
end
def encode
[Registry.encode(self)] + flows.map(&:encode)
end
def <<(v)
@flows << v
self
end
def [](*args)
@flows[*args]
end
def []=(*args)
@flows.[]=(*args)
end
def size
@flows.size
end
alias_method :sub_flows, :flows
# @return [Array<Integer>] all step_ids recursively in the flow
def all_step_ids
flows.map(&:all_step_ids).flatten
end
def add_and_resolve(dependency_graph, new_flow)
return if new_flow.empty?
satisfying_flows = find_satisfying_sub_flows(dependency_graph, new_flow)
add_to_sequence(satisfying_flows, new_flow)
flatten!
end
def flatten!
self.sub_flows.to_enum.with_index.reverse_each do |flow, i|
if flow.class == self.class
expand_steps(i)
elsif flow.is_a?(AbstractComposed) && flow.sub_flows.size == 1
self.sub_flows[i] = flow.sub_flows.first
end
end
self.sub_flows.map(&:flatten!)
end
protected
# adds the +new_flow+ in a way that it's in sequence with
# the +satisfying_flows+
def add_to_sequence(satisfying_flows, new_flow)
raise NotImplementedError
end
private
def find_satisfying_sub_flows(dependency_graph, new_flow)
satisfying_flows = Set.new
new_flow.all_step_ids.each do |step_id|
dependency_graph.required_step_ids(step_id).each do |required_step_id|
satisfying_flow = sub_flows.find do |flow|
flow.includes_step?(required_step_id)
end
if satisfying_flow
satisfying_flows << satisfying_flow
dependency_graph.mark_satisfied(step_id, required_step_id)
end
end
end
return satisfying_flows.to_a
end
def expand_steps(index)
expanded_step = self.sub_flows[index]
self.sub_flows.delete_at(index)
expanded_step.sub_flows.each do |flow|
self.sub_flows.insert(index, flow)
index += 1
end
end
end
end
end
| ruby | MIT | e14dcdfe57cec99cc78b2f6ab521f00809b1408a | 2026-01-04T17:50:16.326730Z | false |
Dynflow/dynflow | https://github.com/Dynflow/dynflow/blob/e14dcdfe57cec99cc78b2f6ab521f00809b1408a/lib/dynflow/rails/daemon.rb | lib/dynflow/rails/daemon.rb | # frozen_string_literal: true
require 'fileutils'
require 'get_process_mem'
require 'dynflow/watchers/memory_consumption_watcher'
require 'active_support/core_ext/numeric/bytes'
module Dynflow
class Rails
class Daemon
attr_reader :dynflow_memory_watcher_class, :daemons_class
# make Daemon dependency injection ready for testing purposes
def initialize(
dynflow_memory_watcher_class = ::Dynflow::Watchers::MemoryConsumptionWatcher,
non_default_daemons_class = nil
)
@dynflow_memory_watcher_class = dynflow_memory_watcher_class
@daemons_class = non_default_daemons_class
end
def daemons_class
@daemons_class || ::Daemons
end
def stdout
STDOUT
end
def stderr
STDERR
end
# Load the Rails environment and initialize the executor in this thread.
def run(rails_root = Dir.pwd, options = {})
stdout.puts('Starting Rails environment')
rails_env_file = File.expand_path('./config/environment.rb', rails_root)
unless File.exist?(rails_env_file)
raise "#{rails_root} doesn't seem to be a Rails root directory"
end
stderr.puts("Starting dynflow with the following options: #{options}")
::Rails.application.dynflow.executor!
if options[:memory_limit] && options[:memory_limit].to_i > 0
::Rails.application.dynflow.config.on_init do |world|
stdout_cap = stdout
memory_watcher = initialize_memory_watcher(world, options[:memory_limit], options)
world.terminated.on_resolution do
stdout_cap.puts("World has been terminated")
memory_watcher = nil # the object can be disposed
end
end
end
require rails_env_file
::Rails.application.dynflow.initialize!
world_id = ::Rails.application.dynflow.world.id
stdout.puts("Everything ready for world: #{world_id}")
sleep
ensure
stdout.puts('Exiting')
end
# run the executor as a daemon
def run_background(command = 'start', options = {})
options = default_options.merge(options)
FileUtils.mkdir_p(options[:pid_dir])
begin
require 'daemons'
rescue LoadError
raise "You need to add gem 'daemons' to your Gemfile if you wish to use it."
end
unless %w(start stop restart run).include?(command)
raise "Command exptected to be 'start', 'stop', 'restart', 'run', was #{command.inspect}"
end
stdout.puts("Dynflow Executor: #{command} in progress")
options[:executors_count].times do
daemons_class.run_proc(
options[:process_name],
daemons_options(command, options)
) do |*_args|
begin
::Logging.reopen
run(options[:rails_root], options)
rescue => e
stderr.puts e.message
::Rails.logger.fatal('Failed running Dynflow daemon')
::Rails.logger.fatal(e)
exit 1
end
end
end
end
protected
def world
::Rails.application.dynflow.world
end
private
def daemons_options(command, options)
{
:multiple => true,
:dir => options[:pid_dir],
:log_dir => options[:log_dir],
:dir_mode => :normal,
:monitor => true,
:log_output => true,
:log_output_syslog => true,
:monitor_interval => [options[:memory_polling_interval] / 2, 30].min,
:force_kill_waittime => options[:force_kill_waittime].try(:to_i),
:ARGV => [command]
}
end
def default_options
{
rails_root: Dir.pwd,
process_name: 'dynflow_executor',
pid_dir: "#{::Rails.root}/tmp/pids",
log_dir: File.join(::Rails.root, 'log'),
wait_attempts: 300,
wait_sleep: 1,
executors_count: (ENV['EXECUTORS_COUNT'] || 1).to_i,
memory_limit: begin
to_gb((ENV['EXECUTOR_MEMORY_LIMIT'] || '')).gigabytes
rescue RuntimeError
ENV['EXECUTOR_MEMORY_LIMIT'].to_i
end,
memory_init_delay: (ENV['EXECUTOR_MEMORY_MONITOR_DELAY'] || 7200).to_i, # 2 hours
memory_polling_interval: (ENV['EXECUTOR_MEMORY_MONITOR_INTERVAL'] || 60).to_i,
force_kill_waittime: (ENV['EXECUTOR_FORCE_KILL_WAITTIME'] || 60).to_i
}
end
def initialize_memory_watcher(world, memory_limit, options)
watcher_options = {}
watcher_options[:polling_interval] = options[:memory_polling_interval]
watcher_options[:initial_wait] = options[:memory_init_delay]
watcher_options[:memory_checked_callback] = ->(current_memory, memory_limit) do
log_memory_within_limit(current_memory, memory_limit)
end
watcher_options[:memory_limit_exceeded_callback] = ->(current_memory, memory_limit) do
log_memory_limit_exceeded(current_memory, memory_limit)
end
dynflow_memory_watcher_class.new(world, memory_limit, watcher_options)
end
def log_memory_limit_exceeded(current_memory, memory_limit)
message = "Memory level exceeded, registered #{current_memory} bytes, which is greater than #{memory_limit} limit."
world.logger.error(message)
end
def log_memory_within_limit(current_memory, memory_limit)
message = "Memory level OK, registered #{current_memory} bytes, which is less than #{memory_limit} limit."
world.logger.debug(message)
end
private
# Taken straight from https://github.com/theforeman/foreman/blob/develop/lib/core_extensions.rb#L142
# in order to make this class work with any Rails project
def to_gb(string)
match_data = string.match(/^(\d+(\.\d+)?) ?(([KMGT]i?B?|B|Bytes))?$/i)
if match_data.present?
value, _, unit = match_data[1..3]
else
raise "Unknown string: #{string.inspect}!"
end
unit ||= :byte # default to bytes if no unit given
case unit.downcase.to_sym
when :b, :byte, :bytes then (value.to_f / 1.gigabyte)
when :tb, :tib, :t, :terabyte then (value.to_f * 1.kilobyte)
when :gb, :gib, :g, :gigabyte then value.to_f
when :mb, :mib, :m, :megabyte then (value.to_f / 1.kilobyte)
when :kb, :kib, :k, :kilobyte then (value.to_f / 1.megabyte)
else raise "Unknown unit: #{unit.inspect}!"
end
end
end
end
end
| ruby | MIT | e14dcdfe57cec99cc78b2f6ab521f00809b1408a | 2026-01-04T17:50:16.326730Z | false |
Dynflow/dynflow | https://github.com/Dynflow/dynflow/blob/e14dcdfe57cec99cc78b2f6ab521f00809b1408a/lib/dynflow/rails/configuration.rb | lib/dynflow/rails/configuration.rb | # frozen_string_literal: true
require 'rails'
require 'active_record'
module Dynflow
class Rails
class Configuration
# the number of threads in the pool handling the execution
attr_accessor :pool_size
# the size of db connection pool, if not set, it's calculated
# from the amount of workers in the pool
attr_accessor :db_pool_size
# set true if the executor runs externally (by default true in procution, othewise false)
attr_accessor :remote
alias remote? remote
# what transaction adapater should be used, by default, it uses the ActiveRecord
# based adapter, expecting ActiveRecord is used as ORM in the application
attr_accessor :transaction_adapter
attr_accessor :eager_load_paths
attr_accessor :lazy_initialization
# what rake tasks should run their own executor, not depending on the external one
attr_accessor :rake_tasks_with_executor
# if true, the ForemanTasks::Concerns::ActionTriggering will make
# no effect. Useful for testing, where we mignt not want to execute
# the orchestration tied to the models.
attr_accessor :disable_active_record_actions
def initialize
self.pool_size = 5
self.remote = ::Rails.env.production?
self.transaction_adapter = ::Dynflow::TransactionAdapters::ActiveRecord.new
self.eager_load_paths = []
self.lazy_initialization = !::Rails.env.production?
self.rake_tasks_with_executor = %w(db:migrate db:seed)
@on_init = []
@on_executor_init = []
@post_executor_init = []
end
# Action related info such as exceptions raised inside the actions' methods
# To be overridden in the Rails application
def action_logger
::Rails.logger
end
# Dynflow related info about the progress of the execution
# To be overridden in the Rails application
def dynflow_logger
::Rails.logger
end
def on_init(executor = true, &block)
destination = executor ? @on_executor_init : @on_init
destination << block
end
def run_on_init_hooks(executor, world)
source = executor ? @on_executor_init : @on_init
source.each { |init| init.call(world) }
end
def post_executor_init(&block)
@post_executor_init << block
end
def run_post_executor_init_hooks(world)
@post_executor_init.each { |init| init.call(world) }
end
def initialize_world(world_class = ::Dynflow::World)
world_class.new(world_config)
end
# No matter what config.remote says, when the process is marked as executor,
# it can't be remote
def remote?
!::Rails.application.dynflow.executor? &&
!rake_task_with_executor? &&
@remote
end
def rake_task_with_executor?
return false unless defined?(::Rake) && ::Rake.respond_to?(:application)
::Rake.application.top_level_tasks.any? do |rake_task|
rake_tasks_with_executor.include?(rake_task)
end
end
def increase_db_pool_size?
!::Rails.env.test? && (!remote? || sidekiq_worker?)
end
def sidekiq_worker?
defined?(::Sidekiq) && ::Sidekiq.configure_server { |c| c[:queues].any? }
end
def calculate_db_pool_size(world)
return self.db_pool_size if self.db_pool_size
base_value = 5
if defined?(::Sidekiq)
Sidekiq.configure_server { |c| c[:concurrency] } + base_value
else
world.config.queues.values.inject(base_value) do |pool_size, pool_options|
pool_size += pool_options[:pool_size]
end
end
end
# To avoid pottential timeouts on db connection pool, make sure
# we have the pool bigger than the thread pool
def increase_db_pool_size(world = nil)
if world.nil?
warn 'Deprecated: using `increase_db_pool_size` outside of Dynflow code is not needed anymore'
return
end
if increase_db_pool_size?
db_pool_size = calculate_db_pool_size(world)
::ActiveRecord::Base.connection_pool.disconnect!
base_config = ::ActiveRecord::Base.configurations.configs_for(env_name: ::Rails.env)[0]
config = if base_config.respond_to?(:configuration_hash)
::Dynflow::Utils::IndifferentHash.new(base_config.configuration_hash.dup)
else
base_config.config.dup
end
config['pool'] = db_pool_size if config['pool'].to_i < db_pool_size
::ActiveRecord::Base.establish_connection(config)
end
end
# generates the options hash consumable by the Dynflow's world
#
# Memoized — the same Dynflow::Config instance is returned on every call.
# @return [::Dynflow::Config]
def world_config
  @world_config ||= ::Dynflow::Config.new.tap do |config|
    config.auto_rescue = true
    config.logger_adapter = ::Dynflow::LoggerAdapters::Delegator.new(action_logger, dynflow_logger)
    config.pool_size = self.pool_size
    # Lambdas defer construction until the world itself is being built.
    config.persistence_adapter = ->(world, _) { initialize_persistence(world) }
    config.transaction_adapter = transaction_adapter
    config.executor = ->(world, _) { initialize_executor(world) }
    config.connector = ->(world, _) { initialize_connector(world) }
    # we can't do any operation until the Rails.application.dynflow.world is set
    config.auto_execute = false
    config.auto_validity_check = false
    # A Sidekiq worker that does not consume the orchestrator queue must
    # not run the delayed executor.
    if sidekiq_worker? && !Sidekiq.configure_server { |c| c[:queues].include?("dynflow_orchestrator") }
      config.delayed_executor = nil
    end
  end
end
# expose the queues definition to Rails developers
#
# @return the queues configuration of the memoized world config
def queues
  world_config.queues
end
protected
# Derives Sequel connection options from the Rails database configuration
# so Dynflow persistence shares the application's database settings.
#
# @param world [Dynflow::World] used to size the connection pool
# @return [Hash] options consumable by the Sequel persistence adapter
def default_sequel_adapter_options(world)
  base_config = ::ActiveRecord::Base.configurations.configs_for(env_name: ::Rails.env)[0]
  # Newer Rails exposes #configuration_hash; older versions use #config.
  db_config = if base_config.respond_to?(:configuration_hash)
                ::Dynflow::Utils::IndifferentHash.new(base_config.configuration_hash.dup)
              else
                base_config.config.dup
              end
  # Strip any makara proxy prefix/suffix and map AR adapter names to the
  # names Sequel expects.
  db_config['adapter'] = db_config['adapter'].gsub(/_?makara_?/, '')
  db_config['adapter'] = 'postgres' if db_config['adapter'] == 'postgresql'
  db_config['max_connections'] = calculate_db_pool_size(world) if increase_db_pool_size?
  if db_config['adapter'] == 'sqlite3'
    db_config['adapter'] = 'sqlite'
    database = db_config['database']
    unless database == ':memory:'
      # We need to create separate database for sqlite
      # to avoid lock conflicts on the database
      db_config['database'] = "#{File.dirname(database)}/dynflow-#{File.basename(database)}"
    end
  end
  db_config
end
# Picks the executor core implementation for this process: no executor at
# all for remote processes, the Sidekiq-based core when the Sidekiq server
# is configured as the Dynflow executor, the in-process parallel core
# otherwise.
def initialize_executor(world)
  return false if remote?
  use_sidekiq = defined?(::Sidekiq) && Sidekiq.configure_server { |c| c[:dynflow_executor] }
  use_sidekiq ? ::Dynflow::Executors::Sidekiq::Core : ::Dynflow::Executors::Parallel::Core
end
# Builds the connector used for communication between worlds; this setup
# always uses the database-backed connector.
def initialize_connector(world)
  ::Dynflow::Connectors::Database.new(world)
end
# The persistence adapter class to instantiate; override to swap adapters.
def persistence_class
  ::Dynflow::PersistenceAdapters::Sequel
end
# Sequel adapter based on Rails app database.yml configuration
#
# @param world [Dynflow::World]
# @param options [Hash] overrides merged over the derived Sequel options
def initialize_persistence(world, options = {})
  persistence_class.new(default_sequel_adapter_options(world).merge(options))
end
end
end
end
| ruby | MIT | e14dcdfe57cec99cc78b2f6ab521f00809b1408a | 2026-01-04T17:50:16.326730Z | false |
Dynflow/dynflow | https://github.com/Dynflow/dynflow/blob/e14dcdfe57cec99cc78b2f6ab521f00809b1408a/lib/dynflow/extensions/msgpack.rb | lib/dynflow/extensions/msgpack.rb | # frozen_string_literal: true
require 'msgpack'
module Dynflow
  module Extensions
    module MsgPack
      # Serialization mix-in: makes time-like classes respond to
      # +to_msgpack+ by delegating to MessagePack, relying on the
      # extension types registered below.
      module Time
        def to_msgpack(out = ''.dup)
          ::MessagePack.pack(self, out)
          out
        end
      end

      # Core Time uses msgpack's built-in timestamp support (ext type 0x00).
      ::Time.include ::Dynflow::Extensions::MsgPack::Time
      ::MessagePack::DefaultFactory.register_type(0x00, Time, packer: MessagePack::Time::Packer, unpacker: MessagePack::Time::Unpacker)

      begin
        # time_with_zone added a deprecation warning in 7.1.0 which we need to account for
        # it was removed again in 7.2.0
        require 'active_support/deprecation'
        require 'active_support/deprecator'
        require 'active_support/time_with_zone'

        # Unpacks a msgpack timestamp in the application's configured time
        # zone rather than as a plain Time.
        unpacker = ->(payload) do
          tv = MessagePack::Timestamp.from_msgpack_ext(payload)
          ::Time.zone.at(tv.sec, tv.nsec, :nanosecond)
        end

        # TimeWithZone (0x01), DateTime (0x02) and Date (0x03) all travel
        # through the same timestamp representation and are converted back
        # to their original class on unpack.
        ::ActiveSupport::TimeWithZone.include ::Dynflow::Extensions::MsgPack::Time
        ::MessagePack::DefaultFactory.register_type(0x01, ActiveSupport::TimeWithZone, packer: MessagePack::Time::Packer, unpacker: unpacker)

        ::DateTime.include ::Dynflow::Extensions::MsgPack::Time
        ::MessagePack::DefaultFactory.register_type(0x02, DateTime,
                                                    packer: ->(datetime) { MessagePack::Time::Packer.(datetime.to_time) },
                                                    unpacker: ->(payload) { unpacker.(payload).to_datetime })

        ::Date.include ::Dynflow::Extensions::MsgPack::Time
        ::MessagePack::DefaultFactory.register_type(0x03, Date,
                                                    packer: ->(date) { MessagePack::Time::Packer.(date.to_time) },
                                                    unpacker: ->(payload) { unpacker.(payload).to_date })
      rescue LoadError
        # ActiveSupport is not available — only core Time support is set up.
        nil
      end
    end
  end
end
| ruby | MIT | e14dcdfe57cec99cc78b2f6ab521f00809b1408a | 2026-01-04T17:50:16.326730Z | false |
Dynflow/dynflow | https://github.com/Dynflow/dynflow/blob/e14dcdfe57cec99cc78b2f6ab521f00809b1408a/lib/dynflow/action/polling.rb | lib/dynflow/action/polling.rb | # frozen_string_literal: true
require 'dynflow/action/timeouts'
module Dynflow
  # Mix-in for actions that start an external task and then periodically
  # poll a backend service for its completion, suspending the action (and
  # freeing the executor thread) between polls.
  #
  # Users implement #invoke_external_task, #poll_external_task and #done?;
  # the external task's serializable state lives in output[:task].
  module Action::Polling
    def self.included(base)
      base.send :include, Action::Timeouts
    end

    # Event that triggers one polling round.
    Poll = Algebrick.atom

    # Dispatches on the incoming event:
    # * nil      — first run (initiate) or executor restart (resume)
    # * Poll     — one polling round
    # * Timeout  — timeout handling, then stay suspended
    # Afterwards either finishes or schedules the next poll.
    def run(event = nil)
      case event
      when nil
        if external_task
          resume_external_action
        else
          initiate_external_action
        end
      when Poll
        poll_external_task_with_rescue
      when Action::Timeouts::Timeout
        process_timeout
        suspend
      else
        raise "unrecognized event #{event}"
      end
      done? ? on_finish : suspend_and_ping
    end

    # @abstract whether the external task has finished.
    def done?
      raise NotImplementedError
    end

    # @abstract starts the external task; the return value is stored as
    #   the external task data.
    def invoke_external_task
      raise NotImplementedError
    end

    # @abstract fetches fresh external task data from the backend.
    def poll_external_task
      raise NotImplementedError
    end

    # Hook called once #done? turns true; override for post-processing.
    def on_finish
    end

    # External task data. It should return nil when the task has not
    # been triggered yet.
    def external_task
      output[:task]
    end

    def external_task=(external_task_data)
      output[:task] = external_task_data
    end

    # What is the trend in waiting for next polling event. It allows
    # to start with frequent polling, but slow down once it's clear this
    # task will take some time: the idea is we don't care that much in finishing
    # few seconds sooner, when the task takes orders of minutes/hours. It allows
    # not overwhelming the backend-servers with useless requests.
    # By default, it switches to next interval after +attempts_before_next_interval+ number
    # of attempts
    def poll_intervals
      [0.5, 1, 2, 4, 8, 16]
    end

    def attempts_before_next_interval
      5
    end

    # Returns the time to wait between two polling intervals.
    def poll_interval
      interval_level = poll_attempts[:total] / attempts_before_next_interval
      # Stay at the longest interval once the ladder is exhausted.
      poll_intervals[interval_level] || poll_intervals.last
    end

    # How many times in row should we retry the polling before giving up
    def poll_max_retries
      3
    end

    def initiate_external_action
      self.external_task = invoke_external_task
    end

    # After an executor restart, try polling first; if the poll itself
    # raises (e.g. the task is unknown to the backend), start over.
    def resume_external_action
      poll_external_task_with_rescue
    rescue
      initiate_external_action
    end

    def suspend_and_ping
      plan_event(Poll, poll_interval)
      suspend
    end

    # One polling round: bumps the attempt counters and resets the failure
    # streak on success; consecutive failures are handled by
    # #rescue_external_task.
    def poll_external_task_with_rescue
      poll_attempts[:total] += 1
      self.external_task = poll_external_task
      poll_attempts[:failed] = 0
    rescue => error
      poll_attempts[:failed] += 1
      rescue_external_task(error)
    end

    # Counters persisted in the action output:
    # :total  — polls attempted overall (drives the interval ladder)
    # :failed — consecutive failures (drives give-up logic)
    def poll_attempts
      output[:poll_attempts] ||= { total: 0, failed: 0 }
    end

    # Logs and swallows polling errors until +poll_max_retries+ consecutive
    # failures, then re-raises to fail the action.
    def rescue_external_task(error)
      if poll_attempts[:failed] < poll_max_retries
        action_logger.warn("Polling failed, attempt no. #{poll_attempts[:failed]}, retrying in #{poll_interval}")
        action_logger.warn(error)
      else
        raise error
      end
    end
  end
end
| ruby | MIT | e14dcdfe57cec99cc78b2f6ab521f00809b1408a | 2026-01-04T17:50:16.326730Z | false |
Dynflow/dynflow | https://github.com/Dynflow/dynflow/blob/e14dcdfe57cec99cc78b2f6ab521f00809b1408a/lib/dynflow/action/with_sub_plans.rb | lib/dynflow/action/with_sub_plans.rb | # frozen_string_literal: true
module Dynflow
  # Mix-in for a parent action that spawns sub-plans (separate execution
  # plans) and suspends until all of them finish. Progress is tracked via
  # counters in the action output (total/success/failed/pending counts).
  module Action::WithSubPlans
    include Dynflow::Action::Cancellable

    # Raised when at least one sub-plan ended in error.
    class SubtaskFailedException < RuntimeError
      # The real failure lives in the sub-plans; an empty backtrace keeps
      # the parent task's error concise.
      def backtrace
        []
      end
    end

    # Event delivered when one sub-plan's execution finishes.
    SubPlanFinished = Algebrick.type do
      fields! :execution_plan_id => String,
              :success => type { variants TrueClass, FalseClass }
    end

    # Dispatches incoming events: nil starts (or, after an executor
    # restart, resumes) the action; SubPlanFinished updates the counters
    # and tries to finish; Cancel/Abort propagate to sub-plans.
    def run(event = nil)
      match event,
        (on nil do
          if output[:total_count]
            resume
          else
            initiate
          end
        end),
        (on SubPlanFinished do
          mark_as_done(event.execution_plan_id, event.success)
          try_to_finish or suspend
        end),
        (on Action::Cancellable::Cancel do
          cancel!
        end),
        (on Action::Cancellable::Abort do
          abort!
        end)
    end

    # First run: set up throttling (if configured) and spawn the sub-plans.
    def initiate
      if uses_concurrency_control
        calculate_time_distribution
        world.throttle_limiter.initialize_plan(execution_plan_id, input[:concurrency_control])
      end
      spawn_plans
    end

    # Triggers sub-plans via #create_sub_plans and starts waiting on them.
    def spawn_plans
      sub_plans = create_sub_plans
      sub_plans = Array[sub_plans] unless sub_plans.is_a? Array
      wait_for_sub_plans sub_plans
    end

    # @abstract when the logic for the initiation of the subtasks
    # is different from the default one.
    # @returns a triggered task or array of triggered tasks
    # @example
    #
    #    def create_sub_plans
    #      trigger(MyAction, "Hello")
    #    end
    #
    # @example
    #
    #    def create_sub_plans
    #      [trigger(MyAction, "Hello 1"), trigger(MyAction, "Hello 2")]
    #    end
    #
    def create_sub_plans
      raise NotImplementedError
    end

    # @api method to be called after all the sub tasks finished
    def on_finish
    end

    # Cancels throttled (not-yet-started) sub-plans and forwards the
    # cancel to all currently running ones, then stays suspended waiting
    # for their finish notifications.
    def cancel!(force = false)
      @world.throttle_limiter.cancel!(execution_plan_id)
      sub_plans('state' => 'running').each { |sub_plan| sub_plan.cancel(force) }
      suspend
    end

    def abort!
      cancel! true
    end

    # Helper for creating sub plans
    def trigger(action_class, *args)
      if uses_concurrency_control
        trigger_with_concurrency_control(action_class, *args)
      else
        world.trigger { world.plan_with_options(action_class: action_class, args: args, caller_action: self) }
      end
    end

    # Plans the sub-plan and hands it to the throttle limiter: planned
    # plans go to the "to execute" list, failed ones to the "failed" list.
    def trigger_with_concurrency_control(action_class, *args)
      record = world.plan_with_options(action_class: action_class, args: args, caller_action: self)
      records = [[record.id], []]
      records.reverse! unless record.state == :planned
      @world.throttle_limiter.handle_plans!(execution_plan_id, *records).first
    end

    # Restricts how many sub-plans may run at the same time.
    def limit_concurrency_level(level)
      input[:concurrency_control] ||= {}
      input[:concurrency_control][:level] = ::Dynflow::Semaphores::Stateful.new(level).to_hash
    end

    # Converts a (time_span, count) request stored by #distribute_over_time
    # into a stateful semaphore spreading sub-plan starts over the span.
    def calculate_time_distribution
      time, count = input[:concurrency_control][:time]
      unless time.nil? || time.is_a?(Hash)
        # Assume concurrency level 1 unless stated otherwise
        level = input[:concurrency_control].fetch(:level, {}).fetch(:free, 1)
        semaphore = ::Dynflow::Semaphores::Stateful.new(nil, level,
                                                        :interval => time.to_f / (count * level),
                                                        :time_span => time)
        input[:concurrency_control][:time] = semaphore.to_hash
      end
    end

    # Requests that +count+ sub-plan starts be spread over +time_span+
    # seconds; materialized later by #calculate_time_distribution.
    def distribute_over_time(time_span, count)
      input[:concurrency_control] ||= {}
      input[:concurrency_control][:time] = [time_span, count]
    end

    # Counts planning failures immediately; for successfully planned
    # sub-plans suspends until their finish events arrive.
    def wait_for_sub_plans(sub_plans)
      planned, failed = sub_plans.partition(&:planned?)
      increase_counts(planned.count, failed.count)
      if planned.any?
        notify_on_finish(planned)
      else
        check_for_errors!
      end
    end

    # Bumps the output counters; +track_total+ is disabled by subclasses
    # (e.g. WithBulkSubPlans) that know the total up front.
    def increase_counts(planned, failed, track_total = true)
      output[:total_count] = output.fetch(:total_count, 0) + planned + failed if track_total
      output[:failed_count] = output.fetch(:failed_count, 0) + failed
      output[:pending_count] = output.fetch(:pending_count, 0) + planned
      output[:success_count] ||= 0
    end

    # Finishes the action when all sub-plans are accounted for; returns
    # false (so callers suspend) otherwise.
    def try_to_finish
      if done?
        world.throttle_limiter.finish(execution_plan_id)
        check_for_errors!
        on_finish
        return true
      else
        return false
      end
    end

    # Called on re-run after an executor restart: either start over from
    # scratch (when every sub-plan failed during planning — also the case
    # when there are no sub-plans yet) or rebuild the counters from the
    # persisted sub-plans.
    def resume
      if sub_plans.all? { |sub_plan| sub_plan.error_in_plan? }
        # We're starting over and need to reset the counts
        %w(total failed pending success).each { |key| output.delete("#{key}_count".to_sym) }
        initiate
      else
        recalculate_counts
        try_to_finish or fail "Some sub plans are still not finished"
      end
    end

    # Loads this action's sub-plans; the unfiltered result is memoized.
    def sub_plans(filter = {})
      filters = { 'caller_execution_plan_id' => execution_plan_id,
                  'caller_action_id' => self.id }
      if filter.empty?
        @sub_plans ||= world.persistence.find_execution_plans(filters: filters)
      else
        world.persistence.find_execution_plans(filters: filters.merge(filter))
      end
    end

    def sub_plans_count(filter = {})
      filters = { 'caller_execution_plan_id' => execution_plan_id,
                  'caller_action_id' => self.id }
      world.persistence.find_execution_plan_counts(filters: filters.merge(filter))
    end

    # Suspends and subscribes to each plan's +finished+ future so the
    # suspended action receives a SubPlanFinished event per sub-plan.
    def notify_on_finish(plans)
      suspend do |suspended_action|
        plans.each do |plan|
          plan.finished.on_resolution! do |success, value|
            suspended_action << SubPlanFinished[plan.id, success && (value.result == :success)]
          end
        end
      end
    end

    def mark_as_done(plan_id, success)
      if success
        output[:success_count] += 1
      else
        output[:failed_count] += 1
      end
      output[:pending_count] -= 1
    end

    def done?
      if counts_set?
        output[:total_count] - output[:success_count] - output[:failed_count] <= 0
      else
        false
      end
    end

    # Fraction of finished sub-plans; 0.1 before counters exist so the
    # task shows some progress right away.
    def run_progress
      if counts_set? && output[:total_count] > 0
        (output[:success_count] + output[:failed_count]).to_f / output[:total_count]
      else
        0.1
      end
    end

    # Rebuilds all counters from the persisted sub-plan states.
    def recalculate_counts
      output.update(total_count: 0,
                    failed_count: 0,
                    success_count: 0,
                    pending_count: 0)
      sub_plans.each do |sub_plan|
        output[:total_count] += 1
        if sub_plan.state == :stopped
          if sub_plan.error?
            output[:failed_count] += 1
          else
            output[:success_count] += 1
          end
        else
          output[:pending_count] += 1
        end
      end
    end

    def counts_set?
      output[:total_count] && output[:success_count] && output[:failed_count] && output[:pending_count]
    end

    def check_for_errors!
      raise SubtaskFailedException.new("A sub task failed") if output[:failed_count] > 0
    end

    # Whether throttling/time-distribution was requested via input.
    def uses_concurrency_control
      @uses_concurrency_control = input.key? :concurrency_control
    end
  end
end
| ruby | MIT | e14dcdfe57cec99cc78b2f6ab521f00809b1408a | 2026-01-04T17:50:16.326730Z | false |
Dynflow/dynflow | https://github.com/Dynflow/dynflow/blob/e14dcdfe57cec99cc78b2f6ab521f00809b1408a/lib/dynflow/action/with_bulk_sub_plans.rb | lib/dynflow/action/with_bulk_sub_plans.rb | # frozen_string_literal: true
module Dynflow
  # Extension of Action::WithSubPlans that spawns sub-plans in batches
  # instead of all at once, so very large sets of sub-tasks don't have to
  # be planned up front. The expected total is known in advance
  # (#total_count) and the next batch is planned after each PlanNextBatch
  # event.
  module Action::WithBulkSubPlans
    include Dynflow::Action::Cancellable

    DEFAULT_BATCH_SIZE = 100

    # Should return a slice of size items starting from item with index from
    def batch(from, size)
      raise NotImplementedError
    end

    # Event requesting that the next batch of sub-plans be planned.
    PlanNextBatch = Algebrick.atom

    def run(event = nil)
      if event === PlanNextBatch
        if can_spawn_next_batch?
          spawn_plans
          suspend
        else
          on_planning_finished
        end
      else
        super
      end
    end

    # Hook invoked once all batches were spawned; by default just keeps
    # the action suspended waiting for sub-plan finish events.
    def on_planning_finished
      suspend
    end

    def initiate
      output[:planned_count] = 0
      output[:cancelled_count] = 0
      # The total is known up front, unlike in plain WithSubPlans.
      output[:total_count] = total_count
      super
    end

    # total_count is fixed at initiation, so pass track_total: false and
    # only track how many sub-plans were actually planned so far.
    def increase_counts(planned, failed)
      super(planned, failed, false)
      output[:planned_count] += planned + failed
    end

    # Should return the expected total count of tasks
    def total_count
      raise NotImplementedError
    end

    # Returns the items in the current batch
    def current_batch
      start_position = output[:planned_count]
      # Clamp the final batch to the remaining item count.
      size = start_position + batch_size > total_count ? total_count - start_position : batch_size
      batch(start_position, size)
    end

    def batch_size
      DEFAULT_BATCH_SIZE
    end

    # The same logic as in Action::WithSubPlans, but calculated using the expected total count
    def run_progress
      if counts_set? && total_count > 0
        sum = output.values_at(:success_count, :cancelled_count, :failed_count).reduce(:+)
        sum.to_f / total_count
      else
        0.1
      end
    end

    # Always schedules the next PlanNextBatch round, even when spawning
    # this batch raised.
    def spawn_plans
      super
    ensure
      plan_event(PlanNextBatch)
    end

    def cancel!(force = false)
      # Count the not-yet-planned tasks as cancelled
      output[:cancelled_count] = total_count - output[:planned_count]
      if uses_concurrency_control
        # Tell the throttle limiter to cancel the tasks its managing
        world.throttle_limiter.cancel!(execution_plan_id)
      else
        # Just stop the tasks which were not started yet
        sub_plans(:state => 'planned').each { |sub_plan| sub_plan.update_state(:stopped) }
      end
      # Pass the cancel event to running sub plans if they can be cancelled
      sub_plans(:state => 'running').each { |sub_plan| sub_plan.cancel(force) if sub_plan.cancellable? }
      suspend
    end

    private

    # Done only when no further batches remain AND the parent logic agrees.
    def done?
      !can_spawn_next_batch? && super
    end

    def can_spawn_next_batch?
      remaining_count > 0
    end

    def remaining_count
      total_count - output[:cancelled_count] - output[:planned_count]
    end
  end
end
| ruby | MIT | e14dcdfe57cec99cc78b2f6ab521f00809b1408a | 2026-01-04T17:50:16.326730Z | false |
Dynflow/dynflow | https://github.com/Dynflow/dynflow/blob/e14dcdfe57cec99cc78b2f6ab521f00809b1408a/lib/dynflow/action/format.rb | lib/dynflow/action/format.rb | # frozen_string_literal: true
module Dynflow
  # Legacy no-op hooks for declaring action input/output formats.
  module Action::Format
    # Kept for backward compatibility; format definitions are no longer
    # validated, so the given block is ignored.
    def input_format(&_block)
    end

    # Kept for backward compatibility; format definitions are no longer
    # validated, so the given block is ignored.
    def output_format(&_block)
    end
  end
end
| ruby | MIT | e14dcdfe57cec99cc78b2f6ab521f00809b1408a | 2026-01-04T17:50:16.326730Z | false |
Dynflow/dynflow | https://github.com/Dynflow/dynflow/blob/e14dcdfe57cec99cc78b2f6ab521f00809b1408a/lib/dynflow/action/missing.rb | lib/dynflow/action/missing.rb | # frozen_string_literal: true
module Dynflow
  # for cases the serialized action was renamed and it's not available
  # in the code base anymore.
  class Action::Missing < Dynflow::Action
    # Builds an anonymous subclass whose +name+ reports the original
    # (now missing) action class name, so persisted plans keep displaying
    # the name they were serialized with.
    def self.generate(action_name)
      klass = Class.new(self)
      klass.singleton_class.send(:define_method, :name) do
        action_name
      end
      klass
    end

    # Every phase fails loudly — the original action code is gone, so
    # nothing sensible can be executed.
    def plan(*args)
      raise StandardError,
        "The action class was not found and therefore plan phase failed, this can happen if the action was added/renamed but the executor was not restarted."
    end

    def run
      raise StandardError,
        "The action class was not found and therefore run phase failed, this can happen if the action was added/renamed but the executor was not restarted."
    end

    def finalize
      raise StandardError,
        "The action class was not found and therefore finalize phase failed, this can happen if the action was added/renamed but the executor was not restarted."
    end
  end
end
| ruby | MIT | e14dcdfe57cec99cc78b2f6ab521f00809b1408a | 2026-01-04T17:50:16.326730Z | false |
Dynflow/dynflow | https://github.com/Dynflow/dynflow/blob/e14dcdfe57cec99cc78b2f6ab521f00809b1408a/lib/dynflow/action/timeouts.rb | lib/dynflow/action/timeouts.rb | # frozen_string_literal: true
module Dynflow
  # Adds timeout support to suspended actions: a Timeout event can be
  # scheduled with #schedule_timeout and is later delivered to the
  # action's run method.
  module Action::Timeouts
    Timeout = Algebrick.atom

    # Default timeout handling: fail the action. Override to customize.
    def process_timeout
      fail("Timeout exceeded.")
    end

    # Schedules a Timeout event to be sent to this action after the given
    # number of seconds.
    #
    # @param seconds [Numeric] delay before the Timeout event fires
    # @param optional [Boolean] forwarded to plan_event — presumably makes
    #   delivery best-effort; confirm against World#plan_event
    def schedule_timeout(seconds, optional: false)
      plan_event(Timeout, seconds, optional: optional)
    end
  end
end
| ruby | MIT | e14dcdfe57cec99cc78b2f6ab521f00809b1408a | 2026-01-04T17:50:16.326730Z | false |
Dynflow/dynflow | https://github.com/Dynflow/dynflow/blob/e14dcdfe57cec99cc78b2f6ab521f00809b1408a/lib/dynflow/action/v2.rb | lib/dynflow/action/v2.rb | # frozen_string_literal: true
module Dynflow
  class Action
    # Namespace for the second generation of action helper modules;
    # requiring this file loads the V2 sub-plan implementation.
    module V2
      require 'dynflow/action/v2/with_sub_plans'
    end
  end
end
| ruby | MIT | e14dcdfe57cec99cc78b2f6ab521f00809b1408a | 2026-01-04T17:50:16.326730Z | false |
Dynflow/dynflow | https://github.com/Dynflow/dynflow/blob/e14dcdfe57cec99cc78b2f6ab521f00809b1408a/lib/dynflow/action/with_polling_sub_plans.rb | lib/dynflow/action/with_polling_sub_plans.rb | # frozen_string_literal: true
module Dynflow
  # Variant of Action::WithSubPlans that does not subscribe to per-plan
  # finish events, but instead periodically polls the persistence for
  # sub-plan states. Useful when the number of sub-plans makes individual
  # notifications impractical.
  module Action::WithPollingSubPlans
    # Seconds between two polling rounds.
    REFRESH_INTERVAL = 10
    Poll = Algebrick.atom

    def run(event = nil)
      case event
      when Poll
        poll
      else
        super(event)
      end
    end

    # One polling round: refresh the counters from the database and either
    # finish or schedule the next poll.
    def poll
      recalculate_counts
      try_to_finish || suspend_and_ping
    end

    # Schedule the first poll before spawning, so a crash mid-spawn still
    # leaves a wake-up event behind.
    def initiate
      ping
      super
    end

    # Unlike the parent, don't wire up per-plan notifications — polling
    # covers completion tracking.
    def wait_for_sub_plans(sub_plans)
      increase_counts(sub_plans.count, 0)
      suspend
    end

    def resume
      if sub_plans.all? { |sub_plan| sub_plan.error_in_plan? }
        # Remember how many plans are being retried so recalculate_counts
        # can exclude the old failures from the totals.
        output[:resumed_count] ||= 0
        output[:resumed_count] += output[:failed_count]
        # We're starting over and need to reset the counts
        %w(total failed pending success).each { |key| output.delete("#{key}_count".to_sym) }
        initiate
      else
        if self.is_a?(::Dynflow::Action::WithBulkSubPlans) && can_spawn_next_batch?
          # Not everything was spawned
          ping
          spawn_plans
          suspend
        else
          poll
        end
      end
    end

    def notify_on_finish(_sub_plans)
      suspend
    end

    def suspend_and_ping
      ping
      suspend
    end

    # Schedules the next Poll event.
    def ping(_suspended_action = nil)
      plan_event(Poll, REFRESH_INTERVAL)
    end

    # Rebuilds the counters from database counts; resumed (previously
    # failed, now retried) plans are subtracted from totals and failures.
    def recalculate_counts
      total = sub_plans_count
      failed = sub_plans_count('state' => %w(paused stopped), 'result' => 'error')
      success = sub_plans_count('state' => 'stopped', 'result' => 'success')
      output.update(:total_count => total - output.fetch(:resumed_count, 0),
                    :pending_count => total - failed - success,
                    :failed_count => failed - output.fetch(:resumed_count, 0),
                    :success_count => success)
    end
  end
end
| ruby | MIT | e14dcdfe57cec99cc78b2f6ab521f00809b1408a | 2026-01-04T17:50:16.326730Z | false |
Dynflow/dynflow | https://github.com/Dynflow/dynflow/blob/e14dcdfe57cec99cc78b2f6ab521f00809b1408a/lib/dynflow/action/rescue.rb | lib/dynflow/action/rescue.rb | # frozen_string_literal: true
module Dynflow
  # Determines how a paused execution plan should be rescued: each action
  # suggests a strategy (Skip, Pause or Fail) for itself and its planned
  # sub-actions, and the suggestions are combined into one decision.
  module Action::Rescue
    Strategy = Algebrick.type do
      variants Skip = atom, Pause = atom, Fail = atom
    end

    # Pairs an action with the strategy it suggests.
    SuggestedStrategy = Algebrick.type do
      fields! action: Action,
              strategy: Strategy
    end

    # What strategy should be used for rescuing from error in
    # the action or its sub actions
    #
    # @return Strategy
    #
    # When determining the strategy, the algorithm starts from the
    # entry action that by default takes the strategy from #rescue_strategy_for_self
    # and #rescue_strategy_for_planned_actions and combines them together.
    def rescue_strategy
      suggested_strategies = []
      # Only actions with a step in error contribute their own strategy.
      if self.steps.compact.any? { |step| step.state == :error }
        suggested_strategies << SuggestedStrategy[self, rescue_strategy_for_self]
      end
      self.planned_actions.each do |planned_action|
        rescue_strategy = rescue_strategy_for_planned_action(planned_action)
        next unless rescue_strategy # ignore actions that have no say in the rescue strategy
        suggested_strategies << SuggestedStrategy[planned_action, rescue_strategy]
      end
      combine_suggested_strategies(suggested_strategies)
    end

    # Override when another strategy should be used for rescuing from
    # error on the action
    def rescue_strategy_for_self
      return Pause
    end

    # Override when the action should override the rescue
    # strategy of an action it planned
    def rescue_strategy_for_planned_action(action)
      action.rescue_strategy
    end

    # Override when different approach should be taken for combining
    # the suggested strategies
    #
    # @return [Strategy, nil] nil when no action suggested anything
    def combine_suggested_strategies(suggested_strategies)
      if suggested_strategies.empty?
        nil
      else
        # TODO: Find the safest rescue strategy among the suggested ones
        if suggested_strategies.all? { |suggested_strategy| suggested_strategy.strategy == Skip }
          return Skip
        elsif suggested_strategies.all? { |suggested_strategy| suggested_strategy.strategy == Fail }
          return Fail
        else
          return Pause # We don't know how to handle this case, so we'll just pause
        end
      end
    end
  end
end
| ruby | MIT | e14dcdfe57cec99cc78b2f6ab521f00809b1408a | 2026-01-04T17:50:16.326730Z | false |
Dynflow/dynflow | https://github.com/Dynflow/dynflow/blob/e14dcdfe57cec99cc78b2f6ab521f00809b1408a/lib/dynflow/action/cancellable.rb | lib/dynflow/action/cancellable.rb | # frozen_string_literal: true
module Dynflow
  # Marks an action as cancellable: Cancel and Abort events sent to the
  # suspended action are translated into #cancel! / #abort! calls, all
  # other events fall through to the action's own run handling.
  module Action::Cancellable
    Cancel = Algebrick.atom
    Abort = Algebrick.atom

    def run(event = nil)
      case event
      when Cancel
        cancel!
      when Abort
        abort!
      else
        super event
      end
    end

    # @abstract implement the graceful cancellation of the action.
    def cancel!
      raise NotImplementedError
    end

    # Forceful termination; defaults to plain cancellation.
    def abort!
      cancel!
    end
  end
end
| ruby | MIT | e14dcdfe57cec99cc78b2f6ab521f00809b1408a | 2026-01-04T17:50:16.326730Z | false |
Dynflow/dynflow | https://github.com/Dynflow/dynflow/blob/e14dcdfe57cec99cc78b2f6ab521f00809b1408a/lib/dynflow/action/singleton.rb | lib/dynflow/action/singleton.rb | # frozen_string_literal: true
module Dynflow
  class Action
    # Ensures at most one execution plan of a given action class runs at a
    # time, using a coordinator lock keyed by the class name. The actual
    # lock/unlock points are driven by the Singleton middleware.
    module Singleton
      def self.included(base)
        base.middleware.use ::Dynflow::Middleware::Common::Singleton
      end

      # Acquires the lock unless this plan already holds it.
      def validate_singleton_lock!
        singleton_lock! unless holds_singleton_lock?
      end

      # Acquires the singleton lock; fails the action when another plan of
      # the same class already holds it.
      def singleton_lock!
        world.coordinator.acquire(singleton_lock)
      rescue Dynflow::Coordinator::LockError
        fail "Action #{self.class.name} is already active"
      end

      # Releases the lock, but only if it is actually ours.
      def singleton_unlock!
        world.coordinator.release(singleton_lock) if holds_singleton_lock?
      end

      # Whether the lock for this action class exists and is owned by this
      # execution plan.
      def holds_singleton_lock?
        # Get locks for this action, there should be none or one
        lock_filter = singleton_lock_class.unique_filter(self.class.name)
        present_locks = world.coordinator.find_locks lock_filter
        !present_locks.empty? && present_locks.first.owner_id == execution_plan_id
      end

      def singleton_lock_class
        ::Dynflow::Coordinator::SingletonActionLock
      end

      def singleton_lock
        singleton_lock_class.new(self.class.name, execution_plan_id)
      end

      # Release the lock before propagating the error, so a failed plan
      # doesn't keep other plans of the class blocked.
      def error!(*args)
        singleton_unlock!
        super
      end
    end
  end
end
| ruby | MIT | e14dcdfe57cec99cc78b2f6ab521f00809b1408a | 2026-01-04T17:50:16.326730Z | false |
Dynflow/dynflow | https://github.com/Dynflow/dynflow/blob/e14dcdfe57cec99cc78b2f6ab521f00809b1408a/lib/dynflow/action/progress.rb | lib/dynflow/action/progress.rb | # frozen_string_literal: true
module Dynflow
  # Methods for specifying the progress of the action
  # the +*_progress+ methods should return number in 0..1.
  # The weight is there to increase/decrease the portion of this task
  # in the context of other tasks in execution plan. Normal action has
  # weight 1.
  #
  # The +*_progress+ is run only when the action is in running/suspend state. Otherwise
  # the progress is 1 for success/skipped actions and 0 for errorneous ones.
  module Action::Progress
    # Middleware that snapshots the action's [progress, weight] pair after
    # each run/finalize invocation into #calculated_progress.
    class Calculate < Middleware
      def run(*args)
        with_progress_calculation(*args) do
          [action.run_progress, action.run_progress_weight]
        end
      end

      def finalize(*args)
        with_progress_calculation(*args) do
          [action.finalize_progress, action.finalize_progress_weight]
        end
      end

      protected

      # Runs the phase, then stores the progress the block computes;
      # progress-calculation errors are logged but never fail the phase.
      def with_progress_calculation(*args)
        pass(*args)
      ensure
        begin
          action.calculated_progress = yield
        rescue => error
          # we don't want progress calculation to cause fail of the whole process
          # TODO: introduce post-execute state for handling issues with additional
          # calculations after the step is run
          action.action_logger.error('Error in progress calculation')
          action.action_logger.error(error)
        end
      end
    end

    # Default mid-way progress for a running action; override for real
    # progress reporting.
    def run_progress
      0.5
    end

    def run_progress_weight
      1
    end

    def finalize_progress
      0.5
    end

    def finalize_progress_weight
      1
    end

    # Last [progress, weight] pair captured by the Calculate middleware.
    attr_accessor :calculated_progress
  end
end
| ruby | MIT | e14dcdfe57cec99cc78b2f6ab521f00809b1408a | 2026-01-04T17:50:16.326730Z | false |
Dynflow/dynflow | https://github.com/Dynflow/dynflow/blob/e14dcdfe57cec99cc78b2f6ab521f00809b1408a/lib/dynflow/action/suspended.rb | lib/dynflow/action/suspended.rb | # frozen_string_literal: true
module Dynflow
  # Lightweight, serialization-friendly handle to a suspended action:
  # holds just the coordinates (plan id + run step id) needed to route
  # events back to it through the world.
  class Action::Suspended
    attr_reader :execution_plan_id, :step_id

    def initialize(action)
      @world = action.world
      @execution_plan_id = action.execution_plan_id
      @step_id = action.run_step_id
    end

    # Schedules +event+ to be delivered to the suspended action after
    # +time+ (nil means as soon as possible).
    #
    # @return [Concurrent::Promises::ResolvableFuture] resolves once the
    #   event is sent
    def plan_event(event, time, sent = Concurrent::Promises.resolvable_future, optional: false)
      @world.plan_event(execution_plan_id, step_id, event, time, sent, optional: optional)
    end

    # Delivers +event+ immediately (plan_event with no delay).
    def event(event, sent = Concurrent::Promises.resolvable_future, optional: false)
      # TODO: deprecate 2 levels backtrace (to know it's called from clock or internally)
      # remove lib/dynflow/clock.rb ClockReference#ping branch condition on removal.
      plan_event(event, nil, sent, optional: optional)
    end

    # Shovel-style alias: `suspended_action << event` delivers the event.
    def <<(event = nil)
      event event
    end
    alias_method :ask, :event
  end
end
| ruby | MIT | e14dcdfe57cec99cc78b2f6ab521f00809b1408a | 2026-01-04T17:50:16.326730Z | false |
Dynflow/dynflow | https://github.com/Dynflow/dynflow/blob/e14dcdfe57cec99cc78b2f6ab521f00809b1408a/lib/dynflow/action/v2/with_sub_plans.rb | lib/dynflow/action/v2/with_sub_plans.rb | # frozen_string_literal: true
module Dynflow::Action::V2
  # Second-generation sub-plans support: combines batched spawning,
  # polling-based tracking and concurrency limiting in a single module.
  # Sub-plan states are tracked by periodically recalculating counts from
  # the persistence instead of per-plan notifications.
  module WithSubPlans
    include Dynflow::Action::Cancellable

    DEFAULT_BATCH_SIZE = 100
    DEFAULT_POLLING_INTERVAL = 15
    # Event that triggers one bookkeeping round (and possibly next batch).
    Ping = Algebrick.atom

    # Raised when at least one sub-plan failed or was cancelled.
    class SubtaskFailedException < RuntimeError
      # Failure details live in the sub-plans; keep the backtrace empty.
      def backtrace
        []
      end
    end

    # Methods to be overridden
    def create_sub_plans
      raise NotImplementedError
    end

    # Should return the expected total count of tasks
    def total_count
      raise NotImplementedError
    end

    def batch_size
      DEFAULT_BATCH_SIZE
    end

    # Should return a slice of size items starting from item with index from
    def batch(from, size)
      raise NotImplementedError
    end

    # Polling
    def polling_interval
      DEFAULT_POLLING_INTERVAL
    end

    # Callbacks
    def on_finish
    end

    def on_planning_finished
    end

    # Event dispatch: nil starts or resumes, Ping runs a bookkeeping tick,
    # Cancel/Abort propagate downwards. Afterwards either finish or
    # suspend while scheduling the next Ping.
    def run(event = nil)
      case event
      when nil
        if output[:total_count]
          resume
        else
          initiate
        end
      when Ping
        tick
      when ::Dynflow::Action::Cancellable::Cancel
        cancel!
      when ::Dynflow::Action::Cancellable::Abort
        abort!
      end
      try_to_finish || suspend_and_ping
    end

    def initiate
      output[:planned_count] = 0
      output[:cancelled_count] = 0
      output[:total_count] = total_count
      spawn_plans
    end

    # After an executor restart: start over when every sub-plan failed
    # during planning (tracking how many are being retried in
    # :resumed_count), otherwise just refresh the counters.
    def resume
      if sub_plans.all? { |sub_plan| sub_plan.error_in_plan? }
        output[:resumed_count] ||= 0
        output[:resumed_count] += output[:failed_count]
        # We're starting over and need to reset the counts
        %w(total failed pending success).each { |key| output.delete("#{key}_count".to_sym) }
        initiate
      else
        tick
      end
    end

    # One bookkeeping round: refresh counts, spawn another batch if any
    # items remain.
    def tick
      recalculate_counts
      spawn_plans if can_spawn_next_batch?
    end

    # Suspends and schedules the next Ping. The delay is nil (immediate)
    # while more batches can be spawned and capacity is available,
    # otherwise the regular polling interval.
    def suspend_and_ping
      delay = (concurrency_limit.nil? || concurrency_limit_capacity > 0) && can_spawn_next_batch? ? nil : polling_interval
      plan_event(Ping, delay)
      suspend
    end

    def spawn_plans
      sub_plans = create_sub_plans
      sub_plans = Array[sub_plans] unless sub_plans.is_a? Array
      increase_counts(sub_plans.count, 0)
      on_planning_finished unless can_spawn_next_batch?
    end

    def increase_counts(planned, failed)
      output[:planned_count] += planned + failed
      output[:failed_count] = output.fetch(:failed_count, 0) + failed
      output[:pending_count] = output.fetch(:pending_count, 0) + planned
      output[:success_count] ||= 0
    end

    # Finishes (with error checking and the on_finish hook) when all
    # sub-plans are accounted for; returns false otherwise.
    def try_to_finish
      return false unless done?
      check_for_errors!
      on_finish
      true
    end

    def done?
      return false if can_spawn_next_batch? || !counts_set?
      total_count - output[:success_count] - output[:failed_count] - output[:cancelled_count] <= 0
    end

    # Fraction of finished/cancelled sub-plans; 0.1 before counters exist.
    def run_progress
      return 0.1 unless counts_set? && total_count > 0
      sum = output.values_at(:success_count, :cancelled_count, :failed_count).reduce(:+)
      sum.to_f / total_count
    end

    # Rebuilds the counters from database counts. After cancellation,
    # plans that errored after the cancel timestamp are counted as
    # cancelled (together with the never-planned ones) rather than failed.
    def recalculate_counts
      total = total_count
      if output[:cancelled_timestamp]
        cancelled_scheduled_plans = sub_plans_count_after(output[:cancelled_timestamp], { 'state' => %w(paused stopped), 'result' => %w(error warning) })
        cancelled_unscheduled_plans = total_count - output[:planned_count]
        cancelled = cancelled_unscheduled_plans + cancelled_scheduled_plans
      else
        cancelled = cancelled_scheduled_plans = 0
      end
      failed = sub_plans_count('state' => %w(paused stopped), 'result' => %w(error warning)) - cancelled_scheduled_plans
      success = sub_plans_count('state' => 'stopped', 'result' => 'success')
      output.update(:pending_count => total - failed - success - cancelled_scheduled_plans,
                    :failed_count => failed - output.fetch(:resumed_count, 0),
                    :success_count => success,
                    :cancelled_count => cancelled)
    end

    def counts_set?
      output[:total_count] && output[:success_count] && output[:failed_count] && output[:pending_count]
    end

    def check_for_errors!
      raise SubtaskFailedException.new("A sub task failed") if output[:failed_count] + output[:cancelled_count] > 0
    end

    # Helper for creating sub plans
    def trigger(action_class, *args)
      world.trigger { world.plan_with_options(action_class: action_class, args: args, caller_action: self) }
    end

    # Concurrency limitting
    def limit_concurrency_level!(level)
      input[:dynflow] ||= {}
      input[:dynflow][:concurrency_limit] = level
    end

    def concurrency_limit
      input[:dynflow] ||= {}
      input[:dynflow][:concurrency_limit]
    end

    # How many more sub-plans may be started right now under the limit
    # (nil when no limit is configured); never negative.
    def concurrency_limit_capacity
      if limit = concurrency_limit
        return limit unless counts_set?
        capacity = limit - (output[:planned_count] - (output[:success_count] + output[:failed_count]))
        [0, capacity].max
      end
    end

    # Cancellation handling
    def cancel!(force = false)
      # Count the not-yet-planned tasks as cancelled
      output[:cancelled_count] = total_count - output[:planned_count]
      output[:cancelled_timestamp] ||= Time.now.utc.iso8601 # time in UTC for comparison with UTC times in the database
      on_planning_finished if output[:cancelled_count].positive?
      # Pass the cancel event to running sub plans if they can be cancelled
      sub_plans(:state => 'running').each { |sub_plan| sub_plan.cancel(force) if sub_plan.cancellable? }
      suspend
    end

    def abort!
      cancel! true
    end

    # Batching
    # Returns the items in the current batch
    def current_batch
      start_position = output[:planned_count]
      size = batch_size
      # Under a concurrency limit, a batch never exceeds the free capacity.
      size = concurrency_limit_capacity if concurrency_limit
      size = start_position + size > total_count ? total_count - start_position : size
      batch(start_position, size)
    end

    def can_spawn_next_batch?
      remaining_count > 0
    end

    # Once cancelled, nothing further is ever spawned.
    def remaining_count
      return 0 if output[:cancelled_timestamp]
      total_count - output[:planned_count]
    end

    private

    # Sub-plan lookup
    def sub_plan_filter
      { 'caller_execution_plan_id' => execution_plan_id,
        'caller_action_id' => self.id }
    end

    def sub_plans(filter = {})
      world.persistence.find_execution_plans(filters: sub_plan_filter.merge(filter))
    end

    def sub_plans_count(filter = {})
      world.persistence.find_execution_plan_counts(filters: sub_plan_filter.merge(filter))
    end

    def sub_plans_count_after(timestamp, filter = {})
      world.persistence.find_execution_plan_counts_after(timestamp, { filters: sub_plan_filter.merge(filter) })
    end
  end
end
| ruby | MIT | e14dcdfe57cec99cc78b2f6ab521f00809b1408a | 2026-01-04T17:50:16.326730Z | false |
Dynflow/dynflow | https://github.com/Dynflow/dynflow/blob/e14dcdfe57cec99cc78b2f6ab521f00809b1408a/lib/dynflow/executors/parallel.rb | lib/dynflow/executors/parallel.rb | # frozen_string_literal: true
module Dynflow
module Executors
# Facade of the parallel executor: spawns an actor "core" (in-process
# pools or a Sidekiq-backed variant) and translates method calls into
# actor messages.
class Parallel
require 'dynflow/executors/abstract/core'
require 'dynflow/executors/parallel/core'
# only load Sidekiq pieces when run in Sidekiq runtime (and the Sidekiq module is already loaded)
require 'dynflow/executors/sidekiq/core' if defined? ::Sidekiq

attr_reader :core

# @param world [World]
# @param executor_class [Class] actor class to spawn as the core
# @param heartbeat_interval [Numeric] seconds between executor heartbeats
# @param queues_options [Hash] queue name => options (e.g. :pool_size)
def initialize(world,
executor_class:,
heartbeat_interval:,
queues_options: { :default => { :pool_size => 5 } })
@world = world
@logger = world.logger
# @core_initialized resolves once the core actor finished initializing
@core = executor_class.spawn name: 'parallel-executor-core',
args: [world, heartbeat_interval, queues_options],
initialized: @core_initialized = Concurrent::Promises.resolvable_future
end

# Asks the core to execute the given plan.
#
# @param finished [Concurrent::Promises::ResolvableFuture] resolved when the run ends
# @param wait_for_acceptance [Boolean] when true, block until the core accepted the plan
# @return [Concurrent::Promises::ResolvableFuture] the +finished+ future
# @raise [Dynflow::Error] when the executor actor is already terminated
def execute(execution_plan_id, finished = Concurrent::Promises.resolvable_future, wait_for_acceptance = true)
accepted = @core.ask([:handle_execution, execution_plan_id, finished])
accepted.value! if wait_for_acceptance
finished
rescue Concurrent::Actor::ActorTerminated
dynflow_error = Dynflow::Error.new('executor terminated')
finished.reject dynflow_error unless finished.resolved?
raise dynflow_error
rescue => e
finished.reject e unless finished.resolved?
raise e
end

# Sends an event to a step of a running execution plan; returns the
# caller-supplied future (if any) untouched.
def event(request_id, execution_plan_id, step_id, event, future = nil, optional: false)
@core.ask([:handle_event, Director::Event[request_id, execution_plan_id, step_id, event, future, optional]])
future
end

# Asks the core to run the planning phase of the given plan.
def plan(execution_plan_id)
@core.ask([:handle_planning, execution_plan_id])
end

# Delivers an event whose delivery was postponed.
def delayed_event(director_event)
@core.ask([:handle_event, director_event])
director_event.result
end

# Starts core termination; the returned future resolves once it is done.
def terminate(future = Concurrent::Promises.resolvable_future)
@core.tell([:start_termination, future])
future
end

# Queue/worker utilization snapshot, optionally scoped to one plan.
def execution_status(execution_plan_id = nil)
@core.ask!([:execution_status, execution_plan_id])
end

# Forcefully stops execution of the given plan.
def halt(execution_plan_id)
@core.tell([:halt, execution_plan_id])
end

# Future resolving when the core actor finished its initialization.
def initialized
@core_initialized
end
end
end
end
| ruby | MIT | e14dcdfe57cec99cc78b2f6ab521f00809b1408a | 2026-01-04T17:50:16.326730Z | false |
Dynflow/dynflow | https://github.com/Dynflow/dynflow/blob/e14dcdfe57cec99cc78b2f6ab521f00809b1408a/lib/dynflow/executors/abstract/core.rb | lib/dynflow/executors/abstract/core.rb | # frozen_string_literal: true
module Dynflow
module Executors
module Abstract
# Base class for executor cores: an actor that receives execution, planning
# and event requests from the Executors::Parallel facade, turns them into
# work items via a Director, and leaves actual dispatching (feed_pool) to
# subclasses. It also keeps the world's coordinator record fresh through
# periodic heartbeats.
class Core < Actor
  attr_reader :logger

  # @param world [World] the world this executor core belongs to
  # @param heartbeat_interval [Numeric] seconds between coordinator heartbeats
  # @param queues_options [Hash] queue name => queue options
  def initialize(world, heartbeat_interval, queues_options)
    @logger = world.logger
    @world = Type! world, World
    @pools = {}
    @terminated = nil
    @director = Director.new(@world)
    @heartbeat_interval = heartbeat_interval
    @queues_options = queues_options

    schedule_heartbeat
  end

  # Starts executing an execution plan; +finished+ resolves when done.
  # @raise [Dynflow::Error] when the core is already terminating
  def handle_execution(execution_plan_id, finished)
    if terminating?
      raise Dynflow::Error,
        "cannot accept execution_plan_id:#{execution_plan_id} core is terminating"
    end

    handle_work(@director.start_execution(execution_plan_id, finished))
  end

  # Routes an event to the step it belongs to.
  # @raise [Dynflow::Error] when the core is already terminating
  def handle_event(event)
    Type! event, Director::Event
    if terminating?
      raise Dynflow::Error,
        "cannot accept event: #{event} core is terminating"
    end
    handle_work(@director.handle_event(event))
  end

  # Runs the planning phase for an execution plan.
  # @raise [Dynflow::Error] when the core is already terminating
  def handle_planning(execution_plan_id)
    if terminating?
      # BUG FIX: this message previously interpolated an undefined local
      # `event`, so reaching this branch raised NameError instead of the
      # intended Dynflow::Error.
      raise Dynflow::Error,
        "cannot accept planning of execution_plan_id:#{execution_plan_id} core is terminating"
    end

    handle_work(@director.handle_planning(execution_plan_id))
  end

  # Schedules delayed events produced by finished work back into the world.
  def plan_events(delayed_events)
    delayed_events.each do |event|
      @world.plan_event(event.execution_plan_id, event.step_id, event.event, event.time, optional: event.optional)
    end
  end

  # Called when a work item finished: may produce follow-up work items
  # and delayed events to plan.
  def work_finished(work, delayed_events = nil)
    handle_work(@director.work_finished(work))
    plan_events(delayed_events) if delayed_events
  end

  # Logs a persistence failure; fatal persistence errors take down the
  # whole world.
  def handle_persistence_error(error, work = nil)
    logger.error "PersistenceError in executor"
    logger.error error
    @director.work_failed(work) if work
    if error.is_a? Errors::FatalPersistenceError
      logger.fatal "Terminating"
      @world.terminate
    end
  end

  def halt(execution_plan_id)
    @director.halt execution_plan_id
  end

  def start_termination(*args)
    logger.info 'shutting down Core ...'
    super
  end

  def finish_termination
    @director.terminate
    logger.info '... Dynflow core terminated.'
    super()
  end

  def dead_letter_routing
    @world.dead_letter_handler
  end

  # Subclasses may report per-queue utilization here.
  def execution_status(execution_plan_id = nil)
    {}
  end

  # Refreshes the world's coordinator record so other worlds see us alive;
  # terminates the world when our record has disappeared.
  def heartbeat
    @logger.debug('Executor heartbeat')
    record = @world.coordinator.find_records(:id => @world.id,
      :class => ['Dynflow::Coordinator::ExecutorWorld', 'Dynflow::Coordinator::ClientWorld']).first
    unless record
      logger.error(%{Executor's world record for #{@world.id} missing: terminating})
      @world.terminate
      return
    end

    record.data[:meta].update(:last_seen => Dynflow::Dispatcher::ClientDispatcher::PingCache.format_time)
    @world.coordinator.update_record(record)
    schedule_heartbeat
  end

  private

  # Maps a work item to one of the configured queues, falling back to the
  # default queue when the item's queue has no pool configured.
  def suggest_queue(work_item)
    queue = work_item.queue
    unless @queues_options.key?(queue)
      logger.debug("Pool is not available for queue #{queue}, falling back to #{fallback_queue}")
      queue = fallback_queue
    end
    queue
  end

  def fallback_queue
    :default
  end

  def schedule_heartbeat
    @world.clock.ping(self, @heartbeat_interval, :heartbeat)
  end

  def on_message(message)
    super
  rescue Errors::PersistenceError => e
    handle_persistence_error(e)
  end

  # Normalizes director output (nil, a single item or a list) and feeds
  # the resulting work items to the pool.
  def handle_work(work_items)
    return if terminating?
    return if work_items.nil?
    work_items = [work_items] if work_items.is_a? Director::WorkItem
    work_items.all? { |i| Type! i, Director::WorkItem }
    feed_pool(work_items)
  end

  # @abstract subclasses dispatch work items to actual workers
  def feed_pool(work_items)
    raise NotImplementedError
  end
end
end
end
end
| ruby | MIT | e14dcdfe57cec99cc78b2f6ab521f00809b1408a | 2026-01-04T17:50:16.326730Z | false |
Dynflow/dynflow | https://github.com/Dynflow/dynflow/blob/e14dcdfe57cec99cc78b2f6ab521f00809b1408a/lib/dynflow/executors/parallel/core.rb | lib/dynflow/executors/parallel/core.rb | # frozen_string_literal: true
require 'dynflow/executors/parallel/pool'
require 'dynflow/executors/parallel/worker'
module Dynflow
module Executors
class Parallel
# Executor core backed by in-process worker pools, one Pool actor per
# configured queue.
class Core < Abstract::Core
attr_reader :logger # NOTE(review): duplicates the attr_reader in Abstract::Core

def initialize(world, heartbeat_interval, queues_options)
super
@pools = {} # NOTE(review): already initialized in Abstract::Core#initialize
initialize_queues
end

# Spawns one Pool per configured queue; queues without an explicit
# :pool_size inherit the default queue's size.
def initialize_queues
default_pool_size = @queues_options[:default][:pool_size]
@queues_options.each do |(queue_name, queue_options)|
queue_pool_size = queue_options.fetch(:pool_size, default_pool_size)
@pools[queue_name] = Pool.spawn("pool #{queue_name}", @world,
reference, queue_name, queue_pool_size,
@world.transaction_adapter)
end
end

def start_termination(*args)
super
# Ask every pool to shut down; each one reports back via finish_termination
@pools.values.each { |pool| pool.tell([:start_termination, Concurrent::Promises.resolvable_future]) }
end

def finish_termination(pool_name)
@pools.delete(pool_name)
# we expect this message from all worker pools
return unless @pools.empty?
super()
end

# Collects execution status from all pools, keyed by pool name.
def execution_status(execution_plan_id = nil)
@pools.each_with_object({}) do |(pool_name, pool), hash|
hash[pool_name] = pool.ask!([:execution_status, execution_plan_id])
end
end

# Routes each work item to the pool serving its (suggested) queue.
def feed_pool(work_items)
work_items.each do |new_work|
new_work.world = @world
@pools.fetch(suggest_queue(new_work)).tell([:schedule_work, new_work])
end
end
end
end
end
end
| ruby | MIT | e14dcdfe57cec99cc78b2f6ab521f00809b1408a | 2026-01-04T17:50:16.326730Z | false |
Dynflow/dynflow | https://github.com/Dynflow/dynflow/blob/e14dcdfe57cec99cc78b2f6ab521f00809b1408a/lib/dynflow/executors/parallel/pool.rb | lib/dynflow/executors/parallel/pool.rb | # frozen_string_literal: true
module Dynflow
module Executors
class Parallel
class Pool < Actor
# FIFO buffer of work items waiting for a free worker.
class JobStorage
  def initialize
    @queue = []
  end

  # Appends a work item at the end of the queue.
  def add(work)
    @queue.push(work)
  end

  # Removes and returns the oldest queued item (nil when empty).
  def pop
    @queue.shift
  end

  # True when nothing is waiting.
  def empty?
    @queue.empty?
  end

  # Number of queued items; when an execution plan id is given, counts
  # only items belonging to that plan.
  def queue_size(execution_plan_id = nil)
    return @queue.size unless execution_plan_id
    @queue.count do |item|
      item.respond_to?(:execution_plan_id) && item.execution_plan_id == execution_plan_id
    end
  end
end
# @param world [World]
# @param core [Concurrent::Actor::Reference] executor core to report back to
# @param name [Symbol] name of the queue this pool serves
# @param pool_size [Integer] number of worker actors to spawn
# @param transaction_adapter [TransactionAdapters::Abstract]
def initialize(world, core, name, pool_size, transaction_adapter)
@world = world
@name = name
@executor_core = core
@pool_size = pool_size
@jobs = JobStorage.new

@free_workers = Array.new(pool_size) do |i|
name = "worker-#{i}"
Worker.spawn(name, reference, transaction_adapter, telemetry_options.merge(:worker => name))
end
end

# Enqueues a work item and hands it out if a worker is free.
def schedule_work(work)
@jobs.add work
distribute_jobs
update_telemetry
end

# Called by a worker that finished a work item: reports the result (plus
# any delayed events collected on the step) to the core and returns the
# worker to the free pool.
def worker_done(worker, work)
step = work.step if work.is_a?(Director::StepWorkItem)
@executor_core.tell([:work_finished, work, step && step.delayed_events])
@free_workers << worker
Dynflow::Telemetry.with_instance { |t| t.set_gauge(:dynflow_active_workers, -1, telemetry_options) }
distribute_jobs
end

# Forwards a persistence failure to the core and recycles the worker.
def handle_persistence_error(worker, error, work = nil)
@executor_core.tell([:handle_persistence_error, error, work])
@free_workers << worker
distribute_jobs
end

def start_termination(*args)
super
try_to_terminate
end

# Snapshot of pool utilization, optionally scoped to one execution plan.
def execution_status(execution_plan_id = nil)
{ :pool_size => @pool_size,
:free_workers => @free_workers.count,
:queue_size => @jobs.queue_size(execution_plan_id) }
end

private

# Terminates currently idle workers; once the last one is gone, reports
# back to the core and finishes the pool's own termination.
def try_to_terminate
if terminating?
@free_workers.map { |worker| worker.ask(:terminate!) }.map(&:wait)
@pool_size -= @free_workers.count
@free_workers = []
if @pool_size.zero?
@executor_core.tell([:finish_termination, @name])
finish_termination
end
end
end

# Pairs queued jobs with free workers until either runs out.
def distribute_jobs
try_to_terminate
until @free_workers.empty? || @jobs.empty?
# NOTE(review): the '+1' string (vs the integer -1 in worker_done) looks
# deliberate — statsd-style gauges express increments as signed strings;
# confirm with the telemetry adapter before changing it.
Dynflow::Telemetry.with_instance { |t| t.set_gauge(:dynflow_active_workers, '+1', telemetry_options) }
@free_workers.pop << @jobs.pop
update_telemetry
end
end

def telemetry_options
{ :queue => @name.to_s, :world => @world.id }
end

# Publishes the current queue depth to telemetry.
def update_telemetry
Dynflow::Telemetry.with_instance { |t| t.set_gauge(:dynflow_queue_size, @jobs.queue_size, telemetry_options) }
end
end
end
end
end
| ruby | MIT | e14dcdfe57cec99cc78b2f6ab521f00809b1408a | 2026-01-04T17:50:16.326730Z | false |
Dynflow/dynflow | https://github.com/Dynflow/dynflow/blob/e14dcdfe57cec99cc78b2f6ab521f00809b1408a/lib/dynflow/executors/parallel/worker.rb | lib/dynflow/executors/parallel/worker.rb | # frozen_string_literal: true
module Dynflow
module Executors
class Parallel
# Single worker actor: executes one work item per message and reports the
# outcome back to its owning pool.
class Worker < Actor
def initialize(pool, transaction_adapter, telemetry_options = {})
@pool = Type! pool, Concurrent::Actor::Reference
@transaction_adapter = Type! transaction_adapter, TransactionAdapters::Abstract
@telemetry_options = telemetry_options
end

# Executes the work item; persistence failures are routed to the pool
# instead of crashing the worker.
def on_message(work_item)
already_responded = false
Executors.run_user_code do
work_item.execute
end
rescue Errors::PersistenceError => e
@pool.tell([:handle_persistence_error, reference, e, work_item])
already_responded = true
ensure
Dynflow::Telemetry.with_instance { |t| t.increment_counter(:dynflow_worker_events, 1, @telemetry_options) }
# NOTE(review): the running? guard presumably avoids messaging the pool
# while the global executor is shutting down — confirm before relying on it
if !already_responded && Concurrent.global_io_executor.running?
@pool.tell([:worker_done, reference, work_item])
end
end
end
end
end
end
| ruby | MIT | e14dcdfe57cec99cc78b2f6ab521f00809b1408a | 2026-01-04T17:50:16.326730Z | false |
Dynflow/dynflow | https://github.com/Dynflow/dynflow/blob/e14dcdfe57cec99cc78b2f6ab521f00809b1408a/lib/dynflow/executors/sidekiq/core.rb | lib/dynflow/executors/sidekiq/core.rb | # frozen_string_literal: true
require 'dynflow/executors/sidekiq/serialization'
require 'dynflow/executors/sidekiq/internal_job_base'
require 'dynflow/executors/sidekiq/orchestrator_jobs'
require 'dynflow/executors/sidekiq/worker_jobs'
require 'dynflow/executors/sidekiq/redis_locking'
require 'sidekiq-reliable-fetch'
Sidekiq.configure_server do |config|
# Use semi-reliable fetch
# for details see https://gitlab.com/gitlab-org/sidekiq-reliable-fetch/blob/master/README.md
config[:semi_reliable_fetch] = true
Sidekiq::ReliableFetch.setup_reliable_fetch!(config)
end

# Dynflow passes rich objects through its own serializer, which Sidekiq's
# strict-args check would reject; disable the check where the API exists.
::Sidekiq.strict_args!(false) if ::Sidekiq.respond_to?(:strict_args!)
module Dynflow
module Executors
module Sidekiq
# Executor core variant that runs the orchestrator inside a Sidekiq
# process: work items are dispatched as Sidekiq jobs to worker processes
# instead of local thread pools, and a Redis lock guarantees only one
# orchestrator is active at a time.
class Core < Abstract::Core
include RedisLocking

TELEMETRY_UPDATE_INTERVAL = 30 # update telemetry every 30s

attr_reader :logger

def initialize(world, *_args)
@world = world
@logger = world.logger
# Block here until we own the orchestrator lock (passive mode otherwise)
wait_for_orchestrator_lock
super
schedule_update_telemetry
begin_startup!
end

def heartbeat
super
# Keep refreshing our ownership of the Redis orchestrator lock
reacquire_orchestrator_lock
end

def start_termination(*args)
super
release_orchestrator_lock
# No local pools to drain, so termination can finish immediately
finish_termination
end

# TODO: needs thoughts on how to implement it
def execution_status(execution_plan_id = nil)
{}
end

# Dispatches work items to worker processes via their Sidekiq queues.
def feed_pool(work_items)
work_items.each do |new_work|
WorkerJobs::PerformWork.set(queue: suggest_queue(new_work)).perform_async(new_work)
end
end

# Publishes per-queue depth gauges based on Sidekiq's own queue stats,
# then schedules the next update.
def update_telemetry
sidekiq_queues = ::Sidekiq::Stats.new.queues
@queues_options.keys.each do |queue|
queue_size = sidekiq_queues[queue.to_s]
if queue_size
Dynflow::Telemetry.with_instance { |t| t.set_gauge(:dynflow_queue_size, queue_size, telemetry_options(queue)) }
end
end
schedule_update_telemetry
end

def work_finished(work, delayed_events = nil)
# If the work item is sent in reply to a request from the current orchestrator, proceed
if work.sender_orchestrator_id == @world.id
super
else
# If we're in recovery, we can drop the work as the execution plan will be resumed during validity checks performed when leaving recovery
# If we're not in recovery and receive an event from another orchestrator, it means it survived the queue draining.
handle_unknown_work_item(work) unless @recovery
end
end

# Enqueues a drain marker and enters recovery mode: replies addressed to
# previous orchestrators are dropped until the marker comes back.
def begin_startup!
WorkerJobs::DrainMarker.perform_async(@world.id)
@recovery = true
end

# Invoked when the drain marker made it through the queue: runs validity
# checks, starts the delayed executor and leaves recovery mode.
def startup_complete
logger.info('Performing validity checks')
@world.perform_validity_checks
logger.info('Finished performing validity checks')
if @world.delayed_executor && !@world.delayed_executor.started?
@world.delayed_executor.start
end
@recovery = false
end

private

def fallback_queue
:default
end

def schedule_update_telemetry
@world.clock.ping(reference, TELEMETRY_UPDATE_INTERVAL, [:update_telemetry])
end

def telemetry_options(queue)
{ queue: queue.to_s, world: @world.id }
end

# We take a look if an execution lock is already being held by an orchestrator (it should be the current one). If no lock is held
# we try to resume the execution plan if possible
def handle_unknown_work_item(work)
# We are past recovery now, if we receive an event here, the execution plan will be most likely paused
# We can either try to rescue it or turn it over to stopped
execution_lock = @world.coordinator.find_locks(class: Coordinator::ExecutionLock.name,
id: "execution-plan:#{work.execution_plan_id}").first
if execution_lock.nil?
plan = @world.persistence.load_execution_plan(work.execution_plan_id)
should_resume = !plan.error? || plan.prepare_for_rescue == :running
@world.execute(plan.id) if should_resume
end
end
end
end
end
end
| ruby | MIT | e14dcdfe57cec99cc78b2f6ab521f00809b1408a | 2026-01-04T17:50:16.326730Z | false |
Dynflow/dynflow | https://github.com/Dynflow/dynflow/blob/e14dcdfe57cec99cc78b2f6ab521f00809b1408a/lib/dynflow/executors/sidekiq/serialization.rb | lib/dynflow/executors/sidekiq/serialization.rb | # frozen_string_literal: true
module Dynflow
module Executors
module Sidekiq
# Module to prepend the Sidekiq job to handle the serialization
# Serialization glue prepended to Sidekiq jobs: job arguments are dumped
# through Dynflow's serializer before enqueueing and loaded back before
# the job body runs.
module Serialization
  # Dumps a value through the configured Dynflow serializer.
  def self.serialize(value)
    Dynflow.serializer.dump(value)
  end

  # Loads a value through the configured Dynflow serializer, wrapping
  # plain hashes for indifferent (string/symbol) key access first.
  def self.deserialize(value)
    prepared = value.is_a?(Hash) ? Utils::IndifferentHash.new(value) : value
    Dynflow.serializer.load(prepared)
  end

  module WorkerExtension
    # Overriding the Sidekiq entry method to perform additional serialization preparation
    module ClassMethods
      def client_push(opts)
        serialized = opts['args'].map { |arg| Serialization.serialize(arg) }
        opts['args'] = serialized
        super(opts)
      end
    end

    def perform(*raw_args)
      deserialized = raw_args.map { |arg| Serialization.deserialize(arg) }
      super(*deserialized)
    end
  end
end
end
end
end
| ruby | MIT | e14dcdfe57cec99cc78b2f6ab521f00809b1408a | 2026-01-04T17:50:16.326730Z | false |
Dynflow/dynflow | https://github.com/Dynflow/dynflow/blob/e14dcdfe57cec99cc78b2f6ab521f00809b1408a/lib/dynflow/executors/sidekiq/worker_jobs.rb | lib/dynflow/executors/sidekiq/worker_jobs.rb | # frozen_string_literal: true
module Dynflow
module Executors
module Sidekiq
module WorkerJobs
# Sidekiq job running a single Dynflow work item inside a worker process.
class PerformWork < InternalJobBase
def perform(work_item)
with_telemetry(work_item) do
Executors.run_user_code do
work_item.world = Dynflow.process_world
work_item.execute
end
end
rescue Errors::PersistenceError => e
OrchestratorJobs::HandlePersistenceError.perform_async(e, work_item)
ensure
# Always report back, even on failure, so the orchestrator can move on
step = work_item.step if work_item.is_a?(Director::StepWorkItem)
OrchestratorJobs::WorkerDone.perform_async(work_item, step && step.delayed_events)
end

private

# Tracks the active-worker gauge and event counter around the block.
def with_telemetry(work_item)
Dynflow::Telemetry.with_instance { |t| t.set_gauge(:dynflow_active_workers, +1, telemetry_options(work_item)) }
yield
ensure
Dynflow::Telemetry.with_instance do |t|
t.increment_counter(:dynflow_worker_events, 1, telemetry_options(work_item))
t.set_gauge(:dynflow_active_workers, -1, telemetry_options(work_item))
end
end
end

# Marker job enqueued at orchestrator startup: once it is processed, all
# jobs enqueued before the (re)start have drained, so it signals the
# orchestrator that startup is complete.
class DrainMarker < InternalJobBase
def perform(world_id)
OrchestratorJobs::StartupComplete.perform_async(world_id)
end
end
end
end
end
end
| ruby | MIT | e14dcdfe57cec99cc78b2f6ab521f00809b1408a | 2026-01-04T17:50:16.326730Z | false |
Dynflow/dynflow | https://github.com/Dynflow/dynflow/blob/e14dcdfe57cec99cc78b2f6ab521f00809b1408a/lib/dynflow/executors/sidekiq/redis_locking.rb | lib/dynflow/executors/sidekiq/redis_locking.rb | # frozen_string_literal: true
module Dynflow
module Executors
module Sidekiq
# Redis-based mutual exclusion ensuring that only a single Dynflow
# orchestrator is active at a time when running on Sidekiq.
module RedisLocking
REDIS_LOCK_KEY = 'dynflow_orchestrator_uuid'
REDIS_LOCK_TTL = 60 # seconds before an unrefreshed lock expires
REDIS_LOCK_POLL_INTERVAL = 15 # seconds between acquisition attempts

# Return codes shared with the Lua scripts below
ACQUIRE_OK = 0 # we hold the lock
ACQUIRE_MISSING = 1 # the lock had expired and was re-taken
ACQUIRE_TAKEN = 2 # another world holds the lock

# Deletes the lock key, but only when we are still its owner.
RELEASE_SCRIPT = <<~LUA
if redis.call("get", KEYS[1]) == ARGV[1] then
redis.call("del", KEYS[1])
end
return #{ACQUIRE_OK}
LUA

# Refreshes the TTL when we still own the lock, reports ACQUIRE_TAKEN
# when someone else owns it, and re-takes it when it expired.
REACQUIRE_SCRIPT = <<~LUA
if redis.call("exists", KEYS[1]) == 1 then
local owner = redis.call("get", KEYS[1])
if owner == ARGV[1] then
redis.call("set", KEYS[1], ARGV[1], "XX", "EX", #{REDIS_LOCK_TTL})
return #{ACQUIRE_OK}
else
return #{ACQUIRE_TAKEN}
end
else
redis.call("set", KEYS[1], ARGV[1], "NX", "EX", #{REDIS_LOCK_TTL})
return #{ACQUIRE_MISSING}
end
LUA

def release_orchestrator_lock
::Sidekiq.redis { |conn| conn.eval RELEASE_SCRIPT, [REDIS_LOCK_KEY], [@world.id] }
end

# Polls until the orchestrator lock is acquired; logs a single notice
# when falling into passive mode in the meantime.
def wait_for_orchestrator_lock
mode = nil
loop do
active = try_acquire_orchestrator_lock
break if active
if mode.nil?
mode = :passive
@logger.info('Orchestrator lock already taken, entering passive mode.')
end
sleep REDIS_LOCK_POLL_INTERVAL
end
@logger.info('Acquired orchestrator lock, entering active mode.')
end

# Single SET NX EX attempt; truthy on success, nil on failure or Redis error.
def try_acquire_orchestrator_lock
::Sidekiq.redis do |conn|
conn.set(REDIS_LOCK_KEY, @world.id, :ex => REDIS_LOCK_TTL, :nx => true)
end
rescue ::Redis::BaseError => e
@logger.error("Could not acquire orchestrator lock: #{e}")
nil
end

# Called from the heartbeat: refreshes our ownership and aborts the
# process when the lock turns out to be held by another orchestrator.
def reacquire_orchestrator_lock
case ::Sidekiq.redis { |conn| conn.eval REACQUIRE_SCRIPT, [REDIS_LOCK_KEY], [@world.id] }
when ACQUIRE_MISSING
@logger.error('The orchestrator lock was lost, reacquired')
when ACQUIRE_TAKEN
owner = ::Sidekiq.redis { |conn| conn.get REDIS_LOCK_KEY }
@logger.fatal("The orchestrator lock was stolen by #{owner}, aborting.")
Process.kill('INT', Process.pid)
end
end
end
end
end
end
| ruby | MIT | e14dcdfe57cec99cc78b2f6ab521f00809b1408a | 2026-01-04T17:50:16.326730Z | false |
Dynflow/dynflow | https://github.com/Dynflow/dynflow/blob/e14dcdfe57cec99cc78b2f6ab521f00809b1408a/lib/dynflow/executors/sidekiq/orchestrator_jobs.rb | lib/dynflow/executors/sidekiq/orchestrator_jobs.rb | # frozen_string_literal: true
module Dynflow
module Executors
module Sidekiq
module OrchestratorJobs
# Handles responses about finished work from the workers on the
# orchestrator side.
class WorkerDone < InternalJobBase
sidekiq_options queue: :dynflow_orchestrator

# @param work_item [Director::WorkItem] the finished work item
# @param delayed_events [Array, nil] events the step scheduled for later delivery
def perform(work_item, delayed_events = nil)
# Usually the step is saved on the worker's side. However if sidekiq is shut down,
# then the step may not have been saved so we save it just to be sure
if work_item.is_a?(Director::StepWorkItem) && work_item.step&.error&.exception_class == ::Sidekiq::Shutdown
work_item.step.save
end
Dynflow.process_world.executor.core.tell([:work_finished, work_item, delayed_events])
end
end

# Plans an event for a step on the orchestrator side.
class PlanEvent < InternalJobBase
sidekiq_options queue: :dynflow_orchestrator

# @param execution_plan_id [String]
# @param step_id [Integer]
# @param event [Object] event payload to deliver to the step
# @param time [Time, nil] when to deliver the event
def perform(execution_plan_id, step_id, event, time)
Dynflow.process_world.plan_event(execution_plan_id, step_id, event, time)
end
end

# Forwards a persistence failure raised on a worker to the orchestrator core.
class HandlePersistenceError < InternalJobBase
sidekiq_options queue: :dynflow_orchestrator

# @param error [Errors::PersistenceError]
# @param work_item [Director::WorkItem] the work whose results failed to persist
def perform(error, work_item)
Dynflow.process_world.executor.core.tell([:handle_persistence_error, error, work_item])
end
end

# Signals the orchestrator that its startup drain marker made it through
# the worker queue, i.e. recovery can be finished.
class StartupComplete < InternalJobBase
sidekiq_options queue: :dynflow_orchestrator

# @param world_id [String] id of the world that enqueued the drain marker
def perform(world_id)
if Dynflow.process_world.id == world_id
Dynflow.process_world.executor.core.tell([:startup_complete])
else
logger.warn("Received startup complete for a different world #{world_id}, discarding.")
end
end
end
end
end
end
end
| ruby | MIT | e14dcdfe57cec99cc78b2f6ab521f00809b1408a | 2026-01-04T17:50:16.326730Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.