repo stringlengths 5 92 | file_url stringlengths 80 287 | file_path stringlengths 5 197 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 15:37:27 2026-01-04 17:58:21 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
dtaniwaki/mandriller | https://github.com/dtaniwaki/mandriller/blob/c9f6c0d840dd78547491504fe4450bf2b1317dce/lib/mandriller/base.rb | lib/mandriller/base.rb | require 'action_mailer'
require 'multi_json'
require_relative 'settings_methods'
class Mandriller::Base < ActionMailer::Base
include Mandriller::SettingsMethods
BOOLEAN_SETTINGS = {
auto_text: 'X-MC-Autotext',
auto_html: 'X-MC-AutoHtml',
url_strip_qs: 'X-MC-URLStripQS',
preserve_recipients: 'X-MC-PreserveRecipients',
inline_css: 'X-MC-InlineCSS',
view_content_link: 'X-MC-ViewContentLink',
important: 'X-MC-Important',
}
STRING_SETTINGS = {
tracking_domain: 'X-MC-TrackingDomain',
signing_domain: 'X-MC-SigningDomain',
subaccount: 'X-MC-Subaccount',
bcc_address: 'X-MC-BccAddress',
ip_pool: 'X-MC-IpPool',
google_analytics_campaign: 'X-MC-GoogleAnalyticsCampaign',
return_path_domain: 'X-MC-ReturnPathDomain',
}
JSON_SETTINGS = {
metadata: 'X-MC-Metadata',
merge_vars: 'X-MC-MergeVars',
}
ARRAY_SETTINGS = {
google_analytics: 'X-MC-GoogleAnalytics',
tags: 'X-MC-Tags',
}
DATETIME_SETTINGS = {
send_at: 'X-MC-SendAt',
}
define_settings_methods BOOLEAN_SETTINGS.keys, default: true, getter: lambda { |v| v ? 'true' : 'false' }
define_settings_methods STRING_SETTINGS.keys, getter: lambda { |v| v.to_s }
define_settings_methods JSON_SETTINGS.keys, getter: lambda { |v| MultiJson.dump(v) }
define_settings_methods ARRAY_SETTINGS.keys, getter: lambda { |v| Array(v).join(',') }
define_settings_methods DATETIME_SETTINGS.keys, getter: lambda { |v| v.to_time.utc.strftime('%Y-%m-%d %H:%M:%S') }
define_settings_methods :open_track, default: true
define_settings_methods :click_track, default: 'all'
class_attribute :mandrill_template
class << self
def set_template(template_name, block_name = nil)
self.mandrill_template = [template_name, block_name]
end
end
def set_template(template_name, block_name = nil)
@mandrill_template = [template_name, block_name].compact
end
def mail(*args)
tracks = []
tracks << (mandrill_open_track ? 'opens' : nil)
if v = mandrill_click_track
tracks << "clicks_#{v}"
end
tracks = tracks.compact.map(&:to_s)
unless tracks.empty?
tracks.each do |track|
validate_values!(track, %w(opens clicks_all clicks_htmlonly clicks_textonly))
end
self.headers['X-MC-Track'] = tracks.join(',')
end
v = mandrill_template
self.headers['X-MC-Template'] = v.join('|') unless v.nil? || v.empty?
[BOOLEAN_SETTINGS, STRING_SETTINGS, JSON_SETTINGS, ARRAY_SETTINGS, DATETIME_SETTINGS].inject({}, :merge).each do |key, header_name|
if is_mandrill_setting_defined?(key)
self.headers[header_name] = get_mandrill_setting(key)
end
end
super(*args)
end
private
def validate_values!(value, valid_values)
raise Mandriller::InvalidHeaderValue, "#{value} is not included in #{valid_values.join(', ')}" unless valid_values.include?(value)
end
end
| ruby | MIT | c9f6c0d840dd78547491504fe4450bf2b1317dce | 2026-01-04T17:55:41.600973Z | false |
dtaniwaki/mandriller | https://github.com/dtaniwaki/mandriller/blob/c9f6c0d840dd78547491504fe4450bf2b1317dce/lib/mandriller/settings_methods.rb | lib/mandriller/settings_methods.rb | module Mandriller
module SettingsMethods
def self.included(base)
base.extend(ClassMethods)
end
module ClassMethods
def define_settings_methods(*keys)
options = keys[-1].is_a?(Hash) ? keys.pop : {}
keys.flatten.each do |key|
class_attribute "mandrill_#{key}"
define_mandrill_setter(key, options)
define_mandrill_getter(key, options)
end
end
private
def define_mandrill_setter(key, options = {})
if default = options[:default]
arg_s = "v = #{default.inspect}"
else
arg_s = "v"
end
method_name = "set_mandrill_setting_#{key}"
[self, singleton_class].each do |base|
base.class_eval <<-EOS
def #{method_name}(#{arg_s})
self.mandrill_#{key} = v
end
private :#{method_name}
alias_method :set_#{key}, :#{method_name}
EOS
end
end
def define_mandrill_getter(key, options = {})
getter = options[:getter]
method_name = "get_mandrill_setting_#{key}"
define_method method_name do
v = __send__("mandrill_#{key}")
if getter
getter.call(v)
else
v
end
end
private method_name
end
end
def is_mandrill_setting_defined?(key)
!__send__("mandrill_#{key}").nil?
end
private :is_mandrill_setting_defined?
def get_mandrill_setting(key)
__send__ "get_mandrill_setting_#{key}"
end
private :get_mandrill_setting
end
end
| ruby | MIT | c9f6c0d840dd78547491504fe4450bf2b1317dce | 2026-01-04T17:55:41.600973Z | false |
Shopify/toxiproxy-ruby | https://github.com/Shopify/toxiproxy-ruby/blob/7e04390a135b88ea28e866a1567b7284f4d58635/test/test_helper.rb | test/test_helper.rb | # frozen_string_literal: true
require "minitest/autorun"
require_relative "../lib/toxiproxy"
require "webmock/minitest"
WebMock.disable!
| ruby | MIT | 7e04390a135b88ea28e866a1567b7284f4d58635 | 2026-01-04T17:55:41.625156Z | false |
Shopify/toxiproxy-ruby | https://github.com/Shopify/toxiproxy-ruby/blob/7e04390a135b88ea28e866a1567b7284f4d58635/test/toxiproxy_test.rb | test/toxiproxy_test.rb | # frozen_string_literal: true
require "test_helper"
class ToxiproxyTest < Minitest::Test
def teardown
Toxiproxy.grep(/\Atest_/).each(&:destroy)
end
def test_create_proxy
proxy = Toxiproxy.create(upstream: "localhost:3306", name: "test_mysql_master")
assert_equal("localhost:3306", proxy.upstream)
assert_equal("test_mysql_master", proxy.name)
end
def test_find_proxy
Toxiproxy.create(upstream: "localhost:3306", name: "test_mysql_master")
proxy = Toxiproxy[:test_mysql_master]
assert_equal("localhost:3306", proxy.upstream)
assert_equal("test_mysql_master", proxy.name)
end
def test_proxy_not_running_with_bad_host
Toxiproxy.host = "http://0.0.0.0:12345"
refute_predicate(Toxiproxy, :running?, "toxiproxy should not be running")
ensure
Toxiproxy.host = Toxiproxy::DEFAULT_URI
end
def test_toggle_proxy
with_tcpserver do |port|
proxy = Toxiproxy.create(upstream: "localhost:#{port}", name: "test_rubby_server")
Toxiproxy::Toxic.new(type: "latency", attributes: { latency: 123 }, proxy: proxy).save
proxy.disable
assert_proxy_unavailable(proxy)
proxy.enable
assert_proxy_available(proxy)
end
end
def test_toxic_available_after_toggle_toxic
with_tcpserver do |port|
proxy = Toxiproxy.create(upstream: "localhost:#{port}", name: "test_rubby_server")
listen_addr = proxy.listen
Toxiproxy::Toxic.new(type: "latency", attributes: { latency: 123 }, proxy: proxy).save
proxy.disable
proxy.enable
latency = proxy.toxics.find { |toxic| toxic.name == "latency_downstream" }
assert_equal(123, latency.attributes["latency"])
assert_equal(listen_addr, proxy.listen)
end
end
def test_delete_toxic
with_tcpserver do |port|
proxy = Toxiproxy.create(upstream: "localhost:#{port}", name: "test_rubby_server")
listen_addr = proxy.listen
Toxiproxy::Toxic.new(type: "latency", attributes: { latency: 123 }, proxy: proxy).save
latency = proxy.toxics.find { |toxic| toxic.name == "latency_downstream" }
latency.destroy
assert_empty(proxy.toxics)
assert_equal(listen_addr, proxy.listen)
end
end
def test_reset
with_tcpserver do |port|
proxy = Toxiproxy.create(upstream: "localhost:#{port}", name: "test_rubby_server")
listen_addr = proxy.listen
proxy.disable
Toxiproxy::Toxic.new(type: "latency", attributes: { latency: 123 }, proxy: proxy).save
Toxiproxy.reset
assert_proxy_available(proxy)
assert_empty(proxy.toxics)
assert_equal(listen_addr, proxy.listen)
end
end
def test_take_endpoint_down
with_tcpserver do |port|
proxy = Toxiproxy.create(upstream: "localhost:#{port}", name: "test_rubby_server")
listen_addr = proxy.listen
proxy.down do
assert_proxy_unavailable(proxy)
end
assert_proxy_available(proxy)
assert_equal(listen_addr, proxy.listen)
end
end
def test_raises_when_proxy_doesnt_exist
assert_raises(Toxiproxy::NotFound) do
Toxiproxy[:does_not_exist]
end
end
def test_proxies_all_returns_proxy_collection
assert_instance_of(Toxiproxy::ProxyCollection, Toxiproxy.all)
end
def test_down_on_proxy_collection_disables_entire_collection
with_tcpserver do |port1|
with_tcpserver do |port2|
proxies = [
Toxiproxy.create(upstream: "localhost:#{port1}", name: "test_proxy1"),
Toxiproxy.create(upstream: "localhost:#{port2}", name: "test_proxy2"),
]
Toxiproxy.all.down do
proxies.each { |proxy| assert_proxy_unavailable(proxy) }
end
proxies.each { |proxy| assert_proxy_available(proxy) }
end
end
end
def test_disable_on_proxy_collection
with_tcpserver do |port1|
with_tcpserver do |port2|
proxies = [
Toxiproxy.create(upstream: "localhost:#{port1}", name: "test_proxy1"),
Toxiproxy.create(upstream: "localhost:#{port2}", name: "test_proxy2"),
]
Toxiproxy.all.disable
proxies.each { |proxy| assert_proxy_unavailable(proxy) }
Toxiproxy.all.enable
proxies.each { |proxy| assert_proxy_available(proxy) }
end
end
end
def test_select_from_toxiproxy_collection
with_tcpserver do |port|
Toxiproxy.create(upstream: "localhost:#{port}", name: "test_proxy")
proxies = Toxiproxy.select { |p| p.upstream == "localhost:#{port}" }
assert_equal(1, proxies.size)
assert_instance_of(Toxiproxy::ProxyCollection, proxies)
end
end
def test_grep_returns_toxiproxy_collection
with_tcpserver do |port|
Toxiproxy.create(upstream: "localhost:#{port}", name: "test_proxy")
proxies = Toxiproxy.grep(/\Atest/)
assert_equal(1, proxies.size)
assert_instance_of(Toxiproxy::ProxyCollection, proxies)
end
end
def test_indexing_allows_regexp
with_tcpserver do |port|
Toxiproxy.create(upstream: "localhost:#{port}", name: "test_proxy")
proxies = Toxiproxy[/\Atest/]
assert_equal(1, proxies.size)
assert_instance_of(Toxiproxy::ProxyCollection, proxies)
end
end
def test_apply_upstream_toxic
with_tcpserver(receive: true) do |port|
proxy = Toxiproxy.create(upstream: "localhost:#{port}", name: "test_proxy")
proxy.upstream(:latency, latency: 100).apply do
before = Time.now
socket = connect_to_proxy(proxy)
socket.write("omg\n")
socket.flush
socket.gets
passed = Time.now - before
assert_in_delta(passed, 0.100, 0.01)
end
end
end
def test_apply_downstream_toxic
with_tcpserver(receive: true) do |port|
proxy = Toxiproxy.create(upstream: "localhost:#{port}", name: "test_proxy")
proxy.downstream(:latency, latency: 100).apply do
before = Time.now
socket = connect_to_proxy(proxy)
socket.write("omg\n")
socket.flush
socket.gets
passed = Time.now - before
assert_in_delta(passed, 0.100, 0.01)
end
end
end
def test_toxic_applies_a_downstream_toxic
with_tcpserver(receive: true) do |port|
proxy = Toxiproxy.create(upstream: "localhost:#{port}", name: "test_proxy")
proxy.toxic(:latency, latency: 100).apply do
latency = proxy.toxics.find { |toxic| toxic.name == "latency_downstream" }
assert_equal(100, latency.attributes["latency"])
assert_equal("downstream", latency.stream)
end
end
end
def test_toxic_default_name_is_type_and_stream
toxic = Toxiproxy::Toxic.new(type: "latency", stream: "downstream")
assert_equal("latency_downstream", toxic.name)
end
def test_apply_prolong_toxics
with_tcpserver(receive: true) do |port|
proxy = Toxiproxy.create(upstream: "localhost:#{port}", name: "test_proxy")
proxy.upstream(:latency, latency: 100).downstream(:latency, latency: 100).apply do
before = Time.now
socket = connect_to_proxy(proxy)
socket.write("omg\n")
socket.flush
socket.gets
passed = Time.now - before
assert_in_delta(passed, 0.200, 0.01)
end
end
end
def test_apply_toxics_to_collection
with_tcpserver(receive: true) do |port1|
with_tcpserver(receive: true) do |port2|
proxy1 = Toxiproxy.create(upstream: "localhost:#{port1}", name: "test_proxy1")
proxy2 = Toxiproxy.create(upstream: "localhost:#{port2}", name: "test_proxy2")
Toxiproxy[/test_proxy/].upstream(:latency, latency: 100).downstream(:latency, latency: 100).apply do
before = Time.now
socket = connect_to_proxy(proxy1)
socket.write("omg\n")
socket.flush
socket.gets
passed = Time.now - before
assert_in_delta(passed, 0.200, 0.01)
before = Time.now
socket = connect_to_proxy(proxy2)
socket.write("omg\n")
socket.flush
socket.gets
passed = Time.now - before
assert_in_delta(passed, 0.200, 0.01)
end
end
end
end
def test_populate_creates_proxies_array
proxies = [
{
name: "test_toxiproxy_populate1",
upstream: "localhost:3306",
listen: "localhost:22222",
},
{
name: "test_toxiproxy_populate2",
upstream: "localhost:3306",
listen: "localhost:22223",
},
]
proxies = Toxiproxy.populate(proxies)
proxies.each do |proxy|
assert_proxy_available(proxy)
end
end
def test_populate_creates_proxies_args
proxies = [
{
name: "test_toxiproxy_populate1",
upstream: "localhost:3306",
listen: "localhost:22222",
},
{
name: "test_toxiproxy_populate2",
upstream: "localhost:3306",
listen: "localhost:22223",
},
]
proxies = Toxiproxy.populate(*proxies)
proxies.each do |proxy|
assert_proxy_available(proxy)
end
end
def test_populate_creates_proxies_update_listen
proxies = [{
name: "test_toxiproxy_populate1",
upstream: "localhost:3306",
listen: "localhost:22222",
}]
Toxiproxy.populate(proxies)
proxies = [{
name: "test_toxiproxy_populate1",
upstream: "localhost:3306",
listen: "localhost:22223",
}]
proxies = Toxiproxy.populate(proxies)
proxies.each do |proxy|
assert_proxy_available(proxy)
end
end
def test_populate_creates_proxies_update_upstream
proxy_name = "test_toxiproxy_populate1"
proxies_config = [{
name: proxy_name,
upstream: "localhost:3306",
listen: "localhost:22222",
}]
proxies = Toxiproxy.populate(proxies_config)
proxies_config = [{
name: proxy_name,
upstream: "localhost:3307",
listen: "localhost:22222",
}]
proxies2 = Toxiproxy.populate(proxies_config)
refute_equal(
proxies.find(name: proxy_name).first.upstream,
proxies2.find(name: proxy_name).first.upstream,
)
proxies2.each do |proxy|
assert_proxy_available(proxy)
end
end
def test_running_helper
assert_predicate(Toxiproxy, :running?)
end
def test_version
assert_instance_of(String, Toxiproxy.version)
end
def test_server_supports_patch_with_version_2_6_0
Toxiproxy.stub(:version, '{"version": "2.6.0"}') do
assert(Toxiproxy.new(upstream: "localhost:3306", name: "test").send(:server_supports_patch?))
end
end
def test_server_supports_patch_with_version_2_7_0
Toxiproxy.stub(:version, '{"version": "2.7.0"}') do
assert(Toxiproxy.new(upstream: "localhost:3306", name: "test").send(:server_supports_patch?))
end
end
def test_server_supports_patch_with_version_3_0_0
Toxiproxy.stub(:version, '{"version": "3.0.0"}') do
assert(Toxiproxy.new(upstream: "localhost:3306", name: "test").send(:server_supports_patch?))
end
end
def test_does_not_support_patch_for_enable_disable_with_version_below_2_6_0
Toxiproxy.stub(:version, '{"version": "2.5.0"}') do
refute(Toxiproxy.new(upstream: "localhost:3306", name: "test").send(:server_supports_patch?))
end
end
def test_does_not_support_patch_for_enable_disable_when_not_running
Toxiproxy.stub(:running?, false) do
refute(Toxiproxy.new(upstream: "localhost:3306", name: "test").send(:server_supports_patch?))
end
end
def test_does_not_support_patch_for_enable_disable_with_invalid_version
Toxiproxy.stub(:version, "invalid") do
refute(Toxiproxy.new(upstream: "localhost:3306", name: "test").send(:server_supports_patch?))
end
end
def test_disable_uses_patch_when_version_supports_it
proxy = Toxiproxy.new(upstream: "localhost:3306", name: "test_proxy_patch")
# Mock version to return JSON with 2.6.0 (supports PATCH)
Toxiproxy.stub(:version, '{"version": "2.6.0"}') do
# Mock the http_request method to capture the request type
request_captured = nil
proxy.stub(:http_request, ->(req) {
request_captured = req
double = Object.new
double.define_singleton_method(:value) do
nil
end
double
}) do
proxy.disable
end
# Verify PATCH was used
assert_instance_of(Net::HTTP::Patch, request_captured)
assert_equal("/proxies/test_proxy_patch", request_captured.path)
assert_equal({ enabled: false }.to_json, request_captured.body)
end
end
def test_enable_uses_patch_when_version_supports_it
proxy = Toxiproxy.new(upstream: "localhost:3306", name: "test_proxy_patch_enable")
# Mock version to return JSON with 2.6.0 (supports PATCH)
Toxiproxy.stub(:version, '{"version": "2.6.0"}') do
# Mock the http_request method to capture the request type
request_captured = nil
proxy.stub(:http_request, ->(req) {
request_captured = req
double = Object.new
double.define_singleton_method(:value) do
nil
end
double
}) do
proxy.enable
end
# Verify PATCH was used
assert_instance_of(Net::HTTP::Patch, request_captured)
assert_equal("/proxies/test_proxy_patch_enable", request_captured.path)
assert_equal({ enabled: true }.to_json, request_captured.body)
end
end
def test_disable_uses_post_when_version_does_not_support_patch
proxy = Toxiproxy.new(upstream: "localhost:3306", name: "test_proxy_post")
# Mock version to return JSON with 2.5.0 (does not support PATCH)
Toxiproxy.stub(:version, '{"version": "2.5.0"}') do
# Mock the http_request method to capture the request type
request_captured = nil
proxy.stub(:http_request, ->(req) {
request_captured = req
double = Object.new
double.define_singleton_method(:value) do
nil
end
double
}) do
proxy.disable
end
# Verify POST was used
assert_instance_of(Net::HTTP::Post, request_captured)
assert_equal("/proxies/test_proxy_post", request_captured.path)
assert_equal({ enabled: false }.to_json, request_captured.body)
end
end
def test_enable_uses_post_when_version_does_not_support_patch
proxy = Toxiproxy.new(upstream: "localhost:3306", name: "test_proxy_post_enable")
# Mock version to return JSON with 2.5.0 (does not support PATCH)
Toxiproxy.stub(:version, '{"version": "2.5.0"}') do
# Mock the http_request method to capture the request type
request_captured = nil
proxy.stub(:http_request, ->(req) {
request_captured = req
double = Object.new
double.define_singleton_method(:value) do
nil
end
double
}) do
proxy.enable
end
# Verify POST was used
assert_instance_of(Net::HTTP::Post, request_captured)
assert_equal("/proxies/test_proxy_post_enable", request_captured.path)
assert_equal({ enabled: true }.to_json, request_captured.body)
end
end
def test_multiple_of_same_toxic_type
with_tcpserver(receive: true) do |port|
proxy = Toxiproxy.create(upstream: "localhost:#{port}", name: "test_proxy")
proxy.toxic(:latency, latency: 100).toxic(:latency, latency: 100, name: "second_latency_downstream").apply do
before = Time.now
socket = connect_to_proxy(proxy)
socket.write("omg\n")
socket.flush
socket.gets
passed = Time.now - before
assert_in_delta(passed, 0.200, 0.01)
end
end
end
def test_multiple_of_same_toxic_type_with_same_name
with_tcpserver(receive: true) do |port|
proxy = Toxiproxy.create(upstream: "localhost:#{port}", name: "test_proxy")
assert_raises(ArgumentError) do
proxy.toxic(:latency, latency: 100).toxic(:latency, latency: 100).apply {}
end
end
end
def test_invalid_direction
with_tcpserver(receive: true) do |port|
proxy = Toxiproxy.create(upstream: "localhost:#{port}", name: "test_rubby_server")
assert_raises(Toxiproxy::InvalidToxic) do
Toxiproxy::Toxic.new(type: "latency", attributes: { latency: 123 }, proxy: proxy, stream: "lolstream").save
end
end
end
def test_whitelists_webmock_when_allow_is_nil
with_webmock_enabled do
WebMock::Config.instance.allow = nil
Toxiproxy.version # This should initialize the list.
assert_includes(WebMock::Config.instance.allow, @endpoint)
end
end
def test_whitelisting_webmock_does_not_override_other_configuration
with_webmock_enabled do
WebMock::Config.instance.allow = ["some-other-host"]
Toxiproxy.version
# 'some-other-host' should not be overriden.
assert_includes(WebMock::Config.instance.allow, "some-other-host")
assert_includes(WebMock::Config.instance.allow, @endpoint)
Toxiproxy.version
# Endpoint should not be duplicated.
assert_equal(1, WebMock::Config.instance.allow.count(@endpoint))
end
end
def test_invalidate_cache_http_on_host
old_value = Toxiproxy.uri
assert_equal(8474, Toxiproxy.http.port)
Toxiproxy.host = "http://127.0.0.1:8475"
assert_equal(8475, Toxiproxy.http.port)
ensure
Toxiproxy.host = old_value
end
private
def with_webmock_enabled
WebMock.enable!
WebMock.disable_net_connect!
@endpoint = "#{Toxiproxy.uri.host}:#{Toxiproxy.uri.port}"
yield
ensure
WebMock.disable!
end
def assert_proxy_available(proxy)
connect_to_proxy(proxy)
end
def assert_proxy_unavailable(proxy)
assert_raises(Errno::ECONNREFUSED) do
connect_to_proxy(proxy)
end
end
def connect_to_proxy(proxy)
TCPSocket.new(*proxy.listen.split(":"))
end
def with_tcpserver(receive = false, &block)
mon = Monitor.new
cond = mon.new_cond
port = nil
thread = Thread.new do
server = TCPServer.new("127.0.0.1", 0)
port = server.addr[1]
mon.synchronize { cond.signal }
loop do
client = server.accept
if receive
client.gets
client.write("omgs\n")
client.flush
end
client.close
end
server.close
end
mon.synchronize { cond.wait }
yield(port)
ensure
thread.kill
end
end
| ruby | MIT | 7e04390a135b88ea28e866a1567b7284f4d58635 | 2026-01-04T17:55:41.625156Z | false |
Shopify/toxiproxy-ruby | https://github.com/Shopify/toxiproxy-ruby/blob/7e04390a135b88ea28e866a1567b7284f4d58635/lib/toxiproxy.rb | lib/toxiproxy.rb | # frozen_string_literal: true
require "json"
require "uri"
require "net/http"
require "forwardable"
require "toxiproxy/toxic"
require "toxiproxy/toxic_collection"
require "toxiproxy/proxy_collection"
class Toxiproxy
extend SingleForwardable
DEFAULT_URI = "http://127.0.0.1:8474"
VALID_DIRECTIONS = [:upstream, :downstream]
class NotFound < StandardError; end
class ProxyExists < StandardError; end
class InvalidToxic < StandardError; end
attr_reader :listen, :name, :enabled
@http = nil
@timeout = 5 # Should be more than plenty
def initialize(options)
@upstream = options[:upstream]
@listen = options[:listen] || "localhost:0"
@name = options[:name]
@enabled = options[:enabled]
end
def_delegators :all, *ProxyCollection::METHODS
class << self
attr_accessor :timeout
# Re-enables all proxies and disables all toxics.
def reset
request = Net::HTTP::Post.new("/reset")
request["Content-Type"] = "application/json"
response = http_request(request)
assert_response(response)
self
end
def version
return false unless running?
request = Net::HTTP::Get.new("/version")
response = http_request(request)
assert_response(response)
response.body
end
# Returns a collection of all currently active Toxiproxies.
def all
request = Net::HTTP::Get.new("/proxies")
response = http_request(request)
assert_response(response)
proxies = JSON.parse(response.body).map do |_name, attrs|
new({
upstream: attrs["upstream"],
listen: attrs["listen"],
name: attrs["name"],
enabled: attrs["enabled"],
})
end
ProxyCollection.new(proxies)
end
# Sets the toxiproxy host to use.
def host=(host)
@uri = host.is_a?(::URI) ? host : ::URI.parse(host)
reset_http_client!
@uri
end
# Convenience method to create a proxy.
def create(options)
new(options).create
end
# Find a single proxy by name.
def find_by_name(name = nil, &block)
all.find { |p| p.name == name.to_s }
end
# Calls find_by_name and raises NotFound if not found
def find_by_name!(*args)
proxy = find_by_name(*args)
raise NotFound, "#{name} not found in #{all.map(&:name).join(", ")}" unless proxy
proxy
end
# If given a regex, it'll use `grep` to return a Toxiproxy::Collection.
# Otherwise, it'll convert the passed object to a string and find the proxy by
# name.
def [](query)
return grep(query) if query.is_a?(Regexp)
find_by_name!(query)
end
def populate(*proxies)
proxies = proxies.first if proxies.first.is_a?(Array)
request = Net::HTTP::Post.new("/populate")
request.body = proxies.to_json
request["Content-Type"] = "application/json"
response = http_request(request)
assert_response(response)
proxies = JSON.parse(response.body).fetch("proxies", []).map do |attrs|
new({
upstream: attrs["upstream"],
listen: attrs["listen"],
name: attrs["name"],
enabled: attrs["enabled"],
})
end
ProxyCollection.new(proxies)
end
def running?
TCPSocket.new(uri.host, uri.port).close
true
rescue Errno::ECONNREFUSED, Errno::ECONNRESET
false
end
def http_request(request)
ensure_webmock_whitelists_toxiproxy if defined? WebMock
http.request(request)
end
def http
@http ||= begin
connection = Net::HTTP.new(uri.host, uri.port)
connection.write_timeout = @timeout
connection.read_timeout = @timeout
connection.open_timeout = @timeout
connection
end
end
def assert_response(response)
case response
when Net::HTTPConflict
raise Toxiproxy::ProxyExists, response.body
when Net::HTTPNotFound
raise Toxiproxy::NotFound, response.body
when Net::HTTPBadRequest
raise Toxiproxy::InvalidToxic, response.body
else
response.value # raises if not OK
end
end
def uri
@uri ||= ::URI.parse(DEFAULT_URI)
end
def ensure_webmock_whitelists_toxiproxy
endpoint = "#{uri.host}:#{uri.port}"
WebMock::Config.instance.allow ||= []
unless WebMock::Config.instance.allow.include?(endpoint)
WebMock::Config.instance.allow << endpoint
end
end
def reset_http_client!
@http.finish if @http&.started?
@http = nil
end
end
# Set an upstream toxic.
def upstream(type = nil, attrs = {})
return @upstream unless type # also alias for the upstream endpoint
collection = ToxicCollection.new([self])
collection.upstream(type, attrs)
collection
end
# Set a downstream toxic.
def downstream(type, attrs = {})
collection = ToxicCollection.new([self])
collection.downstream(type, attrs)
collection
end
alias_method :toxic, :downstream
alias_method :toxicate, :downstream
# Simulates the endpoint is down, by closing the connection and no
# longer accepting connections. This is useful to simulate critical system
# failure, such as a data store becoming completely unavailable.
def down(&block)
disable
yield
ensure
enable
end
# Disables a Toxiproxy. This will drop all active connections and stop the proxy from listening.
def disable
request = if server_supports_patch?
Net::HTTP::Patch.new("/proxies/#{name}")
else
Net::HTTP::Post.new("/proxies/#{name}")
end
request["Content-Type"] = "application/json"
hash = { enabled: false }
request.body = hash.to_json
response = http_request(request)
assert_response(response)
self
end
# Enables a Toxiproxy. This will cause the proxy to start listening again.
def enable
request = if server_supports_patch?
Net::HTTP::Patch.new("/proxies/#{name}")
else
Net::HTTP::Post.new("/proxies/#{name}")
end
request["Content-Type"] = "application/json"
hash = { enabled: true }
request.body = hash.to_json
response = http_request(request)
assert_response(response)
self
end
# Create a Toxiproxy, proxying traffic from `@listen` (optional argument to
# the constructor) to `@upstream`. `#down` `#upstream` or `#downstream` can at any time alter the health
# of this connection.
def create
request = Net::HTTP::Post.new("/proxies")
request["Content-Type"] = "application/json"
hash = { upstream: upstream, name: name, listen: listen, enabled: enabled }
request.body = hash.to_json
response = http_request(request)
assert_response(response)
new = JSON.parse(response.body)
@listen = new["listen"]
self
end
# Destroys a Toxiproxy.
def destroy
request = Net::HTTP::Delete.new("/proxies/#{name}")
response = http_request(request)
assert_response(response)
self
end
# Returns an array of the current toxics for a direction.
def toxics
request = Net::HTTP::Get.new("/proxies/#{name}/toxics")
response = http_request(request)
assert_response(response)
JSON.parse(response.body).map do |attrs|
Toxic.new(
type: attrs["type"],
name: attrs["name"],
proxy: self,
stream: attrs["stream"],
toxicity: attrs["toxicity"],
attributes: attrs["attributes"],
)
end
end
private
def version_string
return @version_string if @version_string
version_response = self.class.version
return false if version_response == false
@version_string = begin
JSON.parse(version_response)["version"]
rescue JSON::ParserError
false
end
end
# Check if the toxiproxy server version supports PATCH for enable/disable
def server_supports_patch?
version_str = version_string
return false if version_str == false
begin
# Use Gem::Version for proper version comparison
current_version = Gem::Version.new(version_str.sub(/^v/, "")) # Remove 'v' prefix if present
required_version = Gem::Version.new("2.6.0")
current_version >= required_version
rescue ArgumentError
# Invalid version format
false
end
end
def http_request(request)
self.class.http_request(request)
end
def http
self.class.http
end
def assert_response(*args)
self.class.assert_response(*args)
end
end
| ruby | MIT | 7e04390a135b88ea28e866a1567b7284f4d58635 | 2026-01-04T17:55:41.625156Z | false |
Shopify/toxiproxy-ruby | https://github.com/Shopify/toxiproxy-ruby/blob/7e04390a135b88ea28e866a1567b7284f4d58635/lib/toxiproxy/toxic_collection.rb | lib/toxiproxy/toxic_collection.rb | # frozen_string_literal: true
class Toxiproxy
class ToxicCollection
extend Forwardable
attr_accessor :toxics
attr_reader :proxies
def_delegators :@toxics, :<<, :find
def initialize(proxies)
@proxies = proxies
@toxics = []
end
def apply(&block)
names = toxics.group_by { |t| [t.name, t.proxy.name] }
dups = names.values.select { |toxics| toxics.length > 1 }
unless dups.empty?
raise ArgumentError,
"There are two toxics with the name #{dups.first[0]} for proxy #{dups.first[1]}, " \
"please override the default name (<type>_<direction>)"
end
begin
@toxics.each(&:save)
yield
ensure
@toxics.each(&:destroy)
end
end
def upstream(type, attrs = {})
proxies.each do |proxy|
toxics << Toxic.new(
name: attrs.delete("name") || attrs.delete(:name),
type: type,
proxy: proxy,
stream: :upstream,
toxicity: attrs.delete("toxicitiy") || attrs.delete(:toxicity),
attributes: attrs,
)
end
self
end
def downstream(type, attrs = {})
proxies.each do |proxy|
toxics << Toxic.new(
name: attrs.delete("name") || attrs.delete(:name),
type: type,
proxy: proxy,
stream: :downstream,
toxicity: attrs.delete("toxicitiy") || attrs.delete(:toxicity),
attributes: attrs,
)
end
self
end
alias_method :toxic, :downstream
alias_method :toxicate, :downstream
end
end
| ruby | MIT | 7e04390a135b88ea28e866a1567b7284f4d58635 | 2026-01-04T17:55:41.625156Z | false |
Shopify/toxiproxy-ruby | https://github.com/Shopify/toxiproxy-ruby/blob/7e04390a135b88ea28e866a1567b7284f4d58635/lib/toxiproxy/version.rb | lib/toxiproxy/version.rb | # frozen_string_literal: true
class Toxiproxy
VERSION = "2.0.2"
end
| ruby | MIT | 7e04390a135b88ea28e866a1567b7284f4d58635 | 2026-01-04T17:55:41.625156Z | false |
Shopify/toxiproxy-ruby | https://github.com/Shopify/toxiproxy-ruby/blob/7e04390a135b88ea28e866a1567b7284f4d58635/lib/toxiproxy/proxy_collection.rb | lib/toxiproxy/proxy_collection.rb | # frozen_string_literal: true
class Toxiproxy
# ProxyCollection represents a set of proxies. This allows to easily perform
# actions on every proxy in the collection.
#
# Unfortunately, it doesn't implement all of Enumerable because there's no way
# to subclass an Array or include Enumerable for the methods to return a
# Collection instead of an Array (see MRI). Instead, we delegate methods where
# it doesn't matter and only allow the filtering methods that really make
# sense on a proxy collection.
class ProxyCollection
extend Forwardable
DELEGATED_METHODS = [:length, :size, :count, :find, :each, :map]
DEFINED_METHODS = [:select, :reject, :grep, :down]
METHODS = DEFINED_METHODS + DELEGATED_METHODS
def_delegators :@collection, *DELEGATED_METHODS
def initialize(collection)
@collection = collection
end
# Sets every proxy in the collection as down. For example:
#
# Toxiproxy.grep(/redis/).down { .. }
#
# Would simulate every Redis server being down for the duration of the
# block.
def down(&block)
@collection.inject(block) do |nested, proxy|
-> { proxy.down(&nested) }
end.call
end
# Set an upstream toxic.
def upstream(toxic, attrs = {})
toxics = ToxicCollection.new(@collection)
toxics.upstream(toxic, attrs)
toxics
end
# Set a downstream toxic.
def downstream(toxic, attrs = {})
toxics = ToxicCollection.new(@collection)
toxics.downstream(toxic, attrs)
toxics
end
alias_method :toxicate, :downstream
alias_method :toxic, :downstream
def disable
@collection.each(&:disable)
end
def enable
@collection.each(&:enable)
end
# Destroys all toxiproxy's in the collection
def destroy
@collection.each(&:destroy)
end
def select(&block)
self.class.new(@collection.select(&block))
end
def reject(&block)
self.class.new(@collection.reject(&block))
end
# Grep allows easily selecting a subset of proxies, by returning a
# ProxyCollection with every proxy name matching the regex passed.
def grep(regex)
self.class.new(@collection.select { |proxy| proxy.name =~ regex })
end
end
end
| ruby | MIT | 7e04390a135b88ea28e866a1567b7284f4d58635 | 2026-01-04T17:55:41.625156Z | false |
Shopify/toxiproxy-ruby | https://github.com/Shopify/toxiproxy-ruby/blob/7e04390a135b88ea28e866a1567b7284f4d58635/lib/toxiproxy/toxic.rb | lib/toxiproxy/toxic.rb | # frozen_string_literal: true
class Toxiproxy
class Toxic
attr_reader :name, :type, :stream, :proxy
attr_accessor :attributes, :toxicity
def initialize(attrs)
raise "Toxic type is required" unless attrs[:type]
@type = attrs[:type]
@stream = attrs[:stream] || "downstream"
@name = attrs[:name] || "#{@type}_#{@stream}"
@proxy = attrs[:proxy]
@toxicity = attrs[:toxicity] || 1.0
@attributes = attrs[:attributes] || {}
end
def save
request = Net::HTTP::Post.new("/proxies/#{proxy.name}/toxics")
request["Content-Type"] = "application/json"
request.body = as_json
response = Toxiproxy.http_request(request)
Toxiproxy.assert_response(response)
json = JSON.parse(response.body)
@attributes = json["attributes"]
@toxicity = json["toxicity"]
self
end
def destroy
request = Net::HTTP::Delete.new("/proxies/#{proxy.name}/toxics/#{name}")
response = Toxiproxy.http_request(request)
Toxiproxy.assert_response(response)
self
end
def as_json
{
name: name,
type: type,
stream: stream,
toxicity: toxicity,
attributes: attributes,
}.to_json
end
end
end
| ruby | MIT | 7e04390a135b88ea28e866a1567b7284f4d58635 | 2026-01-04T17:55:41.625156Z | false |
bosko/rmre | https://github.com/bosko/rmre/blob/28a8ecc9c213f2288a4a913e2da330272a1c0501/spec/spec_helper.rb | spec/spec_helper.rb | require 'rmre'
| ruby | MIT | 28a8ecc9c213f2288a4a913e2da330272a1c0501 | 2026-01-04T17:55:44.759618Z | false |
bosko/rmre | https://github.com/bosko/rmre/blob/28a8ecc9c213f2288a4a913e2da330272a1c0501/spec/rmre/migrator_spec.rb | spec/rmre/migrator_spec.rb | require "spec_helper"
module Rmre
describe Migrator do
let(:src_connection) { double("source_connection") }
let(:tgt_connection) { tgt_con = double("target_connection") }
let(:src_db_opts) { { adapter: "fake_adapter", database: "source_db" } }
let(:tgt_db_opts) { { adapter: "fake_adapter", database: "target_db" } }
let(:id_column) do
col = double("id_column")
col.stub(:name).and_return("id")
col.stub(:null).and_return(false)
col.stub(:default).and_return(nil)
col.stub(:type).and_return("integer")
col
end
let(:name_column) do
col = double("name_column")
col.stub(:name).and_return("name")
col.stub(:null).and_return(false)
col.stub(:default).and_return(nil)
col.stub(:type).and_return("integer")
col
end
let(:table) do
tbl = double("created_table")
tbl.stub(:column)
tbl
end
before(:each) do
Source::Db.stub(:establish_connection).and_return(true)
Source::Db.stub(:connection).and_return(src_connection)
Target::Db.stub(:establish_connection).and_return(true)
Target::Db.stub(:connection).and_return(tgt_connection)
end
context "initialization" do
it "stores connection options in source and target modules" do
Migrator.new(src_db_opts, tgt_db_opts)
Source.connection_options.should be_eql(src_db_opts)
Target.connection_options.should be_eql(tgt_db_opts)
end
it "passes connection options to source and target connections" do
Source::Db.should_receive(:establish_connection).with(src_db_opts)
Target::Db.should_receive(:establish_connection).with(tgt_db_opts)
Migrator.new(src_db_opts, tgt_db_opts)
end
end
context "copying tables" do
before(:each) do
src_connection.stub(:tables).and_return %w{parent_table child_table}
src_connection.stub(:columns).and_return([id_column, name_column])
src_connection.stub(:primary_key).and_return("id")
@migrator = Migrator.new(src_db_opts, tgt_db_opts)
@migrator.stub(:copy_data)
end
it "copies all tables if they do not exist" do
tgt_connection.should_receive(:table_exists?).exactly(2).times.and_return(false)
tgt_connection.should_receive(:create_table).exactly(2).times
@migrator.copy
end
it "doesn't copy tables if they exist" do
tgt_connection.should_receive(:table_exists?).exactly(2).times.and_return(true)
tgt_connection.should_not_receive(:create_table)
@migrator.copy
end
it "copies existing tables if it is forced to recreate them" do
tgt_connection.should_receive(:table_exists?).exactly(2).times.and_return(true)
tgt_connection.should_receive(:create_table).exactly(2).times
@migrator.copy(true)
end
context "with before_copy filter" do
before(:each) do
@migrator.before_copy = lambda { |table_name|
return false if table_name == "child_table"
true
}
end
it "does not copy table if before copy filter returns false" do
tgt_connection.should_receive(:table_exists?).with("parent_table").and_return(false)
tgt_connection.should_receive(:create_table).once
@migrator.copy
end
end
end
context "copying tables with 'skip existing' turned on" do
before(:each) do
src_connection.stub(:tables).and_return %w{parent_table child_table}
src_connection.stub(:columns).and_return([id_column, name_column])
@migrator = Migrator.new(src_db_opts, tgt_db_opts, {:skip_existing => true})
end
it "skips existing tables" do
tgt_connection.should_receive(:table_exists?).exactly(2).times.and_return(true)
tgt_connection.should_not_receive(:create_table)
@migrator.copy
end
end
context "table creation" do
before(:each) do
@source_columns = [id_column, name_column]
end
context "Rails copy mode" do
before(:each) do
@migrator = Migrator.new(src_db_opts, tgt_db_opts)
src_connection.stub(:primary_key).and_return("id")
tgt_connection.stub(:adapter_name).and_return("fake adapter")
end
it "does not explicitely create ID column" do
tgt_connection.should_receive(:create_table).with("parent", {:id => true, :force => false}).
and_yield(table)
table.should_not_receive(:column).with("id", anything(), anything())
@migrator.create_table("parent", @source_columns)
end
it "creates other columns but ID column" do
tgt_connection.should_receive(:create_table).with("parent", {:id => true, :force => false}).
and_yield(table)
table.should_receive(:column).with("name", anything(), anything())
@migrator.create_table("parent", @source_columns)
end
end
context "non-Rails copy mode" do
before(:each) do
@migrator = Migrator.new(src_db_opts, tgt_db_opts, {:rails_copy_mode => false})
tgt_connection.stub(:adapter_name).times.and_return("fake adapter")
src_connection.stub(:primary_key).and_return("primaryIdColumn")
end
it "explicitely creates ID column" do
tgt_connection.should_receive(:create_table).with("parent",
{:id => true, :force => false, :primary_key => "primaryIdColumn" }).
and_yield(table)
table.should_receive(:column).with("id", anything(), anything())
@migrator.create_table("parent", @source_columns)
end
it "creates other columns too" do
tgt_connection.should_receive(:create_table).with("parent",
{:id => true, :force => false, :primary_key => "primaryIdColumn"}).
and_yield(table)
table.should_receive(:column).with("name", anything(), anything())
@migrator.create_table("parent", @source_columns)
end
end
end
end
end
| ruby | MIT | 28a8ecc9c213f2288a4a913e2da330272a1c0501 | 2026-01-04T17:55:44.759618Z | false |
bosko/rmre | https://github.com/bosko/rmre/blob/28a8ecc9c213f2288a4a913e2da330272a1c0501/spec/rmre/generator_spec.rb | spec/rmre/generator_spec.rb | require "spec_helper"
module Rmre
describe Generator do
let(:settings) do
{ db: {adapter: 'some_adapter',
database: 'db',
username: 'user',
password: 'pass'},
out_path: File.join(Dir.tmpdir, 'gne-test'),
include: ['incl1_', 'incl2_'],
inflections: [{plural: ["(.*)_des$", '\1_des'], singular: ["(.*)_des$", '\1_des']}]
}
end
let(:connection) do
double("db_connection", columns: [])
end
let(:generator) do
gen = Generator.new(settings[:db], settings[:out_path], settings[:include], settings[:inflections])
gen.stub(:connection).and_return(connection)
gen
end
let(:tables) { %w(incl1_tbl1 incl1_tbl2 incl2_tbl1 user processes) }
it "should flag table incl1_tbl1 for processing" do
generator.process?('incl1_tbl1').should be_true
end
it "should not flag table 'processes' for processing" do
generator.process?('processes').should be_false
end
it "should process three tables from the passed array of tables" do
generator.stub(:create_model)
generator.should_receive(:create_model).exactly(3).times
generator.create_models(tables)
end
it "should contain set_table_name 'incl1_tbl1' in generated source" do
generator.stub_chain(:connection, :primary_key).and_return("id")
generator.send(:generate_model_source, 'incl1_tbl1', []).should match(/self\.table_name = \'incl1_tbl1\'/)
end
it "should create three model files" do
generator.stub_chain(:connection, :primary_key).and_return("id")
generator.stub(:foreign_keys).and_return([])
generator.create_models(tables)
Dir.glob(File.join(generator.output_path, "*.rb")).should have(3).items
end
it "should create prettified file names" do
file = double("model_file")
file.stub(:write)
generator.connection.stub(:primary_key).and_return('')
File.stub(:open).and_yield(file)
File.should_receive(:open).with(/tbl_user/, "w")
file.should_receive(:write).with(/class TblUser/)
generator.create_model("TBL_USERS")
end
context 'with non standard keys' do
before(:each) do
@file = double('model_file')
@file.stub(:write)
end
it "should set primary key if PK column is not id" do
generator.connection.stub(:primary_key).and_return('usr_id')
File.stub(:open).and_yield(@file)
@file.should_receive(:write).with(/self\.primary_key = :usr_id/)
generator.create_model('users')
end
it "should set foreign key if FK column is not id" do
generator.connection.stub(:primary_key).and_return('pst_id')
generator.stub(:foreign_keys).and_return([
{ 'from_table' => 'posts',
'from_column' => 'pst_id',
'to_table'=>'user',
'to_column'=>'user_id'}
])
File.stub(:open).and_yield(@file)
@file.should_receive(:write).with(/:foreign_key => :pst_id/)
generator.create_model('posts')
end
end
context 'irregular plural table names' do
it "should create correct file and class names" do
file = double("model_file")
file.stub(:write)
generator.connection.stub(:primary_key).and_return('')
File.stub(:open).and_yield(file)
File.should_receive(:open).with(/status_des/, "w")
file.should_receive(:write).with(/class StatusDes/)
generator.create_model("status_des")
end
end
end
end
| ruby | MIT | 28a8ecc9c213f2288a4a913e2da330272a1c0501 | 2026-01-04T17:55:44.759618Z | false |
bosko/rmre | https://github.com/bosko/rmre/blob/28a8ecc9c213f2288a4a913e2da330272a1c0501/lib/rmre.rb | lib/rmre.rb | require 'rmre/generator'
require "rmre/migrator"
require 'rmre/version'
| ruby | MIT | 28a8ecc9c213f2288a4a913e2da330272a1c0501 | 2026-01-04T17:55:44.759618Z | false |
bosko/rmre | https://github.com/bosko/rmre/blob/28a8ecc9c213f2288a4a913e2da330272a1c0501/lib/contrib/progressbar.rb | lib/contrib/progressbar.rb | # = progressbar.rb
#
# == Copyright (C) 2001 Satoru Takabayashi
#
# Ruby License
#
# This module is free software. You may use, modify, and/or redistribute this
# software under the same terms as Ruby.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE.
#
# == Author(s)
#
# * Satoru Takabayashi
# Author:: Satoru Takabayashi
# Copyright:: Copyright (c) 2001 Satoru Takabayashi
# License:: Ruby License
# = Console Progress Bar
#
# Console::ProgressBar is a terminal-based progress bar library.
#
# == Usage
#
# pbar = ConsoleProgressBar.new( "Demo", 100 )
# 100.times { pbar.inc }
# pbar.finish
#
module Console; end
class Console::ProgressBar
def initialize(title, total, out = STDERR)
@title = title
@total = total
@out = out
@bar_length = 80
@bar_mark = "o"
@total_overflow = true
@current = 0
@previous = 0
@is_finished = false
@start_time = Time.now
@format = "%-14s %3d%% %s %s"
@format_arguments = [:title, :percentage, :bar, :stat]
show_progress
end
private
def convert_bytes (bytes)
if bytes < 1024
sprintf("%6dB", bytes)
elsif bytes < 1024 * 1000 # 1000kb
sprintf("%5.1fKB", bytes.to_f / 1024)
elsif bytes < 1024 * 1024 * 1000 # 1000mb
sprintf("%5.1fMB", bytes.to_f / 1024 / 1024)
else
sprintf("%5.1fGB", bytes.to_f / 1024 / 1024 / 1024)
end
end
def transfer_rate
bytes_per_second = @current.to_f / (Time.now - @start_time)
sprintf("%s/s", convert_bytes(bytes_per_second))
end
def bytes
convert_bytes(@current)
end
def format_time (t)
t = t.to_i
sec = t % 60
min = (t / 60) % 60
hour = t / 3600
sprintf("%02d:%02d:%02d", hour, min, sec);
end
# ETA stands for Estimated Time of Arrival.
def eta
if @current == 0
"ETA: --:--:--"
else
elapsed = Time.now - @start_time
eta = elapsed * @total / @current - elapsed;
sprintf("ETA: %s", format_time(eta))
end
end
def elapsed
elapsed = Time.now - @start_time
sprintf("Time: %s", format_time(elapsed))
end
def stat
if @is_finished then elapsed else eta end
end
def stat_for_file_transfer
if @is_finished then
sprintf("%s %s %s", bytes, transfer_rate, elapsed)
else
sprintf("%s %s %s", bytes, transfer_rate, eta)
end
end
def eol
if @is_finished then "\n" else "\r" end
end
def bar
len = percentage * @bar_length / 100
sprintf("|%s%s|", @bar_mark * len, " " * (@bar_length - len))
end
def percentage
if @total.zero?
100
else
@current * 100 / @total
end
end
def title
@title[0,13] + ":"
end
def get_width
# FIXME: I don't know how portable it is.
default_width = 80
begin
tiocgwinsz = 0x5413
data = [0, 0, 0, 0].pack("SSSS")
if @out.ioctl(tiocgwinsz, data) >= 0 then
rows, cols, xpixels, ypixels = data.unpack("SSSS")
if cols >= 0 then cols else default_width end
else
default_width
end
rescue Exception
default_width
end
end
def show
arguments = @format_arguments.map {|method| send(method) }
line = sprintf(@format, *arguments)
width = get_width
if line.length == width - 1
@out.print(line + eol)
elsif line.length >= width
@bar_length = [@bar_length - (line.length - width + 1), 0].max
if @bar_length == 0 then @out.print(line + eol) else show end
else #line.length < width - 1
@bar_length += width - line.length + 1
show
end
end
def show_progress
if @total.zero?
cur_percentage = 100
prev_percentage = 0
else
cur_percentage = (@current * 100 / @total).to_i
prev_percentage = (@previous * 100 / @total).to_i
end
if cur_percentage > prev_percentage || @is_finished
show
end
end
public
def file_transfer_mode
@format_arguments = [:title, :percentage, :bar, :stat_for_file_transfer]
end
def bar_mark= (mark)
@bar_mark = String(mark)[0..0]
end
def total_overflow= (boolv)
@total_overflow = boolv ? true : false
end
def format= (format)
@format = format
end
def format_arguments= (arguments)
@format_arguments = arguments
end
def finish
@current = @total
@is_finished = true
show_progress
end
def halt
@is_finished = true
show_progress
end
def set (count)
if count < 0
raise "invalid count less than zero: #{count}"
elsif count > @total
if @total_overflow
@total = count + 1
else
raise "invalid count greater than total: #{count}"
end
end
@current = count
show_progress
@previous = @current
end
def inc (step = 1)
@current += step
@current = @total if @current > @total
show_progress
@previous = @current
end
def inspect
"(ProgressBar: #{@current}/#{@total})"
end
end
| ruby | MIT | 28a8ecc9c213f2288a4a913e2da330272a1c0501 | 2026-01-04T17:55:44.759618Z | false |
bosko/rmre | https://github.com/bosko/rmre/blob/28a8ecc9c213f2288a4a913e2da330272a1c0501/lib/rmre/db_utils.rb | lib/rmre/db_utils.rb | module Rmre
module DbUtils
COLUMN_CONVERSIONS = {
"Mysql2" => {
:raw => :binary,
"LONG" => :text
}
}
def self.convert_column_type(target_adapter_name, start_type)
if COLUMN_CONVERSIONS[target_adapter_name] &&
COLUMN_CONVERSIONS[target_adapter_name][start_type]
return COLUMN_CONVERSIONS[target_adapter_name][start_type]
end
return start_type
end
end
end
| ruby | MIT | 28a8ecc9c213f2288a4a913e2da330272a1c0501 | 2026-01-04T17:55:44.759618Z | false |
bosko/rmre | https://github.com/bosko/rmre/blob/28a8ecc9c213f2288a4a913e2da330272a1c0501/lib/rmre/version.rb | lib/rmre/version.rb | module Rmre
VERSION = "0.0.9" unless defined?(::Rmre::VERSION)
end
| ruby | MIT | 28a8ecc9c213f2288a4a913e2da330272a1c0501 | 2026-01-04T17:55:44.759618Z | false |
bosko/rmre | https://github.com/bosko/rmre/blob/28a8ecc9c213f2288a4a913e2da330272a1c0501/lib/rmre/dynamic_db.rb | lib/rmre/dynamic_db.rb | require 'active_record'
module Rmre
module DynamicDb
def self.included(base)
base.send :extend, Rmre::DynamicDb
end
def connection_options
@connection_options
end
def connection_options= v
@connection_options = v
end
def create_model_for(table_name, primary_key_name)
model_name = table_name.classify
module_eval <<-ruby_src, __FILE__, __LINE__ + 1
class #{model_name} < Db
self.table_name = '#{table_name}'
establish_connection(#{connection_options})
end
ruby_src
klass = const_get model_name
klass.primary_key = primary_key_name if primary_key_name && primary_key_name != 'id'
klass
end
end
end
| ruby | MIT | 28a8ecc9c213f2288a4a913e2da330272a1c0501 | 2026-01-04T17:55:44.759618Z | false |
bosko/rmre | https://github.com/bosko/rmre/blob/28a8ecc9c213f2288a4a913e2da330272a1c0501/lib/rmre/generator.rb | lib/rmre/generator.rb | require "tmpdir"
require "fileutils"
require "erubis"
require "rmre/active_record/schema_dumper"
module Rmre
class Generator
attr_reader :connection
attr_reader :output_path
SETTINGS_ROOT = File.expand_path('../../../../db', __FILE__)
def initialize(options, out_path, include, custom_inflections)
@connection_options = options
@connection = nil
@output_path = out_path
@include_prefixes = include
ActiveSupport::Inflector.inflections do |inflect|
custom_inflections.each do |ci|
inflect.plural(/#{ci[:plural].first}/, ci[:plural].second)
inflect.singular(/#{ci[:singular].first}/, ci[:singular].second)
end if custom_inflections.is_a? Array
end
end
def connect
return if @connection_options.empty?
ActiveRecord::Base.establish_connection(@connection_options)
@connection = ActiveRecord::Base.connection
end
def create_models(tables)
return unless tables.is_a? Array
FileUtils.mkdir_p(@output_path) if !Dir.exists?(@output_path)
tables.each do |table_name|
create_model(table_name) if process?(table_name)
end
end
def dump_schema(stream)
ActiveRecord::SchemaDumper.dump_with_fk(connection, foreign_keys, stream)
end
def create_model(table_name)
File.open(File.join(output_path, "#{table_name.tableize.singularize}.rb"), "w") do |file|
constraints = []
foreign_keys.each do |fk|
src = constraint_src(table_name, fk)
constraints << src unless src.nil?
end
file.write generate_model_source(table_name, constraints)
end
end
def process?(table_name)
return true if @include_prefixes.nil? || @include_prefixes.empty?
@include_prefixes.each do |prefix|
return true if table_name =~ /^#{prefix}/
end
false
end
def foreign_keys
@foreign_keys ||= fetch_foreign_keys
end
private
def fetch_foreign_keys
fk = []
case @connection_options[:adapter]
when 'mysql'
when 'mysql2'
fk = mysql_foreign_keys
when 'postgresql'
fk = psql_foreign_keys
when 'sqlserver'
fk = mssql_foreign_keys
when 'oracle_enhanced'
fk = oracle_foreign_keys
end
fk
end
def constraint_src(table_name, fk={})
src = nil
if fk['from_table'] == table_name
src = "belongs_to :#{fk['to_table'].downcase.singularize}, :class_name => '#{fk['to_table'].tableize.classify}', :foreign_key => :#{fk['from_column']}"
elsif fk['to_table'] == table_name
src = "has_many :#{fk['from_table'].downcase.pluralize}, :class_name => '#{fk['from_table'].tableize.classify}'"
if connection.primary_key(table_name) == fk['from_column']
src += ", :foreign_key => :#{fk['from_column']}"
end
end
src
end
def generate_model_source(table_name, constraints)
eruby = Erubis::Eruby.new(File.read(File.join(File.expand_path("../", __FILE__), 'model.eruby')))
eruby.result(:table_name => table_name,
:primary_key => connection.primary_key(table_name),
:constraints => constraints,
:has_type_column => connection.columns(table_name).find { |col| col.name == 'type' })
end
def mysql_foreign_keys
sql = <<-SQL
select
table_name as from_table,
column_name as from_column,
referenced_table_name as to_table,
referenced_column_name as to_column
from information_schema.KEY_COLUMN_USAGE
where referenced_table_schema like '%'
and constraint_schema = '#{@connection_options[:database]}'
and referenced_table_name is not null
SQL
connection.select_all(sql)
end
def psql_foreign_keys
sql = <<-SQL
SELECT tc.table_name as from_table,
kcu.column_name as from_column,
ccu.table_name AS to_table,
ccu.column_name AS to_column
FROM information_schema.table_constraints tc
LEFT JOIN information_schema.key_column_usage kcu
ON tc.constraint_catalog = kcu.constraint_catalog
AND tc.constraint_schema = kcu.constraint_schema
AND tc.constraint_name = kcu.constraint_name
LEFT JOIN information_schema.referential_constraints rc
ON tc.constraint_catalog = rc.constraint_catalog
AND tc.constraint_schema = rc.constraint_schema
AND tc.constraint_name = rc.constraint_name
LEFT JOIN information_schema.constraint_column_usage ccu
ON rc.unique_constraint_catalog = ccu.constraint_catalog
AND rc.unique_constraint_schema = ccu.constraint_schema
AND rc.unique_constraint_name = ccu.constraint_name
WHERE tc.table_name like '%'
AND tc.constraint_type = 'FOREIGN KEY';
SQL
connection.select_all(sql)
end
def mssql_foreign_keys
sql = <<-SQL
SELECT C.TABLE_NAME [from_table],
KCU.COLUMN_NAME [from_column],
C2.TABLE_NAME [to_table],
KCU2.COLUMN_NAME [to_column]
FROM INFORMATION_SCHEMA.TABLE_CONSTRAINTS C
INNER JOIN INFORMATION_SCHEMA.KEY_COLUMN_USAGE KCU
ON C.CONSTRAINT_SCHEMA = KCU.CONSTRAINT_SCHEMA
AND C.CONSTRAINT_NAME = KCU.CONSTRAINT_NAME
INNER JOIN INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS RC
ON C.CONSTRAINT_SCHEMA = RC.CONSTRAINT_SCHEMA
AND C.CONSTRAINT_NAME = RC.CONSTRAINT_NAME
INNER JOIN INFORMATION_SCHEMA.TABLE_CONSTRAINTS C2
ON RC.UNIQUE_CONSTRAINT_SCHEMA = C2.CONSTRAINT_SCHEMA
AND RC.UNIQUE_CONSTRAINT_NAME = C2.CONSTRAINT_NAME
INNER JOIN INFORMATION_SCHEMA.KEY_COLUMN_USAGE KCU2
ON C2.CONSTRAINT_SCHEMA = KCU2.CONSTRAINT_SCHEMA
AND C2.CONSTRAINT_NAME = KCU2.CONSTRAINT_NAME
AND KCU.ORDINAL_POSITION = KCU2.ORDINAL_POSITION
WHERE C.CONSTRAINT_TYPE = 'FOREIGN KEY'
SQL
connection.select_all(sql)
end
def oracle_foreign_keys
fk = []
connection.tables.each do |table|
connection.foreign_keys(table).each do |oracle_fk|
table_fk = { 'from_table' => oracle_fk.from_table,
'from_column' => oracle_fk.options[:columns][0],
'to_table' => oracle_fk.to_table,
'to_column' => oracle_fk.options[:references][0] }
fk << table_fk
end
end
fk
end
end
end
| ruby | MIT | 28a8ecc9c213f2288a4a913e2da330272a1c0501 | 2026-01-04T17:55:44.759618Z | false |
bosko/rmre | https://github.com/bosko/rmre/blob/28a8ecc9c213f2288a4a913e2da330272a1c0501/lib/rmre/migrator.rb | lib/rmre/migrator.rb | require "rmre/db_utils"
require "rmre/dynamic_db"
require "contrib/progressbar"
module Rmre
module Source
include DynamicDb
class Db < ActiveRecord::Base
self.abstract_class = true
end
end
module Target
include DynamicDb
class Db < ActiveRecord::Base
self.abstract_class = true
end
end
class Migrator
attr_accessor :before_copy
def initialize(source_db_options, target_db_options, options = {})
# If set to true will call AR create_table with force (table will be dropped if exists)
@force_table_create = false
@skip_existing_tables = options[:skip_existing]
@verbose = options[:verbose]
@before_copy = nil
Rmre::Source.connection_options = source_db_options
Rmre::Target.connection_options = target_db_options
Rmre::Source::Db.establish_connection(Rmre::Source.connection_options)
Rmre::Target::Db.establish_connection(Rmre::Target.connection_options)
end
# Before we start copying we call block if it is given so some additional options
# can be set. For example MS SQL adapter has option to use lowercase names for
# all entities. We can set this options in a following way:
#
# mig = Migrator.new(..)
# mig.copy(true) do
# ActiveRecord::ConnectionAdapters::SQLServerAdapter.lowercase_schema_reflection = true
# end
def copy(force = false)
yield if block_given?
@force_table_create = force
tables_count = Rmre::Source::Db.connection.tables.length
Rmre::Source::Db.connection.tables.sort.each_with_index do |table, idx|
info "Copying table #{table} (#{idx + 1}/#{tables_count})..."
copy_table(table)
end
end
def copy_table(table)
if @skip_existing_tables && Rmre::Target::Db.connection.table_exists?(table)
info "Skipping #{table}"
return
end
if before_copy && before_copy.is_a?(Proc)
return unless before_copy.call(table)
end
if !Rmre::Target::Db.connection.table_exists?(table) || @force_table_create
create_table(table, Rmre::Source::Db.connection.columns(table))
end
copy_data(table)
end
def create_table(table, source_columns)
primary_key = Rmre::Source::Db.connection.primary_key(table)
# Create primary key if source table has primary key
opts = { :id => !primary_key.nil?, :force => @force_table_create }
# If primary key is not 'id' then set option to create proper primary key
opts[:primary_key] = primary_key unless primary_key == "id"
Rmre::Target::Db.connection.create_table(table, opts) do |t|
# Skip 'id' column if it is already created as primary key
source_columns.reject {|col| col.name == 'id' && opts[:id] && opts[:primary_key].nil? }.each do |sc|
options = {
:null => sc.null,
:default => sc.default
}
# Some adapters do not convert all types to Rails value. Example is oracle_enhanced adapter
# which for 'LONG' column type sets column's type to nil but keeps sql_type as 'LONG'.
# Therefore we will use one of these values so we can, in DbUtils, handle all possible
# column type mappings when we are migrating from one DB to anohter (Oracle -> MySQL,
# MS SQL -> PostgreSQL, etc).
source_type = sc.type.nil? ? sc.sql_type : sc.type
col_type = Rmre::DbUtils.convert_column_type(Rmre::Target::Db.connection.adapter_name, source_type)
case col_type
when :decimal
options.merge!({
:limit => sc.limit,
:precision => sc.precision,
:scale => sc.scale,
})
when :string
options.merge!({
:limit => sc.limit
})
end
t.column(sc.name, col_type, options)
end
end
end
def table_has_type_column(table)
Rmre::Source::Db.connection.columns(table).find {|col| col.name == 'type'}
end
def copy_data(table_name)
primary_key = Rmre::Source::Db.connection.primary_key(table_name)
src_model = Rmre::Source.create_model_for(table_name, primary_key)
src_model.inheritance_column = 'ruby_type' if table_has_type_column(table_name)
tgt_model = Rmre::Target.create_model_for(table_name, primary_key)
rec_count = src_model.count
# We will always copy attributes without protection because we
# are the ones initiating DB copy (no need to preform additional checks)
progress_bar = Console::ProgressBar.new(table_name, rec_count) if @verbose
src_model.all.each do |src_rec|
tgt_model.create!(src_rec.attributes, :without_protection => true)
progress_bar.inc if @verbose
end
end
private
def info(msg)
puts msg if @verbose
end
end
end
| ruby | MIT | 28a8ecc9c213f2288a4a913e2da330272a1c0501 | 2026-01-04T17:55:44.759618Z | false |
bosko/rmre | https://github.com/bosko/rmre/blob/28a8ecc9c213f2288a4a913e2da330272a1c0501/lib/rmre/active_record/schema_dumper.rb | lib/rmre/active_record/schema_dumper.rb | require "active_record"
require "active_record/base"
require "active_record/schema_dumper"
module ActiveRecord
class SchemaDumper
def self.dump_with_fk(connection=ActiveRecord::Base.connection, foreign_keys=[], stream=STDOUT)
new(connection).dump_with_fk(foreign_keys, stream)
stream
end
def dump_with_fk(foreign_keys, stream)
header(stream)
tables(stream)
foreign_keys.each do |fk|
stream.puts <<-SQL
execute "ALTER TABLE #{fk['from_table']} ADD CONSTRAINT fk_#{fk['from_table']}_#{fk['to_table']}
FOREIGN KEY (#{fk['from_column']}) REFERENCES #{fk['to_table']}(#{fk['to_column']})"
SQL
end
trailer(stream)
stream
end
end
end
| ruby | MIT | 28a8ecc9c213f2288a4a913e2da330272a1c0501 | 2026-01-04T17:55:44.759618Z | false |
treasure-data/perfectqueue | https://github.com/treasure-data/perfectqueue/blob/3ef2dd04786ce822ce4f2e66d54d17ecf98fe8c9/spec/signal_thread_spec.rb | spec/signal_thread_spec.rb | require 'spec_helper'
describe SignalThread do
before(:each) do
@th = SignalThread.new
end
after(:each) do
@th.trap('SIGUSR1')
@th.stop
@th.join
end
context('trap') do
it 'ignores Signal' do
old = @th.trap('SIGUSR1', 'IGNORE')
expect(old).to be_nil
expect(@th.handlers).to eq({})
end
it 'traps Signal' do
flag = false
pr = proc{flag=1;raise}
expect(@th.trap('SIGUSR1', &pr)).to be_nil
expect(@th.handlers).to eq({USR1: pr})
allow(STDERR).to receive(:write).at_least(:once)
Process.kill(:USR1, Process.pid)
Thread.pass until flag
expect(flag).to eq(1)
end
end
it 'queues signal' do
flag = false
pr = proc{flag=1;raise}
allow(@th).to receive(:enqueue){flag=2;raise}
expect(@th.trap('SIGUSR1', &pr)).to be_nil
allow(STDERR).to receive(:write).at_least(:once)
Process.kill(:USR1, Process.pid)
Thread.pass until flag
expect(flag).to eq(2)
end
end
| ruby | Apache-2.0 | 3ef2dd04786ce822ce4f2e66d54d17ecf98fe8c9 | 2026-01-04T17:55:40.813256Z | false |
treasure-data/perfectqueue | https://github.com/treasure-data/perfectqueue/blob/3ef2dd04786ce822ce4f2e66d54d17ecf98fe8c9/spec/null_backend_spec.rb | spec/null_backend_spec.rb | require 'spec_helper'
require 'perfectqueue/backend/null'
describe Backend::NullBackend do
let (:backend){ Backend::NullBackend.new }
describe '#list' do
subject { backend.list{} }
it { is_expected.to be_nil }
end
describe '#acquire' do
subject { backend.acquire(double('timeout')) }
it { is_expected.to be_nil }
end
describe '#finish' do
subject { backend.finish(double('token')) }
it { is_expected.to be true }
end
describe '#update' do
subject { backend.update(double('token'), double('timeout')) }
it { is_expected.to be_nil }
end
describe '#cancel' do
subject { backend.cancel(double('id')) }
it { is_expected.to be true }
end
describe '#submit' do
subject { backend.submit(double('id'), double('data')) }
it { is_expected.to be true }
end
end
| ruby | Apache-2.0 | 3ef2dd04786ce822ce4f2e66d54d17ecf98fe8c9 | 2026-01-04T17:55:40.813256Z | false |
treasure-data/perfectqueue | https://github.com/treasure-data/perfectqueue/blob/3ef2dd04786ce822ce4f2e66d54d17ecf98fe8c9/spec/client_spec.rb | spec/client_spec.rb | require 'spec_helper'
describe PerfectQueue::Client do
describe '#preempt' do
it '(key)' do
backend = double('backend')
alive_time = double('alive_time')
object_double('PerfectQueue::Backend', new_backend: backend).as_stubbed_const
client = Client.new({alive_time: alive_time})
ret = double('ret')
key = double('key')
expect(backend).to receive(:preempt).with(key, alive_time, {}).and_return(ret)
expect(client.preempt(key)).to eq(ret)
end
it '(key, options)' do
backend = double('backend')
alive_time = double('alive_time')
object_double('PerfectQueue::Backend', new_backend: backend).as_stubbed_const
client = Client.new({alive_time: alive_time})
ret = double('ret')
key = double('key')
options = {alive_time: alive_time}
expect(backend).to receive(:preempt).with(key, alive_time, options).and_return(ret)
expect(client.preempt(key, options)).to eq(ret)
end
end
end
| ruby | Apache-2.0 | 3ef2dd04786ce822ce4f2e66d54d17ecf98fe8c9 | 2026-01-04T17:55:40.813256Z | false |
treasure-data/perfectqueue | https://github.com/treasure-data/perfectqueue/blob/3ef2dd04786ce822ce4f2e66d54d17ecf98fe8c9/spec/rdb_stress.rb | spec/rdb_stress.rb | require 'spec_helper'
require 'perfectqueue/backend/rdb_compat'
require 'logger'
# run this with `bundle exec rake spec SPEC_OPTS="-fd" SPEC=spec/rdb_stress.rb`
describe Backend::RDBCompatBackend do
let (:now){ Time.now.to_i }
let (:client){ double('client') }
let (:table){ 'test_queues' }
let (:config){ {
url: 'mysql2://root:@localhost/perfectqueue_test',
table: table,
disable_resource_limit: true,
} }
let (:db) do
d = Backend::RDBCompatBackend.new(client, config)
s = d.db
s.tables.each{|t| s.drop_table(t) }
d.init_database({})
d
end
context '#acquire' do
let (:task_token){ Backend::RDBCompatBackend::Token.new(key) }
let (:alive_time){ 42 }
let (:max_acquire){ 10 }
context 'some tasks' do
before do
sql = nil
bucket_size = 200000
600_000.times do |i|
if i % bucket_size == 0
sql = 'INSERT `test_queues` (id, timeout, data, created_at, resource) VALUES'
end
t = now - 600 + i/1000
sql << "(UUID(),#{t},TO_BASE64(RANDOM_BYTES(540)),#{t},NULL),"
if i % bucket_size == bucket_size - 1
db.db.run sql.chop!
end
end
db.db.loggers << Logger.new($stderr)
db.db.sql_log_level = :debug
end
it 'returns a task' do
#db.instance_variable_set(:@cleanup_interval_count, 0)
#expect(db.db.instance_variable_get(:@default_dataset)).to receive(:delete).and_call_original
ary = db.acquire(alive_time, max_acquire, {})
expect(ary).to be_an_instance_of(Array)
expect(ary.size).to eq(10)
expect(ary[0]).to be_an_instance_of(AcquiredTask)
end
end
context 'very large jobs' do
before do
sql = nil
sql = 'INSERT `test_queues` (id, timeout, data, created_at, resource) VALUES'
data = %<UNCOMPRESS(UNCOMPRESS(FROM_BASE64('6B8AAHic7c6xCYNQFEDRh8HGAawjmAmEOE7WsPziGoJ1xnAJLbJCVgiJbpBOkHOqW96IFN34nvvYpOvyuZXPIgAAAICTS6/hku1RfR9tffQNAAAA8Icxb+7r9AO74A1h')))>
200.times do |i|
t = now - 600 + i/1000
sql << "(UUID(),#{t},#{data},#{t},NULL),"
end
db.db.run sql.chop!
db.db.loggers << Logger.new($stderr)
db.db.sql_log_level = :debug
end
it 'returns a task' do
#db.instance_variable_set(:@cleanup_interval_count, 0)
#expect(db.db.instance_variable_get(:@default_dataset)).to receive(:delete).and_call_original
ary = db.acquire(alive_time, max_acquire, {})
expect(ary).to be_an_instance_of(Array)
expect(ary.size).to eq(10)
expect(ary[0]).to be_an_instance_of(AcquiredTask)
end
end
end
end
| ruby | Apache-2.0 | 3ef2dd04786ce822ce4f2e66d54d17ecf98fe8c9 | 2026-01-04T17:55:40.813256Z | false |
treasure-data/perfectqueue | https://github.com/treasure-data/perfectqueue/blob/3ef2dd04786ce822ce4f2e66d54d17ecf98fe8c9/spec/queue_spec.rb | spec/queue_spec.rb | require 'spec_helper'
describe Queue do
include QueueTest
it 'is a Queue' do
expect(queue.class).to eq(PerfectQueue::Queue)
end
it 'succeess submit' do
queue.submit('task01', 'type1', {})
end
it 'fail duplicated submit' do
now = Time.now.to_i
queue.submit('task01', 'type1', {}, :now=>now)
expect {
allow(STDERR).to receive(:puts)
queue.submit('task01', 'type1', {}, :now=>now+1)
}.to raise_error AlreadyExistsError
expect {
allow(STDERR).to receive(:puts)
queue.submit('task01', 'type1', {}, :now=>now+10)
}.to raise_error AlreadyExistsError
end
it 'list' do
queue.submit('task01', 'type1', {"a"=>1})
queue.submit('task02', 'type1', {"a"=>2})
queue.submit('task03', 'type1', {"a"=>3})
a = []
queue.each {|t| a << t }
a.sort_by! {|t| t.key }
task01 = a.shift
expect(task01.finished?).to eq(false)
task01.type == 'type1'
expect(task01.key).to eq('task01')
expect(task01.data["a"]).to eq(1)
task02 = a.shift
expect(task02.finished?).to eq(false)
task02.type == 'type1'
expect(task02.key).to eq('task02')
expect(task02.data["a"]).to eq(2)
task03 = a.shift
expect(task03.finished?).to eq(false)
task03.type == 'type1'
expect(task03.key).to eq('task03')
expect(task03.data["a"]).to eq(3)
expect(a.empty?).to eq(true)
end
it 'poll' do
now = Time.now.to_i
queue.submit('task01', 'type1', {"a"=>1}, :now=>now+0)
queue.submit('task02', 'type1', {"a"=>2}, :now=>now+1)
queue.submit('task03', 'type1', {"a"=>3}, :now=>now+2)
task01 = queue.poll(:now=>now+10)
expect(task01.key).to eq('task01')
task02 = queue.poll(:now=>now+10)
expect(task02.key).to eq('task02')
task03 = queue.poll(:now=>now+10)
expect(task03.key).to eq('task03')
t4 = queue.poll(:now=>now+10)
expect(t4).to eq(nil)
end
it 'release' do
now = Time.now.to_i
queue.submit('task01', 'type1', {"a"=>1}, :now=>now+0)
task01 = queue.poll(:now=>now+10)
expect(task01.key).to eq('task01')
task02 = queue.poll(:now=>now+10)
expect(task02).to eq(nil)
task01.release!(:now=>now+10)
task03 = queue.poll(:now=>now+11)
expect(task03.key).to eq('task01')
end
it 'timeout' do
now = Time.now.to_i
queue.submit('task01', 'type1', {"a"=>1}, :now=>now+0)
task01 = queue.poll(:now=>now+10, :alive_time=>10)
expect(task01.key).to eq('task01')
task02 = queue.poll(:now=>now+15)
expect(task02).to eq(nil)
task03 = queue.poll(:now=>now+20)
expect(task03.key).to eq('task01')
end
it 'heartbeat' do
now = Time.now.to_i
queue.submit('task01', 'type1', {"a"=>1}, :now=>now+0)
task01 = queue.poll(:now=>now+10, :alive_time=>10)
expect(task01.key).to eq('task01')
task01.heartbeat!(:alive_time=>15, :now=>now+10)
task02 = queue.poll(:now=>now+20)
expect(task02).to eq(nil)
task03 = queue.poll(:now=>now+30)
expect(task03.key).to eq('task01')
end
it 'retry' do
now = Time.now.to_i
queue.submit('task01', 'type1', {"a"=>1}, :now=>now+0)
task01 = queue.poll(:now=>now+10, :alive_time=>10)
expect(task01.key).to eq('task01')
task01.retry!(:retry_wait=>15, :now=>now+10)
task02 = queue.poll(:now=>now+20)
expect(task02).to eq(nil)
task03 = queue.poll(:now=>now+30)
expect(task03.key).to eq('task01')
end
it 'froce_finish' do
now = Time.now.to_i
queue.submit('task01', 'type1', {"a"=>1}, :now=>now+0)
task01 = queue.poll(:now=>now+10)
expect(task01.key).to eq('task01')
expect(queue['task01'].metadata.running?).to eq(true)
queue['task01'].force_finish!(:now=>now+11)
expect(queue['task01'].metadata.finished?).to eq(true)
end
it 'status' do
now = Time.now.to_i
queue.submit('task01', 'type1', {"a"=>1}, :now=>now+0)
# rdb_backend backend can't distinguish running with waiting
#queue['task01'].metadata.finished?.should == false
#queue['task01'].metadata.running?.should == false
#queue['task01'].metadata.waiting?.should == true
task01 = queue.poll(:now=>now+10, :alive_time=>10)
expect(task01.key).to eq('task01')
expect(queue['task01'].metadata.finished?).to eq(false)
expect(queue['task01'].metadata.running?).to eq(true)
expect(queue['task01'].metadata.waiting?).to eq(false)
task01.finish!
expect(queue['task01'].metadata.finished?).to eq(true)
expect(queue['task01'].metadata.running?).to eq(false)
expect(queue['task01'].metadata.waiting?).to eq(false)
end
it 'retention_time' do
now = Time.now.to_i
queue.submit('task01', 'type1', {"a"=>1}, :now=>now+0)
expect(queue['task01'].metadata.finished?).to eq(false)
task01 = queue.poll(:now=>now+10, :alive_time=>10)
expect(task01.key).to eq('task01')
task01.finish!(:now=>now+11, :retention_time=>10)
queue.poll(:now=>now+12)
expect(queue['task01'].exists?).to eq(true)
queue.poll(:now=>now+22)
allow(STDERR).to receive(:puts)
expect(queue['task01'].exists?).to eq(false)
end
it 'get_task_metadata failed with NotFoundError' do
expect {
allow(STDERR).to receive(:puts)
queue['task99'].metadata
}.to raise_error NotFoundError
end
it 'prefetch' do
now = Time.now.to_i
queue.submit('task01', 'type1', {"a"=>1}, :now=>now+0)
queue.submit('task02', 'type2', {"a"=>2}, :now=>now+1)
queue.submit('task03', 'type3', {"a"=>3}, :now=>now+2)
tasks = queue.poll_multi(:now=>now+10, :alive_time=>10, :max_acquire=>2)
expect(tasks.size).to eq(2)
expect(tasks[0].key).to eq('task01')
expect(tasks[1].key).to eq('task02')
tasks = queue.poll_multi(:now=>now+10, :alive_time=>10, :max_acquire=>2)
expect(tasks.size).to eq(1)
expect(tasks[0].key).to eq('task03')
tasks = queue.poll_multi(:now=>now+10, :alive_time=>10, :max_acquire=>2)
expect(tasks).to eq(nil)
end
it 'data' do
now = Time.now.to_i
queue.submit('task01', 'type1', {"a"=>1}, :now=>now)
task01 = queue.poll(:now=>now+10)
expect(task01.key).to eq('task01')
expect(task01.data).to eq({"a"=>1})
task01.update_data!({"b"=>2})
expect(task01.data).to eq({"a"=>1, "b"=>2})
task01.update_data!({"a"=>3,"c"=>4})
expect(task01.data).to eq({"a"=>3, "b"=>2, "c"=>4})
task01.release!
task01 = queue.poll(:now=>now+10)
expect(task01.key).to eq('task01')
expect(task01.data).to eq({"a"=>3, "b"=>2, "c"=>4})
end
end
| ruby | Apache-2.0 | 3ef2dd04786ce822ce4f2e66d54d17ecf98fe8c9 | 2026-01-04T17:55:40.813256Z | false |
treasure-data/perfectqueue | https://github.com/treasure-data/perfectqueue/blob/3ef2dd04786ce822ce4f2e66d54d17ecf98fe8c9/spec/supervisor_spec.rb | spec/supervisor_spec.rb | require 'spec_helper'
class TestHandler < PerfectQueue::Application::Base
def run
#puts "TestHandler: #{task}"
if num = task.data['sleep']
sleep num
end
#puts "Task finished"
end
end
class RegexpHandler < PerfectQueue::Application::Base
def run
end
end
class TestApp < PerfectQueue::Application::Dispatch
route 'test' => TestHandler
route /reg.*/ => RegexpHandler
end
describe Supervisor do
include QueueTest
let (:logger) { double('logger').as_null_object }
before do
object_double('PerfectQueue::DaemonsLogger', new: logger).as_stubbed_const
end
context 'normal routing' do
before do
@sv = Supervisor.new(TestApp, queue_config)
@thread = Thread.new {
@sv.run
}
end
after do
@sv.stop(true)
@thread.join
end
it 'route' do
expect_any_instance_of(TestHandler).to receive(:run).once
expect_any_instance_of(RegexpHandler).to receive(:run).once
queue.submit('task01', 'test', {})
queue.submit('task02', 'reg01', {})
sleep 2
end
end
context 'listen_debug_server' do
after do
@sv.stop(true)
@thread.join
end
it 'listen_debug_server with UNIX Socket' do
Tempfile.open('supervisor') do |f|
config = queue_config.dup
config[:debug] = f.path
@sv = Supervisor.new(TestApp, config)
@thread = Thread.new {
@sv.run
}
sleep 2
end
end
it 'listen_debug_server with TCP with address:port' do
config = queue_config.dup
config[:debug] = '127.0.0.1:0'
@sv = Supervisor.new(TestApp, config)
@thread = Thread.new {
@sv.run
}
sleep 2
end
it 'listen_debug_server with TCP with port' do
config = queue_config.dup
config[:debug] = '0'
@sv = Supervisor.new(TestApp, config)
@thread = Thread.new {
@sv.run
}
sleep 2
end
end
context 'replace' do
before do
@sv = Supervisor.new(TestApp, queue_config)
@thread = Thread.new {
@sv.run
}
Thread.pass until @sv.engine
end
after do
@sv.stop(true)
@thread.join
end
it 'replaces immediately' do
@sv.replace(true, ':')
end
it 'replaces not immediately' do
@sv.replace(false, ':')
end
it 'fails to replace' do
Thread.pass until @sv.engine
allow(@sv.engine).to receive(:replace) { raise }
@sv.replace(false, ':')
end
end
context 'signal handling' do
before do
@sv = Supervisor.new(TestApp, queue_config)
@thread = Thread.new {
@sv.run
}
end
after do
@sv.stop(true)
@thread.join
end
it 'handles TERM signal' do
Thread.pass until @sv.engine
Process.kill(:TERM, Process.pid)
expect(@thread.join(3)).to eq(@thread)
end
it 'handles INT signal' do
Thread.pass until @sv.engine
Process.kill(:INT, Process.pid)
expect(@thread.join(3)).to eq(@thread)
end
it 'handles QUIT signal' do
Thread.pass until @sv.engine
Process.kill(:QUIT, Process.pid)
#puts "finish expected..."
expect(@thread.join(3)).to eq(@thread)
end
it 'handles USR1 signal' do
Thread.pass until @sv.engine
processors = @sv.engine.processors
Process.kill(:USR1, Process.pid)
expect(@sv.engine.processors).to eq(processors)
end
it 'handles HUP signal' do
Thread.pass until @sv.engine
processors = @sv.engine.processors
Process.kill(:HUP, Process.pid)
expect(@sv.engine.processors).to eq(processors)
end
it 'handles USR2 signal' do
Thread.pass until @sv.engine
allow(logger).to receive(:reopen!)
Process.kill(:USR2, Process.pid)
end
it 'kill reason' do
expect_any_instance_of(TestHandler).to receive(:kill).once #.with(kind_of(PerfectQueue::CancelRequestedError)) # FIXME 'with' dead locks
queue.submit('task01', 'test', {'sleep'=>4})
sleep 2
Process.kill(:TERM, Process.pid)
expect(@thread.join(5)).to eq(@thread)
end
end
describe '.run' do
let (:runner) { double('runner') }
let (:config) { double('config') }
before (:each) do
allow(Supervisor).to receive(:new) \
.with(runner, config) do |*args, &block|
expect(block).to be_a(Proc)
double('supervisor', run: nil)
end
end
it 'calls Supervisor.new.run' do
expect(Supervisor.run(runner, config){ }).to be_nil
end
end
describe '#run' do
let (:supervisor) { Supervisor.new(double('runner')){raise} }
it 'rescues exception' do
expect(supervisor.run).to be_nil
end
end
describe '#stop' do
let (:supervisor) { Supervisor.new(double('runner')){} }
it 'return nil without engine' do
expect(supervisor.run).to be_nil
end
it 'rescues exception' do
supervisor.instance_variable_set(:@engine, true) # dummy
expect(supervisor.stop(true)).to be false
end
end
describe '#restart' do
let (:supervisor) { Supervisor.new(double('runner')){} }
it 'return nil without engine' do
expect(supervisor.run).to be_nil
end
it 'rescues exception' do
expect(supervisor.restart(true)).to be false
end
end
end
| ruby | Apache-2.0 | 3ef2dd04786ce822ce4f2e66d54d17ecf98fe8c9 | 2026-01-04T17:55:40.813256Z | false |
treasure-data/perfectqueue | https://github.com/treasure-data/perfectqueue/blob/3ef2dd04786ce822ce4f2e66d54d17ecf98fe8c9/spec/task_metadata_spec.rb | spec/task_metadata_spec.rb | require 'spec_helper'
describe PerfectQueue::TaskMetadata do
let (:attributes){ double('attributes', delete: nil) }
describe '#task' do
it 'returns a task' do
client = double('client')
key = double('key')
tm = TaskMetadata.new(client, key, attributes)
task = tm.task
expect(task).to be_a(Task)
expect(task.client).to eq(client)
expect(task.key).to eq(key)
end
end
describe '#inspect' do
it 'returns inspected string' do
client = double('client')
key = double('key')
tm = TaskMetadata.new(client, key, attributes)
expect(tm.inspect).to eq("#<PerfectQueue::TaskMetadata @key=#{key.inspect} @attributes=#{attributes.inspect}>")
end
end
describe 'running?' do
it 'returns true on running' do
tm = TaskMetadata.new(double, double, status: TaskStatus::RUNNING)
expect(tm.running?).to be true
end
it 'returns false on finished' do
tm = TaskMetadata.new(double, double, status: TaskStatus::FINISHED)
expect(tm.running?).to be false
end
end
describe 'message' do
it 'returns given message' do
message = double('message')
tm = TaskMetadata.new(double, double, message: message)
expect(tm.message).to eq(message)
end
end
describe 'user' do
it 'returns given user' do
user = double('user')
tm = TaskMetadata.new(double, double, user: user)
expect(tm.user).to eq(user)
end
end
describe 'created_at' do
it 'returns a time of given created_at' do
epoch = 42
tm = TaskMetadata.new(double, double, created_at: epoch)
expect(tm.created_at).to eq(Time.at(epoch))
end
end
describe 'timeout' do
it 'returns a time of given timeout' do
epoch = 72
tm = TaskMetadata.new(double, double, timeout: epoch)
expect(tm.timeout).to eq(Time.at(epoch))
end
end
end
| ruby | Apache-2.0 | 3ef2dd04786ce822ce4f2e66d54d17ecf98fe8c9 | 2026-01-04T17:55:40.813256Z | false |
treasure-data/perfectqueue | https://github.com/treasure-data/perfectqueue/blob/3ef2dd04786ce822ce4f2e66d54d17ecf98fe8c9/spec/worker_spec.rb | spec/worker_spec.rb | require 'spec_helper'
describe PerfectQueue::Worker do
let (:worker){ Worker.new(double, {}) }
describe '.run' do
let (:runner){ double }
let (:config){ double }
let (:worker){ double }
context 'with config' do
it 'calls Worker.new.run' do
expect(worker).to receive(:run).with(no_args).exactly(:once)
allow(Worker).to receive(:new).with(runner, config).and_return(worker)
Worker.run(runner, config)
end
end
context 'with block' do
it 'calls Worker.new.run' do
expect(worker).to receive(:run).with(no_args).exactly(:once)
allow(Worker).to receive(:new).with(runner, nil).and_return(worker)
Worker.run(runner)
end
end
end
describe '.new' do
context 'with config' do
it 'returns a worker' do
expect(Worker.new(double, {})).to be_an_instance_of(Worker)
end
it 'has @detach_wait which is 10.0' do
worker = Worker.new(double, {})
expect(worker.instance_variable_get(:@detach_wait)).to eq(10.0)
end
it 'has @detach_wait which is configured by config[:detach_wait]' do
detach_wait = double
worker = Worker.new(double, {detach_wait: detach_wait})
expect(worker.instance_variable_get(:@detach_wait)).to eq(detach_wait)
end
end
context 'with block' do
it 'returns a worker' do
expect(Worker.new(double){ {} }).to be_an_instance_of(Worker)
end
end
end
describe '#run' do
before do
allow(worker).to receive(:install_signal_handlers)
allow(worker.instance_variable_get(:@sv)).to receive(:run){sleep 1}
end
context 'normal and detach' do
it do
pid = double
waitpid2_ret = nil
allow(worker).to receive(:fork).and_return(pid)
allow(Process).to receive(:kill).with(:INT, pid) do
waitpid2_ret = [pid, double]
end
allow(Process).to receive(:waitpid2).and_return(waitpid2_ret)
Thread.new{sleep 0.5;worker.detach}
worker.run
end
end
context 'wrong pid' do
it 'ignores error and finish' do
wrong_pid = $$ # pid of myself is not suitable for waitpid2 and raise ECHILD
allow(worker).to receive(:fork).and_return(wrong_pid)
expect{ worker.run }.not_to raise_error
end
end
context 'child process side' do
it 'run supervisor and exit!' do
e = StandardError.new
allow(worker).to receive(:fork).and_yield
allow(worker.instance_variable_get(:@sv)).to receive(:run)
allow(worker).to receive(:exit!).exactly(:once){raise e}
expect{ worker.run }.to raise_error(e)
end
end
end
describe '#stop' do
let (:worker){ Worker.new(double, {}) }
context 'immediate=true' do
it 'send_signal(:QUIT)' do
expect(worker).to receive(:send_signal).with(:QUIT)
worker.stop(true)
end
end
context 'immediate=false' do
it 'send_signal(:TERM)' do
expect(worker).to receive(:send_signal).with(:TERM)
worker.stop(false)
end
end
end
describe '#restart' do
let (:worker){ Worker.new(double, {}) }
context 'immediate=true' do
it 'send_signal(:HUP)' do
expect(worker).to receive(:send_signal).with(:HUP)
worker.restart(true)
end
end
context 'immediate=false' do
it 'send_signal(:USR1)' do
expect(worker).to receive(:send_signal).with(:USR1)
worker.restart(false)
end
end
end
describe '#logrotated' do
it 'send_signal(:USR2)' do
expect(worker).to receive(:send_signal).with(:USR2)
worker.logrotated
end
end
describe '#detach' do
it 'send_signal(:INT) and so on' do
expect(worker).to receive(:send_signal).with(:INT)
expect(worker.instance_variable_get(:@finish_flag)).to receive(:set!).with(no_args)
worker.detach
expect(worker.instance_variable_get(:@detach)).to be true
end
end
describe '#send_signal' do
let (:sig){ double }
let (:pid){ double }
before do
worker.instance_variable_set(:@pid, pid)
end
context 'normal' do
it 'kill the process' do
allow(Process).to receive(:kill).with(sig, pid)
worker.__send__(:send_signal, sig)
end
end
context 'ESRCH' do
it 'ignores ESRCH' do
allow(Process).to receive(:kill).with(sig, pid).and_raise(Errno::ESRCH)
worker.__send__(:send_signal, sig)
end
end
context 'EPERM' do
it 'ignores EPERM' do
allow(Process).to receive(:kill).with(sig, pid).and_raise(Errno::EPERM)
worker.__send__(:send_signal, sig)
end
end
end
describe '#install_signal_handlers' do
let (:signal_thread){ worker.__send__(:install_signal_handlers) }
before do
signal_thread
end
after do
signal_thread.stop
signal_thread.value
trap :TERM, 'DEFAULT'
trap :INT, 'DEFAULT'
trap :QUIT, 'DEFAULT'
trap :USR1, 'DEFAULT'
trap :HUP, 'DEFAULT'
trap :USR2, 'DEFAULT'
end
context 'TERM' do
it 'call #stop(false)' do
flag = false
expect(worker).to receive(:stop).with(false){flag = true}
Process.kill(:TERM, $$)
10.times{sleep 0.1;break if flag}
end
end
context 'INT' do
it 'call #detach' do
flag = false
expect(worker).to receive(:detach).with(no_args){flag = true}
Process.kill(:INT, $$)
10.times{sleep 0.1;break if flag}
end
end
context 'QUIT' do
it 'call #stop(true)' do
flag = false
expect(worker).to receive(:stop).with(true){flag = true}
Process.kill(:QUIT, $$)
10.times{sleep 0.1;break if flag}
end
end
context 'USR1' do
it 'call #restart(false)' do
flag = false
expect(worker).to receive(:restart).with(false){flag = true}
Process.kill(:USR1, $$)
10.times{sleep 0.1;break if flag}
end
end
context 'HUP' do
it 'call #restart(true)' do
flag = false
expect(worker).to receive(:restart).with(true){flag = true}
Process.kill(:HUP, $$)
10.times{sleep 0.1;break if flag}
end
end
context 'USR2' do
it 'call #logrotated' do
flag = false
expect(worker).to receive(:logrotated).with(no_args){flag = true}
Process.kill(:USR2, $$)
10.times{sleep 0.1;break if flag}
end
end
end
end
| ruby | Apache-2.0 | 3ef2dd04786ce822ce4f2e66d54d17ecf98fe8c9 | 2026-01-04T17:55:40.813256Z | false |
treasure-data/perfectqueue | https://github.com/treasure-data/perfectqueue/blob/3ef2dd04786ce822ce4f2e66d54d17ecf98fe8c9/spec/rdb_backend_spec.rb | spec/rdb_backend_spec.rb | require 'spec_helper'
require 'perfectqueue/backend/rdb'
require 'mysql2'
describe Backend::RDBBackend do
let (:now){ Time.now.to_i }
let (:uri){ 'mysql2://root:@localhost/perfectqueue_test' }
let (:table){ 'test_queues' }
let (:db) do
d = Backend::RDBCompatBackend.new(double, url: uri, table: table)
s = d.db
s.tables.each{|t| s.drop_table(t) }
d.init_database({})
Backend::RDBBackend.new(uri, table)
end
context '.new' do
it 'supports mysql' do
expect(Backend::RDBBackend.new(uri, table)).to be_an_instance_of(Backend::RDBBackend)
end
describe 'supports ssl_mode as an option' do
let (:uri){ 'mysql2://root:@127.0.0.1/perfectqueue_test' }
it 'passes ssl_mode to Mysql2::Client initializer' do
expect(Mysql2::Client).to receive(:new) do |params|
expect(params[:ssl_mode]).to be(:disabled)
end.and_call_original
Backend::RDBBackend.new(uri, table, ssl_mode: :disabled)
end
it 'invalid value causes error' do
expect { Backend::RDBBackend.new(uri, table, ssl_mode: :invalid) }.to raise_error(Sequel::DatabaseConnectionError)
end
end
end
context '#submit' do
it 'adds task' do
expect(db.submit('key', '{"foo":"bar"}')).to be true
row = db.db.fetch("SELECT * FROM `#{table}` WHERE id=? LIMIT 1", 'key').first
expect(row[:created_at]).not_to be_nil
expect(row[:data]).to eq('{"foo":"bar"}')
end
it 'returns nil for a duplicated task' do
expect(db.submit('key', '{"foo":"bar"}')).to be true
expect(db.submit('key', '{"foo":"bar"}')).to be_nil
end
end
context '#cancel' do
let (:key){ 'key' }
context 'have the task' do
before do
db.submit(key, '{}')
end
it 'returns true' do
expect(db.cancel(key)).to be true
row = db.db.fetch("SELECT created_at FROM `#{table}` WHERE id=? LIMIT 1", key).first
expect(row[:created_at]).to be_nil
end
end
context 'already canceled' do
it 'returns false' do
expect(db.cancel(key)).to be false
end
end
end
context '#connect' do
context 'normal' do
it 'returns nil' do
expect(db.__send__(:connect){ }).to be_nil
end
end
context 'error' do
it 'returns block result' do
expect(RuntimeError).to receive(:new).exactly(Backend::RDBBackend::MAX_RETRY).and_call_original
allow(STDERR).to receive(:puts)
allow(db).to receive(:sleep)
expect do
db.__send__(:connect) do
raise RuntimeError.new('try restarting transaction')
end
end.to raise_error(RuntimeError)
end
end
context 'cannot connect' do
let (:uri){ 'mysql2://root:@nonexistent/perfectqueue_test' }
it 'raises Sequel::DatabaseConnectionError' do
expect { Backend::RDBBackend.new(uri, table) }.to raise_error(Sequel::DatabaseConnectionError)
end
end
end
end
| ruby | Apache-2.0 | 3ef2dd04786ce822ce4f2e66d54d17ecf98fe8c9 | 2026-01-04T17:55:40.813256Z | false |
treasure-data/perfectqueue | https://github.com/treasure-data/perfectqueue/blob/3ef2dd04786ce822ce4f2e66d54d17ecf98fe8c9/spec/blocking_flag_spec.rb | spec/blocking_flag_spec.rb | require 'spec_helper'
describe PerfectQueue::BlockingFlag do
describe '.new' do
it 'returns a BlockingFlag' do
flag = BlockingFlag.new
expect(flag).to be_an_instance_of(BlockingFlag)
end
end
describe '#set!' do
let (:flag){ BlockingFlag.new }
it 'returns true if it was false' do
expect(flag.set?).to eq false
expect(flag.set!).to eq true
expect(flag.set?).to eq true
end
it 'returns false if it was already true' do
flag.set!
expect(flag.set?).to eq true
expect(flag.set!).to eq false
expect(flag.set?).to eq true
end
end
describe '#reset!' do
let (:flag){ BlockingFlag.new }
it 'returns false if it was already false' do
expect(flag.set?).to eq false
expect(flag.reset!).to eq false
expect(flag.set?).to eq false
end
it 'returns false if it was true' do
flag.set!
expect(flag.set?).to eq true
expect(flag.reset!).to eq true
expect(flag.set?).to eq false
end
end
describe '#set_region' do
let (:flag){ BlockingFlag.new }
it 'set in the block and reset it was set' do
flag.set!
flag.set_region do
expect(flag.set?).to eq true
end
expect(flag.set?).to eq false
end
it 'set in the block and reset if it was reset' do
flag.reset!
flag.set_region do
expect(flag.set?).to eq true
end
expect(flag.set?).to eq false
end
it 'set in the block and reset even if it raiess error' do
flag.set_region do
expect(flag.set?).to eq true
raise
end rescue nil
expect(flag.set?).to eq false
end
end
describe '#reset_region' do
let (:flag){ BlockingFlag.new }
it 'reset in the block and set it was set' do
flag.set!
flag.reset_region do
expect(flag.set?).to eq false
end
expect(flag.set?).to eq true
end
it 'reset in the block and set if it was reset' do
flag.reset!
flag.reset_region do
expect(flag.set?).to eq false
end
expect(flag.set?).to eq true
end
it 'set in the block and reset even if it raiess error' do
flag.reset_region do
expect(flag.set?).to eq false
raise
end rescue nil
expect(flag.set?).to eq true
end
end
describe '#wait' do
let (:flag){ BlockingFlag.new }
it 'wait until a thread set/reset the flag' do
th1 = Thread.start do
flag.wait(5)
expect(flag.set?).to eq true
end
Thread.pass until th1.stop?
flag.set!
th1.join(2)
end
end
end
| ruby | Apache-2.0 | 3ef2dd04786ce822ce4f2e66d54d17ecf98fe8c9 | 2026-01-04T17:55:40.813256Z | false |
treasure-data/perfectqueue | https://github.com/treasure-data/perfectqueue/blob/3ef2dd04786ce822ce4f2e66d54d17ecf98fe8c9/spec/stress.rb | spec/stress.rb | require 'spec_helper'
describe Queue do
include QueueTest
let :thread_num do
5
end
let :loop_num do
50
end
let :now do
Time.now.to_i
end
def thread_main
thread_id = Thread.current.object_id
loop_num.times do |i|
queue.submit("#{thread_id}-#{i}", "type01", {}, :now=>now-10)
task = queue.poll(:now=>now, :alive_time=>60)
expect(task).not_to eq(nil)
task.heartbeat!(:now=>now, :alive_time=>70)
task.finish!(:now=>now, :retention_time=>80)
end
end
it 'stress' do
puts "stress test with threads=#{thread_num} * loop_num=#{loop_num} = #{thread_num * loop_num} tasks"
# initialize queue here
queue
now
start_at = Time.now
(1..thread_num).map {
Thread.new(&method(:thread_main))
}.each {|thread|
thread.join
}
finish_at = Time.now
elapsed = finish_at - start_at
task_num = thread_num * loop_num
puts "#{elapsed} sec."
puts "#{task_num / elapsed} req/sec."
puts "#{elapsed / task_num} sec/req."
end
end
| ruby | Apache-2.0 | 3ef2dd04786ce822ce4f2e66d54d17ecf98fe8c9 | 2026-01-04T17:55:40.813256Z | false |
treasure-data/perfectqueue | https://github.com/treasure-data/perfectqueue/blob/3ef2dd04786ce822ce4f2e66d54d17ecf98fe8c9/spec/daemons_logger_spec.rb | spec/daemons_logger_spec.rb | require 'spec_helper'
describe DaemonsLogger do
context 'new' do
it 'creates logger with path string' do
Tempfile.open('daemons_logger') do |io|
logger = DaemonsLogger.new(io.path)
expect(logger.class).to eq(DaemonsLogger)
logger.close
logger.close
end
end
it 'creates logger with IO object' do
io = double('dummy io', write: nil, close: nil)
expect(DaemonsLogger.new(io).class).to eq(DaemonsLogger)
end
end
context 'reopen' do
it 'reopens IOs' do
Tempfile.open('daemons_logger') do |f|
logger = DaemonsLogger.new(f.path)
expect(STDOUT).to receive(:reopen).twice
logger.hook_stdout!
expect(STDERR).to receive(:reopen).twice
logger.hook_stderr!
logger.reopen
io = logger.instance_variable_get(:@log)
allow(logger).to receive(:reopen!) { raise }
logger.reopen
end
end
end
end
| ruby | Apache-2.0 | 3ef2dd04786ce822ce4f2e66d54d17ecf98fe8c9 | 2026-01-04T17:55:40.813256Z | false |
treasure-data/perfectqueue | https://github.com/treasure-data/perfectqueue/blob/3ef2dd04786ce822ce4f2e66d54d17ecf98fe8c9/spec/task_spec.rb | spec/task_spec.rb | require 'spec_helper'
describe PerfectQueue::Task do
describe '.new' do
it 'returns a Task' do
task = Task.new(double, double)
expect(task).to be_an_instance_of(Task)
end
end
describe '#config' do
it 'returns the client' do
config = double('config')
client = double('client', config: config)
key = double('key')
task = Task.new(client, key)
expect(task).to be_an_instance_of(Task)
expect(task.client).to eq(client)
expect(task.config).to eq(config)
end
end
describe '#preempt' do
it 'returns inspected string' do
client = double('client')
key = double('key')
task = Task.new(client, key)
options = double('options')
ret = double('ret')
expect(client).to receive(:preempt).with(key, options).exactly(:once).and_return(ret)
expect(task.preempt(options)).to eq(ret)
end
end
describe '#inspect' do
it 'returns inspected string' do
key = double('key')
task = Task.new(double('client'), key)
expect(task.inspect).to eq("#<PerfectQueue::Task @key=#{key.inspect}>")
end
end
describe '#update_data!' do
context 'PLT-4238' do
let (:config){ {type: 'rdb_compat', url: 'mysql2://root:@localhost/perfectqueue_test', table: 'test_queues'} }
let (:client){ Client.new(config) }
before do
client.backend.db.tap{|s| s.tables.each{|t| s.drop_table(t) } }
client.init_database
client.submit('key', 'test1', {'foo' => 1}, {compression: 'gzip'})
end
it 'keeps the data compressed' do
tasks = client.acquire
expect(tasks.size).to eq 1
task = tasks.first
expect(task.compression).to eq 'gzip'
task.update_data!('hoge' => 2)
task.release!
tasks = client.acquire
task = tasks.first
expect(tasks.size).to eq 1
expect(task.compression).to eq 'gzip'
data = task.data
expect(data['foo']).to eq 1
expect(data['hoge']).to eq 2
end
end
end
end
| ruby | Apache-2.0 | 3ef2dd04786ce822ce4f2e66d54d17ecf98fe8c9 | 2026-01-04T17:55:40.813256Z | false |
treasure-data/perfectqueue | https://github.com/treasure-data/perfectqueue/blob/3ef2dd04786ce822ce4f2e66d54d17ecf98fe8c9/spec/rdb_compat_backend_spec.rb | spec/rdb_compat_backend_spec.rb | require 'spec_helper'
require 'perfectqueue/backend/rdb_compat'
describe Backend::RDBCompatBackend do
include QueueTest
let :client do
queue.client
end
let :backend do
client.backend
end
it 'backward compatibility 1' do
backend.db["INSERT INTO test_tasks (id, timeout, data, created_at, resource) VALUES (?, ?, ?, ?, ?)", "merge_type.1339801200", 1339801201, {'url'=>nil}.to_json, 1339801201, "1"].insert
ts = backend.acquire(60, 1, {:now=>1339801203})
expect(ts).not_to eq(nil)
t = ts[0]
expect(t.data).to eq({'url'=>nil})
expect(t.type).to eq('merge_type')
expect(t.key).to eq('merge_type.1339801200')
end
it 'backward compatibility 2' do
backend.db["INSERT INTO test_tasks (id, timeout, data, created_at, resource) VALUES (?, ?, ?, ?, ?)", "query.379474", 1339801201, {'query_id'=>32}.to_json, 1339801201, nil].insert
ts = backend.acquire(60, 1, {:now=>1339801203})
expect(ts).not_to eq(nil)
t = ts[0]
expect(t.data).to eq({'query_id'=>32})
expect(t.type).to eq('query')
expect(t.key).to eq('query.379474')
end
it 'resource limit' do
time = Time.now.to_i
3.times do |i|
queue.submit("test_#{i}", 'user01', {}, :now=>time-(i+1), :user=>'u1', :max_running=>2)
end
queue.submit("test_5", 'user02', {}, :now=>time, :user=>'u2', :max_running=>2)
task1 = queue.poll(:now=>time+10)
expect(task1).not_to eq(nil)
expect(task1.type).to eq('user01')
task2 = queue.poll(:now=>time+10)
expect(task2).not_to eq(nil)
expect(task2.type).to eq('user02')
task3 = queue.poll(:now=>time+10)
expect(task3).not_to eq(nil)
expect(task3.type).to eq('user01')
task4 = queue.poll(:now=>time+10)
expect(task4).to eq(nil)
task1.finish!
task5 = queue.poll(:now=>time+10)
expect(task5).not_to eq(nil)
expect(task5.type).to eq('user01')
end
it 'gzip data compression' do
time = Time.now.to_i
queue.submit("test", 'user01', {'data'=>'test'}, :now=>time, :user=>'u1', :max_running=>2, :compression=>'gzip')
task1 = queue.poll(:now=>time+10)
expect(task1).not_to eq(nil)
expect(task1.data).to eq({'data'=>'test'})
end
end
describe Backend::RDBCompatBackend do
let (:now){ Time.now.to_i }
let (:client){ double('client') }
let (:table){ 'test_queues' }
let (:config){ {url: 'mysql2://root:@localhost/perfectqueue_test', table: table} }
let (:db) do
d = Backend::RDBCompatBackend.new(client, config)
s = d.db
s.tables.each{|t| s.drop_table(t) }
d.init_database({})
d
end
context '.new' do
let (:client){ double('client') }
let (:table){ double('table') }
it 'raises error unless url' do
expect{Backend::RDBCompatBackend.new(client, {})}.to raise_error(ConfigError)
end
it 'raises error unless table' do
expect{Backend::RDBCompatBackend.new(client, {url: ''})}.to raise_error(ConfigError)
end
it 'supports mysql' do
expect(Backend::RDBCompatBackend.new(client, config)).to be_an_instance_of(Backend::RDBCompatBackend)
expect(db.instance_variable_get(:@disable_resource_limit)).to be_falsey
end
it 'doesn\'t support postgres' do
config = {url: 'postgres://localhost', table: table}
expect{Backend::RDBCompatBackend.new(client, config)}.to raise_error(ConfigError)
end
it 'with use_connection_pooling' do
config = {url: 'mysql2://root:@localhost/perfectqueue_test', table: table, use_connection_pooling: true}
db = Backend::RDBCompatBackend.new(client, config)
expect(db.instance_variable_get(:@use_connection_pooling)).to eq true
end
it 'disable_resource_limit' do
config = {url: 'mysql2://root:@localhost/perfectqueue_test', table: table, disable_resource_limit: true}
db = Backend::RDBCompatBackend.new(client, config)
expect(db.instance_variable_get(:@disable_resource_limit)).to be_truthy
end
end
context '#init_database' do
let (:db) do
d = Backend::RDBCompatBackend.new(client, config)
s = d.db
s.tables.each{|t| s.drop_table(t) }
d
end
it 'creates the table' do
db.init_database({})
end
it 'raises DatabaseError if already exists' do
expect(STDERR).to receive(:puts)
db.init_database({})
expect{db.init_database({})}.to raise_error(Sequel::DatabaseError)
end
it 'drops the table if force: true' do
db.init_database({})
db.init_database({force: true})
end
end
context '#get_task_metadata' do
before do
db.submit('key', 'test', nil, {})
end
it 'fetches a metadata' do
expect(db.get_task_metadata('key', {})).to be_an_instance_of(TaskMetadata)
end
it 'raises error if non exist key' do
expect(STDERR).to receive(:puts)
expect{db.get_task_metadata('nonexistent', {})}.to raise_error(NotFoundError)
end
end
context '#preempt' do
subject { db.preempt(nil, nil, nil) }
it { expect{ subject }.to raise_error(NotSupportedError) }
end
context '#list' do
before do
db.submit('key', 'test', nil, {})
end
it 'lists a metadata' do
db.list({}) do |x|
expect(x).to be_an_instance_of(TaskWithMetadata)
expect(x.key).to eq('key')
end
end
end
context '#submit' do
it 'returns true' do
expect(db.submit('key', 'test', nil, {})).to be_an_instance_of(Task)
end
it 'returns true (gzip)' do
expect(db.submit('key', 'test', nil, {compression: 'gzip'})).to be_an_instance_of(Task)
end
it 'returns nil if duplication' do
expect(db.submit('key', 'test', nil, {})).to be_an_instance_of(Task)
expect{db.submit('key', 'test', nil, {})}.to raise_error(IdempotentAlreadyExistsError)
end
end
context '#acquire' do
let (:key){ 'key' }
let (:task_token){ Backend::RDBCompatBackend::Token.new(key) }
let (:alive_time){ 42 }
let (:max_acquire){ 42 }
context 'no tasks' do
it 'returns nil' do
expect(db.acquire(alive_time, max_acquire, {})).to be_nil
end
end
context 'some tasks' do
before do
db.submit(key, 'test', nil, {})
end
it 'returns a task' do
ary = db.acquire(alive_time, max_acquire, {})
expect(ary).to be_an_instance_of(Array)
expect(ary.size).to eq(1)
expect(ary[0]).to be_an_instance_of(AcquiredTask)
end
end
context 'disable_resource_limit' do
let (:config) do
{url: 'mysql2://root:@localhost/perfectqueue_test', table: table, disable_resource_limit: true}
end
before do
db.submit(key, 'test', nil, {})
end
it 'returns a task' do
ary = db.acquire(alive_time, max_acquire, {})
expect(ary).to be_an_instance_of(Array)
expect(ary.size).to eq(1)
expect(ary[0]).to be_an_instance_of(AcquiredTask)
end
end
context 'some tasks' do
let :t0 do now - 300 end
let :t1 do now - 200 end
let :t2 do now - 100 end
before do
db.submit('key1', 'test1', nil, {now: t0})
db.submit('key2', 'test2', nil, {now: t0})
db.submit('key3', 'test3', nil, {now: t1})
db.submit('key4', 'test4', nil, {now: t2})
db.submit('key5', 'test5', nil, {now: t2})
end
it 'returns 5 tasks' do
ary = []
db.list({}){|task| ary << task }
expect(ary[0].timeout.to_i).to eq t0
expect(ary[1].timeout.to_i).to eq t0
expect(ary[2].timeout.to_i).to eq t1
expect(ary[3].timeout.to_i).to eq t2
expect(ary[4].timeout.to_i).to eq t2
ary = db.acquire(alive_time, max_acquire, {now: now})
expect(ary).to be_an_instance_of(Array)
expect(ary.size).to eq(5)
expect(ary[0]).to be_an_instance_of(AcquiredTask)
expect(ary[1]).to be_an_instance_of(AcquiredTask)
expect(ary[2]).to be_an_instance_of(AcquiredTask)
expect(ary[3]).to be_an_instance_of(AcquiredTask)
expect(ary[4]).to be_an_instance_of(AcquiredTask)
now1 = Time.at(now + alive_time)
expect(now1).to receive(:to_time).exactly(5).times.and_call_original
db.list({}){|task| expect(task.timeout).to eq now1.to_time }
end
end
end
context '#force_finish' do
let (:key){ double('key') }
let (:token){ double('token') }
let (:retention_time){ double('retention_time') }
let (:options){ double('options') }
let (:ret){ double('ret') }
before { expect(Backend::RDBCompatBackend::Token).to receive(:new).with(key).and_return(token) }
it 'calls #finish' do
expect(db).to receive(:finish).with(token, retention_time, options).exactly(:once).and_return(ret)
expect(db.force_finish(key, retention_time, options)).to eq ret
end
end
context '#finish' do
let (:key){ 'key' }
let (:task_token){ Backend::RDBCompatBackend::Token.new(key) }
let (:retention_time) { 42 }
let (:delete_timeout){ now - Backend::RDBCompatBackend::DELETE_OFFSET + retention_time }
let (:options){ {now: now} }
context 'have the task' do
before do
db.submit(key, 'test', nil, {})
expect(db.db).to receive(:[]).with(kind_of(String), delete_timeout, key).and_call_original
end
it 'returns nil' do
expect(db.finish(task_token, retention_time, options)).to be_nil
row = db.db.fetch("SELECT created_at FROM `#{table}` WHERE id=? LIMIT 1", key).first
expect(row[:created_at]).to be_nil
end
end
context 'already finished' do
it 'raises IdempotentAlreadyFinishedError' do
expect(STDERR).to receive(:puts)
expect{db.finish(task_token, retention_time, options)}.to raise_error(IdempotentAlreadyFinishedError)
end
end
end
context '#heartbeat' do
let (:key){ 'key' }
let (:task_token){ Backend::RDBCompatBackend::Token.new(key) }
let (:retention_time) { 42 }
let (:delete_timeout){ now + retention_time }
let (:options){ {now: now} }
before{ allow(STDERR).to receive(:puts) }
context 'have a queueuled task' do
before do
db.submit(key, 'test', nil, {})
end
it 'returns nil if next_run_time is not updated' do
expect(db.heartbeat(task_token, 0, {now: now})).to be_nil
end
it 'returns nil even if next_run_time is updated' do
expect(db.heartbeat(task_token, 1, {})).to be_nil
end
end
context 'no tasks' do
it 'raises PreemptedError' do
expect{db.heartbeat(task_token, 0, {})}.to raise_error(PreemptedError)
end
end
context 'finished task' do
before do
db.submit(key, 'test', nil, {})
db.finish(task_token, retention_time, options)
end
it 'raises PreemptedError' do
expect{db.heartbeat(task_token, 0, {})}.to raise_error(PreemptedError)
end
end
end
context '#connect' do
context 'normal' do
it 'returns now' do
expect(db.__send__(:connect){ }).to eq(now)
end
end
context 'error' do
it 'returns block result' do
expect(RuntimeError).to receive(:new).exactly(Backend::RDBCompatBackend::MAX_RETRY).and_call_original
allow(STDERR).to receive(:puts)
allow(db).to receive(:sleep)
expect do
db.__send__(:connect) do
raise RuntimeError.new('try restarting transaction')
end
end.to raise_error(RuntimeError)
end
context 'cannot connect' do
let (:config){ {url: 'mysql2://root:@nonexistent/perfectqueue_test', table: table} }
it 'raises Sequel::DatabaseConnectionError' do
allow(STDERR).to receive(:puts)
expect { Backend::RDBCompatBackend.new(client, config) }.to raise_error(Sequel::DatabaseConnectionError)
end
end
end
end
context '#create_attributes' do
let (:data){ Hash.new }
let (:row) do
r = double('row')
allow(r).to receive(:[]){|k| data[k] }
r
end
it 'returns a hash consisting the data of the row' do
data[:timezone] = timezone = double('timezone')
data[:delay] = delay = double('delay')
data[:cron] = cron = double('cron')
data[:next_time] = next_time = double('next_time')
data[:timeout] = timeout = double('timeout')
data[:data] = '{"type":"foo.bar","a":"b"}'
data[:id] = 'hoge'
expect(db.__send__(:create_attributes, now, row)).to eq(
status: :finished,
created_at: nil,
data: {"a"=>"b"},
user: nil,
timeout: timeout,
max_running: nil,
type: 'foo.bar',
message: nil,
node: nil,
compression: nil,
)
end
it 'returns {} if data\'s JSON is broken' do
data[:data] = '}{'
data[:id] = 'foo.bar.baz'
expect(db.__send__(:create_attributes, now, row)).to eq(
status: :finished,
created_at: nil,
data: {},
user: nil,
timeout: nil,
max_running: nil,
type: 'foo',
message: nil,
node: nil,
compression: nil,
)
end
it 'uses id[/\A[^.]*/] if type is empty string' do
data[:data] = '{"type":""}'
data[:id] = 'foo.bar.baz'
expect(db.__send__(:create_attributes, now, row)).to eq(
status: :finished,
created_at: nil,
data: {},
user: nil,
timeout: nil,
max_running: nil,
type: 'foo',
message: nil,
node: nil,
compression: nil,
)
end
it 'uses id[/\A[^.]*/] if type is nil' do
data[:id] = 'foo.bar.baz'
expect(db.__send__(:create_attributes, now, row)).to eq(
status: :finished,
created_at: nil,
data: {},
user: nil,
timeout: nil,
max_running: nil,
type: 'foo',
message: nil,
node: nil,
compression: nil,
)
end
end
context '#connect_locked' do
let (:ret){ double('ret') }
before do
end
it 'ensures to unlock on error with use_connection_pooling' do
#expect(STDERR).to receive(:puts)
config = {url: 'mysql2://root:@localhost/perfectqueue_test', table: table, use_connection_pooling: true}
db1 = Backend::RDBCompatBackend.new(client, config)
#expect{ db.__send__(:connect_locked){ raise } }.to raise_error(RuntimeError)
db1.__send__(:connect_locked){ ret }
stub_const('PerfectQueue::Backend::RDBCompatBackend::LOCK_WAIT_TIMEOUT', 5)
db2 = Backend::RDBCompatBackend.new(client, config)
Timeout.timeout(3) do
expect( db2.__send__(:connect_locked){ ret }).to eq ret
end
end
end
context '#create_attributes' do
let (:data){ {data: '{"type":"foo"}'} }
let (:timeout){ double('timeout') }
let (:row) do
r = double('row')
allow(r).to receive(:[]){|k| data[k] }
r
end
context 'created_at is nil' do
it 'returns a hash consisting the data of the row' do
data[:resource] = user = double('user')
data[:max_running] = max_running = double('max_running')
data[:cron] = cron = double('cron')
data[:next_time] = next_time = double('next_time')
data[:timeout] = timeout
data[:data] = '{"type":"foo.bar","a":"b"}'
data[:id] = 'hoge'
expect(db.__send__(:create_attributes, now, row)).to eq(
status: TaskStatus::FINISHED,
created_at: nil,
data: {"a"=>"b"},
type: 'foo.bar',
user: user,
timeout: timeout,
max_running: max_running,
message: nil,
node: nil,
compression: nil,
)
end
it 'returns {} if data\'s JSON is broken' do
data[:data] = '}{'
data[:id] = 'foo.bar.baz'
r = db.__send__(:create_attributes, now, row)
expect(r[:type]).to eq 'foo'
end
it 'uses id[/\A[^.]*/] if type is empty string' do
data[:data] = '{"type":""}'
data[:id] = 'foo.bar.baz'
r = db.__send__(:create_attributes, now, row)
expect(r[:type]).to eq 'foo'
end
it 'uses id[/\A[^.]*/] if type is nil' do
data[:id] = 'foo.bar.baz'
r = db.__send__(:create_attributes, now, row)
expect(r[:type]).to eq 'foo'
end
context 'created_at is nil' do
it 'status is :finished' do
data[:created_at] = nil
r = db.__send__(:create_attributes, now, row)
expect(r[:status]).to eq TaskStatus::FINISHED
end
end
end
context 'created_at > 0' do
context 'timeout' do
it 'status is :waiting' do
data[:created_at] = 1
data[:timeout] = 0
r = db.__send__(:create_attributes, now, row)
expect(r[:status]).to eq TaskStatus::WAITING
end
end
it 'status is :running' do
data[:created_at] = 1
data[:timeout] = now+100
r = db.__send__(:create_attributes, now, row)
expect(r[:status]).to eq TaskStatus::RUNNING
end
end
end
end
| ruby | Apache-2.0 | 3ef2dd04786ce822ce4f2e66d54d17ecf98fe8c9 | 2026-01-04T17:55:40.813256Z | false |
treasure-data/perfectqueue | https://github.com/treasure-data/perfectqueue/blob/3ef2dd04786ce822ce4f2e66d54d17ecf98fe8c9/spec/runner_spec.rb | spec/runner_spec.rb | require 'spec_helper'
describe PerfectQueue::Runner do
describe '#new' do
it 'creates with task' do
expect(PerfectQueue::Runner.new(double('task'))).to be_a(PerfectQueue::Runner)
end
end
describe '#task' do
let (:task) { double('task') }
let (:runner) { PerfectQueue::Runner.new(task) }
it 'returns given task' do
expect(runner.task).to eq(task)
end
end
describe '#queue' do
let (:runner) { PerfectQueue::Runner.new(double('task', client: 1)) }
it 'returns a queue' do
queue = runner.queue
expect(queue).to be_a(PerfectQueue::Queue)
end
end
describe '#kill' do
let (:runner) { PerfectQueue::Runner.new(double('task')) }
it 'always returns nil' do
expect(runner.kill(nil)).to be_nil
end
end
end
| ruby | Apache-2.0 | 3ef2dd04786ce822ce4f2e66d54d17ecf98fe8c9 | 2026-01-04T17:55:40.813256Z | false |
treasure-data/perfectqueue | https://github.com/treasure-data/perfectqueue/blob/3ef2dd04786ce822ce4f2e66d54d17ecf98fe8c9/spec/backend_spec.rb | spec/backend_spec.rb | require 'spec_helper'
describe PerfectQueue::Backend do
describe '.new_backend' do
it 'raises error if config[:type] is nil' do
expect{Backend.new_backend(nil, {})}.to raise_error(ConfigError)
end
end
end
| ruby | Apache-2.0 | 3ef2dd04786ce822ce4f2e66d54d17ecf98fe8c9 | 2026-01-04T17:55:40.813256Z | false |
treasure-data/perfectqueue | https://github.com/treasure-data/perfectqueue/blob/3ef2dd04786ce822ce4f2e66d54d17ecf98fe8c9/spec/engine_spec.rb | spec/engine_spec.rb | require 'spec_helper'
describe PerfectQueue::Engine do
let (:logger){ double('logger').as_null_object }
let (:engine) do
config = {logger: logger, processor_type: :thread}
Engine.new(double, config)
end
describe '.new' do
it 'returns an Engine with ForkProcessor for processor_type: nil' do
config = {logger: double('logger'), processor_type: nil}
engine = Engine.new(double, config)
expect(engine).to be_an_instance_of(Engine)
expect(engine.processors).to be_a(Array)
expect(engine.processors.size).to eq(1)
expect(engine.processors[0]).to be_an_instance_of(Multiprocess::ForkProcessor)
end
it 'returns an Engine with ForkProcessor for processor_type: :process' do
config = {logger: double('logger'), processor_type: :process}
engine = Engine.new(double, config)
expect(engine).to be_an_instance_of(Engine)
expect(engine.processors).to be_a(Array)
expect(engine.processors.size).to eq(1)
expect(engine.processors[0]).to be_an_instance_of(Multiprocess::ForkProcessor)
end
it 'returns an Engine with ThreadProcessor for processor_type: :thread' do
config = {logger: double('logger'), processor_type: :thread}
engine = Engine.new(double, config)
expect(engine).to be_an_instance_of(Engine)
expect(engine.processors).to be_a(Array)
expect(engine.processors.size).to eq(1)
expect(engine.processors[0]).to be_an_instance_of(Multiprocess::ThreadProcessor)
end
it 'returns an Engine with ForkProcessor for processor_type: :invalid' do
config = {logger: double('logger'), processor_type: :invalid}
expect{Engine.new(double, config)}.to raise_error(ConfigError)
end
end
describe '#run' do
before do
processor_klass = (PerfectQueue::Multiprocess::ThreadProcessor)
allow(processor_klass).to receive(:new) do
processor = double('processor')
expect(processor).to receive(:keepalive).exactly(:twice)
expect(processor).to receive(:stop)
expect(processor).to receive(:join)
processor
end
expect(engine).to receive(:sleep).with(0...2)
end
it 'runs until stopped' do
Thread.start{sleep 1; engine.stop(true) }
engine.run
end
end
describe '#restart' do
context 'previous num_processors is small' do
it 'increase the number of processors' do
config = {logger: logger, processor_type: :thread}
engine = Engine.new(double, config)
expect(engine.processors.size).to eq(1)
config[:processors] = 3
expect(engine.restart(true, config)).to eq(engine)
expect(engine.processors.size).to eq(3)
end
end
context 'previous num_processors is large' do
it 'decrease the number of processors' do
config = {logger: logger, processor_type: :thread, processors: 2}
engine = Engine.new(double, config)
config[:processors] = 1
expect(engine.restart(true, config)).to eq(engine)
expect(engine.processors.size).to eq(1)
end
end
context 'same number of processors' do
it 'decrease the number of processors' do
config = {logger: logger, processor_type: :thread}
engine = Engine.new(double, config)
expect(engine.restart(true, config)).to eq(engine)
expect(engine.processors.size).to eq(1)
end
end
end
describe '#stop' do
let (:immediate){ double('immediate') }
before do
engine.processors.each do |c|
expect(c).to receive(:stop).with(immediate)
end
end
it '@processors.each {|c| c.stop(immediate) }' do
expect(engine.stop(immediate)).to eq(engine)
expect(engine.instance_variable_get(:@finish_flag).set?).to be true
end
end
describe '#join' do
before do
engine.processors.each do |c|
expect(c).to receive(:join)
end
end
it '@processors.each {|c| c.join }' do
expect(engine.join).to eq(engine)
end
end
describe '#shutdown' do
it 'calls stop and join' do
immediate = double('immediate')
expect(engine).to receive(:stop).with(immediate)
expect(engine).to receive(:join)
engine.shutdown(immediate)
end
end
describe '#replace' do
context 'already replaced' do
before do
engine.instance_variable_set(:@replaced_pid, double)
end
it 'returns nil' do
expect(engine).not_to receive(:stop)
expect(engine.replace(double, double)).to be_nil
end
end
context 'not replaced yet' do
it 'calls spawn with [$0]+ARGV' do
immediate = double('immediate')
expect(engine).to receive(:stop).with(immediate)
expect(Process).to receive(:spawn).with(*([$0]+ARGV))
engine.replace(immediate)
end
it 'calls spawn with given command' do
immediate = double('immediate')
command = double('command')
expect(engine).to receive(:stop).with(immediate)
expect(Process).to receive(:spawn).with(command)
engine.replace(immediate, command)
end
end
end
describe '#logrotated' do
before do
engine.processors.each do |c|
expect(c).to receive(:logrotated)
end
end
it '@processors.each {|c| c.logrotated }' do
engine.logrotated
end
end
end
| ruby | Apache-2.0 | 3ef2dd04786ce822ce4f2e66d54d17ecf98fe8c9 | 2026-01-04T17:55:40.813256Z | false |
treasure-data/perfectqueue | https://github.com/treasure-data/perfectqueue/blob/3ef2dd04786ce822ce4f2e66d54d17ecf98fe8c9/spec/task_monitor_spec.rb | spec/task_monitor_spec.rb | require 'spec_helper'
describe PerfectQueue::TaskMonitor do
describe '#kill_task' do
it 'rescues exception' do
tm = PerfectQueue::TaskMonitor.new(logger: double('logger').as_null_object)
task = double('task')
reason = double('reason')
allow(task).to receive_message_chain(:runner, :kill) \
.with(no_args).with(reason){raise}
tm.instance_variable_set(:@task, task)
expect{tm.kill_task(reason)}.to raise_error(RuntimeError)
end
end
describe '#external_task_heartbeat' do
it 'rescues exception' do
tm = PerfectQueue::TaskMonitor.new(logger: double('logger').as_null_object)
task = double('task')
reason = double('reason')
epoch = double('epoch')
allow(Time).to receive_message_chain(:now, :to_i){epoch}
ret = double('ret')
tm.instance_variable_set(:@task, task)
expect(tm.external_task_heartbeat(task){ret}).to eq(ret)
expect(tm.instance_variable_get(:@last_task_heartbeat)).to eq(epoch)
end
end
describe '#run' do
it 'rescues unknown error' do
config = {logger: double('logger').as_null_object}
force_stop = double('force_stop')
expect(force_stop).to receive(:call).with(no_args).exactly(:once)
tm = PerfectQueue::TaskMonitor.new(config, nil, force_stop)
allow(Time).to receive(:now){raise}
tm.run
end
end
describe '#task_heartbeat' do
let (:tm){ PerfectQueue::TaskMonitor.new(logger: double('logger').as_null_object) }
let (:err){ StandardError.new('heartbeat preempted') }
before do
task = double('task')
allow(task).to receive(:heartbeat!){ raise err }
tm.set_task(task, double('runner'))
end
it 'calls kill_task($!) on heartbeat error' do
expect(tm).to receive(:kill_task).with(err).exactly(:once)
tm.__send__(:task_heartbeat)
end
end
end
describe PerfectQueue::TaskMonitorHook do
let (:task) do
obj = AcquiredTask.new(double(:client).as_null_object, 'key', {}, double)
tm = TaskMonitor.new(logger: double('logger').as_null_object)
tm.set_task(obj, double('runner'))
obj
end
describe 'finish!' do
it { task.finish! }
end
describe 'release!' do
it { task.release! }
end
describe 'retry!' do
it { task.retry! }
end
describe 'update_data!' do
it { task.update_data!({}) }
end
end
| ruby | Apache-2.0 | 3ef2dd04786ce822ce4f2e66d54d17ecf98fe8c9 | 2026-01-04T17:55:40.813256Z | false |
treasure-data/perfectqueue | https://github.com/treasure-data/perfectqueue/blob/3ef2dd04786ce822ce4f2e66d54d17ecf98fe8c9/spec/spec_helper.rb | spec/spec_helper.rb | $LOAD_PATH.unshift(File.expand_path('../lib', File.dirname(__FILE__)))
if ENV['SIMPLE_COV']
require 'simplecov'
SimpleCov.start do
add_filter 'pkg/'
add_filter 'vendor/'
end
end
require 'perfectqueue'
if ENV["CI"]
require 'coveralls'
Coveralls.wear!
end
require 'fileutils'
require 'tempfile'
module QueueTest
def self.included(mod)
mod.module_eval do
let :queue_config do
{
:type => 'rdb_compat',
:url => "mysql2://root:@localhost/perfectqueue_test",
:table => 'test_tasks',
:processor_type => 'thread',
:cleanup_interval => 0, # for test
#:disable_resource_limit => true, # TODO backend-specific test cases
}
end
let :queue do
PerfectQueue.open(queue_config)
end
before do
queue.client.init_database(:force => true)
end
after do
queue.close
end
end
end
end
include PerfectQueue
| ruby | Apache-2.0 | 3ef2dd04786ce822ce4f2e66d54d17ecf98fe8c9 | 2026-01-04T17:55:40.813256Z | false |
treasure-data/perfectqueue | https://github.com/treasure-data/perfectqueue/blob/3ef2dd04786ce822ce4f2e66d54d17ecf98fe8c9/spec/application/router_spec.rb | spec/application/router_spec.rb | require 'spec_helper'
describe PerfectQueue::Application::Router do
describe '.new' do
it 'returns a PerfectQueue::Application::Router' do
router = Application::Router.new
expect(router).to be_an_instance_of(Application::Router)
end
end
describe '#add' do
let (:router){ Application::Router.new }
let (:sym){ double('sym') }
it 'accepts Regexp' do
router.add(/\Afoo\z/, sym, double)
expect(router.patterns[0]).to eq([/\Afoo\z/, sym])
end
it 'accepts String' do
router.add('foo', sym, double)
expect(router.patterns[0]).to eq([/\Afoo\z/, sym])
end
it 'accepts Symbol' do
router.add(:foo, sym, double)
expect(router.patterns[0]).to eq([/\Afoo\z/, sym])
end
it 'raises for others' do
expect{router.add(nil, nil, nil)}.to raise_error(ArgumentError)
end
end
describe '#route' do
let (:router) do
rt = Application::Router.new
rt.add(/\Afoo\z/, :TestHandler, double)
rt
end
let (:handler){ double('handler') }
before do
Application::Router::TestHandler = handler
end
after do
Application::Router.__send__(:remove_const, :TestHandler)
end
it 'return related handler' do
expect(router.route('foo')).to eq(handler)
end
end
end
| ruby | Apache-2.0 | 3ef2dd04786ce822ce4f2e66d54d17ecf98fe8c9 | 2026-01-04T17:55:40.813256Z | false |
treasure-data/perfectqueue | https://github.com/treasure-data/perfectqueue/blob/3ef2dd04786ce822ce4f2e66d54d17ecf98fe8c9/spec/application/base_spec.rb | spec/application/base_spec.rb | require 'spec_helper'
describe PerfectQueue::Application::Base do
describe '.decider=' do
it 'defines .decider which returns the decider' do
decider_klass = double('decider_klass')
klass = PerfectQueue::Application::Base
allow(klass).to receive(:decider).and_call_original
allow(klass).to receive(:decider=).with(decider_klass).and_call_original
expect(klass.decider = decider_klass).to eq(decider_klass)
expect(klass.decider).to eq(decider_klass)
end
end
describe '.decider' do
it 'returns DefaultDecider' do
expect(PerfectQueue::Application::Base.decider).to eq(PerfectQueue::Application::DefaultDecider)
end
end
describe '#new' do
let (:task){ double('task') }
let (:base) { PerfectQueue::Application::Base.new(task) }
it 'calls super and set decider'do
expect(base).to be_an_instance_of(PerfectQueue::Application::Base)
expect(base.instance_variable_get(:@task)).to eq(task)
expect(base.instance_variable_get(:@decider)).to be_an_instance_of(Application::DefaultDecider)
end
end
describe '#run' do
let (:base) { PerfectQueue::Application::Base.new(double('task')) }
it 'returns nil if before_perform returns false' do
allow(base).to receive(:before_perform).and_return(false)
expect(base.run).to be_nil
end
it 'returns nil' do
expect(base).to receive(:before_perform).exactly(:once).and_call_original
expect(base).to receive(:perform).exactly(:once).and_return(nil)
expect(base).to receive(:after_perform).exactly(:once).and_call_original
expect(base.run).to be_nil
end
it 'calls unexpected_error_raised on error' do
allow(base).to receive(:before_perform).exactly(:once).and_call_original
allow(base).to receive(:perform).exactly(:once) { raise }
allow(base).to receive(:decide!).with(:unexpected_error_raised, error: kind_of(Exception)).exactly(:once)
expect(base.run).to be_nil
end
end
describe '#before_perform' do
let (:base) { PerfectQueue::Application::Base.new(double('task')) }
it 'returns true' do
expect(base.before_perform).to be true
end
end
describe '#after_perform' do
let (:base) { PerfectQueue::Application::Base.new(double('task')) }
it 'returns nil' do
expect(base.after_perform).to be_nil
end
end
describe '#decide!' do
let (:base) do
decider = double('decider')
expect(decider).to receive(:decide!).with(:type, :option).exactly(:once)
decider_klass = double('decider_klass')
allow(decider_klass).to receive(:new).with(kind_of(PerfectQueue::Application::Base)).and_return(decider)
klass = PerfectQueue::Application::Base
allow(klass).to receive(:decider).and_call_original
allow(klass).to receive(:decider=).with(decider_klass).and_call_original
klass.decider = decider_klass
klass.new(double('task'))
end
it 'calls decider.decide' do
expect(base.decide!(:type, :option)).to be_nil
end
end
end
| ruby | Apache-2.0 | 3ef2dd04786ce822ce4f2e66d54d17ecf98fe8c9 | 2026-01-04T17:55:40.813256Z | false |
treasure-data/perfectqueue | https://github.com/treasure-data/perfectqueue/blob/3ef2dd04786ce822ce4f2e66d54d17ecf98fe8c9/spec/application/decider_spec.rb | spec/application/decider_spec.rb | require 'spec_helper'
describe PerfectQueue::Application::UndefinedDecisionError do
it { is_expected.to be_an_instance_of(PerfectQueue::Application::UndefinedDecisionError) }
it { is_expected.to be_a(Exception) }
end
describe PerfectQueue::Application::Decider do
describe '#new' do
let (:decider) { PerfectQueue::Application::Decider.new(nil) }
it do
expect(decider).to be_an_instance_of(PerfectQueue::Application::Decider)
end
end
describe '#queue' do
let (:queue){ double('queue') }
let (:decider) do
base = double('base')
allow(base).to receive(:queue).exactly(:once).and_return(queue)
PerfectQueue::Application::Decider.new(base)
end
it 'calls @base.queue' do
expect(decider.queue).to eq(queue)
end
end
describe '#task' do
let (:task){ double('task') }
let (:decider) do
base = double('base')
allow(base).to receive(:task).exactly(:once).and_return(task)
PerfectQueue::Application::Decider.new(base)
end
it 'calls @base.task' do
expect(decider.task).to eq(task)
end
end
describe '#decide!' do
let (:decider) { PerfectQueue::Application::Decider.new(nil) }
it 'calls the specified method' do
allow(decider).to receive(:foo).exactly(:once).with(72).and_return(42)
expect(decider.decide!(:foo, 72)).to eq(42)
end
it 'raises UndefinedDecisionError on unknown method' do
expect{ decider.decide!(:foo, 72) }.to raise_error(PerfectQueue::Application::UndefinedDecisionError)
end
end
end
describe PerfectQueue::Application::DefaultDecider do
subject { PerfectQueue::Application::DefaultDecider.new(nil) }
it { is_expected.to be_a(PerfectQueue::Application::Decider) }
it { is_expected.to be_an_instance_of(PerfectQueue::Application::DefaultDecider) }
end
| ruby | Apache-2.0 | 3ef2dd04786ce822ce4f2e66d54d17ecf98fe8c9 | 2026-01-04T17:55:40.813256Z | false |
treasure-data/perfectqueue | https://github.com/treasure-data/perfectqueue/blob/3ef2dd04786ce822ce4f2e66d54d17ecf98fe8c9/spec/application/dispatch_spec.rb | spec/application/dispatch_spec.rb | require 'spec_helper'
describe PerfectQueue::Application::Dispatch do
describe '.new' do
before do
router = Application::Dispatch.router
handler = double('handler')
allow(handler).to receive(:new).and_return(nil)
router.add(/\Afoo\z/, handler, nil)
end
it 'returns a PerfectQueue::Application::Dispatch' do
task = double('task', type: 'foo')
dispatch = Application::Dispatch.new(task)
expect(dispatch).to be_an_instance_of(Application::Dispatch)
end
it 'raises RuntimeError if the task type doesn\'t match' do
task = double('task', type: 'bar')
expect(task).to receive(:retry!).exactly(:once)
expect{Application::Dispatch.new(task)}.to raise_error(RuntimeError)
end
end
end
| ruby | Apache-2.0 | 3ef2dd04786ce822ce4f2e66d54d17ecf98fe8c9 | 2026-01-04T17:55:40.813256Z | false |
treasure-data/perfectqueue | https://github.com/treasure-data/perfectqueue/blob/3ef2dd04786ce822ce4f2e66d54d17ecf98fe8c9/spec/multiprocess/thread_processor_spec.rb | spec/multiprocess/thread_processor_spec.rb | require 'spec_helper'
describe PerfectQueue::Multiprocess::ThreadProcessor do
describe '.new' do
it 'returns a Multiprocess::ThreadProcessor' do
runner = double('runner')
processor_id = double('processor_id')
config = {}
processor = Multiprocess::ThreadProcessor.new(runner, processor_id, config)
expect(processor).to be_an_instance_of(Multiprocess::ThreadProcessor)
expect(processor.instance_variable_get(:@processor_id)).to eq(processor_id)
end
end
describe '#force_stop' do
let (:processor) do
config = {logger: double('logger').as_null_object}
Multiprocess::ThreadProcessor.new(double('runner'), double('processor_id'), config)
end
it 'force_stop' do
processor.force_stop
expect(processor.instance_variable_get(:@finish_flag).set?).to be true
end
end
describe '#run_loop' do
let (:processor) do
config = {logger: double('logger').as_null_object}
Multiprocess::ThreadProcessor.new(double('runner'), double('processor_id'), config)
end
it 'rescues error' do
pq = object_double('PerfectQueue').as_stubbed_const
allow(pq).to receive(:open).and_raise(RuntimeError)
processor.__send__(:run_loop)
end
end
describe '#process' do
let (:runner) do
r = double('runner')
allow(r).to receive(:new).and_raise(RuntimeError)
r
end
let (:processor) do
config = {logger: double('logger').as_null_object}
Multiprocess::ThreadProcessor.new(runner, double('processor_id'), config)
end
it 'rescues error' do
expect{processor.__send__(:process, double('task', key: 1))}.to raise_error(RuntimeError)
end
end
end
| ruby | Apache-2.0 | 3ef2dd04786ce822ce4f2e66d54d17ecf98fe8c9 | 2026-01-04T17:55:40.813256Z | false |
treasure-data/perfectqueue | https://github.com/treasure-data/perfectqueue/blob/3ef2dd04786ce822ce4f2e66d54d17ecf98fe8c9/spec/multiprocess/child_process_spec.rb | spec/multiprocess/child_process_spec.rb | require 'spec_helper'
describe PerfectQueue::Multiprocess::ChildProcess do
let (:runner_insntace){ double('runner') }
let (:runner) do
runner = double('Runner')
allow(runner).to receive(:new).and_return(runner_insntace)
runner
end
let (:processor_id){ double('processor_id') }
let (:logger){ double('logger').as_null_object }
let (:config){ {logger: logger} }
let (:wpipe){ double('wpipe', sync: true, :'sync=' => true) }
let (:pr){ Multiprocess::ChildProcess.new(runner, processor_id, config, wpipe) }
describe '.run' do
let (:pr){ double('child_process') }
before do
expect(Multiprocess::ChildProcess).to receive(:new).exactly(:once) \
.with(runner, processor_id, config, wpipe).and_return(pr)
expect(pr).to receive(:run).exactly(:once)
end
it 'runs an instance' do
Multiprocess::ChildProcess.run(runner, processor_id, config, wpipe)
end
end
describe '.new' do
it 'returns a Multiprocess::ChildProcess' do
pr = Multiprocess::ChildProcess.new(runner, processor_id, config, wpipe)
expect(pr).to be_an_instance_of(Multiprocess::ChildProcess)
expect(pr.instance_variable_get(:@wpipe)).to eq(wpipe)
expect(pr.instance_variable_get(:@sig)).to be_a(SignalThread)
end
end
describe '#stop' do
it 'call super' do
pr.stop(true)
end
end
describe '#keepalive' do
it { pr.keepalive }
end
describe '#logrotated' do
it do
expect(logger).to receive(:reopen!).with(no_args).exactly(:once)
pr.logrotated
end
end
describe '#child_heartbeat' do
let (:packet){ Multiprocess::ChildProcess::HEARTBEAT_PACKET }
it 'write HEARTBEAT_PACKET' do
expect(wpipe).to receive(:write).with(packet).exactly(:once)
pr.child_heartbeat
end
it 'rescue an error' do
expect(wpipe).to receive(:write).with(packet).exactly(:once) \
.and_raise(RuntimeError)
expect(pr).to receive(:force_stop).exactly(:once)
pr.child_heartbeat
end
end
describe '#force_stop' do
it 'calls exit! 137' do
expect(Process).to receive(:kill).with(:KILL, Process.pid)
expect(pr).to receive(:exit!).with(137).exactly(:once)
pr.force_stop
end
end
describe '#process' do
let (:task){ double('task', key: double) }
before do
expect(runner_insntace).to receive(:run)
end
context 'max_request_per_child is nil' do
it 'runs' do
pr.process(task)
end
end
context 'max_request_per_child is set' do
before do
pr.instance_variable_set(:@max_request_per_child, 2)
end
it 'counts children if request_per_child is still small' do
expect(pr).not_to receive(:stop)
pr.instance_variable_set(:@request_per_child, 1)
pr.process(task)
expect(pr.instance_variable_get(:@request_per_child)).to eq(2)
end
it 'stops children if request_per_child exceeds the limit' do
expect(pr).to receive(:stop).with(false).exactly(:once)
pr.instance_variable_set(:@request_per_child, 2)
pr.process(task)
expect(pr.instance_variable_get(:@request_per_child)).to eq(3)
end
end
end
context 'signal handling' do
before do
allow(PerfectQueue).to receive(:open) do
flag = pr.instance_variable_get(:@finish_flag)
Thread.pass until flag.set?
end
end
it 'calls stop(false) SIGTERM' do
expect(pr).to receive(:stop).with(false).and_call_original
Process.kill(:TERM, Process.pid)
pr.run
end
it 'calls stop(false) SIGINT' do
expect(pr).to receive(:stop).with(false).and_call_original
Process.kill(:INT, Process.pid)
pr.run
end
it 'calls stop(true) SIGQUIT' do
expect(pr).to receive(:stop).with(true).and_call_original
Process.kill(:QUIT, Process.pid)
pr.run
end
it 'calls stop(false) SIGUSR1' do
expect(pr).to receive(:stop).with(false).and_call_original
Process.kill(:USR1, Process.pid)
pr.run
end
it 'calls stop(true) SIGHUP' do
expect(pr).to receive(:stop).with(true).and_call_original
Process.kill(:HUP, Process.pid)
pr.run
end
it 'calls stop(false) on SIGCONT' do
expect(pr).to receive(:stop).with(false).and_call_original
Process.kill(:CONT, Process.pid)
pr.run
end
it 'calls stop(true) on SIGWINCH' do
expect(pr).to receive(:stop).with(true).and_call_original
Process.kill(:WINCH, Process.pid)
pr.run
end
it 'calls logrotated on SIGUSR2' do
expect(pr).to receive(:logrotated){ pr.stop(true) }
Process.kill(:USR2, Process.pid)
pr.run
end
end
end
| ruby | Apache-2.0 | 3ef2dd04786ce822ce4f2e66d54d17ecf98fe8c9 | 2026-01-04T17:55:40.813256Z | false |
treasure-data/perfectqueue | https://github.com/treasure-data/perfectqueue/blob/3ef2dd04786ce822ce4f2e66d54d17ecf98fe8c9/spec/multiprocess/child_process_monitor_spec.rb | spec/multiprocess/child_process_monitor_spec.rb | require 'spec_helper'
describe PerfectQueue::Multiprocess::ChildProcessMonitor do
let (:rpipe){ double('rpipe') }
let (:last_heartbeat){ 42 }
let (:last_kill_time){ 42 }
let (:processor_id){ double('processor_id') }
let (:log){ double('log').as_null_object }
let (:cpm) {
cpm = Multiprocess::ChildProcessMonitor.new(log, processor_id, rpipe)
cpm.instance_variable_set(:@last_heartbeat, last_heartbeat)
cpm
}
let (:now){ 72 }
describe '.new' do
it 'returns a PerfectQueue::Multiprocess::ChildProcessMonitor' do
processor = Multiprocess::ChildProcessMonitor.new(log, processor_id, rpipe)
expect(processor).to be_an_instance_of(Multiprocess::ChildProcessMonitor)
end
end
describe '#check_heartbeat' do
before do
allow(object_double('Time').as_stubbed_const).to \
receive_message_chain(:now, :to_i).and_return(now)
end
context 'rpipe returns value' do
before do
expect(rpipe).to receive(:read_nonblock)
end
it 'returns true' do
limit = double('limit')
expect(cpm.check_heartbeat(limit)).to be true
expect(cpm.instance_variable_get(:@last_heartbeat)).to eq(now)
end
end
context 'rpipe.read_nonblock raises EINTR' do
before do
expect(rpipe).to receive(:read_nonblock).and_raise(Errno::EINTR)
end
it 'returns false if last_heartbeat is too old on interupt' do
expect(cpm.check_heartbeat(now-last_heartbeat-1)).to be false
expect(cpm.instance_variable_get(:@last_heartbeat)).to eq(last_heartbeat)
end
it 'returns true if last_heartbeat is enough new on interupt' do
expect(cpm.check_heartbeat(now-last_heartbeat)).to be true
expect(cpm.instance_variable_get(:@last_heartbeat)).to eq(last_heartbeat)
end
end
end
describe '#start_killing' do
before do
allow(object_double('Time').as_stubbed_const).to \
receive_message_chain(:now, :to_i).and_return(now)
end
context 'initial state' do
it 'calls kill_children immediately if immediate: true' do
expect(cpm).to receive(:kill_children).with(now, nil).exactly(:once)
cpm.start_killing(true)
expect(cpm.instance_variable_get(:@kill_immediate)).to eq(true)
expect(cpm.instance_variable_get(:@last_kill_time)).to eq(now)
expect(cpm.instance_variable_get(:@kill_start_time)).to eq(now)
end
it 'sets @last_kill_time if immediate: true, delay!=0' do
delay = 3
expect(cpm).not_to receive(:kill_children)
cpm.start_killing(true, delay)
expect(cpm.instance_variable_get(:@kill_immediate)).to eq(true)
expect(cpm.instance_variable_get(:@last_kill_time)).to eq(now+delay)
expect(cpm.instance_variable_get(:@kill_start_time)).to eq(now+delay)
end
end
context 'already killed immediately' do
before do
cpm.instance_variable_set(:@kill_immediate, true)
cpm.instance_variable_set(:@last_kill_time, now)
cpm.instance_variable_set(:@kill_start_time, now)
end
it 'returns without do anything if immediate: true' do
expect(cpm).not_to receive(:kill_children)
cpm.start_killing(true)
end
it 'returns without do anything if immediate: false' do
expect(cpm).not_to receive(:kill_children)
cpm.start_killing(false)
end
end
context 'already started killing' do
before do
cpm.instance_variable_set(:@kill_start_time, double)
end
it 'return with do nothing if immediate: false' do
cpm.start_killing(false, double)
end
end
end
describe '#killing_status' do
context '@kill_start_time: nil' do
before { cpm.instance_variable_set(:@kill_start_time, nil) }
it 'returns nil' do
expect(cpm.killing_status).to be_nil
end
end
context '@kill_start_time: <time>' do
before { cpm.instance_variable_set(:@kill_start_time, double) }
context '@kill_immediate: true' do
before { cpm.instance_variable_set(:@kill_immediate, true) }
it 'returns nil' do
expect(cpm.killing_status).to be true
end
end
context '@kill_immediate: false' do
before { cpm.instance_variable_set(:@kill_immediate, false) }
it 'returns nil' do
expect(cpm.killing_status).to be false
end
end
end
end
describe '#try_join' do
context 'not killed yet' do
it 'returns nil' do
expect(cpm).not_to receive(:kill_children)
expect(cpm.try_join(double, double)).to be_nil
end
end
context 'killing' do
let (:cProcess) do
allow(Process).to receive(:waitpid).with(processor_id, Process::WNOHANG)
end
before do
cpm.instance_variable_set(:@kill_start_time, double)
end
context 'waitpid returns pid' do
before do
cProcess.and_return(processor_id)
expect(cpm).not_to receive(:kill_children)
end
it 'returns true' do
expect(cpm.try_join(double, double)).to be true
end
end
context 'waitpid raises ECHILD' do
before do
cProcess.and_raise(Errno::ECHILD)
expect(cpm).not_to receive(:kill_children)
end
it 'returns true' do
expect(cpm.try_join(double, double)).to be true
end
end
context 'waitpid returns nil' do
before do
cProcess.and_return(nil)
allow(object_double('Time').as_stubbed_const).to \
receive_message_chain(:now, :to_i).and_return(now)
cpm.instance_variable_set(:@last_kill_time, last_kill_time)
end
it 'returns true if last_kill_time is new' do
graceful_kill_limit = double('graceful_kill_limit')
expect(cpm).to receive(:kill_children).with(now, graceful_kill_limit).exactly(:once)
expect(cpm.try_join(30, graceful_kill_limit)).to be false
expect(cpm.instance_variable_get(:@last_kill_time)).to eq(now)
end
it 'returns false if last_kill_time is old' do
expect(cpm).not_to receive(:kill_children)
expect(cpm.try_join(31, double)).to be false
end
end
end
end
describe '#cleanup' do
context 'rpipe is open' do
it 'closes rpipe' do
allow(rpipe).to receive(:closed?).and_return(false)
expect(rpipe).to receive(:close).exactly(:once)
cpm.cleanup
end
end
context 'rpipe is closed' do
it 'doesn\'t close rpipe' do
allow(rpipe).to receive(:closed?).and_return(true)
expect(rpipe).not_to receive(:close)
cpm.cleanup
end
end
end
describe '#send_signal' do
let (:sig){ double('sig') }
let (:cProcess) do
allow(Process).to receive(:kill).with(sig, processor_id)
end
context 'kill returnes pid' do
before do
cProcess.and_return(processor_id)
end
it { cpm.send_signal(sig) }
end
context 'kill raises ESRCH' do
before{ cProcess.and_raise(Errno::ESRCH) }
it { cpm.send_signal(sig) }
end
context 'kill raises EPERM' do
before{ cProcess.and_raise(Errno::EPERM) }
it { cpm.send_signal(sig) }
end
end
describe '#kill_children' do
context '@kill_start_time: nil' do
# don't happen
end
context '@kill_start_time: <time>' do
before do
cpm.instance_variable_set(:@kill_start_time, 42)
end
context '@kill_immediate: true' do
before do
cpm.instance_variable_set(:@kill_immediate, true)
expect(cpm).to receive(:get_ppid_pids_map).with(no_args).and_return({1=>processor_id}).exactly(:once)
expect(cpm).to receive(:collect_child_pids).with({1=>processor_id}, [processor_id], processor_id) \
.and_return([processor_id]).exactly(:once)
expect(cpm).to receive(:kill_process).with(processor_id, true)
end
it 'calls kill_process immediately' do
cpm.__send__(:kill_children, now, double)
end
end
context '@kill_immediate: false' do
before do
cpm.instance_variable_set(:@kill_immediate, false)
end
it 'calls kill_process immediately' do
expect(cpm).to receive(:get_ppid_pids_map).with(no_args).and_return({1=>processor_id}).exactly(:once)
expect(cpm).to receive(:collect_child_pids).with({1=>processor_id}, [processor_id], processor_id) \
.and_return([processor_id]).exactly(:once)
expect(cpm).to receive(:kill_process).with(processor_id, true)
cpm.__send__(:kill_children, now, 29)
end
it 'calls kill_process' do
expect(cpm).not_to receive(:get_ppid_pids_map)
expect(cpm).not_to receive(:collect_child_pids)
expect(cpm).to receive(:kill_process).with(processor_id, false)
cpm.__send__(:kill_children, now, 30)
end
end
end
end
describe '#get_ppid_pids_map' do
before do
expect(cpm).to receive(:`).with('ps axo pid,ppid') \
.and_return <<eom
PID PPID
1 0
2 1
3 1
4 2
5 3
eom
end
it 'returns a tree of hash' do
expect(cpm.__send__(:get_ppid_pids_map)).to eq({0=>[1], 1=>[2, 3], 2=>[4], 3=>[5]})
end
end
describe '#collect_child_pids' do
it 'returns a flat array of given children' do
ppid_pids = {0=>[1], 1=>[2, 3], 2=>[4], 3=>[5]}
parent_pid = 1
results = cpm.__send__(:collect_child_pids, ppid_pids, [parent_pid], parent_pid)
expect(results).to eq([1, 2, 4, 3, 5])
end
end
describe '#kill_process' do
let (:pid){ double('pid') }
it 'kill(:KILL, pid) for immediate:true' do
expect(Process).to receive(:kill).with(:KILL, pid).and_return(pid).exactly(:once)
expect(cpm.__send__(:kill_process, pid, true)).to eq(pid)
end
it 'kill(:TERM, pid) for immediate:false' do
expect(Process).to receive(:kill).with(:TERM, pid).and_return(pid).exactly(:once)
expect(cpm.__send__(:kill_process, pid, false)).to eq(pid)
end
it 'rescues ESRCH' do
expect(Process).to receive(:kill).with(:KILL, pid).and_raise(Errno::ESRCH).exactly(:once)
expect(cpm.__send__(:kill_process, pid, true)).to be_nil
end
it 'rescues EPERM' do
expect(Process).to receive(:kill).with(:KILL, pid).and_raise(Errno::EPERM).exactly(:once)
expect(cpm.__send__(:kill_process, pid, true)).to be_nil
end
end
end
| ruby | Apache-2.0 | 3ef2dd04786ce822ce4f2e66d54d17ecf98fe8c9 | 2026-01-04T17:55:40.813256Z | false |
treasure-data/perfectqueue | https://github.com/treasure-data/perfectqueue/blob/3ef2dd04786ce822ce4f2e66d54d17ecf98fe8c9/spec/multiprocess/fork_processor_spec.rb | spec/multiprocess/fork_processor_spec.rb | require 'spec_helper'
describe PerfectQueue::Multiprocess::ForkProcessor do
describe '.new' do
it 'returns a PerfectQueue::Multiprocess::ForkProcessor' do
runner = double('runner')
processor_id = double('processor_id')
config = {}
processor = Multiprocess::ForkProcessor.new(runner, processor_id, config)
expect(processor).to be_an_instance_of(Multiprocess::ForkProcessor)
expect(processor.instance_variable_get(:@processor_id)).to eq(processor_id)
end
end
describe '#restart' do
let (:config_keys){[
:child_heartbeat_limit,
:child_kill_interval,
:child_graceful_kill_limit,
:child_fork_frequency_limit,
:child_heartbeat_kill_delay,
]}
let (:config){ {logger: double('logger').as_null_object} }
let (:processor) {
runner = double('runner')
processor_id = double('processor_id')
Multiprocess::ForkProcessor.new(runner, processor_id, config)
}
it 'sets config' do
config_keys.each do |key|
config[key] = double(key)
end
processor.restart(true, config)
config_keys.each do |key|
expect(processor.instance_variable_get("@#{key}".to_sym)).to eq(config[key])
end
expect(processor.instance_variable_get(:@config)).to eq(config)
end
it 'calls ChildProcessMonitor#start_killing if it has ChildProcessMonitor' do
immediate = double('immediate')
cpm = double('ChildProcessMonitor')
expect(cpm).to receive(:start_killing).with(immediate).exactly(:once)
processor.instance_variable_set(:@cpm, cpm)
processor.restart(immediate, config)
end
end
describe '#stop' do
let (:processor) {
runner = double('runner')
processor_id = double('processor_id')
config = {logger: double('logger').as_null_object}
Multiprocess::ForkProcessor.new(runner, processor_id, config)
}
it 'calls ChildProcessMonitor#start_killing if it has ChildProcessMonitor' do
immediate = double('immediate')
cpm = double('ChildProcessMonitor')
expect(cpm).to receive(:start_killing).with(immediate).exactly(:once)
processor.instance_variable_set(:@cpm, cpm)
processor.stop(immediate)
expect(processor.instance_variable_get(:@stop)).to be true
end
end
describe '#keepalive' do
let (:processor) do
config = {logger: double('logger').as_null_object}
Multiprocess::ForkProcessor.new(double('runner'), double('processor_id'), config)
end
it 'tries join on stopping without cpm' do
processor.stop(true)
processor.keepalive
end
it 'tries join on stopping with cpm' do
processor.stop(true)
cpm = double('ChildProcessMonitor', try_join: false)
processor.instance_variable_set(:@cpm, cpm)
processor.keepalive
end
it 'calls fork_child if it doesn\'t have ChildProcessMonitor' do
expect(processor.keepalive).to be_nil
expect(processor.instance_variable_get(:@cpm)).to be_an_instance_of(Multiprocess::ChildProcessMonitor)
end
it 'rascues fork_child\'s error if it doesn\'t have ChildProcessMonitor' do
allow(processor).to receive(:fork_child).and_raise(RuntimeError)
expect(processor.keepalive).to be_nil
end
it 'tries join if it has killed ChildProcessMonitor' do
cpm = double('ChildProcessMonitor', killing_status: true, try_join: true, cleanup: nil)
processor.instance_variable_set(:@cpm, cpm)
expect(processor.keepalive).to be_nil
expect(processor.instance_variable_get(:@cpm)).to be_an_instance_of(Multiprocess::ChildProcessMonitor)
end
it 'recues EOFError of ChildProcessMonitor#check_heartbeat' do
cpm = double('ChildProcessMonitor', killing_status: false, try_join: true, cleanup: nil, pid: 42)
allow(cpm).to receive(:check_heartbeat).and_raise(EOFError)
immediate = double('immediate')
expect(cpm).to receive(:start_killing).with(true, processor.instance_variable_get(:@child_heartbeat_kill_delay)).exactly(:once)
processor.instance_variable_set(:@cpm, cpm)
expect(processor.keepalive).to be_nil
expect(processor.instance_variable_get(:@cpm)).to be_an_instance_of(Multiprocess::ChildProcessMonitor)
end
it 'recues an error of ChildProcessMonitor#check_heartbeat' do
cpm = double('ChildProcessMonitor', killing_status: false, try_join: true, cleanup: nil, pid: 42)
allow(cpm).to receive(:check_heartbeat).and_raise(RuntimeError)
immediate = double('immediate')
expect(cpm).to receive(:start_killing).with(true, processor.instance_variable_get(:@child_heartbeat_kill_delay)).exactly(:once)
processor.instance_variable_set(:@cpm, cpm)
expect(processor.keepalive).to be_nil
expect(processor.instance_variable_get(:@cpm)).to be_an_instance_of(Multiprocess::ChildProcessMonitor)
end
it 'calls ChildProcessMonitor#start_killing if it is dead' do
cpm = double('ChildProcessMonitor', killing_status: false, check_heartbeat: false, try_join: true, cleanup: nil, pid: 42)
immediate = double('immediate')
expect(cpm).to receive(:start_killing).with(true).exactly(:once)
processor.instance_variable_set(:@cpm, cpm)
expect(processor.keepalive).to be_nil
expect(processor.instance_variable_get(:@cpm)).to be_an_instance_of(Multiprocess::ChildProcessMonitor)
end
end
describe '#join' do
let (:processor) {
config = {logger: double('logger').as_null_object, child_kill_interval: 0.1}
Multiprocess::ForkProcessor.new(double('runner'), double('processor_id'), config)
}
it 'calls ChildProcessMonitor#start_killing if it has ChildProcessMonitor' do
immediate = double('immediate')
cpm = double('ChildProcessMonitor', cleanup: nil)
allow(cpm).to receive(:try_join).and_return(false, true)
processor.instance_variable_set(:@cpm, cpm)
processor.join
end
end
describe '#logrotated' do
let (:processor) {
config = {logger: double('logger').as_null_object}
Multiprocess::ForkProcessor.new(double('runner'), double('processor_id'), config)
}
it 'calls ChildProcessMonitor#start_killing if it has ChildProcessMonitor' do
immediate = double('immediate')
cpm = double('ChildProcessMonitor')
allow(cpm).to receive(:send_signal).with(:CONT).exactly(:once)
processor.instance_variable_set(:@cpm, cpm)
processor.logrotated
end
end
describe '#fork_child' do
it 'calls ChildProcessMonitor#start_killing if it has ChildProcessMonitor' do
config = {logger: double('logger').as_null_object}
processor = Multiprocess::ForkProcessor.new(double('runner'), double('processor_id'), config)
processor.instance_variable_set(:@last_fork_time, Float::MAX)
expect(processor.__send__(:fork_child)).to be_nil
end
it 'runs child process' do
runner = double('runner')
processor_id = double('processor_id')
expect(runner).to receive(:after_fork).exactly(:once)
expect(runner).to receive(:after_child_end).exactly(:once)
config = {logger: double('logger').as_null_object}
processor = Multiprocess::ForkProcessor.new(runner, processor_id, config)
expect(processor).to receive(:fork).and_yield
e = Exception.new
allow(processor).to receive(:exit!).and_raise(e)
expect{processor.__send__(:fork_child)}.to raise_error(e)
end
end
end
| ruby | Apache-2.0 | 3ef2dd04786ce822ce4f2e66d54d17ecf98fe8c9 | 2026-01-04T17:55:40.813256Z | false |
treasure-data/perfectqueue | https://github.com/treasure-data/perfectqueue/blob/3ef2dd04786ce822ce4f2e66d54d17ecf98fe8c9/lib/perfectqueue.rb | lib/perfectqueue.rb | #
# PerfectQueue
#
# Copyright (C) 2012-2013 Sadayuki Furuhashi
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
require 'json'
require 'thread' # Mutex, CoditionVariable
require 'zlib'
require 'stringio'
require 'sequel'
require 'logger'
require 'fcntl'
require_relative 'perfectqueue/application'
require_relative 'perfectqueue/backend'
require_relative 'perfectqueue/backend/rdb_compat'
require_relative 'perfectqueue/blocking_flag'
require_relative 'perfectqueue/client'
require_relative 'perfectqueue/daemons_logger'
require_relative 'perfectqueue/engine'
require_relative 'perfectqueue/model'
require_relative 'perfectqueue/queue'
require_relative 'perfectqueue/runner'
require_relative 'perfectqueue/task_monitor'
require_relative 'perfectqueue/task_metadata'
require_relative 'perfectqueue/task_status'
require_relative 'perfectqueue/task'
require_relative 'perfectqueue/worker'
require_relative 'perfectqueue/supervisor'
require_relative 'perfectqueue/signal_thread'
require_relative 'perfectqueue/version'
require_relative 'perfectqueue/multiprocess/thread_processor'
require_relative 'perfectqueue/multiprocess/child_process'
require_relative 'perfectqueue/multiprocess/child_process_monitor'
require_relative 'perfectqueue/multiprocess/fork_processor'
require_relative 'perfectqueue/error'
module PerfectQueue
def self.open(config, &block)
c = Client.new(config)
begin
q = Queue.new(c)
if block
block.call(q)
else
c = nil
return q
end
ensure
c.close if c
end
end
end
| ruby | Apache-2.0 | 3ef2dd04786ce822ce4f2e66d54d17ecf98fe8c9 | 2026-01-04T17:55:40.813256Z | false |
treasure-data/perfectqueue | https://github.com/treasure-data/perfectqueue/blob/3ef2dd04786ce822ce4f2e66d54d17ecf98fe8c9/lib/perfectqueue/supervisor.rb | lib/perfectqueue/supervisor.rb | #
# PerfectQueue
#
# Copyright (C) 2012-2013 Sadayuki Furuhashi
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
module PerfectQueue
class Supervisor
def self.run(runner, config=nil, &block)
new(runner, config, &block).run
end
def initialize(runner, config=nil, &block)
# initial logger
STDERR.sync = true
@log = DaemonsLogger.new(STDERR)
@runner = runner
block = Proc.new { config } if config
@config_load_proc = block
end
attr_reader :engine
def run
@log.info "PerfectQueue #{VERSION}"
install_signal_handlers do
config = load_config
@engine = Engine.new(@runner, config)
listen_debug_server(config)
begin
@engine.run
ensure
@engine.shutdown(true)
end
end
return nil
rescue
@log.error "#{$!.class}: #{$!}"
$!.backtrace.each {|x| @log.warn "\t#{x}" }
return nil
end
def stop(immediate)
@log.info immediate ? "Received immediate stop" : "Received graceful stop"
begin
@engine.stop(immediate) if @engine
rescue
@log.error "failed to stop: #{$!}"
$!.backtrace.each {|bt| @log.warn "\t#{bt}" }
return false
end
return true
end
def restart(immediate)
@log.info immediate ? "Received immediate restart" : "Received graceful restart"
begin
@engine.restart(immediate, load_config)
rescue
@log.error "failed to restart: #{$!}"
$!.backtrace.each {|bt| @log.warn "\t#{bt}" }
return false
end
return true
end
def replace(immediate, command=[$0]+ARGV)
@log.info immediate ? "Received immediate binary replace" : "Received graceful binary replace"
begin
@engine.replace(immediate, command)
rescue
@log.error "failed to replace: #{$!}"
$!.backtrace.each {|bt| @log.warn "\t#{bt}" }
return false
end
return true
end
def logrotated
@log.info "reopen a log file"
@engine.logrotated
@log.reopen!
return true
end
private
def load_config
raw_config = @config_load_proc.call
config = {}
raw_config.each_pair {|k,v| config[k.to_sym] = v }
old_log = @log
log = DaemonsLogger.new(config[:log] || STDERR)
old_log.close if old_log
@log = log
config[:logger] = log
return config
end
def listen_debug_server(config)
address = config[:debug].to_s
return if address.empty?
require 'drb'
if address.include?('/')
# unix
require 'drb/unix'
uri = "drbunix:#{address}"
if File.exist?(address)
File.unlink(address) rescue nil
end
else
# tcp
a, b = address.split(':',2)
if b
uri = "druby://#{a}:#{b}"
else
uri = "druby://0.0.0.0:#{a}"
end
end
@debug_server = DRb::DRbServer.new(uri, self)
end
def install_signal_handlers(&block)
s = self
st = SignalThread.new do |st|
st.trap :TERM do
s.stop(false)
end
st.trap :INT do
s.stop(false)
end
st.trap :QUIT do
s.stop(true)
end
st.trap :USR1 do
s.restart(false)
end
st.trap :HUP do
s.restart(true)
end
st.trap :USR2 do
s.logrotated
end
trap :CHLD, "SIG_IGN"
end
begin
block.call
ensure
st.stop
end
end
end
end
| ruby | Apache-2.0 | 3ef2dd04786ce822ce4f2e66d54d17ecf98fe8c9 | 2026-01-04T17:55:40.813256Z | false |
treasure-data/perfectqueue | https://github.com/treasure-data/perfectqueue/blob/3ef2dd04786ce822ce4f2e66d54d17ecf98fe8c9/lib/perfectqueue/task.rb | lib/perfectqueue/task.rb | #
# PerfectQueue
#
# Copyright (C) 2012-2013 Sadayuki Furuhashi
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
module PerfectQueue
class Task
include Model
def initialize(client, key)
super(client)
@key = key
end
attr_reader :key
def force_finish!(options={})
@client.force_finish(@key, options)
end
def metadata(options={})
@client.get_task_metadata(@key, options)
end
def exists?(options={})
metadata(options)
true
rescue NotFoundError
false
end
def preempt(options={})
@client.preempt(@key, options)
end
def inspect
"#<#{self.class} @key=#{@key.inspect}>"
end
end
class TaskWithMetadata < Task
def initialize(client, key, attributes)
super(client, key)
@compression = attributes.delete(:compression)
@attributes = attributes
end
def inspect
"#<#{self.class} @key=#{@key.inspect} @attributes=#{@attributes.inspect}>"
end
include TaskMetadataAccessors
end
class AcquiredTask < TaskWithMetadata
def initialize(client, key, attributes, task_token)
super(client, key, attributes)
@task_token = task_token
end
def heartbeat!(options={})
@client.heartbeat(@task_token, options)
end
def finish!(options={})
@client.finish(@task_token, options)
end
def release!(options={})
@client.release(@task_token, options)
end
def retry!(options={})
@client.retry(@task_token, options)
end
def update_data!(hash)
data = @attributes[:data] || {}
merged = data.merge(hash)
heartbeat!(data: merged, compression: compression)
@attributes[:data] = merged
end
#def to_json
# [@key, @task_token, @attributes].to_json
#end
#def self.from_json(data, client)
# key, task_token, attributes = JSON.load(data)
# new(client, key, attributes, task_token)
#end
end
end
| ruby | Apache-2.0 | 3ef2dd04786ce822ce4f2e66d54d17ecf98fe8c9 | 2026-01-04T17:55:40.813256Z | false |
treasure-data/perfectqueue | https://github.com/treasure-data/perfectqueue/blob/3ef2dd04786ce822ce4f2e66d54d17ecf98fe8c9/lib/perfectqueue/version.rb | lib/perfectqueue/version.rb | module PerfectQueue
VERSION = "0.10.1"
end
| ruby | Apache-2.0 | 3ef2dd04786ce822ce4f2e66d54d17ecf98fe8c9 | 2026-01-04T17:55:40.813256Z | false |
treasure-data/perfectqueue | https://github.com/treasure-data/perfectqueue/blob/3ef2dd04786ce822ce4f2e66d54d17ecf98fe8c9/lib/perfectqueue/application.rb | lib/perfectqueue/application.rb | #
# PerfectQueue
#
# Copyright (C) 2012-2013 Sadayuki Furuhashi
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
module PerfectQueue
module Application
{
:Dispatch => 'application/dispatch',
:Router => 'application/router',
:RouterDSL => 'application/router',
:Decider => 'application/decider',
:DefaultDecider => 'application/decider',
:UndefinedDecisionError => 'application/decider',
:Base => 'application/base',
}.each_pair {|k,v|
autoload k, File.expand_path(v, File.dirname(__FILE__))
}
end
end
| ruby | Apache-2.0 | 3ef2dd04786ce822ce4f2e66d54d17ecf98fe8c9 | 2026-01-04T17:55:40.813256Z | false |
treasure-data/perfectqueue | https://github.com/treasure-data/perfectqueue/blob/3ef2dd04786ce822ce4f2e66d54d17ecf98fe8c9/lib/perfectqueue/daemons_logger.rb | lib/perfectqueue/daemons_logger.rb | #
# PerfectQueue
#
# Copyright (C) 2012-2013 Sadayuki Furuhashi
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
module PerfectQueue
class DaemonsLogger < Logger
def initialize(dev, shift_age=0, shift_size=1048576)
@stdout_hook = false
@stderr_hook = false
if dev.is_a?(String)
@path = dev
@io = File.open(@path, File::WRONLY|File::APPEND|File::CREAT)
else
@io = dev
end
super(@io, shift_size, shift_size)
end
def hook_stdout!
return nil if @io == STDOUT
STDOUT.reopen(@io)
@stdout_hook = true
self
end
def hook_stderr!
STDERR.reopen(@io)
@stderr_hook = true
self
end
def reopen!
if @path
@io.reopen(@path)
if @stdout_hook
STDOUT.reopen(@io)
end
if @stderr_hook
STDERR.reopen(@io)
end
end
nil
end
def reopen
begin
reopen!
return true
rescue
# TODO log?
return false
end
end
def close
if @path
@io.close unless @io.closed?
end
nil
end
end
end
| ruby | Apache-2.0 | 3ef2dd04786ce822ce4f2e66d54d17ecf98fe8c9 | 2026-01-04T17:55:40.813256Z | false |
treasure-data/perfectqueue | https://github.com/treasure-data/perfectqueue/blob/3ef2dd04786ce822ce4f2e66d54d17ecf98fe8c9/lib/perfectqueue/task_status.rb | lib/perfectqueue/task_status.rb | #
# PerfectQueue
#
# Copyright (C) 2012-2013 Sadayuki Furuhashi
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
module PerfectQueue
  # Symbolic lifecycle states a task can be in.
  module TaskStatus
    # waiting: submitted, not yet acquired; running: acquired by a worker;
    # finished: completed and retained until retention expires.
    WAITING, RUNNING, FINISHED = :waiting, :running, :finished
  end
end
| ruby | Apache-2.0 | 3ef2dd04786ce822ce4f2e66d54d17ecf98fe8c9 | 2026-01-04T17:55:40.813256Z | false |
treasure-data/perfectqueue | https://github.com/treasure-data/perfectqueue/blob/3ef2dd04786ce822ce4f2e66d54d17ecf98fe8c9/lib/perfectqueue/queue.rb | lib/perfectqueue/queue.rb | #
# PerfectQueue
#
# Copyright (C) 2012-2013 Sadayuki Furuhashi
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
module PerfectQueue
  # Enumerable, Hash-like facade over a Client exposing queue operations.
  class Queue
    include Model
    include Enumerable

    def initialize(client)
      super(client)
    end

    # Returns a Task handle for +key+; no backend access happens here.
    def [](key)
      Task.new(@client, key)
    end

    # Yields each listed task entry; delegates to Client#list.
    def each(options={}, &block)
      @client.list(options, &block)
    end

    # Acquires at most one task and returns it, or nil when none is ready.
    def poll(options={})
      acquired = poll_multi(options.merge(:max_acquire => 1))
      acquired ? acquired.first : nil
    end

    # Acquires up to options[:max_acquire] tasks at once.
    def poll_multi(options={})
      @client.acquire(options)
    end

    # Submits a new task to the queue.
    def submit(key, type, data, options={})
      @client.submit(key, type, data, options)
    end

    # Closes the underlying client connection.
    def close
      client.close
    end
  end
end
| ruby | Apache-2.0 | 3ef2dd04786ce822ce4f2e66d54d17ecf98fe8c9 | 2026-01-04T17:55:40.813256Z | false |
treasure-data/perfectqueue | https://github.com/treasure-data/perfectqueue/blob/3ef2dd04786ce822ce4f2e66d54d17ecf98fe8c9/lib/perfectqueue/task_metadata.rb | lib/perfectqueue/task_metadata.rb | #
# PerfectQueue
#
# Copyright (C) 2012-2013 Sadayuki Furuhashi
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
module PerfectQueue
  # Read-only accessors over a task's attribute hash.
  module TaskMetadataAccessors
    attr_reader :attributes
    attr_reader :compression

    # Plain readers backed directly by the attribute hash.
    [:type, :data, :status, :message, :user].each do |name|
      define_method(name) { @attributes[name] }
    end

    # Epoch-second attributes exposed as Time objects (nil when absent).
    [:created_at, :timeout].each do |name|
      define_method(name) do
        sec = @attributes[name]
        sec ? Time.at(sec) : nil
      end
    end

    def finished?
      status == TaskStatus::FINISHED
    end

    def waiting?
      status == TaskStatus::WAITING
    end

    def running?
      status == TaskStatus::RUNNING
    end
  end

  # Immutable snapshot of a task's metadata fetched from the backend.
  class TaskMetadata
    include Model
    include TaskMetadataAccessors

    def initialize(client, key, attributes)
      super(client)
      @key = key
      # :compression is pulled out of the hash; everything else stays in.
      @compression = attributes.delete(:compression)
      @attributes = attributes
    end

    # Handle for the task this metadata describes.
    def task
      Task.new(@client, @key)
    end

    def inspect
      "#<#{self.class} @key=#{@key.inspect} @attributes=#{@attributes.inspect}>"
    end
  end
end
| ruby | Apache-2.0 | 3ef2dd04786ce822ce4f2e66d54d17ecf98fe8c9 | 2026-01-04T17:55:40.813256Z | false |
treasure-data/perfectqueue | https://github.com/treasure-data/perfectqueue/blob/3ef2dd04786ce822ce4f2e66d54d17ecf98fe8c9/lib/perfectqueue/backend.rb | lib/perfectqueue/backend.rb | #
# PerfectQueue
#
# Copyright (C) 2012-2013 Sadayuki Furuhashi
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
module PerfectQueue
  module Backend
    # Factory for backend implementations; only 'rdb_compat' is supported.
    def self.new_backend(client, config)
      unless config[:type] == 'rdb_compat'
        raise ConfigError, "'type' must be 'rdb_compat'"
      end
      RDBCompatBackend.new(client, config)
    end
  end

  # Common constructor/teardown shared by backend implementations.
  module BackendHelper
    attr_reader :client

    def initialize(client, config)
      @client = client
      @config = config
    end

    # Backends override this when they hold releasable resources.
    def close
      # no resources by default
    end
  end
end
| ruby | Apache-2.0 | 3ef2dd04786ce822ce4f2e66d54d17ecf98fe8c9 | 2026-01-04T17:55:40.813256Z | false |
treasure-data/perfectqueue | https://github.com/treasure-data/perfectqueue/blob/3ef2dd04786ce822ce4f2e66d54d17ecf98fe8c9/lib/perfectqueue/runner.rb | lib/perfectqueue/runner.rb | #
# PerfectQueue
#
# Copyright (C) 2012-2013 Sadayuki Furuhashi
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
module PerfectQueue
  # Base class for task runners; subclasses implement #run (and may
  # override #kill to react to stop requests).
  class Runner
    attr_accessor :task

    def initialize(task)
      @task = task
    end

    # Queue bound to the same client as the task.
    def queue
      Queue.new(task.client)
    end

    #def run
    #end

    # Called when the worker asks the task to stop; ignored by default.
    def kill(reason)
      nil
    end
  end
end
| ruby | Apache-2.0 | 3ef2dd04786ce822ce4f2e66d54d17ecf98fe8c9 | 2026-01-04T17:55:40.813256Z | false |
treasure-data/perfectqueue | https://github.com/treasure-data/perfectqueue/blob/3ef2dd04786ce822ce4f2e66d54d17ecf98fe8c9/lib/perfectqueue/task_monitor.rb | lib/perfectqueue/task_monitor.rb | #
# PerfectQueue
#
# Copyright (C) 2012-2013 Sadayuki Furuhashi
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
module PerfectQueue
  # Background thread that keeps two heartbeats alive while a task runs:
  # a "child" heartbeat toward the supervising process (@child_heartbeat)
  # and a "task" heartbeat that extends the lease of the currently
  # assigned task. Task lifecycle callbacks are serialized through a
  # recursive Monitor, since heartbeat/kill paths may re-enter the lock.
  class TaskMonitor
    # config expects :logger plus optional :child_heartbeat_interval and
    # :task_heartbeat_interval (seconds, default 2 each).
    # child_heartbeat / force_stop default to no-op procs.
    def initialize(config, child_heartbeat=nil, force_stop=nil)
      @config = config
      @log = config[:logger]
      @child_heartbeat = child_heartbeat || Proc.new {}
      @force_stop = force_stop || Proc.new {}
      @child_heartbeat_interval = (@config[:child_heartbeat_interval] || 2).to_i
      @task_heartbeat_interval = (@config[:task_heartbeat_interval] || 2).to_i
      @last_child_heartbeat = Time.now.to_i
      @last_task_heartbeat = Time.now.to_i
      @task = nil
      @mutex = Monitor.new # support recursive lock
      @cond = @mutex.new_cond
      @finished = false
    end

    # Starts the monitor loop on its own thread.
    def start
      @thread = Thread.new(&method(:run))
    end

    # Signals the loop to exit and wakes any waiter.
    def stop
      @finished = true
      @mutex.synchronize {
        @cond.broadcast
      }
    end

    def join
      @thread.join
    end

    # Registers +task+ as the one being monitored. The task object is
    # extended with TaskMonitorHook so its lifecycle calls report back
    # to this monitor.
    def set_task(task, runner)
      task.extend(TaskMonitorHook)
      task.log = @log
      task.task_monitor = self
      task.runner = runner
      @mutex.synchronize {
        @task = task
        @last_task_heartbeat = Time.now.to_i
      }
    end

    # Requests the current task to stop, immediately or gracefully.
    def stop_task(immediate)
      if immediate
        kill_task ImmediateProcessStopError.new('immediate stop requested')
      else
        kill_task GracefulProcessStopError.new('graceful stop requested')
      end
    end

    # Forwards +reason+ to the runner's kill; a failing kill is re-raised
    # so the process exits rather than keep running an unkillable task.
    def kill_task(reason)
      @mutex.synchronize {
        if task = @task
          begin
            task.runner.kill(reason) # may recursive lock
          rescue
            @log.error "failed to kill task: #{$!.class}: #{$!}"
            $!.backtrace.each {|bt| @log.warn "\t#{bt}" }
            raise # force exit
          end
        end
      }
    end

    # callback
    # Invoked by TaskMonitorHook when the task finishes/releases/retries;
    # runs the wrapped operation under the lock, then clears @task.
    def task_finished(task, &block)
      @mutex.synchronize {
        ret = block.call if block # TODO is this ought to be synchronized?
        if task == @task
          @task = nil
        end
        ret
      }
    end

    # callback
    # Invoked by TaskMonitorHook on task data updates; refreshes the task
    # heartbeat timestamp, but only while +task+ is still the current one.
    def external_task_heartbeat(task, &block)
      @mutex.synchronize {
        if task == @task
          ret = block.call if block
          @last_task_heartbeat = Time.now.to_i
        end
        ret
      }
    end

    # Main loop: sleeps until the next due heartbeat, then sends the task
    # and/or child heartbeats. Any unexpected error is logged and triggers
    # @force_stop.
    def run
      @mutex.synchronize {
        now = Time.now.to_i
        until @finished
          next_child_heartbeat = @last_child_heartbeat + @child_heartbeat_interval
          if @task
            next_task_heartbeat = @last_task_heartbeat + @task_heartbeat_interval
            next_time = [next_child_heartbeat, next_task_heartbeat].min
          else
            next_task_heartbeat = nil
            next_time = next_child_heartbeat
          end
          next_wait = next_time - now
          @cond.wait(next_wait) if next_wait > 0
          now = Time.now.to_i
          if @task && next_task_heartbeat && next_task_heartbeat <= now
            task_heartbeat
            @last_task_heartbeat = now
          end
          if next_child_heartbeat <= now
            @child_heartbeat.call # will recursive lock
            @last_child_heartbeat = now
          end
        end
      }
    rescue
      @log.error "Unknown error #{$!.class}: #{$!}"
      $!.backtrace.each {|bt| @log.warn "\t#{bt}" }
      @force_stop.call
    end

    private
    # Extends the task lease; failures (finished, preempted, ...) are
    # turned into a kill reason for the runner.
    def task_heartbeat
      @task.heartbeat!
    rescue
      # finished, preempted, etc.
      kill_task($!)
    end
  end

  # Mixin extended onto the active task so its lifecycle methods log and
  # notify the TaskMonitor before delegating to the original behavior.
  module TaskMonitorHook
    attr_accessor :log
    attr_accessor :task_monitor
    attr_accessor :runner

    def finish!(*args, &block)
      @log.info "finished task=#{self.key}" if @log
      @task_monitor.task_finished(self) {
        super(*args, &block)
      }
    end

    def release!(*args, &block)
      @log.info "release task=#{self.key}" if @log
      @task_monitor.task_finished(self) {
        super(*args, &block)
      }
    end

    def retry!(*args, &block)
      @log.info "retry task=#{self.key}" if @log
      @task_monitor.task_finished(self) {
        super(*args, &block)
      }
    end

    def update_data!(hash)
      @log.info "update data #{hash.inspect} task=#{self.key}" if @log
      @task_monitor.external_task_heartbeat(self) {
        super(hash)
      }
    end
  end
end
| ruby | Apache-2.0 | 3ef2dd04786ce822ce4f2e66d54d17ecf98fe8c9 | 2026-01-04T17:55:40.813256Z | false |
treasure-data/perfectqueue | https://github.com/treasure-data/perfectqueue/blob/3ef2dd04786ce822ce4f2e66d54d17ecf98fe8c9/lib/perfectqueue/worker.rb | lib/perfectqueue/worker.rb | #
# PerfectQueue
#
# Copyright (C) 2012-2013 Sadayuki Furuhashi
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
module PerfectQueue
  # Top-level worker process: forks a Supervisor child process and
  # forwards lifecycle signals (stop/restart/logrotate/detach) to it.
  class Worker
    # Convenience: build and run a worker in one call.
    def self.run(runner, config=nil, &block)
      new(runner, config, &block).run
    end

    # Accepts either a config hash or a block returning one. The block is
    # retained and handed to the Supervisor so configuration can be
    # re-evaluated later.
    def initialize(runner, config=nil, &block)
      block = Proc.new { config } if config
      config = block.call
      @config = config
      @runner = runner
      # :detach_wait may appear under a symbol or string key.
      @detach_wait = config[:detach_wait] || config['detach_wait'] || 10.0
      @sv = Supervisor.new(runner, &block)
      @detach = false
      @finish_flag = BlockingFlag.new
    end

    # Forks the supervisor child, then polls it (non-blocking waitpid2,
    # 1s ticks) until it exits or this process is told to finish. After a
    # detach, waits up to @detach_wait seconds for the child to go away.
    def run
      @pid = fork do
        $0 = "perfectqueue-supervisor:#{@runner}"
        @sv.run
        exit! 0
      end
      install_signal_handlers
      begin
        until @finish_flag.set?
          pid, status = Process.waitpid2(@pid, Process::WNOHANG)
          break if pid
          @finish_flag.wait(1)
        end
        return if pid # child already reaped
        if @detach
          # Grace period: poll in <=0.5s steps for the child to exit.
          wait_time = Time.now + @detach_wait
          while (w = wait_time - Time.now) > 0
            sleep [0.5, w].min
            pid, status = Process.waitpid2(@pid, Process::WNOHANG)
            break if pid
          end
        else
          # child process finished unexpectedly
        end
      rescue Errno::ECHILD
      end
    end

    # Stop the supervisor: QUIT = immediate, TERM = graceful.
    def stop(immediate)
      send_signal(immediate ? :QUIT : :TERM)
    end

    # Restart workers: HUP = immediate, USR1 = graceful.
    def restart(immediate)
      send_signal(immediate ? :HUP : :USR1)
    end

    # Tell the supervisor that log files were rotated.
    def logrotated
      send_signal(:USR2)
    end

    # Detach from the child (it keeps running) and let #run return.
    def detach
      send_signal(:INT)
      @detach = true
      @finish_flag.set!
    end

    private
    # Best-effort signal delivery; a dead or forbidden child is ignored.
    def send_signal(sig)
      begin
        Process.kill(sig, @pid)
      rescue Errno::ESRCH, Errno::EPERM
      end
    end

    # Maps incoming process signals to the worker actions above, handled
    # on a dedicated SignalThread.
    def install_signal_handlers
      s = self
      SignalThread.new do |st|
        st.trap :TERM do
          s.stop(false)
        end
        # override
        st.trap :INT do
          s.detach
        end
        st.trap :QUIT do
          s.stop(true)
        end
        st.trap :USR1 do
          s.restart(false)
        end
        st.trap :HUP do
          s.restart(true)
        end
        st.trap :USR2 do
          s.logrotated
        end
      end
    end
  end
end
| ruby | Apache-2.0 | 3ef2dd04786ce822ce4f2e66d54d17ecf98fe8c9 | 2026-01-04T17:55:40.813256Z | false |
treasure-data/perfectqueue | https://github.com/treasure-data/perfectqueue/blob/3ef2dd04786ce822ce4f2e66d54d17ecf98fe8c9/lib/perfectqueue/client.rb | lib/perfectqueue/client.rb | #
# PerfectQueue
#
# Copyright (C) 2012-2013 Sadayuki Furuhashi
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
module PerfectQueue
  # Thin facade over a backend: symbolizes configuration keys and fills
  # in default lifetimes (retention/alive/retry-wait) for operations.
  class Client
    attr_reader :backend
    attr_reader :config

    def initialize(config)
      # Normalize all config keys to symbols.
      @config = config.each_with_object({}) { |(k, v), h| h[k.to_sym] = v }
      @backend = Backend.new_backend(self, @config)
      @retention_time = @config[:retention_time] || 300
      @alive_time = @config[:alive_time] || 300
      @retry_wait = @config[:retry_wait] || 300 # TODO retry wait algorithm
    end

    def init_database(options={})
      @backend.init_database(options)
    end

    def get_task_metadata(key, options={})
      @backend.get_task_metadata(key, options)
    end

    # :message => nil
    # :alive_time => @alive_time
    def preempt(key, options={})
      @backend.preempt(key, options[:alive_time] || @alive_time, options)
    end

    def list(options={}, &block)
      @backend.list(options, &block)
    end

    # :run_at => Time.now
    # :message => nil
    # :user => nil
    # :priority => nil
    def submit(key, type, data, options={})
      @backend.submit(key, type, data, options)
    end

    # :max_acquire => nil
    # :alive_time => nil
    def acquire(options={})
      @backend.acquire(options[:alive_time] || @alive_time,
                       options[:max_acquire] || 1,
                       options)
    end

    def force_finish(key, options={})
      @backend.force_finish(key, options[:retention_time] || @retention_time, options)
    end

    # :message => nil
    # :retention_time => default_retention_time
    def finish(task_token, options={})
      @backend.finish(task_token, options[:retention_time] || @retention_time, options)
    end

    # :message => nil
    # :alive_time => nil
    def heartbeat(task_token, options={})
      @backend.heartbeat(task_token, options[:alive_time] || @alive_time, options)
    end

    def release(task_token, options={})
      @backend.release(task_token, options[:alive_time] || 0, options)
    end

    # Retrying is implemented as a release whose new alive window is the
    # retry wait time.
    def retry(task_token, options={})
      @backend.release(task_token, options[:retry_wait] || @retry_wait, options)
    end

    def close
      @backend.close
    end
  end
end
| ruby | Apache-2.0 | 3ef2dd04786ce822ce4f2e66d54d17ecf98fe8c9 | 2026-01-04T17:55:40.813256Z | false |
treasure-data/perfectqueue | https://github.com/treasure-data/perfectqueue/blob/3ef2dd04786ce822ce4f2e66d54d17ecf98fe8c9/lib/perfectqueue/blocking_flag.rb | lib/perfectqueue/blocking_flag.rb | #
# PerfectQueue
#
# Copyright (C) 2012-2013 Sadayuki Furuhashi
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
module PerfectQueue
  # A boolean flag threads can block on; every state change broadcasts to
  # all waiters.
  class BlockingFlag
    def initialize
      @set = false
      @mutex = Mutex.new
      @cond = ConditionVariable.new
    end

    # Sets the flag and wakes all waiters. Returns true only when this
    # call flipped the flag from unset to set.
    def set!
      changed = false
      @mutex.synchronize do
        changed = !@set
        @set = true
        @cond.broadcast
      end
      changed
    end

    # Clears the flag and wakes all waiters. Returns true only when this
    # call flipped the flag from set to unset.
    def reset!
      changed = false
      @mutex.synchronize do
        changed = @set
        @set = false
        @cond.broadcast
      end
      changed
    end

    # Unsynchronized read of the current state.
    def set?
      @set
    end

    # Runs the block with the flag set, always resetting it afterwards.
    def set_region(&block)
      set!
      begin
        block.call
      ensure
        reset!
      end
    end

    # Runs the block with the flag cleared, always setting it afterwards.
    def reset_region(&block)
      reset!
      begin
        block.call
      ensure
        set!
      end
    end

    # Blocks until a broadcast or the timeout elapses; returns self.
    def wait(timeout=nil)
      @mutex.synchronize { @cond.wait(@mutex, timeout) }
      self
    end
  end
end
| ruby | Apache-2.0 | 3ef2dd04786ce822ce4f2e66d54d17ecf98fe8c9 | 2026-01-04T17:55:40.813256Z | false |
treasure-data/perfectqueue | https://github.com/treasure-data/perfectqueue/blob/3ef2dd04786ce822ce4f2e66d54d17ecf98fe8c9/lib/perfectqueue/engine.rb | lib/perfectqueue/engine.rb | #
# PerfectQueue
#
# Copyright (C) 2012-2013 Sadayuki Furuhashi
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
module PerfectQueue
  # Owns the pool of processors (forked processes or threads, chosen by
  # config[:processor_type]) and drives their keepalive loop.
  class Engine
    def initialize(runner, config)
      @runner = runner
      @finish_flag = BlockingFlag.new
      processor_type = config[:processor_type] || :process
      case processor_type.to_sym
      when :process
        @processor_class = Multiprocess::ForkProcessor
      when :thread
        @processor_class = Multiprocess::ThreadProcessor
      else
        raise ConfigError, "Unknown processor_type: #{config[:processor_type].inspect}"
      end
      @processors = []
      # Initial pool construction reuses the resize/restart logic.
      restart(false, config)
    end

    attr_reader :processors

    # (Re)applies +config+: grows or shrinks the pool to
    # config[:processors] (default 1), restarts surviving processors, and
    # refreshes the keepalive interval. No-op once the engine is stopped.
    def restart(immediate, config)
      return nil if @finish_flag.set?
      # TODO connection check
      @log = config[:logger] || Logger.new(STDERR)
      num_processors = config[:processors] || 1
      # scaling
      extra = num_processors - @processors.length
      if extra > 0
        extra.times do
          @processors << @processor_class.new(@runner, @processors.size+1, config)
        end
      elsif extra < 0
        (-extra).times do
          c = @processors.shift
          c.stop(immediate)
          c.join
        end
        extra = 0
      end
      # Restart only the pre-existing processors: when extra > 0 the
      # range [0..-extra-1] excludes the freshly created ones; when
      # extra == 0 it covers the whole pool.
      @processors[0..(-extra-1)].each {|c|
        c.restart(immediate, config)
      }
      @child_keepalive_interval = (config[:child_keepalive_interval] || config[:child_heartbeat_interval] || 2).to_i
      self
    end

    # Keepalive loop: pings every processor at the configured interval
    # until stopped, then joins them all.
    def run
      @processors.each {|c|
        c.keepalive
        # add wait time before starting processors to avoid
        # a spike of the number of concurrent connections.
        sleep rand*2 # up to 2 seconds, average 1 second
      }
      until @finish_flag.set?
        @processors.each {|c| c.keepalive }
        @finish_flag.wait(@child_keepalive_interval)
      end
      join
    end

    # Asks every processor to stop and marks the engine finished.
    def stop(immediate)
      @processors.each {|c| c.stop(immediate) }
      @finish_flag.set!
      self
    end

    def join
      @processors.each {|c| c.join }
      self
    end

    def shutdown(immediate)
      stop(immediate)
      join
    end

    # Stops this engine and spawns a replacement process running
    # +command+ (defaults to re-executing the current command line).
    # Idempotent: only the first call spawns.
    def replace(immediate, command=[$0]+ARGV)
      return if @replaced_pid
      stop(immediate)
      @replaced_pid = Process.spawn(*command)
      self
    end

    def logrotated
      @processors.each {|c| c.logrotated }
    end
  end
end
| ruby | Apache-2.0 | 3ef2dd04786ce822ce4f2e66d54d17ecf98fe8c9 | 2026-01-04T17:55:40.813256Z | false |
treasure-data/perfectqueue | https://github.com/treasure-data/perfectqueue/blob/3ef2dd04786ce822ce4f2e66d54d17ecf98fe8c9/lib/perfectqueue/signal_thread.rb | lib/perfectqueue/signal_thread.rb | #
# PerfectQueue
#
# Copyright (C) 2012-2013 Sadayuki Furuhashi
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
module PerfectQueue
  # Thread that runs trapped-signal handlers outside of trap context:
  # traps merely enqueue the signal name, and this thread's main loop
  # dequeues signals and invokes the registered handler blocks.
  class SignalThread < Thread
    def initialize(&block)
      @handlers = {}
      @mutex = Mutex.new
      @cond = ConditionVariable.new
      @queue = []
      @finished = false
      # Let the caller register traps before the loop thread starts.
      block.call(self) if block
      super(&method(:main))
    end

    # Registers +block+ as the handler for +sig+. Signal names are
    # normalized (case-insensitive, optional "SIG" prefix stripped).
    # Without a block, +command+ is installed via Kernel.trap and any
    # registered handler is removed. Returns the previous handler block.
    def trap(sig, command=nil, &block)
      # normalize signal names
      sig = sig.to_s.upcase
      if sig[0,3] == "SIG"
        sig = sig[3..-1]
      end
      sig = sig.to_sym
      old = @handlers[sig]
      if block
        Kernel.trap(sig) { signal_handler_main(sig) }
        @handlers[sig] = block
      else
        Kernel.trap(sig, command)
        @handlers.delete(sig)
      end
      old
    end

    # Snapshot copy of the registered handlers.
    def handlers
      @handlers.dup
    end

    # Asks the main loop to exit and wakes it.
    def stop
      @mutex.synchronize do
        @finished = true
        @cond.broadcast
      end
      self
    end

    private
    # Runs inside trap context, where synchronization is restricted, so a
    # short-lived thread is spawned just to enqueue the signal safely.
    def signal_handler_main(sig)
      # here always creates new thread to avoid
      # complicated race condition in signal handlers
      Thread.new do
        begin
          enqueue(sig)
        rescue => e
          STDERR.print "#{e}\n"
          e.backtrace.each do |bt|
            STDERR.print "\t#{bt}\n"
            STDERR.flush
          end
        end
      end
    end

    # Dequeue loop: waits (in 1s ticks) for enqueued signals and runs
    # their handlers; handler errors are printed and do not stop the loop.
    def main
      until @finished
        sig = nil
        @mutex.synchronize do
          while true
            return if @finished
            sig = @queue.shift
            break if sig
            @cond.wait(@mutex, 1)
          end
        end
        begin
          @handlers[sig].call(sig)
        rescue => e
          STDERR.print "#{e}\n"
          e.backtrace.each do |bt|
            STDERR.print "\t#{bt}\n"
            STDERR.flush
          end
        end
      end
      nil
    ensure
      @finished = false
    end

    # Thread-safely appends a signal and wakes the main loop.
    def enqueue(sig)
      @mutex.synchronize do
        @queue << sig
        @cond.broadcast
      end
    end
  end
end
| ruby | Apache-2.0 | 3ef2dd04786ce822ce4f2e66d54d17ecf98fe8c9 | 2026-01-04T17:55:40.813256Z | false |
treasure-data/perfectqueue | https://github.com/treasure-data/perfectqueue/blob/3ef2dd04786ce822ce4f2e66d54d17ecf98fe8c9/lib/perfectqueue/model.rb | lib/perfectqueue/model.rb | #
# PerfectQueue
#
# Copyright (C) 2012-2013 Sadayuki Furuhashi
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
module PerfectQueue
  # Minimal base mixin for model objects that wrap a Client.
  module Model
    attr_reader :client

    def initialize(client)
      @client = client
    end

    # Configuration hash of the underlying client.
    def config
      client.config
    end

    ## TODO
    #def inspect
    #  "<#{self.class}>"
    #end
  end
end
| ruby | Apache-2.0 | 3ef2dd04786ce822ce4f2e66d54d17ecf98fe8c9 | 2026-01-04T17:55:40.813256Z | false |
treasure-data/perfectqueue | https://github.com/treasure-data/perfectqueue/blob/3ef2dd04786ce822ce4f2e66d54d17ecf98fe8c9/lib/perfectqueue/error.rb | lib/perfectqueue/error.rb | #
# PerfectQueue
#
# Copyright (C) 2012-2013 Sadayuki Furuhashi
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
module PerfectQueue
  # Base class for errors raised by task operations.
  class TaskError < StandardError
  end

  class CancelRequestedError < TaskError
  end

  class AlreadyFinishedError < TaskError
  end

  class NotFoundError < TaskError
  end

  class AlreadyExistsError < TaskError
  end

  class PreemptedError < TaskError
  end

  class NotSupportedError < TaskError
  end

  # Invalid or unsupported configuration.
  class ConfigError < RuntimeError
  end

  # Base class for stop requests delivered to a running process.
  class ProcessStopError < RuntimeError
  end

  class ImmediateProcessStopError < ProcessStopError
  end

  class GracefulProcessStopError < ProcessStopError
  end

  # Applications can ignore these errors to achieve idempotency
  module IdempotentError
  end

  class IdempotentAlreadyFinishedError < AlreadyFinishedError
    include IdempotentError
  end

  class IdempotentAlreadyExistsError < AlreadyExistsError
    include IdempotentError
  end

  class IdempotentNotFoundError < NotFoundError
    include IdempotentError
  end
end
| ruby | Apache-2.0 | 3ef2dd04786ce822ce4f2e66d54d17ecf98fe8c9 | 2026-01-04T17:55:40.813256Z | false |
treasure-data/perfectqueue | https://github.com/treasure-data/perfectqueue/blob/3ef2dd04786ce822ce4f2e66d54d17ecf98fe8c9/lib/perfectqueue/command/perfectqueue.rb | lib/perfectqueue/command/perfectqueue.rb | require 'optparse'
require 'perfectqueue/version'

# Command-line entry point: parses global options, then dispatches on the
# sub-command (list / submit / force_finish / run / init / debug).
op = OptionParser.new
op.banner += %[ <command>
commands:
list Show list of tasks
submit <key> <type> <data> Submit a new task
force_finish <key> Force finish a task
run <class> Run a worker process
init Initialize a backend database
debug <address> Connect to debug interface of a worker
]
op.version = PerfectQueue::VERSION

env = ENV['RAILS_ENV'] || 'development'
config_path = 'config/perfectqueue.yml'
include_dirs = []
require_files = []
debug_listen = nil
task_options = {
}

op.separator("options:")

op.on('-e', '--environment ENV', 'Framework environment (default: development)') {|s|
  env = s
}

op.on('-c', '--config PATH.yml', 'Path to a configuration file (default: config/perfectqueue.yml)') {|s|
  config_path = s
}

op.separator("\noptions for submit:")

op.on('-u', '--user USER', 'Set user') {|s|
  task_options[:user] = s
}

op.on('-t', '--time UNIXTIME', 'Set time to run the task', Integer) {|i|
  task_options[:run_at] = i
}

op.separator("\noptions for run:")

op.on('-I', '--include PATH', 'Add $LOAD_PATH directory') {|s|
  include_dirs << s
}

op.on('-r', '--require PATH', 'Require files before starting') {|s|
  require_files << s
}

# Define a top-level `usage` helper that prints help (plus an optional
# error message) and exits with a non-zero status.
(class<<self;self;end).module_eval do
  define_method(:usage) do |msg|
    puts op.to_s
    puts "\nerror: #{msg}" if msg
    exit 1
  end
end

# Parse the sub-command and validate its positional arguments; any parse
# error falls through to usage.
begin
  op.parse!(ARGV)
  usage nil if ARGV.empty?
  cmd = ARGV.shift
  case cmd
  when 'list'
    cmd = :list
    usage nil unless ARGV.length == 0
  when 'force_finish' ,'finish'
    cmd = :finish
    usage nil unless ARGV.length == 1
    key = ARGV[0]
  when 'submit'
    cmd = :submit
    usage nil unless ARGV.length == 3
    key, type, data = *ARGV
    require 'json'
    data = JSON.load(data)
  when 'run'
    cmd = :run
    usage nil unless ARGV.length == 1
    klass = ARGV[0]
  when 'init'
    cmd = :init
    usage nil unless ARGV.length == 0
  when 'debug'
    cmd = :debug
    usage nil unless ARGV.length == 1
    debug_address = ARGV[0]
  else
    raise "unknown command: '#{cmd}'"
  end
rescue
  usage $!.to_s
end

require 'yaml'
require 'perfectqueue'

# Loads the YAML config file and returns the section for the current env.
config_load_proc = Proc.new {
  yaml = YAML.load(File.read(config_path))
  conf = yaml[env]
  unless conf
    raise "Configuration file #{config_path} doesn't include configuration for environment '#{env}'"
  end
  conf
}

case cmd
when :list
  # Print one row per task plus a total count.
  n = 0
  PerfectQueue.open(config_load_proc.call) {|queue|
    format = "%30s %15s %18s %18s %28s %28s %s"
    puts format % ["key", "type", "user", "status", "created_at", "timeout", "data"]
    queue.each {|task|
      puts format % [task.key, task.type, task.user, task.status, task.created_at, task.timeout, task.data]
      n += 1
    }
  }
  puts "#{n} entries."

when :finish
  PerfectQueue.open(config_load_proc.call) {|queue|
    queue[key].force_finish!
  }

when :submit
  PerfectQueue.open(config_load_proc.call) {|queue|
    queue.submit(key, type, data, task_options)
  }

when :run
  # Extend the load path / preload files, then run the named worker class.
  include_dirs.each {|path|
    $LOAD_PATH << File.expand_path(path)
  }
  require_files.each {|file|
    require file
  }
  klass = Object.const_get(klass)
  PerfectQueue::Worker.run(klass, &config_load_proc)

when :init
  PerfectQueue.open(config_load_proc.call) {|queue|
    queue.client.init_database
  }

when :debug
  # Connect an IRB session to a running worker's DRb debug interface.
  require 'irb'
  require 'drb'
  if debug_address.include?('/')
    # unix
    require 'drb/unix'
    uri = "drbunix:#{debug_address}"
  else
    # tcp
    uri = "druby://#{debug_address}"
  end
  puts "Connecting to #{uri}"
  remote_supervisor = DRb::DRbObject.new_with_uri(uri)
  # NOTE(review): these top-level constant assignments will emit
  # "already initialized constant" warnings if Supervisor/Engine are
  # already defined — presumably intentional shadowing for the IRB
  # session; confirm.
  Supervisor = remote_supervisor
  Engine = remote_supervisor.engine
  puts "Engine is initialized as a remote engine instance."
  ARGV.clear
  IRB.start
end
| ruby | Apache-2.0 | 3ef2dd04786ce822ce4f2e66d54d17ecf98fe8c9 | 2026-01-04T17:55:40.813256Z | false |
treasure-data/perfectqueue | https://github.com/treasure-data/perfectqueue/blob/3ef2dd04786ce822ce4f2e66d54d17ecf98fe8c9/lib/perfectqueue/application/decider.rb | lib/perfectqueue/application/decider.rb | #
# PerfectQueue
#
# Copyright (C) 2012-2013 Sadayuki Furuhashi
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
module PerfectQueue
  module Application
    # Raised by Decider#decide! when no decision method matches the name.
    class UndefinedDecisionError < StandardError
    end

    # Hosts "decision" methods that are invoked by name on behalf of an
    # application base object.
    class Decider
      def initialize(base)
        @base = base
      end

      # Queue of the owning application base.
      def queue
        @base.queue
      end

      # Task of the owning application base.
      def task
        @base.task
      end

      # Looks up the decision method named +type+ and invokes it with
      # +opts+; raises UndefinedDecisionError when it does not exist.
      def decide!(type, opts={})
        decision =
          begin
            method(type)
          rescue NameError
            raise UndefinedDecisionError, "Undefined decision #{type} options=#{opts.inspect}"
          end
        decision.call(opts)
      end
    end

    class DefaultDecider < Decider
      # no decisions defined
    end
  end
end
| ruby | Apache-2.0 | 3ef2dd04786ce822ce4f2e66d54d17ecf98fe8c9 | 2026-01-04T17:55:40.813256Z | false |
treasure-data/perfectqueue | https://github.com/treasure-data/perfectqueue/blob/3ef2dd04786ce822ce4f2e66d54d17ecf98fe8c9/lib/perfectqueue/application/dispatch.rb | lib/perfectqueue/application/dispatch.rb | #
# PerfectQueue
#
# Copyright (C) 2012-2013 Sadayuki Furuhashi
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
module PerfectQueue
  module Application
    # Runner that dispatches a task to a handler class chosen by the
    # class-level router (see RouterDSL) based on the task's type.
    class Dispatch < Runner
      # Runner interface
      def initialize(task)
        base = self.class.router.route(task.type)
        unless base
          # No handler registered for this type: push the task back for
          # a later retry, then fail loudly.
          task.retry!
          raise "Unknown task type #{task.type.inspect}" # TODO error class
        end
        @runner = base.new(task)
        super
      end

      attr_reader :runner

      # Delegates execution to the resolved handler instance.
      def run
        @runner.run
      end

      # Forwards kill/stop requests to the resolved handler instance.
      def kill(reason)
        @runner.kill(reason)
      end

      # DSL interface
      extend RouterDSL
    end
  end
end
| ruby | Apache-2.0 | 3ef2dd04786ce822ce4f2e66d54d17ecf98fe8c9 | 2026-01-04T17:55:40.813256Z | false |
treasure-data/perfectqueue | https://github.com/treasure-data/perfectqueue/blob/3ef2dd04786ce822ce4f2e66d54d17ecf98fe8c9/lib/perfectqueue/application/router.rb | lib/perfectqueue/application/router.rb | #
# PerfectQueue
#
# Copyright (C) 2012-2013 Sadayuki Furuhashi
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
module PerfectQueue
  module Application
    # Class-level DSL for declaring routing rules on a dispatcher class.
    module RouterDSL
      # Declares routes from a hash: non-Symbol keys are patterns mapped
      # to handler classes (or symbols); Symbol keys remain in +options+
      # and are passed along. Pattern entries are destructively removed
      # from +options+. Returns nil.
      def route(options)
        patterns = options.keys.reject { |k| k.is_a?(Symbol) }
        handlers = patterns.map { |pattern| options.delete(pattern) }
        patterns.each_with_index do |pattern, i|
          add_route(pattern, handlers[i], options)
        end
        nil
      end

      def add_route(pattern, klass, options)
        router.add(pattern, klass, options)
      end

      # Installs +router+ by defining a singleton reader returning it.
      def router=(router)
        (class << self; self; end).__send__(:define_method, :router) { router }
        router
      end

      # Lazily creates (and memoizes, via #router=) a Router on first use.
      def router
        self.router = Router.new
      end
    end

    # Maps task type strings to handler classes using ordered pattern
    # matching with per-type memoization (misses are cached as nil).
    class Router
      attr_reader :patterns

      def initialize
        @patterns = []
        @cache = {}
      end

      # Registers +pattern+; String/Symbol patterns become exact-match
      # regexps. +options+ is accepted for interface symmetry but unused.
      def add(pattern, sym, options={})
        regexp =
          case pattern
          when Regexp
            pattern
          when String, Symbol
            /\A#{Regexp.escape(pattern)}\z/
          else
            raise ArgumentError, "pattern should be String or Regexp but got #{pattern.class}: #{pattern.inspect}"
          end
        @patterns << [regexp, sym]
      end

      # Resolves +type+ to a handler class, or nil when nothing matches.
      def route(type)
        return @cache[type] if @cache.has_key?(type)
        matched = @patterns.find { |(regexp, _)| regexp.match(type) }
        @cache[type] = matched ? resolve_application_base(matched[1]) : nil
      end

      private
      # Symbols are resolved as constants relative to this class; anything
      # else (typically a Class) is used directly.
      def resolve_application_base(sym)
        sym.is_a?(Symbol) ? self.class.const_get(sym) : sym
      end
    end
  end
end
| ruby | Apache-2.0 | 3ef2dd04786ce822ce4f2e66d54d17ecf98fe8c9 | 2026-01-04T17:55:40.813256Z | false |
treasure-data/perfectqueue | https://github.com/treasure-data/perfectqueue/blob/3ef2dd04786ce822ce4f2e66d54d17ecf98fe8c9/lib/perfectqueue/application/base.rb | lib/perfectqueue/application/base.rb | #
# PerfectQueue
#
# Copyright (C) 2012-2013 Sadayuki Furuhashi
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
module PerfectQueue
  module Application

    # Base class for application task handlers. Subclasses implement
    # +perform+ (and optionally before/after hooks); unexpected errors are
    # routed to a pluggable decider.
    class Base < Runner
      # The decider class used to classify outcomes. DefaultDecider unless
      # overridden via +decider=+.
      def self.decider
        DefaultDecider
      end

      # Overrides the decider class by redefining the +decider+ reader on
      # this class's singleton class.
      def self.decider=(decider_klass)
        singleton = class << self; self; end
        singleton.__send__(:define_method, :decider) { decider_klass }
        decider_klass
      end

      def initialize(task)
        super
        @decider = self.class.decider.new(self)
      end

      attr_reader :decider

      # Runs the task: +before_perform+ gates execution, +after_perform+
      # always runs once +perform+ started. Any exception raised along the
      # way is handed to the decider instead of propagating.
      def run
        return unless before_perform
        begin
          perform
        ensure
          after_perform
        end
      rescue
        decide! :unexpected_error_raised, :error=>$!
      end

      # Hook: return a falsy value to skip perform/after_perform entirely.
      def before_perform
        true
      end

      #def perform
      #end

      # Hook: cleanup that runs even when perform raises.
      def after_perform
      end

      # Forwards a decision (e.g. retry/fail classification) to the decider.
      def decide!(type, option={})
        @decider.decide!(type, option)
      end
    end

  end
end
| ruby | Apache-2.0 | 3ef2dd04786ce822ce4f2e66d54d17ecf98fe8c9 | 2026-01-04T17:55:40.813256Z | false |
treasure-data/perfectqueue | https://github.com/treasure-data/perfectqueue/blob/3ef2dd04786ce822ce4f2e66d54d17ecf98fe8c9/lib/perfectqueue/backend/rdb.rb | lib/perfectqueue/backend/rdb.rb | require 'sequel'
require 'uri'
require_relative 'rdb_compat'
module PerfectQueue::Backend
class RDBBackend
MAX_RETRY = ::PerfectQueue::Backend::RDBCompatBackend::MAX_RETRY
DELETE_OFFSET = ::PerfectQueue::Backend::RDBCompatBackend::DELETE_OFFSET
class Token < Struct.new(:key)
end
def initialize(uri, table, config={})
@uri = uri
@table = table
u = URI.parse(@uri)
options = {
max_connections: 1,
user: u.user,
password: u.password,
host: u.host,
port: u.port ? u.port.to_i : 3306
}
@pq_connect_timeout = config.fetch(:pq_connect_timeout, 20)
options[:connect_timeout] = config.fetch(:connect_timeout, 3)
options[:sslca] = config[:sslca] if config[:sslca]
options[:ssl_mode] = config[:ssl_mode] if config[:ssl_mode]
db_name = u.path.split('/')[1]
@db = Sequel.mysql2(db_name, options)
@mutex = Mutex.new
connect {
# connection test
}
end
attr_reader :db
def submit(id, data, time=Process.clock_gettime(Process::CLOCK_REALTIME, :second), resource=nil, max_running=nil)
connect {
begin
data = Sequel::SQL::Blob.new(data)
@db.sql_log_level = :debug
n = @db["INSERT INTO `#{@table}` (id, timeout, data, created_at, resource, max_running) VALUES (?, ?, ?, ?, ?, ?);", id, time, data, time, resource, max_running].insert
return true
rescue Sequel::UniqueConstraintViolation => e
return nil
end
}
end
def cancel(id, delete_timeout=3600, now=Process.clock_gettime(Process::CLOCK_REALTIME, :second))
connect {
n = @db["UPDATE `#{@table}` SET timeout=?, created_at=NULL, resource=NULL WHERE id=? AND created_at IS NOT NULL;", now+delete_timeout-DELETE_OFFSET, id].update
return n > 0
}
end
private
def connect
tmax = Process.clock_gettime(Process::CLOCK_REALTIME, :second) + @pq_connect_timeout
@mutex.synchronize do
retry_count = 0
begin
yield
rescue Sequel::DatabaseConnectionError
if (retry_count += 1) < MAX_RETRY && tmax > Process.clock_gettime(Process::CLOCK_REALTIME, :second)
STDERR.puts "#{$!}\n retrying."
sleep 2
retry
end
STDERR.puts "#{$!}\n abort."
raise
rescue
# workaround for "Mysql2::Error: Deadlock found when trying to get lock; try restarting transaction" error
if $!.to_s.include?('try restarting transaction')
err = $!.backtrace.map{|bt| " #{bt}" }.unshift($!).join("\n")
retry_count += 1
if retry_count < MAX_RETRY
STDERR.puts "#{err}\n retrying."
sleep 0.5
retry
end
STDERR.puts "#{err}\n abort."
end
raise
ensure
@db.disconnect
end
end
end
end
end
| ruby | Apache-2.0 | 3ef2dd04786ce822ce4f2e66d54d17ecf98fe8c9 | 2026-01-04T17:55:40.813256Z | false |
treasure-data/perfectqueue | https://github.com/treasure-data/perfectqueue/blob/3ef2dd04786ce822ce4f2e66d54d17ecf98fe8c9/lib/perfectqueue/backend/rdb_compat.rb | lib/perfectqueue/backend/rdb_compat.rb | #
# PerfectQueue
#
# Copyright (C) 2012-2013 Sadayuki Furuhashi
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
module PerfectQueue
  module Backend

    # MySQL (mysql2 + Sequel) implementation of the queue backend.
    # Task state is encoded in a single table: the +timeout+ column holds
    # either the visibility deadline (waiting/running tasks) or a value
    # offset far into the past (finished tasks awaiting deletion).
    class RDBCompatBackend
      include BackendHelper

      #
      # == timeout model
      #
      # 0 ---- now-1Bs ---- retention ---|----- now -- alive ------- FUTURE
      #  ~~~~~~~^ to be deleted ^    |~~~^~~~   ^ running or in-queue
      #        DELETE 13_0000_0000->|   to be acquired
      #
      # NOTE: this architecture introduces Year 2042 problem.
      #
      DELETE_OFFSET = 10_0000_0000
      EVENT_HORIZON = 13_0000_0000 # 2011-03-13 07:06:40 UTC

      # Backoff bounds for acquiring the MySQL named table lock.
      LOCK_RETRY_INITIAL_INTERVAL = 0.5
      LOCK_RETRY_MAX_INTERVAL = 30

      # Opaque token identifying an acquired task by its row id.
      class Token < Struct.new(:key)
      end

      # config: :url (mysql2 URL, required), :table (required),
      #         :pq_connect_timeout, :connect_timeout, :sslca,
      #         :use_connection_pooling, :disable_resource_limit,
      #         :cleanup_interval
      def initialize(client, config)
        super
        @pq_connect_timeout = config.fetch(:pq_connect_timeout, 20)
        url = config[:url]
        @table = config[:table]
        unless @table
          raise ConfigError, ":table option is required"
        end

        if /\Amysql2:/i =~ url
          options = {max_connections: 1, sslca: config[:sslca]}
          options[:connect_timeout] = config.fetch(:connect_timeout, 3)
          @db = Sequel.connect(url, options)
          # Pooling defaults to on only when SSL is used (reconnect is
          # expensive there); an explicit option always wins.
          if config.fetch(:use_connection_pooling, nil) != nil
            @use_connection_pooling = !!config[:use_connection_pooling]
          else
            @use_connection_pooling = !!config[:sslca]
          end
          # GET_LOCK-based table lock with capped exponential backoff.
          @table_lock = lambda {
            locked = nil
            interval = LOCK_RETRY_INITIAL_INTERVAL
            loop do
              @db.fetch("SELECT GET_LOCK('#{@table}', #{LOCK_WAIT_TIMEOUT}) locked") do |row|
                locked = true if row[:locked] == 1
              end
              break if locked
              sleep interval
              interval = [interval * 2, LOCK_RETRY_MAX_INTERVAL].min
            end
          }
          @table_unlock = lambda {
            @db.run("DO RELEASE_LOCK('#{@table}')")
          }
        else
          raise ConfigError, "only 'mysql' is supported"
        end

        @last_time = Time.now.to_i
        @mutex = Mutex.new
        connect {
          # connection test
        }
        @disable_resource_limit = config[:disable_resource_limit]
        @cleanup_interval = config[:cleanup_interval] || DEFAULT_DELETE_INTERVAL
        # If cleanup_interval > max_request_per_child / max_acquire,
        # some processes won't run DELETE query.
        # (it's not an issue when there are enough workers)
        @cleanup_interval_count = @cleanup_interval > 0 ? rand(@cleanup_interval) : 0
      end

      attr_reader :db

      KEEPALIVE = 10            # seconds of idleness before reconnecting
      MAX_RETRY = 10            # retry budget for connection/deadlock errors
      LOCK_WAIT_TIMEOUT = 10    # GET_LOCK wait, seconds
      DEFAULT_DELETE_INTERVAL = 20  # acquire() calls between cleanup DELETEs

      # Creates the queue table (dropping it first with :force) plus the
      # timeout index used by acquisition queries.
      def init_database(options)
        sql = []
        sql << "DROP TABLE IF EXISTS `#{@table}`" if options[:force]
        sql << <<-SQL
          CREATE TABLE IF NOT EXISTS `#{@table}` (
            id VARCHAR(255) NOT NULL,
            timeout INT NOT NULL,
            data LONGBLOB NOT NULL,
            created_at INT,
            resource VARCHAR(255),
            max_running INT,
            /* CONNECTION_ID() can be 64bit: https://bugs.mysql.com/bug.php?id=19806 */
            owner BIGINT(21) UNSIGNED NOT NULL DEFAULT 0,
            PRIMARY KEY (id)
          )
          SQL
        sql << "CREATE INDEX `index_#{@table}_on_timeout` ON `#{@table}` (`timeout`)"
        connect {
          sql.each(&@db.method(:run))
        }
      end

      # => TaskStatus
      # Fetches a single task's row and wraps it as TaskMetadata.
      # Raises NotFoundError when the id does not exist.
      def get_task_metadata(key, options)
        now = (options[:now] || Time.now).to_i

        connect {
          row = @db.fetch("SELECT timeout, data, created_at, resource, max_running FROM `#{@table}` WHERE id=? LIMIT 1", key).first
          unless row
            raise NotFoundError, "task key=#{key} does no exist"
          end
          attributes = create_attributes(now, row)
          return TaskMetadata.new(@client, key, attributes)
        }
      end

      # => AcquiredTask
      def preempt(key, alive_time, options)
        raise NotSupportedError.new("preempt is not supported by rdb_compat backend")
      end

      # yield [TaskWithMetadata]
      # Iterates every task row in timeout order.
      def list(options, &block)
        now = (options[:now] || Time.now).to_i

        connect {
          @db.fetch("SELECT id, timeout, data, created_at, resource, max_running FROM `#{@table}` ORDER BY timeout ASC") {|row|
            attributes = create_attributes(now, row)
            task = TaskWithMetadata.new(@client, row[:id], attributes)
            yield task
          }
        }
      end

      # Gzip-compresses +data+ when compression == 'gzip'; otherwise
      # returns it unchanged.
      def compress_data(data, compression)
        if compression == 'gzip'
          io = StringIO.new
          io.set_encoding(Encoding::ASCII_8BIT)
          gz = Zlib::GzipWriter.new(io)
          begin
            gz.write(data)
          ensure
            gz.close
          end
          data = io.string
          data = Sequel::SQL::Blob.new(data)
        end
        data
      end

      # => Task
      # Inserts a new task. The task type is stored inside the JSON data
      # payload. Raises IdempotentAlreadyExistsError on duplicate keys.
      def submit(key, type, data, options)
        now = (options[:now] || Time.now).to_i
        now = 1 if now < 1  # 0 means cancel requested
        run_at = (options[:run_at] || now).to_i
        user = options[:user]
        user = user.to_s if user
        max_running = options[:max_running]
        data = data ? data.dup : {}
        data['type'] = type

        d = compress_data(data.to_json, options[:compression])
        connect {
          begin
            @db[
              "INSERT INTO `#{@table}` (id, timeout, data, created_at, resource, max_running) VALUES (?, ?, ?, ?, ?, ?)",
              key, run_at, d, now, user, max_running
            ].insert
            return Task.new(@client, key)
          rescue Sequel::UniqueConstraintViolation
            raise IdempotentAlreadyExistsError, "task key=#{key} already exists"
          end
        }
      end

      # => [AcquiredTask]
      # Acquires up to max_acquire tasks whose timeout has passed, moving
      # their timeout to now+alive_time. Periodically (every
      # @cleanup_interval calls) deletes rows past the deletion horizon.
      def acquire(alive_time, max_acquire, options)
        now = (options[:now] || Time.now).to_i
        next_timeout = now + alive_time
        t0 = nil

        if @cleanup_interval_count <= 0
          connect {
            t0=Process.clock_gettime(Process::CLOCK_MONOTONIC)
            @db["DELETE FROM `#{@table}` WHERE timeout <= ?", now-DELETE_OFFSET].delete
            @cleanup_interval_count = @cleanup_interval
            STDERR.puts"PQ:delete from #{@table}:%6f sec" % [Process.clock_gettime(Process::CLOCK_MONOTONIC)-t0]
          }
        end

        if @disable_resource_limit
          return acquire_without_resource(next_timeout, now, max_acquire)
        else
          return acquire_with_resource(next_timeout, now, max_acquire)
        end
      end

      # Finishes a task by key without holding its token.
      def force_finish(key, retention_time, options)
        finish(Token.new(key), retention_time, options)
      end

      # => nil
      # Marks a running task finished: timeout is pushed below the
      # deletion horizon (kept for retention_time) and created_at cleared.
      def finish(task_token, retention_time, options)
        now = (options[:now] || Time.now).to_i
        delete_timeout = now - DELETE_OFFSET + retention_time
        key = task_token.key

        connect {
          n = @db["UPDATE `#{@table}` SET timeout=?, created_at=NULL, resource=NULL WHERE id=? AND created_at IS NOT NULL", delete_timeout, key].update
          if n <= 0
            raise IdempotentAlreadyFinishedError, "task key=#{key} does not exist or already finished."
          end
        }
        nil
      end

      # => nil
      # Extends a running task's visibility deadline (and optionally
      # replaces its data). Raises PreemptedError if the task vanished or
      # was finished by someone else.
      def heartbeat(task_token, alive_time, options)
        now = (options[:now] || Time.now).to_i
        next_timeout = now + alive_time
        key = task_token.key
        data = options[:data]

        sql = "UPDATE `#{@table}` SET timeout=?"
        params = [sql, next_timeout]
        if data
          sql << ", data=?"
          params << compress_data(data.to_json, options[:compression])
        end
        sql << " WHERE id=? AND created_at IS NOT NULL"
        params << key

        connect {
          n = @db[*params].update
          if n <= 0
            # Zero rows updated: distinguish "gone"/"finished" from the
            # benign case where timeout already equals next_timeout.
            row = @db.fetch("SELECT id, timeout, created_at FROM `#{@table}` WHERE id=? LIMIT 1", key).first
            if row == nil
              raise PreemptedError, "task key=#{key} does not exist or preempted."
            elsif row[:created_at] == nil
              raise PreemptedError, "task key=#{key} preempted."
            else # row[:timeout] == next_timeout
              # ok
            end
          end
        }
        nil
      end

      # Releasing is implemented as a heartbeat with the new alive_time.
      def release(task_token, alive_time, options)
        heartbeat(task_token, alive_time, options)
      end

      protected

      # Runs the block while holding the GET_LOCK table lock. The lock is
      # explicitly released only when connections are pooled; otherwise
      # the disconnect in connect() drops it implicitly.
      def connect_locked
        connect {
          locked = false
          begin
            if @table_lock
              @table_lock.call
              locked = true
            end

            return yield
          ensure
            if @use_connection_pooling && locked
              @table_unlock.call
            end
          end
        }
      end

      # Runs the block under the mutex with keepalive, retry and
      # disconnect handling:
      # - reconnects when idle longer than KEEPALIVE
      # - connection errors: retry every 2s within MAX_RETRY and the
      #   @pq_connect_timeout budget
      # - InnoDB deadlocks ("try restarting transaction"): retry up to
      #   MAX_RETRY with a random short sleep; other errors drop the
      #   connection and propagate
      def connect
        now = Time.now.to_i
        tmax = now + @pq_connect_timeout

        @mutex.synchronize do
          # keepalive_timeout
          @db.disconnect if now - @last_time > KEEPALIVE

          count = 0
          begin
            yield
            @last_time = now
          rescue Sequel::DatabaseConnectionError
            if (count += 1) < MAX_RETRY && tmax > Time.now.to_i
              STDERR.puts "#{$!}\n retrying."
              sleep 2
              retry
            end
            STDERR.puts "#{$!}\n abort."
            raise
          rescue
            # workaround for "Mysql2::Error: Deadlock found when trying to get lock; try restarting transaction" error
            if $!.to_s.include?('try restarting transaction')
              err = ([$!] + $!.backtrace.map {|bt| "  #{bt}" }).join("\n")
              count += 1
              if count < MAX_RETRY
                STDERR.puts err + "\n  retrying."
                sleep rand
                retry
              else
                STDERR.puts err + "\n  abort."
              end
            else
              err = $!
            end
            STDERR.puts "disconnects current connection: #{err}"
            @db.disconnect
            raise
          ensure
            # connection_pooling
            @db.disconnect if !@use_connection_pooling
          end
        end
      end

      GZIP_MAGIC_BYTES = [0x1f, 0x8b].pack('CC')

      # Builds the attributes hash for a task row: derives status from
      # created_at/timeout, auto-detects gzip payloads, parses the JSON
      # body, and falls back to the id prefix when no type is stored.
      def create_attributes(now, row)
        compression = nil
        if row[:created_at] === nil
          created_at = nil  # unknown creation time
          status = TaskStatus::FINISHED
        elsif now && row[:timeout] < now
          created_at = row[:created_at]
          status = TaskStatus::WAITING
        else
          created_at = row[:created_at]
          status = TaskStatus::RUNNING
        end

        d = row[:data]
        if d == nil || d == ''
          data = {}

        else
          # automatic gzip decompression
          d.force_encoding('ASCII-8BIT') if d.respond_to?(:force_encoding)
          if d[0, 2] == GZIP_MAGIC_BYTES
            compression = 'gzip'
            gz = Zlib::GzipReader.new(StringIO.new(d))
            begin
              d = gz.read
            ensure
              gz.close
            end
          end

          begin
            data = JSON.parse(d)
          rescue
            data = {}
          end
        end

        type = data.delete('type')
        if type == nil || type.empty?
          type = row[:id].split(/\./, 2)[0]
        end

        {
          :status => status,
          :created_at => created_at,
          :data => data,
          :type => type,
          :user => row[:resource],
          :timeout => row[:timeout],
          :max_running => row[:max_running],
          :message => nil,  # not supported
          :node => nil,  # not supported
          :compression => compression,
        }
      end

      def acquire_without_resource(next_timeout, now, max_acquire)
        # MySQL's CONNECTION_ID() is a 64bit unsigned integer from the
        # server's internal thread ID counter. It is unique while the MySQL
        # server is running.
        # https://bugs.mysql.com/bug.php?id=19806
        #
        # An acquired task is marked with next_timeout and CONNECTION_ID().
        # Therefore while alive_time is not changed and we don't restart
        # the server in 1 second, they won't conflict.
        update_sql = <<SQL
UPDATE `#{@table}`
JOIN (
  SELECT id
  FROM `#{@table}` FORCE INDEX (`index_#{@table}_on_timeout`)
  WHERE #{EVENT_HORIZON} < timeout AND timeout <= :now
  ORDER BY timeout ASC
  LIMIT :max_acquire) AS t1 USING(id)
SET timeout=:next_timeout, owner=CONNECTION_ID()
SQL
        select_sql = <<SQL
SELECT id, timeout, data, created_at, resource
FROM `#{@table}`
WHERE timeout = ? AND owner = CONNECTION_ID()
SQL
        t0 = 0
        connect_locked do
          t0 = Process.clock_gettime(Process::CLOCK_MONOTONIC)
          n = @db[update_sql, next_timeout: next_timeout, now: now, max_acquire: max_acquire].update
          @table_unlock.call
          STDERR.puts "PQ:acquire from #{@table}:%6f sec (%d tasks)" % [Process.clock_gettime(Process::CLOCK_MONOTONIC)-t0,n]
          return nil if n <= 0

          tasks = []
          @db.fetch(select_sql, next_timeout) do |row|
            attributes = create_attributes(nil, row)
            task_token = Token.new(row[:id])
            task = AcquiredTask.new(@client, row[:id], attributes, task_token)
            tasks.push task
          end
          @cleanup_interval_count -= 1

          return tasks
        end
      end

      # Resource-aware acquisition: tasks are weighted by how much of
      # their resource's max_running budget is free, then bumped to
      # next_timeout in one UPDATE. Returns nil when preempted mid-way.
      def acquire_with_resource(next_timeout, now, max_acquire)
        t0 = nil
        tasks = nil
        sql = <<SQL
SELECT id, timeout, data, created_at, resource, max_running, IFNULL(max_running, 1) / (IFNULL(running, 0) + 1) AS weight
FROM `#{@table}`
LEFT JOIN (
  SELECT resource AS res, COUNT(1) AS running
  FROM `#{@table}` AS T
  WHERE timeout > ? AND created_at IS NOT NULL AND resource IS NOT NULL
  GROUP BY resource
) AS R ON resource = res
WHERE #{EVENT_HORIZON} < timeout AND timeout <= ?
  AND created_at IS NOT NULL
  AND (max_running-running IS NULL OR max_running-running > 0)
ORDER BY weight DESC, timeout ASC
LIMIT ?
SQL
        connect_locked do
          t0 = Process.clock_gettime(Process::CLOCK_MONOTONIC)
          tasks = []
          @db.fetch(sql, now, now, max_acquire) do |row|
            attributes = create_attributes(nil, row)
            task_token = Token.new(row[:id])
            task = AcquiredTask.new(@client, row[:id], attributes, task_token)
            tasks.push task
          end
          return nil if tasks.empty?

          sql = "UPDATE `#{@table}` FORCE INDEX (PRIMARY) SET timeout=? WHERE timeout <= ? AND id IN ("
          params = [sql, next_timeout, now]
          params.concat tasks.map(&:key)
          sql << '?,' * tasks.size
          sql.chop!
          sql << ") AND created_at IS NOT NULL"
          n = @db[*params].update
          if n != tasks.size
            # preempted
            return nil
          end
        end
        @cleanup_interval_count -= 1
        return tasks
      ensure
        STDERR.puts "PQ:acquire from #{@table}:%6f sec (%d tasks)" % \
          [Process.clock_gettime(Process::CLOCK_MONOTONIC)-t0, tasks.size] if tasks
      end
    end
  end
end
| ruby | Apache-2.0 | 3ef2dd04786ce822ce4f2e66d54d17ecf98fe8c9 | 2026-01-04T17:55:40.813256Z | false |
treasure-data/perfectqueue | https://github.com/treasure-data/perfectqueue/blob/3ef2dd04786ce822ce4f2e66d54d17ecf98fe8c9/lib/perfectqueue/backend/null.rb | lib/perfectqueue/backend/null.rb | module PerfectQueue::Backend
class NullBackend
def list(&block)
nil
end
def acquire(timeout, now=Time.now.to_i)
nil
end
def finish(token, delete_timeout=3600, now=Time.now.to_i)
true
end
def update(token, timeout)
nil
end
def cancel(id, delete_timeout=3600, now=Time.now.to_i)
true
end
def submit(id, data, time=Time.now.to_i, resource=nil, max_running=nil)
true
end
end
end
| ruby | Apache-2.0 | 3ef2dd04786ce822ce4f2e66d54d17ecf98fe8c9 | 2026-01-04T17:55:40.813256Z | false |
treasure-data/perfectqueue | https://github.com/treasure-data/perfectqueue/blob/3ef2dd04786ce822ce4f2e66d54d17ecf98fe8c9/lib/perfectqueue/multiprocess/fork_processor.rb | lib/perfectqueue/multiprocess/fork_processor.rb | #
# PerfectQueue
#
# Copyright (C) 2012-2013 Sadayuki Furuhashi
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
module PerfectQueue
  module Multiprocess

    # Supervises one forked worker child: forks it, watches its heartbeat
    # pipe, and kills/restarts it when it stalls, crashes, or on restart.
    class ForkProcessor
      def initialize(runner, processor_id, config)
        @runner = runner
        @processor_id = processor_id
        @stop = false
        @cpm = nil              # ChildProcessMonitor of the live child, if any
        @last_fork_time = 0
        restart(false, config)
      end

      # Reloads tunables from config and asks the current child (if any)
      # to stop; keepalive will then fork a fresh one with new settings.
      def restart(immediate, config)
        @child_heartbeat_limit = config[:child_heartbeat_limit] || 60.0
        @child_kill_interval = config[:child_kill_interval] || 2.0
        @child_graceful_kill_limit = config[:child_graceful_kill_limit] || nil
        @child_fork_frequency_limit = config[:child_fork_frequency_limit] || 5.0
        @child_heartbeat_kill_delay = config[:child_heartbeat_kill_delay] || 10
        @log = config[:logger]
        @config = config  # for child process

        if c = @cpm
          c.start_killing(immediate)
        end
      end

      # Initiates shutdown; the child is reaped by later keepalive/join.
      def stop(immediate)
        @stop = true
        if c = @cpm
          c.start_killing(immediate)
        end
        self
      end

      # Periodic tick from the supervisor: checks the child's heartbeat,
      # reaps dead children, and forks a replacement unless stopping.
      def keepalive
        if @stop
          try_join
          return
        end

        if c = @cpm
          if c.killing_status != true
            # don't check status if killing status is immediate-killing
            begin
              # receive heartbeat
              keptalive = c.check_heartbeat(@child_heartbeat_limit)
              if !keptalive
                @log.error "Heartbeat broke out. Restarting child process id=#{@processor_id} pid=#{c.pid}."
                c.start_killing(true)
              end
            rescue EOFError
              # child closed its end of the pipe (likely died)
              @log.error "Heartbeat pipe is closed. Restarting child process id=#{@processor_id} pid=#{c.pid}."
              c.start_killing(true, @child_heartbeat_kill_delay)
            rescue
              @log.error "Unknown error: #{$!.class}: #{$!}: Restarting child process id=#{@processor_id} pid=#{c.pid}."
              $!.backtrace.each {|bt| @log.warn "\t#{bt}" }
              c.start_killing(true, @child_heartbeat_kill_delay)
            end
          end

          try_join
        end

        unless @cpm
          begin
            @cpm = fork_child
          rescue
            @log.error "Failed to fork child process id=#{@processor_id}: #{$!.class}: #{$!}"
            $!.backtrace.each {|bt| @log.warn "\t#{bt}" }
          end
        end

        nil
      end

      # Blocks until the child has exited and been reaped.
      def join
        while !try_join
          sleep (@child_kill_interval+1) / 2  # TODO
        end
        self
      end

      # SIGCONT tells the child to reopen its log files after rotation.
      def logrotated
        if c = @cpm
          c.send_signal(:CONT)
        end
      end

      private

      # Attempts a non-blocking reap of the child; true when no child
      # remains. Escalates kill signals via ChildProcessMonitor#try_join.
      def try_join
        unless @cpm
          return true
        end

        if @cpm.try_join(@child_kill_interval, @child_graceful_kill_limit)
          @cpm.cleanup
          @cpm = nil
          return true
        else
          return false
        end
      end

      # Serializes pipe creation across ForkProcessor instances so fds
      # aren't leaked into sibling children between pipe() and fork().
      INTER_FORK_LOCK = Mutex.new

      # Forks a worker running ChildProcess.run, wired to this parent via
      # a heartbeat pipe. Rate-limited by @child_fork_frequency_limit
      # (returns nil when called again too soon).
      def fork_child
        now = Time.now.to_f
        if now - @last_fork_time < @child_fork_frequency_limit
          @log.info "Tried to fork child #{now-@last_fork_time} seconds ago < #{@child_fork_frequency_limit}. Waiting... id=#{@processor_id}"
          return nil
        end
        @last_fork_time = now

        # set process name
        @runner.before_fork if @runner.respond_to?(:before_fork)  # TODO exception handling

        INTER_FORK_LOCK.lock
        begin
          rpipe, wpipe = IO.pipe
          rpipe.fcntl(Fcntl::F_SETFD, Fcntl::FD_CLOEXEC)
          wpipe.fcntl(Fcntl::F_SETFD, Fcntl::FD_CLOEXEC)
        ensure
          INTER_FORK_LOCK.unlock
        end

        pid = fork do
          #STDIN.close
          # pass-through STDOUT
          # pass-through STDERR
          rpipe.close
          $0 = "perfectqueue:#{@runner} #{@processor_id}"
          @runner.after_fork if @runner.respond_to?(:after_fork)
          begin
            ChildProcess.run(@runner, @processor_id, @config, wpipe)
          ensure
            @runner.after_child_end if @runner.respond_to?(:after_child_end)  # TODO exception handling
          end
          exit! 0
        end

        @log.info "Worker process started. pid=#{pid}"
        wpipe.close
        ChildProcessMonitor.new(@log, pid, rpipe)
      end
    end

  end
end
| ruby | Apache-2.0 | 3ef2dd04786ce822ce4f2e66d54d17ecf98fe8c9 | 2026-01-04T17:55:40.813256Z | false |
treasure-data/perfectqueue | https://github.com/treasure-data/perfectqueue/blob/3ef2dd04786ce822ce4f2e66d54d17ecf98fe8c9/lib/perfectqueue/multiprocess/thread_processor.rb | lib/perfectqueue/multiprocess/thread_processor.rb | #
# PerfectQueue
#
# Copyright (C) 2012-2013 Sadayuki Furuhashi
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
module PerfectQueue
  module Multiprocess

    # Runs the poll/process loop in a thread inside the current process.
    # Also the base class for ChildProcess (forked-worker variant).
    class ThreadProcessor
      def initialize(runner, processor_id, config)
        @runner = runner
        @processor_id = processor_id

        @running_flag = BlockingFlag.new   # set while run_loop is active
        @finish_flag = BlockingFlag.new    # set to request loop exit

        @tm = TaskMonitor.new(config, method(:child_heartbeat), method(:force_stop))
        restart(false, config)
      end

      # Thread body: runs the poll loop, then waits for the task monitor.
      def run
        @tm.start
        @running_flag.set_region do
          run_loop
        end
        @tm.join
      ensure
        @thread = nil
      end

      def join
        while t = @thread
          t.join
        end
      end

      # Periodic tick: (re)spawns the worker thread if it is not running.
      def keepalive
        unless @thread
          @thread = Thread.new(&method(:run))
        end
      end

      # Reloads tunables, stops the current task, and blocks until the
      # loop has actually exited (so a new one can start cleanly).
      def restart(immediate, config)
        @poll_interval = config[:poll_interval] || 1.0
        @log = config[:logger]
        @task_prefetch = config[:task_prefetch] || 0
        @config = config

        @tm.stop_task(immediate)
        @finish_flag.set_region do
          @running_flag.wait while @running_flag.set?
        end
      end

      def stop(immediate)
        @log.info immediate ? "Stopping thread immediately id=#{@processor_id}" : "Stopping thread gracefully id=#{@processor_id}"
        @tm.stop_task(immediate)
        @finish_flag.set!
      end

      # Emergency stop invoked by the task monitor (e.g. heartbeat failure).
      def force_stop
        @log.error "Force stopping processor processor_id=#{@processor_id}"
        @tm.stop_task(true)
        @finish_flag.set!
      end

      def logrotated
        # do nothing
      end

      private

      # Overridden by ChildProcess to ping the parent via the pipe.
      def child_heartbeat
        # do nothing
      end

      # Polls the queue until the finish flag is set, processing acquired
      # tasks one at a time (optionally prefetching extras).
      def run_loop
        PerfectQueue.open(@config) {|queue|
          until @finish_flag.set?
            tasks = queue.poll_multi(:max_acquire=>1+@task_prefetch)
            if tasks == nil || tasks.empty?
              @finish_flag.wait(@poll_interval)
            else
              begin
                while task = tasks.shift
                  process(task)
                end
              ensure
                # TODO do not call release! because rdb_compat backend
                # doesn't have a mechanism to detect preemption.
                # release! could cause a problem that multiple
                # workers run one task concurrently.
                #tasks.each {|task|
                #  # ignoring errors doesn't cause serious problems
                #  # because it's same as failure of this server.
                #  task.release! rescue nil
                #}
              end
            end
          end
        }
      rescue
        @log.error "Unknown error #{$!.class}: #{$!}: Exiting thread id=#{@processor_id}"
        $!.backtrace.each {|bt| @log.warn "\t#{bt}" }
      ensure
        @tm.stop
      end

      # Runs a single task under the task monitor. Unexpected errors are
      # logged and re-raised to terminate the loop (supervisor restarts it).
      def process(task)
        @log.info "acquired task task=#{task.key} id=#{@processor_id}: #{task.inspect}"
        begin
          r = @runner.new(task)
          @tm.set_task(task, r)
          begin
            r.run
          ensure
            @tm.task_finished(task)
          end
          @log.info "completed processing task=#{task.key} id=#{@processor_id}:"
        rescue
          @log.error "unexpectedly failed task=#{task.key} id=#{@processor_id}: #{$!.class}: #{$!}"
          $!.backtrace.each {|bt| @log.warn "\t#{bt}" }
          raise  # force exit
        end
      end
    end

  end
end
| ruby | Apache-2.0 | 3ef2dd04786ce822ce4f2e66d54d17ecf98fe8c9 | 2026-01-04T17:55:40.813256Z | false |
treasure-data/perfectqueue | https://github.com/treasure-data/perfectqueue/blob/3ef2dd04786ce822ce4f2e66d54d17ecf98fe8c9/lib/perfectqueue/multiprocess/child_process_monitor.rb | lib/perfectqueue/multiprocess/child_process_monitor.rb | #
# PerfectQueue
#
# Copyright (C) 2012-2013 Sadayuki Furuhashi
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
module PerfectQueue
  module Multiprocess

    # Parent-side handle for one forked worker: tracks its heartbeat over
    # a pipe and implements graceful -> immediate kill escalation,
    # including killing the worker's own descendant processes.
    class ChildProcessMonitor
      def initialize(log, pid, rpipe)
        @log = log
        @pid = pid
        @rpipe = rpipe
        @last_heartbeat = Time.now.to_i
        @kill_start_time = nil   # non-nil once killing has been requested
        @last_kill_time = nil
        @kill_immediate = false  # true once escalated to SIGKILL
        @rbuf = ''               # reusable read buffer
      end

      attr_reader :pid

      # Drains heartbeat bytes from the pipe. Returns true when a beat
      # arrived; when the pipe is empty, returns whether the last beat is
      # still within +limit+ seconds. EOFError (dead child) propagates.
      def check_heartbeat(limit)
        @rpipe.read_nonblock(1024, @rbuf)
        @last_heartbeat = Time.now.to_i
        return true
      rescue Errno::EINTR, Errno::EAGAIN
        return Time.now.to_i - @last_heartbeat <= limit
      end

      # Requests the child's death. immediate=true escalates to SIGKILL;
      # a later immediate call escalates an in-progress graceful kill.
      # +delay+ postpones the first signal by that many seconds.
      def start_killing(immediate, delay=0)
        if immediate && !@kill_immediate
          @kill_immediate = true  # escalation
        elsif @kill_start_time
          return
        end

        now = Time.now.to_i
        if delay == 0
          @last_kill_time = @kill_start_time = now
          kill_children(now, nil)
        else
          @last_kill_time = @kill_start_time = now + delay
        end
      end

      # nil: not killing; false: graceful kill in progress; true: immediate.
      def killing_status
        if @kill_start_time
          if @kill_immediate
            return true
          else
            return false
          end
        else
          return nil
        end
      end

      # Non-blocking reap. Returns true once the child is gone; otherwise
      # re-signals every +kill_interval+ seconds (escalating to SIGKILL
      # after +graceful_kill_limit+ seconds, if given) and returns false.
      def try_join(kill_interval, graceful_kill_limit)
        return nil unless @kill_start_time

        begin
          if Process.waitpid(@pid, Process::WNOHANG)
            @log.info "Processor exited and joined pid=#{@pid}"
            return true
          end
        rescue Errno::ECHILD
          # SIGCHLD is trapped in Supervisor#install_signal_handlers
          @log.info "Processor exited pid=#{@pid}"
          return true
        end

        # resend signal
        now = Time.now.to_i
        if @last_kill_time + kill_interval <= now
          kill_children(now, graceful_kill_limit)
          @last_kill_time = now
        end

        return false
      end

      def cleanup
        @rpipe.close unless @rpipe.closed?
      end

      # Sends +sig+ to the child, ignoring already-dead/permission errors.
      def send_signal(sig)
        begin
          Process.kill(sig, @pid)
        rescue Errno::ESRCH, Errno::EPERM
          # TODO log?
        end
      end

      private

      # Graceful: SIGTERM to the child only. Immediate (or past the
      # graceful limit): SIGKILL the child's whole descendant tree,
      # deepest-first.
      def kill_children(now, graceful_kill_limit)
        immediate = @kill_immediate || (graceful_kill_limit && @kill_start_time + graceful_kill_limit < now)
        if immediate
          pids = collect_child_pids(get_ppid_pids_map, [@pid], @pid)
          pids.reverse_each {|pid|
            kill_process(pid, true)
          }
        else
          kill_process(@pid, false)
        end
      end

      # Snapshot of the system process tree via `ps`: {ppid => [pid, ...]}.
      def get_ppid_pids_map
        ppid_pids = {}  # {ppid => [pid]}
        `ps axo pid,ppid`.each_line do |line|
          if m = /^\s*(\d+)\s+(\d+)\s*$/.match(line)
            (ppid_pids[m[2].to_i] ||= []) << m[1].to_i
          end
        end
        return ppid_pids
        # We can ignore errors but not necessary
        #rescue
        #  return {}
      end

      # Depth-first accumulation of parent_pid's descendants into results.
      def collect_child_pids(ppid_pids, results, parent_pid)
        if pids = ppid_pids[parent_pid]
          pids.each {|pid|
            results << pid
            collect_child_pids(ppid_pids, results, pid)
          }
        end
        results
      end

      # SIGKILL (immediate) or SIGTERM (graceful) to one pid, ignoring
      # already-dead/permission errors.
      def kill_process(pid, immediate)
        begin
          if immediate
            @log.debug "sending SIGKILL to pid=#{pid} for immediate stop"
            Process.kill(:KILL, pid)
          else
            @log.debug "sending SIGTERM to pid=#{pid} for graceful stop"
            Process.kill(:TERM, pid)
          end
        rescue Errno::ESRCH, Errno::EPERM
          # TODO log?
        end
      end
    end

  end
end
| ruby | Apache-2.0 | 3ef2dd04786ce822ce4f2e66d54d17ecf98fe8c9 | 2026-01-04T17:55:40.813256Z | false |
treasure-data/perfectqueue | https://github.com/treasure-data/perfectqueue/blob/3ef2dd04786ce822ce4f2e66d54d17ecf98fe8c9/lib/perfectqueue/multiprocess/child_process.rb | lib/perfectqueue/multiprocess/child_process.rb | #
# PerfectQueue
#
# Copyright (C) 2012-2013 Sadayuki Furuhashi
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
module PerfectQueue
  module Multiprocess

    # ThreadProcessor variant that runs inside a forked worker process:
    # heartbeats to the parent over a pipe and reacts to POSIX signals.
    class ChildProcess < ThreadProcessor
      # Entry point called in the forked child (see ForkProcessor#fork_child).
      def self.run(runner, processor_id, config, wpipe)
        new(runner, processor_id, config, wpipe).run
      end

      def initialize(runner, processor_id, config, wpipe)
        @wpipe = wpipe
        @wpipe.sync = true
        @request_per_child = 0   # tasks processed since fork
        super(runner, processor_id, config)
        @sig = install_signal_handlers
      end

      # override
      def run
        super
        @sig.stop
      end

      # override
      def stop(immediate)
        @log.info "Exiting processor id=#{@processor_id} pid=#{Process.pid}"
        super
      end

      # override
      # join/keepalive are parent-side concerns; no-ops in the child.
      def join
        # do nothing
      end

      # override
      def keepalive
        # do nothing
      end

      # override
      def logrotated
        @log.reopen!
      end

      # override
      # Pings the parent; a broken pipe means the parent died, so bail out.
      def child_heartbeat
        @wpipe.write HEARTBEAT_PACKET
      rescue
        @log.error "Parent process unexpectedly died: #{$!}"
        force_stop
      end

      # override
      # Hard exit: SIGKILL ourselves; exit!(137) as a fallback.
      def force_stop
        super
        Process.kill(:KILL, Process.pid)
        exit! 137
      end

      HEARTBEAT_PACKET = [0].pack('C')

      # override
      def restart(immediate, config)
        @max_request_per_child = config[:max_request_per_child] || nil
        super
      end

      # override
      # Gracefully retires this child after max_request_per_child tasks
      # (the parent forks a replacement).
      def process(task)
        super
        if @max_request_per_child
          @request_per_child += 1
          if @request_per_child > @max_request_per_child
            stop(false)
          end
        end
      end

      private

      # Maps supervisor signals to stop/logrotate actions.
      # NOTE(review): USR1/CONT map to graceful stop and HUP/WINCH/QUIT to
      # immediate stop — presumably mirroring the supervisor's restart
      # protocol; confirm against Supervisor's signal usage.
      def install_signal_handlers
        s = self
        SignalThread.new do |st|
          st.trap :TERM do
            s.stop(false)
          end
          st.trap :INT do
            s.stop(false)
          end

          st.trap :QUIT do
            s.stop(true)
          end

          st.trap :USR1 do
            s.stop(false)
          end

          st.trap :HUP do
            s.stop(true)
          end

          st.trap :CONT do
            s.stop(false)
          end

          st.trap :WINCH do
            s.stop(true)
          end

          st.trap :USR2 do
            s.logrotated
          end

          # restore default SIGCHLD so the child can reap its own children
          trap :CHLD, "SIG_DFL"
        end
      end
    end

  end
end
| ruby | Apache-2.0 | 3ef2dd04786ce822ce4f2e66d54d17ecf98fe8c9 | 2026-01-04T17:55:40.813256Z | false |
displague/vagrant-linode | https://github.com/displague/vagrant-linode/blob/0b06818ea50a408b793d14653d3bc34835b1dac0/test/cookbooks/test/recipes/default.rb | test/cookbooks/test/recipes/default.rb | log 'Testing 1 2 3!'
| ruby | MIT | 0b06818ea50a408b793d14653d3bc34835b1dac0 | 2026-01-04T17:55:46.227254Z | false |
displague/vagrant-linode | https://github.com/displague/vagrant-linode/blob/0b06818ea50a408b793d14653d3bc34835b1dac0/spec/spec_helper.rb | spec/spec_helper.rb | if ENV['COVERAGE'] != 'false'
require 'simplecov'
require 'coveralls'
SimpleCov.formatter = SimpleCov::Formatter::MultiFormatter.new([
SimpleCov::Formatter::HTMLFormatter,
Coveralls::SimpleCov::Formatter
])
SimpleCov.start
# Normally classes are lazily loaded, so any class without a test
# is missing from the report. This ensures they show up so we can
# see uncovered methods.
require 'vagrant'
Dir['lib/**/*.rb'].each do|file|
require_string = file.match(/lib\/(.*)\.rb/)[1]
require require_string
end
end
require 'pry'
require 'rspec/its'
I18n.load_path << 'locales/en.yml'
I18n.reload!
| ruby | MIT | 0b06818ea50a408b793d14653d3bc34835b1dac0 | 2026-01-04T17:55:46.227254Z | false |
displague/vagrant-linode | https://github.com/displague/vagrant-linode/blob/0b06818ea50a408b793d14653d3bc34835b1dac0/spec/vagrant-linode/config_spec.rb | spec/vagrant-linode/config_spec.rb | require 'spec_helper'
require 'vagrant-linode/config'
# Specs for VagrantPlugins::Linode::Config: default values, overrides,
# validation rules, and a few helper predicates.
#
# NOTE(review): several expectations here look stale relative to the
# config class — the "defaults" use regex matchers (eq(/Ubuntu/)) while
# finalize! assigns plain strings, and the linode_auth_url / lon_region?
# / network groups reference settings that do not appear in config.rb.
# Verify against the current Config implementation.
describe VagrantPlugins::Linode::Config do
  describe 'defaults' do
    let(:vagrant_public_key) { Vagrant.source_root.join('keys/vagrant.pub') }

    # finalize! resolves UNSET_VALUE sentinels into concrete defaults.
    subject do
      super().tap(&:finalize!)
    end

    its(:api_key) { should be_nil }
    its(:api_url) { should be_nil }
    its(:distribution) { should eq(/Ubuntu/) }
    its(:datacenter) { should eq(/dallas/) }
    its(:plan) { should eq(/2048/) }
    its(:paymentterm) { should eq(/1/) }
    its(:private_networking) { should eq(/Ubuntu/) }
    its(:ca_path) { should eql(vagrant_public_key) }
    its(:ssh_key_name) { should eq(/Vagrant/) }
    its(:setup) { should eq(true) }
    its(:xvda_size) { should eq(true) }
    its(:swap_size) { should eq(256) }
  end

  describe 'overriding defaults' do
    # Every listed attribute should keep an explicitly assigned value
    # through finalize! instead of being clobbered by the default.
    [:api_key,
     :api_url,
     :distribution,
     :plan,
     :paymentterm,
     :private_networking,
     :ca_path,
     :ssh_key_name,
     :setup,
     :xvda_size,
     :swap_size].each do |attribute|
      it "should not default #{attribute} if overridden" do
        subject.send("#{attribute}=".to_sym, 'foo')
        subject.finalize!
        subject.send(attribute).should == 'foo'
      end
    end

    it 'should not default plan if overridden' do
      plan = 'Linode 2048'
      subject.send(:plan, plan)
      subject.finalize!
      subject.send(:plan).should include(plan)
    end
  end

  describe 'validation' do
    let(:machine) { double('machine') }
    # validate returns { 'Linode Provider' => [errors] }.
    let(:validation_errors) { subject.validate(machine)['Linode Provider'] }
    let(:error_message) { double('error message') }

    before(:each) do
      machine.stub_chain(:env, :root_path).and_return '/'
      subject.api_key = 'bar'
    end

    subject do
      super().tap(&:finalize!)
    end

    context 'with invalid key' do
      it 'should raise an error' do
        subject.nonsense1 = true
        subject.nonsense2 = false
        I18n.should_receive(:t).with('vagrant.config.common.bad_field',
                                     fields: 'nonsense1, nonsense2')
          .and_return error_message
        validation_errors.first.should == error_message
      end
    end

    context 'with good values' do
      it 'should validate' do
        validation_errors.should be_empty
      end
    end

    context 'the API key' do
      it 'should error if not given' do
        subject.api_key = nil
        I18n.should_receive(:t).with('vagrant_linode.config.api_key').and_return error_message
        validation_errors.first.should == error_message
      end
    end

    context 'the public key path' do
      it "should have errors if the key doesn't exist" do
        subject.public_key_path = 'missing'
        I18n.should_receive(:t).with('vagrant_linode.config.public_key_not_found').and_return error_message
        validation_errors.first.should == error_message
      end

      it 'should not have errors if the key exists with an absolute path' do
        subject.public_key_path = File.expand_path 'locales/en.yml', Dir.pwd
        validation_errors.should be_empty
      end

      it 'should not have errors if the key exists with a relative path' do
        machine.stub_chain(:env, :root_path).and_return '.'
        subject.public_key_path = 'locales/en.yml'
        validation_errors.should be_empty
      end
    end

    context 'the username' do
      it 'should error if not given' do
        subject.username = nil
        I18n.should_receive(:t).with('vagrant_linode.config.username_required').and_return error_message
        validation_errors.first.should == error_message
      end
    end

    [:linode_compute_url, :linode_auth_url].each do |url|
      context "the #{url}" do
        it 'should not validate if the URL is invalid' do
          subject.send "#{url}=", 'baz'
          I18n.should_receive(:t).with('vagrant_linode.config.invalid_uri', key: url, uri: 'baz').and_return error_message
          validation_errors.first.should == error_message
        end
      end
    end
  end

  # NOTE(review): the remaining groups (linode_auth_url, lon_region?,
  # network) reference Fog endpoints and accessors that look copied from
  # another provider's spec — confirm they still apply.
  describe 'linode_auth_url' do
    it 'should return UNSET_VALUE if linode_auth_url and linode_region are UNSET' do
      subject.linode_auth_url.should == VagrantPlugins::Linode::Config::UNSET_VALUE
    end

    it 'should return UNSET_VALUE if linode_auth_url is UNSET and linode_region is :ord' do
      subject.linode_region = :ord
      subject.linode_auth_url.should == VagrantPlugins::Linode::Config::UNSET_VALUE
    end

    it 'should return UK Authentication endpoint if linode_auth_url is UNSET and linode_region is :lon' do
      subject.linode_region = :lon
      subject.linode_auth_url.should == Fog::Linode::UK_AUTH_ENDPOINT
    end

    it 'should return custom endpoint if supplied and linode_region is :lon' do
      my_endpoint = 'http://custom-endpoint.com'
      subject.linode_region = :lon
      subject.linode_auth_url = my_endpoint
      subject.linode_auth_url.should == my_endpoint
    end

    it 'should return custom endpoint if supplied and linode_region is UNSET' do
      my_endpoint = 'http://custom-endpoint.com'
      subject.linode_auth_url = my_endpoint
      subject.linode_auth_url.should == my_endpoint
    end
  end

  describe 'lon_region?' do
    it 'should return false if linode_region is UNSET_VALUE' do
      subject.linode_region = VagrantPlugins::Linode::Config::UNSET_VALUE
      subject.send(:lon_region?).should be_false
    end

    it 'should return false if linode_region is nil' do
      subject.linode_region = nil
      subject.send(:lon_region?).should be_false
    end

    it 'should return false if linode_region is :ord' do
      subject.linode_region = :ord
      subject.send(:lon_region?).should be_false
    end

    it "should return true if linode_region is 'lon'" do
      subject.linode_region = 'lon'
      subject.send(:lon_region?).should be_true
    end

    it 'should return true if linode_Region is :lon' do
      subject.linode_region = :lon
      subject.send(:lon_region?).should be_true
    end
  end

  describe 'network' do
    it 'should remove SERVICE_NET_ID if :service_net is detached' do
      subject.send(:network, :service_net, attached: false)
      subject.send(:networks).should_not include(VagrantPlugins::Linode::Config::SERVICE_NET_ID)
    end

    it 'should not allow duplicate networks' do
      net_id = 'deadbeef-0000-0000-0000-000000000000'
      subject.send(:network, net_id)
      subject.send(:network, net_id)
      subject.send(:networks).count(net_id).should == 1
    end
  end
end
| ruby | MIT | 0b06818ea50a408b793d14653d3bc34835b1dac0 | 2026-01-04T17:55:46.227254Z | false |
displague/vagrant-linode | https://github.com/displague/vagrant-linode/blob/0b06818ea50a408b793d14653d3bc34835b1dac0/spec/vagrant-linode/services/volume_manager_spec.rb | spec/vagrant-linode/services/volume_manager_spec.rb | require "vagrant-linode/services/volume_manager"
# Specs for the VolumeManager service: creating missing volumes and
# attaching existing ones. Remote volumes are matched by the
# "<machine name>_<label>" naming scheme visible in the doubles below.
describe VagrantPlugins::Linode::Services::VolumeManager do
  subject { described_class.new(machine, client, logger) }

  let(:provider_config) { double(:config, volumes: [{label: "testvolume", size: 3}]) }
  let(:machine) { double(:machine, id: 123, name: "test", provider_config: provider_config) }
  let(:logger) { double(:logger, info: nil) }
  let(:remote_volumes) { [double(:volume, volumeid: 234, size: 3, label: "test_testvolume")] }
  let(:client) { double(:api, list: remote_volumes) }

  describe "#perform" do
    context "when the volume label is not specified" do
      let(:provider_config) { double(:config, volumes: [{size: 3}]) }
      it "raises an error" do
        expect { subject.perform }.to raise_error "You must specify a volume label."
      end
    end

    context "when the remote volume does not exist" do
      let(:remote_volumes) { [] }
      it "creates the volume bound to the linode" do
        expect(client).to receive(:create).with(label: "test_testvolume", size: 3, linodeid: 123)
        subject.perform
      end

      context "when the size is not specified" do
        let(:provider_config) { double(:config, volumes: [{label: "testvolume"}]) }
        it "raises an error" do
          expect { subject.perform }.to raise_error "For volumes that need to be created the size has to be specified."
        end
      end
    end

    context "when the remote volume exists" do
      let(:remote_volumes) { [double(:volume, volumeid: 234, size: 3, label: "test_testvolume")] }
      it "attaches the volume to the machine" do
        expect(client).to receive(:update).with(volumeid: 234, linodeid: 123)
        subject.perform
      end
    end
  end
end
| ruby | MIT | 0b06818ea50a408b793d14653d3bc34835b1dac0 | 2026-01-04T17:55:46.227254Z | false |
displague/vagrant-linode | https://github.com/displague/vagrant-linode/blob/0b06818ea50a408b793d14653d3bc34835b1dac0/spec/vagrant-linode/actions/list_distributions_spec.rb | spec/vagrant-linode/actions/list_distributions_spec.rb | require 'spec_helper'
require 'vagrant-linode/actions/list_distributions'
# Specs for the distribution-listing action: fetches distributions from
# the Fog compute connection and prints an ID/name table via Vagrant::UI.
#
# NOTE(review): the file requires list_distributions but describes
# ListImages and labels the columns "Image ID"/"Image Name" — confirm
# the action class defined in list_distributions.rb is actually
# ListImages and not ListDistributions.
describe VagrantPlugins::Linode::Actions::ListImages do
  let(:app) { lambda { |_env| } }
  let(:ui) { Vagrant::UI::Silent.new }

  # Canned Fog collection returned by the mocked connection.
  let(:distributions) do
    Fog.mock!
    Fog::Compute.new(provider: :linode,
                     linode_region: :dfw,
                     linode_api_key: 'anything',
                     linode_username: 'anything').distributions
  end
  let(:compute_connection) { double('fog connection') }
  let(:env) do
    {
      linode_compute: compute_connection,
      ui: ui
    }
  end

  subject(:action) { described_class.new(app, env) }

  before do
    allow(compute_connection).to receive(:distributions).and_return distributions
  end

  it 'get distributions from Fog' do
    expect(compute_connection).to receive(:distributions).and_return distributions
    action.call(env)
  end

  it 'writes a sorted, formatted image table to Vagrant::UI' do
    header_line = '%-36s %s' % ['Image ID', 'Image Name']
    expect(ui).to receive(:info).with(header_line)
    distributions.sort_by(&:name).each do |image|
      formatted_line = '%-36s %s' % [image.id.to_s, image.name]
      expect(ui).to receive(:info).with formatted_line
    end
    action.call(env)
  end

  it 'continues the middleware chain' do
    expect(app).to receive(:call).with(env)
    action.call(env)
  end
end
| ruby | MIT | 0b06818ea50a408b793d14653d3bc34835b1dac0 | 2026-01-04T17:55:46.227254Z | false |
displague/vagrant-linode | https://github.com/displague/vagrant-linode/blob/0b06818ea50a408b793d14653d3bc34835b1dac0/spec/vagrant-linode/actions/list_plans_spec.rb | spec/vagrant-linode/actions/list_plans_spec.rb | require 'spec_helper'
require 'vagrant-linode/actions/list_plans'
# Specs for the plan-listing action: fetches plans from the Fog compute
# connection and prints a sorted ID/name table via Vagrant::UI.
describe VagrantPlugins::Linode::Actions::ListPlans do
  let(:app) { lambda { |_env| } }
  let(:ui) { Vagrant::UI::Silent.new }

  # Canned Fog collection returned by the mocked connection.
  let(:plans) do
    Fog.mock!
    Fog::Compute.new(provider: :linode,
                     linode_datacenter: :dallas,
                     linode_api_key: 'anything',
                     linode_username: 'anything').plans
  end
  let(:compute_connection) { double('fog connection') }
  let(:env) do
    {
      linode_compute: compute_connection,
      ui: ui
    }
  end

  subject(:action) { described_class.new(app, env) }

  before do
    allow(compute_connection).to receive(:plans).and_return plans
  end

  it 'get plans from Fog' do
    expect(compute_connection).to receive(:plans).and_return plans
    action.call(env)
  end

  it 'writes a sorted, formatted plan table to Vagrant::UI' do
    header_line = '%-36s %s' % ['Plan ID', 'Plan Name']
    expect(ui).to receive(:info).with(header_line)
    plans.sort_by(&:id).each do |plan|
      formatted_line = '%-36s %s' % [plan.id, plan.name]
      expect(ui).to receive(:info).with formatted_line
    end
    action.call(env)
  end

  it 'continues the middleware chain' do
    expect(app).to receive(:call).with(env)
    action.call(env)
  end
end
| ruby | MIT | 0b06818ea50a408b793d14653d3bc34835b1dac0 | 2026-01-04T17:55:46.227254Z | false |
displague/vagrant-linode | https://github.com/displague/vagrant-linode/blob/0b06818ea50a408b793d14653d3bc34835b1dac0/lib/vagrant-linode.rb | lib/vagrant-linode.rb | require 'pathname'
require 'vagrant-linode/plugin'
module VagrantPlugins
  # Top-level plugin namespace: i18n/logging bootstrap plus small
  # path/key helpers shared by the provider and its actions.
  module Linode
    lib_path = Pathname.new(File.expand_path('../vagrant-linode', __FILE__))
    autoload :Errors, lib_path.join('errors')

    # This initializes the i18n load path so that the plugin-specific
    # translations work.
    def self.init_i18n
      I18n.load_path << File.expand_path('locales/en.yml', source_root)
      I18n.reload!
    end

    # This initializes the logging so that our logs are outputted at
    # the same level as Vagrant core logs.
    def self.init_logging
      level = nil
      begin
        # .to_s guards against VAGRANT_LOG being unset: nil.upcase would
        # raise NoMethodError, which the NameError rescue below would not
        # catch. const_get('') raises NameError and is handled as "no level".
        level = Log4r.const_get(ENV['VAGRANT_LOG'].to_s.upcase)
      rescue NameError
        # The logging constant wasn't found, which is fine; we just
        # keep `level` as `nil`.
        level = nil
      end

      # Some constants, such as "true", resolve to booleans, so the
      # above error checking doesn't catch it. This makes sure that the
      # log level is an integer, as Log4r requires.
      level = nil unless level.is_a?(Integer)

      # Set the logging level on all "vagrant" namespaced
      # logs as long as we have a valid level.
      if level
        logger = Log4r::Logger.new('vagrant_linode')
        logger.outputters = Log4r::Outputter.stderr
        logger.level = level
      end
    end

    # This returns the path to the source of this plugin.
    #
    # @return [Pathname]
    def self.source_root
      @source_root ||= Pathname.new(File.expand_path('../../', __FILE__))
    end

    # Reads the public half of an SSH keypair from "<private_key_path>.pub".
    #
    # @param private_key_path [String] path to the private key file
    # @return [String] contents of the matching .pub file
    # @raise [Errors::PublicKeyError] when the .pub file cannot be read
    def self.public_key(private_key_path)
      File.read("#{private_key_path}.pub")
    rescue
      raise Errors::PublicKeyError, path: "#{private_key_path}.pub"
    end
  end
end
| ruby | MIT | 0b06818ea50a408b793d14653d3bc34835b1dac0 | 2026-01-04T17:55:46.227254Z | false |
displague/vagrant-linode | https://github.com/displague/vagrant-linode/blob/0b06818ea50a408b793d14653d3bc34835b1dac0/lib/vagrant-linode/provider.rb | lib/vagrant-linode/provider.rb | require 'vagrant-linode/helpers/client'
require 'vagrant-linode/actions'
module VagrantPlugins
  module Linode
    # Vagrant provider implementation: resolves machine state/SSH info by
    # dispatching to the action middlewares in Actions.
    class Provider < Vagrant.plugin('2', :provider)
      def initialize(machine)
        @machine = machine
      end

      # This class method caches status for all linodes within
      # the Linode account. A specific linode's status
      # may be refreshed by passing :refresh => true as an option.
      #
      # NOTE(review): @linodes is a class-level cache shared by every
      # provider instance for the lifetime of the process and is only
      # invalidated per-linode via :refresh — confirm staleness is
      # acceptable for long-running processes.
      def self.linode(machine, opts = {})
        client = Helpers::ApiClient.new(machine).client
        # @todo how do I reuse VagrantPlugins::Linode::Actions::ConnectLinode ?
        # ..and nuke the helper
        # client = env[:linode_api]

        # load status of linodes if it has not been done before;
        # each entry is augmented with its IP list under #network
        unless @linodes
          @linodes = client.linode.list.each { |l| l.network = client.linode.ip.list linodeid: l.linodeid }
        end
        if opts[:refresh] && machine.id
          # refresh the linode status for the given machine
          @linodes.delete_if { |d| d['linodeid'].to_s == machine.id }
          linode = client.linode.list(linodeid: machine.id).first
          linode.network = client.linode.ip.list linodeid: linode['linodeid']
          @linodes << linode
        elsif machine.id
          # lookup linode status for the given machine
          linode = @linodes.find { |d| d['linodeid'].to_s == machine.id }
        end

        # if lookup by id failed, check for a linode with a matching name
        # and set the id to ensure vagrant stores locally
        # TODO allow the user to configure this behavior
        unless linode
          name = machine.config.vm.hostname || machine.name
          linode = @linodes.find { |d| d['label'] == name.to_s }
          machine.id = linode['linodeid'].to_s if linode
        end
        # fall back to a not-created placeholder hash
        linode ||= { status: :not_created }
      end

      # Attempt to get the action method from the Action class if it
      # exists, otherwise return nil to show that we don't support the
      # given action.
      def action(name)
        action_method = "action_#{name}"
        return Actions.send(action_method) if Actions.respond_to?(action_method)
        nil
      end

      # This method is called if the underying machine ID changes. Providers
      # can use this method to load in new data for the actual backing
      # machine or to realize that the machine is now gone (the ID can
      # become `nil`). No parameters are given, since the underlying machine
      # is simply the machine instance given to this object. And no
      # return value is necessary.
      def machine_id_changed
        if @machine.id
          Provider.linode(@machine, refresh: true)
        end
      end

      # This should return a hash of information that explains how to
      # SSH into the machine. If the machine is not at a point where
      # SSH is even possible, then `nil` should be returned.
      #
      # The general structure of this returned hash should be the
      # following:
      #
      #     {
      #       :host => "1.2.3.4",
      #       :port => "22",
      #       :username => "mitchellh",
      #       :private_key_path => "/path/to/my/key"
      #     }
      #
      # **Note:** Vagrant only supports private key based authenticatonion,
      # mainly for the reason that there is no easy way to exec into an
      # `ssh` prompt with a password, whereas we can pass a private key
      # via commandline.
      def ssh_info
        env = @machine.action('read_ssh_info')
        env[:machine_ssh_info]
      end

      # This should return the state of the machine within this provider.
      # The state must be an instance of {MachineState}. Please read the
      # documentation of that class for more information.
      def state
        env = @machine.action('read_state')
        state_id = env[:machine_state]
        # short/long descriptions come from the plugin's i18n catalog
        short = I18n.t("vagrant_linode.states.short_#{state_id}")
        long = I18n.t("vagrant_linode.states.long_#{state_id}")
        Vagrant::MachineState.new(state_id, short, long)
      end

      def to_s
        id = @machine.id.nil? ? 'new' : @machine.id
        "Linode (#{id})"
      end
    end
  end
end
| ruby | MIT | 0b06818ea50a408b793d14653d3bc34835b1dac0 | 2026-01-04T17:55:46.227254Z | false |
displague/vagrant-linode | https://github.com/displague/vagrant-linode/blob/0b06818ea50a408b793d14653d3bc34835b1dac0/lib/vagrant-linode/actions.rb | lib/vagrant-linode/actions.rb | require 'pathname'
require 'vagrant/action/builder'
module VagrantPlugins
  module Linode
    # Middleware stacks for every provider action. Each action_* method
    # returns a Vagrant::Action::Builder chain; the Call/IsCreated and
    # Call/IsStopped branches pick the right sub-chain for the machine's
    # current state.
    module Actions
      include Vagrant::Action::Builtin

      def self.action_destroy
        Vagrant::Action::Builder.new.tap do |builder|
          builder.use ConfigValidate
          builder.use Call, IsCreated do |env, b|
            if !env[:result]
              b.use MessageNotCreated
            else
              b.use Call, DestroyConfirm do |env2, b2|
                if env2[:result]
                  b2.use ConnectLinode
                  b2.use Destroy
                  b2.use ProvisionerCleanup if defined?(ProvisionerCleanup)
                end
              end
            end
          end
        end
      end

      # This action is called to read the SSH info of the machine. The
      # resulting state is expected to be put into the `:machine_ssh_info`
      # key.
      def self.action_read_ssh_info
        Vagrant::Action::Builder.new.tap do |b|
          b.use ConfigValidate
          b.use ConnectLinode
          b.use ReadSSHInfo
        end
      end

      def self.action_read_state
        Vagrant::Action::Builder.new.tap do |b|
          b.use ConfigValidate
          b.use ConnectLinode
          b.use ReadState
        end
      end

      def self.action_ssh
        Vagrant::Action::Builder.new.tap do |builder|
          builder.use ConfigValidate
          builder.use Call, IsCreated do |env, b|
            if env[:result]
              b.use Call, IsStopped do |env2, b2|
                if env2[:result]
                  b2.use MessageOff
                else
                  b2.use SSHExec
                end
              end
            else
              b.use MessageNotCreated
            end
          end
        end
      end

      # NOTE(review): unlike action_ssh, the IsStopped check here sits in
      # the NOT-created branch — confirm this branching is intentional.
      def self.action_ssh_run
        Vagrant::Action::Builder.new.tap do |builder|
          builder.use ConfigValidate
          builder.use Call, IsCreated do |env, b|
            if env[:result]
              b.use SSHRun
            else
              b.use Call, IsStopped do |env2, b2|
                if env2[:result]
                  b2.use MessageOff
                else
                  b2.use MessageNotCreated
                end
              end
            end
          end
        end
      end

      def self.action_provision
        Vagrant::Action::Builder.new.tap do |builder|
          builder.use ConfigValidate
          builder.use Call, IsCreated do |env, b|
            if env[:result]
              b.use Call, IsStopped do |env2, b2|
                if env2[:result]
                  b2.use MessageOff
                else
                  b2.use Provision
                  b2.use ModifyProvisionPath
                  b2.use SyncedFolders
                end
              end
            else
              b.use MessageNotCreated
            end
          end
        end
      end

      def self.action_up
        Vagrant::Action::Builder.new.tap do |builder|
          builder.use ConfigValidate
          builder.use Call, IsCreated do |env, b|
            if env[:result]
              b.use Call, IsStopped do |env2, b2|
                if env2[:result]
                  b2.use Provision
                  b2.use SyncedFolders
                  b2.use MessageOff
                  b2.use ConnectLinode
                  b2.use PowerOn
                else
                  b2.use MessageAlreadyActive
                end
              end
            else
              b.use Provision
              b.use SyncedFolders
              b.use MessageNotCreated
              b.use ConnectLinode
              b.use Create
              b.use SetupSudo
              b.use SetupUser
              b.use SetupHostname
            end
          end
        end
      end

      def self.action_halt
        Vagrant::Action::Builder.new.tap do |builder|
          builder.use ConfigValidate
          builder.use Call, IsCreated do |env, b1|
            if env[:result]
              b1.use Call, IsStopped do |env2, b2|
                if env2[:result]
                  b2.use MessageAlreadyOff
                else
                  b2.use ConnectLinode
                  b2.use PowerOff
                end
              end
            else
              b1.use MessageNotCreated
            end
          end
        end
      end

      def self.action_reload
        Vagrant::Action::Builder.new.tap do |builder|
          builder.use ConfigValidate
          builder.use Call, IsCreated do |env, b|
            if env[:result]
              b.use Call, IsStopped do |env2, b2|
                if env2[:result]
                  b2.use MessageOff
                else
                  b2.use ConnectLinode
                  b2.use Reload
                  b2.use Provision
                end
              end
            else
              b.use MessageNotCreated
            end
          end
        end
      end

      def self.action_rebuild
        Vagrant::Action::Builder.new.tap do |builder|
          builder.use ConfigValidate
          builder.use Call, IsCreated do |env, b|
            if env[:result]
              b.use Call, IsStopped do |env2, b2|
                if env2[:result]
                  b2.use ConnectLinode
                  b2.use Rebuild
                  b2.use SetupSudo
                  b2.use SetupUser
                  b2.use SetupHostname
                  b2.use Provision
                else
                  b2.use MessageNotOff
                end
              end
            else
              # fix: `b2` is only in scope inside the IsStopped block above;
              # referencing it here raised NameError at runtime. Use the
              # outer builder `b`, matching action_reload/action_halt.
              b.use MessageNotCreated
            end
          end
        end
      end

      # Extended actions

      def self.action_create_image
        Vagrant::Action::Builder.new.tap do |b|
          b.use ConfigValidate # is this per machine?
          b.use ConnectLinode
          b.use CreateImage
        end
      end

      def self.action_list_images
        Vagrant::Action::Builder.new.tap do |b|
          # b.use ConfigValidate # is this per machine?
          b.use ConnectLinode
          b.use ListImages
        end
      end

      def self.action_list_servers
        Vagrant::Action::Builder.new.tap do |b|
          # b.use ConfigValidate # is this per machine?
          b.use ConnectLinode
          b.use ListServers
        end
      end

      def self.action_list_plans
        Vagrant::Action::Builder.new.tap do |b|
          # b.use ConfigValidate # is this per machine?
          b.use ConnectLinode
          b.use ListPlans
        end
      end

      def self.action_list_datacenters
        Vagrant::Action::Builder.new.tap do |b|
          # b.use ConfigValidate # is this per machine?
          b.use ConnectLinode
          b.use ListDatacenters
        end
      end

      def self.action_list_distributions
        Vagrant::Action::Builder.new.tap do |b|
          # b.use ConfigValidate # is this per machine?
          b.use ConnectLinode
          b.use ListDistributions
        end
      end

      def self.action_list_kernels
        Vagrant::Action::Builder.new.tap do |b|
          # b.use ConfigValidate # is this per machine?
          b.use ConnectLinode
          b.use ListKernels
        end
      end

      def self.action_list_volumes
        Vagrant::Action::Builder.new.tap do |b|
          # b.use ConfigValidate # is this per machine?
          b.use ConnectLinode
          b.use ListVolumes
        end
      end

      # Lazy-load the individual middleware classes.
      action_root = Pathname.new(File.expand_path('../actions', __FILE__))
      autoload :ConnectLinode, action_root.join('connect_linode')
      autoload :ReadState, action_root.join('read_state')
      autoload :Create, action_root.join('create')
      autoload :IsCreated, action_root.join('is_created')
      autoload :IsStopped, action_root.join('is_stopped')
      autoload :MessageAlreadyActive, action_root.join('message_already_active')
      autoload :MessageAlreadyOff, action_root.join('message_already_off')
      autoload :MessageNotOff, action_root.join('message_not_off')
      autoload :MessageNotCreated, action_root.join('message_not_created')
      autoload :MessageOff, action_root.join('message_off')
      autoload :ModifyProvisionPath, action_root.join('modify_provision_path')
      autoload :PowerOff, action_root.join('power_off')
      autoload :PowerOn, action_root.join('power_on')
      autoload :Destroy, action_root.join('destroy')
      autoload :Reload, action_root.join('reload')
      autoload :Rebuild, action_root.join('rebuild')
      autoload :SetupHostname, action_root.join('setup_hostname')
      autoload :SetupUser, action_root.join('setup_user')
      autoload :SetupSudo, action_root.join('setup_sudo')
      autoload :ReadSSHInfo, action_root.join("read_ssh_info")
      autoload :ListServers, action_root.join('list_servers')
      autoload :CreateImage, action_root.join('create_image')
      autoload :ListImages, action_root.join('list_images')
      autoload :ListPlans, action_root.join('list_plans')
      autoload :ListDistributions, action_root.join('list_distributions')
      autoload :ListKernels, action_root.join('list_kernels')
      autoload :ListDatacenters, action_root.join('list_datacenters')
      autoload :ListVolumes, action_root.join('list_volumes')
    end
  end
end
| ruby | MIT | 0b06818ea50a408b793d14653d3bc34835b1dac0 | 2026-01-04T17:55:46.227254Z | false |
displague/vagrant-linode | https://github.com/displague/vagrant-linode/blob/0b06818ea50a408b793d14653d3bc34835b1dac0/lib/vagrant-linode/version.rb | lib/vagrant-linode/version.rb | module VagrantPlugins
module Linode
  # Gem version. Frozen so the shared constant cannot be mutated in place.
  VERSION = '0.4.1'.freeze
end
end
| ruby | MIT | 0b06818ea50a408b793d14653d3bc34835b1dac0 | 2026-01-04T17:55:46.227254Z | false |
displague/vagrant-linode | https://github.com/displague/vagrant-linode/blob/0b06818ea50a408b793d14653d3bc34835b1dac0/lib/vagrant-linode/errors.rb | lib/vagrant-linode/errors.rb | module VagrantPlugins
module Linode
# Error hierarchy for the provider. Every class maps an error_key to a
# message under the 'vagrant_linode.errors' i18n namespace.
module Errors
  # Base class for all provider errors.
  class LinodeError < Vagrant::Errors::VagrantError
    error_namespace('vagrant_linode.errors')
  end

  # Raised when the Linode API reports a non-success status.
  class APIStatusError < LinodeError
    error_key(:api_status)
  end

  # Raised for invalid/insufficient disk size configuration.
  class DiskSize < LinodeError
    error_key(:disk_size)
  end

  # Raised when the configured distribution cannot be matched.
  class DistroMatch < LinodeError
    error_key(:distro_match)
  end

  # Raised when the configured datacenter cannot be matched.
  class DatacenterMatch < LinodeError
    error_key(:datacenter_match)
  end

  # Raised when the configured image cannot be matched.
  class ImageMatch < LinodeError
    error_key(:image_match)
  end

  # Raised when the configured kernel cannot be matched.
  class KernelMatch < LinodeError
    error_key(:kernel_match)
  end

  # Raised on JSON (de)serialization problems.
  class JSONError < LinodeError
    error_key(:json)
  end

  # Raised when an API result does not match what was expected.
  class ResultMatchError < LinodeError
    error_key(:result_match)
  end

  # Raised on TLS/certificate problems.
  class CertificateError < LinodeError
    error_key(:certificate)
  end

  # Raised when the local IP cannot be determined.
  class LocalIPError < LinodeError
    error_key(:local_ip)
  end

  # Raised for an invalid/unknown plan id.
  class PlanID < LinodeError
    error_key(:plan_id)
  end

  # Raised when the public key file cannot be read.
  class PublicKeyError < LinodeError
    error_key(:public_key)
  end

  # Raised when rsync fails during folder sync.
  class RsyncError < LinodeError
    error_key(:rsync)
  end

  # Raised when the configured stackscript cannot be matched.
  class StackscriptMatch < LinodeError
    error_key(:stackscript_match)
  end

  # Raised for malformed stackscript UDF responses.
  class StackscriptUDFFormat < LinodeError
    error_key(:stackscript_udf_responses)
  end

  # Raised when a volume to be created has no size.
  class VolumeSizeMissing < LinodeError
    error_key(:volume_size_missing)
  end

  # Raised when a configured volume has no label.
  class VolumeLabelMissing < LinodeError
    error_key(:volume_label_missing)
  end
end
end
end
| ruby | MIT | 0b06818ea50a408b793d14653d3bc34835b1dac0 | 2026-01-04T17:55:46.227254Z | false |
displague/vagrant-linode | https://github.com/displague/vagrant-linode/blob/0b06818ea50a408b793d14653d3bc34835b1dac0/lib/vagrant-linode/client_wrapper.rb | lib/vagrant-linode/client_wrapper.rb | require "log4r"
module VagrantPlugins
  module Linode
    # Thin proxy around a LinodeAPI client that (a) logs API error
    # details before re-raising and (b) re-wraps any returned retryable
    # sub-client so that chained calls stay wrapped.
    class ClientWrapper
      # @param client [Object] the underlying LinodeAPI client (or sub-client)
      # @param logger [#error] logger used to record API error details
      def initialize(client, logger)
        @client = client
        @logger = logger
      end

      # Delegates every call to the wrapped client. A LinodeAPI::Retryable
      # result is wrapped again so error logging survives method chaining.
      def method_missing(method, *args, &block)
        result = @client.send(method, *args, &block)
        if result.is_a? LinodeAPI::Retryable
          self.class.new(result, @logger)
        else
          result
        end
      rescue ::LinodeAPI::APIError => e
        @logger.error e.details.inspect
        raise
      end

      # Keep respond_to? truthful for delegated methods; method_missing
      # without respond_to_missing? breaks duck-typed callers.
      def respond_to_missing?(method, include_private = false)
        @client.respond_to?(method, include_private) || super
      end
    end
  end
end
| ruby | MIT | 0b06818ea50a408b793d14653d3bc34835b1dac0 | 2026-01-04T17:55:46.227254Z | false |
displague/vagrant-linode | https://github.com/displague/vagrant-linode/blob/0b06818ea50a408b793d14653d3bc34835b1dac0/lib/vagrant-linode/plugin.rb | lib/vagrant-linode/plugin.rb | begin
require 'vagrant'
rescue LoadError
raise 'The Linode provider must be run within Vagrant.'
end
# This is a sanity check to make sure no one is attempting to install
# this into an early Vagrant version.
if Vagrant::VERSION < '1.1.0'
fail 'Linode provider is only compatible with Vagrant 1.1+'
end
module VagrantPlugins
  module Linode
    # Vagrant plugin registration: wires up the provider, its config
    # class, and the `vagrant linode` / `vagrant rebuild` commands.
    class Plugin < Vagrant.plugin('2')
      name 'Linode'
      description <<-DESC
      This plugin installs a provider that allows Vagrant to manage
      machines using Linode's API.
      DESC

      # Provider-specific configuration (config.vm.provider :linode).
      config(:linode, :provider) do
        require_relative 'config'
        Config
      end

      # The provider itself; i18n and logging are initialized lazily so
      # they only run when the provider is actually used.
      provider(:linode, parallel: true) do
        Linode.init_i18n
        Linode.init_logging
        require_relative 'provider'
        Provider
      end

      # `vagrant linode ...` subcommands.
      command(:linode) do
        require_relative 'commands/root'
        Commands::Root
      end

      # `vagrant rebuild` command.
      command(:rebuild) do
        require_relative 'commands/rebuild'
        Commands::Rebuild
      end
    end
  end
end
| ruby | MIT | 0b06818ea50a408b793d14653d3bc34835b1dac0 | 2026-01-04T17:55:46.227254Z | false |
displague/vagrant-linode | https://github.com/displague/vagrant-linode/blob/0b06818ea50a408b793d14653d3bc34835b1dac0/lib/vagrant-linode/config.rb | lib/vagrant-linode/config.rb | module VagrantPlugins
module Linode
class Config < Vagrant.plugin('2', :config)
attr_accessor :token # deprecated
attr_accessor :api_key
attr_accessor :api_url
attr_accessor :distributionid
attr_accessor :distribution
attr_accessor :imageid
attr_accessor :image
attr_accessor :datacenterid
attr_accessor :datacenter
attr_accessor :planid
attr_accessor :plan
attr_accessor :paymentterm
attr_accessor :private_networking
attr_accessor :ca_path
attr_accessor :ssh_key_name
attr_accessor :setup
attr_accessor :stackscriptid
attr_accessor :stackscript
attr_accessor :stackscript_udf_responses
attr_accessor :xvda_size
attr_accessor :swap_size
attr_accessor :kernelid
attr_accessor :kernel
attr_accessor :label
attr_accessor :group
attr_accessor :volumes
alias_method :setup?, :setup
def initialize
# @logger = Log4r::Logger.new('vagrant::linode::config')
@token = UNSET_VALUE
@api_key = UNSET_VALUE
@api_url = UNSET_VALUE
@distributionid = UNSET_VALUE
@distribution = UNSET_VALUE
@stackscriptid = UNSET_VALUE
@stackscript = UNSET_VALUE
@stackscript_udf_responses = UNSET_VALUE
@imageid = UNSET_VALUE
@image = UNSET_VALUE
@datacenterid = UNSET_VALUE
@datacenter = UNSET_VALUE
@planid = UNSET_VALUE
@plan = UNSET_VALUE
@paymentterm = UNSET_VALUE
@private_networking = UNSET_VALUE
@ca_path = UNSET_VALUE
@ssh_key_name = UNSET_VALUE
@setup = UNSET_VALUE
@xvda_size = UNSET_VALUE
@swap_size = UNSET_VALUE
@kernelid = UNSET_VALUE
@kernel = UNSET_VALUE
@label = UNSET_VALUE
@group = UNSET_VALUE
@volumes = UNSET_VALUE
end
def finalize!
@api_key = ENV['LINODE_API_KEY'] if @api_key == UNSET_VALUE
@token = ENV['LINODE_TOKEN'] if @token == UNSET_VALUE
@api_key = @token if ((@api_key == nil) and (@token != nil))
@api_url = ENV['LINODE_URL'] if @api_url == UNSET_VALUE
@imageid = nil if @imageid == UNSET_VALUE
@image = nil if @image == UNSET_VALUE
@distributionid = nil if @distributionid == UNSET_VALUE
@distribution = nil if @distribution == UNSET_VALUE
@distribution = 'Ubuntu 16.04 LTS' if @distribution.nil? and @distributionid.nil? and @imageid.nil? and @image.nil?
@stackscriptid = nil if @stackscriptid == UNSET_VALUE
@stackscript = nil if @stackscript == UNSET_VALUE
@stackscript_udf_responses = nil if @stackscript_udf_responses == UNSET_VALUE
@datacenterid = nil if @datacenterid == UNSET_VALUE
@datacenter = nil if @datacenter == UNSET_VALUE
@datacenter = 'dallas' if @datacenter.nil? and @datacenterid.nil?
@planid = nil if @planid == UNSET_VALUE
@plan = nil if @plan == UNSET_VALUE
@planid = '1' if @plan.nil? and @planid.nil?
@paymentterm = '1' if @paymentterm == UNSET_VALUE
@private_networking = false if @private_networking == UNSET_VALUE
@ca_path = nil if @ca_path == UNSET_VALUE
@ssh_key_name = 'Vagrant' if @ssh_key_name == UNSET_VALUE
@setup = true if @setup == UNSET_VALUE
@xvda_size = true if @xvda_size == UNSET_VALUE
@swap_size = '256' if @swap_size == UNSET_VALUE
@kernelid = nil if @kernelid == UNSET_VALUE
@kernel = nil if @kernel == UNSET_VALUE
@kernel = 'Latest 64 bit' if @kernel.nil? and @kernelid.nil?
@label = false if @label == UNSET_VALUE
@group = false if @group == UNSET_VALUE
@volumes = [] if @volumes == UNSET_VALUE
end
# Validates the finalized Linode provider configuration for +machine+.
# Returns the Vagrant-conventional { section_name => [messages] } hash;
# an empty message array means the config is valid.
def validate(machine)
  errors = []

  errors << I18n.t('vagrant_linode.config.api_key') unless @api_key
  # (token-specific messaging intentionally disabled; @token is folded
  # into @api_key during finalize!)

  key = machine.config.ssh.private_key_path
  key = key[0] if key.is_a?(Array)
  if key
    # The matching public key must sit next to the private key.
    pub_path = File.expand_path("#{key}.pub", machine.env.root_path)
    errors << I18n.t('vagrant_linode.config.public_key', key: "#{key}.pub") unless File.file?(pub_path)
  else
    errors << I18n.t('vagrant_linode.config.private_key')
  end

  # Each resource may be selected by id or by name, never both at once.
  {
    [@distributionid, @distribution] => 'vagrant_linode.config.distributionid_or_distribution',
    [@stackscriptid, @stackscript] => 'vagrant_linode.config.stackscriptid_or_stackscript',
    [@datacenterid, @datacenter] => 'vagrant_linode.config.datacenterid_or_datacenter',
    [@kernelid, @kernel] => 'vagrant_linode.config.kernelid_or_kernel',
    [@planid, @plan] => 'vagrant_linode.config.planid_or_plan',
    [@imageid, @image] => 'vagrant_linode.config.imageid_or_image'
  }.each do |(id_value, name_value), message_key|
    errors << I18n.t(message_key) if id_value && name_value
  end

  # A distribution selection and an image selection are mutually exclusive.
  if (@distribution || @distributionid) && (@imageid || @image)
    errors << I18n.t('vagrant_linode.config.distribution_or_image')
  end

  errors << I18n.t('vagrant_linode.config.volumes') unless @volumes.is_a?(Array)

  { 'Linode Provider' => errors }
end
end
end
end
| ruby | MIT | 0b06818ea50a408b793d14653d3bc34835b1dac0 | 2026-01-04T17:55:46.227254Z | false |
displague/vagrant-linode | https://github.com/displague/vagrant-linode/blob/0b06818ea50a408b793d14653d3bc34835b1dac0/lib/vagrant-linode/services/volume_manager.rb | lib/vagrant-linode/services/volume_manager.rb | require "vagrant-linode/errors"
module VagrantPlugins
module Linode
module Services
# Reconciles the volumes declared in the provider config with the volumes
# present on the Linode account: existing volumes are attached to this
# machine, missing ones are created and attached.
class VolumeManager
  def initialize(machine, api, logger)
    @machine = machine
    @volumes_api = api
    @logger = logger
  end

  # Processes every configured volume definition. Raises
  # Errors::VolumeLabelMissing when a definition has no label.
  def perform
    volume_definitions.each do |definition|
      raise Errors::VolumeLabelMissing if definition[:label].to_s.empty?

      # Volume names are namespaced by machine to avoid cross-VM clashes.
      full_label = "#{@machine.name}_#{definition[:label]}"
      existing = remote_volumes.find { |candidate| candidate.label == full_label }
      existing ? attach_volume(existing) : create_and_attach_volume(full_label, definition[:size])
    end
  end

  private

  # Volume definitions straight from the Vagrantfile provider config.
  def volume_definitions
    @machine.provider_config.volumes
  end

  # Account volumes, fetched once and memoized for the run.
  def remote_volumes
    @_remote_volumes ||= @volumes_api.list
  end

  # Attaches an already-existing volume to this machine.
  def attach_volume(volume)
    @volumes_api.update(volumeid: volume.volumeid, linodeid: @machine.id)
    @logger.info "volume #{volume.label} attached"
  end

  # Creates a new volume of +size+ and attaches it; raises
  # Errors::VolumeSizeMissing unless the size is a positive integer.
  def create_and_attach_volume(label, size)
    raise Errors::VolumeSizeMissing unless size.to_i > 0

    @volumes_api.create(label: label, size: size, linodeid: @machine.id)
    @logger.info "volume #{label} created and attached"
  end
end
end
end
end
| ruby | MIT | 0b06818ea50a408b793d14653d3bc34835b1dac0 | 2026-01-04T17:55:46.227254Z | false |
displague/vagrant-linode | https://github.com/displague/vagrant-linode/blob/0b06818ea50a408b793d14653d3bc34835b1dac0/lib/vagrant-linode/helpers/waiter.rb | lib/vagrant-linode/helpers/waiter.rb | module VagrantPlugins
module Linode
module Helpers
module Waiter
  include Vagrant::Util::Retryable

  # Polls the Linode job +id+ for the current machine until it reports a
  # finish timestamp, checking every 10 seconds for up to 120 tries
  # (~20 minutes). Yields each polled job record to an optional block and
  # returns early without waiting when the user interrupted the run.
  def wait_for_event(env, id)
    retryable(tries: 120, sleep: 10) do
      # stop waiting if interrupted
      next if env[:interrupted]

      # check action status
      result = env[:linode_api].linode.job.list(jobid: id, linodeid: env[:machine].id)
      result = result[0] if result.is_a?(Array)

      yield result if block_given?

      # BUG FIX: guard was inverted ("fail ... if ... > ''"). `fail` is what
      # makes retryable poll again, so we must keep raising while the job's
      # HOST_FINISH_DT is still empty (job pending — per Linode API v3 job
      # records) and fall through once it is populated. The old form raised
      # only AFTER completion, i.e. it never actually waited.
      fail 'not ready' unless result['host_finish_dt'] > ''
    end
  end
end
end
end
end
| ruby | MIT | 0b06818ea50a408b793d14653d3bc34835b1dac0 | 2026-01-04T17:55:46.227254Z | false |
displague/vagrant-linode | https://github.com/displague/vagrant-linode/blob/0b06818ea50a408b793d14653d3bc34835b1dac0/lib/vagrant-linode/helpers/result.rb | lib/vagrant-linode/helpers/result.rb | module VagrantPlugins
module Linode
module Helpers
# Thin wrapper around a deserialized API response body that provides
# string-keyed access and lookup of entries inside nested collections.
class Result
  # +body+ is the parsed response, a Hash with string keys.
  def initialize(body)
    @result = body
  end

  # Fetches a top-level value; symbol keys are coerced to strings.
  def [](key)
    @result[key.to_s]
  end

  # Returns the 'id' of the entry in collection +sub_obj+ matching +search+,
  # e.g. find_id(:ssh_keys, name: 'ijin (vagrant)').
  def find_id(sub_obj, search)
    find(sub_obj, search)['id']
  end

  # Returns the last entry of @result[sub_obj] whose +key+ equals +value+
  # (values are compared as strings); raises via #error when none matches.
  # NOTE: reduce keeps the LAST match, preserved from the original code.
  def find(sub_obj, search)
    key = search.keys.first   # e.g. :slug
    value = search[key].to_s  # e.g. "sfo1"
    key = key.to_s            # e.g. "slug"
    # Renamed the accumulator: the original block parameter shadowed the
    # outer local `result`.
    match = @result[sub_obj.to_s].reduce(nil) do |memo, entry|
      entry[key] == value ? entry : memo
    end
    match || error(sub_obj, key, value)
  end

  # Raises a descriptive lookup error.
  # BUG FIX: the constant name was garbled as "Errors::ResultMatchErro r"
  # (stray space), which is a syntax error; restored to ResultMatchError.
  def error(sub_obj, key, value)
    fail(Errors::ResultMatchError, key: key,
                                   value: value,
                                   collection_name: sub_obj.to_s,
                                   sub_obj: @result[sub_obj.to_s])
  end
end
end
end
end
| ruby | MIT | 0b06818ea50a408b793d14653d3bc34835b1dac0 | 2026-01-04T17:55:46.227254Z | false |
displague/vagrant-linode | https://github.com/displague/vagrant-linode/blob/0b06818ea50a408b793d14653d3bc34835b1dac0/lib/vagrant-linode/helpers/normalizer.rb | lib/vagrant-linode/helpers/normalizer.rb | module VagrantPlugins
module Linode
module Helpers
# Normalizes user-supplied Linode plan labels.
module Normalizer
  # Converts a plan label ending in a raw megabyte count (4+ digits) to the
  # gigabyte form Linode uses, e.g. "Linode 2048" => "Linode 2GB".
  # Labels without such a suffix are returned unchanged. Division is
  # integer division, so non-multiples of 1024 round down.
  def normalize_plan_label(plan_label)
    size_match = plan_label.match(/(\d{4,})$/)
    return plan_label unless size_match

    megabytes = size_match[1].to_i
    plan_label.sub(/(\d{4,})$/, "#{megabytes / 1024}GB")
  end
end
end
end
end | ruby | MIT | 0b06818ea50a408b793d14653d3bc34835b1dac0 | 2026-01-04T17:55:46.227254Z | false |
displague/vagrant-linode | https://github.com/displague/vagrant-linode/blob/0b06818ea50a408b793d14653d3bc34835b1dac0/lib/vagrant-linode/helpers/client.rb | lib/vagrant-linode/helpers/client.rb | require 'vagrant-linode/helpers/result'
require 'vagrant-linode/version'
require 'linodeapi'
require 'json'
require 'vagrant/util/retryable'
include Vagrant::Util::Retryable
module VagrantPlugins
module Linode
module Helpers
# module Client
# def client
# def wait_for_event(env, id)
# retryable(tries: 120, sleep: 10) do
# # stop waiting if interrupted
# next if env[:interrupted]
# # check action status
# result = @client.linode.job.list(jobid: id, linodeid: env[:machine].id)
# result = result[0] if result.is_a?(Array)
#
# yield result if block_given?
# fail 'not ready' if result['host_finish_dt'] > ''
# end
# end
# linodeapi = ::LinodeAPI::Raw.new(apikey: @machine.provider_config.api_key,
# endpoint: @machine.provider_config.api_url || nil)
# # linodeapi.wait_for_event = wait_for_event
# # linodeapi.extend wait_for_event
# end
# end
# Wraps a retrying Linode API client configured from the machine's
# provider config (api_key, optional api_url endpoint).
class ApiClient
  include Vagrant::Util::Retryable

  def initialize(machine)
    @logger = Log4r::Logger.new('vagrant::linode::apiclient')
    @config = machine.provider_config
    @client = ::LinodeAPI::Retryable.new(apikey: @config.api_key,
                                         endpoint: @config.api_url || nil,
                                         user_agent_prefix: "vagrant-linode/#{VagrantPlugins::Linode::VERSION}")
  end

  # The underlying LinodeAPI client.
  attr_reader :client

  # Polls job +id+ for the current machine until it reports a finish
  # timestamp (every 10s, up to 120 tries). Same contract as
  # Helpers::Waiter#wait_for_event. Yields each polled job record to an
  # optional block; returns early when the run was interrupted.
  def wait_for_event(env, id)
    retryable(tries: 120, sleep: 10) do
      # stop waiting if interrupted
      next if env[:interrupted]

      # check action status
      result = @client.linode.job.list(jobid: id, linodeid: env[:machine].id)
      # CONSISTENCY FIX: job.list can return an Array; Helpers::Waiter (and
      # the commented-out draft above) unwrap it before reading fields, but
      # this copy did not.
      result = result[0] if result.is_a?(Array)

      yield result if block_given?

      # BUG FIX: guard was inverted ("fail ... if ... > ''"). `fail` drives
      # retryable's retry, so keep raising while HOST_FINISH_DT is still
      # empty (job pending) and return once it is populated.
      fail 'not ready' unless result['host_finish_dt'] > ''
    end
  end
end
end
end
end
| ruby | MIT | 0b06818ea50a408b793d14653d3bc34835b1dac0 | 2026-01-04T17:55:46.227254Z | false |
displague/vagrant-linode | https://github.com/displague/vagrant-linode/blob/0b06818ea50a408b793d14653d3bc34835b1dac0/lib/vagrant-linode/commands/datacenters.rb | lib/vagrant-linode/commands/datacenters.rb | module VagrantPlugins
module Linode
module Commands
# `vagrant linode datacenters` — dispatches the 'list_datacenters' action
# on each targeted Linode machine.
class Datacenters < Vagrant.plugin('2', :command)
  def execute
    # Removed the unused `options = {}` local (never referenced).
    opts = OptionParser.new do |o|
      o.banner = 'Usage: vagrant linode datacenters [options]'
    end

    argv = parse_options(opts)
    return unless argv

    with_target_vms(argv, provider: :linode) do |machine|
      machine.action('list_datacenters')
    end
  end
end
end
end
end
| ruby | MIT | 0b06818ea50a408b793d14653d3bc34835b1dac0 | 2026-01-04T17:55:46.227254Z | false |
displague/vagrant-linode | https://github.com/displague/vagrant-linode/blob/0b06818ea50a408b793d14653d3bc34835b1dac0/lib/vagrant-linode/commands/servers.rb | lib/vagrant-linode/commands/servers.rb | module VagrantPlugins
module Linode
module Commands
# `vagrant linode servers` — dispatches the 'list_servers' action on each
# targeted Linode machine.
class Servers < Vagrant.plugin('2', :command)
  def execute
    # Removed the unused `options = {}` local (never referenced).
    opts = OptionParser.new do |o|
      o.banner = 'Usage: vagrant linode servers [options]'
    end

    argv = parse_options(opts)
    return unless argv

    with_target_vms(argv, provider: :linode) do |machine|
      machine.action('list_servers')
    end
  end
end
end
end
end
| ruby | MIT | 0b06818ea50a408b793d14653d3bc34835b1dac0 | 2026-01-04T17:55:46.227254Z | false |
displague/vagrant-linode | https://github.com/displague/vagrant-linode/blob/0b06818ea50a408b793d14653d3bc34835b1dac0/lib/vagrant-linode/commands/list_volumes.rb | lib/vagrant-linode/commands/list_volumes.rb | module VagrantPlugins
module Linode
module Commands
# `vagrant linode volumes list` — dispatches the :list_volumes action on
# each targeted Linode machine.
class ListVolumes < Vagrant.plugin('2', :command)
  def execute
    opts = OptionParser.new do |o|
      o.banner = 'Usage: vagrant linode volumes list [options]'
    end

    argv = parse_options(opts)
    return unless argv

    # CONSISTENCY FIX: scope to Linode machines like the sibling
    # datacenters/servers commands; volumes are provider-specific.
    with_target_vms(argv, provider: :linode) do |machine|
      machine.action(:list_volumes)
    end
  end
end
end
end
| ruby | MIT | 0b06818ea50a408b793d14653d3bc34835b1dac0 | 2026-01-04T17:55:46.227254Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.