repo stringlengths 5 92 | file_url stringlengths 80 287 | file_path stringlengths 5 197 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 15:37:27 2026-01-04 17:58:21 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
gitviola/ynab-bank-importer | https://github.com/gitviola/ynab-bank-importer/blob/d69e289b8136861c4504f50d68843e9abab5bf1e/lib/transaction_creator.rb | lib/transaction_creator.rb | require 'ynab'
# Calculates the correct parameters for the YNAB transaction
# and build it.
class TransactionCreator
attr_accessor :account_id, :date, :amount, :payee_name, :payee_id,
:category_name, :category_id, :memo,
:import_id, :is_withdrawal
class <<self
require 'ynab/models/save_transaction'
# rubocop:disable Metrics/MethodLength
def call(options = {})
YNAB::SaveTransaction.new(
account_id: options.fetch(:account_id),
date: options.fetch(:date),
amount: options.fetch(:amount),
payee_id: payee_id(options),
payee_name: payee_name(options),
category_id: category_id(options),
memo: memo(options),
import_id: options.fetch(:import_id),
flag_color: flag_color(options),
cleared: 'cleared' # TODO: shouldn't be cleared if date is in the future
)
end
# rubocop:enable Metrics/MethodLength
def payee_id(options)
payee_id = options.fetch(:payee_id, nil)
return payee_id if payee_id
if withdrawal?(options) && cash_account_id
return find_payee_id_by_account_id(cash_account_id)
end
account_payee_id = account_payee_id(options)
return account_payee_id if account_payee_id
nil
end
def payee_name(options)
return nil if payee_id(options)
payee = options.fetch(:payee_name, nil)
# The api has a limit of 50 characters for the payee field
payee = truncate(payee, 50)
payee
end
def memo(options)
memo = options.fetch(:memo, nil)
# The api has a limit of 100 characters for the momo field
memo = truncate(memo, 100)
return "ATM withdrawal #{memo}" if withdrawal?(options)
memo
end
def category_id(_options)
# TODO: query through all categories and match by category_name
nil
end
# Helper methods
def truncate(string, max)
return nil unless string
string.length > max ? string[0...max] : string
end
def cash_account_id
Settings.all['ynab'].fetch('cash_account_id', nil)
end
def withdrawal?(options)
options.fetch(:is_withdrawal, nil)
end
def account_payee_id(options)
result = Settings.all['accounts'].find do |account|
payee_iban = payee_iban(options)
account['ynab_id'] && payee_iban && account['iban'] == payee_iban
end
return nil unless result
find_payee_id_by_account_id(result['ynab_id'])
end
def find_payee_id_by_account_id(account_id)
return nil unless account_id
budget_id = Settings.all['ynab'].fetch('budget_id')
access_token = Settings.all['ynab'].fetch('access_token')
YNAB::API.new(access_token)
.accounts
.get_account_by_id(budget_id, account_id)
.data.account.transfer_payee_id
end
def flag_color(_options)
nil
end
def payee_iban(options)
iban = options.fetch(:payee_iban, nil)
iban.delete(' ') if iban
end
end
end
| ruby | MIT | d69e289b8136861c4504f50d68843e9abab5bf1e | 2026-01-04T17:45:50.785060Z | false |
gitviola/ynab-bank-importer | https://github.com/gitviola/ynab-bank-importer/blob/d69e289b8136861c4504f50d68843e9abab5bf1e/lib/dumper/fints.rb | lib/dumper/fints.rb | class Dumper
# Implements logic to fetch transactions via the Fints protocol
# and implements methods that convert the response to meaningful data.
class Fints < Dumper
require 'ruby_fints'
require 'digest/md5'
def initialize(params = {})
@ynab_id = params.fetch('ynab_id')
@username = params.fetch('username').to_s
@password = params.fetch('password').to_s
@iban = params.fetch('iban')
@endpoint = params.fetch('fints_endpoint')
@blz = params.fetch('fints_blz')
end
def fetch_transactions
FinTS::Client.logger.level = Logger::WARN
client = FinTS::PinTanClient.new(@blz, @username, @password, @endpoint)
account = client.get_sepa_accounts.find { |a| a[:iban] == @iban }
statement = client.get_statement(account, Date.today - 35, Date.today)
statement.map { |t| to_ynab_transaction(t) }
end
private
def account_id
@ynab_id
end
def date(transaction)
transaction.entry_date || transaction.date
rescue NoMethodError
# https://github.com/schurig/ynab-bank-importer/issues/52
# Some banks think Feb 29 and 30 exist in non-leap years.
entry_date(transaction) || to_date(transaction['date'])
end
def payee_name(transaction)
transaction.name.try(:strip)
end
def payee_iban(transaction)
transaction.iban
end
def memo(transaction)
[
transaction.description,
transaction.information
].compact.join(' / ').try(:strip)
end
def amount(transaction)
(transaction.amount * transaction.sign * 1000).to_i
end
def withdrawal?(transaction)
memo = memo(transaction)
return nil unless memo
memo.include?('Atm') || memo.include?('Bargeld')
end
def import_id(transaction)
Digest::MD5.hexdigest(transaction.source)
end
# Patches
# taken from https://github.com/railslove/cmxl/blob/master/lib/cmxl/field.rb
# and modified so that it takes the last day of the month if the provided day
# doesn't exist in that month.
# See issue: https://github.com/schurig/ynab-bank-importer/issues/52
DATE = /(?<year>\d{0,2})(?<month>\d{2})(?<day>\d{2})/
def to_date(date, year = nil)
if match = date.to_s.match(DATE)
year ||= "20#{match['year'] || Date.today.strftime('%y')}"
month = match['month']
day = match['day']
begin
Date.new(year.to_i, month.to_i, day.to_i)
rescue ArgumentError
# Take the last day of that month
Date.civil(year.to_i, month.to_i, -1)
end
else
date
end
end
def entry_date(transaction)
data = transaction.data
date = to_date(data['date'])
return unless transaction.data['entry_date'] && date
entry_date_with_date_year = to_date(data['entry_date'], date.year)
if date.month == 1 && date.month < entry_date_with_date_year.month
to_date(data['entry_date'], date.year - 1)
else
to_date(data['entry_date'], date.year)
end
end
end
end
| ruby | MIT | d69e289b8136861c4504f50d68843e9abab5bf1e | 2026-01-04T17:45:50.785060Z | false |
gitviola/ynab-bank-importer | https://github.com/gitviola/ynab-bank-importer/blob/d69e289b8136861c4504f50d68843e9abab5bf1e/lib/dumper/n26.rb | lib/dumper/n26.rb | class Dumper
# Implements logic to fetch transactions via the N26 api
# and implements methods that convert the response to meaningful data.
class N26 < Dumper
require 'twentysix'
require 'active_support'
require 'digest/md5'
WITHDRAWAL_CATEGORIES = [
'micro-v2-atm',
'micro-v2-cash26'
].freeze
def initialize(params = {})
@ynab_id = params.fetch('ynab_id')
@username = params.fetch('username')
@password = params.fetch('password')
@iban = params.fetch('iban')
@set_category = params.fetch('set_category', false)
@skip_pending_transactions = params.fetch('skip_pending_transactions',
false)
@categories = {}
end
def fetch_transactions
client = TwentySix::Core.authenticate(@username, @password)
check_authorization!(client)
client.categories.map do |category|
@categories[category['id']] = category['name']
end
client.transactions(count: 100)
.select { |t| accept?(t) }
.map { |t| to_ynab_transaction(t) }
end
def accept?(transaction)
return true unless @skip_pending_transactions
already_processed?(transaction)
end
private
def check_authorization!(client)
return if client.instance_variable_get('@access_token')
raise "Couldn't login with your provided N26 credentials. " \
"Please verify that they're correct."
end
def account_id
@ynab_id
end
def date(transaction)
timestamp = Time.at(transaction['visibleTS'] / 1000)
Date.parse(timestamp.strftime('%Y-%m-%d'))
end
def payee_name(transaction)
[
transaction['merchantName'],
transaction['partnerName']
].join(' ').try(:strip)
end
def payee_iban(transaction)
transaction['partnerIban']
end
def category_name(transaction)
return nil unless @set_category
@categories[transaction['category']]
end
def memo(transaction)
[
transaction['referenceText'],
transaction['merchantCity']
].join(' ').try(:strip)
end
def amount(transaction)
(transaction['amount'].to_f * 1000).to_i
end
def withdrawal?(transaction)
WITHDRAWAL_CATEGORIES.include?(transaction['category'])
end
def import_id(transaction)
data = [transaction['visibleTS'],
transaction['transactionNature'],
transaction['amount'],
transaction['accountId']].join
Digest::MD5.hexdigest(data)
end
# All very recent transactions with the credit card have
# the type value set to "AA". So we assume that this is an
# indicator to check if a transaction has been processed or not.
def already_processed?(transaction)
transaction['type'] != 'AA'
end
end
end
| ruby | MIT | d69e289b8136861c4504f50d68843e9abab5bf1e | 2026-01-04T17:45:50.785060Z | false |
gitviola/ynab-bank-importer | https://github.com/gitviola/ynab-bank-importer/blob/d69e289b8136861c4504f50d68843e9abab5bf1e/lib/dumper/bbva.rb | lib/dumper/bbva.rb | class Dumper
# Implements logic to fetch transactions via the BBVA Spain api
# and implements methods that convert the response to meaningful data.
class Bbva < Dumper
require 'bankscrap'
require 'bankscrap-bbva'
require 'digest/md5'
def initialize(params = {})
@ynab_id = params.fetch('ynab_id')
@username = params.fetch('username')
@password = params.fetch('password')
@iban = params.fetch('iban')
end
def fetch_transactions
bbva = Bankscrap::BBVA::Bank.new(user: @username, password: @password)
account = bbva.accounts.find do |a|
normalize_iban(a.iban) == @iban
end
account.transactions.map { |t| to_ynab_transaction(t) }
end
private
def account_id
@ynab_id
end
def date(transaction)
transaction.effective_date
end
def payee_name(_transaction)
'N/A'
end
def payee_iban(_transaction)
nil
end
def memo(transaction)
transaction.description
end
def amount(transaction)
transaction.amount.fractional * 10
end
def withdrawal?(transaction)
text = transaction.description.downcase
text.include?('cajero') ||
text.include?('withdrawal') ||
text.include?('efectivo')
end
def normalize_iban(iban)
iban.delete(' ')
end
def import_id(transaction)
Digest::MD5.hexdigest(transaction.id)
end
end
end
| ruby | MIT | d69e289b8136861c4504f50d68843e9abab5bf1e | 2026-01-04T17:45:50.785060Z | false |
taskrabbit/forklift | https://github.com/taskrabbit/forklift/blob/e4261da656cb0af77cea37deb07502e24bb1abe2/spec/spec_helper.rb | spec/spec_helper.rb | #############
## WARNING ##
#############
# THIS TEST SUITE IS VERY MEAN TO MYSQL AND ELASTICSEARCH
# IT *WILL* DELETE ANY CONTENT IN THE TEST DBs
$LOAD_PATH.unshift(File.dirname(__FILE__))
$LOAD_PATH.unshift(File.join(File.dirname(__FILE__), '..', 'lib'))
APP_DIR ||= File.expand_path('../../', __FILE__)
require 'forklift'
require 'rspec'
require 'fileutils'
ENV["FORKLIFT_RUN_ALL_STEPS"] = 'true'
Dir["#{APP_DIR}/spec/support/**/*.rb"].each {|f| require f}
RSpec.configure do |config|
config.before(:all) do
piddir = "#{File.dirname(__FILE__)}/pid"
FileUtils.rmdir(piddir) if File.exists?(piddir)
SpecSeeds.setup_mysql
SpecSeeds.setup_elasticsearch
SpecSeeds.setup_csv
end
end
| ruby | Apache-2.0 | e4261da656cb0af77cea37deb07502e24bb1abe2 | 2026-01-04T17:45:51.809866Z | false |
taskrabbit/forklift | https://github.com/taskrabbit/forklift/blob/e4261da656cb0af77cea37deb07502e24bb1abe2/spec/support/spec_plan.rb | spec/support/spec_plan.rb | class SpecPlan
def self.config
return {
project_root: File.join(Dir.pwd, 'spec'),
logger: {
stdout: false,
debug: false,
},
}
end
def self.new
return Forklift::Plan.new(self.config)
end
end | ruby | Apache-2.0 | e4261da656cb0af77cea37deb07502e24bb1abe2 | 2026-01-04T17:45:51.809866Z | false |
taskrabbit/forklift | https://github.com/taskrabbit/forklift/blob/e4261da656cb0af77cea37deb07502e24bb1abe2/spec/support/spec_client.rb | spec/support/spec_client.rb | require 'yaml'
require 'erb'
class SpecClient
def self.load_config(file)
YAML.load(ERB.new(File.read(file)).result)
end
def self.mysql(name)
file = File.join(File.dirname(__FILE__), '..', 'config', 'connections', 'mysql', "#{name}.yml")
config = self.load_config(file)
db = config[:database]
config.delete(:database)
connection = ::Mysql2::Client.new(config)
begin
connection.query("use `#{db}`")
rescue Exception => e
puts "#{e} => will create new databse #{db}"
end
connection
end
def self.elasticsearch(name)
file = File.join(File.dirname(__FILE__), '..', 'config', 'connections', 'elasticsearch', "#{name}.yml")
config = self.load_config(file)
::Elasticsearch::Client.new(config)
end
def self.csv(file)
CSV.read(file, headers: true, converters: :all).map {|r| r = r.to_hash.symbolize_keys }
end
end | ruby | Apache-2.0 | e4261da656cb0af77cea37deb07502e24bb1abe2 | 2026-01-04T17:45:51.809866Z | false |
taskrabbit/forklift | https://github.com/taskrabbit/forklift/blob/e4261da656cb0af77cea37deb07502e24bb1abe2/spec/support/spec_seeds.rb | spec/support/spec_seeds.rb | require 'json'
require 'fileutils'
class SpecSeeds
def self.setup_mysql
mysql_connections = []
mysql_databases = []
files = Dir["#{File.dirname(__FILE__)}/../config/connections/mysql/*.yml"]
files.each do |f|
name = f.split('/').last.gsub('.yml','')
mysql_connections << ::SpecClient.mysql(name)
mysql_databases << name
end
i = 0
while i < mysql_connections.count
conn = mysql_connections[i]
db = mysql_databases[i]
seed = File.join(File.dirname(__FILE__), '..', 'support', 'dumps', 'mysql', "#{db}.sql")
conn.query("drop database if exists `#{db}`")
conn.query("create database `#{db}`")
conn.query("use `#{db}`")
if File.exists? seed
lines = File.read(seed).split(";")
lines.each do |line|
conn.query(line) if line[0] != "#"
end
end
i = i + 1
end
end
def self.setup_elasticsearch
elasticsearch_connections = []
elasticsearch_databases = []
files = Dir["#{File.dirname(__FILE__)}/../config/connections/elasticsearch/*.yml"]
files.each do |f|
name = f.split('/').last.gsub('.yml','')
elasticsearch_connections << ::SpecClient.elasticsearch(name)
elasticsearch_databases << name
end
i = 0
while i < elasticsearch_connections.count
conn = elasticsearch_connections[i]
index = elasticsearch_databases[i]
seed = File.join(File.dirname(__FILE__), '..', 'support', 'dumps', 'elasticsearch', "#{index}.json")
conn.indices.delete({ index: index }) if conn.indices.exists({ index: index })
if File.exists? seed
lines = JSON.parse(File.read(seed))
lines.each do |line|
object = {
index: index,
body: line,
type: 'forklift',
id: line['id']
}
conn.index object # assumes ES is setup to allow index creation on write
end
conn.indices.refresh({ index: index })
end
i = i + 1
end
end
def self.setup_csv
seed = File.join(File.dirname(__FILE__), '..', 'support', 'dumps', 'csv', "source.csv")
source = '/tmp/source.csv'
destination = '/tmp/destination.csv'
FileUtils.rm(source, {force: true})
FileUtils.rm(destination, {force: true})
FileUtils.copy(seed, source)
end
end | ruby | Apache-2.0 | e4261da656cb0af77cea37deb07502e24bb1abe2 | 2026-01-04T17:45:51.809866Z | false |
taskrabbit/forklift | https://github.com/taskrabbit/forklift/blob/e4261da656cb0af77cea37deb07502e24bb1abe2/spec/integration/mysql_spec.rb | spec/integration/mysql_spec.rb | require 'spec_helper'
describe 'mysql' do
before(:each) do
SpecSeeds.setup_mysql
end
it "can read data (raw)" do
query = 'select * from `users`'
plan = SpecPlan.new
@rows = []
plan.do! {
source = plan.connections[:mysql][:forklift_test_source_a]
source.read(query) {|data|
@rows = (@rows + data)
}
}
plan.disconnect!
expect(@rows.length).to eql 5
end
it "can read data (filtered)" do
query = 'select * from `users`'
plan = SpecPlan.new
@rows = []
plan.do! {
source = plan.connections[:mysql][:forklift_test_source_a]
source.read(query, source.current_database, false, 3, 0) {|data|
@rows = (@rows + data)
}
}
plan.disconnect!
expect(@rows.length).to eql 3
end
it "can write new data" do
table = "users"
data = [
{email: 'other@example.com', first_name: 'other', last_name: 'n', created_at: Time.new.to_s(:db), updated_at: Time.new.to_s(:db)},
{email: 'else@example.com', first_name: 'else', last_name: 'n', created_at: Time.new.to_s(:db), updated_at: Time.new.to_s(:db)}
]
plan = SpecPlan.new
plan.do! {
destination = plan.connections[:mysql][:forklift_test_source_a]
destination.write(data, table)
}
plan.disconnect!
destination = SpecClient.mysql('forklift_test_source_a')
count = destination.query('select count(1) as "count" from users').first['count']
expect(count).to eql 7
end
it "can update existing data" do
table = "users"
data = [
{id: 1, email: 'evan@example.com', first_name: 'New Name', last_name: 'T', created_at: Time.new.to_s(:db), updated_at: Time.new.to_s(:db)}
]
plan = SpecPlan.new
plan.do! {
destination = plan.connections[:mysql][:forklift_test_source_a]
destination.write(data, table)
}
plan.disconnect!
destination = SpecClient.mysql('forklift_test_source_a')
count = destination.query('select count(1) as "count" from users').first['count']
expect(count).to eql 5
first_name = destination.query('select first_name from users where id = 1').first['first_name']
expect(first_name).to eql 'New Name'
end
describe 'lazy create' do
after(:each) do
destination = SpecClient.mysql('forklift_test_source_a')
destination.query('drop table if exists `new_table`')
end
it "can lazy-create a table with primary keys provided" do
data = [
{id: 1, thing: 'stuff a', updated_at: Time.new},
{id: 2, thing: 'stuff b', updated_at: Time.new},
{id: 3, thing: 'stuff c', updated_at: Time.new},
]
table = "new_table"
plan = SpecPlan.new
plan.do! {
destination = plan.connections[:mysql][:forklift_test_source_a]
destination.write(data, table)
}
plan.disconnect!
destination = SpecClient.mysql('forklift_test_source_a')
cols = []
destination.query("describe #{table}").each do |row|
cols << row["Field"]
case row["Field"]
when "id"
expect(row["Type"]).to eql "bigint(20)"
when "thing"
expect(row["Type"]).to eql "text"
when "updated_at"
expect(row["Type"]).to eql "datetime"
end
end
expect(cols).to eql ['id', 'thing', 'updated_at']
end
it "can lazy-create a table without primary keys provided" do
data = [
{thing: 'stuff a', number: 1.123, updated_at: Time.new},
{thing: 'stuff b', number: 1.123, updated_at: Time.new},
{thing: 'stuff c', number: 1.123, updated_at: Time.new},
]
table = "new_table"
plan = SpecPlan.new
plan.do! {
destination = plan.connections[:mysql][:forklift_test_source_a]
destination.write(data, table)
}
plan.disconnect!
destination = SpecClient.mysql('forklift_test_source_a')
cols = []
destination.query("describe #{table}").each do |row|
cols << row["Field"]
case row["Field"]
when "id"
expect(row["Type"]).to eql "bigint(20)"
when "thing"
expect(row["Type"]).to eql "text"
when "number"
expect(row["Type"]).to eql "float"
when "updated_at"
expect(row["Type"]).to eql "datetime"
end
end
expect(cols).to include('id', 'thing', 'number', 'updated_at')
end
it "can add columns to exiting tables when new keys are provided" do
table = "users"
raw = SpecClient.mysql('forklift_test_source_a')
count = raw.query("SHOW COLUMNS FROM #{table}").count
expect(count).to eql 6
data = [
{email: 'other@example.com', something_else: :abc123, first_name: 'other', last_name: 'n', created_at: Time.new.to_s(:db), updated_at: Time.new.to_s(:db)}
]
plan = SpecPlan.new
plan.do! {
destination = plan.connections[:mysql][:forklift_test_source_a]
destination.write(data, table)
}
plan.disconnect!
count = raw.query("SHOW COLUMNS FROM #{table}").count
expect(count).to eql 7
end
it "can will seek further for null-ish values" do
data = [
{id: 1, thing: 'stuff a', number: nil, updated_at: Time.new},
{id: 2, thing: 'stuff b', number: nil, updated_at: Time.new},
{id: 3, thing: 'stuff c', number: 100, updated_at: Time.new},
]
table = "new_table"
plan = SpecPlan.new
plan.do! {
destination = plan.connections[:mysql][:forklift_test_source_a]
destination.write(data, table)
}
plan.disconnect!
destination = SpecClient.mysql('forklift_test_source_a')
cols = []
destination.query("describe #{table}").each do |row|
cols << row["Field"]
case row["Field"]
when "id"
expect(row["Type"]).to eql "bigint(20)"
when "thing"
expect(row["Type"]).to eql "text"
when "number"
expect(row["Type"]).to eql "bigint(20)"
when "updated_at"
expect(row["Type"]).to eql "datetime"
end
end
expect(cols).to include('id', 'thing', 'updated_at', 'number')
end
it "null rows will be text, and can be updated on subsequent writes" do
data = [
{id: 1, number: nil, updated_at: Time.new},
{id: 2, number: nil, updated_at: Time.new},
]
table = "new_table"
plan = SpecPlan.new
plan.do! {
destination = plan.connections[:mysql][:forklift_test_source_a]
destination.write(data, table)
}
plan.disconnect!
destination = SpecClient.mysql('forklift_test_source_a')
cols = []
destination.query("describe #{table}").each do |row|
cols << row["Field"]
case row["Field"]
when "id"
expect(row["Type"]).to eql "bigint(20)"
when "number"
expect(row["Type"]).to eql "varchar(0)"
when "updated_at"
expect(row["Type"]).to eql "datetime"
end
end
expect(cols).to include('id', 'updated_at', 'number')
data = [
{id: 3, number: 123, updated_at: Time.new},
]
plan = SpecPlan.new
plan.do! {
destination = plan.connections[:mysql][:forklift_test_source_a]
destination.write(data, table)
}
plan.disconnect!
destination = SpecClient.mysql('forklift_test_source_a')
cols = []
destination.query("describe #{table}").each do |row|
cols << row["Field"]
case row["Field"]
when "number"
expect(row["Type"]).to eql "bigint(20)"
end
end
expect(cols).to include('id', 'updated_at', 'number')
end
end
end
| ruby | Apache-2.0 | e4261da656cb0af77cea37deb07502e24bb1abe2 | 2026-01-04T17:45:51.809866Z | false |
taskrabbit/forklift | https://github.com/taskrabbit/forklift/blob/e4261da656cb0af77cea37deb07502e24bb1abe2/spec/integration/csv_spec.rb | spec/integration/csv_spec.rb | require 'spec_helper'
require 'csv'
describe 'csv' do
after(:each) do
SpecSeeds.setup_csv
end
it "can read data (simple)" do
plan = SpecPlan.new
@rows = []
plan.do! {
source = plan.connections[:csv][:forklift_test_source]
source.read {|data|
@rows = (@rows + data)
}
}
expect(@rows.length).to eql 5
expect(@rows.first[:vendor_id]).to eql 1
expect(@rows.last[:vendor_id]).to eql 5
end
it "can read partial data" do
plan = SpecPlan.new
@rows = []
plan.do! {
source = plan.connections[:csv][:forklift_test_source]
@rows = source.read(3)
}
expect(@rows.length).to eql 3
expect(@rows.first[:vendor_id]).to eql 1
expect(@rows.last[:vendor_id]).to eql 3
end
it "can write data (simple)" do
plan = SpecPlan.new
data = [
{thing: 1, when: Time.now},
{thing: 2, when: Time.now},
]
plan.do! {
destination = plan.connections[:csv][:forklift_test_destination]
destination.write(data)
}
@rows = SpecClient.csv('/tmp/destination.csv')
expect(@rows.length).to eql 2
expect(@rows.first[:thing]).to eql 1
expect(@rows.last[:thing]).to eql 2
end
it "can append data" do
plan = SpecPlan.new
plan.do! {
destination = plan.connections[:csv][:forklift_test_destination]
data = [
{thing: 1, when: Time.now},
{thing: 2, when: Time.now},
]
destination.write(data)
data = [
{thing: 3, when: Time.now},
]
destination.write(data)
}
@rows = SpecClient.csv('/tmp/destination.csv')
expect(@rows.length).to eql 3
expect(@rows.first[:thing]).to eql 1
expect(@rows.last[:thing]).to eql 3
end
end | ruby | Apache-2.0 | e4261da656cb0af77cea37deb07502e24bb1abe2 | 2026-01-04T17:45:51.809866Z | false |
taskrabbit/forklift | https://github.com/taskrabbit/forklift/blob/e4261da656cb0af77cea37deb07502e24bb1abe2/spec/integration/mysql_patterns_spec.rb | spec/integration/mysql_patterns_spec.rb | require 'spec_helper'
describe 'mysql patterns' do
before(:each) do
SpecSeeds.setup_mysql
end
it "can do a raw data pipe" do
plan = SpecPlan.new
plan.do! {
source = plan.connections[:mysql][:forklift_test_source_a]
destination = plan.connections[:mysql][:forklift_test_destination]
expect(source.tables.length).to eql 3
expect(destination.tables.length).to eql 0
source.tables.each do |table|
Forklift::Patterns::Mysql.pipe(source, table, destination, table)
end
expect(destination.tables.length).to eql 3
}
plan.disconnect!
end
it "can do an incramental data pipe with only updated data" do
plan = SpecPlan.new
table = 'users'
plan.do! {
source = plan.connections[:mysql][:forklift_test_source_a]
destination = plan.connections[:mysql][:forklift_test_destination]
Forklift::Patterns::Mysql.incremental_pipe(source, table, destination, table)
expect(destination.count('users')).to eql 5
expect(destination.read('select first_name from users where id = 1')[0][:first_name]).to eql 'Evan'
source.q("UPDATE `users` SET `first_name` = 'EvanAgain' WHERE `id` = '1'")
source.q("UPDATE `users` SET `updated_at` = NOW() WHERE `id` = '1'")
Forklift::Patterns::Mysql.incremental_pipe(source, table, destination, table)
expect(destination.count('users')).to eql 5
expect(destination.read('select first_name from users where id = 1')[0][:first_name]).to eql 'EvanAgain'
}
plan.disconnect!
end
it "(optimistic_pipe) can determine if it should do an incramental or full pipe" do
plan = SpecPlan.new
plan.do! {
source = plan.connections[:mysql][:forklift_test_source_a]
expect(Forklift::Patterns::Mysql.can_incremental_pipe?(source, 'users', source, 'users')).to eql true
expect(Forklift::Patterns::Mysql.can_incremental_pipe?(source, 'sales', source, 'sales')).to eql false
expect(Forklift::Patterns::Mysql.can_incremental_pipe?(source, 'products', source, 'products')).to eql true
}
plan.disconnect!
end
it "can run the mysql_optimistic_import pattern" do
plan = SpecPlan.new
table = 'users'
plan.do! {
source = plan.connections[:mysql][:forklift_test_source_a]
destination = plan.connections[:mysql][:forklift_test_destination]
Forklift::Patterns::Mysql.mysql_optimistic_import(source, table, destination, table)
expect(destination.tables.length).to eql 1
source.q("UPDATE `users` SET `first_name` = 'EvanAgain' WHERE `id` = '1'")
source.q("UPDATE `users` SET `updated_at` = NOW() WHERE `id` = '1'")
Forklift::Patterns::Mysql.mysql_optimistic_import(source, table, destination, table)
expect(destination.count('users')).to eql 5
expect(destination.read('select first_name from users where id = 1')[0][:first_name]).to eql 'EvanAgain'
}
plan.disconnect!
end
it "can write the high_water_mark"
end
| ruby | Apache-2.0 | e4261da656cb0af77cea37deb07502e24bb1abe2 | 2026-01-04T17:45:51.809866Z | false |
taskrabbit/forklift | https://github.com/taskrabbit/forklift/blob/e4261da656cb0af77cea37deb07502e24bb1abe2/spec/integration/elasticsearch_spec.rb | spec/integration/elasticsearch_spec.rb | require 'spec_helper'
describe 'elasticsearch' do
before(:each) do
SpecSeeds.setup_elasticsearch
end
it "can read data (raw)" do
index = 'forklift_test'
query = { query: { match_all: {} } }
plan = SpecPlan.new
@rows = []
plan.do! {
source = plan.connections[:elasticsearch][:forklift_test]
source.read(index, query) {|data|
@rows = (@rows + data)
}
}
plan.disconnect!
expect(@rows.length).to eql 5
end
it "can read data (filtered)" do
index = 'forklift_test'
query = { query: { match_all: {} } }
plan = SpecPlan.new
@rows = []
plan.do! {
source = plan.connections[:elasticsearch][:forklift_test]
source.read(index, query, false, 0, 3) {|data|
@rows = (@rows + data)
}
}
plan.disconnect!
expect(@rows.length).to eql 3
end
it "can write new data" do
index = 'forklift_test'
plan = SpecPlan.new
data = [
{id: 99, user_id: 99, product_id: 99, viewed_at: 99}
]
plan.do! {
destination = plan.connections[:elasticsearch][:forklift_test]
destination.write(data, index)
}
plan.disconnect!
destination = SpecClient.elasticsearch('forklift_test')
count = destination.count({ index: index })["count"]
expect(count).to eql 6
end
it "can overwrite existing data, probided a primary key" do
index = 'forklift_test'
plan = SpecPlan.new
data = [
{'id' => 1, 'user_id' => 1, 'product_id' => 1, 'viewed_at' => 99}
]
plan.do! {
destination = plan.connections[:elasticsearch][:forklift_test]
destination.write(data, index, true)
}
plan.disconnect!
destination = SpecClient.elasticsearch('forklift_test')
count = destination.count({ index: index })["count"]
expect(count).to eql 5
result = destination.search({ index: index, body: { query: {term: {id: 1}} } })
expect(result["hits"]["total"]).to eql 1
obj = result["hits"]["hits"][0]["_source"]
expect(obj["id"]).to eql 1
expect(obj["user_id"]).to eql 1
expect(obj["product_id"]).to eql 1
expect(obj["viewed_at"]).to eql 99
end
it "can delete an index" do
index = 'other_test_index'
plan = SpecPlan.new
client = SpecClient.elasticsearch('forklift_test')
data = [
{id: 1}
]
plan.do! {
destination = plan.connections[:elasticsearch][:forklift_test]
expect { client.search({ index: index }) }.to raise_error(/index_not_found_exception|IndexMissingException/)
destination.write(data, index, true)
expect { client.search({ index: index }) }.to_not raise_error
destination.delete_index(index)
expect { client.search({ index: index }) }.to raise_error(/index_not_found_exception|IndexMissingException/)
}
plan.disconnect!
end
end
| ruby | Apache-2.0 | e4261da656cb0af77cea37deb07502e24bb1abe2 | 2026-01-04T17:45:51.809866Z | false |
taskrabbit/forklift | https://github.com/taskrabbit/forklift/blob/e4261da656cb0af77cea37deb07502e24bb1abe2/spec/integration/basic_spec.rb | spec/integration/basic_spec.rb | require 'spec_helper'
describe 'basics' do
describe 'test suite setup' do
it 'seeded the mysql dbs' do
client = SpecClient.mysql('forklift_test_source_a')
tables = []
client.query("show tables").each do |row|
tables << row.values[0]
end
expect(tables.count).to eql 3
client.close
client = SpecClient.mysql('forklift_test_source_b')
tables = []
client.query("show tables").each do |row|
tables << row.values[0]
end
expect(tables.count).to eql 1
client.close
end
it 'seeded the elasticsearch db' do
client = SpecClient.elasticsearch('forklift_test')
results = client.search({ index: 'forklift_test' , body: { query: { match_all: {} } } })
expect(results['hits']['total']).to eql 5
end
end
end | ruby | Apache-2.0 | e4261da656cb0af77cea37deb07502e24bb1abe2 | 2026-01-04T17:45:51.809866Z | false |
taskrabbit/forklift | https://github.com/taskrabbit/forklift/blob/e4261da656cb0af77cea37deb07502e24bb1abe2/spec/integration/multi_transport_spec.rb | spec/integration/multi_transport_spec.rb | require 'spec_helper'
describe 'multiple trasport types' do
before(:each) do
SpecSeeds.setup_mysql
SpecSeeds.setup_elasticsearch
end
describe 'elasticsearch => mysql' do
it 'can load in a full query' do
table = 'es_import'
index = 'forklift_test'
query = { query: { match_all: {} } }
plan = SpecPlan.new
plan.do! {
source = plan.connections[:elasticsearch][:forklift_test]
destination = plan.connections[:mysql][:forklift_test_destination]
source.read(index, query) {|data| destination.write(data, table) }
}
plan.disconnect!
destination = SpecClient.mysql('forklift_test_destination')
rows = destination.query("select count(1) as 'count' from es_import").first["count"]
expect(rows).to eql 5
end
it 'can load in a partial query' do
table = 'es_import'
index = 'forklift_test'
query = { query: { match_all: {} }, sort: [{ id: {order: "asc" } }] }
plan = SpecPlan.new
plan.do! {
source = plan.connections[:elasticsearch][:forklift_test]
destination = plan.connections[:mysql][:forklift_test_destination]
source.read(index, query, false, 0, 3) {|data| destination.write(data, table) }
}
plan.disconnect!
destination = SpecClient.mysql('forklift_test_destination')
rows = destination.query("select count(1) as 'count' from es_import").first["count"]
expect(rows).to eql 3
min = destination.query("select min(id) as 'min' from es_import").first["min"]
expect(min).to eql 1
max = destination.query("select max(id) as 'max' from es_import").first["max"]
expect(max).to eql 3
end
it 'can detect data types' do
table = 'es_import'
index = 'forklift_test'
query = { query: { match_all: {} } }
plan = SpecPlan.new
plan.do! {
source = plan.connections[:elasticsearch][:forklift_test]
destination = plan.connections[:mysql][:forklift_test_destination]
source.read(index, query) {|data|
clean_data = []
data.each do |row|
row[:viewed_at] = Time.at(row[:viewed_at])
clean_data << row
end
destination.write(clean_data, table)
}
}
plan.disconnect!
destination = SpecClient.mysql('forklift_test_destination')
max = destination.query("select max(viewed_at) as 'max' from es_import").first["max"]
expect(max.class).to eql Time
end
end
describe 'mysql => elasticsearch' do
after(:each) do
es = SpecClient.elasticsearch('forklift_test')
es.indices.delete({ index: 'users' }) if es.indices.exists({ index: 'users' })
end
it 'can load in a full table' do
table = 'users'
index = 'users'
plan = SpecPlan.new
plan.do! {
source = plan.connections[:mysql][:forklift_test_source_a]
destination = plan.connections[:elasticsearch][:forklift_test]
source.read("select * from #{table}") {|data| destination.write(data, index) }
}
plan.disconnect!
destination = SpecClient.elasticsearch('forklift_test')
count = destination.count({ index: index })["count"]
expect(count).to eql 5
end
it 'can load in only some rows' do
table = 'users'
index = 'users'
plan = SpecPlan.new
plan.do! {
source = plan.connections[:mysql][:forklift_test_source_a]
destination = plan.connections[:elasticsearch][:forklift_test]
source.read("select * from #{table}", source.current_database, false, 3, 0) {|data|
destination.write(data, index)
}
}
plan.disconnect!
destination = SpecClient.elasticsearch('forklift_test')
count = destination.count({ index: index })["count"]
expect(count).to eql 3
end
end
end | ruby | Apache-2.0 | e4261da656cb0af77cea37deb07502e24bb1abe2 | 2026-01-04T17:45:51.809866Z | false |
taskrabbit/forklift | https://github.com/taskrabbit/forklift/blob/e4261da656cb0af77cea37deb07502e24bb1abe2/spec/integration/elasticsearch_patterns_spec.rb | spec/integration/elasticsearch_patterns_spec.rb | require 'spec_helper'
describe 'elasticsearch patterns' do
end | ruby | Apache-2.0 | e4261da656cb0af77cea37deb07502e24bb1abe2 | 2026-01-04T17:45:51.809866Z | false |
taskrabbit/forklift | https://github.com/taskrabbit/forklift/blob/e4261da656cb0af77cea37deb07502e24bb1abe2/spec/integration/transformations_spec.rb | spec/integration/transformations_spec.rb | require 'spec_helper'
# Covers destination.exec! with both .sql and .rb transformation files.
describe 'transformations' do
  before(:each) do
    SpecSeeds.setup_mysql
  end

  it "can run a native transformation" do
    plan = SpecPlan.new
    @rows = []
    raw = SpecClient.mysql('forklift_test_destination') # NOTE(review): raw and @rows look unused in this example
    plan.do! {
      source = plan.connections[:mysql][:forklift_test_source_a]
      destination = plan.connections[:mysql][:forklift_test_destination]
      source.read('select * from `users`') {|data| destination.write(data, 'users') }
      expect( destination.columns("users").include?(:full_name) ).to eql false
      # SQL files are executed verbatim; this one adds a full_name column.
      transformation_file = "#{File.dirname(__FILE__)}/../template/spec_user_transformation.sql"
      destination.exec!(transformation_file)
      expect( destination.columns("users").include?(:full_name) ).to eql true
    }
    plan.disconnect!
  end

  it "can run a ruby transformation" do
    plan = SpecPlan.new
    @rows = []
    raw = SpecClient.mysql('forklift_test_destination')
    plan.do! {
      source = plan.connections[:mysql][:forklift_test_source_a]
      destination = plan.connections[:mysql][:forklift_test_destination]
      source.read('select * from `users`') {|data| destination.write(data, 'users') }
      expect( destination.columns("users").include?(:full_name) ).to eql false
      # Ruby transformations receive the extra args (here the {prefix: ...} hash).
      transformation_file = "#{File.dirname(__FILE__)}/../template/spec_user_transformation.rb"
      destination.exec!(transformation_file, {prefix: 'my_prefix' })
      expect( destination.columns("users").include?(:full_name) ).to eql true
      data = destination.read('select * from `users` where email="evan@example.com"')
      expect( data.first[:full_name] ).to eql 'my_prefix Evan T'
    }
    plan.disconnect!
  end
end
| ruby | Apache-2.0 | e4261da656cb0af77cea37deb07502e24bb1abe2 | 2026-01-04T17:45:51.809866Z | false |
taskrabbit/forklift | https://github.com/taskrabbit/forklift/blob/e4261da656cb0af77cea37deb07502e24bb1abe2/spec/unit/misc/step_spec.rb | spec/unit/misc/step_spec.rb | require 'spec_helper'
describe 'misc forklift core' do
  # Step selection: by default every step runs; passing step names as extra
  # ARGV entries restricts the run to just those steps.
  describe 'steps' do
    # The surrounding suite normally forces all steps on; disable that here
    # so the ARGV-based selection logic can actually be exercised.
    before(:each) do
      ENV['FORKLIFT_RUN_ALL_STEPS'] = 'false'
    end
    after(:each) do
      ENV['FORKLIFT_RUN_ALL_STEPS'] = 'true'
    end

    it "will run all steps with no extra ARGV" do
      plan = SpecPlan.new
      allow(plan).to receive(:argv){ ['/path/to/plan'] } # argv[0] is the plan file itself
      steps_run = []
      plan.do! {
        plan.step("a"){ steps_run << 'a' }
        plan.step("b"){ steps_run << 'b' }
        plan.step("c"){ steps_run << 'c' }
      }
      plan.disconnect!
      expect(steps_run).to include 'a'
      expect(steps_run).to include 'b'
      expect(steps_run).to include 'c'
    end

    it "will only run steps named within ARGV" do
      plan = SpecPlan.new
      allow(plan).to receive(:argv){ ['/path/to/plan', 'a','c'] }
      steps_run = []
      plan.do! {
        plan.step("a"){ steps_run << 'a' }
        plan.step("b"){ steps_run << 'b' }
        plan.step("c"){ steps_run << 'c' }
      }
      plan.disconnect!
      expect(steps_run).to include 'a'
      expect(steps_run).to_not include 'b'
      expect(steps_run).to include 'c'
    end

    it "won't run on a badly defined step" do
      plan = SpecPlan.new
      allow(plan).to receive(:argv){ ['/path/to/plan', 'missing_step'] }
      # Naming an unknown step exits the process (see Plan#activate_steps).
      expect{
        plan.do! {
          plan.step("a"){ raise 'never should get here' }
        }
        plan.disconnect!
      }.to raise_error SystemExit
    end
  end
end
taskrabbit/forklift | https://github.com/taskrabbit/forklift/blob/e4261da656cb0af77cea37deb07502e24bb1abe2/spec/unit/misc/email_spec.rb | spec/unit/misc/email_spec.rb | require 'spec_helper'
require "email_spec"
describe 'misc forklift core' do
  # Uses the email_spec matchers to verify Mailer#send_template renders the
  # ERB template with the supplied variables and delivers the message.
  describe 'email' do
    include EmailSpec::Helpers
    include EmailSpec::Matchers

    it "can send mail with an email template" do
      plan = SpecPlan.new
      plan.do! {
        email_args = {
          to: "YOU@FAKE.com",
          from: "Forklift",
          subject: "Forklift has moved your database",
        }
        email_variables = {
          total_users_count: 10,
          new_users_count: 5,
        }
        email_template = "#{File.dirname(__FILE__)}/../../template/spec_email_template.erb"
        # send_template returns the raw mail; keep it for the matchers below.
        @email = plan.mailer.send_template(email_args, email_template, email_variables).first
      }
      plan.disconnect!
      expect(@email).to deliver_to("YOU@FAKE.com")
      expect(@email).to have_subject(/Forklift has moved your database/)
      expect(@email).to have_body_text(/Your forklift email/) # base
      expect(@email).to have_body_text(/Total Users: 10/) # template
      expect(@email).to have_body_text(/New Users: 5/) # template
    end

    it "can send mail with an attachment" do
      skip("how to test email attachments?")
    end
  end
end
taskrabbit/forklift | https://github.com/taskrabbit/forklift/blob/e4261da656cb0af77cea37deb07502e24bb1abe2/spec/unit/misc/pid_spec.rb | spec/unit/misc/pid_spec.rb | require 'spec_helper'
describe 'misc forklift core' do
  describe 'pidfile' do
    it "can create a pidfile and will remove it when the plan is over" do
      plan = SpecPlan.new
      pid = "#{File.dirname(__FILE__)}/../../pid/pidfile"
      expect(File.exists?(pid)).to eql false
      plan.do! {
        # While the plan runs, the pidfile exists and holds our PID.
        expect(File.exists?(pid)).to eql true
        expect(File.read(pid).to_i).to eql Process.pid
      }
      plan.disconnect!
      # Plan#do! removes the pidfile once the run completes.
      expect(File.exists?(pid)).to eql false
    end

    it "will not run with an existing pidfile" do
      plan = SpecPlan.new
      plan.pid.store!
      # Because this process is alive, safe_to_run? exits rather than runs.
      expect { plan.do! }.to raise_error SystemExit
      plan.pid.delete!
      plan.disconnect!
    end
  end
end
taskrabbit/forklift | https://github.com/taskrabbit/forklift/blob/e4261da656cb0af77cea37deb07502e24bb1abe2/spec/unit/misc/error_spec.rb | spec/unit/misc/error_spec.rb | require 'spec_helper'
describe 'misc forklift core' do
  describe 'error handling' do
    it "un-caught errors will raise" do
      plan = SpecPlan.new
      # The default step error handler re-raises the exception.
      expect{
        plan.do! {
          plan.step("step_a"){ raise 'BREAK' }
        }
      }.to raise_error 'BREAK'
      # do! never completed, so clean up the pidfile by hand.
      plan.pid.delete!
      plan.disconnect!
    end

    it 'can make error handlers' do
      plan = SpecPlan.new
      name = ''
      ex = ''
      # A custom handler receives the failing step's name and the exception.
      error_handler = lambda{ |n, e|
        ex = e
        name = n
      }
      plan.do! {
        plan.step("step_a", error_handler){ raise 'BREAK' }
      }
      plan.disconnect!
      expect(name).to eql :step_a
      expect(ex.to_s).to eql 'BREAK'
    end
  end
end
taskrabbit/forklift | https://github.com/taskrabbit/forklift/blob/e4261da656cb0af77cea37deb07502e24bb1abe2/spec/unit/connection/mysql_spec.rb | spec/unit/connection/mysql_spec.rb | require 'spec_helper'
require 'zlib'
# Unit coverage for the MySQL transport: table utilities against a seeded
# database, plus direct tests of the private #safe_values escaper.
describe Forklift::Connection::Mysql do
  describe "read/write utils" do
    before(:each) do
      SpecSeeds.setup_mysql
    end

    it "can read a list of tables" do
      plan = SpecPlan.new
      plan.do! {
        source = plan.connections[:mysql][:forklift_test_source_a]
        expect(source.tables).to include 'users'
        expect(source.tables).to include 'products'
        expect(source.tables).to include 'sales'
      }
      plan.disconnect!
    end

    it "can delete a table" do
      plan = SpecPlan.new
      table = "users"
      plan.do! {
        source = plan.connections[:mysql][:forklift_test_source_a]
        expect(source.tables).to include 'users'
        source.drop! table
        expect(source.tables).to_not include 'users'
      }
      plan.disconnect!
    end

    it "can count the rows in a table" do
      plan = SpecPlan.new
      table = "users"
      plan.do! {
        source = plan.connections[:mysql][:forklift_test_source_a]
        expect(source.count(table)).to eql 5 # seeded row count
      }
      plan.disconnect!
    end

    it "can truncate a table (both with and without !)" do
      plan = SpecPlan.new
      table = "users"
      plan.do! {
        source = plan.connections[:mysql][:forklift_test_source_a]
        expect(source.count(table)).to eql 5
        source.truncate! table
        expect(source.count(table)).to eql 0
        # The bang-less variant swallows errors instead of raising.
        expect { source.truncate(table) }.to_not raise_error
      }
      plan.disconnect!
    end

    it 'trunacte! will raise if the table does not exist' do
      plan = SpecPlan.new
      table = "other_table"
      plan.do! {
        source = plan.connections[:mysql][:forklift_test_source_a]
        expect { source.truncate!(table) }.to raise_error(/Table 'forklift_test_source_a.other_table' doesn't exist/)
      }
      plan.disconnect!
    end

    it "can get the columns of a table" do
      plan = SpecPlan.new
      table = "sales"
      plan.do! {
        source = plan.connections[:mysql][:forklift_test_source_a]
        columns = source.columns(table)
        expect(columns).to include :id
        expect(columns).to include :user_id
        expect(columns).to include :product_id
        expect(columns).to include :timestamp
      }
      plan.disconnect!
    end

    it "can create a mysqldump" do
      dump = "/tmp/destination.sql.gz"
      plan = SpecPlan.new
      plan.do! {
        source = plan.connections[:mysql][:forklift_test_source_a]
        source.dump(dump)
      }
      plan.disconnect!
      expect(File.exists?(dump)).to eql true
      # The dump is gzipped SQL; spot-check one seeded row in the INSERTs.
      contents = Zlib::GzipReader.new(StringIO.new(File.read(dump))).read
      expect(contents).to include "(1,'evan@example.com','Evan','T','2014-04-03 11:40:12','2014-04-03 11:39:28')"
    end
  end

  describe "#safe_values" do
    subject { described_class.new({}, {}) }

    it "escapes one trailing backslash" do
      columns = [:col]
      values = {:col => "foo\\"}
      expect(subject.send(:safe_values, columns, values)).to eq("(\"foo\\\\\")")
    end

    it "escapes two trailing backslashes" do
      columns = [:col]
      values = {:col => "foo\\\\" }
      expect(subject.send(:safe_values, columns, values)).to eq("(\"foo\\\\\\\\\")")
    end
  end
end
| ruby | Apache-2.0 | e4261da656cb0af77cea37deb07502e24bb1abe2 | 2026-01-04T17:45:51.809866Z | false |
taskrabbit/forklift | https://github.com/taskrabbit/forklift/blob/e4261da656cb0af77cea37deb07502e24bb1abe2/spec/template/spec_user_transformation.rb | spec/template/spec_user_transformation.rb | class SpecUserTransformation
def do!(connection, forklift, args)
connection.q("ALTER TABLE `users` ADD `full_name` VARCHAR(255) NULL DEFAULT NULL AFTER `updated_at`;")
connection.q("UPDATE `users` SET full_name = CONCAT('#{args[:prefix]}', ' ', first_name, ' ', last_name);")
end
end | ruby | Apache-2.0 | e4261da656cb0af77cea37deb07502e24bb1abe2 | 2026-01-04T17:45:51.809866Z | false |
taskrabbit/forklift | https://github.com/taskrabbit/forklift/blob/e4261da656cb0af77cea37deb07502e24bb1abe2/example/plan.rb | example/plan.rb | # plan = Forklift::Plan.new
# Or, you can pass configs:
# plan = Forklift::Plan.new
plan = Forklift::Plan.new({
  # logger: {debug: true}
})

plan.do! {
  # do! is a wrapper around common setup methods (pidfile locking, setting up the logger, etc)
  # you don't need to use do! if you want finer control

  # cleanup from a previous run
  plan.step('Cleanup'){
    destination = plan.connections[:mysql][:destination]
    destination.exec("./transformations/cleanup.sql")
  }
  # FIX: this step block was previously closed with `end` instead of `}`,
  # which is a syntax error for a brace block.

  # mySQL -> mySQL
  plan.step('Mysql Import'){
    source = plan.connections[:mysql][:source]
    destination = plan.connections[:mysql][:destination]
    source.tables.each do |table|
      Forklift::Patterns::Mysql.optimistic_pipe(source, table, destination, table)
      # will attempt to do an incremental pipe, will fall back to a full table copy
      # by default, incremental updates happen off of the `created_at` column, but you can modify this with "matcher"
    end
  }

  # Elasticsearch -> mySQL
  plan.step('Elasticsearch Import'){
    source = plan.connections[:elasticsearch][:source]
    destination = plan.connections[:mysql][:destination]
    table = 'es_import'
    index = 'aaa'
    query = { query: { match_all: {} } } # pagination will happen automatically
    destination.truncate!(table) if destination.tables.include? table
    source.read(index, query) {|data| destination.write(data, table) }
  }

  # mySQL -> Elasticsearch
  plan.step('Elasticsearch Load'){
    source = plan.connections[:mysql][:source]
    destination = plan.connections[:elasticsearch][:source]
    table = 'users'
    index = 'users'
    query = "select * from users" # pagination will happen automatically
    source.read(query) {|data| destination.write(data, table, true, 'user') }
  }

  # ... and you can write your own connections [LINK GOES HERE]

  # Do some SQL transformations
  plan.step('Transformations'){
    # SQL transformations are done exactly as they are written
    destination = plan.connections[:mysql][:destination]
    destination.exec!("./transformations/combined_name.sql")

    # Do some Ruby transformations
    # Ruby transformations expect `do!(connection, forklift)` to be defined
    destination = plan.connections[:mysql][:destination]
    destination.exec!("./transformations/email_suffix.rb")
  }

  # mySQL Dump the destination
  plan.step('Mysql Dump'){
    destination = plan.connections[:mysql][:destination]
    destination.dump('/tmp/destination.sql.gz')
  }

  # email the logs and a summary
  plan.step('Email'){
    destination = plan.connections[:mysql][:destination]
    email_args = {
      to: "YOU@FAKE.com",
      from: "Forklift",
      # FIX: was `subject: "value", "Forklift has moved…"` — a syntax error
      # (two values for one hash entry); the timestamped subject was intended.
      subject: "Forklift has moved your database @ #{Time.new}",
    }
    email_variables = {
      total_users_count: destination.read('select count(1) as "count" from users')[0][:count],
      new_users_count: destination.read('select count(1) as "count" from users where date(created_at) = date(NOW())')[0][:count],
    }
    email_template = "./template/email.erb"
    plan.mailer.send_template(email_args, email_template, email_variables, plan.logger.messages) unless ENV['EMAIL'] == 'false'
  }
}
| ruby | Apache-2.0 | e4261da656cb0af77cea37deb07502e24bb1abe2 | 2026-01-04T17:45:51.809866Z | false |
taskrabbit/forklift | https://github.com/taskrabbit/forklift/blob/e4261da656cb0af77cea37deb07502e24bb1abe2/example/transformations/email_suffix.rb | example/transformations/email_suffix.rb | class EmailSuffix
def do!(connection, forklift)
forklift.logger.log "collecting email suffixes..."
suffixes = {}
connection.read("select email from users"){|data|
data.each do |row|
part = row[:email].split('@').last
suffixes[part] = 0 if suffixes[part].nil?
suffixes[part] = suffixes[part] + 1
end
}
suffixes.each do |suffix, count|
forklift.logger.log " > #{suffix}: #{count}" if count > 5
end
end
end
| ruby | Apache-2.0 | e4261da656cb0af77cea37deb07502e24bb1abe2 | 2026-01-04T17:45:51.809866Z | false |
taskrabbit/forklift | https://github.com/taskrabbit/forklift/blob/e4261da656cb0af77cea37deb07502e24bb1abe2/lib/forklift.rb | lib/forklift.rb | require 'forklift/version'
# Loads forklift's pieces in dependency order, then any project-local
# transports/ and patterns/ found under the current working directory.
module Forklift
  lib = File.join(File.expand_path(File.dirname(__FILE__)), 'forklift')
  # Core building blocks (order matters: later files use earlier ones).
  require "#{lib}/base/utils.rb"
  require "#{lib}/base/pid.rb"
  require "#{lib}/base/logger.rb"
  require "#{lib}/base/mailer.rb"
  require "#{lib}/base/connection.rb"
  # Bundled transports and patterns.
  Dir["#{lib}/transports/*.rb"].each {|file| require file }
  Dir["#{lib}/patterns/*.rb"].each {|file| require file }
  # Project-local extensions, if the project defines any.
  Dir["#{Dir.pwd}/transports/*.rb"].each {|file| require file } if File.directory?("#{Dir.pwd}/transports")
  Dir["#{Dir.pwd}/patterns/*.rb"].each {|file| require file } if File.directory?("#{Dir.pwd}/patterns")
  require "#{lib}/plan.rb"
end
| ruby | Apache-2.0 | e4261da656cb0af77cea37deb07502e24bb1abe2 | 2026-01-04T17:45:51.809866Z | false |
taskrabbit/forklift | https://github.com/taskrabbit/forklift/blob/e4261da656cb0af77cea37deb07502e24bb1abe2/lib/forklift/version.rb | lib/forklift/version.rb | module Forklift
VERSION = "2.0.0-alpha"
end
| ruby | Apache-2.0 | e4261da656cb0af77cea37deb07502e24bb1abe2 | 2026-01-04T17:45:51.809866Z | false |
taskrabbit/forklift | https://github.com/taskrabbit/forklift/blob/e4261da656cb0af77cea37deb07502e24bb1abe2/lib/forklift/plan.rb | lib/forklift/plan.rb | require 'active_support/all'
module Forklift
  # Orchestrates a forklift run: owns the configuration, the shared helpers
  # (logger, mailer, pid, utils), the named connections, and the ordered
  # steps a plan file registers.
  class Plan
    def initialize(config={})
      @config = default_config.merge(config)
      @utils = Forklift::Base::Utils.new
      @pid = Forklift::Base::Pid.new(self)
      @logger = Forklift::Base::Logger.new(self)
      @mailer = Forklift::Base::Mailer.new(self)
      @connections = {}
      @steps = {}
    end

    # Simple readers.
    def connections; @connections end
    def steps; @steps end
    def config; @config end
    def logger; @logger end
    def mailer; @mailer end
    def utils; @utils end
    def pid; @pid end

    # Builds a connection for every yml under config/connections/<type>/,
    # keyed as connections[:<type>][:<file basename>]. Failures are logged
    # but not raised, so a later step can fail and report via its handler.
    def connect!
      files = Dir["#{config[:project_root]}/config/connections/**/*.yml"]
      files.each do |f|
        next if f.include?('example.yml')
        name = f.split("/")[-1].split('.')[0]
        type = f.split("/")[-2]
        connections[type.to_sym] = {} if connections[type.to_sym].nil?
        db_config = utils.load_yml(f)
        begin
          # The directory name doubles as the Forklift::Connection class name.
          loader = "Forklift::Connection::#{type.camelcase}.new(db_config, self)"
          connection = eval(loader)
          connection.connect
          connections[type.to_sym][name.to_sym] = connection
          logger.debug "loaded a #{type.camelcase} connection from #{f}"
        rescue Exception => e
          logger.fatal "cannot create a class type of #{loader} from #{f} | #{e}"
          # raise e ## Don't raise here, but let a step fail so the error_handler can report
        end
      end
    end

    # Closes every connection built by connect!.
    def disconnect!
      connections.each do |k, collection|
        collection.each do |k, connection|
          connection.disconnect
        end
      end
    end

    # Steps re-raise by default; pass a handler to #step to override.
    def default_error_handler
      return lambda {|name, e| raise e }
    end

    # Registers a named step. Steps are only queued here; activate_steps
    # marks which ones should run and do_step! executes them.
    def step(*args, &block)
      name = args[0].to_sym
      error_handler = default_error_handler
      error_handler = args[1] unless args[1].nil?
      self.steps[name] = {
        ran: false,
        to_run: false,
        block: block,
        error_handler: error_handler,
      }
    end

    # Runs one registered step (at most once), routing any exception to
    # the step's error_handler.
    def do_step!(name)
      name = name.to_sym
      if self.steps[name].nil?
        self.logger.log "[error] step `#{name}` not found"
      else
        step = self.steps[name]
        if step[:ran] == true
          self.logger.log "step `#{name}` already ran"
        elsif step[:to_run] == false
          self.logger.log "skipping step `#{name}`"
        else
          self.logger.log "*** step: #{name} ***"
          begin
            step[:block].call
            step[:ran] = true
          rescue Exception => e
            step[:error_handler].call(name, e)
          end
        end
      end
    end

    # Wrapped so specs can stub the command line.
    def argv
      ARGV
    end

    def activate_steps
      # all steps are run by default
      # step names are passed as ARGV
      # `forklift plan.rb` runs everything and `forklift plan.rb send_email` only sends the email
      if argv.length < 2 || ENV['FORKLIFT_RUN_ALL_STEPS'] == 'true'
        self.steps.each do |k,v|
          self.steps[k][:to_run] = true
        end
      else
        i = 1
        while i < argv.length
          name = argv[i].to_sym
          unless self.steps[name].nil?
            self.steps[name][:to_run] = true
          else
            # Unknown step name on the command line is fatal.
            self.logger.log "[error] step `#{name}` not found"
            exit(1)
          end
          i = i + 1
        end
      end
    end

    # Main entry point: locks the pidfile, connects, collects the steps the
    # given block registers, then runs the activated ones and cleans up.
    def do!
      # you can use `plan.logger.log` in your plan for logging
      self.logger.log "Starting forklift"

      # use a pidfile to ensure that only one instance of forklift is running at a time; store the file if OK
      self.pid.safe_to_run?
      self.pid.store!

      # this will load all connections in /config/connections/#{type}/#{name}.yml into the plan.connections hash
      # and build all the connection objects (and try to connect in some cases)
      self.connect!

      yield # your stuff here!

      self.activate_steps
      self.steps.each do |k, v|
        do_step!(k)
      end

      # remove the pidfile
      self.logger.log "Completed forklift"
      self.pid.delete!
    end

    private

    def default_config
      return {
        project_root: Dir.pwd,
        batch_size: 1000,
        char_bytecode_max: 65535, # the utf8 char limit
        logger: {
          stdout: true,
          debug: false,
        },
      }
    end

    #/private

  end
end
| ruby | Apache-2.0 | e4261da656cb0af77cea37deb07502e24bb1abe2 | 2026-01-04T17:45:51.809866Z | false |
taskrabbit/forklift | https://github.com/taskrabbit/forklift/blob/e4261da656cb0af77cea37deb07502e24bb1abe2/lib/forklift/base/mailer.rb | lib/forklift/base/mailer.rb | require 'pony'
require 'erb'
require 'active_support/core_ext/hash/keys'
module Forklift
  module Base
    # Sends run-summary emails (via Pony) using settings from config/email.yml.
    class Mailer
      def initialize(forklift)
        @forklift = forklift
      end

      # Public: Pull out the settings from config/email.yml.
      #
      # Returns a Hash with all symbolized keys.
      def config
        config_file = "#{forklift.config[:project_root]}/config/email.yml"
        @config ||= forklift.utils.load_yml(config_file).deep_symbolize_keys
      end

      def forklift
        @forklift
      end

      # Fallbacks applied when the caller omits from/subject/body.
      def message_defaults
        {
          from: "Forklift",
          subject: "Forklift has moved your database @ #{Time.new}",
          body: "Forklift has moved your database @ #{Time.new}",
        }
      end

      # Renders an ERB template with `variables` exposed as instance
      # variables (see ERBBinding) and sends the result as the body.
      def send_template(args, template_file, variables, attachment_lines=[])
        renderer = ERB.new(File.read(template_file))
        binder = ERBBinding.new(variables)
        body = renderer.result(binder.get_binding)
        args[:body] = body
        send(args, attachment_lines)
      end

      # NOTE(review): this shadows Object#send for Mailer instances; callers
      # needing the builtin must use __send__.
      def send(args, attachment_lines=[])
        params = message_defaults
        [:to, :from, :subject, :body].each do |i|
          params[i] = args[i] unless args[i].nil?
        end
        if attachment_lines.length > 0
          # Log lines ride along as a plain-text attachment.
          params[:attachments] = {"log.txt" => attachment_lines.join("\r\n")}
        end
        deliver(params)
      end

      private

      # Private: Actually deliver the message using Pony.
      #
      # Returns the raw email from Pony.
      def deliver(params)
        forklift.logger.log("Sending email via #{config[:via]}")
        # Promote the plain body to html_body if no html body was given.
        if params[:html_body].nil?
          params[:html_body] = params[:body]
          params.delete(:body)
        end
        params[:via] = config[:via].to_sym
        params[:via_options] = config[:via_options]
        Pony.mail(params)
      end

      # Exposes a variables hash to ERB as @-prefixed instance variables.
      class ERBBinding
        def initialize(hash)
          hash.each do |k,v|
            # Single quotes are replaced with spaces — presumably to keep
            # rendered values from breaking template quoting; TODO confirm.
            v = v.gsub("'", " ") if v.class == String
            instance_variable_set("@#{k}", v)
          end
        end

        def get_binding
          return binding()
        end
      end
    end
  end
end
| ruby | Apache-2.0 | e4261da656cb0af77cea37deb07502e24bb1abe2 | 2026-01-04T17:45:51.809866Z | false |
taskrabbit/forklift | https://github.com/taskrabbit/forklift/blob/e4261da656cb0af77cea37deb07502e24bb1abe2/lib/forklift/base/logger.rb | lib/forklift/base/logger.rb | require 'lumberjack'
module Forklift
  module Base
    # Collects log lines in memory (for email attachments), writes them to
    # log/forklift.log via Lumberjack, and optionally echoes them to stdout.
    class Logger
      def initialize(forklift)
        @forklift = forklift
      end

      def forklift
        @forklift
      end

      # Every message logged so far (with timestamps); used as the email
      # attachment at the end of a run.
      def messages
        @messages ||= []
      end

      # Lazily opens the file logger; buffer_size 0 flushes every line.
      def logger
        log_dir = "#{forklift.config[:project_root]}/log"
        @logger ||= ::Lumberjack::Logger.new("#{log_dir}/forklift.log", buffer_size: 0)
      end

      def log(message, severity="info")
        timed_message = "[Forklift @ #{Time.now}] #{message}"
        # FIX: was `unless ... != true` — a double negative; behavior is
        # identical (echo only when stdout is exactly true), stated positively.
        puts timed_message if forklift.config[:logger][:stdout] == true
        logger.send(severity.to_sym, message) unless logger.nil?
        messages << timed_message
      end

      # Only emitted when config[:logger][:debug] is on.
      def debug(message)
        if forklift.config[:logger][:debug] == true
          log("[debug] #{message}")
        end
      end

      # Banner-style log line with surrounding blank lines.
      def emphatically(message)
        log "" if message.length > 0
        log "*** #{message} ***"
        log ""
      end

      def fatal(message)
        log "!!! #{message} !!!"
      end
    end
  end
end
| ruby | Apache-2.0 | e4261da656cb0af77cea37deb07502e24bb1abe2 | 2026-01-04T17:45:51.809866Z | false |
taskrabbit/forklift | https://github.com/taskrabbit/forklift/blob/e4261da656cb0af77cea37deb07502e24bb1abe2/lib/forklift/base/utils.rb | lib/forklift/base/utils.rb | require 'yaml'
require 'erb'
module Forklift
  module Base
    # Small stateless helpers shared across forklift.
    class Utils
      # Parse a YAML file after first rendering it through ERB, so config
      # files may embed ruby (e.g. ENV lookups).
      def load_yml(file)
        rendered = ERB.new(File.read(file)).result
        YAML.load(rendered)
      end

      # Derive a class name from a file path:
      #   "path/to/my_file_name.rb" => "MyFileName"
      def class_name_from_file(file)
        basename = file.split("/").last.split(".").first
        basename.split("_").map(&:capitalize).join
      end
    end
  end
end
| ruby | Apache-2.0 | e4261da656cb0af77cea37deb07502e24bb1abe2 | 2026-01-04T17:45:51.809866Z | false |
taskrabbit/forklift | https://github.com/taskrabbit/forklift/blob/e4261da656cb0af77cea37deb07502e24bb1abe2/lib/forklift/base/connection.rb | lib/forklift/base/connection.rb | module Forklift
module Base
class Connection
attr_reader :config, :forklift, :client
def initialize(config, forklift)
@config = config
@forklift = forklift
end
def connect
# Will define @client
raise 'not implemented'
end
def disconnect
raise 'not implemented'
end
def read(query)
# will return an array of data rows
raise 'not implemented'
end
def write(data, collection)
# will write array data to collection (table)
raise 'not implemented'
end
def pipe
# when copying within the same connection, this method can be defined to speed things up
raise 'not implemented'
end
def exec(path, *args)
begin
exec!(path, &args)
rescue Exception => e
forklift.logger.log(e)
end
end
def exec!(path, *args)
forklift.logger.log "Running script: #{path}"
extension = path.split(".").last
if(extension == "rb" || extension == "ruby")
exec_ruby(path, *args)
else
exec_script(path, *args)
end
end
def exec_ruby(path, *args)
klass = forklift.utils.class_name_from_file(path)
require path
model = eval("#{klass}.new")
model.do!(self, forklift, *args)
end
def exec_script(path, *args)
raise 'not implemented'
end
end
end
end
| ruby | Apache-2.0 | e4261da656cb0af77cea37deb07502e24bb1abe2 | 2026-01-04T17:45:51.809866Z | false |
taskrabbit/forklift | https://github.com/taskrabbit/forklift/blob/e4261da656cb0af77cea37deb07502e24bb1abe2/lib/forklift/base/pid.rb | lib/forklift/base/pid.rb | module Forklift
module Base
class Pid
def initialize(forklift)
@forklift = forklift
end
def forklift
@forklift
end
def pid_dir
"#{forklift.config[:project_root]}/pid"
end
def ensure_pid_dir
`mkdir -p #{pid_dir}`
end
def pidfile
"#{pid_dir}/pidfile"
end
def store!
forklift.logger.debug "Creating pidfile @ #{pidfile}"
ensure_pid_dir
File.open(pidfile, 'w') {|f| f << Process.pid}
end
def recall
ensure_pid_dir
IO.read(pidfile).to_i rescue nil
end
def delete!
forklift.logger.debug "Removing pidfile @ #{pidfile}"
FileUtils.rm(pidfile) rescue nil
end
def safe_to_run?
return if recall.nil?
count = `ps -p #{recall} | wc -l`.to_i
if count >= 2
forklift.logger.fatal "This application is already running (pidfile) #{recall}. Exiting now"
exit(1)
else
forklift.logger.log "Clearing old pidfile from previous process #{recall}"
delete!
end
end
end
end
end
| ruby | Apache-2.0 | e4261da656cb0af77cea37deb07502e24bb1abe2 | 2026-01-04T17:45:51.809866Z | false |
taskrabbit/forklift | https://github.com/taskrabbit/forklift/blob/e4261da656cb0af77cea37deb07502e24bb1abe2/lib/forklift/patterns/elasticsearch_patterns.rb | lib/forklift/patterns/elasticsearch_patterns.rb | module Forklift
module Patterns
class Elasticsearch
end
end
end
| ruby | Apache-2.0 | e4261da656cb0af77cea37deb07502e24bb1abe2 | 2026-01-04T17:45:51.809866Z | false |
taskrabbit/forklift | https://github.com/taskrabbit/forklift/blob/e4261da656cb0af77cea37deb07502e24bb1abe2/lib/forklift/patterns/mysql_patterns.rb | lib/forklift/patterns/mysql_patterns.rb | module Forklift
module Patterns
class Mysql
class<<self
# Moves rows from one table to another within a single database.
# This assumes that we can directly insert to the destination from
# the source. If you are moving data between separate MySQL servers
# take a look at {Forklift::Patterns::Mysql.mysql_import}.
#
# It's worth noting that the move happens by way of a working table
# that is cleared, filled and then renamed to the `to_table` after
# `to_table` is dropped.
#
# @param source [Forklift::Connection::Mysql] the source database
# connection
# @param from_table [String] the table name that has the rows you
# want to move
# @param destination [Forklift::Connection::Mysql] the destination
# database connection. This must be the same MySQL server as the
# source
# @param to_table [String] the table name where the new rows will
# be inserted in to
# @param options [Hash]
# @option options [String] :tmp_table ('_forklift_tmp') The working
# table name that ultimately replaces `to_table`
#
# @see .mysql_import
def pipe(source, from_table, destination, to_table, options={})
start = Time.new.to_i
from_db = source.current_database
to_db = destination.current_database
tmp_table = options[:tmp_table] || '_forklift_tmp'
source.forklift.logger.log("mysql pipe: `#{from_db}`.`#{from_table}` => `#{to_db}`.`#{to_table}`")
source.q("DROP TABLE IF EXISTS `#{to_db}`.`#{tmp_table}`")
source.q("CREATE TABLE `#{to_db}`.`#{tmp_table}` LIKE `#{from_db}`.`#{from_table}`")
source.q("INSERT INTO `#{to_db}`.`#{tmp_table}` SELECT * FROM `#{from_db}`.`#{from_table}`")
source.q("DROP TABLE IF EXISTS `#{to_db}`.`#{to_table}`")
source.q("RENAME TABLE `#{to_db}`.`#{tmp_table}` TO `#{to_db}`.`#{to_table}`")
delta = Time.new.to_i - start
source.forklift.logger.log(" ^ moved #{destination.count(to_table, to_db)} rows in #{delta}s")
end
# Pipe rows from one table to another within the same database
# (see .pipe). This is the incremental version of the {.pipe}
# pattern and will only move records whose `matcher` column is
# newer than the maximum in the destination table.
#
# @param (see .pipe)
# @option options [String] :matcher ('updated_at') The datetime
# column used to find the "newest" records in the `from_table`
# @option options [String] :primary_key ('id') The column to use
# to determine if the row should be updated or inserted. Updates
# are performed by deleting the old version of the row and
# reinserting the new, updated row.
#
# @see .mysql_incremental_import
# @see .pipe
def incremental_pipe(source, from_table, destination, to_table, options={})
start = Time.new.to_i
from_db = source.current_database
to_db = destination.current_database
matcher = options[:matcher] || source.default_matcher
primary_key = options[:primary_key] || :id
source.forklift.logger.log("mysql incremental_pipe: `#{from_db}`.`#{from_table}` => `#{to_db}`.`#{to_table}`")
source.q("CREATE TABLE IF NOT EXISTS `#{to_db}`.`#{to_table}` LIKE `#{from_db}`.`#{from_table}`")
# Count the number of rows in to_table
original_count = source.count(to_table, to_db)
# Find the latest/max/newest timestamp from the final table
# in order to determine the last copied row.
latest_timestamp = source.max_timestamp(to_table, matcher, to_db)
# If to_table has existing rows, ensure none of them are "stale."
# A stale row in to_table means a previously copied row was
# updated in from_table, so let's delete it from the to_table
# so we can get a fresh copy of that row.
if original_count > 0
# Get the ids of rows in from_table that are newer than the newest row in to_table.
# Some of these rows could either be a) stale or b) new.
source.read("SELECT `#{primary_key}` FROM `#{from_db}`.`#{from_table}` WHERE `#{matcher}` > \"#{latest_timestamp}\" ORDER BY `#{matcher}`") do |stale_rows|
if stale_rows.length > 0
# Delete these ids from to_table.
# If the ids are stale, then they'll be deleted. If they're new, they won't exist, and nothing will happen.
stale_ids = stale_rows.map { |row| row[primary_key] }.join(',')
source.q("DELETE FROM `#{to_db}`.`#{to_table}` WHERE `#{primary_key}` IN (#{stale_ids})")
source.forklift.logger.log(" ^ deleted up to #{stale_rows.length} stale rows from `#{to_db}`.`#{to_table}`")
end
end
end
# Do the insert into to_table
destination.q("INSERT INTO `#{to_db}`.`#{to_table}` SELECT * FROM `#{from_db}`.`#{from_table}` WHERE `#{matcher}` > \"#{latest_timestamp.to_s(:db)}\" ORDER BY `#{matcher}`")
delta = Time.new.to_i - start
new_count = destination.count(to_table, to_db) - original_count
source.forklift.logger.log(" ^ created #{new_count} new rows in #{delta}s")
end
# Attempt an {.incremental_pipe} and fall back to a {.pipe} if unable
# to run incrementally.
#
# @param (see .pipe)
# @option (see .pipe)
# @option (see .incremental_pipe)
#
# @see .pipe
# @see .incremental_pipe
def optimistic_pipe(source, from_table, destination, to_table, options={})
from_db = source.current_database
to_db = destination.current_database
if self.can_incremental_pipe?(source, from_table, destination, to_table, options)
begin
incremental_pipe(source, from_table, destination, to_table, options)
rescue Exception => e
source.forklift.logger.log("! incremental_pipe failure on #{from_table} => #{to_table}: #{e} ")
source.forklift.logger.log("! falling back to pipe...")
pipe(source, from_table, destination, to_table)
end
else
pipe(source, from_table, destination, to_table, options)
end
end
# Attempt a {.mysql_incremental_import} and fall back to {.mysql_import}
#
# @param (see .mysql_import)
# @option (see .mysql_import)
# @option (see .mysql_incremental_import)
#
# @see .mysql_import
# @see .mysql_incremental_import
def mysql_optimistic_import(source, from_table, destination, to_table, options={})
if self.can_incremental_import?(source, from_table, destination, to_table, options)
begin
self.mysql_incremental_import(source, from_table, destination, to_table, options)
rescue Exception => e
source.forklift.logger.log("! incremental import failure on #{from_table} => #{to_table}: #{e} ")
source.forklift.logger.log("! falling back to import...")
self.mysql_import(source, from_table, destination, to_table, options)
end
else
self.mysql_import(source, from_table, destination, to_table, options)
end
end
def detect_primary_key_or_default(source, from_table)
source.q("SHOW INDEX FROM `#{source.current_database}`.`#{from_table}` WHERE key_name = 'PRIMARY';").try(:first).try(:[], :Column_name).try(:to_sym) || :id
end
# Import table from one mysql instance to another incrementally.
#
# @param (see .mysql_import
# @option options [String] :matcher ('updated_at') The datetime
# column used to find the "newest" records in the `from_table`
#
# @see .mysql_import
# @see .incremental_pipe
def mysql_incremental_import(source, from_table, destination, to_table, options={})
matcher = options[:matcher] || source.default_matcher
primary_key = detect_primary_key_or_default(source, from_table)
since = destination.max_timestamp(to_table, matcher)
source.read_since(from_table, since, matcher){ |data| destination.write(data, to_table, true, destination.current_database, primary_key) }
end
# Pull a table from the `source` database in to the `destination` database.
      # This is an unoptimized version of {.pipe}. Unlike {.pipe} this method can
# pull records from one mysql instance in to another. The `to_table` at the
# `destination` database will get a `DROP` if it exists.
#
# @param (see .pipe)
#
# @return
#
# @see .pipe
def mysql_import(source, from_table, destination, to_table, options={})
primary_key = detect_primary_key_or_default(source, from_table)
# destination.truncate table
# DROP (not TRUNCATE) so the destination table is lazily re-created from the
# incoming data, picking up any schema changes in the source.
destination.drop! to_table if destination.tables.include?(to_table)
source.read("SELECT * FROM #{from_table}"){ |data| destination.write(data, to_table, true, destination.current_database, primary_key) }
end
# The high water method will stub a row in all tables with a `default_matcher` column pretending to have a record from `time`
# This enables partial forklift runs which will only extract data "later than X"
#
# @todo assumes all columns have a default NULL setting
def write_high_water_mark(db, time, matcher=db.default_matcher)
  db.tables.each do |table|
    names, types = db.columns(table, db.current_database, true)
    # Only tables that actually carry the matcher column get a stub row.
    next unless names.include?(matcher)

    stub = {}
    names.zip(types).each do |name, type|
      stub[name] =
        if name == matcher
          time.to_s(:db)
        elsif type =~ /text/
          "~~stub~~"
        elsif type =~ /varchar/
          # Symbol rather than String so sql_type() keeps the varchar width.
          "~~stub~~".to_sym
        elsif type =~ /float/ || type =~ /int/ || type =~ /decimal/
          0
        elsif type =~ /datetime/ || type =~ /timestamp/
          time.to_s(:db)
        elsif type =~ /date/
          time.to_s(:db).split(" ").first
        else
          "NULL"
        end
    end
    db.write([stub], table)
  end
end
# Tests if a particular pipe parameterization can be performed incrementally
#
# @param (see .incremental_pipe)
#
# @return [true|false]
# True only when both tables exist, both contain the matcher column, and the
# two tables have exactly the same column set.
def can_incremental_pipe?(source, from_table, destination, to_table, options={})
  matcher = options[:matcher] || source.default_matcher
  return false unless source.tables.include?(from_table) &&
                      destination.tables.include?(to_table)

  from_columns = source.columns(from_table, source.current_database)
  to_columns = destination.columns(to_table, destination.current_database)

  [matcher, *to_columns].all? { |column| from_columns.include?(column) } &&
    [matcher, *from_columns].all? { |column| to_columns.include?(column) }
end
# Tests if a particular import parameterization can be performed incrementally
#
# @param (see .mysql_incremental_import)
#
# @return [true|false]
# True when the destination table already exists and both ends expose the
# matcher column; source.columns is queried first, as in the original.
def can_incremental_import?(source, from_table, destination, to_table, options={})
  matcher = options[:matcher] || source.default_matcher
  source_has_matcher = source.columns(from_table).include?(matcher)
  source_has_matcher &&
    destination.tables.include?(to_table) &&
    destination.columns(to_table).include?(matcher)
end
end
end
end
end
| ruby | Apache-2.0 | e4261da656cb0af77cea37deb07502e24bb1abe2 | 2026-01-04T17:45:51.809866Z | false |
taskrabbit/forklift | https://github.com/taskrabbit/forklift/blob/e4261da656cb0af77cea37deb07502e24bb1abe2/lib/forklift/transports/mysql.rb | lib/forklift/transports/mysql.rb | require 'mysql2'
require 'open3'
module Forklift
module Connection
class Mysql < Forklift::Base::Connection
# Open the Mysql2 client and select the configured database.
def connect
@client = Mysql2::Client.new(config)
q("USE `#{config[:database]}`")
end
# Close the underlying Mysql2 client.
def disconnect
@client.close
end
# Column used by incremental reads/imports when no :matcher is given.
def default_matcher
:updated_at
end
# DROP the table; raises if it does not exist (see truncate for best-effort).
def drop!(table, database=current_database)
q("DROP table `#{database}`.`#{table}`");
end
# RENAME TABLE, optionally moving it across databases.
def rename(table, new_table, database=current_database, new_database=current_database)
q("RENAME TABLE `#{database}`.`#{table}` TO `#{new_database}`.`#{new_table}`")
end
# Run `query` in batches of `limit` rows, appending "LIMIT offset, limit"
# to SELECTs that do not already contain one.
#
# With a block: yields each batch (including a final empty batch) and pages
# until a batch comes back empty, or once when looping is false.
# Without a block: returns only the FIRST batch.
#
# NOTE(review): the substring check below misfires on queries that merely
# contain the word "limit" (e.g. in an identifier) — confirm callers avoid that.
def read(query, database=current_database, looping=true, limit=forklift.config[:batch_size], offset=0)
loop_count = 0
# TODO: Detect limit/offset already present in query
while ( looping == true || loop_count == 0 )
data = []
prepared_query = query
if prepared_query.downcase.include?("select") && !prepared_query.downcase.include?("limit")
prepared_query = "#{prepared_query} LIMIT #{offset}, #{limit}"
end
response = q(prepared_query)
response.each do |row|
data << row
end
if block_given?
yield data
else
# No block: hand back the first page only.
return data
end
offset = offset + limit
looping = false if data.length == 0
loop_count = loop_count + 1
end
end
# Write `rows` (array of symbol-keyed hashes) into `database`.`table`.
#
# - Lazily creates the table from the data shape when missing (lazy=true),
#   or widens placeholder column types on an existing table.
# - When crash_on_extral_col is false, columns present in the data but not
#   in the table are added via ALTER TABLE.
# - When to_update is true, rows carrying a `primary_key` value DELETE any
#   existing row with that key first (upsert by replace).
# - On a Mysql2 "Incorrect string value" error, retries the INSERT with
#   characters above config[:char_bytecode_max] replaced by '???'.
def write(rows, table, to_update=true, database=current_database, primary_key=:id, lazy=true, crash_on_extral_col=false)
  if tables.include? table
    ensure_row_types(rows, table, database)
  elsif(lazy == true && rows.length > 0)
    lazy_table_create(table, rows, database, primary_key)
  end

  return if rows.length == 0

  columns = columns(table, database)
  if crash_on_extral_col == false
    rows.each do |row|
      row.each do |column, value|
        unless columns.include?(column)
          q("ALTER TABLE `#{database}`.`#{table}` ADD `#{column}` #{sql_type(value)} NULL DEFAULT NULL;")
          columns = columns(table, database)
        end
      end
    end
  end

  insert_values = []
  delete_keys = []
  rows.each do |row|
    delete_keys << row[primary_key] if to_update && row[primary_key].present?
    insert_values << safe_values(columns, row)
  end

  unless delete_keys.empty?
    q(%{DELETE FROM `#{database}`.`#{table}` WHERE `#{primary_key}` IN (#{delete_keys.join(',')})})
  end

  # Build the INSERT once so the rescue branch can sanitize the SAME
  # statement. The original referenced an undefined local `insert_q` in the
  # rescue, raising NameError instead of performing the UTF8-safe retry.
  insert_q = %{INSERT INTO `#{database}`.`#{table}` (#{safe_columns(columns)}) VALUES #{insert_values.join(',')}}
  begin
    q(insert_q)
  rescue Mysql2::Error => ex
    # UTF8 Safety. Open a PR if you don't want UTF8 data...
    # https://github.com/taskrabbit/demoji
    raise ex unless ex.message.match /Incorrect string value:/
    safer_insert_q = ""
    insert_q.each_char do |char|
      char = '???' if char.ord > forklift.config[:char_bytecode_max]
      safer_insert_q << char
    end
    q(safer_insert_q)
  end
  forklift.logger.log "wrote #{rows.length} rows to `#{database}`.`#{table}`"
end
# CREATE TABLE from the shape of `data`: each key's type is inferred from the
# first non-nil value seen (varchar(0) acts as the "unknown yet" placeholder).
# The primary key becomes AUTO_INCREMENT and the matcher column, if present,
# gets a secondary index so incremental reads can ORDER BY it efficiently.
def lazy_table_create(table, data, database=current_database, primary_key=:id, matcher=default_matcher)
keys = {}
data.each do |item|
item.each do |k,v|
# Overwrite a previous guess only while it is still the nil placeholder.
keys[k] = sql_type(v) if (keys[k].nil? || keys[k] == sql_type(nil))
end
end
keys[primary_key] = 'bigint(20)' unless keys.has_key?(primary_key)
col_defn = keys.map do |col, type|
if col == primary_key
"`#{col}` #{type} NOT NULL AUTO_INCREMENT"
else
"`#{col}` #{type} DEFAULT NULL"
end
end
col_defn << "PRIMARY KEY (`#{primary_key}`)"
col_defn << "KEY `#{matcher}` (`#{matcher}`)" if keys.include?(matcher)
command = <<-EOS
CREATE TABLE `#{database}`.`#{table}` (
#{col_defn.join(', ')}
)
EOS
q(command)
forklift.logger.log "lazy-created table `#{database}`.`#{table}`"
end
# Map a ruby value to the MySQL column type used when lazily creating or
# widening tables. nil maps to varchar(0) — a "type unknown yet" marker that
# ensure_row_types later widens once a real value shows up.
#
# Uses case/when (is_a? semantics, so subclasses match too) and Integer in
# place of Fixnum: the Fixnum constant was removed in Ruby 3.2 and has been
# an alias of Integer since 2.4, so the old `v.class == Fixnum` checks were
# deprecated at best and a NameError at worst.
def sql_type(v)
  case v
  when Integer               then "bigint(20)"
  when Float, BigDecimal     then "float"
  when Time, DateTime        then "datetime" # DateTime before Date: DateTime < Date
  when Date                  then "date"
  when Symbol                then "varchar(255)"
  when TrueClass, FalseClass then "tinyint(1)"
  when String                then "text"
  when NilClass              then "varchar(0)"
  else "text" # catchall
  end
end
# Batched read of all rows whose `matcher` column is >= `since`, ordered by
# the matcher so batches advance deterministically.
# NOTE(review): `since.to_s(:db)` relies on ActiveSupport's Time#to_s(:db);
# `since` is interpolated directly into SQL, so it must be a trusted value.
def read_since(table, since, matcher=default_matcher, database=current_database, limit=forklift.config[:batch_size])
query = "SELECT * FROM `#{database}`.`#{table}` WHERE `#{matcher}` >= '#{since.to_s(:db)}' ORDER BY `#{matcher}` ASC"
self.read(query, database, true, limit){|data|
if block_given?
yield data
else
# Without a block, only the first batch is returned (read's contract).
return data
end
}
end
# Newest value of the `matcher` column in `table`, used as the incremental
# "high water mark". Returns the epoch (Time.at(0)) when the table is
# missing or has no non-NULL matcher values, so a full read happens.
def max_timestamp(table, matcher=default_matcher, database=current_database)
return Time.at(0) unless tables.include?(table)
last_copied_row = read("SELECT MAX(`#{matcher}`) AS \"#{matcher}\" FROM `#{database}`.`#{table}`")[0]
if ( last_copied_row.nil? || last_copied_row[matcher].nil? )
Time.at(0)
else
last_copied_row[matcher]
end
end
# Names of all tables in the currently selected database.
def tables
  client.query("show tables").map { |row| row.values.first }
end
# Currently selected database name; memoized after the first query.
def current_database
@_current_database ||= q("SELECT DATABASE() AS 'db'").first[:db]
end
# Row count of `table`.
def count(table, database=current_database)
q("SELECT COUNT(1) AS \"count\" FROM `#{database}`.`#{table}`").first[:count]
end
# TRUNCATE the table; raises if it does not exist (see truncate).
def truncate!(table, database=current_database)
q("TRUNCATE TABLE `#{database}`.`#{table}`")
end
# Best-effort TRUNCATE: database errors (e.g. missing table) are swallowed
# after being debug-logged.
#
# Fixes two defects in the original:
# - `truncate!(table, database=current_database)` was an ASSIGNMENT in the
#   argument list, silently discarding the caller's `database` argument.
# - `rescue Exception` also swallowed signals; StandardError is sufficient.
def truncate(table, database=current_database)
  truncate!(table, database)
rescue StandardError => e
  forklift.logger.debug e
end
# Column names (symbols) of `table`; with return_types also the raw SQL
# types, as `[names, types]`.
def columns(table, database=current_database, return_types=false)
  described = read("DESCRIBE `#{database}`.`#{table}`")
  names = described.map { |row| row[:Field].to_sym }
  types = described.map { |row| row[:Type] }
  return_types ? [names, types] : names
end
# Shell out to mysqldump and gzip the result to `file`.
#
# @param file [String] path for the gzipped dump
# @param options [Array<String>] extra mysqldump CLI flags, e.g.
#   options.push '--max_allowed_packet=512M'
#   options.push '--set-gtid-purged=OFF'
#
# NOTE(review): any stderr output is treated as fatal — mysqldump warnings
# (e.g. the "password on the command line" notice) will raise; confirm
# that is intended.
def dump(file, options=[])
  cmd = "mysqldump"
  cmd << " -u#{config[:username]}" unless config[:username].nil?
  cmd << " -p#{config[:password]}" unless config[:password].nil?
  options.each do |o|
    cmd << " #{o} "
  end
  cmd << " #{config[:database]}"
  cmd << " | gzip > #{file}"
  # Was config['database']: the config hash is symbol-keyed everywhere else
  # in this class, so the string key logged a blank database name.
  forklift.logger.log "Dumping #{config[:database]} to #{file}"
  forklift.logger.debug cmd
  stdin, stdout, stderr = Open3.popen3(cmd)
  stdout.readlines # drain; dump output itself is piped to gzip by the shell
  errors = stderr.readlines
  if errors.length > 0
    raise " > Dump error: #{errors.join(" ")}"
  else
    forklift.logger.log " > Dump complete"
  end
end
# Execute a .sql script file statement-by-statement, honoring MySQL
# "DELIMITER x" directives (needed for stored procedures/triggers).
def exec_script(path)
body = File.read(path)
delim = ';'
# Split on DELIMITER lines; odd sections are the directives themselves.
body.split(/^(delimiter\s+.*)$/i).each do |section|
if section =~ /^delimiter/i
delim = section[/^delimiter\s+(.+)$/i,1]
next
end
# Within a section, split statements on the currently active delimiter.
lines = section.split(delim)
lines.each do |line|
line.strip!
q(line) if line.length > 0
end
end
end
# Run a raw query against the client, logging it at debug level.
# Rows come back as symbol-keyed hashes; extra Mysql2 options may be merged in.
def q(query, options={})
forklift.logger.debug "\tSQL[#{config[:database]}]: #{query}"
return client.query(query, {symbolize_keys: true}.merge(options))
end
private
# Widen placeholder columns: any column still typed varchar(0) (created when
# only NULLs had been seen — see sql_type) is ALTERed to the type inferred
# from the first non-nil value found for it in `data`.
def ensure_row_types(data, table, database=current_database)
read("describe `#{database}`.`#{table}`").each do |row|
if row[:Type] == 'varchar(0)'
value = nil
# First non-nil value for this column decides the real type.
data.each do |r|
if ( !r[row[:Field].to_sym].nil? )
value = r[row[:Field].to_sym]
break
end
end
if !value.nil?
sql_type = sql_type(value)
alter_sql = "ALTER TABLE `#{database}`.`#{table}` CHANGE `#{row[:Field]}` `#{row[:Field]}` #{sql_type};"
forklift.logger.log alter_sql
q(alter_sql)
end
end
end
end
# Render column names as a backtick-quoted, comma-separated SQL list.
def safe_columns(cols)
  cols.map { |col| "`#{col}`" }.join(', ')
end
# Render one row as a parenthesized SQL VALUES tuple in `columns` order.
# Strings/symbols are escaped and double-quoted, dates/times use the :db
# format (ActiveSupport), numerics pass through, everything else (including
# nil and unknown objects) becomes the SQL literal NULL.
#
# Uses Integer instead of Fixnum: the Fixnum constant was removed in
# Ruby 3.2, so the old `when Fixnum` raised NameError on modern rubies.
def safe_values(columns, row)
  "(" + columns.map do |column|
    v = row[column]
    case v
    when String, Symbol then %{"#{Mysql2::Client.escape(v.to_s)}"}
    when Date, Time, DateTime then %{"#{v.to_s(:db)}"}
    when Integer then v
    when Float, BigDecimal then v.to_f
    else 'NULL'
    end
  end.compact.join(', ') + ")"
end
#/private
end
end
end
| ruby | Apache-2.0 | e4261da656cb0af77cea37deb07502e24bb1abe2 | 2026-01-04T17:45:51.809866Z | false |
taskrabbit/forklift | https://github.com/taskrabbit/forklift/blob/e4261da656cb0af77cea37deb07502e24bb1abe2/lib/forklift/transports/csv.rb | lib/forklift/transports/csv.rb | require 'csv'
require 'fileutils'
module Forklift
module Connection
class Csv < Forklift::Base::Connection
# CSV connections have no handle to open or close.
def connect; end
def disconnect; end
# Stream the configured CSV file in batches of `size` symbol-keyed hashes.
# With a block: yields each full batch plus a final partial batch.
# Without a block: returns only the FIRST full batch (or the whole file if
# it is smaller than one batch).
# NOTE(review): row.to_hash.symbolize_keys relies on ActiveSupport.
def read(size=forklift.config[:batch_size])
data = []
CSV.foreach(config[:file], headers: true, converters: :all) do |row|
data << row.to_hash.symbolize_keys
if(data.length == size)
if block_given?
yield data
data = []
else
return data
end
end
end
# Flush the trailing partial batch (or the whole file when small).
if block_given?
yield data
else
return data
end
end
def write(data, append=true)
if (append == false)
FileUtils.rm(config[:file], {force: true})
end
if( !File.exists?(config[:file]) )
keys = data.first.keys
row = {}
keys.each do |k|
row[k] = k
end
data = [row] + data
end
CSV.open(config[:file],'a') do |file|
data.each do |row|
file << row.values
end
end
end
private
#/private
end
end
end
| ruby | Apache-2.0 | e4261da656cb0af77cea37deb07502e24bb1abe2 | 2026-01-04T17:45:51.809866Z | false |
taskrabbit/forklift | https://github.com/taskrabbit/forklift/blob/e4261da656cb0af77cea37deb07502e24bb1abe2/lib/forklift/transports/elasticsearch.rb | lib/forklift/transports/elasticsearch.rb | require 'elasticsearch'
module Forklift
module Connection
class Elasticsearch < Forklift::Base::Connection
# Build the Elasticsearch client from the connection config.
def connect
@client = ::Elasticsearch::Client.new(config)
end
# Drop the client reference (the ES client holds no persistent connection).
def disconnect
@client = nil
end
# Page through an Elasticsearch query, yielding each page of _source hits
# (or returning only the FIRST page when no block is given). The from/size
# window advances by `size` per page until a page comes back empty, or runs
# exactly once when looping is false.
def read(index, query, looping=true, from=0, size=forklift.config[:batch_size])
  offset = 0
  loop_count = 0
  while (looping == true || loop_count == 0)
    # merge, don't mutate: the original did `prepared_query = query` (a
    # reference copy) and then wrote :from/:size into the CALLER's hash.
    prepared_query = query.merge(from: from + offset, size: size)
    forklift.logger.debug " ELASTICSEARCH: #{prepared_query.to_json}"
    results = client.search( { index: index, body: prepared_query } )
    data = results["hits"]["hits"].map { |hit| hit["_source"] }
    data.map{|l| l.symbolize_keys! }
    if block_given?
      yield data
    else
      return data
    end
    looping = false if results["hits"]["hits"].length == 0
    offset = offset + size
    loop_count = loop_count + 1
  end
end
# Index each hash in `data` into `index`. With update=true, rows carrying a
# `primary_key` value are indexed under that id (overwriting any existing
# doc); otherwise ES assigns ids. The index is refreshed afterwards so the
# writes are immediately searchable.
# NOTE(review): symbolize_keys! relies on ActiveSupport.
def write(data, index, update=false, type='forklift', primary_key=:id)
data.map{|l| l.symbolize_keys! }
data.each do |d|
object = {
index: index,
body: d,
type: type,
}
object[:id] = d[primary_key] if ( !d[primary_key].nil? && update == true )
forklift.logger.debug " ELASTICSEARCH (store): #{object.to_json}"
client.index object
end
client.indices.refresh({ index: index })
end
# Delete the index if it exists; a no-op otherwise.
def delete_index(index)
forklift.logger.debug " ELASTICSEARCH (delete index): #{index}"
client.indices.delete({ index: index }) if client.indices.exists({ index: index })
end
private
#/private
end
end
end
| ruby | Apache-2.0 | e4261da656cb0af77cea37deb07502e24bb1abe2 | 2026-01-04T17:45:51.809866Z | false |
taskrabbit/forklift | https://github.com/taskrabbit/forklift/blob/e4261da656cb0af77cea37deb07502e24bb1abe2/template/plan.rb | template/plan.rb | # plan = Forklift::Plan.new
# Or, you can pass configs
plan = Forklift::Plan.new({
# logger: {debug: true}
})
plan.do! do
# Your plan here.
end
| ruby | Apache-2.0 | e4261da656cb0af77cea37deb07502e24bb1abe2 | 2026-01-04T17:45:51.809866Z | false |
linqueta/rails-healthcheck | https://github.com/linqueta/rails-healthcheck/blob/8afb0f08ae2a7f3ace2432a1fc9d76e4478a1238/app/controllers/healthcheck/healthchecks_controller.rb | app/controllers/healthcheck/healthchecks_controller.rb | # frozen_string_literal: true
require 'action_controller/railtie'
module Healthcheck
# Endpoint mounted by Healthcheck.routes: runs every configured check and
# renders the configured success/error response (or a custom responder).
class HealthchecksController < ActionController::Base
def check
# A configured custom proc takes over the entire response.
return Healthcheck.custom!(self) if Healthcheck.custom?
checker = Healthcheck.check
response = if checker.errored?
Healthcheck::Response::Error.new(self, checker)
else
Healthcheck::Response::Success.new(self, checker)
end
response.execute!
end
end
end
| ruby | MIT | 8afb0f08ae2a7f3ace2432a1fc9d76e4478a1238 | 2026-01-04T17:45:59.256167Z | false |
linqueta/rails-healthcheck | https://github.com/linqueta/rails-healthcheck/blob/8afb0f08ae2a7f3ace2432a1fc9d76e4478a1238/spec/healthcheck_spec.rb | spec/healthcheck_spec.rb | # frozen_string_literal: true
RSpec.describe Healthcheck, type: :module do
it { expect(described_class::VERSION).not_to be nil }
describe '.configure' do
let(:name) { :zero_division }
let(:block) { -> { 100 / 0 } }
subject do
described_class.configure do |config|
config.success = 200
config.error = 503
config.verbose = false
config.route = '/healthcheck'
config.method = :get
config.add_check(name, block)
end
end
before { subject }
it { expect(described_class.configuration.success).to eq(200) }
it { expect(described_class.configuration.error).to eq(503) }
it { expect(described_class.configuration.verbose).to eq(false) }
it { expect(described_class.configuration.route).to eq('/healthcheck') }
it { expect(described_class.configuration.method).to eq(:get) }
it { expect(described_class.configuration.checks.first).to be_a(Healthcheck::Check) }
end
describe '.configuration' do
subject { described_class.configuration }
it { is_expected.to be_a(described_class::Configuration) }
end
describe '.routes' do
subject { described_class.routes(nil) }
after { subject }
it { expect(Healthcheck::Router).to receive(:mount).once }
end
end
| ruby | MIT | 8afb0f08ae2a7f3ace2432a1fc9d76e4478a1238 | 2026-01-04T17:45:59.256167Z | false |
linqueta/rails-healthcheck | https://github.com/linqueta/rails-healthcheck/blob/8afb0f08ae2a7f3ace2432a1fc9d76e4478a1238/spec/spec_helper.rb | spec/spec_helper.rb | # frozen_string_literal: true
require 'bundler/setup'
require 'support/configs/simple_cov_config'
SimpleCovConfig.configure
require 'healthcheck'
require './app/controllers/healthcheck/healthchecks_controller'
require File.expand_path('../spec/dummy/config/environment.rb', __dir__)
ENV['RAILS_ROOT'] ||= File.dirname(__FILE__) + '../../../spec/dummy'
require 'rspec/rails'
require 'timecop'
RSpec.configure do |config|
config.disable_monkey_patching!
config.expect_with :rspec do |c|
c.syntax = :expect
end
config.before { Healthcheck.configuration.clear! }
end
| ruby | MIT | 8afb0f08ae2a7f3ace2432a1fc9d76e4478a1238 | 2026-01-04T17:45:59.256167Z | false |
linqueta/rails-healthcheck | https://github.com/linqueta/rails-healthcheck/blob/8afb0f08ae2a7f3ace2432a1fc9d76e4478a1238/spec/support/configs/simple_cov_config.rb | spec/support/configs/simple_cov_config.rb | # frozen_string_literal: true
require 'simplecov'
require 'simplecov-console'
# Test-suite coverage setup: console formatter, 100% minimum, and a filter
# that excludes files containing nothing coverable.
module SimpleCovConfig
def self.configure
SimpleCov.formatter = SimpleCov::Formatter::Console
SimpleCov.minimum_coverage 100
SimpleCov.start do
add_filter { |source_file| cover?(source_file.lines) }
end
end
# True when the file has no `def` or `attributes` lines, i.e. nothing to
# measure — such files are filtered OUT of the coverage report.
def self.cover?(lines)
!lines.detect { |line| line.src.match?(/(def |attributes)/) }
end
end
| ruby | MIT | 8afb0f08ae2a7f3ace2432a1fc9d76e4478a1238 | 2026-01-04T17:45:59.256167Z | false |
linqueta/rails-healthcheck | https://github.com/linqueta/rails-healthcheck/blob/8afb0f08ae2a7f3ace2432a1fc9d76e4478a1238/spec/app/controllers/healthcheck/healthchecks_controller_spec.rb | spec/app/controllers/healthcheck/healthchecks_controller_spec.rb | # frozen_string_literal: true
RSpec.describe Healthcheck::HealthchecksController, type: :model do
describe '#check' do
let(:controller) { described_class.new }
let(:verbose) { false }
subject { controller.check }
before do
Healthcheck.configure do |config|
config.success = 200
config.error = 503
config.verbose = verbose
config.add_check :zero_division, -> { 100 / 0 }
end
end
after { subject }
context 'when check with success' do
before { allow_any_instance_of(Healthcheck::Check).to receive(:execute!) }
context 'without verbose setting' do
it 'returns success code' do
expect(controller).to receive(:head).with(Healthcheck.configuration.success).once
end
end
context 'with verbose setting' do
let(:verbose) { true }
it 'returns verbose response' do
expect(controller)
.to receive(:render)
.with(
status: Healthcheck.configuration.success,
json: {
code: Healthcheck.configuration.success,
status: { zero_division: 'OK' }
}
)
.once
end
end
end
context 'when check without success' do
context 'without verbose setting' do
it { expect(controller).to receive(:head).with(Healthcheck.configuration.error).once }
end
context 'with verbose setting' do
let(:verbose) { true }
it 'returns verbose response' do
expect(controller)
.to receive(:render)
.with(
json: {
code: Healthcheck.configuration.error,
errors: [
{
'exception' => 'ZeroDivisionError',
'message' => 'divided by 0',
'name' => 'zero_division'
}
]
},
status: Healthcheck.configuration.error
)
.once
end
end
end
context 'with custom' do
it do
Healthcheck.configure do |config|
config.custom = lambda { |controller, checker|
controller.head :ok if checker.success?
}
end
expect(Healthcheck.configuration.custom).to receive(:call).once
subject
Healthcheck.configure do |config|
config.custom = nil
end
end
end
end
end
| ruby | MIT | 8afb0f08ae2a7f3ace2432a1fc9d76e4478a1238 | 2026-01-04T17:45:59.256167Z | false |
linqueta/rails-healthcheck | https://github.com/linqueta/rails-healthcheck/blob/8afb0f08ae2a7f3ace2432a1fc9d76e4478a1238/spec/requests/healthcheck_spec.rb | spec/requests/healthcheck_spec.rb | # frozen_string_literal: true
require 'spec_helper'
RSpec.describe 'Healthcheck', type: :request do
describe '/healthcheck' do
subject { get '/healthcheck' }
before do
Healthcheck.configuration.add_check :year_check, -> { raise StandardError, 'year' if Time.current.year == 1969 }
Healthcheck.configuration.add_check :sum_check, -> { [1, 2, 3].sum == 6 }
end
context 'all ok' do
before { subject }
it { expect(response.code) == Healthcheck.configuration.success }
it { expect(response.body).to eq('') }
end
context 'with error in one check' do
context 'verbose true' do
before do
Healthcheck.configuration.verbose = true
Timecop.freeze(Time.parse('19690101Z')) { subject }
end
it { expect(response.code) == Healthcheck.configuration.error }
it { expect(response.body).not_to eq('') }
it do
expect(JSON.parse(response.body)).to eq(
'code' => Healthcheck.configuration.error,
'errors' => [
{
'exception' => 'StandardError',
'message' => 'year',
'name' => 'year_check'
}
]
)
end
end
context 'verbose false' do
before do
Healthcheck.configuration.verbose = false
Timecop.freeze(Time.parse('19690101')) { subject }
end
it { expect(response.code) == Healthcheck.configuration.error }
it { expect(response.body).to eq('') }
end
end
end
end
| ruby | MIT | 8afb0f08ae2a7f3ace2432a1fc9d76e4478a1238 | 2026-01-04T17:45:59.256167Z | false |
linqueta/rails-healthcheck | https://github.com/linqueta/rails-healthcheck/blob/8afb0f08ae2a7f3ace2432a1fc9d76e4478a1238/spec/dummy/app/jobs/application_job.rb | spec/dummy/app/jobs/application_job.rb | # frozen_string_literal: true
class ApplicationJob < ActiveJob::Base
# Automatically retry jobs that encountered a deadlock
# retry_on ActiveRecord::Deadlocked
# Most jobs are safe to ignore if the underlying records are no longer available
# discard_on ActiveJob::DeserializationError
end
| ruby | MIT | 8afb0f08ae2a7f3ace2432a1fc9d76e4478a1238 | 2026-01-04T17:45:59.256167Z | false |
linqueta/rails-healthcheck | https://github.com/linqueta/rails-healthcheck/blob/8afb0f08ae2a7f3ace2432a1fc9d76e4478a1238/spec/dummy/app/controllers/application_controller.rb | spec/dummy/app/controllers/application_controller.rb | # frozen_string_literal: true
class ApplicationController < ActionController::API
end
| ruby | MIT | 8afb0f08ae2a7f3ace2432a1fc9d76e4478a1238 | 2026-01-04T17:45:59.256167Z | false |
linqueta/rails-healthcheck | https://github.com/linqueta/rails-healthcheck/blob/8afb0f08ae2a7f3ace2432a1fc9d76e4478a1238/spec/dummy/config/application.rb | spec/dummy/config/application.rb | # frozen_string_literal: true
require_relative 'boot'
require 'rails'
# Pick the frameworks you want:
# require 'active_model/railtie'
# require 'active_job/railtie'
# require "active_record/railtie"
# require "active_storage/engine"
require 'action_controller/railtie'
# require "action_mailer/railtie"
# require "action_mailbox/engine"
# require "action_text/engine"
require 'action_view/railtie'
# require "action_cable/engine"
# require "sprockets/railtie"
# require "rails/test_unit/railtie"
# Require the gems listed in Gemfile, including any gems
# you've limited to :test, :development, or :production.
Bundler.require(*Rails.groups)
require 'rails-healthcheck'
module Dummy
class Application < Rails::Application
# Initialize configuration defaults for originally generated Rails version.
config.load_defaults 6.0
# Settings in config/environments/* take precedence over those specified here.
# Application configuration can go into files in config/initializers
# -- all .rb files in that directory are automatically loaded after loading
# the framework and any gems in your application.
# Only loads a smaller set of middleware suitable for API only apps.
# Middleware like session, flash, cookies can be added back manually.
# Skip views, helpers and assets when generating a new resource.
config.api_only = true
config.hosts.clear
end
end
| ruby | MIT | 8afb0f08ae2a7f3ace2432a1fc9d76e4478a1238 | 2026-01-04T17:45:59.256167Z | false |
linqueta/rails-healthcheck | https://github.com/linqueta/rails-healthcheck/blob/8afb0f08ae2a7f3ace2432a1fc9d76e4478a1238/spec/dummy/config/environment.rb | spec/dummy/config/environment.rb | # frozen_string_literal: true
# Load the Rails application.
require_relative 'application'
# Initialize the Rails application.
Rails.application.initialize!
| ruby | MIT | 8afb0f08ae2a7f3ace2432a1fc9d76e4478a1238 | 2026-01-04T17:45:59.256167Z | false |
linqueta/rails-healthcheck | https://github.com/linqueta/rails-healthcheck/blob/8afb0f08ae2a7f3ace2432a1fc9d76e4478a1238/spec/dummy/config/puma.rb | spec/dummy/config/puma.rb | # frozen_string_literal: true
# Puma can serve each request in a thread from an internal thread pool.
# The `threads` method setting takes two numbers: a minimum and maximum.
# Any libraries that use thread pools should be configured to match
# the maximum value specified for Puma. Default is set to 5 threads for minimum
# and maximum; this matches the default thread size of Active Record.
#
max_threads_count = ENV.fetch('RAILS_MAX_THREADS') { 5 }
min_threads_count = ENV.fetch('RAILS_MIN_THREADS') { max_threads_count }
threads min_threads_count, max_threads_count
# Specifies the `port` that Puma will listen on to receive requests; default is 3000.
#
port ENV.fetch('PORT') { 3000 }
# Specifies the `environment` that Puma will run in.
#
environment ENV.fetch('RAILS_ENV') { 'development' }
# Specifies the `pidfile` that Puma will use.
pidfile ENV.fetch('PIDFILE') { 'tmp/pids/server.pid' }
# Specifies the number of `workers` to boot in clustered mode.
# Workers are forked web server processes. If using threads and workers together
# the concurrency of the application would be max `threads` * `workers`.
# Workers do not work on JRuby or Windows (both of which do not support
# processes).
#
# workers ENV.fetch("WEB_CONCURRENCY") { 2 }
# Use the `preload_app!` method when specifying a `workers` number.
# This directive tells Puma to first boot the application and load code
# before forking the application. This takes advantage of Copy On Write
# process behavior so workers use less memory.
#
# preload_app!
# Allow puma to be restarted by `rails restart` command.
plugin :tmp_restart
| ruby | MIT | 8afb0f08ae2a7f3ace2432a1fc9d76e4478a1238 | 2026-01-04T17:45:59.256167Z | false |
linqueta/rails-healthcheck | https://github.com/linqueta/rails-healthcheck/blob/8afb0f08ae2a7f3ace2432a1fc9d76e4478a1238/spec/dummy/config/routes.rb | spec/dummy/config/routes.rb | # frozen_string_literal: true
Rails.application.routes.draw do
Healthcheck.routes(self)
end
| ruby | MIT | 8afb0f08ae2a7f3ace2432a1fc9d76e4478a1238 | 2026-01-04T17:45:59.256167Z | false |
linqueta/rails-healthcheck | https://github.com/linqueta/rails-healthcheck/blob/8afb0f08ae2a7f3ace2432a1fc9d76e4478a1238/spec/dummy/config/spring.rb | spec/dummy/config/spring.rb | # frozen_string_literal: true
Spring.watch(
'.ruby-version',
'.rbenv-vars',
'tmp/restart.txt',
'tmp/caching-dev.txt'
)
| ruby | MIT | 8afb0f08ae2a7f3ace2432a1fc9d76e4478a1238 | 2026-01-04T17:45:59.256167Z | false |
linqueta/rails-healthcheck | https://github.com/linqueta/rails-healthcheck/blob/8afb0f08ae2a7f3ace2432a1fc9d76e4478a1238/spec/dummy/config/boot.rb | spec/dummy/config/boot.rb | # frozen_string_literal: true
ENV['BUNDLE_GEMFILE'] ||= File.expand_path('../Gemfile', __dir__)
require 'bundler/setup' # Set up gems listed in the Gemfile.
| ruby | MIT | 8afb0f08ae2a7f3ace2432a1fc9d76e4478a1238 | 2026-01-04T17:45:59.256167Z | false |
linqueta/rails-healthcheck | https://github.com/linqueta/rails-healthcheck/blob/8afb0f08ae2a7f3ace2432a1fc9d76e4478a1238/spec/dummy/config/initializers/filter_parameter_logging.rb | spec/dummy/config/initializers/filter_parameter_logging.rb | # frozen_string_literal: true
# Be sure to restart your server when you modify this file.
# Configure sensitive parameters which will be filtered from the log file.
Rails.application.config.filter_parameters += [:password]
| ruby | MIT | 8afb0f08ae2a7f3ace2432a1fc9d76e4478a1238 | 2026-01-04T17:45:59.256167Z | false |
linqueta/rails-healthcheck | https://github.com/linqueta/rails-healthcheck/blob/8afb0f08ae2a7f3ace2432a1fc9d76e4478a1238/spec/dummy/config/initializers/application_controller_renderer.rb | spec/dummy/config/initializers/application_controller_renderer.rb | # frozen_string_literal: true
# Be sure to restart your server when you modify this file.
# ActiveSupport::Reloader.to_prepare do
# ApplicationController.renderer.defaults.merge!(
# http_host: 'example.org',
# https: false
# )
# end
| ruby | MIT | 8afb0f08ae2a7f3ace2432a1fc9d76e4478a1238 | 2026-01-04T17:45:59.256167Z | false |
linqueta/rails-healthcheck | https://github.com/linqueta/rails-healthcheck/blob/8afb0f08ae2a7f3ace2432a1fc9d76e4478a1238/spec/dummy/config/initializers/healthcheck.rb | spec/dummy/config/initializers/healthcheck.rb | # frozen_string_literal: true
Healthcheck.configure do |config|
config.success = 200
config.error = 503
config.verbose = false
config.route = '/healthcheck'
config.method = :get
end
| ruby | MIT | 8afb0f08ae2a7f3ace2432a1fc9d76e4478a1238 | 2026-01-04T17:45:59.256167Z | false |
linqueta/rails-healthcheck | https://github.com/linqueta/rails-healthcheck/blob/8afb0f08ae2a7f3ace2432a1fc9d76e4478a1238/spec/dummy/config/initializers/wrap_parameters.rb | spec/dummy/config/initializers/wrap_parameters.rb | # frozen_string_literal: true
# Be sure to restart your server when you modify this file.
# This file contains settings for ActionController::ParamsWrapper which
# is enabled by default.
# Enable parameter wrapping for JSON. You can disable this by setting :format to an empty array.
ActiveSupport.on_load(:action_controller) do
wrap_parameters format: [:json]
end
| ruby | MIT | 8afb0f08ae2a7f3ace2432a1fc9d76e4478a1238 | 2026-01-04T17:45:59.256167Z | false |
linqueta/rails-healthcheck | https://github.com/linqueta/rails-healthcheck/blob/8afb0f08ae2a7f3ace2432a1fc9d76e4478a1238/spec/dummy/config/initializers/inflections.rb | spec/dummy/config/initializers/inflections.rb | # frozen_string_literal: true
# Be sure to restart your server when you modify this file.
# Add new inflection rules using the following format. Inflections
# are locale specific, and you may define rules for as many different
# locales as you wish. All of these examples are active by default:
# ActiveSupport::Inflector.inflections(:en) do |inflect|
# inflect.plural /^(ox)$/i, '\1en'
# inflect.singular /^(ox)en/i, '\1'
# inflect.irregular 'person', 'people'
# inflect.uncountable %w( fish sheep )
# end
# These inflection rules are supported but not enabled by default:
# ActiveSupport::Inflector.inflections(:en) do |inflect|
# inflect.acronym 'RESTful'
# end
| ruby | MIT | 8afb0f08ae2a7f3ace2432a1fc9d76e4478a1238 | 2026-01-04T17:45:59.256167Z | false |
linqueta/rails-healthcheck | https://github.com/linqueta/rails-healthcheck/blob/8afb0f08ae2a7f3ace2432a1fc9d76e4478a1238/spec/dummy/config/initializers/backtrace_silencers.rb | spec/dummy/config/initializers/backtrace_silencers.rb | # frozen_string_literal: true
# Be sure to restart your server when you modify this file.
# You can add backtrace silencers for libraries that you're using but don't wish to see in your backtraces.
# Rails.backtrace_cleaner.add_silencer { |line| line =~ /my_noisy_library/ }
# You can also remove all the silencers if you're trying to debug a problem that might stem from framework code.
# Rails.backtrace_cleaner.remove_silencers!
| ruby | MIT | 8afb0f08ae2a7f3ace2432a1fc9d76e4478a1238 | 2026-01-04T17:45:59.256167Z | false |
linqueta/rails-healthcheck | https://github.com/linqueta/rails-healthcheck/blob/8afb0f08ae2a7f3ace2432a1fc9d76e4478a1238/spec/dummy/config/initializers/mime_types.rb | spec/dummy/config/initializers/mime_types.rb | # frozen_string_literal: true
# Be sure to restart your server when you modify this file.
# Add new mime types for use in respond_to blocks:
# Mime::Type.register "text/richtext", :rtf
| ruby | MIT | 8afb0f08ae2a7f3ace2432a1fc9d76e4478a1238 | 2026-01-04T17:45:59.256167Z | false |
linqueta/rails-healthcheck | https://github.com/linqueta/rails-healthcheck/blob/8afb0f08ae2a7f3ace2432a1fc9d76e4478a1238/spec/dummy/config/initializers/cors.rb | spec/dummy/config/initializers/cors.rb | # frozen_string_literal: true
# Be sure to restart your server when you modify this file.
# Avoid CORS issues when API is called from the frontend app.
# Handle Cross-Origin Resource Sharing (CORS) in order to accept cross-origin AJAX requests.
# Read more: https://github.com/cyu/rack-cors
# Rails.application.config.middleware.insert_before 0, Rack::Cors do
# allow do
# origins 'example.com'
#
# resource '*',
# headers: :any,
# methods: [:get, :post, :put, :patch, :delete, :options, :head]
# end
# end
| ruby | MIT | 8afb0f08ae2a7f3ace2432a1fc9d76e4478a1238 | 2026-01-04T17:45:59.256167Z | false |
linqueta/rails-healthcheck | https://github.com/linqueta/rails-healthcheck/blob/8afb0f08ae2a7f3ace2432a1fc9d76e4478a1238/spec/dummy/config/environments/test.rb | spec/dummy/config/environments/test.rb | # frozen_string_literal: true
# The test environment is used exclusively to run your application's
# test suite. You never need to work with it otherwise. Remember that
# your test database is "scratch space" for the test suite and is wiped
# and recreated between test runs. Don't rely on the data there!
Rails.application.configure do
# Settings specified here will take precedence over those in config/application.rb.
config.cache_classes = false
# Do not eager load code on boot. This avoids loading your whole application
# just for the purpose of running a single test. If you are using a tool that
# preloads Rails for running tests, you may have to set it to true.
config.eager_load = false
# Configure public file server for tests with Cache-Control for performance.
config.public_file_server.enabled = true
config.public_file_server.headers = {
'Cache-Control' => "public, max-age=#{1.hour.to_i}"
}
# Show full error reports and disable caching.
config.consider_all_requests_local = true
config.action_controller.perform_caching = false
config.cache_store = :null_store
# Raise exceptions instead of rendering exception templates.
config.action_dispatch.show_exceptions = false
# Disable request forgery protection in test environment.
config.action_controller.allow_forgery_protection = false
# Print deprecation notices to the stderr.
config.active_support.deprecation = :stderr
# Raises error for missing translations.
# config.action_view.raise_on_missing_translations = true
end
| ruby | MIT | 8afb0f08ae2a7f3ace2432a1fc9d76e4478a1238 | 2026-01-04T17:45:59.256167Z | false |
linqueta/rails-healthcheck | https://github.com/linqueta/rails-healthcheck/blob/8afb0f08ae2a7f3ace2432a1fc9d76e4478a1238/spec/dummy/config/environments/development.rb | spec/dummy/config/environments/development.rb | # frozen_string_literal: true
Rails.application.configure do
# Settings specified here will take precedence over those in config/application.rb.
# In the development environment your application's code is reloaded on
# every request. This slows down response time but is perfect for development
# since you don't have to restart the web server when you make code changes.
config.cache_classes = false
# Do not eager load code on boot.
config.eager_load = false
# Show full error reports.
config.consider_all_requests_local = true
# Enable/disable caching. By default caching is disabled.
# Run rails dev:cache to toggle caching.
if Rails.root.join('tmp', 'caching-dev.txt').exist?
config.cache_store = :memory_store
config.public_file_server.headers = {
'Cache-Control' => "public, max-age=#{2.days.to_i}"
}
else
config.action_controller.perform_caching = false
config.cache_store = :null_store
end
# Print deprecation notices to the Rails logger.
config.active_support.deprecation = :log
# Raises error for missing translations.
# config.action_view.raise_on_missing_translations = true
# Use an evented file watcher to asynchronously detect changes in source code,
# routes, locales, etc. This feature depends on the listen gem.
# config.file_watcher = ActiveSupport::EventedFileUpdateChecker
end
| ruby | MIT | 8afb0f08ae2a7f3ace2432a1fc9d76e4478a1238 | 2026-01-04T17:45:59.256167Z | false |
linqueta/rails-healthcheck | https://github.com/linqueta/rails-healthcheck/blob/8afb0f08ae2a7f3ace2432a1fc9d76e4478a1238/spec/dummy/config/environments/production.rb | spec/dummy/config/environments/production.rb | # frozen_string_literal: true
Rails.application.configure do
# Settings specified here will take precedence over those in config/application.rb.
# Code is not reloaded between requests.
config.cache_classes = true
# Eager load code on boot. This eager loads most of Rails and
# your application in memory, allowing both threaded web servers
# and those relying on copy on write to perform better.
# Rake tasks automatically ignore this option for performance.
config.eager_load = true
# Full error reports are disabled and caching is turned on.
config.consider_all_requests_local = false
# Ensures that a master key has been made available in either ENV["RAILS_MASTER_KEY"]
# or in config/master.key. This key is used to decrypt credentials (and other encrypted files).
# config.require_master_key = true
# Disable serving static files from the `/public` folder by default since
# Apache or NGINX already handles this.
config.public_file_server.enabled = ENV['RAILS_SERVE_STATIC_FILES'].present?
# Enable serving of images, stylesheets, and JavaScripts from an asset server.
# config.action_controller.asset_host = 'http://assets.example.com'
# Specifies the header that your server uses for sending files.
# config.action_dispatch.x_sendfile_header = 'X-Sendfile' # for Apache
# config.action_dispatch.x_sendfile_header = 'X-Accel-Redirect' # for NGINX
# Force all access to the app over SSL, use Strict-Transport-Security, and use secure cookies.
# config.force_ssl = true
# Use the lowest log level to ensure availability of diagnostic information
# when problems arise.
config.log_level = :debug
# Prepend all log lines with the following tags.
config.log_tags = [:request_id]
# Use a different cache store in production.
# config.cache_store = :mem_cache_store
# Use a real queuing backend for Active Job (and separate queues per environment).
# config.active_job.queue_adapter = :resque
# config.active_job.queue_name_prefix = "dummy_production"
# Enable locale fallbacks for I18n (makes lookups for any locale fall back to
# the I18n.default_locale when a translation cannot be found).
config.i18n.fallbacks = true
# Send deprecation notices to registered listeners.
config.active_support.deprecation = :notify
# Use default logging formatter so that PID and timestamp are not suppressed.
config.log_formatter = ::Logger::Formatter.new
# Use a different logger for distributed setups.
# require 'syslog/logger'
# config.logger = ActiveSupport::TaggedLogging.new(Syslog::Logger.new 'app-name')
if ENV['RAILS_LOG_TO_STDOUT'].present?
logger = ActiveSupport::Logger.new(STDOUT)
logger.formatter = config.log_formatter
config.logger = ActiveSupport::TaggedLogging.new(logger)
end
# Inserts middleware to perform automatic connection switching.
# The `database_selector` hash is used to pass options to the DatabaseSelector
# middleware. The `delay` is used to determine how long to wait after a write
# to send a subsequent read to the primary.
#
# The `database_resolver` class is used by the middleware to determine which
# database is appropriate to use based on the time delay.
#
# The `database_resolver_context` class is used by the middleware to set
# timestamps for the last write to the primary. The resolver uses the context
# class timestamps to determine how long to wait before reading from the
# replica.
#
# By default Rails will store a last write timestamp in the session. The
# DatabaseSelector middleware is designed as such you can define your own
# strategy for connection switching and pass that into the middleware through
# these configuration options.
# config.active_record.database_selector = { delay: 2.seconds }
# config.active_record.database_resolver = ActiveRecord::Middleware::DatabaseSelector::Resolver
# config.active_record.database_resolver_context = ActiveRecord::Middleware::DatabaseSelector::Resolver::Session
end
| ruby | MIT | 8afb0f08ae2a7f3ace2432a1fc9d76e4478a1238 | 2026-01-04T17:45:59.256167Z | false |
linqueta/rails-healthcheck | https://github.com/linqueta/rails-healthcheck/blob/8afb0f08ae2a7f3ace2432a1fc9d76e4478a1238/spec/healthcheck/check_spec.rb | spec/healthcheck/check_spec.rb | # frozen_string_literal: true
require 'spec_helper'
RSpec.describe Healthcheck::Check, type: :model do
let(:name) { :zero_division }
let(:block) { -> { 100 / 0 } }
subject { described_class.new(name, block) }
it { is_expected.to be_a(described_class) }
it { expect(subject.name).to eq(name) }
it { expect(subject.block).to eq(block) }
end
| ruby | MIT | 8afb0f08ae2a7f3ace2432a1fc9d76e4478a1238 | 2026-01-04T17:45:59.256167Z | false |
linqueta/rails-healthcheck | https://github.com/linqueta/rails-healthcheck/blob/8afb0f08ae2a7f3ace2432a1fc9d76e4478a1238/spec/healthcheck/router_spec.rb | spec/healthcheck/router_spec.rb | # frozen_string_literal: true
require 'spec_helper'
RSpec.describe Healthcheck::Router, type: :model do
describe '#mount' do
let(:router) { double('Router') }
subject { described_class.mount(router) }
before do
Healthcheck.configure do |config|
config.method = :get
config.route = '/healthcheck'
end
end
after { subject }
it do
expect(router).to receive(:send).with(
Healthcheck.configuration.method,
Healthcheck.configuration.route => 'healthcheck/healthchecks#check'
)
end
end
end
| ruby | MIT | 8afb0f08ae2a7f3ace2432a1fc9d76e4478a1238 | 2026-01-04T17:45:59.256167Z | false |
linqueta/rails-healthcheck | https://github.com/linqueta/rails-healthcheck/blob/8afb0f08ae2a7f3ace2432a1fc9d76e4478a1238/spec/healthcheck/configuration_spec.rb | spec/healthcheck/configuration_spec.rb | # frozen_string_literal: true
require 'spec_helper'
RSpec.describe Healthcheck::Configuration, type: :model do
describe '#initialize' do
subject { described_class.new }
it { is_expected.to be_a(described_class) }
it { expect(subject.success).to be_nil }
it { expect(subject.error).to be_nil }
it { expect(subject.verbose).to be_nil }
it { expect(subject.route).to be_nil }
it { expect(subject.method).to be_nil }
it { expect(subject.custom).to be_nil }
it { expect(subject.checks).to be_empty }
end
describe '#add_check' do
let(:name) { :zero_division }
let(:block) { -> { 100 / 0 } }
let(:instance) { described_class.new }
subject { instance.add_check(name, block) }
before { subject }
it { expect(instance.checks).not_to be_empty }
it { expect(instance.checks.length).to eq(1) }
it { expect(instance.checks.first).to be_a(Healthcheck::Check) }
end
end
| ruby | MIT | 8afb0f08ae2a7f3ace2432a1fc9d76e4478a1238 | 2026-01-04T17:45:59.256167Z | false |
linqueta/rails-healthcheck | https://github.com/linqueta/rails-healthcheck/blob/8afb0f08ae2a7f3ace2432a1fc9d76e4478a1238/spec/healthcheck/error_spec.rb | spec/healthcheck/error_spec.rb | # frozen_string_literal: true
require 'spec_helper'
RSpec.describe Healthcheck::Error, type: :model do
describe '#initialize' do
let(:name) { :migrations }
let(:exception) { StandardError }
let(:message) { 'Migrations are pending. To resolve this issue, run: bin/rails db:migrate RAILS_ENV=production' }
subject { described_class.new(name, exception, message) }
it { is_expected.to be_a(described_class) }
it { expect(subject.name).to eq(name) }
it { expect(subject.exception).to eq(exception.to_s) }
it { expect(subject.message).to eq(message) }
end
end
| ruby | MIT | 8afb0f08ae2a7f3ace2432a1fc9d76e4478a1238 | 2026-01-04T17:45:59.256167Z | false |
linqueta/rails-healthcheck | https://github.com/linqueta/rails-healthcheck/blob/8afb0f08ae2a7f3ace2432a1fc9d76e4478a1238/spec/healthcheck/checker_spec.rb | spec/healthcheck/checker_spec.rb | # frozen_string_literal: true
require 'spec_helper'
RSpec.describe Healthcheck::Checker, type: :model do
describe '#initialize' do
it { is_expected.to be_a(described_class) }
it { expect(subject.errors).to be_empty }
end
describe '#check' do
let(:checker) { described_class.new }
subject { checker.check }
before do
Healthcheck.configure do |config|
config.add_check :zero_division, -> { 100 / 0 }
config.add_check :standard_error, -> { raise StandardError }
end
end
context 'with errors' do
let(:errors) { checker.errors }
let(:standard_error) { errors.find { |error| error.name == :standard_error } }
let(:zero_division) { errors.find { |error| error.name == :zero_division } }
before { subject }
it 'has two errors' do
expect(errors.length).to eq(2)
expect(standard_error.name).to eq(:standard_error)
expect(standard_error.exception).to eq('StandardError')
expect(standard_error.message).to eq('StandardError')
expect(zero_division.name).to eq(:zero_division)
expect(zero_division.exception).to eq('ZeroDivisionError')
expect(zero_division.message).to eq('divided by 0')
end
end
context 'without errors' do
before do
allow_any_instance_of(Healthcheck::Check).to receive(:execute!)
subject
end
it 'hasnt errors' do
expect(checker.errors).to be_empty
end
end
end
describe '#errored?' do
let(:checker) { described_class.new }
subject { checker.errored? }
context 'without errors' do
it { is_expected.to be_falsey }
end
context 'with errors' do
before { checker.errors << 1 }
it { is_expected.to be_truthy }
end
end
end
| ruby | MIT | 8afb0f08ae2a7f3ace2432a1fc9d76e4478a1238 | 2026-01-04T17:45:59.256167Z | false |
linqueta/rails-healthcheck | https://github.com/linqueta/rails-healthcheck/blob/8afb0f08ae2a7f3ace2432a1fc9d76e4478a1238/lib/healthcheck.rb | lib/healthcheck.rb | # frozen_string_literal: true
require 'healthcheck/version'
require 'healthcheck/configuration'
require 'healthcheck/check'
require 'healthcheck/checker'
require 'healthcheck/error'
require 'healthcheck/router'
require 'healthcheck/engine'
require 'healthcheck/response/base'
require 'healthcheck/response/success'
require 'healthcheck/response/error'
module Healthcheck
CONTROLLER_ACTION = 'Healthcheck::HealthchecksController#check'
module_function
def configure
yield(configuration)
end
def configuration
@configuration ||= Configuration.new
end
def routes(router)
Router.mount(router)
end
def check
Checker.new.tap(&:check)
end
def custom!(controller)
configuration.custom.call(controller, check)
end
def custom?
configuration.custom
end
end
| ruby | MIT | 8afb0f08ae2a7f3ace2432a1fc9d76e4478a1238 | 2026-01-04T17:45:59.256167Z | false |
linqueta/rails-healthcheck | https://github.com/linqueta/rails-healthcheck/blob/8afb0f08ae2a7f3ace2432a1fc9d76e4478a1238/lib/rails-healthcheck.rb | lib/rails-healthcheck.rb | # frozen_string_literal: true
require 'healthcheck'
| ruby | MIT | 8afb0f08ae2a7f3ace2432a1fc9d76e4478a1238 | 2026-01-04T17:45:59.256167Z | false |
linqueta/rails-healthcheck | https://github.com/linqueta/rails-healthcheck/blob/8afb0f08ae2a7f3ace2432a1fc9d76e4478a1238/lib/generators/healthcheck/install_generator.rb | lib/generators/healthcheck/install_generator.rb | # frozen_string_literal: true
require 'rails/generators/base'
module Healthcheck
class InstallGenerator < Rails::Generators::Base
desc 'It creates an initializer to set the healthcheck settings'
def create_initializer_file
create_file(
'config/initializers/healthcheck.rb',
<<~HEALTHCHECK_INITIALIZER_TEXT
# frozen_string_literal: true
Healthcheck.configure do |config|
config.success = 200
config.error = 503
config.verbose = false
config.route = '/healthcheck'
config.method = :get
# -- Custom Response --
# config.custom = lambda { |controller, checker|
# return controller.render(plain: 'Everything is awesome!') unless checker.errored?
# controller.verbose? ? controller.verbose_error(checker) : controller.head_error
# }
# -- Checks --
# config.add_check :database, -> { ActiveRecord::Base.connection.execute('select 1') }
# config.add_check :migrations, -> { ActiveRecord::Migration.check_pending! }
# config.add_check :cache, -> { Rails.cache.read('some_key') }
# config.add_check :environments, -> { Dotenv.require_keys('ENV_NAME', 'ANOTHER_ENV') }
end
HEALTHCHECK_INITIALIZER_TEXT
)
route 'Healthcheck.routes(self)'
end
end
end
| ruby | MIT | 8afb0f08ae2a7f3ace2432a1fc9d76e4478a1238 | 2026-01-04T17:45:59.256167Z | false |
linqueta/rails-healthcheck | https://github.com/linqueta/rails-healthcheck/blob/8afb0f08ae2a7f3ace2432a1fc9d76e4478a1238/lib/healthcheck/version.rb | lib/healthcheck/version.rb | # frozen_string_literal: true
module Healthcheck
VERSION = '1.4.0'
end
| ruby | MIT | 8afb0f08ae2a7f3ace2432a1fc9d76e4478a1238 | 2026-01-04T17:45:59.256167Z | false |
linqueta/rails-healthcheck | https://github.com/linqueta/rails-healthcheck/blob/8afb0f08ae2a7f3ace2432a1fc9d76e4478a1238/lib/healthcheck/router.rb | lib/healthcheck/router.rb | # frozen_string_literal: true
module Healthcheck
class Router
def self.mount(router)
router.send(
Healthcheck.configuration.method,
Healthcheck.configuration.route => 'healthcheck/healthchecks#check'
)
end
end
end
| ruby | MIT | 8afb0f08ae2a7f3ace2432a1fc9d76e4478a1238 | 2026-01-04T17:45:59.256167Z | false |
linqueta/rails-healthcheck | https://github.com/linqueta/rails-healthcheck/blob/8afb0f08ae2a7f3ace2432a1fc9d76e4478a1238/lib/healthcheck/configuration.rb | lib/healthcheck/configuration.rb | # frozen_string_literal: true
module Healthcheck
class Configuration
SETTINGS = %i[success error verbose route method checks custom].freeze
attr_accessor(*SETTINGS)
def initialize
clear!
end
def add_check(name, block)
@checks << Check.new(name, block)
end
def clear!
@checks = []
end
end
end
| ruby | MIT | 8afb0f08ae2a7f3ace2432a1fc9d76e4478a1238 | 2026-01-04T17:45:59.256167Z | false |
linqueta/rails-healthcheck | https://github.com/linqueta/rails-healthcheck/blob/8afb0f08ae2a7f3ace2432a1fc9d76e4478a1238/lib/healthcheck/check.rb | lib/healthcheck/check.rb | # frozen_string_literal: true
module Healthcheck
class Check
attr_accessor :name, :block
def initialize(name, block)
@name = name
@block = block
end
def execute!
block.call
end
end
end
| ruby | MIT | 8afb0f08ae2a7f3ace2432a1fc9d76e4478a1238 | 2026-01-04T17:45:59.256167Z | false |
linqueta/rails-healthcheck | https://github.com/linqueta/rails-healthcheck/blob/8afb0f08ae2a7f3ace2432a1fc9d76e4478a1238/lib/healthcheck/engine.rb | lib/healthcheck/engine.rb | # frozen_string_literal: true
require 'rails'
module Healthcheck
class Engine < Rails::Engine
end
end
| ruby | MIT | 8afb0f08ae2a7f3ace2432a1fc9d76e4478a1238 | 2026-01-04T17:45:59.256167Z | false |
linqueta/rails-healthcheck | https://github.com/linqueta/rails-healthcheck/blob/8afb0f08ae2a7f3ace2432a1fc9d76e4478a1238/lib/healthcheck/error.rb | lib/healthcheck/error.rb | # frozen_string_literal: true
module Healthcheck
class Error
attr_accessor :name, :exception, :message
def initialize(name, exception, message)
@name = name
@exception = exception.to_s
@message = message&.squish
end
end
end
| ruby | MIT | 8afb0f08ae2a7f3ace2432a1fc9d76e4478a1238 | 2026-01-04T17:45:59.256167Z | false |
linqueta/rails-healthcheck | https://github.com/linqueta/rails-healthcheck/blob/8afb0f08ae2a7f3ace2432a1fc9d76e4478a1238/lib/healthcheck/checker.rb | lib/healthcheck/checker.rb | # frozen_string_literal: true
module Healthcheck
class Checker
attr_accessor :errors
def initialize
@errors = []
end
def check
Healthcheck.configuration
.checks
.map { |c| Thread.new { execute(c) } }
.each(&:join)
end
def errored?
@errors.any?
end
private
def execute(check)
check.execute!
rescue StandardError => e
@errors << Error.new(check.name, e.class, e.message)
end
end
end
| ruby | MIT | 8afb0f08ae2a7f3ace2432a1fc9d76e4478a1238 | 2026-01-04T17:45:59.256167Z | false |
linqueta/rails-healthcheck | https://github.com/linqueta/rails-healthcheck/blob/8afb0f08ae2a7f3ace2432a1fc9d76e4478a1238/lib/healthcheck/response/success.rb | lib/healthcheck/response/success.rb | # frozen_string_literal: true
module Healthcheck
module Response
class Success < Base
def verbose
{
status: @configuration.success,
json: {
code: @configuration.success,
status: @configuration.checks.each_with_object({}) { |check, obj| obj[check.name] = 'OK' }
}
}
end
def status
@configuration.success
end
end
end
end
| ruby | MIT | 8afb0f08ae2a7f3ace2432a1fc9d76e4478a1238 | 2026-01-04T17:45:59.256167Z | false |
linqueta/rails-healthcheck | https://github.com/linqueta/rails-healthcheck/blob/8afb0f08ae2a7f3ace2432a1fc9d76e4478a1238/lib/healthcheck/response/base.rb | lib/healthcheck/response/base.rb | # frozen_string_literal: true
module Healthcheck
module Response
class Base
def initialize(controller, checker)
@controller = controller
@checker = checker
@configuration = Healthcheck.configuration
end
def execute!
verbose? ? @controller.render(verbose) : @controller.head(status)
end
private
def verbose?
@configuration.verbose
end
end
end
end
| ruby | MIT | 8afb0f08ae2a7f3ace2432a1fc9d76e4478a1238 | 2026-01-04T17:45:59.256167Z | false |
linqueta/rails-healthcheck | https://github.com/linqueta/rails-healthcheck/blob/8afb0f08ae2a7f3ace2432a1fc9d76e4478a1238/lib/healthcheck/response/error.rb | lib/healthcheck/response/error.rb | # frozen_string_literal: true
module Healthcheck
module Response
class Error < Base
def verbose
{
status: Healthcheck.configuration.error,
json: {
code: Healthcheck.configuration.error,
errors: @checker.errors.as_json
}
}
end
def status
@configuration.error
end
end
end
end
| ruby | MIT | 8afb0f08ae2a7f3ace2432a1fc9d76e4478a1238 | 2026-01-04T17:45:59.256167Z | false |
estebanz01/ruby-statistics | https://github.com/estebanz01/ruby-statistics/blob/0ac388ab734120bd708164a10261b25b53faf29d/spec/spec_helper.rb | spec/spec_helper.rb | require "bundler/setup"
require 'byebug'
require 'ruby-statistics'
RSpec.configure do |config|
# Enable flags like --only-failures and --next-failure
config.example_status_persistence_file_path = ".rspec_status"
# Disable RSpec exposing methods globally on `Module` and `main`
config.disable_monkey_patching!
config.expose_dsl_globally = true
config.expect_with :rspec do |c|
c.syntax = :expect
end
end
| ruby | MIT | 0ac388ab734120bd708164a10261b25b53faf29d | 2026-01-04T17:46:01.529291Z | false |
estebanz01/ruby-statistics | https://github.com/estebanz01/ruby-statistics/blob/0ac388ab734120bd708164a10261b25b53faf29d/spec/ruby-statistics/bigdecimal_spec.rb | spec/ruby-statistics/bigdecimal_spec.rb | require 'spec_helper'
require 'bigdecimal'
require 'bigdecimal/util'
describe BigDecimal do
context 'when bigdecimal is passed to personalized math functions' do
it 'truncates the decimal numbers and calculates the factorial for the real part' do
expect(Math.factorial(BigDecimal(5.45354598234834, 16))).to eq 120
end
it 'calculates the possible permutations of k objects from a set of n elements' do
expect(Math.permutation(BigDecimal(15, 1), BigDecimal(4, 1))).to eq 32760
expect(Math.permutation(BigDecimal(16, 1), BigDecimal(3, 1))).to eq 3360 # 16 balls, choose 3.
expect(Math.permutation(BigDecimal(10, 1), BigDecimal(2, 1))).to eq 90 # 10 people to select 1th and 2nd place.
end
it 'calculates the possible combinations of k object from a set of n elements' do
expect(Math.combination(BigDecimal(16, 1), BigDecimal(3, 1))).to eq 560 # Order 16 balls in 3 ways.
expect(Math.combination(BigDecimal(5, 1), BigDecimal(3, 1))).to eq 10 # How to choose 3 people out of 5.
expect(Math.combination(BigDecimal(12, 1), BigDecimal(5, 1))).to eq 792 # How to choose 5 games out of 12.
end
it 'approximates a solution in the [a,b] interval for the integral of the specified function' do
lower = BigDecimal(rand(1..10), 1)
upper = BigDecimal(rand(11..20), 1)
function_a = Math.simpson_rule(lower, upper, 10_000) do |x|
x ** 2
end
function_b = Math.simpson_rule(lower, upper, 10_000) do |x|
Math.sin(x)
end
res_a = ((upper ** BigDecimal(3,0))/BigDecimal(3,0)) - ((lower ** BigDecimal(3,0))/BigDecimal(3,0)) # Integral of x^2
res_b = -Math.cos(upper) + Math.cos(lower) # Integral of sin(x)
expect(function_a.to_f.floor).to be_within(0.0001).of(res_a.to_f.floor)
expect(function_b.to_f.floor).to be_within(0.0001).of(res_b.to_f.floor)
end
it 'returns the expected calculationi for the lower incomplete gamma function' do
results = [0.6322, 0.594, 1.1536, 3.3992, 13.4283]
(1..5).each_with_index do |number, index|
expect(
Math.lower_incomplete_gamma_function(
BigDecimal(number, 1), BigDecimal(number, 1)
)
).to be_within(0.0001).of(results[index])
end
end
it 'returns 1 for the special case x = y = 1 when calculating the beta function' do
expect(Math.beta_function(BigDecimal(1, 1), BigDecimal(1, 1))).to eq 1
end
it 'Calculates the expected values for the beta function' do
# TODO: Find a way to better test this instead of fixing some values.
result = [1, 0.1667, 0.0333, 0.0071, 0.0016]
(1..5).each_with_index do |number, index|
expectation = Math.beta_function(
BigDecimal(number, 1), BigDecimal(number, 1)
)
expect(expectation).to be_within(0.0001).of(result[index])
end
end
it 'calculates the expected values for the incomplete beta function' do
# The last 2 values:
# For 9 is 0.9999979537560903519733 which is rounding to 1.0
# For 10 is 1.0
results = [
0.19, 0.1808, 0.2557, 0.4059,
0.6230, 0.8418, 0.9685, BigDecimal(0.9985, 5),
BigDecimal(0.9999979537560903, 5), 1.0
]
(1..10).each_with_index do |number, index|
expect(
Math.incomplete_beta_function(
(number/10.0).to_d(16), BigDecimal(number, 1), BigDecimal(number + 1, 1)
)
).to be_within(0.0001).of(results[index])
end
end
end
context 'when bigdecimal is used with chi squared distributions' do
context 'With degrees of freedom from 1 to 30' do
it 'returns the expected probabilities for the chi-squared distribution compared to a table' do
alpha = 0.100
values = RubyStatistics::Distribution::Tables::ChiSquared.alpha_column(alpha).map { |x|
{
df: x[:df],
bd: BigDecimal(x[:critical_value], 5)
}
}[0, 30]
values.each do |p|
result = 1.0 - RubyStatistics::Distribution::ChiSquared.new(p[:df]).cumulative_function(p[:bd])
expect(result).to be_within(0.0001).of(alpha)
end
end
end
context 'With degrees of freedom from 40 to 100, with a 10 unit increment' do
it 'returns the expected probabilities for the chi-squared distribution compared to a table' do
alpha = 0.100
values = RubyStatistics::Distribution::Tables::ChiSquared.alpha_column(alpha).map { |x|
{
df: x[:df],
bd: BigDecimal(x[:critical_value], 5)
}
}[30, 7]
values.each do |p|
result = 1.0 - RubyStatistics::Distribution::ChiSquared.new(p[:df]).cumulative_function(p[:bd])
expect(result).to be_within(0.0001).of(alpha)
end
end
end
end
context 'when bigdecimal is used in chi squared tests' do
it 'perform a goodness of fit test following example ONE' do
pending 'It is giving a less accurate p-value when using BigDecimal. It passes on Float numbers.'
observed_counts = [
BigDecimal(212, 1), BigDecimal(147, 1), BigDecimal(103, 1),
BigDecimal(50, 1), BigDecimal(46, 1), BigDecimal(42, 1)
]
expected = BigDecimal(100, 1)
result = StatisticalTest::ChiSquaredTest.goodness_of_fit(0.05, expected, observed_counts)
# We cannot get exact p-values as it's dependant on the precision and the machine, therefore
# we use a limit criteria defined by R in 4.4.1.
# Here's the output for the same configuration:
# > observed <- c(212, 147, 103, 50, 46, 42)
# > expected <- c(100, 100, 100, 100, 100, 100)
# > chisq.test(observed, p = expected, rescale.p = TRUE)
# Chi-squared test for given probabilities
#
# data: observed
# X-squared = 235.42, df = 5, p-value < 2.2e-16
expect(result[:p_value]).to be <= 2.2e-16 # This matches the criteria used in R 4.4.1
expect(result[:null]).to be false
expect(result[:alternative]).to be true
end
it 'perform a goodness of fit test following example TWO' do
observed = [
BigDecimal(224, 1), BigDecimal(119, 1), BigDecimal(130, 1),
BigDecimal(48, 1), BigDecimal(59, 1)
]
expected = [
BigDecimal(232, 1), BigDecimal(116, 1), BigDecimal(116, 1),
BigDecimal(58, 1), BigDecimal(58, 1)
]
result = StatisticalTest::ChiSquaredTest.goodness_of_fit(0.05, expected, observed)
expect(result[:p_value]).to be_within(0.0001).of(0.4359)
expect(result[:null]).to be true
expect(result[:alternative]).to be false
end
# The following test is based on the numbers reported in https://github.com/estebanz01/ruby-statistics/issues/78
# which give us a minimum test case scenario where the integral being solved with simpson's rule
# uses zero iterations, raising errors.
it 'performs a goodness of fit test with values that generates small chi statistics' do
observed_counts = [
BigDecimal(481, 1), BigDecimal(483, 1),
BigDecimal(482, 1), BigDecimal(488, 1),
BigDecimal(478, 1), BigDecimal(471, 1),
BigDecimal(477, 1), BigDecimal(479, 1),
BigDecimal(475, 1), BigDecimal(462, 1)
]
expected = BigDecimal(477, 1)
result = {}
expect do
result = StatisticalTest::ChiSquaredTest.goodness_of_fit(0.01, expected, observed_counts)
end.not_to raise_error
expect(result[:p_value]).to be_within(0.0001).of(0.9995)
expect(result[:null]).to be true
expect(result[:alternative]).to be false
end
end
context 'when bigdecimal is used in F tests' do
# example ONE
# explained here: https://courses.lumenlearning.com/boundless-statistics/chapter/one-way-anova
# Four sororities took a random sample of sisters regarding their grade means for the past term.
# Using a significance level of 1%, is there a difference in mean grades among the sororities?
it 'performs a One way ANOVA Test following example ONE' do
sorority_one = [
BigDecimal(2.17, 3), BigDecimal(1.85, 3), BigDecimal(2.83, 3),
BigDecimal(1.69, 3), BigDecimal(3.33, 3)
]
sorority_two = [
BigDecimal(2.63, 3), BigDecimal(1.77, 3), BigDecimal(3.25, 3),
BigDecimal(1.86, 3), BigDecimal(2.21, 3)
]
sorority_three = [
BigDecimal(2.63, 3), BigDecimal(3.78, 3), BigDecimal(4.00, 3),
BigDecimal(2.55, 3), BigDecimal(2.45, 3)
]
sorority_four = [
BigDecimal(3.79, 3), BigDecimal(3.45, 3), BigDecimal(3.08, 3),
BigDecimal(2.26, 3), BigDecimal(3.18, 3)
]
alpha = 0.1
result = StatisticalTest::FTest.one_way_anova(alpha,
sorority_one,
sorority_two,
sorority_three,
sorority_four)
expect(result[:p_value]).to be_within(0.0001).of(0.1241)
# Accept null hypotheses ?
expect(result[:null]).to be true
# Accept alternative hypotheses ?
expect(result[:alternative]).to be false
# Confidence level (90 %)
expect(result[:confidence_level]).to eq 0.9
end
# example TWO
# explained here: https://web.mst.edu/~psyworld/anovaexample.htm
# Susan Sound predicts that students will learn most effectively with a constant background sound,
# as opposed to an unpredictable sound or no sound at all. She randomly divides twenty-four students
# into three groups of eight. All students study a passage of text for 30 minutes.
# Those in group 1 study with background sound at a constant volume in the background.
# Those in group 2 study with noise that changes volume periodically.
# Those in group 3 study with no sound at all.
# After studying, all students take a 10 point multiple choice test over the material.
it 'perfoms a One way ANOVA Test following example TWO' do
constant_sound = [
BigDecimal(7, 1), BigDecimal(4, 1), BigDecimal(6, 1), BigDecimal(8, 1),
BigDecimal(6, 1), BigDecimal(6, 1), BigDecimal(2, 1), BigDecimal(9, 1)
]
random_sound = [
BigDecimal(5, 1), BigDecimal(5, 1), BigDecimal(3, 1), BigDecimal(4, 1),
BigDecimal(4, 1), BigDecimal(7, 1), BigDecimal(2, 1), BigDecimal(2, 1)
]
no_sound = [
BigDecimal(2, 1), BigDecimal(4, 1), BigDecimal(7, 1), BigDecimal(1, 1),
BigDecimal(2, 1), BigDecimal(1, 1), BigDecimal(5, 1), BigDecimal(5, 1)
]
alpha = 0.05
result = StatisticalTest::FTest.one_way_anova(alpha,
constant_sound,
random_sound,
no_sound)
expect(result[:p_value]).to be_within(0.001).of(0.045)
expect(result[:null]).to be false
expect(result[:alternative]).to be true
end
end
context 'when bigdecimal is used in kolmogorov smirnov tests' do
it 'computes a two-sided Kolmogorov-Smirnov test for two samples, following example ONE' do
# Data extracted from http://www.stats.ox.ac.uk/~massa/Lecture%2013.pdf
# calculation in R:
# > ks.test(c(1.2,1.4,1.9,3.7,4.4,4.8,9.7,17.3,21.1,28.4), c(5.6,6.5,6.6,6.9,9.2,10.4,10.6,19.3))
#
# Two-sample Kolmogorov-Smirnov test
#
# D = 0.6, p-value = 0.04987
# alternative hypothesis: two-sided
group_one = [
BigDecimal(1.2, 3), BigDecimal(1.4, 3), BigDecimal(1.9, 3), BigDecimal(3.7, 3),
BigDecimal(4.4, 3), BigDecimal(4.8, 3), BigDecimal(9.7, 3), BigDecimal(17.3, 3),
BigDecimal(21.1, 3), BigDecimal(28.4, 3)
]
group_two = [
BigDecimal(5.6, 3), BigDecimal(6.5, 3), BigDecimal(6.6, 3), BigDecimal(6.9, 3),
BigDecimal(9.2, 3), BigDecimal(10.4, 3), BigDecimal(10.6, 3), BigDecimal(19.3, 3)
]
# alpha, by default, is 0.05
result = StatisticalTest::KSTest.two_samples(group_one: group_one, group_two: group_two)
expect(result[:null]).to be false
expect(result[:alternative]).to be true
expect(result[:d_max]).to eq 0.6
end
end
context 'when bigdecimal is used in wilcoxon rank sum test' do
## Examples
# Example ONE extracted from http://users.sussex.ac.uk/~grahamh/RM1web/Mann-Whitney%20worked%20example.pdf
# The effectiveness of advertising for two rival products (Brand X and Brand Y) was compared.
#
# Example TWO and THREE extracted from http://webspace.ship.edu/pgmarr/Geo441/Lectures/Lec%207%20-%20Mann-Whitney%20and%20Paired%20Tests.pdf
# both examples tries to identify if there is significant difference between oceanic and continental
# earthquakes compared by magnitude (TWO) and depth (THREE).
it 'performs a wilcoxon rank sum/Mann-Whitney U test following example ONE' do
rating_x = [
BigDecimal(3, 1), BigDecimal(4, 1), BigDecimal(2, 1), BigDecimal(6, 1),
BigDecimal(2, 1), BigDecimal(5, 1)
]
rating_y = [
BigDecimal(9, 1), BigDecimal(7, 1), BigDecimal(5, 1), BigDecimal(10, 1),
BigDecimal(6, 1), BigDecimal(8, 1)
]
result = StatisticalTest::WilcoxonRankSumTest.new.perform(0.05, :two_tail, rating_x, rating_y)
expect(result[:u]).to eq 2
expect(result[:null]).to be false
expect(result[:alternative]).to be true
expect(result[:p_value]).to be_within(0.01).of(0.01)
end
it 'performs a wilcoxon rank sum/Mann-Whitney U test following example TWO' do
oceanic_magnitudes = [
BigDecimal(3.9, 3), BigDecimal(4.0, 3), BigDecimal(4.1, 3), BigDecimal(4.3, 3),
BigDecimal(4.3, 3), BigDecimal(4.4, 3), BigDecimal(4.5, 3), BigDecimal(4.8, 3),
BigDecimal(5.4, 3), BigDecimal(6.3, 3), BigDecimal(6.8, 3), BigDecimal(6.8, 3)
]
continental_magnitudes = [
BigDecimal(4.1, 3), BigDecimal(4.3, 3), BigDecimal(4.3, 3), BigDecimal(4.3, 3),
BigDecimal(4.4, 3), BigDecimal(4.4, 3), BigDecimal(4.5, 3), BigDecimal(4.6, 3),
BigDecimal(5.0, 3), BigDecimal(5.1, 3), BigDecimal(5.1, 3)
]
result = StatisticalTest::WilcoxonRankSumTest.new.perform(0.05, :two_tail, oceanic_magnitudes, continental_magnitudes)
expect(result[:u]).to eq 63 # In the example, they use the largest instead of the lowest.
expect(result[:z]).to be_within(0.01).of(-0.186)
expect(result[:null]).to be true
expect(result[:alternative]).to be false
expect(result[:p_value]).to eq 0.8525013990549617
end
it 'performs a wilcoxon rank sum/Mann-Whitney U test following example THREE' do
oceanic_earthquakes = [
BigDecimal(75, 1), BigDecimal(32, 1), BigDecimal(50, 1), BigDecimal(38, 1),
BigDecimal(19, 1), BigDecimal(44, 1), BigDecimal(33, 1), BigDecimal(102, 1),
BigDecimal(28, 1), BigDecimal(70, 1), BigDecimal(49, 1), BigDecimal(70, 1)
]
continental_earthquakes = [
BigDecimal(69, 1), BigDecimal(99, 1), BigDecimal(135, 1), BigDecimal(115, 1),
BigDecimal(33, 1), BigDecimal(92, 1), BigDecimal(118, 1), BigDecimal(115, 1),
BigDecimal(92, 1), BigDecimal(89, 1), BigDecimal(101, 1)
]
result = StatisticalTest::WilcoxonRankSumTest.new.perform(0.05, :two_tail, oceanic_earthquakes, continental_earthquakes)
expect(result[:u]).to eq 17.5
expect(result[:z]).to be_within(0.001).of(-2.988)
expect(result[:null]).to be false
expect(result[:alternative]).to be true
expect(result[:p_value]).to eq 0.002808806689028387
end
end
context 'when bigdecimal is used in t-tests' do
# Example with one sample
# explained here: https://secure.brightstat.com/index.php?id=40
# A random sample of 22 fifth grade pupils have a grade point average of 5.0 in maths
# with a standard deviation of 0.452, whereas marks range from 1 (worst) to 6 (excellent).
# The grade point average (GPA) of all fifth grade pupils of the last five years is 4.7.
# Is the GPA of the 22 pupils different from the populations’ GPA?
it 'performs a t-test with one sample for one tail' do
student_grades = [
BigDecimal(5, 3), BigDecimal(5.5, 3), BigDecimal(4.5, 3), BigDecimal(5, 3),
BigDecimal(5, 3), BigDecimal(6, 3), BigDecimal(5, 3), BigDecimal(5, 3),
BigDecimal(4.5, 3), BigDecimal(5, 3), BigDecimal(5, 3), BigDecimal(4.5, 3),
BigDecimal(4.5, 3), BigDecimal(5.5, 3), BigDecimal(4, 3), BigDecimal(5, 3),
BigDecimal(5, 3), BigDecimal(5.5, 3), BigDecimal(4.5, 3), BigDecimal(5.5, 3),
BigDecimal(5, 3), BigDecimal(5.5, 3)
]
alpha = 0.05
result = StatisticalTest::TTest.perform(alpha, :one_tail, 4.7, student_grades)
expect(result[:p_value]).to be_within(0.000001).of(0.003114) # R 3.5.1. calculates the p_value as 0.003114
expect(result[:null]).to be false
expect(result[:alternative]).to be true
end
it 'performs a t-test with one sample for two tails' do
student_grades = [
BigDecimal(5, 3), BigDecimal(5.5, 3), BigDecimal(4.5, 3), BigDecimal(5, 3),
BigDecimal(5, 3), BigDecimal(6, 3), BigDecimal(5, 3), BigDecimal(5, 3),
BigDecimal(4.5, 3), BigDecimal(5, 3), BigDecimal(5, 3), BigDecimal(4.5, 3),
BigDecimal(4.5, 3), BigDecimal(5.5, 3), BigDecimal(4, 3), BigDecimal(5, 3),
BigDecimal(5, 3), BigDecimal(5.5, 3), BigDecimal(4.5, 3), BigDecimal(5.5, 3),
BigDecimal(5, 3), BigDecimal(5.5, 3)
]
alpha = 0.05
result = StatisticalTest::TTest.perform(alpha, :two_tail, 4.7, student_grades)
expect(result[:p_value]).to be_within(0.000001).of(0.006229) # R 3.5.1. calculates the p_value as 0.006229
expect(result[:null]).to be false
expect(result[:alternative]).to be true
end
context 'when two samples are specified' do
# example ONE
# explained here: http://faculty.webster.edu/woolflm/6aanswer.html
# A research study was conducted to examine the differences between older and younger adults
# on perceived life satisfaction. A pilot study was conducted to examine this hypothesis.
# Ten older adults (over the age of 70) and ten younger adults (between 20 and 30) were give
# a life satisfaction test (known to have high reliability and validity).
# Scores on the measure range from 0 to 60 with high scores indicative of high life satisfaction;
# low scores indicative of low life satisfaction. The data are presented below.
it 'performs a t-test following example ONE' do
older_adults = [
BigDecimal(45, 1), BigDecimal(38, 1), BigDecimal(52, 1), BigDecimal(48, 1),
BigDecimal(25, 1), BigDecimal(39, 1), BigDecimal(51, 1), BigDecimal(46, 1),
BigDecimal(55, 1), BigDecimal(46, 1)
]
younger_adults = [
BigDecimal(34, 1), BigDecimal(22, 1), BigDecimal(15, 1), BigDecimal(27, 1),
BigDecimal(37, 1), BigDecimal(41, 1), BigDecimal(24, 1), BigDecimal(19, 1),
BigDecimal(26, 1), BigDecimal(36, 1)
]
alpha = 0.05
result = StatisticalTest::TTest.perform(alpha, :two_tail, older_adults, younger_adults)
expect(result[:t_score]).to be_within(0.0001).of(4.2575)
expect(result[:null]).to be false
expect(result[:alternative]).to be true
result = StatisticalTest::TTest.perform(alpha, :two_tail, younger_adults, older_adults)
expect(result[:t_score]).to be_within(0.0001).of(4.2575)
expect(result[:null]).to be false
expect(result[:alternative]).to be true
end
# example TWO
# explained here: http://www.indiana.edu/%7Eeducy520/sec6342/week_10/ttest_exp.pdf
# Rosenthal and Jacobson (1968) informed classroom teachers that some of their students showed
# unusual potential for intellectual gains. Eight months later the students identified to teachers as
# having potentional for unusual intellectual gains showed significiantly greater gains performance
# on a test said to measure IQ than did children who were not so identified.
it 'performs a t-test following example TWO' do
experimental = [
BigDecimal(35, 1), BigDecimal(40, 1), BigDecimal(12, 1), BigDecimal(15, 1),
BigDecimal(21, 1), BigDecimal(14, 1), BigDecimal(46, 1), BigDecimal(10, 1),
BigDecimal(28, 1), BigDecimal(48, 1), BigDecimal(16, 1), BigDecimal(30, 1),
BigDecimal(32, 1), BigDecimal(48, 1), BigDecimal(31, 1), BigDecimal(22, 1),
BigDecimal(12, 1), BigDecimal(39, 1), BigDecimal(19, 1), BigDecimal(25, 1)
]
comparison = [
BigDecimal(2, 1), BigDecimal(27, 1), BigDecimal(38, 1), BigDecimal(31, 1),
BigDecimal(1, 1), BigDecimal(19, 1), BigDecimal(1, 1), BigDecimal(34, 1),
BigDecimal(3, 1), BigDecimal(1, 1), BigDecimal(2, 1), BigDecimal(3, 1),
BigDecimal(2, 1), BigDecimal(1, 1), BigDecimal(2, 1), BigDecimal(1, 1),
BigDecimal(3, 1), BigDecimal(29, 1), BigDecimal(37, 1), BigDecimal(2, 1)
]
alpha = 0.01
result = StatisticalTest::TTest.perform(alpha, :one_tail, experimental, comparison)
expect(result[:t_score]).to be_within(0.0001).of(3.5341)
expect(result[:null]).to be false
expect(result[:alternative]).to be true
result = StatisticalTest::TTest.perform(alpha, :one_tail, comparison, experimental)
expect(result[:t_score]).to be_within(0.0001).of(3.5341)
expect(result[:null]).to be false
expect(result[:alternative]).to be true
end
end
context 'when two samples are specified' do
# example ONE
# explained here: http://faculty.webster.edu/woolflm/6aanswer.html
# A research study was conducted to examine the differences between older and younger adults
# on perceived life satisfaction. A pilot study was conducted to examine this hypothesis.
# Ten older adults (over the age of 70) and ten younger adults (between 20 and 30) were give
# a life satisfaction test (known to have high reliability and validity).
# Scores on the measure range from 0 to 60 with high scores indicative of high life satisfaction;
# low scores indicative of low life satisfaction. The data are presented below.
it 'performs a t-test following example ONE' do
older_adults = [
BigDecimal(45, 1), BigDecimal(38, 1), BigDecimal(52, 1), BigDecimal(48, 1),
BigDecimal(25, 1), BigDecimal(39, 1), BigDecimal(51, 1), BigDecimal(46, 1),
BigDecimal(55, 1), BigDecimal(46, 1)
]
younger_adults = [
BigDecimal(34, 1), BigDecimal(22, 1), BigDecimal(15, 1), BigDecimal(27, 1),
BigDecimal(37, 1), BigDecimal(41, 1), BigDecimal(24, 1), BigDecimal(19, 1),
BigDecimal(26, 1), BigDecimal(36, 1)
]
alpha = 0.05
result = StatisticalTest::TTest.perform(alpha, :two_tail, older_adults, younger_adults)
expect(result[:t_score]).to be_within(0.0001).of(4.2575)
expect(result[:null]).to be false
expect(result[:alternative]).to be true
result = StatisticalTest::TTest.perform(alpha, :two_tail, younger_adults, older_adults)
expect(result[:t_score]).to be_within(0.0001).of(4.2575)
expect(result[:null]).to be false
expect(result[:alternative]).to be true
end
end
describe '.paired_test' do
# example ONE
# explained here: https://onlinecourses.science.psu.edu/stat500/node/51
# Trace metals in drinking water affect the flavor and an unusually high concentration can pose a health hazard.
# Ten pairs of data were taken measuring zinc concentration in bottom water and surface.
# Does the data suggest that the true average concentration in the bottom water exceeds that of surface water?
it 'performs a paired t-test following example ONE' do
group_one = [
BigDecimal(0.430, 4), BigDecimal(0.266, 4), BigDecimal(0.567, 4), BigDecimal(0.531, 4),
BigDecimal(0.707, 4), BigDecimal(0.716, 4), BigDecimal(0.651, 4), BigDecimal(0.589, 4),
BigDecimal(0.469, 4), BigDecimal(0.723, 4)
]
group_two = [
BigDecimal(0.415, 4), BigDecimal(0.238, 4), BigDecimal(0.390, 4), BigDecimal(0.410, 4),
BigDecimal(0.605, 4), BigDecimal(0.609, 4), BigDecimal(0.632, 4), BigDecimal(0.523, 4),
BigDecimal(0.411, 4), BigDecimal(0.612, 4)
]
alpha = 0.05
result = StatisticalTest::TTest.paired_test(alpha, :one_tail, group_one, group_two)
expect(result[:t_score]).to be_within(0.0001).of(4.8638)
expect(result[:null]).to be false
expect(result[:alternative]).to be true
end
end
end
end
| ruby | MIT | 0ac388ab734120bd708164a10261b25b53faf29d | 2026-01-04T17:46:01.529291Z | false |
estebanz01/ruby-statistics | https://github.com/estebanz01/ruby-statistics/blob/0ac388ab734120bd708164a10261b25b53faf29d/spec/ruby-statistics/math_spec.rb | spec/ruby-statistics/math_spec.rb | require 'spec_helper'
describe Math do
describe '.factorial' do
it 'is not defined for numbers less than zero' do
expect(described_class.factorial(rand(-100...0))).to be_nil
end
it 'returns one for zero or one' do
expect(described_class.factorial(0)).to eq 1
expect(described_class.factorial(1)).to eq 1
end
it 'calculates the correct factorial for the specified number' do
expect(described_class.factorial(2)).to eq 2
expect(described_class.factorial(3)).to eq 6
expect(described_class.factorial(4)).to eq 24
expect(described_class.factorial(5)).to eq 120
end
it 'truncates the decimal numbers and calculates the factorial for the real part' do
expect(described_class.factorial(5.4535459823483432434)).to eq 120
end
end
describe '.permutation' do
it 'calculates the possible permutations of k objects from a set of n elements' do
expect(described_class.permutation(15,4)).to eq 32760
expect(described_class.permutation(16, 3)).to eq 3360 # 16 balls, choose 3.
expect(described_class.permutation(10, 2)).to eq 90 # 10 people to select 1th and 2nd place.
end
end
describe '.combination' do
it 'calculates the possible combinations of k object from a set of n elements' do
expect(described_class.combination(16, 3)).to eq 560 # Order 16 balls in 3 ways.
expect(described_class.combination(5, 3)).to eq 10 # How to choose 3 people out of 5.
expect(described_class.combination(12, 5)).to eq 792 # How to choose 5 games out of 12.
end
end
describe '.simpson_rule' do
it 'approximates a solution in the [a,b] interval for the integral of the specified function' do
lower = rand(10)
upper = rand(11..20)
function_a = described_class.simpson_rule(lower, upper, 10_000) do |x|
x ** 2
end
function_b = described_class.simpson_rule(lower, upper, 10_000) do |x|
Math.sin(x)
end
res_a = ((upper ** 3)/3.0) - ((lower ** 3)/3.0) # Integral of x^2
res_b = -Math.cos(upper) + Math.cos(lower) # Integral of sin(x)
expect(function_a.floor).to eq res_a.floor
expect(function_b.floor).to eq res_b.floor
end
it 'is not defined when the iterations are not even numbers' do
expect(
described_class.simpson_rule(1, 2, 3) { |x| x }
).to be_nil
end
end
describe '.lower_incomplete_gamma_function' do
it "solves the function using the simpson's rule" do
lower = 0
upper = rand(1..5)
iteration = 10_000 * upper
expect(described_class).to receive(:simpson_rule).with(lower, upper, iteration)
described_class.lower_incomplete_gamma_function(lower, upper)
end
it "uses the simpson's rule with iterations bigger than 10_000 when upper is 0.0 < x < 1.0" do
lower = 0
upper = rand(0.01..0.99)
iteration = 10_000 * (1 + upper.round(1))
expect(described_class).to receive(:simpson_rule).with(lower, upper.to_r, iteration)
described_class.lower_incomplete_gamma_function(lower, upper)
end
it 'returns the expected calculation' do
results = [0.6322, 0.594, 1.1536, 3.3992, 13.4283]
(1..5).each_with_index do |number, index|
expect(
described_class.lower_incomplete_gamma_function(number, number)
).to be_within(0.0001).of(results[index])
end
end
# The following context is based on the numbers reported in https://github.com/estebanz01/ruby-statistics/issues/78
# which give us a minimum test case scenario where the integral being solved with simpson's rule
# uses zero iterations, raising errors.
context 'When X for the lower incomplete gamma function is rounded to zero' do
let(:s_parameter) { 4.5 }
let(:x) { (52/53).to_r }
it 'does not try to perform a division by zero' do
expect do
described_class.lower_incomplete_gamma_function(s_parameter, x)
end.not_to raise_error
end
it "tries to solve the function using simpson's rule with at least 100_000 iterations" do
expect(described_class).to receive(:simpson_rule).with(0, x, 100_000)
described_class.lower_incomplete_gamma_function(s_parameter, x)
end
end
end
describe '.normalised_lower_incomplete_gamma_function' do
it 'returns zero if the complex parameter is negative' do
expect(described_class.normalised_lower_incomplete_gamma_function(-1, 1)).to be_zero
end
it 'returns zero if the X parameter is negative' do
expect(described_class.normalised_lower_incomplete_gamma_function(1, -1)).to be_zero
end
it 'returns zero if the X parameter is zero' do
expect(described_class.normalised_lower_incomplete_gamma_function(1, 0)).to be_zero
end
# The following context is based on the numbers reported in https://github.com/estebanz01/ruby-statistics/issues/78
# which give us a minimum test case scenario where the integral being solved with simpson's rule
# uses zero iterations, raising errors. It should calculate properly using the normalised lower incomplete.
it 'performs a calculation with a special case' do
expect {
described_class.normalised_lower_incomplete_gamma_function(4.5, 52/53r)
}.not_to raise_error
end
it 'returns the expected calculations' do
# Results calculated on R 4.4.1 using the `pgamma(number, number)` function.
results = [0.6321206, 0.5939942, 0.5768099, 0.5665299, 0.5595067]
(1..5).each_with_index do |number, index|
expect(
described_class.normalised_lower_incomplete_gamma_function(number, number)
).to be_within(0.00001).of(results[index])
end
end
end
describe '.beta_function' do
it 'returns 1 for the special case x = y = 1' do
expect(described_class.beta_function(1, 1)).to eq 1
end
it 'Calculates the expected values for the beta function' do
# TODO: Find a way to better test this instead of fixing some values.
result = [1, 0.1667, 0.0333, 0.0071, 0.0016]
(1..5).each_with_index do |number, index|
expectation = described_class.beta_function(number, number)
expect(expectation).to be_within(0.0001).of(result[index])
end
end
end
describe '.incomplete_beta_function' do
it 'calculates the expected values for the incomplete beta function' do
# The last 2 values:
# For 9 is 0.9999979537560903519733 which is rounding to 1.0
# For 10 is 1.0
results = [0.19, 0.1808, 0.2557, 0.4059, 0.6230, 0.8418, 0.9685, 0.9985, 1.0, 1.0]
(1..10).each_with_index do |number, index|
expect(described_class.incomplete_beta_function(number/10.0, number, number + 1))
.to be_within(0.0001).of(results[index])
end
end
end
end
| ruby | MIT | 0ac388ab734120bd708164a10261b25b53faf29d | 2026-01-04T17:46:01.529291Z | false |
estebanz01/ruby-statistics | https://github.com/estebanz01/ruby-statistics/blob/0ac388ab734120bd708164a10261b25b53faf29d/spec/ruby-statistics/enumerable_spec.rb | spec/ruby-statistics/enumerable_spec.rb | require 'spec_helper'
describe Enumerable do
describe '#mean' do
it 'calculates the mean of an specific collection' do
expect((1..5).to_a.mean).to eq 3.0
expect((1..10).to_a.mean).to eq 5.5
expect((-10..-5).to_a.mean).to eq -7.5
end
end
describe '#variance' do
it 'calculates the *sample* variance of an specific collection' do
expect((1..5).to_a.variance).to eq 2.5
expect((1..10).to_a.variance).to eq 9.166666666666666
expect((-10..-5).to_a.variance).to eq 3.5
end
end
describe '#standard_deviation' do
it 'calcultes the *sample* standard deviation of an specific collection' do
expect((1..5).to_a.standard_deviation).to eq 1.5811388300841898
expect((1..10).to_a.standard_deviation).to eq 3.0276503540974917
expect((-10..-5).to_a.standard_deviation).to eq 1.8708286933869707
end
end
end
| ruby | MIT | 0ac388ab734120bd708164a10261b25b53faf29d | 2026-01-04T17:46:01.529291Z | false |
estebanz01/ruby-statistics | https://github.com/estebanz01/ruby-statistics/blob/0ac388ab734120bd708164a10261b25b53faf29d/spec/ruby-statistics/spearman_rank_coefficient_spec.rb | spec/ruby-statistics/spearman_rank_coefficient_spec.rb | require 'spec_helper'
describe RubyStatistics::SpearmanRankCoefficient do
describe '.rank' do
context 'when only ranks are needed' do
it 'returns an array of elements corresponding to the expected ranks wihout altering order' do
expected_ranks = [4, 1, 3, 2, 5]
result = described_class.rank(data: [10, 30, 12, 15, 3], return_ranks_only: true)
expect(result).to eq expected_ranks
end
end
context 'when ranks and passed elements are needed' do
it 'returns a hash composed by the elements and ranking information' do
expected_ranks = {
30 => { counter: 1, rank: 1, tie_rank: 1 },
15 => { counter: 1, rank: 2, tie_rank: 2 },
12 => { counter: 1, rank: 3, tie_rank: 3 },
10 => { counter: 1, rank: 4, tie_rank: 4 },
3 => { counter: 1, rank: 5, tie_rank: 5 }
}
result = described_class.rank(data: [10, 30, 12, 15, 3], return_ranks_only: false)
expect(result).to eq expected_ranks
end
end
context 'when there are ties' do
it 'returns a ranking list with solved ties when ranks only are needed' do
expected_ranking = [9, 3, 10, 4, 6.5, 5, 8, 1, 2, 6.5]
data = [56, 75, 45, 71, 61, 64, 58, 80, 76, 61]
result = described_class.rank(data: data, return_ranks_only: true)
expect(result).to eq expected_ranking
end
it 'returns a hash composed by the elements and some ranking information' do
expected_ranks = {
80 => { counter: 1, rank: 1, tie_rank: 1 },
76 => { counter: 1, rank: 2, tie_rank: 2 },
75 => { counter: 1, rank: 3, tie_rank: 3 },
71 => { counter: 1, rank: 4, tie_rank: 4 },
64 => { counter: 1, rank: 5, tie_rank: 5 },
61 => { counter: 2, rank: 13, tie_rank: 6.5 },
58 => { counter: 1, rank: 8, tie_rank: 8 },
56 => { counter: 1, rank: 9, tie_rank: 9 },
45 => { counter: 1, rank: 10, tie_rank: 10 }
}
data = [56, 75, 45, 71, 61, 64, 58, 80, 76, 61]
result = described_class.rank(data: data, return_ranks_only: false)
expect(result).to include(expected_ranks)
end
it 'returns a hash containing information about the existing ties' do
tie_rank = { 61 => { counter: 2, tie_rank: 6.5, rank: 13 } }
data = [56, 75, 45, 71, 61, 64, 58, 80, 76, 61]
result = described_class.rank(data: data, return_ranks_only: false)
expect(result).to include(tie_rank)
end
end
end
describe '.coefficient' do
it 'raises an error when the groups have different number of cases' do
expect do
described_class.coefficient([1, 2, 3], [1, 2, 3, 4])
end.to raise_error(StandardError, 'Both group sets must have the same number of cases.')
end
it 'returns nothing when both groups have a size of zero cases' do
expect(described_class.coefficient([], [])).to be_nil
end
context 'when there are ties in the data' do
it 'calculates the spearman rank coefficient for example one' do
# Example taken from http://www.biostathandbook.com/spearman.html
volume = [1760, 2040, 2440, 2550, 2730, 2740, 3010, 3080, 3370, 3740, 4910, 5090, 5090, 5380, 5850, 6730, 6990, 7960]
frequency = [529, 566, 473, 461, 465, 532, 484, 527, 488, 485, 478, 434, 468, 449, 425, 389, 421, 416]
volume_rank = described_class.rank(data: volume)
frequency_rank = described_class.rank(data: frequency)
rho = described_class.coefficient(volume_rank, frequency_rank)
expect(rho).to be_within(0.0000001).of(-0.7630357)
end
it 'calcultes the spearman rank coefficient for example two' do
# Example taken from https://geographyfieldwork.com/SpearmansRank.htm
# Results from R:
# cor(c(50, 175, 270, 375, 425, 580, 710, 790, 890, 980), c(1.80, 1.20, 2.0, 1.0, 1.0, 1.20, 0.80, 0.60, 1.0, 0.85), method = 'spearman')
# [1] -0.7570127
distance = [50, 175, 270, 375, 425, 580, 710, 790, 890, 980]
price = [1.80, 1.20, 2.0, 1.0, 1.0, 1.20, 0.80, 0.60, 1.0, 0.85]
distance_rank = described_class.rank(data: distance)
price_rank = described_class.rank(data: price)
rho = described_class.coefficient(distance_rank, price_rank)
expect(rho).to be_within(0.0000001).of(-0.7570127)
end
it 'calculates the spearman rank coefficient for example three' do
# Example taken from http://www.real-statistics.com/correlation/spearmans-rank-correlation/spearmans-rank-correlation-detailed/
life_exp = [80, 78, 60, 53, 85, 84, 73, 79, 81, 75, 68, 72, 58, 92, 65]
cigarretes = [5, 23, 25, 48, 17, 8, 4, 26, 11, 19, 14, 35, 29, 4, 23]
life_rank = described_class.rank(data: life_exp)
cigarretes_rank = described_class.rank(data: cigarretes)
rho = described_class.coefficient(life_rank, cigarretes_rank)
expect(rho).to be_within(0.0000001).of(-0.6744197)
end
end
context 'when there are no ties in the data' do
it 'calculates the spearman rank coefficient for example one' do
# Example taken from here: https://statistics.laerd.com/statistical-guides/spearmans-rank-order-correlation-statistical-guide-2.php
english_data = [56, 75, 45, 71, 62, 64, 58, 80, 76, 61]
math_data = [66, 70, 40, 60, 65, 56, 59, 77, 67, 63]
english_rank = described_class.rank(data: english_data)
math_rank = described_class.rank(data: math_data)
rho = described_class.coefficient(english_rank, math_rank)
expect(rho).to be_within(0.01).of(0.67)
end
it 'calculates the spearman rank coefficient for example two' do
# Example taken from here: https://www.statisticshowto.datasciencecentral.com/spearman-rank-correlation-definition-calculate/
physics = [35, 23, 47, 17, 10, 43, 9, 6, 28]
math = [30, 33, 45, 23, 8, 49, 12, 4, 31]
physics_rank = described_class.rank(data: physics)
math_rank = described_class.rank(data: math)
rho = described_class.coefficient(physics_rank, math_rank)
expect(rho).to eq 0.9
end
end
end
end
| ruby | MIT | 0ac388ab734120bd708164a10261b25b53faf29d | 2026-01-04T17:46:01.529291Z | false |
estebanz01/ruby-statistics | https://github.com/estebanz01/ruby-statistics/blob/0ac388ab734120bd708164a10261b25b53faf29d/spec/ruby-statistics/statistical_test/t_test_spec.rb | spec/ruby-statistics/statistical_test/t_test_spec.rb | require 'spec_helper'
describe RubyStatistics::StatisticalTest::TTest do
describe '.perform' do
context 'when there is an standard deviation of zero' do
let(:alpha) { 0.05 }
it 'does not perform a t-test when standard deviation of zero' do
sample_1 = [1.0, 1.0, 1.0]
mean = 1.0
error_msg = 'Standard deviation for the difference or group is zero. Please, reconsider sample contents'
expect do
described_class.perform(alpha, :one_tail, mean, sample_1)
end.to raise_error(described_class::ZeroStdError, error_msg)
end
end
# Example with one sample
# explained here: https://secure.brightstat.com/index.php?id=40
# A random sample of 22 fifth grade pupils have a grade point average of 5.0 in maths
# with a standard deviation of 0.452, whereas marks range from 1 (worst) to 6 (excellent).
# The grade point average (GPA) of all fifth grade pupils of the last five years is 4.7.
# Is the GPA of the 22 pupils different from the populations’ GPA?
it 'performs a t-test with one sample for one tail' do
student_grades = [5, 5.5, 4.5, 5, 5, 6, 5, 5, 4.5, 5, 5, 4.5, 4.5, 5.5, 4, 5, 5, 5.5, 4.5, 5.5, 5, 5.5]
alpha = 0.05
result = described_class.perform(alpha, :one_tail, 4.7, student_grades)
expect(result[:p_value]).to be_within(0.000001).of(0.003114) # R 3.5.1. calculates the p_value as 0.003114
expect(result[:null]).to be false
expect(result[:alternative]).to be true
end
it 'performs a t-test with one sample for two tails' do
student_grades = [5, 5.5, 4.5, 5, 5, 6, 5, 5, 4.5, 5, 5, 4.5, 4.5, 5.5, 4, 5, 5, 5.5, 4.5, 5.5, 5, 5.5]
alpha = 0.05
result = described_class.perform(alpha, :two_tail, 4.7, student_grades)
expect(result[:p_value]).to be_within(0.000001).of(0.006229) # R 3.5.1. calculates the p_value as 0.006229
expect(result[:null]).to be false
expect(result[:alternative]).to be true
end
context 'when two samples are specified' do
# example ONE
# explained here: http://faculty.webster.edu/woolflm/6aanswer.html
# A research study was conducted to examine the differences between older and younger adults
# on perceived life satisfaction. A pilot study was conducted to examine this hypothesis.
# Ten older adults (over the age of 70) and ten younger adults (between 20 and 30) were give
# a life satisfaction test (known to have high reliability and validity).
# Scores on the measure range from 0 to 60 with high scores indicative of high life satisfaction;
# low scores indicative of low life satisfaction. The data are presented below.
it 'performs a t-test following example ONE' do
older_adults = [45, 38, 52, 48, 25, 39, 51, 46, 55, 46]
younger_adults = [34, 22, 15, 27, 37, 41, 24, 19, 26, 36]
alpha = 0.05
result = described_class.perform(alpha, :two_tail, older_adults, younger_adults)
expect(result[:t_score]).to be_within(0.0001).of(4.2575)
expect(result[:null]).to be false
expect(result[:alternative]).to be true
result = described_class.perform(alpha, :two_tail, younger_adults, older_adults)
expect(result[:t_score]).to be_within(0.0001).of(4.2575)
expect(result[:null]).to be false
expect(result[:alternative]).to be true
end
# example TWO
# explained here: http://www.indiana.edu/%7Eeducy520/sec6342/week_10/ttest_exp.pdf
# Rosenthal and Jacobson (1968) informed classroom teachers that some of their students showed
# unusual potential for intellectual gains. Eight months later the students identified to teachers as
# having potentional for unusual intellectual gains showed significiantly greater gains performance
# on a test said to measure IQ than did children who were not so identified.
it 'performs a t-test following example TWO' do
experimental = [35, 40, 12, 15, 21, 14, 46, 10, 28, 48, 16, 30, 32, 48, 31, 22, 12, 39, 19, 25]
comparison = [2, 27, 38, 31, 1, 19, 1, 34, 3, 1, 2, 3, 2, 1, 2, 1, 3, 29, 37, 2]
alpha = 0.01
result = described_class.perform(alpha, :one_tail, experimental, comparison)
expect(result[:t_score]).to be_within(0.0001).of(3.5341)
expect(result[:null]).to be false
expect(result[:alternative]).to be true
result = described_class.perform(alpha, :one_tail, comparison, experimental)
expect(result[:t_score]).to be_within(0.0001).of(3.5341)
expect(result[:null]).to be false
expect(result[:alternative]).to be true
end
end
end
describe '.paired_test' do
context 'when both samples have an standard deviation of zero' do
let(:alpha) { 0.05 }
it 'does not perform a paired test when both samples are the same' do
sample_1 = [1.0, 2.0]
sample_2 = sample_1
expect do
described_class.paired_test(alpha, :one_tail, sample_1, sample_2)
end.to raise_error(StandardError, 'both samples are the same')
end
it 'does not perform a paired test when both samples have an standard deviation of zero' do
sample_1 = [1.0, 2.0, 3.0]
sample_2 = [2.0, 3.0, 4.0]
error_msg = 'Standard deviation for the difference or group is zero. Please, reconsider sample contents'
expect do
described_class.paired_test(alpha, :one_tail, sample_1, sample_2)
end.to raise_error(described_class::ZeroStdError, error_msg)
end
end
# example ONE
# explained here: https://onlinecourses.science.psu.edu/stat500/node/51
# Trace metals in drinking water affect the flavor and an unusually high concentration can pose a health hazard.
# Ten pairs of data were taken measuring zinc concentration in bottom water and surface.
# Does the data suggest that the true average concentration in the bottom water exceeds that of surface water?
it 'performs a paired t-test following example ONE' do
group_one = [0.430, 0.266, 0.567, 0.531, 0.707, 0.716, 0.651, 0.589, 0.469, 0.723]
group_two = [0.415, 0.238, 0.390, 0.410, 0.605, 0.609, 0.632, 0.523, 0.411, 0.612]
alpha = 0.05
result = described_class.paired_test(alpha, :one_tail, group_one, group_two)
expect(result[:t_score]).to be_within(0.0001).of(4.8638)
expect(result[:null]).to be false
expect(result[:alternative]).to be true
end
# example TWO
# explained here: https://www.statsdirect.com/help/parametric_methods/paired_t.htm
# Comparison of peak expiratory flow rate (PEFR) before and after a walk on a cold winter's day for a random sample of 9 asthmatics.
it 'performs a paired t-test following example TWO - one tail' do
before = [312, 242, 340, 388, 296, 254, 391, 402, 290]
after = [300, 201, 232, 312, 220, 256, 328, 330, 231]
alpha = 0.05
result = described_class.paired_test(alpha, :one_tail, before, after)
expect(result[:t_score]).to be_within(0.0001).of(4.9258)
expect(result[:p_value]).to be_within(0.0001).of(0.0006)
expect(result[:null]).to be false
expect(result[:alternative]).to be true
end
it 'performs a paired t-test following example TWO - two tail' do
before = [312, 242, 340, 388, 296, 254, 391, 402, 290]
after = [300, 201, 232, 312, 220, 256, 328, 330, 231]
alpha = 0.05
result = described_class.paired_test(alpha, :two_tail, before, after)
expect(result[:t_score]).to be_within(0.0001).of(4.9258)
expect(result[:p_value]).to be_within(0.0001).of(0.0012)
expect(result[:null]).to be false
expect(result[:alternative]).to be true
end
# Example THREE
# explained here: http://www.unm.edu/~marcusj/Paired2Sample.pdf
# we would hypothesize that the width of survey transects between individuals effects the
# density estimate of artifacts per unit area, however, we wish to test this hypothesis formally.
it 'performs a paired t-test following example THREE' do
five_mts = [9.998, 140.228, 40.669, 40.030, 86.292, 76.255, 16.689, 115.963, 161.497, 29.529,
37.765, 16.919, 22.415, 4.496, 22.272, 73.061, 57.477, 64.188, 40.958, 10.224,
91.245, 38.836, 160.985, 1.452, 209.540]
ten_mts = [15.384, 54.803, 38.913, 10.875, 10.733, 36.444, 34.774, 49.252, 51.759, 0.643,
0.908, 114.969, 42.673, 43.370, 27.073, 19.343, 27.489, 2.808, 2.994, 95.575,
53.564, 32.265, 42.102, 3.544, 11.333]
alpha = 0.05
result = described_class.paired_test(alpha, :two_tail, five_mts, ten_mts)
expect(result[:t_score]).to be_within(0.0001).of(2.3697)
expect(result[:p_value]).to be_within(0.001).of(0.026)
expect(result[:null]).to be false
expect(result[:alternative]).to be true
end
# Calculations match R:
#
# left_group <- c(0.12819915256260872, 0.24345459073897613, 0.27517650565714014, 0.8522185144081152, 0.05471111219486524)
# right_group <- c(0.3272414061985621, 0.2989306116723194, 0.642664937717922, 0.9476073892620895, 0.7050008194345182)
#
# t.test(left_group, right_group, alternative = 'two.sided', paired = TRUE, conf.level = 0.99)
#
# Paired t-test
#
# data: left_group and right_group
# t = -2.5202, df = 4, p-value = 0.06534
# alternative hypothesis: true mean difference is not equal to 0
# 99 percent confidence interval:
# -0.7732524 0.2261783
# sample estimates:
# mean difference
# -0.2735371
it 'performs a paired t-test (two tail) returning a p_value always smaller than 1' do
left_group = [0.12819915256260872, 0.24345459073897613, 0.27517650565714014, 0.8522185144081152, 0.05471111219486524]
right_group = [0.3272414061985621, 0.2989306116723194, 0.642664937717922, 0.9476073892620895, 0.7050008194345182]
alpha = 0.01
result = described_class.paired_test(alpha, :two_tail, left_group, right_group)
expect(result[:t_score]).to be_within(0.0001).of(-2.5202)
expect(result[:p_value]).to be_within(0.00001).of(0.06534)
expect(result[:null]).to be true
expect(result[:alternative]).to be false
end
end
end
| ruby | MIT | 0ac388ab734120bd708164a10261b25b53faf29d | 2026-01-04T17:46:01.529291Z | false |
estebanz01/ruby-statistics | https://github.com/estebanz01/ruby-statistics/blob/0ac388ab734120bd708164a10261b25b53faf29d/spec/ruby-statistics/statistical_test/chi_squared_test_spec.rb | spec/ruby-statistics/statistical_test/chi_squared_test_spec.rb | require 'spec_helper'
describe RubyStatistics::StatisticalTest::ChiSquaredTest do
describe '.chi_statistic' do
# example ONE
# explained here: https://www.thoughtco.com/chi-square-goodness-of-fit-test-example-3126382
it 'returns an array with expected chi-squared statistic value following example ONE' do
observed_counts = [212, 147, 103, 50, 46, 42]
expected = 100
result = described_class.chi_statistic(expected, observed_counts)
expect(result[0]).to be_within(0.001).of(235.42)
end
it 'returns an array with the expected degrees of freedom following example ONE' do
observed_counts = [212, 147, 103, 50, 46, 42]
expected = 100
result = described_class.chi_statistic(expected, observed_counts)
degrees_of_freedom = observed_counts.size - 1
expect(result[1]).to eq degrees_of_freedom
end
# Example two: chocolate colours
# explained here: https://onlinecourses.science.psu.edu/stat414/book/export/html/228
it 'returns an array with the expected chi-squared statistic value following example TWO' do
observed = [224, 119, 130, 48, 59]
expected = [232, 116, 116, 58, 58]
result = described_class.chi_statistic(expected, observed)
expect(result[0]).to be_within(0.001).of(3.784)
end
it 'returns an array with the expected degrees of freedom following example TWO' do
observed = [224, 119, 130, 48, 59]
expected = [232, 116, 116, 58, 58]
result = described_class.chi_statistic(expected, observed)
degrees_of_freedom = observed.size - 1
expect(result[1]).to eq degrees_of_freedom
end
end
describe '.goodness_of_fit' do
it 'perform a goodness of fit test following example ONE' do
observed_counts = [212, 147, 103, 50, 46, 42]
expected = 100 # this is equal to [100, 100, 100, 100, 100, 100]
result = described_class.goodness_of_fit(0.05, expected, observed_counts)
# We cannot get exact p-values as it's dependant on the precision and the machine, therefore
# we use a limit criteria defined by R in 4.4.1.
# Here's the output for the same configuration:
# > observed <- c(212, 147, 103, 50, 46, 42)
# > expected <- c(100, 100, 100, 100, 100, 100)
# > chisq.test(observed, p = expected, rescale.p = TRUE)
# Chi-squared test for given probabilities
#
# data: observed
# X-squared = 235.42, df = 5, p-value < 2.2e-16
expect(result[:p_value]).to be <= 2.2e-16 # This matches the criteria used in R 4.4.1
expect(result[:null]).to be false
expect(result[:alternative]).to be true
end
it 'perform a goodness of fit test following example TWO' do
observed = [224, 119, 130, 48, 59]
expected = [232, 116, 116, 58, 58]
result = described_class.goodness_of_fit(0.05, expected, observed)
expect(result[:p_value]).to be_within(0.0001).of(0.4359)
expect(result[:null]).to be true
expect(result[:alternative]).to be false
end
# The following test is based on the numbers reported in https://github.com/estebanz01/ruby-statistics/issues/78
# which give us a minimum test case scenario where the integral being solved with simpson's rule
# uses zero iterations, raising errors.
it 'performs a goodness of fit test with values that generates small chi statistics' do
observed_counts = [481, 483, 482, 488, 478, 471, 477, 479, 475, 462]
expected = 477
result = {}
expect do
result = described_class.goodness_of_fit(0.01, expected, observed_counts)
end.not_to raise_error
expect(result[:p_value]).to be_within(0.0001).of(0.9995)
expect(result[:null]).to be true
expect(result[:alternative]).to be false
end
end
end
| ruby | MIT | 0ac388ab734120bd708164a10261b25b53faf29d | 2026-01-04T17:46:01.529291Z | false |
estebanz01/ruby-statistics | https://github.com/estebanz01/ruby-statistics/blob/0ac388ab734120bd708164a10261b25b53faf29d/spec/ruby-statistics/statistical_test/kolmogorov_smirnov_test_spec.rb | spec/ruby-statistics/statistical_test/kolmogorov_smirnov_test_spec.rb | require 'spec_helper'
describe RubyStatistics::StatisticalTest::KolmogorovSmirnovTest do
describe 'KSTest' do
it 'defines KSTest as an alias for the KolmogorovSmirnovTest class name' do
expect(RubyStatistics::StatisticalTest::KSTest).to be_a(Class)
expect(RubyStatistics::StatisticalTest::KSTest).to eq described_class
end
end
describe '.two_samples' do
it 'computes a two-sided Kolmogorov-Smirnov test for two samples, following example ONE' do
# Data extracted from http://www.stats.ox.ac.uk/~massa/Lecture%2013.pdf
# calculation in R:
# > ks.test(c(1.2,1.4,1.9,3.7,4.4,4.8,9.7,17.3,21.1,28.4), c(5.6,6.5,6.6,6.9,9.2,10.4,10.6,19.3))
#
# Two-sample Kolmogorov-Smirnov test
#
# D = 0.6, p-value = 0.04987
# alternative hypothesis: two-sided
group_one = [1.2, 1.4, 1.9, 3.7, 4.4, 4.8, 9.7, 17.3, 21.1, 28.4]
group_two = [5.6, 6.5, 6.6, 6.9, 9.2, 10.4, 10.6, 19.3]
# alpha, by default, is 0.05
result = described_class.two_samples(group_one: group_one, group_two: group_two)
expect(result[:null]).to be false
expect(result[:alternative]).to be true
expect(result[:d_max]).to eq 0.6
end
it 'computes a two-sided Kolmogorov-Smirnov test for two samples, following example TWO' do
# Calculation in R
# Two-sample Kolmogorov-Smirnov test
#
# D = 1, p-value = 6.657e-08
# alternative hypothesis: two-sided
men = RubyStatistics::Distribution::Normal.new(3.0, 1.0).random(elements: 10, seed: 100)
women = RubyStatistics::Distribution::Weibull.new(2.0, 3.0).random(elements: 20, seed: 100)
# alpha, by default, is 0.05
result = described_class.two_samples(group_one: men, group_two: women)
expect(result[:null]).to be false
expect(result[:alternative]).to be true
expect(result[:d_max]).to eq 1.0
end
it 'computes a two-sided Kolmogorov-Smirnov test for two samples, following example THREE' do
# Calculation in R
# Two-sample Kolmogorov-Smirnov test
#
# D = 0.4, p-value = 0.873
# alternative hypothesis: two-sided
men = RubyStatistics::Distribution::StandardNormal.new.random(elements: 500, seed: 10)
women = RubyStatistics::Distribution::StandardNormal.new.random(elements: 50, seed: 40)
# alpha, by default, is 0.05
result = described_class.two_samples(group_one: men, group_two: women)
expect(result[:null]).to be true
expect(result[:alternative]).to be false
expect(result[:d_max]).to eq 0.12
end
end
end
| ruby | MIT | 0ac388ab734120bd708164a10261b25b53faf29d | 2026-01-04T17:46:01.529291Z | false |
estebanz01/ruby-statistics | https://github.com/estebanz01/ruby-statistics/blob/0ac388ab734120bd708164a10261b25b53faf29d/spec/ruby-statistics/statistical_test/wilcoxon_rank_sum_test_spec.rb | spec/ruby-statistics/statistical_test/wilcoxon_rank_sum_test_spec.rb | require 'spec_helper'
describe RubyStatistics::StatisticalTest::WilcoxonRankSumTest do
let(:test_class) { described_class.new }
it 'can be instantiated using the MannWhitneyU alias' do
expect(described_class).to eq RubyStatistics::StatisticalTest::MannWhitneyU
end
describe '#rank' do
it 'ranks an specified group of elements according to the Mann-whitney U test' do
oceanic_earthquakes = [75, 32, 50, 38, 19, 44, 33, 102, 28, 70, 49, 70]
continental_earthquakes = [69, 99, 135, 115, 33, 92, 118, 115, 92, 89, 101]
earthquakes = oceanic_earthquakes + continental_earthquakes
# When there is a tie, the final rank is calculated when computing the test.
# This can be found it by dividing counter and rank.
# e.g.: 33 => { counter: 2, rank: 9 } can be seen as 33 - 4.5; 33 - 4.5
result = { 19=>{:counter=>1, :rank=>1}, 28=>{:counter=>1, :rank=>2},
32=>{:counter=>1, :rank=>3}, 33=>{:counter=>2, :rank=>9},
38=>{:counter=>1, :rank=>6}, 44=>{:counter=>1, :rank=>7},
49=>{:counter=>1, :rank=>8}, 50=>{:counter=>1, :rank=>9},
69=>{:counter=>1, :rank=>10}, 70=>{:counter=>2, :rank=>23},
75=>{:counter=>1, :rank=>13}, 89=>{:counter=>1, :rank=>14},
92=>{:counter=>2, :rank=>31}, 99=>{:counter=>1, :rank=>17},
101=>{:counter=>1, :rank=>18}, 102=>{:counter=>1, :rank=>19},
115=>{:counter=>2, :rank=>41}, 118=>{:counter=>1, :rank=>22},
135=>{:counter=>1, :rank=>23} }
expect(test_class.rank(earthquakes)).to eq result
end
end
## Examples
# Example ONE extracted from http://users.sussex.ac.uk/~grahamh/RM1web/Mann-Whitney%20worked%20example.pdf
# The effectiveness of advertising for two rival products (Brand X and Brand Y) was compared.
#
# Example TWO and THREE extracted from http://webspace.ship.edu/pgmarr/Geo441/Lectures/Lec%207%20-%20Mann-Whitney%20and%20Paired%20Tests.pdf
# both examples tries to identify if there is significant difference between oceanic and continental
# earthquakes compared by magnitude (TWO) and depth (THREE).
describe '#perform' do
it 'always computes the test approximating the U-statistic to the standard normal distribution' do
expect_any_instance_of(RubyStatistics::Distribution::StandardNormal)
.to receive(:cumulative_function).and_call_original
result = test_class.perform(0.05, :two_tail, [1,2,3], [4,5,6])
expect(result.keys).to include :z
end
it 'performs a wilcoxon rank sum/Mann-Whitney U test following example ONE' do
rating_x = [3, 4, 2, 6, 2, 5]
rating_y = [9, 7, 5, 10, 6, 8]
result = test_class.perform(0.05, :two_tail, rating_x, rating_y)
expect(result[:u]).to eq 2
expect(result[:null]).to be false
expect(result[:alternative]).to be true
expect(result[:p_value]).to be_within(0.01).of(0.01)
end
it 'performs a wilcoxon rank sum/Mann-Whitney U test following example TWO' do
oceanic_magnitudes = [3.9, 4.0, 4.1, 4.3, 4.3, 4.4, 4.5, 4.8, 5.4, 6.3, 6.8, 6.8]
continental_magnitudes = [4.1, 4.3, 4.3, 4.3, 4.4, 4.4, 4.5, 4.6, 5.0, 5.1, 5.1]
result = test_class.perform(0.05, :two_tail, oceanic_magnitudes, continental_magnitudes)
expect(result[:u]).to eq 63 # In the example, they use the largest instead of the lowest.
expect(result[:z]).to be_within(0.001).of(-0.186)
expect(result[:null]).to be true
expect(result[:alternative]).to be false
expect(result[:p_value]).to eq 0.8525013990549617
end
it 'performs a wilcoxon rank sum/Mann-Whitney U test following example THREE' do
oceanic_earthquakes = [75, 32, 50, 38, 19, 44, 33, 102, 28, 70, 49, 70]
continental_earthquakes = [69, 99, 135, 115, 33, 92, 118, 115, 92, 89, 101]
result = test_class.perform(0.05, :two_tail, oceanic_earthquakes, continental_earthquakes)
expect(result[:u]).to eq 17.5
expect(result[:z]).to be_within(0.001).of(-2.988)
expect(result[:null]).to be false
expect(result[:alternative]).to be true
expect(result[:p_value]).to eq 0.002808806689028387
end
end
end
| ruby | MIT | 0ac388ab734120bd708164a10261b25b53faf29d | 2026-01-04T17:46:01.529291Z | false |
estebanz01/ruby-statistics | https://github.com/estebanz01/ruby-statistics/blob/0ac388ab734120bd708164a10261b25b53faf29d/spec/ruby-statistics/statistical_test/f_test_spec.rb | spec/ruby-statistics/statistical_test/f_test_spec.rb | require 'spec_helper'
describe RubyStatistics::StatisticalTest::FTest do
describe '.anova_f_score' do
context 'when only two groups have been specified' do
let(:group_one) { (1..15).to_a.sample(5) }
let(:group_two) { (1..15).to_a.sample(5) }
let(:variances) { [group_one.variance, group_two.variance] }
let(:result) { described_class.anova_f_score(group_one, group_two) }
it 'calculates the F statistic using the ratio of variances' do
expect(result[0]).to eq variances.max / variances.min.to_f
end
it 'returns the degrees of freedom of the numerator as one' do
expect(result[1]).to eq 1
end
it 'returns the degrees of freedom of the denominator as N-k' do
df = group_one.size + group_two.size - 2
expect(result[2]).to eq df
end
end
# The following groups and values are a replica of the example
# explained here: https://courses.lumenlearning.com/boundless-statistics/chapter/one-way-anova
context 'when more than two groups have been specified' do
let(:sorority_one) { [2.17, 1.85, 2.83, 1.69, 3.33] }
let(:sorority_two) { [2.63, 1.77, 3.25, 1.86, 2.21] }
let(:sorority_three) { [2.63, 3.78, 4.00, 2.55, 2.45] }
let(:sorority_four) { [3.79, 3.45, 3.08, 2.26, 3.18] }
let(:result) do
described_class.anova_f_score(sorority_one,
sorority_two,
sorority_three,
sorority_four)
end
it 'calculates the variances between and within groups to retrieve the F-statistic' do
expect(result[0]).to be_within(0.01).of(2.23)
end
it 'calculates the correct degrees of freedom for the specified groups' do
expect(result[1]).to eq 3
expect(result[2]).to eq 16
end
end
end
describe '.one_way_anova' do
it 'calculates the ANOVA F-score' do
expect(described_class).to receive(:anova_f_score)
described_class.one_way_anova(0.1, [1, 2, 3], [4, 5, 6])
end
# example ONE
# explained here: https://courses.lumenlearning.com/boundless-statistics/chapter/one-way-anova
# Four sororities took a random sample of sisters regarding their grade means for the past term.
# Using a significance level of 1%, is there a difference in mean grades among the sororities?
it 'performs a One way ANOVA Test following example ONE' do
sorority_one = [2.17, 1.85, 2.83, 1.69, 3.33]
sorority_two = [2.63, 1.77, 3.25, 1.86, 2.21]
sorority_three = [2.63, 3.78, 4.00, 2.55, 2.45]
sorority_four = [3.79, 3.45, 3.08, 2.26, 3.18]
alpha = 0.1
result = described_class.one_way_anova(alpha,
sorority_one,
sorority_two,
sorority_three,
sorority_four)
expect(result[:p_value]).to be_within(0.0001).of(0.1241)
# Accept null hypotheses ?
expect(result[:null]).to be true
# Accept alternative hypotheses ?
expect(result[:alternative]).to be false
# Confidence level (90 %)
expect(result[:confidence_level]).to eq 0.9
end
# example TWO
# explained here: https://web.mst.edu/~psyworld/anovaexample.htm
# Susan Sound predicts that students will learn most effectively with a constant background sound,
# as opposed to an unpredictable sound or no sound at all. She randomly divides twenty-four students
# into three groups of eight. All students study a passage of text for 30 minutes.
# Those in group 1 study with background sound at a constant volume in the background.
# Those in group 2 study with noise that changes volume periodically.
# Those in group 3 study with no sound at all.
# After studying, all students take a 10 point multiple choice test over the material.
it 'perfoms a One way ANOVA Test following example TWO' do
constant_sound = [7, 4, 6, 8, 6, 6, 2, 9]
random_sound = [5, 5, 3, 4, 4, 7, 2, 2]
no_sound = [2, 4, 7, 1, 2, 1, 5, 5]
alpha = 0.05
result = described_class.one_way_anova(alpha,
constant_sound,
random_sound,
no_sound)
expect(result[:p_value]).to be_within(0.001).of(0.045)
expect(result[:null]).to be false
expect(result[:alternative]).to be true
end
end
end
| ruby | MIT | 0ac388ab734120bd708164a10261b25b53faf29d | 2026-01-04T17:46:01.529291Z | false |
estebanz01/ruby-statistics | https://github.com/estebanz01/ruby-statistics/blob/0ac388ab734120bd708164a10261b25b53faf29d/spec/ruby-statistics/distribution/binomial_spec.rb | spec/ruby-statistics/distribution/binomial_spec.rb | require 'spec_helper'
describe RubyStatistics::Distribution::Binomial do
describe '#probability_mass_function' do
it 'is not defined for negative values' do
expect(described_class.new(0, 0).probability_mass_function(rand(-10...0))).to be_nil
end
it 'is not defined for values greater than the number of defined trials' do
k = rand(10)
p = rand(0..1)
expect(described_class.new(k, p).probability_mass_function(k + 1)).to be_nil
end
it 'returns the expected results for the binomial distribution' do
results = [0.009765625, 0.0439453125, 0.117187500, 0.205078125, 0.246093750]
binomial = described_class.new(10, 0.5) # Number of trials: 10, probability per trial: 0.5
(1..5).each_with_index do |number, index|
expect(binomial.probability_mass_function(number)).to eq results[index]
end
end
end
describe '#cumulative_function' do
it 'is not defined for negative values' do
expect(described_class.new(0, 0).cumulative_function(rand(-10...0))).to be_nil
end
it 'is not defined for values greater than the number of defined trials' do
k = rand(10)
p = rand(0..1)
expect(described_class.new(k, p).cumulative_function(k + 1)).to be_nil
end
it 'returns the expected results for the binomial distribution' do
results = [0.01074219, 0.05468750, 0.17187500, 0.37695313, 0.62304687]
binomial = described_class.new(10, 0.5) # Number of trials: 10, probability per trial: 0.5
(1..5).each_with_index do |number, index|
expect(binomial.cumulative_function(number)).to be_within(0.00000001).of(results[index])
end
end
end
describe '#mean' do
it 'returns the expected mean for the specified values' do
n = rand(10)
p = rand(0..1)
expect(described_class.new(n, p).mean).to eq n * p
end
end
describe '#variance' do
it 'returns the expected variance for the specified values' do
n = rand(10)
p = rand(0..1)
expect(described_class.new(n, p).variance).to eq (n * p) * (1 - p)
end
end
describe '#mode' do
# The test evaluator is: (number_of_trials + 1) * probability_per_trial
it 'is the floor(test) when the test is zero o when it is a float' do
n = rand(10)
n = n.even? ? n - 1 : n
p = (1/3.7) # To ensure that we always have a test with decimal places.
test = (n + 1) * p
expect(described_class.new(n, p).mode).to eq test.floor
end
context 'when the test is an integer and is located between {1, .., number_of_trials}' do
it 'is the test value and test value minus one' do
n, p = 11, 0.5
test = (n + 1) * p
expect(described_class.new(n, p).mode).to eq [test, test - 1]
end
end
it 'is the number of trials when the probability per trial is 1.0' do
n = rand(10)
p = 1.0
expect(described_class.new(n, p).mode).to eq n
end
end
end
| ruby | MIT | 0ac388ab734120bd708164a10261b25b53faf29d | 2026-01-04T17:46:01.529291Z | false |
estebanz01/ruby-statistics | https://github.com/estebanz01/ruby-statistics/blob/0ac388ab734120bd708164a10261b25b53faf29d/spec/ruby-statistics/distribution/beta_spec.rb | spec/ruby-statistics/distribution/beta_spec.rb | require 'spec_helper'
describe RubyStatistics::Distribution::Beta do
describe '#cumulative_function' do
it 'returns one for values greater than one' do
expect(described_class.new(rand(10), rand(10)).cumulative_function(rand(2..10))).to eq 1
end
it 'is not defined for values less than zero' do
expect(described_class.new(rand(10), rand(10)).cumulative_function(rand(-5...0))).to be_nil
end
it 'returns the expected values for the beta distribution' do
result = [0.0523, 0.1808, 0.3483, 0.5248, 0.6875]
beta_distribution = described_class.new(2, 3) # alpha 2, beta 3
[0.1, 0.2, 0.3, 0.4, 0.5].each_with_index do |number, index|
expect(beta_distribution.cumulative_function(number)).to be_within(0.0001).of(result[index])
end
end
end
describe '#density_function' do
it 'returns zero when the value is not defined in the [0,1] interval' do
expect(described_class.new(1,1).density_function(2)).to eq 0
expect(described_class.new(1,1).density_function(-2)).to eq 0
end
it 'returns the expected values for the probability density function associated to the beta distribution' do
# TODO: Find a way to better test this instead of fixing some values
result = [0, 0.108, 0.384, 0.756, 1.152, 1.5]
beta_distribution = described_class.new(3, 2) # Alpha = 3, Beta = 2
[0, 0.1, 0.2, 0.3, 0.4, 0.5].each_with_index do |number, index|
expect(beta_distribution.density_function(number)).to be_within(0.0001).of(result[index])
end
end
end
describe '#mode' do
it 'calculates the expected mode for the beta distribution' do
alpha = rand(2..10)
beta = rand(2..10)
mode = (alpha.to_f - 1)/(alpha.to_f + beta.to_f - 2)
expect(described_class.new(alpha, beta).mode).to eq (mode)
end
it 'is not defined for alpha, beta minor than or equal to 1' do
alpha = rand(-3..1)
beta = rand(-3..1)
expect(described_class.new(alpha, beta).mode).to be_nil
end
end
describe '#mean' do
it 'returns nil if alpha and beta is zero' do
alpha = 0
beta = 0
expect(described_class.new(alpha, beta).mean).to be_nil
end
it 'returns nil if the sum of alpha and beta is zero' do
alpha = -1
beta = 1
expect(described_class.new(alpha, beta).mean).to be_nil
end
it 'calculates the expected mean for the beta distribution' do
alpha = rand(-5..5)
beta = rand(-5..5)
if alpha + beta == 0 # To avoid NaN results.
alpha = 1
beta = 1
end
expect(described_class.new(alpha, beta).mean).to eq alpha.to_f/(alpha.to_f + beta.to_f)
end
end
end
| ruby | MIT | 0ac388ab734120bd708164a10261b25b53faf29d | 2026-01-04T17:46:01.529291Z | false |
estebanz01/ruby-statistics | https://github.com/estebanz01/ruby-statistics/blob/0ac388ab734120bd708164a10261b25b53faf29d/spec/ruby-statistics/distribution/logseries_spec.rb | spec/ruby-statistics/distribution/logseries_spec.rb | require 'spec_helper'
describe RubyStatistics::Distribution::LogSeries do
describe '.density_function' do
it 'is not defined for negative events' do
expect(described_class.density_function(rand(-5..-1), rand)).to be_nil
end
it 'is not defined for zero events' do
expect(described_class.density_function(0, rand)).to be_nil
end
it 'returns the expected result for the logarithmic distribution' do
# Results extracted from http://calculator.vhex.net/calculator/probability/log-series-discrete-probability-mass
# for x = 1, 2, 3, 4, 5 and p = 0.5
results = [0.721348, 0.180337, 0.060112, 0.022542, 0.009017]
1.upto(5) do |number|
expect(described_class.density_function(number, 0.5)).to be_within(0.000001).of(results[number - 1])
end
end
end
describe '.cumulative_function' do
it 'is not defined for negative events' do
expect(described_class.cumulative_function(rand(-5..-1), rand)).to be_nil
end
it 'is not defined for zero events' do
expect(described_class.cumulative_function(0, rand)).to be_nil
end
it 'returns the expected result for the logarithmic distribution' do
# Results extracted from http://calculator.vhex.net/calculator/probability/log-series-discrete-cumulative-distribution
# for x = 1, 2, 3, 4, 5 and p = 0.5
results = [0.721348, 0.901684, 0.961797, 0.984339, 0.993356]
1.upto(5) do |number|
expect(described_class.cumulative_function(number, 0.5)).to be_within(0.000001).of(results[number - 1])
end
end
end
describe '.mode' do
it 'returns 1.0' do
expect(described_class.mode).to eq 1.0
end
end
describe '.mean' do
it 'returns the expected value for the logseries distribution' do
p = rand
expect(described_class.mean(p)).to eq ((-1.0 / Math.log(1.0 - p)) * (p / (1.0 - p)))
end
end
describe '.variance' do
it 'returns the expected value for the logseries distribution' do
p = rand
result = (-1.0 * p) * ((p + Math.log(1.0 - p)) / (((1.0 - p) ** 2) * (Math.log(1.0 - p) ** 2)))
expect(described_class.variance(p)).to eq result
end
end
end
| ruby | MIT | 0ac388ab734120bd708164a10261b25b53faf29d | 2026-01-04T17:46:01.529291Z | false |
estebanz01/ruby-statistics | https://github.com/estebanz01/ruby-statistics/blob/0ac388ab734120bd708164a10261b25b53faf29d/spec/ruby-statistics/distribution/uniform_spec.rb | spec/ruby-statistics/distribution/uniform_spec.rb | require 'spec_helper'
describe RubyStatistics::Distribution::Uniform do
describe '#density_function' do
it 'returns the expected value for the uniform distribution' do
left, right = 3.0, 4.0
expected = 1/(right - left)
expect(described_class.new(left, right).density_function(rand(3.0..4.0))).to eq expected
end
it 'returns zero when specified value does not belong to the Uniform distribution support' do
left, right = rand(1..10), rand(1..10)
uniform = described_class.new(left, right)
expect(uniform.density_function(left - 1)).to eq 0
expect(uniform.density_function(right + 1)).to eq 0
end
end
describe '#cumulative_function' do
it 'calculates the probability of the specified value for the uniform distribution' do
results = [0.0, 0.0, 0.5, 1.0, 1.0]
(1..5).each_with_index do |number, index|
uniform = described_class.new(2, 4) # left: 2, right: 4
expect(uniform.cumulative_function(number)).to eq results[index]
end
end
it 'returns zero when the specified value is less than the left value' do
expect(described_class.new(3,4).cumulative_function(2)).to eq 0
end
it 'returns one when the specified value is greater than the right value' do
expect(described_class.new(4, 5).cumulative_function(6)).to eq 1
end
end
describe '#mean' do
it 'returns the expected mean for the uniform distribution' do
left,right = rand(1..10), rand(1..10)
expect(described_class.new(left, right).mean).to eq ((1/2.0) * (left + right))
end
end
describe '#variance' do
it 'returns the expected variance for the uniform distribution' do
left,right = rand(1..10), rand(1..10)
expect(described_class.new(left, right).variance).to eq ((1/12.0) * (right - left) ** 2)
end
end
end
| ruby | MIT | 0ac388ab734120bd708164a10261b25b53faf29d | 2026-01-04T17:46:01.529291Z | false |
estebanz01/ruby-statistics | https://github.com/estebanz01/ruby-statistics/blob/0ac388ab734120bd708164a10261b25b53faf29d/spec/ruby-statistics/distribution/f_spec.rb | spec/ruby-statistics/distribution/f_spec.rb | require 'spec_helper'
describe RubyStatistics::Distribution::F do
describe '#cumulative_function' do
it 'returns the expected probabilities for the F distribution' do
results = [0.5210509, 0.7436128, 0.8418970, 0.8930887, 0.9229813]
f_distribution = described_class.new(3, 4) # d1: 3, d2: 4
(1..5).each_with_index do |number, index|
expect(f_distribution.cumulative_function(number)).to be_within(0.0000001).of(results[index])
end
end
end
describe '#density_function' do
it 'returns the expected values for the F distribution' do
d1, d2 = 1, 2
f_distribution = described_class.new(d1, d2)
results = [0.19245009, 0.08838835, 0.05163978, 0.03402069, 0.02414726]
(1..5).each_with_index do |number, index|
expect(f_distribution.density_function(number)).to be_within(0.00000001).of(results[index])
end
end
it 'uses the defined beta function for the Beta distribution' do
x, y = 1, 2
expect(Math).to receive(:beta_function)
.with(x/2.0, y/2.0).and_call_original
described_class.new(x, y).density_function(rand(1..10))
end
end
describe '#mean' do
it 'returns the expected mean value for the F distribution' do
d2 = rand(3..10)
expected_mean = d2/(d2 - 2).to_f
f_distribution = described_class.new(1, d2)
expect(f_distribution.mean).to eq expected_mean
end
it 'is not defined for d2 values less or equal than 2' do
f_distribution = described_class.new(rand(10), rand(-10..2))
expect(f_distribution.mean).to be_nil
end
end
describe '#mode' do
it 'returns the expected mode for the F distribution' do
d1, d2 = rand(3..10), rand(10)
expected_mode = ((d1 - 2)/d1.to_r) * (d2/(d2 + 2).to_r)
f_distribution = described_class.new(d1, d2)
expect(f_distribution.mode).to eq expected_mode.to_f
end
it 'is not defined for d1 values less or equal than 2' do
f_distribution = described_class.new(rand(-10..2), rand(10))
expect(f_distribution.mode).to be_nil
end
end
end
| ruby | MIT | 0ac388ab734120bd708164a10261b25b53faf29d | 2026-01-04T17:46:01.529291Z | false |
estebanz01/ruby-statistics | https://github.com/estebanz01/ruby-statistics/blob/0ac388ab734120bd708164a10261b25b53faf29d/spec/ruby-statistics/distribution/poisson_spec.rb | spec/ruby-statistics/distribution/poisson_spec.rb | require 'spec_helper'
describe RubyStatistics::Distribution::Poisson do
describe '#probability_mass_function' do
it 'returns the expected values using the poisson distribution' do
results = [0.271, 0.271, 0.180, 0.090, 0.036]
poisson = described_class.new(2) # lambda: 2
(1..5).each_with_index do |number, index|
expect(poisson.probability_mass_function(number)).to be_within(0.001).of(results[index])
end
end
end
describe '#cumulative_function' do
it 'returns the expected values using the poisson distribution' do
results = [0.406, 0.677, 0.857, 0.947, 0.983]
poisson = described_class.new(2) # lambda: 2
(1..5).each_with_index do |number, index|
expect(poisson.cumulative_function(number)).to be_within(0.001).of(results[index])
end
end
end
describe '#mean' do
it 'returns the lambda value specified at initialization time' do
l = rand(10)
poisson = described_class.new(l)
expect(poisson.mean).to eq l
end
end
describe '#variance' do
it 'returns the lambda value specified at initialization time' do
l = rand(10)
poisson = described_class.new(l)
expect(poisson.mean).to eq l
end
end
end
| ruby | MIT | 0ac388ab734120bd708164a10261b25b53faf29d | 2026-01-04T17:46:01.529291Z | false |
estebanz01/ruby-statistics | https://github.com/estebanz01/ruby-statistics/blob/0ac388ab734120bd708164a10261b25b53faf29d/spec/ruby-statistics/distribution/geometric_spec.rb | spec/ruby-statistics/distribution/geometric_spec.rb | require 'spec_helper'
describe RubyStatistics::Distribution::Geometric do
describe '#initialize' do
it 'creates a distribution that does not allow probabilities of 100 % chance by default' do
expect(described_class.new(rand).always_success_allowed).to be false
end
it 'creates a distribution that allows 100 % probabilities when specified' do
expect(described_class.new(rand, always_success: true).always_success_allowed).to be true
end
end
context 'when a probability of 100 % chance is allowed' do
let(:p) { rand }
let(:geometric) { described_class.new(p, always_success: true) }
describe '#mean' do
it 'returns the expected value for the geometric distribution' do
expect(geometric.mean).to eq (1.0 - p) / p
end
end
describe '#median' do
it 'returns the expected value for the geometric distribution' do
expect(geometric.median).to eq (-1.0 / Math.log2(1.0 - p)).ceil - 1.0
end
end
describe '#mode' do
it 'returns the expected value for the geometric distribution' do
expect(geometric.mode).to be_zero
end
end
describe '#cumulative_function' do
it 'is not defined if the number of trials is negative' do
expect(geometric.cumulative_function(rand(-10...0))).to be_nil
end
it 'is defined if the number of trials is zero' do
expect(geometric.cumulative_function(0)).not_to be_nil
end
it 'returns the expected values for the geometric distribution' do
# Results from R:
# pgeom(c(1,2,3,4,5), 0.5)
# [1] 0.750000 0.875000 0.937500 0.968750 0.984375
geometric.probability_of_success = 0.5
results = [0.750000, 0.875000, 0.937500, 0.968750, 0.984375]
1.upto(5) do |trial|
expect(geometric.cumulative_function(trial)).to eq results[trial - 1]
end
end
end
describe '#density_function' do
it 'is not defined if the number of trials is negative' do
expect(geometric.density_function(rand(-10...0))).to be_nil
end
it 'is defined if the number of trials is zero' do
expect(geometric.density_function(0)).not_to be_nil
end
it 'returns the expected values for the geometric distribution' do
# Results from R:
# dgeom(c(1,2,3,4,5), 0.5)
# [1] 0.250000 0.125000 0.062500 0.031250 0.015625
geometric.probability_of_success = 0.5
results = [0.250000, 0.125000, 0.062500, 0.031250, 0.015625]
1.upto(5) do |trial|
expect(geometric.density_function(trial)).to eq results[trial - 1]
end
end
end
end
context 'when a probability of 100 % chance is not allowed' do
let(:p) { rand }
let(:geometric) { described_class.new(p, always_success: false) }
describe '#mean' do
it 'returns the expected value for the geometric distribution' do
expect(geometric.mean).to eq 1.0 / p
end
end
describe '#median' do
it 'returns the expected value for the geometric distribution' do
expect(geometric.median).to eq (-1.0 / Math.log2(1.0 - p)).ceil
end
end
describe '#mode' do
it 'returns the expected value for the geometric distribution' do
expect(geometric.mode).to eq 1.0
end
end
describe '#cumulative_function' do
it 'is not defined if the number of trials is negative' do
expect(geometric.cumulative_function(rand(-10...0))).to be_nil
end
it 'is not defined if the number of trials is zero' do
expect(geometric.cumulative_function(0)).to be_nil
end
it 'returns the expected values for the geometric distribution' do
## We don't have a way to compare against R results because
# the geometric distribution in R is calculated with p <= 1
k = rand(1..10)
expect(geometric.cumulative_function(k)).to eq (1.0 - ((1.0 - p) ** k))
end
end
describe '#density_function' do
it 'is not defined if the number of trials is negative' do
expect(geometric.density_function(rand(-10...0))).to be_nil
end
it 'is not defined if the number of trials is zero' do
expect(geometric.density_function(0)).to be_nil
end
it 'returns the expected values for the geometric distribution' do
## We don't have a way to compare against R results because
# the geometric distribution in R is calculated with p <= 1
k = rand(1..10)
expect(geometric.density_function(k)).to eq ((1.0 - p) ** (k - 1.0)) * p
end
end
end
describe '#variance' do
it 'returns the expected value for the geometric distribution' do
p = rand
geometric = described_class.new(p)
expect(geometric.variance).to eq (1.0 - p) / (p ** 2)
end
end
describe '#skewness' do
it 'returns the expected value for the geometric distribution' do
p = rand
geometric = described_class.new(p)
expect(geometric.skewness).to eq (2.0 - p) / Math.sqrt(1.0 - p)
end
end
describe '#kurtosis' do
it 'returns the expected value for the geometric distribution' do
p = rand
geometric = described_class.new(p)
expect(geometric.kurtosis).to eq (6.0 + ((p ** 2) / (1.0 - p)))
end
end
end
| ruby | MIT | 0ac388ab734120bd708164a10261b25b53faf29d | 2026-01-04T17:46:01.529291Z | false |
estebanz01/ruby-statistics | https://github.com/estebanz01/ruby-statistics/blob/0ac388ab734120bd708164a10261b25b53faf29d/spec/ruby-statistics/distribution/gamma_spec.rb | spec/ruby-statistics/distribution/gamma_spec.rb | # frozen_string_literal: true
require 'spec_helper'
describe RubyStatistics::Distribution::Gamma do
let(:shape_p) { 4 }
let(:scale_p) { 2 }
let(:rate_p) { 1.0 / shape_p }
let(:rate_gamma) { described_class.new(shape: shape_p) }
let(:scale_gamma) { described_class.new(shape: shape_p, scale: scale_p) }
before do
# so we can check that the rate/scale attr has been called
allow(rate_gamma).to receive(:rate).and_call_original
allow(scale_gamma).to receive(:scale).and_call_original
end
describe '#as_rate?' do
it 'is true when no scale parameter is defined' do
expect(described_class.new(shape: 1)).to be_as_rate
end
it 'is false when a scale parameter is defined' do
expect(described_class.new(shape: 1, scale: 1)).to_not be_as_rate
end
end
describe '#mean' do
context 'when the scale parameter is defined' do
it 'returns the expected value using the scale parameter' do
expect(scale_gamma.mean).to eq shape_p * scale_p
expect(scale_gamma).to have_received(:scale).at_least(:once)
end
end
context 'when the rate parameter is used' do
it 'returns the expected value using the rate parameter' do
expect(rate_gamma.mean).to eq shape_p / rate_p
expect(rate_gamma).to have_received(:rate).once
end
end
end
describe '#mode' do
it 'returns zero if the shape is zero' do
expect(described_class.new(shape: 0).mode).to be_zero
end
it 'returns zero if the shape is negative' do
expect(described_class.new(shape: -1).mode).to be_zero
end
context 'when the scale parameter is defined' do
it 'returns the expected calculations using the scale parameter' do
expect(scale_gamma.mode).to eq (shape_p - 1.0) * scale_p
expect(scale_gamma).to have_received(:scale).at_least(:once)
end
end
context 'when the rate parameter is used' do
it 'returns the expected calculations using the rate parameter' do
expect(rate_gamma.mode).to eq (shape_p - 1.0) / rate_p
expect(rate_gamma).to have_received(:rate).at_least(:once)
end
end
end
describe '#variance' do
context 'when the scale parameter is defined' do
it 'returns the expected calculations using the scale parameter' do
expect(scale_gamma.variance).to eq shape_p * (scale_p ** 2.0)
expect(scale_gamma).to have_received(:scale).at_least(:once)
end
end
context 'when the rate parameter is used' do
it 'returns the expected calculations using the rate parameter' do
expect(rate_gamma.variance).to eq shape_p / (rate_p ** 2.0)
expect(rate_gamma).to have_received(:rate).at_least(:once)
end
end
end
describe '#skewness' do
it 'returns the expected calculation for both types of gamma definitions' do
skewness = 2.0 / Math.sqrt(shape_p)
expect(rate_gamma.skewness).to eq(skewness)
expect(scale_gamma.skewness).to eq(skewness)
end
end
describe '#density_function' do
context 'when the scale parameter is defined' do
it 'returns the excepted calculations' do
# Values extracted using dgamma(x, shape = shape_p, scale = scale_p) in R 4.4.1
values = [0.006318028, 0.03065662, 0.06275536, 0.09022352, 0.1068815]
values.each_with_index do |value, index|
expect(scale_gamma.density_function(index + 1)).to be_within(0.0000001).of(value)
end
end
end
context 'when the rate parameter is used' do
it 'returns the expected calculations' do
# Values extracted using dgamma(x, shape = shape_p, rate = rate_p) in R 4.4.1
values = [0.0005070318, 0.003159014, 0.008303318, 0.01532831, 0.02331582]
values.each_with_index do |value, index|
expect(rate_gamma.density_function(index + 1)).to be_within(0.0000001).of(value)
end
end
end
end
describe '#cumulative_function' do
context 'when the scale parameter is defined' do
it 'returns the expected calculations' do
# Values extracted using pgamma(x, shape = shape_p, scale = scale_p) in R 4.4.1
values = [0.001751623, 0.01898816, 0.06564245, 0.1428765, 0.2424239]
values.each_with_index do |value, index|
expect(scale_gamma.cumulative_function(index + 1)).to be_within(0.0000001).of(value)
end
end
end
context 'when the rate parameter is used' do
it 'returns the expected calculations' do
# Values extracted using pgamma(x, shape = shape_p, rate = rate_p) in R 4.4.1
values = [0.0001333697, 0.001751623, 0.007292167, 0.01898816, 0.03826905]
values.each_with_index do |value, index|
expect(rate_gamma.cumulative_function(index + 1)).to be_within(0.0000001).of(value)
end
end
end
end
end
| ruby | MIT | 0ac388ab734120bd708164a10261b25b53faf29d | 2026-01-04T17:46:01.529291Z | false |
estebanz01/ruby-statistics | https://github.com/estebanz01/ruby-statistics/blob/0ac388ab734120bd708164a10261b25b53faf29d/spec/ruby-statistics/distribution/weibull_spec.rb | spec/ruby-statistics/distribution/weibull_spec.rb | require 'spec_helper'
describe RubyStatistics::Distribution::Weibull do
describe '#density_function' do
it 'returns the expected density for the weibull distribution' do
results = [0.04614827, 0.16546817, 0.27667238, 0.27590958, 0.16620722]
(1..5).each_with_index do |number, index|
weibull = described_class.new(3, 4) # shape: 3, scale: 4
expect(weibull.density_function(number)).to be_within(0.00000001).of(results[index])
end
end
it 'returns zero for values less than zero' do
expect(described_class.new(3, 4).density_function(rand(-10...0))).to eq 0
end
it 'is not defined for shape and scale values less or equal than zero' do
expect(described_class.new(rand(-5..0), rand(-5..0)).density_function(rand(10))).to be_nil
end
end
describe '#cumulative_function' do
it 'returns the expected probability for the weibull distribution using the specified value' do
results = [0.1051607, 0.3588196, 0.6321206, 0.8309867, 0.9378235]
(1..5).each_with_index do |number, index|
weibull = described_class.new(2, 3) # shape: 2, scale: 3
expect(weibull.cumulative_function(number)).to be_within(0.0000001).of(results[index])
end
end
it 'returns zero for specified vaules less or equal than zero' do
expect(described_class.new(2, 3).cumulative_function(rand(-5..0))).to eq 0
end
end
describe '#mean' do
it 'returns the expected mean for the weibull distribution' do
shape = rand(1..10).to_f
scale = rand(1..10).to_f
expected = scale * Math.gamma(1 + (1/shape))
expect(described_class.new(shape, scale).mean).to eq expected
end
end
describe '#mode' do
it 'returns zero if the shape is less or equal than one' do
expect(described_class.new(rand(-5..1),rand(10)).mode).to eq 0
end
it 'returns the expected mode for the weibull distribution' do
shape = rand(2..10).to_f
scale = rand(1..10).to_f
expected = scale * (((shape - 1)/shape) ** (1/shape))
expect(described_class.new(shape, scale).mode).to eq expected
end
end
describe '#variance' do
it 'returns the expected variance for the weibull distribution' do
scale, shape = rand(1..10).to_f, rand(1..10).to_f
left = Math.gamma(1 + (2/shape))
right = Math.gamma(1 + (1/shape)) ** 2
expected = (scale ** 2) * (left - right)
expect(described_class.new(shape, scale).variance).to eq expected
end
end
# To test random generation, we are going to use the Goodness-of-fit test
# to validate if a sample fits a weibull distribution.
describe '#random' do
it 'returns a pseudo random number that belongs to a weibull distribution' do
# Weibull sample generated from R with shape (k) 5, scale (lambda) 2.0 and seed 100
alpha = 0.01
weibull_sample = [2.066758, 2.125623, 1.801906, 2.470445, 1.892243]
random_sample = described_class.new(5.0, 2.0).random(elements: 5, seed: 100)
test = RubyStatistics::StatisticalTest::ChiSquaredTest.goodness_of_fit(alpha, weibull_sample, random_sample)
# Null hypothesis: Both samples belongs to the same distribution (weibull in this case)
# Alternative hypotesis: Each sample is generated with a different distribution.
expect(test[:null]).to be true
expect(test[:alternative]).to be false
end
it 'does not generate a random sample that follows an uniform distribution' do
# Uniform sample elements generated in R with seed 100
uniform_sample = [0.30776611, 0.25767250, 0.55232243, 0.05638315, 0.46854928]
random_sample = described_class.new(5.0, 2.0).random(elements: 5, seed: 100)
test = RubyStatistics::StatisticalTest::ChiSquaredTest.goodness_of_fit(0.01, uniform_sample, random_sample)
expect(test[:null]).to be false
expect(test[:alternative]).to be true
end
it 'generates the specified number of random elements and store it into an array' do
elements = rand(2..5)
sample = described_class.new(5.0, 2.0).random(elements: elements)
expect(sample).to be_a Array
expect(sample.size).to eq elements
end
it 'returns a single random number when only one element is required' do
weibull = described_class.new(5.0, 2.0)
sample_1 = weibull.random # 1 element by default
sample_2 = weibull.random(elements: 1)
expect(sample_1).to be_a Numeric
expect(sample_2).to be_a Numeric
end
end
end
| ruby | MIT | 0ac388ab734120bd708164a10261b25b53faf29d | 2026-01-04T17:46:01.529291Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.