CombinedText stringlengths 4 3.42M |
|---|
require 'action_view'
require 'fileutils'
require 'cucumber/formatter/io'
require 'cucumber/formatter/duration'
require 'cucumber/ast/scenario'
require 'cucumber/ast/table'
require 'cucumber/ast/outline_table'
require File.join(File.dirname(__FILE__), 'view_helper')
require File.join(File.dirname(__FILE__), 'report')
module PrettyFace
module Formatter
class Html
include Cucumber::Formatter::Io
include Cucumber::Formatter::Duration
include ViewHelper
attr_reader :report
def initialize(step_mother, path_or_io, options)
@path = path_or_io
@io = ensure_io(path_or_io, 'html')
@path_to_erb = File.join(File.dirname(__FILE__), '..', 'templates')
@step_mother = step_mother
@options = options
@report = Report.new
@img_id = 0
end
def embed(src, mime_type, label)
case(mime_type)
when /^image\/(png|gi|jpg|jpeg)/
embed_image(src, label)
end
end
def embed_image(src, label)
@report.current_scenario.image = src.split('/').last
@report.current_scenario.image_label = label
@report.current_scenario.image_id = "img_#{@img_id}"
@img_id += 1
FileUtils.cp src, "#{File.dirname(@path)}/images"
end
def before_features(features)
make_output_directories
@tests_started = Time.now
end
def features_summary_file
if @io.respond_to? 'to_path'
parts = @io.to_path.split('/')
else
parts = @io.path.split('/')
end
parts[parts.length - 1]
end
def before_feature(feature)
@report.add_feature ReportFeature.new(feature, features_summary_file)
end
def after_feature(feature)
@report.current_feature.close(feature)
end
def before_background(background)
@report.begin_background
end
def after_background(background)
@report.end_background
@report.current_feature.background << ReportStep.new(background)
end
def before_feature_element(feature_element)
unless scenario_outline? feature_element
@report.add_scenario ReportScenario.new(feature_element)
end
end
def after_feature_element(feature_element)
unless scenario_outline?(feature_element)
process_scenario(feature_element)
end
end
def before_table_row(example_row)
@report.add_scenario ReportScenario.new(example_row) unless info_row?(example_row)
end
def after_table_row(example_row)
unless info_row?(example_row)
@report.current_scenario.populate(example_row)
build_scenario_outline_steps(example_row)
end
populate_cells(example_row) if example_row.instance_of? Cucumber::Ast::Table::Cells
end
def before_step(step)
@step_timer = Time.now
end
def after_step(step)
step = process_step(step) unless step_belongs_to_outline? step
if @cells
step.table = @cells
@cells = nil
end
end
def after_features(features)
@features = features
@duration = format_duration(Time.now - @tests_started)
generate_report
copy_images_directory
copy_stylesheets_directory
end
def features
@report.features
end
private
def generate_report
<<<<<<< HEAD
<<<<<<< HEAD
<<<<<<< HEAD
renderer = ActionView::Base.new(@path_to_erb)
filename = File.join(@path_to_erb, 'main')
@io.puts renderer.render(:file => filename, :locals => {:report => self})
features.each do |feature|
write_feature_file(feature)
end
end
def write_feature_file(feature)
renderer = ActionView::Base.new(@path_to_erb)
filename = File.join(@path_to_erb, 'feature')
file = File.open("#{File.dirname(@path)}/#{feature.file}", Cucumber.file_mode('w'))
file.puts renderer.render(:file => filename, :locals => {:feature => feature})
=======
path_to_erb = File.join(File.dirname(__FILE__), '..', 'templates')
filename = File.join(path_to_erb, 'main.erb')
=======
filename = File.join(File.dirname(__FILE__), '..', 'templates', 'main.erb')
>>>>>>> extracted info out into step partial
text = File.new(filename).read
@io.puts ERB.new(text, nil, "%>").result(binding)
erbfile = File.join(File.dirname(__FILE__), '..', 'templates', 'feature.erb')
text = File.new(erbfile).read
=======
renderer = ActionView::Base.new(@path_to_erb)
filename = File.join(@path_to_erb, 'main')
@io.puts renderer.render(:file => filename, :locals => {:report => self})
>>>>>>> converted all to use ActionView
features.each do |feature|
write_feature_file(feature)
end
end
<<<<<<< HEAD
<<<<<<< HEAD
def write_feature_file(feature, viewer, filename)
html = viewer.render(:file => filename, :locals => {:feature => feature})
file = File.open("#{File.dirname(@path)}/#{feature.file}", Cucumber.file_mode('w'))
file.puts html
>>>>>>> merge upstream changes
file.flush
file.close
=======
def write_feature_file(feature, text)
file = File.open("#{File.dirname(@path)}/#{feature.file}", Cucumber.file_mode('w'))
file.puts ERB.new(text, nil, "%").result(feature.get_binding)
file.flush
file.close
>>>>>>> extracted info out into step partial
=======
def write_feature_file(feature)
renderer = ActionView::Base.new(@path_to_erb)
filename = File.join(@path_to_erb, 'feature')
file = File.open("#{File.dirname(@path)}/#{feature.file}", Cucumber.file_mode('w'))
file.puts renderer.render(:file => filename, :locals => {:feature => feature})
file.flush
file.close
>>>>>>> converted all to use ActionView
end
def make_output_directories
make_directory 'images'
make_directory 'stylesheets'
end
def make_directory(dir)
path = "#{File.dirname(@path)}/#{dir}"
FileUtils.mkdir path unless File.directory? path
end
def copy_directory(dir, file_names, file_extension)
path = "#{File.dirname(@path)}/#{dir}"
file_names.each do |file|
FileUtils.cp File.join(File.dirname(__FILE__), '..', 'templates', "#{file}.#{file_extension}"), path
end
end
def copy_images_directory
copy_directory 'images', %w(face failed passed pending undefined skipped), "jpg"
end
def copy_stylesheets_directory
copy_directory 'stylesheets', ['style'], 'css'
end
def process_scenario(scenario)
@report.current_scenario.populate(scenario)
end
def process_step(step, status=nil)
duration = Time.now - @step_timer
report_step = ReportStep.new(step)
report_step.duration = duration
report_step.status = status unless status.nil?
if step.background?
@report.current_feature.background << report_step if @report.processing_background_steps?
else
@report.add_step report_step
end
report_step
end
def scenario_outline?(feature_element)
feature_element.is_a? Cucumber::Ast::ScenarioOutline
end
def info_row?(example_row)
return example_row.scenario_outline.nil? if example_row.respond_to? :scenario_outline
return true if example_row.instance_of? Cucumber::Ast::Table::Cells
false
end
def step_belongs_to_outline?(step)
scenario = step.instance_variable_get "@feature_element"
not scenario.nil?
end
def build_scenario_outline_steps(example_row)
values = example_row.to_hash
steps = example_row.scenario_outline.raw_steps.clone
steps.each do |step|
name = nil
values.each do |key, value|
name = step.name.gsub("<#{key}>", "'#{value}'") if step.name.include? "<#{key}>"
end
current_step = process_step(step, example_row.status)
current_step.name = name if name
current_step.error = step_error(example_row.exception, step)
end
end
def step_error(exception, step)
return nil if exception.nil?
exception.backtrace[-1] =~ /^#{step.file_colon_line}/ ? exception : nil
end
def populate_cells(example_row)
@cells ||= []
values = []
example_row.to_a.each do |cell|
values << cell.value
end
@cells << values
end
end
end
end
Add back link for navigation back from feature details page to return to main summary
require 'action_view'
require 'fileutils'
require 'cucumber/formatter/io'
require 'cucumber/formatter/duration'
require 'cucumber/ast/scenario'
require 'cucumber/ast/table'
require 'cucumber/ast/outline_table'
require File.join(File.dirname(__FILE__), 'view_helper')
require File.join(File.dirname(__FILE__), 'report')
module PrettyFace
module Formatter
class Html
include Cucumber::Formatter::Io
include Cucumber::Formatter::Duration
include ViewHelper
attr_reader :report
def initialize(step_mother, path_or_io, options)
@path = path_or_io
@io = ensure_io(path_or_io, 'html')
@path_to_erb = File.join(File.dirname(__FILE__), '..', 'templates')
@step_mother = step_mother
@options = options
@report = Report.new
@img_id = 0
end
def embed(src, mime_type, label)
case(mime_type)
when /^image\/(png|gi|jpg|jpeg)/
embed_image(src, label)
end
end
def embed_image(src, label)
@report.current_scenario.image = src.split('/').last
@report.current_scenario.image_label = label
@report.current_scenario.image_id = "img_#{@img_id}"
@img_id += 1
FileUtils.cp src, "#{File.dirname(@path)}/images"
end
def before_features(features)
make_output_directories
@tests_started = Time.now
end
def features_summary_file
parts = @io.path.split('/')
parts[parts.length - 1]
end
def before_feature(feature)
@report.add_feature ReportFeature.new(feature, features_summary_file)
end
def after_feature(feature)
@report.current_feature.close(feature)
end
def before_background(background)
@report.begin_background
end
def after_background(background)
@report.end_background
@report.current_feature.background << ReportStep.new(background)
end
def before_feature_element(feature_element)
unless scenario_outline? feature_element
@report.add_scenario ReportScenario.new(feature_element)
end
end
def after_feature_element(feature_element)
unless scenario_outline?(feature_element)
process_scenario(feature_element)
end
end
def before_table_row(example_row)
@report.add_scenario ReportScenario.new(example_row) unless info_row?(example_row)
end
def after_table_row(example_row)
unless info_row?(example_row)
@report.current_scenario.populate(example_row)
build_scenario_outline_steps(example_row)
end
populate_cells(example_row) if example_row.instance_of? Cucumber::Ast::Table::Cells
end
def before_step(step)
@step_timer = Time.now
end
def after_step(step)
step = process_step(step) unless step_belongs_to_outline? step
if @cells
step.table = @cells
@cells = nil
end
end
def after_features(features)
@features = features
@duration = format_duration(Time.now - @tests_started)
generate_report
copy_images_directory
copy_stylesheets_directory
end
def features
@report.features
end
private
def generate_report
renderer = ActionView::Base.new(@path_to_erb)
filename = File.join(@path_to_erb, 'main')
@io.puts renderer.render(:file => filename, :locals => {:report => self})
features.each do |feature|
write_feature_file(feature)
end
end
def write_feature_file(feature)
renderer = ActionView::Base.new(@path_to_erb)
filename = File.join(@path_to_erb, 'feature')
file = File.open("#{File.dirname(@path)}/#{feature.file}", Cucumber.file_mode('w'))
file.puts renderer.render(:file => filename, :locals => {:feature => feature})
file.flush
file.close
end
def make_output_directories
make_directory 'images'
make_directory 'stylesheets'
end
def make_directory(dir)
path = "#{File.dirname(@path)}/#{dir}"
FileUtils.mkdir path unless File.directory? path
end
def copy_directory(dir, file_names, file_extension)
path = "#{File.dirname(@path)}/#{dir}"
file_names.each do |file|
FileUtils.cp File.join(File.dirname(__FILE__), '..', 'templates', "#{file}.#{file_extension}"), path
end
end
def copy_images_directory
copy_directory 'images', %w(face failed passed pending undefined skipped), "jpg"
end
def copy_stylesheets_directory
copy_directory 'stylesheets', ['style'], 'css'
end
def process_scenario(scenario)
@report.current_scenario.populate(scenario)
end
def process_step(step, status=nil)
duration = Time.now - @step_timer
report_step = ReportStep.new(step)
report_step.duration = duration
report_step.status = status unless status.nil?
if step.background?
@report.current_feature.background << report_step if @report.processing_background_steps?
else
@report.add_step report_step
end
report_step
end
def scenario_outline?(feature_element)
feature_element.is_a? Cucumber::Ast::ScenarioOutline
end
def info_row?(example_row)
return example_row.scenario_outline.nil? if example_row.respond_to? :scenario_outline
return true if example_row.instance_of? Cucumber::Ast::Table::Cells
false
end
def step_belongs_to_outline?(step)
scenario = step.instance_variable_get "@feature_element"
not scenario.nil?
end
def build_scenario_outline_steps(example_row)
values = example_row.to_hash
steps = example_row.scenario_outline.raw_steps.clone
steps.each do |step|
name = nil
values.each do |key, value|
name = step.name.gsub("<#{key}>", "'#{value}'") if step.name.include? "<#{key}>"
end
current_step = process_step(step, example_row.status)
current_step.name = name if name
current_step.error = step_error(example_row.exception, step)
end
end
def step_error(exception, step)
return nil if exception.nil?
exception.backtrace[-1] =~ /^#{step.file_colon_line}/ ? exception : nil
end
def populate_cells(example_row)
@cells ||= []
values = []
example_row.to_a.each do |cell|
values << cell.value
end
@cells << values
end
end
end
end |
module PunctualDateSelect
module DateHash
def year
get_integer_of :year
end
def month
get_integer_of :month
end
def day
get_integer_of :day
end
private
def get_integer_of(key)
self[key].blank? ? nil : self[key].to_i # This plugin also extends select_year that if this method returns nil it goes well.
end
end
module Model
module ClassMethods
def punctual_date_column(*args)
options = args.extract_options!
args.each do |column_name|
cast_method = :"cast_#{column_name}_if_possible"
before_validation cast_method
define_method cast_method do
casted_date = self.class.punctual_date_value_to_date(send(column_name), options)
send("#{column_name}=", casted_date) if casted_date
end
validation_method = :"validate_#{column_name}_is_casted"
validate validation_method
define_method validation_method do
errors.add(column_name, :invalid) if send(column_name) && !send(column_name).kind_of?(Date) && !send(column_name).kind_of?(Time)
end
define_method "#{column_name}=" do |value|
self[column_name] = (value.kind_of?(Hash) && !value.values.any?{|t| !t.blank?}) ? nil : value
if value.kind_of?(Hash) && !value.kind_of?(PunctualDateSelect::DateHash)
class << value
include PunctualDateSelect::DateHash
end
end
self[column_name]
end
private cast_method, validation_method
end
end
# options
# * :allow_string - If true, it's used to create a Date. You can specify regular expression here.
def punctual_date_value_to_date(value, options = {})
allow_string = options[:allow_string]
string_condition = allow_string.kind_of?(Regexp) ? allow_string : String
case value
when Hash
if value[:year].blank? || value[:month].blank? || value[:day].blank?
nil
else
begin
Date.new(value[:year].to_i,value[:month].to_i, value[:day].to_i)
rescue
nil
end
end
else
if allow_string
case value
when string_condition
begin
Date.parse(value)
rescue
nil
end
end
else
nil
end
end
end
end
def self.included(base)
base.extend(ClassMethods)
end
end
end
ActiveRecord::Base.send(:include, PunctualDateSelect::Model)
Refactor punctual_date_value_to_date
module PunctualDateSelect
module DateHash
def year
get_integer_of :year
end
def month
get_integer_of :month
end
def day
get_integer_of :day
end
def to_date
string_condition = allow_string.kind_of?(Regexp) ? allow_string : String
case value
when Hash
if value[:year].blank? || value[:month].blank? || value[:day].blank?
nil
else
begin
Date.new(value[:year].to_i,value[:month].to_i, value[:day].to_i)
rescue
nil
end
end
else
case value
when string_condition
begin
Date.parse(value)
rescue
nil
end
end
end
end
private
def get_integer_of(key)
self[key].blank? ? nil : self[key].to_i # This plugin also extends select_year that if this method returns nil it goes well.
end
end
module Model
module ClassMethods
def punctual_date_column(*args)
args.each do |column_name|
cast_method = :"cast_#{column_name}_if_possible"
before_validation cast_method
define_method cast_method do
casted_date = send(column_name).to_date
send("#{column_name}=", casted_date) if casted_date
end
validation_method = :"validate_#{column_name}_is_casted"
validate validation_method
define_method validation_method do
errors.add(column_name, :invalid) if send(column_name) && !send(column_name).kind_of?(Date) && !send(column_name).kind_of?(Time)
end
define_method "#{column_name}=" do |value|
self[column_name] = (value.kind_of?(Hash) && !value.values.any?{|t| !t.blank?}) ? nil : value
if value.kind_of?(Hash) && !value.kind_of?(PunctualDateSelect::DateHash)
class << value
include PunctualDateSelect::DateHash
end
end
self[column_name]
end
private cast_method, validation_method
end
end
end
def self.included(base)
base.extend(ClassMethods)
end
end
end
ActiveRecord::Base.send(:include, PunctualDateSelect::Model)
|
module PvOutputWrapper
require 'addressable/uri'
class Response
attr_reader :response
SERVICE_MAPPER = {
# rubocop:disable Metrics/LineLength
:get_statistic => %i(energy_generated energy_exported average_generation minimum_generation maximum_generation average_efficiency outputs actual_date_from actual_date_to record_efficiency record_date energy_consumed peak_energy_import off_peak_energy_import shoulder_energy_import high_shoulder_energy_import average_consumption minimum_consumption maximum_consumption credit_amount debit_amount),
:get_status => %i(date time energy_generation power_generation energy_consumption power_consumption efficiency temperature voltage extended_value1 extended_value2 extended_value3 extended_value4 extended_value5 extended_value6),
:get_system => %i(field system_name system_size postcode number_of_panels panel_power panel_brand number_of_inverters inverter_power inverter_brand orientation array_tilt shade install_date latitude longitude status_interval number_of_panels_secondary panel_power_secondary orientation_secondary array_tilt_secondary export_tariff import_peak_tariff import_off_peak_tariff import_shoulder_tariff import_high_shoulder_tariff import_daily_charge teams donations extended_data_config monthly_estimations),
:search => %i(system_name system_size postcode orientation outputs last_output system_id panel inverter distance latitude longitude),
# rubocop:enable Metrics/LineLength
}
# @arg [Addressable::URI], [Net::HTTPOK].
def initialize(uri, response)
@uri = uri
@response = response
end
# @return [String].
def body
@response.body.encode('UTF-8', { :invalid => :replace,
:undef => :replace,
:replace => '?',
}
)
end
# TODO: response body changes depending on params!
# TODO: fix specs
# TODO: raise exception
# @return [Hash], [Array] data mapped to keys.
def parse
# if body.chomp!
if body.include?("\n")
parse_multi_line
else
parse_line(response_keys, body)
end
end
private
# @arg [Array<Symbol>] keys for the hash.
# [String] one line of the response body.
# @return [Hash].
def parse_line(keys, line)
Hash[keys.zip line.split(/,/)]
end
# @return [Array].
def parse_multi_line
keys = response_keys
raise hell unless keys
body.split("\n").reduce([]) do |a, line|
a << parse_line(keys, line)
end
end
# @return [Array<Symbol>].
def response_keys
SERVICE_MAPPER[service]
end
# @return [Symbol].
def service
@uri.path.split('/').last.split('.').first.to_sym
end
# @return [Array<Symbol>].
def param_keys
@uri.query_values.keys.flat_map(:to_sym)
end
end
end
Correct SERVICE_MAPPER keys...
since services is generated from uri, services cannot have underscores.
module PvOutputWrapper
class Response
require 'addressable/uri'
attr_reader :response
SERVICE_MAPPER = {
# rubocop:disable Metrics/LineLength
:getstatistic => %i(energy_generated energy_exported average_generation minimum_generation maximum_generation average_efficiency outputs actual_date_from actual_date_to record_efficiency record_date energy_consumed peak_energy_import off_peak_energy_import shoulder_energy_import high_shoulder_energy_import average_consumption minimum_consumption maximum_consumption credit_amount debit_amount),
:getstatus => %i(date time energy_generation power_generation energy_consumption power_consumption efficiency temperature voltage extended_value1 extended_value2 extended_value3 extended_value4 extended_value5 extended_value6),
:getsystem => %i(system_name system_size postcode number_of_panels panel_power panel_brand number_of_inverters inverter_power inverter_brand orientation array_tilt shade install_date latitude longitude status_interval number_of_panels_secondary panel_power_secondary orientation_secondary array_tilt_secondary),
:search => %i(system_name system_size postcode orientation outputs last_output system_id panel inverter distance latitude longitude),
# rubocop:enable Metrics/LineLength
}
# @arg [Addressable::URI], [Net::HTTPOK].
def initialize(uri, response)
@uri = uri
@response = response
end
# @return [String].
def body
@response.body.encode('UTF-8', { :invalid => :replace,
:undef => :replace,
:replace => '?',
}
)
end
# TODO: response body changes depending on params!
# TODO: fix specs
# TODO: raise exception
# @return [Hash], [Array] data mapped to keys.
def parse
# if body.chomp!
if body.include?("\n")
parse_multi_line
else
parse_line(response_keys, body)
end
end
private
# @arg [Array<Symbol>] keys for the hash.
# [String] one line of the response body.
# @return [Hash].
def parse_line(keys, line)
Hash[keys.zip line.split(/,/)]
end
# @return [Array].
def parse_multi_line
keys = response_keys
raise hell unless keys
body.split("\n").reduce([]) do |a, line|
a << parse_line(keys, line)
end
end
# @return [Array<Symbol>].
def response_keys
SERVICE_MAPPER[service]
end
# @return [Symbol].
def service
@uri.path.split('/').last.split('.').first.to_sym
end
# @return [Array<Symbol>].
def param_keys
@uri.query_values.keys.flat_map(:to_sym)
end
end
end
|
module Pwrake
class ReportMulti
def initialize(list,pattern)
@reports = list.map do |base|
Report.new(base,pattern)
end
@pattern = pattern
@elap_png = 'elap.png'
end
def report(stat_html)
if true
@reports.each do |r|
r.report_html
end
plot_elap
end
html = Report::HTML_HEAD + "<body><h1>Pwrake Statistics</h1>\n"
html << "<h2>Log files</h2>\n"
html << "<table>\n"
html << "<tr><th>log file</th><th>id</th><th>ncore</th><th>elapsed time(sec)</th><tr>\n"
@reports.each do |r|
html << "<tr><td><a href='#{r.html_file}'>#{r.base}</a></td>"
html << "<td>#{r.id_str}</td><td>#{r.ncore}</td><td>#{r.elap}</td><tr>\n"
end
html << "</table>\n"
html << "<h2>Elapsed time</h2>\n"
html << "<img src='#{@elap_png}' align='top'/></br>\n"
html << "<h2>Histogram of Execution time</h2>\n"
html << report_histogram()
html << "</body></html>\n"
File.open(stat_html,"w") do |f|
f.puts html
end
end
def plot_elap
a = @reports.map{|r| r.ncore * r.elap}.min
ymin = @reports.map{|r| r.elap}.min
ymin = 10**(Math.log10(ymin).floor)
ymax = 10**(Math.log10(ymin).floor+2)
IO.popen("gnuplot","r+") do |f|
f.puts "
set terminal png size 640,480
set output '#{@elap_png}'
set xlabel 'ncore'
set ylabel 'time (sec)'
set yrange [#{ymin}:#{ymax}]
set logscale xy
plot #{a}/x,'-' w lp lw 2 ps 2 title 'elapsed time'
"
@reports.sort_by{|r| r.ncore}.each do |r|
f.puts "#{r.ncore} #{r.elap}"
end
f.puts "e"
end
puts "Ncore-time plot: "+@elap_png
end
def report_histogram
@images = {}
@stats = {}
@reports.each do |r|
r.cmd_stat.each do |cmd,stat|
if stat.n > 2
@stats[cmd] ||= {}
@stats[cmd]["#{r.id_str}(nc=#{r.ncore})"] = stat
end
end
end
@stats.each_key do |cmd|
@images[cmd] = 'hist_'+cmd+'.png'
end
histogram_plot
histogram_html
end
def histogram_html
html = ""
@stats.each do |cmd,stats|
html << "<p>Statistics of Elapsed time of #{cmd}</p>\n<table>\n"
html << "<th>ncore</th>"+Stat.html_th
stats.each do |ncore,s|
html << "<tr><td>#{ncore}</td>" + s.html_td + "</tr>\n"
end
html << "</table>\n"
html << "<img src='./#{@images[cmd]}'/>\n"
end
html
end
def histogram_plot
@stats.each do |cmd,stats|
IO.popen("gnuplot","r+") do |f|
f.puts "
set terminal png # size 480,360
set output '#{@images[cmd]}'
set ylabel 'histogram'
set xlabel 'Execution time (sec)'
set logscale x
set title '#{cmd}'"
a = []
ncores = stats.keys
ncores.each_with_index{|n,i|
a << "'-' w histeps ls #{i+1} title ''"
a << "'-' w lines ls #{i+1} title '#{n}'"
}
f.puts "plot "+ a.join(',')
stats.each do |ncore,s|
2.times do
s.hist_each do |x1,x2,y|
x = Math.sqrt(x1*x2)
f.printf "%f %f\n", x, y
end
f.puts "e"
end
end
end
puts "Histogram plot: #{@images[cmd]}"
end
end
def histogram_plot2
@stats.each do |cmd,stats|
IO.popen("gnuplot","r+") do |f|
f.puts "
set terminal png # size 480,360
set output '#{@images[cmd]}'
set nohidden3d
set palette rgb 33,13,10
set pm3d
set ticslevel 0
unset colorbox
set yrange [#{stats.size}:0]
set logscale x
set title '#{cmd}'"
a = []
ncores = stats.keys.sort
ncores.each_with_index{|n,i|
a << "'-' w lines ls #{i+1} title '#{n} cores'"
}
f.puts "splot "+ a.join(',')
ncores.each_with_index do |ncore,i|
s = stats[ncore]
y = i
s.hist_each do |x1,x2,z|
f.printf "%g %g 0\n", x1,y
f.printf "%g %g 0\n", x2,y
f.printf "%g %g 0\n", x2,y
end
f.puts ""
s.hist_each do |x1,x2,z|
f.printf "%g %g %g\n", x1,y,z
f.printf "%g %g %g\n", x2,y,z
f.printf "%g %g 0\n", x2,y,z
end
f.puts ""
y = i+1
s.hist_each do |x1,x2,z|
f.printf "%g %g %g\n", x1,y,z
f.printf "%g %g %g\n", x2,y,z
f.printf "%g %g 0\n", x2,y,z
end
f.puts ""
s.hist_each do |x1,x2,z|
f.printf "%g %g 0\n", x1,y
f.printf "%g %g 0\n", x2,y
f.printf "%g %g 0\n", x2,y
end
f.puts "e"
i = i+1
end
end
puts "Histogram plot: #{@images[cmd]}"
end
end
end
end
s/stats/cmd_rep/
module Pwrake
class ReportMulti
def initialize(list,pattern)
@reports = list.map do |base|
Report.new(base,pattern)
end
@pattern = pattern
@elap_png = 'elap.png'
end
def report(stat_html)
if true
@reports.each do |r|
r.report_html
end
plot_elap
end
html = Report::HTML_HEAD + "<body><h1>Pwrake Statistics</h1>\n"
html << "<h2>Log files</h2>\n"
html << "<table>\n"
html << "<tr><th>log file</th><th>id</th><th>ncore</th><th>elapsed time(sec)</th><tr>\n"
@reports.each do |r|
html << "<tr><td><a href='#{r.html_file}'>#{r.base}</a></td>"
html << "<td>#{r.id_str}</td><td>#{r.ncore}</td><td>#{r.elap}</td><tr>\n"
end
html << "</table>\n"
html << "<h2>Elapsed time</h2>\n"
html << "<img src='#{@elap_png}' align='top'/></br>\n"
html << "<h2>Histogram of Execution time</h2>\n"
html << report_histogram()
html << "</body></html>\n"
File.open(stat_html,"w") do |f|
f.puts html
end
end
def plot_elap
a = @reports.map{|r| r.ncore * r.elap}.min
ymin = @reports.map{|r| r.elap}.min
ymin = 10**(Math.log10(ymin).floor)
ymax = 10**(Math.log10(ymin).floor+2)
IO.popen("gnuplot","r+") do |f|
f.puts "
set terminal png size 640,480
set output '#{@elap_png}'
set xlabel 'ncore'
set ylabel 'time (sec)'
set yrange [#{ymin}:#{ymax}]
set logscale xy
plot #{a}/x,'-' w lp lw 2 ps 2 title 'elapsed time'
"
@reports.sort_by{|r| r.ncore}.each do |r|
f.puts "#{r.ncore} #{r.elap}"
end
f.puts "e"
end
puts "Ncore-time plot: "+@elap_png
end
def report_histogram
@images = {}
@cmd_rep = {}
@reports.each do |r|
r.cmd_stat.each do |cmd,stat|
if stat.n > 2
@cmd_rep[cmd] ||= {}
@cmd_rep[cmd][r.id_str] = r # stat
end
end
end
@cmd_rep.each_key do |cmd|
@images[cmd] = 'hist_'+cmd+'.png'
end
histogram_plot
histogram_html
end
def histogram_html
html = ""
@cmd_rep.each do |cmd,cmd_rep|
html << "<p>Statistics of Elapsed time of #{cmd}</p>\n<table>\n"
html << "<th>id</th><th>ncore</th>"+Stat.html_th
cmd_rep.each do |id,r|
s = r.cmd_stat[cmd]
html << "<tr><td>#{id}</td><td>#{r.ncore}</td>" + s.html_td + "</tr>\n"
end
html << "</table>\n"
html << "<img src='./#{@images[cmd]}'/>\n"
end
html
end
def histogram_plot
@cmd_rep.each do |cmd,cmd_rep|
IO.popen("gnuplot","r+") do |f|
f.puts "
set terminal png # size 480,360
set output '#{@images[cmd]}'
set ylabel 'histogram'
set xlabel 'Execution time (sec)'
set logscale x
set title '#{cmd}'"
a = []
ncores = cmd_rep.keys
ncores.each_with_index{|n,i|
a << "'-' w histeps ls #{i+1} title ''"
a << "'-' w lines ls #{i+1} title '#{n}'"
}
f.puts "plot "+ a.join(',')
cmd_rep.each do |ncore,r|
s = r.cmd_stat[cmd]
2.times do
s.hist_each do |x1,x2,y|
x = Math.sqrt(x1*x2)
f.printf "%f %d\n", x, y
end
f.puts "e"
end
end
end
puts "Histogram plot: #{@images[cmd]}"
end
end
def histogram_plot2
@cmd_rep.each do |cmd,cmd_rep|
IO.popen("gnuplot","r+") do |f|
f.puts "
set terminal png # size 480,360
set output '#{@images[cmd]}'
set nohidden3d
set palette rgb 33,13,10
set pm3d
set ticslevel 0
unset colorbox
set yrange [#{cmd_rep.size}:0]
set logscale x
set title '#{cmd}'"
a = []
ncores = cmd_rep.keys.sort
ncores.each_with_index{|n,i|
a << "'-' w lines ls #{i+1} title '#{n} cores'"
}
f.puts "splot "+ a.join(',')
ncores.each_with_index do |ncore,i|
s = cmd_rep[ncore]
y = i
s.hist_each do |x1,x2,z|
f.printf "%g %g 0\n", x1,y
f.printf "%g %g 0\n", x2,y
f.printf "%g %g 0\n", x2,y
end
f.puts ""
s.hist_each do |x1,x2,z|
f.printf "%g %g %g\n", x1,y,z
f.printf "%g %g %g\n", x2,y,z
f.printf "%g %g 0\n", x2,y,z
end
f.puts ""
y = i+1
s.hist_each do |x1,x2,z|
f.printf "%g %g %g\n", x1,y,z
f.printf "%g %g %g\n", x2,y,z
f.printf "%g %g 0\n", x2,y,z
end
f.puts ""
s.hist_each do |x1,x2,z|
f.printf "%g %g 0\n", x1,y
f.printf "%g %g 0\n", x2,y
f.printf "%g %g 0\n", x2,y
end
f.puts "e"
i = i+1
end
end
puts "Histogram plot: #{@images[cmd]}"
end
end
end
end
|
module RailsSpecHarness
VERSION = "0.0.2"
end
Bump to v0.0.3
module RailsSpecHarness
VERSION = "0.0.3"
end
|
file "app/models/user_session.rb" do
%{
class UserSession < Authlogic::Session::Base
logout_on_timeout true # default is false
end
}
end
file "app/models/user.rb" do
%{
class User < ActiveRecord::Base
acts_as_authentic do |c|
c.logged_in_timeout = 10.minutes # default is 10.minutes
end
end
}
end
file "app/controllers/user_sessions_controller.rb" do
%{
class UserSessionsController < ApplicationController
before_filter :require_no_user, :only => [:new, :create]
before_filter :require_user, :only => :destroy
def new
@user_session = UserSession.new
end
def create
@user_session = UserSession.new(params[:user_session])
if @user_session.save
flash[:notice] = "Login successful!"
redirect_back_or_default account_url
else
render :action => :new
end
end
def destroy
current_user_session.destroy
flash[:notice] = "Logout successful!"
redirect_back_or_default new_user_session_url
end
end
}
end
inside "app/views/user_sessions" do
file "new.html.erb" do
%{
<h1>Login</h1>
<% form_for @user_session, :url => user_session_path do |f| %>
<%= f.error_messages %>
<%= f.label :login %><br />
<%= f.text_field :login %><br />
<br />
<%= f.label :password %><br />
<%= f.password_field :password %><br />
<br />
<%= f.check_box :remember_me %><%= f.label :remember_me %><br />
<br />
<%= f.submit "Login" %>
<% end %>
}
end
end
route 'map.resource :user_session'
file "app/controllers/application.rb" do
%{
class ApplicationController < ActionController::Base
filter_parameter_logging :password, :password_confirmation
helper_method :current_user_session, :current_user
private
def current_user_session
return @current_user_session if defined?(@current_user_session)
@current_user_session = UserSession.find
end
def current_user
return @current_user if defined?(@current_user)
@current_user = current_user_session && current_user_session.user
end
end
}
end
file "app/controllers/users_controller.rb" do
%{
class UsersController < ApplicationController
before_filter :require_no_user, :only => [:new, :create]
before_filter :require_user, :only => [:show, :edit, :update]
def new
@user = User.new
end
def create
@user = User.new(params[:user])
if @user.save
flash[:notice] = "Account registered!"
redirect_back_or_default account_url
else
render :action => :new
end
end
def show
@user = @current_user
end
def edit
@user = @current_user
end
def update
@user = @current_user # makes our views "cleaner" and more consistent
if @user.update_attributes(params[:user])
flash[:notice] = "Account updated!"
redirect_to account_url
else
render :action => :edit
end
end
end
}
end
inside "app/views/users" do
file "_form.html.erb" do
%{
= form.label :login %><br />
<%= form.text_field :login %><br />
<br />
<%= form.label :password, form.object.new_record? ? nil : "Change password" %><br />
<%= form.password_field :password %><br />
<br />
<%= form.label :password_confirmation %><br />
<%= form.password_field :password_confirmation %><br />
}
end
file "edit.html.erb" do
%{
<h1>Edit My Account</h1>
<% form_for @user, :url => account_path do |f| %>
<%= f.error_messages %>
<%= render :partial => "form", :object => f %>
<%= f.submit "Update" %>
<% end %>
<br /><%= link_to "My Profile", account_path %>
}
end
file "new.html.erb" do
%{
<h1>Register</h1>
<% form_for @user, :url => account_path do |f| %>
<%= f.error_messages %>
<%= render :partial => "form", :object => f %>
<%= f.submit "Register" %>
<% end %>
}
end
file "show.html.erb" do
%{
<p>
<b>Login:</b>
<%=h @user.login %>
</p>
<p>
<b>Login count:</b>
<%=h @user.login_count %>
</p>
<p>
<b>Last request at:</b>
<%=h @user.last_request_at %>
</p>
<p>
<b>Last login at:</b>
<%=h @user.last_login_at %>
</p>
<p>
<b>Current login at:</b>
<%=h @user.current_login_at %>
</p>
<p>
<b>Last login ip:</b>
<%=h @user.last_login_ip %>
</p>
<p>
<b>Current login ip:</b>
<%=h @user.current_login_ip %>
</p>
<%= link_to 'Edit', edit_account_path %>
}
end
end
# can't rely on internal rails migration generation, so we do it this way
run "script/generate migration beet_authlogic_create_user"
#now open it
file "db/migrate/*beet_authlogic_create_user*" do
%{
class BeetAuthlogicCreateUser < ActiveRecord::Migration
def self.up
create_table :users do |t|
t.string :login, :null => false # optional, you can use email instead, or both
t.string :email, :null => false # optional, you can use login instead, or both
t.string :crypted_password, :null => false # optional, see below
t.string :password_salt, :null => false # optional, but highly recommended
t.string :persistence_token, :null => false # required
t.string :single_access_token, :null => false # optional, see Authlogic::Session::Params
t.string :perishable_token, :null => false # optional, see Authlogic::Session::Perishability
# Magic columns, just like ActiveRecord's created_at and updated_at. These are automatically maintained by Authlogic if they are present.
t.integer :login_count, :null => false, :default => 0 # optional, see Authlogic::Session::MagicColumns
t.integer :failed_login_count, :null => false, :default => 0 # optional, see Authlogic::Session::MagicColumns
t.datetime :last_request_at # optional, see Authlogic::Session::MagicColumns
t.datetime :current_login_at # optional, see Authlogic::Session::MagicColumns
t.datetime :last_login_at # optional, see Authlogic::Session::MagicColumns
t.string :current_login_ip # optional, see Authlogic::Session::MagicColumns
t.string :last_login_ip # optional, see Authlogic::Session::MagicColumns
end
end
def self.down
drop_table :users
end
end
}
end
fix dir glob
file "app/models/user_session.rb" do
%{
class UserSession < Authlogic::Session::Base
logout_on_timeout true # default is false
end
}
end
file "app/models/user.rb" do
%{
class User < ActiveRecord::Base
acts_as_authentic do |c|
c.logged_in_timeout = 10.minutes # default is 10.minutes
end
end
}
end
file "app/controllers/user_sessions_controller.rb" do
%{
class UserSessionsController < ApplicationController
before_filter :require_no_user, :only => [:new, :create]
before_filter :require_user, :only => :destroy
def new
@user_session = UserSession.new
end
def create
@user_session = UserSession.new(params[:user_session])
if @user_session.save
flash[:notice] = "Login successful!"
redirect_back_or_default account_url
else
render :action => :new
end
end
def destroy
current_user_session.destroy
flash[:notice] = "Logout successful!"
redirect_back_or_default new_user_session_url
end
end
}
end
inside "app/views/user_sessions" do
file "new.html.erb" do
%{
<h1>Login</h1>
<% form_for @user_session, :url => user_session_path do |f| %>
<%= f.error_messages %>
<%= f.label :login %><br />
<%= f.text_field :login %><br />
<br />
<%= f.label :password %><br />
<%= f.password_field :password %><br />
<br />
<%= f.check_box :remember_me %><%= f.label :remember_me %><br />
<br />
<%= f.submit "Login" %>
<% end %>
}
end
end
route 'map.resource :user_session'
file "app/controllers/application.rb" do
%{
class ApplicationController < ActionController::Base
filter_parameter_logging :password, :password_confirmation
helper_method :current_user_session, :current_user
private
def current_user_session
return @current_user_session if defined?(@current_user_session)
@current_user_session = UserSession.find
end
def current_user
return @current_user if defined?(@current_user)
@current_user = current_user_session && current_user_session.user
end
end
}
end
file "app/controllers/users_controller.rb" do
%{
class UsersController < ApplicationController
before_filter :require_no_user, :only => [:new, :create]
before_filter :require_user, :only => [:show, :edit, :update]
def new
@user = User.new
end
def create
@user = User.new(params[:user])
if @user.save
flash[:notice] = "Account registered!"
redirect_back_or_default account_url
else
render :action => :new
end
end
def show
@user = @current_user
end
def edit
@user = @current_user
end
def update
@user = @current_user # makes our views "cleaner" and more consistent
if @user.update_attributes(params[:user])
flash[:notice] = "Account updated!"
redirect_to account_url
else
render :action => :edit
end
end
end
}
end
inside "app/views/users" do
file "_form.html.erb" do
%{
= form.label :login %><br />
<%= form.text_field :login %><br />
<br />
<%= form.label :password, form.object.new_record? ? nil : "Change password" %><br />
<%= form.password_field :password %><br />
<br />
<%= form.label :password_confirmation %><br />
<%= form.password_field :password_confirmation %><br />
}
end
file "edit.html.erb" do
%{
<h1>Edit My Account</h1>
<% form_for @user, :url => account_path do |f| %>
<%= f.error_messages %>
<%= render :partial => "form", :object => f %>
<%= f.submit "Update" %>
<% end %>
<br /><%= link_to "My Profile", account_path %>
}
end
file "new.html.erb" do
%{
<h1>Register</h1>
<% form_for @user, :url => account_path do |f| %>
<%= f.error_messages %>
<%= render :partial => "form", :object => f %>
<%= f.submit "Register" %>
<% end %>
}
end
file "show.html.erb" do
%{
<p>
<b>Login:</b>
<%=h @user.login %>
</p>
<p>
<b>Login count:</b>
<%=h @user.login_count %>
</p>
<p>
<b>Last request at:</b>
<%=h @user.last_request_at %>
</p>
<p>
<b>Last login at:</b>
<%=h @user.last_login_at %>
</p>
<p>
<b>Current login at:</b>
<%=h @user.current_login_at %>
</p>
<p>
<b>Last login ip:</b>
<%=h @user.last_login_ip %>
</p>
<p>
<b>Current login ip:</b>
<%=h @user.current_login_ip %>
</p>
<%= link_to 'Edit', edit_account_path %>
}
end
end
# can't rely on internal rails migration generation, so we do it this way
run "script/generate migration beet_authlogic_create_user"
#now open it
file(Dir.glob('db/migrate/*beet_authlogic_create_user*').first) do
%{
class BeetAuthlogicCreateUser < ActiveRecord::Migration
def self.up
create_table :users do |t|
t.string :login, :null => false # optional, you can use email instead, or both
t.string :email, :null => false # optional, you can use login instead, or both
t.string :crypted_password, :null => false # optional, see below
t.string :password_salt, :null => false # optional, but highly recommended
t.string :persistence_token, :null => false # required
t.string :single_access_token, :null => false # optional, see Authlogic::Session::Params
t.string :perishable_token, :null => false # optional, see Authlogic::Session::Perishability
# Magic columns, just like ActiveRecord's created_at and updated_at. These are automatically maintained by Authlogic if they are present.
t.integer :login_count, :null => false, :default => 0 # optional, see Authlogic::Session::MagicColumns
t.integer :failed_login_count, :null => false, :default => 0 # optional, see Authlogic::Session::MagicColumns
t.datetime :last_request_at # optional, see Authlogic::Session::MagicColumns
t.datetime :current_login_at # optional, see Authlogic::Session::MagicColumns
t.datetime :last_login_at # optional, see Authlogic::Session::MagicColumns
t.string :current_login_ip # optional, see Authlogic::Session::MagicColumns
t.string :last_login_ip # optional, see Authlogic::Session::MagicColumns
end
end
def self.down
drop_table :users
end
end
}
end
|
module Bibliothecary
module Parsers
class Carthage
include Bibliothecary::Analyser
def self.mapping
{
/^Cartfile$/ => :parse_cartfile,
/^Cartfile\.private$/ => :parse_cartfile_private,
/^Cartfile\.resolved$/ => :parse_cartfile_resolved
}
end
def self.parse_cartfile(manifest)
response = Typhoeus.post("https://carthageparser.herokuapp.com/cartfile", params: {body: manifest})
json = JSON.parse(response.body)
json.map do |dependency|
{
name: dependency['name'],
version: dependency['version'],
type: dependency["type"]
}
end
end
def self.parse_cartfile_private(manifest)
response = Typhoeus.post("https://carthageparser.herokuapp.com/cartfile.private", params: {body: manifest})
json = JSON.parse(response.body)
json.map do |dependency|
{
name: dependency['name'],
version: dependency['version'],
type: dependency["type"]
}
end
end
def self.parse_cartfile_resolved(manifest)
response = Typhoeus.post("https://carthageparser.herokuapp.com/cartfile.resolved", params: {body: manifest})
json = JSON.parse(response.body)
json.map do |dependency|
{
name: dependency['name'],
version: dependency['version'],
type: dependency["type"]
}
end
end
end
end
end
Refactored Carthage parser
module Bibliothecary
module Parsers
class Carthage
include Bibliothecary::Analyser
def self.mapping
{
/^Cartfile$/ => :parse_cartfile,
/^Cartfile\.private$/ => :parse_cartfile_private,
/^Cartfile\.resolved$/ => :parse_cartfile_resolved
}
end
def self.parse_cartfile(manifest)
map_dependencies(manifest, 'cartfile')
end
def self.parse_cartfile_private(manifest)
map_dependencies(manifest, 'cartfile.private')
end
def self.parse_cartfile_resolved(manifest)
map_dependencies(manifest, 'cartfile.resolved')
end
def self.map_dependencies(manifest, path)
response = Typhoeus.post("https://carthageparser.herokuapp.com/#{path}", params: {body: manifest})
json = JSON.parse(response.body)
json.map do |dependency|
{
name: dependency['name'],
version: dependency['version'],
type: dependency["type"]
}
end
end
end
end
end
|
##
## NEW! for sidekiqs new deamonized style
##
# https://github.com/seuros/capistrano-sidekiq
namespace :load do
task :defaults do
set :sidekiq_six_default_hooks, -> { true }
set :sidekiq_six_deamon_file, -> { "sidekiq_#{fetch(:application)}_#{fetch(:stage)}" }
set :sidekiq_six_timeout, -> { 10 }
set :sidekiq_six_roles, -> { :app }
set :sidekiq_six_processes, -> { 1 }
# Sidekiq queued processes:
set :sidekiq_six_special_queues, -> { false }
set :sidekiq_six_queued_processes, -> { [] }
## If needed you can set special queues and configure it seperately
## .. options:
## - queue: string # => queue-name (default: "default")
## - processes: integer # => number processes (default: 1)
## - worker: integer # => concurrency (default: 7)
## => [ { queue: "queue_name", processes: "count", worker: "count" }]
set :sidekiq_six_deamon_path, -> { "/lib/systemd/system" }
set :sidekiq_six_deamon_template, -> { :default }
set :sidekiq_six_ruby_vm, -> { :system } # ( :rvm | :rbenv | :system )
set :sidekiq_six_user, -> { 'deploy' } # role-user
end
end
namespace :sidekiq_six do
def for_each_process(reverse = false, &block)
pids = processes_deamones
pids.reverse! if reverse
pids.each_with_index do |service_file, idx|
within fetch(:sidekiq_six_deamon_path) do
yield(service_file, idx)
end
end
end
def processes_deamones
deamons = []
if fetch(:sidekiq_six_special_queues)
fetch(:sidekiq_six_queued_processes, []).each do |qp|
counter = (qp[:processes] && qp[:processes].to_i > 0 ? qp[:processes].to_i : 1)
if counter > 1
counter.times do |idx|
deamons.push "#{ fetch(:sidekiq_six_deamon_file) }-#{ qp[:queue] }-#{ idx }"
end
else
deamons.push "#{ fetch(:sidekiq_six_deamon_file) }-#{ qp[:queue] }"
end
end
else
counter = fetch(:sidekiq_six_processes).to_i
if counter > 1
counter.times do |idx|
deamons.push "#{ fetch(:sidekiq_six_deamon_file) }-#{ idx }"
end
else
deamons.push "#{ fetch(:sidekiq_six_deamon_file) }"
end
end
deamons
end
def sidekiq_special_config(idx)
if fetch(:sidekiq_six_special_queues)
settingz = []
fetch(:sidekiq_six_queued_processes).each do |that|
(that[:processes] && that[:processes].to_i > 0 ? that[:processes].to_i : 1 ).to_i.times do
sttng_hash = {}
sttng_hash[:queue] = that[:queue] ? that[:queue] : "default"
sttng_hash[:concurrency] = that[:worker] && that[:worker].to_i > 0 ? that[:worker].to_i : 7
settingz.push( sttng_hash )
end
end
settingz[ idx.to_i ]
else
{}
end
end
def upload_deamon(service_file, idx = 0)
args = []
args.push "--environment #{fetch(:stage)}"
args.push "--require #{fetch(:sidekiq_six_require)}" if fetch(:sidekiq_six_require)
args.push "--tag #{fetch(:sidekiq_six_tag)}" if fetch(:sidekiq_six_tag)
if fetch(:sidekiq_six_special_queues)
queue_config = sidekiq_special_config(idx)
args.push "--queue #{ queue_config[:queue] || 'default' }"
args.push "--concurrency #{ queue_config[:concurrency] || 7 }"
else
Array(fetch(:sidekiq_six_queue)).each do |queue|
args.push "--queue #{queue}"
end
args.push "--concurrency #{fetch(:sidekiq_six_concurrency)}" if fetch(:sidekiq_six_concurrency)
end
args.push "--config #{fetch(:sidekiq_six_config)}" if fetch(:sidekiq_six_config)
# use sidekiq_options for special options
args.push fetch(:sidekiq_six_options) if fetch(:sidekiq_six_options)
side_kiq_args = args.compact.join(' ')
@service_file = service_file
@side_kiq_args = side_kiq_args
if fetch(:sidekiq_six_deamon_template, :default) == :default
magic_template("sidekiq.service", '/tmp/sidekiq.service')
else
magic_template(fetch(:sidekiq_six_deamon_template), '/tmp/sidekiq.service')
end
execute :sudo, :mv, '/tmp/sidekiq.service', "#{ fetch(:sidekiq_six_deamon_path) }/#{ service_file }.service"
end
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ###
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ###
desc 'Creates and uploads sidekiq6 DEAMON files'
task :upload_deamons do
on roles fetch(:sidekiq_six_roles) do
for_each_process do |service_file, idx|
upload_deamon(service_file, idx)
end
end
end
%w[start stop restart enable disable is-enabled].each do |cmnd|
desc "#{cmnd.capitalize} sidekiq6 service"
task cmnd.gsub(/-/, '_') do
on roles fetch(:sidekiq_six_roles) do
for_each_process do |service_file, idx|
execute :sudo, :systemctl, cmnd, service_file
end
end
end
end
desc "Quiet sidekiq6 service"
task :quiet do
on roles fetch(:sidekiq_six_roles) do
for_each_process do |service_file, idx|
execute :sudo, :systemctl, 'kill -s TSTP', service_file
end
end
end
desc "check sidekiq6 service status"
task :check_status do
on roles fetch(:sidekiq_six_roles) do
for_each_process do |service_file, idx|
puts "#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#"
puts "#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#"
puts service_file
puts "#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#"
output = capture :sudo, "systemctl status", service_file
output.each_line do |line|
puts line
end
puts "#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#"
end
end
end
end
namespace :deploy do
before :starting, :stop_sidekiq_services do
if fetch(:sidekiq_six_default_hooks)
invoke "sidekiq_six:stop"
end
end
after :finished, :restart_sidekiq_services do
if fetch(:sidekiq_six_default_hooks)
invoke "sidekiq_six:start"
end
end
end
add logs to sidekiq-six
##
## NEW! for sidekiqs new deamonized style
##
# https://github.com/seuros/capistrano-sidekiq
namespace :load do
task :defaults do
set :sidekiq_six_default_hooks, -> { true }
set :sidekiq_six_deamon_file, -> { "sidekiq_#{fetch(:application)}_#{fetch(:stage)}" }
set :sidekiq_six_timeout, -> { 10 }
set :sidekiq_six_roles, -> { :app }
set :sidekiq_six_processes, -> { 1 }
# Sidekiq queued processes:
set :sidekiq_six_special_queues, -> { false }
set :sidekiq_six_queued_processes, -> { [] }
## If needed you can set special queues and configure it seperately
## .. options:
## - queue: string # => queue-name (default: "default")
## - processes: integer # => number processes (default: 1)
## - worker: integer # => concurrency (default: 7)
## => [ { queue: "queue_name", processes: "count", worker: "count" }]
set :sidekiq_six_deamon_path, -> { "/lib/systemd/system" }
set :sidekiq_six_deamon_template, -> { :default }
set :sidekiq_six_ruby_vm, -> { :system } # ( :rvm | :rbenv | :system )
set :sidekiq_six_user, -> { 'deploy' } # role-user
set :sidekiq_six_log_lines, -> { 100 }
end
end
namespace :sidekiq_six do
def for_each_process(reverse = false, &block)
pids = processes_deamones
pids.reverse! if reverse
pids.each_with_index do |service_file, idx|
within fetch(:sidekiq_six_deamon_path) do
yield(service_file, idx)
end
end
end
def processes_deamones
deamons = []
if fetch(:sidekiq_six_special_queues)
fetch(:sidekiq_six_queued_processes, []).each do |qp|
counter = (qp[:processes] && qp[:processes].to_i > 0 ? qp[:processes].to_i : 1)
if counter > 1
counter.times do |idx|
deamons.push "#{ fetch(:sidekiq_six_deamon_file) }-#{ qp[:queue] }-#{ idx }"
end
else
deamons.push "#{ fetch(:sidekiq_six_deamon_file) }-#{ qp[:queue] }"
end
end
else
counter = fetch(:sidekiq_six_processes).to_i
if counter > 1
counter.times do |idx|
deamons.push "#{ fetch(:sidekiq_six_deamon_file) }-#{ idx }"
end
else
deamons.push "#{ fetch(:sidekiq_six_deamon_file) }"
end
end
deamons
end
def sidekiq_special_config(idx)
if fetch(:sidekiq_six_special_queues)
settingz = []
fetch(:sidekiq_six_queued_processes).each do |that|
(that[:processes] && that[:processes].to_i > 0 ? that[:processes].to_i : 1 ).to_i.times do
sttng_hash = {}
sttng_hash[:queue] = that[:queue] ? that[:queue] : "default"
sttng_hash[:concurrency] = that[:worker] && that[:worker].to_i > 0 ? that[:worker].to_i : 7
settingz.push( sttng_hash )
end
end
settingz[ idx.to_i ]
else
{}
end
end
def upload_deamon(service_file, idx = 0)
args = []
args.push "--environment #{fetch(:stage)}"
args.push "--require #{fetch(:sidekiq_six_require)}" if fetch(:sidekiq_six_require)
args.push "--tag #{fetch(:sidekiq_six_tag)}" if fetch(:sidekiq_six_tag)
if fetch(:sidekiq_six_special_queues)
queue_config = sidekiq_special_config(idx)
args.push "--queue #{ queue_config[:queue] || 'default' }"
args.push "--concurrency #{ queue_config[:concurrency] || 7 }"
else
Array(fetch(:sidekiq_six_queue)).each do |queue|
args.push "--queue #{queue}"
end
args.push "--concurrency #{fetch(:sidekiq_six_concurrency)}" if fetch(:sidekiq_six_concurrency)
end
args.push "--config #{fetch(:sidekiq_six_config)}" if fetch(:sidekiq_six_config)
# use sidekiq_options for special options
args.push fetch(:sidekiq_six_options) if fetch(:sidekiq_six_options)
side_kiq_args = args.compact.join(' ')
@service_file = service_file
@side_kiq_args = side_kiq_args
if fetch(:sidekiq_six_deamon_template, :default) == :default
magic_template("sidekiq.service", '/tmp/sidekiq.service')
else
magic_template(fetch(:sidekiq_six_deamon_template), '/tmp/sidekiq.service')
end
execute :sudo, :mv, '/tmp/sidekiq.service', "#{ fetch(:sidekiq_six_deamon_path) }/#{ service_file }.service"
end
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ###
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ###
desc 'Creates and uploads sidekiq6 DEAMON files'
task :upload_deamons do
on roles fetch(:sidekiq_six_roles) do
for_each_process do |service_file, idx|
upload_deamon(service_file, idx)
end
end
end
%w[start stop restart enable disable is-enabled].each do |cmnd|
desc "#{cmnd.capitalize} sidekiq6 service"
task cmnd.gsub(/-/, '_') do
on roles fetch(:sidekiq_six_roles) do
for_each_process do |service_file, idx|
execute :sudo, :systemctl, cmnd, service_file
end
end
end
end
desc "Quiet sidekiq6 service"
task :quiet do
on roles fetch(:sidekiq_six_roles) do
for_each_process do |service_file, idx|
execute :sudo, :systemctl, 'kill -s TSTP', service_file
end
end
end
desc "Get logs for sidekiq6 service"
task :logs do
on roles fetch(:sidekiq_six_roles) do
for_each_process do |service_file, idx|
execute :sudo, :journalctl, '-u', service_file, '-rn', fetch(:sidekiq_six_log_lines, 100)
end
end
end
desc "check sidekiq6 service status"
task :check_status do
on roles fetch(:sidekiq_six_roles) do
for_each_process do |service_file, idx|
puts "#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#"
puts "#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#"
puts service_file
puts "#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#"
output = capture :sudo, "systemctl status", service_file
output.each_line do |line|
puts line
end
puts "#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#"
end
end
end
end
namespace :deploy do
before :starting, :stop_sidekiq_services do
if fetch(:sidekiq_six_default_hooks)
invoke "sidekiq_six:stop"
end
end
after :finished, :restart_sidekiq_services do
if fetch(:sidekiq_six_default_hooks)
invoke "sidekiq_six:start"
end
end
end |
#
# Author:: Joshua Timberman (<joshua@chef.io>)
# Author:: Graeme Mathieson (<mathie@woss.name>)
#
# Copyright:: Copyright (c) Chef Software Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
require "etc" unless defined?(Etc)
require_relative "../../mixin/homebrew_user"
class Chef
class Provider
class Package
class Homebrew < Chef::Provider::Package
allow_nils
use_multipackage_api
provides :package, os: "darwin", override: true
provides :homebrew_package
include Chef::Mixin::HomebrewUser
def load_current_resource
@current_resource = Chef::Resource::HomebrewPackage.new(new_resource.name)
current_resource.package_name(new_resource.package_name)
current_resource.version(get_current_versions)
logger.trace("#{new_resource} current package version(s): #{current_resource.version}") if current_resource.version
current_resource
end
def candidate_version
package_name_array.map do |package_name|
available_version(package_name)
end
end
def get_current_versions
package_name_array.map do |package_name|
installed_version(package_name)
end
end
def install_package(names, versions)
brew_cmd_output("install", options, names.compact)
end
# upgrades are a bit harder in homebrew than other package formats. If you try to
# brew upgrade a package that isn't installed it will fail so if a user specifies
# the action of upgrade we need to figure out which packages need to be installed
# and which packages can be upgrades. We do this by checking if brew_info has an entry
# via the installed_version helper.
def upgrade_package(names, versions)
upgrade_pkgs = names.filter_map { |x| x if installed_version(x) }
install_pkgs = names.filter_map { |x| x unless installed_version(x) }
brew_cmd_output("upgrade", options, upgrade_pkgs) unless upgrade_pkgs.empty?
brew_cmd_output("install", options, install_pkgs) unless install_pkgs.empty?
end
def remove_package(names, versions)
brew_cmd_output("uninstall", options, names.compact)
end
# Homebrew doesn't really have a notion of purging, do a "force remove"
def purge_package(names, versions)
brew_cmd_output("uninstall", "--force", options, names.compact)
end
# We implement a querying method that returns the JSON-as-Hash
# data for a formula per the Homebrew documentation. Previous
# implementations of this provider in the homebrew cookbook
# performed a bit of magic with the load path to get this
# information, but that is not any more robust than using the
# command-line interface that returns the same thing.
#
# https://docs.brew.sh/Querying-Brew
#
# @returns [Hash] a hash of package information where the key is the package name
def brew_info
@brew_info ||= begin
command_array = ["info", "--json=v1"].concat Array(new_resource.package_name)
# convert the array of hashes into a hash where the key is the package name
Hash[Chef::JSONCompat.from_json(brew_cmd_output(command_array)).collect { |pkg| [pkg["name"], pkg] }]
end
end
#
# Return the package information given a package name or package alias
#
# @param [String] name_or_alias The name of the package or its alias
#
# @return [Hash] Package information
#
def package_info(package_name)
# return the package hash if it's in the brew info hash
return brew_info[package_name] if brew_info[package_name]
# check each item in the hash to see if we were passed an alias
brew_info.each_value do |p|
return p if p["aliases"].include?(package_name)
end
{}
end
# Some packages (formula) are "keg only" and aren't linked,
# because multiple versions installed can cause conflicts. We
# handle this by using the last installed version as the
# "current" (as in latest). Otherwise, we will use the version
# that brew thinks is linked as the current version.
#
# @param [String] package name
#
# @returns [String] package version
def installed_version(i)
p_data = package_info(i)
if p_data["keg_only"]
if p_data["installed"].empty?
nil
else
p_data["installed"].last["version"]
end
else
p_data["linked_keg"]
end
end
# Packages (formula) available to install should have a
# "stable" version, per the Homebrew project's acceptable
# formula documentation, so we will rely on that being the
# case. Older implementations of this provider in the homebrew
# cookbook would fall back to +brew_info['version']+, but the
# schema has changed, and homebrew is a constantly rolling
# forward project.
#
# https://github.com/Homebrew/homebrew/wiki/Acceptable-Formulae#stable-versions
#
# @param [String] package name
#
# @returns [String] package version
def available_version(i)
p_data = package_info(i)
p_data["versions"]["stable"]
end
def brew_cmd_output(*command)
homebrew_uid = find_homebrew_uid(new_resource.respond_to?(:homebrew_user) && new_resource.homebrew_user)
homebrew_user = Etc.getpwuid(homebrew_uid)
logger.trace "Executing 'brew #{command.join(" ")}' as user '#{homebrew_user.name}'"
# FIXME: this 1800 second default timeout should be deprecated
output = shell_out!('brew', *command, timeout: 1800, user: homebrew_uid, environment: { "HOME" => homebrew_user.dir, "RUBYOPT" => nil, "TMPDIR" => nil })
output.stdout.chomp
end
end
end
end
end
Chefstyle fixes
Signed-off-by: Tim Smith <764ef62106582a09ed09dfa0b6bff7c05fd7d1e4@chef.io>
#
# Author:: Joshua Timberman (<joshua@chef.io>)
# Author:: Graeme Mathieson (<mathie@woss.name>)
#
# Copyright:: Copyright (c) Chef Software Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
require "etc" unless defined?(Etc)
require_relative "../../mixin/homebrew_user"
class Chef
class Provider
class Package
class Homebrew < Chef::Provider::Package
allow_nils
use_multipackage_api
provides :package, os: "darwin", override: true
provides :homebrew_package
include Chef::Mixin::HomebrewUser
def load_current_resource
@current_resource = Chef::Resource::HomebrewPackage.new(new_resource.name)
current_resource.package_name(new_resource.package_name)
current_resource.version(get_current_versions)
logger.trace("#{new_resource} current package version(s): #{current_resource.version}") if current_resource.version
current_resource
end
def candidate_version
package_name_array.map do |package_name|
available_version(package_name)
end
end
def get_current_versions
package_name_array.map do |package_name|
installed_version(package_name)
end
end
def install_package(names, versions)
brew_cmd_output("install", options, names.compact)
end
# upgrades are a bit harder in homebrew than other package formats. If you try to
# brew upgrade a package that isn't installed it will fail so if a user specifies
# the action of upgrade we need to figure out which packages need to be installed
# and which packages can be upgrades. We do this by checking if brew_info has an entry
# via the installed_version helper.
def upgrade_package(names, versions)
upgrade_pkgs = names.filter_map { |x| x if installed_version(x) }
install_pkgs = names.filter_map { |x| x unless installed_version(x) }
brew_cmd_output("upgrade", options, upgrade_pkgs) unless upgrade_pkgs.empty?
brew_cmd_output("install", options, install_pkgs) unless install_pkgs.empty?
end
def remove_package(names, versions)
brew_cmd_output("uninstall", options, names.compact)
end
# Homebrew doesn't really have a notion of purging, do a "force remove"
def purge_package(names, versions)
brew_cmd_output("uninstall", "--force", options, names.compact)
end
# We implement a querying method that returns the JSON-as-Hash
# data for a formula per the Homebrew documentation. Previous
# implementations of this provider in the homebrew cookbook
# performed a bit of magic with the load path to get this
# information, but that is not any more robust than using the
# command-line interface that returns the same thing.
#
# https://docs.brew.sh/Querying-Brew
#
# @returns [Hash] a hash of package information where the key is the package name
def brew_info
@brew_info ||= begin
command_array = ["info", "--json=v1"].concat Array(new_resource.package_name)
# convert the array of hashes into a hash where the key is the package name
Hash[Chef::JSONCompat.from_json(brew_cmd_output(command_array)).collect { |pkg| [pkg["name"], pkg] }]
end
end
#
# Return the package information given a package name or package alias
#
# @param [String] name_or_alias The name of the package or its alias
#
# @return [Hash] Package information
#
def package_info(package_name)
# return the package hash if it's in the brew info hash
return brew_info[package_name] if brew_info[package_name]
# check each item in the hash to see if we were passed an alias
brew_info.each_value do |p|
return p if p["aliases"].include?(package_name)
end
{}
end
# Some packages (formula) are "keg only" and aren't linked,
# because multiple versions installed can cause conflicts. We
# handle this by using the last installed version as the
# "current" (as in latest). Otherwise, we will use the version
# that brew thinks is linked as the current version.
#
# @param [String] package name
#
# @returns [String] package version
def installed_version(i)
p_data = package_info(i)
if p_data["keg_only"]
if p_data["installed"].empty?
nil
else
p_data["installed"].last["version"]
end
else
p_data["linked_keg"]
end
end
# Packages (formula) available to install should have a
# "stable" version, per the Homebrew project's acceptable
# formula documentation, so we will rely on that being the
# case. Older implementations of this provider in the homebrew
# cookbook would fall back to +brew_info['version']+, but the
# schema has changed, and homebrew is a constantly rolling
# forward project.
#
# https://github.com/Homebrew/homebrew/wiki/Acceptable-Formulae#stable-versions
#
# @param [String] package name
#
# @returns [String] package version
def available_version(i)
p_data = package_info(i)
p_data["versions"]["stable"]
end
def brew_cmd_output(*command)
homebrew_uid = find_homebrew_uid(new_resource.respond_to?(:homebrew_user) && new_resource.homebrew_user)
homebrew_user = Etc.getpwuid(homebrew_uid)
logger.trace "Executing 'brew #{command.join(" ")}' as user '#{homebrew_user.name}'"
# FIXME: this 1800 second default timeout should be deprecated
output = shell_out!("brew", *command, timeout: 1800, user: homebrew_uid, environment: { "HOME" => homebrew_user.dir, "RUBYOPT" => nil, "TMPDIR" => nil })
output.stdout.chomp
end
end
end
end
end
|
require 'json'
require 'chef/mixin/shell_out'
require 'chef_metal/provisioner'
require 'chef_metal/version'
require 'chef_metal/machine/basic_machine'
require 'chef_metal/machine/unix_machine'
require 'chef_metal/convergence_strategy/install_cached'
require 'chef_metal/transport/ssh'
module ChefMetalSsh
# Provisions machines in ssh.
class SshProvisioner < ChefMetal::Provisioner
include Chef::Mixin::ShellOut
#def initialize(ssh_connect_options={})
# @ssh_connect_options = ssh_connect_options
#end
def initialize(target_host=nil)
@target_host = target_host
end
attr_reader :target_host
# Acquire a machine, generally by provisioning it. Returns a Machine
# object pointing at the machine, allowing useful actions like setup,
# converge, execute, file and directory. The Machine object will have a
# "node" property which must be saved to the server (if it is any
# different from the original node object).
#
# ## Parameters
# action_handler - the action_handler object that provides context.
# node - node object (deserialized json) representing this machine. If
# the node has a provisioner_options hash in it, these will be used
# instead of options provided by the provisioner. TODO compare and
# fail if different?
# node will have node['normal']['provisioner_options'] in it with any options.
# It is a hash with this format:
#
# -- provisioner_url: ssh:<ssh_path>
# -- template: template name
# -- template_options: additional arguments for templates
# -- backingstore: backing storage (lvm, thinpools, btrfs etc)
# -- config_file: <path> path to ssh file a la https://wiki.archlinux.org/index.php/Linux_Containers#Configuration_file
# -- extra_config: { 'key' => 'value', ... } a set of ssh config key/value pairs to individually set. Merges with, and overrides any values in config_file.
#
# node['normal']['provisioner_output'] will be populated with information
# about the created machine. For ssh, it is a hash with this
# format:
#
# -- provisioner_url: ssh:<ssh_path>
# -- name: container name
#
def acquire_machine(action_handler, node)
# TODO verify that the existing provisioner_url in the node is the same as ours
# Set up the modified node data
provisioner_options = node['normal']['provisioner_options']
Chef::Log.debug("======================================>")
Chef::Log.debug("acquire_machine - provisioner_options.inspect: #{provisioner_options.inspect}")
Chef::Log.debug("======================================>")
if @target_host.nil?
target_host = get_target_connection_method(node)
@target_host = target_host
end
Chef::Log.debug("======================================>")
Chef::Log.debug("acquire_machine - target_host: #{target_host}")
Chef::Log.debug("======================================>")
# Set up Provisioner Output
# TODO - make url the chef server url path? maybe disk path if zero?
provisioner_output = node['normal']['provisioner_output'] || {
'provisioner_url' => "ssh:#{target_host}",
'name' => node['name']
}
Chef::Log.debug("======================================>")
Chef::Log.debug("acquire_machine - provisioner_output.inspect: #{provisioner_output.inspect}")
Chef::Log.debug("======================================>")
node['normal']['provisioner_output'] = provisioner_output
# Create machine object for callers to use
machine_for(node)
end
# Connect to machine without acquiring it
def connect_to_machine(node)
if @target_host.nil?
target_host = get_target_connection_method(node)
@target_host = target_host
end
Chef::Log.debug("======================================>")
Chef::Log.debug("connect_to_machine - target_host: #{target_host}")
Chef::Log.debug("======================================>")
machine_for(node)
end
def delete_machine(action_handler, node)
true
end
def stop_machine(action_handler, node)
true
end
# Not meant to be part of public interface
def transport_for(node)
create_ssh_transport(node)
end
protected
def get_target_connection_method(node)
provisioner_options = node['normal']['provisioner_options']
target_ip = ''
target_ip = provisioner_options['target_ip'] || nil
target_fqdn = ''
target_fqdn = provisioner_options['target_fqdn'] || nil
remote_host = ''
if @target_host
remote_host = @target_host
elsif target_ip
remote_host = target_ip
elsif target_fqdn
remote_host = target_fqdn
else
raise "aint got no target yo, that dog dont hunt"
end
Chef::Log.debug("======================================>")
Chef::Log.debug("get_target_connection_method - remote_host: #{remote_host}")
Chef::Log.debug("======================================>")
remote_host
end
def machine_for(node)
ChefMetal::Machine::UnixMachine.new(node, transport_for(node), convergence_strategy_for(node))
end
def convergence_strategy_for(node)
@convergence_strategy ||= begin
ChefMetal::ConvergenceStrategy::InstallCached.new
end
end
# Setup Ssh
def create_ssh_transport(node)
# TODO - verify target_host resolves
# Verify Valid IP
provisioner_options = node['normal']['provisioner_options']
##
# Ssh Target
target_host = ''
target_host = @target_host
Chef::Log.debug("======================================>")
Chef::Log.debug("create_ssh_transport - target_host: #{target_host}")
Chef::Log.debug("======================================>")
##
# Ssh Username
username = ''
username = provisioner_options['ssh_user'] || 'vagrant'
Chef::Log.debug("======================================>")
Chef::Log.debug("create_ssh_transport - username: #{username}")
Chef::Log.debug("======================================>")
##
# Ssh Password
ssh_pass = ''
ssh_pass = provisioner_options['ssh_connect_options']['ssh_pass']
Chef::Log.debug("======================================>")
Chef::Log.debug("create_ssh_transport - ssh_pass: #{ssh_pass}")
Chef::Log.debug("======================================>")
##
# Ssh Main Options
ssh_options = {}
ssh_options = {
# TODO create a user known hosts file
# :user_known_hosts_file => provisioner_options['ssh_connect_options']['UserKnownHostsFile'],
# :paranoid => true,
# :auth_methods => [ 'publickey' ],
:keys_only => false,
:password => ssh_pass
}
Chef::Log.debug("======================================>")
Chef::Log.debug("create_ssh_transport - ssh_options: #{ssh_options.inspect}")
Chef::Log.debug("======================================>")
##
# Ssh Additional Options
options = {}
#Enable pty by default
options[:ssh_pty_enable] = true
if username != 'root'
options[:prefix] = 'sudo '
end
Chef::Log.debug("======================================>")
Chef::Log.debug("create_ssh_transport - options: #{options.inspect}")
Chef::Log.debug("======================================>")
ChefMetal::Transport::SSH.new(target_host, username, ssh_options, options)
end
end
end
updated inline documentation
require 'chef_metal/provisioner'
require 'chef_metal/version'
require 'chef_metal/machine/basic_machine'
require 'chef_metal/machine/unix_machine'
require 'chef_metal/convergence_strategy/install_cached'
require 'chef_metal/transport/ssh'
module ChefMetalSsh
# Provisions machines with ssh.
class SshProvisioner < ChefMetal::Provisioner
def initialize(target_host=nil)
@target_host = target_host
end
attr_reader :target_host
# Acquire a machine, generally by provisioning it. Returns a Machine
# object pointing at the machine, allowing useful actions like setup,
# converge, execute, file and directory. The Machine object will have a
# "node" property which must be saved to the server (if it is any
# different from the original node object).
#
# ## Parameters
# action_handler - the action_handler object that provides context.
# node - node object (deserialized json) representing this machine. If
# the node has a provisioner_options hash in it, these will be used
# instead of options provided by the provisioner. TODO compare and
# fail if different?
# node will have node['normal']['provisioner_options'] in it with any options.
# It is a hash with this format:
#
# -- provisioner_url: ssh:<ssh_path>
# -- target_ip: the IP address of the target machine - IP or FQDN is required
# -- target_fqdn: The Resolvable name of the target machine - IP or FQDN is required
# -- ssh_user: the user to ssh as
# -- ssh_config: options to pass the ssh command. available options are here - https://github.com/net-ssh/net-ssh/blob/master/lib/net/ssh.rb#L61
#
# node['normal']['provisioner_output'] will be populated with information
# about the created machine. For ssh, it is a hash with this
# format:
#
# -- provisioner_url: ssh:<ssh_path>
# -- name: container name
#
def acquire_machine(action_handler, node)
# TODO verify that the existing provisioner_url in the node is the same as ours
# Set up the modified node data
provisioner_options = node['normal']['provisioner_options']
Chef::Log.debug("======================================>")
Chef::Log.debug("acquire_machine - provisioner_options.inspect: #{provisioner_options.inspect}")
Chef::Log.debug("======================================>")
if @target_host.nil?
target_host = get_target_connection_method(node)
@target_host = target_host
end
Chef::Log.debug("======================================>")
Chef::Log.debug("acquire_machine - target_host: #{target_host}")
Chef::Log.debug("======================================>")
# Set up Provisioner Output
# TODO - make url the chef server url path? maybe disk path if zero?
provisioner_output = node['normal']['provisioner_output'] || {
'provisioner_url' => "ssh:#{target_host}",
'name' => node['name']
}
Chef::Log.debug("======================================>")
Chef::Log.debug("acquire_machine - provisioner_output.inspect: #{provisioner_output.inspect}")
Chef::Log.debug("======================================>")
node['normal']['provisioner_output'] = provisioner_output
# Create machine object for callers to use
machine_for(node)
end
# Connect to machine without acquiring it
def connect_to_machine(node)
if @target_host.nil?
target_host = get_target_connection_method(node)
@target_host = target_host
end
Chef::Log.debug("======================================>")
Chef::Log.debug("connect_to_machine - target_host: #{target_host}")
Chef::Log.debug("======================================>")
machine_for(node)
end
def delete_machine(action_handler, node)
true
end
def stop_machine(action_handler, node)
true
end
# Not meant to be part of public interface
def transport_for(node)
create_ssh_transport(node)
end
protected
def get_target_connection_method(node)
provisioner_options = node['normal']['provisioner_options']
target_ip = ''
target_ip = provisioner_options['target_ip'] || nil
target_fqdn = ''
target_fqdn = provisioner_options['target_fqdn'] || nil
remote_host = ''
if @target_host
remote_host = @target_host
elsif target_ip
remote_host = target_ip
elsif target_fqdn
remote_host = target_fqdn
else
raise "aint got no target yo, that dog dont hunt"
end
Chef::Log.debug("======================================>")
Chef::Log.debug("get_target_connection_method - remote_host: #{remote_host}")
Chef::Log.debug("======================================>")
remote_host
end
def machine_for(node)
ChefMetal::Machine::UnixMachine.new(node, transport_for(node), convergence_strategy_for(node))
end
def convergence_strategy_for(node)
@convergence_strategy ||= begin
ChefMetal::ConvergenceStrategy::InstallCached.new
end
end
# Setup Ssh
def create_ssh_transport(node)
# TODO - verify target_host resolves
# Verify Valid IP
provisioner_options = node['normal']['provisioner_options']
##
# Ssh Target
target_host = ''
target_host = @target_host
Chef::Log.debug("======================================>")
Chef::Log.debug("create_ssh_transport - target_host: #{target_host}")
Chef::Log.debug("======================================>")
##
# Ssh Username
username = ''
username = provisioner_options['ssh_user'] || 'vagrant'
Chef::Log.debug("======================================>")
Chef::Log.debug("create_ssh_transport - username: #{username}")
Chef::Log.debug("======================================>")
##
# Ssh Password
ssh_pass = ''
ssh_pass = provisioner_options['ssh_connect_options']['ssh_pass']
Chef::Log.debug("======================================>")
Chef::Log.debug("create_ssh_transport - ssh_pass: #{ssh_pass}")
Chef::Log.debug("======================================>")
##
# Ssh Main Options
ssh_options = {}
ssh_options = {
# TODO create a user known hosts file
# :user_known_hosts_file => provisioner_options['ssh_connect_options']['UserKnownHostsFile'],
# :paranoid => true,
# :auth_methods => [ 'publickey' ],
:keys_only => false,
:password => ssh_pass
}
Chef::Log.debug("======================================>")
Chef::Log.debug("create_ssh_transport - ssh_options: #{ssh_options.inspect}")
Chef::Log.debug("======================================>")
##
# Ssh Additional Options
options = {}
#Enable pty by default
options[:ssh_pty_enable] = true
if username != 'root'
options[:prefix] = 'sudo '
end
Chef::Log.debug("======================================>")
Chef::Log.debug("create_ssh_transport - options: #{options.inspect}")
Chef::Log.debug("======================================>")
ChefMetal::Transport::SSH.new(target_host, username, ssh_options, options)
end
end
end
|
require 'circuit/behavior/stack'
module Circuit
module Behavior
module ClassMethods
# creates and memoizes a stack object that will hold the middleware objects
# attached to the behavior.
#
def stack
@stack ||= (superclass and superclass.ancestors.include?(::Circuit::Behavior) ? superclass.stack.dup : Stack.new)
end
[:use, :delete, :insert, :insert_after, :insert_before, :swap].each do |methud|
module_eval <<-METHOD
def #{methud.to_s}(*args)
self.stack.#{methud.to_s} *args
end
METHOD
end
end
end
end
adds comments for behavior/class_methods
require 'circuit/behavior/stack'
module Circuit
module Behavior
module ClassMethods
# creates and memoizes a stack object that will hold the middleware
# objects attached to the behavior.
#
# @returns [Stack] newly create stack or superclass duplicate
def stack
@stack ||= (mixing_behaviors? ? superclass.stack.dup : Stack.new)
end
# defines accessor methods for commonly used stack configuration
# methods. This would allow you to define behaviors that inherit
# stacks form their parents, configure them, without affecting the
# parents stack object.
[:use, :delete, :insert, :insert_after, :insert_before, :swap].each do |methud|
module_eval <<-METHOD
def #{methud.to_s}(*args)
self.stack.#{methud.to_s} *args
end
METHOD
end
private ##############
# @private
def self_mixes_behaviors?
self.superclass and self.superclass.ancestors.include?(::Circuit::Behavior)
end
end
end
end
|
module Pod
module Downloader
class Mercurial < Base
def self.options
[:revision, :tag, :branch]
end
def options_specific?
!options[:revision].nil?
end
def checkout_options
Dir.chdir(target_path) do
options = {}
options[:hg] = url
options[:revision] = `hg --debug id -i`.chomp
options
end
end
private
executable :hg
def download!
if options[:revision]
download_revision!
elsif options[:tag]
download_tag!
elsif options[:branch]
download_branch!
else
download_head!
end
end
def download_head!
hg! %|clone #{url} #{@target_path.shellescape}|
end
def download_revision!
hg! %|clone "#{url}" --rev '#{options[:revision]}' #{@target_path.shellescape}|
end
def download_tag!
hg! %|clone "#{url}" --updaterev '#{options[:tag]}' #{@target_path.shellescape}|
end
def download_branch!
hg! %|clone "#{url}" --updaterev '#{options[:branch]}' #{@target_path.shellescape}|
end
end
end
end
Adjusted options_specific
module Pod
module Downloader
class Mercurial < Base
def self.options
[:revision, :tag, :branch]
end
def options_specific?
!options[:revision].nil? || !options[:tag].nil?
end
def checkout_options
Dir.chdir(target_path) do
options = {}
options[:hg] = url
options[:revision] = `hg --debug id -i`.chomp
options
end
end
private
executable :hg
def download!
if options[:revision]
download_revision!
elsif options[:tag]
download_tag!
elsif options[:branch]
download_branch!
else
download_head!
end
end
def download_head!
hg! %|clone #{url} #{@target_path.shellescape}|
end
def download_revision!
hg! %|clone "#{url}" --rev '#{options[:revision]}' #{@target_path.shellescape}|
end
def download_tag!
hg! %|clone "#{url}" --updaterev '#{options[:tag]}' #{@target_path.shellescape}|
end
def download_branch!
hg! %|clone "#{url}" --updaterev '#{options[:branch]}' #{@target_path.shellescape}|
end
end
end
end
|
class CodeRunner
module Moab
#def self.configure_environment
#eputs "Configuring Hector"
#conf = <<EOF
#eval `modulecmd bash swap PrgEnv-pgi PrgEnv-gnu`
#eval `modulecmd bash load fftw/3.2.2`
#export XTPE_LINK_TYPE=dynamic
#export LD_LIBRARY_PATH=/opt/xt-libsci/10.4.1/gnu/lib/44:$LD_LIBRARY_PATH
#EOF
#Kernel.change_environment_with_shell_script(conf)
#end
def queue_status
if ((prefix = ENV['CODE_RUNNER_LAUNCHER']).size > 0 rescue false)
%x[cat #{CodeRunner.launcher_directory}/queue_status.txt | grep sh] +
%x[cat #{CodeRunner.launcher_directory}/queue_status2.txt | grep sh]
else
%x[qstat | grep $USER]
end
end
def mpi_prog
"aprun -n #{nprocstot} -N #{ppn}"
end
def nodes
nodes, ppn = @nprocs.split(/:/)[0].split(/x/)
nodes.to_i
end
def ppn
nodes, ppn = @nprocs.split(/:/)[0].split(/x/)
ppn.to_i
end
def nprocstot
#nodes, ppn = @nprocs.split(/x/)
nprocstot = nodes.to_i * ppn.to_i
end
def run_command
# "qsub #{batch_script_file}"
if (ENV['CODE_RUNNER_LAUNCHER'].size > 0 rescue false)
return %[#{mpi_prog} #{executable_location}/#{executable_name} #{parameter_string} > #{output_file} 2> #{error_file}]
else
"#{mpi_prog} #{executable_location}/#{executable_name} #{parameter_string}"
end
end
def execute
if ((prefix = ENV['CODE_RUNNER_LAUNCHER']).size > 0 rescue false)
launch_id = "#{Time.now.to_i}#{$$}"
fname = "#{CodeRunner.launcher_directory}/#{launch_id}"
File.open(fname + '.start', 'w'){|file| file.print "cd #{Dir.pwd};", run_command, "\n"}
sleep 2 until FileTest.exist? fname + '.pid'
pid = File.read(fname + '.pid').to_i
FileUtils.rm fname + '.pid'
return pid
else
File.open(batch_script_file, 'w'){|file| file.puts batch_script + run_command + "\n"}
pid = %x[qsub #{batch_script_file}].to_i
end
end
def batch_script_file
"#{executable_name}.#{job_identifier}.sh"
end
def max_ppn
raise "Please define max_ppn for your system"
end
def hours_minutes_seconds
if @wall_mins
ep @wall_mins
hours = (@wall_mins / 60).floor
mins = @wall_mins.to_i % 60
secs = ((@wall_mins - @wall_mins.to_i) * 60).to_i
else
raise "Please specify wall mins using the W flag"
end
eputs "Allotted wall time is " + sprintf("%02d:%02d:%02d", hours, mins, secs)
return [hours, mins, secs]
end
def ppn_checks
eputs "Warning: Underuse of nodes (#{ppn} cores per node instead of #{max_ppn})" if ppn.to_i < max_ppn
raise "Error: cores per node cannot excede #{max_ppn}" if ppn.to_i > max_ppn
end
def batch_script
ppn_checks
hours, mins, secs = hours_minutes_seconds
<<EOF
#!/bin/bash --login
#PBS -N #{executable_name}.#{job_identifier}
#PBS -l mppwidth=#{nprocstot}
#PBS -l mppnppn=#{ppn}
#PBS -l walltime=#{sprintf("%02d:%02d:%02d", hours, mins, secs)}
#{@project ? "#PBS -A #@project" : ""}
### start of jobscript
cd $PBS_O_WORKDIR
echo "workdir: $PBS_O_WORKDIR"
#{code_run_environment}
echo "Submitting #{nodes}x#{ppn} job on #{CodeRunner::SYS} for project #@project..."
EOF
end
def cancel_job
if ((prefix = ENV['CODE_RUNNER_LAUNCHER']).size > 0 rescue false)
fname = CodeRunner.launcher_directory + "/#{$$}.stop"
File.open(fname, 'w'){|file| file.puts "\n"}
else
`qdel #{@job_no}`
end
end
def error_file
if (ENV['CODE_RUNNER_LAUNCHER'].size > 0 rescue false)
return "#{executable_name}.#{job_identifier}.e"
else
return "#{executable_name}.#{job_identifier}.e#@job_no"
end
end
def output_file
if (ENV['CODE_RUNNER_LAUNCHER'].size > 0 rescue false)
return "#{executable_name}.#{job_identifier}.o"
else
return "#{executable_name}.#{job_identifier}.o#@job_no"
end
end
def get_run_status(job_no, current_status)
if ((prefix = ENV['CODE_RUNNER_LAUNCHER']).size > 0 rescue false)
if current_status =~ Regexp.new(job_no.to_s)
@running = true
return :Running
else
@running = false
return :Unknown
end
end
line = current_status.split(/\n/).grep(Regexp.new(job_no.to_s))[0]
unless line
return :Unknown
else
if line =~ /\sQ\s/
return :Queueing
elsif line =~ /\sR\s/
return :Running
elsif line =~ /\sH\s/
return :Queueing
elsif line =~ /\sC\s/
@running=false
return :Unknown
else
ep 'line', line
raise 'Could not get run status'
end
end
end
end
end
--Added code_run_environment to interactive submission in Moab
class CodeRunner
module Moab
#def self.configure_environment
#eputs "Configuring Hector"
#conf = <<EOF
#eval `modulecmd bash swap PrgEnv-pgi PrgEnv-gnu`
#eval `modulecmd bash load fftw/3.2.2`
#export XTPE_LINK_TYPE=dynamic
#export LD_LIBRARY_PATH=/opt/xt-libsci/10.4.1/gnu/lib/44:$LD_LIBRARY_PATH
#EOF
#Kernel.change_environment_with_shell_script(conf)
#end
def queue_status
if ((prefix = ENV['CODE_RUNNER_LAUNCHER']).size > 0 rescue false)
%x[cat #{CodeRunner.launcher_directory}/queue_status.txt | grep sh] +
%x[cat #{CodeRunner.launcher_directory}/queue_status2.txt | grep sh]
else
%x[qstat | grep $USER]
end
end
def mpi_prog
"aprun -n #{nprocstot} -N #{ppn}"
end
def nodes
nodes, ppn = @nprocs.split(/:/)[0].split(/x/)
nodes.to_i
end
def ppn
nodes, ppn = @nprocs.split(/:/)[0].split(/x/)
ppn.to_i
end
def nprocstot
#nodes, ppn = @nprocs.split(/x/)
nprocstot = nodes.to_i * ppn.to_i
end
def run_command
# "qsub #{batch_script_file}"
if (ENV['CODE_RUNNER_LAUNCHER'].size > 0 rescue false)
return %[#{code_run_environment}
#{mpi_prog} #{executable_location}/#{executable_name} #{parameter_string} > #{output_file} 2> #{error_file}]
else
"#{mpi_prog} #{executable_location}/#{executable_name} #{parameter_string}"
end
end
def execute
if ((prefix = ENV['CODE_RUNNER_LAUNCHER']).size > 0 rescue false)
launch_id = "#{Time.now.to_i}#{$$}"
fname = "#{CodeRunner.launcher_directory}/#{launch_id}"
File.open(fname + '.start', 'w'){|file| file.print "cd #{Dir.pwd};", run_command, "\n"}
sleep 2 until FileTest.exist? fname + '.pid'
pid = File.read(fname + '.pid').to_i
FileUtils.rm fname + '.pid'
return pid
else
File.open(batch_script_file, 'w'){|file| file.puts batch_script + run_command + "\n"}
pid = %x[qsub #{batch_script_file}].to_i
end
end
def batch_script_file
"#{executable_name}.#{job_identifier}.sh"
end
def max_ppn
raise "Please define max_ppn for your system"
end
def hours_minutes_seconds
if @wall_mins
ep @wall_mins
hours = (@wall_mins / 60).floor
mins = @wall_mins.to_i % 60
secs = ((@wall_mins - @wall_mins.to_i) * 60).to_i
else
raise "Please specify wall mins using the W flag"
end
eputs "Allotted wall time is " + sprintf("%02d:%02d:%02d", hours, mins, secs)
return [hours, mins, secs]
end
def ppn_checks
eputs "Warning: Underuse of nodes (#{ppn} cores per node instead of #{max_ppn})" if ppn.to_i < max_ppn
raise "Error: cores per node cannot excede #{max_ppn}" if ppn.to_i > max_ppn
end
def batch_script
ppn_checks
hours, mins, secs = hours_minutes_seconds
<<EOF
#!/bin/bash --login
#PBS -N #{executable_name}.#{job_identifier}
#PBS -l mppwidth=#{nprocstot}
#PBS -l mppnppn=#{ppn}
#PBS -l walltime=#{sprintf("%02d:%02d:%02d", hours, mins, secs)}
#{@project ? "#PBS -A #@project" : ""}
### start of jobscript
cd $PBS_O_WORKDIR
echo "workdir: $PBS_O_WORKDIR"
#{code_run_environment}
echo "Submitting #{nodes}x#{ppn} job on #{CodeRunner::SYS} for project #@project..."
EOF
end
def cancel_job
if ((prefix = ENV['CODE_RUNNER_LAUNCHER']).size > 0 rescue false)
fname = CodeRunner.launcher_directory + "/#{$$}.stop"
File.open(fname, 'w'){|file| file.puts "\n"}
else
`qdel #{@job_no}`
end
end
def error_file
if (ENV['CODE_RUNNER_LAUNCHER'].size > 0 rescue false)
return "#{executable_name}.#{job_identifier}.e"
else
return "#{executable_name}.#{job_identifier}.e#@job_no"
end
end
def output_file
if (ENV['CODE_RUNNER_LAUNCHER'].size > 0 rescue false)
return "#{executable_name}.#{job_identifier}.o"
else
return "#{executable_name}.#{job_identifier}.o#@job_no"
end
end
def get_run_status(job_no, current_status)
if ((prefix = ENV['CODE_RUNNER_LAUNCHER']).size > 0 rescue false)
if current_status =~ Regexp.new(job_no.to_s)
@running = true
return :Running
else
@running = false
return :Unknown
end
end
line = current_status.split(/\n/).grep(Regexp.new(job_no.to_s))[0]
unless line
return :Unknown
else
if line =~ /\sQ\s/
return :Queueing
elsif line =~ /\sR\s/
return :Running
elsif line =~ /\sH\s/
return :Queueing
elsif line =~ /\sC\s/
@running=false
return :Unknown
else
ep 'line', line
raise 'Could not get run status'
end
end
end
end
end
|
module RAutomation
module Adapter
autoload :Autoit, File.dirname(__FILE__) + "/autoit.rb"
autoload :Ffi, File.dirname(__FILE__) + "/ffi.rb"
module Helper
extend self
# @private
def default_adapter
case RUBY_PLATFORM
when /mswin|msys|mingw32/
:autoit
else
raise "unsupported platform for RAutomation: #{RUBY_PLATFORM}"
end
end
end
end
end
make FFI adapter default for the Windows
module RAutomation
module Adapter
autoload :Autoit, File.dirname(__FILE__) + "/autoit.rb"
autoload :Ffi, File.dirname(__FILE__) + "/ffi.rb"
module Helper
extend self
# @private
def default_adapter
case RUBY_PLATFORM
when /mswin|msys|mingw32/
:ffi
else
raise "unsupported platform for RAutomation: #{RUBY_PLATFORM}"
end
end
end
end
end |
# default cron env is "/usr/bin:/bin" which is not sufficient as govuk_env is in /usr/local/bin
env :PATH, '/usr/local/bin:/usr/bin:/bin'
set :output, {:error => 'log/cron.error.log', :standard => 'log/cron.log'}
job_type :rake, 'cd :path && /usr/local/bin/govuk_setenv local-links-manager bundle exec rake :task :output'
every :day, at: '2am' do
rake 'check-links'
end
# Run the rake task to import all services and interactions into Local Links Manager on the 1st of each month
every :month, on: '1st' do
rake 'import:service_interactions:import_all'
end
# Run the rake task to export data to CSV for data.gov.uk.
every :day, at: '3am' do
rake 'export:links:all'
end
# Run the rake task to import Google analytics for local transactions.
every :day, at: '5am' do
rake 'import:google_analytics'
end
every :day, at: '6am' do
rake 'export:google_analytics:bad_links'
end
Import services every night instead of once a month
# default cron env is "/usr/bin:/bin" which is not sufficient as govuk_env is in /usr/local/bin
env :PATH, '/usr/local/bin:/usr/bin:/bin'
set :output, {:error => 'log/cron.error.log', :standard => 'log/cron.log'}
job_type :rake, 'cd :path && /usr/local/bin/govuk_setenv local-links-manager bundle exec rake :task :output'
# Run the rake task to import all services and interactions into Local Links Manager
every :day, at: '1am' do
rake 'import:service_interactions:import_all'
end
every :day, at: '2am' do
rake 'check-links'
end
# Run the rake task to export data to CSV for data.gov.uk.
every :day, at: '3am' do
rake 'export:links:all'
end
# Run the rake task to import Google analytics for local transactions.
every :day, at: '5am' do
rake 'import:google_analytics'
end
every :day, at: '6am' do
rake 'export:google_analytics:bad_links'
end
|
module RemoteFiles
class Configuration
attr_reader :name
def initialize(name, config = {})
@name = name
@stores = []
@stores_map = {}
from_hash(config)
end
def logger=(logger)
@logger = logger
end
def logger
@logger ||= RemoteFiles.logger
end
def clear
@stores.clear
@stores_map.clear
end
def from_hash(hash)
hash.each do |store_identifier, config|
#symbolize_keys!
cfg = {}
config.each { |name, value| cfg[name.to_sym] = config[name] }
config = cfg
#camelize
type = config[:type].gsub(/\/(.?)/) { "::#{$1.upcase}" }.gsub(/(?:^|_)(.)/) { $1.upcase } + 'Store'
klass = RemoteFiles.const_get(type) rescue nil
unless klass
require "remote_files/#{config[:type]}_store"
klass = RemoteFiles.const_get(type)
end
config.delete(:type)
add_store(store_identifier.to_sym, :class => klass, :primary => !!config.delete(:primary)) do |store|
config.each do |name, value|
store[name] = value
end
end
end
self
end
def add_store(store_identifier, options = {}, &block)
store = (options[:class] || FogStore).new(store_identifier)
block.call(store) if block_given?
if options[:read_only]
store[:read_only] = options[:read_only]
end
if options[:primary]
@stores.unshift(store)
else
@stores << store
end
@stores_map[store_identifier] = store
end
def configured?
!@stores.empty?
end
def stores(only_editables = false)
raise "You need to configure add stores to the #{name} RemoteFiles configuration" unless configured?
if only_editables
@stores.reject {|s| s.read_only?}
else
@stores
end
end
def lookup_store(store_identifier)
@stores_map[store_identifier.to_sym]
end
def primary_store
stores.first
end
def store_once!(file)
return file.stored_in.first if file.stored?
exception = nil
stores(true).each do |store|
begin
stored = store.store!(file)
file.stored_in << store.identifier
break
rescue ::RemoteFiles::Error => e
file.logger.info(e) if file.logger
exception = e
end
end
raise exception unless file.stored?
file.stored_in.first
end
def store!(file)
store_once!(file) unless file.stored?
RemoteFiles.synchronize_stores(file) unless file.stored_everywhere?
true
end
def delete!(file)
RemoteFiles.delete_file(file)
end
def delete_now!(file)
exceptions = []
file.editable_stores.each do |store|
begin
store.delete!(file.identifier)
rescue NotFoundError => e
exceptions << e
end
end
if exceptions.size == file.editable_stores.size # they all failed
raise exceptions.first
end
true
end
def synchronize!(file)
file.missing_stores.each do |store|
next if store.read_only?
store.store!(file)
file.stored_in << store.identifier
end
end
def file_from_url(url, options = {})
stores.each do |store|
file = store.file_from_url(url, options.merge(:configuration => name))
return file if file
end
nil
end
end
end
Cosmetic
module RemoteFiles
class Configuration
attr_reader :name
def initialize(name, config = {})
@name = name
@stores = []
@stores_map = {}
from_hash(config)
end
def logger=(logger)
@logger = logger
end
def logger
@logger ||= RemoteFiles.logger
end
def clear
@stores.clear
@stores_map.clear
end
def from_hash(hash)
hash.each do |store_identifier, config|
#symbolize_keys!
cfg = {}
config.each { |name, value| cfg[name.to_sym] = config[name] }
config = cfg
#camelize
type = config[:type].gsub(/\/(.?)/) { "::#{$1.upcase}" }.gsub(/(?:^|_)(.)/) { $1.upcase } + 'Store'
klass = RemoteFiles.const_get(type) rescue nil
unless klass
require "remote_files/#{config[:type]}_store"
klass = RemoteFiles.const_get(type)
end
config.delete(:type)
add_store(store_identifier.to_sym, :class => klass, :primary => !!config.delete(:primary)) do |store|
config.each do |name, value|
store[name] = value
end
end
end
self
end
def add_store(store_identifier, options = {}, &block)
store = (options[:class] || FogStore).new(store_identifier)
block.call(store) if block_given?
if options[:read_only]
store[:read_only] = options[:read_only]
end
if options[:primary]
@stores.unshift(store)
else
@stores << store
end
@stores_map[store_identifier] = store
end
def configured?
!@stores.empty?
end
def stores(editable_only = false)
raise "You need to configure add stores to the #{name} RemoteFiles configuration" unless configured?
if editable_only
@stores.reject {|s| s.read_only?}
else
@stores
end
end
def lookup_store(store_identifier)
@stores_map[store_identifier.to_sym]
end
def primary_store
stores.first
end
def store_once!(file)
return file.stored_in.first if file.stored?
exception = nil
stores(true).each do |store|
begin
stored = store.store!(file)
file.stored_in << store.identifier
break
rescue ::RemoteFiles::Error => e
file.logger.info(e) if file.logger
exception = e
end
end
raise exception unless file.stored?
file.stored_in.first
end
def store!(file)
store_once!(file) unless file.stored?
RemoteFiles.synchronize_stores(file) unless file.stored_everywhere?
true
end
def delete!(file)
RemoteFiles.delete_file(file)
end
def delete_now!(file)
exceptions = []
file.editable_stores.each do |store|
begin
store.delete!(file.identifier)
rescue NotFoundError => e
exceptions << e
end
end
if exceptions.size == file.editable_stores.size # they all failed
raise exceptions.first
end
true
end
def synchronize!(file)
file.missing_stores.each do |store|
next if store.read_only?
store.store!(file)
file.stored_in << store.identifier
end
end
def file_from_url(url, options = {})
stores.each do |store|
file = store.file_from_url(url, options.merge(:configuration => name))
return file if file
end
nil
end
end
end
|
# job_type :rake, 'cd /home/prometheusapp/www/current && RAILS_ENV=:environment bundle exec rake :task :output'
# every 1.hour do
# command 'kill -s USR2 `cat /tmp/unicorn.my_site.pid`'
# end
every 1.day, at: '00:10' do
rake 'ts:index'
end
every '5 1-23 * * *' do
rake 'sisyphus:update p8:update p7:update t7:update p6:update t6:update p5:update 51:update 50:update 41:update 40:update gear:update'
end
# every 1.day, at: '05:30' do
# rake 'sisyphusarm:update'
# end
every 1.day, at: '05:00' do
rake 'sisyphus:bugs'
end
every 1.day, at: '06:45' do
rake 'sisyphus:repocops sisyphus:repocop_patches'
end
every 1.day, at: '12:50' do
rake 'ftbfs:update'
end
every :sunday, at: '03:30' do
rake 'sitemap:clean sitemap:refresh'
end
every :sunday, at: '06:30' do
rake 'perlwatch:update'
end
# Learn more: http://github.com/javan/whenever
Update schedule
# job_type :rake, 'cd /home/prometheusapp/www/current && RAILS_ENV=:environment bundle exec rake :task :output'
# every 1.hour do
# command 'kill -s USR2 `cat /tmp/unicorn.my_site.pid`'
# end
every 1.day, at: '00:10' do
rake 'ts:index'
end
every '5 1-23 * * *' do
rake 'sisyphus:update p8:update p7:update t7:update p6:update t6:update p5:update 51:update 50:update 41:update 40:update gear:update'
end
# every 1.day, at: '05:30' do
# rake 'sisyphusarm:update'
# end
every 1.day, at: '05:00' do
rake 'sisyphus:bugs'
end
every 1.day, at: '06:45' do
rake 'sisyphus:repocops sisyphus:repocop_patches'
end
every 1.day, at: '12:50' do
rake 'ftbfs:update'
end
every :sunday, at: '03:30' do
rake 'sitemap:clean sitemap:refresh'
end
every :sunday, at: '06:30' do
rake 'perlwatch:update'
end
every 5.minutes do
rake 'pghero:capture_query_stats'
end
# Learn more: http://github.com/javan/whenever
|
# Use this file to easily define all of your cron jobs.
#
# It's helpful, but not entirely necessary to understand cron before proceeding.
# http://en.wikipedia.org/wiki/Cron
# Example:
#
# set :output, "/path/to/my/cron_log.log"
#
# every 2.hours do
# command "/usr/bin/some_great_command"
# runner "MyModel.some_method"
# rake "some:great:rake:task"
# end
#
# every 4.days do
# runner "AnotherModel.prune_old_records"
# end
# Learn more: http://github.com/javan/whenever
DIR_RBENV_BIN = "#{ENV['HOME']}/.rbenv/bin"
set :output, "#{ENV['HOME']}/oreore-radio.log"
env :PATH, ENV['PATH']
env :RIPDIKO_OUTDIR, ENV['RIPDIKO_OUTDIR'] if ENV['RIPDIKO_OUTDIR']
env :RIPDIRU_OUTDIR, ENV['RIPDIRU_OUTDIR'] if ENV['RIPDIRU_OUTDIR']
job_type :rbenv_rake, %Q!PATH="#{DIR_RBENV_BIN}:$PATH"; eval "$(rbenv init -)"; cd :path && :bundle_command rake :task --silent :output!
job_type :be_rake, 'cd :path && :bundle_command rake :task --silent :output'
def rake_method
File.exist?(DIR_RBENV_BIN) ? :rbenv_rake : :be_rake
end
# ij
every('05 11 * * 1-4') { send rake_method, 'oreore:import_from_ripdiko' }
every('10 11 * * 1-4') { send rake_method, 'oreore:podcast' }
# so
#every('05 13 * * 1-4') { send rake_method, 'oreore:import_from_ripdiko' }
#every('10 13 * * 1-4') { send rake_method, 'oreore:podcast' }
# tama954
every('35 15 * * 1-5') { send rake_method, 'oreore:import_from_ripdiko' }
every('40 15 * * 1-5') { send rake_method, 'oreore:podcast' }
# midnight
every('05 1 * * 2-6') { send rake_method, 'oreore:import_from_ripdiko' }
every('10 1 * * 2-6') { send rake_method, 'oreore:podcast' }
# junk
every('05 3 * * 2-7') { send rake_method, 'oreore:import_from_ripdiko' }
every('10 3 * * 2-7') { send rake_method, 'oreore:podcast' }
# kamataku
every(:monday, at: '22:05') { send rake_method, 'oreore:import_from_ripdiko' }
every(:monday, at: '22:10') { send rake_method, 'oreore:podcast' }
# ia
every(:tuesday, at: '21:35') { send rake_method, 'oreore:import_from_ripdiko' }
every(:tuesday, at: '21:40') { send rake_method, 'oreore:podcast' }
# suppin
every(:wednesday, at: '22:05') { send rake_method, 'oreore:import_from_ripdiko' }
every(:wednesday, at: '22:10') { send rake_method, 'oreore:podcast' }
# edo
every(:friday, at: '22:05') { send rake_method, 'oreore:import_from_ripdiko' }
every(:friday, at: '22:10') { send rake_method, 'oreore:podcast' }
# deso
every(:saturday, at: '21:05') { send rake_method, 'oreore:import_from_ripdiko' }
every(:saturday, at: '21:10') { send rake_method, 'oreore:podcast' }
# nichiten
every(:sunday, at: '12:00') { send rake_method, 'oreore:import_from_ripdiko' }
every(:sunday, at: '12:05') { send rake_method, 'oreore:podcast' }
# nichiyou
#every(:sunday, at: '17:05') { send rake_method, 'oreore:import_from_ripdiko' }
#every(:sunday, at: '17:10') { send rake_method, 'oreore:podcast' }
# dsk
#every(:sunday, at: '18:05') { send rake_method, 'oreore:import_from_ripdiko' }
#every(:sunday, at: '18:10') { send rake_method, 'oreore:podcast' }
# yose / world
every(:sunday, at: '21:05') { send rake_method, 'oreore:import_from_ripdiko' }
every(:sunday, at: '21:10') { send rake_method, 'oreore:podcast' }
Disable ia
# Use this file to easily define all of your cron jobs.
#
# It's helpful, but not entirely necessary to understand cron before proceeding.
# http://en.wikipedia.org/wiki/Cron
# Example:
#
# set :output, "/path/to/my/cron_log.log"
#
# every 2.hours do
# command "/usr/bin/some_great_command"
# runner "MyModel.some_method"
# rake "some:great:rake:task"
# end
#
# every 4.days do
# runner "AnotherModel.prune_old_records"
# end
# Learn more: http://github.com/javan/whenever
DIR_RBENV_BIN = "#{ENV['HOME']}/.rbenv/bin"
set :output, "#{ENV['HOME']}/oreore-radio.log"
env :PATH, ENV['PATH']
env :RIPDIKO_OUTDIR, ENV['RIPDIKO_OUTDIR'] if ENV['RIPDIKO_OUTDIR']
env :RIPDIRU_OUTDIR, ENV['RIPDIRU_OUTDIR'] if ENV['RIPDIRU_OUTDIR']
job_type :rbenv_rake, %Q!PATH="#{DIR_RBENV_BIN}:$PATH"; eval "$(rbenv init -)"; cd :path && :bundle_command rake :task --silent :output!
job_type :be_rake, 'cd :path && :bundle_command rake :task --silent :output'
def rake_method
File.exist?(DIR_RBENV_BIN) ? :rbenv_rake : :be_rake
end
# ij
every('05 11 * * 1-4') { send rake_method, 'oreore:import_from_ripdiko' }
every('10 11 * * 1-4') { send rake_method, 'oreore:podcast' }
# so
#every('05 13 * * 1-4') { send rake_method, 'oreore:import_from_ripdiko' }
#every('10 13 * * 1-4') { send rake_method, 'oreore:podcast' }
# tama954
every('35 15 * * 1-5') { send rake_method, 'oreore:import_from_ripdiko' }
every('40 15 * * 1-5') { send rake_method, 'oreore:podcast' }
# midnight
every('05 1 * * 2-6') { send rake_method, 'oreore:import_from_ripdiko' }
every('10 1 * * 2-6') { send rake_method, 'oreore:podcast' }
# junk
every('05 3 * * 2-7') { send rake_method, 'oreore:import_from_ripdiko' }
every('10 3 * * 2-7') { send rake_method, 'oreore:podcast' }
# kamataku
every(:monday, at: '22:05') { send rake_method, 'oreore:import_from_ripdiko' }
every(:monday, at: '22:10') { send rake_method, 'oreore:podcast' }
# ia
#every(:tuesday, at: '21:35') { send rake_method, 'oreore:import_from_ripdiko' }
#every(:tuesday, at: '21:40') { send rake_method, 'oreore:podcast' }
# suppin
every(:wednesday, at: '22:05') { send rake_method, 'oreore:import_from_ripdiko' }
every(:wednesday, at: '22:10') { send rake_method, 'oreore:podcast' }
# edo
every(:friday, at: '22:05') { send rake_method, 'oreore:import_from_ripdiko' }
every(:friday, at: '22:10') { send rake_method, 'oreore:podcast' }
# deso
every(:saturday, at: '21:05') { send rake_method, 'oreore:import_from_ripdiko' }
every(:saturday, at: '21:10') { send rake_method, 'oreore:podcast' }
# nichiten
every(:sunday, at: '12:00') { send rake_method, 'oreore:import_from_ripdiko' }
every(:sunday, at: '12:05') { send rake_method, 'oreore:podcast' }
# nichiyou
#every(:sunday, at: '17:05') { send rake_method, 'oreore:import_from_ripdiko' }
#every(:sunday, at: '17:10') { send rake_method, 'oreore:podcast' }
# dsk
#every(:sunday, at: '18:05') { send rake_method, 'oreore:import_from_ripdiko' }
#every(:sunday, at: '18:10') { send rake_method, 'oreore:podcast' }
# yose / world
every(:sunday, at: '21:05') { send rake_method, 'oreore:import_from_ripdiko' }
every(:sunday, at: '21:10') { send rake_method, 'oreore:podcast' }
|
require 'map'
require 'control_elements'
Dir["#{File.join(File.dirname(__FILE__), 'map', '*.rb')}"].each do |filename|
require filename
end
Dir["#{File.join(File.dirname(__FILE__), 'markers', '*.rb')}"].each do |filename|
require filename
end
require files in the appropriate order so everything loads correctly
require File.join(File.dirname(__FILE__), 'control_elements.rb')
Dir["#{File.join(File.dirname(__FILE__), 'map', '*.rb')}"].each do |filename|
require filename
end
Dir["#{File.join(File.dirname(__FILE__), 'markers', '*.rb')}"].each do |filename|
require filename
end
require File.join(File.dirname(__FILE__), 'map.rb') |
module Rentlinx
module Photoable
def post_with_photos
post
Rentlinx.client.post_photos(@photos)
end
def photos
@photos ||= get_photos_for_property_id(propertyID)
end
def photos=(photo_list)
@photos = photo_list.map do |photo|
photo.propertyID = propertyID
photo.unitID = unitID if defined? unitID
photo
end
end
def add_photo(options)
options.merge!(propertyID: propertyID)
options.merge!(unitID: unitID) if defined? unitID
@photos ||= []
@photos << photo_class.new(options)
end
private
def get_photos_for_property_id(id)
Rentlinx.client.get_photos_for_property_id(id)
end
end
end
as/ne - add post_photos method to photoable
module Rentlinx
module Photoable
def post_with_photos
post
post_photos
end
def post_photos
Rentlinx.client.post_photos(@photos)
end
def photos
@photos ||= get_photos_for_property_id(propertyID)
end
def photos=(photo_list)
@photos = photo_list.map do |photo|
photo.propertyID = propertyID
photo.unitID = unitID if defined? unitID
photo
end
end
def add_photo(options)
options.merge!(propertyID: propertyID)
options.merge!(unitID: unitID) if defined? unitID
@photos ||= []
@photos << photo_class.new(options)
end
private
def get_photos_for_property_id(id)
Rentlinx.client.get_photos_for_property_id(id)
end
end
end
|
# Use this file to easily define all of your cron jobs.
#
# It's helpful, but not entirely necessary to understand cron before proceeding.
# http://en.wikipedia.org/wiki/Cron
# rbenv support
rbenv = 'export PATH="$HOME/.rbenv/bin:$PATH";eval "$(rbenv init -)";'
path = Whenever.path.sub(%r{\/releases\/.*}, '/current')
set :job_template, "/bin/bash -l -c '#{rbenv} :job'"
job_type :runner, "cd #{path} && bin/rails r -e :environment \":task\" :output"
# cron output
set :output, 'log/cron.log'
every 10.minutes do
runner 'ZonefileSetting.generate_zonefiles'
end
every 6.months, at: '12:01am' do
runner 'Contact.destroy_orphans'
end
every :day, at: '12:10am' do
runner 'Invoice.cancel_overdue_invoices'
end
# TODO
# every :day, at: '12:15am' do
# runner 'Domain.expire_domains'
# end
every :day, at: '12:20am' do
runner 'Domain.clean_expired_pendings'
end
every 3.hours do
runner 'Certificate.update_crl'
end
every 42.minutes do
runner 'Domain.destroy_delete_candidates'
end
every 45.minutes do
runner 'Domain.start_expire_period'
end
every 50.minutes do
runner 'Domain.start_delete_period'
end
every 52.minutes do
runner 'Domain.start_redemption_grace_period'
end
Schedule test #2925
# Use this file to easily define all of your cron jobs.
#
# It's helpful, but not entirely necessary to understand cron before proceeding.
# http://en.wikipedia.org/wiki/Cron
# rbenv support
rbenv = 'export PATH="$HOME/.rbenv/bin:$PATH";eval "$(rbenv init -)";'
path = Whenever.path.sub(%r{\/releases\/.*}, '/current')
set :job_template, "/bin/bash -l -c '#{rbenv} :job'"
job_type :runner, "cd #{path} && bin/rails r -e :environment \":task\" :output"
puts @environment
# cron output
set :output, 'log/cron.log'
every 10.minutes do
runner 'ZonefileSetting.generate_zonefiles'
end
every 6.months, at: '12:01am' do
runner 'Contact.destroy_orphans'
end
every :day, at: '12:10am' do
runner 'Invoice.cancel_overdue_invoices'
end
# TODO
# every :day, at: '12:15am' do
# runner 'Domain.expire_domains'
# end
every :day, at: '12:20am' do
runner 'Domain.clean_expired_pendings'
end
every 3.hours do
runner 'Certificate.update_crl'
end
every 42.minutes do
runner 'Domain.destroy_delete_candidates'
end
every 45.minutes do
runner 'Domain.start_expire_period'
end
every 50.minutes do
runner 'Domain.start_delete_period'
end
every 52.minutes do
runner 'Domain.start_redemption_grace_period'
end
every 10.minutes do
runner 'Setting.reload_settings!'
end
|
class Graph
class Node
attr_reader :name
# @api private
def initialize(name)
@name = name.to_sym
end
end
end
Add docs for Graph::Node
class Graph
class Node
# The node name
#
# @example
#
# node = Node.new(:name)
# node.name
#
# @return [Symbol]
#
# @api public
attr_reader :name
# Initialize a new node
#
# @param [#to_sym] name
# the node name
#
# @return [undefined]
#
# @api private
def initialize(name)
@name = name.to_sym
end
end
end
|
module Reunion
class Rule
attr_accessor :condition, :actions, :chain
def initialize(chain_array)
@chain = chain_array
@condition = Re::And.new(chain.select{|i| i[:is_filter]}.map{|f| f[:conditions]})
@actions = chain.select{|i| i[:is_action]}
@modified_transactions ||= []
@changed_transactions ||= []
@matched_transactions ||= []
end
attr_accessor :matched_transactions, :modified_transactions, :changed_transactions
def modify(txn)
@matched_transactions << txn
@modified_transactions << txn
actual_change = false
@actions.each do |action|
#puts "Performing action"
actual_change = true if action[:definition].apply_action.call(txn, action[:arguments])
end
@changed_transactions << txn if actual_change
actual_change
end
def inspect
"#{@condition.inspect} -> action"
end
end
class RuleEngine
attr_accessor :rules, :tree
def initialize(rules)
rules.flush(true)
@rules = rules.rules.map{|r| Rule.new(r)}
@builder = Reunion::Re::DecisionTreeBuilder.new
@rules.each{|r| @builder.add_rule(r.condition, r)}
@tree = @builder.build
end
def prep_transactions(transactions)
return [] if transactions.empty?
schema = transactions.first.schema
prep_fields_methods = schema.fields.to_a.map{|field,v| v.query_methods.map{|smd| smd.prep_data}.compact.map{|m| [field,m]}}.flatten(1)
transactions.map{|t|
prepped = t.data.clone
prep_fields_methods.each{ |field, method| method.call(field, prepped[field], prepped)}
prepped[:_txn] = t
prepped
}
end
#TODO apply source data sanitization
def run(transactions)
current_set = transactions
next_set = []
max_iterations = 3
max_iterations.times do |i|
current_set = prep_transactions(current_set)
break if current_set.empty?
next_set = current_set.select{|t| modify_txn(t)}.map{|t| t[:_txn]}
#puts "#{next_set.length} of #{current_set.length} transactions modified. Rule count = #{rules.length}"
current_set = next_set
end
end
def modify_txn(t)
modified = false
@tree.get_results(t) do |rule|
m = rule.modify(t[:_txn])
modified = true if m
end
modified
end
def find_matches(transactions)
transactions.select do |t|
matches = false
@tree.get_results(t) do |rule|
matches = true
end
matches
end
end
end
end
RulesEngine now runs prep_transactions prior to find_matches so it will, um work.
module Reunion
class Rule
attr_accessor :condition, :actions, :chain
def initialize(chain_array)
@chain = chain_array
@condition = Re::And.new(chain.select{|i| i[:is_filter]}.map{|f| f[:conditions]})
@actions = chain.select{|i| i[:is_action]}
@modified_transactions ||= []
@changed_transactions ||= []
@matched_transactions ||= []
end
attr_accessor :matched_transactions, :modified_transactions, :changed_transactions
def modify(txn)
@matched_transactions << txn
@modified_transactions << txn
actual_change = false
@actions.each do |action|
#puts "Performing action"
actual_change = true if action[:definition].apply_action.call(txn, action[:arguments])
end
@changed_transactions << txn if actual_change
actual_change
end
def inspect
"#{@condition.inspect} -> action"
end
end
class RuleEngine
attr_accessor :rules, :tree
def initialize(rules)
rules.flush(true)
@rules = rules.rules.map{|r| Rule.new(r)}
@builder = Reunion::Re::DecisionTreeBuilder.new
@rules.each{|r| @builder.add_rule(r.condition, r)}
@tree = @builder.build
end
def prep_transactions(transactions)
return [] if transactions.empty?
schema = transactions.first.schema
prep_fields_methods = schema.fields.to_a.map{|field,v| v.query_methods.map{|smd| smd.prep_data}.compact.map{|m| [field,m]}}.flatten(1)
transactions.map{|t|
prepped = t.data.clone
prep_fields_methods.each{ |field, method| method.call(field, prepped[field], prepped)}
prepped[:_txn] = t
prepped
}
end
#TODO apply source data sanitization
def run(transactions)
current_set = transactions
next_set = []
max_iterations = 3
max_iterations.times do |i|
current_set = prep_transactions(current_set)
break if current_set.empty?
next_set = current_set.select{|t| modify_txn(t)}.map{|t| t[:_txn]}
#puts "#{next_set.length} of #{current_set.length} transactions modified. Rule count = #{rules.length}"
current_set = next_set
end
end
def modify_txn(t)
modified = false
@tree.get_results(t) do |rule|
m = rule.modify(t[:_txn])
modified = true if m
end
modified
end
def find_matches(transactions)
prep_transactions(transactions)
transactions.select do |t|
matches = false
@tree.get_results(t) do |rule|
matches = true
end
matches
end
end
end
end
|
# Use this file to easily define all of your cron jobs.
#
# It's helpful, but not entirely necessary to understand cron before proceeding.
# http://en.wikipedia.org/wiki/Cron
# Example:
#
# set :output, "/path/to/my/cron_log.log"
#
# every 2.hours do
# command "/usr/bin/some_great_command"
# runner "MyModel.some_method"
# rake "some:great:rake:task"
# end
#
# every 4.days do
# runner "AnotherModel.prune_old_records"
# end
# Learn more: http://github.com/javan/whenever
set :output, "~rails/julia_observer/log/cron.log"
every :day, at: %w[ 2am 2pm 8am 8pm ] do
command "echo 'boot job triggered.'"
rake "job:boot"
end
every :day, at: %w[ 2:30am 2:30pm 8:30am 8:30pm ] do
command "echo 'download job triggered.'"
rake "job:download"
end
every :day, at: %w[ 4am 4pm 10am 10pm ] do
command "echo 'expand job triggered.'"
rake "job:expand"
end
# every :day, at: %w[ 5am 5pm 11am 11pm ] do
# command "echo 'update job triggered.'"
# rake "job:update"
# end
every :day, at: '12:30am' do
command "echo 'log job triggered.'"
rake "job:log"
end
every :day, at: '12:30pm' do
command "echo 'refresh job triggered.'"
rake "sitemap:refresh"
end
every :day, at: '6:30am' do
command "echo 'scour job triggered.'"
rake "job:scour"
end
# every :day, at: '6:30pm' do
# command "echo 'clean job triggered.'"
# rake "job:clean"
# end
Turn update and cleaning back on
# Use this file to easily define all of your cron jobs.
#
# It's helpful, but not entirely necessary to understand cron before proceeding.
# http://en.wikipedia.org/wiki/Cron
# Example:
#
# set :output, "/path/to/my/cron_log.log"
#
# every 2.hours do
# command "/usr/bin/some_great_command"
# runner "MyModel.some_method"
# rake "some:great:rake:task"
# end
#
# every 4.days do
# runner "AnotherModel.prune_old_records"
# end
# Learn more: http://github.com/javan/whenever
set :output, "~rails/julia_observer/log/cron.log"
every :day, at: %w[ 2am 2pm 8am 8pm ] do
command "echo 'boot job triggered.'"
rake "job:boot"
end
every :day, at: %w[ 2:30am 2:30pm 8:30am 8:30pm ] do
command "echo 'download job triggered.'"
rake "job:download"
end
every :day, at: %w[ 4am 4pm 10am 10pm ] do
command "echo 'expand job triggered.'"
rake "job:expand"
end
every :day, at: %w[ 5am 5pm 11am 11pm ] do
command "echo 'update job triggered.'"
rake "job:update"
end
every :day, at: '12:30am' do
command "echo 'log job triggered.'"
rake "job:log"
end
every :day, at: '12:30pm' do
command "echo 'refresh job triggered.'"
rake "sitemap:refresh"
end
every :day, at: '6:30am' do
command "echo 'scour job triggered.'"
rake "job:scour"
end
every :day, at: '6:30pm' do
command "echo 'clean job triggered.'"
rake "job:clean"
end
|
#
# Extra classes needed to operate with Chimp
#
module Chimp
#
# This class allows to check on the status of any of the tasks created.
#
class Task
attr_writer :tasker
attr_reader :tasker
def initialize
@api_polling_rate = ENV['API_POLLING_RATE'].to_i || 30
end
def wait_for_state(desired_state, timeout = 900)
while timeout > 0
# Make compatible with RL10.
status = state.downcase
return true if status.match(desired_state)
friendly_url = Connection.audit_url + '/audit_entries/'
friendly_url += href.split(/\//).last
friendly_url = friendly_url.gsub('ae-', '')
if status.match('failed') || status.match('aborted')
raise "FATAL error, #{status}\n\n Audit: #{friendly_url}\n "
end
Log.debug "Polling again in #{#@api_polling_rate}"
sleep @api_polling_rate
timeout -= @api_polling_rate
end
raise "FATAL: Timeout waiting for Executable to complete. State was #{status}" if timeout <= 0
end
def wait_for_completed(timeout = 900)
wait_for_state('completed', timeout)
end
def state
tasker.show.summary
end
def href
tasker.href
end
def friendly_url
friendly_url = Connection.audit_url + '/audit_entries/'
friendly_url += href.split(/\//).last
friendly_url = friendly_url.gsub('ae-', '')
friendly_url
end
def details
tasker.show(view: 'extended').detail
end
end
end
OPS-4541 removed typo in log statement
#
# Extra classes needed to operate with Chimp
#
module Chimp
#
# This class allows to check on the status of any of the tasks created.
#
class Task
attr_writer :tasker
attr_reader :tasker
def initialize
@api_polling_rate = ENV['API_POLLING_RATE'].to_i || 30
end
def wait_for_state(desired_state, timeout = 900)
while timeout > 0
# Make compatible with RL10.
status = state.downcase
return true if status.match(desired_state)
friendly_url = Connection.audit_url + '/audit_entries/'
friendly_url += href.split(/\//).last
friendly_url = friendly_url.gsub('ae-', '')
if status.match('failed') || status.match('aborted')
raise "FATAL error, #{status}\n\n Audit: #{friendly_url}\n "
end
Log.debug "Polling again in #{@api_polling_rate}"
sleep @api_polling_rate
timeout -= @api_polling_rate
end
raise "FATAL: Timeout waiting for Executable to complete. State was #{status}" if timeout <= 0
end
def wait_for_completed(timeout = 900)
wait_for_state('completed', timeout)
end
def state
tasker.show.summary
end
def href
tasker.href
end
def friendly_url
friendly_url = Connection.audit_url + '/audit_entries/'
friendly_url += href.split(/\//).last
friendly_url = friendly_url.gsub('ae-', '')
friendly_url
end
def details
tasker.show(view: 'extended').detail
end
end
end
|
module Risu
module Base
class TemplateManager
attr_accessor :registered_templates
#
#
def initialize (path)
@registered_templates = Array.new
@templates = Array.new
load_templates(path)
end
#
#
def load_templates(path)
begin
base_dir = __FILE__.gsub("risu/base/template_manager.rb", "")
Dir["#{base_dir + path}/**/*.rb"].each do |x|
begin
load x
rescue => e
next
end
end
TemplateBase.possible_templates.each do |p|
if validate(p) == true
@registered_templates << p
end
end
rescue => e
puts "Bad plugin"
end
end
#
#
def validate(template)
t = template.new
if t == nil
return false
end
if t.respond_to?(:render) == false
return false
end
return true
end
#
#
def find_plugins(file_name)
Dir.new("#{file_name}").each do |file|
next if file.match(/^\.+/)
path = "#{file_name}/#{file}"
if FileTest.directory?("#{path}")
list("#{path}")
else
self.register_template path
end
end
end
#
#
def register_template(plugin)
load plugin
@templates.push(plugin)
end
#
#
def display_templates
@registered_templates.each do |x|
p = x.new
puts "[*] Template: #{p.template_info[:name]}\n",
end
end
end
end
end
Changed the way templates are displayed
module Risu
module Base
class TemplateManager
attr_accessor :registered_templates
#
#
def initialize (path)
@registered_templates = Array.new
@templates = Array.new
load_templates(path)
end
#
#
def load_templates(path)
begin
base_dir = __FILE__.gsub("risu/base/template_manager.rb", "")
Dir["#{base_dir + path}/**/*.rb"].each do |x|
begin
load x
rescue => e
next
end
end
TemplateBase.possible_templates.each do |p|
if validate(p) == true
@registered_templates << p
end
end
rescue => e
puts "Bad plugin"
end
end
#
#
def validate(template)
t = template.new
if t == nil
return false
end
if t.respond_to?(:render) == false
return false
end
return true
end
#
#
def find_plugins(file_name)
Dir.new("#{file_name}").each do |file|
next if file.match(/^\.+/)
path = "#{file_name}/#{file}"
if FileTest.directory?("#{path}")
list("#{path}")
else
self.register_template path
end
end
end
#
#
def register_template(plugin)
load plugin
@templates.push(plugin)
end
#
#
def display_templates
puts "Available Templates"
@registered_templates.each do |x|
p = x.new
puts "\t#{p.template_info[:name]} - #{p.template_info[:description]}\n",
end
end
end
end
end
|
module Ruboty
module TrainDelay
VERSION = "0.2.0"
end
end
Bump to 0.2.1
module Ruboty
module TrainDelay
VERSION = "0.2.1"
end
end
|
require 'ruby-redtail/contact'
require 'ruby-redtail/client'
module RubyRedtail
class User
class Contacts
CONTACT_SEARCH_FIELDS = ['LastUpdate','Name','RecAdd','PhoneNumber','Tag_Group','FirstName','LastName','FamilyName','FamilyHead','ClientStatus','ContactType','ClientSource','TaxId']
CONTACT_SEARCH_OPERANDS = ['=','>','<','!=','Like','BeginsWith','IsEmpty']
def initialize(api_hash)
@api_hash = api_hash
end
# Contact Search by Name Fetch
# returns a paged list of limited Contact Information, including the ContactID
# *value = searched name
# *page = pagination
# http://help.redtailtechnology.com/entries/21937828-contacts-search-contacts-search
def search_by_name (value, page = 1)
build_contacts_array RubyRedtail::Query.run("contacts/search?value=#{value}&page=#{page}", @api_hash, "GET")["Contacts"]
end
# Contact Search by Letter Fetch
# returns a paged list of limited Contact Information, including the ContactID,
# based on a partial name or a single character.
# *value = searched beginning letter(s)
# *page = pagination
# http://help.redtailtechnology.com/entries/21937828-contacts-search-contacts-search
def search_by_letter (value, page = 1)
build_contacts_array RubyRedtail::Query.run("contacts/search/beginswith?value=#{value}&page=#{page}", @api_hash, "GET")["Contacts"]
end
# TODO: Test this properly
# Search Contact by Custom Query
# returns a paged list of Basic Contact Information, including the ContactID,
# based on the specified field, operand, and field value.
# http://help.redtailtechnology.com/entries/22550401
def search (query, page = 1)
body = Array.new(query.length) { {} }
query.each_with_index do |expr, i|
body[i]["Field"] = CONTACT_SEARCH_FIELDS.index(expr[0]).to_s
body[i]["Operand"] = CONTACT_SEARCH_OPERANDS.index(expr[1]).to_s
body[i]["Value"] = expr[2]
end
build_clients_array RubyRedtail::Query.run("contacts/search?page=#{page}", @api_hash, "POST", body)["Contacts"]
end
# Create New Contact
def create (params)
update(0, params)
end
protected
def build_contact contact_hash
if contact_hash
RubyRedtail::Contact.new(contact_hash,@api_hash)
else
raise RubyRedtail::AuthenticationError
end
end
def build_contacts_array contact_hashes
if client_hashes
contact_hashes.collect { |contact_hash| self.build_contact contact_hash }
else
raise RubyRedtail::AuthenticationError
end
end
def build_client client_hash
if client_hash
RubyRedtail::Client.new(client_hash,@api_hash)
else
raise RubyRedtail::AuthenticationError
end
end
def build_clients_array client_hashes
if client_hashes
client_hashes.collect { |client_hash| self.build_client client_hash }
else
raise RubyRedtail::AuthenticationError
end
end
end
end
end
Fixed missing variable 'client_hash'
When using user.contacts.search I was getting an undef'ed variable 'clients_hash' coming from User::Contacts#build_contacts_array
require 'ruby-redtail/contact'
require 'ruby-redtail/client'
module RubyRedtail
class User
class Contacts
CONTACT_SEARCH_FIELDS = ['LastUpdate','Name','RecAdd','PhoneNumber','Tag_Group','FirstName','LastName','FamilyName','FamilyHead','ClientStatus','ContactType','ClientSource','TaxId']
CONTACT_SEARCH_OPERANDS = ['=','>','<','!=','Like','BeginsWith','IsEmpty']
def initialize(api_hash)
@api_hash = api_hash
end
# Contact Search by Name Fetch
# returns a paged list of limited Contact Information, including the ContactID
# *value = searched name
# *page = pagination
# http://help.redtailtechnology.com/entries/21937828-contacts-search-contacts-search
def search_by_name (value, page = 1)
build_contacts_array RubyRedtail::Query.run("contacts/search?value=#{value}&page=#{page}", @api_hash, "GET")["Contacts"]
end
# Contact Search by Letter Fetch
# returns a paged list of limited Contact Information, including the ContactID,
# based on a partial name or a single character.
# *value = searched beginning letter(s)
# *page = pagination
# http://help.redtailtechnology.com/entries/21937828-contacts-search-contacts-search
def search_by_letter (value, page = 1)
build_contacts_array RubyRedtail::Query.run("contacts/search/beginswith?value=#{value}&page=#{page}", @api_hash, "GET")["Contacts"]
end
# TODO: Test this properly
# Search Contact by Custom Query
# returns a paged list of Basic Contact Information, including the ContactID,
# based on the specified field, operand, and field value.
# http://help.redtailtechnology.com/entries/22550401
def search (query, page = 1)
body = Array.new(query.length) { {} }
query.each_with_index do |expr, i|
body[i]["Field"] = CONTACT_SEARCH_FIELDS.index(expr[0]).to_s
body[i]["Operand"] = CONTACT_SEARCH_OPERANDS.index(expr[1]).to_s
body[i]["Value"] = expr[2]
end
build_clients_array RubyRedtail::Query.run("contacts/search?page=#{page}", @api_hash, "POST", body)["Contacts"]
end
# Create New Contact
def create (params)
update(0, params)
end
protected
def build_contact contact_hash
if contact_hash
RubyRedtail::Contact.new(contact_hash,@api_hash)
else
raise RubyRedtail::AuthenticationError
end
end
def build_contacts_array contact_hashes
if contact_hashes
contact_hashes.collect { |contact_hash| self.build_contact contact_hash }
else
raise RubyRedtail::AuthenticationError
end
end
def build_client client_hash
if client_hash
RubyRedtail::Client.new(client_hash,@api_hash)
else
raise RubyRedtail::AuthenticationError
end
end
def build_clients_array client_hashes
if client_hashes
client_hashes.collect { |client_hash| self.build_client client_hash }
else
raise RubyRedtail::AuthenticationError
end
end
end
end
end
|
class SystemApi < ErrorsApi
def initialize(api)
@engines_api = api
@engines_conf_cache = {}
end
def is_startup_complete(container)
clear_error
return File.exist?(ContainerStateFiles.container_state_dir(container) + '/run/flags/startup_complete')
rescue StandardError => e
SystemUtils.log_exception(e)
end
def get_build_report(engine_name)
clear_error
state_dir = SystemConfig.RunDir + '/containers/' + engine_name
return File.read(state_dir + '/buildreport.txt') if File.exist?(state_dir + '/buildreport.txt')
return 'Build Not Successful'
rescue StandardError => e
SystemUtils.log_exception(e)
end
def save_build_report(container, build_report)
clear_error
state_dir = ContainerStateFiles.container_state_dir(container)
f = File.new(state_dir + '/buildreport.txt', File::CREAT | File::TRUNC | File::RDWR, 0644)
f.puts(build_report)
f.close
return true
rescue StandardError => e
SystemUtils.log_exception(e)
end
def save_container(container)
clear_error
# FIXME:
api = container.container_api
container.container_api = nil
last_result = container.last_result
# last_error = container.last_error
# save_last_result_and_error(container)
container.last_result = ''
serialized_object = YAML.dump(container)
container.container_api = api
# container.last_result = last_result
#container.last_error = last_error
state_dir = ContainerStateFiles.container_state_dir(container)
FileUtils.mkdir_p(state_dir) if Dir.exist?(state_dir) == false
statefile = state_dir + '/running.yaml'
# BACKUP Current file with rename
if File.exist?(statefile)
statefile_bak = statefile + '.bak'
File.rename(statefile, statefile_bak)
end
f = File.new(statefile, File::CREAT | File::TRUNC | File::RDWR, 0644)
f.puts(serialized_object)
f.close
return true
rescue StandardError => e
container.last_error = last_error
# FIXME: Need to rename back if failure
SystemUtils.log_exception(e)
end
def set_engine_network_properties(engine, params)
clear_error
return set_engine_hostname_details(engine, params) if set_engine_web_protocol_properties(engine, params)
return false
end
def set_engine_web_protocol_properties(engine, params)
clear_error
# engine_name = params[:engine_name]
protocol = params[:http_protocol]
return false if protocol.nil?
SystemUtils.debug_output('Changing protocol to _', protocol)
if protocol.include?('HTTPS only')
engine.enable_https_only
elsif protocol.include?('HTTP only')
engine.enable_http_only
elsif protocol.include?('HTTPS and HTTP')
engine.enable_http_and_https
end
rescue StandardError => e
SystemUtils.log_exception(e)
end
def set_engine_hostname_details(container, params)
clear_error
# engine_name = params[:engine_name]
hostname = params[:host_name]
domain_name = params[:domain_name]
SystemUtils.debug_output('Changing Domainame to ', domain_name)
# saved_hostName = container.hostname
# saved_domainName = container.domain_name
SystemUtils.debug_output('Changing Domainame to ', domain_name)
container.remove_nginx_service
container.set_hostname_details(hostname, domain_name)
save_container(container)
container.add_nginx_service
return true
rescue StandardError => e
SystemUtils.log_exception(e)
end
def getManagedEngines
ret_val = []
Dir.entries(SystemConfig.RunDir + '/containers/').each do |contdir|
yfn = SystemConfig.RunDir + '/containers/' + contdir + '/running.yaml'
if File.exist?(yfn)
managed_engine = loadManagedEngine(contdir)
if managed_engine.is_a?(ManagedEngine)
ret_val.push(managed_engine)
else
log_error_mesg('failed to load ', yfn)
end
end
end
return ret_val
rescue StandardError => e
SystemUtils.log_exception(e)
end
def loadManagedEngine(engine_name)
# p :load_me
# p engine_name
e = engine_from_cache(engine_name)
return e unless e.nil?
return log_error_mesg('No Engine name', engine_name) if engine_name.nil? || engine_name.length == 0
yam_file_name = SystemConfig.RunDir + '/containers/' + engine_name + '/running.yaml'
return log_error_mesg('No Engine file', engine_name) unless File.exist?(yam_file_name)
yaml_file = File.read(yam_file_name)
managed_engine = ManagedEngine.from_yaml(yaml_file, @engines_api.container_api)
return false if managed_engine.nil? || managed_engine == false
cache_engine(engine_name,managed_engine)
return managed_engine
rescue StandardError => e
unless engine_name.nil?
unless managed_engine.nil?
managed_engine.last_error = 'Failed To get Managed Engine ' + engine_name + ' ' + e.to_s
log_error_mesg(managed_engine.last_error, e)
end
else
log_error_mesg('nil Engine Name', engine_name)
end
log_exception(e)
end
def loadSystemService(service_name)
_loadManagedService(service_name, SystemConfig.RunDir + '/system_services/')
end
def loadManagedService(service_name)
s = engine_from_cache('/services/' + service_name)
# p :service_from_cache unless s.nil?
return s unless s.nil?
s = _loadManagedService(service_name, SystemConfig.RunDir + '/services/')
cache_engine('/services/' + service_name, s)
# p :loaded_service
# p service_name
return s
end
def _loadManagedService(service_name, service_type_dir)
if service_name.nil? || service_name.length == 0
@last_error = 'No Service Name'
return false
end
yam1_file_name = service_type_dir + service_name + '/running.yaml'
unless File.exist?(yam1_file_name)
return log_error_mesg('failed to create service file ', service_type_dir + '/' + service_name.to_s) unless ContainerStateFiles.build_running_service(service_name, service_type_dir)
end
yaml_file = File.read(yam1_file_name)
# managed_service = YAML::load( yaml_file)
managed_service = SystemService.from_yaml(yaml_file, @engines_api.service_api) if service_type_dir == '/sytem_services/'
managed_service = ManagedService.from_yaml(yaml_file, @engines_api.service_api)
return log_error_mesg('Failed to load', yaml_file) if managed_service.nil?
managed_service
rescue StandardError => e
if service_name.nil? == false
unless managed_service.nil?
managed_service.last_error = ('Failed To get Managed Engine ' + service_name.to_s + ' ' + e.to_s)
log_exception(e)
end
else
log_error_mesg('nil Service Name', service_name)
end
log_exception(e)
end
def engine_from_cache(ident)
return @engines_conf_cache[ident.to_sym] if @engines_conf_cache.key?(ident.to_sym)
return nil
end
def delete_engine(engine_name)
@engines_conf_cache.delete(engine_name.to_sym)
end
def cache_engine(ident, engine)
@engines_conf_cache[ident.to_sym] = engine
Thread.new { sleep 5; @engines_conf_cache[ident.to_sym] = nil }
end
def getManagedServices
begin
ret_val = []
Dir.entries(SystemConfig.RunDir + '/services/').each do |contdir|
yfn = SystemConfig.RunDir + '/services/' + contdir + '/config.yaml'
if File.exist?(yfn) == true
managed_service = loadManagedService(contdir)
ret_val.push(managed_service) if managed_service
end
end
return ret_val
rescue StandardError => e
log_exception(e)
end
end
def list_managed_engines
clear_error
ret_val = []
Dir.entries(SystemConfig.RunDir + '/containers/').each do |contdir|
yfn = SystemConfig.RunDir + '/containers/' + contdir + '/running.yaml'
ret_val.push(contdir) if File.exist?(yfn)
end
return ret_val
rescue StandardError => e
log_exception(e)
return ret_val
end
def list_managed_services
clear_error
ret_val = []
Dir.entries(SystemConfig.RunDir + '/services/').each do |contdir|
yfn = SystemConfig.RunDir + '/services/' + contdir + '/config.yaml'
ret_val.push(contdir) if File.exist?(yfn)
end
return ret_val
rescue StandardError => e
log_exception(e)
return ret_val
end
def generate_engines_user_ssh_key
newkey = regen_system_ssh_key # SystemUtils.run_command(SystemConfig.generate_ssh_private_keyfile)
return log_error_mesg("Not an RSA key",newkey) unless newkey.include?('-----BEGIN RSA PRIVATE KEY-----')
return newkey
rescue StandardError => e
SystemUtils.log_exception(e)
end
def update_public_key(key)
SystemUtils.execute_command('ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i /home/engines/.ssh/mgmt/update_system_access engines@172.17.42.1 /opt/engines/bin/update_system_access.sh ' + key)
end
def regen_system_ssh_key
SystemUtils.run_command('ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i /home/engines/.ssh/mgmt/regen_private engines@172.17.42.1 /opt/engines/bin/regen_private.sh ')
end
def system_update_status
SystemUtils.execute_command('ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i /home/engines/.ssh/mgmt/deb_update_status engines@172.17.42.1 /opt/engines/bin/deb_update_status.sh')
end
def restart_system
res = Thread.new { system('ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i /home/engines/.ssh/mgmt/restart_system engines@172.17.42.1 /opt/engines/bin/restart_system.sh') }
# FIXME: check a status flag after sudo side post ssh run ie when we know it's definititly happenging
return true if res.status == 'run'
return false
end
def update_system
res = Thread.new { system('ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i /home/engines/.ssh/mgmt/update_system engines@172.17.42.1 /opt/engines/bin/update_system.sh') }
# FIXME: check a status flag after sudo side post ssh run ie when we know it's definititly happenging
return true if res.status == 'run'
end
def update_engines_system_software
result = SystemUtils.execute_command('sudo /opt/engines/scripts/_update_engines_system_software.sh ')
if result[:result] == -1
@last_error = result[:stderr]
FileUtils.rm_f(SystemConfig.EnginesSystemUpdatingFlag)
return false
end
# FIXME: The following carp was added to support gui debug please remove all rails references once gui is sorted
if Rails.env.production?
if result[:stdout].include?('Already up-to-date') && File.exist?('/opt/engines/run/system/flags/test_engines_update') == false
@last_error = result[:stdout]
FileUtils.rm_f(SystemConfig.EnginesSystemUpdatingFlag)
return false
end
end
res = Thread.new { SystemUtils.execute_command('ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i /home/engines/.ssh/mgmt/update_engines_system_software engines@172.17.42.1 /opt/engines/bin/update_engines_system_software.sh') }
# FIXME: check a status flag after sudo side post ssh run ie when we know it's definititly happenging
@last_error = result[:stdout]
return true if res.status == 'run'
return false
end
# FIXME: Kludge should read from network namespace /proc ?
def get_container_network_metrics(container_name)
ret_val = {}
clear_error
def error_result
ret_val = {}
ret_val[:in] = 'n/a'
ret_val[:out] = 'n/a'
return ret_val
end
commandargs = 'docker exec ' + container_name + " netstat --interfaces -e | grep bytes |head -1 | awk '{ print $2 \" \" $6}' 2>&1"
p :netstat_cmd
p commandargs
result = SystemUtils.execute_command(commandargs)
if result[:result] != 0
ret_val = error_result
else
res = result[:stdout]
vals = res.split('bytes:')
if vals.count > 2
if vals[1].nil? == false && vals[2].nil? == false
ret_val[:in] = vals[1].chop
ret_val[:out] = vals[2].chop
else
ret_val = error_result
end
else
ret_val = error_result
end
return ret_val
end
rescue StandardError => e
log_exception(e)
return error_result
end
def api_shutdown
File.delete(SystemConfig.BuildRunningParamsFile) if File.exist?(SystemConfig.BuildRunningParamsFile)
end
end
random text
class SystemApi < ErrorsApi
def initialize(api)
@engines_api = api
@engines_conf_cache = {}
end
def is_startup_complete(container)
clear_error
return File.exist?(ContainerStateFiles.container_state_dir(container) + '/run/flags/startup_complete')
rescue StandardError => e
SystemUtils.log_exception(e)
end
def get_build_report(engine_name)
clear_error
state_dir = SystemConfig.RunDir + '/containers/' + engine_name
return File.read(state_dir + '/buildreport.txt') if File.exist?(state_dir + '/buildreport.txt')
return 'Build Not Successful'
rescue StandardError => e
SystemUtils.log_exception(e)
end
def save_build_report(container, build_report)
clear_error
state_dir = ContainerStateFiles.container_state_dir(container)
f = File.new(state_dir + '/buildreport.txt', File::CREAT | File::TRUNC | File::RDWR, 0644)
f.puts(build_report)
f.close
return true
rescue StandardError => e
SystemUtils.log_exception(e)
end
def save_container(container)
clear_error
# FIXME:
api = container.container_api
container.container_api = nil
last_result = container.last_result
# last_error = container.last_error
# save_last_result_and_error(container)
container.last_result = ''
serialized_object = YAML.dump(container)
container.container_api = api
# container.last_result = last_result
#container.last_error = last_error
state_dir = ContainerStateFiles.container_state_dir(container)
FileUtils.mkdir_p(state_dir) if Dir.exist?(state_dir) == false
statefile = state_dir + '/running.yaml'
# BACKUP Current file with rename
if File.exist?(statefile)
statefile_bak = statefile + '.bak'
File.rename(statefile, statefile_bak)
end
f = File.new(statefile, File::CREAT | File::TRUNC | File::RDWR, 0644)
f.puts(serialized_object)
f.close
return true
rescue StandardError => e
container.last_error = last_error
# FIXME: Need to rename back if failure
SystemUtils.log_exception(e)
end
def set_engine_network_properties(engine, params)
clear_error
return set_engine_hostname_details(engine, params) if set_engine_web_protocol_properties(engine, params)
return false
end
def set_engine_web_protocol_properties(engine, params)
clear_error
# engine_name = params[:engine_name]
protocol = params[:http_protocol]
return false if protocol.nil?
SystemUtils.debug_output('Changing protocol to _', protocol)
if protocol.include?('HTTPS only')
engine.enable_https_only
elsif protocol.include?('HTTP only')
engine.enable_http_only
elsif protocol.include?('HTTPS and HTTP')
engine.enable_http_and_https
end
rescue StandardError => e
SystemUtils.log_exception(e)
end
def set_engine_hostname_details(container, params)
clear_error
# engine_name = params[:engine_name]
hostname = params[:host_name]
domain_name = params[:domain_name]
SystemUtils.debug_output('Changing Domainame to ', domain_name)
# saved_hostName = container.hostname
# saved_domainName = container.domain_name
SystemUtils.debug_output('Changing Domainame to ', domain_name)
container.remove_nginx_service
container.set_hostname_details(hostname, domain_name)
save_container(container)
container.add_nginx_service
return true
rescue StandardError => e
SystemUtils.log_exception(e)
end
def getManagedEngines
ret_val = []
Dir.entries(SystemConfig.RunDir + '/containers/').each do |contdir|
yfn = SystemConfig.RunDir + '/containers/' + contdir + '/running.yaml'
if File.exist?(yfn)
managed_engine = loadManagedEngine(contdir)
if managed_engine.is_a?(ManagedEngine)
ret_val.push(managed_engine)
else
log_error_mesg('failed to load ', yfn)
end
end
end
return ret_val
rescue StandardError => e
SystemUtils.log_exception(e)
end
def loadManagedEngine(engine_name)
# p :load_me
# p engine_name
e = engine_from_cache(engine_name)
return e unless e.nil?
return log_error_mesg('No Engine name', engine_name) if engine_name.nil? || engine_name.length == 0
yam_file_name = SystemConfig.RunDir + '/containers/' + engine_name + '/running.yaml'
return log_error_mesg('No Engine file', engine_name) unless File.exist?(yam_file_name)
yaml_file = File.read(yam_file_name)
managed_engine = ManagedEngine.from_yaml(yaml_file, @engines_api.container_api)
return false if managed_engine.nil? || managed_engine == false
cache_engine(engine_name,managed_engine)
return managed_engine
rescue StandardError => e
unless engine_name.nil?
unless managed_engine.nil?
managed_engine.last_error = 'Failed To get Managed Engine ' + engine_name + ' ' + e.to_s
log_error_mesg(managed_engine.last_error, e)
end
else
log_error_mesg('nil Engine Name', engine_name)
end
log_exception(e)
end
def loadSystemService(service_name)
_loadManagedService(service_name, SystemConfig.RunDir + '/system_services/')
end
def loadManagedService(service_name)
s = engine_from_cache('/services/' + service_name)
# p :service_from_cache unless s.nil?
return s unless s.nil?
s = _loadManagedService(service_name, SystemConfig.RunDir + '/services/')
cache_engine('/services/' + service_name, s)
# p :loaded_service
# p service_name
return s
end
def _loadManagedService(service_name, service_type_dir)
if service_name.nil? || service_name.length == 0
@last_error = 'No Service Name'
return false
end
yam1_file_name = service_type_dir + service_name + '/running.yaml'
unless File.exist?(yam1_file_name)
return log_error_mesg('failed to create service file ', service_type_dir + '/' + service_name.to_s) unless ContainerStateFiles.build_running_service(service_name, service_type_dir)
end
yaml_file = File.read(yam1_file_name)
# managed_service = YAML::load( yaml_file)
managed_service = SystemService.from_yaml(yaml_file, @engines_api.service_api) if service_type_dir == '/system_services/'
managed_service = ManagedService.from_yaml(yaml_file, @engines_api.service_api)
return log_error_mesg('Failed to load', yaml_file) if managed_service.nil?
managed_service
rescue StandardError => e
if service_name.nil? == false
unless managed_service.nil?
managed_service.last_error = ('Failed To get Managed Engine ' + service_name.to_s + ' ' + e.to_s)
log_exception(e)
end
else
log_error_mesg('nil Service Name', service_name)
end
log_exception(e)
end
def engine_from_cache(ident)
return @engines_conf_cache[ident.to_sym] if @engines_conf_cache.key?(ident.to_sym)
return nil
end
def delete_engine(engine_name)
@engines_conf_cache.delete(engine_name.to_sym)
end
def cache_engine(ident, engine)
@engines_conf_cache[ident.to_sym] = engine
Thread.new { sleep 5; @engines_conf_cache[ident.to_sym] = nil }
end
def getManagedServices
begin
ret_val = []
Dir.entries(SystemConfig.RunDir + '/services/').each do |contdir|
yfn = SystemConfig.RunDir + '/services/' + contdir + '/config.yaml'
if File.exist?(yfn) == true
managed_service = loadManagedService(contdir)
ret_val.push(managed_service) if managed_service
end
end
return ret_val
rescue StandardError => e
log_exception(e)
end
end
def list_managed_engines
clear_error
ret_val = []
Dir.entries(SystemConfig.RunDir + '/containers/').each do |contdir|
yfn = SystemConfig.RunDir + '/containers/' + contdir + '/running.yaml'
ret_val.push(contdir) if File.exist?(yfn)
end
return ret_val
rescue StandardError => e
log_exception(e)
return ret_val
end
def list_managed_services
clear_error
ret_val = []
Dir.entries(SystemConfig.RunDir + '/services/').each do |contdir|
yfn = SystemConfig.RunDir + '/services/' + contdir + '/config.yaml'
ret_val.push(contdir) if File.exist?(yfn)
end
return ret_val
rescue StandardError => e
log_exception(e)
return ret_val
end
def generate_engines_user_ssh_key
newkey = regen_system_ssh_key # SystemUtils.run_command(SystemConfig.generate_ssh_private_keyfile)
return log_error_mesg("Not an RSA key",newkey) unless newkey.include?('-----BEGIN RSA PRIVATE KEY-----')
return newkey
rescue StandardError => e
SystemUtils.log_exception(e)
end
def update_public_key(key)
SystemUtils.execute_command('ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i /home/engines/.ssh/mgmt/update_system_access engines@172.17.42.1 /opt/engines/bin/update_system_access.sh ' + key)
end
def regen_system_ssh_key
SystemUtils.run_command('ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i /home/engines/.ssh/mgmt/regen_private engines@172.17.42.1 /opt/engines/bin/regen_private.sh ')
end
def system_update_status
SystemUtils.execute_command('ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i /home/engines/.ssh/mgmt/deb_update_status engines@172.17.42.1 /opt/engines/bin/deb_update_status.sh')
end
def restart_system
res = Thread.new { system('ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i /home/engines/.ssh/mgmt/restart_system engines@172.17.42.1 /opt/engines/bin/restart_system.sh') }
# FIXME: check a status flag after sudo side post ssh run ie when we know it's definititly happenging
return true if res.status == 'run'
return false
end
def update_system
res = Thread.new { system('ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i /home/engines/.ssh/mgmt/update_system engines@172.17.42.1 /opt/engines/bin/update_system.sh') }
# FIXME: check a status flag after sudo side post ssh run ie when we know it's definititly happenging
return true if res.status == 'run'
end
def update_engines_system_software
result = SystemUtils.execute_command('sudo /opt/engines/scripts/_update_engines_system_software.sh ')
if result[:result] == -1
@last_error = result[:stderr]
FileUtils.rm_f(SystemConfig.EnginesSystemUpdatingFlag)
return false
end
# FIXME: The following carp was added to support gui debug please remove all rails references once gui is sorted
if Rails.env.production?
if result[:stdout].include?('Already up-to-date') && File.exist?('/opt/engines/run/system/flags/test_engines_update') == false
@last_error = result[:stdout]
FileUtils.rm_f(SystemConfig.EnginesSystemUpdatingFlag)
return false
end
end
res = Thread.new { SystemUtils.execute_command('ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i /home/engines/.ssh/mgmt/update_engines_system_software engines@172.17.42.1 /opt/engines/bin/update_engines_system_software.sh') }
# FIXME: check a status flag after sudo side post ssh run ie when we know it's definititly happenging
@last_error = result[:stdout]
return true if res.status == 'run'
return false
end
# FIXME: Kludge should read from network namespace /proc ?
def get_container_network_metrics(container_name)
ret_val = {}
clear_error
def error_result
ret_val = {}
ret_val[:in] = 'n/a'
ret_val[:out] = 'n/a'
return ret_val
end
commandargs = 'docker exec ' + container_name + " netstat --interfaces -e | grep bytes |head -1 | awk '{ print $2 \" \" $6}' 2>&1"
p :netstat_cmd
p commandargs
result = SystemUtils.execute_command(commandargs)
if result[:result] != 0
ret_val = error_result
else
res = result[:stdout]
vals = res.split('bytes:')
if vals.count > 2
if vals[1].nil? == false && vals[2].nil? == false
ret_val[:in] = vals[1].chop
ret_val[:out] = vals[2].chop
else
ret_val = error_result
end
else
ret_val = error_result
end
return ret_val
end
rescue StandardError => e
log_exception(e)
return error_result
end
def api_shutdown
File.delete(SystemConfig.BuildRunningParamsFile) if File.exist?(SystemConfig.BuildRunningParamsFile)
end
end
|
class SystemApi < ErrorsApi
def initialize(api)
@engines_api = api
end
def create_container(container)
clear_error
cid = read_container_id(container)
container.container_id = cid
stateDir = container_state_dir(container)
if File.directory?(stateDir) == false
Dir.mkdir(stateDir)
if Dir.exist?(stateDir + '/run') == false
Dir.mkdir(stateDir + '/run')
Dir.mkdir(stateDir + '/run/flags')
end
FileUtils.chown_R(nil, 'containers', stateDir + '/run')
FileUtils.chmod_R('u+r', stateDir + '/run')
end
log_dir = container_log_dir(container)
Dir.mkdir(log_dir) if File.directory?(log_dir) == false
if container.is_service?
Dir.mkdir(stateDir + '/configurations/') if File.directory?(stateDir + '/configurations') == false
Dir.mkdir(stateDir + '/configurations/default') if File.directory?(stateDir + '/configurations/default') == false
end
return save_container(container)
rescue StandardError => e
container.last_error = 'Failed To Create ' + e.to_s
SystemUtils.log_exception(e)
end
def clear_cid(container)
container.container_id = -1
end
def is_startup_complete(container)
clear_error
return File.exist?(container_state_dir(container) + '/run/flags/startup_complete')
rescue StandardError => e
SystemUtils.log_exception(e)
end
def clear_cid_file container
clear_error
cidfile = container_cid_file(container)
File.delete(cidfile) if File.exist?(cidfile)
clear_cid(container)
return true
rescue StandardError => e
container.last_error = 'Failed To remove cid file' + e.to_s
SystemUtils.log_exception(e)
end
def read_container_id(container)
clear_error
cidfile = container_cid_file(container)
return File.read(cidfile) if File.exist?(cidfile)
rescue StandardError => e
SystemUtils.log_exception(e)
return '-1'
end
def destroy_container container
clear_error
container.container_id = -1
return File.delete(container_cid_file(container)) if File.exist?(container_cid_file(container))
return true # File may or may not exist
rescue StandardError => e
container.last_error = 'Failed To delete cid ' + e.to_s
SystemUtils.log_exception(e)
end
def delete_container_configs(container)
clear_error
cidfile = SystemConfig.CidDir + '/' + container.container_name + '.cid'
File.delete(cidfile) if File.exist?(cidfile)
cmd = 'docker rm volbuilder'
retval = SystemUtils.run_system(cmd)
cmd = 'docker run --name volbuilder --memory=20m -e fw_user=www-data -v /opt/engines/run/containers/' + container.container_name + '/:/client/state:rw -v /var/log/engines/containers/' + container.container_name + ':/client/log:rw -t engines/volbuilder:' + SystemUtils.system_release + ' /home/remove_container.sh state logs'
retval = SystemUtils.run_system(cmd)
cmd = 'docker rm volbuilder'
retval = SystemUtils.run_system(cmd)
if retval == true
FileUtils.rm_rf(container_state_dir(container))
return true
else
container.last_error = 'Failed to Delete state and logs:' + retval.to_s
log_error_mesg('Failed to Delete state and logs:' + retval.to_s, container)
end
rescue StandardError => e
container.last_error = 'Failed To Delete '
log_exception(e)
end
# def get_cert_name(fqdn)
# if File.exists?(SystemConfig.NginxCertDir + '/' + fqdn + '.crt')
# return fqdn
# else
# return SystemConfig.NginxDefaultCert
# end
# end
def get_build_report(engine_name)
clear_error
stateDir = SystemConfig.RunDir + '/containers/' + engine_name
return File.read(stateDir + '/buildreport.txt') if File.exist?(stateDir + '/buildreport.txt')
return 'Build Not Successful'
rescue StandardError => e
SystemUtils.log_exception(e)
end
def save_build_report(container, build_report)
clear_error
stateDir = container_state_dir(container)
f = File.new(stateDir + '/buildreport.txt', File::CREAT | File::TRUNC | File::RDWR, 0644)
f.puts(build_report)
f.close
return true
rescue StandardError => e
SystemUtils.log_exception(e)
end
def save_container(container)
clear_error
# FIXME:
api = container.container_api
container.container_api = nil
last_result = container.last_result
last_error = container.last_error
# save_last_result_and_error(container)
container.last_result = ''
container.last_error = ''
serialized_object = YAML::dump(container)
container.container_api = api
container.last_result = last_result
container.last_error = last_error
stateDir = container_state_dir(container)
FileUtils.mkdir_p(stateDir) if Dir.exist?(stateDir) == false
statefile = stateDir + '/running.yaml'
# BACKUP Current file with rename
if File.exist?(statefile)
statefile_bak = statefile + '.bak'
File.rename(statefile, statefile_bak)
end
f = File.new(statefile, File::CREAT | File::TRUNC | File::RDWR, 0644)
f.puts(serialized_object)
f.close
return true
rescue StandardError => e
container.last_error = last_error
# FIXME: Need to rename back if failure
SystemUtils.log_exception(e)
end
def save_blueprint(blueprint, container)
clear_error
if blueprint.nil? == false
puts blueprint.to_s
else
return false
end
stateDir = container_state_dir(container)
Dir.mkdir(stateDir) if File.directory?(stateDir) == false
statefile = stateDir + '/blueprint.json'
f = File.new(statefile, File::CREAT | File::TRUNC | File::RDWR, 0644)
f.write(blueprint.to_json)
f.close
rescue StandardError => e
SystemUtils.log_exception(e)
end
def load_blueprint(container)
clear_error
stateDir = container_state_dir(container)
return false if File.directory?(stateDir) == false
statefile = stateDir + '/blueprint.json'
if File.exist?(statefile)
f = File.new(statefile, 'r')
blueprint = JSON.parse(f.read)
f.close
else
return false
end
return blueprint
rescue StandardError => e
SystemUtils.log_exception(e)
end
def get_container_memory_stats(container)
clear_error
ret_val = {}
if container && container.container_id.nil? || container.container_id == '-1'
container_id = read_container_id(container)
container.container_id = container_id
end
if container && container.container_id.nil? == false && container.container_id != '-1'
# path = '/sys/fs/cgroup/memory/docker/' + container.container_id.to_s + '/'
path = SystemUtils.cgroup_mem_dir(container.container_id.to_s)
if Dir.exist?(path)
ret_val.store(:maximum, File.read(path + '/memory.max_usage_in_bytes'))
ret_val.store(:current, File.read(path + '/memory.usage_in_bytes'))
ret_val.store(:limit, File.read(path + '/memory.limit_in_bytes'))
else
p :no_cgroup_file
p path
ret_val.store(:maximum, 'No Container')
ret_val.store(:current, 'No Container')
ret_val.store(:limit, 'No Container')
end
end
return ret_val
rescue StandardError => e
SystemUtils.log_exception(e)
ret_val.store(:maximum, e.to_s)
ret_val.store(:current, 'NA')
ret_val.store(:limit, 'NA')
return ret_val
end
def set_engine_network_properties(engine, params)
clear_error
return set_engine_hostname_details(engine, params) if set_engine_web_protocol_properties(engine, params)
return false
end
def set_engine_web_protocol_properties(engine, params)
clear_error
# engine_name = params[:engine_name]
protocol = params[:http_protocol]
if protocol.nil?
p params
return false
end
SystemUtils.debug_output('Changing protocol to _', protocol)
if protocol.include?('HTTPS only')
engine.enable_https_only
elsif protocol.include?('HTTP only')
engine.enable_http_only
elsif protocol.include?('HTTPS and HTTP')
engine.enable_http_and_https
end
return true
rescue StandardError => e
SystemUtils.log_exception(e)
end
def set_engine_hostname_details(container, params)
clear_error
# engine_name = params[:engine_name]
hostname = params[:host_name]
domain_name = params[:domain_name]
SystemUtils.debug_output('Changing Domainame to ', domain_name)
# saved_hostName = container.hostname
# saved_domainName = container.domain_name
SystemUtils.debug_output('Changing Domainame to ', domain_name)
container.remove_nginx_service
container.set_hostname_details(hostname, domain_name)
save_container(container)
container.add_nginx_service
return true
rescue StandardError => e
SystemUtils.log_exception(e)
end
def getManagedEngines
ret_val = []
Dir.entries(SystemConfig.RunDir + '/containers/').each do |contdir|
yfn = SystemConfig.RunDir + '/containers/' + contdir + '/running.yaml'
if File.exist?(yfn) == true
managed_engine = loadManagedEngine(contdir)
if managed_engine.is_a?(ManagedEngine)
ret_val.push(managed_engine)
else
log_error_mesg('failed to load ', yfn)
end
end
end
return ret_val
rescue StandardError => e
SystemUtils.log_exception(e)
end
def loadManagedEngine(engine_name)
if engine_name.nil? || engine_name.length == 0
@last_error = 'No Engine Name'
return false
end
yam_file_name = SystemConfig.RunDir + '/containers/' + engine_name + '/running.yaml'
if File.exist?(yam_file_name) == false
log_error_mesg('no such file ', yam_file_name)
return false # return failed(yam_file_name,'No such configuration:','Load Engine')
end
yaml_file = File.read(yam_file_name)
managed_engine = ManagedEngine.from_yaml(yaml_file, @engines_api.container_api)
return false if managed_engine.nil? || managed_engine == false
return managed_engine
rescue StandardError => e
if engine_name.nil? == false
if managed_engine.nil? == false
managed_engine.last_error = 'Failed To get Managed Engine ' + engine_name + ' ' + e.to_s
log_error_mesg(managed_engine.last_error,e)
end
else
log_error_mesg('nil Engine Name',engine_name)
end
log_exception(e)
end
def build_running_service(service_name, service_type_dir)
config_template_file_name = service_type_dir + service_name + '/config.yaml'
if File.exist?(config_template_file_name) == false
log_error_mesg('Running exits',service_name)
return false
end
config_template = File.read(config_template_file_name)
system_access = SystemAccess.new
templator = Templater.new(system_access, nil)
running_config = templator.process_templated_string(config_template)
yam1_file_name = service_type_dir + service_name + '/running.yaml'
yaml_file = File.new(yam1_file_name, 'w+')
yaml_file.write(running_config)
yaml_file.close
return true
end
def loadSystemService(service_name)
_loadManagedService(service_name, SystemConfig.RunDir + '/system_services/')
end
def loadManagedService(service_name)
_loadManagedService(service_name, SystemConfig.RunDir + '/services/')
end
def _loadManagedService(service_name, service_type_dir)
if service_name.nil? || service_name.length == 0
@last_error = 'No Service Name'
return false
end
yam1_file_name = service_type_dir + service_name + '/running.yaml'
if File.exist?(yam1_file_name) == false
if build_running_service(service_name, service_type_dir) == false
log_error_mesg('No build_running_service file ', service_type_dir + '/' + service_name.to_s)
return false # return failed(yam_file_name,'No such configuration:','Load Service')
end
end
yaml_file = File.read(yam1_file_name)
# managed_service = YAML::load( yaml_file)
if service_type_dir == '/sytem_services/'
managed_service = SystemService.from_yaml(yaml_file, @engines_api.service_api)
else
managed_service = ManagedService.from_yaml(yaml_file, @engines_api.service_api)
end
if managed_service.nil?
p :load_managed_servic_failed
log_error_mesg('load_managed_servic_failed loading:', yam1_file_name.to_s + ' service name: ' + service_name.to_s)
return false # return EnginsOSapiResult.failed(yam_file_name,'Fail to Load configuration:','Load Service')
end
return managed_service
rescue StandardError => e
if service_name.nil? == false
if managed_service.nil? == false
managed_service.last_error = ('Failed To get Managed Engine ' + service_name.to_s + ' ' + e.to_s)
log_exception(e)
end
else
log_error_mesg('nil Service Name',service_name)
end
log_exception(e)
end
def getManagedServices
begin
ret_val = []
Dir.entries(SystemConfig.RunDir + '/services/').each do |contdir|
yfn = SystemConfig.RunDir + '/services/' + contdir + '/config.yaml'
if File.exist?(yfn) == true
managed_service = loadManagedService(contdir)
ret_val.push(managed_service) if managed_service
end
end
return ret_val
rescue StandardError => e
log_exception(e)
end
end
def list_managed_engines
clear_error
ret_val = []
begin
Dir.entries(SystemConfig.RunDir + '/containers/').each do |contdir|
yfn = SystemConfig.RunDir + '/containers/' + contdir + '/running.yaml'
ret_val.push(contdir) if File.exist?(yfn) == true
end
rescue StandardError => e
log_exception(e)
return ret_val
end
return ret_val
end
def list_managed_services
clear_error
ret_val = []
Dir.entries(SystemConfig.RunDir + '/services/').each do |contdir|
yfn = SystemConfig.RunDir + '/services/' + contdir + '/config.yaml'
ret_val.push(contdir) if File.exist?(yfn) == true
end
return ret_val
rescue StandardError => e
log_exception(e)
return ret_val
end
def clear_container_var_run(container)
clear_error
File.unlink(container_state_dir(container) + '/startup_complete') if File.exist?(container_state_dir(container) + '/startup_complete')
return true
rescue StandardError => e
SystemUtils.log_exception(e)
end
def generate_engines_user_ssh_key
newkey = SystemUtils.run_command(SystemConfig.generate_ssh_private_keyfile)
if newkey.start_with?('-----BEGIN RSA PRIVATE KEY-----') == false
@last_error = newkey
return false
end
return newkey
rescue StandardError => e
SystemUtils.log_exception(e)
end
def update_public_key(key)
SystemUtils.execute_command('ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i /home/engines/.ssh/mgmt/update_access_system_pub engines@172.17.42.1 /opt/engines/bin/update_access_system_pub.sh ' + key)
end
def regen_system_ssh_key
SystemUtils.execute_command('ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i /home/engines/.ssh/mgmt/update_access_system_pub engines@172.17.42.1 /opt/engines/bin/regen_private.sh ')
end
def container_state_dir(container)
SystemConfig.RunDir + '/' + container.ctype + 's/' + container.container_name
end
def system_update_status
SystemUtils.execute_command('ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i /home/engines/.ssh/mgmt/deb_update_status engines@172.17.42.1 /opt/engines/bin/deb_update_status.sh')
end
def restart_system
res = Thread.new { system('ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i /home/engines/.ssh/mgmt/restart_system engines@172.17.42.1 /opt/engines/bin/restart_system.sh') }
# FIXME: check a status flag after sudo side post ssh run ie when we know it's definititly happenging
return true if res.status == 'run'
return false
end
def update_system
res = Thread.new { system('ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i /home/engines/.ssh/mgmt/update_system engines@172.17.42.1 /opt/engines/bin/update_system.sh') }
# FIXME: check a status flag after sudo side post ssh run ie when we know it's definititly happenging
return true if res.status == 'run'
return false
end
def update_engines_system_software
result = SystemUtils.execute_command('sudo /opt/engines/scripts/_update_engines_system_software.sh ')
if result[:result] == -1
@last_error = result[:stderr]
FileUtils.rm_f(SystemConfig.EnginesSystemUpdatingFlag)
return false
end
if result[:stdout].include?('Already up-to-date')
@last_error = result[:stdout]
FileUtils.rm_f(SystemConfig.EnginesSystemUpdatingFlag)
return false
end
res = Thread.new { SystemUtils.execute_command('ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i /home/engines/.ssh/mgmt/update_engines_system_software engines@172.17.42.1 /opt/engines/bin/update_engines_system_software.sh') }
# FIXME: check a status flag after sudo side post ssh run ie when we know it's definititly happenging
@last_error = result[:stdout]
return true if res.status == 'run'
return false
end
def update_domain(params)
old_domain_name = params[:original_domain_name]
return false if DNSHosting.update_domain(old_domain_name, params) == false
return true if params[:self_hosted] == false
service_hash = {}
service_hash[:parent_engine] = 'system'
service_hash[:variables] = {}
service_hash[:variables][:domainname] = params[:original_domain_name]
service_hash[:service_handle] = params[:original_domain_name] + '_dns'
service_hash[:container_type] = 'system'
service_hash[:publisher_namespace] = 'EnginesSystem'
service_hash[:type_path] = 'dns'
@engines_api.dettach_service(service_hash)
# @engines_api.deregister_non_persistant_service(service_hash)
# @engines_api.delete_service_from_engine_registry(service_hash)
service_hash[:variables][:domainname] = params[:domain_name]
service_hash[:service_handle] = params[:domain_name] + '_dns'
if params[:internal_only]
ip = DNSHosting.get_local_ip
else
ip = open('http://jsonip.com/') { |s| JSON::parse(s.string)['ip'] }
end
service_hash[:variables][:ip] = ip
if @engines_api.attach_service(service_hash) == true
@engines_api.register_non_persistant_service(service_hash)
return true
end
return false
rescue StandardError => e
SystemUtils.log_exception(e)
end
def add_domain(params)
return false if DNSHosting.add_domain(params) == false
return true if params[:self_hosted] == false
service_hash = {}
service_hash[:parent_engine] = 'system'
service_hash[:variables] = {}
service_hash[:variables][:domainname] = params[:domain_name]
service_hash[:service_handle] = params[:domain_name] + '_dns'
service_hash[:container_type] = 'system'
service_hash[:publisher_namespace] = 'EnginesSystem'
service_hash[:type_path] = 'dns'
if params[:internal_only]
ip = DNSHosting.get_local_ip
else
ip = open('http://jsonip.com/') { |s| JSON::parse(s.string)['ip'] }
end
service_hash[:variables][:ip] = ip
return @engines_api.register_non_persistant_service(service_hash) if @engines_api.attach_service(service_hash) == true
return false
rescue StandardError => e
log_error_mesg('Add self hosted domain exception', params.to_s)
log_exception(e)
end
# FIXME: Kludge should read from network namespace /proc ?
def get_container_network_metrics(container_name)
ret_val = {}
clear_error
def error_result
ret_val = {}
ret_val[:in] = 'n/a'
ret_val[:out] = 'n/a'
return ret_val
end
commandargs = 'docker exec ' + container_name + " netstat --interfaces -e | grep bytes |head -1 | awk '{ print $2 \' \' $6}' 2>&1"
result = SystemUtils.execute_command(commandargs)
if result[:result] != 0
ret_val = error_result
else
res = result[:stdout]
vals = res.split('bytes:')
if vals.count > 2
if vals[1].nil? == false && vals[2].nil? == false
ret_val[:in] = vals[1].chop
ret_val[:out] = vals[2].chop
else
ret_val = error_result
end
else
ret_val = error_result
end
return ret_val
end
rescue StandardError => e
log_exception(e)
return error_result
end
def remove_domain(params)
return false if DNSHosting.rm_domain(params) == false
return true if params[:self_hosted] == false
service_hash = {}
service_hash[:parent_engine] = 'system'
service_hash[:variables] = {}
service_hash[:variables][:domainname] = params[:domain_name]
service_hash[:service_handle] = params[:domain_name] + '_dns'
service_hash[:container_type] = 'system'
service_hash[:publisher_namespace] = 'EnginesSystem'
service_hash[:type_path] = 'dns'
if @engines_api.dettach_service(service_hash) == true
@engines_api.deregister_non_persistant_service(service_hash)
@engines_api.delete_service_from_engine_registry(service_hash)
return true
end
return false
rescue StandardError => e
log_exception(e)
return false
end
def list_domains
return DNSHosting.list_domains
rescue StandardError => e
return log_exception(e)
end
protected
def container_cid_file(container)
SystemConfig.CidDir + '/' + container.container_name + '.cid'
end
def container_log_dir(container)
SystemConfig.SystemLogRoot + '/' + container.ctype + 's/' + container.container_name
end
def run_system(cmd)
clear_error
begin
cmd += ' 2>&1'
res = (%x<#{cmd}>)
SystemUtils.debug_output('run System', res)
# FIXME: should be case insensitive The last one is a pure kludge
# really need to get stderr and stdout separately
return true if $CHILD_STATUS == 0 && res.downcase.include?('error') == false && res.downcase.include?('fail') == false && res.downcase.include?('could not resolve hostname') == false && res.downcase.include?('unsuccessful') == false
return res
rescue StandardError => e
log_exception(e)
return res
end
end
end
call service manger to register services
class SystemApi < ErrorsApi
def initialize(api)
@engines_api = api
end
def create_container(container)
clear_error
cid = read_container_id(container)
container.container_id = cid
stateDir = container_state_dir(container)
if File.directory?(stateDir) == false
Dir.mkdir(stateDir)
if Dir.exist?(stateDir + '/run') == false
Dir.mkdir(stateDir + '/run')
Dir.mkdir(stateDir + '/run/flags')
end
FileUtils.chown_R(nil, 'containers', stateDir + '/run')
FileUtils.chmod_R('u+r', stateDir + '/run')
end
log_dir = container_log_dir(container)
Dir.mkdir(log_dir) if File.directory?(log_dir) == false
if container.is_service?
Dir.mkdir(stateDir + '/configurations/') if File.directory?(stateDir + '/configurations') == false
Dir.mkdir(stateDir + '/configurations/default') if File.directory?(stateDir + '/configurations/default') == false
end
return save_container(container)
rescue StandardError => e
container.last_error = 'Failed To Create ' + e.to_s
SystemUtils.log_exception(e)
end
def clear_cid(container)
container.container_id = -1
end
def is_startup_complete(container)
clear_error
return File.exist?(container_state_dir(container) + '/run/flags/startup_complete')
rescue StandardError => e
SystemUtils.log_exception(e)
end
def clear_cid_file container
clear_error
cidfile = container_cid_file(container)
File.delete(cidfile) if File.exist?(cidfile)
clear_cid(container)
return true
rescue StandardError => e
container.last_error = 'Failed To remove cid file' + e.to_s
SystemUtils.log_exception(e)
end
def read_container_id(container)
clear_error
cidfile = container_cid_file(container)
return File.read(cidfile) if File.exist?(cidfile)
rescue StandardError => e
SystemUtils.log_exception(e)
return '-1'
end
def destroy_container container
clear_error
container.container_id = -1
return File.delete(container_cid_file(container)) if File.exist?(container_cid_file(container))
return true # File may or may not exist
rescue StandardError => e
container.last_error = 'Failed To delete cid ' + e.to_s
SystemUtils.log_exception(e)
end
def delete_container_configs(container)
clear_error
cidfile = SystemConfig.CidDir + '/' + container.container_name + '.cid'
File.delete(cidfile) if File.exist?(cidfile)
cmd = 'docker rm volbuilder'
retval = SystemUtils.run_system(cmd)
cmd = 'docker run --name volbuilder --memory=20m -e fw_user=www-data -v /opt/engines/run/containers/' + container.container_name + '/:/client/state:rw -v /var/log/engines/containers/' + container.container_name + ':/client/log:rw -t engines/volbuilder:' + SystemUtils.system_release + ' /home/remove_container.sh state logs'
retval = SystemUtils.run_system(cmd)
cmd = 'docker rm volbuilder'
retval = SystemUtils.run_system(cmd)
if retval == true
FileUtils.rm_rf(container_state_dir(container))
return true
else
container.last_error = 'Failed to Delete state and logs:' + retval.to_s
log_error_mesg('Failed to Delete state and logs:' + retval.to_s, container)
end
rescue StandardError => e
container.last_error = 'Failed To Delete '
log_exception(e)
end
# def get_cert_name(fqdn)
# if File.exists?(SystemConfig.NginxCertDir + '/' + fqdn + '.crt')
# return fqdn
# else
# return SystemConfig.NginxDefaultCert
# end
# end
def get_build_report(engine_name)
clear_error
stateDir = SystemConfig.RunDir + '/containers/' + engine_name
return File.read(stateDir + '/buildreport.txt') if File.exist?(stateDir + '/buildreport.txt')
return 'Build Not Successful'
rescue StandardError => e
SystemUtils.log_exception(e)
end
def save_build_report(container, build_report)
clear_error
stateDir = container_state_dir(container)
f = File.new(stateDir + '/buildreport.txt', File::CREAT | File::TRUNC | File::RDWR, 0644)
f.puts(build_report)
f.close
return true
rescue StandardError => e
SystemUtils.log_exception(e)
end
def save_container(container)
clear_error
# FIXME:
api = container.container_api
container.container_api = nil
last_result = container.last_result
last_error = container.last_error
# save_last_result_and_error(container)
container.last_result = ''
container.last_error = ''
serialized_object = YAML::dump(container)
container.container_api = api
container.last_result = last_result
container.last_error = last_error
stateDir = container_state_dir(container)
FileUtils.mkdir_p(stateDir) if Dir.exist?(stateDir) == false
statefile = stateDir + '/running.yaml'
# BACKUP Current file with rename
if File.exist?(statefile)
statefile_bak = statefile + '.bak'
File.rename(statefile, statefile_bak)
end
f = File.new(statefile, File::CREAT | File::TRUNC | File::RDWR, 0644)
f.puts(serialized_object)
f.close
return true
rescue StandardError => e
container.last_error = last_error
# FIXME: Need to rename back if failure
SystemUtils.log_exception(e)
end
def save_blueprint(blueprint, container)
clear_error
if blueprint.nil? == false
puts blueprint.to_s
else
return false
end
stateDir = container_state_dir(container)
Dir.mkdir(stateDir) if File.directory?(stateDir) == false
statefile = stateDir + '/blueprint.json'
f = File.new(statefile, File::CREAT | File::TRUNC | File::RDWR, 0644)
f.write(blueprint.to_json)
f.close
rescue StandardError => e
SystemUtils.log_exception(e)
end
def load_blueprint(container)
clear_error
stateDir = container_state_dir(container)
return false if File.directory?(stateDir) == false
statefile = stateDir + '/blueprint.json'
if File.exist?(statefile)
f = File.new(statefile, 'r')
blueprint = JSON.parse(f.read)
f.close
else
return false
end
return blueprint
rescue StandardError => e
SystemUtils.log_exception(e)
end
def get_container_memory_stats(container)
clear_error
ret_val = {}
if container && container.container_id.nil? || container.container_id == '-1'
container_id = read_container_id(container)
container.container_id = container_id
end
if container && container.container_id.nil? == false && container.container_id != '-1'
# path = '/sys/fs/cgroup/memory/docker/' + container.container_id.to_s + '/'
path = SystemUtils.cgroup_mem_dir(container.container_id.to_s)
if Dir.exist?(path)
ret_val.store(:maximum, File.read(path + '/memory.max_usage_in_bytes'))
ret_val.store(:current, File.read(path + '/memory.usage_in_bytes'))
ret_val.store(:limit, File.read(path + '/memory.limit_in_bytes'))
else
p :no_cgroup_file
p path
ret_val.store(:maximum, 'No Container')
ret_val.store(:current, 'No Container')
ret_val.store(:limit, 'No Container')
end
end
return ret_val
rescue StandardError => e
SystemUtils.log_exception(e)
ret_val.store(:maximum, e.to_s)
ret_val.store(:current, 'NA')
ret_val.store(:limit, 'NA')
return ret_val
end
def set_engine_network_properties(engine, params)
clear_error
return set_engine_hostname_details(engine, params) if set_engine_web_protocol_properties(engine, params)
return false
end
def set_engine_web_protocol_properties(engine, params)
clear_error
# engine_name = params[:engine_name]
protocol = params[:http_protocol]
if protocol.nil?
p params
return false
end
SystemUtils.debug_output('Changing protocol to _', protocol)
if protocol.include?('HTTPS only')
engine.enable_https_only
elsif protocol.include?('HTTP only')
engine.enable_http_only
elsif protocol.include?('HTTPS and HTTP')
engine.enable_http_and_https
end
return true
rescue StandardError => e
SystemUtils.log_exception(e)
end
def set_engine_hostname_details(container, params)
clear_error
# engine_name = params[:engine_name]
hostname = params[:host_name]
domain_name = params[:domain_name]
SystemUtils.debug_output('Changing Domainame to ', domain_name)
# saved_hostName = container.hostname
# saved_domainName = container.domain_name
SystemUtils.debug_output('Changing Domainame to ', domain_name)
container.remove_nginx_service
container.set_hostname_details(hostname, domain_name)
save_container(container)
container.add_nginx_service
return true
rescue StandardError => e
SystemUtils.log_exception(e)
end
def getManagedEngines
ret_val = []
Dir.entries(SystemConfig.RunDir + '/containers/').each do |contdir|
yfn = SystemConfig.RunDir + '/containers/' + contdir + '/running.yaml'
if File.exist?(yfn) == true
managed_engine = loadManagedEngine(contdir)
if managed_engine.is_a?(ManagedEngine)
ret_val.push(managed_engine)
else
log_error_mesg('failed to load ', yfn)
end
end
end
return ret_val
rescue StandardError => e
SystemUtils.log_exception(e)
end
def loadManagedEngine(engine_name)
if engine_name.nil? || engine_name.length == 0
@last_error = 'No Engine Name'
return false
end
yam_file_name = SystemConfig.RunDir + '/containers/' + engine_name + '/running.yaml'
if File.exist?(yam_file_name) == false
log_error_mesg('no such file ', yam_file_name)
return false # return failed(yam_file_name,'No such configuration:','Load Engine')
end
yaml_file = File.read(yam_file_name)
managed_engine = ManagedEngine.from_yaml(yaml_file, @engines_api.container_api)
return false if managed_engine.nil? || managed_engine == false
return managed_engine
rescue StandardError => e
if engine_name.nil? == false
if managed_engine.nil? == false
managed_engine.last_error = 'Failed To get Managed Engine ' + engine_name + ' ' + e.to_s
log_error_mesg(managed_engine.last_error,e)
end
else
log_error_mesg('nil Engine Name',engine_name)
end
log_exception(e)
end
def build_running_service(service_name, service_type_dir)
config_template_file_name = service_type_dir + service_name + '/config.yaml'
if File.exist?(config_template_file_name) == false
log_error_mesg('Running exits',service_name)
return false
end
config_template = File.read(config_template_file_name)
system_access = SystemAccess.new
templator = Templater.new(system_access, nil)
running_config = templator.process_templated_string(config_template)
yam1_file_name = service_type_dir + service_name + '/running.yaml'
yaml_file = File.new(yam1_file_name, 'w+')
yaml_file.write(running_config)
yaml_file.close
return true
end
def loadSystemService(service_name)
_loadManagedService(service_name, SystemConfig.RunDir + '/system_services/')
end
def loadManagedService(service_name)
_loadManagedService(service_name, SystemConfig.RunDir + '/services/')
end
def _loadManagedService(service_name, service_type_dir)
if service_name.nil? || service_name.length == 0
@last_error = 'No Service Name'
return false
end
yam1_file_name = service_type_dir + service_name + '/running.yaml'
if File.exist?(yam1_file_name) == false
if build_running_service(service_name, service_type_dir) == false
log_error_mesg('No build_running_service file ', service_type_dir + '/' + service_name.to_s)
return false # return failed(yam_file_name,'No such configuration:','Load Service')
end
end
yaml_file = File.read(yam1_file_name)
# managed_service = YAML::load( yaml_file)
if service_type_dir == '/sytem_services/'
managed_service = SystemService.from_yaml(yaml_file, @engines_api.service_api)
else
managed_service = ManagedService.from_yaml(yaml_file, @engines_api.service_api)
end
if managed_service.nil?
p :load_managed_servic_failed
log_error_mesg('load_managed_servic_failed loading:', yam1_file_name.to_s + ' service name: ' + service_name.to_s)
return false # return EnginsOSapiResult.failed(yam_file_name,'Fail to Load configuration:','Load Service')
end
return managed_service
rescue StandardError => e
if service_name.nil? == false
if managed_service.nil? == false
managed_service.last_error = ('Failed To get Managed Engine ' + service_name.to_s + ' ' + e.to_s)
log_exception(e)
end
else
log_error_mesg('nil Service Name',service_name)
end
log_exception(e)
end
def getManagedServices
begin
ret_val = []
Dir.entries(SystemConfig.RunDir + '/services/').each do |contdir|
yfn = SystemConfig.RunDir + '/services/' + contdir + '/config.yaml'
if File.exist?(yfn) == true
managed_service = loadManagedService(contdir)
ret_val.push(managed_service) if managed_service
end
end
return ret_val
rescue StandardError => e
log_exception(e)
end
end
def list_managed_engines
clear_error
ret_val = []
begin
Dir.entries(SystemConfig.RunDir + '/containers/').each do |contdir|
yfn = SystemConfig.RunDir + '/containers/' + contdir + '/running.yaml'
ret_val.push(contdir) if File.exist?(yfn) == true
end
rescue StandardError => e
log_exception(e)
return ret_val
end
return ret_val
end
def list_managed_services
clear_error
ret_val = []
Dir.entries(SystemConfig.RunDir + '/services/').each do |contdir|
yfn = SystemConfig.RunDir + '/services/' + contdir + '/config.yaml'
ret_val.push(contdir) if File.exist?(yfn) == true
end
return ret_val
rescue StandardError => e
log_exception(e)
return ret_val
end
def clear_container_var_run(container)
clear_error
File.unlink(container_state_dir(container) + '/startup_complete') if File.exist?(container_state_dir(container) + '/startup_complete')
return true
rescue StandardError => e
SystemUtils.log_exception(e)
end
def generate_engines_user_ssh_key
newkey = SystemUtils.run_command(SystemConfig.generate_ssh_private_keyfile)
if newkey.start_with?('-----BEGIN RSA PRIVATE KEY-----') == false
@last_error = newkey
return false
end
return newkey
rescue StandardError => e
SystemUtils.log_exception(e)
end
def update_public_key(key)
SystemUtils.execute_command('ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i /home/engines/.ssh/mgmt/update_access_system_pub engines@172.17.42.1 /opt/engines/bin/update_access_system_pub.sh ' + key)
end
def regen_system_ssh_key
SystemUtils.execute_command('ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i /home/engines/.ssh/mgmt/update_access_system_pub engines@172.17.42.1 /opt/engines/bin/regen_private.sh ')
end
def container_state_dir(container)
SystemConfig.RunDir + '/' + container.ctype + 's/' + container.container_name
end
def system_update_status
SystemUtils.execute_command('ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i /home/engines/.ssh/mgmt/deb_update_status engines@172.17.42.1 /opt/engines/bin/deb_update_status.sh')
end
def restart_system
res = Thread.new { system('ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i /home/engines/.ssh/mgmt/restart_system engines@172.17.42.1 /opt/engines/bin/restart_system.sh') }
# FIXME: check a status flag after sudo side post ssh run ie when we know it's definititly happenging
return true if res.status == 'run'
return false
end
def update_system
res = Thread.new { system('ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i /home/engines/.ssh/mgmt/update_system engines@172.17.42.1 /opt/engines/bin/update_system.sh') }
# FIXME: check a status flag after sudo side post ssh run ie when we know it's definititly happenging
return true if res.status == 'run'
return false
end
def update_engines_system_software
result = SystemUtils.execute_command('sudo /opt/engines/scripts/_update_engines_system_software.sh ')
if result[:result] == -1
@last_error = result[:stderr]
FileUtils.rm_f(SystemConfig.EnginesSystemUpdatingFlag)
return false
end
if result[:stdout].include?('Already up-to-date')
@last_error = result[:stdout]
FileUtils.rm_f(SystemConfig.EnginesSystemUpdatingFlag)
return false
end
res = Thread.new { SystemUtils.execute_command('ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i /home/engines/.ssh/mgmt/update_engines_system_software engines@172.17.42.1 /opt/engines/bin/update_engines_system_software.sh') }
# FIXME: check a status flag after sudo side post ssh run ie when we know it's definititly happenging
@last_error = result[:stdout]
return true if res.status == 'run'
return false
end
def update_domain(params)
old_domain_name = params[:original_domain_name]
return false if DNSHosting.update_domain(old_domain_name, params) == false
return true if params[:self_hosted] == false
service_hash = {}
service_hash[:parent_engine] = 'system'
service_hash[:variables] = {}
service_hash[:variables][:domainname] = params[:original_domain_name]
service_hash[:service_handle] = params[:original_domain_name] + '_dns'
service_hash[:container_type] = 'system'
service_hash[:publisher_namespace] = 'EnginesSystem'
service_hash[:type_path] = 'dns'
@engines_api.dettach_service(service_hash)
# @engines_api.deregister_non_persistant_service(service_hash)
# @engines_api.delete_service_from_engine_registry(service_hash)
service_hash[:variables][:domainname] = params[:domain_name]
service_hash[:service_handle] = params[:domain_name] + '_dns'
if params[:internal_only]
ip = DNSHosting.get_local_ip
else
ip = open('http://jsonip.com/') { |s| JSON::parse(s.string)['ip'] }
end
service_hash[:variables][:ip] = ip
if @engines_api.attach_service(service_hash) == true
@engines_api.register_non_persistant_service(service_hash)
return true
end
return false
rescue StandardError => e
SystemUtils.log_exception(e)
end
def add_domain(params)
return false if DNSHosting.add_domain(params) == false
return true if params[:self_hosted] == false
service_hash = {}
service_hash[:parent_engine] = 'system'
service_hash[:variables] = {}
service_hash[:variables][:domainname] = params[:domain_name]
service_hash[:service_handle] = params[:domain_name] + '_dns'
service_hash[:container_type] = 'system'
service_hash[:publisher_namespace] = 'EnginesSystem'
service_hash[:type_path] = 'dns'
if params[:internal_only]
ip = DNSHosting.get_local_ip
else
ip = open('http://jsonip.com/') { |s| JSON::parse(s.string)['ip'] }
end
service_hash[:variables][:ip] = ip
return @engines_api.service_manager.register_non_persistant_service(service_hash) if @engines_api.attach_service(service_hash) == true
return false
rescue StandardError => e
log_error_mesg('Add self hosted domain exception', params.to_s)
log_exception(e)
end
# FIXME: Kludge should read from network namespace /proc ?
def get_container_network_metrics(container_name)
ret_val = {}
clear_error
def error_result
ret_val = {}
ret_val[:in] = 'n/a'
ret_val[:out] = 'n/a'
return ret_val
end
commandargs = 'docker exec ' + container_name + " netstat --interfaces -e | grep bytes |head -1 | awk '{ print $2 \' \' $6}' 2>&1"
result = SystemUtils.execute_command(commandargs)
if result[:result] != 0
ret_val = error_result
else
res = result[:stdout]
vals = res.split('bytes:')
if vals.count > 2
if vals[1].nil? == false && vals[2].nil? == false
ret_val[:in] = vals[1].chop
ret_val[:out] = vals[2].chop
else
ret_val = error_result
end
else
ret_val = error_result
end
return ret_val
end
rescue StandardError => e
log_exception(e)
return error_result
end
def remove_domain(params)
return false if DNSHosting.rm_domain(params) == false
return true if params[:self_hosted] == false
service_hash = {}
service_hash[:parent_engine] = 'system'
service_hash[:variables] = {}
service_hash[:variables][:domainname] = params[:domain_name]
service_hash[:service_handle] = params[:domain_name] + '_dns'
service_hash[:container_type] = 'system'
service_hash[:publisher_namespace] = 'EnginesSystem'
service_hash[:type_path] = 'dns'
if @engines_api.dettach_service(service_hash) == true
@engines_api.deregister_non_persistant_service(service_hash)
@engines_api.delete_service_from_engine_registry(service_hash)
return true
end
return false
rescue StandardError => e
log_exception(e)
return false
end
def list_domains
return DNSHosting.list_domains
rescue StandardError => e
return log_exception(e)
end
protected
def container_cid_file(container)
SystemConfig.CidDir + '/' + container.container_name + '.cid'
end
def container_log_dir(container)
SystemConfig.SystemLogRoot + '/' + container.ctype + 's/' + container.container_name
end
def run_system(cmd)
clear_error
begin
cmd += ' 2>&1'
res = (%x<#{cmd}>)
SystemUtils.debug_output('run System', res)
# FIXME: should be case insensitive The last one is a pure kludge
# really need to get stderr and stdout separately
return true if $CHILD_STATUS == 0 && res.downcase.include?('error') == false && res.downcase.include?('fail') == false && res.downcase.include?('could not resolve hostname') == false && res.downcase.include?('unsuccessful') == false
return res
rescue StandardError => e
log_exception(e)
return res
end
end
end
|
# frozen_string_literal: true
require 'discordrb/bot'
require 'discordrb/data'
require 'discordrb/commands/parser'
require 'discordrb/commands/events'
require 'discordrb/commands/container'
require 'discordrb/commands/rate_limiter'
# Specialized bot to run commands
module Discordrb::Commands
# Bot that supports commands and command chains
class CommandBot < Discordrb::Bot
# @return [Hash] this bot's attributes.
attr_reader :attributes
# @return [String] the prefix commands are triggered with.
attr_reader :prefix
include CommandContainer
# Creates a new CommandBot and logs in to Discord.
# @param attributes [Hash] The attributes to initialize the CommandBot with.
# @see {Discordrb::Bot#initialize} for other attributes that should be used to create the underlying regular bot.
# @option attributes [String] :prefix The prefix that should trigger this bot's commands. Can be any string (including the empty
# string), but note that it will be literal - if the prefix is "hi" then the corresponding trigger string for
# a command called "test" would be "hitest". Don't forget to put spaces in if you need them!
# @option attributes [true, false] :advanced_functionality Whether to enable advanced functionality (very powerful
# way to nest commands into chains, see https://github.com/meew0/discordrb/wiki/Commands#command-chain-syntax
# for info. Default is true.
# @option attributes [Symbol, Array<Symbol>, false] :help_command The name of the command that displays info for
# other commands. Use an array if you want to have aliases. Default is "help". If none should be created, use
# `false` as the value.
# @option attributes [String] :command_doesnt_exist_message The message that should be displayed if a user attempts
# to use a command that does not exist. If none is specified, no message will be displayed. In the message, you
# can use the string '%command%' that will be replaced with the name of the command.
# @option attributes [true, false] :spaces_allowed Whether spaces are allowed to occur between the prefix and the
# command. Default is false.
# @option attributes [String] :previous Character that should designate the result of the previous command in
# a command chain (see :advanced_functionality). Default is '~'.
# @option attributes [String] :chain_delimiter Character that should designate that a new command begins in the
# command chain (see :advanced_functionality). Default is '>'.
# @option attributes [String] :chain_args_delim Character that should separate the command chain arguments from the
# chain itself (see :advanced_functionality). Default is ':'.
# @option attributes [String] :sub_chain_start Character that should start a sub-chain (see
# :advanced_functionality). Default is '['.
# @option attributes [String] :sub_chain_end Character that should end a sub-chain (see
# :advanced_functionality). Default is ']'.
# @option attributes [String] :quote_start Character that should start a quoted string (see
# :advanced_functionality). Default is '"'.
# @option attributes [String] :quote_end Character that should end a quoted string (see
# :advanced_functionality). Default is '"'.
def initialize(attributes = {})
super(
email: attributes[:email],
password: attributes[:password],
log_mode: attributes[:log_mode],
token: attributes[:token],
application_id: attributes[:application_id],
type: attributes[:type],
name: attributes[:name],
fancy_log: attributes[:fancy_log],
suppress_ready: attributes[:suppress_ready],
parse_self: attributes[:parse_self])
@prefix = attributes[:prefix]
@attributes = {
# Whether advanced functionality such as command chains are enabled
advanced_functionality: attributes[:advanced_functionality].nil? ? false : attributes[:advanced_functionality],
# The name of the help command (that displays information to other commands). False if none should exist
help_command: (attributes[:help_command].is_a? FalseClass) ? nil : (attributes[:help_command] || :help),
# The message to display for when a command doesn't exist, %command% to get the command name in question and nil for no message
# No default value here because it may not be desired behaviour
command_doesnt_exist_message: attributes[:command_doesnt_exist_message],
# Spaces allowed between prefix and command
spaces_allowed: attributes[:spaces_allowed].nil? ? false : attributes[:spaces_allowed],
# All of the following need to be one character
# String to designate previous result in command chain
previous: attributes[:previous] || '~',
# Command chain delimiter
chain_delimiter: attributes[:chain_delimiter] || '>',
# Chain argument delimiter
chain_args_delim: attributes[:chain_args_delim] || ':',
# Sub-chain starting character
sub_chain_start: attributes[:sub_chain_start] || '[',
# Sub-chain ending character
sub_chain_end: attributes[:sub_chain_end] || ']',
# Quoted mode starting character
quote_start: attributes[:quote_start] || '"',
# Quoted mode ending character
quote_end: attributes[:quote_end] || '"'
}
@permissions = {
roles: {},
users: {}
}
return unless @attributes[:help_command]
command(@attributes[:help_command], max_args: 1, description: 'Shows a list of all the commands available or displays help for a specific command.', usage: 'help [command name]') do |event, command_name|
if command_name
command = @commands[command_name.to_sym]
return "The command `#{command_name}` does not exist!" unless command
desc = command.attributes[:description] || '*No description available*'
usage = command.attributes[:usage]
result = "**`#{command_name}`**: #{desc}"
result += "\nUsage: `#{usage}`" if usage
result
else
available_commands = @commands.values.reject { |c| !c.attributes[:help_available] }
case available_commands.length
when 0..5
available_commands.reduce "**List of commands:**\n" do |memo, c|
memo + "**`#{c.name}`**: #{c.attributes[:description] || '*No description available*'}\n"
end
when 5..50
(available_commands.reduce "**List of commands:**\n" do |memo, c|
memo + "`#{c.name}`, "
end)[0..-3]
else
event.user.pm(available_commands.reduce("**List of commands:**\n") { |a, e| a + "`#{e.name}`, " })[0..-3]
'Sending list in PM!'
end
end
end
end
# Executes a particular command on the bot. Mostly useful for internal stuff, but one can never know.
# @param name [Symbol] The command to execute.
# @param event [CommandEvent] The event to pass to the command.
# @param arguments [Array<String>] The arguments to pass to the command.
# @param chained [true, false] Whether or not it should be executed as part of a command chain. If this is false,
# commands that have chain_usable set to false will not work.
# @return [String, nil] the command's result, if there is any.
def execute_command(name, event, arguments, chained = false)
debug("Executing command #{name} with arguments #{arguments}")
command = @commands[name]
unless command
event.respond @attributes[:command_doesnt_exist_message].gsub('%command%', name.to_s) if @attributes[:command_doesnt_exist_message]
return
end
if permission?(event.user, command.attributes[:permission_level], event.server)
event.command = command
result = command.call(event, arguments, chained)
stringify(result)
else
event.respond command.attributes[:permission_message].gsub('%name%', name.to_s) if command.attributes[:permission_message]
return
end
end
# Executes a command in a simple manner, without command chains or permissions.
# @param chain [String] The command with its arguments separated by spaces.
# @param event [CommandEvent] The event to pass to the command.
# @return [String, nil] the command's result, if there is any.
def simple_execute(chain, event)
return nil if chain.empty?
args = chain.split(' ')
execute_command(args[0].to_sym, event, args[1..-1])
end
# Sets the permission level of a user
# @param id [Integer] the ID of the user whose level to set
# @param level [Integer] the level to set the permission to
def set_user_permission(id, level)
@permissions[:users][id] = level
end
# Sets the permission level of a role - this applies to all users in the role
# @param id [Integer] the ID of the role whose level to set
# @param level [Integer] the level to set the permission to
def set_role_permission(id, level)
@permissions[:roles][id] = level
end
# Check if a user has permission to do something
# @param user [User] The user to check
# @param level [Integer] The minimum permission level the user should have (inclusive)
# @param server [Server] The server on which to check
# @return [true, false] whether or not the user has the given permission
def permission?(user, level, server)
determined_level = server.nil? ? 0 : user.roles.reduce(0) do |memo, role|
[@permissions[:roles][role.id] || 0, memo].max
end
[@permissions[:users][user.id] || 0, determined_level].max >= level
end
private
# Internal handler for MESSAGE_CREATE that is overwritten to allow for command handling
def create_message(data)
message = Discordrb::Message.new(data, self)
return if message.from_bot? && !@should_parse_self
event = CommandEvent.new(message, self)
return unless message.content.start_with? @prefix
chain = message.content[@prefix.length..-1]
# Don't allow spaces between the prefix and the command
if chain.start_with?(' ') && !@attributes[:spaces_allowed]
debug('Chain starts with a space')
return
end
if chain.strip.empty?
debug('Chain is empty')
return
end
execute_chain(chain, event)
end
def required_permissions?(member, required, channel = nil)
required.reduce(true) do |a, action|
a && member.permission?(action, channel)
end
end
def execute_chain(chain, event)
t = Thread.new do
@event_threads << t
Thread.current[:discordrb_name] = "ct-#{@current_thread += 1}"
begin
debug("Parsing command chain #{chain}")
result = (@attributes[:advanced_functionality]) ? CommandChain.new(chain, self).execute(event) : simple_execute(chain, event)
result = event.saved_message + (result || '')
event.respond result unless result.nil? || result.empty?
rescue => e
log_exception(e)
ensure
@event_threads.delete(t)
end
end
end
# Turns the object into a string, using to_s by default
def stringify(object)
return '' if object.is_a? Discordrb::Message
object.to_s
end
end
end
Check for required_permissions? in execute_command
# frozen_string_literal: true
require 'discordrb/bot'
require 'discordrb/data'
require 'discordrb/commands/parser'
require 'discordrb/commands/events'
require 'discordrb/commands/container'
require 'discordrb/commands/rate_limiter'
# Specialized bot to run commands
module Discordrb::Commands
# Bot that supports commands and command chains
class CommandBot < Discordrb::Bot
# @return [Hash] this bot's attributes.
attr_reader :attributes
# @return [String] the prefix commands are triggered with.
attr_reader :prefix
include CommandContainer
# Creates a new CommandBot and logs in to Discord.
# @param attributes [Hash] The attributes to initialize the CommandBot with.
# @see {Discordrb::Bot#initialize} for other attributes that should be used to create the underlying regular bot.
# @option attributes [String] :prefix The prefix that should trigger this bot's commands. Can be any string (including the empty
# string), but note that it will be literal - if the prefix is "hi" then the corresponding trigger string for
# a command called "test" would be "hitest". Don't forget to put spaces in if you need them!
# @option attributes [true, false] :advanced_functionality Whether to enable advanced functionality (very powerful
# way to nest commands into chains, see https://github.com/meew0/discordrb/wiki/Commands#command-chain-syntax
# for info. Default is true.
# @option attributes [Symbol, Array<Symbol>, false] :help_command The name of the command that displays info for
# other commands. Use an array if you want to have aliases. Default is "help". If none should be created, use
# `false` as the value.
# @option attributes [String] :command_doesnt_exist_message The message that should be displayed if a user attempts
# to use a command that does not exist. If none is specified, no message will be displayed. In the message, you
# can use the string '%command%' that will be replaced with the name of the command.
# @option attributes [true, false] :spaces_allowed Whether spaces are allowed to occur between the prefix and the
# command. Default is false.
# @option attributes [String] :previous Character that should designate the result of the previous command in
# a command chain (see :advanced_functionality). Default is '~'.
# @option attributes [String] :chain_delimiter Character that should designate that a new command begins in the
# command chain (see :advanced_functionality). Default is '>'.
# @option attributes [String] :chain_args_delim Character that should separate the command chain arguments from the
# chain itself (see :advanced_functionality). Default is ':'.
# @option attributes [String] :sub_chain_start Character that should start a sub-chain (see
# :advanced_functionality). Default is '['.
# @option attributes [String] :sub_chain_end Character that should end a sub-chain (see
# :advanced_functionality). Default is ']'.
# @option attributes [String] :quote_start Character that should start a quoted string (see
# :advanced_functionality). Default is '"'.
# @option attributes [String] :quote_end Character that should end a quoted string (see
# :advanced_functionality). Default is '"'.
def initialize(attributes = {})
super(
email: attributes[:email],
password: attributes[:password],
log_mode: attributes[:log_mode],
token: attributes[:token],
application_id: attributes[:application_id],
type: attributes[:type],
name: attributes[:name],
fancy_log: attributes[:fancy_log],
suppress_ready: attributes[:suppress_ready],
parse_self: attributes[:parse_self])
@prefix = attributes[:prefix]
@attributes = {
# Whether advanced functionality such as command chains are enabled
advanced_functionality: attributes[:advanced_functionality].nil? ? false : attributes[:advanced_functionality],
# The name of the help command (that displays information to other commands). False if none should exist
help_command: (attributes[:help_command].is_a? FalseClass) ? nil : (attributes[:help_command] || :help),
# The message to display for when a command doesn't exist, %command% to get the command name in question and nil for no message
# No default value here because it may not be desired behaviour
command_doesnt_exist_message: attributes[:command_doesnt_exist_message],
# Spaces allowed between prefix and command
spaces_allowed: attributes[:spaces_allowed].nil? ? false : attributes[:spaces_allowed],
# All of the following need to be one character
# String to designate previous result in command chain
previous: attributes[:previous] || '~',
# Command chain delimiter
chain_delimiter: attributes[:chain_delimiter] || '>',
# Chain argument delimiter
chain_args_delim: attributes[:chain_args_delim] || ':',
# Sub-chain starting character
sub_chain_start: attributes[:sub_chain_start] || '[',
# Sub-chain ending character
sub_chain_end: attributes[:sub_chain_end] || ']',
# Quoted mode starting character
quote_start: attributes[:quote_start] || '"',
# Quoted mode ending character
quote_end: attributes[:quote_end] || '"'
}
@permissions = {
roles: {},
users: {}
}
return unless @attributes[:help_command]
command(@attributes[:help_command], max_args: 1, description: 'Shows a list of all the commands available or displays help for a specific command.', usage: 'help [command name]') do |event, command_name|
if command_name
command = @commands[command_name.to_sym]
return "The command `#{command_name}` does not exist!" unless command
desc = command.attributes[:description] || '*No description available*'
usage = command.attributes[:usage]
result = "**`#{command_name}`**: #{desc}"
result += "\nUsage: `#{usage}`" if usage
result
else
available_commands = @commands.values.reject { |c| !c.attributes[:help_available] }
case available_commands.length
when 0..5
available_commands.reduce "**List of commands:**\n" do |memo, c|
memo + "**`#{c.name}`**: #{c.attributes[:description] || '*No description available*'}\n"
end
when 5..50
(available_commands.reduce "**List of commands:**\n" do |memo, c|
memo + "`#{c.name}`, "
end)[0..-3]
else
event.user.pm(available_commands.reduce("**List of commands:**\n") { |a, e| a + "`#{e.name}`, " })[0..-3]
'Sending list in PM!'
end
end
end
end
# Executes a particular command on the bot. Mostly useful for internal stuff, but one can never know.
# @param name [Symbol] The command to execute.
# @param event [CommandEvent] The event to pass to the command.
# @param arguments [Array<String>] The arguments to pass to the command.
# @param chained [true, false] Whether or not it should be executed as part of a command chain. If this is false,
# commands that have chain_usable set to false will not work.
# @return [String, nil] the command's result, if there is any.
def execute_command(name, event, arguments, chained = false)
debug("Executing command #{name} with arguments #{arguments}")
command = @commands[name]
unless command
event.respond @attributes[:command_doesnt_exist_message].gsub('%command%', name.to_s) if @attributes[:command_doesnt_exist_message]
return
end
if permission?(event.user, command.attributes[:permission_level], event.server) &&
required_permissions?(event.author, command.attributes[:required_permissions], event.channel)
event.command = command
result = command.call(event, arguments, chained)
stringify(result)
else
event.respond command.attributes[:permission_message].gsub('%name%', name.to_s) if command.attributes[:permission_message]
return
end
end
# Executes a command in a simple manner, without command chains or permissions.
# @param chain [String] The command with its arguments separated by spaces.
# @param event [CommandEvent] The event to pass to the command.
# @return [String, nil] the command's result, if there is any.
def simple_execute(chain, event)
return nil if chain.empty?
args = chain.split(' ')
execute_command(args[0].to_sym, event, args[1..-1])
end
# Sets the permission level of a user
# @param id [Integer] the ID of the user whose level to set
# @param level [Integer] the level to set the permission to
def set_user_permission(id, level)
@permissions[:users][id] = level
end
# Sets the permission level of a role - this applies to all users in the role
# @param id [Integer] the ID of the role whose level to set
# @param level [Integer] the level to set the permission to
def set_role_permission(id, level)
@permissions[:roles][id] = level
end
# Check if a user has permission to do something
# @param user [User] The user to check
# @param level [Integer] The minimum permission level the user should have (inclusive)
# @param server [Server] The server on which to check
# @return [true, false] whether or not the user has the given permission
def permission?(user, level, server)
determined_level = server.nil? ? 0 : user.roles.reduce(0) do |memo, role|
[@permissions[:roles][role.id] || 0, memo].max
end
[@permissions[:users][user.id] || 0, determined_level].max >= level
end
private
# Internal handler for MESSAGE_CREATE that is overwritten to allow for command handling
def create_message(data)
message = Discordrb::Message.new(data, self)
return if message.from_bot? && !@should_parse_self
event = CommandEvent.new(message, self)
return unless message.content.start_with? @prefix
chain = message.content[@prefix.length..-1]
# Don't allow spaces between the prefix and the command
if chain.start_with?(' ') && !@attributes[:spaces_allowed]
debug('Chain starts with a space')
return
end
if chain.strip.empty?
debug('Chain is empty')
return
end
execute_chain(chain, event)
end
def required_permissions?(member, required, channel = nil)
required.reduce(true) do |a, action|
a && member.permission?(action, channel)
end
end
def execute_chain(chain, event)
t = Thread.new do
@event_threads << t
Thread.current[:discordrb_name] = "ct-#{@current_thread += 1}"
begin
debug("Parsing command chain #{chain}")
result = (@attributes[:advanced_functionality]) ? CommandChain.new(chain, self).execute(event) : simple_execute(chain, event)
result = event.saved_message + (result || '')
event.respond result unless result.nil? || result.empty?
rescue => e
log_exception(e)
ensure
@event_threads.delete(t)
end
end
end
# Turns the object into a string, using to_s by default
def stringify(object)
return '' if object.is_a? Discordrb::Message
object.to_s
end
end
end
|
module RubyProvisioningApi
# @attr [String] user_name User's username
# @attr [String] given_name User's first name
# @attr [String] family_name User's last name
# @attr [Boolean] suspended User's state (suspended if true, active if false)
# @attr [String] quota User's disk space quota
#
class User
extend Entity
extend Member
extend Owner
include ActiveModel::Validations
include ActiveModel::Dirty
attr_accessor :user_name, :family_name, :given_name, :suspended, :quota
alias_method :suspended?, :suspended
define_attribute_methods [:user_name]
validates :user_name, :family_name, :given_name, :presence => true
# @param [Hash] params the options to create a User with.
# @option params [String] :user_name User identification
# @option params [String] :given_name User's first name
# @option params [String] :family_name User's last name
# @option params [String] :quota User's disk space quota (optional, default is 1024)
# @option params [Boolean] :suspended true if user is suspended, false otherwise (optional, default is false)
#
def initialize(params = {})
params.each do |name, value|
send("#{name}=", value)
end
self.quota = "1024" if quota.nil?
self.suspended = false if suspended.nil?
end
# Retrieve all users in the domain
# @note This method executes a <b>GET</b> request to <i>apps-apis.google.com/a/feeds/domain/user/2.0</i>
#
# @example Retrieve all users in the current domain
# RubyProvisioningApi::User.all # => [Array<User>]
#
# @see https://developers.google.com/google-apps/provisioning/#retrieving_all_users_in_a_domain
# @return [Array<User>] all users in the domain
#
def self.all
users = []
response = perform(RubyProvisioningApi.configuration.user_actions[:retrieve_all])
check_response(response)
doc = Nokogiri::XML(response.body)
doc.css("entry").each do |user_entry|
users << extract_user(user_entry)
end
users
end
# Retrieve a user account
# @note This method executes a <b>GET</b> request to <i>https://apps-apis.google.com/a/feeds/domain/user/2.0/userName</i>
#
# @example Retrieve the user account for "test"
# user = RubyProvisioningApi::User.find("test") # => [User]
#
# @see https://developers.google.com/google-apps/provisioning/#retrieving_user_accounts
# @param [String] user_name
# @return [User]
# @raise [Error] if user does not exist
#
def self.find(user_name)
params = prepare_params_for(:retrieve, "userName" => userName)
response = perform(params)
check_response(response)
doc = Nokogiri::XML(response.body)
extract_user(doc)
end
# Save a user account. If the user account exists it will be updated, if not, a new user account will be created
#
# @note This method executes a <b>POST</b> request to <i>apps-apis.google.com/a/feeds/domain/user/2.0</i> for the create action
# @note This method executes a <b>PUT</b> request to <i>apps-apis.google.com/a/feeds/domain/user/2.0/userName</i> for the update action
#
# @example Create a user account in multiple steps
# user = RubyProvisioningApi::User.new
# user.user_name = "test" # => "test"
# user.given_name = "foo" # => "foo"
# user.family_name = "bar" # => "bar"
# user.save # => true
#
# @example Create a user account in a unique step
# user = RubyProvisioningApi::User.new(:user_name => "test",
# :given_name => "foo",
# :family_name => "bar",
# :quota => "2000") # => [User]
# user.save # => true
#
# @example Update a user account
# user = RubyProvisioningApi::User.find("test") # => [User]
# user.given_name = "foo2" # => "foo2"
# user.save # => true
#
# @see https://developers.google.com/google-apps/provisioning/#creating_a_user_account
# @see https://developers.google.com/google-apps/provisioning/#updating_a_user_account
# @param [Hash] save_options
# @option save_options [Boolean] :validate skip validations before save if false, validate otherwise (defaults to true)
# @return [Boolean] true if saved, false if not valid or not saved
# @raise [Error] if the user already exists (user_name must be unique)
#
def save(save_options = {:validate => true})
if save_options[:validate]
return false unless valid?
end
builder = Nokogiri::XML::Builder.new(:encoding => 'UTF-8') do |xml|
xml.send(:'atom:entry', 'xmlns:atom' => 'http://www.w3.org/2005/Atom', 'xmlns:apps' => 'http://schemas.google.com/apps/2006') {
xml.send(:'atom:category', 'scheme' => 'http://schemas.google.com/g/2005#kind', 'term' => 'http://schemas.google.com/apps/2006#user')
xml.send(:'apps:login', 'userName' => user_name, 'password' => '51eea05d46317fadd5cad6787a8f562be90b4446', 'suspended' => suspended)
xml.send(:'apps:quota', 'limit' => quota)
xml.send(:'apps:name', 'familyName' => family_name, 'givenName' => given_name)
}
end
if User.present?(user_name_was)
# UPDATING an old record
params = self.class.prepare_params_for(:update, "userName" => user_name_was)
response = self.class.perform(params, builder.to_xml)
else
# SAVING a new record
response = self.class.perform(RubyProvisioningApi.configuration.user_actions[:create], builder.to_xml)
end
User.check_response(response)
end
# Initialize and save a user.
# @param [Hash] params the options to create a User with
# @option params [String] :user_name User identification
# @option params [String] :given_name User's first name
# @option params [String] :family_name User's last name
# @option params [String] :quota User's disk space quota (optional, default is 1024)
# @option params [Boolean] :suspended true if user is suspended, false otherwise (optional, default is false)
# @note This method executes a <b>POST</b> request to <i>apps-apis.google.com/a/feeds/domain/user/2.0</i>
#
# @example Create the user "test"
# user = RubyProvisioningApi::User.create(:user_name => "test",
# :given_name => "foo",
# :family_name => "bar",
# :quota => "2000") # => true
#
# @see https://developers.google.com/google-apps/provisioning/#creating_a_user_account
# @return [Boolean] true if created, false if not valid or not created
# @raise [Error] if user already exists (user_name must be unique)
#
def self.create(params = {})
user = User.new(params)
user.save
end
# Update user attributes (except suspend) and save
#
# @param [Hash] params the options to update the User with
# @option params [String] :user_name User identification
# @option params [String] :given_name User's first name
# @option params [String] :family_name User's last name
# @option params [String] :quota User's disk space quota
#
# @note This method executes a <b>PUT</b> request to <i>apps-apis.google.com/a/feeds/domain/user/2.0/userName</i> for the update action
# @note With {User#update_attributes update_attributes} it's not possible to suspend or restore a user account. For these actions take a look
# at the {User#suspend suspend} and {User#restore restore} methods.
#
# @example Change the family name and the given_name of a user
# user = RubyProvisioningApi::User.find("foo") # => [User]
# user.update_attributes(:family_name => "smith", :given_name => "john") # => true
#
# @see https://developers.google.com/google-apps/provisioning/#updating_a_user_account
# @return [Boolean] true if updated, false if not valid or not updated
# @raise [Error] if user already exists (user_name must be unique)
#
def update_attributes(params)
if params.has_key? :user_name and params[:user_name] != self.user_name
user_name_will_change!
self.user_name = params[:user_name]
end
self.family_name = params[:family_name] if params.has_key? :family_name
self.given_name = params[:given_name] if params.has_key? :given_name
self.quota = params[:quota] if params.has_key? :quota
save
end
# Suspend a user account
# @note This method executes a <b>PUT</b> request to <i>apps-apis.google.com/a/feeds/domain/user/2.0/userName</i> for the update action
#
# @example Suspend the user account of the user "foo"
# user = RubyProvisioningApi::User.find("foo") # => [User]
# user.suspend # => true
#
# @see https://developers.google.com/google-apps/provisioning/#suspending_a_user_account
# @return [Boolean] true if the operation succeeded, false otherwise
#
def suspend
self.suspended = true
save(:validate => false)
end
# Restore a user account
# @note This method executes a <b>PUT</b> request to <i>apps-apis.google.com/a/feeds/domain/user/2.0/userName</i> for the update action
#
# @example Restore the user account of the user "foo"
# user = RubyProvisioningApi::User.find("foo") # => [User]
# user.restore # => true
#
# @see https://developers.google.com/google-apps/provisioning/#restoring_a_user_account
# @return [Boolean] true if the operation succeeded, false otherwise
#
def restore
self.suspended = false
save(:validate => false)
end
#Delete user DELETE https://apps-apis.google.com/a/feeds/domain/user/2.0/userName
def delete
params = self.class.prepare_params_for(:delete, "userName" => user_name)
response = self.class.perform(params)
end
# Returns all the groups which the user is subscribed to
# TODO: move this inside member
def groups
Group.groups(user_name)
end
# Check if the user is a member of the given group
# @note This method executes a <b>GET</b> request to <i>apps-apis.google.com/a/feeds/group/2.0/domain/groupId/member/memberId</i>
#
# @example Find a user and check if is member of the group 'test'
# user = RubyProvisioningApi::User.find("username")
# user.is_member_of? "test" # => true
#
# @see https://developers.google.com/google-apps/provisioning/#retrieving_all_members_of_a_group
# @return [Boolean] true if the user is member of the group, false otherwise
# @raise [Error] if group_id does not exist
#
def is_member_of?(group_id)
params = self.class.prepare_params_for(:group_id, {"groupId" => group_id, "memberId" => user_name} )
begin
self.class.check_response(self.class.perform(params))
rescue
Group.find(group_id)
false
end
end
private
def self.extract_user(doc)
u = new
u.user_name = doc.css("apps|login").first.attributes["userName"].value
u.suspended = doc.css("apps|login").first.attributes["suspended"].value
u.family_name = doc.css("apps|name").first.attributes["familyName"].value
u.given_name = doc.css("apps|name").first.attributes["givenName"].value
u.quota = doc.css("apps|quota").first.attributes["limit"].value
u
end
end
end
Completed User class documentation.
module RubyProvisioningApi
# @attr [String] user_name User's username
# @attr [String] given_name User's first name
# @attr [String] family_name User's last name
# @attr [Boolean] suspended User's state (suspended if true, active if false)
# @attr [String] quota User's disk space quota
#
class User
extend Entity
extend Member
extend Owner
include ActiveModel::Validations
include ActiveModel::Dirty
attr_accessor :user_name, :family_name, :given_name, :suspended, :quota
alias_method :suspended?, :suspended
define_attribute_methods [:user_name]
validates :user_name, :family_name, :given_name, :presence => true
# @param [Hash] params the options to create a User with.
# @option params [String] :user_name User identification
# @option params [String] :given_name User's first name
# @option params [String] :family_name User's last name
# @option params [String] :quota User's disk space quota (optional, default is 1024)
# @option params [Boolean] :suspended true if user is suspended, false otherwise (optional, default is false)
#
def initialize(params = {})
params.each do |name, value|
send("#{name}=", value)
end
self.quota = "1024" if quota.nil?
self.suspended = false if suspended.nil?
end
# Retrieve all users in the domain
# @note This method executes a <b>GET</b> request to <i>apps-apis.google.com/a/feeds/domain/user/2.0</i>
#
# @example Retrieve all users in the current domain
# RubyProvisioningApi::User.all # => [Array<User>]
#
# @see https://developers.google.com/google-apps/provisioning/#retrieving_all_users_in_a_domain
# @return [Array<User>] all users in the domain
#
def self.all
users = []
response = perform(RubyProvisioningApi.configuration.user_actions[:retrieve_all])
check_response(response)
doc = Nokogiri::XML(response.body)
doc.css("entry").each do |user_entry|
users << extract_user(user_entry)
end
users
end
# Retrieve a user account
# @note This method executes a <b>GET</b> request to <i>https://apps-apis.google.com/a/feeds/domain/user/2.0/userName</i>
#
# @example Retrieve the user account for "test"
# user = RubyProvisioningApi::User.find("test") # => [User]
#
# @see https://developers.google.com/google-apps/provisioning/#retrieving_user_accounts
# @param [String] user_name
# @return [User]
# @raise [Error] if user does not exist
#
def self.find(user_name)
params = prepare_params_for(:retrieve, "userName" => userName)
response = perform(params)
check_response(response)
doc = Nokogiri::XML(response.body)
extract_user(doc)
end
# Save a user account. If the user account exists it will be updated, if not, a new user account will be created
#
# @note This method executes a <b>POST</b> request to <i>apps-apis.google.com/a/feeds/domain/user/2.0</i> for the create action
# @note This method executes a <b>PUT</b> request to <i>apps-apis.google.com/a/feeds/domain/user/2.0/userName</i> for the update action
#
# @example Create a user account in multiple steps
# user = RubyProvisioningApi::User.new
# user.user_name = "test" # => "test"
# user.given_name = "foo" # => "foo"
# user.family_name = "bar" # => "bar"
# user.save # => true
#
# @example Create a user account in a unique step
# user = RubyProvisioningApi::User.new(:user_name => "test",
# :given_name => "foo",
# :family_name => "bar",
# :quota => "2000") # => [User]
# user.save # => true
#
# @example Update a user account
# user = RubyProvisioningApi::User.find("test") # => [User]
# user.given_name = "foo2" # => "foo2"
# user.save # => true
#
# @see https://developers.google.com/google-apps/provisioning/#creating_a_user_account
# @see https://developers.google.com/google-apps/provisioning/#updating_a_user_account
# @param [Hash] save_options
# @option save_options [Boolean] :validate skip validations before save if false, validate otherwise (defaults to true)
# @return [Boolean] true if saved, false if not valid or not saved
# @raise [Error] if the user already exists (user_name must be unique)
#
def save(save_options = {:validate => true})
if save_options[:validate]
return false unless valid?
end
builder = Nokogiri::XML::Builder.new(:encoding => 'UTF-8') do |xml|
xml.send(:'atom:entry', 'xmlns:atom' => 'http://www.w3.org/2005/Atom', 'xmlns:apps' => 'http://schemas.google.com/apps/2006') {
xml.send(:'atom:category', 'scheme' => 'http://schemas.google.com/g/2005#kind', 'term' => 'http://schemas.google.com/apps/2006#user')
xml.send(:'apps:login', 'userName' => user_name, 'password' => '51eea05d46317fadd5cad6787a8f562be90b4446', 'suspended' => suspended)
xml.send(:'apps:quota', 'limit' => quota)
xml.send(:'apps:name', 'familyName' => family_name, 'givenName' => given_name)
}
end
if User.present?(user_name_was)
# UPDATING an old record
params = self.class.prepare_params_for(:update, "userName" => user_name_was)
response = self.class.perform(params, builder.to_xml)
else
# SAVING a new record
response = self.class.perform(RubyProvisioningApi.configuration.user_actions[:create], builder.to_xml)
end
User.check_response(response)
end
# Initialize and save a user.
# @param [Hash] params the options to create a User with
# @option params [String] :user_name User identification
# @option params [String] :given_name User's first name
# @option params [String] :family_name User's last name
# @option params [String] :quota User's disk space quota (optional, default is 1024)
# @option params [Boolean] :suspended true if user is suspended, false otherwise (optional, default is false)
# @note This method executes a <b>POST</b> request to <i>apps-apis.google.com/a/feeds/domain/user/2.0</i>
#
# @example Create the user "test"
# user = RubyProvisioningApi::User.create(:user_name => "test",
# :given_name => "foo",
# :family_name => "bar",
# :quota => "2000") # => true
#
# @see https://developers.google.com/google-apps/provisioning/#creating_a_user_account
# @return [Boolean] true if created, false if not valid or not created
# @raise [Error] if user already exists (user_name must be unique)
#
def self.create(params = {})
user = User.new(params)
user.save
end
# Update user attributes (except suspend) and save
#
# @param [Hash] params the options to update the User with
# @option params [String] :user_name User identification
# @option params [String] :given_name User's first name
# @option params [String] :family_name User's last name
# @option params [String] :quota User's disk space quota
#
# @note This method executes a <b>PUT</b> request to <i>apps-apis.google.com/a/feeds/domain/user/2.0/userName</i> for the update action
# @note With {User#update_attributes update_attributes} it's not possible to suspend or restore a user account. For these actions take a look
# at the {User#suspend suspend} and {User#restore restore} methods.
#
# @example Change the family name and the given_name of a user
# user = RubyProvisioningApi::User.find("foo") # => [User]
# user.update_attributes(:family_name => "smith", :given_name => "john") # => true
#
# @see https://developers.google.com/google-apps/provisioning/#updating_a_user_account
# @return [Boolean] true if updated, false if not valid or not updated
# @raise [Error] if user already exists (user_name must be unique)
#
def update_attributes(params)
if params.has_key? :user_name and params[:user_name] != self.user_name
user_name_will_change!
self.user_name = params[:user_name]
end
self.family_name = params[:family_name] if params.has_key? :family_name
self.given_name = params[:given_name] if params.has_key? :given_name
self.quota = params[:quota] if params.has_key? :quota
save
end
# Suspend a user account
# @note This method executes a <b>PUT</b> request to <i>apps-apis.google.com/a/feeds/domain/user/2.0/userName</i> for the update action
#
# @example Suspend the user account of the user "foo"
# user = RubyProvisioningApi::User.find("foo") # => [User]
# user.suspend # => true
#
# @see https://developers.google.com/google-apps/provisioning/#suspending_a_user_account
# @return [Boolean] true if the operation succeeded, false otherwise
#
def suspend
self.suspended = true
save(:validate => false)
end
# Restore a user account
# @note This method executes a <b>PUT</b> request to <i>apps-apis.google.com/a/feeds/domain/user/2.0/userName</i> for the update action
#
# @example Restore the user account of the user "foo"
# user = RubyProvisioningApi::User.find("foo") # => [User]
# user.restore # => true
#
# @see https://developers.google.com/google-apps/provisioning/#restoring_a_user_account
# @return [Boolean] true if the operation succeeded, false otherwise
#
def restore
self.suspended = false
save(:validate => false)
end
# Delete a user account
# @note This method executes a <b>DELETE</b> request to <i>apps-apis.google.com/a/feeds/domain/user/2.0/userName</i>
#
# @example Delete the user "foo"
# user = RubyProvisioningApi::User.find("foo") # => [User]
# user.delete # => true
#
# @see https://developers.google.com/google-apps/provisioning/#deleting_a_user_account
# @return [Boolean] true if the operation succeeded, false otherwise
#
def delete
params = self.class.prepare_params_for(:delete, "userName" => user_name)
response = self.class.perform(params)
end
# Returns all the groups which the user is subscribed to
#
# @example List all the groups which the user "foo" is subscribed to
# user = RubyProvisioningApi::User.find("foo") # => [User]
# user.groups # => [Array<Group>]
#
# @see For more info see: Group.groups
# @return [Array<Group>]
#
def groups
Group.groups(user_name)
end
# Check if the user is a member of the given group
# @note This method executes a <b>GET</b> request to <i>apps-apis.google.com/a/feeds/group/2.0/domain/groupId/member/memberId</i>
#
# @example Find a user and check if is member of the group 'test'
# user = RubyProvisioningApi::User.find("username")
# user.is_member_of? "test" # => true
#
# @see https://developers.google.com/google-apps/provisioning/#retrieving_all_members_of_a_group
# @return [Boolean] true if the user is member of the group, false otherwise
# @raise [Error] if group_id does not exist
#
def is_member_of?(group_id)
params = self.class.prepare_params_for(:group_id, {"groupId" => group_id, "memberId" => user_name} )
begin
self.class.check_response(self.class.perform(params))
rescue
Group.find(group_id)
false
end
end
private
def self.extract_user(doc)
u = new
u.user_name = doc.css("apps|login").first.attributes["userName"].value
u.suspended = doc.css("apps|login").first.attributes["suspended"].value
u.family_name = doc.css("apps|name").first.attributes["familyName"].value
u.given_name = doc.css("apps|name").first.attributes["givenName"].value
u.quota = doc.css("apps|quota").first.attributes["limit"].value
u
end
end
end |
require 'rinda/rinda'
require 'rubyonacid/factory'
module RubyOnAcid
class RindaFactory < Factory
attr_accessor :uri
#Takes the URI to connect to. Default is "druby://127.0.0.1:12243" (12243 == 1ACID).
def initialize(uri = "druby://127.0.0.1:12243")
@uri = uri
end
def start_service
DRb.start_service
@space = Rinda::TupleSpaceProxy.new(DRbObject.new(nil, @uri))
end
#Get key from Rinda server.
def get_unit(key)
key, value = @space.take([key, Float])
value
end
end
end
Updated default port for RindaGenerator, now 7632 (RNDA)
require 'rinda/rinda'
require 'rubyonacid/factory'
module RubyOnAcid
class RindaFactory < Factory
attr_accessor :uri
#Takes the URI to connect to. Default is "druby://127.0.0.1:7632" (7632 == RNDA).
def initialize(uri = "druby://127.0.0.1:7632")
@uri = uri
end
def start_service
DRb.start_service
@space = Rinda::TupleSpaceProxy.new(DRbObject.new(nil, @uri))
end
#Get key from Rinda server.
def get_unit(key)
key, value = @space.take([key, Float])
value
end
end
end |
module EagleSearch
class Interpreter::Query
def initialize(klass, query, options)
@klass = klass
@query = query
@options = options
end
def payload
case @query
when String
if @query == "*"
{ match_all: {} }
else
{ multi_match: { query: query, type: "best_fields", fields: ["name"], tie_breaker: 0.3 } }
end
end
end
end
end
implement an embrionary query interpreter
module EagleSearch
class Interpreter::Query
def initialize(index, query, options)
@index = index
@query = query
@options = options
end
def payload
case @query
when String
if @query == "*"
{ match_all: {} }
else
query_payload
end
end
end
private
def properties
@index.mappings[@index.type_name][:properties]
end
def analyzed_properties
properties.select { |field_name, field_hash| field_hash[:type] == "string" && field_hash[:index] == "analyzed" }
end
def not_analyzed_properties
properties.select { |field_name, field_hash| field_hash[:type] == "string" && field_hash[:index] == "not_analyzed" }
end
def query_payload
if analyzed_properties
payload = {
bool: {
should: [
{
multi_match: {
query: @query,
fields: @options[:fields] || analyzed_properties.keys,
tie_breaker: 0.3
}
},
{
bool: {
should: []
}
}
]
}
}
#shingle for analyzed properties
analyzed_properties.keys.each do |field_name|
payload[:bool][:should][1][:bool][:should] << {
match: {
"#{ field_name }.shingle" => @query
}
}
end
end
payload
end
end
end
|
# encoding: utf-8
module EasyCaptcha
module Generator
# default generator
class Default < Base
# set default values
def defaults
@font_size = 24
@font_fill_color = '#333333'
@font = File.expand_path('../../../../resources/captcha.ttf', __FILE__)
@font_stroke = '#000000'
@font_stroke_color = 0
@image_background_color = '#FFFFFF'
@sketch = true
@sketch_radius = 3
@sketch_sigma = 1
@wave = true
@wave_length = (60..100)
@wave_amplitude = (3..5)
@implode = 0.05
@blur = true
@blur_radius = 1
@blur_sigma = 2
end
# Font
attr_accessor :font_size, :font_fill_color, :font, :font_family, :font_stroke, :font_stroke_color
# Background
attr_accessor :image_background_color
# Sketch
attr_accessor :sketch, :sketch_radius, :sketch_sigma
# Wave
attr_accessor :wave, :wave_length, :wave_amplitude
# Implode
attr_accessor :implode
# Gaussian Blur
attr_accessor :blur, :blur_radius, :blur_sigma
def sketch? #:nodoc:
@sketch
end
def wave? #:nodoc:
@wave
end
def blur? #:nodoc:
@blur
end
# generate image
def generate(code)
config = self
canvas = Magick::Image.new(EasyCaptcha.image_width, EasyCaptcha.image_height) do |variable|
self.background_color = config.image_background_color unless config.image_background_color.nil?
end
# Render the text in the image
canvas.annotate(Magick::Draw.new, 0, 0, 0, 0, code) {
self.gravity = Magick::CenterGravity
self.font = config.font
self.font_weight = Magick::LighterWeight
self.fill = config.font_fill_color
if config.font_stroke.to_i > 0
self.stroke = config.font_stroke_color
self.stroke_width = config.font_stroke
end
self.pointsize = config.font_size
}
# Blur
canvas = canvas.blur_image(config.blur_radius, config.blur_sigma) if config.blur?
# Wave
w = config.wave_length
a = config.wave_amplitude
canvas = canvas.wave(rand(a.last - a.first) + a.first, rand(w.last - w.first) + w.first) if config.wave?
# Sketch
canvas = canvas.sketch(config.sketch_radius, config.sketch_sigma, rand(180)) if config.sketch?
# Implode
canvas = canvas.implode(config.implode.to_f) if config.implode.is_a? Float
# Crop image because to big after waveing
canvas = canvas.crop(Magick::CenterGravity, EasyCaptcha.image_width, EasyCaptcha.image_height)
image = canvas.to_blob { self.format = 'PNG' }
# ruby-1.9
image = image.force_encoding 'UTF-8' if image.respond_to? :force_encoding
canvas.destroy!
image
end
end
end
end
adds support for background images
* with transparent PNG support
# encoding: utf-8
module EasyCaptcha
module Generator
# default generator
class Default < Base
# set default values
def defaults
@font_size = 24
@font_fill_color = '#333333'
@font = File.expand_path('../../../../resources/captcha.ttf', __FILE__)
@font_stroke = '#000000'
@font_stroke_color = 0
@image_background_color = '#FFFFFF'
@sketch = true
@sketch_radius = 3
@sketch_sigma = 1
@wave = true
@wave_length = (60..100)
@wave_amplitude = (3..5)
@implode = 0.05
@blur = true
@blur_radius = 1
@blur_sigma = 2
end
# Font
attr_accessor :font_size, :font_fill_color, :font, :font_family, :font_stroke, :font_stroke_color
# Background
attr_accessor :image_background_color, :background_image
# Sketch
attr_accessor :sketch, :sketch_radius, :sketch_sigma
# Wave
attr_accessor :wave, :wave_length, :wave_amplitude
# Implode
attr_accessor :implode
# Gaussian Blur
attr_accessor :blur, :blur_radius, :blur_sigma
def sketch? #:nodoc:
@sketch
end
def wave? #:nodoc:
@wave
end
def blur? #:nodoc:
@blur
end
# generate image
def generate(code)
config = self
canvas = Magick::Image.new(EasyCaptcha.image_width, EasyCaptcha.image_height) do |variable|
self.background_color = config.image_background_color unless config.image_background_color.nil?
self.background_color = 'none' if config.background_image.present?
end
# Render the text in the image
canvas.annotate(Magick::Draw.new, 0, 0, 0, 0, code) {
self.gravity = Magick::CenterGravity
self.font = config.font
self.font_weight = Magick::LighterWeight
self.fill = config.font_fill_color
if config.font_stroke.to_i > 0
self.stroke = config.font_stroke_color
self.stroke_width = config.font_stroke
end
self.pointsize = config.font_size
}
# Blur
canvas = canvas.blur_image(config.blur_radius, config.blur_sigma) if config.blur?
# Wave
w = config.wave_length
a = config.wave_amplitude
canvas = canvas.wave(rand(a.last - a.first) + a.first, rand(w.last - w.first) + w.first) if config.wave?
# Sketch
canvas = canvas.sketch(config.sketch_radius, config.sketch_sigma, rand(180)) if config.sketch?
# Implode
canvas = canvas.implode(config.implode.to_f) if config.implode.is_a? Float
# Crop image because to big after waveing
canvas = canvas.crop(Magick::CenterGravity, EasyCaptcha.image_width, EasyCaptcha.image_height)
# Combine images if background image is present
if config.background_image.present?
background = Magick::Image.read(config.background_image).first
background.composite!(canvas, Magick::CenterGravity, Magick::OverCompositeOp)
image = background.to_blob { self.format = 'PNG' }
else
image = canvas.to_blob { self.format = 'PNG' }
end
# ruby-1.9
image = image.force_encoding 'UTF-8' if image.respond_to? :force_encoding
canvas.destroy!
image
end
end
end
end
|
module Salesforceapi
module Rest
VERSION = "0.0.3"
end
end
version 0.0.4
module Salesforceapi
module Rest
VERSION = "0.0.4"
end
end
|
module Ekylibre::Record
class RecordNotUpdateable < ActiveRecord::RecordNotSaved
end
class RecordNotDestroyable < ActiveRecord::RecordNotSaved
end
module Acts #:nodoc:
module Protected #:nodoc:
def self.included(base)
base.extend(ClassMethods)
end
module ClassMethods
# Blocks update or destroy if necessary
def protect(options = {}, &block)
options[:on] = [:update, :destroy] unless options[:on]
code = ''.c
for callback in [options[:on]].flatten
method_name = "protected_on_#{callback}?".to_sym
code << "before_#{callback} :raise_exception_unless_#{callback}able?\n"
code << "def raise_exception_unless_#{callback}able?\n"
code << " unless self.#{callback}able?\n"
if options[:"allow_#{callback}_on"]
puts options.to_s.yellow
code << " if self.changed.any? { |e| !"+options[:"allow_#{callback}_on"].to_s+".include? e }\n"
end
code << " raise RecordNot#{callback.to_s.camelcase}able.new('Record cannot be #{callback}d', self)\n"
if options[:"allow_#{callback}_on"]
code << " end\n"
end
code << " end\n"
code << "end\n"
code << "def #{callback}able?\n"
code << " !#{method_name}\n"
code << "end\n"
if options[:"allow_#{callback}_on"]
puts code.split("\n").map(&:yellow).join("\n")
end
define_method(method_name, &block) if block_given?
end
class_eval code
end
# Blocks update or destroy if necessary
# If result is false, it stops intervention
def secure(options = {}, &block)
options[:on] = [:update, :destroy] unless options[:on]
code = ''.c
for callback in [options[:on]].flatten
method_name = "secured_on_#{callback}?".to_sym
code << "before_#{callback} :secure_#{callback}ability!\n"
code << "def secure_#{callback}ability!\n"
code << " unless self.#{callback}able?\n"
code << " raise RecordNot#{callback.to_s.camelcase}able.new('Record cannot be #{callback}d because it is secured', self)\n"
code << " end\n"
code << "end\n"
code << "def #{callback}able?\n"
code << " #{method_name}\n"
code << "end\n"
define_method(method_name, &block) if block_given?
end
class_eval code
end
end
end
end
end
Ekylibre::Record::Base.send(:include, Ekylibre::Record::Acts::Protected)
Removed debugging log in Record#protected.
module Ekylibre::Record
class RecordNotUpdateable < ActiveRecord::RecordNotSaved
end
class RecordNotDestroyable < ActiveRecord::RecordNotSaved
end
module Acts #:nodoc:
module Protected #:nodoc:
def self.included(base)
base.extend(ClassMethods)
end
module ClassMethods
# Blocks update or destroy if necessary
def protect(options = {}, &block)
options[:on] = [:update, :destroy] unless options[:on]
code = ''.c
for callback in [options[:on]].flatten
method_name = "protected_on_#{callback}?".to_sym
code << "before_#{callback} :raise_exception_unless_#{callback}able?\n"
code << "def raise_exception_unless_#{callback}able?\n"
code << " unless self.#{callback}able?\n"
if options[:"allow_#{callback}_on"]
code << " if self.changed.any? { |e| !"+options[:"allow_#{callback}_on"].to_s+".include? e }\n"
end
code << " raise RecordNot#{callback.to_s.camelcase}able.new('Record cannot be #{callback}d', self)\n"
if options[:"allow_#{callback}_on"]
code << " end\n"
end
code << " end\n"
code << "end\n"
code << "def #{callback}able?\n"
code << " !#{method_name}\n"
code << "end\n"
define_method(method_name, &block) if block_given?
end
class_eval code
end
# Blocks update or destroy if necessary
# If result is false, it stops intervention
def secure(options = {}, &block)
options[:on] = [:update, :destroy] unless options[:on]
code = ''.c
for callback in [options[:on]].flatten
method_name = "secured_on_#{callback}?".to_sym
code << "before_#{callback} :secure_#{callback}ability!\n"
code << "def secure_#{callback}ability!\n"
code << " unless self.#{callback}able?\n"
code << " raise RecordNot#{callback.to_s.camelcase}able.new('Record cannot be #{callback}d because it is secured', self)\n"
code << " end\n"
code << "end\n"
code << "def #{callback}able?\n"
code << " #{method_name}\n"
code << "end\n"
define_method(method_name, &block) if block_given?
end
class_eval code
end
end
end
end
end
Ekylibre::Record::Base.send(:include, Ekylibre::Record::Acts::Protected)
|
module Saorin
module Server
class Reel
VERSION = '0.1.0'
end
end
end
version bump to 0.1.1
module Saorin
module Server
class Reel
VERSION = '0.1.1'
end
end
end
|
class SciencemagLatestNews::CLI
def call
greeting
list
menu
end
def greeting
puts "Welcome to ScienceMag Latest News!"
end
def list
# need to list all the story headlines here
# use each.with_index(1) on collection of all stories
# like @stories = Story.all, @stories.each.with_index(1) blah blah
puts "1. Bees"
puts "2. Volcano"
end
def menu
input = nil
while input != "exit"
puts "Type the number of the story you'd like to read. You can also type 'list' to see the stories again or 'exit' to exit."
input = gets.chomp
# need to check that input is correct - more than 0 but less than
# total num of stories
# could use a find method here and pass in input, story = Story.find(i)
# then puts story.headline, story.author | story.date, story.content
case input
when "1" then puts "Here's more info on bees."
when "2" then puts "Here's more info on volcano stuff."
when "list" then list
else "I don't know what you want to do. Type 'list' or 'exit'."
end
end
end
end
added primer method to call and list can now display story headlines
class SciencemagLatestNews::CLI
def call
SciencemagLatestNews::Story.scrape_latest_stories
greeting
list
menu
end
def greeting
puts "Welcome to ScienceMag Latest News!"
end
def list
# need to list all the story headlines here
# use each.with_index(1) on collection of all stories
# like @stories = Story.all, @stories.each.with_index(1) blah blah
@stories = SciencemagLatestNews::Story.latest_stories
@stories.each.with_index(1) do |story, i|
puts "#{i}. #{story.headline}"
end
end
def menu
input = nil
while input != "exit"
puts "Type the number of the story you'd like to read. You can also type 'list' to see the stories again or 'exit' to exit."
input = gets.chomp
# need to check that input is correct - more than 0 but less than
# total num of stories
# could use a find method here and pass in input, story = Story.find(i)
# then puts story.headline, story.author | story.date, story.content
case input
when "1" then puts "Here's more info on bees."
when "2" then puts "Here's more info on volcano stuff."
when "list" then list
else "I don't know what you want to do. Type 'list' or 'exit'."
end
end
end
end
|
module SecureHeaders
module ViewHelpers
include SecureHeaders::HashHelper
SECURE_HEADERS_RAKE_TASK = "rake secure_headers:generate_hashes"
class UnexpectedHashedScriptException < StandardError; end
# Public: create a style tag using the content security policy nonce.
# Instructs secure_headers to append a nonce to style/script-src directives.
#
# Returns an html-safe style tag with the nonce attribute.
def nonced_style_tag(content_or_options = {}, &block)
nonced_tag(:style, content_or_options, block)
end
# Public: create a script tag using the content security policy nonce.
# Instructs secure_headers to append a nonce to style/script-src directives.
#
# Returns an html-safe script tag with the nonce attribute.
def nonced_javascript_tag(content_or_options = {}, &block)
nonced_tag(:script, content_or_options, block)
end
# Public: use the content security policy nonce for this request directly.
# Instructs secure_headers to append a nonce to style/script-src directives.
#
# Returns a non-html-safe nonce value.
def content_security_policy_nonce(type)
case type
when :script
SecureHeaders.content_security_policy_script_nonce(@_request)
when :style
SecureHeaders.content_security_policy_style_nonce(@_request)
end
end
##
# Checks to see if the hashed code is expected and adds the hash source
# value to the current CSP.
#
# By default, in development/test/etc. an exception will be raised.
def hashed_javascript_tag(raise_error_on_unrecognized_hash = nil, &block)
hashed_tag(
:script,
:script_src,
Configuration.instance_variable_get(:@script_hashes),
raise_error_on_unrecognized_hash,
block
)
end
def hashed_style_tag(raise_error_on_unrecognized_hash = nil, &block)
hashed_tag(
:style,
:style_src,
Configuration.instance_variable_get(:@style_hashes),
raise_error_on_unrecognized_hash,
block
)
end
private
def hashed_tag(type, directive, hashes, raise_error_on_unrecognized_hash, block)
if raise_error_on_unrecognized_hash.nil?
raise_error_on_unrecognized_hash = ENV["RAILS_ENV"] != "production"
end
content = capture(&block)
file_path = File.join('app', 'views', self.instance_variable_get(:@virtual_path) + '.html.erb')
if raise_error_on_unrecognized_hash
hash_value = hash_source(content)
message = unexpected_hash_error_message(file_path, content, hash_value)
if hashes.nil? || hashes[file_path].nil? || !hashes[file_path].include?(hash_value)
raise UnexpectedHashedScriptException.new(message)
end
end
SecureHeaders.append_content_security_policy_directives(request, directive => hashes[file_path])
content_tag type, content
end
def unexpected_hash_error_message(file_path, content, hash_value)
<<-EOF
\n\n*** WARNING: Unrecognized hash in #{file_path}!!! Value: #{hash_value} ***
#{content}
*** Run #{SECURE_HEADERS_RAKE_TASK} or add the following to config/script_hashes.yml:***
#{file_path}:
- #{hash_value}\n\n
NOTE: dynamic javascript is not supported using script hash integration
on purpose. It defeats the point of using it in the first place.
EOF
end
def nonced_tag(type, content_or_options, block)
options = {}
content = if block
options = content_or_options
capture(&block)
else
content_or_options.html_safe # :'(
end
content_tag type, content, options.merge(nonce: content_security_policy_nonce(type))
end
end
end
module ActionView #:nodoc:
class Base #:nodoc:
include SecureHeaders::ViewHelpers
end
end
hook include of helper to loading of ActionView
When using rspec with rails-controller-testing, specs would break due to incorrect load order. More info here: https://github.com/rails/rails-controller-testing/issues/24
module SecureHeaders
module ViewHelpers
include SecureHeaders::HashHelper
SECURE_HEADERS_RAKE_TASK = "rake secure_headers:generate_hashes"
class UnexpectedHashedScriptException < StandardError; end
# Public: create a style tag using the content security policy nonce.
# Instructs secure_headers to append a nonce to style/script-src directives.
#
# Returns an html-safe style tag with the nonce attribute.
def nonced_style_tag(content_or_options = {}, &block)
nonced_tag(:style, content_or_options, block)
end
# Public: create a script tag using the content security policy nonce.
# Instructs secure_headers to append a nonce to style/script-src directives.
#
# Returns an html-safe script tag with the nonce attribute.
def nonced_javascript_tag(content_or_options = {}, &block)
nonced_tag(:script, content_or_options, block)
end
# Public: use the content security policy nonce for this request directly.
# Instructs secure_headers to append a nonce to style/script-src directives.
#
# Returns a non-html-safe nonce value.
def content_security_policy_nonce(type)
case type
when :script
SecureHeaders.content_security_policy_script_nonce(@_request)
when :style
SecureHeaders.content_security_policy_style_nonce(@_request)
end
end
##
# Checks to see if the hashed code is expected and adds the hash source
# value to the current CSP.
#
# By default, in development/test/etc. an exception will be raised.
def hashed_javascript_tag(raise_error_on_unrecognized_hash = nil, &block)
hashed_tag(
:script,
:script_src,
Configuration.instance_variable_get(:@script_hashes),
raise_error_on_unrecognized_hash,
block
)
end
def hashed_style_tag(raise_error_on_unrecognized_hash = nil, &block)
hashed_tag(
:style,
:style_src,
Configuration.instance_variable_get(:@style_hashes),
raise_error_on_unrecognized_hash,
block
)
end
private
def hashed_tag(type, directive, hashes, raise_error_on_unrecognized_hash, block)
if raise_error_on_unrecognized_hash.nil?
raise_error_on_unrecognized_hash = ENV["RAILS_ENV"] != "production"
end
content = capture(&block)
file_path = File.join('app', 'views', self.instance_variable_get(:@virtual_path) + '.html.erb')
if raise_error_on_unrecognized_hash
hash_value = hash_source(content)
message = unexpected_hash_error_message(file_path, content, hash_value)
if hashes.nil? || hashes[file_path].nil? || !hashes[file_path].include?(hash_value)
raise UnexpectedHashedScriptException.new(message)
end
end
SecureHeaders.append_content_security_policy_directives(request, directive => hashes[file_path])
content_tag type, content
end
def unexpected_hash_error_message(file_path, content, hash_value)
<<-EOF
\n\n*** WARNING: Unrecognized hash in #{file_path}!!! Value: #{hash_value} ***
#{content}
*** Run #{SECURE_HEADERS_RAKE_TASK} or add the following to config/script_hashes.yml:***
#{file_path}:
- #{hash_value}\n\n
NOTE: dynamic javascript is not supported using script hash integration
on purpose. It defeats the point of using it in the first place.
EOF
end
def nonced_tag(type, content_or_options, block)
options = {}
content = if block
options = content_or_options
capture(&block)
else
content_or_options.html_safe # :'(
end
content_tag type, content, options.merge(nonce: content_security_policy_nonce(type))
end
end
end
ActiveSupport.on_load :action_view do
include SecureHeaders::ViewHelpers
end
|
require "exception_notification/td/version"
require "td-logger"
module ExceptionNotifier
class TdNotifier
BACKTRACE_LIMIT_DEFAULT = 10
def initialize(options)
@table_name = options.delete(:table_name)
@backtrace_limit = options.delete(:backtrace_limit) || BACKTRACE_LIMIT_DEFAULT
raise "Please set table_name. options: #{options.inspect}" unless @table_name
unless defined? TreasureData::Logger::Agent::Rails
@database = options.delete(:database)
raise "Please set database. options: #{options.inspect}" unless @database
TreasureData::Logger.open(@database, options)
end
end
def call(exception, options = {})
TD.event.post(@table_name, exception_to_td_data(exception, options))
end
private
def request_klass
@request_klass ||= if defined?(ActionDispatch::Request)
ActionDispatch::Request
else
require 'rack/request'
Rack::Request
end
rescue LoadError, NameError
warn "ExceptionNotification::Td is designed to be used with Rack-based apps. Skip some of features."
nil
end
def exception_to_td_data(exception, options)
backtrace = exception.backtrace ? exception.backtrace[0, @backtrace_limit] : []
params = {
class: exception.class.to_s,
message: exception.message,
backtrace: backtrace,
hostname: (Socket.gethostname rescue nil),
environment: Rails.env,
}
if request_klass && options[:env]
request = request_klass.new(options[:env])
params.merge!(
method: request.request_method,
request_url: request.url,
cookies: request.cookies,
referer: request.referer,
)
params[:post_body] = request.body unless request.get?
end
params
end
end
end
Add @custom_param_proc option to make customized JSON
require "exception_notification/td/version"
require "td-logger"
module ExceptionNotifier
class TdNotifier
BACKTRACE_LIMIT_DEFAULT = 10
def initialize(options)
@table_name = options.delete(:table_name)
raise "Please set table_name. options: #{options.inspect}" unless @table_name
@backtrace_limit = options.delete(:backtrace_limit) || BACKTRACE_LIMIT_DEFAULT
@custom_param_proc = options.delete(:custom_param_proc)
unless defined? TreasureData::Logger::Agent::Rails
@database = options.delete(:database)
raise "Please set database. options: #{options.inspect}" unless @database
TreasureData::Logger.open(@database, options)
end
end
def call(exception, options = {})
TD.event.post(@table_name, exception_to_td_data(exception, options))
end
private
def request_klass
@request_klass ||= if defined?(ActionDispatch::Request)
ActionDispatch::Request
else
require 'rack/request'
Rack::Request
end
rescue LoadError, NameError
warn "ExceptionNotification::Td is designed to be used with Rack-based apps. Skip some of features."
nil
end
def exception_to_td_data(exception, options)
backtrace = exception.backtrace ? exception.backtrace[0, @backtrace_limit] : []
params = {
class: exception.class.to_s,
message: exception.message,
backtrace: backtrace,
hostname: (Socket.gethostname rescue nil),
environment: Rails.env,
}
if request_klass && options[:env]
request = request_klass.new(options[:env])
params.merge!(
method: request.request_method,
request_url: request.url,
cookies: request.cookies,
referer: request.referer,
)
params[:post_body] = request.body unless request.get?
end
if @custom_param_proc
@custom_param_proc.call(params, exception, request)
end
params
end
end
end
|
module SecureHeaders
class UnexpectedHashedScriptException < StandardError
end
module ViewHelpers
include SecureHeaders::HashHelper
SECURE_HEADERS_RAKE_TASK = "rake secure_headers:generate_hashes"
def nonced_style_tag(content = nil, &block)
nonced_tag(content, :style, block)
end
def nonced_javascript_tag(content = nil, &block)
nonced_tag(content, :script, block)
end
def hashed_javascript_tag(raise_error_on_unrecognized_hash = false, &block)
content = capture(&block)
if ['development', 'test'].include?(ENV["RAILS_ENV"])
hash_value = hash_source(content)
file_path = File.join('app', 'views', self.instance_variable_get(:@virtual_path) + '.html.erb')
script_hashes = controller.instance_variable_get(:@script_hashes)[file_path]
unless script_hashes && script_hashes.include?(hash_value)
message = unexpected_hash_error_message(file_path, hash_value, content)
if raise_error_on_unrecognized_hash
raise UnexpectedHashedScriptException.new(message)
else
puts message
request.env[HASHES_ENV_KEY] = (request.env[HASHES_ENV_KEY] || []) << hash_value
end
end
end
content_tag :script, content
end
private
def nonced_tag(content, type, block)
content = if block
capture(&block)
else
content.html_safe # :'(
end
content_tag type, content, :nonce => @content_security_policy_nonce
end
def unexpected_hash_error_message(file_path, hash_value, content)
<<-EOF
\n\n*** WARNING: Unrecognized hash in #{file_path}!!! Value: #{hash_value} ***
<script>#{content}</script>
*** This is fine in dev/test, but will raise exceptions in production. ***
*** Run #{SECURE_HEADERS_RAKE_TASK} or add the following to config/script_hashes.yml:***
#{file_path}:
- #{hash_value}\n\n
EOF
end
end
end
module ActionView #:nodoc:
module Helpers #:nodoc:
include SecureHeaders::ViewHelpers
end
end
Include ActionView helpers in ActionView::Base
Including the helpers directly in Helpers meant they were unavailable in
views in Rails 4.2. This makes them available.
module SecureHeaders
class UnexpectedHashedScriptException < StandardError
end
module ViewHelpers
include SecureHeaders::HashHelper
SECURE_HEADERS_RAKE_TASK = "rake secure_headers:generate_hashes"
def nonced_style_tag(content = nil, &block)
nonced_tag(content, :style, block)
end
def nonced_javascript_tag(content = nil, &block)
nonced_tag(content, :script, block)
end
def hashed_javascript_tag(raise_error_on_unrecognized_hash = false, &block)
content = capture(&block)
if ['development', 'test'].include?(ENV["RAILS_ENV"])
hash_value = hash_source(content)
file_path = File.join('app', 'views', self.instance_variable_get(:@virtual_path) + '.html.erb')
script_hashes = controller.instance_variable_get(:@script_hashes)[file_path]
unless script_hashes && script_hashes.include?(hash_value)
message = unexpected_hash_error_message(file_path, hash_value, content)
if raise_error_on_unrecognized_hash
raise UnexpectedHashedScriptException.new(message)
else
puts message
request.env[HASHES_ENV_KEY] = (request.env[HASHES_ENV_KEY] || []) << hash_value
end
end
end
content_tag :script, content
end
private
def nonced_tag(content, type, block)
content = if block
capture(&block)
else
content.html_safe # :'(
end
content_tag type, content, :nonce => @content_security_policy_nonce
end
def unexpected_hash_error_message(file_path, hash_value, content)
<<-EOF
\n\n*** WARNING: Unrecognized hash in #{file_path}!!! Value: #{hash_value} ***
<script>#{content}</script>
*** This is fine in dev/test, but will raise exceptions in production. ***
*** Run #{SECURE_HEADERS_RAKE_TASK} or add the following to config/script_hashes.yml:***
#{file_path}:
- #{hash_value}\n\n
EOF
end
end
end
module ActionView #:nodoc:
class Base #:nodoc:
include SecureHeaders::ViewHelpers
end
end
|
require "sensu/extension"
require "sensu/extensions/snmp-trap/snmp-patch"
require "thread"
module Sensu
module Extension
class SNMPTrap < Check
RESULT_MAP = [
[/checkname/i, :name],
[/notification/i, :output],
[/description/i, :output],
[/pansystemseverity/i, Proc.new { |value| value > 3 ? 2 : 0 }, :status],
[/severity/i, :status]
]
RESULT_STATUS_MAP = [
[/down/i, 2],
[/authenticationfailure/i, 1]
]
RUBY_ASN1_MAP = {
"INTEGER" => :to_i,
"OCTET STRING" => :to_s,
"OBJECT IDENTIFIER" => :to_s,
"IpAddress" => :to_s,
"Counter32" => :to_i,
"Gauge32" => :to_i,
"Unsigned32" => :to_i,
"TimeTicks" => :to_i,
"Opaque" => :to_s,
"Counter64" => :to_i
}
def name
"snmp_trap"
end
def description
"receives snmp traps and translates them to check results"
end
def definition
{
name: name,
publish: false
}
end
def options
return @options if @options
@options = {
:bind => "0.0.0.0",
:port => 1062,
:community => "public",
:handlers => ["default"],
:mibs_dir => "/etc/sensu/mibs",
:imported_dir => File.join(Dir.tmpdir, "sensu_snmp_imported_mibs"),
:custom_attributes => {}
}
@options.merge!(@settings[:snmp_trap]) if @settings[:snmp_trap].is_a?(Hash)
@options
end
def start_snmpv2_listener!
@listener = SNMP::TrapListener.new(
:host => options[:bind],
:port => options[:port],
:community => options[:community]) do |listener|
listener.on_trap_v2c do |trap|
@logger.debug("snmp trap check extension received a v2 trap")
@traps << trap
end
end
end
def determine_mib_preload(module_name)
preload = []
if @mibs_map[module_name]
imports = @mibs_map[module_name][:imports]
# two enumerators are required for preload ordering
imports.each do |import|
if @mibs_map[import]
preload << @mibs_map[import][:mib_file]
end
end
imports.each do |import|
preload << determine_mib_preload(import)
end
else
@logger.warn("snmp trap check extension unknown mib preload module", :module_name => module_name)
end
preload.flatten
end
def create_mibs_map!
@logger.info("snmp trap check extension creating mibs map", :mibs_dir => options[:mibs_dir])
@mibs_map = {}
Dir.glob(File.join(options[:mibs_dir], "*")).each do |mib_file|
begin
mib_contents = IO.read(mib_file).force_encoding("UTF-8")
module_name = mib_contents.scan(/([\w-]+)\s+DEFINITIONS\s+::=\s+BEGIN/).flatten.first
details = {
:mib_file => mib_file,
:imports => mib_contents.scan(/FROM\s+([\w-]+)/).flatten
}
if @mibs_map.has_key?(module_name)
@logger.warn("snmp trap check extension overriding mib map entry", {
:module_name => module_name,
:old_details => @mibs_map[module_name],
:new_details => details
})
end
@mibs_map[module_name] = details
rescue => error
@logger.error("snmp trap check extension mibs map error", {
:mib_file => mib_file,
:error => error.to_s
})
end
end
@mibs_map.each_key do |module_name|
@mibs_map[module_name][:preload] = determine_mib_preload(module_name)
end
@mibs_map
end
def import_mibs!
@logger.info("snmp trap check extension importing mibs", :mibs_dir => options[:mibs_dir])
@mibs_map.each do |module_name, details|
@logger.debug("snmp trap check extension importing mib", {
:module_name => module_name,
:details => details
})
begin
@logger.debug("snmp trap check extension mib dependencies", {
:module_name => module_name,
:details => details
})
unless details[:preload].empty?
arguments = "-p "
arguments << details[:preload].map { |preload| preload }.join(" -p ")
else
arguments = nil
end
SNMP::MIB.import_module(details[:mib_file], options[:imported_dir], arguments)
rescue StandardError, SyntaxError => error
@logger.debug("snmp trap check extension failed to import mib", {
:module_name => module_name,
:details => details,
:error => error
})
end
end
end
def load_mibs!
@logger.info("snmp trap check extension loading mibs", :imported_dir => options[:imported_dir])
@mibs = SNMP::MIB.new
@logger.debug("snmp trap check extension loading mibs")
SNMP::MIB.list_imported(/.*/, SNMP::MIB::DEFAULT_MIB_PATH).each do |module_name|
@logger.debug("snmp trap check extension loading mib", :module_name => module_name)
@mibs.load_module(module_name, SNMP::MIB::DEFAULT_MIB_PATH)
end
SNMP::MIB.list_imported(/.*/, options[:imported_dir]).each do |module_name|
@logger.debug("snmp trap check extension loading mib", :module_name => module_name)
@mibs.load_module(module_name, options[:imported_dir])
end
@mibs
end
def send_result(result)
socket = UDPSocket.new
socket.send(Sensu::JSON.dump(result), 0, "127.0.0.1", 3030)
socket.close
end
def determine_hostname(address)
begin
Resolv.getname(address)
rescue Resolv::ResolvError
@logger.debug("snmp trap check extension unable to resolve hostname", :address => address)
address
end
end
def determine_trap_oid(trap)
varbind = trap.varbind_list.detect do |varbind|
varbind.name.to_oid == SNMP::SNMP_TRAP_OID_OID
end
begin
@mibs.name(varbind.value.to_oid).gsub(/[^\w\.-]/i, "-")
rescue
varbind.value.to_s.gsub(/[^\w\.-]/i, "-") rescue "trap_oid_unknown"
end
end
def determine_trap_name(trap)
oid_symbolic_name = determine_trap_oid(trap)
if oid_symbolic_name =~ /link(down|up)/i
name = "link_status"
trap.varbind_list.each do |varbind|
symbolic_name = @mibs.name(varbind.name.to_oid)
if symbolic_name =~ /ifindex/i || symbolic_name =~ /systemobject/i
type_conversion = RUBY_ASN1_MAP[varbind.value.asn1_type]
if type_conversion
value = varbind.value.send(type_conversion)
name = "#{name}_#{value}"
end
end
end
name
else
oid_symbolic_name
end
end
def determine_trap_output(trap)
oid_symbolic_name = determine_trap_oid(trap)
if matched = /link(down|up)/i.match(oid_symbolic_name)
link_status = matched[1].downcase
output = "link is #{link_status}"
trap.varbind_list.each do |varbind|
symbolic_name = @mibs.name(varbind.name.to_oid)
if symbolic_name =~ /ifalias/i || symbolic_name =~ /ifdesc/i
type_conversion = RUBY_ASN1_MAP[varbind.value.asn1_type]
if type_conversion
value = varbind.value.send(type_conversion)
output = "#{output} - #{value}"
end
end
end
output
else
"received snmp trap"
end
end
def determine_trap_status(trap)
oid_symbolic_name = determine_trap_oid(trap)
mapping = RESULT_STATUS_MAP.detect do |mapping|
oid_symbolic_name =~ mapping.first
end
mapping ? mapping.last : 0
end
def process_trap(trap)
@logger.debug("snmp trap check extension processing a v2 trap")
result = options[:custom_attributes].merge(
{
:source => determine_hostname(trap.source_ip),
:handlers => options[:handlers],
:snmp_trap => {}
}
)
trap.varbind_list.each do |varbind|
symbolic_name = @mibs.name(varbind.name.to_oid)
type_conversion = RUBY_ASN1_MAP[varbind.value.asn1_type]
if type_conversion
value = varbind.value.send(type_conversion)
result[:snmp_trap][symbolic_name] = value
mapping = RESULT_MAP.detect do |mapping|
symbolic_name =~ mapping.first
end
if mapping && !result[mapping.last]
if mapping.size == 3
result[mapping.last] = mapping[1].call(value)
else
result[mapping.last] = value
end
end
else
@logger.error("snmp trap check extension failed to convert varbind", {
:symbolic_name => symbolic_name,
:asn1_type => varbind.value.asn1_type,
:raw_value => varbind.value
})
end
end
result[:name] ||= determine_trap_name(trap)
result[:output] ||= determine_trap_output(trap)
result[:status] ||= determine_trap_status(trap)
send_result(result)
end
def start_trap_processor!
@processor = Thread.new do
create_mibs_map!
import_mibs!
load_mibs!
loop do
process_trap(@traps.pop)
end
end
@processor.abort_on_exception = true
@processor
end
def post_init
@traps = Queue.new
start_snmpv2_listener!
start_trap_processor!
end
def stop
@listener.kill if @listener
@processor.kill if @processor
end
def run(event, &callback)
yield "no-op", 0
end
end
end
end
link status varbind value might be an empty string
require "sensu/extension"
require "sensu/extensions/snmp-trap/snmp-patch"
require "thread"
module Sensu
module Extension
class SNMPTrap < Check
RESULT_MAP = [
[/checkname/i, :name],
[/notification/i, :output],
[/description/i, :output],
[/pansystemseverity/i, Proc.new { |value| value > 3 ? 2 : 0 }, :status],
[/severity/i, :status]
]
RESULT_STATUS_MAP = [
[/down/i, 2],
[/authenticationfailure/i, 1]
]
RUBY_ASN1_MAP = {
"INTEGER" => :to_i,
"OCTET STRING" => :to_s,
"OBJECT IDENTIFIER" => :to_s,
"IpAddress" => :to_s,
"Counter32" => :to_i,
"Gauge32" => :to_i,
"Unsigned32" => :to_i,
"TimeTicks" => :to_i,
"Opaque" => :to_s,
"Counter64" => :to_i
}
def name
"snmp_trap"
end
def description
"receives snmp traps and translates them to check results"
end
def definition
{
name: name,
publish: false
}
end
def options
return @options if @options
@options = {
:bind => "0.0.0.0",
:port => 1062,
:community => "public",
:handlers => ["default"],
:mibs_dir => "/etc/sensu/mibs",
:imported_dir => File.join(Dir.tmpdir, "sensu_snmp_imported_mibs"),
:custom_attributes => {}
}
@options.merge!(@settings[:snmp_trap]) if @settings[:snmp_trap].is_a?(Hash)
@options
end
def start_snmpv2_listener!
@listener = SNMP::TrapListener.new(
:host => options[:bind],
:port => options[:port],
:community => options[:community]) do |listener|
listener.on_trap_v2c do |trap|
@logger.debug("snmp trap check extension received a v2 trap")
@traps << trap
end
end
end
def determine_mib_preload(module_name)
preload = []
if @mibs_map[module_name]
imports = @mibs_map[module_name][:imports]
# two enumerators are required for preload ordering
imports.each do |import|
if @mibs_map[import]
preload << @mibs_map[import][:mib_file]
end
end
imports.each do |import|
preload << determine_mib_preload(import)
end
else
@logger.warn("snmp trap check extension unknown mib preload module", :module_name => module_name)
end
preload.flatten
end
def create_mibs_map!
@logger.info("snmp trap check extension creating mibs map", :mibs_dir => options[:mibs_dir])
@mibs_map = {}
Dir.glob(File.join(options[:mibs_dir], "*")).each do |mib_file|
begin
mib_contents = IO.read(mib_file).force_encoding("UTF-8")
module_name = mib_contents.scan(/([\w-]+)\s+DEFINITIONS\s+::=\s+BEGIN/).flatten.first
details = {
:mib_file => mib_file,
:imports => mib_contents.scan(/FROM\s+([\w-]+)/).flatten
}
if @mibs_map.has_key?(module_name)
@logger.warn("snmp trap check extension overriding mib map entry", {
:module_name => module_name,
:old_details => @mibs_map[module_name],
:new_details => details
})
end
@mibs_map[module_name] = details
rescue => error
@logger.error("snmp trap check extension mibs map error", {
:mib_file => mib_file,
:error => error.to_s
})
end
end
@mibs_map.each_key do |module_name|
@mibs_map[module_name][:preload] = determine_mib_preload(module_name)
end
@mibs_map
end
def import_mibs!
@logger.info("snmp trap check extension importing mibs", :mibs_dir => options[:mibs_dir])
@mibs_map.each do |module_name, details|
@logger.debug("snmp trap check extension importing mib", {
:module_name => module_name,
:details => details
})
begin
@logger.debug("snmp trap check extension mib dependencies", {
:module_name => module_name,
:details => details
})
unless details[:preload].empty?
arguments = "-p "
arguments << details[:preload].map { |preload| preload }.join(" -p ")
else
arguments = nil
end
SNMP::MIB.import_module(details[:mib_file], options[:imported_dir], arguments)
rescue StandardError, SyntaxError => error
@logger.debug("snmp trap check extension failed to import mib", {
:module_name => module_name,
:details => details,
:error => error
})
end
end
end
def load_mibs!
@logger.info("snmp trap check extension loading mibs", :imported_dir => options[:imported_dir])
@mibs = SNMP::MIB.new
@logger.debug("snmp trap check extension loading mibs")
SNMP::MIB.list_imported(/.*/, SNMP::MIB::DEFAULT_MIB_PATH).each do |module_name|
@logger.debug("snmp trap check extension loading mib", :module_name => module_name)
@mibs.load_module(module_name, SNMP::MIB::DEFAULT_MIB_PATH)
end
SNMP::MIB.list_imported(/.*/, options[:imported_dir]).each do |module_name|
@logger.debug("snmp trap check extension loading mib", :module_name => module_name)
@mibs.load_module(module_name, options[:imported_dir])
end
@mibs
end
def send_result(result)
socket = UDPSocket.new
socket.send(Sensu::JSON.dump(result), 0, "127.0.0.1", 3030)
socket.close
end
def determine_hostname(address)
begin
Resolv.getname(address)
rescue Resolv::ResolvError
@logger.debug("snmp trap check extension unable to resolve hostname", :address => address)
address
end
end
def determine_trap_oid(trap)
varbind = trap.varbind_list.detect do |varbind|
varbind.name.to_oid == SNMP::SNMP_TRAP_OID_OID
end
begin
@mibs.name(varbind.value.to_oid).gsub(/[^\w\.-]/i, "-")
rescue
varbind.value.to_s.gsub(/[^\w\.-]/i, "-") rescue "trap_oid_unknown"
end
end
def determine_trap_name(trap)
oid_symbolic_name = determine_trap_oid(trap)
if oid_symbolic_name =~ /link(down|up)/i
name = "link_status"
trap.varbind_list.each do |varbind|
symbolic_name = @mibs.name(varbind.name.to_oid)
if symbolic_name =~ /ifindex/i || symbolic_name =~ /systemobject/i
type_conversion = RUBY_ASN1_MAP[varbind.value.asn1_type]
if type_conversion
value = varbind.value.send(type_conversion)
unless value == ""
name = "#{name}_#{value}"
end
end
end
end
name
else
oid_symbolic_name
end
end
def determine_trap_output(trap)
oid_symbolic_name = determine_trap_oid(trap)
if matched = /link(down|up)/i.match(oid_symbolic_name)
link_status = matched[1].downcase
output = "link is #{link_status}"
trap.varbind_list.each do |varbind|
symbolic_name = @mibs.name(varbind.name.to_oid)
if symbolic_name =~ /ifalias/i || symbolic_name =~ /ifdesc/i
type_conversion = RUBY_ASN1_MAP[varbind.value.asn1_type]
if type_conversion
value = varbind.value.send(type_conversion)
unless value == ""
output = "#{output} (#{value})"
end
end
end
end
output
else
"received snmp trap"
end
end
def determine_trap_status(trap)
oid_symbolic_name = determine_trap_oid(trap)
mapping = RESULT_STATUS_MAP.detect do |mapping|
oid_symbolic_name =~ mapping.first
end
mapping ? mapping.last : 0
end
def process_trap(trap)
@logger.debug("snmp trap check extension processing a v2 trap")
result = options[:custom_attributes].merge(
{
:source => determine_hostname(trap.source_ip),
:handlers => options[:handlers],
:snmp_trap => {}
}
)
trap.varbind_list.each do |varbind|
symbolic_name = @mibs.name(varbind.name.to_oid)
type_conversion = RUBY_ASN1_MAP[varbind.value.asn1_type]
if type_conversion
value = varbind.value.send(type_conversion)
result[:snmp_trap][symbolic_name] = value
mapping = RESULT_MAP.detect do |mapping|
symbolic_name =~ mapping.first
end
if mapping && !result[mapping.last]
if mapping.size == 3
result[mapping.last] = mapping[1].call(value)
else
result[mapping.last] = value
end
end
else
@logger.error("snmp trap check extension failed to convert varbind", {
:symbolic_name => symbolic_name,
:asn1_type => varbind.value.asn1_type,
:raw_value => varbind.value
})
end
end
result[:name] ||= determine_trap_name(trap)
result[:output] ||= determine_trap_output(trap)
result[:status] ||= determine_trap_status(trap)
send_result(result)
end
def start_trap_processor!
@processor = Thread.new do
create_mibs_map!
import_mibs!
load_mibs!
loop do
process_trap(@traps.pop)
end
end
@processor.abort_on_exception = true
@processor
end
def post_init
@traps = Queue.new
start_snmpv2_listener!
start_trap_processor!
end
def stop
@listener.kill if @listener
@processor.kill if @processor
end
def run(event, &callback)
yield "no-op", 0
end
end
end
end
|
class <%= migration_name %> < ActiveRecord::Migration
<% list_of_models.each do |model| -%>
class <%= model.classify %> < ActiveRecord::Base
end
<% end -%>
def self.up
<% list_of_models.each do |model| -%>
change_table(:<%= model.classify.constantize.table_name %>) do |t|
t.datetime :<%= model %>_LM
t.string :<%= model %>_OW
t.string :<%= model %>_OL
end
<% end -%>
end
def self.down
<% list_of_models.each do |model| -%>
<% ['LM', 'OW', 'OL'].each do |e| -%>
remove_column :<%= model.classify.constantize.table_name %>, :<%= "#{model}_#{e}" %>
<% end -%><% end -%>
end
end
Aggiunto timestamp nella migrazione
class <%= migration_name %> < ActiveRecord::Migration
<% list_of_models.each do |model| -%>
class <%= model.classify %> < ActiveRecord::Base
end
<% end -%>
def self.up
<% list_of_models.each do |model| -%>
say_with_time 'Adding Esagon fields to <%= model.classify %>...' do
change_table(:<%= model.classify.constantize.table_name %>) do |t|
t.datetime :<%= model %>_LM
t.string :<%= model %>_OW
t.string :<%= model %>_OL
end
end
<% end -%>
end
def self.down
<% list_of_models.each do |model| -%>
say_with_time 'Removing Esagon fields from <%= model.classify %>...' do
<% ['LM', 'OW', 'OL'].each do |e| -%>
remove_column :<%= model.classify.constantize.table_name %>, :<%= "#{model}_#{e}" %>
<% end -%>
end
<% end -%>
end
end
|
require 'geocoder/lookups/base'
require 'geocoder/results/maxmind_local'
module Geocoder::Lookup
class MaxmindLocal < Base
def initialize
begin
require (RUBY_PLATFORM == 'java' ? 'jgeoip' : 'geoip')
rescue LoadError => e
raise 'Could not load geoip dependency. To use MaxMind Local lookup you must add geoip gem to your Gemfile or have it installed in your system.'
end
super
end
def name
"MaxMind Local"
end
def required_api_key_parts
[]
end
private
def results(query)
if configuration[:database].nil?
raise(
Geocoder::ConfigurationError,
"When using MaxMind Database you MUST specify the path: " +
"Geocoder.configure(:maxmind_local => {:database => ...}), "
)
end
result = (RUBY_PLATFORM == "java" ? JGeoIP.new(configuration[:database]).city(query.to_s) : GeoIP.new(configuration[:database]).city(query.to_s))
result.nil? ? [] : [result.to_hash]
end
end
end
Reduce code duplication.
require 'geocoder/lookups/base'
require 'geocoder/results/maxmind_local'
module Geocoder::Lookup
class MaxmindLocal < Base
def initialize
begin
gem = RUBY_PLATFORM == 'java' ? 'jgeoip' : 'geoip'
require gem
rescue LoadError => e
raise 'Could not load geoip dependency. To use MaxMind Local lookup you must add the #{gem} gem to your Gemfile or have it installed in your system.'
end
super
end
def name
"MaxMind Local"
end
def required_api_key_parts
[]
end
private
def results(query)
if configuration[:database].nil?
raise(
Geocoder::ConfigurationError,
"When using MaxMind Database you MUST specify the path: " +
"Geocoder.configure(:maxmind_local => {:database => ...}), "
)
end
geoip_class = RUBY_PLATFORM == "java" ? JGeoIP : GeoIP
result = geoip_class.new(configuration[:database]).city(query.to_s)
result.nil? ? [] : [result.to_hash]
end
end
end
|
require 'rspec/expectations'
# NOTE: This matcher does not work for multiple NOTIFY statements on the same channel
RSpec::Matchers.define :notify do |channel,payload|
supports_block_expectations
def diffable?
@notified == true
end
match do |code|
pg = PGconn.open(:dbname => 'clicker_test')
pg.exec "LISTEN #{channel}"
@notified = false
code.call
wait = Proc.new do
pg.wait_for_notify(0.5) do |actual_channel, pid, actual_payload|
return wait if channel != actual_channel
@notified = true
@actual = actual_payload
expect(actual_payload).to eq payload
end
end
wait.call
expect(@notified).to eq true
end
failure_message do
if @notified == false
"Expected a NOTIFY on channel `#{channel}` (received nothing on that channel instead)"
else
"Expected a NOTIFY on channel `#{channel}`\n\tExpected payload: #{payload.inspect}\n\tGot payload: #{@actual.inspect}"
end
end
end
Update postgres matcher to accept blocks
require 'rspec/expectations'
# NOTE: This matcher does not work for multiple NOTIFY statements on the same channel
# RSpec::Matchers.define :notify do |channel,payload,&match_block|
module NotifyMatcher
def notify(*args, &block)
Notify.new(*args, &block)
end
class Notify
def initialize(channel, payload=nil, &payload_block)
@channel = channel
@payload = payload
@payload_block = payload_block if block_given?
end
def supports_block_expectations?
true
end
def diffable?
@notified == true
end
def matches?(code)
pg = PGconn.open(:dbname => 'clicker_test')
pg.exec "LISTEN #{@channel}"
@notified = false
code.call
payload_matches = nil
wait = Proc.new do
pg.wait_for_notify(0.5) do |actual_channel, pid, actual_payload|
return wait if @channel != actual_channel
@notified = true
@actual = actual_payload
payload_matches = actual_payload == @payload if @payload
@payload_block.call(actual_payload) if @payload_block
end
end
wait.call
if @payload
@notified == true && payload_matches == true
else
@notified == true
end
end
def failure_message
if @notified == false
"Expected a NOTIFY on channel `#{@channel}` (received nothing on that channel instead)"
else
"Expected a NOTIFY on channel `#{@channel}`\n\tExpected payload: #{@payload.inspect}\n\tGot payload: #{@actual.inspect}"
end
end
end
end
RSpec.configure do |config|
config.include(NotifyMatcher)
end
|
require 'tmpdir'
module RepositoryHelper
module_function
attr_reader :repo, :tmp_git_dir
def create_repository
@tmp_git_dir = Dir.mktmpdir
@repo = Rugged::Repository.init_at(@tmp_git_dir)
end
def delete_repository
FileUtils.rm_r(tmp_git_dir)
end
def repository_dir
File.realpath(tmp_git_dir) + '/'
end
def current_branch_name
repo.head.name.sub(/^refs\/heads\//, '')
end
def add_to_index(file_name, blob_content)
object_id = repo.write(blob_content, :blob)
repo.index.add(path: file_name, oid: object_id, mode: 0100755)
repo.index.write
end
def create_commit
author = { email: 'john.doe@example.com', name: 'John Doe', time: Time.now }
tree = repo.index.write_tree(repo)
Rugged::Commit.create(repo,
author: author,
message: 'commit message',
committer: author,
parents: repo.empty? ? [] : [repo.head.target].compact,
tree: tree,
update_ref: 'HEAD')
repo.checkout(current_branch_name, strategy: [:force])
end
def create_branch(branch_name, checkout: false)
repo.create_branch(branch_name)
checkout_branch(branch_name) if checkout
end
def checkout_branch(branch_name)
repo.checkout(branch_name, strategy: [:force])
end
end
add newline
require 'tmpdir'
module RepositoryHelper
module_function
attr_reader :repo, :tmp_git_dir
def create_repository
@tmp_git_dir = Dir.mktmpdir
@repo = Rugged::Repository.init_at(@tmp_git_dir)
end
def delete_repository
FileUtils.rm_r(tmp_git_dir)
end
def repository_dir
File.realpath(tmp_git_dir) + '/'
end
def current_branch_name
repo.head.name.sub(/^refs\/heads\//, '')
end
def add_to_index(file_name, blob_content)
object_id = repo.write(blob_content, :blob)
repo.index.add(path: file_name, oid: object_id, mode: 0100755)
repo.index.write
end
def create_commit
author = { email: 'john.doe@example.com', name: 'John Doe', time: Time.now }
tree = repo.index.write_tree(repo)
Rugged::Commit.create(repo,
author: author,
message: 'commit message',
committer: author,
parents: repo.empty? ? [] : [repo.head.target].compact,
tree: tree,
update_ref: 'HEAD')
repo.checkout(current_branch_name, strategy: [:force])
end
def create_branch(branch_name, checkout: false)
repo.create_branch(branch_name)
checkout_branch(branch_name) if checkout
end
def checkout_branch(branch_name)
repo.checkout(branch_name, strategy: [:force])
end
end
|
require 'json'
require 'openssl'
require 'faraday'
require 'faraday_middleware'
module Shenzhen::Plugins
module HockeyApp
class Client
HOSTNAME = 'rink.hockeyapp.net'
def initialize(api_token)
@api_token = api_token
@connection = Faraday.new(:url => "https://#{HOSTNAME}") do |builder|
builder.request :multipart
builder.request :url_encoded
builder.response :json, :content_type => /\bjson$/
builder.use FaradayMiddleware::FollowRedirects
builder.adapter :net_http
end
end
def upload_build(ipa, options)
options[:ipa] = Faraday::UploadIO.new(ipa, 'application/octet-stream') unless ipa.nil?
if dsym_filename = options.delete(:dsym_filename)
options[:dsym] = Faraday::UploadIO.new(dsym_filename, 'application/octet-stream')
end
@connection.post do |req|
if options[:public_identifier].nil?
req.url("/api/2/apps/upload")
else
req.url("/api/2/apps/#{options.delete(:public_identifier)}/app_versions/upload")
end
req.headers['X-HockeyAppToken'] = @api_token
req.body = options
end.on_complete do |env|
yield env[:status], env[:body] if block_given?
end
end
end
end
end
command :'distribute:hockeyapp' do |c|
c.syntax = "ipa distribute:hockeyapp [options]"
c.summary = "Distribute an .ipa file over HockeyApp"
c.description = ""
c.option '-f', '--file FILE', ".ipa file for the build"
c.option '-d', '--dsym FILE', "zipped .dsym package for the build"
c.option '-t', '--token TOKEN', "API Token. Available at https://rink.hockeyapp.net/manage/auth_tokens"
c.option '-i', '--identifier PUBLIC_IDENTIFIER', "Public identifier of the app you are targeting, if not specified HockeyApp will use the bundle identifier to choose the right"
c.option '-m', '--notes NOTES', "Release notes for the build (Default: Textile)"
c.option '--markdown', 'Notes are written with Markdown'
c.option '--tags TAGS', "Comma separated list of tags which will receive access to the build"
c.option '--notify', "Notify permitted teammates to install the build"
c.option '--downloadOff', "Upload but don't allow download of this version just yet"
c.option '--mandatory', "Make this update mandatory"
c.action do |args, options|
determine_file! unless @file = options.file
say_warning "Missing or unspecified .ipa file" unless @file and File.exist?(@file)
determine_dsym! unless @dsym = options.dsym
say_warning "Specified dSYM.zip file doesn't exist" if @dsym and !File.exist?(@dsym)
determine_hockeyapp_api_token! unless @api_token = options.token || ENV['HOCKEYAPP_API_TOKEN']
say_error "Missing API Token" and abort unless @api_token
determine_notes! unless @notes = options.notes
say_error "Missing release notes" and abort unless @notes
parameters = {}
parameters[:public_identifier] = options.identifier if options.identifier
parameters[:notes] = @notes
parameters[:notes_type] = options.markdown ? "1" : "0"
parameters[:notify] = "1" if options.notify && !options.downloadOff
parameters[:status] = options.downloadOff ? "1" : "2"
parameters[:tags] = options.tags if options.tags
parameters[:dsym_filename] = @dsym if @dsym
parameters[:mandatory] = "1" if options.mandatory
client = Shenzhen::Plugins::HockeyApp::Client.new(@api_token)
response = client.upload_build(@file, parameters)
case response.status
when 200...300
say_ok "Build successfully uploaded to HockeyApp"
else
say_error "Error uploading to HockeyApp: #{response.body}"
end
end
private
def determine_hockeyapp_api_token!
@api_token ||= ask "API Token:"
end
end
[Issue #44] Fixing optional .ipa option for HockeyApp. Now to skip .ipa if present in current directory, one can specify a non-existent file.
require 'json'
require 'openssl'
require 'faraday'
require 'faraday_middleware'
module Shenzhen::Plugins
module HockeyApp
class Client
HOSTNAME = 'rink.hockeyapp.net'
def initialize(api_token)
@api_token = api_token
@connection = Faraday.new(:url => "https://#{HOSTNAME}") do |builder|
builder.request :multipart
builder.request :url_encoded
builder.response :json, :content_type => /\bjson$/
builder.use FaradayMiddleware::FollowRedirects
builder.adapter :net_http
end
end
def upload_build(ipa, options)
options[:ipa] = Faraday::UploadIO.new(ipa, 'application/octet-stream') if ipa and File.exist?(ipa)
if dsym_filename = options.delete(:dsym_filename)
options[:dsym] = Faraday::UploadIO.new(dsym_filename, 'application/octet-stream')
end
@connection.post do |req|
if options[:public_identifier].nil?
req.url("/api/2/apps/upload")
else
req.url("/api/2/apps/#{options.delete(:public_identifier)}/app_versions/upload")
end
req.headers['X-HockeyAppToken'] = @api_token
req.body = options
end.on_complete do |env|
yield env[:status], env[:body] if block_given?
end
end
end
end
end
command :'distribute:hockeyapp' do |c|
c.syntax = "ipa distribute:hockeyapp [options]"
c.summary = "Distribute an .ipa file over HockeyApp"
c.description = ""
c.option '-f', '--file FILE', ".ipa file for the build"
c.option '-d', '--dsym FILE', "zipped .dsym package for the build"
c.option '-t', '--token TOKEN', "API Token. Available at https://rink.hockeyapp.net/manage/auth_tokens"
c.option '-i', '--identifier PUBLIC_IDENTIFIER', "Public identifier of the app you are targeting, if not specified HockeyApp will use the bundle identifier to choose the right"
c.option '-m', '--notes NOTES', "Release notes for the build (Default: Textile)"
c.option '--markdown', 'Notes are written with Markdown'
c.option '--tags TAGS', "Comma separated list of tags which will receive access to the build"
c.option '--notify', "Notify permitted teammates to install the build"
c.option '--downloadOff', "Upload but don't allow download of this version just yet"
c.option '--mandatory', "Make this update mandatory"
c.action do |args, options|
determine_file! unless @file = options.file
say_warning "Missing or unspecified .ipa file" unless @file and File.exist?(@file)
determine_dsym! unless @dsym = options.dsym
say_warning "Specified dSYM.zip file doesn't exist" if @dsym and !File.exist?(@dsym)
determine_hockeyapp_api_token! unless @api_token = options.token || ENV['HOCKEYAPP_API_TOKEN']
say_error "Missing API Token" and abort unless @api_token
determine_notes! unless @notes = options.notes
say_error "Missing release notes" and abort unless @notes
parameters = {}
parameters[:public_identifier] = options.identifier if options.identifier
parameters[:notes] = @notes
parameters[:notes_type] = options.markdown ? "1" : "0"
parameters[:notify] = "1" if options.notify && !options.downloadOff
parameters[:status] = options.downloadOff ? "1" : "2"
parameters[:tags] = options.tags if options.tags
parameters[:dsym_filename] = @dsym if @dsym
parameters[:mandatory] = "1" if options.mandatory
client = Shenzhen::Plugins::HockeyApp::Client.new(@api_token)
response = client.upload_build(@file, parameters)
case response.status
when 200...300
say_ok "Build successfully uploaded to HockeyApp"
else
say_error "Error uploading to HockeyApp: #{response.body}"
end
end
private
def determine_hockeyapp_api_token!
@api_token ||= ask "API Token:"
end
end
|
require 'spec_helper'
RSpec.describe Synapsis::Transaction do
before(:all) do
@oauth_token = UserFactory.create_user.oauth.oauth_key
@bank_id = UserFactory.create_node(
oauth_token: @oauth_token,
fingerprint: UserFactory.default_fingerprint
).nodes.first._id.send(:$oid)
@receiver_oauth_token = UserFactory.create_user.oauth.oauth_key
@receiver_bank_id = UserFactory.create_node(
oauth_token: @receiver_oauth_token,
fingerprint: UserFactory.default_fingerprint
).nodes.first._id.send(:$oid)
end
context '.add and .cancel' do
let(:add_transaction_params) {{
login: { oauth_key: @oauth_token },
user: { fingerprint: UserFactory.default_fingerprint },
trans: {
to: {
type: 'ACH-US',
id: @receiver_bank_id
},
from: {
type: 'ACH-US',
id: @bank_id
},
extra: {
ip: '192.168.0.1'
},
amount: {
amount: 10.10,
currency: 'USD'
}
}
}}
context 'happy path' do
it 'returns the correct transaction details, then cancels the transaction' do
add_transaction_response = Synapsis::Transaction.add(add_transaction_params)
expect(add_transaction_response.success).to be_truthy
expect(add_transaction_response.trans._id.send(:$oid)).not_to be_nil
expect(add_transaction_response.trans.amount.amount).to eq add_transaction_params[:trans][:amount][:amount]
expect(add_transaction_response.trans.timeline.first.status).to eq Synapsis::Transaction::Status::CREATED
expect(add_transaction_response.trans.to.id.send(:$oid)).to eq @receiver_bank_id
cancel_transaction_params = {
login: { oauth_key: @oauth_token },
user: { fingerprint: UserFactory.default_fingerprint },
trans: {
_id: {
'$oid' => add_transaction_response.trans._id.send(:$oid)
}
}
}
cancel_transaction_response = Synapsis::Transaction.cancel(cancel_transaction_params)
expect(cancel_transaction_response.success).to be_truthy
expect(cancel_transaction_response.message.en).to eq 'Transaction has been canceled.'
end
end
context 'errors' do
context '.add' do
it 'wrong password raises a Synapsis Error' do
wrong_transaction_params = add_transaction_params.clone
wrong_transaction_params[:login][:oauth_key] = 'WRONG PASSWORD'
expect { Synapsis::Transaction.add(wrong_transaction_params) }.to raise_error(Synapsis::Error)
end
end
context '.cancel' do
xit 'pending--you can\'t cancel a SETTLED transaction' do
end
end
end
end
# This is when we want to send money from a bank account to a Synapse account
context '.add ACH to Synapse' do
before(:all) do
show_node_params = {
login: { oauth_key: @receiver_oauth_token },
user: { fingerprint: UserFactory.default_fingerprint },
filter: {
'type' => 'SYNAPSE-US'
}
}
show_node_response = Synapsis::Node.show(show_node_params)
@receiver_synapse_us_id = show_node_response.nodes.first._id.send(:$oid)
end
let(:add_transaction_params) {{
login: { oauth_key: @oauth_token },
user: { fingerprint: UserFactory.default_fingerprint },
trans: {
to: {
type: 'SYNAPSE-US',
id: @receiver_synapse_us_id
},
from: {
type: 'ACH-US',
id: @bank_id
},
extra: {
ip: '192.168.0.1'
},
amount: {
amount: 10.10,
currency: 'USD'
}
}
}}
#<Synapsis::Response success=true, trans=#<Synapsis::Response _id=#<Synapsis::Response $oid="55d1b31f86c2736bd9172aba">, amount=#<Synapsis::Response amount=10.1, currency="USD">, client=#<Synapsis::Response id=854, name="Daryll Santos">, extra=#<Synapsis::Response created_on=#<Synapsis::Response $date=1439806239190>, ip="192.168.0.1", latlon="0,0", note="", other=#<Synapsis::Response>, process_on=#<Synapsis::Response $date=1439806239190>, supp_id="", webhook="">, fees=[#<Synapsis::Response fee=0.25, note="Synapse Facilitator Fee", to=#<Synapsis::Response id=#<Synapsis::Response $oid="559339aa86c273605ccd35df">>>], from=#<Synapsis::Response id=#<Synapsis::Response $oid="55bf3be186c2735f97979bb9">, nickname="LIFEGREEN CHECKING F", type="ACH-US", user=#<Synapsis::Response _id=#<Synapsis::Response $oid="55bf3b5e86c273627b20ea5f">, legal_names=["Sample Sender"]>>, recent_status=#<Synapsis::Response date=#<Synapsis::Response $date=1439806239190>, note="Transaction created", status="CREATED", status_id="1">, timeline=[#<Synapsis::Response date=#<Synapsis::Response $date=1439806239190>, note="Transaction created", status="CREATED", status_id="1">], to=#<Synapsis::Response id=#<Synapsis::Response $oid="55afa3d686c27312caffa669">, nickname="Default Synapse Node", type="SYNAPSE-US", user=#<Synapsis::Response _id=#<Synapsis::Response $oid="55afa3d686c27312caffa668">, legal_names=["Daryll Santos"]>>>>
context 'happy path' do
it 'returns the correct transaction details, then cancels the transaction' do
add_transaction_response = Synapsis::Transaction.add(add_transaction_params)
expect(add_transaction_response.success).to be_truthy
expect(add_transaction_response.trans._id.send(:$oid)).not_to be_nil
expect(add_transaction_response.trans.amount.amount).to eq add_transaction_params[:trans][:amount][:amount]
expect(add_transaction_response.trans.timeline.first.status).to eq Synapsis::Transaction::Status::CREATED
expect(add_transaction_response.trans.to.id.send(:$oid)).to eq @receiver_synapse_us_id
end
end
end
context '.show' do
let(:view_transaction_params) {{
login: { oauth_key: @oauth_token },
user: { fingerprint: UserFactory.default_fingerprint }
}}
context 'happy path' do
context 'no filter' do
it 'shows all transactions' do
view_transaction_response = Synapsis::Transaction::show(view_transaction_params)
expect(view_transaction_response.success).to be_truthy
expect(view_transaction_response.trans).to be_a_kind_of(Array)
end
end
context 'filter based on $oid' do
xit 'PENDING--filter does not work on hashes--shows all transactions' do
view_transaction_params = {
login: { oauth_key: @oauth_token },
user: { fingerprint: UserFactory.default_fingerprint },
'filter' => {
'page' => 1
}
}
view_transaction_response = Synapsis::Transaction::show(view_transaction_params)
expect(view_transaction_response.success).to be_truthy
expect(view_transaction_response.trans).to be_a_kind_of(Array)
end
end
end
end
end
Temporarily comment out spec related to SYNAPSE-US nodes
require 'spec_helper'
RSpec.describe Synapsis::Transaction do
before(:all) do
@oauth_token = UserFactory.create_user.oauth.oauth_key
@bank_id = UserFactory.create_node(
oauth_token: @oauth_token,
fingerprint: UserFactory.default_fingerprint
).nodes.first._id.send(:$oid)
@receiver_oauth_token = UserFactory.create_user.oauth.oauth_key
@receiver_bank_id = UserFactory.create_node(
oauth_token: @receiver_oauth_token,
fingerprint: UserFactory.default_fingerprint
).nodes.first._id.send(:$oid)
end
context '.add and .cancel' do
let(:add_transaction_params) {{
login: { oauth_key: @oauth_token },
user: { fingerprint: UserFactory.default_fingerprint },
trans: {
to: {
type: 'ACH-US',
id: @receiver_bank_id
},
from: {
type: 'ACH-US',
id: @bank_id
},
extra: {
ip: '192.168.0.1'
},
amount: {
amount: 10.10,
currency: 'USD'
}
}
}}
context 'happy path' do
it 'returns the correct transaction details, then cancels the transaction' do
add_transaction_response = Synapsis::Transaction.add(add_transaction_params)
expect(add_transaction_response.success).to be_truthy
expect(add_transaction_response.trans._id.send(:$oid)).not_to be_nil
expect(add_transaction_response.trans.amount.amount).to eq add_transaction_params[:trans][:amount][:amount]
expect(add_transaction_response.trans.timeline.first.status).to eq Synapsis::Transaction::Status::CREATED
expect(add_transaction_response.trans.to.id.send(:$oid)).to eq @receiver_bank_id
cancel_transaction_params = {
login: { oauth_key: @oauth_token },
user: { fingerprint: UserFactory.default_fingerprint },
trans: {
_id: {
'$oid' => add_transaction_response.trans._id.send(:$oid)
}
}
}
cancel_transaction_response = Synapsis::Transaction.cancel(cancel_transaction_params)
expect(cancel_transaction_response.success).to be_truthy
expect(cancel_transaction_response.message.en).to eq 'Transaction has been canceled.'
end
end
context 'errors' do
context '.add' do
it 'wrong password raises a Synapsis Error' do
wrong_transaction_params = add_transaction_params.clone
wrong_transaction_params[:login][:oauth_key] = 'WRONG PASSWORD'
expect { Synapsis::Transaction.add(wrong_transaction_params) }.to raise_error(Synapsis::Error)
end
end
context '.cancel' do
xit 'pending--you can\'t cancel a SETTLED transaction' do
end
end
end
end
# This is when we want to send money from a bank account to a Synapse account
context '.add ACH to Synapse' do
before(:all) do
show_node_params = {
login: { oauth_key: @receiver_oauth_token },
user: { fingerprint: UserFactory.default_fingerprint },
filter: {
'type' => 'SYNAPSE-US'
}
}
show_node_response = Synapsis::Node.show(show_node_params)
# @receiver_synapse_us_id = show_node_response.nodes.first._id.send(:$oid)
@receiver_synapse_us_id = 'TODO' # Pending--apparently you need to explicitly create a SYNAPSE-US node
end
let(:add_transaction_params) {{
login: { oauth_key: @oauth_token },
user: { fingerprint: UserFactory.default_fingerprint },
trans: {
to: {
type: 'SYNAPSE-US',
id: @receiver_synapse_us_id
},
from: {
type: 'ACH-US',
id: @bank_id
},
extra: {
ip: '192.168.0.1'
},
amount: {
amount: 10.10,
currency: 'USD'
}
}
}}
#<Synapsis::Response success=true, trans=#<Synapsis::Response _id=#<Synapsis::Response $oid="55d1b31f86c2736bd9172aba">, amount=#<Synapsis::Response amount=10.1, currency="USD">, client=#<Synapsis::Response id=854, name="Daryll Santos">, extra=#<Synapsis::Response created_on=#<Synapsis::Response $date=1439806239190>, ip="192.168.0.1", latlon="0,0", note="", other=#<Synapsis::Response>, process_on=#<Synapsis::Response $date=1439806239190>, supp_id="", webhook="">, fees=[#<Synapsis::Response fee=0.25, note="Synapse Facilitator Fee", to=#<Synapsis::Response id=#<Synapsis::Response $oid="559339aa86c273605ccd35df">>>], from=#<Synapsis::Response id=#<Synapsis::Response $oid="55bf3be186c2735f97979bb9">, nickname="LIFEGREEN CHECKING F", type="ACH-US", user=#<Synapsis::Response _id=#<Synapsis::Response $oid="55bf3b5e86c273627b20ea5f">, legal_names=["Sample Sender"]>>, recent_status=#<Synapsis::Response date=#<Synapsis::Response $date=1439806239190>, note="Transaction created", status="CREATED", status_id="1">, timeline=[#<Synapsis::Response date=#<Synapsis::Response $date=1439806239190>, note="Transaction created", status="CREATED", status_id="1">], to=#<Synapsis::Response id=#<Synapsis::Response $oid="55afa3d686c27312caffa669">, nickname="Default Synapse Node", type="SYNAPSE-US", user=#<Synapsis::Response _id=#<Synapsis::Response $oid="55afa3d686c27312caffa668">, legal_names=["Daryll Santos"]>>>>
context 'happy path' do
xit 'returns the correct transaction details, then cancels the transaction' do
add_transaction_response = Synapsis::Transaction.add(add_transaction_params)
expect(add_transaction_response.success).to be_truthy
expect(add_transaction_response.trans._id.send(:$oid)).not_to be_nil
expect(add_transaction_response.trans.amount.amount).to eq add_transaction_params[:trans][:amount][:amount]
expect(add_transaction_response.trans.timeline.first.status).to eq Synapsis::Transaction::Status::CREATED
expect(add_transaction_response.trans.to.id.send(:$oid)).to eq @receiver_synapse_us_id
end
end
end
context '.show' do
let(:view_transaction_params) {{
login: { oauth_key: @oauth_token },
user: { fingerprint: UserFactory.default_fingerprint }
}}
context 'happy path' do
context 'no filter' do
it 'shows all transactions' do
view_transaction_response = Synapsis::Transaction::show(view_transaction_params)
expect(view_transaction_response.success).to be_truthy
expect(view_transaction_response.trans).to be_a_kind_of(Array)
end
end
context 'filter based on $oid' do
xit 'PENDING--filter does not work on hashes--shows all transactions' do
view_transaction_params = {
login: { oauth_key: @oauth_token },
user: { fingerprint: UserFactory.default_fingerprint },
'filter' => {
'page' => 1
}
}
view_transaction_response = Synapsis::Transaction::show(view_transaction_params)
expect(view_transaction_response.success).to be_truthy
expect(view_transaction_response.trans).to be_a_kind_of(Array)
end
end
end
end
end
|
require 'celluloid'
require 'sidekiq/fetch'
require 'redis_rate_limiter'
module Sidekiq::RateLimiter
DEFAULT_LIMIT_NAME =
'sidekiq-rate-limit'.freeze unless defined?(DEFAULT_LIMIT_NAME)
class Fetch < Sidekiq::BasicFetch
def retrieve_work
limit(super)
end
def limit(work)
message = JSON.parse(work.message) rescue {}
args = message['args']
klass = message['class']
rate = Rate.new(message)
return work unless !!(klass && rate.valid?)
limit = rate.limit
interval = rate.interval
name = rate.name
options = {
:limit => (limit.respond_to?(:call) ? limit.call(*args) : limit).to_i,
:interval => (interval.respond_to?(:call) ? interval.call(*args) : interval).to_f,
:name => (name.respond_to?(:call) ? name.call(*args) : name).to_s,
}
Sidekiq.redis do |conn|
lim = Limit.new(conn, options)
if lim.exceeded?(klass)
conn.lpush("queue:#{work.queue_name}", work.message)
nil
else
lim.add(klass)
work
end
end
end
end
class Rate
def initialize(message)
@message = message
end
def limit
rate['limit'] || rate['threshold']
end
def interval
rate['interval'] || rate['period']
end
def name
rate['name'] || DEFAULT_LIMIT_NAME
end
def valid?
!!(limit && interval)
end
private
def rate
use_server_rate? ? server_rate : client_rate
end
def use_server_rate?
server_rate['limit'] && server_rate['limit'].respond_to?(:call) ||
server_rate['threshold'] && server_rate['threshold'].respond_to?(:call) ||
server_rate['period'] && server_rate['period'].respond_to?(:call) ||
server_rate['interval'] && server_rate['interval'].respond_to?(:call) ||
server_rate['name'] && server_rate['name'].respond_to?(:call)
end
def client_rate
@client_rate ||= @message['rate'] || @message['throttle'] || {}
end
def server_rate
return @server_rate if @server_rate
worker_class = @message['class']
options = Object.const_get(worker_class).get_sidekiq_options rescue {}
@server_rate = options['rate'] || options['throttle'] || {}
end
end
class Limit < RedisRateLimiter
def initialize(redis, options = {})
options = options.dup
name = options.delete('name') ||
options.delete(:name)
super(name, redis, options)
end
end
end
Fix procs being ignored due to non-stringified keys.
require 'celluloid'
require 'sidekiq/fetch'
require 'redis_rate_limiter'
module Sidekiq::RateLimiter
DEFAULT_LIMIT_NAME =
'sidekiq-rate-limit'.freeze unless defined?(DEFAULT_LIMIT_NAME)
class Fetch < Sidekiq::BasicFetch
def retrieve_work
limit(super)
end
def limit(work)
message = JSON.parse(work.message) rescue {}
args = message['args']
klass = message['class']
rate = Rate.new(message)
return work unless !!(klass && rate.valid?)
limit = rate.limit
interval = rate.interval
name = rate.name
options = {
:limit => (limit.respond_to?(:call) ? limit.call(*args) : limit).to_i,
:interval => (interval.respond_to?(:call) ? interval.call(*args) : interval).to_f,
:name => (name.respond_to?(:call) ? name.call(*args) : name).to_s,
}
Sidekiq.redis do |conn|
lim = Limit.new(conn, options)
if lim.exceeded?(klass)
conn.lpush("queue:#{work.queue_name}", work.message)
nil
else
lim.add(klass)
work
end
end
end
end
class Rate
def initialize(message)
@message = message
end
def limit
rate['limit'] || rate['threshold']
end
def interval
rate['interval'] || rate['period']
end
def name
rate['name'] || DEFAULT_LIMIT_NAME
end
def valid?
!!(limit && interval)
end
private
def rate
use_server_rate? ? server_rate : client_rate
end
def use_server_rate?
server_rate['limit'] && server_rate['limit'].respond_to?(:call) ||
server_rate['threshold'] && server_rate['threshold'].respond_to?(:call) ||
server_rate['period'] && server_rate['period'].respond_to?(:call) ||
server_rate['interval'] && server_rate['interval'].respond_to?(:call) ||
server_rate['name'] && server_rate['name'].respond_to?(:call)
end
def client_rate
@client_rate ||= @message['rate'] || @message['throttle'] || {}
end
def server_rate
return @server_rate if @server_rate
worker_class = @message['class']
options = Object.const_get(worker_class).get_sidekiq_options rescue {}
server_rate = options['rate'] || options['throttle'] || {}
@server_rate = server_rate.stringify_keys
end
end
class Limit < RedisRateLimiter
def initialize(redis, options = {})
options = options.dup
name = options.delete('name') ||
options.delete(:name)
super(name, redis, options)
end
end
end
|
require 'geary'
describe "a worker's client" do
let(:factory) do
Geary::Factory.new(:host => 'localhost', :port => 4730)
end
let(:client) { factory.client }
let(:worker) { factory.worker_client }
let(:admin_client) { factory.admin_client }
after do
client.connection.close
worker.connection.close
admin_client.connection.close
end
it 'grabs jobs once it registers abilities' do
submitted_job = client.submit_job(:grab_job_test, 'something')
worker.can_do(:grab_job_test)
grabbed_job = worker.grab_job
expect(grabbed_job.job_handle).to eql(submitted_job.job_handle)
end
it 'gets NO_JOB if there are no jobs to grab' do
worker.can_do(:no_job_test)
grabbed_job = worker.grab_job
expect(grabbed_job).to be_a(Geary::Packet::NoJob)
end
it 'gets NO_JOB if there are jobs but it cannot do any of them' do
worker.can_do(:cant_do_test)
client.submit_job(:cant_do_test, 'data')
worker.cant_do(:cant_do_test)
expect(worker.grab_job).to be_a(Geary::Packet::NoJob)
end
it 'gets NO_JOB if there jobs after it has reset its abilities' do
worker.can_do(:cant_do_test)
client.submit_job(:cant_do_test, 'data')
worker.reset_abilities
expect(worker.grab_job).to be_a(Geary::Packet::NoJob)
end
it 'gets a NOOP if it checks after a PRE_SLEEP and there is a job waiting' do
worker.can_do(:job_after_sleep)
worker.pre_sleep
client.submit_job(:job_after_sleep, 'wake up!')
expect(worker).to have_jobs_waiting
end
it 'does not have jobs waiting if none have been submitted' do
worker.can_do(:job_that_doesnt_exist)
worker.pre_sleep
expect(worker).to_not have_jobs_waiting
end
it 'grabs a job and get its unique id' do
random = Time.now.to_i.to_s + rand.to_s
fake_generator = double('Generator', :generate => random)
client = factory.client(:unique_id_generator => fake_generator)
worker.can_do(:job_that_cares_about_uniqe_ids)
client.submit_job(:job_that_cares_about_uniqe_ids, 'cool!')
assigned_job = worker.grab_job_uniq
expect(assigned_job.unique_id).to eql(random)
end
it 'sends an update on status' do
client_job = client.submit_job(:long_running_sends_status, 'data')
worker.can_do(:long_running_sends_status)
worker.grab_job.tap do |job|
worker.send_work_status(job.job_handle, 0.5)
end
status_packet = client.connection.read_response
client_status = client.get_status(client_job.job_handle)
expect(client_status.percent_complete).to eql(0.5)
end
it 'updates status to 100% complete' do
client_job = client.submit_job(:long_running_sends_status, 'data')
worker.can_do(:long_running_sends_status)
worker.grab_job.tap do |job|
worker.send_work_status(job.job_handle, 1)
end
status_packet = client.connection.read_response
client_status = client.get_status(client_job.job_handle)
expect(client_status).to be_complete
end
it 'sends data on completion' do
client_job = client.submit_job(:long_running_will_complete, 'data')
worker.can_do(:long_running_will_complete)
worker.grab_job.tap do |job|
worker.send_work_complete(job.job_handle, 'complete')
end
work_complete = client.connection.read_response
expect(work_complete.data).to eql('complete')
end
it 'sends failure notices' do
client_job = client.submit_job(:long_running_will_fail, 'data')
worker.can_do(:long_running_will_fail)
worker.grab_job.tap do |job|
worker.send_work_fail(job.job_handle)
end
work_fail = client.connection.read_response
expect(work_fail).to be_a(Geary::Packet::WorkFailResponse)
end
it 'sends exception notices' do
client.set_server_option('exceptions')
client_job = client.submit_job(:long_running_will_raise, 'data')
worker.can_do(:long_running_will_raise)
worker.grab_job.tap do |job|
worker.send_work_exception(job.job_handle, 'oh no!')
end
work_exception = client.connection.read_response
expect(work_exception.data).to eql('oh no!')
end
it 'sends work data' do
client_job = client.submit_job(:long_running_will_send_data, 'data')
worker.can_do(:long_running_will_send_data)
worker.grab_job.tap do |job|
worker.send_work_data(job.job_handle, 'woo!')
end
work_data = client.connection.read_response
expect(work_data.data).to eql('woo!')
end
it 'sends work warnings' do
client_job = client.submit_job(:long_running_will_warn, 'data')
worker.can_do(:long_running_will_warn)
worker.grab_job.tap do |job|
worker.send_work_warning(job.job_handle, 'watch out!')
end
work_warning = client.connection.read_response
expect(work_warning.data).to eql('watch out!')
end
it 'set its client id' do
random = Time.now.to_i.to_s + rand.to_s
id = "worker-with-id-#{random}"
worker.set_client_id(id)
worker.can_do(:hi_mom)
observed_worker = admin_client.workers.find do |worker|
worker.client_id == id
end
expect(observed_worker).to_not be_nil
end
it 'optionally sets a timeout when it registers abilities' do
pending "Investigation as to how the timeout is triggered"
job = client.submit_job(:timeout_ability, 'failure!')
worker.can_do_timeout(:timeout_ability, 1)
worker.grab_job
status_packet = client.connection.read_response
expect(status_packet).to be_a(Geary::Packet::WorkFailResponse)
end
end
Account for potential server delay when testing client id
require 'geary'
describe "a worker's client" do
let(:factory) do
Geary::Factory.new(:host => 'localhost', :port => 4730)
end
let(:client) { factory.client }
let(:worker) { factory.worker_client }
let(:admin_client) { factory.admin_client }
after do
client.connection.close
worker.connection.close
admin_client.connection.close
end
it 'grabs jobs once it registers abilities' do
submitted_job = client.submit_job(:grab_job_test, 'something')
worker.can_do(:grab_job_test)
grabbed_job = worker.grab_job
expect(grabbed_job.job_handle).to eql(submitted_job.job_handle)
end
it 'gets NO_JOB if there are no jobs to grab' do
worker.can_do(:no_job_test)
grabbed_job = worker.grab_job
expect(grabbed_job).to be_a(Geary::Packet::NoJob)
end
it 'gets NO_JOB if there are jobs but it cannot do any of them' do
worker.can_do(:cant_do_test)
client.submit_job(:cant_do_test, 'data')
worker.cant_do(:cant_do_test)
expect(worker.grab_job).to be_a(Geary::Packet::NoJob)
end
it 'gets NO_JOB if there jobs after it has reset its abilities' do
worker.can_do(:cant_do_test)
client.submit_job(:cant_do_test, 'data')
worker.reset_abilities
expect(worker.grab_job).to be_a(Geary::Packet::NoJob)
end
it 'gets a NOOP if it checks after a PRE_SLEEP and there is a job waiting' do
worker.can_do(:job_after_sleep)
worker.pre_sleep
client.submit_job(:job_after_sleep, 'wake up!')
expect(worker).to have_jobs_waiting
end
it 'does not have jobs waiting if none have been submitted' do
worker.can_do(:job_that_doesnt_exist)
worker.pre_sleep
expect(worker).to_not have_jobs_waiting
end
it 'grabs a job and get its unique id' do
random = Time.now.to_i.to_s + rand.to_s
fake_generator = double('Generator', :generate => random)
client = factory.client(:unique_id_generator => fake_generator)
worker.can_do(:job_that_cares_about_uniqe_ids)
client.submit_job(:job_that_cares_about_uniqe_ids, 'cool!')
assigned_job = worker.grab_job_uniq
expect(assigned_job.unique_id).to eql(random)
end
it 'sends an update on status' do
client_job = client.submit_job(:long_running_sends_status, 'data')
worker.can_do(:long_running_sends_status)
worker.grab_job.tap do |job|
worker.send_work_status(job.job_handle, 0.5)
end
status_packet = client.connection.read_response
client_status = client.get_status(client_job.job_handle)
expect(client_status.percent_complete).to eql(0.5)
end
it 'updates status to 100% complete' do
client_job = client.submit_job(:long_running_sends_status, 'data')
worker.can_do(:long_running_sends_status)
worker.grab_job.tap do |job|
worker.send_work_status(job.job_handle, 1)
end
status_packet = client.connection.read_response
client_status = client.get_status(client_job.job_handle)
expect(client_status).to be_complete
end
it 'sends data on completion' do
client_job = client.submit_job(:long_running_will_complete, 'data')
worker.can_do(:long_running_will_complete)
worker.grab_job.tap do |job|
worker.send_work_complete(job.job_handle, 'complete')
end
work_complete = client.connection.read_response
expect(work_complete.data).to eql('complete')
end
it 'sends failure notices' do
client_job = client.submit_job(:long_running_will_fail, 'data')
worker.can_do(:long_running_will_fail)
worker.grab_job.tap do |job|
worker.send_work_fail(job.job_handle)
end
work_fail = client.connection.read_response
expect(work_fail).to be_a(Geary::Packet::WorkFailResponse)
end
it 'sends exception notices' do
client.set_server_option('exceptions')
client_job = client.submit_job(:long_running_will_raise, 'data')
worker.can_do(:long_running_will_raise)
worker.grab_job.tap do |job|
worker.send_work_exception(job.job_handle, 'oh no!')
end
work_exception = client.connection.read_response
expect(work_exception.data).to eql('oh no!')
end
it 'sends work data' do
client_job = client.submit_job(:long_running_will_send_data, 'data')
worker.can_do(:long_running_will_send_data)
worker.grab_job.tap do |job|
worker.send_work_data(job.job_handle, 'woo!')
end
work_data = client.connection.read_response
expect(work_data.data).to eql('woo!')
end
it 'sends work warnings' do
client_job = client.submit_job(:long_running_will_warn, 'data')
worker.can_do(:long_running_will_warn)
worker.grab_job.tap do |job|
worker.send_work_warning(job.job_handle, 'watch out!')
end
work_warning = client.connection.read_response
expect(work_warning.data).to eql('watch out!')
end
def with_cushion(&block)
Array.new(5) { block.call ; sleep 0.001 }.compact.first
end
it 'sets its client id' do
random = Time.now.to_i.to_s + rand.to_s
id = "worker-with-id-#{random}"
worker.set_client_id(id)
worker.can_do(:hi_mom)
observed_worker = with_cushion do
admin_client.workers.find do |worker|
worker.client_id == id
end
end
expect(observed_worker).to_not be_nil
end
it 'optionally sets a timeout when it registers abilities' do
pending "Investigation as to how the timeout is triggered"
job = client.submit_job(:timeout_ability, 'failure!')
worker.can_do_timeout(:timeout_ability, 1)
worker.grab_job
status_packet = client.connection.read_response
expect(status_packet).to be_a(Geary::Packet::WorkFailResponse)
end
end
|
module Sidetiq
module Middleware
class History
def call(worker, msg, queue, &block)
if worker.kind_of?(Sidetiq::Schedulable)
call_with_sidetiq_history(worker, msg, queue, &block)
else
yield
end
end
def call_with_sidetiq_history(worker, msg, queue)
entry = {
status: :success,
error: "",
exception: "",
backtrace: "",
processor: "#{Socket.gethostname}:#{Process.pid}-#{Thread.current.object_id}",
processed: Time.now.iso8601
}
yield
rescue StandardError => e
entry[:status] = :failure
entry[:exception] = e.class.to_s
entry[:error] = e.message
entry[:backtrace] = e.backtrace
raise e
ensure
Sidekiq.redis do |redis|
redis.pipelined do |pipe|
list_name = "sidetiq:#{worker.class.name}:history"
pipe.lpush(list_name, JSON.dump(entry))
pipe.ltrim(list_name, 0, Sidetiq.config.worker_history - 1)
end
end
end
end
end
end
Sidekiq.configure_server do |config|
config.server_middleware do |chain|
chain.add Sidetiq::Middleware::History
end
end
Break up History#call_with_sidetiq_history.
module Sidetiq
module Middleware
class History
def call(worker, msg, queue, &block)
if worker.kind_of?(Sidetiq::Schedulable)
call_with_sidetiq_history(worker, msg, queue, &block)
else
yield
end
end
private
def call_with_sidetiq_history(worker, msg, queue)
entry = new_history_entry
yield
rescue StandardError => e
entry[:status] = :failure
entry[:exception] = e.class.to_s
entry[:error] = e.message
entry[:backtrace] = e.backtrace
raise e
ensure
save_entry_for_worker(entry, worker)
end
def new_history_entry
{
status: :success,
error: "",
exception: "",
backtrace: "",
processor: "#{Socket.gethostname}:#{Process.pid}-#{Thread.current.object_id}",
processed: Time.now.iso8601
}
end
def save_entry_for_worker(entry, worker)
Sidekiq.redis do |redis|
redis.pipelined do |pipe|
list_name = "sidetiq:#{worker.class.name}:history"
pipe.lpush(list_name, JSON.dump(entry))
pipe.ltrim(list_name, 0, Sidetiq.config.worker_history - 1)
end
end
end
end
end
end
Sidekiq.configure_server do |config|
config.server_middleware do |chain|
chain.add Sidetiq::Middleware::History
end
end
|
require 'thor'
module GithubSniffer
module Cli
class Application < Thor
desc 'test NAME', 'test name'
def test(name)
greeting = "Test is fine, #{name}"
puts greeting
end
desc 'get_user USERNAME', 'Returns github profile of user'
def get_user(username)
github = Github.new(user: username)
languages = {}
# Cyle throug repos and sum up the main language
github.repos.list.each do |repo|
repo_language = repo.language.to_s.downcase!
languages[repo_language] = 0 if languages[repo_language].nil?
languages[repo_language] += 1
end
p languages
end
end # Application
end # Cli
end # GithubSniffer
Displaying return value
- Method counts and returns the most used language from all repos.
require 'thor'
module GithubSniffer
module Cli
class Application < Thor
desc 'test NAME', 'test name'
def test(name)
greeting = "Test is fine, #{name}"
puts greeting
end
desc 'get_user USERNAME', 'Returns github profile of user'
def get_user(username)
github = Github.new(user: username)
languages = {}
# Cyle throug repos and sum up the main language
github.repos.list.each do |repo|
repo_language = repo.language.to_s.downcase!
languages[repo_language] = 0 if languages[repo_language].nil?
languages[repo_language] += 1
end
prefered_lang = languages.sort_by{ |_k, value| value }.last
p "#{username} has #{prefered_lang[0]} as main language in #{prefered_lang[1]} repos."
end
end # Application
end # Cli
end # GithubSniffer
|
require 'semantic_date_time_tags/format_parser'
require 'test_helper'
module SemanticDateTimeTags
describe FormatParser do
describe '#to_html' do
describe 'd / m / Y' do
let(:format) { '%d / %m / %Y' }
let(:string) { '12 / 12 / 2014' }
let(:res) { FormatParser.new(format, string).to_html }
it 'wraps the components into span tags' do
res.must_equal '<span class="day d">12</span> <span class="sep">/</span> <span class="month m">12</span> <span class="sep">/</span> <span class="year Y">2014</span>'
end
end
describe 'd / m / Y' do
let(:format) { '%I.%M %p' }
let(:string) { '10.00 AM' }
let(:res) { FormatParser.new(format, string).to_html }
it 'wraps the components into span tags' do
res.must_equal '<span class="hours I">10</span><span class="sep">.</span><span class="minutes M">00</span> <span class="ampm p">AM</span>'
end
end
end
end
end
test cleanup
require 'semantic_date_time_tags/format_parser'
require 'test_helper'
module SemanticDateTimeTags
describe FormatParser do
subject { FormatParser.new(format, string).to_html }
describe '#to_html' do
describe 'd / m / Y' do
let(:format) { '%d / %m / %Y' }
let(:string) { '12 / 12 / 2014' }
it 'wraps the components into span tags' do
subject.must_equal '<span class="day d">12</span> <span class="sep">/</span> <span class="month m">12</span> <span class="sep">/</span> <span class="year Y">2014</span>'
end
end
describe 'd / m / Y' do
let(:format) { '%I.%M %p' }
let(:string) { '10.00 AM' }
it 'wraps the components into span tags' do
subject.must_equal '<span class="hours I">10</span><span class="sep">.</span><span class="minutes M">00</span> <span class="ampm p">AM</span>'
end
end
end
end
end
|
require 'spec_helper'
describe 'default recipe' do
context 'When the platform doesn\'t matter' do
cached(:chef_run) do
runner = ChefSpec::ServerRunner.new(platform: 'ubuntu', version: '16.04') do |node|
node.automatic['maven']['version'] = '1.2.3'
node.automatic['maven']['url'] = 'https://maven/maven.tar.gz'
node.automatic['maven']['checksum'] = '0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef'
node.automatic['maven']['m2_home'] = '/home/maven-user'
node.automatic['maven']['setup_bin'] = false
end
runner.converge('maven::default')
end
it 'includes the ark recipe' do
expect(chef_run).to include_recipe('ark::default')
end
it 'downloads ark' do
expect(chef_run).to install_ark('maven')
.with(version: '1.2.3')
.with(url: 'https://maven/maven.tar.gz')
.with(checksum: '0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef')
.with(home_dir: '/home/maven-user')
.with(win_install_dir: '/home/maven-user')
.with(append_env_path: false)
end
end
context 'On a non-Windows platform' do
cached(:chef_run) do
runner = ChefSpec::ServerRunner.new(platform: 'ubuntu', version: '16.04')
runner.converge('maven::default')
end
it 'writes the `/etc/mavenrc`' do
expect(chef_run).to create_template('/etc/mavenrc')
.with(source: 'mavenrc.erb')
.with(mode: '0755')
end
end
context 'On a Windows platform' do
cached(:chef_run) do
runner = ChefSpec::ServerRunner.new(platform: 'windows', version: '7') do |node|
node.automatic['maven']['m2_home'] = 'C:\Users\Maven-User'
node.automatic['maven']['mavenrc']['opts'] = '-Ddummy=true'
end
runner.converge('maven::default')
end
it 'sets the M2_HOME environment variable' do
expect(chef_run).to create_env('M2_HOME')
.with(value: 'C:\Users\Maven-User')
end
it 'sets the M2_OPTS' do
expect(chef_run).to create_env('MAVEN_OPTS')
.with(value: '-Ddummy=true')
end
end
end
Use described_recipe variable for reduced repetition
Instead of repeating the name of the recipe we're testing against, we
can instead use the `described_recipe` variable ChefSpec provides for
us.
As part of #82.
Signed-off-by: Jamie Tanna <b4dde37aa2aac745c0e6c43447507eaeace3e59f@jamietanna.co.uk>
require 'spec_helper'
describe 'maven::default' do
context 'When the platform doesn\'t matter' do
cached(:chef_run) do
runner = ChefSpec::ServerRunner.new(platform: 'ubuntu', version: '16.04') do |node|
node.automatic['maven']['version'] = '1.2.3'
node.automatic['maven']['url'] = 'https://maven/maven.tar.gz'
node.automatic['maven']['checksum'] = '0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef'
node.automatic['maven']['m2_home'] = '/home/maven-user'
node.automatic['maven']['setup_bin'] = false
end
runner.converge(described_recipe)
end
it 'includes the ark recipe' do
expect(chef_run).to include_recipe('ark::default')
end
it 'downloads ark' do
expect(chef_run).to install_ark('maven')
.with(version: '1.2.3')
.with(url: 'https://maven/maven.tar.gz')
.with(checksum: '0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef')
.with(home_dir: '/home/maven-user')
.with(win_install_dir: '/home/maven-user')
.with(append_env_path: false)
end
end
context 'On a non-Windows platform' do
cached(:chef_run) do
runner = ChefSpec::ServerRunner.new(platform: 'ubuntu', version: '16.04')
runner.converge(described_recipe)
end
it 'writes the `/etc/mavenrc`' do
expect(chef_run).to create_template('/etc/mavenrc')
.with(source: 'mavenrc.erb')
.with(mode: '0755')
end
end
context 'On a Windows platform' do
cached(:chef_run) do
runner = ChefSpec::ServerRunner.new(platform: 'windows', version: '7') do |node|
node.automatic['maven']['m2_home'] = 'C:\Users\Maven-User'
node.automatic['maven']['mavenrc']['opts'] = '-Ddummy=true'
end
runner.converge(described_recipe)
end
it 'sets the M2_HOME environment variable' do
expect(chef_run).to create_env('M2_HOME')
.with(value: 'C:\Users\Maven-User')
end
it 'sets the M2_OPTS' do
expect(chef_run).to create_env('MAVEN_OPTS')
.with(value: '-Ddummy=true')
end
end
end
|
require 'google_drive'
require 'yaml'
class CompanyDashboard
@@lookups = YAML.load(File.open(File.join(File.dirname(__FILE__), '..', '..', 'config/lookups.yaml')))
@queue = :metrics
extend MetricsHelper
def self.perform
current_year = DateTime.now.year
# reach
store_metric("current-year-reach", DateTime.now, reach(current_year))
store_metric("cumulative-reach", DateTime.now, reach(nil))
# bookings
store_metric("current-year-bookings", DateTime.now, bookings(current_year))
store_metric("cumulative-bookings", DateTime.now, bookings(nil))
# value-unlocked
store_metric("current-year-value-unlocked", DateTime.now, value(current_year))
store_metric("cumulative-value-unlocked", DateTime.now, value(nil))
# performance against KPIs
store_metric("current-year-kpi-performance", DateTime.now, kpis(current_year))
# commercial bookings
store_metric("current-year-commercial-bookings", DateTime.now, bookings_by_type("Commercial", current_year))
store_metric("current-year-non-commercial-bookings", DateTime.now, bookings_by_type("Non-commercial", current_year))
# grant funding
store_metric("current-year-grant-funding", DateTime.now, grant_funding(current_year))
# income by type
store_metric("current-year-income-by-type", DateTime.now, income_by_type(current_year))
# Done - clear cached sheet
clear_cache!
end
def self.reach(year = nil)
if year.nil?
years.inject(0) { |total, year| total += reach(year) }
else
metrics_cell('Reach', year).to_i
end
end
def self.bookings(year = nil)
if year.nil?
years.inject(0) { |total, year| total += bookings(year) }
else
metrics_cell('Bookings', year).to_i
end
end
def self.value(year = nil)
if year.nil?
years.inject(0) { |total, year| total += value(year) }
else
metrics_cell('Value unlocked', year).to_i
end
end
def self.network
h = {}
h[:total] = metrics_cell('Network size').to_i
breakdown = {}
breakdown[:members] = metrics_cell('Members').to_i
breakdown[:nodes] = metrics_cell('Nodes').to_i
breakdown[:startups] = metrics_cell('Startups').to_i
breakdown[:affiliates] = metrics_cell('Affiliates').to_i
h[:breakdown] = breakdown
h
end
def self.kpis(year)
metrics_cell('KPI percentage', year).to_f.round(1)
end
def self.total_income(year)
metrics_cell('Total income', year).to_i
end
def self.bookings_by_type(type, year)
{
actual: metrics_cell("#{type} Bookings Actual", year).to_f,
target: metrics_cell("#{type} Bookings Target", year).to_f
}
end
def self.grant_funding(year)
{
actual: metrics_cell("Grant Funding Actual", year).to_f,
target: metrics_cell("Grant Funding Target", year).to_f
}
end
def self.income_by_type(year)
{
research: metrics_cell("Research income", year).to_f,
training: metrics_cell("Training income", year).to_f,
projects: metrics_cell("Project income", year).to_f,
network: metrics_cell("Network income", year).to_f
}
end
def self.google_drive
GoogleDrive.login(ENV['GAPPS_USER_EMAIL'], ENV['GAPPS_PASSWORD'])
end
def self.metrics_spreadsheet(doc_name)
key = @@lookups['document_keys'][ENV['RACK_ENV'] || 'production'][doc_name]
@@metrics_spreadsheets ||= {}
@@metrics_spreadsheets[key] ||= google_drive.spreadsheet_by_key(key)
end
def self.metrics_worksheet doc_name, worksheet_name
metrics_spreadsheet(doc_name).worksheet_by_title worksheet_name.to_s
end
def self.cell_location year, identifier
@@lookups['cell_lookups'][year][identifier]
end
def self.metrics_cell identifier, year = nil
year = Date.today.year if year.nil?
location = cell_location(year, identifier)
metrics_worksheet(location["document"], location["sheet"])[location["cell_ref"]]
end
def self.years
2013..Date.today.year
end
def self.clear_cache!
@@metrics_spreadsheets = {}
end
end
Make internal methods private
require 'google_drive'
require 'yaml'
class CompanyDashboard
@@lookups = YAML.load(File.open(File.join(File.dirname(__FILE__), '..', '..', 'config/lookups.yaml')))
@queue = :metrics
extend MetricsHelper
def self.perform
current_year = DateTime.now.year
# reach
store_metric("current-year-reach", DateTime.now, reach(current_year))
store_metric("cumulative-reach", DateTime.now, reach(nil))
# bookings
store_metric("current-year-bookings", DateTime.now, bookings(current_year))
store_metric("cumulative-bookings", DateTime.now, bookings(nil))
# value-unlocked
store_metric("current-year-value-unlocked", DateTime.now, value(current_year))
store_metric("cumulative-value-unlocked", DateTime.now, value(nil))
# performance against KPIs
store_metric("current-year-kpi-performance", DateTime.now, kpis(current_year))
# commercial bookings
store_metric("current-year-commercial-bookings", DateTime.now, bookings_by_type("Commercial", current_year))
store_metric("current-year-non-commercial-bookings", DateTime.now, bookings_by_type("Non-commercial", current_year))
# grant funding
store_metric("current-year-grant-funding", DateTime.now, grant_funding(current_year))
# income by type
store_metric("current-year-income-by-type", DateTime.now, income_by_type(current_year))
# Done - clear cached sheet
clear_cache!
end
def self.reach(year = nil)
if year.nil?
years.inject(0) { |total, year| total += reach(year) }
else
metrics_cell('Reach', year).to_i
end
end
def self.bookings(year = nil)
if year.nil?
years.inject(0) { |total, year| total += bookings(year) }
else
metrics_cell('Bookings', year).to_i
end
end
def self.value(year = nil)
if year.nil?
years.inject(0) { |total, year| total += value(year) }
else
metrics_cell('Value unlocked', year).to_i
end
end
def self.network
h = {}
h[:total] = metrics_cell('Network size').to_i
breakdown = {}
breakdown[:members] = metrics_cell('Members').to_i
breakdown[:nodes] = metrics_cell('Nodes').to_i
breakdown[:startups] = metrics_cell('Startups').to_i
breakdown[:affiliates] = metrics_cell('Affiliates').to_i
h[:breakdown] = breakdown
h
end
def self.kpis(year)
metrics_cell('KPI percentage', year).to_f.round(1)
end
def self.total_income(year)
metrics_cell('Total income', year).to_i
end
def self.bookings_by_type(type, year)
{
actual: metrics_cell("#{type} Bookings Actual", year).to_f,
target: metrics_cell("#{type} Bookings Target", year).to_f
}
end
def self.grant_funding(year)
{
actual: metrics_cell("Grant Funding Actual", year).to_f,
target: metrics_cell("Grant Funding Target", year).to_f
}
end
def self.income_by_type(year)
{
research: metrics_cell("Research income", year).to_f,
training: metrics_cell("Training income", year).to_f,
projects: metrics_cell("Project income", year).to_f,
network: metrics_cell("Network income", year).to_f
}
end
end
private
def self.google_drive
GoogleDrive.login(ENV['GAPPS_USER_EMAIL'], ENV['GAPPS_PASSWORD'])
end
def self.metrics_spreadsheet(doc_name)
key = @@lookups['document_keys'][ENV['RACK_ENV'] || 'production'][doc_name]
@@metrics_spreadsheets ||= {}
@@metrics_spreadsheets[key] ||= google_drive.spreadsheet_by_key(key)
end
def self.metrics_worksheet doc_name, worksheet_name
metrics_spreadsheet(doc_name).worksheet_by_title worksheet_name.to_s
end
def self.cell_location year, identifier
@@lookups['cell_lookups'][year][identifier]
end
def self.metrics_cell identifier, year = nil
year = Date.today.year if year.nil?
location = cell_location(year, identifier)
metrics_worksheet(location["document"], location["sheet"])[location["cell_ref"]]
end
def self.years
2013..Date.today.year
end
def self.clear_cache!
@@metrics_spreadsheets = {}
end
end
|
module GoTransverseTractApi
VERSION = "0.1.1"
TARGET_API_VERSION = "1.28"
end
Setting Ruby version requirement. Built gem version: 0.1.2
module GoTransverseTractApi
VERSION = "0.1.2"
TARGET_API_VERSION = "1.28"
end
|
module GM
class GradientView < UIView
include SetupView
# list of colors, you can control spacing with the `points` array
attr_accessor :colors
# alias for colors[0], ignored if you assign a `colors` array.
attr_accessor :startColor
# alias for colors[points.length - 1], ignored if you assign a `colors` array.
attr_accessor :finalColor
# :linear, :radial
attr_accessor :type
# array of numbers from 0..1, indicating where the color begins. the list
# better be sorted!
attr_accessor :points
# for linear gradient:
attr_accessor :angle # 0..2.pi
# for radial gradient:
attr_accessor :gradientCenter # CGPoint
def setup
self.startColor = :white
self.finalColor = :black
self.colors = nil
self.points = nil
self.angle = Math::PI / 2
self.type = :linear
self.backgroundColor = UIColor.clearColor
end
def drawRect(rect)
case self.type
when :linear
drawLinearGradient
when :radial
drawRadialGradient
end
end
def colors
if @colors
@colors
else
[self.startColor, self.finalColor]
end
end
def points
if @points
@points
else
colors.each_index.map do |i|
i.to_f / (colors.length - 1).to_f
end
end
end
def drawLinearGradient
w = CGRectGetWidth(self.frame)
h = CGRectGetHeight(self.frame)
if w == 0 or h == 0
return
end
colors = self.colors.dup
points = self.points.dup
if colors.length != points.length
raise "Number of points (#{points.inspect}) does not match number of colors (#{colors.inspect})"
end
# colors is a list of `UIColor`s, but we will need a list of `CGColorRef`s
cgcolors = colors.map { |color|
color = color.uicolor unless color.is_a? UIColor
color.CGColor
}
# simplify things a little by getting an angle between 0...2π
angle = self.angle % (Math::PI * 2)
# CG coordinate system has angles increasing clockwise, but angles should
# increase counter-clockwise
angle = Math::PI * 2 - angle
if angle > Math::PI
angle = angle - Math::PI
cgcolors.reverse!
points = points.map{ |p| 1 - p }.reverse
end
# make sure all the points are ascending
points.inject { |p1, p2|
if p1 > p2
raise "Points must be in ascending order (not #{points.inspect})"
end
p2
}
context = UIGraphicsGetCurrentContext()
color_space = CGColorSpaceCreateDeviceRGB()
center = CGPoint.new(w/2, h/2)
radius = Math.hypot(center.x, center.y)
r_angle = Math.atan2(center.y, center.x)
if angle < Math::PI/2 || angle > 3*Math::PI/2
inner_angle = r_angle - (angle)
else
inner_angle = r_angle - (Math::PI - angle)
end
l = radius * Math.cos(inner_angle)
start_point = center + CGPoint.new(l * Math.cos(angle - Math::PI),
l * Math.sin(angle - Math::PI))
final_point = center + CGPoint.new(l * Math.cos(angle),
l * Math.sin(angle))
gradient = CGGradientCreateWithColors(color_space, cgcolors, points.to_pointer(:float))
CGContextDrawLinearGradient(context, gradient, start_point, final_point, 0)
end
def drawRadialGradient
end
def to_s(options={})
super options.merge(inner: {colors: colors, points: points, angle: angle})
end
end
end
use attr_updates accessors, so that setNeedsDisplay is fired
module GM
class GradientView < UIView
include SetupView
# list of colors, you can control spacing with the `points` array
attr_updates :colors
# alias for colors[0], ignored if you assign a `colors` array.
attr_updates :startColor
# alias for colors[points.length - 1], ignored if you assign a `colors` array.
attr_updates :finalColor
# :linear, :radial
attr_updates :type
# array of numbers from 0..1, indicating where the color begins. the list
# better be sorted!
attr_updates :points
# for linear gradient:
attr_updates :angle # 0..2.pi
# for radial gradient:
attr_updates :gradientCenter # CGPoint
def setup
self.startColor = :white
self.finalColor = :black
self.colors = nil
self.points = nil
self.angle = Math::PI / 2
self.type = :linear
self.backgroundColor = UIColor.clearColor
end
def drawRect(rect)
case self.type
when :linear
drawLinearGradient
when :radial
drawRadialGradient
end
end
def colors
if @colors
@colors
else
[self.startColor, self.finalColor]
end
end
def points
if @points
@points
else
colors.each_index.map do |i|
i.to_f / (colors.length - 1).to_f
end
end
end
def drawLinearGradient
w = CGRectGetWidth(self.frame)
h = CGRectGetHeight(self.frame)
if w == 0 or h == 0
return
end
colors = self.colors.dup
points = self.points.dup
if colors.length != points.length
raise "Number of points (#{points.inspect}) does not match number of colors (#{colors.inspect})"
end
# colors is a list of `UIColor`s, but we will need a list of `CGColorRef`s
cgcolors = colors.map { |color|
color = color.uicolor unless color.is_a? UIColor
color.CGColor
}
# simplify things a little by getting an angle between 0...2π
angle = self.angle % (Math::PI * 2)
# CG coordinate system has angles increasing clockwise, but angles should
# increase counter-clockwise
angle = Math::PI * 2 - angle
if angle > Math::PI
angle = angle - Math::PI
cgcolors.reverse!
points = points.map{ |p| 1 - p }.reverse
end
# make sure all the points are ascending
points.inject { |p1, p2|
if p1 > p2
raise "Points must be in ascending order (not #{points.inspect})"
end
p2
}
context = UIGraphicsGetCurrentContext()
color_space = CGColorSpaceCreateDeviceRGB()
center = CGPoint.new(w/2, h/2)
radius = Math.hypot(center.x, center.y)
r_angle = Math.atan2(center.y, center.x)
if angle < Math::PI/2 || angle > 3*Math::PI/2
inner_angle = r_angle - (angle)
else
inner_angle = r_angle - (Math::PI - angle)
end
l = radius * Math.cos(inner_angle)
start_point = center + CGPoint.new(l * Math.cos(angle - Math::PI),
l * Math.sin(angle - Math::PI))
final_point = center + CGPoint.new(l * Math.cos(angle),
l * Math.sin(angle))
gradient = CGGradientCreateWithColors(color_space, cgcolors, points.to_pointer(:float))
CGContextDrawLinearGradient(context, gradient, start_point, final_point, 0)
end
def drawRadialGradient
end
def to_s(options={})
super options.merge(inner: {colors: colors, points: points, angle: angle})
end
end
end
|
require 'date'
class DateTime
attr_accessor :hh_year, :hh_month, :hh_day
## These are the years (mod 400) that have Xtr
XTR_YEARS = [
4, 9, 15, 20, 26, 32, 37, 43, 48, 54, 60, 65, 71, 76,
82, 88, 93, 99, 105, 111, 116, 122, 128, 133, 139, 144, 150, 156,
161, 167, 172, 178, 184, 189, 195, 201, 207, 212, 218, 224, 229, 235,
240, 246, 252, 257, 263, 268, 274, 280, 285, 291, 296, 303, 308, 314,
320, 325, 331, 336, 342, 348, 353, 359, 364, 370, 376, 381, 387, 392,
398
]
## number of days since January 1 (of Hanke-Henry year) for month before
## the first of the H-H month
DAYS_SINCE_NEW_YEAR = {
:"1" => 0,
:"2" => 30,
:"3" => 60,
:"4" => 91,
:"5" => 121,
:"6" => 151,
:"7" => 182,
:"8" => 212,
:"9" => 242,
:"10" => 273,
:"11" => 303,
:"12" => 333,
:x => 364
}
def self.hh(*args)
hh_year, hh_month, hh_day, hour, minute, second = _validate(*args)
dt = DateTime.jd _to_julian_date(hh_year, hh_month, hh_day)
dt.hh_year = hh_year
dt.hh_month = hh_month
dt.hh_day = hh_day
dt
end
# Returns true if year designated by +DateTime+ object
# contains the Xtr week
def xtr?
::HankeHenryDate.xtr?(@hh_year)
end
private
## translate args given in Hanke-Henry calendar into Julian date,
## so that it can be passed to Date.jd
def self._to_julian_date(year, month, day)
month = month.to_s.to_sym # normalize
## Hanke-Henry and Gregorian days will sync up every 400 years
## Compute the number of days since the sync
days_since_last_sync = 365 * ( year % 400 ) + _xtrs(year) * 7 +
DAYS_SINCE_NEW_YEAR[month] + day - 1
days_since_last_sync + 2451544.5 # offset for January 1, 2000
end
# Xtrs have we seen in years since year 400N
def self._xtrs(year)
XTR_YEARS.select { |y| y < year % 400 }.length
end
def self._validate(*args)
if args.length > 8
raise ArgumentError, "Too many arguments (#{args.length}) given"
end
year, month, day, hour, minute, second = args
# defaults
month ||= 1
day ||= 1
hour ||= 0
minute ||= 0
second ||= 0
if month.to_s.to_sym == :x
days_in_month = 7
unless ::HankeHenryDate.xtr?(year)
raise ArgumentError, "Hanke-Henry year #{year} does not have Xtr"
end
elsif month % 3 == 0 # month is 3, 6, 9 or 12
days_in_month = 31
else
days_in_month = 30
end
if day && day > days_in_month
raise ArgumentError, "Invalid day #{day}: only #{days_in_month} days in month #{month}"
end
[ year, month, day, hour, minute, second ]
end
end
Fix comment.
require 'date'
class DateTime
attr_accessor :hh_year, :hh_month, :hh_day
## These are the years (mod 400) that have Xtr
XTR_YEARS = [
4, 9, 15, 20, 26, 32, 37, 43, 48, 54, 60, 65, 71, 76,
82, 88, 93, 99, 105, 111, 116, 122, 128, 133, 139, 144, 150, 156,
161, 167, 172, 178, 184, 189, 195, 201, 207, 212, 218, 224, 229, 235,
240, 246, 252, 257, 263, 268, 274, 280, 285, 291, 296, 303, 308, 314,
320, 325, 331, 336, 342, 348, 353, 359, 364, 370, 376, 381, 387, 392,
398
]
## number of days since January 1 (of Hanke-Henry year) before
## the first of the H-H month
DAYS_SINCE_NEW_YEAR = {
:"1" => 0,
:"2" => 30,
:"3" => 60,
:"4" => 91,
:"5" => 121,
:"6" => 151,
:"7" => 182,
:"8" => 212,
:"9" => 242,
:"10" => 273,
:"11" => 303,
:"12" => 333,
:x => 364
}
def self.hh(*args)
hh_year, hh_month, hh_day, hour, minute, second = _validate(*args)
dt = DateTime.jd _to_julian_date(hh_year, hh_month, hh_day)
dt.hh_year = hh_year
dt.hh_month = hh_month
dt.hh_day = hh_day
dt
end
# Returns true if year designated by +DateTime+ object
# contains the Xtr week
def xtr?
::HankeHenryDate.xtr?(@hh_year)
end
private
## translate args given in Hanke-Henry calendar into Julian date,
## so that it can be passed to Date.jd
def self._to_julian_date(year, month, day)
month = month.to_s.to_sym # normalize
## Hanke-Henry and Gregorian days will sync up every 400 years
## Compute the number of days since the sync
days_since_last_sync = 365 * ( year % 400 ) + _xtrs(year) * 7 +
DAYS_SINCE_NEW_YEAR[month] + day - 1
days_since_last_sync + 2451544.5 # offset for January 1, 2000
end
# Xtrs have we seen in years since year 400N
def self._xtrs(year)
XTR_YEARS.select { |y| y < year % 400 }.length
end
def self._validate(*args)
if args.length > 8
raise ArgumentError, "Too many arguments (#{args.length}) given"
end
year, month, day, hour, minute, second = args
# defaults
month ||= 1
day ||= 1
hour ||= 0
minute ||= 0
second ||= 0
if month.to_s.to_sym == :x
days_in_month = 7
unless ::HankeHenryDate.xtr?(year)
raise ArgumentError, "Hanke-Henry year #{year} does not have Xtr"
end
elsif month % 3 == 0 # month is 3, 6, 9 or 12
days_in_month = 31
else
days_in_month = 30
end
if day && day > days_in_month
raise ArgumentError, "Invalid day #{day}: only #{days_in_month} days in month #{month}"
end
[ year, month, day, hour, minute, second ]
end
end
|
# frozen_string_literal: true
require 'optparse'
require 'hbtrack/command'
require 'hbtrack/importer/streaks_importer'
require 'hbtrack/importer/hbtrack_importer'
require 'hbtrack/database/sequel_store'
module Hbtrack
# ImportCommand class is responsible for handling
# `hbtrack import` command in CLI
class ImportCommand < Command
def initialize(file_path = 'hbtrack.db', options)
@importer = Hbtrack::Importer::HbtrackImporter.new
# To allow creation of test.db
@store = Hbtrack::Database::SequelStore.new(name: file_path)
super(nil, options)
end
def execute
import(@names[0])
super
end
def create_option_parser
OptionParser.new do |opts|
opts.banner = 'Usage: hbtrack import <file_path> <options>'
opts.on('--streaks', 'Import data from streaks') do
@importer = Hbtrack::Importer::StreaksImporter.new
end
end
end
def import(file_path)
@importer.import_from(file_path)
@importer.store_in(local_store)
end
end
end
Modify import_command.rb file mode
# frozen_string_literal: true
require 'optparse'
require 'hbtrack/command'
require 'hbtrack/importer/streaks_importer'
require 'hbtrack/importer/hbtrack_importer'
require 'hbtrack/database/sequel_store'
module Hbtrack
# ImportCommand class is responsible for handling
# `hbtrack import` command in CLI
class ImportCommand < Command
def initialize(file_path = 'hbtrack.db', options)
@importer = Hbtrack::Importer::HbtrackImporter.new
# To allow creation of test.db
@store = Hbtrack::Database::SequelStore.new(name: file_path)
super(nil, options)
end
def execute
import(@names[0])
super
end
def create_option_parser
OptionParser.new do |opts|
opts.banner = 'Usage: hbtrack import <file_path> <options>'
opts.on('--streaks', 'Import data from streaks') do
@importer = Hbtrack::Importer::StreaksImporter.new
end
end
end
def import(file_path)
@importer.import_from(file_path)
@importer.store_in(local_store)
end
end
end
|
module KeplerProcessor
class Matcher < MultifileTaskBase
attr_accessor :output_data
include Saveable
def execute!
@options[:file_columns] = (0..4).to_a
super do
produce_arrays
patch_early_sc
match_observation_cycle
report_missing_entries
sort_results
save!
end
end
private
def produce_arrays
@observation_index = if @runners.first.input_filename_without_path.include? 'observation_index'
@runners.first.input_data
else
@runners.last.input_data
end
@fourier_information = if @runners.first.input_filename_without_path.include? 'fourier_information'
@runners.first.input_data
else
@runners.last.input_data
end
@observation_index.each { |line| line.compact! }
end
def patch_early_sc
@observation_index.each do |line|
line.first.gsub!("Q0","Q0.0") if line.first.include? "SC,Q0,"
line.first.gsub("Q1","Q1.1") if line.first.include? "SC,Q1,"
end
end
def match_observation_cycle
puts "Cross-examining input files. Transfering fourier information..."
@output_data ||= []
@fourier_information.uniq!
@fourier_information.each do |line|
line[0].gsub!('kic','')
@observation_index.each_with_index do |observation_cycle, index|
if observation_cycle.first.include?(line[0]) && observation_cycle.first.include?(line[1])
observation_cycle.first.insert(-1, ",#{line[2]},#{line[3]},#{line[4]}")
@output_data << observation_cycle.first.split(",").to_a
@observation_index.delete_at index
break
end
end
end
leftovers = @observation_index.map { |line| line.first.split(",").to_a }
leftovers.each { |line| @output_data << line }
puts "Transfer of Fourier information complete. Identifying any observations missing information..."
end
def report_missing_entries
@output_data.each do |line|
puts "\t#{line[0]} (#{line[2]}) does not have the expected number of attributes. Fourier information is probably not appended." if line.size < 10
end
end
def sort_results
puts "Sorting the results by KIC number and season..."
@output_data.sort! do |a, b|
comparison_result = a[0] <=> b[0]
comparison_result = a[2] <=> b[2] if comparison_result == 0
comparison_result
end
puts "Removing duplicates..."
@output_data.uniq!
end
def output_filename
"matched_table.txt"
end
end
end
Correct typo: missing "!" in gsub!
module KeplerProcessor
class Matcher < MultifileTaskBase
attr_accessor :output_data
include Saveable
def execute!
@options[:file_columns] = (0..4).to_a
super do
produce_arrays
patch_early_sc
match_observation_cycle
report_missing_entries
sort_results
save!
end
end
private
def produce_arrays
@observation_index = if @runners.first.input_filename_without_path.include? 'observation_index'
@runners.first.input_data
else
@runners.last.input_data
end
@fourier_information = if @runners.first.input_filename_without_path.include? 'fourier_information'
@runners.first.input_data
else
@runners.last.input_data
end
@observation_index.each { |line| line.compact! }
end
def patch_early_sc
@observation_index.each do |line|
line.first.gsub!("Q0","Q0.0") if line.first.include? "SC,Q0,"
line.first.gsub!("Q1","Q1.1") if line.first.include? "SC,Q1,"
end
end
def match_observation_cycle
puts "Cross-examining input files. Transfering fourier information..."
@output_data ||= []
@fourier_information.uniq!
@fourier_information.each do |line|
line[0].gsub!('kic','')
@observation_index.each_with_index do |observation_cycle, index|
if observation_cycle.first.include?(line[0]) && observation_cycle.first.include?(line[1])
observation_cycle.first.insert(-1, ",#{line[2]},#{line[3]},#{line[4]}")
@output_data << observation_cycle.first.split(",").to_a
@observation_index.delete_at index
break
end
end
end
leftovers = @observation_index.map { |line| line.first.split(",").to_a }
leftovers.each { |line| @output_data << line }
puts "Transfer of Fourier information complete. Identifying any observations missing information..."
end
def report_missing_entries
@output_data.each do |line|
puts "\t#{line[0]} (#{line[2]}) does not have the expected number of attributes. Fourier information is probably not appended." if line.size < 10
end
end
def sort_results
puts "Sorting the results by KIC number and season..."
@output_data.sort! do |a, b|
comparison_result = a[0] <=> b[0]
comparison_result = a[2] <=> b[2] if comparison_result == 0
comparison_result
end
puts "Removing duplicates..."
@output_data.uniq!
end
def output_filename
"matched_table.txt"
end
end
end
|
module KomachiConcernedWith
VERSION = "0.1.1"
end
Bump to v0.1.2
module KomachiConcernedWith
VERSION = "0.1.2"
end
|
module LayerVault
module Configurable
attr_accessor :access_token, :api_endpoint, :user_agent, :middleware
class << self
def keys
@keys ||= [
:access_token,
:api_endpoint,
:user_agent,
:middleware
]
end
end
def configure
yield self
end
def reset!
LayerVault::Configurable.keys.each do |key|
instance_variable_set(:"@#{key}", LayerVault::Default.options[key])
end
self
end
alias setup reset!
def api_endpoint
::File.join(@api_endpoint, "")
end
private
def options
Hash[LayerVault::Configurable.keys.map{|key| [key, instance_variable_get(:"@#{key}")]}]
end
end
end
Fixes #4 - API Endpoint doesn't get set through LayerVault.client.api_endpoint
module LayerVault
module Configurable
attr_accessor :access_token, :user_agent, :middleware
attr_writer :api_endpoint
class << self
def keys
@keys ||= [
:access_token,
:api_endpoint,
:user_agent,
:middleware
]
end
end
def configure
yield self
end
def reset!
LayerVault::Configurable.keys.each do |key|
instance_variable_set(:"@#{key}", LayerVault::Default.options[key])
end
self
end
alias setup reset!
def api_endpoint
::File.join(@api_endpoint, "")
end
private
def options
Hash[LayerVault::Configurable.keys.map{|key| [key, instance_variable_get(:"@#{key}")]}]
end
end
end
|
module Less
module Rails
module SemanticUI
VERSION = '1.12.0.0'
end
end
end
Bump version
module Less
module Rails
module SemanticUI
VERSION = '1.12.0.1'
end
end
end |
module SmartAnswer
class FlowRegistry
class NotFound < StandardError; end
def initialize(load_path = nil)
@load_path = Pathname.new(load_path) || Rails.root.join('lib', 'flows')
preload_flows! if Rails.env.production?
end
def find(name)
raise NotFound unless available?(name)
absolute_path = @load_path.join("#{name}.rb").to_s
preloaded(name) || Flow.new do
eval(File.read(absolute_path), binding, absolute_path)
end
end
def available?(name)
available_flows.include?(name)
end
def available_flows
Dir[@load_path.join('*.rb')].map do |path|
File.basename(path).gsub(/\.rb$/, '')
end
end
def preload_flows!
@preloaded = {}
available_flows.each do |flow_name|
@preloaded[flow_name] = find(flow_name)
end
end
def preloaded(name)
@preloaded && @preloaded[name]
end
end
end
Bug fix again.
module SmartAnswer
class FlowRegistry
class NotFound < StandardError; end
def initialize(load_path = nil)
@load_path = Pathname.new(load_path || Rails.root.join('lib', 'flows'))
preload_flows! if Rails.env.production?
end
def find(name)
raise NotFound unless available?(name)
absolute_path = @load_path.join("#{name}.rb").to_s
preloaded(name) || Flow.new do
eval(File.read(absolute_path), binding, absolute_path)
end
end
def available?(name)
available_flows.include?(name)
end
def available_flows
Dir[@load_path.join('*.rb')].map do |path|
File.basename(path).gsub(/\.rb$/, '')
end
end
def preload_flows!
@preloaded = {}
available_flows.each do |flow_name|
@preloaded[flow_name] = find(flow_name)
end
end
def preloaded(name)
@preloaded && @preloaded[name]
end
end
end |
SMART_ANSWER_FLOW_NAMES = %w(
additional-commodity-code
am-i-getting-minimum-wage
appeal-a-benefits-decision
apply-tier-4-visa
benefit-cap-calculator
benefits-abroad
calculate-agricultural-holiday-entitlement
calculate-married-couples-allowance
calculate-state-pension
calculate-your-child-maintenance
calculate-your-holiday-entitlement
check-uk-visa-v2
check-uk-visa
childcare-costs-for-tax-credits
energy-grants-calculator
estimate-self-assessment-penalties
help-if-you-are-arrested-abroad
inherits-someone-dies-without-will
legalisation-document-checker
maternity-paternity-calculator
minimum-wage-calculator-employers
overseas-passports
pip-checker
plan-adoption-leave
register-a-birth
register-a-death
report-a-lost-or-stolen-passport
simplified-expenses-checker-v2
simplified-expenses-checker
state-pension-through-partner
state-pension-topup
student-finance-calculator
towing-rules
uk-benefits-abroad
vat-payment-deadlines
)
SMART_ANSWER_FLOW_NAMES.each do |name|
require "smart_answer_flows/#{name}"
end
module SmartAnswer
class FlowRegistry
class NotFound < StandardError; end
FLOW_DIR = Rails.root.join('lib', 'smart_answer_flows')
def self.instance
@instance ||= new(FLOW_REGISTRY_OPTIONS)
end
def self.reset_instance
@instance = nil
end
def initialize(options = {})
@load_path = Pathname.new(options[:smart_answer_load_path] || FLOW_DIR)
@show_drafts = options.fetch(:show_drafts, false)
@show_transitions = options.fetch(:show_transitions, false)
preload_flows! if Rails.env.production? or options[:preload_flows]
end
attr_reader :load_path
def find(name)
raise NotFound unless available?(name)
find_by_name(name) or raise NotFound
end
def flows
available_flows.map { |s| find_by_name(s) }.compact
end
def available_flows
Dir[@load_path.join('*.rb')].map do |path|
File.basename(path, ".rb")
end
end
private
def find_by_name(name)
flow = @preloaded ? preloaded(name) : build_flow(name)
return nil if flow && flow.draft? && !@show_drafts
return nil if flow && flow.transition? && !@show_transitions
flow
end
def available?(name)
if @preloaded
@preloaded.has_key?(name)
else
available_flows.include?(name)
end
end
def build_flow(name)
if SMART_ANSWER_FLOW_NAMES.include?(name)
class_prefix = name.gsub("-", "_").camelize
load "lib/smart_answer_flows/#{name}.rb" if Rails.env.development?
namespaced_class = "SmartAnswer::#{class_prefix}Flow".constantize
namespaced_class.build
else
absolute_path = @load_path.join("#{name}.rb").to_s
Flow.new do
eval(File.read(absolute_path), binding, absolute_path)
name(name)
end
end
end
def preload_flows!
@preloaded = {}
available_flows.each do |flow_name|
@preloaded[flow_name] = build_flow(flow_name)
end
end
def preloaded(name)
@preloaded && @preloaded[name]
end
end
end
Remove redundand 'lib/' in the load path
lib is on the LOAD_PATH already
SMART_ANSWER_FLOW_NAMES = %w(
additional-commodity-code
am-i-getting-minimum-wage
appeal-a-benefits-decision
apply-tier-4-visa
benefit-cap-calculator
benefits-abroad
calculate-agricultural-holiday-entitlement
calculate-married-couples-allowance
calculate-state-pension
calculate-your-child-maintenance
calculate-your-holiday-entitlement
check-uk-visa-v2
check-uk-visa
childcare-costs-for-tax-credits
energy-grants-calculator
estimate-self-assessment-penalties
help-if-you-are-arrested-abroad
inherits-someone-dies-without-will
legalisation-document-checker
maternity-paternity-calculator
minimum-wage-calculator-employers
overseas-passports
pip-checker
plan-adoption-leave
register-a-birth
register-a-death
report-a-lost-or-stolen-passport
simplified-expenses-checker-v2
simplified-expenses-checker
state-pension-through-partner
state-pension-topup
student-finance-calculator
towing-rules
uk-benefits-abroad
vat-payment-deadlines
)
SMART_ANSWER_FLOW_NAMES.each do |name|
require "smart_answer_flows/#{name}"
end
module SmartAnswer
class FlowRegistry
class NotFound < StandardError; end
FLOW_DIR = Rails.root.join('lib', 'smart_answer_flows')
def self.instance
@instance ||= new(FLOW_REGISTRY_OPTIONS)
end
def self.reset_instance
@instance = nil
end
def initialize(options = {})
@load_path = Pathname.new(options[:smart_answer_load_path] || FLOW_DIR)
@show_drafts = options.fetch(:show_drafts, false)
@show_transitions = options.fetch(:show_transitions, false)
preload_flows! if Rails.env.production? or options[:preload_flows]
end
attr_reader :load_path
def find(name)
raise NotFound unless available?(name)
find_by_name(name) or raise NotFound
end
def flows
available_flows.map { |s| find_by_name(s) }.compact
end
def available_flows
Dir[@load_path.join('*.rb')].map do |path|
File.basename(path, ".rb")
end
end
private
def find_by_name(name)
flow = @preloaded ? preloaded(name) : build_flow(name)
return nil if flow && flow.draft? && !@show_drafts
return nil if flow && flow.transition? && !@show_transitions
flow
end
def available?(name)
if @preloaded
@preloaded.has_key?(name)
else
available_flows.include?(name)
end
end
def build_flow(name)
if SMART_ANSWER_FLOW_NAMES.include?(name)
class_prefix = name.gsub("-", "_").camelize
load "smart_answer_flows/#{name}.rb" if Rails.env.development?
namespaced_class = "SmartAnswer::#{class_prefix}Flow".constantize
namespaced_class.build
else
absolute_path = @load_path.join("#{name}.rb").to_s
Flow.new do
eval(File.read(absolute_path), binding, absolute_path)
name(name)
end
end
end
def preload_flows!
@preloaded = {}
available_flows.each do |flow_name|
@preloaded[flow_name] = build_flow(flow_name)
end
end
def preloaded(name)
@preloaded && @preloaded[name]
end
end
end
|
require 'rkelly'
require 'babel/transpiler'
require 'sourcemap'
module Starscope::Lang
module Javascript
VERSION = 0
def self.match_file(name)
name.end_with?('.js')
end
def self.extract(path, contents, &block)
transform = Babel::Transpiler.transform(contents,
'stage' => 0,
'blacklist' => ['validation.react'],
'externalHelpers' => true,
'compact' => false,
'sourceMaps' => true)
map = SourceMap::Map.from_hash(transform['map'])
ast = RKelly::Parser.new.parse(transform['code'])
lines = contents.lines.to_a
return unless ast
found = extract_methods(ast, map, lines, &block)
found = extract_var_decls(ast, map, lines, found, &block)
extract_var_reads(ast, map, lines, found, &block)
end
def self.extract_methods(ast, map, lines, &block)
found = {}
ast.each do |node|
case node
when RKelly::Nodes::FunctionExprNode, RKelly::Nodes::FunctionDeclNode
line = find_line(node.range.from, map, lines, node.value)
next unless line
type = :func
type = :class if lines[line - 1].include?("class #{node.value}")
yield :defs, node.value, line_no: line, type: type
found[node.value] ||= Set.new
found[node.value].add(line)
next if type == :class
mapping = map.bsearch(SourceMap::Offset.new(node.range.to.line, node.range.to.char))
if lines[mapping.original.line - 1].include? '}'
yield :end, '}', line_no: mapping.original.line, type: type
else
yield :end, '', line_no: mapping.original.line, type: type, col: mapping.original.column
end
when RKelly::Nodes::FunctionCallNode
name = node_name(node.value)
next unless name
node = node.arguments.value[0] if name == 'require'
line = find_line(node.range.from, map, lines, name)
next unless line
found[name] ||= Set.new
found[name].add(line)
if name == 'require'
yield :requires, node.value[1...-1], line_no: line
else
yield :calls, name, line_no: line
end
end
end
found
end
def self.extract_var_decls(ast, map, lines, found, &block)
ast.each do |node|
next unless node.is_a? RKelly::Nodes::VarDeclNode
line = find_line(node.range.from, map, lines, node.name)
next unless line
if node.value.is_a?(RKelly::Nodes::AssignExprNode) &&
node.value.value.is_a?(RKelly::Nodes::FunctionCallNode) &&
node.value.value.value.is_a?(RKelly::Nodes::ResolveNode) &&
node.value.value.value.value == 'require'
found[node.name] ||= Set.new
found[node.name].add(line)
next
end
next if found[node.name] && found[node.name].include?(line)
yield :defs, node.name, line_no: line
found[node.name] ||= Set.new
found[node.name].add(line)
end
found
end
def self.extract_var_reads(ast, map, lines, found, &block)
ast.each do |node|
name = node_name(node)
next unless name
line = find_line(node.range.from, map, lines, name)
next unless line
next if found[name] && found[name].include?(line)
yield :reads, name, line_no: line
end
end
def self.node_name(node)
case node
when RKelly::Nodes::DotAccessorNode
node.accessor
when RKelly::Nodes::ResolveNode
node.value
end
end
def self.find_line(from, map, lines, name)
mapping = map.bsearch(SourceMap::Offset.new(from.line, from.char))
return unless mapping
line = lines[mapping.original.line - 1]
return unless line.include?(name) || (name == 'require' && line.include?('import'))
mapping.original.line
end
end
end
Bump JS extractor version for release
require 'rkelly'
require 'babel/transpiler'
require 'sourcemap'
module Starscope::Lang
module Javascript
VERSION = 1
def self.match_file(name)
name.end_with?('.js')
end
def self.extract(path, contents, &block)
transform = Babel::Transpiler.transform(contents,
'stage' => 0,
'blacklist' => ['validation.react'],
'externalHelpers' => true,
'compact' => false,
'sourceMaps' => true)
map = SourceMap::Map.from_hash(transform['map'])
ast = RKelly::Parser.new.parse(transform['code'])
lines = contents.lines.to_a
return unless ast
found = extract_methods(ast, map, lines, &block)
found = extract_var_decls(ast, map, lines, found, &block)
extract_var_reads(ast, map, lines, found, &block)
end
def self.extract_methods(ast, map, lines, &block)
found = {}
ast.each do |node|
case node
when RKelly::Nodes::FunctionExprNode, RKelly::Nodes::FunctionDeclNode
line = find_line(node.range.from, map, lines, node.value)
next unless line
type = :func
type = :class if lines[line - 1].include?("class #{node.value}")
yield :defs, node.value, line_no: line, type: type
found[node.value] ||= Set.new
found[node.value].add(line)
next if type == :class
mapping = map.bsearch(SourceMap::Offset.new(node.range.to.line, node.range.to.char))
if lines[mapping.original.line - 1].include? '}'
yield :end, '}', line_no: mapping.original.line, type: type
else
yield :end, '', line_no: mapping.original.line, type: type, col: mapping.original.column
end
when RKelly::Nodes::FunctionCallNode
name = node_name(node.value)
next unless name
node = node.arguments.value[0] if name == 'require'
line = find_line(node.range.from, map, lines, name)
next unless line
found[name] ||= Set.new
found[name].add(line)
if name == 'require'
yield :requires, node.value[1...-1], line_no: line
else
yield :calls, name, line_no: line
end
end
end
found
end
def self.extract_var_decls(ast, map, lines, found, &block)
ast.each do |node|
next unless node.is_a? RKelly::Nodes::VarDeclNode
line = find_line(node.range.from, map, lines, node.name)
next unless line
if node.value.is_a?(RKelly::Nodes::AssignExprNode) &&
node.value.value.is_a?(RKelly::Nodes::FunctionCallNode) &&
node.value.value.value.is_a?(RKelly::Nodes::ResolveNode) &&
node.value.value.value.value == 'require'
found[node.name] ||= Set.new
found[node.name].add(line)
next
end
next if found[node.name] && found[node.name].include?(line)
yield :defs, node.name, line_no: line
found[node.name] ||= Set.new
found[node.name].add(line)
end
found
end
def self.extract_var_reads(ast, map, lines, found, &block)
ast.each do |node|
name = node_name(node)
next unless name
line = find_line(node.range.from, map, lines, name)
next unless line
next if found[name] && found[name].include?(line)
yield :reads, name, line_no: line
end
end
def self.node_name(node)
case node
when RKelly::Nodes::DotAccessorNode
node.accessor
when RKelly::Nodes::ResolveNode
node.value
end
end
def self.find_line(from, map, lines, name)
mapping = map.bsearch(SourceMap::Offset.new(from.line, from.char))
return unless mapping
line = lines[mapping.original.line - 1]
return unless line.include?(name) || (name == 'require' && line.include?('import'))
mapping.original.line
end
end
end
|
module LiterateRandomizer
class Randomizer
DEFAULT_PUNCTUATION_DISTRIBUTION = %w{. . . . . . . . . . . . . . . . ? !}
PREPOSITION_REGEX = /^(had|the|to|or|and|a|in|that|it|if|of|is|was|for|on|as|an|your|our|my|per|until)$/
# The source of all random values. Must implement: #rand(limit)
#
# Default: Random.new()
attr_accessor :randomizer
# To end setences, one of the strings in this array is selected at random (uniform-distribution)
#
# Default: DEFAULT_PUNCTUATION_DISTRIBUTION
attr_accessor :punctuation_distribution
# an instance of SourceParser attached to the source_material
attr_reader :source_parser
# The random-generator model
attr_reader :model
private
def extend_trailing_preposition(max_words,words)
while words.length < max_words && words[-1] && words[-1][PREPOSITION_REGEX]
words << model.next_word(words[-1],randomizer)
end
words
end
public
# Initialize a new instance. Each Markov randomizer instance can run against its own source_material.
#
# Options:
#
# * :source_material => string OR
# * :source_material_file => filename
# * :punctuation_distribution => DEFAULT_PUNCTUATION_DISTRIBUTION
# punctiation is randomly selected from this array
#
# Advanced options: (primiarilly for testing)
#
# * :randomizer => Random.new # must respond to #rand(limit)
# * :source_parser => SourceParser.new options
# * :model => MarkovModel.new :source_parser => source_parser
def initialize(options={})
@init_options = options
@randomizer = randomizer || Random.new
@punctuation_distribution = options[:punctuation_distribution] || DEFAULT_PUNCTUATION_DISTRIBUTION
@source_parser = options[:source_parser] || SourceParser.new(options)
@model = options[:model] || MarkovModel.new(:source_parser => source_parser)
end
# Returns a quick summary of the instance.
def inspect
"#<#{self.class}: #{model.words.length} words, #{model.markov_chains.length} word-chains, #{model.first_words.length} first_words>"
end
# return a random word
def word
@cached_word_keys ||= model.words.keys
@cached_word_keys[rand(@cached_word_keys.length)]
end
# return a random first word of a sentence
def first_word
@cached_first_word_keys ||= model.first_words.keys
@cached_first_word_keys[rand(@cached_first_word_keys.length)]
end
# return a random first word of one of the markov-chains
def markov_word
@cached_markov_word_keys ||= model.markov_chains.keys
@cached_markov_word_keys[rand(@cached_markov_word_keys.length)]
end
# return a random number generated by randomizer
def rand(limit=nil)
@randomizer.rand(limit)
end
# return a random end-sentence string from punctuation_distribution
def punctuation
@punctuation_distribution[rand(@punctuation_distribution.length)]
end
# return a random sentence
#
# Options:
#
# * :first_word => nil - the start word
# * :words => range or int - number of words in sentence
# * :punctuation => nil - punction to end the sentence with (nil == randomly selected from punctuation_distribution)
def sentence(options={})
word = options[:first_word] || self.first_word
num_words_option = options[:words] || (3..15)
count = Util.rand_count(num_words_option,randomizer)
punctuation = options[:punctuation] || self.punctuation
words = count.times.collect do
word.tap {word = model.next_word(word,randomizer)}
end.compact
words = extend_trailing_preposition(Util.max(num_words_option), words)
Util.capitalize words.compact.join(" ") + punctuation
end
# return a random paragraph
#
# Options:
#
# * :first_word => nil - the first word of the paragraph
# * :words => range or int - number of words in sentence
# * :sentences => range or int - number of sentences in paragraph
# * :punctuation => nil - punction to end the paragraph with (nil == randomly selected from punctuation_distribution)
def paragraph(options={})
count = Util.rand_count(options[:sentences] || (5..15),randomizer)
count.times.collect do |i|
op = options.clone
op.delete :punctuation unless i==count-1
op.delete :first_word unless i==0
sentence op
end.join(" ")
end
# return random paragraphs
#
# Options:
#
# * :first_word => nil - the first word of the paragraph
# * :words => range or int - number of words in sentence
# * :sentences => range or int - number of sentences in paragraph
# * :paragraphs => range or int - number of paragraphs in paragraph
# * :join => "\n\n" - join the paragraphs. if :join => false, returns an array of the paragraphs
# * :punctuation => nil - punction to end the paragraph with (nil == randomly selected from punctuation_distribution)
def paragraphs(options={})
count = Util.rand_count(options[:paragraphs] || (3..5),randomizer)
join_str = options[:join]
res = count.times.collect do |i|
op = options.clone
op.delete :punctuation unless i==count-1
op.delete :first_word unless i==0
paragraph op
end
join_str!=false ? res.join(join_str || "\n\n") : res
end
end
end
removed unused method
module LiterateRandomizer
class Randomizer
DEFAULT_PUNCTUATION_DISTRIBUTION = %w{. . . . . . . . . . . . . . . . ? !}
PREPOSITION_REGEX = /^(had|the|to|or|and|a|in|that|it|if|of|is|was|for|on|as|an|your|our|my|per|until)$/
# The source of all random values. Must implement: #rand(limit)
#
# Default: Random.new()
attr_accessor :randomizer
# To end setences, one of the strings in this array is selected at random (uniform-distribution)
#
# Default: DEFAULT_PUNCTUATION_DISTRIBUTION
attr_accessor :punctuation_distribution
# an instance of SourceParser attached to the source_material
attr_reader :source_parser
# The random-generator model
attr_reader :model
private
def extend_trailing_preposition(max_words,words)
while words.length < max_words && words[-1] && words[-1][PREPOSITION_REGEX]
words << model.next_word(words[-1],randomizer)
end
words
end
public
# Initialize a new instance. Each Markov randomizer instance can run against its own source_material.
#
# Options:
#
# * :source_material => string OR
# * :source_material_file => filename
# * :punctuation_distribution => DEFAULT_PUNCTUATION_DISTRIBUTION
# punctiation is randomly selected from this array
#
# Advanced options: (primiarilly for testing)
#
# * :randomizer => Random.new # must respond to #rand(limit)
# * :source_parser => SourceParser.new options
# * :model => MarkovModel.new :source_parser => source_parser
def initialize(options={})
@init_options = options
@randomizer = randomizer || Random.new
@punctuation_distribution = options[:punctuation_distribution] || DEFAULT_PUNCTUATION_DISTRIBUTION
@source_parser = options[:source_parser] || SourceParser.new(options)
@model = options[:model] || MarkovModel.new(:source_parser => source_parser)
end
# Returns a quick summary of the instance.
def inspect
"#<#{self.class}: #{model.words.length} words, #{model.markov_chains.length} word-chains, #{model.first_words.length} first_words>"
end
# return a random word
def word
@cached_word_keys ||= model.words.keys
@cached_word_keys[rand(@cached_word_keys.length)]
end
# return a random first word of a sentence
def first_word
@cached_first_word_keys ||= model.first_words.keys
@cached_first_word_keys[rand(@cached_first_word_keys.length)]
end
# return a random number generated by randomizer
def rand(limit=nil)
@randomizer.rand(limit)
end
# return a random end-sentence string from punctuation_distribution
def punctuation
@punctuation_distribution[rand(@punctuation_distribution.length)]
end
# return a random sentence
#
# Options:
#
# * :first_word => nil - the start word
# * :words => range or int - number of words in sentence
# * :punctuation => nil - punction to end the sentence with (nil == randomly selected from punctuation_distribution)
def sentence(options={})
word = options[:first_word] || self.first_word
num_words_option = options[:words] || (3..15)
count = Util.rand_count(num_words_option,randomizer)
punctuation = options[:punctuation] || self.punctuation
words = count.times.collect do
word.tap {word = model.next_word(word,randomizer)}
end.compact
words = extend_trailing_preposition(Util.max(num_words_option), words)
Util.capitalize words.compact.join(" ") + punctuation
end
# return a random paragraph
#
# Options:
#
# * :first_word => nil - the first word of the paragraph
# * :words => range or int - number of words in sentence
# * :sentences => range or int - number of sentences in paragraph
# * :punctuation => nil - punction to end the paragraph with (nil == randomly selected from punctuation_distribution)
def paragraph(options={})
count = Util.rand_count(options[:sentences] || (5..15),randomizer)
count.times.collect do |i|
op = options.clone
op.delete :punctuation unless i==count-1
op.delete :first_word unless i==0
sentence op
end.join(" ")
end
# return random paragraphs
#
# Options:
#
# * :first_word => nil - the first word of the paragraph
# * :words => range or int - number of words in sentence
# * :sentences => range or int - number of sentences in paragraph
# * :paragraphs => range or int - number of paragraphs in paragraph
# * :join => "\n\n" - join the paragraphs. if :join => false, returns an array of the paragraphs
# * :punctuation => nil - punction to end the paragraph with (nil == randomly selected from punctuation_distribution)
def paragraphs(options={})
count = Util.rand_count(options[:paragraphs] || (3..5),randomizer)
join_str = options[:join]
res = count.times.collect do |i|
op = options.clone
op.delete :punctuation unless i==count-1
op.delete :first_word unless i==0
paragraph op
end
join_str!=false ? res.join(join_str || "\n\n") : res
end
end
end
|
require 'forwardable'
module TableTransform
class Properties
extend Forwardable
def_delegators :@props, :delete, :each, :[], :[]=
def initialize(init_properties = {})
validate(init_properties)
@props = init_properties.clone
end
def validate(properties)
raise 'Default properties must be a hash' unless properties.is_a? Hash
end
def to_h
@props.clone
end
def update(properties)
validate(properties)
@props.merge! properties
end
def reset(properties)
validate(properties)
@props = properties
end
end
end
Removed []= operator from properties
require 'forwardable'
module TableTransform
class Properties
extend Forwardable
def_delegators :@props, :delete, :each, :[]
def initialize(init_properties = {})
validate(init_properties)
@props = init_properties.clone
end
def validate(properties)
raise 'Default properties must be a hash' unless properties.is_a? Hash
end
def to_h
@props.clone
end
def update(properties)
validate(properties)
@props.merge! properties
end
def reset(properties)
validate(properties)
@props = properties
end
end
end
|
require 'active_record/fixtures'
require 'tasks/color'
desc "create initial records for enju_library"
namespace :enju_library do
task :setup => :environment do
ActiveRecord::FixtureSet.create_fixtures('db/fixtures/enju_library', 'library_groups')
Dir.glob(Rails.root.to_s + '/db/fixtures/enju_library/**/*.yml').each do |file|
dirname = File.basename(File.dirname file)
dirname = nil if dirname == "enju_library"
basename = [ dirname, File.basename(file, ".*") ].compact
basename = File.join(*basename)
next if basename == 'library_groups'
ActiveRecord::FixtureSet.create_fixtures('db/fixtures/enju_library', basename)
end
Shelf.create!(name: 'web', library: Library.find_by(name: 'web'))
Shelf.create!(name: 'first_shelf', library: Library.find_by(name: 'yours'))
end
desc "upgrade enju_library to 1.3"
task :upgrade_to_13 => :environment do
Rake::Task['statesman:backfill_most_recent'].invoke('UserExportFile')
Rake::Task['statesman:backfill_most_recent'].invoke('UserImportFile')
library_group = LibraryGroup.site_config
library_group.user = User.find(1)
login_ja = <<"EOS"
このシステムはオープンソース図書館システム Next-L Enju です。このメッセージは管理者によって変更することができます。
EOS
login_en = <<"EOS"
Next-L Enju, an open-source integrated library system. You can edit this message after logging in as Administrator.
EOS
footer_ja = <<"EOS"
[Next-L Enju Leaf __VERSION__](https://github.com/next-l/enju_leaf), オープンソース統合図書館システム
Developed by [Kosuke Tanabe](https://github.com/nabeta) and [Project Next-L](https://www.next-l.jp) \| [このシステムについて](/page/about) \| [不具合を報告する](https://github.com/next-l/enju_leaf/issues) \| [マニュアル](https://next-l.github.com/manual/1.3/)
EOS
footer_en = <<"EOS"
[Next-L Enju Leaf __VERSION__](https://github.com/next-l/enju_leaf), an open source integrated library system
Developed by [Kosuke Tanabe](https://github.com/nabeta) and [Project Next-L](https://www.next-l.jp) \| [About this system](/page/about) \| [Report bugs](https://github.com/next-l/enju_leaf/issues) \| [Manual](https://next-l.github.com/manual/1.3/)
EOS
library_group.login_banner_ja = login_ja if library_group.login_banner_ja.blank?
library_group.login_banner_en = login_en if library_group.login_banner_en.blank?
library_group.footer_banner_ja = footer_ja if library_group.footer_banner_ja.blank?
library_group.footer_banner_en = footer_en if library_group.footer_banner_en.blank?
library_group.book_jacket_source = 'google'
library_group.screenshot_generator = 'mozshot'
library_group.save
puts 'enju_library: The upgrade completed successfully.'
end
desc "upgrade enju_biblio to 2.0"
task upgrade: :environment do
class_names = [
BudgetType, Library, LibraryGroup, RequestStatusType, RequestType,
SearchEngine, Shelf, UserGroup
]
class_names.each do |klass|
klass.find_each do |record|
I18n.available_locales.each do |locale|
next unless record.respond_to?("display_name_#{locale}")
record.update("display_name_#{locale}": YAML.safe_load(record[:display_name])[locale.to_s])
end
end
end
I18n.available_locales.each do |locale|
LibraryGroup.with_translations(locale).each do |library_group|
['login_banner', 'footer_banner'].each do |column|
library_group.update("#{column}_#{locale}": library_group.send(column.to_h))
end
end
end
puts 'enju_library: The upgrade completed successfully.'
end
end
fix enju_library:upgrade task
require 'active_record/fixtures'
require 'tasks/color'
desc "create initial records for enju_library"
namespace :enju_library do
task :setup => :environment do
ActiveRecord::FixtureSet.create_fixtures('db/fixtures/enju_library', 'library_groups')
Dir.glob(Rails.root.to_s + '/db/fixtures/enju_library/**/*.yml').each do |file|
dirname = File.basename(File.dirname file)
dirname = nil if dirname == "enju_library"
basename = [ dirname, File.basename(file, ".*") ].compact
basename = File.join(*basename)
next if basename == 'library_groups'
ActiveRecord::FixtureSet.create_fixtures('db/fixtures/enju_library', basename)
end
Shelf.create!(name: 'web', library: Library.find_by(name: 'web'))
Shelf.create!(name: 'first_shelf', library: Library.find_by(name: 'yours'))
end
desc "upgrade enju_library to 1.3"
task :upgrade_to_13 => :environment do
Rake::Task['statesman:backfill_most_recent'].invoke('UserExportFile')
Rake::Task['statesman:backfill_most_recent'].invoke('UserImportFile')
library_group = LibraryGroup.site_config
library_group.user = User.find(1)
login_ja = <<"EOS"
このシステムはオープンソース図書館システム Next-L Enju です。このメッセージは管理者によって変更することができます。
EOS
login_en = <<"EOS"
Next-L Enju, an open-source integrated library system. You can edit this message after logging in as Administrator.
EOS
footer_ja = <<"EOS"
[Next-L Enju Leaf __VERSION__](https://github.com/next-l/enju_leaf), オープンソース統合図書館システム
Developed by [Kosuke Tanabe](https://github.com/nabeta) and [Project Next-L](https://www.next-l.jp) \| [このシステムについて](/page/about) \| [不具合を報告する](https://github.com/next-l/enju_leaf/issues) \| [マニュアル](https://next-l.github.com/manual/1.3/)
EOS
footer_en = <<"EOS"
[Next-L Enju Leaf __VERSION__](https://github.com/next-l/enju_leaf), an open source integrated library system
Developed by [Kosuke Tanabe](https://github.com/nabeta) and [Project Next-L](https://www.next-l.jp) \| [About this system](/page/about) \| [Report bugs](https://github.com/next-l/enju_leaf/issues) \| [Manual](https://next-l.github.com/manual/1.3/)
EOS
library_group.login_banner_ja = login_ja if library_group.login_banner_ja.blank?
library_group.login_banner_en = login_en if library_group.login_banner_en.blank?
library_group.footer_banner_ja = footer_ja if library_group.footer_banner_ja.blank?
library_group.footer_banner_en = footer_en if library_group.footer_banner_en.blank?
library_group.book_jacket_source = 'google'
library_group.screenshot_generator = 'mozshot'
library_group.save
puts 'enju_library: The upgrade completed successfully.'
end
desc "upgrade enju_biblio to 2.0"
task upgrade: :environment do
class_names = [
BudgetType, Library, LibraryGroup, RequestStatusType, RequestType,
SearchEngine, Shelf, UserGroup
]
class_names.each do |klass|
klass.find_each do |record|
I18n.available_locales.each do |locale|
next unless record.respond_to?("display_name_#{locale}")
record.update("display_name_#{locale}": YAML.safe_load(record[:display_name])[locale.to_s])
end
end
end
sql = 'SELECT * FROM library_group_translations;'
results = ActiveRecord::Base.connection.execute(sql)
results.each do |row|
library_group = LibraryGroup.find(row['library_group_id'])
library_group.update(
"login_banner_#{row['locale']}": row['login_banner'],
"footer_banner_#{row['locale']}": row['footer_banner']
)
end
puts 'enju_library: The upgrade completed successfully.'
end
end
|
namespace :import do
desc 'Import trade codes'
task :trade_codes => [:environment] do
puts "#{TradeCode.delete_all} trade codes deleted"
terms = [
{:code => 'BAL', :name_en => 'baleen'},
{:code => 'BAR', :name_en => 'bark'},
{:code => 'BEL', :name_en => 'belts'},
{:code => 'BOD', :name_en => 'bodies'},
{:code => 'BOC', :name_en => 'bone carvings'},
{:code => 'BOP', :name_en => 'bone pieces'},
{:code => 'BPR', :name_en => 'bone products'},
{:code => 'BON', :name_en => 'bones'},
{:code => 'CAL', :name_en => 'calipee'},
{:code => 'CAP', :name_en => 'carapaces'},
{:code => 'CAR', :name_en => 'carvings'},
{:code => 'CAV', :name_en => 'caviar'},
{:code => 'CST', :name_en => 'chess sets'},
{:code => 'CHP', :name_en => 'chips'},
{:code => 'CLA', :name_en => 'claws'},
{:code => 'CLO', :name_en => 'cloth'},
{:code => 'COS', :name_en => 'coral sand'},
{:code => 'CUL', :name_en => 'cultures'},
{:code => 'DER', :name_en => 'derivatives'},
{:code => 'DPL', :name_en => 'dried plants'},
{:code => 'EAR', :name_en => 'ears'},
{:code => 'EGG', :name_en => 'eggs'},
{:code => 'EGL', :name_en => 'eggs (live)'},
{:code => 'EXT', :name_en => 'extract'},
{:code => 'FEA', :name_en => 'feathers'},
{:code => 'FOO', :name_en => 'feet'},
{:code => 'FIB', :name_en => 'fibres'},
{:code => 'FIG', :name_en => 'fingerlings'},
{:code => 'FIN', :name_en => 'fins'},
{:code => 'FPT', :name_en => 'flower pots'},
{:code => 'FLO', :name_en => 'flowers'},
{:code => 'FRU', :name_en => 'fruit'},
{:code => 'GAL', :name_en => 'gall'},
{:code => 'GAB', :name_en => 'gall bladder(s)'},
{:code => 'GAR', :name_en => 'garments'},
{:code => 'GEN', :name_en => 'genitalia'},
{:code => 'GRS', :name_en => 'graft rootstocks'},
{:code => 'HAI', :name_en => 'hair'},
{:code => 'HAP', :name_en => 'hair products'},
{:code => 'HAN', :name_en => 'handbags'},
{:code => 'HEA', :name_en => 'heads'},
{:code => 'HOC', :name_en => 'horn carvings'},
{:code => 'HOP', :name_en => 'horn pieces'},
{:code => 'HPR', :name_en => 'horn products'},
{:code => 'HOS', :name_en => 'horn scraps'},
{:code => 'HOR', :name_en => 'horns'},
{:code => 'FRN', :name_en => 'items of furniture'},
{:code => 'IVC', :name_en => 'ivory carvings'},
{:code => 'IVP', :name_en => 'ivory pieces'},
{:code => 'IVS', :name_en => 'ivory scraps'},
{:code => 'LEA', :name_en => 'leather'},
{:code => 'SKO', :name_en => 'leather items'},
{:code => 'LPL', :name_en => 'leather products (l)'},
{:code => 'LPS', :name_en => 'leather products (s)'},
{:code => 'LVS', :name_en => 'leaves'},
{:code => 'LEG', :name_en => 'legs'},
{:code => 'LIV', :name_en => 'live'},
{:code => 'LOG', :name_en => 'logs'},
{:code => 'MEA', :name_en => 'meat'},
{:code => 'MED', :name_en => 'medicine'},
{:code => 'MUS', :name_en => 'musk'},
{:code => 'OIL', :name_en => 'oil'},
{:code => 'OTH', :name_en => 'other'},
{:code => 'SHO', :name_en => 'pairs of shoes'},
{:code => 'PEA', :name_en => 'pearls'},
{:code => 'PKY', :name_en => 'piano keys'},
{:code => 'PIE', :name_en => 'pieces'},
{:code => 'PLA', :name_en => 'plates'},
{:code => 'PLY', :name_en => 'plywood'},
{:code => 'POW', :name_en => 'powder'},
{:code => 'QUI', :name_en => 'quills'},
{:code => 'COR', :name_en => 'raw corals'},
{:code => 'ROO', :name_en => 'roots'},
{:code => 'SAW', :name_en => 'sawn wood'},
{:code => 'SCA', :name_en => 'scales'},
{:code => 'SCR', :name_en => 'scraps'},
{:code => 'SEE', :name_en => 'seeds'},
{:code => 'SHE', :name_en => 'shells'},
{:code => 'SKD', :name_en => 'sides'},
{:code => 'SID', :name_en => 'sides'},
{:code => 'SKE', :name_en => 'skeletons'},
{:code => 'SKP', :name_en => 'skin pieces'},
{:code => 'SKS', :name_en => 'skin scraps'},
{:code => 'SKI', :name_en => 'skins'},
{:code => 'SKU', :name_en => 'skulls'},
{:code => 'SOU', :name_en => 'soup'},
{:code => 'SPE', :name_en => 'specimens'},
{:code => 'FRA', :name_en => 'spectacle frames'},
{:code => 'STE', :name_en => 'stems'},
{:code => 'SWI', :name_en => 'swim bladders'},
{:code => 'TAI', :name_en => 'tails'},
{:code => 'TEE', :name_en => 'teeth'},
{:code => 'TIM', :name_en => 'timber'},
{:code => 'TIC', :name_en => 'timber carvings'},
{:code => 'TIP', :name_en => 'timber pieces'},
{:code => 'TIS', :name_en => 'tissue cultures'},
{:code => 'TRO', :name_en => 'trophies'},
{:code => 'TUS', :name_en => 'tusks'},
{:code => 'VEN', :name_en => 'veneer'},
{:code => 'VNM', :name_en => 'venom'},
{:code => 'WAL', :name_en => 'wallets'},
{:code => 'WAT', :name_en => 'watchstraps'},
{:code => 'WAX', :name_en => 'wax'},
{:code => 'WOO', :name_en => 'wood products'}
]
terms.each{ |t| Term.create(t) }
sources = [
{:code => 'A', :name_en => 'Artificially propagated plants'},
{:code => 'C', :name_en => 'Captive-bred animals'},
{:code => 'D', :name_en => 'Captive-bred/artificially propagated (Appendix I)'},
{:code => 'F', :name_en => 'Born in captivity (F1 and subsequent)'},
{:code => 'I', :name_en => 'Confiscations/seizures'},
{:code => 'O', :name_en => 'Pre-Convention'},
{:code => 'R', :name_en => 'Ranched'},
{:code => 'W', :name_en => 'Wild'}
]
sources.each{ |t| Source.create(t) }
purposes = [
{:code => 'B', :name_en => 'Breeding in captivity or artificially propagation'},
{:code => 'E', :name_en => 'Educational'},
{:code => 'G', :name_en => 'Botanical garden'},
{:code => 'H', :name_en => 'Hunting trophy'},
{:code => 'L', :name_en => 'Law enforcement/judicial/forensic'},
{:code => 'M', :name_en => 'Medical (including biomedical research)'},
{:code => 'N', :name_en => 'Reintroduction or introduction into the wild'},
{:code => 'P', :name_en => 'Personal'},
{:code => 'Q', :name_en => 'Circus and travelling exhibitions'},
{:code => 'S', :name_en => 'Scientific'},
{:code => 'T', :name_en => 'Commercial'},
{:code => 'Z', :name_en => 'Zoo'}
]
purposes.each{ |t| Purpose.create(t) }
units = [
{:code => 'BAG', :name_en => 'Bags'},
{:code => 'BAK', :name_en => 'Back skins'},
{:code => 'BOT', :name_en => 'Bottles'},
{:code => 'BOX', :name_en => 'Boxes'},
{:code => 'BSK', :name_en => 'Belly skins'},
{:code => 'CAN', :name_en => 'Cans'},
{:code => 'CAS', :name_en => 'Cases'},
{:code => 'CCM', :name_en => 'Cubic centimetres'},
{:code => 'CRT', :name_en => 'Cartons'},
{:code => 'CTM', :name_en => 'Centimetres'},
{:code => 'CUF', :name_en => 'Cubic feet'},
{:code => 'CUM', :name_en => 'Cubic metres'},
{:code => 'FEE', :name_en => 'Feet'},
{:code => 'FLA', :name_en => 'Flasks'},
{:code => 'GRM', :name_en => 'Grams'},
{:code => 'HRN', :name_en => 'Hornback skins'},
{:code => 'INC', :name_en => 'Inches'},
{:code => 'ITE', :name_en => 'Items'},
{:code => 'KIL', :name_en => 'Kilograms'},
{:code => 'LTR', :name_en => 'Litres'},
{:code => 'MGM', :name_en => 'Milligrams'},
{:code => 'MLT', :name_en => 'Millilitres'},
{:code => 'MTR', :name_en => 'Metres'},
{:code => 'MYG', :name_en => 'Micrograms'},
{:code => 'OUN', :name_en => 'Ounces'},
{:code => 'PAI', :name_en => 'Pairs'},
{:code => 'PCS', :name_en => 'Pieces'},
{:code => 'PND', :name_en => 'Pounds'},
{:code => 'SET', :name_en => 'Sets'},
{:code => 'SHP', :name_en => 'Shipments'},
{:code => 'SID', :name_en => 'Sides'},
{:code => 'SKI', :name_en => 'Skins'},
{:code => 'SQC', :name_en => 'Square centimetres'},
{:code => 'SQD', :name_en => 'Square decimetres'},
{:code => 'SQF', :name_en => 'Square feet'},
{:code => 'SQM', :name_en => 'Square metres'},
{:code => 'TON', :name_en => 'Metric tons'}
]
units.each{ |t| Unit.create(t) }
puts "#{TradeCode.count} trade codes created"
end
desc "Import terms and purpose codes acceptable pairing"
task :trade_codes_t_p_pairs => [:environment] do
TMP_TABLE = "terms_and_purpose_pairs_import"
file = "lib/files/term_purpose_pairs_utf8.csv"
drop_table(TMP_TABLE)
create_table_from_csv_headers(file, TMP_TABLE)
copy_data(file, TMP_TABLE)
initial_count = TermTradeCodesPair.count
sql = <<-SQL
INSERT INTO term_trade_codes_pairs(term_id,
trade_code_id, trade_code_type, created_at, updated_at)
SELECT DISTINCT terms.id, trade_codes.id,
trade_codes.type, current_date, current_date
FROM #{TMP_TABLE}
INNER JOIN trade_codes AS terms ON BTRIM(UPPER(terms.code)) = BTRIM(UPPER(#{TMP_TABLE}.TERM_CODE))
AND terms.type = 'Term'
INNER JOIN trade_codes AS trade_codes ON BTRIM(UPPER(trade_codes.code)) = BTRIM(UPPER(#{TMP_TABLE}.PURPOSE_CODE))
AND trade_codes.type = 'Purpose';
SQL
ActiveRecord::Base.connection.execute(sql)
puts "#{TermTradeCodesPair.count - initial_count} terms and purpose codes pairs created"
end
desc "Import terms and unit codes acceptable pairing"
task :trade_codes_t_u_pairs => [:environment] do
TMP_TABLE = "terms_and_unit_pairs_import"
file = "lib/files/term_unit_pairs_utf8.csv"
drop_table(TMP_TABLE)
create_table_from_csv_headers(file, TMP_TABLE)
copy_data(file, TMP_TABLE)
initial_count = TermTradeCodesPair.count
sql = <<-SQL
INSERT INTO term_trade_codes_pairs(term_id,
trade_code_id, trade_code_type, created_at, updated_at)
SELECT DISTINCT terms.id, trade_codes.id,
trade_codes.type, current_date, current_date
FROM #{TMP_TABLE}
INNER JOIN trade_codes AS terms ON BTRIM(UPPER(terms.code)) = BTRIM(UPPER(#{TMP_TABLE}.TERM_CODE))
AND terms.type = 'Term'
INNER JOIN trade_codes AS trade_codes ON BTRIM(UPPER(trade_codes.code)) = BTRIM(UPPER(#{TMP_TABLE}.UNIT_CODE))
AND trade_codes.type = 'Unit';
SQL
ActiveRecord::Base.connection.execute(sql)
puts "#{TermTradeCodesPair.count - initial_count} terms and unit codes pairs created"
end
desc "Import taxon concepts terms acceptable pairing. (i.e.: which terms can go with each taxon concept)"
task :taxon_concept_terms_pairs => [:environment] do
TMP_TABLE = "taxon_concepts_and_terms_pairs_import"
file = "lib/files/taxon_concept_term_pairs_utf8.csv"
drop_table(TMP_TABLE)
create_table_from_csv_headers(file, TMP_TABLE)
copy_data(file, TMP_TABLE)
initial_count = Trade::TaxonConceptCodePair.count
sql = <<-SQL
INSERT INTO trade_taxon_concept_code_pairs(taxon_concept_id, trade_code_id, trade_code_type,
created_at, updated_at)
SELECT DISTINCT taxon_concepts.id, terms.id, 'Term', current_date, current_date
FROM #{TMP_TABLE}
INNER JOIN taxon_concepts_mview AS taxon_concepts ON UPPER(BTRIM(taxon_concepts.full_name)) = UPPER(BTRIM(#{TMP_TABLE}.TAXON_FAMILY))
INNER JOIN trade_codes AS terms ON UPPER(BTRIM(terms.code)) = UPPER(BTRIM(#{TMP_TABLE}.TERM_CODE))
WHERE taxon_concepts.rank_name = '#{Rank::FAMILY}' AND taxon_concepts.taxonomy_is_cites_eu
SQL
ActiveRecord::Base.connection.execute(sql)
puts "#{Trade::TaxonConceptCodePair.count - initial_count} terms and unit codes pairs created"
end
end
Fixes task to import taxon_concept_term_pairs that was using the old table name and attributes. And I've grepped for more occurrences an none showed
namespace :import do
desc 'Import trade codes'
task :trade_codes => [:environment] do
puts "#{TradeCode.delete_all} trade codes deleted"
terms = [
{:code => 'BAL', :name_en => 'baleen'},
{:code => 'BAR', :name_en => 'bark'},
{:code => 'BEL', :name_en => 'belts'},
{:code => 'BOD', :name_en => 'bodies'},
{:code => 'BOC', :name_en => 'bone carvings'},
{:code => 'BOP', :name_en => 'bone pieces'},
{:code => 'BPR', :name_en => 'bone products'},
{:code => 'BON', :name_en => 'bones'},
{:code => 'CAL', :name_en => 'calipee'},
{:code => 'CAP', :name_en => 'carapaces'},
{:code => 'CAR', :name_en => 'carvings'},
{:code => 'CAV', :name_en => 'caviar'},
{:code => 'CST', :name_en => 'chess sets'},
{:code => 'CHP', :name_en => 'chips'},
{:code => 'CLA', :name_en => 'claws'},
{:code => 'CLO', :name_en => 'cloth'},
{:code => 'COS', :name_en => 'coral sand'},
{:code => 'CUL', :name_en => 'cultures'},
{:code => 'DER', :name_en => 'derivatives'},
{:code => 'DPL', :name_en => 'dried plants'},
{:code => 'EAR', :name_en => 'ears'},
{:code => 'EGG', :name_en => 'eggs'},
{:code => 'EGL', :name_en => 'eggs (live)'},
{:code => 'EXT', :name_en => 'extract'},
{:code => 'FEA', :name_en => 'feathers'},
{:code => 'FOO', :name_en => 'feet'},
{:code => 'FIB', :name_en => 'fibres'},
{:code => 'FIG', :name_en => 'fingerlings'},
{:code => 'FIN', :name_en => 'fins'},
{:code => 'FPT', :name_en => 'flower pots'},
{:code => 'FLO', :name_en => 'flowers'},
{:code => 'FRU', :name_en => 'fruit'},
{:code => 'GAL', :name_en => 'gall'},
{:code => 'GAB', :name_en => 'gall bladder(s)'},
{:code => 'GAR', :name_en => 'garments'},
{:code => 'GEN', :name_en => 'genitalia'},
{:code => 'GRS', :name_en => 'graft rootstocks'},
{:code => 'HAI', :name_en => 'hair'},
{:code => 'HAP', :name_en => 'hair products'},
{:code => 'HAN', :name_en => 'handbags'},
{:code => 'HEA', :name_en => 'heads'},
{:code => 'HOC', :name_en => 'horn carvings'},
{:code => 'HOP', :name_en => 'horn pieces'},
{:code => 'HPR', :name_en => 'horn products'},
{:code => 'HOS', :name_en => 'horn scraps'},
{:code => 'HOR', :name_en => 'horns'},
{:code => 'FRN', :name_en => 'items of furniture'},
{:code => 'IVC', :name_en => 'ivory carvings'},
{:code => 'IVP', :name_en => 'ivory pieces'},
{:code => 'IVS', :name_en => 'ivory scraps'},
{:code => 'LEA', :name_en => 'leather'},
{:code => 'SKO', :name_en => 'leather items'},
{:code => 'LPL', :name_en => 'leather products (l)'},
{:code => 'LPS', :name_en => 'leather products (s)'},
{:code => 'LVS', :name_en => 'leaves'},
{:code => 'LEG', :name_en => 'legs'},
{:code => 'LIV', :name_en => 'live'},
{:code => 'LOG', :name_en => 'logs'},
{:code => 'MEA', :name_en => 'meat'},
{:code => 'MED', :name_en => 'medicine'},
{:code => 'MUS', :name_en => 'musk'},
{:code => 'OIL', :name_en => 'oil'},
{:code => 'OTH', :name_en => 'other'},
{:code => 'SHO', :name_en => 'pairs of shoes'},
{:code => 'PEA', :name_en => 'pearls'},
{:code => 'PKY', :name_en => 'piano keys'},
{:code => 'PIE', :name_en => 'pieces'},
{:code => 'PLA', :name_en => 'plates'},
{:code => 'PLY', :name_en => 'plywood'},
{:code => 'POW', :name_en => 'powder'},
{:code => 'QUI', :name_en => 'quills'},
{:code => 'COR', :name_en => 'raw corals'},
{:code => 'ROO', :name_en => 'roots'},
{:code => 'SAW', :name_en => 'sawn wood'},
{:code => 'SCA', :name_en => 'scales'},
{:code => 'SCR', :name_en => 'scraps'},
{:code => 'SEE', :name_en => 'seeds'},
{:code => 'SHE', :name_en => 'shells'},
{:code => 'SKD', :name_en => 'sides'},
{:code => 'SID', :name_en => 'sides'},
{:code => 'SKE', :name_en => 'skeletons'},
{:code => 'SKP', :name_en => 'skin pieces'},
{:code => 'SKS', :name_en => 'skin scraps'},
{:code => 'SKI', :name_en => 'skins'},
{:code => 'SKU', :name_en => 'skulls'},
{:code => 'SOU', :name_en => 'soup'},
{:code => 'SPE', :name_en => 'specimens'},
{:code => 'FRA', :name_en => 'spectacle frames'},
{:code => 'STE', :name_en => 'stems'},
{:code => 'SWI', :name_en => 'swim bladders'},
{:code => 'TAI', :name_en => 'tails'},
{:code => 'TEE', :name_en => 'teeth'},
{:code => 'TIM', :name_en => 'timber'},
{:code => 'TIC', :name_en => 'timber carvings'},
{:code => 'TIP', :name_en => 'timber pieces'},
{:code => 'TIS', :name_en => 'tissue cultures'},
{:code => 'TRO', :name_en => 'trophies'},
{:code => 'TUS', :name_en => 'tusks'},
{:code => 'VEN', :name_en => 'veneer'},
{:code => 'VNM', :name_en => 'venom'},
{:code => 'WAL', :name_en => 'wallets'},
{:code => 'WAT', :name_en => 'watchstraps'},
{:code => 'WAX', :name_en => 'wax'},
{:code => 'WOO', :name_en => 'wood products'}
]
terms.each{ |t| Term.create(t) }
sources = [
{:code => 'A', :name_en => 'Artificially propagated plants'},
{:code => 'C', :name_en => 'Captive-bred animals'},
{:code => 'D', :name_en => 'Captive-bred/artificially propagated (Appendix I)'},
{:code => 'F', :name_en => 'Born in captivity (F1 and subsequent)'},
{:code => 'I', :name_en => 'Confiscations/seizures'},
{:code => 'O', :name_en => 'Pre-Convention'},
{:code => 'R', :name_en => 'Ranched'},
{:code => 'W', :name_en => 'Wild'}
]
sources.each{ |t| Source.create(t) }
purposes = [
{:code => 'B', :name_en => 'Breeding in captivity or artificially propagation'},
{:code => 'E', :name_en => 'Educational'},
{:code => 'G', :name_en => 'Botanical garden'},
{:code => 'H', :name_en => 'Hunting trophy'},
{:code => 'L', :name_en => 'Law enforcement/judicial/forensic'},
{:code => 'M', :name_en => 'Medical (including biomedical research)'},
{:code => 'N', :name_en => 'Reintroduction or introduction into the wild'},
{:code => 'P', :name_en => 'Personal'},
{:code => 'Q', :name_en => 'Circus and travelling exhibitions'},
{:code => 'S', :name_en => 'Scientific'},
{:code => 'T', :name_en => 'Commercial'},
{:code => 'Z', :name_en => 'Zoo'}
]
purposes.each{ |t| Purpose.create(t) }
units = [
{:code => 'BAG', :name_en => 'Bags'},
{:code => 'BAK', :name_en => 'Back skins'},
{:code => 'BOT', :name_en => 'Bottles'},
{:code => 'BOX', :name_en => 'Boxes'},
{:code => 'BSK', :name_en => 'Belly skins'},
{:code => 'CAN', :name_en => 'Cans'},
{:code => 'CAS', :name_en => 'Cases'},
{:code => 'CCM', :name_en => 'Cubic centimetres'},
{:code => 'CRT', :name_en => 'Cartons'},
{:code => 'CTM', :name_en => 'Centimetres'},
{:code => 'CUF', :name_en => 'Cubic feet'},
{:code => 'CUM', :name_en => 'Cubic metres'},
{:code => 'FEE', :name_en => 'Feet'},
{:code => 'FLA', :name_en => 'Flasks'},
{:code => 'GRM', :name_en => 'Grams'},
{:code => 'HRN', :name_en => 'Hornback skins'},
{:code => 'INC', :name_en => 'Inches'},
{:code => 'ITE', :name_en => 'Items'},
{:code => 'KIL', :name_en => 'Kilograms'},
{:code => 'LTR', :name_en => 'Litres'},
{:code => 'MGM', :name_en => 'Milligrams'},
{:code => 'MLT', :name_en => 'Millilitres'},
{:code => 'MTR', :name_en => 'Metres'},
{:code => 'MYG', :name_en => 'Micrograms'},
{:code => 'OUN', :name_en => 'Ounces'},
{:code => 'PAI', :name_en => 'Pairs'},
{:code => 'PCS', :name_en => 'Pieces'},
{:code => 'PND', :name_en => 'Pounds'},
{:code => 'SET', :name_en => 'Sets'},
{:code => 'SHP', :name_en => 'Shipments'},
{:code => 'SID', :name_en => 'Sides'},
{:code => 'SKI', :name_en => 'Skins'},
{:code => 'SQC', :name_en => 'Square centimetres'},
{:code => 'SQD', :name_en => 'Square decimetres'},
{:code => 'SQF', :name_en => 'Square feet'},
{:code => 'SQM', :name_en => 'Square metres'},
{:code => 'TON', :name_en => 'Metric tons'}
]
units.each{ |t| Unit.create(t) }
puts "#{TradeCode.count} trade codes created"
end
desc "Import terms and purpose codes acceptable pairing"
task :trade_codes_t_p_pairs => [:environment] do
TMP_TABLE = "terms_and_purpose_pairs_import"
file = "lib/files/term_purpose_pairs_utf8.csv"
drop_table(TMP_TABLE)
create_table_from_csv_headers(file, TMP_TABLE)
copy_data(file, TMP_TABLE)
initial_count = TermTradeCodesPair.count
sql = <<-SQL
INSERT INTO term_trade_codes_pairs(term_id,
trade_code_id, trade_code_type, created_at, updated_at)
SELECT DISTINCT terms.id, trade_codes.id,
trade_codes.type, current_date, current_date
FROM #{TMP_TABLE}
INNER JOIN trade_codes AS terms ON BTRIM(UPPER(terms.code)) = BTRIM(UPPER(#{TMP_TABLE}.TERM_CODE))
AND terms.type = 'Term'
INNER JOIN trade_codes AS trade_codes ON BTRIM(UPPER(trade_codes.code)) = BTRIM(UPPER(#{TMP_TABLE}.PURPOSE_CODE))
AND trade_codes.type = 'Purpose';
SQL
ActiveRecord::Base.connection.execute(sql)
puts "#{TermTradeCodesPair.count - initial_count} terms and purpose codes pairs created"
end
desc "Import terms and unit codes acceptable pairing"
task :trade_codes_t_u_pairs => [:environment] do
TMP_TABLE = "terms_and_unit_pairs_import"
file = "lib/files/term_unit_pairs_utf8.csv"
drop_table(TMP_TABLE)
create_table_from_csv_headers(file, TMP_TABLE)
copy_data(file, TMP_TABLE)
initial_count = TermTradeCodesPair.count
sql = <<-SQL
INSERT INTO term_trade_codes_pairs(term_id,
trade_code_id, trade_code_type, created_at, updated_at)
SELECT DISTINCT terms.id, trade_codes.id,
trade_codes.type, current_date, current_date
FROM #{TMP_TABLE}
INNER JOIN trade_codes AS terms ON BTRIM(UPPER(terms.code)) = BTRIM(UPPER(#{TMP_TABLE}.TERM_CODE))
AND terms.type = 'Term'
INNER JOIN trade_codes AS trade_codes ON BTRIM(UPPER(trade_codes.code)) = BTRIM(UPPER(#{TMP_TABLE}.UNIT_CODE))
AND trade_codes.type = 'Unit';
SQL
ActiveRecord::Base.connection.execute(sql)
puts "#{TermTradeCodesPair.count - initial_count} terms and unit codes pairs created"
end
desc "Import taxon concepts terms acceptable pairing. (i.e.: which terms can go with each taxon concept)"
task :taxon_concept_terms_pairs => [:environment] do
TMP_TABLE = "taxon_concepts_and_terms_pairs_import"
file = "lib/files/taxon_concept_term_pairs_utf8.csv"
drop_table(TMP_TABLE)
create_table_from_csv_headers(file, TMP_TABLE)
copy_data(file, TMP_TABLE)
initial_count = Trade::TaxonConceptTermPair.count
sql = <<-SQL
INSERT INTO trade_taxon_concept_term_pairs(taxon_concept_id, term_id,
created_at, updated_at)
SELECT DISTINCT taxon_concepts.id, terms.id, current_date, current_date
FROM #{TMP_TABLE}
INNER JOIN taxon_concepts_mview AS taxon_concepts ON UPPER(BTRIM(taxon_concepts.full_name)) = UPPER(BTRIM(#{TMP_TABLE}.TAXON_FAMILY))
INNER JOIN trade_codes AS terms ON UPPER(BTRIM(terms.code)) = UPPER(BTRIM(#{TMP_TABLE}.TERM_CODE))
AND terms.type = 'Term'
WHERE taxon_concepts.rank_name = '#{Rank::FAMILY}' AND taxon_concepts.taxonomy_is_cites_eu
SQL
ActiveRecord::Base.connection.execute(sql)
puts "#{Trade::TaxonConceptTermPair.count - initial_count} terms and unit codes pairs created"
end
end
|
module TestBoosters
module ProjectInfo
module_function
def display_ruby_version
version = TestBoosters::Shell.evaluate("ruby --version").gsub("ruby ", "")
puts "Ruby Version: #{version}"
end
def display_bundler_version
version = TestBoosters::Shell.evaluate("bundle --version").gsub("Bundler version ", "")
puts "Bundler Version: #{version}"
end
def display_rspec_version
command = "(bundle list | grep -q '* rspec') && bundle exec rspec --version || echo 'not found'"
version = TestBoosters::Shell.evaluate(command)
puts "RSpec Version: #{version}"
end
def display_cucumber_version
command = "(bundle list | grep -q '* cucumber') && bundle exec cucumber --version || echo 'not found'"
version = TestBoosters::Shell.evaluate(command)
puts "Cucumber Version: #{version}"
end
end
end
Fix rspec version display
module TestBoosters
module ProjectInfo
module_function
def display_ruby_version
version = TestBoosters::Shell.evaluate("ruby --version").gsub("ruby ", "")
puts "Ruby Version: #{version}"
end
def display_bundler_version
version = TestBoosters::Shell.evaluate("bundle --version").gsub("Bundler version ", "")
puts "Bundler Version: #{version}"
end
def display_rspec_version
command = "(bundle list | grep -q '* rspec') && (bundle exec rspec --version | head -1) || echo 'not found'"
version = TestBoosters::Shell.evaluate(command)
puts "RSpec Version: #{version}"
end
def display_cucumber_version
command = "(bundle list | grep -q '* cucumber') && (bundle exec cucumber --version | head -1) || echo 'not found'"
version = TestBoosters::Shell.evaluate(command)
puts "Cucumber Version: #{version}"
end
end
end
|
# Just to save you 10 seconds, the reason we always use #each to extract
# elements instead of something simpler is because Enumerable can not assume
# any other methods than #each. If needed, class-specific versions of any of
# these methods can be written *in those classes* to override these.
module Enumerable
def chunk(initial_state = nil, &original_block)
raise ArgumentError, "no block given" unless block_given?
::Enumerator.new do |yielder|
previous = nil
accumulate = []
block = if initial_state.nil?
original_block
else
duplicated_initial_state = initial_state.dup
Proc.new{ |val| original_block.yield(val, duplicated_initial_state)}
end
each do |element|
key = block.yield(element)
if key.nil? || (key.is_a?(Symbol) && key.to_s[0, 1] == "_")
yielder.yield [previous, accumulate] unless accumulate.empty?
accumulate = []
previous = nil
case key
when nil, :_separator
when :_alone
yielder.yield [key, [element]]
else
raise RuntimeError, "symbols beginning with an underscore are reserved"
end
else
if previous.nil? || previous == key
accumulate << element
else
yielder.yield [previous, accumulate] unless accumulate.empty?
accumulate = [element]
end
previous = key
end
end
yielder.yield [previous, accumulate] unless accumulate.empty?
end
end
def collect
if block_given?
ary = []
each do |*element|
ary << yield(*element)
end
ary
else
to_enum(:collect) { enumerator_size }
end
end
alias_method :map, :collect
def count(item = undefined)
seq = 0
if !undefined.equal?(item)
each { |element| seq += 1 if item == element }
elsif block_given?
each { |element| seq += 1 if yield(element) }
else
each { seq += 1 }
end
seq
end
def each_entry(*pass)
return to_enum(:each_entry, *pass) { enumerator_size } unless block_given?
each(*pass) do |*element|
yield element.size == 1 ? element[0] : element
end
self
end
def each_with_object(memo)
return to_enum(:each_with_object, memo) { enumerator_size } unless block_given?
each do
element = Rubinius.single_block_arg
yield element, memo
end
memo
end
alias_method :with_object, :each_with_object
def flat_map
return to_enum(:flat_map) { enumerator_size } unless block_given?
array = []
each do |*element|
result = yield(*element)
value = Rubinius::Type.try_convert(result, Array, :to_ary) || result
if value.kind_of? Array
array.concat value
else
array.push value
end
end
array
end
alias_method :collect_concat, :flat_map
def lazy
Enumerator::Lazy.new(self, enumerator_size) do |yielder, *args|
yielder.<<(*args)
end
end
def enumerator_size
Rubinius::Type.object_respond_to?(self, :size) ? size : nil
end
private :enumerator_size
def group_by
return to_enum(:group_by) { enumerator_size } unless block_given?
h = {}
each do
element = Rubinius.single_block_arg
key = yield(element)
if h.key?(key)
h[key] << element
else
h[key] = [element]
end
end
Rubinius::Type.infect h, self
h
end
def slice_before(arg = undefined, &block)
if block_given?
has_init = !(undefined.equal? arg)
else
raise ArgumentError, "wrong number of arguments (0 for 1)" if undefined.equal? arg
block = Proc.new{ |elem| arg === elem }
end
Enumerator.new do |yielder|
init = arg.dup if has_init
accumulator = nil
each do |element|
start_new = has_init ? block.yield(element, init) : block.yield(element)
if start_new
yielder.yield accumulator if accumulator
accumulator = [element]
else
accumulator ||= []
accumulator << element
end
end
yielder.yield accumulator if accumulator
end
end
def slice_after(pattern = undefined, &block)
pattern_given = !undefined.equal?(pattern)
raise ArgumentError, "cannot pass both pattern and block" if pattern_given && block_given?
raise ArgumentError, "wrong number of arguments (0 for 1)" if !pattern_given && !block_given?
block = Proc.new { |elem| pattern === elem } if pattern_given
Enumerator.new do |yielder|
accumulator = nil
each do |element|
end_chunk = block.yield(element)
accumulator ||= []
if end_chunk
accumulator << element
yielder.yield accumulator
accumulator = nil
else
accumulator << element
end
end
yielder.yield accumulator if accumulator
end
end
def slice_when(&block)
raise ArgumentError, "wrong number of arguments (0 for 1)" unless block_given?
Enumerator.new do |enum|
ary = nil
last_after = nil
each_cons(2) do |before, after|
last_after = after
match = block.call before, after
ary ||= []
if match
ary << before
enum.yield ary
ary = []
else
ary << before
end
end
unless ary.nil?
ary << last_after
enum.yield ary
end
end
end
def to_a(*arg)
ary = []
each(*arg) do
element = Rubinius.single_block_arg
ary << element
nil
end
Rubinius::Type.infect ary, self
ary
end
alias_method :entries, :to_a
def to_h(*arg)
h = {}
each_with_index(*arg) do |element, i|
unless element.respond_to?(:to_ary)
raise TypeError, "wrong element type #{element.class} at #{i} (expected array)"
end
ary = element.to_ary
if ary.size != 2
raise ArgumentError, "wrong array length at #{i} (expected 2, was #{ary.size})"
end
h[ary[0]] = ary[1]
end
h
end
def zip(*args)
args.map! do |a|
if a.respond_to? :to_ary
a.to_ary
else
a.to_enum(:each)
end
end
results = []
i = 0
each do
element = Rubinius.single_block_arg
entry = args.inject([element]) do |ary, a|
ary << case a
when Array
a[i]
else
begin
a.next
rescue StopIteration
nil
end
end
end
yield entry if block_given?
results << entry
i += 1
end
return nil if block_given?
results
end
def each_with_index(*args)
return to_enum(:each_with_index, *args) { enumerator_size } unless block_given?
idx = 0
each(*args) do
element = Rubinius.single_block_arg
yield element, idx
idx += 1
end
self
end
def grep(pattern)
ary = []
each do
element = Rubinius.single_block_arg
if pattern === element
Regexp.set_block_last_match
if block_given?
ary << yield(element)
else
ary << element
end
end
end
ary
end
def grep_v(pattern)
ary = []
each do
element = Rubinius.single_block_arg
unless pattern === element
if block_given?
ary << yield(element)
else
ary << element
end
end
end
ary
end
def sort(&prc)
ary = to_a
ary.frozen? ? ary.sort(&prc) : ary.sort!(&prc)
end
class SortedElement
def initialize(val, sort_id)
@value, @sort_id = val, sort_id
end
attr_reader :value
attr_reader :sort_id
def <=>(other)
@sort_id <=> other.sort_id
end
end
def sort_by
return to_enum(:sort_by) { enumerator_size } unless block_given?
# Transform each value to a tuple with the value and it's sort by value
sort_values = map do
element = Rubinius.single_block_arg
SortedElement.new(element, yield(element))
end
# Now sort the tuple according to the sort by value
sort_values.sort!
# Now strip of the tuple leaving the original value
sort_values.map! { |ary| ary.value }
end
def inject(initial=undefined, sym=undefined)
if !block_given? or !undefined.equal?(sym)
if undefined.equal?(sym)
sym = initial
initial = undefined
end
# Do the sym version
sym = sym.to_sym
each do
element = Rubinius.single_block_arg
if undefined.equal? initial
initial = element
else
initial = initial.__send__(sym, element)
end
end
# Block version
else
each do
element = Rubinius.single_block_arg
if undefined.equal? initial
initial = element
else
initial = yield(initial, element)
end
end
end
undefined.equal?(initial) ? nil : initial
end
alias_method :reduce, :inject
def all?
if block_given?
each { |*element| return false unless yield(*element) }
else
each { return false unless Rubinius.single_block_arg }
end
true
end
def any?
if block_given?
each { |*element| return true if yield(*element) }
else
each { return true if Rubinius.single_block_arg }
end
false
end
def cycle(many=nil)
unless block_given?
return to_enum(:cycle, many) do
Rubinius::EnumerableHelper.cycle_size(enumerator_size, many)
end
end
if many
many = Rubinius::Type.coerce_to_collection_index many
return nil if many <= 0
else
many = nil
end
cache = []
each do
element = Rubinius.single_block_arg
cache << element
yield element
end
return nil if cache.empty?
if many
i = 0
many -= 1
while i < many
cache.each { |element| yield element }
i += 1
end
else
while true
cache.each { |element| yield element }
end
end
nil
end
def drop(n)
n = Rubinius::Type.coerce_to_collection_index n
raise ArgumentError, "attempt to drop negative size" if n < 0
ary = to_a
return [] if n > ary.size
ary[n...ary.size]
end
def drop_while
return to_enum(:drop_while) unless block_given?
ary = []
dropping = true
each do
element = Rubinius.single_block_arg
ary << element unless dropping &&= yield(element)
end
ary
end
def each_cons(num)
n = Rubinius::Type.coerce_to_collection_index num
raise ArgumentError, "invalid size: #{n}" if n <= 0
unless block_given?
return to_enum(:each_cons, num) do
enum_size = enumerator_size
if enum_size.nil?
nil
elsif enum_size == 0 || enum_size < n
0
else
enum_size - n + 1
end
end
end
array = []
each do
element = Rubinius.single_block_arg
array << element
array.shift if array.size > n
yield array.dup if array.size == n
end
nil
end
def each_slice(slice_size)
n = Rubinius::Type.coerce_to_collection_index slice_size
raise ArgumentError, "invalid slice size: #{n}" if n <= 0
unless block_given?
return to_enum(:each_slice, slice_size) do
enum_size = enumerator_size
enum_size.nil? ? nil : (enum_size.to_f / n).ceil
end
end
a = []
each do
element = Rubinius.single_block_arg
a << element
if a.length == n
yield a
a = []
end
end
yield a unless a.empty?
nil
end
def find(ifnone=nil)
return to_enum(:find, ifnone) unless block_given?
each do
element = Rubinius.single_block_arg
return element if yield(element)
end
ifnone.call if ifnone
end
alias_method :detect, :find
def find_all
return to_enum(:find_all) { enumerator_size } unless block_given?
ary = []
each do
element = Rubinius.single_block_arg
ary << element if yield(element)
end
ary
end
alias_method :select, :find_all
def find_index(value=undefined)
if undefined.equal? value
return to_enum(:find_index) unless block_given?
i = 0
each do |*args|
return i if yield(*args)
i += 1
end
else
i = 0
each do
e = Rubinius.single_block_arg
return i if e == value
i += 1
end
end
nil
end
def first(n=undefined)
return __take__(n) unless undefined.equal?(n)
each do
return Rubinius.single_block_arg
end
nil
end
def min(n=nil)
if n.nil?
min = undefined
each do
element = Rubinius.single_block_arg
if undefined.equal? min
min = element
else
comp = block_given? ? yield(element, min) : element <=> min
unless comp
raise ArgumentError, "comparison of #{element.class} with #{min} failed"
end
if Comparable.compare_int(comp) < 0
min = element
end
end
end
undefined.equal?(min) ? nil : min
else
if block_given?
sort { |a, b| yield a, b }.take n
else
sort.take n
end
end
end
def max(n=nil)
if n.nil?
max = undefined
each do
element = Rubinius.single_block_arg
if undefined.equal? max
max = element
else
comp = block_given? ? yield(element, max) : element <=> max
unless comp
raise ArgumentError, "comparison of #{element.class} with #{max} failed"
end
if Comparable.compare_int(comp) > 0
max = element
end
end
end
undefined.equal?(max) ? nil : max
else
if block_given?
sort { |a, b| yield a, b }.reverse.take n
else
sort.reverse.take n
end
end
end
def max_by(n=nil)
return to_enum(:max_by, n) { enumerator_size } unless block_given?
if n.nil?
max_element = nil
max_result = undefined
each do
element = Rubinius.single_block_arg
result = yield element
if undefined.equal?(max_result) or \
Rubinius::Type.coerce_to_comparison(max_result, result) < 0
max_element = element
max_result = result
end
end
max_element
else
sort_by { |element| yield element }.reverse.take n
end
end
def min_by(n=nil)
return to_enum(:min_by, n) { enumerator_size } unless block_given?
if n.nil?
min_element = nil
min_result = undefined
each do
element = Rubinius.single_block_arg
result = yield element
if undefined.equal?(min_result) or \
Rubinius::Type.coerce_to_comparison(min_result, result) > 0
min_element = element
min_result = result
end
end
min_element
else
sort_by { |element| yield element }.take n
end
end
def self.sort_proc
@sort_proc ||= Proc.new do |a, b|
unless ret = a <=> b
raise ArgumentError, "Improper spaceship value"
end
ret
end
end
def minmax(&block)
block = Enumerable.sort_proc unless block
first_time = true
min, max = nil
each do
element = Rubinius.single_block_arg
if first_time
min = max = element
first_time = false
else
unless min_cmp = block.call(min, element)
raise ArgumentError, "comparison failed"
end
min = element if min_cmp > 0
unless max_cmp = block.call(max, element)
raise ArgumentError, "comparison failed"
end
max = element if max_cmp < 0
end
end
[min, max]
end
def minmax_by(&block)
return to_enum(:minmax_by) { enumerator_size } unless block_given?
min_element = nil
min_result = undefined
max_element = nil
max_result = undefined
each do
element = Rubinius.single_block_arg
result = yield element
if undefined.equal?(min_result) or \
Rubinius::Type.coerce_to_comparison(min_result, result) > 0
min_element = element
min_result = result
end
if undefined.equal?(max_result) or \
Rubinius::Type.coerce_to_comparison(max_result, result) < 0
max_element = element
max_result = result
end
end
[min_element, max_element]
end
def none?
if block_given?
each { |*element| return false if yield(*element) }
else
each { return false if Rubinius.single_block_arg }
end
return true
end
def one?
found_one = false
if block_given?
each do |*element|
if yield(*element)
return false if found_one
found_one = true
end
end
else
each do
if Rubinius.single_block_arg
return false if found_one
found_one = true
end
end
end
found_one
end
def partition
return to_enum(:partition) { enumerator_size } unless block_given?
left = []
right = []
each do
element = Rubinius.single_block_arg
yield(element) ? left.push(element) : right.push(element)
end
return [left, right]
end
def reject
return to_enum(:reject) { enumerator_size } unless block_given?
ary = []
each do
element = Rubinius.single_block_arg
ary << element unless yield(element)
end
ary
end
def reverse_each(&block)
return to_enum(:reverse_each) { enumerator_size } unless block_given?
# There is no other way then to convert to an array first... see 1.9's source.
to_a.reverse_each(&block)
self
end
def take(n)
n = Rubinius::Type.coerce_to_collection_index n
raise ArgumentError, "attempt to take negative size: #{n}" if n < 0
array = []
unless n <= 0
each do
element = Rubinius.single_block_arg
array << element
break if array.size >= n
end
end
array
end
alias_method :__take__, :take
private :__take__
def take_while
return to_enum(:take_while) unless block_given?
array = []
each do |element|
return array unless yield(element)
array << element
end
array
end
def include?(obj)
each { return true if Rubinius.single_block_arg == obj }
false
end
alias_method :member?, :include?
end
Implement chunk_while
# Just to save you 10 seconds, the reason we always use #each to extract
# elements instead of something simpler is because Enumerable can not assume
# any other methods than #each. If needed, class-specific versions of any of
# these methods can be written *in those classes* to override these.
module Enumerable
def chunk(initial_state = nil, &original_block)
raise ArgumentError, "no block given" unless block_given?
::Enumerator.new do |yielder|
previous = nil
accumulate = []
block = if initial_state.nil?
original_block
else
duplicated_initial_state = initial_state.dup
Proc.new{ |val| original_block.yield(val, duplicated_initial_state)}
end
each do |element|
key = block.yield(element)
if key.nil? || (key.is_a?(Symbol) && key.to_s[0, 1] == "_")
yielder.yield [previous, accumulate] unless accumulate.empty?
accumulate = []
previous = nil
case key
when nil, :_separator
when :_alone
yielder.yield [key, [element]]
else
raise RuntimeError, "symbols beginning with an underscore are reserved"
end
else
if previous.nil? || previous == key
accumulate << element
else
yielder.yield [previous, accumulate] unless accumulate.empty?
accumulate = [element]
end
previous = key
end
end
yielder.yield [previous, accumulate] unless accumulate.empty?
end
end
def collect
if block_given?
ary = []
each do |*element|
ary << yield(*element)
end
ary
else
to_enum(:collect) { enumerator_size }
end
end
alias_method :map, :collect
def count(item = undefined)
seq = 0
if !undefined.equal?(item)
each { |element| seq += 1 if item == element }
elsif block_given?
each { |element| seq += 1 if yield(element) }
else
each { seq += 1 }
end
seq
end
def each_entry(*pass)
return to_enum(:each_entry, *pass) { enumerator_size } unless block_given?
each(*pass) do |*element|
yield element.size == 1 ? element[0] : element
end
self
end
def each_with_object(memo)
return to_enum(:each_with_object, memo) { enumerator_size } unless block_given?
each do
element = Rubinius.single_block_arg
yield element, memo
end
memo
end
alias_method :with_object, :each_with_object
def flat_map
return to_enum(:flat_map) { enumerator_size } unless block_given?
array = []
each do |*element|
result = yield(*element)
value = Rubinius::Type.try_convert(result, Array, :to_ary) || result
if value.kind_of? Array
array.concat value
else
array.push value
end
end
array
end
alias_method :collect_concat, :flat_map
def lazy
Enumerator::Lazy.new(self, enumerator_size) do |yielder, *args|
yielder.<<(*args)
end
end
def enumerator_size
Rubinius::Type.object_respond_to?(self, :size) ? size : nil
end
private :enumerator_size
def group_by
return to_enum(:group_by) { enumerator_size } unless block_given?
h = {}
each do
element = Rubinius.single_block_arg
key = yield(element)
if h.key?(key)
h[key] << element
else
h[key] = [element]
end
end
Rubinius::Type.infect h, self
h
end
def slice_before(arg = undefined, &block)
if block_given?
has_init = !(undefined.equal? arg)
else
raise ArgumentError, "wrong number of arguments (0 for 1)" if undefined.equal? arg
block = Proc.new{ |elem| arg === elem }
end
Enumerator.new do |yielder|
init = arg.dup if has_init
accumulator = nil
each do |element|
start_new = has_init ? block.yield(element, init) : block.yield(element)
if start_new
yielder.yield accumulator if accumulator
accumulator = [element]
else
accumulator ||= []
accumulator << element
end
end
yielder.yield accumulator if accumulator
end
end
def slice_after(pattern = undefined, &block)
pattern_given = !undefined.equal?(pattern)
raise ArgumentError, "cannot pass both pattern and block" if pattern_given && block_given?
raise ArgumentError, "wrong number of arguments (0 for 1)" if !pattern_given && !block_given?
block = Proc.new { |elem| pattern === elem } if pattern_given
Enumerator.new do |yielder|
accumulator = nil
each do |element|
end_chunk = block.yield(element)
accumulator ||= []
if end_chunk
accumulator << element
yielder.yield accumulator
accumulator = nil
else
accumulator << element
end
end
yielder.yield accumulator if accumulator
end
end
def slice_when(&block)
raise ArgumentError, "wrong number of arguments (0 for 1)" unless block_given?
Enumerator.new do |enum|
ary = nil
last_after = nil
each_cons(2) do |before, after|
last_after = after
match = block.call before, after
ary ||= []
if match
ary << before
enum.yield ary
ary = []
else
ary << before
end
end
unless ary.nil?
ary << last_after
enum.yield ary
end
end
end
def chunk_while(&block)
raise ArgumentError, "wrong number of arguments (0 for 1)" unless block_given?
slice_when { |before, after| !(block.call(before, after)) }
end
def to_a(*arg)
ary = []
each(*arg) do
element = Rubinius.single_block_arg
ary << element
nil
end
Rubinius::Type.infect ary, self
ary
end
alias_method :entries, :to_a
def to_h(*arg)
h = {}
each_with_index(*arg) do |element, i|
unless element.respond_to?(:to_ary)
raise TypeError, "wrong element type #{element.class} at #{i} (expected array)"
end
ary = element.to_ary
if ary.size != 2
raise ArgumentError, "wrong array length at #{i} (expected 2, was #{ary.size})"
end
h[ary[0]] = ary[1]
end
h
end
def zip(*args)
args.map! do |a|
if a.respond_to? :to_ary
a.to_ary
else
a.to_enum(:each)
end
end
results = []
i = 0
each do
element = Rubinius.single_block_arg
entry = args.inject([element]) do |ary, a|
ary << case a
when Array
a[i]
else
begin
a.next
rescue StopIteration
nil
end
end
end
yield entry if block_given?
results << entry
i += 1
end
return nil if block_given?
results
end
def each_with_index(*args)
return to_enum(:each_with_index, *args) { enumerator_size } unless block_given?
idx = 0
each(*args) do
element = Rubinius.single_block_arg
yield element, idx
idx += 1
end
self
end
def grep(pattern)
ary = []
each do
element = Rubinius.single_block_arg
if pattern === element
Regexp.set_block_last_match
if block_given?
ary << yield(element)
else
ary << element
end
end
end
ary
end
def grep_v(pattern)
ary = []
each do
element = Rubinius.single_block_arg
unless pattern === element
if block_given?
ary << yield(element)
else
ary << element
end
end
end
ary
end
def sort(&prc)
ary = to_a
ary.frozen? ? ary.sort(&prc) : ary.sort!(&prc)
end
class SortedElement
def initialize(val, sort_id)
@value, @sort_id = val, sort_id
end
attr_reader :value
attr_reader :sort_id
def <=>(other)
@sort_id <=> other.sort_id
end
end
def sort_by
return to_enum(:sort_by) { enumerator_size } unless block_given?
# Transform each value to a tuple with the value and it's sort by value
sort_values = map do
element = Rubinius.single_block_arg
SortedElement.new(element, yield(element))
end
# Now sort the tuple according to the sort by value
sort_values.sort!
# Now strip of the tuple leaving the original value
sort_values.map! { |ary| ary.value }
end
def inject(initial=undefined, sym=undefined)
if !block_given? or !undefined.equal?(sym)
if undefined.equal?(sym)
sym = initial
initial = undefined
end
# Do the sym version
sym = sym.to_sym
each do
element = Rubinius.single_block_arg
if undefined.equal? initial
initial = element
else
initial = initial.__send__(sym, element)
end
end
# Block version
else
each do
element = Rubinius.single_block_arg
if undefined.equal? initial
initial = element
else
initial = yield(initial, element)
end
end
end
undefined.equal?(initial) ? nil : initial
end
alias_method :reduce, :inject
def all?
if block_given?
each { |*element| return false unless yield(*element) }
else
each { return false unless Rubinius.single_block_arg }
end
true
end
def any?
if block_given?
each { |*element| return true if yield(*element) }
else
each { return true if Rubinius.single_block_arg }
end
false
end
def cycle(many=nil)
unless block_given?
return to_enum(:cycle, many) do
Rubinius::EnumerableHelper.cycle_size(enumerator_size, many)
end
end
if many
many = Rubinius::Type.coerce_to_collection_index many
return nil if many <= 0
else
many = nil
end
cache = []
each do
element = Rubinius.single_block_arg
cache << element
yield element
end
return nil if cache.empty?
if many
i = 0
many -= 1
while i < many
cache.each { |element| yield element }
i += 1
end
else
while true
cache.each { |element| yield element }
end
end
nil
end
def drop(n)
n = Rubinius::Type.coerce_to_collection_index n
raise ArgumentError, "attempt to drop negative size" if n < 0
ary = to_a
return [] if n > ary.size
ary[n...ary.size]
end
def drop_while
return to_enum(:drop_while) unless block_given?
ary = []
dropping = true
each do
element = Rubinius.single_block_arg
ary << element unless dropping &&= yield(element)
end
ary
end
def each_cons(num)
n = Rubinius::Type.coerce_to_collection_index num
raise ArgumentError, "invalid size: #{n}" if n <= 0
unless block_given?
return to_enum(:each_cons, num) do
enum_size = enumerator_size
if enum_size.nil?
nil
elsif enum_size == 0 || enum_size < n
0
else
enum_size - n + 1
end
end
end
array = []
each do
element = Rubinius.single_block_arg
array << element
array.shift if array.size > n
yield array.dup if array.size == n
end
nil
end
def each_slice(slice_size)
n = Rubinius::Type.coerce_to_collection_index slice_size
raise ArgumentError, "invalid slice size: #{n}" if n <= 0
unless block_given?
return to_enum(:each_slice, slice_size) do
enum_size = enumerator_size
enum_size.nil? ? nil : (enum_size.to_f / n).ceil
end
end
a = []
each do
element = Rubinius.single_block_arg
a << element
if a.length == n
yield a
a = []
end
end
yield a unless a.empty?
nil
end
def find(ifnone=nil)
return to_enum(:find, ifnone) unless block_given?
each do
element = Rubinius.single_block_arg
return element if yield(element)
end
ifnone.call if ifnone
end
alias_method :detect, :find
def find_all
return to_enum(:find_all) { enumerator_size } unless block_given?
ary = []
each do
element = Rubinius.single_block_arg
ary << element if yield(element)
end
ary
end
alias_method :select, :find_all
def find_index(value=undefined)
if undefined.equal? value
return to_enum(:find_index) unless block_given?
i = 0
each do |*args|
return i if yield(*args)
i += 1
end
else
i = 0
each do
e = Rubinius.single_block_arg
return i if e == value
i += 1
end
end
nil
end
def first(n=undefined)
return __take__(n) unless undefined.equal?(n)
each do
return Rubinius.single_block_arg
end
nil
end
def min(n=nil)
if n.nil?
min = undefined
each do
element = Rubinius.single_block_arg
if undefined.equal? min
min = element
else
comp = block_given? ? yield(element, min) : element <=> min
unless comp
raise ArgumentError, "comparison of #{element.class} with #{min} failed"
end
if Comparable.compare_int(comp) < 0
min = element
end
end
end
undefined.equal?(min) ? nil : min
else
if block_given?
sort { |a, b| yield a, b }.take n
else
sort.take n
end
end
end
def max(n=nil)
if n.nil?
max = undefined
each do
element = Rubinius.single_block_arg
if undefined.equal? max
max = element
else
comp = block_given? ? yield(element, max) : element <=> max
unless comp
raise ArgumentError, "comparison of #{element.class} with #{max} failed"
end
if Comparable.compare_int(comp) > 0
max = element
end
end
end
undefined.equal?(max) ? nil : max
else
if block_given?
sort { |a, b| yield a, b }.reverse.take n
else
sort.reverse.take n
end
end
end
def max_by(n=nil)
return to_enum(:max_by, n) { enumerator_size } unless block_given?
if n.nil?
max_element = nil
max_result = undefined
each do
element = Rubinius.single_block_arg
result = yield element
if undefined.equal?(max_result) or \
Rubinius::Type.coerce_to_comparison(max_result, result) < 0
max_element = element
max_result = result
end
end
max_element
else
sort_by { |element| yield element }.reverse.take n
end
end
def min_by(n=nil)
return to_enum(:min_by, n) { enumerator_size } unless block_given?
if n.nil?
min_element = nil
min_result = undefined
each do
element = Rubinius.single_block_arg
result = yield element
if undefined.equal?(min_result) or \
Rubinius::Type.coerce_to_comparison(min_result, result) > 0
min_element = element
min_result = result
end
end
min_element
else
sort_by { |element| yield element }.take n
end
end
def self.sort_proc
@sort_proc ||= Proc.new do |a, b|
unless ret = a <=> b
raise ArgumentError, "Improper spaceship value"
end
ret
end
end
def minmax(&block)
block = Enumerable.sort_proc unless block
first_time = true
min, max = nil
each do
element = Rubinius.single_block_arg
if first_time
min = max = element
first_time = false
else
unless min_cmp = block.call(min, element)
raise ArgumentError, "comparison failed"
end
min = element if min_cmp > 0
unless max_cmp = block.call(max, element)
raise ArgumentError, "comparison failed"
end
max = element if max_cmp < 0
end
end
[min, max]
end
def minmax_by(&block)
return to_enum(:minmax_by) { enumerator_size } unless block_given?
min_element = nil
min_result = undefined
max_element = nil
max_result = undefined
each do
element = Rubinius.single_block_arg
result = yield element
if undefined.equal?(min_result) or \
Rubinius::Type.coerce_to_comparison(min_result, result) > 0
min_element = element
min_result = result
end
if undefined.equal?(max_result) or \
Rubinius::Type.coerce_to_comparison(max_result, result) < 0
max_element = element
max_result = result
end
end
[min_element, max_element]
end
def none?
if block_given?
each { |*element| return false if yield(*element) }
else
each { return false if Rubinius.single_block_arg }
end
return true
end
def one?
found_one = false
if block_given?
each do |*element|
if yield(*element)
return false if found_one
found_one = true
end
end
else
each do
if Rubinius.single_block_arg
return false if found_one
found_one = true
end
end
end
found_one
end
def partition
return to_enum(:partition) { enumerator_size } unless block_given?
left = []
right = []
each do
element = Rubinius.single_block_arg
yield(element) ? left.push(element) : right.push(element)
end
return [left, right]
end
def reject
return to_enum(:reject) { enumerator_size } unless block_given?
ary = []
each do
element = Rubinius.single_block_arg
ary << element unless yield(element)
end
ary
end
def reverse_each(&block)
return to_enum(:reverse_each) { enumerator_size } unless block_given?
# There is no other way then to convert to an array first... see 1.9's source.
to_a.reverse_each(&block)
self
end
def take(n)
n = Rubinius::Type.coerce_to_collection_index n
raise ArgumentError, "attempt to take negative size: #{n}" if n < 0
array = []
unless n <= 0
each do
element = Rubinius.single_block_arg
array << element
break if array.size >= n
end
end
array
end
alias_method :__take__, :take
private :__take__
def take_while
return to_enum(:take_while) unless block_given?
array = []
each do |element|
return array unless yield(element)
array << element
end
array
end
def include?(obj)
each { return true if Rubinius.single_block_arg == obj }
false
end
alias_method :member?, :include?
end
|
# Generated by jeweler
# DO NOT EDIT THIS FILE DIRECTLY
# Instead, edit Jeweler::Tasks in Rakefile, and run the gemspec command
# -*- encoding: utf-8 -*-
Gem::Specification.new do |s|
s.name = %q{noodall-core}
s.version = "0.0.1"
s.required_rubygems_version = Gem::Requirement.new(">= 0") if s.respond_to? :required_rubygems_version=
s.authors = ["Steve England"]
s.date = %q{2010-09-29}
s.description = %q{Core data objects for Noodall}
s.email = %q{steve@wearebeef.co.uk}
s.extra_rdoc_files = [
"LICENSE",
"README.rdoc"
]
s.files = [
".document",
".gitignore",
"Gemfile",
"Gemfile.lock",
"LICENSE",
"README.rdoc",
"Rakefile",
"VERSION",
"lib/noodall-core.rb",
"lib/noodall/component.rb",
"lib/noodall/global_update_time.rb",
"lib/noodall/multi_parameter_attributes.rb",
"lib/noodall/node.rb",
"lib/noodall/permalink.rb",
"lib/noodall/search.rb",
"lib/noodall/tagging.rb",
"noodall-core.gemspec",
"spec/component_spec.rb",
"spec/factories/component.rb",
"spec/factories/node.rb",
"spec/node_spec.rb",
"spec/spec_helper.rb"
]
s.homepage = %q{http://github.com/beef/noodall-core}
s.rdoc_options = ["--charset=UTF-8"]
s.require_paths = ["lib"]
s.rubygems_version = %q{1.3.7}
s.summary = %q{Core data objects for Noodall}
s.test_files = [
"spec/component_spec.rb",
"spec/factories/component.rb",
"spec/factories/node.rb",
"spec/node_spec.rb",
"spec/spec_helper.rb"
]
if s.respond_to? :specification_version then
current_version = Gem::Specification::CURRENT_SPECIFICATION_VERSION
s.specification_version = 3
if Gem::Version.new(Gem::VERSION) >= Gem::Version.new('1.2.0') then
s.add_runtime_dependency(%q<mongo_mapper>, ["= 0.8.4"])
s.add_runtime_dependency(%q<ramdiv-mongo_mapper_acts_as_tree>, ["= 0.1.1"])
s.add_runtime_dependency(%q<canable>, ["= 0.1.1"])
s.add_runtime_dependency(%q<ruby-stemmer>, [">= 0"])
else
s.add_dependency(%q<mongo_mapper>, ["= 0.8.4"])
s.add_dependency(%q<ramdiv-mongo_mapper_acts_as_tree>, ["= 0.1.1"])
s.add_dependency(%q<canable>, ["= 0.1.1"])
s.add_dependency(%q<ruby-stemmer>, [">= 0"])
end
else
s.add_dependency(%q<mongo_mapper>, ["= 0.8.4"])
s.add_dependency(%q<ramdiv-mongo_mapper_acts_as_tree>, ["= 0.1.1"])
s.add_dependency(%q<canable>, ["= 0.1.1"])
s.add_dependency(%q<ruby-stemmer>, [">= 0"])
end
end
Modified gemspec
# Generated by jeweler
# DO NOT EDIT THIS FILE DIRECTLY
# Instead, edit Jeweler::Tasks in Rakefile, and run the gemspec command
# -*- encoding: utf-8 -*-
Gem::Specification.new do |s|
s.name = %q{noodall-core}
s.version = "0.0.1"
s.required_rubygems_version = Gem::Requirement.new(">= 0") if s.respond_to? :required_rubygems_version=
s.authors = ["Steve England"]
s.date = %q{2010-09-29}
s.description = %q{Core data objects for Noodall}
s.email = %q{steve@wearebeef.co.uk}
s.extra_rdoc_files = [
"LICENSE",
"README.rdoc"
]
s.files = [
".document",
".gitignore",
"Gemfile",
"Gemfile.lock",
"LICENSE",
"README.rdoc",
"Rakefile",
"VERSION",
"lib/noodall-core.rb",
"lib/noodall/component.rb",
"lib/noodall/global_update_time.rb",
"lib/noodall/indexer.rb",
"lib/noodall/multi_parameter_attributes.rb",
"lib/noodall/node.rb",
"lib/noodall/permalink.rb",
"lib/noodall/search.rb",
"lib/noodall/tagging.rb",
"noodall-core.gemspec",
"spec/component_spec.rb",
"spec/factories/component.rb",
"spec/factories/node.rb",
"spec/node_spec.rb",
"spec/spec_helper.rb"
]
s.homepage = %q{http://github.com/beef/noodall-core}
s.rdoc_options = ["--charset=UTF-8"]
s.require_paths = ["lib"]
s.rubygems_version = %q{1.3.7}
s.summary = %q{Core data objects for Noodall}
s.test_files = [
"spec/component_spec.rb",
"spec/factories/component.rb",
"spec/factories/node.rb",
"spec/node_spec.rb",
"spec/spec_helper.rb"
]
if s.respond_to? :specification_version then
current_version = Gem::Specification::CURRENT_SPECIFICATION_VERSION
s.specification_version = 3
if Gem::Version.new(Gem::VERSION) >= Gem::Version.new('1.2.0') then
s.add_runtime_dependency(%q<mongo_mapper>, ["= 0.8.4"])
s.add_runtime_dependency(%q<ramdiv-mongo_mapper_acts_as_tree>, ["= 0.1.1"])
s.add_runtime_dependency(%q<canable>, ["= 0.1.1"])
s.add_runtime_dependency(%q<ruby-stemmer>, [">= 0"])
else
s.add_dependency(%q<mongo_mapper>, ["= 0.8.4"])
s.add_dependency(%q<ramdiv-mongo_mapper_acts_as_tree>, ["= 0.1.1"])
s.add_dependency(%q<canable>, ["= 0.1.1"])
s.add_dependency(%q<ruby-stemmer>, [">= 0"])
end
else
s.add_dependency(%q<mongo_mapper>, ["= 0.8.4"])
s.add_dependency(%q<ramdiv-mongo_mapper_acts_as_tree>, ["= 0.1.1"])
s.add_dependency(%q<canable>, ["= 0.1.1"])
s.add_dependency(%q<ruby-stemmer>, [">= 0"])
end
end
|
module MM
module Console
module SimpleCommand
class Help
include Base
def self.keys
['help']
end
def self.doc(runtime)
"show this doc"
end
def execute(runtime)
sections = Sections.instance_methods.sort.collect(&:titleize)
runtime[:list] = SelectingList.new(sections, Section)
end
end
module Sections
def quick_start(runtime)
%Q{
* Set a variable named site, for integrating with Mingle project.
- Example: site = 'http://login:password@mingle.domain.com/projects/project_identifier'
- Once it is setted, you'll see your project_identifier in the prompt.
* Type 'tabs' to view all tabs in your project
- You can select tab by index to view card list of the tab
* Type 'Start fix #1'
- Run transition 'Start fix' on card number of which is 1
- The context would be changed to card 1, so that you can view details of the card directly
* Type 'properties'
- Show all card properties have value
* Type 'description'
- Show card 1 description, it is plain text.
* Type any property name of card
- Show property value of the card
* Normally, we may have complex transition need to run after you fixed bug.
- For example, we have a transition called 'Complete fix', which need you type in revision number after you fixed the bug.
- To run this transition, you setup a transition script: Complete fix #1 with revision => \#{revision}
- When mm context is card 1, the script can be: Complete fix with revision => \#{revision}.
- You can use runtime variable to store the transition script: fixed = 'Complete fix with revision => \#{revision}'
* Type '`svn commit -m "[fixed]: some details"', after you fixed the bug
- Any command start with '`' would be run as system command out of mm console
- On Mac OS, or any system that command 'which' works, you don't need start with '`' unless the system command is conflic with something inside MM e.g. card attribute name, transition name.
- Specify runtime variable in '[' and ']'
- This command will do the followings:
- run svn commit to commit code with message "Complete fix #1: some details"
- parse output of svn command to get revision number, for example, this time we get 25
- run transition: Complete fix #1 with revision => 25
}
end
def basic_commands(runtime)
Processor::REGISTERED_COMMANDS.collect do |cmd|
r = " - #{cmd.name.titleize.split('/').last}: #{cmd.doc(runtime)}\n"
r << " - command keys or match pattern: #{cmd.keys.join(", ")}\n"
r
end.join("\n")
end
def transition(runtime)
%{Run Mingle Transition script grammar:
1. <transition name> <card number> with <properties> <comment>
2. card number: #number
3. properties: <property_name> => <property_value>, <property_name> => <property_value>
4. comment: (comment content)
5. examples
* Run transition 'Start development' on card 4012: Start development #4012
* Run transition 'Start development' on card 4012 and set required user input property owner to xli: Start development #4012 with owner => xli
* Run transition 'Start development' on card 4012 and add a comment to card: Start development #4012 (comment detail)
6. if current context is the card you want to apply the transition, you don't need specify the <card number>, for example:
* Run transition 'Start development' on card 4012: mingle> story 4012> Start development
}
end
def context(runtime)
%{Context is something MM remembered for you. There are 2 kinds of context, one is card, another is view. Context will be displayed as prompt, so that you can know what's context you have currently. For example, if your context is a card, you can simply type in 'open' to open the card in your default browser.
}
end
def variables(runtime)
"MM is built in runtime variables, type 'v' to #{Console::SimpleCommand::RuntimeVariables.doc(runtime)}"
end
end
class Section
include Sections
def initialize(name)
@name = name
end
def execute(runtime)
send(@name.downcase.split(' ').join('_'), runtime)
end
end
end
end
end
update help doc
module MM
module Console
module SimpleCommand
class Help
include Base
def self.keys
['help']
end
def self.doc(runtime)
"show this doc"
end
def execute(runtime)
sections = Sections.instance_methods.sort.collect(&:titleize)
runtime[:list] = SelectingList.new(sections, Section)
end
end
module Sections
def quick_start(runtime)
%Q{
* Set a variable named site, for integrating with Mingle project.
- Example: site = 'http://login:password@mingle.domain.com/projects/project_identifier'
- Once it is setted, you'll see your project_identifier in the prompt.
* Type 'tabs' to view all tabs in your project
- You can select tab by index to view card list of the tab
* Type 'Start fix #1'
- Run transition 'Start fix' on card number of which is 1
- The context would be changed to card 1, so that you can view details of the card directly
* Type 'properties'
- Show all card properties have value
* Type 'description'
- Show card 1 description, it is plain text.
* Type any property name of card
- Show property value of the card
* Normally, we may have complex transition need to run after you fixed bug.
- For example, we have a transition called 'Complete fix', which need you type in revision number after you fixed the bug.
- To run this transition, you setup a transition script: Complete fix #1 with revision => \#{revision}
- When mm context is card 1, the script can be: Complete fix with revision => \#{revision}.
- You can use runtime variable to store the transition script: fixed = 'Complete fix with revision => \#{revision}'
* Type '`svn commit -m "[fixed]: some details"', after you fixed the bug
- Any command start with '`' would be run as system command out of mm console
- On Mac OS, or any system that command 'which' works, you don't need start with '`' unless the system command is conflic with something inside MM e.g. card attribute name, transition name.
- Specify runtime variable in '[' and ']'
- This command will do the followings:
- run svn commit to commit code with message "Complete fix #1: some details"
- parse output of svn command to get revision number, for example, this time we get 25
- run transition: Complete fix #1 with revision => 25
}
end
def basic_commands(runtime)
Processor::REGISTERED_COMMANDS.collect do |cmd|
r = " - #{cmd.name.titleize.split('/').last}: #{cmd.doc(runtime)}\n"
r << " - command keys or match pattern: #{cmd.keys.join(", ")}\n"
r
end.join("\n")
end
def transition(runtime)
%{Run Mingle Transition script grammar:
1. <transition name> <card number> with <properties> <comment>
2. card number: #number
3. properties: <property_name> => <property_value>, <property_name> => <property_value>
4. comment: (comment content)
5. examples
* Run transition 'Start development' on card 4012: Start development #4012
* Run transition 'Start development' on card 4012 and set required user input property owner to xli: Start development #4012 with owner => xli
* Run transition 'Start development' on card 4012 and add a comment to card: Start development #4012 (comment detail)
6. if current context is the card you want to apply the transition, you don't need specify the <card number>, for example:
* Run transition 'Start development' on card 4012: mingle> story 4012> Start development
}
end
def context(runtime)
%{Context is something MM remembered for you. There are 2 kinds of context, one is card, another is view. Context will be displayed as prompt, so that you can know what's context you have currently. For example, if your context is a card, you can simply type in 'open' to open the card in your default browser.
}
end
def variables(runtime)
"MM is built in runtime variables, type 'v' to #{Console::SimpleCommand::RuntimeVariables.doc(runtime)}"
end
def integrating_with_mingle(runtime)
%{You need setup a variable in the MM console named site for your mingle project url with your login name and password, see the following example log how to setup it:
>>>
mm, version 0.0.3
Type 'help' for usage
> > site='http://your_login:your_password@your_mingle_server.com/projects/your_project_identifier'
http://your_login:your_password@your_mingle_server.com/projects/your_project_identifier
your_project_identifier> >
}
end
end
class Section
include Sections
def initialize(name)
@name = name
end
def execute(runtime)
send(@name.downcase.split(' ').join('_'), runtime)
end
end
end
end
end
|
module Modulr
class GlobalExportCollector < Collector
def initialize(options = {})
@global = options[:global]
super
end
def to_js(buffer = '')
buffer << "var #{@global} = (function() {\n"
buffer << File.read(PATH_TO_MODULR_SYNC_JS)
buffer << transport
buffer << "\n return require('#{main.id}');\n"
buffer << "})();\n"
end
end
end
Allow the global export method to handle nested namespaces.
module Modulr
class GlobalExportCollector < Collector
def initialize(options = {})
@global = options[:global]
super
end
def to_js(buffer = '')
buffer << "#{define_global} = (function() {\n"
buffer << File.read(PATH_TO_MODULR_SYNC_JS)
buffer << transport
buffer << "\n return require('#{main.id}');\n"
buffer << "})();\n"
end
def define_global
if @global.include?('.')
props = @global.split('.')
str = props.shift
results = "var #{str};"
props.each do |prop|
results << "\n#{str} = #{str} || {};"
str << ".#{prop}"
end
"#{results}\n#{str}"
else
"var #{@global}"
end
end
end
end |
# encoding: UTF-8
module MongoMapper
module Plugins
module Callbacks
def self.configure(model)
model.class_eval do
extend ::ActiveModel::Callbacks
include ::ActiveModel::Validations::Callbacks
define_model_callbacks :validation, :save, :create, :update, :destroy, :only => [ :before, :after ]
define_model_callbacks :initialize, :find, :only => :after
end
end
module InstanceMethods
def valid?(context = nil)
context ||= (new_record? ? :create : :update)
super(context) && errors.empty?
end
def destroy
_run_destroy_callbacks { super }
end
private
def create_or_update(*)
_run_save_callbacks do
super
end
end
def create(*)
_run_create_callbacks { super }
end
def update(*)
_run_update_callbacks { super }
end
end
end
end
end
Use #run_callbacks instead of internal method
# encoding: UTF-8
module MongoMapper
module Plugins
module Callbacks
def self.configure(model)
model.class_eval do
extend ::ActiveModel::Callbacks
include ::ActiveModel::Validations::Callbacks
define_model_callbacks :validation, :save, :create, :update, :destroy, :only => [ :before, :after ]
define_model_callbacks :initialize, :find, :only => :after
end
end
module InstanceMethods
def valid?(context = nil)
context ||= (new_record? ? :create : :update)
super(context) && errors.empty?
end
def destroy
run_callbacks(:destroy) { super }
end
private
def create_or_update(*)
run_callbacks(:save) do
super
end
end
def create(*)
run_callbacks(:create) { super }
end
def update(*)
run_callbacks(:update) { super }
end
end
end
end
end
|
# encoding: UTF-8
module Texas
module Template
module Helper
#
# Basic helper methods for finding files and template handling
#
module Base
def default_search_paths
[
__path__,
path_with_templates_basename,
build_path,
build.root
].compact
end
# Returns a subdir with the current template's basename
#
# Example:
# In /example/introduction.tex.erb this method
# returns "/example/introduction" if that directory exists
# and nil if it doesn't.
#
def path_with_templates_basename
subdir = Template.basename @output_filename
File.directory?(subdir) ? subdir : nil
end
# Searches for the given file in +possible_paths+, also checking for +possible_exts+ as extensions
#
# Example:
# find_template_file(["figures", "titel"], [:pdf, :png])
# # => will check
# figures/titel.pdf
# figures/titel.png
# tmp/figures/titel.pdf
# tmp/figures/titel.png
#
def find_template_file(parts, possible_exts = [], possible_paths = default_search_paths)
possible_paths.each do |base|
([""] + possible_exts).each do |ext|
path = parts.clone.map(&:to_s).map(&:dup)
path.unshift base.to_s
path.last << ".#{ext}" unless ext.empty?
filename = File.join(*path)
return filename if File.exist?(filename) && !File.directory?(filename)
end
end
nil
end
# Searches for the given file and raises an error if it is not found anywhere
#
def find_template_file!(parts, possible_exts = [], possible_paths = default_search_paths)
if filename = find_template_file(parts, possible_exts, possible_paths)
filename
else
raise TemplateError.new(self, "File doesnot exists anywhere: #{parts.size > 1 ? parts : parts.first}")
end
end
# Renders a partial with the given locals.
#
def partial(name, locals = {})
render("_#{name}", locals)
end
# Renders a template with the given locals.
#
def render(options, locals = {})
if [String, Symbol].include?(options.class)
options = {:templates => [options]}
end
if name = options[:template]
options[:templates] = [name]
end
if glob = options[:glob]
options[:templates] = templates_by_glob(glob)
end
options[:locals] = locals unless locals.empty?
render_as_array(options).join(options[:join].to_s)
end
def render_as_array(options)
options[:templates].map do |name|
template_file = find_template_file!([name], template_extensions)
Texas::Template.create(template_file, build).__run__(options[:locals])
end
end
# Returns all extensions the Template::Runner can handle.
#
def template_extensions
Texas::Template.known_extensions
end
# Returns all templates in the current template's path matching the given glob
#
def templates_by_glob(glob = "*")
files = Dir[File.join(__path__, glob)]
templates = files.map do |f|
Texas::Template.basename(f).gsub(__path__, '')
end
templates.uniq.sort
end
end
end
end
end
Fix typo in error message.
# encoding: UTF-8
module Texas
module Template
module Helper
#
# Basic helper methods for finding files and template handling
#
module Base
def default_search_paths
[
__path__,
path_with_templates_basename,
build_path,
build.root
].compact
end
# Returns a subdir with the current template's basename
#
# Example:
# In /example/introduction.tex.erb this method
# returns "/example/introduction" if that directory exists
# and nil if it doesn't.
#
def path_with_templates_basename
subdir = Template.basename @output_filename
File.directory?(subdir) ? subdir : nil
end
# Searches for the given file in +possible_paths+, also checking for +possible_exts+ as extensions
#
# Example:
# find_template_file(["figures", "titel"], [:pdf, :png])
# # => will check
# figures/titel.pdf
# figures/titel.png
# tmp/figures/titel.pdf
# tmp/figures/titel.png
#
def find_template_file(parts, possible_exts = [], possible_paths = default_search_paths)
possible_paths.each do |base|
([""] + possible_exts).each do |ext|
path = parts.clone.map(&:to_s).map(&:dup)
path.unshift base.to_s
path.last << ".#{ext}" unless ext.empty?
filename = File.join(*path)
return filename if File.exist?(filename) && !File.directory?(filename)
end
end
nil
end
# Searches for the given file and raises an error if it is not found anywhere
#
def find_template_file!(parts, possible_exts = [], possible_paths = default_search_paths)
if filename = find_template_file(parts, possible_exts, possible_paths)
filename
else
raise TemplateError.new(self, "File doesn't exists anywhere: #{parts.size > 1 ? parts : parts.first}")
end
end
# Renders a partial with the given locals.
#
def partial(name, locals = {})
render("_#{name}", locals)
end
# Renders a template with the given locals.
#
def render(options, locals = {})
if [String, Symbol].include?(options.class)
options = {:templates => [options]}
end
if name = options[:template]
options[:templates] = [name]
end
if glob = options[:glob]
options[:templates] = templates_by_glob(glob)
end
options[:locals] = locals unless locals.empty?
render_as_array(options).join(options[:join].to_s)
end
def render_as_array(options)
options[:templates].map do |name|
template_file = find_template_file!([name], template_extensions)
Texas::Template.create(template_file, build).__run__(options[:locals])
end
end
# Returns all extensions the Template::Runner can handle.
#
def template_extensions
Texas::Template.known_extensions
end
# Returns all templates in the current template's path matching the given glob
#
def templates_by_glob(glob = "*")
files = Dir[File.join(__path__, glob)]
templates = files.map do |f|
Texas::Template.basename(f).gsub(__path__, '')
end
templates.uniq.sort
end
end
end
end
end
|
module Moysklad::Entities
class Counterparty < Entity
attribute :email, String
attribute :phone, String
attribute :fax, String
attribute :tags, Array[String]
attribute :actualAddress, String
attribute :legalTitle, String
attribute :legalAddress, String
attribute :inn, String
attribute :kpp, String
attribute :ogrn, String
attribute :okpo, String
attribute :description, String
end
end
add syncid to counterparty
https://dev.moysklad.ru/doc/api/remap/1.1/#%D0%BA%D0%BE%D0%BD%D1%82%D1%80%D0%B0%D0%B3%D0%B5%D0%BD%D1%82-%D0%BA%D0%BE%D0%BD%D1%82%D1%80%D0%B0%D0%B3%D0%B5%D0%BD%D1%82%D1%8B-post
module Moysklad::Entities
class Counterparty < Entity
attribute :email, String
attribute :phone, String
attribute :fax, String
attribute :tags, Array[String]
attribute :actualAddress, String
attribute :legalTitle, String
attribute :legalAddress, String
attribute :inn, String
attribute :kpp, String
attribute :ogrn, String
attribute :okpo, String
attribute :description, String
attribute :syncid, String
end
end
|
require 'test_helper'
module SingersRepresenter
include Roar::Representer::JSON
collection :singers, :extend => SingerRepresenter
def singers
each
end
end
class ResponderTest < ActionController::TestCase
include Roar::Rails::TestCase
class BaseController < ActionController::Base
include Roar::Rails::ControllerAdditions
respond_to :json
def execute
instance_exec &@block
end
end
class UniqueRepresentsOptionsTest < MiniTest::Spec
class One < BaseController
represents :json, Object
end
class Two < BaseController
represents :json, Singer
end
it "each subclass of a roar-augmented controller can represent different things" do
One.represents_options.wont_equal Two.represents_options
end
it "does not share RepresenterComputer instances when inheriting" do
Class.new(One) do
represents :json, Singer
end.represents_options.wont_equal One.represents_options
end
it "inherits when subclass doesn't call ::represents" do
Class.new(One).represents_options.must_equal One.represents_options
end
end
class JsonResponseTest < ResponderTest
SingerRepresenter = ::SingerRepresenter
class SingersController < BaseController
end
tests SingersController
test "set Content-type to json" do
get do
singer = Singer.new("Bumi")
respond_with singer
end
@response.body.must_equal singer.to_json
@response.headers["Content-Type"].must_match "application/json"
end
end
class SuppressingRepresenterForFormatTest < ResponderTest
Singer = Struct.new(:name, :age)
class SingersController < BaseController
end
tests SingersController
test "returns non-represented json of model by falling back to Rails default responding" do
singer = Singer.new('Bumi', 42)
get do
respond_with singer, :represented_formats => []
end
assert_equal singer.to_json, @response.body
end
end
class ProvidingRepresenterForFormatTest < ResponderTest
SingerRepresenter = ::SingerRepresenter
class SingersController < BaseController
represents :json, :entity => SingerRepresenter
end
tests SingersController
test "returns represented json of model" do
singer = Singer.new('Bumi')
get do
respond_with singer, :represented_formats => [:json]
end
assert_equal %{{"name":"Bumi","links":[{"rel":"self","href":"http://roar.apotomo.de/singers/Bumi"}]}}, @response.body
end
end
class XmlResponseTest < ResponderTest
module SingerRepresenter
include Roar::Representer::XML
property :name
self.representation_wrap = :singer
end
class SingersController < BaseController
respond_to :xml
end
tests SingersController
test "set Content-type to xml" do
get :xml do
singer = Singer.new("Bumi")
respond_with singer
end
@response.body.must_equal_xml '<singer><name>Bumi</name></singer>'
@response.headers["Content-Type"].must_match "application/xml"
end
end
class UnconfiguredControllerTest < ResponderTest
SingersRepresenter = ::SingersRepresenter
SingerRepresenter = ::SingerRepresenter
class SingersController < BaseController
end
tests SingersController
test "responder finds SingerRepresenter representer by convention" do
get do
singer = Singer.new("Bumi")
respond_with singer
end
@response.body.must_equal singer.to_json
end
test "responder finds SingersRepresenter for collections by convention" do
get do
singers = [Singer.new("Bumi"), Singer.new("Bjork"), Singer.new("Sinead")]
respond_with singers
end
@response.body.must_equal({:singers => singers.collect {|s| s.extend(SingerRepresenter).to_hash }}.to_json)
end
test "responder allows empty response bodies to pass through" do
put do
singer = Singer.new("Bumi")
respond_with singer
end
end
end
class RespondToOptionsOverridingConfigurationTest < ResponderTest
class SingersController < BaseController
represents :json, Object
end
tests SingersController
test "responder uses passed representer" do
get do
singer = Singer.new("Bumi")
respond_with singer, :represent_with => SingerRepresenter
end
assert_equal singer.to_json, @response.body
end
test "responder uses passed representer for collection" do
get do
singers = [Singer.new("Bumi"), Singer.new("Bjork"), Singer.new("Sinead")]
respond_with singers, :represent_with => SingersRepresenter
end
assert_equal({:singers => singers.collect {|s| s.extend(SingerRepresenter).to_hash }}.to_json, @response.body)
end
test "responder uses passed representer for collection items when :represent_items_with set" do
get do
singers = [Singer.new("Bumi"), Singer.new("Bjork"), Singer.new("Sinead")]
respond_with singers, :represent_items_with => SingerRepresenter
end
assert_equal(singers.collect {|s| s.extend(SingerRepresenter).to_hash }.to_json, @response.body)
end
end
class ConfiguredControllerTest < ResponderTest
class MusicianController < BaseController
represents :json, :entity => SingerRepresenter, :collection => SingersRepresenter
end
tests MusicianController
test "responder uses configured representer" do
get do
singer = Singer.new("Bumi")
respond_with singer
end
@response.body.must_equal singer.to_json
end
test "responder uses configured representer for collection" do
get do
singers = [Singer.new("Bumi"), Singer.new("Bjork"), Singer.new("Sinead")]
respond_with singers
end
assert_equal({:singers => singers.collect {|s| s.extend(SingerRepresenter).to_hash }}.to_json, @response.body)
end
end
class ControllerWithDecoratorTest < ResponderTest
class SingerRepresentation < Representable::Decorator
include Roar::Representer::JSON
include Roar::Representer::JSON::HAL
property :name
link(:self) { "http://singers/#{represented.name}" }
end
class MusicianController < BaseController
represents :json, :entity => SingerRepresentation
end
tests MusicianController
test "rendering uses decorating representer" do
get do
singer = Singer.new("Bumi")
respond_with singer
end
assert_equal %{{"name":"Bumi","_links":{"self":{"href":"http://singers/Bumi"}}}}, @response.body
end
test "parsing uses decorating representer" do # FIXME: move to controller_test.
created_singer = nil
put singer.to_json do
created_singer = consume!(Singer.new)
respond_with created_singer
end
created_singer.must_be_kind_of(Singer)
created_singer.name.must_equal "Bumi"
end
end
class PassingUserOptionsTest < ResponderTest
# FIXME: should be in generic roar-rails test.
module DynamicSingerRepresenter
include Roar::Representer::JSON
property :name, :setter => lambda { |val, opts| self.name = "#{opts[:title]} #{val}" },
:getter => lambda { |opts| "#{opts[:title]} #{name}" }
end
class MusicianController < BaseController
represents :json, :entity => DynamicSingerRepresenter, :collection => SingersRepresenter
end
tests MusicianController
test "passes options to entity representer" do
get do
singer = Singer.new("Bumi")
respond_with singer, :title => "Mr."
end
@response.body.must_equal("{\"name\":\"Mr. Bumi\"}")
end
test "passes options to explicit collection representer" do
get do
respond_with [Singer.new("Bumi"), Singer.new("Iggy")], :title => "Mr.", :represent_items_with => DynamicSingerRepresenter
end
@response.body.must_equal("[{\"name\":\"Mr. Bumi\"},{\"name\":\"Mr. Iggy\"}]")
end
test "passes options in #consume!" do
created_singer = nil
put singer.to_json do
created_singer = consume!(Singer.new, :title => "Mr.")
respond_with created_singer
end
created_singer.must_be_kind_of(Singer)
created_singer.name.must_equal "Mr. Bumi"
end
end
class FallbackTest < ResponderTest
class MusicianController < BaseController
represents :json, Object
end
tests MusicianController
test "passes options to entity representer" do
get do
render :text => "Rendered template"
end
@response.body.must_equal("Rendered template")
end
end
def get(format=:json, &block)
@controller.instance_eval do
@block = block
end
super :execute, :format => format
end
def put(body="", format=:json, &block)
@controller.instance_eval do
@block = block
end
super :execute, body, :format => format
end
def singer(name="Bumi")
singer = Musician.new(name)
singer.extend SingerRepresenter
end
def singers
[singer("Bumi"), singer("Bjork"), singer("Sinead")]
end
end
Add test for supressing the representation for specific formats from the application configuration.
require 'test_helper'
module SingersRepresenter
include Roar::Representer::JSON
collection :singers, :extend => SingerRepresenter
def singers
each
end
end
class ResponderTest < ActionController::TestCase
include Roar::Rails::TestCase
class BaseController < ActionController::Base
include Roar::Rails::ControllerAdditions
respond_to :json
def execute
instance_exec &@block
end
end
class UniqueRepresentsOptionsTest < MiniTest::Spec
class One < BaseController
represents :json, Object
end
class Two < BaseController
represents :json, Singer
end
it "each subclass of a roar-augmented controller can represent different things" do
One.represents_options.wont_equal Two.represents_options
end
it "does not share RepresenterComputer instances when inheriting" do
Class.new(One) do
represents :json, Singer
end.represents_options.wont_equal One.represents_options
end
it "inherits when subclass doesn't call ::represents" do
Class.new(One).represents_options.must_equal One.represents_options
end
end
class JsonResponseTest < ResponderTest
SingerRepresenter = ::SingerRepresenter
class SingersController < BaseController
end
tests SingersController
test "set Content-type to json" do
get do
singer = Singer.new("Bumi")
respond_with singer
end
@response.body.must_equal singer.to_json
@response.headers["Content-Type"].must_match "application/json"
end
end
class SuppressingRepresenterForFormatTest < ResponderTest
Singer = Struct.new(:name, :age)
class SingersController < BaseController
end
tests SingersController
test "returns non-represented json of model by falling back to Rails default responding when supressed in respond_with" do
singer = Singer.new('Bumi', 42)
get do
respond_with singer, :represented_formats => []
end
assert_equal singer.to_json, @response.body
end
test "return non-represented json model by falling back to Rails default responding when supressed in the configuration" do
singer = Singer.new('Bumi', 42)
Rails.application.config.representer.represented_formats = []
get do
respond_with singer
end
Rails.application.config.representer.represented_formats = nil
assert_equal singer.to_json, @response.body
end
end
class ProvidingRepresenterForFormatTest < ResponderTest
SingerRepresenter = ::SingerRepresenter
class SingersController < BaseController
represents :json, :entity => SingerRepresenter
end
tests SingersController
test "returns represented json of model" do
singer = Singer.new('Bumi')
get do
respond_with singer, :represented_formats => [:json]
end
assert_equal %{{"name":"Bumi","links":[{"rel":"self","href":"http://roar.apotomo.de/singers/Bumi"}]}}, @response.body
end
end
class XmlResponseTest < ResponderTest
module SingerRepresenter
include Roar::Representer::XML
property :name
self.representation_wrap = :singer
end
class SingersController < BaseController
respond_to :xml
end
tests SingersController
test "set Content-type to xml" do
get :xml do
singer = Singer.new("Bumi")
respond_with singer
end
@response.body.must_equal_xml '<singer><name>Bumi</name></singer>'
@response.headers["Content-Type"].must_match "application/xml"
end
end
class UnconfiguredControllerTest < ResponderTest
SingersRepresenter = ::SingersRepresenter
SingerRepresenter = ::SingerRepresenter
class SingersController < BaseController
end
tests SingersController
test "responder finds SingerRepresenter representer by convention" do
get do
singer = Singer.new("Bumi")
respond_with singer
end
@response.body.must_equal singer.to_json
end
test "responder finds SingersRepresenter for collections by convention" do
get do
singers = [Singer.new("Bumi"), Singer.new("Bjork"), Singer.new("Sinead")]
respond_with singers
end
@response.body.must_equal({:singers => singers.collect {|s| s.extend(SingerRepresenter).to_hash }}.to_json)
end
test "responder allows empty response bodies to pass through" do
put do
singer = Singer.new("Bumi")
respond_with singer
end
end
end
class RespondToOptionsOverridingConfigurationTest < ResponderTest
class SingersController < BaseController
represents :json, Object
end
tests SingersController
test "responder uses passed representer" do
get do
singer = Singer.new("Bumi")
respond_with singer, :represent_with => SingerRepresenter
end
assert_equal singer.to_json, @response.body
end
test "responder uses passed representer for collection" do
get do
singers = [Singer.new("Bumi"), Singer.new("Bjork"), Singer.new("Sinead")]
respond_with singers, :represent_with => SingersRepresenter
end
assert_equal({:singers => singers.collect {|s| s.extend(SingerRepresenter).to_hash }}.to_json, @response.body)
end
test "responder uses passed representer for collection items when :represent_items_with set" do
get do
singers = [Singer.new("Bumi"), Singer.new("Bjork"), Singer.new("Sinead")]
respond_with singers, :represent_items_with => SingerRepresenter
end
assert_equal(singers.collect {|s| s.extend(SingerRepresenter).to_hash }.to_json, @response.body)
end
end
class ConfiguredControllerTest < ResponderTest
class MusicianController < BaseController
represents :json, :entity => SingerRepresenter, :collection => SingersRepresenter
end
tests MusicianController
test "responder uses configured representer" do
get do
singer = Singer.new("Bumi")
respond_with singer
end
@response.body.must_equal singer.to_json
end
test "responder uses configured representer for collection" do
get do
singers = [Singer.new("Bumi"), Singer.new("Bjork"), Singer.new("Sinead")]
respond_with singers
end
assert_equal({:singers => singers.collect {|s| s.extend(SingerRepresenter).to_hash }}.to_json, @response.body)
end
end
class ControllerWithDecoratorTest < ResponderTest
class SingerRepresentation < Representable::Decorator
include Roar::Representer::JSON
include Roar::Representer::JSON::HAL
property :name
link(:self) { "http://singers/#{represented.name}" }
end
class MusicianController < BaseController
represents :json, :entity => SingerRepresentation
end
tests MusicianController
test "rendering uses decorating representer" do
get do
singer = Singer.new("Bumi")
respond_with singer
end
assert_equal %{{"name":"Bumi","_links":{"self":{"href":"http://singers/Bumi"}}}}, @response.body
end
test "parsing uses decorating representer" do # FIXME: move to controller_test.
created_singer = nil
put singer.to_json do
created_singer = consume!(Singer.new)
respond_with created_singer
end
created_singer.must_be_kind_of(Singer)
created_singer.name.must_equal "Bumi"
end
end
class PassingUserOptionsTest < ResponderTest
# FIXME: should be in generic roar-rails test.
module DynamicSingerRepresenter
include Roar::Representer::JSON
property :name, :setter => lambda { |val, opts| self.name = "#{opts[:title]} #{val}" },
:getter => lambda { |opts| "#{opts[:title]} #{name}" }
end
class MusicianController < BaseController
represents :json, :entity => DynamicSingerRepresenter, :collection => SingersRepresenter
end
tests MusicianController
test "passes options to entity representer" do
get do
singer = Singer.new("Bumi")
respond_with singer, :title => "Mr."
end
@response.body.must_equal("{\"name\":\"Mr. Bumi\"}")
end
test "passes options to explicit collection representer" do
get do
respond_with [Singer.new("Bumi"), Singer.new("Iggy")], :title => "Mr.", :represent_items_with => DynamicSingerRepresenter
end
@response.body.must_equal("[{\"name\":\"Mr. Bumi\"},{\"name\":\"Mr. Iggy\"}]")
end
test "passes options in #consume!" do
created_singer = nil
put singer.to_json do
created_singer = consume!(Singer.new, :title => "Mr.")
respond_with created_singer
end
created_singer.must_be_kind_of(Singer)
created_singer.name.must_equal "Mr. Bumi"
end
end
class FallbackTest < ResponderTest
class MusicianController < BaseController
represents :json, Object
end
tests MusicianController
test "passes options to entity representer" do
get do
render :text => "Rendered template"
end
@response.body.must_equal("Rendered template")
end
end
def get(format=:json, &block)
@controller.instance_eval do
@block = block
end
super :execute, :format => format
end
def put(body="", format=:json, &block)
@controller.instance_eval do
@block = block
end
super :execute, body, :format => format
end
def singer(name="Bumi")
singer = Musician.new(name)
singer.extend SingerRepresenter
end
def singers
[singer("Bumi"), singer("Bjork"), singer("Sinead")]
end
end
|
# frozen_string_literal: true
require "#{Rails.root}/lib/wikitext"
#= Takes wikitext for an on-wiki slide and extracts title and content
class WikiSlideParser
def initialize(wikitext)
@wikitext = wikitext&.dup || String.new
set_utf8_encoding
remove_noinclude
remove_translation_markers
remove_translate_tags
remove_category
extract_quiz_template
convert_image_template
convert_video_template
end
# The first translated line is the slide title
def title
return '' if @wikitext.blank?
title = @wikitext.lines.first.chomp
# remove header markup for level 2 or lower
title.gsub(/==+/, '').strip
end
# Everything after the first translated line is the slide content
def content
return '' if @wikitext.blank?
wikitext = @wikitext.lines[1..-1].join # Line 0 is the title
wikitext[0] = '' while wikitext[0] == "\n" # Remove leading newlines
markdown = Wikitext.mediawiki_to_markdown(wikitext)
# Make sure first line after a figure gets parsed as a new paragraph
markdown.gsub("figure>\n", "figure>\n\n")
end
def quiz
return unless @quiz_template
{ correct_answer_id: quiz_correct_answer,
question: quiz_question,
answers: quiz_answers }
end
private
def set_utf8_encoding
@wikitext = @wikitext.force_encoding('UTF-8')
end
def remove_noinclude
@wikitext.gsub!(%r{<noinclude>.*?</noinclude>\n*}m, '')
end
# Category tags are useful for categorizing pages, but we don't want them to show up in the slides
# Example: [[Category:Programs & Events Dashboard]]
def remove_category
@wikitext.gsub!(%r{\[\[Category:.*?\]\]\n*}m, '')
end
def remove_translation_markers
# Remove both marker and any trailing whitespace after it,
# which may interfere with correct markdown conversion.
@wikitext.gsub!(/<!--.+?-->\s*\n*/, '')
end
def remove_translate_tags
# Remove both the tags and any excess whitespace within them,
# which may interfere with correct markdown conversion.
@wikitext.gsub!(/<translate>\s*/, '')
@wikitext.gsub!(%r{\s*</translate>}, '')
@wikitext.gsub!(/<tvar.*?>/, '')
@wikitext.gsub!(%r{</>}, '')
end
def extract_quiz_template
@wikitext.gsub!(/(?<template>{{Training module quiz.*?\n}})/m, '')
@quiz_template = Regexp.last_match && Regexp.last_match['template']
end
def quiz_correct_answer
# Looks like:
# | correct_answer_id = 3
Integer(template_parameter_value(@quiz_template, 'correct_answer_id'))
end
def quiz_question
# Looks like:
# | question = What... is your favorite colour?
template_parameter_value(@quiz_template, 'question')
end
def quiz_answers
answers = (1..9).map do |answer_number|
answer_hash(answer_number)
end
answers.compact
end
def answer_hash(number)
text = template_parameter_value(@quiz_template, "answer_#{number}")
return unless text
explanation = template_parameter_value(@quiz_template, "explanation_#{number}")
{ id: number,
text: text,
explanation: explanation }
end
def template_parameter_value(template, parameter)
# Extract value from something like:
# | parameter_name = value
match = template.match(/\|\s*#{parameter}\s*=\s*(?<value>.*)/)
match && match['value']
end
def convert_image_template
# Get all the image templates on the page to allow for multiple images in the same slide
image_templates = @wikitext.scan(/(?<image>{{Training module image.*?\n}})/m)
return unless image_templates
# Replace each one with the correct figure markup
image_templates.each { |template| @wikitext.sub! template[0], figure_markup_from_template(template[0]) }
end
def convert_video_template
# Get all the video templates on the page to allow for multiple videos in the same slide
video_templates = @wikitext.scan(/(?<video>{{Training module video.*?\n}})/m)
return unless video_templates
# Replace each one with the correct figure markup
video_templates.each { |template| @wikitext.sub! template[0], video_markup_from_template(template[0]) }
end
def figure_markup_from_template(template)
image_layout = image_layout_from(template)
image_source = image_source_from(template)
image_filename = image_filename_from(template)
image_credit = image_credit_from(template)
<<-FIGURE
<figure class="#{image_layout}"><img src="#{image_source}" />
<figcaption class="image-credit">
<a href="https://commons.wikimedia.org/wiki/#{image_filename}">#{image_credit}</a>
</figcaption>
</figure>
FIGURE
end
def video_markup_from_template(template)
video_source = video_source_from(template)
<<-VIDEO
<iframe width="420" height="315" src="#{video_source}" frameborder="0" allowfullscreen></iframe>
VIDEO
end
def image_layout_from(template)
template_parameter_value(template, 'layout')
end
def image_source_from(template)
template_parameter_value(template, 'source')
end
def image_filename_from(template)
template_parameter_value(template, 'image')
end
def image_credit_from(template)
template_parameter_value(template, 'credit')
end
def video_source_from(template)
template_parameter_value(template, 'source')
end
end
Remove `remove_category` from WikiSlideParser
This can be accomplished on the wikitext side via <noinclude>, so we don't need to special-case categories.
# frozen_string_literal: true
require "#{Rails.root}/lib/wikitext"
#= Takes wikitext for an on-wiki slide and extracts title and content
class WikiSlideParser
def initialize(wikitext)
@wikitext = wikitext&.dup || String.new
set_utf8_encoding
remove_noinclude
remove_translation_markers
remove_translate_tags
extract_quiz_template
convert_image_template
convert_video_template
end
# The first translated line is the slide title
def title
return '' if @wikitext.blank?
title = @wikitext.lines.first.chomp
# remove header markup for level 2 or lower
title.gsub(/==+/, '').strip
end
# Everything after the first translated line is the slide content
def content
return '' if @wikitext.blank?
wikitext = @wikitext.lines[1..-1].join # Line 0 is the title
wikitext[0] = '' while wikitext[0] == "\n" # Remove leading newlines
markdown = Wikitext.mediawiki_to_markdown(wikitext)
# Make sure first line after a figure gets parsed as a new paragraph
markdown.gsub("figure>\n", "figure>\n\n")
end
def quiz
return unless @quiz_template
{ correct_answer_id: quiz_correct_answer,
question: quiz_question,
answers: quiz_answers }
end
private
def set_utf8_encoding
@wikitext = @wikitext.force_encoding('UTF-8')
end
def remove_noinclude
@wikitext.gsub!(%r{<noinclude>.*?</noinclude>\n*}m, '')
end
def remove_translation_markers
# Remove both marker and any trailing whitespace after it,
# which may interfere with correct markdown conversion.
@wikitext.gsub!(/<!--.+?-->\s*\n*/, '')
end
def remove_translate_tags
# Remove both the tags and any excess whitespace within them,
# which may interfere with correct markdown conversion.
@wikitext.gsub!(/<translate>\s*/, '')
@wikitext.gsub!(%r{\s*</translate>}, '')
@wikitext.gsub!(/<tvar.*?>/, '')
@wikitext.gsub!(%r{</>}, '')
end
def extract_quiz_template
@wikitext.gsub!(/(?<template>{{Training module quiz.*?\n}})/m, '')
@quiz_template = Regexp.last_match && Regexp.last_match['template']
end
def quiz_correct_answer
# Looks like:
# | correct_answer_id = 3
Integer(template_parameter_value(@quiz_template, 'correct_answer_id'))
end
def quiz_question
# Looks like:
# | question = What... is your favorite colour?
template_parameter_value(@quiz_template, 'question')
end
def quiz_answers
answers = (1..9).map do |answer_number|
answer_hash(answer_number)
end
answers.compact
end
def answer_hash(number)
text = template_parameter_value(@quiz_template, "answer_#{number}")
return unless text
explanation = template_parameter_value(@quiz_template, "explanation_#{number}")
{ id: number,
text: text,
explanation: explanation }
end
def template_parameter_value(template, parameter)
# Extract value from something like:
# | parameter_name = value
match = template.match(/\|\s*#{parameter}\s*=\s*(?<value>.*)/)
match && match['value']
end
def convert_image_template
# Get all the image templates on the page to allow for multiple images in the same slide
image_templates = @wikitext.scan(/(?<image>{{Training module image.*?\n}})/m)
return unless image_templates
# Replace each one with the correct figure markup
image_templates.each do |template|
@wikitext.sub! template[0], figure_markup_from_template(template[0])
end
end
def convert_video_template
# Get all the video templates on the page to allow for multiple videos in the same slide
video_templates = @wikitext.scan(/(?<video>{{Training module video.*?\n}})/m)
return unless video_templates
# Replace each one with the correct figure markup
video_templates.each do |template|
@wikitext.sub! template[0], video_markup_from_template(template[0])
end
end
def figure_markup_from_template(template)
image_layout = image_layout_from(template)
image_source = image_source_from(template)
image_filename = image_filename_from(template)
image_credit = image_credit_from(template)
<<-FIGURE
<figure class="#{image_layout}"><img src="#{image_source}" />
<figcaption class="image-credit">
<a href="https://commons.wikimedia.org/wiki/#{image_filename}">#{image_credit}</a>
</figcaption>
</figure>
FIGURE
end
def video_markup_from_template(template)
video_source = video_source_from(template)
<<-VIDEO
<iframe width="420" height="315" src="#{video_source}" frameborder="0" allowfullscreen></iframe>
VIDEO
end
def image_layout_from(template)
template_parameter_value(template, 'layout')
end
def image_source_from(template)
template_parameter_value(template, 'source')
end
def image_filename_from(template)
template_parameter_value(template, 'image')
end
def image_credit_from(template)
template_parameter_value(template, 'credit')
end
def video_source_from(template)
template_parameter_value(template, 'source')
end
end
|
# encoding: utf-8
require 'pstore'
module Nanoc3
# Nanoc3::DependencyTracker is responsible for remembering dependencies
# between items. It is used to speed up compilation by only letting an item
# be recompiled when it is outdated or any of its dependencies (or
# dependencies' dependencies, etc) is outdated.
#
# The dependencies tracked by the dependency tracker are not dependencies
# based on an item's content. When one item uses an attribute of another
# item, then this is also treated as a dependency. While dependencies based
# on an item's content (handled in Nanoc3::Compiler) cannot be mutually
# recursive, the more general dependencies in Nanoc3::DependencyTracker can
# (e.g. item A can use an attribute of item B and vice versa without
# problems).
class DependencyTracker
attr_accessor :filename
# Creates a new dependency tracker for the given items.
def initialize(items)
@items = items
@filename = 'tmp/dependencies'
@graph = {}
end
# Starts listening for dependency messages (+:visit_started+ and
# +:visit_ended+) and start recording dependencies.
def start
# Initialize dependency stack. An item will be pushed onto this stack
# when it is visited. Therefore, an item on the stack always depends on
# all items pushed above it.
@stack = []
# Register start of visits
Nanoc3::NotificationCenter.on(:visit_started, self) do |item|
# Record possible dependency
unless @stack.empty?
self.record_dependency(@stack[-1], item)
end
@stack.push(item)
end
# Register end of visits
Nanoc3::NotificationCenter.on(:visit_ended, self) do |item|
@stack.pop
end
end
# Stop listening for dependency messages and stop recording dependencies.
def stop
# Unregister
Nanoc3::NotificationCenter.remove(:visit_started, self)
Nanoc3::NotificationCenter.remove(:visit_ended, self)
end
# Returns the direct dependencies for +item+, i.e. the items that, when
# outdated, will cause +item+ to be marked as outdated. Indirect
# dependencies will not be returned (e.g. if A depends on B which depends
# on C, then the direct dependencies of A do not include C).
def direct_dependencies_for(item)
@graph[item] || []
end
# Returns all dependencies (direct and indirect) for +item+, i.e. the
# items that, when outdated, will cause +item+ to be marked as outdated.
def all_dependencies_for(item)
direct_dependencies = direct_dependencies_for(item)
indirect_dependencies = direct_dependencies.map { |i| all_dependencies_for(i) }
(direct_dependencies + indirect_dependencies).flatten
end
# Records a dependency from +src+ to +dst+ in the dependency graph. When
# +dst+ is oudated, +src+ will also become outdated.
def record_dependency(src, dst)
@graph[src] ||= []
# Don't include self in dependencies
return if src == dst
# Don't include doubles in dependencies
return if @graph[src].include?(dst)
# Record dependency
@graph[src] << dst
end
# Stores the dependency graph into the file specified by the +filename+
# attribute.
def store_graph
# Create dir
FileUtils.mkdir_p(File.dirname(self.filename))
# Complete the graph
complete_graph
# Convert graph into something more storeable
new_graph = {}
@graph.each_pair do |second_item, first_items|
new_graph[second_item.identifier] = first_items.map { |f| f.identifier }
end
# Store dependencies
store = PStore.new(self.filename)
store.transaction do
store[:dependencies] = new_graph
end
end
# Loads the dependency graph from the file specified by the +filename+
# attribute. This method will overwrite an existing dependency graph.
def load_graph
# Create new graph
@graph = {}
# Don't do anything if dependencies haven't been stored yet
return if !File.file?(self.filename)
# Load dependencies
store = PStore.new(self.filename)
store.transaction do
old_graph = store[:dependencies]
# Convert graph into something more usable
old_graph.each_pair do |second_item_identifier, first_item_identifiers|
# Convert second and first item identifiers into items
second_item = item_with_identifier(second_item_identifier)
first_items = first_item_identifiers.map { |p| item_with_identifier(p) }
@graph[second_item] = first_items
end
end
end
# Traverses the dependency graph and marks all items that (directly or
# indirectly) depend on an outdated item as outdated.
def mark_outdated_items
# Invert dependency graph
inverted_graph = invert_graph(@graph)
# Unmark everything
@items.each { |i| i.dependencies_outdated = false }
# Mark items that appear in @items but not in the dependency graph
added_items = @items - @graph.keys
added_items.each { |i| i.dependencies_outdated = true }
# Walk graph and mark items as outdated if necessary
# (#keys and #sort is used instead of #each_pair to add determinism)
first_items = inverted_graph.keys.sort_by { |i| i.nil? ? '/' : i.identifier }
something_changed = true
while something_changed
something_changed = false
first_items.each do |first_item|
second_items = inverted_graph[first_item]
if first_item.nil? || # item was removed
first_item.outdated? || # item itself is outdated
first_item.dependencies_outdated? # item is outdated because of its dependencies
second_items.each do |item|
# Ignore this item
next if item.nil?
something_changed = true if !item.dependencies_outdated?
item.dependencies_outdated = true
end
end
end
end
end
# Empties the list of dependencies for the given item. This is necessary
# before recompiling the given item, because otherwise old dependencies
# will stick around and new dependencies will appear twice.
def forget_dependencies_for(item)
@graph[item] = []
end
private
# Returns the item with the given identifier, or nil if no item is found.
def item_with_identifier(identifier)
@items.find { |i| i.identifier == identifier }
end
# Inverts the given graph (keys become values and values become keys).
#
# For example, this graph
#
# {
# :a => [ :b, :c ],
# :b => [ :x, :c ]
# }
#
# is turned into
#
# {
# :b => [ :a ],
# :c => [ :a, :b ],
# :x => [ :b ]
# }
def invert_graph(graph)
inverted_graph = {}
graph.each_pair do |key, values|
values.each do |v|
inverted_graph[v] ||= []
inverted_graph[v] << key
end
end
inverted_graph
end
# Ensures that all items in the dependency graph have a list of
# dependecies, even if it is empty. Items without a list of dependencies
# will be treated as "added" and will depend on all other pages, which is
# not necessary for non-added items.
def complete_graph
@items.each do |item|
@graph[item] ||= []
end
end
end
end
cleaned up descriptions of graph converting code in {load,store}_graph
# encoding: utf-8
require 'pstore'
module Nanoc3
# Nanoc3::DependencyTracker is responsible for remembering dependencies
# between items. It is used to speed up compilation by only letting an item
# be recompiled when it is outdated or any of its dependencies (or
# dependencies' dependencies, etc) is outdated.
#
# The dependencies tracked by the dependency tracker are not dependencies
# based on an item's content. When one item uses an attribute of another
# item, then this is also treated as a dependency. While dependencies based
# on an item's content (handled in Nanoc3::Compiler) cannot be mutually
# recursive, the more general dependencies in Nanoc3::DependencyTracker can
# (e.g. item A can use an attribute of item B and vice versa without
# problems).
class DependencyTracker
attr_accessor :filename
# Creates a new dependency tracker for the given items.
def initialize(items)
@items = items
@filename = 'tmp/dependencies'
@graph = {}
end
# Starts listening for dependency messages (+:visit_started+ and
# +:visit_ended+) and start recording dependencies.
def start
# Initialize dependency stack. An item will be pushed onto this stack
# when it is visited. Therefore, an item on the stack always depends on
# all items pushed above it.
@stack = []
# Register start of visits
Nanoc3::NotificationCenter.on(:visit_started, self) do |item|
# Record possible dependency
unless @stack.empty?
self.record_dependency(@stack[-1], item)
end
@stack.push(item)
end
# Register end of visits
Nanoc3::NotificationCenter.on(:visit_ended, self) do |item|
@stack.pop
end
end
# Stop listening for dependency messages and stop recording dependencies.
def stop
# Unregister
Nanoc3::NotificationCenter.remove(:visit_started, self)
Nanoc3::NotificationCenter.remove(:visit_ended, self)
end
# Returns the direct dependencies for +item+, i.e. the items that, when
# outdated, will cause +item+ to be marked as outdated. Indirect
# dependencies will not be returned (e.g. if A depends on B which depends
# on C, then the direct dependencies of A do not include C).
def direct_dependencies_for(item)
@graph[item] || []
end
# Returns all dependencies (direct and indirect) for +item+, i.e. the
# items that, when outdated, will cause +item+ to be marked as outdated.
def all_dependencies_for(item)
direct_dependencies = direct_dependencies_for(item)
indirect_dependencies = direct_dependencies.map { |i| all_dependencies_for(i) }
(direct_dependencies + indirect_dependencies).flatten
end
# Records a dependency from +src+ to +dst+ in the dependency graph. When
# +dst+ is oudated, +src+ will also become outdated.
def record_dependency(src, dst)
@graph[src] ||= []
# Don't include self in dependencies
return if src == dst
# Don't include doubles in dependencies
return if @graph[src].include?(dst)
# Record dependency
@graph[src] << dst
end
# Stores the dependency graph into the file specified by the +filename+
# attribute.
def store_graph
# Create dir
FileUtils.mkdir_p(File.dirname(self.filename))
# Complete the graph
complete_graph
# Convert graph of items into graph of item identifiers
new_graph = {}
@graph.each_pair do |second_item, first_items|
new_graph[second_item.identifier] = first_items.map { |f| f.identifier }
end
# Store dependencies
store = PStore.new(self.filename)
store.transaction do
store[:dependencies] = new_graph
end
end
# Loads the dependency graph from the file specified by the +filename+
# attribute. This method will overwrite an existing dependency graph.
def load_graph
# Create new graph
@graph = {}
# Don't do anything if dependencies haven't been stored yet
return if !File.file?(self.filename)
# Load dependencies
store = PStore.new(self.filename)
store.transaction do
# Convert graph of identifiers into graph of items
store[:dependencies].each_pair do |second_item_identifier, first_item_identifiers|
# Convert second and first item identifiers into items
second_item = item_with_identifier(second_item_identifier)
first_items = first_item_identifiers.map { |p| item_with_identifier(p) }
@graph[second_item] = first_items
end
end
end
# Traverses the dependency graph and marks all items that (directly or
# indirectly) depend on an outdated item as outdated.
def mark_outdated_items
# Invert dependency graph
inverted_graph = invert_graph(@graph)
# Unmark everything
@items.each { |i| i.dependencies_outdated = false }
# Mark items that appear in @items but not in the dependency graph
added_items = @items - @graph.keys
added_items.each { |i| i.dependencies_outdated = true }
# Walk graph and mark items as outdated if necessary
# (#keys and #sort is used instead of #each_pair to add determinism)
first_items = inverted_graph.keys.sort_by { |i| i.nil? ? '/' : i.identifier }
something_changed = true
while something_changed
something_changed = false
first_items.each do |first_item|
second_items = inverted_graph[first_item]
if first_item.nil? || # item was removed
first_item.outdated? || # item itself is outdated
first_item.dependencies_outdated? # item is outdated because of its dependencies
second_items.each do |item|
# Ignore this item
next if item.nil?
something_changed = true if !item.dependencies_outdated?
item.dependencies_outdated = true
end
end
end
end
end
# Empties the list of dependencies for the given item. This is necessary
# before recompiling the given item, because otherwise old dependencies
# will stick around and new dependencies will appear twice.
def forget_dependencies_for(item)
@graph[item] = []
end
private
# Returns the item with the given identifier, or nil if no item is found.
def item_with_identifier(identifier)
@items.find { |i| i.identifier == identifier }
end
# Inverts the given graph (keys become values and values become keys).
#
# For example, this graph
#
# {
# :a => [ :b, :c ],
# :b => [ :x, :c ]
# }
#
# is turned into
#
# {
# :b => [ :a ],
# :c => [ :a, :b ],
# :x => [ :b ]
# }
def invert_graph(graph)
inverted_graph = {}
graph.each_pair do |key, values|
values.each do |v|
inverted_graph[v] ||= []
inverted_graph[v] << key
end
end
inverted_graph
end
# Ensures that all items in the dependency graph have a list of
# dependecies, even if it is empty. Items without a list of dependencies
# will be treated as "added" and will depend on all other pages, which is
# not necessary for non-added items.
def complete_graph
@items.each do |item|
@graph[item] ||= []
end
end
end
end
|
require 'active_support/concern'
require 'simple_states'
class Worker
module States
extend ActiveSupport::Concern
included do
include SimpleStates, Travis::Notifications
states :created, :starting, :ready, :working, :stopping, :stopped, :errored
end
def ping(report)
update_attributes!(:state => report.state, :last_seen_at => Time.now.utc)
notify('update', report)
end
end
end
do not update the state unless it has changed
require 'active_support/concern'
require 'simple_states'
class Worker
module States
extend ActiveSupport::Concern
included do
include SimpleStates, Travis::Notifications
states :created, :starting, :ready, :working, :stopping, :stopped, :errored
end
def ping(report)
if state != report.state
update_attributes!(:state => report.state, :last_seen_at => Time.now.utc)
notify('update', report)
else
touch(:last_seen_at)
end
end
end
end
|
# == Overview
# PrimoService is an Umlaut Service that makes a call to the Primo web services based on the requested context object.
# It first looks for rft.primo *DEPRECATED*, failing that, it parses the request referrer identifier for an id.
# If the Primo id is present, the service gets the PNX record from the Primo web
# services.
# If no Primo id is found, the service searches Primo by (in order of precedence):
# * ISBN
# * ISSN
# * Title, Author, Genre
#
# == Available Services
# Several service types are available in the PrimoService. The default service types are:
# fulltext, holding, holding_search, table_of_contents, referent_enhance, cover_image
# Available service types are listed below and can be configured using the service_types parameter
# in config/umlaut_services.yml:
# * fulltext - parsed from links/linktorsrc elements in the PNX record
# * holding - parsed from display/availlibrary elements in the PNX record
# * holding_search - link to an exact title search in Primo if no holdings found AND the OpenURL did not come from Primo
# * primo_source - similar to holdings but used in conjuction with the PrimoSource service to map Primo records to their original sources; a PrimoSource service must be defined in service.yml for this to work
# * table_of_contents - parsed from links/linktotoc elements in the PNX record
# * referent_enhance - metadata parsed from the addata section of the PNX record when the record was found by Primo id
# * highlighted_link - parsed from links/addlink elements in the PNX record
#
# ==Available Parameters
# Several configurations parameters are available to be set in config/umlaut_services.yml.
# Primo: # Name of your choice
# type: PrimoService # Required
# priority: 2 # Required. I suggest running after the SFX service so you get the SFX referent enhancements
# base_url: http://primo.library.edu # Required
# vid: VID # Required
# institution: INST # Required
# holding_search_institution: SEARCH_INST # Optional. Defaults to the institution above.
# holding_search_text: Search for this title in Primo. # Optional text for holding search. Defaults to "Search for this title."
# suppress_holdings: [ !ruby/regexp '/\$\$LWEB/', !ruby/regexp '/\$\$1Restricted Internet Resources/' ] # Optional
# ez_proxy: !ruby/regexp '/https\:\/\/ezproxy\.library\.edu\/login\?url=/' # Optional
# service_types: # Optional. Defaults to [ "fulltext", "holding", "holding_search", "table_of_contents", "referent_enhance" ]
# - holding
# - holding_search
# - fulltext
# - table_of_contents
# - referent_enhance
# - highlighted_link
#
# base_url:: _required_ host and port of Primo server; used for Primo web services, deep links and holding_search
# base_path:: *DEPRECATED* previous name of base_url
# vid:: _required_ view id for Primo deep links and holding_search.
# institution:: _required_ institution id for Primo institution; used for Primo web services
# base_view_id:: *DEPRECATED* previous name of vid
# holding_search_institution:: if service types include holding_search_ and the holding search institution is different from
# institution to be used for the holding_search
# holding_search_text:: _optional_ text to display for the holding_search
# default holding search text:: "Search for this title."
# link_to_search_text:: *DEPRECATED* previous name of holding_search_text
# service_types:: _optional_ array of strings that represent the service types desired.
# options are: fulltext, holding, holding_search, table_of_contents,
# referent_enhance, cover_image, primo_source
# defaults are: fulltext, holding, holding_search, table_of_contents,
# referent_enhance, cover_image
# if no options are specified, default service types will be added.
# suppress_urls:: _optional_ array of strings or regexps to NOT use from the catalog.
# Used for linktorsrc elements that may duplicate resources from in other services.
# Regexps can be put in the services.yml like this:
# [!ruby/regexp '/sagepub.com$/']
# suppress_holdings:: _optional_ array of strings or regexps to NOT use from the catalog.
# Used for availlibrary elements that may duplicate resources from in other services.
# Regexps can be put in the services.yml like this:
# [!ruby/regexp '/\$\$LWEB$/']
# suppress_tocs:: _optional_ array of strings or regexps to NOT link to for Tables of Contents.
# Used for linktotoc elements that may duplicate resources from in other services.
# Regexps can be put in the services.yml like this:
# [!ruby/regexp '/\$\$LWEB$/']
# service_types:: _optional_ array of strings that represent the service types desired.
# options are: fulltext, holding, holding_search, table_of_contents,
# referent_enhance, cover_image, primo_source
# defaults are: fulltext, holding, holding_search, table_of_contents,
# referent_enhance
# if no options are specified, default service types will be added.
# ez_proxy:: _optional_ string or regexp of an ezproxy prefix.
# used in the case where an ezproxy prefix (on any other regexp) is hardcoded in the URL,
# and needs to be removed in order to match against SFXUrls.
# Example:
# !ruby/regexp '/https\:\/\/ezproxy\.library\.nyu\.edu\/login\?url=/'
# primo_config:: _optional_ string representing the primo yaml config file in config/
# default file name: primo.yml
# hash mappings from yaml config
# institutions:
# "primo_institution_code": "Primo Institution String"
# libraries:
# "primo_library_code": "Primo Library String"
# availability_statuses:
# "status1_code": "Status One"
# sources:
# data_source1:
# base_url: "http://source1.base.url
# type: source_type
# class_name: Source1Implementation (in exlibris/primo/sources or exlibris/primo/sources/local)
# source1_config_option1: source1_config_option1
# source1_config_option2: source1_config_option2
# data_source2:
# base_url: "http://source2.base.url
# type: source_type
# class_name: Source2Implementation (in exlibris/primo/sources or exlibris/primo/sources/local)
# source2_config_option1: source2_config_option1
# source2_config_option2: source2_config_option2
#
require 'exlibris-primo'
class PrimoService < Service
required_config_params :base_url, :vid, :institution
# For matching purposes.
attr_reader :title, :author
def self.default_config_file
"#{Rails.root}/config/primo.yml"
end
# Overwrites Service#new.
def initialize(config)
@holding_search_text = "Search for this title."
# Configure Primo
configure_primo
# Attributes for holding service data.
@holding_attributes = [:record_id, :original_id, :title, :author, :display_type,
:source_id, :original_source_id, :source_record_id, :ils_api_id, :institution_code,
:institution, :library_code, :library, :collection, :call_number, :coverage, :notes,
:subfields, :status_code, :status, :source_data]
@link_attributes = [:institution, :record_id, :original_id, :url, :display, :notes, :subfields]
# TODO: Run these decisions someone to see if they make sense.
@referent_enhancements = {
# Prefer SFX journal titles to Primo journal titles
:jtitle => { :overwrite => false },
:btitle => { :overwrite => true },
:aulast => { :overwrite => true },
:aufirst => { :overwrite => true },
:aucorp => { :overwrite => true },
:au => { :overwrite => true },
:pub => { :overwrite => true },
:place => { :value => :cop, :overwrite => false },
# Prefer SFX journal titles to Primo journal titles
:title => { :value => :jtitle, :overwrite => false},
:title => { :value => :btitle, :overwrite => true},
# Primo lccn and oclcid are spotty in Primo, so don't overwrite
:lccn => { :overwrite => false },
:oclcnum => { :value => :oclcid, :overwrite => false}
}
@suppress_urls = []
@suppress_tocs = []
@suppress_related_links = []
@suppress_holdings = []
@service_types = [ "fulltext", "holding", "holding_search",
"table_of_contents", "referent_enhance" ] if @service_types.nil?
backward_compatibility(config)
super(config)
# Handle the case where holding_search_institution is the same as institution.
@holding_search_institution = @institution if @service_types.include?("holding_search") and @holding_search_institution.nil?
end
# Overwrites Service#service_types_generated.
def service_types_generated
types = Array.new
@service_types.each do |type|
types.push(ServiceTypeValue[type.to_sym])
end
return types
end
# Overwrites Service#handle.
def handle(request)
# Get the possible search params
@identifier = request.referrer_id
@record_id = record_id(request)
@isbn = isbn(request)
@issn = issn(request)
@title = title(request)
@author = author(request)
@genre = genre(request)
# Setup the Primo search object
search = Exlibris::Primo::Search.new.base_url!(@base_url).institution!(@institution)
# Search if we have a:
# Primo record id OR
# ISBN OR
# ISSN OR
# Title and author and genre
if @record_id
search.record_id! @record_id
elsif @isbn
search.isbn_is @isbn
elsif @issn
search.isbn_is @issn
elsif @title and @author and @genre
search.title_is(@title).creator_is(@author).genre_is(@genre)
else # Don't do a search.
return request.dispatched(self, true)
end
begin
records = search.records
# Enhance the referent with metadata from Primo Searcher if Primo record id is present
# i.e. if we did our search with the Primo system number
if @record_id and @service_types.include?("referent_enhance")
# We'll take the first record, since there should only be one.
enhance_referent(request, records.first)
end
# Get cover image only if @record_id is defined
# TODO: make cover image service smarter and only
# include things that are actually URLs.
# if @record_id and @service_types.include?("cover_image")
# cover_image = primo_searcher.cover_image
# unless cover_image.nil?
# request.add_service_response(
# :service => self,
# :display_text => 'Cover Image',
# :key => 'medium',
# :url => cover_image,
# :size => 'medium',
# :service_type_value => :cover_image)
# end
# end
# Add holding services
if @service_types.include?("holding") or @service_types.include?("primo_source")
# Get holdings from the returned Primo records
holdings = records.collect{|record| record.holdings}.flatten
# Add the holding services
add_holding_services(request, holdings) unless holdings.empty?
# Provide title search functionality in the absence of available holdings.
# The logic below says only present the holdings search in the following case:
# We've configured to present holding search
# We didn't find any actual holdings
# We didn't come from Primo (prevent round trips since that would be weird)
# We have a title to search for.
if @service_types.include?("holding_search") and holdings.empty? and (not primo_identifier?) and (not @title.nil?)
# Add the holding search service
add_holding_search_service(request)
end
end
# Add fulltext services
if @service_types.include?("fulltext")
# Get fulltexts from the returned Primo records
fulltexts = records.collect{|record| record.fulltexts}.flatten
# Add the fulltext services
add_fulltext_services(request, fulltexts) unless fulltexts.empty?
end
# Add table of contents services
if @service_types.include?("table_of_contents")
# Get tables of contents from the returned Primo records
tables_of_contents = records.collect{|record| record.tables_of_contents}.flatten
# Add the table of contents services
add_table_of_contents_services(request, tables_of_contents) unless tables_of_contents.empty?
end
if @service_types.include?("highlighted_link")
# Get related links from the returned Primo records
highlighted_links = records.collect{|record| record.related_links}.flatten
add_highlighted_link_services(request, highlighted_links) unless highlighted_links.empty?
end
rescue Exception => e
# Log error and return finished
Rails.logger.error(
"Error in Exlibris::Primo::Search. "+
"Returning 0 Primo services for search #{search.inspect}. "+
"Exlibris::Primo::Search raised the following exception:\n#{e}\n#{e.backtrace.inspect}")
end
return request.dispatched(self, true)
end
# Called by ServiceType#view_data to provide custom functionality for Primo sources.
# For more information on Primo sources see PrimoSource.
def to_primo_source(service_response)
source_parameters = {}
@holding_attributes.each { |attr|
source_parameters[attr] = service_response.data_values[attr] }
return Exlibris::Primo::Holding.new(source_parameters).to_source
end
def default_config_file
self.class.default_config_file
end
# Return the Primo dlDisplay URL.
def deep_link_display_url(holding)
"#{@base_url}/primo_library/libweb/action/dlDisplay.do?docId=#{holding.record_id}&institution=#{@institution}&vid=#{@vid}"
end
protected :deep_link_display_url
# Return the Primo dlSearch URL.
def deep_link_search_url
@base_url+"/primo_library/libweb/action/dlSearch.do?institution=#{@holding_search_institution}&vid=#{@vid}&onCampus=false&query=#{CGI::escape("title,exact,"+@title)}&indx=1&bulkSize=10&group=GUEST"
end
protected :deep_link_search_url
# Configure Primo if this is the first time through
def configure_primo
Exlibris::Primo.configure { |primo_config|
primo_config.load_yaml config_file unless primo_config.load_time
} if File.exists?(config_file)
end
private :configure_primo
# Reset Primo configuration
# Only used in testing
def reset_primo_config
Exlibris::Primo.configure do |primo_config|
primo_config.load_time = nil
primo_config.libraries = {}
primo_config.availability_statuses = {}
primo_config.sources = {}
end
end
private :reset_primo_config
# Enhance the referent based on metadata in the given record
def enhance_referent(request, record)
@referent_enhancements.each do |key, options|
metadata_element = (options[:value].nil?) ? key : options[:value]
# Enhance the referent from the 'addata' section
metadata_method = "addata_#{metadata_element}".to_sym
# Get the metadata value if it's there
metadata_value = record.send(metadata_method) if record.respond_to? metadata_method
# Enhance the referent
request.referent.enhance_referent(key.to_s, metadata_value,
true, false, options) unless metadata_value.nil?
end
end
private :enhance_referent
# Add a holding service for each holding returned from Primo
def add_holding_services(request, holdings)
holdings.each do |holding|
next if @suppress_holdings.find {|suppress_holding| suppress_holding === holding.availlibrary}
service_data = {}
# Availability status from Primo is probably out of date, so set to "check_holdings"
holding.status_code = "check_holdings"
@holding_attributes.each do |attr|
service_data[attr] = holding.send(attr) if holding.respond_to?(attr)
end
# Only add one service type, either "primo_source" OR "holding", not both.
service_type = (@service_types.include?("primo_source")) ? "primo_source" : "holding"
# Umlaut specific attributes.
service_data[:match_reliability] =
(reliable_match?(:title => holding.title, :author => holding.author)) ?
ServiceResponse::MatchExact : ServiceResponse::MatchUnsure
service_data[:url] = deep_link_display_url(holding)
# Add some other holding information
service_data.merge!({
:collection_str => "#{holding.library} #{holding.collection}",
:coverage_str => holding.coverage.join("<br />"),
:coverage_str_array => holding.coverage }) if service_type.eql? "holding"
request.add_service_response(
service_data.merge(
:service => self,
:service_type_value => service_type))
end
end
private :add_holding_services
# Add a holding search service
def add_holding_search_service(request)
service_data = {}
service_data[:type] = "link_to_search"
service_data[:display_text] = @holding_search_text
service_data[:note] = ""
service_data[:url] = deep_link_search_url
request.add_service_response(
service_data.merge(
:service => self,
:service_type_value => 'holding_search'))
end
private :add_holding_search_service
# Add a full text service for each fulltext returned from Primo
def add_fulltext_services(request, fulltexts)
add_link_services(request, fulltexts, 'fulltext', @suppress_urls) { |fulltext|
# Don't add the URL if it matches our SFXUrl finder (unless fulltext is empty,
# [assuming something is better than nothing]), because
# that means we think this is an SFX controlled URL.
next if SfxUrl.sfx_controls_url?(handle_ezproxy(fulltext.url)) and
request.referent.metadata['genre'] != "book" and
!request.get_service_type("fulltext", { :refresh => true }).empty?
}
end
private :add_fulltext_services
# Add a table of contents service for each table of contents returned from Primo
def add_table_of_contents_services(request, tables_of_contents)
add_link_services(request, tables_of_contents, 'table_of_contents', @suppress_tocs)
end
private :add_table_of_contents_services
# Add a highlighted link service for each related link returned from Primo
def add_highlighted_link_services(request, highlight_links)
add_link_services(request, highlight_links, 'highlighted_link', @suppress_related_links)
end
private :add_highlighted_link_services
# Add a link service (specified by the given type) for each link returned from Primo
def add_link_services(request, links, service_type, suppress_links, &block)
links_seen = [] # for de-duplicating urls
links.each do |link|
next if links_seen.include?(link.url)
# Check the list of URLs to suppress, array of strings or regexps.
# If we have a match, suppress.
next if suppress_links.find {|suppress_link| suppress_link === link.url}
# No url? Forget it.
next if link.url.nil?
yield link unless block.nil?
links_seen.push(link.url)
service_data = {}
@link_attributes.each do |attr|
service_data[attr] = link.send(attr)
end
# Default display text to URL.
service_data[:display_text] = (service_data[:display].nil?) ? service_data[:url] : service_data[:display]
# Add the response
request.add_service_response(
service_data.merge(
:service => self,
:service_type_value => service_type))
end
end
private :add_link_services
# Map old config names to new config names for backwards compatibility
def backward_compatibility(config)
# For backward compatibility, re-map "old" config values to new more
# Umlaut-y names and print deprecation warning in the logs.
old_to_new_mappings = {
:base_path => :base_url,
:base_view_id => :vid,
:link_to_search_text => :holding_search_text
}
old_to_new_mappings.each do |old_param, new_param|
unless config["#{old_param}"].nil?
config["#{new_param}"] = config["#{old_param}"] if config["#{new_param}"].nil?
Rails.logger.warn("Parameter '#{old_param}' is deprecated. Please use '#{new_param}' instead.")
end
end # End backward compatibility maintenance
end
private :backward_compatibility
# Determine how sure we are that this is a match.
# Dynamically compares record metadata to input values
# based on the values passed in.
# Minimum requirement is to check title.
def reliable_match?(record_metadata)
return true unless (@record_id.nil? or @record_id.empty?)
return true unless (@issn.nil? or @issn.empty?) and (@isbn.nil? or @isbn.empty?)
return false if (record_metadata.nil? or record_metadata.empty? or record_metadata[:title].nil? or record_metadata[:title].empty?)
# Titles must be equal
return false unless record_metadata[:title].to_s.downcase.eql?(@title.downcase)
# Author must be equal
return false unless record_metadata[:author].to_s.downcase.eql?(@author.downcase)
return true
end
private :reliable_match?
def config_file
config_file = @primo_config.nil? ? default_config_file : "#{Rails.root}/config/"+ @primo_config
Rails.logger.info("Primo config file not found: #{config_file}.") and return "" unless File.exists?(config_file)
config_file
end
private :config_file
# If an ezproxy prefix (on any other regexp) is hardcoded in the URL,
# strip it out for matching against SFXUrls
def handle_ezproxy(str)
return str if @ez_proxy.nil?
return (str.gsub(@ez_proxy, '').nil? ? str : str.gsub(@ez_proxy, ''))
end
private :handle_ezproxy
def record_id(request)
# Let SFX handle primoArticles (is that even a thing anymore?)
return if @identifier.match(/primoArticle/) if primo_identifier?
@identifier.match(/primo-(.+)/)[1] if primo_identifier?
end
private :record_id
def isbn(request)
request.referent.metadata['isbn']
end
private :isbn
def issn(request)
# don't send mal-formed issn
request.referent.metadata['issn'] if request.referent.metadata['issn'] =~ /\d{4}(-)?\d{3}(\d|X)/
end
private :issn
def title(request)
(request.referent.metadata['jtitle'] || request.referent.metadata['btitle'] ||
request.referent.metadata['title'] || request.referent.metadata['atitle'])
end
private :title
def author(request)
(request.referent.metadata['au'] || request.referent.metadata['aulast'] ||
request.referent.metadata['aucorp'])
end
private :author
def genre(request)
request.referent.metadata['genre']
end
private :genre
def primo_identifier?
return false if @identifier.nil?
return @identifier.start_with?('info:sid/primo.exlibrisgroup.com')
end
private :primo_identifier?
end
Update logic for retrieving search using blank? instead of checking for nil. Can't get a test to fail though, so not sure if it's the actual problem.
# == Overview
# PrimoService is an Umlaut Service that makes a call to the Primo web services based on the requested context object.
# It first looks for rft.primo *DEPRECATED*, failing that, it parses the request referrer identifier for an id.
# If the Primo id is present, the service gets the PNX record from the Primo web
# services.
# If no Primo id is found, the service searches Primo by (in order of precedence):
# * ISBN
# * ISSN
# * Title, Author, Genre
#
# == Available Services
# Several service types are available in the PrimoService. The default service types are:
# fulltext, holding, holding_search, table_of_contents, referent_enhance, cover_image
# Available service types are listed below and can be configured using the service_types parameter
# in config/umlaut_services.yml:
# * fulltext - parsed from links/linktorsrc elements in the PNX record
# * holding - parsed from display/availlibrary elements in the PNX record
# * holding_search - link to an exact title search in Primo if no holdings found AND the OpenURL did not come from Primo
# * primo_source - similar to holdings but used in conjuction with the PrimoSource service to map Primo records to their original sources; a PrimoSource service must be defined in service.yml for this to work
# * table_of_contents - parsed from links/linktotoc elements in the PNX record
# * referent_enhance - metadata parsed from the addata section of the PNX record when the record was found by Primo id
# * highlighted_link - parsed from links/addlink elements in the PNX record
#
# ==Available Parameters
# Several configurations parameters are available to be set in config/umlaut_services.yml.
# Primo: # Name of your choice
# type: PrimoService # Required
# priority: 2 # Required. I suggest running after the SFX service so you get the SFX referent enhancements
# base_url: http://primo.library.edu # Required
# vid: VID # Required
# institution: INST # Required
# holding_search_institution: SEARCH_INST # Optional. Defaults to the institution above.
# holding_search_text: Search for this title in Primo. # Optional text for holding search. Defaults to "Search for this title."
# suppress_holdings: [ !ruby/regexp '/\$\$LWEB/', !ruby/regexp '/\$\$1Restricted Internet Resources/' ] # Optional
# ez_proxy: !ruby/regexp '/https\:\/\/ezproxy\.library\.edu\/login\?url=/' # Optional
# service_types: # Optional. Defaults to [ "fulltext", "holding", "holding_search", "table_of_contents", "referent_enhance" ]
# - holding
# - holding_search
# - fulltext
# - table_of_contents
# - referent_enhance
# - highlighted_link
#
# base_url:: _required_ host and port of Primo server; used for Primo web services, deep links and holding_search
# base_path:: *DEPRECATED* previous name of base_url
# vid:: _required_ view id for Primo deep links and holding_search.
# institution:: _required_ institution id for Primo institution; used for Primo web services
# base_view_id:: *DEPRECATED* previous name of vid
# holding_search_institution:: if service types include holding_search_ and the holding search institution is different from
# institution to be used for the holding_search
# holding_search_text:: _optional_ text to display for the holding_search
# default holding search text:: "Search for this title."
# link_to_search_text:: *DEPRECATED* previous name of holding_search_text
# service_types:: _optional_ array of strings that represent the service types desired.
# options are: fulltext, holding, holding_search, table_of_contents,
# referent_enhance, cover_image, primo_source
# defaults are: fulltext, holding, holding_search, table_of_contents,
# referent_enhance, cover_image
# if no options are specified, default service types will be added.
# suppress_urls:: _optional_ array of strings or regexps to NOT use from the catalog.
# Used for linktorsrc elements that may duplicate resources from in other services.
# Regexps can be put in the services.yml like this:
# [!ruby/regexp '/sagepub.com$/']
# suppress_holdings:: _optional_ array of strings or regexps to NOT use from the catalog.
# Used for availlibrary elements that may duplicate resources from in other services.
# Regexps can be put in the services.yml like this:
# [!ruby/regexp '/\$\$LWEB$/']
# suppress_tocs:: _optional_ array of strings or regexps to NOT link to for Tables of Contents.
# Used for linktotoc elements that may duplicate resources from in other services.
# Regexps can be put in the services.yml like this:
# [!ruby/regexp '/\$\$LWEB$/']
# service_types:: _optional_ array of strings that represent the service types desired.
# options are: fulltext, holding, holding_search, table_of_contents,
# referent_enhance, cover_image, primo_source
# defaults are: fulltext, holding, holding_search, table_of_contents,
# referent_enhance
# if no options are specified, default service types will be added.
# ez_proxy:: _optional_ string or regexp of an ezproxy prefix.
# used in the case where an ezproxy prefix (on any other regexp) is hardcoded in the URL,
# and needs to be removed in order to match against SFXUrls.
# Example:
# !ruby/regexp '/https\:\/\/ezproxy\.library\.nyu\.edu\/login\?url=/'
# primo_config:: _optional_ string representing the primo yaml config file in config/
# default file name: primo.yml
# hash mappings from yaml config
# institutions:
# "primo_institution_code": "Primo Institution String"
# libraries:
# "primo_library_code": "Primo Library String"
# availability_statuses:
# "status1_code": "Status One"
# sources:
# data_source1:
# base_url: "http://source1.base.url
# type: source_type
# class_name: Source1Implementation (in exlibris/primo/sources or exlibris/primo/sources/local)
# source1_config_option1: source1_config_option1
# source1_config_option2: source1_config_option2
# data_source2:
# base_url: "http://source2.base.url
# type: source_type
# class_name: Source2Implementation (in exlibris/primo/sources or exlibris/primo/sources/local)
# source2_config_option1: source2_config_option1
# source2_config_option2: source2_config_option2
#
require 'exlibris-primo'
class PrimoService < Service
required_config_params :base_url, :vid, :institution
# For matching purposes.
attr_reader :title, :author
def self.default_config_file
"#{Rails.root}/config/primo.yml"
end
# Overwrites Service#new.
def initialize(config)
@holding_search_text = "Search for this title."
# Configure Primo
configure_primo
# Attributes for holding service data.
@holding_attributes = [:record_id, :original_id, :title, :author, :display_type,
:source_id, :original_source_id, :source_record_id, :ils_api_id, :institution_code,
:institution, :library_code, :library, :collection, :call_number, :coverage, :notes,
:subfields, :status_code, :status, :source_data]
@link_attributes = [:institution, :record_id, :original_id, :url, :display, :notes, :subfields]
# TODO: Run these decisions someone to see if they make sense.
@referent_enhancements = {
# Prefer SFX journal titles to Primo journal titles
:jtitle => { :overwrite => false },
:btitle => { :overwrite => true },
:aulast => { :overwrite => true },
:aufirst => { :overwrite => true },
:aucorp => { :overwrite => true },
:au => { :overwrite => true },
:pub => { :overwrite => true },
:place => { :value => :cop, :overwrite => false },
# Prefer SFX journal titles to Primo journal titles
:title => { :value => :jtitle, :overwrite => false},
:title => { :value => :btitle, :overwrite => true},
# Primo lccn and oclcid are spotty in Primo, so don't overwrite
:lccn => { :overwrite => false },
:oclcnum => { :value => :oclcid, :overwrite => false}
}
@suppress_urls = []
@suppress_tocs = []
@suppress_related_links = []
@suppress_holdings = []
@service_types = [ "fulltext", "holding", "holding_search",
"table_of_contents", "referent_enhance" ] if @service_types.nil?
backward_compatibility(config)
super(config)
# Handle the case where holding_search_institution is the same as institution.
@holding_search_institution = @institution if @service_types.include?("holding_search") and @holding_search_institution.nil?
end
# Overwrites Service#service_types_generated.
def service_types_generated
types = Array.new
@service_types.each do |type|
types.push(ServiceTypeValue[type.to_sym])
end
return types
end
# Overwrites Service#handle.
def handle(request)
# Get the possible search params
@identifier = request.referrer_id
@record_id = record_id(request)
@isbn = isbn(request)
@issn = issn(request)
@title = title(request)
@author = author(request)
@genre = genre(request)
# Setup the Primo search object
search = Exlibris::Primo::Search.new.base_url!(@base_url).institution!(@institution)
# Search if we have a:
# Primo record id OR
# ISBN OR
# ISSN OR
# Title and author and genre
if((not @record_id.blank?))
search.record_id! @record_id
elsif((not @isbn.blank?))
search.isbn_is @isbn
elsif((not @issn.blank?))
search.isbn_is @issn
elsif((not @title.blank?) and (not @author.blank?) and (not @genre.blank?))
search.title_is(@title).creator_is(@author).genre_is(@genre)
else # Don't do a search.
return request.dispatched(self, true)
end
begin
records = search.records
# Enhance the referent with metadata from Primo Searcher if Primo record id is present
# i.e. if we did our search with the Primo system number
if @record_id and @service_types.include?("referent_enhance")
# We'll take the first record, since there should only be one.
enhance_referent(request, records.first)
end
# Get cover image only if @record_id is defined
# TODO: make cover image service smarter and only
# include things that are actually URLs.
# if @record_id and @service_types.include?("cover_image")
# cover_image = primo_searcher.cover_image
# unless cover_image.nil?
# request.add_service_response(
# :service => self,
# :display_text => 'Cover Image',
# :key => 'medium',
# :url => cover_image,
# :size => 'medium',
# :service_type_value => :cover_image)
# end
# end
# Add holding services
if @service_types.include?("holding") or @service_types.include?("primo_source")
# Get holdings from the returned Primo records
holdings = records.collect{|record| record.holdings}.flatten
# Add the holding services
add_holding_services(request, holdings) unless holdings.empty?
# Provide title search functionality in the absence of available holdings.
# The logic below says only present the holdings search in the following case:
# We've configured to present holding search
# We didn't find any actual holdings
# We didn't come from Primo (prevent round trips since that would be weird)
# We have a title to search for.
if @service_types.include?("holding_search") and holdings.empty? and (not primo_identifier?) and (not @title.nil?)
# Add the holding search service
add_holding_search_service(request)
end
end
# Add fulltext services
if @service_types.include?("fulltext")
# Get fulltexts from the returned Primo records
fulltexts = records.collect{|record| record.fulltexts}.flatten
# Add the fulltext services
add_fulltext_services(request, fulltexts) unless fulltexts.empty?
end
# Add table of contents services
if @service_types.include?("table_of_contents")
# Get tables of contents from the returned Primo records
tables_of_contents = records.collect{|record| record.tables_of_contents}.flatten
# Add the table of contents services
add_table_of_contents_services(request, tables_of_contents) unless tables_of_contents.empty?
end
if @service_types.include?("highlighted_link")
# Get related links from the returned Primo records
highlighted_links = records.collect{|record| record.related_links}.flatten
add_highlighted_link_services(request, highlighted_links) unless highlighted_links.empty?
end
rescue Exception => e
# Log error and return finished
Rails.logger.error(
"Error in Exlibris::Primo::Search. "+
"Returning 0 Primo services for search #{search.inspect}. "+
"Exlibris::Primo::Search raised the following exception:\n#{e}\n#{e.backtrace.inspect}")
end
return request.dispatched(self, true)
end
# Called by ServiceType#view_data to provide custom functionality for Primo sources.
# For more information on Primo sources see PrimoSource.
def to_primo_source(service_response)
source_parameters = {}
@holding_attributes.each { |attr|
source_parameters[attr] = service_response.data_values[attr] }
return Exlibris::Primo::Holding.new(source_parameters).to_source
end
def default_config_file
self.class.default_config_file
end
# Return the Primo dlDisplay URL.
def deep_link_display_url(holding)
"#{@base_url}/primo_library/libweb/action/dlDisplay.do?docId=#{holding.record_id}&institution=#{@institution}&vid=#{@vid}"
end
protected :deep_link_display_url
# Return the Primo dlSearch URL.
def deep_link_search_url
@base_url+"/primo_library/libweb/action/dlSearch.do?institution=#{@holding_search_institution}&vid=#{@vid}&onCampus=false&query=#{CGI::escape("title,exact,"+@title)}&indx=1&bulkSize=10&group=GUEST"
end
protected :deep_link_search_url
# Configure Primo if this is the first time through
def configure_primo
Exlibris::Primo.configure { |primo_config|
primo_config.load_yaml config_file unless primo_config.load_time
} if File.exists?(config_file)
end
private :configure_primo
# Reset Primo configuration
# Only used in testing
def reset_primo_config
Exlibris::Primo.configure do |primo_config|
primo_config.load_time = nil
primo_config.libraries = {}
primo_config.availability_statuses = {}
primo_config.sources = {}
end
end
private :reset_primo_config
# Enhance the referent based on metadata in the given record
def enhance_referent(request, record)
@referent_enhancements.each do |key, options|
metadata_element = (options[:value].nil?) ? key : options[:value]
# Enhance the referent from the 'addata' section
metadata_method = "addata_#{metadata_element}".to_sym
# Get the metadata value if it's there
metadata_value = record.send(metadata_method) if record.respond_to? metadata_method
# Enhance the referent
request.referent.enhance_referent(key.to_s, metadata_value,
true, false, options) unless metadata_value.nil?
end
end
private :enhance_referent
# Add a holding service for each holding returned from Primo
def add_holding_services(request, holdings)
holdings.each do |holding|
next if @suppress_holdings.find {|suppress_holding| suppress_holding === holding.availlibrary}
service_data = {}
# Availability status from Primo is probably out of date, so set to "check_holdings"
holding.status_code = "check_holdings"
@holding_attributes.each do |attr|
service_data[attr] = holding.send(attr) if holding.respond_to?(attr)
end
# Only add one service type, either "primo_source" OR "holding", not both.
service_type = (@service_types.include?("primo_source")) ? "primo_source" : "holding"
# Umlaut specific attributes.
service_data[:match_reliability] =
(reliable_match?(:title => holding.title, :author => holding.author)) ?
ServiceResponse::MatchExact : ServiceResponse::MatchUnsure
service_data[:url] = deep_link_display_url(holding)
# Add some other holding information
service_data.merge!({
:collection_str => "#{holding.library} #{holding.collection}",
:coverage_str => holding.coverage.join("<br />"),
:coverage_str_array => holding.coverage }) if service_type.eql? "holding"
request.add_service_response(
service_data.merge(
:service => self,
:service_type_value => service_type))
end
end
private :add_holding_services
# Add a holding search service
def add_holding_search_service(request)
service_data = {}
service_data[:type] = "link_to_search"
service_data[:display_text] = @holding_search_text
service_data[:note] = ""
service_data[:url] = deep_link_search_url
request.add_service_response(
service_data.merge(
:service => self,
:service_type_value => 'holding_search'))
end
private :add_holding_search_service
# Add a full text service for each fulltext returned from Primo
def add_fulltext_services(request, fulltexts)
add_link_services(request, fulltexts, 'fulltext', @suppress_urls) { |fulltext|
# Don't add the URL if it matches our SFXUrl finder (unless fulltext is empty,
# [assuming something is better than nothing]), because
# that means we think this is an SFX controlled URL.
next if SfxUrl.sfx_controls_url?(handle_ezproxy(fulltext.url)) and
request.referent.metadata['genre'] != "book" and
!request.get_service_type("fulltext", { :refresh => true }).empty?
}
end
private :add_fulltext_services
# Add a table of contents service for each table of contents returned from Primo
def add_table_of_contents_services(request, tables_of_contents)
add_link_services(request, tables_of_contents, 'table_of_contents', @suppress_tocs)
end
private :add_table_of_contents_services
# Add a highlighted link service for each related link returned from Primo
def add_highlighted_link_services(request, highlight_links)
add_link_services(request, highlight_links, 'highlighted_link', @suppress_related_links)
end
private :add_highlighted_link_services
# Add a link service (specified by the given type) for each link returned from Primo
def add_link_services(request, links, service_type, suppress_links, &block)
links_seen = [] # for de-duplicating urls
links.each do |link|
next if links_seen.include?(link.url)
# Check the list of URLs to suppress, array of strings or regexps.
# If we have a match, suppress.
next if suppress_links.find {|suppress_link| suppress_link === link.url}
# No url? Forget it.
next if link.url.nil?
yield link unless block.nil?
links_seen.push(link.url)
service_data = {}
@link_attributes.each do |attr|
service_data[attr] = link.send(attr)
end
# Default display text to URL.
service_data[:display_text] = (service_data[:display].nil?) ? service_data[:url] : service_data[:display]
# Add the response
request.add_service_response(
service_data.merge(
:service => self,
:service_type_value => service_type))
end
end
private :add_link_services
# Map old config names to new config names for backwards compatibility
def backward_compatibility(config)
# For backward compatibility, re-map "old" config values to new more
# Umlaut-y names and print deprecation warning in the logs.
old_to_new_mappings = {
:base_path => :base_url,
:base_view_id => :vid,
:link_to_search_text => :holding_search_text
}
old_to_new_mappings.each do |old_param, new_param|
unless config["#{old_param}"].nil?
config["#{new_param}"] = config["#{old_param}"] if config["#{new_param}"].nil?
Rails.logger.warn("Parameter '#{old_param}' is deprecated. Please use '#{new_param}' instead.")
end
end # End backward compatibility maintenance
end
private :backward_compatibility
# Determine how sure we are that this is a match.
# Dynamically compares record metadata to input values
# based on the values passed in.
# Minimum requirement is to check title.
def reliable_match?(record_metadata)
return true unless (@record_id.nil? or @record_id.empty?)
return true unless (@issn.nil? or @issn.empty?) and (@isbn.nil? or @isbn.empty?)
return false if (record_metadata.nil? or record_metadata.empty? or record_metadata[:title].nil? or record_metadata[:title].empty?)
# Titles must be equal
return false unless record_metadata[:title].to_s.downcase.eql?(@title.downcase)
# Author must be equal
return false unless record_metadata[:author].to_s.downcase.eql?(@author.downcase)
return true
end
private :reliable_match?
def config_file
config_file = @primo_config.nil? ? default_config_file : "#{Rails.root}/config/"+ @primo_config
Rails.logger.info("Primo config file not found: #{config_file}.") and return "" unless File.exists?(config_file)
config_file
end
private :config_file
# If an ezproxy prefix (on any other regexp) is hardcoded in the URL,
# strip it out for matching against SFXUrls
def handle_ezproxy(str)
return str if @ez_proxy.nil?
return (str.gsub(@ez_proxy, '').nil? ? str : str.gsub(@ez_proxy, ''))
end
private :handle_ezproxy
def record_id(request)
# Let SFX handle primoArticles (is that even a thing anymore?)
return if @identifier.match(/primoArticle/) if primo_identifier?
@identifier.match(/primo-(.+)/)[1] if primo_identifier?
end
private :record_id
def isbn(request)
request.referent.metadata['isbn']
end
private :isbn
def issn(request)
# don't send mal-formed issn
request.referent.metadata['issn'] if request.referent.metadata['issn'] =~ /\d{4}(-)?\d{3}(\d|X)/
end
private :issn
def title(request)
(request.referent.metadata['jtitle'] || request.referent.metadata['btitle'] ||
request.referent.metadata['title'] || request.referent.metadata['atitle'])
end
private :title
def author(request)
(request.referent.metadata['au'] || request.referent.metadata['aulast'] ||
request.referent.metadata['aucorp'])
end
private :author
def genre(request)
request.referent.metadata['genre']
end
private :genre
def primo_identifier?
return false if @identifier.nil?
return @identifier.start_with?('info:sid/primo.exlibrisgroup.com')
end
private :primo_identifier?
end |
module Net::TNS
# This module includes common string helper methods for monkey-patching
# or mixing-in to string objects.
module StringHelpers
HEXCHARS = [("0".."9").to_a, ("a".."f").to_a].flatten
#From the Ruby Black Bag (http://github.com/emonti/rbkb/)
# Convert a string to ASCII hex string. Supports a few options for format:
#
# :delim - delimter between each hex byte
# :prefix - prefix before each hex byte
# :suffix - suffix after each hex byte
#
def tns_hexify(opts={})
delim = opts[:delim]
pre = (opts[:prefix] || "")
suf = (opts[:suffix] || "")
if (rx=opts[:rx]) and not rx.kind_of? Regexp
raise ArgumentError.new("rx must be a regular expression")
end
out=Array.new
self.each_byte do |c|
hc = if (rx and not rx.match c.chr)
c.chr
else
pre + (HEXCHARS[(c >> 4)] + HEXCHARS[(c & 0xf )]) + suf
end
out << (hc)
end
out.join(delim)
end
# Convert ASCII hex string to raw.
#
# Parameters:
#
# d = optional 'delimiter' between hex bytes (zero+ spaces by default)
def tns_unhexify(d=/\s*/)
self.strip.gsub(/([A-Fa-f0-9]{1,2})#{d}?/) { $1.hex.chr }
end
end
end
class String
include Net::TNS::StringHelpers
end
Simplify tns_hexify
This has a bunch of unused functionality/unneeded complexity.
module Net::TNS
# This module includes common string helper methods for monkey-patching
# or mixing-in to string objects.
module StringHelpers
HEXCHARS = [("0".."9").to_a, ("a".."f").to_a].flatten
# Adapted from the Ruby Black Bag (http://github.com/emonti/rbkb/)
# Convert a string to ASCII hex string
def tns_hexify
self.each_byte.map do |byte|
(HEXCHARS[(byte >> 4)] + HEXCHARS[(byte & 0xf )])
end.join()
end
# Convert ASCII hex string to raw.
#
# Parameters:
#
# d = optional 'delimiter' between hex bytes (zero+ spaces by default)
def tns_unhexify(d=/\s*/)
self.strip.gsub(/([A-Fa-f0-9]{1,2})#{d}?/) { $1.hex.chr }
end
end
end
class String
include Net::TNS::StringHelpers
end
|
require_relative './test_helper'
require 'minitest/autorun'
require 'aws4_signer/signature'
require 'uri'
require 'digest/sha2'
describe Aws4Signer::Signature do
let(:uri) { URI('https://example.org/foo/bar?baz=blah') }
let(:verb) { 'PUT' }
let(:headers) do
{'x-foo' => 'bar'}
end
let(:body) { 'hello' }
let(:options) { {} }
let(:signature) do
Aws4Signer::Signature.new(
'AKID',
'SECRET',
'xx-region-1',
'svc',
verb,
uri,
headers,
body,
**options
)
end
describe "headers" do
describe "without x-amz-date" do
it "assigns" do
assert signature.headers['x-amz-date'].is_a?(String)
end
end
describe "with x-amz-date" do
before do
headers['x-amz-date'] = '20140222T070605Z'
end
it "doesn't assign" do
assert signature.headers['x-amz-date'].is_a?(String)
assert_equal Time.utc(2014,02,22,07,06,05), signature.date
end
end
describe "without host" do
it "assigns" do
assert_equal 'example.org', signature.headers['Host']
end
end
describe "with host" do
before do
headers['host'] = 'example.com'
end
it "doesn't assign" do
assert_equal 'example.com', signature.headers['Host']
end
end
describe "with security token" do
let(:options) { {security_token: 'session-token'} }
it "assigns x-amz-security-token" do
assert_equal 'session-token', signature.headers['x-amz-security-token']
end
end
end
describe "#attach_to_http_request" do
before do
headers['x-amz-date'] = '20140222T070605Z'
end
it "assigns headers" do
headers = {}
class << headers
def body
'hello'
end
end
signature.attach_to_http_request(headers)
assert_equal 'example.org', headers['host']
assert_equal '20140222T070605Z', headers['x-amz-date']
assert_equal 'bar', headers['x-foo']
assert_equal signature.authorization_header, headers['authorization']
assert_equal Digest::SHA2.hexdigest('hello', 256), headers['x-amz-content-sha256']
end
end
describe "#authorization_header" do
before do
headers['x-amz-date'] = '20140222T070605Z'
end
it "returns" do
assert_equal 'AWS4-HMAC-SHA256 '\
'Credential=AKID/20140222/xx-region-1/svc/aws4_request,' \
'SignedHeaders=host;x-amz-date;x-foo,' \
'Signature=2845eebf2510f52010a9d9e228d4b60d4dd33fb7e9f349fb21bd6a533bfc37b6',
signature.authorization_header
end
end
describe "#signature" do
before do
headers['x-amz-date'] = '20140222T070605Z'
end
it "return the sign" do
assert_equal '2845eebf2510f52010a9d9e228d4b60d4dd33fb7e9f349fb21bd6a533bfc37b6', signature.signature
end
end
describe "#canonical_headers,signed_headers" do
let(:headers) do
{
'x-test-b' => '2',
'X-Test-A' => '1',
'x-test-c' => '3',
'Authorization' => 'skip',
}
end
it "ends with return" do
assert_equal "\n", signature.canonical_headers[-1]
end
it "contains headers" do
assert signature.canonical_headers.lines.include?("x-test-b:2\n")
assert signature.canonical_headers.lines.include?("x-test-a:1\n") # downcase
assert signature.canonical_headers.lines.include?("x-test-c:3\n")
assert !signature.canonical_headers.lines.include?("Authorization:skip\n")
%w(x-test-a x-test-b x-test-c).each do |name|
assert signature.signed_headers.split(/;/).include?(name)
end
end
it "sorts headers" do
assert %w(host x-amz-date x-test-a x-test-b x-test-c),
signature.canonical_headers.lines.map { |_| _.split(/:/,2).first }
assert_equal 'host;x-amz-date;x-test-a;x-test-b;x-test-c', signature.signed_headers
end
end
describe "#hashed_payload" do
let(:body) { 'body' }
it "returns hashed payloed" do
assert_equal Digest::SHA2.hexdigest('body', 256), signature.hashed_payload
end
end
describe "#canonical_request" do
before do
headers['x-amz-date'] = '20140222T070605Z'
end
it "returns canonical request" do
canonical_request = signature.canonical_request
assert_equal <<-EXPECTED.chomp, canonical_request
PUT
/foo/bar
baz=blah
host:example.org
x-amz-date:20140222T070605Z
x-foo:bar
host;x-amz-date;x-foo
#{Digest::SHA2.hexdigest('hello', 256)}
EXPECTED
end
end
describe "#scope" do
before do
headers['x-amz-date'] = '20140222T070605Z'
end
it "returns scope" do
assert_equal '20140222/xx-region-1/svc/aws4_request', signature.scope
end
end
describe "#string_to_sign" do
before do
headers['x-amz-date'] = '20140222T070605Z'
end
it "returns string to sign" do
assert_equal <<-EXPECTED.chomp, signature.string_to_sign
AWS4-HMAC-SHA256
20140222T070605Z
20140222/xx-region-1/svc/aws4_request
#{Digest::SHA2.hexdigest(signature.canonical_request, 256)}
EXPECTED
end
end
end
add test for #1
require_relative './test_helper'
require 'minitest/autorun'
require 'aws4_signer/signature'
require 'uri'
require 'digest/sha2'
describe Aws4Signer::Signature do
let(:uri) { URI('https://example.org/foo/bar?baz=blah') }
let(:verb) { 'PUT' }
let(:headers) do
{'x-foo' => 'bar'}
end
let(:body) { 'hello' }
let(:options) { {} }
let(:signature) do
Aws4Signer::Signature.new(
'AKID',
'SECRET',
'xx-region-1',
'svc',
verb,
uri,
headers,
body,
**options
)
end
describe "headers" do
describe "without x-amz-date" do
it "assigns" do
assert signature.headers['x-amz-date'].is_a?(String)
end
end
describe "with x-amz-date" do
before do
headers['x-amz-date'] = '20140222T070605Z'
end
it "doesn't assign" do
assert signature.headers['x-amz-date'].is_a?(String)
assert_equal Time.utc(2014,02,22,07,06,05), signature.date
end
end
describe "without host" do
it "assigns" do
assert_equal 'example.org', signature.headers['Host']
end
end
describe "with host" do
before do
headers['host'] = 'example.com'
end
it "doesn't assign" do
assert_equal 'example.com', signature.headers['Host']
end
end
describe "with security token" do
let(:options) { {security_token: 'session-token'} }
it "assigns x-amz-security-token" do
assert_equal 'session-token', signature.headers['x-amz-security-token']
end
end
end
describe "#attach_to_http_request" do
before do
headers['x-amz-date'] = '20140222T070605Z'
end
it "assigns headers" do
headers = {}
class << headers
def body
'hello'
end
end
signature.attach_to_http_request(headers)
assert_equal 'example.org', headers['host']
assert_equal '20140222T070605Z', headers['x-amz-date']
assert_equal 'bar', headers['x-foo']
assert_equal signature.authorization_header, headers['authorization']
assert_equal Digest::SHA2.hexdigest('hello', 256), headers['x-amz-content-sha256']
end
end
describe "#authorization_header" do
before do
headers['x-amz-date'] = '20140222T070605Z'
end
it "returns" do
assert_equal 'AWS4-HMAC-SHA256 '\
'Credential=AKID/20140222/xx-region-1/svc/aws4_request,' \
'SignedHeaders=host;x-amz-date;x-foo,' \
'Signature=2845eebf2510f52010a9d9e228d4b60d4dd33fb7e9f349fb21bd6a533bfc37b6',
signature.authorization_header
end
end
describe "#signature" do
before do
headers['x-amz-date'] = '20140222T070605Z'
end
it "return the sign" do
assert_equal '2845eebf2510f52010a9d9e228d4b60d4dd33fb7e9f349fb21bd6a533bfc37b6', signature.signature
end
end
describe "#canonical_headers,signed_headers" do
let(:headers) do
{
'x-test-b' => '2',
'X-Test-A' => '1',
'x-test-c' => '3',
'Authorization' => 'skip',
}
end
it "ends with return" do
assert_equal "\n", signature.canonical_headers[-1]
end
it "contains headers" do
assert signature.canonical_headers.lines.include?("x-test-b:2\n")
assert signature.canonical_headers.lines.include?("x-test-a:1\n") # downcase
assert signature.canonical_headers.lines.include?("x-test-c:3\n")
assert !signature.canonical_headers.lines.include?("Authorization:skip\n")
%w(x-test-a x-test-b x-test-c).each do |name|
assert signature.signed_headers.split(/;/).include?(name)
end
end
it "sorts headers" do
assert %w(host x-amz-date x-test-a x-test-b x-test-c),
signature.canonical_headers.lines.map { |_| _.split(/:/,2).first }
assert_equal 'host;x-amz-date;x-test-a;x-test-b;x-test-c', signature.signed_headers
end
end
describe "#hashed_payload" do
let(:body) { 'body' }
it "returns hashed payloed" do
assert_equal Digest::SHA2.hexdigest('body', 256), signature.hashed_payload
end
end
describe "#canonical_request" do
before do
headers['x-amz-date'] = '20140222T070605Z'
end
it "returns canonical request" do
canonical_request = signature.canonical_request
assert_equal <<-EXPECTED.chomp, canonical_request
PUT
/foo/bar
baz=blah
host:example.org
x-amz-date:20140222T070605Z
x-foo:bar
host;x-amz-date;x-foo
#{Digest::SHA2.hexdigest('hello', 256)}
EXPECTED
end
describe "when request has multiple URL query parameters" do
let(:uri) { URI('https://example.org/foo/bar?b=blah&a=blah') }
it "sorts URL query parameter" do
canonical_request = signature.canonical_request
assert_equal <<-EXPECTED.chomp, canonical_request
PUT
/foo/bar
a=blah&b=blah
host:example.org
x-amz-date:20140222T070605Z
x-foo:bar
host;x-amz-date;x-foo
#{Digest::SHA2.hexdigest('hello', 256)}
EXPECTED
end
end
end
describe "#scope" do
before do
headers['x-amz-date'] = '20140222T070605Z'
end
it "returns scope" do
assert_equal '20140222/xx-region-1/svc/aws4_request', signature.scope
end
end
describe "#string_to_sign" do
before do
headers['x-amz-date'] = '20140222T070605Z'
end
it "returns string to sign" do
assert_equal <<-EXPECTED.chomp, signature.string_to_sign
AWS4-HMAC-SHA256
20140222T070605Z
20140222/xx-region-1/svc/aws4_request
#{Digest::SHA2.hexdigest(signature.canonical_request, 256)}
EXPECTED
end
end
end
|
#
# Copyright 2012 Stefano Tortarolo
# Copyright 2013 Fabio Rapposelli and Timo Sugliani
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
require "log4r"
require "vagrant/util/busy"
require "vagrant/util/platform"
require "vagrant/util/retryable"
require "vagrant/util/subprocess"
module VagrantPlugins
module VCloud
module Driver
class UnauthorizedAccess < StandardError; end
class WrongAPIVersion < StandardError; end
class WrongItemIDError < StandardError; end
class InvalidStateError < StandardError; end
class InternalServerError < StandardError; end
class UnhandledError < StandardError; end
# Main class to access vCloud rest APIs
class Base
include Vagrant::Util::Retryable
def initialize
@logger = Log4r::Logger.new("vagrant::provider::vcloud::base")
end
##
# Authenticate against the specified server
def login
end
##
# Destroy the current session
def logout
end
##
# Fetch existing organizations and their IDs
def get_organizations
end
##
# friendly helper method to fetch an Organization Id by name
# - name (this isn't case sensitive)
def get_organization_id_by_name(name)
end
##
# friendly helper method to fetch an Organization by name
# - name (this isn't case sensitive)
def get_organization_by_name(name)
end
##
# Fetch details about an organization:
# - catalogs
# - vdcs
# - networks
def get_organization(orgId)
end
##
# Fetch details about a given catalog
def get_catalog(catalogId)
end
##
# Friendly helper method to fetch an catalog id by name
# - organization hash (from get_organization/get_organization_by_name)
# - catalog name
def get_catalog_id_by_name(organization, catalogName)
end
##
# Friendly helper method to fetch an catalog by name
# - organization hash (from get_organization/get_organization_by_name)
# - catalog name
def get_catalog_by_name(organization, catalogName)
end
##
# Fetch details about a given vdc:
# - description
# - vapps
# - networks
def get_vdc(vdcId)
end
##
# Friendly helper method to fetch a Organization VDC Id by name
# - Organization object
# - Organization VDC Name
def get_vdc_id_by_name(organization, vdcName)
end
##
# Friendly helper method to fetch a Organization VDC by name
# - Organization object
# - Organization VDC Name
def get_vdc_by_name(organization, vdcName)
end
##
# Fetch details about a given catalog item:
# - description
# - vApp templates
def get_catalog_item(catalogItemId)
end
##
# friendly helper method to fetch an catalogItem by name
# - catalogId (use get_catalog_name(org, name))
# - catalagItemName
def get_catalog_item_by_name(catalogId, catalogItemName)
end
##
# Fetch details about a given vapp:
# - name
# - description
# - status
# - IP
# - Children VMs:
# -- IP addresses
# -- status
# -- ID
def get_vapp(vAppId)
end
##
# Delete a given vapp
# NOTE: It doesn't verify that the vapp is shutdown
def delete_vapp(vAppId)
end
##
# Suspend a given vapp
def suspend_vapp(vAppId)
end
##
# reboot a given vapp
# This will basically initial a guest OS reboot, and will only work if
# VMware-tools are installed on the underlying VMs.
# vShield Edge devices are not affected
def reboot_vapp(vAppId)
end
##
# reset a given vapp
# This will basically reset the VMs within the vApp
# vShield Edge devices are not affected.
def reset_vapp(vAppId)
end
##
# Boot a given vapp
def poweron_vapp(vAppId)
end
##
# Create a vapp starting from a template
#
# Params:
# - vdc: the associated VDC
# - vapp_name: name of the target vapp
# - vapp_description: description of the target vapp
# - vapp_templateid: ID of the vapp template
def create_vapp_from_template(vdc, vapp_name, vapp_description, vapp_templateid, poweron=false)
end
##
# Compose a vapp using existing virtual machines
#
# Params:
# - vdc: the associated VDC
# - vapp_name: name of the target vapp
# - vapp_description: description of the target vapp
# - vm_list: hash with IDs of the VMs to be used in the composing process
# - network_config: hash of the network configuration for the vapp
def compose_vapp_from_vm(vdc, vapp_name, vapp_description, vm_list={}, network_config={})
end
# Fetch details about a given vapp template:
# - name
# - description
# - Children VMs:
# -- ID
def get_vapp_template(vAppId)
end
##
# Set vApp port forwarding rules
#
# - vappid: id of the vapp to be modified
# - network_name: name of the vapp network to be modified
# - config: hash with network configuration specifications, must contain an array inside :nat_rules with the nat rules to be applied.
def set_vapp_port_forwarding_rules(vappid, network_name, config={})
end
##
# Get vApp port forwarding rules
#
# - vappid: id of the vApp
def get_vapp_port_forwarding_rules(vAppId)
end
##
# get vApp edge public IP from the vApp ID
# Only works when:
# - vApp needs to be poweredOn
# - FenceMode is set to "natRouted"
# - NatType" is set to "portForwarding
# This will be required to know how to connect to VMs behind the Edge device.
def get_vapp_edge_public_ip(vAppId)
end
##
# Upload an OVF package
# - vdcId
# - vappName
# - vappDescription
# - ovfFile
# - catalogId
# - uploadOptions {}
def upload_ovf(vdcId, vappName, vappDescription, ovfFile, catalogId, uploadOptions={})
end
##
# Fetch information for a given task
def get_task(taskid)
end
##
# Poll a given task until completion
def wait_task_completion(taskid)
end
##
# Set vApp Network Config
def set_vapp_network_config(vappid, network_name, config={})
end
##
# Set VM Network Config
def set_vm_network_config(vmid, network_name, config={})
end
##
# Set VM Guest Customization Config
def set_vm_guest_customization(vmid, computer_name, config={})
end
##
# Fetch details about a given VM
def get_vm(vmId)
end
private
##
# Sends a synchronous request to the vCloud API and returns the response as parsed XML + headers using HTTPClient.
def send_request(params, payload=nil, content_type=nil)
# Create a new HTTP client
clnt = HTTPClient.new
# Disable SSL cert verification
clnt.ssl_config.verify_mode=(OpenSSL::SSL::VERIFY_NONE)
# Suppress SSL depth message
clnt.ssl_config.verify_callback=proc{ |ok, ctx|; true };
extheader = {}
extheader["accept"] = "application/*+xml;version=#{@api_version}"
if !content_type.nil?
extheader['Content-Type'] = content_type
end
if @auth_key
extheader['x-vcloud-authorization'] = @auth_key
else
clnt.set_auth(nil, "#{@username}@#{@org_name}", @password)
end
url = "#{@api_url}#{params['command']}"
begin
response = clnt.request(params['method'], url, nil, payload, extheader)
if !response.ok?
raise "Warning: unattended code #{response.status} #{response.reason}"
end
[Nokogiri.parse(response.body), response.headers]
rescue SocketError
raise "Impossible to connect, verify endpoint"
rescue Errno::EADDRNOTAVAIL
raise "Impossible to connect, verify endpoint"
end
end
##
# Upload a large file in configurable chunks, output an optional progressbar
def upload_file(uploadURL, uploadFile, vAppTemplate, config={})
# Set chunksize to 10M if not specified otherwise
chunkSize = (config[:chunksize] || 10485760)
# Set progress bar to default format if not specified otherwise
progressBarFormat = (config[:progressbar_format] || "%t Progress: %p%% %e")
# Set progress bar length to 120 if not specified otherwise
progressBarLength = (config[:progressbar_length] || 80)
# Open our file for upload
uploadFileHandle = File.new(uploadFile, "rb" )
fileName = File.basename(uploadFileHandle)
progressBarTitle = "Uploading: " + fileName.to_s
# Create a progressbar object if progress bar is enabled
if config[:progressbar_enable] == true && uploadFileHandle.size.to_i > chunkSize
progressbar = ProgressBar.create(
:title => progressBarTitle,
:starting_at => 0,
:total => uploadFileHandle.size.to_i,
##:length => progressBarLength,
:format => progressBarFormat
)
else
puts progressBarTitle
end
# Create a new HTTP client
clnt = HTTPClient.new
# Disable SSL cert verification
clnt.ssl_config.verify_mode=(OpenSSL::SSL::VERIFY_NONE)
# Suppress SSL depth message
clnt.ssl_config.verify_callback=proc{ |ok, ctx|; true };
# Perform ranged upload until the file reaches its end
until uploadFileHandle.eof?
# Create ranges for this chunk upload
rangeStart = uploadFileHandle.pos
rangeStop = uploadFileHandle.pos.to_i + chunkSize
# Read current chunk
fileContent = uploadFileHandle.read(chunkSize)
# If statement to handle last chunk transfer if is > than filesize
if rangeStop.to_i > uploadFileHandle.size.to_i
contentRange = "bytes #{rangeStart.to_s}-#{uploadFileHandle.size.to_s}/#{uploadFileHandle.size.to_s}"
rangeLen = uploadFileHandle.size.to_i - rangeStart.to_i
else
contentRange = "bytes #{rangeStart.to_s}-#{rangeStop.to_s}/#{uploadFileHandle.size.to_s}"
rangeLen = rangeStop.to_i - rangeStart.to_i
end
# Build headers
extheader = {
'x-vcloud-authorization' => @auth_key,
'Content-Range' => contentRange,
'Content-Length' => rangeLen.to_s
}
begin
uploadRequest = "#{@host_url}#{uploadURL}"
connection = clnt.request('PUT', uploadRequest, nil, fileContent, extheader)
if config[:progressbar_enable] == true && uploadFileHandle.size.to_i > chunkSize
params = {
'method' => :get,
'command' => "/vAppTemplate/vappTemplate-#{vAppTemplate}"
}
response, headers = send_request(params)
response.css("Files File [name='#{fileName}']").each do |file|
progressbar.progress=file[:bytesTransferred].to_i
end
end
rescue # FIXME: HUGE FIXME!!!! DO SOMETHING WITH THIS, IT'S JUST STUPID AS IT IS NOW!!!
retryTime = (config[:retry_time] || 5)
puts "Range #{contentRange} failed to upload, retrying the chunk in #{retryTime.to_s} seconds, to stop the action press CTRL+C."
sleep retryTime.to_i
retry
end
end
uploadFileHandle.close
end
##
# Convert vApp status codes into human readable description
def convert_vapp_status(status_code)
case status_code.to_i
when 0
'suspended'
when 3
'paused'
when 4
'running'
when 8
'stopped'
when 10
'mixed'
else
"Unknown #{status_code}"
end
end
end # class
end
end
end
modified base.rb to allow extensive debug logs through awesome_print
#
# Copyright 2012 Stefano Tortarolo
# Copyright 2013 Fabio Rapposelli and Timo Sugliani
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
require "log4r"
require "vagrant/util/busy"
require "vagrant/util/platform"
require "vagrant/util/retryable"
require "vagrant/util/subprocess"
require "awesome_print"
module VagrantPlugins
module VCloud
module Driver
class UnauthorizedAccess < StandardError; end
class WrongAPIVersion < StandardError; end
class WrongItemIDError < StandardError; end
class InvalidStateError < StandardError; end
class InternalServerError < StandardError; end
class UnhandledError < StandardError; end
# Main class to access vCloud rest APIs
class Base
include Vagrant::Util::Retryable
def initialize
@logger = Log4r::Logger.new("vagrant::provider::vcloud::base")
end
##
# Authenticate against the specified server
def login
end
##
# Destroy the current session
def logout
end
##
# Fetch existing organizations and their IDs
def get_organizations
end
##
# friendly helper method to fetch an Organization Id by name
# - name (this isn't case sensitive)
def get_organization_id_by_name(name)
end
##
# friendly helper method to fetch an Organization by name
# - name (this isn't case sensitive)
def get_organization_by_name(name)
end
##
# Fetch details about an organization:
# - catalogs
# - vdcs
# - networks
def get_organization(orgId)
end
##
# Fetch details about a given catalog
def get_catalog(catalogId)
end
##
# Friendly helper method to fetch an catalog id by name
# - organization hash (from get_organization/get_organization_by_name)
# - catalog name
def get_catalog_id_by_name(organization, catalogName)
end
##
# Friendly helper method to fetch an catalog by name
# - organization hash (from get_organization/get_organization_by_name)
# - catalog name
def get_catalog_by_name(organization, catalogName)
end
##
# Fetch details about a given vdc:
# - description
# - vapps
# - networks
def get_vdc(vdcId)
end
##
# Friendly helper method to fetch a Organization VDC Id by name
# - Organization object
# - Organization VDC Name
def get_vdc_id_by_name(organization, vdcName)
end
##
# Friendly helper method to fetch a Organization VDC by name
# - Organization object
# - Organization VDC Name
def get_vdc_by_name(organization, vdcName)
end
##
# Fetch details about a given catalog item:
# - description
# - vApp templates
def get_catalog_item(catalogItemId)
end
##
# friendly helper method to fetch an catalogItem by name
# - catalogId (use get_catalog_name(org, name))
# - catalagItemName
def get_catalog_item_by_name(catalogId, catalogItemName)
end
##
# Fetch details about a given vapp:
# - name
# - description
# - status
# - IP
# - Children VMs:
# -- IP addresses
# -- status
# -- ID
def get_vapp(vAppId)
end
##
# Delete a given vapp
# NOTE: It doesn't verify that the vapp is shutdown
def delete_vapp(vAppId)
end
##
# Suspend a given vapp
def suspend_vapp(vAppId)
end
##
# reboot a given vapp
# This will basically initial a guest OS reboot, and will only work if
# VMware-tools are installed on the underlying VMs.
# vShield Edge devices are not affected
def reboot_vapp(vAppId)
end
##
# reset a given vapp
# This will basically reset the VMs within the vApp
# vShield Edge devices are not affected.
def reset_vapp(vAppId)
end
##
# Boot a given vapp
def poweron_vapp(vAppId)
end
##
# Create a vapp starting from a template
#
# Params:
# - vdc: the associated VDC
# - vapp_name: name of the target vapp
# - vapp_description: description of the target vapp
# - vapp_templateid: ID of the vapp template
def create_vapp_from_template(vdc, vapp_name, vapp_description, vapp_templateid, poweron=false)
end
##
# Compose a vapp using existing virtual machines
#
# Params:
# - vdc: the associated VDC
# - vapp_name: name of the target vapp
# - vapp_description: description of the target vapp
# - vm_list: hash with IDs of the VMs to be used in the composing process
# - network_config: hash of the network configuration for the vapp
def compose_vapp_from_vm(vdc, vapp_name, vapp_description, vm_list={}, network_config={})
end
# Fetch details about a given vapp template:
# - name
# - description
# - Children VMs:
# -- ID
def get_vapp_template(vAppId)
end
##
# Set vApp port forwarding rules
#
# - vappid: id of the vapp to be modified
# - network_name: name of the vapp network to be modified
# - config: hash with network configuration specifications, must contain
# an array inside :nat_rules with the nat rules to be applied.
def set_vapp_port_forwarding_rules(vappid, network_name, config={})
end
##
# Get vApp port forwarding rules
#
# - vappid: id of the vApp
def get_vapp_port_forwarding_rules(vAppId)
end
##
# get vApp edge public IP from the vApp ID
# Only works when:
# - vApp needs to be poweredOn
# - FenceMode is set to "natRouted"
# - NatType" is set to "portForwarding
# This will be required to know how to connect to VMs behind the Edge device.
def get_vapp_edge_public_ip(vAppId)
end
##
# Upload an OVF package
# - vdcId
# - vappName
# - vappDescription
# - ovfFile
# - catalogId
# - uploadOptions {}
def upload_ovf(vdcId, vappName, vappDescription, ovfFile, catalogId, uploadOptions={})
end
##
# Fetch information for a given task
def get_task(taskid)
end
##
# Poll a given task until completion
def wait_task_completion(taskid)
end
##
# Set vApp Network Config
def set_vapp_network_config(vappid, network_name, config={})
end
##
# Set VM Network Config
def set_vm_network_config(vmid, network_name, config={})
end
##
# Set VM Guest Customization Config
def set_vm_guest_customization(vmid, computer_name, config={})
end
##
# Fetch details about a given VM
def get_vm(vmId)
end
private
##
# Sends a synchronous request to the vCloud API and returns the
# response as parsed XML + headers using HTTPClient.
def send_request(params, payload=nil, content_type=nil)
# Create a new HTTP client
clnt = HTTPClient.new
# Disable SSL cert verification
clnt.ssl_config.verify_mode=(OpenSSL::SSL::VERIFY_NONE)
# Suppress SSL depth message
clnt.ssl_config.verify_callback=proc{ |ok, ctx|; true };
extheader = {}
extheader["accept"] = "application/*+xml;version=#{@api_version}"
if !content_type.nil?
extheader['Content-Type'] = content_type
end
if @auth_key
extheader['x-vcloud-authorization'] = @auth_key
else
clnt.set_auth(nil, "#{@username}@#{@org_name}", @password)
end
url = "#{@api_url}#{params['command']}"
# Massive debug when LOG=DEBUG
# Using awesome_print to get nice XML output for better readability
if @logger.level == 1
ap "SEND #{url}"
if payload
payloadXML = Nokogiri.XML(payload)
ap payloadXML
end
end
begin
response = clnt.request(params['method'], url, nil, payload, extheader)
if !response.ok?
raise "Warning: unattended code #{response.status} #{response.reason}"
end
nicexml = Nokogiri.XML(response.body)
# Massive debug when LOG=DEBUG
# Using awesome_print to get nice XML output for better readability
if @logger.level == 1
ap "RECV #{response.status}"
# Just avoid the task spam.
if !url.index("/task/")
ap nicexml
end
end
[Nokogiri.parse(response.body), response.headers]
rescue SocketError
raise "Impossible to connect, verify endpoint"
rescue Errno::EADDRNOTAVAIL
raise "Impossible to connect, verify endpoint"
end
end
##
# Upload a large file in configurable chunks, output an optional progressbar
def upload_file(uploadURL, uploadFile, vAppTemplate, config={})
# Set chunksize to 10M if not specified otherwise
chunkSize = (config[:chunksize] || 10485760)
# Set progress bar to default format if not specified otherwise
progressBarFormat = (config[:progressbar_format] || "%t Progress: %p%% %e")
# Set progress bar length to 120 if not specified otherwise
progressBarLength = (config[:progressbar_length] || 80)
# Open our file for upload
uploadFileHandle = File.new(uploadFile, "rb" )
fileName = File.basename(uploadFileHandle)
progressBarTitle = "Uploading: " + fileName.to_s
# Create a progressbar object if progress bar is enabled
if config[:progressbar_enable] == true && uploadFileHandle.size.to_i > chunkSize
progressbar = ProgressBar.create(
:title => progressBarTitle,
:starting_at => 0,
:total => uploadFileHandle.size.to_i,
##:length => progressBarLength,
:format => progressBarFormat
)
else
puts progressBarTitle
end
# Create a new HTTP client
clnt = HTTPClient.new
# Disable SSL cert verification
clnt.ssl_config.verify_mode=(OpenSSL::SSL::VERIFY_NONE)
# Suppress SSL depth message
clnt.ssl_config.verify_callback=proc{ |ok, ctx|; true };
# Perform ranged upload until the file reaches its end
until uploadFileHandle.eof?
# Create ranges for this chunk upload
rangeStart = uploadFileHandle.pos
rangeStop = uploadFileHandle.pos.to_i + chunkSize
# Read current chunk
fileContent = uploadFileHandle.read(chunkSize)
# If statement to handle last chunk transfer if is > than filesize
if rangeStop.to_i > uploadFileHandle.size.to_i
contentRange = "bytes #{rangeStart.to_s}-#{uploadFileHandle.size.to_s}/#{uploadFileHandle.size.to_s}"
rangeLen = uploadFileHandle.size.to_i - rangeStart.to_i
else
contentRange = "bytes #{rangeStart.to_s}-#{rangeStop.to_s}/#{uploadFileHandle.size.to_s}"
rangeLen = rangeStop.to_i - rangeStart.to_i
end
# Build headers
extheader = {
'x-vcloud-authorization' => @auth_key,
'Content-Range' => contentRange,
'Content-Length' => rangeLen.to_s
}
begin
uploadRequest = "#{@host_url}#{uploadURL}"
connection = clnt.request('PUT', uploadRequest, nil, fileContent, extheader)
if config[:progressbar_enable] == true && uploadFileHandle.size.to_i > chunkSize
params = {
'method' => :get,
'command' => "/vAppTemplate/vappTemplate-#{vAppTemplate}"
}
response, headers = send_request(params)
response.css("Files File [name='#{fileName}']").each do |file|
progressbar.progress=file[:bytesTransferred].to_i
end
end
rescue # FIXME: HUGE FIXME!!!! DO SOMETHING WITH THIS, IT'S JUST STUPID AS IT IS NOW!!!
retryTime = (config[:retry_time] || 5)
puts "Range #{contentRange} failed to upload, retrying the chunk in #{retryTime.to_s} seconds, to stop the action press CTRL+C."
sleep retryTime.to_i
retry
end
end
uploadFileHandle.close
end
##
# Convert vApp status codes into human readable description
def convert_vapp_status(status_code)
case status_code.to_i
when 0
'suspended'
when 3
'paused'
when 4
'running'
when 8
'stopped'
when 10
'mixed'
else
"Unknown #{status_code}"
end
end
end # class
end
end
end |
module NewRelic::RedshiftPlugin
# Register and run the agent
def self.run
# Register this agent.
NewRelic::Plugin::Setup.install_agent :redshift, self
# Launch the agent; this never returns.
NewRelic::Plugin::Run.setup_and_run
end
class Agent < NewRelic::Plugin::Agent::Base
agent_guid 'com.chirag.nr.redshift'
agent_version NewRelic::RedshiftPlugin::VERSION
agent_config_options :host, :port, :user, :password, :dbname, :label, :schema
agent_human_labels('Redshift') { "#{label || host}" }
def initialize(*args)
@previous_metrics = {}
@previous_result_for_query ||= {}
super
end
#
# Required, but not used
#
def setup_metrics
end
#
# Picks up default port for redshift, if user doesn't specify port in yml file
#
def port
@port || 5439
end
#
# Get a connection to redshift
#
def connect
PG::Connection.new(:host => host, :port => port, :user => user, :password => password, :dbname => dbname)
end
#
# Fallowing is called for every polling cycle
#
def poll_cycle
@connection = self.connect
puts 'Connected'
report_metrics
rescue => e
$stderr.puts "#{e}: #{e.backtrace.join("\n ")}"
ensure
@connection.finish if @connection
end
def report_metrics
@connection.exec(percentage_memory_utilization) do |result|
report_metric "Database/Memory/PercentageUtilized", '%' , result[0]['percentage_used']
end
@connection.exec(memory_used) do |result|
report_metric "Database/Memory/Used", 'Gbytes' , result[0]['memory_used']
end
@connection.exec(maximum_capacity) do |result|
report_metric "Database/Memory/MaxCapacity", 'Gbytes' , result[0]['capacity_gbytes']
end
@connection.exec(database_connections) do |result|
report_metric "Database/Connections/NumberOfConnections", 'count' , result[0]['database_connections']
end
@connection.exec(total_rows_unsorted_rows_per_table).each do |result|
report_metric "TableStats/TotalRows/#{result["table_name"]}", 'count' , result['total_rows']
end
@connection.exec(total_rows_unsorted_rows_per_table).each do |result|
report_metric "TableStats/SortedRows/#{result["table_name"]}", 'count' , result['sorted_rows']
end
@connection.exec(total_rows_unsorted_rows_per_table).each do |result|
report_metric "TableStats/UnsortedRows/#{result["table_name"]}", 'count' , result['unsorted_rows']
end
@connection.exec(total_rows_unsorted_rows_per_table).each do |result|
report_metric "TableStats/UnsortedRatio/#{result["table_name"]}", '%' , result['unsorted_ratio']
end
#pct_of_total: Size of the table in proportion to the cluster size
@connection.exec(table_storage_information).each do |result|
report_metric "TableStats/SizeInProportionToCluster/#{result["table_name"]}", '%' , result['pct_of_total']
end
#pct_stats_off: Measure of staleness of table statistics (real size versus size recorded in stats)
@connection.exec(table_storage_information).each do |result|
report_metric "TableStats/SizeStaleness/#{result["table_name"]}", '%' , result['pct_stats_off']
end
end
private
def percentage_memory_utilization
'SELECT ((SUM(used)/1024.00)*100)/((SUM(capacity))/1024) AS percentage_used
FROM stv_partitions
WHERE part_begin=0;'
end
def memory_used
'SELECT (SUM(used)/1024.00) AS memory_used
FROM stv_partitions
WHERE part_begin=0;'
end
def maximum_capacity
'SELECT sum(capacity)/1024 AS capacity_gbytes
FROM stv_partitions
WHERE part_begin=0;'
end
def database_connections
'SELECT count(*) AS database_connections
FROM stv_sessions;'
end
def total_rows_unsorted_rows_per_table
%Q{SELECT btrim(p.name::character varying::text) AS table_name,
sum(p."rows") AS total_rows,
sum(p.sorted_rows) AS sorted_rows,
sum(p."rows") - sum(p.sorted_rows) AS unsorted_rows,
CASE WHEN sum(p."rows") <> 0 THEN 1.0::double precision - sum(p.sorted_rows)::double precision / sum(p."rows")::double precision
ELSE NULL::double precision
END AS unsorted_ratio
FROM stv_tbl_perm p
JOIN pg_database d ON d.oid = p.db_id::oid
JOIN pg_class ON pg_class.oid = p.id::oid
JOIN pg_namespace ON pg_namespace.oid = pg_class.relnamespace
WHERE btrim(pg_namespace.nspname::character varying::text) = '#{schema}' AND p.id > 0
GROUP BY btrim(pg_namespace.nspname::character varying::text), btrim(p.name::character varying::text)
ORDER BY sum(p."rows") - sum(p.sorted_rows) DESC, sum(p.sorted_rows) DESC;}
end
def table_storage_information
%Q{
SELECT trim(a.name) as table_name,
decode(b.mbytes,0,0,((b.mbytes/part.total::decimal)*100)::decimal(5,2)) as pct_of_total,
(case when a.rows = 0 then NULL else ((a.rows - pgc.reltuples)::decimal(19,3)/a.rows::decimal(19,3)*100)::decimal(5,2) end) as pct_stats_off
from ( select db_id, id, name, sum(rows) as rows,
sum(rows)-sum(sorted_rows) as unsorted_rows from stv_tbl_perm a group by db_id, id, name ) as a
join pg_class as pgc on pgc.oid = a.id
join pg_namespace as pgn on pgn.oid = pgc.relnamespace
left outer join (select tbl, count(*) as mbytes
from stv_blocklist group by tbl) b on a.id=b.tbl
inner join ( SELECT attrelid, min(case attisdistkey when 't' then attname else null end) as "distkey",min(case attsortkeyord when 1 then attname else null end ) as head_sort , max(attsortkeyord) as n_sortkeys, max(attencodingtype) as max_enc FROM pg_attribute group by 1) as det
on det.attrelid = a.id
inner join ( select tbl, max(Mbytes)::decimal(32)/min(Mbytes) as ratio from
(select tbl, trim(name) as name, slice, count(*) as Mbytes
from svv_diskusage group by tbl, name, slice )
group by tbl, name ) as dist_ratio on a.id = dist_ratio.tbl
join ( select sum(capacity) as total
from stv_partitions where part_begin=0 ) as part on 1=1
where mbytes is not null
and pgn.nspname = '#{schema}'
order by mbytes desc;}
end
end
end
Update agent.rb
module NewRelic::RedshiftPlugin
# Register and run the agent
def self.run
# Register this agent.
NewRelic::Plugin::Setup.install_agent :redshift, self
# Launch the agent; this never returns.
NewRelic::Plugin::Run.setup_and_run
end
class Agent < NewRelic::Plugin::Agent::Base
agent_guid 'com.chirag.nr.redshift'
agent_version NewRelic::RedshiftPlugin::VERSION
agent_config_options :host, :port, :user, :password, :dbname, :label, :schema
agent_human_labels('Redshift') { "#{label || host}" }
def initialize(*args)
@previous_metrics = {}
@previous_result_for_query ||= {}
super
end
#
# Required, but not used
#
def setup_metrics
end
#
# Picks up default port for redshift, if user doesn't specify port in yml file
#
def port
@port || 5439
end
#
# Get a connection to redshift
#
def connect
PG::Connection.new(:host => host, :port => port, :user => user, :password => password, :dbname => dbname)
end
#
# Following is called for every polling cycle
#
def poll_cycle
@connection = self.connect
puts 'Connected'
report_metrics
rescue => e
$stderr.puts "#{e}: #{e.backtrace.join("\n ")}"
ensure
@connection.finish if @connection
end
def report_metrics
@connection.exec(percentage_memory_utilization) do |result|
report_metric "Database/Memory/PercentageUtilized", '%' , result[0]['percentage_used']
end
@connection.exec(memory_used) do |result|
report_metric "Database/Memory/Used", 'Gbytes' , result[0]['memory_used']
end
@connection.exec(maximum_capacity) do |result|
report_metric "Database/Memory/MaxCapacity", 'Gbytes' , result[0]['capacity_gbytes']
end
@connection.exec(database_connections) do |result|
report_metric "Database/Connections/NumberOfConnections", 'count' , result[0]['database_connections']
end
@connection.exec(total_rows_unsorted_rows_per_table).each do |result|
report_metric "TableStats/TotalRows/#{result["table_name"]}", 'count' , result['total_rows']
end
@connection.exec(total_rows_unsorted_rows_per_table).each do |result|
report_metric "TableStats/SortedRows/#{result["table_name"]}", 'count' , result['sorted_rows']
end
@connection.exec(total_rows_unsorted_rows_per_table).each do |result|
report_metric "TableStats/UnsortedRows/#{result["table_name"]}", 'count' , result['unsorted_rows']
end
@connection.exec(total_rows_unsorted_rows_per_table).each do |result|
report_metric "TableStats/UnsortedRatio/#{result["table_name"]}", '%' , result['unsorted_ratio']
end
#pct_of_total: Size of the table in proportion to the cluster size
@connection.exec(table_storage_information).each do |result|
report_metric "TableStats/SizeInProportionToCluster/#{result["table_name"]}", '%' , result['pct_of_total']
end
#pct_stats_off: Measure of staleness of table statistics (real size versus size recorded in stats)
@connection.exec(table_storage_information).each do |result|
report_metric "TableStats/SizeStaleness/#{result["table_name"]}", '%' , result['pct_stats_off']
end
end
private
def percentage_memory_utilization
'SELECT ((SUM(used)/1024.00)*100)/((SUM(capacity))/1024) AS percentage_used
FROM stv_partitions
WHERE part_begin=0;'
end
def memory_used
'SELECT (SUM(used)/1024.00) AS memory_used
FROM stv_partitions
WHERE part_begin=0;'
end
def maximum_capacity
'SELECT sum(capacity)/1024 AS capacity_gbytes
FROM stv_partitions
WHERE part_begin=0;'
end
def database_connections
'SELECT count(*) AS database_connections
FROM stv_sessions;'
end
def total_rows_unsorted_rows_per_table
%Q{SELECT btrim(p.name::character varying::text) AS table_name,
sum(p."rows") AS total_rows,
sum(p.sorted_rows) AS sorted_rows,
sum(p."rows") - sum(p.sorted_rows) AS unsorted_rows,
CASE WHEN sum(p."rows") <> 0 THEN 1.0::double precision - sum(p.sorted_rows)::double precision / sum(p."rows")::double precision
ELSE NULL::double precision
END AS unsorted_ratio
FROM stv_tbl_perm p
JOIN pg_database d ON d.oid = p.db_id::oid
JOIN pg_class ON pg_class.oid = p.id::oid
JOIN pg_namespace ON pg_namespace.oid = pg_class.relnamespace
WHERE btrim(pg_namespace.nspname::character varying::text) = '#{schema}' AND p.id > 0
GROUP BY btrim(pg_namespace.nspname::character varying::text), btrim(p.name::character varying::text)
ORDER BY sum(p."rows") - sum(p.sorted_rows) DESC, sum(p.sorted_rows) DESC;}
end
def table_storage_information
%Q{
SELECT trim(a.name) as table_name,
decode(b.mbytes,0,0,((b.mbytes/part.total::decimal)*100)::decimal(5,2)) as pct_of_total,
(case when a.rows = 0 then NULL else ((a.rows - pgc.reltuples)::decimal(19,3)/a.rows::decimal(19,3)*100)::decimal(5,2) end) as pct_stats_off
from ( select db_id, id, name, sum(rows) as rows,
sum(rows)-sum(sorted_rows) as unsorted_rows from stv_tbl_perm a group by db_id, id, name ) as a
join pg_class as pgc on pgc.oid = a.id
join pg_namespace as pgn on pgn.oid = pgc.relnamespace
left outer join (select tbl, count(*) as mbytes
from stv_blocklist group by tbl) b on a.id=b.tbl
inner join ( SELECT attrelid, min(case attisdistkey when 't' then attname else null end) as "distkey",min(case attsortkeyord when 1 then attname else null end ) as head_sort , max(attsortkeyord) as n_sortkeys, max(attencodingtype) as max_enc FROM pg_attribute group by 1) as det
on det.attrelid = a.id
inner join ( select tbl, max(Mbytes)::decimal(32)/min(Mbytes) as ratio from
(select tbl, trim(name) as name, slice, count(*) as Mbytes
from svv_diskusage group by tbl, name, slice )
group by tbl, name ) as dist_ratio on a.id = dist_ratio.tbl
join ( select sum(capacity) as total
from stv_partitions where part_begin=0 ) as part on 1=1
where mbytes is not null
and pgn.nspname = '#{schema}'
order by mbytes desc;}
end
end
end
|
class EmailValidator < ActiveModel::EachValidator
def validate_each(record, attribute, value)
if (setting = SiteSetting.email_domains_whitelist).present?
unless email_in_restriction_setting?(setting, value)
record.errors.add(attribute, I18n.t(:'user.email.not_allowed'))
end
elsif (setting = SiteSetting.email_domains_blacklist).present?
if email_in_restriction_setting?(setting, value)
record.errors.add(attribute, I18n.t(:'user.email.not_allowed'))
end
end
if record.errors[attribute].blank? and ScreenedEmail.should_block?(value)
record.errors.add(attribute, I18n.t(:'user.email.blocked'))
end
end
def email_in_restriction_setting?(setting, value)
domains = setting.gsub('.', '\.')
regexp = Regexp.new("@(#{domains})", true)
value =~ regexp
end
def self.email_regex
/^[a-zA-Z0-9!#$%&'*+\/=?\^_`{|}~\-]+(?:\.[a-zA-Z0-9!#$%&'\*+\/=?\^_`{|}~\-]+)*@(?:[a-zA-Z0-9](?:[a-zA-Z0-9\-]*[a-zA-Z0-9])?\.)+[a-zA-Z0-9](?:[a-zA-Z0-9\-]*[a-zA-Z0-9])?$/
end
end
FIX: email regexp for older rubies
class EmailValidator < ActiveModel::EachValidator
def validate_each(record, attribute, value)
if (setting = SiteSetting.email_domains_whitelist).present?
unless email_in_restriction_setting?(setting, value)
record.errors.add(attribute, I18n.t(:'user.email.not_allowed'))
end
elsif (setting = SiteSetting.email_domains_blacklist).present?
if email_in_restriction_setting?(setting, value)
record.errors.add(attribute, I18n.t(:'user.email.not_allowed'))
end
end
if record.errors[attribute].blank? and ScreenedEmail.should_block?(value)
record.errors.add(attribute, I18n.t(:'user.email.blocked'))
end
end
def email_in_restriction_setting?(setting, value)
domains = setting.gsub('.', '\.')
regexp = Regexp.new("@(#{domains})", true)
value =~ regexp
end
def self.email_regex
/^[a-zA-Z0-9!#\$%&'*+\/=?\^_`{|}~\-]+(?:\.[a-zA-Z0-9!#\$%&'\*+\/=?\^_`{|}~\-]+)*@(?:[a-zA-Z0-9](?:[a-zA-Z0-9\-]*[a-zA-Z0-9])?\.)+[a-zA-Z0-9](?:[a-zA-Z0-9\-]*[a-zA-Z0-9])?$/
end
end |
module ViewServer
class CbPasteRunner
def self.run
$stdout << ViewServer::Client.new.server.paste
end
end
end
Testing hook
module ViewServer
class CbPasteRunner
def self.run
$stdout << ViewServer::Client.new.server.paste
binding.pry
end
end
end
|
# frozen_string_literal: true
# Helper to use the 'parallel' gem to perform tasks
require 'parallel'
require 'stringio'
module OctocatalogDiff
module Util
# This is a utility class to execute tasks in parallel, using the 'parallel' gem.
# If parallel processing has been disabled, this instead executes the tasks serially,
# but provides the same API as the parallel tasks.
class Parallel
# This class represents a parallel task. It requires a method reference, which will be executed with
# any supplied arguments. It can optionally take a text description and a validator function.
class Task
attr_reader :description
attr_accessor :args
def initialize(opts = {})
@method = opts.fetch(:method)
@args = opts.fetch(:args, {})
@description = opts[:description] || @method.name
@validator = opts[:validator]
@validator_args = opts[:validator_args] || {}
end
def execute(logger = Logger.new(StringIO.new))
@method.call(@args, logger)
end
def validate(result, logger = Logger.new(StringIO.new))
return true if @validator.nil?
@validator.call(result, logger, @validator_args)
end
end
# This class represents the result from a parallel task. The status is set to true (success), false (error),
# or nil (task was killed before it could complete). The exception (for failure) and output object (for success)
# are readable attributes. The validity of the results, determined by executing the 'validate' method of the Task,
# is available to be set and fetched.
class Result
attr_reader :output, :args
attr_accessor :status, :exception
def initialize(opts = {})
@status = opts[:status]
@exception = opts[:exception]
@output = opts[:output]
@args = opts.fetch(:args, {})
end
end
# Entry point for parallel processing. By default this will perform parallel processing,
# but it will also accept an option to do serial processing instead.
# @param task_array [Array<Parallel::Task>] Tasks to run
# @param logger [Logger] Optional logger object
# @param parallelized [Boolean] True for parallel processing, false for serial processing
# @return [Array<Parallel::Result>] Parallel results (same order as tasks)
def self.run_tasks(task_array, logger = nil, parallelized = true)
# Create a throwaway logger object if one is not given
logger ||= Logger.new(StringIO.new)
# Validate input - we need an array. If the array is empty then return an empty array right away.
raise ArgumentError, "run_tasks() argument must be array, not #{task_array.class}" unless task_array.is_a?(Array)
return [] if task_array.empty?
# Make sure each element in the array is a OctocatalogDiff::Util::Parallel::Task
task_array.each do |x|
next if x.is_a?(OctocatalogDiff::Util::Parallel::Task)
raise ArgumentError, "Element #{x.inspect} must be a OctocatalogDiff::Util::Parallel::Task, not a #{x.class}"
end
# Actually do the processing - choose here between parallel and serial
parallelized ? run_tasks_parallel(task_array, logger) : run_tasks_serial(task_array, logger)
end
# Use the parallel gem to run each task in the task array. Under the hood this is forking a process for
# each task, and serializing/deserializing the arguments and the outputs.
# @param task_array [Array<OctocatalogDiff::Util::Parallel::Task>] Tasks to perform
# @param logger [Logger] Logger
# @return [Array<OctocatalogDiff::Util::Parallel::Result>] Parallel task results
def self.run_tasks_parallel(task_array, logger)
# Create an empty array of results. The status is nil and the exception is pre-populated. If the code
# runs successfully and doesn't get killed, all of these default values will be overwritten. If the code
# gets killed before the task finishes, this exception will remain.
result = task_array.map do |x|
Result.new(exception: ::Parallel::Kill.new('Killed'), args: x.args)
end
logger.debug "Initialized parallel task result array: size=#{result.size}"
# Do parallel processing
::Parallel.each(task_array,
finish: lambda do |item, i, parallel_result|
# Set the result array element to the result
result[i] = parallel_result
# Kill all other parallel tasks if this task failed by throwing an exception
raise ::Parallel::Kill unless parallel_result.exception.nil?
# Run the validator to determine if the result is in fact valid. The validator
# returns true or false. If true, set the 'valid' attribute in the result. If
# false, kill all other parallel tasks.
if item.validate(parallel_result.output, logger)
logger.debug("Success #{item.description}")
else
logger.warn("Failed #{item.description}")
result[i].status = false
raise ::Parallel::Kill
end
end) do |ele|
# simplecov does not detect that this code runs because it's forked, but this is
# tested extensively in the parallel_spec.rb spec file.
# :nocov:
begin
logger.debug("Begin #{ele.description}")
output = ele.execute(logger)
logger.debug("Success #{ele.description}")
Result.new(output: output, status: true, args: ele.args)
rescue => exc
logger.debug("Failed #{ele.description}: #{exc.class} #{exc.message}")
Result.new(exception: exc, status: false, args: ele.args)
end
# :nocov:
end
# Return result
result
end
# Perform the tasks in serial.
# @param task_array [Array<OctocatalogDiff::Util::Parallel::Task>] Tasks to perform
# @param logger [Logger] Logger
# @return [Array<OctocatalogDiff::Util::Parallel::Result>] Parallel task results
def self.run_tasks_serial(task_array, logger)
# Create an empty array of results. The status is nil and the exception is pre-populated. If the code
# runs successfully, all of these default values will be overwritten. If a predecessor task fails, all
# later task will have the defined exception.
result = task_array.map do |x|
Result.new(exception: ::RuntimeError.new('Cancellation - A prior task failed'), args: x.args)
end
# Perform the tasks 1 by 1 - each successful task will replace an element in the 'result' array,
# whereas a failed task will replace the current element with an exception, and all later tasks
# will not be replaced (thereby being populated with the cancellation error).
task_counter = 0
task_array.each do |ele|
begin
logger.debug("Begin #{ele.description}")
output = ele.execute(logger)
result[task_counter] = Result.new(output: output, status: true, args: ele.args)
rescue => exc
logger.debug("Failed #{ele.description}: #{exc.class} #{exc.message}")
result[task_counter] = Result.new(exception: exc, status: false, args: ele.args)
end
if ele.validate(output, logger)
logger.debug("Success #{ele.description}")
else
logger.warn("Failed #{ele.description}")
result[task_counter].status = false
end
break unless result[task_counter].status
task_counter += 1
end
# Return the result
result
end
end
end
end
Handle Parallel::DeadWorker
# frozen_string_literal: true
# Helper to use the 'parallel' gem to perform tasks
require 'parallel'
require 'stringio'
module OctocatalogDiff
module Util
# This is a utility class to execute tasks in parallel, using the 'parallel' gem.
# If parallel processing has been disabled, this instead executes the tasks serially,
# but provides the same API as the parallel tasks.
class Parallel
# This class represents a parallel task. It requires a method reference, which will be executed with
# any supplied arguments. It can optionally take a text description and a validator function.
class Task
attr_reader :description
attr_accessor :args
def initialize(opts = {})
@method = opts.fetch(:method)
@args = opts.fetch(:args, {})
@description = opts[:description] || @method.name
@validator = opts[:validator]
@validator_args = opts[:validator_args] || {}
end
def execute(logger = Logger.new(StringIO.new))
@method.call(@args, logger)
end
def validate(result, logger = Logger.new(StringIO.new))
return true if @validator.nil?
@validator.call(result, logger, @validator_args)
end
end
# This class represents the result from a parallel task. The status is set to true (success), false (error),
# or nil (task was killed before it could complete). The exception (for failure) and output object (for success)
# are readable attributes. The validity of the results, determined by executing the 'validate' method of the Task,
# is available to be set and fetched.
class Result
attr_reader :output, :args
attr_accessor :status, :exception
def initialize(opts = {})
@status = opts[:status]
@exception = opts[:exception]
@output = opts[:output]
@args = opts.fetch(:args, {})
end
end
# Entry point for parallel processing. By default this will perform parallel processing,
# but it will also accept an option to do serial processing instead.
# @param task_array [Array<Parallel::Task>] Tasks to run
# @param logger [Logger] Optional logger object
# @param parallelized [Boolean] True for parallel processing, false for serial processing
# @return [Array<Parallel::Result>] Parallel results (same order as tasks)
def self.run_tasks(task_array, logger = nil, parallelized = true)
# Create a throwaway logger object if one is not given
logger ||= Logger.new(StringIO.new)
# Validate input - we need an array. If the array is empty then return an empty array right away.
raise ArgumentError, "run_tasks() argument must be array, not #{task_array.class}" unless task_array.is_a?(Array)
return [] if task_array.empty?
# Make sure each element in the array is a OctocatalogDiff::Util::Parallel::Task
task_array.each do |x|
next if x.is_a?(OctocatalogDiff::Util::Parallel::Task)
raise ArgumentError, "Element #{x.inspect} must be a OctocatalogDiff::Util::Parallel::Task, not a #{x.class}"
end
# Serial processing
return run_tasks_serial(task_array, logger) unless parallelized
# Parallel processing.
# Create an empty array of results. The status is nil and the exception is pre-populated. If the code
# runs successfully and doesn't get killed, all of these default values will be overwritten. If the code
# gets killed before the task finishes, this exception will remain.
result = task_array.map do |x|
Result.new(exception: ::Parallel::Kill.new('Killed'), args: x.args)
end
logger.debug "Initialized parallel task result array: size=#{result.size}"
begin
run_tasks_parallel(result, task_array, logger)
rescue ::Parallel::DeadWorker => exc
# Accept failure of any worker since result array will contain the initialized failure case.
# :nocov:
logger.warn "Rescued #{exc.class}: #{exc.message}"
# :nocov:
end
result
end
# Use the parallel gem to run each task in the task array. Under the hood this is forking a process for
# each task, and serializing/deserializing the arguments and the outputs.
# @param result [Array<OctocatalogDiff::Util::Parallel::Result>] Parallel task results
# @param task_array [Array<OctocatalogDiff::Util::Parallel::Task>] Tasks to perform
# @param logger [Logger] Logger
def self.run_tasks_parallel(result, task_array, logger)
# Do parallel processing
::Parallel.each(task_array,
finish: lambda do |item, i, parallel_result|
# Set the result array element to the result
result[i] = parallel_result
# Kill all other parallel tasks if this task failed by throwing an exception
raise ::Parallel::Kill unless parallel_result.exception.nil?
# Run the validator to determine if the result is in fact valid. The validator
# returns true or false. If true, set the 'valid' attribute in the result. If
# false, kill all other parallel tasks.
if item.validate(parallel_result.output, logger)
logger.debug("Success #{item.description}")
else
logger.warn("Failed #{item.description}")
result[i].status = false
raise ::Parallel::Kill
end
end) do |ele|
# simplecov does not detect that this code runs because it's forked, but this is
# tested extensively in the parallel_spec.rb spec file.
# :nocov:
begin
logger.debug("Begin #{ele.description}")
output = ele.execute(logger)
logger.debug("Success #{ele.description}")
Result.new(output: output, status: true, args: ele.args)
rescue => exc
logger.debug("Failed #{ele.description}: #{exc.class} #{exc.message}")
Result.new(exception: exc, status: false, args: ele.args)
end
# :nocov:
end
end
# Perform the tasks in serial.
# @param task_array [Array<OctocatalogDiff::Util::Parallel::Task>] Tasks to perform
# @param logger [Logger] Logger
# @return [Array<OctocatalogDiff::Util::Parallel::Result>] Parallel task results
def self.run_tasks_serial(task_array, logger)
# Create an empty array of results. The status is nil and the exception is pre-populated. If the code
# runs successfully, all of these default values will be overwritten. If a predecessor task fails, all
# later task will have the defined exception.
result = task_array.map do |x|
Result.new(exception: ::RuntimeError.new('Cancellation - A prior task failed'), args: x.args)
end
# Perform the tasks 1 by 1 - each successful task will replace an element in the 'result' array,
# whereas a failed task will replace the current element with an exception, and all later tasks
# will not be replaced (thereby being populated with the cancellation error).
task_counter = 0
task_array.each do |ele|
begin
logger.debug("Begin #{ele.description}")
output = ele.execute(logger)
result[task_counter] = Result.new(output: output, status: true, args: ele.args)
rescue => exc
logger.debug("Failed #{ele.description}: #{exc.class} #{exc.message}")
result[task_counter] = Result.new(exception: exc, status: false, args: ele.args)
end
if ele.validate(output, logger)
logger.debug("Success #{ele.description}")
else
logger.warn("Failed #{ele.description}")
result[task_counter].status = false
end
break unless result[task_counter].status
task_counter += 1
end
# Return the result
result
end
end
end
end
|
# encoding: ASCII-8BIT
# Force encoding of string literals. Must match solution text.
module VimGolf
class Keylog
include Enumerable
def initialize(input, time=Time.now.utc)
# Force encoding of solution text. Must match string literals.
@input = input.b
@time = time
end
def to_s(sep = '')
to_a.join(sep)
end
alias_method :convert , :to_s
alias_method :score , :count
def each
scanner = StringScanner.new(@input)
# A Vim keycode is either a single byte, or a 3-byte sequence starting
# with 0x80.
while (c = scanner.get_byte)
n = c.ord
if n == 0x80
b2, b3 = scanner.get_byte, scanner.get_byte
if b2 == "\xfd" && b3 >= "\x38" && @time > @@sniff_date
# Should we account for KE_SNIFF removal?
b3 = (b3.ord + 1).chr
end
code = @@kc_mbyte[b2+b3]
yield code if code # ignore "nil" keystrokes (like window focus)
else
yield @@kc_1byte[n]
end
end
end
# Quick lookup array for single-byte keycodes
@@kc_1byte = []
(0..255).each {|n| @@kc_1byte.push("<%#04x>" % n)} # Fallback for non-ASCII
(1..127).each {|n| @@kc_1byte[n] = "<C-#{(n ^ 0x40).chr}>"}
(32..126).each {|c| @@kc_1byte[c] = c.chr } # Printing chars
@@kc_1byte[0x1b] = "<Esc>" # Special names for a few control chars
@@kc_1byte[0x0d] = "<CR>"
@@kc_1byte[0x0a] = "<NL>"
@@kc_1byte[0x09] = "<Tab>"
# After this date, assume KE_SNIFF is removed
@@sniff_date = Time.utc(2016, 4)
@@kc_mbyte = Hash.new do |_h,k|
'<' + k.bytes.map {|b| "%02x" % b}.join('-') + '>' # For missing keycodes
end.update({
# This list has been populated by looking at
# :h terminal-options and vim source files:
# keymap.h and misc2.c
"k1" => "<F1>",
"k2" => "<F2>",
"k3" => "<F3>",
"k4" => "<F4>",
"k5" => "<F5>",
"k6" => "<F6>",
"k7" => "<F7>",
"k8" => "<F8>",
"k9" => "<F9>",
"k;" => "<F10>",
"F1" => "<F11>",
"F2" => "<F12>",
"F3" => "<F13>",
"F4" => "<F14>",
"F5" => "<F15>",
"F6" => "<F16>",
"F7" => "<F17>",
"F8" => "<F18>",
"F9" => "<F19>",
"%1" => "<Help>",
"&8" => "<Undo>",
"#2" => "<S-Home>",
"*7" => "<S-End>",
"K1" => "<kHome>",
"K4" => "<kEnd>",
"K3" => "<kPageUp>",
"K5" => "<kPageDown>",
"K6" => "<kPlus>",
"K7" => "<kMinus>",
"K8" => "<kDivide>",
"K9" => "<kMultiply>",
"KA" => "<kEnter>",
"KB" => "<kPoint>",
"KC" => "<k0>",
"KD" => "<k1>",
"KE" => "<k2>",
"KF" => "<k3>",
"KG" => "<k4>",
"KH" => "<k5>",
"KI" => "<k6>",
"KJ" => "<k7>",
"KK" => "<k8>",
"KL" => "<k9>",
"kP" => "<PageUp>",
"kN" => "<PageDown>",
"kh" => "<Home>",
"@7" => "<End>",
"kI" => "<Insert>",
"kD" => "<Del>",
"kb" => "<BS>",
"ku" => "<Up>",
"kd" => "<Down>",
"kl" => "<Left>",
"kr" => "<Right>",
"#4" => "<S-Left>",
"%i" => "<S-Right>",
"kB" => "<S-Tab>",
"\xffX" => "<C-@>",
# This is how you escape literal 0x80
"\xfeX" => "<0x80>",
# These rarely-used modifiers should be combined with the next
# stroke (like <S-Space>), but let's put them here for now
"\xfc\x02" => "<S->",
"\xfc\x04" => "<C->",
"\xfc\x06" => "<C-S->",
"\xfc\x08" => "<A->",
"\xfc\x0a" => "<A-S->",
"\xfc\x0c" => "<C-A>",
"\xfc\x0e" => "<C-A-S->",
"\xfc\x10" => "<M->",
"\xfc\x12" => "<M-S->",
"\xfc\x14" => "<M-C->",
"\xfc\x16" => "<M-C-S->",
"\xfc\x18" => "<M-A->",
"\xfc\x1a" => "<M-A-S->",
"\xfc\x1c" => "<M-C-A>",
"\xfc\x1e" => "<M-C-A-S->",
# KS_EXTRA keycodes (starting with 0x80 0xfd) are defined by an enum in
# Vim's keymap.h. Sometimes, a new Vim adds or removes a keycode, which
# changes the binary representation of every keycode after it. Very
# annoying.
"\xfd\x4" => "<S-Up>",
"\xfd\x5" => "<S-Down>",
"\xfd\x6" => "<S-F1>",
"\xfd\x7" => "<S-F2>",
"\xfd\x8" => "<S-F3>",
"\xfd\x9" => "<S-F4>",
"\xfd\xa" => "<S-F5>",
"\xfd\xb" => "<S-F6>",
"\xfd\xc" => "<S-F7>",
"\xfd\xd" => "<S-F9>",
"\xfd\xe" => "<S-F10>",
"\xfd\xf" => "<S-F10>",
"\xfd\x10" => "<S-F11>",
"\xfd\x11" => "<S-F12>",
"\xfd\x12" => "<S-F13>",
"\xfd\x13" => "<S-F14>",
"\xfd\x14" => "<S-F15>",
"\xfd\x15" => "<S-F16>",
"\xfd\x16" => "<S-F17>",
"\xfd\x17" => "<S-F18>",
"\xfd\x18" => "<S-F19>",
"\xfd\x19" => "<S-F20>",
"\xfd\x1a" => "<S-F21>",
"\xfd\x1b" => "<S-F22>",
"\xfd\x1c" => "<S-F23>",
"\xfd\x1d" => "<S-F24>",
"\xfd\x1e" => "<S-F25>",
"\xfd\x1f" => "<S-F26>",
"\xfd\x20" => "<S-F27>",
"\xfd\x21" => "<S-F28>",
"\xfd\x22" => "<S-F29>",
"\xfd\x23" => "<S-F30>",
"\xfd\x24" => "<S-F31>",
"\xfd\x25" => "<S-F32>",
"\xfd\x26" => "<S-F33>",
"\xfd\x27" => "<S-F34>",
"\xfd\x28" => "<S-F35>",
"\xfd\x29" => "<S-F36>",
"\xfd\x2a" => "<S-F37>",
"\xfd\x2b" => "<Mouse>",
"\xfd\x2c" => "<LeftMouse>",
"\xfd\x2d" => "<LeftDrag>",
"\xfd\x2e" => "<LeftRelease>",
"\xfd\x2f" => "<MiddleMouse>",
"\xfd\x30" => "<MiddleDrag>",
"\xfd\x31" => "<MiddleRelease>",
"\xfd\x32" => "<RightMouse>",
"\xfd\x33" => "<RightDrag>",
"\xfd\x34" => "<RightRelease>",
"\xfd\x35" => nil, # KE_IGNORE
#"\xfd\x36" => "KE_TAB",
#"\xfd\x37" => "KE_S_TAB_OLD",
# Vim 7.4.1433 removed KE_SNIFF. Unfortunately, this changed the
# offset of every keycode after it. Keycodes after this point should be
# accurate BEFORE that change.
#"\xfd\x38" => "KE_SNIFF",
#"\xfd\x39" => "KE_XF1",
#"\xfd\x3a" => "KE_XF2",
#"\xfd\x3b" => "KE_XF3",
#"\xfd\x3c" => "KE_XF4",
#"\xfd\x3d" => "KE_XEND",
#"\xfd\x3e" => "KE_ZEND",
#"\xfd\x3f" => "KE_XHOME",
#"\xfd\x40" => "KE_ZHOME",
#"\xfd\x41" => "KE_XUP",
#"\xfd\x42" => "KE_XDOWN",
#"\xfd\x43" => "KE_XLEFT",
#"\xfd\x44" => "KE_XRIGHT",
#"\xfd\x45" => "KE_LEFTMOUSE_NM",
#"\xfd\x46" => "KE_LEFTRELEASE_NM",
#"\xfd\x47" => "KE_S_XF1",
#"\xfd\x48" => "KE_S_XF2",
#"\xfd\x49" => "KE_S_XF3",
#"\xfd\x4a" => "KE_S_XF4",
"\xfd\x4b" => "<ScrollWheelUp>",
"\xfd\x4c" => "<ScrollWheelDown>",
# Horizontal scroll wheel support was added in Vim 7.3c. These
# 2 entries shifted the rest of the KS_EXTRA mappings down 2.
# Though Vim 7.2 is rare today, it was common soon after
# vimgolf.com was launched. In cases where the 7.3 code is
# never used but the 7.2 code was common, it makes sense to use
# the 7.2 code. There are conflicts though, so some legacy
# keycodes have to stay wrong.
"\xfd\x4d" => "<ScrollWheelRight>",
"\xfd\x4e" => "<ScrollWheelLeft>",
"\xfd\x4f" => "<kInsert>",
"\xfd\x50" => "<kDel>",
"\xfd\x51" => "<0x9b>", # :help <CSI>
#"\xfd\x52" => "KE_SNR",
#"\xfd\x53" => "KE_PLUG", # never used
"\xfd\x53" => "<C-Left>", # 7.2 compat
#"\xfd\x54" => "KE_CMDWIN", # never used
"\xfd\x54" => "<C-Right>", # 7.2 compat
"\xfd\x55" => "<C-Left>", # 7.2 <C-Home> conflict
"\xfd\x56" => "<C-Right>", # 7.2 <C-End> conflict
"\xfd\x57" => "<C-Home>",
"\xfd\x58" => "<C-End>",
#"\xfd\x59" => "KE_X1MOUSE",
#"\xfd\x5a" => "KE_X1DRAG",
#"\xfd\x5b" => "KE_X1RELEASE",
#"\xfd\x5c" => "KE_X2MOUSE",
#"\xfd\x5d" => "KE_X2DRAG",
#"\xfd\x5e" => "KE_X2RELEASE",
"\xfd\x5e" => nil, # 7.2 compat (I think?)
#"\xfd\x5f" => "KE_DROP",
#"\xfd\x60" => "KE_CURSORHOLD",
# If you use gvim, you'll get an entry in your keylog every time the
# window gains or loses focus. These "keystrokes" should not show and
# should not be counted.
"\xfd\x60" => nil, # 7.2 Focus Gained compat
"\xfd\x61" => nil, # Focus Gained (GVIM) (>7.4.1433)
"\xfd\x62" => nil, # Focus Gained (GVIM)
"\xfd\x63" => nil, # Focus Lost (GVIM)
})
end
end
Use constant instead of class variable
# encoding: ASCII-8BIT
# Force encoding of string literals. Must match solution text.
module VimGolf
class Keylog
include Enumerable
def initialize(input, time=Time.now.utc)
# Force encoding of solution text. Must match string literals.
@input = input.b
@time = time
end
def to_s(sep = '')
to_a.join(sep)
end
alias_method :convert , :to_s
alias_method :score , :count
def each
scanner = StringScanner.new(@input)
# A Vim keycode is either a single byte, or a 3-byte sequence starting
# with 0x80.
while (c = scanner.get_byte)
n = c.ord
if n == 0x80
b2, b3 = scanner.get_byte, scanner.get_byte
if b2 == "\xfd" && b3 >= "\x38" && @time > SNIFF_DATE
# Should we account for KE_SNIFF removal?
b3 = (b3.ord + 1).chr
end
code = KC_MBYTE[b2+b3]
yield code if code # ignore "nil" keystrokes (like window focus)
else
yield KC_1BYTE[n]
end
end
end
# Quick lookup array for single-byte keycodes
KC_1BYTE = []
(0..255).each {|n| KC_1BYTE.push("<%#04x>" % n)} # Fallback for non-ASCII
(1..127).each {|n| KC_1BYTE[n] = "<C-#{(n ^ 0x40).chr}>"}
(32..126).each {|c| KC_1BYTE[c] = c.chr } # Printing chars
KC_1BYTE[0x1b] = "<Esc>" # Special names for a few control chars
KC_1BYTE[0x0d] = "<CR>"
KC_1BYTE[0x0a] = "<NL>"
KC_1BYTE[0x09] = "<Tab>"
# After this date, assume KE_SNIFF is removed
SNIFF_DATE = Time.utc(2016, 4)
KC_MBYTE = Hash.new do |_h,k|
'<' + k.bytes.map {|b| "%02x" % b}.join('-') + '>' # For missing keycodes
end.update({
# This list has been populated by looking at
# :h terminal-options and vim source files:
# keymap.h and misc2.c
"k1" => "<F1>",
"k2" => "<F2>",
"k3" => "<F3>",
"k4" => "<F4>",
"k5" => "<F5>",
"k6" => "<F6>",
"k7" => "<F7>",
"k8" => "<F8>",
"k9" => "<F9>",
"k;" => "<F10>",
"F1" => "<F11>",
"F2" => "<F12>",
"F3" => "<F13>",
"F4" => "<F14>",
"F5" => "<F15>",
"F6" => "<F16>",
"F7" => "<F17>",
"F8" => "<F18>",
"F9" => "<F19>",
"%1" => "<Help>",
"&8" => "<Undo>",
"#2" => "<S-Home>",
"*7" => "<S-End>",
"K1" => "<kHome>",
"K4" => "<kEnd>",
"K3" => "<kPageUp>",
"K5" => "<kPageDown>",
"K6" => "<kPlus>",
"K7" => "<kMinus>",
"K8" => "<kDivide>",
"K9" => "<kMultiply>",
"KA" => "<kEnter>",
"KB" => "<kPoint>",
"KC" => "<k0>",
"KD" => "<k1>",
"KE" => "<k2>",
"KF" => "<k3>",
"KG" => "<k4>",
"KH" => "<k5>",
"KI" => "<k6>",
"KJ" => "<k7>",
"KK" => "<k8>",
"KL" => "<k9>",
"kP" => "<PageUp>",
"kN" => "<PageDown>",
"kh" => "<Home>",
"@7" => "<End>",
"kI" => "<Insert>",
"kD" => "<Del>",
"kb" => "<BS>",
"ku" => "<Up>",
"kd" => "<Down>",
"kl" => "<Left>",
"kr" => "<Right>",
"#4" => "<S-Left>",
"%i" => "<S-Right>",
"kB" => "<S-Tab>",
"\xffX" => "<C-@>",
# This is how you escape literal 0x80
"\xfeX" => "<0x80>",
# These rarely-used modifiers should be combined with the next
# stroke (like <S-Space>), but let's put them here for now
"\xfc\x02" => "<S->",
"\xfc\x04" => "<C->",
"\xfc\x06" => "<C-S->",
"\xfc\x08" => "<A->",
"\xfc\x0a" => "<A-S->",
"\xfc\x0c" => "<C-A>",
"\xfc\x0e" => "<C-A-S->",
"\xfc\x10" => "<M->",
"\xfc\x12" => "<M-S->",
"\xfc\x14" => "<M-C->",
"\xfc\x16" => "<M-C-S->",
"\xfc\x18" => "<M-A->",
"\xfc\x1a" => "<M-A-S->",
"\xfc\x1c" => "<M-C-A>",
"\xfc\x1e" => "<M-C-A-S->",
# KS_EXTRA keycodes (starting with 0x80 0xfd) are defined by an enum in
# Vim's keymap.h. Sometimes, a new Vim adds or removes a keycode, which
# changes the binary representation of every keycode after it. Very
# annoying.
"\xfd\x4" => "<S-Up>",
"\xfd\x5" => "<S-Down>",
"\xfd\x6" => "<S-F1>",
"\xfd\x7" => "<S-F2>",
"\xfd\x8" => "<S-F3>",
"\xfd\x9" => "<S-F4>",
"\xfd\xa" => "<S-F5>",
"\xfd\xb" => "<S-F6>",
"\xfd\xc" => "<S-F7>",
"\xfd\xd" => "<S-F9>",
"\xfd\xe" => "<S-F10>",
"\xfd\xf" => "<S-F10>",
"\xfd\x10" => "<S-F11>",
"\xfd\x11" => "<S-F12>",
"\xfd\x12" => "<S-F13>",
"\xfd\x13" => "<S-F14>",
"\xfd\x14" => "<S-F15>",
"\xfd\x15" => "<S-F16>",
"\xfd\x16" => "<S-F17>",
"\xfd\x17" => "<S-F18>",
"\xfd\x18" => "<S-F19>",
"\xfd\x19" => "<S-F20>",
"\xfd\x1a" => "<S-F21>",
"\xfd\x1b" => "<S-F22>",
"\xfd\x1c" => "<S-F23>",
"\xfd\x1d" => "<S-F24>",
"\xfd\x1e" => "<S-F25>",
"\xfd\x1f" => "<S-F26>",
"\xfd\x20" => "<S-F27>",
"\xfd\x21" => "<S-F28>",
"\xfd\x22" => "<S-F29>",
"\xfd\x23" => "<S-F30>",
"\xfd\x24" => "<S-F31>",
"\xfd\x25" => "<S-F32>",
"\xfd\x26" => "<S-F33>",
"\xfd\x27" => "<S-F34>",
"\xfd\x28" => "<S-F35>",
"\xfd\x29" => "<S-F36>",
"\xfd\x2a" => "<S-F37>",
"\xfd\x2b" => "<Mouse>",
"\xfd\x2c" => "<LeftMouse>",
"\xfd\x2d" => "<LeftDrag>",
"\xfd\x2e" => "<LeftRelease>",
"\xfd\x2f" => "<MiddleMouse>",
"\xfd\x30" => "<MiddleDrag>",
"\xfd\x31" => "<MiddleRelease>",
"\xfd\x32" => "<RightMouse>",
"\xfd\x33" => "<RightDrag>",
"\xfd\x34" => "<RightRelease>",
"\xfd\x35" => nil, # KE_IGNORE
#"\xfd\x36" => "KE_TAB",
#"\xfd\x37" => "KE_S_TAB_OLD",
# Vim 7.4.1433 removed KE_SNIFF. Unfortunately, this changed the
# offset of every keycode after it. Keycodes after this point should be
# accurate BEFORE that change.
#"\xfd\x38" => "KE_SNIFF",
#"\xfd\x39" => "KE_XF1",
#"\xfd\x3a" => "KE_XF2",
#"\xfd\x3b" => "KE_XF3",
#"\xfd\x3c" => "KE_XF4",
#"\xfd\x3d" => "KE_XEND",
#"\xfd\x3e" => "KE_ZEND",
#"\xfd\x3f" => "KE_XHOME",
#"\xfd\x40" => "KE_ZHOME",
#"\xfd\x41" => "KE_XUP",
#"\xfd\x42" => "KE_XDOWN",
#"\xfd\x43" => "KE_XLEFT",
#"\xfd\x44" => "KE_XRIGHT",
#"\xfd\x45" => "KE_LEFTMOUSE_NM",
#"\xfd\x46" => "KE_LEFTRELEASE_NM",
#"\xfd\x47" => "KE_S_XF1",
#"\xfd\x48" => "KE_S_XF2",
#"\xfd\x49" => "KE_S_XF3",
#"\xfd\x4a" => "KE_S_XF4",
"\xfd\x4b" => "<ScrollWheelUp>",
"\xfd\x4c" => "<ScrollWheelDown>",
# Horizontal scroll wheel support was added in Vim 7.3c. These
# 2 entries shifted the rest of the KS_EXTRA mappings down 2.
# Though Vim 7.2 is rare today, it was common soon after
# vimgolf.com was launched. In cases where the 7.3 code is
# never used but the 7.2 code was common, it makes sense to use
# the 7.2 code. There are conflicts though, so some legacy
# keycodes have to stay wrong.
"\xfd\x4d" => "<ScrollWheelRight>",
"\xfd\x4e" => "<ScrollWheelLeft>",
"\xfd\x4f" => "<kInsert>",
"\xfd\x50" => "<kDel>",
"\xfd\x51" => "<0x9b>", # :help <CSI>
#"\xfd\x52" => "KE_SNR",
#"\xfd\x53" => "KE_PLUG", # never used
"\xfd\x53" => "<C-Left>", # 7.2 compat
#"\xfd\x54" => "KE_CMDWIN", # never used
"\xfd\x54" => "<C-Right>", # 7.2 compat
"\xfd\x55" => "<C-Left>", # 7.2 <C-Home> conflict
"\xfd\x56" => "<C-Right>", # 7.2 <C-End> conflict
"\xfd\x57" => "<C-Home>",
"\xfd\x58" => "<C-End>",
#"\xfd\x59" => "KE_X1MOUSE",
#"\xfd\x5a" => "KE_X1DRAG",
#"\xfd\x5b" => "KE_X1RELEASE",
#"\xfd\x5c" => "KE_X2MOUSE",
#"\xfd\x5d" => "KE_X2DRAG",
#"\xfd\x5e" => "KE_X2RELEASE",
"\xfd\x5e" => nil, # 7.2 compat (I think?)
#"\xfd\x5f" => "KE_DROP",
#"\xfd\x60" => "KE_CURSORHOLD",
# If you use gvim, you'll get an entry in your keylog every time the
# window gains or loses focus. These "keystrokes" should not show and
# should not be counted.
"\xfd\x60" => nil, # 7.2 Focus Gained compat
"\xfd\x61" => nil, # Focus Gained (GVIM) (>7.4.1433)
"\xfd\x62" => nil, # Focus Gained (GVIM)
"\xfd\x63" => nil, # Focus Lost (GVIM)
})
end
end
|
require 'omniauth-oauth2'
module Omniauth
module Strategies
class Socialcast < OmniAuth::Strategies::OAuth2
# Give your strategy a name.
option :name, "socialcast"
# This is where you pass the options you would pass when
# initializing your consumer from the OAuth gem.
option :client_options, {
:site => nil,
:authorize_url => '/oauth2/authorization?response_type=code',
:token_url => '/oauth2/token'
}
uid{ raw_info['user']['id'] }
info do
{
:name => raw_info['user']['name']
:avatar16 => raw_info['user']['avatar']['square16'],
:avatar30 => raw_info['user']['avatar']['square30'],
:avatar45 => raw_info['user']['avatar']['square45'],
:avatar70 => raw_info['user']['avatar']['square70'],
:avatar140 => raw_info['user']['avatar']['square140']
}
end
# extra do
# {
# 'raw_info' => raw_info
# }
# end
def raw_info
@raw_info ||= access_token.get('/api/userinfo.json').parsed
end
end
end
end
Missed comma
require 'omniauth-oauth2'
module Omniauth
module Strategies
class Socialcast < OmniAuth::Strategies::OAuth2
# Give your strategy a name.
option :name, "socialcast"
# This is where you pass the options you would pass when
# initializing your consumer from the OAuth gem.
option :client_options, {
:site => nil,
:authorize_url => '/oauth2/authorization?response_type=code',
:token_url => '/oauth2/token'
}
uid{ raw_info['user']['id'] }
info do
{
:name => raw_info['user']['name'],
:avatar16 => raw_info['user']['avatar']['square16'],
:avatar30 => raw_info['user']['avatar']['square30'],
:avatar45 => raw_info['user']['avatar']['square45'],
:avatar70 => raw_info['user']['avatar']['square70'],
:avatar140 => raw_info['user']['avatar']['square140']
}
end
# extra do
# {
# 'raw_info' => raw_info
# }
# end
def raw_info
@raw_info ||= access_token.get('/api/userinfo.json').parsed
end
end
end
end
|
# encoding: utf-8
module VoshodAvtoImport
# Предварительная обработка выгрузки (распаковка архивов).
# Запуск обработчика. Отправка отчетов.
class Manager
def self.run
new.run
end # self.run
def initialize
end # new
def run
@has_files = false
extract_zip_files
processing
close_logger
yield if @has_files && block_given?
end # run
def log(msg = "")
create_logger unless @logger
@logger.error(msg)
puts msg
msg
end # log
private
def processing
unless ::VoshodAvtoImport::import_dir && ::FileTest.directory?(::VoshodAvtoImport::import_dir)
log "Директория #{::VoshodAvtoImport::import_dir} не существует!"
return
end
files = ::Dir.glob( ::File.join(::VoshodAvtoImport::import_dir, "**", "*.xml") )
return unless files && files.size > 0
@has_files = true
start = Time.now.to_f
# Сортируем по дате последнего доступа по-возрастанию
files.sort{ |a, b| ::File.new(a).mtime <=> ::File.new(b).atime }.each do |xml_file|
::VoshodAvtoImport::Worker.new(xml_file, self).parse
end # each
log "На импорт всех файлов затрачено времени: #{ '%0.3f' % (Time.now.to_f - start) } секунд."
log ""
self
end # processing
def extract_zip_files
# Ищем и распаковываем все zip-архивы, после - удаляем
files = ::Dir.glob( ::File.join(::VoshodAvtoImport::import_dir, "**", "*.zip") )
return unless files && files.size > 0
i = 0
files.each do |zip|
i+= 1
begin
::Zip::ZipFile.open(zip) { |zip_file|
zip_file.each { |f|
# Создаем дополнительную вложенность т.к. 1С 8 выгружает всегда одни и теже
# навания файлов, и если таких выгрузок будет много, то при распковке файлы
# будут перезатираться
f_path = ::File.join(::VoshodAvtoImport::import_dir, "#{i}", f.name)
::FileUtils.rm_rf f_path if ::File.exist?(f_path)
::FileUtils.mkdir_p(::File.dirname(f_path))
zip_file.extract(f, f_path)
} # each
} # open
::FileUtils.rm_rf(zip)
rescue
end
end # Dir.glob
end # extract_zip_files
def create_logger
return unless ::VoshodAvtoImport::log_dir && ::FileTest.directory?(::VoshodAvtoImport::log_dir)
return if @logger
::FileUtils.mkdir_p(::VoshodAvtoImport::log_dir) unless ::FileTest.directory?(::VoshodAvtoImport::log_dir)
log_file = ::File.open(
::File.join(::VoshodAvtoImport::log_dir, "import.log"),
::File::WRONLY | ::File::APPEND | ::File::CREAT
)
log_file.sync = true
@logger = ::Logger.new(log_file, 'weekly')
@logger
end # create_logger
def close_logger
return unless @logger
@logger.close
@logger = nil
end # close_logger
end # Manager
end # VoshodAvtoImport
Мелкая правка.
# encoding: utf-8
module VoshodAvtoImport
# Предварительная обработка выгрузки (распаковка архивов).
# Запуск обработчика. Отправка отчетов.
class Manager
def self.run
new.run
end # self.run
def initialize
end # new
def run
@has_files = false
extract_zip_files
processing
close_logger
yield if @has_files && block_given?
end # run
def log(msg = "")
create_logger unless @logger
@logger.error(msg)
msg
end # log
private
def processing
unless ::VoshodAvtoImport::import_dir && ::FileTest.directory?(::VoshodAvtoImport::import_dir)
log "Директория #{::VoshodAvtoImport::import_dir} не существует!"
return
end
files = ::Dir.glob( ::File.join(::VoshodAvtoImport::import_dir, "**", "*.xml") )
return unless files && files.size > 0
@has_files = true
start = Time.now.to_f
# Сортируем по дате последнего доступа по-возрастанию
files.sort{ |a, b| ::File.new(a).mtime <=> ::File.new(b).atime }.each do |xml_file|
::VoshodAvtoImport::Worker.new(xml_file, self).parse
end # each
log "На импорт всех файлов затрачено времени: #{ '%0.3f' % (Time.now.to_f - start) } секунд."
log ""
self
end # processing
def extract_zip_files
# Ищем и распаковываем все zip-архивы, после - удаляем
files = ::Dir.glob( ::File.join(::VoshodAvtoImport::import_dir, "**", "*.zip") )
return unless files && files.size > 0
i = 0
files.each do |zip|
i+= 1
begin
::Zip::ZipFile.open(zip) { |zip_file|
zip_file.each { |f|
# Создаем дополнительную вложенность т.к. 1С 8 выгружает всегда одни и теже
# навания файлов, и если таких выгрузок будет много, то при распковке файлы
# будут перезатираться
f_path = ::File.join(::VoshodAvtoImport::import_dir, "#{i}", f.name)
::FileUtils.rm_rf f_path if ::File.exist?(f_path)
::FileUtils.mkdir_p(::File.dirname(f_path))
zip_file.extract(f, f_path)
} # each
} # open
::FileUtils.rm_rf(zip)
rescue
end
end # Dir.glob
end # extract_zip_files
def create_logger
return unless ::VoshodAvtoImport::log_dir && ::FileTest.directory?(::VoshodAvtoImport::log_dir)
return if @logger
::FileUtils.mkdir_p(::VoshodAvtoImport::log_dir) unless ::FileTest.directory?(::VoshodAvtoImport::log_dir)
log_file = ::File.open(
::File.join(::VoshodAvtoImport::log_dir, "import.log"),
::File::WRONLY | ::File::APPEND | ::File::CREAT
)
log_file.sync = true
@logger = ::Logger.new(log_file, 'weekly')
@logger
end # create_logger
def close_logger
return unless @logger
@logger.close
@logger = nil
end # close_logger
end # Manager
end # VoshodAvtoImport
|
module OrigenTesters
class VectorPipeline
attr_reader :group_size, :pipeline
# Used to keep track of how many vectors since the last reset of the pipeline (i.e.
# since pattern start). This is used to implement padding if there is a minimum
# vector requirement.
attr_reader :vector_count
attr_reader :cycle_count
def initialize(group_size)
@group_size = group_size
@pipeline = []
# A new pipeline is instantiated per-pattern, so don't need to worry about
# clearing this
@vector_count = 0
@cycle_count = 0
end
def push_comment(comment)
comments << comment
end
def push_microcode(code)
if $tester.v93k? && code =~ /JSUB/
@vector_count += 1
end
comments << code
end
# Add a vector/comment to the pipeline
def <<(vector)
if vector.is_a?(Vector)
level_period(vector) do |vector|
consume_comments(vector)
if vector.repeat > 1
add_repeat_vector(vector)
else
pipeline << vector
end
end
# Keep a persistent record of the last vector so that we know what it
# was after the pipeline has been flushed
@last_vector = pipeline.last
elsif vector.is_a?(Symbol)
case vector
when :align
duplicate_last_vector until aligned?
when :align_last
duplicate_last_vector until aligned_to_last?
else
fail "Uknown vector generator instruction: #{vector}"
end
else
comments << vector
end
end
# If there are complete groups sitting at the top of the pipeline
# then this will yield them back line by line, stopping after the last
# complete group and leaving any remaining single vectors in the pipeline
#
# If there are no complete groups present then it will just return
def flush(&block)
while lead_group_finalized?
lead_group.each do |vector|
vector.comments.each do |comment|
yield comment
end
yield_vector(vector, &block)
end
pipeline.shift(group_size)
end
end
# Call at the end to force a flush out of any remaining vectors
def empty(options = {}, &block)
if !pipeline.empty? || !comments.empty?
if options[:min_vectors]
comment_written = false
while @vector_count < options[:min_vectors] - pipeline.size
unless comment_written
yield "#{$tester.comment_char} PADDING VECTORS ADDED TO MEET MIN #{options[:min_vectors]} FOR PATTERN"
comment_written = true
end
yield_vector(@last_vector, &block)
end
end
duplicate_last_vector until aligned?
pipeline.each do |vector|
vector.comments.each do |comment|
yield comment
end
yield_vector(vector, &block)
end
comments.each do |comment|
yield comment
end
@pipeline = []
@comments = []
end
end
private
def yield_vector(vector, &block)
vector.cycle = @cycle_count
vector.number = @vector_count
r = vector.repeat || 1
if $tester.min_repeat_loop && r < $tester.min_repeat_loop
vector.repeat = 1
if r > 1
vector.comments << '#R' + r.to_s
end
yield vector
(r - 1).times do |index|
vector.comments = ['#R' + (r - 1 - index).to_s]
vector.number += 1
vector.cycle += 1
yield vector
end
@vector_count += r
@cycle_count += r
else
yield vector
@vector_count += 1
@cycle_count += r
end
end
def level_period(vector)
if $tester.level_period?
vector.convert_to_timeset($tester.min_period_timeset) do |vector|
yield vector
end
else
yield vector
end
end
# Pushes a duplicate of the given vector with its repeat set to 1
#
# Also clears any comments associated with the vector with the rationale that we only
# want to see them the first time.
#
# Any microcode is cleared with the rationale that the caller is responsible for aligning
# this to the correct vector if required.
def push_duplicate(vector, options = {})
v = vector.dup
v.microcode = nil
v.repeat = 1
pipeline << v
if options[:existing_vector]
v.comments = []
else
vector.comments = []
end
end
def duplicate_last_vector
v = @last_vector.dup
v.comments = []
v.timeset = $tester.timeset
v.repeat = 1
v.microcode = nil
pipeline << v
end
def add_repeat_vector(vector)
count = vector.repeat
# Align to the start of a new group by splitting off single vectors
# to complete the current group
while !aligned? && count > 0
push_duplicate(vector)
count -= 1
end
if count > group_size
remainder = count % group_size
# Create a group with the required repeat
group_size.times do
push_duplicate(vector)
end
pipeline.last.repeat = (count - remainder) / group_size
# Then expand out any leftover
remainder.times do
push_duplicate(vector)
end
# For small repeats that fit within the group just expand them
else
while count > 0
push_duplicate(vector)
count -= 1
end
end
end
# Returns true of the next vector to be added to the pipeline will
# be at the start of a new group
def aligned?
(pipeline.size % group_size) == 0
end
# Returns true if the next vector to be added to the pipeline will
# complete the current group
def aligned_to_last?
(pipeline.size % group_size) == (group_size - 1)
end
def consume_comments(vector)
vector.comments = comments
@comments = []
end
def comments
@comments ||= []
end
# When true the lead group is complete and a further repeat of it is not possible
# Calling this will compress the 2nd group into the 1st if possible
def lead_group_finalized?
if first_group_present? && second_group_present?
if second_group_is_duplicate_of_first_group? && first_group_repeat != $tester.max_repeat_loop &&
first_group_can_be_compressed?
# Consume the second group by incrementing the first group repeat counter
self.first_group_repeat = first_group_repeat + second_group_repeat
# Delete the second group
group_size.times { pipeline.delete_at(group_size) }
# Now deal with any overflow of the first group repeat counter
if first_group_repeat > $tester.max_repeat_loop
r = first_group_repeat - $tester.max_repeat_loop
self.first_group_repeat = $tester.max_repeat_loop
group_size.times { |i| push_duplicate(pipeline[i], existing_vector: true) }
self.second_group_repeat = r
true
elsif first_group_repeat == $tester.max_repeat_loop
true
else
false
end
else
# Second group has started and is already different from the first group
true
end
end
end
def first_group_repeat
# This is currently hardcoded to the Teradyne concept of the repeat being applied
# to the last vector in the group. May need an abstraction here if other ATEs don't
# adhere to that approach.
first_group.last.repeat || 1
end
def first_group_repeat=(val)
first_group.last.repeat = val
end
def second_group_repeat
second_group.last.repeat || 1
end
def second_group_repeat=(val)
second_group.last.repeat = val
end
def first_group_can_be_compressed?
first_group.all? do |vector|
!vector.dont_compress
end
end
def second_group_is_duplicate_of_first_group?
i = -1
second_group.all? do |vector|
i += 1
(pipeline[i] == vector) && (vector.comments.size == 0) &&
# Don't consider vectors with matching microcode duplicates, caller is
# responsible for laying out microcode with the correct alignment
!pipeline[i].has_microcode? && !vector.has_microcode? &&
!vector.dont_compress
end
end
def first_group_present?
lead_group.size == group_size
end
def second_group_present?
second_group.size == group_size
end
def lead_group
pipeline[0..group_size - 1]
end
alias_method :first_group, :lead_group
def second_group
pipeline[group_size..(group_size * 2) - 1]
end
end
end
correct verbose inline vector comments with group_size > 1
module OrigenTesters
class VectorPipeline
attr_reader :group_size, :pipeline
# Used to keep track of how many vectors since the last reset of the pipeline (i.e.
# since pattern start). This is used to implement padding if there is a minimum
# vector requirement.
attr_reader :vector_count
attr_reader :cycle_count
def initialize(group_size)
@group_size = group_size
@pipeline = []
# A new pipeline is instantiated per-pattern, so don't need to worry about
# clearing this
@vector_count = 0
@cycle_count = 0
end
def push_comment(comment)
comments << comment
end
def push_microcode(code)
if $tester.v93k? && code =~ /JSUB/
@vector_count += 1
end
comments << code
end
# Add a vector/comment to the pipeline
def <<(vector)
if vector.is_a?(Vector)
level_period(vector) do |vector|
consume_comments(vector)
if vector.repeat > 1
add_repeat_vector(vector)
else
pipeline << vector
end
end
# Keep a persistent record of the last vector so that we know what it
# was after the pipeline has been flushed
@last_vector = pipeline.last
elsif vector.is_a?(Symbol)
case vector
when :align
duplicate_last_vector until aligned?
when :align_last
duplicate_last_vector until aligned_to_last?
else
fail "Uknown vector generator instruction: #{vector}"
end
else
comments << vector
end
end
# If there are complete groups sitting at the top of the pipeline
# then this will yield them back line by line, stopping after the last
# complete group and leaving any remaining single vectors in the pipeline
#
# If there are no complete groups present then it will just return
def flush(&block)
while lead_group_finalized?
lead_group.each do |vector|
vector.comments.each do |comment|
yield comment
end
yield_vector(vector, &block)
end
pipeline.shift(group_size)
end
end
# Call at the end to force a flush out of any remaining vectors
def empty(options = {}, &block)
if !pipeline.empty? || !comments.empty?
if options[:min_vectors]
comment_written = false
while @vector_count < options[:min_vectors] - pipeline.size
unless comment_written
yield "#{$tester.comment_char} PADDING VECTORS ADDED TO MEET MIN #{options[:min_vectors]} FOR PATTERN"
comment_written = true
end
yield_vector(@last_vector, &block)
end
end
duplicate_last_vector until aligned?
pipeline.each do |vector|
vector.comments.each do |comment|
yield comment
end
yield_vector(vector, &block)
end
comments.each do |comment|
yield comment
end
@pipeline = []
@comments = []
end
end
private
def yield_vector(vector, &block)
vector.cycle = @cycle_count
vector.number = @vector_count
r = vector.repeat || 1
if $tester.min_repeat_loop && r < $tester.min_repeat_loop
vector.repeat = 1
if r > 1
vector.comments << '#R' + r.to_s
end
yield vector
(r - 1).times do |index|
vector.comments = ['#R' + (r - 1 - index).to_s]
vector.number += 1
vector.cycle += 1
yield vector
end
@vector_count += r
@cycle_count += r
else
yield vector
@vector_count += 1
@cycle_count += r * @group_size
end
end
def level_period(vector)
if $tester.level_period?
vector.convert_to_timeset($tester.min_period_timeset) do |vector|
yield vector
end
else
yield vector
end
end
# Pushes a duplicate of the given vector with its repeat set to 1
#
# Also clears any comments associated with the vector with the rationale that we only
# want to see them the first time.
#
# Any microcode is cleared with the rationale that the caller is responsible for aligning
# this to the correct vector if required.
def push_duplicate(vector, options = {})
v = vector.dup
v.microcode = nil
v.repeat = 1
pipeline << v
if options[:existing_vector]
v.comments = []
else
vector.comments = []
end
end
def duplicate_last_vector
v = @last_vector.dup
v.comments = []
v.timeset = $tester.timeset
v.repeat = 1
v.microcode = nil
pipeline << v
end
def add_repeat_vector(vector)
count = vector.repeat
# Align to the start of a new group by splitting off single vectors
# to complete the current group
while !aligned? && count > 0
push_duplicate(vector)
count -= 1
end
if count > group_size
remainder = count % group_size
# Create a group with the required repeat
group_size.times do
push_duplicate(vector)
end
pipeline.last.repeat = (count - remainder) / group_size
# Then expand out any leftover
remainder.times do
push_duplicate(vector)
end
# For small repeats that fit within the group just expand them
else
while count > 0
push_duplicate(vector)
count -= 1
end
end
end
# Returns true of the next vector to be added to the pipeline will
# be at the start of a new group
def aligned?
(pipeline.size % group_size) == 0
end
# Returns true if the next vector to be added to the pipeline will
# complete the current group
def aligned_to_last?
(pipeline.size % group_size) == (group_size - 1)
end
def consume_comments(vector)
vector.comments = comments
@comments = []
end
def comments
@comments ||= []
end
# When true the lead group is complete and a further repeat of it is not possible
# Calling this will compress the 2nd group into the 1st if possible
def lead_group_finalized?
if first_group_present? && second_group_present?
if second_group_is_duplicate_of_first_group? && first_group_repeat != $tester.max_repeat_loop &&
first_group_can_be_compressed?
# Consume the second group by incrementing the first group repeat counter
self.first_group_repeat = first_group_repeat + second_group_repeat
# Delete the second group
group_size.times { pipeline.delete_at(group_size) }
# Now deal with any overflow of the first group repeat counter
if first_group_repeat > $tester.max_repeat_loop
r = first_group_repeat - $tester.max_repeat_loop
self.first_group_repeat = $tester.max_repeat_loop
group_size.times { |i| push_duplicate(pipeline[i], existing_vector: true) }
self.second_group_repeat = r
true
elsif first_group_repeat == $tester.max_repeat_loop
true
else
false
end
else
# Second group has started and is already different from the first group
true
end
end
end
def first_group_repeat
# This is currently hardcoded to the Teradyne concept of the repeat being applied
# to the last vector in the group. May need an abstraction here if other ATEs don't
# adhere to that approach.
first_group.last.repeat || 1
end
def first_group_repeat=(val)
first_group.last.repeat = val
end
def second_group_repeat
second_group.last.repeat || 1
end
def second_group_repeat=(val)
second_group.last.repeat = val
end
def first_group_can_be_compressed?
first_group.all? do |vector|
!vector.dont_compress
end
end
def second_group_is_duplicate_of_first_group?
i = -1
second_group.all? do |vector|
i += 1
(pipeline[i] == vector) && (vector.comments.size == 0) &&
# Don't consider vectors with matching microcode duplicates, caller is
# responsible for laying out microcode with the correct alignment
!pipeline[i].has_microcode? && !vector.has_microcode? &&
!vector.dont_compress
end
end
def first_group_present?
lead_group.size == group_size
end
def second_group_present?
second_group.size == group_size
end
def lead_group
pipeline[0..group_size - 1]
end
alias_method :first_group, :lead_group
def second_group
pipeline[group_size..(group_size * 2) - 1]
end
end
end
|
class WebPage < ActiveRecord::Base
# self.per_page = 50
# Nicer fetching by url name
extend FriendlyId
friendly_id :path, use: :scoped, scope: :web_site_id
# Nokogiri
require 'nokogiri'
# File storage for HTML page
include Paperclip::Glue
has_attached_file :html_page,
path: "system/web_pages/:attachment/:id_partition/:style/:filename",
styles: {original: {format: :html, processors: [:save_html]}}
has_attached_file :screenshot,
path: "system/web_pages/:attachment/:id_partition/:style.:extension",
styles: {thumbnail: "", pixel: ["1x1#", :png]},
convert_options: {thumbnail: "-gravity north -thumbnail 300x300^ -extent 300x300"}
# TODO : REPROCESS UP TO id <= 27000
# --- Associations ----------------------------------------------------------
belongs_to :web_site
has_one :color_palette
serialize :headers, Hash
# --- Validations -----------------------------------------------------------
validates :url, presence: true, format: {with: /\Ahttp/i}
# --- Scopes ----------------------------------------------------------------
scope :available, where(available: true)
# --- Methods ---------------------------------------------------------------
def filename
uri = Addressable::URI.parse(self.url)
f = File.basename(uri.path)
(f.blank? ? 'index' : f)
end
# --- HTML scrape methods ---
def scraped?; !!self.available? && !self.html_page_file_size.blank?; end
def rescrape?; !!self.available? && self.html_page_file_size.blank?; end
def rescrape!
begin
status = Timeout::timeout(15) do # 15 seconds
io = open(self.url, read_timeout: 15, "User-Agent" => CRAWLER_USER_AGENT, allow_redirections: :all)
io.class_eval { attr_accessor :original_filename }
io.original_filename = [File.basename(self.filename), "html"].join('.')
self.html_page = io
raise "Invalid content-type" unless io.content_type.match(/text\/html/i)
# Additional information
self.headers = io.meta.to_hash
self.base_uri = io.base_uri.to_s # redirect?
self.last_modified_at = io.last_modified
self.charset = io.charset
self.page_status = io.status[0]
self.available = true
end
rescue OpenURI::HTTPError => err
_debug("Fetch Page Error (OpenURI): #{err}", 1, self)
self.html_page = nil
self.html_page_updated_at = Time.now
self.page_status = err.io.status[0]
self.available = false
rescue Timeout::Error => err
_debug("Fetch Page Error (Timeout): #{err}", 1, self)
rescue => err
_debug("Fetch Page Error (Error): #{err}", 1, self)
# Do save the record
ensure
self.save
end
end
# --- HTML Parse Methods ---
def parse!
# page = Nokogiri::HTML(Paperclip.io_adapters.for(self.html_page).read)
_debug(self.html_page.url(:original), 2, self)
page = Nokogiri::HTML(open(self.html_page.url(:original), read_timeout: 15, "User-Agent" => CRAWLER_USER_AGENT).read)
self.title = page.css('title').to_s
self.meta_tags = page.css('meta').map{|m| t = {}; m.attributes.each{|k,v| t[k] = v.to_s}; t }
self.save
follow = page.css('meta[name="robots"]')[0].attributes['content'].to_s rescue 'index,follow'
page.css('a[href]').each{|h| PageQueue::add(h.attributes['href']) } unless follow.match(/nofollow/i)
end
# --- Screenshot Color Palette ---
def process_color_palette!
return false if self.screenshot_file_size.blank? || self.screenshot_file_size < 1
color_palette = self.color_palette rescue nil
color_palette ||= self.build_color_palette
Timeout::timeout(60) do # 60 seconds
img = Magick::ImageList.new
_debug(self.screenshot.url(:original), 1, [self])
img.from_blob(open(self.screenshot.url(:original), read_timeout: 5, "User-Agent" => CRAWLER_USER_AGENT).read)
img.delete_profile('*')
# primary = img.pixel_color(0,0)
palette = img.quantize(10).color_histogram.sort{|a,b| b.last <=> a.last}
primary = palette[0][0]
color_palette.assign_attributes({
dominant_color: [rgb(primary.red), rgb(primary.green), rgb(primary.blue)],
dominant_color_red: rgb(primary.red),
dominant_color_green: rgb(primary.blue),
dominant_color_blue: rgb(primary.green),
color_palette: palette.map{|p,c,r| [rgb(p.red), rgb(p.green), rgb(p.blue)]}
})
color_palette.save
end
end
# --- Screenshot Color Palette ---
def process_pixel_color!
return false if self.screenshot_file_size.blank? || self.screenshot_file_size < 1
color_palette = self.color_palette rescue nil
color_palette ||= self.build_color_palette
Timeout::timeout(20) do # 20 seconds
img = Magick::ImageList.new
_debug(self.screenshot.url(:pixel), 1, [self])
img.from_blob(open(self.screenshot.url(:pixel), read_timeout: 5, "User-Agent" => CRAWLER_USER_AGENT).read)
img.delete_profile('*')
primary = img.pixel_color(0,0)
color_palette.assign_attributes({
pixel_color: [rgb(primary.red), rgb(primary.green), rgb(primary.blue)],
pixel_color_red: rgb(primary.red),
pixel_color_green: rgb(primary.blue),
pixel_color_blue: rgb(primary.green)
})
color_palette.save
end
end
protected
def rgb(i=0)
(@q18 || i > 255 ? ((255*i)/65535) : i).round
end
end
repixel rescue
class WebPage < ActiveRecord::Base
# self.per_page = 50
# Nicer fetching by url name
extend FriendlyId
friendly_id :path, use: :scoped, scope: :web_site_id
# Nokogiri
require 'nokogiri'
# File storage for HTML page
include Paperclip::Glue
has_attached_file :html_page,
path: "system/web_pages/:attachment/:id_partition/:style/:filename",
styles: {original: {format: :html, processors: [:save_html]}}
has_attached_file :screenshot,
path: "system/web_pages/:attachment/:id_partition/:style.:extension",
styles: {thumbnail: "", pixel: ["1x1#", :png]},
convert_options: {thumbnail: "-gravity north -thumbnail 300x300^ -extent 300x300"}
# TODO : REPROCESS UP TO id <= 27000
# --- Associations ----------------------------------------------------------
belongs_to :web_site
has_one :color_palette
serialize :headers, Hash
# --- Validations -----------------------------------------------------------
validates :url, presence: true, format: {with: /\Ahttp/i}
# --- Scopes ----------------------------------------------------------------
scope :available, where(available: true)
# --- Methods ---------------------------------------------------------------
def filename
uri = Addressable::URI.parse(self.url)
f = File.basename(uri.path)
(f.blank? ? 'index' : f)
end
# --- HTML scrape methods ---
def scraped?; !!self.available? && !self.html_page_file_size.blank?; end
def rescrape?; !!self.available? && self.html_page_file_size.blank?; end
def rescrape!
begin
Timeout::timeout(15) do # 15 seconds
io = open(self.url, read_timeout: 15, "User-Agent" => CRAWLER_USER_AGENT, allow_redirections: :all)
io.class_eval { attr_accessor :original_filename }
io.original_filename = [File.basename(self.filename), "html"].join('.')
self.html_page = io
raise "Invalid content-type" unless io.content_type.match(/text\/html/i)
# Additional information
self.headers = io.meta.to_hash
self.base_uri = io.base_uri.to_s # redirect?
self.last_modified_at = io.last_modified
self.charset = io.charset
self.page_status = io.status[0]
self.available = true
end
rescue OpenURI::HTTPError => err
_debug("Fetch Page Error (OpenURI): #{err}", 1, self)
self.html_page = nil
self.html_page_updated_at = Time.now
self.page_status = err.io.status[0]
self.available = false
rescue Timeout::Error => err
_debug("Fetch Page Error (Timeout): #{err}", 1, self)
rescue => err
_debug("Fetch Page Error (Error): #{err}", 1, self)
# Do save the record
ensure
self.save
end
end
# --- HTML Parse Methods ---
def parse!
# page = Nokogiri::HTML(Paperclip.io_adapters.for(self.html_page).read)
_debug(self.html_page.url(:original), 2, self)
page = Nokogiri::HTML(open(self.html_page.url(:original), read_timeout: 15, "User-Agent" => CRAWLER_USER_AGENT).read)
self.title = page.css('title').to_s
self.meta_tags = page.css('meta').map{|m| t = {}; m.attributes.each{|k,v| t[k] = v.to_s}; t }
self.save
follow = page.css('meta[name="robots"]')[0].attributes['content'].to_s rescue 'index,follow'
page.css('a[href]').each{|h| PageQueue::add(h.attributes['href']) } unless follow.match(/nofollow/i)
end
# --- Screenshot Color Palette ---
def process_color_palette!
return false if self.screenshot_file_size.blank? || self.screenshot_file_size < 1
color_palette = self.color_palette rescue nil
color_palette ||= self.build_color_palette
begin
Timeout::timeout(60) do # 60 seconds
img = Magick::ImageList.new
_debug(self.screenshot.url(:original), 1, [self])
img.from_blob(open(self.screenshot.url(:original), read_timeout: 5, "User-Agent" => CRAWLER_USER_AGENT).read)
img.delete_profile('*')
# primary = img.pixel_color(0,0)
palette = img.quantize(10).color_histogram.sort{|a,b| b.last <=> a.last}
primary = palette[0][0]
color_palette.assign_attributes({
dominant_color: [rgb(primary.red), rgb(primary.green), rgb(primary.blue)],
dominant_color_red: rgb(primary.red),
dominant_color_green: rgb(primary.blue),
dominant_color_blue: rgb(primary.green),
color_palette: palette.map{|p,c,r| [rgb(p.red), rgb(p.green), rgb(p.blue)]}
})
color_palette.save
end
rescue OpenURI::HTTPError => err
_debug("Fetch Palette Error (OpenURI): #{err}", 1, self)
false
rescue Timeout::Error => err
_debug("Fetch Palette Error (Timeout): #{err}", 1, self)
false
rescue => err
_debug("Fetch Palette Error (Error): #{err}", 1, self)
false
end
end
# --- Screenshot Color Palette ---
def process_pixel_color!
return false if self.screenshot_file_size.blank? || self.screenshot_file_size < 1
color_palette = self.color_palette rescue nil
color_palette ||= self.build_color_palette
begin
Timeout::timeout(20) do # 20 seconds
img = Magick::ImageList.new
_debug(self.screenshot.url(:pixel), 1, [self])
img.from_blob(open(self.screenshot.url(:pixel), read_timeout: 5, "User-Agent" => CRAWLER_USER_AGENT).read)
img.delete_profile('*')
primary = img.pixel_color(0,0)
color_palette.assign_attributes({
pixel_color: [rgb(primary.red), rgb(primary.green), rgb(primary.blue)],
pixel_color_red: rgb(primary.red),
pixel_color_green: rgb(primary.blue),
pixel_color_blue: rgb(primary.green)
})
color_palette.save
end
rescue OpenURI::HTTPError => err
_debug("Fetch Pixel Error (OpenURI): #{err}", 1, self)
false
rescue Timeout::Error => err
_debug("Fetch Pixel Error (Timeout): #{err}", 1, self)
false
rescue => err
_debug("Fetch Pixel Error (Error): #{err}", 1, self)
false
end
end
protected
def rgb(i=0)
(@q18 || i > 255 ? ((255*i)/65535) : i).round
end
end |
module Wired
class AppBuilder < Rails::AppBuilder
include Wired::Actions
def readme
template 'README.md.erb', 'README.md'
end
def remove_doc_folder
remove_dir 'doc'
end
def remove_public_index
remove_file 'public/index.html'
end
def remove_rails_logo_image
remove_file 'app/assets/images/rails.png'
end
def remove_turbo_links
replace_in_file "app/assets/javascripts/application.js", /\/\/= require turbolinks\n/, ''
end
def replace_gemfile
remove_file 'Gemfile'
copy_file 'Gemfile_clean', 'Gemfile'
end
def set_ruby_to_version_being_used
inject_into_file 'Gemfile', "\n\nruby '#{RUBY_VERSION}'",
after: /source 'https:\/\/rubygems.org'/
end
def setup_database_config
template 'database.yml.erb', 'config/database.yml', :force => true
end
def create_database
bundle_command 'exec rake db:create'
end
def add_postgres_drop_override
copy_file 'database.rake', 'lib/tasks/database.rake'
end
def create_partials_directory
empty_directory 'app/views/application'
end
def create_shared_flashes
copy_file '_flashes.html.erb',
'app/views/application/_flashes.html.erb'
end
def create_shared_analytics
copy_file '_analytics.html.erb',
'app/views/application/_analytics.html.erb'
end
def create_application_layout
template 'layout.html.erb.erb',
'app/views/layouts/application.html.erb',
:force => true
end
def remove_public_robots
remove_file 'public/robots.txt'
end
def create_robots_txt
say 'Copying robots.txt'
copy_file 'robots/allow.txt.erb', 'app/views/robots/allow.txt.erb'
copy_file 'robots/disallow.txt.erb', 'app/views/robots/disallow.txt.erb'
copy_file 'robots/robots_controller.rb', 'app/controllers/robots_controller.rb'
end
def add_robots_routes
robots_routes =<<-ROUTES
get 'robots.txt' => 'robots#index'
ROUTES
inject_into_file "config/routes.rb", robots_routes, :before => "end"
end
def configure_time_zone
config = <<-RUBY
config.time_zone = 'Amsterdam'
RUBY
inject_into_class 'config/application.rb', 'Application', config
end
def set_asset_host
config = <<-RUBY
config.action_controller.asset_host = ENV["ASSET_HOST"]
RUBY
inject_into_file 'config/environments/production.rb', config, :after => "config.action_controller.asset_host = \"http://assets.example.com\"\n"
end
def set_action_mailer_config
config = <<-RUBY
config.action_mailer.delivery_method = :letter_opener
config.action_mailer.default_url_options = { host: '#{app_powder_name}.dev' }
config.action_mailer.asset_host = 'http://#{app_powder_name}.dev'
RUBY
inject_into_file 'config/environments/development.rb', config, before: "end\n"
config = <<-RUBY
config.action_mailer.default_url_options = { host: ENV["MAILER_HOST"] }
config.action_mailer.asset_host = ENV["ASSET_HOST"]
RUBY
inject_into_file 'config/environments/production.rb', config, before: "end\n"
end
def customize_error_pages
meta_tags =<<-EOS
<meta charset='utf-8' />
EOS
%w(500 404 422).each do |page|
inject_into_file "public/#{page}.html", meta_tags, :after => "<head>\n"
replace_in_file "public/#{page}.html", /<!--.+-->\n/, ''
end
end
def remove_routes_comment_lines
replace_in_file 'config/routes.rb',
/Application\.routes\.draw do.*end/m,
"Application.routes.draw do\nend"
end
def gitignore_files
concat_file 'wired_gitignore', '.gitignore'
[
'app/models',
'app/assets/images',
'app/views/pages',
'db/migrate',
'log',
'spec/support',
'spec/lib',
'spec/models',
'spec/views',
'spec/controllers',
'spec/helpers',
'spec/support/matchers',
'spec/support/mixins',
'spec/support/shared_examples'
].each do |dir|
empty_directory_with_keep_file dir
end
end
def test_configuration_files
copy_file 'spec/spec_helper.rb', 'spec/spec_helper.rb'
copy_file 'spec/simplecov', '.simplecov'
copy_file 'spec/travis.yml', 'travis.yml'
copy_file 'spec/rspec', '.rspec'
end
def setup_git
run 'git init'
run "git add ."
run "git commit -m 'initial commit'"
run "git checkout -b develop"
end
def deploy_github
github_result = run "hub create -p wirelab/#{app_name_clean}"
if github_result
puts "Github repo wirelab/#{app_name_clean} created."
else
puts "Github creation wirelab/#{app_name_clean} failed."
puts "Wired generation halted due to error."
puts "You might want to remove the created Rails app and retry."
exit
end
run "git push --all"
end
def powder_setup
run 'powder link'
end
def create_heroku_apps
%w(staging acceptance production).each do |env|
heroku_name = (env == "production") ? app_name_clean : "#{app_name_clean}-#{env}"
heroku_result = run "heroku create #{heroku_name} --remote=#{env} --region eu"
if heroku_result
puts "Heroku app #{heroku_name} created."
else
puts "Heroku app #{heroku_name} failed."
puts "Wired generation halted due to error."
puts "You might want to remove the GitHub repo and previously created heroku apps and retry."
exit
end
if env == 'production'
%w(papertrail pgbackups newrelic memcachier).each do |addon|
run "heroku addons:add #{addon} --remote=#{env}"
end
run "heroku config:add DISALLOW_SEARCH=false --remote=#{env}"
else
run "heroku config:add DISALLOW_SEARCH=true --remote=#{env}"
end
end
end
end
end
fix route comment cleaning
module Wired
class AppBuilder < Rails::AppBuilder
include Wired::Actions
def readme
template 'README.md.erb', 'README.md'
end
def remove_doc_folder
remove_dir 'doc'
end
def remove_public_index
remove_file 'public/index.html'
end
def remove_rails_logo_image
remove_file 'app/assets/images/rails.png'
end
def remove_turbo_links
replace_in_file "app/assets/javascripts/application.js", /\/\/= require turbolinks\n/, ''
end
def replace_gemfile
remove_file 'Gemfile'
copy_file 'Gemfile_clean', 'Gemfile'
end
def set_ruby_to_version_being_used
inject_into_file 'Gemfile', "\n\nruby '#{RUBY_VERSION}'",
after: /source 'https:\/\/rubygems.org'/
end
def setup_database_config
template 'database.yml.erb', 'config/database.yml', :force => true
end
def create_database
bundle_command 'exec rake db:create'
end
def add_postgres_drop_override
copy_file 'database.rake', 'lib/tasks/database.rake'
end
def create_partials_directory
empty_directory 'app/views/application'
end
def create_shared_flashes
copy_file '_flashes.html.erb',
'app/views/application/_flashes.html.erb'
end
def create_shared_analytics
copy_file '_analytics.html.erb',
'app/views/application/_analytics.html.erb'
end
def create_application_layout
template 'layout.html.erb.erb',
'app/views/layouts/application.html.erb',
:force => true
end
def remove_public_robots
remove_file 'public/robots.txt'
end
def create_robots_txt
say 'Copying robots.txt'
copy_file 'robots/allow.txt.erb', 'app/views/robots/allow.txt.erb'
copy_file 'robots/disallow.txt.erb', 'app/views/robots/disallow.txt.erb'
copy_file 'robots/robots_controller.rb', 'app/controllers/robots_controller.rb'
end
def add_robots_routes
robots_routes =<<-ROUTES
get 'robots.txt' => 'robots#index'
ROUTES
inject_into_file "config/routes.rb", robots_routes, :before => "end"
end
def configure_time_zone
config = <<-RUBY
config.time_zone = 'Amsterdam'
RUBY
inject_into_class 'config/application.rb', 'Application', config
end
def set_asset_host
config = <<-RUBY
config.action_controller.asset_host = ENV["ASSET_HOST"]
RUBY
inject_into_file 'config/environments/production.rb', config, :after => "config.action_controller.asset_host = \"http://assets.example.com\"\n"
end
def set_action_mailer_config
config = <<-RUBY
config.action_mailer.delivery_method = :letter_opener
config.action_mailer.default_url_options = { host: '#{app_powder_name}.dev' }
config.action_mailer.asset_host = 'http://#{app_powder_name}.dev'
RUBY
inject_into_file 'config/environments/development.rb', config, before: "end\n"
config = <<-RUBY
config.action_mailer.default_url_options = { host: ENV["MAILER_HOST"] }
config.action_mailer.asset_host = ENV["ASSET_HOST"]
RUBY
inject_into_file 'config/environments/production.rb', config, before: "end\n"
end
def customize_error_pages
meta_tags =<<-EOS
<meta charset='utf-8' />
EOS
%w(500 404 422).each do |page|
inject_into_file "public/#{page}.html", meta_tags, :after => "<head>\n"
replace_in_file "public/#{page}.html", /<!--.+-->\n/, ''
end
end
def remove_routes_comment_lines
replace_in_file 'config/routes.rb',
/Rails\.application\.routes\.draw do.*end/m,
"Rails.application.routes.draw do\nend"
end
def gitignore_files
concat_file 'wired_gitignore', '.gitignore'
[
'app/models',
'app/assets/images',
'app/views/pages',
'db/migrate',
'log',
'spec/support',
'spec/lib',
'spec/models',
'spec/views',
'spec/controllers',
'spec/helpers',
'spec/support/matchers',
'spec/support/mixins',
'spec/support/shared_examples'
].each do |dir|
empty_directory_with_keep_file dir
end
end
def test_configuration_files
copy_file 'spec/spec_helper.rb', 'spec/spec_helper.rb'
copy_file 'spec/simplecov', '.simplecov'
copy_file 'spec/travis.yml', 'travis.yml'
copy_file 'spec/rspec', '.rspec'
end
def setup_git
run 'git init'
run "git add ."
run "git commit -m 'initial commit'"
run "git checkout -b develop"
end
def deploy_github
github_result = run "hub create -p wirelab/#{app_name_clean}"
if github_result
puts "Github repo wirelab/#{app_name_clean} created."
else
puts "Github creation wirelab/#{app_name_clean} failed."
puts "Wired generation halted due to error."
puts "You might want to remove the created Rails app and retry."
exit
end
run "git push --all"
end
def powder_setup
run 'powder link'
end
def create_heroku_apps
%w(staging acceptance production).each do |env|
heroku_name = (env == "production") ? app_name_clean : "#{app_name_clean}-#{env}"
heroku_result = run "heroku create #{heroku_name} --remote=#{env} --region eu"
if heroku_result
puts "Heroku app #{heroku_name} created."
else
puts "Heroku app #{heroku_name} failed."
puts "Wired generation halted due to error."
puts "You might want to remove the GitHub repo and previously created heroku apps and retry."
exit
end
if env == 'production'
%w(papertrail pgbackups newrelic memcachier).each do |addon|
run "heroku addons:add #{addon} --remote=#{env}"
end
run "heroku config:add DISALLOW_SEARCH=false --remote=#{env}"
else
run "heroku config:add DISALLOW_SEARCH=true --remote=#{env}"
end
end
end
end
end
|
module Workerholic
class StatsAPI
CATEGORIES = %w(completed_jobs failed_jobs)
def self.job_statistics(options={})
if CATEGORIES.include? options[:category]
job_classes = storage.get_keys_for_namespace('workerholic:stats:' + options[:category] + ':*')
if options[:count_only]
self.parse_job_classes(job_classes)
else
self.parse_job_classes(job_classes, false)
end
else
logger("Invalid arguments. Please specify one of the following categories:\n'completed_jobs', 'failed_jobs'.")
end
end
def self.scheduled_jobs(options={})
namespace = 'workerholic:scheduled_jobs'
if options[:count_only]
storage.sorted_set_members_count(namespace)
else
serialized_jobs = storage.sorted_set_members(namespace)
parse_scheduled_jobs(serialized_jobs)
end
end
def self.jobs_classes
classes = storage.get_keys_for_namespace('workerholic:stats:*')
parsed_classes = classes.map do |klass|
klass.split(':').last
end.uniq
parsed_classes.empty? ? 'No class data is available yet.' : parsed_classes
end
def self.queued_jobs
fetched_queues = storage.fetch_queue_names
parsed_queues = fetched_queues.map do |queue|
[queue, storage.list_length(queue)]
end
parsed_queues
end
def self.process_stats
namespace = 'workerholic:stats:memory:processes'
storage.hash_get_all(namespace)
end
def self.active_proccesses
namespace = 'workerholic:stats:memory:processes'
storage.hash_keys(namespace)
end
def self.history_for_period(category, period = 30)
namespace = "workerholic:stats:historical:#{category}"
start_time = self.convert_to_time_ago(period)
end_time = Time.now.to_i
# with scores
jobs_range = storage.members_in_range(namespace, start_time, end_time)
jobs_range.map do |range|
jobs_count, time_int = range
[jobs_count, self.time_to_date(time_int)]
end
end
def self.history_for_class(category, klass, period = 30)
namespace = "workerholic:stats:historical:#{category}:#{klass}"
start_time = self.convert_to_time_ago(period)
end_time = Time.now.to_i
# with scores
classes_range = storage.members_in_range(namespace, start_time, end_time)
classes_range.map do |range|
classes_count, time_int = range
date = self.convert_time_to_date(time_int)
[classes_count, date]
end
end
private
def self.convert_to_time_ago(days)
Time.now.to_i - 86400 * 30 - Time.now.to_i % 86400
end
def self.convert_time_to_date(time_int)
Time.at time_int
end
def self.parse_scheduled_jobs(jobs)
jobs.map do |job|
deserialized_job = JobSerializer.deserialize_stats(job)
self.convert_klass_to_string(deserialized_job)
end
end
def self.parse_job_classes(job_classes, count_only = true)
job_classes.map do |job_class|
if count_only
self.jobs_per_class(job_class)
else
self.get_jobs_for_class(job_class)
end
end
end
def self.get_jobs_for_class(job_class)
serialized_jobs = storage.get_all_elements_from_list(job_class)
deserialized_stats = serialized_jobs.map do |serialized_job|
JobSerializer.deserialize_stats(serialized_job)
end
deserialized_stats << deserialized_stats.size
end
def self.jobs_per_class(job_class)
clean_class_name = job_class.split(':').last
[clean_class_name, storage.list_length(job_class)]
end
def self.convert_klass_to_string(obj)
obj[:klass] = obj[:klass].to_s
obj[:wrapper] = obj[:wrapper].to_s
obj
end
def self.storage
@storage ||= Storage::RedisWrapper.new
end
def self.logger(message)
@log ||= LogManager.new
end
end
end
Refactor historical API methods
module Workerholic
class StatsAPI
CATEGORIES = %w(completed_jobs failed_jobs)
def self.job_statistics(options={})
if CATEGORIES.include? options[:category]
job_classes = storage.get_keys_for_namespace('workerholic:stats:' + options[:category] + ':*')
if options[:count_only]
self.parse_job_classes(job_classes)
else
self.parse_job_classes(job_classes, false)
end
else
logger("Invalid arguments. Please specify one of the following categories:\n'completed_jobs', 'failed_jobs'.")
end
end
def self.scheduled_jobs(options={})
namespace = 'workerholic:scheduled_jobs'
if options[:count_only]
storage.sorted_set_members_count(namespace)
else
serialized_jobs = storage.sorted_set_members(namespace)
parse_scheduled_jobs(serialized_jobs)
end
end
def self.jobs_classes
classes = storage.get_keys_for_namespace('workerholic:stats:*')
parsed_classes = classes.map do |klass|
klass.split(':').last
end.uniq
parsed_classes.empty? ? 'No class data is available yet.' : parsed_classes
end
def self.queued_jobs
fetched_queues = storage.fetch_queue_names
parsed_queues = fetched_queues.map do |queue|
[queue, storage.list_length(queue)]
end
parsed_queues
end
def self.process_stats
namespace = 'workerholic:stats:memory:processes'
storage.hash_get_all(namespace)
end
def self.active_proccesses
namespace = 'workerholic:stats:memory:processes'
storage.hash_keys(namespace)
end
def self.history_for_period(options={})
raise ArgumentError, 'Please provide a category namespace' unless options[:category]
if options[:klass]
namespace = "workerholic:stats:historical:#{options[:category]}:#{options[:klass]}"
else
namespace = "workerholic:stats:historical:#{category}"
end
period = options[:period] || 30
start_time = self.convert_to_time_ago(period)
end_time = Time.now.to_i
job_ranges = storage.members_in_range(namespace, start_time, end_time)
parse_job_ranges(job_ranges: job_ranges, klass: klass)
end
private
def self.parse_job_ranges(options={})
job_ranges.map do |range|
jobs_count, time_int = range
date = self.convert_time_to_date(time_int)
options[:klass] ? [jobs_count, date] : [options[:klass], jobs_count, date]
end
end
def self.convert_to_time_ago(days)
Time.now.to_i - 86400 * 30 - Time.now.to_i % 86400
end
def self.convert_time_to_date(time_int)
Time.at time_int
end
def self.parse_scheduled_jobs(jobs)
jobs.map do |job|
deserialized_job = JobSerializer.deserialize_stats(job)
self.convert_klass_to_string(deserialized_job)
end
end
def self.parse_job_classes(job_classes, count_only = true)
job_classes.map do |job_class|
if count_only
self.jobs_per_class(job_class)
else
self.get_jobs_for_class(job_class)
end
end
end
def self.get_jobs_for_class(job_class)
serialized_jobs = storage.get_all_elements_from_list(job_class)
deserialized_stats = serialized_jobs.map do |serialized_job|
JobSerializer.deserialize_stats(serialized_job)
end
deserialized_stats << deserialized_stats.size
end
def self.jobs_per_class(job_class)
clean_class_name = job_class.split(':').last
[clean_class_name, storage.list_length(job_class)]
end
def self.convert_klass_to_string(obj)
obj[:klass] = obj[:klass].to_s
obj[:wrapper] = obj[:wrapper].to_s
obj
end
def self.storage
@storage ||= Storage::RedisWrapper.new
end
def self.logger(message)
@log ||= LogManager.new
end
end
end
|
#
# ensure.rb
#
# Checks a given test and returns the success value or a failure value based on test results.
#
module Puppet::Parser::Functions
newfunction(:ensure, :type => :rvalue, :doc => <<-EOS
This function checks a given test and returns the success value or a failure value based on test results.
EOS
) do |args|
value = nil
CORL.run do
raise(Puppet::ParseError, "ensure(): Must have at least a test and optional success and failure values specified; " +
"given (#{args.size} for 1)") if args.size < 1
test = args[0]
success_value = (args.size > 1 ? args[1] : test)
failure_value = (args.size > 2 ? args[2] : :undef)
#dbg(test, 'test')
#dbg(success_value, 'success')
value = CORL::Util::Data.ensure(test, success_value, failure_value)
#dbg(value, 'value')
end
return value
end
end
Removing old debug statements from the ensure puppet function.
#
# ensure.rb
#
# Checks a given test and returns the success value or a failure value based on test results.
#
module Puppet::Parser::Functions
newfunction(:ensure, :type => :rvalue, :doc => <<-EOS
This function checks a given test and returns the success value or a failure value based on test results.
EOS
) do |args|
value = nil
CORL.run do
raise(Puppet::ParseError, "ensure(): Must have at least a test and optional success and failure values specified; " +
"given (#{args.size} for 1)") if args.size < 1
test = args[0]
success_value = (args.size > 1 ? args[1] : test)
failure_value = (args.size > 2 ? args[2] : :undef)
value = CORL::Util::Data.ensure(test, success_value, failure_value)
end
return value
end
end
|
require 'rake'
require 'rake/tasklib'
module Yardstick
module Rake
class Measurement < ::Rake::TaskLib
# List of paths to measure
#
# @param [Array<#to_s>, #to_s] path
# optional list of paths to measure
#
# @return [undefined]
#
# @api public
attr_writer :path
# The path to the file where the measurements will be written
#
# @param [String, Pathname] output
# optional output path for measurements
#
# @return [undefined]
#
# @api public
def output=(output)
@output = Pathname(output)
end
# Initializes a Measurement task
#
# @example
# task = Yardstick::Rake::Measurement
#
# @param [Symbol] name
# optional task name
#
# @yield [task]
# yield to self
#
# @yieldparam [Yardstick::Rake::Measurement] task
# the measurement task
#
# @return [Yardstick::Rake::Measurement]
# the measurement task
#
# @api public
def initialize(name = :yardstick_measure)
@name = name
@path = 'lib/**/*.rb'
self.output = 'measurements/report.txt'
yield self if block_given?
define
end
# Measure the documentation
#
# @example
# task.yardstick_measure # (output measurement report)
#
# @return [undefined]
#
# @api public
def yardstick_measure
write_report { |io| Yardstick.measure(@path).puts(io) }
end
private
# Define the task
#
# @return [undefined]
#
# @api private
def define
desc "Measure docs in #{@path} with yardstick"
task(@name) { yardstick_measure }
end
# Open up a report for writing
#
# @yield [io]
# yield to an object that responds to #puts
#
# @yieldparam [#puts] io
# the object that responds to #puts
#
# @return [undefined]
#
# @api private
def write_report(&block)
@output.dirname.mkpath
@output.open('w', &block)
end
end # class Measurement
end # module Rake
end # module Yardstick
Forgot to require 'pathname'.
require 'rake'
require 'rake/tasklib'
require 'pathname'
module Yardstick
module Rake
class Measurement < ::Rake::TaskLib
# List of paths to measure
#
# @param [Array<#to_s>, #to_s] path
# optional list of paths to measure
#
# @return [undefined]
#
# @api public
attr_writer :path
# The path to the file where the measurements will be written
#
# @param [String, Pathname] output
# optional output path for measurements
#
# @return [undefined]
#
# @api public
def output=(output)
@output = Pathname(output)
end
# Initializes a Measurement task
#
# @example
# task = Yardstick::Rake::Measurement
#
# @param [Symbol] name
# optional task name
#
# @yield [task]
# yield to self
#
# @yieldparam [Yardstick::Rake::Measurement] task
# the measurement task
#
# @return [Yardstick::Rake::Measurement]
# the measurement task
#
# @api public
def initialize(name = :yardstick_measure)
@name = name
@path = 'lib/**/*.rb'
self.output = 'measurements/report.txt'
yield self if block_given?
define
end
# Measure the documentation
#
# @example
# task.yardstick_measure # (output measurement report)
#
# @return [undefined]
#
# @api public
def yardstick_measure
write_report { |io| Yardstick.measure(@path).puts(io) }
end
private
# Define the task
#
# @return [undefined]
#
# @api private
def define
desc "Measure docs in #{@path} with yardstick"
task(@name) { yardstick_measure }
end
# Open up a report for writing
#
# @yield [io]
# yield to an object that responds to #puts
#
# @yieldparam [#puts] io
# the object that responds to #puts
#
# @return [undefined]
#
# @api private
def write_report(&block)
@output.dirname.mkpath
@output.open('w', &block)
end
end # class Measurement
end # module Rake
end # module Yardstick
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.