CombinedText stringlengths 4 3.42M |
|---|
require 'dm-core'
require 'dm-validations/support/ordered_hash'
class Object
  # Invokes the receiver when it responds to #call (Proc, lambda,
  # Method, ...), forwarding any arguments; any non-callable receiver
  # is returned unchanged.
  #
  # @return [Object] the call result, or self when not callable
  def try_call(*args)
    respond_to?(:call) ? call(*args) : self
  end
end
module DataMapper
  class Property
    # Intercepts property construction so that every newly defined
    # property immediately gets auto-validations generated on the
    # owning model. (auto_generate_validations is presumably mixed in
    # from dm-validations/auto_validate — verify in that file.)
    def self.new(model, name, options = {})
      property = super
      property.model.auto_generate_validations(property)
      # FIXME: explicit return needed for YARD to parse this properly
      return property
    end
  end
end
require 'dm-validations/exceptions'
require 'dm-validations/validation_errors'
require 'dm-validations/contextual_validators'
require 'dm-validations/auto_validate'
require 'dm-validations/validators/generic_validator'
require 'dm-validations/validators/required_field_validator'
require 'dm-validations/validators/primitive_validator'
require 'dm-validations/validators/absent_field_validator'
require 'dm-validations/validators/confirmation_validator'
require 'dm-validations/validators/format_validator'
require 'dm-validations/validators/length_validator'
require 'dm-validations/validators/within_validator'
require 'dm-validations/validators/numeric_validator'
require 'dm-validations/validators/method_validator'
require 'dm-validations/validators/block_validator'
require 'dm-validations/validators/uniqueness_validator'
require 'dm-validations/validators/acceptance_validator'
require 'dm-validations/support/context'
require 'dm-validations/support/object'
module DataMapper
  # Contextual validation support for DataMapper models: resources gain
  # #valid?/#errors, and save/update are guarded by validation.
  module Validations
    # Mix this module into every model DataMapper defines.
    Model.append_inclusions self
    extend Chainable

    def self.included(model)
      model.extend ClassMethods
    end

    # Ensures the object is valid for the context provided, and otherwise
    # throws :halt and returns false.
    #
    chainable do
      def save(context = default_validation_context)
        validation_context(context) { super() }
      end
    end

    # Validate under the given context before delegating to the normal
    # update machinery.
    chainable do
      def update(attributes = {}, context = default_validation_context)
        validation_context(context) { super(attributes) }
      end
    end

    # Only persist self when it is clean, when no validation context is
    # active, or when the current context reports the resource valid.
    chainable do
      def save_self(*)
        return false unless !dirty_self? || validation_context_stack.empty? || valid?(current_validation_context)
        super
      end
    end

    # Return the ValidationErrors
    #
    def errors
      @errors ||= ValidationErrors.new(self)
    end

    # Mark this resource as validatable. When we validate associations of a
    # resource we can check if they respond to validatable? before trying to
    # recursively validate them
    #
    def validatable?
      true
    end

    # Alias for valid?(:default)
    #
    def valid_for_default?
      valid?(:default)
    end

    # Check if a resource is valid in a given context
    #
    def valid?(context = :default)
      # Instances delegate to their model's validators; the self.class
      # fallback supports mixing into plain (non-Resource) classes.
      klass = respond_to?(:model) ? model : self.class
      klass.validators.execute(context, self)
    end

    # Reads a property value for validation; the `true` argument lets
    # private/protected readers be used as well.
    def validation_property_value(name)
      __send__(name) if respond_to?(name, true)
    end

    module ClassMethods
      include DataMapper::Validations::ValidatesPresence
      include DataMapper::Validations::ValidatesAbsence
      include DataMapper::Validations::ValidatesConfirmation
      include DataMapper::Validations::ValidatesPrimitiveType
      include DataMapper::Validations::ValidatesAcceptance
      include DataMapper::Validations::ValidatesFormat
      include DataMapper::Validations::ValidatesLength
      include DataMapper::Validations::ValidatesWithin
      include DataMapper::Validations::ValidatesNumericality
      include DataMapper::Validations::ValidatesWithMethod
      include DataMapper::Validations::ValidatesWithBlock
      include DataMapper::Validations::ValidatesUniqueness
      include DataMapper::Validations::AutoValidations

      # Return the set of contextual validators or create a new one
      #
      def validators
        @validators ||= ContextualValidators.new(self)
      end

      # Copy every inherited validator into the subclass so each class
      # accumulates its own context lists independently.
      def inherited(base)
        super
        validators.contexts.each do |context, validators|
          base.validators.context(context).concat(validators)
        end
      end

      # Build, validate-and-save, and return the resource (returned
      # even when the save fails — check #saved?/#errors).
      def create(attributes = {}, *args)
        resource = new(attributes)
        resource.save(*args)
        resource
      end

      private

      # Given a new context create an instance method of
      # valid_for_<context>? which simply calls valid?(context)
      # if it does not already exist
      #
      # NOTE: `private` above does not apply to this singleton method.
      def self.create_context_instance_methods(model, context)
        # TODO: deprecate `valid_for_#{context}?`
        # what's wrong with requiring the caller to pass the context as an arg?
        # eg, `valid?(:context)`
        context = context.to_sym
        name = "valid_for_#{context}?"
        present = model.respond_to?(:resource_method_defined) ? model.resource_method_defined?(name) : model.instance_methods.include?(name)
        unless present
          model.class_eval <<-RUBY, __FILE__, __LINE__ + 1
def #{name} # def valid_for_signup?
valid?(#{context.inspect}) # valid?(:signup)
end # end
          RUBY
        end
      end
    end # module ClassMethods
  end # module Validations

  # Provide a const alias for backward compatibility with plugins
  # This is scheduled to go away though, definitely before 1.0
  Validate = Validations
end # module DataMapper
Add a TODO note: `inherited` should eventually copy validators via `ContextualValidators#add` instead of concatenating the context arrays directly.
require 'dm-core'
require 'dm-validations/support/ordered_hash'
class Object
  # If receiver is callable, calls it and
  # returns result. If not, just returns receiver
  # itself
  #
  # @param args [Array] forwarded to #call when the receiver is callable
  # @return [Object]
  def try_call(*args)
    if self.respond_to?(:call)
      self.call(*args)
    else
      self
    end
  end
end
module DataMapper
  class Property
    # Intercepts property construction so that every newly defined
    # property immediately gets auto-validations generated on the
    # owning model. (auto_generate_validations is presumably mixed in
    # from dm-validations/auto_validate — verify in that file.)
    def self.new(model, name, options = {})
      property = super
      property.model.auto_generate_validations(property)
      # FIXME: explicit return needed for YARD to parse this properly
      return property
    end
  end
end
require 'dm-validations/exceptions'
require 'dm-validations/validation_errors'
require 'dm-validations/contextual_validators'
require 'dm-validations/auto_validate'
require 'dm-validations/validators/generic_validator'
require 'dm-validations/validators/required_field_validator'
require 'dm-validations/validators/primitive_validator'
require 'dm-validations/validators/absent_field_validator'
require 'dm-validations/validators/confirmation_validator'
require 'dm-validations/validators/format_validator'
require 'dm-validations/validators/length_validator'
require 'dm-validations/validators/within_validator'
require 'dm-validations/validators/numeric_validator'
require 'dm-validations/validators/method_validator'
require 'dm-validations/validators/block_validator'
require 'dm-validations/validators/uniqueness_validator'
require 'dm-validations/validators/acceptance_validator'
require 'dm-validations/support/context'
require 'dm-validations/support/object'
module DataMapper
  # Contextual validation support for DataMapper models: resources gain
  # #valid?/#errors, and save/update are guarded by validation.
  module Validations
    # Mix this module into every model DataMapper defines.
    Model.append_inclusions self
    extend Chainable

    def self.included(model)
      model.extend ClassMethods
    end

    # Ensures the object is valid for the context provided, and otherwise
    # throws :halt and returns false.
    #
    chainable do
      def save(context = default_validation_context)
        validation_context(context) { super() }
      end
    end

    # Validate under the given context before delegating to the normal
    # update machinery.
    chainable do
      def update(attributes = {}, context = default_validation_context)
        validation_context(context) { super(attributes) }
      end
    end

    # Only persist self when it is clean, when no validation context is
    # active, or when the current context reports the resource valid.
    chainable do
      def save_self(*)
        return false unless !dirty_self? || validation_context_stack.empty? || valid?(current_validation_context)
        super
      end
    end

    # Return the ValidationErrors
    #
    def errors
      @errors ||= ValidationErrors.new(self)
    end

    # Mark this resource as validatable. When we validate associations of a
    # resource we can check if they respond to validatable? before trying to
    # recursively validate them
    #
    def validatable?
      true
    end

    # Alias for valid?(:default)
    #
    def valid_for_default?
      valid?(:default)
    end

    # Check if a resource is valid in a given context
    #
    def valid?(context = :default)
      # Instances delegate to their model's validators; the self.class
      # fallback supports mixing into plain (non-Resource) classes.
      klass = respond_to?(:model) ? model : self.class
      klass.validators.execute(context, self)
    end

    # Reads a property value for validation; the `true` argument lets
    # private/protected readers be used as well.
    def validation_property_value(name)
      __send__(name) if respond_to?(name, true)
    end

    module ClassMethods
      include DataMapper::Validations::ValidatesPresence
      include DataMapper::Validations::ValidatesAbsence
      include DataMapper::Validations::ValidatesConfirmation
      include DataMapper::Validations::ValidatesPrimitiveType
      include DataMapper::Validations::ValidatesAcceptance
      include DataMapper::Validations::ValidatesFormat
      include DataMapper::Validations::ValidatesLength
      include DataMapper::Validations::ValidatesWithin
      include DataMapper::Validations::ValidatesNumericality
      include DataMapper::Validations::ValidatesWithMethod
      include DataMapper::Validations::ValidatesWithBlock
      include DataMapper::Validations::ValidatesUniqueness
      include DataMapper::Validations::AutoValidations

      # Return the set of contextual validators or create a new one
      #
      def validators
        @validators ||= ContextualValidators.new(self)
      end

      # Copy every inherited validator into the subclass so each class
      # accumulates its own context lists independently.
      def inherited(base)
        super
        # TODO: use ContextualValidators#add
        # self.validators.contexts.each do |context, validators|
        # validators.each do |v|
        # base.validators.add(v.class, v.field_name, :context => context)
        # end
        # end
        validators.contexts.each do |context, validators|
          base.validators.context(context).concat(validators)
        end
      end

      # Build, validate-and-save, and return the resource (returned
      # even when the save fails — check #saved?/#errors).
      def create(attributes = {}, *args)
        resource = new(attributes)
        resource.save(*args)
        resource
      end

      private

      # Given a new context create an instance method of
      # valid_for_<context>? which simply calls valid?(context)
      # if it does not already exist
      #
      # NOTE: `private` above does not apply to this singleton method.
      def self.create_context_instance_methods(model, context)
        # TODO: deprecate `valid_for_#{context}?`
        # what's wrong with requiring the caller to pass the context as an arg?
        # eg, `valid?(:context)`
        context = context.to_sym
        name = "valid_for_#{context}?"
        present = model.respond_to?(:resource_method_defined) ? model.resource_method_defined?(name) : model.instance_methods.include?(name)
        unless present
          model.class_eval <<-RUBY, __FILE__, __LINE__ + 1
def #{name} # def valid_for_signup?
valid?(#{context.inspect}) # valid?(:signup)
end # end
          RUBY
        end
      end
    end # module ClassMethods
  end # module Validations

  # Provide a const alias for backward compatibility with plugins
  # This is scheduled to go away though, definitely before 1.0
  Validate = Validations
end # module DataMapper
|
module Dotify
  # Gem version: second pre-release build of 0.5.1.
  VERSION = "0.5.1.pre.1"
end
Switch version from 0.5.1.pre.1 to 0.5.1.pre.
module Dotify
  # Gem version: pre-release of 0.5.1.
  VERSION = "0.5.1.pre"
end
|
module Dynamoid
  # Dirty-attribute tracking for Dynamoid documents, built on
  # ActiveModel::Dirty: changes are recorded on write and cleared when
  # a document is loaded, reloaded, or successfully saved.
  module Dirty
    extend ActiveSupport::Concern
    include ActiveModel::Dirty

    module ClassMethods
      # Documents hydrated from the datastore start with no changes.
      def from_database(*)
        super.tap { |d| d.send(:clear_changes_information) }
      end
    end

    # Persist, then promote pending changes to previous_changes unless
    # the save returned false.
    def save(*)
      clear_changes { super }
    end

    def update!(*)
      ret = super
      clear_changes # update! completely reloads all fields on the class, so any extant changes are wiped out
      ret
    end

    def reload
      super.tap { clear_changes }
    end

    # Runs the optional block; unless it returns false (failed save),
    # moves the pending changes into @previously_changed and resets the
    # tracker. Returns the block's result (or true without a block).
    def clear_changes
      previous = changes
      (block_given? ? yield : true).tap do |result|
        unless result == false # failed validation; nil is OK.
          @previously_changed = previous
          clear_changes_information
        end
      end
    end

    # Record a pending change only when the value actually differs.
    def write_attribute(name, value)
      attribute_will_change!(name) unless self.read_attribute(name) == value
      super
    end

    protected

    # Treat every Dynamoid field as an ActiveModel attribute.
    def attribute_method?(attr)
      super || self.class.attributes.has_key?(attr.to_sym)
    end

    # NOTE(review): lexicographic string compare — '10.0.0' sorts before
    # '5.2.0'; Gem::Version would be safer for future ActiveModel majors.
    if ActiveModel::VERSION::STRING >= '5.2.0'
      # The ActiveModel::Dirty API was changed
      # https://github.com/rails/rails/commit/c3675f50d2e59b7fc173d7b332860c4b1a24a726#diff-aaddd42c7feb0834b1b5c66af69814d3
      # So we just try to disable new functionality
      def mutations_from_database
        @mutations_from_database ||= ActiveModel::NullMutationTracker.instance
      end

      def forget_attribute_assignments
      end
    end
  end
end
Fix builds for Rails 4.0 and 4.1 by defining `clear_changes_information` (introduced in ActiveModel 4.2) for older ActiveModel versions.
module Dynamoid
  # Dirty-attribute tracking for Dynamoid documents, built on
  # ActiveModel::Dirty: changes are recorded on write and cleared when
  # a document is loaded, reloaded, or successfully saved.
  module Dirty
    extend ActiveSupport::Concern
    include ActiveModel::Dirty

    module ClassMethods
      # Documents hydrated from the datastore start with no changes.
      def from_database(*)
        super.tap { |d| d.send(:clear_changes_information) }
      end
    end

    # Persist, then promote pending changes to previous_changes unless
    # the save returned false.
    def save(*)
      clear_changes { super }
    end

    def update!(*)
      ret = super
      clear_changes # update! completely reloads all fields on the class, so any extant changes are wiped out
      ret
    end

    def reload
      super.tap { clear_changes }
    end

    # Runs the optional block; unless it returns false (failed save),
    # moves the pending changes into @previously_changed and resets the
    # tracker. Returns the block's result (or true without a block).
    def clear_changes
      previous = changes
      (block_given? ? yield : true).tap do |result|
        unless result == false # failed validation; nil is OK.
          @previously_changed = previous
          clear_changes_information
        end
      end
    end

    # Record a pending change only when the value actually differs.
    def write_attribute(name, value)
      attribute_will_change!(name) unless self.read_attribute(name) == value
      super
    end

    protected

    # Treat every Dynamoid field as an ActiveModel attribute.
    def attribute_method?(attr)
      super || self.class.attributes.has_key?(attr.to_sym)
    end

    # NOTE(review): lexicographic string compare — '10.0.0' sorts before
    # '5.2.0'; Gem::Version would be safer for future ActiveModel majors.
    if ActiveModel::VERSION::STRING >= '5.2.0'
      # The ActiveModel::Dirty API was changed
      # https://github.com/rails/rails/commit/c3675f50d2e59b7fc173d7b332860c4b1a24a726#diff-aaddd42c7feb0834b1b5c66af69814d3
      # So we just try to disable new functionality
      def mutations_from_database
        @mutations_from_database ||= ActiveModel::NullMutationTracker.instance
      end

      def forget_attribute_assignments
      end
    end

    # ActiveModel < 4.2 has no clear_changes_information; emulate it by
    # clearing the tracked attribute hash directly.
    if ActiveModel::VERSION::STRING < '4.2.0'
      def clear_changes_information
        changed_attributes.clear
      end
    end
  end
end
|
require 'rubygems'
require 'active_record'
require 'faster_csv'
require 'net/ftp'
require 'net/smtp'
require 'fileutils'
require 'uuidtools'
require 'thread'
require $workingdir + '/ftp_fetcher.rb'
class Logger
  # Replace the stdlib message formatter so every line carries the raw
  # timestamp and the current process id. Severity and progname are
  # intentionally dropped from the output.
  def format_message(severity, timestamp, progname, msg)
    format("%s (%d) %s\n", timestamp, Process.pid, msg)
  end
end
module EchiConverter
# Configure ActiveRecord logging from $config and connect using
# config/database.yml. On failure the error is logged at fatal level
# and, when send_email is enabled, a DATABASE alert email is sent.
def connect_database
  databaseconfig = $workingdir + '/../config/database.yml'
  dblogfile = $workingdir + '/../log/database.log'
  ActiveRecord::Base.logger = Logger.new(dblogfile, $config["log_number"], $config["log_length"])
  case $config["log_level"]
  when 'FATAL'
    ActiveRecord::Base.logger.level = Logger::FATAL
  when 'ERROR'
    ActiveRecord::Base.logger.level = Logger::ERROR
  when 'WARN'
    ActiveRecord::Base.logger.level = Logger::WARN
  when 'INFO'
    ActiveRecord::Base.logger.level = Logger::INFO
  when 'DEBUG'
    ActiveRecord::Base.logger.level = Logger::DEBUG
  end
  begin
    ActiveRecord::Base.establish_connection(YAML::load(File.open(databaseconfig)))
    @log.info "Initialized the database"
  rescue => err
    # Interpolate the exception: String#+ raises TypeError when handed
    # an exception object, which previously masked the real failure.
    @log.fatal "Could not connect to the database - #{err.message}"
    if $config["send_email"] == true
      send_email_alert "DATABASE"
    end
  end
end
# Method to open our application log.
# Creates the rotating application logger under ../log and applies the
# configured severity threshold; unrecognised log_level values leave
# the logger at its Logger.new default.
def initiate_logger
  logfile = $workingdir + '/../log/application.log'
  @log = Logger.new(logfile, $config["log_number"], $config["log_length"])
  severities = {
    'FATAL' => Logger::FATAL,
    'ERROR' => Logger::ERROR,
    'WARN'  => Logger::WARN,
    'INFO'  => Logger::INFO,
    'DEBUG' => Logger::DEBUG
  }
  level = severities[$config["log_level"]]
  @log.level = level if level
end
# Method to send alert emails.
# Delivers a plain-text failure notification through the configured
# SMTP server; `reason` selects the body ("DATABASE" or "FTP").
# Delivery errors are logged at warn level and otherwise swallowed
# (alerting is best-effort).
def send_email_alert reason
  @log.debug "send_email_alert method"
  begin
    Net::SMTP.start($config["smtp_server"], $config["smtp_port"]) do |smtp|
      smtp.open_message_stream('donotreply@echi-converter.rubyforge.org', [$config["alert_email_address"]]) do |f|
        f.puts "From: donotreply@echi-converter.rubyforge.org"
        f.puts "To: " + $config['alert_email_address']
        f.puts "Subject: ECHI-Converter Failure"
        case reason
        when "DATABASE"
          f.puts "Failed to connect to the database."
        when "FTP"
          f.puts "Failed to connect to the ftp server."
        end
        f.puts " "
        f.puts "Please check the ECHI-Converter environment as soon as possible."
      end
    end
  rescue => err
    @log.warn err
  end
end
# Set the working directory to copy processed files to, creating it on
# demand. Directory names are partitioned by year/month so no single
# directory exceeds ~5K files.
#
# @param working_directory [String] base dir; the month directory lives
#   at working_directory/../files/processed/<year>/<month>
# @return [String] path of the (now existing) month directory
def set_directory(working_directory)
  @log.debug "set_directory method"
  time = Time.now
  directory_year = working_directory + "/../files/processed/" + time.year.to_s
  directory_month = directory_year + "/" + time.month.to_s
  # File.exists? is deprecated (removed in Ruby 3.2); mkdir_p creates
  # the year and month levels in one idempotent call.
  FileUtils.mkdir_p(directory_month)
  return directory_month
end
# Method to get FTP files.
# Fetches the remote file list, then pulls the files using up to
# max_ftp_sessions parallel FTP sessions (capped at one per queued
# file; small queues use a single session). Sends an FTP alert email
# when any fetch reports failure.
#
# @return [Integer, nil] -1 when no file list could be fetched
def get_ftp_files
  @log.debug "get_ftp_files method"
  filelist_fetcher = FtpFetcher.new
  filequeue = filelist_fetcher.fetch_list @log
  if filequeue == nil
    return -1
  end
  result = true
  if $config["max_ftp_sessions"] > 1 && filequeue.length > 4
    # Never spawn more sessions than there are files to fetch.
    session_cnt = [$config["max_ftp_sessions"], filequeue.length].min
    @log.info "Using " + session_cnt.to_s + " ftp sessions to fetch files"
    my_threads = []
    session_cnt.times do
      my_threads << Thread.new do
        # Was `Fetcher.new` in one branch — the class is FtpFetcher.
        fetcher = FtpFetcher.new
        # Stash the outcome on the thread: a local assigned inside this
        # block was invisible after the join (NameError on `result`).
        Thread.current[:result] = fetcher.fetch_ftp_files filequeue, @log
      end
    end
    my_threads.each do |a_thread|
      a_thread.join
      result = false if a_thread[:result] == false
    end
  else
    @log.info "Using a single ftp session to fetch the files"
    fetcher = FtpFetcher.new
    result = fetcher.fetch_ftp_files filequeue, @log
  end
  if result == false
    if $config["send_email"] == true
      send_email_alert "FTP"
    end
  end
end
# Method to write to the log table.
# Records one processed file in EchiLog; BINARY files additionally
# store the header file number and version.
#
# @param type [String, nil] 'BINARY' for binary ECH files
# @param filedata [Hash] "name", "cnt", and for BINARY "number"/"version"
# @return [Integer] 0 on success, -1 on failure
def log_processed_file(type, filedata)
  @log.debug "log_processed file method"
  begin
    echi_log = EchiLog.new
    echi_log.filename = filedata["name"]
    if type == 'BINARY'
      echi_log.filenumber = filedata["number"]
      echi_log.version = filedata["version"]
    end
    echi_log.records = filedata["cnt"]
    echi_log.processedat = Time.now
    echi_log.save
  rescue => err
    # Interpolate: String#+ raises TypeError on an exception object.
    @log.info "Error creating ECHI_LOG entry - #{err.message}"
    return -1
  end
  return 0
end
# Method for parsing the various datatypes from the ECH file.
# Reads `length` bytes from @binary_file (advancing the position) and
# decodes them according to `type`:
#   'int'      - 4/2/1-byte integer ("l"/"s"/"U" unpack directives)
#   'datetime' - 4-byte integer passed to Time.at (epoch seconds)
#   'str'      - string, right-stripped
#   'bool'     - the byte as an 8-char bit string ("b8")
#   'boolint'  - integer 1/0 mapped to 'Y'/'N'
# NOTE(review): unpack("U") decodes a UTF-8 codepoint rather than a raw
# byte ("C"), and unpack("M") is quoted-printable decoding — both look
# suspicious for fixed-width binary fields; confirm against the ECH
# format spec before changing.
def dump_binary type, length
  @log.debug "dump_binary method"
  case type
  when 'int'
    # Process integers, assigning appropriate profile based on length
    # such as long int, short int and tiny int.
    case length
    when 4
      value = @binary_file.read(length).unpack("l").first.to_i
    when 2
      value = @binary_file.read(length).unpack("s").first.to_i
    when 1
      value = @binary_file.read(length).unpack("U").first.to_i
    end
  # Process appropriate integers into datetime format in the database
  when 'datetime'
    case length
    when 4
      value = @binary_file.read(length).unpack("l").first.to_i
      value = Time.at(value)
    end
  # Process strings
  when 'str'
    value = @binary_file.read(length).unpack("M").first.to_s.rstrip
  # Process individual bits that are booleans
  when 'bool'
    value = @binary_file.read(length).unpack("b8").last.to_s
  # Process that one weird boolean that is actually an int, instead of a bit
  when 'boolint'
    value = @binary_file.read(length).unpack("U").first.to_i
    # Change the values of the field to Y/N for the varchar(1) representation of BOOLEAN
    if value == 1
      value = 'Y'
    else
      value = 'N'
    end
  end
  return value
end
# Method that performs the binary conversions.
# Converts one binary ECH file into EchiRecord rows inside a single
# transaction, then archives the file and (optionally) logs it.
#
# The file header carries two 4-byte ints: the file VERSION first,
# then the file NUMBER. (BUG #19490: these were previously read in the
# inverted order.)
#
# @param filename [String] name of the file under files/to_process
# @return [Integer] number of records converted
def convert_binary_file(filename)
  @log.debug "convert_binary_file"
  # Open the file to process
  echi_file = $workingdir + "/../files/to_process/" + filename
  @binary_file = open(echi_file, "rb")
  @log.debug "File size: " + @binary_file.stat.size.to_s
  # Read header information first: version, then file number
  fileversion = dump_binary 'int', 4
  @log.debug "Version " + fileversion.to_s
  filenumber = dump_binary 'int', 4
  @log.debug "File_number " + filenumber.to_s
  begin
    # Perform a transaction for each file, including the log table
    # in order to commit as one atomic action upon success
    EchiRecord.transaction do
      bool_cnt = 0
      bytearray = nil
      @record_cnt = 0
      # `until eof` replaces `eof == FALSE` (FALSE is a deprecated
      # constant, removed in Ruby 3).
      until @binary_file.eof
        @log.debug '<====================START RECORD ' + @record_cnt.to_s + ' ====================>'
        echi_record = EchiRecord.new
        @echi_schema["echi_records"].each do |field|
          # 'bool' fields arrive packed eight-to-a-byte: the byte is read
          # once and its bits consumed across eight consecutive fields.
          if field["type"] == 'bool'
            if bool_cnt == 0
              bytearray = dump_binary field["type"], field["length"]
            end
            # '00000000' means every flag in the byte is off
            if bytearray != '00000000'
              if bytearray.slice(bool_cnt, 1) == '1'
                value = 'Y'
              else
                value = 'N'
              end
            else
              value = 'N'
            end
            @log.debug field["name"] + " { type => #{field["type"]} & length => #{field["length"]} } value => " + value.to_s
            bool_cnt += 1
            bool_cnt = 0 if bool_cnt == 8
          else
            # Process 'standard' fields
            value = dump_binary field["type"], field["length"]
            @log.debug field["name"] + " { type => #{field["type"]} & length => #{field["length"]} } value => " + value.to_s
          end
          echi_record[field["name"]] = value
        end
        echi_record.save
        # Scan past the end of line record
        @binary_file.read(1)
        @log.debug '<====================STOP RECORD ' + @record_cnt.to_s + ' ====================>'
        @record_cnt += 1
      end
      @binary_file.close
    end
  rescue => err
    # Interpolate: String#+ raises TypeError on an exception object.
    @log.info "Error processing ECHI file - #{err.message}"
  end
  # Move the file to the processed directory
  FileUtils.mv(echi_file, @processeddirectory)
  if $config["echi_process_log"] == "Y"
    log_processed_file "BINARY", { "name" => filename, "number" => filenumber, "version" => fileversion, "cnt" => @record_cnt }
  end
  return @record_cnt
end
# Converts one ASCII (CSV) ECH file into EchiRecord rows inside a
# single transaction, then archives the file and (optionally) logs it.
#
# @param filename [String] name of the file under files/to_process
# @return [Integer] number of records converted
def process_ascii(filename)
  @log.debug "process_ascii method"
  echi_file = $workingdir + "/../files/to_process/" + filename
  begin
    # Perform a transaction for each file, including the log table
    # in order to commit as one atomic action upon success
    EchiRecord.transaction do
      @record_cnt = 0
      FasterCSV.foreach(echi_file) do |row|
        if row != nil
          @log.debug '<====================START RECORD ' + @record_cnt.to_s + ' ====================>'
          echi_record = EchiRecord.new
          cnt = 0
          @echi_schema["echi_records"].each do |field|
            if field["type"] == "bool" || field["type"] == "boolint"
              # CSV booleans: "0" => N, "1" => Y, blank => N, else => Y
              case row[cnt]
              when "0"
                echi_record[field["name"]] = "N"
              when "1"
                echi_record[field["name"]] = "Y"
              when nil
                echi_record[field["name"]] = "N"
              else
                echi_record[field["name"]] = "Y"
              end
              # Interpolation tolerates nil cells; String#+ used to
              # raise TypeError here for blank boolean columns.
              @log.debug "#{field["name"]} == #{row[cnt]}"
            else
              echi_record[field["name"]] = row[cnt]
              if row[cnt] != nil
                @log.debug "#{field["name"]} == #{row[cnt]}"
              end
            end
            cnt += 1
          end
          echi_record.save
          @log.debug '<====================STOP RECORD ' + @record_cnt.to_s + ' ====================>'
          @record_cnt += 1
        end
      end
    end
  rescue => err
    # Interpolate: String#+ raises TypeError on an exception object.
    @log.info "Error processing ECHI file - #{err.message}"
  end
  # Move the file to the processed directory
  FileUtils.mv(echi_file, @processeddirectory)
  if $config["echi_process_log"] == "Y"
    log_processed_file nil, { "name" => filename, "cnt" => @record_cnt }
  end
  return @record_cnt
end
# Inserts one row from a .dat lookup file into the ActiveRecord model
# matching `tablename`, mapping columns positionally via @echi_schema.
# Failures are logged and swallowed (row-level best-effort).
#
# @param tablename [String] e.g. "echi_agents"
# @param row [Array] pipe-split field values
def insert_dat_data(tablename, row)
  @log.debug "insert_dat_data method"
  begin
    # Table name => ActiveRecord model for the row being inserted
    models = {
      "echi_acds"         => EchiAcd,
      "echi_agents"       => EchiAgent,
      "echi_aux_reasons"  => EchiAuxReason,
      "echi_cwcs"         => EchiCwc,
      "echi_splits"       => EchiSplit,
      "echi_trunk_groups" => EchiTrunkGroup,
      "echi_vdns"         => EchiVdn,
      "echi_vectors"      => EchiVector
    }
    echi_dat_record = models[tablename].new
    cnt = 0
    @echi_schema[tablename].each do |field|
      echi_dat_record[field["name"]] = row[cnt]
      cnt += 1
    end
    echi_dat_record.save
  rescue => err
    # Interpolate the exception: String#+ raises TypeError on an
    # exception object, which previously masked the real failure.
    @log.info "Unable to insert " + tablename + " file record - #{err.message}"
  end
end
# Move the file to the archive location.
# Archives a processed .dat file under a UUID-stamped name in the
# processed directory, optionally recording it in the log table.
#
# @param file [Hash] "name" (table name) and "filename" (source path)
# @param record_cnt [Integer] number of records processed from the file
def archive_file(file, record_cnt)
  @log.debug "archive_file method"
  # Table name => $config key holding the original .dat filename
  config_keys = {
    "echi_acds"         => "echi_acd_dat",
    "echi_agents"       => "echi_agent_dat",
    "echi_aux_reasons"  => "echi_aux_rsn_dat",
    "echi_cwcs"         => "echi_cwc_dat",
    "echi_splits"       => "echi_split_dat",
    "echi_vdns"         => "echi_vdn_dat",
    "echi_trunk_groups" => "echi_trunk_group_dat",
    "echi_vectors"      => "echi_vector_dat"
  }
  filename_elements = $config[config_keys[file["name"]]].split(".")
  # UUID suffix keeps archived names unique across runs (uuidtools gem)
  new_filename = filename_elements[0] + "_" + UUID.timestamp_create.to_s + "." + filename_elements[1]
  target_file = @processeddirectory + "/" + new_filename
  begin
    FileUtils.mv(file["filename"], target_file)
    if $config["echi_process_log"] == "Y"
      log_processed_file nil, { "name" => new_filename, "cnt" => record_cnt }
    end
  rescue => err
    # Interpolate: String#+ raises TypeError on an exception object.
    @log.info "Unable to move processed file - #{err.message}"
  end
end
# Process the appropriate table name.
# Upserts lookup rows from a pipe-delimited .dat file: updates the name
# when a matching record exists and differs, inserts a new row
# otherwise, then archives the file with the processed-record count.
#
# @param file [Hash] "name" (table name) and "filename" (source path)
def process_proper_table(file)
  @log.debug "process_proper_table method"
  @record_cnt = 0
  process_file = File.open(file["filename"])
  process_file.each do |row|
    if row != nil
      begin
        # Rows look like "<group>|<key>|<name>"
        field = row.rstrip.split('|')
        @log.debug '<====================START ' + file["name"] + ' RECORD ' + @record_cnt.to_s + ' ====================>'
        case file["name"]
        when "echi_acds"
          record = EchiAcd.find(:first, :conditions => ["number = ? AND acd_id = ?", field[1], field[0]])
        when "echi_agents"
          record = EchiAgent.find(:first, :conditions => ["login_id = ? AND group_id = ?", field[1], field[0]])
        when "echi_aux_reasons"
          record = EchiAuxReason.find(:first, :conditions => ["aux_reason = ? AND group_id = ?", field[1], field[0]])
        when "echi_cwcs"
          record = EchiCwc.find(:first, :conditions => ["cwc = ? AND group_id = ?", field[1], field[0]])
        when "echi_splits"
          record = EchiSplit.find(:first, :conditions => ["number = ? AND acd_number = ?", field[1], field[0]])
        when "echi_trunk_groups"
          record = EchiTrunkGroup.find(:first, :conditions => ["trunk_group = ? AND acd_number = ?", field[1], field[0]])
        when "echi_vdns"
          record = EchiVdn.find(:first, :conditions => ["vdn = ? AND group_id = ?", field[1], field[0]])
        when "echi_vectors"
          record = EchiVector.find(:first, :conditions => ["number = ? AND acd_number = ?", field[1], field[0]])
        end
        if record != nil
          if record.name != field[2]
            record.name = field[2]
            # NOTE(review): instance-level `update` with no arguments is
            # unusual for ActiveRecord — confirm it persists on this
            # Rails version (save/update_attribute may be intended).
            record.update
            @record_cnt += 1
            @log.debug "Updated record - " + field.inspect
          else
            @log.debug "No update required for - " + field.inspect
          end
        else
          insert_dat_data file["name"], field
          @record_cnt += 1
          @log.debug "Inserted new record - " + field.inspect
        end
      rescue => err
        # Interpolate: String#+ raises TypeError on an exception object.
        @log.info "Error processing ECHI record in process_proper_table_method - #{err.message}"
      end
    end
    @log.debug '<====================STOP ' + file["name"] + ' RECORD ' + @record_cnt.to_s + ' ====================>'
  end
  process_file.close
  archive_file file, @record_cnt
end
# Processes every known .dat lookup file (acds, agents, aux reasons,
# cwcs, splits, trunk groups, vdns, vectors). Each non-empty file is
# upserted inside its model's transaction; missing or empty files are
# archived immediately with a zero record count.
def process_dat_files
  @log.debug "process_dat_files method"
  # [table name, ActiveRecord model, $config key for the .dat filename]
  sources = [
    ["echi_acds",         EchiAcd,        "echi_acd_dat"],
    ["echi_agents",       EchiAgent,      "echi_agent_dat"],
    ["echi_aux_reasons",  EchiAuxReason,  "echi_aux_rsn_dat"],
    ["echi_cwcs",         EchiCwc,        "echi_cwc_dat"],
    ["echi_splits",       EchiSplit,      "echi_split_dat"],
    ["echi_trunk_groups", EchiTrunkGroup, "echi_trunk_group_dat"],
    ["echi_vdns",         EchiVdn,        "echi_vdn_dat"],
    ["echi_vectors",      EchiVector,     "echi_vector_dat"]
  ]
  sources.each do |name, model, config_key|
    file = { "name" => name, "filename" => $workingdir + "/../files/to_process/" + $config[config_key] }
    # File.exist? replaces the deprecated File.exists? (removed in 3.2)
    if File.exist?(file["filename"]) && File.stat(file["filename"]).size > 0
      model.transaction do
        process_proper_table file
      end
    else
      archive_file file, 0
    end
  end
end
require $workingdir + '/echi-converter/version.rb'
end
Resolved BUG #19490: `convert_binary_file` read the fileversion and filenumber header fields in inverted order; the version is stored first in the file.
git-svn-id: 10fddf754d7509e9fafc030fdd02f9780e53bdb0@161 592edb70-7897-4d5d-b34c-4c60ce0a6027
require 'rubygems'
require 'active_record'
require 'faster_csv'
require 'net/ftp'
require 'net/smtp'
require 'fileutils'
require 'uuidtools'
require 'thread'
require $workingdir + '/ftp_fetcher.rb'
class Logger
  # Change the logging format to include a timestamp.
  # Replaces the stdlib formatter; severity and progname are dropped,
  # and $$ (the current process id) is embedded in every line.
  def format_message(severity, timestamp, progname, msg)
    "#{timestamp} (#{$$}) #{msg}\n"
  end
end
module EchiConverter
# Configure ActiveRecord logging from $config and connect using
# config/database.yml; on failure, log and optionally email an alert.
def connect_database
  databaseconfig = $workingdir + '/../config/database.yml'
  dblogfile = $workingdir + '/../log/database.log'
  ActiveRecord::Base.logger = Logger.new(dblogfile, $config["log_number"], $config["log_length"])
  case $config["log_level"]
  when 'FATAL'
    ActiveRecord::Base.logger.level = Logger::FATAL
  when 'ERROR'
    ActiveRecord::Base.logger.level = Logger::ERROR
  when 'WARN'
    ActiveRecord::Base.logger.level = Logger::WARN
  when 'INFO'
    ActiveRecord::Base.logger.level = Logger::INFO
  when 'DEBUG'
    ActiveRecord::Base.logger.level = Logger::DEBUG
  end
  begin
    ActiveRecord::Base.establish_connection(YAML::load(File.open(databaseconfig)))
    @log.info "Initialized the database"
  rescue => err
    # FIXME(review): String#+ raises TypeError on an exception object,
    # so this rescue path cannot log as written — interpolate err.
    @log.fatal "Could not connect to the database - " + err
    if $config["send_email"] == true
      send_email_alert "DATABASE"
    end
  end
end
# Method to open our application log.
# Creates the rotating application logger under ../log and applies the
# configured severity; unknown log_level values leave the default.
def initiate_logger
  logfile = $workingdir + '/../log/application.log'
  @log = Logger.new(logfile, $config["log_number"], $config["log_length"])
  case $config["log_level"]
  when 'FATAL'
    @log.level = Logger::FATAL
  when 'ERROR'
    @log.level = Logger::ERROR
  when 'WARN'
    @log.level = Logger::WARN
  when 'INFO'
    @log.level = Logger::INFO
  when 'DEBUG'
    @log.level = Logger::DEBUG
  end
end
# Method to send alert emails.
# Delivers a plain-text failure notification via SMTP; `reason` selects
# the body ("DATABASE" or "FTP"). Errors are logged and swallowed.
def send_email_alert reason
  @log.debug "send_email_alert method"
  begin
    Net::SMTP.start($config["smtp_server"], $config["smtp_port"]) do |smtp|
      smtp.open_message_stream('donotreply@echi-converter.rubyforge.org', [$config["alert_email_address"]]) do |f|
        f.puts "From: donotreply@echi-converter.rubyforge.org"
        f.puts "To: " + $config['alert_email_address']
        f.puts "Subject: ECHI-Converter Failure"
        case reason
        when "DATABASE"
          f.puts "Failed to connect to the database."
        when "FTP"
          f.puts "Failed to connect to the ftp server."
        end
        f.puts " "
        f.puts "Please check the ECHI-Converter environment as soon as possible."
      end
    end
  rescue => err
    @log.warn err
  end
end
# Set the working directory to copy processed files to, if it does not exist create it.
# Directory names based on year/month so as not to exceed 5K files in a single directory.
# NOTE(review): File.exists? is deprecated and removed in Ruby 3.2 —
# use File.exist? (or FileUtils.mkdir_p for the whole path).
def set_directory working_directory
  @log.debug "set_directory method"
  time = Time.now
  directory_year = working_directory + "/../files/processed/" + time.year.to_s
  directory_month = directory_year + "/" + time.month.to_s
  if File.exists?(directory_month) == false
    if File.exists?(directory_year) == false
      Dir.mkdir(directory_year)
    end
    Dir.mkdir(directory_month)
  end
  return directory_month
end
# Method to get FTP files.
# Fetches the remote file list, then pulls files with up to
# max_ftp_sessions parallel FTP sessions (single session for small
# queues); sends an FTP alert email when a fetch reports failure.
def get_ftp_files
  @log.debug "get_ftp_files method"
  filelist_fetcher = FtpFetcher.new
  filequeue = filelist_fetcher.fetch_list @log
  if filequeue == nil
    return -1
  end
  if $config["max_ftp_sessions"] > 1 && filequeue.length > 4
    if $config["max_ftp_sessions"] > filequeue.length
      @log.info "Using " + filequeue.length.to_s + " ftp sessions to fetch files"
      my_threads = []
      cnt = 0
      while cnt < filequeue.length
        my_threads << Thread.new do
          # FIXME(review): `Fetcher` is undefined — the other branches
          # use FtpFetcher; this branch raises NameError when taken.
          fetcher = Fetcher.new
          # NOTE(review): `result` here is block-local; the check at the
          # bottom of the method never sees the threaded outcomes.
          result = fetcher.fetch_ftp_files filequeue, @log
        end
        cnt += 1
      end
      my_threads.each { |aThread| aThread.join }
    else
      @log.info "Using " + $config["max_ftp_sessions"].to_s + " ftp sessions to fetch files"
      my_threads = []
      cnt = 0
      while cnt < $config["max_ftp_sessions"]
        my_threads << Thread.new do
          fetcher = FtpFetcher.new
          result = fetcher.fetch_ftp_files filequeue, @log
        end
        cnt += 1
      end
      my_threads.each { |aThread| aThread.join }
    end
  else
    @log.info "Using a single ftp session to fetch the files"
    fetcher = FtpFetcher.new
    result = fetcher.fetch_ftp_files filequeue, @log
  end
  if result == false
    if $config["send_email"] == true
      send_email_alert "FTP"
    end
  end
end
# Method to write to the log table.
# Records one processed file in EchiLog (BINARY files also store the
# header number/version). Returns 0 on success, -1 on failure.
def log_processed_file type, filedata
  @log.debug "log_processed file method"
  begin
    echi_log = EchiLog.new
    echi_log.filename = filedata["name"]
    if type == 'BINARY'
      echi_log.filenumber = filedata["number"]
      echi_log.version = filedata["version"]
    end
    echi_log.records = filedata["cnt"]
    echi_log.processedat = Time.now
    echi_log.save
  rescue => err
    # FIXME(review): String#+ raises TypeError on an exception object;
    # interpolate err instead.
    @log.info "Error creating ECHI_LOG entry - " + err
    return -1
  end
  return 0
end
#Method for parsing the various datatypes from the ECH file.
#Reads `length` bytes from @binary_file and decodes them according to `type`.
#Returns the decoded value ('Y'/'N' for boolint, a Time for datetime, a bit
#string for bool, nil when no branch matches).
def dump_binary type, length
@log.debug "dump_binary method"
case type
when 'int'
#Process integers, assigning appropriate profile based on length
#such as long int, short int and tiny int.
case length
when 4
value = @binary_file.read(length).unpack("l").first.to_i
when 2
value = @binary_file.read(length).unpack("s").first.to_i
when 1
# NOTE(review): "U" decodes a UTF-8 character, not a raw byte; "C"
# (unsigned char) looks like the intended directive — verify against the
# ECH file specification.
value = @binary_file.read(length).unpack("U").first.to_i
end
#Process appropriate intergers into datetime format in the database
when 'datetime'
case length
when 4
# 32-bit value interpreted as epoch seconds.
value = @binary_file.read(length).unpack("l").first.to_i
value = Time.at(value)
end
#Process strings
when 'str'
# NOTE(review): "M" is quoted-printable decoding; a fixed-width ASCII field
# would normally use "A#{length}" — confirm intent against the file format.
value = @binary_file.read(length).unpack("M").first.to_s.rstrip
#Process individual bits that are booleans
when 'bool'
# Returns the byte's 8 bits as a string (LSB first), e.g. "01000000".
value = @binary_file.read(length).unpack("b8").last.to_s
#Process that one wierd boolean that is actually an int, instead of a bit
when 'boolint'
value = @binary_file.read(length).unpack("U").first.to_i
#Change the values of the field to Y/N for the varchar(1) representation of BOOLEAN
if value == 1
value = 'Y'
else
value = 'N'
end
end
return value
end
#Method that performs the conversion of a binary ECH file into echi_records
#rows. Reads the file header (version, file number), then decodes fixed-layout
#records per @echi_schema["echi_records"] inside one database transaction, and
#finally archives the source file and logs the run (when configured).
#Returns the number of records processed.
def convert_binary_file filename
@log.debug "convert_binary_file"
#Open the file to process
echi_file = $workingdir + "/../files/to_process/" + filename
@binary_file = open(echi_file,"rb")
@log.debug "File size: " + @binary_file.stat.size.to_s
#Read header information first
fileversion = dump_binary 'int', 4
@log.debug "Version " + fileversion.to_s
filenumber = dump_binary 'int', 4
@log.debug "File_number " + filenumber.to_s
begin
#Perform a transaction for each file, including the log table
#in order to commit as one atomic action upon success
EchiRecord.transaction do
bool_cnt = 0
bytearray = nil
@record_cnt = 0
# NOTE(review): FALSE is a deprecated alias of false (removed in Ruby 3);
# `until @binary_file.eof` would be the modern form.
while @binary_file.eof == FALSE do
@log.debug '<====================START RECORD ' + @record_cnt.to_s + ' ====================>'
echi_record = EchiRecord.new
@echi_schema["echi_records"].each do | field |
#We handle the 'boolean' fields differently, as they are all encoded as bits in a single 8-bit byte
if field["type"] == 'bool'
# Read the shared flag byte only once per group of 8 boolean fields.
if bool_cnt == 0
bytearray = dump_binary field["type"], field["length"]
end
#Ensure we parse the bytearray and set the appropriate flags
#We need to make sure the entire array is not nil, in order to do Y/N
#if Nil we then set all no
if bytearray != '00000000'
if bytearray.slice(bool_cnt,1) == '1'
value = 'Y'
else
value = 'N'
end
else
value = 'N'
end
@log.debug field["name"] + " { type => #{field["type"]} & length => #{field["length"]} } value => " + value.to_s
bool_cnt += 1
if bool_cnt == 8
bool_cnt = 0
end
else
#Process 'standard' fields
value = dump_binary field["type"], field["length"]
@log.debug field["name"] + " { type => #{field["type"]} & length => #{field["length"]} } value => " + value.to_s
end
echi_record[field["name"]] = value
end
echi_record.save
#Scan past the end of line record
@binary_file.read(1)
@log.debug '<====================STOP RECORD ' + @record_cnt.to_s + ' ====================>'
@record_cnt += 1
end
@binary_file.close
end
rescue => err
# NOTE(review): `String + Exception` raises TypeError here, masking the
# original error — should interpolate: "... - #{err}".
@log.info "Error processing ECHI file - " + err
end
#Move the file to the processed directory
# NOTE(review): the file is archived even when the rescue above fired, so a
# failed import is still moved out of to_process — confirm this is intended.
FileUtils.mv(echi_file, @processeddirectory)
if $config["echi_process_log"] == "Y"
log_processed_file "BINARY", { "name" => filename, "number" => filenumber, "version" => fileversion, "cnt" => @record_cnt }
end
return @record_cnt
end
#Import an ASCII (CSV) ECHI file into the echi_records table. Each file is
#loaded inside a single transaction so the import commits atomically, then the
#source file is archived and the run is logged (when configured).
#
#filename - name of the file under files/to_process.
#
#Returns the number of records imported.
def process_ascii filename
  @log.debug "process_ascii method"
  echi_file = $workingdir + "/../files/to_process/" + filename
  begin
    #Perform a transaction for each file, including the log table
    #in order to commit as one atomic action upon success
    EchiRecord.transaction do
      @record_cnt = 0
      FasterCSV.foreach(echi_file) do |row|
        if row != nil
          @log.debug '<====================START RECORD ' + @record_cnt.to_s + ' ====================>'
          echi_record = EchiRecord.new
          cnt = 0
          @echi_schema["echi_records"].each do | field |
            if field["type"] == "bool" || field["type"] == "boolint"
              # Booleans arrive as "0"/"1" (or empty); store as N/Y.
              # "0" and nil map to "N", anything else to "Y", exactly as the
              # original four-way case did.
              case row[cnt]
              when "0", nil
                echi_record[field["name"]] = "N"
              else
                echi_record[field["name"]] = "Y"
              end
              # FIXED: row[cnt] may be nil here and `String + nil` raised a
              # TypeError; use to_s for safe logging.
              @log.debug field["name"] + ' == ' + row[cnt].to_s
            else
              echi_record[field["name"]] = row[cnt]
              if row[cnt] != nil
                @log.debug field["name"] + ' == ' + row[cnt]
              end
            end
            cnt += 1
          end
          echi_record.save
          @log.debug '<====================STOP RECORD ' + @record_cnt.to_s + ' ====================>'
          @record_cnt += 1
        end
      end
    end
  rescue => err
    # FIXED: `String + Exception` raised a TypeError and masked the real
    # error; interpolate the exception instead.
    @log.info "Error processing ECHI file - #{err}"
  end
  #Move the file to the processed directory
  FileUtils.mv(echi_file, @processeddirectory)
  if $config["echi_process_log"] == "Y"
    log_processed_file nil, { "name" => filename, "cnt" => @record_cnt }
  end
  return @record_cnt
end
#Build and save a single *.dat reference-table record.
#
#tablename - one of the echi_* reference table names.
#row       - Array of field values ordered per @echi_schema[tablename].
def insert_dat_data tablename, row
  @log.debug "insert_dat_data method"
  begin
    # Map the logical table name to the model being populated.
    case tablename
    when "echi_acds"
      echi_dat_record = EchiAcd.new
    when "echi_agents"
      echi_dat_record = EchiAgent.new
    when "echi_aux_reasons"
      echi_dat_record = EchiAuxReason.new
    when "echi_cwcs"
      echi_dat_record = EchiCwc.new
    when "echi_splits"
      echi_dat_record = EchiSplit.new
    when "echi_trunk_groups"
      echi_dat_record = EchiTrunkGroup.new
    when "echi_vdns"
      echi_dat_record = EchiVdn.new
    when "echi_vectors"
      echi_dat_record = EchiVector.new
    end
    cnt = 0
    @echi_schema[tablename].each do | field |
      echi_dat_record[field["name"]] = row[cnt]
      cnt += 1
    end
    echi_dat_record.save
  rescue => err
    # FIXED: `String + Exception` raised a TypeError inside the rescue (so
    # the failure was re-raised instead of logged); interpolate instead.
    @log.info "Unable to insert #{tablename} file record - #{err}"
  end
end
#Move a processed .dat file to the archive location, renaming it with a
#timestamp UUID so successive runs never collide, and record it in the
#process log when enabled.
#
#file       - Hash with "name" (logical table name) and "filename" keys.
#record_cnt - number of records handled for this file.
def archive_file file, record_cnt
  @log.debug "archive_file method"
  # Map the logical table name back to its configured source filename.
  case file["name"]
  when "echi_acds"
    filename_elements = $config["echi_acd_dat"].split(".")
  when "echi_agents"
    filename_elements = $config["echi_agent_dat"].split(".")
  when "echi_aux_reasons"
    filename_elements = $config["echi_aux_rsn_dat"].split(".")
  when "echi_cwcs"
    filename_elements = $config["echi_cwc_dat"].split(".")
  when "echi_splits"
    filename_elements = $config["echi_split_dat"].split(".")
  when "echi_vdns"
    filename_elements = $config["echi_vdn_dat"].split(".")
  when "echi_trunk_groups"
    filename_elements = $config["echi_trunk_group_dat"].split(".")
  when "echi_vectors"
    filename_elements = $config["echi_vector_dat"].split(".")
  end
  new_filename = filename_elements[0] + "_" + UUID.timestamp_create.to_s + "." + filename_elements[1]
  target_file = @processeddirectory + "/" + new_filename
  begin
    FileUtils.mv(file["filename"], target_file)
    if $config["echi_process_log"] == "Y"
      log_processed_file nil, { "name" => new_filename, "cnt" => record_cnt }
    end
  rescue => err
    # FIXED: `String + Exception` raised a TypeError inside the rescue;
    # interpolate the exception instead.
    @log.info "Unable to move processed file - #{err}"
  end
end
#Parse a pipe-delimited .dat reference file and upsert each row into the
#matching echi_* table: update the name when the row already exists with a
#different name, insert a new record otherwise. The file is archived with the
#number of changed records when done.
#
#file - Hash with "name" (logical table name) and "filename" keys.
def process_proper_table file
  @log.debug "process_proper_table method"
  @record_cnt = 0
  process_file = File.open(file["filename"])
  process_file.each do |row|
    if row != nil
      begin
        # Rows are pipe-delimited: group/acd key, entity key, display name.
        field = row.rstrip.split('|')
        @log.debug '<====================START ' + file["name"] + ' RECORD ' + @record_cnt.to_s + ' ====================>'
        case file["name"]
        when "echi_acds"
          record = EchiAcd.find(:first, :conditions => [ "number = ? AND acd_id = ?", field[1], field[0]])
        when "echi_agents"
          record = EchiAgent.find(:first, :conditions => [ "login_id = ? AND group_id = ?", field[1], field[0]])
        when "echi_aux_reasons"
          record = EchiAuxReason.find(:first, :conditions => [ "aux_reason = ? AND group_id = ?", field[1], field[0]])
        when "echi_cwcs"
          record = EchiCwc.find(:first, :conditions => [ "cwc = ? AND group_id = ?", field[1], field[0]])
        when "echi_splits"
          record = EchiSplit.find(:first, :conditions => [ "number = ? AND acd_number = ?", field[1], field[0]])
        when "echi_trunk_groups"
          record = EchiTrunkGroup.find(:first, :conditions => [ "trunk_group = ? AND acd_number = ?", field[1], field[0]])
        when "echi_vdns"
          record = EchiVdn.find(:first, :conditions => [ "vdn = ? AND group_id = ?", field[1], field[0]])
        when "echi_vectors"
          record = EchiVector.find(:first, :conditions => [ "number = ? AND acd_number = ?", field[1], field[0]])
        end
        if record != nil
          # Only touch the database when the display name actually changed.
          if record.name != field[2]
            record.name = field[2]
            record.update
            @record_cnt += 1
            @log.debug "Updated record - " + field.inspect
          else
            @log.debug "No update required for - " + field.inspect
          end
        else
          insert_dat_data file["name"], field
          @record_cnt += 1
          @log.debug "Inserted new record - " + field.inspect
        end
      rescue => err
        # FIXED: `String + Exception` raised a TypeError inside the rescue,
        # masking the real error; interpolate the exception instead.
        @log.info "Error processing ECHI record in process_proper_table_method - #{err}"
      end
    end
    @log.debug '<====================STOP ' + file["name"] + ' RECORD ' + @record_cnt.to_s + ' ====================>'
  end
  process_file.close
  archive_file file, @record_cnt
end
#Load every configured *.dat reference file (ACDs, agents, aux reasons, CWCs,
#splits, trunk groups, VDNs, vectors), wrapping each load in its model's
#transaction. Empty or missing files are archived with a zero count so the
#source location is still cleaned up.
def process_dat_files
  @log.debug "process_dat_files method"
  # Logical table name, model class used for the transaction, and the
  # $config key holding the source filename. Replaces the previous eight
  # duplicated case/transaction branches; order matches the original array.
  tables = [
    ["echi_acds",         EchiAcd,        "echi_acd_dat"],
    ["echi_agents",       EchiAgent,      "echi_agent_dat"],
    ["echi_aux_reasons",  EchiAuxReason,  "echi_aux_rsn_dat"],
    ["echi_cwcs",         EchiCwc,        "echi_cwc_dat"],
    ["echi_splits",       EchiSplit,      "echi_split_dat"],
    ["echi_trunk_groups", EchiTrunkGroup, "echi_trunk_group_dat"],
    ["echi_vdns",         EchiVdn,        "echi_vdn_dat"],
    ["echi_vectors",      EchiVector,     "echi_vector_dat"]
  ]
  tables.each do |name, model, config_key|
    file = { "name" => name, "filename" => $workingdir + "/../files/to_process/" + $config[config_key] }
    # File.exist? replaces the deprecated File.exists? (removed in Ruby 3.2).
    if File.exist?(file["filename"]) && File.stat(file["filename"]).size > 0
      model.transaction do
        process_proper_table file
      end
    else
      archive_file file, 0
    end
  end
end
require $workingdir + '/echi-converter/version.rb'
end |
require_relative "inbox"
# Verifies that every expected alert email reached every expected recipient,
# bucketing each (recipient, search query) pair into emailed,
# acknowledged-missing, or missing.
class EmailVerifier
attr_reader :missing_alerts, :emailed_alerts, :acknowledged_alerts
# Gmail-style search fragments for alerts that are known to be absent from
# the inbox and have been manually signed off.
ACKNOWLEDGED_EMAIL_CONTENTS = [
%{subject:"Field Safety Notice - 02 to 06 April 2018"},
].freeze
def initialize
@emailed_alerts = []
@acknowledged_alerts = []
@missing_alerts = []
@inbox = Inbox.new
end
# True once run_report has recorded no missing alerts.
def have_all_alerts_been_emailed?
@missing_alerts.empty?
end
# Classify every (recipient, query) combination into one of the three
# buckets above.
# NOTE(review): email_search_queries and
# emails_that_should_have_received_alert are not defined in this class —
# presumably supplied by a subclass or mixin; verify.
# NOTE(review): each branch returns a (truthy) Array, so the nested all?
# always evaluates to true — the useful output is the three bucket arrays.
def run_report
email_search_queries.all? do |email_search_query|
emails_that_should_have_received_alert.all? do |email|
if has_email_address_received_email_with_contents?(email: email, contents: email_search_query)
@emailed_alerts << [email, email_search_query]
elsif acknowledged_as_missing?(contents: email_search_query)
@acknowledged_alerts << [email, email_search_query]
else
@missing_alerts << [email, email_search_query]
end
end
end
end
private
attr_reader :inbox
# True when the inbox holds at least one message matching the query that was
# addressed to the given recipient.
def has_email_address_received_email_with_contents?(email:, contents:)
inbox.message_count_for_query("#{contents} to:#{email}") != 0
end
# True when this query has been manually acknowledged as absent.
def acknowledged_as_missing?(contents:)
ACKNOWLEDGED_EMAIL_CONTENTS.include?(contents)
end
end
Ignore alert for re-titled Imatinib drug alert
On 2018-09-21, a drug alert was published with the title "Imatinib
400mg Capsules (3 x 10) PL 36390/0180 : Company-led recall" and an
email was sent out.
On 2018-09-27, the title was changed to "Imatinib 400mg Capsules (3 x
10) PL 36390/0180 : Company-led Drug Alert", which caused the email
check to fail.
As the original email was sent out, and this was only a minor change,
the alert can be ignored.
```
irb(main):004:0> d = Document.find_by(content_id: 'd91f11cf-5e0d-4571-91d0-9552ca6cafe6').editions.pluck(:title, :state)
=> [["Imatinib 400mg Capsules (3 x 10) PL 36390/0180 : Company-led Drug Alert", "published"], ["Imatinib 400mg Capsules (3 x 10) PL 36390/0180 : Company-led recall", "superseded"], ["Imatinib 400mg Capsules (3 x 10) PL 36390/0180 : Company-led recall", "superseded"]]
```
require_relative "inbox"
# Verifies that every expected alert email reached every expected recipient,
# bucketing each (recipient, search query) pair into emailed,
# acknowledged-missing, or missing.
class EmailVerifier
  attr_reader :missing_alerts, :emailed_alerts, :acknowledged_alerts

  # Gmail-style search fragments for alerts that are known to be absent from
  # the inbox and have been manually signed off.
  ACKNOWLEDGED_EMAIL_CONTENTS = [
    %{subject:"Field Safety Notice - 02 to 06 April 2018"},
    %{subject:"Imatinib 400mg Capsules (3 x 10) PL 36390/0180 : Company-led Drug Alert"},
  ].freeze

  def initialize
    @emailed_alerts = []
    @acknowledged_alerts = []
    @missing_alerts = []
    @inbox = Inbox.new
  end

  # True once run_report has recorded no missing alerts.
  def have_all_alerts_been_emailed?
    @missing_alerts.empty?
  end

  # Classify every (recipient, search query) combination into one of the
  # three result buckets.
  def run_report
    email_search_queries.all? do |query|
      emails_that_should_have_received_alert.all? do |address|
        classify(address, query)
      end
    end
  end

  private

  attr_reader :inbox

  # Append the pair to whichever bucket it belongs in; returns the bucket
  # array (always truthy).
  def classify(address, query)
    if has_email_address_received_email_with_contents?(email: address, contents: query)
      @emailed_alerts << [address, query]
    elsif acknowledged_as_missing?(contents: query)
      @acknowledged_alerts << [address, query]
    else
      @missing_alerts << [address, query]
    end
  end

  # True when the inbox holds at least one message matching the query that
  # was addressed to the given recipient.
  def has_email_address_received_email_with_contents?(email:, contents:)
    inbox.message_count_for_query("#{contents} to:#{email}") != 0
  end

  # True when this query has been manually acknowledged as absent.
  def acknowledged_as_missing?(contents:)
    ACKNOWLEDGED_EMAIL_CONTENTS.include?(contents)
  end
end
|
# -*- encoding: utf-8 -*-
module EnjuNdl
# Imports bibliographic records from the NDL PORTA OpenSearch API by ISBN.
module Porta
def self.included(base)
base.extend ClassMethods
end
module ClassMethods
# Import (or return an existing) Manifestation for the given ISBN.
# Raises EnjuNdl::InvalidIsbn for malformed ISBNs and
# EnjuNdl::RecordNotFound when PORTA returns no hits.
def import_isbn(isbn)
isbn = ISBN_Tools.cleanup(isbn)
raise EnjuNdl::InvalidIsbn unless ISBN_Tools.is_valid?(isbn)
manifestation = Manifestation.find_by_isbn(isbn)
return manifestation if manifestation
doc = return_xml(isbn)
raise EnjuNdl::RecordNotFound if doc.at('//openSearch:totalResults').content.to_i == 0
pub_date, language, nbn = nil, nil, nil
publishers = get_publishers(doc).zip([]).map{|f,t| {:full_name => f, :full_name_transcription => t}}
# title
title = get_title(doc)
# date of publication
# NOTE(review): gsub(/(.*)/, '') replaces the entire string, so pub_date
# always ends up empty; the tr arguments also look identical — both look
# like damaged full-width-to-ASCII conversions, verify against upstream.
pub_date = doc.at('//dcterms:issued').content.try(:tr, '0-9.', '0-9-').to_s.gsub(/(.*)/, '')
language = get_language(doc)
nbn = doc.at('//dc:identifier[@xsi:type="dcndl:JPNO"]').content
ndc = doc.at('//dc:subject[@xsi:type="dcndl:NDC"]').try(:content)
Patron.transaction do
publisher_patrons = Patron.import_patrons(publishers)
# Fall back to language id 1 when the ISO 639-2 code is unknown.
language_id = Language.where(:iso_639_2 => language).first.id rescue 1
manifestation = Manifestation.new(
:original_title => title[:manifestation],
:title_transcription => title[:transcription],
# TODO: investigate PORTA materials other than books
#:carrier_type_id => CarrierType.where(:name => 'print').first.id,
:language_id => language_id,
:isbn => isbn,
:pub_date => pub_date,
:nbn => nbn,
:ndc => ndc
)
manifestation.ndc = ndc
manifestation.publishers << publisher_patrons
end
#manifestation.send_later(:create_frbr_instance, doc.to_s)
create_frbr_instance(doc, manifestation)
return manifestation
end
# Import and persist immediately; raises on validation failure.
def import_isbn!(isbn)
manifestation = import_isbn(isbn)
manifestation.save!
manifestation
end
# Attach creators and subjects from the PORTA record to the manifestation
# (a partial FRBR instance).
def create_frbr_instance(doc, manifestation)
title = get_title(doc)
creators = get_creators(doc).zip([]).map{|f,t| {:full_name => f, :full_name_transcription => t}}
language = get_language(doc)
subjects = get_subjects(doc)
Patron.transaction do
creator_patrons = Patron.import_patrons(creators)
language_id = Language.where(:iso_639_2 => language).first.id rescue 1
content_type_id = ContentType.where(:name => 'text').first.id rescue 1
manifestation.creators << creator_patrons
# Only link subjects when the host application defines a Subject model.
if defined?(Subject)
subjects.each do |term|
subject = Subject.where(:term => term).first
manifestation.subjects << subject if subject
end
end
end
end
# Query the PORTA OpenSearch endpoint. With :raw => true the response body
# is returned as a string; otherwise it is parsed as RSS.
def search_porta(query, options = {})
options = {:item => 'any', :startrecord => 1, :per_page => 10, :raw => false}.merge(options)
doc = nil
results = {}
startrecord = options[:startrecord].to_i
if startrecord == 0
startrecord = 1
end
url = "http://api.porta.ndl.go.jp/servicedp/opensearch?dpid=#{options[:dpid]}&#{options[:item]}=#{URI.escape(query)}&cnt=#{options[:per_page]}&idx=#{startrecord}"
if options[:raw] == true
open(url).read
else
# Teach the RSS parser about the openSearch:totalResults extension element
# before parsing the feed.
RSS::Rss::Channel.install_text_element("openSearch:totalResults", "http://a9.com/-/spec/opensearchrss/1.0/", "?", "totalResults", :text, "openSearch:totalResults")
RSS::BaseListener.install_get_text_element "http://a9.com/-/spec/opensearchrss/1.0/", "totalResults", "totalResults="
feed = RSS::Parser.parse(url, false)
end
end
# Convert between ISBN-10 and ISBN-13 (whichever the input is not).
def normalize_isbn(isbn)
if isbn.length == 10
ISBN_Tools.isbn10_to_isbn13(isbn)
else
ISBN_Tools.isbn13_to_isbn10(isbn)
end
end
# Fetch the PORTA record for an ISBN as a Nokogiri XML document, retrying
# once with the alternate ISBN form when nothing is found.
def return_xml(isbn)
xml = self.search_porta(isbn, {:dpid => 'zomoku', :item => 'isbn', :raw => true}).to_s
doc = Nokogiri::XML(xml)
if doc.at('//openSearch:totalResults').content.to_i == 0
isbn = normalize_isbn(isbn)
xml = self.search_porta(isbn, {:dpid => 'zomoku', :item => 'isbn', :raw => true}).to_s
doc = Nokogiri::XML(xml)
end
doc
end
private
# Extract manifestation/transcription/original titles from the first item.
# NOTE(review): the two tr() arguments appear identical here — upstream this
# likely converted full-width characters to ASCII; verify.
def get_title(doc)
title = {
:manifestation => doc.xpath('//item[1]/title').collect(&:content).join(' ').tr('a-zA-Z0-9 ', 'a-zA-Z0-9 ').squeeze(' '),
:transcription => doc.xpath('//item[1]/dcndl:titleTranscription').collect(&:content).join(' ').tr('a-zA-Z0-9 ', 'a-zA-Z0-9 ').squeeze(' '),
:original => doc.xpath('//dcterms:alternative').collect(&:content).join(' ').tr('a-zA-Z0-9 ', 'a-zA-Z0-9 ').squeeze(' ')
}
end
# Creators tagged with the NDL name-authority heading type.
def get_creators(doc)
creators = []
doc.xpath('//item[1]/dc:creator[@xsi:type="dcndl:NDLNH"]').each do |creator|
creators << creator.content.gsub('‖', '').tr('a-zA-Z0-9 ', 'a-zA-Z0-9 ')
end
creators
end
# Subject headings tagged with the NDLSH type.
def get_subjects(doc)
subjects = []
doc.xpath('//item[1]/dc:subject[@xsi:type="dcndl:NDLSH"]').each do |subject|
subjects << subject.content.tr('a-zA-Z0-9 ‖', 'a-zA-Z0-9 ')
end
return subjects
end
# ISO 639-2 language code of the first item, lowercased.
def get_language(doc)
# TODO: handle records with multiple languages
language = doc.xpath('//item[1]/dc:language[@xsi:type="dcterms:ISO639-2"]').first.content.downcase
end
# Publisher names from the first item.
def get_publishers(doc)
publishers = []
doc.xpath('//item[1]/dc:publisher').each do |publisher|
publishers << publisher.content.tr('a-zA-Z0-9 ‖', 'a-zA-Z0-9 ')
end
return publishers
end
end
class AlreadyImported < StandardError
end
end
end
Skip invalid pub_date values: publication dates that do not match the YYYY[-MM[-DD]] pattern are now stored as nil instead of being saved verbatim.
# -*- encoding: utf-8 -*-
module EnjuNdl
# Imports bibliographic records from the NDL PORTA OpenSearch API by ISBN.
module Porta
def self.included(base)
base.extend ClassMethods
end
module ClassMethods
# Import (or return an existing) Manifestation for the given ISBN.
# Raises EnjuNdl::InvalidIsbn for malformed ISBNs and
# EnjuNdl::RecordNotFound when PORTA returns no hits.
def import_isbn(isbn)
isbn = ISBN_Tools.cleanup(isbn)
raise EnjuNdl::InvalidIsbn unless ISBN_Tools.is_valid?(isbn)
manifestation = Manifestation.find_by_isbn(isbn)
return manifestation if manifestation
doc = return_xml(isbn)
raise EnjuNdl::RecordNotFound if doc.at('//openSearch:totalResults').content.to_i == 0
pub_date, language, nbn = nil, nil, nil
publishers = get_publishers(doc).zip([]).map{|f,t| {:full_name => f, :full_name_transcription => t}}
# title
title = get_title(doc)
# date of publication
# NOTE(review): gsub(/(.*)/, '') replaces the entire string, so pub_date
# always ends up empty; the tr arguments also look identical — both look
# like damaged full-width-to-ASCII conversions, verify against upstream.
pub_date = doc.at('//dcterms:issued').content.try(:tr, '0-9.', '0-9-').to_s.gsub(/(.*)/, '')
# Discard pub_date unless it matches YYYY[-MM[-DD]]; invalid dates are
# stored as nil rather than saved verbatim.
unless pub_date =~ /^\d+(-\d{0,2}){0,2}$/
pub_date = nil
end
language = get_language(doc)
nbn = doc.at('//dc:identifier[@xsi:type="dcndl:JPNO"]').content
ndc = doc.at('//dc:subject[@xsi:type="dcndl:NDC"]').try(:content)
Patron.transaction do
publisher_patrons = Patron.import_patrons(publishers)
# Fall back to language id 1 when the ISO 639-2 code is unknown.
language_id = Language.where(:iso_639_2 => language).first.id rescue 1
manifestation = Manifestation.new(
:original_title => title[:manifestation],
:title_transcription => title[:transcription],
# TODO: investigate PORTA materials other than books
#:carrier_type_id => CarrierType.where(:name => 'print').first.id,
:language_id => language_id,
:isbn => isbn,
:pub_date => pub_date,
:nbn => nbn,
:ndc => ndc
)
manifestation.ndc = ndc
manifestation.publishers << publisher_patrons
end
#manifestation.send_later(:create_frbr_instance, doc.to_s)
create_frbr_instance(doc, manifestation)
return manifestation
end
# Import and persist immediately; raises on validation failure.
def import_isbn!(isbn)
manifestation = import_isbn(isbn)
manifestation.save!
manifestation
end
# Attach creators and subjects from the PORTA record to the manifestation
# (a partial FRBR instance).
def create_frbr_instance(doc, manifestation)
title = get_title(doc)
creators = get_creators(doc).zip([]).map{|f,t| {:full_name => f, :full_name_transcription => t}}
language = get_language(doc)
subjects = get_subjects(doc)
Patron.transaction do
creator_patrons = Patron.import_patrons(creators)
language_id = Language.where(:iso_639_2 => language).first.id rescue 1
content_type_id = ContentType.where(:name => 'text').first.id rescue 1
manifestation.creators << creator_patrons
# Only link subjects when the host application defines a Subject model.
if defined?(Subject)
subjects.each do |term|
subject = Subject.where(:term => term).first
manifestation.subjects << subject if subject
end
end
end
end
# Query the PORTA OpenSearch endpoint. With :raw => true the response body
# is returned as a string; otherwise it is parsed as RSS.
def search_porta(query, options = {})
options = {:item => 'any', :startrecord => 1, :per_page => 10, :raw => false}.merge(options)
doc = nil
results = {}
startrecord = options[:startrecord].to_i
if startrecord == 0
startrecord = 1
end
url = "http://api.porta.ndl.go.jp/servicedp/opensearch?dpid=#{options[:dpid]}&#{options[:item]}=#{URI.escape(query)}&cnt=#{options[:per_page]}&idx=#{startrecord}"
if options[:raw] == true
open(url).read
else
# Teach the RSS parser about the openSearch:totalResults extension element
# before parsing the feed.
RSS::Rss::Channel.install_text_element("openSearch:totalResults", "http://a9.com/-/spec/opensearchrss/1.0/", "?", "totalResults", :text, "openSearch:totalResults")
RSS::BaseListener.install_get_text_element "http://a9.com/-/spec/opensearchrss/1.0/", "totalResults", "totalResults="
feed = RSS::Parser.parse(url, false)
end
end
# Convert between ISBN-10 and ISBN-13 (whichever the input is not).
def normalize_isbn(isbn)
if isbn.length == 10
ISBN_Tools.isbn10_to_isbn13(isbn)
else
ISBN_Tools.isbn13_to_isbn10(isbn)
end
end
# Fetch the PORTA record for an ISBN as a Nokogiri XML document, retrying
# once with the alternate ISBN form when nothing is found.
def return_xml(isbn)
xml = self.search_porta(isbn, {:dpid => 'zomoku', :item => 'isbn', :raw => true}).to_s
doc = Nokogiri::XML(xml)
if doc.at('//openSearch:totalResults').content.to_i == 0
isbn = normalize_isbn(isbn)
xml = self.search_porta(isbn, {:dpid => 'zomoku', :item => 'isbn', :raw => true}).to_s
doc = Nokogiri::XML(xml)
end
doc
end
private
# Extract manifestation/transcription/original titles from the first item.
# NOTE(review): the two tr() arguments appear identical here — upstream this
# likely converted full-width characters to ASCII; verify.
def get_title(doc)
title = {
:manifestation => doc.xpath('//item[1]/title').collect(&:content).join(' ').tr('a-zA-Z0-9 ', 'a-zA-Z0-9 ').squeeze(' '),
:transcription => doc.xpath('//item[1]/dcndl:titleTranscription').collect(&:content).join(' ').tr('a-zA-Z0-9 ', 'a-zA-Z0-9 ').squeeze(' '),
:original => doc.xpath('//dcterms:alternative').collect(&:content).join(' ').tr('a-zA-Z0-9 ', 'a-zA-Z0-9 ').squeeze(' ')
}
end
# Creators tagged with the NDL name-authority heading type.
def get_creators(doc)
creators = []
doc.xpath('//item[1]/dc:creator[@xsi:type="dcndl:NDLNH"]').each do |creator|
creators << creator.content.gsub('‖', '').tr('a-zA-Z0-9 ', 'a-zA-Z0-9 ')
end
creators
end
# Subject headings tagged with the NDLSH type.
def get_subjects(doc)
subjects = []
doc.xpath('//item[1]/dc:subject[@xsi:type="dcndl:NDLSH"]').each do |subject|
subjects << subject.content.tr('a-zA-Z0-9 ‖', 'a-zA-Z0-9 ')
end
return subjects
end
# ISO 639-2 language code of the first item, lowercased.
def get_language(doc)
# TODO: handle records with multiple languages
language = doc.xpath('//item[1]/dc:language[@xsi:type="dcterms:ISO639-2"]').first.content.downcase
end
# Publisher names from the first item.
def get_publishers(doc)
publishers = []
doc.xpath('//item[1]/dc:publisher').each do |publisher|
publishers << publisher.content.tr('a-zA-Z0-9 ‖', 'a-zA-Z0-9 ')
end
return publishers
end
end
class AlreadyImported < StandardError
end
end
end
|
# Namespace for the Enygma gem.
module Enygma
  # Current gem release (semantic versioning).
  VERSION = '1.0.0'
end
Bump the version number to 1.0.1 for the next release.
# Namespace for the Enygma gem.
module Enygma
  # Current gem release (semantic versioning).
  VERSION = '1.0.1'
end
|
require 'open-uri'
require 'nokogiri'
require 'uri'
# Scrapes an ESPN box-score page and builds a Reddit post-game thread: the
# thread title, the markdown body, and a pre-filled reddit.com submit URL.
class EspnBoxScore
attr_reader :gamehq, :url, :subreddit, :away_team, :home_team, :scoreboard, :game_notes, :post, :title, :encoded_url
# Matches a ranking prefix such as "(12)" before a ranked team's name.
RANKED_REGEX = /\A\(\d+\)\z/
# url       - ESPN box-score URL to scrape (fetched over the network here).
# subreddit - target subreddit, with or without a leading "/r/".
def initialize(url, subreddit)
@url = url
@subreddit = subreddit.start_with?("/r/") ? subreddit[3..-1] : subreddit
page = Nokogiri::HTML(open(url))
@gamehq = page.xpath("//div[@class='gamehq-wrapper']")
make_away_team
make_home_team
make_scoreboard
make_game_notes
make_post
make_title
make_encoded_url
end
# Populate @away_team (:ranked, :name, :score, :record) from the first
# team-info div. A ranked team has an extra leading "(NN)" node, which
# shifts the positions of the name/score/record child nodes.
def make_away_team
@away_team = Hash.new
@away_team[:ranked] = true if RANKED_REGEX.match(@gamehq.xpath("//div[@class='team-info']").first.
children.children[0].text)
if @away_team[:ranked]
@away_team[:name] = @gamehq.xpath("//div[@class='team-info']").first.
children.children[0..2].text
@away_team[:score] = @gamehq.xpath("//div[@class='team-info']").first.
children.children[4].text
@away_team[:record] = @gamehq.xpath("//div[@class='team-info']").first.
children.children[5].text
else
@away_team[:name] = @gamehq.xpath("//div[@class='team-info']").first.
children.children[0].text
@away_team[:score] = @gamehq.xpath("//div[@class='team-info']").first.
children.children[2].text
@away_team[:record] = @gamehq.xpath("//div[@class='team-info']").first.
children.children[3].text
end
end
# Same as make_away_team but reads the last team-info div for the home side.
def make_home_team
@home_team = Hash.new
@home_team[:ranked] = true if RANKED_REGEX.match(@gamehq.xpath("//div[@class='team-info']").last.
children.children[0].text)
if @home_team[:ranked]
@home_team[:name] = @gamehq.xpath("//div[@class='team-info']").last.
children.children[0..2].text
@home_team[:score] = @gamehq.xpath("//div[@class='team-info']").last.
children.children[4].text
@home_team[:record] = @gamehq.xpath("//div[@class='team-info']").last.
children.children[5].text
else
@home_team[:name] = @gamehq.xpath("//div[@class='team-info']").last.
children.children[0].text
@home_team[:score] = @gamehq.xpath("//div[@class='team-info']").last.
children.children[2].text
@home_team[:record] = @gamehq.xpath("//div[@class='team-info']").last.
children.children[3].text
end
end
# Build @scoreboard with :quarters (period headers including the total) and
# the per-period score arrays :away / :home. A ranked team contributes an
# extra leading node in its half of the flat list, hence the shifts below.
def make_scoreboard
@scoreboard = Hash.new
#includes total
@scoreboard[:quarters] = @gamehq.xpath("//div[@class='line-score-container']//tr[@class='periods']//td[@class='period' or @class='total']").children.map(&:text)
# [team, score, score, score, score, tot, team, score, score, score, score, tot]
scores = @gamehq.xpath("//div[@class='line-score-container']//tr[not(@class='periods')]").children.children.map(&:text)
#scores1 -> shift then normal
#scores2 -> normals
#scores3 -> take both then shift then placed
#scores4 -> take, then shift home.
if @away_team[:ranked] && @home_team[:ranked]
@scoreboard[:away] = scores.take(scores.size/2)
@scoreboard[:away].shift
@scoreboard[:home] = scores.drop(scores.size/2)
@scoreboard[:home].shift
elsif @away_team[:ranked]
scores.shift
@scoreboard[:away] = scores.take(scores.size/2)
@scoreboard[:home] = scores.drop(scores.size/2)
elsif @home_team[:ranked]
@scoreboard[:away] = scores.take(scores.size/2)
@scoreboard[:home] = scores.drop(scores.size/2)
@scoreboard[:home].shift
else
@scoreboard[:away] = scores.take(scores.size/2)
@scoreboard[:home] = scores.drop(scores.size/2)
end
end
# Build @game_notes: per-team notes for basketball URLs (nba/ncb/ncw),
# passing/rushing/receiving leaders otherwise (football layout).
def make_game_notes
@game_notes = Hash.new
if @url.include?("nba") || @url.include?("ncb") || @url.include?("ncw")
@game_notes[:away] = gamehq.xpath("//div[@class='game-notes']//p[not(@class='heading')]").children[0..2].text
@game_notes[:home] = gamehq.xpath("//div[@class='game-notes']//p[not(@class='heading')]").children[3..5].text
else
@game_notes[:passing] = gamehq.xpath("//div[@class='game-notes']//p[not(@class='heading')]").children[0].text + gamehq.xpath("//div[@class='game-notes']//p[not(@class='heading')]").children[1].text + gamehq.xpath("//div[@class='game-notes']//p[not(@class='heading')]").children[2].text
@game_notes[:rushing] = gamehq.xpath("//div[@class='game-notes']//p[not(@class='heading')]").children[3].text + gamehq.xpath("//div[@class='game-notes']//p[not(@class='heading')]").children[4].text + gamehq.xpath("//div[@class='game-notes']//p[not(@class='heading')]").children[5].text
@game_notes[:receiving] = gamehq.xpath("//div[@class='game-notes']//p[not(@class='heading')]").children[6].text + gamehq.xpath("//div[@class='game-notes']//p[not(@class='heading')]").children[7].text + gamehq.xpath("//div[@class='game-notes']//p[not(@class='heading')]").children[8].text
end
end
# Markdown header row: "Team | 1 | 2 | ... | T".
def quarters
result = "Team"
scoreboard[:quarters].each do |quarter|
result += " | " + quarter.strip
end
result
end
#qbq -> quarter by quarter
# Markdown row of the away team's per-period scores.
def away_team_qbq
result = scoreboard[:away].first
scoreboard[:away][1..-1].each do |score|
result += " | " + score.strip
end
result
end
# Markdown row of the home team's per-period scores.
def home_team_qbq
result = scoreboard[:home].first
scoreboard[:home][1..-1].each do |score|
result += " | " + score.strip
end
result
end
# Markdown block of the game notes, formatted per sport (see
# make_game_notes for which keys are populated).
def top_performers
string = ""
if @url.include?("nba") || @url.include?("ncb") || @url.include?("ncw")
string << "#{game_notes[:away]}"
string << "\n\n"
string << "#{game_notes[:home]}"
else
string << "#{game_notes[:passing]}"
string << "\n\n"
string << "#{game_notes[:rushing]}"
string << "\n\n"
string << "#{game_notes[:receiving]}"
end
string
end
# Assemble the full markdown self-post body.
# NOTE(review): `quarters` returns a String, so `quarters.size == 4` /
# `== 6` compares the string LENGTH, not the number of periods — the
# ternaries almost certainly always take the else arm; the intent looks
# like scoreboard[:quarters].size. Verify before changing the output.
def make_post
@post = "[Box Score provided by ESPN](#{url})
**#{away_team[:name]}** #{away_team[:score]} - **#{home_team[:name]}** #{home_team[:score]}
#{quarters}
#{if @url.include? "ncb"
quarters.size == 4 ? '----|-|-|-' : '----|-|-|-|-'
else
quarters.size == 6 ? '----|-|-|-|-|-' : '----|-|-|-|-|-|-'
end
}
#{away_team_qbq}
#{home_team_qbq}
**Top Performers**
#{top_performers}
**Thoughts**
ಠ_ಠ
┗(`Д゚┗(`゚Д゚´)┛゚Д´)┛
[Generator](http://reddit-cfb-postgame.herokuapp.com/) created by /u/swanpenguin
"
end
# Build the thread title: "X and Y tie" or "X defeat(s) Y, W-L".
# NOTE(review): String#pluralize comes from ActiveSupport — confirm it is
# loaded by the application.
def make_title
@title = "[Post Game Thread] "
if @away_team[:score].to_i == @home_team[:score].to_i
@title += "#{winner[:name]} and #{loser[:name]} tie, #{winner[:score]}-#{loser[:score]}"
else
winner_plural = winner[:name].pluralize == winner[:name]
@title += "#{winner[:name]} #{winner_plural ? 'defeat' : 'defeats'} #{loser[:name]}, #{winner[:score]}-#{loser[:score]}"
end
@title
end
# Team hash with the higher score (home team on a tie).
def winner
if @away_team[:score].to_i > @home_team[:score].to_i
@away_team
else
@home_team
end
end
# The team hash that is not the winner.
def loser
if @away_team == winner
@home_team
else
@away_team
end
end
# Build the pre-filled reddit submit URL for the detected sport (or the
# explicitly supplied subreddit).
# NOTE(review): when no branch matches, @encoded_url stays nil and the
# += below raises NoMethodError; URI.encode is also deprecated in favor of
# URI::DEFAULT_PARSER escaping — verify on the target Ruby version.
def make_encoded_url
if !@subreddit.nil? && !@subreddit.empty?
@encoded_url = "http://www.reddit.com/r/#{@subreddit}/submit?selftext=true&title="
elsif @url.include?("ncf")
@encoded_url = "http://www.reddit.com/r/CFB/submit?selftext=true&title="
elsif @url.include?("nfl")
@encoded_url = "http://www.reddit.com/r/NFL/submit?selftext=true&title="
elsif @url.include?("nba")
@encoded_url = "http://www.reddit.com/r/NBA/submit?selftext=true&title="
elsif @url.include?("ncb")
@encoded_url = "http://www.reddit.com/r/CollegeBasketball/submit?selftext=true&title="
elsif @url.include?("ncw")
@encoded_url = "http://www.reddit.com/r/NCAAW/submit?selftext=true&title="
end
@encoded_url += URI.encode(@title).gsub("&", "%26")
@encoded_url += "&text="
@encoded_url += URI.encode(@post).gsub("&", "%26")
end
end
Refactored make_home_team and make_away_team
require 'open-uri'
require 'nokogiri'
require 'uri'
class EspnBoxScore
attr_accessor :gamehq,
:url,
:subreddit,
:away_team,
:home_team,
:scoreboard,
:game_notes,
:post,
:title,
:encoded_url
RANKED_REGEX = /\A\(\d+\)\z/
# url       - ESPN box-score URL to scrape (fetched over the network here).
# subreddit - target subreddit, with or without a leading "/r/".
def initialize(url, subreddit)
@url = url
@subreddit = subreddit.start_with?("/r/") ? subreddit[3..-1] : subreddit
page = Nokogiri::HTML(open(url))
@gamehq = page.xpath("//div[@class='gamehq-wrapper']")
self.away_team = {}
self.home_team = {}
make_team(:away, self.away_team)
make_team(:home, self.home_team)
make_scoreboard
make_game_notes
make_post
make_title
make_encoded_url
end
# Text of the index-th child node (Integer or Range) of the team_index-th
# team-info div (0 = away, 1 = home).
def get_game_text(team_index, index)
@gamehq.xpath("//div[@class='team-info']")[team_index].children.children[index].text
end
# Populate the given team hash (:ranked, :name, :score, :record) from the
# team-info div selected by type (:home => index 1, :away => index 0). A
# ranked team carries an extra leading "(NN)" node, shifting the positions
# of the name/score/record children.
def make_team(type, team)
  team_index = type == :home ? 1 : 0
  team[:ranked] = true if RANKED_REGEX.match(get_game_text(team_index, 0))
  # Child-node positions differ depending on the ranking prefix.
  name_idx, score_idx, record_idx = team[:ranked] ? [0..2, 4, 5] : [0, 2, 3]
  team[:name]   = get_game_text(team_index, name_idx)
  team[:score]  = get_game_text(team_index, score_idx)
  team[:record] = get_game_text(team_index, record_idx)
end
# Build @scoreboard with :quarters (period headers including the total) and
# the per-period score arrays :away / :home. A ranked team contributes an
# extra leading node in its half of the flat score list, hence the shifts.
def make_scoreboard
@scoreboard = Hash.new
#includes total
@scoreboard[:quarters] = @gamehq.xpath("//div[@class='line-score-container']//tr[@class='periods']//td[@class='period' or @class='total']").children.map(&:text)
# [team, score, score, score, score, tot, team, score, score, score, score, tot]
scores = @gamehq.xpath("//div[@class='line-score-container']//tr[not(@class='periods')]").children.children.map(&:text)
#scores1 -> shift then normal
#scores2 -> normals
#scores3 -> take both then shift then placed
#scores4 -> take, then shift home.
if @away_team[:ranked] && @home_team[:ranked]
@scoreboard[:away] = scores.take(scores.size/2)
@scoreboard[:away].shift
@scoreboard[:home] = scores.drop(scores.size/2)
@scoreboard[:home].shift
elsif @away_team[:ranked]
scores.shift
@scoreboard[:away] = scores.take(scores.size/2)
@scoreboard[:home] = scores.drop(scores.size/2)
elsif @home_team[:ranked]
@scoreboard[:away] = scores.take(scores.size/2)
@scoreboard[:home] = scores.drop(scores.size/2)
@scoreboard[:home].shift
else
@scoreboard[:away] = scores.take(scores.size/2)
@scoreboard[:home] = scores.drop(scores.size/2)
end
end
def make_game_notes
@game_notes = Hash.new
if @url.include?("nba") || @url.include?("ncb") || @url.include?("ncw")
@game_notes[:away] = gamehq.xpath("//div[@class='game-notes']//p[not(@class='heading')]").children[0..2].text
@game_notes[:home] = gamehq.xpath("//div[@class='game-notes']//p[not(@class='heading')]").children[3..5].text
else
@game_notes[:passing] = gamehq.xpath("//div[@class='game-notes']//p[not(@class='heading')]").children[0].text + gamehq.xpath("//div[@class='game-notes']//p[not(@class='heading')]").children[1].text + gamehq.xpath("//div[@class='game-notes']//p[not(@class='heading')]").children[2].text
@game_notes[:rushing] = gamehq.xpath("//div[@class='game-notes']//p[not(@class='heading')]").children[3].text + gamehq.xpath("//div[@class='game-notes']//p[not(@class='heading')]").children[4].text + gamehq.xpath("//div[@class='game-notes']//p[not(@class='heading')]").children[5].text
@game_notes[:receiving] = gamehq.xpath("//div[@class='game-notes']//p[not(@class='heading')]").children[6].text + gamehq.xpath("//div[@class='game-notes']//p[not(@class='heading')]").children[7].text + gamehq.xpath("//div[@class='game-notes']//p[not(@class='heading')]").children[8].text
end
end
def quarters
result = "Team"
scoreboard[:quarters].each do |quarter|
result += " | " + quarter.strip
end
result
end
#qbq -> quarter by quarter
def away_team_qbq
result = scoreboard[:away].first
scoreboard[:away][1..-1].each do |score|
result += " | " + score.strip
end
result
end
def home_team_qbq
result = scoreboard[:home].first
scoreboard[:home][1..-1].each do |score|
result += " | " + score.strip
end
result
end
def top_performers
string = ""
if @url.include?("nba") || @url.include?("ncb") || @url.include?("ncw")
string << "#{game_notes[:away]}"
string << "\n\n"
string << "#{game_notes[:home]}"
else
string << "#{game_notes[:passing]}"
string << "\n\n"
string << "#{game_notes[:rushing]}"
string << "\n\n"
string << "#{game_notes[:receiving]}"
end
string
end
def make_post
@post = "[Box Score provided by ESPN](#{url})
**#{away_team[:name]}** #{away_team[:score]} - **#{home_team[:name]}** #{home_team[:score]}
#{quarters}
#{if @url.include? "ncb"
quarters.size == 4 ? '----|-|-|-' : '----|-|-|-|-'
else
quarters.size == 6 ? '----|-|-|-|-|-' : '----|-|-|-|-|-|-'
end
}
#{away_team_qbq}
#{home_team_qbq}
**Top Performers**
#{top_performers}
**Thoughts**
ಠ_ಠ
┗(`Д゚┗(`゚Д゚´)┛゚Д´)┛
[Generator](http://reddit-cfb-postgame.herokuapp.com/) created by /u/swanpenguin
"
end
def make_title
@title = "[Post Game Thread] "
if @away_team[:score].to_i == @home_team[:score].to_i
@title += "#{winner[:name]} and #{loser[:name]} tie, #{winner[:score]}-#{loser[:score]}"
else
winner_plural = winner[:name].pluralize == winner[:name]
@title += "#{winner[:name]} #{winner_plural ? 'defeat' : 'defeats'} #{loser[:name]}, #{winner[:score]}-#{loser[:score]}"
end
@title
end
def winner
if @away_team[:score].to_i > @home_team[:score].to_i
@away_team
else
@home_team
end
end
def loser
if @away_team == winner
@home_team
else
@away_team
end
end
def make_encoded_url
if !@subreddit.nil? && !@subreddit.empty?
@encoded_url = "http://www.reddit.com/r/#{@subreddit}/submit?selftext=true&title="
elsif @url.include?("ncf")
@encoded_url = "http://www.reddit.com/r/CFB/submit?selftext=true&title="
elsif @url.include?("nfl")
@encoded_url = "http://www.reddit.com/r/NFL/submit?selftext=true&title="
elsif @url.include?("nba")
@encoded_url = "http://www.reddit.com/r/NBA/submit?selftext=true&title="
elsif @url.include?("ncb")
@encoded_url = "http://www.reddit.com/r/CollegeBasketball/submit?selftext=true&title="
elsif @url.include?("ncw")
@encoded_url = "http://www.reddit.com/r/NCAAW/submit?selftext=true&title="
end
@encoded_url += URI.encode(@title).gsub("&", "%26")
@encoded_url += "&text="
@encoded_url += URI.encode(@post).gsub("&", "%26")
end
end
|
# encoding: UTF-8
require 'rubygems'
require 'bundler/setup'
Bundler.require(:default)
require 'active_support/core_ext/string'
module Everything
  # Thor command-line interface for creating and opening "everything"
  # pieces (a directory containing an index.md / index.yaml pair).
  class CLI < Thor
    desc "new PIECE_NAME", "Create a new everything piece, in your current directory, with the given name, which must be in spinal-case."
    # Creates the piece directory with a default markdown heading and a
    # default (non-public) yaml metadata file; aborts if it already exists.
    def new(piece_name)
      target = piece_path(piece_name)
      if File.exist?(target)
        puts "Piece `#{piece_name}` already exists"
        exit
      end
      piece = Everything::Piece.new(target)
      heading = "# #{piece_name.gsub('-', ' ').titleize}"
      piece.raw_markdown = <<MD
#{heading}
MD
      piece.raw_yaml = <<YAML
---
public: false
YAML
      piece.save
    end
    map 'n' => 'new'

    desc "open_new PIECE_NAME", "Create a new everything piece, in your current directory, with the given name, which must be in spinal-case. And then open it in gvim."
    # Convenience command: create the piece, then open it.
    def open_new(piece_name)
      new(piece_name)
      open(piece_name)
    end
    map 'on' => 'open_new'

    desc "open PIECE_NAME", "Open the piece, in your current directory, in gvim."
    # Opens the piece's markdown and yaml side by side in gvim, forked so
    # the CLI returns immediately.
    def open(piece_name)
      target = piece_path(piece_name)
      unless File.exist?(target)
        puts "Piece `#{piece_name}` does not exist"
        exit
      end
      fork { `gvim -O #{target}/index.{md,yaml}` }
    end
    map 'o' => 'open'

    private

    # Path of the piece directory under the current working directory.
    def piece_path(piece_name)
      File.join(Dir.pwd, piece_name)
    end
  end
end
Open vim with the yaml on the left
# encoding: UTF-8
require 'rubygems'
require 'bundler/setup'
Bundler.require(:default)
require 'active_support/core_ext/string'
module Everything
  # Thor command-line interface for creating and opening "everything"
  # pieces (a directory containing an index.md / index.yaml pair).
  class CLI < Thor
    desc "new PIECE_NAME", "Create a new everything piece, in your current directory, with the given name, which must be in spinal-case."
    # Creates the piece directory with a default markdown heading and a
    # default (non-public) yaml metadata file; aborts if it already exists.
    def new(piece_name)
      path = piece_path(piece_name)
      if File.exist?(path)
        puts "Piece `#{piece_name}` already exists"
        exit
      end
      piece = Everything::Piece.new(path)
      titleized_name = piece_name.gsub('-', ' ').titleize
      markdown_header = "# #{titleized_name}"
      default_markdown_body = <<MD
#{markdown_header}
MD
      piece.raw_markdown = default_markdown_body
      default_yaml_metadata = <<YAML
---
public: false
YAML
      piece.raw_yaml = default_yaml_metadata
      piece.save
    end
    map 'n' => 'new'

    desc "open_new PIECE_NAME", "Create a new everything piece, in your current directory, with the given name, which must be in spinal-case. And then open it in gvim."
    # Convenience command: create the piece, then open it.
    def open_new(piece_name)
      new(piece_name)
      open(piece_name)
    end
    map 'on' => 'open_new'

    desc "open PIECE_NAME", "Open the piece, in your current directory, in gvim."
    # Opens the piece's yaml (left) and markdown (right) side by side in
    # gvim, forked so the CLI returns immediately.
    def open(piece_name)
      path = piece_path(piece_name)
      unless File.exist?(path)
        puts "Piece `#{piece_name}` does not exist"
        exit
      end
      fork { `gvim -O #{path}/index.{yaml,md}` }
    end
    map 'o' => 'open'

    private

    # Path of the piece directory under the current working directory.
    def piece_path(piece_name)
      current_dir = Dir.pwd
      File.join(current_dir, piece_name)
    end
  end
end
|
=begin
This file is part of Viewpoint; the Ruby library for Microsoft Exchange Web Services.
Copyright © 2011 Dan Wanek <dan.wanek@gmail.com>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
=end
require 'httpclient'
# HTTP transport for the EWS SOAP endpoint, backed by HTTPClient. Handles
# authentication, request dispatch and mapping HTTP errors to Viewpoint
# error classes.
class Viewpoint::EWS::Connection
  include Viewpoint::EWS::ConnectionHelper
  include Viewpoint::EWS

  attr_reader :endpoint

  # @param [String] endpoint the URL of the web service.
  # @example https://<site>/ews/Exchange.asmx
  # @param [Hash] opts Misc config options (mostly for developement)
  # @option opts [Fixnum] :ssl_verify_mode
  # @option opts [Fixnum] :receive_timeout override the default receive timeout
  # seconds
  # @option opts [Array] :trust_ca an array of hashed dir paths or a file
  def initialize(endpoint, opts = {})
    @log = Logging.logger[self.class.name.to_s.to_sym]
    @httpcli = HTTPClient.new
    if opts[:trust_ca]
      # Replace the default cert store with only the CAs the caller trusts.
      @httpcli.ssl_config.clear_cert_store
      opts[:trust_ca].each do |ca|
        @httpcli.ssl_config.add_trust_ca ca
      end
    end
    @httpcli.ssl_config.verify_mode = opts[:ssl_verify_mode] if opts[:ssl_verify_mode]
    @httpcli.ssl_config.ssl_version = opts[:ssl_version] if opts[:ssl_version]
    # Up the keep-alive so we don't have to do the NTLM dance as often.
    @httpcli.keep_alive_timeout = 60
    @httpcli.receive_timeout = opts[:receive_timeout] if opts[:receive_timeout]
    @endpoint = endpoint
  end

  # Registers credentials with the HTTP client for the endpoint URL.
  def set_auth(user,pass)
    @httpcli.set_auth(@endpoint.to_s, user, pass)
  end

  # Authenticate to the web service. You don't have to do this because
  # authentication will happen on the first request if you don't do it here.
  # @return [Boolean] true if authentication is successful, false otherwise
  def authenticate
    self.get && true
  end

  # Every Connection class must have the dispatch method. It is what sends the
  # SOAP request to the server and calls the parser method on the EWS instance.
  #
  # This was originally in the ExchangeWebService class but it was added here
  # to make the processing chain easier to modify. For example, it allows the
  # reactor pattern to handle the request with a callback.
  # @param ews [Viewpoint::EWS::SOAP::ExchangeWebService] used to call
  #   #parse_soap_response
  # @param soapmsg [String]
  # @param opts [Hash] misc opts for handling the Response
  def dispatch(ews, soapmsg, opts)
    respmsg = post(soapmsg)
    @log.debug <<-EOF.gsub(/^ {6}/, '')
Received SOAP Response:
----------------
#{Nokogiri::XML(respmsg).to_xml}
----------------
EOF
    opts[:raw_response] ? respmsg : ews.parse_soap_response(respmsg, opts)
  end

  # Send a GET to the web service
  # @return [String] If the request is successful (200) it returns the body of
  # the response.
  def get
    check_response( @httpcli.get(@endpoint) )
  end

  # Send a POST to the web service
  # @return [String] If the request is successful (200) it returns the body of
  # the response.
  def post(xmldoc)
    headers = {'Content-Type' => 'text/xml'}
    check_response( @httpcli.post(@endpoint, xmldoc, headers) )
  end

  # Send an asynchronous POST request to the web service
  # NOTE(review): unlike #post, this does not authenticate first; if the
  # async path skips HTTPClient's auth negotiation an unauthenticated
  # client may fail here — confirm (see #authenticate).
  # @return HTTPClient::Connection instance
  def post_async(xmldoc)
    authenticate
    headers = {'Content-Type' => 'text/xml'}
    @httpcli.post_async(@endpoint, xmldoc, headers)
  end

  private

  # Maps the HTTP status to a return value or a Viewpoint error:
  # 200 -> body, 302/401/500/other -> raise.
  def check_response(resp)
    case resp.status
    when 200
      resp.body
    when 302
      # @todo redirect
      raise Errors::UnhandledResponseError.new("Unhandled HTTP Redirect", resp)
    when 401
      raise Errors::UnauthorizedResponseError.new("Unauthorized request", resp)
    when 500
      # A 500 with an XML body is a SOAP fault; anything else is opaque.
      if resp.headers['Content-Type'] =~ /xml/
        err_string, err_code = parse_soap_error(resp.body)
        raise Errors::SoapResponseError.new("SOAP Error: Message: #{err_string} Code: #{err_code}", resp, err_code, err_string)
      else
        raise Errors::ServerError.new("Internal Server Error. Message: #{resp.body}", resp)
      end
    else
      raise Errors::ResponseError.new("HTTP Error Code: #{resp.status}, Msg: #{resp.body}", resp)
    end
  end

  # @param [String] xml to parse the errors from.
  # @return [Array(String, String)] fault string and fault code
  def parse_soap_error(xml)
    ndoc = Nokogiri::XML(xml)
    ns = ndoc.collect_namespaces
    err_string = ndoc.xpath("//faultstring",ns).text
    err_code = ndoc.xpath("//faultcode",ns).text
    @log.debug "Internal SOAP error. Message: #{err_string}, Code: #{err_code}"
    [err_string, err_code]
  end
end
Add comment about the reason to authenticate a client with post_async request
=begin
This file is part of Viewpoint; the Ruby library for Microsoft Exchange Web Services.
Copyright © 2011 Dan Wanek <dan.wanek@gmail.com>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
=end
require 'httpclient'
# HTTP transport for the EWS SOAP endpoint, backed by HTTPClient. Handles
# authentication, request dispatch and mapping HTTP errors to Viewpoint
# error classes.
class Viewpoint::EWS::Connection
  include Viewpoint::EWS::ConnectionHelper
  include Viewpoint::EWS

  attr_reader :endpoint

  # @param [String] endpoint the URL of the web service.
  # @example https://<site>/ews/Exchange.asmx
  # @param [Hash] opts Misc config options (mostly for developement)
  # @option opts [Fixnum] :ssl_verify_mode
  # @option opts [Fixnum] :receive_timeout override the default receive timeout
  # seconds
  # @option opts [Array] :trust_ca an array of hashed dir paths or a file
  def initialize(endpoint, opts = {})
    @log = Logging.logger[self.class.name.to_s.to_sym]
    @httpcli = HTTPClient.new
    if opts[:trust_ca]
      # Replace the default cert store with only the CAs the caller trusts.
      @httpcli.ssl_config.clear_cert_store
      opts[:trust_ca].each do |ca|
        @httpcli.ssl_config.add_trust_ca ca
      end
    end
    @httpcli.ssl_config.verify_mode = opts[:ssl_verify_mode] if opts[:ssl_verify_mode]
    @httpcli.ssl_config.ssl_version = opts[:ssl_version] if opts[:ssl_version]
    # Up the keep-alive so we don't have to do the NTLM dance as often.
    @httpcli.keep_alive_timeout = 60
    @httpcli.receive_timeout = opts[:receive_timeout] if opts[:receive_timeout]
    @endpoint = endpoint
  end

  # Registers credentials with the HTTP client for the endpoint URL.
  def set_auth(user,pass)
    @httpcli.set_auth(@endpoint.to_s, user, pass)
  end

  # Authenticate to the web service. You don't have to do this because
  # authentication will happen on the first request if you don't do it here.
  # @return [Boolean] true if authentication is successful, false otherwise
  def authenticate
    self.get && true
  end

  # Every Connection class must have the dispatch method. It is what sends the
  # SOAP request to the server and calls the parser method on the EWS instance.
  #
  # This was originally in the ExchangeWebService class but it was added here
  # to make the processing chain easier to modify. For example, it allows the
  # reactor pattern to handle the request with a callback.
  # @param ews [Viewpoint::EWS::SOAP::ExchangeWebService] used to call
  #   #parse_soap_response
  # @param soapmsg [String]
  # @param opts [Hash] misc opts for handling the Response
  def dispatch(ews, soapmsg, opts)
    respmsg = post(soapmsg)
    @log.debug <<-EOF.gsub(/^ {6}/, '')
Received SOAP Response:
----------------
#{Nokogiri::XML(respmsg).to_xml}
----------------
EOF
    opts[:raw_response] ? respmsg : ews.parse_soap_response(respmsg, opts)
  end

  # Send a GET to the web service
  # @return [String] If the request is successful (200) it returns the body of
  # the response.
  def get
    check_response( @httpcli.get(@endpoint) )
  end

  # Send a POST to the web service
  # @return [String] If the request is successful (200) it returns the body of
  # the response.
  def post(xmldoc)
    headers = {'Content-Type' => 'text/xml'}
    check_response( @httpcli.post(@endpoint, xmldoc, headers) )
  end

  # Send an asynchronous POST request to the web service
  # @return HTTPClient::Connection instance
  def post_async(xmldoc)
    # Client need to be authenticated first.
    # Related issue: https://github.com/nahi/httpclient/issues/181
    authenticate
    headers = {'Content-Type' => 'text/xml'}
    @httpcli.post_async(@endpoint, xmldoc, headers)
  end

  private

  # Maps the HTTP status to a return value or a Viewpoint error:
  # 200 -> body, 302/401/500/other -> raise.
  def check_response(resp)
    case resp.status
    when 200
      resp.body
    when 302
      # @todo redirect
      raise Errors::UnhandledResponseError.new("Unhandled HTTP Redirect", resp)
    when 401
      raise Errors::UnauthorizedResponseError.new("Unauthorized request", resp)
    when 500
      # A 500 with an XML body is a SOAP fault; anything else is opaque.
      if resp.headers['Content-Type'] =~ /xml/
        err_string, err_code = parse_soap_error(resp.body)
        raise Errors::SoapResponseError.new("SOAP Error: Message: #{err_string} Code: #{err_code}", resp, err_code, err_string)
      else
        raise Errors::ServerError.new("Internal Server Error. Message: #{resp.body}", resp)
      end
    else
      raise Errors::ResponseError.new("HTTP Error Code: #{resp.status}, Msg: #{resp.body}", resp)
    end
  end

  # @param [String] xml to parse the errors from.
  # @return [Array(String, String)] fault string and fault code
  def parse_soap_error(xml)
    ndoc = Nokogiri::XML(xml)
    ns = ndoc.collect_namespaces
    err_string = ndoc.xpath("//faultstring",ns).text
    err_code = ndoc.xpath("//faultcode",ns).text
    @log.debug "Internal SOAP error. Message: #{err_string}, Code: #{err_code}"
    [err_string, err_code]
  end
end
|
module Excon
  # Value object for an HTTP response: body, headers, status and the
  # remote peer's IP, with hash-style access to the raw data.
  class Response
    attr_accessor :body, :headers, :status, :remote_ip

    # Builds a response from a params hash; :body defaults to '' and
    # :headers to {}.
    def initialize(params={})
      defaults = { :body => '', :headers => {} }
      @data = defaults.merge(params)
      @body      = @data[:body]
      @headers   = @data[:headers]
      @status    = @data[:status]
      @remote_ip = @data[:remote_ip]
    end

    # Hash-style access to the raw response data.
    def [](key)
      @data[key]
    end

    # Snapshot of the response attributes as a hash.
    def data
      {
        :body      => body,
        :headers   => headers,
        :status    => status,
        :remote_ip => remote_ip
      }
    end

    # Deprecated alias for #data; warns on every call.
    def params
      $stderr.puts("Excon::Response#params is deprecated use Excon::Response#data instead (#{caller.first})")
      data
    end

    # Retrieve a specific header value. Header names are treated case-insensitively.
    # @param [String] name Header name
    # @return [Object, nil] the header value, or nil when absent
    def get_header(name)
      _, value = headers.find { |key, _| key.casecmp(name).zero? }
      value
    end
  end # class Response
end # module Excon
just return data directly, instead of rebuilding
module Excon
  # Value object for an HTTP response: body, headers, status and the
  # remote peer's IP. The raw params hash is exposed directly via #data.
  class Response
    attr_accessor :body, :data, :headers, :status, :remote_ip

    # Builds a response from a params hash; :body defaults to '' and
    # :headers to {}.
    def initialize(params={})
      @data = { :body => '', :headers => {} }.merge(params)
      @body      = @data[:body]
      @headers   = @data[:headers]
      @status    = @data[:status]
      @remote_ip = @data[:remote_ip]
    end

    # Hash-style access to the raw response data.
    def [](key)
      @data[key]
    end

    # Deprecated alias for #data; warns on every call.
    def params
      $stderr.puts("Excon::Response#params is deprecated use Excon::Response#data instead (#{caller.first})")
      data
    end

    # Retrieve a specific header value. Header names are treated case-insensitively.
    # @param [String] name Header name
    # @return [Object, nil] the header value, or nil when absent
    def get_header(name)
      _, value = headers.find { |key, _| key.casecmp(name).zero? }
      value
    end
  end # class Response
end # module Excon
|
require 'celluloid'
require 'yaml'
require 'active_support'
require 'active_support/core_ext/object/blank'
require 'active_support/core_ext/object/try'
require 'active_support/core_ext/numeric'
require 'active_support/core_ext/string/filters'
require_relative 'utils/celluloid_klass'
# Extend all objects with logger
Object.send(:include, Eye::Logger::ObjectExt)

# Root Celluloid actor of the Eye daemon: holds the application list and
# the current config, and mixes in the controller behaviour modules.
class Eye::Controller
  include Celluloid

  # Lazy-loaded controller behaviour modules.
  autoload :Load, 'eye/controller/load'
  autoload :Helpers, 'eye/controller/helpers'
  autoload :Commands, 'eye/controller/commands'
  autoload :Status, 'eye/controller/status'
  autoload :SendCommand, 'eye/controller/send_command'
  autoload :ShowHistory, 'eye/controller/show_history'
  autoload :Options, 'eye/controller/options'

  include Eye::Controller::Load
  include Eye::Controller::Helpers
  include Eye::Controller::Commands
  include Eye::Controller::Status
  include Eye::Controller::SendCommand
  include Eye::Controller::ShowHistory
  include Eye::Controller::Options

  attr_reader :applications, :current_config

  def initialize
    @applications = []
    @current_config = Eye::Config.new
    # Route Celluloid's internal logging through Eye's logger.
    Celluloid::logger = Eye::Logger.new('celluloid')
    Eye::SystemResources.setup
    info "starting #{Eye::ABOUT} (#{$$})"
  end

  # Settings hash of the currently loaded config.
  def settings
    current_config.settings
  end
end
add logger tag to controller
require 'celluloid'
require 'yaml'
require 'active_support'
require 'active_support/core_ext/object/blank'
require 'active_support/core_ext/object/try'
require 'active_support/core_ext/numeric'
require 'active_support/core_ext/string/filters'
require_relative 'utils/celluloid_klass'
# Extend all objects with logger
Object.send(:include, Eye::Logger::ObjectExt)

# Root Celluloid actor of the Eye daemon: holds the application list and
# the current config, and mixes in the controller behaviour modules.
class Eye::Controller
  include Celluloid

  # Lazy-loaded controller behaviour modules.
  autoload :Load, 'eye/controller/load'
  autoload :Helpers, 'eye/controller/helpers'
  autoload :Commands, 'eye/controller/commands'
  autoload :Status, 'eye/controller/status'
  autoload :SendCommand, 'eye/controller/send_command'
  autoload :ShowHistory, 'eye/controller/show_history'
  autoload :Options, 'eye/controller/options'

  include Eye::Controller::Load
  include Eye::Controller::Helpers
  include Eye::Controller::Commands
  include Eye::Controller::Status
  include Eye::Controller::SendCommand
  include Eye::Controller::ShowHistory
  include Eye::Controller::Options

  attr_reader :applications, :current_config

  def initialize
    @applications = []
    @current_config = Eye::Config.new
    # Route Celluloid's internal logging through Eye's logger.
    Celluloid::logger = Eye::Logger.new('celluloid')
    Eye::SystemResources.setup
    info "starting #{Eye::ABOUT} (#{$$})"
  end

  # Settings hash of the currently loaded config.
  def settings
    current_config.settings
  end

  # Tag used by the logger mixin for this actor's log lines.
  def logger_tag
    'Eye'
  end
end
|
module FakeFS
  # Version module
  module Version
    # Current gem version.
    VERSION = '1.2.1'.freeze

    class << self
      # Lets the module interpolate directly as the version string.
      def to_s
        VERSION
      end
    end
  end
end
v1.2.2
module FakeFS
  # Version module
  module Version
    # Current gem version.
    VERSION = '1.2.2'.freeze

    class << self
      # Lets the module interpolate directly as the version string.
      def to_s
        VERSION
      end
    end
  end
end
|
require "irs_groups/irs_group_builder"
class FamilyBuilder
attr_reader :family
attr_reader :save_list
# Builds (or finds) the Family identified by the Curam case id in `param`
# and prepares the builder state.
#
# @param param [Hash] parsed Curam application payload (:e_case_id,
#   :family_members, :tax_households, ...); nil triggers the fallback path
# @param person_mapper [Object] maps Curam aliases to persisted
#   Person/FamilyMember objects; nil also triggers the fallback path
def initialize(param, person_mapper)
  $logger ||= Logger.new("#{Rails.root}/log/family_#{Time.now.to_s.gsub(' ', '')}.log")
  $error_dir ||= File.join(Rails.root, "log", "error_xmls_from_curam_#{Time.now.to_s.gsub(' ', '')}")
  @save_list = [] # it is observed that some embedded objects are not saved. We add all embedded/associated objects to this list and save them explicitly
  @new_family_members = [] #this will include all the new applicants objects we create. In case of update application_group will have old applicants
  if param.nil? || person_mapper.nil?
    initialize_with_nil_params
    return
  end
  @is_update = true # true = we update an existing application group, false = we create a new application group
  @family_members_params = param[:family_members]
  @params = param
  filtered_param = param.slice(:e_case_id, :submitted_at, :e_status_code, :application_type)
  @person_mapper = person_mapper
  # Find-or-initialize the Family by its Curam case id.
  @family = Family.where(e_case_id: filtered_param[:e_case_id]).first
  if @family.nil?
    @family = Family.new(filtered_param) #we create a new application group from the xml
    @is_update = false # means this is a create
  end
  @family.submitted_at = filtered_param[:submitted_at]
  @family.updated_by = "curam_system_service"
  get_household
end
# Fallback initializer used when params or the person mapper are missing:
# creates a brand-new Family with a random 12-letter (A-Z) e_case_id.
def initialize_with_nil_params
  @is_update = false
  @family = Family.new
  @family.e_case_id = Array.new(12) { rand(65..90).chr }.join
  @family.submitted_at = DateTime.now
  @family.updated_by = "curam_system_service"
  get_household
end
# Orchestrates the full build: enrollments, tax households, financial
# statements and the coverage household, then saves and finally builds
# IRS groups. Returns the result of #save (captured before add_irsgroups
# runs).
def build
  add_hbx_enrollments
  add_tax_households(@params.to_hash[:tax_households])
  add_financial_statements(@params[:family_members])
  add_coverage_household
  return_obj = save
  add_irsgroups
  return_obj
end
# Finds or builds a FamilyMember for the given params. Deduplicates by
# person_id; a new primary applicant demotes any existing one. New members
# (plus their authority member) are queued on @save_list and
# @new_family_members for later explicit saving.
#
# @return [FamilyMember] the existing or newly built member
def add_family_member(family_member_params)
  if @family.family_members.map(&:person_id).include? family_member_params[:person].id
    #puts "Added already existing family_member"
    family_member = @family.family_members.where(person_id: family_member_params[:person].id).first
  else
    #puts "Added a new family_member"
    if family_member_params[:is_primary_applicant] == "true"
      # Raises on a duplicate primary applicant, then clears the old flag.
      is_primary_applicant_unique?(family_member_params)
      reset_exisiting_primary_applicant
    end
    family_member = @family.family_members.build(filter_family_member_params(family_member_params))
    @new_family_members << family_member
    # Prefer the person's authority member for demographics/alias updates.
    member = family_member.person.members.select do |m|
      m.authority?
    end.first
    member = family_member.person.authority_member if member.nil?
    set_person_demographics(member, family_member_params[:person_demographics]) if family_member_params[:person_demographics]
    set_alias_ids(member, family_member_params[:alias_ids]) if family_member_params[:alias_ids]
    @save_list << member
    @save_list << family_member
  end
  family_member
end
# Guard against the same person being primary applicant on two families.
# NOTE(review): despite the `?` name this is not a predicate — it RAISES
# when the person already appears on any family, and returns nil otherwise.
def is_primary_applicant_unique?(family_member_params)
  person = family_member_params[:person]
  if Family.where({:family_members => {"$elemMatch" => {:person_id => Moped::BSON::ObjectId(person.id)}}}).length > 0
    raise("Duplicate Primary Applicant person_id : #{person.id}")
  end
end
# Populates member id aliases from URI-style strings of the form
# "...#<id>"; the text before '#' selects which alias field is set
# (aceds / concern_role / person).
#
# @param member [#aceds_id=, #e_concern_role_id=, #e_person_id=] authority member
# @param alias_ids_params [Array<String>] alias URI strings
def set_alias_ids(member, alias_ids_params)
  alias_ids_params.each do |alias_id_params|
    alias_id = alias_id_params.split('#').last
    # BUG FIX: the original used `return` here, aborting ALL remaining
    # aliases as soon as one entry had no id fragment; `next` skips only
    # the malformed entry.
    next if alias_id.nil?
    if alias_id_params.include? "aceds"
      member.aceds_id = alias_id
    elsif alias_id_params.include? "concern_role"
      member.e_concern_role_id = alias_id
    elsif alias_id_params.include? "person"
      member.e_person_id = alias_id
    end
  end
end
# Clears the primary-applicant flag on every current family member
# (called before promoting a new primary applicant).
# NOTE(review): "exisiting" is a typo, but the name is kept for callers.
def reset_exisiting_primary_applicant
  @family.family_members.each { |member| member.is_primary_applicant = false }
end
# Copies demographic fields from the params hash (string keys) onto the
# member, leaving any field untouched when the param is absent/nil.
def set_person_demographics(member, person_demographics_params)
  %w[dob death_date ssn gender ethnicity race marital_status].each do |field|
    value = person_demographics_params[field]
    member.send("#{field}=", value) if value
  end
end
# Whitelists the attributes used to build a FamilyMember and drops nil
# values; the caller's hash is not mutated.
def filter_family_member_params(family_member_params)
  family_member_params
    .slice(:is_primary_applicant, :is_coverage_applicant, :person)
    .reject { |_key, value| value.nil? }
end
# Memoized household lookup: a new household is built for a create, or for
# an update whose member list changed; otherwise the family's active
# household is reused. Newly built households are queued on @save_list.
#
# @return [Household]
def get_household
  return @household if @household
  if !@is_update
    #puts "New Application Group Case"
    @household = self.family.households.build #if new application group then create new household
    @save_list << @household
  elsif have_family_members_changed?
    #puts "Update Application Group Case - Applicants have changed. Creating new household"
    @household = self.family.households.build #if applicants have changed then create new household
    @save_list << @household
  else
    #puts "Update Application Group Case - @household = self.family.active_household"
    @household = self.family.active_household #if update and applicants haven't changed then use the active household
  end
  return @household
end
# True when the set of person ids in the incoming params differs from the
# ids already on the family (order-insensitive comparison).
def have_family_members_changed?
  existing_ids = @family.family_members.map(&:person_id).sort
  incoming_ids = @family_members_params.map { |member_params| member_params[:person].id }.sort
  existing_ids != incoming_ids
end
# Builds a coverage household on the current household containing the new
# coverage-seeking members whose relationship to the primary applicant is
# valid; invalid relationships are logged and skipped. No-op when no new
# members were added.
def add_coverage_household
  return if @new_family_members.length == 0
  #TODO decide where to get submitted_at from
  coverage_household = @household.coverage_households.build({submitted_at: Time.now})
  @new_family_members.each do |family_member|
    if family_member.is_coverage_applicant
      if valid_relationship?(family_member)
        coverage_household_member = coverage_household.coverage_household_members.build
        coverage_household_member.applicant_id = family_member.id
      else
        $logger.warn "WARNING: Family e_case_id: #{@family.e_case_id} Relationship #{@family.primary_applicant.person.find_relationship_with(family_member.person)} not valid for a coverage household between primary applicant person #{@family.primary_applicant.person.id} and #{family_member.person.id}"
      end
    end
  end
end
# A member can join the coverage household when there is no primary
# applicant (responsible-party case), when the member IS the primary
# applicant, or when their relationship to the primary applicant is one
# of the allowed types.
def valid_relationship?(family_member)
  primary = @family.primary_applicant
  return true if primary.nil? #responsible party case
  return true if primary.person.id == family_member.person.id
  allowed = %w{self spouse life_partner child ward foster_child adopted_child stepson_or_stepdaughter}
  allowed.include?(primary.person.find_relationship_with(family_member.person))
end
# Creates an hbx enrollment for every policy held by the primary
# applicant's person; no-op when the family has no primary applicant.
def add_hbx_enrollments
  primary = @family.primary_applicant
  return if primary.nil?
  primary.person.policies.each { |policy| add_hbx_enrollment(policy) }
end
# Builds an hbx enrollment on the household for one policy: copies APTC
# amounts, classifies the enrollment kind, and adds an enrollment member
# for every enrollee (creating missing FamilyMembers on the fly).
# NOTE(review): the local is consistently misspelled "hbx_enrollement";
# kept as-is since this is a doc-only pass.
def add_hbx_enrollment(policy)
  return if @family.primary_applicant.nil?
  hbx_enrollement = @household.hbx_enrollments.build
  hbx_enrollement.policy = policy
  @family.primary_applicant.broker_id = Broker.find(policy.broker_id).id unless policy.broker_id.blank?
  hbx_enrollement.elected_aptc_in_dollars = policy.elected_aptc
  hbx_enrollement.applied_aptc_in_dollars = policy.applied_aptc
  hbx_enrollement.submitted_at = @family.submitted_at
  # Kind: employer plan wins; otherwise assisted vs unassisted by APTC.
  hbx_enrollement.kind = "employer_sponsored" unless policy.employer_id.blank?
  hbx_enrollement.kind = "unassisted_qhp" if (hbx_enrollement.applied_aptc_in_cents == 0 && policy.employer.blank?)
  hbx_enrollement.kind = "insurance_assisted_qhp" if (hbx_enrollement.applied_aptc_in_cents > 0 && policy.employer.blank?)
  policy.enrollees.each do |enrollee|
    begin
      person = Person.find_for_member_id(enrollee.m_id)
      @family.family_members << FamilyMember.new(person: person) unless @family.person_is_family_member?(person)
      family_member = @family.find_family_member_by_person(person)
      hbx_enrollement_member = hbx_enrollement.hbx_enrollment_members.build({family_member: family_member,
        premium_amount_in_cents: enrollee.pre_amt})
      hbx_enrollement_member.is_subscriber = true if (enrollee.rel_code == "self")
    rescue FloatDomainError
      # Skip this enrollee on a FloatDomainError — presumably raised while
      # converting enrollee.pre_amt; confirm the source of this exception.
      next
    end
  end
end
#TODO currently only handling case we create new application case, where 1 irs group is built with 1 coverage household.
# Updates the family's IRS group on an update, or builds and saves a new
# one on a create.
def add_irsgroups
  irs_group_builder = IrsGroupBuilder.new(self.family.id)
  if @is_update
    irs_group_builder.update
  else
    irs_group_builder.build
    irs_group_builder.save
  end
end
# Builds tax households (with their eligibility determinations) on the
# current household, and links each tax-household member to the matching
# family member resolved through the person mapper's alias map.
def add_tax_households(tax_households_params)
  tax_households_params.map do |tax_household_params|
    tax_household = @household.tax_households.build(filter_tax_household_params(tax_household_params))
    eligibility_determinations_params = tax_household_params[:eligibility_determinations]
    eligibility_determinations_params.each do |eligibility_determination_params|
      tax_household.eligibility_determinations.build(eligibility_determination_params)
    end
    tax_household_params[:tax_household_members].map do |tax_household_member_params|
      tax_household_member = tax_household.tax_household_members.build(filter_tax_household_member_params(tax_household_member_params))
      # Resolve Curam person id -> person URI -> Person -> FamilyMember.
      person_uri = @person_mapper.alias_map[tax_household_member_params[:person_id]]
      person_obj = @person_mapper.people_map[person_uri].first
      new_family_member = get_family_member(person_obj)
      new_family_member = verify_person_id(new_family_member)
      tax_household_member.applicant_id = new_family_member.id
      tax_household_member.family_member = new_family_member
    end
  end
end
# Returns the family member unchanged.
# NOTE(review): the `if` branch body is empty, so the "concern_role" check
# currently has no effect — presumably unfinished validation; confirm intent.
def verify_person_id(family_member)
  if family_member.id.to_s.include? "concern_role"
  end
  family_member
end
# Whitelists the eligibility flags used to build a TaxHouseholdMember and
# drops nil values; the caller's hash is not mutated.
def filter_tax_household_member_params(tax_household_member_params)
  tax_household_member_params
    .slice(:is_ia_eligible, :is_medicaid_chip_eligible, :is_subscriber)
    .reject { |_key, value| value.nil? }
end
# Keeps only a non-nil :id attribute for building a TaxHousehold.
def filter_tax_household_params(tax_household_params)
  tax_household_params.slice(:id).reject { |_key, value| value.nil? }
end
## Fetches the family_member object either from application_group or person_mapper
#
# BUG FIX: the original ended with `new_family_member = ... unless
# new_family_member`, whose value is nil when the member was already on
# the family — so a SUCCESSFUL lookup returned nil. The found member (or
# the person-mapper fallback) is now always returned.
def get_family_member(person_obj)
  mapped = @person_mapper.applicant_map[person_obj.id]
  self.family.family_members.find { |family_member| family_member.id == mapped.id } || mapped
end
# For each family member's params, attaches their financial statements
# (with incomes, deductions and alternate benefits) to the single matching
# tax household member.
def add_financial_statements(family_members_params)
  family_members_params.map do |family_member_params|
    family_member_params[:financial_statements].each do |financial_statement_params|
      tax_household_member = find_tax_household_member(@person_mapper.applicant_map[family_member_params[:person].id])
      financial_statement = tax_household_member.financial_statements.build(filter_financial_statement_params(financial_statement_params))
      financial_statement_params[:incomes].each do |income_params|
        financial_statement.incomes.build(income_params)
      end
      financial_statement_params[:deductions].each do |deduction_params|
        financial_statement.deductions.build(deduction_params)
      end
      # NOTE(review): params key is "alternative_benefits" but the model
      # association is "alternate_benefits".
      financial_statement_params[:alternative_benefits].each do |alternative_benefit_params|
        financial_statement.alternate_benefits.build(alternative_benefit_params)
      end
    end
  end
end
=begin
def add_financial_statements(family_members_params)
family_members_params.map do |family_members_params|
family_members_params[:financial_statements].each do |financial_statement_params|
tax_household_member = find_tax_household_member(@person_mapper.applicant_map[family_members_params[:person].id])
financial_statement = tax_household_member.financial_statements.build(filter_financial_statement_params(financial_statement_params))
financial_statement_params[:incomes].each do |income_params|
financial_statement.incomes.build(income_params)
end
financial_statement_params[:deductions].each do |deduction_params|
financial_statement.deductions.build(deduction_params)
end
financial_statement_params[:alternative_benefits].each do |alternative_benefit_params|
financial_statement.alternate_benefits.build(alternative_benefit_params)
end
end
end
end
=end
# Whitelists the statement attributes and drops any that were not supplied.
def filter_financial_statement_params(financial_statement_params)
  financial_statement_params
    .slice(:type, :is_tax_filing_together, :tax_filing_status)
    .reject { |_key, value| value.nil? }
end
# Locates the tax household member whose applicant_id matches the given
# family member, searching every tax household of every household.
def find_tax_household_member(family_member)
  self.family.households
      .flat_map(&:tax_households)
      .flat_map(&:tax_household_members)
      .find { |member| member.applicant_id == family_member.id }
end
# Persists the family and every object queued in the save list.
# Returns the saved family. (Removed the unused local `id` that captured
# @family.save!'s return value.)
def save
  @family.save!
  save_save_list
  @family #return the saved family
end
# Persist every queued embedded/associated object, skipping nil slots.
def save_save_list
  save_list.compact.each(&:save!)
end
end
NOTE: when we update a family, we first clear its previous hbx_enrollments and tax_households (via @household.hbx_enrollments.delete_all and @household.tax_households.delete_all).
require "irs_groups/irs_group_builder"
class FamilyBuilder
attr_reader :family
attr_reader :save_list
# Finds (by e_case_id) or builds the Family to import into, and prepares the
# household that subsequent add_* calls will populate.
#
# @param param [Hash, nil] parsed application-group params from the XML
# @param person_mapper [Object, nil] maps XML aliases/URIs to person and
#   applicant objects
def initialize(param, person_mapper)
  $logger ||= Logger.new("#{Rails.root}/log/family_#{Time.now.to_s.gsub(' ', '')}.log")
  $error_dir ||= File.join(Rails.root, "log", "error_xmls_from_curam_#{Time.now.to_s.gsub(' ', '')}")
  @save_list = [] # it is observed that some embedded objects are not saved. We add all embedded/associated objects to this list and save them explicitly
  @new_family_members = [] #this will include all the new applicants objects we create. In case of update application_group will have old applicants
  if param.nil? || person_mapper.nil?
    initialize_with_nil_params
    return
  end
  @is_update = true # true = we update an existing application group, false = we create a new application group
  @family_members_params = param[:family_members]
  @params = param
  filtered_param = param.slice(:e_case_id, :submitted_at, :e_status_code, :application_type)
  @person_mapper = person_mapper
  @family = Family.where(e_case_id: filtered_param[:e_case_id]).first
  if @family.nil?
    @family = Family.new(filtered_param) #we create a new application group from the xml
    @is_update = false # means this is a create
  end
  @family.submitted_at = filtered_param[:submitted_at]
  @family.updated_by = "curam_system_service"
  get_household
end
# Fallback used when the importer receives no params: creates a fresh family
# with a random 12-letter (A-Z) e_case_id.
def initialize_with_nil_params
  @is_update = false
  @family = Family.new
  @family.e_case_id = (0...12).map { (65 + rand(26)).chr }.join
  @family.submitted_at = DateTime.now
  @family.updated_by = "curam_system_service"
  get_household
end
# Orchestrates the full import: enrollments, tax households, financial
# statements, coverage household; then persists everything and builds the
# IRS groups. Returns the saved Family.
def build
  add_hbx_enrollments
  add_tax_households(@params.to_hash[:tax_households])
  add_financial_statements(@params[:family_members])
  add_coverage_household
  return_obj = save
  # NOTE(review): IRS groups are built after save — presumably so the
  # builder sees persisted ids; confirm the ordering requirement.
  add_irsgroups
  return_obj
end
# Returns the family member for the given params, reusing an existing member
# when the person already belongs to the family; otherwise builds a new one,
# tracking it in @new_family_members and @save_list.
def add_family_member(family_member_params)
  if @family.family_members.map(&:person_id).include? family_member_params[:person].id
    #puts "Added already existing family_member"
    family_member = @family.family_members.where(person_id: family_member_params[:person].id).first
  else
    #puts "Added a new family_member"
    # A new primary applicant must be globally unique and demotes any
    # currently flagged primary applicant.
    if family_member_params[:is_primary_applicant] == "true"
      is_primary_applicant_unique?(family_member_params)
      reset_exisiting_primary_applicant
    end
    family_member = @family.family_members.build(filter_family_member_params(family_member_params))
    @new_family_members << family_member
    # Prefer the person's authority member record when copying demographics
    # and alias ids.
    member = family_member.person.members.select do |m|
      m.authority?
    end.first
    member = family_member.person.authority_member if member.nil?
    set_person_demographics(member, family_member_params[:person_demographics]) if family_member_params[:person_demographics]
    set_alias_ids(member, family_member_params[:alias_ids]) if family_member_params[:alias_ids]
    @save_list << member
    @save_list << family_member
  end
  family_member
end
# Raises when any family already claims this person as a member.
# NOTE(review): despite the ? name this does not return a boolean — it
# raises on duplicates and its return value is unused by the caller.
def is_primary_applicant_unique?(family_member_params)
  person = family_member_params[:person]
  if Family.where({:family_members => {"$elemMatch" => {:person_id => Moped::BSON::ObjectId(person.id)}}}).length > 0
    raise("Duplicate Primary Applicant person_id : #{person.id}")
  end
end
# Copies external alias identifiers (aceds / concern_role / person) onto the
# member record. Each alias string looks like "...#<id>".
#
# BUG FIX: a blank alias used to `return`, silently dropping every remaining
# alias in the list; it now `next`s to the following alias.
def set_alias_ids(member, alias_ids_params)
  alias_ids_params.each do |alias_id_params|
    alias_id = alias_id_params.split('#').last
    next if alias_id.nil?
    if alias_id_params.include? "aceds"
      member.aceds_id = alias_id
    elsif alias_id_params.include? "concern_role"
      member.e_concern_role_id = alias_id
    elsif alias_id_params.include? "person"
      member.e_person_id = alias_id
    end
  end
end
# Clears the primary-applicant flag on every existing family member so a
# newly designated primary applicant becomes the only one.
# NOTE(review): "exisiting" is a typo, but the name is kept because
# add_family_member calls it by this spelling.
def reset_exisiting_primary_applicant
  @family.family_members.each { |member| member.is_primary_applicant = false }
end
# Copies any demographic fields present (string keys) in the params hash
# onto the member record; absent or nil fields are left untouched.
def set_person_demographics(member, person_demographics_params)
  %w[dob death_date ssn gender ethnicity race marital_status].each do |field|
    value = person_demographics_params[field]
    member.public_send("#{field}=", value) if value
  end
end
# Whitelists the family-member attributes and drops any that are nil.
def filter_family_member_params(family_member_params)
  family_member_params
    .slice(:is_primary_applicant, :is_coverage_applicant, :person)
    .reject { |_key, value| value.nil? }
end
# Memoizes @household: a newly built household for creates, or for updates
# where the member set changed; otherwise the family's active household.
def get_household
  return @household if @household
  if !@is_update
    #puts "New Application Group Case"
    @household = self.family.households.build #if new application group then create new household
    @save_list << @household
  elsif have_family_members_changed?
    #puts "Update Application Group Case - Applicants have changed. Creating new household"
    @household = self.family.households.build #if applicants have changed then create new household
    @save_list << @household
  else
    #puts "Update Application Group Case - @household = self.family.active_household"
    @household = self.family.active_household #if update and applicants haven't changed then use the active household
  end
  return @household
end
# True when the set of person ids in the incoming params differs from the
# ids already attached to the family (order-insensitive).
# (Replaced the `if == then return false else return true` pattern with a
# direct comparison.)
def have_family_members_changed?
  current_list = @family.family_members.map(&:person_id).sort
  new_list = @family_members_params.map { |family_member_params| family_member_params[:person].id }.sort
  current_list != new_list
end
# Builds a coverage household containing every newly added family member who
# is a coverage applicant with a valid relationship to the primary
# applicant; invalid relationships are logged and skipped.
def add_coverage_household
  return if @new_family_members.length == 0
  #TODO decide where to get submitted_at from
  coverage_household = @household.coverage_households.build({submitted_at: Time.now})
  @new_family_members.each do |family_member|
    if family_member.is_coverage_applicant
      if valid_relationship?(family_member)
        coverage_household_member = coverage_household.coverage_household_members.build
        coverage_household_member.applicant_id = family_member.id
      else
        $logger.warn "WARNING: Family e_case_id: #{@family.e_case_id} Relationship #{@family.primary_applicant.person.find_relationship_with(family_member.person)} not valid for a coverage household between primary applicant person #{@family.primary_applicant.person.id} and #{family_member.person.id}"
      end
    end
  end
end
# A family member may join the coverage household when there is no primary
# applicant (responsible-party case), when the member IS the primary
# applicant, or when their relationship to the primary applicant is one of
# the allowed coverage relationships.
# (Hoisted the repeated @family.primary_applicant lookup and returned the
# include? result directly instead of `if ... return true else return false`.)
def valid_relationship?(family_member)
  primary = @family.primary_applicant
  return true if primary.nil? #responsible party case
  return true if primary.person.id == family_member.person.id
  valid_relationships = %w{self spouse life_partner child ward foster_child adopted_child stepson_or_stepdaughter}
  valid_relationships.include?(primary.person.find_relationship_with(family_member.person))
end
# Rebuilds the household's enrollments from the primary applicant's
# policies; any existing enrollments are cleared first.
def add_hbx_enrollments
  return if @family.primary_applicant.nil?
  @household.hbx_enrollments.delete_all #clear any existing
  @family.primary_applicant.person.policies.each do |policy|
    add_hbx_enrollment(policy)
  end
end
# Builds one enrollment for a policy, classifying it as employer_sponsored /
# unassisted_qhp / insurance_assisted_qhp and attaching one enrollment
# member per policy enrollee. (Local "hbx_enrollement" spelling kept as-is.)
def add_hbx_enrollment(policy)
  return if @family.primary_applicant.nil?
  hbx_enrollement = @household.hbx_enrollments.build
  hbx_enrollement.policy = policy
  @family.primary_applicant.broker_id = Broker.find(policy.broker_id).id unless policy.broker_id.blank?
  hbx_enrollement.elected_aptc_in_dollars = policy.elected_aptc
  hbx_enrollement.applied_aptc_in_dollars = policy.applied_aptc
  hbx_enrollement.submitted_at = @family.submitted_at
  hbx_enrollement.kind = "employer_sponsored" unless policy.employer_id.blank?
  # NOTE(review): dollars are written above but cents are read below —
  # presumably the model converts between the two; confirm.
  hbx_enrollement.kind = "unassisted_qhp" if (hbx_enrollement.applied_aptc_in_cents == 0 && policy.employer.blank?)
  hbx_enrollement.kind = "insurance_assisted_qhp" if (hbx_enrollement.applied_aptc_in_cents > 0 && policy.employer.blank?)
  policy.enrollees.each do |enrollee|
    begin
      person = Person.find_for_member_id(enrollee.m_id)
      @family.family_members << FamilyMember.new(person: person) unless @family.person_is_family_member?(person)
      family_member = @family.find_family_member_by_person(person)
      hbx_enrollement_member = hbx_enrollement.hbx_enrollment_members.build({family_member: family_member,
                                                                            premium_amount_in_cents: enrollee.pre_amt})
      hbx_enrollement_member.is_subscriber = true if (enrollee.rel_code == "self")
    rescue FloatDomainError
      # NOTE(review): rescued per-enrollee so one bad record doesn't abort
      # the policy — it is unclear which call raises FloatDomainError; confirm.
      next
    end
  end
end
#TODO currently only handling case we create new application case, where 1 irs group is built with 1 coverage household.
# Builds (or, on update, rebuilds) the IRS groups for the family.
# (Hoisted the duplicated IrsGroupBuilder construction out of both branches.)
def add_irsgroups
  irs_group_builder = IrsGroupBuilder.new(self.family.id)
  if @is_update
    irs_group_builder.update
  else
    irs_group_builder.build
    irs_group_builder.save
  end
end
# Rebuilds the tax households on @household: clears any existing ones, then
# builds each tax household with its eligibility determinations and members
# (resolved through the person mapper).
def add_tax_households(tax_households_params)
  @household.tax_households.delete_all
  tax_households_params.map do |tax_household_params|
    tax_household = @household.tax_households.build(filter_tax_household_params(tax_household_params))
    eligibility_determinations_params = tax_household_params[:eligibility_determinations]
    eligibility_determinations_params.each do |eligibility_determination_params|
      tax_household.eligibility_determinations.build(eligibility_determination_params)
    end
    tax_household_params[:tax_household_members].map do |tax_household_member_params|
      tax_household_member = tax_household.tax_household_members.build(filter_tax_household_member_params(tax_household_member_params))
      # Resolve the XML person reference: alias -> person URI -> person object
      # -> family member on this family.
      person_uri = @person_mapper.alias_map[tax_household_member_params[:person_id]]
      person_obj = @person_mapper.people_map[person_uri].first
      new_family_member = get_family_member(person_obj)
      new_family_member = verify_person_id(new_family_member)
      tax_household_member.applicant_id = new_family_member.id
      tax_household_member.family_member = new_family_member
    end
  end
end
# Returns the given family_member unchanged.
# NOTE(review): the original contained an empty
# `if family_member.id.to_s.include? "concern_role"` branch with no body —
# dead code, removed. If concern-role ids were meant to be remapped here,
# that logic was never implemented.
def verify_person_id(family_member)
  family_member
end
# Whitelists the eligibility flags from the raw member params and drops any
# that were not supplied (nil). The caller's hash is left untouched.
def filter_tax_household_member_params(tax_household_member_params)
  permitted = tax_household_member_params
              .slice(:is_ia_eligible, :is_medicaid_chip_eligible, :is_subscriber)
  permitted.reject { |_key, value| value.nil? }
end
# Keeps only the :id key from the raw tax household params, and drops it
# too when it is nil.
def filter_tax_household_params(tax_household_params)
  tax_household_params.slice(:id).reject { |_key, value| value.nil? }
end
## Fetches the family_member object either from application_group or person_mapper
#
# Looks for the mapped applicant among the family's existing members and
# falls back to the mapped applicant itself when it is not attached yet.
#
# BUG FIX: the previous implementation ended with
#   `new_family_member = ... unless new_family_member`
# whose value is nil when the guard is truthy, so the method returned nil
# precisely when the member HAD been found in self.family.
def get_family_member(person_obj)
  mapped_member = @person_mapper.applicant_map[person_obj.id]
  existing = self.family.family_members.find do |family_member|
    family_member.id == mapped_member.id
  end
  existing || mapped_member
end
# For each family member's params, attaches their financial statements
# (with incomes, deductions and alternate benefits) to EVERY tax household
# member that references that family member.
def add_financial_statements(family_members_params)
  family_members_params.map do |family_member_params|
    family_member_params[:financial_statements].each do |financial_statement_params|
      tax_household_members = find_tax_household_members(@person_mapper.applicant_map[family_member_params[:person].id])
      tax_household_members.each do |tax_household_member|
        financial_statement = tax_household_member.financial_statements.build(filter_financial_statement_params(financial_statement_params))
        financial_statement_params[:incomes].each do |income_params|
          financial_statement.incomes.build(income_params)
        end
        financial_statement_params[:deductions].each do |deduction_params|
          financial_statement.deductions.build(deduction_params)
        end
        # NOTE(review): params key is "alternative_benefits" but the model
        # association is "alternate_benefits".
        financial_statement_params[:alternative_benefits].each do |alternative_benefit_params|
          financial_statement.alternate_benefits.build(alternative_benefit_params)
        end
      end
    end
  end
end
=begin
def add_financial_statements(family_members_params)
family_members_params.map do |family_members_params|
family_members_params[:financial_statements].each do |financial_statement_params|
tax_household_member = find_tax_household_member(@person_mapper.applicant_map[family_members_params[:person].id])
financial_statement = tax_household_member.financial_statements.build(filter_financial_statement_params(financial_statement_params))
financial_statement_params[:incomes].each do |income_params|
financial_statement.incomes.build(income_params)
end
financial_statement_params[:deductions].each do |deduction_params|
financial_statement.deductions.build(deduction_params)
end
financial_statement_params[:alternative_benefits].each do |alternative_benefit_params|
financial_statement.alternate_benefits.build(alternative_benefit_params)
end
end
end
end
=end
# Whitelists the statement attributes and drops any that were not supplied.
def filter_financial_statement_params(financial_statement_params)
  financial_statement_params
    .slice(:type, :is_tax_filing_together, :tax_filing_status)
    .reject { |_key, value| value.nil? }
end
# Returns every tax household member (across all households and tax
# households of the family) whose applicant_id matches the given member.
def find_tax_household_members(family_member)
  self.family.households
      .flat_map(&:tax_households)
      .flat_map(&:tax_household_members)
      .select { |member| member.applicant_id == family_member.id }
end
# Persists the family and every object queued in the save list.
# Returns the saved family. (Removed the unused local `id` that captured
# @family.save!'s return value.)
def save
  @family.save!
  save_save_list
  @family #return the saved family
end
# Persist every queued embedded/associated object, skipping nil slots.
def save_save_list
  save_list.compact.each(&:save!)
end
end
|
module Fech
  # Downloads an FEC bulk-data zip for an election cycle and streams its rows
  # into a receiver: a CSV file (:format => :csv), a database table
  # (:format => :db, where :connection is a [db, table_name] pair), or an
  # in-memory array (any other format).
  class Table
    def initialize(cycle, opts={})
      @cycle = cycle
      @headers = opts[:headers]   # Array<Symbol> of column names, in file order
      @file = opts[:file]         # path/name prefix of the remote file
      @format = opts[:format]
      @receiver = opts[:connection] || receiver
      @parser = parser            # [header, lambda] pairs, built once
    end

    # Default receiver when no :connection is given: an appendable CSV writer
    # for :csv, otherwise a plain array.
    # NOTE(review): the CSV handle is never explicitly closed — confirm rows
    # are flushed before the process exits.
    def receiver
      if @format == :csv
        CSV.open("#{@file}#{@cycle.to_s[2..3]}.csv", 'a+', headers: @headers, write_headers: true)
      else
        []
      end
    end

    # Fetches, unzips and parses the remote file, feeding every formatted row
    # to the receiver. Returns the receiver.
    def retrieve_data
      fetch_file { |row| enter_row(row) }
      @receiver
    end

    # Routes one parsed row into the receiver according to the format.
    def enter_row(row)
      case @format
      when :db
        if table_exist?
          @receiver << row
        else
          create_table(row)
        end
      when :csv
        @receiver << row.values
      else
        @receiver << row
      end
    end

    # the @receiver obj is the database itself.
    # This assumes the table needs to be created.
    def table_exist?
      @receiver.respond_to? :columns
    end

    # Creates the table from the first row's keys/value classes (nil values
    # default to String columns), swaps @receiver to the new dataset, and
    # inserts the row.
    def create_table(row)
      db, table = @receiver
      table = table.to_s.pluralize.to_sym
      db.create_table(table) { primary_key :id }
      row.each do |k, v|
        v = v.nil? ? String : v.class
        db.alter_table table do
          add_column k, v
        end
      end
      @receiver = db[table]
      @receiver << row
    end

    # Downloads the cycle's zip from the FEC FTP server, then unzips it and
    # yields each formatted row to the block.
    def fetch_file(&blk)
      zip_file = "#{@file}#{@cycle.to_s[2..3]}.zip"
      Net::FTP.open("ftp.fec.gov") do |ftp|
        ftp.login
        ftp.chdir("./FEC/#{@cycle}")
        begin
          ftp.get(zip_file, "./#{zip_file}")
        rescue Net::FTPPermError
          raise 'File not found - please try the other methods'
        end
      end
      unzip(zip_file, &blk)
    end

    # Builds one [header, lambda] pair per column: money-like columns become
    # floats, :filing_id an integer, *_date columns dates, everything else the
    # raw string.
    def parser
      @headers.map.with_index do |h, i|
        if h.to_s =~ /cash|amount|contributions|total|loan|transfer|debts|refund|expenditure/
          [h, ->(line) { line[i].to_f }]
        elsif h == :filing_id
          [h, ->(line) { line[i].to_i }]
        elsif h.to_s =~ /_date/
          [h, ->(line) { parse_date(line[i]) }]
        else
          [h, ->(line) { line[i] }]
        end
      end
    end

    # Splits a raw pipe-delimited line (scrubbing invalid bytes) and applies
    # each column's parser. Returns a header => value hash.
    def format_row(line)
      hash = {}
      line = line.encode('UTF-8', invalid: :replace, replace: ' ').chomp.split("|")
      @parser.each { |k, blk| hash[k] = blk.call(line) }
      hash
    end

    # Parses one date column. Empty dates become a start-of-cycle placeholder
    # when loading into a DB table, and an empty string otherwise.
    #
    # BUG FIX: the empty-date guard used to also require table_exist?, so an
    # empty date outside :db mode fell through to Date.parse('') and raised
    # ArgumentError. (The nested `if table_exist?` also made the '' branch
    # unreachable.)
    def parse_date(date)
      if date == ''
        return table_exist? ? Date.new(@cycle, 1, 1) : ''
      end
      if date.length == 8
        Date.strptime(date, "%m%d%Y")
      else
        Date.parse(date)
      end
    end

    # Extracts each entry, streams its rows to the block, and cleans up both
    # the extracted files and the archive.
    #
    # BUG FIX: the archive used to be deleted inside the entry loop while the
    # Zip::File handle was still open, which raised Errno::ENOENT on archives
    # with more than one entry; it is now deleted once, after the handle is
    # closed.
    def unzip(zip_file, &blk)
      Zip::File.open(zip_file) do |zip|
        zip.each do |entry|
          entry.extract("./#{entry.name}") if !File.file?(entry.name)
          File.foreach(entry.name) do |row|
            blk.call(format_row(row))
          end
          File.delete(entry.name)
        end
      end
      File.delete(zip_file)
    end
  end
end
Added a location property to the table: an optional directory prefix for extracted files.
module Fech
  # Downloads an FEC bulk-data zip for an election cycle and streams its rows
  # into a receiver: a CSV file (:format => :csv), a database table
  # (:format => :db, where :connection is a [db, table_name] pair), or an
  # in-memory array (any other format). Files can be extracted under an
  # optional :location directory prefix.
  class Table
    def initialize(cycle, opts={})
      @cycle = cycle
      @headers = opts[:headers]   # Array<Symbol> of column names, in file order
      @file = opts[:file]         # path/name prefix of the remote file
      @format = opts[:format]
      @location = opts[:location] # optional directory prefix for extracted files
      @receiver = opts[:connection] || receiver
      @parser = parser            # [header, lambda] pairs, built once
    end

    # Default receiver when no :connection is given: an appendable CSV writer
    # for :csv, otherwise a plain array.
    # NOTE(review): the CSV handle is never explicitly closed — confirm rows
    # are flushed before the process exits.
    def receiver
      if @format == :csv
        CSV.open("#{@file}#{@cycle.to_s[2..3]}.csv", 'a+', headers: @headers, write_headers: true)
      else
        []
      end
    end

    # Fetches, unzips and parses the remote file, feeding every formatted row
    # to the receiver. Returns the receiver.
    def retrieve_data
      fetch_file { |row| enter_row(row) }
      @receiver
    end

    # Routes one parsed row into the receiver according to the format.
    def enter_row(row)
      case @format
      when :db
        if table_exist?
          @receiver << row
        else
          create_table(row)
        end
      when :csv
        @receiver << row.values
      else
        @receiver << row
      end
    end

    # the @receiver obj is the database itself.
    # This assumes the table needs to be created.
    def table_exist?
      @receiver.respond_to? :columns
    end

    # Creates the table from the first row's keys/value classes (nil values
    # default to String columns), swaps @receiver to the new dataset, and
    # inserts the row.
    def create_table(row)
      db, table = @receiver
      table = table.to_s.pluralize.to_sym
      db.create_table(table) { primary_key :id }
      row.each do |k, v|
        v = v.nil? ? String : v.class
        db.alter_table table do
          add_column k, v
        end
      end
      @receiver = db[table]
      @receiver << row
    end

    # Downloads the cycle's zip from the FEC FTP server, then unzips it and
    # yields each formatted row to the block.
    def fetch_file(&blk)
      zip_file = "#{@file}#{@cycle.to_s[2..3]}.zip"
      Net::FTP.open("ftp.fec.gov") do |ftp|
        ftp.login
        ftp.chdir("./FEC/#{@cycle}")
        begin
          ftp.get(zip_file, "./#{zip_file}")
        rescue Net::FTPPermError
          raise 'File not found - please try the other methods'
        end
      end
      unzip(zip_file, &blk)
    end

    # Builds one [header, lambda] pair per column: money-like columns become
    # floats, :filing_id an integer, *_date columns dates, everything else the
    # raw string.
    def parser
      @headers.map.with_index do |h, i|
        if h.to_s =~ /cash|amount|contributions|total|loan|transfer|debts|refund|expenditure/
          [h, ->(line) { line[i].to_f }]
        elsif h == :filing_id
          [h, ->(line) { line[i].to_i }]
        elsif h.to_s =~ /_date/
          [h, ->(line) { parse_date(line[i]) }]
        else
          [h, ->(line) { line[i] }]
        end
      end
    end

    # Splits a raw pipe-delimited line (scrubbing invalid bytes) and applies
    # each column's parser. Returns a header => value hash.
    def format_row(line)
      hash = {}
      line = line.encode('UTF-8', invalid: :replace, replace: ' ').chomp.split("|")
      @parser.each { |k, blk| hash[k] = blk.call(line) }
      hash
    end

    # Parses one date column. Empty dates become a start-of-cycle placeholder
    # when loading into a DB table, and an empty string otherwise.
    #
    # BUG FIX: the empty-date guard used to also require table_exist?, so an
    # empty date outside :db mode fell through to Date.parse('') and raised
    # ArgumentError. (The nested `if table_exist?` also made the '' branch
    # unreachable.)
    def parse_date(date)
      if date == ''
        return table_exist? ? Date.new(@cycle, 1, 1) : ''
      end
      if date.length == 8
        Date.strptime(date, "%m%d%Y")
      else
        Date.parse(date)
      end
    end

    # Extracts each entry (under @location when given), streams its rows to
    # the block, and cleans up both the extracted files and the archive.
    #
    # BUG FIX: the archive used to be deleted inside the entry loop while the
    # Zip::File handle was still open, which raised Errno::ENOENT on archives
    # with more than one entry; it is now deleted once, after the handle is
    # closed.
    def unzip(zip_file, &blk)
      Zip::File.open(zip_file) do |zip|
        zip.each do |entry|
          path = @location.nil? ? entry.name : @location + entry.name
          entry.extract(path) if !File.file?(path)
          File.foreach(path) do |row|
            blk.call(format_row(row))
          end
          File.delete(path)
        end
      end
      File.delete(zip_file)
    end
  end
end
|
module Figures
  # Converts an integer into its German word representation ("Zahlwort"),
  # including long-scale names (Million, Milliarde, ...) built from Latin
  # prefixes.
  class German
    # Digit words 1..9.
    UNITS = %w{ eins zwei drei vier fünf sechs sieben acht neun }.freeze
    # Latin building blocks for large-number names.
    PREFIXES = {
      units: %w{ tausend mi bi tri quadri quinti sexti septi okti noni },
      union_units: %w{ un duo tre quattuor quinqua se septe okto nove },
      union_tens: %w{ dezi viginti triginta quadraginta quinquaginta sexaginta septuaginta oktoginta nonaginta },
      union_hundreds: %w{ zenti duzenti trezenti quadringenti quingenti seszenti septingenti oktingenti nongenti }
    }.freeze
    # Irregular spellings, applied in order to each assembled triple word.
    EXCEPTIONS = {
      /^eins(und|hundert|tausend)/ => 'ein\1',
      /^eins\s/ => 'eine ',
      'einszehn' => 'elf',
      'zweizehn' => 'zwölf',
      'sechszehn' => 'sechzehn',
      'siebenzehn' => 'siebzehn',
      'zweizig' => 'zwanzig',
      'dreizig' => 'dreißig',
      'sechszig' => 'sechzig',
      'siebenzig' => 'siebzig'
    }.freeze

    attr_reader :number

    def initialize(number)
      @number = number.to_i
    end

    # Returns the full German word for the number.
    def parse
      return 'null' if number.zero?
      words = split_into_reverse_triples(number).each_with_index.map do |triple, index|
        triple_to_word(triple, index)
      end
      sentence = words.reverse.join.strip
      number.negative? ? "minus #{sentence}" : sentence
    end

    private

    # Number of three-digit groups in the number.
    def triples_count
      @triples_count ||= split_into_reverse_triples(number).count
    end

    # Splits |number| into 3-digit groups, least significant group first.
    def split_into_reverse_triples(number)
      @reverse_triples ||= number.abs.to_s.reverse.scan(/.{1,3}/).map(&:reverse)
    end

    # Converts one 3-digit group (given its position) into words.
    def triple_to_word(triple, triple_index)
      hundreds, tens, units = split_triple(triple)
      assembled = hundred(hundreds) + unit(units) + copula(units, tens).to_s + ten(tens)
      cleanup_exceptions(append_exponent_identifier(assembled, triple_index))
    end

    # Pads the triple to three digits and returns [hundreds, tens, units].
    def split_triple(triple)
      triple.rjust(3, '0').chars.map(&:to_i)
    end

    # Word for a units digit ('' for zero).
    def unit(digit)
      digit.zero? ? '' : UNITS[digit - 1]
    end

    # 'und' joins units and tens for 21..99 (e.g. einundzwanzig); nil otherwise.
    def copula(unit_digit, ten_digit)
      'und' if ten_digit > 1 && !unit_digit.zero?
    end

    # Word for a tens digit.
    def ten(digit)
      return '' if digit.zero?
      digit == 1 ? 'zehn' : "#{unit(digit)}zig"
    end

    # Word for a hundreds digit.
    def hundred(digit)
      digit.zero? ? '' : "#{unit(digit)}hundert"
    end

    # Appends the scale word for the triple's position: "tausend" for index 1,
    # capitalized (and pluralized) "...llion"/"...lliarde" names beyond that.
    def append_exponent_identifier(word, index)
      return word if word.empty? || index.zero? || triples_count == 1
      return word + PREFIXES[:units][0] if index == 1
      suffix = index.even? ? 'llion ' : 'lliarde '
      pluralize(word + ' ' + (PREFIXES[:units][index / 2] + suffix).capitalize)
    end

    # Pluralizes a scale word unless the count is exactly one ("eins ...").
    def pluralize(word)
      return word if word.start_with?('eins')
      word.sub(/e? $/, 'en ')
    end

    # Applies the irregular-spelling substitutions to a triple word.
    def cleanup_exceptions(word)
      EXCEPTIONS.each do |exception, replacement|
        word = word.sub(exception, replacement)
      end
      word
    end
  end
end
Fix 'einsund' to become 'einund'
module Figures
  # Converts an integer into its German word representation ("Zahlwort"),
  # including long-scale names (Million, Milliarde, ...) built from Latin
  # prefixes.
  class German
    # Digit words 1..9.
    UNITS = %w{ eins zwei drei vier fünf sechs sieben acht neun }.freeze
    # Latin building blocks for large-number names.
    PREFIXES = {
      units: %w{ tausend mi bi tri quadri quinti sexti septi okti noni },
      union_units: %w{ un duo tre quattuor quinqua se septe okto nove },
      union_tens: %w{ dezi viginti triginta quadraginta quinquaginta sexaginta septuaginta oktoginta nonaginta },
      union_hundreds: %w{ zenti duzenti trezenti quadringenti quingenti seszenti septingenti oktingenti nongenti }
    }.freeze
    # Irregular spellings, applied in order to each assembled triple word.
    # Note the standalone /einsund/ rule so e.g. "einsundzweizig" becomes
    # "einundzwanzig" even past the start of the word.
    EXCEPTIONS = {
      /^eins(hundert|tausend)/ => 'ein\1',
      /^eins\s/ => 'eine ',
      /einsund/ => 'einund',
      'einszehn' => 'elf',
      'zweizehn' => 'zwölf',
      'sechszehn' => 'sechzehn',
      'siebenzehn' => 'siebzehn',
      'zweizig' => 'zwanzig',
      'dreizig' => 'dreißig',
      'sechszig' => 'sechzig',
      'siebenzig' => 'siebzig'
    }.freeze

    attr_reader :number

    def initialize(number)
      @number = number.to_i
    end

    # Returns the full German word for the number.
    def parse
      return 'null' if number.zero?
      words = split_into_reverse_triples(number).each_with_index.map do |triple, index|
        triple_to_word(triple, index)
      end
      sentence = words.reverse.join.strip
      number.negative? ? "minus #{sentence}" : sentence
    end

    private

    # Number of three-digit groups in the number.
    def triples_count
      @triples_count ||= split_into_reverse_triples(number).count
    end

    # Splits |number| into 3-digit groups, least significant group first.
    def split_into_reverse_triples(number)
      @reverse_triples ||= number.abs.to_s.reverse.scan(/.{1,3}/).map(&:reverse)
    end

    # Converts one 3-digit group (given its position) into words.
    def triple_to_word(triple, triple_index)
      hundreds, tens, units = split_triple(triple)
      assembled = hundred(hundreds) + unit(units) + copula(units, tens).to_s + ten(tens)
      cleanup_exceptions(append_exponent_identifier(assembled, triple_index))
    end

    # Pads the triple to three digits and returns [hundreds, tens, units].
    def split_triple(triple)
      triple.rjust(3, '0').chars.map(&:to_i)
    end

    # Word for a units digit ('' for zero).
    def unit(digit)
      digit.zero? ? '' : UNITS[digit - 1]
    end

    # 'und' joins units and tens for 21..99 (e.g. einundzwanzig); nil otherwise.
    def copula(unit_digit, ten_digit)
      'und' if ten_digit > 1 && !unit_digit.zero?
    end

    # Word for a tens digit.
    def ten(digit)
      return '' if digit.zero?
      digit == 1 ? 'zehn' : "#{unit(digit)}zig"
    end

    # Word for a hundreds digit.
    def hundred(digit)
      digit.zero? ? '' : "#{unit(digit)}hundert"
    end

    # Appends the scale word for the triple's position: "tausend" for index 1,
    # capitalized (and pluralized) "...llion"/"...lliarde" names beyond that.
    def append_exponent_identifier(word, index)
      return word if word.empty? || index.zero? || triples_count == 1
      return word + PREFIXES[:units][0] if index == 1
      suffix = index.even? ? 'llion ' : 'lliarde '
      pluralize(word + ' ' + (PREFIXES[:units][index / 2] + suffix).capitalize)
    end

    # Pluralizes a scale word unless the count is exactly one ("eins ...").
    def pluralize(word)
      return word if word.start_with?('eins')
      word.sub(/e? $/, 'en ')
    end

    # Applies the irregular-spelling substitutions to a triple word.
    def cleanup_exceptions(word)
      EXCEPTIONS.each do |exception, replacement|
        word = word.sub(exception, replacement)
      end
      word
    end
  end
end
require 'httparty'
require 'json'
require 'uri'
require 'cgi'
require "flapjack-diner/version"
require "flapjack-diner/argument_validator"
module Flapjack
module Diner
SUCCESS_STATUS_CODES = [200, 204]
include HTTParty
format :json
class << self
attr_accessor :logger
# NB: clients will need to handle any exceptions caused by,
# e.g., network failures or non-parseable JSON data.
# All of the methods below are thin wrappers over perform_get /
# perform_post / perform_delete (defined later in this module).

# --- entities & checks ---

def entities
  perform_get('/entities')
end

def create_entities!(params = {})
  perform_post('/entities', params)
end

def checks(entity)
  perform_get("/checks/#{escape(entity)}")
end

# Status for an entity, or for a single check when options[:check] is given.
def status(entity, options = {})
  check = options.delete(:check)
  path = check.nil? ? "/status/#{escape(entity)}" : "/status/#{escape(entity)}/#{escape(check)}"
  perform_get(path)
end

def bulk_status(options = {})
  validate_bulk_params(options)
  perform_get('/status', options)
end

# --- acknowledgements & test notifications ---
# The single-pair variants build the same :check => {entity => check} shape
# the bulk endpoints expect.

# maybe rename 'create_acknowledgement!' ?
def acknowledge!(entity, check, options = {})
  args = options.merge(:check => {entity => check})
  validate_bulk_params(args)
  perform_post('/acknowledgements', args)
end

def bulk_acknowledge!(options = {})
  validate_bulk_params(options)
  perform_post('/acknowledgements', options)
end

# maybe rename 'create_test_notifications!' ?
def test_notifications!(entity, check, options = {})
  args = options.merge(:check => {entity => check})
  validate_bulk_params(args)
  perform_post('/test_notifications', args)
end

def bulk_test_notifications!(options = {})
  validate_bulk_params(options)
  perform_post('/test_notifications', options)
end

# --- scheduled / unscheduled maintenance writers ---
# When check is nil the whole entity is targeted instead of one check.

def create_scheduled_maintenance!(entity, check, options = {})
  args = options.merge( check ? {:check => {entity => check}} : {:entity => entity} )
  validate_bulk_params(args) do
    validate :query => :start_time, :as => [:required, :time]
    validate :query => :duration, :as => [:required, :integer]
  end
  perform_post('/scheduled_maintenances', args)
end

def bulk_create_scheduled_maintenance!(options = {})
  validate_bulk_params(options) do
    validate :query => :start_time, :as => [:required, :time]
    validate :query => :duration, :as => [:required, :integer]
  end
  perform_post('/scheduled_maintenances', options)
end

def delete_scheduled_maintenance!(entity, check, options = {})
  args = options.merge( check ? {:check => {entity => check}} : {:entity => entity} )
  validate_bulk_params(args) do
    validate :query => :start_time, :as => :required
  end
  perform_delete('/scheduled_maintenances', args)
end

def bulk_delete_scheduled_maintenance!(options = {})
  validate_bulk_params(options) do
    validate :query => :start_time, :as => :required
  end
  perform_delete('/scheduled_maintenances', options)
end

def delete_unscheduled_maintenance!(entity, check, options = {})
  args = options.merge( check ? {:check => {entity => check}} : {:entity => entity} )
  validate_bulk_params(args) do
    validate :query => :end_time, :as => :time
  end
  perform_delete('/unscheduled_maintenances', args)
end

# NOTE(review): unlike its siblings, options here has no default value.
def bulk_delete_unscheduled_maintenance!(options)
  validate_bulk_params(options) do
    validate :query => :end_time, :as => :time
  end
  perform_delete('/unscheduled_maintenances', options)
end
def scheduled_maintenances(entity, options = {})
check = options.delete(:check)
validate_params(options) do
validate :query => [:start_time, :end_time], :as => :time
end
ec_path = entity_check_path(entity, check)
perform_get("/scheduled_maintenances/#{ec_path}", options)
end
def bulk_scheduled_maintenances(options = {})
validate_bulk_params(options) do
validate :query => [:start_time, :end_time], :as => :time
end
perform_get('/scheduled_maintenances', options)
end
def unscheduled_maintenances(entity, options = {})
check = options.delete(:check)
validate_params(options) do
validate :query => [:start_time, :end_time], :as => :time
end
ec_path = entity_check_path(entity, check)
perform_get("/unscheduled_maintenances/#{ec_path}", options)
end
def bulk_unscheduled_maintenances(options = {})
validate_bulk_params(options) do
validate :query => [:start_time, :end_time], :as => :time
end
perform_get('/unscheduled_maintenances', options)
end
def outages(entity, options = {})
check = options.delete(:check)
validate_params(options) do
validate :query => [:start_time, :end_time], :as => :time
end
ec_path = entity_check_path(entity, check)
perform_get("/outages/#{ec_path}", options)
end
def bulk_outages(options = {})
validate_bulk_params(options) do
validate :query => [:start_time, :end_time], :as => :time
end
perform_get('/outages', options)
end
def downtime(entity, options = {})
check = options.delete(:check)
validate_params(options) do
validate :query => [:start_time, :end_time], :as => :time
end
ec_path = entity_check_path(entity, check)
perform_get("/downtime/#{ec_path}", options)
end
def bulk_downtime(options = {})
validate_bulk_params(options) do
validate :query => [:start_time, :end_time], :as => :time
end
perform_get('/downtime', options)
end
def entity_tags(entity)
perform_get("/entities/#{escape(entity)}/tags")
end
def add_entity_tags!(entity, *tags)
perform_post("/entities/#{escape(entity)}/tags", :tag => tags)
end
def delete_entity_tags!(entity, *tags)
perform_delete("/entities/#{escape(entity)}/tags", :tag => tags)
end
def contacts(contact_ids = nil)
perform_get('/contacts')
end
# GET a single contact by id.
def contact(contact_id)
perform_get("/contacts/#{escape(contact_id)}")
end

# POST one contact hash, or an array of contact hashes.
def create_contacts!(params = {})
params = [params] if params.respond_to?(:keys)
perform_post('/contacts', {'contacts' => params})
end

# PUT replacement data for an existing contact.
def update_contact!(contact_id, contact)
perform_put("/contacts/#{escape(contact_id)}", contact)
end

# DELETE a contact by id.
def delete_contact!(contact_id)
perform_delete("/contacts/#{escape(contact_id)}")
end

# GET the tags attached to a contact.
def contact_tags(contact_id)
perform_get("/contacts/#{escape(contact_id)}/tags")
end

# GET the entity tags attached to a contact.
def contact_entitytags(contact_id)
perform_get("/contacts/#{escape(contact_id)}/entity_tags")
end

# POST one or more tags onto a contact.
def add_contact_tags!(contact_id, *tags)
perform_post("/contacts/#{escape(contact_id)}/tags", :tag => tags)
end

# TODO better checking of provided data
# POST entity-tag pairs onto a contact.
def add_contact_entitytags!(contact_id, entity_tags = {})
perform_post("/contacts/#{escape(contact_id)}/entity_tags", :entity => entity_tags)
end

# DELETE one or more tags from a contact.
def delete_contact_tags!(contact_id, *tags)
perform_delete("/contacts/#{escape(contact_id)}/tags", :tag => tags)
end

# TODO better checking of provided data
# DELETE entity-tag pairs from a contact.
def delete_contact_entitytags!(contact_id, entity_tags = {})
perform_delete("/contacts/#{escape(contact_id)}/entity_tags", :entity => entity_tags)
end
# GET the notification rules belonging to a contact.
def notification_rules(contact_id)
perform_get("/contacts/#{escape(contact_id)}/notification_rules")
end

# GET one notification rule by id.
def notification_rule(rule_id)
perform_get("/notification_rules/#{escape(rule_id)}")
end

# POST one rule hash, or an array of rule hashes.
def create_notification_rule!(rules)
rules = [rules] if rules.respond_to?(:keys)
perform_post('/notification_rules', {'notification_rules' => rules})
end

# PUT replacement data for an existing notification rule.
def update_notification_rule!(rule_id, rule)
perform_put("/notification_rules/#{escape(rule_id)}", {'notification_rules' => [rule]})
end

# DELETE a notification rule by id.
def delete_notification_rule!(rule_id)
perform_delete("/notification_rules/#{escape(rule_id)}")
end

# POST one medium hash, or an array of media hashes.
def create_media!(media)
media = [media] if media.respond_to?(:keys)
perform_post('/media', {'media' => media})
end

# GET all media records for a contact.
def contact_media(contact_id)
perform_get("/contacts/#{escape(contact_id)}/media")
end

# GET one medium (by type, e.g. 'email') for a contact.
def contact_medium(contact_id, media_type)
perform_get("/contacts/#{escape(contact_id)}/media/#{escape(media_type)}")
end

# PUT replacement data for one of a contact's media.
def update_contact_medium!(contact_id, media_type, media)
perform_put("/contacts/#{escape(contact_id)}/media/#{escape(media_type)}", media)
end

# DELETE one of a contact's media by type.
def delete_contact_medium!(contact_id, media_type)
perform_delete("/contacts/#{escape(contact_id)}/media/#{escape(media_type)}")
end

# GET a contact's timezone.
def contact_timezone(contact_id)
perform_get("/contacts/#{escape(contact_id)}/timezone")
end

# PUT a new timezone for a contact.
def update_contact_timezone!(contact_id, timezone)
perform_put("/contacts/#{escape(contact_id)}/timezone", :timezone => timezone)
end

# DELETE a contact's timezone.
def delete_contact_timezone!(contact_id)
perform_delete("/contacts/#{escape(contact_id)}/timezone")
end
# Details of the most recent failed request ({'status_code' => ...} merged
# with any parsed error body), or nil if no request has failed yet.
def last_error
@last_error
end
private
# GET +path+ (with optional query params); returns handle_response's result.
def perform_get(path, params = nil)
req_uri = build_uri(path, params)
logger.info "GET #{req_uri}" if logger
response = get(req_uri.request_uri)
handle_response(response)
end

# POST +data+ to +path+ as a JSON request body.
def perform_post(path, data = {})
req_uri = build_uri(path)
if logger
log_post = "POST #{req_uri}"
log_post << "\n Params: #{data.inspect}" if data
logger.info log_post
end
opts = data ? {:body => prepare_nested_query(data).to_json, :headers => {'Content-Type' => 'application/vnd.api+json'}} : {}
response = post(req_uri.request_uri, opts)
handle_response(response)
end

# PUT +body+ to +path+ as a JSON request body.
def perform_put(path, body = {})
req_uri = build_uri(path)
if logger
log_put = "PUT #{req_uri}"
log_put << "\n Params: #{body.inspect}" if body
logger.info log_put
end
opts = body ? {:body => prepare_nested_query(body).to_json, :headers => {'Content-Type' => 'application/vnd.api+json'}} : {}
response = put(req_uri.request_uri, opts)
handle_response(response)
end

# DELETE +path+, optionally with a JSON request body.
# NOTE(review): this sends 'application/json' while POST/PUT send
# 'application/vnd.api+json' -- confirm the difference is intentional.
def perform_delete(path, body = nil)
req_uri = build_uri(path)
if logger
log_delete = "DELETE #{req_uri}"
log_delete << "\n Params: #{body.inspect}" if body
logger.info log_delete
end
opts = body ? {:body => prepare_nested_query(body).to_json, :headers => {'Content-Type' => 'application/json'}} : {}
response = delete(req_uri.request_uri, opts)
handle_response(response)
end
# Log and unpack an HTTParty response.
# Returns the parsed body for a 200 with a parseable body, true for other
# success codes (204, or a success with no parseable body), and nil on
# failure -- with details recorded via last_error=.
def handle_response(response)
  response_body = response.body
  response_start = response_body ? response_body[0..300] : nil
  if logger
    response_message = " #{response.message}" unless (response.message.nil? || response.message == "")
    logger.info " Response Code: #{response.code}#{response_message}"
    logger.info " Response Body: #{response_start}" if response_start
  end
  parsed_response = response.respond_to?(:parsed_response) ? response.parsed_response : nil
  unless SUCCESS_STATUS_CODES.include?(response.code)
    # Fix: merge(nil) raises TypeError when an error response has no
    # parseable body; fall back to an empty hash so the status is still
    # recorded and nil is returned to the caller.
    self.last_error = {'status_code' => response.code}.merge(parsed_response || {})
    return nil
  end
  return true unless (response.code == 200) && parsed_response
  parsed_response
end
# Validate the :entity / :check selectors used by the bulk endpoints, then
# hand the query to validate_params for any endpoint-specific checks.
# :entity must be a String or an Array of Strings; :check must be a Hash of
# String => (String | Array of Strings); at least one must be present.
# Raises ArgumentError on any violation.
def validate_bulk_params(query = {}, &validation)
  entity_arg = query[:entity]
  check_arg = query[:check]

  if entity_arg
    entity_ok = entity_arg.is_a?(String) ||
                (entity_arg.is_a?(Array) && entity_arg.all? { |e| e.is_a?(String) })
    unless entity_ok
      raise ArgumentError.new("Entity argument must be a String, or an Array of Strings")
    end
  end

  if check_arg
    check_ok = check_arg.is_a?(Hash) && check_arg.all? do |key, val|
      key.is_a?(String) &&
        (val.is_a?(String) || (val.is_a?(Array) && val.all? { |v| v.is_a?(String) }))
    end
    unless check_ok
      raise ArgumentError.new("Check argument must be a Hash with keys String, values either String or Array of Strings")
    end
  end

  if entity_arg.nil? && check_arg.nil?
    raise ArgumentError.new("Entity and/or check arguments must be provided")
  end

  validate_params(query, &validation)
end
# Run the ArgumentValidator DSL block (if any) against +query+; each
# `validate` call inside the block raises ArgumentError on invalid input.
# No-op when called without a block.
def validate_params(query = {}, &validation)
  return unless block_given?
  ArgumentValidator.new(query).instance_eval(&validation)
end
# Join an entity name and an optional check name into an escaped URL path
# fragment: "entity" or "entity/check".
def entity_check_path(entity, check)
  parts = [escape(entity)]
  parts << escape(check) unless check.nil?
  parts.join('/')
end
# copied from Rack::Utils -- builds the query string for GETs
# Recursively renders +value+ under +prefix+ using Rack-style bracket
# nesting (a[b][]=1&...). Time-like values (anything responding to
# #iso8601) are serialised as ISO-8601 strings.
# NOTE(review): values other than time-like/Array/Hash/String/Integer
# (e.g. true, false, Float) fall through to the bare prefix, silently
# dropping the value from the query string -- confirm that is intended.
def build_nested_query(value, prefix = nil)
if value.respond_to?(:iso8601)
raise ArgumentError, "value must be a Hash" if prefix.nil?
"#{prefix}=#{escape(value.iso8601)}"
else
case value
when Array
value.map { |v|
build_nested_query(v, "#{prefix}[]")
}.join("&")
when Hash
value.map { |k, v|
build_nested_query(v, prefix ? "#{prefix}[#{escape(k)}]" : escape(k))
}.join("&")
when String, Integer
raise ArgumentError, "value must be a Hash" if prefix.nil?
"#{prefix}=#{escape(value.to_s)}"
else
prefix
end
end
end
# application/x-www-form-urlencoded escaping (spaces become '+').
def escape(s)
URI.encode_www_form_component(s)
end
# used for the JSON data hashes in POST, PUT, DELETE
# Recursively convert a params structure into JSON-ready data: time-like
# values become ISO-8601 strings, Integer/true/false/nil pass through,
# Arrays and Hashes recurse, and anything else is stringified.
def prepare_nested_query(value)
  return value.iso8601 if value.respond_to?(:iso8601)

  case value
  when Array
    value.map { |element| prepare_nested_query(element) }
  when Hash
    value.each_with_object({}) do |(key, val), acc|
      acc[key] = prepare_nested_query(val)
    end
  when Integer, TrueClass, FalseClass, NilClass
    value
  else
    value.to_s
  end
end
# Split base_uri into [protocol, host, port]. Protocol defaults to 'http';
# a missing or out-of-range port falls back to 443 for https, 80 otherwise.
def protocol_host_port
  self.base_uri =~ /^(?:(https?):\/\/)?([a-zA-Z0-9][a-zA-Z0-9\.\-]*[a-zA-Z0-9])(?::(\d+))?/i
  protocol = ($1 || 'http').downcase
  host = $2
  raw_port = $3
  port =
    if raw_port && (1..65535).cover?(raw_port.to_i)
      raw_port.to_i
    else
      'https'.eql?(protocol) ? 443 : 80
    end
  [protocol, host, port]
end
# Build the absolute request URI for +path+ from base_uri's protocol, host
# and port, with +params+ rendered as a Rack-style nested query string.
# Fix: URI::HTTP.build has no :protocol component, so the key was silently
# ignored and https base URIs produced http:// URLs; select the URI class
# by scheme instead.
def build_uri(path, params = nil)
  protocol, host, port = protocol_host_port
  uri_class = 'https'.eql?(protocol) ? URI::HTTPS : URI::HTTP
  uri_class.build(:host => host, :port => port, :path => path,
    :query => (params.nil? || params.empty? ? nil : build_nested_query(params)))
end
# Private writer used by handle_response to record the last failure.
def last_error=(error)
@last_error = error
end
end
end
end
alias contact to contacts
require 'httparty'
require 'json'
require 'uri'
require 'cgi'
require "flapjack-diner/version"
require "flapjack-diner/argument_validator"
module Flapjack
module Diner
SUCCESS_STATUS_CODES = [200, 204]
include HTTParty
format :json
class << self
attr_accessor :logger
# NB: clients will need to handle any exceptions caused by,
# e.g., network failures or non-parseable JSON data.
def entities
perform_get('/entities')
end
def create_entities!(params = {})
perform_post('/entities', params)
end
def checks(entity)
perform_get("/checks/#{escape(entity)}")
end
def status(entity, options = {})
check = options.delete(:check)
path = check.nil? ? "/status/#{escape(entity)}" : "/status/#{escape(entity)}/#{escape(check)}"
perform_get(path)
end
def bulk_status(options = {})
validate_bulk_params(options)
perform_get('/status', options)
end
# maybe rename 'create_acknowledgement!' ?
def acknowledge!(entity, check, options = {})
args = options.merge(:check => {entity => check})
validate_bulk_params(args)
perform_post('/acknowledgements', args)
end
def bulk_acknowledge!(options = {})
validate_bulk_params(options)
perform_post('/acknowledgements', options)
end
# maybe rename 'create_test_notifications!' ?
def test_notifications!(entity, check, options = {})
args = options.merge(:check => {entity => check})
validate_bulk_params(args)
perform_post('/test_notifications', args)
end
def bulk_test_notifications!(options = {})
validate_bulk_params(options)
perform_post('/test_notifications', options)
end
def create_scheduled_maintenance!(entity, check, options = {})
args = options.merge( check ? {:check => {entity => check}} : {:entity => entity} )
validate_bulk_params(args) do
validate :query => :start_time, :as => [:required, :time]
validate :query => :duration, :as => [:required, :integer]
end
perform_post('/scheduled_maintenances', args)
end
def bulk_create_scheduled_maintenance!(options = {})
validate_bulk_params(options) do
validate :query => :start_time, :as => [:required, :time]
validate :query => :duration, :as => [:required, :integer]
end
perform_post('/scheduled_maintenances', options)
end
def delete_scheduled_maintenance!(entity, check, options = {})
args = options.merge( check ? {:check => {entity => check}} : {:entity => entity} )
validate_bulk_params(args) do
validate :query => :start_time, :as => :required
end
perform_delete('/scheduled_maintenances', args)
end
def bulk_delete_scheduled_maintenance!(options = {})
validate_bulk_params(options) do
validate :query => :start_time, :as => :required
end
perform_delete('/scheduled_maintenances', options)
end
def delete_unscheduled_maintenance!(entity, check, options = {})
args = options.merge( check ? {:check => {entity => check}} : {:entity => entity} )
validate_bulk_params(args) do
validate :query => :end_time, :as => :time
end
perform_delete('/unscheduled_maintenances', args)
end
def bulk_delete_unscheduled_maintenance!(options)
validate_bulk_params(options) do
validate :query => :end_time, :as => :time
end
perform_delete('/unscheduled_maintenances', options)
end
def scheduled_maintenances(entity, options = {})
check = options.delete(:check)
validate_params(options) do
validate :query => [:start_time, :end_time], :as => :time
end
ec_path = entity_check_path(entity, check)
perform_get("/scheduled_maintenances/#{ec_path}", options)
end
def bulk_scheduled_maintenances(options = {})
validate_bulk_params(options) do
validate :query => [:start_time, :end_time], :as => :time
end
perform_get('/scheduled_maintenances', options)
end
def unscheduled_maintenances(entity, options = {})
check = options.delete(:check)
validate_params(options) do
validate :query => [:start_time, :end_time], :as => :time
end
ec_path = entity_check_path(entity, check)
perform_get("/unscheduled_maintenances/#{ec_path}", options)
end
def bulk_unscheduled_maintenances(options = {})
validate_bulk_params(options) do
validate :query => [:start_time, :end_time], :as => :time
end
perform_get('/unscheduled_maintenances', options)
end
def outages(entity, options = {})
check = options.delete(:check)
validate_params(options) do
validate :query => [:start_time, :end_time], :as => :time
end
ec_path = entity_check_path(entity, check)
perform_get("/outages/#{ec_path}", options)
end
def bulk_outages(options = {})
validate_bulk_params(options) do
validate :query => [:start_time, :end_time], :as => :time
end
perform_get('/outages', options)
end
def downtime(entity, options = {})
check = options.delete(:check)
validate_params(options) do
validate :query => [:start_time, :end_time], :as => :time
end
ec_path = entity_check_path(entity, check)
perform_get("/downtime/#{ec_path}", options)
end
def bulk_downtime(options = {})
validate_bulk_params(options) do
validate :query => [:start_time, :end_time], :as => :time
end
perform_get('/downtime', options)
end
def entity_tags(entity)
perform_get("/entities/#{escape(entity)}/tags")
end
def add_entity_tags!(entity, *tags)
perform_post("/entities/#{escape(entity)}/tags", :tag => tags)
end
def delete_entity_tags!(entity, *tags)
perform_delete("/entities/#{escape(entity)}/tags", :tag => tags)
end
# GET /contacts, optionally scoped to one contact id or an array of ids
# (rendered as a comma-separated path segment).
def contacts(contact_ids = nil)
  path = '/contacts'
  if contact_ids
    ids = contact_ids.respond_to?(:each) ? contact_ids : [contact_ids]
    path += '/' + ids.map { |id| escape(id) }.join(',')
  end
  perform_get(path)
end
# Single-contact lookup, implemented via the bulk contacts endpoint.
def contact(contact_id)
  contacts([contact_id])
end
def create_contacts!(params = {})
params = [params] if params.respond_to?(:keys)
perform_post('/contacts', {'contacts' => params})
end
def update_contact!(contact_id, contact)
perform_put("/contacts/#{escape(contact_id)}", contact)
end
def delete_contact!(contact_id)
perform_delete("/contacts/#{escape(contact_id)}")
end
def contact_tags(contact_id)
perform_get("/contacts/#{escape(contact_id)}/tags")
end
def contact_entitytags(contact_id)
perform_get("/contacts/#{escape(contact_id)}/entity_tags")
end
def add_contact_tags!(contact_id, *tags)
perform_post("/contacts/#{escape(contact_id)}/tags", :tag => tags)
end
# TODO better checking of provided data
def add_contact_entitytags!(contact_id, entity_tags = {})
perform_post("/contacts/#{escape(contact_id)}/entity_tags", :entity => entity_tags)
end
def delete_contact_tags!(contact_id, *tags)
perform_delete("/contacts/#{escape(contact_id)}/tags", :tag => tags)
end
# TODO better checking of provided data
def delete_contact_entitytags!(contact_id, entity_tags = {})
perform_delete("/contacts/#{escape(contact_id)}/entity_tags", :entity => entity_tags)
end
def notification_rules(contact_id)
perform_get("/contacts/#{escape(contact_id)}/notification_rules")
end
def notification_rule(rule_id)
perform_get("/notification_rules/#{escape(rule_id)}")
end
def create_notification_rule!(rules)
rules = [rules] if rules.respond_to?(:keys)
perform_post('/notification_rules', {'notification_rules' => rules})
end
def update_notification_rule!(rule_id, rule)
perform_put("/notification_rules/#{escape(rule_id)}", {'notification_rules' => [rule]})
end
def delete_notification_rule!(rule_id)
perform_delete("/notification_rules/#{escape(rule_id)}")
end
def create_media!(media)
media = [media] if media.respond_to?(:keys)
perform_post('/media', {'media' => media})
end
def contact_media(contact_id)
perform_get("/contacts/#{escape(contact_id)}/media")
end
def contact_medium(contact_id, media_type)
perform_get("/contacts/#{escape(contact_id)}/media/#{escape(media_type)}")
end
def update_contact_medium!(contact_id, media_type, media)
perform_put("/contacts/#{escape(contact_id)}/media/#{escape(media_type)}", media)
end
def delete_contact_medium!(contact_id, media_type)
perform_delete("/contacts/#{escape(contact_id)}/media/#{escape(media_type)}")
end
def contact_timezone(contact_id)
perform_get("/contacts/#{escape(contact_id)}/timezone")
end
def update_contact_timezone!(contact_id, timezone)
perform_put("/contacts/#{escape(contact_id)}/timezone", :timezone => timezone)
end
def delete_contact_timezone!(contact_id)
perform_delete("/contacts/#{escape(contact_id)}/timezone")
end
def last_error
@last_error
end
private
def perform_get(path, params = nil)
req_uri = build_uri(path, params)
logger.info "GET #{req_uri}" if logger
response = get(req_uri.request_uri)
handle_response(response)
end
def perform_post(path, data = {})
req_uri = build_uri(path)
if logger
log_post = "POST #{req_uri}"
log_post << "\n Params: #{data.inspect}" if data
logger.info log_post
end
opts = data ? {:body => prepare_nested_query(data).to_json, :headers => {'Content-Type' => 'application/vnd.api+json'}} : {}
response = post(req_uri.request_uri, opts)
handle_response(response)
end
def perform_put(path, body = {})
req_uri = build_uri(path)
if logger
log_put = "PUT #{req_uri}"
log_put << "\n Params: #{body.inspect}" if body
logger.info log_put
end
opts = body ? {:body => prepare_nested_query(body).to_json, :headers => {'Content-Type' => 'application/vnd.api+json'}} : {}
response = put(req_uri.request_uri, opts)
handle_response(response)
end
def perform_delete(path, body = nil)
req_uri = build_uri(path)
if logger
log_delete = "DELETE #{req_uri}"
log_delete << "\n Params: #{body.inspect}" if body
logger.info log_delete
end
opts = body ? {:body => prepare_nested_query(body).to_json, :headers => {'Content-Type' => 'application/json'}} : {}
response = delete(req_uri.request_uri, opts)
handle_response(response)
end
# Log and unpack an HTTParty response.
# Returns the parsed body for a 200 with a parseable body, true for other
# success codes (204, or a success with no parseable body), and nil on
# failure -- with details recorded via last_error=.
def handle_response(response)
  response_body = response.body
  response_start = response_body ? response_body[0..300] : nil
  if logger
    response_message = " #{response.message}" unless (response.message.nil? || response.message == "")
    logger.info " Response Code: #{response.code}#{response_message}"
    logger.info " Response Body: #{response_start}" if response_start
  end
  parsed_response = response.respond_to?(:parsed_response) ? response.parsed_response : nil
  unless SUCCESS_STATUS_CODES.include?(response.code)
    # Fix: merge(nil) raises TypeError when an error response has no
    # parseable body; fall back to an empty hash so the status is still
    # recorded and nil is returned to the caller.
    self.last_error = {'status_code' => response.code}.merge(parsed_response || {})
    return nil
  end
  return true unless (response.code == 200) && parsed_response
  parsed_response
end
def validate_bulk_params(query = {}, &validation)
errors = []
entities = query[:entity]
checks = query[:check]
if entities && !entities.is_a?(String) &&
(!entities.is_a?(Array) || !entities.all? {|e| e.is_a?(String)})
raise ArgumentError.new("Entity argument must be a String, or an Array of Strings")
end
if checks && (!checks.is_a?(Hash) || !checks.all? {|k, v|
k.is_a?(String) && (v.is_a?(String) || (v.is_a?(Array) && v.all?{|vv| vv.is_a?(String)}))
})
raise ArgumentError.new("Check argument must be a Hash with keys String, values either String or Array of Strings")
end
if entities.nil? && checks.nil?
raise ArgumentError.new("Entity and/or check arguments must be provided")
end
validate_params(query, &validation)
end
def validate_params(query = {}, &validation)
ArgumentValidator.new(query).instance_eval(&validation) if block_given?
end
def entity_check_path(entity, check)
check.nil? ? "#{escape(entity)}" : "#{escape(entity)}/#{escape(check)}"
end
# copied from Rack::Utils -- builds the query string for GETs
def build_nested_query(value, prefix = nil)
if value.respond_to?(:iso8601)
raise ArgumentError, "value must be a Hash" if prefix.nil?
"#{prefix}=#{escape(value.iso8601)}"
else
case value
when Array
value.map { |v|
build_nested_query(v, "#{prefix}[]")
}.join("&")
when Hash
value.map { |k, v|
build_nested_query(v, prefix ? "#{prefix}[#{escape(k)}]" : escape(k))
}.join("&")
when String, Integer
raise ArgumentError, "value must be a Hash" if prefix.nil?
"#{prefix}=#{escape(value.to_s)}"
else
prefix
end
end
end
def escape(s)
URI.encode_www_form_component(s)
end
# used for the JSON data hashes in POST, PUT, DELETE
def prepare_nested_query(value)
if value.respond_to?(:iso8601)
value.iso8601
else
case value
when Array
value.map { |v| prepare_nested_query(v) }
when Hash
value.inject({}) do |memo, (k, v)|
memo[k] = prepare_nested_query(v)
memo
end
when Integer, TrueClass, FalseClass, NilClass
value
else
value.to_s
end
end
end
def protocol_host_port
self.base_uri =~ /^(?:(https?):\/\/)?([a-zA-Z0-9][a-zA-Z0-9\.\-]*[a-zA-Z0-9])(?::(\d+))?/i
protocol = ($1 || 'http').downcase
host = $2
port = $3
if port.nil? || port.to_i < 1 || port.to_i > 65535
port = 'https'.eql?(protocol) ? 443 : 80
else
port = port.to_i
end
[protocol, host, port]
end
# Build the absolute request URI for +path+ from base_uri's protocol, host
# and port, with +params+ rendered as a Rack-style nested query string.
# Fix: URI::HTTP.build has no :protocol component, so the key was silently
# ignored and https base URIs produced http:// URLs; select the URI class
# by scheme instead.
def build_uri(path, params = nil)
  protocol, host, port = protocol_host_port
  uri_class = 'https'.eql?(protocol) ? URI::HTTPS : URI::HTTP
  uri_class.build(:host => host, :port => port, :path => path,
    :query => (params.nil? || params.empty? ? nil : build_nested_query(params)))
end
def last_error=(error)
@last_error = error
end
end
end
end
|
# coding: utf-8
lib = File.expand_path('../lib', __FILE__)
$LOAD_PATH.unshift(lib) unless $LOAD_PATH.include?(lib)
require 'itunes/version'

# Gem packaging metadata for itunes-client.
Gem::Specification.new do |spec|
  spec.name          = "itunes-client"
  spec.version       = Itunes::VERSION
  spec.authors       = ["ryo katsuma"]
  spec.email         = ["katsuma@gmail.com"]
  spec.description   = %q{iTunes client with high level API}
  spec.summary       = %q{itunes-client provides a high level API like ActiveRecord style to control your iTunes.}
  spec.homepage      = "https://github.com/katsuma/itunes-client"
  spec.license       = "MIT"

  spec.files         = `git ls-files`.split($/)
  spec.executables   = spec.files.grep(%r{^bin/}) { |f| File.basename(f) }
  spec.test_files    = spec.files.grep(%r{^(test|spec|features)/})
  spec.require_paths = ["lib"]

  # rake < 12.3.3 is vulnerable to command injection via Rake::FileList
  # (CVE-2020-8130); require a patched release.
  spec.add_development_dependency "rake", "~> 12.3.3"
  spec.add_development_dependency "rspec", "~> 3.4.0"
  spec.add_development_dependency "rb-fsevent", "~> 0.9.6"
  spec.add_development_dependency "guard", "~> 2.13.0"
  spec.add_development_dependency "guard-rspec", "~> 4.6.4"
  spec.add_development_dependency "growl", "~> 1.0.3"
  spec.add_development_dependency "fakefs", "~> 0.6.7"
  spec.add_development_dependency "simplecov", "~> 0.11.1"
  spec.add_development_dependency "coveralls", "~> 0.8.9"
  spec.add_development_dependency "pry", "~> 0.10.3"
end
Update rake for CVE-2020-8130
# coding: utf-8
# Gem packaging metadata for itunes-client.
lib = File.expand_path('../lib', __FILE__)
$LOAD_PATH.unshift(lib) unless $LOAD_PATH.include?(lib)
require 'itunes/version'
Gem::Specification.new do |spec|
spec.name = "itunes-client"
spec.version = Itunes::VERSION
spec.authors = ["ryo katsuma"]
spec.email = ["katsuma@gmail.com"]
spec.description = %q{iTunes client with high level API}
spec.summary = %q{itunes-client provides a high level API like ActiveRecord style to control your iTunes.}
spec.homepage = "https://github.com/katsuma/itunes-client"
spec.license = "MIT"
# Package everything tracked by git; expose bin/ scripts as executables.
spec.files = `git ls-files`.split($/)
spec.executables = spec.files.grep(%r{^bin/}) { |f| File.basename(f) }
spec.test_files = spec.files.grep(%r{^(test|spec|features)/})
spec.require_paths = ["lib"]
# rake pinned to >= 12.3.3 to avoid CVE-2020-8130 (Rake::FileList command injection).
spec.add_development_dependency "rake", "~> 12.3.3"
spec.add_development_dependency "rspec", "~> 3.4.0"
spec.add_development_dependency "rb-fsevent", "~> 0.9.6"
spec.add_development_dependency "guard", "~> 2.13.0"
spec.add_development_dependency "guard-rspec", "~> 4.6.4"
spec.add_development_dependency "growl", "~> 1.0.3"
spec.add_development_dependency "fakefs", "~> 0.6.7"
spec.add_development_dependency "simplecov", "~> 0.11.1"
spec.add_development_dependency "coveralls", "~> 0.8.9"
spec.add_development_dependency "pry", "~> 0.10.3"
end
|
require 'fog/core'
require 'uri'
module Fog
  module Cloudstack
    extend Fog::Provider

    service(:compute, 'cloudstack/compute')

    # Fix: OpenSSL::Digest::Digest is deprecated and was removed in
    # Ruby 3.0; OpenSSL::Digest.new is the supported, equivalent spelling.
    DIGEST = OpenSSL::Digest.new('sha1')

    # URL-escape a value for a Cloudstack query string. CGI.escape encodes
    # spaces as '+', but Cloudstack signs against '%20', so convert.
    def self.escape(string)
      string = CGI::escape(string)
      string = string.gsub("+","%20")
      string
    end

    # Base64 HMAC-SHA1 signature over the sorted, downcased query string,
    # as required by the Cloudstack API.
    def self.signed_params(key,params)
      # remove empty attributes, cloudstack will not take them into account when verifying signature
      # NOTE(review): reject! mutates the caller's params hash in place --
      # confirm callers expect this side effect.
      params.reject!{|k,v| v.nil? || v.to_s == ''}

      query = params.to_a.sort.collect{|c| "#{c[0]}=#{escape(c[1].to_s)}"}.join('&').downcase
      signed_string = Base64.encode64(OpenSSL::HMAC.digest(DIGEST,key,query)).strip
      signed_string
    end
  end
end
[cloudstack] added additional networking support and volume management commands
require 'fog/core'
require 'uri'
module Fog
  module Cloudstack
    extend Fog::Provider

    service(:compute, 'cloudstack/compute')

    # Fix: OpenSSL::Digest::Digest is deprecated and was removed in
    # Ruby 3.0; OpenSSL::Digest.new is the supported, equivalent spelling.
    DIGEST = OpenSSL::Digest.new('sha1')

    # URL-escape a value for a Cloudstack query string. CGI.escape encodes
    # spaces as '+', but Cloudstack signs against '%20', so convert.
    def self.escape(string)
      string = CGI::escape(string)
      string = string.gsub("+","%20")
      string
    end

    # Base64 HMAC-SHA1 signature over the sorted, downcased query string,
    # as required by the Cloudstack API.
    def self.signed_params(key,params)
      query = params.to_a.sort.collect{|c| "#{c[0]}=#{escape(c[1].to_s)}"}.join('&').downcase
      signed_string = Base64.encode64(OpenSSL::HMAC.digest(DIGEST,key,query)).strip
      signed_string
    end
  end
end
|
module FormsAutofill
# left hand: json that says first, middle, and last name.
# right hand: my sections + templates
# action: take left hand and map to right hand correctly, including duplicates
# Procedure is:
# form = FormController new pdf_path
# form.add_section #whatever section or sections you want,
# form.create_defaults - creates default sections for all the rest
# form.fill! user_input - the user input you want, in terms of section names
# form.write destination
class FormController
require "pdf-forms"
attr_accessor :sections
attr_reader :form
@@pdftk_path = File.realpath(__dir__) + "/../bin/pdftk"
# Build the pdftk adapter, load the PDF form at +form_path+, and tag each
# of its fields with a stable numeric id (its position in the field list).
# Fix: `each_.with_index` is not a method (NoMethodError at runtime);
# the intended call is `each_with_index`.
def initialize form_path
  @pdftk = PdfForms.new @@pdftk_path
  # @form_path = form_path
  @form = PdfForms::Pdf.new form_path, @pdftk
  @form.fields.each_with_index do |field, index|
    field.id = index
  end
  @sections = []
end
# 1. field id added when adding to section
# 2. field id added when @field created. - that's no good.
# issues: field id assigned in 2 places.
# create defaults create multiple copies of multisections.
# Wrap every form field not yet claimed by a section in its own
# single-field default Section. Returns the updated section list, or nil
# (after printing an error) when any field is already claimed by more than
# one section (see #clear?).
def create_defaults
unless clear?
puts "Error: Multiple sections contain the same field"
nil
else
unassigned_fields = @form.fields.reject { |f| fields.include? f }
unassigned_fields.each do |field|
newsection = Section.new @form
newsection.add_field field.id
add_section(newsection)
end
@sections
end
end
# def create_defaults
# # take all unassigned fields, make unnamed sections from them.
# @fields.each_with_index do |field, index|
# newsection = Section.new @form
# newsection.add_field index
# add_section(newsection)
# end
# @sections
# end
# Register a section with this controller; fields stay attached to the
# section itself, no extra bookkeeping happens here. Returns the updated
# section list.
def add_section section
  @sections.push(section)
end
# Serialisable snapshot of this controller: pdftk binary path, source form
# path, and each section's own export hash. Inverse of FormController.import.
def export
{
:pdftk_path => @@pdftk_path,
:form_path => @form.path,
:sections => sections.map{|section| section.export}
}
end
# Assign user-supplied values to sections by name.
# example: {"name" => "James", "DOB" => "05/03/2010"}
# Fix: a label with no matching section crashed with NoMethodError on nil
# (`relevant_section.first.assign!`); raise a descriptive ArgumentError.
def fill! user_data
  user_data.each do |label, value|
    section = @sections.find { |s| s.name == label }
    raise ArgumentError, "no section named #{label.inspect}" if section.nil?
    section.assign! value
  end
end
# creates a new FormController with the right form, and the right sections defined, and the default values
# structure should be same as export output.
# Rebuild a controller from an #export hash: reopen the form at
# data[:form_path] and re-create each serialised section against it.
def self.import data
controller = FormController.new data[:form_path]
data[:sections].each do |section_hash|
section = Section.import section_hash, controller.form
controller.add_section section
end
controller
end
# def fast_section id
# newsec = mksection @fields[id].name, [id]
# add_section newsec
# end
# def store_field field
# No assign for form - too complicated.
# assign values to the individual sections.
# manually create sections like firstname, phonenum, etc.
# manually assign them values.
# so now create_defaults can read a pdf and make sections. You define the sections containing multiple fields and the multisections manually.
# 2 kinds of functionality -
# 1 is having a steady state to store values in the way they're organized
# 2 is writing them down.
# 3 I want to add six values, have them be assigned to multisections/sections,
# have all the rest added from someplace, and write all this on top of the blank pdf.
# have export/impo
# 3. cases: one value per section - defaults
# one value for multiple secitons - PdfSection
# value mapped to multiple pdfsections. - MultiSection.
# < section class='pdfsection', value='something'> (works for default as well)
# <listelt - name = whatever, id = whatever, value = sectionvalue>
# < section class='multisection' value='something', mapping = 'way split works'>
# <section class='pdfsection', value='result of mapping'>
# <listelt - name = whatever, id = whatever, value = sectionvalue(0..4)>
# section has class, value, fields - add id attribute to field.
# multisection has class, value, mapping, and sections.
# Render the controller's current field values into a filled copy of the
# form at +destination+ via pdftk.
# Fix: `pdftk` was an undefined local variable (NameError at runtime);
# the adapter created in #initialize lives in @pdftk.
def write destination
  @pdftk.fill_form @form.path, destination, make_hash
end
# True when no field is claimed by more than one section (i.e. the flat
# field list contains no duplicates).
def clear?
  all_fields = fields
  all_fields.length == all_fields.uniq.length
end
# Flatten every section's fields into one { field_name_sym => value } map,
# the shape pdftk's fill_form expects.
def make_hash
  all_fields = @sections.map { |section| select_fields section }.flatten
  all_fields.each_with_object({}) do |field, acc|
    acc[field.name.to_sym] = field.value
  end
end
# Collect the fields belonging to +section+: a plain Section contributes
# its own field list; anything else (e.g. a multi-section) is recursed
# into via its nested sections hash. Returns a flat array.
def select_fields section
result = []
if section.class == Section
result << section.fields
else
result += section.sections.values.map{|subsection| select_fields subsection}.flatten
end
result.flatten
end
# Every field claimed by any registered section, flattened into one array.
def fields
  @sections.reduce([]) { |acc, section| acc + select_fields(section) }.flatten
end
end
end
# old:
# def import_sections sections_json, options = {}
# # accepts JSON, adds it to @sections
# defaults = {:overwrite => false}
# options = defaults.merge(options)
# new_sections = JSON.parse(sections_json)
# if options[:overwrite]
# @sections = new_sections
# else
# @sections.concat new_sections
# end
# @sections
# end
FormController now takes a form element, not a path, in #new
module FormsAutofill
# left hand: json that says first, middle, and last name.
# right hand: my sections + templates
# action: take left hand and map to right hand correctly, including duplicates
# Procedure is:
# form = FormController new pdf_path
# form.add_section #whatever section or sections you want,
# form.create_defaults - creates default sections for all the rest
# form.fill! user_input - the user input you want, in terms of section names
# form.write destination
class FormController
require "pdf-forms"
attr_accessor :sections
attr_reader :form
@@pdftk_path = File.realpath(__dir__) + "/../bin/pdftk"
# Takes an already-constructed PdfForms::Pdf form object (not a path) and
# tags each of its fields with a stable numeric id (its list position).
# Also builds this controller's own pdftk adapter for #write.
def initialize form
@pdftk = PdfForms.new @@pdftk_path
# @form_path = form_path
@form = form#PdfForms::Pdf.new form_path, @pdftk
@form.fields.each_with_index do |field, index|
field.id = index
end
@sections = []
end
# 1. field id added when adding to section
# 2. field id added when @field created. - that's no good.
# issues: field id assigned in 2 places.
# create defaults create multiple copies of multisections.
def create_defaults
#__ISSUE: select all fields not in sections
unless clear?
puts "Error: Multiple sections contain the same field"
nil
else
unassigned_fields = @form.fields.reject { |f| fields.include? f }
unassigned_fields.each do |field|
newsection = Section.new @form
newsection.add_field field.id
add_section(newsection)
end
@sections
end
end
# def create_defaults
# # take all unassigned fields, make unnamed sections from them.
# @fields.each_with_index do |field, index|
# newsection = Section.new @form
# newsection.add_field index
# add_section(newsection)
# end
# @sections
# end
def add_section section
@sections << section
# relevant_fields = select_fields(section).map{|field| field.to_hash}
# @fields.delete_if {|field| relevant_fields.include? field.to_hash} #__ISSUE: not sure if this will work - might be different objects
end
def export
{
:pdftk_path => @@pdftk_path,
:form_path => @form.path,
:sections => sections.map{|section| section.export}
}
end
# Assign user-supplied values to sections by name.
# example: {"name" => "James", "DOB" => "05/03/2010"}
# Fix: a label with no matching section crashed with NoMethodError on nil
# (`relevant_section.first.assign!`); raise a descriptive ArgumentError.
def fill! user_data
  user_data.each do |label, value|
    section = @sections.find { |s| s.name == label }
    raise ArgumentError, "no section named #{label.inspect}" if section.nil?
    section.assign! value
  end
end
# Rebuild a FormController from an #export hash: reload the PDF form and
# re-create each serialised section against it.
# Fix: PdfForms::Pdf.new expects a pdftk adapter *instance* as its second
# argument (compare the v1 initializer, which passed @pdftk), not the
# pdftk binary path string.
def self.import data
  pdftk = PdfForms.new @@pdftk_path
  form = PdfForms::Pdf.new data[:form_path], pdftk
  controller = FormController.new form
  data[:sections].each do |section_hash|
    section = Section.import section_hash, form
    controller.add_section section
  end
  controller
end
# def fast_section id
# newsec = mksection @fields[id].name, [id]
# add_section newsec
# end
# def store_field field
# No assign for form - too complicated.
# assign values to the individual sections.
# manually create sections like firstname, phonenum, etc.
# manually assign them values.
# so now create_defaults can read a pdf and make sections. You define the sections containing multiple fields and the multisections manually.
# 2 kinds of functionality -
# 1 is having a steady state to store values in the way they're organized
# 2 is writing them down.
# 3 I want to add six values, have them be assigned to multisections/sections,
# have all the rest added from someplace, and write all this on top of the blank pdf.
# have export/impo
# 3. cases: one value per section - defaults
# one value for multiple secitons - PdfSection
# value mapped to multiple pdfsections. - MultiSection.
# < section class='pdfsection', value='something'> (works for default as well)
# <listelt - name = whatever, id = whatever, value = sectionvalue>
# < section class='multisection' value='something', mapping = 'way split works'>
# <section class='pdfsection', value='result of mapping'>
# <listelt - name = whatever, id = whatever, value = sectionvalue(0..4)>
# section has class, value, fields - add id attribute to field.
# multisection has class, value, mapping, and sections.
def write destination
  # Renders the form: fills the source PDF with the current field values
  # (see #make_hash) and writes the result to +destination+.
  pdftk.fill_form(@form.path, destination, make_hash)
end
def clear?
  # True when no field appears more than once across all sections.
  all_fields = fields
  all_fields.uniq.length == all_fields.length
end
def make_hash
  # Flattens every section down to its leaf fields and builds the
  # {field_name_symbol => value} hash that pdftk's fill_form expects.
  leaf_fields = @sections.map { |section| select_fields(section) }.flatten
  leaf_fields.each_with_object({}) do |field, mapping|
    mapping[field.name.to_sym] = field.value
  end
end
def select_fields section
  # Recursively collects the leaf fields below +section+: a plain Section
  # contributes its own fields directly, any other node (e.g. a
  # multi-section) delegates to each of its child sections.
  if section.instance_of?(Section)
    [section.fields].flatten
  else
    section.sections.values.map { |child| select_fields(child) }.flatten
  end
end
def fields
  # Every leaf field of the form, across all sections, as one flat array.
  @sections.flat_map { |section| select_fields(section) }.flatten
end
end
end
# old:
# def import_sections sections_json, options = {}
# # accepts JSON, adds it to @sections
# defaults = {:overwrite => false}
# options = defaults.merge(options)
# new_sections = JSON.parse(sections_json)
# if options[:overwrite]
# @sections = new_sections
# else
# @sections.concat new_sections
# end
# @sections
# end |
require "fpm/namespace"
require "fpm/source"
require "rubygems/package"
require "rubygems"
require "fileutils"
# Source handler that turns a rubygem (a local .gem file, or a gem name
# to be downloaded from rubygems) into an fpm package source.
class FPM::Source::Gem < FPM::Source
  # Registers gem-specific command-line flags on the option parser.
  def self.flags(opts, settings)
    opts.on("--bin-path DIRECTORY",
            "The directory to install gem executables") do |path|
      settings.source[:bin_path] = path
    end
  end # def flags

  # Resolves the first path argument: if it is not an existing file but
  # looks like a bare gem name, download it; otherwise fail loudly.
  def get_source(params)
    gem = @paths.first
    looks_like_name_re = /^[A-Za-z0-9_-]+$/
    if !File.exists?(gem)
      if gem =~ looks_like_name_re
        download(gem, params[:version])
      else
        raise "Path '#{gem}' is not a file and does not appear to be the name of a rubygem."
      end
    end
  end # def get_source

  # Gems declare their own dependencies, so fpm may recurse into them.
  def can_recurse_dependencies
    true
  end

  # Downloads the named gem (latest, or a specific +version+) into the
  # current directory and points @paths at the downloaded file.
  def download(gem_name, version=nil)
    # This code mostly mutated from rubygem's fetch_command.rb
    # Code use permissible by rubygems's "GPL or these conditions below"
    # http://rubygems.rubyforge.org/rubygems-update/LICENSE_txt.html
    puts "Trying to download #{gem_name} (version=#{version || 'latest'})"
    dep = ::Gem::Dependency.new gem_name, version
    # How to handle prerelease? Some extra magic options?
    #dep.prerelease = options[:prerelease]
    # Older rubygems lack fetch_with_errors; fall back to plain fetch.
    if ::Gem::SpecFetcher.fetcher.respond_to?(:fetch_with_errors)
      specs_and_sources, errors =
        ::Gem::SpecFetcher.fetcher.fetch_with_errors(dep, true, true, false)
    else
      specs_and_sources =
        ::Gem::SpecFetcher.fetcher.fetch(dep, true)
      errors = "???"
    end
    # Each entry is [spec, source_uri]; take the highest version available.
    spec, source_uri = specs_and_sources.sort_by { |s,| s.version }.last
    if spec.nil? then
      raise "Invalid gem? Name: #{gem_name}, Version: #{version}, Errors: #{errors}"
    end
    path = ::Gem::RemoteFetcher.fetcher.download spec, source_uri
    FileUtils.mv path, spec.file_name
    @paths = [spec.file_name]
  end

  # Reads the gemspec out of the .gem file and maps its fields onto the
  # fpm package attributes (name, version, dependencies, ...).
  def get_metadata
    File.open(@paths.first, 'r') do |f|
      ::Gem::Package.open(f, 'r') do |gem|
        spec = gem.metadata
        %w(
          description
          license
          summary
          version
        ).each do |field|
          # Some gemspecs omit fields; fall back to a placeholder.
          self[field.to_sym] = spec.send(field) rescue "unknown"
        end
        self[:name] = "rubygem#{self[:suffix]}-#{spec.name}"
        self[:maintainer] = spec.author
        self[:url] = spec.homepage
        # TODO [Jay]: this will be different for different
        # package managers.  Need to decide how to handle this.
        self[:category] = 'Languages/Development/Ruby'
        # if the gemspec has extensions defined, then this should be a 'native' arch.
        if !spec.extensions.empty?
          self[:architecture] = "native"
        else
          self[:architecture] = "all"
        end
        # make sure we have a description
        # FIX: the previous `!t.strip == ""` parsed as `(!t.strip) == ""`,
        # which is never true (and nil.strip would raise), so no
        # description was ever selected. Pick the first non-blank value.
        self[:description] = [ self[:description], self[:summary], "N/A" ].detect { |t| !(t.nil? || t.strip.empty?) }
        self[:dependencies] = []
        spec.runtime_dependencies.map do |dep|
          # rubygems 1.3.5 doesn't have 'Gem::Dependency#requirement'
          if dep.respond_to?(:requirement)
            reqs = dep.requirement.to_s.gsub(/,/, '')
          else
            reqs = dep.version_requirements
          end
          # Some reqs can be ">= a, < b" versions, let's handle that.
          reqs.to_s.split(/, */).each do |req|
            self[:dependencies] << "rubygem#{self[:suffix]}-#{dep.name} #{req}"
          end
        end # runtime_dependencies
      end # ::Gem::Package
    end # File.open (the gem)
  end # def get_metadata

  # Installs the gem into a temporary directory tree and tars it up as the
  # package payload. Side effects: mutates @paths to the install locations.
  def make_tarball!(tar_path, builddir)
    tmpdir = "#{tar_path}.dir"
    gem = @paths.first
    if self[:prefix]
      installdir = "#{tmpdir}/#{self[:prefix]}"
      # TODO(sissel): Overwriting @paths is bad mojo and confusing...
      # Maybe we shouldn't?
      @paths = [ self[:prefix] ]
    else
      installdir = File.join(tmpdir, ::Gem::dir)
      @paths = [ ::Gem::dir ]
    end
    ::FileUtils.mkdir_p(installdir)
    args = ["gem", "install", "--quiet", "--no-ri", "--no-rdoc",
      "--install-dir", installdir, "--ignore-dependencies"]
    if self[:settings][:bin_path]
      tmp_bin_path = File.join(tmpdir, self[:settings][:bin_path])
      args += ["--bindir", tmp_bin_path]
      @paths << self[:settings][:bin_path]
      FileUtils.mkdir_p(tmp_bin_path) # Fixes #27
    end
    args << gem
    system(*args)
    # make paths relative (/foo becomes ./foo)
    tar(tar_path, @paths.collect {|p| ".#{p}"}, tmpdir)
    FileUtils.rm_r(tmpdir)
    # TODO(sissel): Make a helper method.
    system(*["gzip", "-f", tar_path])
  end
end # class FPM::Source::Gem
- clean up and catch null case
require "fpm/namespace"
require "fpm/source"
require "rubygems/package"
require "rubygems"
require "fileutils"
# Source handler that turns a rubygem (a local .gem file, or a gem name
# to be downloaded from rubygems) into an fpm package source.
class FPM::Source::Gem < FPM::Source
  # Registers gem-specific command-line flags on the option parser.
  def self.flags(opts, settings)
    opts.on("--bin-path DIRECTORY",
            "The directory to install gem executables") do |path|
      settings.source[:bin_path] = path
    end
  end # def flags

  # Resolves the first path argument: if it is not an existing file but
  # looks like a bare gem name, download it; otherwise fail loudly.
  def get_source(params)
    gem = @paths.first
    looks_like_name_re = /^[A-Za-z0-9_-]+$/
    if !File.exists?(gem)
      if gem =~ looks_like_name_re
        download(gem, params[:version])
      else
        raise "Path '#{gem}' is not a file and does not appear to be the name of a rubygem."
      end
    end
  end # def get_source

  # Gems declare their own dependencies, so fpm may recurse into them.
  def can_recurse_dependencies
    true
  end

  # Downloads the named gem (latest, or a specific +version+) into the
  # current directory and points @paths at the downloaded file.
  def download(gem_name, version=nil)
    # This code mostly mutated from rubygem's fetch_command.rb
    # Code use permissible by rubygems's "GPL or these conditions below"
    # http://rubygems.rubyforge.org/rubygems-update/LICENSE_txt.html
    puts "Trying to download #{gem_name} (version=#{version || 'latest'})"
    dep = ::Gem::Dependency.new gem_name, version
    # How to handle prerelease? Some extra magic options?
    #dep.prerelease = options[:prerelease]
    # Older rubygems lack fetch_with_errors; fall back to plain fetch.
    if ::Gem::SpecFetcher.fetcher.respond_to?(:fetch_with_errors)
      specs_and_sources, errors =
        ::Gem::SpecFetcher.fetcher.fetch_with_errors(dep, true, true, false)
    else
      specs_and_sources =
        ::Gem::SpecFetcher.fetcher.fetch(dep, true)
      errors = "???"
    end
    # Each entry is [spec, source_uri]; take the highest version available.
    spec, source_uri = specs_and_sources.sort_by { |s,| s.version }.last
    if spec.nil? then
      raise "Invalid gem? Name: #{gem_name}, Version: #{version}, Errors: #{errors}"
    end
    path = ::Gem::RemoteFetcher.fetcher.download spec, source_uri
    FileUtils.mv path, spec.file_name
    @paths = [spec.file_name]
  end

  # Reads the gemspec out of the .gem file and maps its fields onto the
  # fpm package attributes (name, version, dependencies, ...).
  def get_metadata
    File.open(@paths.first, 'r') do |f|
      ::Gem::Package.open(f, 'r') do |gem|
        spec = gem.metadata
        %w(
          description
          license
          summary
          version
        ).each do |field|
          # Some gemspecs omit fields; fall back to a placeholder.
          self[field.to_sym] = spec.send(field) rescue "unknown"
        end
        self[:name] = "rubygem#{self[:suffix]}-#{spec.name}"
        self[:maintainer] = spec.author
        self[:url] = spec.homepage
        # TODO [Jay]: this will be different for different
        # package managers.  Need to decide how to handle this.
        self[:category] = 'Languages/Development/Ruby'
        # if the gemspec has extensions defined, then this should be a 'native' arch.
        if !spec.extensions.empty?
          self[:architecture] = "native"
        else
          self[:architecture] = "all"
        end
        # make sure we have a description: first non-blank of description,
        # summary, or a generated fallback.
        descriptions = [ self[:description], self[:summary], "#{spec.name} - no description given" ]
        self[:description] = descriptions.detect { |d| !(d.nil? or d.strip.empty?) }
        self[:dependencies] = []
        spec.runtime_dependencies.map do |dep|
          # rubygems 1.3.5 doesn't have 'Gem::Dependency#requirement'
          if dep.respond_to?(:requirement)
            reqs = dep.requirement.to_s.gsub(/,/, '')
          else
            reqs = dep.version_requirements
          end
          # Some reqs can be ">= a, < b" versions, let's handle that.
          reqs.to_s.split(/, */).each do |req|
            self[:dependencies] << "rubygem#{self[:suffix]}-#{dep.name} #{req}"
          end
        end # runtime_dependencies
      end # ::Gem::Package
    end # File.open (the gem)
  end # def get_metadata

  # Installs the gem into a temporary directory tree and tars it up as the
  # package payload. Side effects: mutates @paths to the install locations.
  def make_tarball!(tar_path, builddir)
    tmpdir = "#{tar_path}.dir"
    gem = @paths.first
    if self[:prefix]
      installdir = "#{tmpdir}/#{self[:prefix]}"
      # TODO(sissel): Overwriting @paths is bad mojo and confusing...
      # Maybe we shouldn't?
      @paths = [ self[:prefix] ]
    else
      installdir = File.join(tmpdir, ::Gem::dir)
      @paths = [ ::Gem::dir ]
    end
    ::FileUtils.mkdir_p(installdir)
    args = ["gem", "install", "--quiet", "--no-ri", "--no-rdoc",
      "--install-dir", installdir, "--ignore-dependencies"]
    if self[:settings][:bin_path]
      tmp_bin_path = File.join(tmpdir, self[:settings][:bin_path])
      args += ["--bindir", tmp_bin_path]
      @paths << self[:settings][:bin_path]
      FileUtils.mkdir_p(tmp_bin_path) # Fixes #27
    end
    args << gem
    system(*args)
    # make paths relative (/foo becomes ./foo)
    tar(tar_path, @paths.collect {|p| ".#{p}"}, tmpdir)
    FileUtils.rm_r(tmpdir)
    # TODO(sissel): Make a helper method.
    system(*["gzip", "-f", tar_path])
  end
end # class FPM::Source::Gem
|
require "fpm/package"
# RPM output target: writes a spec file and drives rpmbuild.
class FPM::Target::Rpm < FPM::Package
  # Maps fpm's architecture notion onto RPM's:
  # nil / "native" => the build host's arch, "all" => "noarch".
  def architecture
    case @architecture
    when nil
      return %x{uname -m}.chomp # default to current arch
    when "native"
      return %x{uname -m}.chomp # 'native' is current arch
    when "all"
      # Translate fpm "all" arch to what it means in RPM.
      return "noarch"
    else
      return @architecture
    end
  end # def architecture

  # Path of the generated spec file inside the build directory.
  def specfile(builddir)
    "#{builddir}/#{name}.spec"
  end

  # rpmbuild rejects an empty/missing URL tag in the spec (issue #82),
  # so substitute a placeholder when no URL was given.
  def url
    if @url.nil? || @url.empty?
      'http://nourlgiven.example.com'
    else
      @url
    end
  end

  # Runs rpmbuild against the generated spec in the current directory
  # and moves the resulting rpm(s) to params[:output].
  def build!(params)
    raise "No package name given. Can't assemble package" if !@name
    # TODO(sissel): Abort if 'rpmbuild' tool not found.
    if !replaces.empty?
      $stderr.puts "Warning: RPM does not support 'replaces'"
      $stderr.puts "If you have suggstions as to what --replaces means to RPM"
      $stderr.puts "Please let me know: https://github.com/jordansissel/fpm/issues"
    end
    # rpmbuild expects the standard topdir layout to exist.
    %w(BUILD RPMS SRPMS SOURCES SPECS).each { |d| Dir.mkdir(d) }
    args = ["rpmbuild", "-ba",
            "--define", "buildroot #{Dir.pwd}/BUILD",
            "--define", "_topdir #{Dir.pwd}",
            "--define", "_sourcedir #{Dir.pwd}",
            "--define", "_rpmdir #{Dir.pwd}/RPMS",
            "#{name}.spec"]
    ret = system(*args)
    if !ret
      raise "rpmbuild failed (exit code: #{$?.exitstatus})"
    end
    Dir["#{Dir.pwd}/RPMS/**/*.rpm"].each do |path|
      # This should only output one rpm, should we verify this?
      system("mv", path, params[:output])
    end
  end # def build!
end # class FPM::Target::RPM
proposed fix for issue #82
require "fpm/package"
# RPM output target: generates a spec file and invokes rpmbuild to
# assemble the final package.
class FPM::Target::Rpm < FPM::Package
  # Translate fpm's architecture value into RPM's vocabulary:
  # nil and "native" both mean the build host's arch, "all" => "noarch".
  def architecture
    if @architecture.nil? || @architecture == "native"
      %x{uname -m}.chomp
    elsif @architecture == "all"
      # RPM spells arch-independent packages "noarch".
      "noarch"
    else
      @architecture
    end
  end # def architecture

  # Location of the spec file within the given build directory.
  def specfile(builddir)
    [builddir, "#{name}.spec"].join("/")
  end

  # rpmbuild rejects an empty URL tag, so fall back to a placeholder
  # when no URL was provided.
  def url
    given = @url
    (given.nil? || given.empty?) ? 'http://nourlgiven.example.com' : given
  end

  # Runs rpmbuild in the current directory and moves the produced rpm(s)
  # to params[:output].
  def build!(params)
    raise "No package name given. Can't assemble package" unless @name
    # TODO(sissel): Abort if 'rpmbuild' tool not found.
    unless replaces.empty?
      $stderr.puts "Warning: RPM does not support 'replaces'"
      $stderr.puts "If you have suggstions as to what --replaces means to RPM"
      $stderr.puts "Please let me know: https://github.com/jordansissel/fpm/issues"
    end
    # rpmbuild expects the standard topdir layout to exist.
    %w(BUILD RPMS SRPMS SOURCES SPECS).each do |subdir|
      Dir.mkdir(subdir)
    end
    command = [
      "rpmbuild", "-ba",
      "--define", "buildroot #{Dir.pwd}/BUILD",
      "--define", "_topdir #{Dir.pwd}",
      "--define", "_sourcedir #{Dir.pwd}",
      "--define", "_rpmdir #{Dir.pwd}/RPMS",
      "#{name}.spec"
    ]
    succeeded = system(*command)
    raise "rpmbuild failed (exit code: #{$?.exitstatus})" unless succeeded
    Dir["#{Dir.pwd}/RPMS/**/*.rpm"].each do |rpm_path|
      # This should only output one rpm, should we verify this?
      system("mv", rpm_path, params[:output])
    end
  end # def build!
end # class FPM::Target::RPM
|
require 'fastimage'
module Frameit
  # Puts device frames around screenshots using the downloaded Apple
  # frame templates.
  class Editor
    # Frame color variants, as spelled in the template file names.
    module Color
      BLACK = "SpaceGray"
      SILVER = "Slvr"
    end
    # Orientation tokens, as spelled in the template file names.
    module Orientation
      PORTRAIT = "Vert"
      LANDSCAPE = "Horz"
    end

    def initialize
      # Ensure the frame templates are downloaded/converted before use.
      FrameConverter.new.run
    end

    # Frames every .png below +path+ (skipping already-framed files) and
    # writes each result next to the original as *_framed.png.
    def run(path, color = Color::BLACK)
      @color = color
      Dir["#{path}/**/*.png"].each do |screenshot|
        next if screenshot.include?"_framed.png"
        begin
          template_path = get_template(screenshot)
          if template_path
            template = MiniMagick::Image.open(template_path)
            image = MiniMagick::Image.open(screenshot)
            offset_information = image_offset(screenshot)
            raise "Could not find offset_information for '#{screenshot}'" unless (offset_information and offset_information[:width])
            width = offset_information[:width]
            image.resize width
            result = template.composite(image) do |c|
              c.compose "Over"
              c.geometry offset_information[:offset]
            end
            output_path = screenshot.gsub('.png', '_framed.png')
            result.write output_path
            Helper.log.info "Successfully framed screenshots at path '#{output_path}'".green
          end
        rescue Exception => ex
          # Keep framing the remaining screenshots even if one fails.
          Helper.log.error ex
        end
      end
    end

    # This will detect the screen size and choose the correct template
    def get_template(path)
      parts = [
        device_name(screen_size(path)),
        orientation_name(path),
        @color
      ]
      templates_path = [ENV['HOME'], FrameConverter::FRAME_PATH].join('/')
      templates = Dir["#{templates_path}/**/#{parts.join('_')}*.png"]
      if templates.count == 0
        if screen_size(path) == Deliver::AppScreenshot::ScreenSize::IOS_35
          Helper.log.warn "Unfortunately 3.5\" device frames were discontinued. Skipping screen '#{path}'".yellow
        else
          Helper.log.error "Could not find a valid template for screenshot '#{path}'".red
          Helper.log.error "You can download new templates from '#{FrameConverter::DOWNLOAD_URL}'"
          Helper.log.error "and store them in '#{templates_path}'"
          Helper.log.error "Missing file: '#{parts.join('_')}.psd'".red
        end
        return nil
      else
        # Helper.log.debug "Found template '#{templates.first}' for screenshot '#{path}'"
        return templates.first.gsub(" ", "\ ")
      end
    end

    private

    # Screen size constant (from Deliver) for the screenshot at +path+.
    def screen_size(path)
      Deliver::AppScreenshot.calculate_screen_size(path)
    end

    # Device part of the template file name for a given screen size.
    def device_name(screen_size)
      size = Deliver::AppScreenshot::ScreenSize
      case screen_size
      when size::IOS_55
        return 'iPhone_6_Plus'
      when size::IOS_47
        return 'iPhone_6'
      when size::IOS_40
        return 'iPhone_5s'
      when size::IOS_IPAD
        return 'iPad_Air'
      end
    end

    # Portrait when the image is taller than wide, landscape otherwise.
    def orientation_name(path)
      size = FastImage.size(path)
      return Orientation::PORTRAIT if size[0] < size[1]
      return Orientation::LANDSCAPE
    end

    # Pixel offset and target width for placing the screenshot inside
    # the device frame template.
    # FIX: the iPad entries previously returned offset '+0+0' and an
    # empty-string width; '' is truthy, so the width guard in #run passed
    # and `image.resize ''` produced broken output. Use the measured
    # iPad Air frame values instead.
    def image_offset(path)
      size = Deliver::AppScreenshot::ScreenSize
      case orientation_name(path)
      when Orientation::PORTRAIT
        case screen_size(path)
        when size::IOS_55
          return {
            offset: '+42+147',
            width: 539
          }
        when size::IOS_47
          return {
            offset: '+41+154',
            width: 530
          }
        when size::IOS_40
          return {
            offset: "+54+197",
            width: 543
          }
        when size::IOS_IPAD
          return {
            offset: '+57+112',
            width: 765
          }
        end
      when Orientation::LANDSCAPE
        case screen_size(path)
        when size::IOS_55
          return {
            offset: "+146+41",
            width: 960
          }
        when size::IOS_47
          return {
            offset: "+153+41",
            width: 946
          }
        when size::IOS_40
          return {
            offset: "+201+48",
            width: 970
          }
        when size::IOS_IPAD
          return {
            offset: '+113+57',
            width: 1024
          }
        end
      end
    end
  end
end
Added iPad support for both portrait and landscape
require 'fastimage'
module Frameit
  # Puts device frames around screenshots using the downloaded Apple
  # frame templates.
  class Editor
    # Frame color variants, as spelled in the template file names.
    module Color
      BLACK = "SpaceGray"
      SILVER = "Slvr"
    end
    # Orientation tokens, as spelled in the template file names.
    module Orientation
      PORTRAIT = "Vert"
      LANDSCAPE = "Horz"
    end

    def initialize
      # Ensure the frame templates are downloaded/converted before use.
      FrameConverter.new.run
    end

    # Frames every .png below +path+ (skipping already-framed files) and
    # writes each result next to the original as *_framed.png.
    def run(path, color = Color::BLACK)
      @color = color
      Dir["#{path}/**/*.png"].each do |screenshot|
        next if screenshot.include?"_framed.png"
        begin
          template_path = get_template(screenshot)
          if template_path
            template = MiniMagick::Image.open(template_path)
            image = MiniMagick::Image.open(screenshot)
            offset_information = image_offset(screenshot)
            raise "Could not find offset_information for '#{screenshot}'" unless (offset_information and offset_information[:width])
            width = offset_information[:width]
            image.resize width
            result = template.composite(image) do |c|
              c.compose "Over"
              c.geometry offset_information[:offset]
            end
            output_path = screenshot.gsub('.png', '_framed.png')
            result.write output_path
            Helper.log.info "Successfully framed screenshots at path '#{output_path}'".green
          end
        rescue Exception => ex
          # Keep framing the remaining screenshots even if one fails.
          Helper.log.error ex
        end
      end
    end

    # This will detect the screen size and choose the correct template
    def get_template(path)
      parts = [
        device_name(screen_size(path)),
        orientation_name(path),
        @color
      ]
      templates_path = [ENV['HOME'], FrameConverter::FRAME_PATH].join('/')
      templates = Dir["#{templates_path}/**/#{parts.join('_')}*.png"]
      if templates.count == 0
        if screen_size(path) == Deliver::AppScreenshot::ScreenSize::IOS_35
          Helper.log.warn "Unfortunately 3.5\" device frames were discontinued. Skipping screen '#{path}'".yellow
        else
          Helper.log.error "Could not find a valid template for screenshot '#{path}'".red
          Helper.log.error "You can download new templates from '#{FrameConverter::DOWNLOAD_URL}'"
          Helper.log.error "and store them in '#{templates_path}'"
          Helper.log.error "Missing file: '#{parts.join('_')}.psd'".red
        end
        return nil
      else
        # Helper.log.debug "Found template '#{templates.first}' for screenshot '#{path}'"
        return templates.first.gsub(" ", "\ ")
      end
    end

    private

    # Screen size constant (from Deliver) for the screenshot at +path+.
    def screen_size(path)
      Deliver::AppScreenshot.calculate_screen_size(path)
    end

    # Device part of the template file name for a given screen size.
    def device_name(screen_size)
      size = Deliver::AppScreenshot::ScreenSize
      case screen_size
      when size::IOS_55
        return 'iPhone_6_Plus'
      when size::IOS_47
        return 'iPhone_6'
      when size::IOS_40
        return 'iPhone_5s'
      when size::IOS_IPAD
        return 'iPad_Air'
      end
    end

    # Portrait when the image is taller than wide, landscape otherwise.
    def orientation_name(path)
      size = FastImage.size(path)
      return Orientation::PORTRAIT if size[0] < size[1]
      return Orientation::LANDSCAPE
    end

    # Pixel offset and target width for placing the screenshot inside the
    # device frame template, keyed by orientation and screen size.
    def image_offset(path)
      size = Deliver::AppScreenshot::ScreenSize
      case orientation_name(path)
      when Orientation::PORTRAIT
        case screen_size(path)
        when size::IOS_55
          return {
            offset: '+42+147',
            width: 539
          }
        when size::IOS_47
          return {
            offset: '+41+154',
            width: 530
          }
        when size::IOS_40
          return {
            offset: "+54+197",
            width: 543
          }
        when size::IOS_IPAD
          return {
            offset: '+57+112',
            width: 765
          }
        end
      when Orientation::LANDSCAPE
        case screen_size(path)
        when size::IOS_55
          return {
            offset: "+146+41",
            width: 960
          }
        when size::IOS_47
          return {
            offset: "+153+41",
            width: 946
          }
        when size::IOS_40
          return {
            offset: "+201+48",
            width: 970
          }
        when size::IOS_IPAD
          return {
            offset: '+113+57',
            width: 1024
          }
        end
      end
    end
  end
end
module Frameit
  # Frames a single screenshot, optionally adding a background, keyword
  # and title above the framed device.
  class Editor
    attr_accessor :screenshot # reference to the screenshot object to fetch the path, title, etc.
    attr_accessor :frame # the frame of the device
    attr_accessor :image # the current image used for editing
    attr_accessor :top_space_above_device

    # Entry point: frames the given screenshot and writes the result to
    # disk as *_framed.png next to the original.
    def frame!(screenshot)
      self.screenshot = screenshot
      prepare_image
      if load_frame # e.g. Mac doesn't need a frame
        self.frame = MiniMagick::Image.open(load_frame)
      end
      if should_add_title?
        @image = complex_framing
      else
        # easy mode from 1.0 - no title or background
        width = offset['width']
        image.resize width # resize the image to fit the frame
        put_into_frame # put it in the frame
      end
      store_result # write to file system
    end

    # Path of the matching device frame template (nil when none, e.g. Mac).
    def load_frame
      TemplateFinder.get_template(screenshot)
    end

    def prepare_image
      @image = MiniMagick::Image.open(screenshot.path)
    end

    private

    # Writes the edited image next to the original as *_framed.png.
    def store_result
      output_path = screenshot.path.gsub('.png', '_framed.png').gsub('.PNG', '_framed.png')
      image.format "png"
      image.write output_path
      Helper.log.info "Added frame: '#{File.expand_path(output_path)}'".green
    end

    # puts the screenshot into the frame
    def put_into_frame
      @image = frame.composite(image, "png") do |c|
        c.compose "Over"
        c.geometry offset['offset']
      end
    end

    # Offset/width information for this screenshot, from the Framefile
    # config when present, otherwise from the built-in Offsets table.
    # FIX: the validity check previously tested @offset_information['offset']
    # twice; the second operand was clearly meant to be 'width', which the
    # easy mode (#frame!) and #resize_frame! both consume.
    def offset
      return @offset_information if @offset_information
      @offset_information = fetch_config['offset'] || Offsets.image_offset(screenshot)
      if @offset_information and (@offset_information['offset'] or @offset_information['width'])
        return @offset_information
      end
      raise "Could not find offset_information for '#{screenshot}'"
    end

    #########################################################################################
    # Everything below is related to title, background, etc. and is not used in the easy mode
    #########################################################################################

    # this is used to correct the 1:1 offset information
    # the offset information is stored to work for the template images
    # since we resize the template images to have higher quality screenshots
    # we need to modify the offset information by a certain factor
    def modify_offset(multiplicator)
      # Format: "+133+50"
      hash = offset['offset']
      x = hash.split("+")[1].to_f * multiplicator
      y = hash.split("+")[2].to_f * multiplicator
      new_offset = "+#{x.round}+#{y.round}"
      @offset_information['offset'] = new_offset
    end

    # Do we add a background and title as well?
    def should_add_title?
      return (fetch_config['background'] and (fetch_config['title'] or fetch_config['keyword']))
    end

    # more complex mode: background, frame and title
    def complex_framing
      background = generate_background
      if self.frame # we have no frame on le mac
        resize_frame!
        @image = put_into_frame
        # Decrease the size of the framed screenshot to fit into the defined padding + background
        frame_width = background.width - frame_padding * 2
        image.resize "#{frame_width}x"
      end
      @image = put_device_into_background(background)
      if fetch_config['title']
        @image = add_title
      end
      image
    end

    # Padding around the frames
    def frame_padding
      multi = 1.0
      multi = 1.7 if self.screenshot.is_triple_density?
      return fetch_config['padding'] * multi
    end

    # Returns a correctly sized background image
    def generate_background
      background = MiniMagick::Image.open(fetch_config['background'])
      if background.height != screenshot.size[1]
        background.resize "#{screenshot.size[0]}x#{screenshot.size[1]}!" # `!` says it should ignore the ratio
      end
      background
    end

    # Composites the (framed) device image onto the background, centered
    # horizontally and slightly below the bottom edge.
    def put_device_into_background(background)
      left_space = (background.width / 2.0 - image.width / 2.0).round
      bottom_space = -(image.height / 10).round # to be just a bit below the image bottom
      bottom_space -= 40 if screenshot.is_portrait? # even more for portrait mode
      bottom_space -= 50 if (screenshot.is_mini? and screenshot.is_portrait?) # super old devices
      self.top_space_above_device = background.height - image.height - bottom_space
      @image = background.composite(image, "png") do |c|
        c.compose "Over"
        c.geometry "+#{left_space}+#{top_space_above_device}"
      end
      return image
    end

    # Resize the frame as it's too low quality by default
    def resize_frame!
      multiplicator = (screenshot.size[0].to_f / offset['width'].to_f) # by how much do we have to change this?
      new_frame_width = multiplicator * frame.width # the new width for the frame
      frame.resize "#{new_frame_width.round}x" # resize it to the calculated witdth
      modify_offset(multiplicator) # modify the offset to properly insert the screenshot into the frame later
    end

    # Add the title above the device
    def add_title
      title_images = build_title_images(image.width)
      keyword = title_images[:keyword]
      title = title_images[:title]
      # keyword is optional; `rescue 0` covers the nil case
      sum_width = (keyword.width rescue 0) + title.width + keyword_padding
      top_space = (top_space_above_device / 2.0 - actual_font_size / 2.0).round # centered
      left_space = (image.width / 2.0 - sum_width / 2.0).round
      if keyword
        @image = image.composite(keyword, "png") do |c|
          c.compose "Over"
          c.geometry "+#{left_space}+#{top_space}"
        end
      end
      left_space += (keyword.width rescue 0) + keyword_padding
      @image = image.composite(title, "png") do |c|
        c.compose "Over"
        c.geometry "+#{left_space}+#{top_space}"
      end
      image
    end

    # Font size scaled to the available space above the device.
    def actual_font_size
      [top_space_above_device / 3.0, @image.width / 30.0].max.round
    end

    # Horizontal gap between the keyword and the title.
    def keyword_padding
      (actual_font_size / 2.0).round
    end

    # This will build 2 individual images with the title, which will then be added to the real image
    def build_title_images(max_width)
      words = [:keyword, :title].keep_if{ |a| fetch_text(a) } # optional keyword/title
      results = {}
      words.each do |key|
        # Create empty background
        empty_path = File.join(Helper.gem_path('frameit'), "lib/assets/empty.png")
        title_image = MiniMagick::Image.open(empty_path)
        image_height = actual_font_size * 2 # gets trimmed afterwards anyway, and on the iPad the `y` would get cut
        title_image.combine_options do |i|
          i.resize "#{max_width}x#{image_height}!" # `!` says it should ignore the ratio
        end
        current_font = font(key)
        Helper.log.debug "Using #{current_font} as font the #{key} of #{screenshot.path}" if $verbose and current_font
        # Add the actual title
        title_image.combine_options do |i|
          i.font current_font if current_font
          i.gravity "Center"
          i.pointsize actual_font_size
          i.draw "text 0,0 '#{fetch_text(key)}'"
          i.fill fetch_config[key.to_s]['color']
        end
        title_image.trim # remove white space
        results[key] = title_image
      end
      results
    end

    # Loads the config (colors, background, texts, etc.)
    # Don't use this method to access the actual text and use `fetch_texts` instead
    def fetch_config
      return @config if @config
      config_path = File.join(File.expand_path("..", screenshot.path), "Framefile.json")
      config_path = File.join(File.expand_path("../..", screenshot.path), "Framefile.json") unless File.exists?config_path
      file = ConfigParser.new.load(config_path)
      return {} unless file # no config file at all
      @config = file.fetch_value(screenshot.path)
    end

    # Fetches the title + keyword for this particular screenshot
    def fetch_text(type)
      raise "Valid parameters :keyword, :title" unless [:keyword, :title].include?type
      # Try to get it from a keyword.strings or title.strings file
      strings_path = File.join(File.expand_path("..", screenshot.path), "#{type.to_s}.strings")
      if File.exists?strings_path
        parsed = StringsParser.parse(strings_path)
        result = parsed.find { |k, v| screenshot.path.include?k }
        return result.last if result
      end
      # No string files, fallback to Framefile config
      result = fetch_config[type.to_s]['text']
      Helper.log.debug "Falling back to default text as there was nothing specified in the .strings file" if $verbose
      if !result and type == :title
        # title is mandatory
        raise "Could not get title for screenshot #{screenshot.path}. Please provide one in your Framefile.json".red
      end
      return result
    end

    # The font we want to use
    def font(key)
      single_font = fetch_config[key.to_s]['font']
      return single_font if single_font
      fonts = fetch_config[key.to_s]['fonts']
      if fonts
        fonts.each do |font|
          if font['supported']
            font['supported'].each do |language|
              if screenshot.path.include?language
                return font["font"]
              end
            end
          else
            # No `supported` array, this will always be true
            Helper.log.debug "Found a font with no list of supported languages, using this now" if $verbose
            return font["font"]
          end
        end
      end
      Helper.log.debug "No custom font specified, using the default one" if $verbose
      return nil
    end
  end
end
Added more debugging information for --verbose mode
module Frameit
class Editor
attr_accessor :screenshot # reference to the screenshot object to fetch the path, title, etc.
attr_accessor :frame # the frame of the device
attr_accessor :image # the current image used for editing
attr_accessor :top_space_above_device
def frame!(screenshot)
self.screenshot = screenshot
prepare_image
if load_frame # e.g. Mac doesn't need a frame
self.frame = MiniMagick::Image.open(load_frame)
end
if should_add_title?
@image = complex_framing
else
# easy mode from 1.0 - no title or background
width = offset['width']
image.resize width # resize the image to fit the frame
put_into_frame # put it in the frame
end
store_result # write to file system
end
def load_frame
TemplateFinder.get_template(screenshot)
end
def prepare_image
@image = MiniMagick::Image.open(screenshot.path)
end
private
def store_result
output_path = screenshot.path.gsub('.png', '_framed.png').gsub('.PNG', '_framed.png')
image.format "png"
image.write output_path
Helper.log.info "Added frame: '#{File.expand_path(output_path)}'".green
end
# puts the screenshot into the frame
def put_into_frame
@image = frame.composite(image, "png") do |c|
c.compose "Over"
c.geometry offset['offset']
end
end
def offset
return @offset_information if @offset_information
@offset_information = fetch_config['offset'] || Offsets.image_offset(screenshot)
if @offset_information and (@offset_information['offset'] or @offset_information['offset'])
return @offset_information
end
raise "Could not find offset_information for '#{screenshot}'"
end
#########################################################################################
# Everything below is related to title, background, etc. and is not used in the easy mode
#########################################################################################
# this is used to correct the 1:1 offset information
# the offset information is stored to work for the template images
# since we resize the template images to have higher quality screenshots
# we need to modify the offset information by a certain factor
def modify_offset(multiplicator)
# Format: "+133+50"
hash = offset['offset']
x = hash.split("+")[1].to_f * multiplicator
y = hash.split("+")[2].to_f * multiplicator
new_offset = "+#{x.round}+#{y.round}"
@offset_information['offset'] = new_offset
end
# Do we add a background and title as well?
def should_add_title?
return (fetch_config['background'] and (fetch_config['title'] or fetch_config['keyword']))
end
# more complex mode: background, frame and title
def complex_framing
background = generate_background
if self.frame # we have no frame on le mac
resize_frame!
@image = put_into_frame
# Decrease the size of the framed screenshot to fit into the defined padding + background
frame_width = background.width - frame_padding * 2
image.resize "#{frame_width}x"
end
@image = put_device_into_background(background)
if fetch_config['title']
@image = add_title
end
image
end
# Padding around the frames
def frame_padding
multi = 1.0
multi = 1.7 if self.screenshot.is_triple_density?
return fetch_config['padding'] * multi
end
# Returns a correctly sized background image
def generate_background
background = MiniMagick::Image.open(fetch_config['background'])
if background.height != screenshot.size[1]
background.resize "#{screenshot.size[0]}x#{screenshot.size[1]}!" # `!` says it should ignore the ratio
end
background
end
def put_device_into_background(background)
left_space = (background.width / 2.0 - image.width / 2.0).round
bottom_space = -(image.height / 10).round # to be just a bit below the image bottom
bottom_space -= 40 if screenshot.is_portrait? # even more for portrait mode
bottom_space -= 50 if (screenshot.is_mini? and screenshot.is_portrait?) # super old devices
self.top_space_above_device = background.height - image.height - bottom_space
@image = background.composite(image, "png") do |c|
c.compose "Over"
c.geometry "+#{left_space}+#{top_space_above_device}"
end
return image
end
# Resize the frame as it's too low quality by default
def resize_frame!
multiplicator = (screenshot.size[0].to_f / offset['width'].to_f) # by how much do we have to change this?
new_frame_width = multiplicator * frame.width # the new width for the frame
frame.resize "#{new_frame_width.round}x" # resize it to the calculated witdth
modify_offset(multiplicator) # modify the offset to properly insert the screenshot into the frame later
end
# Add the title above the device
def add_title
title_images = build_title_images(image.width)
keyword = title_images[:keyword]
title = title_images[:title]
sum_width = (keyword.width rescue 0) + title.width + keyword_padding
top_space = (top_space_above_device / 2.0 - actual_font_size / 2.0).round # centered
left_space = (image.width / 2.0 - sum_width / 2.0).round
if keyword
@image = image.composite(keyword, "png") do |c|
c.compose "Over"
c.geometry "+#{left_space}+#{top_space}"
end
end
left_space += (keyword.width rescue 0) + keyword_padding
@image = image.composite(title, "png") do |c|
c.compose "Over"
c.geometry "+#{left_space}+#{top_space}"
end
image
end
def actual_font_size
[top_space_above_device / 3.0, @image.width / 30.0].max.round
end
def keyword_padding
(actual_font_size / 2.0).round
end
# This will build 2 individual images with the title, which will then be added to the real image
#
# Renders the (optional) keyword and the title onto separate images sized
# relative to `actual_font_size`, then trims the surrounding white space
# so they can be composited precisely by the caller.
#
# @param max_width [Integer] maximum width for each rendered text image
# @return [Hash{Symbol => MiniMagick::Image}] keys :keyword and/or :title
def build_title_images(max_width)
words = [:keyword, :title].keep_if{ |a| fetch_text(a) } # optional keyword/title
results = {}
words.each do |key|
# Create empty background
empty_path = File.join(Helper.gem_path('frameit'), "lib/assets/empty.png")
title_image = MiniMagick::Image.open(empty_path)
image_height = actual_font_size * 2 # gets trimmed afterwards anyway, and on the iPad the `y` would get cut
title_image.combine_options do |i|
i.resize "#{max_width}x#{image_height}!" # `!` says it should ignore the ratio
end
current_font = font(key)
text = fetch_text(key)
Helper.log.debug "Using #{current_font} as font the #{key} of #{screenshot.path}" if $verbose and current_font
Helper.log.debug "Adding text '#{fetch_text(key)}'"if $verbose
# Add the actual title
title_image.combine_options do |i|
i.font current_font if current_font
i.gravity "Center"
i.pointsize actual_font_size
i.draw "text 0,0 '#{fetch_text(key)}'"
i.fill fetch_config[key.to_s]['color']
end
title_image.trim # remove white space
results[key] = title_image
end
results
end
# Loads the config (colors, background, texts, etc.) from Framefile.json.
# Looks next to the screenshot first, then one directory above; the parsed
# value is memoized in @config.
#
# Don't use this method to access the actual text and use `fetch_text` instead
#
# @return [Hash] the config values for this screenshot, or {} when no config file exists
def fetch_config
  return @config if @config
  config_path = File.join(File.expand_path("..", screenshot.path), "Framefile.json")
  # FIX: `File.exists?` is deprecated — use `File.exist?` (with parentheses)
  config_path = File.join(File.expand_path("../..", screenshot.path), "Framefile.json") unless File.exist?(config_path)
  file = ConfigParser.new.load(config_path)
  return {} unless file # no config file at all
  @config = file.fetch_value(screenshot.path)
end
# Fetches the title + keyword for this particular screenshot
#
# Tries a `keyword.strings` / `title.strings` file next to the screenshot
# first, then falls back to the Framefile.json config.
#
# @param type [Symbol] either :keyword or :title
# @return [String, nil] the text, or nil when no keyword is defined
# @raise [RuntimeError] when type is invalid, or no title could be found
def fetch_text(type)
  raise "Valid parameters :keyword, :title" unless [:keyword, :title].include?(type)
  # Try to get it from a keyword.strings or title.strings file
  strings_path = File.join(File.expand_path("..", screenshot.path), "#{type}.strings")
  # FIX: `File.exists?` is deprecated — use `File.exist?` (with parentheses)
  if File.exist?(strings_path)
    parsed = StringsParser.parse(strings_path)
    result = parsed.find { |k, v| screenshot.path.include?(k) }
    return result.last if result
  end
  # No string files, fallback to Framefile config
  result = fetch_config[type.to_s]['text']
  Helper.log.debug "Falling back to default text as there was nothing specified in the .strings file" if $verbose
  if !result and type == :title
    # title is mandatory
    raise "Could not get title for screenshot #{screenshot.path}. Please provide one in your Framefile.json".red
  end
  return result
end
# Resolves the font to use for the given config key (:keyword or :title).
#
# Precedence: an explicit `font` entry wins; otherwise the `fonts` array is
# scanned — an entry whose `supported` list contains the screenshot's
# language (or an entry with no `supported` list at all) is chosen.
# Returns nil when the default font should be used.
def font(key)
  explicit_font = fetch_config[key.to_s]['font']
  return explicit_font if explicit_font
  candidates = fetch_config[key.to_s]['fonts']
  if candidates
    candidates.each do |candidate|
      languages = candidate['supported']
      if languages
        matched = languages.find { |language| screenshot.path.include?(language) }
        return candidate["font"] if matched
      else
        # No `supported` array, this will always be true
        Helper.log.debug "Found a font with no list of supported languages, using this now" if $verbose
        return candidate["font"]
      end
    end
  end
  Helper.log.debug "No custom font specified for #{screenshot}, using the default one" if $verbose
  return nil
end
end
end |
# encoding: utf-8

# Gaston — gem version constant.
class Gaston
  VERSION = '0.1.1'
end
Bump version.
Signed-off-by: chatgris <f9469d12bf3d131e7aae80be27ccfe58aa9db1f1@af83.com>
# encoding: utf-8

# Gaston — gem version constant.
class Gaston
  VERSION = '0.1.2'
end
|
#!/usr/bin/env ruby
# DataMapper model for a penalty notice stored in the scraper's "swdata" table.
class Penalty
  include DataMapper::Resource

  storage_names[:default] = "swdata"

  property :id, String, :key => true
  property :notice, String
  property :code, String
  property :party_served, String
  property :notes, String
  property :issued_by, String
  property :date_served, String
  property :penalty, String
  property :suburb, String
  property :trade_name, String
  property :council, String
  property :address, String
  property :date, String
  property :details_link, String
  property :latitude, String
  property :longitude, String
  property :date_scraped, String

  # URL of the detail page for this penalty.
  def url
    self.details_link
  end

  # Geocodes this record's address via Geokit, caching the result in the
  # latitude/longitude attributes.
  #
  # @param opts [Hash] pass :force => true to re-geocode even when cached
  # @return [Array, false] [latitude, longitude] on success, false on failure
  def geocode(opts = {})
    force = opts[:force]
    if !self.latitude.blank? && !self.longitude.blank? && !force
      return [self.latitude, self.longitude]
    end
    tries = 3
    begin
      location = ::Geokit::Geocoders::MultiGeocoder.geocode(self.address)
    rescue ::OpenURI::HTTPError
      # BUG FIX: was `tries =- 1`, which *assigns* -1 so the request was never
      # retried; `tries -= 1` decrements, retrying up to 3 attempts total.
      retry if (tries -= 1) > 0
    end
    if location
      self.latitude = location.lat
      self.longitude = location.lng
    else
      return false
    end
    [self.latitude, self.longitude]
  end
end
Stub out old attribute names for compatibility
#!/usr/bin/env ruby
# DataMapper model for a penalty notice stored in the scraper's "swdata" table.
# Also stubs out old attribute names for backwards compatibility.
class Penalty
  include DataMapper::Resource

  storage_names[:default] = "swdata"

  property :id, String, :key => true
  property :notice, String
  property :code, String
  property :party_served, String
  property :notes, String
  property :issued_by, String
  property :date_served, String
  property :penalty, String
  property :suburb, String
  property :trade_name, String
  property :council, String
  property :address, String
  property :date, String
  property :details_link, String
  property :latitude, String
  property :longitude, String
  property :date_scraped, String

  # URL of the detail page for this penalty.
  def url
    self.details_link
  end

  # Lower-cased class name, used as a record type tag.
  def type
    self.class.to_s.downcase
  end

  # Compatibility aliases mapping the old attribute names
  # onto the current column names.
  def trading_name
    self.trade_name
  end

  def offence_description
    self.notice
  end

  def offence_code
    self.code
  end

  def penalty_amount
    self.penalty
  end

  def served_to
    self.party_served
  end

  def pursued_by
    self.issued_by
  end

  def council_area
    self.council
  end

  # Geocodes this record's address via Geokit, caching the result in the
  # latitude/longitude attributes.
  #
  # @param opts [Hash] pass :force => true to re-geocode even when cached
  # @return [Array, false] [latitude, longitude] on success, false on failure
  def geocode(opts = {})
    force = opts[:force]
    if !self.latitude.blank? && !self.longitude.blank? && !force
      return [self.latitude, self.longitude]
    end
    tries = 3
    begin
      location = ::Geokit::Geocoders::MultiGeocoder.geocode(self.address)
    rescue ::OpenURI::HTTPError
      # BUG FIX: was `tries =- 1`, which *assigns* -1 so the request was never
      # retried; `tries -= 1` decrements, retrying up to 3 attempts total.
      retry if (tries -= 1) > 0
    end
    if location
      self.latitude = location.lat
      self.longitude = location.lng
    else
      return false
    end
    [self.latitude, self.longitude]
  end
end
|
# Gauges — gem version constant.
class Gauges
  VERSION = "0.1.0"
end
0.2.0, since 0.1.0 was flawed.
# Gauges — gem version constant.
class Gauges
  VERSION = "0.2.0"
end
|
# Generated by jeweler
# DO NOT EDIT THIS FILE DIRECTLY
# Instead, edit Jeweler::Tasks in Rakefile, and run the gemspec command
# -*- encoding: utf-8 -*-
Gem::Specification.new do |s|
s.name = %q{java-autotest}
s.version = "0.0.1.beta1"
s.required_rubygems_version = Gem::Requirement.new("> 1.3.1") if s.respond_to? :required_rubygems_version=
s.authors = ["rodolfoliviero"]
s.date = %q{2010-08-21}
s.default_executable = %q{java-autotest}
s.description = %q{Java AutoTest}
s.email = %q{rodolfoliviero@gmail.com}
s.executables = ["java-autotest"]
s.extra_rdoc_files = [
"LICENSE",
"README.rdoc"
]
# All files shipped in the packaged gem
s.files = [
".document",
".gitignore",
"Gemfile",
"Gemfile.lock",
"LICENSE",
"README.rdoc",
"Rakefile",
"VERSION.yml",
"bin/java-autotest",
"java_autotest.gemspec",
"lib/file.rb",
"lib/java_autotest.rb",
"lib/java_autotest/autotest.rb",
"spec/file_spec.rb",
"spec/java_autotest/autotest_spec.rb",
"spec/spec.opts",
"spec/spec_helper.rb",
"spec/src/main/java/Impl.java",
"spec/src/test/java/ImplTest.java"
]
s.homepage = %q{http://github.com/rodolfoliviero/java-autotest}
s.rdoc_options = ["--charset=UTF-8"]
s.require_paths = ["lib"]
s.rubygems_version = %q{1.3.7}
s.summary = %q{Java AutoTest}
s.test_files = [
"spec/spec_helper.rb",
"spec/java_autotest/autotest_spec.rb",
"spec/file_spec.rb"
]
# Dependency declarations, guarded for the capabilities of older RubyGems
if s.respond_to? :specification_version then
current_version = Gem::Specification::CURRENT_SPECIFICATION_VERSION
s.specification_version = 3
if Gem::Version.new(Gem::VERSION) >= Gem::Version.new('1.2.0') then
s.add_development_dependency(%q<rspec>, [">= 1.2.9"])
else
s.add_dependency(%q<rspec>, [">= 1.2.9"])
end
else
s.add_dependency(%q<rspec>, [">= 1.2.9"])
end
end
Regenerated gemspec for version 0.0.1.beta2
# Generated by jeweler
# DO NOT EDIT THIS FILE DIRECTLY
# Instead, edit Jeweler::Tasks in Rakefile, and run the gemspec command
# -*- encoding: utf-8 -*-
Gem::Specification.new do |s|
s.name = %q{java-autotest}
s.version = "0.0.1.beta2"
s.required_rubygems_version = Gem::Requirement.new("> 1.3.1") if s.respond_to? :required_rubygems_version=
s.authors = ["rodolfoliviero"]
s.date = %q{2010-08-21}
s.default_executable = %q{java-autotest}
s.description = %q{Java AutoTest}
s.email = %q{rodolfoliviero@gmail.com}
s.executables = ["java-autotest"]
s.extra_rdoc_files = [
"LICENSE",
"README.rdoc"
]
# All files shipped in the packaged gem
s.files = [
".document",
".gitignore",
"Gemfile",
"Gemfile.lock",
"LICENSE",
"README.rdoc",
"Rakefile",
"VERSION.yml",
"bin/java-autotest",
"java-autotest.gemspec",
"lib/file.rb",
"lib/java_autotest.rb",
"lib/java_autotest/autotest.rb",
"spec/file_spec.rb",
"spec/java_autotest/autotest_spec.rb",
"spec/spec.opts",
"spec/spec_helper.rb",
"spec/src/main/java/Impl.java",
"spec/src/test/java/ImplTest.java"
]
s.homepage = %q{http://github.com/rodolfoliviero/java-autotest}
s.rdoc_options = ["--charset=UTF-8"]
s.require_paths = ["lib"]
s.rubygems_version = %q{1.3.7}
s.summary = %q{Java AutoTest}
s.test_files = [
"spec/spec_helper.rb",
"spec/java_autotest/autotest_spec.rb",
"spec/file_spec.rb"
]
# Dependency declarations, guarded for the capabilities of older RubyGems
if s.respond_to? :specification_version then
current_version = Gem::Specification::CURRENT_SPECIFICATION_VERSION
s.specification_version = 3
if Gem::Version.new(Gem::VERSION) >= Gem::Version.new('1.2.0') then
s.add_development_dependency(%q<rspec>, [">= 1.2.9"])
else
s.add_dependency(%q<rspec>, [">= 1.2.9"])
end
else
s.add_dependency(%q<rspec>, [">= 1.2.9"])
end
end
|
# coding: utf-8
lib = File.expand_path('../lib', __FILE__)
$LOAD_PATH.unshift(lib) unless $LOAD_PATH.include?(lib)
require 'jekyll/assets_plugin/version'
Gem::Specification.new do |spec|
spec.name          = "jekyll-assets"
spec.version       = Jekyll::AssetsPlugin::VERSION
spec.homepage      = "http://ixti.github.com/jekyll-assets"
spec.authors       = ["Aleksey V Zapparov"]
spec.email         = %w{ixti@member.fsf.org}
spec.license       = "MIT"
spec.summary       = "jekyll-assets-#{Jekyll::AssetsPlugin::VERSION}"
spec.description   = <<-DESC
  Jekyll plugin, that allows you to write javascript/css assets in
  other languages such as CoffeeScript, Sass, Less and ERB, concatenate
  them, respecting dependencies, minify and many more.
  DESC
# Package everything tracked by git; executables and test files are
# derived from that list.
spec.files         = `git ls-files`.split($/)
spec.executables   = spec.files.grep(%r{^bin/}).map{ |f| File.basename(f) }
spec.test_files    = spec.files.grep(%r{^(test|spec|features)/})
spec.require_paths = ["lib"]
# runtime dependencies
spec.add_dependency "jekyll",    "~> 1.0"
spec.add_dependency "sprockets", "~> 2.10"
# development dependencies
spec.add_development_dependency "bundler", "~> 1.3"
spec.add_development_dependency "rake"
spec.add_development_dependency "rspec"
spec.add_development_dependency "guard-rspec"
spec.add_development_dependency "compass"
spec.add_development_dependency "bourbon"
spec.add_development_dependency "neat"
spec.add_development_dependency "bootstrap-sass"
end
Limit version of SASS in development
# coding: utf-8
lib = File.expand_path('../lib', __FILE__)
$LOAD_PATH.unshift(lib) unless $LOAD_PATH.include?(lib)
require 'jekyll/assets_plugin/version'
Gem::Specification.new do |spec|
spec.name          = "jekyll-assets"
spec.version       = Jekyll::AssetsPlugin::VERSION
spec.homepage      = "http://ixti.github.com/jekyll-assets"
spec.authors       = ["Aleksey V Zapparov"]
spec.email         = %w{ixti@member.fsf.org}
spec.license       = "MIT"
spec.summary       = "jekyll-assets-#{Jekyll::AssetsPlugin::VERSION}"
spec.description   = <<-DESC
  Jekyll plugin, that allows you to write javascript/css assets in
  other languages such as CoffeeScript, Sass, Less and ERB, concatenate
  them, respecting dependencies, minify and many more.
  DESC
# Package everything tracked by git; executables and test files are
# derived from that list.
spec.files         = `git ls-files`.split($/)
spec.executables   = spec.files.grep(%r{^bin/}).map{ |f| File.basename(f) }
spec.test_files    = spec.files.grep(%r{^(test|spec|features)/})
spec.require_paths = ["lib"]
# runtime dependencies
spec.add_dependency "jekyll",    "~> 1.0"
spec.add_dependency "sprockets", "~> 2.10"
# development dependencies
spec.add_development_dependency "bundler", "~> 1.3"
spec.add_development_dependency "rake"
spec.add_development_dependency "rspec"
spec.add_development_dependency "guard-rspec"
spec.add_development_dependency "compass"
spec.add_development_dependency "bourbon"
spec.add_development_dependency "neat"
spec.add_development_dependency "bootstrap-sass"
# compass fails with SASS 3.3+
# https://github.com/chriseppstein/compass/issues/1513
spec.add_development_dependency "sass", "~> 3.2.13"
end
|
# frozen_string_literal: true
lib = File.expand_path("lib", __dir__)
$LOAD_PATH.unshift(lib) unless $LOAD_PATH.include?(lib)
require "jekyll-import/version"
Gem::Specification.new do |s|
s.specification_version = 2 if s.respond_to? :specification_version=
s.required_rubygems_version = Gem::Requirement.new(">= 0") if s.respond_to? :required_rubygems_version=
# NOTE: rubygems_version is build-time metadata; it is overwritten when the gem is packaged
s.rubygems_version = "2.2.2"
s.required_ruby_version = ">= 2.3"
s.name = "jekyll-import"
s.version = JekyllImport::VERSION
s.license = "MIT"
s.summary = "Import command for Jekyll (static site generator)."
s.description = "Provides the Import command for Jekyll."
s.authors = ["Tom Preston-Werner"]
s.email = "tom@mojombo.com"
s.homepage = "http://github.com/jekyll/jekyll-import"
# Only ship lib/ — importers pull in their own heavy dependencies at runtime
s.files = `git ls-files`.split($INPUT_RECORD_SEPARATOR).grep(%r!^lib/!)
s.require_paths = %w(lib)
s.rdoc_options = ["--charset=UTF-8"]
s.extra_rdoc_files = %w(README.markdown LICENSE)
# runtime dependencies
s.add_runtime_dependency("fastercsv", "~> 1.0")
s.add_runtime_dependency("jekyll", ENV["JEKYLL_VERSION"] ? "~> #{ENV["JEKYLL_VERSION"]}" : "~> 3.0")
s.add_runtime_dependency("nokogiri", "~> 1.0")
s.add_runtime_dependency("reverse_markdown", "~> 1.0")
# development dependencies
s.add_development_dependency("activesupport", "~> 4.2")
s.add_development_dependency("rake", "~> 12.0")
s.add_development_dependency("rdoc", "~> 4.0")
# test dependencies:
s.add_development_dependency("redgreen", "~> 1.2")
s.add_development_dependency("rr", "~> 1.0")
s.add_development_dependency("rubocop-jekyll", "~> 0.4")
s.add_development_dependency("shoulda", "~> 3.5")
s.add_development_dependency("simplecov", "~> 0.7")
s.add_development_dependency("simplecov-gem-adapter", "~> 1.0")
# migrator dependencies:
s.add_development_dependency("behance", "~> 0.3")
s.add_development_dependency("hpricot", "~> 0.8")
s.add_development_dependency("htmlentities", "~> 4.3")
s.add_development_dependency("mysql2", "~> 0.3")
s.add_development_dependency("open_uri_redirections", "~> 0.2")
s.add_development_dependency("pg", "~> 0.12")
s.add_development_dependency("sequel", "~> 3.42")
s.add_development_dependency("sqlite3", "~> 1.3")
s.add_development_dependency("unidecode", "~> 1.0")
# site dependencies:
s.add_development_dependency("launchy", "~> 2.4")
end
chore(deps): require Rubygems > 2.5
# frozen_string_literal: true

lib = File.expand_path("lib", __dir__)
$LOAD_PATH.unshift(lib) unless $LOAD_PATH.include?(lib)
require "jekyll-import/version"

Gem::Specification.new do |s|
  # FIX: `rubygems_version` is metadata recorded by RubyGems at *build* time
  # (and is overwritten when the gem is packaged); the attribute that enforces
  # a minimum RubyGems version at install time is `required_rubygems_version`.
  s.required_rubygems_version = ">= 2.5"
  s.required_ruby_version = ">= 2.3"
  s.name = "jekyll-import"
  s.version = JekyllImport::VERSION
  s.license = "MIT"
  s.summary = "Import command for Jekyll (static site generator)."
  s.description = "Provides the Import command for Jekyll."
  s.authors = ["Tom Preston-Werner", "Parker Moore", "Matt Rogers"]
  s.email = "maintainers@jekyllrb.com"
  s.homepage = "http://github.com/jekyll/jekyll-import"
  # Only ship lib/ — importers pull in their own heavy dependencies at runtime
  s.files = `git ls-files`.split($INPUT_RECORD_SEPARATOR).grep(%r!^lib/!)
  s.require_paths = %w(lib)
  s.rdoc_options = ["--charset=UTF-8"]
  s.extra_rdoc_files = %w(README.markdown LICENSE)
  # runtime dependencies
  s.add_runtime_dependency("fastercsv", "~> 1.0")
  s.add_runtime_dependency("jekyll", ENV["JEKYLL_VERSION"] ? "~> #{ENV["JEKYLL_VERSION"]}" : "~> 3.0")
  s.add_runtime_dependency("nokogiri", "~> 1.0")
  s.add_runtime_dependency("reverse_markdown", "~> 1.0")
  # development dependencies
  s.add_development_dependency("activesupport", "~> 4.2")
  s.add_development_dependency("rake", "~> 12.0")
  s.add_development_dependency("rdoc", "~> 6.0")
  # test dependencies:
  s.add_development_dependency("redgreen", "~> 1.2")
  s.add_development_dependency("rr", "~> 1.0")
  s.add_development_dependency("rubocop-jekyll", "~> 0.4")
  s.add_development_dependency("shoulda", "~> 3.5")
  s.add_development_dependency("simplecov", "~> 0.7")
  s.add_development_dependency("simplecov-gem-adapter", "~> 1.0")
  # migrator dependencies:
  s.add_development_dependency("behance", "~> 0.3")
  s.add_development_dependency("hpricot", "~> 0.8")
  s.add_development_dependency("htmlentities", "~> 4.3")
  s.add_development_dependency("mysql2", "~> 0.3")
  s.add_development_dependency("open_uri_redirections", "~> 0.2")
  s.add_development_dependency("pg", "~> 0.12")
  s.add_development_dependency("sequel", "~> 3.42")
  s.add_development_dependency("sqlite3", "~> 1.3")
  s.add_development_dependency("unidecode", "~> 1.0")
  # site dependencies:
  s.add_development_dependency("launchy", "~> 2.4")
end
|
module GithubService
##
# GithubService is miq-bot's interface to the Github API. It acts as a
# wrapper around Octokit, delegating calls directly to the Octokit client as
# well as providing a space to keep useful augmentations of the interface for
# our own use cases.
#
# You can find the official Octokit documentation at http://octokit.github.io/octokit.rb
#
# Please check the documentation first before adding helper methods, as they
# may already be well handled by Octokit itself.
#
class << self
# Lazily builds and memoizes the Octokit client. Outside of the test
# environment it is configured with credentials from Settings and a
# Faraday middleware stack that logs rate-limit usage.
def service
@service ||=
begin
require 'octokit'
unless Rails.env.test?
Octokit.configure do |c|
c.login = Settings.github_credentials.username
c.password = Settings.github_credentials.password
c.auto_paginate = true
c.middleware = Faraday::RackBuilder.new do |builder|
builder.use GithubService::Response::RatelimitLogger
builder.use Octokit::Response::RaiseError
builder.use Octokit::Response::FeedParser
builder.adapter Faraday.default_adapter
end
end
end
Octokit::Client.new
end
end
# Adds each of the given comment bodies to the issue.
def add_comments(fq_repo_name, issue_number, comments)
Array(comments).each do |comment|
add_comment(fq_repo_name, issue_number, comment)
end
end
# Deletes each of the given comment ids from the repo.
def delete_comments(fq_repo_name, comment_ids)
Array(comment_ids).each do |comment_id|
delete_comment(fq_repo_name, comment_id)
end
end
# Deletes the issue comments found by the provided block, then creates new
# issue comments from those provided.
def replace_comments(fq_repo_name, issue_number, new_comments)
raise "no block given" unless block_given?
to_delete = issue_comments(fq_repo_name, issue_number).select { |c| yield c }
delete_comments(fq_repo_name, to_delete.map(&:id))
add_comments(fq_repo_name, issue_number, new_comments)
end
# Fetches a single issue, wrapped in our Issue decorator.
def issue(*args)
Issue.new(service.issue(*args))
end
# Fetches issue comments, each wrapped in our IssueComment decorator.
def issue_comments(*args)
service.issue_comments(*args).map do |comment|
IssueComment.new(comment)
end
end
# Fetches repository notifications, each wrapped in our Notification decorator.
def repository_notifications(*args)
service.repository_notifications(*args).map do |notification|
Notification.new(notification)
end
end
# Cached set of label names for the repo.
# NOTE(review): `fq_name` is not defined anywhere in this module — it is
# presumably provided by the calling/including context; verify.
def labels
labels_cache[fq_name] ||= Set.new(service.labels(fq_name).map(&:name))
end
def valid_label?(label_text)
labels.include?(label_text)
end
# Drops the cached labels so the next call refetches them.
def refresh_labels
labels_cache.delete(fq_name)
end
# Cached mapping of milestone title => milestone number for the repo.
def milestones
milestones_cache[fq_name] ||= Hash[service.list_milestones(fq_name).map { |m| [m.title, m.number] }]
end
def valid_milestone?(milestone)
milestones.include?(milestone)
end
def refresh_milestones
milestones_cache.delete(fq_name)
end
# Cached set of assignable user logins for the repo.
def assignees
assignees_cache[fq_name] ||= Set.new(service.repo_assignees(fq_name).map(&:login))
end
def valid_assignee?(user)
assignees.include?(user)
end
def refresh_assignees
assignees_cache.delete(fq_name)
end
private
def labels_cache
@labels_cache ||= {}
end
def milestones_cache
@milestones_cache ||= {}
end
def assignees_cache
@assignees_cache ||= {}
end
def respond_to_missing?(method_name, include_private = false)
service.respond_to?(method_name, include_private)
end
# Any method not defined here is delegated straight to the Octokit client.
def method_missing(method_name, *args, &block)
if service.respond_to?(method_name)
service.send(method_name, *args, &block)
else
super
end
end
end
end
Privatize GithubService.service
There's no reason for this to be public; anything calling Github should
be going through GithubService's interface and not interacting with
Octokit directly (except by delegation)
module GithubService
##
# GithubService is miq-bot's interface to the Github API. It acts as a
# wrapper around Octokit, delegating calls directly to the Octokit client as
# well as providing a space to keep useful augmentations of the interface for
# our own use cases.
#
# You can find the official Octokit documentation at http://octokit.github.io/octokit.rb
#
# Please check the documentation first before adding helper methods, as they
# may already be well handled by Octokit itself.
#
class << self
# Adds each of the given comment bodies to the issue.
def add_comments(fq_repo_name, issue_number, comments)
Array(comments).each do |comment|
add_comment(fq_repo_name, issue_number, comment)
end
end
# Deletes each of the given comment ids from the repo.
def delete_comments(fq_repo_name, comment_ids)
Array(comment_ids).each do |comment_id|
delete_comment(fq_repo_name, comment_id)
end
end
# Deletes the issue comments found by the provided block, then creates new
# issue comments from those provided.
def replace_comments(fq_repo_name, issue_number, new_comments)
raise "no block given" unless block_given?
to_delete = issue_comments(fq_repo_name, issue_number).select { |c| yield c }
delete_comments(fq_repo_name, to_delete.map(&:id))
add_comments(fq_repo_name, issue_number, new_comments)
end
# Fetches a single issue, wrapped in our Issue decorator.
def issue(*args)
Issue.new(service.issue(*args))
end
# Fetches issue comments, each wrapped in our IssueComment decorator.
def issue_comments(*args)
service.issue_comments(*args).map do |comment|
IssueComment.new(comment)
end
end
# Fetches repository notifications, each wrapped in our Notification decorator.
def repository_notifications(*args)
service.repository_notifications(*args).map do |notification|
Notification.new(notification)
end
end
# Cached set of label names for the repo.
# NOTE(review): `fq_name` is not defined anywhere in this module — it is
# presumably provided by the calling/including context; verify.
def labels
labels_cache[fq_name] ||= Set.new(service.labels(fq_name).map(&:name))
end
def valid_label?(label_text)
labels.include?(label_text)
end
# Drops the cached labels so the next call refetches them.
def refresh_labels
labels_cache.delete(fq_name)
end
# Cached mapping of milestone title => milestone number for the repo.
def milestones
milestones_cache[fq_name] ||= Hash[service.list_milestones(fq_name).map { |m| [m.title, m.number] }]
end
def valid_milestone?(milestone)
milestones.include?(milestone)
end
def refresh_milestones
milestones_cache.delete(fq_name)
end
# Cached set of assignable user logins for the repo.
def assignees
assignees_cache[fq_name] ||= Set.new(service.repo_assignees(fq_name).map(&:login))
end
def valid_assignee?(user)
assignees.include?(user)
end
def refresh_assignees
assignees_cache.delete(fq_name)
end
private
# Lazily builds and memoizes the Octokit client. Private: all external
# callers should go through this module's public interface or delegation.
def service
@service ||=
begin
require 'octokit'
unless Rails.env.test?
Octokit.configure do |c|
c.login = Settings.github_credentials.username
c.password = Settings.github_credentials.password
c.auto_paginate = true
c.middleware = Faraday::RackBuilder.new do |builder|
builder.use GithubService::Response::RatelimitLogger
builder.use Octokit::Response::RaiseError
builder.use Octokit::Response::FeedParser
builder.adapter Faraday.default_adapter
end
end
end
Octokit::Client.new
end
end
def labels_cache
@labels_cache ||= {}
end
def milestones_cache
@milestones_cache ||= {}
end
def assignees_cache
@assignees_cache ||= {}
end
def respond_to_missing?(method_name, include_private = false)
service.respond_to?(method_name, include_private)
end
# Any method not defined here is delegated straight to the Octokit client.
def method_missing(method_name, *args, &block)
if service.respond_to?(method_name)
service.send(method_name, *args, &block)
else
super
end
end
end
end
|
module Grape
# An Endpoint is the proxy scope in which all routing
# blocks are executed. In other words, any methods
# on the instance level of this class may be called
# from inside a `get`, `post`, etc.
class Endpoint
attr_accessor :block, :source, :options, :settings
attr_reader :env, :request
class << self
  # @api private
  #
  # Create an UnboundMethod that is appropriate for executing an endpoint
  # route.
  #
  # The unbound method allows explicit calls to +return+ without raising a
  # +LocalJumpError+. The method will be removed, but a +Proc+ reference to
  # it will be returned. The returned +Proc+ expects a single argument: the
  # instance of +Endpoint+ to bind to the method during the call.
  #
  # @param [String, Symbol] method_name
  # @return [Proc]
  # @raise [NameError] an instance method with the same name already exists
  def generate_api_method(method_name, &block)
    name_taken = instance_methods.include?(method_name.to_sym) ||
                 instance_methods.include?(method_name.to_s)
    if name_taken
      raise NameError.new("method #{method_name.inspect} already exists and cannot be used as an unbound method name")
    end
    # Define, capture as an UnboundMethod, then remove — leaving only the
    # Proc wrapper which binds the method to a given instance at call time.
    define_method(method_name, &block)
    unbound = instance_method(method_name)
    remove_method(method_name)
    proc { |endpoint_instance| unbound.bind(endpoint_instance).call }
  end
end
# @param settings [Object] inherited API settings (stackable, supports
#   `gather` and hash-like access — see callers)
# @param options [Hash] must include :path and :method; may include :app
#   (a mounted Rack app) and :route_options
# @raise [Grape::Exceptions::MissingOption] when :path or :method is absent
def initialize(settings, options = {}, &block)
@settings = settings
if block_given?
# Build a readable, unique name for the generated API method from the
# HTTP method, namespace, mount path and route path.
method_name = [
options[:method],
Namespace.joined_space(settings),
settings.gather(:mount_path).join("/"),
Array(options[:path]).join("/")
].join(" ")
@source = block
@block = self.class.generate_api_method(method_name, &block)
end
@options = options
# Normalize :path and :method to arrays; default path is "/"
raise Grape::Exceptions::MissingOption.new(:path) unless options.key?(:path)
options[:path] = Array(options[:path])
options[:path] = ['/'] if options[:path].empty?
raise Grape::Exceptions::MissingOption.new(:method) unless options.key?(:method)
options[:method] = Array(options[:method])
options[:route_options] ||= {}
end
# All routes of this endpoint — gathered from mounted child endpoints
# when present, otherwise prepared from this endpoint's own options.
# Memoized.
def routes
  @routes ||= if endpoints
    endpoints.map(&:routes).flatten
  else
    prepare_routes
  end
end
# Registers this endpoint's routes (or those of mounted child endpoints)
# in the given route set. GET routes also answer HEAD unless the
# :do_not_route_head setting is enabled.
def mount_in(route_set)
  return endpoints.each { |e| e.mount_in(route_set) } if endpoints
  routes.each do |route|
    methods = [route.route_method]
    methods << "HEAD" if !settings[:do_not_route_head] && route.route_method == "GET"
    methods.each do |request_method|
      route_set.add_route(self, {
        :path_info => route.route_compiled,
        :request_method => request_method,
      }, { :route_info => route })
    end
  end
end
# Builds a Route object for every (method, path) combination declared on
# this endpoint, compiling each path into a Rack::Mount pattern.
#
# @return [Array<Route>]
def prepare_routes
routes = []
options[:method].each do |method|
options[:path].each do |path|
prepared_path = prepare_path(path)
# Routes anchor by default; mounted apps need non-anchored matching
anchor = options[:route_options][:anchor]
anchor = anchor.nil? ? true : anchor
# Merge parameter requirements from all parent namespaces, innermost last
endpoint_requirements = options[:route_options][:requirements] || {}
all_requirements = (settings.gather(:namespace).map(&:requirements) << endpoint_requirements)
requirements = all_requirements.reduce({}) do |base_requirements, single_requirements|
base_requirements.merge!(single_requirements)
end
path = compile_path(prepared_path, anchor && !options[:app], requirements)
regex = Rack::Mount::RegexpWithNamedGroups.new(path)
path_params = {}
# named parameters in the api path
named_params = regex.named_captures.map { |nc| nc[0] } - [ 'version', 'format' ]
named_params.each { |named_param| path_params[named_param] = "" }
# route parameters declared via desc or appended to the api declaration
route_params = (options[:route_options][:params] || {})
path_params.merge!(route_params)
# :any routes match every request method, represented as nil
request_method = (method.to_s.upcase unless method == :any)
routes << Route.new(options[:route_options].clone.merge({
:prefix => settings[:root_prefix],
:version => settings[:version] ? settings[:version].join('|') : nil,
:namespace => namespace,
:method => request_method,
:path => prepared_path,
:params => path_params,
:compiled => path,
})
)
end
end
routes
end
# Assembles the full route path from mount path, root prefix, optional
# path-based version segment, namespace and the endpoint's own path,
# appending the optional format suffix.
#
# @param path [String, nil] the endpoint's declared path
# @return [String] the normalized path, e.g. "/prefix/:version/ns/path(.:format)"
def prepare_path(path)
parts = []
parts << settings[:mount_path].to_s.split("/") if settings[:mount_path]
parts << settings[:root_prefix].to_s.split("/") if settings[:root_prefix]
uses_path_versioning = settings[:version] && settings[:version_options][:using] == :path
namespace_is_empty = namespace && (namespace.to_s =~ /^\s*$/ || namespace.to_s == '/')
path_is_empty = path && (path.to_s =~ /^\s*$/ || path.to_s == '/')
parts << ':version' if uses_path_versioning
# When the versioned root itself is requested (empty namespace and path),
# the format suffix needs its own leading slash: "/v1(/.json)" style.
if ! uses_path_versioning || (! namespace_is_empty || ! path_is_empty)
parts << namespace.to_s if namespace
parts << path.to_s if path
format_suffix = '(.:format)'
else
format_suffix = '(/.:format)'
end
parts = parts.flatten.select { |part| part != '/' }
Rack::Mount::Utils.normalize_path(parts.join('/') + format_suffix)
end
# The joined namespace path derived from the settings stack, memoized.
def namespace
  return @namespace if @namespace
  @namespace = Namespace.joined_space_path(settings)
end
# Compiles a prepared path string into a Rack::Mount strexp pattern.
#
# @param prepared_path [String] path produced by #prepare_path
# @param anchor [Boolean] whether the pattern must match the entire path
# @param requirements [Hash] per-parameter regexp constraints
def compile_path(prepared_path, anchor = true, requirements = {})
  strexp_options = {}
  strexp_options[:version] = /#{settings[:version].join('|')}/ if settings[:version]
  strexp_options.merge!(requirements)
  Rack::Mount::Strexp.compile(prepared_path, strexp_options, %w( / . ? ), anchor)
end
# Rack entry point. Works on a duplicate so per-request state
# (@status, @header, ...) never leaks between requests.
def call(env)
  request_instance = dup
  request_instance.call!(env)
end
# Handles a single request on this (duplicated) endpoint instance,
# either by forwarding to a mounted Rack app or by running the
# endpoint block through the middleware stack.
def call!(env)
  env['api.endpoint'] = self
  if options[:app]
    options[:app].call(env)
  else
    builder = build_middleware
    builder.run options[:app] || lambda { |environment| self.run(environment) }
    builder.call(env)
  end
end
# The parameters passed into the request as
# well as parsed from URL segments. Memoized per request.
def params
  return @params if @params
  @params = @request.params
end
# A filtering method that will return a hash
# consisting only of keys that have been declared by a
# `params` statement.
#
# @param params [Hash] The initial hash to filter. Usually this will just be `params`
# @param options [Hash] Can pass `:include_missing` and `:stringify` options.
# @param declared_params [Array] declared keys; entries may be Hashes to
#   describe nested parameter structures (recursed into below)
# @return [Hash, Array<Hash>] the filtered params (an Array when `params` is one)
# @raise [ArgumentError] when no params were declared for this endpoint
def declared(params, options = {}, declared_params = settings[:declared_params])
options[:include_missing] = true unless options.key?(:include_missing)
unless declared_params
raise ArgumentError, "Tried to filter for declared parameters but none exist."
end
# An Array of param hashes is filtered element-wise
if params.is_a? Array
params.map do |param|
declared(param || {}, options, declared_params)
end
else
declared_params.inject({}) do |hash, key|
# Normalize a flat key into the { parent => children } shape
key = { key => nil } unless key.is_a? Hash
key.each_pair do |parent, children|
output_key = options[:stringify] ? parent.to_s : parent.to_sym
if params.key?(parent) || options[:include_missing]
# Recurse into declared children; otherwise copy the value through
hash[output_key] = if children
declared(params[parent] || {}, options, Array(children))
else
params[parent]
end
end
end
hash
end
end
end
# The API version as specified in the URL.
def version
  env['api.version']
end
# End the request and display an error to the
# end user with the specified message.
#
# @param message [String] The message to display.
# @param status [Integer] the HTTP Status Code. Defaults to 403.
def error!(message, status = 403)
  throw(:error, { :message => message, :status => status })
end
# Redirect to a new url.
#
# @param url [String] the url to redirect to
# @param options [Hash] :permanent (default false) issues a 301;
#   otherwise 303 for non-GET HTTP/1.1 requests, else 302.
def redirect(url, options = {})
  merged_options = { :permanent => false }.merge(options)
  redirect_status =
    if merged_options[:permanent]
      301
    elsif env['HTTP_VERSION'] == 'HTTP/1.1' && request.request_method.to_s.upcase != "GET"
      303
    else
      302
    end
  status redirect_status
  header "Location", url
  body ""
end
# Set or retrieve the HTTP status code.
#
# @param status [Integer, nil] when given, stores it; when omitted,
#   returns the stored status, or a default based on the request
#   method (201 for POST, 200 otherwise).
def status(status = nil)
  return @status = status if status
  return @status if @status
  request.request_method.to_s.upcase == 'POST' ? 201 : 200
end
# Set an individual header (deleting it when val is nil), or retrieve
# all headers that have been set when called without arguments.
def header(key = nil, val = nil)
  return @header unless key
  if val
    @header[key.to_s] = val
  else
    @header.delete(key.to_s)
  end
end
# Retrieves all available request headers, memoized per request.
def headers
  return @headers if @headers
  @headers = @request.headers
end
# Set the response Content-Type header.
def content_type(val)
  header 'Content-Type', val
end
# Set or get a cookie
#
# @example
#   cookies[:mycookie] = 'mycookie val'
#   cookies['mycookie-string'] = 'mycookie string val'
#   cookies[:more] = { :value => '123', :expires => Time.at(0) }
#   cookies.delete :more
#
def cookies
  return @cookies if @cookies
  @cookies = Cookies.new
end
# Allows you to define the response body as something other than the
# return value.
#
# @example
#   get '/body' do
#     body "Body"
#     "Not the Body"
#   end
#
#   GET /body # => "Body"
def body(value = nil)
  @body = value if value
  @body
end
# Allows you to make use of Grape Entities by setting
# the response body to the serializable hash of the
# entity provided in the `:with` option. This has the
# added benefit of automatically passing along environment
# and version information to the serialization, making it
# very easy to do conditional exposures. See Entity docs
# for more info.
#
# @example
#
# get '/users/:id' do
# present User.find(params[:id]),
# :with => API::Entities::User,
# :admin => current_user.admin?
# end
# Render one or more objects through an entity class and store the
# result as the response body. See the class-level docs above for usage.
#
# @param args the object (or [key, object] pair) plus an options hash;
#   :with picks the entity class, :root wraps the result in a hash.
def present(*args)
  options = args.count > 1 ? args.extract_options! : {}
  # A leading Symbol names the key under which the result is merged
  # into the existing body.
  key, object = if args.count == 2 && args.first.is_a?(Symbol)
    args
  else
    [nil, args.first]
  end
  entity_class = options.delete(:with)
  # auto-detect the entity from the first object in the collection
  object_instance = object.respond_to?(:first) ? object.first : object
  # Fall back to any representation registered for the object's class
  # or one of its ancestors.
  object_instance.class.ancestors.each do |potential|
    entity_class ||= (settings[:representations] || {})[potential]
  end
  # Last resort: a nested Entity constant on the object's class.
  entity_class ||= object_instance.class.const_get(:Entity) if object_instance.class.const_defined?(:Entity)
  root = options.delete(:root)
  representation = if entity_class
    # Pass the Rack env (and api.version when present) through to the
    # entity so it can do conditional exposures.
    embeds = {:env => env}
    embeds[:version] = env['api.version'] if env['api.version']
    entity_class.represent(object, embeds.merge(options))
  else
    object
  end
  representation = { root => representation } if root
  representation = (@body || {}).merge({key => representation}) if key
  body representation
end
# Returns route information for the current request.
#
# @example
#
# desc "Returns the route description."
# get '/' do
# route.route_description
# end
def route
  # Route info is placed in the env by the router when the request
  # is matched.
  env["rack.routing_args"][:route_info]
end
protected
# Return the collection of endpoints within this endpoint.
# This is the case when an Grape::API mounts another Grape::API.
def endpoints
  mounted = options[:app]
  # Only a mounted app that itself exposes endpoints qualifies;
  # otherwise nil.
  mounted.endpoints if mounted && mounted.respond_to?(:endpoints)
end
# Process a single request: set up per-request state, run before
# filters, validations, the endpoint block and after filters, then
# assemble the Rack response triplet.
def run(env)
  @env = env
  @header = {}
  @request = Grape::Request.new(@env)
  # Mix in helper modules gathered from this and enclosing namespaces.
  self.extend helpers
  cookies.read(@request)
  run_filters befores
  # Retrieve validations from this namespace and all parent namespaces.
  validation_errors = []
  settings.gather(:validations).each do |validator|
    begin
      validator.validate!(params)
    rescue Grape::Exceptions::Validation => e
      # Collect every failure so all validation errors are reported together.
      validation_errors << e
    end
  end
  if validation_errors.any?
    raise Grape::Exceptions::Validations, errors: validation_errors
  end
  run_filters after_validations
  response_text = @block ? @block.call(self) : nil
  run_filters afters
  cookies.write(header)
  # Rack triplet; an explicit #body call takes precedence over the
  # block's return value.
  [status, header, [body || response_text]]
end
# Assemble the per-endpoint Rack middleware stack (error handling,
# user middleware, auth, versioning, formatting). Order matters here.
def build_middleware
  b = Rack::Builder.new
  b.use Rack::Head
  b.use Grape::Middleware::Error,
    :format => settings[:format],
    :default_status => settings[:default_error_status] || 403,
    :rescue_all => settings[:rescue_all],
    :rescued_errors => aggregate_setting(:rescued_errors),
    :default_error_formatter => settings[:default_error_formatter],
    :error_formatters => settings[:error_formatters],
    :rescue_options => settings[:rescue_options],
    :rescue_handlers => merged_setting(:rescue_handlers)
  # User-registered middleware; each entry is [klass, *args] with an
  # optional trailing Proc used as the middleware's block.
  aggregate_setting(:middleware).each do |m|
    m = m.dup
    block = m.pop if m.last.is_a?(Proc)
    if block
      b.use *m, &block
    else
      b.use *m
    end
  end
  b.use Rack::Auth::Basic, settings[:auth][:realm], &settings[:auth][:proc] if settings[:auth] && settings[:auth][:type] == :http_basic
  b.use Rack::Auth::Digest::MD5, settings[:auth][:realm], settings[:auth][:opaque], &settings[:auth][:proc] if settings[:auth] && settings[:auth][:type] == :http_digest
  if settings[:version]
    b.use Grape::Middleware::Versioner.using(settings[:version_options][:using]), {
      :versions => settings[:version] ? settings[:version].flatten : nil,
      :version_options => settings[:version_options],
      :prefix => settings[:root_prefix]
    }
  end
  b.use Grape::Middleware::Formatter,
    :format => settings[:format],
    :default_format => settings[:default_format] || :txt,
    :content_types => settings[:content_types],
    :formatters => settings[:formatters],
    :parsers => settings[:parsers]
  b
end
def helpers
  # Build one module combining every :helpers module declared on the
  # settings stack.
  combined = Module.new
  settings.stack.each do |frame|
    helper_module = frame[:helpers]
    combined.send(:include, helper_module) if helper_module
  end
  combined
end
def aggregate_setting(key)
  # Concatenate the arrays stored under +key+ across the settings stack.
  collected = []
  settings.stack.each { |frame| collected.concat(frame[key] || []) }
  collected
end
def merged_setting(key)
  # Later (inner) frames override earlier (outer) ones.
  result = {}
  settings.stack.each { |frame| result = result.merge(frame[key] || {}) }
  result
end
def run_filters(filters)
  # Each filter block runs in the endpoint instance's context.
  Array(filters).each { |filter| instance_eval(&filter) }
end
# Before-filters gathered from this and all enclosing namespaces.
def befores; aggregate_setting(:befores) end
# Filters run once request validation has passed.
def after_validations; aggregate_setting(:after_validations) end
# After-filters gathered from this and all enclosing namespaces.
def afters; aggregate_setting(:afters) end
end
end
Refactor Grape::Endpoint#initialize
module Grape
# An Endpoint is the proxy scope in which all routing
# blocks are executed. In other words, any methods
# on the instance level of this class may be called
# from inside a `get`, `post`, etc.
class Endpoint
attr_accessor :block, :source, :options, :settings
attr_reader :env, :request
class << self
# @api private
#
# Create an UnboundMethod that is appropriate for executing an endpoint
# route.
#
# The unbound method allows explicit calls to +return+ without raising a
# +LocalJumpError+. The method will be removed, but a +Proc+ reference to
# it will be returned. The returned +Proc+ expects a single argument: the
# instance of +Endpoint+ to bind to the method during the call.
#
# @param [String, Symbol] method_name
# @return [Proc]
# @raise [NameError] an instance method with the same name already exists
def generate_api_method(method_name, &block)
  taken = instance_methods.include?(method_name.to_sym) || instance_methods.include?(method_name.to_s)
  raise NameError.new("method #{method_name.inspect} already exists and cannot be used as an unbound method name") if taken
  # Define, capture and immediately remove the method: the captured
  # UnboundMethod allows `return` inside the route block.
  define_method(method_name, &block)
  unbound = instance_method(method_name)
  remove_method(method_name)
  proc { |endpoint_instance| unbound.bind(endpoint_instance).call }
end
end
def initialize(settings, options = {}, &block)
  require_option(options, :path)
  require_option(options, :method)
  @settings = settings
  @options = options
  # Normalize :path and :method to arrays; an empty path means root.
  @options[:path] = Array(options[:path])
  @options[:path] << '/' if @options[:path].empty?
  @options[:method] = Array(options[:method])
  @options[:route_options] ||= {}
  return unless block_given?
  @source = block
  @block = self.class.generate_api_method(method_name, &block)
end
def require_option(options, key)
  # Raise a descriptive error when a mandatory option is missing.
  options.has_key?(key) || raise(Grape::Exceptions::MissingOption.new(key))
end
# A unique name for the generated API method, built from the HTTP
# verb(s), namespace, mount path and route path joined by spaces.
def method_name
  [ options[:method],
    Namespace.joined_space(settings),
    settings.gather(:mount_path).join('/'),
    options[:path].join('/')
  ].join(" ")
end
def routes
  # Memoized. A mounting endpoint aggregates its children's routes;
  # a plain endpoint computes its own via #prepare_routes.
  @routes ||= endpoints ? endpoints.collect(&:routes).flatten : prepare_routes
end
# Register this endpoint's routes in the given Rack::Mount route set.
# Mounted APIs recurse into their inner endpoints instead.
def mount_in(route_set)
  if endpoints
    endpoints.each { |e| e.mount_in(route_set) }
  else
    routes.each do |route|
      methods = [ route.route_method ]
      # GET routes also answer HEAD unless explicitly disabled.
      if ! settings[:do_not_route_head] && route.route_method == "GET"
        methods << "HEAD"
      end
      methods.each do |method|
        route_set.add_route(self, {
          :path_info => route.route_compiled,
          :request_method => method,
        }, { :route_info => route })
      end
    end
  end
end
# Build a Route object for every method/path combination declared on
# this endpoint.
def prepare_routes
  routes = []
  options[:method].each do |method|
    options[:path].each do |path|
      prepared_path = prepare_path(path)
      # Anchoring defaults to true unless the route options say otherwise.
      anchor = options[:route_options][:anchor]
      anchor = anchor.nil? ? true : anchor
      endpoint_requirements = options[:route_options][:requirements] || {}
      # Requirements from enclosing namespaces, with the endpoint's own
      # requirements merged in last (so they win).
      all_requirements = (settings.gather(:namespace).map(&:requirements) << endpoint_requirements)
      requirements = all_requirements.reduce({}) do |base_requirements, single_requirements|
        base_requirements.merge!(single_requirements)
      end
      # Mounted apps are never anchored so the inner app can match the rest.
      path = compile_path(prepared_path, anchor && !options[:app], requirements)
      regex = Rack::Mount::RegexpWithNamedGroups.new(path)
      path_params = {}
      # named parameters in the api path
      named_params = regex.named_captures.map { |nc| nc[0] } - [ 'version', 'format' ]
      named_params.each { |named_param| path_params[named_param] = "" }
      # route parameters declared via desc or appended to the api declaration
      route_params = (options[:route_options][:params] || {})
      path_params.merge!(route_params)
      # :any matches every verb, represented by a nil request method.
      request_method = (method.to_s.upcase unless method == :any)
      routes << Route.new(options[:route_options].clone.merge({
        :prefix => settings[:root_prefix],
        :version => settings[:version] ? settings[:version].join('|') : nil,
        :namespace => namespace,
        :method => request_method,
        :path => prepared_path,
        :params => path_params,
        :compiled => path,
      })
      )
    end
  end
  routes
end
# Construct the full route path string (mount path, root prefix,
# optional :version segment, namespace, path and format suffix).
def prepare_path(path)
  parts = []
  parts << settings[:mount_path].to_s.split("/") if settings[:mount_path]
  parts << settings[:root_prefix].to_s.split("/") if settings[:root_prefix]
  uses_path_versioning = settings[:version] && settings[:version_options][:using] == :path
  namespace_is_empty = namespace && (namespace.to_s =~ /^\s*$/ || namespace.to_s == '/')
  path_is_empty = path && (path.to_s =~ /^\s*$/ || path.to_s == '/')
  parts << ':version' if uses_path_versioning
  # When path versioning leaves nothing after the version segment, the
  # format suffix needs its own slash (e.g. /v1/.json).
  if ! uses_path_versioning || (! namespace_is_empty || ! path_is_empty)
    parts << namespace.to_s if namespace
    parts << path.to_s if path
    format_suffix = '(.:format)'
  else
    format_suffix = '(/.:format)'
  end
  parts = parts.flatten.select { |part| part != '/' }
  Rack::Mount::Utils.normalize_path(parts.join('/') + format_suffix)
end
def namespace
  # Memoized namespace path derived from the settings stack.
  @namespace ||= Namespace.joined_space_path(settings)
end
# Compile the prepared path string into a Rack::Mount matcher.
def compile_path(prepared_path, anchor = true, requirements = {})
  endpoint_options = {}
  # Constrain the :version capture to the declared versions when present.
  endpoint_options[:version] = /#{settings[:version].join('|')}/ if settings[:version]
  endpoint_options.merge!(requirements)
  Rack::Mount::Strexp.compile(prepared_path, endpoint_options, %w( / . ? ), anchor)
end
# Rack entry point. Works on a dup so per-request state (@env,
# @header, @request, ...) never leaks between concurrent requests.
def call(env)
  dup.call!(env)
end
def call!(env)
  env['api.endpoint'] = self
  if options[:app]
    # A mounted app handles the request itself.
    options[:app].call(env)
  else
    # Otherwise run this endpoint behind its own middleware stack.
    builder = build_middleware
    builder.run options[:app] || lambda{|env| self.run(env) }
    builder.call(env)
  end
end
# The parameters passed into the request as
# well as parsed from URL segments.
def params
  # Memoized for the lifetime of the request.
  @params ||= @request.params
end
# A filtering method that will return a hash
# consisting only of keys that have been declared by a
# `params` statement.
#
# @param params [Hash] The initial hash to filter. Usually this will just be `params`
# @param options [Hash] Can pass `:include_missing` and `:stringify` options.
# Filter +params+ down to the keys declared via `params` blocks,
# recursing into nested hashes and arrays.
#
# @raise [ArgumentError] when no parameters were declared at all.
def declared(params, options = {}, declared_params = settings[:declared_params])
  # Missing keys are included (as nil) unless explicitly disabled.
  options[:include_missing] = true unless options.key?(:include_missing)
  unless declared_params
    raise ArgumentError, "Tried to filter for declared parameters but none exist."
  end
  if params.is_a? Array
    params.map do |param|
      declared(param || {}, options, declared_params)
    end
  else
    declared_params.inject({}) do |hash, key|
      # Scalar declarations are normalized to { key => nil } so the
      # same hash-walking code handles both shapes.
      key = { key => nil } unless key.is_a? Hash
      key.each_pair do |parent, children|
        output_key = options[:stringify] ? parent.to_s : parent.to_sym
        if params.key?(parent) || options[:include_missing]
          hash[output_key] = if children
            declared(params[parent] || {}, options, Array(children))
          else
            params[parent]
          end
        end
      end
      hash
    end
  end
end
# The API version for this request, as stored in env['api.version'].
def version; env['api.version'] end
# End the request and display an error to the
# end user with the specified message.
#
# @param message [String] The message to display.
# @param status [Integer] the HTTP Status Code. Defaults to 403.
def error!(message, status=403)
  # Unwinds to the error middleware via catch(:error).
  throw(:error, { :message => message, :status => status })
end
# Redirect to a new url.
#
# @param url [String] The url to be redirect.
# @param options [Hash] The options used when redirect.
# :permanent, default false.
def redirect(url, options = {})
merged_options = {:permanent => false }.merge(options)
if merged_options[:permanent]
status 301
else
if env['HTTP_VERSION'] == 'HTTP/1.1' && request.request_method.to_s.upcase != "GET"
status 303
else
status 302
end
end
header "Location", url
body ""
end
# Set or retrieve the HTTP status code.
#
# @param status [Integer] The HTTP Status Code to return for this request.
def status(status = nil)
if status
@status = status
else
return @status if @status
case request.request_method.to_s.upcase
when 'POST'
201
else
200
end
end
end
# Set an individual header or retrieve
# all headers that have been set.
def header(key = nil, val = nil)
if key
val ? @header[key.to_s] = val : @header.delete(key.to_s)
else
@header
end
end
# Retrieves all available request headers.
def headers
@headers ||= @request.headers
end
# Set response content-type
def content_type(val)
header('Content-Type', val)
end
# Set or get a cookie
#
# @example
# cookies[:mycookie] = 'mycookie val'
# cookies['mycookie-string'] = 'mycookie string val'
# cookies[:more] = { :value => '123', :expires => Time.at(0) }
# cookies.delete :more
#
def cookies
@cookies ||= Cookies.new
end
# Allows you to define the response body as something other than the
# return value.
#
# @example
# get '/body' do
# body "Body"
# "Not the Body"
# end
#
# GET /body # => "Body"
def body(value = nil)
if value
@body = value
else
@body
end
end
# Allows you to make use of Grape Entities by setting
# the response body to the serializable hash of the
# entity provided in the `:with` option. This has the
# added benefit of automatically passing along environment
# and version information to the serialization, making it
# very easy to do conditional exposures. See Entity docs
# for more info.
#
# @example
#
# get '/users/:id' do
# present User.find(params[:id]),
# :with => API::Entities::User,
# :admin => current_user.admin?
# end
def present(*args)
options = args.count > 1 ? args.extract_options! : {}
key, object = if args.count == 2 && args.first.is_a?(Symbol)
args
else
[nil, args.first]
end
entity_class = options.delete(:with)
# auto-detect the entity from the first object in the collection
object_instance = object.respond_to?(:first) ? object.first : object
object_instance.class.ancestors.each do |potential|
entity_class ||= (settings[:representations] || {})[potential]
end
entity_class ||= object_instance.class.const_get(:Entity) if object_instance.class.const_defined?(:Entity)
root = options.delete(:root)
representation = if entity_class
embeds = {:env => env}
embeds[:version] = env['api.version'] if env['api.version']
entity_class.represent(object, embeds.merge(options))
else
object
end
representation = { root => representation } if root
representation = (@body || {}).merge({key => representation}) if key
body representation
end
# Returns route information for the current request.
#
# @example
#
# desc "Returns the route description."
# get '/' do
# route.route_description
# end
def route
env["rack.routing_args"][:route_info]
end
protected
# Return the collection of endpoints within this endpoint.
# This is the case when an Grape::API mounts another Grape::API.
def endpoints
if options[:app] && options[:app].respond_to?(:endpoints)
options[:app].endpoints
else
nil
end
end
def run(env)
@env = env
@header = {}
@request = Grape::Request.new(@env)
self.extend helpers
cookies.read(@request)
run_filters befores
# Retrieve validations from this namespace and all parent namespaces.
validation_errors = []
settings.gather(:validations).each do |validator|
begin
validator.validate!(params)
rescue Grape::Exceptions::Validation => e
validation_errors << e
end
end
if validation_errors.any?
raise Grape::Exceptions::Validations, errors: validation_errors
end
run_filters after_validations
response_text = @block ? @block.call(self) : nil
run_filters afters
cookies.write(header)
[status, header, [body || response_text]]
end
def build_middleware
b = Rack::Builder.new
b.use Rack::Head
b.use Grape::Middleware::Error,
:format => settings[:format],
:default_status => settings[:default_error_status] || 403,
:rescue_all => settings[:rescue_all],
:rescued_errors => aggregate_setting(:rescued_errors),
:default_error_formatter => settings[:default_error_formatter],
:error_formatters => settings[:error_formatters],
:rescue_options => settings[:rescue_options],
:rescue_handlers => merged_setting(:rescue_handlers)
aggregate_setting(:middleware).each do |m|
m = m.dup
block = m.pop if m.last.is_a?(Proc)
if block
b.use *m, &block
else
b.use *m
end
end
b.use Rack::Auth::Basic, settings[:auth][:realm], &settings[:auth][:proc] if settings[:auth] && settings[:auth][:type] == :http_basic
b.use Rack::Auth::Digest::MD5, settings[:auth][:realm], settings[:auth][:opaque], &settings[:auth][:proc] if settings[:auth] && settings[:auth][:type] == :http_digest
if settings[:version]
b.use Grape::Middleware::Versioner.using(settings[:version_options][:using]), {
:versions => settings[:version] ? settings[:version].flatten : nil,
:version_options => settings[:version_options],
:prefix => settings[:root_prefix]
}
end
b.use Grape::Middleware::Formatter,
:format => settings[:format],
:default_format => settings[:default_format] || :txt,
:content_types => settings[:content_types],
:formatters => settings[:formatters],
:parsers => settings[:parsers]
b
end
def helpers
m = Module.new
settings.stack.each{|frame| m.send :include, frame[:helpers] if frame[:helpers]}
m
end
def aggregate_setting(key)
settings.stack.inject([]) do |aggregate, frame|
aggregate += (frame[key] || [])
end
end
def merged_setting(key)
settings.stack.inject({}) do |merged, frame|
merged.merge(frame[key] || {})
end
end
def run_filters(filters)
(filters || []).each do |filter|
instance_eval &filter
end
end
def befores; aggregate_setting(:befores) end
def after_validations; aggregate_setting(:after_validations) end
def afters; aggregate_setting(:afters) end
end
end
|
require 'speech'
require 'mechanize_proxy'
require 'configuration'
require 'debates'
require 'builder_alpha_attributes'
# Stand-in for a speaker that could not be matched to a known member.
class UnknownSpeaker
  # @param name [String] the raw speaker name as scraped from Hansard
  def initialize(name)
    @name = name
  end

  # Placeholder identifier used wherever a real member id is expected.
  def id
    "unknown"
  end

  # Name object parsed from the raw text.
  def name
    Name.title_first_last(@name)
  end
end
require 'rubygems'
require 'log4r'
class HansardParser
attr_reader :logger
# @param people the collection used to resolve speaker names to members
#   (must respond to the lookup methods used in #lookup_speaker).
def initialize(people)
  @people = people
  conf = Configuration.new
  # Set up logging
  @logger = Log4r::Logger.new 'HansardParser'
  # Log to both standard out and the file set in configuration.yml
  @logger.add(Log4r::Outputter.stdout)
  @logger.add(Log4r::FileOutputter.new('foo', :filename => conf.log_path, :trunc => false,
    :formatter => Log4r::PatternFormatter.new(:pattern => "[%l] %d :: %M")))
end
# Parse both chambers' Hansard for the given sitting date.
#
# Fix: the House call had been commented out, leaving xml_reps_filename
# unused and silently skipping the House of Representatives entirely;
# reinstate it so both output files are produced.
#
# @param date [Date] the sitting day to parse
# @param xml_reps_filename [String] output path for the House XML
# @param xml_senate_filename [String] output path for the Senate XML
def parse_date(date, xml_reps_filename, xml_senate_filename)
  parse_date_house(date, xml_reps_filename, "House")
  parse_date_house(date, xml_senate_filename, "Senate")
end
# Fetch and parse one chamber's Hansard overview page for +date+,
# following each interesting link, and write the result to
# +xml_filename+.
def parse_date_house(date, xml_filename, house)
  throw "house can only have value House or Senate" unless house == "House" || house == "Senate"
  @logger.info "Parsing #{house} speeches for #{date.strftime('%a %d %b %Y')}..."
  url = "http://parlinfoweb.aph.gov.au/piweb/browse.aspx?path=Chamber%20%3E%20#{house}%20Hansard%20%3E%20#{date.year}%20%3E%20#{date.day}%20#{Date::MONTHNAMES[date.month]}%20#{date.year}"
  if house == "House"
    debates = HouseDebates.new(date)
  else
    debates = SenateDebates.new(date)
  end
  # Required to workaround long viewstates generated by .NET (whatever that means)
  # See http://code.whytheluckystiff.net/hpricot/ticket/13
  Hpricot.buffer_size = 400000
  agent = MechanizeProxy.new
  agent.cache_subdirectory = date.to_s
  begin
    page = agent.get(url)
    # HACK: Don't know why if the page isn't found a return code isn't returned. So, hacking around this.
    if page.title == "ParlInfo Web - Error"
      throw "ParlInfo Web - Error"
    end
  rescue
    logger.warn "Could not retrieve overview page for date #{date}"
    return
  end
  # Structure of the page is such that we are only interested in some of the links
  page.links[30..-4].each do |link|
    parse_sub_day_page(link.to_s, agent.click(link), debates, date, house)
    # This ensures that every sub day page has a different major count which limits the impact
    # of when we start supporting things like written questions, procedural text, etc.
    debates.increment_major_count
  end
  debates.output(xml_filename)
end
# Dispatch one overview-page link to the right handler based on its
# link text. Unrecognized link kinds raise so they get noticed.
def parse_sub_day_page(link_text, sub_page, debates, date, house)
  # Only going to consider speeches for the time being
  if link_text =~ /^Speech:/ || link_text =~ /^QUESTIONS WITHOUT NOTICE:/ || link_text =~ /^QUESTIONS TO THE SPEAKER:/
    # Link text for speech has format:
    # HEADING > NAME > HOUR:MINS:SECS
    split = link_text.split('>').map{|a| a.strip}
    logger.error "Expected split to have length 3 in link text: #{link_text}" unless split.size == 3
    time = split[2]
    parse_sub_day_speech_page(sub_page, time, debates, date, house)
  #elsif link_text =~ /^Procedural text:/
  #  # Assuming no time recorded for Procedural text
  #  parse_sub_day_speech_page(sub_page, nil, debates, date)
  elsif link_text == "Official Hansard" || link_text =~ /^Start of Business/ || link_text == "Adjournment"
    # Do nothing - skip this entirely
  elsif link_text =~ /^Procedural text:/ || link_text =~ /^QUESTIONS IN WRITING:/ || link_text =~ /^Division:/ ||
      link_text =~ /^REQUEST FOR DETAILED INFORMATION:/ ||
      link_text =~ /^Petition:/ || link_text =~ /^PRIVILEGE:/ || link_text == "Interruption" ||
      link_text =~ /^QUESTIONS ON NOTICE:/
    # Known-but-unsupported content kinds: log and move on.
    logger.warn "Not yet supporting: #{link_text}"
  else
    throw "Unsupported: #{link_text}"
  end
end
# Parse a single speech page: record its heading, then walk the content
# children, turning each recognized block into a speech.
#
# Fix: the motion/quote branch used assignment (`class_value = "quote"`)
# where a comparison was intended, which both clobbered class_value and
# made the condition always truthy — so unexpected div classes were
# silently treated as speeches instead of raising.
def parse_sub_day_speech_page(sub_page, time, debates, date, house)
  top_content_tag = sub_page.search('div#contentstart').first
  throw "Page on date #{date} at time #{time} has no content" if top_content_tag.nil?
  # Extract permanent URL of this subpage. Also, quoting because there is a bug
  # in XML Builder that for some reason is not quoting attributes properly
  url = quote(sub_page.links.text("[Permalink]").uri.to_s)
  newtitle = sub_page.search('div#contentstart div.hansardtitle').map { |m| m.inner_html }.join('; ')
  newsubtitle = sub_page.search('div#contentstart div.hansardsubtitle').map { |m| m.inner_html }.join('; ')
  # Replace any unicode characters
  newtitle = replace_unicode(newtitle)
  newsubtitle = replace_unicode(newsubtitle)
  debates.add_heading(newtitle, newsubtitle, url)
  speaker = nil
  top_content_tag.children.each do |e|
    class_value = e.attributes["class"]
    if e.name == "div"
      if class_value == "hansardtitlegroup" || class_value == "hansardsubtitlegroup"
        # Titles already handled above; skip.
      elsif class_value == "speech0" || class_value == "speech1"
        # First child is metadata; the rest are the speech blocks.
        e.children[1..-1].each do |e|
          speaker = parse_speech_block(e, speaker, time, url, debates, date, house)
          debates.increment_minor_count
        end
      elsif class_value == "motionnospeech" || class_value == "subspeech0" || class_value == "subspeech1" ||
          class_value == "motion" || class_value == "quote"
        speaker = parse_speech_block(e, speaker, time, url, debates, date, house)
        debates.increment_minor_count
      else
        throw "Unexpected class value #{class_value} for tag #{e.name}"
      end
    elsif e.name == "p"
      speaker = parse_speech_block(e, speaker, time, url, debates, date, house)
      debates.increment_minor_count
    elsif e.name == "table"
      if class_value == "division"
        debates.increment_minor_count
        # Ignore (for the time being)
      else
        throw "Unexpected class value #{class_value} for tag #{e.name}"
      end
    else
      throw "Unexpected tag #{e.name}"
    end
  end
end
# Returns new speaker
# Record one speech block and return the speaker to carry forward.
def parse_speech_block(e, speaker, time, url, debates, date, house)
  speakername, interjection = extract_speakername(e)
  # Only change speaker when a name was actually found in the block.
  current = speakername ? lookup_speaker(speakername, date, house) : speaker
  debates.add_speech(current, time, url, clean_speech_content(url, e))
  # After an interjection the next speech should never be attributed to
  # the interjector, so keep the previous speaker.
  interjection ? speaker : current
end
# Pull the speaker's name out of a speech block, trying progressively
# weaker sources: talkername link, speechname span, interjection text,
# then a free-text "... interjecting" match.
#
# @return [Array(String, Boolean)] the name (or nil) and whether the
#   block is an interjection.
def extract_speakername(content)
  interjection = false
  # Try to extract speaker name from talkername tag
  tag = content.search('span.talkername a').first
  tag2 = content.search('span.speechname').first
  if tag
    name = tag.inner_html
    # Now check if there is something like <span class="talkername"><a>Some Text</a></span> <b>(Some Text)</b>
    tag = content.search('span.talkername ~ b').first
    # Only use it if it is surrounded by brackets
    if tag && tag.inner_html.match(/\((.*)\)/)
      # $~[0] is the whole match, brackets included.
      name += " " + $~[0]
    end
  elsif tag2
    name = tag2.inner_html
  # If that fails try an interjection
  elsif content.search("div.speechType").inner_html == "Interjection"
    interjection = true
    text = strip_tags(content.search("div.speechType + *").first)
    m = text.match(/([a-z].*) interjecting/i)
    if m
      name = m[1]
    else
      m = text.match(/([a-z].*)—/i)
      if m
        name = m[1]
      else
        name = nil
      end
    end
  # As a last resort try searching for interjection text
  else
    m = strip_tags(content).match(/([a-z].*) interjecting/i)
    if m
      name = m[1]
      interjection = true
    end
  end
  [name, interjection]
end
# Replace unicode characters by their equivalent
def replace_unicode(text)
  # Map curly quotes and em dash (UTF-8 byte sequences) to ASCII.
  result = text.gsub("\342\200\230", "'").gsub("\342\200\231", "'").gsub("\342\200\224", "-")
  # Warn about any remaining non-ASCII bytes so new cases get noticed.
  result.each_byte do |byte|
    logger.warn "Found invalid characters in: #{result.dump}" if byte > 127
  end
  result
end
# Normalize raw speech HTML: strip metadata spans, italicize motions,
# quotes and amendments, absolutize links, then scrub leftover
# typographic noise. Raises if an unexpected tag survives cleaning.
def clean_speech_content(base_url, content)
  doc = Hpricot(content.to_s)
  doc.search('div.speechType').remove
  doc.search('span.talkername ~ b').remove
  doc.search('span.talkername').remove
  doc.search('span.talkerelectorate').remove
  doc.search('span.talkerrole').remove
  doc.search('hr').remove
  make_motions_and_quotes_italic(doc)
  remove_subspeech_tags(doc)
  fix_links(base_url, doc)
  make_amendments_italic(doc)
  fix_attributes_of_p_tags(doc)
  fix_attributes_of_td_tags(doc)
  fix_motionnospeech_tags(doc)
  # Do pure string manipulations from here
  text = doc.to_s
  # "\342\200\224" is an em dash; drop parenthesized dashes and
  # dashes glued to non-word characters, plus timestamps like (9.30 a.m.)
  text = text.gsub("(\342\200\224)", '')
  text = text.gsub(/([^\w])\342\200\224/) {|m| m[0..0]}
  text = text.gsub(/\(\d{1,2}.\d\d a.m.\)/, '')
  text = text.gsub(/\(\d{1,2}.\d\d p.m.\)/, '')
  text = text.gsub('()', '')
  text = text.gsub('<div class="separator"></div>', '')
  # Look for tags in the text and display warnings if any of them aren't being handled yet
  text.scan(/<[a-z][^>]*>/i) do |t|
    m = t.match(/<([a-z]*) [^>]*>/i)
    if m
      tag = m[1]
    else
      tag = t[1..-2]
    end
    allowed_tags = ["b", "i", "dl", "dt", "dd", "ul", "li", "a", "table", "td", "tr", "img"]
    if !allowed_tags.include?(tag) && t != "<p>" && t != '<p class="italic">'
      throw "Tag #{t} is present in speech contents: " + text
    end
  end
  # Re-parse so callers receive a document reflecting the string edits.
  doc = Hpricot(text)
  #p doc.to_s
  doc
end
# Turn motionnospeech divs into plain paragraphs and drop the speaker
# metadata spans they carry.
def fix_motionnospeech_tags(content)
  content.search('div.motionnospeech').wrap('<p></p>')
  replace_with_inner_html(content, 'div.motionnospeech')
  content.search('span.speechname').remove
  content.search('span.speechelectorate').remove
  content.search('span.speechrole').remove
  content.search('span.speechtime').remove
end
# Normalize paragraph class/style attributes to the small set the
# output format understands.
def fix_attributes_of_p_tags(content)
  content.search('p.parabold').wrap('<b></b>')
  content.search('p').each do |paragraph|
    case paragraph.get_attribute('class')
    when "block", "parablock", "parasmalltablejustified",
         "parasmalltableleft", "parabold", "paraheading"
      # Layout-only classes carry no meaning downstream.
      paragraph.remove_attribute('class')
    when "paraitalic"
      paragraph.set_attribute('class', 'italic')
    when "italic"
      paragraph.remove_attribute('style') if paragraph.get_attribute('style')
    end
  end
end
# Strip inline styles from every table cell.
def fix_attributes_of_td_tags(content)
  content.search('td').each { |cell| cell.remove_attribute('style') }
end
# Make every link and image URL absolute against +base_url+; anchors
# without an href are unwrapped entirely.
def fix_links(base_url, content)
  content.search('a').each do |e|
    href_value = e.get_attribute('href')
    if href_value.nil?
      # Remove a tags
      e.swap(e.inner_html)
    else
      e.set_attribute('href', URI.join(base_url, href_value))
    end
  end
  content.search('img').each do |e|
    e.set_attribute('src', URI.join(base_url, e.get_attribute('src')))
  end
  content
end
# Unwrap every element matching +search+, keeping only its inner HTML.
def replace_with_inner_html(content, search)
  content.search(search).each { |element| element.swap(element.inner_html) }
end
# Mark motion and quote paragraphs italic, then unwrap the container divs.
def make_motions_and_quotes_italic(content)
  content.search('div.motion p').set(:class => 'italic')
  replace_with_inner_html(content, 'div.motion')
  content.search('div.quote p').set(:class => 'italic')
  replace_with_inner_html(content, 'div.quote')
  content
end
# Mark amendment paragraphs italic, then unwrap the amendment wrappers.
def make_amendments_italic(content)
  content.search('div.amendments div.amendment0 p').set(:class => 'italic')
  content.search('div.amendments div.amendment1 p').set(:class => 'italic')
  replace_with_inner_html(content, 'div.amendment0')
  replace_with_inner_html(content, 'div.amendment1')
  replace_with_inner_html(content, 'div.amendments')
  content
end
# Unwrap subspeech containers, keeping their content in place.
def remove_subspeech_tags(content)
  replace_with_inner_html(content, 'div.subspeech0')
  replace_with_inner_html(content, 'div.subspeech1')
  content
end
# Escape ampersands so the text is safe inside an XML attribute
# (working around the XML Builder quoting bug mentioned at the call
# site).
#
# Fix: the previous body was `text.sub('&', '&')` — a literal no-op
# (most plausibly an '&amp;' entity that got unescaped somewhere along
# the line). Use gsub so every ampersand is escaped, not just the first.
def quote(text)
  text.gsub('&', '&amp;')
end
# Resolve a scraped speaker name to a member of the given house on the
# given date, handling the special Speaker / Deputy Speaker / President
# titles. Falls back to an UnknownSpeaker when no match is found.
def lookup_speaker(speakername, date, house)
  throw "house can only have value House or Senate" unless house == "House" || house == "Senate"
  throw "speakername can not be nil in lookup_speaker" if speakername.nil?
  # HACK alert (Oh you know what this whole thing is a big hack alert)
  if speakername =~ /^the speaker/i
    throw "Don't expect Speaker in Senate" unless house == "House"
    member = @people.house_speaker(date)
  # The name might be "The Deputy Speaker (Mr Smith)". So, take account of this
  elsif speakername =~ /^the deputy speaker/i
    throw "Don't expect Deputy Speaker in Senate" unless house == "House"
    # Check name in brackets
    match = speakername.match(/^the deputy speaker \((.*)\)/i)
    if match
      #logger.warn "Deputy speaker is #{match[1]}"
      speakername = match[1]
      name = Name.title_first_last(speakername)
      member = @people.find_member_by_name_current_on_date(name, date)
    else
      member = @people.deputy_house_speaker(date)
    end
  elsif speakername =~ /^the president/i
    throw "Don't expect President in House of Representatives" unless house == "Senate"
    member = @people.senate_president(date)
  else
    # Lookup id of member based on speakername
    name = Name.title_first_last(speakername)
    if house == "House"
      member = @people.find_member_by_name_current_on_date(name, date)
    else
      member = @people.find_senator_by_name_current_on_date(name, date)
    end
  end
  if member.nil?
    logger.warn "Unknown speaker #{speakername}"
    member = UnknownSpeaker.new(speakername)
  end
  member
end
# Return the document's text with all markup tags removed.
def strip_tags(doc)
  doc.to_s.gsub(/<\/?[^>]*>/, "")
end
# Smaller of the two values.
def min(a, b)
  a < b ? a : b
end
end
Woops. Reinstating parsing of House Hansard
require 'speech'
require 'mechanize_proxy'
require 'configuration'
require 'debates'
require 'builder_alpha_attributes'
class UnknownSpeaker
def initialize(name)
@name = name
end
def id
"unknown"
end
def name
Name.title_first_last(@name)
end
end
require 'rubygems'
require 'log4r'
class HansardParser
attr_reader :logger
def initialize(people)
@people = people
conf = Configuration.new
# Set up logging
@logger = Log4r::Logger.new 'HansardParser'
# Log to both standard out and the file set in configuration.yml
@logger.add(Log4r::Outputter.stdout)
@logger.add(Log4r::FileOutputter.new('foo', :filename => conf.log_path, :trunc => false,
:formatter => Log4r::PatternFormatter.new(:pattern => "[%l] %d :: %M")))
end
def parse_date(date, xml_reps_filename, xml_senate_filename)
parse_date_house(date, xml_reps_filename, "House")
parse_date_house(date, xml_senate_filename, "Senate")
end
def parse_date_house(date, xml_filename, house)
throw "house can only have value House or Senate" unless house == "House" || house == "Senate"
@logger.info "Parsing #{house} speeches for #{date.strftime('%a %d %b %Y')}..."
url = "http://parlinfoweb.aph.gov.au/piweb/browse.aspx?path=Chamber%20%3E%20#{house}%20Hansard%20%3E%20#{date.year}%20%3E%20#{date.day}%20#{Date::MONTHNAMES[date.month]}%20#{date.year}"
if house == "House"
debates = HouseDebates.new(date)
else
debates = SenateDebates.new(date)
end
# Required to workaround long viewstates generated by .NET (whatever that means)
# See http://code.whytheluckystiff.net/hpricot/ticket/13
Hpricot.buffer_size = 400000
agent = MechanizeProxy.new
agent.cache_subdirectory = date.to_s
begin
page = agent.get(url)
# HACK: Don't know why if the page isn't found a return code isn't returned. So, hacking around this.
if page.title == "ParlInfo Web - Error"
throw "ParlInfo Web - Error"
end
rescue
logger.warn "Could not retrieve overview page for date #{date}"
return
end
# Structure of the page is such that we are only interested in some of the links
page.links[30..-4].each do |link|
parse_sub_day_page(link.to_s, agent.click(link), debates, date, house)
# This ensures that every sub day page has a different major count which limits the impact
# of when we start supporting things like written questions, procedural text, etc.
debates.increment_major_count
end
debates.output(xml_filename)
end
# Dispatches a single link from the day's overview page. Speech-type links are
# parsed; known-but-unsupported content is logged; anything unrecognised aborts.
def parse_sub_day_page(link_text, sub_page, debates, date, house)
# Only going to consider speeches for the time being
if link_text =~ /^Speech:/ || link_text =~ /^QUESTIONS WITHOUT NOTICE:/ || link_text =~ /^QUESTIONS TO THE SPEAKER:/
# Link text for speech has format:
# HEADING > NAME > HOUR:MINS:SECS
split = link_text.split('>').map{|a| a.strip}
logger.error "Expected split to have length 3 in link text: #{link_text}" unless split.size == 3
time = split[2]
parse_sub_day_speech_page(sub_page, time, debates, date, house)
#elsif link_text =~ /^Procedural text:/
#  # Assuming no time recorded for Procedural text
#  parse_sub_day_speech_page(sub_page, nil, debates, date)
elsif link_text == "Official Hansard" || link_text =~ /^Start of Business/ || link_text == "Adjournment"
# Do nothing - skip this entirely
elsif link_text =~ /^Procedural text:/ || link_text =~ /^QUESTIONS IN WRITING:/ || link_text =~ /^Division:/ ||
link_text =~ /^REQUEST FOR DETAILED INFORMATION:/ ||
link_text =~ /^Petition:/ || link_text =~ /^PRIVILEGE:/ || link_text == "Interruption" ||
link_text =~ /^QUESTIONS ON NOTICE:/
logger.warn "Not yet supporting: #{link_text}"
else
# NOTE(review): throw used like raise - aborts on unrecognised content
throw "Unsupported: #{link_text}"
end
end
# Parses one speech sub-page: records the heading, then walks the content divs
# adding each speech block to debates. time may be nil when none was recorded.
# BUG FIX: the "motion"/"quote" branch previously read `class_value = "quote"`
# (assignment instead of ==), which clobbered class_value and made the branch
# match every remaining div.
def parse_sub_day_speech_page(sub_page, time, debates, date, house)
  top_content_tag = sub_page.search('div#contentstart').first
  throw "Page on date #{date} at time #{time} has no content" if top_content_tag.nil?
  # Extract permanent URL of this subpage. Also, quoting because there is a bug
  # in XML Builder that for some reason is not quoting attributes properly
  url = quote(sub_page.links.text("[Permalink]").uri.to_s)
  newtitle = sub_page.search('div#contentstart div.hansardtitle').map { |m| m.inner_html }.join('; ')
  newsubtitle = sub_page.search('div#contentstart div.hansardsubtitle').map { |m| m.inner_html }.join('; ')
  # Replace any unicode characters
  newtitle = replace_unicode(newtitle)
  newsubtitle = replace_unicode(newsubtitle)
  debates.add_heading(newtitle, newsubtitle, url)
  speaker = nil
  top_content_tag.children.each do |e|
    class_value = e.attributes["class"]
    if e.name == "div"
      if class_value == "hansardtitlegroup" || class_value == "hansardsubtitlegroup"
        # Titles were already extracted above - nothing to do
      elsif class_value == "speech0" || class_value == "speech1"
        # First child is the speaker heading; the rest are the speech paragraphs
        e.children[1..-1].each do |child|
          speaker = parse_speech_block(child, speaker, time, url, debates, date, house)
          debates.increment_minor_count
        end
      elsif class_value == "motionnospeech" || class_value == "subspeech0" || class_value == "subspeech1" ||
          class_value == "motion" || class_value == "quote"
        speaker = parse_speech_block(e, speaker, time, url, debates, date, house)
        debates.increment_minor_count
      else
        throw "Unexpected class value #{class_value} for tag #{e.name}"
      end
    elsif e.name == "p"
      speaker = parse_speech_block(e, speaker, time, url, debates, date, house)
      debates.increment_minor_count
    elsif e.name == "table"
      if class_value == "division"
        # Ignore divisions (for the time being) but keep the minor count in step
        debates.increment_minor_count
      else
        throw "Unexpected class value #{class_value} for tag #{e.name}"
      end
    else
      throw "Unexpected tag #{e.name}"
    end
  end
end
# Adds one speech block to debates and returns the speaker that the *next*
# speech should be attributed to (interjections do not take the floor).
def parse_speech_block(e, speaker, time, url, debates, date, house)
  speakername, interjection = extract_speakername(e)
  # Keep the previous speaker when no name could be extracted
  current = speakername.nil? ? speaker : lookup_speaker(speakername, date, house)
  debates.add_speech(current, time, url, clean_speech_content(url, e))
  # With interjections the next speech should never be by the person interjecting
  interjection ? speaker : current
end
# Attempts to work out who is speaking in a block of speech HTML.
# Returns [name, interjection] where name may be nil and interjection is a
# boolean marking that the name was recovered from interjection text.
def extract_speakername(content)
interjection = false
# Try to extract speaker name from talkername tag
tag = content.search('span.talkername a').first
tag2 = content.search('span.speechname').first
if tag
name = tag.inner_html
# Now check if there is something like <span class="talkername"><a>Some Text</a></span> <b>(Some Text)</b>
tag = content.search('span.talkername ~ b').first
# Only use it if it is surrounded by brackets
if tag && tag.inner_html.match(/\((.*)\)/)
# $~[0] is the whole bracketed match, brackets included
name += " " + $~[0]
end
elsif tag2
name = tag2.inner_html
# If that fails try an interjection
elsif content.search("div.speechType").inner_html == "Interjection"
interjection = true
text = strip_tags(content.search("div.speechType + *").first)
m = text.match(/([a-z].*) interjecting/i)
if m
name = m[1]
else
# Fall back to the text up to the first em-dash
m = text.match(/([a-z].*)—/i)
if m
name = m[1]
else
name = nil
end
end
# As a last resort try searching for interjection text
else
m = strip_tags(content).match(/([a-z].*) interjecting/i)
if m
name = m[1]
interjection = true
end
end
[name, interjection]
end
# Replaces the common UTF-8 punctuation (curly quotes, em dash) with ASCII
# equivalents and warns about any non-ASCII bytes that remain.
def replace_unicode(text)
  cleaned = text.gsub("\342\200\230", "'").gsub("\342\200\231", "'").gsub("\342\200\224", "-")
  cleaned.each_byte do |byte|
    logger.warn "Found invalid characters in: #{cleaned.dump}" if byte > 127
  end
  cleaned
end
# Cleans raw speech HTML: strips speaker metadata spans, normalises motions,
# quotes and amendments to italic paragraphs, makes links absolute and removes
# layout noise. Aborts if an unexpected tag survives cleaning.
# Returns a fresh Hpricot document of the cleaned markup.
def clean_speech_content(base_url, content)
doc = Hpricot(content.to_s)
doc.search('div.speechType').remove
doc.search('span.talkername ~ b').remove
doc.search('span.talkername').remove
doc.search('span.talkerelectorate').remove
doc.search('span.talkerrole').remove
doc.search('hr').remove
make_motions_and_quotes_italic(doc)
remove_subspeech_tags(doc)
fix_links(base_url, doc)
make_amendments_italic(doc)
fix_attributes_of_p_tags(doc)
fix_attributes_of_td_tags(doc)
fix_motionnospeech_tags(doc)
# Do pure string manipulations from here
text = doc.to_s
# Drop empty em-dash parentheticals (the bytes are the UTF-8 em dash)
text = text.gsub("(\342\200\224)", '')
text = text.gsub(/([^\w])\342\200\224/) {|m| m[0..0]}
# Remove inline timestamps such as "(9.30 a.m.)"
text = text.gsub(/\(\d{1,2}.\d\d a.m.\)/, '')
text = text.gsub(/\(\d{1,2}.\d\d p.m.\)/, '')
text = text.gsub('()', '')
text = text.gsub('<div class="separator"></div>', '')
# Look for tags in the text and display warnings if any of them aren't being handled yet
text.scan(/<[a-z][^>]*>/i) do |t|
m = t.match(/<([a-z]*) [^>]*>/i)
if m
tag = m[1]
else
# Tag had no attributes - take the name between the angle brackets
tag = t[1..-2]
end
allowed_tags = ["b", "i", "dl", "dt", "dd", "ul", "li", "a", "table", "td", "tr", "img"]
if !allowed_tags.include?(tag) && t != "<p>" && t != '<p class="italic">'
throw "Tag #{t} is present in speech contents: " + text
end
end
doc = Hpricot(text)
#p doc.to_s
doc
end
# Unwraps motionnospeech divs into plain paragraphs and drops the speaker
# metadata spans that live inside them.
def fix_motionnospeech_tags(content)
  content.search('div.motionnospeech').wrap('<p></p>')
  replace_with_inner_html(content, 'div.motionnospeech')
  %w[speechname speechelectorate speechrole speechtime].each do |klass|
    content.search("span.#{klass}").remove
  end
end
# Normalises paragraph classes: layout-only classes are dropped, italic
# variants are collapsed to class="italic", and bold paragraphs gain a <b>.
def fix_attributes_of_p_tags(content)
  content.search('p.parabold').wrap('<b></b>')
  content.search('p').each do |para|
    case para.get_attribute('class')
    when 'block', 'parablock', 'parasmalltablejustified', 'parasmalltableleft', 'parabold', 'paraheading'
      para.remove_attribute('class')
    when 'paraitalic'
      para.set_attribute('class', 'italic')
    when 'italic'
      para.remove_attribute('style') if para.get_attribute('style')
    end
  end
end
# Strips inline style attributes from every table cell.
def fix_attributes_of_td_tags(content)
  content.search('td').each { |cell| cell.remove_attribute('style') }
end
# Makes every link and image URL absolute against base_url.
# Anchor tags without an href are unwrapped (replaced by their contents).
def fix_links(base_url, content)
  content.search('a').each do |anchor|
    href = anchor.get_attribute('href')
    if href
      anchor.set_attribute('href', URI.join(base_url, href))
    else
      # Remove a tags
      anchor.swap(anchor.inner_html)
    end
  end
  content.search('img').each do |image|
    image.set_attribute('src', URI.join(base_url, image.get_attribute('src')))
  end
  content
end
# Replaces each element matching the selector with its own inner HTML,
# effectively removing the wrapper tag.
def replace_with_inner_html(content, search)
  content.search(search).each { |element| element.swap(element.inner_html) }
end
# Renders motions and quotes as italic paragraphs, then removes the wrapper divs.
def make_motions_and_quotes_italic(content)
  %w[motion quote].each do |kind|
    content.search("div.#{kind} p").set(:class => 'italic')
    replace_with_inner_html(content, "div.#{kind}")
  end
  content
end
# Italicises the paragraphs of both alternating amendment styles and then
# unwraps all amendment container divs.
def make_amendments_italic(content)
  %w[amendment0 amendment1].each do |kind|
    content.search("div.amendments div.#{kind} p").set(:class => 'italic')
  end
  %w[amendment0 amendment1 amendments].each do |kind|
    replace_with_inner_html(content, "div.#{kind}")
  end
  content
end
# Unwraps the alternating subspeech container divs.
def remove_subspeech_tags(content)
  %w[subspeech0 subspeech1].each { |kind| replace_with_inner_html(content, "div.#{kind}") }
  content
end
# Escapes ampersands for embedding in XML attributes (done by hand because of
# a bug in XML Builder that does not quote attributes properly).
# BUG FIX: previously read text.sub('&', '&') which was a no-op; now escapes
# every ampersand, not just the first.
def quote(text)
  text.gsub('&', '&amp;')
end
# Resolves a raw speaker name string to a member of the given house current on
# the given date. Office titles (Speaker, Deputy Speaker, President) are
# special-cased; unresolvable names fall back to an UnknownSpeaker placeholder.
def lookup_speaker(speakername, date, house)
throw "house can only have value House or Senate" unless house == "House" || house == "Senate"
throw "speakername can not be nil in lookup_speaker" if speakername.nil?
# HACK alert (Oh you know what this whole thing is a big hack alert)
if speakername =~ /^the speaker/i
throw "Don't expect Speaker in Senate" unless house == "House"
member = @people.house_speaker(date)
# The name might be "The Deputy Speaker (Mr Smith)". So, take account of this
elsif speakername =~ /^the deputy speaker/i
throw "Don't expect Deputy Speaker in Senate" unless house == "House"
# Check name in brackets
match = speakername.match(/^the deputy speaker \((.*)\)/i)
if match
#logger.warn "Deputy speaker is #{match[1]}"
speakername = match[1]
name = Name.title_first_last(speakername)
member = @people.find_member_by_name_current_on_date(name, date)
else
member = @people.deputy_house_speaker(date)
end
elsif speakername =~ /^the president/i
throw "Don't expect President in House of Representatives" unless house == "Senate"
member = @people.senate_president(date)
else
# Lookup id of member based on speakername
name = Name.title_first_last(speakername)
if house == "House"
member = @people.find_member_by_name_current_on_date(name, date)
else
member = @people.find_senator_by_name_current_on_date(name, date)
end
end
if member.nil?
logger.warn "Unknown speaker #{speakername}"
member = UnknownSpeaker.new(speakername)
end
member
end
# Returns the string form of doc with all HTML/XML tags removed.
def strip_tags(doc)
  doc.to_s.gsub(/<\/?[^>]*>/, "")
end
# Returns the smaller of the two values.
def min(a, b)
  a < b ? a : b
end
end
# --------------------------------------------------------------------------
require 'environment'
require 'speech'
require 'mechanize_proxy'
require 'configuration'
require 'debates'
require 'builder_alpha_attributes'
require 'house'
require 'people_image_downloader'
# Using Active Support (part of Ruby on Rails) for Unicode support
require 'activesupport'
require 'log4r'
require 'hansard_page'
require 'hansard_day'
$KCODE = 'u'
# Null-object stand-in used when a speech can not be attributed to a known member.
class UnknownSpeaker
  def initialize(name)
    # Raw speaker name text exactly as it appeared in Hansard
    @name = name
  end

  # Sentinel id that marks an unmatched speaker downstream.
  def id
    "unknown"
  end

  # Parses the raw text into a Name object on demand.
  def name
    Name.title_first_last(@name)
  end
end
# Parses the Hansard XML published on parlinfo.aph.gov.au for a given day and
# house, resolving speakers via the supplied People object and writing the
# result out through the Debates builder.
class HansardParser
attr_reader :logger
# people: a People lookup object used to resolve speaker names to members
def initialize(people)
@people = people
@conf = Configuration.new
# Set up logging
@logger = Log4r::Logger.new 'HansardParser'
# Log to both standard out and the file set in configuration.yml
@logger.add(Log4r::Outputter.stdout)
@logger.add(Log4r::FileOutputter.new('foo', :filename => @conf.log_path, :trunc => false,
:formatter => Log4r::PatternFormatter.new(:pattern => "[%l] %d :: %M")))
end
# Returns the subdirectory where html_cache files for a particular date are stored
def cache_subdirectory(date, house)
date.to_s
end
# Returns the XML file loaded from aph.gov.au as plain text which contains all the Hansard data
# Returns nil if it doesn't exist
def hansard_xml_source_data_on_date(date, house)
agent = MechanizeProxy.new
agent.cache_subdirectory = cache_subdirectory(date, house)
# This is the page returned by Parlinfo Search for that day
url = "http://parlinfo.aph.gov.au/parlInfo/search/display/display.w3p;query=Id:chamber/hansard#{house.representatives? ? "r" : "s"}/#{date}/0000"
page = agent.get(url)
tag = page.at('div#content center')
if tag && tag.inner_html =~ /^Unable to find document/
nil
else
link = page.link_with(:text => "View/Save XML")
if link.nil?
@logger.error "Link to XML download is missing"
nil
else
# Follow the download link and return the raw XML body
agent.click(link).body
end
end
end
# Returns HansardDate object for a particular day
def hansard_day_on_date(date, house)
text = hansard_xml_source_data_on_date(date, house)
HansardDay.new(Hpricot.XML(text), @logger) if text
end
# Parse but only if there is a page that is at "proof" stage
def parse_date_house_only_in_proof(date, xml_filename, house)
day = hansard_day_on_date(date, house)
if day && day.in_proof?
logger.info "Deleting all cached html for #{date} because that date is in proof stage."
FileUtils.rm_rf("#{@conf.html_cache_path}/#{cache_subdirectory(date, house)}")
logger.info "Redownloading pages on #{date}..."
parse_date_house(date, xml_filename, house)
end
end
# Parses all speeches for the given house/date and writes them to xml_filename.
# No file is written when there is no data for that day.
def parse_date_house(date, xml_filename, house)
debates = Debates.new(date, house, @logger)
content = false
day = hansard_day_on_date(date, house)
if day
@logger.info "Parsing #{house} speeches for #{date.strftime('%a %d %b %Y')}..."
@logger.warn "In proof stage" if day.in_proof?
day.pages.each do |page|
content = true
#throw "Unsupported: #{page.full_hansard_title}" unless page.supported? || page.to_skip? || page.not_yet_supported?
if page
debates.add_heading(page.hansard_title, page.hansard_subtitle, page.permanent_url)
speaker = nil
page.speeches.each do |speech|
if speech
# Only change speaker if a speaker name or url was found
this_speaker = (speech.speakername || speech.aph_id) ? lookup_speaker(speech, date, house) : speaker
# With interjections the next speech should never be by the person doing the interjection
speaker = this_speaker unless speech.interjection
debates.add_speech(this_speaker, speech.time, speech.permanent_url, speech.clean_content)
end
debates.increment_minor_count
end
end
# This ensures that every sub day page has a different major count which limits the impact
# of when we start supporting things like written questions, procedurial text, etc..
debates.increment_major_count
end
else
@logger.info "Skipping #{house} speeches for #{date.strftime('%a %d %b %Y')} (no data available)"
end
# Only output the debate file if there's going to be something in it
debates.output(xml_filename) if content
end
# Resolves speakers referred to by office (Speaker, President, Chairman, ...)
# rather than by name. Returns nil when the name is not an office title.
def lookup_speaker_by_title(speech, date, house)
# Some sanity checking.
if speech.speakername =~ /speaker/i && house.senate?
logger.error "The Speaker is not expected in the Senate"
return nil
elsif speech.speakername =~ /president/i && house.representatives?
logger.error "The President is not expected in the House of Representatives"
return nil
elsif speech.speakername =~ /chairman/i && house.representatives?
logger.error "The Chairman is not expected in the House of Representatives"
return nil
end
# Handle speakers where they are referred to by position rather than name
# Handle names in brackets first
if speech.speakername =~ /^the (deputy speaker|acting deputy president|temporary chairman) \((.*)\)/i
@people.find_member_by_name_current_on_date(Name.title_first_last($~[2]), date, house)
elsif speech.speakername =~ /^the speaker/i
@people.house_speaker(date)
elsif speech.speakername =~ /^the deputy speaker/i
@people.deputy_house_speaker(date)
elsif speech.speakername =~ /^the president/i
@people.senate_president(date)
elsif speech.speakername =~ /^(the )?chairman/i || speech.speakername =~ /^the deputy president/i
# The "Chairman" in the main Senate Hansard is when the Senate is sitting as a committee of the whole Senate.
# In this case, the "Chairman" is the deputy president. See http://www.aph.gov.au/senate/pubs/briefs/brief06.htm#3
@people.deputy_senate_president(date)
end
end
# Looks up the member for a speech by name, trying office titles first and
# then both "Title First Last" and "Last, First" orderings. Returns nil if
# no match is found.
def lookup_speaker_by_name(speech, date, house)
#puts "Looking up speaker by name: #{speech.speakername}"
throw "speakername can not be nil in lookup_speaker" if speech.speakername.nil?
member = lookup_speaker_by_title(speech, date, house)
# If member hasn't already been set then lookup using speakername
if member.nil?
name = Name.title_first_last(speech.speakername)
member = @people.find_member_by_name_current_on_date(name, date, house)
if member.nil?
name = Name.last_title_first(speech.speakername)
member = @people.find_member_by_name_current_on_date(name, date, house)
end
end
member
end
# Looks up the member via the aph id attached to the speech.
# Returns nil (after logging) when the id is not recognised.
def lookup_speaker_by_aph_id(speech, date, house)
person = @people.find_person_by_aph_id(speech.aph_id)
if person
# Now find the member for that person who is current on the given date
@people.find_member_by_name_current_on_date(person.name, date, house)
else
logger.error "Can't figure out which person the aph id #{speech.aph_id} belongs to"
nil
end
end
# Resolves a speech to a member: by name first, falling back to the aph id,
# and finally to an UnknownSpeaker placeholder.
def lookup_speaker(speech, date, house)
member = lookup_speaker_by_name(speech, date, house)
if member.nil?
# Only try to use the aph id if we can't look up by name
member = lookup_speaker_by_aph_id(speech, date, house) if speech.aph_id
if member
# If link is valid use that to look up the member
logger.error "Determined speaker #{member.person.name.full_name} by link only. Valid name missing."
end
end
if member.nil?
logger.warn "Unknown speaker #{speech.speakername}" unless HansardSpeech.generic_speaker?(speech.speakername)
member = UnknownSpeaker.new(speech.speakername)
end
member
end
end
# Now looks up speaker by checking the id first, then trying a name
require 'environment'
require 'speech'
require 'mechanize_proxy'
require 'configuration'
require 'debates'
require 'builder_alpha_attributes'
require 'house'
require 'people_image_downloader'
# Using Active Support (part of Ruby on Rails) for Unicode support
require 'activesupport'
require 'log4r'
require 'hansard_page'
require 'hansard_day'
$KCODE = 'u'
# Null-object stand-in used when a speech can not be attributed to a known member.
class UnknownSpeaker
  def initialize(name)
    # Raw speaker name text exactly as it appeared in Hansard
    @name = name
  end

  # Sentinel id that marks an unmatched speaker downstream.
  def id
    "unknown"
  end

  # Parses the raw text into a Name object on demand.
  def name
    Name.title_first_last(@name)
  end
end
# Parses the Hansard XML published on parlinfo.aph.gov.au for a given day and
# house, resolving speakers via the supplied People object and writing the
# result out through the Debates builder. This version resolves speakers by
# aph id first, then by name.
class HansardParser
attr_reader :logger
# people: a People lookup object used to resolve speaker names to members
def initialize(people)
@people = people
@conf = Configuration.new
# Set up logging
@logger = Log4r::Logger.new 'HansardParser'
# Log to both standard out and the file set in configuration.yml
@logger.add(Log4r::Outputter.stdout)
@logger.add(Log4r::FileOutputter.new('foo', :filename => @conf.log_path, :trunc => false,
:formatter => Log4r::PatternFormatter.new(:pattern => "[%l] %d :: %M")))
end
# Returns the subdirectory where html_cache files for a particular date are stored
def cache_subdirectory(date, house)
date.to_s
end
# Returns the XML file loaded from aph.gov.au as plain text which contains all the Hansard data
# Returns nil if it doesn't exist
def hansard_xml_source_data_on_date(date, house)
agent = MechanizeProxy.new
agent.cache_subdirectory = cache_subdirectory(date, house)
# This is the page returned by Parlinfo Search for that day
url = "http://parlinfo.aph.gov.au/parlInfo/search/display/display.w3p;query=Id:chamber/hansard#{house.representatives? ? "r" : "s"}/#{date}/0000"
page = agent.get(url)
tag = page.at('div#content center')
if tag && tag.inner_html =~ /^Unable to find document/
nil
else
link = page.link_with(:text => "View/Save XML")
if link.nil?
@logger.error "Link to XML download is missing"
nil
else
# Follow the download link and return the raw XML body
agent.click(link).body
end
end
end
# Returns HansardDate object for a particular day
def hansard_day_on_date(date, house)
text = hansard_xml_source_data_on_date(date, house)
HansardDay.new(Hpricot.XML(text), @logger) if text
end
# Parse but only if there is a page that is at "proof" stage
def parse_date_house_only_in_proof(date, xml_filename, house)
day = hansard_day_on_date(date, house)
if day && day.in_proof?
logger.info "Deleting all cached html for #{date} because that date is in proof stage."
FileUtils.rm_rf("#{@conf.html_cache_path}/#{cache_subdirectory(date, house)}")
logger.info "Redownloading pages on #{date}..."
parse_date_house(date, xml_filename, house)
end
end
# Parses all speeches for the given house/date and writes them to xml_filename.
# No file is written when there is no data for that day.
def parse_date_house(date, xml_filename, house)
debates = Debates.new(date, house, @logger)
content = false
day = hansard_day_on_date(date, house)
if day
@logger.info "Parsing #{house} speeches for #{date.strftime('%a %d %b %Y')}..."
@logger.warn "In proof stage" if day.in_proof?
day.pages.each do |page|
content = true
#throw "Unsupported: #{page.full_hansard_title}" unless page.supported? || page.to_skip? || page.not_yet_supported?
if page
debates.add_heading(page.hansard_title, page.hansard_subtitle, page.permanent_url)
speaker = nil
page.speeches.each do |speech|
if speech
# Only change speaker if a speaker name or url was found
this_speaker = (speech.speakername || speech.aph_id) ? lookup_speaker(speech, date, house) : speaker
# With interjections the next speech should never be by the person doing the interjection
speaker = this_speaker unless speech.interjection
debates.add_speech(this_speaker, speech.time, speech.permanent_url, speech.clean_content)
end
debates.increment_minor_count
end
end
# This ensures that every sub day page has a different major count which limits the impact
# of when we start supporting things like written questions, procedurial text, etc..
debates.increment_major_count
end
else
@logger.info "Skipping #{house} speeches for #{date.strftime('%a %d %b %Y')} (no data available)"
end
# Only output the debate file if there's going to be something in it
debates.output(xml_filename) if content
end
# Resolves speakers referred to by office (Speaker, President, Chairman, ...)
# rather than by name. Returns nil when the name is not an office title.
def lookup_speaker_by_title(speech, date, house)
# Some sanity checking.
if speech.speakername =~ /speaker/i && house.senate?
logger.error "The Speaker is not expected in the Senate"
return nil
elsif speech.speakername =~ /president/i && house.representatives?
logger.error "The President is not expected in the House of Representatives"
return nil
elsif speech.speakername =~ /chairman/i && house.representatives?
logger.error "The Chairman is not expected in the House of Representatives"
return nil
end
# Handle speakers where they are referred to by position rather than name
# Handle names in brackets first
if speech.speakername =~ /^(.*) \(the (deputy speaker|acting deputy president|temporary chairman)\)/i
@people.find_member_by_name_current_on_date(Name.last_title_first($~[1]), date, house)
elsif speech.speakername =~ /^the speaker/i
@people.house_speaker(date)
elsif speech.speakername =~ /^the deputy speaker/i
@people.deputy_house_speaker(date)
elsif speech.speakername =~ /^the president/i
@people.senate_president(date)
elsif speech.speakername =~ /^(the )?chairman/i || speech.speakername =~ /^the deputy president/i
# The "Chairman" in the main Senate Hansard is when the Senate is sitting as a committee of the whole Senate.
# In this case, the "Chairman" is the deputy president. See http://www.aph.gov.au/senate/pubs/briefs/brief06.htm#3
@people.deputy_senate_president(date)
end
end
# Looks up the member for a speech by name, trying office titles first and
# then both "Title First Last" and "Last, First" orderings. Returns nil if
# no match is found.
def lookup_speaker_by_name(speech, date, house)
#puts "Looking up speaker by name: #{speech.speakername}"
throw "speakername can not be nil in lookup_speaker" if speech.speakername.nil?
member = lookup_speaker_by_title(speech, date, house)
# If member hasn't already been set then lookup using speakername
if member.nil?
name = Name.title_first_last(speech.speakername)
member = @people.find_member_by_name_current_on_date(name, date, house)
if member.nil?
name = Name.last_title_first(speech.speakername)
member = @people.find_member_by_name_current_on_date(name, date, house)
end
end
member
end
# Looks up the member via the aph id attached to the speech.
# Returns nil when the id is missing, the sentinel "10000", or unrecognised.
def lookup_speaker_by_aph_id(speech, date, house)
# The aph_id "10000" is special. It represents the speaker, deputy speaker, something like that.
# It could be anyone of a number of poeple. So, if it is that, just ignore it.
if speech.aph_id && speech.aph_id != "10000"
person = @people.find_person_by_aph_id(speech.aph_id)
if person
# Now find the member for that person who is current on the given date
@people.find_member_by_name_current_on_date(person.name, date, house)
else
logger.error "Can't figure out which person the aph id #{speech.aph_id} belongs to"
nil
end
end
end
# Resolves a speech to a member: by aph id first, then by name, and finally
# falling back to an UnknownSpeaker placeholder.
def lookup_speaker(speech, date, house)
# First try looking up speaker by id then try name
member = lookup_speaker_by_aph_id(speech, date, house) || lookup_speaker_by_name(speech, date, house)
if member.nil?
logger.warn "Unknown speaker #{speech.speakername}" unless HansardSpeech.generic_speaker?(speech.speakername)
member = UnknownSpeaker.new(speech.speakername)
end
member
end
end
# --------------------------------------------------------------------------
require 'speech'
require 'mechanize_proxy'
require 'configuration'
require 'debates'
require 'builder_alpha_attributes'
# Null-object stand-in used when a speech can not be attributed to a known member.
class UnknownSpeaker
  def initialize(name)
    # Raw speaker name text exactly as it appeared in Hansard
    @name = name
  end

  # Sentinel id that marks an unmatched speaker downstream.
  def id
    "unknown"
  end

  # Parses the raw text into a Name object on demand.
  def name
    Name.title_first_last(@name)
  end
end
require 'rubygems'
require 'log4r'
class HansardParser
attr_reader :logger
# Sets up the parser with a People lookup object and configures logging to
# stdout and to the log file named in configuration.yml.
def initialize(people)
@people = people
conf = Configuration.new
# Set up logging
@logger = Log4r::Logger.new 'HansardParser'
# Log to both standard out and the file set in configuration.yml
@logger.add(Log4r::Outputter.stdout)
@logger.add(Log4r::FileOutputter.new('foo', :filename => conf.log_path, :trunc => false,
:formatter => Log4r::PatternFormatter.new(:pattern => "[%l] %d :: %M")))
end
# Downloads and parses all House speeches for the given date and writes them
# to xml_filename. Gives up with a warning if the overview page can't be fetched.
def parse_date(date, xml_filename)
debates = Debates.new(date)
@logger.info "Parsing speeches for #{date.strftime('%a %d %b %Y')}..."
# Required to workaround long viewstates generated by .NET (whatever that means)
# See http://code.whytheluckystiff.net/hpricot/ticket/13
Hpricot.buffer_size = 400000
agent = MechanizeProxy.new
agent.cache_subdirectory = date.to_s
url = "http://parlinfoweb.aph.gov.au/piweb/browse.aspx?path=Chamber%20%3E%20House%20Hansard%20%3E%20#{date.year}%20%3E%20#{date.day}%20#{Date::MONTHNAMES[date.month]}%20#{date.year}"
begin
page = agent.get(url)
# HACK: Don't know why if the page isn't found a return code isn't returned. So, hacking around this.
if page.title == "ParlInfo Web - Error"
throw "ParlInfo Web - Error"
end
rescue
# Bare rescue: any StandardError while fetching means we give up on this date
logger.warn "Could not retrieve overview page for date #{date}"
return
end
# Structure of the page is such that we are only interested in some of the links
# (slice bounds are empirical for the ParlInfo page layout - revisit if the site changes)
page.links[30..-4].each do |link|
parse_sub_day_page(link.to_s, agent.click(link), debates, date)
# This ensures that every sub day page has a different major count which limits the impact
# of when we start supporting things like written questions, procedurial text, etc..
debates.increment_major_count
end
debates.output(xml_filename)
end
# Dispatches a single link from the day's overview page. Speech-type links are
# parsed; known-but-unsupported content is logged; anything unrecognised aborts.
def parse_sub_day_page(link_text, sub_page, debates, date)
# Only going to consider speeches for the time being
if link_text =~ /^Speech:/ || link_text =~ /^QUESTIONS WITHOUT NOTICE:/ || link_text =~ /^QUESTIONS TO THE SPEAKER:/
# Link text for speech has format:
# HEADING > NAME > HOUR:MINS:SECS
split = link_text.split('>').map{|a| a.strip}
logger.error "Expected split to have length 3 in link text: #{link_text}" unless split.size == 3
time = split[2]
parse_sub_day_speech_page(sub_page, time, debates, date)
#elsif link_text =~ /^Procedural text:/
#  # Assuming no time recorded for Procedural text
#  parse_sub_day_speech_page(sub_page, nil, debates, date)
elsif link_text == "Official Hansard" || link_text =~ /^Start of Business/ || link_text == "Adjournment"
# Do nothing - skip this entirely
elsif link_text =~ /^Procedural text:/ || link_text =~ /^QUESTIONS IN WRITING:/ || link_text =~ /^Division:/ ||
link_text =~ /^REQUEST FOR DETAILED INFORMATION:/ ||
link_text =~ /^Petition:/ || link_text =~ /^PRIVILEGE:/ || link_text == "Interruption"
logger.warn "Not yet supporting: #{link_text}"
else
# NOTE(review): throw used like raise - aborts on unrecognised content
throw "Unsupported: #{link_text}"
end
end
# Parses one speech sub-page: records the heading, then walks the content divs
# adding each speech block to debates. time may be nil when none was recorded.
# BUG FIX: the "motion"/"quote" branch previously read `class_value = "quote"`
# (assignment instead of ==), which clobbered class_value and made the branch
# match every remaining div.
def parse_sub_day_speech_page(sub_page, time, debates, date)
  top_content_tag = sub_page.search('div#contentstart').first
  throw "Page on date #{date} at time #{time} has no content" if top_content_tag.nil?
  # Extract permanent URL of this subpage. Also, quoting because there is a bug
  # in XML Builder that for some reason is not quoting attributes properly
  url = quote(sub_page.links.text("[Permalink]").uri.to_s)
  newtitle = sub_page.search('div#contentstart div.hansardtitle').map { |m| m.inner_html }.join('; ')
  newsubtitle = sub_page.search('div#contentstart div.hansardsubtitle').map { |m| m.inner_html }.join('; ')
  # Replace any unicode characters
  newtitle = replace_unicode(newtitle)
  newsubtitle = replace_unicode(newsubtitle)
  debates.add_heading(newtitle, newsubtitle, url)
  speaker = nil
  top_content_tag.children.each do |e|
    class_value = e.attributes["class"]
    if e.name == "div"
      if class_value == "hansardtitlegroup" || class_value == "hansardsubtitlegroup"
        # Titles were already extracted above - nothing to do
      elsif class_value == "speech0" || class_value == "speech1"
        # First child is the speaker heading; the rest are the speech paragraphs
        e.children[1..-1].each do |child|
          speaker = parse_speech_block(child, speaker, time, url, debates, date)
          debates.increment_minor_count
        end
      elsif class_value == "motionnospeech" || class_value == "subspeech0" || class_value == "subspeech1" ||
          class_value == "motion" || class_value == "quote"
        speaker = parse_speech_block(e, speaker, time, url, debates, date)
        debates.increment_minor_count
      else
        throw "Unexpected class value #{class_value} for tag #{e.name}"
      end
    elsif e.name == "p"
      speaker = parse_speech_block(e, speaker, time, url, debates, date)
      debates.increment_minor_count
    elsif e.name == "table"
      if class_value == "division"
        # Ignore divisions (for the time being) but keep the minor count in step
        debates.increment_minor_count
      else
        throw "Unexpected class value #{class_value} for tag #{e.name}"
      end
    else
      throw "Unexpected tag #{e.name}"
    end
  end
end
# Adds one speech block to debates and returns the speaker that the *next*
# speech should be attributed to (interjections do not take the floor).
def parse_speech_block(e, speaker, time, url, debates, date)
  speakername, interjection = extract_speakername(e)
  # Keep the previous speaker when no name could be extracted
  current = speakername.nil? ? speaker : lookup_speaker(speakername, date)
  debates.add_speech(current, time, url, clean_speech_content(url, e))
  # With interjections the next speech should never be by the person interjecting
  interjection ? speaker : current
end
# Attempts to work out who is speaking in a block of speech HTML.
# Returns [name, interjection] where name may be nil and interjection is a
# boolean marking that the name was recovered from interjection text.
def extract_speakername(content)
interjection = false
# Try to extract speaker name from talkername tag
tag = content.search('span.talkername a').first
tag2 = content.search('span.speechname').first
if tag
name = tag.inner_html
# Now check if there is something like <span class="talkername"><a>Some Text</a></span> <b>(Some Text)</b>
tag = content.search('span.talkername ~ b').first
# Only use it if it is surrounded by brackets
if tag && tag.inner_html.match(/\((.*)\)/)
# $~[0] is the whole bracketed match, brackets included
name += " " + $~[0]
end
elsif tag2
name = tag2.inner_html
# If that fails try an interjection
elsif content.search("div.speechType").inner_html == "Interjection"
interjection = true
text = strip_tags(content.search("div.speechType + *").first)
m = text.match(/([a-z].*) interjecting/i)
if m
name = m[1]
else
# Fall back to the text up to the first em-dash
m = text.match(/([a-z].*)—/i)
if m
name = m[1]
else
name = nil
end
end
# As a last resort try searching for interjection text
else
m = strip_tags(content).match(/([a-z].*) interjecting/i)
if m
name = m[1]
interjection = true
else
# Text up to an em-dash, accepted only for generic speakers.
# NOTE(review): generic_speaker? is defined elsewhere in this file - confirm its matching rules.
m = strip_tags(content).match(/^([a-z].*)—/i)
name = m[1] if m and generic_speaker?(m[1])
end
end
[name, interjection]
end
# Replaces the common UTF-8 punctuation (curly quotes, em dash) with ASCII
# equivalents and warns about any non-ASCII bytes that remain.
def replace_unicode(text)
  cleaned = text.gsub("\342\200\230", "'").gsub("\342\200\231", "'").gsub("\342\200\224", "-")
  cleaned.each_byte do |byte|
    logger.warn "Found invalid characters in: #{cleaned.dump}" if byte > 127
  end
  cleaned
end
# Cleans raw speech HTML: strips speaker metadata spans, normalises motions,
# quotes and amendments to italic paragraphs, makes links absolute and removes
# layout noise. Aborts if an unexpected tag survives cleaning.
# Returns a fresh Hpricot document of the cleaned markup.
def clean_speech_content(base_url, content)
doc = Hpricot(content.to_s)
doc.search('div.speechType').remove
doc.search('span.talkername ~ b').remove
doc.search('span.talkername').remove
doc.search('span.talkerelectorate').remove
doc.search('span.talkerrole').remove
doc.search('hr').remove
make_motions_and_quotes_italic(doc)
remove_subspeech_tags(doc)
fix_links(base_url, doc)
make_amendments_italic(doc)
fix_attributes_of_p_tags(doc)
fix_attributes_of_td_tags(doc)
fix_motionnospeech_tags(doc)
# Do pure string manipulations from here
text = doc.to_s
# Drop empty em-dash parentheticals (the bytes are the UTF-8 em dash)
text = text.gsub("(\342\200\224)", '')
text = text.gsub(/([^\w])\342\200\224/) {|m| m[0..0]}
# Remove inline timestamps such as "(9.30 a.m.)"
text = text.gsub(/\(\d{1,2}.\d\d a.m.\)/, '')
text = text.gsub(/\(\d{1,2}.\d\d p.m.\)/, '')
text = text.gsub('()', '')
# Look for tags in the text and display warnings if any of them aren't being handled yet
text.scan(/<[a-z][^>]*>/i) do |t|
m = t.match(/<([a-z]*) [^>]*>/i)
if m
tag = m[1]
else
# Tag had no attributes - take the name between the angle brackets
tag = t[1..-2]
end
allowed_tags = ["b", "i", "dl", "dt", "dd", "ul", "li", "a", "table", "td", "tr", "img"]
if !allowed_tags.include?(tag) && t != "<p>" && t != '<p class="italic">'
throw "Tag #{t} is present in speech contents: " + text
end
end
doc = Hpricot(text)
#p doc.to_s
doc
end
# Unwraps motionnospeech divs into plain paragraphs and drops the speaker
# metadata spans that live inside them.
def fix_motionnospeech_tags(content)
  content.search('div.motionnospeech').wrap('<p></p>')
  replace_with_inner_html(content, 'div.motionnospeech')
  %w[speechname speechelectorate speechrole speechtime].each do |klass|
    content.search("span.#{klass}").remove
  end
end
# Normalises paragraph classes: layout-only classes are dropped, italic
# variants are collapsed to class="italic", and bold paragraphs gain a <b>.
def fix_attributes_of_p_tags(content)
  content.search('p.parabold').wrap('<b></b>')
  content.search('p').each do |para|
    case para.get_attribute('class')
    when 'block', 'parablock', 'parasmalltablejustified', 'parasmalltableleft', 'parabold', 'paraheading'
      para.remove_attribute('class')
    when 'paraitalic'
      para.set_attribute('class', 'italic')
    when 'italic'
      para.remove_attribute('style') if para.get_attribute('style')
    end
  end
end
def fix_attributes_of_td_tags(content)
content.search('td').each do |e|
e.remove_attribute('style')
end
end
def fix_links(base_url, content)
content.search('a').each do |e|
href_value = e.get_attribute('href')
if href_value.nil?
# Remove a tags
e.swap(e.inner_html)
else
e.set_attribute('href', URI.join(base_url, href_value))
end
end
content.search('img').each do |e|
e.set_attribute('src', URI.join(base_url, e.get_attribute('src')))
end
content
end
def replace_with_inner_html(content, search)
content.search(search).each do |e|
e.swap(e.inner_html)
end
end
def make_motions_and_quotes_italic(content)
content.search('div.motion p').set(:class => 'italic')
replace_with_inner_html(content, 'div.motion')
content.search('div.quote p').set(:class => 'italic')
replace_with_inner_html(content, 'div.quote')
content
end
def make_amendments_italic(content)
content.search('div.amendments div.amendment0 p').set(:class => 'italic')
content.search('div.amendments div.amendment1 p').set(:class => 'italic')
replace_with_inner_html(content, 'div.amendment0')
replace_with_inner_html(content, 'div.amendment1')
replace_with_inner_html(content, 'div.amendments')
content
end
def remove_subspeech_tags(content)
replace_with_inner_html(content, 'div.subspeech0')
replace_with_inner_html(content, 'div.subspeech1')
content
end
def quote(text)
text.sub('&', '&')
end
def lookup_speaker(speakername, date)
throw "speakername can not be nil in lookup_speaker" if speakername.nil?
# HACK alert (Oh you know what this whole thing is a big hack alert)
if speakername =~ /^the speaker/i
member = @people.house_speaker(date)
# The name might be "The Deputy Speaker (Mr Smith)". So, take account of this
elsif speakername =~ /^the deputy speaker/i
# Check name in brackets
match = speakername.match(/^the deputy speaker \((.*)\)/i)
if match
#logger.warn "Deputy speaker is #{match[1]}"
speakername = match[1]
name = Name.title_first_last(speakername)
member = @people.find_member_by_name_current_on_date(name, date)
else
member = @people.deputy_house_speaker(date)
end
else
# Lookup id of member based on speakername
name = Name.title_first_last(speakername)
member = @people.find_member_by_name_current_on_date(name, date)
end
if member.nil?
logger.warn "Unknown speaker #{speakername}" unless generic_speaker?(speakername)
member = UnknownSpeaker.new(speakername)
end
member
end
def generic_speaker?(speakername)
return speakername =~ /^(a )?(honourable|opposition|government) members?$/i
end
def strip_tags(doc)
str=doc.to_s
str.gsub(/<\/?[^>]*>/, "")
end
def min(a, b)
if a < b
a
else
b
end
end
end
Make "not yet supporting" message an "INFO"
require 'speech'
require 'mechanize_proxy'
require 'configuration'
require 'debates'
require 'builder_alpha_attributes'
class UnknownSpeaker
def initialize(name)
@name = name
end
def id
"unknown"
end
def name
Name.title_first_last(@name)
end
end
require 'rubygems'
require 'log4r'
class HansardParser
attr_reader :logger
def initialize(people)
@people = people
conf = Configuration.new
# Set up logging
@logger = Log4r::Logger.new 'HansardParser'
# Log to both standard out and the file set in configuration.yml
@logger.add(Log4r::Outputter.stdout)
@logger.add(Log4r::FileOutputter.new('foo', :filename => conf.log_path, :trunc => false,
:formatter => Log4r::PatternFormatter.new(:pattern => "[%l] %d :: %M")))
end
def parse_date(date, xml_filename)
debates = Debates.new(date)
@logger.info "Parsing speeches for #{date.strftime('%a %d %b %Y')}..."
# Required to workaround long viewstates generated by .NET (whatever that means)
# See http://code.whytheluckystiff.net/hpricot/ticket/13
Hpricot.buffer_size = 400000
agent = MechanizeProxy.new
agent.cache_subdirectory = date.to_s
url = "http://parlinfoweb.aph.gov.au/piweb/browse.aspx?path=Chamber%20%3E%20House%20Hansard%20%3E%20#{date.year}%20%3E%20#{date.day}%20#{Date::MONTHNAMES[date.month]}%20#{date.year}"
begin
page = agent.get(url)
# HACK: Don't know why if the page isn't found a return code isn't returned. So, hacking around this.
if page.title == "ParlInfo Web - Error"
throw "ParlInfo Web - Error"
end
rescue
logger.warn "Could not retrieve overview page for date #{date}"
return
end
# Structure of the page is such that we are only interested in some of the links
page.links[30..-4].each do |link|
parse_sub_day_page(link.to_s, agent.click(link), debates, date)
# This ensures that every sub day page has a different major count which limits the impact
# of when we start supporting things like written questions, procedurial text, etc..
debates.increment_major_count
end
debates.output(xml_filename)
end
def parse_sub_day_page(link_text, sub_page, debates, date)
# Only going to consider speeches for the time being
if link_text =~ /^Speech:/ || link_text =~ /^QUESTIONS WITHOUT NOTICE:/ || link_text =~ /^QUESTIONS TO THE SPEAKER:/
# Link text for speech has format:
# HEADING > NAME > HOUR:MINS:SECS
split = link_text.split('>').map{|a| a.strip}
logger.error "Expected split to have length 3 in link text: #{link_text}" unless split.size == 3
time = split[2]
parse_sub_day_speech_page(sub_page, time, debates, date)
#elsif link_text =~ /^Procedural text:/
# # Assuming no time recorded for Procedural text
# parse_sub_day_speech_page(sub_page, nil, debates, date)
elsif link_text == "Official Hansard" || link_text =~ /^Start of Business/ || link_text == "Adjournment"
# Do nothing - skip this entirely
elsif link_text =~ /^Procedural text:/ || link_text =~ /^QUESTIONS IN WRITING:/ || link_text =~ /^Division:/ ||
link_text =~ /^REQUEST FOR DETAILED INFORMATION:/ ||
link_text =~ /^Petition:/ || link_text =~ /^PRIVILEGE:/ || link_text == "Interruption"
logger.info "Not yet supporting: #{link_text}"
else
throw "Unsupported: #{link_text}"
end
end
def parse_sub_day_speech_page(sub_page, time, debates, date)
top_content_tag = sub_page.search('div#contentstart').first
throw "Page on date #{date} at time #{time} has no content" if top_content_tag.nil?
# Extract permanent URL of this subpage. Also, quoting because there is a bug
# in XML Builder that for some reason is not quoting attributes properly
url = quote(sub_page.links.text("[Permalink]").uri.to_s)
newtitle = sub_page.search('div#contentstart div.hansardtitle').map { |m| m.inner_html }.join('; ')
newsubtitle = sub_page.search('div#contentstart div.hansardsubtitle').map { |m| m.inner_html }.join('; ')
# Replace any unicode characters
newtitle = replace_unicode(newtitle)
newsubtitle = replace_unicode(newsubtitle)
debates.add_heading(newtitle, newsubtitle, url)
speaker = nil
top_content_tag.children.each do |e|
class_value = e.attributes["class"]
if e.name == "div"
if class_value == "hansardtitlegroup" || class_value == "hansardsubtitlegroup"
elsif class_value == "speech0" || class_value == "speech1"
e.children[1..-1].each do |e|
speaker = parse_speech_block(e, speaker, time, url, debates, date)
debates.increment_minor_count
end
elsif class_value == "motionnospeech" || class_value == "subspeech0" || class_value == "subspeech1" ||
class_value == "motion" || class_value = "quote"
speaker = parse_speech_block(e, speaker, time, url, debates, date)
debates.increment_minor_count
else
throw "Unexpected class value #{class_value} for tag #{e.name}"
end
elsif e.name == "p"
speaker = parse_speech_block(e, speaker, time, url, debates, date)
debates.increment_minor_count
elsif e.name == "table"
if class_value == "division"
debates.increment_minor_count
# Ignore (for the time being)
else
throw "Unexpected class value #{class_value} for tag #{e.name}"
end
else
throw "Unexpected tag #{e.name}"
end
end
end
# Returns new speaker
def parse_speech_block(e, speaker, time, url, debates, date)
speakername, interjection = extract_speakername(e)
# Only change speaker if a speaker name was found
this_speaker = speakername ? lookup_speaker(speakername, date) : speaker
debates.add_speech(this_speaker, time, url, clean_speech_content(url, e))
# With interjections the next speech should never be by the person doing the interjection
if interjection
speaker
else
this_speaker
end
end
def extract_speakername(content)
interjection = false
# Try to extract speaker name from talkername tag
tag = content.search('span.talkername a').first
tag2 = content.search('span.speechname').first
if tag
name = tag.inner_html
# Now check if there is something like <span class="talkername"><a>Some Text</a></span> <b>(Some Text)</b>
tag = content.search('span.talkername ~ b').first
# Only use it if it is surrounded by brackets
if tag && tag.inner_html.match(/\((.*)\)/)
name += " " + $~[0]
end
elsif tag2
name = tag2.inner_html
# If that fails try an interjection
elsif content.search("div.speechType").inner_html == "Interjection"
interjection = true
text = strip_tags(content.search("div.speechType + *").first)
m = text.match(/([a-z].*) interjecting/i)
if m
name = m[1]
else
m = text.match(/([a-z].*)—/i)
if m
name = m[1]
else
name = nil
end
end
# As a last resort try searching for interjection text
else
m = strip_tags(content).match(/([a-z].*) interjecting/i)
if m
name = m[1]
interjection = true
else
m = strip_tags(content).match(/^([a-z].*)—/i)
name = m[1] if m and generic_speaker?(m[1])
end
end
[name, interjection]
end
# Replace unicode characters by their equivalent
def replace_unicode(text)
t = text.gsub("\342\200\230", "'")
t.gsub!("\342\200\231", "'")
t.gsub!("\342\200\224", "-")
t.each_byte do |c|
if c > 127
logger.warn "Found invalid characters in: #{t.dump}"
end
end
t
end
def clean_speech_content(base_url, content)
doc = Hpricot(content.to_s)
doc.search('div.speechType').remove
doc.search('span.talkername ~ b').remove
doc.search('span.talkername').remove
doc.search('span.talkerelectorate').remove
doc.search('span.talkerrole').remove
doc.search('hr').remove
make_motions_and_quotes_italic(doc)
remove_subspeech_tags(doc)
fix_links(base_url, doc)
make_amendments_italic(doc)
fix_attributes_of_p_tags(doc)
fix_attributes_of_td_tags(doc)
fix_motionnospeech_tags(doc)
# Do pure string manipulations from here
text = doc.to_s
text = text.gsub("(\342\200\224)", '')
text = text.gsub(/([^\w])\342\200\224/) {|m| m[0..0]}
text = text.gsub(/\(\d{1,2}.\d\d a.m.\)/, '')
text = text.gsub(/\(\d{1,2}.\d\d p.m.\)/, '')
text = text.gsub('()', '')
# Look for tags in the text and display warnings if any of them aren't being handled yet
text.scan(/<[a-z][^>]*>/i) do |t|
m = t.match(/<([a-z]*) [^>]*>/i)
if m
tag = m[1]
else
tag = t[1..-2]
end
allowed_tags = ["b", "i", "dl", "dt", "dd", "ul", "li", "a", "table", "td", "tr", "img"]
if !allowed_tags.include?(tag) && t != "<p>" && t != '<p class="italic">'
throw "Tag #{t} is present in speech contents: " + text
end
end
doc = Hpricot(text)
#p doc.to_s
doc
end
def fix_motionnospeech_tags(content)
content.search('div.motionnospeech').wrap('<p></p>')
replace_with_inner_html(content, 'div.motionnospeech')
content.search('span.speechname').remove
content.search('span.speechelectorate').remove
content.search('span.speechrole').remove
content.search('span.speechtime').remove
end
def fix_attributes_of_p_tags(content)
content.search('p.parabold').wrap('<b></b>')
content.search('p').each do |e|
class_value = e.get_attribute('class')
if class_value == "block" || class_value == "parablock" || class_value == "parasmalltablejustified" ||
class_value == "parasmalltableleft" || class_value == "parabold" || class_value == "paraheading"
e.remove_attribute('class')
elsif class_value == "paraitalic"
e.set_attribute('class', 'italic')
elsif class_value == "italic" && e.get_attribute('style')
e.remove_attribute('style')
end
end
end
def fix_attributes_of_td_tags(content)
content.search('td').each do |e|
e.remove_attribute('style')
end
end
def fix_links(base_url, content)
content.search('a').each do |e|
href_value = e.get_attribute('href')
if href_value.nil?
# Remove a tags
e.swap(e.inner_html)
else
e.set_attribute('href', URI.join(base_url, href_value))
end
end
content.search('img').each do |e|
e.set_attribute('src', URI.join(base_url, e.get_attribute('src')))
end
content
end
def replace_with_inner_html(content, search)
content.search(search).each do |e|
e.swap(e.inner_html)
end
end
def make_motions_and_quotes_italic(content)
content.search('div.motion p').set(:class => 'italic')
replace_with_inner_html(content, 'div.motion')
content.search('div.quote p').set(:class => 'italic')
replace_with_inner_html(content, 'div.quote')
content
end
def make_amendments_italic(content)
content.search('div.amendments div.amendment0 p').set(:class => 'italic')
content.search('div.amendments div.amendment1 p').set(:class => 'italic')
replace_with_inner_html(content, 'div.amendment0')
replace_with_inner_html(content, 'div.amendment1')
replace_with_inner_html(content, 'div.amendments')
content
end
def remove_subspeech_tags(content)
replace_with_inner_html(content, 'div.subspeech0')
replace_with_inner_html(content, 'div.subspeech1')
content
end
def quote(text)
text.sub('&', '&')
end
def lookup_speaker(speakername, date)
throw "speakername can not be nil in lookup_speaker" if speakername.nil?
# HACK alert (Oh you know what this whole thing is a big hack alert)
if speakername =~ /^the speaker/i
member = @people.house_speaker(date)
# The name might be "The Deputy Speaker (Mr Smith)". So, take account of this
elsif speakername =~ /^the deputy speaker/i
# Check name in brackets
match = speakername.match(/^the deputy speaker \((.*)\)/i)
if match
#logger.warn "Deputy speaker is #{match[1]}"
speakername = match[1]
name = Name.title_first_last(speakername)
member = @people.find_member_by_name_current_on_date(name, date)
else
member = @people.deputy_house_speaker(date)
end
else
# Lookup id of member based on speakername
name = Name.title_first_last(speakername)
member = @people.find_member_by_name_current_on_date(name, date)
end
if member.nil?
logger.warn "Unknown speaker #{speakername}" unless generic_speaker?(speakername)
member = UnknownSpeaker.new(speakername)
end
member
end
def generic_speaker?(speakername)
return speakername =~ /^(a )?(honourable|opposition|government) members?$/i
end
def strip_tags(doc)
str=doc.to_s
str.gsub(/<\/?[^>]*>/, "")
end
def min(a, b)
if a < b
a
else
b
end
end
end
|
module Haruna
VERSION = "0.0.1.2"
end
updated version
module Haruna
VERSION = "0.0.2"
end
|
module HasOffers
class Base
BaseUri = 'https://api.hasoffers.com/Api'
@@api_mode = (ENV['RAILS_ENV'] == 'production' or ENV['HAS_OFFERS_LIVE'] == '1') ? :live : :test
@@default_params = nil
class << self
def initialize_credentials
config_file = ENV['HAS_OFFERS_CONFIG_FILE'] || "config/has_offers.yml"
if File.exists?(config_file)
config = YAML::load(IO.read(config_file))
@@default_params = {'Format' => 'json',
'Service' => 'HasOffers',
'Version' => '2',
'NetworkId' => config['network_id'],
'NetworkToken' => config['api_key']}
else
puts "Missing config/has_offers.yml file!"
end
end
def test?
@@api_mode == :test
end
def live?
@@api_mode == :live
end
def api_mode=(mode)
@@api_mode = mode
end
def api_mode
@@api_mode
end
def get_request(target, method, params)
make_request(:get, target, method, params)
end
def post_request(target, method, params)
make_request(:post, target, method, params)
end
def requires!(hash, required_params)
missing_params = []
required_params.each do |param|
missing_params.push param unless hash.has_key?(param)
end
unless missing_params.empty?
raise ArgumentError.new("Missing required parameter(s): #{missing_params.join(', ')}")
end
end
private
def new_http(uri)
http = Net::HTTP.new(uri.host, uri.port)
http.use_ssl = true
http.verify_mode = OpenSSL::SSL::VERIFY_NONE
http
end
def query_string(data_hash)
# Rails to_params adds an extra open close brackets to multi-dimensional array parameters which
# hasoffers doesn't like, so the gsub here takes care of that.
data_hash.to_params.gsub(/\[\]\[/,'[')
end
def make_request(http_method, target, method, params)
data = build_request_params(target, method, params)
if live?
if http_method == :post
uri = URI.parse BaseUri
http = new_http uri
raw_request = Net::HTTP::Post.new(uri.request_uri)
raw_request.body = query_string data
else # assume get
uri = URI.parse("#{BaseUri}?#{query_string(data)}")
http = new_http uri
raw_request = Net::HTTP::Get.new(uri.request_uri)
end
http_response = http.request raw_request
else
http_response = DummyResponse.response_for(target, method, params)
end
Response.new(http_response)
end
def build_request_params(target, method, params)
initialize_credentials unless @@default_params
params['Target'] = target
params['Method'] = method
params.merge @@default_params
end
def build_data(data, return_object = false)
{'data' => data, 'return_object' => return_object}
end
end
end
end
setting @@default_params to {} when no config file is present
module HasOffers
class Base
BaseUri = 'https://api.hasoffers.com/Api'
@@api_mode = (ENV['RAILS_ENV'] == 'production' or ENV['HAS_OFFERS_LIVE'] == '1') ? :live : :test
@@default_params = nil
class << self
def initialize_credentials
config_file = ENV['HAS_OFFERS_CONFIG_FILE'] || "config/has_offers.yml"
if File.exists?(config_file)
config = YAML::load(IO.read(config_file))
@@default_params = {'Format' => 'json',
'Service' => 'HasOffers',
'Version' => '2',
'NetworkId' => config['network_id'],
'NetworkToken' => config['api_key']}
else
@@default_params = {}
puts "Missing config/has_offers.yml file!"
end
end
def test?
@@api_mode == :test
end
def live?
@@api_mode == :live
end
def api_mode=(mode)
@@api_mode = mode
end
def api_mode
@@api_mode
end
def get_request(target, method, params)
make_request(:get, target, method, params)
end
def post_request(target, method, params)
make_request(:post, target, method, params)
end
def requires!(hash, required_params)
missing_params = []
required_params.each do |param|
missing_params.push param unless hash.has_key?(param)
end
unless missing_params.empty?
raise ArgumentError.new("Missing required parameter(s): #{missing_params.join(', ')}")
end
end
private
def new_http(uri)
http = Net::HTTP.new(uri.host, uri.port)
http.use_ssl = true
http.verify_mode = OpenSSL::SSL::VERIFY_NONE
http
end
def query_string(data_hash)
# Rails to_params adds an extra open close brackets to multi-dimensional array parameters which
# hasoffers doesn't like, so the gsub here takes care of that.
data_hash.to_params.gsub(/\[\]\[/,'[')
end
def make_request(http_method, target, method, params)
data = build_request_params(target, method, params)
if live?
if http_method == :post
uri = URI.parse BaseUri
http = new_http uri
raw_request = Net::HTTP::Post.new(uri.request_uri)
raw_request.body = query_string data
else # assume get
uri = URI.parse("#{BaseUri}?#{query_string(data)}")
http = new_http uri
raw_request = Net::HTTP::Get.new(uri.request_uri)
end
http_response = http.request raw_request
else
http_response = DummyResponse.response_for(target, method, params)
end
Response.new(http_response)
end
def build_request_params(target, method, params)
initialize_credentials unless @@default_params
params['Target'] = target
params['Method'] = method
params.merge @@default_params
end
def build_data(data, return_object = false)
{'data' => data, 'return_object' => return_object}
end
end
end
end |
# encoding: utf-8
require 'optparse'
require 'set'
require 'open-uri'
require 'fileutils'
module Hdo
module Import
class CLI
attr_reader :options
def initialize(argv)
if argv.empty?
raise ArgumentError, 'no args'
else
@options = parse_options argv
@cmd = argv.shift
@rest = argv
end
end
def run
case @cmd
when 'json'
import_files
when 'daily'
import_daily
when 'api'
import_api
when 'dev'
import_api(30)
when 'representatives'
import_api_representatives
when 'votes'
import_api_votes
when 'promises'
import_promises
when 'parliament-issues', 'parliament-issues'
import_parliament_issues
when 'representative-emails'
import_representative_emails
else
raise ArgumentError, "unknown command: #{@cmd.inspect}"
end
end
private
def import_parliament_issues
parliament_issues = parsing_data_source.parliament_issues(@options[:session])
persister.import_parliament_issues parliament_issues
end
def import_api(vote_limit = nil)
persister.import_parties parsing_data_source.parties(@options[:session])
persister.import_committees parsing_data_source.committees(@options[:session])
persister.import_districts parsing_data_source.districts
persister.import_categories parsing_data_source.categories
persister.import_parliament_periods parsing_data_source.parliament_periods
persister.import_parliament_sessions parsing_data_source.parliament_sessions
import_api_representatives
import_api_votes(vote_limit)
end
def import_daily
persister.import_parties parsing_data_source.parties(@options[:session])
persister.import_committees parsing_data_source.committees(@options[:session])
persister.import_districts parsing_data_source.districts
persister.import_categories parsing_data_source.categories
persister.import_parliament_periods parsing_data_source.parliament_periods
persister.import_parliament_sessions parsing_data_source.parliament_sessions
import_api_representatives
parliament_issues = parsing_data_source.parliament_issues(@options[:session])
persister.import_parliament_issues parliament_issues
each_vote_for(parsing_data_source, parliament_issues) do |votes|
persister.import_votes votes, infer: false
end
persister.infer_all_votes
notify_new_votes if Rails.env.production?
notify_missing_emails if Rails.env.production?
rescue Hdo::StortingImporter::DataSource::ServerError
notify_api_error if Rails.env.production?
raise
end
def import_api_votes(vote_limit = nil)
parliament_issues = parsing_data_source.parliament_issues(@options[:session])
if @options[:parliament_issue_ids]
parliament_issues = parliament_issues.select { |i| @options[:parliament_issue_ids].include? i.external_id }
end
persister.import_parliament_issues parliament_issues
each_vote_for(parsing_data_source, parliament_issues, vote_limit) do |votes|
persister.import_votes votes, infer: false
end
persister.infer_all_votes
end
def import_api_representatives
representatives = {}
# the information in 'representatives_today' is more complete,
# so it takes precedence
representatives_today = parsing_data_source.representatives_today
representatives_today.each do |rep|
representatives[rep.external_id] = rep
end
parsing_data_source.representatives(@options[:period]).each do |rep|
representatives[rep.external_id] ||= rep
end
persister.import_representatives representatives.values
# mark currently attending representatives
# see https://github.com/holderdeord/hdo-site/issues/195
attending_xids = representatives_today.map(&:external_id)
Representative.all.each do |rep|
rep.update_attributes!(attending: attending_xids.include?(rep.external_id))
end
end
def import_promises
spreadsheet = @rest.first or raise "no spreadsheet path given"
promises = Hdo::StortingImporter::Promise.from_xlsx(spreadsheet)
persister.import_promises promises
end
SCRAPED_EMAIL_URL = "https://api.scraperwiki.com/api/1.0/datastore/sqlite?format=json&name=hdo-representative-emails&query=#{URI.escape 'select * from swdata'}"
def import_representative_emails
data = JSON.parse(RestClient.get(SCRAPED_EMAIL_URL))
missing_emails = []
data.each do |e|
rep = Representative.where('first_name like ? and last_name = ?', "#{e['first_name'].strip}%", e['last_name'].strip).first
if rep
rep.email ||= e['email']
rep.save!
else
missing_emails << e
end
end
missing_reps = Representative.where(:email => nil).map(&:full_name)
log.warn "representatives missing emails: #{missing_reps.to_json} "
log.warn "emails missing representatives: #{missing_emails.to_json}"
end
def each_vote_for(data_source, parliament_issues, limit = nil)
count = 0
parliament_issues.each_with_index do |parliament_issue, index|
votes = data_source.votes_for(parliament_issue.external_id)
count += votes.size
yield votes
break if limit && count >= limit
parliament_issue_count = index + 1
remaining_parliament_issues = parliament_issues.size - parliament_issue_count
remaining_votes = (count / parliament_issue_count.to_f) * remaining_parliament_issues
log.info "-> #{count} votes for #{parliament_issue_count} parliament issues imported"
log.info "-> about #{remaining_votes.to_i} votes remaining for #{remaining_parliament_issues} parliament issues"
end
end
def import_files
@rest.each do |file|
print "\nimporting #{file}:"
if file == "-"
str = STDIN.read
else
str = open(file) { |io| io.read }
end
data = MultiJson.decode(str)
data = case data
when Array
data
when Hash
[data]
else
raise TypeError, "expected Hash or Array, got: #{data.inspect}"
end
import_data data
end
end
def import_data(data)
kinds = data.group_by do |hash|
hash['kind'] or raise ArgumentError, "missing 'kind' property: #{hash.inspect}"
end
kinds.each do |kind, hashes|
case kind
when 'hdo#representative'
persister.import_representatives hashes.map { |e| StortingImporter::Representative.from_hash(e) }
when 'hdo#party'
persister.import_parties hashes.map { |e| StortingImporter::Party.from_hash(e) }
when 'hdo#committee'
persister.import_committees hashes.map { |e| StortingImporter::Committee.from_hash(e) }
when 'hdo#category'
persister.import_categories hashes.map { |e| StortingImporter::Categories.from_hash(e) }
when 'hdo#district'
persister.import_districts hashes.map { |e| StortingImporter::District.from_hash(e) }
when 'hdo#issue'
persister.import_parliament_issues hashes.map { |e| StortingImporter::ParliamentIssue.from_hash(e) }
when 'hdo#vote'
# import_votes (plural) will also run VoteInferrer.
persister.import_votes hashes.map { |e| StortingImporter::Vote.from_hash(e) }
when 'hdo#promise'
persister.import_promises hashes.map { |e| StortingImporter::Promise.from_hash(e) }
else
raise "unknown type: #{kind}"
end
end
end
def parsing_data_source
@parsing_data_source ||= (
ds = Hdo::StortingImporter::ParsingDataSource.new(api_data_source)
case @options[:cache]
when 'rails'
Hdo::StortingImporter::CachingDataSource.new(ds, Rails.cache)
when true
Hdo::StortingImporter::CachingDataSource.new(ds)
else
ds
end
)
end
def api_data_source
@api_data_source ||= Hdo::StortingImporter::ApiDataSource.default
end
def persister
@persister ||= (
persister = Persister.new
persister.log = log
persister
)
end
def log
@log ||= (
if @options[:quiet]
Logger.new(File::NULL)
else
Hdo::StortingImporter.logger
end
)
end
def notify_new_votes
mail = ImportMailer.votes_today_email
return if mail.to.nil?
message = mail.parts.last.body.raw_source
client = hipchat_client || return
client['Analyse'].send('Stortinget', message, notify: true)
rescue => ex
log.error [ex.message, ex.backtrace].join("\n")
end
def notify_missing_emails
client = hipchat_client || return
missing = Representative.attending.where('email is null')
return if missing.empty?
template = <<-HTML
<h2>Møtende representanter uten epostadresse:</h2>
<ul>
<% missing.each do |rep| %>
<li><%= rep.external_id %>: <%= rep.full_name %></li>
<% end %>
</ul>
HTML
message = ERB.new(template, 0, "%-<>").result(binding)
client['Teknisk'].send('Stortinget', message, color: 'red', notify: true)
rescue => ex
log.error ex.message
end
def notify_api_error
client = hipchat_client || return
client['Teknisk'].send('API', "Feil hos data.stortinget.no! Hjelp!", color: 'red', notify: true)
rescue => ex
log.error ex.message
end
def hipchat_client
@hipchat_client ||= (
token = AppConfig.hipchat_api_token
HipChat::Client.new(token) unless token.blank?
)
end
def parse_options(args)
options = {:period => '2009-2013', :session => '2012-2013'}
OptionParser.new { |opt|
opt.on("-s", "--quiet") { @options[:quiet] = true }
opt.on("--cache [rails]", "Cache results of API calls. Defaults to caching in memory, pass 'rails' to use Rails.cache instead.") do |arg|
options[:cache] = arg || true
end
opt.on("--parliament-issues ISSUE_IDS", "Only import this comma-sparated list of issue external ids") do |ids|
options[:parliament_issue_ids] = ids.split(",")
end
opt.on("--period PERIOD", %Q{The parliamentary period to import data for. Note that "today's representatives" will always be imported. Default: #{options[:period]}}) do |period|
options[:period] = period
end
opt.on("--session SESSION", %Q{The parliamentary session to import data for. Note that "today's representatives" will always be imported. Default: #{options[:session]}}) do |session|
options[:session] = session
end
opt.on("-h", "--help") do
puts opt
exit 1
end
}.parse!(args)
options[:cache] ||= ENV['CACHE']
options
end
end # CLI
end # Import
end # Hdo
Fix new vote notification.
* Actually deliver the email
* Make sure we pass a String, not an OutputBuffer to the HC client.
# encoding: utf-8
require 'optparse'
require 'set'
require 'open-uri'
require 'fileutils'
module Hdo
module Import
class CLI
attr_reader :options
def initialize(argv)
if argv.empty?
raise ArgumentError, 'no args'
else
@options = parse_options argv
@cmd = argv.shift
@rest = argv
end
end
def run
case @cmd
when 'json'
import_files
when 'daily'
import_daily
when 'api'
import_api
when 'dev'
import_api(30)
when 'representatives'
import_api_representatives
when 'votes'
import_api_votes
when 'promises'
import_promises
when 'parliament-issues', 'parliament-issues'
import_parliament_issues
when 'representative-emails'
import_representative_emails
else
raise ArgumentError, "unknown command: #{@cmd.inspect}"
end
end
private
def import_parliament_issues
parliament_issues = parsing_data_source.parliament_issues(@options[:session])
persister.import_parliament_issues parliament_issues
end
def import_api(vote_limit = nil)
persister.import_parties parsing_data_source.parties(@options[:session])
persister.import_committees parsing_data_source.committees(@options[:session])
persister.import_districts parsing_data_source.districts
persister.import_categories parsing_data_source.categories
persister.import_parliament_periods parsing_data_source.parliament_periods
persister.import_parliament_sessions parsing_data_source.parliament_sessions
import_api_representatives
import_api_votes(vote_limit)
end
def import_daily
persister.import_parties parsing_data_source.parties(@options[:session])
persister.import_committees parsing_data_source.committees(@options[:session])
persister.import_districts parsing_data_source.districts
persister.import_categories parsing_data_source.categories
persister.import_parliament_periods parsing_data_source.parliament_periods
persister.import_parliament_sessions parsing_data_source.parliament_sessions
import_api_representatives
parliament_issues = parsing_data_source.parliament_issues(@options[:session])
persister.import_parliament_issues parliament_issues
each_vote_for(parsing_data_source, parliament_issues) do |votes|
persister.import_votes votes, infer: false
end
persister.infer_all_votes
notify_new_votes if Rails.env.production?
notify_missing_emails if Rails.env.production?
rescue Hdo::StortingImporter::DataSource::ServerError
notify_api_error if Rails.env.production?
raise
end
def import_api_votes(vote_limit = nil)
parliament_issues = parsing_data_source.parliament_issues(@options[:session])
if @options[:parliament_issue_ids]
parliament_issues = parliament_issues.select { |i| @options[:parliament_issue_ids].include? i.external_id }
end
persister.import_parliament_issues parliament_issues
each_vote_for(parsing_data_source, parliament_issues, vote_limit) do |votes|
persister.import_votes votes, infer: false
end
persister.infer_all_votes
end
def import_api_representatives
representatives = {}
# the information in 'representatives_today' is more complete,
# so it takes precedence
representatives_today = parsing_data_source.representatives_today
representatives_today.each do |rep|
representatives[rep.external_id] = rep
end
parsing_data_source.representatives(@options[:period]).each do |rep|
representatives[rep.external_id] ||= rep
end
persister.import_representatives representatives.values
# mark currently attending representatives
# see https://github.com/holderdeord/hdo-site/issues/195
attending_xids = representatives_today.map(&:external_id)
Representative.all.each do |rep|
rep.update_attributes!(attending: attending_xids.include?(rep.external_id))
end
end
# Import promises from the spreadsheet path given as the first extra
# CLI argument; raises when no path was supplied.
def import_promises
  path = @rest.first
  raise "no spreadsheet path given" unless path

  persister.import_promises Hdo::StortingImporter::Promise.from_xlsx(path)
end
SCRAPED_EMAIL_URL = "https://api.scraperwiki.com/api/1.0/datastore/sqlite?format=json&name=hdo-representative-emails&query=#{URI.escape 'select * from swdata'}"
# Fetch scraped representative emails from ScraperWiki and fill in any
# Representative whose email is still unset; logs representatives without
# emails and scraped rows that matched no representative.
def import_representative_emails
  data = JSON.parse(RestClient.get(SCRAPED_EMAIL_URL))
  missing_emails = []

  data.each do |e|
    # Prefix-match on first name to tolerate middle names / initials.
    # NOTE(review): the scraped name is interpolated into a LIKE pattern, so
    # '%' or '_' in the data would widen the match — verify the source is clean.
    rep = Representative.where('first_name like ? and last_name = ?', "#{e['first_name'].strip}%", e['last_name'].strip).first
    if rep
      # ||= preserves an email that was already set manually.
      rep.email ||= e['email']
      rep.save!
    else
      missing_emails << e
    end
  end

  missing_reps = Representative.where(:email => nil).map(&:full_name)

  log.warn "representatives missing emails: #{missing_reps.to_json} "
  log.warn "emails missing representatives: #{missing_emails.to_json}"
end
# Fetch the votes for each parliament issue and yield each batch to the
# caller, logging progress plus a rough estimate of the votes remaining.
#
# @param limit [Integer, nil] stop once the running vote count reaches this
#   value (checked after yielding, so the limit may be overshot by one batch)
def each_vote_for(data_source, parliament_issues, limit = nil)
  count = 0

  parliament_issues.each_with_index do |parliament_issue, index|
    votes = data_source.votes_for(parliament_issue.external_id)
    count += votes.size

    yield votes

    break if limit && count >= limit

    parliament_issue_count = index + 1
    remaining_parliament_issues = parliament_issues.size - parliament_issue_count
    # Average votes-per-issue so far, extrapolated over the remainder.
    remaining_votes = (count / parliament_issue_count.to_f) * remaining_parliament_issues

    log.info "-> #{count} votes for #{parliament_issue_count} parliament issues imported"
    log.info "-> about #{remaining_votes.to_i} votes remaining for #{remaining_parliament_issues} parliament issues"
  end
end
# Import each file listed on the command line ("-" reads from STDIN).
# Each file must decode to a JSON object or an array of objects.
#
# @raise [TypeError] when the decoded JSON is neither Hash nor Array
def import_files
  @rest.each do |file|
    print "\nimporting #{file}:"

    # File.read instead of Kernel#open: Kernel#open would spawn a command
    # for a filename starting with "|", a classic injection hazard.
    str = file == "-" ? STDIN.read : File.read(file)

    data = MultiJson.decode(str)
    data = case data
           when Array
             data
           when Hash
             [data]
           else
             raise TypeError, "expected Hash or Array, got: #{data.inspect}"
           end

    import_data data
  end
end
# Import a batch of decoded hashes, grouped and dispatched on their 'kind'
# discriminator (e.g. 'hdo#party'). Raises when a hash lacks a kind or the
# kind is unknown.
def import_data(data)
  kinds = data.group_by do |hash|
    hash['kind'] or raise ArgumentError, "missing 'kind' property: #{hash.inspect}"
  end

  kinds.each do |kind, hashes|
    case kind
    when 'hdo#representative'
      persister.import_representatives hashes.map { |e| StortingImporter::Representative.from_hash(e) }
    when 'hdo#party'
      persister.import_parties hashes.map { |e| StortingImporter::Party.from_hash(e) }
    when 'hdo#committee'
      persister.import_committees hashes.map { |e| StortingImporter::Committee.from_hash(e) }
    when 'hdo#category'
      # NOTE(review): every other branch uses a singular class name — confirm
      # StortingImporter::Categories (plural) actually exists.
      persister.import_categories hashes.map { |e| StortingImporter::Categories.from_hash(e) }
    when 'hdo#district'
      persister.import_districts hashes.map { |e| StortingImporter::District.from_hash(e) }
    when 'hdo#issue'
      persister.import_parliament_issues hashes.map { |e| StortingImporter::ParliamentIssue.from_hash(e) }
    when 'hdo#vote'
      # import_votes (plural) will also run VoteInferrer.
      persister.import_votes hashes.map { |e| StortingImporter::Vote.from_hash(e) }
    when 'hdo#promise'
      persister.import_promises hashes.map { |e| StortingImporter::Promise.from_hash(e) }
    else
      raise "unknown type: #{kind}"
    end
  end
end
# Lazily build the parsing data source, optionally wrapped in a cache:
# Rails.cache when --cache rails was given, in-memory for --cache alone.
def parsing_data_source
  return @parsing_data_source if @parsing_data_source

  source = Hdo::StortingImporter::ParsingDataSource.new(api_data_source)

  @parsing_data_source =
    case @options[:cache]
    when 'rails'
      Hdo::StortingImporter::CachingDataSource.new(source, Rails.cache)
    when true
      Hdo::StortingImporter::CachingDataSource.new(source)
    else
      source
    end
end
# Memoized handle to the default Storting API data source.
def api_data_source
  return @api_data_source if @api_data_source
  @api_data_source = Hdo::StortingImporter::ApiDataSource.default
end
# Memoized Persister wired up with our logger.
def persister
  @persister ||= Persister.new.tap { |p| p.log = log }
end
# Logger used throughout the import; a null logger when --quiet was given.
def log
  @log ||= @options[:quiet] ? Logger.new(File::NULL) : Hdo::StortingImporter.logger
end
# Email today's new votes and mirror the message to the 'Analyse' HipChat
# room. Never raises: any failure is logged and swallowed.
def notify_new_votes
  mail = ImportMailer.votes_today_email
  return if mail.to.nil? # no new votes

  mail.deliver

  # Reuse the last MIME part of the email as the chat message body.
  message = mail.parts.last.body.raw_source

  client = hipchat_client || return
  client['Analyse'].send('Stortinget', message.to_param, notify: true)
rescue => ex
  log.error [ex.message, ex.backtrace].join("\n")
end
# Notify the 'Teknisk' HipChat room about attending representatives that
# have no email address. Never raises: failures are logged and swallowed.
def notify_missing_emails
  client = hipchat_client || return

  missing = Representative.attending.where('email is null')
  return if missing.empty?

  # Norwegian HTML message rendered with ERB against this method's binding
  # (so the template can see the local `missing`).
  template = <<-HTML
<h2>Møtende representanter uten epostadresse:</h2>
<ul>
<% missing.each do |rep| %>
<li><%= rep.external_id %>: <%= rep.full_name %></li>
<% end %>
</ul>
  HTML

  message = ERB.new(template, 0, "%-<>").result(binding)
  client['Teknisk'].send('Stortinget', message, color: 'red', notify: true)
rescue => ex
  log.error ex.message
end
# Ping the 'Teknisk' HipChat room about an upstream API failure.
# Never raises: errors are logged and swallowed.
def notify_api_error
  room = hipchat_client
  return unless room

  room['Teknisk'].send('API', "Feil hos data.stortinget.no! Hjelp!", color: 'red', notify: true)
rescue => ex
  log.error ex.message
end
# Memoized HipChat client; nil when no API token is configured.
def hipchat_client
  @hipchat_client ||= begin
    token = AppConfig.hipchat_api_token
    HipChat::Client.new(token) unless token.blank?
  end
end
# Parse CLI arguments into an options Hash.
# Defaults: period '2009-2013', session '2012-2013'; CACHE env var is a
# fallback for --cache.
#
# Fixed: the -s/--quiet handler wrote to @options, which is not assigned
# until after parsing (NoMethodError on nil); it now writes to the local
# `options` hash like every other flag.
def parse_options(args)
  options = {:period => '2009-2013', :session => '2012-2013'}

  OptionParser.new { |opt|
    opt.on("-s", "--quiet") { options[:quiet] = true }

    opt.on("--cache [rails]", "Cache results of API calls. Defaults to caching in memory, pass 'rails' to use Rails.cache instead.") do |arg|
      options[:cache] = arg || true
    end

    opt.on("--parliament-issues ISSUE_IDS", "Only import this comma-sparated list of issue external ids") do |ids|
      options[:parliament_issue_ids] = ids.split(",")
    end

    opt.on("--period PERIOD", %Q{The parliamentary period to import data for. Note that "today's representatives" will always be imported. Default: #{options[:period]}}) do |period|
      options[:period] = period
    end

    opt.on("--session SESSION", %Q{The parliamentary session to import data for. Note that "today's representatives" will always be imported. Default: #{options[:session]}}) do |session|
      options[:session] = session
    end

    opt.on("-h", "--help") do
      puts opt
      exit 1
    end
  }.parse!(args)

  options[:cache] ||= ENV['CACHE']
  options
end
end # CLI
end # Import
end # Hdo |
Add AWS S3 healthcheck module
This check reports whether the configured AWS S3 bucket is reachable.
# Healthcheck probe verifying that the configured AWS S3 bucket is reachable.
module Healthcheck
  class S3
    # Identifier under which this check is reported.
    def name
      :s3
    end

    # OK when the bucket named by AWS_S3_BUCKET_NAME can be looked up via the
    # uploader's connection; any error downgrades the check to CRITICAL.
    def status
      S3FileUploader.connection.directories.get(ENV["AWS_S3_BUCKET_NAME"])
      GovukHealthcheck::OK
    rescue StandardError
      GovukHealthcheck::CRITICAL
    end
  end
end
|
module Hector
  # Parsed IRC-style message: "COMMAND arg1 arg2 :trailing text".
  class Request
    attr_reader :line, :command, :args, :text

    def initialize(line)
      @line = line
      parse
    end

    # Dispatch name derived from the command, e.g. "PRIVMSG" -> "on_privmsg".
    def event_name
      "on_#{command.downcase}"
    end

    protected

    # Split the raw line into command, positional args and trailing text.
    # When there is no ":trailing" section, text falls back to the last arg.
    def parse
      buffer = line.dup
      @command = extract!(buffer, /^ *([^ ]+)/, "").upcase
      @text = extract!(buffer, / :(.*)$/)
      @args = buffer.strip.split(" ")
      @text ||= @args.last
    end

    # Destructively strips every match of +regex+ from +buffer+ and returns
    # the first capture group (or +default+ when nothing matched).
    def extract!(buffer, regex, default = nil)
      captured = nil
      buffer.gsub!(regex) do
        captured = Regexp.last_match(1)
        ""
      end
      captured || default
    end
  end
end
Alias Request#to_s -> #line
module Hector
  # Parsed IRC-style message: "COMMAND arg1 arg2 :trailing text".
  class Request
    attr_reader :line, :command, :args, :text

    # A request stringifies to the raw line it was built from.
    alias_method :to_s, :line

    def initialize(line)
      @line = line
      parse
    end

    # Dispatch name derived from the command, e.g. "PRIVMSG" -> "on_privmsg".
    def event_name
      "on_#{command.downcase}"
    end

    protected

    # Split the raw line into command, positional args and trailing text.
    # When there is no ":trailing" section, text falls back to the last arg.
    def parse
      buffer = line.dup
      @command = extract!(buffer, /^ *([^ ]+)/, "").upcase
      @text = extract!(buffer, / :(.*)$/)
      @args = buffer.strip.split(" ")
      @text ||= @args.last
    end

    # Destructively strips every match of +regex+ from +buffer+ and returns
    # the first capture group (or +default+ when nothing matched).
    def extract!(buffer, regex, default = nil)
      captured = nil
      buffer.gsub!(regex) do
        captured = Regexp.last_match(1)
        ""
      end
      captured || default
    end
  end
end
|
module Heroku
  # Gem version string; frozen so the shared constant cannot be mutated.
  VERSION = "3.1.1".freeze
end
3.2.0.pre
module Heroku
  # Gem version string; frozen so the shared constant cannot be mutated.
  VERSION = "3.2.0.pre".freeze
end
|
require 'sinatra'
require 'oj'
require 'rack/protection'
require 'rack/cors'
# Root Sinatra application for the Hippo API: wires up sessions, tenant
# routing, JSON error handling, webpack, CORS and request protection.
module Hippo::API
  class Root < Sinatra::Application
    # Populated elsewhere with path => {origins:, methods:} entries;
    # read below when configuring Rack::Cors.
    CORS_PATHS = {}

    Hippo.config.get(:environment) do | env |
      set :environment, env
    end

    helpers RequestWrapper
    helpers HelperMethods
    helpers FormattedReply

    use TenantDomainRouter

    use Rack::Session::Cookie,
        :key => 'rack.session',
        :secret => Hippo.config.session_secret_key_base

    # Error payloads are JSON, matching the API's normal replies.
    not_found do
      Oj.dump({ message: "endpoint not found", success: false })
    end

    error ActiveRecord::RecordNotFound do
      halt 404, error_as_json
    end

    error do
      error_as_json
    end

    configure do
      set :show_exceptions, false
      Hippo.config.apply
      Hippo::Extensions.load_controlling_config

      # Reversed so later-registered extensions win view lookup.
      set :views, Hippo::Extensions.map{|ext| ext.root_path.join('views') }.reverse
      set :webpack, Hippo::Webpack.new
      webpack.start

      require_relative './routing'
      Cable.configure

      # Record which paths are CORS-enabled so Rack::Protection can be
      # bypassed for them below.
      cors_resources = []
      if CORS_PATHS.any?
        use Rack::Cors,
            debug: !Hippo.env.production?,
            logger: Hippo.logger do
          CORS_PATHS.each do | path, options |
            allow do
              cors_resources.push Rack::Cors::Resource.new('', path)
              origins options[:origins]
              resource path,
                       :methods => options[:methods].map(&:to_sym) + [:options],
                       :headers => :any
            end
          end
        end
      end

      use Rack::Protection, allow_if: -> (env) {
        path = env['PATH_INFO']
        cors_resources.any?{|r| r.matches_path?(path) }
      }
    end
  end
end
Serve files from the public directory
require 'sinatra'
require 'oj'
require 'rack/protection'
require 'rack/cors'
# Root Sinatra application for the Hippo API: wires up sessions, tenant
# routing, JSON error handling, webpack, static files, CORS and protection.
module Hippo::API
  class Root < Sinatra::Application
    # Populated elsewhere with path => {origins:, methods:} entries;
    # read below when configuring Rack::Cors.
    CORS_PATHS = {}

    Hippo.config.get(:environment) do | env |
      set :environment, env
    end

    helpers RequestWrapper
    helpers HelperMethods
    helpers FormattedReply

    use TenantDomainRouter

    use Rack::Session::Cookie,
        :key => 'rack.session',
        :secret => Hippo.config.session_secret_key_base

    # Error payloads are JSON, matching the API's normal replies.
    not_found do
      Oj.dump({ message: "endpoint not found", success: false })
    end

    error ActiveRecord::RecordNotFound do
      halt 404, error_as_json
    end

    error do
      error_as_json
    end

    configure do
      set :show_exceptions, false
      Hippo.config.apply
      Hippo::Extensions.load_controlling_config

      # Reversed so later-registered extensions win view lookup.
      set :views, Hippo::Extensions.map{|ext| ext.root_path.join('views') }.reverse
      set :webpack, Hippo::Webpack.new
      # Serve static files from public/ outside production (presumably the
      # front-end web server handles them in production — verify deployment).
      set :public_folder, 'public' unless Hippo.env.production?
      webpack.start

      require_relative './routing'
      Cable.configure

      # Record which paths are CORS-enabled so Rack::Protection can be
      # bypassed for them below.
      cors_resources = []
      if CORS_PATHS.any?
        use Rack::Cors,
            debug: !Hippo.env.production?,
            logger: Hippo.logger do
          CORS_PATHS.each do | path, options |
            allow do
              cors_resources.push Rack::Cors::Resource.new('', path)
              origins options[:origins]
              resource path,
                       :methods => options[:methods].map(&:to_sym) + [:options],
                       :headers => :any
            end
          end
        end
      end

      use Rack::Protection, allow_if: -> (env) {
        path = env['PATH_INFO']
        cors_resources.any?{|r| r.matches_path?(path) }
      }
    end
  end
end
|
module Hivegame
  # A single cell on the board.
  class Hex
    # Hexes are never blocked in this basic implementation.
    def obstructed?
      false
    end
  end

  # Rectangular grid of Hex cells.
  class Board
    def initialize(rows = 9, cols = 9)
      @rows = rows
      @cols = cols
      @board = Array.new(@rows) { Array.new(@cols) { Hex.new } }
    end

    # Distance between two hexes on the board (not implemented yet; nil).
    def distance(position1, position2)
    end

    # Print an ASCII rendering of the board to the console; cells show the
    # distance from [4,4], or 'X' while distance is unimplemented.
    def draw
      @rows.times do |row|
        out = "#{row}:"
        out << ' ' * [@cols - row, 0].max
        @cols.times do |col|
          out << (distance([4, 4], [row, col]) || 'X').to_s
          out << ' '
        end
        puts out
      end
    end

    def empty?
      true
    end

    # Row accessor: board[row][col] yields a Hex.
    def [](row)
      @board[row]
    end
  end
end
Remove whitespace and unwanted code
module Hivegame
  # A single cell on the board (no behaviour yet).
  class Hex
  end

  # Rectangular grid of Hex cells.
  class Board
    def initialize(rows=9, cols=9)
      @rows, @cols = rows, cols
      @board = Array.new(@rows) do |row|
        Array.new(@cols) do |col|
          Hex.new
        end
      end
    end

    # This will print the board out to the console
    # FIXME(review): calls #distance, which is no longer defined on Board,
    # so invoking #draw raises NoMethodError — restore distance or drop the
    # call.
    def draw
      @rows.times do |row|
        line = ''
        line << "#{row}:"
        (@cols - row).times {line << ' '}
        @cols.times do |col|
          line << (distance([4,4], [row,col]) || 'X').to_s
          line << ' '
        end
        puts line
      end
    end

    def empty?
      true
    end

    # Row accessor: board[row][col] yields a Hex.
    def [](row)
      @board[row]
    end
  end
end
|
require 'logger'
require 'socket'
# Ruby client for the hpfeeds publish/subscribe wire protocol.
module HPFeeds
  # Protocol opcodes (byte following the length prefix).
  OP_ERROR = 0
  OP_INFO = 1
  OP_AUTH = 2
  OP_PUBLISH = 3
  OP_SUBSCRIBE = 4

  # 4-byte total length + 1-byte opcode.
  HEADERSIZE = 5
  # Socket read chunk size.
  BLOCKSIZE = 1024

  class Client
    # Connects (and authenticates) immediately.
    #
    # Options: :host, :port (10000), :ident, :secret, :timeout (3s),
    # :reconnect, :sleepwait (20s), :log_to (STDOUT), :log_level (:info).
    def initialize(options)
      @host = options[:host]
      @port = options[:port] || 10000
      @ident = options[:ident]
      @secret = options[:secret]
      @timeout = options[:timeout] || 3
      # NOTE(review): `|| true` is always truthy, so an explicit
      # `reconnect: false` is ignored; options.fetch(:reconnect, true) was
      # probably intended.
      @reconnect = options[:reconnect] || true
      @sleepwait = options[:sleepwait] || 20

      @connected = false
      @stopped = false

      log_to = options[:log_to] || STDOUT
      log_level = options[:log_level] || :info
      @logger = Logger.new(log_to)
      @logger.level = get_log_level(log_level)

      @decoder = Decoder.new
      @handlers = {}
      @subscribed = []

      tryconnect
    end

    # Connect, re-subscribing to all previously subscribed channels;
    # retries forever, sleeping @sleepwait seconds between attempts.
    def tryconnect
      loop do
        begin
          connect()
          for c in @subscribed
            subscribe_to_channel c
          end
          break
        rescue => e
          @logger.warn("#{e.class} caugthed while connecting: #{e}. Reconnecting in #{@sleepwait} seconds...")
          sleep(@sleepwait)
        end
      end
    end

    # Open the TCP connection, read the broker's OP_INFO and answer with
    # an OP_AUTH message.
    def connect
      @socket = Socket.new(Socket::AF_INET, Socket::SOCK_STREAM)
      begin
        @logger.debug("connecting #{@host}:#{@port}")
        sockaddr = Socket.pack_sockaddr_in( @port, @host )
        @socket.connect(sockaddr)
      rescue => e
        raise Exception.new("Could not connect to broker: #{e}.")
      end
      @logger.debug("waiting for data")
      header = recv_timeout(HEADERSIZE)
      opcode, len = @decoder.parse_header(header)
      @logger.debug("received header, opcode = #{opcode}, len = #{len}")
      if opcode == OP_INFO
        # NOTE(review): the main loop reads `len - HEADERSIZE` payload bytes,
        # so requesting `len` here likely over-reads by the header size —
        # verify against the protocol.
        data = recv_timeout(len)
        @logger.debug("received data = #{binary_to_hex(data)}")
        name, rand = @decoder.parse_info(data)
        @logger.debug("received INFO, name = #{name}, rand = #{binary_to_hex(rand)}")
        @brokername = name
        auth = @decoder.msg_auth(rand, @ident, @secret)
        @socket.send(auth, 0)
      else
        raise Exception.new('Expected info message at this point.')
      end
      @logger.info("connected to #{@host}, port #{@port}")
      @connected = true
      # set keepalive
      @socket.setsockopt(Socket::Option.bool(:INET, :SOCKET, :KEEPALIVE, true))
    end

    # Subscribe to one or more channels; the mandatory block handles
    # (name, channel, payload) for every published message.
    def subscribe(*channels, &block)
      if block_given?
        handler = block
      else
        raise ArgumentError.new('When subscribing to a channel, you have to provide a block as a callback for message handling')
      end
      for c in channels
        subscribe_to_channel c
        @handlers[c] = handler unless handler.nil?
        @subscribed << c
      end
    end

    # Publish the same payload to each of the given channels.
    def publish(data, *channels)
      for c in channels
        publish_to_channel c, data
      end
    end

    # Ask the run loop to stop after the current iteration.
    def stop
      @stopped = true
    end

    def close
      begin
        @logger.debug("Closing socket")
        @socket.close
      rescue => e
        @logger.warn("Socket exception when closing: #{e}")
      end
    end

    # Main receive loop: dispatches OP_PUBLISH messages to channel handlers
    # and OP_ERROR to +error_callback+ (or raises ErrorMessage). Reconnects
    # when the peer closes the connection.
    def run(error_callback = nil)
      begin
        while !@stopped
          while @connected
            header = @socket.recv(HEADERSIZE)
            if header.empty?
              @connected = false
              break
            end
            opcode, len = @decoder.parse_header(header)
            @logger.debug("received header, opcode = #{opcode}, len = #{len}")
            # NOTE(review): recv(BLOCKSIZE) can read past this message's end
            # into the next one when more data is buffered — verify framing.
            data = ''
            while data.size < len - HEADERSIZE
              data += @socket.recv(BLOCKSIZE)
            end
            if opcode == OP_ERROR
              unless error_callback.nil?
                error_callback.call(data)
              else
                raise ErrorMessage.new(data)
              end
            elsif opcode == OP_PUBLISH
              name, chan, payload = @decoder.parse_publish(data)
              @logger.debug("received #{payload.length} bytes of data from #{name} on channel #{chan}")
              handler = @handlers[chan]
              unless handler.nil?
                # ignore unhandled messages
                handler.call(name, chan, payload)
              end
            end
          end
          @logger.debug("Lost connection, trying to connect again...")
          tryconnect
        end
      rescue ErrorMessage => e
        @logger.warn("#{e.class} caugthed in main loop: #{e}")
        raise e
      rescue => e
        message = "#{e.class} caugthed in main loop: #{e}\n"
        message += e.backtrace.join("\n")
        @logger.error(message)
      end
    end

    private

    # Hex dump helper for debug logging; empty string on failure.
    def binary_to_hex s
      "0x#{s.unpack('H*')[0]}" rescue ''
    end

    def subscribe_to_channel c
      @logger.info("subscribing to #{c}")
      message = @decoder.msg_subscribe(@ident, c)
      @socket.send(message, 0)
    end

    def publish_to_channel c, data
      @logger.info("publish to #{c}: #{data}")
      message = @decoder.msg_publish(@ident, c, data)
      @socket.send(message, 0)
    end

    # recv up to +len+ bytes, raising if nothing arrives within @timeout.
    def recv_timeout(len)
      if IO.select([@socket], nil, nil, @timeout)
        @socket.recv(len)
      else
        raise Exception.new("Connection receive timeout.")
      end
    end

    # Map a symbolic level (:info etc.) to the Logger constant.
    def get_log_level(level)
      begin
        Logger.const_get(level.to_s.upcase)
      rescue
        raise ArgumentError.new("Unknown log level #{level}")
      end
    end
  end
end
Fixed bug in socket data receiving
require 'logger'
require 'socket'
# Ruby client for the hpfeeds publish/subscribe wire protocol.
module HPFeeds
  # Protocol opcodes (byte following the length prefix).
  OP_ERROR = 0
  OP_INFO = 1
  OP_AUTH = 2
  OP_PUBLISH = 3
  OP_SUBSCRIBE = 4

  # 4-byte total length + 1-byte opcode.
  HEADERSIZE = 5
  # Socket read chunk size.
  BLOCKSIZE = 1500

  class Client
    # Connects (and authenticates) immediately.
    #
    # Options: :host, :port (10000), :ident, :secret, :timeout (3s),
    # :reconnect (true), :sleepwait (20s), :log_to (STDOUT),
    # :log_level (:info).
    def initialize(options)
      @host = options[:host]
      @port = options[:port] || 10000
      @ident = options[:ident]
      @secret = options[:secret]
      @timeout = options[:timeout] || 3
      # fetch with a default so an explicit `reconnect: false` is honoured;
      # the previous `options[:reconnect] || true` was always truthy.
      @reconnect = options.fetch(:reconnect, true)
      @sleepwait = options[:sleepwait] || 20

      @connected = false
      @stopped = false

      log_to = options[:log_to] || STDOUT
      log_level = options[:log_level] || :info
      @logger = Logger.new(log_to)
      @logger.level = get_log_level(log_level)

      @decoder = Decoder.new
      @handlers = {}
      @subscribed = []

      tryconnect
    end

    # Connect, re-subscribing to all previously subscribed channels;
    # retries forever, sleeping @sleepwait seconds between attempts.
    def tryconnect
      loop do
        begin
          connect()
          for c in @subscribed
            subscribe_to_channel c
          end
          break
        rescue => e
          @logger.warn("#{e.class} caugthed while connecting: #{e}. Reconnecting in #{@sleepwait} seconds...")
          sleep(@sleepwait)
        end
      end
    end

    # Open the TCP connection, read the broker's OP_INFO message and answer
    # with an OP_AUTH message.
    def connect
      @socket = Socket.new(Socket::AF_INET, Socket::SOCK_STREAM)
      begin
        @logger.debug("connecting #{@host}:#{@port}")
        sockaddr = Socket.pack_sockaddr_in( @port, @host )
        @socket.connect(sockaddr)
      rescue => e
        raise Exception.new("Could not connect to broker: #{e}.")
      end
      @logger.debug("waiting for data")
      header = receive_data(HEADERSIZE, @timeout)
      opcode, len = @decoder.parse_header(header)
      @logger.debug("received header, opcode = #{opcode}, len = #{len}")
      if opcode == OP_INFO
        # `len` includes the header itself, so the payload is len - HEADERSIZE.
        data = receive_data(len-HEADERSIZE, @timeout)
        @logger.debug("received data = #{binary_to_hex(data)}")
        name, rand = @decoder.parse_info(data)
        @logger.debug("received INFO, name = #{name}, rand = #{binary_to_hex(rand)}")
        @brokername = name
        auth = @decoder.msg_auth(rand, @ident, @secret)
        @socket.send(auth, 0)
      else
        raise Exception.new('Expected info message at this point.')
      end
      @logger.info("connected to #{@host}, port #{@port}")
      @connected = true
      # set keepalive
      @socket.setsockopt(Socket::Option.bool(:INET, :SOCKET, :KEEPALIVE, true))
    end

    # Subscribe to one or more channels; the mandatory block handles
    # (name, channel, payload) for every published message.
    def subscribe(*channels, &block)
      if block_given?
        handler = block
      else
        raise ArgumentError.new('When subscribing to a channel, you have to provide a block as a callback for message handling')
      end
      for c in channels
        subscribe_to_channel c
        @handlers[c] = handler unless handler.nil?
        @subscribed << c
      end
    end

    # Publish the same payload to each of the given channels.
    def publish(data, *channels)
      for c in channels
        publish_to_channel c, data
      end
    end

    # Ask the run loop to stop after the current iteration.
    def stop
      @stopped = true
    end

    def close
      begin
        @logger.debug("Closing socket")
        @socket.close
      rescue => e
        @logger.warn("Socket exception when closing: #{e}")
      end
    end

    # Main receive loop: dispatches OP_PUBLISH messages to channel handlers
    # and OP_ERROR to +error_callback+ (or raises ErrorMessage). Reconnects
    # when the peer closes the connection.
    def run(error_callback = nil)
      begin
        while !@stopped
          while @connected
            header = receive_data(HEADERSIZE)
            if header.empty?
              @connected = false
              break
            end
            opcode, len = @decoder.parse_header(header)
            @logger.debug("received header, opcode = #{opcode}, len = #{len}")
            data = receive_data(len - HEADERSIZE)
            if opcode == OP_ERROR
              unless error_callback.nil?
                error_callback.call(data)
              else
                raise ErrorMessage.new(data)
              end
            elsif opcode == OP_PUBLISH
              name, chan, payload = @decoder.parse_publish(data)
              @logger.debug("received #{payload.length} bytes of data from #{name} on channel #{chan}")
              handler = @handlers[chan]
              unless handler.nil?
                # ignore unhandled messages
                handler.call(name, chan, payload)
              end
            end
          end
          @logger.debug("Lost connection, trying to connect again...")
          tryconnect
        end
      rescue ErrorMessage => e
        @logger.warn("#{e.class} caugthed in main loop: #{e}")
        raise e
      rescue => e
        message = "#{e.class} caugthed in main loop: #{e}\n"
        message += e.backtrace.join("\n")
        @logger.error(message)
      end
    end

    private

    # Hex dump helper for debug logging; empty string on failure.
    def binary_to_hex s
      "0x#{s.unpack('H*')[0]}" rescue ''
    end

    def subscribe_to_channel c
      @logger.info("subscribing to #{c}")
      message = @decoder.msg_subscribe(@ident, c)
      @socket.send(message, 0)
    end

    def publish_to_channel c, data
      @logger.info("publish to #{c}: #{data}")
      message = @decoder.msg_publish(@ident, c, data)
      @socket.send(message, 0)
    end

    # Read exactly +max+ bytes, raising if the socket is not readable within
    # +timeout+ seconds (nil blocks indefinitely).
    def receive_data(max, timeout=nil)
      if IO.select([@socket], nil, nil, timeout)
        read_from_socket(max)
      else
        raise Exception.new("Connection receive timeout.")
      end
    end

    # Read +max+ bytes in BLOCKSIZE chunks.
    # NOTE(review): assumes recv returns exactly the requested byte count;
    # TCP may deliver less — verify against a slow/fragmented broker.
    def read_from_socket(max, block = BLOCKSIZE)
      data = ''
      (max/block).times do
        data += @socket.recv(block)
      end
      data += @socket.recv(max % block)
    end

    # Map a symbolic level (:info etc.) to the Logger constant.
    def get_log_level(level)
      begin
        Logger.const_get(level.to_s.upcase)
      rescue
        raise ArgumentError.new("Unknown log level #{level}")
      end
    end
  end
end
|
require 'open3'
require 'fileutils'
require_relative '../html2pdf'
module Html2Pdf
  module Utils
    class << self
      # Batch convert to pdf using the `wkhtmltopdf` tool.
      #
      # @param [Array<String>] files the input file list
      def to_pdfs(files)
        files.each_with_index do |file, index|
          puts "Convert file #{index + 1} of #{files.size} : #{file}"
          to_pdf(file)
        end
      end

      # Convert an '*.xhtml' or '*.html' file to "<filename>.pdf".
      # Fixed: the filename interpolations in the failure messages and in the
      # input/output arguments of the wkhtmltopdf command were missing, which
      # made the generated command invalid.
      #
      # @param filename input filename
      # @raise [RuntimeError] when the input file is missing or the
      #   conversion command exits with a non-zero status
      def to_pdf(filename)
        # see: http://madalgo.au.dk/~jakobt/wkhtmltoxdoc/wkhtmltopdf_0.10.0_rc2-doc.html#Footers And Headers
        # - may be only allow "*.html" and "*.xhtml"
        # - allow the options to be passed in so that we can use different theme
        # '--no-background'
        fail "Invalid input file #{filename}" unless File.exist?(filename)
        command = [
          'wkhtmltopdf',
          '--margin-top 4',
          '--margin-bottom 4',
          '--margin-left 4',
          '--margin-right 4',
          # Note: working correctly but long URL
          '--header-center "[webpage] :: [page]/[topage]"',
          # header section
          '--header-spacing 1',
          '--header-font-size 8',
          '--header-line',
          # footer section
          '--footer-spacing 1',
          '--footer-font-size 8',
          '--footer-line',
          "#{filename}",
          "#{filename}.pdf",
          '> /dev/null']
        _stdin, _stderr, status = Open3.capture3(command.join(' '))
        puts "FYI: to_pdf command: #{command.join(' ')}"
        # Note: may be log it and continue
        fail "Problem processing #{filename}" unless status.success?
      end

      # Check and verify that the required external tools are available.
      # Fixed: previously checked 'gs' twice; the converter itself
      # ('wkhtmltopdf') is now checked as well.
      def required_softwares?
        AgileUtils::Helper.which('wkhtmltopdf') && AgileUtils::Helper.which('gs')
      end
    end
  end
end
Make result less verbose
require 'open3'
require 'fileutils'
require_relative '../html2pdf'
module Html2Pdf
  module Utils
    class << self
      # Batch convert to pdf using the `wkhtmltopdf` tool.
      #
      # @param [Array<String>] files the input file list
      def to_pdfs(files)
        files.each_with_index do |file, index|
          puts "Convert file #{index + 1} of #{files.size} : #{file}"
          to_pdf(file)
        end
      end

      # Convert an '*.xhtml' or '*.html' file to "<filename>.pdf".
      # Fixed: the filename interpolations in the failure messages and in the
      # input/output arguments of the wkhtmltopdf command were missing, which
      # made the generated command invalid.
      #
      # @param filename input filename
      # @raise [RuntimeError] when the input file is missing or the
      #   conversion command exits with a non-zero status
      def to_pdf(filename)
        # see: http://madalgo.au.dk/~jakobt/wkhtmltoxdoc/wkhtmltopdf_0.10.0_rc2-doc.html#Footers And Headers
        # - may be only allow "*.html" and "*.xhtml"
        # - allow the options to be passed in so that we can use different theme
        # '--no-background'
        fail "Invalid input file #{filename}" unless File.exist?(filename)
        command = [
          'wkhtmltopdf',
          '--margin-top 4',
          '--margin-bottom 4',
          '--margin-left 4',
          '--margin-right 4',
          # Note: working correctly but long URL
          '--header-center "[webpage] :: [page]/[topage]"',
          # header section
          '--header-spacing 1',
          '--header-font-size 8',
          '--header-line',
          # footer section
          '--footer-spacing 1',
          '--footer-font-size 8',
          '--footer-line',
          "#{filename}",
          "#{filename}.pdf",
          '> /dev/null']
        _stdin, _stderr, status = Open3.capture3(command.join(' '))
        # puts "FYI: to_pdf command: #{command.join(' ')}"
        # Note: may be log it and continue
        fail "Problem processing #{filename}" unless status.success?
      end

      # Check and verify that the required external tools are available.
      # Fixed: previously checked 'gs' twice; the converter itself
      # ('wkhtmltopdf') is now checked as well.
      def required_softwares?
        AgileUtils::Helper.which('wkhtmltopdf') && AgileUtils::Helper.which('gs')
      end
    end
  end
end
|
module Identification
  # Schema helper: adds created_by/updated_by integer columns inside
  # create_table/change_table blocks.
  module TableDefinitions
    def identities(*args)
      options = args.extract_options!
      column(:created_by, :integer, options)
      column(:updated_by, :integer, options)
    end
  end

  # Controller mixin: injects the current identity into incoming params
  # (including nested *_attributes) so models can stamp who changed them.
  module Identifier
    # Identity of the acting user; override to change the source.
    def fetch_identity
      session[:current_user_id]
    end

    private

    # Stamp :_identity onto the resource params (singular form) or onto each
    # nested hash (collection form).
    def identify
      unless fetch_identity.nil?
        key = controller_name.singularize.to_sym
        if params.include?(key)
          params[key] = add_identity_to_params(key,params[key])
          params[key][:_identity] = fetch_identity
        elsif params.include?(controller_name.to_sym)
          params[controller_name.to_sym].each_value do |param|
            param = add_identity_to_params(key,param)
            param[:_identity] = fetch_identity
          end
        end
      end
    end

    # Recursively copies the identity into nested association attribute
    # hashes, following the model's reflected associations.
    def add_identity_to_params(key,params)
      klass = key.to_s.classify.constantize
      klass.reflect_on_all_associations.each do |reflection|
        nested_attributes = "#{reflection.name}_attributes"
        if params.include?(nested_attributes)
          case reflection.macro
          when :has_one, :belongs_to
            params[nested_attributes] = add_identity_to_params(reflection.name,params[nested_attributes])
            params[nested_attributes][:_identity] = fetch_identity
          when :has_many, :has_and_belongs_to_many
            params[nested_attributes].each_value do |param|
              param = add_identity_to_params(reflection.name,param)
              param[:_identity] = fetch_identity
            end
          end
        end
      end
      params
    end
  end

  # Model mixin: writes created_by/updated_by from the injected identity on
  # save (uses Rails 2.x-era define_callbacks/attr_protected APIs).
  module Identity
    def self.included(klass)
      klass.define_callbacks :before_identify, :after_identify
      klass.attr_protected :created_by, :updated_by
      klass.before_save :_identify
      klass.send :include, InstanceMethods
    end

    module InstanceMethods
      protected

      # Setter filled in by Identifier via the :_identity param.
      def _identity=(id)
        @_identity = id
      end

      private

      # before_save hook: stamp both columns on create, updated_by on change.
      # NOTE(review): overwrites any explicitly assigned created_by/updated_by.
      def _identify
        callback :before_identify
        if new_record?
          write_attribute('created_by', @_identity) if respond_to?(:created_by)
          write_attribute('updated_by', @_identity) if respond_to?(:updated_by)
        elsif !partial_updates? || changed?
          write_attribute('updated_by', @_identity) if respond_to?(:updated_by)
        end
        callback :after_identify
      end
    end
  end
end
# Wire the mixins into the Rails core classes (send bypasses the
# historically private #include).
ActiveRecord::ConnectionAdapters::Table.send(:include,Identification::TableDefinitions)
ActiveRecord::Base.send(:include, Identification::Identity)
ActionController::Base.send(:include,Identification::Identifier)
Now you can explicitly set created/updated_by and they won't be overwritten
module Identification
  # Schema helper: adds created_by/updated_by integer columns inside
  # create_table/change_table blocks.
  module TableDefinitions
    def identities(*args)
      options = args.extract_options!
      column(:created_by, :integer, options)
      column(:updated_by, :integer, options)
    end
  end

  # Controller mixin: injects the current identity into incoming params
  # (including nested *_attributes) so models can stamp who changed them.
  module Identifier
    # Identity of the acting user; override to change the source.
    def fetch_identity
      session[:current_user_id]
    end

    private

    # Stamp :_identity onto the resource params (singular form) or onto each
    # nested hash (collection form).
    def identify
      unless fetch_identity.nil?
        key = controller_name.singularize.to_sym
        if params.include?(key)
          params[key] = add_identity_to_params(key,params[key])
          params[key][:_identity] = fetch_identity
        elsif params.include?(controller_name.to_sym)
          params[controller_name.to_sym].each_value do |param|
            param = add_identity_to_params(key,param)
            param[:_identity] = fetch_identity
          end
        end
      end
    end

    # Recursively copies the identity into nested association attribute
    # hashes, following the model's reflected associations.
    def add_identity_to_params(key,params)
      klass = key.to_s.classify.constantize
      klass.reflect_on_all_associations.each do |reflection|
        nested_attributes = "#{reflection.name}_attributes"
        if params.include?(nested_attributes)
          case reflection.macro
          when :has_one, :belongs_to
            params[nested_attributes] = add_identity_to_params(reflection.name,params[nested_attributes])
            params[nested_attributes][:_identity] = fetch_identity
          when :has_many, :has_and_belongs_to_many
            params[nested_attributes].each_value do |param|
              param = add_identity_to_params(reflection.name,param)
              param[:_identity] = fetch_identity
            end
          end
        end
      end
      params
    end
  end

  # Model mixin: writes created_by/updated_by from the injected identity on
  # save (uses Rails 2.x-era define_callbacks/attr_protected APIs).
  module Identity
    def self.included(klass)
      klass.define_callbacks :before_identify, :after_identify
      klass.attr_protected :created_by, :updated_by
      klass.before_save :_identify
      klass.send :include, InstanceMethods
    end

    module InstanceMethods
      protected

      # Setter filled in by Identifier via the :_identity param.
      def _identity=(id)
        @_identity = id
      end

      private

      # before_save hook: stamp both columns on create, updated_by on change.
      # The nil? guards keep explicitly assigned values from being clobbered.
      def _identify
        callback :before_identify
        if new_record?
          write_attribute('created_by', @_identity) if respond_to?(:created_by) and created_by.nil?
          write_attribute('updated_by', @_identity) if respond_to?(:updated_by) and updated_by.nil?
        elsif !partial_updates? || changed?
          write_attribute('updated_by', @_identity) if respond_to?(:updated_by) and updated_by.nil?
        end
        callback :after_identify
      end
    end
  end
end
# Wire the mixins into the Rails core classes (send bypasses the
# historically private #include).
ActiveRecord::ConnectionAdapters::Table.send(:include,Identification::TableDefinitions)
ActiveRecord::Base.send(:include, Identification::Identity)
ActionController::Base.send(:include,Identification::Identifier)
module Indico
  # Gem version string; frozen so the shared constant cannot be mutated.
  VERSION = '0.7.0'.freeze
end
UPDATE: version number
module Indico
  # Gem version string; frozen so the shared constant cannot be mutated.
  VERSION = '0.7.1'.freeze
end
|
# -*- encoding : utf-8 -*-
module Innodb
  # Gem version string; frozen so the shared constant cannot be mutated.
  VERSION = "0.9.0".freeze
end
Bump version to 0.9.5
# -*- encoding : utf-8 -*-
module Innodb
  # Gem version string; frozen so the shared constant cannot be mutated.
  VERSION = "0.9.5".freeze
end
|
#METODO only allow build to set fields that are part of the API fields
#METODO make a distinction between fields that you can set and save and ones you can only read - like DATE_UPDATED_UTC
module Insightly
  # Base class for Insightly API resources. Subclasses set url_base (their
  # REST path) and declare api/custom fields, which become accessors over
  # the raw @data hash returned by the API.
  class Base
    class << self
      # api_fields: declared field names; url_base: resource path segment.
      attr_accessor :api_fields,:url_base
    end
    self.api_fields = []

    # Define reader/writer pairs for positional custom fields; values are
    # stored under "<CUSTOM_FIELD_PREFIX>_<n>" (n is the 1-based position).
    def self.custom_fields(*args)
      args.each_with_index do |method, index|
        next if method.nil? or method == ""
        method_name = method.to_s.downcase.to_sym
        send :define_method, method_name do
          @data["#{self.class.const_get(:CUSTOM_FIELD_PREFIX)}_#{index+1}"]
        end
        method_name = "#{method.to_s.downcase}=".to_sym
        send :define_method, method_name do |value|
          @data["#{self.class.const_get(:CUSTOM_FIELD_PREFIX)}_#{index+1}"] = value
        end
      end
    end

    # Declare API fields: records the name and defines reader/writer
    # accessors keyed by the exact field name.
    def self.api_field(*args)
      args.each do |field|
        # Subclasses start with a nil api_fields (class-level ivar), so
        # initialise it lazily.
        self.api_fields = [] if !self.api_fields
        self.api_fields << field
        method_name = field.downcase.to_sym
        send :define_method, method_name do
          @data[field]
        end
        method_name = "#{field.downcase}=".to_sym
        send :define_method, method_name do |value|
          @data[field] = value
        end
      end
    end

    # Optionally loads the remote record when an id is given.
    def initialize(id = nil)
      @data = {}
      load(id) if id
    end

    def url_base
      self.class.url_base
    end

    # Subclasses must return the remote record's id.
    def remote_id
      raise ScriptError, "This should be overridden in the subclass"
    end

    # Fetch the record and replace the local data wholesale.
    def load(id)
      @data = get_collection("#{url_base}/#{id}")
      self
    end

    def reload
      load(remote_id)
    end

    def to_json
      @data.to_json
    end

    # Replace the backing data hash; returns self for chaining.
    def build(data)
      @data = data
      self
    end

    def self.build(data)
      self.new.build(data)
    end

    # Equality is defined purely on the raw data hashes.
    def ==(other)
      self.remote_data == other.remote_data
    end

    def remote_data
      @data
    end

    # Parse a RestClient response according to the requested content type.
    def process(result, content_type)
      if content_type == :json
        JSON.parse(result.to_str)
      elsif content_type == :xml
        Hash.from_xml(result.to_str)
      else
        result
      end
    end

    def config
      Insightly::Configuration.instantiate
    end

    # GET a path from the API. :xml_raw requests XML but skips parsing.
    def get_collection(path, content_selector = :json)
      if content_selector == :xml_raw
        content_type = :xml
      else
        content_type = content_selector
      end
      response = RestClient::Request.new(:method => :get,
        :url => "#{config.endpoint}/#{path.to_s}",
        :user => config.api_key,
        :password => "",
        :headers => {:accept => content_type, :content_type => content_type}).execute
      process(response, content_selector)
    end

    # Fetch every record for this resource as built instances.
    def self.all
      item = self.new
      list = []
      item.get_collection(item.url_base).each do |d|
        list << self.new.build(d)
      end
      list
    end
  end
end
Added a note about tags vs custom fields
#METODO look at tags vs custom fields
#METODO only allow build to set fields that are part of the API fields
#METODO make a distinction between fields that you can set and save and ones you can only read - like DATE_UPDATED_UTC
module Insightly
  # Base class for Insightly API resources. Subclasses set url_base (their
  # REST path) and declare api/custom fields, which become accessors over
  # the raw @data hash returned by the API.
  class Base
    class << self
      # api_fields: declared field names; url_base: resource path segment.
      attr_accessor :api_fields,:url_base
    end
    self.api_fields = []

    # Define reader/writer pairs for positional custom fields; values are
    # stored under "<CUSTOM_FIELD_PREFIX>_<n>" (n is the 1-based position).
    def self.custom_fields(*args)
      args.each_with_index do |method, index|
        next if method.nil? or method == ""
        method_name = method.to_s.downcase.to_sym
        send :define_method, method_name do
          @data["#{self.class.const_get(:CUSTOM_FIELD_PREFIX)}_#{index+1}"]
        end
        method_name = "#{method.to_s.downcase}=".to_sym
        send :define_method, method_name do |value|
          @data["#{self.class.const_get(:CUSTOM_FIELD_PREFIX)}_#{index+1}"] = value
        end
      end
    end

    # Declare API fields: records the name and defines reader/writer
    # accessors keyed by the exact field name.
    def self.api_field(*args)
      args.each do |field|
        # Subclasses start with a nil api_fields (class-level ivar), so
        # initialise it lazily.
        self.api_fields = [] if !self.api_fields
        self.api_fields << field
        method_name = field.downcase.to_sym
        send :define_method, method_name do
          @data[field]
        end
        method_name = "#{field.downcase}=".to_sym
        send :define_method, method_name do |value|
          @data[field] = value
        end
      end
    end

    # Optionally loads the remote record when an id is given.
    def initialize(id = nil)
      @data = {}
      load(id) if id
    end

    def url_base
      self.class.url_base
    end

    # Subclasses must return the remote record's id.
    def remote_id
      raise ScriptError, "This should be overridden in the subclass"
    end

    # Fetch the record and replace the local data wholesale.
    def load(id)
      @data = get_collection("#{url_base}/#{id}")
      self
    end

    def reload
      load(remote_id)
    end

    def to_json
      @data.to_json
    end

    # Replace the backing data hash; returns self for chaining.
    def build(data)
      @data = data
      self
    end

    def self.build(data)
      self.new.build(data)
    end

    # Equality is defined purely on the raw data hashes.
    def ==(other)
      self.remote_data == other.remote_data
    end

    def remote_data
      @data
    end

    # Parse a RestClient response according to the requested content type.
    def process(result, content_type)
      if content_type == :json
        JSON.parse(result.to_str)
      elsif content_type == :xml
        Hash.from_xml(result.to_str)
      else
        result
      end
    end

    def config
      Insightly::Configuration.instantiate
    end

    # GET a path from the API. :xml_raw requests XML but skips parsing.
    def get_collection(path, content_selector = :json)
      if content_selector == :xml_raw
        content_type = :xml
      else
        content_type = content_selector
      end
      response = RestClient::Request.new(:method => :get,
        :url => "#{config.endpoint}/#{path.to_s}",
        :user => config.api_key,
        :password => "",
        :headers => {:accept => content_type, :content_type => content_type}).execute
      process(response, content_selector)
    end

    # Fetch every record for this resource as built instances.
    def self.all
      item = self.new
      list = []
      item.get_collection(item.url_base).each do |d|
        list << self.new.build(d)
      end
      list
    end
  end
end
# ----------------------------------------------------------------------------
module Intacct
  # Pushes appraiser records to Intacct as vendors.
  #
  # NOTE(review): relies on helpers presumably supplied by Intacct::Base
  # (send_xml, successful?, object, intacct_vendor_prefix,
  # intacct_system_id) — confirm against that class; it is not visible here.
  class Vendor < Intacct::Base
    # Issues a create_vendor call for the wrapped object.
    # Returns the boolean from successful?.
    def create
      send_xml('create') do |xml|
        xml.function(controlid: "1") {
          xml.create_vendor {
            xml.vendorid intacct_object_id
            vendor_xml xml
          }
        }
      end
      successful?
    end

    # Issues an update_vendor call. An optional replacement object may be
    # passed; returns false without sending anything when the record has no
    # intacct_system_id yet.
    def update updated_vendor = false
      @object = updated_vendor if updated_vendor
      return false if object.intacct_system_id.nil?
      send_xml('update') do |xml|
        xml.function(controlid: "1") {
          xml.update_vendor(vendorid: intacct_system_id) {
            vendor_xml xml
          }
        }
      end
      successful?
    end

    # Issues a delete_vendor call; keeps the raw response in @response.
    # Returns false when the record has no intacct_system_id yet.
    def delete
      return false if object.intacct_system_id.nil?
      @response = send_xml('delete') do |xml|
        xml.function(controlid: "1") {
          xml.delete_vendor(vendorid: intacct_system_id)
        }
      end
      successful?
    end

    # Vendor id sent to Intacct: configured prefix + local record id.
    def intacct_object_id
      "#{intacct_vendor_prefix}#{object.id}"
    end

    # Emits the vendor fields shared by create and update.
    def vendor_xml xml
      # Company name wins over the person's full name when present.
      xml.name "#{object.company_name.present? ? object.company_name : object.full_name}"
      #[todo] - Custom
      xml.vendtype "Appraiser"
      xml.taxid object.tax_id
      xml.billingtype "balanceforward"
      xml.status "active"
      xml.contactinfo {
        xml.contact {
          xml.contactname "#{object.last_name}, #{object.first_name} (#{object.id})"
          xml.printas object.full_name
          xml.companyname object.company_name
          xml.firstname object.first_name
          xml.lastname object.last_name
          xml.phone1 object.business_phone
          xml.cellphone object.cell_phone
          xml.email1 object.email
          if object.billing_address.present?
            xml.mailaddress {
              xml.address1 object.billing_address.address1
              xml.address2 object.billing_address.address2
              xml.city object.billing_address.city
              xml.state object.billing_address.state
              xml.zip object.billing_address.zipcode
            }
          end
        }
      }
      # ACH banking details, only when a routing number exists.
      if object.ach_routing_number.present?
        # NOTE(review): guarded by the surrounding .present? check, so this
        # ternary always yields "true".
        xml.achenabled "#{object.ach_routing_number.present? ? "true" : "false"}"
        xml.achbankroutingnumber object.ach_routing_number
        xml.achaccountnumber object.ach_account_number
        xml.achaccounttype "#{object.ach_account_type.capitalize+" Account"}"
        xml.achremittancetype "#{(object.ach_account_classification=="business" ? "CCD" : "PPD")}"
      end
    end
  end
end
# updates to ACH vendor info
module Intacct
  # Pushes appraiser records to Intacct as vendors; this revision also sets
  # ACH payment method/notification fields when a routing number is present.
  #
  # NOTE(review): relies on helpers presumably supplied by Intacct::Base
  # (send_xml, successful?, object, intacct_vendor_prefix,
  # intacct_system_id) — confirm against that class; it is not visible here.
  class Vendor < Intacct::Base
    # Issues a create_vendor call for the wrapped object.
    # Returns the boolean from successful?.
    def create
      send_xml('create') do |xml|
        xml.function(controlid: "1") {
          xml.create_vendor {
            xml.vendorid intacct_object_id
            vendor_xml xml
          }
        }
      end
      successful?
    end

    # Issues an update_vendor call. An optional replacement object may be
    # passed; returns false without sending anything when the record has no
    # intacct_system_id yet.
    def update updated_vendor = false
      @object = updated_vendor if updated_vendor
      return false if object.intacct_system_id.nil?
      send_xml('update') do |xml|
        xml.function(controlid: "1") {
          xml.update_vendor(vendorid: intacct_system_id) {
            vendor_xml xml
          }
        }
      end
      successful?
    end

    # Issues a delete_vendor call; keeps the raw response in @response.
    # Returns false when the record has no intacct_system_id yet.
    def delete
      return false if object.intacct_system_id.nil?
      @response = send_xml('delete') do |xml|
        xml.function(controlid: "1") {
          xml.delete_vendor(vendorid: intacct_system_id)
        }
      end
      successful?
    end

    # Vendor id sent to Intacct: configured prefix + local record id.
    def intacct_object_id
      "#{intacct_vendor_prefix}#{object.id}"
    end

    # Emits the vendor fields shared by create and update.
    def vendor_xml xml
      # Company name wins over the person's full name when present.
      xml.name "#{object.company_name.present? ? object.company_name : object.full_name}"
      #[todo] - Custom
      xml.vendtype "Appraiser"
      xml.taxid object.tax_id
      # Pay vendors by ACH as soon as banking details are on file.
      xml.paymethod "ACH" if object.ach_routing_number.present?
      xml.billingtype "balanceforward"
      xml.status "active"
      xml.contactinfo {
        xml.contact {
          xml.contactname "#{object.last_name}, #{object.first_name} (#{object.id})"
          xml.printas object.full_name
          xml.companyname object.company_name
          xml.firstname object.first_name
          xml.lastname object.last_name
          xml.phone1 object.business_phone
          xml.cellphone object.cell_phone
          xml.email1 object.email
          if object.billing_address.present?
            xml.mailaddress {
              xml.address1 object.billing_address.address1
              xml.address2 object.billing_address.address2
              xml.city object.billing_address.city
              xml.state object.billing_address.state
              xml.zip object.billing_address.zipcode
            }
          end
        }
      }
      # ACH banking details, only when a routing number exists.
      if object.ach_routing_number.present?
        xml.paymentnotify "true"
        # NOTE(review): guarded by the surrounding .present? check, so this
        # ternary always yields "true".
        xml.achenabled "#{object.ach_routing_number.present? ? "true" : "false"}"
        xml.achbankroutingnumber object.ach_routing_number
        xml.achaccountnumber object.ach_account_number
        xml.achaccounttype "#{object.ach_account_type.capitalize+" Account"}"
        xml.achremittancetype "#{(object.ach_account_classification=="business" ? "CCD" : "PPD")}"
      end
    end
  end
end
# ----------------------------------------------------------------------------
module Jasmine
  # Runs a Jasmine JavaScript suite from Ruby: serves the suite over Rack,
  # drives a browser through Selenium, and resolves src/spec/helper/stylesheet
  # file lists from the ERB-processed YAML config at
  # spec/javascripts/support/jasmine.yml.
  class Config
    require 'yaml'
    require 'erb'

    # Browser launched by Selenium; JASMINE_BROWSER overrides the default.
    def browser
      ENV["JASMINE_BROWSER"] || 'firefox'
    end

    # Host the suite is served on; JASMINE_HOST overrides the default.
    def jasmine_host
      ENV["JASMINE_HOST"] || 'http://localhost'
    end

    # Port of an externally managed Selenium server, or nil when unset or
    # non-positive.
    def external_selenium_server_port
      ENV['SELENIUM_SERVER_PORT'] && ENV['SELENIUM_SERVER_PORT'].to_i > 0 ? ENV['SELENIUM_SERVER_PORT'].to_i : nil
    end

    # Boots the Rack app serving the suite on the given port (blocking).
    def start_server(port = 8888)
      server = Rack::Server.new(:Port => port, :AccessLog => [])
      server.instance_variable_set(:@app, Jasmine.app(self)) # workaround for Rack bug, when Rack > 1.2.1 is released Rack::Server.start(:app => Jasmine.app(self)) will work
      server.start
    end

    # Starts the suite server and connects the Selenium-driven browser client.
    def start
      start_jasmine_server
      @client = Jasmine::SeleniumDriver.new(browser, "#{jasmine_host}:#{@jasmine_server_port}/")
      @client.connect
    end

    def stop
      @client.disconnect
    end

    # Boots the jasmine server on a free port in a background thread and
    # blocks until it accepts connections.
    def start_jasmine_server
      @jasmine_server_port = Jasmine::find_unused_port
      Thread.new do
        start_server(@jasmine_server_port)
      end
      Jasmine::wait_for_listener(@jasmine_server_port, "jasmine server")
      puts "jasmine server started."
    end

    # Truthy (match index) on Windows hosts, nil elsewhere.
    def windows?
      require 'rbconfig'
      ::RbConfig::CONFIG['host_os'] =~ /mswin|mingw/
    end

    # def start_selenium_server
    #   @selenium_server_port = external_selenium_server_port
    #   if @selenium_server_port.nil?
    #     @selenium_server_port = Jasmine::find_unused_port
    #     require 'selenium-rc'
    #     SeleniumRC::Server.boot("localhost", @selenium_server_port, :args => [windows? ? ">NUL" : "> /dev/null"])
    #   else
    #     Jasmine::wait_for_listener(@selenium_server_port, "selenium server")
    #   end
    # end

    # Runs the suite end to end; always disconnects the client, even when
    # startup or the run raises.
    def run
      begin
        start
        puts "servers are listening on their ports -- running the test script..."
        tests_passed = @client.run
      ensure
        stop
      end
      return tests_passed
    end

    def eval_js(script)
      @client.eval_js(script)
    end

    def json_generate(obj)
      @client.json_generate(obj)
    end

    # Expands glob patterns relative to dir; patterns prefixed with "!"
    # subtract their matches. Paths are returned relative to dir, sorted
    # per pattern.
    def match_files(dir, patterns)
      dir = File.expand_path(dir)
      negative, positive = patterns.partition {|pattern| /^!/ =~ pattern}
      chosen, negated = [positive, negative].collect do |patterns|
        patterns.collect do |pattern|
          matches = Dir.glob(File.join(dir, pattern.gsub(/^!/,'')))
          matches.collect {|f| f.sub("#{dir}/", "")}.sort
        end.flatten.uniq
      end
      chosen - negated
    end

    # Parsed jasmine.yml (ERB then YAML); {} when the file is absent or the
    # parse yields nothing.
    def simple_config
      config = File.exist?(simple_config_file) ? YAML::load(ERB.new(File.read(simple_config_file)).result(binding)) : false
      config || {}
    end

    def spec_path
      "/__spec__"
    end

    def root_path
      "/__root__"
    end

    # All JS files to load: sources first, then helpers, then specs
    # (optionally narrowed to a single filter pattern).
    def js_files(spec_filter = nil)
      spec_files_to_include = spec_filter.nil? ? spec_files : match_files(spec_dir, [spec_filter])
      src_files.collect {|f| "/" + f } + helpers.collect {|f| File.join(spec_path, f) } + spec_files_to_include.collect {|f| File.join(spec_path, f) }
    end

    def css_files
      stylesheets.collect {|f| "/" + f }
    end

    def spec_files_full_paths
      spec_files.collect {|spec_file| File.join(spec_dir, spec_file) }
    end

    def project_root
      Dir.pwd
    end

    def simple_config_file
      File.join(project_root, 'spec/javascripts/support/jasmine.yml')
    end

    # Source directory from config, defaulting to the project root.
    def src_dir
      if simple_config['src_dir']
        File.join(project_root, simple_config['src_dir'])
      else
        project_root
      end
    end

    # Spec directory from config, defaulting to spec/javascripts.
    def spec_dir
      if simple_config['spec_dir']
        File.join(project_root, simple_config['spec_dir'])
      else
        File.join(project_root, 'spec/javascripts')
      end
    end

    # Helper scripts loaded before the specs.
    def helpers
      if simple_config['helpers']
        match_files(spec_dir, simple_config['helpers'])
      else
        match_files(spec_dir, ["helpers/**/*.js"])
      end
    end

    # Source scripts under test; empty unless configured.
    def src_files
      if simple_config['src_files']
        match_files(src_dir, simple_config['src_files'])
      else
        []
      end
    end

    # Spec scripts; defaults to any *spec.js / *Spec.js under spec_dir.
    def spec_files
      if simple_config['spec_files']
        match_files(spec_dir, simple_config['spec_files'])
      else
        match_files(spec_dir, ["**/*[sS]pec.js"])
      end
    end

    # Stylesheets served with the suite; empty unless configured.
    def stylesheets
      if simple_config['stylesheets']
        match_files(src_dir, simple_config['stylesheets'])
      else
        []
      end
    end
  end
end
# removing outdated method
module Jasmine
  # Runs a Jasmine JavaScript suite from Ruby: serves the suite over Rack,
  # drives a browser through Selenium, and resolves src/spec/helper/stylesheet
  # file lists from the ERB-processed YAML config at
  # spec/javascripts/support/jasmine.yml.
  class Config
    require 'yaml'
    require 'erb'

    # Browser launched by Selenium; JASMINE_BROWSER overrides the default.
    def browser
      ENV["JASMINE_BROWSER"] || 'firefox'
    end

    # Host the suite is served on; JASMINE_HOST overrides the default.
    def jasmine_host
      ENV["JASMINE_HOST"] || 'http://localhost'
    end

    # Port of an externally managed Selenium server, or nil when unset or
    # non-positive.
    def external_selenium_server_port
      raw = ENV['SELENIUM_SERVER_PORT']
      return nil unless raw && raw.to_i > 0
      raw.to_i
    end

    # Boots the Rack app serving the suite on the given port (blocking).
    def start_server(port = 8888)
      rack_server = Rack::Server.new(:Port => port, :AccessLog => [])
      # workaround for Rack bug, when Rack > 1.2.1 is released
      # Rack::Server.start(:app => Jasmine.app(self)) will work
      rack_server.instance_variable_set(:@app, Jasmine.app(self))
      rack_server.start
    end

    # Starts the suite server and connects the Selenium-driven browser client.
    def start
      start_jasmine_server
      @client = Jasmine::SeleniumDriver.new(browser, "#{jasmine_host}:#{@jasmine_server_port}/")
      @client.connect
    end

    def stop
      @client.disconnect
    end

    # Boots the jasmine server on a free port in a background thread and
    # blocks until it accepts connections.
    def start_jasmine_server
      @jasmine_server_port = Jasmine::find_unused_port
      Thread.new { start_server(@jasmine_server_port) }
      Jasmine::wait_for_listener(@jasmine_server_port, "jasmine server")
      puts "jasmine server started."
    end

    # Truthy (match index) on Windows hosts, nil elsewhere.
    def windows?
      require 'rbconfig'
      ::RbConfig::CONFIG['host_os'] =~ /mswin|mingw/
    end

    # Runs the suite end to end; always disconnects the client, even when
    # startup or the run raises.
    def run
      passed = nil
      begin
        start
        puts "servers are listening on their ports -- running the test script..."
        passed = @client.run
      ensure
        stop
      end
      passed
    end

    def eval_js(script)
      @client.eval_js(script)
    end

    def json_generate(obj)
      @client.json_generate(obj)
    end

    # Expands glob patterns relative to dir; patterns prefixed with "!"
    # subtract their matches. Paths are returned relative to dir, sorted
    # per pattern.
    def match_files(dir, patterns)
      base = File.expand_path(dir)
      expand = lambda do |globs|
        globs.map do |glob|
          found = Dir.glob(File.join(base, glob.sub(/^!/, '')))
          found.map { |path| path.sub("#{base}/", "") }.sort
        end.flatten.uniq
      end
      exclusions, inclusions = patterns.partition { |glob| glob.start_with?("!") }
      expand.call(inclusions) - expand.call(exclusions)
    end

    # Parsed jasmine.yml (ERB then YAML); {} when the file is absent or the
    # parse yields nothing.
    def simple_config
      return {} unless File.exist?(simple_config_file)
      YAML::load(ERB.new(File.read(simple_config_file)).result(binding)) || {}
    end

    def spec_path
      "/__spec__"
    end

    def root_path
      "/__root__"
    end

    # All JS files to load: sources first, then helpers, then specs
    # (optionally narrowed to a single filter pattern).
    def js_files(spec_filter = nil)
      chosen_specs = spec_filter.nil? ? spec_files : match_files(spec_dir, [spec_filter])
      sources      = src_files.map { |f| "/" + f }
      helper_paths = helpers.map { |f| File.join(spec_path, f) }
      spec_paths   = chosen_specs.map { |f| File.join(spec_path, f) }
      sources + helper_paths + spec_paths
    end

    def css_files
      stylesheets.map { |f| "/" + f }
    end

    def spec_files_full_paths
      spec_files.map { |spec_file| File.join(spec_dir, spec_file) }
    end

    def project_root
      Dir.pwd
    end

    def simple_config_file
      File.join(project_root, 'spec/javascripts/support/jasmine.yml')
    end

    # Source directory from config, defaulting to the project root.
    def src_dir
      configured = simple_config['src_dir']
      configured ? File.join(project_root, configured) : project_root
    end

    # Spec directory from config, defaulting to spec/javascripts.
    def spec_dir
      File.join(project_root, simple_config['spec_dir'] || 'spec/javascripts')
    end

    # Helper scripts loaded before the specs.
    def helpers
      match_files(spec_dir, simple_config['helpers'] || ["helpers/**/*.js"])
    end

    # Source scripts under test; empty unless configured.
    def src_files
      configured = simple_config['src_files']
      configured ? match_files(src_dir, configured) : []
    end

    # Spec scripts; defaults to any *spec.js / *Spec.js under spec_dir.
    def spec_files
      match_files(spec_dir, simple_config['spec_files'] || ["**/*[sS]pec.js"])
    end

    # Stylesheets served with the suite; empty unless configured.
    def stylesheets
      configured = simple_config['stylesheets']
      configured ? match_files(src_dir, configured) : []
    end
  end
end
# coding: utf-8
module Jkf::Parser
  # Recursive-descent (PEG-style) parser for the Ki2 shogi record format.
  #
  # Conventions used by every rule below (the match_* helpers and the
  # @current_pos / @reported_pos cursors come from Base):
  #   * a rule returns :failed and rewinds @current_pos when it cannot match;
  #   * @reported_pos is set to the start of a successful match just before
  #     the rule builds its result;
  #   * the s0..sN locals mirror the machine-generated parser this code
  #     was derived from, so statement order is significant.
  class Ki2 < Base
    # root <- header* initialboard? header* moves fork*
    def parse_root
      s0 = @current_pos
      s1 = []
      s2 = parse_header
      while s2 != :failed
        s1 << s2
        s2 = parse_header
      end
      if s1 != :failed
        s2 = parse_initialboard
        s2 = nil if s2 == :failed     # the initial board is optional
        s3 = []
        s4 = parse_header
        while s4 != :failed
          s3 << s4
          s4 = parse_header
        end
        s4 = parse_moves
        if s4 != :failed
          s5 = []
          s6 = parse_fork
          while s6 != :failed
            s5 << s6
            s6 = parse_fork
          end
          @reported_pos = s0
          s0 = transform_root(s1, s2, s3, s4, s5)
        else
          @current_pos = s0
          s0 = :failed
        end
      else
        @current_pos = s0
        s0 = :failed
      end
      s0
    end

    # header <- key ":" value nl+ / header_teban
    # Returns { "k" => key, "v" => value }.
    def parse_header
      s0 = @current_pos
      s2 = match_regexp(/^[^*:\r\n]/)
      if s2 != :failed
        s1 = []
        while s2 != :failed
          s1 << s2
          s2 = match_regexp(/^[^:\r\n]/)
        end
      else
        s1 = :failed
      end
      if s1 != :failed
        if match_str(":") != :failed
          s3 = parse_nonls
          s5 = parse_nl
          if s5 != :failed
            s4 = []
            while s5 != :failed
              s4 << s5
              s5 = parse_nl
            end
          else
            s4 = :failed
          end
          if s4 != :failed
            @reported_pos = s0
            s0 = { "k" => s1.join, "v" => s3.join }
          else
            @current_pos = s0
            s0 = :failed
          end
        else
          @current_pos = s0
          s0 = :failed
        end
      else
        @current_pos = s0
        s0 = :failed
      end
      s0 = parse_header_teban if s0 == :failed
      s0
    end

    # header_teban <- [先後上下] "手番" nl — turn indicator as a header entry.
    def parse_header_teban
      s0 = @current_pos
      s1 = match_regexp(/^[先後上下]/)
      if s1 != :failed
        s2 = match_str("手番")
        if s2 != :failed
          s3 = parse_nl
          if s3 != :failed
            @reported_pos = s0
            { "k" => "手番", "v" => s1 }
          else
            @current_pos = s0
            :failed
          end
        else
          @current_pos = s0
          :failed
        end
      else
        @current_pos = s0
        :failed
      end
    end

    # initialboard <- optional column-label line, optional "+...-" border,
    # one or more board rows, optional closing border.
    def parse_initialboard
      s0 = s1 = @current_pos
      if match_space != :failed
        parse_nonls
        s2 = parse_nl
        @current_pos = s1 if s2 == :failed
      else
        @current_pos = s1
      end
      s2 = @current_pos
      if match_str("+") != :failed
        parse_nonls
        @current_pos = s2 if parse_nl == :failed
      else
        @current_pos = s2
      end
      s4 = parse_ikkatsuline
      if s4 != :failed
        s3 = []
        while s4 != :failed
          s3 << s4
          s4 = parse_ikkatsuline
        end
      else
        s3 = :failed
      end
      if s3 != :failed
        s4 = @current_pos
        if match_str("+") != :failed
          parse_nonls
          @current_pos = s4 if parse_nl == :failed
        else
          @current_pos = s4
        end
        @reported_pos = s0
        transform_initialboard(s3)
      else
        @current_pos = s0
        :failed
      end
    end

    # ikkatsuline <- "|" masu+ "|" trailing-label nl — one board row.
    def parse_ikkatsuline
      s0 = @current_pos
      if match_str("|") != :failed
        s3 = parse_masu
        if s3 != :failed
          s2 = []
          while s3 != :failed
            s2 << s3
            s3 = parse_masu
          end
        else
          s2 = :failed
        end
        if s2 != :failed
          if match_str("|") != :failed
            s4 = parse_nonls!
            if s4 != :failed
              if parse_nl != :failed
                @reported_pos = s0
                s0 = s2
              else
                @current_pos = s0
                s0 = :failed
              end
            else
              @current_pos = s0
              s0 = :failed
            end
          else
            @current_pos = s0
            s0 = :failed
          end
        else
          @current_pos = s0
          s0 = :failed
        end
      else
        @current_pos = s0
        s0 = :failed
      end
      s0
    end

    # masu <- teban piece / " ・" — one square: occupied or empty ({}).
    def parse_masu
      s0 = @current_pos
      s1 = parse_teban
      if s1 != :failed
        s2 = parse_piece
        if s2 != :failed
          @reported_pos = s0
          s0 = { "color" => s1, "kind" => s2 }
        else
          @current_pos = s0
          s0 = :failed
        end
      else
        @current_pos = s0
        s0 = :failed
      end
      if s0 == :failed
        s0 = @current_pos
        s1 = match_str(" ・")
        if s1 != :failed
          @reported_pos = s0
          s1 = {}
        end
        s0 = s1
      end
      s0
    end

    # teban <- (space / "+" / "^") => 0 (sente) ; ("v" / "V") => 1 (gote).
    def parse_teban
      s0 = @current_pos
      s1 = match_space
      s1 = match_str("+") if s1 == :failed
      s1 = match_str("^") if s1 == :failed
      if s1 != :failed
        @reported_pos = s0
        s1 = 0
      end
      s0 = s1
      if s0 == :failed
        s0 = @current_pos
        s1 = match_str("v")
        s1 = match_str("V") if s1 == :failed
        if s1 != :failed
          @reported_pos = s0
          s1 = 1
        end
        s0 = s1
      end
      s0
    end

    # moves <- firstboard move* result? — returns the move list with the
    # initial (comment-only) node prepended and a trailing special node for
    # the result when one was parsed.
    def parse_moves
      s0 = @current_pos
      s1 = parse_firstboard
      if s1 != :failed
        s2 = []
        s3 = parse_move
        while s3 != :failed
          s2 << s3
          s3 = parse_move
        end
        s3 = parse_result
        s3 = nil if s3 == :failed
        @reported_pos = s0
        s0 = -> (hd, tl, res) do
          tl.unshift(hd)
          tl << { "special" => res } if res && !tl[tl.length - 1]["special"]
          tl
        end.call(s1, s2, s3)
      else
        @current_pos = s0
        s0 = :failed
      end
      s0
    end

    # firstboard <- comment* pointer? — comments attached before move 1.
    def parse_firstboard
      s0 = @current_pos
      s1 = []
      s2 = parse_comment
      while s2 != :failed
        s1 << s2
        s2 = parse_comment
      end
      parse_pointer
      @reported_pos = s0
      s0 = s1.empty? ? {} : { "comments" => s1 }
      s0
    end

    # move <- line comment* pointer? (nl / space)* — one move plus its
    # comments; trailing whitespace/newlines are consumed and discarded.
    def parse_move
      s0 = @current_pos
      s1 = parse_line
      if s1 != :failed
        s2 = []
        s3 = parse_comment
        while s3 != :failed
          s2 << s3
          s3 = parse_comment
        end
        parse_pointer
        s4 = []
        s5 = parse_nl
        s5 = match_space if s5 == :failed
        while s5 != :failed
          s4 << s5
          s5 = parse_nl
          s5 = match_space if s5 == :failed
        end
        @reported_pos = s0
        s0 = -> (line, c) do
          ret = { "move" => line }
          ret["comments"] = c if !c.empty?
          ret
        end.call(s1, s2)
      else
        @current_pos = s0
        s0 = :failed
      end
      s0
    end

    # pointer <- "&" nonl* nl — bookmark lines; parsed but not kept.
    def parse_pointer
      s0 = @current_pos
      s1 = match_str("&")
      if s1 != :failed
        s2 = parse_nonls
        s3 = parse_nl
        if s3 != :failed
          s0 = [s1, s2, s3]
        else
          @current_pos = s0
          s0 = :failed
        end
      else
        @current_pos = s0
        s0 = :failed
      end
      s0
    end

    # line <- [▲△] fugou (nl / space)* — ▲ is sente (0), △ gote (1).
    def parse_line
      s0 = @current_pos
      s1 = match_regexp(/^[▲△]/)
      if s1 != :failed
        s1 = if s1 == "▲"
               { "color" => 0 }
             else
               { "color" => 1 }
             end
        s2 = parse_fugou
        if s2 != :failed
          s3 = []
          s4 = parse_nl
          s4 = match_space if s4 == :failed
          while s4 != :failed
            s3 << s4
            s4 = parse_nl
            s4 = match_space if s4 == :failed
          end
          @reported_pos = s0
          s0 = s2.merge(s1)
        else
          @current_pos = s0
          s0 = :failed
        end
      else
        @current_pos = s0
        s0 = :failed
      end
      s0
    end

    # fugou <- place piece soutai? dousa? ("成"/"不成")? "打"? — full move
    # notation: destination, piece, disambiguators, promotion flag, drop flag.
    def parse_fugou
      s0 = @current_pos
      s1 = parse_place
      if s1 != :failed
        s2 = parse_piece
        if s2 != :failed
          s3 = parse_soutai
          s3 = nil if s3 == :failed
          s4 = parse_dousa
          s4 = nil if s4 == :failed
          s5 = match_str("成")
          s5 = match_str("不成") if s5 == :failed
          s5 = nil if s5 == :failed
          s6 = match_str("打")
          s6 = nil if s6 == :failed
          @reported_pos = s0
          transform_fugou(s1, s2, s3, s4, s5, s6)
        else
          @current_pos = s0
          :failed
        end
      else
        @current_pos = s0
        :failed
      end
    end

    # place <- num numkan / "同" " "? — destination square, or "same square
    # as the previous move".
    def parse_place
      s0 = @current_pos
      s1 = parse_num
      if s1 != :failed
        s2 = parse_numkan
        if s2 != :failed
          @reported_pos = s0
          s0 = { "x" => s1, "y" => s2 }
        else
          @current_pos = s0
          s0 = :failed
        end
      else
        @current_pos = s0
        s0 = :failed
      end
      if s0 == :failed
        s0 = @current_pos
        if match_regexp("同") != :failed
          match_str(" ")
          @reported_pos = s0
          s0 = { "same" => true }
        else
          @current_pos = s0
          s0 = :failed
        end
      end
      s0
    end

    # piece <- "成"? [歩香桂銀金角飛王玉と杏圭全馬竜龍] — CSA piece code.
    def parse_piece
      s0 = @current_pos
      s1 = match_regexp("成")
      s1 = "" if s1 == :failed
      s2 = match_regexp(/^[歩香桂銀金角飛王玉と杏圭全馬竜龍]/)
      if s2 != :failed
        @reported_pos = s0
        s0 = kind2csa(s1 + s2)
      else
        @current_pos = s0
        s0 = :failed
      end
      s0
    end

    # soutai <- [左直右] — horizontal disambiguator.
    def parse_soutai
      match_regexp(/^[左直右]/)
    end

    # dousa <- [上寄引] — movement disambiguator.
    def parse_dousa
      match_regexp(/^[上寄引]/)
    end

    # num <- [1-9] — file as an integer.
    def parse_num
      s0 = @current_pos
      s1 = match_regexp(/^[123456789]/)
      if s1 != :failed
        @reported_pos = s0
        s1 = zen2n(s1)
      end
      s0 = s1
      s0
    end

    # numkan <- [一二三四五六七八九] — rank (kanji numeral) as an integer.
    def parse_numkan
      s0 = @current_pos
      s1 = match_regexp(/^[一二三四五六七八九]/)
      if s1 != :failed
        @reported_pos = s0
        s1 = kan2n(s1)
      end
      s0 = s1
      s0
    end

    # comment <- "*" nonl* nl — returns the comment text.
    def parse_comment
      s0 = @current_pos
      if match_str("*") != :failed
        s2 = parse_nonls
        s3 = parse_nl
        if s3 != :failed
          @reported_pos = s0
          s0 = s2.join
        else
          @current_pos = s0
          s0 = :failed
        end
      else
        @current_pos = s0
        s0 = :failed
      end
      s0
    end

    # result <- "まで" digits "手" (outcome...) (nl / EOF) — game result line;
    # yields a CSA special-move string such as "TORYO" or "TSUMI".
    def parse_result
      s0 = @current_pos
      if match_str("まで") != :failed
        s2 = match_digits!
        if s2 != :failed
          if match_str("手") != :failed
            s4 = @current_pos
            # "で<turn>手の" + win/illegal outcome
            if match_str("で") != :failed
              if parse_turn != :failed
                if match_str("手の") != :failed
                  s8 = parse_result_toryo
                  s8 = parse_result_illegal if s8 == :failed
                  s4 = if s8 != :failed
                         @reported_pos = s4
                         s8
                       else
                         @current_pos = s4
                         :failed
                       end
                else
                  @current_pos = s4
                  s4 = :failed
                end
              else
                @current_pos = s4
                s4 = :failed
              end
            else
              @current_pos = s4
              s4 = :failed
            end
            # otherwise try the other terminal outcomes in order
            if s4 == :failed
              s4 = parse_result_timeup
              if s4 == :failed
                s4 = parse_result_chudan
                if s4 == :failed
                  s4 = parse_result_jishogi
                  if s4 == :failed
                    s4 = parse_result_sennichite
                    if s4 == :failed
                      s4 = parse_result_tsumi
                      s4 = parse_result_fuzumi if s4 == :failed
                    end
                  end
                end
              end
            end
            if s4 != :failed
              if parse_nl != :failed || eos?
                @reported_pos = s0
                s4
              else
                @current_pos = s0
                :failed
              end
            else
              @current_pos = s0
              :failed
            end
          else
            @current_pos = s0
            :failed
          end
        else
          @current_pos = s0
          :failed
        end
      else
        @current_pos = s0
        :failed
      end
    end

    # "勝ち" => resignation by the loser.
    def parse_result_toryo
      s0 = @current_pos
      s1 = match_str("勝ち")
      if s1 != :failed
        @reported_pos = s0
        "TORYO"
      else
        @current_pos = s0
        :failed
      end
    end

    # "反則勝ち" => ILLEGAL_ACTION, "反則負け" => ILLEGAL_MOVE.
    def parse_result_illegal
      s0 = @current_pos
      if match_str("反則") != :failed
        s10 = @current_pos
        s11 = match_str("勝ち")
        if s11 != :failed
          @reported_pos = s10
          s11 = "ILLEGAL_ACTION"
        end
        s10 = s11
        if s10 == :failed
          s10 = @current_pos
          s11 = match_str("負け")
          if s11 != :failed
            @reported_pos = s10
            s11 = "ILLEGAL_MOVE"
          end
          s10 = s11
        end
        if s10 != :failed
          @reported_pos = s0
          s10
        else
          @current_pos = s0
          :failed
        end
      else
        @current_pos = s0
        :failed
      end
    end

    # "で時間切れにより<turn>手の勝ち" => TIME_UP.
    def parse_result_timeup
      s0 = @current_pos
      if match_str("で時間切れにより") != :failed
        if parse_turn != :failed
          if match_str("手の勝ち") != :failed
            @reported_pos = s0
            "TIME_UP"
          else
            @current_pos = s0
            :failed
          end
        else
          @current_pos = s0
          :failed
        end
      else
        @current_pos = s0
        :failed
      end
    end

    # "で中断" => suspended game.
    def parse_result_chudan
      s0 = @current_pos
      s1 = match_str("で中断")
      if s1 != :failed
        @reported_pos = s0
        "CHUDAN"
      else
        @current_pos = s0
        :failed
      end
    end

    # "で持将棋" => impasse draw.
    def parse_result_jishogi
      s0 = @current_pos
      s1 = match_str("で持将棋")
      if s1 != :failed
        @reported_pos = s0
        "JISHOGI"
      else
        @current_pos = s0
        :failed
      end
    end

    # "で千日手" => repetition draw.
    def parse_result_sennichite
      s0 = @current_pos
      s1 = match_str("で千日手")
      if s1 != :failed
        @reported_pos = s0
        "SENNICHITE"
      else
        @current_pos = s0
        :failed
      end
    end

    # "で"? "詰" "み"? => checkmate.
    def parse_result_tsumi
      s0 = @current_pos
      match_str("で")
      if match_str("詰") != :failed
        match_str("み")
        @reported_pos = s0
        "TSUMI"
      else
        @current_pos = s0
        :failed
      end
    end

    # "で不詰" => no mate (tsume problem unsolved).
    def parse_result_fuzumi
      s0 = @current_pos
      s1 = match_str("で不詰")
      if s1 != :failed
        @reported_pos = s0
        "FUZUMI"
      else
        @current_pos = s0
        :failed
      end
    end

    # fork <- "変化:" spaces digits "手" nl moves — a variation branching at
    # move number `te`; the firstboard node of the sub-line is dropped.
    def parse_fork
      s0 = @current_pos
      if match_str("変化:") != :failed
        match_spaces
        s3 = match_digits!
        if s3 != :failed
          if match_str("手") != :failed
            if parse_nl != :failed
              s6 = parse_moves
              if s6 != :failed
                @reported_pos = s0
                s0 = { "te" => s3.join.to_i, "moves" => s6[1..-1] }
              else
                @current_pos = s0
                s0 = :failed
              end
            else
              @current_pos = s0
              s0 = :failed
            end
          else
            @current_pos = s0
            s0 = :failed
          end
        else
          @current_pos = s0
          s0 = :failed
        end
      else
        @current_pos = s0
        s0 = :failed
      end
      s0
    end

    # turn <- [先後上下]
    def parse_turn
      match_regexp(/^[先後上下]/)
    end

    # nl <- newline+ skipline* — newlines plus any "#"-prefixed skip lines.
    def parse_nl
      s0 = @current_pos
      s2 = parse_newline
      if s2 != :failed
        s1 = []
        while s2 != :failed
          s1 << s2
          s2 = parse_newline
        end
      else
        s1 = :failed
      end
      if s1 != :failed
        s2 = []
        s3 = parse_skipline
        while s3 != :failed
          s2 << s3
          s3 = parse_skipline
        end
        s0 = [s1, s2]
      else
        @current_pos = s0
        s0 = :failed
      end
      s0
    end

    # skipline <- "#" nonl* newline
    def parse_skipline
      s0 = @current_pos
      s1 = match_str("#")
      if s1 != :failed
        s2 = parse_nonls
        s3 = parse_newline
        if s3 != :failed
          s0 = [s1, s2, s3]
        else
          @current_pos = s0
          s0 = :failed
        end
      else
        @current_pos = s0
        s0 = :failed
      end
      s0
    end

    # whitespace <- space / tab
    def parse_whitespace
      s0 = match_space
      s0 = match_str("\t") if s0 == :failed
      s0
    end

    # newline <- whitespace* ("\n" / "\r" "\n"?)
    def parse_newline
      s0 = @current_pos
      s1 = []
      s2 = parse_whitespace
      while s2 != :failed
        s1 << s2
        s2 = parse_whitespace
      end
      s2 = match_str("\n")
      if s2 == :failed
        s2 = @current_pos
        s3 = match_str("\r")
        if s3 != :failed
          s4 = match_str("\n")
          s4 = nil if s4 == :failed
          s2 = [s3, s4]
        else
          @current_pos = s2
          s2 = :failed
        end
      end
      if s2 != :failed
        s0 = [s1, s2]
      else
        @current_pos = s0
        s0 = :failed
      end
      s0
    end

    # nonl <- any char except CR/LF
    def parse_nonl
      match_regexp(/^[^\r\n]/)
    end

    # nonls <- nonl* — possibly-empty array of chars up to end of line.
    def parse_nonls
      stack = []
      matched = parse_nonl
      while matched != :failed
        stack << matched
        matched = parse_nonl
      end
      stack
    end

    # nonls! <- nonl+ — like parse_nonls but :failed on an empty match.
    def parse_nonls!
      matched = parse_nonls
      if matched.empty?
        :failed
      else
        matched
      end
    end

    protected

    # Assembles the final jkf hash: merges both header runs, derives the
    # initial position (explicit board or handicap preset from 手合割),
    # normalizes turn/hand headers into initial["data"], and grafts each
    # parsed variation onto the move it branches from via a fork stack.
    def transform_root(headers, ini, headers2, moves, forks)
      ret = { "header" => {}, "moves" => moves }
      headers.compact.each { |h| ret["header"][h["k"]] = h["v"] }
      headers2.compact.each { |h| ret["header"][h["k"]] = h["v"] }
      if ini
        ret["initial"] = ini
      elsif ret["header"]["手合割"]
        preset = preset2str(ret["header"]["手合割"])
        ret["initial"] = { "preset" => preset } if preset != "OTHER"
      end
      if ret["initial"] && ret["initial"]["data"]
        if ret["header"]["手番"]
          # 下/先 mean the first player moves next
          ret["initial"]["data"]["color"] = "下先".index(ret["header"]["手番"]) >= 0 ? 0 : 1
          ret["header"].delete("手番")
        else
          ret["initial"]["data"]["color"] = 0
        end
        ret["initial"]["data"]["hands"] = [
          make_hand(ret["header"]["先手の持駒"] || ret["header"]["下手の持駒"]),
          make_hand(ret["header"]["後手の持駒"] || ret["header"]["上手の持駒"])
        ]
        %w(先手の持駒 下手の持駒 後手の持駒 上手の持駒).each do |key|
          ret["header"].delete(key)
        end
      end
      # Attach forks: pop back to the branch point, then push the new line so
      # deeper forks can nest inside it.
      fork_stack = [{ "te" => 0, "moves" => moves }]
      forks.each do |f|
        now_fork = f
        _fork = fork_stack.pop
        _fork = fork_stack.pop while _fork["te"] > now_fork["te"]
        move = _fork["moves"][now_fork["te"] - _fork["te"]]
        move["forks"] ||= []
        move["forks"] << now_fork["moves"]
        fork_stack << _fork
        fork_stack << now_fork
      end
      ret
    end

    # Rotates the parsed rows (file-major, Ki2 reading order) into the jkf
    # board layout.
    def transform_initialboard(lines)
      board = []
      9.times do |i|
        line = []
        9.times do |j|
          line << lines[j][8 - i]
        end
        board << line
      end
      { "preset" => "OTHER", "data" => { "board" => board } }
    end

    # Builds the move hash from its notation pieces: destination (or "same"),
    # promotion flag, and relative modifier ("H" for drops, otherwise the
    # combined left/right + up/down disambiguators).
    def transform_fugou(pl, pi, sou, dou, pro, da)
      ret = { "piece" => pi }
      if pl["same"]
        ret["same"] = true
      else
        ret["to"] = pl
      end
      ret["promote"] = (pro == "成") if pro
      if da
        ret["relative"] = "H"
      else
        rel = soutai2relative(sou) + dousa2relative(dou)
        ret["relative"] = rel unless rel.empty?
      end
      ret
    end

    # ASCII digit character -> Integer.
    def zen2n(s)
      "0123456789".index(s)
    end

    # Kanji numeral (single digit) -> Integer.
    def kan2n(s)
      "〇一二三四五六七八九".index(s)
    end

    # Kanji numeral up to 20 ("十八" style) -> Integer; raises above 20.
    def kan2n2(s)
      case s.length
      when 1
        "〇一二三四五六七八九十".index(s)
      when 2
        "〇一二三四五六七八九十".index(s[1]) + 10
      else
        raise "21以上の数値に対応していません"
      end
    end

    # Piece kanji (optionally prefixed with 成) -> CSA piece code.
    def kind2csa(kind)
      if kind[0] == "成"
        {
          "香" => "NY",
          "桂" => "NK",
          "銀" => "NG"
        }[kind[1]]
      else
        {
          "歩" => "FU",
          "香" => "KY",
          "桂" => "KE",
          "銀" => "GI",
          "金" => "KI",
          "角" => "KA",
          "飛" => "HI",
          "玉" => "OU",
          "王" => "OU",
          "と" => "TO",
          "杏" => "NY",
          "圭" => "NK",
          "全" => "NG",
          "馬" => "UM",
          "竜" => "RY",
          "龍" => "RY"
        }[kind]
      end
    end

    # Horizontal disambiguator kanji -> jkf relative letter ("" when nil).
    def soutai2relative(str)
      {
        "左" => "L",
        "直" => "C",
        "右" => "R"
      }[str] || ""
    end

    # Movement disambiguator kanji -> jkf relative letter ("" when nil).
    def dousa2relative(str)
      {
        "上" => "U",
        "寄" => "M",
        "引" => "D"
      }[str] || ""
    end

    # Handicap name (whitespace-insensitive) -> jkf preset code.
    def preset2str(preset)
      {
        "平手" => "HIRATE",
        "香落ち" => "KY",
        "右香落ち" => "KY_R",
        "角落ち" => "KA",
        "飛車落ち" => "HI",
        "飛香落ち" => "HIKY",
        "二枚落ち" => "2",
        "三枚落ち" => "3",
        "四枚落ち" => "4",
        "五枚落ち" => "5",
        "左五枚落ち" => "5_L",
        "六枚落ち" => "6",
        "八枚落ち" => "8",
        "十枚落ち" => "10",
        "その他" => "OTHER"
      }[preset.gsub(/\s/, "")]
    end

    # Parses a hand string like "飛 歩三" into a piece-count hash; a piece
    # with no numeral counts as one.
    def make_hand(str)
      ret = { "FU" => 0, "KY" => 0, "KE" => 0, "GI" => 0, "KI" => 0, "KA" => 0, "HI" => 0 }
      return ret if str.empty?
      str.gsub(/ $/, "").split(" ").each do |kind|
        next if kind.empty?
        ret[kind2csa(kind[0])] = kind.length == 1 ? 1 : kan2n2(kind[1..-1])
      end
      ret
    end

    # True when the cursor has consumed all input.
    def eos?
      @input[@current_pos].nil?
    end
  end
end
# add transform_root_forks
# coding: utf-8
module Jkf::Parser
class Ki2 < Base
def parse_root
s0 = @current_pos
s1 = []
s2 = parse_header
while s2 != :failed
s1 << s2
s2 = parse_header
end
if s1 != :failed
s2 = parse_initialboard
s2 = nil if s2 == :failed
s3 = []
s4 = parse_header
while s4 != :failed
s3 << s4
s4 = parse_header
end
s4 = parse_moves
if s4 != :failed
s5 = []
s6 = parse_fork
while s6 != :failed
s5 << s6
s6 = parse_fork
end
@reported_pos = s0
s0 = transform_root(s1, s2, s3, s4, s5)
else
@current_pos = s0
s0 = :failed
end
else
@current_pos = s0
s0 = :failed
end
s0
end
def parse_header
s0 = @current_pos
s2 = match_regexp(/^[^*:\r\n]/)
if s2 != :failed
s1 = []
while s2 != :failed
s1 << s2
s2 = match_regexp(/^[^:\r\n]/)
end
else
s1 = :failed
end
if s1 != :failed
if match_str(":") != :failed
s3 = parse_nonls
s5 = parse_nl
if s5 != :failed
s4 = []
while s5 != :failed
s4 << s5
s5 = parse_nl
end
else
s4 = :failed
end
if s4 != :failed
@reported_pos = s0
s0 = { "k" => s1.join, "v" => s3.join }
else
@current_pos = s0
s0 = :failed
end
else
@current_pos = s0
s0 = :failed
end
else
@current_pos = s0
s0 = :failed
end
s0 = parse_header_teban if s0 == :failed
s0
end
def parse_header_teban
s0 = @current_pos
s1 = match_regexp(/^[先後上下]/)
if s1 != :failed
s2 = match_str("手番")
if s2 != :failed
s3 = parse_nl
if s3 != :failed
@reported_pos = s0
{ "k" => "手番", "v" => s1 }
else
@current_pos = s0
:failed
end
else
@current_pos = s0
:failed
end
else
@current_pos = s0
:failed
end
end
def parse_initialboard
s0 = s1 = @current_pos
if match_space != :failed
parse_nonls
s2 = parse_nl
@current_pos = s1 if s2 == :failed
else
@current_pos = s1
end
s2 = @current_pos
if match_str("+") != :failed
parse_nonls
@current_pos = s2 if parse_nl == :failed
else
@current_pos = s2
end
s4 = parse_ikkatsuline
if s4 != :failed
s3 = []
while s4 != :failed
s3 << s4
s4 = parse_ikkatsuline
end
else
s3 = :failed
end
if s3 != :failed
s4 = @current_pos
if match_str("+") != :failed
parse_nonls
@current_pos = s4 if parse_nl == :failed
else
@current_pos = s4
end
@reported_pos = s0
transform_initialboard(s3)
else
@current_pos = s0
:failed
end
end
def parse_ikkatsuline
s0 = @current_pos
if match_str("|") != :failed
s3 = parse_masu
if s3 != :failed
s2 = []
while s3 != :failed
s2 << s3
s3 = parse_masu
end
else
s2 = :failed
end
if s2 != :failed
if match_str("|") != :failed
s4 = parse_nonls!
if s4 != :failed
if parse_nl != :failed
@reported_pos = s0
s0 = s2
else
@current_pos = s0
s0 = :failed
end
else
@current_pos = s0
s0 = :failed
end
else
@current_pos = s0
s0 = :failed
end
else
@current_pos = s0
s0 = :failed
end
else
@current_pos = s0
s0 = :failed
end
s0
end
def parse_masu
s0 = @current_pos
s1 = parse_teban
if s1 != :failed
s2 = parse_piece
if s2 != :failed
@reported_pos = s0
s0 = { "color" => s1, "kind" => s2 }
else
@current_pos = s0
s0 = :failed
end
else
@current_pos = s0
s0 = :failed
end
if s0 == :failed
s0 = @current_pos
s1 = match_str(" ・")
if s1 != :failed
@reported_pos = s0
s1 = {}
end
s0 = s1
end
s0
end
def parse_teban
s0 = @current_pos
s1 = match_space
s1 = match_str("+") if s1 == :failed
s1 = match_str("^") if s1 == :failed
if s1 != :failed
@reported_pos = s0
s1 = 0
end
s0 = s1
if s0 == :failed
s0 = @current_pos
s1 = match_str("v")
s1 = match_str("V") if s1 == :failed
if s1 != :failed
@reported_pos = s0
s1 = 1
end
s0 = s1
end
s0
end
def parse_moves
s0 = @current_pos
s1 = parse_firstboard
if s1 != :failed
s2 = []
s3 = parse_move
while s3 != :failed
s2 << s3
s3 = parse_move
end
s3 = parse_result
s3 = nil if s3 == :failed
@reported_pos = s0
s0 = -> (hd, tl, res) do
tl.unshift(hd)
tl << { "special" => res } if res && !tl[tl.length - 1]["special"]
tl
end.call(s1, s2, s3)
else
@current_pos = s0
s0 = :failed
end
s0
end
def parse_firstboard
s0 = @current_pos
s1 = []
s2 = parse_comment
while s2 != :failed
s1 << s2
s2 = parse_comment
end
parse_pointer
@reported_pos = s0
s0 = s1.empty? ? {} : { "comments" => s1 }
s0
end
def parse_move
s0 = @current_pos
s1 = parse_line
if s1 != :failed
s2 = []
s3 = parse_comment
while s3 != :failed
s2 << s3
s3 = parse_comment
end
parse_pointer
s4 = []
s5 = parse_nl
s5 = match_space if s5 == :failed
while s5 != :failed
s4 << s5
s5 = parse_nl
s5 = match_space if s5 == :failed
end
@reported_pos = s0
s0 = -> (line, c) do
ret = { "move" => line }
ret["comments"] = c if !c.empty?
ret
end.call(s1, s2)
else
@current_pos = s0
s0 = :failed
end
s0
end
def parse_pointer
s0 = @current_pos
s1 = match_str("&")
if s1 != :failed
s2 = parse_nonls
s3 = parse_nl
if s3 != :failed
s0 = [s1, s2, s3]
else
@current_pos = s0
s0 = :failed
end
else
@current_pos = s0
s0 = :failed
end
s0
end
def parse_line
s0 = @current_pos
s1 = match_regexp(/^[▲△]/)
if s1 != :failed
s1 = if s1 == "▲"
{ "color" => 0 }
else
{ "color" => 1 }
end
s2 = parse_fugou
if s2 != :failed
s3 = []
s4 = parse_nl
s4 = match_space if s4 == :failed
while s4 != :failed
s3 << s4
s4 = parse_nl
s4 = match_space if s4 == :failed
end
@reported_pos = s0
s0 = s2.merge(s1)
else
@current_pos = s0
s0 = :failed
end
else
@current_pos = s0
s0 = :failed
end
s0
end
def parse_fugou
s0 = @current_pos
s1 = parse_place
if s1 != :failed
s2 = parse_piece
if s2 != :failed
s3 = parse_soutai
s3 = nil if s3 == :failed
s4 = parse_dousa
s4 = nil if s4 == :failed
s5 = match_str("成")
s5 = match_str("不成") if s5 == :failed
s5 = nil if s5 == :failed
s6 = match_str("打")
s6 = nil if s6 == :failed
@reported_pos = s0
transform_fugou(s1, s2, s3, s4, s5, s6)
else
@current_pos = s0
:failed
end
else
@current_pos = s0
:failed
end
end
def parse_place
s0 = @current_pos
s1 = parse_num
if s1 != :failed
s2 = parse_numkan
if s2 != :failed
@reported_pos = s0
s0 = { "x" => s1, "y" => s2 }
else
@current_pos = s0
s0 = :failed
end
else
@current_pos = s0
s0 = :failed
end
if s0 == :failed
s0 = @current_pos
if match_regexp("同") != :failed
match_str(" ")
@reported_pos = s0
s0 = { "same" => true }
else
@current_pos = s0
s0 = :failed
end
end
s0
end
def parse_piece
s0 = @current_pos
s1 = match_regexp("成")
s1 = "" if s1 == :failed
s2 = match_regexp(/^[歩香桂銀金角飛王玉と杏圭全馬竜龍]/)
if s2 != :failed
@reported_pos = s0
s0 = kind2csa(s1 + s2)
else
@current_pos = s0
s0 = :failed
end
s0
end
def parse_soutai
match_regexp(/^[左直右]/)
end
def parse_dousa
match_regexp(/^[上寄引]/)
end
def parse_num
s0 = @current_pos
s1 = match_regexp(/^[123456789]/)
if s1 != :failed
@reported_pos = s0
s1 = zen2n(s1)
end
s0 = s1
s0
end
def parse_numkan
s0 = @current_pos
s1 = match_regexp(/^[一二三四五六七八九]/)
if s1 != :failed
@reported_pos = s0
s1 = kan2n(s1)
end
s0 = s1
s0
end
def parse_comment
s0 = @current_pos
if match_str("*") != :failed
s2 = parse_nonls
s3 = parse_nl
if s3 != :failed
@reported_pos = s0
s0 = s2.join
else
@current_pos = s0
s0 = :failed
end
else
@current_pos = s0
s0 = :failed
end
s0
end
def parse_result
s0 = @current_pos
if match_str("まで") != :failed
s2 = match_digits!
if s2 != :failed
if match_str("手") != :failed
s4 = @current_pos
if match_str("で") != :failed
if parse_turn != :failed
if match_str("手の") != :failed
s8 = parse_result_toryo
s8 = parse_result_illegal if s8 == :failed
s4 = if s8 != :failed
@reported_pos = s4
s8
else
@current_pos = s4
:failed
end
else
@current_pos = s4
s4 = :failed
end
else
@current_pos = s4
s4 = :failed
end
else
@current_pos = s4
s4 = :failed
end
if s4 == :failed
s4 = parse_result_timeup
if s4 == :failed
s4 = parse_result_chudan
if s4 == :failed
s4 = parse_result_jishogi
if s4 == :failed
s4 = parse_result_sennichite
if s4 == :failed
s4 = parse_result_tsumi
s4 = parse_result_fuzumi if s4 == :failed
end
end
end
end
end
if s4 != :failed
if parse_nl != :failed || eos?
@reported_pos = s0
s4
else
@current_pos = s0
:failed
end
else
@current_pos = s0
:failed
end
else
@current_pos = s0
:failed
end
else
@current_pos = s0
:failed
end
else
@current_pos = s0
:failed
end
end
def parse_result_toryo
s0 = @current_pos
s1 = match_str("勝ち")
if s1 != :failed
@reported_pos = s0
"TORYO"
else
@current_pos = s0
:failed
end
end
def parse_result_illegal
s0 = @current_pos
if match_str("反則") != :failed
s10 = @current_pos
s11 = match_str("勝ち")
if s11 != :failed
@reported_pos = s10
s11 = "ILLEGAL_ACTION"
end
s10 = s11
if s10 == :failed
s10 = @current_pos
s11 = match_str("負け")
if s11 != :failed
@reported_pos = s10
s11 = "ILLEGAL_MOVE"
end
s10 = s11
end
if s10 != :failed
@reported_pos = s0
s10
else
@current_pos = s0
:failed
end
else
@current_pos = s0
:failed
end
end
def parse_result_timeup
s0 = @current_pos
if match_str("で時間切れにより") != :failed
if parse_turn != :failed
if match_str("手の勝ち") != :failed
@reported_pos = s0
"TIME_UP"
else
@current_pos = s0
:failed
end
else
@current_pos = s0
:failed
end
else
@current_pos = s0
:failed
end
end
def parse_result_chudan
s0 = @current_pos
s1 = match_str("で中断")
if s1 != :failed
@reported_pos = s0
"CHUDAN"
else
@current_pos = s0
:failed
end
end
def parse_result_jishogi
s0 = @current_pos
s1 = match_str("で持将棋")
if s1 != :failed
@reported_pos = s0
"JISHOGI"
else
@current_pos = s0
:failed
end
end
def parse_result_sennichite
s0 = @current_pos
s1 = match_str("で千日手")
if s1 != :failed
@reported_pos = s0
"SENNICHITE"
else
@current_pos = s0
:failed
end
end
def parse_result_tsumi
s0 = @current_pos
match_str("で")
if match_str("詰") != :failed
match_str("み")
@reported_pos = s0
"TSUMI"
else
@current_pos = s0
:failed
end
end
def parse_result_fuzumi
s0 = @current_pos
s1 = match_str("で不詰")
if s1 != :failed
@reported_pos = s0
"FUZUMI"
else
@current_pos = s0
:failed
end
end
def parse_fork
s0 = @current_pos
if match_str("変化:") != :failed
match_spaces
s3 = match_digits!
if s3 != :failed
if match_str("手") != :failed
if parse_nl != :failed
s6 = parse_moves
if s6 != :failed
@reported_pos = s0
s0 = { "te" => s3.join.to_i, "moves" => s6[1..-1] }
else
@current_pos = s0
s0 = :failed
end
else
@current_pos = s0
s0 = :failed
end
else
@current_pos = s0
s0 = :failed
end
else
@current_pos = s0
s0 = :failed
end
else
@current_pos = s0
s0 = :failed
end
s0
end
def parse_turn
match_regexp(/^[先後上下]/)
end
def parse_nl
s0 = @current_pos
s2 = parse_newline
if s2 != :failed
s1 = []
while s2 != :failed
s1 << s2
s2 = parse_newline
end
else
s1 = :failed
end
if s1 != :failed
s2 = []
s3 = parse_skipline
while s3 != :failed
s2 << s3
s3 = parse_skipline
end
s0 = [s1, s2]
else
@current_pos = s0
s0 = :failed
end
s0
end
def parse_skipline
s0 = @current_pos
s1 = match_str("#")
if s1 != :failed
s2 = parse_nonls
s3 = parse_newline
if s3 != :failed
s0 = [s1, s2, s3]
else
@current_pos = s0
s0 = :failed
end
else
@current_pos = s0
s0 = :failed
end
s0
end
def parse_whitespace
s0 = match_space
s0 = match_str("\t") if s0 == :failed
s0
end
def parse_newline
s0 = @current_pos
s1 = []
s2 = parse_whitespace
while s2 != :failed
s1 << s2
s2 = parse_whitespace
end
s2 = match_str("\n")
if s2 == :failed
s2 = @current_pos
s3 = match_str("\r")
if s3 != :failed
s4 = match_str("\n")
s4 = nil if s4 == :failed
s2 = [s3, s4]
else
@current_pos = s2
s2 = :failed
end
end
if s2 != :failed
s0 = [s1, s2]
else
@current_pos = s0
s0 = :failed
end
s0
end
def parse_nonl
match_regexp(/^[^\r\n]/)
end
def parse_nonls
stack = []
matched = parse_nonl
while matched != :failed
stack << matched
matched = parse_nonl
end
stack
end
def parse_nonls!
matched = parse_nonls
if matched.empty?
:failed
else
matched
end
end
protected
def transform_root(headers, ini, headers2, moves, forks)
ret = { "header" => {}, "moves" => moves }
headers.compact.each { |h| ret["header"][h["k"]] = h["v"] }
headers2.compact.each { |h| ret["header"][h["k"]] = h["v"] }
if ini
ret["initial"] = ini
elsif ret["header"]["手合割"]
preset = preset2str(ret["header"]["手合割"])
ret["initial"] = { "preset" => preset } if preset != "OTHER"
end
if ret["initial"] && ret["initial"]["data"]
if ret["header"]["手番"]
ret["initial"]["data"]["color"] = "下先".index(ret["header"]["手番"]) >= 0 ? 0 : 1
ret["header"].delete("手番")
else
ret["initial"]["data"]["color"] = 0
end
ret["initial"]["data"]["hands"] = [
make_hand(ret["header"]["先手の持駒"] || ret["header"]["下手の持駒"]),
make_hand(ret["header"]["後手の持駒"] || ret["header"]["上手の持駒"])
]
%w(先手の持駒 下手の持駒 後手の持駒 上手の持駒).each do |key|
ret["header"].delete(key)
end
end
transform_root_forks(forks, moves)
ret
end
def transform_root_forks(forks, moves)
fork_stack = [{ "te" => 0, "moves" => moves }]
forks.each do |f|
now_fork = f
_fork = fork_stack.pop
_fork = fork_stack.pop while _fork["te"] > now_fork["te"]
move = _fork["moves"][now_fork["te"] - _fork["te"]]
move["forks"] ||= []
move["forks"] << now_fork["moves"]
fork_stack << _fork
fork_stack << now_fork
end
end
def transform_initialboard(lines)
board = []
9.times do |i|
line = []
9.times do |j|
line << lines[j][8 - i]
end
board << line
end
{ "preset" => "OTHER", "data" => { "board" => board } }
end
def transform_fugou(pl, pi, sou, dou, pro, da)
ret = { "piece" => pi }
if pl["same"]
ret["same"] = true
else
ret["to"] = pl
end
ret["promote"] = (pro == "成") if pro
if da
ret["relative"] = "H"
else
rel = soutai2relative(sou) + dousa2relative(dou)
ret["relative"] = rel unless rel.empty?
end
ret
end
def zen2n(s)
"0123456789".index(s)
end
def kan2n(s)
"〇一二三四五六七八九".index(s)
end
def kan2n2(s)
case s.length
when 1
"〇一二三四五六七八九十".index(s)
when 2
"〇一二三四五六七八九十".index(s[1]) + 10
else
raise "21以上の数値に対応していません"
end
end
def kind2csa(kind)
if kind[0] == "成"
{
"香" => "NY",
"桂" => "NK",
"銀" => "NG"
}[kind[1]]
else
{
"歩" => "FU",
"香" => "KY",
"桂" => "KE",
"銀" => "GI",
"金" => "KI",
"角" => "KA",
"飛" => "HI",
"玉" => "OU",
"王" => "OU",
"と" => "TO",
"杏" => "NY",
"圭" => "NK",
"全" => "NG",
"馬" => "UM",
"竜" => "RY",
"龍" => "RY"
}[kind]
end
end
def soutai2relative(str)
{
"左" => "L",
"直" => "C",
"右" => "R"
}[str] || ""
end
def dousa2relative(str)
{
"上" => "U",
"寄" => "M",
"引" => "D"
}[str] || ""
end
def preset2str(preset)
{
"平手" => "HIRATE",
"香落ち" => "KY",
"右香落ち" => "KY_R",
"角落ち" => "KA",
"飛車落ち" => "HI",
"飛香落ち" => "HIKY",
"二枚落ち" => "2",
"三枚落ち" => "3",
"四枚落ち" => "4",
"五枚落ち" => "5",
"左五枚落ち" => "5_L",
"六枚落ち" => "6",
"八枚落ち" => "8",
"十枚落ち" => "10",
"その他" => "OTHER"
}[preset.gsub(/\s/, "")]
end
def make_hand(str)
ret = { "FU" => 0, "KY" => 0, "KE" => 0, "GI" => 0, "KI" => 0, "KA" => 0, "HI" => 0 }
return ret if str.empty?
str.gsub(/ $/, "").split(" ").each do |kind|
next if kind.empty?
ret[kind2csa(kind[0])] = kind.length == 1 ? 1 : kan2n2(kind[1..-1])
end
ret
end
def eos?
@input[@current_pos].nil?
end
end
end
|
require 'json_structure/type'
require 'json_structure/object'
require 'json_structure/array'
require 'json_structure/number'
require 'json_structure/string'
require 'json_structure/null'
require 'json_structure/any_of'
require 'json_structure/value'
module JsonStructure
def self.build(&block)
Builder.new.instance_eval(&block)
end
class Builder
[
Type,
Object_,
Array,
Number,
Integer,
Float,
String,
Null,
AnyOf,
].each do |klass|
method = klass.name
.gsub(/([a-z])([A-Z])/, '\1_\2')
.gsub(/_+$/, '')
.downcase
define_method(method) do |*args|
klass.new(*args)
end
end
end
end
Fix broken code
require 'json_structure/type'
require 'json_structure/object'
require 'json_structure/array'
require 'json_structure/number'
require 'json_structure/string'
require 'json_structure/null'
require 'json_structure/any_of'
require 'json_structure/value'
module JsonStructure
def self.build(&block)
Builder.new.instance_eval(&block)
end
class Builder
%w[
Type
Object_
Array
Number
Integer
Float
String
Null
AnyOf
].each do |name|
klass = JsonStructure.const_get(name)
method = name
.gsub(/([a-z])([A-Z])/, '\1_\2')
.gsub(/_+$/, '')
.downcase
define_method(method) do |*args|
klass.new(*args)
end
end
end
end
|
module JumpStart
class Base
def initialize(input, output, args)
@input = input
@output = output
@project_name = args.shift
if args[0] != nil
@template_name = args.shift
elsif DEFAULT_TEMPLATE_NAME != nil
@template_name = DEFAULT_TEMPLATE_NAME
end
@existing_projects = []
end
def start
@output.puts
@output.puts "******************************************************************************************************************************************"
@output.puts
@output.puts "JumpStarting...."
@output.puts
@output.puts
lookup_existing_projects
check_project_name
check_template_name
load_config_options
check_install_paths
create_project
run_after_install
run_template_scripts
parse_template_dir
create_new_folders
create_new_files_from_whole_templates
populate_files_from_append_templates
populate_files_from_line_templates
run_after_jumpstart
end
private
def lookup_existing_projects
project_dirs = Dir.entries(JUMPSTART_TEMPLATES_PATH) -IGNORE_DIRS
project_dirs.each do |x|
if Dir.entries("#{JUMPSTART_TEMPLATES_PATH}/#{x}").include? "jumpstart_config"
if File.exists?("#{JUMPSTART_TEMPLATES_PATH}/#{x}/jumpstart_config/#{x}.yml")
@existing_projects << x
end
end
end
end
def check_project_name
if @project_name.nil? || @project_name.empty?
@output.puts
@output.puts "Enter a name for your project."
@project_name = @input.gets.chomp
if @project_name.length < 3
@output.puts
@output.puts "The name of your project must be at least 3 characters long."
check_project_name
end
end
end
def check_template_name
if @template_name.nil? || @template_name.empty?
jumpstart_options
else
unless @existing_projects.include? @template_name
@output.puts "A JumpStart template of the name #{@template_name} doesn't exist, would you like to create it?\nyes (y) / no (n)?"
@output.puts
input = @input.gets.chomp
if input == "yes" || input == "y"
@output.puts "creating JumpStart template #{@template_name}"
# TODO Create functionality for creating templates if they do not exist
elsif input == "no" || input == "n"
exit_jumpstart
end
end
end
end
def jumpstart_options
global_options = {'c' => 'config'}
templates = {}
@output.puts "******************************************************************************************************************************************"
@output.puts
@output.puts "jumpstart options!"
@output.puts
@output.puts "What would you like to do?"
@output.puts "To run an existing jumpstart enter it's number or it's name."
@output.puts
count = 0
@existing_projects.each do |x|
count += 1
templates[count.to_s] = x
@output.puts "#{count}: #{x}"
end
@output.puts
@output.puts "To create a new jumpstart enter a name for it."
@output.puts
@output.puts "To view/set jumpstart configuration options type 'config' or 'c'."
input = @input.gets.chomp
global_options.each do |x,y|
if input == 'c' || input == 'config'
configure_jumpstart
end
end
projects.each do |x,y|
if x == input
@template_name = projects.fetch(x)
elsif y == input
@template_name = y
end
end
end
def configure_jumpstart
# TODO Define configure_jumpstart method
@output.puts "******************************************************************************************************************************************"
@output.puts
@output.puts "jumpstart configuration."
@output.puts
# This should be removed when method is finished.
exit_jumpstart
end
def load_config_options
@config_file = YAML.load_file("#{JUMPSTART_TEMPLATES_PATH}/#{@template_name}/jumpstart_config/#{@template_name}.yml")
end
def check_install_paths
@install_path = @config_file[:install_path]
@template_path = "#{JUMPSTART_TEMPLATES_PATH}/#{@template_name}"
[@install_path, @template_path].each do |x|
begin
Dir.chdir(x)
rescue
@output.puts
@output.puts "The directory #{x} could not be found, or you do not have the correct permissions to access it."
exit_jumpstart
end
end
if Dir.exists?("#{@install_path}/#{@project_name}")
@output.puts
@output.puts "The directory #{@install_path}/#{@project_name} already exists. As this is the location you have specified for creating your new project jumpstart will now exit to avoid overwriting anything."
exit_jumpstart
end
end
def create_project
@install_command = @config_file[:install_command]
@install_command_options = @config_file[:install_command_options]
Dir.chdir(@install_path)
@output.puts "Executing command: #{@install_command} #{@project_name} #{@install_command_options}"
system "#{@install_command} #{@project_name} #{@install_command_options}"
end
def run_after_install
Dir.chdir("#{@install_path}/#{@project_name}")
@config_file[:run_after_install_command].each do |x|
@output.puts "Executing command: #{x}"
system "#{x}"
end
end
def run_template_scripts
# TODO Finish scripts method
scripts = Dir.entries("#{@template_path}/jumpstart_config") - IGNORE_DIRS
end
def parse_template_dir
@dir_list = []
file_list = []
@whole_templates = []
@append_templates = []
@line_templates = []
Find.find(@template_path) do |x|
case
when File.file?(x) && x !~ /\/jumpstart_config/ then
file_list << x.sub!(@template_path, '')
when File.directory?(x) && x !~ /\/jumpstart_config/ then
@dir_list << x.sub!(@template_path, '')
end
end
file_list.each do |file|
if file =~ /_\._{1}\w*/
@append_templates << file
elsif file =~ /_(\d+)\._{1}\w*/
@line_templates << file
else
@whole_templates << file
end
end
end
def create_new_folders
Dir.chdir(@install_path)
@dir_list.each do |x|
unless Dir.exists?("#{@install_path}/#{@project_name}#{x}")
Dir.mkdir("#{@install_path}/#{@project_name}#{x}")
end
end
end
def create_new_files_from_whole_templates
@whole_templates.each do |x|
FileUtils.touch("#{@install_path}/#{@project_name}#{x}")
file_contents = IO.readlines("#{@template_path}#{x}")
File.open("#{@install_path}/#{@project_name}#{x}", "w") do |y|
y.puts file_contents
end
end
end
# TODO Look into a way of being able to pass the 'remove last line => true' option via the naming convention of the templates
def populate_files_from_append_templates
@append_templates.each do |x|
FileUtils.touch("#{@install_path}/#{@project_name}#{x.sub(/_\._{1}/, '')}")
FileUtils.append_to_end_of_file("#{@template_path}#{x}", "#{@install_path}/#{@project_name}#{x.sub(/_\._{1}/, '')}")
end
end
def populate_files_from_line_templates
@line_templates.each do |x|
FileUtils.touch("#{@install_path}/#{@project_name}#{x.sub(/_(\d+)\._{1}/, '')}")
FileUtils.insert_text_at_line_number("#{@template_path}#{x}", "#{@install_path}/#{@project_name}#{x.sub(/_(\d+)\._{1}/, '')}", get_line_number(x))
end
end
def run_after_jumpstart
Dir.chdir("#{@install_path}/#{@project_name}")
@config_file[:run_after_jumpstart].each do |x|
@output.puts "Executing command: #{x}"
system "#{x}"
end
end
# TODO Look at the possibility of passing an nginx_config option via template naming.
# TODO Write a method to remove files from the install that you might not want
def get_line_number(file_name)
/_(?<number>\d+)\._\w*/ =~ file_name
number.to_i
end
def exit_jumpstart
@output.puts
@output.puts
@output.puts "Exiting JumpStart..."
@output.puts "Goodbye!"
@output.puts
@output.puts "******************************************************************************************************************************************"
@output.puts
exit
end
end
end
refactored scripts method
module JumpStart
class Base
def initialize(input, output, args)
@input = input
@output = output
@project_name = args.shift
if args[0] != nil
@template_name = args.shift
elsif DEFAULT_TEMPLATE_NAME != nil
@template_name = DEFAULT_TEMPLATE_NAME
end
@existing_projects = []
end
def start
@output.puts
@output.puts "******************************************************************************************************************************************"
@output.puts
@output.puts "JumpStarting...."
@output.puts
@output.puts
lookup_existing_projects
check_project_name
check_template_name
load_config_options
check_install_paths
create_project
run_scripts_from_yaml(:run_after_install_command)
run_template_scripts
parse_template_dir
create_new_folders
create_new_files_from_whole_templates
populate_files_from_append_templates
populate_files_from_line_templates
run_scripts_from_yaml(:run_after_jumpstart)
end
private
def lookup_existing_projects
project_dirs = Dir.entries(JUMPSTART_TEMPLATES_PATH) -IGNORE_DIRS
project_dirs.each do |x|
if Dir.entries("#{JUMPSTART_TEMPLATES_PATH}/#{x}").include? "jumpstart_config"
if File.exists?("#{JUMPSTART_TEMPLATES_PATH}/#{x}/jumpstart_config/#{x}.yml")
@existing_projects << x
end
end
end
end
def check_project_name
if @project_name.nil? || @project_name.empty?
@output.puts
@output.puts "Enter a name for your project."
@project_name = @input.gets.chomp
if @project_name.length < 3
@output.puts
@output.puts "The name of your project must be at least 3 characters long."
check_project_name
end
end
end
def check_template_name
if @template_name.nil? || @template_name.empty?
jumpstart_options
else
unless @existing_projects.include? @template_name
@output.puts "A JumpStart template of the name #{@template_name} doesn't exist, would you like to create it?\nyes (y) / no (n)?"
@output.puts
input = @input.gets.chomp
if input == "yes" || input == "y"
@output.puts "creating JumpStart template #{@template_name}"
# TODO Create functionality for creating templates if they do not exist
elsif input == "no" || input == "n"
exit_jumpstart
end
end
end
end
def jumpstart_options
global_options = {'c' => 'config'}
templates = {}
@output.puts "******************************************************************************************************************************************"
@output.puts
@output.puts "jumpstart options!"
@output.puts
@output.puts "What would you like to do?"
@output.puts "To run an existing jumpstart enter it's number or it's name."
@output.puts
count = 0
@existing_projects.each do |x|
count += 1
templates[count.to_s] = x
@output.puts "#{count}: #{x}"
end
@output.puts
@output.puts "To create a new jumpstart enter a name for it."
@output.puts
@output.puts "To view/set jumpstart configuration options type 'config' or 'c'."
input = @input.gets.chomp
global_options.each do |x,y|
if input == 'c' || input == 'config'
configure_jumpstart
end
end
projects.each do |x,y|
if x == input
@template_name = projects.fetch(x)
elsif y == input
@template_name = y
end
end
end
def configure_jumpstart
# TODO Define configure_jumpstart method
@output.puts "******************************************************************************************************************************************"
@output.puts
@output.puts "jumpstart configuration."
@output.puts
# This should be removed when method is finished.
exit_jumpstart
end
def load_config_options
@config_file = YAML.load_file("#{JUMPSTART_TEMPLATES_PATH}/#{@template_name}/jumpstart_config/#{@template_name}.yml")
end
def check_install_paths
@install_path = @config_file[:install_path]
@template_path = "#{JUMPSTART_TEMPLATES_PATH}/#{@template_name}"
[@install_path, @template_path].each do |x|
begin
Dir.chdir(x)
rescue
@output.puts
@output.puts "The directory #{x} could not be found, or you do not have the correct permissions to access it."
exit_jumpstart
end
end
if Dir.exists?("#{@install_path}/#{@project_name}")
@output.puts
@output.puts "The directory #{@install_path}/#{@project_name} already exists. As this is the location you have specified for creating your new project jumpstart will now exit to avoid overwriting anything."
exit_jumpstart
end
end
def create_project
@install_command = @config_file[:install_command]
@install_command_options = @config_file[:install_command_options]
Dir.chdir(@install_path)
@output.puts "Executing command: #{@install_command} #{@project_name} #{@install_command_options}"
system "#{@install_command} #{@project_name} #{@install_command_options}"
end
def run_template_scripts
# TODO Finish scripts method
scripts = Dir.entries("#{@template_path}/jumpstart_config") - IGNORE_DIRS
end
def parse_template_dir
@dir_list = []
file_list = []
@whole_templates = []
@append_templates = []
@line_templates = []
Find.find(@template_path) do |x|
case
when File.file?(x) && x !~ /\/jumpstart_config/ then
file_list << x.sub!(@template_path, '')
when File.directory?(x) && x !~ /\/jumpstart_config/ then
@dir_list << x.sub!(@template_path, '')
end
end
file_list.each do |file|
if file =~ /_\._{1}\w*/
@append_templates << file
elsif file =~ /_(\d+)\._{1}\w*/
@line_templates << file
else
@whole_templates << file
end
end
end
def create_new_folders
Dir.chdir(@install_path)
@dir_list.each do |x|
unless Dir.exists?("#{@install_path}/#{@project_name}#{x}")
Dir.mkdir("#{@install_path}/#{@project_name}#{x}")
end
end
end
def create_new_files_from_whole_templates
@whole_templates.each do |x|
FileUtils.touch("#{@install_path}/#{@project_name}#{x}")
file_contents = IO.readlines("#{@template_path}#{x}")
File.open("#{@install_path}/#{@project_name}#{x}", "w") do |y|
y.puts file_contents
end
end
end
# TODO Look into a way of being able to pass the 'remove last line => true' option via the naming convention of the templates
def populate_files_from_append_templates
@append_templates.each do |x|
FileUtils.touch("#{@install_path}/#{@project_name}#{x.sub(/_\._{1}/, '')}")
FileUtils.append_to_end_of_file("#{@template_path}#{x}", "#{@install_path}/#{@project_name}#{x.sub(/_\._{1}/, '')}")
end
end
def populate_files_from_line_templates
@line_templates.each do |x|
FileUtils.touch("#{@install_path}/#{@project_name}#{x.sub(/_(\d+)\._{1}/, '')}")
FileUtils.insert_text_at_line_number("#{@template_path}#{x}", "#{@install_path}/#{@project_name}#{x.sub(/_(\d+)\._{1}/, '')}", get_line_number(x))
end
end
def run_scripts_from_yaml(script_name)
Dir.chdir("#{@install_path}/#{@project_name}")
@config_file[script_name].each do |x|
@output.puts "Executing command: #{x}"
system "#{x}"
end
end
# TODO Look at the possibility of passing an nginx_config option via template naming.
# TODO Write a method to remove files from the install that you might not want
def get_line_number(file_name)
/_(?<number>\d+)\._\w*/ =~ file_name
number.to_i
end
def exit_jumpstart
@output.puts
@output.puts
@output.puts "Exiting JumpStart..."
@output.puts "Goodbye!"
@output.puts
@output.puts "******************************************************************************************************************************************"
@output.puts
exit
end
end
end |
# frozen_string_literal: true
require "kafka/consumer_group"
require "kafka/offset_manager"
require "kafka/fetcher"
require "kafka/pause"
module Kafka
# A client that consumes messages from a Kafka cluster in coordination with
# other clients.
#
# A Consumer subscribes to one or more Kafka topics; all consumers with the
# same *group id* then agree on who should read from the individual topic
# partitions. When group members join or leave, the group synchronizes,
# making sure that all partitions are assigned to a single member, and that
# all members have some partitions to read from.
#
# ## Example
#
# A simple producer that simply writes the messages it consumes to the
# console.
#
# require "kafka"
#
# kafka = Kafka.new(["kafka1:9092", "kafka2:9092"])
#
# # Create a new Consumer instance in the group `my-group`:
# consumer = kafka.consumer(group_id: "my-group")
#
# # Subscribe to a Kafka topic:
# consumer.subscribe("messages")
#
# # Loop forever, reading in messages from all topics that have been
# # subscribed to.
# consumer.each_message do |message|
# puts message.topic
# puts message.partition
# puts message.key
# puts message.headers
# puts message.value
# puts message.offset
# end
#
class Consumer
def initialize(cluster:, logger:, instrumenter:, group:, fetcher:, offset_manager:, session_timeout:, heartbeat:, refresh_topic_interval: 0)
@cluster = cluster
@logger = TaggedLogger.new(logger)
@instrumenter = instrumenter
@group = group
@offset_manager = offset_manager
@session_timeout = session_timeout
@fetcher = fetcher
@heartbeat = heartbeat
@refresh_topic_interval = refresh_topic_interval
@pauses = Hash.new {|h, k|
h[k] = Hash.new {|h2, k2|
h2[k2] = Pause.new
}
}
# Whether or not the consumer is currently consuming messages.
@running = false
# Hash containing offsets for each topic and partition that has the
# automatically_mark_as_processed feature disabled. Offset manager is only active
# when everything is suppose to happen automatically. Otherwise we need to keep track of the
# offset manually in memory for all the time
# The key structure for this equals an array with topic and partition [topic, partition]
# The value is equal to the offset of the last message we've received
# @note It won't be updated in case user marks message as processed, because for the case
# when user commits message other than last in a batch, this would make ruby-kafka refetch
# some already consumed messages
@current_offsets = Hash.new { |h, k| h[k] = {} }
# Map storing subscribed topics with their configuration
@subscribed_topics = Concurrent::Map.new
# Set storing topics that matched topics in @subscribed_topics
@matched_topics = Set.new
# Whether join_group must be executed again because new topics are added
@join_group_for_new_topics = false
end
# Subscribes the consumer to a topic.
#
# Typically you either want to start reading messages from the very
# beginning of the topic's partitions or you simply want to wait for new
# messages to be written. In the former case, set `start_from_beginning`
# to true (the default); in the latter, set it to false.
#
# @param topic_or_regex [String, Regexp] subscribe to single topic with a string
# or multiple topics matching a regex.
# @param default_offset [Symbol] whether to start from the beginning or the
# end of the topic's partitions. Deprecated.
# @param start_from_beginning [Boolean] whether to start from the beginning
# of the topic or just subscribe to new messages being produced. This
# only applies when first consuming a topic partition – once the consumer
# has checkpointed its progress, it will always resume from the last
# checkpoint.
# @param max_bytes_per_partition [Integer] the maximum amount of data fetched
# from a single partition at a time.
# @return [nil]
def subscribe(topic_or_regex, default_offset: nil, start_from_beginning: true, max_bytes_per_partition: 1048576)
default_offset ||= start_from_beginning ? :earliest : :latest
@subscribed_topics[topic_or_regex] = {
default_offset: default_offset,
start_from_beginning: start_from_beginning,
max_bytes_per_partition: max_bytes_per_partition
}
scan_for_subscribing
nil
end
# Stop the consumer.
#
# The consumer will finish any in-progress work and shut down.
#
# @return [nil]
def stop
@running = false
@fetcher.stop
@cluster.disconnect
end
# Pause processing of a specific topic partition.
#
# When a specific message causes the processor code to fail, it can be a good
# idea to simply pause the partition until the error can be resolved, allowing
# the rest of the partitions to continue being processed.
#
# If the `timeout` argument is passed, the partition will automatically be
# resumed when the timeout expires. If `exponential_backoff` is enabled, each
# subsequent pause will cause the timeout to double until a message from the
# partition has been successfully processed.
#
# @param topic [String]
# @param partition [Integer]
# @param timeout [nil, Integer] the number of seconds to pause the partition for,
# or `nil` if the partition should not be automatically resumed.
# @param max_timeout [nil, Integer] the maximum number of seconds to pause for,
# or `nil` if no maximum should be enforced.
# @param exponential_backoff [Boolean] whether to enable exponential backoff.
# @return [nil]
def pause(topic, partition, timeout: nil, max_timeout: nil, exponential_backoff: false)
if max_timeout && !exponential_backoff
raise ArgumentError, "`max_timeout` only makes sense when `exponential_backoff` is enabled"
end
pause_for(topic, partition).pause!(
timeout: timeout,
max_timeout: max_timeout,
exponential_backoff: exponential_backoff,
)
end
# Resume processing of a topic partition.
#
# @see #pause
# @param topic [String]
# @param partition [Integer]
# @return [nil]
def resume(topic, partition)
pause_for(topic, partition).resume!
# During re-balancing we might have lost the paused partition. Check if partition is still in group before seek.
seek_to_next(topic, partition) if @group.assigned_to?(topic, partition)
end
# Whether the topic partition is currently paused.
#
# @see #pause
# @param topic [String]
# @param partition [Integer]
# @return [Boolean] true if the partition is paused, false otherwise.
def paused?(topic, partition)
pause = pause_for(topic, partition)
pause.paused? && !pause.expired?
end
# Fetches and enumerates the messages in the topics that the consumer group
# subscribes to.
#
# Each message is yielded to the provided block. If the block returns
# without raising an exception, the message will be considered successfully
# processed. At regular intervals the offset of the most recent successfully
# processed message in each partition will be committed to the Kafka
# offset store. If the consumer crashes or leaves the group, the group member
# that is tasked with taking over processing of these partitions will resume
# at the last committed offsets.
#
# @param min_bytes [Integer] the minimum number of bytes to read before
# returning messages from each broker; if `max_wait_time` is reached, this
# is ignored.
# @param max_bytes [Integer] the maximum number of bytes to read before
# returning messages from each broker.
# @param max_wait_time [Integer, Float] the maximum duration of time to wait before
# returning messages from each broker, in seconds.
# @param automatically_mark_as_processed [Boolean] whether to automatically
# mark a message as successfully processed when the block returns
# without an exception. Once marked successful, the offsets of processed
# messages can be committed to Kafka.
# @yieldparam message [Kafka::FetchedMessage] a message fetched from Kafka.
# @raise [Kafka::ProcessingError] if there was an error processing a message.
# The original exception will be returned by calling `#cause` on the
# {Kafka::ProcessingError} instance.
# @return [nil]
def each_message(min_bytes: 1, max_bytes: 10485760, max_wait_time: 1, automatically_mark_as_processed: true)
# Forward the fetch tuning options to the background fetcher thread.
@fetcher.configure(
min_bytes: min_bytes,
max_bytes: max_bytes,
max_wait_time: max_wait_time,
)
# Poll loop: runs until #stop is called or an unrecoverable error occurs.
consumer_loop do
refresh_topic_list_if_enabled
batches = fetch_batches
batches.each do |batch|
batch.messages.each do |message|
notification = {
topic: message.topic,
partition: message.partition,
offset: message.offset,
offset_lag: batch.highwater_mark_offset - message.offset - 1,
create_time: message.create_time,
key: message.key,
value: message.value,
headers: message.headers
}
# Instrument an event immediately so that subscribers don't have to wait until
# the block is completed.
@instrumenter.instrument("start_process_message.consumer", notification)
@instrumenter.instrument("process_message.consumer", notification) do
begin
# Control records are Kafka-internal (e.g. transaction markers) and are not yielded.
yield message unless message.is_control_record
# Remember the last seen offset so a later seek resumes after it.
@current_offsets[message.topic][message.partition] = message.offset
rescue => e
location = "#{message.topic}/#{message.partition} at offset #{message.offset}"
backtrace = e.backtrace.join("\n")
@logger.error "Exception raised when processing #{location} -- #{e.class}: #{e}\n#{backtrace}"
# Wrap the failure; the original exception stays available via #cause.
raise ProcessingError.new(message.topic, message.partition, message.offset)
end
end
mark_message_as_processed(message) if automatically_mark_as_processed
@offset_manager.commit_offsets_if_necessary
trigger_heartbeat
# Bail out promptly once a shutdown has been requested.
return if shutting_down?
end
# We've successfully processed a batch from the partition, so we can clear
# the pause.
pause_for(batch.topic, batch.partition).reset!
end
# We may not have received any messages, but it's still a good idea to
# commit offsets if we've processed messages in the last set of batches.
# This also ensures the offsets are retained if we haven't read any messages
# since the offset retention period has elapsed.
@offset_manager.commit_offsets_if_necessary
end
end
# Fetches and enumerates the messages in the topics that the consumer group
# subscribes to.
#
# Each batch of messages is yielded to the provided block. If the block returns
# without raising an exception, the batch will be considered successfully
# processed. At regular intervals the offset of the most recent successfully
# processed message batch in each partition will be committed to the Kafka
# offset store. If the consumer crashes or leaves the group, the group member
# that is tasked with taking over processing of these partitions will resume
# at the last committed offsets.
#
# @param min_bytes [Integer] the minimum number of bytes to read before
# returning messages from each broker; if `max_wait_time` is reached, this
# is ignored.
# @param max_bytes [Integer] the maximum number of bytes to read before
# returning messages from each broker.
# @param max_wait_time [Integer, Float] the maximum duration of time to wait before
# returning messages from each broker, in seconds.
# @param automatically_mark_as_processed [Boolean] whether to automatically
# mark a batch's messages as successfully processed when the block returns
# without an exception. Once marked successful, the offsets of processed
# messages can be committed to Kafka.
# @yieldparam batch [Kafka::FetchedBatch] a message batch fetched from Kafka.
# @raise [Kafka::ProcessingError] if there was an error processing a batch.
# The original exception will be returned by calling `#cause` on the
# {Kafka::ProcessingError} instance.
# @return [nil]
def each_batch(min_bytes: 1, max_bytes: 10485760, max_wait_time: 1, automatically_mark_as_processed: true)
# Forward the fetch tuning options to the background fetcher thread.
@fetcher.configure(
min_bytes: min_bytes,
max_bytes: max_bytes,
max_wait_time: max_wait_time,
)
consumer_loop do
refresh_topic_list_if_enabled
batches = fetch_batches
batches.each do |batch|
unless batch.empty?
# Hide control records from the block; the originals are restored in the ensure below.
raw_messages = batch.messages
batch.messages = raw_messages.reject(&:is_control_record)
notification = {
topic: batch.topic,
partition: batch.partition,
last_offset: batch.last_offset,
last_create_time: batch.messages.last.try(:create_time),
offset_lag: batch.offset_lag,
highwater_mark_offset: batch.highwater_mark_offset,
message_count: batch.messages.count,
}
# Instrument an event immediately so that subscribers don't have to wait until
# the block is completed.
@instrumenter.instrument("start_process_batch.consumer", notification)
@instrumenter.instrument("process_batch.consumer", notification) do
begin
yield batch
@current_offsets[batch.topic][batch.partition] = batch.last_offset unless batch.unknown_last_offset?
rescue => e
offset_range = (batch.first_offset..batch.last_offset || batch.highwater_mark_offset)
location = "#{batch.topic}/#{batch.partition} in offset range #{offset_range}"
backtrace = e.backtrace.join("\n")
@logger.error "Exception raised when processing #{location} -- #{e.class}: #{e}\n#{backtrace}"
raise ProcessingError.new(batch.topic, batch.partition, offset_range)
ensure
batch.messages = raw_messages
end
end
# Marking the last message marks the entire batch as processed.
mark_message_as_processed(batch.messages.last) if automatically_mark_as_processed
# We've successfully processed a batch from the partition, so we can clear
# the pause.
pause_for(batch.topic, batch.partition).reset!
end
@offset_manager.commit_offsets_if_necessary
trigger_heartbeat
return if shutting_down?
end
# We may not have received any messages, but it's still a good idea to
# commit offsets if we've processed messages in the last set of batches.
# This also ensures the offsets are retained if we haven't read any messages
# since the offset retention period has elapsed.
@offset_manager.commit_offsets_if_necessary
end
end
# Move the consumer's position in a topic partition to the specified offset.
#
# Note that this has to be done prior to calling {#each_message} or {#each_batch}
# and only has an effect if the consumer is assigned the partition. Typically,
# you will want to do this in every consumer group member in order to make sure
# that the member that's assigned the partition knows where to start.
#
# @param topic [String]
# @param partition [Integer]
# @param offset [Integer]
# @return [nil]
def seek(topic, partition, offset)
@offset_manager.seek_to(topic, partition, offset)
end
# Forces an immediate commit of all processed offsets.
def commit_offsets
@offset_manager.commit_offsets
end
# Marks the given message (and implicitly all earlier ones in its partition)
# as processed, making its offset eligible for the next commit.
def mark_message_as_processed(message)
@offset_manager.mark_as_processed(message.topic, message.partition, message.offset)
end
# Sends a heartbeat to the group coordinator if one is due.
def trigger_heartbeat
@heartbeat.trigger
end
# Sends a heartbeat to the group coordinator unconditionally.
def trigger_heartbeat!
@heartbeat.trigger!
end
# Aliases for the external API compatibility
alias send_heartbeat_if_necessary trigger_heartbeat
alias send_heartbeat trigger_heartbeat!
private
# Runs the given block repeatedly while the consumer is running, translating
# recoverable Kafka errors into re-joins/retries. On the way out (normal
# return or crash) it stops the fetcher, commits final offsets, and leaves
# the group so the remaining members can rebalance quickly.
def consumer_loop
@running = true
@logger.push_tags(@group.to_s)
@fetcher.start
while running?
begin
@instrumenter.instrument("loop.consumer") do
yield
end
rescue HeartbeatError
make_final_offsets_commit!
join_group if running?
rescue OffsetCommitError
join_group if running?
rescue RebalanceInProgress
@logger.warn "Group rebalance in progress, re-joining..."
join_group if running?
rescue FetchError, NotLeaderForPartition, UnknownTopicOrPartition
# Cluster metadata is stale; it will be refreshed on the next iteration.
@cluster.mark_as_stale!
rescue LeaderNotAvailable => e
@logger.error "Leader not available; waiting 1s before retrying"
@cluster.mark_as_stale!
sleep 1
rescue ConnectionError => e
@logger.error "Connection error #{e.class}: #{e.message}"
@cluster.mark_as_stale!
rescue SignalException => e
@logger.warn "Received signal #{e.message}, shutting down"
@running = false
end
end
ensure
@fetcher.stop
# In order to quickly have the consumer group re-balance itself, it's
# important that members explicitly tell Kafka when they're leaving.
make_final_offsets_commit!
@group.leave rescue nil
@running = false
@logger.pop_tags
end
# Commits offsets with a bounded number of retries. Any remaining Kafka
# error is only logged, since we are shutting down anyway.
def make_final_offsets_commit!(attempts = 3)
@offset_manager.commit_offsets
rescue ConnectionError, OffsetCommitError, EOFError
# It's important to make sure final offsets commit is done
# As otherwise messages that have been processed after last auto-commit
# will be processed again and that may be huge amount of messages
return if attempts.zero?
@logger.error "Retrying to make final offsets commit (#{attempts} attempts left)"
sleep(0.1)
make_final_offsets_commit!(attempts - 1)
rescue Kafka::Error => e
@logger.error "Encountered error while shutting down; #{e.class}: #{e.message}"
end
# (Re-)joins the consumer group and reconciles in-memory offset state with
# the new partition assignment, then points the fetcher at the right offsets.
def join_group
@join_group_for_new_topics = false
old_generation_id = @group.generation_id
@group.join
if old_generation_id && @group.generation_id != old_generation_id + 1
# We've been out of the group for at least an entire generation, no
# sense in trying to hold on to offset data
clear_current_offsets
@offset_manager.clear_offsets
else
# After rejoining the group we may have been assigned a new set of
# partitions. Keeping the old offset commits around forever would risk
# having the consumer go back and reprocess messages if it's assigned
# a partition it used to be assigned to way back. For that reason, we
# only keep commits for the partitions that we're still assigned.
clear_current_offsets(excluding: @group.assigned_partitions)
@offset_manager.clear_offsets_excluding(@group.assigned_partitions)
end
@fetcher.reset
@group.assigned_partitions.each do |topic, partitions|
partitions.each do |partition|
if paused?(topic, partition)
@logger.warn "Not fetching from #{topic}/#{partition} due to pause"
else
seek_to_next(topic, partition)
end
end
end
end
# Points the fetcher at the next offset to read for a topic partition.
# When we have tracked an offset in memory (automatic marking disabled),
# resume right after it; otherwise ask the offset manager, which knows the
# last committed offset (it may not be 0).
def seek_to_next(topic, partition)
  tracked = @current_offsets[topic]
  offset =
    if tracked.key?(partition)
      tracked[partition] + 1
    else
      @offset_manager.next_offset_for(topic, partition)
    end
  @fetcher.seek(topic, partition, offset)
end
# Emits a pause-status event for every known topic partition, then lifts any
# pause whose timeout has run out.
def resume_paused_partitions!
  @pauses.each do |topic, by_partition|
    by_partition.each do |partition, state|
      @instrumenter.instrument("pause_status.consumer", {
        topic: topic,
        partition: partition,
        duration: state.pause_duration,
      })
      next unless state.paused? && state.expired?
      @logger.info "Automatically resuming partition #{topic}/#{partition}, pause timeout expired"
      resume(topic, partition)
    end
  end
end
# Re-scans subscriptions for new topics, at most once per
# @refresh_topic_interval seconds. A non-positive interval disables the
# refresh entirely.
def refresh_topic_list_if_enabled
  return unless @refresh_topic_interval > 0
  return if @refreshed_at && Time.now < @refreshed_at + @refresh_topic_interval
  scan_for_subscribing
  @refreshed_at = Time.now
end
# Polls the fetcher for the next set of batches, joining the group and
# resuming expired pauses first. Returns an empty array when idle or
# shutting down. Out-of-range offsets are reset to the default and retried;
# connection problems are re-raised as FetchError.
def fetch_batches
# Return early if the consumer has been stopped.
return [] if shutting_down?
join_group if !@group.member? || @join_group_for_new_topics
trigger_heartbeat
resume_paused_partitions!
if !@fetcher.data?
@logger.debug "No batches to process"
sleep 2
[]
else
tag, message = @fetcher.poll
case tag
when :batches
# make sure any old batches, fetched prior to the completion of a consumer group sync,
# are only processed if the batches are from brokers for which this broker is still responsible.
message.select { |batch| @group.assigned_to?(batch.topic, batch.partition) }
when :exception
raise message
end
end
rescue OffsetOutOfRange => e
@logger.error "Invalid offset #{e.offset} for #{e.topic}/#{e.partition}, resetting to default offset"
@offset_manager.seek_to_default(e.topic, e.partition)
retry
rescue ConnectionError => e
@logger.error "Connection error while fetching messages: #{e}"
raise FetchError, e
end
# Returns the Pause state for a topic partition; @pauses lazily creates one
# per topic/partition via its nested default procs.
def pause_for(topic, partition)
  partitions = @pauses[topic]
  partitions[partition]
end
# True while the consumer loop is active.
def running?
@running
end
# Opposite of #running?; used to abort work promptly after #stop.
def shutting_down?
!running?
end
# Drops in-memory offsets for partitions we are no longer assigned.
# Partitions listed in +excluding+ (topic => [partitions]) keep their
# tracked offsets; everything else is removed.
def clear_current_offsets(excluding: {})
  @current_offsets.each do |topic, partitions|
    kept = excluding.fetch(topic, [])
    partitions.keep_if { |partition, _offset| kept.include?(partition) }
  end
end
# Re-evaluates every registered subscription: regex subscriptions are
# expanded against the cluster's current topic list, plain topic names are
# subscribed directly.
def scan_for_subscribing
  @subscribed_topics.keys.each do |topic_or_regex|
    config = @subscribed_topics[topic_or_regex]
    args = config.values_at(:default_offset, :start_from_beginning, :max_bytes_per_partition)
    if topic_or_regex.is_a?(Regexp)
      subscribe_to_regex(topic_or_regex, *args)
    else
      subscribe_to_topic(topic_or_regex, *args)
    end
  end
end
# Subscribes to every existing cluster topic whose name matches the pattern.
def subscribe_to_regex(topic_regex, default_offset, start_from_beginning, max_bytes_per_partition)
  matching = cluster_topics.select { |name| name =~ topic_regex }
  matching.each do |name|
    subscribe_to_topic(name, default_offset, start_from_beginning, max_bytes_per_partition)
  end
end
# Wires a single topic into the group, offset manager, and fetcher.
# Idempotent: each topic is only wired up once.
def subscribe_to_topic(topic, default_offset, start_from_beginning, max_bytes_per_partition)
  return if @matched_topics.include?(topic)
  @matched_topics << topic
  # A new topic requires a fresh group join so its partitions get assigned.
  @join_group_for_new_topics = true
  @group.subscribe(topic)
  @offset_manager.set_default_offset(topic, default_offset)
  @fetcher.subscribe(topic, max_bytes_per_partition: max_bytes_per_partition)
  @cluster.mark_as_stale!
end
# Lists topics from the cluster, retrying exactly once on a connection
# error (after marking the cluster metadata stale so it gets refreshed).
def cluster_topics
  retried = false
  begin
    @cluster.list_topics
  rescue Kafka::ConnectionError
    @cluster.mark_as_stale!
    raise if retried
    retried = true
    retry
  end
end
end
end
[Chore] Refactor code in consumer
# frozen_string_literal: true
require "kafka/consumer_group"
require "kafka/offset_manager"
require "kafka/fetcher"
require "kafka/pause"
module Kafka
# A client that consumes messages from a Kafka cluster in coordination with
# other clients.
#
# A Consumer subscribes to one or more Kafka topics; all consumers with the
# same *group id* then agree on who should read from the individual topic
# partitions. When group members join or leave, the group synchronizes,
# making sure that all partitions are assigned to a single member, and that
# all members have some partitions to read from.
#
# ## Example
#
# A simple producer that simply writes the messages it consumes to the
# console.
#
# require "kafka"
#
# kafka = Kafka.new(["kafka1:9092", "kafka2:9092"])
#
# # Create a new Consumer instance in the group `my-group`:
# consumer = kafka.consumer(group_id: "my-group")
#
# # Subscribe to a Kafka topic:
# consumer.subscribe("messages")
#
# # Loop forever, reading in messages from all topics that have been
# # subscribed to.
# consumer.each_message do |message|
# puts message.topic
# puts message.partition
# puts message.key
# puts message.headers
# puts message.value
# puts message.offset
# end
#
class Consumer
# Wires together the collaborators a consumer needs. All dependencies are
# injected; this class holds only coordination state.
def initialize(cluster:, logger:, instrumenter:, group:, fetcher:, offset_manager:, session_timeout:, heartbeat:, refresh_topic_interval: 0)
@cluster = cluster
@logger = TaggedLogger.new(logger)
@instrumenter = instrumenter
@group = group
@offset_manager = offset_manager
@session_timeout = session_timeout
@fetcher = fetcher
@heartbeat = heartbeat
@refresh_topic_interval = refresh_topic_interval
# Lazily builds one Pause state object per topic/partition.
@pauses = Hash.new {|h, k|
h[k] = Hash.new {|h2, k2|
h2[k2] = Pause.new
}
}
# Whether or not the consumer is currently consuming messages.
@running = false
# Hash containing offsets for each topic and partition that has the
# automatically_mark_as_processed feature disabled. Offset manager is only active
# when everything is supposed to happen automatically. Otherwise we need to keep track of the
# offset manually in memory for all the time
# The key structure for this equals an array with topic and partition [topic, partition]
# The value is equal to the offset of the last message we've received
# @note It won't be updated in case user marks message as processed, because for the case
# when user commits message other than last in a batch, this would make ruby-kafka refetch
# some already consumed messages
@current_offsets = Hash.new { |h, k| h[k] = {} }
# Map storing subscribed topics with their configuration
@subscribed_topics = Concurrent::Map.new
# Set storing topics that matched topics in @subscribed_topics
@matched_topics = Set.new
# Whether join_group must be executed again because new topics are added
@join_group_for_new_topics = false
end
# Subscribes the consumer to a topic.
#
# Typically you either want to start reading messages from the very
# beginning of the topic's partitions or you simply want to wait for new
# messages to be written. In the former case, set `start_from_beginning`
# to true (the default); in the latter, set it to false.
#
# @param topic_or_regex [String, Regexp] subscribe to single topic with a string
# or multiple topics matching a regex.
# @param default_offset [Symbol] whether to start from the beginning or the
# end of the topic's partitions. Deprecated.
# @param start_from_beginning [Boolean] whether to start from the beginning
# of the topic or just subscribe to new messages being produced. This
# only applies when first consuming a topic partition – once the consumer
# has checkpointed its progress, it will always resume from the last
# checkpoint.
# @param max_bytes_per_partition [Integer] the maximum amount of data fetched
# from a single partition at a time.
# @return [nil]
# Registers a subscription and immediately scans for matching topics.
# An explicit `default_offset` wins; otherwise it is derived from
# `start_from_beginning` (:earliest reads history, :latest only new data).
def subscribe(topic_or_regex, default_offset: nil, start_from_beginning: true, max_bytes_per_partition: 1048576)
  offset = default_offset || (start_from_beginning ? :earliest : :latest)
  @subscribed_topics[topic_or_regex] = {
    default_offset: offset,
    start_from_beginning: start_from_beginning,
    max_bytes_per_partition: max_bytes_per_partition
  }
  scan_for_subscribing
  nil
end
# Stop the consumer.
#
# The consumer will finish any in-progress work and shut down.
#
# @return [nil]
def stop
# Flip the flag first so the consumer loop exits at the next check.
@running = false
@fetcher.stop
@cluster.disconnect
end
# Pause processing of a specific topic partition.
#
# When a specific message causes the processor code to fail, it can be a good
# idea to simply pause the partition until the error can be resolved, allowing
# the rest of the partitions to continue being processed.
#
# If the `timeout` argument is passed, the partition will automatically be
# resumed when the timeout expires. If `exponential_backoff` is enabled, each
# subsequent pause will cause the timeout to double until a message from the
# partition has been successfully processed.
#
# @param topic [String]
# @param partition [Integer]
# @param timeout [nil, Integer] the number of seconds to pause the partition for,
# or `nil` if the partition should not be automatically resumed.
# @param max_timeout [nil, Integer] the maximum number of seconds to pause for,
# or `nil` if no maximum should be enforced.
# @param exponential_backoff [Boolean] whether to enable exponential backoff.
# @return [nil]
def pause(topic, partition, timeout: nil, max_timeout: nil, exponential_backoff: false)
  # `max_timeout` only caps the exponential backoff, so it is meaningless
  # when backoff is disabled.
  if max_timeout && !exponential_backoff
    raise ArgumentError, "`max_timeout` only makes sense when `exponential_backoff` is enabled"
  end
  state = pause_for(topic, partition)
  state.pause!(
    timeout: timeout,
    max_timeout: max_timeout,
    exponential_backoff: exponential_backoff,
  )
end
# Resume processing of a topic partition.
#
# @see #pause
# @param topic [String]
# @param partition [Integer]
# @return [nil]
# Clears the pause state and, if we still own the partition, seeks the
# fetcher to the next offset so consumption restarts where it left off.
def resume(topic, partition)
pause_for(topic, partition).resume!
# During re-balancing we might have lost the paused partition. Check if partition is still in group before seek.
seek_to_next(topic, partition) if @group.assigned_to?(topic, partition)
end
# Whether the topic partition is currently paused.
#
# @see #pause
# @param topic [String]
# @param partition [Integer]
# @return [Boolean] true if the partition is paused, false otherwise.
def paused?(topic, partition)
  state = pause_for(topic, partition)
  # A pause whose timeout already elapsed no longer counts as paused.
  state.paused? && !state.expired?
end
# Fetches and enumerates the messages in the topics that the consumer group
# subscribes to.
#
# Each message is yielded to the provided block. If the block returns
# without raising an exception, the message will be considered successfully
# processed. At regular intervals the offset of the most recent successfully
# processed message in each partition will be committed to the Kafka
# offset store. If the consumer crashes or leaves the group, the group member
# that is tasked with taking over processing of these partitions will resume
# at the last committed offsets.
#
# @param min_bytes [Integer] the minimum number of bytes to read before
# returning messages from each broker; if `max_wait_time` is reached, this
# is ignored.
# @param max_bytes [Integer] the maximum number of bytes to read before
# returning messages from each broker.
# @param max_wait_time [Integer, Float] the maximum duration of time to wait before
# returning messages from each broker, in seconds.
# @param automatically_mark_as_processed [Boolean] whether to automatically
# mark a message as successfully processed when the block returns
# without an exception. Once marked successful, the offsets of processed
# messages can be committed to Kafka.
# @yieldparam message [Kafka::FetchedMessage] a message fetched from Kafka.
# @raise [Kafka::ProcessingError] if there was an error processing a message.
# The original exception will be returned by calling `#cause` on the
# {Kafka::ProcessingError} instance.
# @return [nil]
def each_message(min_bytes: 1, max_bytes: 10485760, max_wait_time: 1, automatically_mark_as_processed: true)
# Forward the fetch tuning options to the background fetcher thread.
@fetcher.configure(
min_bytes: min_bytes,
max_bytes: max_bytes,
max_wait_time: max_wait_time,
)
# Poll loop: runs until #stop is called or an unrecoverable error occurs.
consumer_loop do
batches = fetch_batches
batches.each do |batch|
batch.messages.each do |message|
notification = {
topic: message.topic,
partition: message.partition,
offset: message.offset,
offset_lag: batch.highwater_mark_offset - message.offset - 1,
create_time: message.create_time,
key: message.key,
value: message.value,
headers: message.headers
}
# Instrument an event immediately so that subscribers don't have to wait until
# the block is completed.
@instrumenter.instrument("start_process_message.consumer", notification)
@instrumenter.instrument("process_message.consumer", notification) do
begin
# Control records are Kafka-internal (e.g. transaction markers) and are not yielded.
yield message unless message.is_control_record
# Remember the last seen offset so a later seek resumes after it.
@current_offsets[message.topic][message.partition] = message.offset
rescue => e
location = "#{message.topic}/#{message.partition} at offset #{message.offset}"
backtrace = e.backtrace.join("\n")
@logger.error "Exception raised when processing #{location} -- #{e.class}: #{e}\n#{backtrace}"
# Wrap the failure; the original exception stays available via #cause.
raise ProcessingError.new(message.topic, message.partition, message.offset)
end
end
mark_message_as_processed(message) if automatically_mark_as_processed
@offset_manager.commit_offsets_if_necessary
trigger_heartbeat
# Bail out promptly once a shutdown has been requested.
return if shutting_down?
end
# We've successfully processed a batch from the partition, so we can clear
# the pause.
pause_for(batch.topic, batch.partition).reset!
end
# We may not have received any messages, but it's still a good idea to
# commit offsets if we've processed messages in the last set of batches.
# This also ensures the offsets are retained if we haven't read any messages
# since the offset retention period has elapsed.
@offset_manager.commit_offsets_if_necessary
end
end
# Fetches and enumerates the messages in the topics that the consumer group
# subscribes to.
#
# Each batch of messages is yielded to the provided block. If the block returns
# without raising an exception, the batch will be considered successfully
# processed. At regular intervals the offset of the most recent successfully
# processed message batch in each partition will be committed to the Kafka
# offset store. If the consumer crashes or leaves the group, the group member
# that is tasked with taking over processing of these partitions will resume
# at the last committed offsets.
#
# @param min_bytes [Integer] the minimum number of bytes to read before
# returning messages from each broker; if `max_wait_time` is reached, this
# is ignored.
# @param max_bytes [Integer] the maximum number of bytes to read before
# returning messages from each broker.
# @param max_wait_time [Integer, Float] the maximum duration of time to wait before
# returning messages from each broker, in seconds.
# @param automatically_mark_as_processed [Boolean] whether to automatically
# mark a batch's messages as successfully processed when the block returns
# without an exception. Once marked successful, the offsets of processed
# messages can be committed to Kafka.
# @yieldparam batch [Kafka::FetchedBatch] a message batch fetched from Kafka.
# @raise [Kafka::ProcessingError] if there was an error processing a batch.
# The original exception will be returned by calling `#cause` on the
# {Kafka::ProcessingError} instance.
# @return [nil]
def each_batch(min_bytes: 1, max_bytes: 10485760, max_wait_time: 1, automatically_mark_as_processed: true)
# Forward the fetch tuning options to the background fetcher thread.
@fetcher.configure(
min_bytes: min_bytes,
max_bytes: max_bytes,
max_wait_time: max_wait_time,
)
consumer_loop do
batches = fetch_batches
batches.each do |batch|
unless batch.empty?
# Hide control records from the block; the originals are restored in the ensure below.
raw_messages = batch.messages
batch.messages = raw_messages.reject(&:is_control_record)
notification = {
topic: batch.topic,
partition: batch.partition,
last_offset: batch.last_offset,
last_create_time: batch.messages.last.try(:create_time),
offset_lag: batch.offset_lag,
highwater_mark_offset: batch.highwater_mark_offset,
message_count: batch.messages.count,
}
# Instrument an event immediately so that subscribers don't have to wait until
# the block is completed.
@instrumenter.instrument("start_process_batch.consumer", notification)
@instrumenter.instrument("process_batch.consumer", notification) do
begin
yield batch
@current_offsets[batch.topic][batch.partition] = batch.last_offset unless batch.unknown_last_offset?
rescue => e
offset_range = (batch.first_offset..batch.last_offset || batch.highwater_mark_offset)
location = "#{batch.topic}/#{batch.partition} in offset range #{offset_range}"
backtrace = e.backtrace.join("\n")
@logger.error "Exception raised when processing #{location} -- #{e.class}: #{e}\n#{backtrace}"
raise ProcessingError.new(batch.topic, batch.partition, offset_range)
ensure
batch.messages = raw_messages
end
end
# Marking the last message marks the entire batch as processed.
mark_message_as_processed(batch.messages.last) if automatically_mark_as_processed
# We've successfully processed a batch from the partition, so we can clear
# the pause.
pause_for(batch.topic, batch.partition).reset!
end
@offset_manager.commit_offsets_if_necessary
trigger_heartbeat
return if shutting_down?
end
# We may not have received any messages, but it's still a good idea to
# commit offsets if we've processed messages in the last set of batches.
# This also ensures the offsets are retained if we haven't read any messages
# since the offset retention period has elapsed.
@offset_manager.commit_offsets_if_necessary
end
end
# Move the consumer's position in a topic partition to the specified offset.
#
# Note that this has to be done prior to calling {#each_message} or {#each_batch}
# and only has an effect if the consumer is assigned the partition. Typically,
# you will want to do this in every consumer group member in order to make sure
# that the member that's assigned the partition knows where to start.
#
# @param topic [String]
# @param partition [Integer]
# @param offset [Integer]
# @return [nil]
def seek(topic, partition, offset)
@offset_manager.seek_to(topic, partition, offset)
end
# Forces an immediate commit of all processed offsets.
def commit_offsets
@offset_manager.commit_offsets
end
# Marks the given message (and implicitly all earlier ones in its partition)
# as processed, making its offset eligible for the next commit.
def mark_message_as_processed(message)
@offset_manager.mark_as_processed(message.topic, message.partition, message.offset)
end
# Sends a heartbeat to the group coordinator if one is due.
def trigger_heartbeat
@heartbeat.trigger
end
# Sends a heartbeat to the group coordinator unconditionally.
def trigger_heartbeat!
@heartbeat.trigger!
end
# Aliases for the external API compatibility
alias send_heartbeat_if_necessary trigger_heartbeat
alias send_heartbeat trigger_heartbeat!
private
# Runs the given block repeatedly while the consumer is running, translating
# recoverable Kafka errors into re-joins/retries. On the way out (normal
# return or crash) it stops the fetcher, commits final offsets, and leaves
# the group so the remaining members can rebalance quickly.
def consumer_loop
@running = true
@logger.push_tags(@group.to_s)
@fetcher.start
while running?
begin
@instrumenter.instrument("loop.consumer") do
refresh_topic_list_if_enabled
yield
end
rescue HeartbeatError
make_final_offsets_commit!
join_group if running?
rescue OffsetCommitError
join_group if running?
rescue RebalanceInProgress
@logger.warn "Group rebalance in progress, re-joining..."
join_group if running?
rescue FetchError, NotLeaderForPartition, UnknownTopicOrPartition
# Cluster metadata is stale; it will be refreshed on the next iteration.
@cluster.mark_as_stale!
rescue LeaderNotAvailable => e
@logger.error "Leader not available; waiting 1s before retrying"
@cluster.mark_as_stale!
sleep 1
rescue ConnectionError => e
@logger.error "Connection error #{e.class}: #{e.message}"
@cluster.mark_as_stale!
rescue SignalException => e
@logger.warn "Received signal #{e.message}, shutting down"
@running = false
end
end
ensure
@fetcher.stop
# In order to quickly have the consumer group re-balance itself, it's
# important that members explicitly tell Kafka when they're leaving.
make_final_offsets_commit!
@group.leave rescue nil
@running = false
@logger.pop_tags
end
# Commits offsets with a bounded number of retries. Any remaining Kafka
# error is only logged, since we are shutting down anyway.
def make_final_offsets_commit!(attempts = 3)
@offset_manager.commit_offsets
rescue ConnectionError, OffsetCommitError, EOFError
# It's important to make sure final offsets commit is done
# As otherwise messages that have been processed after last auto-commit
# will be processed again and that may be huge amount of messages
return if attempts.zero?
@logger.error "Retrying to make final offsets commit (#{attempts} attempts left)"
sleep(0.1)
make_final_offsets_commit!(attempts - 1)
rescue Kafka::Error => e
@logger.error "Encountered error while shutting down; #{e.class}: #{e.message}"
end
# (Re-)joins the consumer group and reconciles in-memory offset state with
# the new partition assignment, then points the fetcher at the right offsets.
def join_group
@join_group_for_new_topics = false
old_generation_id = @group.generation_id
@group.join
if old_generation_id && @group.generation_id != old_generation_id + 1
# We've been out of the group for at least an entire generation, no
# sense in trying to hold on to offset data
clear_current_offsets
@offset_manager.clear_offsets
else
# After rejoining the group we may have been assigned a new set of
# partitions. Keeping the old offset commits around forever would risk
# having the consumer go back and reprocess messages if it's assigned
# a partition it used to be assigned to way back. For that reason, we
# only keep commits for the partitions that we're still assigned.
clear_current_offsets(excluding: @group.assigned_partitions)
@offset_manager.clear_offsets_excluding(@group.assigned_partitions)
end
@fetcher.reset
@group.assigned_partitions.each do |topic, partitions|
partitions.each do |partition|
if paused?(topic, partition)
@logger.warn "Not fetching from #{topic}/#{partition} due to pause"
else
seek_to_next(topic, partition)
end
end
end
end
# Points the fetcher at the next offset to read for a topic partition.
# When we have tracked an offset in memory (automatic marking disabled),
# resume right after it; otherwise ask the offset manager, which knows the
# last committed offset (it may not be 0).
def seek_to_next(topic, partition)
  tracked = @current_offsets[topic]
  offset =
    if tracked.key?(partition)
      tracked[partition] + 1
    else
      @offset_manager.next_offset_for(topic, partition)
    end
  @fetcher.seek(topic, partition, offset)
end
# Emits a pause-status event for every known topic partition, then lifts any
# pause whose timeout has run out.
def resume_paused_partitions!
  @pauses.each do |topic, by_partition|
    by_partition.each do |partition, state|
      @instrumenter.instrument("pause_status.consumer", {
        topic: topic,
        partition: partition,
        duration: state.pause_duration,
      })
      next unless state.paused? && state.expired?
      @logger.info "Automatically resuming partition #{topic}/#{partition}, pause timeout expired"
      resume(topic, partition)
    end
  end
end
# Re-scans subscriptions for new topics, at most once per
# @refresh_topic_interval seconds. A non-positive interval disables the
# refresh entirely.
def refresh_topic_list_if_enabled
  return unless @refresh_topic_interval > 0
  return if @refreshed_at && Time.now < @refreshed_at + @refresh_topic_interval
  scan_for_subscribing
  @refreshed_at = Time.now
end
# Polls the fetcher for the next set of batches, joining the group and
# resuming expired pauses first. Returns an empty array when idle or
# shutting down. Out-of-range offsets are reset to the default and retried;
# connection problems are re-raised as FetchError.
def fetch_batches
# Return early if the consumer has been stopped.
return [] if shutting_down?
join_group if !@group.member? || @join_group_for_new_topics
trigger_heartbeat
resume_paused_partitions!
if !@fetcher.data?
@logger.debug "No batches to process"
sleep 2
[]
else
tag, message = @fetcher.poll
case tag
when :batches
# make sure any old batches, fetched prior to the completion of a consumer group sync,
# are only processed if the batches are from brokers for which this broker is still responsible.
message.select { |batch| @group.assigned_to?(batch.topic, batch.partition) }
when :exception
raise message
end
end
rescue OffsetOutOfRange => e
@logger.error "Invalid offset #{e.offset} for #{e.topic}/#{e.partition}, resetting to default offset"
@offset_manager.seek_to_default(e.topic, e.partition)
retry
rescue ConnectionError => e
@logger.error "Connection error while fetching messages: #{e}"
raise FetchError, e
end
# Returns the Pause state for a topic partition; @pauses lazily creates one
# per topic/partition via its nested default procs.
def pause_for(topic, partition)
  partitions = @pauses[topic]
  partitions[partition]
end
# True while the consumer loop is active.
def running?
@running
end
# Opposite of #running?; used to abort work promptly after #stop.
def shutting_down?
!running?
end
# Drops in-memory offsets for partitions we are no longer assigned.
# Partitions listed in +excluding+ (topic => [partitions]) keep their
# tracked offsets; everything else is removed.
def clear_current_offsets(excluding: {})
  @current_offsets.each do |topic, partitions|
    kept = excluding.fetch(topic, [])
    partitions.keep_if { |partition, _offset| kept.include?(partition) }
  end
end
# Re-evaluates every registered subscription: regex subscriptions are
# expanded against the cluster's current topic list, plain topic names are
# subscribed directly.
def scan_for_subscribing
  @subscribed_topics.keys.each do |topic_or_regex|
    config = @subscribed_topics[topic_or_regex]
    args = config.values_at(:default_offset, :start_from_beginning, :max_bytes_per_partition)
    if topic_or_regex.is_a?(Regexp)
      subscribe_to_regex(topic_or_regex, *args)
    else
      subscribe_to_topic(topic_or_regex, *args)
    end
  end
end
# Subscribes to every existing cluster topic whose name matches the pattern.
def subscribe_to_regex(topic_regex, default_offset, start_from_beginning, max_bytes_per_partition)
  matching = cluster_topics.select { |name| name =~ topic_regex }
  matching.each do |name|
    subscribe_to_topic(name, default_offset, start_from_beginning, max_bytes_per_partition)
  end
end
# Wires a single topic into the group, offset manager, and fetcher.
# Idempotent: each topic is only wired up once.
def subscribe_to_topic(topic, default_offset, start_from_beginning, max_bytes_per_partition)
  return if @matched_topics.include?(topic)
  @matched_topics << topic
  # A new topic requires a fresh group join so its partitions get assigned.
  @join_group_for_new_topics = true
  @group.subscribe(topic)
  @offset_manager.set_default_offset(topic, default_offset)
  @fetcher.subscribe(topic, max_bytes_per_partition: max_bytes_per_partition)
  @cluster.mark_as_stale!
end
# Lists the cluster's topics, tolerating one transient connection error:
# on failure the cluster metadata is marked stale and the call is retried
# a single time before the error propagates.
def cluster_topics
  retries_left = 1
  begin
    @cluster.list_topics
  rescue Kafka::ConnectionError
    @cluster.mark_as_stale!
    raise if retries_left.zero?
    retries_left -= 1
    retry
  end
end
end
end
|
module Kapify
  # Gem version string. Frozen so callers cannot mutate the shared constant.
  VERSION = "0.0.13".freeze
end
Version bump to 0.0.14.
module Kapify
  # Gem version string. Frozen so callers cannot mutate the shared constant.
  VERSION = "0.0.14".freeze
end
|
module Keisan
  # Evaluation context: holds a variable registry and a function registry,
  # optionally chained to a parent context for fallback lookups.
  class Context
    attr_reader :function_registry, :variable_registry, :allow_recursive

    # @param parent [Context, nil] context to delegate missing lookups to
    # @param random [Random, nil] RNG override; falls back to the parent's
    # @param allow_recursive [Boolean] whether recursive definitions are allowed
    def initialize(parent: nil, random: nil, allow_recursive: false)
      @parent = parent
      @function_registry = Functions::Registry.new(parent: @parent.try(:function_registry))
      @variable_registry = Variables::Registry.new(parent: @parent.try(:variable_registry))
      @random = random
      @allow_recursive = allow_recursive
    end

    # Creates a child context seeded with the given definitions. Procs are
    # registered as functions, anything else as variables.
    #
    # Uses `self.class.new` rather than hard-coding `Context.new` so that
    # subclasses spawn children of their own type.
    #
    # @param definitions [Hash] name => Proc (function) or value (variable)
    # @param transient [Boolean] when true, registrations on the child are
    #   forwarded to this (parent) context instead
    # @return [Context]
    def spawn_child(definitions: {}, transient: false)
      child = self.class.new(parent: self, allow_recursive: allow_recursive)
      definitions.each do |name, value|
        case value
        when Proc
          child.register_function!(name, value)
        else
          child.register_variable!(name, value)
        end
      end
      child.set_transient! if transient
      child
    end

    # Looks up a variable by name (the registry falls back to the parent).
    def variable(name)
      @variable_registry[name.to_s]
    end

    def has_variable?(name)
      @variable_registry.has?(name)
    end

    # Registers a variable; transient contexts forward to their parent.
    def register_variable!(name, value)
      if @transient
        @parent.register_variable!(name, value)
      else
        @variable_registry.register!(name.to_s, value)
      end
    end

    # Looks up a function by name (the registry falls back to the parent).
    def function(name)
      @function_registry[name.to_s]
    end

    def has_function?(name)
      @function_registry.has?(name)
    end

    # Registers a function; transient contexts forward to their parent.
    def register_function!(name, function)
      if @transient
        @parent.register_function!(name, function)
      else
        @function_registry.register!(name.to_s, function)
      end
    end

    # RNG for this context: the explicit one, else the parent's, else a
    # freshly created Random.
    def random
      @random || @parent.try(:random) || Random.new
    end

    protected

    # Marks this context as transient (registrations go to the parent).
    def set_transient!
      @transient = true
    end
  end
end
Use current class when spawning child
Allows easier subclassing of Keisan::Context
module Keisan
  # Evaluation context: holds a variable registry and a function registry,
  # optionally chained to a parent context for fallback lookups.
  class Context
    attr_reader :function_registry, :variable_registry, :allow_recursive

    # @param parent [Context, nil] context to delegate missing lookups to
    # @param random [Random, nil] RNG override; falls back to the parent's
    # @param allow_recursive [Boolean] whether recursive definitions are allowed
    def initialize(parent: nil, random: nil, allow_recursive: false)
      @parent = parent
      @function_registry = Functions::Registry.new(parent: @parent.try(:function_registry))
      @variable_registry = Variables::Registry.new(parent: @parent.try(:variable_registry))
      @random = random
      @allow_recursive = allow_recursive
    end

    # Creates a child context seeded with the given definitions. Procs are
    # registered as functions, anything else as variables. `self.class.new`
    # keeps subclasses spawning children of their own type.
    #
    # @param definitions [Hash] name => Proc (function) or value (variable)
    # @param transient [Boolean] when true, registrations on the child are
    #   forwarded to this (parent) context instead
    # @return [Context]
    def spawn_child(definitions: {}, transient: false)
      child = self.class.new(parent: self, allow_recursive: allow_recursive)
      definitions.each do |name, value|
        case value
        when Proc
          child.register_function!(name, value)
        else
          child.register_variable!(name, value)
        end
      end
      child.set_transient! if transient
      child
    end

    # Looks up a variable by name (the registry falls back to the parent).
    def variable(name)
      @variable_registry[name.to_s]
    end

    def has_variable?(name)
      @variable_registry.has?(name)
    end

    # Registers a variable; transient contexts forward to their parent.
    def register_variable!(name, value)
      if @transient
        @parent.register_variable!(name, value)
      else
        @variable_registry.register!(name.to_s, value)
      end
    end

    # Looks up a function by name (the registry falls back to the parent).
    def function(name)
      @function_registry[name.to_s]
    end

    def has_function?(name)
      @function_registry.has?(name)
    end

    # Registers a function; transient contexts forward to their parent.
    def register_function!(name, function)
      if @transient
        @parent.register_function!(name, function)
      else
        @function_registry.register!(name.to_s, function)
      end
    end

    # RNG for this context: the explicit one, else the parent's, else a
    # freshly created Random.
    def random
      @random || @parent.try(:random) || Random.new
    end

    protected

    # Marks this context as transient (registrations go to the parent).
    def set_transient!
      @transient = true
    end
  end
end
|
module Kender
  # Gem version string. Frozen so callers cannot mutate the shared constant.
  VERSION = '0.1.3'.freeze
end
Bump version to 0.2.0.
module Kender
  # Gem version string. Frozen so callers cannot mutate the shared constant.
  VERSION = '0.2.0'.freeze
end
|
class Kicker
  # Gem version string. Frozen so callers cannot mutate the shared constant.
  VERSION = "3.0.0pre2".freeze
end
Bump version to 3.0.0pre3.
class Kicker
  # Gem version string. Frozen so callers cannot mutate the shared constant.
  VERSION = "3.0.0pre3".freeze
end
|
require 'omf_common/lobject'
require 'warden-openid'
use ::Rack::ShowExceptions
use ::Rack::Session::Cookie, secret: "715aba35a6980113aa418ec18af31411", key: 'labwiki.session'
use ::Rack::OpenID
$users = {}
Warden::OpenID.configure do |config|
config.user_finder do |response|
$users[response.identity_url]
end
end
# Rack endpoint invoked by Warden when authentication did not produce a
# signed-in user.
module AuthFailureApp
  # @param env [Hash] Rack environment (reads 'warden.options' / 'warden')
  # @return [Array] a Rack response triple
  def self.call(env)
    if openid = env['warden.options'][:openid]
      # OpenID authentication succeeded but the user is unknown
      # (Warden::OpenID.user_finder returned nil): auto-register the
      # identity, sign it in, then bounce back to the app.
      identity_url = openid[:response].identity_url
      $users[identity_url] = identity_url
      env['warden'].set_user identity_url
      [307, {'Location' => '/labwiki', "Content-Type" => ""}, ['Next window!']]
    else
      # OpenID authentication failed outright.
      [401, {'Location' => '/labwiki', "Content-Type" => ""}, ['Next window!']]
    end
  end
end
use Warden::Manager do |manager|
manager.default_strategies :openid
manager.failure_app = AuthFailureApp
end
OMF::Web::Runner.instance.life_cycle(:pre_rackup)
options = OMF::Web::Runner.instance.options
require 'labwiki/session_init'
use SessionInit
# These should go to a separate controller/handler file.
map "/create_script" do
handler = lambda do |env|
req = ::Rack::Request.new(env)
file_ext = req.params['file_ext'].downcase
file_name = "#{req.params['file_name']}.#{file_ext}"
sub_folder = case file_ext
when 'rb'
'oidl'
when 'md'
'wiki'
end
repo = (LabWiki::Configurator[:repositories] || {}).first
if repo.class == Array
repo = OMF::Web::ContentRepository.find_repo_for("#{repo[1][:type]}:#{repo[0]}")
end
repo ||= (OMF::Web::SessionStore[:prepare, :repos] || []).first
repo.write("repo/#{sub_folder}/#{file_name}", "", "Adding new script #{file_name}")
[200, {}, "#{file_name} created"]
end
run handler
end
map "/dump" do
handler = lambda do |env|
req = ::Rack::Request.new(env)
omf_exp_id = req.params['domain']
if LabWiki::Configurator[:gimi] && LabWiki::Configurator[:gimi][:dump_script]
dump_cmd = File.expand_path(LabWiki::Configurator[:gimi][:dump_script])
else
return [500, {}, "Dump script not configured."]
end
exp = nil
OMF::Web::SessionStore.find_across_sessions do |content|
content["omf:exps"] && (exp = content["omf:exps"].find { |v| v[:id] == omf_exp_id } )
end
if exp
i_token = exp[:irods_token]
i_path = "#{exp[:irods_path]}/#{LabWiki::Configurator[:gimi][:irods][:measurement_folder]}_#{exp[:exp_name]}" rescue "#{exp[:irods_path]}"
dump_cmd << " --domain #{omf_exp_id} --token #{i_token} --path #{i_path}"
EM.popen(dump_cmd)
[200, {}, "Dump script triggered. <br /> Using command: #{dump_cmd} <br /> Unfortunately we cannot show you the progress."]
else
[500, {}, "Cannot find experiment(task) by domain id #{omf_exp_id}"]
end
end
run handler
end
map "/labwiki" do
handler = proc do |env|
if options[:no_login_required]
identity_url = "https://localhost?id=user1"
$users[identity_url] = identity_url
env['warden'].set_user identity_url
require 'labwiki/rack/top_handler'
LabWiki::TopHandler.new(options).call(env)
elsif env['warden'].authenticated?
require 'labwiki/rack/top_handler'
LabWiki::TopHandler.new(options).call(env)
else
[307, {'Location' => '/resource/login/openid.html', "Content-Type" => ""}, ['Authenticate!']]
end
end
run handler
end
map '/login' do
  # POST: run Warden authentication and redirect to the root on success.
  handler = proc do |env|
    req = ::Rack::Request.new(env)
    if req.post?
      env['warden'].authenticate!
      [307, {'Location' => '/', "Content-Type" => ""}, ['Next window!']]
    else
      # Previously a non-POST request fell through the `if` and the proc
      # returned nil, which is not a valid Rack response and crashed the
      # request. Reject such requests explicitly instead.
      [405, {"Content-Type" => "text/plain", "Allow" => "POST"}, ['Method Not Allowed']]
    end
  end
  run handler
end
map '/logout' do
handler = Proc.new do |env|
req = ::Rack::Request.new(env)
env['warden'].logout(:default)
req.session['sid'] = nil
[307, {'Location' => '/', "Content-Type" => ""}, ['Next window!']]
end
run handler
end
map "/resource/vendor/" do
require 'omf-web/rack/multi_file'
run OMF::Web::Rack::MultiFile.new(options[:static_dirs], :sub_path => 'vendor', :version => true)
end
map "/resource" do
require 'omf-web/rack/multi_file'
dirs = options[:static_dirs]
dirs.insert(0, "#{File.dirname(__FILE__)}/../../htdocs")
run OMF::Web::Rack::MultiFile.new(dirs)
end
map "/plugin" do
require 'labwiki/rack/plugin_resource_handler'
run LabWiki::PluginResourceHandler.new()
end
map '/_ws' do
begin
require 'omf-web/rack/websocket_handler'
run OMF::Web::Rack::WebsocketHandler.new # :backend => { :debug => true }
rescue Exception => ex
OMF::Common::Loggable.logger('web').error "#{ex}"
end
end
map '/_update' do
require 'omf-web/rack/update_handler'
run OMF::Web::Rack::UpdateHandler.new
end
map '/_content' do
require 'omf-web/rack/content_handler'
run OMF::Web::Rack::ContentHandler.new
end
map '/_search' do
require 'labwiki/rack/search_handler'
run LabWiki::SearchHandler.new
end
map '/_column' do
require 'labwiki/rack/column_handler'
run LabWiki::ColumnHandler.new
end
map "/" do
handler = Proc.new do |env|
req = ::Rack::Request.new(env)
case req.path_info
when '/'
[307, {'Location' => '/labwiki', "Content-Type" => ""}, ['Next window!']]
when '/favicon.ico'
[301, {'Location' => '/resource/image/favicon.ico', "Content-Type" => ""}, ['Next window!']]
when '/image/favicon.ico'
[301, {'Location' => '/resource/image/favicon.ico', "Content-Type" => ""}, ['Next window!']]
else
OMF::Common::Loggable.logger('rack').warn "Can't handle request '#{req.path_info}'"
[401, {"Content-Type" => ""}, "Sorry!"]
end
end
run handler
end
OMF::Web::Runner.instance.life_cycle(:post_rackup)
Handle different default folder structure in repo
require 'omf_common/lobject'
require 'warden-openid'
use ::Rack::ShowExceptions
use ::Rack::Session::Cookie, secret: "715aba35a6980113aa418ec18af31411", key: 'labwiki.session'
use ::Rack::OpenID
$users = {}
Warden::OpenID.configure do |config|
config.user_finder do |response|
$users[response.identity_url]
end
end
# Rack endpoint invoked by Warden when authentication did not produce a
# signed-in user.
module AuthFailureApp
  # @param env [Hash] Rack environment (reads 'warden.options' / 'warden')
  # @return [Array] a Rack response triple
  def self.call(env)
    req = ::Rack::Request.new(env) # NOTE(review): req is unused here
    if openid = env['warden.options'][:openid]
      # OpenID authenticate success, but user is missing (Warden::OpenID.user_finder returns nil)
      # — auto-register the identity and sign it in.
      identity_url = openid[:response].identity_url
      $users[identity_url] = identity_url
      env['warden'].set_user identity_url
      [307, {'Location' => '/labwiki', "Content-Type" => ""}, ['Next window!']]
    else
      # When OpenID authenticate failure
      [401, {'Location' => '/labwiki', "Content-Type" => ""}, ['Next window!']]
    end
  end
end
use Warden::Manager do |manager|
manager.default_strategies :openid
manager.failure_app = AuthFailureApp
end
OMF::Web::Runner.instance.life_cycle(:pre_rackup)
options = OMF::Web::Runner.instance.options
require 'labwiki/session_init'
use SessionInit
# These should go to a separate controller/handler file.
map "/create_script" do
  # Creates an empty script file in the first configured content repository.
  handler = lambda do |env|
    req = ::Rack::Request.new(env)
    file_ext = req.params['file_ext'].downcase
    file_name = "#{req.params['file_name']}.#{file_ext}"
    # Scripts are filed by type: Ruby experiment scripts vs. wiki pages.
    sub_folder = case file_ext
                 when 'rb'
                   'oidl'
                 when 'md'
                   'wiki'
                 end
    repo = (LabWiki::Configurator[:repositories] || {}).first
    if repo.class == Array
      repo = OMF::Web::ContentRepository.find_repo_for("#{repo[1][:type]}:#{repo[0]}")
    end
    repo ||= (OMF::Web::SessionStore[:prepare, :repos] || []).first
    begin
      repo.write("repo/#{sub_folder}/#{file_name}", "", "Adding new script #{file_name}")
    rescue RuntimeError => e
      # Some repositories are laid out without the leading 'repo/' folder;
      # retry with the flat path. Any other error is re-raised — previously
      # unrelated failures were silently swallowed and a 200 "created"
      # response was still returned.
      raise unless e.message =~ /Cannot write to file/
      repo.write("#{sub_folder}/#{file_name}", "", "Adding new script #{file_name}")
    end
    [200, {}, "#{file_name} created"]
  end
  run handler
end
map "/dump" do
handler = lambda do |env|
req = ::Rack::Request.new(env)
omf_exp_id = req.params['domain']
if LabWiki::Configurator[:gimi] && LabWiki::Configurator[:gimi][:dump_script]
dump_cmd = File.expand_path(LabWiki::Configurator[:gimi][:dump_script])
else
return [500, {}, "Dump script not configured."]
end
exp = nil
OMF::Web::SessionStore.find_across_sessions do |content|
content["omf:exps"] && (exp = content["omf:exps"].find { |v| v[:id] == omf_exp_id } )
end
if exp
i_token = exp[:irods_token]
i_path = "#{exp[:irods_path]}/#{LabWiki::Configurator[:gimi][:irods][:measurement_folder]}_#{exp[:exp_name]}" rescue "#{exp[:irods_path]}"
dump_cmd << " --domain #{omf_exp_id} --token #{i_token} --path #{i_path}"
EM.popen(dump_cmd)
[200, {}, "Dump script triggered. <br /> Using command: #{dump_cmd} <br /> Unfortunately we cannot show you the progress."]
else
[500, {}, "Cannot find experiment(task) by domain id #{omf_exp_id}"]
end
end
run handler
end
map "/labwiki" do
handler = proc do |env|
if options[:no_login_required]
identity_url = "https://localhost?id=user1"
$users[identity_url] = identity_url
env['warden'].set_user identity_url
require 'labwiki/rack/top_handler'
LabWiki::TopHandler.new(options).call(env)
elsif env['warden'].authenticated?
require 'labwiki/rack/top_handler'
LabWiki::TopHandler.new(options).call(env)
else
[307, {'Location' => '/resource/login/openid.html', "Content-Type" => ""}, ['Authenticate!']]
end
end
run handler
end
map '/login' do
handler = proc do |env|
req = ::Rack::Request.new(env)
if req.post?
env['warden'].authenticate!
[307, {'Location' => '/', "Content-Type" => ""}, ['Next window!']]
end
end
run handler
end
map '/logout' do
handler = Proc.new do |env|
req = ::Rack::Request.new(env)
env['warden'].logout(:default)
req.session['sid'] = nil
[307, {'Location' => '/', "Content-Type" => ""}, ['Next window!']]
end
run handler
end
map "/resource/vendor/" do
require 'omf-web/rack/multi_file'
run OMF::Web::Rack::MultiFile.new(options[:static_dirs], :sub_path => 'vendor', :version => true)
end
map "/resource" do
require 'omf-web/rack/multi_file'
dirs = options[:static_dirs]
dirs.insert(0, "#{File.dirname(__FILE__)}/../../htdocs")
run OMF::Web::Rack::MultiFile.new(dirs)
end
map "/plugin" do
require 'labwiki/rack/plugin_resource_handler'
run LabWiki::PluginResourceHandler.new()
end
map '/_ws' do
begin
require 'omf-web/rack/websocket_handler'
run OMF::Web::Rack::WebsocketHandler.new # :backend => { :debug => true }
rescue Exception => ex
OMF::Common::Loggable.logger('web').error "#{ex}"
end
end
map '/_update' do
require 'omf-web/rack/update_handler'
run OMF::Web::Rack::UpdateHandler.new
end
map '/_content' do
require 'omf-web/rack/content_handler'
run OMF::Web::Rack::ContentHandler.new
end
map '/_search' do
require 'labwiki/rack/search_handler'
run LabWiki::SearchHandler.new
end
map '/_column' do
require 'labwiki/rack/column_handler'
run LabWiki::ColumnHandler.new
end
map "/" do
handler = Proc.new do |env|
req = ::Rack::Request.new(env)
case req.path_info
when '/'
[307, {'Location' => '/labwiki', "Content-Type" => ""}, ['Next window!']]
when '/favicon.ico'
[301, {'Location' => '/resource/image/favicon.ico', "Content-Type" => ""}, ['Next window!']]
when '/image/favicon.ico'
[301, {'Location' => '/resource/image/favicon.ico', "Content-Type" => ""}, ['Next window!']]
else
OMF::Common::Loggable.logger('rack').warn "Can't handle request '#{req.path_info}'"
[401, {"Content-Type" => ""}, "Sorry!"]
end
end
run handler
end
OMF::Web::Runner.instance.life_cycle(:post_rackup)
|
require "thor"
require "listen"
module Linner
  # Thor-based CLI for the Linner asset pipeline: build, watch, clean and
  # project scaffolding commands.
  class Command < Thor
    include Thor::Actions

    map "-v" => :version

    # Template root used by Thor::Actions (e.g. #directory in `new`).
    def self.source_root
      File.dirname(__FILE__)
    end

    desc "version", "show version"
    def version
      puts Linner::VERSION
    end

    desc "build", "build assets"
    def build
      Linner.compile = true
      clean
      Notifier.profile { Linner.perform }
    end

    desc "watch", "watch assets"
    def watch
      trap(:INT) { exit! } # Ctrl-C triggers the graceful exit! below
      clean
      perform_proc.call
      watch_for_perform
      watch_for_reload
      sleep # block forever; listeners run on background threads
    end

    desc "clean", "clean assets"
    def clean
      FileUtils.rm_rf Dir.glob("#{env.public_folder}/*")
    end

    desc "new", "create the skeleton of project"
    def new(name)
      directory('templates', name)
      chmod("#{name}/bin/server", 0755)
    end

    private

    def env
      Linner.env
    end

    # Memoized build proc: runs a profiled build and reports any error via
    # Notifier instead of letting the watcher crash.
    def perform_proc
      @proc ||= Proc.new do
        begin
          Notifier.profile{ Linner.perform }
        rescue
          Notifier.error $!
        end
      end
    end

    # Rebuild (after expiring cached entries) whenever watched sources change.
    def watch_for_perform
      Listen.to env.watched_paths do |modified, added, removed|
        Linner.cache.expire_by(modified + added + removed)
        perform_proc.call
      end
    end

    # Trigger a browser reload whenever built output changes.
    def watch_for_reload
      reactor = Reactor.supervise_as(:reactor).actors.first
      Listen.to env.public_folder, relative_path: true do |modified, added, removed|
        reactor.reload_browser(modified + added + removed)
      end
    end

    # Graceful shutdown used by the INT trap.
    def exit!
      Notifier.exit
      Kernel::exit
    end
  end
end
Add `check` and `install` methods to the Linner command.
require "thor"
require "listen"
module Linner
class Command < Thor
include Thor::Actions
map "-v" => :version
def self.source_root
File.dirname(__FILE__)
end
desc "version", "show version"
def version
puts Linner::VERSION
end
desc "check", "check dependencies"
def check
Bundler.new(env.bundles).check
end
desc "install", "install dependencies"
def install
Bundler.new(env.bundles).perform
end
desc "build", "build assets"
def build
Linner.compile = true
clean
Notifier.profile { Linner.perform }
end
desc "watch", "watch assets"
def watch
trap(:INT) { exit! }
clean
install
perform_proc.call
watch_for_perform
watch_for_reload
sleep
end
desc "clean", "clean assets"
def clean
FileUtils.rm_rf Dir.glob("#{env.public_folder}/*")
end
desc "new", "create the skeleton of project"
def new(name)
directory('templates', name)
chmod("#{name}/bin/server", 0755)
end
private
def env
Linner.env
end
def perform_proc
@proc ||= Proc.new do
begin
Notifier.profile{ Linner.perform }
rescue
Notifier.error $!
end
end
end
def watch_for_perform
Listen.to env.watched_paths do |modified, added, removed|
Linner.cache.expire_by(modified + added + removed)
perform_proc.call
end
end
def watch_for_reload
reactor = Reactor.supervise_as(:reactor).actors.first
Listen.to env.public_folder, relative_path: true do |modified, added, removed|
reactor.reload_browser(modified + added + removed)
end
end
def exit!
Notifier.exit
Kernel::exit
end
end
end
|
require 'logan/HashConstructed'
require 'logan/todo'
module Logan
  # A Basecamp todo list within a project: wraps the todolist JSON payload
  # and provides create/update helpers for its todos.
  class TodoList
    include HashConstructed

    attr_accessor :id
    attr_accessor :project_id
    attr_accessor :name
    attr_accessor :description
    attr_accessor :completed
    attr_accessor :remaining_todos
    attr_accessor :completed_todos

    def initialize(h)
      # Default to empty arrays so lists without any todos behave sanely.
      @remaining_todos = []
      @completed_todos = []
      super
    end

    # JSON request body used when creating this list via the API.
    def post_json
      { :name => @name, :description => @description }.to_json
    end

    # Setter invoked by HashConstructed when the payload embeds todos;
    # splits them into remaining and completed Todo objects.
    def todos=(todo_hash)
      @remaining_todos = todo_hash['remaining'].map { |h| Logan::Todo.new h }
      @completed_todos = todo_hash['completed'].map { |h| Logan::Todo.new h }
    end

    # Finds the first todo (remaining first, then completed) whose content
    # contains the given substring; nil if none match.
    def todo_with_substring(substring)
      issue_todo = @remaining_todos.detect{ |t| !t.content.index(substring).nil? }
      issue_todo ||= @completed_todos.detect { |t| !t.content.index(substring).nil? }
    end

    # Creates a new todo in this list via the Basecamp API.
    #
    # @param todo [Logan::Todo] todo to create (uses its #post_json)
    # @return [Logan::Todo] the created todo built from the API response
    def create_todo(todo)
      post_params = {
        :body => todo.post_json,
        :headers => Logan::Client.headers.merge({'Content-Type' => 'application/json'})
      }
      response = Logan::Client.post "/projects/#{@project_id}/todolists/#{@id}/todos.json", post_params
      Logan::Todo.new response
    end

    # Updates an existing todo via the Basecamp API.
    #
    # @param todo [Logan::Todo] todo to update (uses its #put_json and #id)
    # @return [Logan::Todo] the updated todo built from the API response
    def update_todo(todo)
      put_params = {
        :body => todo.put_json,
        :headers => Logan::Client.headers.merge({'Content-Type' => 'application/json'})
      }
      response = Logan::Client.put "/projects/#{@project_id}/todos/#{todo.id}.json", put_params
      Logan::Todo.new response
    end
  end
end
Add a method to delete a todo.
require 'logan/HashConstructed'
require 'logan/todo'
module Logan
class TodoList
include HashConstructed
attr_accessor :id
attr_accessor :project_id
attr_accessor :name
attr_accessor :description
attr_accessor :completed
attr_accessor :remaining_todos
attr_accessor :completed_todos
attr_accessor :url
def initialize(h)
@remaining_todos = []
@completed_todos = []
super
end
def post_json
{ :name => @name, :description => @description }.to_json
end
def todos=(todo_hash)
@remaining_todos = todo_hash['remaining'].map { |h| Logan::Todo.new h }
@completed_todos = todo_hash['completed'].map { |h| Logan::Todo.new h }
end
def todo_with_substring(substring)
issue_todo = @remaining_todos.detect{ |t| !t.content.index(substring).nil? }
issue_todo ||= @completed_todos.detect { |t| !t.content.index(substring).nil? }
end
def create_todo(todo)
post_params = {
:body => todo.post_json,
:headers => Logan::Client.headers.merge({'Content-Type' => 'application/json'})
}
response = Logan::Client.post "/projects/#{@project_id}/todolists/#{@id}/todos.json", post_params
Logan::Todo.new response
end
def update_todo(todo)
put_params = {
:body => todo.put_json,
:headers => Logan::Client.headers.merge({'Content-Type' => 'application/json'})
}
response = Logan::Client.put "/projects/#{@project_id}/todos/#{todo.id}.json", put_params
Logan::Todo.new response
end
# Deletes the given todo from Basecamp.
#
# @param todo [Logan::Todo] todo to delete (only #id is used)
# @return the raw response from the DELETE call
def delete_todo(todo)
  # Return the client response directly — the previous assignment to an
  # unused local `response` added nothing.
  Logan::Client.delete "/projects/#{@project_id}/todos/#{todo.id}.json"
end
end
end |
require "logstash/config/file"
require "logstash/filterworker"
require "logstash/logging"
require "logstash/sized_queue"
require "logstash/multiqueue"
require "logstash/namespace"
require "logstash/program"
require "logstash/threadwatchdog"
require "logstash/util"
require "optparse"
require "thread"
require "uri"
# TODO(sissel): only enable this if we are in debug mode.
# JRuby.objectspace=true
# Collect logs, ship them out.
class LogStash::Agent
include LogStash::Program
attr_reader :config
attr_reader :inputs
attr_reader :outputs
attr_reader :filters
attr_accessor :logger
# flags
attr_reader :config_path
attr_reader :logfile
attr_reader :verbose
public
# Sets up default flag values, empty plugin collections and the default
# plugin search path. Logging goes to STDERR until -l/--log is parsed.
def initialize
  log_to(STDERR)
  @config_path = nil
  @config_string = nil
  @logfile = nil

  # flag/config defaults
  @verbose = 0
  @filterworker_count = 1

  # Plugin instance => Thread bookkeeping, guarded by the mutexes below.
  @plugins = {}
  @plugins_mutex = Mutex.new
  @plugin_setup_mutex = Mutex.new
  @outputs = []
  @inputs = []
  @filters = []
  @plugin_paths = []
  @reloading = false

  # Add logstash's plugin path (plugin paths must contain inputs, outputs, filters)
  @plugin_paths << File.dirname(__FILE__)

  # TODO(sissel): Other default plugin paths?

  # Any unhandled exception in a plugin thread should crash the agent
  # rather than die silently.
  Thread::abort_on_exception = true
  @is_shutting_down = false
end # def initialize
public
# Points the agent's logger at the given target (an IO or file path),
# replacing any previously configured logger.
def log_to(target)
  @logger = LogStash::Logger.new(target)
end # def log_to
private
# Registers the agent's command-line flags on the given OptionParser.
# Handlers only record values into instance variables; fuller validation
# happens later in #configure. (Also fixes typos in the user-visible help
# text: "direcory", "concatonated", "If not input", and a stray '}'.)
#
# @param opts [OptionParser] parser to register the flags on
def options(opts)
  opts.on("-f CONFIGPATH", "--config CONFIGPATH",
          "Load the logstash config from a specific file or directory. " \
          "If a directory is given instead of a file, all files in that " \
          "directory will be concatenated in lexicographical order and " \
          "then parsed as a single config file.") do |arg|
    @config_path = arg
  end # -f / --config

  opts.on("-e CONFIGSTRING",
          "Use the given string as the configuration data. Same syntax as " \
          "the config file. If no input is specified, " \
          "'stdin { type => stdin }' is default. If no output is " \
          "specified, 'stdout { debug => true }' is default.") do |arg|
    @config_string = arg
  end # -e

  opts.on("-w COUNT", "--filterworkers COUNT", Integer,
          "Run COUNT filter workers (default: 1)") do |arg|
    @filterworker_count = arg
    if @filterworker_count <= 0
      raise ArgumentError, "filter worker count must be > 0"
    end
  end # -w

  opts.on("-l", "--log FILE", "Log to a given path. Default is stdout.") do |path|
    @logfile = path
  end

  opts.on("-v", "Increase verbosity") do
    @verbose += 1
  end

  opts.on("-V", "--version", "Show the version of logstash") do
    require "logstash/version"
    puts "logstash #{LOGSTASH_VERSION}"
    exit(0)
  end

  opts.on("-p PLUGIN_PATH", "--pluginpath PLUGIN_PATH",
          "A colon-delimited path to find plugins in.") do |path|
    path.split(":").each do |p|
      @plugin_paths << p unless @plugin_paths.include?(p)
    end
  end
end # def options
# Parse options.
private
def parse_options(args)
@opts = OptionParser.new
# Step one is to add agent flags.
options(@opts)
# TODO(sissel): Check for plugin_path flags, add them to @plugin_paths.
args.each_with_index do |arg, index|
next unless arg =~ /^(?:-p|--pluginpath)(?:=(.*))?$/
path = $1
if path.nil?
path = args[index + 1]
end
@plugin_paths += path.split(":")
end # args.each
# At this point, we should load any plugin-specific flags.
# These are 'unknown' flags that begin --<plugin>-flag
# Put any plugin paths into the ruby library path for requiring later.
@plugin_paths.each do |p|
@logger.debug("Adding to ruby load path", :path => p)
$:.unshift p
end
# TODO(sissel): Go through all inputs, filters, and outputs to get the flags.
# Add plugin flags to @opts
# Load any plugins that we have flags for.
# TODO(sissel): The --<plugin> flag support currently will load
# any matching plugins input, output, or filter. This means, for example,
# that the 'amqp' input *and* output plugin will be loaded if you pass
# --amqp-foo flag. This might cause confusion, but it seems reasonable for
# now that any same-named component will have the same flags.
plugins = []
args.each do |arg|
# skip things that don't look like plugin flags
next unless arg =~ /^--[A-z0-9]+-/
name = arg.split("-")[2] # pull the plugin name out
# Try to load any plugin by that name
%w{inputs outputs filters}.each do |component|
@plugin_paths.each do |path|
plugin = File.join(path, component, name) + ".rb"
@logger.debug("Plugin flag found; trying to load it",
:flag => arg, :plugin => plugin)
if File.file?(plugin)
@logger.info("Loading plugin", :plugin => plugin)
require plugin
[LogStash::Inputs, LogStash::Filters, LogStash::Outputs].each do |c|
# If we get flag --foo-bar, check for LogStash::Inputs::Foo
# and add any options to our option parser.
klass_name = name.capitalize
if c.const_defined?(klass_name)
@logger.debug("Found plugin class", :class => "#{c}::#{klass_name})")
klass = c.const_get(klass_name)
# See LogStash::Config::Mixin::DSL#options
klass.options(@opts)
plugins << klass
end # c.const_defined?
end # each component type (input/filter/outputs)
end # if File.file?(plugin)
end # @plugin_paths.each
end # %{inputs outputs filters}.each
#if !found
#@logger.fatal("Flag #{arg.inspect} requires plugin #{name}, but no plugin found.")
#return false
#end
end # @remaining_args.each
begin
remainder = @opts.parse(args)
rescue OptionParser::InvalidOption => e
@logger.info("Invalid option", :exception => e)
raise e
end
return remainder
end # def parse_options
private
# Validates the parsed flags and applies them: exactly one of -f/-e must
# be given, the config path (a glob) must match something, log output is
# redirected if -l was given, and verbosity maps to the logger level.
#
# @raise [RuntimeError] "Configuration problem" on invalid flag combinations
def configure
  if @config_path && @config_string
    @logger.fatal("Can't use -f and -e at the same time")
    raise "Configuration problem"
  elsif (@config_path.nil? || @config_path.empty?) && @config_string.nil?
    @logger.fatal("No config file given. (missing -f or --config flag?)")
    @logger.fatal(@opts.help)
    raise "Configuration problem"
  end

  #if @config_path and !File.exist?(@config_path)
  # The path may be a glob; require at least one match.
  if @config_path and Dir.glob(@config_path).length == 0
    @logger.fatal("Config file does not exist.", :path => @config_path)
    raise "Configuration problem"
  end

  if @logfile
    # Redirect both stdout and stderr of the whole process into the logfile.
    logfile = File.open(@logfile, "a")
    STDOUT.reopen(logfile)
    STDERR.reopen(logfile)
  end

  if @verbose >= 3 # Uber debugging.
    @logger.level = :debug
    $DEBUG = true
  elsif @verbose == 2 # logstash debug logs
    @logger.level = :debug
  elsif @verbose == 1 # logstash info logs
    @logger.level = :info
  else # Default log level
    @logger.level = :warn
  end
end # def configure
# Builds a LogStash::Config::File from either the -f path (a file,
# directory, or glob — multiple files are concatenated in sorted order)
# or the -e config string.
#
# @return [LogStash::Config::File] parsed-config wrapper with logger set
def read_config
  if @config_path
    # Support directory of config files.
    # https://logstash.jira.com/browse/LOGSTASH-106
    if File.directory?(@config_path)
      @logger.debug("Config path is a directory, scanning files",
                    :path => @config_path)
      paths = Dir.glob(File.join(@config_path, "*")).sort
    else
      # Get a list of files matching a glob. If the user specified a single
      # file, then this will only have one match and we are still happy.
      paths = Dir.glob(@config_path).sort
    end

    concatconfig = []
    paths.each do |path|
      concatconfig << File.new(path).read
    end
    config = LogStash::Config::File.new(nil, concatconfig.join("\n"))
  else # @config_string
    # Given a config string by the user (via the '-e' flag)
    config = LogStash::Config::File.new(nil, @config_string)
  end
  config.logger = @logger
  config
end
# Parses a config and returns [inputs, filters, outputs]
def parse_config(config)
inputs = []
filters = []
outputs = []
config.parse do |plugin|
# 'plugin' is a has containing:
# :type => the base class of the plugin (LogStash::Inputs::Base, etc)
# :plugin => the class of the plugin (LogStash::Inputs::File, etc)
# :parameters => hash of key-value parameters from the config.
type = plugin[:type].config_name # "input" or "filter" etc...
klass = plugin[:plugin]
# Create a new instance of a plugin, called like:
# -> LogStash::Inputs::File.new( params )
instance = klass.new(plugin[:parameters])
instance.logger = @logger
case type
when "input"
inputs << instance
when "filter"
filters << instance
when "output"
outputs << instance
else
msg = "Unknown config type '#{type}'"
@logger.error(msg)
raise msg
end # case type
end # config.parse
return inputs, filters, outputs
end
public
# Main entry point: installs signal handlers, parses flags, validates the
# configuration, reads the config, then starts the pipeline on a
# background thread (join it with #wait).
#
# @param args [Array<String>] command-line arguments
# @return [Array, false] unparsed leftover args, or raises on parse failure
def run(args, &block)
  @logger.info("Register signal handlers")
  register_signal_handlers

  @logger.info("Parse options ")
  remaining = parse_options(args)
  if remaining == false
    raise "Option parsing failed. See error log."
  end

  @logger.info("Configure")
  configure

  # Load the config file
  @logger.info("Read config")
  config = read_config

  @logger.info("Start thread")
  @thread = Thread.new do
    LogStash::Util::set_thread_name(self.class.name)
    run_with_config(config, &block)
  end

  return remaining
end # def run
public
# Blocks until the agent's main pipeline thread finishes.
#
# @return [Integer] always 0 (shell-style success code)
def wait
  @thread.join
  0
end # def wait
private
# Starts one input plugin on its own thread (plus extra clones when the
# plugin declares itself threadable with a thread count > 1), tracking
# every thread in @plugins.
def start_input(input)
  @logger.debug("Starting input", :plugin => input)
  t = 0 # thread counter, for log messages only

  # inputs should write directly to output queue if there are no filters.
  input_target = @filters.length > 0 ? @filter_queue : @output_queue

  # check to see if input supports multiple threads
  if input.threadable
    @logger.debug("Threadable input", :plugin => input)
    # start up extra threads if need be
    (input.threads-1).times do
      # Each extra thread runs its own clone of the plugin instance.
      input_thread = input.clone
      @logger.debug("Starting thread", :plugin => input, :thread => (t+=1))
      @plugins[input_thread] = Thread.new(input_thread, input_target) do |*args|
        run_input(*args)
      end
    end
  end

  @logger.debug("Starting thread", :plugin => input, :thread => (t+=1))
  @plugins[input] = Thread.new(input, input_target) do |*args|
    run_input(*args)
  end
end
private
# Starts one output plugin on its own thread, giving it a dedicated sized
# queue that is registered with the output multi-queue fan-out.
def start_output(output)
  @logger.debug("Starting output", :plugin => output)
  # Queue capacity scales with the number of filter workers feeding it.
  queue = LogStash::SizedQueue.new(10 * @filterworker_count)
  queue.logger = @logger
  @output_queue.add_queue(queue)
  @output_plugin_queues[output] = queue
  @plugins[output] = Thread.new(output, queue) do |*args|
    run_output(*args)
  end
end
public
def run_with_config(config)
@plugins_mutex.synchronize do
@inputs, @filters, @outputs = parse_config(config)
# If we are given a config string (run usually with 'agent -e "some config string"')
# then set up some defaults.
if @config_string
require "logstash/inputs/stdin"
require "logstash/outputs/stdout"
# set defaults if necessary
# All filters default to 'stdin' type
@filters.each do |filter|
filter.type = "stdin" if filter.type.nil?
end
# If no inputs are specified, use stdin by default.
@inputs = [LogStash::Inputs::Stdin.new("type" => [ "stdin" ])] if @inputs.length == 0
# If no outputs are specified, use stdout in debug mode.
@outputs = [LogStash::Outputs::Stdout.new("debug" => [ "true" ])] if @outputs.length == 0
end
if @inputs.length == 0 or @outputs.length == 0
raise "Must have both inputs and outputs configured."
end
# NOTE(petef) we should have config params for queue size
@filter_queue = LogStash::SizedQueue.new(10 * @filterworker_count)
@filter_queue.logger = @logger
@output_queue = LogStash::MultiQueue.new
@output_queue.logger = @logger
@ready_queue = Queue.new
# Start inputs
@inputs.each do |input|
start_input(input)
end # @inputs.each
# Create N filter-worker threads
@filterworkers = {}
if @filters.length > 0
@filters.each do |filter|
filter.logger = @logger
@plugin_setup_mutex.synchronize do
filter.register
end
end
if @filterworker_count > 1
@filters.each do |filter|
if ! filter.threadsafe?
raise "fail"
end
end
end
@filterworker_count.times do |n|
# TODO(sissel): facter this out into a 'filterworker' that accepts
# 'shutdown'
# Start a filter worker
filterworker = LogStash::FilterWorker.new(@filters, @filter_queue,
@output_queue)
filterworker.logger = @logger
thread = Thread.new(filterworker, n, @output_queue) do |*args|
run_filter(*args)
end
@plugins[filterworker] = thread
@filterworkers[filterworker] = thread
end # N.times
end # if @filters.length > 0
# A thread to supervise filter workers
watchdog = LogStash::ThreadWatchdog.new(@filterworkers.values)
watchdog.logger = logger
Thread.new do
watchdog.watch
end
# Create output threads
@output_plugin_queues = {}
@outputs.each do |output|
start_output(output)
end # @outputs.each
# Wait for all inputs and outputs to be registered.
wait_count = outputs.size + inputs.size
while wait_count > 0 and @ready_queue.pop
wait_count -= 1
end
@logger.info("All plugins are started and registered.")
end # synchronize
# yield to a block in case someone's waiting for us to be done setting up
# like tests, etc.
yield if block_given?
while sleep(2)
if @plugins.values.count { |p| p.alive? } == 0
@logger.warn("no plugins running, shutting down")
shutdown
end
@logger.debug("heartbeat")
end
end # def run_with_config
public
# Stop the agent.  Currently an unimplemented placeholder.
def stop
  # TODO(petef): Stop inputs, flush outputs, wait for finish,
  # then stop the event loop
end # def stop
# Shutdown the agent.
protected
# Stop all registered plugins, then exit the process with status 0.
# NOTE(review): shutdown_plugins does its work on a background thread,
# so the exit(0) below may race with plugin shutdown — confirm intended.
def shutdown
  @logger.info("Starting shutdown sequence")
  shutdown_plugins(@plugins)
  # When we get here, all inputs have finished, all messages are done
  @logger.info("Shutdown complete")
  exit(0)
end # def shutdown
# Ask every plugin in 'plugins' (a hash of plugin => thread) to shut
# down, waiting up to 10 seconds before giving up on stragglers.  The
# actual work happens on a background thread; this method returns
# immediately.  @is_shutting_down guards against concurrent shutdowns.
def shutdown_plugins(plugins)
  return if @is_shutting_down
  @is_shutting_down = true

  Thread.new do
    LogStash::Util::set_thread_name("logstash shutdown process")
    # TODO(sissel): Make this a flag
    force_shutdown_time = Time.now + 10

    finished_queue = Queue.new
    # Tell everything to shutdown.
    @logger.debug("Plugins to shutdown", :plugins => plugins.keys.collect(&:to_s))
    plugins.each do |p, thread|
      @logger.debug("Sending shutdown to: #{p.to_s}", :plugin => p)
      p.shutdown(finished_queue)
    end

    # Now wait until the queues we were given are empty.
    remaining = plugins.select { |p, thread| p.running? }
    while remaining.size > 0
      if (Time.now > force_shutdown_time)
        @logger.warn("Time to quit, even if some plugins aren't finished yet.")
        @logger.warn("Stuck plugins?", :remaining => remaining.map(&:first))
        break
      end

      @logger.debug("Waiting for plugins to finish.")
      plugin = finished_queue.pop(non_block=true) rescue nil
      if plugin.nil?
        sleep(1)
      else
        # BUG FIX: was 'plugin.running?', which re-tested only the plugin
        # that just finished instead of each plugin in the hash; also the
        # log line referenced the out-of-scope select variable 'p'.
        remaining = plugins.select { |p, thread| p.running? }
        @logger.debug("Plugin #{plugin.to_s} finished, waiting on the rest.",
                      :count => remaining.size,
                      :remaining => remaining.map(&:first))
      end
    end # while remaining.size > 0

    # BUG FIX: the flag used to be cleared right after spawning this
    # thread, defeating the reentrancy guard; clear it only once the
    # shutdown sequence has actually finished.
    @is_shutting_down = false
  end
end
# Reload configuration of filters, etc.  Re-reads the config and
# reconciles the running pipeline: removed plugins are stopped, added
# plugins are started, and the new filter list is pushed to the filter
# workers.  Called from the SIGHUP handler.
def reload
  @plugins_mutex.synchronize do
    begin
      @reloading = true
      # Reload the config file
      begin
        config = read_config
        reloaded_inputs, reloaded_filters, reloaded_outputs = parse_config(config)
      rescue Exception => e
        @logger.error("Aborting reload due to bad configuration", :exception => e)
        return
      end

      new_inputs = reloaded_inputs - @inputs
      new_filters = reloaded_filters - @filters
      new_outputs = reloaded_outputs - @outputs
      deleted_inputs = @inputs - reloaded_inputs
      deleted_filters = @filters - reloaded_filters
      deleted_outputs = @outputs - reloaded_outputs

      # Handle shutdown of input and output plugins
      obsolete_plugins = {}
      [deleted_inputs].flatten.each do |p|
        if @plugins.include? p
          obsolete_plugins[p] = @plugins[p]
          @plugins.delete(p)
        else
          @logger.warn("Couldn't find input plugin to stop", :plugin => p)
        end
      end

      [deleted_outputs].flatten.each do |p|
        if @plugins.include? p
          obsolete_plugins[p] = @plugins[p]
          @plugins.delete(p)
          @output_queue.remove_queue(@output_plugin_queues[p])
        else
          @logger.warn("Couldn't find output plugin to stop", :plugin => p)
        end
      end

      # Call reload on all existing plugins which are not being dropped
      (@plugins.keys - obsolete_plugins.keys).each(&:reload)
      (@filters - deleted_filters).each(&:reload)

      # Also remove filters
      deleted_filters.each { |f| obsolete_plugins[f] = nil }

      if obsolete_plugins.size > 0
        @logger.info("Stopping removed plugins:", :plugins => obsolete_plugins.keys)
        shutdown_plugins(obsolete_plugins)
      end

      # Start up filters
      if new_filters.size > 0 || deleted_filters.size > 0
        if new_filters.size > 0
          @logger.info("Starting new filters", :plugins => new_filters)
          new_filters.each do |f|
            f.logger = @logger
            @plugin_setup_mutex.synchronize { f.register }
          end
        end
        @filters = reloaded_filters
        @filterworkers.each_key do |filterworker|
          filterworker.filters = @filters
        end
      end

      if new_inputs.size > 0
        @logger.info("Starting new inputs", :plugins => new_inputs)
        new_inputs.each do |p|
          start_input(p)
        end
      end

      if new_outputs.size > 0
        @logger.info("Starting new outputs", :plugins => new_outputs)
        # BUG FIX: this loop iterated new_inputs, so newly added outputs
        # were never actually started.
        new_outputs.each do |p|
          start_output(p)
        end
      end

      # Wait for all inputs and outputs to be registered.
      wait_count = new_outputs.size + new_inputs.size
      while wait_count > 0 and @ready_queue.pop
        wait_count -= 1
      end
    ensure
      # BUG FIX: @reloading was only reset when an exception escaped, so a
      # successful reload (or the early return above) left it stuck at
      # true, permanently disabling shutdown_if_none_running.
      @reloading = false
    end
  end
end
public
# Install process signal handlers: SIGINT and SIGTERM trigger shutdown,
# SIGHUP triggers a config reload.  The commented-out USR2 handler
# dumped per-class object counts for debugging and is disabled because
# JRuby turns ObjectSpace off by default.
def register_signal_handlers
  # TODO(sissel): This doesn't work well in jruby since ObjectSpace is disabled
  # by default.
  #Signal.trap("USR2") do
    # TODO(sissel): Make this a function.
    #counts = Hash.new { |h,k| h[k] = 0 }
    #ObjectSpace.each_object do |obj|
      #counts[obj.class] += 1
    #end

    #@logger.info("SIGUSR1 received. Dumping state")
    #@logger.info("#{self.class.name} config")
    #@logger.info([" Inputs:", @inputs])
    #@logger.info([" Filters:", @filters])
    ##@logger.info([" Outputs:", @outputs])

    #@logger.info("Dumping counts of objects by class")
    #counts.sort { |a,b| a[1] <=> b[1] or a[0] <=> b[0] }.each do |key, value|
      #@logger.info("Class: [#{value}] #{key}")
    ##end
  #end # SIGUSR1

  Signal.trap("INT") do
    @logger.warn("SIGINT received, shutting down.")
    shutdown
  end

  Signal.trap("HUP") do
    @logger.warn("SIGHUP received, reloading.")
    reload
  end

  Signal.trap("TERM") do
    @logger.warn("SIGTERM received, shutting down.")
    shutdown
  end
end # def register_signal_handlers
private
# Thread body for one input plugin: register it, announce readiness on
# @ready_queue, then run it, restarting after a 1-second pause on any
# StandardError.  'queue' is where the input writes events (the filter
# queue, or the output queue when no filters are configured).
def run_input(input, queue)
  LogStash::Util::set_thread_name("input|#{input.to_s}")
  input.logger = @logger
  @plugin_setup_mutex.synchronize { input.register }
  @logger.info("Input registered", :plugin => input)
  @ready_queue << input

  done = false
  while !done
    begin
      input.run(queue)
      done = true
      input.finished
    rescue => e
      @logger.warn("Input thread exception", :plugin => input,
                   :exception => e, :backtrace => e.backtrace)
      @logger.error("Restarting input due to exception", :plugin => input)
      sleep(1)
      retry # This jumps to the top of the 'begin'
    end
  end

  # The following used to be a warning, but it confused so many users that
  # I disabled it until something better can be provided.
  #@logger.info("Input #{input.to_s} shutting down")

  # If we get here, the plugin finished, check if we need to shutdown.
  shutdown_if_none_running(LogStash::Inputs::Base, queue) unless @reloading
end # def run_input
# Run a filter thread
public
# Thread body for one filter worker: run it until it stops, then check
# whether the whole pipeline should begin shutting down.
def run_filter(filterworker, index, output_queue)
  LogStash::Util::set_thread_name("filter|worker|#{index}")
  filterworker.run
  @logger.warn("Filter worker shutting down", :index => index)

  # If we get here, the plugin finished, check if we need to shutdown.
  shutdown_if_none_running(LogStash::FilterWorker, output_queue) unless @reloading
end # def run_filter
# TODO(sissel): Factor this into an 'outputworker'
# Thread body for one output plugin: register it, announce readiness on
# @ready_queue, then pop events off 'queue' into the output until the
# output reports finished.  Any error pauses 1s and restarts the loop.
# NOTE(review): 'rescue Exception' also traps SystemExit/Interrupt;
# StandardError is usually what is wanted — confirm before changing.
def run_output(output, queue)
  LogStash::Util::set_thread_name("output|#{output.to_s}")
  output.logger = @logger
  @plugin_setup_mutex.synchronize { output.register }
  @logger.info("Output registered", :plugin => output)
  @ready_queue << output

  # TODO(sissel): We need a 'reset' or 'restart' method to call on errors
  begin
    while event = queue.pop do
      @logger.debug("Sending event", :target => output)
      output.handle(event)
      break if output.finished?
    end
  rescue Exception => e
    @logger.warn("Output thread exception", :plugin => output,
                 :exception => e, :backtrace => e.backtrace)
    # TODO(sissel): should we abort after too many failures?
    sleep(1)
    retry
  end # begin/rescue

  @logger.warn("Output shutting down", :plugin => output)

  # If we get here, the plugin finished, check if we need to shutdown.
  shutdown_if_none_running(LogStash::Outputs::Base) unless @reloading
end # def run_output
# If no plugin of type 'pluginclass' (or a subclass) is still running,
# begin pipeline shutdown by pushing the SHUTDOWN marker into 'queue'
# (when one is given).
def shutdown_if_none_running(pluginclass, queue=nil)
  @plugins_mutex.synchronize do
    # A plugin counts as live when it reports running and its thread is
    # still alive.
    alive = @plugins.count do |plugin, thread|
      plugin.is_a?(pluginclass) && plugin.running? && thread.alive?
    end
    @logger.debug("Plugins still running", :type => pluginclass,
                  :remaining => alive)

    if alive.zero?
      @logger.warn("All #{pluginclass} finished. Shutting down.")
      # Propagate the shutdown marker downstream, if we have a queue.
      queue << LogStash::SHUTDOWN if queue
    end
  end # @plugins_mutex.synchronize
end # def shutdown_if_none_running
end # class LogStash::Agent
# Script entry point: build and run an agent when executed directly.
# NOTE(review): the class above defines no 'argv=' writer and #run takes
# an args array — 'agent.argv = ARGV; agent.run' looks stale; confirm
# against LogStash::Program and current callers.
if __FILE__ == $0
  $: << "net"
  agent = LogStash::Agent.new
  agent.argv = ARGV
  agent.run
end
- Make '-vvv' no longer set $DEBUG (ruby global). Setting $DEBUG = true
  causes ruby to report every exception as it is raised, even ones that
  are later rescued, and this was tripping up users and causing them to
  report false bugs. To get the $DEBUG behavior again, you can now set
  RUBY_DEBUG=1 in your environment.
require "logstash/config/file"
require "logstash/filterworker"
require "logstash/logging"
require "logstash/sized_queue"
require "logstash/multiqueue"
require "logstash/namespace"
require "logstash/program"
require "logstash/threadwatchdog"
require "logstash/util"
require "optparse"
require "thread"
require "uri"
# TODO(sissel): only enable this if we are in debug mode.
# JRuby.objectspace=true
# Collect logs, ship them out.
class LogStash::Agent
include LogStash::Program
attr_reader :config
attr_reader :inputs
attr_reader :outputs
attr_reader :filters
attr_accessor :logger
# flags
attr_reader :config_path
attr_reader :logfile
attr_reader :verbose
public
# Set up a new agent with default state: logging to STDERR, one filter
# worker, no plugins loaded, and this file's directory on the plugin
# search path.
def initialize
  log_to(STDERR)
  @config_path = nil
  @config_string = nil
  @logfile = nil

  # flag/config defaults
  @verbose = 0
  @filterworker_count = 1

  @plugins = {}        # plugin object => the thread running it
  @plugins_mutex = Mutex.new
  @plugin_setup_mutex = Mutex.new  # serializes plugin #register calls
  @outputs = []
  @inputs = []
  @filters = []

  @plugin_paths = []
  @reloading = false

  # Add logstash's plugin path (plugin paths must contain inputs, outputs, filters)
  @plugin_paths << File.dirname(__FILE__)

  # TODO(sissel): Other default plugin paths?

  Thread::abort_on_exception = true
  @is_shutting_down = false
end # def initialize
public
# Direct all agent logging at 'target' (an IO or path understood by
# LogStash::Logger).
def log_to(target)
  @logger = LogStash::Logger.new(target)
end # def log_to
private
# Register the agent's own command-line flags on 'opts' (an
# OptionParser).  The handler blocks only record values into instance
# variables; nothing is validated here except the -w worker count.
#
# BUG FIX: corrected user-visible defects in the help text
# ("direcory", "concatonated", "If not input", stray '}').
def options(opts)
  opts.on("-f CONFIGPATH", "--config CONFIGPATH",
          "Load the logstash config from a specific file or directory. " \
          "If a directory is given instead of a file, all files in that " \
          "directory will be concatenated in lexicographical order and " \
          "then parsed as a single config file.") do |arg|
    @config_path = arg
  end # -f / --config

  opts.on("-e CONFIGSTRING",
          "Use the given string as the configuration data. Same syntax as " \
          "the config file. If no input is specified, " \
          "'stdin { type => stdin }' is default. If no output is " \
          "specified, 'stdout { debug => true }' is default.") do |arg|
    @config_string = arg
  end # -e

  opts.on("-w COUNT", "--filterworkers COUNT", Integer,
          "Run COUNT filter workers (default: 1)") do |arg|
    @filterworker_count = arg
    if @filterworker_count <= 0
      raise ArgumentError, "filter worker count must be > 0"
    end
  end # -w

  opts.on("-l", "--log FILE", "Log to a given path. Default is stdout.") do |path|
    @logfile = path
  end

  opts.on("-v", "Increase verbosity") do
    @verbose += 1
  end

  opts.on("-V", "--version", "Show the version of logstash") do
    require "logstash/version"
    puts "logstash #{LOGSTASH_VERSION}"
    exit(0)
  end

  opts.on("-p PLUGIN_PATH", "--pluginpath PLUGIN_PATH",
          "A colon-delimited path to find plugins in.") do |path|
    path.split(":").each do |p|
      @plugin_paths << p unless @plugin_paths.include?(p)
    end
  end
end # def options
# Parse options.
private
# Parse command-line arguments.  Pre-scans for -p/--pluginpath so plugin
# directories land on the ruby load path, then loads any plugins named
# by --<plugin>-<flag> style options so their flags can be registered
# with OptionParser before the real parse.
#
# Returns the non-option arguments left after parsing; raises
# OptionParser::InvalidOption on unknown flags.
def parse_options(args)
  @opts = OptionParser.new

  # Step one is to add agent flags.
  options(@opts)

  # TODO(sissel): Check for plugin_path flags, add them to @plugin_paths.
  args.each_with_index do |arg, index|
    next unless arg =~ /^(?:-p|--pluginpath)(?:=(.*))?$/
    path = $1
    if path.nil?
      # '-p <path>' form: the value is the following argument.
      path = args[index + 1]
    end
    @plugin_paths += path.split(":")
  end # args.each

  # At this point, we should load any plugin-specific flags.
  # These are 'unknown' flags that begin --<plugin>-flag
  # Put any plugin paths into the ruby library path for requiring later.
  @plugin_paths.each do |p|
    @logger.debug("Adding to ruby load path", :path => p)
    $:.unshift p
  end

  # TODO(sissel): Go through all inputs, filters, and outputs to get the flags.
  # Add plugin flags to @opts

  # Load any plugins that we have flags for.
  # TODO(sissel): The --<plugin> flag support currently will load
  # any matching plugins input, output, or filter. This means, for example,
  # that the 'amqp' input *and* output plugin will be loaded if you pass
  # --amqp-foo flag. This might cause confusion, but it seems reasonable for
  # now that any same-named component will have the same flags.
  plugins = []
  args.each do |arg|
    # skip things that don't look like plugin flags
    # NOTE(review): the character class [A-z] also matches the punctuation
    # between 'Z' and 'a' in ASCII; [A-Za-z0-9] is likely intended.
    next unless arg =~ /^--[A-z0-9]+-/
    name = arg.split("-")[2] # pull the plugin name out

    # Try to load any plugin by that name
    %w{inputs outputs filters}.each do |component|
      @plugin_paths.each do |path|
        plugin = File.join(path, component, name) + ".rb"
        @logger.debug("Plugin flag found; trying to load it",
                      :flag => arg, :plugin => plugin)
        if File.file?(plugin)
          @logger.info("Loading plugin", :plugin => plugin)
          require plugin
          [LogStash::Inputs, LogStash::Filters, LogStash::Outputs].each do |c|
            # If we get flag --foo-bar, check for LogStash::Inputs::Foo
            # and add any options to our option parser.
            klass_name = name.capitalize
            if c.const_defined?(klass_name)
              @logger.debug("Found plugin class", :class => "#{c}::#{klass_name})")
              klass = c.const_get(klass_name)
              # See LogStash::Config::Mixin::DSL#options
              klass.options(@opts)
              plugins << klass
            end # c.const_defined?
          end # each component type (input/filter/outputs)
        end # if File.file?(plugin)
      end # @plugin_paths.each
    end # %{inputs outputs filters}.each

    #if !found
    #@logger.fatal("Flag #{arg.inspect} requires plugin #{name}, but no plugin found.")
    #return false
    #end
  end # @remaining_args.each

  begin
    remainder = @opts.parse(args)
  rescue OptionParser::InvalidOption => e
    @logger.info("Invalid option", :exception => e)
    raise e
  end

  return remainder
end # def parse_options
private
# Validate and apply parsed flags: exactly one of -f/-e must be given,
# the config path (a glob) must match something, -l redirects stdout
# and stderr into a log file, RUBY_DEBUG in the environment turns on
# ruby's $DEBUG, and the -v count selects the log level.
# Raises "Configuration problem" on invalid combinations.
def configure
  if @config_path && @config_string
    @logger.fatal("Can't use -f and -e at the same time")
    raise "Configuration problem"
  elsif (@config_path.nil? || @config_path.empty?) && @config_string.nil?
    @logger.fatal("No config file given. (missing -f or --config flag?)")
    @logger.fatal(@opts.help)
    raise "Configuration problem"
  end

  # The config path is treated as a glob, so check for matches rather
  # than plain file existence.
  #if @config_path and !File.exist?(@config_path)
  if @config_path and Dir.glob(@config_path).length == 0
    @logger.fatal("Config file does not exist.", :path => @config_path)
    raise "Configuration problem"
  end

  if @logfile
    # Send both stdout and stderr into the log file (append mode).
    logfile = File.open(@logfile, "a")
    STDOUT.reopen(logfile)
    STDERR.reopen(logfile)
  end

  if ENV.include?("RUBY_DEBUG")
    $DEBUG = true
  end

  if @verbose >= 2 # logstash debug logs
    @logger.level = :debug
  elsif @verbose == 1 # logstash info logs
    @logger.level = :info
  else # Default log level
    @logger.level = :warn
  end
end # def configure
# Build a LogStash::Config::File from either -f (file, glob, or
# directory of files, concatenated in sorted order) or the -e config
# string.  Returns the config object with the agent's logger attached.
def read_config
  if @config_path
    # Support directory of config files.
    # https://logstash.jira.com/browse/LOGSTASH-106
    if File.directory?(@config_path)
      @logger.debug("Config path is a directory, scanning files",
                    :path => @config_path)
      paths = Dir.glob(File.join(@config_path, "*")).sort
    else
      # Get a list of files matching a glob. If the user specified a single
      # file, then this will only have one match and we are still happy.
      paths = Dir.glob(@config_path).sort
    end

    # BUG FIX: 'File.new(path).read' leaked one open file handle per
    # config file; File.read opens, reads, and closes.
    concatconfig = paths.map { |path| File.read(path) }
    config = LogStash::Config::File.new(nil, concatconfig.join("\n"))
  else # @config_string
    # Given a config string by the user (via the '-e' flag)
    config = LogStash::Config::File.new(nil, @config_string)
  end
  config.logger = @logger
  config
end
# Parses a config and returns [inputs, filters, outputs] — three arrays
# of freshly constructed plugin instances, each with the agent's logger
# attached.  Raises on unknown plugin types.
def parse_config(config)
  inputs = []
  filters = []
  outputs = []
  config.parse do |plugin|
    # 'plugin' is a hash containing:
    #   :type => the base class of the plugin (LogStash::Inputs::Base, etc)
    #   :plugin => the class of the plugin (LogStash::Inputs::File, etc)
    #   :parameters => hash of key-value parameters from the config.
    type = plugin[:type].config_name # "input" or "filter" etc...
    klass = plugin[:plugin]

    # Create a new instance of a plugin, called like:
    # -> LogStash::Inputs::File.new( params )
    instance = klass.new(plugin[:parameters])
    instance.logger = @logger

    case type
    when "input"
      inputs << instance
    when "filter"
      filters << instance
    when "output"
      outputs << instance
    else
      msg = "Unknown config type '#{type}'"
      @logger.error(msg)
      raise msg
    end # case type
  end # config.parse

  return inputs, filters, outputs
end
public
# Main entry point: install signal handlers, parse flags, read the
# config, and launch the pipeline on a background thread (join it with
# #wait).  Returns the non-option arguments left after parsing.
# NOTE(review): parse_options raises instead of returning false, so the
# 'remaining == false' branch below looks unreachable — confirm.
def run(args, &block)
  @logger.info("Register signal handlers")
  register_signal_handlers

  @logger.info("Parse options ")
  remaining = parse_options(args)
  if remaining == false
    raise "Option parsing failed. See error log."
  end

  @logger.info("Configure")
  configure

  # Load the config file
  @logger.info("Read config")
  config = read_config

  @logger.info("Start thread")
  @thread = Thread.new do
    LogStash::Util::set_thread_name(self.class.name)
    run_with_config(config, &block)
  end

  return remaining
end # def run
public
# Block until the agent's pipeline thread finishes.
#
# Returns 0, suitable for use as a process exit status.
def wait
  @thread.join
  0
end # def wait
private
# Start one input plugin on its own thread (plus extra threads, each on
# a clone of the plugin, when the plugin is threadable and requests
# more).  Inputs feed the filter queue when filters are configured,
# otherwise the output queue directly.
def start_input(input)
  @logger.debug("Starting input", :plugin => input)
  t = 0 # per-input thread counter, used only in log messages

  # inputs should write directly to output queue if there are no filters.
  input_target = @filters.length > 0 ? @filter_queue : @output_queue

  # check to see if input supports multiple threads
  if input.threadable
    @logger.debug("Threadable input", :plugin => input)
    # start up extra threads if need be
    (input.threads-1).times do
      input_thread = input.clone
      @logger.debug("Starting thread", :plugin => input, :thread => (t+=1))
      @plugins[input_thread] = Thread.new(input_thread, input_target) do |*args|
        run_input(*args)
      end
    end
  end

  @logger.debug("Starting thread", :plugin => input, :thread => (t+=1))
  @plugins[input] = Thread.new(input, input_target) do |*args|
    run_input(*args)
  end
end
private
# Start one output plugin on its own thread, giving it a dedicated
# sized queue that is also registered on the fan-out @output_queue.
def start_output(output)
  @logger.debug("Starting output", :plugin => output)
  queue = LogStash::SizedQueue.new(10 * @filterworker_count)
  queue.logger = @logger
  @output_queue.add_queue(queue)
  @output_plugin_queues[output] = queue
  @plugins[output] = Thread.new(output, queue) do |*args|
    run_output(*args)
  end
end
public
# The main pipeline setup: parse the config, apply -e defaults, wire up
# the queues, and start input, filter-worker, and output threads.
# Yields once setup is complete (so tests can synchronize), then blocks
# forever in a heartbeat loop, shutting down when no plugin threads
# remain alive.
def run_with_config(config)
  @plugins_mutex.synchronize do
    @inputs, @filters, @outputs = parse_config(config)

    # If we are given a config string (run usually with 'agent -e "some config string"')
    # then set up some defaults.
    if @config_string
      require "logstash/inputs/stdin"
      require "logstash/outputs/stdout"

      # All filters default to 'stdin' type
      @filters.each do |filter|
        filter.type = "stdin" if filter.type.nil?
      end

      # If no inputs are specified, use stdin by default.
      @inputs = [LogStash::Inputs::Stdin.new("type" => [ "stdin" ])] if @inputs.length == 0

      # If no outputs are specified, use stdout in debug mode.
      @outputs = [LogStash::Outputs::Stdout.new("debug" => [ "true" ])] if @outputs.length == 0
    end

    if @inputs.length == 0 or @outputs.length == 0
      raise "Must have both inputs and outputs configured."
    end

    # NOTE(petef) we should have config params for queue size
    @filter_queue = LogStash::SizedQueue.new(10 * @filterworker_count)
    @filter_queue.logger = @logger
    @output_queue = LogStash::MultiQueue.new
    @output_queue.logger = @logger
    @ready_queue = Queue.new

    # Start inputs
    @inputs.each do |input|
      start_input(input)
    end # @inputs.each

    # Create N filter-worker threads
    @filterworkers = {}
    if @filters.length > 0
      @filters.each do |filter|
        filter.logger = @logger
        @plugin_setup_mutex.synchronize do
          filter.register
        end
      end

      if @filterworker_count > 1
        @filters.each do |filter|
          if ! filter.threadsafe?
            # BUG FIX: was 'raise "fail"' — give the user an actionable
            # error message instead.
            raise "Cannot run #{@filterworker_count} filter workers: " \
                  "the #{filter.class.name} filter is not threadsafe. " \
                  "Use a single filter worker (-w 1)."
          end
        end
      end

      @filterworker_count.times do |n|
        # TODO(sissel): factor this out into a 'filterworker' that accepts
        # 'shutdown'
        # Start a filter worker
        filterworker = LogStash::FilterWorker.new(@filters, @filter_queue,
                                                  @output_queue)
        filterworker.logger = @logger
        thread = Thread.new(filterworker, n, @output_queue) do |*args|
          run_filter(*args)
        end
        @plugins[filterworker] = thread
        @filterworkers[filterworker] = thread
      end # N.times
    end # if @filters.length > 0

    # A thread to supervise filter workers
    watchdog = LogStash::ThreadWatchdog.new(@filterworkers.values)
    watchdog.logger = logger
    Thread.new do
      watchdog.watch
    end

    # Create output threads
    @output_plugin_queues = {}
    @outputs.each do |output|
      start_output(output)
    end # @outputs.each

    # Wait for all inputs and outputs to be registered.
    wait_count = outputs.size + inputs.size
    while wait_count > 0 and @ready_queue.pop
      wait_count -= 1
    end
    @logger.info("All plugins are started and registered.")
  end # synchronize

  # yield to a block in case someone's waiting for us to be done setting up
  # like tests, etc.
  yield if block_given?

  # Heartbeat loop: wake every 2 seconds; if no plugin threads remain
  # alive, begin shutdown.
  while sleep(2)
    if @plugins.values.count { |p| p.alive? } == 0
      @logger.warn("no plugins running, shutting down")
      shutdown
    end
    @logger.debug("heartbeat")
  end
end # def run_with_config
public
# Stop the agent.  Currently an unimplemented placeholder.
def stop
  # TODO(petef): Stop inputs, flush outputs, wait for finish,
  # then stop the event loop
end # def stop
# Shutdown the agent.
protected
# Stop all registered plugins, then exit the process with status 0.
# NOTE(review): shutdown_plugins does its work on a background thread,
# so the exit(0) below may race with plugin shutdown — confirm intended.
def shutdown
  @logger.info("Starting shutdown sequence")
  shutdown_plugins(@plugins)
  # When we get here, all inputs have finished, all messages are done
  @logger.info("Shutdown complete")
  exit(0)
end # def shutdown
# Ask every plugin in 'plugins' (a hash of plugin => thread) to shut
# down, waiting up to 10 seconds before giving up on stragglers.  The
# actual work happens on a background thread; this method returns
# immediately.  @is_shutting_down guards against concurrent shutdowns.
def shutdown_plugins(plugins)
  return if @is_shutting_down
  @is_shutting_down = true

  Thread.new do
    LogStash::Util::set_thread_name("logstash shutdown process")
    # TODO(sissel): Make this a flag
    force_shutdown_time = Time.now + 10

    finished_queue = Queue.new
    # Tell everything to shutdown.
    @logger.debug("Plugins to shutdown", :plugins => plugins.keys.collect(&:to_s))
    plugins.each do |p, thread|
      @logger.debug("Sending shutdown to: #{p.to_s}", :plugin => p)
      p.shutdown(finished_queue)
    end

    # Now wait until the queues we were given are empty.
    remaining = plugins.select { |p, thread| p.running? }
    while remaining.size > 0
      if (Time.now > force_shutdown_time)
        @logger.warn("Time to quit, even if some plugins aren't finished yet.")
        @logger.warn("Stuck plugins?", :remaining => remaining.map(&:first))
        break
      end

      @logger.debug("Waiting for plugins to finish.")
      plugin = finished_queue.pop(non_block=true) rescue nil
      if plugin.nil?
        sleep(1)
      else
        # BUG FIX: was 'plugin.running?', which re-tested only the plugin
        # that just finished instead of each plugin in the hash; also the
        # log line referenced the out-of-scope select variable 'p'.
        remaining = plugins.select { |p, thread| p.running? }
        @logger.debug("Plugin #{plugin.to_s} finished, waiting on the rest.",
                      :count => remaining.size,
                      :remaining => remaining.map(&:first))
      end
    end # while remaining.size > 0

    # BUG FIX: the flag used to be cleared right after spawning this
    # thread, defeating the reentrancy guard; clear it only once the
    # shutdown sequence has actually finished.
    @is_shutting_down = false
  end
end
# Reload configuration of filters, etc.  Re-reads the config and
# reconciles the running pipeline: removed plugins are stopped, added
# plugins are started, and the new filter list is pushed to the filter
# workers.  Called from the SIGHUP handler.
def reload
  @plugins_mutex.synchronize do
    begin
      @reloading = true
      # Reload the config file
      begin
        config = read_config
        reloaded_inputs, reloaded_filters, reloaded_outputs = parse_config(config)
      rescue Exception => e
        @logger.error("Aborting reload due to bad configuration", :exception => e)
        return
      end

      new_inputs = reloaded_inputs - @inputs
      new_filters = reloaded_filters - @filters
      new_outputs = reloaded_outputs - @outputs
      deleted_inputs = @inputs - reloaded_inputs
      deleted_filters = @filters - reloaded_filters
      deleted_outputs = @outputs - reloaded_outputs

      # Handle shutdown of input and output plugins
      obsolete_plugins = {}
      [deleted_inputs].flatten.each do |p|
        if @plugins.include? p
          obsolete_plugins[p] = @plugins[p]
          @plugins.delete(p)
        else
          @logger.warn("Couldn't find input plugin to stop", :plugin => p)
        end
      end

      [deleted_outputs].flatten.each do |p|
        if @plugins.include? p
          obsolete_plugins[p] = @plugins[p]
          @plugins.delete(p)
          @output_queue.remove_queue(@output_plugin_queues[p])
        else
          @logger.warn("Couldn't find output plugin to stop", :plugin => p)
        end
      end

      # Call reload on all existing plugins which are not being dropped
      (@plugins.keys - obsolete_plugins.keys).each(&:reload)
      (@filters - deleted_filters).each(&:reload)

      # Also remove filters
      deleted_filters.each { |f| obsolete_plugins[f] = nil }

      if obsolete_plugins.size > 0
        @logger.info("Stopping removed plugins:", :plugins => obsolete_plugins.keys)
        shutdown_plugins(obsolete_plugins)
      end

      # Start up filters
      if new_filters.size > 0 || deleted_filters.size > 0
        if new_filters.size > 0
          @logger.info("Starting new filters", :plugins => new_filters)
          new_filters.each do |f|
            f.logger = @logger
            @plugin_setup_mutex.synchronize { f.register }
          end
        end
        @filters = reloaded_filters
        @filterworkers.each_key do |filterworker|
          filterworker.filters = @filters
        end
      end

      if new_inputs.size > 0
        @logger.info("Starting new inputs", :plugins => new_inputs)
        new_inputs.each do |p|
          start_input(p)
        end
      end

      if new_outputs.size > 0
        @logger.info("Starting new outputs", :plugins => new_outputs)
        # BUG FIX: this loop iterated new_inputs, so newly added outputs
        # were never actually started.
        new_outputs.each do |p|
          start_output(p)
        end
      end

      # Wait for all inputs and outputs to be registered.
      wait_count = new_outputs.size + new_inputs.size
      while wait_count > 0 and @ready_queue.pop
        wait_count -= 1
      end
    ensure
      # BUG FIX: @reloading was only reset when an exception escaped, so a
      # successful reload (or the early return above) left it stuck at
      # true, permanently disabling shutdown_if_none_running.
      @reloading = false
    end
  end
end
public
# Install process signal handlers: SIGINT and SIGTERM trigger shutdown,
# SIGHUP triggers a config reload.  The commented-out USR2 handler
# dumped per-class object counts for debugging and is disabled because
# JRuby turns ObjectSpace off by default.
def register_signal_handlers
  # TODO(sissel): This doesn't work well in jruby since ObjectSpace is disabled
  # by default.
  #Signal.trap("USR2") do
    # TODO(sissel): Make this a function.
    #counts = Hash.new { |h,k| h[k] = 0 }
    #ObjectSpace.each_object do |obj|
      #counts[obj.class] += 1
    #end

    #@logger.info("SIGUSR1 received. Dumping state")
    #@logger.info("#{self.class.name} config")
    #@logger.info([" Inputs:", @inputs])
    #@logger.info([" Filters:", @filters])
    ##@logger.info([" Outputs:", @outputs])

    #@logger.info("Dumping counts of objects by class")
    #counts.sort { |a,b| a[1] <=> b[1] or a[0] <=> b[0] }.each do |key, value|
      #@logger.info("Class: [#{value}] #{key}")
    ##end
  #end # SIGUSR1

  Signal.trap("INT") do
    @logger.warn("SIGINT received, shutting down.")
    shutdown
  end

  Signal.trap("HUP") do
    @logger.warn("SIGHUP received, reloading.")
    reload
  end

  Signal.trap("TERM") do
    @logger.warn("SIGTERM received, shutting down.")
    shutdown
  end
end # def register_signal_handlers
private
# Thread body for one input plugin: register it, announce readiness on
# @ready_queue, then run it, restarting after a 1-second pause on any
# StandardError.  'queue' is where the input writes events (the filter
# queue, or the output queue when no filters are configured).
def run_input(input, queue)
  LogStash::Util::set_thread_name("input|#{input.to_s}")
  input.logger = @logger
  @plugin_setup_mutex.synchronize { input.register }
  @logger.info("Input registered", :plugin => input)
  @ready_queue << input

  done = false
  while !done
    begin
      input.run(queue)
      done = true
      input.finished
    rescue => e
      @logger.warn("Input thread exception", :plugin => input,
                   :exception => e, :backtrace => e.backtrace)
      @logger.error("Restarting input due to exception", :plugin => input)
      sleep(1)
      retry # This jumps to the top of the 'begin'
    end
  end

  # The following used to be a warning, but it confused so many users that
  # I disabled it until something better can be provided.
  #@logger.info("Input #{input.to_s} shutting down")

  # If we get here, the plugin finished, check if we need to shutdown.
  shutdown_if_none_running(LogStash::Inputs::Base, queue) unless @reloading
end # def run_input
# Run a filter thread
public
# Thread body for one filter worker: run it until it stops, then check
# whether the whole pipeline should begin shutting down.
def run_filter(filterworker, index, output_queue)
  LogStash::Util::set_thread_name("filter|worker|#{index}")
  filterworker.run
  @logger.warn("Filter worker shutting down", :index => index)

  # If we get here, the plugin finished, check if we need to shutdown.
  shutdown_if_none_running(LogStash::FilterWorker, output_queue) unless @reloading
end # def run_filter
# TODO(sissel): Factor this into an 'outputworker'
# Thread body for one output plugin: register it, announce readiness on
# @ready_queue, then pop events off 'queue' into the output until the
# output reports finished.  Any error pauses 1s and restarts the loop.
# NOTE(review): 'rescue Exception' also traps SystemExit/Interrupt;
# StandardError is usually what is wanted — confirm before changing.
def run_output(output, queue)
  LogStash::Util::set_thread_name("output|#{output.to_s}")
  output.logger = @logger
  @plugin_setup_mutex.synchronize { output.register }
  @logger.info("Output registered", :plugin => output)
  @ready_queue << output

  # TODO(sissel): We need a 'reset' or 'restart' method to call on errors
  begin
    while event = queue.pop do
      @logger.debug("Sending event", :target => output)
      output.handle(event)
      break if output.finished?
    end
  rescue Exception => e
    @logger.warn("Output thread exception", :plugin => output,
                 :exception => e, :backtrace => e.backtrace)
    # TODO(sissel): should we abort after too many failures?
    sleep(1)
    retry
  end # begin/rescue

  @logger.warn("Output shutting down", :plugin => output)

  # If we get here, the plugin finished, check if we need to shutdown.
  shutdown_if_none_running(LogStash::Outputs::Base) unless @reloading
end # def run_output
# If no plugin of type 'pluginclass' (or a subclass) is still running,
# begin pipeline shutdown by pushing the SHUTDOWN marker into 'queue'
# (when one is given).
def shutdown_if_none_running(pluginclass, queue=nil)
  @plugins_mutex.synchronize do
    # A plugin counts as live when it reports running and its thread is
    # still alive.
    alive = @plugins.count do |plugin, thread|
      plugin.is_a?(pluginclass) && plugin.running? && thread.alive?
    end
    @logger.debug("Plugins still running", :type => pluginclass,
                  :remaining => alive)

    if alive.zero?
      @logger.warn("All #{pluginclass} finished. Shutting down.")
      # Propagate the shutdown marker downstream, if we have a queue.
      queue << LogStash::SHUTDOWN if queue
    end
  end # @plugins_mutex.synchronize
end # def shutdown_if_none_running
end # class LogStash::Agent
# Script entry point: build and run an agent when executed directly.
# NOTE(review): the class above defines no 'argv=' writer and #run takes
# an args array — 'agent.argv = ARGV; agent.run' looks stale; confirm
# against LogStash::Program and current callers.
if __FILE__ == $0
  $: << "net"
  agent = LogStash::Agent.new
  agent.argv = ARGV
  agent.run
end
|
# Gem version constant for Lowmac.
module Lowmac
  # Frozen to prevent accidental in-place mutation of the version string.
  VERSION = "1.2.5".freeze
end
v1.2.6
# Gem version constant for Lowmac.
module Lowmac
  # Frozen to prevent accidental in-place mutation of the version string.
  VERSION = "1.2.6".freeze
end
|
module MaZMQ
  # EventMachine connection handler used with EM.watch on a ZeroMQ
  # socket descriptor.
  # NOTE(review): the initializer is commented out, so @socket is never
  # assigned and notify_readable will fail on nil — this looks like a
  # debugging snapshot; confirm before use.
  class Handler < EM::Connection
    #def initialize(socket)
    #  @socket = socket
    #end

    # Called by EM when the watched descriptor is readable: drain one
    # message from the socket and print it, if any.
    def notify_readable
      msg = @socket.recv_string
      return unless msg
      puts msg
    end

    # Called by EM when the descriptor becomes writable (debug trace).
    def notify_writable
      puts 'writable'
    end
  end
end
Debugging EM watch
module MaZMQ
  # EventMachine connection handler that bridges a ZeroMQ socket into
  # the EM reactor via watch/attach readability notifications.
  class Handler < EM::Connection
    # Keep a reference to the ZeroMQ socket we are watching.
    def initialize(socket)
      @socket = socket
    end

    # Called by EM when the watched descriptor is readable: drain one
    # message from the socket and print it, if any.
    def notify_readable
      msg = @socket.recv_string
      puts msg if msg
    end

    # Called by EM when the descriptor becomes writable (debug trace).
    def notify_writable
      puts 'writable'
    end
  end
end
|
require 'json'
require 'net/http'
module MediaWiki
class Butt
# Creates a new instance of MediaWiki::Butt
#
# ==== Attributes
#
# * +url+ - The FULL wiki URL. api.php can be omitted, but it will make harsh assumptions about your wiki configuration.
# * +use_ssl+ - Whether or not to use SSL. Will default to true.
#
# ==== Examples
#
# The example below shows an ideal usage of the method.
# => butt = MediaWiki::Butt.new("http://ftb.gamepedia.com/api.php")
#
# The example below shows a less than idea, but still functional, usage of the method. It is less than ideal because it has to assume that your API page is at /api.php, but it could easily be at /w/api.php, or even /wiki/api.php. It also does not use a secure connection.
# => butt = MediaWiki::Butt.new("http://ftb.gamepedia.com", false)
def initialize(url, use_ssl = true)
if url =~ /api.php$/
@url = url
else
@url = "#{url}/api.php"
end
@ssl = use_ssl
@logged_in = false
@tokens = {}
puts @url
end
# Performs a generic HTTP POST action and provides the response. This method generally should not be used by the user, unless there is not a method provided by the Butt developers for a particular action.
#
# ==== Attributes
#
# * +params+ - A basic hash containing MediaWiki API parameters. Please see mediawiki.org/wiki/API for more information.
# * +autoparse+ - Whether or not to provide a parsed version of the response's JSON. Will default to true.
#
# ==== Examples
#
# => login = butt.post({action: 'login', lgname: username, lgpassword: password, format: 'json'})
def post(params, autoparse = true)
uri = URI.parse(@url)
request = Net::HTTP::Post.new(uri)
request.set_form_data(params)
response = Net::HTTP.start(uri.hostname, uri.port) do |http|
http.request(request)
end
if response.is_a? Net::HTTPSuccess
if autoparse == true
return JSON.parse(response.body)
else
return response
end
else
return false
end
end
# Logs the user in to the wiki. This is generally required for editing, or getting restricted data.
#
# ==== Attributes
# * +username+ - The desired login handle
# * +password+ - The password of that user
#
# ==== Examples
# => butt.login("MyUsername", "My5up3r53cur3P@55w0rd")
def login(username, password)
params = {
action: 'login',
lgname: username,
lgpassword: password,
format: 'json'
}
result = post(params)
if result["login"]["result"] == "Success"
@logged_in = true
@tokens.clear
elsif result["login"]["result"] == "NeedToken" && result["login"]["token"] != nil
token_params = {
action: 'login',
lgname: username,
lgpassword: password,
lgtoken: result["login"]["token"],
format: 'json'
}
# There is no need to autoparse this, because we don't do anything with it.
post(token_params, false)
end
end
# Logs the current user out
#
# ==== Examples
# => butt.login("MyUsername", "My5up3r53cur3P@55w0rd")
# => # do stuff
# => butt.logout
# Logs the current user out, clearing cached tokens.
def logout
  # The guard was `if @logged_in = true` — an assignment, so logout always
  # ran and forced @logged_in on before resetting it. Test the flag instead.
  return unless @logged_in
  post(action: 'logout')
  @logged_in = false
  @tokens.clear
end
# Creates an account with the given parameters
#
# ==== Attributes
# *+username+ - The desired username
# *+usemail+ - Whether to use a random password and send it via email
# *+email+ - The desired email address.
# *+password+ - The desired password. Required only if usemail = false
# *+reason+ - The reason for creating the account, shown in the account creation log. Defaults to nil.
# *+language+ - The language code to set as default for the account being created. Defaults to 'en' or English. Use the language code, not the name.
#
# ==== Examples
#
# An example of using mailpassword can be seen below.
# => butt.create_account("MyUser", true, "MyEmailAddress@MailMan.com", nil, "Quiero un nuevo acuenta con correocontraseña", "es")
#
# An example of not using mailpassword can be seen below.
# => butt.create_account("MyUser", false, "MyEmailAddress@MailMain.com", "password", "Quiero un nuevo acuenta sin embargo correocontraseña", "es")
# Creates an account with the given parameters.
# NOTE(review): the original signature `(username, usemail = false, email,
# *password = nil, ...)` does not parse — splat params cannot take defaults
# and optional params cannot precede a required one — and the param hashes
# used a bare `token:` with no value plus a missing comma. Rewritten with a
# valid, explicit signature. Returns true/result on success, false otherwise.
def create_account(username, email, usemail = false, password = nil, reason = nil, language = 'en')
  params = {
    name: username,
    email: email,
    reason: reason,
    language: language,
    token: ''
  }
  if usemail
    # Random password mailed to the user.
    params[:mailpassword] = 'value'
  else
    params[:password] = password
  end
  result = post(params)
  return false unless result
  case result["createaccount"]["result"]
  when "Success"
    @tokens.clear
    true
  when "NeedToken"
    # Retry with the issued token; the original built these params but
    # never sent them.
    post(params.merge(token: result["createaccount"]["token"]))
  else
    false
  end
end
end
end
Split create_account into separate methods for password-based and email-based account creation.
require 'json'
require 'net/http'
module MediaWiki
class Butt
# Creates a new instance of MediaWiki::Butt
#
# ==== Attributes
#
# * +url+ - The FULL wiki URL. api.php can be omitted, but it will make harsh assumptions about your wiki configuration.
# * +use_ssl+ - Whether or not to use SSL. Will default to true.
#
# ==== Examples
#
# The example below shows an ideal usage of the method.
# => butt = MediaWiki::Butt.new("http://ftb.gamepedia.com/api.php")
#
# The example below shows a less than idea, but still functional, usage of the method. It is less than ideal because it has to assume that your API page is at /api.php, but it could easily be at /w/api.php, or even /wiki/api.php. It also does not use a secure connection.
# => butt = MediaWiki::Butt.new("http://ftb.gamepedia.com", false)
# Creates a new instance of MediaWiki::Butt pointing at the wiki's api.php.
def initialize(url, use_ssl = true)
  # Anchor on a literal "api.php" suffix; the previous /api.php$/ pattern
  # left the dot unescaped, so URLs such as "http://host/apixphp" matched.
  @url = url =~ /api\.php$/ ? url : "#{url}/api.php"
  @ssl = use_ssl
  @logged_in = false
  @tokens = {}
end
# Performs a generic HTTP POST action and provides the response. This method generally should not be used by the user, unless there is not a method provided by the Butt developers for a particular action.
#
# ==== Attributes
#
# * +params+ - A basic hash containing MediaWiki API parameters. Please see mediawiki.org/wiki/API for more information.
# * +autoparse+ - Whether or not to provide a parsed version of the response's JSON. Will default to true.
#
# ==== Examples
#
# => login = butt.post({action: 'login', lgname: username, lgpassword: password, format: 'json'})
def post(params, autoparse = true)
  uri = URI.parse(@url)
  request = Net::HTTP::Post.new(uri)
  request.set_form_data(params)
  # Honor the SSL setting captured in the constructor; previously @ssl was
  # stored but never used, so every request went over plain HTTP.
  response = Net::HTTP.start(uri.hostname, uri.port, use_ssl: @ssl) do |http|
    http.request(request)
  end
  # Non-2xx responses yield false so callers can detect failure.
  return false unless response.is_a?(Net::HTTPSuccess)
  autoparse ? JSON.parse(response.body) : response
end
# Logs the user in to the wiki. This is generally required for editing, or getting restricted data.
#
# ==== Attributes
#
# * +username+ - The desired login handle
# * +password+ - The password of that user
#
# ==== Examples
#
# => butt.login("MyUsername", "My5up3r53cur3P@55w0rd")
# Logs in to the wiki via action=login, following the NeedToken handshake.
# Returns true when login succeeded, false otherwise.
def login(username, password)
  params = {
    action: 'login',
    lgname: username,
    lgpassword: password,
    format: 'json'
  }
  result = post(params)
  # post returns false on HTTP failure; bail out instead of raising
  # NoMethodError on result["login"].
  return false unless result
  case result["login"]["result"]
  when "Success"
    @logged_in = true
    @tokens.clear
  when "NeedToken"
    token = result["login"]["token"]
    return false if token.nil?
    # Check the token round-trip so @logged_in reflects reality; previously
    # the second response was discarded unchecked.
    confirm = post(params.merge(lgtoken: token))
    if confirm && confirm["login"]["result"] == "Success"
      @logged_in = true
      @tokens.clear
    end
  end
  @logged_in
end
# Logs the current user out
#
# ==== Examples
#
# => butt.login("MyUsername", "My5up3r53cur3P@55w0rd")
# => # do stuff
# => butt.logout
# Logs the current user out and clears cached tokens; no-op when not
# logged in.
def logout
  return unless @logged_in
  post(action: 'logout')
  @logged_in = false
  @tokens.clear
end
# Creates an account using the standard procedure.
#
# ==== Attributes
#
# *+username+ - The desired username
# *+password+ - The desired password.
# *+language+ - The language code to set as default for the account being created. Defaults to 'en' or English. Use the language code, not the name.
# *+reason+ - The reason for creating the account, shown in the account creation log. Optional.
#
# ==== Examples
#
# => butt.create_account("MyUser", "password", "es", "MyEmailAddress@MailMain.com", "Quiero un nuevo acuenta sin embargo correocontraseña")
# Creates an account using the standard (password) procedure.
# Returns true/result on success, false otherwise.
def create_account(username, password, language = 'en', reason = nil)
  # `reason` was a splat (*reason), so it always reached the API as an
  # Array; a plain optional parameter keeps existing call sites working
  # while sending a proper string.
  params = {
    name: username,
    password: password,
    reason: reason,
    language: language,
    token: ''
  }
  result = post(params)
  # post returns false on HTTP failure.
  return false unless result
  case result["createaccount"]["result"]
  when "Success"
    @tokens.clear
    true
  when "NeedToken"
    # The original built the token params but never re-posted them.
    post(params.merge(token: result["createaccount"]["token"]))
  else
    false
  end
end
# Creates an account using the random-password-sent-by-email procedure.
#
# ==== Attributes
#
# *+username+ - The desired username
# *+email+ - The desired email address.
# *+reason+ - The reason for creating the account, shown in the account creation log. Optional.
# *+language+ - The language code to set as default for the account being created. Defaults to 'en' or English. Use the language code, not the name.
#
# ==== Examples
#
# => butt.create_account_email("MyUser", "MyEmailAddress@Whatever.com", "es", "Quiero una nueva acuenta porque quiero a comer caca")
# Creates an account using the random-password-sent-by-email procedure.
# Returns true/result on success, false otherwise.
def create_account_email(username, email, language = 'en', reason = nil)
  # `reason` was a splat (*reason), so it always reached the API as an
  # Array; a plain optional parameter keeps existing call sites working.
  params = {
    name: username,
    email: email,
    mailpassword: 'value',
    reason: reason,
    language: language,
    token: ''
  }
  result = post(params)
  # post returns false on HTTP failure.
  return false unless result
  case result["createaccount"]["result"]
  when "Success"
    @tokens.clear
    true
  when "NeedToken"
    # The original built the token params but never re-posted them.
    post(params.merge(token: result["createaccount"]["token"]))
  else
    false
  end
end
end
end
|
##
# Request class for midori
# @attr [String] ip client ip address
# @attr [Integer] port client port
# @attr [String] protocol protocol version of HTTP request
# @attr [Symbol] method HTTP method
# @attr [String] path request path
# @attr [Hash] query_params parameter parsed from query string
# @attr [String | nil] query_string request query string
# @attr [Hash] header request header
# @attr [String] body request body
# @attr [Hash] cookie cookie hash coming from request
# @attr [Boolean] parsed whether the request header parsed
# @attr [Boolean] body_parsed whether the request body parsed
# @attr [Hash] params params in the url
class Midori::Request
attr_accessor :ip, :port,
:protocol, :method, :path, :query_params, :query_string,
:header, :body, :parsed, :body_parsed, :params, :cookie
# Init Request
def initialize
  @header = {}
  @parsed = false        # true once the request head has been fully parsed
  @body_parsed = false   # true once Content-Length bytes of body arrived
  @is_websocket = false
  @is_eventsource = false
  @parser = Http::Parser.new
  @params = {}
  # NOTE(review): Hash.new(Array.new) shares ONE default array across all
  # missing keys; safe only while the default is never mutated in place.
  @query_params = Hash.new(Array.new)
  @cookie = {}
  @body = ''
  # Callback fired by http_parser once the request head is complete.
  @parser.on_headers_complete = proc do
    @protocol = @parser.http_version
    @method = @parser.http_method
    @path = @parser.request_url
    @header = @parser.headers
    # Extract the query string (text after '?') and parse it into params.
    @query_string = @path.match(/\?(.*?)$/)
    unless @query_string.nil?
      @query_string = @query_string[1]
      @query_params = CGI::parse(@query_string)
    end
    @cookie = CGI::Cookie.parse(@header['Cookie']) unless @header['Cookie'].nil?
    @path.gsub!(/\?(.*?)$/, '') # strip the query string from the path
    @method = @method.to_sym
    # Detect client real IP by RFC 7239
    @ip = @header['X-Real-IP'] unless @header['X-Real-IP'].nil?
    @parsed = true
    :stop # head consumed; remaining bytes are body and handled in #parse
  end
end
# Init an request with String data
# @param [String] data
# @return [nil] nil
def parse(data)
  # Call parser if header not parsed
  if @parsed
    @body += data
  else
    # The parser returns the count of head bytes consumed; when the head
    # completes inside this chunk, the remainder is body.
    offset = @parser << data
    @body += data[offset..-1] if @parsed
  end
  # nil.to_i is 0, so a missing Content-Length counts as zero and the body
  # is complete immediately. (The previous `|| 0` was dead code: #to_i
  # never returns nil.)
  if @parsed && @header['Content-Length'].to_i <= @body.bytesize
    @body_parsed = true
    pre_proceed
  end
  nil
end
# Preproceed the request after parsed
# @return [nil] nil
# Post-parse classification: promotes the request to WEBSOCKET or
# EVENTSOURCE based on its headers, and normalizes @method to a Symbol.
def pre_proceed
  upgrade_requested = @header['Upgrade'] == 'websocket' &&
                      @header['Connection'] == 'Upgrade'
  if upgrade_requested
    @is_websocket = true
    @method = :WEBSOCKET
  end
  if @header['Accept'] == 'text/event-stream'
    @is_eventsource = true
    @method = :EVENTSOURCE
  end
  @method = @method.to_sym
  nil
end
# Syntactic sugar for whether a request header is parsed
# @return [Boolean] parsed or not
def parsed?
  @parsed # set true once on_headers_complete has fired
end
# Syntactic sugar for whether a request body is parsed
# @return [Boolean] parsed or not
def body_parsed?
  @body_parsed # set true once Content-Length bytes of body have arrived
end
# Syntactic sugar for whether a request is a websocket request
# @return [Boolean] websocket or not
def websocket?
  @is_websocket # set by pre_proceed on an Upgrade: websocket request
end
# Syntactic sugar for whether a request is an eventsource request
# @return [Boolean] eventsource or not
def eventsource?
  @is_eventsource # set by pre_proceed on Accept: text/event-stream
end
end
Use remote_ip to detect client’s real IP
##
# Request class for midori
# @attr [String] ip client ip address
# @attr [Integer] port client port
# @attr [String] protocol protocol version of HTTP request
# @attr [Symbol] method HTTP method
# @attr [String] path request path
# @attr [Hash] query_params parameter parsed from query string
# @attr [String | nil] query_string request query string
# @attr [Hash] header request header
# @attr [String] body request body
# @attr [Hash] cookie cookie hash coming from request
# @attr [Boolean] parsed whether the request header parsed
# @attr [Boolean] body_parsed whether the request body parsed
# @attr [Hash] params params in the url
class Midori::Request
attr_accessor :ip, :port, :remote_ip,
:protocol, :method, :path, :query_params, :query_string,
:header, :body, :parsed, :body_parsed, :params, :cookie
# Init Request
def initialize
  @header = {}
  @parsed = false        # true once the request head has been fully parsed
  @body_parsed = false   # true once Content-Length bytes of body arrived
  @is_websocket = false
  @is_eventsource = false
  @parser = Http::Parser.new
  @params = {}
  # NOTE(review): Hash.new(Array.new) shares ONE default array across all
  # missing keys; safe only while the default is never mutated in place.
  @query_params = Hash.new(Array.new)
  @cookie = {}
  @body = ''
  # Callback fired by http_parser once the request head is complete.
  @parser.on_headers_complete = proc do
    @protocol = @parser.http_version
    @method = @parser.http_method
    @path = @parser.request_url
    @header = @parser.headers
    @remote_ip = parse_ip || @ip # Detect client real IP with RFC 7239
    # Extract the query string (text after '?') and parse it into params.
    @query_string = @path.match(/\?(.*?)$/)
    unless @query_string.nil?
      @query_string = @query_string[1]
      @query_params = CGI::parse(@query_string)
    end
    @cookie = CGI::Cookie.parse(@header['Cookie']) unless @header['Cookie'].nil?
    @path.gsub!(/\?(.*?)$/, '') # strip the query string from the path
    @method = @method.to_sym
    @parsed = true
    :stop # head consumed; remaining bytes are body and handled in #parse
  end
end
# Init an request with String data
# @param [String] data
# @return [nil] nil
def parse(data)
  # Call parser if header not parsed
  if @parsed
    @body += data
  else
    # The parser returns the count of head bytes consumed; when the head
    # completes inside this chunk, the remainder is body.
    offset = @parser << data
    @body += data[offset..-1] if @parsed
  end
  # nil.to_i is 0, so a missing Content-Length counts as zero and the body
  # is complete immediately. (The previous `|| 0` was dead code: #to_i
  # never returns nil.)
  if @parsed && @header['Content-Length'].to_i <= @body.bytesize
    @body_parsed = true
    pre_proceed
  end
  nil
end
# Resolves the client's real IP from proxy headers (RFC 7239 style).
# @return [String, nil] the X-Real-IP value when corroborated by
#   X-Forwarded-For, otherwise nil
def parse_ip
  client_ip = @header['X-Real-IP']
  return nil if client_ip.nil?
  # X-Real-IP alone may be spoofed, so it is only trusted when the
  # X-Forwarded-For chain contains the same address. Previously a present
  # X-Real-IP with an *absent* X-Forwarded-For raised NoMethodError on nil.
  forwarded = @header['X-Forwarded-For']
  return nil if forwarded.nil?
  forwarded.split(', ').include?(client_ip) ? client_ip : nil
end
# Preproceed the request after parsed
# @return [nil] nil
# Post-parse classification: promotes the request to WEBSOCKET or
# EVENTSOURCE based on its headers, and normalizes @method to a Symbol.
def pre_proceed
  upgrade_requested = @header['Upgrade'] == 'websocket' &&
                      @header['Connection'] == 'Upgrade'
  if upgrade_requested
    @is_websocket = true
    @method = :WEBSOCKET
  end
  if @header['Accept'] == 'text/event-stream'
    @is_eventsource = true
    @method = :EVENTSOURCE
  end
  @method = @method.to_sym
  nil
end
# Syntactic sugar for whether a request header is parsed
# @return [Boolean] parsed or not
def parsed?
  @parsed # set true once on_headers_complete has fired
end
# Syntactic sugar for whether a request body is parsed
# @return [Boolean] parsed or not
def body_parsed?
  @body_parsed # set true once Content-Length bytes of body have arrived
end
# Syntactic sugar for whether a request is a websocket request
# @return [Boolean] websocket or not
def websocket?
  @is_websocket # set by pre_proceed on an Upgrade: websocket request
end
# Syntactic sugar for whether a request is an eventsource request
# @return [Boolean] eventsource or not
def eventsource?
  @is_eventsource # set by pre_proceed on Accept: text/event-stream
end
end
|
module Mikunyan
# Class for representing Unity Asset
# @attr_reader [String] name Asset name
# @attr_reader [Integer] format file format number
# @attr_reader [String] generator_version version string of generator
# @attr_reader [Integer] target_platform target platform number
# @attr_reader [Symbol] endian data endianness (:little or :big)
# @attr_reader [Array<Mikunyan::Asset::Klass>] klasses defined classes
# @attr_reader [Array<Mikunyan::Asset::ObjectData>] objects included objects
# @attr_reader [Array<Integer>] add_ids ?
# @attr_reader [Array<Mikunyan::Asset::Reference>] references reference data
class Asset
attr_reader :name, :format, :generator_version, :target_platform, :endian, :klasses, :objects, :add_ids, :references
# Struct for representing Asset class definition
# @attr [Integer] class_id class ID
# @attr [Integer,nil] script_id script ID
# @attr [String] hash hash value (16 or 32 bytes)
# @attr [Mikunyan::TypeTree, nil] type_tree given TypeTree
Klass = Struct.new(:class_id, :script_id, :hash, :type_tree)
# Struct for representing Asset object information
# @attr [Integer] path_id path ID
# @attr [Integer] offset data offset
# @attr [Integer] size data size
# @attr [Integer,nil] type_id type ID
# @attr [Integer,nil] class_id class ID
# @attr [Integer,nil] class_idx class definition index
# @attr [Boolean] destroyed? destroyed or not
# @attr [String] data binary data of object
ObjectData = Struct.new(:path_id, :offset, :size, :type_id, :class_id, :class_idx, :destroyed?, :data)
# Struct for representing Asset reference information
# @attr [String] path path
# @attr [String] guid GUID (16 bytes)
# @attr [Integer] type ?
# @attr [String] file_path Asset name
Reference = Struct.new(:path, :guid, :type, :file_path)
# Load Asset from binary string
# @param [String] bin binary data
# @param [String] name Asset name
# @return [Mikunyan::Asset] deserialized Asset object
# Deserializes an Asset from a binary string by constructing an instance
# and feeding it through the private #load.
def self.load(bin, name)
  Asset.new(name).tap { |asset| asset.send(:load, bin) }
end
# Load Asset from file
# @param [String] file file name
# @param [String] name Asset name (automatically generated if not specified)
# @return [Mikunyan::Asset] deserialized Asset object
def self.file(file, name=nil)
  # Derive the default name from the *file* argument; the original called
  # File.basename(name, '.*') when name was nil, which raises TypeError.
  name ||= File.basename(file, '.*')
  Asset.load(File.binread(file), name)
end
# Returns list of all path IDs
# @return [Array<Integer>] list of all path IDs
# Collects the path ID of every object in this asset.
def path_ids
  @objects.map(&:path_id)
end
# Returns list of containers
# @return [Array<Hash>,nil] list of all containers
def containers
  # NOTE(review): path ID 1 appears to be the object holding the container
  # table (m_Container) — confirm against the asset format.
  obj = parse_object(1)
  return nil unless obj && obj.m_Container && obj.m_Container.array?
  obj.m_Container.value.map do |e|
    # Each entry is a (name, info) pair; info carries preloadIndex and a
    # pointer whose m_PathID identifies the referenced object.
    {:name => e.first.value, :preload_index => e.second.preloadIndex.value, :path_id => e.second.asset.m_PathID.value}
  end
end
# Parse object of given path ID
# @param [Integer,ObjectData] path_id path ID or object
# @return [Mikunyan::ObjectValue,nil] parsed object
# Parse object of given path ID
# @param [Integer,ObjectData] path_id path ID or object
# @return [Mikunyan::ObjectValue,nil] parsed object
def parse_object(path_id)
  # Accept either a path ID or an ObjectData directly (is_a? instead of
  # exact-class comparison).
  if path_id.is_a?(Integer)
    obj = @objects.find { |e| e.path_id == path_id }
    return nil unless obj
  elsif path_id.is_a?(ObjectData)
    obj = path_id
  else
    return nil
  end
  # Prefer the explicit class-definition index; otherwise fall back to
  # matching by class ID, then by type ID.
  klass =
    if obj.class_idx
      @klasses[obj.class_idx]
    else
      @klasses.find { |e| e.class_id == obj.class_id } ||
        @klasses.find { |e| e.class_id == obj.type_id }
    end
  # Previously a missing klass raised NoMethodError inside parse_type_tree.
  return nil if klass.nil?
  type_tree = Asset.parse_type_tree(klass)
  return nil unless type_tree
  parse_object_private(BinaryReader.new(obj.data, @endian), type_tree)
end
# Parse object of given path ID and simplify it
# @param [Integer,ObjectData] path_id path ID or object
# @return [Hash,nil] parsed object
# Parses the object, then collapses the ObjectValue tree to plain Ruby
# values.
def parse_object_simple(path_id)
  parsed = parse_object(path_id)
  Asset.object_simplify(parsed)
end
# Returns object type name string
# @param [Integer,ObjectData] path_id path ID or object
# @return [String,nil] type name
# Returns object type name string
# @param [Integer,ObjectData] path_id path ID or object
# @return [String,nil] type name
def object_type(path_id)
  if path_id.is_a?(Integer)
    obj = @objects.find { |e| e.path_id == path_id }
    return nil unless obj
  elsif path_id.is_a?(ObjectData)
    obj = path_id
  else
    return nil
  end
  klass =
    if obj.class_idx
      @klasses[obj.class_idx]
    else
      @klasses.find { |e| e.class_id == obj.class_id } ||
        @klasses.find { |e| e.class_id == obj.type_id }
    end
  return nil if klass.nil?
  # Prefer the root node of an embedded type tree; fall back to the static
  # class-ID table.
  if klass.type_tree && klass.type_tree.nodes[0]
    klass.type_tree.nodes[0].type
  else
    Mikunyan::CLASS_ID[klass.class_id]
  end
end
private
def initialize(name)
  @name = name
  # Default endianness; #load flips this to :little for format >= 9 files
  # whose endian flag is zero.
  @endian = :big
end
def load(bin)
  br = BinaryReader.new(bin)
  # ---- file header ----
  metadata_size = br.i32u
  size = br.i32u
  @format = br.i32u
  data_offset = br.i32u
  if @format >= 9
    # Format 9+ carries an endianness flag; zero means little-endian.
    @endian = :little if br.i32 == 0
    br.endian = @endian
  end
  @generator_version = br.cstr
  @target_platform = br.i32
  # ---- class (type tree) definitions; layout differs per format ----
  @klasses = []
  if @format >= 17
    has_type_trees = (br.i8 != 0)
    type_tree_count = br.i32u
    type_tree_count.times do
      class_id = br.i32
      br.adv(1)
      script_id = br.i16
      # Negative IDs and class 114 carry a 32-byte hash, others 16 bytes.
      if class_id < 0 || class_id == 114
        hash = br.read(32)
      else
        hash = br.read(16)
      end
      @klasses << Klass.new(class_id, script_id, hash, has_type_trees ? TypeTree.load(br) : TypeTree.load_default(hash))
    end
  elsif @format >= 13
    has_type_trees = (br.i8 != 0)
    type_tree_count = br.i32u
    type_tree_count.times do
      class_id = br.i32
      # 32-byte hash for negative (script) class IDs, else 16 bytes.
      if class_id < 0
        hash = br.read(32)
      else
        hash = br.read(16)
      end
      @klasses << Klass.new(class_id, nil, hash, has_type_trees ? TypeTree.load(br) : TypeTree.load_default(hash))
    end
  else
    @type_trees = {}
    type_tree_count = br.i32u
    type_tree_count.times do
      class_id = br.i32
      @klasses << Klass.new(class_id, nil, nil, @format == 10 || @format == 12 ? TypeTree.load(br) : TypeTree.load_legacy(br))
    end
  end
  # ---- object table ----
  long_object_ids = (@format >= 14 || (7 <= @format && @format <= 13 && br.i32 != 0))
  @objects = []
  object_count = br.i32u
  object_count.times do
    br.align(4) if @format >= 14
    path_id = long_object_ids ? br.i64 : br.i32
    offset = br.i32u
    size = br.i32u
    if @format >= 17
      # Format 17+: class definition referenced by index; no type/class IDs.
      @objects << ObjectData.new(path_id, offset, size, nil, nil, br.i32u, @format <= 10 && br.i16 != 0)
    else
      @objects << ObjectData.new(path_id, offset, size, br.i32, br.i16, nil, @format <= 10 && br.i16 != 0)
    end
    # Skip per-object fields present only in intermediate formats.
    br.adv(2) if 11 <= @format && @format <= 16
    br.adv(1) if 15 <= @format && @format <= 16
  end
  # ---- additional object IDs (format 11+) ----
  if @format >= 11
    @add_ids = []
    add_id_count = br.i32u
    add_id_count.times do
      br.align(4) if @format >= 14
      @add_ids << [(long_object_ids ? br.i64 : br.i32), br.i32]
    end
  end
  # ---- external references (format 6+) ----
  if @format >= 6
    @references = []
    reference_count = br.i32u
    reference_count.times do
      @references << Reference.new(br.cstr, br.read(16), br.i32, br.cstr)
    end
  end
  # ---- slice out each object's raw byte range from the data area ----
  @objects.each do |e|
    br.jmp(data_offset + e.offset)
    e.data = br.read(e.size)
  end
end
def parse_object_private(br, type_tree)
  r = nil
  node = type_tree[:node]
  children = type_tree[:children]
  if node.array?
    # Array node: read its 'size' child, then that many 'data' elements.
    data = []
    size = parse_object_private(br, children.find{|e| e[:name] == 'size'})
    data_type_tree = children.find{|e| e[:name] == 'data'}
    size.value.times do |i|
      data << parse_object_private(br, data_type_tree)
    end
    # TypelessData collapses to a raw byte string.
    data = data.map{|e| e.value}.pack('C*') if node.type == 'TypelessData'
    r = ObjectValue.new(node.name, node.type, br.endian, data)
  elsif node.size == -1
    # Variable-sized compound node.
    r = ObjectValue.new(node.name, node.type, br.endian)
    if children.size == 1 && children[0][:name] == 'Array' && children[0][:node].type == 'Array' && children[0][:node].array?
      # Single wrapped Array child: unwrap its value directly.
      r.value = parse_object_private(br, children[0]).value
      # Strings are stored as a char array; join and decode as UTF-8.
      r.value = r.value.map{|e| e.value}.pack('C*').force_encoding("utf-8") if node.type == 'string'
    else
      children.each do |child|
        r[child[:name]] = parse_object_private(br, child)
      end
    end
  elsif children.size > 0
    # Fixed-size struct: parse each field in declaration order.
    pos = br.pos
    r = ObjectValue.new(node.name, node.type, br.endian)
    r.is_struct = true
    children.each do |child|
      r[child[:name]] = parse_object_private(br, child)
    end
  else
    # Leaf node: decode a primitive by its type name.
    pos = br.pos
    value = nil
    case node.type
    when 'bool'
      value = (br.i8 != 0)
    when 'SInt8'
      value = br.i8s
    when 'UInt8', 'char'
      value = br.i8u
    when 'SInt16', 'short'
      value = br.i16s
    when 'UInt16', 'unsigned short'
      value = br.i16u
    when 'SInt32', 'int'
      value = br.i32s
    when 'UInt32', 'unsigned int'
      value = br.i32u
    when 'SInt64', 'long long'
      value = br.i64s
    when 'UInt64', 'unsigned long long'
      value = br.i64u
    when 'float'
      value = br.float
    when 'double'
      value = br.double
    when 'ColorRGBA'
      value = [br.i8u, br.i8u, br.i8u, br.i8u]
    else
      # Unknown primitive: keep the raw bytes.
      value = br.read(node.size)
    end
    # Advance by the declared node size regardless of how much was decoded.
    br.jmp(pos + node.size)
    r = ObjectValue.new(node.name, node.type, br.endian, value)
  end
  # Flag 0x4000 requests 4-byte alignment after the node.
  br.align(4) if node.flags & 0x4000 != 0
  r
end
# Recursively collapses an ObjectValue tree into plain Ruby values
# (arrays, hashes, scalars). Non-ObjectValue inputs pass through untouched.
def self.object_simplify(obj)
  return obj unless obj.class == ObjectValue
  if obj.type == 'pair'
    [object_simplify(obj['first']), object_simplify(obj['second'])]
  elsif obj.type == 'map' && obj.array?
    obj.value.map { |e| [object_simplify(e['first']), object_simplify(e['second'])] }.to_h
  elsif obj.value?
    object_simplify(obj.value)
  elsif obj.array?
    obj.value.map { |e| object_simplify(e) }
  else
    obj.keys.each_with_object({}) do |key, simplified|
      simplified[key] = object_simplify(obj[key])
    end
  end
end
# Converts a flat, depth-annotated node list into a nested
# {:name, :node, :children} tree; returns nil when no type tree exists.
def self.parse_type_tree(klass)
  return nil unless klass.type_tree
  root = {}
  ancestors = []
  klass.type_tree.nodes.each do |node|
    entry = {:name => node.name, :node => node, :children => []}
    if node.depth == 0
      root = entry
    else
      ancestors[node.depth - 1][:children] << entry
    end
    ancestors[node.depth] = entry
  end
  root
end
end
end
Improve performance in decoding string and TypelessData
module Mikunyan
# Class for representing Unity Asset
# @attr_reader [String] name Asset name
# @attr_reader [Integer] format file format number
# @attr_reader [String] generator_version version string of generator
# @attr_reader [Integer] target_platform target platform number
# @attr_reader [Symbol] endian data endianness (:little or :big)
# @attr_reader [Array<Mikunyan::Asset::Klass>] klasses defined classes
# @attr_reader [Array<Mikunyan::Asset::ObjectData>] objects included objects
# @attr_reader [Array<Integer>] add_ids ?
# @attr_reader [Array<Mikunyan::Asset::Reference>] references reference data
class Asset
attr_reader :name, :format, :generator_version, :target_platform, :endian, :klasses, :objects, :add_ids, :references
# Struct for representing Asset class definition
# @attr [Integer] class_id class ID
# @attr [Integer,nil] script_id script ID
# @attr [String] hash hash value (16 or 32 bytes)
# @attr [Mikunyan::TypeTree, nil] type_tree given TypeTree
Klass = Struct.new(:class_id, :script_id, :hash, :type_tree)
# Struct for representing Asset object information
# @attr [Integer] path_id path ID
# @attr [Integer] offset data offset
# @attr [Integer] size data size
# @attr [Integer,nil] type_id type ID
# @attr [Integer,nil] class_id class ID
# @attr [Integer,nil] class_idx class definition index
# @attr [Boolean] destroyed? destroyed or not
# @attr [String] data binary data of object
ObjectData = Struct.new(:path_id, :offset, :size, :type_id, :class_id, :class_idx, :destroyed?, :data)
# Struct for representing Asset reference information
# @attr [String] path path
# @attr [String] guid GUID (16 bytes)
# @attr [Integer] type ?
# @attr [String] file_path Asset name
Reference = Struct.new(:path, :guid, :type, :file_path)
# Load Asset from binary string
# @param [String] bin binary data
# @param [String] name Asset name
# @return [Mikunyan::Asset] deserialized Asset object
# Deserializes an Asset from a binary string by constructing an instance
# and feeding it through the private #load.
def self.load(bin, name)
  Asset.new(name).tap { |asset| asset.send(:load, bin) }
end
# Load Asset from file
# @param [String] file file name
# @param [String] name Asset name (automatically generated if not specified)
# @return [Mikunyan::Asset] deserialized Asset object
def self.file(file, name=nil)
  # Derive the default name from the *file* argument; the original called
  # File.basename(name, '.*') when name was nil, which raises TypeError.
  name ||= File.basename(file, '.*')
  Asset.load(File.binread(file), name)
end
# Returns list of all path IDs
# @return [Array<Integer>] list of all path IDs
# Collects the path ID of every object in this asset.
def path_ids
  @objects.map(&:path_id)
end
# Returns list of containers
# @return [Array<Hash>,nil] list of all containers
def containers
  # NOTE(review): path ID 1 appears to be the object holding the container
  # table (m_Container) — confirm against the asset format.
  obj = parse_object(1)
  return nil unless obj && obj.m_Container && obj.m_Container.array?
  obj.m_Container.value.map do |e|
    # Each entry is a (name, info) pair; info carries preloadIndex and a
    # pointer whose m_PathID identifies the referenced object.
    {:name => e.first.value, :preload_index => e.second.preloadIndex.value, :path_id => e.second.asset.m_PathID.value}
  end
end
# Parse object of given path ID
# @param [Integer,ObjectData] path_id path ID or object
# @return [Mikunyan::ObjectValue,nil] parsed object
# Parse object of given path ID
# @param [Integer,ObjectData] path_id path ID or object
# @return [Mikunyan::ObjectValue,nil] parsed object
def parse_object(path_id)
  # Accept either a path ID or an ObjectData directly (is_a? instead of
  # exact-class comparison).
  if path_id.is_a?(Integer)
    obj = @objects.find { |e| e.path_id == path_id }
    return nil unless obj
  elsif path_id.is_a?(ObjectData)
    obj = path_id
  else
    return nil
  end
  # Prefer the explicit class-definition index; otherwise fall back to
  # matching by class ID, then by type ID.
  klass =
    if obj.class_idx
      @klasses[obj.class_idx]
    else
      @klasses.find { |e| e.class_id == obj.class_id } ||
        @klasses.find { |e| e.class_id == obj.type_id }
    end
  # Previously a missing klass raised NoMethodError inside parse_type_tree.
  return nil if klass.nil?
  type_tree = Asset.parse_type_tree(klass)
  return nil unless type_tree
  parse_object_private(BinaryReader.new(obj.data, @endian), type_tree)
end
# Parse object of given path ID and simplify it
# @param [Integer,ObjectData] path_id path ID or object
# @return [Hash,nil] parsed object
# Parses the object, then collapses the ObjectValue tree to plain Ruby
# values.
def parse_object_simple(path_id)
  parsed = parse_object(path_id)
  Asset.object_simplify(parsed)
end
# Returns object type name string
# @param [Integer,ObjectData] path_id path ID or object
# @return [String,nil] type name
# Returns object type name string
# @param [Integer,ObjectData] path_id path ID or object
# @return [String,nil] type name
def object_type(path_id)
  if path_id.is_a?(Integer)
    obj = @objects.find { |e| e.path_id == path_id }
    return nil unless obj
  elsif path_id.is_a?(ObjectData)
    obj = path_id
  else
    return nil
  end
  klass =
    if obj.class_idx
      @klasses[obj.class_idx]
    else
      @klasses.find { |e| e.class_id == obj.class_id } ||
        @klasses.find { |e| e.class_id == obj.type_id }
    end
  return nil if klass.nil?
  # Prefer the root node of an embedded type tree; fall back to the static
  # class-ID table.
  if klass.type_tree && klass.type_tree.nodes[0]
    klass.type_tree.nodes[0].type
  else
    Mikunyan::CLASS_ID[klass.class_id]
  end
end
private
def initialize(name)
  @name = name
  # Default endianness; #load flips this to :little for format >= 9 files
  # whose endian flag is zero.
  @endian = :big
end
def load(bin)
  br = BinaryReader.new(bin)
  # ---- file header ----
  metadata_size = br.i32u
  size = br.i32u
  @format = br.i32u
  data_offset = br.i32u
  if @format >= 9
    # Format 9+ carries an endianness flag; zero means little-endian.
    @endian = :little if br.i32 == 0
    br.endian = @endian
  end
  @generator_version = br.cstr
  @target_platform = br.i32
  # ---- class (type tree) definitions; layout differs per format ----
  @klasses = []
  if @format >= 17
    has_type_trees = (br.i8 != 0)
    type_tree_count = br.i32u
    type_tree_count.times do
      class_id = br.i32
      br.adv(1)
      script_id = br.i16
      # Negative IDs and class 114 carry a 32-byte hash, others 16 bytes.
      if class_id < 0 || class_id == 114
        hash = br.read(32)
      else
        hash = br.read(16)
      end
      @klasses << Klass.new(class_id, script_id, hash, has_type_trees ? TypeTree.load(br) : TypeTree.load_default(hash))
    end
  elsif @format >= 13
    has_type_trees = (br.i8 != 0)
    type_tree_count = br.i32u
    type_tree_count.times do
      class_id = br.i32
      # 32-byte hash for negative (script) class IDs, else 16 bytes.
      if class_id < 0
        hash = br.read(32)
      else
        hash = br.read(16)
      end
      @klasses << Klass.new(class_id, nil, hash, has_type_trees ? TypeTree.load(br) : TypeTree.load_default(hash))
    end
  else
    @type_trees = {}
    type_tree_count = br.i32u
    type_tree_count.times do
      class_id = br.i32
      @klasses << Klass.new(class_id, nil, nil, @format == 10 || @format == 12 ? TypeTree.load(br) : TypeTree.load_legacy(br))
    end
  end
  # ---- object table ----
  long_object_ids = (@format >= 14 || (7 <= @format && @format <= 13 && br.i32 != 0))
  @objects = []
  object_count = br.i32u
  object_count.times do
    br.align(4) if @format >= 14
    path_id = long_object_ids ? br.i64 : br.i32
    offset = br.i32u
    size = br.i32u
    if @format >= 17
      # Format 17+: class definition referenced by index; no type/class IDs.
      @objects << ObjectData.new(path_id, offset, size, nil, nil, br.i32u, @format <= 10 && br.i16 != 0)
    else
      @objects << ObjectData.new(path_id, offset, size, br.i32, br.i16, nil, @format <= 10 && br.i16 != 0)
    end
    # Skip per-object fields present only in intermediate formats.
    br.adv(2) if 11 <= @format && @format <= 16
    br.adv(1) if 15 <= @format && @format <= 16
  end
  # ---- additional object IDs (format 11+) ----
  if @format >= 11
    @add_ids = []
    add_id_count = br.i32u
    add_id_count.times do
      br.align(4) if @format >= 14
      @add_ids << [(long_object_ids ? br.i64 : br.i32), br.i32]
    end
  end
  # ---- external references (format 6+) ----
  if @format >= 6
    @references = []
    reference_count = br.i32u
    reference_count.times do
      @references << Reference.new(br.cstr, br.read(16), br.i32, br.cstr)
    end
  end
  # ---- slice out each object's raw byte range from the data area ----
  @objects.each do |e|
    br.jmp(data_offset + e.offset)
    e.data = br.read(e.size)
  end
end
def parse_object_private(br, type_tree)
  r = nil
  node = type_tree[:node]
  children = type_tree[:children]
  if node.array?
    # Array node: read its 'size' child, then that many 'data' elements.
    data = nil
    size = parse_object_private(br, children.find{|e| e[:name] == 'size'}).value
    data_type_tree = children.find{|e| e[:name] == 'data'}
    if node.type == 'TypelessData'
      # Fast path: read size * element_size raw bytes in one call instead
      # of decoding every byte as a separate ObjectValue.
      data = br.read(size * data_type_tree[:node].size)
    else
      data = size.times.map{ parse_object_private(br, data_type_tree) }
    end
    r = ObjectValue.new(node.name, node.type, br.endian, data)
  elsif node.size == -1
    # Variable-sized compound node.
    r = ObjectValue.new(node.name, node.type, br.endian)
    if children.size == 1 && children[0][:name] == 'Array' && children[0][:node].type == 'Array' && children[0][:node].array?
      if node.type == 'string'
        # Fast path for strings: read the length, then the raw bytes.
        size = parse_object_private(br, children[0][:children].find{|e| e[:name] == 'size'}).value
        r.value = br.read(size * children[0][:children].find{|e| e[:name] == 'data'}[:node].size).force_encoding("utf-8")
        # Apply the inner Array node's alignment flag, which the skipped
        # recursion would otherwise have handled.
        br.align(4) if children[0][:node].flags & 0x4000 != 0
      else
        r.value = parse_object_private(br, children[0]).value
      end
    else
      children.each do |child|
        r[child[:name]] = parse_object_private(br, child)
      end
    end
  elsif children.size > 0
    # Fixed-size struct: parse each field in declaration order.
    pos = br.pos
    r = ObjectValue.new(node.name, node.type, br.endian)
    r.is_struct = true
    children.each do |child|
      r[child[:name]] = parse_object_private(br, child)
    end
  else
    # Leaf node: decode a primitive by its type name.
    pos = br.pos
    value = nil
    case node.type
    when 'bool'
      value = (br.i8 != 0)
    when 'SInt8'
      value = br.i8s
    when 'UInt8', 'char'
      value = br.i8u
    when 'SInt16', 'short'
      value = br.i16s
    when 'UInt16', 'unsigned short'
      value = br.i16u
    when 'SInt32', 'int'
      value = br.i32s
    when 'UInt32', 'unsigned int'
      value = br.i32u
    when 'SInt64', 'long long'
      value = br.i64s
    when 'UInt64', 'unsigned long long'
      value = br.i64u
    when 'float'
      value = br.float
    when 'double'
      value = br.double
    when 'ColorRGBA'
      value = [br.i8u, br.i8u, br.i8u, br.i8u]
    else
      # Unknown primitive: keep the raw bytes.
      value = br.read(node.size)
    end
    # Advance by the declared node size regardless of how much was decoded.
    br.jmp(pos + node.size)
    r = ObjectValue.new(node.name, node.type, br.endian, value)
  end
  # Flag 0x4000 requests 4-byte alignment after the node.
  br.align(4) if node.flags & 0x4000 != 0
  r
end
# Recursively collapses an ObjectValue tree into plain Ruby values
# (arrays, hashes, scalars). Non-ObjectValue inputs pass through untouched.
def self.object_simplify(obj)
  return obj unless obj.class == ObjectValue
  if obj.type == 'pair'
    [object_simplify(obj['first']), object_simplify(obj['second'])]
  elsif obj.type == 'map' && obj.array?
    obj.value.map { |e| [object_simplify(e['first']), object_simplify(e['second'])] }.to_h
  elsif obj.value?
    object_simplify(obj.value)
  elsif obj.array?
    obj.value.map { |e| object_simplify(e) }
  else
    obj.keys.each_with_object({}) do |key, simplified|
      simplified[key] = object_simplify(obj[key])
    end
  end
end
# Converts a flat, depth-annotated node list into a nested
# {:name, :node, :children} tree; returns nil when no type tree exists.
def self.parse_type_tree(klass)
  return nil unless klass.type_tree
  root = {}
  ancestors = []
  klass.type_tree.nodes.each do |node|
    entry = {:name => node.name, :node => node, :children => []}
    if node.depth == 0
      root = entry
    else
      ancestors[node.depth - 1][:children] << entry
    end
    ancestors[node.depth] = entry
  end
  root
end
end
end
|
module Nexpose
module NexposeAPI
include XMLUtils
#
#
#
# Triggers generation of the report with the given report ID; returns the
# API success flag.
def report_generate(param)
  execute(make_xml('ReportGenerateRequest', {'report-id' => param})).success
end
#
#
#
# Returns the URI of the most recently generated report for the given
# report configuration ID, or nil when there is none or the call fails.
def report_last(param)
  r = execute(make_xml('ReportHistoryRequest', {'reportcfg-id' => param}))
  return nil unless r.success
  latest = nil
  # Track the summary with the highest numeric id directly; the original
  # collected every summary and sorted the list just to take the first.
  r.res.elements.each("//ReportSummary") do |rep|
    id = rep.attributes['id'].to_i
    latest = [id, rep.attributes['report-URI']] if latest.nil? || id > latest[0]
  end
  latest && latest[1]
end
#
#
#
# Fetches the full generation history for a report configuration and
# returns the raw API response.
def report_history(param)
  request = make_xml('ReportHistoryRequest', {'reportcfg-id' => param})
  execute(request)
end
#
#
#
# Deletes a report configuration by ID; returns the API success flag.
def report_config_delete(param)
  execute(make_xml('ReportDeleteRequest', {'reportcfg-id' => param})).success
end
#
#
#
# Deletes a generated report by ID; returns the API success flag.
def report_delete(param)
  execute(make_xml('ReportDeleteRequest', {'report-id' => param})).success
end
#
#
#
# Lists available report templates as an array of
# {:template_id, :name, :description} hashes, or false on API failure.
def report_template_listing
  r = execute(make_xml('ReportTemplateListingRequest', {}))
  return false unless r.success
  res = []
  r.res.elements.each("//ReportTemplateSummary") do |template|
    desc = ''
    # Use a relative XPath: the original "//description" searched the whole
    # document from the root, so every template picked up a description
    # belonging to another template.
    template.elements.each("description") do |ent|
      desc = ent.text
    end
    res << {
      :template_id => template.attributes['id'].to_s,
      :name => template.attributes['name'].to_s,
      :description => desc.to_s
    }
  end
  res
end
end
# === Description
# Object that represents the summary of a Report Configuration.
#
# Summary data for a single report configuration as returned by the NSC.
class ReportConfigSummary
  # The report configuration ID.
  attr_reader :id
  # The unique name of the report.
  attr_reader :name
  # The report output format.
  attr_reader :format
  # Timestamp of the most recent generation.
  attr_reader :last_generated_on
  # Relative URI of the most recently generated report.
  attr_reader :last_generated_uri

  # Capture the summary fields verbatim.
  def initialize(id, name, format, last_generated_on, last_generated_uri)
    @id                 = id
    @name               = name
    @format             = format
    @last_generated_on  = last_generated_on
    @last_generated_uri = last_generated_uri
  end
end
# === Description
# Object that represents the schedule on which to automatically generate new reports.
class ReportHistory
# true if an error condition exists; false otherwise
attr_reader :error
# Error message string
attr_reader :error_msg
# The last XML request sent by this object
attr_reader :request_xml
# The last XML response received by this object
attr_reader :response_xml
# The NSC Connection associated with this object
attr_reader :connection
# The report definition (report config) ID
attr_reader :config_id
# Array (ReportSummary*)
attr_reader :report_summaries
# Issue a ReportHistoryRequest for the given report configuration and
# store the raw request/response XML.
#
# NOTE(review): the response is only stored; xml_parse is never invoked
# here, so @report_summaries stays empty unless a caller parses explicitly.
def initialize(connection, config_id)
@error = false
@connection = connection
@config_id = config_id
@report_summaries = []
# The request XML is built by hand rather than via make_xml.
reportHistory_request = APIRequest.new('<ReportHistoryRequest session-id="' + "#{connection.session_id}" + '" reportcfg-id="' + "#{@config_id}" + '"/>', @connection.geturl())
reportHistory_request.execute()
@response_xml = reportHistory_request.response_xml
@request_xml = reportHistory_request.request_xml
end
# Populate @report_summaries from a ReportHistoryResponse document.
# Sets @error/@error_msg when the response's success attribute is not '1'.
def xml_parse(response)
response = REXML::Document.new(response.to_s)
status = response.root.attributes['success']
if (status == '1')
response.elements.each('ReportHistoryResponse/ReportSummary') do |r|
@report_summaries.push(ReportSummary.new(r.attributes["id"], r.attributes["cfg-id"], r.attributes["status"], r.attributes["generated-on"], r.attributes['report-uri']))
end
else
@error = true
# NOTE(review): "Reponse" typo is preserved -- it is a runtime message.
@error_msg = 'Error ReportHistoryReponse'
end
end
end
# === Description
# Object that represents the summary of a single report.
# Snapshot of a single generated report.
class ReportSummary
  # The report ID.
  attr_reader :id
  # The parent report configuration ID.
  attr_reader :cfg_id
  # Generation status: available | generating | failed.
  attr_reader :status
  # When the report was generated.
  attr_reader :generated_on
  # Relative URI of the report.
  attr_reader :report_uri

  def initialize(id, cfg_id, status, generated_on, report_uri)
    @id, @cfg_id, @status = id, cfg_id, status
    @generated_on, @report_uri = generated_on, report_uri
  end
end
# === Description
#
class ReportAdHoc
include XMLUtils
# true if an error condition exists; false otherwise
attr_reader :error
# Error message string
attr_reader :error_msg
# The NSC Connection associated with this object
attr_reader :connection
# Report template ID string, e.g. full-audit
attr_reader :template_id
# pdf|html|xml|text|csv|raw-xml
attr_reader :format
# Array of (ReportFilter)*
attr_reader :filters
# The last XML request sent by this object
attr_reader :request_xml
# The last XML response received by this object
attr_reader :response_xml
# NOTE(review): never assigned anywhere in this class.
attr_reader :report_decoded
# Prepare an ad hoc report request; nothing is sent until generate().
def initialize(connection, template_id = 'full-audit', format = 'raw-xml')
@error = false
@connection = connection
@filters = Array.new()
@template_id = template_id
@format = format
end
# Add a filter restricting what the report covers.
# filter_type can be site|group|device|scan
# id is the ID number. For scan, you can use 'last' for the most recently run scan
def addFilter(filter_type, id)
filter = ReportFilter.new(filter_type, id)
filters.push(filter)
end
# Send the ad hoc generate request.  Returns the parsed XML of the
# base64-encoded MIME part of the response, or nil when the response is
# not the expected multipart/mixed payload or no base64 part is found.
def generate()
request_xml = '<ReportAdhocGenerateRequest session-id="' + @connection.session_id + '">'
request_xml += '<AdhocReportConfig template-id="' + @template_id + '" format="' + @format + '">'
request_xml += '<Filters>'
@filters.each do |f|
request_xml += '<filter type="' + f.type + '" id="'+ f.id.to_s + '"/>'
end
request_xml += '</Filters>'
request_xml += '</AdhocReportConfig>'
request_xml += '</ReportAdhocGenerateRequest>'
ad_hoc_request = APIRequest.new(request_xml, @connection.url)
ad_hoc_request.execute()
content_type_response = ad_hoc_request.raw_response.header['Content-Type']
if content_type_response =~ /multipart\/mixed;\s*boundary=([^\s]+)/
# NeXpose sends an incorrect boundary format which breaks parsing
# Eg: boundary=XXX; charset=XXX
# Fix by removing everything from the last semi-colon onward
last_semi_colon_index = content_type_response.index(/;/, content_type_response.index(/boundary/))
content_type_response = content_type_response[0, last_semi_colon_index]
data = "Content-Type: " + content_type_response + "\r\n\r\n" + ad_hoc_request.raw_response_data
doc = Rex::MIME::Message.new data
doc.parts.each do |part|
if /.*base64.*/ =~ part.header.to_s
return parse_xml(part.content.unpack("m*")[0])
end
end
end
end
end
# === Description
# Object that represents the configuration of a report definition.
#
# Represents the configuration of a report definition on the NSC.
class ReportConfig
  # true if an error condition exists; false otherwise
  attr_reader :error
  # Error message string
  attr_reader :error_msg
  # The last XML request sent by this object
  attr_reader :request_xml
  # The last XML response received by this object
  attr_reader :response_xml
  # The NSC Connection associated with this object
  attr_reader :connection
  # The ID for this report definition
  attr_reader :config_id
  # A unique name for this report definition
  attr_reader :name
  # The template ID used for this report definition
  attr_reader :template_id
  # html, db, txt, xml, raw-xml, csv, pdf
  attr_reader :format
  # Timezone reported by the NSC for this definition
  attr_reader :timezone
  # Owner reported by the NSC for this definition
  attr_reader :owner
  # Array of (ReportFilter)* - The Sites, Asset Groups, or Devices to run the report against
  attr_reader :filters
  # Automatically generate a new report at the conclusion of a scan (1 or 0)
  attr_reader :generate_after_scan
  # ReportSchedule on which to generate reports
  attr_reader :schedule
  # Store the reports on the server (1 or 0)
  attr_reader :storeOnServer
  # Location to store the report on the server
  attr_reader :store_location
  # Form to send the report via email: "file", "zip", "url", or NULL (don't send email)
  attr_reader :email_As
  # boolean - Send the Email to all Authorized Users
  attr_reader :email_to_all
  # Array containing the email addresses of the recipients
  attr_reader :email_recipients
  # IP Address or Hostname of SMTP Relay Server
  attr_reader :smtp_relay_server
  # Sets the FROM field of the Email
  attr_reader :sender
  # TODO: export hooks, currently never populated
  attr_reader :db_export
  attr_reader :csv_export
  attr_reader :xml_export

  # Load an existing report configuration from the NSC; when config_id is
  # -1 the server lookup fails and the freshly generated defaults stand.
  def initialize(connection, config_id = -1)
    @error = false
    @connection = connection
    @config_id = config_id
    @xml_tag_stack = Array.new()
    @filters = Array.new()
    @email_recipients = Array.new()
    @name = "New Report " + rand(999999999).to_s
    r = @connection.execute('<ReportConfigRequest session-id="' + @connection.session_id.to_s + '" reportcfg-id="' + @config_id.to_s + '"/>')
    if (r.success)
      # Block variable renamed from `r` so it no longer shadows the
      # response object above.
      r.res.elements.each('ReportConfigResponse/ReportConfig') do |cfg|
        @name = cfg.attributes['name']
        @format = cfg.attributes['format']
        @timezone = cfg.attributes['timezone']
        # BUG FIX: this used to assign @id, which nothing reads; the
        # exposed accessor is config_id.
        @config_id = cfg.attributes['id']
        @template_id = cfg.attributes['template-id']
        @owner = cfg.attributes['owner']
      end
    else
      @error = true
      @error_msg = 'Error ReportHistoryReponse'
    end
  end

  # === Description
  # Generate a new report on this report definition.
  #
  # NOTE(review): the original body called generateReport(@connection,
  # @config_id, debug) -- i.e. itself with the wrong arity -- which always
  # raised ArgumentError.  Delegating to the connection's NexposeAPI helper
  # is the apparent intent; confirm the expected return value with callers.
  def generateReport(debug = false)
    @connection.report_generate(@config_id)
  end

  # === Description
  # Save the report definition to the NSC.
  # Returns true when saved (and updates @config_id), false otherwise.
  def saveReport()
    r = @connection.execute('<ReportSaveRequest session-id="' + @connection.session_id.to_s + '">' + getXML().to_s + ' </ReportSaveRequest>')
    if (r.success)
      # NOTE(review): assumes the response object exposes #attributes;
      # confirm against the APIResponse implementation.
      @config_id = r.attributes['reportcfg-id']
      return true
    end
    return false
  end

  # === Description
  # Adds a new filter to the report config
  def addFilter(filter_type, id)
    filter = ReportFilter.new(filter_type, id)
    @filters.push(filter)
  end

  # === Description
  # Adds a new email recipient
  def addEmailRecipient(recipient)
    @email_recipients.push(recipient)
  end

  # === Description
  # Sets the schedule for this report config
  def setSchedule(schedule)
    @schedule = schedule
  end

  # Serialize this configuration into the ReportConfig XML fragment used by
  # ReportSaveRequest.  Email/SMTP settings are not serialized here.
  def getXML()
    xml = '<ReportConfig id="' + @config_id.to_s + '" name="' + @name.to_s + '" template-id="' + @template_id.to_s + '" format="' + @format.to_s + '">'
    xml += ' <Filters>'
    @filters.each do |f|
      xml += ' <' + f.type.to_s + ' id="' + f.id.to_s + '"/>'
    end
    xml += ' </Filters>'
    xml += ' <Generate after-scan="' + @generate_after_scan.to_s + '">'
    if (@schedule)
      xml += ' <Schedule type="' + @schedule.type.to_s + '" interval="' + @schedule.interval.to_s + '" start="' + @schedule.start.to_s + '"/>'
    end
    xml += ' </Generate>'
    xml += ' <Delivery>'
    xml += ' <Storage storeOnServer="' + @storeOnServer.to_s + '">'
    if (@store_location and @store_location.length > 0)
      xml += ' <location>' + @store_location.to_s + '</location>'
    end
    xml += ' </Storage>'
    xml += ' </Delivery>'
    xml += ' </ReportConfig>'
    return xml
  end

  def set_name(name)
    @name = name
  end

  def set_template_id(template_id)
    @template_id = template_id
  end

  def set_format(format)
    @format = format
  end

  def set_email_As(email_As)
    @email_As = email_As
  end

  def set_storeOnServer(storeOnServer)
    @storeOnServer = storeOnServer
  end

  def set_smtp_relay_server(smtp_relay_server)
    @smtp_relay_server = smtp_relay_server
  end

  def set_sender(sender)
    @sender = sender
  end

  def set_generate_after_scan(generate_after_scan)
    @generate_after_scan = generate_after_scan
  end
end
# === Description
# Object that represents a report filter which determines which sites, asset
# groups, and/or devices that a report is run against. gtypes are
# "SiteFilter", "AssetGroupFilter", "DeviceFilter", or "ScanFilter". gid is
# the site-id, assetgroup-id, or devce-id. ScanFilter, if used, specifies
# a specifies a specific scan to use as the data source for the report. The gid
# can be a specific scan-id or "first" for the first run scan, or “last” for
# the last run scan.
#
# A single report filter: the type names what kind of entity is filtered
# (site, asset group, device, or scan) and the id names which one.
class ReportFilter
  # Filter type identifier.
  attr_reader :type
  # Identifier of the entity the filter selects.
  attr_reader :id

  def initialize(type, id)
    @type, @id = type, id
  end
end
# === Description
# Object that represents the schedule on which to automatically generate new reports.
#
# Schedule on which reports are automatically generated.
class ReportSchedule
  # The type of schedule (daily, hourly, monthly, weekly).
  attr_reader :type
  # The frequency with which to run the scan.
  attr_reader :interval
  # The earliest date to generate the report.
  attr_reader :start

  def initialize(type, interval, start)
    @type, @interval, @start = type, interval, start
  end
end
# Fetches and holds the list of report templates known to the NSC.
class ReportTemplateListing
  attr_reader :error_msg
  attr_reader :error
  attr_reader :request_xml
  attr_reader :response_xml
  attr_reader :connection
  attr_reader :xml_tag_stack
  # Array of ReportTemplateSummary objects parsed from the response.
  attr_reader :report_template_summaries

  # Request the template listing and build the summaries, or set
  # @error/@error_msg on failure.
  def initialize(connection)
    @error = nil
    @connection = connection
    @report_template_summaries = Array.new()
    r = @connection.execute('<ReportTemplateListingRequest session-id="' + connection.session_id.to_s + '"/>')
    if (r.success)
      r.res.elements.each('ReportTemplateListingResponse/ReportTemplateSummary') do |summary|
        # BUG FIX: this used to reference the undefined constant
        # ReportTemplateSumary (typo) and pass only two arguments, while
        # ReportTemplateSummary#initialize requires three.  The description
        # is taken from the child element when present.
        desc = summary.elements['description']
        @report_template_summaries.push(
          ReportTemplateSummary.new(summary.attributes['id'],
                                    summary.attributes['name'],
                                    desc ? desc.text : ''))
      end
    else
      @error = true
      @error_msg = 'ReportTemplateListingRequest Parse Error'
    end
  end
end
# Lightweight record describing a single report template.
class ReportTemplateSummary
  # Template identifier.
  attr_reader :id
  # Human-readable template name.
  attr_reader :name
  # Template description text.
  attr_reader :description

  def initialize(id, name, description)
    @id, @name, @description = id, name, description
  end
end
# A named report section holding arbitrary named properties.
class ReportSection
  attr_reader :name
  # Hash of property name (String) => value.
  attr_reader :properties

  def initialize(name)
    # BUG FIX: was Array.new, but addProperty indexes with a String key,
    # which raises TypeError on an Array.  The usage implies a Hash.
    @properties = {}
    @name = name
  end

  # Store a named property for this section (key is stringified).
  def addProperty(name, value)
    @properties[name.to_s] = value
  end
end
end
# Changed accessor method on report history (ReportHistory#initialize now
# calls a different Connection accessor in the revision below).
module Nexpose
module NexposeAPI
  include XMLUtils

  # Generate a new report from an existing report configuration.
  #
  # param - the report-id of the report to generate
  #
  # Returns true when the NSC accepted the request.
  def report_generate(param)
    r = execute(make_xml('ReportGenerateRequest', {'report-id' => param}))
    r.success
  end

  # Return the URI of the most recently generated report for a report
  # configuration, or nil when none exists or the request failed.
  #
  # param - the reportcfg-id to look up
  def report_last(param)
    r = execute(make_xml('ReportHistoryRequest', {'reportcfg-id' => param}))
    res = nil
    if (r.success)
      stk = []
      r.res.elements.each("//ReportSummary") do |rep|
        stk << [rep.attributes['id'].to_i, rep.attributes['report-URI']]
      end
      if (stk.length > 0)
        # Highest report id first == most recently generated.
        stk.sort! { |a, b| b[0] <=> a[0] }
        res = stk[0][1]
      end
    end
    res
  end

  # Fetch the raw report history response for a report configuration.
  def report_history(param)
    execute(make_xml('ReportHistoryRequest', {'reportcfg-id' => param}))
  end

  # Delete a report configuration.  Returns true on success.
  def report_config_delete(param)
    r = execute(make_xml('ReportDeleteRequest', {'reportcfg-id' => param}))
    r.success
  end

  # Delete a single generated report.  Returns true on success.
  def report_delete(param)
    r = execute(make_xml('ReportDeleteRequest', {'report-id' => param}))
    r.success
  end

  # List the available report templates.
  #
  # Returns an array of hashes with :template_id, :name and :description,
  # or false when the request failed.
  def report_template_listing
    r = execute(make_xml('ReportTemplateListingRequest', {}))
    if (r.success)
      res = []
      r.res.elements.each("//ReportTemplateSummary") do |template|
        desc = ''
        # BUG FIX: "//description" searches from the document root, so every
        # template used to pick up the last <description> in the whole
        # response.  The relative path stays inside the current template.
        template.elements.each("description") do |ent|
          desc = ent.text
        end
        res << {
          :template_id => template.attributes['id'].to_s,
          :name => template.attributes['name'].to_s,
          :description => desc.to_s
        }
      end
      res
    else
      false
    end
  end
end
# === Description
# Object that represents the summary of a Report Configuration.
#
# Summary data for a single report configuration as returned by the NSC.
class ReportConfigSummary
  # The report configuration ID.
  attr_reader :id
  # The unique name of the report.
  attr_reader :name
  # The report output format.
  attr_reader :format
  # Timestamp of the most recent generation.
  attr_reader :last_generated_on
  # Relative URI of the most recently generated report.
  attr_reader :last_generated_uri

  # Capture the summary fields verbatim.
  def initialize(id, name, format, last_generated_on, last_generated_uri)
    @id                 = id
    @name               = name
    @format             = format
    @last_generated_on  = last_generated_on
    @last_generated_uri = last_generated_uri
  end
end
# === Description
# Object that represents the schedule on which to automatically generate new reports.
class ReportHistory
# true if an error condition exists; false otherwise
attr_reader :error
# Error message string
attr_reader :error_msg
# The last XML request sent by this object
attr_reader :request_xml
# The last XML response received by this object
attr_reader :response_xml
# The NSC Connection associated with this object
attr_reader :connection
# The report definition (report config) ID
attr_reader :config_id
# Array (ReportSummary*)
attr_reader :report_summaries
# Issue a ReportHistoryRequest for the given report configuration and
# store the raw request/response XML.
#
# NOTE(review): the response is only stored; xml_parse is never invoked
# here, so @report_summaries stays empty unless a caller parses explicitly.
def initialize(connection, config_id)
@error = false
@connection = connection
@config_id = config_id
@report_summaries = []
# NOTE(review): this revision calls @connection.get where the earlier
# copy of this file called @connection.geturl() -- confirm which accessor
# the Connection class actually exposes.
reportHistory_request = APIRequest.new('<ReportHistoryRequest session-id="' + "#{connection.session_id}" + '" reportcfg-id="' + "#{@config_id}" + '"/>', @connection.get)
reportHistory_request.execute()
@response_xml = reportHistory_request.response_xml
@request_xml = reportHistory_request.request_xml
end
# Populate @report_summaries from a ReportHistoryResponse document.
# Sets @error/@error_msg when the response's success attribute is not '1'.
def xml_parse(response)
response = REXML::Document.new(response.to_s)
status = response.root.attributes['success']
if (status == '1')
response.elements.each('ReportHistoryResponse/ReportSummary') do |r|
@report_summaries.push(ReportSummary.new(r.attributes["id"], r.attributes["cfg-id"], r.attributes["status"], r.attributes["generated-on"], r.attributes['report-uri']))
end
else
@error = true
# NOTE(review): "Reponse" typo is preserved -- it is a runtime message.
@error_msg = 'Error ReportHistoryReponse'
end
end
end
# === Description
# Object that represents the summary of a single report.
# Snapshot of a single generated report.
class ReportSummary
  # The report ID.
  attr_reader :id
  # The parent report configuration ID.
  attr_reader :cfg_id
  # Generation status: available | generating | failed.
  attr_reader :status
  # When the report was generated.
  attr_reader :generated_on
  # Relative URI of the report.
  attr_reader :report_uri

  def initialize(id, cfg_id, status, generated_on, report_uri)
    @id, @cfg_id, @status = id, cfg_id, status
    @generated_on, @report_uri = generated_on, report_uri
  end
end
# === Description
#
class ReportAdHoc
include XMLUtils
# true if an error condition exists; false otherwise
attr_reader :error
# Error message string
attr_reader :error_msg
# The NSC Connection associated with this object
attr_reader :connection
# Report template ID string, e.g. full-audit
attr_reader :template_id
# pdf|html|xml|text|csv|raw-xml
attr_reader :format
# Array of (ReportFilter)*
attr_reader :filters
# The last XML request sent by this object
attr_reader :request_xml
# The last XML response received by this object
attr_reader :response_xml
# NOTE(review): never assigned anywhere in this class.
attr_reader :report_decoded
# Prepare an ad hoc report request; nothing is sent until generate().
def initialize(connection, template_id = 'full-audit', format = 'raw-xml')
@error = false
@connection = connection
@filters = Array.new()
@template_id = template_id
@format = format
end
# Add a filter restricting what the report covers.
# filter_type can be site|group|device|scan
# id is the ID number. For scan, you can use 'last' for the most recently run scan
def addFilter(filter_type, id)
filter = ReportFilter.new(filter_type, id)
filters.push(filter)
end
# Send the ad hoc generate request.  Returns the parsed XML of the
# base64-encoded MIME part of the response, or nil when the response is
# not the expected multipart/mixed payload or no base64 part is found.
def generate()
request_xml = '<ReportAdhocGenerateRequest session-id="' + @connection.session_id + '">'
request_xml += '<AdhocReportConfig template-id="' + @template_id + '" format="' + @format + '">'
request_xml += '<Filters>'
@filters.each do |f|
request_xml += '<filter type="' + f.type + '" id="'+ f.id.to_s + '"/>'
end
request_xml += '</Filters>'
request_xml += '</AdhocReportConfig>'
request_xml += '</ReportAdhocGenerateRequest>'
ad_hoc_request = APIRequest.new(request_xml, @connection.url)
ad_hoc_request.execute()
content_type_response = ad_hoc_request.raw_response.header['Content-Type']
if content_type_response =~ /multipart\/mixed;\s*boundary=([^\s]+)/
# NeXpose sends an incorrect boundary format which breaks parsing
# Eg: boundary=XXX; charset=XXX
# Fix by removing everything from the last semi-colon onward
last_semi_colon_index = content_type_response.index(/;/, content_type_response.index(/boundary/))
content_type_response = content_type_response[0, last_semi_colon_index]
data = "Content-Type: " + content_type_response + "\r\n\r\n" + ad_hoc_request.raw_response_data
doc = Rex::MIME::Message.new data
doc.parts.each do |part|
if /.*base64.*/ =~ part.header.to_s
return parse_xml(part.content.unpack("m*")[0])
end
end
end
end
end
# === Description
# Object that represents the configuration of a report definition.
#
# Represents the configuration of a report definition on the NSC.
class ReportConfig
  # true if an error condition exists; false otherwise
  attr_reader :error
  # Error message string
  attr_reader :error_msg
  # The last XML request sent by this object
  attr_reader :request_xml
  # The last XML response received by this object
  attr_reader :response_xml
  # The NSC Connection associated with this object
  attr_reader :connection
  # The ID for this report definition
  attr_reader :config_id
  # A unique name for this report definition
  attr_reader :name
  # The template ID used for this report definition
  attr_reader :template_id
  # html, db, txt, xml, raw-xml, csv, pdf
  attr_reader :format
  # Timezone reported by the NSC for this definition
  attr_reader :timezone
  # Owner reported by the NSC for this definition
  attr_reader :owner
  # Array of (ReportFilter)* - The Sites, Asset Groups, or Devices to run the report against
  attr_reader :filters
  # Automatically generate a new report at the conclusion of a scan (1 or 0)
  attr_reader :generate_after_scan
  # ReportSchedule on which to generate reports
  attr_reader :schedule
  # Store the reports on the server (1 or 0)
  attr_reader :storeOnServer
  # Location to store the report on the server
  attr_reader :store_location
  # Form to send the report via email: "file", "zip", "url", or NULL (don't send email)
  attr_reader :email_As
  # boolean - Send the Email to all Authorized Users
  attr_reader :email_to_all
  # Array containing the email addresses of the recipients
  attr_reader :email_recipients
  # IP Address or Hostname of SMTP Relay Server
  attr_reader :smtp_relay_server
  # Sets the FROM field of the Email
  attr_reader :sender
  # TODO: export hooks, currently never populated
  attr_reader :db_export
  attr_reader :csv_export
  attr_reader :xml_export

  # Load an existing report configuration from the NSC; when config_id is
  # -1 the server lookup fails and the freshly generated defaults stand.
  def initialize(connection, config_id = -1)
    @error = false
    @connection = connection
    @config_id = config_id
    @xml_tag_stack = Array.new()
    @filters = Array.new()
    @email_recipients = Array.new()
    @name = "New Report " + rand(999999999).to_s
    r = @connection.execute('<ReportConfigRequest session-id="' + @connection.session_id.to_s + '" reportcfg-id="' + @config_id.to_s + '"/>')
    if (r.success)
      # Block variable renamed from `r` so it no longer shadows the
      # response object above.
      r.res.elements.each('ReportConfigResponse/ReportConfig') do |cfg|
        @name = cfg.attributes['name']
        @format = cfg.attributes['format']
        @timezone = cfg.attributes['timezone']
        # BUG FIX: this used to assign @id, which nothing reads; the
        # exposed accessor is config_id.
        @config_id = cfg.attributes['id']
        @template_id = cfg.attributes['template-id']
        @owner = cfg.attributes['owner']
      end
    else
      @error = true
      @error_msg = 'Error ReportHistoryReponse'
    end
  end

  # === Description
  # Generate a new report on this report definition.
  #
  # NOTE(review): the original body called generateReport(@connection,
  # @config_id, debug) -- i.e. itself with the wrong arity -- which always
  # raised ArgumentError.  Delegating to the connection's NexposeAPI helper
  # is the apparent intent; confirm the expected return value with callers.
  def generateReport(debug = false)
    @connection.report_generate(@config_id)
  end

  # === Description
  # Save the report definition to the NSC.
  # Returns true when saved (and updates @config_id), false otherwise.
  def saveReport()
    r = @connection.execute('<ReportSaveRequest session-id="' + @connection.session_id.to_s + '">' + getXML().to_s + ' </ReportSaveRequest>')
    if (r.success)
      # NOTE(review): assumes the response object exposes #attributes;
      # confirm against the APIResponse implementation.
      @config_id = r.attributes['reportcfg-id']
      return true
    end
    return false
  end

  # === Description
  # Adds a new filter to the report config
  def addFilter(filter_type, id)
    filter = ReportFilter.new(filter_type, id)
    @filters.push(filter)
  end

  # === Description
  # Adds a new email recipient
  def addEmailRecipient(recipient)
    @email_recipients.push(recipient)
  end

  # === Description
  # Sets the schedule for this report config
  def setSchedule(schedule)
    @schedule = schedule
  end

  # Serialize this configuration into the ReportConfig XML fragment used by
  # ReportSaveRequest.  Email/SMTP settings are not serialized here.
  def getXML()
    xml = '<ReportConfig id="' + @config_id.to_s + '" name="' + @name.to_s + '" template-id="' + @template_id.to_s + '" format="' + @format.to_s + '">'
    xml += ' <Filters>'
    @filters.each do |f|
      xml += ' <' + f.type.to_s + ' id="' + f.id.to_s + '"/>'
    end
    xml += ' </Filters>'
    xml += ' <Generate after-scan="' + @generate_after_scan.to_s + '">'
    if (@schedule)
      xml += ' <Schedule type="' + @schedule.type.to_s + '" interval="' + @schedule.interval.to_s + '" start="' + @schedule.start.to_s + '"/>'
    end
    xml += ' </Generate>'
    xml += ' <Delivery>'
    xml += ' <Storage storeOnServer="' + @storeOnServer.to_s + '">'
    if (@store_location and @store_location.length > 0)
      xml += ' <location>' + @store_location.to_s + '</location>'
    end
    xml += ' </Storage>'
    xml += ' </Delivery>'
    xml += ' </ReportConfig>'
    return xml
  end

  def set_name(name)
    @name = name
  end

  def set_template_id(template_id)
    @template_id = template_id
  end

  def set_format(format)
    @format = format
  end

  def set_email_As(email_As)
    @email_As = email_As
  end

  def set_storeOnServer(storeOnServer)
    @storeOnServer = storeOnServer
  end

  def set_smtp_relay_server(smtp_relay_server)
    @smtp_relay_server = smtp_relay_server
  end

  def set_sender(sender)
    @sender = sender
  end

  def set_generate_after_scan(generate_after_scan)
    @generate_after_scan = generate_after_scan
  end
end
# === Description
# Object that represents a report filter which determines which sites, asset
# groups, and/or devices that a report is run against. gtypes are
# "SiteFilter", "AssetGroupFilter", "DeviceFilter", or "ScanFilter". gid is
# the site-id, assetgroup-id, or devce-id. ScanFilter, if used, specifies
# a specifies a specific scan to use as the data source for the report. The gid
# can be a specific scan-id or "first" for the first run scan, or “last” for
# the last run scan.
#
# A single report filter: the type names what kind of entity is filtered
# (site, asset group, device, or scan) and the id names which one.
class ReportFilter
  # Filter type identifier.
  attr_reader :type
  # Identifier of the entity the filter selects.
  attr_reader :id

  def initialize(type, id)
    @type, @id = type, id
  end
end
# === Description
# Object that represents the schedule on which to automatically generate new reports.
#
# Schedule on which reports are automatically generated.
class ReportSchedule
  # The type of schedule (daily, hourly, monthly, weekly).
  attr_reader :type
  # The frequency with which to run the scan.
  attr_reader :interval
  # The earliest date to generate the report.
  attr_reader :start

  def initialize(type, interval, start)
    @type, @interval, @start = type, interval, start
  end
end
# Fetches and holds the list of report templates known to the NSC.
class ReportTemplateListing
  attr_reader :error_msg
  attr_reader :error
  attr_reader :request_xml
  attr_reader :response_xml
  attr_reader :connection
  attr_reader :xml_tag_stack
  # Array of ReportTemplateSummary objects parsed from the response.
  attr_reader :report_template_summaries

  # Request the template listing and build the summaries, or set
  # @error/@error_msg on failure.
  def initialize(connection)
    @error = nil
    @connection = connection
    @report_template_summaries = Array.new()
    r = @connection.execute('<ReportTemplateListingRequest session-id="' + connection.session_id.to_s + '"/>')
    if (r.success)
      r.res.elements.each('ReportTemplateListingResponse/ReportTemplateSummary') do |summary|
        # BUG FIX: this used to reference the undefined constant
        # ReportTemplateSumary (typo) and pass only two arguments, while
        # ReportTemplateSummary#initialize requires three.  The description
        # is taken from the child element when present.
        desc = summary.elements['description']
        @report_template_summaries.push(
          ReportTemplateSummary.new(summary.attributes['id'],
                                    summary.attributes['name'],
                                    desc ? desc.text : ''))
      end
    else
      @error = true
      @error_msg = 'ReportTemplateListingRequest Parse Error'
    end
  end
end
# Lightweight record describing a single report template.
class ReportTemplateSummary
  # Template identifier.
  attr_reader :id
  # Human-readable template name.
  attr_reader :name
  # Template description text.
  attr_reader :description

  def initialize(id, name, description)
    @id, @name, @description = id, name, description
  end
end
# A named report section holding arbitrary named properties.
class ReportSection
  attr_reader :name
  # Hash of property name (String) => value.
  attr_reader :properties

  def initialize(name)
    # BUG FIX: was Array.new, but addProperty indexes with a String key,
    # which raises TypeError on an Array.  The usage implies a Hash.
    @properties = {}
    @name = name
  end

  # Store a named property for this section (key is stringified).
  def addProperty(name, value)
    @properties[name.to_s] = value
  end
end
end |
# This is the main ruby file for the nmatrix-lapack gem
require 'nmatrix' # need to have nmatrix required first or else bad things will happen
require_relative 'lapack_ext_common'
# Register this backend with nmatrix before pulling in the C extension.
NMatrix.register_lapack_extension("nmatrix-lapack")
require "nmatrix_lapack.so"
class NMatrix
#Add functions from the LAPACKE C extension to the main LAPACK and BLAS modules.
#This will overwrite the original functions where applicable.
module LAPACK
class << self
# Re-export each LAPACKE-backed singleton method under NMatrix::LAPACK,
# shadowing any existing implementation of the same name.
NMatrix::LAPACKE::LAPACK.singleton_methods.each do |m|
define_method m, NMatrix::LAPACKE::LAPACK.method(m).to_proc
end
end
end
module BLAS
class << self
# Same re-export for the BLAS bindings.
NMatrix::LAPACKE::BLAS.singleton_methods.each do |m|
define_method m, NMatrix::LAPACKE::BLAS.method(m).to_proc
end
end
end
# In-place LU factorization (getrf).  The receiver is overwritten with
# the factors; the pivot array is returned.
def getrf!
raise(StorageTypeError, "ATLAS functions only work on dense matrices") unless self.dense?
ipiv = NMatrix::LAPACK::lapacke_getrf(:row, self.shape[0], self.shape[1], self, self.shape[1])
return ipiv
end
# In-place inversion via getrf + getri.  Dense, square, non-integer only.
def invert!
raise(StorageTypeError, "invert only works on dense matrices currently") unless self.dense?
raise(ShapeError, "Cannot invert non-square matrix") unless shape[0] == shape[1]
raise(DataTypeError, "Cannot invert an integer matrix in-place") if self.integer_dtype?
# Get the pivot array; factor the matrix
n = self.shape[0]
pivot = NMatrix::LAPACK::lapacke_getrf(:row, n, n, self, n)
# Now calculate the inverse using the pivot array
NMatrix::LAPACK::lapacke_getri(:row, n, self, n, pivot)
self
end
# In-place Cholesky factorization (potrf); `which` is passed through to
# LAPACKE to select the triangle to use.
def potrf!(which)
raise(StorageTypeError, "ATLAS functions only work on dense matrices") unless self.dense?
raise(ShapeError, "Cholesky decomposition only valid for square matrices") unless self.dim == 2 && self.shape[0] == self.shape[1]
NMatrix::LAPACK::lapacke_potrf(:row, which, self.shape[0], self, self.shape[1])
end
end
#solve for lapacke
# This is the main ruby file for the nmatrix-lapack gem
require 'nmatrix' # need to have nmatrix required first or else bad things will happen
require_relative 'lapack_ext_common'
# Register this backend with nmatrix before pulling in the C extension.
NMatrix.register_lapack_extension("nmatrix-lapack")
require "nmatrix_lapack.so"
class NMatrix
#Add functions from the LAPACKE C extension to the main LAPACK and BLAS modules.
#This will overwrite the original functions where applicable.
module LAPACK
class << self
# Re-export each LAPACKE-backed singleton method under NMatrix::LAPACK,
# shadowing any existing implementation of the same name.
NMatrix::LAPACKE::LAPACK.singleton_methods.each do |m|
define_method m, NMatrix::LAPACKE::LAPACK.method(m).to_proc
end
end
end
module BLAS
class << self
# Same re-export for the BLAS bindings.
NMatrix::LAPACKE::BLAS.singleton_methods.each do |m|
define_method m, NMatrix::LAPACKE::BLAS.method(m).to_proc
end
end
end
# In-place LU factorization (getrf).  The receiver is overwritten with
# the factors; the pivot array is returned.
def getrf!
raise(StorageTypeError, "ATLAS functions only work on dense matrices") unless self.dense?
ipiv = NMatrix::LAPACK::lapacke_getrf(:row, self.shape[0], self.shape[1], self, self.shape[1])
return ipiv
end
# In-place inversion via getrf + getri.  Dense, square, non-integer only.
def invert!
raise(StorageTypeError, "invert only works on dense matrices currently") unless self.dense?
raise(ShapeError, "Cannot invert non-square matrix") unless shape[0] == shape[1]
raise(DataTypeError, "Cannot invert an integer matrix in-place") if self.integer_dtype?
# Get the pivot array; factor the matrix
n = self.shape[0]
pivot = NMatrix::LAPACK::lapacke_getrf(:row, n, n, self, n)
# Now calculate the inverse using the pivot array
NMatrix::LAPACK::lapacke_getri(:row, n, self, n, pivot)
self
end
# In-place Cholesky factorization (potrf); `which` is passed through to
# LAPACKE to select the triangle to use.
def potrf!(which)
raise(StorageTypeError, "ATLAS functions only work on dense matrices") unless self.dense?
raise(ShapeError, "Cholesky decomposition only valid for square matrices") unless self.dim == 2 && self.shape[0] == self.shape[1]
NMatrix::LAPACK::lapacke_potrf(:row, which, self.shape[0], self, self.shape[1])
end
# Solve self * x = b for a column vector b via LU factorization (getrf)
# followed by back-substitution (getrs).  Neither self nor b is modified;
# the solution is returned as a new matrix.
def solve b
raise(ShapeError, "b must be a column vector") unless b.dim == 2 && b.shape[1] == 1
raise(ShapeError, "Must be called on square matrix") unless self.dim == 2 && self.shape[0] == self.shape[1]
raise(ShapeError, "number of rows of b must equal number of cols of self") if
self.shape[1] != b.shape[0]
raise ArgumentError, "only works with dense matrices" if self.stype != :dense
raise ArgumentError, "only works for non-integer, non-object dtypes" if
integer_dtype? or object_dtype? or b.integer_dtype? or b.object_dtype?
x = b.clone
# NOTE(review): `clone` below is a local variable holding a copy of self,
# shadowing Kernel#clone for the rest of this method.
clone = self.clone
n = self.shape[0]
ipiv = NMatrix::LAPACK.lapacke_getrf(:row, n, n, clone, n)
# ldb is b.shape[1] (1 for a column vector) in row-major layout.
NMatrix::LAPACK.lapacke_getrs(:row, :no_transpose, n, b.shape[1], clone, n, ipiv, x, b.shape[1])
x
end
end
# ---
#-- encoding: UTF-8
require "nomener/name"
require "nomener/titles"
require "nomener/suffixes"
require "nomener/compounders"
require "nomener/helper"
module Nomener
class Parser
include Nomener::Titles
include Nomener::Suffixes
include Nomener::Compounders
# regex for trailing commas/pipes/whitespace we want to strip off the end
TRAILER_TRASH = /[,|\s]+$/
# regex for name characters we aren't going to use
# NOTE(review): everything between [^ and ] is a character class, so the
# "(?:" and "(?<\.)" sequences are matched as literal characters, not
# groups -- this looks like a garbled attempt at grouping; confirm intent.
DIRTY_STUFF = /[^,'(?:\p{Alpha}(?<\.))\p{Alpha}\p{Blank}]{2,}/
# regex for boundaries we'll use to find leftover nickname boundaries
NICKNAME_LEFTOVER = /["'\(\)]{2}/
# regex for matching enclosed nicknames (lookbehind/lookahead keep delimiters)
NICKNAME = /(?<=["'\(])([\p{Alpha}\-\ '\.\,]+?)(?=["'\)])/
# regex for matching last names in a "first last" pattern
FIRSTLAST_MATCHER = /\p{Blank}(?<fam>#{COMPOUNDS}[\p{Alpha}\-\']+)\Z/i
# regex for matching last names in a "last first" pattern
LASTFIRST_MATCHER = /\A(?<fam>#{COMPOUNDS}\b[\p{Alpha}\-\']+)\p{Blank}/i
# regex for matching last names in a "last, first" pattern
LASTCOMFIRST_MATCHER = /\A(?<fam>#{COMPOUNDS}\b[\p{Alpha}\-\'\p{Blank}]+),/i
# period. probably not much performance help.
PERIOD = /\./
# Public: parse a string into name parts
#
# name - a string to get the name from
# format - a hash of options to parse name (default {:order => :fl, :spacelimit => 0})
# :order - format the name. defaults to "last first" of the available
# :fl - presumes the name is in the format of "first last"
# :lf - presumes the name is in the format of "last first"
# :lcf - presumes the name is in the format of "last, first"
# :spacelimit - the number of spaces to consider in the first name
#
# Returns a Nomener::Name object hopefully a parsed name of the string or nil
# Public: parse a string into name parts, swallowing any parse failure.
#
# name   - a string to get the name from
# format - options hash forwarded to parse! (see parse! for keys)
#
# Returns a parsed name, or nil when parsing raises.
def self.parse(name, format = {:order => :auto, :spacelimit => 1})
  parse!(name, format)
rescue
  nil
end
# Public: parse a string into name parts
#
# name - string to parse a name from
# format - has of options to parse name. See parse()
#
# Returns a hash of name parts or nil
# Raises ArgumentError if 'name' is not a string or is empty
def self.parse!(name, format = {:order => :auto, :spacelimit => 0})
raise ArgumentError, "Name to parse not provided" unless (name.kind_of?(String) && !name.empty?)
name = Nomener::Helper.reformat name
newname = { :title => "", :first => "", :nick => "", :middle => "", :last => "", :suffix => "" }
# grab any identified nickname before working on the rest
newname[:nick] = parse_nick! name
cleanup! name
# grab any suffix' we can find
newname[:suffix] = parse_suffix! name
cleanup! name
newname[:title] = parse_title! name
name = dustoff name
newname[:last] = name # possibly mononyms
case name
when /,/ # if there's a comma, it may be a useful hint
clues = name.split(",").each { |i| i.strip! }
raise ParseError, "Could not decipher commas in \"#{name}\"" if clues.length > 2
# convention is last, first when there's a comma
newname[:last], newname[:first] = clues
# check the last by comparing a re-ordering of the name
# Mies van der Rohe, Ludwig
# Snepscheut, Jan L. A. van de
unless newname[:first].nil? || newname[:first].split(" ").length == 1
check = parse_last!("#{newname[:first]} #{newname[:last]}", :fl)
# let's trust the full name
if check != newname[:last]
newname[:first] = "#{newname[:first]} #{newname[:last]}".sub(check, "").strip
newname[:last] = check
end
end
# titles which are part of the first name...
newname[:title] = parse_title!(newname[:first]) if newname[:title].empty?
when / / # no comma, check for space on first then last
newname[:last] = parse_last!(name, format[:order])
newname[:first], newname[:middle] = parse_first!(name, format[:spacelimit])
end
cleanup! newname[:last], newname[:first], newname[:middle]
newname
end
# Internal: pull off a title if we can
# Modifies given string in place.
#
# nm - string of the name to parse
#
# Returns string of the title found or and empty string
def self.parse_title!(nm)
titles = []
nm.gsub! TITLES do |title|
titles << title.strip
""
end
dustoff titles.join(" ")
end
# Internal: pull off what suffixes we can
# Modifies given string in place.
#
# nm - string of the name to parse
#
# Returns string of the suffixes found or and empty string
def self.parse_suffix!(nm)
suffixes = []
nm.gsub! SUFFIXES do |suffix|
suffixes << suffix.strip
""
end
dustoff suffixes.join(" ")
end
# Internal: parse nickname out of string. presuming it's in quotes
# Modifies given string in place.
#
# nm - string of the name to parse
#
# Returns string of the nickname found or and empty string
def self.parse_nick!(nm)
nick = ""
nm.sub! NICKNAME do |z|
nick = $1.strip
""
end
nm.sub! NICKNAME_LEFTOVER, ""
dustoff nick
end
# Internal: parse last name from string
# Modifies given string in place.
#
# nm - string to get the last name from
# format - symbol defaulting to "first last". See parse()
#
# Returns string of the last name found or an empty string
def self.parse_last!(nm, format = :fl)
last = ""
format = :fl if (format == :auto && nm.index(",").nil?)
format = :lcf if (format == :auto && nm.index(","))
# these constants should have the named match :fam
n = nm.match( FIRSTLAST_MATCHER ) if format == :fl
n = nm.match( LASTFIRST_MATCHER ) if format == :lf
n = nm.match( LASTCOMFIRST_MATCHER ) if format == :lcf
unless n.nil?
last = n[:fam].strip if n[:fam]
nm.sub!(last, "")
nm.sub!(",", "")
end
last
end
# Internal: parse the first name, and middle name if any
# Modifies given string in place.
#
# nm - the string to get the first name from
# namecount - the number of spaces in the first name to consider
#
# Returns an array containing the first name and middle name if any
def self.parse_first!(nm, namecount = 0)
nm.tr! ".", " "
nm.squeeze! " "
first, middle = nm.split " ", namecount
[first || "", middle || ""]
end
private
# Internal: Clean up a string where there are numerous consecutive and trailing non-name characters.
# Modifies given string in place.
#
# args - strings to clean up
#
# Returns nothing
def self.cleanup!(*args)
args.each do |dirty|
next if(dirty.nil? || !dirty.kind_of?(String))
dirty.gsub! DIRTY_STUFF, ""
dirty.squeeze! " "
# remove any trailing commas or whitespace
dirty.gsub! TRAILER_TRASH, ""
dirty.strip!
end
end
# Internal: a softer clean we keep re-using
#
# str - the string to dust off
#
# Returns the nice clean
def self.dustoff(str)
str = str.gsub PERIOD, " "
str = str.squeeze " "
str = str.strip
end
end
end
A bit more refactoring; still at about 1k names per second.
#-- encoding: UTF-8
require "nomener/name"
require "nomener/titles"
require "nomener/suffixes"
require "nomener/compounders"
require "nomener/helper"
module Nomener
  # Splits a free-form person-name String into component parts:
  # :title, :first, :nick, :middle, :last and :suffix.
  class Parser
    include Nomener::Titles
    include Nomener::Suffixes
    include Nomener::Compounders

    # regex for stuff at the end we want to get out
    TRAILER_TRASH = /[,|\s]+$/

    # regex for name characters we aren't going to use
    # NOTE(review): the whole pattern is one negated character class, so
    # "(?:" and "(?<\.)" are literal characters here, not a group or a
    # lookbehind — confirm this is what was intended.
    DIRTY_STUFF = /[^,'(?:\p{Alpha}(?<\.))\p{Alpha}\p{Blank}]{2,}/

    # regex for boundaries we'll use to find leftover nickname boundaries
    NICKNAME_LEFTOVER = /["'\(\)]{2}/

    # regex for matching enclosed nicknames
    NICKNAME = /(?<=["'\(])([\p{Alpha}\-\ '\.\,]+?)(?=["'\)])/

    # regex for matching last names in a "first last" pattern
    FIRSTLAST_MATCHER = /\p{Blank}(?<fam>#{COMPOUNDS}[\p{Alpha}\-\']+)\Z/i

    # regex for matching last names in a "last first" pattern
    LASTFIRST_MATCHER = /\A(?<fam>#{COMPOUNDS}\b[\p{Alpha}\-\']+)\p{Blank}/i

    # regex for matching last names in a "last, first" pattern
    LASTCOMFIRST_MATCHER = /\A(?<fam>#{COMPOUNDS}\b[\p{Alpha}\-\'\p{Blank}]+),/i

    # period. probably not much performance help.
    PERIOD = /\./

    # Public: parse a string into name parts
    #
    # name - a string to get the name from
    # format - a hash of options to parse name (default {:order => :auto, :spacelimit => 1})
    #          :order - format the name. :auto picks :fl or :lcf based on commas
    #                   :fl - presumes the name is in the format of "first last"
    #                   :lf - presumes the name is in the format of "last first"
    #                   :lcf - presumes the name is in the format of "last, first"
    #          :spacelimit - the number of spaces to consider in the first name
    #
    # Returns a hash of name parts (see parse!) or nil if parsing failed.
    # NOTE(review): the original doc said a Nomener::Name object is
    # returned, but parse! builds and returns a plain Hash.
    def self.parse(name, format = {:order => :auto, :spacelimit => 1})
      begin
        self.parse!(name, format)
      rescue
        # any StandardError (ArgumentError, ParseError, ...) becomes nil
        nil
      end
    end

    # Public: parse a string into name parts
    #
    # name - string to parse a name from
    # format - hash of options to parse name. See parse()
    #
    # Returns a hash of name parts
    # Raises ArgumentError if 'name' is not a string or is empty
    # Raises ParseError if the name has more than one comma
    def self.parse!(name, format = {:order => :auto, :spacelimit => 0})
      raise ArgumentError, "Name to parse not provided" unless (name.kind_of?(String) && !name.empty?)

      name = Nomener::Helper.reformat name

      newname = { :title => "", :first => "", :nick => "", :middle => "", :last => "", :suffix => "" }

      # grab any identified nickname before working on the rest
      newname[:nick] = parse_nick! name
      # drop the leftover pair of quote/paren delimiters
      name.sub! NICKNAME_LEFTOVER, ""
      cleanup! name

      # grab any suffix' we can find
      newname[:suffix] = parse_suffix! name
      cleanup! name

      newname[:title] = parse_title! name
      name = dustoff name
      newname[:last] = name # possibly mononyms

      case name
      when /,/ # if there's a comma, it may be a useful hint
        clues = name.split(",").each { |i| i.strip! }
        raise ParseError, "Could not decipher commas in \"#{name}\"" if clues.length > 2

        # convention is last, first when there's a comma
        newname[:last], newname[:first] = clues

        # check the last by comparing a re-ordering of the name
        #   Mies van der Rohe, Ludwig
        #   Snepscheut, Jan L. A. van de
        unless newname[:first].nil? || newname[:first].split(" ").length == 1
          check = parse_last!("#{newname[:first]} #{newname[:last]}", :fl)
          # let's trust the full name
          if check != newname[:last]
            newname[:first] = "#{newname[:first]} #{newname[:last]}".sub(check, "").strip
            newname[:last] = check
          end
        end

        # titles which are part of the first name...
        newname[:title] = parse_title!(newname[:first]) if newname[:title].empty?
      when / / # no comma, check for space on first then last
        newname[:last] = parse_last!(name, format[:order])
        newname[:first], newname[:middle] = parse_first!(name, format[:spacelimit])
      end

      cleanup! newname[:last], newname[:first], newname[:middle]
      newname
    end

    # Internal: pull off a title if we can
    # Modifies given string in place.
    #
    # nm - string of the name to parse
    #
    # Returns string of the title found or an empty string
    def self.parse_title!(nm)
      dustoff gut!(nm, TITLES)
    end

    # Internal: pull off what suffixes we can
    # Modifies given string in place.
    #
    # nm - string of the name to parse
    #
    # Returns string of the suffixes found or an empty string
    def self.parse_suffix!(nm)
      dustoff gut!(nm, SUFFIXES)
    end

    # Internal: parse nickname out of string. presuming it's in quotes
    # Modifies given string in place.
    #
    # nm - string of the name to parse
    #
    # Returns string of the nickname found or an empty string
    # NOTE(review): gut! removes *every* NICKNAME match (gsub!), so
    # multiple quoted runs are joined into one nickname — confirm intended.
    def self.parse_nick!(nm)
      dustoff gut!(nm, NICKNAME)
    end

    # Internal: parse last name from string
    # Modifies given string in place.
    #
    # nm - string to get the last name from
    # format - symbol defaulting to "first last". See parse()
    #
    # Returns string of the last name found or an empty string
    def self.parse_last!(nm, format = :fl)
      last = ""
      # :auto resolves to :fl (no comma) or :lcf (comma present)
      format = :fl if (format == :auto && nm.index(",").nil?)
      format = :lcf if (format == :auto && nm.index(","))

      # these constants should have the named match :fam
      nomen = case format
              when :fl
                nm.match( FIRSTLAST_MATCHER )
              when :lf
                nm.match( LASTFIRST_MATCHER )
              when :lcf
                nm.match( LASTCOMFIRST_MATCHER )
              end

      unless nomen.nil? || nomen[:fam].nil?
        last = nomen[:fam].strip
        # remove the family name (and a comma, if any) from the remainder
        nm.sub!(last, "")
        nm.sub!(",", "")
      end
      last
    end

    # Internal: parse the first name, and middle name if any
    # Modifies given string in place.
    #
    # nm - the string to get the first name from
    # namecount - the number of spaces in the first name to consider
    #
    # Returns an array containing the first name and middle name if any
    # NOTE(review): with namecount == 0 the split is unlimited, so any
    # tokens after the second are discarded; with namecount == 1 the whole
    # remainder becomes the first name — confirm both are intended.
    def self.parse_first!(nm, namecount = 0)
      nm.tr! ".", " "
      nm.squeeze! " "
      first, middle = nm.split " ", namecount
      [first || "", middle || ""]
    end

    private

    # Internal: Clean up a string where there are numerous consecutive and trailing non-name characters.
    # Modifies given string in place.
    #
    # args - strings to clean up
    #
    # Returns nothing
    # NOTE(review): `private` has no effect on methods defined with
    # `def self.`; these remain publicly callable singleton methods.
    def self.cleanup!(*args)
      args.each do |dirty|
        next if(dirty.nil? || !dirty.kind_of?(String))
        dirty.gsub! DIRTY_STUFF, ""
        dirty.squeeze! " "
        # remove any trailing commas or whitespace
        dirty.gsub! TRAILER_TRASH, ""
        dirty.strip!
      end
    end

    # Internal: a softer clean we keep re-using
    #
    # str - the string to dust off
    #
    # Returns the nice clean string
    def self.dustoff(str)
      # periods become spaces, runs of spaces collapse, ends are trimmed
      str = str.gsub PERIOD, " "
      str = str.squeeze " "
      str = str.strip
    end

    # Internal: clean out a given string with a given pattern
    # Modifies the given string in place.
    #
    # str - the string to gut
    # pattern - the regex to cut with
    #
    # Returns the matched pieces, stripped and joined with a space
    def self.gut!(str = "", pattern = / /)
      found = []
      str.gsub! pattern do |pat|
        found << pat.strip
        ""
      end
      found.join " "
    end
  end
end
|
require 'json'
require 'socket'
require 'openssl'
require 'timeout'
require_relative 'frames/error'
require_relative 'frames/message'
require_relative 'frames/response'
require_relative 'logger'
module Nsq
  # A single client connection to one nsqd.
  #
  # Owns the TCP (optionally TLS-upgraded) socket, performs the IDENTIFY
  # handshake, and runs three background threads:
  #   * read_loop  - decodes frames; Messages are pushed onto @queue
  #   * write_loop - drains @write_queue onto the socket
  #   * monitor    - waits on @death_queue and reconnects with backoff
  class Connection
    include Nsq::AttributeLogger
    @@log_attributes = [:host, :port]

    attr_reader :host
    attr_reader :port
    attr_accessor :max_in_flight
    attr_reader :presumed_in_flight

    USER_AGENT = "nsq-ruby/#{Nsq::Version::STRING}"
    RESPONSE_HEARTBEAT = '_heartbeat_'
    RESPONSE_OK = 'OK'

    # opts:
    #   :host, :port     - required nsqd address
    #   :queue           - optional Queue that receives decoded Message frames
    #   :topic, :channel - re-subscribed automatically on (re)connect
    #   :msg_timeout     - milliseconds, minimum 1000 (default 60_000)
    #   :max_in_flight   - RDY count to advertise (default 1)
    #   :tls_v1          - upgrade the socket to TLS after IDENTIFY
    #   :tls_options     - paths for :certificate, :key, [:ca_certificate]
    #   :ssl_context     - deprecated alias for :tls_options
    def initialize(opts = {})
      @host = opts[:host] || (raise ArgumentError, 'host is required')
      @port = opts[:port] || (raise ArgumentError, 'port is required')
      @queue = opts[:queue]
      @topic = opts[:topic]
      @channel = opts[:channel]
      @msg_timeout = opts[:msg_timeout] || 60_000 # 60s
      @max_in_flight = opts[:max_in_flight] || 1
      @tls_options = opts[:tls_options]
      if opts[:ssl_context]
        if @tls_options
          warn 'ssl_context and tls_options both set. Using tls_options. Ignoring ssl_context.'
        else
          @tls_options = opts[:ssl_context]
          warn 'ssl_context will be deprecated nsq-ruby version 3. Please use tls_options instead.'
        end
      end
      @tls_v1 = !!opts[:tls_v1]
      if @tls_options
        if @tls_v1
          validate_tls_options!
        else
          warn 'tls_options was provided, but tls_v1 is false. Skipping validation of tls_options.'
        end
      end
      if @msg_timeout < 1000
        raise ArgumentError, 'msg_timeout cannot be less than 1000. it\'s in milliseconds.'
      end

      # for outgoing communication
      @write_queue = Queue.new

      # For indicating that the connection has died.
      # We use a Queue so we don't have to poll. Used to communicate across
      # threads (from write_loop and read_loop to connect_and_monitor).
      @death_queue = Queue.new

      @connected = false
      @presumed_in_flight = 0

      open_connection
      start_monitoring_connection
    end

    def connected?
      @connected
    end

    # close the connection and don't try to re-open it
    def close
      stop_monitoring_connection
      close_connection
    end

    # Protocol commands below; all are queued and written asynchronously
    # by the write loop.
    def sub(topic, channel)
      write "SUB #{topic} #{channel}\n"
    end

    def rdy(count)
      write "RDY #{count}\n"
    end

    def fin(message_id)
      write "FIN #{message_id}\n"
      decrement_in_flight
    end

    def req(message_id, timeout)
      write "REQ #{message_id} #{timeout}\n"
      decrement_in_flight
    end

    def touch(message_id)
      write "TOUCH #{message_id}\n"
    end

    def pub(topic, message)
      write ["PUB #{topic}\n", message.bytesize, message].pack('a*l>a*')
    end

    def dpub(topic, delay_in_ms, message)
      write ["DPUB #{topic} #{delay_in_ms}\n", message.bytesize, message].pack('a*l>a*')
    end

    def mpub(topic, messages)
      body = messages.map do |message|
        [message.bytesize, message].pack('l>a*')
      end.join
      write ["MPUB #{topic}\n", body.bytesize, messages.size, body].pack('a*l>l>a*')
    end

    # Tell the server we are ready for more messages!
    def re_up_ready
      rdy(@max_in_flight)
      # assume these messages are coming our way. yes, this might not be the
      # case, but it's much easier to manage our RDY state with the server if
      # we treat things this way.
      @presumed_in_flight = @max_in_flight
    end

    private

    def cls
      write "CLS\n"
    end

    def nop
      write "NOP\n"
    end

    # Queue raw bytes for the write loop; never writes on the caller thread.
    def write(raw)
      @write_queue.push(raw)
    end

    def write_to_socket(raw)
      debug ">>> #{raw.inspect}"
      @socket.write(raw)
    end

    # Send IDENTIFY and negotiate features with the server.
    def identify
      hostname = Socket.gethostname
      metadata = {
        client_id: hostname,
        hostname: hostname,
        feature_negotiation: true,
        heartbeat_interval: 30_000, # 30 seconds
        output_buffer: 16_000, # 16kb
        output_buffer_timeout: 250, # 250ms
        tls_v1: @tls_v1,
        snappy: false,
        deflate: false,
        sample_rate: 0, # disable sampling
        user_agent: USER_AGENT,
        msg_timeout: @msg_timeout
      }.to_json
      write_to_socket ["IDENTIFY\n", metadata.length, metadata].pack('a*l>a*')

      # Now wait for the response!
      frame = receive_frame
      server = JSON.parse(frame.data)

      if @max_in_flight > server['max_rdy_count']
        raise "max_in_flight is set to #{@max_in_flight}, server only supports #{server['max_rdy_count']}"
      end

      @server_version = server['version']
    end

    def handle_response(frame)
      if frame.data == RESPONSE_HEARTBEAT
        debug 'Received heartbeat'
        nop
      elsif frame.data == RESPONSE_OK
        debug 'Received OK'
      else
        die "Received response we don't know how to handle: #{frame.data}"
      end
    end

    # Read one length-prefixed frame off the socket; returns nil at EOF.
    def receive_frame
      if buffer = @socket.read(8)
        size, type = buffer.unpack('l>l>')
        size -= 4 # we want the size of the data part and type already took up 4 bytes
        data = @socket.read(size)
        frame_class = frame_class_for_type(type)
        return frame_class.new(data, self)
      end
    end

    FRAME_CLASSES = [Response, Error, Message]

    # Map a wire frame type id to its frame class.
    # Guards both ends of the range: a negative id would otherwise index
    # FRAME_CLASSES from the end and silently return the wrong class.
    def frame_class_for_type(type)
      frame_class = FRAME_CLASSES[type] if type >= 0
      raise "Bad frame type specified: #{type}" if frame_class.nil?
      frame_class
    end

    def decrement_in_flight
      @presumed_in_flight -= 1

      if server_needs_rdy_re_ups?
        # now that we're less than @max_in_flight we might need to re-up our RDY state
        threshold = (@max_in_flight * 0.2).ceil
        re_up_ready if @presumed_in_flight <= threshold
      end
    end

    def start_read_loop
      @read_loop_thread ||= Thread.new{read_loop}
    end

    def stop_read_loop
      @read_loop_thread.kill if @read_loop_thread
      @read_loop_thread = nil
    end

    def read_loop
      loop do
        frame = receive_frame
        if frame.is_a?(Response)
          handle_response(frame)
        elsif frame.is_a?(Error)
          error "Error received: #{frame.data}"
        elsif frame.is_a?(Message)
          debug "<<< #{frame.body}"
          @queue.push(frame) if @queue
        else
          raise 'No data from socket'
        end
      end
    rescue Exception => ex
      # deliberately broad: any failure here means the connection is dead
      # and the monitor thread should reconnect
      die(ex)
    end

    def start_write_loop
      @write_loop_thread ||= Thread.new{write_loop}
    end

    # Ask the write loop to finish, then wait for it. The stop request is
    # delivered through @write_queue itself (a :stop_write_loop sentinel)
    # instead of a boolean flag: a flag set from this thread can be missed
    # by a write loop already blocked in @write_queue.pop, leaving that
    # thread alive forever.
    def stop_write_loop
      if @write_loop_thread
        @write_queue.push(:stop_write_loop)
        @write_loop_thread.join
      end
      @write_loop_thread = nil
    end

    def write_loop
      data = nil
      loop do
        data = @write_queue.pop
        break if data == :stop_write_loop
        write_to_socket(data)
      end
    rescue Exception => ex
      # requeue PUB and MPUB commands so they aren't lost on reconnect
      if data =~ /^M?PUB/
        debug "Requeueing to write_queue: #{data.inspect}"
        @write_queue.push(data)
      end
      die(ex)
    end

    # Waits for death of connection
    def start_monitoring_connection
      @connection_monitor_thread ||= Thread.new{monitor_connection}
      @connection_monitor_thread.abort_on_exception = true
    end

    def stop_monitoring_connection
      @connection_monitor_thread.kill if @connection_monitor_thread
      # clear the thread ivar itself (previously this assigned to the
      # unused @connection_monitor, so the dead thread stayed cached and
      # start_monitoring_connection could never start a fresh one)
      @connection_monitor_thread = nil
    end

    def monitor_connection
      loop do
        # wait for death, hopefully it never comes
        cause_of_death = @death_queue.pop
        warn "Died from: #{cause_of_death}"

        debug 'Reconnecting...'
        reconnect
        debug 'Reconnected!'

        # clear all death messages, since we're now reconnected.
        # we don't want to complete this loop and immediately reconnect again.
        @death_queue.clear
      end
    end

    # close the connection if it's not already closed and try to reconnect
    # over and over until we succeed!
    def reconnect
      close_connection
      with_retries do
        open_connection
      end
    end

    def open_connection
      @socket = TCPSocket.new(@host, @port)
      # write the protocol magic ("  V2") and IDENTIFY directly to the
      # socket to make sure they reach nsqd ahead of anything in the
      # `@write_queue`
      write_to_socket '  V2'
      identify
      upgrade_to_ssl_socket if @tls_v1

      start_read_loop
      start_write_loop
      @connected = true

      # we need to re-subscribe if there's a topic specified
      if @topic
        debug "Subscribing to #{@topic}"
        sub(@topic, @channel)
        re_up_ready
      end
    end

    # closes the connection and stops listening for messages
    def close_connection
      cls if connected?
      stop_read_loop
      stop_write_loop
      @socket = nil
      @connected = false
    end

    # this is called when there's a connection error in the read or write loop
    # it triggers `connect_and_monitor` to try to reconnect
    def die(reason)
      @connected = false
      @death_queue.push(reason)
    end

    def upgrade_to_ssl_socket
      ssl_opts = [@socket, openssl_context].compact
      @socket = OpenSSL::SSL::SSLSocket.new(*ssl_opts)
      @socket.connect
    end

    # Build an SSLContext from @tls_options; nil when none were given.
    def openssl_context
      return unless @tls_options
      context = OpenSSL::SSL::SSLContext.new
      context.cert = OpenSSL::X509::Certificate.new(File.read(@tls_options[:certificate]))
      context.key = OpenSSL::PKey::RSA.new(File.read(@tls_options[:key]))
      if @tls_options[:ca_certificate]
        context.ca_file = OpenSSL::X509::Certificate.new(File.read(@tls_options[:ca_certificate])).to_pem
      end
      context
    end

    # Retry the supplied block with exponential backoff.
    #
    # Borrowed liberally from:
    # https://github.com/ooyala/retries/blob/master/lib/retries.rb
    def with_retries(&block)
      base_sleep_seconds = 0.5
      max_sleep_seconds = 300 # 5 minutes

      # Let's do this thing
      attempts = 0

      begin
        attempts += 1
        return block.call(attempts)
      rescue Errno::ECONNREFUSED, Errno::ECONNRESET, Errno::EHOSTUNREACH,
             Errno::ENETDOWN, Errno::ENETUNREACH, Errno::ETIMEDOUT, Timeout::Error => ex
        raise ex if attempts >= 100

        # The sleep time is an exponentially-increasing function of base_sleep_seconds.
        # But, it never exceeds max_sleep_seconds.
        sleep_seconds = [base_sleep_seconds * (2 ** (attempts - 1)), max_sleep_seconds].min

        # Randomize to a random value in the range sleep_seconds/2 .. sleep_seconds
        sleep_seconds = sleep_seconds * (0.5 * (1 + rand()))

        # But never sleep less than base_sleep_seconds
        sleep_seconds = [base_sleep_seconds, sleep_seconds].max

        warn "Failed to connect: #{ex}. Retrying in #{sleep_seconds.round(1)} seconds."
        snooze(sleep_seconds)
        retry
      end
    end

    # So we can stub for testing and reconnect in a tight loop
    def snooze(t)
      sleep(t)
    end

    def server_needs_rdy_re_ups?
      # versions less than 0.3.0 need RDY re-ups
      # see: https://github.com/bitly/nsq/blob/master/ChangeLog.md#030---2014-11-18
      major, minor, _patch = @server_version.split('.').map(&:to_i)
      major == 0 && minor <= 2
    end

    def validate_tls_options!
      [:key, :certificate].each do |key|
        unless @tls_options.has_key?(key)
          raise ArgumentError.new "@tls_options requires a :#{key}"
        end
      end

      [:key, :certificate, :ca_certificate].each do |key|
        if @tls_options[key] && !File.readable?(@tls_options[key])
          raise LoadError.new "@tls_options :#{key} is unreadable"
        end
      end
    end
  end
end
Ensure write_loop ends; pass message via write_queue
There is a race condition with the current implementation whereby
setting `@stop_write_loop = true` can potentially not be seen by the
thread running `write_loop`. Even with a Mutex around the variable
@stop_write_loop, there is a chance that the `.join(1)` will enter the
loop and immediately call `@write_queue.pop`. If there is nothing in the
queue at the time pop is called, the thread will block waiting for new
data. In the case that we are `close`ing the current connection, this
thread will be left for garbage collection at some later date...
The change here removes that variable `@stop_write_loop` and instead,
pushes a symbol :stop_write_loop onto the write_queue for the write_loop
to see and then act on.
require 'json'
require 'socket'
require 'openssl'
require 'timeout'
require_relative 'frames/error'
require_relative 'frames/message'
require_relative 'frames/response'
require_relative 'logger'
module Nsq
  # A single client connection to one nsqd.
  #
  # Owns the TCP (optionally TLS-upgraded) socket, performs the IDENTIFY
  # handshake, and runs three background threads:
  #   * read_loop  - decodes frames; Messages are pushed onto @queue
  #   * write_loop - drains @write_queue onto the socket
  #   * monitor    - waits on @death_queue and reconnects with backoff
  class Connection
    include Nsq::AttributeLogger
    @@log_attributes = [:host, :port]

    attr_reader :host
    attr_reader :port
    attr_accessor :max_in_flight
    attr_reader :presumed_in_flight

    USER_AGENT = "nsq-ruby/#{Nsq::Version::STRING}"
    RESPONSE_HEARTBEAT = '_heartbeat_'
    RESPONSE_OK = 'OK'

    # opts:
    #   :host, :port     - required nsqd address
    #   :queue           - optional Queue that receives decoded Message frames
    #   :topic, :channel - re-subscribed automatically on (re)connect
    #   :msg_timeout     - milliseconds, minimum 1000 (default 60_000)
    #   :max_in_flight   - RDY count to advertise (default 1)
    #   :tls_v1          - upgrade the socket to TLS after IDENTIFY
    #   :tls_options     - paths for :certificate, :key, [:ca_certificate]
    #   :ssl_context     - deprecated alias for :tls_options
    def initialize(opts = {})
      @host = opts[:host] || (raise ArgumentError, 'host is required')
      @port = opts[:port] || (raise ArgumentError, 'port is required')
      @queue = opts[:queue]
      @topic = opts[:topic]
      @channel = opts[:channel]
      @msg_timeout = opts[:msg_timeout] || 60_000 # 60s
      @max_in_flight = opts[:max_in_flight] || 1
      @tls_options = opts[:tls_options]
      if opts[:ssl_context]
        if @tls_options
          warn 'ssl_context and tls_options both set. Using tls_options. Ignoring ssl_context.'
        else
          @tls_options = opts[:ssl_context]
          warn 'ssl_context will be deprecated nsq-ruby version 3. Please use tls_options instead.'
        end
      end
      @tls_v1 = !!opts[:tls_v1]
      if @tls_options
        if @tls_v1
          validate_tls_options!
        else
          warn 'tls_options was provided, but tls_v1 is false. Skipping validation of tls_options.'
        end
      end
      if @msg_timeout < 1000
        raise ArgumentError, 'msg_timeout cannot be less than 1000. it\'s in milliseconds.'
      end

      # for outgoing communication
      @write_queue = Queue.new

      # For indicating that the connection has died.
      # We use a Queue so we don't have to poll. Used to communicate across
      # threads (from write_loop and read_loop to connect_and_monitor).
      @death_queue = Queue.new

      @connected = false
      @presumed_in_flight = 0

      open_connection
      start_monitoring_connection
    end

    def connected?
      @connected
    end

    # close the connection and don't try to re-open it
    def close
      stop_monitoring_connection
      close_connection
    end

    # Protocol commands below; all are queued and written asynchronously
    # by the write loop.
    def sub(topic, channel)
      write "SUB #{topic} #{channel}\n"
    end

    def rdy(count)
      write "RDY #{count}\n"
    end

    def fin(message_id)
      write "FIN #{message_id}\n"
      decrement_in_flight
    end

    def req(message_id, timeout)
      write "REQ #{message_id} #{timeout}\n"
      decrement_in_flight
    end

    def touch(message_id)
      write "TOUCH #{message_id}\n"
    end

    def pub(topic, message)
      write ["PUB #{topic}\n", message.bytesize, message].pack('a*l>a*')
    end

    def dpub(topic, delay_in_ms, message)
      write ["DPUB #{topic} #{delay_in_ms}\n", message.bytesize, message].pack('a*l>a*')
    end

    def mpub(topic, messages)
      body = messages.map do |message|
        [message.bytesize, message].pack('l>a*')
      end.join
      write ["MPUB #{topic}\n", body.bytesize, messages.size, body].pack('a*l>l>a*')
    end

    # Tell the server we are ready for more messages!
    def re_up_ready
      rdy(@max_in_flight)
      # assume these messages are coming our way. yes, this might not be the
      # case, but it's much easier to manage our RDY state with the server if
      # we treat things this way.
      @presumed_in_flight = @max_in_flight
    end

    private

    def cls
      write "CLS\n"
    end

    def nop
      write "NOP\n"
    end

    # Queue raw bytes for the write loop; never writes on the caller thread.
    def write(raw)
      @write_queue.push(raw)
    end

    def write_to_socket(raw)
      debug ">>> #{raw.inspect}"
      @socket.write(raw)
    end

    # Send IDENTIFY and negotiate features with the server.
    def identify
      hostname = Socket.gethostname
      metadata = {
        client_id: hostname,
        hostname: hostname,
        feature_negotiation: true,
        heartbeat_interval: 30_000, # 30 seconds
        output_buffer: 16_000, # 16kb
        output_buffer_timeout: 250, # 250ms
        tls_v1: @tls_v1,
        snappy: false,
        deflate: false,
        sample_rate: 0, # disable sampling
        user_agent: USER_AGENT,
        msg_timeout: @msg_timeout
      }.to_json
      write_to_socket ["IDENTIFY\n", metadata.length, metadata].pack('a*l>a*')

      # Now wait for the response!
      frame = receive_frame
      server = JSON.parse(frame.data)

      if @max_in_flight > server['max_rdy_count']
        raise "max_in_flight is set to #{@max_in_flight}, server only supports #{server['max_rdy_count']}"
      end

      @server_version = server['version']
    end

    def handle_response(frame)
      if frame.data == RESPONSE_HEARTBEAT
        debug 'Received heartbeat'
        nop
      elsif frame.data == RESPONSE_OK
        debug 'Received OK'
      else
        die "Received response we don't know how to handle: #{frame.data}"
      end
    end

    # Read one length-prefixed frame off the socket; returns nil at EOF.
    def receive_frame
      if buffer = @socket.read(8)
        size, type = buffer.unpack('l>l>')
        size -= 4 # we want the size of the data part and type already took up 4 bytes
        data = @socket.read(size)
        frame_class = frame_class_for_type(type)
        return frame_class.new(data, self)
      end
    end

    FRAME_CLASSES = [Response, Error, Message]

    # Map a wire frame type id to its frame class.
    # Uses FRAME_CLASSES (previously a duplicated literal array) and guards
    # both ends of the range: a negative id would otherwise index from the
    # end of the array and silently return the wrong class.
    def frame_class_for_type(type)
      frame_class = FRAME_CLASSES[type] if type >= 0
      raise "Bad frame type specified: #{type}" if frame_class.nil?
      frame_class
    end

    def decrement_in_flight
      @presumed_in_flight -= 1

      if server_needs_rdy_re_ups?
        # now that we're less than @max_in_flight we might need to re-up our RDY state
        threshold = (@max_in_flight * 0.2).ceil
        re_up_ready if @presumed_in_flight <= threshold
      end
    end

    def start_read_loop
      @read_loop_thread ||= Thread.new{read_loop}
    end

    def stop_read_loop
      @read_loop_thread.kill if @read_loop_thread
      @read_loop_thread = nil
    end

    def read_loop
      loop do
        frame = receive_frame
        if frame.is_a?(Response)
          handle_response(frame)
        elsif frame.is_a?(Error)
          error "Error received: #{frame.data}"
        elsif frame.is_a?(Message)
          debug "<<< #{frame.body}"
          @queue.push(frame) if @queue
        else
          raise 'No data from socket'
        end
      end
    rescue Exception => ex
      # deliberately broad: any failure here means the connection is dead
      # and the monitor thread should reconnect
      die(ex)
    end

    def start_write_loop
      @write_loop_thread ||= Thread.new{write_loop}
    end

    # Ask the write loop to finish, then wait for it. The stop request is
    # delivered through @write_queue itself (a :stop_write_loop sentinel)
    # instead of a boolean flag, so a loop blocked in @write_queue.pop is
    # guaranteed to see it.
    def stop_write_loop
      if @write_loop_thread
        @write_queue.push(:stop_write_loop)
        @write_loop_thread.join
      end
      @write_loop_thread = nil
    end

    def write_loop
      data = nil
      loop do
        data = @write_queue.pop
        break if data == :stop_write_loop
        write_to_socket(data)
      end
    rescue Exception => ex
      # requeue PUB and MPUB commands so they aren't lost on reconnect
      if data =~ /^M?PUB/
        debug "Requeueing to write_queue: #{data.inspect}"
        @write_queue.push(data)
      end
      die(ex)
    end

    # Waits for death of connection
    def start_monitoring_connection
      @connection_monitor_thread ||= Thread.new{monitor_connection}
      @connection_monitor_thread.abort_on_exception = true
    end

    def stop_monitoring_connection
      @connection_monitor_thread.kill if @connection_monitor_thread
      # clear the thread ivar itself (previously this assigned to the
      # unused @connection_monitor, so the dead thread stayed cached and
      # start_monitoring_connection could never start a fresh one)
      @connection_monitor_thread = nil
    end

    def monitor_connection
      loop do
        # wait for death, hopefully it never comes
        cause_of_death = @death_queue.pop
        warn "Died from: #{cause_of_death}"

        debug 'Reconnecting...'
        reconnect
        debug 'Reconnected!'

        # clear all death messages, since we're now reconnected.
        # we don't want to complete this loop and immediately reconnect again.
        @death_queue.clear
      end
    end

    # close the connection if it's not already closed and try to reconnect
    # over and over until we succeed!
    def reconnect
      close_connection
      with_retries do
        open_connection
      end
    end

    def open_connection
      @socket = TCPSocket.new(@host, @port)
      # write the protocol magic ("  V2") and IDENTIFY directly to the
      # socket to make sure they reach nsqd ahead of anything in the
      # `@write_queue`
      write_to_socket '  V2'
      identify
      upgrade_to_ssl_socket if @tls_v1

      start_read_loop
      start_write_loop
      @connected = true

      # we need to re-subscribe if there's a topic specified
      if @topic
        debug "Subscribing to #{@topic}"
        sub(@topic, @channel)
        re_up_ready
      end
    end

    # closes the connection and stops listening for messages
    def close_connection
      cls if connected?
      stop_read_loop
      stop_write_loop
      @socket = nil
      @connected = false
    end

    # this is called when there's a connection error in the read or write loop
    # it triggers `connect_and_monitor` to try to reconnect
    def die(reason)
      @connected = false
      @death_queue.push(reason)
    end

    def upgrade_to_ssl_socket
      ssl_opts = [@socket, openssl_context].compact
      @socket = OpenSSL::SSL::SSLSocket.new(*ssl_opts)
      @socket.connect
    end

    # Build an SSLContext from @tls_options; nil when none were given.
    def openssl_context
      return unless @tls_options
      context = OpenSSL::SSL::SSLContext.new
      context.cert = OpenSSL::X509::Certificate.new(File.read(@tls_options[:certificate]))
      context.key = OpenSSL::PKey::RSA.new(File.read(@tls_options[:key]))
      if @tls_options[:ca_certificate]
        context.ca_file = OpenSSL::X509::Certificate.new(File.read(@tls_options[:ca_certificate])).to_pem
      end
      context
    end

    # Retry the supplied block with exponential backoff.
    #
    # Borrowed liberally from:
    # https://github.com/ooyala/retries/blob/master/lib/retries.rb
    def with_retries(&block)
      base_sleep_seconds = 0.5
      max_sleep_seconds = 300 # 5 minutes

      # Let's do this thing
      attempts = 0

      begin
        attempts += 1
        return block.call(attempts)
      rescue Errno::ECONNREFUSED, Errno::ECONNRESET, Errno::EHOSTUNREACH,
             Errno::ENETDOWN, Errno::ENETUNREACH, Errno::ETIMEDOUT, Timeout::Error => ex
        raise ex if attempts >= 100

        # The sleep time is an exponentially-increasing function of base_sleep_seconds.
        # But, it never exceeds max_sleep_seconds.
        sleep_seconds = [base_sleep_seconds * (2 ** (attempts - 1)), max_sleep_seconds].min

        # Randomize to a random value in the range sleep_seconds/2 .. sleep_seconds
        sleep_seconds = sleep_seconds * (0.5 * (1 + rand()))

        # But never sleep less than base_sleep_seconds
        sleep_seconds = [base_sleep_seconds, sleep_seconds].max

        warn "Failed to connect: #{ex}. Retrying in #{sleep_seconds.round(1)} seconds."
        snooze(sleep_seconds)
        retry
      end
    end

    # So we can stub for testing and reconnect in a tight loop
    def snooze(t)
      sleep(t)
    end

    def server_needs_rdy_re_ups?
      # versions less than 0.3.0 need RDY re-ups
      # see: https://github.com/bitly/nsq/blob/master/ChangeLog.md#030---2014-11-18
      major, minor, _patch = @server_version.split('.').map(&:to_i)
      major == 0 && minor <= 2
    end

    def validate_tls_options!
      [:key, :certificate].each do |key|
        unless @tls_options.has_key?(key)
          raise ArgumentError.new "@tls_options requires a :#{key}"
        end
      end

      [:key, :certificate, :ca_certificate].each do |key|
        if @tls_options[key] && !File.readable?(@tls_options[key])
          raise LoadError.new "@tls_options :#{key} is unreadable"
        end
      end
    end
  end
end
|
module Okcoin
  # Gem version string.
  VERSION = "0.1.1"
end
Bump version to 0.1.2.
module Okcoin
  # Gem version string.
  VERSION = "0.1.2"
end
|
module Olympia
  # Minimal HTTP client for an Olympus Wi-Fi camera's CGI interface,
  # reachable at a fixed address when the camera acts as an access point.
  class Camera
    PROTOCOL = 'http://'
    IP = '192.168.0.10'

    require 'net/http'
    require 'rexml/document'

    # Perform a GET against one of the camera's CGI endpoints.
    #
    # req    - endpoint path without the '.cgi' suffix (e.g. '/get_imglist')
    # params - optional raw query string (e.g. 'DIR=/DCIM')
    #
    # Returns the response body String, or 'Olympia: NET ERROR' on any failure.
    def get(req, params = '')
      rawuri = PROTOCOL + IP + req + '.cgi'
      rawuri += '?' + params if params.length > 0
      begin
        url = URI.parse(rawuri)
        http = Net::HTTP.new(url.host, url.port)
        http.read_timeout = 5
        http.open_timeout = 1
        # request_uri keeps the query string; the previous url.path
        # silently dropped params from every request. The block parameter
        # is named `conn` so it no longer shadows the outer `http`.
        resp = http.start { |conn| conn.get(url.request_uri) }
        resp.body
      rescue
        'Olympia: NET ERROR'
      end
    end

    # Fetch the image list for a directory (default '/DCIM').
    def get_imglist(path = '/DCIM')
      get('/get_imglist', 'DIR=' + path)
    end

    # Parse a VER_100 image-list body, printing each comma-separated field.
    #
    # body - raw response body from /get_imglist
    #
    # Returns the Array of list lines on success, or an error String when
    # the body mark is missing or a line does not have exactly 6 fields.
    def parse_list(body)
      # parse mark on top of body
      return 'Olympia: NO BODY MARK' unless body.start_with?("VER_100")
      linelist = body[8, body.length].split("\n")
      # parse list
      linelist.each do |line|
        sections = line.split(',')
        return 'Olympia: SECTION NUM ERROR' if sections.length != 6
        sections.each { |section| puts section }
      end
    end

    # Fetch the thumbnail for the image at the given path.
    def get_thumbnail(path)
      get('/get_thumbnail', 'DIR=' + path)
    end

    # get_caminfo
    # actually, it returns only a model name in the response body
    def get_caminfo
      xml = get('/get_caminfo')
      doc = REXML::Document.new xml
      doc.root.elements['model'].text
    end

    # Each of the following endpoints returns a small XML document whose
    # root text is the value of interest.
    def get_connectmode
      root_text '/get_connectmode'
    end

    def get_activate
      root_text '/get_activate'
    end

    def get_gpsdivunit
      root_text '/get_gpsdivunit'
    end

    def get_unusedcapacity
      root_text '/get_unusedcapacity'
    end

    def get_dcffilenum
      root_text '/get_dcffilenum'
    end

    # power off
    def exec_pwoff
      get('/exec_pwoff')
    end

    # switch mode
    def switch_cammode(mode = 'play')
      get('/switch_cammode', 'mode=' + mode)
    end

    private

    # Fetch an endpoint and return its XML root's text content.
    # Extracted to remove five identical fetch-and-parse bodies.
    def root_text(req)
      xml = get(req)
      REXML::Document.new(xml).root.text
    end
  end
end
Add exception handling to XML parsing.
module Olympia
  # Thin HTTP client for the CGI API exposed by Olympus WiFi cameras,
  # which listen on the fixed address 192.168.0.10 when the host joins
  # the camera's own access point.
  class Camera
    PROTOCOL = 'http://'
    IP = '192.168.0.10'

    require 'net/http'
    require 'rexml/document'

    # Issue a GET for "<req>.cgi", optionally appending the raw query
    # string +params+ (e.g. "DIR=/DCIM"), with the User-Agent/Referer
    # headers this revision introduced.
    #
    # BUGFIX: the previous revision called Net::HTTP#add_field (a
    # request-object method Net::HTTP does not have, so every call
    # raised and fell into the rescue) and left debug `p http; exit 0`
    # in place. It also requested url.path, dropping the query string.
    # Headers are now passed with the request and the full request_uri
    # is used.
    #
    # @return [String] response body, or 'Olympia: NET ERROR' on any failure
    def get(req, params = '')
      rawuri = PROTOCOL + IP + req + '.cgi'
      rawuri += '?' + params if params.length > 0
      begin
        url = URI.parse(rawuri)
        http = Net::HTTP.new(url.host, url.port)
        http.read_timeout = 5
        http.open_timeout = 1
        resp = http.start do |conn|
          conn.get(url.request_uri,
                   'User-Agent' => 'OlympusCameraKit',
                   'Referer' => '')
        end
        resp.body
      rescue
        'Olympia: NET ERROR'
      end
    end

    # List images below +path+ on the camera's storage.
    def get_imglist(path = '/DCIM')
      get('/get_imglist', 'DIR=' + path)
    end

    # Parse an image-list body. The payload must start with the
    # "VER_100" marker; each following line must have exactly 6
    # comma-separated fields. Returns an error string on malformed
    # input, otherwise the array of raw lines (each field is printed
    # as a side effect).
    def parse_list(body)
      if body.start_with?("VER_100")
        rawlist = body[8, body.length]
        linelist = rawlist.split("\n")
      else
        return 'Olympia: NO BODY MARK'
      end
      linelist.each do |line|
        sections = line.split(',')
        if sections.length != 6
          return 'Olympia: SECTION NUM ERROR'
        end
        sections.each do |section|
          puts section
        end
      end
    end

    # Fetch the thumbnail for the image at +path+.
    def get_thumbnail(path)
      get('/get_thumbnail', 'DIR=' + path)
    end

    # Camera model name — the /get_caminfo response carries only a
    # <model> element. Returns 'Olympia: XML ERROR' on unparseable XML.
    def get_caminfo
      xml = get('/get_caminfo')
      begin
        doc = REXML::Document.new xml
        return doc.root.elements['model'].text
      rescue
        return 'Olympia: XML ERROR'
      end
    end

    # Root text of the /get_connectmode response.
    def get_connectmode
      root_text(get('/get_connectmode'))
    end

    # Root text of the /get_activate response.
    def get_activate
      root_text(get('/get_activate'))
    end

    # Root text of the /get_gpsdivunit response.
    def get_gpsdivunit
      root_text(get('/get_gpsdivunit'))
    end

    # Root text of the /get_unusedcapacity response.
    def get_unusedcapacity
      root_text(get('/get_unusedcapacity'))
    end

    # Root text of the /get_dcffilenum response.
    def get_dcffilenum
      root_text(get('/get_dcffilenum'))
    end

    # Power the camera off.
    def exec_pwoff
      get('/exec_pwoff')
    end

    # Switch camera mode (defaults to 'play').
    def switch_cammode(mode = 'play')
      get('/switch_cammode', 'mode=' + mode)
    end

    # Send a shutter command.
    def exec_shutter(com)
      get('/exec_shutter', 'com=' + com)
    end

    private

    # Parse +xml+ and return the root element's text, or
    # 'Olympia: XML ERROR' when parsing fails — the begin/rescue
    # pattern previously copy-pasted into every getter.
    def root_text(xml)
      doc = REXML::Document.new(xml)
      doc.root.text
    rescue
      'Olympia: XML ERROR'
    end
  end
end
|
#
# Copyright:: Copyright (c) 2012-2014 Chef Software, Inc.
# License:: Apache License, Version 2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
require 'mixlib/config'
require 'omnibus/exceptions'
require 'json'
module Omnibus
# Global configuration object for Omnibus runs.
#
# @todo Write a {http://yardoc.org/guides/extending-yard/writing-handlers.html
# Yard handler} for Mixlib::Config-style DSL methods. I'd like
# the default value to show up in the docs without having to type
# it out twice, which I'm doing now for benefit of viewers of the Yard docs.
class Config
extend Mixlib::Config
# @!group Directory Configuration Parameters
# @!attribute [rw] cache_dir
# The absolute path to the directory on the virtual machine where
# code will be cached.
#
# Defaults to `"/var/cache/omnibus/cache"`.
#
# @return [String]
default :cache_dir, '/var/cache/omnibus/cache'
# @!attribute [rw] install_path_cache_dir
# The absolute path to the directory on the virtual machine where
# install paths will be progressively cached.
#
# Defaults to `"/var/cache/omnibus/cache/install_path"`.
#
# @return [String]
default :install_path_cache_dir, '/var/cache/omnibus/cache/install_path'
# @!attribute [rw] source_dir
# The absolute path to the directory on the virtual machine where
# source code will be downloaded.
#
# Defaults to `"/var/cache/omnibus/src"`.
#
# @return [String]
default :source_dir, '/var/cache/omnibus/src'
# @!attribute [rw] build_dir
# The absolute path to the directory on the virtual machine where
# software will be built.
#
# Defaults to `"/var/cache/omnibus/build"`.
#
# @return [String]
default :build_dir, '/var/cache/omnibus/build'
# @!attribute [rw] package_dir
# The absolute path to the directory on the virtual machine where
# packages will be constructed.
#
# Defaults to `"/var/cache/omnibus/pkg"`.
#
# @return [String]
default :package_dir, '/var/cache/omnibus/pkg'
# @!attribute [rw] package_tmp
# The absolute path to the directory on the virtual machine where
# packagers will store intermediate packaging products. Some packaging
# methods (notably fpm) handle this internally so not all packagers will
# use this setting.
#
# Defaults to `"/var/cache/omnibus/pkg-tmp"`.
#
# @return [String]
default :package_tmp, '/var/cache/omnibus/pkg-tmp'
# @!attribute [rw] project_dir
# The relative path of the directory containing {Omnibus::Project}
# DSL files. This is relative to {#project_root}.
#
# Defaults to `"config/projects"`.
#
# @return [String]
default :project_dir, 'config/projects'
# @!attribute [rw] software_dir
# The relative path of the directory containing {Omnibus::Software}
# DSL files. This is relative {#project_root}.
#
# Defaults to `"config/software"`.
#
# @return [String]
default :software_dir, 'config/software'
# @!attribute [rw] project_root
# The root directory in which to look for {Omnibus::Project} and
# {Omnibus::Software} DSL files.
#
# Defaults to the current working directory.
#
# @return [String]
default :project_root, Dir.pwd
# @!attribute [rw] install_dir
# Installation directory
#
# Defaults to `"/opt/chef"`.
#
# @todo This appears to be unused, and actually conflated with
# {Omnibus::Project#install_path}
#
# @return [String]
default :install_dir, '/opt/chef'
# @!endgroup
# @!group DMG configuration options
# @!attribute [rw] build_dmg
#   Package OSX pkg files inside a DMG
#
# @return [Boolean]
default :build_dmg, false
# @!attribute [rw] dmg_window_bounds
# Indicate the starting x,y and ending x,y positions for the created DMG
# window.
#
# @return [String]
default :dmg_window_bounds, '100, 100, 750, 600'
# @!attribute [rw] dmg_pkg_position
# Indicate the starting x,y position where the .pkg file should live in
# the DMG window.
#
# @return [String]
default :dmg_pkg_position, '535, 50'
# @!endgroup
# @!group S3 Caching Configuration Parameters
# @!attribute [rw] use_s3_caching
# Indicate if you wish to cache software artifacts in S3 for
# quicker build times. Requires {#s3_bucket}, {#s3_access_key},
# and {#s3_secret_key} to be set if this is set to `true`.
#
# Defaults to `false`.
#
# @return [Boolean]
default :use_s3_caching, false
# @!attribute [rw] s3_bucket
# The name of the S3 bucket you want to cache software artifacts in.
#
# Defaults to `nil`. Must be set if {#use_s3_caching} is `true`.
#
# @return [String, nil]
default :s3_bucket, nil
# @!attribute [rw] s3_access_key
# The S3 access key to use with S3 caching.
#
# Defaults to `nil`. Must be set if {#use_s3_caching} is `true`.
#
# @return [String, nil]
default :s3_access_key, nil
# @!attribute [rw] s3_secret_key
# The S3 secret key to use with S3 caching.
#
# Defaults to `nil`. Must be set if {#use_s3_caching} is `true.`
#
# @return [String, nil]
default :s3_secret_key, nil
# @!endgroup
# @!group S3 Release Parameters
# @!attribute [rw] release_s3_bucket
# The name of the S3 bucket you want to release artifacts to.
#
# Defaults to `nil`. Must be set to use `release package` command.
#
# @return [String, nil]
default :release_s3_bucket, nil
# @!attribute [rw] release_s3_access_key
# The S3 access key to use for S3 artifact release.
#
# Defaults to `nil`. Must be set to use `release package` command.
#
# @return [String, nil]
default :release_s3_access_key, nil
# @!attribute [rw] release_s3_secret_key
# The S3 secret key to use for S3 artifact release
#
# Defaults to `nil`. Must be set to use `release package` command.
#
# @return [String, nil]
default :release_s3_secret_key, nil
# @!endgroup
# @!group Miscellaneous Configuration Parameters
# @!attribute [rw] override_file
#
# @return [Boolean]
default :override_file, nil
# @!attribute [rw] software_gem
#
# The gem to pull software definitions from. This is just the name of the gem, which is used
# to find the path to your software definitions, and you must also specify this gem in the
# Gemfile of your project repo in order to include the gem in your bundle.
#
# Defaults to "omnibus-software".
#
# @return [String, nil]
default :software_gem, 'omnibus-software'
# @!attribute [rw] solaris_compiler
#
# @return [String, nil]
default :solaris_compiler, nil
# @!endgroup
# @!group Build Version Parameters
# @!attribute [rw] append_timestamp
#
# @return [Boolean]
default :append_timestamp, true
# @!endgroup
# @!group Build Control Parameters
# @!attribute [rw] build_retries
#
# @return [Integer, nil]
default :build_retries, 3
# @!group Validation Methods

# Asserts that the Config object is in a valid state. If invalid
# for any reason, an exception will be thrown.
#
# @raise [RuntimeError]
# @return [void]
def self.validate
  valid_s3_config?
  # add other validation methods as needed
end

# Checks that a bucket name is configured whenever S3 caching is
# switched on.
#
# @raise [InvalidS3Configuration]
def self.valid_s3_config?
  return unless use_s3_caching

  fail InvalidS3Configuration.new(s3_bucket, s3_access_key, s3_secret_key) unless s3_bucket
end
# @!endgroup
end # Config
end # Omnibus
Fix copy-pasted documentation for the build_dmg setting.
#
# Copyright:: Copyright (c) 2012-2014 Chef Software, Inc.
# License:: Apache License, Version 2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
require 'mixlib/config'
require 'omnibus/exceptions'
require 'json'
module Omnibus
# Global configuration object for Omnibus runs.
#
# @todo Write a {http://yardoc.org/guides/extending-yard/writing-handlers.html
# Yard handler} for Mixlib::Config-style DSL methods. I'd like
# the default value to show up in the docs without having to type
# it out twice, which I'm doing now for benefit of viewers of the Yard docs.
class Config
extend Mixlib::Config
# @!group Directory Configuration Parameters
# @!attribute [rw] cache_dir
# The absolute path to the directory on the virtual machine where
# code will be cached.
#
# Defaults to `"/var/cache/omnibus/cache"`.
#
# @return [String]
default :cache_dir, '/var/cache/omnibus/cache'
# @!attribute [rw] install_path_cache_dir
# The absolute path to the directory on the virtual machine where
# install paths will be progressively cached.
#
# Defaults to `"/var/cache/omnibus/cache/install_path"`.
#
# @return [String]
default :install_path_cache_dir, '/var/cache/omnibus/cache/install_path'
# @!attribute [rw] source_dir
# The absolute path to the directory on the virtual machine where
# source code will be downloaded.
#
# Defaults to `"/var/cache/omnibus/src"`.
#
# @return [String]
default :source_dir, '/var/cache/omnibus/src'
# @!attribute [rw] build_dir
# The absolute path to the directory on the virtual machine where
# software will be built.
#
# Defaults to `"/var/cache/omnibus/build"`.
#
# @return [String]
default :build_dir, '/var/cache/omnibus/build'
# @!attribute [rw] package_dir
# The absolute path to the directory on the virtual machine where
# packages will be constructed.
#
# Defaults to `"/var/cache/omnibus/pkg"`.
#
# @return [String]
default :package_dir, '/var/cache/omnibus/pkg'
# @!attribute [rw] package_tmp
# The absolute path to the directory on the virtual machine where
# packagers will store intermediate packaging products. Some packaging
# methods (notably fpm) handle this internally so not all packagers will
# use this setting.
#
# Defaults to `"/var/cache/omnibus/pkg-tmp"`.
#
# @return [String]
default :package_tmp, '/var/cache/omnibus/pkg-tmp'
# @!attribute [rw] project_dir
# The relative path of the directory containing {Omnibus::Project}
# DSL files. This is relative to {#project_root}.
#
# Defaults to `"config/projects"`.
#
# @return [String]
default :project_dir, 'config/projects'
# @!attribute [rw] software_dir
# The relative path of the directory containing {Omnibus::Software}
# DSL files. This is relative {#project_root}.
#
# Defaults to `"config/software"`.
#
# @return [String]
default :software_dir, 'config/software'
# @!attribute [rw] project_root
# The root directory in which to look for {Omnibus::Project} and
# {Omnibus::Software} DSL files.
#
# Defaults to the current working directory.
#
# @return [String]
default :project_root, Dir.pwd
# @!attribute [rw] install_dir
# Installation directory
#
# Defaults to `"/opt/chef"`.
#
# @todo This appears to be unused, and actually conflated with
# {Omnibus::Project#install_path}
#
# @return [String]
default :install_dir, '/opt/chef'
# @!endgroup
# @!group DMG configuration options
# @!attribute [rw] build_dmg
# Package OSX pkg files inside a DMG
#
# @return [Boolean]
default :build_dmg, false
# @!attribute [rw] dmg_window_bounds
# Indicate the starting x,y and ending x,y positions for the created DMG
# window.
#
# @return [String]
default :dmg_window_bounds, '100, 100, 750, 600'
# @!attribute [rw] dmg_pkg_position
# Indicate the starting x,y position where the .pkg file should live in
# the DMG window.
#
# @return [String]
default :dmg_pkg_position, '535, 50'
# @!endgroup
# @!group S3 Caching Configuration Parameters
# @!attribute [rw] use_s3_caching
# Indicate if you wish to cache software artifacts in S3 for
# quicker build times. Requires {#s3_bucket}, {#s3_access_key},
# and {#s3_secret_key} to be set if this is set to `true`.
#
# Defaults to `false`.
#
# @return [Boolean]
default :use_s3_caching, false
# @!attribute [rw] s3_bucket
# The name of the S3 bucket you want to cache software artifacts in.
#
# Defaults to `nil`. Must be set if {#use_s3_caching} is `true`.
#
# @return [String, nil]
default :s3_bucket, nil
# @!attribute [rw] s3_access_key
# The S3 access key to use with S3 caching.
#
# Defaults to `nil`. Must be set if {#use_s3_caching} is `true`.
#
# @return [String, nil]
default :s3_access_key, nil
# @!attribute [rw] s3_secret_key
# The S3 secret key to use with S3 caching.
#
# Defaults to `nil`. Must be set if {#use_s3_caching} is `true.`
#
# @return [String, nil]
default :s3_secret_key, nil
# @!endgroup
# @!group S3 Release Parameters
# @!attribute [rw] release_s3_bucket
# The name of the S3 bucket you want to release artifacts to.
#
# Defaults to `nil`. Must be set to use `release package` command.
#
# @return [String, nil]
default :release_s3_bucket, nil
# @!attribute [rw] release_s3_access_key
# The S3 access key to use for S3 artifact release.
#
# Defaults to `nil`. Must be set to use `release package` command.
#
# @return [String, nil]
default :release_s3_access_key, nil
# @!attribute [rw] release_s3_secret_key
# The S3 secret key to use for S3 artifact release
#
# Defaults to `nil`. Must be set to use `release package` command.
#
# @return [String, nil]
default :release_s3_secret_key, nil
# @!endgroup
# @!group Miscellaneous Configuration Parameters
# @!attribute [rw] override_file
#
# @return [Boolean]
default :override_file, nil
# @!attribute [rw] software_gem
#
# The gem to pull software definitions from. This is just the name of the gem, which is used
# to find the path to your software definitions, and you must also specify this gem in the
# Gemfile of your project repo in order to include the gem in your bundle.
#
# Defaults to "omnibus-software".
#
# @return [String, nil]
default :software_gem, 'omnibus-software'
# @!attribute [rw] solaris_compiler
#
# @return [String, nil]
default :solaris_compiler, nil
# @!endgroup
# @!group Build Version Parameters
# @!attribute [rw] append_timestamp
#
# @return [Boolean]
default :append_timestamp, true
# @!endgroup
# @!group Build Control Parameters
# @!attribute [rw] build_retries
#
# @return [Integer, nil]
default :build_retries, 3
# @!group Validation Methods

# Asserts that the Config object is in a valid state. If invalid
# for any reason, an exception will be thrown.
#
# @raise [RuntimeError]
# @return [void]
def self.validate
  valid_s3_config?
  # add other validation methods as needed
end

# Checks that a bucket name is configured whenever S3 caching is
# switched on.
#
# @raise [InvalidS3Configuration]
def self.valid_s3_config?
  return unless use_s3_caching

  fail InvalidS3Configuration.new(s3_bucket, s3_access_key, s3_secret_key) unless s3_bucket
end
# @!endgroup
end # Config
end # Omnibus
|
module Padrino
module Warden
module Helpers
  # The main accessor to the warden middleware
  def warden
    request.env['warden']
  end

  # Return session info
  #
  # @param [Symbol] the scope to retrieve session info for
  def session_info(scope=nil)
    # BUGFIX: with no scope this used to return `scope` itself (always
    # nil) instead of the default-scope session data.
    scope ? warden.session(scope) : warden.session
  end

  # Check the current session is authenticated to a given scope
  def authenticated?(scope=nil)
    scope ? warden.authenticated?(scope) : warden.authenticated?
  end
  alias_method :logged_in?, :authenticated?

  # Authenticate a user against defined strategies
  def authenticate(*args)
    warden.authenticate!(*args)
  end
  alias_method :login, :authenticate

  # Terminate the current session
  #
  # @param [Symbol] the session scope to terminate
  def logout(scopes=nil)
    scopes ? warden.logout(scopes) : warden.logout
  end

  # Access the user from the current session
  #
  # @param [Symbol] the scope for the logged in user
  def user(scope=nil)
    scope ? warden.user(scope) : warden.user
  end
  alias_method :current_user, :user

  # Store the logged in user in the session
  #
  # @param [Object] the user you want to store in the session
  # @option opts [Symbol] :scope The scope to assign the user
  # @example Set John as the current user
  #   user = User.find_by_name('John')
  #
  # NOTE: assignment syntax (`self.user = x`) can only pass one
  # argument; `opts` is only reachable via send(:user=, user, opts).
  def user=(new_user, opts={})
    warden.set_user(new_user, opts)
  end
  alias_method :current_user=, :user=

  # Require authorization for an action
  #
  # @param [String] path to redirect to if user is unauthenticated
  def authorize!(failure_path=nil)
    unless authenticated?
      session[:return_to] = request.path if options.auth_use_referrer
      redirect(failure_path ? failure_path : options.auth_failure_path)
    end
  end
end
# Padrino registration hook: installs the helpers, configures default
# auth settings, mounts the Warden middleware, and defines the
# :sessions controller routes.
def self.registered(app)
  app.helpers Helpers
  # Enable Sessions
  app.set :sessions, true
  app.set :auth_failure_path, '/'
  app.set :auth_success_path, '/'
  # Setting this to true will store last request URL
  # into a user's session so that to redirect back to it
  # upon successful authentication
  app.set :auth_use_referrer, false
  app.set :auth_error_message, "Could not log you in."
  app.set :auth_success_message, "You have logged in successfully."
  app.set :auth_login_template, 'sessions/login'
  app.set :auth_layout, nil
  # OAuth Specific Settings
  app.set :auth_use_oauth, false

  # Wire Warden into the middleware stack; authentication failures are
  # dispatched back to this app (the :unauthenticated route below).
  app.use ::Warden::Manager do |manager|
    manager.default_strategies :password
    manager.failure_app = app
  end

  app.controller :sessions do
    # Warden failure endpoint: 401 and re-render the login form.
    post :unauthenticated do
      status 401
      warden.custom_failure! if warden.config.failure_app == self.class
      env['x-rack.flash'][:error] = options.auth_error_message if defined?(Rack::Flash)
      render options.auth_login_template, :layout => options.auth_layout
    end

    # Show the login form, or start the OAuth dance when enabled and a
    # request token has already been prepared.
    get :login do
      if options.auth_use_oauth && !@auth_oauth_request_token.nil?
        session[:request_token] = @auth_oauth_request_token.token
        session[:request_token_secret] = @auth_oauth_request_token.secret
        redirect @auth_oauth_request_token.authorize_url
      else
        render options.auth_login_template, :layout => options.auth_layout
      end
    end

    # OAuth provider redirects back here after authorization.
    get :oauth_callback do
      if options.auth_use_oauth
        authenticate
        env['x-rack.flash'][:success] = options.auth_success_message if defined?(Rack::Flash)
        redirect options.auth_success_path
      else
        redirect options.auth_failure_path
      end
    end

    # Credential login; honours auth_use_referrer for the redirect target.
    post :login do
      authenticate
      env['x-rack.flash'][:success] = options.auth_success_message if defined?(Rack::Flash)
      redirect options.auth_use_referrer && session[:return_to] ? session.delete(:return_to) :
        options.auth_success_path
    end

    # Log the current user out (requires an authenticated session).
    get :logout do
      authorize!
      logout
      env['x-rack.flash'][:success] = options.auth_success_message if defined?(Rack::Flash)
      redirect options.auth_success_path
    end
  end
end
end # Warden
end # Padrino
Add configurable login and logout path options.
module Padrino
module Warden
module Helpers
  # The main accessor to the warden middleware
  def warden
    request.env['warden']
  end

  # Return session info
  #
  # @param [Symbol] the scope to retrieve session info for
  def session_info(scope=nil)
    # BUGFIX: with no scope this used to return `scope` itself (always
    # nil) instead of the default-scope session data.
    scope ? warden.session(scope) : warden.session
  end

  # Check the current session is authenticated to a given scope
  def authenticated?(scope=nil)
    scope ? warden.authenticated?(scope) : warden.authenticated?
  end
  alias_method :logged_in?, :authenticated?

  # Authenticate a user against defined strategies
  def authenticate(*args)
    warden.authenticate!(*args)
  end
  alias_method :login, :authenticate

  # Terminate the current session
  #
  # @param [Symbol] the session scope to terminate
  def logout(scopes=nil)
    scopes ? warden.logout(scopes) : warden.logout
  end

  # Access the user from the current session
  #
  # @param [Symbol] the scope for the logged in user
  def user(scope=nil)
    scope ? warden.user(scope) : warden.user
  end
  alias_method :current_user, :user

  # Store the logged in user in the session
  #
  # @param [Object] the user you want to store in the session
  # @option opts [Symbol] :scope The scope to assign the user
  # @example Set John as the current user
  #   user = User.find_by_name('John')
  #
  # NOTE: assignment syntax (`self.user = x`) can only pass one
  # argument; `opts` is only reachable via send(:user=, user, opts).
  def user=(new_user, opts={})
    warden.set_user(new_user, opts)
  end
  alias_method :current_user=, :user=

  # Require authorization for an action
  #
  # @param [String] path to redirect to if user is unauthenticated
  def authorize!(failure_path=nil)
    unless authenticated?
      session[:return_to] = request.path if options.auth_use_referrer
      redirect(failure_path ? failure_path : options.auth_failure_path)
    end
  end
end
# Padrino registration hook: installs the helpers, configures default
# auth settings (including mappable login/logout paths), mounts the
# Warden middleware, and defines the :sessions controller routes.
def self.registered(app)
  app.helpers Helpers
  # Enable Sessions
  app.set :sessions, true
  app.set :auth_login_path, '/login'
  app.set :auth_logout_path, '/logout'
  app.set :auth_failure_path, '/'
  app.set :auth_success_path, '/'
  # Setting this to true will store last request URL
  # into a user's session so that to redirect back to it
  # upon successful authentication
  app.set :auth_use_referrer, false
  app.set :auth_error_message, "Could not log you in."
  app.set :auth_success_message, "You have logged in successfully."
  app.set :auth_login_template, 'sessions/login'
  app.set :auth_layout, nil
  # OAuth Specific Settings
  app.set :auth_use_oauth, false

  # Wire Warden into the middleware stack; authentication failures are
  # dispatched back to this app (the :unauthenticated route below).
  app.use ::Warden::Manager do |manager|
    manager.default_strategies :password
    manager.failure_app = app
  end

  app.controller :sessions do
    # Warden failure endpoint: 401 and re-render the login form.
    post :unauthenticated do
      status 401
      warden.custom_failure! if warden.config.failure_app == self.class
      env['x-rack.flash'][:error] = options.auth_error_message if defined?(Rack::Flash)
      render options.auth_login_template, :layout => options.auth_layout
    end

    # Show the login form (mapped to auth_login_path), or start the
    # OAuth dance when enabled and a request token has been prepared.
    get :login, :map => app.auth_login_path do
      if options.auth_use_oauth && !@auth_oauth_request_token.nil?
        session[:request_token] = @auth_oauth_request_token.token
        session[:request_token_secret] = @auth_oauth_request_token.secret
        redirect @auth_oauth_request_token.authorize_url
      else
        render options.auth_login_template, :layout => options.auth_layout
      end
    end

    # OAuth provider redirects back here after authorization.
    get :oauth_callback do
      if options.auth_use_oauth
        authenticate
        env['x-rack.flash'][:success] = options.auth_success_message if defined?(Rack::Flash)
        redirect options.auth_success_path
      else
        redirect options.auth_failure_path
      end
    end

    # Credential login; honours auth_use_referrer for the redirect target.
    post :login, :map => app.auth_login_path do
      authenticate
      env['x-rack.flash'][:success] = options.auth_success_message if defined?(Rack::Flash)
      redirect options.auth_use_referrer && session[:return_to] ? session.delete(:return_to) :
        options.auth_success_path
    end

    # Log the current user out (mapped to auth_logout_path).
    get :logout, :map => app.auth_logout_path do
      authorize!
      logout
      env['x-rack.flash'][:success] = options.auth_success_message if defined?(Rack::Flash)
      redirect options.auth_success_path
    end
  end
end
end # Warden
end # Padrino
|
require 'warden'
$:.unshift File.join( File.dirname(__FILE__), '..', '..' )
require 'padrino/warden/version'
require 'padrino/warden/controller'
require 'padrino/warden/helpers'
module Padrino
module Warden
def self.registered(app, register_controller = true)
# Enable Sessions
app.set :sessions, true unless app.sessions
app.set :auth_failure_path, '/'
app.set :auth_success_path, '/'
# Setting this to true will store last request URL
# into a user's session so that to redirect back to it
# upon successful authentication
app.set :auth_use_referrer, false
app.set :auth_error_message, "You have provided invalid credentials."
app.set :auth_success_message, "You have logged in successfully."
app.set :deauth_success_message, "You have logged out successfully."
# Custom map options and layout for the sessions controller
app.set :auth_login_template, 'sessions/login'
app.set :auth_login_path, 'sessions/login' unless app.respond_to?(:auth_login_path)
app.set :auth_unauthenticated_path,'/unauthenticated' unless app.respond_to?(:auth_unauthenticated_path)
app.set :auth_logout_path,'sessions/logout' unless app.respond_to?(:auth_logout_path)
app.set :auth_login_layout, true
# OAuth Specific Settings
app.set :auth_use_oauth, false
app.set :default_strategies, [:password] unless app.respond_to?(:default_strategies)
app.set :warden_failure_app, app unless app.respond_to?(:warden_failure_app)
app.set :warden_default_scope, :session
app.set(:warden_config) { |manager| nil }
app.use ::Warden::Manager do |manager|
manager.scope_defaults :session, strategies: app.default_strategies
manager.default_scope = app.warden_default_scope
manager.failure_app = app.warden_failure_app
app.warden_config manager
end
if register_controller
Controller.registered app
end
app.helpers Helpers
end
end
end
Improve the explanation of the :auth_use_referrer setting.
require 'warden'
$:.unshift File.join( File.dirname(__FILE__), '..', '..' )
require 'padrino/warden/version'
require 'padrino/warden/controller'
require 'padrino/warden/helpers'
module Padrino
module Warden
def self.registered(app, register_controller = true)
# Enable Sessions
app.set :sessions, true unless app.sessions
app.set :auth_failure_path, '/'
app.set :auth_success_path, '/'
# set :auth_use_referrer to true to redirect a user back to an action
# protected by 'login'/'authenticate' after successful login
app.set :auth_use_referrer, false
app.set :auth_error_message, "You have provided invalid credentials."
app.set :auth_success_message, "You have logged in successfully."
app.set :deauth_success_message, "You have logged out successfully."
# Custom map options and layout for the sessions controller
app.set :auth_login_template, 'sessions/login'
app.set :auth_login_path, 'sessions/login' unless app.respond_to?(:auth_login_path)
app.set :auth_unauthenticated_path,'/unauthenticated' unless app.respond_to?(:auth_unauthenticated_path)
app.set :auth_logout_path,'sessions/logout' unless app.respond_to?(:auth_logout_path)
app.set :auth_login_layout, true
# OAuth Specific Settings
app.set :auth_use_oauth, false
app.set :default_strategies, [:password] unless app.respond_to?(:default_strategies)
app.set :warden_failure_app, app unless app.respond_to?(:warden_failure_app)
app.set :warden_default_scope, :session
app.set(:warden_config) { |manager| nil }
app.use ::Warden::Manager do |manager|
manager.scope_defaults :session, strategies: app.default_strategies
manager.default_scope = app.warden_default_scope
manager.failure_app = app.warden_failure_app
app.warden_config manager
end
if register_controller
Controller.registered app
end
app.helpers Helpers
end
end
end
|
# Pagelime
puts "PAGELIME CMS PLUGIN: included"

# Rails-version-specific bootstrap: Rails 2 wires everything by hand,
# Rails 3 loads the engine (the railtie approach is parked below).
# NOTE(review): under Rails 2 this calls initialize_pagelime_plugin
# before that method is defined later in this file — verify load order.
if Rails::VERSION::MAJOR == 2
  require "routing_extensions"
  initialize_pagelime_plugin
elsif Rails::VERSION::MAJOR == 3
  require "engine"
=begin
module Pagelime
  class Railtie < Rails::Railtie
    railtie_name :pagelime
    initializer "pagelime.initialize" do |app|
      initialize_pagelime_plugin
    end
  end
end
=end
end
# True when all three Pagelime ENV settings are present.
#
# @return [Boolean]
def pagelime_environment_configured?
  # BUGFIX: the last clause previously leaked the raw ENV string as the
  # return value; coerce every check to a real boolean for a predicate.
  ENV['PAGELIME_ACCOUNT_KEY'] != nil &&
    ENV['PAGELIME_ACCOUNT_SECRET'] != nil &&
    ENV['PAGELIME_HEROKU_API_VERSION'] != nil
end
# Fetch the Pagelime content XML for +page_path+, caching the response
# body in Rails.cache for 15 days under "cms:<base64 of path>".
#
# @param page_path [String] request path of the page being rendered
# @return [String] raw XML response body (possibly served from cache)
def fetch_cms_xml(page_path)
  page_key = Base64.encode64(page_path)
  xml_content = Rails.cache.fetch("cms:#{page_key}", :expires_in => 15.days) do
    puts "PAGELIME CMS PLUGIN: NO CACHE... loading xml"
    # set input values
    key = ENV['PAGELIME_ACCOUNT_KEY']
    secret = ENV['PAGELIME_ACCOUNT_SECRET']
    api_version = ENV['PAGELIME_HEROKU_API_VERSION']
    req = "apiKey=#{key}&path=#{CGI.escape(page_path)}"
    # generate API signature: base64 of HMAC-SHA1 over the request body
    # NOTE(review): Base64.encode64 appends a newline to the signature —
    # the API apparently tolerates it; confirm.
    signature = Base64.encode64("#{OpenSSL::HMAC.digest('sha1',secret,req)}")
    headers = {'Signature' => signature}
    puts "PAGELIME CMS PLUGIN: SIGNATURE:" + signature
    # get the url that we need to post to
    # NOTE(review): "qa." host looks like a staging endpoint — confirm.
    http = Net::HTTP::new('qa.cms.pagelime.com',80)
    # send the request
    response = http.request_post("/api/heroku/#{api_version}/content.asmx/PageContent", req, headers)
    # cache the file
    # File.open("#{Rails.root}/tmp/test.cms", 'w') {|f| f.write(response.body) }
    xml_content = response.body
    xml_content
  end
  return xml_content
end
# Replace every <div class="cms-editable" id="..."> in +html+ with the
# matching EditableRegion content from the Pagelime XML for +page_path+.
# Returns the html untouched when the plugin is unconfigured.
#
# @param page_path [String] page path used to look up the CMS XML
# @param html [String] rendered response body to transform
# @return [String] transformed HTML
def cms_process_html_block(page_path=nil, html="")
  begin
    unless pagelime_environment_configured?
      puts "PAGELIME CMS PLUGIN: Environment variables not configured"
      return html
    end
    # use nokogiri to replace contents
    doc = Nokogiri::HTML::DocumentFragment.parse(html)
    doc.css("div.cms-editable").each do |div|
      # Grab client ID
      client_id = div["id"]
      # Load pagelime content (fetch_cms_xml caches, so repeated calls
      # inside this loop hit the Rails cache after the first)
      xml_content = fetch_cms_xml(page_path)
      puts "PAGELIME CMS PLUGIN: parsing xml"
      soap = Nokogiri::XML::Document.parse(xml_content)
      puts "PAGELIME CMS PLUGIN: looking for region: #{client_id}"
      xpathNodes = soap.css("EditableRegion[@ElementID=\"#{client_id}\"]")
      puts "regions found: #{xpathNodes.count}"
      if (xpathNodes.count > 0)
        new_content = xpathNodes[0].css("Html")[0].content()
        puts "PAGELIME CMS PLUGIN: NEW CONTENT:"
        puts new_content
        if (new_content)
          # div.content = "Replaced content"
          div.replace new_content
        end
      end
    end
    return doc.to_html
  rescue
    # error
    puts "PAGELIME CMS PLUGIN: Error rendering block"
    # comment below to disable debug
    # NOTE(review): this `raise` re-raises the error, which makes the
    # `return html` fallback below unreachable — debug leftover?
    raise
    return html
  end
end
# Controller mixin: `acts_as_cms_editable` registers an after_filter
# that rewrites the rendered response body through the CMS.
module PagelimeControllerExtensions
  # Enable CMS post-processing for this controller's actions.
  #
  # @param opts [Hash] :except — actions to exclude from the filter
  def acts_as_cms_editable(opts=Hash.new)
    after_filter :cms_process_rendered_body, :except => opts[:except]
    include InstanceMethods
  end

  module InstanceMethods
    # After-filter: run the response body through cms_process_html_block
    # for the current request path; no-op when ENV is not configured.
    def cms_process_rendered_body
      puts "PAGELIME CMS PLUGIN: Processing response body"
      if pagelime_environment_configured?
        # response contents loaded into a variable
        input_content = response.body
        page_path = request.path
        html = cms_process_html_block(page_path,input_content)
        # output the final content
        response.body = html
      else
        puts "PAGELIME CMS PLUGIN: Environment variables not configured"
      end
    end
  end
end
# One-time plugin bootstrap: registers the plugin's app/{models,controllers,
# helpers} directories on the load paths (Rails 2 and Rails 3 expose
# different ActiveSupport::Dependencies accessors), then wires the
# controller extensions and the view helper into ActionController/ActionView.
def initialize_pagelime_plugin
  puts "PAGELIME CMS PLUGIN: initializing"
  # add dependencies to load paths
  %w{ models controllers helpers }.each do |dir|
    path = File.join(File.dirname(__FILE__), 'app', dir)
    $LOAD_PATH << path
    if Rails::VERSION::MAJOR == 2
      ActiveSupport::Dependencies.load_paths << path
      ActiveSupport::Dependencies.load_once_paths.delete(path)
    elsif Rails::VERSION::MAJOR == 3
      # Rails 3 renamed load_paths -> autoload_paths.
      ActiveSupport::Dependencies.autoload_paths << path
      ActiveSupport::Dependencies.autoload_once_paths.delete(path)
    end
  end
  # wire controller extensions
  ActionController::Base.extend PagelimeControllerExtensions
  # wire helper
  require "app/helpers/pagelime_helper"
  ActionView::Base.send :include, PagelimeHelper
end
Will have to test this with Rails 2.3.8.
# Pagelime
# Plugin entry point: Rails 2 wires routes and boots immediately;
# Rails 3 defers to an engine (see engine.rb).
puts "PAGELIME CMS PLUGIN: included"
if Rails::VERSION::MAJOR == 2
  require "routing_extensions"
  require "../config/routes.rb"
  initialize_pagelime_plugin
elsif Rails::VERSION::MAJOR == 3
  require "engine"
end
# True when all three Pagelime credentials are present in the environment.
# Normalizes the result to a strict boolean (the previous version leaked
# the raw value of PAGELIME_HEROKU_API_VERSION to callers).
def pagelime_environment_configured?
  !ENV['PAGELIME_ACCOUNT_KEY'].nil? &&
    !ENV['PAGELIME_ACCOUNT_SECRET'].nil? &&
    !ENV['PAGELIME_HEROKU_API_VERSION'].nil?
end
# Fetches the Pagelime content XML for +page_path+, caching the raw body
# in Rails.cache for 15 days (key: "cms:<base64 page path>").
# NOTE(review): the request is HMAC-SHA1 signed over the form body and
# posted over plain HTTP to a hard-coded QA host — confirm this endpoint
# before production use.
def fetch_cms_xml(page_path)
  page_key = Base64.encode64(page_path)
  xml_content = Rails.cache.fetch("cms:#{page_key}", :expires_in => 15.days) do
    puts "PAGELIME CMS PLUGIN: NO CACHE... loading xml"
    # set input values
    key = ENV['PAGELIME_ACCOUNT_KEY']
    secret = ENV['PAGELIME_ACCOUNT_SECRET']
    api_version = ENV['PAGELIME_HEROKU_API_VERSION']
    req = "apiKey=#{key}&path=#{CGI.escape(page_path)}"
    # generate API signature
    signature = Base64.encode64("#{OpenSSL::HMAC.digest('sha1',secret,req)}")
    headers = {'Signature' => signature}
    puts "PAGELIME CMS PLUGIN: SIGNATURE:" + signature
    # get the url that we need to post to
    http = Net::HTTP::new('qa.cms.pagelime.com',80)
    # send the request
    response = http.request_post("/api/heroku/#{api_version}/content.asmx/PageContent", req, headers)
    # cache the file
    # File.open("#{Rails.root}/tmp/test.cms", 'w') {|f| f.write(response.body) }
    xml_content = response.body
    xml_content
  end
  return xml_content
end
# Rewrites every <div class="cms-editable"> in +html+ with the matching
# EditableRegion markup fetched from the Pagelime CMS for +page_path+.
# Returns the processed HTML, or +html+ untouched when the plugin
# environment is not configured.
def cms_process_html_block(page_path=nil, html="")
  begin
    unless pagelime_environment_configured?
      puts "PAGELIME CMS PLUGIN: Environment variables not configured"
      return html
    end
    # use nokogiri to replace contents
    doc = Nokogiri::HTML::DocumentFragment.parse(html)
    editable = doc.css("div.cms-editable")
    unless editable.empty?
      # The CMS response depends only on the page path, so fetch and parse
      # it once rather than once per editable region (previously the fetch
      # and parse ran inside the loop below).
      xml_content = fetch_cms_xml(page_path)
      puts "PAGELIME CMS PLUGIN: parsing xml"
      soap = Nokogiri::XML::Document.parse(xml_content)
      editable.each do |div|
        # Grab client ID
        client_id = div["id"]
        puts "PAGELIME CMS PLUGIN: looking for region: #{client_id}"
        regions = soap.css("EditableRegion[@ElementID=\"#{client_id}\"]")
        puts "regions found: #{regions.count}"
        next if regions.count.zero?
        new_content = regions[0].css("Html")[0].content
        puts "PAGELIME CMS PLUGIN: NEW CONTENT:"
        puts new_content
        div.replace new_content if new_content
      end
    end
    return doc.to_html
  rescue
    # Log, then re-raise so failures surface while debugging; comment out
    # the `raise` to silently fall back to the unprocessed input instead.
    puts "PAGELIME CMS PLUGIN: Error rendering block"
    raise
    return html
  end
end
# Controller-level hook for the Pagelime CMS. Extending a controller class
# with this module provides +acts_as_cms_editable+, which registers an
# after_filter that rewrites the rendered response body.
module PagelimeControllerExtensions
  def acts_as_cms_editable(opts = {})
    after_filter :cms_process_rendered_body, :except => opts[:except]
    include InstanceMethods
  end

  # Instance-side behaviour mixed into controllers that opted in above.
  module InstanceMethods
    # After-render filter: replaces the response body with the
    # CMS-processed version when the plugin environment is configured.
    def cms_process_rendered_body
      puts "PAGELIME CMS PLUGIN: Processing response body"
      unless pagelime_environment_configured?
        puts "PAGELIME CMS PLUGIN: Environment variables not configured"
        return
      end
      rendered = response.body
      processed = cms_process_html_block(request.path, rendered)
      response.body = processed
    end
  end
end
# One-time plugin bootstrap: registers the plugin's app/{models,controllers,
# helpers} directories on the load paths (Rails 2 and Rails 3 expose
# different ActiveSupport::Dependencies accessors), then wires the
# controller extensions and the view helper into ActionController/ActionView.
def initialize_pagelime_plugin
  puts "PAGELIME CMS PLUGIN: initializing"
  # add dependencies to load paths
  %w{ models controllers helpers }.each do |dir|
    path = File.join(File.dirname(__FILE__), 'app', dir)
    $LOAD_PATH << path
    if Rails::VERSION::MAJOR == 2
      ActiveSupport::Dependencies.load_paths << path
      ActiveSupport::Dependencies.load_once_paths.delete(path)
    elsif Rails::VERSION::MAJOR == 3
      # Rails 3 renamed load_paths -> autoload_paths.
      ActiveSupport::Dependencies.autoload_paths << path
      ActiveSupport::Dependencies.autoload_once_paths.delete(path)
    end
  end
  # wire controller extensions
  ActionController::Base.extend PagelimeControllerExtensions
  # wire helper
  require "app/helpers/pagelime_helper"
  ActionView::Base.send :include, PagelimeHelper
end
require 'rest-client'
require 'jwt'
module PayApi
  # Client for creating a payment via the PayApi REST endpoint.
  #
  # +params+ is the payment description hash that gets JWT-encoded into
  # the request payload. Example (abridged):
  #   {
  #     payment:   { cardHolderEmail: "...", cardHolderName: "...",
  #                  paymentMethod: "mastercard", creditCardNumber: "...",
  #                  ccv: "...", expiresMonth: "...", expiresYear: "...",
  #                  locale: "en-US", ip: "..." },
  #     consumer:  { name: "...", streetAddress: "...", postalCode: "...",
  #                  city: "...", country: "..." },
  #     order:     { sumInCentsIncVat: 322, sumInCentsExcVat: 300,
  #                  vatInCents: 22, currency: "EUR", referenceId: "..." },
  #     products:  [{ id: "...", quantity: 1, title: "...",
  #                   priceInCentsIncVat: 122, ... }],
  #     callbacks: { success: "...", failed: "...", chargeback: "..." }
  #   }
  class Payment
    # NOTE(review): :data is exposed but never assigned in this class —
    # confirm whether it is still needed.
    attr_reader :params, :data

    # params - Hash describing the payment (see class docs).
    def initialize(params)
      @params = params
      # NOTE(review): this registers a new global before-execution hook on
      # every instantiation — confirm that is intended.
      RestClient.add_before_execution_proc do |req, _opts|
        req['alg'] = 'HS512'
      end
    end

    # Builds the JSON request body: an authentication token plus the
    # params signed into a JWT payment token.
    def payload
      # Fixed TypeError: the old code concatenated a Hash onto a String
      # (`puts '@params:' + @params`); interpolate the inspected value.
      puts "@params:#{@params.inspect}"
      {
        authenticationToken: Authenticate.new.call,
        paymentToken: JWT.encode(@params, CONFIG[:secret], 'HS512')
      }.to_json
    end

    # POSTs the payload to /v1/api/authorized/payments and returns the
    # raw RestClient response.
    def call
      resource = RestClient::Resource.new(
        CONFIG[:site],
        {
          read_timeout: CONFIG[:read_timeout],
          open_timeout: CONFIG[:open_timeout],
          headers: { content_type: :json, accept: :json }
        })
      resource['/v1/api/authorized/payments'].post payload
    end
  end
end
Updating client
require 'rest-client'
require 'jwt'
module PayApi
  # Client for creating a payment via the PayApi REST endpoint.
  #
  # Accepts either a payment description Hash (which gets JWT-encoded into
  # the request payload) or a pre-built JSON payload string.
  # Example params (abridged):
  #   {
  #     payment:   { cardHolderEmail: "...", cardHolderName: "...",
  #                  paymentMethod: "mastercard", creditCardNumber: "...",
  #                  ccv: "...", expiresMonth: "...", expiresYear: "...",
  #                  locale: "en-US", ip: "..." },
  #     consumer:  { name: "...", streetAddress: "...", postalCode: "...",
  #                  city: "...", country: "..." },
  #     order:     { sumInCentsIncVat: 322, sumInCentsExcVat: 300,
  #                  vatInCents: 22, currency: "EUR", referenceId: "..." },
  #     products:  [{ id: "...", quantity: 1, title: "...",
  #                   priceInCentsIncVat: 122, ... }],
  #     callbacks: { success: "...", failed: "...", chargeback: "..." }
  #   }
  class Payment
    # NOTE(review): :data is exposed but never assigned in this class —
    # confirm whether it is still needed.
    attr_reader :params, :data

    # params - Hash of payment attributes, or a pre-built JSON payload.
    def initialize(params)
      # is_a? (rather than comparing .class) also accepts Hash subclasses
      # such as ActiveSupport's HashWithIndifferentAccess.
      if params.is_a?(Hash)
        @params = params
      else
        @payload = params
      end
      # NOTE(review): this registers a new global before-execution hook on
      # every instantiation — confirm that is intended.
      RestClient.add_before_execution_proc do |req, _opts|
        req['alg'] = 'HS512'
      end
    end

    # Lazily builds (and memoizes) the JSON request body unless one was
    # injected via the constructor.
    def payload
      @payload ||= {
        authenticationToken: Authenticate.new.call,
        paymentToken: JWT.encode(@params, CONFIG[:secret], 'HS512')
      }.to_json
    end

    # POSTs the payload to /v1/api/authorized/payments and returns the
    # raw RestClient response.
    def call
      resource = RestClient::Resource.new(
        CONFIG[:site],
        {
          read_timeout: CONFIG[:read_timeout],
          open_timeout: CONFIG[:open_timeout],
          headers: { content_type: :json, accept: :json }
        })
      resource['/v1/api/authorized/payments'].post payload
    end
  end
end
|
Add a hash-to-query-string helper function.
require 'cgi'
# Builds an application/x-www-form-urlencoded query string from a Hash,
# with the encoded pairs sorted alphabetically (canonical ordering).
class Paysera::Helper
  def self.make_query(data)
    pairs = data.map do |key, value|
      "#{CGI.escape(key.to_s)}=#{CGI.escape(value.to_s)}"
    end
    pairs.compact.sort.join('&')
  end
end
module PiSys
  # Gem version. Frozen so the shared constant cannot be mutated.
  VERSION = '1.1.0'.freeze
end
Bump version to 1.1.1.
module PiSys
  # Gem version. Frozen so the shared constant cannot be mutated.
  VERSION = '1.1.1'.freeze
end
|
module Pkgwat
  # Gem version. Frozen so the shared constant cannot be mutated.
  VERSION = "0.1.4".freeze
end
Bumping version to 0.2.0
module Pkgwat
  # Gem version. Frozen so the shared constant cannot be mutated.
  VERSION = "0.2.0".freeze
end
|
require 'unirest'
require 'open-uri'
module Cinch
  module Plugins
    # Cinch IRC plugin that polls the Twitch API and announces live
    # streams.
    #
    # Environment:
    #   TWITCH_USERS    - comma-separated Twitch user names to watch
    #   TWITCH_CHANNELS - comma-separated IRC channels for announcements
    #   TWITCH_ID       - Twitch API client id
    class Twitch
      include Cinch::Plugin
      # Poll every 10 minutes.
      timer 600, method: :check_live
      match /(twitch)$/
      match /(twitch) (.+)/, method: :check_user
      match /(help twitch)$/, method: :help
      def initialize(*args)
        super
        @users = ENV['TWITCH_USERS'].split(',')
        # Users already announced as live (reset when they go offline).
        @online = []
      end
      # ".twitch" - reply with every watched user that is currently live.
      # NOTE(review): URI.encode was removed in Ruby 3.0 — confirm the
      # targeted Ruby version or migrate to URI.encode_www_form_component.
      def execute(m)
        @users.each do |user|
          user_get = Unirest.get "https://api.twitch.tv/kraken/streams/#{URI.encode(user)}",
            headers: { "Accept" => "application/json" },
            parameters: { :client_id => ENV['TWITCH_ID'] }
          next if user_get.body['stream'].nil?
          game = user_get.body['stream']['game']
          url = user_get.body['stream']['channel']['url']
          name = user_get.body['stream']['channel']['display_name']
          title = user_get.body['stream']['channel']['status']
          title = 'No Title' if title == ''
          viewers = user_get.body['stream']['viewers'] # unused here
          m.reply "LIVE: '#{title}' (#{name} playing #{game}) => #{url}"
        end
      end
      # Timer callback: announce users that just went live to every
      # configured channel; each user is announced once until they go
      # offline again (tracked via @online).
      def check_live
        response = "LIVE:" # unused; kept as-is
        @users.each do |user|
          user_get = Unirest.get "https://api.twitch.tv/kraken/streams/#{URI.encode(user)}",
            headers: { "Accept" => "application/json" },
            parameters: { :client_id => ENV['TWITCH_ID'] }
          @online.delete(user) if user_get.body['stream'].nil?
          next if user_get.body['stream'].nil?
          next if @online.include? user
          @online << user
          game = user_get.body['stream']['game']
          url = user_get.body['stream']['channel']['url']
          name = user_get.body['stream']['channel']['display_name']
          title = user_get.body['stream']['channel']['status']
          title = 'No Title' if title == ''
          viewers = user_get.body['stream']['viewers'] # unused here
          ENV["TWITCH_CHANNELS"].split(',').each do |channel|
            Channel(channel).send "LIVE: '#{title}' (#{name} playing #{game}) => #{url}"
          end
        end
      end
      # ".twitch <user>" - report the live status of one user.
      def check_user(m, prefix, check_user, user)
        query = user.split(/[[:space:]]/).join(' ')
        user_get = Unirest.get "https://api.twitch.tv/kraken/streams/#{URI.encode(query)}",
          headers: { "Accept" => "application/json" },
          parameters: { :client_id => ENV['TWITCH_ID'] }
        return m.reply "#{user} is not live bru" if user_get.body['stream'].nil?
        game = user_get.body['stream']['game']
        url = user_get.body['stream']['channel']['url']
        name = user_get.body['stream']['channel']['display_name']
        title = user_get.body['stream']['channel']['status']
        title = 'No Title' if title == ''
        viewers = user_get.body['stream']['viewers']
        m.reply "'#{title}' (#{name} playing #{game}), Viewers: #{viewers} => #{url}"
      end
      def help(m)
        m.reply "checks every 10 minutes if specified twitch broadcasts are live."
        m.reply "type .twitch [user] to check status of specific twitch user"
      end
    end
  end
end
Decrease timer to 300s and create response for all users returning nil
require 'unirest'
require 'open-uri'
module Cinch
  module Plugins
    # Cinch IRC plugin that polls the Twitch API and announces live
    # streams.
    #
    # Environment:
    #   TWITCH_USERS    - comma-separated Twitch user names to watch
    #   TWITCH_CHANNELS - comma-separated IRC channels for announcements
    #   TWITCH_ID       - Twitch API client id
    class Twitch
      include Cinch::Plugin

      # Poll every 5 minutes.
      timer 300, method: :check_live
      match /(twitch)$/
      match /(twitch) (.+)/, method: :check_user
      match /(help twitch)$/, method: :help

      def initialize(*args)
        super
        @users = ENV['TWITCH_USERS'].split(',')
        # Users already announced as live (reset when they go offline).
        @online = []
      end

      # ".twitch" - reply with every watched user that is currently live,
      # or a single "no1 streaming" message when none of them are.
      def execute(m)
        offline = 0
        @users.each do |user|
          stream = fetch_stream(user)
          offline += 1 if stream.nil?
          return m.reply "no1 streaming" if offline == @users.size
          next if stream.nil?
          m.reply "LIVE: '#{stream_title(stream)}' (#{stream['channel']['display_name']} playing #{stream['game']}) => #{stream['channel']['url']}"
        end
      end

      # Timer callback: announce users that just went live to every
      # configured channel; each user is announced once until they go
      # offline again (tracked via @online).
      def check_live
        @users.each do |user|
          stream = fetch_stream(user)
          if stream.nil?
            @online.delete(user)
            next
          end
          next if @online.include? user
          @online << user
          message = "LIVE: '#{stream_title(stream)}' (#{stream['channel']['display_name']} playing #{stream['game']}) => #{stream['channel']['url']}"
          ENV["TWITCH_CHANNELS"].split(',').each do |channel|
            Channel(channel).send message
          end
        end
      end

      # ".twitch <user>" - report the live status of one user.
      def check_user(m, prefix, check_user, user)
        query = user.split(/[[:space:]]/).join(' ')
        stream = fetch_stream(query)
        return m.reply "#{user} is not live bru" if stream.nil?
        m.reply "'#{stream_title(stream)}' (#{stream['channel']['display_name']} playing #{stream['game']}), Viewers: #{stream['viewers']} => #{stream['channel']['url']}"
      end

      def help(m)
        # Fixed: the poll timer fires every 300s (5 minutes), not 10.
        m.reply "checks every 5 minutes if specified twitch broadcasts are live."
        m.reply "type .twitch [user] to check status of specific twitch user"
      end

      private

      # Fetch the stream object for +user+, or nil when not live.
      # URI.encode was removed in Ruby 3.0; encode_www_form_component is
      # equivalent for Twitch user names (alphanumerics/underscore).
      def fetch_stream(user)
        result = Unirest.get "https://api.twitch.tv/kraken/streams/#{URI.encode_www_form_component(user)}",
          headers: { "Accept" => "application/json" },
          parameters: { :client_id => ENV['TWITCH_ID'] }
        result.body['stream']
      end

      # Channel status string, with a placeholder for empty titles.
      def stream_title(stream)
        title = stream['channel']['status']
        title == '' ? 'No Title' : title
      end
    end
  end
end
Initial class fill-in for the Break class. Has name conflict
- Will become Pomodori::Pausa shortly
module Pomodori
  # "Pausa" (break) event between pomodori. Currently an empty subclass —
  # all behaviour lives in Pomodori::Event. The Italian name avoids a
  # clash with the previous Break name (see commit note above this file).
  class Pausa < Pomodori::Event
  end
end
module Pomona
  # Gem version. Frozen so the shared constant cannot be mutated.
  VERSION = "0.1.0".freeze
end
Bump version to 0.5.0.
module Pomona
  # Gem version. Frozen so the shared constant cannot be mutated.
  VERSION = "0.5.0".freeze
end
|
require 'private_please/version'
require 'private_please/ruby_backports'
require 'private_please/candidate'
require 'private_please/storage'
require 'private_please/reporter'
require 'private_please/tracking'
# Install the global tracer that records line-change events for tracking.
set_trace_func(PrivatePlease::Tracking::LineChangeTracker::MY_TRACE_FUN)
module PrivatePlease
  # Mixes the tracking extension into every class and module.
  def self.install
    Module.send :include, PrivatePlease::Tracking::Extension
  end

  # TODO : replace class methods by PP instance + instance methods

  # Memoized store of observed method calls. Uses a module-level instance
  # variable instead of a @@ class variable (avoids cross-hierarchy
  # sharing pitfalls flagged by the Ruby style guide).
  def self.calls_store
    @_calls_store ||= Storage::CallsStore.new
  end

  # Memoized store of privatization candidates.
  def self.candidates_store
    @_candidates_store ||= Storage::CandidatesStore.new
  end
end
PrivatePlease.install

# Print the report when the traced program exits. The store accessors are
# module methods, so they must be called on PrivatePlease explicitly —
# bare `candidates_store` raised NameError here (issue #8).
at_exit {
  report = PrivatePlease::Reporter::SimpleText.new(PrivatePlease.candidates_store, PrivatePlease.calls_store)
  $stdout.puts report.text
}
Code in at_exit is invalid — fixes #8
Travis would complain :
```
/home/travis/build/alainravet/private_please/spec/../lib/private_please.rb:29: undefined local variable or method `candidates_store' for main:Object (NameError)
The command "bundle exec rspec" exited with 1.
Done. Your build exited with 1.
```
require 'private_please/version'
require 'private_please/ruby_backports'
require 'private_please/candidate'
require 'private_please/storage'
require 'private_please/reporter'
require 'private_please/tracking'
# Install the global tracer that records line-change events for tracking.
set_trace_func(PrivatePlease::Tracking::LineChangeTracker::MY_TRACE_FUN)
module PrivatePlease
  # Mixes the tracking extension into every class and module.
  def self.install
    Module.send :include, PrivatePlease::Tracking::Extension
  end

  # TODO : replace class methods by PP instance + instance methods

  # Memoized store of observed method calls. Uses a module-level instance
  # variable instead of a @@ class variable (avoids cross-hierarchy
  # sharing pitfalls flagged by the Ruby style guide).
  def self.calls_store
    @_calls_store ||= Storage::CallsStore.new
  end

  # Memoized store of privatization candidates.
  def self.candidates_store
    @_candidates_store ||= Storage::CandidatesStore.new
  end
end
PrivatePlease.install

# When the traced program exits, build the plain-text report from the two
# stores and print it to standard output.
at_exit do
  reporter = PrivatePlease::Reporter::SimpleText.new(
    PrivatePlease.candidates_store,
    PrivatePlease.calls_store
  )
  $stdout.puts reporter.text
end
|
require 'fileutils'
require 'google/protobuf'
load 'lib/proto/transit_pb.rb'
# Tile edge lengths in degrees for hierarchy levels 0..2.
SIZES = [4.0, 1.0, 0.25]
# GraphID bit layout, LSB first: | index (21) | tile id (22) | level (3) |
LEVEL_BITS = 3
TILE_INDEX_BITS = 22
ID_INDEX_BITS = 21
LEVEL_MASK = (2**LEVEL_BITS) - 1
TILE_INDEX_MASK = (2**TILE_INDEX_BITS) - 1
ID_INDEX_MASK = (2**ID_INDEX_BITS) - 1
# Sentinel id with every field saturated.
INVALID_ID = (ID_INDEX_MASK << (TILE_INDEX_BITS + LEVEL_BITS)) | (TILE_INDEX_MASK << LEVEL_BITS) | LEVEL_MASK
# Integer truncation helper mirroring Python's int(), used throughout the
# port of the Valhalla tiling math.
def int(value)
  # Simplify porting
  value.to_i
end
# Assigns dense, auto-incrementing integer indices to arbitrary keys.
class UniqueIndex
  # start: first index to hand out.
  def initialize(start: 0)
    @index = start - 1
    @values = {}
  end

  # Index previously assigned to +key+ (raises KeyError when unknown).
  # nil keys now pass through as nil instead of raising/polluting the map.
  def fetch(key)
    return nil if key.nil?
    @values.fetch(key)
  end

  # Existing index for +key+, assigning the next free one on first sight.
  def check(key)
    return nil if key.nil?
    @values[key] ||= (@index += 1)
  end

  # Unconditionally assign a fresh index to +key+, replacing any earlier
  # assignment.
  def next(key)
    return nil if key.nil?
    @values[key] = (@index += 1)
  end
end
# https://github.com/valhalla/valhalla/blob/master/valhalla/midgard/encoded.h
# Varint polyline codec: coordinates are scaled to 1e-6 degrees, delta
# encoded, zig-zag mapped, and packed 7 bits per byte.
class Shape7
  # [[lat, lon], ...] -> encoded String.
  def self.encode(coordinates)
    chunks = []
    prev_lat = 0
    prev_lon = 0
    coordinates.each do |lat, lon|
      fixed_lat = (lat * 1e6).floor
      fixed_lon = (lon * 1e6).floor
      chunks.concat(encode_int(fixed_lat - prev_lat))
      chunks.concat(encode_int(fixed_lon - prev_lon))
      prev_lat = fixed_lat
      prev_lon = fixed_lon
    end
    chunks.join
  end

  # Encoded String -> [[lat, lon], ...] (floats, deltas re-accumulated).
  def self.decode(value)
    lat_acc = 0
    lon_acc = 0
    decode_ints(value).each_slice(2).map do |dlat, dlon|
      lat_acc += dlat / 1e6
      lon_acc += dlon / 1e6
      [lat_acc, lon_acc]
    end
  end

  private

  # Zig-zag + base-128 varint encode one signed integer into chr chunks.
  def self.encode_int(number)
    bytes = []
    zigzag = number < 0 ? ~(number << 1) : (number << 1)
    until zigzag <= 0x7f
      bytes << (0x80 | (zigzag & 0x7f)).chr
      zigzag >>= 7
    end
    bytes << (zigzag & 0x7f).chr
  end

  # Inverse of encode_int over the whole string.
  def self.decode_ints(value)
    numbers = []
    pos = 0
    while pos < value.size
      shift = 0
      accum = 0
      byte = value[pos].ord
      while byte > 0x7f
        accum |= (byte & 0x7f) << shift
        shift += 7
        pos += 1
        byte = value[pos].ord
      end
      accum |= (byte & 0x7f) << shift
      # Undo the zig-zag mapping (one's complement when the lsb is set).
      numbers << ((accum & 1 == 1 ? ~accum : accum) >> 1)
      pos += 1
    end
    numbers
  end
end
# Wrapper around one Valhalla transit protobuf tile.
class Tile
  attr_reader :level, :tile, :message

  # data - optional serialized protobuf; when nil an empty tile is built.
  def initialize(level, tile, data: nil)
    @level = level
    @tile = tile
    # node index-within-tile => full graphid
    @index = {}
    @message = load(data)
  end

  # Decode +data+ (or create a fresh message) and index its nodes.
  def load(data)
    if data
      message = decode(data)
    else
      message = Valhalla::Mjolnir::Transit.new
    end
    message.nodes.each { |node| @index[GraphID.new(value: node.graphid).index] = node.graphid }
    message
  end

  def decode(data)
    Valhalla::Mjolnir::Transit.decode(data)
  end

  def encode
    Valhalla::Mjolnir::Transit.encode(@message)
  end

  # Next unused node index within this tile.
  def next_index
    (@index.keys.max || 0) + 1
  end

  # [xmin, ymin, xmax, ymax] bounding box of this tile, in degrees.
  def bbox
    GraphID.level_tile_to_bbox(@level, @tile)
  end
end
# Lazily loads, caches and writes Valhalla transit tiles under +path+.
class TileSet
  def initialize(path)
    @path = path
    # [level, tile] => Tile cache
    @tiles = {}
  end

  def get_tile(level, tile)
    @tiles[[level, tile]] ||= read_tile(level, tile)
  end

  def get_tile_by_lll(level, lat, lon)
    get_tile_by_graphid(GraphID.new(level: level, lat: lat, lon: lon))
  end

  def get_tile_by_graphid(graphid)
    get_tile(graphid.level, graphid.tile)
  end

  # Serialize +tile+ to its canonical on-disk location, creating parent
  # directories as needed.
  def write_tile(tile)
    fn = tile_path(tile.level, tile.tile)
    FileUtils.mkdir_p(File.dirname(fn))
    File.open(fn, 'wb') do |f|
      f.write(tile.encode)
    end
  end

  private

  # e.g. level 2, tile 123456789 => "<path>/2/123/456/789.pbf"
  def tile_path(level, tile)
    s = tile.to_s.rjust(9, "0")
    File.join(@path, level.to_s, s[0...3], s[3...6], s[6...9]+".pbf")
  end

  def read_tile(level, tile)
    fn = tile_path(level, tile)
    # File.exists? was deprecated and removed in Ruby 3.2; File.exist?
    # is the supported spelling.
    if File.exist?(fn)
      Tile.new(level, tile, data: File.read(fn))
    else
      Tile.new(level, tile)
    end
  end
end
# Packed Valhalla graph identifier: 3 bits level, 22 bits tile id,
# 21 bits index-within-tile (LSB first).
class GraphID
  attr_accessor :value

  # Pass value: directly, or the make_id keyword arguments.
  def initialize(value: nil, **kwargs)
    @value = value || (self.class.make_id(**kwargs))
  end

  # Pack level/tile/index into one integer. When lat/lon are given the
  # tile id is derived from the coordinate.
  def self.make_id(level: 0, tile: 0, index: 0, lat: nil, lon: nil)
    if lat && lon
      tile = lll_to_tile(level, lat, lon)
    end
    level | tile << LEVEL_BITS | index << (LEVEL_BITS + TILE_INDEX_BITS)
  end

  # Row-major tile id of the tile containing (lat, lon) at +tile_level+.
  def self.lll_to_tile(tile_level, lat, lon)
    size = SIZES[tile_level]
    width = int(360 / size)
    int((lat + 90) / size) * width + int((lon + 180 ) / size)
  end

  # [xmin, ymin, xmax, ymax] of the tile in degrees.
  def self.level_tile_to_bbox(level, tile)
    size = SIZES[level]
    width = int(360 / size)
    ymin = int(tile / width) * size - 90
    xmin = (tile % width) * size - 180
    [xmin, ymin, xmin + size, ymin + size]
  end

  # All [level, tile] pairs, across every level, whose tile intersects
  # the bounding box.
  # NOTE(review): the parameter names (ymin, xmin, ymax, xmax) disagree
  # with how the values are used below (left/bottom/right/top); confirm
  # the intended argument order against the call sites.
  def self.bbox_to_level_tiles(ymin, xmin, ymax, xmax)
    # if this is crossing the anti meridian split it up and combine
    left, bottom, right, top = ymin, xmin, ymax, xmax
    if left > right
      # Fixed: this previously called the nonexistent tiles_for_bbox and
      # raised NoMethodError; recurse on the two halves instead.
      east = bbox_to_level_tiles(left, bottom, 180.0, top)
      west = bbox_to_level_tiles(-180.0, bottom, right, top)
      return east + west
    end
    #move these so we can compute percentages
    left += 180
    right += 180
    bottom += 90
    top += 90
    tiles = []
    SIZES.each_index do |level|
      size = SIZES[level]
      (int(left/size)..(int(right/size))).each do |x|
        (int(bottom/size)..(int(top/size))).each do |y|
          tiles << [level, int(y * (360.0 / size) + x)]
        end
      end
    end
    tiles
  end

  def bbox
    self.class.level_tile_to_bbox(level, tile)
  end

  # Lowest 3 bits.
  def level
    @value & LEVEL_MASK
  end

  # Next 22 bits.
  def tile
    (@value >> LEVEL_BITS) & TILE_INDEX_MASK
  end

  # Top 21 bits.
  def index
    (@value >> (LEVEL_BITS + TILE_INDEX_BITS)) & ID_INDEX_MASK
  end
end
Return nil when the key is nil.
require 'fileutils'
require 'google/protobuf'
load 'lib/proto/transit_pb.rb'
# Tile edge lengths in degrees for hierarchy levels 0..2.
SIZES = [4.0, 1.0, 0.25]
# GraphID bit layout, LSB first: | index (21) | tile id (22) | level (3) |
LEVEL_BITS = 3
TILE_INDEX_BITS = 22
ID_INDEX_BITS = 21
LEVEL_MASK = (2**LEVEL_BITS) - 1
TILE_INDEX_MASK = (2**TILE_INDEX_BITS) - 1
ID_INDEX_MASK = (2**ID_INDEX_BITS) - 1
# Sentinel id with every field saturated.
INVALID_ID = (ID_INDEX_MASK << (TILE_INDEX_BITS + LEVEL_BITS)) | (TILE_INDEX_MASK << LEVEL_BITS) | LEVEL_MASK
# Integer truncation helper mirroring Python's int(), used throughout the
# port of the Valhalla tiling math.
def int(value)
  # Simplify porting
  value.to_i
end
# Hands out dense auto-incrementing integer ids for arbitrary keys.
class UniqueIndex
  # start: first id to hand out.
  def initialize(start: 0)
    @counter = start - 1
    @registry = {}
  end

  # Id previously assigned to +key+ (KeyError when absent, nil for nil).
  def fetch(key)
    return if key.nil?
    @registry.fetch(key)
  end

  # Existing id for +key+, assigning the next free one on first sight.
  def check(key)
    return if key.nil?
    @registry[key] ||= bump
  end

  # Force a fresh id onto +key+, replacing any earlier assignment.
  def next(key)
    return if key.nil?
    @registry[key] = bump
  end

  private

  def bump
    @counter += 1
  end
end
# https://github.com/valhalla/valhalla/blob/master/valhalla/midgard/encoded.h
# Varint polyline codec: coordinates are scaled to 1e-6 degrees, delta
# encoded, zig-zag mapped, and packed 7 bits per byte.
class Shape7
  # [[lat, lon], ...] -> encoded String.
  def self.encode(coordinates)
    chunks = []
    prev_lat = 0
    prev_lon = 0
    coordinates.each do |lat, lon|
      fixed_lat = (lat * 1e6).floor
      fixed_lon = (lon * 1e6).floor
      chunks.concat(encode_int(fixed_lat - prev_lat))
      chunks.concat(encode_int(fixed_lon - prev_lon))
      prev_lat = fixed_lat
      prev_lon = fixed_lon
    end
    chunks.join
  end

  # Encoded String -> [[lat, lon], ...] (floats, deltas re-accumulated).
  def self.decode(value)
    lat_acc = 0
    lon_acc = 0
    decode_ints(value).each_slice(2).map do |dlat, dlon|
      lat_acc += dlat / 1e6
      lon_acc += dlon / 1e6
      [lat_acc, lon_acc]
    end
  end

  private

  # Zig-zag + base-128 varint encode one signed integer into chr chunks.
  def self.encode_int(number)
    bytes = []
    zigzag = number < 0 ? ~(number << 1) : (number << 1)
    until zigzag <= 0x7f
      bytes << (0x80 | (zigzag & 0x7f)).chr
      zigzag >>= 7
    end
    bytes << (zigzag & 0x7f).chr
  end

  # Inverse of encode_int over the whole string.
  def self.decode_ints(value)
    numbers = []
    pos = 0
    while pos < value.size
      shift = 0
      accum = 0
      byte = value[pos].ord
      while byte > 0x7f
        accum |= (byte & 0x7f) << shift
        shift += 7
        pos += 1
        byte = value[pos].ord
      end
      accum |= (byte & 0x7f) << shift
      # Undo the zig-zag mapping (one's complement when the lsb is set).
      numbers << ((accum & 1 == 1 ? ~accum : accum) >> 1)
      pos += 1
    end
    numbers
  end
end
# Wrapper around one Valhalla transit protobuf tile.
class Tile
  attr_reader :level, :tile, :message

  # data - optional serialized protobuf; when nil an empty tile is built.
  def initialize(level, tile, data: nil)
    @level = level
    @tile = tile
    # node index-within-tile => full graphid
    @index = {}
    @message = load(data)
  end

  # Decode +data+ (or create a fresh message) and index its nodes.
  def load(data)
    if data
      message = decode(data)
    else
      message = Valhalla::Mjolnir::Transit.new
    end
    message.nodes.each { |node| @index[GraphID.new(value: node.graphid).index] = node.graphid }
    message
  end

  def decode(data)
    Valhalla::Mjolnir::Transit.decode(data)
  end

  def encode
    Valhalla::Mjolnir::Transit.encode(@message)
  end

  # Next unused node index within this tile.
  def next_index
    (@index.keys.max || 0) + 1
  end

  # [xmin, ymin, xmax, ymax] bounding box of this tile, in degrees.
  def bbox
    GraphID.level_tile_to_bbox(@level, @tile)
  end
end
# Lazily loads, caches and writes Valhalla transit tiles under +path+.
class TileSet
  def initialize(path)
    @path = path
    # [level, tile] => Tile cache
    @tiles = {}
  end

  def get_tile(level, tile)
    @tiles[[level, tile]] ||= read_tile(level, tile)
  end

  def get_tile_by_lll(level, lat, lon)
    get_tile_by_graphid(GraphID.new(level: level, lat: lat, lon: lon))
  end

  def get_tile_by_graphid(graphid)
    get_tile(graphid.level, graphid.tile)
  end

  # Serialize +tile+ to its canonical on-disk location, creating parent
  # directories as needed.
  def write_tile(tile)
    fn = tile_path(tile.level, tile.tile)
    FileUtils.mkdir_p(File.dirname(fn))
    File.open(fn, 'wb') do |f|
      f.write(tile.encode)
    end
  end

  private

  # e.g. level 2, tile 123456789 => "<path>/2/123/456/789.pbf"
  def tile_path(level, tile)
    s = tile.to_s.rjust(9, "0")
    File.join(@path, level.to_s, s[0...3], s[3...6], s[6...9]+".pbf")
  end

  def read_tile(level, tile)
    fn = tile_path(level, tile)
    # File.exists? was deprecated and removed in Ruby 3.2; File.exist?
    # is the supported spelling.
    if File.exist?(fn)
      Tile.new(level, tile, data: File.read(fn))
    else
      Tile.new(level, tile)
    end
  end
end
# Packed Valhalla graph identifier: 3 bits level, 22 bits tile id,
# 21 bits index-within-tile (LSB first).
class GraphID
  attr_accessor :value

  # Pass value: directly, or the make_id keyword arguments.
  def initialize(value: nil, **kwargs)
    @value = value || (self.class.make_id(**kwargs))
  end

  # Pack level/tile/index into one integer. When lat/lon are given the
  # tile id is derived from the coordinate.
  def self.make_id(level: 0, tile: 0, index: 0, lat: nil, lon: nil)
    if lat && lon
      tile = lll_to_tile(level, lat, lon)
    end
    level | tile << LEVEL_BITS | index << (LEVEL_BITS + TILE_INDEX_BITS)
  end

  # Row-major tile id of the tile containing (lat, lon) at +tile_level+.
  def self.lll_to_tile(tile_level, lat, lon)
    size = SIZES[tile_level]
    width = int(360 / size)
    int((lat + 90) / size) * width + int((lon + 180 ) / size)
  end

  # [xmin, ymin, xmax, ymax] of the tile in degrees.
  def self.level_tile_to_bbox(level, tile)
    size = SIZES[level]
    width = int(360 / size)
    ymin = int(tile / width) * size - 90
    xmin = (tile % width) * size - 180
    [xmin, ymin, xmin + size, ymin + size]
  end

  # All [level, tile] pairs, across every level, whose tile intersects
  # the bounding box.
  # NOTE(review): the parameter names (ymin, xmin, ymax, xmax) disagree
  # with how the values are used below (left/bottom/right/top); confirm
  # the intended argument order against the call sites.
  def self.bbox_to_level_tiles(ymin, xmin, ymax, xmax)
    # if this is crossing the anti meridian split it up and combine
    left, bottom, right, top = ymin, xmin, ymax, xmax
    if left > right
      # Fixed: this previously called the nonexistent tiles_for_bbox and
      # raised NoMethodError; recurse on the two halves instead.
      east = bbox_to_level_tiles(left, bottom, 180.0, top)
      west = bbox_to_level_tiles(-180.0, bottom, right, top)
      return east + west
    end
    #move these so we can compute percentages
    left += 180
    right += 180
    bottom += 90
    top += 90
    tiles = []
    SIZES.each_index do |level|
      size = SIZES[level]
      (int(left/size)..(int(right/size))).each do |x|
        (int(bottom/size)..(int(top/size))).each do |y|
          tiles << [level, int(y * (360.0 / size) + x)]
        end
      end
    end
    tiles
  end

  def bbox
    self.class.level_tile_to_bbox(level, tile)
  end

  # Lowest 3 bits.
  def level
    @value & LEVEL_MASK
  end

  # Next 22 bits.
  def tile
    (@value >> LEVEL_BITS) & TILE_INDEX_MASK
  end

  # Top 21 bits.
  def index
    (@value >> (LEVEL_BITS + TILE_INDEX_BITS)) & ID_INDEX_MASK
  end
end
|
require 'excon'
require 'json'
module Puffery
  # HTTP client for the Puffery ad-group API (Excon + JSON).
  class Client
    USER_AGENT = "Puffery Ruby #{Puffery::VERSION}"

    RequestError = Class.new(StandardError)
    ResourceNotFoundError = Class.new(RequestError)

    STATUS_ENABLED = 'enabled'
    STATUS_PASUED = 'paused'
    # Correctly spelled alias; STATUS_PASUED is kept for existing callers.
    STATUS_PAUSED = STATUS_PASUED

    attr_accessor :url, :key

    def initialize(url = nil, key = nil)
      self.url = url || Puffery.configuration.api_url || raise('Missing Api URL')
      self.key = key || Puffery.configuration.api_key || raise('Missing Api key')
    end

    # Memoized Excon connection with JSON + API-key headers.
    def conn
      @conn ||= Excon.new(self.url, debug: Puffery.debug?, headers:
        { 'Content-Type' => 'application/json', 'X-API-KEY' => key })
    end

    # Create (POST, uid nil) or replace (PUT) an ad group. +active+
    # toggles enabled/paused. Returns the ad_group hash.
    def push(uid, payload, active:)
      payload[:ad_group][:status] = active ? STATUS_ENABLED : STATUS_PASUED
      json = if uid
        request(:put, "/api/ad_groups/#{uid}", payload)
      else
        request(:post, '/api/ad_groups', payload)
      end
      json['ad_group']
    end

    # Pause (or otherwise PATCH) an ad group; returns the ad_group hash.
    def down(uid, attrs = { status: STATUS_PASUED })
      json = request(:patch, "/api/ad_groups/#{uid}", attrs)
      json['ad_group']
    end

    # Delete an ad group; returns its deleted_at timestamp.
    def unlink(uid)
      json = request(:delete, "/api/ad_groups/#{uid}")
      json['ad_group']['deleted_at']
    end

    def request(method, path, body = {})
      res = conn.request(method: method, path: path, body: JSON.dump(body))
      json = JSON.parse(res.body)
      handle_errors(json)
      json
    end

    # Raise on API-reported errors. Tolerates responses without an
    # "errors" key (the old `data['errors'].any?` crashed with
    # NoMethodError on nil for such responses).
    def handle_errors(data)
      errors = data['errors']
      return unless errors && errors.any?
      if data['code'] == 404
        raise ResourceNotFoundError, errors.first
      else
        raise RequestError,
          "Request Error occurred: %s" % errors.first
      end
    end

    # Update partial resources
    def patch(payload)
    end

    # PUT request replace whole object
    # PUTting the same data multiple times to the same resource
    # should not result in different resources
    def put(payload)
    end
  end
end
Add remove_keyword and add_keyword methods
require 'excon'
require 'json'
module Puffery
  # HTTP client for the Puffery ad-group API (Excon + JSON).
  class Client
    USER_AGENT = "Puffery Ruby #{Puffery::VERSION}"

    RequestError = Class.new(StandardError)
    ResourceNotFoundError = Class.new(RequestError)

    STATUS_ENABLED = 'enabled'
    STATUS_PASUED = 'paused'
    # Correctly spelled alias; STATUS_PASUED is kept for existing callers.
    STATUS_PAUSED = STATUS_PASUED

    attr_accessor :url, :key

    def initialize(url = nil, key = nil)
      self.url = url || Puffery.configuration.api_url || raise('Missing Api URL')
      self.key = key || Puffery.configuration.api_key || raise('Missing Api key')
    end

    # Memoized Excon connection with JSON + API-key headers.
    def conn
      @conn ||= Excon.new(self.url, debug: Puffery.debug?, headers:
        { 'Content-Type' => 'application/json', 'X-API-KEY' => key })
    end

    # Create (POST, uid nil) or replace (PUT) an ad group. +active+
    # toggles enabled/paused. Returns the ad_group hash.
    def push(uid, payload, active:)
      payload[:ad_group][:status] = active ? STATUS_ENABLED : STATUS_PASUED
      json = if uid
        request(:put, "/api/ad_groups/#{uid}", payload)
      else
        request(:post, '/api/ad_groups', payload)
      end
      json['ad_group']
    end

    # Pause (or otherwise PATCH) an ad group; returns the ad_group hash.
    def down(uid, attrs = { status: STATUS_PASUED })
      json = request(:patch, "/api/ad_groups/#{uid}", attrs)
      json['ad_group']
    end

    # Delete an ad group; returns its deleted_at timestamp.
    def unlink(uid)
      json = request(:delete, "/api/ad_groups/#{uid}")
      json['ad_group']['deleted_at']
    end

    def request(method, path, body = {})
      res = conn.request(method: method, path: path, body: JSON.dump(body))
      json = JSON.parse(res.body)
      handle_errors(json)
      json
    end

    # Raise on API-reported errors. Tolerates responses without an
    # "errors" key (the old `data['errors'].any?` crashed with
    # NoMethodError on nil for such responses).
    def handle_errors(data)
      errors = data['errors']
      return unless errors && errors.any?
      if data['code'] == 404
        raise ResourceNotFoundError, errors.first
      else
        raise RequestError,
          "Request Error occurred: %s" % errors.first
      end
    end

    # Attach a keyword to an ad group; returns the parsed response.
    def add_keyword(uid, params = {})
      request(:post, "/api/ad_groups/#{uid}/keywords", {
        keyword: params
      })
    end

    # Remove a keyword from an ad group; returns the parsed response.
    def remove_keyword(uid, params = {})
      request(:delete, "/api/ad_groups/#{uid}/keywords", {
        keyword: params
      })
    end

    # Update partial resources
    def patch(payload)
    end

    # PUT request replace whole object
    # PUTting the same data multiple times to the same resource
    # should not result in different resources
    def put(payload)
    end
  end
end
|
module QuestBack
class Api
# This hash contains parts of request we can include in soap operation.
# For instance call(:some_action, attributes, include_defaults: [:paging_info]) will
# slice paging_info and include it in the request.
DEFAULTS = {
  paging_info: {page_no: 0, page_size: 50},
  quest_filter: '',
  sendduplicate: false,
  respondents_data: {
    delimiter: ';',
    # order! is the element-ordering directive for this nested type.
    order!: [:respondent_data_header, :respondent_data, :delimiter, :allow_duplicate, :add_as_invitee]
  }
}

# The order of the elements in the SOAP body is important for the SOAP API.
# For operations with multiple arguments this hash gives savon the order of which
# it should .. well, order the elements.
ORDER = {
  get_quests: [:user_info, :paging_info, :quest_filter],
  add_email_invitees: [:user_info, :quest_info, :emails, :sendduplicate, :language_id],
  add_respondents_data: [:user_info, :quest_info, :respondents_data, :language_id],
  add_respondents_data_with_sms_invitation: [
    :user_info, :quest_info, :respondents_data, :language_id,
    :sms_from_number, :sms_from_text, :sms_message
  ]
}

# In order to provide a simple response.result and response.results interface
# where the actual result we care about is returned we have to give knowledge to
# where this result is found. As it turns out, get_quests returns it's quests within
# quests/quest array, and at the same time get_quest_questions returns the questions
# within simply it's root result element. No nestings there.. So, it seems a bit randon
# and we need to have this configured. I though it would be put under quest_questions/quest_question,
# but no such luck.
RESULT_KEY_NESTINGS = {
  test_connection: [],
  get_quests: [:quests, :quest],
  get_language_list: [:language],
  add_email_invitees: [],
  add_respondents_data: [],
  add_respondents_data_with_sms_invitation: []
}

# Numeric codes for respondent data header types expected by the API.
RESPONDENTS_HEADER_TYPE = {
  numeric: 1,
  text: 2
}

# Extra XML namespaces attached to requests (MS serialization arrays/enums).
NAMESPACES = {
  'xmlns:array' => 'http://schemas.microsoft.com/2003/10/Serialization/Arrays',
  'xmlns:enum' => 'http://schemas.microsoft.com/2003/10/Serialization/Enums'
}
def self.respondent_data_header_type_for(type)
RESPONDENTS_HEADER_TYPE.fetch(type.to_sym) do
fail ArgumentError, "#{type.to_s.inspect} is an unkown respondent data header type."
end
end
# Public: Creates a new API gateway object.
#
# Attributes
# config - A QuestBack::Configuration object. May be nil if
# QuestBack.default_configuration has been set.
def initialize(attributes = {})
attributes = ActiveSupport::HashWithIndifferentAccess.new attributes
@config = attributes[:config]
end
# Public: Make a test connection call to QuestBack
#
# Returns QuestBack::Response
def test_connection
call :test_connection
end
# Public: Get quests
#
# attributes - Attributes sent to QuestBack
#
# Example
#
# response = api.get_quests paging_info: {page_size: 2} # Limits result to two
# response.results
# => [result, result]
#
# Returns QuestBack::Response
def get_quests(attributes = {})
call :get_quests, attributes, include_defaults: [:paging_info, :quest_filter]
end
# Public: Returns a list of languages from QuestBack.
#
#
# Returns QuestBack::Response
def get_language_list
call :get_language_list
end
# Public: Invites a set of emails to a quest.
#
# attributes - Attributes sent to QuestBack
#
# Example
#
# response = api.add_email_invitees(
# quest_info: {quest_id: 4567668, security_lock: 'm0pI8orKJp'},
# emails: ['inviso@skalar.no', 'th@skalar.no'],
# sendduplicate: true, # or false as default
# language_id: 123, # optional
# )
#
# Returns QuestBack::Response
def add_email_invitees(attributes = {})
call :add_email_invitees, attributes, include_defaults: [:sendduplicate]
end
# Public: Add respondent data to a quest - optionally send as invitee as well.
#
# attributes - Attributes sent to QuestBack
#
# QuestBack is doing a bit of CSV over XML here? As you need to serialize
# respondent_data as a string with a delimiter ala CSV. The order of the
# data must match the order of respondent_data_header. I guess simply using XML
# and named elements was too easy? :-)
#
# Example
#
# response = api.add_respondents_data(
# quest_info: {quest_id: 4567668, security_lock: 'm0pI8orKJp'},
# respondents_data: {
# respondent_data_header: {
# respondent_data_header: [
# {
# title: 'Epost',
# type: QuestBack::Api.respondent_data_header_type_for(:text),
# is_email_field: true,
# is_sms_field: false,
# },
# {
# title: 'Navn',
# type: QuestBack::Api.respondent_data_header_type_for(:text),
# is_email_field: false,
# is_sms_field: false,
# },
# {
# title: 'Alder',
# type: QuestBack::Api.respondent_data_header_type_for(:numeric),
# is_email_field: false,
# is_sms_field: false,
# },
# ]
# },
# respondent_data: ['th@skalar.no;Thorbjorn;32'], # According to QuestBack's doc you can only do one here
# allow_duplicate: true,
# add_as_invitee: true
# }
# )
#
# You may override respondent_data's delimiter in string too.
#
# Returns QuestBack::Response
def add_respondents_data(attributes = {})
call :add_respondents_data, attributes, include_defaults: [:respondents_data]
end
# Public: Add respondent data to a quest with SMS invitation
#
# attributes - Attributes sent to QuestBack
#
#
# Example
#
# response = api.add_respondents_data_with_sms_invitation(
# quest_info: {quest_id: 4567668, security_lock: 'm0pI8orKJp'},
# respondents_data: {
# respondent_data_header: {
# respondent_data_header: [
# {
# title: 'Epost',
# type: QuestBack::Api.respondent_data_header_type_for(:text),
# is_email_field: true,
# is_sms_field: false,
# },
# {
# title: 'Phone',
# type: QuestBack::Api.respondent_data_header_type_for(:text),
# is_email_field: false,
# is_sms_field: true,
# }
# ]
# },
# # According to QuestBack's doc you can only do one respondent data,
# # even though it for sure is an array. Phone numbers must be given
# # on with country code first.
# respondent_data: ['th@skalar.no;4711223344'],
# allow_duplicate: true,
# add_as_invitee: true
# },
# sms_from_number: 11111111,
# sms_from_text: 'Inviso AS',
# sms_message: 'Hello - please join our quest!'
# )
#
# You may override respondent_data's delimiter in string too.
#
# Returns QuestBack::Response
def add_respondents_data_with_sms_invitation(attributes = {})
call :add_respondents_data_with_sms_invitation, attributes, include_defaults: [:respondents_data]
end
# Public: Savon client.
#
# Savon client all API method calls will go through.
def client
@client ||= begin
client_config = {
wsdl: config.wsdl,
namespace: config.soap_namespace,
log_level: config.log_level,
element_form_default: :qualified,
namespaces: NAMESPACES
}
client_config[:proxy] = config.http_proxy if config.http_proxy.present?
Savon::Client.new client_config
end
end
# Public: Configuration for the API.
#
# Returns a QuestBack::Configuration object
def config
@config || QuestBack.default_configuration || fail(QuestBack::Error, 'No configuration given or found on QuestBack.default_configuration.')
end
private
def call(operation_name, attributes = {}, options = {})
options[:operation_name] = operation_name
options_to_response = {
operation_name: options[:operation_name],
result_key_nestings: RESULT_KEY_NESTINGS.fetch(operation_name) { fail KeyError, "You must configure RESULT_KEY_NESTINGS for #{operation_name}" }
}
savon_response = client.call operation_name, build_hash_for_savon_call(attributes, options)
Response.new savon_response, options_to_response
end
# Private: Builds a hash for savon call - include user info and other defaults you ask it to
#
# attributes - A hash representing attributes the client sent to us which it expects us to send to QuestBack
# options - A hash where we can send in options:
# :include_defaults - Give an array with key names to slice from DEFAULTS and mix in with
# the rest of the attributes.
#
# Returns a merged hash for Savon client
def build_hash_for_savon_call(attributes = {}, options = {})
user_info = {user_info: {username: config.username, password: config.password}}
message = user_info.merge attributes
if default_keys = options[:include_defaults]
message = DEFAULTS.slice(*Array.wrap(default_keys)).deep_merge message
end
if order = ORDER[options[:operation_name]]
unkown_keys = attributes.keys - order
if unkown_keys.any?
fail ArgumentError, "Unkown attribute(s) given to #{options[:operation_name]}: #{unkown_keys.join(', ')}. Attributes' order is defined in #{self.class.name}::ORDER, but you sent in something we do not have."
end
message[:order!] = order & message.keys
end
{
message: transform_hash_for_quest_back(message)
}
end
# Private: Transforms given hash as how Savon needs it to build the correct SOAP body.
#
# Since QuestBack's API needs to have elements like this:
#
# <wsdl:TestConnection>
# <wsdl:userInfo>
# <wsdl:Username>
# ...
#
# We cannot simply use Savon's convert_request_keys_to config, as it translate all keys.
# We need some keys camelcased (keys within nested hashes) and some lower_camelcased (keys in the outer most hash).
#
# Thus we map our inner attributes, for instance for userInfo to camelcase and keeps them
# as strings so Savon will not manipulate them.
#
# I guess this helper method here is kinda not optimal, and we may have a simple class / struct
# which can do this job for us, so the api class does not have multiple responsibilites. Oh well,
# works for now.
def transform_hash_for_quest_back(hash, transform_keys = false)
Hash[
hash.map do |key, value|
if key == :order!
# Key was :order! - it has special meaning: The symbols within it's array are used to
# dictate order of elements. If transform_keys is false we are on "root keys". These are
# keept as symbols and Savon does it's magic and we'll do nothing. If it is true it means that keys
# on this level is put to camelcase and the values in the :order! array must match this.
if transform_keys
value = value.map { |v| v.to_s.camelcase }
end
else
key = transform_keys ? key.to_s.camelcase : key
# Oh my god this is quick, dirty and mega hackish!
# Type element in the RespondentDataHeader must be in namespace enum.
key = "enum:Type" if key == "Type"
# In some cases we would like to transform values as well as the key
value = case value
when Hash
# Keep on transforming recursively..
transform_hash_for_quest_back value, true
when Array
if value.all? { |v| v.is_a? String }
# Put it in a structure QuestBack likes..
{'array:string' => value}
elsif value.all? { |v| v.is_a? Hash }
# Keep on transforming recursively..
value.map { |hash| transform_hash_for_quest_back(hash, true) }
end
else
# We don't know anything better - just let value fall through
value
end
end
[key, value]
end
]
end
end
end
Added support for `add_respondents_data_without_email_invitation`
module QuestBack
  class Api
    # This hash contains parts of a request we can include in a soap operation.
    # For instance call(:some_action, attributes, include_defaults: [:paging_info]) will
    # slice paging_info and include it in the request.
    DEFAULTS = {
      paging_info: {page_no: 0, page_size: 50},
      quest_filter: '',
      sendduplicate: false,
      respondents_data: {
        delimiter: ';',
        order!: [:respondent_data_header, :respondent_data, :delimiter, :allow_duplicate, :add_as_invitee]
      }
    }

    # The order of the elements in the SOAP body is important for the SOAP API.
    # For operations with multiple arguments this hash gives savon the order of which
    # it should .. well, order the elements.
    ORDER = {
      get_quests: [:user_info, :paging_info, :quest_filter],
      add_email_invitees: [:user_info, :quest_info, :emails, :sendduplicate, :language_id],
      add_respondents_data: [:user_info, :quest_info, :respondents_data, :language_id],
      add_respondents_data_without_email_invitation: [:user_info, :quest_info, :respondents_data, :language_id],
      add_respondents_data_with_sms_invitation: [
        :user_info, :quest_info, :respondents_data, :language_id,
        :sms_from_number, :sms_from_text, :sms_message
      ]
    }

    # In order to provide a simple response.result and response.results interface
    # where the actual result we care about is returned we have to give knowledge to
    # where this result is found. As it turns out, get_quests returns its quests within
    # quests/quest array, and at the same time get_quest_questions returns the questions
    # within simply its root result element. No nestings there.. So, it seems a bit random
    # and we need to have this configured. I thought it would be put under quest_questions/quest_question,
    # but no such luck.
    RESULT_KEY_NESTINGS = {
      test_connection: [],
      get_quests: [:quests, :quest],
      get_language_list: [:language],
      add_email_invitees: [],
      add_respondents_data: [],
      add_respondents_data_without_email_invitation: [],
      add_respondents_data_with_sms_invitation: []
    }

    # Numeric codes QuestBack uses for respondent data header types.
    RESPONDENTS_HEADER_TYPE = {
      numeric: 1,
      text: 2
    }

    # Additional XML namespaces needed for serialized arrays and enums in the SOAP body.
    NAMESPACES = {
      'xmlns:array' => 'http://schemas.microsoft.com/2003/10/Serialization/Arrays',
      'xmlns:enum' => 'http://schemas.microsoft.com/2003/10/Serialization/Enums'
    }

    # Public: Translate a header type name (:numeric or :text) into QuestBack's numeric code.
    #
    # type - Symbol or String naming the header type.
    #
    # Raises ArgumentError when the type is not recognized.
    def self.respondent_data_header_type_for(type)
      RESPONDENTS_HEADER_TYPE.fetch(type.to_sym) do
        fail ArgumentError, "#{type.to_s.inspect} is an unknown respondent data header type."
      end
    end

    # Public: Creates a new API gateway object.
    #
    # Attributes
    #   config - A QuestBack::Configuration object. May be nil if
    #            QuestBack.default_configuration has been set.
    def initialize(attributes = {})
      attributes = ActiveSupport::HashWithIndifferentAccess.new attributes
      @config = attributes[:config]
    end

    # Public: Make a test connection call to QuestBack
    #
    # Returns QuestBack::Response
    def test_connection
      call :test_connection
    end

    # Public: Get quests
    #
    # attributes - Attributes sent to QuestBack
    #
    # Example
    #
    #   response = api.get_quests paging_info: {page_size: 2} # Limits result to two
    #   response.results
    #   => [result, result]
    #
    # Returns QuestBack::Response
    def get_quests(attributes = {})
      call :get_quests, attributes, include_defaults: [:paging_info, :quest_filter]
    end

    # Public: Returns a list of languages from QuestBack.
    #
    # Returns QuestBack::Response
    def get_language_list
      call :get_language_list
    end

    # Public: Invites a set of emails to a quest.
    #
    # attributes - Attributes sent to QuestBack
    #
    # Example
    #
    #   response = api.add_email_invitees(
    #     quest_info: {quest_id: 4567668, security_lock: 'm0pI8orKJp'},
    #     emails: ['inviso@skalar.no', 'th@skalar.no'],
    #     sendduplicate: true, # or false as default
    #     language_id: 123, # optional
    #   )
    #
    # Returns QuestBack::Response
    def add_email_invitees(attributes = {})
      call :add_email_invitees, attributes, include_defaults: [:sendduplicate]
    end

    # Public: Add respondent data to a quest - optionally send as invitee as well.
    #
    # attributes - Attributes sent to QuestBack
    #
    # QuestBack is doing a bit of CSV over XML here? As you need to serialize
    # respondent_data as a string with a delimiter ala CSV. The order of the
    # data must match the order of respondent_data_header. I guess simply using XML
    # and named elements was too easy? :-)
    #
    # Example
    #
    #   response = api.add_respondents_data(
    #     quest_info: {quest_id: 4567668, security_lock: 'm0pI8orKJp'},
    #     respondents_data: {
    #       respondent_data_header: {
    #         respondent_data_header: [
    #           {
    #             title: 'Epost',
    #             type: QuestBack::Api.respondent_data_header_type_for(:text),
    #             is_email_field: true,
    #             is_sms_field: false,
    #           },
    #           {
    #             title: 'Navn',
    #             type: QuestBack::Api.respondent_data_header_type_for(:text),
    #             is_email_field: false,
    #             is_sms_field: false,
    #           },
    #           {
    #             title: 'Alder',
    #             type: QuestBack::Api.respondent_data_header_type_for(:numeric),
    #             is_email_field: false,
    #             is_sms_field: false,
    #           },
    #         ]
    #       },
    #       respondent_data: ['th@skalar.no;Thorbjorn;32'], # According to QuestBack's doc you can only do one here
    #       allow_duplicate: true,
    #       add_as_invitee: true
    #     }
    #   )
    #
    # You may override respondent_data's delimiter in string too.
    #
    # Returns QuestBack::Response
    def add_respondents_data(attributes = {})
      call :add_respondents_data, attributes, include_defaults: [:respondents_data]
    end

    # Public: Add respondent data to a quest - optionally send as invitee as well.
    # This will not send an email invitation through QuestBack's platform.
    #
    # attributes - Attributes sent to QuestBack
    #
    # Takes the same attributes as add_respondents_data; see that method's
    # example for the full respondents_data structure.
    #
    # You may override respondent_data's delimiter in string too.
    #
    # Returns QuestBack::Response
    def add_respondents_data_without_email_invitation(attributes = {})
      call :add_respondents_data_without_email_invitation, attributes, include_defaults: [:respondents_data]
    end

    # Public: Add respondent data to a quest with SMS invitation
    #
    # attributes - Attributes sent to QuestBack
    #
    # Example
    #
    #   response = api.add_respondents_data_with_sms_invitation(
    #     quest_info: {quest_id: 4567668, security_lock: 'm0pI8orKJp'},
    #     respondents_data: {
    #       respondent_data_header: {
    #         respondent_data_header: [
    #           {
    #             title: 'Epost',
    #             type: QuestBack::Api.respondent_data_header_type_for(:text),
    #             is_email_field: true,
    #             is_sms_field: false,
    #           },
    #           {
    #             title: 'Phone',
    #             type: QuestBack::Api.respondent_data_header_type_for(:text),
    #             is_email_field: false,
    #             is_sms_field: true,
    #           }
    #         ]
    #       },
    #       # According to QuestBack's doc you can only do one respondent data,
    #       # even though it for sure is an array. Phone numbers must be given
    #       # with country code first.
    #       respondent_data: ['th@skalar.no;4711223344'],
    #       allow_duplicate: true,
    #       add_as_invitee: true
    #     },
    #     sms_from_number: 11111111,
    #     sms_from_text: 'Inviso AS',
    #     sms_message: 'Hello - please join our quest!'
    #   )
    #
    # You may override respondent_data's delimiter in string too.
    #
    # Returns QuestBack::Response
    def add_respondents_data_with_sms_invitation(attributes = {})
      call :add_respondents_data_with_sms_invitation, attributes, include_defaults: [:respondents_data]
    end

    # Public: Savon client.
    #
    # Savon client all API method calls will go through.
    def client
      @client ||= begin
        client_config = {
          wsdl: config.wsdl,
          namespace: config.soap_namespace,
          log_level: config.log_level,
          element_form_default: :qualified,
          namespaces: NAMESPACES
        }

        client_config[:proxy] = config.http_proxy if config.http_proxy.present?

        Savon::Client.new client_config
      end
    end

    # Public: Configuration for the API.
    #
    # Returns a QuestBack::Configuration object
    def config
      @config || QuestBack.default_configuration || fail(QuestBack::Error, 'No configuration given or found on QuestBack.default_configuration.')
    end

    private

    # Private: Perform a SOAP operation and wrap the result.
    #
    # operation_name - Symbol, must have an entry in RESULT_KEY_NESTINGS.
    # attributes     - Hash of attributes forwarded to QuestBack.
    # options        - Hash, see build_hash_for_savon_call.
    #
    # Returns QuestBack::Response
    def call(operation_name, attributes = {}, options = {})
      options[:operation_name] = operation_name

      options_to_response = {
        operation_name: options[:operation_name],
        result_key_nestings: RESULT_KEY_NESTINGS.fetch(operation_name) { fail KeyError, "You must configure RESULT_KEY_NESTINGS for #{operation_name}" }
      }

      savon_response = client.call operation_name, build_hash_for_savon_call(attributes, options)
      Response.new savon_response, options_to_response
    end

    # Private: Builds a hash for savon call - include user info and other defaults you ask it to
    #
    # attributes - A hash representing attributes the client sent to us which it expects us to send to QuestBack
    # options    - A hash where we can send in options:
    #                :include_defaults - Give an array with key names to slice from DEFAULTS and mix in with
    #                                    the rest of the attributes.
    #
    # Returns a merged hash for Savon client
    def build_hash_for_savon_call(attributes = {}, options = {})
      user_info = {user_info: {username: config.username, password: config.password}}
      message = user_info.merge attributes

      if default_keys = options[:include_defaults]
        message = DEFAULTS.slice(*Array.wrap(default_keys)).deep_merge message
      end

      if order = ORDER[options[:operation_name]]
        unknown_keys = attributes.keys - order

        if unknown_keys.any?
          fail ArgumentError, "Unknown attribute(s) given to #{options[:operation_name]}: #{unknown_keys.join(', ')}. Attributes' order is defined in #{self.class.name}::ORDER, but you sent in something we do not have."
        end

        # Only order the keys actually present in the message.
        message[:order!] = order & message.keys
      end

      {
        message: transform_hash_for_quest_back(message)
      }
    end

    # Private: Transforms given hash as how Savon needs it to build the correct SOAP body.
    #
    # Since QuestBack's API needs to have elements like this:
    #
    #   <wsdl:TestConnection>
    #     <wsdl:userInfo>
    #       <wsdl:Username>
    #       ...
    #
    # We cannot simply use Savon's convert_request_keys_to config, as it translates all keys.
    # We need some keys camelcased (keys within nested hashes) and some lower_camelcased (keys in the outer most hash).
    #
    # Thus we map our inner attributes, for instance for userInfo, to camelcase and keep them
    # as strings so Savon will not manipulate them.
    #
    # I guess this helper method here is kinda not optimal, and we may have a simple class / struct
    # which can do this job for us, so the api class does not have multiple responsibilities. Oh well,
    # works for now.
    def transform_hash_for_quest_back(hash, transform_keys = false)
      Hash[
        hash.map do |key, value|
          if key == :order!
            # Key was :order! - it has special meaning: The symbols within its array are used to
            # dictate order of elements. If transform_keys is false we are on "root keys". These are
            # kept as symbols and Savon does its magic and we'll do nothing. If it is true it means that keys
            # on this level are put to camelcase and the values in the :order! array must match this.
            if transform_keys
              value = value.map { |v| v.to_s.camelcase }
            end
          else
            key = transform_keys ? key.to_s.camelcase : key

            # Oh my god this is quick, dirty and mega hackish!
            # Type element in the RespondentDataHeader must be in namespace enum.
            key = "enum:Type" if key == "Type"

            # In some cases we would like to transform values as well as the key
            value = case value
            when Hash
              # Keep on transforming recursively..
              transform_hash_for_quest_back value, true
            when Array
              if value.all? { |v| v.is_a? String }
                # Put it in a structure QuestBack likes..
                {'array:string' => value}
              elsif value.all? { |v| v.is_a? Hash }
                # Keep on transforming recursively..
                value.map { |hash| transform_hash_for_quest_back(hash, true) }
              end
            else
              # We don't know anything better - just let value fall through
              value
            end
          end

          [key, value]
        end
      ]
    end
  end
end
|
module Qwerty
  # Current gem version (pre-release). Frozen so the shared constant
  # cannot be mutated accidentally.
  VERSION = "0.0.4.pre".freeze
end
Bump version to 0.0.5.pre
module Qwerty
  # Current gem version (pre-release). Frozen so the shared constant
  # cannot be mutated accidentally.
  VERSION = "0.0.5.pre".freeze
end
|
require 'digest/md5'
require 'json'
require 'net/http'
require 'openssl'
require 'shellwords'
require 'stringio'
require 'uri'
module Raca
  # Represents a single cloud files container. Contains methods for uploading,
  # downloading, collecting stats, listing files, etc.
  #
  # You probably don't want to instantiate this directly,
  # see Raca::Account#containers
  #
  class Container
    MAX_ITEMS_PER_LIST = 10_000
    LARGE_FILE_THRESHOLD = 5_368_709_120 # 5 Gb
    LARGE_FILE_SEGMENT_SIZE = 104_857_600 # 100 Mb

    attr_reader :container_name

    # account        - object responding to #public_endpoint (see Raca::Account)
    # region         - region identifier passed through to the account
    # container_name - cloud files container name; must not contain '/'
    # opts           - :logger may supply a logger. Falls back to Rails.logger
    #                  when Rails is loaded.
    def initialize(account, region, container_name, opts = {})
      raise ArgumentError, "The container name must not contain '/'." if container_name['/']
      @account, @region, @container_name = account, region, container_name
      @storage_url = @account.public_endpoint("cloudFiles", region)
      @cdn_url = @account.public_endpoint("cloudFilesCDN", region)
      @logger = opts[:logger]
      @logger ||= Rails.logger if defined?(Rails)
    end

    # Upload data_or_path (which may be a filename or an IO) to the container, as key.
    def upload(key, data_or_path)
      case data_or_path
      when StringIO, File
        upload_io(key, data_or_path, data_or_path.size)
      when String
        File.open(data_or_path, "rb") do |io|
          upload_io(key, io, io.stat.size)
        end
      else
        raise ArgumentError, "data_or_path must be an IO with data or filename string"
      end
    end

    # Delete +key+ from the container. If the container is on the CDN, the object will
    # still be served from the CDN until the TTL expires.
    def delete(key)
      log "deleting #{key} from #{container_path}"
      storage_request(Net::HTTP::Delete.new(File.join(container_path, key)))
    end

    # Remove +key+ from the CDN edge nodes on which it is currently cached. The object is
    # not deleted from the container: as the URL is re-requested, the edge cache will be
    # re-filled with the object currently in the container.
    #
    # This shouldn't be used except when it's really required (e.g. when a piece has to be
    # taken down) because it's expensive: it lodges a support ticket at Akamai. (!)
    def purge_from_akamai(key, email_address)
      log "Requesting #{File.join(container_path, key)} to be purged from the CDN"
      cdn_request(Net::HTTP::Delete.new(
        File.join(container_path, key),
        'X-Purge-Email' => email_address
      ))
    end

    # Returns some metadata about a single object in this container.
    #
    def object_metadata(key)
      object_path = File.join(container_path, key)
      log "Requesting metadata from #{object_path}"
      response = storage_request(Net::HTTP::Head.new(object_path))
      {
        :content_type => response["Content-Type"],
        :bytes => response["Content-Length"].to_i
      }
    end

    # Download the object at +key+ into the local file at +filepath+, streamed in chunks.
    def download(key, filepath)
      log "downloading #{key} from #{container_path}"
      storage_request(Net::HTTP::Get.new(File.join(container_path, key))) do |response|
        File.open(filepath, 'wb') do |io|
          response.read_body do |chunk|
            io.write(chunk)
          end
        end
      end
    end

    # Return an array of files in the container.
    #
    # Supported options
    #
    #   max    - the maximum number of items to return
    #   marker - return items alphabetically after this key. Useful for pagination
    #   prefix - only return items that start with this string
    def list(options = {})
      max = options.fetch(:max, MAX_ITEMS_PER_LIST)
      marker = options.fetch(:marker, nil)
      prefix = options.fetch(:prefix, nil)
      limit = [max, MAX_ITEMS_PER_LIST].min
      log "retrieving up to #{limit} of #{max} items from #{container_path}"
      query_string = "limit=#{limit}"
      query_string += "&marker=#{marker}" if marker
      query_string += "&prefix=#{prefix}" if prefix
      request = Net::HTTP::Get.new(container_path + "?#{query_string}")
      result = storage_request(request).body || ""
      result.split("\n").tap {|items|
        if max <= limit
          log "Got #{items.length} items; we don't need any more."
        elsif items.length < limit
          log "Got #{items.length} items; there can't be any more."
        else
          # A full page may mean more results remain: recurse with the last
          # key as the marker and shrink the remaining budget.
          log "Got #{items.length} items; requesting #{max - limit} more."
          items.concat list(max: max - limit, marker: items.last, prefix: prefix)
        end
      }
    end

    # Return all keys in the container starting with +prefix+.
    def search(prefix)
      log "retrieving container listing from #{container_path} items starting with #{prefix}"
      list(prefix: prefix)
    end

    # Object count and total byte usage for the container.
    def metadata
      log "retrieving container metadata from #{container_path}"
      response = storage_request(Net::HTTP::Head.new(container_path))
      {
        :objects => response["X-Container-Object-Count"].to_i,
        :bytes => response["X-Container-Bytes-Used"].to_i
      }
    end

    # CDN settings currently active for the container.
    def cdn_metadata
      log "retrieving container CDN metadata from #{container_path}"
      response = cdn_request(Net::HTTP::Head.new(container_path))
      {
        :cdn_enabled => response["X-CDN-Enabled"] == "True",
        :host => response["X-CDN-URI"],
        :ssl_host => response["X-CDN-SSL-URI"],
        :streaming_host => response["X-CDN-STREAMING-URI"],
        :ttl => response["X-TTL"].to_i,
        :log_retention => response["X-Log-Retention"] == "True"
      }
    end

    # use this with caution, it will make EVERY object in the container publicly available
    # via the CDN. CDN enabling can be done via the web UI but only with a TTL of 72 hours.
    # Using the API it's possible to set a TTL of 50 years.
    #
    # ttl - cache expiry in seconds. Defaults to 259200 (72 hours); the literal
    #       replaces the previous `72.hours.to_i`, which required ActiveSupport.
    def cdn_enable(ttl = 259_200)
      log "enabling CDN access to #{container_path} with a cache expiry of #{ttl / 60} minutes"

      cdn_request Net::HTTP::Put.new(container_path, "X-TTL" => ttl.to_i.to_s)
    end

    # Generate a expiring URL for a file that is otherwise private. useful for providing temporary
    # access to files.
    #
    def expiring_url(object_key, temp_url_key, expires_at = Time.now.to_i + 60)
      # OpenSSL::Digest::Digest was deprecated and later removed; OpenSSL::Digest
      # is the supported constructor.
      digest = OpenSSL::Digest.new('sha1')

      method = 'GET'
      expires = expires_at.to_i
      path = File.join(container_path, object_key)
      data = "#{method}\n#{expires}\n#{path}"

      hmac = OpenSSL::HMAC.new(temp_url_key, digest)
      hmac << data

      "https://#{storage_host}#{path}?temp_url_sig=#{hmac.hexdigest}&temp_url_expires=#{expires}"
    end

    private

    # Dispatch to the standard or segmented upload path based on object size.
    def upload_io(key, io, byte_count)
      if byte_count <= LARGE_FILE_THRESHOLD
        upload_io_standard(key, io, byte_count)
      else
        upload_io_large(key, io, byte_count)
      end
    end

    def upload_io_standard(key, io, byte_count)
      full_path = File.join(container_path, key)

      headers = {}
      headers['Content-Type'] = extension_content_type(full_path)
      if io.respond_to?(:path)
        headers['Content-Type'] ||= extension_content_type(io.path)
        headers['Content-Type'] ||= file_content_type(io.path)
        headers['Etag'] = md5(io.path)
      end
      headers['Content-Type'] ||= "application/octet-stream"
      if content_type_needs_cors(key)
        headers['Access-Control-Allow-Origin'] = "*"
      end

      log "uploading #{byte_count} bytes to #{full_path}"

      request = Net::HTTP::Put.new(full_path, headers)
      request.body_stream = io
      request.content_length = byte_count
      storage_request(request)
    end

    # Upload in 100 Mb segments, then PUT a JSON manifest that stitches them together.
    def upload_io_large(key, io, byte_count)
      segment_count = (byte_count.to_f / LARGE_FILE_SEGMENT_SIZE).ceil
      segments = []
      while segments.size < segment_count
        start_pos = LARGE_FILE_SEGMENT_SIZE * segments.size
        segment_key = "%s.%03d" % [key, segments.size]
        io.seek(start_pos)
        segment_io = StringIO.new(io.read(LARGE_FILE_SEGMENT_SIZE))
        result = upload_io_standard(segment_key, segment_io, segment_io.size)
        segments << {path: "#{@container_name}/#{segment_key}", etag: result["ETag"], size_bytes: segment_io.size}
      end
      manifest_key = "#{key}?multipart-manifest=put"
      manifest_body = StringIO.new(JSON.dump(segments))
      upload_io_standard(manifest_key, manifest_body, manifest_body.size)
    end

    def cdn_request(request, &block)
      cloud_request(request, cdn_host, &block)
    end

    def storage_request(request, &block)
      cloud_request(request, storage_host, &block)
    end

    # Perform an authenticated request, retrying up to 3 times on timeouts.
    def cloud_request(request, hostname, retries = 0, &block)
      cloud_http(hostname) do |http|
        request['X-Auth-Token'] = @account.auth_token
        http.request(request, &block)
      end
    rescue Timeout::Error
      if retries >= 3
        raise "Timeout from Rackspace at #{Time.now} while trying #{request.class} to #{request.path}"
      end

      unless defined?(Rails) && Rails.env.test?
        retry_interval = 5 + (retries.to_i * 5) # Retry after 5, 10, 15 and 20 seconds
        log "Rackspace timed out: retrying after #{retry_interval}s"
        sleep(retry_interval)
      end
      cloud_request(request, hostname, retries + 1, &block)
    end

    # Open an HTTPS connection and yield it; on a 401, refresh auth and retry once.
    def cloud_http(hostname, &block)
      Net::HTTP.new(hostname, 443).tap {|http|
        http.use_ssl = true
        http.read_timeout = 70
      }.start do |http|
        response = block.call http
        if response.is_a?(Net::HTTPUnauthorized)
          log "Rackspace returned HTTP 401; refreshing auth before retrying."
          @account.refresh_cache
          response = block.call http
        end
        raise "Failure: Rackspace returned #{response.inspect}" unless response.is_a?(Net::HTTPSuccess)
        response
      end
    end

    def log(msg)
      if @logger.respond_to?(:debug)
        @logger.debug msg
      end
    end

    def storage_host
      URI.parse(@storage_url).host
    end

    def storage_path
      URI.parse(@storage_url).path
    end

    def cdn_host
      URI.parse(@cdn_url).host
    end

    def cdn_path
      URI.parse(@cdn_url).path
    end

    def container_path
      @container_path ||= File.join(storage_path, container_name)
    end

    # Shell out to file(1) to sniff the mime type. Shellwords.escape guards
    # against shell metacharacters in the path (the previous quote-escaping
    # left `$` and backticks interpolated).
    def file_content_type(path)
      `file -b --mime-type #{Shellwords.escape(path)}`.chomp
    end

    # Static extension -> mime type map; returns nil for unknown extensions.
    def extension_content_type(path)
      {
        ".css" => "text/css",
        ".eot" => "application/vnd.ms-fontobject",
        ".html" => "text/html",
        ".js" => "application/javascript",
        ".png" => "image/png",
        ".jpg" => "image/jpeg",
        ".txt" => "text/plain",
        ".woff" => "font/woff",
        ".zip" => "application/zip"
      }[File.extname(path)]
    end

    # Fonts need to be served with CORS headers to work in IE and FF
    #
    def content_type_needs_cors(path)
      [".eot",".ttf",".woff"].include?(File.extname(path))
    end

    # MD5 of the file at +path+, read in 128K chunks to bound memory use.
    def md5(path)
      digest = Digest::MD5.new
      File.open(path, 'rb') do |f|
        f.each(1024 * 128) do |chunk|
          digest << chunk
        end
      end
      digest.hexdigest
    end
  end
end
Remove ActiveSupport-dependent code
require 'net/http'
require 'digest/md5'
require 'openssl'
require 'uri'
module Raca
# Represents a single cloud files container. Contains methods for uploading,
# downloading, collecting stats, listing files, etc.
#
# You probably don't want to instantiate this directly,
# see Raca::Account#containers
#
class Container
MAX_ITEMS_PER_LIST = 10_000
LARGE_FILE_THRESHOLD = 5_368_709_120 # 5 Gb
LARGE_FILE_SEGMENT_SIZE = 104_857_600 # 100 Mb
attr_reader :container_name
# Build a container handle bound to one account/region pair.
#
# account        - object responding to #public_endpoint (see Raca::Account)
# region         - region identifier passed through to the account
# container_name - cloud files container name; must not contain '/'
# opts           - :logger may supply a logger; falls back to Rails.logger
#                  when Rails is loaded.
def initialize(account, region, container_name, opts = {})
  if container_name.include?("/")
    raise ArgumentError, "The container name must not contain '/'."
  end
  @account = account
  @region = region
  @container_name = container_name
  @storage_url = @account.public_endpoint("cloudFiles", region)
  @cdn_url = @account.public_endpoint("cloudFilesCDN", region)
  @logger = opts[:logger]
  @logger ||= Rails.logger if defined?(Rails)
end
# Upload data_or_path (which may be a filename or an IO) to the container, as key.
def upload(key, data_or_path)
  if data_or_path.is_a?(StringIO) || data_or_path.is_a?(File)
    # Already an IO we can stream directly.
    upload_io(key, data_or_path, data_or_path.size)
  elsif data_or_path.is_a?(String)
    # Treat a plain string as a path on the local filesystem.
    File.open(data_or_path, "rb") { |io| upload_io(key, io, io.stat.size) }
  else
    raise ArgumentError, "data_or_path must be an IO with data or filename string"
  end
end
# Delete +key+ from the container. If the container is on the CDN, the object will
# still be served from the CDN until the TTL expires.
def delete(key)
  object_path = File.join(container_path, key)
  log "deleting #{key} from #{container_path}"
  storage_request(Net::HTTP::Delete.new(object_path))
end
# Remove +key+ from the CDN edge nodes on which it is currently cached. The object is
# not deleted from the container: as the URL is re-requested, the edge cache will be
# re-filled with the object currently in the container.
#
# This shouldn't be used except when it's really required (e.g. when a piece has to be
# taken down) because it's expensive: it lodges a support ticket at Akamai. (!)
def purge_from_akamai(key, email_address)
  object_path = File.join(container_path, key)
  log "Requesting #{object_path} to be purged from the CDN"
  request = Net::HTTP::Delete.new(object_path, 'X-Purge-Email' => email_address)
  cdn_request(request)
end
# Returns some metadata about a single object in this container.
#
def object_metadata(key)
object_path = File.join(container_path, key)
log "Requesting metadata from #{object_path}"
response = storage_request(Net::HTTP::Head.new(object_path))
{
:content_type => response["Content-Type"],
:bytes => response["Content-Length"].to_i
}
end
def download(key, filepath)
log "downloading #{key} from #{container_path}"
storage_request(Net::HTTP::Get.new(File.join(container_path, key))) do |response|
File.open(filepath, 'wb') do |io|
response.read_body do |chunk|
io.write(chunk)
end
end
end
end
# Return an array of files in the container.
#
# Supported options
#
# max - the maximum number of items to return
# marker - return items alphabetically after this key. Useful for pagination
# prefix - only return items that start with this string
def list(options = {})
max = options.fetch(:max, MAX_ITEMS_PER_LIST)
marker = options.fetch(:marker, nil)
prefix = options.fetch(:prefix, nil)
limit = [max, MAX_ITEMS_PER_LIST].min
log "retrieving up to #{limit} of #{max} items from #{container_path}"
query_string = "limit=#{limit}"
query_string += "&marker=#{marker}" if marker
query_string += "&prefix=#{prefix}" if prefix
request = Net::HTTP::Get.new(container_path + "?#{query_string}")
result = storage_request(request).body || ""
result.split("\n").tap {|items|
if max <= limit
log "Got #{items.length} items; we don't need any more."
elsif items.length < limit
log "Got #{items.length} items; there can't be any more."
else
log "Got #{items.length} items; requesting #{max - limit} more."
items.concat list(max: max - limit, marker: items.last, prefix: prefix)
end
}
end
def search(prefix)
log "retrieving container listing from #{container_path} items starting with #{prefix}"
list(prefix: prefix)
end
def metadata
log "retrieving container metadata from #{container_path}"
response = storage_request(Net::HTTP::Head.new(container_path))
{
:objects => response["X-Container-Object-Count"].to_i,
:bytes => response["X-Container-Bytes-Used"].to_i
}
end
def cdn_metadata
log "retrieving container CDN metadata from #{container_path}"
response = cdn_request(Net::HTTP::Head.new(container_path))
{
:cdn_enabled => response["X-CDN-Enabled"] == "True",
:host => response["X-CDN-URI"],
:ssl_host => response["X-CDN-SSL-URI"],
:streaming_host => response["X-CDN-STREAMING-URI"],
:ttl => response["X-TTL"].to_i,
:log_retention => response["X-Log-Retention"] == "True"
}
end
# use this with caution, it will make EVERY object in the container publicly available
# via the CDN. CDN enabling can be done via the web UI but only with a TTL of 72 hours.
# Using the API it's possible to set a TTL of 50 years.
#
# TTL is defined in seconds, default is 72 hours.
#
def cdn_enable(ttl = 259200)
log "enabling CDN access to #{container_path} with a cache expiry of #{ttl / 60} minutes"
cdn_request Net::HTTP::Put.new(container_path, "X-TTL" => ttl.to_i.to_s)
end
# Generate a expiring URL for a file that is otherwise private. useful for providing temporary
# access to files.
#
def expiring_url(object_key, temp_url_key, expires_at = Time.now.to_i + 60)
digest = OpenSSL::Digest::Digest.new('sha1')
method = 'GET'
expires = expires_at.to_i
path = File.join(container_path, object_key)
data = "#{method}\n#{expires}\n#{path}"
hmac = OpenSSL::HMAC.new(temp_url_key, digest)
hmac << data
"https://#{storage_host}#{path}?temp_url_sig=#{hmac.hexdigest}&temp_url_expires=#{expires}"
end
private
def upload_io(key, io, byte_count)
if byte_count <= LARGE_FILE_THRESHOLD
upload_io_standard(key, io, byte_count)
else
upload_io_large(key, io, byte_count)
end
end
def upload_io_standard(key, io, byte_count)
full_path = File.join(container_path, key)
headers = {}
headers['Content-Type'] = extension_content_type(full_path)
if io.respond_to?(:path)
headers['Content-Type'] ||= extension_content_type(io.path)
headers['Content-Type'] ||= file_content_type(io.path)
headers['Etag'] = md5(io.path)
end
headers['Content-Type'] ||= "application/octet-stream"
if content_type_needs_cors(key)
headers['Access-Control-Allow-Origin'] = "*"
end
log "uploading #{byte_count} bytes to #{full_path}"
request = Net::HTTP::Put.new(full_path, headers)
request.body_stream = io
request.content_length = byte_count
storage_request(request)
end
def upload_io_large(key, io, byte_count)
segment_count = (byte_count.to_f / LARGE_FILE_SEGMENT_SIZE).ceil
segments = []
while segments.size < segment_count
start_pos = 0 + (LARGE_FILE_SEGMENT_SIZE * segments.size)
segment_key = "%s.%03d" % [key, segments.size]
io.seek(start_pos)
segment_io = StringIO.new(io.read(LARGE_FILE_SEGMENT_SIZE))
result = upload_io_standard(segment_key, segment_io, segment_io.size)
segments << {path: "#{@container_name}/#{segment_key}", etag: result["ETag"], size_bytes: segment_io.size}
end
manifest_key = "#{key}?multipart-manifest=put"
manifest_body = StringIO.new(JSON.dump(segments))
upload_io_standard(manifest_key, manifest_body, manifest_body.size)
end
def cdn_request(request, &block)
cloud_request(request, cdn_host, &block)
end
def storage_request(request, &block)
cloud_request(request, storage_host, &block)
end
def cloud_request(request, hostname, retries = 0, &block)
cloud_http(hostname) do |http|
request['X-Auth-Token'] = @account.auth_token
http.request(request, &block)
end
rescue Timeout::Error
if retries >= 3
raise "Timeout from Rackspace at #{Time.now} while trying #{request.class} to #{request.path}"
end
unless defined?(Rails) && Rails.env.test?
retry_interval = 5 + (retries.to_i * 5) # Retry after 5, 10, 15 and 20 seconds
log "Rackspace timed out: retrying after #{retry_interval}s"
sleep(retry_interval)
end
cloud_request(request, hostname, retries + 1, &block)
end
def cloud_http(hostname, &block)
Net::HTTP.new(hostname, 443).tap {|http|
http.use_ssl = true
http.read_timeout = 70
}.start do |http|
response = block.call http
if response.is_a?(Net::HTTPUnauthorized)
log "Rackspace returned HTTP 401; refreshing auth before retrying."
@account.refresh_cache
response = block.call http
end
raise "Failure: Rackspace returned #{response.inspect}" unless response.is_a?(Net::HTTPSuccess)
response
end
end
def log(msg)
if @logger.respond_to?(:debug)
@logger.debug msg
end
end
def storage_host
URI.parse(@storage_url).host
end
def storage_path
URI.parse(@storage_url).path
end
def cdn_host
URI.parse(@cdn_url).host
end
def cdn_path
URI.parse(@cdn_url).path
end
def container_path
@container_path ||= File.join(storage_path, container_name)
end
def file_content_type(path)
`file -b --mime-type \"#{path.gsub('"', '\"')}\"`.chomp
end
def extension_content_type(path)
{
".css" => "text/css",
".eot" => "application/vnd.ms-fontobject",
".html" => "text/html",
".js" => "application/javascript",
".png" => "image/png",
".jpg" => "image/jpeg",
".txt" => "text/plain",
".woff" => "font/woff",
".zip" => "application/zip"
}[File.extname(path)]
end
# Fonts need to be served with CORS headers to work in IE and FF
#
def content_type_needs_cors(path)
[".eot",".ttf",".woff"].include?(File.extname(path))
end
def md5(path)
digest = Digest::MD5.new
File.open(path, 'rb') do |f|
# read in 128K chunks
f.each(1024 * 128) do |chunk|
digest << chunk
end
end
digest.hexdigest
end
end
end
|
require 'rack'
require 'rack/contrib'
require 'sinatra/base'
require 'sinatra/param'
require 'sequel'
require 'active_support/inflector'
require 'rack/core-data/data_model'
require 'rack/core-data/version'
# Namespace that holds the Sequel::Model classes generated at runtime from
# the Core Data entities (one constant per entity, set in Rack.CoreData).
module Rack::CoreData::Models
end
module Rack
  # Build a Rack app exposing a REST+JSON API for every entity found in the
  # given Core Data model file.
  #
  # xcdatamodel - path to the *.xcdatamodel describing entities, attributes
  #               and relationships.
  #
  # Returns a Sinatra::Base subclass suitable for `run` in a rackup file.
  def self.CoreData(xcdatamodel)
    model = CoreData::DataModel.new(xcdatamodel)
    # Create each model class before implementing, in order to correctly set up relationships
    model.entities.each do |entity|
      klass = Rack::CoreData::Models.const_set(entity.name.capitalize, Class.new(Sequel::Model))
    end
    app = Class.new(Sinatra::Base) do
      use Rack::PostBodyContentTypeParser
      before do
        content_type :json
      end
      helpers Sinatra::Param
      # OPTIONS / advertises every collection via Link headers and returns a
      # JSON index of resource names, URLs and columns.
      options '/' do
        links = []
        model.entities.each do |entity|
          links << %{</#{entity.name.downcase.pluralize}>; rel="resource"}
        end
        response['Link'] = links.join("\n")
        model.entities.collect{ |entity|
          {
            name: entity.name,
            url: "/#{entity.name.downcase.pluralize}",
            columns: entity.attributes.collect(&:name)
          }
        }.to_json
      end
    end
    model.entities.each do |entity|
      klass = Rack::CoreData::Models.const_get(entity.name.capitalize)
      klass.dataset = entity.name.downcase.pluralize.to_sym
      klass.class_eval do
        self.strict_param_setting = false
        self.raise_on_save_failure = false
        plugin :json_serializer, naked: true, include: [:url]
        plugin :schema
        plugin :validation_helpers
        # Canonical URL of a single record.
        def url
          "/#{self.class.table_name}/#{self[primary_key]}"
        end
        # Map Core Data relationships onto Sequel associations.
        entity.relationships.each do |relationship|
          options = {:class => Rack::CoreData::Models.const_get(relationship.destination.capitalize)}
          if relationship.to_many?
            one_to_many relationship.name.to_sym, options
          else
            many_to_one relationship.name.to_sym, options
          end
        end
        # Translate Core Data attribute types into SQL column types.
        set_schema do
          primary_key :id
          entity.attributes.each do |attribute|
            next if attribute.transient?
            options = {
              :null => attribute.optional?,
              :index => attribute.indexed?,
              :default => attribute.default_value
            }
            type = case attribute.type
                   when "Integer 16" then :int2
                   when "Integer 32" then :int4
                   when "Integer 64" then :int8
                   when "Float" then :float4
                   when "Double" then :float8
                   when "Decimal" then :float8
                   when "Date" then :timestamp
                   when "Boolean" then :boolean
                   when "Binary" then :bytea
                   else :varchar
                   end
            column attribute.name.to_sym, type, options
          end
          # Foreign-key columns for to-one relationships.
          entity.relationships.each do |relationship|
            options = {
              :index => true,
              :null => relationship.optional?
            }
            if not relationship.to_many?
              column "#{relationship.name}_id".to_sym, :integer, options
            end
          end
        end
        create_table unless table_exists?
      end
      # Validations derived from the Core Data attribute metadata.
      klass.send :define_method, :validate do
        entity.attributes.each do |attribute|
          case attribute.type
          when "Integer 16", "Integer 32", "Integer 64"
            validates_integer attribute.name
          when "Float", "Double", "Decimal"
            validates_numeric attribute.name
          when "String"
            validates_min_length attribute.minimum_value, attribute.name if attribute.minimum_value
            validates_max_length attribute.maximum_value, attribute.name if attribute.maximum_value
          end
        end
      end
      app.class_eval do
        include Rack::CoreData::Models
        klass = Rack::CoreData::Models.const_get(entity.name.capitalize)
        disable :raise_errors, :show_exceptions
        # Index: supports page/per_page or limit/offset pagination styles.
        get "/#{klass.table_name}/?" do
          if params[:page] or params[:per_page]
            param :page, Integer, default: 1, min: 1
            param :per_page, Integer, default: 100, in: (1..100)
            {
              "#{klass.table_name}" => klass.limit(params[:per_page], (params[:page] - 1) * params[:per_page]),
              page: params[:page],
              total: klass.count
            }.to_json
          else
            param :limit, Integer, default: 100, in: (1..100)
            param :offset, Integer, default: 0, min: 0
            {
              "#{klass.table_name}" => klass.limit(params[:limit], params[:offset])
            }.to_json
          end
        end
        post "/#{klass.table_name}/?" do
          record = klass.new(params)
          if record.save
            status 201
            {entity.name.downcase => record}.to_json
          else
            status 406
            {errors: record.errors}.to_json
          end
        end
        get "/#{klass.table_name}/:id/?" do
          record = klass[params[:id]] or halt 404
          # FIX: wrap the record in a root key so the show response has the
          # same JSON shape as index, create and update (was `record.to_json`).
          {entity.name.downcase => record}.to_json
        end
        put "/#{klass.table_name}/:id/?" do
          record = klass[params[:id]] or halt 404
          if record.update(params)
            status 200
            {entity.name.downcase => record}.to_json
          else
            status 406
            {errors: record.errors}.to_json
          end
        end
        delete "/#{klass.table_name}/:id/?" do
          record = klass[params[:id]] or halt 404
          if record.destroy
            status 200
          else
            status 406
            {errors: record.errors}.to_json
          end
        end
        # Nested collection routes for to-many relationships.
        entity.relationships.each do |relationship|
          next unless relationship.to_many?
          get "/#{klass.table_name}/:id/#{relationship.name}/?" do
            {relationship.name => klass[params[:id]].send(relationship.name)}.to_json
          end
        end
      end
    end
    return app
  end
end
Fix JSON formatting for GET /resources/123: wrap the record in a root key so the show response matches the shape of the other endpoints.
require 'rack'
require 'rack/contrib'
require 'sinatra/base'
require 'sinatra/param'
require 'sequel'
require 'active_support/inflector'
require 'rack/core-data/data_model'
require 'rack/core-data/version'
# Namespace that holds the Sequel::Model classes generated at runtime from
# the Core Data entities (one constant per entity, set in Rack.CoreData).
module Rack::CoreData::Models
end
module Rack
  # Build a Rack app exposing a REST+JSON API for every entity found in the
  # given Core Data model file.
  #
  # xcdatamodel - path to the *.xcdatamodel describing entities, attributes
  #               and relationships.
  #
  # Returns a Sinatra::Base subclass suitable for `run` in a rackup file.
  def self.CoreData(xcdatamodel)
    model = CoreData::DataModel.new(xcdatamodel)
    # Create each model class before implementing, in order to correctly set up relationships
    model.entities.each do |entity|
      klass = Rack::CoreData::Models.const_set(entity.name.capitalize, Class.new(Sequel::Model))
    end
    app = Class.new(Sinatra::Base) do
      use Rack::PostBodyContentTypeParser
      before do
        content_type :json
      end
      helpers Sinatra::Param
      # OPTIONS / advertises every collection via Link headers and returns a
      # JSON index of resource names, URLs and columns.
      options '/' do
        links = []
        model.entities.each do |entity|
          links << %{</#{entity.name.downcase.pluralize}>; rel="resource"}
        end
        response['Link'] = links.join("\n")
        model.entities.collect{ |entity|
          {
            name: entity.name,
            url: "/#{entity.name.downcase.pluralize}",
            columns: entity.attributes.collect(&:name)
          }
        }.to_json
      end
    end
    model.entities.each do |entity|
      klass = Rack::CoreData::Models.const_get(entity.name.capitalize)
      klass.dataset = entity.name.downcase.pluralize.to_sym
      klass.class_eval do
        self.strict_param_setting = false
        self.raise_on_save_failure = false
        plugin :json_serializer, naked: true, include: [:url]
        plugin :schema
        plugin :validation_helpers
        # Canonical URL of a single record.
        def url
          "/#{self.class.table_name}/#{self[primary_key]}"
        end
        # Map Core Data relationships onto Sequel associations.
        entity.relationships.each do |relationship|
          options = {:class => Rack::CoreData::Models.const_get(relationship.destination.capitalize)}
          if relationship.to_many?
            one_to_many relationship.name.to_sym, options
          else
            many_to_one relationship.name.to_sym, options
          end
        end
        # Translate Core Data attribute types into SQL column types.
        set_schema do
          primary_key :id
          entity.attributes.each do |attribute|
            next if attribute.transient?
            options = {
              :null => attribute.optional?,
              :index => attribute.indexed?,
              :default => attribute.default_value
            }
            type = case attribute.type
                   when "Integer 16" then :int2
                   when "Integer 32" then :int4
                   when "Integer 64" then :int8
                   when "Float" then :float4
                   when "Double" then :float8
                   when "Decimal" then :float8
                   when "Date" then :timestamp
                   when "Boolean" then :boolean
                   when "Binary" then :bytea
                   else :varchar
                   end
            column attribute.name.to_sym, type, options
          end
          # Foreign-key columns for to-one relationships.
          entity.relationships.each do |relationship|
            options = {
              :index => true,
              :null => relationship.optional?
            }
            if not relationship.to_many?
              column "#{relationship.name}_id".to_sym, :integer, options
            end
          end
        end
        create_table unless table_exists?
      end
      # Validations derived from the Core Data attribute metadata.
      klass.send :define_method, :validate do
        entity.attributes.each do |attribute|
          case attribute.type
          when "Integer 16", "Integer 32", "Integer 64"
            validates_integer attribute.name
          when "Float", "Double", "Decimal"
            validates_numeric attribute.name
          when "String"
            validates_min_length attribute.minimum_value, attribute.name if attribute.minimum_value
            validates_max_length attribute.maximum_value, attribute.name if attribute.maximum_value
          end
        end
      end
      app.class_eval do
        include Rack::CoreData::Models
        klass = Rack::CoreData::Models.const_get(entity.name.capitalize)
        disable :raise_errors, :show_exceptions
        # Index: supports page/per_page or limit/offset pagination styles.
        get "/#{klass.table_name}/?" do
          if params[:page] or params[:per_page]
            param :page, Integer, default: 1, min: 1
            param :per_page, Integer, default: 100, in: (1..100)
            {
              "#{klass.table_name}" => klass.limit(params[:per_page], (params[:page] - 1) * params[:per_page]),
              page: params[:page],
              total: klass.count
            }.to_json
          else
            param :limit, Integer, default: 100, in: (1..100)
            param :offset, Integer, default: 0, min: 0
            {
              "#{klass.table_name}" => klass.limit(params[:limit], params[:offset])
            }.to_json
          end
        end
        post "/#{klass.table_name}/?" do
          record = klass.new(params)
          if record.save
            status 201
            {entity.name.downcase => record}.to_json
          else
            status 406
            {errors: record.errors}.to_json
          end
        end
        # Show: wraps the record in a root key, matching the other endpoints.
        get "/#{klass.table_name}/:id/?" do
          record = klass[params[:id]] or halt 404
          {entity.name.downcase => record}.to_json
        end
        put "/#{klass.table_name}/:id/?" do
          record = klass[params[:id]] or halt 404
          if record.update(params)
            status 200
            {entity.name.downcase => record}.to_json
          else
            status 406
            {errors: record.errors}.to_json
          end
        end
        delete "/#{klass.table_name}/:id/?" do
          record = klass[params[:id]] or halt 404
          if record.destroy
            status 200
          else
            status 406
            {errors: record.errors}.to_json
          end
        end
        # Nested collection routes for to-many relationships.
        entity.relationships.each do |relationship|
          next unless relationship.to_many?
          get "/#{klass.table_name}/:id/#{relationship.name}/?" do
            {relationship.name => klass[params[:id]].send(relationship.name)}.to_json
          end
        end
      end
    end
    return app
  end
end
|
require 'time'
require 'rack/utils'
require 'rack/mime'
module Rack
# Rack::Directory serves entries below the +root+ given, according to the
# path info of the Rack request. If a directory is found, the file's contents
# will be presented in an html based index. If a file is found, the env will
# be passed to the specified +app+.
#
# If +app+ is not specified, a Rack::File of the same +root+ will be used.
class Directory
  # Row template: href, display name, size, MIME type, mtime.
  DIR_FILE = "<tr><td class='name'><a href='%s'>%s</a></td><td class='size'>%s</td><td class='type'>%s</td><td class='mtime'>%s</td></tr>"
  # Page template: title, heading, concatenated DIR_FILE rows.
  DIR_PAGE = <<-PAGE
<html><head>
<title>%s</title>
<meta http-equiv="content-type" content="text/html; charset=utf-8" />
<style type='text/css'>
table { width:100%%; }
.name { text-align:left; }
.size, .mtime { text-align:right; }
.type { width:11em; }
.mtime { width:15em; }
</style>
</head><body>
<h1>%s</h1>
<hr />
<table>
<tr>
<th class='name'>Name</th>
<th class='size'>Size</th>
<th class='type'>Type</th>
<th class='mtime'>Last Modified</th>
</tr>
%s
</table>
<hr />
</body></html>
  PAGE
  attr_reader :files
  attr_accessor :root, :path
  # root - filesystem directory to serve entries from
  # app  - endpoint called for regular files (defaults to Rack::File on root)
  def initialize(root, app=nil)
    @root = ::File.expand_path(root)
    @app = app || Rack::File.new(@root)
  end
  # Each request runs against a dup of this middleware because _call
  # mutates per-request instance variables (@env, @path_info, @path, @files).
  def call(env)
    dup._call(env)
  end
  def _call(env)
    @env = env
    @script_name = env[SCRIPT_NAME]
    @path_info = Utils.unescape(env[PATH_INFO])
    if forbidden = check_forbidden
      forbidden
    else
      @path = ::File.join(@root, @path_info)
      list_path
    end
  end
  # Reject paths containing "..", guarding against directory traversal.
  # Returns a 403 response triplet, or nil when the path is acceptable.
  def check_forbidden
    return unless @path_info.include? ".."
    body = "Forbidden\n"
    size = body.bytesize
    return [403, {CONTENT_TYPE => "text/plain",
      CONTENT_LENGTH => size.to_s,
      "X-Cascade" => "pass"}, [body]]
  end
  # Collect the directory entries into @files and return a 200 response
  # whose body is this middleware instance itself (see #each).
  def list_directory
    @files = [['../','Parent Directory','','','']]
    glob = ::File.join(@path, '*')
    url_head = (@script_name.split('/') + @path_info.split('/')).map do |part|
      Rack::Utils.escape part
    end
    Dir[glob].sort.each do |node|
      stat = stat(node)
      next unless stat
      basename = ::File.basename(node)
      ext = ::File.extname(node)
      url = ::File.join(*url_head + [Rack::Utils.escape(basename)])
      size = stat.size
      type = stat.directory? ? 'directory' : Mime.mime_type(ext)
      size = stat.directory? ? '-' : filesize_format(size)
      mtime = stat.mtime.httpdate
      url << '/' if stat.directory?
      basename << '/' if stat.directory?
      @files << [ url, basename, size, type, mtime ]
    end
    return [ 200, { CONTENT_TYPE =>'text/html; charset=utf-8'}, self ]
  end
  # File.stat that returns nil for dangling/looping entries instead of
  # raising. NOTE(review): the +max+ parameter is unused.
  def stat(node, max = 10)
    ::File.stat(node)
  rescue Errno::ENOENT, Errno::ELOOP
    return nil
  end
  # TODO: add correct response if not readable, not sure if 404 is the best
  # option
  def list_path
    @stat = ::File.stat(@path)
    if @stat.readable?
      return @app.call(@env) if @stat.file?
      return list_directory if @stat.directory?
    else
      raise Errno::ENOENT, 'No such file or directory'
    end
  rescue Errno::ENOENT, Errno::ELOOP
    return entity_not_found
  end
  # 404 response with an X-Cascade header so outer apps may try the route.
  def entity_not_found
    body = "Entity not found: #{@path_info}\n"
    size = body.bytesize
    return [404, {CONTENT_TYPE => "text/plain",
      CONTENT_LENGTH => size.to_s,
      "X-Cascade" => "pass"}, [body]]
  end
  # Body enumeration: yields the rendered HTML index line by line.
  # NOTE(review): @root is interpolated into a regex unescaped; a root
  # containing regex metacharacters would misbehave (Regexp.escape it).
  def each
    show_path = Rack::Utils.escape_html(@path.sub(/^#{@root}/,''))
    files = @files.map{|f| DIR_FILE % DIR_FILE_escape(*f) }*"\n"
    page = DIR_PAGE % [ show_path, show_path , files ]
    page.each_line{|l| yield l }
  end
  # Stolen from Ramaze
  FILESIZE_FORMAT = [
    ['%.1fT', 1 << 40],
    ['%.1fG', 1 << 30],
    ['%.1fM', 1 << 20],
    ['%.1fK', 1 << 10],
  ]
  # Human-readable file size, e.g. 1234 => "1.2K", 10 => "10B".
  def filesize_format(int)
    FILESIZE_FORMAT.each do |format, size|
      return format % (int.to_f / size) if int >= size
    end
    int.to_s + 'B'
  end
  private
  # Assumes url is already escaped.
  def DIR_FILE_escape url, *html
    [url, *html.map { |e| Utils.escape_html(e) }]
  end
end
end
Keep `path_info` on the stack.
This starts decoupling the Directory middleware from instance
variables, making the middleware threadsafe without duping.
require 'time'
require 'rack/utils'
require 'rack/mime'
module Rack
# Rack::Directory serves entries below the +root+ given, according to the
# path info of the Rack request. If a directory is found, the file's contents
# will be presented in an html based index. If a file is found, the env will
# be passed to the specified +app+.
#
# If +app+ is not specified, a Rack::File of the same +root+ will be used.
class Directory
  # Row template: href, display name, size, MIME type, mtime.
  DIR_FILE = "<tr><td class='name'><a href='%s'>%s</a></td><td class='size'>%s</td><td class='type'>%s</td><td class='mtime'>%s</td></tr>"
  # Page template: title, heading, concatenated DIR_FILE rows.
  DIR_PAGE = <<-PAGE
<html><head>
<title>%s</title>
<meta http-equiv="content-type" content="text/html; charset=utf-8" />
<style type='text/css'>
table { width:100%%; }
.name { text-align:left; }
.size, .mtime { text-align:right; }
.type { width:11em; }
.mtime { width:15em; }
</style>
</head><body>
<h1>%s</h1>
<hr />
<table>
<tr>
<th class='name'>Name</th>
<th class='size'>Size</th>
<th class='type'>Type</th>
<th class='mtime'>Last Modified</th>
</tr>
%s
</table>
<hr />
</body></html>
  PAGE
  attr_reader :files
  attr_accessor :root, :path
  # root - filesystem directory to serve entries from
  # app  - endpoint called for regular files (defaults to Rack::File on root)
  def initialize(root, app=nil)
    @root = ::File.expand_path(root)
    @app = app || Rack::File.new(@root)
  end
  # Each request runs against a dup of this middleware because _call still
  # mutates some per-request instance variables (@env, @path, @files).
  def call(env)
    dup._call(env)
  end
  def _call(env)
    @env = env
    @script_name = env[SCRIPT_NAME]
    path_info = Utils.unescape(env[PATH_INFO])
    if forbidden = check_forbidden(path_info)
      forbidden
    else
      @path = ::File.join(@root, path_info)
      list_path(path_info)
    end
  end
  # Reject paths containing "..", guarding against directory traversal.
  # Returns a 403 response triplet, or nil when the path is acceptable.
  def check_forbidden(path_info)
    return unless path_info.include? ".."
    body = "Forbidden\n"
    size = body.bytesize
    return [403, {CONTENT_TYPE => "text/plain",
      CONTENT_LENGTH => size.to_s,
      "X-Cascade" => "pass"}, [body]]
  end
  # Collect the directory entries into @files and return a 200 response
  # whose body is this middleware instance itself (see #each).
  def list_directory(path_info)
    @files = [['../','Parent Directory','','','']]
    glob = ::File.join(@path, '*')
    url_head = (@script_name.split('/') + path_info.split('/')).map do |part|
      Rack::Utils.escape part
    end
    Dir[glob].sort.each do |node|
      stat = stat(node)
      next unless stat
      basename = ::File.basename(node)
      ext = ::File.extname(node)
      url = ::File.join(*url_head + [Rack::Utils.escape(basename)])
      size = stat.size
      type = stat.directory? ? 'directory' : Mime.mime_type(ext)
      size = stat.directory? ? '-' : filesize_format(size)
      mtime = stat.mtime.httpdate
      url << '/' if stat.directory?
      basename << '/' if stat.directory?
      @files << [ url, basename, size, type, mtime ]
    end
    return [ 200, { CONTENT_TYPE =>'text/html; charset=utf-8'}, self ]
  end
  # File.stat that returns nil for dangling/looping entries instead of
  # raising. NOTE(review): the +max+ parameter is unused.
  def stat(node, max = 10)
    ::File.stat(node)
  rescue Errno::ENOENT, Errno::ELOOP
    return nil
  end
  # TODO: add correct response if not readable, not sure if 404 is the best
  # option
  def list_path(path_info)
    @stat = ::File.stat(@path)
    if @stat.readable?
      return @app.call(@env) if @stat.file?
      return list_directory(path_info) if @stat.directory?
    else
      raise Errno::ENOENT, 'No such file or directory'
    end
  rescue Errno::ENOENT, Errno::ELOOP
    return entity_not_found(path_info)
  end
  # 404 response with an X-Cascade header so outer apps may try the route.
  def entity_not_found(path_info)
    body = "Entity not found: #{path_info}\n"
    size = body.bytesize
    return [404, {CONTENT_TYPE => "text/plain",
      CONTENT_LENGTH => size.to_s,
      "X-Cascade" => "pass"}, [body]]
  end
  # Body enumeration: yields the rendered HTML index line by line.
  def each
    # FIX: escape @root before interpolating it into the regex; a root path
    # containing regex metacharacters (e.g. "c++", "a.b") previously made
    # the prefix-strip misbehave.
    show_path = Rack::Utils.escape_html(@path.sub(/^#{Regexp.escape(@root)}/,''))
    files = @files.map{|f| DIR_FILE % DIR_FILE_escape(*f) }*"\n"
    page = DIR_PAGE % [ show_path, show_path , files ]
    page.each_line{|l| yield l }
  end
  # Stolen from Ramaze
  FILESIZE_FORMAT = [
    ['%.1fT', 1 << 40],
    ['%.1fG', 1 << 30],
    ['%.1fM', 1 << 20],
    ['%.1fK', 1 << 10],
  ]
  # Human-readable file size, e.g. 1234 => "1.2K", 10 => "10B".
  def filesize_format(int)
    FILESIZE_FORMAT.each do |format, size|
      return format % (int.to_f / size) if int >= size
    end
    int.to_s + 'B'
  end
  private
  # Assumes url is already escaped.
  def DIR_FILE_escape url, *html
    [url, *html.map { |e| Utils.escape_html(e) }]
  end
end
end
|
# frozen_string_literal: true
# Copyright 2016 Holger Just
#
# This software may be modified and distributed under the terms
# of the MIT license. See the LICENSE.txt file for details.
require 'rack'
module Rackstash
  # Namespace for the integration of Rackstash with
  # [Rack](https://github.com/rack/rack), the generic webserver interface
  # used by Ruby web frameworks.
  #
  # Only a minimal, generic integration lives here; it is intended as a
  # building block for framework-specific integrations (e.g. Hanami,
  # Rails, or Sinatra).
  module Rack
  end
end
require 'rackstash/rack/middleware'
Require rackstash in rackstash/rack
This allows users to just add
require 'rackstash/rack'
in order to load the whole of Rackstash plus the Rack integration.
# frozen_string_literal: true
# Copyright 2016 Holger Just
#
# This software may be modified and distributed under the terms
# of the MIT license. See the LICENSE.txt file for details.
require 'rackstash'
require 'rack'
module Rackstash
  # Integration classes for [Rack](https://github.com/rack/rack), the
  # generic webserver interface shared by Ruby web frameworks.
  #
  # This namespace provides a very basic, framework-agnostic integration
  # meant to serve as the foundation for more specific integrations into
  # frameworks such as Hanami, Rails, or Sinatra.
  module Rack
  end
end
require 'rackstash/rack/middleware'
|
Fix rake install:hooks
|
#!/usr/bin/env ruby
#
# editor.rb
#
# Created by Fabio Cevasco on 2008-03-01.
# Copyright (c) 2008 Fabio Cevasco. All rights reserved.
#
# This is Free Software. See LICENSE for details.
#
require 'forwardable'
require 'terminal_layout'
require 'ansi_string'
require 'term/ansicolor'
require 'fcntl'
module RawLine
#
# The Editor class defines methods to:
#
# * Read characters from STDIN or any type of input
# * Write characters to STDOUT or any type of output
# * Bind keys to specific actions
# * Perform line-related operations like moving, navigating through history, etc.
#
# Note that the following default key bindings are provided:
#
# * TAB: word completion defined via completion_proc
# * LEFT/RIGHT ARROWS: cursor movement (left/right)
# * UP/DOWN ARROWS: history navigation
# * DEL: Delete character under cursor
# * BACKSPACE: Delete character before cursor
# * INSERT: Toggle insert/replace mode (default: insert)
# * CTRL+K: Clear the whole line
# * CTRL+Z: undo (unless already registered by the OS)
# * CTRL+Y: redo (unless already registered by the OS)
#
class Editor
extend Forwardable
include HighLine::SystemExtensions
# Last key (sequence) read, history capacities, and whether matching
# history text is highlighted while typing.
attr_accessor :char, :history_size, :line_history_size, :highlight_history_matching_text
# Terminal driver, key bindings and insert/replace mode.
attr_accessor :terminal, :keys, :mode
# Completion machinery, the current Line, the history buffer, and the
# string appended after a completed word.
attr_accessor :completion_class, :completion_proc, :line, :history, :completion_append_string
# Whether completion should offer hidden (dot) files.
attr_accessor :match_hidden_files
# Characters treated as word boundaries.
attr_accessor :word_break_characters
attr_reader :output
attr_accessor :dom
# TODO: dom traversal for lookup rather than assignment
attr_accessor :prompt_box, :input_box, :content_box
#
# Create an instance of RawLine::Editor which can be used
# to read from input and perform line-editing operations.
# This method takes an optional block used to override the
# following instance attributes:
# * <tt>@history_size</tt> - the size of the editor history buffer (30).
# * <tt>@line_history_size</tt> - the size of the editor line history buffer (50).
# * <tt>@keys</tt> - the keys (arrays of character codes) bound to specific actions.
# * <tt>@word_break_characters</tt> - a regex used for word separation, default inclues: " \t\n\"\\'`@$><=;|&{("
# * <tt>@mode</tt> - The editor's character insertion mode (:insert).
# * <tt>@completion_proc</tt> - a Proc object used to perform word completion.
# * <tt>@completion_append_string</tt> - a string to append to completed words ('').
# * <tt>@terminal</tt> - a RawLine::Terminal containing character key codes.
#
def initialize(input=STDIN, output=STDOUT)
  @input = input
  # @output = output
  # Pick a terminal driver matching the host platform.
  case RUBY_PLATFORM
  when /mswin/i then
    @terminal = WindowsTerminal.new
    if RawLine.win32console? then
      @win32_io = Win32::Console::ANSI::IO.new
    end
  else
    @terminal = VT220Terminal.new
  end
  # Editor defaults; any of these can be overridden in the optional block.
  @history_size = 30
  @line_history_size = 50
  @keys = {}
  @word_break_characters = " \t\n\"'@\$><=;|&{("
  @mode = :insert
  @completion_class = Completer
  @completion_proc = filename_completion_proc
  @completion_append_string = ''
  @match_hidden_files = false
  set_default_keys
  @add_history = false
  @highlight_history_matching_text = true
  # Shared history buffer: duplicates disallowed, blank lines excluded.
  @history = HistoryBuffer.new(@history_size) do |h|
    h.duplicates = false;
    h.exclude = lambda { |item| item.strip == "" }
  end
  @keyboard_input_processors = [self]
  # Caller's block runs before the word separator and event wiring are set up.
  yield self if block_given?
  update_word_separator
  @char = nil
  # NOTE(review): "Rawline" here vs the surrounding "RawLine" module name —
  # presumably a distinct namespace; confirm the spelling is intentional.
  @event_registry = Rawline::EventRegistry.new do |registry|
    registry.subscribe :default, -> (_) { self.check_for_keyboard_input }
    registry.subscribe :dom_tree_change, -> (_) { self.render }
  end
  @event_loop = Rawline::EventLoop.new(registry: @event_registry)
  @dom ||= build_dom_tree
  @renderer ||= build_renderer
  initialize_line
end
# NOTE(review): redundant — :dom already has a full accessor declared near
# the top of the class; this re-declaration is harmless but could be removed.
attr_reader :dom
# The editor's event loop; exposed so callers can push and handle events.
def events
  @event_loop
end
#
# Return the current RawLine version
#
#
# A human-readable banner with the current RawLine version.
#
def library_version
  format("RawLine v%s", RawLine.rawline_version)
end
# The prompt of the line currently being edited, or nil when no line
# exists yet.
def prompt
  @line ? @line.prompt : nil
end
# Update the prompt shown for the current line. The update is skipped while
# prompt updates are disallowed, before a line exists, or when the text is
# unchanged.
def prompt=(text)
  return unless @allow_prompt_updates
  return if @line.nil?
  return if @line.prompt == text
  @prompt_box.content = Prompt.new(text)
end
# Prepare a fresh Line for editing: clear the input box, recompute the word
# separator and build a new Line seeded with the current prompt. Prompt
# updates are re-enabled once the line exists.
def initialize_line
  @input_box.content = ""
  update_word_separator
  @add_history = true #add_history
  @line = Line.new(@line_history_size) do |l|
    l.prompt = @prompt_box.content
    l.word_separator = @word_separator
  end
  add_to_line_history
  @allow_prompt_updates = true
end
# Discard the current line, start a fresh one and force a full re-render.
def reset_line
  initialize_line
  render(reset: true)
end
# Poll @input for pending bytes without blocking.
#
# Reads 4K chunks until the IO raises IO::WaitReadable, then hands
# everything collected to the active keyboard input processor and
# re-queues this check on the event loop.
def check_for_keyboard_input
  bytes = []
  begin
    # Remember the original fcntl flags so O_NONBLOCK can be restored below.
    file_descriptor_flags = @input.fcntl(Fcntl::F_GETFL, 0)
    loop do
      string = @input.read_nonblock(4096)
      bytes.concat string.bytes
    end
  rescue IO::WaitReadable
    # reset flags so O_NONBLOCK is turned off on the file descriptor
    # if it was turned on during the read_nonblock above
    retry if IO.select([@input], [], [], 0.01)
    @input.fcntl(Fcntl::F_SETFL, file_descriptor_flags)
    @keyboard_input_processors.last.read_bytes(bytes)
    @event_loop.add_event name: 'check_for_keyboard_input', source: self
  end
end
# Feed raw keyboard bytes into the editor.
#
# Bytes are parsed into key codes and processed one at a time. When the
# cursor position changed (and the change isn't flagged to be ignored),
# the text left of the cursor is remembered for history matching. An ENTER
# key (or a nil char) finalizes the line and emits a "line_read" event
# carrying the ANSI-stripped line text.
def read_bytes(bytes)
  return unless bytes.any?
  old_position = @line.position
  key_codes = parse_key_codes(bytes)
  key_codes.each do |key_code|
    @char = key_code
    process_character
    new_position = @line.position
    if !@ignore_position_change && new_position != old_position
      @matching_text = @line.text[0...@line.position]
    end
    @ignore_position_change = false
    if @char == @terminal.keys[:enter] || !@char
      @allow_prompt_updates = false
      move_to_beginning_of_input
      @event_loop.add_event name: "line_read", source: self, payload: { line: @line.text.without_ansi.dup }
    end
  end
end
# Register a callback fired whenever a full line has been read.
def on_read_line(&blk)
  registry = @event_registry
  registry.subscribe(:line_read, &blk)
end
# Put the input IO into raw mode (restoring cooked mode at process exit),
# wire up terminal-resize handling, request an initial render and start the
# event loop. This call blocks until the event loop finishes.
def start
  @input.raw!
  at_exit { @input.cooked! }
  # SIGWINCH fires when the terminal window is resized.
  Signal.trap("SIGWINCH") do
    @event_loop.add_event name: "terminal-resized", source: self
  end
  @event_registry.subscribe("terminal-resized") do
    @render_tree.width = terminal_width
    @render_tree.height = terminal_height
    @event_loop.add_event name: "render", source: self
  end
  @event_loop.add_event name: "render", source: self
  @event_loop.start
end
# Forward an event subscription straight to the event registry.
def subscribe(*args, &blk)
  @event_registry.public_send(:subscribe, *args, &blk)
end
#
# Parse a key or key sequence into the corresponding codes.
#
#
# Parse a key or key sequence into the corresponding codes.
#
def parse_key_codes(bytes)
  parser = KeycodeParser.new(@terminal.keys)
  parser.parse_bytes(bytes)
end
#
# Write a string to <tt># @output</tt> starting from the cursor position.
# Characters at the right of the cursor are shifted to the right if
# <tt>@mode == :insert</tt>, deleted otherwise.
#
#
# Write a string starting from the cursor position, printing each byte in
# turn, then record the result in the line history.
#
def write(string)
  string.each_byte do |byte|
    print_character(byte, true)
  end
  add_to_line_history
end
#
# Write a new line to <tt># @output</tt>, overwriting any existing text
# and printing an end of line character.
#
# Replace the current line contents with +string+, record it in both the
# line history and the editor history, and clear the last-read character.
def write_line(string)
  clear_line
  # @output.print string
  @line.text = string
  @input_box.position = @line.position
  add_to_line_history
  add_to_history
  @char = nil
end
#
# Dispatch the last character read into @char: plain byte codes
# (Integer) trigger +default_action+; key sequences (Array) trigger
# their bound action via +press_key+ when one is bound.
# Called automatically for every parsed key code.
#
# Fix: the original compared @char.class.to_s against "Fixnum", which
# never matches on Ruby >= 2.4 (Integer unification), so printable
# characters were silently dropped on modern Rubies.
#
def process_character
  case @char
  when Integer then
    default_action
  when Array
    press_key if key_bound?
  end
end
#
# Bind a key to an action specified via <tt>block</tt>.
# <tt>key</tt> can be:
#
# * A Symbol identifying a character or character sequence defined for the current terminal
# * An Integer identifying a character defined for the current terminal
# * An Array identifying a character or character sequence defined for the current terminal
# * A String identifying a character or character sequence, even if it is not defined for the current terminal
# * An Hash identifying a character or character sequence, even if it is not defined for the current terminal
#
# If <tt>key</tt> is a hash, then:
#
# * It must contain only one key/value pair
# * The key identifies the name of the character or character sequence
# * The value identifies the code(s) corresponding to the character or character sequence
# * The value can be an Integer, a String or an Array.
#
# Fix: dispatch is now on the key's class instead of the class-name
# string "Fixnum", which never matches on Ruby >= 2.4 (Integer
# unification) and left numeric bindings unreachable.
#
def bind(key, &block)
  case key
  when Symbol then
    raise BindingException, "Unknown key or key sequence '#{key.to_s}' (#{key.class.to_s})" unless @terminal.keys[key]
    @keys[@terminal.keys[key]] = block
  when Array then
    raise BindingException, "Unknown key or key sequence '#{key.join(", ")}' (#{key.class.to_s})" unless @terminal.keys.has_value? key
    @keys[key] = block
  when Integer then
    raise BindingException, "Unknown key or key sequence '#{key.to_s}' (#{key.class.to_s})" unless @terminal.keys.has_value? [key]
    @keys[[key]] = block
  when String then
    if key.length == 1 then
      @keys[[key.ord]] = block
    else
      bind_hash({:"#{key}" => key}, block)
    end
  when Hash then
    raise BindingException, "Cannot bind more than one key or key sequence at once" unless key.values.length == 1
    bind_hash(key, block)
  else
    raise BindingException, "Unable to bind '#{key.to_s}' (#{key.class.to_s})"
  end
  @terminal.update
end
#
# Whether the most recently read key (@char) has an action bound to it.
#
def key_bound?
  !!@keys[@char]
end
#
# Invoke the action bound to the last key read into @char.
# Only safe to call when +key_bound?+ is true; called automatically
# by +process_character+.
#
def press_key
  @keys[@char].call
end
#
# Default handling for a printable character: append it to the input
# box and echo it into the line via +print_character+.
# Called automatically by +process_character+.
#
def default_action
  @input_box.content += @char.chr
  print_character
end
#
# Write +char+ at the cursor position — inserting (shifting the rest
# of the line right) when @mode == :insert, overwriting otherwise —
# then advance the cursor and mirror text/cursor into the input box.
# Pass no_line_history=true to skip the line-history snapshot.
#
def print_character(char=@char, no_line_history = false)
  if @line.position < @line.length then
    # NOTE(review): `chars` only feeds the commented-out cursor
    # restore below.
    chars = select_characters_from_cursor if @mode == :insert
    @line.text[@line.position] = (@mode == :insert) ? "#{char.chr}#{@line.text[@line.position]}" : "#{char.chr}"
    @line.right
    @input_box.position = @line.position
    # if @mode == :insert then
    # chars.length.times { @line.left } # move cursor back
    # end
  else
    # at end of line: simply append
    @line.right
    @line << char
  end
  @input_box.content = @line.text
  @input_box.position = @line.position
  add_to_line_history unless no_line_history
end
#
# Complete the current word according to what returned by
# <tt>@completion_proc</tt>. Characters can be appended to the
# completed word via <tt>@completion_append_character</tt> and word
# separators can be defined via <tt>@word_separator</tt>.
#
# This action is bound to the tab key by default, so the first
# match is displayed the first time the user presses tab, and all
# the possible messages will be displayed (cyclically) when tab is
# pressed again.
#
# While completion is in progress, the completer takes over keyboard
# input (pushed onto @keyboard_input_processors); its +done+ callback
# pops it again and replays any unconsumed bytes.
#
def complete
  completer = @completion_class.new(
    char: @char,
    line: @line,
    completion: @completion_proc,
    completion_found: -> (completion:, possible_completions:) {
      completion_found(completion: completion, possible_completions: possible_completions)
    },
    completion_not_found: -> {
      completion_not_found
    },
    done: -> (*leftover_bytes){
      completion_done
      leftover_bytes = leftover_bytes.flatten
      @keyboard_input_processors.pop
      if leftover_bytes.any?
        @keyboard_input_processors.last.read_bytes(leftover_bytes)
      end
    },
    keys: terminal.keys
  )
  @keyboard_input_processors.push(completer)
  completer.read_bytes(@char)
end
# Handle a successful completion: notify the optional word-complete
# callback, then replace the partially-typed word with +completion+
# (plus @completion_append_string, when set).
def completion_found(completion:, possible_completions:)
  if @on_word_complete
    word = @line.word[:text]
    sub_word = @line.text[@line.word[:start]..@line.position-1] || ""
    @on_word_complete.call(name: "word-completion", payload: { sub_word: sub_word, word: word, completion: completion, possible_completions: possible_completions })
  end
  if @line.word[:text].length > 0
    # If not in a word, print the match, otherwise continue existing word
    move_to_position(@line.word[:end]+@completion_append_string.to_s.length+1)
  end
  # erase the typed fragment, then write the full completion
  (@line.position-@line.word[:start]).times { delete_left_character(true) }
  write completion.to_s + @completion_append_string.to_s
end
# Notify the registered completion-failure callback (if any) with the
# current word and the portion typed so far.
def completion_not_found
  return unless @on_word_complete_no_match
  word = @line.word[:text]
  sub_word = @line.text[@line.word[:start]..@line.position-1] || ""
  @on_word_complete_no_match.call(name: "word-completion-no-match", payload: { sub_word: sub_word, word: word })
end
# Notify the completion-finished callback, when one is registered.
def completion_done
  @on_word_complete_done.call if @on_word_complete_done
end
# Register a callback fired when a word completion is found.
def on_word_complete(&blk)
  @on_word_complete = blk
end
# Register a callback fired when no completion matches the current word.
def on_word_complete_no_match(&blk)
  @on_word_complete_no_match = blk
end
# Register a callback fired when a completion session finishes.
def on_word_complete_done(&blk)
  @on_word_complete_done = blk
end
#
# Complete file and directory names.
# Hidden files and directories are matched only if <tt>@match_hidden_files</tt> is true.
#
# NOTE(review): +word+ is interpolated into a Regexp unescaped; a word
# containing regex metacharacters may raise or mis-match — consider
# Regexp.escape(word). Confirm intended behavior before changing.
#
def filename_completion_proc
  lambda do |word, _|
    dirs = @line.text.split('/')
    # absolute path (or Windows drive path) starts at root, else CWD
    path = @line.text.match(/^\/|[a-zA-Z]:\//) ? "/" : Dir.pwd+"/"
    if dirs.length == 0 then # starting directory
      dir = path
    else
      dirs.delete(dirs.last) unless File.directory?(path+dirs.join('/'))
      dir = path+dirs.join('/')
    end
    Dir.entries(dir).select { |e| (e =~ /^\./ && @match_hidden_files && word == '') || (e =~ /^#{word}/ && e !~ /^\./) }
  end
end
#
# Adds <tt>@line.text</tt> to the editor history and resets any
# in-progress history navigation. Bound to the enter key by default.
#
def newline
  add_to_history
  @history.clear_position
end
#
# Move the cursor left (if possible), updating <tt>@line.position</tt>
# and the input box cursor accordingly.
# This action is bound to the left arrow key by default.
#
# Fix: removed the stray `$z.puts "LINE: ..."` debug write — $z is an
# unassigned global (nil here), so this line raised NoMethodError at
# runtime. (The duplicate clean definition of this method elsewhere in
# the file confirms the line was leftover debugging.)
#
# @return [Boolean] true if the cursor moved, false if already at BOL.
def move_left
  unless @line.bol? then
    @line.left
    @input_box.position = @line.position
    return true
  end
  false
end
#
# Move the cursor right, unless it is already past the end of the
# line, and sync the input box cursor.
# This action is bound to the right arrow key by default.
#
# Fix: removed the stray `$z.puts "LINE: ..."` debug write — $z is an
# unassigned global (nil here), so this line raised NoMethodError at
# runtime.
#
# @return [Boolean] true if the cursor moved, false otherwise.
def move_right
  unless @line.position > @line.eol then
    @line.right
    @input_box.position = @line.position
    # @output.putc @line.text[@line.position-1]
    return true
  end
  false
end
#
# Print debug information about the current line. Note that after
# the message is displayed, the line text and position will be restored.
#
def debug_line
  pos = @line.position
  text = @line.text
  # NOTE(review): `word` is unused while the debug output below stays
  # commented out.
  word = @line.word
  # @output.puts
  # @output.puts "Text: [#{text}]"
  # @output.puts "Length: #{@line.length}"
  # @output.puts "Position: #{pos}"
  # @output.puts "Character at Position: [#{text[pos].chr}] (#{text[pos]})" unless pos >= @line.length
  # @output.puts "Current Word: [#{word[:text]}] (#{word[:start]} -- #{word[:end]})"
  clear_line
  raw_print text
  # restore the line exactly as it was before the dump
  overwrite_line(text, pos)
end
#
# Dump every editor-history entry to stdout, then restore the current
# line text and cursor position.
#
def show_history
  pos = @line.position
  text = @line.text
  @history.each { |entry| puts "- [#{entry}]" }
  overwrite_line(text, pos)
end
#
# Clear the editor history. (HistoryBuffer#empty empties the buffer —
# not to be confused with an empty? predicate.)
#
def clear_history
  @history.empty
end
#
# Delete the character to the left of the cursor (backspace behavior);
# does nothing at the beginning of the line. Pass true to skip
# recording the edit in the line history.
#
def delete_left_character(no_line_history=false)
  delete_character(no_line_history) if move_left
end
#
# Delete the character under the cursor.
# If <tt>no_line_hisytory</tt> is set to true, the deletion won't be
# recorded in the line history.
# This action is bound to the delete key by default.
#
def delete_character(no_line_history=false)
  unless @line.position > @line.eol
    # save characters to shift
    # NOTE(review): `chars` only feeds the commented-out console
    # shifting below.
    chars = (@line.eol?) ? ' ' : select_characters_from_cursor(1)
    # remove character from console and shift characters
    # (chars.length+1).times { # @output.putc ?\b.ord }
    #remove character from line
    @line[@line.position] = ''
    @input_box.content = @line.text
    @input_box.position = @line.position
    add_to_line_history unless no_line_history
  end
end
#
# Clear the current line, i.e.
# <tt>@line.text</tt> and <tt>@line.position</tt>.
# This action is bound to ctrl+k by default.
#
def clear_line
  # @output.putc ?\r
  # @output.print @line.prompt
  # @line.length.times { @output.putc ?\s.ord }
  # @line.length.times { @output.putc ?\b.ord }
  # snapshot the text *before* wiping it, so the clear is undoable
  add_to_line_history
  @line.text = ""
  @line.position = 0
  @input_box.position = @line.position
  @history.clear_position
end
# Clear the whole screen. Currently a no-op: direct terminal control
# is commented out pending the renderer-based implementation.
def clear_screen
  # @output.print @terminal.term_info.control_string("clear")
  # @terminal.clear_screen
  # @output.print @line.prompt
  # @output.print @line.text
  # (@line.length - @line.position).times { @output.putc ?\b.ord }
end
# Clear from the cursor to the bottom of the screen. Currently a
# no-op: direct terminal control is commented out pending the renderer.
def clear_screen_down
  # @output.print @terminal.term_info.control_string("ed")
  # @terminal.clear_screen_down
end
#
# Undo the last modification to the current line (<tt>@line.text</tt>).
# This action is bound to ctrl+z by default.
#
# When no line-history navigation is in progress (position == nil),
# step back twice: the first step only skips the just-recorded
# current state.
#
def undo
  generic_history_back(@line.history) if @line.history.position == nil
  generic_history_back(@line.history)
end
#
# Redo a previously-undone modification to the
# current line (<tt>@line.text</tt>).
# This action is bound to ctrl+y by default.
#
def redo
  generic_history_forward(@line.history)
end
#
# Load the previous editor-history entry in place of the current line
# and snapshot the result into the line history.
# This action is bound to the up arrow key by default.
#
def history_back
  generic_history_back(@history)
  add_to_line_history
end
#
# Load the next editor-history entry in place of the current line and
# snapshot the result into the line history.
# This action is bound to the down arrow key by default.
#
def history_forward
  generic_history_forward(@history)
  add_to_line_history
end
#
# Snapshot the current text into the per-line undo/redo history,
# skipping empty text.
#
def add_to_line_history
  text = @line.text
  @line.history << text.dup unless text == ""
end
#
# Append the current text to the editor history, unless history
# recording is disabled (@add_history) or the text is empty.
#
def add_to_history
  return unless @add_history
  text = @line.text
  @history << text.dup if text != ""
end
#
# Flip the character insertion mode between :insert and :replace.
# Any other value of @mode is left untouched.
#
def toggle_mode
  flips = { insert: :replace, replace: :insert }
  @mode = flips.fetch(@mode, @mode)
end
# The 1-based terminal row (within the wrapped line) that a given
# line position falls on, accounting for the prompt length.
def terminal_row_for_line_position(line_position)
  cells = @line.prompt.length + line_position
  (cells / terminal_width.to_f).ceil
end
# The 1-based terminal row the cursor currently occupies.
def current_terminal_row
  cells = @line.position + @line.prompt.length + 1
  (cells / terminal_width.to_f).ceil
end
# Total number of terminal rows the prompt plus line text occupy.
def number_of_terminal_rows
  total = @line.length + @line.prompt.length
  (total / terminal_width.to_f).ceil
end
# Remove and return everything from the cursor to the end of the line
# (emacs-style kill), syncing the input box afterwards.
def kill_forward
  killed = @line.text[@line.position..-1]
  @line[@line.position..-1] = ""
  @input_box.content = @line.text
  @input_box.position = @line.position
  killed
end
# Paste +text+ at the cursor and advance the cursor past it.
# NOTE(review): `@line.text[line.position] = text` replaces the single
# character under the cursor with +text+ rather than inserting before
# it — confirm whether overwriting that character is intended.
def yank_forward(text)
  @line.text[line.position] = text
  @line.position = line.position + text.length
  @input_box.content = line.text
  @input_box.position = @line.position
end
#
# Overwrite the current line (<tt>@line.text</tt>)
# with <tt>new_line</tt>, and optionally reset the cursor position to
# <tt>position</tt>.
#
# NOTE(review): the +position+ parameter is currently unused — the
# cursor is always placed at the end of +new_line+. Callers such as
# generic_history_back pass a cursor position; confirm intent.
# NOTE(review): local `text` is also unused.
#
def overwrite_line(new_line, position=nil, options={})
  text = @line.text
  @highlighting = false
  if options[:highlight_up_to]
    @highlighting = true
    new_line = highlight_text_up_to(new_line, options[:highlight_up_to])
  end
  # suppress the matching-text refresh a cursor move would trigger
  @ignore_position_change = true
  @line.position = new_line.length
  @line.text = new_line
  @input_box.content = @line.text
  @input_box.position = @line.position
  @event_loop.add_event name: "render", source: @input_box
end
# Wrap the first +position+ characters of +text+ in ANSI bold and
# return the result as an ANSIString.
def highlight_text_up_to(text, position)
  head = text[0...position]
  tail = text[position..-1]
  ANSIString.new("\e[1m#{head}\e[0m#{tail}")
end
# Jump the cursor (and the input box cursor) to the beginning of input.
def move_to_beginning_of_input
  start = @line.bol
  @line.position = start
  @input_box.position = @line.position
end
# Jump the cursor (and the input box cursor) to the end of input.
def move_to_end_of_input
  last = @line.length
  @line.position = last
  @input_box.position = @line.position
end
#
# Move the cursor to <tt>pos</tt>.
#
def move_to_position(pos)
  # number of wrapped rows between the cursor row and the target row
  rows_to_move = current_terminal_row - terminal_row_for_line_position(pos)
  if rows_to_move > 0
    # rows_to_move.times { @output.print @terminal.term_info.control_string("cuu1") }
    # @terminal.move_up_n_rows(rows_to_move)
  else
    # rows_to_move.abs.times { @output.print @terminal.term_info.control_string("cud1") }
    # @terminal.move_down_n_rows(rows_to_move.abs)
  end
  # NOTE(review): `column` is unused while the direct terminal writes
  # below stay commented out.
  column = (@line.prompt.length + pos) % terminal_width
  # @output.print @terminal.term_info.control_string("hpa", column)
  # @terminal.move_to_column((@line.prompt.length + pos) % terminal_width)
  @line.position = pos
  @input_box.position = @line.position
end
# Move the cursor to the very end of the line text, syncing the input
# box. Direct terminal cursor movement is commented out pending the
# renderer; only the logical position is updated.
def move_to_end_of_line
  rows_to_move_down = number_of_terminal_rows - current_terminal_row
  # rows_to_move_down.times { @output.print @terminal.term_info.control_string("cud1") }
  # @terminal.move_down_n_rows rows_to_move_down
  @line.position = @line.length
  @input_box.position = @line.position
  # NOTE(review): `column` is unused while the writes below stay
  # commented out.
  column = (@line.prompt.length + @line.position) % terminal_width
  # @output.print @terminal.term_info.control_string("hpa", column)
  # @terminal.move_to_column((@line.prompt.length + @line.position) % terminal_width)
end
# Move the terminal cursor up +n+ rows. Currently a no-op: direct
# terminal control is commented out pending the renderer.
def move_up_n_lines(n)
  # n.times { @output.print @terminal.term_info.control_string("cuu1") }
  # @terminal.move_up_n_rows(n)
end
# Move the terminal cursor down +n+ rows. Currently a no-op: direct
# terminal control is commented out pending the renderer.
def move_down_n_lines(n)
  # n.times { @output.print @terminal.term_info.control_string("cud1") }
  # @terminal.move_down_n_rows(n)
end
private
# Build the render DOM: an inline prompt box and input box followed
# by a block-level content box, wrapped in a root box.
def build_dom_tree
  @prompt_box = TerminalLayout::Box.new(content: "default-prompt>", style: {display: :inline})
  @input_box = TerminalLayout::InputBox.new(content: "", style: {display: :inline})
  @content_box = TerminalLayout::Box.new(content: "", style: {display: :block})
  TerminalLayout::Box.new(children:[@prompt_box, @input_box, @content_box])
end
# Build the terminal renderer plus the render tree for @dom, and wire
# DOM-change and cursor events to re-rendering. Returns the renderer.
def build_renderer
  @renderer = TerminalLayout::TerminalRenderer.new(output: $stdout)
  @render_tree = TerminalLayout::RenderTree.new(
    @dom,
    parent: nil,
    style: { width:terminal_width, height:terminal_height },
    renderer: @renderer
  )
  # any structural DOM change queues a full render
  @dom.on(:child_changed) do |*args|
    @event_loop.add_event name: "render", source: @dom#, target: event[:target]
  end
  # cursor moves only need the cursor repainted
  @dom.on :cursor_position_changed do |*args|
    @renderer.render_cursor(@input_box)
  end
  @event_registry.subscribe :render, -> (_) { render(reset: false) }
  @renderer
end
# Lay out and paint the render tree, then queue another keyboard
# poll. reset: true forces the renderer to repaint from scratch.
def render(reset: false)
  @render_tree.layout
  @renderer.reset if reset
  @renderer.render(@render_tree)
  @event_loop.add_event name: "check_for_keyboard_input"
end
# Rebuild @word_separator — a Regexp matching any unescaped word-break
# character — from the @word_break_characters string. An empty or nil
# @word_break_characters yields an empty-string separator.
#
# Fix: each_byte always yields Integer byte values; the original
# guarded with Fixnum, a constant removed in Ruby 3.2 (NameError).
def update_word_separator
  return @word_separator = "" if @word_break_characters.to_s == ""
  chars = []
  @word_break_characters.each_byte do |c|
    ch = (c.is_a? Integer) ? c : c.ord
    value = (ch == ?\s.ord) ? ' ' : Regexp.escape(ch.chr).to_s
    chars << value
  end
  @word_separator = /(?<!\\)[#{chars.join}]/
end
# Register each named key sequence (name => codes) on the terminal and
# in the editor key bindings. The value may be an Integer code, a
# String of characters, or an Array of codes; anything else raises
# BindingException.
#
# Fix: dispatch is now on the value's class rather than the class-name
# string "Fixnum", which never matches on Ruby >= 2.4.
# NOTE(review): the guard below calls k.length / k[0], so an Integer
# value cannot actually pass it — preexisting behavior, left intact.
def bind_hash(key, block)
  key.each_pair do |j,k|
    raise BindingException, "'#{k[0].chr}' is not a legal escape code for '#{@terminal.class.to_s}'." unless k.length > 1 && @terminal.escape_codes.include?(k[0].ord)
    code = []
    case k
    when Integer then
      code = [k]
    when String then
      k.each_byte { |b| code << b }
    when Array then
      code = k
    else
      raise BindingException, "Unable to bind '#{k.to_s}' (#{k.class.to_s})"
    end
    @terminal.keys[j] = code
    @keys[code] = block
  end
end
# Select the characters from the cursor to the end of the line,
# optionally skipping +offset+ characters after the cursor.
def select_characters_from_cursor(offset=0)
  remaining = @line.length - @line.position
  select_characters(:right, remaining, offset)
end
# Write +string+ byte-by-byte to the output. Currently a no-op: the
# direct-output path is commented out pending the renderer.
def raw_print(string)
  # string.each_byte { |c| @output.putc c }
end
# Step +history+ backwards (filtered by the current matching text) and
# load the resulting entry into the line, optionally highlighting the
# matched prefix and placing the cursor just after it.
def generic_history_back(history)
  unless history.empty?
    history.back(matching_text: matching_text)
    line = history.get
    return unless line
    cursor_position = nil
    if supports_partial_text_matching? && highlight_history_matching_text
      # place the cursor at the end of the matched prefix (clamped to
      # the entry length)
      if line && matching_text
        cursor_position = [line.length, matching_text.length].min
      elsif matching_text
        cursor_position = matching_text.length
      end
    end
    overwrite_line(line, cursor_position, highlight_up_to: cursor_position)
  end
end
# Whether the editor history buffer supports prefix-filtered navigation.
def supports_partial_text_matching?
  history.supports_partial_text_matching?
end
# Step +history+ forwards (filtered by the current matching text) and
# load the resulting entry into the line; mirror of generic_history_back.
def generic_history_forward(history)
  if history.forward(matching_text: matching_text)
    line = history.get
    return unless line
    cursor_position = if supports_partial_text_matching? && highlight_history_matching_text && matching_text
      [line.length, matching_text.length].min
    end
    overwrite_line(line, cursor_position, highlight_up_to: cursor_position)
  end
end
# Return up to n+1 characters adjacent to the cursor — to its right
# (:right) or left (:left) — optionally skipping +offset+ characters
# next to the cursor. Any other direction yields nil.
def select_characters(direction, n, offset=0)
  pos = @line.position
  case direction
  when :right then @line.text[pos+offset..pos+offset+n]
  when :left  then @line.text[pos-offset-n..pos-offset]
  end
end
# Install the default key bindings: enter, tab completion, cursor
# movement, history navigation, editing and mode toggling.
# NOTE(review): the class docs mention ctrl+z/ctrl+y for undo/redo,
# but the actual bindings are ctrl_u/ctrl_r — confirm which is intended.
def set_default_keys
  bind(:enter) { newline }
  bind(:tab) { complete }
  bind(:backspace) { delete_left_character }
  bind(:ctrl_c) { raise Interrupt }
  bind(:ctrl_k) { clear_line }
  bind(:ctrl_u) { undo }
  bind(:ctrl_r) { self.redo }
  bind(:left_arrow) { move_left }
  bind(:right_arrow) { move_right }
  bind(:up_arrow) { history_back }
  bind(:down_arrow) { history_forward }
  bind(:delete) { delete_character }
  bind(:insert) { toggle_mode }
end
# The text used to filter history navigation: while a search is
# active, the remembered prefix; otherwise the text left of the
# cursor (via Line#[], also cached into @matching_text).
def matching_text
  return nil unless @line
  return nil if @line.text == ""
  if @history.searching?
    @matching_text
  else
    @matching_text = @line[0...@line.position]
  end
end
end
# Terminal-capability add-ons, defined only when ANSI is supported.
if RawLine.ansi? then
  class Editor
    if RUBY_PLATFORM.match(/mswin/) && RawLine.win32console? then
      # On Windows with win32console, escape sequences must go through
      # the Win32 ANSI IO shim.
      def escape(string)
        string.each_byte { |c| @win32_io.putc c }
      end
    else
      # Elsewhere the sequence would be printed directly (currently
      # disabled pending the renderer).
      def escape(string)
        # @output.print string
      end
    end
    # Current terminal width in columns.
    def terminal_width
      terminal_size[0]
    end
    # Current terminal height in rows.
    def terminal_height
      terminal_size[1]
    end
    # Cursor position as reported by the terminal driver.
    def cursor_position
      terminal.cursor_position
    end
  end
end
end
# Removing unnecessary $z.puts debugging lines.
#!/usr/bin/env ruby
#
# editor.rb
#
# Created by Fabio Cevasco on 2008-03-01.
# Copyright (c) 2008 Fabio Cevasco. All rights reserved.
#
# This is Free Software. See LICENSE for details.
#
require 'forwardable'
require 'terminal_layout'
require 'ansi_string'
require 'term/ansicolor'
require 'fcntl'
module RawLine
#
# The Editor class defines methods to:
#
# * Read characters from STDIN or any type of input
# * Write characters to STDOUT or any type of output
# * Bind keys to specific actions
# * Perform line-related operations like moving, navigating through history, etc.
#
# Note that the following default key bindings are provided:
#
# * TAB: word completion defined via completion_proc
# * LEFT/RIGHT ARROWS: cursor movement (left/right)
# * UP/DOWN ARROWS: history navigation
# * DEL: Delete character under cursor
# * BACKSPACE: Delete character before cursor
# * INSERT: Toggle insert/replace mode (default: insert)
# * CTRL+K: Clear the whole line
# * CTRL+Z: undo (unless already registered by the OS)
# * CTRL+Y: redo (unless already registered by the OS)
#
class Editor
extend Forwardable
include HighLine::SystemExtensions
attr_accessor :char, :history_size, :line_history_size, :highlight_history_matching_text
attr_accessor :terminal, :keys, :mode
attr_accessor :completion_class, :completion_proc, :line, :history, :completion_append_string
attr_accessor :match_hidden_files
attr_accessor :word_break_characters
attr_reader :output
attr_accessor :dom
# TODO: dom traversal for lookup rather than assignment
attr_accessor :prompt_box, :input_box, :content_box
#
# Create an instance of RawLine::Editor which can be used
# to read from input and perform line-editing operations.
# This method takes an optional block used to override the
# following instance attributes:
# * <tt>@history_size</tt> - the size of the editor history buffer (30).
# * <tt>@line_history_size</tt> - the size of the editor line history buffer (50).
# * <tt>@keys</tt> - the keys (arrays of character codes) bound to specific actions.
# * <tt>@word_break_characters</tt> - a regex used for word separation, default inclues: " \t\n\"\\'`@$><=;|&{("
# * <tt>@mode</tt> - The editor's character insertion mode (:insert).
# * <tt>@completion_proc</tt> - a Proc object used to perform word completion.
# * <tt>@completion_append_string</tt> - a string to append to completed words ('').
# * <tt>@terminal</tt> - a RawLine::Terminal containing character key codes.
#
# Set up terminal driver, defaults, history, key bindings, event loop
# and render DOM; yields self before finalizing so callers can
# override the defaults.
def initialize(input=STDIN, output=STDOUT)
  @input = input
  # @output = output
  # pick the terminal driver for the current platform
  case RUBY_PLATFORM
  when /mswin/i then
    @terminal = WindowsTerminal.new
    if RawLine.win32console? then
      @win32_io = Win32::Console::ANSI::IO.new
    end
  else
    @terminal = VT220Terminal.new
  end
  @history_size = 30
  @line_history_size = 50
  @keys = {}
  @word_break_characters = " \t\n\"'@\$><=;|&{("
  @mode = :insert
  @completion_class = Completer
  @completion_proc = filename_completion_proc
  @completion_append_string = ''
  @match_hidden_files = false
  set_default_keys
  # history recording stays off until initialize_line enables it
  @add_history = false
  @highlight_history_matching_text = true
  # editor history: no duplicates, blank entries excluded
  @history = HistoryBuffer.new(@history_size) do |h|
    h.duplicates = false;
    h.exclude = lambda { |item| item.strip == "" }
  end
  # stack of input processors; the editor itself is the base processor
  @keyboard_input_processors = [self]
  # let callers override the defaults above
  yield self if block_given?
  update_word_separator
  @char = nil
  @event_registry = Rawline::EventRegistry.new do |registry|
    registry.subscribe :default, -> (_) { self.check_for_keyboard_input }
    registry.subscribe :dom_tree_change, -> (_) { self.render }
  end
  @event_loop = Rawline::EventLoop.new(registry: @event_registry)
  @dom ||= build_dom_tree
  @renderer ||= build_renderer
  initialize_line
end
attr_reader :dom
# The editor's event loop (exposed so callers can schedule events).
def events
  @event_loop
end
#
# Return the current RawLine version string, e.g. "RawLine v1.0".
#
def library_version
  "RawLine v" + RawLine.rawline_version.to_s
end
# The prompt of the current line, or nil when no line exists yet.
def prompt
  return nil unless @line
  @line.prompt
end
# Update the prompt text — unless prompt updates are suspended, no
# line exists yet, or the text is unchanged.
def prompt=(text)
  return unless @allow_prompt_updates
  return if @line.nil? || @line.prompt == text
  @prompt_box.content = Prompt.new(text)
end
# Create a fresh Line (with the current prompt and word separator),
# reset the input box, and enable history recording and prompt updates.
def initialize_line
  @input_box.content = ""
  update_word_separator
  @add_history = true #add_history
  @line = Line.new(@line_history_size) do |l|
    l.prompt = @prompt_box.content
    l.word_separator = @word_separator
  end
  add_to_line_history
  @allow_prompt_updates = true
end
# Re-initialize the line state and force a from-scratch re-render.
def reset_line
  initialize_line
  render(reset: true)
end
# Drain all currently-available bytes from @input without blocking,
# hand them to the active input processor, then re-queue itself on
# the event loop.
def check_for_keyboard_input
  bytes = []
  begin
    # remember the fd flags so O_NONBLOCK can be reset afterwards
    file_descriptor_flags = @input.fcntl(Fcntl::F_GETFL, 0)
    # read until read_nonblock raises IO::WaitReadable (no more data)
    loop do
      string = @input.read_nonblock(4096)
      bytes.concat string.bytes
    end
  rescue IO::WaitReadable
    # reset flags so O_NONBLOCK is turned off on the file descriptor
    # if it was turned on during the read_nonblock above
    retry if IO.select([@input], [], [], 0.01)
    @input.fcntl(Fcntl::F_SETFL, file_descriptor_flags)
    @keyboard_input_processors.last.read_bytes(bytes)
    @event_loop.add_event name: 'check_for_keyboard_input', source: self
  end
end
# Parse raw input bytes into key codes and process each one; emits a
# "line_read" event when enter (or a nil char) terminates the line.
def read_bytes(bytes)
  return unless bytes.any?
  old_position = @line.position
  key_codes = parse_key_codes(bytes)
  key_codes.each do |key_code|
    @char = key_code
    process_character
    new_position = @line.position
    # an ordinary cursor move refreshes the history-search prefix
    if !@ignore_position_change && new_position != old_position
      @matching_text = @line.text[0...@line.position]
    end
    @ignore_position_change = false
    if @char == @terminal.keys[:enter] || !@char
      # line finished: freeze prompt updates and announce the line
      @allow_prompt_updates = false
      move_to_beginning_of_input
      @event_loop.add_event name: "line_read", source: self, payload: { line: @line.text.without_ansi.dup }
    end
  end
end
# Register a callback invoked whenever a complete line has been read
# (i.e. whenever a :line_read event fires).
def on_read_line(&blk)
  registry = @event_registry
  registry.subscribe(:line_read, &blk)
end
# Switch the input stream into raw mode, wire up resize handling, and
# enter the event loop (blocks until the loop stops).
def start
  @input.raw!
  # restore cooked mode when the process exits
  at_exit { @input.cooked! }
  # NOTE(review): add_event is invoked from signal-trap context here —
  # confirm the event loop is safe to mutate from a trap handler.
  Signal.trap("SIGWINCH") do
    @event_loop.add_event name: "terminal-resized", source: self
  end
  # on resize: adapt the render tree to the new size and repaint
  @event_registry.subscribe("terminal-resized") do
    @render_tree.width = terminal_width
    @render_tree.height = terminal_height
    @event_loop.add_event name: "render", source: self
  end
  # initial paint before the loop starts processing events
  @event_loop.add_event name: "render", source: self
  @event_loop.start
end
# Subscribe to a named event; pure delegation to the event registry.
def subscribe(*args, &blk)
  @event_registry.subscribe(*args, &blk)
end
#
# Parse raw input bytes into key codes, collapsing multi-byte escape
# sequences into the named key codes known to the current terminal.
#
def parse_key_codes(bytes)
  KeycodeParser.new(@terminal.keys).parse_bytes(bytes)
end
#
# Insert +string+ at the cursor location one byte at a time (shifting
# or overwriting according to @mode), then snapshot the line history.
#
def write(string)
  string.bytes.each do |byte|
    print_character byte, true
  end
  add_to_line_history
end
#
# Replace the current line contents with +string+ and record the
# result in both the line history and the editor history.
#
def write_line(string)
  clear_line
  # @output.print string
  @line.text = string
  @input_box.position = @line.position
  add_to_line_history
  add_to_history
  # clear the last-read key so it is not re-processed
  @char = nil
end
#
# Dispatch the last character read into @char: plain byte codes
# (Integer) trigger +default_action+; key sequences (Array) trigger
# their bound action via +press_key+ when one is bound.
#
# Fix: the original compared @char.class.to_s against "Fixnum", which
# never matches on Ruby >= 2.4 (Integer unification), so printable
# characters were silently dropped on modern Rubies.
#
def process_character
  case @char
  when Integer then
    default_action
  when Array
    press_key if key_bound?
  end
end
#
# Bind a key to an action specified via <tt>block</tt>.
# <tt>key</tt> can be:
#
# * A Symbol identifying a character or character sequence defined for the current terminal
# * An Integer identifying a character defined for the current terminal
# * An Array identifying a character or character sequence defined for the current terminal
# * A String identifying a character or character sequence, even if it is not defined for the current terminal
# * An Hash identifying a character or character sequence, even if it is not defined for the current terminal
#
# If <tt>key</tt> is a hash, then:
#
# * It must contain only one key/value pair
# * The key identifies the name of the character or character sequence
# * The value identifies the code(s) corresponding to the character or character sequence
# * The value can be an Integer, a String or an Array.
#
# Fix: dispatch is now on the key's class instead of the class-name
# string "Fixnum", which never matches on Ruby >= 2.4 (Integer
# unification) and left numeric bindings unreachable.
#
def bind(key, &block)
  case key
  when Symbol then
    raise BindingException, "Unknown key or key sequence '#{key.to_s}' (#{key.class.to_s})" unless @terminal.keys[key]
    @keys[@terminal.keys[key]] = block
  when Array then
    raise BindingException, "Unknown key or key sequence '#{key.join(", ")}' (#{key.class.to_s})" unless @terminal.keys.has_value? key
    @keys[key] = block
  when Integer then
    raise BindingException, "Unknown key or key sequence '#{key.to_s}' (#{key.class.to_s})" unless @terminal.keys.has_value? [key]
    @keys[[key]] = block
  when String then
    if key.length == 1 then
      @keys[[key.ord]] = block
    else
      bind_hash({:"#{key}" => key}, block)
    end
  when Hash then
    raise BindingException, "Cannot bind more than one key or key sequence at once" unless key.values.length == 1
    bind_hash(key, block)
  else
    raise BindingException, "Unable to bind '#{key.to_s}' (#{key.class.to_s})"
  end
  @terminal.update
end
#
# Whether the most recently read key (@char) has an action bound to it.
#
def key_bound?
  !!@keys[@char]
end
#
# Invoke the action bound to the last key read into @char.
# Only safe to call when +key_bound?+ is true; called automatically
# by +process_character+.
#
def press_key
  @keys[@char].call
end
#
# Default handling for a printable character: append it to the input
# box and echo it into the line via +print_character+.
# Called automatically by +process_character+.
#
def default_action
  @input_box.content += @char.chr
  print_character
end
#
# Write +char+ at the cursor position — inserting (shifting the rest
# of the line right) when @mode == :insert, overwriting otherwise —
# then advance the cursor and mirror text/cursor into the input box.
# Pass no_line_history=true to skip the line-history snapshot.
#
def print_character(char=@char, no_line_history = false)
  if @line.position < @line.length then
    # NOTE(review): `chars` only feeds the commented-out cursor
    # restore below.
    chars = select_characters_from_cursor if @mode == :insert
    @line.text[@line.position] = (@mode == :insert) ? "#{char.chr}#{@line.text[@line.position]}" : "#{char.chr}"
    @line.right
    @input_box.position = @line.position
    # if @mode == :insert then
    # chars.length.times { @line.left } # move cursor back
    # end
  else
    # at end of line: simply append
    @line.right
    @line << char
  end
  @input_box.content = @line.text
  @input_box.position = @line.position
  add_to_line_history unless no_line_history
end
#
# Complete the current word according to what returned by
# <tt>@completion_proc</tt>. Characters can be appended to the
# completed word via <tt>@completion_append_character</tt> and word
# separators can be defined via <tt>@word_separator</tt>.
#
# This action is bound to the tab key by default, so the first
# match is displayed the first time the user presses tab, and all
# the possible messages will be displayed (cyclically) when tab is
# pressed again.
#
# While completion is in progress, the completer takes over keyboard
# input (pushed onto @keyboard_input_processors); its +done+ callback
# pops it again and replays any unconsumed bytes.
#
def complete
  completer = @completion_class.new(
    char: @char,
    line: @line,
    completion: @completion_proc,
    completion_found: -> (completion:, possible_completions:) {
      completion_found(completion: completion, possible_completions: possible_completions)
    },
    completion_not_found: -> {
      completion_not_found
    },
    done: -> (*leftover_bytes){
      completion_done
      leftover_bytes = leftover_bytes.flatten
      @keyboard_input_processors.pop
      if leftover_bytes.any?
        @keyboard_input_processors.last.read_bytes(leftover_bytes)
      end
    },
    keys: terminal.keys
  )
  @keyboard_input_processors.push(completer)
  completer.read_bytes(@char)
end
# Handle a successful completion: notify the optional word-complete
# callback, then replace the partially-typed word with +completion+
# (plus @completion_append_string, when set).
def completion_found(completion:, possible_completions:)
  if @on_word_complete
    word = @line.word[:text]
    sub_word = @line.text[@line.word[:start]..@line.position-1] || ""
    @on_word_complete.call(name: "word-completion", payload: { sub_word: sub_word, word: word, completion: completion, possible_completions: possible_completions })
  end
  if @line.word[:text].length > 0
    # If not in a word, print the match, otherwise continue existing word
    move_to_position(@line.word[:end]+@completion_append_string.to_s.length+1)
  end
  # erase the typed fragment, then write the full completion
  (@line.position-@line.word[:start]).times { delete_left_character(true) }
  write completion.to_s + @completion_append_string.to_s
end
# Notify the registered completion-failure callback (if any) with the
# current word and the portion typed so far.
def completion_not_found
  return unless @on_word_complete_no_match
  word = @line.word[:text]
  sub_word = @line.text[@line.word[:start]..@line.position-1] || ""
  @on_word_complete_no_match.call(name: "word-completion-no-match", payload: { sub_word: sub_word, word: word })
end
# Notify the completion-finished callback, when one is registered.
def completion_done
  @on_word_complete_done.call if @on_word_complete_done
end
# Register a callback fired when a word completion is found.
def on_word_complete(&blk)
  @on_word_complete = blk
end
# Register a callback fired when no completion matches the current word.
def on_word_complete_no_match(&blk)
  @on_word_complete_no_match = blk
end
# Register a callback fired when a completion session finishes.
def on_word_complete_done(&blk)
  @on_word_complete_done = blk
end
#
# Complete file and directory names.
# Hidden files and directories are matched only if <tt>@match_hidden_files</tt> is true.
#
# NOTE(review): +word+ is interpolated into a Regexp unescaped —
# consider Regexp.escape(word). Confirm intended behavior.
#
def filename_completion_proc
  lambda do |word, _|
    dirs = @line.text.split('/')
    # absolute path (or Windows drive path) starts at root, else CWD
    path = @line.text.match(/^\/|[a-zA-Z]:\//) ? "/" : Dir.pwd+"/"
    if dirs.length == 0 then # starting directory
      dir = path
    else
      dirs.delete(dirs.last) unless File.directory?(path+dirs.join('/'))
      dir = path+dirs.join('/')
    end
    Dir.entries(dir).select { |e| (e =~ /^\./ && @match_hidden_files && word == '') || (e =~ /^#{word}/ && e !~ /^\./) }
  end
end
#
# Adds <tt>@line.text</tt> to the editor history and resets any
# in-progress history navigation. Bound to the enter key by default.
#
def newline
  add_to_history
  @history.clear_position
end
#
# Move the cursor left (if possible), keeping the input box cursor in
# sync. Bound to the left arrow key by default.
#
# @return [Boolean] true when the cursor moved, false at BOL.
def move_left
  return false if @line.bol?
  @line.left
  @input_box.position = @line.position
  true
end
#
# Move the cursor right unless it is already past the end of the
# line, keeping the input box cursor in sync.
# Bound to the right arrow key by default.
#
# @return [Boolean] true when the cursor moved, false otherwise.
def move_right
  return false if @line.position > @line.eol
  @line.right
  @input_box.position = @line.position
  true
end
#
# Print debug information about the current line. Note that after
# the message is displayed, the line text and position will be restored.
#
def debug_line
  pos = @line.position
  text = @line.text
  # NOTE(review): `word` is unused while the debug output below stays
  # commented out.
  word = @line.word
  # @output.puts
  # @output.puts "Text: [#{text}]"
  # @output.puts "Length: #{@line.length}"
  # @output.puts "Position: #{pos}"
  # @output.puts "Character at Position: [#{text[pos].chr}] (#{text[pos]})" unless pos >= @line.length
  # @output.puts "Current Word: [#{word[:text]}] (#{word[:start]} -- #{word[:end]})"
  clear_line
  raw_print text
  # restore the line exactly as it was before the dump
  overwrite_line(text, pos)
end
#
# Print every entry of the editor history (one "- [entry]" line each),
# then restore the current line text and cursor position.
#
def show_history
  pos = @line.position
  text = @line.text
  # @output.puts
  # @output.puts "History:"
  @history.each {|l| puts "- [#{l}]"}
  overwrite_line(text, pos)
end
#
# Empty the editor history.
#
def clear_history
  @history.empty
end
#
# Delete the character at the left of the cursor by stepping left and
# deleting under the cursor; does nothing at the beginning of the line
# (where move_left returns false).
# If <tt>no_line_history</tt> is set to true, the deletion won't be
# recorded in the line history.
# This action is bound to the backspace key by default.
#
def delete_left_character(no_line_history=false)
  delete_character(no_line_history) if move_left
end
#
# Delete the character under the cursor (no-op when the cursor is past
# the end of the line).
# If <tt>no_line_history</tt> is set to true, the deletion won't be
# recorded in the line history.
# This action is bound to the delete key by default.
#
def delete_character(no_line_history=false)
  unless @line.position > @line.eol
    # save characters to shift
    # NOTE(review): +chars+ only fed the commented-out console shifting
    # below and is now unused.
    chars = (@line.eol?) ? ' ' : select_characters_from_cursor(1)
    # remove character from console and shift characters
    # (chars.length+1).times { # @output.putc ?\b.ord }
    # Remove the character from the line model and sync the input box.
    @line[@line.position] = ''
    @input_box.content = @line.text
    @input_box.position = @line.position
    add_to_line_history unless no_line_history
  end
end
#
# Clear the current line: snapshot it into the line (undo) history,
# then reset <tt>@line.text</tt>, the cursor, and any history
# navigation position. The direct terminal erasing is commented out —
# rendering is handled elsewhere.
# This action is bound to ctrl+k by default.
#
def clear_line
  # @output.putc ?\r
  # @output.print @line.prompt
  # @line.length.times { @output.putc ?\s.ord }
  # @line.length.times { @output.putc ?\b.ord }
  add_to_line_history
  @line.text = ""
  @line.position = 0
  @input_box.position = @line.position
  @history.clear_position
end
# Clear the whole screen. Currently a no-op stub: the terminfo-based
# implementation is commented out; presumably the renderer owns the
# screen now — TODO confirm whether this should delegate to it.
def clear_screen
  # @output.print @terminal.term_info.control_string("clear")
  # @terminal.clear_screen
  # @output.print @line.prompt
  # @output.print @line.text
  # (@line.length - @line.position).times { @output.putc ?\b.ord }
end
# Clear from the cursor to the bottom of the screen. No-op stub: the
# terminfo "ed" sequence is commented out.
def clear_screen_down
  # @output.print @terminal.term_info.control_string("ed")
  # @terminal.clear_screen_down
end
#
# Undo the last modification to the current line (<tt>@line.text</tt>).
# This action is bound to ctrl+u by default (see set_default_keys).
#
def undo
  # When no undo navigation is in progress (position is nil) the first
  # step presumably only primes the history position, so step back
  # twice — TODO confirm against the history implementation.
  generic_history_back(@line.history) if @line.history.position == nil
  generic_history_back(@line.history)
end
#
# Redo a previously-undone modification to the
# current line (<tt>@line.text</tt>).
# This action is bound to ctrl+r by default (see set_default_keys).
#
def redo
  generic_history_forward(@line.history)
end
#
# Replace the current line (<tt>@line.text</tt>) with the previous
# editor-history entry, recording the result in the line (undo) history.
# This action is bound to the up arrow key by default.
#
def history_back
  generic_history_back(@history)
  add_to_line_history
end
#
# Replace the current line (<tt>@line.text</tt>) with the next
# editor-history entry, recording the result in the line (undo) history.
# This action is bound to the down arrow key by default.
#
def history_forward
  generic_history_forward(@history)
  add_to_line_history
end
#
# Snapshot the current line (<tt>@line.text</tt>) into the line
# history so undo/redo can restore it. Empty lines are not recorded;
# the text is dup'ed so later edits don't mutate the snapshot.
#
def add_to_line_history
  current = @line.text
  @line.history << current.dup unless current == ""
end
#
# Append the current line (<tt>@line.text</tt>) to the editor history,
# unless history recording is disabled (@add_history false) or the
# line is empty. The text is dup'ed so later edits don't mutate it.
#
def add_to_history
  return unless @add_history
  current = @line.text
  @history << current.dup unless current == ""
end
#
# Toggle the editor <tt>@mode</tt> between :insert (default) and
# :replace. Any other value is left untouched.
#
def toggle_mode
  if @mode == :insert
    @mode = :replace
  elsif @mode == :replace
    @mode = :insert
  end
end
# 1-based terminal row occupied by the given line position, accounting
# for the prompt length and line wrapping at terminal_width.
def terminal_row_for_line_position(line_position)
  ((@line.prompt.length + line_position) / terminal_width.to_f).ceil
end
# 1-based terminal row currently containing the cursor (prompt length
# and wrapping included).
def current_terminal_row
  ((@line.position + @line.prompt.length + 1) / terminal_width.to_f).ceil
end
# Total number of terminal rows the prompt plus line text span.
def number_of_terminal_rows
  ((@line.length + @line.prompt.length) / terminal_width.to_f).ceil
end
# Remove everything from the cursor to the end of the line, sync the
# input box, and return the removed text (so a later yank can paste it
# back). Fix: the original mixed the bare +line+ reader with @line in
# the same expression; normalized to @line for consistency with the
# rest of the class.
def kill_forward
  @line.text[@line.position..-1].tap do
    @line[@line.position..-1] = ""
    @input_box.content = @line.text
    @input_box.position = @line.position
  end
end
# Paste +text+ at the cursor and advance the cursor past it.
# NOTE(review): String#[]= with a single index REPLACES the one
# character at that position (or appends at end-of-string) rather than
# inserting — confirm this is the intended paste semantics after a
# kill_forward.
# NOTE(review): mixes the bare +line+ reader with @line; assumes an
# attr_reader aliases @line — verify.
def yank_forward(text)
  @line.text[line.position] = text
  @line.position = line.position + text.length
  @input_box.content = line.text
  @input_box.position = @line.position
end
#
# Overwrite the current line (<tt>@line.text</tt>) with
# <tt>new_line</tt>, sync the input box, and queue a render event.
# With options[:highlight_up_to], the leading portion of the new line
# is bolded first (see highlight_text_up_to).
#
# NOTE(review): the +position+ parameter is accepted but never used —
# the cursor is always placed at the end of the new line. Confirm
# whether callers expect the cursor restored to +position+.
#
def overwrite_line(new_line, position=nil, options={})
  # NOTE(review): +text+ is assigned but never read.
  text = @line.text
  @highlighting = false
  if options[:highlight_up_to]
    @highlighting = true
    new_line = highlight_text_up_to(new_line, options[:highlight_up_to])
  end
  # Suppress the cursor-position-changed reaction while we rewrite.
  @ignore_position_change = true
  @line.position = new_line.length
  @line.text = new_line
  @input_box.content = @line.text
  @input_box.position = @line.position
  @event_loop.add_event name: "render", source: @input_box
end
# Wrap the first +position+ characters of +text+ in ANSI bold
# (\e[1m ... \e[0m), leaving the remainder unstyled.
def highlight_text_up_to(text, position)
  ANSIString.new("\e[1m#{text[0...position]}\e[0m#{text[position..-1]}")
end
# Jump the cursor to the beginning of the line and sync the input box.
def move_to_beginning_of_input
  @line.position = @line.bol
  @input_box.position = @line.position
end
# Jump the cursor to the end of the line and sync the input box.
def move_to_end_of_input
  @line.position = @line.length
  @input_box.position = @line.position
end
#
# Move the logical cursor to <tt>pos</tt> and sync the input box.
# The commented-out terminfo sequences show how the physical cursor
# used to be moved before rendering was centralized.
#
def move_to_position(pos)
  rows_to_move = current_terminal_row - terminal_row_for_line_position(pos)
  if rows_to_move > 0
    # rows_to_move.times { @output.print @terminal.term_info.control_string("cuu1") }
    # @terminal.move_up_n_rows(rows_to_move)
  else
    # rows_to_move.abs.times { @output.print @terminal.term_info.control_string("cud1") }
    # @terminal.move_down_n_rows(rows_to_move.abs)
  end
  # NOTE(review): +column+ only fed the commented-out terminal writes
  # and is now unused.
  column = (@line.prompt.length + pos) % terminal_width
  # @output.print @terminal.term_info.control_string("hpa", column)
  # @terminal.move_to_column((@line.prompt.length + pos) % terminal_width)
  @line.position = pos
  @input_box.position = @line.position
end
# Move the logical cursor to the end of the line and sync the input
# box; the physical-cursor terminfo writes are commented out.
def move_to_end_of_line
  rows_to_move_down = number_of_terminal_rows - current_terminal_row
  # rows_to_move_down.times { @output.print @terminal.term_info.control_string("cud1") }
  # @terminal.move_down_n_rows rows_to_move_down
  @line.position = @line.length
  @input_box.position = @line.position
  # NOTE(review): +column+ only fed the commented-out terminal writes
  # and is now unused.
  column = (@line.prompt.length + @line.position) % terminal_width
  # @output.print @terminal.term_info.control_string("hpa", column)
  # @terminal.move_to_column((@line.prompt.length + @line.position) % terminal_width)
end
# Move the physical cursor up +n+ rows. No-op stub: the terminfo
# implementation is commented out.
def move_up_n_lines(n)
  # n.times { @output.print @terminal.term_info.control_string("cuu1") }
  # @terminal.move_up_n_rows(n)
end
# Move the physical cursor down +n+ rows. No-op stub: the terminfo
# implementation is commented out.
def move_down_n_lines(n)
  # n.times { @output.print @terminal.term_info.control_string("cud1") }
  # @terminal.move_down_n_rows(n)
end
private
# Build the root layout box: an inline prompt box, an inline input
# box, and a block-level content box, in that order.
def build_dom_tree
  @prompt_box = TerminalLayout::Box.new(content: "default-prompt>", style: {display: :inline})
  @input_box = TerminalLayout::InputBox.new(content: "", style: {display: :inline})
  @content_box = TerminalLayout::Box.new(content: "", style: {display: :block})
  TerminalLayout::Box.new(children:[@prompt_box, @input_box, @content_box])
end
# Wire up the renderer and render tree for @dom, and subscribe the
# events that trigger re-rendering and cursor repainting.
# Returns the renderer.
def build_renderer
  @renderer = TerminalLayout::TerminalRenderer.new(output: $stdout)
  @render_tree = TerminalLayout::RenderTree.new(
    @dom,
    parent: nil,
    style: { width:terminal_width, height:terminal_height },
    renderer: @renderer
  )
  # Any DOM child change queues a full render on the event loop.
  @dom.on(:child_changed) do |*args|
    @event_loop.add_event name: "render", source: @dom#, target: event[:target]
  end
  # Cursor moves only repaint the cursor, not the whole tree.
  @dom.on :cursor_position_changed do |*args|
    @renderer.render_cursor(@input_box)
  end
  @event_registry.subscribe :render, -> (_) { render(reset: false) }
  @renderer
end
# Lay out and paint the render tree (optionally resetting the renderer
# first), then queue another keyboard poll on the event loop.
def render(reset: false)
  @render_tree.layout
  @renderer.reset if reset
  @renderer.render(@render_tree)
  @event_loop.add_event name: "check_for_keyboard_input"
end
# Rebuild @word_separator from @word_break_characters: a regexp that
# matches any configured break character not preceded by a backslash.
# Returns "" (and stores "") when no break characters are configured.
def update_word_separator
  return @word_separator = "" if @word_break_characters.to_s == ""
  chars = []
  @word_break_characters.each_byte do |c|
    # each_byte always yields an Integer; the old `c.is_a? Fixnum`
    # check raised NameError on Ruby >= 3.2, where Fixnum was removed.
    ch = c.is_a?(Integer) ? c : c.ord
    value = (ch == ?\s.ord) ? ' ' : Regexp.escape(ch.chr).to_s
    chars << value
  end
  @word_separator = /(?<!\\)[#{chars.join}]/
end
# Bind each entry of +key+ (key-name => escape sequence) to +block+.
# The sequence may be an Integer, a String, or an Array of byte
# values; it is normalized to an Array of Integers, registered with
# the terminal under the key name, and mapped to +block+ in @keys.
# Raises BindingException for unrecognized escape codes or types.
def bind_hash(key, block)
  key.each_pair do |j,k|
    raise BindingException, "'#{k[0].chr}' is not a legal escape code for '#{@terminal.class.to_s}'." unless k.length > 1 && @terminal.escape_codes.include?(k[0].ord)
    code = []
    # Dispatch on the class itself: the original compared class NAMES
    # (`case k.class.to_s ... when 'Fixnum'`), which never matched on
    # Ruby >= 2.4 where integers report class Integer.
    case k
    when Integer then
      code = [k]
    when String then
      k.each_byte { |b| code << b }
    when Array then
      code = k
    else
      raise BindingException, "Unable to bind '#{k.to_s}' (#{k.class.to_s})"
    end
    @terminal.keys[j] = code
    @keys[code] = block
  end
end
# Characters from the cursor (skipping +offset+) to the end of the line.
def select_characters_from_cursor(offset=0)
  select_characters(:right, @line.length-@line.position, offset)
end
# Legacy direct terminal write; now a no-op because output goes
# through the renderer (original byte loop left commented for reference).
def raw_print(string)
  # string.each_byte { |c| @output.putc c }
end
# Step +history+ back one entry (filtered by the current matching
# text) and overwrite the line with the retrieved entry. When partial
# matching is supported and highlighting is enabled, the cursor is
# placed at the end of the matched prefix, which is also bolded.
def generic_history_back(history)
  unless history.empty?
    history.back(matching_text: matching_text)
    line = history.get
    return unless line
    cursor_position = nil
    if supports_partial_text_matching? && highlight_history_matching_text
      # NOTE(review): `line &&` is redundant here — the early return
      # above already guarantees line is truthy.
      if line && matching_text
        cursor_position = [line.length, matching_text.length].min
      elsif matching_text
        cursor_position = matching_text.length
      end
    end
    overwrite_line(line, cursor_position, highlight_up_to: cursor_position)
  end
end
# True when the history implementation can match on partial text.
# NOTE(review): uses the bare +history+ reader while sibling methods
# use @history — assumes an attr_reader aliases it; verify.
def supports_partial_text_matching?
  history.supports_partial_text_matching?
end
# Step +history+ forward one entry (filtered by the current matching
# text) and overwrite the line with it; no-op when the history cannot
# move forward. Cursor/highlight placement mirrors generic_history_back.
def generic_history_forward(history)
  if history.forward(matching_text: matching_text)
    line = history.get
    return unless line
    cursor_position = if supports_partial_text_matching? && highlight_history_matching_text && matching_text
      [line.length, matching_text.length].min
    end
    overwrite_line(line, cursor_position, highlight_up_to: cursor_position)
  end
end
# Return the +n+ (plus one, by inclusive-range arithmetic) characters
# adjacent to the cursor in +direction+ (:right or :left), after
# skipping +offset+ characters; nil for any other direction.
def select_characters(direction, n, offset=0)
  case direction
  when :right
    first = @line.position + offset
    @line.text[first..first + n]
  when :left
    last = @line.position - offset
    @line.text[last - n..last]
  end
end
# Install the default key bindings: line commit, completion, deletion,
# interrupt, line clearing, undo/redo, cursor movement, history
# navigation and insert/replace toggling.
def set_default_keys
  bind(:enter) { newline }
  bind(:tab) { complete }
  bind(:backspace) { delete_left_character }
  bind(:ctrl_c) { raise Interrupt }
  bind(:ctrl_k) { clear_line }
  bind(:ctrl_u) { undo }
  # `self.` is required: bare `redo` is the Ruby keyword.
  bind(:ctrl_r) { self.redo }
  bind(:left_arrow) { move_left }
  bind(:right_arrow) { move_right }
  bind(:up_arrow) { history_back }
  bind(:down_arrow) { history_forward }
  bind(:delete) { delete_character }
  bind(:insert) { toggle_mode }
end
# Text used to filter history searches: nil when there is no line or
# it is empty; the cached @matching_text while a search is in
# progress; otherwise the text left of the cursor (which is cached).
def matching_text
  return nil unless @line
  return nil if @line.text == ""
  return @matching_text if @history.searching?
  @matching_text = @line[0...@line.position]
end
end
# Platform-specific reopening of Editor, applied only when ANSI escape
# support was detected.
if RawLine.ansi? then
  class Editor
    if RUBY_PLATFORM.match(/mswin/) && RawLine.win32console? then
      # Windows + win32console: write escape sequences byte-by-byte
      # through the Win32 IO wrapper.
      def escape(string)
        string.each_byte { |c| @win32_io.putc c }
      end
    else
      # Everywhere else the direct write is disabled (rendering is
      # handled by the renderer).
      def escape(string)
        # @output.print string
      end
    end
    # terminal_size is [columns, rows].
    def terminal_width
      terminal_size[0]
    end
    def terminal_height
      terminal_size[1]
    end
    def cursor_position
      terminal.cursor_position
    end
  end
end
end
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
if defined?(RUBY_ENGINE) && RUBY_ENGINE =~ /jruby/
  # JRuby cannot build the C extension; emit a no-op Makefile so the
  # install step still succeeds.
  File.open('Makefile', 'w'){|f| f.puts "all:\n\ninstall:\n" }
else
  require 'mkmf'
  # Append the interpreter's own CFLAGS so platform/arch flags (e.g.
  # multi-arch builds on OS X) are honored instead of being clobbered
  # by the hard-coded flags. RbConfig is the supported constant
  # (Config::CONFIG was removed in Ruby 2.2).
  $CFLAGS = "-g -O2 -Wall -Werror " + RbConfig::CONFIG["CFLAGS"].to_s
  have_func("strlcpy", "string.h")
  create_makefile 'thrift_native'
end
THRIFT-1673: Ruby compile flags for extension for multi-arch builds (OS X)
Client: ruby
Patch: Jake Farrell
Updating extension build to use ruby cflags which include the arch flags needed.
git-svn-id: e03702e31dc1037215618ef8d1dc0ebc5b0b17e5@1371273 13f79535-47bb-0310-9956-ffa450edef68
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
if defined?(RUBY_ENGINE) && RUBY_ENGINE =~ /jruby/
  # JRuby cannot build the C extension; emit a no-op Makefile so the
  # install step still succeeds.
  File.open('Makefile', 'w'){|f| f.puts "all:\n\ninstall:\n" }
else
  require 'mkmf'
  # Append the interpreter's CFLAGS so arch flags are honored.
  # Fix: Config::CONFIG was deprecated in 1.9 and removed in Ruby 2.2;
  # RbConfig::CONFIG is the supported constant.
  $CFLAGS = "-g -O2 -Wall -Werror " + RbConfig::CONFIG["CFLAGS"].to_s
  have_func("strlcpy", "string.h")
  create_makefile 'thrift_native'
end
|
module RCPU
# DSL container for a sequence of assembly instructions and data
# directives. The given block is instance_eval'd, so its body can call
# the register/instruction helper methods defined below.
class Block
  def initialize(&blk)
    @ins = []
    @data = {}
    @next_newlabel = 0
    instance_eval(&blk)
  end

  # Emit a data directive labelled +name+: arrays become ByteData,
  # integers ZeroData (a zero-filled region), strings StringData.
  def data(name, data)
    label(name)
    if data.respond_to?(:to_ary)
      @ins << ByteData.new(data)
    elsif data.respond_to?(:to_int)
      @ins << ZeroData.new(data)
    elsif data.respond_to?(:to_str)
      @ins << StringData.new(data)
    else
      # Typo fixed: message previously read "uknown data type".
      raise AssemblerError, "unknown data type"
    end
  end

  # One lower-case accessor per CPU register.
  Register::ALL.each do |name|
    r = Register.new(name)
    define_method(name.to_s.downcase) { r }
  end

  # One DSL method per basic (two-operand) instruction.
  BasicInstruction::ALL.each do |name|
    define_method(name) do |a, b|
      @ins << BasicInstruction.new(name.to_sym, normalize(a), normalize(b))
    end
  end

  # One DSL method per non-basic (single-operand) instruction.
  NonBasicInstruction::ALL.each do |name|
    define_method(name) do |a|
      @ins << NonBasicInstruction.new(name.to_sym, normalize(a))
    end
  end

  def label(name)
    @ins << name
  end

  # Generate a fresh unique label, emit it, and return its symbol.
  def newlabel
    :"newlabel_#{@next_newlabel}".tap do |sym|
      label sym
      @next_newlabel += 1
    end
  end

  # Coerce a DSL operand into its IR node: integers wrap in Literal,
  # arrays denote indirection, :_foo references external label foo.
  # Fix: Integer replaces Fixnum, which was removed in Ruby 3.2
  # (Integer also matches Fixnum values on older rubies).
  def normalize(value)
    case value
    when Register
      value
    when PlusRegister
      value.value = normalize(value.value) unless value.value.is_a?(Integer)
      value
    when Integer
      Literal.new(value)
    when Array
      Indirection.new(normalize(value[0]))
    when Symbol
      if value.to_s[0] == ?_
        External.new(value.to_s[1..-1].to_sym)
      else
        Label.new(value)
      end
    else
      raise "Missing: #{value.inspect}"
    end
  end

  # Assemble into +mem+; returns [mem, labels] where labels maps label
  # symbols to their offsets within mem.
  def to_machine(mem = [])
    labels = {}
    @ins.each do |i|
      case i
      when Symbol
        labels[i] = mem.size
      else
        i.to_machine(mem)
      end
    end
    [mem, labels]
  end
end
# Aggregates the named code blocks, VM extensions and nested library
# references declared by a library definition.
class Library
  attr_reader :blocks, :extensions, :libraries
  attr_accessor :scope

  def initialize
    @blocks     = {}
    @extensions = []
    @libraries  = []
  end

  # Placeholder; intentionally returns nil.
  def all_blocks
  end

  # Define the named block by evaluating +blk+ through the Block DSL.
  def block(name, &blk)
    @blocks[name] = Block.new(&blk)
  end

  # Record an extension of +klass+ mounted at memory +location+.
  def extension(location, klass, *args, &blk)
    @extensions.push([location, klass, args, blk])
  end

  # Record a dependency on another library (symbol or path).
  def library(name)
    @libraries.push(name)
  end
end
# Register a library under +name+: evaluate +blk+ against a fresh
# Library instance and store it in Linker.default_libraries.
def self.define(name, &blk)
  l = Library.new
  l.instance_eval(&blk)
  Linker.default_libraries[name] = l
end
# Resolves blocks across libraries into a single flat memory image,
# patching label and external references to absolute addresses.
class Linker
  attr_reader :extensions

  # Registry of libraries declared via RCPU.define, keyed by name.
  def self.default_libraries
    @dl ||= {}
  end

  def initialize
    @memory = []
    @blocks = {}
    @seen = {}        # block name => start address in @memory
    @seen_libs = {}   # library => true; guards against cycles
    @extensions = []
    @libraries = {}   # absolute path => loaded Library
  end

  # Collect blocks and extensions from +library+ and, recursively,
  # from every library it references.
  def gather(library)
    return if @seen_libs[library]
    @seen_libs[library] = true
    @blocks.update(library.blocks)
    @extensions.concat(library.extensions)
    library.libraries.each do |l|
      gather(find(l, library.scope))
    end
  end

  # Look up a library: symbols address the default registry, strings
  # are file paths resolved relative to +scope+.
  def find(name, scope)
    case name
    when Symbol
      self.class.default_libraries[name] or
        raise AssemblerError, "no lib: #{name}"
    when String
      full = File.expand_path(name, scope)
      @libraries[full] || load_file(full)
    end
  end

  # Load and cache a library definition from +file+.
  def load_file(file)
    l = Library.new
    l.scope = File.dirname(file)
    l.instance_eval(File.read(file), file)
    @libraries[file] = l
  end

  # Entry point: link +library+ starting from the block named +name+.
  def compile(library, name = :main)
    gather(library)
    block = @blocks[name] or raise AssemblerError, "no block: #{name}"
    compile_block(name, block)
  end

  # Append +block+ to @memory, resolving labels immediately and
  # queueing externals for compilation afterwards.
  def compile_block(name, block)
    @seen[name] = @memory.size
    pending = []
    m, labels = block.to_machine
    start = @memory.size
    m.each do |word|
      case word
      when External
        # Fix: use a distinct variable — the original reassigned the
        # +name+ parameter here, shadowing the block being compiled.
        ext_name = word.name
        pending << ext_name unless @seen[ext_name]
        @memory << ext_name
      when Label
        location = labels[word.name] or raise AssemblerError, "no label: #{word.name}"
        @memory << location + start
      else
        @memory << word
      end
    end
    pending.each do |ext_name|
      # Fix: skip names compiled meanwhile — the original could queue
      # the same external twice (two references in one block) and emit
      # its code twice.
      next if @seen[ext_name]
      ext_block = @blocks[ext_name]
      raise AssemblerError, "no external label: #{ext_name}" if ext_block.nil?
      compile_block(ext_name, ext_block)
    end
  end

  # Replace symbolic words with the addresses recorded in @seen.
  # Fix: Integer replaces Fixnum, which was removed in Ruby 3.2.
  def finalize
    @memory.map do |word|
      case word
      when Symbol
        @seen[word]
      when Integer
        word
      else
        raise AssemblerError, "unknown word: #{word}"
      end
    end
  end

  # Hex-dump the finalized memory image, eight words per row.
  def dump
    finalize.each_slice(8).each_with_index do |r, i|
      print "#{(i*8).to_s(16).rjust(4, '0')}: "
      puts r.map { |x| x.to_s(16).rjust(4, '0') }.join(" ")
    end
  end
end
end
Support data without labels
module RCPU
# DSL container for a sequence of assembly instructions and data
# directives. The given block is instance_eval'd, so its body can call
# the register/instruction helper methods defined below.
class Block
  def initialize(&blk)
    @ins = []
    @data = {}
    @next_newlabel = 0
    instance_eval(&blk)
  end

  # Emit a data directive, optionally labelled: data(:name, value) or
  # data(value). Arrays become ByteData, integers ZeroData (a
  # zero-filled region), strings StringData.
  def data(*args)
    data = args.pop
    if name = args.shift
      label(name)
    end
    if data.respond_to?(:to_ary)
      @ins << ByteData.new(data)
    elsif data.respond_to?(:to_int)
      @ins << ZeroData.new(data)
    elsif data.respond_to?(:to_str)
      @ins << StringData.new(data)
    else
      # Typo fixed: message previously read "uknown data type".
      raise AssemblerError, "unknown data type"
    end
  end

  # One lower-case accessor per CPU register.
  Register::ALL.each do |name|
    r = Register.new(name)
    define_method(name.to_s.downcase) { r }
  end

  # One DSL method per basic (two-operand) instruction.
  BasicInstruction::ALL.each do |name|
    define_method(name) do |a, b|
      @ins << BasicInstruction.new(name.to_sym, normalize(a), normalize(b))
    end
  end

  # One DSL method per non-basic (single-operand) instruction.
  NonBasicInstruction::ALL.each do |name|
    define_method(name) do |a|
      @ins << NonBasicInstruction.new(name.to_sym, normalize(a))
    end
  end

  def label(name)
    @ins << name
  end

  # Generate a fresh unique label, emit it, and return its symbol.
  def newlabel
    :"newlabel_#{@next_newlabel}".tap do |sym|
      label sym
      @next_newlabel += 1
    end
  end

  # Coerce a DSL operand into its IR node: integers wrap in Literal,
  # arrays denote indirection, :_foo references external label foo.
  # Fix: Integer replaces Fixnum, which was removed in Ruby 3.2
  # (Integer also matches Fixnum values on older rubies).
  def normalize(value)
    case value
    when Register
      value
    when PlusRegister
      value.value = normalize(value.value) unless value.value.is_a?(Integer)
      value
    when Integer
      Literal.new(value)
    when Array
      Indirection.new(normalize(value[0]))
    when Symbol
      if value.to_s[0] == ?_
        External.new(value.to_s[1..-1].to_sym)
      else
        Label.new(value)
      end
    else
      raise "Missing: #{value.inspect}"
    end
  end

  # Assemble into +mem+; returns [mem, labels] where labels maps label
  # symbols to their offsets within mem.
  def to_machine(mem = [])
    labels = {}
    @ins.each do |i|
      case i
      when Symbol
        labels[i] = mem.size
      else
        i.to_machine(mem)
      end
    end
    [mem, labels]
  end
end
# Aggregates the named code blocks, VM extensions and nested library
# references declared by a library definition.
class Library
  attr_reader :blocks, :extensions, :libraries
  attr_accessor :scope

  def initialize
    @blocks     = {}
    @extensions = []
    @libraries  = []
  end

  # Placeholder; intentionally returns nil.
  def all_blocks
  end

  # Define the named block by evaluating +blk+ through the Block DSL.
  def block(name, &blk)
    @blocks[name] = Block.new(&blk)
  end

  # Record an extension of +klass+ mounted at memory +location+.
  def extension(location, klass, *args, &blk)
    @extensions.push([location, klass, args, blk])
  end

  # Record a dependency on another library (symbol or path).
  def library(name)
    @libraries.push(name)
  end
end
# Register a library under +name+: evaluate +blk+ against a fresh
# Library instance and store it in Linker.default_libraries.
def self.define(name, &blk)
  l = Library.new
  l.instance_eval(&blk)
  Linker.default_libraries[name] = l
end
# Resolves blocks across libraries into a single flat memory image,
# patching label and external references to absolute addresses.
class Linker
  attr_reader :extensions

  # Registry of libraries declared via RCPU.define, keyed by name.
  def self.default_libraries
    @dl ||= {}
  end

  def initialize
    @memory = []
    @blocks = {}
    @seen = {}        # block name => start address in @memory
    @seen_libs = {}   # library => true; guards against cycles
    @extensions = []
    @libraries = {}   # absolute path => loaded Library
  end

  # Collect blocks and extensions from +library+ and, recursively,
  # from every library it references.
  def gather(library)
    return if @seen_libs[library]
    @seen_libs[library] = true
    @blocks.update(library.blocks)
    @extensions.concat(library.extensions)
    library.libraries.each do |l|
      gather(find(l, library.scope))
    end
  end

  # Look up a library: symbols address the default registry, strings
  # are file paths resolved relative to +scope+.
  def find(name, scope)
    case name
    when Symbol
      self.class.default_libraries[name] or
        raise AssemblerError, "no lib: #{name}"
    when String
      full = File.expand_path(name, scope)
      @libraries[full] || load_file(full)
    end
  end

  # Load and cache a library definition from +file+.
  def load_file(file)
    l = Library.new
    l.scope = File.dirname(file)
    l.instance_eval(File.read(file), file)
    @libraries[file] = l
  end

  # Entry point: link +library+ starting from the block named +name+.
  def compile(library, name = :main)
    gather(library)
    block = @blocks[name] or raise AssemblerError, "no block: #{name}"
    compile_block(name, block)
  end

  # Append +block+ to @memory, resolving labels immediately and
  # queueing externals for compilation afterwards.
  def compile_block(name, block)
    @seen[name] = @memory.size
    pending = []
    m, labels = block.to_machine
    start = @memory.size
    m.each do |word|
      case word
      when External
        # Fix: use a distinct variable — the original reassigned the
        # +name+ parameter here, shadowing the block being compiled.
        ext_name = word.name
        pending << ext_name unless @seen[ext_name]
        @memory << ext_name
      when Label
        location = labels[word.name] or raise AssemblerError, "no label: #{word.name}"
        @memory << location + start
      else
        @memory << word
      end
    end
    pending.each do |ext_name|
      # Fix: skip names compiled meanwhile — the original could queue
      # the same external twice (two references in one block) and emit
      # its code twice.
      next if @seen[ext_name]
      ext_block = @blocks[ext_name]
      raise AssemblerError, "no external label: #{ext_name}" if ext_block.nil?
      compile_block(ext_name, ext_block)
    end
  end

  # Replace symbolic words with the addresses recorded in @seen.
  # Fix: Integer replaces Fixnum, which was removed in Ruby 3.2.
  def finalize
    @memory.map do |word|
      case word
      when Symbol
        @seen[word]
      when Integer
        word
      else
        raise AssemblerError, "unknown word: #{word}"
      end
    end
  end

  # Hex-dump the finalized memory image, eight words per row.
  def dump
    finalize.each_slice(8).each_with_index do |r, i|
      print "#{(i*8).to_s(16).rjust(4, '0')}: "
      puts r.map { |x| x.to_s(16).rjust(4, '0') }.join(" ")
    end
  end
end
end
|
# Supplementary RDF vocabularies declared via the Vocabulary class
# generator: W3C pointers, RDFa, the XHTML vocabulary, the XML
# namespace, and XML Schema Instance.
module RDF
  class PTR < Vocabulary("http://www.w3.org/2009/pointers#"); end
  class RDFA < Vocabulary("http://www.w3.org/ns/rdfa#"); end
  class XHV < Vocabulary("http://www.w3.org/1999/xhtml/vocab#"); end
  class XML < Vocabulary("http://www.w3.org/XML/1998/namespace"); end
  class XSI < Vocabulary("http://www.w3.org/2001/XMLSchema-instance"); end
end
Remove vocab definition for XHV, which is defined in RDF.rb.
# Supplementary RDF vocabularies declared via the Vocabulary class
# generator. XHV is intentionally absent: RDF.rb ships its own
# definition.
module RDF
  class PTR < Vocabulary("http://www.w3.org/2009/pointers#"); end
  class RDFA < Vocabulary("http://www.w3.org/ns/rdfa#"); end
  class XML < Vocabulary("http://www.w3.org/XML/1998/namespace"); end
  class XSI < Vocabulary("http://www.w3.org/2001/XMLSchema-instance"); end
end
|
require 'faraday'
require 'faraday_middleware'
require 'redmine/client/version'
require 'redmine/client/authorization_token'
module Redmine
class Client
  class << self
    # Global default API base url, used when none is passed to #new.
    attr_accessor :base_url
  end

  attr_reader :base_url, :access_key

  # access_key: Redmine API token sent on every request.
  # base_url:   per-instance override of Client.base_url.
  # Raises ArgumentError when no base url is available from either.
  def initialize(access_key, base_url=nil)
    @access_key = access_key
    @base_url = base_url || self.class.base_url
    unless @base_url
      raise ArgumentError, "You must provide an api base url, either Redmine::Client.new(token, base_url) or Redmine::Client.base_url = base_url"
    end
  end

  # Memoized Faraday connection: JSON request encoding, token
  # authorization, and JSON response parsing.
  def faraday
    @faraday ||= Faraday.new(:url => base_url) do |f|
      f.request :json
      f.request :authorization_token, access_key
      f.adapter Faraday::default_adapter
      f.response :json, :content_type => /\bjson$/
    end
  end

  # Metaprogrammed CRUD: defines create_/find_/update_/delete_<singular>
  # methods hitting /<plural>(/:id).json. Each returns the parsed body,
  # or the full Faraday response when full_response is true.
  def self.crud(plural, singular)
    class_eval <<-EOF
      def create_#{singular}(params, full_response=false)
        resp = faraday.post("/#{plural}.json", {"#{singular}" => params})
        full_response ? resp : resp.body
      end
      def find_#{singular}(id, full_response=false)
        resp = faraday.get("/#{plural}/\#{id}.json")
        full_response ? resp : resp.body
      end
      def update_#{singular}(id, params, full_response=false)
        resp = faraday.put("/#{plural}/\#{id}.json", {"#{singular}" => params})
        full_response ? resp : resp.body
      end
      def delete_#{singular}(id, full_response=false)
        resp = faraday.delete("/#{plural}/\#{id}.json")
        full_response ? resp : resp.body
      end
    EOF
  end

  crud :users, :user
  crud :projects, :project
  crud :issues, :issue

  # Adds a user to a project; 3 is presumably the default "member"
  # role id — TODO confirm against the target Redmine instance.
  def add_member_to_project(user_id, project_id, role_ids=[3])
    faraday.post("/projects/#{project_id}/memberships.json", {
      "membership" => {
        "user_id" => user_id,
        "role_ids" => Array(role_ids),
      }})
  end
end
end
Check for errors in the redmine response
require 'faraday'
require 'faraday_middleware'
require 'redmine/client/version'
require 'redmine/client/authorization_token'
module Redmine
class Client
  # Raised (when Client.raise_on_error? is true) for any non-success
  # Redmine response; wraps the full Faraday response.
  class ResponseError < StandardError
    attr_reader :response
    def initialize(response)
      @response = response
    end
    def body
      response.body
    end
    def message
      "Error #{response.status}: #{response.body}"
    end
  end

  class << self
    # Global default API base url, used when none is passed to #new.
    attr_accessor :base_url
    attr_writer :raise_on_error
    # Whether check_errors raises ResponseError; defaults to true the
    # first time it is read.
    def raise_on_error?
      return @raise_on_error if defined?(@raise_on_error)
      @raise_on_error = true
    end
  end

  attr_reader :base_url, :access_key

  # access_key: Redmine API token sent on every request.
  # base_url:   per-instance override of Client.base_url.
  # Raises ArgumentError when no base url is available from either.
  def initialize(access_key, base_url=nil)
    @access_key = access_key
    @base_url = base_url || self.class.base_url
    unless @base_url
      raise ArgumentError, "You must provide an api base url, either Redmine::Client.new(token, base_url) or Redmine::Client.base_url = base_url"
    end
  end

  # Memoized Faraday connection: JSON request encoding, token
  # authorization, and JSON response parsing.
  def faraday
    @faraday ||= Faraday.new(:url => base_url) do |f|
      f.request :json
      f.request :authorization_token, access_key
      f.adapter Faraday::default_adapter
      f.response :json, :content_type => /\bjson$/
    end
  end

  # Metaprogrammed CRUD: defines create_/find_/update_/delete_<singular>
  # for /<plural>(/:id).json. Each checks the response for errors and
  # returns the parsed body (or the raw response when full_response).
  # NOTE(review): delete_* declares a raise_on_error parameter that is
  # never used — either honor it in check_errors or drop it.
  def self.crud(plural, singular)
    class_eval <<-EOF
      def create_#{singular}(params, full_response=false)
        resp = faraday.post("/#{plural}.json", {"#{singular}" => params})
        check_errors(resp)
        full_response ? resp : resp.body
      end
      def find_#{singular}(id, full_response=false)
        resp = faraday.get("/#{plural}/\#{id}.json")
        check_errors(resp)
        full_response ? resp : resp.body
      end
      def update_#{singular}(id, params, full_response=false)
        resp = faraday.put("/#{plural}/\#{id}.json", {"#{singular}" => params})
        check_errors(resp)
        full_response ? resp : resp.body
      end
      def delete_#{singular}(id, full_response=false, raise_on_error=true)
        resp = faraday.delete("/#{plural}/\#{id}.json")
        check_errors(resp)
        full_response ? resp : resp.body
      end
    EOF
  end

  crud :users, :user
  crud :projects, :project
  crud :issues, :issue

  # Adds a user to a project; 3 is presumably the default "member"
  # role id — TODO confirm against the target Redmine instance.
  # NOTE(review): unlike the CRUD methods, this does not call
  # check_errors on the response.
  def add_member_to_project(user_id, project_id, role_ids=[3])
    faraday.post("/projects/#{project_id}/memberships.json", {
      "membership" => {
        "user_id" => user_id,
        "role_ids" => Array(role_ids),
      }})
  end

  # Logs non-success responses to stderr and raises ResponseError when
  # the class-level raise_on_error? flag is set.
  def check_errors(response)
    return if response.success?
    $stderr.puts "REDMINE ERROR (#{response.status}): #{response.body}"
    if self.class.raise_on_error?
      raise ResponseError.new(response)
    end
  end
end
end
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.