CombinedText stringlengths 4 3.42M |
|---|
# Namespace for the Appilf API client.
module Appilf
  # Placeholder for the Listing model; the implementation is added in a
  # later revision.
  class Listing
  end
end
First version of the Listing model.
module Appilf
  # Represents a single listing returned by the Appilf API.
  class Listing < AppilfObject
    extend APIActions
    extend PaginatedResource

    LISTING_PATH = "/listings"

    # Fetches the paginated collection of listings.
    # @return whatever PaginatedResource#set_page builds from the response
    def self.list
      response = api_get(LISTING_PATH)
      set_page(response)
    end

    # Retrieves a single listing by id.
    # @param listing_id [String, Integer] the listing identifier
    # @return [Listing]
    def self.retrieve(listing_id)
      response = api_get("#{LISTING_PATH}/#{listing_id}")
      new(response)
    end

    # @param listing_hash [Hash] parsed API payload with a 'data' key
    def initialize(listing_hash)
      add_relationships(listing_hash['data'].delete('relationships'))
      super
    end

    # Defines one reader method per relationship entry.
    #
    # FIX: guard against payloads without a 'relationships' key — previously
    # this raised NoMethodError on nil.each_pair.
    #
    # NOTE(review): readers are defined on the class, so constructing a new
    # instance redefines them for every existing instance. Confirm this is
    # intended; define_singleton_method on the instance may be what was meant.
    def add_relationships(relationships_hash)
      return unless relationships_hash
      self.class.instance_eval do
        relationships_hash.each_pair do |k, v|
          define_method(k) { Util.translate_from_response(v) }
        end
      end
    end
  end
end
require 'archetype/interface'
module Archetype
  # Mixin that hooks an including class into the Archetype registry.
  module Base
    extend ActiveSupport::Concern

    included do
      extend BaseHelpers
      include Archetype::Interface
      # Register the including class so Archetype can look it up later.
      Archetype.register(self)
      # Instances answer #archetype_name by delegating to their class.
      delegate :archetype_name, to: :class
    end

    # Class-level helpers added to every including class.
    module BaseHelpers
      # Registry key for this archetype; assumes the including class
      # responds to #controller_name (Rails controllers do) — TODO confirm
      # for non-controller includers.
      def archetype_name
        controller_name
      end

      # Looks up the controller registered under this archetype's name.
      def archetype_controller
        Archetype.controllers[archetype_name.to_sym]
      end
    end
  end
end
Handle view paths for base
require 'archetype/interface'
module Archetype
  # Mixin that hooks an including class into the Archetype registry and
  # adds the shared view path.
  module Base
    extend ActiveSupport::Concern

    included do
      extend BaseHelpers
      include Archetype::Interface
      # Register the including class so Archetype can look it up later.
      Archetype.register(self)
      # Instances answer #archetype_name by delegating to their class.
      delegate :archetype_name, to: :class
    end

    # Class-level helpers added to every including class.
    module BaseHelpers
      # Registry key for this archetype; assumes the including class
      # responds to #controller_name (Rails controllers do).
      def archetype_name
        controller_name
      end

      # Looks up the controller registered under this archetype's name.
      def archetype_controller
        Archetype.controllers[archetype_name.to_sym]
      end

      # Append the shared view path so base templates resolve.
      def local_prefixes
        super.push('archetype/base')
      end

      # FIX: was defined as `inherited?(base)`, which Ruby never invokes —
      # the subclassing hook is `inherited` (no question mark), so subclasses
      # were never registered (the old TODO admitted this was untested).
      def inherited(base)
        super
        Archetype.register(base)
      end
    end
  end
end
|
module ArtDecomp class FSM
  # Builds an FSM from a KISS-format description. The argument is either the
  # KISS text itself or a path to a file containing it (a string without a
  # newline is treated as a path).
  def self.from_kiss kiss
    kiss = File.read kiss unless kiss.index "\n"
    inputs, outputs, state, next_state = [], [], [], []
    # FIX: use each_line — passing a block to String#lines is deprecated and
    # removed in modern Ruby.
    kiss.each_line do |line|
      case line
      # full row: inputs, state, next state, outputs
      when /^\s*[01-]+\s+\S+\s+\S+\s+[01-]+\s*$/ then ins, st, nxt, outs = *line.split
      # truth-table row: inputs and outputs only; both states are don't-cares
      when /^\s*[01-]+\s+[01-]+\s*$/ then st, nxt, ins, outs = DontCare, DontCare, *line.split
      else next
      end
      inputs << ins.split(//).map(&:to_sym)
      outputs << outs.split(//).map(&:to_sym)
      state << (st == '*' ? DontCare : st.to_sym)
      next_state << (nxt == '*' ? DontCare : nxt.to_sym)
    end
    # FIXME: the below hack makes state have all possible values by expanding a don’t-care a bit (if present)
    (next_state - state - [DontCare]).each do |missing_state|
      i = state.index DontCare
      state << missing_state
      next_state << next_state[i]
      inputs << inputs[i]
      outputs << outputs[i]
    end if state.index DontCare
    new inputs.transpose, outputs.transpose, state, next_state
  end

  # Columns are stored transposed (one array per input/output pin) and frozen.
  def initialize inputs, outputs, state, next_state
    @inputs, @outputs, @state, @next_state = inputs.freeze, outputs.freeze, state.freeze, next_state.freeze
  end

  # Structural equality over all four column sets.
  def == other
    [@inputs, @outputs, @state, @next_state] == [other.inputs, other.outputs, other.state, other.next_state]
  end

  # Blanket of all outputs combined with the next-state blanket.
  def beta_f
    @outputs.map { |o| Blanket.from_array o }.inject(:*) * Blanket.from_array(@next_state)
  end

  # Blanket of the present-state column.
  def beta_q
    Blanket.from_array @state
  end

  # Blanket induced by the selected input columns.
  def beta_x ins
    beta @inputs, ins
  end

  # Blanket induced by the selected output columns.
  def beta_y ins
    beta @outputs, ins
  end

  alias eql? ==

  # Re-parses itself with don't-cares in the given input columns expanded.
  def expand_x ins
    return self unless ins.any? { |i| @inputs[i].include? DontCare }
    FSM.from_kiss to_kiss.lines.map { |line| line.dc_expand(ins) }.flatten.sort.join
  end

  # Cell count needed to implement this FSM in the given architectures;
  # zero when the combined output blanket is trivial.
  def fsm_cells archs
    return 0 if @outputs.map { |output| Blanket.from_array output }.inject(:*).size < 2
    Arch[input_count + beta_q.pins, output_count + beta_q.pins].cells archs
  end

  # Hash consistent with #== (instances may be used as Hash keys).
  def hash
    @inputs.hash ^ @outputs.hash ^ @state.hash ^ @next_state.hash
  end

  # True when the FSM fits a single cell of the widest architecture.
  def implementable_in? archs
    return true if @outputs.map { |output| Blanket.from_array output }.inject(:*).size < 2
    input_count + beta_q.pins <= archs.map(&:pins).max
  end

  def input_count
    @inputs.size
  end

  # Orders input indices by how many required separations each provides,
  # split around the per-pin average of the state blanket.
  def input_relevance
    seps = beta_f.seps
    perpin = (beta_q.seps & seps).size.to_f / beta_q.pins
    more, less = (0...input_count).map { |i| [(beta_x(Set[i]).seps & seps).size, i] }.sort.reverse.reject { |rel, _| rel.zero? }.partition { |rel, _| rel > perpin }
    more.map(&:last) + [nil] * beta_q.pins + less.map(&:last)
  end

  # Encoding of the present-state column over the given rows.
  def q_encoding rows
    # FIXME: consider tr DontCare, '*'
    encoding @state, rows
  end

  def output_count
    @outputs.size
  end

  # Rows whose present state matches (or don't-cares) the next state of the
  # first row in the given block.
  def state_rows_of_next_state_of rows
    state = @next_state[rows.bits.first]
    B[*(0...@state.size).select { |i| @state[i] == state or @state[i] == DontCare }]
  end

  # Short human-readable summary: inputs/outputs + distinct states.
  def stats
    "#{@inputs.size}/#{@outputs.size}+#{(@state.uniq - [DontCare]).size}s"
  end

  # Serialises back to KISS format; state columns are omitted for pure
  # truth tables.
  def to_kiss
    st = @state.map { |e| e == DontCare ? '*' : e }
    nxt = @next_state.map { |e| e == DontCare ? '*' : e }
    div = Array.new @state.size, ' '
    mid = truth_table? ? [div] : [div, st, div, nxt, div]
    cols = @inputs + mid + @outputs
    KISS.new(cols.transpose.map(&:join)).formatted
  end

  # A truth table is an FSM whose state columns are entirely don't-cares.
  def truth_table?
    @state.all? { |s| s == DontCare } and @next_state.all? { |ns| ns == DontCare }
  end

  # Concatenated encodings of the selected input columns over the rows.
  def x_encoding ins, rows
    ins.map { |i| encoding @inputs[i], rows }.join
  end

  # Concatenated encodings of all output columns over the rows.
  def y_encoding rows
    @outputs.map { |output| encoding output, rows }.join
  end

  protected

  attr_reader :inputs, :outputs, :state, :next_state

  private

  # FIX: shared implementation for beta_x/beta_y, which previously
  # duplicated this logic verbatim. Returns the product of the blankets of
  # the selected columns, or the trivial one-block blanket when none are
  # selected.
  def beta columns, ins
    return Blanket[B[*0...@state.size]] if ins.empty?
    ins.map { |i| Blanket.from_array columns[i] }.inject :*
  end

  # Single encoding symbol of a column over the rows of a block; don't-cares
  # are ignored, and mixed concrete values are an error.
  def encoding column, rows
    encs = rows.bits.map { |row| column[row] }.uniq - [DontCare]
    case encs.size
    when 0 then DontCare.to_s
    when 1 then encs.first.to_s
    else raise AmbiguousEncodingQuery, "ambiguous encoding query: block #{rows.bits.join ','}"
    end
  end
end end
factor out FSM#beta
module ArtDecomp class FSM
  # Builds an FSM from a KISS-format description. The argument is either the
  # KISS text itself or a path to a file containing it (a string without a
  # newline is treated as a path).
  def self.from_kiss kiss
    kiss = File.read kiss unless kiss.index "\n"
    inputs, outputs, state, next_state = [], [], [], []
    # NOTE(review): the block form of String#lines is deprecated/removed in
    # modern Ruby — confirm target Ruby version or switch to each_line.
    kiss.lines do |line|
      case line
      # full row: inputs, state, next state, outputs
      when /^\s*[01-]+\s+\S+\s+\S+\s+[01-]+\s*$/ then ins, st, nxt, outs = *line.split
      # truth-table row: inputs and outputs only; both states are don't-cares
      when /^\s*[01-]+\s+[01-]+\s*$/ then st, nxt, ins, outs = DontCare, DontCare, *line.split
      else next
      end
      inputs << ins.split(//).map(&:to_sym)
      outputs << outs.split(//).map(&:to_sym)
      state << (st == '*' ? DontCare : st.to_sym)
      next_state << (nxt == '*' ? DontCare : nxt.to_sym)
    end
    # FIXME: the below hack makes state have all possible values by expanding a don’t-care a bit (if present)
    (next_state - state - [DontCare]).each do |missing_state|
      i = state.index DontCare
      state << missing_state
      next_state << next_state[i]
      inputs << inputs[i]
      outputs << outputs[i]
    end if state.index DontCare
    new inputs.transpose, outputs.transpose, state, next_state
  end

  # Columns are stored transposed (one array per input/output pin) and frozen.
  def initialize inputs, outputs, state, next_state
    @inputs, @outputs, @state, @next_state = inputs.freeze, outputs.freeze, state.freeze, next_state.freeze
  end

  # Structural equality over all four column sets.
  def == other
    [@inputs, @outputs, @state, @next_state] == [other.inputs, other.outputs, other.state, other.next_state]
  end

  # Blanket of all outputs combined with the next-state blanket.
  def beta_f
    @outputs.map { |o| Blanket.from_array o }.inject(:*) * Blanket.from_array(@next_state)
  end

  # Blanket of the present-state column.
  def beta_q
    Blanket.from_array @state
  end

  # Blanket induced by the selected input columns.
  def beta_x ins
    beta @inputs, ins
  end

  # Blanket induced by the selected output columns.
  def beta_y ins
    beta @outputs, ins
  end

  alias eql? ==

  # Re-parses itself with don't-cares in the given input columns expanded.
  def expand_x ins
    return self unless ins.any? { |i| @inputs[i].include? DontCare }
    FSM.from_kiss to_kiss.lines.map { |line| line.dc_expand(ins) }.flatten.sort.join
  end

  # Cell count needed to implement this FSM in the given architectures;
  # zero when the combined output blanket is trivial.
  def fsm_cells archs
    return 0 if @outputs.map { |output| Blanket.from_array output }.inject(:*).size < 2
    Arch[input_count + beta_q.pins, output_count + beta_q.pins].cells archs
  end

  # Hash consistent with #== (instances may be used as Hash keys).
  def hash
    @inputs.hash ^ @outputs.hash ^ @state.hash ^ @next_state.hash
  end

  # True when the FSM fits a single cell of the widest architecture.
  def implementable_in? archs
    return true if @outputs.map { |output| Blanket.from_array output }.inject(:*).size < 2
    input_count + beta_q.pins <= archs.map(&:pins).max
  end

  def input_count
    @inputs.size
  end

  # Orders input indices by how many required separations each provides,
  # split around the per-pin average of the state blanket.
  def input_relevance
    seps = beta_f.seps
    perpin = (beta_q.seps & seps).size.to_f / beta_q.pins
    more, less = (0...input_count).map { |i| [(beta_x(Set[i]).seps & seps).size, i] }.sort.reverse.reject { |rel, i| rel.zero? }.partition { |rel, i| rel > perpin }
    more.map(&:last) + [nil] * beta_q.pins + less.map(&:last)
  end

  # Encoding of the present-state column over the given rows.
  def q_encoding rows
    # FIXME: consider tr DontCare, '*'
    encoding @state, rows
  end

  def output_count
    @outputs.size
  end

  # Rows whose present state matches (or don't-cares) the next state of the
  # first row in the given block.
  def state_rows_of_next_state_of rows
    state = @next_state[rows.bits.first]
    B[*(0...@state.size).select { |i| @state[i] == state or @state[i] == DontCare }]
  end

  # Short human-readable summary: inputs/outputs + distinct states.
  def stats
    "#{@inputs.size}/#{@outputs.size}+#{(@state.uniq - [DontCare]).size}s"
  end

  # Serialises back to KISS format; state columns are omitted for pure
  # truth tables.
  def to_kiss
    st = @state.map { |e| e == DontCare ? '*' : e }
    nxt = @next_state.map { |e| e == DontCare ? '*' : e }
    div = Array.new @state.size, ' '
    mid = truth_table? ? [div] : [div, st, div, nxt, div]
    cols = @inputs + mid + @outputs
    KISS.new(cols.transpose.map(&:join)).formatted
  end

  # A truth table is an FSM whose state columns are entirely don't-cares.
  def truth_table?
    @state.all? { |s| s == DontCare } and @next_state.all? { |ns| ns == DontCare }
  end

  # Concatenated encodings of the selected input columns over the rows.
  def x_encoding ins, rows
    ins.map { |i| encoding @inputs[i], rows }.join
  end

  # Concatenated encodings of all output columns over the rows.
  def y_encoding rows
    @outputs.map { |output| encoding output, rows }.join
  end

  protected

  attr_reader :inputs, :outputs, :state, :next_state

  private

  # Shared implementation of beta_x/beta_y: product of the blankets of the
  # selected columns, or the trivial one-block blanket when none selected.
  def beta column, ins
    return Blanket[B[*0...@state.size]] if ins.empty?
    ins.map { |i| Blanket.from_array column[i] }.inject :*
  end

  # Single encoding symbol of a column over the rows of a block; don't-cares
  # are ignored, and mixed concrete values are an error.
  def encoding column, rows
    encs = rows.bits.map { |row| column[row] }.uniq - [DontCare]
    case encs.size
    when 0 then DontCare.to_s
    when 1 then encs.first.to_s
    else raise AmbiguousEncodingQuery, "ambiguous encoding query: block #{rows.bits.join ','}"
    end
  end
end end
|
require 'active_record'
# a stub
# possible TODO: remove and and refactor into an acts_as_feature mixin
module Arturo
  # ActiveRecord-backed feature flag with percentage-based rollout.
  class Feature < ::ActiveRecord::Base
    include Arturo::SpecialHandling

    # FIX: was /^[a-zA-z][a-zA-Z0-9_]*$/ — the [a-zA-z] range is a typo that
    # also matches '[', '\', ']', '^', '_' and '`'; and ^/$ anchor per line,
    # not the whole string. \A/\z anchor the full value.
    Arturo::Feature::SYMBOL_REGEX = /\A[a-zA-Z][a-zA-Z0-9_]*\z/
    DEFAULT_ATTRIBUTES = { :deployment_percentage => 0 }.with_indifferent_access

    attr_accessible :symbol, :deployment_percentage if ActiveRecord::VERSION::MAJOR < 4
    attr_readonly :symbol

    validates_presence_of :symbol, :deployment_percentage
    validates_uniqueness_of :symbol, :allow_blank => true
    validates_numericality_of :deployment_percentage,
                              :only_integer => true,
                              :allow_blank => true,
                              :greater_than_or_equal_to => 0,
                              :less_than_or_equal_to => 100

    # Looks up a feature by symbol. Also accepts a Feature as input.
    # @param [Symbol, Arturo::Feature] feature_or_symbol a Feature or the Symbol of a Feature
    # @return [Arturo::Feature, Arturo::NoSuchFeature] the Feature if found, else Arturo::NoSuchFeature
    def self.to_feature(feature_or_symbol)
      return feature_or_symbol if feature_or_symbol.kind_of?(self)
      symbol = feature_or_symbol.to_sym
      self.where(:symbol => symbol).first || Arturo::NoSuchFeature.new(symbol)
    end

    # Looks up a feature by symbol. Also accepts a Feature as input.
    # @param [Symbol, Arturo::Feature] feature_or_symbol a Feature or the Symbol of a Feature
    # @return [Arturo::Feature, nil] the Feature if found, else nil
    def self.find_feature(feature_or_symbol)
      feature = to_feature(feature_or_symbol)
      feature.is_a?(Arturo::NoSuchFeature) ? nil : feature
    end

    # Create a new Feature, applying DEFAULT_ATTRIBUTES under any
    # caller-supplied attributes.
    def initialize(*args, &block)
      args[0] = DEFAULT_ATTRIBUTES.merge(args[0] || {})
      super(*args, &block)
    end

    # @param [Object] feature_recipient a User, Account,
    #                 or other model with an #id method
    # @return [true,false] whether or not this feature is enabled
    #                      for feature_recipient
    # @see Arturo::SpecialHandling#whitelisted?
    # @see Arturo::SpecialHandling#blacklisted?
    def enabled_for?(feature_recipient)
      return false if feature_recipient.nil?
      return false if blacklisted?(feature_recipient)
      return true if whitelisted?(feature_recipient)
      passes_threshold?(feature_recipient)
    end

    # Human-readable name, looked up via I18n with a titleized fallback.
    def name
      return I18n.translate("arturo.feature.nameless") if symbol.blank?
      I18n.translate("arturo.feature.#{symbol}", :default => symbol.to_s.titleize)
    end

    def to_s
      "Feature #{name}"
    end

    # URL slug of the form "<id>-<symbol>"; nil for unsaved records.
    def to_param
      persisted? ? "#{id}-#{symbol.to_s.parameterize}" : nil
    end

    def inspect
      "<Arturo::Feature #{name}, deployed to #{deployment_percentage}%>"
    end

    # Timestamp of the most recently updated feature (cache-busting aid).
    def self.last_updated_at
      maximum(:updated_at)
    end

    protected

    # Deterministic per-recipient rollout: hashes recipient id and feature id
    # into 0...100 and compares against the deployment percentage.
    def passes_threshold?(feature_recipient)
      threshold = self.deployment_percentage || 0
      return true if threshold == 100
      return false if threshold == 0 || !feature_recipient.id
      (((feature_recipient.id + (self.id || 1) + 17) * 13) % 100) < threshold
    end
  end
end
Look up features using `to_sym.to_s` rather than just `to_sym`.
require 'active_record'
# a stub
# possible TODO: remove and and refactor into an acts_as_feature mixin
module Arturo
  # ActiveRecord-backed feature flag with percentage-based rollout.
  class Feature < ::ActiveRecord::Base
    include Arturo::SpecialHandling

    # FIX: was /^[a-zA-z][a-zA-Z0-9_]*$/ — the [a-zA-z] range is a typo that
    # also matches '[', '\', ']', '^', '_' and '`'; and ^/$ anchor per line,
    # not the whole string. \A/\z anchor the full value.
    Arturo::Feature::SYMBOL_REGEX = /\A[a-zA-Z][a-zA-Z0-9_]*\z/
    DEFAULT_ATTRIBUTES = { :deployment_percentage => 0 }.with_indifferent_access

    attr_accessible :symbol, :deployment_percentage if ActiveRecord::VERSION::MAJOR < 4
    attr_readonly :symbol

    validates_presence_of :symbol, :deployment_percentage
    validates_uniqueness_of :symbol, :allow_blank => true
    validates_numericality_of :deployment_percentage,
                              :only_integer => true,
                              :allow_blank => true,
                              :greater_than_or_equal_to => 0,
                              :less_than_or_equal_to => 100

    # Looks up a feature by symbol. Also accepts a Feature as input.
    # @param [Symbol, Arturo::Feature] feature_or_symbol a Feature or the Symbol of a Feature
    # @return [Arturo::Feature, Arturo::NoSuchFeature] the Feature if found, else Arturo::NoSuchFeature
    def self.to_feature(feature_or_symbol)
      return feature_or_symbol if feature_or_symbol.kind_of?(self)
      # normalised to a String so the symbol column comparison is consistent
      symbol = feature_or_symbol.to_sym.to_s
      self.where(:symbol => symbol).first || Arturo::NoSuchFeature.new(symbol)
    end

    # Looks up a feature by symbol. Also accepts a Feature as input.
    # @param [Symbol, Arturo::Feature] feature_or_symbol a Feature or the Symbol of a Feature
    # @return [Arturo::Feature, nil] the Feature if found, else nil
    def self.find_feature(feature_or_symbol)
      feature = to_feature(feature_or_symbol)
      feature.is_a?(Arturo::NoSuchFeature) ? nil : feature
    end

    # Create a new Feature, applying DEFAULT_ATTRIBUTES under any
    # caller-supplied attributes.
    def initialize(*args, &block)
      args[0] = DEFAULT_ATTRIBUTES.merge(args[0] || {})
      super(*args, &block)
    end

    # @param [Object] feature_recipient a User, Account,
    #                 or other model with an #id method
    # @return [true,false] whether or not this feature is enabled
    #                      for feature_recipient
    # @see Arturo::SpecialHandling#whitelisted?
    # @see Arturo::SpecialHandling#blacklisted?
    def enabled_for?(feature_recipient)
      return false if feature_recipient.nil?
      return false if blacklisted?(feature_recipient)
      return true if whitelisted?(feature_recipient)
      passes_threshold?(feature_recipient)
    end

    # Human-readable name, looked up via I18n with a titleized fallback.
    def name
      return I18n.translate("arturo.feature.nameless") if symbol.blank?
      I18n.translate("arturo.feature.#{symbol}", :default => symbol.to_s.titleize)
    end

    def to_s
      "Feature #{name}"
    end

    # URL slug of the form "<id>-<symbol>"; nil for unsaved records.
    def to_param
      persisted? ? "#{id}-#{symbol.to_s.parameterize}" : nil
    end

    def inspect
      "<Arturo::Feature #{name}, deployed to #{deployment_percentage}%>"
    end

    # Timestamp of the most recently updated feature (cache-busting aid).
    def self.last_updated_at
      maximum(:updated_at)
    end

    protected

    # Deterministic per-recipient rollout: hashes recipient id and feature id
    # into 0...100 and compares against the deployment percentage.
    def passes_threshold?(feature_recipient)
      threshold = self.deployment_percentage || 0
      return true if threshold == 100
      return false if threshold == 0 || !feature_recipient.id
      (((feature_recipient.id + (self.id || 1) + 17) * 13) % 100) < threshold
    end
  end
end
|
require 'rspec/expectations'
require 'aruba/runtime'
require 'aruba/errors'
require 'aruba/setup'
# Aruba
module Aruba
  # Api
  module Api
    # Core methods of aruba
    #
    # Those methods do not depend on any other API method of aruba
    module Core
      include ::RSpec::Matchers

      # Aruba Runtime
      def aruba
        # TODO: Check this variable being accessed inconsistently. Should only be using the memo!
        # Renaming this to `aruba` causes 100's of rspec failures. Needs a deeper dive, approach with caution!
        @_aruba_runtime ||= Runtime.new
      end

      # Clean the working directory of aruba
      #
      # This will only clean up aruba's working directory to remove all
      # artifacts of your tests. This does NOT clean up the current working
      # directory.
      def setup_aruba(clobber = true)
        Aruba::Setup.new(aruba).call(clobber)
        self
      end

      # Execute block in Aruba's current directory
      #
      # @yield
      #   The block which should be run in current directory
      def in_current_directory(&block)
        create_directory '.' unless directory?('.')
        cd('.', &block)
      end

      # Switch to directory
      #
      # @param [String] dir
      #   The directory
      #
      # @example Normal directory
      #   cd 'dir'
      #
      # @example Move up
      #   cd '..'
      #
      # @example Run code in directory
      #   result = cd('some-dir') { Dir.getwd }
      #
      # rubocop:disable Metrics/MethodLength
      def cd(dir, &block)
        # FIX: validate before mutating any state. Previously the block form
        # validated inside begin/ensure, so a failed check still ran the
        # ensure clause — popping a directory that was never pushed and
        # calling chdir(nil), which masked the ArgumentError with a TypeError.
        unless Aruba.platform.directory?(expand_path(dir))
          raise ArgumentError, "#{expand_path(dir)} is not a directory or does not exist."
        end

        old_directory = expand_path('.')
        aruba.current_directory << dir
        new_directory = expand_path('.')
        aruba.event_bus.notify Events::ChangedWorkingDirectory.new(old: old_directory, new: new_directory)
        return self unless block_given?

        begin
          old_dir = Aruba.platform.getwd
          real_new_directory = File.expand_path(aruba.current_directory, aruba.root_directory)
          Aruba.platform.chdir real_new_directory
          with_environment(
            'OLDPWD' => old_dir,
            'PWD' => real_new_directory,
            &block
          )
        ensure
          # block form is temporary: undo the push and restore the real cwd
          aruba.current_directory.pop
          Aruba.platform.chdir old_dir if old_dir
        end
      end
      # rubocop:enable Metrics/MethodLength

      # Expand file name
      #
      # @param [String] file_name
      #   Name of file
      #
      # @param [String] dir_string
      #   Name of directory to use as starting point, otherwise current directory is used.
      #
      # @return [String]
      #   The full path
      #
      # @example Single file name
      #
      #   # => <path>/tmp/aruba/file
      #   expand_path('file')
      #
      # @example Single Dot
      #
      #   # => <path>/tmp/aruba
      #   expand_path('.')
      #
      # @example using home directory
      #
      #   # => <path>/home/<name>/file
      #   expand_path('~/file')
      #
      # @example using fixtures directory
      #
      #   # => <path>/test/fixtures/file
      #   expand_path('%/file')
      #
      # @example Absolute directory
      #
      #   # => /foo/bar
      #   expand_path('/foo/bar')
      #
      # rubocop:disable Metrics/MethodLength
      # rubocop:disable Metrics/CyclomaticComplexity
      # rubocop:disable Metrics/PerceivedComplexity
      def expand_path(file_name, dir_string = nil)
        message = "Filename #{file_name} needs to be a string. It cannot be nil or empty either. "\
                  "Please use `expand_path('.')` if you want the current directory to be expanded."
        fail ArgumentError, message unless file_name.is_a?(String) && !file_name.empty?
        # rubocop:disable Layout/LineLength
        fail %(Aruba's working directory does not exist. Maybe you forgot to run `setup_aruba` before using its API.) unless Aruba.platform.directory? File.join(aruba.config.root_directory, aruba.config.working_directory)
        # rubocop:enable Layout/LineLength
        # prefix is the first character; rest skips the prefix and separator
        prefix = file_name[0]
        rest = file_name[2..-1]
        if aruba.config.fixtures_path_prefix == prefix
          path = File.join(*[aruba.fixtures_directory, rest].compact)
          unless Aruba.platform.exist? path
            raise ArgumentError, "Fixture #{rest} does not exist in fixtures directory #{aruba.fixtures_directory}. "\
                                 "This was the one we found first on your system from all possible candidates: #{aruba_fixture_candidates}."
          end
          path
        elsif prefix == '~'
          path = with_environment do
            File.expand_path(file_name)
          end
          raise ArgumentError, 'Expanding "~/" to "/" is not allowed' if path == '/'
          raise ArgumentError, "Expanding '~/' to a relative path #{path} is not allowed" unless Aruba.platform.absolute_path? path
          path.to_s
        elsif absolute?(file_name)
          unless aruba.config.allow_absolute_paths
            caller_location = caller_locations(1, 1).first
            caller_file_line = "#{caller_location.path}:#{caller_location.lineno}"
            aruba.logger.warn "Aruba's `expand_path` method was called with an absolute path at #{caller_file_line}"\
                              ', which is not recommended. Change the call to pass a relative path or set '\
                              '`config.allow_absolute_paths = true` to silence this warning'
          end
          file_name
        else
          with_environment do
            directory = File.expand_path(aruba.current_directory, aruba.root_directory)
            directory = File.expand_path(dir_string, directory) if dir_string
            File.expand_path(file_name, directory)
          end
        end
      end
      # rubocop:enable Metrics/MethodLength
      # rubocop:enable Metrics/CyclomaticComplexity
      # rubocop:enable Metrics/PerceivedComplexity

      # Run block with environment
      #
      # @param [Hash] env (optional)
      #   The variables to be used for block.
      #
      # @yield
      #   The block of code which should be run with the changed environment variables
      def with_environment(env = {}, &block)
        aruba.environment.nest do |nested_env|
          nested_env.update(env)
          Aruba.platform.with_environment nested_env.to_h, &block
        end
      end

      private

      # Quoted, comma-separated list of configured fixture directories,
      # used in the missing-fixture error message.
      def aruba_fixture_candidates
        aruba.config.fixtures_directories.map { |p| format('"%s"', p) }.join(', ')
      end
    end
  end
end
CR: Replace the private helper method with a local variable.
require 'rspec/expectations'
require 'aruba/runtime'
require 'aruba/errors'
require 'aruba/setup'
# Aruba
module Aruba
  # Api
  module Api
    # Core methods of aruba
    #
    # Those methods do not depend on any other API method of aruba
    module Core
      include ::RSpec::Matchers

      # Aruba Runtime
      def aruba
        # TODO: Check this variable being accessed inconsistently. Should only be using the memo!
        # Renaming this to `aruba` causes 100's of rspec failures. Needs a deeper dive, approach with caution!
        @_aruba_runtime ||= Runtime.new
      end

      # Clean the working directory of aruba
      #
      # This will only clean up aruba's working directory to remove all
      # artifacts of your tests. This does NOT clean up the current working
      # directory.
      def setup_aruba(clobber = true)
        Aruba::Setup.new(aruba).call(clobber)
        self
      end

      # Execute block in Aruba's current directory
      #
      # @yield
      #   The block which should be run in current directory
      def in_current_directory(&block)
        create_directory '.' unless directory?('.')
        cd('.', &block)
      end

      # Switch to directory
      #
      # @param [String] dir
      #   The directory
      #
      # @example Normal directory
      #   cd 'dir'
      #
      # @example Move up
      #   cd '..'
      #
      # @example Run code in directory
      #   result = cd('some-dir') { Dir.getwd }
      #
      # rubocop:disable Metrics/MethodLength
      def cd(dir, &block)
        # FIX: validate before mutating any state. Previously the block form
        # validated inside begin/ensure, so a failed check still ran the
        # ensure clause — popping a directory that was never pushed and
        # calling chdir(nil), which masked the ArgumentError with a TypeError.
        unless Aruba.platform.directory?(expand_path(dir))
          raise ArgumentError, "#{expand_path(dir)} is not a directory or does not exist."
        end

        old_directory = expand_path('.')
        aruba.current_directory << dir
        new_directory = expand_path('.')
        aruba.event_bus.notify Events::ChangedWorkingDirectory.new(old: old_directory, new: new_directory)
        return self unless block_given?

        begin
          old_dir = Aruba.platform.getwd
          real_new_directory = File.expand_path(aruba.current_directory, aruba.root_directory)
          Aruba.platform.chdir real_new_directory
          with_environment(
            'OLDPWD' => old_dir,
            'PWD' => real_new_directory,
            &block
          )
        ensure
          # block form is temporary: undo the push and restore the real cwd
          aruba.current_directory.pop
          Aruba.platform.chdir old_dir if old_dir
        end
      end
      # rubocop:enable Metrics/MethodLength

      # Expand file name
      #
      # @param [String] file_name
      #   Name of file
      #
      # @param [String] dir_string
      #   Name of directory to use as starting point, otherwise current directory is used.
      #
      # @return [String]
      #   The full path
      #
      # @example Single file name
      #
      #   # => <path>/tmp/aruba/file
      #   expand_path('file')
      #
      # @example Single Dot
      #
      #   # => <path>/tmp/aruba
      #   expand_path('.')
      #
      # @example using home directory
      #
      #   # => <path>/home/<name>/file
      #   expand_path('~/file')
      #
      # @example using fixtures directory
      #
      #   # => <path>/test/fixtures/file
      #   expand_path('%/file')
      #
      # @example Absolute directory
      #
      #   # => /foo/bar
      #   expand_path('/foo/bar')
      #
      # rubocop:disable Metrics/MethodLength
      # rubocop:disable Metrics/CyclomaticComplexity
      # rubocop:disable Metrics/PerceivedComplexity
      def expand_path(file_name, dir_string = nil)
        message = "Filename #{file_name} needs to be a string. It cannot be nil or empty either. "\
                  "Please use `expand_path('.')` if you want the current directory to be expanded."
        fail ArgumentError, message unless file_name.is_a?(String) && !file_name.empty?
        # rubocop:disable Layout/LineLength
        fail %(Aruba's working directory does not exist. Maybe you forgot to run `setup_aruba` before using its API.) unless Aruba.platform.directory? File.join(aruba.config.root_directory, aruba.config.working_directory)
        # rubocop:enable Layout/LineLength
        # prefix is the first character; rest skips the prefix and separator
        prefix = file_name[0]
        rest = file_name[2..-1]
        if aruba.config.fixtures_path_prefix == prefix
          path = File.join(*[aruba.fixtures_directory, rest].compact)
          unless Aruba.platform.exist? path
            aruba_fixture_candidates = aruba.config.fixtures_directories.map { |p| format('"%s"', p) }.join(', ')
            raise ArgumentError, "Fixture #{rest} does not exist in fixtures directory #{aruba.fixtures_directory}. "\
                                 "This was the one we found first on your system from all possible candidates: #{aruba_fixture_candidates}."
          end
          path
        elsif prefix == '~'
          path = with_environment do
            File.expand_path(file_name)
          end
          raise ArgumentError, 'Expanding "~/" to "/" is not allowed' if path == '/'
          raise ArgumentError, "Expanding '~/' to a relative path #{path} is not allowed" unless Aruba.platform.absolute_path? path
          path.to_s
        elsif absolute?(file_name)
          unless aruba.config.allow_absolute_paths
            caller_location = caller_locations(1, 1).first
            caller_file_line = "#{caller_location.path}:#{caller_location.lineno}"
            aruba.logger.warn "Aruba's `expand_path` method was called with an absolute path at #{caller_file_line}"\
                              ', which is not recommended. Change the call to pass a relative path or set '\
                              '`config.allow_absolute_paths = true` to silence this warning'
          end
          file_name
        else
          with_environment do
            directory = File.expand_path(aruba.current_directory, aruba.root_directory)
            directory = File.expand_path(dir_string, directory) if dir_string
            File.expand_path(file_name, directory)
          end
        end
      end
      # rubocop:enable Metrics/MethodLength
      # rubocop:enable Metrics/CyclomaticComplexity
      # rubocop:enable Metrics/PerceivedComplexity

      # Run block with environment
      #
      # @param [Hash] env (optional)
      #   The variables to be used for block.
      #
      # @yield
      #   The block of code which should be run with the changed environment variables
      def with_environment(env = {}, &block)
        aruba.environment.nest do |nested_env|
          nested_env.update(env)
          Aruba.platform.with_environment nested_env.to_h, &block
        end
      end
    end
  end
end
|
require 'assert/suite'
require 'assert/assertions'
require 'assert/result'
module Assert
  class Context
    include Assert::Assertions
    # a Context is a scope for tests to run in. Contexts have setup and
    # teardown blocks, subjects, and descriptions. Tests are run in the
    # scope of a Context instance. Therefore, a Context should have
    # minimal base logic/methods/instance_vars. The instance should remain
    # pure to not pollute test scopes.

    # if a class subclasses Context, add it to the suite
    def self.inherited(klass)
      Assert.suite << klass
    end

    # put all logic here to keep context instances pure for running tests
    class << self
      # Register a setup block to run before each test in this context.
      def setup(&block)
        raise ArgumentError, "please provide a setup block" unless block_given?
        @_assert_setups ||= []
        @_assert_setups << block
      end
      alias_method :before, :setup

      # Register a teardown block to run after each test in this context.
      def teardown(&block)
        raise ArgumentError, "please provide a teardown block" unless block_given?
        @_assert_teardowns ||= []
        @_assert_teardowns << block
      end
      alias_method :after, :teardown

      # All setup blocks, ancestor contexts first, then this context's own.
      def _assert_setups
        setups = if superclass.respond_to?(:_assert_setups)
          superclass._assert_setups
        end
        (setups || []) + (@_assert_setups || [])
      end

      # All teardown blocks, this context's own first, then ancestors'
      # (mirror order of _assert_setups).
      def _assert_teardowns
        teardowns = if superclass.respond_to?(:_assert_teardowns)
          superclass._assert_teardowns
        end
        (@_assert_teardowns || []) + (teardowns || [])
      end

      # Set this context's description.
      # NOTE(review): uses ||=, so only the first call per context takes
      # effect and later calls are silently ignored — confirm intended.
      def desc(description)
        raise ArgumentError, "no context description provided" if description.nil?
        @_assert_desc ||= [ description ]
      end

      # All descriptions, ancestor contexts first.
      def _assert_descs
        descs = if superclass.respond_to?(:_assert_descs)
          superclass._assert_descs
        end
        (descs || []) + (@_assert_desc || [])
      end

      # Set the subject block for this context (not inherited here; see
      # the instance-level #subject reader).
      def subject(&block)
        raise ArgumentError, "please provide a subject block" unless block_given?
        @_assert_subject = block
      end

      def _assert_subject
        @_assert_subject
      end
    end

    def initialize(running_test = nil)
      @__running_test__ = running_test
    end

    # raise Result::Fail if the assertion is false or nil
    def assert(assertion, fail_desc=nil, what_failed_msg=nil)
      what_failed_msg ||= "Failed assert."
      msg = fail_message(fail_desc) { what_failed_msg }
      assertion ? pass : fail(msg)
    end

    # the opposite of assert, raise Result::Fail if the assertion is not false or nil
    def assert_not(assertion, fail_desc=nil)
      assert(!assertion, fail_desc, "Failed refute.")
    end
    alias_method :refute, :assert_not

    # adds a Skip result to the end of the test's results and breaks test execution
    def skip(skip_msg=nil)
      raise(Result::TestSkipped, skip_msg || "")
    end

    # adds a Pass result to the end of the test's results
    # does not break test execution
    def pass(pass_msg=nil)
      capture_result do |test_name, backtrace|
        Assert::Result::Pass.new(test_name, pass_msg, backtrace)
      end
    end

    # adds a Fail result to the end of the test's results
    # does not break test execution
    def fail(fail_msg=nil)
      capture_result do |test_name, backtrace|
        message = (fail_message(fail_msg) { }).call
        Assert::Result::Fail.new(test_name, message, backtrace)
      end
    end
    alias_method :flunk, :fail

    # adds an Ignore result to the end of the test's results
    # does not break test execution
    def ignore(ignore_msg=nil)
      capture_result do |test_name, backtrace|
        Assert::Result::Ignore.new(test_name, ignore_msg, backtrace)
      end
    end

    # Evaluates this context's subject block in the instance's scope; nil
    # when no subject block was defined.
    def subject
      if subject_block = self.class._assert_subject
        instance_eval(&subject_block)
      end
    end

    protected

    # Builds a result from the yielded test name/backtrace, appends it to
    # the running test's results, and returns it.
    def capture_result
      if block_given?
        result = yield @__running_test__.name, caller
        @__running_test__.results << result
        result
      end
    end

    # Returns a Proc that will output a custom message along with the default fail message.
    def fail_message(fail_desc=nil, &what_failed)
      fail_desc.kind_of?(::Proc) ? fail_desc : Proc.new do
        [ what_failed.call, fail_desc ].compact.join("\n")
      end
    end
  end
end
Added a basic `should` test-definition method.
require 'assert/suite'
require 'assert/assertions'
require 'assert/result'
module Assert
class Context
include Assert::Assertions
# a Context is a scope for tests to run in. Contexts have setup and
# teardown blocks, subjects, and descriptions. Tests are run in the
# scope of a Context instance. Therefore, a Context should have
# minimal base logic/methods/instance_vars. The instance should remain
# pure to not pollute test scopes.
# if a class subclasses Context, add it to the suite
def self.inherited(klass)
Assert.suite << klass
end
# put all logic here to keep context instances pure for running tests
class << self
def setup(&block)
raise ArgumentError, "please provide a setup block" unless block_given?
@_assert_setups ||= []
@_assert_setups << block
end
alias_method :before, :setup
def teardown(&block)
raise ArgumentError, "please provide a teardown block" unless block_given?
@_assert_teardowns ||= []
@_assert_teardowns << block
end
alias_method :after, :teardown
def _assert_setups
setups = if superclass.respond_to?(:_assert_setups)
superclass._assert_setups
end
(setups || []) + (@_assert_setups || [])
end
def _assert_teardowns
teardowns = if superclass.respond_to?(:_assert_teardowns)
superclass._assert_teardowns
end
(@_assert_teardowns || []) + (teardowns || [])
end
def desc(description)
raise ArgumentError, "no context description provided" if description.nil?
@_assert_desc ||= [ description ]
end
def _assert_descs
descs = if superclass.respond_to?(:_assert_descs)
superclass._assert_descs
end
(descs || []) + (@_assert_desc || [])
end
def subject(&block)
raise ArgumentError, "please provide a subject block" unless block_given?
@_assert_subject = block
end
def _assert_subject
@_assert_subject
end
def should(desc, &block)
raise ArgumentError, "please provide a test block" unless block_given?
method_name = "test_#{desc.gsub(/\s/, '_')}"
if method_defined?(method_name)
from = caller.first
puts "WARNING: should #{desc.inspect} redefining #{method_name}"
puts " from: #{from}"
end
define_method(method_name, &block)
end
end
def initialize(running_test = nil)
@__running_test__ = running_test
end
# raise Result::Fail if the assertion is false or nil
def assert(assertion, fail_desc=nil, what_failed_msg=nil)
what_failed_msg ||= "Failed assert."
msg = fail_message(fail_desc) { what_failed_msg }
assertion ? pass : fail(msg)
end
# the opposite of assert, raise Result::Fail if the assertion is not false or nil
# Logical negation of #assert: records a Pass when assertion is falsy.
def assert_not(assertion, fail_desc=nil)
assert(!assertion, fail_desc, "Failed refute.")
end
alias_method :refute, :assert_not
# adds a Skip result to the end of the test's results and breaks test execution
# Record a Skip for this test and abort its execution by raising
# Result::TestSkipped (caught by the test runner).
def skip(skip_msg=nil)
  message = skip_msg || ""
  raise Result::TestSkipped, message
end
# adds a Pass result to the end of the test's results
# does not break test execution
def pass(pass_msg=nil)
# append a Pass result to the running test; the test keeps executing
capture_result do |test_name, backtrace|
Assert::Result::Pass.new(test_name, pass_msg, backtrace)
end
end
# adds a Fail result to the end of the test's results
# does not break test execution
def fail(fail_msg=nil)
capture_result do |test_name, backtrace|
# fail_message handles Proc vs. plain descriptions; the empty block
# means no extra "what failed" headline is prepended here
message = (fail_message(fail_msg) { }).call
Assert::Result::Fail.new(test_name, message, backtrace)
end
end
alias_method :flunk, :fail
# adds an Ignore result to the end of the test's results
# does not break test execution
# Append an Ignore result to the running test's results; execution of
# the test continues.
def ignore(ignore_msg=nil)
  capture_result { |name, bt| Assert::Result::Ignore.new(name, ignore_msg, bt) }
end
# Build the test subject by evaluating the class-level subject block in
# this instance's scope; returns nil when no subject block was declared.
def subject
  block = self.class._assert_subject
  instance_eval(&block) if block
end
protected
# Yield the running test's name and a backtrace to the block, append
# the Result the block builds onto the test's results, and return it.
# Silently returns nil when called without a block.
def capture_result
if block_given?
result = yield @__running_test__.name, caller
@__running_test__.results << result
result
end
end
# Returns a Proc that will output a custom message along with the default fail message.
def fail_message(fail_desc=nil, &what_failed)
# a Proc fail_desc is returned untouched so callers can supply a fully
# custom message; otherwise join the what_failed headline with the
# description, dropping nils
fail_desc.kind_of?(::Proc) ? fail_desc : Proc.new do
[ what_failed.call, fail_desc ].compact.join("\n")
end
end
end
end
|
require 'encryptor'
require 'securerandom'
# Adds attr_accessors that encrypt and decrypt an object's attributes
module AttrEncrypted
autoload :Version, 'attr_encrypted/version'
def self.extended(base) # :nodoc:
base.class_eval do
# mix in instance-level encrypt/decrypt helpers and initialize the
# per-class option and attribute registries
include InstanceMethods
attr_writer :attr_encrypted_options
@attr_encrypted_options, @encrypted_attributes = {}, {}
end
end
# Generates attr_accessors that encrypt and decrypt attributes transparently
#
# Options (any other options you specify are passed to the encryptor's encrypt and decrypt methods)
#
# :attribute => The name of the referenced encrypted attribute. For example
# <tt>attr_accessor :email, :attribute => :ee</tt> would generate an
# attribute named 'ee' to store the encrypted email. This is useful when defining
# one attribute to encrypt at a time or when the :prefix and :suffix options
# aren't enough. Defaults to nil.
#
# :prefix => A prefix used to generate the name of the referenced encrypted attributes.
# For example <tt>attr_accessor :email, :password, :prefix => 'crypted_'</tt> would
# generate attributes named 'crypted_email' and 'crypted_password' to store the
# encrypted email and password. Defaults to 'encrypted_'.
#
# :suffix => A suffix used to generate the name of the referenced encrypted attributes.
# For example <tt>attr_accessor :email, :password, :prefix => '', :suffix => '_encrypted'</tt>
# would generate attributes named 'email_encrypted' and 'password_encrypted' to store the
# encrypted email. Defaults to ''.
#
# :key => The encryption key. This option may not be required if you're using a custom encryptor. If you pass
# a symbol representing an instance method then the :key option will be replaced with the result of the
# method before being passed to the encryptor. Objects that respond to :call are evaluated as well (including procs).
# Any other key types will be passed directly to the encryptor.
#
# :encode => If set to true, attributes will be encoded as well as encrypted. This is useful if you're
# planning on storing the encrypted attributes in a database. The default encoding is 'm' (base64),
# however this can be overwritten by setting the :encode option to some other encoding string instead of
# just 'true'. See http://www.ruby-doc.org/core/classes/Array.html#M002245 for more encoding directives.
# Defaults to false unless you're using it with ActiveRecord, DataMapper, or Sequel.
#
# :default_encoding => Defaults to 'm' (base64).
#
# :marshal => If set to true, attributes will be marshaled as well as encrypted. This is useful if you're planning
# on encrypting something other than a string. Defaults to false unless you're using it with ActiveRecord
# or DataMapper.
#
# :marshaler => The object to use for marshaling. Defaults to Marshal.
#
# :dump_method => The dump method name to call on the <tt>:marshaler</tt> object to. Defaults to 'dump'.
#
# :load_method => The load method name to call on the <tt>:marshaler</tt> object. Defaults to 'load'.
#
# :encryptor => The object to use for encrypting. Defaults to Encryptor.
#
# :encrypt_method => The encrypt method name to call on the <tt>:encryptor</tt> object. Defaults to 'encrypt'.
#
# :decrypt_method => The decrypt method name to call on the <tt>:encryptor</tt> object. Defaults to 'decrypt'.
#
# :if => Attributes are only encrypted if this option evaluates to true. If you pass a symbol representing an instance
# method then the result of the method will be evaluated. Any objects that respond to <tt>:call</tt> are evaluated as well.
# Defaults to true.
#
# :unless => Attributes are only encrypted if this option evaluates to false. If you pass a symbol representing an instance
# method then the result of the method will be evaluated. Any objects that respond to <tt>:call</tt> are evaluated as well.
# Defaults to false.
#
# :mode => Selects encryption mode for attribute: choose <tt>:single_iv_and_salt</tt> for compatibility
# with the old attr_encrypted API: the default IV and salt of the underlying encryptor object
# is used; <tt>:per_attribute_iv_and_salt</tt> uses a per-attribute IV and salt attribute and
# is the recommended mode for new deployments.
# Defaults to <tt>:single_iv_and_salt</tt>.
#
# You can specify your own default options
#
# class User
# # now all attributes will be encoded and marshaled by default
# attr_encrypted_options.merge!(:encode => true, :marshal => true, :some_other_option => true)
# attr_encrypted :configuration, :key => 'my secret key'
# end
#
#
# Example
#
# class User
# attr_encrypted :email, :credit_card, :key => 'some secret key'
# attr_encrypted :configuration, :key => 'some other secret key', :marshal => true
# end
#
# @user = User.new
# @user.encrypted_email # nil
# @user.email? # false
# @user.email = 'test@example.com'
# @user.email? # true
# @user.encrypted_email # returns the encrypted version of 'test@example.com'
#
# @user.configuration = { :time_zone => 'UTC' }
# @user.encrypted_configuration # returns the encrypted version of configuration
#
# See README for more examples
def attr_encrypted(*attributes)
# defaults, overridden by class-level attr_encrypted_options, then by a
# per-call options hash passed as the final argument
options = {
:prefix => 'encrypted_',
:suffix => '',
:if => true,
:unless => false,
:encode => false,
:default_encoding => 'm',
:marshal => false,
:marshaler => Marshal,
:dump_method => 'dump',
:load_method => 'load',
:encryptor => Encryptor,
:encrypt_method => 'encrypt',
:decrypt_method => 'decrypt',
:mode => :single_iv_and_salt
}.merge!(attr_encrypted_options).merge!(attributes.last.is_a?(Hash) ? attributes.pop : {})
# :encode => true means "use the default encoding directive"
options[:encode] = options[:default_encoding] if options[:encode] == true
attributes.each do |attribute|
# storage attribute name: an explicit :attribute wins over prefix/suffix
encrypted_attribute_name = (options[:attribute] ? options[:attribute] : [options[:prefix], attribute, options[:suffix]].join).to_sym
iv_name = "#{encrypted_attribute_name}_iv".to_sym
salt_name = "#{encrypted_attribute_name}_salt".to_sym
instance_methods_as_symbols = attribute_instance_methods_as_symbols
# only define accessors that don't already exist on the class
attr_reader encrypted_attribute_name unless instance_methods_as_symbols.include?(encrypted_attribute_name)
attr_writer encrypted_attribute_name unless instance_methods_as_symbols.include?(:"#{encrypted_attribute_name}=")
if options[:mode] == :per_attribute_iv_and_salt
attr_reader iv_name unless instance_methods_as_symbols.include?(iv_name)
attr_writer iv_name unless instance_methods_as_symbols.include?(:"#{iv_name}=")
attr_reader salt_name unless instance_methods_as_symbols.include?(salt_name)
attr_writer salt_name unless instance_methods_as_symbols.include?(:"#{salt_name}=")
end
# reader: decrypt once, then memoize the plaintext in @<attribute>
define_method(attribute) do
instance_variable_get("@#{attribute}") || instance_variable_set("@#{attribute}", decrypt(attribute, send(encrypted_attribute_name)))
end
# writer: store the ciphertext and cache the plaintext
define_method("#{attribute}=") do |value|
send("#{encrypted_attribute_name}=", encrypt(attribute, value))
instance_variable_set("@#{attribute}", value)
end
# predicate: truthy value, and non-empty when the value supports empty?
define_method("#{attribute}?") do
value = send(attribute)
value.respond_to?(:empty?) ? !value.empty? : !!value
end
encrypted_attributes[attribute.to_sym] = options.merge(:attribute => encrypted_attribute_name)
end
end
alias_method :attr_encryptor, :attr_encrypted
# Default options to use with calls to <tt>attr_encrypted</tt>
#
# It will inherit existing options from its superclass
# Default options applied to every attr_encrypted call on this class,
# seeded from the superclass so defaults are inherited.
def attr_encrypted_options
  return @attr_encrypted_options if @attr_encrypted_options
  @attr_encrypted_options = superclass.attr_encrypted_options.dup
end
# Checks if an attribute is configured with <tt>attr_encrypted</tt>
#
# Example
#
# class User
# attr_accessor :name
# attr_encrypted :email
# end
#
# User.attr_encrypted?(:name) # false
# User.attr_encrypted?(:email) # true
# True when the given (virtual) attribute was declared via attr_encrypted.
def attr_encrypted?(attribute)
  encrypted_attributes.key?(attribute.to_sym)
end
# Decrypts a value for the attribute specified
#
# Example
#
# class User
# attr_encrypted :email
# end
#
# email = User.decrypt(:email, 'SOME_ENCRYPTED_EMAIL_STRING')
def decrypt(attribute, encrypted_value, options = {})
# per-call options override the options registered for the attribute
options = encrypted_attributes[attribute.to_sym].merge(options)
if options[:if] && !options[:unless] && !encrypted_value.nil? && !(encrypted_value.is_a?(String) && encrypted_value.empty?)
# undo the pack() encoding applied by encrypt (default 'm' = base64)
encrypted_value = encrypted_value.unpack(options[:encode]).first if options[:encode]
value = options[:encryptor].send(options[:decrypt_method], options.merge!(:value => encrypted_value))
if options[:marshal]
# round-trip non-string payloads through the configured marshaler
value = options[:marshaler].send(options[:load_method], value)
elsif defined?(Encoding)
# restore the expected string encoding on the decrypted bytes
encoding = Encoding.default_internal || Encoding.default_external
value = value.force_encoding(encoding.name)
end
value
else
# disabled via :if/:unless, or blank input: return the input untouched
encrypted_value
end
end
# Encrypts a value for the attribute specified
#
# Example
#
# class User
# attr_encrypted :email
# end
#
# encrypted_email = User.encrypt(:email, 'test@example.com')
def encrypt(attribute, value, options = {})
# per-call options override the options registered for the attribute
options = encrypted_attributes[attribute.to_sym].merge(options)
if options[:if] && !options[:unless] && !value.nil? && !(value.is_a?(String) && value.empty?)
# marshal non-string payloads, otherwise coerce to string
value = options[:marshal] ? options[:marshaler].send(options[:dump_method], value) : value.to_s
encrypted_value = options[:encryptor].send(options[:encrypt_method], options.merge!(:value => value))
# optionally encode (default 'm' = base64) for safe storage
encrypted_value = [encrypted_value].pack(options[:encode]) if options[:encode]
encrypted_value
else
# disabled via :if/:unless, or blank input: return the input untouched
value
end
end
# Contains a hash of encrypted attributes with virtual attribute names as keys
# and their corresponding options as values
#
# Example
#
# class User
# attr_encrypted :email, :key => 'my secret key'
# end
#
# User.encrypted_attributes # { :email => { :attribute => 'encrypted_email', :key => 'my secret key' } }
# Per-class registry of encrypted attributes (virtual name => options),
# seeded from the superclass so configuration is inherited.
def encrypted_attributes
  return @encrypted_attributes if @encrypted_attributes
  @encrypted_attributes = superclass.encrypted_attributes.dup
end
# Forwards calls to :encrypt_#{attribute} or :decrypt_#{attribute} to the corresponding encrypt or decrypt method
# if attribute was configured with attr_encrypted
#
# Example
#
# class User
# attr_encrypted :email, :key => 'my secret key'
# end
#
# User.encrypt_email('SOME_ENCRYPTED_EMAIL_STRING')
# Route encrypt_<attr>/decrypt_<attr> calls to the encrypt/decrypt class
# methods for attributes declared via attr_encrypted.
def method_missing(method, *arguments, &block)
  if method.to_s =~ /^((en|de)crypt)_(.+)$/ && attr_encrypted?($3)
    # $1 is 'encrypt' or 'decrypt'; $3 is the attribute name
    send($1, $3, *arguments)
  else
    super
  end
end

# Pair method_missing with respond_to_missing? so respond_to? and
# method(:encrypt_email) work for the dynamic encrypt_*/decrypt_* calls.
def respond_to_missing?(method, include_private = false)
  (method.to_s =~ /^(?:en|de)crypt_(.+)$/ && attr_encrypted?($1)) || super
end
module InstanceMethods
# Decrypts a value for the attribute specified using options evaluated in the current object's scope
#
# Example
#
# class User
# attr_accessor :secret_key
# attr_encrypted :email, :key => :secret_key
#
# def initialize(secret_key)
# self.secret_key = secret_key
# end
# end
#
# @user = User.new('some-secret-key')
# @user.decrypt(:email, 'SOME_ENCRYPTED_EMAIL_STRING')
def decrypt(attribute, encrypted_value)
# delegates to the class method with options (key, iv, salt, if/unless)
# resolved against this instance
self.class.decrypt(attribute, encrypted_value, evaluated_attr_encrypted_options_for(attribute))
end
# Encrypts a value for the attribute specified using options evaluated in the current object's scope
#
# Example
#
# class User
# attr_accessor :secret_key
# attr_encrypted :email, :key => :secret_key
#
# def initialize(secret_key)
# self.secret_key = secret_key
# end
# end
#
# @user = User.new('some-secret-key')
# @user.encrypt(:email, 'test@example.com')
def encrypt(attribute, value)
# delegates to the class method with options (key, iv, salt, if/unless)
# resolved against this instance
self.class.encrypt(attribute, value, evaluated_attr_encrypted_options_for(attribute))
end
protected
# Returns attr_encrypted options evaluated in the current object's scope for the attribute specified
def evaluated_attr_encrypted_options_for(attribute)
# per-attribute mode needs a concrete IV and salt loaded (and possibly
# generated) before the options are resolved below
if evaluate_attr_encrypted_option(self.class.encrypted_attributes[attribute.to_sym][:mode]) == :per_attribute_iv_and_salt
load_iv_for_attribute(attribute, self.class.encrypted_attributes[attribute.to_sym][:algorithm])
load_salt_for_attribute(attribute)
end
# resolve every option value (symbols -> method calls, callables -> call)
self.class.encrypted_attributes[attribute.to_sym].inject({}) { |hash, (option, value)| hash[option] = evaluate_attr_encrypted_option(value); hash }
end
# Evaluates symbol (method reference) or proc (responds to call) options
#
# If the option is not a symbol or proc then the original option is returned
# Resolve a single attr_encrypted option in this instance's scope: a
# Symbol naming a method is invoked, anything callable is called with
# self, and all other values are returned untouched.
def evaluate_attr_encrypted_option(option)
  case option
  when Symbol
    respond_to?(option) ? send(option) : option
  else
    option.respond_to?(:call) ? option.call(self) : option
  end
end
# Lazily generate and memoize a per-attribute IV: when no IV is stored
# yet, create a random one for the (possibly defaulted) algorithm,
# base64-pack it and persist it via the iv writer. The unpacked IV is
# then merged into the attribute's options for the encryptor.
def load_iv_for_attribute(attribute, algorithm)
  encrypted_attribute_name = self.class.encrypted_attributes[attribute.to_sym][:attribute]
  iv = send("#{encrypted_attribute_name}_iv")
  if iv.nil?
    begin
      algorithm ||= "aes-256-cbc"
      # OpenSSL::Cipher::Cipher is a deprecated alias; use OpenSSL::Cipher
      algo = OpenSSL::Cipher.new(algorithm)
      iv = [algo.random_iv].pack("m")
      send("#{encrypted_attribute_name}_iv=", iv)
    rescue RuntimeError
      # best-effort by design: an unsupported algorithm leaves iv unset
      # and falls back to the encryptor's default behavior
    end
  end
  self.class.encrypted_attributes[attribute.to_sym] = self.class.encrypted_attributes[attribute.to_sym].merge(:iv => iv.unpack("m").first) if (iv && !iv.empty?)
end
# Lazily generate and memoize a per-attribute salt, persisting it via
# the salt writer and merging it into the attribute's options.
#
# SECURITY: the previous scheme derived the salt from Time.now and
# rand(1000), which is predictable and collision-prone; use
# SecureRandom (see require 'securerandom') for a cryptographically
# random salt instead.
def load_salt_for_attribute(attribute)
  encrypted_attribute_name = self.class.encrypted_attributes[attribute.to_sym][:attribute]
  salt = send("#{encrypted_attribute_name}_salt") || send("#{encrypted_attribute_name}_salt=", SecureRandom.hex)
  self.class.encrypted_attributes[attribute.to_sym] = self.class.encrypted_attributes[attribute.to_sym].merge(:salt => salt)
end
end
protected
# Snapshot of all instance method names as symbols, used to avoid
# clobbering existing readers/writers when defining accessors.
def attribute_instance_methods_as_symbols
  instance_methods.map(&:to_sym)
end
end
# Make the attr_encrypted class macro available on every class.
Object.extend AttrEncrypted
# Load every ORM adapter file shipped under attr_encrypted/adapters.
Dir[File.join(File.dirname(__FILE__), 'attr_encrypted', 'adapters', '*.rb')].each { |adapter| require adapter }
Improve salt strength.
- Use SecureRandom and a longer salt to make salts unpredictable and
  reduce the chance of collisions.
- Fixes #154
require 'encryptor'
# Adds attr_accessors that encrypt and decrypt an object's attributes
module AttrEncrypted
autoload :Version, 'attr_encrypted/version'
def self.extended(base) # :nodoc:
base.class_eval do
include InstanceMethods
attr_writer :attr_encrypted_options
@attr_encrypted_options, @encrypted_attributes = {}, {}
end
end
# Generates attr_accessors that encrypt and decrypt attributes transparently
#
# Options (any other options you specify are passed to the encryptor's encrypt and decrypt methods)
#
# :attribute => The name of the referenced encrypted attribute. For example
# <tt>attr_accessor :email, :attribute => :ee</tt> would generate an
# attribute named 'ee' to store the encrypted email. This is useful when defining
# one attribute to encrypt at a time or when the :prefix and :suffix options
# aren't enough. Defaults to nil.
#
# :prefix => A prefix used to generate the name of the referenced encrypted attributes.
# For example <tt>attr_accessor :email, :password, :prefix => 'crypted_'</tt> would
# generate attributes named 'crypted_email' and 'crypted_password' to store the
# encrypted email and password. Defaults to 'encrypted_'.
#
# :suffix => A suffix used to generate the name of the referenced encrypted attributes.
# For example <tt>attr_accessor :email, :password, :prefix => '', :suffix => '_encrypted'</tt>
# would generate attributes named 'email_encrypted' and 'password_encrypted' to store the
# encrypted email. Defaults to ''.
#
# :key => The encryption key. This option may not be required if you're using a custom encryptor. If you pass
# a symbol representing an instance method then the :key option will be replaced with the result of the
# method before being passed to the encryptor. Objects that respond to :call are evaluated as well (including procs).
# Any other key types will be passed directly to the encryptor.
#
# :encode => If set to true, attributes will be encoded as well as encrypted. This is useful if you're
# planning on storing the encrypted attributes in a database. The default encoding is 'm' (base64),
# however this can be overwritten by setting the :encode option to some other encoding string instead of
# just 'true'. See http://www.ruby-doc.org/core/classes/Array.html#M002245 for more encoding directives.
# Defaults to false unless you're using it with ActiveRecord, DataMapper, or Sequel.
#
# :default_encoding => Defaults to 'm' (base64).
#
# :marshal => If set to true, attributes will be marshaled as well as encrypted. This is useful if you're planning
# on encrypting something other than a string. Defaults to false unless you're using it with ActiveRecord
# or DataMapper.
#
# :marshaler => The object to use for marshaling. Defaults to Marshal.
#
# :dump_method => The dump method name to call on the <tt>:marshaler</tt> object to. Defaults to 'dump'.
#
# :load_method => The load method name to call on the <tt>:marshaler</tt> object. Defaults to 'load'.
#
# :encryptor => The object to use for encrypting. Defaults to Encryptor.
#
# :encrypt_method => The encrypt method name to call on the <tt>:encryptor</tt> object. Defaults to 'encrypt'.
#
# :decrypt_method => The decrypt method name to call on the <tt>:encryptor</tt> object. Defaults to 'decrypt'.
#
# :if => Attributes are only encrypted if this option evaluates to true. If you pass a symbol representing an instance
# method then the result of the method will be evaluated. Any objects that respond to <tt>:call</tt> are evaluated as well.
# Defaults to true.
#
# :unless => Attributes are only encrypted if this option evaluates to false. If you pass a symbol representing an instance
# method then the result of the method will be evaluated. Any objects that respond to <tt>:call</tt> are evaluated as well.
# Defaults to false.
#
# :mode => Selects encryption mode for attribute: choose <tt>:single_iv_and_salt</tt> for compatibility
# with the old attr_encrypted API: the default IV and salt of the underlying encryptor object
# is used; <tt>:per_attribute_iv_and_salt</tt> uses a per-attribute IV and salt attribute and
# is the recommended mode for new deployments.
# Defaults to <tt>:single_iv_and_salt</tt>.
#
# You can specify your own default options
#
# class User
# # now all attributes will be encoded and marshaled by default
# attr_encrypted_options.merge!(:encode => true, :marshal => true, :some_other_option => true)
# attr_encrypted :configuration, :key => 'my secret key'
# end
#
#
# Example
#
# class User
# attr_encrypted :email, :credit_card, :key => 'some secret key'
# attr_encrypted :configuration, :key => 'some other secret key', :marshal => true
# end
#
# @user = User.new
# @user.encrypted_email # nil
# @user.email? # false
# @user.email = 'test@example.com'
# @user.email? # true
# @user.encrypted_email # returns the encrypted version of 'test@example.com'
#
# @user.configuration = { :time_zone => 'UTC' }
# @user.encrypted_configuration # returns the encrypted version of configuration
#
# See README for more examples
def attr_encrypted(*attributes)
options = {
:prefix => 'encrypted_',
:suffix => '',
:if => true,
:unless => false,
:encode => false,
:default_encoding => 'm',
:marshal => false,
:marshaler => Marshal,
:dump_method => 'dump',
:load_method => 'load',
:encryptor => Encryptor,
:encrypt_method => 'encrypt',
:decrypt_method => 'decrypt',
:mode => :single_iv_and_salt
}.merge!(attr_encrypted_options).merge!(attributes.last.is_a?(Hash) ? attributes.pop : {})
options[:encode] = options[:default_encoding] if options[:encode] == true
attributes.each do |attribute|
encrypted_attribute_name = (options[:attribute] ? options[:attribute] : [options[:prefix], attribute, options[:suffix]].join).to_sym
iv_name = "#{encrypted_attribute_name}_iv".to_sym
salt_name = "#{encrypted_attribute_name}_salt".to_sym
instance_methods_as_symbols = attribute_instance_methods_as_symbols
attr_reader encrypted_attribute_name unless instance_methods_as_symbols.include?(encrypted_attribute_name)
attr_writer encrypted_attribute_name unless instance_methods_as_symbols.include?(:"#{encrypted_attribute_name}=")
if options[:mode] == :per_attribute_iv_and_salt
attr_reader iv_name unless instance_methods_as_symbols.include?(iv_name)
attr_writer iv_name unless instance_methods_as_symbols.include?(:"#{iv_name}=")
attr_reader salt_name unless instance_methods_as_symbols.include?(salt_name)
attr_writer salt_name unless instance_methods_as_symbols.include?(:"#{salt_name}=")
end
define_method(attribute) do
instance_variable_get("@#{attribute}") || instance_variable_set("@#{attribute}", decrypt(attribute, send(encrypted_attribute_name)))
end
define_method("#{attribute}=") do |value|
send("#{encrypted_attribute_name}=", encrypt(attribute, value))
instance_variable_set("@#{attribute}", value)
end
define_method("#{attribute}?") do
value = send(attribute)
value.respond_to?(:empty?) ? !value.empty? : !!value
end
encrypted_attributes[attribute.to_sym] = options.merge(:attribute => encrypted_attribute_name)
end
end
alias_method :attr_encryptor, :attr_encrypted
# Default options to use with calls to <tt>attr_encrypted</tt>
#
# It will inherit existing options from its superclass
def attr_encrypted_options
@attr_encrypted_options ||= superclass.attr_encrypted_options.dup
end
# Checks if an attribute is configured with <tt>attr_encrypted</tt>
#
# Example
#
# class User
# attr_accessor :name
# attr_encrypted :email
# end
#
# User.attr_encrypted?(:name) # false
# User.attr_encrypted?(:email) # true
def attr_encrypted?(attribute)
encrypted_attributes.has_key?(attribute.to_sym)
end
# Decrypts a value for the attribute specified
#
# Example
#
# class User
# attr_encrypted :email
# end
#
# email = User.decrypt(:email, 'SOME_ENCRYPTED_EMAIL_STRING')
def decrypt(attribute, encrypted_value, options = {})
options = encrypted_attributes[attribute.to_sym].merge(options)
if options[:if] && !options[:unless] && !encrypted_value.nil? && !(encrypted_value.is_a?(String) && encrypted_value.empty?)
encrypted_value = encrypted_value.unpack(options[:encode]).first if options[:encode]
value = options[:encryptor].send(options[:decrypt_method], options.merge!(:value => encrypted_value))
if options[:marshal]
value = options[:marshaler].send(options[:load_method], value)
elsif defined?(Encoding)
encoding = Encoding.default_internal || Encoding.default_external
value = value.force_encoding(encoding.name)
end
value
else
encrypted_value
end
end
# Encrypts a value for the attribute specified
#
# Example
#
# class User
# attr_encrypted :email
# end
#
# encrypted_email = User.encrypt(:email, 'test@example.com')
def encrypt(attribute, value, options = {})
options = encrypted_attributes[attribute.to_sym].merge(options)
if options[:if] && !options[:unless] && !value.nil? && !(value.is_a?(String) && value.empty?)
value = options[:marshal] ? options[:marshaler].send(options[:dump_method], value) : value.to_s
encrypted_value = options[:encryptor].send(options[:encrypt_method], options.merge!(:value => value))
encrypted_value = [encrypted_value].pack(options[:encode]) if options[:encode]
encrypted_value
else
value
end
end
# Contains a hash of encrypted attributes with virtual attribute names as keys
# and their corresponding options as values
#
# Example
#
# class User
# attr_encrypted :email, :key => 'my secret key'
# end
#
# User.encrypted_attributes # { :email => { :attribute => 'encrypted_email', :key => 'my secret key' } }
def encrypted_attributes
@encrypted_attributes ||= superclass.encrypted_attributes.dup
end
# Forwards calls to :encrypt_#{attribute} or :decrypt_#{attribute} to the corresponding encrypt or decrypt method
# if attribute was configured with attr_encrypted
#
# Example
#
# class User
# attr_encrypted :email, :key => 'my secret key'
# end
#
# User.encrypt_email('SOME_ENCRYPTED_EMAIL_STRING')
# Route encrypt_<attr>/decrypt_<attr> calls to the encrypt/decrypt class
# methods for attributes declared via attr_encrypted.
def method_missing(method, *arguments, &block)
  if method.to_s =~ /^((en|de)crypt)_(.+)$/ && attr_encrypted?($3)
    # $1 is 'encrypt' or 'decrypt'; $3 is the attribute name
    send($1, $3, *arguments)
  else
    super
  end
end

# Pair method_missing with respond_to_missing? so respond_to? and
# method(:encrypt_email) work for the dynamic encrypt_*/decrypt_* calls.
def respond_to_missing?(method, include_private = false)
  (method.to_s =~ /^(?:en|de)crypt_(.+)$/ && attr_encrypted?($1)) || super
end
module InstanceMethods
# Decrypts a value for the attribute specified using options evaluated in the current object's scope
#
# Example
#
# class User
# attr_accessor :secret_key
# attr_encrypted :email, :key => :secret_key
#
# def initialize(secret_key)
# self.secret_key = secret_key
# end
# end
#
# @user = User.new('some-secret-key')
# @user.decrypt(:email, 'SOME_ENCRYPTED_EMAIL_STRING')
def decrypt(attribute, encrypted_value)
self.class.decrypt(attribute, encrypted_value, evaluated_attr_encrypted_options_for(attribute))
end
# Encrypts a value for the attribute specified using options evaluated in the current object's scope
#
# Example
#
# class User
# attr_accessor :secret_key
# attr_encrypted :email, :key => :secret_key
#
# def initialize(secret_key)
# self.secret_key = secret_key
# end
# end
#
# @user = User.new('some-secret-key')
# @user.encrypt(:email, 'test@example.com')
def encrypt(attribute, value)
self.class.encrypt(attribute, value, evaluated_attr_encrypted_options_for(attribute))
end
protected
# Returns attr_encrypted options evaluated in the current object's scope for the attribute specified
def evaluated_attr_encrypted_options_for(attribute)
if evaluate_attr_encrypted_option(self.class.encrypted_attributes[attribute.to_sym][:mode]) == :per_attribute_iv_and_salt
load_iv_for_attribute(attribute, self.class.encrypted_attributes[attribute.to_sym][:algorithm])
load_salt_for_attribute(attribute)
end
self.class.encrypted_attributes[attribute.to_sym].inject({}) { |hash, (option, value)| hash[option] = evaluate_attr_encrypted_option(value); hash }
end
# Evaluates symbol (method reference) or proc (responds to call) options
#
# If the option is not a symbol or proc then the original option is returned
# Resolve one option value in this instance's scope. Symbols naming an
# instance method are invoked, callables are called with self, and any
# other value passes through unchanged.
def evaluate_attr_encrypted_option(option)
  return send(option) if option.is_a?(Symbol) && respond_to?(option)
  return option.call(self) if option.respond_to?(:call)
  option
end
# Lazily generate and memoize a per-attribute IV: when no IV is stored
# yet, create a random one for the (possibly defaulted) algorithm,
# base64-pack it and persist it via the iv writer. The unpacked IV is
# then merged into the attribute's options for the encryptor.
def load_iv_for_attribute(attribute, algorithm)
  encrypted_attribute_name = self.class.encrypted_attributes[attribute.to_sym][:attribute]
  iv = send("#{encrypted_attribute_name}_iv")
  if iv.nil?
    begin
      algorithm ||= "aes-256-cbc"
      # OpenSSL::Cipher::Cipher is a deprecated alias; use OpenSSL::Cipher
      algo = OpenSSL::Cipher.new(algorithm)
      iv = [algo.random_iv].pack("m")
      send("#{encrypted_attribute_name}_iv=", iv)
    rescue RuntimeError
      # best-effort by design: an unsupported algorithm leaves iv unset
      # and falls back to the encryptor's default behavior
    end
  end
  self.class.encrypted_attributes[attribute.to_sym] = self.class.encrypted_attributes[attribute.to_sym].merge(:iv => iv.unpack("m").first) if (iv && !iv.empty?)
end
# Lazily generate and memoize a per-attribute salt via SecureRandom,
# persisting it through the salt writer and merging it into the options.
# NOTE(review): relies on SecureRandom being loaded elsewhere; this file
# does not require 'securerandom' itself -- confirm.
def load_salt_for_attribute(attribute)
encrypted_attribute_name = self.class.encrypted_attributes[attribute.to_sym][:attribute]
salt = send("#{encrypted_attribute_name}_salt") || send("#{encrypted_attribute_name}_salt=", SecureRandom.hex)
self.class.encrypted_attributes[attribute.to_sym] = self.class.encrypted_attributes[attribute.to_sym].merge(:salt => salt)
end
end
protected
# All instance methods, normalized to symbols, for collision checks when
# defining accessors.
def attribute_instance_methods_as_symbols
  instance_methods.map { |m| m.to_sym }
end
end
Object.extend AttrEncrypted
Dir[File.join(File.dirname(__FILE__), 'attr_encrypted', 'adapters', '*.rb')].each { |adapter| require adapter }
|
module Avatax
  # Gem version string; frozen to prevent accidental mutation of the
  # shared constant.
  VERSION = "0.0.1".freeze
end
Bump version to 0.0.2.
module Avatax
  # Gem version string; frozen to prevent accidental mutation of the
  # shared constant.
  VERSION = "0.0.2".freeze
end
|
require 'aws-sdk'
require 'aws-eni/errors'
require 'aws-eni/meta'
module Aws
module ENI
module Client
extend self
# determine the region from instance metadata
# Derive the AWS region by stripping the trailing availability-zone
# letter from instance metadata (e.g. "us-east-1a" -> "us-east-1").
def region
  Meta.instance('placement/availability-zone').sub(/[a-z]$/, '')
rescue Errors::MetaConnectionFailed
  raise Errors::EnvironmentError, 'Unable to load EC2 meta-data'
end
# determine the vpc cidr block from instance metadata
def vpc_cidr
# the first listed MAC identifies the primary interface; its metadata
# entry carries the VPC's IPv4 CIDR block
hwaddr = Meta.instance('network/interfaces/macs/').lines.first.strip.chomp('/')
Meta.interface(hwaddr, 'vpc-ipv4-cidr-block')
rescue Errors::MetaConnectionFailed, Errors::MetaNotFound
raise Errors::EnvironmentError, 'Unable to load EC2 meta-data'
end
# lazy-load our ec2 client
# Lazily construct and memoize the EC2 API client for this region.
# Any failure (metadata unavailable, bad credentials, etc.) surfaces
# as an EnvironmentError. (The previous rescue bound the exception to
# an unused local.)
def client
  @client ||= EC2::Client.new(region: region)
rescue StandardError
  raise Errors::EnvironmentError, 'Unable to initialize EC2 client'
end
# pass along method calls to our lazy-loaded api client
# Delegate unknown methods to the lazily-loaded EC2 client, translating
# SDK errors into this library's error hierarchy. The caller's block is
# now forwarded too (the old signature silently dropped it).
def method_missing(method, *args, &block)
  client.public_send(method, *args, &block)
rescue EC2::Errors::UnauthorizedOperation => e
  raise Errors::ClientPermissionError, "Operation not permitted: #{e.message}"
rescue EC2::Errors::ServiceError => e
  error = e.class.to_s.gsub(/^.*::/, '')
  raise Errors::ClientOperationError, "EC2 service error (#{error}: #{e.message})"
end

# Keep respond_to? truthful for the delegated client methods.
def respond_to_missing?(method, include_private = false)
  client.respond_to?(method, include_private) || super
end
# retrieve a single interface resource
def describe_interface(id)
# query by filter; an unknown id yields an empty list which we turn
# into our own UnknownInterface error
resp = describe_network_interfaces(filters: [{ name: 'network-interface-id', values: [id] }])
raise Errors::UnknownInterface, "Interface #{id} could not be located" if resp[:network_interfaces].empty?
resp[:network_interfaces].first
end
# retrieve a single address resource by public ip, associated private ip,
# allocation id, or association id
def describe_address(address)
# choose the EC2 filter key from the identifier's format
filter_by = case address
when /^eipalloc-/
'allocation-id'
when /^eipassoc-/
'association-id'
else
# a bare IP: inside the VPC CIDR it's a private IP, otherwise public
if IPAddr.new(vpc_cidr) === IPAddr.new(address)
'private-ip-address'
else
'public-ip'
end
end
resp = describe_addresses(filters: [
{ name: 'domain', values: ['vpc'] },
{ name: filter_by, values: [address] }
])
raise Errors::UnknownAddress, "No EIP with #{address} could be located" if resp[:addresses].empty?
resp[:addresses].first
end
# retrieve a list of available addresses
# EIPs in the VPC domain that are not currently associated with any
# interface.
def available_addresses
  vpc_only = [{ name: 'domain', values: ['vpc'] }]
  describe_addresses(filters: vpc_only)[:addresses].reject(&:association_id)
end
# retrieve an array of private ips associated with the given interface
def interface_private_ips(id)
interface = describe_interface(id)
# returns nil when the interface reports no private_ip_addresses key
if interface[:private_ip_addresses]
primary = interface[:private_ip_addresses].find { |ip| ip[:primary] }
interface[:private_ip_addresses].map { |ip| ip[:private_ip_address] }.tap do |ips|
# ensure primary ip is first in the list
# (delete returns the removed ip; the splat makes unshift a no-op if
# the primary were somehow absent)
ips.unshift(*ips.delete(primary[:private_ip_address])) if primary
end
end
end
# determine whether a given interface is attached or free
# True when the interface's API status reports it attached to an instance.
def interface_attached(id)
  'in-use' == describe_interface(id)[:status]
end
# test whether we have the appropriate permissions within our AWS access
# credentials to perform all possible API calls
# Verify that the current AWS credentials permit every API call this
# library makes, using dry_run requests with dummy resource IDs.
#
# FIX: the catch-all `rescue` used to swallow non-AWS errors (bugs,
# network-stack exceptions) and mislabel them as ClientOperationError;
# it is narrowed to EC2::Errors::ServiceError so anything else
# propagates unchanged.
def has_access?
  # representative parameters for each call; the IDs are dummies since
  # every request is made with dry_run: true
  test_methods = {
    describe_network_interfaces: {},
    create_network_interface: {
      subnet_id: 'subnet-abcd1234'
    },
    attach_network_interface: {
      network_interface_id: 'eni-abcd1234',
      instance_id: 'i-abcd1234',
      device_index: 0
    },
    detach_network_interface: {
      attachment_id: 'eni-attach-abcd1234'
    },
    delete_network_interface: {
      network_interface_id: 'eni-abcd1234'
    },
    create_tags: {
      resources: ['eni-abcd1234'],
      tags: []
    },
    describe_addresses: {},
    allocate_address: {},
    release_address: {
      allocation_id: 'eipalloc-abcd1234'
    },
    associate_address: {
      allocation_id: 'eipalloc-abcd1234',
      network_interface_id: 'eni-abcd1234'
    },
    disassociate_address: {
      association_id: 'eipassoc-abcd1234'
    }
    # these have no dry_run method
    # assign_private_ip_addresses: {
    #   network_interface_id: 'eni-abcd1234'
    # }
    # unassign_private_ip_addresses: {
    #   network_interface_id: 'eni-abcd1234',
    #   private_ip_addresses: ['0.0.0.0']
    # }
  }
  test_methods.all? do |method, params|
    begin
      client.public_send(method, params.merge(dry_run: true))
    rescue EC2::Errors::DryRunOperation
      # the dry run was accepted: we are authorized for this call
      true
    rescue EC2::Errors::InvalidAllocationIDNotFound, EC2::Errors::InvalidAssociationIDNotFound
      # authorization was checked before the dummy ID was rejected
      true
    rescue EC2::Errors::UnauthorizedOperation
      false
    rescue EC2::Errors::ServiceError
      raise Errors::ClientOperationError, 'Unexpected behavior while testing EC2 client permissions'
    else
      # a dry run should never actually succeed
      raise Errors::ClientOperationError, 'Unexpected behavior while testing EC2 client permissions'
    end
  end
end
end
end
end
Pass any non-AWS errors through unchanged when testing client access.
require 'aws-sdk'
require 'aws-eni/errors'
require 'aws-eni/meta'
module Aws
  module ENI
    module Client
      extend self

      # Determine the region from instance metadata by stripping the trailing
      # zone letter from the availability zone (e.g. "us-east-1a" -> "us-east-1").
      # @raise [Errors::EnvironmentError] when meta-data is unreachable
      def region
        Meta.instance('placement/availability-zone').sub(/^(.*)[a-z]$/,'\1')
      rescue Errors::MetaConnectionFailed
        raise Errors::EnvironmentError, 'Unable to load EC2 meta-data'
      end

      # Determine the VPC CIDR block from instance metadata, using the first
      # listed MAC address to locate the interface record.
      def vpc_cidr
        hwaddr = Meta.instance('network/interfaces/macs/').lines.first.strip.chomp('/')
        Meta.interface(hwaddr, 'vpc-ipv4-cidr-block')
      rescue Errors::MetaConnectionFailed, Errors::MetaNotFound
        raise Errors::EnvironmentError, 'Unable to load EC2 meta-data'
      end

      # Lazy-load our EC2 client.
      def client
        @client ||= EC2::Client.new(region: region)
      rescue StandardError => e
        # Re-raise our own service errors untouched; wrap anything else.
        # BUG FIX: was `raise if e === Errors::ServiceError` — with an
        # exception instance as receiver, #=== is plain #==, which is never
        # true against a class, so library errors were wrongly wrapped too.
        raise if e.is_a?(Errors::ServiceError)
        raise Errors::EnvironmentError, 'Unable to initialize EC2 client'
      end

      # Pass along method calls to our lazy-loaded API client, translating
      # AWS SDK errors into this library's error types.
      def method_missing(method, *args)
        client.public_send(method, *args)
      rescue EC2::Errors::UnauthorizedOperation => e
        raise Errors::ClientPermissionError, "Operation not permitted: #{e.message}"
      rescue EC2::Errors::ServiceError => e
        error = e.class.to_s.gsub(/^.*::/, '')
        raise Errors::ClientOperationError, "EC2 service error (#{error}: #{e.message})"
      end

      # Retrieve a single interface resource.
      # @raise [Errors::UnknownInterface] when no interface matches +id+
      def describe_interface(id)
        resp = describe_network_interfaces(filters: [{ name: 'network-interface-id', values: [id] }])
        raise Errors::UnknownInterface, "Interface #{id} could not be located" if resp[:network_interfaces].empty?
        resp[:network_interfaces].first
      end

      # Retrieve a single address resource by public ip, associated private ip,
      # allocation id, or association id.
      # @raise [Errors::UnknownAddress] when nothing matches
      def describe_address(address)
        filter_by = case address
        when /^eipalloc-/
          'allocation-id'
        when /^eipassoc-/
          'association-id'
        else
          # an ip inside the VPC CIDR is a private address; otherwise public
          if IPAddr.new(vpc_cidr) === IPAddr.new(address)
            'private-ip-address'
          else
            'public-ip'
          end
        end
        resp = describe_addresses(filters: [
          { name: 'domain', values: ['vpc'] },
          { name: filter_by, values: [address] }
        ])
        raise Errors::UnknownAddress, "No EIP with #{address} could be located" if resp[:addresses].empty?
        resp[:addresses].first
      end

      # Retrieve a list of VPC elastic IPs not currently associated with anything.
      def available_addresses
        filters = [{ name: 'domain', values: ['vpc'] }]
        describe_addresses(filters: filters)[:addresses].select{ |addr| !addr.association_id }
      end

      # Retrieve an array of private ips associated with the given interface,
      # with the primary ip (when present) moved to the front.
      def interface_private_ips(id)
        interface = describe_interface(id)
        if interface[:private_ip_addresses]
          primary = interface[:private_ip_addresses].find { |ip| ip[:primary] }
          interface[:private_ip_addresses].map { |ip| ip[:private_ip_address] }.tap do |ips|
            # ensure primary ip is first in the list
            ips.unshift(*ips.delete(primary[:private_ip_address])) if primary
          end
        end
      end

      # Determine whether a given interface is attached ('in-use') or free.
      def interface_attached(id)
        describe_interface(id)[:status] == 'in-use'
      end

      # Test whether we have the appropriate permissions within our AWS access
      # credentials to perform all possible API calls. All calls are dry-run,
      # so nothing is actually modified.
      def has_access?
        test_methods = {
          describe_network_interfaces: {},
          create_network_interface: {
            subnet_id: 'subnet-abcd1234'
          },
          attach_network_interface: {
            network_interface_id: 'eni-abcd1234',
            instance_id: 'i-abcd1234',
            device_index: 0
          },
          detach_network_interface: {
            attachment_id: 'eni-attach-abcd1234'
          },
          delete_network_interface: {
            network_interface_id: 'eni-abcd1234'
          },
          create_tags: {
            resources: ['eni-abcd1234'],
            tags: []
          },
          describe_addresses: {},
          allocate_address: {},
          release_address: {
            allocation_id: 'eipalloc-abcd1234'
          },
          associate_address: {
            allocation_id: 'eipalloc-abcd1234',
            network_interface_id: 'eni-abcd1234'
          },
          disassociate_address: {
            association_id: 'eipassoc-abcd1234'
          }
          # these have no dry_run method
          # assign_private_ip_addresses: {
          #   network_interface_id: 'eni-abcd1234'
          # }
          # unassign_private_ip_addresses: {
          #   network_interface_id: 'eni-abcd1234',
          #   private_ip_addresses: ['0.0.0.0']
          # }
        }
        test_methods.all? do |method, params|
          begin
            client.public_send(method, params.merge(dry_run: true))
          rescue EC2::Errors::DryRunOperation
            true
          rescue EC2::Errors::InvalidAllocationIDNotFound, EC2::Errors::InvalidAssociationIDNotFound
            true
          rescue EC2::Errors::UnauthorizedOperation
            false
          rescue EC2::Errors::ServiceError
            raise Errors::ClientOperationError, 'Unexpected behavior while testing EC2 client permissions'
          else
            # a dry-run call should always raise; falling through is unexpected
            raise Errors::ClientOperationError, 'Unexpected behavior while testing EC2 client permissions'
          end
        end
      end
    end
  end
end
|
module Awspec
# Released version of the awspec gem.
VERSION = '0.53.0'
end
Bump up version number
module Awspec
# Released version of the awspec gem.
VERSION = '0.54.0'
end
|
# frozen_string_literal: true
module Awspec
# Released version of the awspec gem.
VERSION = '1.27.1'
end
Bump up version number
# frozen_string_literal: true
module Awspec
# Released version of the awspec gem.
VERSION = '1.28.0'
end
|
module Awspec
# Released version of the awspec gem.
VERSION = '1.12.0'
end
Bump up version number
module Awspec
# Released version of the awspec gem.
VERSION = '1.12.1'
end
|
module Bipbip
# Released version of the bipbip gem.
VERSION = '0.6.9'
end
Bumped version
module Bipbip
# Released version of the bipbip gem.
VERSION = '0.6.10'
end
|
module Bitcoin
  # bitcoin script
  class Script
    include Bitcoin::Opcodes

    WITNESS_VERSION = 0x00

    attr_accessor :chunks

    def initialize
      @chunks = []
    end

    # generate P2PKH script
    def self.to_p2pkh(pubkey_hash)
      new << OP_DUP << OP_HASH160 << pubkey_hash << OP_EQUALVERIFY << OP_CHECKSIG
    end

    # generate P2WPKH script
    def self.to_p2wpkh(pubkey_hash)
      new << WITNESS_VERSION << pubkey_hash
    end

    # parse a serialized script payload into a Script instance.
    def self.parse_from_payload(payload)
      s = new
      buf = StringIO.new(payload)
      until buf.eof?
        opcode = buf.read(1)
        if opcode?(opcode)
          s << opcode.ord
        else
          pushcode = opcode.ord
          # BUG FIX: the length bytes must be unpacked to an Integer before
          # being passed to buf.read (mirrors append_data's 'CC'/'Cv'/'CV'
          # packing); previously the raw String was passed, raising TypeError
          # for any push of OP_PUSHDATA1 or larger.
          len = case pushcode
                when OP_PUSHDATA1
                  buf.read(1).unpack('C').first
                when OP_PUSHDATA2
                  buf.read(2).unpack('v').first
                when OP_PUSHDATA4
                  buf.read(4).unpack('V').first
                else
                  # a direct push: the opcode itself is the byte count
                  pushcode if pushcode < OP_PUSHDATA1
                end
          s << buf.read(len).bth if len
        end
      end
      s
    end

    def to_payload
      chunks.join
    end

    # convert to an address string; nil unless this is a standard
    # p2pkh or p2wpkh script.
    def to_addr
      return p2pkh_addr if p2pkh?
      return p2wpkh_addr if p2wpkh?
      # BUG FIX: this line previously re-tested p2wpkh? (dead code after the
      # return above); p2wsh? is clearly what was intended.
      return nil if p2wsh?
      return nil if p2sh?
    end

    # whether this script is a P2PKH format script.
    def p2pkh?
      return false unless chunks.size == 5
      [OP_DUP, OP_HASH160, OP_EQUALVERIFY, OP_CHECKSIG] ==
      (chunks[0..1]+ chunks[3..4]).map(&:ord) && chunks[2].bytesize == 21
    end

    # whether this script is a P2WPKH format script.
    def p2wpkh?
      return false unless chunks.size == 2
      chunks[0].ord == WITNESS_VERSION && chunks[1].bytesize == 21
    end

    def p2wsh?
      false
    end

    def p2sh?
      false
    end

    # append object to payload
    def <<(obj)
      if obj.is_a?(Integer)
        append_opcode(obj)
      elsif obj.is_a?(String)
        append_data(obj.b)
      end
    end

    # append opcode to payload
    # @param [Integer] opcode append opcode which defined by Bitcoin::Opcodes
    # @return [Script] return self
    def append_opcode(opcode)
      raise ArgumentError, "specified invalid opcode #{opcode}." unless Opcodes.defined?(opcode)
      chunks << opcode.chr
      self
    end

    # append data to payload with pushdata opcode
    # @param [String] data append data. this data is not binary
    # @return [Script] return self
    def append_data(data)
      data = data.htb
      size = data.bytesize
      header = if size < OP_PUSHDATA1
                 [size].pack('C')
               elsif size < 0xff
                 [OP_PUSHDATA1, size].pack('CC')
               elsif size < 0xffff
                 [OP_PUSHDATA2, size].pack('Cv')
               elsif size < 0xffffffff
                 [OP_PUSHDATA4, size].pack('CV')
               else
                 raise ArgumentError, 'data size is too big.'
               end
      chunks << (header + data)
      self
    end

    def to_s
      str = chunks.map { |c| Script.opcode?(c) ? Opcodes.opcode_to_name(c.ord) : Script.pushed_data(c) }.join(' ')
      # OP_0 may be rendered as OP_0 or OP_FALSE depending on the opcode
      # table in use; normalize the witness version to '0' either way.
      str.gsub!(/OP_0/, '0') if p2wpkh? || p2wsh?
      str.gsub!(/OP_FALSE/, '0') if p2wpkh? || p2wsh?
      str
    end

    # determine whether the data is an opcode.
    def self.opcode?(data)
      !pushdata?(data)
    end

    # determine whether the data is a pushdata.
    def self.pushdata?(data)
      # the minimum value of opcode is pushdata operation.
      first_byte = data.each_byte.next
      OP_0 < first_byte && first_byte <= OP_PUSHDATA4
    end

    # get pushed data in pushdata bytes
    def self.pushed_data(data)
      opcode = data.each_byte.next
      offset = 1
      case opcode
      when OP_PUSHDATA1
        offset += 1
      when OP_PUSHDATA2
        offset += 2
      when OP_PUSHDATA4
        offset += 4
      end
      data[offset..-1].bth
    end

    private

    # generate p2pkh address. if script does not p2pkh, return nil.
    def p2pkh_addr
      return nil unless p2pkh?
      hash160 = Script.pushed_data(chunks[2])
      return nil unless hash160.htb.bytesize == 20
      hex = Bitcoin.chain_params.address_version + hash160
      Bitcoin.encode_base58_address(hex)
    end

    # generate p2wpkh address. if script does not p2wpkh, return nil.
    def p2wpkh_addr
      return nil unless p2wpkh?
      segwit_addr = Bech32::SegwitAddr.new
      segwit_addr.hrp = Bitcoin.chain_params.bech32_hrp
      segwit_addr.script_pubkey = to_payload.bth
      segwit_addr.addr
    end
  end
end
Fix ruby 2.3 error
module Bitcoin
  # bitcoin script
  class Script
    include Bitcoin::Opcodes

    WITNESS_VERSION = 0x00

    attr_accessor :chunks

    def initialize
      @chunks = []
    end

    # generate P2PKH script
    def self.to_p2pkh(pubkey_hash)
      new << OP_DUP << OP_HASH160 << pubkey_hash << OP_EQUALVERIFY << OP_CHECKSIG
    end

    # generate P2WPKH script
    def self.to_p2wpkh(pubkey_hash)
      new << WITNESS_VERSION << pubkey_hash
    end

    # parse a serialized script payload into a Script instance.
    def self.parse_from_payload(payload)
      s = new
      buf = StringIO.new(payload)
      until buf.eof?
        opcode = buf.read(1)
        if opcode?(opcode)
          s << opcode.ord
        else
          pushcode = opcode.ord
          # BUG FIX: the length bytes must be unpacked to an Integer before
          # being passed to buf.read (mirrors append_data's 'CC'/'Cv'/'CV'
          # packing); previously the raw String was passed, raising TypeError
          # for any push of OP_PUSHDATA1 or larger.
          len = case pushcode
                when OP_PUSHDATA1
                  buf.read(1).unpack('C').first
                when OP_PUSHDATA2
                  buf.read(2).unpack('v').first
                when OP_PUSHDATA4
                  buf.read(4).unpack('V').first
                else
                  # a direct push: the opcode itself is the byte count
                  pushcode if pushcode < OP_PUSHDATA1
                end
          s << buf.read(len).bth if len
        end
      end
      s
    end

    def to_payload
      chunks.join
    end

    # convert to an address string; nil unless this is a standard
    # p2pkh or p2wpkh script.
    def to_addr
      return p2pkh_addr if p2pkh?
      return p2wpkh_addr if p2wpkh?
      # BUG FIX: this line previously re-tested p2wpkh? (dead code after the
      # return above); p2wsh? is clearly what was intended.
      return nil if p2wsh?
      return nil if p2sh?
    end

    # whether this script is a P2PKH format script.
    def p2pkh?
      return false unless chunks.size == 5
      [OP_DUP, OP_HASH160, OP_EQUALVERIFY, OP_CHECKSIG] ==
      (chunks[0..1]+ chunks[3..4]).map(&:ord) && chunks[2].bytesize == 21
    end

    # whether this script is a P2WPKH format script.
    def p2wpkh?
      return false unless chunks.size == 2
      chunks[0].ord == WITNESS_VERSION && chunks[1].bytesize == 21
    end

    def p2wsh?
      false
    end

    def p2sh?
      false
    end

    # append object to payload
    def <<(obj)
      if obj.is_a?(Integer)
        append_opcode(obj)
      elsif obj.is_a?(String)
        append_data(obj.b)
      end
    end

    # append opcode to payload
    # @param [Integer] opcode append opcode which defined by Bitcoin::Opcodes
    # @return [Script] return self
    def append_opcode(opcode)
      raise ArgumentError, "specified invalid opcode #{opcode}." unless Opcodes.defined?(opcode)
      chunks << opcode.chr
      self
    end

    # append data to payload with pushdata opcode
    # @param [String] data append data. this data is not binary
    # @return [Script] return self
    def append_data(data)
      data = data.htb
      size = data.bytesize
      header = if size < OP_PUSHDATA1
                 [size].pack('C')
               elsif size < 0xff
                 [OP_PUSHDATA1, size].pack('CC')
               elsif size < 0xffff
                 [OP_PUSHDATA2, size].pack('Cv')
               elsif size < 0xffffffff
                 [OP_PUSHDATA4, size].pack('CV')
               else
                 raise ArgumentError, 'data size is too big.'
               end
      chunks << (header + data)
      self
    end

    def to_s
      str = chunks.map { |c| Script.opcode?(c) ? Opcodes.opcode_to_name(c.ord) : Script.pushed_data(c) }.join(' ')
      # OP_0 may be rendered as OP_0 or OP_FALSE depending on the opcode
      # table in use; normalize the witness version to '0' either way.
      str.gsub!(/OP_0/, '0') if p2wpkh? || p2wsh?
      str.gsub!(/OP_FALSE/, '0') if p2wpkh? || p2wsh?
      str
    end

    # determine whether the data is an opcode.
    def self.opcode?(data)
      !pushdata?(data)
    end

    # determine whether the data is a pushdata.
    def self.pushdata?(data)
      # the minimum value of opcode is pushdata operation.
      first_byte = data.each_byte.next
      OP_0 < first_byte && first_byte <= OP_PUSHDATA4
    end

    # get pushed data in pushdata bytes
    def self.pushed_data(data)
      opcode = data.each_byte.next
      offset = 1
      case opcode
      when OP_PUSHDATA1
        offset += 1
      when OP_PUSHDATA2
        offset += 2
      when OP_PUSHDATA4
        offset += 4
      end
      data[offset..-1].bth
    end

    private

    # generate p2pkh address. if script does not p2pkh, return nil.
    def p2pkh_addr
      return nil unless p2pkh?
      hash160 = Script.pushed_data(chunks[2])
      return nil unless hash160.htb.bytesize == 20
      hex = Bitcoin.chain_params.address_version + hash160
      Bitcoin.encode_base58_address(hex)
    end

    # generate p2wpkh address. if script does not p2wpkh, return nil.
    def p2wpkh_addr
      return nil unless p2wpkh?
      segwit_addr = Bech32::SegwitAddr.new
      segwit_addr.hrp = Bitcoin.chain_params.bech32_hrp
      segwit_addr.script_pubkey = to_payload.bth
      segwit_addr.addr
    end
  end
end
module Blendris
  # Model is the main driver for Blendris. All Blendris objects
  # will inherit from it to function as a database model.
  class Model
    include RedisAccessor

    attr_reader :key

    # Instantiate a new instance of this model. We do some basic
    # checking to make sure that this object already exists in Redis
    # as the requested type. This is to prevent keys being used in
    # the wrong way.
    # If the :verify option isn't set to false, then each field of
    # this model is also verified.
    def initialize(new_key, options = {})
      @key = sanitize_key(new_key)
      actual_type = constantize(redis.get(key))
      raise ArgumentError.new("#{self.class.name} second argument must be a hash") unless options.kind_of? Hash
      raise TypeError.new("#{key} does not exist, not a #{self.class.name} - you may want create instead of new") if !actual_type
      raise TypeError.new("#{key} is a #{actual_type}, not a #{self.class.name}") if actual_type != self.class
      if options[:verify] != false
        parameters = self.class.local_parameters.find_all {|s| s.kind_of? Symbol}
        dne = parameters.find {|p| not self.send(p.to_s)}
        raise ArgumentError.new("#{self.class.name} #{key} is missing its #{dne}") if dne
        raise ArgumentError.new("blank keys are not allowed") if @key.length == 0
      end
    end

    # An object's id is considered to be the SHA1 digest of its key. This is
    # to ensure that all objects that represent the same key return the same id.
    def id
      Digest::SHA1.hexdigest key
    end

    # Look up the given symbol by its name. The list of symbols are defined
    # when the model is declared.
    def [](name)
      name = name.to_s
      subkey = self.subkey(name)
      options = self.class.redis_symbols[name]
      return unless options
      on_change = lambda { self.fire_on_change_for name }
      options = options.merge(:model => self, :on_change => on_change)
      options[:type].new subkey, options
    end

    # Calculate the key to address the given child node.
    def subkey(child)
      sanitize_key "#{self.key}:#{child}"
    end

    # Compare two instances. If two instances have the same class and key, they are equal.
    def ==(other)
      return false unless self.class == other.class
      return self.key == other.key
    end

    # Return a list of field names for this model.
    def fields
      self.class.redis_symbols.map {|name, _options| name.to_s}
    end

    # Fire the list of blocks called when the given symbol changes.
    def fire_on_change_for(symbol)
      blocks = [ self.class.on_change_table[nil], self.class.on_change_table[symbol.to_s] ]
      blocks.flatten!
      blocks.compact!
      blocks.each do |block|
        self.instance_exec symbol.to_s, &block
      end
    end

    class << self
      include RedisAccessor
      include Enumerable

      # This method will instantiate a new object with the correct key
      # and assign the values passed to it.
      def create(*args)
        parameters = local_parameters.find_all {|s| s.kind_of? Symbol}
        got = args.count
        wanted = parameters.count
        if got != wanted
          # BUG FIX: self is the model class in this singleton context, so
          # use self.name; self.class.name would always report "Class".
          msg = "wrong number of arguments for a new #{self.name} (%d for %d)" % [ got, wanted ]
          raise ArgumentError.new(msg)
        end
        key = generate_key(self, args)
        current_model = redis.get(key)
        if current_model && current_model != self.name
          raise ArgumentError.new("#{key} is a #{current_model}, not a #{self.name}")
        end
        redis.set key, self.name
        redis.sadd index_key, key
        obj = new(key, :verify => false)
        parameters.each_with_index do |parm, i|
          obj[parm].set args[i]
        end
        obj
      end

      # Declare the fields that make up this model's key.
      def key(*fields)
        @local_parameters = fields
        @local_parameters.flatten!
        @local_parameters.compact!
        nil
      end

      # Yield each instance of this model recorded in the index set.
      def each
        RedisSet.new(index_key).each {|k| yield new(k)}
      end

      # The Redis set that indexes all instances of this model.
      def index_key
        "blendris:index:model:#{self.name}"
      end

      # Defines a new data type for Blendris::Model construction.
      def type(name, klass)
        (class << self; self; end).instance_eval do
          define_method(name) do |*args|
            varname = args.shift.to_s
            options = args.shift || {}
            options[:type] = klass
            redis_symbols[varname] = options
            # Declare the getter for this field.
            define_method(varname) do
              self[varname].get
            end
            # Declare the setter for this field, if it is not a key field.
            unless local_parameters.find {|p| p.to_s == varname}
              define_method("#{varname}=") do |value|
                self[varname].set value
              end
            end
          end
        end
      end

      # Variables stored in the Redis database.
      def redis_symbols
        @redis_symbols ||= {}
      end

      # Parameters used when creating a new copy of this model.
      def local_parameters
        @local_parameters ||= []
      end

      # Define a block to call when one of the given symbol values changes.
      def on_change(*symbols, &block)
        symbols.flatten!
        symbols.compact!
        if symbols.count == 0
          on_change_table[nil] ||= []
          on_change_table[nil] << block
        else
          symbols.each do |symbol|
            on_change_table[symbol.to_s] ||= []
            on_change_table[symbol.to_s] << block
          end
        end
      end

      # The hash of blocks called when fields on this object change.
      def on_change_table
        @on_change_table ||= {}
      end
    end
  end
end
Add ability to destroy model objects. Does not unlink the object from any refs pointing to them.
module Blendris
  # Model is the main driver for Blendris. All Blendris objects
  # will inherit from it to function as a database model.
  class Model
    include RedisAccessor

    attr_reader :key

    # Instantiate a new instance of this model. We do some basic
    # checking to make sure that this object already exists in Redis
    # as the requested type. This is to prevent keys being used in
    # the wrong way.
    # If the :verify option isn't set to false, then each field of
    # this model is also verified.
    def initialize(new_key, options = {})
      @key = sanitize_key(new_key)
      actual_type = constantize(redis.get(key))
      raise ArgumentError.new("#{self.class.name} second argument must be a hash") unless options.kind_of? Hash
      raise TypeError.new("#{key} does not exist, not a #{self.class.name} - you may want create instead of new") if !actual_type
      raise TypeError.new("#{key} is a #{actual_type}, not a #{self.class.name}") if actual_type != self.class
      if options[:verify] != false
        parameters = self.class.local_parameters.find_all {|s| s.kind_of? Symbol}
        dne = parameters.find {|p| not self.send(p.to_s)}
        raise ArgumentError.new("#{self.class.name} #{key} is missing its #{dne}") if dne
        raise ArgumentError.new("blank keys are not allowed") if @key.length == 0
      end
    end

    # An object's id is considered to be the SHA1 digest of its key. This is
    # to ensure that all objects that represent the same key return the same id.
    def id
      Digest::SHA1.hexdigest key
    end

    # Look up the given symbol by its name. The list of symbols are defined
    # when the model is declared.
    def [](name)
      name = name.to_s
      subkey = self.subkey(name)
      options = self.class.redis_symbols[name]
      return unless options
      on_change = lambda { self.fire_on_change_for name }
      options = options.merge(:model => self, :on_change => on_change)
      options[:type].new subkey, options
    end

    # Calculate the key to address the given child node.
    def subkey(child)
      sanitize_key "#{self.key}:#{child}"
    end

    # Compare two instances. If two instances have the same class and key, they are equal.
    def ==(other)
      return false unless self.class == other.class
      return self.key == other.key
    end

    # Return a list of field names for this model.
    def fields
      self.class.redis_symbols.map {|name, _options| name.to_s}
    end

    # Fire the list of blocks called when the given symbol changes.
    def fire_on_change_for(symbol)
      blocks = [ self.class.on_change_table[nil], self.class.on_change_table[symbol.to_s] ]
      blocks.flatten!
      blocks.compact!
      blocks.each do |block|
        self.instance_exec symbol.to_s, &block
      end
    end

    # Remove this object from Redis: clear each declared field, remove it
    # from the model index, then delete its type key. Does not unlink any
    # refs elsewhere that still point at this object.
    def destroy
      # block param renamed so it does not shadow the +key+ reader used below
      self.class.redis_symbols.keys.each { |field| self[field].clear }
      redis.srem self.class.index_key, key
      redis.del key
    end

    class << self
      include RedisAccessor
      include Enumerable

      # This method will instantiate a new object with the correct key
      # and assign the values passed to it.
      def create(*args)
        parameters = local_parameters.find_all {|s| s.kind_of? Symbol}
        got = args.count
        wanted = parameters.count
        if got != wanted
          # BUG FIX: self is the model class in this singleton context, so
          # use self.name; self.class.name would always report "Class".
          msg = "wrong number of arguments for a new #{self.name} (%d for %d)" % [ got, wanted ]
          raise ArgumentError.new(msg)
        end
        key = generate_key(self, args)
        current_model = redis.get(key)
        if current_model && current_model != self.name
          raise ArgumentError.new("#{key} is a #{current_model}, not a #{self.name}")
        end
        redis.set key, self.name
        redis.sadd index_key, key
        obj = new(key, :verify => false)
        parameters.each_with_index do |parm, i|
          obj[parm].set args[i]
        end
        obj
      end

      # Declare the fields that make up this model's key.
      def key(*fields)
        @local_parameters = fields
        @local_parameters.flatten!
        @local_parameters.compact!
        nil
      end

      # Yield each instance of this model recorded in the index set.
      def each
        RedisSet.new(index_key).each {|k| yield new(k)}
      end

      # The Redis set that indexes all instances of this model.
      def index_key
        "blendris:index:model:#{self.name}"
      end

      # Defines a new data type for Blendris::Model construction.
      def type(name, klass)
        (class << self; self; end).instance_eval do
          define_method(name) do |*args|
            varname = args.shift.to_s
            options = args.shift || {}
            options[:type] = klass
            redis_symbols[varname] = options
            # Declare the getter for this field.
            define_method(varname) do
              self[varname].get
            end
            # Declare the setter for this field, if it is not a key field.
            unless local_parameters.find {|p| p.to_s == varname}
              define_method("#{varname}=") do |value|
                self[varname].set value
              end
            end
          end
        end
      end

      # Variables stored in the Redis database.
      def redis_symbols
        @redis_symbols ||= {}
      end

      # Parameters used when creating a new copy of this model.
      def local_parameters
        @local_parameters ||= []
      end

      # Define a block to call when one of the given symbol values changes.
      def on_change(*symbols, &block)
        symbols.flatten!
        symbols.compact!
        if symbols.count == 0
          on_change_table[nil] ||= []
          on_change_table[nil] << block
        else
          symbols.each do |symbol|
            on_change_table[symbol.to_s] ||= []
            on_change_table[symbol.to_s] << block
          end
        end
      end

      # The hash of blocks called when fields on this object change.
      def on_change_table
        @on_change_table ||= {}
      end
    end
  end
end
|
module Blizzardry
  # FFI wrapper over StormLib for reading MPQ archives.
  class MPQ
    require 'blizzardry/mpq/file'
    require 'blizzardry/mpq/storm'

    def initialize(handle)
      @handle = handle
    end

    # Apply a patch archive on top of this archive.
    def patch(archive)
      Storm.SFileOpenPatchArchive(@handle, archive, nil, 0)
    end

    def patched?
      Storm.SFileIsPatchedArchive(@handle)
    end

    def close
      Storm.SFileCloseArchive(@handle)
    end

    def has?(filename)
      Storm.SFileHasFile(@handle, filename)
    end

    # Yield the name of every archived file matching +query+.
    def find(query)
      result = Storm::SearchResult.new
      handle = Storm.SFileFindFirstFile(@handle, query, result, nil)
      unless handle.null?
        yield result[:filename].to_s
        while Storm.SFileFindNextFile(handle, result)
          yield result[:filename].to_s
        end
      end
    ensure
      # only close a search handle we actually obtained; previously a null
      # handle was passed straight to SFileFindClose
      Storm.SFileFindClose(handle) if handle && !handle.null?
    end

    # Open a file within the archive; nil when it cannot be opened.
    def get(filename)
      handle = FFI::MemoryPointer.new :pointer
      if Storm.SFileOpenFileEx(@handle, filename, 0, handle)
        File.new(handle.read_pointer)
      end
    end

    def extract(filename, local)
      Storm.SFileExtractFile(@handle, filename, local, 0)
    end

    class << self
      private :new

      # Open an MPQ archive. BUG FIX: a failed SFileOpenArchive was
      # previously ignored, wrapping a garbage pointer; now returns nil.
      def open(archive)
        handle = FFI::MemoryPointer.new :pointer
        return unless Storm.SFileOpenArchive(archive, 0, 0, handle)
        mpq = new(handle.read_pointer)
        if block_given?
          yield mpq
          mpq.close
          nil
        else
          mpq
        end
      end
    end
  end
end
Return nil in case of MPQ open failure
module Blizzardry
  # FFI wrapper over StormLib for reading MPQ archives.
  class MPQ
    require 'blizzardry/mpq/file'
    require 'blizzardry/mpq/storm'

    def initialize(handle)
      @handle = handle
    end

    # Apply a patch archive on top of this archive.
    def patch(archive)
      Storm.SFileOpenPatchArchive(@handle, archive, nil, 0)
    end

    def patched?
      Storm.SFileIsPatchedArchive(@handle)
    end

    def close
      Storm.SFileCloseArchive(@handle)
    end

    def has?(filename)
      Storm.SFileHasFile(@handle, filename)
    end

    # Yield the name of every archived file matching +query+.
    def find(query)
      result = Storm::SearchResult.new
      handle = Storm.SFileFindFirstFile(@handle, query, result, nil)
      unless handle.null?
        yield result[:filename].to_s
        while Storm.SFileFindNextFile(handle, result)
          yield result[:filename].to_s
        end
      end
    ensure
      # only close a search handle we actually obtained; previously a null
      # handle was passed straight to SFileFindClose
      Storm.SFileFindClose(handle) if handle && !handle.null?
    end

    # Open a file within the archive; nil when it cannot be opened.
    def get(filename)
      handle = FFI::MemoryPointer.new :pointer
      if Storm.SFileOpenFileEx(@handle, filename, 0, handle)
        File.new(handle.read_pointer)
      end
    end

    def extract(filename, local)
      Storm.SFileExtractFile(@handle, filename, local, 0)
    end

    class << self
      private :new

      # Open an MPQ archive; returns nil if the archive cannot be opened.
      def open(archive)
        handle = FFI::MemoryPointer.new :pointer
        return unless Storm.SFileOpenArchive(archive, 0, 0, handle)
        mpq = new(handle.read_pointer)
        if block_given?
          yield mpq
          mpq.close
          nil
        else
          mpq
        end
      end
    end
  end
end
|
module Blizzardry
  # FFI wrapper over StormLib for reading MPQ archives.
  class MPQ
    require 'blizzardry/mpq/file'
    require 'blizzardry/mpq/storm'

    def initialize(handle)
      @handle = handle
    end

    # Apply a patch archive (with optional path prefix) on top of this archive.
    def patch(archive, prefix = nil)
      Storm.SFileOpenPatchArchive(@handle, archive, prefix, 0)
    end

    def patched?
      Storm.SFileIsPatchedArchive(@handle)
    end

    def close
      Storm.SFileCloseArchive(@handle)
    end

    def contains?(filename)
      Storm.SFileHasFile(@handle, filename)
    end

    # Collect (and optionally yield) every archived filename matching +query+.
    # @return [Array<String>] all matching filenames
    def find(query)
      results = []
      result = Storm::SearchResult.new
      handle = Storm.SFileFindFirstFile(@handle, query, result, nil)
      unless handle.null?
        results << result[:filename].to_s
        yield results.last if block_given?
        while Storm.SFileFindNextFile(handle, result)
          results << result[:filename].to_s
          yield results.last if block_given?
        end
      end
      results
    ensure
      # only close a search handle we actually obtained; previously a null
      # handle was passed straight to SFileFindClose
      Storm.SFileFindClose(handle) if handle && !handle.null?
    end

    # Open a file within the archive; nil when it cannot be opened.
    def get(filename)
      handle = FFI::MemoryPointer.new :pointer
      if Storm.SFileOpenFileEx(@handle, filename, 0, handle)
        File.new(handle.read_pointer)
      end
    end

    def extract(filename, local)
      Storm.SFileExtractFile(@handle, filename, local, 0)
    end

    class << self
      private :new

      # Open an MPQ archive; returns nil if the archive cannot be opened.
      def open(archive, flags = 0)
        handle = FFI::MemoryPointer.new :pointer
        return unless Storm.SFileOpenArchive(archive, 0, flags, handle)
        mpq = new(handle.read_pointer)
        if block_given?
          yield mpq
          mpq.close
          nil
        else
          mpq
        end
      end
    end
  end
end
Treat MPQ archive as an index of files
module Blizzardry
  # FFI wrapper over StormLib for reading MPQ archives.
  class MPQ
    require 'blizzardry/mpq/file'
    require 'blizzardry/mpq/storm'

    def initialize(handle)
      @handle = handle
    end

    # Apply a patch archive (with optional path prefix) on top of this archive.
    def patch(archive, prefix = nil)
      Storm.SFileOpenPatchArchive(@handle, archive, prefix, 0)
    end

    def patched?
      Storm.SFileIsPatchedArchive(@handle)
    end

    def close
      Storm.SFileCloseArchive(@handle)
    end

    def contains?(filename)
      Storm.SFileHasFile(@handle, filename)
    end

    # Collect (and optionally yield) every archived filename matching +query+.
    # @return [Array<String>] all matching filenames
    def find(query)
      results = []
      result = Storm::SearchResult.new
      handle = Storm.SFileFindFirstFile(@handle, query, result, nil)
      unless handle.null?
        results << result[:filename].to_s
        yield results.last if block_given?
        while Storm.SFileFindNextFile(handle, result)
          results << result[:filename].to_s
          yield results.last if block_given?
        end
      end
      results
    ensure
      # only close a search handle we actually obtained; previously a null
      # handle was passed straight to SFileFindClose
      Storm.SFileFindClose(handle) if handle && !handle.null?
    end

    # Index-style access: open a file within the archive; nil on failure.
    def [](filename)
      handle = FFI::MemoryPointer.new :pointer
      if Storm.SFileOpenFileEx(@handle, filename, 0, handle)
        File.new(handle.read_pointer)
      end
    end

    def extract(filename, local)
      Storm.SFileExtractFile(@handle, filename, local, 0)
    end

    class << self
      private :new

      # Open an MPQ archive; returns nil if the archive cannot be opened.
      def open(archive, flags = 0)
        handle = FFI::MemoryPointer.new :pointer
        return unless Storm.SFileOpenArchive(archive, 0, flags, handle)
        mpq = new(handle.read_pointer)
        if block_given?
          yield mpq
          mpq.close
          nil
        else
          mpq
        end
      end
    end
  end
end
|
module Bootstrap
  class << self
    # Inspired by Kaminari
    def load!
      register_compass_extension if compass?
      if rails?
        register_rails_engine
      end
      configure_sass
    end

    # Paths
    def gem_path
      @gem_path ||= File.expand_path '..', File.dirname(__FILE__)
    end

    def stylesheets_path
      File.join assets_path, 'stylesheets'
    end

    def fonts_path
      File.join assets_path, 'fonts'
    end

    def javascripts_path
      File.join assets_path, 'javascripts'
    end

    def assets_path
      @assets_path ||= File.join gem_path, 'assets'
    end

    # Environment detection helpers
    def asset_pipeline?
      defined?(::Sprockets)
    end

    def compass?
      defined?(::Compass)
    end

    def rails?
      defined?(::Rails)
    end

    private

    def configure_sass
      # BUG FIX: require unconditionally — a defined ::Sass constant does not
      # guarantee the sass library is fully loaded, and require is idempotent.
      require 'sass'
      ::Sass.load_paths << stylesheets_path
      # bootstrap requires minimum precision of 10, see https://github.com/twbs/bootstrap-sass/issues/409
      ::Sass::Script::Number.precision = [10, ::Sass::Script::Number.precision].max
    end

    def register_compass_extension
      ::Compass::Frameworks.register(
        'bootstrap',
        :path => gem_path,
        :stylesheets_directory => stylesheets_path,
        :templates_directory => File.join(gem_path, 'templates')
      )
    end

    def register_rails_engine
      require 'bootstrap-sass/engine'
    end
  end
end

Bootstrap.load!
refs #647
module Bootstrap
  class << self
    # Hook the gem into whichever frameworks are present (Compass and/or
    # Rails) and make our stylesheets visible to Sass. Inspired by Kaminari.
    def load!
      register_compass_extension if compass?
      register_rails_engine if rails?
      configure_sass
    end

    # -- Paths -------------------------------------------------------------

    def gem_path
      @gem_path ||= File.expand_path('..', File.dirname(__FILE__))
    end

    def stylesheets_path
      File.join(assets_path, 'stylesheets')
    end

    def fonts_path
      File.join(assets_path, 'fonts')
    end

    def javascripts_path
      File.join(assets_path, 'javascripts')
    end

    def assets_path
      @assets_path ||= File.join(gem_path, 'assets')
    end

    # -- Environment detection helpers ---------------------------------------

    def asset_pipeline?
      defined?(::Sprockets)
    end

    def compass?
      defined?(::Compass)
    end

    def rails?
      defined?(::Rails)
    end

    private

    def configure_sass
      require 'sass'
      ::Sass.load_paths << stylesheets_path
      # bootstrap requires minimum precision of 10, see https://github.com/twbs/bootstrap-sass/issues/409
      ::Sass::Script::Number.precision = [::Sass::Script::Number.precision, 10].max
    end

    def register_compass_extension
      ::Compass::Frameworks.register(
        'bootstrap',
        :path => gem_path,
        :stylesheets_directory => stylesheets_path,
        :templates_directory => File.join(gem_path, 'templates')
      )
    end

    def register_rails_engine
      require 'bootstrap-sass/engine'
    end
  end
end

Bootstrap.load!
|
#!/usr/bin/env ruby
# Command-line entry point: reads game-result files named on the command
# line, ranks the teams, prints the ranking, and writes it to the file
# named by the -o option.
require 'pp'
require_relative 'bs_optparse'
require_relative 'bs_file_accessor'
require_relative 'league_controller'
# parse! alters its argument, see method description for details
options = BsTeamRanker::BsOptparse.parse!(ARGV)
# NOTE(review): debug output left in — consider removing before release
pp options
pp ARGV
file_accessor = BsTeamRanker::BsFileAccessor.new
league_controller = BsTeamRanker::LeagueController.new
# Every remaining argument is an input file of game results.
ARGV.each do |infile|
puts "Adding games #{infile}"
games_string = file_accessor.string_from_file(infile, 'utf-8')
league_controller.add_games(games_string)
end
ranked_teams = league_controller.ranked_teams(league_controller.teams)
ranked_string = league_controller.ranked_teams_string(ranked_teams)
puts ranked_string
file_accessor.write(ranked_string, options.output_file_name)
Change bs_team_ranker.rb file permission to executable.
In Terminal, chmod +x bs_team_ranker.rb
Now the file can be run as $ ./bs_team_ranker.rb -o 'my_ranks.txt' 'sample-input.txt' instead of $ ruby bs_team_ranker.rb -o 'my_ranks.txt' 'sample-input.txt'
References
http://rubylearning.com/blog/2011/01/03/how-do-i-make-a-command-line-tool-in-ruby/
http://www.michaelrigart.be/en/blog/a-simple-ruby-command-line-tool.html
#!/usr/bin/env ruby
# Command-line entry point: reads game-result files named on the command
# line, ranks the teams, prints the ranking, and writes it to the file
# named by the -o option.
require 'pp'
require_relative 'bs_optparse'
require_relative 'bs_file_accessor'
require_relative 'league_controller'
# parse! alters its argument, see method description for details
options = BsTeamRanker::BsOptparse.parse!(ARGV)
# NOTE(review): debug output left in — consider removing before release
pp options
pp ARGV
file_accessor = BsTeamRanker::BsFileAccessor.new
league_controller = BsTeamRanker::LeagueController.new
# Every remaining argument is an input file of game results.
ARGV.each do |infile|
puts "Adding games #{infile}"
games_string = file_accessor.string_from_file(infile, 'utf-8')
league_controller.add_games(games_string)
end
ranked_teams = league_controller.ranked_teams(league_controller.teams)
ranked_string = league_controller.ranked_teams_string(ranked_teams)
puts ranked_string
file_accessor.write(ranked_string, options.output_file_name)
|
# encoding: UTF-8
# --
# Copyright (C) 2008-2010 10gen Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ++
module BSON
# A BSON seralizer/deserializer in pure Ruby.
class BSON_RUBY
# BSON element type tags written before each element in a document.
MINKEY = -1
EOO = 0
NUMBER = 1
STRING = 2
OBJECT = 3
ARRAY = 4
BINARY = 5
UNDEFINED = 6
OID = 7
BOOLEAN = 8
DATE = 9
NULL = 10
REGEX = 11
REF = 12
CODE = 13
SYMBOL = 14
CODE_W_SCOPE = 15
NUMBER_INT = 16
TIMESTAMP = 17
NUMBER_LONG = 18
MAXKEY = 127

# Each serializer instance owns one reusable byte buffer.
def initialize
@buf = ByteBuffer.new
end
if RUBY_VERSION >= '1.9'
NULL_BYTE = "\0".force_encoding('binary').freeze
UTF8_ENCODING = Encoding.find('utf-8')
BINARY_ENCODING = Encoding.find('binary')
# Transcode +str+ to UTF-8, then retag the bytes as binary so they can be
# appended to the byte buffer without further transcoding.
def self.to_utf8_binary(str)
str.encode(UTF8_ENCODING).force_encoding(BINARY_ENCODING)
end
else
NULL_BYTE = "\0"
# 1.8 path: unpack("U*") is used purely to validate the string is
# well-formed UTF-8; the original string is returned unchanged.
def self.to_utf8_binary(str)
begin
str.unpack("U*")
rescue => ex
raise InvalidStringEncoding, "String not valid utf-8: #{str.inspect}"
end
str
end
end
def self.serialize_cstr(buf, val)
buf.put_binary(to_utf8_binary(val.to_s))
buf.put_binary(NULL_BYTE)
end
def self.serialize_key(buf, key)
raise InvalidDocument, "Key names / regex patterns must not contain the NULL byte" if key.include? "\x00"
self.serialize_cstr(buf, key)
end
def to_a
@buf.to_a
end
def to_s
@buf.to_s
end
# Serializes an object.
# Implemented to ensure an API compatible with BSON extension.
def self.serialize(obj, check_keys=false, move_id=false)
new.serialize(obj, check_keys, move_id)
end
def self.deserialize(buf=nil)
new.deserialize(buf)
end
def serialize(obj, check_keys=false, move_id=false)
raise(InvalidDocument, "BSON.serialize takes a Hash but got a #{obj.class}") unless obj.is_a?(Hash)
raise "Document is null" unless obj
@buf.rewind
# put in a placeholder for the total size
@buf.put_int(0)
# Write key/value pairs. Always write _id first if it exists.
if move_id
if obj.has_key? '_id'
serialize_key_value('_id', obj['_id'], false)
elsif obj.has_key? :_id
serialize_key_value('_id', obj[:_id], false)
end
obj.each {|k, v| serialize_key_value(k, v, check_keys) unless k == '_id' || k == :_id }
else
if obj.has_key?('_id') && obj.has_key?(:_id)
obj['_id'] = obj.delete(:_id)
end
obj.each {|k, v| serialize_key_value(k, v, check_keys) }
end
serialize_eoo_element(@buf)
if @buf.size > 4 * 1024 * 1024
raise InvalidDocument, "Document is too large (#{@buf.size}). BSON documents are limited to 4MB (#{4 * 1024 * 1024})."
end
@buf.put_int(@buf.size, 0)
@buf
end
# Returns the array stored in the buffer.
# Implemented to ensure an API compatible with BSON extension.
def unpack(arg)
@buf.to_a
end
def serialize_key_value(k, v, check_keys)
k = k.to_s
if check_keys
if k[0] == ?$
raise InvalidKeyName.new("key #{k} must not start with '$'")
end
if k.include? ?.
raise InvalidKeyName.new("key #{k} must not contain '.'")
end
end
type = bson_type(v)
case type
when STRING, SYMBOL
serialize_string_element(@buf, k, v, type)
when NUMBER, NUMBER_INT
serialize_number_element(@buf, k, v, type)
when OBJECT
serialize_object_element(@buf, k, v, check_keys)
when OID
serialize_oid_element(@buf, k, v)
when ARRAY
serialize_array_element(@buf, k, v, check_keys)
when REGEX
serialize_regex_element(@buf, k, v)
when BOOLEAN
serialize_boolean_element(@buf, k, v)
when DATE
serialize_date_element(@buf, k, v)
when NULL
serialize_null_element(@buf, k)
when REF
serialize_dbref_element(@buf, k, v)
when BINARY
serialize_binary_element(@buf, k, v)
when UNDEFINED
serialize_null_element(@buf, k)
when CODE_W_SCOPE
serialize_code_w_scope(@buf, k, v)
when MAXKEY
serialize_max_key_element(@buf, k)
when MINKEY
serialize_min_key_element(@buf, k)
else
raise "unhandled type #{type}"
end
end
def deserialize(buf=nil)
# If buf is nil, use @buf, assumed to contain already-serialized BSON.
# This is only true during testing.
if buf.is_a? String
@buf = ByteBuffer.new(buf.unpack("C*")) if buf
else
@buf = ByteBuffer.new(buf.to_a) if buf
end
@buf.rewind
@buf.get_int # eat message size
doc = BSON::OrderedHash.new
while @buf.more?
type = @buf.get
case type
when STRING, CODE
key = deserialize_cstr(@buf)
doc[key] = deserialize_string_data(@buf)
when SYMBOL
key = deserialize_cstr(@buf)
doc[key] = deserialize_string_data(@buf).intern
when NUMBER
key = deserialize_cstr(@buf)
doc[key] = deserialize_number_data(@buf)
when NUMBER_INT
key = deserialize_cstr(@buf)
doc[key] = deserialize_number_int_data(@buf)
when NUMBER_LONG
key = deserialize_cstr(@buf)
doc[key] = deserialize_number_long_data(@buf)
when OID
key = deserialize_cstr(@buf)
doc[key] = deserialize_oid_data(@buf)
when ARRAY
key = deserialize_cstr(@buf)
doc[key] = deserialize_array_data(@buf)
when REGEX
key = deserialize_cstr(@buf)
doc[key] = deserialize_regex_data(@buf)
when OBJECT
key = deserialize_cstr(@buf)
doc[key] = deserialize_object_data(@buf)
when BOOLEAN
key = deserialize_cstr(@buf)
doc[key] = deserialize_boolean_data(@buf)
when DATE
key = deserialize_cstr(@buf)
doc[key] = deserialize_date_data(@buf)
when NULL
key = deserialize_cstr(@buf)
doc[key] = nil
when UNDEFINED
key = deserialize_cstr(@buf)
doc[key] = nil
when REF
key = deserialize_cstr(@buf)
doc[key] = deserialize_dbref_data(@buf)
when BINARY
key = deserialize_cstr(@buf)
doc[key] = deserialize_binary_data(@buf)
when CODE_W_SCOPE
key = deserialize_cstr(@buf)
doc[key] = deserialize_code_w_scope_data(@buf)
when TIMESTAMP
key = deserialize_cstr(@buf)
doc[key] = [deserialize_number_int_data(@buf),
deserialize_number_int_data(@buf)]
when MAXKEY
key = deserialize_cstr(@buf)
doc[key] = MaxKey.new
when MINKEY, 255 # This is currently easier than unpack the type byte as an unsigned char.
key = deserialize_cstr(@buf)
doc[key] = MinKey.new
when EOO
break
else
raise "Unknown type #{type}, key = #{key}"
end
end
@buf.rewind
doc
end
# For debugging.
def hex_dump
str = ''
@buf.to_a.each_with_index { |b,i|
if (i % 8) == 0
str << "\n" if i > 0
str << '%4d: ' % i
else
str << ' '
end
str << '%02X' % b
}
str
end
def deserialize_date_data(buf)
unsigned = buf.get_long()
# see note for deserialize_number_long_data below
milliseconds = unsigned >= 2 ** 64 / 2 ? unsigned - 2**64 : unsigned
Time.at(milliseconds.to_f / 1000.0).utc # at() takes fractional seconds
end
def deserialize_boolean_data(buf)
buf.get == 1
end
def deserialize_number_data(buf)
buf.get_double
end
def deserialize_number_int_data(buf)
# sometimes ruby makes me angry... why would the same code pack as signed
# but unpack as unsigned
unsigned = buf.get_int
unsigned >= 2**32 / 2 ? unsigned - 2**32 : unsigned
end
def deserialize_number_long_data(buf)
# same note as above applies here...
unsigned = buf.get_long
unsigned >= 2 ** 64 / 2 ? unsigned - 2**64 : unsigned
end
def deserialize_object_data(buf)
size = buf.get_int
buf.position -= 4
object = BSON_CODER.new().deserialize(buf.get(size))
if object.has_key? "$ref"
DBRef.new(object["$ref"], object["$id"])
else
object
end
end
def deserialize_array_data(buf)
h = deserialize_object_data(buf)
a = []
h.each { |k, v| a[k.to_i] = v }
a
end
def deserialize_regex_data(buf)
str = deserialize_cstr(buf)
options_str = deserialize_cstr(buf)
options = 0
options |= Regexp::IGNORECASE if options_str.include?('i')
options |= Regexp::MULTILINE if options_str.include?('m')
options |= Regexp::EXTENDED if options_str.include?('x')
Regexp.new(str, options)
end
def encoded_str(str)
if RUBY_VERSION >= '1.9'
str.force_encoding("utf-8")
if Encoding.default_internal
str.encode!(Encoding.default_internal)
end
end
str
end
def deserialize_string_data(buf)
len = buf.get_int
bytes = buf.get(len)
str = bytes[0..-2]
if str.respond_to? "pack"
str = str.pack("C*")
end
encoded_str(str)
end
def deserialize_code_w_scope_data(buf)
buf.get_int
len = buf.get_int
code = buf.get(len)[0..-2]
if code.respond_to? "pack"
code = code.pack("C*")
end
scope_size = buf.get_int
buf.position -= 4
scope = BSON_CODER.new().deserialize(buf.get(scope_size))
Code.new(encoded_str(code), scope)
end
def deserialize_oid_data(buf)
ObjectId.new(buf.get(12))
end
def deserialize_dbref_data(buf)
ns = deserialize_string_data(buf)
oid = deserialize_oid_data(buf)
DBRef.new(ns, oid)
end
def deserialize_binary_data(buf)
len = buf.get_int
type = buf.get
len = buf.get_int if type == Binary::SUBTYPE_BYTES
Binary.new(buf.get(len), type)
end
def serialize_eoo_element(buf)
buf.put(EOO)
end
def serialize_null_element(buf, key)
buf.put(NULL)
self.class.serialize_key(buf, key)
end
def serialize_dbref_element(buf, key, val)
oh = BSON::OrderedHash.new
oh['$ref'] = val.namespace
oh['$id'] = val.object_id
serialize_object_element(buf, key, oh, false)
end
def serialize_binary_element(buf, key, val)
buf.put(BINARY)
self.class.serialize_key(buf, key)
bytes = val.to_a
num_bytes = bytes.length
subtype = val.respond_to?(:subtype) ? val.subtype : Binary::SUBTYPE_BYTES
if subtype == Binary::SUBTYPE_BYTES
buf.put_int(num_bytes + 4)
buf.put(subtype)
buf.put_int(num_bytes)
buf.put_array(bytes)
else
buf.put_int(num_bytes)
buf.put(subtype)
buf.put_array(bytes)
end
end
def serialize_boolean_element(buf, key, val)
buf.put(BOOLEAN)
self.class.serialize_key(buf, key)
buf.put(val ? 1 : 0)
end
def serialize_date_element(buf, key, val)
buf.put(DATE)
self.class.serialize_key(buf, key)
millisecs = (val.to_f * 1000).to_i
buf.put_long(millisecs)
end
def serialize_number_element(buf, key, val, type)
if type == NUMBER
buf.put(type)
self.class.serialize_key(buf, key)
buf.put_double(val)
else
if val > 2**64 / 2 - 1 or val < -2**64 / 2
raise RangeError.new("MongoDB can only handle 8-byte ints")
end
if val > 2**32 / 2 - 1 or val < -2**32 / 2
buf.put(NUMBER_LONG)
self.class.serialize_key(buf, key)
buf.put_long(val)
else
buf.put(type)
self.class.serialize_key(buf, key)
buf.put_int(val)
end
end
end
def serialize_object_element(buf, key, val, check_keys, opcode=OBJECT)
buf.put(opcode)
self.class.serialize_key(buf, key)
buf.put_array(BSON_CODER.new.serialize(val, check_keys).to_a)
end
def serialize_array_element(buf, key, val, check_keys)
# Turn array into hash with integer indices as keys
h = BSON::OrderedHash.new
i = 0
val.each { |v| h[i] = v; i += 1 }
serialize_object_element(buf, key, h, check_keys, ARRAY)
end
def serialize_regex_element(buf, key, val)
buf.put(REGEX)
self.class.serialize_key(buf, key)
str = val.source
# We use serialize_key here since regex patterns aren't prefixed with
# length (can't contain the NULL byte).
self.class.serialize_key(buf, str)
options = val.options
options_str = ''
options_str << 'i' if ((options & Regexp::IGNORECASE) != 0)
options_str << 'm' if ((options & Regexp::MULTILINE) != 0)
options_str << 'x' if ((options & Regexp::EXTENDED) != 0)
options_str << val.extra_options_str if val.respond_to?(:extra_options_str)
# Must store option chars in alphabetical order
self.class.serialize_cstr(buf, options_str.split(//).sort.uniq.join)
end
def serialize_max_key_element(buf, key)
buf.put(MAXKEY)
self.class.serialize_key(buf, key)
end
def serialize_min_key_element(buf, key)
buf.put(MINKEY)
self.class.serialize_key(buf, key)
end
def serialize_oid_element(buf, key, val)
buf.put(OID)
self.class.serialize_key(buf, key)
buf.put_array(val.to_a)
end
def serialize_string_element(buf, key, val, type)
buf.put(type)
self.class.serialize_key(buf, key)
# Make a hole for the length
len_pos = buf.position
buf.put_int(0)
# Save the string
start_pos = buf.position
self.class.serialize_cstr(buf, val)
end_pos = buf.position
# Put the string size in front
buf.put_int(end_pos - start_pos, len_pos)
# Go back to where we were
buf.position = end_pos
end
def serialize_code_w_scope(buf, key, val)
buf.put(CODE_W_SCOPE)
self.class.serialize_key(buf, key)
# Make a hole for the length
len_pos = buf.position
buf.put_int(0)
buf.put_int(val.length + 1)
self.class.serialize_cstr(buf, val)
buf.put_array(BSON_CODER.new.serialize(val.scope).to_a)
end_pos = buf.position
buf.put_int(end_pos - len_pos, len_pos)
buf.position = end_pos
end
def deserialize_cstr(buf)
chars = ""
while true
b = buf.get
break if b == 0
chars << b.chr
end
encoded_str(chars)
end
def bson_type(o)
case o
when nil
NULL
when Integer
NUMBER_INT
when Float
NUMBER
when ByteBuffer
BINARY
when Code
CODE_W_SCOPE
when String
STRING
when Array
ARRAY
when Regexp
REGEX
when ObjectID
OID
when ObjectId
OID
when DBRef
REF
when true, false
BOOLEAN
when Time
DATE
when Hash
OBJECT
when Symbol
SYMBOL
when MaxKey
MAXKEY
when MinKey
MINKEY
when Numeric
raise InvalidDocument, "Cannot serialize the Numeric type #{o.class} as BSON; only Fixum, Bignum, and Float are supported."
when Date, DateTime
raise InvalidDocument, "#{o.class} is not currently supported; " +
"use a UTC Time instance instead."
else
if defined?(ActiveSupport::TimeWithZone) && o.is_a?(ActiveSupport::TimeWithZone)
raise InvalidDocument, "ActiveSupport::TimeWithZone is not currently supported; " +
"use a UTC Time instance instead."
else
raise InvalidDocument, "Cannot serialize #{o.class} as a BSON type; it either isn't supported or won't translate to BSON."
end
end
end
end
end
minor: comment cleanup
# encoding: UTF-8
# --
# Copyright (C) 2008-2010 10gen Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ++
module BSON
  # A BSON serializer/deserializer in pure Ruby.
class BSON_RUBY
MINKEY = -1
EOO = 0
NUMBER = 1
STRING = 2
OBJECT = 3
ARRAY = 4
BINARY = 5
UNDEFINED = 6
OID = 7
BOOLEAN = 8
DATE = 9
NULL = 10
REGEX = 11
REF = 12
CODE = 13
SYMBOL = 14
CODE_W_SCOPE = 15
NUMBER_INT = 16
TIMESTAMP = 17
NUMBER_LONG = 18
MAXKEY = 127
def initialize
@buf = ByteBuffer.new
end
if RUBY_VERSION >= '1.9'
NULL_BYTE = "\0".force_encoding('binary').freeze
UTF8_ENCODING = Encoding.find('utf-8')
BINARY_ENCODING = Encoding.find('binary')
def self.to_utf8_binary(str)
str.encode(UTF8_ENCODING).force_encoding(BINARY_ENCODING)
end
else
NULL_BYTE = "\0"
def self.to_utf8_binary(str)
begin
str.unpack("U*")
rescue => ex
raise InvalidStringEncoding, "String not valid utf-8: #{str.inspect}"
end
str
end
end
def self.serialize_cstr(buf, val)
buf.put_binary(to_utf8_binary(val.to_s))
buf.put_binary(NULL_BYTE)
end
def self.serialize_key(buf, key)
raise InvalidDocument, "Key names / regex patterns must not contain the NULL byte" if key.include? "\x00"
self.serialize_cstr(buf, key)
end
def to_a
@buf.to_a
end
def to_s
@buf.to_s
end
# Serializes an object.
# Implemented to ensure an API compatible with BSON extension.
def self.serialize(obj, check_keys=false, move_id=false)
new.serialize(obj, check_keys, move_id)
end
def self.deserialize(buf=nil)
new.deserialize(buf)
end
def serialize(obj, check_keys=false, move_id=false)
raise(InvalidDocument, "BSON.serialize takes a Hash but got a #{obj.class}") unless obj.is_a?(Hash)
raise "Document is null" unless obj
@buf.rewind
# put in a placeholder for the total size
@buf.put_int(0)
# Write key/value pairs. Always write _id first if it exists.
if move_id
if obj.has_key? '_id'
serialize_key_value('_id', obj['_id'], false)
elsif obj.has_key? :_id
serialize_key_value('_id', obj[:_id], false)
end
obj.each {|k, v| serialize_key_value(k, v, check_keys) unless k == '_id' || k == :_id }
else
if obj.has_key?('_id') && obj.has_key?(:_id)
obj['_id'] = obj.delete(:_id)
end
obj.each {|k, v| serialize_key_value(k, v, check_keys) }
end
serialize_eoo_element(@buf)
if @buf.size > 4 * 1024 * 1024
raise InvalidDocument, "Document is too large (#{@buf.size}). BSON documents are limited to 4MB (#{4 * 1024 * 1024})."
end
@buf.put_int(@buf.size, 0)
@buf
end
# Returns the array stored in the buffer.
# Implemented to ensure an API compatible with BSON extension.
def unpack(arg)
@buf.to_a
end
def serialize_key_value(k, v, check_keys)
k = k.to_s
if check_keys
if k[0] == ?$
raise InvalidKeyName.new("key #{k} must not start with '$'")
end
if k.include? ?.
raise InvalidKeyName.new("key #{k} must not contain '.'")
end
end
type = bson_type(v)
case type
when STRING, SYMBOL
serialize_string_element(@buf, k, v, type)
when NUMBER, NUMBER_INT
serialize_number_element(@buf, k, v, type)
when OBJECT
serialize_object_element(@buf, k, v, check_keys)
when OID
serialize_oid_element(@buf, k, v)
when ARRAY
serialize_array_element(@buf, k, v, check_keys)
when REGEX
serialize_regex_element(@buf, k, v)
when BOOLEAN
serialize_boolean_element(@buf, k, v)
when DATE
serialize_date_element(@buf, k, v)
when NULL
serialize_null_element(@buf, k)
when REF
serialize_dbref_element(@buf, k, v)
when BINARY
serialize_binary_element(@buf, k, v)
when UNDEFINED
serialize_null_element(@buf, k)
when CODE_W_SCOPE
serialize_code_w_scope(@buf, k, v)
when MAXKEY
serialize_max_key_element(@buf, k)
when MINKEY
serialize_min_key_element(@buf, k)
else
raise "unhandled type #{type}"
end
end
def deserialize(buf=nil)
# If buf is nil, use @buf, assumed to contain already-serialized BSON.
# This is only true during testing.
if buf.is_a? String
@buf = ByteBuffer.new(buf.unpack("C*")) if buf
else
@buf = ByteBuffer.new(buf.to_a) if buf
end
@buf.rewind
@buf.get_int # eat message size
doc = BSON::OrderedHash.new
while @buf.more?
type = @buf.get
case type
when STRING, CODE
key = deserialize_cstr(@buf)
doc[key] = deserialize_string_data(@buf)
when SYMBOL
key = deserialize_cstr(@buf)
doc[key] = deserialize_string_data(@buf).intern
when NUMBER
key = deserialize_cstr(@buf)
doc[key] = deserialize_number_data(@buf)
when NUMBER_INT
key = deserialize_cstr(@buf)
doc[key] = deserialize_number_int_data(@buf)
when NUMBER_LONG
key = deserialize_cstr(@buf)
doc[key] = deserialize_number_long_data(@buf)
when OID
key = deserialize_cstr(@buf)
doc[key] = deserialize_oid_data(@buf)
when ARRAY
key = deserialize_cstr(@buf)
doc[key] = deserialize_array_data(@buf)
when REGEX
key = deserialize_cstr(@buf)
doc[key] = deserialize_regex_data(@buf)
when OBJECT
key = deserialize_cstr(@buf)
doc[key] = deserialize_object_data(@buf)
when BOOLEAN
key = deserialize_cstr(@buf)
doc[key] = deserialize_boolean_data(@buf)
when DATE
key = deserialize_cstr(@buf)
doc[key] = deserialize_date_data(@buf)
when NULL
key = deserialize_cstr(@buf)
doc[key] = nil
when UNDEFINED
key = deserialize_cstr(@buf)
doc[key] = nil
when REF
key = deserialize_cstr(@buf)
doc[key] = deserialize_dbref_data(@buf)
when BINARY
key = deserialize_cstr(@buf)
doc[key] = deserialize_binary_data(@buf)
when CODE_W_SCOPE
key = deserialize_cstr(@buf)
doc[key] = deserialize_code_w_scope_data(@buf)
when TIMESTAMP
key = deserialize_cstr(@buf)
doc[key] = [deserialize_number_int_data(@buf),
deserialize_number_int_data(@buf)]
when MAXKEY
key = deserialize_cstr(@buf)
doc[key] = MaxKey.new
when MINKEY, 255 # This is currently easier than unpack the type byte as an unsigned char.
key = deserialize_cstr(@buf)
doc[key] = MinKey.new
when EOO
break
else
raise "Unknown type #{type}, key = #{key}"
end
end
@buf.rewind
doc
end
# For debugging.
def hex_dump
str = ''
@buf.to_a.each_with_index { |b,i|
if (i % 8) == 0
str << "\n" if i > 0
str << '%4d: ' % i
else
str << ' '
end
str << '%02X' % b
}
str
end
def deserialize_date_data(buf)
unsigned = buf.get_long()
milliseconds = unsigned >= 2 ** 64 / 2 ? unsigned - 2**64 : unsigned
Time.at(milliseconds.to_f / 1000.0).utc # at() takes fractional seconds
end
def deserialize_boolean_data(buf)
buf.get == 1
end
def deserialize_number_data(buf)
buf.get_double
end
def deserialize_number_int_data(buf)
unsigned = buf.get_int
unsigned >= 2**32 / 2 ? unsigned - 2**32 : unsigned
end
def deserialize_number_long_data(buf)
unsigned = buf.get_long
unsigned >= 2 ** 64 / 2 ? unsigned - 2**64 : unsigned
end
def deserialize_object_data(buf)
size = buf.get_int
buf.position -= 4
object = BSON_CODER.new().deserialize(buf.get(size))
if object.has_key? "$ref"
DBRef.new(object["$ref"], object["$id"])
else
object
end
end
def deserialize_array_data(buf)
h = deserialize_object_data(buf)
a = []
h.each { |k, v| a[k.to_i] = v }
a
end
def deserialize_regex_data(buf)
str = deserialize_cstr(buf)
options_str = deserialize_cstr(buf)
options = 0
options |= Regexp::IGNORECASE if options_str.include?('i')
options |= Regexp::MULTILINE if options_str.include?('m')
options |= Regexp::EXTENDED if options_str.include?('x')
Regexp.new(str, options)
end
def encoded_str(str)
if RUBY_VERSION >= '1.9'
str.force_encoding("utf-8")
if Encoding.default_internal
str.encode!(Encoding.default_internal)
end
end
str
end
def deserialize_string_data(buf)
len = buf.get_int
bytes = buf.get(len)
str = bytes[0..-2]
if str.respond_to? "pack"
str = str.pack("C*")
end
encoded_str(str)
end
def deserialize_code_w_scope_data(buf)
buf.get_int
len = buf.get_int
code = buf.get(len)[0..-2]
if code.respond_to? "pack"
code = code.pack("C*")
end
scope_size = buf.get_int
buf.position -= 4
scope = BSON_CODER.new().deserialize(buf.get(scope_size))
Code.new(encoded_str(code), scope)
end
def deserialize_oid_data(buf)
ObjectId.new(buf.get(12))
end
def deserialize_dbref_data(buf)
ns = deserialize_string_data(buf)
oid = deserialize_oid_data(buf)
DBRef.new(ns, oid)
end
def deserialize_binary_data(buf)
len = buf.get_int
type = buf.get
len = buf.get_int if type == Binary::SUBTYPE_BYTES
Binary.new(buf.get(len), type)
end
def serialize_eoo_element(buf)
buf.put(EOO)
end
def serialize_null_element(buf, key)
buf.put(NULL)
self.class.serialize_key(buf, key)
end
def serialize_dbref_element(buf, key, val)
oh = BSON::OrderedHash.new
oh['$ref'] = val.namespace
oh['$id'] = val.object_id
serialize_object_element(buf, key, oh, false)
end
def serialize_binary_element(buf, key, val)
buf.put(BINARY)
self.class.serialize_key(buf, key)
bytes = val.to_a
num_bytes = bytes.length
subtype = val.respond_to?(:subtype) ? val.subtype : Binary::SUBTYPE_BYTES
if subtype == Binary::SUBTYPE_BYTES
buf.put_int(num_bytes + 4)
buf.put(subtype)
buf.put_int(num_bytes)
buf.put_array(bytes)
else
buf.put_int(num_bytes)
buf.put(subtype)
buf.put_array(bytes)
end
end
def serialize_boolean_element(buf, key, val)
buf.put(BOOLEAN)
self.class.serialize_key(buf, key)
buf.put(val ? 1 : 0)
end
def serialize_date_element(buf, key, val)
buf.put(DATE)
self.class.serialize_key(buf, key)
millisecs = (val.to_f * 1000).to_i
buf.put_long(millisecs)
end
def serialize_number_element(buf, key, val, type)
if type == NUMBER
buf.put(type)
self.class.serialize_key(buf, key)
buf.put_double(val)
else
if val > 2**64 / 2 - 1 or val < -2**64 / 2
raise RangeError.new("MongoDB can only handle 8-byte ints")
end
if val > 2**32 / 2 - 1 or val < -2**32 / 2
buf.put(NUMBER_LONG)
self.class.serialize_key(buf, key)
buf.put_long(val)
else
buf.put(type)
self.class.serialize_key(buf, key)
buf.put_int(val)
end
end
end
def serialize_object_element(buf, key, val, check_keys, opcode=OBJECT)
buf.put(opcode)
self.class.serialize_key(buf, key)
buf.put_array(BSON_CODER.new.serialize(val, check_keys).to_a)
end
def serialize_array_element(buf, key, val, check_keys)
# Turn array into hash with integer indices as keys
h = BSON::OrderedHash.new
i = 0
val.each { |v| h[i] = v; i += 1 }
serialize_object_element(buf, key, h, check_keys, ARRAY)
end
def serialize_regex_element(buf, key, val)
buf.put(REGEX)
self.class.serialize_key(buf, key)
str = val.source
# We use serialize_key here since regex patterns aren't prefixed with
# length (can't contain the NULL byte).
self.class.serialize_key(buf, str)
options = val.options
options_str = ''
options_str << 'i' if ((options & Regexp::IGNORECASE) != 0)
options_str << 'm' if ((options & Regexp::MULTILINE) != 0)
options_str << 'x' if ((options & Regexp::EXTENDED) != 0)
options_str << val.extra_options_str if val.respond_to?(:extra_options_str)
# Must store option chars in alphabetical order
self.class.serialize_cstr(buf, options_str.split(//).sort.uniq.join)
end
def serialize_max_key_element(buf, key)
buf.put(MAXKEY)
self.class.serialize_key(buf, key)
end
def serialize_min_key_element(buf, key)
buf.put(MINKEY)
self.class.serialize_key(buf, key)
end
def serialize_oid_element(buf, key, val)
buf.put(OID)
self.class.serialize_key(buf, key)
buf.put_array(val.to_a)
end
def serialize_string_element(buf, key, val, type)
buf.put(type)
self.class.serialize_key(buf, key)
# Make a hole for the length
len_pos = buf.position
buf.put_int(0)
# Save the string
start_pos = buf.position
self.class.serialize_cstr(buf, val)
end_pos = buf.position
# Put the string size in front
buf.put_int(end_pos - start_pos, len_pos)
# Go back to where we were
buf.position = end_pos
end
def serialize_code_w_scope(buf, key, val)
buf.put(CODE_W_SCOPE)
self.class.serialize_key(buf, key)
# Make a hole for the length
len_pos = buf.position
buf.put_int(0)
buf.put_int(val.length + 1)
self.class.serialize_cstr(buf, val)
buf.put_array(BSON_CODER.new.serialize(val.scope).to_a)
end_pos = buf.position
buf.put_int(end_pos - len_pos, len_pos)
buf.position = end_pos
end
def deserialize_cstr(buf)
chars = ""
while true
b = buf.get
break if b == 0
chars << b.chr
end
encoded_str(chars)
end
def bson_type(o)
case o
when nil
NULL
when Integer
NUMBER_INT
when Float
NUMBER
when ByteBuffer
BINARY
when Code
CODE_W_SCOPE
when String
STRING
when Array
ARRAY
when Regexp
REGEX
when ObjectID
OID
when ObjectId
OID
when DBRef
REF
when true, false
BOOLEAN
when Time
DATE
when Hash
OBJECT
when Symbol
SYMBOL
when MaxKey
MAXKEY
when MinKey
MINKEY
when Numeric
raise InvalidDocument, "Cannot serialize the Numeric type #{o.class} as BSON; only Fixum, Bignum, and Float are supported."
when Date, DateTime
raise InvalidDocument, "#{o.class} is not currently supported; " +
"use a UTC Time instance instead."
else
if defined?(ActiveSupport::TimeWithZone) && o.is_a?(ActiveSupport::TimeWithZone)
raise InvalidDocument, "ActiveSupport::TimeWithZone is not currently supported; " +
"use a UTC Time instance instead."
else
raise InvalidDocument, "Cannot serialize #{o.class} as a BSON type; it either isn't supported or won't translate to BSON."
end
end
end
end
end
|
# A PeriodicTask acts as a procedure that can be used by a TimedTask.
#
# @example
# bot = Bot.new(config)
# period_len = 90
# tt = TimedTask.new(period_len) { MoveAfkUsers.new(bot).run }
# tt.start
# ...
# tt.stop
class PeriodicTask
  # @param bot [Bot] bot instance subclasses use to act on users
  def initialize(bot)
    @bot = bot
    # Cached for subclasses (e.g. MoveAfkUsers). NOTE(review): assumes a
    # channel named "AFK" exists; @afk_channel is nil otherwise -- confirm.
    @afk_channel = Channel.find_by_name("AFK")
  end
  # Start the task.
  #
  # @info: This method is run to start this periodic task
  # in a TimedTask.
  def run
    task
  end
  protected
  # Logic of a periodic task.
  #
  # @info: A descendant class is supposed to implement this method.
  def task
    raise "Not implemented yet."
  end
end
# Check the afk state of every user that is online.
# If they are afk and not already located at the AFK channel,
# then move them to the AFK channel.
class MoveAfkUsers < PeriodicTask
  # Moves every online AFK user who is movable and not already in the
  # AFK channel, and tells them why they were moved.
  def task
    afk_users = User.all.select(&:afk?)
    afk_users.each do |user|
      # Guard with `||` instead of `unless ... or ...`: the keyword `or`
      # is for control flow, not boolean logic (precedence trap).
      next if user.channel_id == @afk_channel.id || user.unmovable?
      @bot.move_target(user, @afk_channel.id)
      @bot.say_as_poke(user, "Sorry, but I moved you to the AFK channel.")
    end
  end
end
# Periodically check the time since we received a message from the mss server.
# In case the last message is older than 20 seconds, we assume that the server
# is offline.
class CheckMcServer < PeriodicTask
  # Seconds without a message after which the server counts as offline.
  REACHABLE_THRESHOLD = 20.0
  def task
    elapsed = Time.now - $mss_msg
    # Explicit if/else replaces a ternary that was split across two lines
    # before its ':', which relied on fragile parser line continuation.
    $server_reachable = if elapsed < REACHABLE_THRESHOLD
      ColorText.new("online", 'green')
    else
      ColorText.new("offline", 'red')
    end
    Mailbox.instance.notify_all_with(Event.new("mss", 'reachable_update'))
  end
end
# Sends a random link, stored in TauntLinkStore, to a random user.
class RollTheDice < PeriodicTask
  def task
    user = Session.random_user
    taunt_link = TauntLinkStore.next_random.to_s
    # Nothing to do when no client is online or no link is available;
    # the old code messaged a nil user / sent an empty link.
    return if user.nil? || taunt_link.empty?
    @bot.say_as_private(user, "Hey, watch #{taunt_link} out.")
  end
end
Do not roll the dice if there is no client online or there is no link
# A PeriodicTask acts as a procedure that can be used by a TimedTask.
#
# @example
# bot = Bot.new(config)
# period_len = 90
# tt = TimedTask.new(period_len) { MoveAfkUsers.new(bot).run }
# tt.start
# ...
# tt.stop
class PeriodicTask
def initialize(bot)
@bot = bot
@afk_channel = Channel.find_by_name("AFK")
end
# Start the task.
#
# @info: This method is run by to start this periodic task
# in a TimedTask.
def run
task
end
protected
# Logic of a periodic task.
#
# @info: A descendant class is supposed to implement this method.
def task
raise "Not implemented yet."
end
end
# Check the afk state of every user that is online.
# If they are afk and not already located at the AFK channel,
# then move them to the AFK channel.
class MoveAfkUsers < PeriodicTask
  # Moves every online AFK user who is movable and not already in the
  # AFK channel, and tells them why they were moved.
  def task
    afk_users = User.all.select(&:afk?)
    afk_users.each do |user|
      # Guard with `||` instead of `unless ... or ...`: the keyword `or`
      # is for control flow, not boolean logic (precedence trap).
      next if user.channel_id == @afk_channel.id || user.unmovable?
      @bot.move_target(user, @afk_channel.id)
      @bot.say_as_poke(user, "Sorry, but I moved you to the AFK channel.")
    end
  end
end
# Periodically check the time since we received a message from the mss server.
# In case the last message is older than 20 seconds, we assume that the server
# is offline.
class CheckMcServer < PeriodicTask
  # Seconds without a message after which the server counts as offline.
  REACHABLE_THRESHOLD = 20.0
  def task
    elapsed = Time.now - $mss_msg
    # Explicit if/else replaces a ternary that was split across two lines
    # before its ':', which relied on fragile parser line continuation.
    $server_reachable = if elapsed < REACHABLE_THRESHOLD
      ColorText.new("online", 'green')
    else
      ColorText.new("offline", 'red')
    end
    Mailbox.instance.notify_all_with(Event.new("mss", 'reachable_update'))
  end
end
# Sends a random link, stored in TauntLinkStore to a random user.
class RollTheDice < PeriodicTask
  def task
    user = Session.random_user
    taunt_link = TauntLinkStore.next_random.to_s
    # `||` instead of `or`: the keyword `or` is meant for control flow
    # and its very low precedence makes boolean guards easy to misread.
    return if taunt_link.empty? || user.nil?
    msg = "Hey, watch #{taunt_link} out."
    @bot.say_as_private(user, msg)
  end
end
|
# frozen_string_literal: true
require "socket"
module Bundler
class Settings
# Class used to build the mirror set and then find a mirror for a given URI
#
# @param prober [Prober object, nil] by default a TCPSocketProbe, this object
# will be used to probe the mirror address to validate that the mirror replies.
class Mirrors
def initialize(prober = nil)
@all = Mirror.new
@prober = prober || TCPSocketProbe.new
@mirrors = {}
end
# Returns a mirror for the given uri.
#
# Depending on the uri having a valid mirror or not, it may be a
# mirror that points to the provided uri
def for(uri)
if @all.validate!(@prober).valid?
@all
else
fetch_valid_mirror_for(Settings.normalize_uri(uri))
end
end
def each
@mirrors.each do |k, v|
yield k, v.uri.to_s
end
end
def parse(key, value)
config = MirrorConfig.new(key, value)
mirror = if config.all?
@all
else
(@mirrors[config.uri] = @mirrors[config.uri] || Mirror.new)
end
config.update_mirror(mirror)
end
private
def fetch_valid_mirror_for(uri)
mirror = (@mirrors[URI(uri.to_s.downcase)] || @mirrors[URI(uri.to_s).host] || Mirror.new(uri)).validate!(@prober)
mirror = Mirror.new(uri) unless mirror.valid?
mirror
end
end
# A mirror
#
# Contains both the uri that should be used as a mirror and the
# fallback timeout which will be used for probing if the mirror
# replies on time or not.
class Mirror
DEFAULT_FALLBACK_TIMEOUT = 0.1
attr_reader :uri, :fallback_timeout
def initialize(uri = nil, fallback_timeout = 0)
self.uri = uri
self.fallback_timeout = fallback_timeout
@valid = nil
end
def uri=(uri)
@uri = if uri.nil?
nil
else
URI(uri.to_s)
end
@valid = nil
end
def fallback_timeout=(timeout)
case timeout
when true, "true"
@fallback_timeout = DEFAULT_FALLBACK_TIMEOUT
when false, "false"
@fallback_timeout = 0
else
@fallback_timeout = timeout.to_i
end
@valid = nil
end
def ==(other)
!other.nil? && uri == other.uri && fallback_timeout == other.fallback_timeout
end
def valid?
return false if @uri.nil?
return @valid unless @valid.nil?
false
end
def validate!(probe = nil)
@valid = false if uri.nil?
if @valid.nil?
@valid = fallback_timeout == 0 || (probe || TCPSocketProbe.new).replies?(self)
end
self
end
end
# Class used to parse one configuration line
#
# Gets the configuration line and the value.
# This object provides a `update_mirror` method
# used to setup the given mirror value.
class MirrorConfig
attr_accessor :uri, :value
def initialize(config_line, value)
uri, fallback =
config_line.match(%r{^mirror\.(all|.+?)(\.fallback_timeout)?\/?$}).captures
@fallback = !fallback.nil?
@all = false
if uri == "all"
@all = true
else
@uri = URI(uri).absolute? ? Settings.normalize_uri(uri) : uri
end
@value = value
end
def all?
@all
end
def update_mirror(mirror)
if @fallback
mirror.fallback_timeout = @value
else
mirror.uri = Settings.normalize_uri(@value)
end
end
end
# Class used for probing TCP availability for a given mirror.
# Probes a mirror's TCP availability by attempting a non-blocking connect
# to each of its resolved addresses within the mirror's fallback timeout.
class TCPSocketProbe
  # True if any address of the mirror accepts a TCP connection before the
  # timeout elapses.
  def replies?(mirror)
    MirrorSockets.new(mirror).any? do |socket, address, timeout|
      begin
        socket.connect_nonblock(address)
      rescue Errno::EINPROGRESS
        # Handshake in flight; wait for the socket to become writable.
        wait_for_writtable_socket(socket, address, timeout)
      rescue # Connection failed somehow, again
        false
      end
    end
  end

  private

  # NOTE(review): "writtable" is a misspelling of "writable" in these
  # private method names; left unchanged here to keep this edit
  # documentation-only.
  def wait_for_writtable_socket(socket, address, timeout)
    if IO.select(nil, [socket], nil, timeout)
      probe_writtable_socket(socket, address)
    else # TCP Handshake timed out, or there is something dropping packets
      false
    end
  end

  # Re-connecting on a writable socket reveals the outcome: EISCONN means
  # the handshake completed; any other error is a failure.
  def probe_writtable_socket(socket, address)
    socket.connect_nonblock(address)
  rescue Errno::EISCONN
    true
  rescue # Connection failed
    false
  end
end
end
# Class used to build the list of sockets that correspond to
# a given mirror.
#
# One mirror may correspond to many different addresses, both
# because of it having many dns entries or because
# the network interface is both ipv4 and ipv6
# Builds the candidate sockets for a mirror; one socket per resolved
# address (a host can have several DNS records and address families).
class MirrorSockets
  def initialize(mirror)
    @timeout = mirror.fallback_timeout
    # getaddrinfo rows are [family, port, hostname, numeric_address, ...];
    # we keep the family name, the numeric address and the port.
    @addresses = Socket.getaddrinfo(mirror.uri.host, mirror.uri.port).map do |address|
      SocketAddress.new(address[0], address[3], address[1])
    end
  end

  # Yields (socket, packed sockaddr, timeout) for each address until the
  # block returns truthy; every socket is closed after its turn.
  def any?
    @addresses.any? do |address|
      socket = Socket.new(Socket.const_get(address.type), Socket::SOCK_STREAM, 0)
      # Disable Nagle's algorithm so the probe is not delayed by batching.
      socket.setsockopt(Socket::IPPROTO_TCP, Socket::TCP_NODELAY, 1)
      value = yield socket, address.to_socket_address, @timeout
      socket.close unless socket.closed?
      value
    end
  end
end
# Socket address builder.
#
# Given a socket type, a host and a port,
# provides a method to build sockaddr string
# Value object holding an address-family name, a host and a port, able to
# produce the packed sockaddr string that Socket#connect_nonblock expects.
class SocketAddress
  attr_reader :type, :host, :port

  def initialize(type, host, port)
    @type, @host, @port = type, host, port
  end

  # Packed sockaddr_in for this host/port pair.
  def to_socket_address
    Socket.pack_sockaddr_in(port, host)
  end
end
end
Auto merge of #5534 - rafaelfranca:fix-mirror-socket, r=andremedeiros
Require socket in the file that needs it
Closes #5533
# frozen_string_literal: true
require "socket"
module Bundler
class Settings
# Class used to build the mirror set and then find a mirror for a given URI
#
# @param prober [Prober object, nil] by default a TCPSocketProbe, this object
# will be used to probe the mirror address to validate that the mirror replies.
# Builds the mirror set and finds the mirror to use for a given URI.
#
# @param prober [#replies?, nil] defaults to TCPSocketProbe; used to
#   check that a configured mirror actually answers in time.
class Mirrors
  def initialize(prober = nil)
    @all = Mirror.new              # the "mirror.all" catch-all entry
    @prober = prober || TCPSocketProbe.new
    @mirrors = {}                  # normalized uri => Mirror
  end

  # Returns a mirror for the given uri.
  #
  # Depending on the uri having a valid mirror or not, it may be a
  # mirror that points to the provided uri
  def for(uri)
    if @all.validate!(@prober).valid?
      @all
    else
      fetch_valid_mirror_for(Settings.normalize_uri(uri))
    end
  end

  # Yields each configured (key, mirror-uri-string) pair.
  def each
    @mirrors.each do |k, v|
      yield k, v.uri.to_s
    end
  end

  # Parses one settings entry (e.g. "mirror.https://rubygems.org") and
  # applies its value to the matching Mirror, creating one if needed.
  def parse(key, value)
    config = MirrorConfig.new(key, value)
    mirror = if config.all?
      @all
    else
      (@mirrors[config.uri] = @mirrors[config.uri] || Mirror.new)
    end
    config.update_mirror(mirror)
  end

  private

  # Looks the uri up by full (downcased) URI, then by bare host; when the
  # configured mirror fails validation, falls back to the uri itself.
  def fetch_valid_mirror_for(uri)
    mirror = (@mirrors[URI(uri.to_s.downcase)] || @mirrors[URI(uri.to_s).host] || Mirror.new(uri)).validate!(@prober)
    mirror = Mirror.new(uri) unless mirror.valid?
    mirror
  end
end
# A mirror
#
# Contains both the uri that should be used as a mirror and the
# fallback timeout which will be used for probing if the mirror
# replies on time or not.
# Represents a single mirror entry: the mirror URI plus the timeout used
# when probing whether the mirror answers in time.
class Mirror
  DEFAULT_FALLBACK_TIMEOUT = 0.1
  attr_reader :uri, :fallback_timeout

  # @param uri [String, URI, nil] mirror location; nil leaves the mirror invalid
  # @param fallback_timeout [Numeric, String, true, false] probe timeout setting
  def initialize(uri = nil, fallback_timeout = 0)
    self.uri = uri
    self.fallback_timeout = fallback_timeout
    @valid = nil # tri-state validation cache: nil means "not yet probed"
  end

  # Assigns the uri (coerced through URI()) and resets the validation cache.
  def uri=(uri)
    @uri = uri.nil? ? nil : URI(uri.to_s)
    @valid = nil
  end

  # Accepts true/"true" (use the default probe timeout), false/"false"
  # (probing disabled, 0), or anything coercible with #to_i.
  # Resets the validation cache.
  def fallback_timeout=(timeout)
    @fallback_timeout =
      case timeout
      when true, "true"   then DEFAULT_FALLBACK_TIMEOUT
      when false, "false" then 0
      else                     timeout.to_i
      end
    @valid = nil
  end

  # Two mirrors are equal when both uri and fallback_timeout match.
  def ==(other)
    return false if other.nil?
    uri == other.uri && fallback_timeout == other.fallback_timeout
  end

  # True only after a successful validate!; a mirror that was never
  # probed, or that has no uri, reports false.
  def valid?
    return false if @uri.nil?
    @valid.nil? ? false : @valid
  end

  # Probes the mirror unless already probed. A fallback_timeout of 0
  # skips the network probe and counts as valid. Returns self.
  def validate!(probe = nil)
    @valid = false if uri.nil?
    if @valid.nil?
      @valid = fallback_timeout == 0 || (probe || TCPSocketProbe.new).replies?(self)
    end
    self
  end
end
# Class used to parse one configuration line
#
# Gets the configuration line and the value.
# This object provides a `update_mirror` method
# used to setup the given mirror value.
# Parses a single `mirror.*` configuration key/value pair.
#
# Keys look like "mirror.<uri>" or "mirror.all", optionally suffixed with
# ".fallback_timeout". #update_mirror applies the parsed value to a Mirror.
class MirrorConfig
  attr_accessor :uri, :value

  def initialize(config_line, value)
    # Captures: the mirror target ("all" or a URI) and the optional
    # ".fallback_timeout" suffix; a trailing slash on the key is ignored.
    uri, fallback =
      config_line.match(%r{^mirror\.(all|.+?)(\.fallback_timeout)?\/?$}).captures
    @fallback = !fallback.nil?
    @all = false
    if uri == "all"
      @all = true
    else
      # Only absolute URIs are normalized; relative values are kept verbatim.
      @uri = URI(uri).absolute? ? Settings.normalize_uri(uri) : uri
    end
    @value = value
  end

  # Whether this entry applies to all sources ("mirror.all").
  def all?
    @all
  end

  # Writes the parsed value into +mirror+: either its fallback timeout or
  # its (normalized) uri, depending on the key suffix.
  def update_mirror(mirror)
    if @fallback
      mirror.fallback_timeout = @value
    else
      mirror.uri = Settings.normalize_uri(@value)
    end
  end
end
# Class used for probing TCP availability for a given mirror.
# Probes a mirror's TCP availability by attempting a non-blocking connect
# to each of its resolved addresses within the mirror's fallback timeout.
class TCPSocketProbe
  # True if any address of the mirror accepts a TCP connection before the
  # timeout elapses.
  def replies?(mirror)
    MirrorSockets.new(mirror).any? do |socket, address, timeout|
      begin
        socket.connect_nonblock(address)
      rescue Errno::EINPROGRESS
        # Handshake in flight; wait for the socket to become writable.
        wait_for_writtable_socket(socket, address, timeout)
      rescue # Connection failed somehow, again
        false
      end
    end
  end

  private

  # NOTE(review): "writtable" is a misspelling of "writable" in these
  # private method names; left unchanged here to keep this edit
  # documentation-only.
  def wait_for_writtable_socket(socket, address, timeout)
    if IO.select(nil, [socket], nil, timeout)
      probe_writtable_socket(socket, address)
    else # TCP Handshake timed out, or there is something dropping packets
      false
    end
  end

  # Re-connecting on a writable socket reveals the outcome: EISCONN means
  # the handshake completed; any other error is a failure.
  def probe_writtable_socket(socket, address)
    socket.connect_nonblock(address)
  rescue Errno::EISCONN
    true
  rescue # Connection failed
    false
  end
end
end
# Class used to build the list of sockets that correspond to
# a given mirror.
#
# One mirror may correspond to many different addresses, both
# because of it having many dns entries or because
# the network interface is both ipv4 and ipv6
# Builds the candidate sockets for a mirror; one socket per resolved
# address (a host can have several DNS records and address families).
class MirrorSockets
  def initialize(mirror)
    @timeout = mirror.fallback_timeout
    # getaddrinfo rows are [family, port, hostname, numeric_address, ...];
    # we keep the family name, the numeric address and the port.
    @addresses = Socket.getaddrinfo(mirror.uri.host, mirror.uri.port).map do |address|
      SocketAddress.new(address[0], address[3], address[1])
    end
  end

  # Yields (socket, packed sockaddr, timeout) for each address until the
  # block returns truthy; every socket is closed after its turn.
  def any?
    @addresses.any? do |address|
      socket = Socket.new(Socket.const_get(address.type), Socket::SOCK_STREAM, 0)
      # Disable Nagle's algorithm so the probe is not delayed by batching.
      socket.setsockopt(Socket::IPPROTO_TCP, Socket::TCP_NODELAY, 1)
      value = yield socket, address.to_socket_address, @timeout
      socket.close unless socket.closed?
      value
    end
  end
end
# Socket address builder.
#
# Given a socket type, a host and a port,
# provides a method to build sockaddr string
# Value object holding an address-family name, a host and a port, able to
# produce the packed sockaddr string that Socket#connect_nonblock expects.
class SocketAddress
  attr_reader :type, :host, :port

  def initialize(type, host, port)
    @type, @host, @port = type, host, port
  end

  # Packed sockaddr_in for this host/port pair.
  def to_socket_address
    Socket.pack_sockaddr_in(port, host)
  end
end
end
|
require "uri"
require "rubygems/installer"
require "rubygems/spec_fetcher"
require "rubygems/format"
require "digest/sha1"
require "open3"
module Bundler
module Source
# TODO: Refactor this class
# Gem source backed by rubygems repositories: resolves specs from the
# locally installed gems, the local gem caches, and (when remote mode is
# enabled) the configured remote indexes.
class Rubygems
  attr_reader :remotes

  def initialize(options = {})
    @options = options
    @remotes = (options["remotes"] || []).map { |r| normalize_uri(r) }
    @allow_remote = false
    # Hardcode the paths for now
    @installed = {}
    @caches = [ Bundler.app_cache ] + Gem.path.map { |p| File.expand_path("#{p}/cache") }
    @spec_fetch_map = {}
  end

  # Enables consulting remote indexes in addition to local specs.
  def remote!
    @allow_remote = true
  end

  # First matching spec, preferring installed over cached over remote
  # (the latter two only when remote mode is enabled).
  def [](spec)
    installed_specs[spec].first ||
    @allow_remote && (
      cached_specs[spec].first ||
      remote_specs[spec].first)
  end

  # All Rubygems sources hash/compare equal: they are interchangeable.
  def hash
    Rubygems.hash
  end

  def eql?(o)
    Rubygems === o
  end
  alias == eql?

  # Not really needed, but it seems good to implement this method for interface
  # consistency. Source name is mostly used to identify Path & Git sources
  def name
    ":gems"
  end

  def options
    { "remotes" => @remotes.map { |r| r.to_s } }
  end

  # Reconstructs the source from its lock-file representation.
  def self.from_lock(options)
    s = new(options)
    Array(options["remote"]).each { |r| s.add_remote(r) }
    s
  end

  # Lock-file serialization: the "GEM" section header plus remotes.
  def to_lock
    out = "GEM\n"
    out << remotes.map {|r| " remote: #{r}\n" }.join
    out << " specs:\n"
  end

  def to_s
    remotes = self.remotes.map { |r| r.to_s }.join(', ')
    "rubygems repository #{remotes}"
  end

  # Full index; includes cached and remote specs only in remote mode.
  def specs
    @specs ||= @allow_remote ? fetch_specs : installed_specs
  end

  # Runs the deferred download registered for this spec (if any).
  def fetch(spec)
    action = @spec_fetch_map[spec.full_name]
    action.call if action
  end

  # Installs a cached gem file unless the gem is already installed.
  def install(spec)
    path = cached_gem(spec)

    if @installed[spec.full_name]
      Bundler.ui.info "Using #{spec.name} (#{spec.version}) "
      return
    else
      Bundler.ui.info "Installing #{spec.name} (#{spec.version}) "
    end

    # When sudo is required, install into a temp dir first and move the
    # artifacts into place with sudo afterwards.
    install_path = Bundler.requires_sudo? ? Bundler.tmp : Gem.dir
    installer = Gem::Installer.new path,
      :install_dir         => install_path,
      :ignore_dependencies => true,
      :wrappers            => true,
      :env_shebang         => true,
      :bin_dir             => "#{install_path}/bin"
    installer.install

    # SUDO HAX
    if Bundler.requires_sudo?
      sudo "mkdir -p #{Gem.dir}/gems #{Gem.dir}/specifications"
      sudo "mv #{Bundler.tmp}/gems/#{spec.full_name} #{Gem.dir}/gems/"
      sudo "mv #{Bundler.tmp}/specifications/#{spec.full_name}.gemspec #{Gem.dir}/specifications/"
    end

    spec.loaded_from = "#{Gem.dir}/specifications/#{spec.full_name}.gemspec"
  end

  # NOTE(review): shell command is built by interpolation; paths are
  # assumed not to contain shell metacharacters.
  def sudo(str)
    `sudo -E #{str}`
  end

  # Copies the cached gem file into the application cache.
  def cache(spec)
    cached_path = cached_gem(spec)
    raise GemNotFound, "Missing gem file '#{spec.full_name}.gem'." unless cached_path
    return if File.dirname(cached_path) == Bundler.app_cache.to_s
    Bundler.ui.info " * #{File.basename(cached_path)}"
    FileUtils.cp(cached_path, Bundler.app_cache)
  end

  def add_remote(source)
    @remotes << normalize_uri(source)
  end

  private

  # First existing "<cache>/<full_name>.gem" across all cache dirs, or nil.
  def cached_gem(spec)
    possibilities = @caches.map { |p| "#{p}/#{spec.full_name}.gem" }
    possibilities.find { |p| File.exist?(p) }
  end

  # Ensures a trailing slash and an absolute URI.
  # @raise [ArgumentError] when the URI is not absolute
  def normalize_uri(uri)
    uri = uri.to_s
    uri = "#{uri}/" unless uri =~ %r'/$'
    uri = URI(uri)
    raise ArgumentError, "The source must be an absolute URI" unless uri.absolute?
    uri
  end

  def fetch_specs
    Index.build do |idx|
      idx.use installed_specs
      idx.use cached_specs
      idx.use remote_specs
    end
  end

  # Index of locally installed gems; bundler itself is excluded, then
  # re-added as a synthetic spec so it is always resolvable.
  def installed_specs
    @installed_specs ||= begin
      idx = Index.new
      Gem::SourceIndex.from_installed_gems.to_a.reverse.each do |name, spec|
        next if name == 'bundler'
        @installed[spec.full_name] = true
        spec.source = self
        idx << spec
      end

      # Always have bundler locally
      bundler = Gem::Specification.new do |s|
        s.name     = 'bundler'
        s.version  = VERSION
        s.platform = Gem::Platform::RUBY
        s.source   = self
        # TODO: Remove this
        s.loaded_from = 'w0t'
      end
      @installed[bundler.full_name] = true
      idx << bundler
      idx
    end
  end

  # Index of gems found in the cache directories.
  def cached_specs
    @cached_specs ||= begin
      idx = Index.new
      @caches.each do |path|
        Dir["#{path}/*.gem"].each do |gemfile|
          # FIXME(review): `name` here resolves to Rubygems#name (always
          # ":gems"), not the gem's name, so this guard never skips
          # anything; it presumably meant to test the gem file's name.
          next if name == 'bundler'
          s = Gem::Format.from_file_by_path(gemfile).spec
          s.source = self
          idx << s
        end
      end
      idx
    end
  end

  # Fetches specs from every remote, registering a deferred download per
  # spec so the gem file is only pulled when actually needed.
  def remote_specs
    @remote_specs ||= begin
      idx = Index.new
      remotes = self.remotes.map { |uri| uri.to_s }
      old = Gem.sources

      remotes.each do |uri|
        Bundler.ui.info "Fetching source index for #{uri}"
        Gem.sources = ["#{uri}"]
        fetch_all_remote_specs do |n,v|
          v.each do |name, version, platform|
            next if name == 'bundler'
            spec = RemoteSpecification.new(name, version, platform, uri)
            spec.source = self
            # Temporary hack until this can be figured out better
            @spec_fetch_map[spec.full_name] = lambda do
              path = download_gem_from_uri(spec, uri)
              s = Gem::Format.from_file_by_path(path).spec
              spec.__swap__(s)
            end
            idx << spec
          end
        end
      end
      idx
    ensure
      # Restore the global Gem.sources even on failure.
      Gem.sources = old
    end
  end

  # Lists release specs, then prerelease specs; network failures degrade
  # to a warning instead of aborting.
  def fetch_all_remote_specs(&blk)
    begin
      # Fetch all specs, minus prerelease specs
      Gem::SpecFetcher.new.list(true, false).each(&blk)
      # Then fetch the prerelease specs
      begin
        Gem::SpecFetcher.new.list(false, true).each(&blk)
      rescue Gem::RemoteFetcher::FetchError
        Bundler.ui.warn "Could not fetch prerelease specs from #{self}"
      end
    rescue Gem::RemoteFetcher::FetchError
      Bundler.ui.warn "Could not reach #{self}"
    end
  end

  # Downloads the gem into the (possibly temporary) cache dir, moving it
  # into Gem.dir with sudo when required. Returns the final gem path.
  def download_gem_from_uri(spec, uri)
    spec.fetch_platform

    download_path = Bundler.requires_sudo? ? Bundler.tmp : Gem.dir
    gem_path = "#{Gem.dir}/cache/#{spec.full_name}.gem"

    FileUtils.mkdir_p("#{download_path}/cache")
    Gem::RemoteFetcher.fetcher.download(spec, uri, download_path)

    if Bundler.requires_sudo?
      sudo "mkdir -p #{Gem.dir}/cache"
      sudo "mv #{Bundler.tmp}/cache/#{spec.full_name}.gem #{gem_path}"
    end

    gem_path
  end
end
# Source backed by a local directory of gemspecs.
class Path
  attr_reader :path, :options
  # Kind of a hack, but needed for the lock file parser
  # NOTE(review): :name here is also redefined as a method below, which
  # emits a "method redefined" warning (fixed upstream by dropping :name
  # from this accessor).
  attr_accessor :name, :version

  DEFAULT_GLOB = "{,*/}*.gemspec"

  def initialize(options)
    @options = options
    @glob = options["glob"] || DEFAULT_GLOB
    @allow_remote = false

    if options["path"]
      @path = Pathname.new(options["path"]).expand_path(Bundler.root)
    end

    @name = options["name"]
    @version = options["version"]
  end

  def remote!
    @allow_remote = true
  end

  # In the lock file a path source's location is stored under "remote".
  def self.from_lock(options)
    new(options.merge("path" => options.delete("remote")))
  end

  def to_lock
    out = "PATH\n"
    out << " remote: #{relative_path}\n"
    out << " glob: #{@glob}\n" unless @glob == DEFAULT_GLOB
    out << " specs:\n"
  end

  def to_s
    "source at #{@path}"
  end

  def hash
    self.class.hash
  end

  def eql?(o)
    Path === o &&
    path == o.path &&
    name == o.name &&
    version == o.version
  end
  alias == eql?

  def name
    File.basename(@path.to_s)
  end

  # Loads every gemspec matching the glob under +path+, evaluating Ruby
  # gemspecs from their own directory. When nothing is found but a
  # name/version were given, synthesizes a "fake" spec.
  # @raise [PathError] when the path does not exist
  def load_spec_files
    index = Index.new

    if File.directory?(path)
      Dir["#{path}/#{@glob}"].each do |file|
        file = Pathname.new(file)
        # Eval the gemspec from its parent directory
        spec = Dir.chdir(file.dirname) do
          begin
            Gem::Specification.from_yaml(file.basename)
            # Raises ArgumentError if the file is not valid YAML
          rescue ArgumentError, Gem::EndOfYAMLException, Gem::Exception
            begin
              eval(File.read(file.basename), TOPLEVEL_BINDING, file.expand_path.to_s)
            rescue LoadError
              raise GemspecError, "There was a LoadError while evaluating #{file.basename}.\n" +
                "Does it try to require a relative path? That doesn't work in Ruby 1.9."
            end
          end
        end

        if spec
          spec.loaded_from = file.to_s
          spec.source = self
          index << spec
        end
      end

      if index.empty? && @name && @version
        index << Gem::Specification.new do |s|
          s.name     = @name
          s.source   = self
          s.version  = Gem::Version.new(@version)
          s.platform = Gem::Platform::RUBY
          s.summary  = "Fake gemspec for #{@name}"
          s.relative_loaded_from = "#{@name}.gemspec"
          # Expose any executables shipped in bin/.
          if path.join("bin").exist?
            binaries = path.join("bin").children.map{|c| c.basename.to_s }
            s.executables = binaries
          end
        end
      end
    else
      raise PathError, "The path `#{path}` does not exist."
    end

    index
  end

  def [](spec)
    specs[spec].first
  end

  def local_specs
    @local_specs ||= load_spec_files
  end

  # Minimal installer that only generates bin wrappers (no gem copying).
  class Installer < Gem::Installer
    def initialize(spec)
      @spec              = spec
      @bin_dir           = "#{Gem.dir}/bin"
      @gem_dir           = spec.full_gem_path
      @wrappers          = true
      @env_shebang       = true
      @format_executable = false
    end
  end

  def install(spec)
    Bundler.ui.info "Using #{spec.name} (#{spec.version}) from #{to_s} "
    # Let's be honest, when we're working from a path, we can't
    # really expect native extensions to work because the whole point
    # is to just be able to modify what's in that path and go. So, let's
    # not put ourselves through the pain of actually trying to generate
    # the full gem.
    Installer.new(spec).generate_bin
  end

  alias specs local_specs

  def cache(spec)
    # Path sources outside the project root cannot be vendored.
    unless path.to_s.index(Bundler.root.to_s) == 0
      Bundler.ui.warn " * #{spec.name} at `#{path}` will not be cached."
    end
  end

  private

  # Path relative to the project root when the source lives inside it.
  def relative_path
    if path.to_s.include?(Bundler.root.to_s)
      return path.relative_path_from(Bundler.root)
    end

    path
  end

  # Builds the gem in place to compile extensions and generate binstubs,
  # deleting the built .gem file afterwards.
  def generate_bin(spec)
    gem_dir = Pathname.new(spec.full_gem_path)

    # Some gem authors put absolute paths in their gemspec
    # and we have to save them from themselves
    spec.files = spec.files.map do |p|
      next if File.directory?(p)
      begin
        Pathname.new(p).relative_path_from(gem_dir).to_s
      rescue ArgumentError
        p
      end
    end.compact

    gem_file = Dir.chdir(gem_dir){ Gem::Builder.new(spec).build }

    installer = Gem::Installer.new File.join(gem_dir, gem_file),
      :bin_dir           => "#{Gem.dir}/bin",
      :wrappers          => true,
      :env_shebang       => false,
      :format_executable => false

    installer.instance_eval { @gem_dir = gem_dir }

    installer.build_extensions
    installer.generate_bin
  rescue Gem::InvalidSpecificationException => e
    Bundler.ui.warn "\n#{spec.name} at #{spec.full_gem_path} did not have a valid gemspec.\n" \
      "This prevents bundler from installing bins or native extensions, but " \
      "that may not affect its functionality."

    if !spec.extensions.empty? && !spec.email.empty?
      Bundler.ui.warn "If you need to use this package without installing it from a gem " \
        "repository, please contact #{spec.email} and ask them " \
        "to modify their .gemspec so it can work with `gem build`."
    end

    Bundler.ui.warn "The validation message from Rubygems was:\n #{e.message}"
  ensure
    Dir.chdir(gem_dir){ FileUtils.rm_rf(gem_file) if gem_file && File.exist?(gem_file) }
  end
end
# Source backed by a git repository: a bare clone lives in the cache dir
# and working checkouts are keyed by the short revision.
class Git < Path
  attr_reader :uri, :ref, :options

  def initialize(options)
    super
    @uri = options["uri"]
    # Ref precedence: explicit ref, then branch, then tag, then master.
    @ref = options["ref"] || options["branch"] || options["tag"] || 'master'
    @revision = options["revision"]
    @update = false
  end

  # In the lock file a git source's uri is stored under "remote".
  def self.from_lock(options)
    new(options.merge("uri" => options.delete("remote")))
  end

  def to_lock
    out = "GIT\n"
    out << " remote: #{@uri}\n"
    out << " revision: #{shortref_for(revision)}\n"
    %w(ref branch tag).each do |opt|
      out << " #{opt}: #{options[opt]}\n" if options[opt]
    end
    out << " glob: #{@glob}\n" unless @glob == DEFAULT_GLOB
    out << " specs:\n"
  end

  def eql?(o)
    Git === o &&
    uri == o.uri &&
    ref == o.ref &&
    name == o.name &&
    version == o.version
  end
  alias == eql?

  def to_s
    ref = @options["ref"] ? shortref_for(@options["ref"]) : @ref
    "#{@uri} (at #{ref})"
  end

  def name
    File.basename(@uri, '.git')
  end

  # Checkout location: "<base>-<short revision>" under the install path.
  def path
    Bundler.install_path.join("#{base_name}-#{shortref_for(revision)}")
  end

  # Forgets the pinned revision so the next resolve re-reads the ref.
  def unlock!
    @revision = nil
  end

  # Specs from the checkout; refreshes cache + checkout once per run when
  # remote mode is on.
  def specs
    if @allow_remote && !@update
      # Start by making sure the git cache is up to date
      cache
      checkout
      @update = true
    end
    local_specs
  end

  def install(spec)
    Bundler.ui.info "Using #{spec.name} (#{spec.version}) from #{to_s} "

    unless @installed
      Bundler.ui.debug " * Checking out revision: #{ref}"
      checkout
      @installed = true
    end
    generate_bin(spec)
  end

  def load_spec_files
    super if cache_path.exist?
  rescue PathError
    raise PathError, "#{to_s} is not checked out. Please run `bundle install`"
  end

  private

  # Runs a git command (through sudo when required).
  # @raise [GitError] on a non-zero exit status
  def git(command)
    if Bundler.requires_sudo?
      out = %x{sudo -E git #{command}}
    else
      out = %x{git #{command}}
    end

    if $? != 0
      raise GitError, "An error has occurred in git. Cannot complete bundling."
    end
    out
  end

  # Repository basename with scheme/user-host prefix and ".git" stripped.
  def base_name
    File.basename(uri.sub(%r{^(\w+://)?([^/:]+:)},''), ".git")
  end

  # First 7 characters of a revision string.
  def shortref_for(ref)
    ref[0..6]
  end

  # Stable digest of the (normalized) uri, used to key the cache dir.
  def uri_hash
    if uri =~ %r{^\w+://(\w+@)?}
      # Downcase the domain component of the URI
      # and strip off a trailing slash, if one is present
      input = URI.parse(uri).normalize.to_s.sub(%r{/$},'')
    else
      # If there is no URI scheme, assume it is an ssh/git URI
      input = uri
    end
    Digest::SHA1.hexdigest(input)
  end

  def cache_path
    @cache_path ||= Bundler.cache.join("git", "#{base_name}-#{uri_hash}")
  end

  # Clones (bare) or fetches the repository into the cache.
  def cache
    if cached?
      Bundler.ui.info "Updating #{uri}"
      in_cache { git %|fetch --force --quiet "#{uri}" refs/heads/*:refs/heads/*| }
    else
      Bundler.ui.info "Fetching #{uri}"
      FileUtils.mkdir_p(cache_path.dirname)
      git %|clone "#{uri}" "#{cache_path}" --bare --no-hardlinks|
    end
  end

  # Clones from the cache (when needed), hard-resets to the revision and
  # updates submodules.
  def checkout
    unless File.exist?(path.join(".git"))
      FileUtils.mkdir_p(path.dirname)
      git %|clone --no-checkout "#{cache_path}" "#{path}"|
    end
    Dir.chdir(path) do
      git "fetch --force --quiet"
      git "reset --hard #{revision}"
      git "submodule init"
      git "submodule update"
    end
  end

  # Resolves and memoizes the ref's full SHA via the cached repo.
  def revision
    @revision ||= in_cache { git("rev-parse #{ref}").strip }
  end

  def cached?
    cache_path.exist?
  end

  # Runs the block inside the cache dir, cloning first if necessary.
  def in_cache(&blk)
    cache unless cached?
    Dir.chdir(cache_path, &blk)
  end
end
end
end
avoid warning: method redefined; discarding old name
Closes GH-373
require "uri"
require "rubygems/installer"
require "rubygems/spec_fetcher"
require "rubygems/format"
require "digest/sha1"
require "open3"
module Bundler
module Source
# TODO: Refactor this class
# Gem source backed by rubygems repositories: resolves specs from the
# locally installed gems, the local gem caches, and (when remote mode is
# enabled) the configured remote indexes.
class Rubygems
  attr_reader :remotes

  def initialize(options = {})
    @options = options
    @remotes = (options["remotes"] || []).map { |r| normalize_uri(r) }
    @allow_remote = false
    # Hardcode the paths for now
    @installed = {}
    @caches = [ Bundler.app_cache ] + Gem.path.map { |p| File.expand_path("#{p}/cache") }
    @spec_fetch_map = {}
  end

  # Enables consulting remote indexes in addition to local specs.
  def remote!
    @allow_remote = true
  end

  # First matching spec, preferring installed over cached over remote
  # (the latter two only when remote mode is enabled).
  def [](spec)
    installed_specs[spec].first ||
    @allow_remote && (
      cached_specs[spec].first ||
      remote_specs[spec].first)
  end

  # All Rubygems sources hash/compare equal: they are interchangeable.
  def hash
    Rubygems.hash
  end

  def eql?(o)
    Rubygems === o
  end
  alias == eql?

  # Not really needed, but it seems good to implement this method for interface
  # consistency. Source name is mostly used to identify Path & Git sources
  def name
    ":gems"
  end

  def options
    { "remotes" => @remotes.map { |r| r.to_s } }
  end

  # Reconstructs the source from its lock-file representation.
  def self.from_lock(options)
    s = new(options)
    Array(options["remote"]).each { |r| s.add_remote(r) }
    s
  end

  # Lock-file serialization: the "GEM" section header plus remotes.
  def to_lock
    out = "GEM\n"
    out << remotes.map {|r| " remote: #{r}\n" }.join
    out << " specs:\n"
  end

  def to_s
    remotes = self.remotes.map { |r| r.to_s }.join(', ')
    "rubygems repository #{remotes}"
  end

  # Full index; includes cached and remote specs only in remote mode.
  def specs
    @specs ||= @allow_remote ? fetch_specs : installed_specs
  end

  # Runs the deferred download registered for this spec (if any).
  def fetch(spec)
    action = @spec_fetch_map[spec.full_name]
    action.call if action
  end

  # Installs a cached gem file unless the gem is already installed.
  def install(spec)
    path = cached_gem(spec)

    if @installed[spec.full_name]
      Bundler.ui.info "Using #{spec.name} (#{spec.version}) "
      return
    else
      Bundler.ui.info "Installing #{spec.name} (#{spec.version}) "
    end

    # When sudo is required, install into a temp dir first and move the
    # artifacts into place with sudo afterwards.
    install_path = Bundler.requires_sudo? ? Bundler.tmp : Gem.dir
    installer = Gem::Installer.new path,
      :install_dir         => install_path,
      :ignore_dependencies => true,
      :wrappers            => true,
      :env_shebang         => true,
      :bin_dir             => "#{install_path}/bin"
    installer.install

    # SUDO HAX
    if Bundler.requires_sudo?
      sudo "mkdir -p #{Gem.dir}/gems #{Gem.dir}/specifications"
      sudo "mv #{Bundler.tmp}/gems/#{spec.full_name} #{Gem.dir}/gems/"
      sudo "mv #{Bundler.tmp}/specifications/#{spec.full_name}.gemspec #{Gem.dir}/specifications/"
    end

    spec.loaded_from = "#{Gem.dir}/specifications/#{spec.full_name}.gemspec"
  end

  # NOTE(review): shell command is built by interpolation; paths are
  # assumed not to contain shell metacharacters.
  def sudo(str)
    `sudo -E #{str}`
  end

  # Copies the cached gem file into the application cache.
  def cache(spec)
    cached_path = cached_gem(spec)
    raise GemNotFound, "Missing gem file '#{spec.full_name}.gem'." unless cached_path
    return if File.dirname(cached_path) == Bundler.app_cache.to_s
    Bundler.ui.info " * #{File.basename(cached_path)}"
    FileUtils.cp(cached_path, Bundler.app_cache)
  end

  def add_remote(source)
    @remotes << normalize_uri(source)
  end

  private

  # First existing "<cache>/<full_name>.gem" across all cache dirs, or nil.
  def cached_gem(spec)
    possibilities = @caches.map { |p| "#{p}/#{spec.full_name}.gem" }
    possibilities.find { |p| File.exist?(p) }
  end

  # Ensures a trailing slash and an absolute URI.
  # @raise [ArgumentError] when the URI is not absolute
  def normalize_uri(uri)
    uri = uri.to_s
    uri = "#{uri}/" unless uri =~ %r'/$'
    uri = URI(uri)
    raise ArgumentError, "The source must be an absolute URI" unless uri.absolute?
    uri
  end

  def fetch_specs
    Index.build do |idx|
      idx.use installed_specs
      idx.use cached_specs
      idx.use remote_specs
    end
  end

  # Index of locally installed gems; bundler itself is excluded, then
  # re-added as a synthetic spec so it is always resolvable.
  def installed_specs
    @installed_specs ||= begin
      idx = Index.new
      Gem::SourceIndex.from_installed_gems.to_a.reverse.each do |name, spec|
        next if name == 'bundler'
        @installed[spec.full_name] = true
        spec.source = self
        idx << spec
      end

      # Always have bundler locally
      bundler = Gem::Specification.new do |s|
        s.name     = 'bundler'
        s.version  = VERSION
        s.platform = Gem::Platform::RUBY
        s.source   = self
        # TODO: Remove this
        s.loaded_from = 'w0t'
      end
      @installed[bundler.full_name] = true
      idx << bundler
      idx
    end
  end

  # Index of gems found in the cache directories.
  def cached_specs
    @cached_specs ||= begin
      idx = Index.new
      @caches.each do |path|
        Dir["#{path}/*.gem"].each do |gemfile|
          # FIXME(review): `name` here resolves to Rubygems#name (always
          # ":gems"), not the gem's name, so this guard never skips
          # anything; it presumably meant to test the gem file's name.
          next if name == 'bundler'
          s = Gem::Format.from_file_by_path(gemfile).spec
          s.source = self
          idx << s
        end
      end
      idx
    end
  end

  # Fetches specs from every remote, registering a deferred download per
  # spec so the gem file is only pulled when actually needed.
  def remote_specs
    @remote_specs ||= begin
      idx = Index.new
      remotes = self.remotes.map { |uri| uri.to_s }
      old = Gem.sources

      remotes.each do |uri|
        Bundler.ui.info "Fetching source index for #{uri}"
        Gem.sources = ["#{uri}"]
        fetch_all_remote_specs do |n,v|
          v.each do |name, version, platform|
            next if name == 'bundler'
            spec = RemoteSpecification.new(name, version, platform, uri)
            spec.source = self
            # Temporary hack until this can be figured out better
            @spec_fetch_map[spec.full_name] = lambda do
              path = download_gem_from_uri(spec, uri)
              s = Gem::Format.from_file_by_path(path).spec
              spec.__swap__(s)
            end
            idx << spec
          end
        end
      end
      idx
    ensure
      # Restore the global Gem.sources even on failure.
      Gem.sources = old
    end
  end

  # Lists release specs, then prerelease specs; network failures degrade
  # to a warning instead of aborting.
  def fetch_all_remote_specs(&blk)
    begin
      # Fetch all specs, minus prerelease specs
      Gem::SpecFetcher.new.list(true, false).each(&blk)
      # Then fetch the prerelease specs
      begin
        Gem::SpecFetcher.new.list(false, true).each(&blk)
      rescue Gem::RemoteFetcher::FetchError
        Bundler.ui.warn "Could not fetch prerelease specs from #{self}"
      end
    rescue Gem::RemoteFetcher::FetchError
      Bundler.ui.warn "Could not reach #{self}"
    end
  end

  # Downloads the gem into the (possibly temporary) cache dir, moving it
  # into Gem.dir with sudo when required. Returns the final gem path.
  def download_gem_from_uri(spec, uri)
    spec.fetch_platform

    download_path = Bundler.requires_sudo? ? Bundler.tmp : Gem.dir
    gem_path = "#{Gem.dir}/cache/#{spec.full_name}.gem"

    FileUtils.mkdir_p("#{download_path}/cache")
    Gem::RemoteFetcher.fetcher.download(spec, uri, download_path)

    if Bundler.requires_sudo?
      sudo "mkdir -p #{Gem.dir}/cache"
      sudo "mv #{Bundler.tmp}/cache/#{spec.full_name}.gem #{gem_path}"
    end

    gem_path
  end
end
# Source backed by a local directory of gemspecs.
class Path
  attr_reader :path, :options
  # Kind of a hack, but needed for the lock file parser
  attr_accessor :version

  DEFAULT_GLOB = "{,*/}*.gemspec"

  def initialize(options)
    @options = options
    @glob = options["glob"] || DEFAULT_GLOB
    @allow_remote = false

    if options["path"]
      @path = Pathname.new(options["path"]).expand_path(Bundler.root)
    end

    @name = options["name"]
    @version = options["version"]
  end

  def remote!
    @allow_remote = true
  end

  # In the lock file a path source's location is stored under "remote".
  def self.from_lock(options)
    new(options.merge("path" => options.delete("remote")))
  end

  def to_lock
    out = "PATH\n"
    out << " remote: #{relative_path}\n"
    out << " glob: #{@glob}\n" unless @glob == DEFAULT_GLOB
    out << " specs:\n"
  end

  def to_s
    "source at #{@path}"
  end

  def hash
    self.class.hash
  end

  def eql?(o)
    Path === o &&
    path == o.path &&
    name == o.name &&
    version == o.version
  end
  alias == eql?

  def name
    File.basename(@path.to_s)
  end

  # Loads every gemspec matching the glob under +path+, evaluating Ruby
  # gemspecs from their own directory. When nothing is found but a
  # name/version were given, synthesizes a "fake" spec.
  # @raise [PathError] when the path does not exist
  def load_spec_files
    index = Index.new

    if File.directory?(path)
      Dir["#{path}/#{@glob}"].each do |file|
        file = Pathname.new(file)
        # Eval the gemspec from its parent directory
        spec = Dir.chdir(file.dirname) do
          begin
            Gem::Specification.from_yaml(file.basename)
            # Raises ArgumentError if the file is not valid YAML
          rescue ArgumentError, Gem::EndOfYAMLException, Gem::Exception
            begin
              eval(File.read(file.basename), TOPLEVEL_BINDING, file.expand_path.to_s)
            rescue LoadError
              raise GemspecError, "There was a LoadError while evaluating #{file.basename}.\n" +
                "Does it try to require a relative path? That doesn't work in Ruby 1.9."
            end
          end
        end

        if spec
          spec.loaded_from = file.to_s
          spec.source = self
          index << spec
        end
      end

      if index.empty? && @name && @version
        index << Gem::Specification.new do |s|
          s.name     = @name
          s.source   = self
          s.version  = Gem::Version.new(@version)
          s.platform = Gem::Platform::RUBY
          s.summary  = "Fake gemspec for #{@name}"
          s.relative_loaded_from = "#{@name}.gemspec"
          # Expose any executables shipped in bin/.
          if path.join("bin").exist?
            binaries = path.join("bin").children.map{|c| c.basename.to_s }
            s.executables = binaries
          end
        end
      end
    else
      raise PathError, "The path `#{path}` does not exist."
    end

    index
  end

  def [](spec)
    specs[spec].first
  end

  def local_specs
    @local_specs ||= load_spec_files
  end

  # Minimal installer that only generates bin wrappers (no gem copying).
  class Installer < Gem::Installer
    def initialize(spec)
      @spec              = spec
      @bin_dir           = "#{Gem.dir}/bin"
      @gem_dir           = spec.full_gem_path
      @wrappers          = true
      @env_shebang       = true
      @format_executable = false
    end
  end

  def install(spec)
    Bundler.ui.info "Using #{spec.name} (#{spec.version}) from #{to_s} "
    # Let's be honest, when we're working from a path, we can't
    # really expect native extensions to work because the whole point
    # is to just be able to modify what's in that path and go. So, let's
    # not put ourselves through the pain of actually trying to generate
    # the full gem.
    Installer.new(spec).generate_bin
  end

  alias specs local_specs

  def cache(spec)
    # Path sources outside the project root cannot be vendored.
    unless path.to_s.index(Bundler.root.to_s) == 0
      Bundler.ui.warn " * #{spec.name} at `#{path}` will not be cached."
    end
  end

  private

  # Path relative to the project root when the source lives inside it.
  def relative_path
    if path.to_s.include?(Bundler.root.to_s)
      return path.relative_path_from(Bundler.root)
    end

    path
  end

  # Builds the gem in place to compile extensions and generate binstubs,
  # deleting the built .gem file afterwards.
  def generate_bin(spec)
    gem_dir = Pathname.new(spec.full_gem_path)

    # Some gem authors put absolute paths in their gemspec
    # and we have to save them from themselves
    spec.files = spec.files.map do |p|
      next if File.directory?(p)
      begin
        Pathname.new(p).relative_path_from(gem_dir).to_s
      rescue ArgumentError
        p
      end
    end.compact

    gem_file = Dir.chdir(gem_dir){ Gem::Builder.new(spec).build }

    installer = Gem::Installer.new File.join(gem_dir, gem_file),
      :bin_dir           => "#{Gem.dir}/bin",
      :wrappers          => true,
      :env_shebang       => false,
      :format_executable => false

    installer.instance_eval { @gem_dir = gem_dir }

    installer.build_extensions
    installer.generate_bin
  rescue Gem::InvalidSpecificationException => e
    Bundler.ui.warn "\n#{spec.name} at #{spec.full_gem_path} did not have a valid gemspec.\n" \
      "This prevents bundler from installing bins or native extensions, but " \
      "that may not affect its functionality."

    if !spec.extensions.empty? && !spec.email.empty?
      Bundler.ui.warn "If you need to use this package without installing it from a gem " \
        "repository, please contact #{spec.email} and ask them " \
        "to modify their .gemspec so it can work with `gem build`."
    end

    Bundler.ui.warn "The validation message from Rubygems was:\n #{e.message}"
  ensure
    Dir.chdir(gem_dir){ FileUtils.rm_rf(gem_file) if gem_file && File.exist?(gem_file) }
  end
end
# Source backed by a git repository: a bare clone lives in the cache dir
# and working checkouts are keyed by the short revision.
class Git < Path
  attr_reader :uri, :ref, :options

  def initialize(options)
    super
    @uri = options["uri"]
    # Ref precedence: explicit ref, then branch, then tag, then master.
    @ref = options["ref"] || options["branch"] || options["tag"] || 'master'
    @revision = options["revision"]
    @update = false
  end

  # In the lock file a git source's uri is stored under "remote".
  def self.from_lock(options)
    new(options.merge("uri" => options.delete("remote")))
  end

  def to_lock
    out = "GIT\n"
    out << " remote: #{@uri}\n"
    out << " revision: #{shortref_for(revision)}\n"
    %w(ref branch tag).each do |opt|
      out << " #{opt}: #{options[opt]}\n" if options[opt]
    end
    out << " glob: #{@glob}\n" unless @glob == DEFAULT_GLOB
    out << " specs:\n"
  end

  def eql?(o)
    Git === o &&
    uri == o.uri &&
    ref == o.ref &&
    name == o.name &&
    version == o.version
  end
  alias == eql?

  def to_s
    ref = @options["ref"] ? shortref_for(@options["ref"]) : @ref
    "#{@uri} (at #{ref})"
  end

  def name
    File.basename(@uri, '.git')
  end

  # Checkout location: "<base>-<short revision>" under the install path.
  def path
    Bundler.install_path.join("#{base_name}-#{shortref_for(revision)}")
  end

  # Forgets the pinned revision so the next resolve re-reads the ref.
  def unlock!
    @revision = nil
  end

  # Specs from the checkout; refreshes cache + checkout once per run when
  # remote mode is on.
  def specs
    if @allow_remote && !@update
      # Start by making sure the git cache is up to date
      cache
      checkout
      @update = true
    end
    local_specs
  end

  def install(spec)
    Bundler.ui.info "Using #{spec.name} (#{spec.version}) from #{to_s} "

    unless @installed
      Bundler.ui.debug " * Checking out revision: #{ref}"
      checkout
      @installed = true
    end
    generate_bin(spec)
  end

  def load_spec_files
    super if cache_path.exist?
  rescue PathError
    raise PathError, "#{to_s} is not checked out. Please run `bundle install`"
  end

  private

  # Runs a git command (through sudo when required).
  # @raise [GitError] on a non-zero exit status
  def git(command)
    if Bundler.requires_sudo?
      out = %x{sudo -E git #{command}}
    else
      out = %x{git #{command}}
    end

    if $? != 0
      raise GitError, "An error has occurred in git. Cannot complete bundling."
    end
    out
  end

  # Repository basename with scheme/user-host prefix and ".git" stripped.
  def base_name
    File.basename(uri.sub(%r{^(\w+://)?([^/:]+:)},''), ".git")
  end

  # First 7 characters of a revision string.
  def shortref_for(ref)
    ref[0..6]
  end

  # Stable digest of the (normalized) uri, used to key the cache dir.
  def uri_hash
    if uri =~ %r{^\w+://(\w+@)?}
      # Downcase the domain component of the URI
      # and strip off a trailing slash, if one is present
      input = URI.parse(uri).normalize.to_s.sub(%r{/$},'')
    else
      # If there is no URI scheme, assume it is an ssh/git URI
      input = uri
    end
    Digest::SHA1.hexdigest(input)
  end

  def cache_path
    @cache_path ||= Bundler.cache.join("git", "#{base_name}-#{uri_hash}")
  end

  # Clones (bare) or fetches the repository into the cache.
  def cache
    if cached?
      Bundler.ui.info "Updating #{uri}"
      in_cache { git %|fetch --force --quiet "#{uri}" refs/heads/*:refs/heads/*| }
    else
      Bundler.ui.info "Fetching #{uri}"
      FileUtils.mkdir_p(cache_path.dirname)
      git %|clone "#{uri}" "#{cache_path}" --bare --no-hardlinks|
    end
  end

  # Clones from the cache (when needed), hard-resets to the revision and
  # updates submodules.
  def checkout
    unless File.exist?(path.join(".git"))
      FileUtils.mkdir_p(path.dirname)
      git %|clone --no-checkout "#{cache_path}" "#{path}"|
    end
    Dir.chdir(path) do
      git "fetch --force --quiet"
      git "reset --hard #{revision}"
      git "submodule init"
      git "submodule update"
    end
  end

  # Resolves and memoizes the ref's full SHA via the cached repo.
  def revision
    @revision ||= in_cache { git("rev-parse #{ref}").strip }
  end

  def cached?
    cache_path.exist?
  end

  # Runs the block inside the cache dir, cloning first if necessary.
  def in_cache(&blk)
    cache unless cached?
    Dir.chdir(cache_path, &blk)
  end
end
end
end
|
require "uri"
require "rubygems/installer"
require "rubygems/spec_fetcher"
require "rubygems/format"
require "digest/sha1"
require "open3"
module Bundler
module Source
# TODO: Refactor this class
class Rubygems
attr_reader :remotes
def initialize(options = {})
@options = options
@remotes = (options["remotes"] || []).map { |r| normalize_uri(r) }
@allow_remote = false
@allow_cached = false
# Hardcode the paths for now
@caches = [ Bundler.app_cache ] + Gem.path.map { |p| File.expand_path("#{p}/cache") }
@spec_fetch_map = {}
end
def remote!
@allow_remote = true
end
def cached!
@allow_cached = true
end
def hash
Rubygems.hash
end
def eql?(o)
Rubygems === o
end
alias == eql?
# Not really needed, but it seems good to implement this method for interface
# consistency. Source name is mostly used to identify Path & Git sources
def name
":gems"
end
def options
{ "remotes" => @remotes.map { |r| r.to_s } }
end
def self.from_lock(options)
s = new(options)
Array(options["remote"]).each { |r| s.add_remote(r) }
s
end
def to_lock
out = "GEM\n"
out << remotes.map {|r| " remote: #{r}\n" }.join
out << " specs:\n"
end
def to_s
remotes = self.remotes.map { |r| r.to_s }.join(', ')
"rubygems repository #{remotes}"
end
def specs
@specs ||= fetch_specs
end
def fetch(spec)
action = @spec_fetch_map[spec.full_name]
action.call if action
end
def install(spec)
path = cached_gem(spec)
if installed_specs[spec].any?
Bundler.ui.info "Using #{spec.name} (#{spec.version}) "
return
end
Bundler.ui.info "Installing #{spec.name} (#{spec.version}) "
install_path = Bundler.requires_sudo? ? Bundler.tmp : Gem.dir
installer = Gem::Installer.new path,
:install_dir => install_path,
:ignore_dependencies => true,
:wrappers => true,
:env_shebang => true,
:bin_dir => "#{install_path}/bin"
installer.install
# SUDO HAX
if Bundler.requires_sudo?
sudo "mkdir -p #{Gem.dir}/gems #{Gem.dir}/specifications"
sudo "mv #{Bundler.tmp}/gems/#{spec.full_name} #{Gem.dir}/gems/"
sudo "mv #{Bundler.tmp}/specifications/#{spec.full_name}.gemspec #{Gem.dir}/specifications/"
end
spec.loaded_from = "#{Gem.dir}/specifications/#{spec.full_name}.gemspec"
end
# Run +str+ under sudo, preserving the caller's environment (-E) and
# showing a single-line password prompt (-p).
#
# FIX: the previous prompt embedded a literal "\n" inside the shell
# single-quoted string, which sudo prints verbatim; use the clearer
# single-line message adopted by the later revision of this file.
def sudo(str)
  `sudo -p 'Enter your password to install the bundled RubyGems to your system: ' -E #{str}`
end
def cache(spec)
cached_path = cached_gem(spec)
raise GemNotFound, "Missing gem file '#{spec.full_name}.gem'." unless cached_path
return if File.dirname(cached_path) == Bundler.app_cache.to_s
Bundler.ui.info " * #{File.basename(cached_path)}"
FileUtils.cp(cached_path, Bundler.app_cache)
end
def add_remote(source)
@remotes << normalize_uri(source)
end
private
def cached_gem(spec)
possibilities = @caches.map { |p| "#{p}/#{spec.full_name}.gem" }
possibilities.find { |p| File.exist?(p) }
end
# Coerce +uri+ (String or URI) into an absolute URI object whose path
# ends with a trailing slash.
#
# Raises ArgumentError when the value is not an absolute URI.
def normalize_uri(uri)
  str = uri.to_s
  str = "#{str}/" unless str.end_with?("/")
  normalized = URI(str)
  unless normalized.absolute?
    raise ArgumentError, "The source must be an absolute URI"
  end
  normalized
end
def fetch_specs
Index.build do |idx|
idx.use installed_specs
idx.use cached_specs if @allow_cached
idx.use remote_specs if @allow_remote
end
end
def installed_specs
@installed_specs ||= begin
idx = Index.new
Gem::SourceIndex.from_installed_gems.to_a.reverse.each do |name, spec|
next if name == 'bundler'
spec.source = self
idx << spec
end
# Always have bundler locally
bundler = Gem::Specification.new do |s|
s.name = 'bundler'
s.version = VERSION
s.platform = Gem::Platform::RUBY
s.source = self
# TODO: Remove this
s.loaded_from = 'w0t'
end
idx << bundler
idx
end
end
# Build (and memoize) an Index of every .gem file found in the local
# cache directories (@caches).
#
# Skips bundler's own gem, mirroring installed_specs and remote_specs,
# which both exclude 'bundler' from their indexes.
def cached_specs
  @cached_specs ||= begin
    idx = Index.new
    @caches.each do |path|
      Dir["#{path}/*.gem"].each do |gemfile|
        s = Gem::Format.from_file_by_path(gemfile).spec
        # BUG FIX: the old guard read `next if name == 'bundler'`, but
        # `name` there resolved to this source's #name method (":gems"),
        # so the check never fired. Compare the loaded spec's name.
        next if s.name == 'bundler'
        s.source = self
        idx << s
      end
    end
    idx
  end
end
def remote_specs
@remote_specs ||= begin
idx = Index.new
remotes = self.remotes.map { |uri| uri.to_s }
old = Gem.sources
remotes.each do |uri|
Bundler.ui.info "Fetching source index for #{uri}"
Gem.sources = ["#{uri}"]
fetch_all_remote_specs do |n,v|
v.each do |name, version, platform|
next if name == 'bundler'
spec = RemoteSpecification.new(name, version, platform, uri)
spec.source = self
# Temporary hack until this can be figured out better
@spec_fetch_map[spec.full_name] = lambda do
path = download_gem_from_uri(spec, uri)
s = Gem::Format.from_file_by_path(path).spec
spec.__swap__(s)
end
idx << spec
end
end
end
idx
ensure
Gem.sources = old
end
end
# Yield spec tuples from every remote source currently configured in
# Gem.sources (the caller sets Gem.sources before invoking this).
#
# Released specs are fetched first, then prerelease specs. A fetch
# failure on the prerelease pass is only a warning (not all sources
# publish prerelease indexes); a failure on the main pass means the
# source is unreachable and is also reported as a warning.
#
# NOTE(review): assumes Gem::SpecFetcher#list(all, prerelease) yields
# {source_uri => spec tuples} pairs — confirm against the RubyGems
# version in use.
def fetch_all_remote_specs(&blk)
  begin
    # Fetch all specs, minus prerelease specs
    Gem::SpecFetcher.new.list(true, false).each(&blk)
    # Then fetch the prerelease specs
    begin
      Gem::SpecFetcher.new.list(false, true).each(&blk)
    rescue Gem::RemoteFetcher::FetchError
      Bundler.ui.warn "Could not fetch prerelease specs from #{self}"
    end
  rescue Gem::RemoteFetcher::FetchError
    Bundler.ui.warn "Could not reach #{self}"
  end
end
def download_gem_from_uri(spec, uri)
spec.fetch_platform
download_path = Bundler.requires_sudo? ? Bundler.tmp : Gem.dir
gem_path = "#{Gem.dir}/cache/#{spec.full_name}.gem"
FileUtils.mkdir_p("#{download_path}/cache")
Gem::RemoteFetcher.fetcher.download(spec, uri, download_path)
if Bundler.requires_sudo?
sudo "mkdir -p #{Gem.dir}/cache"
sudo "mv #{Bundler.tmp}/cache/#{spec.full_name}.gem #{gem_path}"
end
gem_path
end
end
class Path
attr_reader :path, :options
# Kind of a hack, but needed for the lock file parser
attr_accessor :version
DEFAULT_GLOB = "{,*/}*.gemspec"
def initialize(options)
@options = options
@glob = options["glob"] || DEFAULT_GLOB
@allow_cached = false
@allow_remote = false
if options["path"]
@path = Pathname.new(options["path"]).expand_path(Bundler.root)
end
@name = options["name"]
@version = options["version"]
end
def remote!
@allow_remote = true
end
def cached!
@allow_cached = true
end
def self.from_lock(options)
new(options.merge("path" => options.delete("remote")))
end
def to_lock
out = "PATH\n"
out << " remote: #{relative_path}\n"
out << " glob: #{@glob}\n" unless @glob == DEFAULT_GLOB
out << " specs:\n"
end
def to_s
"source at #{@path}"
end
def hash
self.class.hash
end
def eql?(o)
Path === o &&
path == o.path &&
name == o.name &&
version == o.version
end
alias == eql?
def name
File.basename(@path.to_s)
end
def load_spec_files
index = Index.new
if File.directory?(path)
Dir["#{path}/#{@glob}"].each do |file|
file = Pathname.new(file)
# Eval the gemspec from its parent directory
spec = Dir.chdir(file.dirname) do
begin
Gem::Specification.from_yaml(file.basename)
# Raises ArgumentError if the file is not valid YAML
rescue ArgumentError, Gem::EndOfYAMLException, Gem::Exception
begin
eval(File.read(file.basename), TOPLEVEL_BINDING, file.expand_path.to_s)
rescue LoadError
raise GemspecError, "There was a LoadError while evaluating #{file.basename}.\n" +
"Does it try to require a relative path? That doesn't work in Ruby 1.9."
end
end
end
if spec
spec.loaded_from = file.to_s
spec.source = self
index << spec
end
end
if index.empty? && @name && @version
index << Gem::Specification.new do |s|
s.name = @name
s.source = self
s.version = Gem::Version.new(@version)
s.platform = Gem::Platform::RUBY
s.summary = "Fake gemspec for #{@name}"
s.relative_loaded_from = "#{@name}.gemspec"
if path.join("bin").exist?
binaries = path.join("bin").children.map{|c| c.basename.to_s }
s.executables = binaries
end
end
end
else
raise PathError, "The path `#{path}` does not exist."
end
index
end
def local_specs
@local_specs ||= load_spec_files
end
class Installer < Gem::Installer
def initialize(spec)
@spec = spec
@bin_dir = "#{Gem.dir}/bin"
@gem_dir = spec.full_gem_path
@wrappers = true
@env_shebang = true
@format_executable = false
end
end
def install(spec)
Bundler.ui.info "Using #{spec.name} (#{spec.version}) from #{to_s} "
# Let's be honest, when we're working from a path, we can't
# really expect native extensions to work because the whole point
# is to just be able to modify what's in that path and go. So, let's
# not put ourselves through the pain of actually trying to generate
# the full gem.
Installer.new(spec).generate_bin
end
alias specs local_specs
def cache(spec)
unless path.to_s.index(Bundler.root.to_s) == 0
Bundler.ui.warn " * #{spec.name} at `#{path}` will not be cached."
end
end
private
def relative_path
if path.to_s.include?(Bundler.root.to_s)
return path.relative_path_from(Bundler.root)
end
path
end
def generate_bin(spec)
gem_dir = Pathname.new(spec.full_gem_path)
# Some gem authors put absolute paths in their gemspec
# and we have to save them from themselves
spec.files = spec.files.map do |p|
next if File.directory?(p)
begin
Pathname.new(p).relative_path_from(gem_dir).to_s
rescue ArgumentError
p
end
end.compact
gem_file = Dir.chdir(gem_dir){ Gem::Builder.new(spec).build }
installer = Gem::Installer.new File.join(gem_dir, gem_file),
:bin_dir => "#{Gem.dir}/bin",
:wrappers => true,
:env_shebang => false,
:format_executable => false
installer.instance_eval { @gem_dir = gem_dir }
installer.build_extensions
installer.generate_bin
rescue Gem::InvalidSpecificationException => e
Bundler.ui.warn "\n#{spec.name} at #{spec.full_gem_path} did not have a valid gemspec.\n" \
"This prevents bundler from installing bins or native extensions, but " \
"that may not affect its functionality."
if !spec.extensions.empty? && !spec.email.empty?
Bundler.ui.warn "If you need to use this package without installing it from a gem " \
"repository, please contact #{spec.email} and ask them " \
"to modify their .gemspec so it can work with `gem build`."
end
Bundler.ui.warn "The validation message from Rubygems was:\n #{e.message}"
ensure
Dir.chdir(gem_dir){ FileUtils.rm_rf(gem_file) if gem_file && File.exist?(gem_file) }
end
end
class Git < Path
attr_reader :uri, :ref, :options
def initialize(options)
super
@uri = options["uri"]
@ref = options["ref"] || options["branch"] || options["tag"] || 'master'
@revision = options["revision"]
@submodules = options["submodules"]
@update = false
end
def self.from_lock(options)
new(options.merge("uri" => options.delete("remote")))
end
def to_lock
out = "GIT\n"
out << " remote: #{@uri}\n"
out << " revision: #{shortref_for(revision)}\n"
%w(ref branch tag submodules).each do |opt|
out << " #{opt}: #{options[opt]}\n" if options[opt]
end
out << " glob: #{@glob}\n" unless @glob == DEFAULT_GLOB
out << " specs:\n"
end
def eql?(o)
Git === o &&
uri == o.uri &&
ref == o.ref &&
name == o.name &&
version == o.version
end
alias == eql?
def to_s
ref = @options["ref"] ? shortref_for(@options["ref"]) : @ref
"#{@uri} (at #{ref})"
end
def name
File.basename(@uri, '.git')
end
def path
Bundler.install_path.join("#{base_name}-#{shortref_for(revision)}")
end
def unlock!
@revision = nil
end
# TODO: actually cache git specs
def specs
if (@allow_remote || @allow_cached) && !@update
# Start by making sure the git cache is up to date
cache
checkout
@update = true
end
local_specs
end
def install(spec)
Bundler.ui.info "Using #{spec.name} (#{spec.version}) from #{to_s} "
unless @installed
Bundler.ui.debug " * Checking out revision: #{ref}"
checkout
@installed = true
end
generate_bin(spec)
end
def load_spec_files
super if cache_path.exist?
rescue PathError
raise PathError, "#{to_s} is not checked out. Please run `bundle install`"
end
private
# Run a git subcommand (under sudo when Bundler requires it) and return
# its captured stdout.
#
# Raises GitError when git exits with a non-zero status.
#
# NOTE(review): +command+ is interpolated straight into a shell string.
# Callers quote URIs/paths at the call sites, but a value containing a
# double quote could still break out of the quoting — consider proper
# shell escaping. TODO confirm the inputs are trusted.
def git(command)
  if Bundler.requires_sudo?
    out = %x{sudo -E git #{command}}
  else
    out = %x{git #{command}}
  end
  if $? != 0
    raise GitError, "An error has occurred in git. Cannot complete bundling."
  end
  out
end
def base_name
File.basename(uri.sub(%r{^(\w+://)?([^/:]+:)},''), ".git")
end
# Abbreviate a git ref/SHA to its first seven characters (refs shorter
# than seven characters are returned unchanged).
def shortref_for(ref)
  ref.slice(0, 7)
end
def uri_hash
if uri =~ %r{^\w+://(\w+@)?}
# Downcase the domain component of the URI
# and strip off a trailing slash, if one is present
input = URI.parse(uri).normalize.to_s.sub(%r{/$},'')
else
# If there is no URI scheme, assume it is an ssh/git URI
input = uri
end
Digest::SHA1.hexdigest(input)
end
def cache_path
@cache_path ||= Bundler.cache.join("git", "#{base_name}-#{uri_hash}")
end
def cache
if cached?
Bundler.ui.info "Updating #{uri}"
in_cache { git %|fetch --force --quiet "#{uri}" refs/heads/*:refs/heads/*| }
else
Bundler.ui.info "Fetching #{uri}"
FileUtils.mkdir_p(cache_path.dirname)
git %|clone "#{uri}" "#{cache_path}" --bare --no-hardlinks|
end
end
def checkout
unless File.exist?(path.join(".git"))
FileUtils.mkdir_p(path.dirname)
git %|clone --no-checkout "#{cache_path}" "#{path}"|
end
Dir.chdir(path) do
git "fetch --force --quiet"
git "reset --hard #{revision}"
if @submodules
git "submodule init"
git "submodule update"
end
end
end
def revision
@revision ||= in_cache { git("rev-parse #{ref}").strip }
end
def cached?
cache_path.exist?
end
def in_cache(&blk)
cache unless cached?
Dir.chdir(cache_path, &blk)
end
end
end
end
Improve the sudo password prompt to a clearer single-line message
require "uri"
require "rubygems/installer"
require "rubygems/spec_fetcher"
require "rubygems/format"
require "digest/sha1"
require "open3"
module Bundler
module Source
# TODO: Refactor this class
class Rubygems
attr_reader :remotes
def initialize(options = {})
@options = options
@remotes = (options["remotes"] || []).map { |r| normalize_uri(r) }
@allow_remote = false
@allow_cached = false
# Hardcode the paths for now
@caches = [ Bundler.app_cache ] + Gem.path.map { |p| File.expand_path("#{p}/cache") }
@spec_fetch_map = {}
end
def remote!
@allow_remote = true
end
def cached!
@allow_cached = true
end
def hash
Rubygems.hash
end
def eql?(o)
Rubygems === o
end
alias == eql?
# Not really needed, but it seems good to implement this method for interface
# consistency. Source name is mostly used to identify Path & Git sources
def name
":gems"
end
def options
{ "remotes" => @remotes.map { |r| r.to_s } }
end
def self.from_lock(options)
s = new(options)
Array(options["remote"]).each { |r| s.add_remote(r) }
s
end
def to_lock
out = "GEM\n"
out << remotes.map {|r| " remote: #{r}\n" }.join
out << " specs:\n"
end
def to_s
remotes = self.remotes.map { |r| r.to_s }.join(', ')
"rubygems repository #{remotes}"
end
def specs
@specs ||= fetch_specs
end
def fetch(spec)
action = @spec_fetch_map[spec.full_name]
action.call if action
end
def install(spec)
path = cached_gem(spec)
if installed_specs[spec].any?
Bundler.ui.info "Using #{spec.name} (#{spec.version}) "
return
end
Bundler.ui.info "Installing #{spec.name} (#{spec.version}) "
install_path = Bundler.requires_sudo? ? Bundler.tmp : Gem.dir
installer = Gem::Installer.new path,
:install_dir => install_path,
:ignore_dependencies => true,
:wrappers => true,
:env_shebang => true,
:bin_dir => "#{install_path}/bin"
installer.install
# SUDO HAX
if Bundler.requires_sudo?
sudo "mkdir -p #{Gem.dir}/gems #{Gem.dir}/specifications"
sudo "mv #{Bundler.tmp}/gems/#{spec.full_name} #{Gem.dir}/gems/"
sudo "mv #{Bundler.tmp}/specifications/#{spec.full_name}.gemspec #{Gem.dir}/specifications/"
end
spec.loaded_from = "#{Gem.dir}/specifications/#{spec.full_name}.gemspec"
end
# Run +str+ under sudo, preserving the caller's environment (-E) and
# showing a clear single-line password prompt (-p) if sudo needs one.
def sudo(str)
  `sudo -p 'Enter your password to install the bundled RubyGems to your system: ' -E #{str}`
end
def cache(spec)
cached_path = cached_gem(spec)
raise GemNotFound, "Missing gem file '#{spec.full_name}.gem'." unless cached_path
return if File.dirname(cached_path) == Bundler.app_cache.to_s
Bundler.ui.info " * #{File.basename(cached_path)}"
FileUtils.cp(cached_path, Bundler.app_cache)
end
def add_remote(source)
@remotes << normalize_uri(source)
end
private
def cached_gem(spec)
possibilities = @caches.map { |p| "#{p}/#{spec.full_name}.gem" }
possibilities.find { |p| File.exist?(p) }
end
# Coerce +uri+ (String or URI) into an absolute URI object whose path
# ends with a trailing slash.
#
# Raises ArgumentError when the value is not an absolute URI.
def normalize_uri(uri)
  str = uri.to_s
  str = "#{str}/" unless str.end_with?("/")
  normalized = URI(str)
  unless normalized.absolute?
    raise ArgumentError, "The source must be an absolute URI"
  end
  normalized
end
def fetch_specs
Index.build do |idx|
idx.use installed_specs
idx.use cached_specs if @allow_cached
idx.use remote_specs if @allow_remote
end
end
def installed_specs
@installed_specs ||= begin
idx = Index.new
Gem::SourceIndex.from_installed_gems.to_a.reverse.each do |name, spec|
next if name == 'bundler'
spec.source = self
idx << spec
end
# Always have bundler locally
bundler = Gem::Specification.new do |s|
s.name = 'bundler'
s.version = VERSION
s.platform = Gem::Platform::RUBY
s.source = self
# TODO: Remove this
s.loaded_from = 'w0t'
end
idx << bundler
idx
end
end
# Build (and memoize) an Index of every .gem file found in the local
# cache directories (@caches).
#
# Skips bundler's own gem, mirroring installed_specs and remote_specs,
# which both exclude 'bundler' from their indexes.
def cached_specs
  @cached_specs ||= begin
    idx = Index.new
    @caches.each do |path|
      Dir["#{path}/*.gem"].each do |gemfile|
        s = Gem::Format.from_file_by_path(gemfile).spec
        # BUG FIX: the old guard read `next if name == 'bundler'`, but
        # `name` there resolved to this source's #name method (":gems"),
        # so the check never fired. Compare the loaded spec's name.
        next if s.name == 'bundler'
        s.source = self
        idx << s
      end
    end
    idx
  end
end
def remote_specs
@remote_specs ||= begin
idx = Index.new
remotes = self.remotes.map { |uri| uri.to_s }
old = Gem.sources
remotes.each do |uri|
Bundler.ui.info "Fetching source index for #{uri}"
Gem.sources = ["#{uri}"]
fetch_all_remote_specs do |n,v|
v.each do |name, version, platform|
next if name == 'bundler'
spec = RemoteSpecification.new(name, version, platform, uri)
spec.source = self
# Temporary hack until this can be figured out better
@spec_fetch_map[spec.full_name] = lambda do
path = download_gem_from_uri(spec, uri)
s = Gem::Format.from_file_by_path(path).spec
spec.__swap__(s)
end
idx << spec
end
end
end
idx
ensure
Gem.sources = old
end
end
def fetch_all_remote_specs(&blk)
begin
# Fetch all specs, minus prerelease specs
Gem::SpecFetcher.new.list(true, false).each(&blk)
# Then fetch the prerelease specs
begin
Gem::SpecFetcher.new.list(false, true).each(&blk)
rescue Gem::RemoteFetcher::FetchError
Bundler.ui.warn "Could not fetch prerelease specs from #{self}"
end
rescue Gem::RemoteFetcher::FetchError
Bundler.ui.warn "Could not reach #{self}"
end
end
def download_gem_from_uri(spec, uri)
spec.fetch_platform
download_path = Bundler.requires_sudo? ? Bundler.tmp : Gem.dir
gem_path = "#{Gem.dir}/cache/#{spec.full_name}.gem"
FileUtils.mkdir_p("#{download_path}/cache")
Gem::RemoteFetcher.fetcher.download(spec, uri, download_path)
if Bundler.requires_sudo?
sudo "mkdir -p #{Gem.dir}/cache"
sudo "mv #{Bundler.tmp}/cache/#{spec.full_name}.gem #{gem_path}"
end
gem_path
end
end
class Path
attr_reader :path, :options
# Kind of a hack, but needed for the lock file parser
attr_accessor :version
DEFAULT_GLOB = "{,*/}*.gemspec"
def initialize(options)
@options = options
@glob = options["glob"] || DEFAULT_GLOB
@allow_cached = false
@allow_remote = false
if options["path"]
@path = Pathname.new(options["path"]).expand_path(Bundler.root)
end
@name = options["name"]
@version = options["version"]
end
def remote!
@allow_remote = true
end
def cached!
@allow_cached = true
end
def self.from_lock(options)
new(options.merge("path" => options.delete("remote")))
end
def to_lock
out = "PATH\n"
out << " remote: #{relative_path}\n"
out << " glob: #{@glob}\n" unless @glob == DEFAULT_GLOB
out << " specs:\n"
end
def to_s
"source at #{@path}"
end
def hash
self.class.hash
end
def eql?(o)
Path === o &&
path == o.path &&
name == o.name &&
version == o.version
end
alias == eql?
def name
File.basename(@path.to_s)
end
def load_spec_files
index = Index.new
if File.directory?(path)
Dir["#{path}/#{@glob}"].each do |file|
file = Pathname.new(file)
# Eval the gemspec from its parent directory
spec = Dir.chdir(file.dirname) do
begin
Gem::Specification.from_yaml(file.basename)
# Raises ArgumentError if the file is not valid YAML
rescue ArgumentError, Gem::EndOfYAMLException, Gem::Exception
begin
eval(File.read(file.basename), TOPLEVEL_BINDING, file.expand_path.to_s)
rescue LoadError
raise GemspecError, "There was a LoadError while evaluating #{file.basename}.\n" +
"Does it try to require a relative path? That doesn't work in Ruby 1.9."
end
end
end
if spec
spec.loaded_from = file.to_s
spec.source = self
index << spec
end
end
if index.empty? && @name && @version
index << Gem::Specification.new do |s|
s.name = @name
s.source = self
s.version = Gem::Version.new(@version)
s.platform = Gem::Platform::RUBY
s.summary = "Fake gemspec for #{@name}"
s.relative_loaded_from = "#{@name}.gemspec"
if path.join("bin").exist?
binaries = path.join("bin").children.map{|c| c.basename.to_s }
s.executables = binaries
end
end
end
else
raise PathError, "The path `#{path}` does not exist."
end
index
end
def local_specs
@local_specs ||= load_spec_files
end
class Installer < Gem::Installer
def initialize(spec)
@spec = spec
@bin_dir = "#{Gem.dir}/bin"
@gem_dir = spec.full_gem_path
@wrappers = true
@env_shebang = true
@format_executable = false
end
end
def install(spec)
Bundler.ui.info "Using #{spec.name} (#{spec.version}) from #{to_s} "
# Let's be honest, when we're working from a path, we can't
# really expect native extensions to work because the whole point
# is to just be able to modify what's in that path and go. So, let's
# not put ourselves through the pain of actually trying to generate
# the full gem.
Installer.new(spec).generate_bin
end
alias specs local_specs
def cache(spec)
unless path.to_s.index(Bundler.root.to_s) == 0
Bundler.ui.warn " * #{spec.name} at `#{path}` will not be cached."
end
end
private
def relative_path
if path.to_s.include?(Bundler.root.to_s)
return path.relative_path_from(Bundler.root)
end
path
end
def generate_bin(spec)
gem_dir = Pathname.new(spec.full_gem_path)
# Some gem authors put absolute paths in their gemspec
# and we have to save them from themselves
spec.files = spec.files.map do |p|
next if File.directory?(p)
begin
Pathname.new(p).relative_path_from(gem_dir).to_s
rescue ArgumentError
p
end
end.compact
gem_file = Dir.chdir(gem_dir){ Gem::Builder.new(spec).build }
installer = Gem::Installer.new File.join(gem_dir, gem_file),
:bin_dir => "#{Gem.dir}/bin",
:wrappers => true,
:env_shebang => false,
:format_executable => false
installer.instance_eval { @gem_dir = gem_dir }
installer.build_extensions
installer.generate_bin
rescue Gem::InvalidSpecificationException => e
Bundler.ui.warn "\n#{spec.name} at #{spec.full_gem_path} did not have a valid gemspec.\n" \
"This prevents bundler from installing bins or native extensions, but " \
"that may not affect its functionality."
if !spec.extensions.empty? && !spec.email.empty?
Bundler.ui.warn "If you need to use this package without installing it from a gem " \
"repository, please contact #{spec.email} and ask them " \
"to modify their .gemspec so it can work with `gem build`."
end
Bundler.ui.warn "The validation message from Rubygems was:\n #{e.message}"
ensure
Dir.chdir(gem_dir){ FileUtils.rm_rf(gem_file) if gem_file && File.exist?(gem_file) }
end
end
class Git < Path
attr_reader :uri, :ref, :options
def initialize(options)
super
@uri = options["uri"]
@ref = options["ref"] || options["branch"] || options["tag"] || 'master'
@revision = options["revision"]
@submodules = options["submodules"]
@update = false
end
def self.from_lock(options)
new(options.merge("uri" => options.delete("remote")))
end
def to_lock
out = "GIT\n"
out << " remote: #{@uri}\n"
out << " revision: #{shortref_for(revision)}\n"
%w(ref branch tag submodules).each do |opt|
out << " #{opt}: #{options[opt]}\n" if options[opt]
end
out << " glob: #{@glob}\n" unless @glob == DEFAULT_GLOB
out << " specs:\n"
end
def eql?(o)
Git === o &&
uri == o.uri &&
ref == o.ref &&
name == o.name &&
version == o.version
end
alias == eql?
def to_s
ref = @options["ref"] ? shortref_for(@options["ref"]) : @ref
"#{@uri} (at #{ref})"
end
def name
File.basename(@uri, '.git')
end
def path
Bundler.install_path.join("#{base_name}-#{shortref_for(revision)}")
end
def unlock!
@revision = nil
end
# TODO: actually cache git specs
def specs
if (@allow_remote || @allow_cached) && !@update
# Start by making sure the git cache is up to date
cache
checkout
@update = true
end
local_specs
end
def install(spec)
Bundler.ui.info "Using #{spec.name} (#{spec.version}) from #{to_s} "
unless @installed
Bundler.ui.debug " * Checking out revision: #{ref}"
checkout
@installed = true
end
generate_bin(spec)
end
def load_spec_files
super if cache_path.exist?
rescue PathError
raise PathError, "#{to_s} is not checked out. Please run `bundle install`"
end
private
# Run a git subcommand (under sudo when Bundler requires it) and return
# its captured stdout.
#
# Raises GitError when git exits with a non-zero status.
#
# NOTE(review): +command+ is interpolated straight into a shell string.
# Callers quote URIs/paths at the call sites, but a value containing a
# double quote could still break out of the quoting — consider proper
# shell escaping. TODO confirm the inputs are trusted.
def git(command)
  if Bundler.requires_sudo?
    out = %x{sudo -E git #{command}}
  else
    out = %x{git #{command}}
  end
  if $? != 0
    raise GitError, "An error has occurred in git. Cannot complete bundling."
  end
  out
end
def base_name
File.basename(uri.sub(%r{^(\w+://)?([^/:]+:)},''), ".git")
end
# Abbreviate a git ref/SHA to its first seven characters (refs shorter
# than seven characters are returned unchanged).
def shortref_for(ref)
  ref.slice(0, 7)
end
def uri_hash
if uri =~ %r{^\w+://(\w+@)?}
# Downcase the domain component of the URI
# and strip off a trailing slash, if one is present
input = URI.parse(uri).normalize.to_s.sub(%r{/$},'')
else
# If there is no URI scheme, assume it is an ssh/git URI
input = uri
end
Digest::SHA1.hexdigest(input)
end
def cache_path
@cache_path ||= Bundler.cache.join("git", "#{base_name}-#{uri_hash}")
end
def cache
if cached?
Bundler.ui.info "Updating #{uri}"
in_cache { git %|fetch --force --quiet "#{uri}" refs/heads/*:refs/heads/*| }
else
Bundler.ui.info "Fetching #{uri}"
FileUtils.mkdir_p(cache_path.dirname)
git %|clone "#{uri}" "#{cache_path}" --bare --no-hardlinks|
end
end
def checkout
unless File.exist?(path.join(".git"))
FileUtils.mkdir_p(path.dirname)
git %|clone --no-checkout "#{cache_path}" "#{path}"|
end
Dir.chdir(path) do
git "fetch --force --quiet"
git "reset --hard #{revision}"
if @submodules
git "submodule init"
git "submodule update"
end
end
end
def revision
@revision ||= in_cache { git("rev-parse #{ref}").strip }
end
def cached?
cache_path.exist?
end
def in_cache(&blk)
cache unless cached?
Dir.chdir(cache_path, &blk)
end
end
end
end
|
module Cadmus
  # A routing constraint that determines whether a request has a valid Cadmus page glob. A
  # page glob consists of one or more valid slug parts separated by forward slashes. A valid
  # slug part consists of a lower-case letter followed by any combination of lower-case letters,
  # digits, and hyphens.
  class SlugConstraint
    # @param request an HTTP request object.
    # @return [Boolean] true if this request's +:page_glob+ parameter is a valid Cadmus page
    #   glob, false if it's not. Allows +:page_glob+ to be nil only if the Rails environment
    #   is +test+, because +assert_recognizes+ doesn't always pass the full params hash
    #   including globbed parameters.
    def matches?(request)
      page_glob = request.symbolized_path_parameters[:page_glob]

      # assert_recognizes doesn't pass the full params hash as we would in a real Rails
      # application. So we have to always pass this constraint if we're testing.
      return true if page_glob.nil? && Rails.env.test?

      page_glob.sub(/\A\//, '').split(/\//).all? do |part|
        # SECURITY FIX: use \A/\z string anchors instead of ^/$. The line
        # anchors match per-line, so a multi-line part such as "good\nBAD"
        # previously slipped through validation.
        part =~ /\A[a-z][a-z0-9\-]*\z/
      end
    end
  end
end
ActionDispatch::Routing::Mapper.class_eval do
  # Defines a "cadmus_pages" DSL command you can use in config/routes.rb. This sets up a Cadmus
  # PagesController that will accept the following routes:
  #
  # * GET /pages -> PagesController#index
  # * GET /pages/new -> PagesController#new
  # * POST /pages -> PagesController#create
  # * GET /pages/anything/possibly-including/slashes/edit -> PagesController#edit
  # * GET /pages/anything/possibly-including/slashes -> PagesController#show
  # * PUT /pages/anything/possibly-including/slashes -> PagesController#update
  # * DELETE /pages/anything/possibly-including/slashes -> PagesController#destroy
  #
  # cadmus_pages accepts two additional options:
  #
  # * :controller - changes which controller it maps to. By default, it is "pages" (meaning PagesController).
  # * :shallow - if set to "true", the edit, show, update and destroy routes won't include the "/pages" prefix. Useful if you're
  #   already inside a unique prefix.
  #
  # FIX: options is now optional (and tolerates an explicit nil), so plain
  # `cadmus_pages` works in routes.rb without arguments.
  def cadmus_pages(options = {})
    options = (options || {}).with_indifferent_access
    controller = options[:controller] || 'pages'

    get "pages" => "#{controller}#index", :as => 'pages'
    get "pages/new" => "#{controller}#new", :as => 'new_page'
    post "pages" => "#{controller}#create"

    slug_constraint = Cadmus::SlugConstraint.new

    page_actions = Proc.new do
      get "*page_glob/edit" => "#{controller}#edit", :as => 'edit_page', :constraints => slug_constraint
      get "*page_glob" => "#{controller}#show", :as => 'page', :constraints => slug_constraint
      put "*page_glob" => "#{controller}#update", :constraints => slug_constraint
      delete "*page_glob" => "#{controller}#destroy", :constraints => slug_constraint
    end

    if options[:shallow]
      instance_eval(&page_actions)
    else
      scope 'pages' do
        instance_eval(&page_actions)
      end
    end
  end
end
Allow cadmus_pages with no options specified
module Cadmus
  # A routing constraint that determines whether a request has a valid Cadmus page glob. A
  # page glob consists of one or more valid slug parts separated by forward slashes. A valid
  # slug part consists of a lower-case letter followed by any combination of lower-case letters,
  # digits, and hyphens.
  class SlugConstraint
    # @param request an HTTP request object.
    # @return [Boolean] true if this request's +:page_glob+ parameter is a valid Cadmus page
    #   glob, false if it's not. Allows +:page_glob+ to be nil only if the Rails environment
    #   is +test+, because +assert_recognizes+ doesn't always pass the full params hash
    #   including globbed parameters.
    def matches?(request)
      page_glob = request.symbolized_path_parameters[:page_glob]

      # assert_recognizes doesn't pass the full params hash as we would in a real Rails
      # application. So we have to always pass this constraint if we're testing.
      return true if page_glob.nil? && Rails.env.test?

      page_glob.sub(/\A\//, '').split(/\//).all? do |part|
        # SECURITY FIX: use \A/\z string anchors instead of ^/$. The line
        # anchors match per-line, so a multi-line part such as "good\nBAD"
        # previously slipped through validation.
        part =~ /\A[a-z][a-z0-9\-]*\z/
      end
    end
  end
end
ActionDispatch::Routing::Mapper.class_eval do
  # Adds a "cadmus_pages" DSL command for use in config/routes.rb. It wires up a Cadmus
  # PagesController answering these routes:
  #
  # * GET /pages -> PagesController#index
  # * GET /pages/new -> PagesController#new
  # * POST /pages -> PagesController#create
  # * GET /pages/anything/possibly-including/slashes/edit -> PagesController#edit
  # * GET /pages/anything/possibly-including/slashes -> PagesController#show
  # * PUT /pages/anything/possibly-including/slashes -> PagesController#update
  # * DELETE /pages/anything/possibly-including/slashes -> PagesController#destroy
  #
  # Two options are recognized:
  #
  # * :controller - which controller to route to; defaults to "pages" (i.e. PagesController).
  # * :shallow - when true, the edit/show/update/destroy routes drop the "/pages" prefix.
  #   Useful if you're already inside a unique prefix.
  def cadmus_pages(options = nil)
    opts = (options || {}).with_indifferent_access
    controller = opts[:controller] || 'pages'

    get "pages" => "#{controller}#index", :as => 'pages'
    get "pages/new" => "#{controller}#new", :as => 'new_page'
    post "pages" => "#{controller}#create"

    slug_constraint = Cadmus::SlugConstraint.new

    # Use a non-lambda proc: instance_eval passes the receiver as a block
    # argument, which a lambda's strict arity would reject.
    page_actions = proc do
      get "*page_glob/edit" => "#{controller}#edit", :as => 'edit_page', :constraints => slug_constraint
      get "*page_glob" => "#{controller}#show", :as => 'page', :constraints => slug_constraint
      put "*page_glob" => "#{controller}#update", :constraints => slug_constraint
      delete "*page_glob" => "#{controller}#destroy", :constraints => slug_constraint
    end

    if opts[:shallow]
      instance_eval(&page_actions)
    else
      scope('pages') { instance_eval(&page_actions) }
    end
  end
end
# encoding: utf-8
# Scrapes a Gatherer card details page and extracts card info
# Issues:
#
# It can't read oracle texts that follow the pattern "Words {image}". It is only
# currently set up to handle "{image}: Words".
#
# NOTE: Cards that "flip" (e.g., Erayo, Soratami Ascendant, Kitsune Mystic,
# Callow Jushi) are not handled consistently in Gatherer. We will not try to
# improve upon this tragedy; some things will therefore remain unsearchable.
#
module MTGExtractor
  # Screen-scrapes a single Gatherer card-details page and extracts card
  # attributes (name, cost, types, oracle text, P/T, rarity, colors, ...)
  # into a plain Hash keyed by strings.
  class CardExtractor
    require 'restclient'

    # URL of the Gatherer card-details page; must contain a multiverseid param.
    attr_accessor :url

    def initialize(url)
      @url = url
    end

    # Fetches the page over HTTP and returns a Hash with every extracted
    # attribute, plus the raw page HTML under 'page_html'.
    def get_card_details
      response = RestClient.get(@url).force_encoding("utf-8")
      card_details = {}
      card_details['gatherer_url'] = @url
      card_details['multiverse_id'] = extract_multiverse_id(@url)
      card_details['image_url'] = build_image_url(card_details['multiverse_id'])
      card_details['name'] = extract_name(response)
      card_details['mana_cost'] = extract_mana_cost(response)
      card_details['converted_cost'] = extract_converted_mana_cost(response)
      card_details['types'] = extract_types(response)
      card_details['oracle_text'] = extract_oracle_text(response)
      card_details['power'] = extract_power(response)
      card_details['toughness'] = extract_toughness(response)
      card_details['loyalty'] = extract_loyalty(response)
      card_details['rarity'] = extract_rarity(response)
      card_details['colors'] = determine_colors(response)
      card_details['transformed_id'] = extract_transformed_multiverse_id(response)
      card_details['page_html'] = response
      card_details
    end

    # Returns the numeric multiverse id embedded in a Gatherer URL, as a
    # String. Raises NoMethodError if the URL has no multiverseid param.
    def extract_multiverse_id(url)
      url.match(/multiverseid=(\d+)/)[1]
    end

    # Builds the card-image URL for a given multiverse id.
    def build_image_url(id)
      "http://gatherer.wizards.com/Handlers/Image.ashx?multiverseid=#{id}&type=card"
    end

    # Reads the card name out of the page's subtitle <span>.
    # Raises NoMethodError if the span is missing.
    def extract_name(html)
      match_data = /<span id="ctl00_ctl00_ctl00_MainContent_SubContent_SubContentHeader_subtitleDisplay"[^>]*>([^<]+)/
      html.match(match_data)[1]
    end

    # True when Gatherer marks the page as one half of a multi-part card
    # (e.g. Fire // Ice).
    def multipart_card?(html)
      html.match(/This is one part of the multi-part card/) != nil
    end

    # Returns the mana cost as an Array of symbol names (e.g. ["2", "G"]),
    # or nil when no cost block is found (e.g. the back face of a card).
    def extract_mana_cost(html)
      # Gatherer displays both sides of double-sided cards (e.g., Kruin Outlaw
      # and Terror of Kruin Pass) on the same page, yet the "back" side of such,
      # cards doesn't have a mana cost. Thus, we must associate the mana cost
      # block with the actual block on the page associated with the "front" side,
      # of the card. We do this by finding the name of the card in the summary
      # display section on the Gatherer page.
      #
      # However, Gatherer displays multi-part cards (e.g., Fire // Ice) with each
      # "mini-card" on its own page, named by that specific mini-card. (I.e., Fire
      # gets its own page, and Ice gets its own page.) So the Gatherer name for
      # the card is inaccurate for this purpose.
      #
      # Solution: identify multi-part cards, and pull the mana cost out simply for
      # these cards, because there will be only one mana cost block on the page.
      # All other cards, allow for the possibility that it's a "flipping" (e.g.,
      # Erayo, Soratami Ascendant) or double-sided (e.g., Kruin Outlaw) card.
      if multipart_card?(html)
        mana_cost = convert_mana_cost(html)
      else
        name = extract_name(html)
        mana_cost_group_regex = /Card Name:<\/div>\s+<div[^>]*>\s+#{name}.+?Mana Cost:.+?<div[^>]*>(.+?)<\/div>/m
        mana_cost_group = html.match(mana_cost_group_regex)
        mana_cost = mana_cost_group ? convert_mana_cost(mana_cost_group[1]) : nil
      end
      mana_cost
    end

    # Collects the symbol name from every mana-symbol <img> in the given HTML
    # fragment; returns nil when no symbols are present.
    def convert_mana_cost(html)
      mana_cost_regex = /<img src="\/Handlers\/Image\.ashx\?size=medium&name=([a-zA-Z0-9]+)&/
      match = html.scan(mana_cost_regex).flatten
      match.length > 0 ? match : nil
    end

    # Returns the converted mana cost as a String, or nil when the CMC block
    # cannot be associated with this card's front face.
    def extract_converted_mana_cost(html)
      # See remarks for #extract_mana_cost, above. Similar processing with respect
      # to double-sided cards is necessary here as well.
      if multipart_card?(html)
        cmc = convert_converted_mana_cost(html)
      else
        name = extract_name(html)
        cmc_group_regex = /Card Name:<\/div>\s+<div[^>]*>\s+#{name}.+?Converted Mana Cost:<\/div>\s+<div[^>]*>[^<]+/m
        cmc_group = html.match(cmc_group_regex)
        cmc = cmc_group ? convert_converted_mana_cost(cmc_group[0]) : nil
      end
      cmc
    end

    # Pulls the CMC digits out of a "Converted Mana Cost" block; defaults to
    # "0" when the block carries no number.
    def convert_converted_mana_cost(html)
      cmc_regex = /Converted Mana Cost:<\/div>\s+<div[^>]*>\s+(\d+)/
      match = html.match(cmc_regex)
      match ? match[1] : "0"
    end

    # Returns the card's type words as an Array (the em dash separating
    # supertypes from subtypes is removed), or nil when absent.
    def extract_types(html)
      if multipart_card?(html)
        card_types_regex = /Types:<\/div>\s+<div[^>]*>\s+([^>]+)<\/div>/
      else
        name = extract_name(html)
        card_types_regex = /(?:Card Name:<\/div>\s+<div[^>]*>\s+#{name}.+?Types:<\/div>\s+<div[^>]*>\s+([^>]+)<\/div>)/m
      end
      card_types = html.match(card_types_regex)[1]
      if card_types
        card_types.split("—").collect {|type| type.strip.split(/\s+/)}.flatten
      else
        card_types
      end
    end

    # Extracts the rules (oracle) text, joining paragraphs with blank lines
    # and rewriting mana-symbol images to {SYMBOL} placeholders. For flip
    # cards shown side by side, the flipped half is appended after "----".
    def extract_oracle_text(html)
      # See remarks for #extract_mana_cost, above. Similar processing with respect
      # to double-sided cards is necessary here as well.
      card_html = html.gsub(/<div\s+class="cardtextbox"[^>]*><\/div>/, "")
      oracle_text = ""
      if !multipart_card?(html)
        name = extract_name(html)
        single_card_regex = /Card Name:<\/div>\s+<div[^>]*>\s+#{name}(.+?Expansion:)/m
        card_html = html.match(single_card_regex)[1]
      end
      if card_html.match(/Card Text:/)
        if card_html.match(/Flavor Text:/)
          oracle_regex = /Card Text:<\/div>(.+?)Flavor Text:/m
        else
          oracle_regex = /Card Text:<\/div>(.+?)Expansion:/m
        end
        oracle_html = card_html.match(oracle_regex)[1]
        oracle_text_regex = /<div.+?class="cardtextbox"[^>]*>(.+?)<\/div>/
        oracle_text = oracle_html.scan(oracle_text_regex).flatten.join("\n\n")
        oracle_text = oracle_text.gsub(/<\/?[ib]>|<\/div>/, '').strip
        # "flipping" card with side-by-side Gatherer display?
        if !extract_transformed_multiverse_id(html) and
           html.match(/Card Name:.+Card Name:/m) and
           oracle_text.match(/\bflip\b/)
          # hack together the flipped version of the card html
          # and add it's oracle text to the unflipped oracle text
          flipped_name_regex = /Card Name:.+Card Name:<\/div>\s+<div[^>]*>\s+([^<]+)/m
          flipped_name = html.match(flipped_name_regex)[1]
          more_oracle_text = [flipped_name]
          flipped_card_regex = /Card Name:.+Card Name:(.+?)Expansion:/m
          card_html = html.match(flipped_card_regex)[1]
          name = extract_name(html)
          card_html = "<span id=\"ctl00_ctl00_ctl00_MainContent_SubContent_SubContentHeader_subtitleDisplay\">#{name}</span> <div>Card Name:</div> <div> #{name}#{card_html}"
          more_oracle_text.push(extract_types(card_html).join(' '))
          power = extract_power(card_html)
          if power
            toughness = extract_toughness(card_html)
            more_oracle_text.push("#{power} / #{toughness}")
          end
          flipped_oracle_text = card_html.scan(oracle_text_regex).flatten.join("\n\n")
          flipped_oracle_text = flipped_oracle_text.gsub(/<\/?[ib]>|<\/div>/, '').strip
          more_oracle_text.push(flipped_oracle_text)
          more_oracle_text = more_oracle_text.join("\n\n")
          oracle_text += "\n\n----\n\n#{more_oracle_text}"
        end
        mana_cost_regex = /<img src="\/Handlers\/Image\.ashx\?.*?name=([a-zA-Z0-9]+)[^>]*>/
        oracle_text.gsub!(mana_cost_regex, '{\1}')
      end
      oracle_text
    end

    # Not implemented yet; returns nil.
    def extract_printed_text(html)
      # TODO
    end

    # Creature power as a String, or nil when the card has no P/T block.
    def extract_power(html)
      name = extract_name(html)
      creature_power_regex = /(?:Card Name:<\/div>\s+<div[^>]*>\s+#{name}.+?P\/T:<\/div>\s+<div class="value">\s+(\d+) \/ \d+)/m
      match = html.match(creature_power_regex)
      match ? match[1] : nil
    end

    # Creature toughness as a String, or nil when the card has no P/T block.
    def extract_toughness(html)
      name = extract_name(html)
      creature_toughness_regex = /(?:Card Name:<\/div>\s+<div[^>]*>\s+#{name}.+?P\/T:<\/div>\s+<div class="value">\s+\d+ \/ (\d+))/m
      match = html.match(creature_toughness_regex)
      match ? match[1] : nil
    end

    # Planeswalker loyalty as a String, or nil for non-planeswalkers.
    def extract_loyalty(html)
      match_data = /Loyalty:<\/div>\s+<div[^>]*>\s+(\w+)<\/div>/
      match = html.match(match_data)
      match ? match[1] : nil
    end

    # Returns the color-indicator word (e.g. "Blue"), or nil when the card
    # has no indicator. Scoped to the front face for non-multipart cards.
    def extract_color_indicator(html)
      if !multipart_card?(html)
        name = extract_name(html)
        single_card_regex = /Card Name:<\/div>\s+<div[^>]*>\s+#{name}(.+?Expansion:)/m
        html = html.match(single_card_regex)[1]
      end
      match_data = /Color Indicator:<\/div>\s+<div[^>]*>\s+(\w+)/
      match = html.match(match_data)
      match ? match[1] : nil
    end

    # Derives the card's color letters (e.g. "UG") from the color indicator
    # when present, otherwise from the mana-cost symbols; "" for colorless.
    # NOTE(review): an indicator yields at most one color letter here.
    def determine_colors(html)
      indicator_to_color = {
        "Red"   => "R",
        "Blue"  => "U",
        "Green" => "G",
        "White" => "W",
        "Black" => "B"
      }
      mana_cost = extract_mana_cost(html)
      match = mana_cost.join("").scan(/[ubrgw]/i) if mana_cost
      indicator = extract_color_indicator(html)
      if indicator
        card_colors = indicator_to_color[indicator]
      elsif match && match.length > 0
        card_colors = match.flatten.uniq.join
      else
        card_colors = ''
      end
      card_colors
    end

    # Returns the rarity word(s) (e.g. "Mythic Rare"). Raises NoMethodError
    # if the rarity block is missing.
    def extract_rarity(html)
      match_data = /Rarity:<\/div>\s+<div[^>]*>\s+<span[^>]*>([\w\s]*)/
      match = html.match(match_data)[1]
    end

    # Returns the multiverse id of the other face for transform cards, or
    # nil when the page shows only this card's own id.
    def extract_transformed_multiverse_id(html)
      # Get the multiverse id of the transformed card, if one exists
      card_multiverse_id = extract_multiverse_id(html)
      multiverse_id_regex = /<img src="\.\.\/\.\.\/Handlers\/Image\.ashx\?multiverseid=(\d+)&type=card/
      multiverse_ids_on_page = html.scan(multiverse_id_regex).flatten.uniq
      (multiverse_ids_on_page - [card_multiverse_id]).first
    end
  end
end
Remove old code comments
# encoding: utf-8
# NOTE: Cards that "flip" (e.g., Erayo, Soratami Ascendant, Kitsune Mystic,
# Callow Jushi) are not handled consistently in Gatherer. We will not try to
# improve upon this tragedy; some things will therefore remain unsearchable.
module MTGExtractor
  # Screen-scrapes a single Gatherer card-details page and extracts card
  # attributes (name, cost, types, oracle text, P/T, rarity, colors, ...)
  # into a plain Hash keyed by strings.
  class CardExtractor
    require 'restclient'

    # URL of the Gatherer card-details page; must contain a multiverseid param.
    attr_accessor :url

    def initialize(url)
      @url = url
    end

    # Fetches the page over HTTP and returns a Hash with every extracted
    # attribute, plus the raw page HTML under 'page_html'.
    def get_card_details
      response = RestClient.get(@url).force_encoding("utf-8")
      card_details = {}
      card_details['gatherer_url'] = @url
      card_details['multiverse_id'] = extract_multiverse_id(@url)
      card_details['image_url'] = build_image_url(card_details['multiverse_id'])
      card_details['name'] = extract_name(response)
      card_details['mana_cost'] = extract_mana_cost(response)
      card_details['converted_cost'] = extract_converted_mana_cost(response)
      card_details['types'] = extract_types(response)
      card_details['oracle_text'] = extract_oracle_text(response)
      card_details['power'] = extract_power(response)
      card_details['toughness'] = extract_toughness(response)
      card_details['loyalty'] = extract_loyalty(response)
      card_details['rarity'] = extract_rarity(response)
      card_details['colors'] = determine_colors(response)
      card_details['transformed_id'] = extract_transformed_multiverse_id(response)
      card_details['page_html'] = response
      card_details
    end

    # Returns the numeric multiverse id embedded in a Gatherer URL, as a
    # String. Raises NoMethodError if the URL has no multiverseid param.
    def extract_multiverse_id(url)
      url.match(/multiverseid=(\d+)/)[1]
    end

    # Builds the card-image URL for a given multiverse id.
    def build_image_url(id)
      "http://gatherer.wizards.com/Handlers/Image.ashx?multiverseid=#{id}&type=card"
    end

    # Reads the card name out of the page's subtitle <span>.
    # Raises NoMethodError if the span is missing.
    def extract_name(html)
      match_data = /<span id="ctl00_ctl00_ctl00_MainContent_SubContent_SubContentHeader_subtitleDisplay"[^>]*>([^<]+)/
      html.match(match_data)[1]
    end

    # True when Gatherer marks the page as one half of a multi-part card
    # (e.g. Fire // Ice).
    def multipart_card?(html)
      html.match(/This is one part of the multi-part card/) != nil
    end

    # Returns the mana cost as an Array of symbol names (e.g. ["2", "G"]),
    # or nil when no cost block is found (e.g. the back face of a card).
    def extract_mana_cost(html)
      # Gatherer displays both sides of double-sided cards (e.g., Kruin Outlaw
      # and Terror of Kruin Pass) on the same page, yet the "back" side of such,
      # cards doesn't have a mana cost. Thus, we must associate the mana cost
      # block with the actual block on the page associated with the "front" side,
      # of the card. We do this by finding the name of the card in the summary
      # display section on the Gatherer page.
      #
      # However, Gatherer displays multi-part cards (e.g., Fire // Ice) with each
      # "mini-card" on its own page, named by that specific mini-card. (I.e., Fire
      # gets its own page, and Ice gets its own page.) So the Gatherer name for
      # the card is inaccurate for this purpose.
      #
      # Solution: identify multi-part cards, and pull the mana cost out simply for
      # these cards, because there will be only one mana cost block on the page.
      # All other cards, allow for the possibility that it's a "flipping" (e.g.,
      # Erayo, Soratami Ascendant) or double-sided (e.g., Kruin Outlaw) card.
      if multipart_card?(html)
        mana_cost = convert_mana_cost(html)
      else
        name = extract_name(html)
        mana_cost_group_regex = /Card Name:<\/div>\s+<div[^>]*>\s+#{name}.+?Mana Cost:.+?<div[^>]*>(.+?)<\/div>/m
        mana_cost_group = html.match(mana_cost_group_regex)
        mana_cost = mana_cost_group ? convert_mana_cost(mana_cost_group[1]) : nil
      end
      mana_cost
    end

    # Collects the symbol name from every mana-symbol <img> in the given HTML
    # fragment; returns nil when no symbols are present.
    def convert_mana_cost(html)
      mana_cost_regex = /<img src="\/Handlers\/Image\.ashx\?size=medium&name=([a-zA-Z0-9]+)&/
      match = html.scan(mana_cost_regex).flatten
      match.length > 0 ? match : nil
    end

    # Returns the converted mana cost as a String, or nil when the CMC block
    # cannot be associated with this card's front face.
    def extract_converted_mana_cost(html)
      # See remarks for #extract_mana_cost, above. Similar processing with respect
      # to double-sided cards is necessary here as well.
      if multipart_card?(html)
        cmc = convert_converted_mana_cost(html)
      else
        name = extract_name(html)
        cmc_group_regex = /Card Name:<\/div>\s+<div[^>]*>\s+#{name}.+?Converted Mana Cost:<\/div>\s+<div[^>]*>[^<]+/m
        cmc_group = html.match(cmc_group_regex)
        cmc = cmc_group ? convert_converted_mana_cost(cmc_group[0]) : nil
      end
      cmc
    end

    # Pulls the CMC digits out of a "Converted Mana Cost" block; defaults to
    # "0" when the block carries no number.
    def convert_converted_mana_cost(html)
      cmc_regex = /Converted Mana Cost:<\/div>\s+<div[^>]*>\s+(\d+)/
      match = html.match(cmc_regex)
      match ? match[1] : "0"
    end

    # Returns the card's type words as an Array (the em dash separating
    # supertypes from subtypes is removed), or nil when absent.
    def extract_types(html)
      if multipart_card?(html)
        card_types_regex = /Types:<\/div>\s+<div[^>]*>\s+([^>]+)<\/div>/
      else
        name = extract_name(html)
        card_types_regex = /(?:Card Name:<\/div>\s+<div[^>]*>\s+#{name}.+?Types:<\/div>\s+<div[^>]*>\s+([^>]+)<\/div>)/m
      end
      card_types = html.match(card_types_regex)[1]
      if card_types
        card_types.split("—").collect {|type| type.strip.split(/\s+/)}.flatten
      else
        card_types
      end
    end

    # Extracts the rules (oracle) text, joining paragraphs with blank lines
    # and rewriting mana-symbol images to {SYMBOL} placeholders. For flip
    # cards shown side by side, the flipped half is appended after "----".
    def extract_oracle_text(html)
      # See remarks for #extract_mana_cost, above. Similar processing with respect
      # to double-sided cards is necessary here as well.
      card_html = html.gsub(/<div\s+class="cardtextbox"[^>]*><\/div>/, "")
      oracle_text = ""
      if !multipart_card?(html)
        name = extract_name(html)
        single_card_regex = /Card Name:<\/div>\s+<div[^>]*>\s+#{name}(.+?Expansion:)/m
        card_html = html.match(single_card_regex)[1]
      end
      if card_html.match(/Card Text:/)
        if card_html.match(/Flavor Text:/)
          oracle_regex = /Card Text:<\/div>(.+?)Flavor Text:/m
        else
          oracle_regex = /Card Text:<\/div>(.+?)Expansion:/m
        end
        oracle_html = card_html.match(oracle_regex)[1]
        oracle_text_regex = /<div.+?class="cardtextbox"[^>]*>(.+?)<\/div>/
        oracle_text = oracle_html.scan(oracle_text_regex).flatten.join("\n\n")
        oracle_text = oracle_text.gsub(/<\/?[ib]>|<\/div>/, '').strip
        # "flipping" card with side-by-side Gatherer display?
        if !extract_transformed_multiverse_id(html) and
           html.match(/Card Name:.+Card Name:/m) and
           oracle_text.match(/\bflip\b/)
          # hack together the flipped version of the card html
          # and add it's oracle text to the unflipped oracle text
          flipped_name_regex = /Card Name:.+Card Name:<\/div>\s+<div[^>]*>\s+([^<]+)/m
          flipped_name = html.match(flipped_name_regex)[1]
          more_oracle_text = [flipped_name]
          flipped_card_regex = /Card Name:.+Card Name:(.+?)Expansion:/m
          card_html = html.match(flipped_card_regex)[1]
          name = extract_name(html)
          card_html = "<span id=\"ctl00_ctl00_ctl00_MainContent_SubContent_SubContentHeader_subtitleDisplay\">#{name}</span> <div>Card Name:</div> <div> #{name}#{card_html}"
          more_oracle_text.push(extract_types(card_html).join(' '))
          power = extract_power(card_html)
          if power
            toughness = extract_toughness(card_html)
            more_oracle_text.push("#{power} / #{toughness}")
          end
          flipped_oracle_text = card_html.scan(oracle_text_regex).flatten.join("\n\n")
          flipped_oracle_text = flipped_oracle_text.gsub(/<\/?[ib]>|<\/div>/, '').strip
          more_oracle_text.push(flipped_oracle_text)
          more_oracle_text = more_oracle_text.join("\n\n")
          oracle_text += "\n\n----\n\n#{more_oracle_text}"
        end
        mana_cost_regex = /<img src="\/Handlers\/Image\.ashx\?.*?name=([a-zA-Z0-9]+)[^>]*>/
        oracle_text.gsub!(mana_cost_regex, '{\1}')
      end
      oracle_text
    end

    # Not implemented yet; returns nil.
    def extract_printed_text(html)
      # TODO
    end

    # Creature power as a String, or nil when the card has no P/T block.
    def extract_power(html)
      name = extract_name(html)
      creature_power_regex = /(?:Card Name:<\/div>\s+<div[^>]*>\s+#{name}.+?P\/T:<\/div>\s+<div class="value">\s+(\d+) \/ \d+)/m
      match = html.match(creature_power_regex)
      match ? match[1] : nil
    end

    # Creature toughness as a String, or nil when the card has no P/T block.
    def extract_toughness(html)
      name = extract_name(html)
      creature_toughness_regex = /(?:Card Name:<\/div>\s+<div[^>]*>\s+#{name}.+?P\/T:<\/div>\s+<div class="value">\s+\d+ \/ (\d+))/m
      match = html.match(creature_toughness_regex)
      match ? match[1] : nil
    end

    # Planeswalker loyalty as a String, or nil for non-planeswalkers.
    def extract_loyalty(html)
      match_data = /Loyalty:<\/div>\s+<div[^>]*>\s+(\w+)<\/div>/
      match = html.match(match_data)
      match ? match[1] : nil
    end

    # Returns the color-indicator word (e.g. "Blue"), or nil when the card
    # has no indicator. Scoped to the front face for non-multipart cards.
    def extract_color_indicator(html)
      if !multipart_card?(html)
        name = extract_name(html)
        single_card_regex = /Card Name:<\/div>\s+<div[^>]*>\s+#{name}(.+?Expansion:)/m
        html = html.match(single_card_regex)[1]
      end
      match_data = /Color Indicator:<\/div>\s+<div[^>]*>\s+(\w+)/
      match = html.match(match_data)
      match ? match[1] : nil
    end

    # Derives the card's color letters (e.g. "UG") from the color indicator
    # when present, otherwise from the mana-cost symbols; "" for colorless.
    # NOTE(review): an indicator yields at most one color letter here.
    def determine_colors(html)
      indicator_to_color = {
        "Red"   => "R",
        "Blue"  => "U",
        "Green" => "G",
        "White" => "W",
        "Black" => "B"
      }
      mana_cost = extract_mana_cost(html)
      match = mana_cost.join("").scan(/[ubrgw]/i) if mana_cost
      indicator = extract_color_indicator(html)
      if indicator
        card_colors = indicator_to_color[indicator]
      elsif match && match.length > 0
        card_colors = match.flatten.uniq.join
      else
        card_colors = ''
      end
      card_colors
    end

    # Returns the rarity word(s) (e.g. "Mythic Rare"). Raises NoMethodError
    # if the rarity block is missing.
    def extract_rarity(html)
      match_data = /Rarity:<\/div>\s+<div[^>]*>\s+<span[^>]*>([\w\s]*)/
      match = html.match(match_data)[1]
    end

    # Returns the multiverse id of the other face for transform cards, or
    # nil when the page shows only this card's own id.
    def extract_transformed_multiverse_id(html)
      # Get the multiverse id of the transformed card, if one exists
      card_multiverse_id = extract_multiverse_id(html)
      multiverse_id_regex = /<img src="\.\.\/\.\.\/Handlers\/Image\.ashx\?multiverseid=(\d+)&type=card/
      multiverse_ids_on_page = html.scan(multiverse_id_regex).flatten.uniq
      (multiverse_ids_on_page - [card_multiverse_id]).first
    end
  end
end
|
module Catche
  # Gem version string, following Semantic Versioning.
  VERSION = '0.2.3'
end
Bump version to 0.2.4
module Catche
  # Gem version string, following Semantic Versioning.
  VERSION = '0.2.4'
end
|
module Certman
  # Drives the full ACM certificate-request workflow across AWS services
  # (S3, SES, Route53, ACM): provisions the temporary resources needed for
  # SES-based email validation, requests the certificate, waits for the
  # approval mail, and cleans up (or rolls back on failure).
  class Client
    include Certman::Resource::STS
    include Certman::Resource::S3
    include Certman::Resource::SES
    include Certman::Resource::Route53
    include Certman::Resource::ACM

    # @param domain [String] the domain to request a certificate for
    def initialize(domain)
      @do_rollback = false
      @cname_exists = false
      @domain = domain
      @cert_arn = nil
      @savepoint = [] # labels of resources created so far, in creation order
    end

    # Runs every provisioning step in order and returns the certificate ARN
    # (nil if a step failed and the run rolled back). When remain_resources
    # is true, the temporary SES/S3/Route53 resources are kept on success.
    def request(remain_resources = false)
      check_resource
      enforce_region_to_us_east_1 do
        step('[S3] Create Bucket for SES inbound', :s3_bucket) do
          create_bucket
        end
        step('[SES] Create Domain Identity', :ses_domain_identity) do
          create_domain_identity
        end
      end
      step('[Route53] Create TXT Record Set to verify Domain Identity', :route53_txt) do
        create_txt_rset
      end
      enforce_region_to_us_east_1 do
        step('[SES] Check Domain Identity Status *verified*', nil) do
          check_domain_identity_verified
        end
      end
      step('[Route53] Create MX Record Set', :route53_mx) do
        create_mx_rset
      end
      enforce_region_to_us_east_1 do
        step('[SES] Create Receipt Rule Set', :ses_rule_set) do
          create_rule_set
        end
        step('[SES] Create Receipt Rule', :ses_rule) do
          create_rule
        end
        step('[SES] Replace Active Receipt Rule Set', :ses_replace_active_rule_set) do
          replace_active_rule_set
        end
      end
      step('[ACM] Request Certificate', :acm_certificate) do
        request_certificate
      end
      enforce_region_to_us_east_1 do
        step('[S3] Check approval mail (will take about 30 min)', nil) do
          check_approval_mail
        end
      end
      cleanup_resources if !remain_resources || @do_rollback
      @cert_arn
    end

    # Deletes the certificate from ACM.
    def delete
      s = spinner('[ACM] Delete Certificate')
      delete_certificate
      s.success
    end

    # Verifies that no conflicting resources exist before provisioning.
    # Raises on any conflict; falls back to the root domain (and re-checks)
    # when a CNAME already exists for the email domain. Returns true.
    def check_resource
      s = spinner('[ACM] Check Certificate')
      raise 'Certificate already exist' if check_certificate
      s.success
      s = spinner('[Route53] Check Hosted Zone')
      raise "Hosted Zone #{root_domain} does not exist" unless check_hosted_zone
      s.success
      s = spinner('[Route53] Check TXT Record')
      raise "_amazonses.#{email_domain} TXT already exist" if check_txt_rset
      s.success
      s = spinner('[Route53] Check MX Record')
      raise "#{email_domain} MX already exist" if check_mx_rset
      s.success
      if check_cname_rset
        pastel = Pastel.new
        puts pastel.cyan("#{email_domain} CNAME already exist. Use #{root_domain}")
        @cname_exists = true
        check_resource
      end
      true
    end

    # Marks the run for rollback; subsequent steps are skipped.
    def rollback
      @do_rollback = true
    end

    private

    # Runs the block with the AWS region forced to us-east-1 when the
    # current region does not support SES. The previous region is restored
    # in an ensure block so an exception inside the block cannot leave the
    # global Aws.config region mutated (previously it could).
    def enforce_region_to_us_east_1
      region = Aws.config[:region]
      unless Certman::Resource::SES::REGIONS.include?(Aws.config[:region])
        Aws.config[:region] = 'us-east-1'
      end
      yield
    ensure
      Aws.config[:region] = region
    end

    # Runs one provisioning step with a spinner. On success, records +save+
    # (a savepoint label, or nil for check-only steps) so cleanup_resources
    # can undo it later; on error, prints the message and flags rollback.
    def step(message, save)
      return if @do_rollback
      s = spinner(message)
      begin
        yield
        @savepoint.push(save)
        s.success
      rescue => e
        pastel = Pastel.new
        puts ''
        puts pastel.red("Error: #{e.message}")
        @do_rollback = true
        s.error
      end
    end

    # Tears down every recorded savepoint in reverse creation order. The
    # certificate itself is only deleted when rolling back.
    def cleanup_resources
      @savepoint.reverse.each do |state|
        case state
        when :s3_bucket
          enforce_region_to_us_east_1 do
            s = spinner('[S3] Delete Bucket')
            delete_bucket
            s.success
          end
        when :ses_domain_identity
          enforce_region_to_us_east_1 do
            s = spinner('[SES] Delete Verified Domain Identiry')
            delete_domain_identity
            s.success
          end
        when :route53_txt
          s = spinner('[Route53] Delete TXT Record Set')
          delete_txt_rset
          s.success
        when :route53_mx
          s = spinner('[Route53] Delete MX Record Set')
          delete_mx_rset
          s.success
        when :ses_rule_set
          enforce_region_to_us_east_1 do
            s = spinner('[SES] Delete Receipt Rule Set')
            delete_rule_set
            s.success
          end
        when :ses_rule
          enforce_region_to_us_east_1 do
            s = spinner('[SES] Delete Receipt Rule')
            delete_rule
            s.success
          end
        when :ses_replace_active_rule_set
          enforce_region_to_us_east_1 do
            s = spinner('[SES] Revert Active Receipt Rule Set')
            # NOTE(review): method name looks misspelled ("rue"); presumably
            # defined as-is in Certman::Resource::SES — confirm before renaming.
            revert_active_rue_set
            s.success
          end
        when :acm_certificate
          if @do_rollback
            s = spinner('[ACM] Delete Certificate')
            delete_certificate
            @cert_arn = nil
            s.success
          end
        end
      end
    end

    # Bucket name for SES inbound mail; hashed when it would exceed the
    # S3 name-length limit.
    def bucket_name
      @bucket_name ||= if "#{email_domain}-certman".length < 63
                         "#{email_domain}-certman"
                       else
                         "#{Digest::SHA1.hexdigest(email_domain)}-certman"
                       end
    end

    # Registrable root of @domain (via the public-suffix list).
    def root_domain
      PublicSuffix.domain(@domain)
    end

    # Domain used for SES identity/mail: @domain minus a leading "www." or
    # "*.", or the root domain once a conflicting CNAME was detected.
    def email_domain
      return root_domain if @cname_exists
      @domain.sub(/\A(www|\*)\./, '')
    end

    # Domain ACM validates; falls back to the root domain on CNAME conflict.
    def validation_domain
      return root_domain if @cname_exists
      @domain
    end

    # SES receipt-rule name; hashed when it would exceed the length limit.
    def rule_name
      @rule_name ||= if "RuleCertman_#{email_domain}".length < 64
                       "RuleCertman_#{email_domain}"
                     else
                       "RuleCertman_#{Digest::SHA1.hexdigest(email_domain)}"
                     end
    end

    # SES receipt-rule-set name; hashed when it would exceed the limit.
    def rule_set_name
      @rule_set_name ||= if "RuleSetCertman_#{email_domain}".length < 64
                           "RuleSetCertman_#{email_domain}"
                         else
                           "RuleSetCertman_#{Digest::SHA1.hexdigest(email_domain)}"
                         end
    end

    # Builds the spinner/logger used for step progress output.
    def spinner(message)
      Certman::Log.new(message)
    end
  end
end
BREAKING: enforce the AWS region for SES/S3/Route53 operations (including the MX record check) by domain hash
module Certman
  # Drives the full ACM certificate-request workflow across AWS services
  # (S3, SES, Route53, ACM): provisions the temporary resources needed for
  # SES-based email validation, requests the certificate, waits for the
  # approval mail, and cleans up (or rolls back on failure). The working
  # region for SES-bound steps is chosen deterministically from the domain.
  class Client
    include Certman::Resource::STS
    include Certman::Resource::S3
    include Certman::Resource::SES
    include Certman::Resource::Route53
    include Certman::Resource::ACM

    # @param domain [String] the domain to request a certificate for
    def initialize(domain)
      @do_rollback = false
      @cname_exists = false
      @domain = domain
      @cert_arn = nil
      @savepoint = [] # labels of resources created so far, in creation order
    end

    # Runs every provisioning step in order and returns the certificate ARN
    # (nil if a step failed and the run rolled back). When remain_resources
    # is true, the temporary SES/S3/Route53 resources are kept on success.
    def request(remain_resources = false)
      check_resource
      enforce_region_by_hash do
        step('[S3] Create Bucket for SES inbound', :s3_bucket) do
          create_bucket
        end
        step('[SES] Create Domain Identity', :ses_domain_identity) do
          create_domain_identity
        end
      end
      step('[Route53] Create TXT Record Set to verify Domain Identity', :route53_txt) do
        create_txt_rset
      end
      enforce_region_by_hash do
        step('[SES] Check Domain Identity Status *verified*', nil) do
          check_domain_identity_verified
        end
        step('[Route53] Create MX Record Set', :route53_mx) do
          create_mx_rset
        end
        step('[SES] Create Receipt Rule Set', :ses_rule_set) do
          create_rule_set
        end
        step('[SES] Create Receipt Rule', :ses_rule) do
          create_rule
        end
        step('[SES] Replace Active Receipt Rule Set', :ses_replace_active_rule_set) do
          replace_active_rule_set
        end
      end
      step('[ACM] Request Certificate', :acm_certificate) do
        request_certificate
      end
      enforce_region_by_hash do
        step('[S3] Check approval mail (will take about 30 min)', nil) do
          check_approval_mail
        end
      end
      cleanup_resources if !remain_resources || @do_rollback
      @cert_arn
    end

    # Deletes the certificate from ACM.
    def delete
      s = spinner('[ACM] Delete Certificate')
      delete_certificate
      s.success
    end

    # Verifies that no conflicting resources exist before provisioning.
    # Raises on any conflict; falls back to the root domain (and re-checks)
    # when a CNAME already exists for the email domain. Returns true.
    def check_resource
      s = spinner('[ACM] Check Certificate')
      raise 'Certificate already exist' if check_certificate
      s.success
      s = spinner('[Route53] Check Hosted Zone')
      raise "Hosted Zone #{root_domain} does not exist" unless check_hosted_zone
      s.success
      s = spinner('[Route53] Check TXT Record')
      raise "_amazonses.#{email_domain} TXT already exist" if check_txt_rset
      s.success
      enforce_region_by_hash do
        s = spinner('[Route53] Check MX Record')
        raise "#{email_domain} MX already exist" if check_mx_rset
        s.success
      end
      if check_cname_rset
        pastel = Pastel.new
        puts pastel.cyan("#{email_domain} CNAME already exist. Use #{root_domain}")
        @cname_exists = true
        check_resource
      end
      true
    end

    # Marks the run for rollback; subsequent steps are skipped.
    def rollback
      @do_rollback = true
    end

    private

    # Runs the block with the AWS region set to one of the SES-capable
    # regions, chosen deterministically by hashing @domain. The previous
    # region is restored in an ensure block so an exception inside the
    # block (e.g. the MX-conflict raise in check_resource) cannot leave
    # the global Aws.config region mutated (previously it could).
    def enforce_region_by_hash
      region = Aws.config[:region]
      key = Digest::SHA1.hexdigest(@domain).to_i(16) % Certman::Resource::SES::REGIONS.length
      Aws.config[:region] = Certman::Resource::SES::REGIONS[key]
      yield
    ensure
      Aws.config[:region] = region
    end

    # Runs one provisioning step with a spinner. On success, records +save+
    # (a savepoint label, or nil for check-only steps) so cleanup_resources
    # can undo it later; on error, prints the message and flags rollback.
    def step(message, save)
      return if @do_rollback
      s = spinner(message)
      begin
        yield
        @savepoint.push(save)
        s.success
      rescue => e
        pastel = Pastel.new
        puts ''
        puts pastel.red("Error: #{e.message}")
        @do_rollback = true
        s.error
      end
    end

    # Tears down every recorded savepoint in reverse creation order. The
    # certificate itself is only deleted when rolling back.
    def cleanup_resources
      @savepoint.reverse.each do |state|
        case state
        when :s3_bucket
          enforce_region_by_hash do
            s = spinner('[S3] Delete Bucket')
            delete_bucket
            s.success
          end
        when :ses_domain_identity
          enforce_region_by_hash do
            s = spinner('[SES] Delete Verified Domain Identiry')
            delete_domain_identity
            s.success
          end
        when :route53_txt
          s = spinner('[Route53] Delete TXT Record Set')
          delete_txt_rset
          s.success
        when :route53_mx
          enforce_region_by_hash do
            s = spinner('[Route53] Delete MX Record Set')
            delete_mx_rset
            s.success
          end
        when :ses_rule_set
          enforce_region_by_hash do
            s = spinner('[SES] Delete Receipt Rule Set')
            delete_rule_set
            s.success
          end
        when :ses_rule
          enforce_region_by_hash do
            s = spinner('[SES] Delete Receipt Rule')
            delete_rule
            s.success
          end
        when :ses_replace_active_rule_set
          enforce_region_by_hash do
            s = spinner('[SES] Revert Active Receipt Rule Set')
            # NOTE(review): method name looks misspelled ("rue"); presumably
            # defined as-is in Certman::Resource::SES — confirm before renaming.
            revert_active_rue_set
            s.success
          end
        when :acm_certificate
          if @do_rollback
            s = spinner('[ACM] Delete Certificate')
            delete_certificate
            @cert_arn = nil
            s.success
          end
        end
      end
    end

    # Bucket name for SES inbound mail; hashed when it would exceed the
    # S3 name-length limit.
    def bucket_name
      @bucket_name ||= if "#{email_domain}-certman".length < 63
                         "#{email_domain}-certman"
                       else
                         "#{Digest::SHA1.hexdigest(email_domain)}-certman"
                       end
    end

    # Registrable root of @domain (via the public-suffix list).
    def root_domain
      PublicSuffix.domain(@domain)
    end

    # Domain used for SES identity/mail: @domain minus a leading "www." or
    # "*.", or the root domain once a conflicting CNAME was detected.
    def email_domain
      return root_domain if @cname_exists
      @domain.sub(/\A(www|\*)\./, '')
    end

    # Domain ACM validates; falls back to the root domain on CNAME conflict.
    def validation_domain
      return root_domain if @cname_exists
      @domain
    end

    # SES receipt-rule name; hashed when it would exceed the length limit.
    def rule_name
      @rule_name ||= if "RuleCertman_#{email_domain}".length < 64
                       "RuleCertman_#{email_domain}"
                     else
                       "RuleCertman_#{Digest::SHA1.hexdigest(email_domain)}"
                     end
    end

    # SES receipt-rule-set name; hashed when it would exceed the limit.
    def rule_set_name
      @rule_set_name ||= if "RuleSetCertman_#{email_domain}".length < 64
                           "RuleSetCertman_#{email_domain}"
                         else
                           "RuleSetCertman_#{Digest::SHA1.hexdigest(email_domain)}"
                         end
    end

    # Builds the spinner/logger used for step progress output.
    def spinner(message)
      Certman::Log.new(message)
    end
  end
end
|
module Chanko
  # Gem version string, following Semantic Versioning.
  VERSION = '2.1.1'
end
Bump chanko to 2.2.0
module Chanko
  # Gem version string, following Semantic Versioning.
  VERSION = '2.2.0'
end
|
module Chronic
  # Tags bare numeric tokens as scalars and, where plausible, as day,
  # month, or two/four-digit year values.
  class Scalar < Tag #:nodoc:
    # Words that, when following a number, mean it is a time-of-day count
    # (e.g. "5 pm") rather than a day/month/year scalar.
    DAY_PORTIONS = %w( am pm morning afternoon evening night )

    # Scans each token (with a peek at the following token) and attaches
    # every scalar tag that applies.
    def self.scan(tokens, options)
      # for each token
      tokens.each_index do |i|
        if t = self.scan_for_scalars(tokens[i], tokens[i + 1]) then tokens[i].tag(t) end
        if t = self.scan_for_days(tokens[i], tokens[i + 1]) then tokens[i].tag(t) end
        if t = self.scan_for_months(tokens[i], tokens[i + 1]) then tokens[i].tag(t) end
        if t = self.scan_for_years(tokens[i], tokens[i + 1], options) then tokens[i].tag(t) end
      end
    end

    # Returns a Scalar tag for any purely numeric token, unless the next
    # token is a day-portion word; nil otherwise.
    def self.scan_for_scalars(token, post_token)
      if token.word =~ /^\d*$/
        unless post_token && DAY_PORTIONS.include?(post_token.word)
          return Scalar.new(token.word.to_i)
        end
      end
    end

    # Returns a ScalarDay tag for 1-2 digit numbers in 1..31; nil otherwise.
    def self.scan_for_days(token, post_token)
      if token.word =~ /^\d\d?$/
        toi = token.word.to_i
        unless toi > 31 || toi < 1 || (post_token && DAY_PORTIONS.include?(post_token.word))
          return ScalarDay.new(toi)
        end
      end
    end

    # Returns a ScalarMonth tag for 1-2 digit numbers in 1..12; nil otherwise.
    def self.scan_for_months(token, post_token)
      if token.word =~ /^\d\d?$/
        toi = token.word.to_i
        unless toi > 12 || toi < 1 || (post_token && DAY_PORTIONS.include?(post_token.word))
          return ScalarMonth.new(toi)
        end
      end
    end

    # Returns a ScalarYear tag for 1-4 digit numbers, expanding 1-2 digit
    # values to a full year via make_year; nil otherwise.
    def self.scan_for_years(token, post_token, options)
      if token.word =~ /^([1-9]\d)?\d\d?$/
        unless post_token && DAY_PORTIONS.include?(post_token.word)
          year = make_year(token.word.to_i, options[:ambiguous_year_future_bias])
          return ScalarYear.new(year.to_i)
        end
      end
    end

    # Build a year from a 2 digit suffix
    def self.make_year(year, bias)
      return year if year.to_s.size > 2
      # Use the configurable Chronic.time_class for "now" (not Time directly)
      # so alternative time classes / zones are respected.
      start_year = Chronic.time_class.now.year - bias
      century = (start_year / 100) * 100
      full_year = century + year
      full_year += 100 if full_year < start_year
      full_year
    end

    def to_s
      'scalar'
    end
  end

  class ScalarDay < Scalar #:nodoc:
    def to_s
      super << '-day-' << @type.to_s
    end
  end

  class ScalarMonth < Scalar #:nodoc:
    def to_s
      super << '-month-' << @type.to_s
    end
  end

  class ScalarYear < Scalar #:nodoc:
    def to_s
      super << '-year-' << @type.to_s
    end
  end
end
oops, use Chronic.time_class again, not Time directly
module Chronic
  # Tags bare numeric tokens as scalars and, where plausible, as day,
  # month, or two/four-digit year values.
  class Scalar < Tag #:nodoc:
    # Words that, when following a number, mean it is a time-of-day count
    # (e.g. "5 pm") rather than a day/month/year scalar.
    DAY_PORTIONS = %w( am pm morning afternoon evening night )

    # Walks the token list, peeking one token ahead, and attaches every
    # scalar tag that applies to each token.
    def self.scan(tokens, options)
      tokens.each_with_index do |token, idx|
        upcoming = tokens[idx + 1]
        [
          scan_for_scalars(token, upcoming),
          scan_for_days(token, upcoming),
          scan_for_months(token, upcoming),
          scan_for_years(token, upcoming, options)
        ].compact.each { |tag| token.tag(tag) }
      end
    end

    # A Scalar tag for any purely numeric token, unless the next token is a
    # day-portion word; nil otherwise.
    def self.scan_for_scalars(token, post_token)
      return unless token.word =~ /^\d*$/
      return if post_token && DAY_PORTIONS.include?(post_token.word)
      Scalar.new(token.word.to_i)
    end

    # A ScalarDay tag for 1-2 digit numbers in 1..31; nil otherwise.
    def self.scan_for_days(token, post_token)
      return unless token.word =~ /^\d\d?$/
      value = token.word.to_i
      return if value < 1 || value > 31
      return if post_token && DAY_PORTIONS.include?(post_token.word)
      ScalarDay.new(value)
    end

    # A ScalarMonth tag for 1-2 digit numbers in 1..12; nil otherwise.
    def self.scan_for_months(token, post_token)
      return unless token.word =~ /^\d\d?$/
      value = token.word.to_i
      return if value < 1 || value > 12
      return if post_token && DAY_PORTIONS.include?(post_token.word)
      ScalarMonth.new(value)
    end

    # A ScalarYear tag for 1-4 digit numbers, expanding 1-2 digit values to
    # a full year via make_year; nil otherwise.
    def self.scan_for_years(token, post_token, options)
      return unless token.word =~ /^([1-9]\d)?\d\d?$/
      return if post_token && DAY_PORTIONS.include?(post_token.word)
      ScalarYear.new(make_year(token.word.to_i, options[:ambiguous_year_future_bias]).to_i)
    end

    # Expands a 2-digit year suffix into a full year, biased toward the
    # future by +bias+ years; 3-4 digit years pass through unchanged.
    def self.make_year(year, bias)
      return year if year.to_s.size > 2
      reference_year = Chronic.time_class.now.year - bias
      candidate = (reference_year / 100) * 100 + year
      candidate < reference_year ? candidate + 100 : candidate
    end

    def to_s
      'scalar'
    end
  end

  class ScalarDay < Scalar #:nodoc:
    def to_s
      "#{super}-day-#{@type}"
    end
  end

  class ScalarMonth < Scalar #:nodoc:
    def to_s
      "#{super}-month-#{@type}"
    end
  end

  class ScalarYear < Scalar #:nodoc:
    def to_s
      "#{super}-year-#{@type}"
    end
  end
end
|
# encoding: utf-8
require 'claide/command/banner'
require 'claide/command/parser'
require 'claide/command/plugins_helper'
require 'claide/command/options'
require 'claide/command/shell_completion_helper'
require 'claide/command/validation_helper'
module CLAide
# This class is used to build a command-line interface
#
# Each command is represented by a subclass of this class, which may be
# nested to create more granular commands.
#
# Following is an overview of the types of commands and what they should do.
#
# ### Any command type
#
# * Inherit from the command class under which the command should be nested.
# * Set {Command.summary} to a brief description of the command.
# * Override {Command.options} to return the options it handles and their
# descriptions and prepending them to the results of calling `super`.
# * Override {Command#initialize} if it handles any parameters.
# * Override {Command#validate!} to check if the required parameters the
# command handles are valid, or call {Command#help!} in case they’re not.
#
# ### Abstract command
#
# The following is needed for an abstract command:
#
# * Set {Command.abstract_command} to `true`.
# * Subclass the command.
#
# When the optional {Command.description} is specified, it will be shown at
# the top of the command’s help banner.
#
# ### Normal command
#
# The following is needed for a normal command:
#
# * Set {Command.arguments} to the description of the arguments this command
# handles.
# * Override {Command#run} to perform the actual work.
#
# When the optional {Command.description} is specified, it will be shown
# underneath the usage section of the command’s help banner. Otherwise this
# defaults to {Command.summary}.
#
class Command
class << self
# @return [Boolean] Indicates whether or not this command can actually
# perform work of itself, or that it only contains subcommands.
#
attr_accessor :abstract_command
alias_method :abstract_command?, :abstract_command
# @return [Boolean] Indicates whether or not this command is used during
# command parsing and whether or not it should be shown in the
# help banner or to show its subcommands instead.
#
# Setting this to `true` implies it’s an abstract command.
#
attr_reader :ignore_in_command_lookup
alias_method :ignore_in_command_lookup?, :ignore_in_command_lookup
def ignore_in_command_lookup=(flag)
@ignore_in_command_lookup = self.abstract_command = flag
end
# @return [String] The subcommand which an abstract command should invoke
# by default.
#
attr_accessor :default_subcommand
# @return [String] A brief description of the command, which is shown
# next to the command in the help banner of a parent command.
#
attr_accessor :summary
# @return [String] A longer description of the command, which is shown
# underneath the usage section of the command’s help banner. Any
# indentation in this value will be ignored.
#
attr_accessor :description
# @return [String] The prefix for loading CLAide plugins for this
# command. Plugins are loaded via their
# <plugin_prefix>_plugin.rb file.
#
attr_accessor :plugin_prefix
# @return [Array<Argument>]
# A list of arguments the command handles. This is shown
# in the usage section of the command’s help banner.
# Each Argument in the array represents an argument by its name
# (or list of alternatives) and whether it's required or optional
#
def arguments
@arguments ||= []
end
# @param [Array<Argument>] arguments
# An array listing the command arguments.
# Each Argument object describe the argument by its name
# (or list of alternatives) and whether it's required or optional
#
# @todo Remove deprecation
#
def arguments=(arguments)
if arguments.is_a?(Array)
if arguments.empty? || arguments[0].is_a?(Argument)
@arguments = arguments
else
self.arguments_array = arguments
end
else
self.arguments_string = arguments
end
end
# @return [Boolean] The default value for {Command#ansi_output}. This
# defaults to `true` if `STDOUT` is connected to a TTY and
# `String` has the instance methods `#red`, `#green`, and
# `#yellow` (which are defined by, for instance, the
# [colored](https://github.com/defunkt/colored) gem).
#
def ansi_output
if @ansi_output.nil?
@ansi_output = STDOUT.tty?
end
@ansi_output
end
attr_writer :ansi_output
alias_method :ansi_output?, :ansi_output
# @return [String] The name of the command. Defaults to a snake-cased
# version of the class’ name.
#
def command
@command ||= name.split('::').last.gsub(/[A-Z]+[a-z]*/) do |part|
part.downcase << '-'
end[0..-2]
end
attr_writer :command
# @return [String] The version of the command. This value will be printed
# by the `--version` flag if used for the root command.
#
attr_accessor :version
end
#-------------------------------------------------------------------------#
# @return [String] The full command up-to this command, as it would be
# looked up during parsing.
#
# @note (see #ignore_in_command_lookup)
#
# @example
#
# BevarageMaker::Tea.full_command # => "beverage-maker tea"
#
def self.full_command
if superclass == Command
ignore_in_command_lookup? ? '' : command
else
if ignore_in_command_lookup?
superclass.full_command
else
"#{superclass.full_command} #{command}"
end
end
end
# @return [Bool] Whether this is the root command class
#
def self.root_command?
superclass == CLAide::Command
end
# @return [Array<Class>] A list of all command classes that are nested
# under this command.
#
def self.subcommands
@subcommands ||= []
end
# @return [Array<Class>] A list of command classes that are nested under
# this command _or_ the subcommands of those command classes in
# case the command class should be ignored in command lookup.
#
def self.subcommands_for_command_lookup
subcommands.map do |subcommand|
if subcommand.ignore_in_command_lookup?
subcommand.subcommands_for_command_lookup
else
subcommand
end
end.flatten
end
# Searches the list of subcommands that should not be ignored for command
# lookup for a subcommand with the given `name`.
#
# @param [String] name
# The name of the subcommand to be found.
#
# @return [CLAide::Command, nil] The subcommand, if found.
#
def self.find_subcommand(name)
subcommands_for_command_lookup.find { |sc| sc.command == name }
end
# @visibility private
#
# Automatically registers a subclass as a subcommand.
#
def self.inherited(subcommand)
subcommands << subcommand
end
# Should be overridden by a subclass if it handles any options.
#
# The subclass has to combine the result of calling `super` and its own
# list of options. The recommended way of doing this is by concatenating
# concatenating to this classes’ own options.
#
# @return [Array<Array>]
#
# A list of option name and description tuples.
#
# @example
#
# def self.options
# [
# ['--verbose', 'Print more info'],
# ['--help', 'Print help banner'],
# ].concat(super)
# end
#
def self.options
Options.default_options(self)
end
# Instantiates the command class matching the parameters through
# {Command.parse}, validates it through {Command#validate!}, and runs it
# through {Command#run}.
#
# @note You should normally call this on
#
# @note The ANSI support is configured before running a command to allow
# the same process to run multiple commands with different
# settings. For example a process with ANSI output enabled might
# want to programmatically invoke another command with the output
# enabled.
#
# @param [Array, ARGV] argv
# A list of parameters. For instance, the standard `ARGV` constant,
# which contains the parameters passed to the program.
#
# @return [void]
#
def self.run(argv = [])
argv = ARGV.coherce(argv)
PluginsHelper.load_plugins(plugin_prefix)
command = parse(argv)
ANSI.disabled = !command.ansi_output?
unless Options.handle_root_option(command, argv)
command.validate!
command.run
end
rescue Object => exception
handle_exception(command, exception)
end
def self.parse(argv)
Parser.parse(self, argv)
end
# Presents an exception to the user according to class of the .
#
# @param [Command] command
# The command which originated the exception.
#
# @param [Object] exception
# The exception to present.
#
# @return [void]
#
def self.handle_exception(command, exception)
if exception.is_a?(InformativeError)
puts exception.message
if command.verbose?
puts
puts(*exception.backtrace)
end
exit exception.exit_status
else
report_error(exception)
end
end
# Allows the application to perform custom error reporting, by overriding
# this method.
#
# @param [Exception] exception
#
# An exception that occurred while running a command through
# {Command.run}.
#
# @raise
#
# By default re-raises the specified exception.
#
# @return [void]
#
def self.report_error(exception)
raise exception
end
# @visibility private
#
# @param [String] error_message
# The error message to show to the user.
#
# @param [Class] help_class
# The class to use to raise a ‘help’ error.
#
# @raise [Help]
#
# Signals CLAide that a help banner for this command should be shown,
# with an optional error message.
#
# @return [void]
#
def self.help!(error_message = nil, help_class = Help)
raise help_class.new(banner, error_message)
end
# @visibility private
#
# Returns the banner for the command.
#
# @param [Class] banner_class
# The class to use to format help banners.
#
# @return [String] The banner for the command.
#
def self.banner(banner_class = Banner)
banner_class.new(self).formatted_banner
end
#-------------------------------------------------------------------------#
# Set to `true` if the user specifies the `--verbose` option.
#
# @note
#
# If you want to make use of this value for your own configuration, you
# should check the value _after_ calling the `super` {Command#initialize}
# implementation.
#
# @return [Boolean]
#
# Wether or not backtraces should be included when presenting the user an
# exception that includes the {InformativeError} module.
#
attr_accessor :verbose
alias_method :verbose?, :verbose
# Set to `true` if {Command.ansi_output} returns `true` and the user
# did **not** specify the `--no-ansi` option.
#
# @note (see #verbose)
#
# @return [Boolean]
#
# Whether or not to use ANSI codes to prettify output. For instance, by
# default {InformativeError} exception messages will be colored red and
# subcommands in help banners green.
#
attr_accessor :ansi_output
alias_method :ansi_output?, :ansi_output
# Subclasses should override this method to remove the arguments/options
# they support from `argv` _before_ calling `super`.
#
# The `super` implementation sets the {#verbose} attribute based on whether
# or not the `--verbose` option is specified; and the {#ansi_output}
# attribute to `false` if {Command.ansi_output} returns `true`, but the
# user specified the `--no-ansi` option.
#
# @param [ARGV, Array] argv
#
# A list of (user-supplied) params that should be handled.
#
def initialize(argv)
argv = ARGV.new(argv) unless argv.is_a?(ARGV)
@verbose = argv.flag?('verbose')
@ansi_output = argv.flag?('ansi', Command.ansi_output?)
@argv = argv
end
# @return [Bool] Whether the command was invoked by an abstract command by
# default.
#
attr_accessor :invoked_as_default
alias_method :invoked_as_default?, :invoked_as_default
# Raises a Help exception if the `--help` option is specified, if `argv`
# still contains remaining arguments/options by the time it reaches this
# implementation, or when called on an ‘abstract command’.
#
# Subclasses should call `super` _before_ doing their own validation. This
# way when the user specifies the `--help` flag a help banner is shown,
# instead of possible actual validation errors.
#
# @raise [Help]
#
# @return [void]
#
def validate!
help! if @argv.flag?('help')
unless @argv.empty?
help! ValidationHelper.argument_suggestion(@argv.remainder, self.class)
end
help! if self.class.abstract_command?
end
# This method should be overridden by the command class to perform its
# work.
#
# @return [void
#
def run
raise 'A subclass should override the `CLAide::Command#run` method to ' \
'actually perform some work.'
end
protected
# @raise [Help]
#
# Signals CLAide that a help banner for this command should be shown,
# with an optional error message.
#
# @return [void]
#
def help!(error_message = nil)
if invoked_as_default?
command = self.class.superclass
else
command = self.class
end
command.help!(error_message)
end
#-------------------------------------------------------------------------#
# Handle deprecated form of self.arguments as an
# Array<Array<(String, Symbol)>> like in:
#
# self.arguments = [ ['NAME', :required], ['QUERY', :optional] ]
#
# @todo Remove deprecated format support
#
def self.arguments_array=(arguments)
warn '[!] The signature of CLAide#arguments has changed. ' \
"Use CLAide::Argument (#{self}: `#{arguments}`)".ansi.yellow
@arguments = arguments.map do |(name_str, type)|
names = name_str.split('|')
required = (type == :required)
Argument.new(names, required)
end
end
# Handle deprecated form of self.arguments as a String, like in:
#
# self.arguments = 'NAME [QUERY]'
#
# @todo Remove deprecated format support
#
def self.arguments_string=(arguments)
warn '[!] The specification of arguments as a string has been' \
" deprecated #{self}: `#{arguments}`".ansi.yellow
@arguments = arguments.split(' ').map do |argument|
if argument.start_with?('[')
Argument.new(argument.sub(/\[(.*)\]/, '\1').split('|'), false)
else
Argument.new(argument.split('|'), true)
end
end
end
end
end
[Doc] Fixed missing parameter documentation
# encoding: utf-8
require 'claide/command/banner'
require 'claide/command/parser'
require 'claide/command/plugins_helper'
require 'claide/command/options'
require 'claide/command/shell_completion_helper'
require 'claide/command/validation_helper'
module CLAide
# This class is used to build a command-line interface
#
# Each command is represented by a subclass of this class, which may be
# nested to create more granular commands.
#
# Following is an overview of the types of commands and what they should do.
#
# ### Any command type
#
# * Inherit from the command class under which the command should be nested.
# * Set {Command.summary} to a brief description of the command.
# * Override {Command.options} to return the options it handles and their
# descriptions and prepending them to the results of calling `super`.
# * Override {Command#initialize} if it handles any parameters.
# * Override {Command#validate!} to check if the required parameters the
# command handles are valid, or call {Command#help!} in case they’re not.
#
# ### Abstract command
#
# The following is needed for an abstract command:
#
# * Set {Command.abstract_command} to `true`.
# * Subclass the command.
#
# When the optional {Command.description} is specified, it will be shown at
# the top of the command’s help banner.
#
# ### Normal command
#
# The following is needed for a normal command:
#
# * Set {Command.arguments} to the description of the arguments this command
# handles.
# * Override {Command#run} to perform the actual work.
#
# When the optional {Command.description} is specified, it will be shown
# underneath the usage section of the command’s help banner. Otherwise this
# defaults to {Command.summary}.
#
class Command
class << self
# @return [Boolean] Indicates whether or not this command can actually
# perform work of itself, or that it only contains subcommands.
#
attr_accessor :abstract_command
alias_method :abstract_command?, :abstract_command
# @return [Boolean] Indicates whether or not this command is used during
# command parsing and whether or not it should be shown in the
# help banner or to show its subcommands instead.
#
# Setting this to `true` implies it’s an abstract command.
#
attr_reader :ignore_in_command_lookup
alias_method :ignore_in_command_lookup?, :ignore_in_command_lookup
def ignore_in_command_lookup=(flag)
@ignore_in_command_lookup = self.abstract_command = flag
end
# @return [String] The subcommand which an abstract command should invoke
# by default.
#
attr_accessor :default_subcommand
# @return [String] A brief description of the command, which is shown
# next to the command in the help banner of a parent command.
#
attr_accessor :summary
# @return [String] A longer description of the command, which is shown
# underneath the usage section of the command’s help banner. Any
# indentation in this value will be ignored.
#
attr_accessor :description
# @return [String] The prefix for loading CLAide plugins for this
# command. Plugins are loaded via their
# <plugin_prefix>_plugin.rb file.
#
attr_accessor :plugin_prefix
# @return [Array<Argument>]
# A list of arguments the command handles. This is shown
# in the usage section of the command’s help banner.
# Each Argument in the array represents an argument by its name
# (or list of alternatives) and whether it's required or optional
#
def arguments
@arguments ||= []
end
# @param [Array<Argument>] arguments
# An array listing the command arguments.
# Each Argument object describe the argument by its name
# (or list of alternatives) and whether it's required or optional
#
# @todo Remove deprecation
#
def arguments=(arguments)
if arguments.is_a?(Array)
if arguments.empty? || arguments[0].is_a?(Argument)
@arguments = arguments
else
self.arguments_array = arguments
end
else
self.arguments_string = arguments
end
end
# @return [Boolean] The default value for {Command#ansi_output}. This
# defaults to `true` if `STDOUT` is connected to a TTY and
# `String` has the instance methods `#red`, `#green`, and
# `#yellow` (which are defined by, for instance, the
# [colored](https://github.com/defunkt/colored) gem).
#
def ansi_output
if @ansi_output.nil?
@ansi_output = STDOUT.tty?
end
@ansi_output
end
attr_writer :ansi_output
alias_method :ansi_output?, :ansi_output
# @return [String] The name of the command. Defaults to a snake-cased
# version of the class’ name.
#
def command
@command ||= name.split('::').last.gsub(/[A-Z]+[a-z]*/) do |part|
part.downcase << '-'
end[0..-2]
end
attr_writer :command
# @return [String] The version of the command. This value will be printed
# by the `--version` flag if used for the root command.
#
attr_accessor :version
end
#-------------------------------------------------------------------------#
# @return [String] The full command up-to this command, as it would be
# looked up during parsing.
#
# @note (see #ignore_in_command_lookup)
#
# @example
#
# BevarageMaker::Tea.full_command # => "beverage-maker tea"
#
def self.full_command
if superclass == Command
ignore_in_command_lookup? ? '' : command
else
if ignore_in_command_lookup?
superclass.full_command
else
"#{superclass.full_command} #{command}"
end
end
end
# @return [Bool] Whether this is the root command class
#
def self.root_command?
superclass == CLAide::Command
end
# @return [Array<Class>] A list of all command classes that are nested
# under this command.
#
def self.subcommands
@subcommands ||= []
end
# @return [Array<Class>] A list of command classes that are nested under
# this command _or_ the subcommands of those command classes in
# case the command class should be ignored in command lookup.
#
def self.subcommands_for_command_lookup
subcommands.map do |subcommand|
if subcommand.ignore_in_command_lookup?
subcommand.subcommands_for_command_lookup
else
subcommand
end
end.flatten
end
# Searches the list of subcommands that should not be ignored for command
# lookup for a subcommand with the given `name`.
#
# @param [String] name
# The name of the subcommand to be found.
#
# @return [CLAide::Command, nil] The subcommand, if found.
#
def self.find_subcommand(name)
subcommands_for_command_lookup.find { |sc| sc.command == name }
end
# @visibility private
#
# Automatically registers a subclass as a subcommand.
#
def self.inherited(subcommand)
subcommands << subcommand
end
# Should be overridden by a subclass if it handles any options.
#
# The subclass has to combine the result of calling `super` and its own
# list of options. The recommended way of doing this is by concatenating
# concatenating to this classes’ own options.
#
# @return [Array<Array>]
#
# A list of option name and description tuples.
#
# @example
#
# def self.options
# [
# ['--verbose', 'Print more info'],
# ['--help', 'Print help banner'],
# ].concat(super)
# end
#
def self.options
Options.default_options(self)
end
# Instantiates the command class matching the parameters through
# {Command.parse}, validates it through {Command#validate!}, and runs it
# through {Command#run}.
#
# @note You should normally call this on
#
# @note The ANSI support is configured before running a command to allow
# the same process to run multiple commands with different
# settings. For example a process with ANSI output enabled might
# want to programmatically invoke another command with the output
# enabled.
#
# @param [Array, ARGV] argv
# A list of parameters. For instance, the standard `ARGV` constant,
# which contains the parameters passed to the program.
#
# @return [void]
#
def self.run(argv = [])
argv = ARGV.coherce(argv)
PluginsHelper.load_plugins(plugin_prefix)
command = parse(argv)
ANSI.disabled = !command.ansi_output?
unless Options.handle_root_option(command, argv)
command.validate!
command.run
end
rescue Object => exception
handle_exception(command, exception)
end
def self.parse(argv)
Parser.parse(self, argv)
end
# Presents an exception to the user according to class of the .
#
# @param [Command] command
# The command which originated the exception.
#
# @param [Object] exception
# The exception to present.
#
# @return [void]
#
def self.handle_exception(command, exception)
if exception.is_a?(InformativeError)
puts exception.message
if command.verbose?
puts
puts(*exception.backtrace)
end
exit exception.exit_status
else
report_error(exception)
end
end
# Allows the application to perform custom error reporting, by overriding
# this method.
#
# @param [Exception] exception
#
# An exception that occurred while running a command through
# {Command.run}.
#
# @raise
#
# By default re-raises the specified exception.
#
# @return [void]
#
def self.report_error(exception)
raise exception
end
# @visibility private
#
# @param [String] error_message
# The error message to show to the user.
#
# @param [Class] help_class
# The class to use to raise a ‘help’ error.
#
# @raise [Help]
#
# Signals CLAide that a help banner for this command should be shown,
# with an optional error message.
#
# @return [void]
#
def self.help!(error_message = nil, help_class = Help)
raise help_class.new(banner, error_message)
end
# @visibility private
#
# Returns the banner for the command.
#
# @param [Class] banner_class
# The class to use to format help banners.
#
# @return [String] The banner for the command.
#
def self.banner(banner_class = Banner)
banner_class.new(self).formatted_banner
end
#-------------------------------------------------------------------------#
# Set to `true` if the user specifies the `--verbose` option.
#
# @note
#
# If you want to make use of this value for your own configuration, you
# should check the value _after_ calling the `super` {Command#initialize}
# implementation.
#
# @return [Boolean]
#
# Wether or not backtraces should be included when presenting the user an
# exception that includes the {InformativeError} module.
#
attr_accessor :verbose
alias_method :verbose?, :verbose
# Set to `true` if {Command.ansi_output} returns `true` and the user
# did **not** specify the `--no-ansi` option.
#
# @note (see #verbose)
#
# @return [Boolean]
#
# Whether or not to use ANSI codes to prettify output. For instance, by
# default {InformativeError} exception messages will be colored red and
# subcommands in help banners green.
#
attr_accessor :ansi_output
alias_method :ansi_output?, :ansi_output
# Subclasses should override this method to remove the arguments/options
# they support from `argv` _before_ calling `super`.
#
# The `super` implementation sets the {#verbose} attribute based on whether
# or not the `--verbose` option is specified; and the {#ansi_output}
# attribute to `false` if {Command.ansi_output} returns `true`, but the
# user specified the `--no-ansi` option.
#
# @param [ARGV, Array] argv
#
# A list of (user-supplied) params that should be handled.
#
def initialize(argv)
argv = ARGV.new(argv) unless argv.is_a?(ARGV)
@verbose = argv.flag?('verbose')
@ansi_output = argv.flag?('ansi', Command.ansi_output?)
@argv = argv
end
# @return [Bool] Whether the command was invoked by an abstract command by
# default.
#
attr_accessor :invoked_as_default
alias_method :invoked_as_default?, :invoked_as_default
# Raises a Help exception if the `--help` option is specified, if `argv`
# still contains remaining arguments/options by the time it reaches this
# implementation, or when called on an ‘abstract command’.
#
# Subclasses should call `super` _before_ doing their own validation. This
# way when the user specifies the `--help` flag a help banner is shown,
# instead of possible actual validation errors.
#
# @raise [Help]
#
# @return [void]
#
def validate!
help! if @argv.flag?('help')
unless @argv.empty?
help! ValidationHelper.argument_suggestion(@argv.remainder, self.class)
end
help! if self.class.abstract_command?
end
# This method should be overridden by the command class to perform its
# work.
#
# @return [void
#
def run
raise 'A subclass should override the `CLAide::Command#run` method to ' \
'actually perform some work.'
end
protected
# @param [String] error_message
# A custom optional error message
#
# @raise [Help]
#
# Signals CLAide that a help banner for this command should be shown,
# with an optional error message.
#
# @return [void]
#
def help!(error_message = nil)
if invoked_as_default?
command = self.class.superclass
else
command = self.class
end
command.help!(error_message)
end
#-------------------------------------------------------------------------#
# Handle deprecated form of self.arguments as an
# Array<Array<(String, Symbol)>> like in:
#
# self.arguments = [ ['NAME', :required], ['QUERY', :optional] ]
#
# @todo Remove deprecated format support
#
def self.arguments_array=(arguments)
warn '[!] The signature of CLAide#arguments has changed. ' \
"Use CLAide::Argument (#{self}: `#{arguments}`)".ansi.yellow
@arguments = arguments.map do |(name_str, type)|
names = name_str.split('|')
required = (type == :required)
Argument.new(names, required)
end
end
# Handle deprecated form of self.arguments as a String, like in:
#
# self.arguments = 'NAME [QUERY]'
#
# @todo Remove deprecated format support
#
def self.arguments_string=(arguments)
warn '[!] The specification of arguments as a string has been' \
" deprecated #{self}: `#{arguments}`".ansi.yellow
@arguments = arguments.split(' ').map do |argument|
if argument.start_with?('[')
Argument.new(argument.sub(/\[(.*)\]/, '\1').split('|'), false)
else
Argument.new(argument.split('|'), true)
end
end
end
end
end
|
class Clive
# An Argument represents an argument for an Option or Command, it can be optional
# and can also be constricted by various other values, see {#initialize}.
class Argument
# Creates an object which will respond with true to all call to the method(s)
# given.
#
# @example
# eg = AlwaysTrue.for(:a, :b, :c)
# eg.a #=> true
# eg.b(1,2,3) #=> true
# eg.c { 1 } #=> true
# eg.d #=> NoMethodError
#
class AlwaysTrue
# @param syms [Symbol] Methods which should return true
def self.for(*syms)
c = Class.new
syms.each do |sym|
c.send(:define_method, sym) {|*a| true }
end
c.send(:define_method, :inspect) { "#<AlwaysTrue #{syms.map {|i| ":#{i}" }.join(', ') }>" }
c.new
end
end
# An Argument will have these traits by default.
DEFAULTS = {
:optional => false,
:type => Type::Object,
:match => AlwaysTrue.for(:match),
:within => AlwaysTrue.for(:include?),
:default => nil,
:constraint => AlwaysTrue.for(:call)
}
attr_reader :name, :default, :type
# A new instance of Argument.
#
# @param opts [Hash]
#
# @option opts [#to_sym] :name
# Name of the argument.
#
# @option opts [Boolean] :optional
# Whether this argument is optional. An optional argument does not have
# to be given and will pass +:default+ to the block instead.
#
# @option opts [Type] :type
# Type that the matching argument should be cast to. See {Type} and the
# various subclasses for details. Each {Type} defines something that the
# argument must match in addition to the +:match+ argument given.
#
# @option opts [#match] :match
# Regular expression the argument must match.
#
# @option opts [#include?] :within
# Collection that the argument should be in. This will be checked
# against the string argument and the cast object (see +:type+). So for
# instance if +:type+ is set to +Integer+ you can set +:within+ to be an array
# of integers, [1,2,3], or an array of strings, %w(1 2 3), and get the
# same result.
#
# @option opts :default
# Default value the argument takes. This is only set or used if the Option or
# Command is actually called.
#
# @option opts [#call, #to_proc] :constraint
# Proc which is passed the found argument and should return +true+ if the
# value is ok and false if not.
# If the object responds to #to_proc this will be called and the resulting
# Proc object saved for later use. This allows you to pass method symbols.
#
# @example
#
# Argument.new(:arg, :optional => true, :type => Integer, :constraint => :odd?)
#
def initialize(name, opts={})
@name = name.to_sym
opts[:constraint] = opts[:constraint].to_proc if opts[:constraint].respond_to?(:to_proc)
opts = DEFAULTS.merge(opts)
@optional = opts[:optional]
@type = Type.find_class(opts[:type].to_s) rescue opts[:type]
@match = opts[:match]
@within = opts[:within]
@default = opts[:default]
@constraint = opts[:constraint]
end
# @return Whether the argument is optional.
def optional?
@optional
end
# @return [String] String representation for the argument.
def to_s
optional? ? "[<#@name>]" : "<#@name>"
end
# @return [String]
# Choices or range of choices that can be made, for the help string.
def choice_str
if @within
case @within
when Array
'(' + @within.join(', ') + ')'
when Range
'(' + @within.to_s + ')'
else
''
end
else
''
end
end
def inspect
"#<#{self.class} #{to_s}>"
end
# Determines whether the object given can be this argument. Checks whether
# it is valid based on the options passed to {#initialize}.
#
# @param obj [String, Object]
# This method will be called at least twice for each argument, the first
# time when testing for {Arguments#possible?} and then for {Arguments#valid?}.
# When called in {Arguments#possible?} +obj+ will be passed as a string,
# for {Arguments#valid?} though +obj+ will have been cast using {#coerce}
# to the correct type meaning this method must deal with both cases.
#
# @return Whether +obj+ could be this argument.
#
def possible?(obj)
return false if obj.is_a?(String) && !@type.valid?(obj)
return false unless @match.match(obj.to_s)
coerced = coerce(obj)
unless @within.include?(obj.to_s) || @within.include?(coerced)
return false
end
begin
return false unless @constraint.call(obj.to_s)
rescue
begin
return false unless @constraint.call(coerced)
rescue
return false
end
end
true
end
# Converts the given String argument to the correct type determined by the
# +:type+ passed to {#initialize}.
def coerce(str)
return str unless str.is_a?(String)
@type.typecast(str)
end
end
end
Arguments can now be 'infinite'
class Clive
# An Argument represents an argument for an Option or Command, it can be optional
# and can also be constricted by various other values, see {#initialize}.
class Argument
# Creates an object which will respond with true to all call to the method(s)
# given.
#
# @example
# eg = AlwaysTrue.for(:a, :b, :c)
# eg.a #=> true
# eg.b(1,2,3) #=> true
# eg.c { 1 } #=> true
# eg.d #=> NoMethodError
#
class AlwaysTrue
# @param syms [Symbol] Methods which should return true
def self.for(*syms)
c = Class.new
syms.each do |sym|
c.send(:define_method, sym) {|*a| true }
end
c.send(:define_method, :inspect) { "#<AlwaysTrue #{syms.map {|i| ":#{i}" }.join(', ') }>" }
c.new
end
end
# An Argument will have these traits by default.
DEFAULTS = {
:optional => false,
:type => Type::Object,
:match => AlwaysTrue.for(:match),
:within => AlwaysTrue.for(:include?),
:default => nil,
:constraint => AlwaysTrue.for(:call),
:infinite => false
}
attr_reader :name, :default, :type
# A new instance of Argument.
#
# @param opts [Hash]
#
# @option opts [#to_sym] :name
# Name of the argument.
#
# @option opts [Boolean] :optional
# Whether this argument is optional. An optional argument does not have
# to be given and will pass +:default+ to the block instead.
#
# @option opts [Type] :type
# Type that the matching argument should be cast to. See {Type} and the
# various subclasses for details. Each {Type} defines something that the
# argument must match in addition to the +:match+ argument given.
#
# @option opts [#match] :match
# Regular expression the argument must match.
#
# @option opts [#include?] :within
# Collection that the argument should be in. This will be checked
# against the string argument and the cast object (see +:type+). So for
# instance if +:type+ is set to +Integer+ you can set +:within+ to be an array
# of integers, [1,2,3], or an array of strings, %w(1 2 3), and get the
# same result.
#
# @option opts :default
# Default value the argument takes. This is only set or used if the Option or
# Command is actually called.
#
# @option opts [#call, #to_proc] :constraint
# Proc which is passed the found argument and should return +true+ if the
# value is ok and false if not.
# If the object responds to #to_proc this will be called and the resulting
# Proc object saved for later use. This allows you to pass method symbols.
#
# @example
#
# Argument.new(:arg, :optional => true, :type => Integer, :constraint => :odd?)
#
def initialize(name, opts={})
@name = name.to_sym
opts[:constraint] = opts[:constraint].to_proc if opts[:constraint].respond_to?(:to_proc)
opts = DEFAULTS.merge(opts)
@optional = opts[:optional]
@type = Type.find_class(opts[:type].to_s) rescue opts[:type]
@match = opts[:match]
@within = opts[:within]
@default = opts[:default]
@constraint = opts[:constraint]
@infinite = opts[:infinite]
end
# @return Whether the argument is optional.
def optional?
@optional
end
# @return Whether the argument is infinite.
def infinite?
@infinite
end
# @return [String] String representation for the argument.
def to_s
(optional? ? "[<#@name>]" : "<#@name>") + (infinite? ? '...' : '')
end
# @return [String]
# Choices or range of choices that can be made, for the help string.
def choice_str
if @within
case @within
when Array
'(' + @within.join(', ') + ')'
when Range
'(' + @within.to_s + ')'
else
''
end
else
''
end
end
def inspect
"#<#{self.class} #{to_s}>"
end
# Determines whether the object given can be this argument. Checks whether
# it is valid based on the options passed to {#initialize}.
#
# @param obj [String, Object]
# This method will be called at least twice for each argument, the first
# time when testing for {Arguments#possible?} and then for {Arguments#valid?}.
# When called in {Arguments#possible?} +obj+ will be passed as a string,
# for {Arguments#valid?} though +obj+ will have been cast using {#coerce}
# to the correct type meaning this method must deal with both cases.
#
# @return Whether +obj+ could be this argument.
#
def possible?(obj)
return false if obj.is_a?(String) && !@type.valid?(obj)
return false unless @match.match(obj.to_s)
coerced = coerce(obj)
unless @within.include?(obj.to_s) || @within.include?(coerced)
return false
end
begin
return false unless @constraint.call(obj.to_s)
rescue
begin
return false unless @constraint.call(coerced)
rescue
return false
end
end
true
end
# Converts the given String argument to the correct type determined by the
# +:type+ passed to {#initialize}. Non-String values pass through unchanged.
def coerce(str)
  str.is_a?(String) ? @type.typecast(str) : str
end
end
end
|
require 'faraday'
require 'faraday_middleware'
Dir[File.expand_path('../resources/*.rb', __FILE__)].each{|f| require f}
module Closeio
  # Thin HTTP client for the Close.io REST API. Resource-specific calls are
  # mixed in from the modules under resources/.
  class Client
    include Closeio::Client::Activity
    include Closeio::Client::BulkAction
    include Closeio::Client::Contact
    include Closeio::Client::CustomField
    include Closeio::Client::EmailTemplate
    include Closeio::Client::Lead
    include Closeio::Client::LeadStatus
    include Closeio::Client::Opportunity
    include Closeio::Client::OpportunityStatus
    include Closeio::Client::Organization
    include Closeio::Client::Report
    include Closeio::Client::SmartView
    include Closeio::Client::Task
    include Closeio::Client::User

    attr_reader :api_key

    # @param api_key [String] API key, used as the basic-auth username.
    def initialize(api_key)
      @api_key = api_key
    end

    def get(path, options={})
      connection.get(path, options).body
    end

    def post(path, req_body)
      connection.post do |req|
        req.url(path)
        req.body = req_body
      end.body
    end

    def put(path, options={})
      connection.put(path, options).body
    end

    def delete(path, options = {})
      connection.delete(path, options).body
    end

    # Fetches every page of +path+ by advancing the API's +_skip+ offset and
    # returns a single Mash with all pages' data flattened together.
    def paginate(path, options)
      results = []
      skip = 0
      begin
        # Fix: request the caller-supplied path (was the unrelated `lead_path`
        # helper, leaving the parameter unused) and use merge instead of
        # merge! so the caller's options hash is not mutated.
        res = get(path, options.merge(_skip: skip))
        results.push res.data
        # Fix: guard on res.data — `data` was an undefined local variable.
        skip += res.data.count if res.data
      end while res.has_more
      json = {has_more: false, total_results: res.total_results, data: results.flatten}
      Hashie::Mash.new json
    end

    private

    # Builds a Faraday connection with basic auth and JSON request/response
    # handling (responses are wrapped in Hashie::Mash by Mashify).
    def connection
      Faraday.new(url: "https://app.close.io/api/v1", headers: { accept: 'application/json' }) do |connection|
        connection.basic_auth api_key, ''
        connection.request :json
        connection.response :logger
        connection.use FaradayMiddleware::Mashify
        connection.response :json
        connection.adapter Faraday.default_adapter
      end
    end
  end
end
Use res.data instead of the undefined local variable `data` in Client#paginate
require 'faraday'
require 'faraday_middleware'
Dir[File.expand_path('../resources/*.rb', __FILE__)].each{|f| require f}
module Closeio
  # Thin HTTP client for the Close.io REST API. Resource-specific calls are
  # mixed in from the modules under resources/.
  class Client
    include Closeio::Client::Activity
    include Closeio::Client::BulkAction
    include Closeio::Client::Contact
    include Closeio::Client::CustomField
    include Closeio::Client::EmailTemplate
    include Closeio::Client::Lead
    include Closeio::Client::LeadStatus
    include Closeio::Client::Opportunity
    include Closeio::Client::OpportunityStatus
    include Closeio::Client::Organization
    include Closeio::Client::Report
    include Closeio::Client::SmartView
    include Closeio::Client::Task
    include Closeio::Client::User

    attr_reader :api_key

    # @param api_key [String] API key, used as the basic-auth username.
    def initialize(api_key)
      @api_key = api_key
    end

    def get(path, options={})
      connection.get(path, options).body
    end

    def post(path, req_body)
      connection.post do |req|
        req.url(path)
        req.body = req_body
      end.body
    end

    def put(path, options={})
      connection.put(path, options).body
    end

    def delete(path, options = {})
      connection.delete(path, options).body
    end

    # Fetches every page of +path+ by advancing the API's +_skip+ offset and
    # returns a single Mash with all pages' data flattened together.
    def paginate(path, options)
      results = []
      skip = 0
      begin
        # Fix: request the caller-supplied path (was the unrelated `lead_path`
        # helper, leaving the parameter unused) and use merge instead of
        # merge! so the caller's options hash is not mutated.
        res = get(path, options.merge(_skip: skip))
        results.push res.data
        skip += res.data.count if res.data
      end while res.has_more
      json = {has_more: false, total_results: res.total_results, data: results.flatten}
      Hashie::Mash.new json
    end

    private

    # Builds a Faraday connection with basic auth and JSON request/response
    # handling (responses are wrapped in Hashie::Mash by Mashify).
    def connection
      Faraday.new(url: "https://app.close.io/api/v1", headers: { accept: 'application/json' }) do |connection|
        connection.basic_auth api_key, ''
        connection.request :json
        connection.response :logger
        connection.use FaradayMiddleware::Mashify
        connection.response :json
        connection.adapter Faraday.default_adapter
      end
    end
  end
end
|
require 'extlib'
# Parses an AWS CloudFormation template (JSON or YAML) into typed section
# objects (Parameter, Resource, Output, ... — declared elsewhere).
class CloudFormation
  # Template sections recognised by the parser; each gets a reader.
  ELEMENTS = [
    :parameters,
    :resources,
    :conditions,
    :outputs,
    :mappings,
    :rules,
    :metadata,
    :description,
    :aws_template_format_version
  ]
  attr_reader(*ELEMENTS)

  # @param cfn_string [String] raw template text, JSON or YAML.
  # @raise [ParserError] when the input looks like JSON but does not parse.
  #   NOTE(review): ParserError is not defined in this file — confirm it
  #   exists in the project.
  def initialize(cfn_string)
    if json_text?(cfn_string)
      raise ParserError.new('Invalid JSON!') unless valid_json?(cfn_string)
      cfn_hash = JSON.parse(cfn_string)
    else
      cfn_hash = YAML.load(cfn_string)
    end
    #puts cfn_hash['Metadata']
    ELEMENTS.each do |e|
      # camel_case (extlib) yields e.g. "AwsTemplateFormatVersion"; templates
      # use an upper-case "AWS" prefix, so fix it up.
      key = e.to_s.camel_case
      if key =~ /^Aws/
        key = key.sub(/^Aws/, "AWS")
      end
      if cfn_hash[key]
        attr = parse_element(e, cfn_hash[key])
        instance_variable_set("@" + e.to_s, attr)
      end
    end
  end

  private

  # Heuristic: treat the input as JSON when its first non-space character is
  # '{' and it contains at least one {...} group.
  def json_text?(cfn_string)
    first_character = cfn_string.gsub(/\s/, '').split('').first
    matches = cfn_string.scan(/\{[^}]*\}/)
    first_character == '{' && !matches.empty?
  end

  # True when the string parses as JSON.
  def valid_json?(cfn_string)
    JSON.parse(cfn_string)
    return true
  rescue JSON::ParserError => error
    return false
  end

  # Dispatches a section to the simple or complex parser.
  def parse_element(elm_name, cfn_hash)
    function = parser(elm_name)
    send(function, elm_name, cfn_hash)
  end

  # Scalar sections keep their raw value; everything else is wrapped in
  # model objects.
  def parser(name)
    case name
    when :description
      :simple_parser
    when :aws_template_format_version
      :simple_parser
    else
      :complex_parser
    end
  end

  def simple_parser(name, cfn_hash)
    cfn_hash
  end

  # Wraps each entry of a section hash in its model class.
  def complex_parser(name, cfn_hash)
    elms = []
    case name
    when :metadata
      cfn_hash.each_pair { |k, v| elms << Metadata.new(k, v) }
    when :rules
      cfn_hash.each_pair { |k, v| elms << Rules.new(k, v) }
    when :parameters
      cfn_hash.each_pair { |k, v| elms << Parameter.new(k, v) }
    when :resources
      cfn_hash.each_pair { |k, v| elms << Resource.new(k, v) }
    when :outputs
      cfn_hash.each_pair { |k, v| elms << Output.new(k, v) }
    when :mappings
      cfn_hash.each_pair { |k, v| elms << Mapping.new(k, v) }
    when :conditions
      cfn_hash.each_pair { |k, v| elms << Condition.new(k, v) }
    end
    return elms
  end
end
Update YAML support to reject non-scalar types, removed json-specific loading as its a subset of YAML
require 'extlib'
require 'psych'
# Parses an AWS CloudFormation template into typed section objects
# (Parameter, Resource, Output, ... — declared elsewhere in the project).
# JSON templates parse too, since JSON is a subset of YAML.
class CloudFormation
  # Raised when the template text cannot be parsed. A StandardError subclass
  # so a bare `rescue` catches it — the previous code raised Exception
  # directly, which escapes ordinary rescue clauses. Callers rescuing
  # Exception still catch this.
  class ParseError < StandardError; end

  # Template sections recognised by the parser; each gets a reader.
  ELEMENTS = [
    :parameters,
    :resources,
    :conditions,
    :outputs,
    :mappings,
    :rules,
    :metadata,
    :description,
    :aws_template_format_version
  ]
  attr_reader(*ELEMENTS)

  # @param cfn_string [String] raw template text (YAML or JSON).
  # @raise [ParseError] on malformed YAML or non-scalar custom types
  #   (safe_load rejects anything beyond plain scalars and collections).
  def initialize(cfn_string)
    cfn_hash = Psych.safe_load(cfn_string)
    ELEMENTS.each do |e|
      # camel_case (extlib) yields e.g. "AwsTemplateFormatVersion"; templates
      # use an upper-case "AWS" prefix, so fix it up.
      key = e.to_s.camel_case
      if key =~ /^Aws/
        key = key.sub(/^Aws/, "AWS")
      end
      if cfn_hash[key]
        attr = parse_element(e, cfn_hash[key])
        instance_variable_set("@" + e.to_s, attr)
      end
    end
  rescue Psych::DisallowedClass => error
    raise ParseError, "Invalid YAML. Only simple scalars are supported. #{error.message}. Check that values are quoted."
  rescue Psych::Exception => error
    raise ParseError, "Invalid YAML. #{error.message}"
  end

  private

  # Dispatches a section to the simple or complex parser.
  def parse_element(elm_name, cfn_hash)
    function = parser(elm_name)
    send(function, elm_name, cfn_hash)
  end

  # Scalar sections keep their raw value; everything else is wrapped in
  # model objects.
  def parser(name)
    case name
    when :description
      :simple_parser
    when :aws_template_format_version
      :simple_parser
    else
      :complex_parser
    end
  end

  def simple_parser(name, cfn_hash)
    cfn_hash
  end

  # Wraps each entry of a section hash in its model class.
  def complex_parser(name, cfn_hash)
    elms = []
    case name
    when :metadata
      cfn_hash.each_pair { |k, v| elms << Metadata.new(k, v) }
    when :rules
      cfn_hash.each_pair { |k, v| elms << Rules.new(k, v) }
    when :parameters
      cfn_hash.each_pair { |k, v| elms << Parameter.new(k, v) }
    when :resources
      cfn_hash.each_pair { |k, v| elms << Resource.new(k, v) }
    when :outputs
      cfn_hash.each_pair { |k, v| elms << Output.new(k, v) }
    when :mappings
      cfn_hash.each_pair { |k, v| elms << Mapping.new(k, v) }
    when :conditions
      cfn_hash.each_pair { |k, v| elms << Condition.new(k, v) }
    end
    return elms
  end
end
|
module CodeRay
# = Tokens
#
# The Tokens class represents a list of tokens returnd from
# a Scanner.
#
# A token is not a special object, just a two-element Array
# consisting of
# * the _token_ _kind_ (a Symbol representing the type of the token)
# * the _token_ _text_ (the original source of the token in a String)
#
# A token looks like this:
#
# [:comment, '# It looks like this']
# [:float, '3.1415926']
# [:error, '$^']
#
# Some scanners also yield some kind of sub-tokens, represented by special
# token texts, namely :open and :close .
#
# The Ruby scanner, for example, splits "a string" into:
#
# [
# [:open, :string],
# [:delimiter, '"'],
# [:content, 'a string'],
# [:delimiter, '"'],
# [:close, :string]
# ]
#
# Tokens is also the interface between Scanners and Encoders:
# The input is split and saved into a Tokens object. The Encoder
# then builds the output from this object.
#
# Thus, the syntax below becomes clear:
#
# CodeRay.scan('price = 2.59', :ruby).html
# # the Tokens object is here -------^
#
# See how small it is? ;)
#
# Tokens gives you the power to handle pre-scanned code very easily:
# You can convert it to a webpage, a YAML file, or dump it into a gzip'ed string
# that you put in your DB.
#
# Tokens' subclass TokenStream allows streaming to save memory.
class Tokens < Array

  # The Scanner instance that created the tokens.
  attr_accessor :scanner

  # Whether the object is a TokenStream.
  #
  # Returns false.
  def stream?
    false
  end

  # Iterates over all tokens.
  #
  # If a filter is given, only tokens of that kind are yielded.
  def each kind_filter = nil, &block
    unless kind_filter
      super(&block)
    else
      super() do |text, kind|
        next unless kind == kind_filter
        yield text, kind
      end
    end
  end

  # Iterates over all text tokens.
  # Range tokens like [:open, :string] are left out.
  #
  # Example:
  #  tokens.each_text_token { |text, kind| text.replace html_escape(text) }
  def each_text_token
    each do |text, kind|
      next unless text.is_a? ::String
      yield text, kind
    end
  end

  # Encode the tokens using encoder.
  #
  # encoder can be
  # * a symbol like :html or :statistic
  # * an Encoder class
  # * an Encoder object
  #
  # options are passed to the encoder.
  def encode encoder, options = {}
    unless encoder.is_a? Encoders::Encoder
      # Fix: when encoder is already an Encoder class, use it directly.
      # Previously encoder_class stayed nil in that case and
      # `encoder_class.new` raised NoMethodError on nil.
      encoder_class = encoder.is_a?(Class) ? encoder : Encoders[encoder]
      encoder = encoder_class.new options
    end
    encoder.encode_tokens self, options
  end

  # Turn into a string using Encoders::Text.
  #
  # +options+ are passed to the encoder if given.
  def to_s options = {}
    encode :text, options
  end

  # Redirects unknown methods to encoder calls.
  #
  # For example, if you call +tokens.html+, the HTML encoder
  # is used to highlight the tokens.
  def method_missing meth, options = {}
    Encoders[meth].new(options).encode_tokens self
  end

  # Returns the tokens compressed by joining consecutive
  # tokens of the same kind.
  #
  # This can not be undone, but should yield the same output
  # in most Encoders. It basically makes the output smaller.
  #
  # Combined with dump, it saves space for the cost of time.
  #
  # If the scanner is written carefully, this is not required -
  # for example, consecutive //-comment lines could already be
  # joined in one comment token by the Scanner.
  def optimize
    print ' Tokens#optimize: before: %d - ' % size if $DEBUG
    last_kind = last_text = nil
    new = self.class.new
    for text, kind in self
      if text.is_a? String
        if kind == last_kind
          last_text << text
        else
          new << [last_text, last_kind] if last_kind
          last_text = text
          last_kind = kind
        end
      else
        new << [last_text, last_kind] if last_kind
        last_kind = last_text = nil
        new << [text, kind]
      end
    end
    new << [last_text, last_kind] if last_kind
    print 'after: %d (%d saved = %2.0f%%)' %
      [new.size, size - new.size, 1.0 - (new.size.to_f / size)] if $DEBUG
    new
  end

  # Compact the object itself; see optimize.
  def optimize!
    replace optimize
  end

  # Ensure that all :open tokens have a correspondent :close one.
  #
  # TODO: Test this!
  def fix
    tokens = self.class.new
    # Check token nesting using a stack of kinds.
    opened = []
    for type, kind in self
      case type
      when :open
        opened.push [:close, kind]
      when :begin_line
        opened.push [:end_line, kind]
      when :close, :end_line
        expected = opened.pop
        if [type, kind] != expected
          # Unexpected :close; decide what to do based on the kind:
          # - token was never opened: delete the :close (just skip it)
          next unless opened.rindex expected
          # - token was opened earlier: also close tokens in between
          tokens << token until (token = opened.pop) == expected
        end
      end
      tokens << [type, kind]
    end
    # Close remaining opened tokens
    tokens << token while token = opened.pop
    tokens
  end

  def fix!
    replace fix
  end

  # TODO: Scanner#split_into_lines
  #
  # Makes sure that:
  # - newlines are single tokens
  #   (which means all other token are single-line)
  # - there are no open tokens at the end the line
  #
  # This makes it simple for encoders that work line-oriented,
  # like HTML with list-style numeration.
  def split_into_lines
    raise NotImplementedError
  end

  def split_into_lines!
    replace split_into_lines
  end

  # Dumps the object into a String that can be saved
  # in files or databases.
  #
  # The dump is created with Marshal.dump;
  # In addition, it is gzipped using GZip.gzip.
  #
  # The returned String object includes Undumping
  # so it has an #undump method. See Tokens.load.
  #
  # You can configure the level of compression,
  # but the default value 7 should be what you want
  # in most cases as it is a good compromise between
  # speed and compression rate.
  #
  # See GZip module.
  def dump gzip_level = 7
    require 'coderay/helpers/gzip_simple'
    dump = Marshal.dump self
    dump = dump.gzip gzip_level
    dump.extend Undumping
  end

  # The total size of the tokens.
  # Should be equal to the input size before
  # scanning.
  def text_size
    size = 0
    each_text_token do |t, k|
      # Fix: accumulate the sizes. The expression was `size + t.size`,
      # whose result was discarded, so the method always returned 0.
      size += t.size
    end
    size
  end

  # The concatenated text of all text tokens.
  # Should be equal to the input before scanning.
  def text
    map { |t, k| t if t.is_a? ::String }.join
  end

  # Include this module to give an object an #undump
  # method.
  #
  # The string returned by Tokens.dump includes Undumping.
  module Undumping
    # Calls Tokens.load with itself.
    def undump
      Tokens.load self
    end
  end

  # Undump the object using Marshal.load, then
  # unzip it using GZip.gunzip.
  #
  # The result is commonly a Tokens object, but
  # this is not guaranteed.
  def Tokens.load dump
    require 'coderay/helpers/gzip_simple'
    dump = dump.gunzip
    @dump = Marshal.load dump
  end
end
# = TokenStream
#
# The TokenStream class is a fake Array without elements.
#
# It redirects the method << to a block given at creation.
#
# This allows scanners and Encoders to use streaming (no
# tokens are saved, the input is highlighted the same time it
# is scanned) with the same code.
#
# See CodeRay.encode_stream and CodeRay.scan_stream
class TokenStream < Tokens

  # The Array itself stays empty; size counts the tokens seen by #<<.
  attr_reader :size

  # Whether the object is a TokenStream.
  #
  # Returns true.
  def stream?
    true
  end

  # Creates a new TokenStream whose +block+ is invoked for every token
  # pushed via #<<.
  #
  # Example:
  #
  #  require 'coderay'
  #
  #  token_stream = CodeRay::TokenStream.new do |kind, text|
  #    puts 'kind: %s, text size: %d.' % [kind, text.size]
  #  end
  #
  #  token_stream << [:regexp, '/\d+/']
  #  #-> kind: rexpexp, text size: 5.
  #
  def initialize(&block)
    raise ArgumentError, 'Block expected for streaming.' if block.nil?
    @callback = block
    @size = 0
  end

  # Hands +token+ to the callback, counts it, and returns self for chaining.
  def <<(token)
    @callback.call(*token)
    @size = @size + 1
    self
  end

  # This method is not implemented due to speed reasons. Use Tokens.
  def text_size
    raise NotImplementedError,
      'This method is not implemented due to speed reasons.'
  end

  # A TokenStream cannot be dumped. Use Tokens.
  def dump
    raise NotImplementedError, 'A TokenStream cannot be dumped.'
  end

  # A TokenStream cannot be optimized. Use Tokens.
  def optimize
    raise NotImplementedError, 'A TokenStream cannot be optimized.'
  end
end
end
# Self-test hook: when this file is run directly, execute the unit tests
# embedded after __END__ (read back through the DATA stream).
if $0 == __FILE__
  $VERBOSE = true
  $: << File.join(File.dirname(__FILE__), '..')
  eval DATA.read, nil, $0, __LINE__ + 4
end
__END__
require 'test/unit'
class TokensTest < Test::Unit::TestCase
def test_creation
assert CodeRay::Tokens < Array
tokens = nil
assert_nothing_raised do
tokens = CodeRay::Tokens.new
end
assert_kind_of Array, tokens
end
def test_adding_tokens
tokens = CodeRay::Tokens.new
assert_nothing_raised do
tokens << ['string', :type]
tokens << ['()', :operator]
end
assert_equal tokens.size, 2
end
def test_dump_undump
tokens = CodeRay::Tokens.new
assert_nothing_raised do
tokens << ['string', :type]
tokens << ['()', :operator]
end
tokens2 = nil
assert_nothing_raised do
tokens2 = tokens.dump.undump
end
assert_equal tokens, tokens2
end
end
Code cleanup.
module CodeRay
# = Tokens
#
# The Tokens class represents a list of tokens returnd from
# a Scanner.
#
# A token is not a special object, just a two-element Array
# consisting of
# * the _token_ _kind_ (a Symbol representing the type of the token)
# * the _token_ _text_ (the original source of the token in a String)
#
# A token looks like this:
#
# [:comment, '# It looks like this']
# [:float, '3.1415926']
# [:error, '$^']
#
# Some scanners also yield some kind of sub-tokens, represented by special
# token texts, namely :open and :close .
#
# The Ruby scanner, for example, splits "a string" into:
#
# [
# [:open, :string],
# [:delimiter, '"'],
# [:content, 'a string'],
# [:delimiter, '"'],
# [:close, :string]
# ]
#
# Tokens is also the interface between Scanners and Encoders:
# The input is split and saved into a Tokens object. The Encoder
# then builds the output from this object.
#
# Thus, the syntax below becomes clear:
#
# CodeRay.scan('price = 2.59', :ruby).html
# # the Tokens object is here -------^
#
# See how small it is? ;)
#
# Tokens gives you the power to handle pre-scanned code very easily:
# You can convert it to a webpage, a YAML file, or dump it into a gzip'ed string
# that you put in your DB.
#
# Tokens' subclass TokenStream allows streaming to save memory.
class Tokens < Array

  # The Scanner instance that created the tokens.
  attr_accessor :scanner

  # Whether the object is a TokenStream.
  #
  # Returns false.
  def stream?
    false
  end

  # Iterates over all tokens.
  #
  # If a filter is given, only tokens of that kind are yielded.
  def each kind_filter = nil, &block
    unless kind_filter
      super(&block)
    else
      super() do |text, kind|
        next unless kind == kind_filter
        yield text, kind
      end
    end
  end

  # Iterates over all text tokens.
  # Range tokens like [:open, :string] are left out.
  #
  # Example:
  #  tokens.each_text_token { |text, kind| text.replace html_escape(text) }
  def each_text_token
    each do |text, kind|
      next unless text.is_a? ::String
      yield text, kind
    end
  end

  # Encode the tokens using encoder.
  #
  # encoder can be
  # * a symbol like :html or :statistic
  # * an Encoder class
  # * an Encoder object
  #
  # options are passed to the encoder.
  def encode encoder, options = {}
    unless encoder.is_a? Encoders::Encoder
      # Fix: when encoder is already an Encoder class, use it directly.
      # Previously encoder_class stayed nil in that case and
      # `encoder_class.new` raised NoMethodError on nil.
      encoder_class = encoder.is_a?(Class) ? encoder : Encoders[encoder]
      encoder = encoder_class.new options
    end
    encoder.encode_tokens self, options
  end

  # Turn into a string using Encoders::Text.
  #
  # +options+ are passed to the encoder if given.
  def to_s options = {}
    encode :text, options
  end

  # Redirects unknown methods to encoder calls.
  #
  # For example, if you call +tokens.html+, the HTML encoder
  # is used to highlight the tokens.
  def method_missing meth, options = {}
    Encoders[meth].new(options).encode_tokens self
  end

  # Returns the tokens compressed by joining consecutive
  # tokens of the same kind.
  #
  # This can not be undone, but should yield the same output
  # in most Encoders. It basically makes the output smaller.
  #
  # Combined with dump, it saves space for the cost of time.
  #
  # If the scanner is written carefully, this is not required -
  # for example, consecutive //-comment lines could already be
  # joined in one comment token by the Scanner.
  def optimize
    # print ' Tokens#optimize: before: %d - ' % size
    last_kind = last_text = nil
    new = self.class.new
    for text, kind in self
      if text.is_a? String
        if kind == last_kind
          last_text << text
        else
          new << [last_text, last_kind] if last_kind
          last_text = text
          last_kind = kind
        end
      else
        new << [last_text, last_kind] if last_kind
        last_kind = last_text = nil
        new << [text, kind]
      end
    end
    new << [last_text, last_kind] if last_kind
    # print 'after: %d (%d saved = %2.0f%%)' % [new.size, size - new.size, 1.0 - (new.size.to_f / size)]
    new
  end

  # Compact the object itself; see optimize.
  def optimize!
    replace optimize
  end

  # Ensure that all :open tokens have a correspondent :close one.
  #
  # TODO: Test this!
  def fix
    tokens = self.class.new
    # Check token nesting using a stack of kinds.
    opened = []
    for type, kind in self
      case type
      when :open
        opened.push [:close, kind]
      when :begin_line
        opened.push [:end_line, kind]
      when :close, :end_line
        expected = opened.pop
        if [type, kind] != expected
          # Unexpected :close; decide what to do based on the kind:
          # - token was never opened: delete the :close (just skip it)
          next unless opened.rindex expected
          # - token was opened earlier: also close tokens in between
          tokens << token until (token = opened.pop) == expected
        end
      end
      tokens << [type, kind]
    end
    # Close remaining opened tokens
    tokens << token while token = opened.pop
    tokens
  end

  def fix!
    replace fix
  end

  # TODO: Scanner#split_into_lines
  #
  # Makes sure that:
  # - newlines are single tokens
  #   (which means all other token are single-line)
  # - there are no open tokens at the end the line
  #
  # This makes it simple for encoders that work line-oriented,
  # like HTML with list-style numeration.
  def split_into_lines
    raise NotImplementedError
  end

  def split_into_lines!
    replace split_into_lines
  end

  # Dumps the object into a String that can be saved
  # in files or databases.
  #
  # The dump is created with Marshal.dump;
  # In addition, it is gzipped using GZip.gzip.
  #
  # The returned String object includes Undumping
  # so it has an #undump method. See Tokens.load.
  #
  # You can configure the level of compression,
  # but the default value 7 should be what you want
  # in most cases as it is a good compromise between
  # speed and compression rate.
  #
  # See GZip module.
  def dump gzip_level = 7
    require 'coderay/helpers/gzip_simple'
    dump = Marshal.dump self
    dump = dump.gzip gzip_level
    dump.extend Undumping
  end

  # The total size of the tokens.
  # Should be equal to the input size before
  # scanning.
  def text_size
    size = 0
    each_text_token do |t, k|
      # Fix: accumulate the sizes. The expression was `size + t.size`,
      # whose result was discarded, so the method always returned 0.
      size += t.size
    end
    size
  end

  # The concatenated text of all text tokens.
  # Should be equal to the input before scanning.
  def text
    map { |t, k| t if t.is_a? ::String }.join
  end

  # Include this module to give an object an #undump
  # method.
  #
  # The string returned by Tokens.dump includes Undumping.
  module Undumping
    # Calls Tokens.load with itself.
    def undump
      Tokens.load self
    end
  end

  # Undump the object using Marshal.load, then
  # unzip it using GZip.gunzip.
  #
  # The result is commonly a Tokens object, but
  # this is not guaranteed.
  def Tokens.load dump
    require 'coderay/helpers/gzip_simple'
    dump = dump.gunzip
    @dump = Marshal.load dump
  end
end
# = TokenStream
#
# The TokenStream class is a fake Array without elements.
#
# It redirects the method << to a block given at creation.
#
# This allows scanners and Encoders to use streaming (no
# tokens are saved, the input is highlighted the same time it
# is scanned) with the same code.
#
# See CodeRay.encode_stream and CodeRay.scan_stream
class TokenStream < Tokens

  # The Array itself stays empty; size counts the tokens seen by #<<.
  attr_reader :size

  # Whether the object is a TokenStream.
  #
  # Returns true.
  def stream?
    true
  end

  # Creates a new TokenStream whose +block+ is invoked for every token
  # pushed via #<<.
  #
  # Example:
  #
  #  require 'coderay'
  #
  #  token_stream = CodeRay::TokenStream.new do |kind, text|
  #    puts 'kind: %s, text size: %d.' % [kind, text.size]
  #  end
  #
  #  token_stream << [:regexp, '/\d+/']
  #  #-> kind: rexpexp, text size: 5.
  #
  def initialize(&block)
    raise ArgumentError, 'Block expected for streaming.' if block.nil?
    @callback = block
    @size = 0
  end

  # Hands +token+ to the callback, counts it, and returns self for chaining.
  def <<(token)
    @callback.call(*token)
    @size = @size + 1
    self
  end

  # This method is not implemented due to speed reasons. Use Tokens.
  def text_size
    raise NotImplementedError,
      'This method is not implemented due to speed reasons.'
  end

  # A TokenStream cannot be dumped. Use Tokens.
  def dump
    raise NotImplementedError, 'A TokenStream cannot be dumped.'
  end

  # A TokenStream cannot be optimized. Use Tokens.
  def optimize
    raise NotImplementedError, 'A TokenStream cannot be optimized.'
  end
end
end
# Self-test hook: when this file is run directly, execute the unit tests
# embedded after __END__ (read back through the DATA stream).
if $0 == __FILE__
  $VERBOSE = true
  $: << File.join(File.dirname(__FILE__), '..')
  eval DATA.read, nil, $0, __LINE__ + 4
end
__END__
require 'test/unit'
class TokensTest < Test::Unit::TestCase
def test_creation
assert CodeRay::Tokens < Array
tokens = nil
assert_nothing_raised do
tokens = CodeRay::Tokens.new
end
assert_kind_of Array, tokens
end
def test_adding_tokens
tokens = CodeRay::Tokens.new
assert_nothing_raised do
tokens << ['string', :type]
tokens << ['()', :operator]
end
assert_equal tokens.size, 2
end
def test_dump_undump
tokens = CodeRay::Tokens.new
assert_nothing_raised do
tokens << ['string', :type]
tokens << ['()', :operator]
end
tokens2 = nil
assert_nothing_raised do
tokens2 = tokens.dump.undump
end
assert_equal tokens, tokens2
end
end |
require 'connector/base'
require 'oauth'
require 'oauth/signature/rsa/sha1'
require 'model/company'
require 'model/attendee'
require 'model/invoice'
require 'nokogiri'
require 'logger'
require 'renderer'
require 'monkey-date'
module Connector
  # Pushes invoices and contacts to the Xero accounting API over OAuth,
  # rendering the XML payloads from Haml templates.
  class Xero < Base
    attr_writer :access_token
    attr_accessor :date, :settlement

    # Daily log file shared by all instances.
    # NOTE(review): @@logger is a class variable, shared across subclasses.
    def self.logger
      @@logger ||= Logger.new("xero-#{Date.today}.log")
    end

    def self.logger=(logger)
      @@logger = logger
    end

    def logger
      self.class.logger
    end

    # @param consumer_key [String, nil] OAuth consumer key
    # @param secret_key [String, nil] OAuth consumer secret
    # @param options [Hash] extra options for OAuth::Consumer
    def initialize(consumer_key=nil, secret_key=nil, options={})
      @consumer_key = consumer_key
      @secret_key = secret_key
      @options = options
      @renderer = Renderer::Hml.new
      self.date = Date.today
      self.settlement = 15
    end

    # Lazily builds the OAuth access token from the consumer credentials.
    def access_token
      return @access_token if @access_token
      consumer = OAuth::Consumer.new(@consumer_key, @secret_key, @options)
      @access_token = OAuth::AccessToken.new(consumer, @consumer_key, @secret_key)
    end

    # Renders +invoice+ to XML, PUTs it to Xero and stores the returned
    # invoice number on the model. Returns the invoice.
    def put_invoice(invoice)
      uri = 'https://api.xero.com/api.xro/2.0/Invoice'
      invoice_as_xml = create_invoice(invoice)
      logger.info "send #{invoice_as_xml}"
      response = access_token.put(uri, invoice_as_xml)
      logger.info "get #{response.code}, #{response.body}"
      invoice.invoice_id = parse_invoice_response(response)
      invoice
    end

    # Renders +company+ to XML, PUTs it to Xero and stores the returned
    # contact id on the model. Returns the company.
    def put_contact(company)
      uri = 'https://api.xero.com/api.xro/2.0/Contact'
      xml = create_contact(company)
      logger.info "send #{xml}"
      response = access_token.put(uri, xml)
      logger.info "get #{response.code}, #{response.body}"
      company.invoicing_id = parse_contact_response(response)
      company
    end

    # parse response and return xpath content for /Response/Invoices/Invoice/InvoiceNumber
    def parse_invoice_response(response)
      case Integer(response.code)
      when 200 then
        extract_invoice_id(response)
      else
        fail!(response)
      end
    end

    # parse response and return xpath content for /Response/Contacts/Contact/ContactID
    def parse_contact_response(response)
      case Integer(response.code)
      when 200 then
        extract_contact_id(response)
      else
        fail!(response)
      end
    end

    def extract_invoice_id(response)
      doc = Nokogiri::XML(response.body)
      doc.xpath('/Response/Invoices/Invoice/InvoiceNumber').first.content
    end

    def extract_contact_id(response)
      doc = Nokogiri::XML(response.body)
      doc.xpath('/Response/Contacts/Contact/ContactID').first.content
    end

    # Collects every <Message> element from the error response and raises
    # Problem with them joined together.
    def fail!(response)
      doc = Nokogiri::XML(response.body)
      messages = doc.xpath('//Message').to_a.map { |element| element.content }.uniq
      raise Problem, messages.join(', ')
    end

    def create_invoice(invoice)
      @renderer.render('xero/invoice.xml.haml', :invoice => invoice)
    end

    def create_contact(company)
      @renderer.render('xero/contact.xml.haml', :company => company)
    end

    # Raised when Xero rejects a request.
    class Problem < StandardError;
    end
  end
end
Extract parse_response, which yields the response when the HTTP status is a success
require 'connector/base'
require 'oauth'
require 'oauth/signature/rsa/sha1'
require 'model/company'
require 'model/attendee'
require 'model/invoice'
require 'nokogiri'
require 'logger'
require 'renderer'
require 'monkey-date'
module Connector
  # Pushes invoices and contacts to the Xero accounting API over OAuth,
  # rendering the XML payloads from Haml templates.
  class Xero < Base
    attr_writer :access_token
    attr_accessor :date, :settlement

    # Daily log file shared by all instances.
    # NOTE(review): @@logger is a class variable, shared across subclasses.
    def self.logger
      @@logger ||= Logger.new("xero-#{Date.today}.log")
    end

    def self.logger=(logger)
      @@logger = logger
    end

    def logger
      self.class.logger
    end

    # @param consumer_key [String, nil] OAuth consumer key
    # @param secret_key [String, nil] OAuth consumer secret
    # @param options [Hash] extra options for OAuth::Consumer
    def initialize(consumer_key=nil, secret_key=nil, options={})
      @consumer_key = consumer_key
      @secret_key = secret_key
      @options = options
      @renderer = Renderer::Hml.new
      self.date = Date.today
      self.settlement = 15
    end

    # Lazily builds the OAuth access token from the consumer credentials.
    def access_token
      return @access_token if @access_token
      consumer = OAuth::Consumer.new(@consumer_key, @secret_key, @options)
      @access_token = OAuth::AccessToken.new(consumer, @consumer_key, @secret_key)
    end

    # Renders +invoice+ to XML, PUTs it to Xero and stores the returned
    # invoice number on the model. Returns the invoice.
    def put_invoice(invoice)
      uri = 'https://api.xero.com/api.xro/2.0/Invoice'
      invoice_as_xml = create_invoice(invoice)
      logger.info "send #{invoice_as_xml}"
      response = access_token.put(uri, invoice_as_xml)
      logger.info "get #{response.code}, #{response.body}"
      invoice.invoice_id = parse_invoice_response(response)
      invoice
    end

    # Renders +company+ to XML, PUTs it to Xero and stores the returned
    # contact id on the model. Returns the company.
    def put_contact(company)
      uri = 'https://api.xero.com/api.xro/2.0/Contact'
      xml = create_contact(company)
      logger.info "send #{xml}"
      response = access_token.put(uri, xml)
      logger.info "get #{response.code}, #{response.body}"
      company.invoicing_id = parse_contact_response(response)
      company
    end

    # Yields +response+ when the HTTP status is 200; otherwise raises
    # Problem via #fail!.
    def parse_response(response)
      case Integer(response.code)
      when 200 then
        return yield(response)
      else
        fail!(response)
      end
    end

    # Extracts /Response/Invoices/Invoice/InvoiceNumber from a success response.
    def parse_invoice_response(response)
      # `return` inside the block exits this method with the extracted id.
      parse_response(response) {|r| return extract_invoice_id(r)}
    end

    # Extracts /Response/Contacts/Contact/ContactID from a success response.
    def parse_contact_response(response)
      parse_response(response) {|r| return extract_contact_id(r)}
    end

    def extract_invoice_id(response)
      doc = Nokogiri::XML(response.body)
      doc.xpath('/Response/Invoices/Invoice/InvoiceNumber').first.content
    end

    def extract_contact_id(response)
      doc = Nokogiri::XML(response.body)
      doc.xpath('/Response/Contacts/Contact/ContactID').first.content
    end

    # Collects every <Message> element from the error response and raises
    # Problem with them joined together.
    def fail!(response)
      doc = Nokogiri::XML(response.body)
      messages = doc.xpath('//Message').to_a.map { |element| element.content }.uniq
      raise Problem, messages.join(', ')
    end

    def create_invoice(invoice)
      @renderer.render('xero/invoice.xml.haml', :invoice => invoice)
    end

    def create_contact(company)
      @renderer.render('xero/contact.xml.haml', :company => company)
    end

    # Raised when Xero rejects a request.
    class Problem < StandardError;
    end
  end
end
module ConohaVersion
  # Gem version string.
  ITSELF = "0.9.7"
end
Version 0.9.8
module ConohaVersion
  # Gem version string.
  ITSELF = "0.9.8"
end
|
# Use ActiveSupport's version of JSON if available
# NOTE(review): `class JSON` conflicts with the json gem, which defines JSON
# as a module — this shim only works if the gem has not already been loaded.
if Object.const_defined?('ActiveSupport') && ActiveSupport.const_defined?('JSON')
  class JSON
    # Delegate decoding to ActiveSupport so behaviour matches Rails.
    def self.parse(i)
      ActiveSupport::JSON.decode(i)
    end
  end
else
  require 'json/add/rails'
end
class Contacts
class Gmail < Base
URL = "https://mail.google.com/mail/"
LOGIN_URL = "https://www.google.com/accounts/ServiceLoginAuth"
LOGIN_REFERER_URL = "https://www.google.com/accounts/ServiceLogin?service=mail&passive=true&rm=false&continue=http%3A%2F%2Fmail.google.com%2Fmail%3Fui%3Dhtml%26zy%3Dl<mpl=yj_blanco<mplcache=2&hl=en"
CONTACT_LIST_URL = "https://mail.google.com/mail/contacts/data/contacts?thumb=true&show=ALL&enums=true&psort=Name&max=10000&out=js&rf=&jsx=true"
PROTOCOL_ERROR = "Gmail has changed its protocols, please upgrade this library first. If that does not work, dive into the code and submit a patch at http://github.com/cardmagic/contacts"
def real_connect
postdata = "ltmpl=yj_blanco"
postdata += "&continue=%s" % CGI.escape(URL)
postdata += "<mplcache=2"
postdata += "&service=mail"
postdata += "&rm=false"
postdata += "<mpl=yj_blanco"
postdata += "&hl=en"
postdata += "&Email=%s" % CGI.escape(login)
postdata += "&Passwd=%s" % CGI.escape(password)
postdata += "&rmShown=1"
postdata += "&null=Sign+in"
time = Time.now.to_i
time_past = Time.now.to_i - 8 - rand(12)
cookie = "GMAIL_LOGIN=T#{time_past}/#{time_past}/#{time}"
data, resp, cookies, forward, old_url = post(LOGIN_URL, postdata, cookie, LOGIN_REFERER_URL) + [LOGIN_URL]
cookies = remove_cookie("GMAIL_LOGIN", cookies)
if data.index("Username and password do not match") || data.index("New to Gmail? It's free and easy")
raise AuthenticationError, "Username and password do not match"
elsif data.index("The username or password you entered is incorrect")
raise AuthenticationError, "Username and password do not match"
elsif data.index("Required field must not be blank")
raise AuthenticationError, "Login and password must not be blank"
elsif data.index("errormsg_0_logincaptcha")
raise AuthenticationError, "Captcha error"
elsif data.index("Invalid request")
raise ConnectionError, PROTOCOL_ERROR
elsif cookies == ""
raise ConnectionError, PROTOCOL_ERROR
end
cookies = remove_cookie("LSID", cookies)
cookies = remove_cookie("GV", cookies)
@cookies = cookies
end
private
# Parse Gmail's contacts payload.
#
# data    - the raw response body, wrapped in Gmail's anti-JSON-hijacking
#           "while (true); &&&START&&& ... &&&END&&&" envelope (mutated in place).
# options - pass :details => true to get the raw per-contact hashes.
#
# Returns an Array: either the raw contact hashes (:details) or rows of
# ['Name', 'Email1', 'Email2', ...] with email-less contacts dropped.
def parse(data, options)
  # Strip the anti-hijacking envelope.
  data.gsub!(/^while \(true\); &&&START&&&/, '')
  data.gsub!(/ &&&END&&&$/, '')
  # Control characters (tabs included) in note fields break JSON.parse; the
  # old extra tab-only gsub! was redundant with this one.
  data.gsub!(/[\t\x00-\x1F]/, " ")
  # "Contacts" is a JSON array; fall back to an empty Array (the previous
  # {} fallback returned the wrong collection type when none were present).
  @contacts = JSON.parse(data)['Body']['Contacts'] || []
  # Return the full JSON structures when details were requested.
  return @contacts if options[:details]
  # Default format: ['Name', 'Email1', 'Email2', ...] per contact.
  @contacts = @contacts.delete_if { |c| c["Emails"].nil? }.map do |c|
    name, emails = c.values_at "Name", "Emails"
    # Emails arrive as
    # [{"Address"=>"home@x.com"}, {"Type"=>{"Id"=>"WORK"}, "Address"=>"work@x.com"}]
    emails = emails.collect { |a| a.values_at("Address") }
    [name, emails].flatten
  end
end
end
TYPES[:gmail] = Gmail
end
Make ActiveSupport JSON detection compatible with Rails 2.3.4
# Use ActiveSupport's JSON implementation when running inside Rails.
# The is_a?(Class) probe keeps this compatible with Rails 2.3.4, where the
# shape of ActiveSupport::JSON changed; only then do we shadow the top-level
# JSON parser with a thin delegating wrapper.
# (Removed a stray `puts JSON.class` left over from debugging — it printed on
# every library load.)
if Object.const_defined?('ActiveSupport') && ActiveSupport.const_defined?('JSON') && ActiveSupport::JSON.is_a?(Class)
  class JSON
    # Delegate parsing to ActiveSupport so behaviour matches the host app.
    def self.parse(i)
      ActiveSupport::JSON.decode(i)
    end
  end
else
  # Outside Rails, fall back to the json gem's Rails-compatible extensions.
  require 'json/add/rails'
end
class Contacts
# Screen-scraping Gmail contacts importer.  Logs in via the HTML form flow
# and downloads the contacts export; inherits request plumbing (post/get,
# remove_cookie, login/password accessors) from Base.
class Gmail < Base
URL = "https://mail.google.com/mail/"
LOGIN_URL = "https://www.google.com/accounts/ServiceLoginAuth"
# NOTE(review): the "<mpl=" / "<mplcache=" fragments look like a mangled
# "&ltmpl=" / "&ltmplcache=" — confirm against a captured browser request.
LOGIN_REFERER_URL = "https://www.google.com/accounts/ServiceLogin?service=mail&passive=true&rm=false&continue=http%3A%2F%2Fmail.google.com%2Fmail%3Fui%3Dhtml%26zy%3Dl<mpl=yj_blanco<mplcache=2&hl=en"
CONTACT_LIST_URL = "https://mail.google.com/mail/contacts/data/contacts?thumb=true&show=ALL&enums=true&psort=Name&max=10000&out=js&rf=&jsx=true"
PROTOCOL_ERROR = "Gmail has changed its protocols, please upgrade this library first. If that does not work, dive into the code and submit a patch at http://github.com/cardmagic/contacts"
# Perform the HTML form login and stash the session cookies in @cookies.
# Raises AuthenticationError / ConnectionError on recognizable failures.
def real_connect
postdata = "ltmpl=yj_blanco"
postdata += "&continue=%s" % CGI.escape(URL)
postdata += "<mplcache=2"
postdata += "&service=mail"
postdata += "&rm=false"
postdata += "<mpl=yj_blanco"
postdata += "&hl=en"
postdata += "&Email=%s" % CGI.escape(login)
postdata += "&Passwd=%s" % CGI.escape(password)
postdata += "&rmShown=1"
postdata += "&null=Sign+in"
# Fake a GMAIL_LOGIN timing cookie; the random offset mimics human timing.
time = Time.now.to_i
time_past = Time.now.to_i - 8 - rand(12)
cookie = "GMAIL_LOGIN=T#{time_past}/#{time_past}/#{time}"
data, resp, cookies, forward, old_url = post(LOGIN_URL, postdata, cookie, LOGIN_REFERER_URL) + [LOGIN_URL]
cookies = remove_cookie("GMAIL_LOGIN", cookies)
# Sniff the response HTML for known failure markers.
if data.index("Username and password do not match") || data.index("New to Gmail? It's free and easy")
raise AuthenticationError, "Username and password do not match"
elsif data.index("The username or password you entered is incorrect")
raise AuthenticationError, "Username and password do not match"
elsif data.index("Required field must not be blank")
raise AuthenticationError, "Login and password must not be blank"
elsif data.index("errormsg_0_logincaptcha")
raise AuthenticationError, "Captcha error"
elsif data.index("Invalid request")
raise ConnectionError, PROTOCOL_ERROR
elsif cookies == ""
raise ConnectionError, PROTOCOL_ERROR
end
cookies = remove_cookie("LSID", cookies)
cookies = remove_cookie("GV", cookies)
@cookies = cookies
end
private
# Parse the contacts payload.  With options[:details] the raw per-contact
# hashes are returned; otherwise rows of ['Name', 'Email1', ...] with
# email-less contacts dropped.
def parse(data, options)
# Strip Gmail's anti-JSON-hijacking envelope.
data.gsub!(/^while \(true\); &&&START&&&/, '')
data.gsub!(/ &&&END&&&$/, '')
data.gsub!(/\t/, ' ') # tabs in the note field cause errors with JSON.parse
# NOTE(review): the previous line is redundant — this strip covers \t too.
data.gsub!(/[\t\x00-\x1F]/, " ") # strip control characters
@contacts = JSON.parse(data)['Body']['Contacts'] || {}
# Determine in which format to return the data.
# Return the full JSON Hash.
return @contacts if(options[:details])
# Default format.
# ['Name', 'Email1', 'Email2', ...]
if @contacts != nil
@contacts = @contacts.delete_if {|c| c["Emails"].nil?}.map do |c|
name, emails = c.values_at "Name", "Emails"
# emails are returned in a form of
# [{"Address"=>"home.email@gmail.com"}, {"Type"=>{"Id"=>"WORK"}, "Address"=>"work.email@gmail.com"}]
emails = emails.collect{|a| a.values_at("Address")}
[name, emails].flatten
end
else
[]
end
end
end
# Register this importer under the :gmail key.
TYPES[:gmail] = Gmail
end
|
require 'aws-sdk'
module EcsDeployer
module Service
# Updates an ECS service to a new task definition and optionally polls the
# cluster until the rollout looks complete.
class Client
LOG_SEPARATOR = '-' * 96
# Rollout polling knobs (seconds); callers may override after construction.
attr_accessor :wait_timeout, :polling_interval
# @param [String] cluster
# @param [Logger] logger
# @param [Hash] aws_options
# @return [EcsDeployer::Service::Client]
def initialize(cluster, logger, aws_options = {})
@cluster = cluster
@logger = logger
@ecs = Aws::ECS::Client.new(aws_options)
@task = EcsDeployer::Task::Client.new(aws_options)
@wait_timeout = 900
@polling_interval = 20
end
# Point the service at a task definition (cloning the current one when none
# is given) and, when wait is true, block until the rollout completes.
# @param [String] service
# @param [Aws::ECS::Types::TaskDefinition] task_definition
# @return [Aws::ECS::Types::Service]
def update(service, task_definition = nil, wait = true)
task_definition = @task.register_clone(@cluster, service) if task_definition.nil?
result = @ecs.update_service(
cluster: @cluster,
service: service,
task_definition: task_definition[:family] + ':' + task_definition[:revision].to_s
)
wait_for_deploy(service, result.service.task_definition) if wait
result.service
end
# True when the cluster has an ACTIVE service with this exact name.
# @param [String] service
# @return [Bool]
def exist?(service)
status = nil
result = @ecs.describe_services(
cluster: @cluster,
services: [service]
)
result[:services].each do |svc|
next unless svc[:service_name] == service && svc[:status] == 'ACTIVE'
status = svc
break
end
status.nil? ? false : true
end
private
# Raise TaskStoppedError when any STOPPED task belongs to the new task
# definition — i.e. the new revision is crashing on startup.
# @param [String] service
# @param [String] task_definition_arn
def detect_stopped_task(service, task_definition_arn)
stopped_tasks = @ecs.list_tasks(
cluster: @cluster,
service_name: service,
desired_status: 'STOPPED'
).task_arns
return if stopped_tasks.size.zero?
description_tasks = @ecs.describe_tasks(
cluster: @cluster,
tasks: stopped_tasks
).tasks
description_tasks.each do |task|
raise TaskStoppedError, task.stopped_reason if task.task_definition_arn == task_definition_arn
end
end
# Snapshot of the rollout: how many RUNNING tasks use the new definition,
# plus human-readable per-task status lines.
# @param [String] service
# @param [String] task_definition_arn
# @return [Hash]
def deploy_status(service, task_definition_arn)
detect_stopped_task(service, task_definition_arn)
# Get current tasks
result = @ecs.list_tasks(
cluster: @cluster,
service_name: service,
desired_status: 'RUNNING'
)
new_task_count = 0
status_logs = []
if result[:task_arns].size.positive?
result = @ecs.describe_tasks(
cluster: @cluster,
tasks: result[:task_arns]
)
result[:tasks].each do |task|
new_task_count += 1 if task_definition_arn == task[:task_definition_arn] && task[:last_status] == 'RUNNING'
status_logs << "  #{task[:task_definition_arn]} [#{task[:last_status]}]"
end
end
{
new_task_count: new_task_count,
status_logs: status_logs
}
end
# Poll deploy_status until the desired count of new-definition tasks is
# RUNNING, or raise DeployTimeoutError after @wait_timeout seconds.
# NOTE(review): the success check only counts tasks on the NEW definition;
# tasks still RUNNING on the old definition are not considered, so this can
# report success while old tasks are still draining — confirm intent.
# @param [String] service
# @param [String] task_definition_arn
def wait_for_deploy(service, task_definition_arn)
raise ServiceNotFoundError, "'#{service}' service is not found." unless exist?(service)
wait_time = 0
@logger.info 'Start deployment.'
result = @ecs.describe_services(
cluster: @cluster,
services: [service]
)
desired_count = result[:services][0][:desired_count]
loop do
sleep(@polling_interval)
wait_time += @polling_interval
result = deploy_status(service, task_definition_arn)
@logger.info "Updating... [#{result[:new_task_count]}/#{desired_count}] (#{wait_time} seconds elapsed)"
@logger.info "New task: #{task_definition_arn}"
@logger.info LOG_SEPARATOR
if result[:status_logs].count.positive?
result[:status_logs].each do |log|
@logger.info log
end
@logger.info LOG_SEPARATOR
end
if result[:new_task_count] == desired_count
@logger.info "Service update succeeded. [#{result[:new_task_count]}/#{desired_count}]"
@logger.info "New task definition: #{task_definition_arn}"
break
else
@logger.info 'You can stop process with Ctrl+C. Deployment continues in background.'
if wait_time > @wait_timeout
@logger.info "New task definition: #{task_definition_arn}"
raise DeployTimeoutError, 'Service is being updating, but process is timed out.'
end
end
end
end
end
end
end
Fix deploy complete check
require 'aws-sdk'
module EcsDeployer
module Service
# Updates an ECS service to a new task definition and optionally polls the
# cluster until the rollout completes: all desired tasks run the new
# definition AND no task on the old definition remains.
class Client
LOG_SEPARATOR = '-' * 96
# Rollout polling knobs (seconds); callers may override after construction.
attr_accessor :wait_timeout, :polling_interval
# @param [String] cluster
# @param [Logger] logger
# @param [Hash] aws_options
# @return [EcsDeployer::Service::Client]
def initialize(cluster, logger, aws_options = {})
@cluster = cluster
@logger = logger
@ecs = Aws::ECS::Client.new(aws_options)
@task = EcsDeployer::Task::Client.new(aws_options)
@wait_timeout = 900
@polling_interval = 20
end
# Point the service at a task definition (cloning the current one when none
# is given) and, when wait is true, block until the rollout completes.
# @param [String] service
# @param [Aws::ECS::Types::TaskDefinition] task_definition
# @return [Aws::ECS::Types::Service]
def update(service, task_definition = nil, wait = true)
task_definition = @task.register_clone(@cluster, service) if task_definition.nil?
result = @ecs.update_service(
cluster: @cluster,
service: service,
task_definition: task_definition[:family] + ':' + task_definition[:revision].to_s
)
wait_for_deploy(service, result.service.task_definition) if wait
result.service
end
# True when the cluster has an ACTIVE service with this exact name.
# @param [String] service
# @return [Bool]
def exist?(service)
status = nil
result = @ecs.describe_services(
cluster: @cluster,
services: [service]
)
result[:services].each do |svc|
next unless svc[:service_name] == service && svc[:status] == 'ACTIVE'
status = svc
break
end
status.nil? ? false : true
end
private
# Raise TaskStoppedError when any STOPPED task belongs to the new task
# definition — i.e. the new revision is crashing on startup.
# @param [String] service
# @param [String] task_definition_arn
def detect_stopped_task(service, task_definition_arn)
stopped_tasks = @ecs.list_tasks(
cluster: @cluster,
service_name: service,
desired_status: 'STOPPED'
).task_arns
return if stopped_tasks.size.zero?
description_tasks = @ecs.describe_tasks(
cluster: @cluster,
tasks: stopped_tasks
).tasks
description_tasks.each do |task|
raise TaskStoppedError, task.stopped_reason if task.task_definition_arn == task_definition_arn
end
end
# Snapshot of the rollout: RUNNING tasks on the new definition vs. tasks
# still on any other (old) definition, plus per-task status lines.
# NOTE(review): "registerd" is a typo kept in the internal hash keys /
# locals; renaming would be a separate, coordinated change.
# @param [String] service
# @param [String] task_definition_arn
# @return [Hash]
def deploy_status(service, task_definition_arn)
detect_stopped_task(service, task_definition_arn)
# Get current tasks
result = @ecs.list_tasks(
cluster: @cluster,
service_name: service,
desired_status: 'RUNNING'
)
new_registerd_task_count = 0
current_task_count = 0
status_logs = []
if result[:task_arns].size.positive?
result = @ecs.describe_tasks(
cluster: @cluster,
tasks: result[:task_arns]
)
result[:tasks].each do |task|
if task_definition_arn == task[:task_definition_arn]
new_registerd_task_count += 1 if task[:last_status] == 'RUNNING'
else
# Still on the previous definition — rollout not finished.
current_task_count += 1
end
status_logs << "  #{task[:task_definition_arn]} [#{task[:last_status]}]"
end
end
{
current_task_count: current_task_count,
new_registerd_task_count: new_registerd_task_count,
status_logs: status_logs
}
end
# Poll until all desired tasks run the new definition and no old-definition
# task remains, or raise DeployTimeoutError after @wait_timeout seconds.
# @param [String] service
# @param [String] task_definition_arn
def wait_for_deploy(service, task_definition_arn)
raise ServiceNotFoundError, "'#{service}' service is not found." unless exist?(service)
wait_time = 0
@logger.info 'Start deployment.'
result = @ecs.describe_services(
cluster: @cluster,
services: [service]
)
desired_count = result[:services][0][:desired_count]
loop do
sleep(@polling_interval)
wait_time += @polling_interval
result = deploy_status(service, task_definition_arn)
@logger.info "Updating... [#{result[:new_registerd_task_count]}/#{desired_count}] (#{wait_time} seconds elapsed)"
@logger.info "New task: #{task_definition_arn}"
@logger.info LOG_SEPARATOR
if result[:status_logs].count.positive?
result[:status_logs].each do |log|
@logger.info log
end
@logger.info LOG_SEPARATOR
end
if result[:new_registerd_task_count] == desired_count && result[:current_task_count] == 0
@logger.info "Service update succeeded. [#{result[:new_registerd_task_count]}/#{desired_count}]"
@logger.info "New task definition: #{task_definition_arn}"
break
else
@logger.info 'You can stop process with Ctrl+C. Deployment continues in background.'
if wait_time > @wait_timeout
@logger.info "New task definition: #{task_definition_arn}"
raise DeployTimeoutError, 'Service is being updating, but process is timed out.'
end
end
end
end
end
end
end
|
require 'rexml/document'
class Contacts
  # Plaxo address-book importer.  Sends the user's credentials with the
  # contacts request and parses the XML response into [name, email] pairs.
  class Plaxo < Base
    URL = "http://www.plaxo.com/"
    LOGIN_URL = "https://www.plaxo.com/signin"
    ADDRESS_BOOK_URL = "http://www.plaxo.com/po3/?module=ab&operation=viewFull&mode=normal"
    CONTACT_LIST_URL = "http://www.plaxo.com/axis/soap/contact?_action=getContacts&_format=xml"
    PROTOCOL_ERROR = "Plaxo has changed its protocols, please upgrade this library first. If that does not work, dive into the code and submit a patch at http://github.com/cardmagic/contacts"

    # Credentials travel with each request, so no session is set up here.
    def real_connect
    end # real_connect

    # Fetch and parse the contact list.  Raises ConnectionError on any
    # non-200 HTTP response.
    def contacts
      getdata =  "&authInfo.authByEmail.email=%s" % CGI.escape(login)
      getdata += "&authInfo.authByEmail.password=%s" % CGI.escape(password)
      data, resp, cookies, forward = get(CONTACT_LIST_URL + getdata)
      if resp.code_type != Net::HTTPOK
        raise ConnectionError, PROTOCOL_ERROR
      end
      parse data
    end # contacts

    private

    # Parse Plaxo's XML payload.  Raises AuthenticationError on code 401,
    # ConnectionError on anything other than 200; otherwise returns an
    # Array of [name, email] pairs (either element may be nil, not both).
    def parse(data, options={})
      doc = REXML::Document.new(data)
      code = doc.elements['//response/code'].text
      if code == '401'
        raise AuthenticationError, "Username and password do not match"
      elsif code == '200'
        @contacts = []
        doc.elements.each('//contact') do |cont|
          name = if cont.elements['fullName']
            cont.elements['fullName'].text
          elsif cont.elements['displayName']
            cont.elements['displayName'].text
          end
          email = if cont.elements['email1']
            cont.elements['email1'].text
          end
          # Only keep contacts that carry at least a name or an email.
          # (The old `end.compact` compacted the discarded XPath match
          # array — a no-op — while still pushing [nil, nil] rows.)
          if name || email
            @contacts << [name, email]
          end
        end
        @contacts
      else
        raise ConnectionError, PROTOCOL_ERROR
      end
    end # parse
  end # Plaxo
  TYPES[:plaxo] = Plaxo
end # Contacts
# sample contacts responses
=begin
Bad email
=========
<?xml version="1.0" encoding="utf-8" ?>
<ns1:GetContactsResponse xmlns:ns1="Plaxo">
<response>
<code>401</code>
<subCode>1</subCode>
<message>User not found.</message>
</response>
</ns1:GetContactsResponse>
Bad password
============
<?xml version="1.0" encoding="utf-8" ?>
<ns1:GetContactsResponse xmlns:ns1="Plaxo">
<response>
<code>401</code>
<subCode>4</subCode>
<message>Bad password or security token.</message>
</response>
</ns1:GetContactsResponse>
Success
=======
<?xml version="1.0" encoding="utf-8" ?>
<ns1:GetContactsResponse xmlns:ns1="Plaxo">
<response>
<code>200</code>
<message>OK</message>
<userId>77311236242</userId>
</response>
<contacts>
<contact>
<itemId>61312569</itemId>
<displayName>Joe Blow1</displayName>
<fullName>Joe Blow1</fullName>
<firstName>Joe</firstName>
<lastName>Blow1</lastName>
<homeEmail1>joeblow1@mailinator.com</homeEmail1>
<email1>joeblow1@mailinator.com</email1>
<folderId>5291351</folderId>
</contact>
<contact>
<itemId>61313159</itemId>
<displayName>Joe Blow2</displayName>
<fullName>Joe Blow2</fullName>
<firstName>Joe</firstName>
<lastName>Blow2</lastName>
<homeEmail1>joeblow2@mailinator.com</homeEmail1>
<email1>joeblow2@mailinator.com</email1>
<folderId>5291351</folderId>
</contact>
</contacts>
<totalCount>2</totalCount>
<editCounter>3</editCounter>
</ns1:GetContactsResponse>
=end
Fix Plaxo contact parsing: skip contacts with neither a name nor an email
require 'rexml/document'
class Contacts
# Plaxo address-book importer.  Sends the user's credentials with the
# contacts request and parses the XML response into [name, email] pairs.
class Plaxo < Base
URL = "http://www.plaxo.com/"
LOGIN_URL = "https://www.plaxo.com/signin"
ADDRESS_BOOK_URL = "http://www.plaxo.com/po3/?module=ab&operation=viewFull&mode=normal"
CONTACT_LIST_URL = "http://www.plaxo.com/axis/soap/contact?_action=getContacts&_format=xml"
PROTOCOL_ERROR = "Plaxo has changed its protocols, please upgrade this library first. If that does not work, dive into the code and submit a patch at http://github.com/cardmagic/contacts"
# Credentials travel with each request, so no session is set up here.
def real_connect
end # real_connect
# Fetch and parse the contact list.  Raises ConnectionError on any
# non-200 HTTP response.
def contacts
getdata =  "&authInfo.authByEmail.email=%s" % CGI.escape(login)
getdata += "&authInfo.authByEmail.password=%s" % CGI.escape(password)
data, resp, cookies, forward = get(CONTACT_LIST_URL + getdata)
if resp.code_type != Net::HTTPOK
raise ConnectionError, PROTOCOL_ERROR
end
parse data
end # contacts
private
# Parse Plaxo's XML payload.  Raises AuthenticationError on code 401,
# ConnectionError on anything other than 200; otherwise returns an Array
# of [name, email] pairs (either element may be nil, not both).
def parse(data, options={})
doc = REXML::Document.new(data)
code = doc.elements['//response/code'].text
if code == '401'
raise AuthenticationError, "Username and password do not match"
elsif code == '200'
@contacts = []
doc.elements.each('//contact') do |cont|
name = if cont.elements['fullName']
cont.elements['fullName'].text
elsif cont.elements['displayName']
cont.elements['displayName'].text
end
email = if cont.elements['email1']
cont.elements['email1'].text
end
# Only keep contacts that carry at least a name or an email.
if name || email
@contacts << [name, email]
end
end
@contacts
else
raise ConnectionError, PROTOCOL_ERROR
end
end # parse
end # Plaxo
# Register this importer under the :plaxo key.
TYPES[:plaxo] = Plaxo
end # Contacts
# sample contacts responses
=begin
Bad email
=========
<?xml version="1.0" encoding="utf-8" ?>
<ns1:GetContactsResponse xmlns:ns1="Plaxo">
<response>
<code>401</code>
<subCode>1</subCode>
<message>User not found.</message>
</response>
</ns1:GetContactsResponse>
Bad password
============
<?xml version="1.0" encoding="utf-8" ?>
<ns1:GetContactsResponse xmlns:ns1="Plaxo">
<response>
<code>401</code>
<subCode>4</subCode>
<message>Bad password or security token.</message>
</response>
</ns1:GetContactsResponse>
Success
=======
<?xml version="1.0" encoding="utf-8" ?>
<ns1:GetContactsResponse xmlns:ns1="Plaxo">
<response>
<code>200</code>
<message>OK</message>
<userId>77311236242</userId>
</response>
<contacts>
<contact>
<itemId>61312569</itemId>
<displayName>Joe Blow1</displayName>
<fullName>Joe Blow1</fullName>
<firstName>Joe</firstName>
<lastName>Blow1</lastName>
<homeEmail1>joeblow1@mailinator.com</homeEmail1>
<email1>joeblow1@mailinator.com</email1>
<folderId>5291351</folderId>
</contact>
<contact>
<itemId>61313159</itemId>
<displayName>Joe Blow2</displayName>
<fullName>Joe Blow2</fullName>
<firstName>Joe</firstName>
<lastName>Blow2</lastName>
<homeEmail1>joeblow2@mailinator.com</homeEmail1>
<email1>joeblow2@mailinator.com</email1>
<folderId>5291351</folderId>
</contact>
</contacts>
<totalCount>2</totalCount>
<editCounter>3</editCounter>
</ns1:GetContactsResponse>
=end |
module EffectiveResources
# Gem release version; frozen so it cannot be mutated at runtime.
VERSION = '1.18.7'.freeze
end
Version 1.18.8
module EffectiveResources
# Gem release version; frozen so it cannot be mutated at runtime.
VERSION = '1.18.8'.freeze
end
|
module Excon
  # Default instrumentor: pretty-prints each notification name and its
  # params to $stderr with credentials redacted, then runs the block.
  class StandardInstrumentor
    # name   - notification name, printed first.
    # params - notification payload; :connection/:stack are dropped as noise
    #          and Authorization headers / :password are replaced with the
    #          Excon-level REDACTED marker before printing.
    # Yields (and returns the block's value) when a block is given.
    def self.instrument(name, params = {}, &block)
      params = params.dup
      # reduce duplication/noise of output
      params.delete(:connection)
      params.delete(:stack)
      if params.has_key?(:headers) && params[:headers].has_key?('Authorization')
        # Dup before mutating so the caller's headers hash is untouched.
        params[:headers] = params[:headers].dup
        params[:headers]['Authorization'] = REDACTED
      end
      if params.has_key?(:password)
        params[:password] = REDACTED
      end
      $stderr.puts(name)
      indent = 0
      pretty_printer = lambda do |hash|
        indent += 2
        max_key_length = hash.keys.map {|key| key.inspect.length}.max
        # Sort on the string form of each key: params may mix Symbols and
        # Strings, and Symbol <=> String raises ArgumentError under #sort.
        hash.keys.sort_by {|key| key.to_s}.each do |key|
          value = hash[key]
          $stderr.write("#{' ' * indent}#{key.inspect.ljust(max_key_length)} => ")
          case value
          when Array
            $stderr.puts("[")
            value.each do |v|
              $stderr.puts("#{' ' * indent}  #{v.inspect}")
            end
            $stderr.write("#{' ' * indent}]")
          when Hash
            $stderr.puts("{")
            pretty_printer.call(value)
            $stderr.write("#{' ' * indent}}")
          else
            $stderr.write("#{value.inspect}")
          end
          $stderr.puts
        end
        indent -= 2
      end
      pretty_printer.call(params)
      if block_given?
        yield
      end
    end
  end
end
standard instrumentor keys sort fix
need to cast to string before sort for backwards compatibility
module Excon
# Default instrumentor: pretty-prints each notification name and its params
# to $stderr with credentials redacted, then runs the block.
class StandardInstrumentor
# name   - notification name, printed first.
# params - notification payload; :connection/:stack are dropped as noise
#          and Authorization headers / :password are replaced with the
#          Excon-level REDACTED marker before printing.
# Yields (and returns the block's value) when a block is given.
def self.instrument(name, params = {}, &block)
params = params.dup
# reduce duplication/noise of output
params.delete(:connection)
params.delete(:stack)
if params.has_key?(:headers) && params[:headers].has_key?('Authorization')
# Dup before mutating so the caller's headers hash is untouched.
params[:headers] = params[:headers].dup
params[:headers]['Authorization'] = REDACTED
end
if params.has_key?(:password)
params[:password] = REDACTED
end
$stderr.puts(name)
indent = 0
pretty_printer = lambda do |hash|
indent += 2
max_key_length = hash.keys.map {|key| key.inspect.length}.max
# Sort on the string form: keys may mix Symbols and Strings, and
# Symbol <=> String raises ArgumentError under plain #sort.
hash.keys.sort_by {|key| key.to_s}.each do |key|
value = hash[key]
$stderr.write("#{' ' * indent}#{key.inspect.ljust(max_key_length)} => ")
case value
when Array
$stderr.puts("[")
value.each do |v|
$stderr.puts("#{' ' * indent}  #{v.inspect}")
end
$stderr.write("#{' ' * indent}]")
when Hash
$stderr.puts("{")
# Recurse for nested hashes; indent grows/shrinks around the call.
pretty_printer.call(value)
$stderr.write("#{' ' * indent}}")
else
$stderr.write("#{value.inspect}")
end
$stderr.puts
end
indent -= 2
end
pretty_printer.call(params)
if block_given?
yield
end
end
end
end
|
module ExtractedValidator
# Gem release version.
VERSION = "0.0.1"
end
Bump version to 0.0.2
module ExtractedValidator
# Gem release version.
VERSION = "0.0.2"
end
|
require 'rack/client'
require 'json'
require 'time'
# (Removed stray load-time debug output — `puts "using the one in path!"` —
# which printed to stdout every time this library was required.)
module EY
module ApiHMAC
# JSON-over-HTTP client that signs every request with HMAC auth via the
# EY::ApiHMAC::ApiAuth::Client rack middleware.
class BaseConnection
attr_reader :auth_id, :auth_key
# NOTE(review): HTTP_DATE is captured once at construction time, so
# long-lived connections send a stale Date header — confirm intent.
# `default_user_agent` is defined elsewhere (not visible here).
def initialize(auth_id, auth_key, user_agent = nil)
@auth_id = auth_id
@auth_key = auth_key
@standard_headers = {
'CONTENT_TYPE' => 'application/json',
'Accept' => 'application/json',
'HTTP_DATE' => Time.now.httpdate,
'USER_AGENT' => user_agent || default_user_agent
}
end
# Raised on HTTP 404.
class NotFound < StandardError
def initialize(url)
super("#{url} not found")
end
end
# Raised on HTTP 400; extracts "error_messages" from the JSON body when
# possible, otherwise falls back to the raw body.
class ValidationError < StandardError
attr_reader :error_messages
def initialize(response)
json_response = JSON.parse(response.body)
@error_messages = json_response["error_messages"]
super("error: #{@error_messages.join("\n")}")
rescue => e
@error_messages = []
super("error: #{response.body}")
end
end
# Raised on any unexpected HTTP status.
class UnknownError < StandardError
def initialize(response)
super("unknown error(#{response.status}): #{response.body}")
end
end
# The rack-client handler; overridable for tests.
attr_writer :backend
def backend
@backend ||= Rack::Client::Handler::NetHTTP
end
def post(url, body, &block)
request(:post, url, body, &block)
end
def put(url, body, &block)
request(:put, url, body, &block)
end
def delete(url, &block)
request(:delete, url, &block)
end
def get(url, &block)
request(:get, url, &block)
end
protected
# Lazily build the HMAC-signing rack client.  The Rack::Client.new block
# is instance_eval'd, so locals are copied out first to stay in scope.
def client
bak = self.backend
#damn you scope!
auth_id_arg = auth_id
auth_key_arg = auth_key
@client ||= Rack::Client.new do
use EY::ApiHMAC::ApiAuth::Client, auth_id_arg, auth_key_arg
run bak
end
end
# Issue the request (body serialized to JSON when present) and route the
# response through handle_response.
def request(method, url, body = nil, &block)
if body
response = client.send(method, url, @standard_headers, body.to_json)
else
response = client.send(method, url, @standard_headers)
end
handle_response(url, response, &block)
end
# Map HTTP status to success (yield parsed JSON + Location) or a typed error.
def handle_response(url, response)
case response.status
when 200, 201
json_body = JSON.parse(response.body)
yield json_body, response["Location"] if block_given?
when 404
raise NotFound.new(url)
when 400
raise ValidationError.new(response)
else
raise UnknownError.new(response)
end
end
end
end
end
When I puts you puts we puts.
require 'rack/client'
require 'json'
require 'time'
module EY
module ApiHMAC
# JSON-over-HTTP client that signs every request with HMAC auth via the
# EY::ApiHMAC::ApiAuth::Client rack middleware.
class BaseConnection
attr_reader :auth_id, :auth_key
# NOTE(review): HTTP_DATE is captured once at construction time, so
# long-lived connections send a stale Date header — confirm intent.
# `default_user_agent` is defined elsewhere (not visible here).
def initialize(auth_id, auth_key, user_agent = nil)
@auth_id = auth_id
@auth_key = auth_key
@standard_headers = {
'CONTENT_TYPE' => 'application/json',
'Accept' => 'application/json',
'HTTP_DATE' => Time.now.httpdate,
'USER_AGENT' => user_agent || default_user_agent
}
end
# Raised on HTTP 404.
class NotFound < StandardError
def initialize(url)
super("#{url} not found")
end
end
# Raised on HTTP 400; extracts "error_messages" from the JSON body when
# possible, otherwise falls back to the raw body.
class ValidationError < StandardError
attr_reader :error_messages
def initialize(response)
json_response = JSON.parse(response.body)
@error_messages = json_response["error_messages"]
super("error: #{@error_messages.join("\n")}")
rescue => e
@error_messages = []
super("error: #{response.body}")
end
end
# Raised on any unexpected HTTP status.
class UnknownError < StandardError
def initialize(response)
super("unknown error(#{response.status}): #{response.body}")
end
end
# The rack-client handler; overridable for tests.
attr_writer :backend
def backend
@backend ||= Rack::Client::Handler::NetHTTP
end
def post(url, body, &block)
request(:post, url, body, &block)
end
def put(url, body, &block)
request(:put, url, body, &block)
end
def delete(url, &block)
request(:delete, url, &block)
end
def get(url, &block)
request(:get, url, &block)
end
protected
# Lazily build the HMAC-signing rack client.  The Rack::Client.new block
# is instance_eval'd, so locals are copied out first to stay in scope.
def client
bak = self.backend
#damn you scope!
auth_id_arg = auth_id
auth_key_arg = auth_key
@client ||= Rack::Client.new do
use EY::ApiHMAC::ApiAuth::Client, auth_id_arg, auth_key_arg
run bak
end
end
# Issue the request (body serialized to JSON when present) and route the
# response through handle_response.
def request(method, url, body = nil, &block)
if body
response = client.send(method, url, @standard_headers, body.to_json)
else
response = client.send(method, url, @standard_headers)
end
handle_response(url, response, &block)
end
# Map HTTP status to success (yield parsed JSON + Location) or a typed error.
def handle_response(url, response)
case response.status
when 200, 201
json_body = JSON.parse(response.body)
yield json_body, response["Location"] if block_given?
when 404
raise NotFound.new(url)
when 400
raise ValidationError.new(response)
else
raise UnknownError.new(response)
end
end
end
end
end
|
module Fastlane
  module Actions
    module SharedValues
      # Lane-context key under which the created/updated item URL is stored.
      PODIO_ITEM_URL = :PODIO_ITEM_URL
    end
    # Creates or updates an item in a Podio app via app authentication.
    class PodioItemAction < Action
      AUTH_URL = 'https://podio.com/oauth/token'
      BASE_URL = 'https://api.podio.com'
      def self.run(params)
        require 'rest_client'
        require 'json'
        require 'uri'
        post_item(params)
      end
      #####################################################
      # @!group Documentation
      #####################################################
      def self.description
        'Creates an item within your Podio app. In case an item with the given identifying value already exists within your Podio app, it updates that item. See https://developers.podio.com'
      end
      def self.details
        # The string below was previously unterminated (missing closing
        # quote), which made everything up to the next quote part of the
        # literal and broke the file's syntax.
        "Use this action to create or update an item within your Podio app
        (see https://help.podio.com/hc/en-us/articles/201019278-Creating-apps-).
        Pass in dictionary with field keys and their values.
        Field key is located under Modify app -> Advanced -> Developer -> External ID
        (see https://developers.podio.com/examples/items)."
      end
      def self.available_options
        [
          FastlaneCore::ConfigItem.new(key: :client_id,
                                       env_name: 'PODIO_ITEM_CLIENT_ID',
                                       description: 'Client ID for Podio API (see https://developers.podio.com/api-key)',
                                       is_string: true,
                                       verify_block: proc do |value|
                                         fail "No Client ID for Podio given, pass using `client_id: 'id'`".red unless value && !value.empty?
                                       end),
          FastlaneCore::ConfigItem.new(key: :client_secret,
                                       env_name: 'PODIO_ITEM_CLIENT_SECRET',
                                       description: 'Client secret for Podio API (see https://developers.podio.com/api-key)',
                                       is_string: true,
                                       verify_block: proc do |value|
                                         fail "No Client Secret for Podio given, pass using `client_secret: 'secret'`".red unless value && !value.empty?
                                       end),
          FastlaneCore::ConfigItem.new(key: :app_id,
                                       env_name: 'PODIO_ITEM_APP_ID',
                                       description: 'App ID of the app you intend to authenticate with (see https://developers.podio.com/authentication/app_auth)',
                                       is_string: true,
                                       verify_block: proc do |value|
                                         fail "No App ID for Podio given, pass using `app_id: 'id'`".red unless value && !value.empty?
                                       end),
          FastlaneCore::ConfigItem.new(key: :app_token,
                                       env_name: 'PODIO_ITEM_APP_TOKEN',
                                       description: 'App token of the app you intend to authenticate with (see https://developers.podio.com/authentication/app_auth)',
                                       is_string: true,
                                       verify_block: proc do |value|
                                         fail "No App token for Podio given, pass using `app_token: 'token'`".red unless value && !value.empty?
                                       end),
          FastlaneCore::ConfigItem.new(key: :identifying_field,
                                       env_name: 'PODIO_ITEM_IDENTIFYING_FIELD',
                                       description: 'String specifying the field key used for identification of an item',
                                       is_string: true,
                                       verify_block: proc do |value|
                                         fail "No Identifying field given, pass using `identifying_field: 'field name'`".red unless value && !value.empty?
                                       end),
          FastlaneCore::ConfigItem.new(key: :identifying_value,
                                       description: 'String uniquely specifying an item within the app',
                                       is_string: true,
                                       verify_block: proc do |value|
                                         fail "No Identifying value given, pass using `identifying_value: 'unique value'`".red unless value && !value.empty?
                                       end),
          FastlaneCore::ConfigItem.new(key: :other_fields,
                                       description: 'Dictionary of your app fields. Podio supports several field types, see https://developers.podio.com/doc/items',
                                       is_string: false,
                                       optional: true)
        ]
      end
      def self.output
        [
          ['PODIO_ITEM_URL', 'URL to newly created (or updated) Podio item']
        ]
      end
      def self.authors
        ['pprochazka72, laugejepsen']
      end
      def self.is_supported?(_platform)
        true
      end
      #####################################################
      # @!group Logic
      #####################################################
      # Authenticate, find-or-create the item, turn URL-valued fields into
      # Podio embeds, push the field values, and publish the item URL into
      # the lane context.
      def self.post_item(options)
        auth_config = authenticate(options[:client_id],
                                   options[:client_secret],
                                   options[:app_id],
                                   options[:app_token])
        item_id, item_url = get_item(auth_config,
                                     options[:identifying_field],
                                     options[:identifying_value],
                                     options[:app_id])
        unless options[:other_fields].nil?
          options[:other_fields].each do |key, value|
            uri = URI.parse(value)
            if uri.kind_of?(URI::HTTP)
              # Link fields must be sent as embed ids, not raw URLs.
              link_embed_id = get_embed_id(auth_config, uri)
              options[:other_fields].merge!(key => link_embed_id)
            end
          end
          update_item(auth_config, item_id, options[:other_fields])
        end
        Actions.lane_context[SharedValues::PODIO_ITEM_URL] = item_url
      end
      # App-flow OAuth; returns the headers to send on every API call.
      def self.authenticate(client_id, client_secret, app_id, app_token)
        auth_response = RestClient.post AUTH_URL, grant_type: 'app',
                                                  app_id: app_id,
                                                  app_token: app_token,
                                                  client_id: client_id,
                                                  client_secret: client_secret
        fail 'Failed to authenticate with Podio API' if auth_response.code != 200
        auth_response_dictionary = JSON.parse(auth_response.body)
        access_token = auth_response_dictionary['access_token']
        { Authorization: "OAuth2 #{access_token}", content_type: :json, accept: :json }
      end
      # Look the item up by its identifying value; create it when missing.
      def self.get_item(auth_config, identifying_field, identifying_value, app_id)
        item_id, item_url = get_existing_item(auth_config, identifying_value, app_id)
        unless item_id
          item_id, item_url = create_item(auth_config, identifying_field, identifying_value, app_id)
        end
        [item_id, item_url]
      end
      # Search the app for an item whose title exactly matches the value.
      def self.get_existing_item(auth_config, identifying_value, app_id)
        filter_request_body = { query: identifying_value, limit: 1, ref_type: 'item' }.to_json
        filter_response = RestClient.post "#{BASE_URL}/search/app/#{app_id}/", filter_request_body, auth_config
        fail "Failed to search for already existing item #{identifying_value}" if filter_response.code != 200
        existing_items = JSON.parse(filter_response.body)
        existing_item_id = nil
        existing_item_url = nil
        if existing_items.length > 0
          existing_item = existing_items[0]
          if existing_item['title'] == identifying_value
            existing_item_id = existing_item['id']
            existing_item_url = existing_item['link']
          end
        end
        [existing_item_id, existing_item_url]
      end
      # Create a fresh item carrying only the identifying field.
      def self.create_item(auth_config, identifying_field, identifying_value, app_id)
        item_request_body = { fields: { identifying_field => identifying_value } }.to_json
        item_response = RestClient.post "#{BASE_URL}/item/app/#{app_id}", item_request_body, auth_config
        fail "Failed to create item \"#{identifying_value}\"" if item_response.code != 200
        item_response_dictionary = JSON.parse(item_response.body)
        [item_response_dictionary['item_id'], item_response_dictionary['link']]
      end
      # Push field values onto an existing item.  200 and 204 are the
      # success codes; the previous check (`unless code != 200 || code !=
      # 204`) was a tautology and never raised.
      def self.update_item(auth_config, item_id, fields)
        if fields.length > 0
          item_request_body = { fields: fields }.to_json
          item_response = RestClient.put "#{BASE_URL}/item/#{item_id}", item_request_body, auth_config
          fail "Failed to update item values \"#{fields}\"" unless [200, 204].include?(item_response.code)
        end
      end
      # Register a URL with Podio's embed service; returns the embed id.
      # (Previously interpolated an undefined local `link` instead of `url`,
      # which raised NameError on the failure path.)
      def self.get_embed_id(auth_config, url)
        embed_request_body = { url: url }.to_json
        embed_response = RestClient.post "#{BASE_URL}/embed/", embed_request_body, auth_config
        fail "Failed to create embed for link #{url}" if embed_response.code != 200
        embed_response_dictionary = JSON.parse(embed_response.body)
        embed_response_dictionary['embed_id']
      end
    end
  end
end
Fixed syntax error
module Fastlane
module Actions
module SharedValues
# Lane-context key under which the created/updated item URL is stored.
PODIO_ITEM_URL = :PODIO_ITEM_URL
end
class PodioItemAction < Action
AUTH_URL = 'https://podio.com/oauth/token'
BASE_URL = 'https://api.podio.com'
# Fastlane entry point: lazily load HTTP/JSON deps, then create or update
# the Podio item described by params.
def self.run(params)
require 'rest_client'
require 'json'
require 'uri'
post_item(params)
end
#####################################################
# @!group Documentation
#####################################################
# One-line summary shown in `fastlane actions`.
def self.description
'Creates an item within your Podio app. In case an item with the given identifying value already exists within your Podio app, it updates that item. See https://developers.podio.com'
end
# Longer help text shown in `fastlane action podio_item`.
def self.details
"Use this action to create or update an item within your Podio app
(see https://help.podio.com/hc/en-us/articles/201019278-Creating-apps-).
Pass in dictionary with field keys and their values.
Field key is located under Modify app -> Advanced -> Developer -> External ID
(see https://developers.podio.com/examples/items)."
end
# Declares the action's configuration: Podio API credentials, app
# authentication, the field used to identify an item, and an optional hash
# of additional field values. Each required option raises from its
# verify_block when missing or empty.
def self.available_options
  [
    # API key credentials (https://developers.podio.com/api-key).
    FastlaneCore::ConfigItem.new(key: :client_id,
                                 env_name: 'PODIO_ITEM_CLIENT_ID',
                                 description: 'Client ID for Podio API (see https://developers.podio.com/api-key)',
                                 is_string: true,
                                 verify_block: proc do |value|
                                   fail "No Client ID for Podio given, pass using `client_id: 'id'`".red unless value && !value.empty?
                                 end),
    FastlaneCore::ConfigItem.new(key: :client_secret,
                                 env_name: 'PODIO_ITEM_CLIENT_SECRET',
                                 description: 'Client secret for Podio API (see https://developers.podio.com/api-key)',
                                 is_string: true,
                                 verify_block: proc do |value|
                                   fail "No Client Secret for Podio given, pass using `client_secret: 'secret'`".red unless value && !value.empty?
                                 end),
    # App-level authentication (https://developers.podio.com/authentication/app_auth).
    FastlaneCore::ConfigItem.new(key: :app_id,
                                 env_name: 'PODIO_ITEM_APP_ID',
                                 description: 'App ID of the app you intend to authenticate with (see https://developers.podio.com/authentication/app_auth)',
                                 is_string: true,
                                 verify_block: proc do |value|
                                   fail "No App ID for Podio given, pass using `app_id: 'id'`".red unless value && !value.empty?
                                 end),
    FastlaneCore::ConfigItem.new(key: :app_token,
                                 env_name: 'PODIO_ITEM_APP_TOKEN',
                                 description: 'App token of the app you intend to authenticate with (see https://developers.podio.com/authentication/app_auth)',
                                 is_string: true,
                                 verify_block: proc do |value|
                                   fail "No App token for Podio given, pass using `app_token: 'token'`".red unless value && !value.empty?
                                 end),
    # Which app field identifies an item, and the value to match/create.
    FastlaneCore::ConfigItem.new(key: :identifying_field,
                                 env_name: 'PODIO_ITEM_IDENTIFYING_FIELD',
                                 description: 'String specifying the field key used for identification of an item',
                                 is_string: true,
                                 verify_block: proc do |value|
                                   fail "No Identifying field given, pass using `identifying_field: 'field name'`".red unless value && !value.empty?
                                 end),
    FastlaneCore::ConfigItem.new(key: :identifying_value,
                                 description: 'String uniquely specifying an item within the app',
                                 is_string: true,
                                 verify_block: proc do |value|
                                   fail "No Identifying value given, pass using `identifying_value: 'unique value'`".red unless value && !value.empty?
                                 end),
    # Optional extra field values (URL values are converted to embeds in post_item).
    FastlaneCore::ConfigItem.new(key: :other_fields,
                                 description: 'Dictionary of your app fields. Podio supports several field types, see https://developers.podio.com/doc/items',
                                 is_string: false,
                                 optional: true)
  ]
end
# Lane-context values this action publishes.
def self.output
  [
    ['PODIO_ITEM_URL', 'URL to newly created (or updated) Podio item']
  ]
end
# GitHub handles of the action's authors.
def self.authors
  ['pprochazka72, laugejepsen']
end
# The action is platform-independent (pure HTTP), so every platform is
# supported. (`is_` prefix is the fastlane Action interface convention.)
def self.is_supported?(_platform)
  true
end
#####################################################
# @!group Logic
#####################################################
# Authenticates with Podio, finds or creates the target item, converts any
# HTTP(S) field values into embed IDs, pushes the remaining field values,
# and records the resulting item URL in the lane context.
def self.post_item(options)
  auth_config = authenticate(options[:client_id],
                             options[:client_secret],
                             options[:app_id],
                             options[:app_token])
  item_id, item_url = get_item(auth_config,
                               options[:identifying_field],
                               options[:identifying_value],
                               options[:app_id])
  other_fields = options[:other_fields]
  unless other_fields.nil?
    other_fields.each do |field_key, field_value|
      parsed = URI.parse(field_value)
      next unless parsed.kind_of?(URI::HTTP)
      # Replace the raw URL with its Podio embed ID.
      other_fields.merge!(field_key => get_embed_id(auth_config, parsed))
    end
    update_item(auth_config, item_id, other_fields)
  end
  Actions.lane_context[SharedValues::PODIO_ITEM_URL] = item_url
end
# Performs Podio "app" grant authentication and returns the request-header
# hash (Authorization + JSON content negotiation) used by all later calls.
def self.authenticate(client_id, client_secret, app_id, app_token)
  response = RestClient.post AUTH_URL, grant_type: 'app',
                                       app_id: app_id,
                                       app_token: app_token,
                                       client_id: client_id,
                                       client_secret: client_secret
  fail 'Failed to authenticate with Podio API' if response.code != 200
  token = JSON.parse(response.body)['access_token']
  { Authorization: "OAuth2 #{token}", content_type: :json, accept: :json }
end
# Returns [item_id, item_url] for the item matching identifying_value,
# creating it first when no existing item is found.
def self.get_item(auth_config, identifying_field, identifying_value, app_id)
  item_id, item_url = get_existing_item(auth_config, identifying_value, app_id)
  if item_id.nil?
    item_id, item_url = create_item(auth_config, identifying_field, identifying_value, app_id)
  end
  [item_id, item_url]
end
# Searches the app for an item whose title equals identifying_value.
# Returns [item_id, item_url], or [nil, nil] when nothing matches exactly.
def self.get_existing_item(auth_config, identifying_value, app_id)
  search_body = { query: identifying_value, limit: 1, ref_type: 'item' }.to_json
  search_response = RestClient.post "#{BASE_URL}/search/app/#{app_id}/", search_body, auth_config
  fail "Failed to search for already existing item #{identifying_value}" if search_response.code != 200
  matches = JSON.parse(search_response.body)
  candidate = matches.length > 0 ? matches[0] : nil
  # Search is fuzzy; only accept an exact title match.
  if candidate && candidate['title'] == identifying_value
    [candidate['id'], candidate['link']]
  else
    [nil, nil]
  end
end
# Creates a new item in the app with only the identifying field populated.
# Returns [item_id, item_url] of the newly created item.
def self.create_item(auth_config, identifying_field, identifying_value, app_id)
  item_request_body = { fields: { identifying_field => identifying_value } }.to_json
  item_response = RestClient.post "#{BASE_URL}/item/app/#{app_id}", item_request_body, auth_config
  fail "Failed to create item \"#{identifying_value}\"" if item_response.code != 200
  item_response_dictionary = JSON.parse(item_response.body)
  [item_response_dictionary['item_id'], item_response_dictionary['link']]
end
# Pushes new field values onto an existing Podio item.
#
# auth_config - request-header hash with the OAuth2 Authorization token
# item_id     - ID of the Podio item to update
# fields      - hash of field external IDs to values; no request when empty
def self.update_item(auth_config, item_id, fields)
  if fields.length > 0
    item_request_body = { fields: fields }.to_json
    item_response = RestClient.put "#{BASE_URL}/item/#{item_id}", item_request_body, auth_config
    # Podio answers 200 or 204 on a successful update; anything else is a
    # failure. The previous guard (`unless code != 200 || code != 204`) was
    # a tautology, so update failures were silently swallowed.
    unless [200, 204].include?(item_response.code)
      fail "Failed to update item values \"#{fields}\""
    end
  end
end
# Registers a URL with Podio's embed service and returns the embed ID used
# to store the link in an embed-type app field.
#
# auth_config - request-header hash with the OAuth2 Authorization token
# url         - the URI to embed
def self.get_embed_id(auth_config, url)
  embed_request_body = { url: url }.to_json
  embed_response = RestClient.post "#{BASE_URL}/embed/", embed_request_body, auth_config
  # Fixed NameError: the failure message referenced an undefined local
  # `link`; the method parameter is `url`.
  fail "Failed to create embed for link #{url}" if embed_response.code != 200
  embed_response_dictionary = JSON.parse(embed_response.body)
  embed_response_dictionary['embed_id']
end
end
end
end
|
# -*- encoding: utf-8 -*-
require File.expand_path('../lib/km_everything/version', __FILE__)

# Gem packaging metadata for km_everything.
# NOTE(review): no `license` and no `description` are set — `gem build`
# typically warns about both; confirm whether that is intentional.
Gem::Specification.new do |gem|
  gem.authors = ["Trace Wax", "Justin Schier", "Alex Kramer", "Case Commons LLC"]
  gem.email = ["gems@tracedwax.com"]
  gem.summary = "Log Rails controller actions to KissMetrics"
  gem.homepage = "https://github.com/tracedwax/km_everything"
  # Package every git-tracked file; $\ is the output record separator.
  gem.files = `git ls-files`.split($\)
  gem.executables = gem.files.grep(%r{^bin/}).map{ |f| File.basename(f) }
  gem.test_files = gem.files.grep(%r{^(test|spec|features)/})
  gem.name = "km_everything"
  gem.require_paths = ["lib"]
  gem.version = KmEverything::VERSION
  gem.required_ruby_version = '>= 1.9.2'
  gem.add_development_dependency 'rspec', '~> 2.11'
end
Add license to gemspec
# -*- encoding: utf-8 -*-
require File.expand_path('../lib/km_everything/version', __FILE__)

# Gem packaging metadata for km_everything (MIT-licensed).
Gem::Specification.new do |gem|
  gem.authors = ["Trace Wax", "Justin Schier", "Alex Kramer", "Case Commons LLC"]
  gem.email = ["gems@tracedwax.com"]
  gem.summary = "Log Rails controller actions to KissMetrics"
  gem.homepage = "https://github.com/tracedwax/km_everything"
  # Package every git-tracked file; $\ is the output record separator.
  gem.files = `git ls-files`.split($\)
  gem.executables = gem.files.grep(%r{^bin/}).map{ |f| File.basename(f) }
  gem.test_files = gem.files.grep(%r{^(test|spec|features)/})
  gem.name = "km_everything"
  gem.require_paths = ["lib"]
  gem.version = KmEverything::VERSION
  gem.license = "MIT"
  gem.required_ruby_version = '>= 1.9.2'
  gem.add_development_dependency 'rspec', '~> 2.11'
end
|
# coding: utf-8
lib = File.expand_path('../lib', __FILE__)
$LOAD_PATH.unshift(lib) unless $LOAD_PATH.include?(lib)
require 'layer6_client/version'

# Gem packaging metadata for layer6_client.
# NOTE(review): summary, description, homepage, and allowed_push_host are
# still bundler-generated TODO placeholders — they must be replaced before
# the gem can be built/released.
Gem::Specification.new do |spec|
  spec.name          = "layer6_client"
  spec.version       = Layer6Client::VERSION
  spec.authors       = ["Anthony Clark"]
  spec.email         = ["anthony.clark@shopify.com"]
  spec.summary       = %q{TODO: Write a short summary, because Rubygems requires one.}
  spec.description   = %q{TODO: Write a longer description or delete this line.}
  spec.homepage      = "TODO: Put your gem's website or public repo URL here."
  spec.license       = "MIT"
  # Prevent pushing this gem to RubyGems.org. To allow pushes either set the 'allowed_push_host'
  # to allow pushing to a single host or delete this section to allow pushing to any host.
  if spec.respond_to?(:metadata)
    spec.metadata['allowed_push_host'] = "TODO: Set to 'http://mygemserver.com'"
  else
    raise "RubyGems 2.0 or newer is required to protect against public gem pushes."
  end
  # Package git-tracked files, excluding the test directories.
  spec.files         = `git ls-files -z`.split("\x0").reject { |f| f.match(%r{^(test|spec|features)/}) }
  spec.bindir        = "exe"
  spec.executables   = spec.files.grep(%r{^exe/}) { |f| File.basename(f) }
  spec.require_paths = ["lib"]
  spec.add_development_dependency "bundler", "~> 1.12"
  spec.add_development_dependency "rake", "~> 10.0"
  spec.add_development_dependency "minitest", "~> 5.0"
end
Update gemspec
# coding: utf-8
lib = File.expand_path('../lib', __FILE__)
$LOAD_PATH.unshift(lib) unless $LOAD_PATH.include?(lib)
require 'layer6_client/version'

# Gem packaging metadata for layer6_client (MIT-licensed Layer6 API bindings).
Gem::Specification.new do |spec|
  spec.name          = "layer6_client"
  spec.version       = Layer6Client::VERSION
  spec.authors       = ["Anthony Clark"]
  spec.email         = ["anthony.clark@shopify.com"]
  spec.summary       = 'Ruby bindings for the Layer6 api'
  spec.homepage      = "https://github.com/AnthonyClark/layer6_client"
  spec.license       = "MIT"
  # Package git-tracked files, excluding the test directories.
  spec.files         = `git ls-files -z`.split("\x0").reject { |f| f.match(%r{^(test|spec|features)/}) }
  spec.bindir        = "exe"
  spec.executables   = spec.files.grep(%r{^exe/}) { |f| File.basename(f) }
  spec.require_paths = ["lib"]
  spec.add_development_dependency "bundler", "~> 1.12"
  spec.add_development_dependency "rake", "~> 10.0"
  spec.add_development_dependency "minitest", "~> 5.0"
end
|
module Filter
  # Renders a Bootstrap-styled search input for a ransack-style attribute.
  class Builder::SearchField < Builder
    # Returns a lambda(attr, opts) that emits the form-group markup.
    # With opts[:skip_button] a label + input is emitted; otherwise the
    # input is wrapped in an input-group with a search button.
    def self.render
      lambda do |attr, opts|
        attr = opts[:is_scoped] ? attr : [attr, 'cont'].join('_')
        haml_tag :div, class: 'form-group' do
          # Compute the title and shared input options once — the original
          # repeated the `opts[:title] || "Search #{...}"` interpolation and
          # the full options hash in three places.
          title = opts[:title] || "Search #{attr.to_s.humanize}"
          input_options = { value: opts[:value], class: 'form-control', placeholder: title }
          if opts[:skip_button]
            haml_concat label(opts[:namespace], attr, title)
            haml_concat text_field(opts[:namespace], attr, input_options)
          else
            haml_tag :div, class: 'input-group' do
              haml_concat text_field(opts[:namespace], attr, input_options)
              haml_tag :span, class: 'input-group-btn' do
                haml_tag :button, type: 'button', class: 'btn btn-primary search' do
                  haml_tag :i, class: 'fa fa-search'
                  haml_concat 'Search'
                end
              end
            end
          end
        end
      end
    end

    # Reads the current search value from params, accepting both the raw
    # name and its ransack "_cont" variant.
    def self.value(params, name)
      params[name] || params["#{name}_cont"]
    end
  end
end
Clean up
module Filter
  # Renders a Bootstrap-styled search input for a ransack-style attribute.
  class Builder::SearchField < Builder
    # Returns a lambda(attr, opts) that emits the form-group markup: either
    # a labelled plain input (opts[:skip_button]) or an input-group with a
    # search button.
    def self.render
      lambda do |attr, opts|
        attr = [attr, 'cont'].join('_') unless opts[:is_scoped]
        haml_tag :div, class: 'form-group' do
          placeholder = opts[:title] || "Search #{attr.to_s.humanize}"
          field_opts = { value: opts[:value], class: 'form-control', placeholder: placeholder }
          if opts[:skip_button]
            haml_concat label(opts[:namespace], attr, placeholder)
            haml_concat text_field(opts[:namespace], attr, field_opts)
          else
            haml_tag :div, class: 'input-group' do
              haml_concat text_field(opts[:namespace], attr, field_opts)
              haml_tag :span, class: 'input-group-btn' do
                haml_tag :button, type: 'button', class: 'btn btn-primary search' do
                  haml_tag :i, class: 'fa fa-search'
                  haml_concat 'Search'
                end
              end
            end
          end
        end
      end
    end

    # Reads the current search value from params, accepting both the raw
    # name and its ransack "_cont" variant.
    def self.value(params, name)
      params[name] || params["#{name}_cont"]
    end
  end
end
|
require "fluent/plugin/filter"
module Fluent::Plugin
class ConcatFilter < Filter
Fluent::Plugin.register_filter("concat", self)
helpers :timer, :event_emitter
# --- plugin configuration -------------------------------------------------
# Exactly one concatenation mode is selected in #configure from these
# params: :line (n_lines), :partial (partial_key), or :regexp
# (multiline_start_regexp / multiline_end_regexp).
desc "The key for part of multiline log"
config_param :key, :string
desc "The separator of lines"
config_param :separator, :string, default: "\n"
desc "The number of lines"
config_param :n_lines, :integer, default: nil
desc "The regexp to match beginning of multiline"
config_param :multiline_start_regexp, :string, default: nil
desc "The regexp to match ending of multiline"
config_param :multiline_end_regexp, :string, default: nil
desc "The regexp to match continuous lines"
config_param :continuous_line_regexp, :string, default: nil
desc "The key to determine which stream an event belongs to"
config_param :stream_identity_key, :string, default: nil
desc "The interval between data flushes, 0 means disable timeout"
config_param :flush_interval, :time, default: 60
desc "The label name to handle timeout"
config_param :timeout_label, :string, default: nil
desc "Use timestamp of first record when buffer is flushed"
config_param :use_first_timestamp, :bool, default: false
desc "The field name that is the reference to concatenate records"
config_param :partial_key, :string, default: nil
desc "The value stored in the field specified by partial_key that represent partial log"
config_param :partial_value, :string, default: nil

# Raised (via the error router) when a stream is flushed by timeout.
class TimeoutError < StandardError
end
# Sets up per-stream state: @buffer maps a stream identity to its pending
# [tag, time, record] triples; @timeout_map tracks each stream's last
# activity for timeout flushing, guarded by a mutex because the timer
# thread and the filter path both touch it.
def initialize
  super
  @buffer = Hash.new {|h, k| h[k] = [] }
  @timeout_map_mutex = Thread::Mutex.new
  @timeout_map_mutex.synchronize do
    @timeout_map = Hash.new {|h, k| h[k] = Fluent::Engine.now }
  end
end
# Validates the mutually exclusive mode parameters and selects @mode
# (:line, :partial, or :regexp), compiling the regexp params.
def configure(conf)
  super
  if @n_lines && (@multiline_start_regexp || @multiline_end_regexp)
    raise Fluent::ConfigError, "n_lines and multiline_start_regexp/multiline_end_regexp are exclusive"
  end
  if @n_lines.nil? && @multiline_start_regexp.nil? && @multiline_end_regexp.nil? && @partial_key.nil?
    raise Fluent::ConfigError, "Either n_lines or multiline_start_regexp or multiline_end_regexp is required"
  end
  if @partial_key && @n_lines
    raise Fluent::ConfigError, "partial_key and n_lines are exclusive"
  end
  if @partial_key && (@multiline_start_regexp || @multiline_end_regexp)
    raise Fluent::ConfigError, "partial_key and multiline_start_regexp/multiline_end_regexp are exclusive"
  end
  if @partial_key && @partial_value.nil?
    raise Fluent::ConfigError, "partial_value is required when partial_key is specified"
  end
  @mode = nil
  case
  when @n_lines
    @mode = :line
  when @partial_key
    @mode = :partial
  when @multiline_start_regexp || @multiline_end_regexp
    @mode = :regexp
    # [1..-2] strips the first and last characters — assumes the config
    # value is written /pattern/ with surrounding slashes; TODO confirm
    # unslashed patterns are rejected upstream.
    if @multiline_start_regexp
      @multiline_start_regexp = Regexp.compile(@multiline_start_regexp[1..-2])
    end
    if @multiline_end_regexp
      @multiline_end_regexp = Regexp.compile(@multiline_end_regexp[1..-2])
    end
    if @continuous_line_regexp
      @continuous_line_regexp = Regexp.compile(@continuous_line_regexp[1..-2])
    end
  end
end
# Starts a 1-second plugin timer that drives timeout-based flushing.
def start
  super
  @finished = false
  timer_execute(:filter_concat_timer, 1, &method(:on_timer))
end
# Stops the timer path (via @finished) and emits whatever is still
# buffered before shutting the plugin down.
def shutdown
  @finished = true
  flush_remaining_buffer
  super
end
# Filters the event stream: fluentd-internal events (fluent.* tags) and
# records missing @key pass through untouched; everything else is buffered
# via #process, and any records it flushes are re-emitted, merged over the
# triggering record. Errors are routed to the error stream per record.
def filter_stream(tag, es)
  new_es = Fluent::MultiEventStream.new
  es.each do |time, record|
    if /\Afluent\.(?:trace|debug|info|warn|error|fatal)\z/ =~ tag
      new_es.add(time, record)
      next
    end
    unless record.key?(@key)
      new_es.add(time, record)
      next
    end
    begin
      flushed_es = process(tag, time, record)
      unless flushed_es.empty?
        flushed_es.each do |_time, new_record|
          time = _time if @use_first_timestamp
          new_es.add(time, record.merge(new_record))
        end
      end
    rescue => e
      router.emit_error_event(tag, time, record, e)
    end
  end
  new_es
end
private
# Timer callback (every second): flushes streams idle longer than
# @flush_interval. Disabled when flush_interval <= 0 or after shutdown.
def on_timer
  return if @flush_interval <= 0
  return if @finished
  flush_timeout_buffer
rescue => e
  log.error "failed to flush timeout buffer", error: e
end
# Routes one event to the handler for the configured mode, refreshing the
# stream's last-seen timestamp used for timeout flushing.
def process(tag, time, record)
  identity_suffix = @stream_identity_key ? record[@stream_identity_key] : "default"
  stream_identity = "#{tag}:#{identity_suffix}"
  @timeout_map_mutex.synchronize { @timeout_map[stream_identity] = Fluent::Engine.now }
  case @mode
  when :line    then process_line(stream_identity, tag, time, record)
  when :partial then process_partial(stream_identity, tag, time, record)
  when :regexp  then process_regexp(stream_identity, tag, time, record)
  end
end
# Buffers the event; once @n_lines events have accumulated for the stream,
# emits one concatenated record.
def process_line(stream_identity, tag, time, record)
  flushed = Fluent::MultiEventStream.new
  @buffer[stream_identity] << [tag, time, record]
  if @buffer[stream_identity].size >= @n_lines
    new_time, new_record = flush_buffer(stream_identity)
    flushed.add(@use_first_timestamp ? new_time : time, new_record)
  end
  flushed
end
# Buffers the event; flushes the stream as soon as a record arrives whose
# @partial_key value no longer equals @partial_value (i.e. the final part).
def process_partial(stream_identity, tag, time, record)
  flushed = Fluent::MultiEventStream.new
  @buffer[stream_identity] << [tag, time, record]
  if record[@partial_key] != @partial_value
    new_time, new_record = flush_buffer(stream_identity)
    flushed.add(@use_first_timestamp ? new_time : time, new_record)
  end
  flushed
end
# Regexp-mode state machine. Branch order matters:
#  1. a start-line flushes any pending buffer and opens a new one (and may
#     immediately close it when it is also an end-line);
#  2. an end-line closes the current buffer;
#  3. otherwise the line is either a continuation, a flush trigger, or —
#     when a start regexp is configured and nothing is buffered — passed
#     through as-is.
def process_regexp(stream_identity, tag, time, record)
  new_es = Fluent::MultiEventStream.new
  case
  when firstline?(record[@key])
    if @buffer[stream_identity].empty?
      @buffer[stream_identity] << [tag, time, record]
      # Single-line record: start and end pattern both match.
      if lastline?(record[@key])
        new_time, new_record = flush_buffer(stream_identity)
        time = new_time if @use_first_timestamp
        new_es.add(time, new_record)
      end
    else
      # New start-line: flush the previous record, seed the buffer with
      # this event (passed as new_element to flush_buffer).
      new_time, new_record = flush_buffer(stream_identity, [tag, time, record])
      time = new_time if @use_first_timestamp
      new_es.add(time, new_record)
      if lastline?(record[@key])
        new_time, new_record = flush_buffer(stream_identity)
        time = new_time if @use_first_timestamp
        new_es.add(time, new_record)
      end
      return new_es
    end
  when lastline?(record[@key])
    @buffer[stream_identity] << [tag, time, record]
    new_time, new_record = flush_buffer(stream_identity)
    time = new_time if @use_first_timestamp
    new_es.add(time, new_record)
    return new_es
  else
    if @buffer[stream_identity].empty?
      if !@multiline_start_regexp
        # Only an end regexp is configured: buffer until it matches.
        @buffer[stream_identity] << [tag, time, record]
      else
        # No record in flight and this is not a start-line: pass through.
        new_es.add(time, record)
        return new_es
      end
    else
      if continuous_line?(record[@key])
        # Continuation of the previous line
        @buffer[stream_identity] << [tag, time, record]
      else
        # Non-continuous line: flush the pending record, then emit this
        # event separately.
        new_time, new_record = flush_buffer(stream_identity)
        time = new_time if @use_first_timestamp
        new_es.add(time, new_record)
        new_es.add(time, record)
      end
    end
  end
  new_es
end
# True when text matches the configured start pattern; falsy (nil) when no
# start regexp is configured.
def firstline?(text)
  @multiline_start_regexp && !!@multiline_start_regexp.match(text)
end
# True when text matches the configured end pattern; falsy (nil) when no
# end regexp is configured.
def lastline?(text)
  @multiline_end_regexp && !!@multiline_end_regexp.match(text)
end
# A line counts as a continuation when no continuous_line_regexp is
# configured, or when it matches the configured pattern.
def continuous_line?(text)
  return true unless @continuous_line_regexp
  !@continuous_line_regexp.match(text).nil?
end
# Joins all buffered @key values for the stream into one record and resets
# the buffer, optionally seeding it with new_element (the event that
# triggered the flush). Returns [time_of_first_event, merged_record];
# non-@key fields come from the FIRST buffered record.
def flush_buffer(stream_identity, new_element = nil)
  lines = @buffer[stream_identity].map {|_tag, _time, record| record[@key] }
  _tag, time, first_record = @buffer[stream_identity].first
  new_record = {
    @key => lines.join(@separator)
  }
  @buffer[stream_identity] = []
  @buffer[stream_identity] << new_element if new_element
  [time, first_record.merge(new_record)]
end
# Flushes every stream that has been idle for at least @flush_interval and
# routes the flushed record via handle_timeout_error (timeout_label or the
# error stream). Runs under the timeout-map mutex; flushed streams are
# removed from the map afterwards.
def flush_timeout_buffer
  now = Fluent::Engine.now
  timeout_stream_identities = []
  @timeout_map_mutex.synchronize do
    @timeout_map.each do |stream_identity, previous_timestamp|
      next if @flush_interval > (now - previous_timestamp)
      next if @buffer[stream_identity].empty?
      time, flushed_record = flush_buffer(stream_identity)
      timeout_stream_identities << stream_identity
      # The stream identity is "tag:suffix", so the tag is the first part.
      tag = stream_identity.split(":").first
      message = "Timeout flush: #{stream_identity}"
      handle_timeout_error(tag, @use_first_timestamp ? time : now, flushed_record, message)
      log.info(message)
    end
    @timeout_map.reject! do |stream_identity, _|
      timeout_stream_identities.include?(stream_identity)
    end
  end
end
# Shutdown path: concatenates whatever is still buffered per stream and
# routes it via handle_timeout_error, then clears all buffers. Note the
# non-@key fields come from the FIRST buffered element, matching
# flush_buffer's merge semantics.
def flush_remaining_buffer
  @buffer.each do |stream_identity, elements|
    next if elements.empty?
    lines = elements.map {|_tag, _time, record| record[@key] }
    new_record = {
      @key => lines.join(@separator)
    }
    tag, time, record = elements.first
    message = "Flush remaining buffer: #{stream_identity}"
    handle_timeout_error(tag, time, record.merge(new_record), message)
    log.info(message)
  end
  @buffer.clear
end
# Emits a timed-out/remaining record: to the configured @timeout_label's
# event router when set, otherwise to the error stream wrapped in a
# TimeoutError carrying the flush message.
def handle_timeout_error(tag, time, record, message)
  if @timeout_label
    event_router = event_emitter_router(@timeout_label)
    event_router.emit(tag, time, record)
  else
    router.emit_error_event(tag, time, record, TimeoutError.new(message))
  end
end
end
end
Remove partial_key
Signed-off-by: Kenji Okimoto <7a4b90a0a1e6fc688adec907898b6822ce215e6c@clear-code.com>
require "fluent/plugin/filter"
module Fluent::Plugin
class ConcatFilter < Filter
Fluent::Plugin.register_filter("concat", self)
helpers :timer, :event_emitter
desc "The key for part of multiline log"
config_param :key, :string
desc "The separator of lines"
config_param :separator, :string, default: "\n"
desc "The number of lines"
config_param :n_lines, :integer, default: nil
desc "The regexp to match beginning of multiline"
config_param :multiline_start_regexp, :string, default: nil
desc "The regexp to match ending of multiline"
config_param :multiline_end_regexp, :string, default: nil
desc "The regexp to match continuous lines"
config_param :continuous_line_regexp, :string, default: nil
desc "The key to determine which stream an event belongs to"
config_param :stream_identity_key, :string, default: nil
desc "The interval between data flushes, 0 means disable timeout"
config_param :flush_interval, :time, default: 60
desc "The label name to handle timeout"
config_param :timeout_label, :string, default: nil
desc "Use timestamp of first record when buffer is flushed"
config_param :use_first_timestamp, :bool, default: false
desc "The field name that is the reference to concatenate records"
config_param :partial_key, :string, default: nil
desc "The value stored in the field specified by partial_key that represent partial log"
config_param :partial_value, :string, default: nil
desc "If true, keep partial_key in concatenated records"
config_param :keep_partial_key, :bool, default: false
class TimeoutError < StandardError
end
def initialize
super
@buffer = Hash.new {|h, k| h[k] = [] }
@timeout_map_mutex = Thread::Mutex.new
@timeout_map_mutex.synchronize do
@timeout_map = Hash.new {|h, k| h[k] = Fluent::Engine.now }
end
end
def configure(conf)
super
if @n_lines && (@multiline_start_regexp || @multiline_end_regexp)
raise Fluent::ConfigError, "n_lines and multiline_start_regexp/multiline_end_regexp are exclusive"
end
if @n_lines.nil? && @multiline_start_regexp.nil? && @multiline_end_regexp.nil? && @partial_key.nil?
raise Fluent::ConfigError, "Either n_lines or multiline_start_regexp or multiline_end_regexp is required"
end
if @partial_key && @n_lines
raise Fluent::ConfigError, "partial_key and n_lines are exclusive"
end
if @partial_key && (@multiline_start_regexp || @multiline_end_regexp)
raise Fluent::ConfigError, "partial_key and multiline_start_regexp/multiline_end_regexp are exclusive"
end
if @partial_key && @partial_value.nil?
raise Fluent::ConfigError, "partial_value is required when partial_key is specified"
end
@mode = nil
case
when @n_lines
@mode = :line
when @partial_key
@mode = :partial
when @multiline_start_regexp || @multiline_end_regexp
@mode = :regexp
if @multiline_start_regexp
@multiline_start_regexp = Regexp.compile(@multiline_start_regexp[1..-2])
end
if @multiline_end_regexp
@multiline_end_regexp = Regexp.compile(@multiline_end_regexp[1..-2])
end
if @continuous_line_regexp
@continuous_line_regexp = Regexp.compile(@continuous_line_regexp[1..-2])
end
end
end
def start
super
@finished = false
timer_execute(:filter_concat_timer, 1, &method(:on_timer))
end
def shutdown
@finished = true
flush_remaining_buffer
super
end
# Filters the event stream: fluentd-internal events (fluent.* tags) and
# records missing @key pass through untouched; everything else is buffered
# via #process. Flushed records are merged over the triggering record, and
# @partial_key is stripped from the result unless keep_partial_key is set.
def filter_stream(tag, es)
  new_es = Fluent::MultiEventStream.new
  es.each do |time, record|
    if /\Afluent\.(?:trace|debug|info|warn|error|fatal)\z/ =~ tag
      new_es.add(time, record)
      next
    end
    unless record.key?(@key)
      new_es.add(time, record)
      next
    end
    begin
      flushed_es = process(tag, time, record)
      unless flushed_es.empty?
        flushed_es.each do |_time, new_record|
          time = _time if @use_first_timestamp
          merged_record = record.merge(new_record)
          merged_record.delete(@partial_key) unless @keep_partial_key
          new_es.add(time, merged_record)
        end
      end
    rescue => e
      router.emit_error_event(tag, time, record, e)
    end
  end
  new_es
end
private
def on_timer
return if @flush_interval <= 0
return if @finished
flush_timeout_buffer
rescue => e
log.error "failed to flush timeout buffer", error: e
end
def process(tag, time, record)
if @stream_identity_key
stream_identity = "#{tag}:#{record[@stream_identity_key]}"
else
stream_identity = "#{tag}:default"
end
@timeout_map_mutex.synchronize do
@timeout_map[stream_identity] = Fluent::Engine.now
end
case @mode
when :line
process_line(stream_identity, tag, time, record)
when :partial
process_partial(stream_identity, tag, time, record)
when :regexp
process_regexp(stream_identity, tag, time, record)
end
end
def process_line(stream_identity, tag, time, record)
new_es = Fluent::MultiEventStream.new
@buffer[stream_identity] << [tag, time, record]
if @buffer[stream_identity].size >= @n_lines
new_time, new_record = flush_buffer(stream_identity)
time = new_time if @use_first_timestamp
new_es.add(time, new_record)
end
new_es
end
# Buffers partial records; flushes when a record arrives whose
# @partial_key value differs from @partial_value (the final part).
# NOTE(review): @partial_key is deleted from the flushed record here
# unconditionally, even though keep_partial_key exists; the caller
# (filter_stream) re-merges the triggering record, so the value kept when
# keep_partial_key is set comes from that merge — confirm this is intended.
def process_partial(stream_identity, tag, time, record)
  new_es = Fluent::MultiEventStream.new
  @buffer[stream_identity] << [tag, time, record]
  unless @partial_value == record[@partial_key]
    new_time, new_record = flush_buffer(stream_identity)
    time = new_time if @use_first_timestamp
    new_record.delete(@partial_key)
    new_es.add(time, new_record)
  end
  new_es
end
def process_regexp(stream_identity, tag, time, record)
new_es = Fluent::MultiEventStream.new
case
when firstline?(record[@key])
if @buffer[stream_identity].empty?
@buffer[stream_identity] << [tag, time, record]
if lastline?(record[@key])
new_time, new_record = flush_buffer(stream_identity)
time = new_time if @use_first_timestamp
new_es.add(time, new_record)
end
else
new_time, new_record = flush_buffer(stream_identity, [tag, time, record])
time = new_time if @use_first_timestamp
new_es.add(time, new_record)
if lastline?(record[@key])
new_time, new_record = flush_buffer(stream_identity)
time = new_time if @use_first_timestamp
new_es.add(time, new_record)
end
return new_es
end
when lastline?(record[@key])
@buffer[stream_identity] << [tag, time, record]
new_time, new_record = flush_buffer(stream_identity)
time = new_time if @use_first_timestamp
new_es.add(time, new_record)
return new_es
else
if @buffer[stream_identity].empty?
if !@multiline_start_regexp
@buffer[stream_identity] << [tag, time, record]
else
new_es.add(time, record)
return new_es
end
else
if continuous_line?(record[@key])
# Continuation of the previous line
@buffer[stream_identity] << [tag, time, record]
else
new_time, new_record = flush_buffer(stream_identity)
time = new_time if @use_first_timestamp
new_es.add(time, new_record)
new_es.add(time, record)
end
end
end
new_es
end
def firstline?(text)
@multiline_start_regexp && !!@multiline_start_regexp.match(text)
end
def lastline?(text)
@multiline_end_regexp && !!@multiline_end_regexp.match(text)
end
def continuous_line?(text)
if @continuous_line_regexp
!!@continuous_line_regexp.match(text)
else
true
end
end
def flush_buffer(stream_identity, new_element = nil)
lines = @buffer[stream_identity].map {|_tag, _time, record| record[@key] }
_tag, time, first_record = @buffer[stream_identity].first
new_record = {
@key => lines.join(@separator)
}
@buffer[stream_identity] = []
@buffer[stream_identity] << new_element if new_element
[time, first_record.merge(new_record)]
end
def flush_timeout_buffer
now = Fluent::Engine.now
timeout_stream_identities = []
@timeout_map_mutex.synchronize do
@timeout_map.each do |stream_identity, previous_timestamp|
next if @flush_interval > (now - previous_timestamp)
next if @buffer[stream_identity].empty?
time, flushed_record = flush_buffer(stream_identity)
timeout_stream_identities << stream_identity
tag = stream_identity.split(":").first
message = "Timeout flush: #{stream_identity}"
handle_timeout_error(tag, @use_first_timestamp ? time : now, flushed_record, message)
log.info(message)
end
@timeout_map.reject! do |stream_identity, _|
timeout_stream_identities.include?(stream_identity)
end
end
end
def flush_remaining_buffer
@buffer.each do |stream_identity, elements|
next if elements.empty?
lines = elements.map {|_tag, _time, record| record[@key] }
new_record = {
@key => lines.join(@separator)
}
tag, time, record = elements.first
message = "Flush remaining buffer: #{stream_identity}"
handle_timeout_error(tag, time, record.merge(new_record), message)
log.info(message)
end
@buffer.clear
end
def handle_timeout_error(tag, time, record, message)
if @timeout_label
event_router = event_emitter_router(@timeout_label)
event_router.emit(tag, time, record)
else
router.emit_error_event(tag, time, record, TimeoutError.new(message))
end
end
end
end
|
module Fluent
class ConcatFilter < Filter
Plugin.register_filter("concat", self)
desc "The key for part of multiline log"
config_param :key, :string, required: true
desc "The separator of lines"
config_param :separator, :string, default: "\n"
desc "The number of lines"
config_param :n_lines, :integer, default: nil
desc "The regexp to match beginning of multiline"
config_param :multiline_start_regexp, :string, default: nil
desc "The regexp to match ending of multiline"
config_param :multiline_end_regexp, :string, default: nil
desc "The key to determine which stream an event belongs to"
config_param :stream_identity_key, :string, default: nil
def initialize
super
@buffer = Hash.new {|h, k| h[k] = [] }
end
def configure(conf)
super
if @n_lines && @multiline_start_regexp
raise ConfigError, "n_lines and multiline_start_regexp are exclusive"
end
if @n_lines.nil? && @multiline_start_regexp.nil?
raise ConfigError, "Either n_lines or multiline_start_regexp is required"
end
@mode = nil
case
when @n_lines
@mode = :line
when @multiline_start_regexp
@mode = :regexp
@multiline_start_regexp = Regexp.compile(@multiline_start_regexp[1..-2])
if @multiline_end_regexp
@multiline_end_regexp = Regexp.compile(@multiline_end_regexp[1..-2])
end
end
end
def shutdown
super
flush_all_buffer
end
def filter_stream(tag, es)
new_es = MultiEventStream.new
es.each do |time, record|
begin
new_record = process(tag, time, record)
new_es.add(time, record.merge(new_record)) if new_record
rescue => e
router.emit_error_event(tag, time, record, e)
end
end
new_es
end
private
# Buffers one event according to @mode and returns either a concatenated
# record ready for emission, the record itself (pass-through), or nil when
# the event was only buffered.
def process(tag, time, record)
  if @stream_identity_key
    # BUG FIX: the key was quoted (record["@stream_identity_key"]), so the
    # literal string was looked up instead of the field named by the
    # stream_identity_key config — every event fell into one stream.
    stream_identity = "#{tag}:#{record[@stream_identity_key]}"
  else
    stream_identity = "#{tag}:default"
  end
  case @mode
  when :line
    @buffer[stream_identity] << [tag, time, record]
    if @buffer[stream_identity].size >= @n_lines
      return flush_buffer(stream_identity)
    end
  when :regexp
    case
    when firstline?(record[@key])
      if @buffer[stream_identity].empty?
        @buffer[stream_identity] << [tag, time, record]
      else
        # New start-line: flush the pending record and seed the buffer
        # with this event.
        return flush_buffer(stream_identity, [tag, time, record])
      end
    when lastline?(record[@key])
      @buffer[stream_identity] << [tag, time, record]
      return flush_buffer(stream_identity)
    else
      if @buffer[stream_identity].empty?
        # Nothing in flight and not a start-line: pass through.
        return record
      else
        # Continuation of the previous line
        @buffer[stream_identity] << [tag, time, record]
      end
    end
  end
  nil
end
def firstline?(text)
!!@multiline_start_regexp.match(text)
end
def lastline?(text)
@multiline_end_regexp && !!@multiline_end_regexp.match(text)
end
def flush_buffer(stream_identity, new_element = nil)
lines = @buffer[stream_identity].map {|_tag, _time, record| record[@key] }
new_record = {
@key => lines.join(@separator)
}
@buffer[stream_identity] = []
@buffer[stream_identity] << new_element if new_element
new_record
end
# Shutdown path: concatenates whatever is still buffered per stream and
# emits it directly via the router.
# NOTE(review): the non-@key fields here come from elements.last, whereas
# later revisions of this plugin merge over the FIRST buffered record —
# confirm which record's metadata should win.
def flush_all_buffer
  @buffer.each do |stream_identity, elements|
    next if elements.empty?
    es = MultiEventStream.new
    lines = elements.map {|_tag, _time, record| record[@key] }
    new_record = {
      @key => lines.join(@separator)
    }
    tag, time, record = elements.last
    es.add(time, record.merge(new_record))
    router.emit_stream(tag, es)
  end
  @buffer.clear
end
end
end
Add flush interval
See #1
module Fluent
  # Filter plugin that merges multiline log events (such as stack traces)
  # into single events. Events are grouped per "stream identity" so that
  # interleaved streams do not get concatenated together, and incomplete
  # buffers are flushed after +flush_interval+ seconds of inactivity.
  class ConcatFilter < Filter
    Plugin.register_filter("concat", self)

    desc "The key for part of multiline log"
    config_param :key, :string, required: true
    desc "The separator of lines"
    config_param :separator, :string, default: "\n"
    desc "The number of lines"
    config_param :n_lines, :integer, default: nil
    desc "The regexp to match beginning of multiline"
    config_param :multiline_start_regexp, :string, default: nil
    desc "The regexp to match ending of multiline"
    config_param :multiline_end_regexp, :string, default: nil
    desc "The key to determine which stream an event belongs to"
    config_param :stream_identity_key, :string, default: nil
    desc "The interval between data flushes"
    config_param :flush_interval, :time, default: 60

    # Attached (via the error stream) to records flushed because no
    # continuation arrived within flush_interval.
    class TimeoutError < StandardError
    end

    def initialize
      super
      # stream identity => buffered [tag, time, record] tuples
      @buffer = Hash.new {|h, k| h[k] = [] }
      # stream identity => timestamp of the last event seen for that stream
      @timeout_map = Hash.new {|h, k| h[k] = Fluent::Engine.now }
    end

    def configure(conf)
      super
      if @n_lines && @multiline_start_regexp
        raise ConfigError, "n_lines and multiline_start_regexp are exclusive"
      end
      if @n_lines.nil? && @multiline_start_regexp.nil?
        raise ConfigError, "Either n_lines or multiline_start_regexp is required"
      end
      @mode = nil
      case
      when @n_lines
        @mode = :line
      when @multiline_start_regexp
        @mode = :regexp
        # Config values are written as /.../; strip the surrounding slashes.
        @multiline_start_regexp = Regexp.compile(@multiline_start_regexp[1..-2])
        if @multiline_end_regexp
          @multiline_end_regexp = Regexp.compile(@multiline_end_regexp[1..-2])
        end
      end
    end

    def start
      super
      # Run a 1-second Coolio timer on its own thread to drive timeout flushes.
      @loop = Coolio::Loop.new
      timer = TimeoutTimer.new(1, method(:on_timer))
      @loop.attach(timer)
      @thread = Thread.new(@loop, &:run)
    end

    def shutdown
      super
      @finished = true
      @loop.watchers.each(&:detach)
      @loop.stop
      @thread.join
      flush_all_buffer
    end

    def filter_stream(tag, es)
      new_es = MultiEventStream.new
      es.each do |time, record|
        begin
          new_record = process(tag, time, record)
          new_es.add(time, record.merge(new_record)) if new_record
        rescue => e
          router.emit_error_event(tag, time, record, e)
        end
      end
      new_es
    end

    private

    def on_timer
      return if @finished
      flush_timeout_buffer
    end

    # Buffer one event; returns a merged record when the group is complete,
    # the record itself when it is a standalone line, or nil while buffering.
    def process(tag, time, record)
      if @stream_identity_key
        # BUG FIX: was record["@stream_identity_key"] — a literal string key.
        # The value of the configured key must be looked up in the record.
        stream_identity = "#{tag}:#{record[@stream_identity_key]}"
      else
        stream_identity = "#{tag}:default"
      end
      @timeout_map[stream_identity] = Fluent::Engine.now
      case @mode
      when :line
        @buffer[stream_identity] << [tag, time, record]
        if @buffer[stream_identity].size >= @n_lines
          return flush_buffer(stream_identity)
        end
      when :regexp
        case
        when firstline?(record[@key])
          if @buffer[stream_identity].empty?
            @buffer[stream_identity] << [tag, time, record]
          else
            # A new start line closes the previous group.
            return flush_buffer(stream_identity, [tag, time, record])
          end
        when lastline?(record[@key])
          @buffer[stream_identity] << [tag, time, record]
          return flush_buffer(stream_identity)
        else
          if @buffer[stream_identity].empty?
            return record
          else
            # Continuation of the previous line
            @buffer[stream_identity] << [tag, time, record]
          end
        end
      end
      nil
    end

    def firstline?(text)
      !!@multiline_start_regexp.match(text)
    end

    def lastline?(text)
      @multiline_end_regexp && !!@multiline_end_regexp.match(text)
    end

    def flush_buffer(stream_identity, new_element = nil)
      lines = @buffer[stream_identity].map {|_tag, _time, record| record[@key] }
      new_record = {
        @key => lines.join(@separator)
      }
      @buffer[stream_identity] = []
      @buffer[stream_identity] << new_element if new_element
      new_record
    end

    def flush_timeout_buffer
      now = Fluent::Engine.now
      timeout_stream_identities = []
      @timeout_map.each do |stream_identity, previous_timestamp|
        # BUG FIX: the condition was inverted (`@flush_interval > elapsed`),
        # which flushed streams that were still fresh and kept stale ones.
        # Flush only streams idle for longer than flush_interval.
        next unless (now - previous_timestamp) > @flush_interval
        timeout_stream_identities << stream_identity
        flushed_record = flush_buffer(stream_identity)
        tag = stream_identity.split(":").first
        message = "Timeout flush: #{stream_identity}"
        router.emit_error_event(tag, now, flushed_record, TimeoutError.new(message))
        log.info message
      end
      # Forget timestamps for the streams we just flushed.
      @timeout_map.reject! do |stream_identity, _|
        timeout_stream_identities.include?(stream_identity)
      end
    end

    # Flush every non-empty stream buffer on shutdown, emitting one merged
    # event per stream based on the last buffered event's tag/time/record.
    def flush_all_buffer
      @buffer.each do |stream_identity, elements|
        next if elements.empty?
        es = MultiEventStream.new
        lines = elements.map {|_tag, _time, record| record[@key] }
        new_record = {
          @key => lines.join(@separator)
        }
        tag, time, record = elements.last
        es.add(time, record.merge(new_record))
        router.emit_stream(tag, es)
      end
      @buffer.clear
    end

    # Coolio timer that invokes +callback+ every +interval+ seconds.
    class TimeoutTimer < Coolio::TimerWatcher
      def initialize(interval, callback)
        super(interval, true)
        @callback = callback
      end

      def on_timer
        @callback.call
      end
    end
  end
end
|
require 'rails/generators'

# Installs Tandem into a host Rails application: copies the initializer,
# mounts the engine, imports migrations and hooks the assets into the
# application's JS and CSS manifests.
class TandemGenerator < Rails::Generators::Base
  desc "This generator sets up tandem."

  def self.source_root
    @source_root ||= File.join(File.dirname(__FILE__), 'templates')
  end

  def create_initializer_file
    initializer 'tandem.rb', File.open(File.join(self.class.source_root,'initializer.rb'), 'rb') { |f| f.read }
  end

  def add_mounting_route
    route('mount Tandem::Engine => "/"')
  end

  def import_migrations
    rake("tandem:install:migrations")
  end

  def inject_tandem_assets
    append_to_file 'app/assets/javascripts/application.js', '//= require tandem'
    # BUG FIX: newer Rails generates " */" (leading space) as the manifest's
    # closing comment, so the anchor must allow an optional space; otherwise
    # the stylesheet require is silently never inserted.
    insert_into_file 'app/assets/stylesheets/application.css', " *= require tandem\n", :before => /^\s?\*\/$/
  end
end
[#31830409] Fix bug that prevented the tandem generator from adding the tandem.css to the application's stylesheet manifest
Rails started including a space before the end comment. Made the space
optional to be backwards compatible with previous versions of rails
where there isn't a space.
require 'rails/generators'
class TandemGenerator < Rails::Generators::Base
desc "This generator sets up tandem."
def self.source_root
@source_root ||= File.join(File.dirname(__FILE__), 'templates')
end
def create_initializer_file
initializer 'tandem.rb', File.open(File.join(self.class.source_root,'initializer.rb'), 'rb') { |f| f.read }
end
def add_mounting_route
route('mount Tandem::Engine => "/"')
end
def import_migrations
rake("tandem:install:migrations")
end
def inject_tandem_assets
append_to_file 'app/assets/javascripts/application.js', '//= require tandem'
insert_into_file 'app/assets/stylesheets/application.css', " *= require tandem\n", :before => /^\s?\*\/$/
end
end
|
Then /^the user should be in groups "([^\"]*)"$/ do |groups|
fail unless @resource["groups"] == groups
end
better user in groups check
# Composite step: asserts that +user+ exists as a catalog User resource and
# that it belongs to the given space-separated +groups+ list, by delegating
# to the two finer-grained steps below.
Then /^"([^\"]*)" should be in groups? "([^\"]*)"$/ do |user, groups|
  steps %Q{
    Then there should be a resource "User[#{user}]"
    And the user should be in groups "#{groups}"
  }
end
# Compares the resource's "groups" attribute — which may be a String or an
# Array of group names — against the space-separated list captured from the
# step text.
Then /^the user should be in groups "([^\"]*)"$/ do |groups|
  actual = @resource["groups"]
  actual = actual.join(' ') if actual.is_a?(Array)
  fail unless actual == groups
end
|
require 'rubygems'
require 'mongo'
require 'pp'
require 'bunny'
require 'ghtorrent/settings'
require 'ghtorrent/persister'
require 'ghtorrent/command'
require 'ghtorrent/bson_orderedhash'
class GHTLoad < GHTorrent::Command
include GHTorrent::Settings
include GHTorrent::Persister
def persister
@persister ||= connect(:mongo, settings)
@persister
end
def prepare_options(options)
options.banner <<-BANNER
Loads data from a MongoDB collection or a file to a queue for further processing.
#{command_name} [options] mongo_collection
#{command_name} [options] -i input_file
#{command_name} options:
BANNER
options.opt :earliest, 'Seconds since epoch of earliest item to load (Mongo mode only)',
:short => 'e', :default => 0, :type => :int
options.opt :latest, 'Seconds since epoch of latest item to load (Mongo mode only)',
:short => 'x', :default => Time.now.to_i + (60 * 60 * 24 * 360 * 20),
:type => :int
options.opt :filter,
'Filter items by regexp on item attributes: item.attr=regexp (Mongo mode only)',
:short => 'f', :type => String, :multi => true
options.opt :file, 'Input file', :type => String
options.opt :number, 'Total number of items to load',
:short => 'n', :type => :int, :default => 2**48
options.opt :rate, 'Number of items to load per second',
:type => :int, :default => 1000
options.opt :route_key, 'Routing key to attached to loaded items', :type => String
end
def validate
super
filter = options[:filter]
case
when filter.is_a?(Array)
options[:filter].each { |x|
Trollop::die "not a valid filter #{x}" unless is_filter_valid?(x)
}
when filter == []
# Noop
else
Trollop::die 'A filter can only be a string'
end
if options[:file_given]
Trollop::die "File does not exist: #{options[:file]}" unless File.exists?(options[:file])
end
end
def mongo_stream
puts "Loading events after #{Time.at(options[:earliest])}" if options[:verbose]
puts "Loading events before #{Time.at(options[:latest])}" if options[:verbose]
puts "Loading #{options[:number]} items" if options[:verbose]
what = case
when options[:filter].is_a?(Array)
options[:filter].reduce({}) { |acc,x|
(k,r) = x.split(/=/)
acc[k] = Regexp.new(r)
acc
}
when filter == []
{}
end
from = {'_id' => {
'$gte' => BSON::ObjectId.from_time(Time.at(options[:earliest])),
'$lte' => BSON::ObjectId.from_time(Time.at(options[:latest]))}
}
(puts 'Mongo filter:'; pp what.merge(from)) if options[:verbose]
persister.get_underlying_connection[:events].find(what.merge(from),
:snapshot => true)
end
def mongo_process(e)
unq = read_value(e, 'type')
if unq.class != String or unq.nil? then
raise Exception.new('Unique value can only be a String')
end
[e['id'], "evt.#{e['type']}"]
end
def file_stream
File.open(options[:file])
end
def file_process(e)
[e.strip, '']
end
def go
if options[:file_given]
@mode = :file
alias :process :file_process
alias :stream :file_stream
else
@mode = :mongodb
alias :process :mongo_process
alias :stream :mongo_stream
end
# Num events read
total_read = current_sec_read = 0
conn = Bunny.new(:host => config(:amqp_host),
:port => config(:amqp_port),
:username => config(:amqp_username),
:password => config(:amqp_password))
conn.start
channel = conn.create_channel
puts "Connection to #{config(:amqp_host)} succeeded"
exchange = channel.topic(config(:amqp_exchange),
:durable => true, :auto_delete => false)
stopped = false
ts = Time.now
while not stopped
begin
stream.each do |e|
id, route = process(e)
if options[:route_key_given]
route = options[:route_key]
end
exchange.publish id, :persistent => false, :routing_key => route
total_read += 1
puts "Publish id = #{id} (#{total_read} read)" if options.verbose
# Basic rate limiting
if options[:rate_given]
current_sec_read += 1
if current_sec_read >= options[:rate]
time_diff = (Time.now - ts) * 1000
if time_diff <= 1000.0
puts "Rate limit reached, sleeping for #{1000 - time_diff} ms"
sleep((1000.0 - time_diff) / 1000)
end
current_sec_read = 0
ts = Time.now
end
end
if total_read >= options[:number]
puts 'Finished reading, exiting'
return
end
end
stopped = true
rescue Interrupt
puts 'Interrupted'
stopped = true
end
end
end
private
def is_filter_valid?(filter)
(k, r) = filter.split(/=/)
return false if r.nil?
begin
Regexp.new(r)
true
rescue
false
end
end
end
#vim: set filetype=ruby expandtab tabstop=2 shiftwidth=2 autoindent smartindent:
Support for floating point add rates
require 'rubygems'
require 'mongo'
require 'pp'
require 'bunny'
require 'ghtorrent/settings'
require 'ghtorrent/persister'
require 'ghtorrent/command'
require 'ghtorrent/bson_orderedhash'
class GHTLoad < GHTorrent::Command
include GHTorrent::Settings
include GHTorrent::Persister
def persister
@persister ||= connect(:mongo, settings)
@persister
end
def prepare_options(options)
options.banner <<-BANNER
Loads data from a MongoDB collection or a file to a queue for further processing.
#{command_name} [options] mongo_collection
#{command_name} [options] -i input_file
#{command_name} options:
BANNER
options.opt :earliest, 'Seconds since epoch of earliest item to load (Mongo mode only)',
:short => 'e', :default => 0, :type => :int
options.opt :latest, 'Seconds since epoch of latest item to load (Mongo mode only)',
:short => 'x', :default => Time.now.to_i + (60 * 60 * 24 * 360 * 20),
:type => :int
options.opt :filter,
'Filter items by regexp on item attributes: item.attr=regexp (Mongo mode only)',
:short => 'f', :type => String, :multi => true
options.opt :file, 'Input file', :type => String
options.opt :number, 'Total number of items to load',
:short => 'n', :type => :int, :default => 2**48
options.opt :rate, 'Number of items to load per second',
:type => :float, :default => 1000.0
options.opt :route_key, 'Routing key to attached to loaded items', :type => String
end
def validate
super
filter = options[:filter]
case
when filter.is_a?(Array)
options[:filter].each { |x|
Trollop::die "not a valid filter #{x}" unless is_filter_valid?(x)
}
when filter == []
# Noop
else
Trollop::die 'A filter can only be a string'
end
if options[:file_given]
Trollop::die "File does not exist: #{options[:file]}" unless File.exists?(options[:file])
end
end
def mongo_stream
puts "Loading events after #{Time.at(options[:earliest])}" if options[:verbose]
puts "Loading events before #{Time.at(options[:latest])}" if options[:verbose]
puts "Loading #{options[:number]} items" if options[:verbose]
what = case
when options[:filter].is_a?(Array)
options[:filter].reduce({}) { |acc,x|
(k,r) = x.split(/=/)
acc[k] = Regexp.new(r)
acc
}
when filter == []
{}
end
from = {'_id' => {
'$gte' => BSON::ObjectId.from_time(Time.at(options[:earliest])),
'$lte' => BSON::ObjectId.from_time(Time.at(options[:latest]))}
}
(puts 'Mongo filter:'; pp what.merge(from)) if options[:verbose]
persister.get_underlying_connection[:events].find(what.merge(from),
:snapshot => true)
end
# Map a MongoDB event document to a [message_id, routing_key] pair for
# publishing, e.g. ["123", "evt.PushEvent"].
# Raises if the event's "type" attribute is missing or not a String.
def mongo_process(e)
  unq = read_value(e, 'type')
  # The old check `unq.class != String or unq.nil?` mixed the low-precedence
  # `or` keyword with a redundant nil test (nil.class != String is already
  # true); a single is_a? check covers both cases.
  unless unq.is_a?(String)
    raise Exception.new('Unique value can only be a String')
  end
  [e['id'], "evt.#{e['type']}"]
end
def file_stream
File.open(options[:file])
end
def file_process(e)
[e.strip, '']
end
def go
if options[:file_given]
@mode = :file
alias :process :file_process
alias :stream :file_stream
else
@mode = :mongodb
alias :process :mongo_process
alias :stream :mongo_stream
end
# Num events read
total_read = current_min_read = 0
conn = Bunny.new(:host => config(:amqp_host),
:port => config(:amqp_port),
:username => config(:amqp_username),
:password => config(:amqp_password))
conn.start
channel = conn.create_channel
puts "Connection to #{config(:amqp_host)} succeeded"
exchange = channel.topic(config(:amqp_exchange),
:durable => true, :auto_delete => false)
stopped = false
ts = Time.now
while not stopped
begin
stream.each do |e|
id, route = process(e)
if options[:route_key_given]
route = options[:route_key]
end
exchange.publish id, :persistent => false, :routing_key => route
total_read += 1
puts "Publish id = #{id} (#{total_read} read)" if options.verbose
# Basic rate limiting
if options[:rate_given]
current_min_read += 1
if current_min_read >= options[:rate] * 60
time_diff = (Time.now - ts) * 1000
if time_diff <= 60 * 1000.0
puts "Rate limit reached, sleeping for #{60 * 1000 - time_diff} ms"
sleep((60 * 1000.0 - time_diff) / 1000)
end
current_min_read = 0
ts = Time.now
end
end
if total_read >= options[:number]
puts 'Finished reading, exiting'
return
end
end
stopped = true
rescue Interrupt
puts 'Interrupted'
stopped = true
end
end
end
private
# Returns true when +filter+ has the form "attr=regexp" and the regexp part
# compiles; false when the "=" part is missing or the pattern is invalid.
def is_filter_valid?(filter)
  _attr, pattern = filter.split(/=/)
  return false if pattern.nil?
  begin
    Regexp.new(pattern)
    true
  rescue RegexpError
    # Bare `rescue` previously swallowed every StandardError; only an
    # invalid pattern should be reported as an invalid filter.
    false
  end
end
end
#vim: set filetype=ruby expandtab tabstop=2 shiftwidth=2 autoindent smartindent:
|
module GitCommitAutouser
VERSION = "0.0.1"
end
Bump up version.
# Namespace for the git-commit-autouser gem.
module GitCommitAutouser
  # Gem release version.
  VERSION = "0.0.2"
end
|
require 'curses'
require_relative 'color'
require_relative 'help_window'
require_relative '../git'
require_relative '../quit_action'
module GitCrecord
module UI
class HunksWindow
SELECTED_MAP = {true => 'X', false => ' ', :partly => '~'}.freeze
def initialize(win, files)
@win = win
@files = files
@visibles = @files
@highlighted = @files[0]
@scroll_position = 0
resize
end
def getch
@win.getch
end
def refresh
@win.refresh(scroll_position, 0, 0, 0, Curses.lines - 1, @win.maxx)
end
def redraw
@win.clear
print_list(@files)
refresh
end
def resize
new_width = Curses.cols
new_height = [Curses.lines, height(new_width)].max
return if @win.maxx == new_width && @win.maxy == new_height
@win.resize(new_height, new_width)
redraw
end
def height(width, hunks = @files)
hunks.reduce(0) do |h, entry|
h + \
entry.strings(content_width(entry, width), large: true).size + \
height(width, entry.subs)
end
end
def content_width(entry, width = @win.maxx)
width - entry.x_offset - 5
end
def scroll_position
if @scroll_position + 3 > @highlighted.y1
@scroll_position = @highlighted.y1 - 3
elsif @scroll_position - 4 + Curses.lines <= @highlighted.y2
@scroll_position = [@highlighted.y2 + 4, @win.maxy].min - Curses.lines
end
@scroll_position
end
def move_highlight(to)
return if to == @highlighted || to.nil?
from = @highlighted
@highlighted = to
print_entry(from, from.y1 - 1)
print_entry(to, to.y1 - 1)
refresh
end
def print_list(list, line_number: -1)
list.each do |entry|
line_number = print_entry(entry, line_number)
next unless entry.expanded
line_number = print_list(entry.subs, line_number: line_number)
end
line_number
end
def print_entry(entry, line_number)
entry.y1 = line_number + 1
entry.strings(content_width(entry)).each_with_index do |string, index|
@win.attrset(entry.is_a?(Hunks::File) ? attrs(entry) : 0)
@win.setpos(line_number += 1, entry.x_offset)
if index == 0 && entry.selectable
@win.addstr("[#{SELECTED_MAP.fetch(entry.selected)}] ")
else
@win.addstr(' ')
end
@win.attrset(attrs(entry))
@win.addstr(string)
add_spaces = content_width(entry) - string.size
@win.addstr(' ' * add_spaces) if add_spaces > 0
end
entry.y2 = line_number
end
def attrs(entry)
color = Color.normal
if entry.is_a?(Hunks::Line)
color = Color.green if entry.add?
color = Color.red if entry.del?
end
color = Color.hl if entry == @highlighted
color | (entry.is_a?(Hunks::Line) ? 0 : Curses::A_BOLD)
end
def update_visibles
@visibles = @files.each_with_object([]) do |entry, vs|
vs << entry
next unless entry.expanded
entry.highlightable_subs.each do |entryy|
vs << entryy
vs.concat(entryy.highlightable_subs) if entryy.expanded
end
end
end
def quit
:quit
end
def stage
QuitAction.new{ Git.stage(@files) }
end
def commit
QuitAction.new do
Git.commit if Git.stage(@files) == true
end
end
def highlight_next
move_highlight(@visibles[@visibles.index(@highlighted) + 1])
end
def highlight_previous
move_highlight(@visibles[[@visibles.index(@highlighted) - 1, 0].max])
end
def highlight_first
move_highlight(@visibles[0])
end
def highlight_last
move_highlight(@visibles[-1])
end
def highlight_next_hunk
index = @visibles.index(@highlighted)
move_highlight(
@visibles[(index + 1)..-1].find do |hunk|
hunk.is_a?(Hunks::File) || hunk.is_a?(Hunks::Hunk)
end
)
end
def highlight_previous_hunk
index = @visibles.index(@highlighted)
move_highlight(
@visibles[0...index].reverse_each.find do |hunk|
hunk.is_a?(Hunks::File) || hunk.is_a?(Hunks::Hunk)
end
)
end
def collapse
return if @highlighted.is_a?(Hunks::Line) || !@highlighted.expanded
@highlighted.expanded = false
update_visibles
redraw
end
def expand
return if @highlighted.is_a?(Hunks::Line) || @highlighted.expanded
@highlighted.expanded = true
update_visibles
@highlighted = @visibles[@visibles.index(@highlighted) + 1]
redraw
end
def toggle_fold
@highlighted.expanded = !@highlighted.expanded
update_visibles
redraw
end
def toggle_selection
@highlighted.selected = !@highlighted.selected
redraw
end
def toggle_all_selections
new_selected = @files[0].selected == false
@files.each{ |file| file.selected = new_selected }
redraw
end
def help_window
HelpWindow.show
refresh
end
end
end
end
Refactor addstr functionality in HunkWindow
require 'curses'
require_relative 'color'
require_relative 'help_window'
require_relative '../git'
require_relative '../quit_action'
module GitCrecord
module UI
class HunksWindow
SELECTED_MAP = {true => 'X', false => ' ', :partly => '~'}.freeze
def initialize(win, files)
@win = win
@files = files
@visibles = @files
@highlighted = @files[0]
@scroll_position = 0
resize
end
def getch
@win.getch
end
def refresh
@win.refresh(scroll_position, 0, 0, 0, Curses.lines - 1, @win.maxx)
end
def redraw
@win.clear
print_list(@files)
refresh
end
def resize
new_width = Curses.cols
new_height = [Curses.lines, height(new_width)].max
return if @win.maxx == new_width && @win.maxy == new_height
@win.resize(new_height, new_width)
redraw
end
def height(width, hunks = @files)
hunks.reduce(0) do |h, entry|
h + \
entry.strings(content_width(entry, width), large: true).size + \
height(width, entry.subs)
end
end
def content_width(entry, width = @win.maxx)
width - entry.x_offset - 5
end
def scroll_position
if @scroll_position + 3 > @highlighted.y1
@scroll_position = @highlighted.y1 - 3
elsif @scroll_position - 4 + Curses.lines <= @highlighted.y2
@scroll_position = [@highlighted.y2 + 4, @win.maxy].min - Curses.lines
end
@scroll_position
end
def move_highlight(to)
return if to == @highlighted || to.nil?
from = @highlighted
@highlighted = to
print_entry(from, from.y1 - 1)
print_entry(to, to.y1 - 1)
refresh
end
# Write +str+ at position (y, x) — or at the current cursor position when
# +y+ is nil — using curses attribute +attr+. When +fill+ is a string, the
# remainder of the line is padded by repeating it up to the window's
# right edge.
def addstr(str, y = nil, x = 0, attr: 0, fill: false)
  @win.setpos(y, x) unless y.nil?
  @win.attrset(attr)
  @win.addstr(str)
  fill_size = @win.maxx - @win.curx
  return unless fill && fill_size > 0
  # BUG FIX: `[0..fill_size]` slices fill_size + 1 characters, overflowing
  # the line whenever +fill+ is longer than one character; `[0, fill_size]`
  # pads with exactly the remaining width.
  @win.addstr((fill * fill_size)[0, fill_size])
end
def print_list(list, line_number: -1)
list.each do |entry|
line_number = print_entry(entry, line_number)
next unless entry.expanded
line_number = print_list(entry.subs, line_number: line_number)
end
line_number
end
def print_entry(entry, line_number)
entry.y1 = line_number + 1
prefix = "[#{SELECTED_MAP.fetch(entry.selected)}] "
attr = attrs(entry)
prefix_attr = entry.is_a?(Hunks::File) ? attr : 0
entry.strings(content_width(entry)).each_with_index do |string, index|
prefix = ' ' unless index == 0 && entry.selectable
addstr(prefix, line_number += 1, entry.x_offset, attr: prefix_attr)
addstr(string, attr: attr, fill: ' ')
end
entry.y2 = line_number
end
def attrs(entry)
color = Color.normal
if entry.is_a?(Hunks::Line)
color = Color.green if entry.add?
color = Color.red if entry.del?
end
color = Color.hl if entry == @highlighted
color | (entry.is_a?(Hunks::Line) ? 0 : Curses::A_BOLD)
end
def update_visibles
@visibles = @files.each_with_object([]) do |entry, vs|
vs << entry
next unless entry.expanded
entry.highlightable_subs.each do |entryy|
vs << entryy
vs.concat(entryy.highlightable_subs) if entryy.expanded
end
end
end
def quit
:quit
end
def stage
QuitAction.new{ Git.stage(@files) }
end
def commit
QuitAction.new do
Git.commit if Git.stage(@files) == true
end
end
def highlight_next
move_highlight(@visibles[@visibles.index(@highlighted) + 1])
end
def highlight_previous
move_highlight(@visibles[[@visibles.index(@highlighted) - 1, 0].max])
end
def highlight_first
move_highlight(@visibles[0])
end
def highlight_last
move_highlight(@visibles[-1])
end
def highlight_next_hunk
index = @visibles.index(@highlighted)
move_highlight(
@visibles[(index + 1)..-1].find do |hunk|
hunk.is_a?(Hunks::File) || hunk.is_a?(Hunks::Hunk)
end
)
end
def highlight_previous_hunk
index = @visibles.index(@highlighted)
move_highlight(
@visibles[0...index].reverse_each.find do |hunk|
hunk.is_a?(Hunks::File) || hunk.is_a?(Hunks::Hunk)
end
)
end
def collapse
return if @highlighted.is_a?(Hunks::Line) || !@highlighted.expanded
@highlighted.expanded = false
update_visibles
redraw
end
def expand
return if @highlighted.is_a?(Hunks::Line) || @highlighted.expanded
@highlighted.expanded = true
update_visibles
@highlighted = @visibles[@visibles.index(@highlighted) + 1]
redraw
end
def toggle_fold
@highlighted.expanded = !@highlighted.expanded
update_visibles
redraw
end
def toggle_selection
@highlighted.selected = !@highlighted.selected
redraw
end
def toggle_all_selections
new_selected = @files[0].selected == false
@files.each{ |file| file.selected = new_selected }
redraw
end
def help_window
HelpWindow.show
refresh
end
end
end
end
|
module Godmin
module Helpers
module Translations
extend ActiveSupport::Concern
included do
helper_method :godmin_translate
end
def godmin_translate(translate, scope: nil, default: nil, **options)
if @resource_class
scope ||= @resource_class.to_s.underscore
options[:resource] ||= @resource_class.model_name.human
end
view_context.t(
translation_path(translate, scope),
default: view_context.t(translation_path(translate), default: default, **options),
**options)
end
def translation_path(translate, scope = nil)
["godmin", scope, translate].compact.join(".")
end
end
end
end
Cleanup of translation method :lipstick:
module Godmin
  module Helpers
    # Translation lookup helper shared by Godmin controllers and views.
    module Translations
      extend ActiveSupport::Concern

      included do
        helper_method :godmin_translate
      end

      # Translates +translate+ within the "godmin" namespace, trying the
      # most specific key first:
      #   1. godmin.<scope>.<translate>
      #   2. godmin.<translate>
      #   3. +default+ (if given)
      # When a resource class is set, its underscored name becomes the
      # default scope and its human model name is exposed to translations
      # as %{resource}.
      def godmin_translate(translate, scope: nil, default: nil, **options)
        if @resource_class
          scope ||= @resource_class.to_s.underscore
          options[:resource] ||= @resource_class.model_name.human
        end
        defaults = []
        defaults << ["godmin", scope, translate].compact.join(".").to_sym
        defaults << ["godmin", translate].join(".").to_sym
        defaults << default
        # I18n falls back through the remaining :default entries in order.
        view_context.t(defaults.shift, default: defaults, **options)
      end
    end
  end
end
|
# frozen_string_literal: true
module GraphQL
class Schema
class InputObject < GraphQL::Schema::Member
extend GraphQL::Schema::Member::AcceptsDefinition
extend Forwardable
extend GraphQL::Schema::Member::HasArguments
extend GraphQL::Schema::Member::HasArguments::ArgumentObjectLoader
extend GraphQL::Schema::Member::ValidatesInput
extend GraphQL::Schema::Member::HasValidators
include GraphQL::Dig
def initialize(arguments = nil, ruby_kwargs: nil, context:, defaults_used:)
@context = context
if ruby_kwargs
@ruby_style_hash = ruby_kwargs
@arguments = arguments
else
@arguments = self.class.arguments_class.new(arguments, context: context, defaults_used: defaults_used)
# Symbolized, underscored hash:
@ruby_style_hash = @arguments.to_kwargs
end
# Apply prepares, not great to have it duplicated here.
maybe_lazies = []
self.class.arguments.each_value do |arg_defn|
ruby_kwargs_key = arg_defn.keyword
if @ruby_style_hash.key?(ruby_kwargs_key)
loads = arg_defn.loads
# Resolvers do this loading themselves;
# With the interpreter, it's done during `coerce_arguments`
if loads && !arg_defn.from_resolver? && !context.interpreter?
value = @ruby_style_hash[ruby_kwargs_key]
loaded_value = if arg_defn.type.list?
value.map { |val| load_application_object(arg_defn, loads, val, context) }
else
load_application_object(arg_defn, loads, value, context)
end
maybe_lazies << context.schema.after_lazy(loaded_value) do |loaded_value|
overwrite_argument(ruby_kwargs_key, loaded_value)
end
end
# Weirdly, procs are applied during coercion, but not methods.
# Probably because these methods require a `self`.
if arg_defn.prepare.is_a?(Symbol) || context.nil? || !context.interpreter?
prepared_value = arg_defn.prepare_value(self, @ruby_style_hash[ruby_kwargs_key])
overwrite_argument(ruby_kwargs_key, prepared_value)
end
end
end
@maybe_lazies = maybe_lazies
end
# @return [GraphQL::Query::Context] The context for this query
attr_reader :context
# @return [GraphQL::Query::Arguments, GraphQL::Execution::Interpereter::Arguments] The underlying arguments instance
attr_reader :arguments
# Ruby-like hash behaviors, read-only
def_delegators :@ruby_style_hash, :keys, :values, :each, :map, :any?, :empty?
# @return [Hash] a plain Ruby hash of this input object's arguments, with
#   nested input objects, arrays and hashes recursively unwrapped via
#   #unwrap_value.
def to_h
  # each_with_object with in-place assignment avoids the O(n^2) behavior of
  # inject { |h, ...| h.merge(...) }, which allocates a fresh hash per key.
  @ruby_style_hash.each_with_object({}) do |(key, value), h|
    h[key] = unwrap_value(value)
  end
end
def to_hash
to_h
end
def prepare
if context
context.schema.after_any_lazies(@maybe_lazies) do
object = context[:current_object]
# Pass this object's class with `as` so that messages are rendered correctly from inherited validators
Schema::Validator.validate!(self.class.validators, object, context, @ruby_style_hash, as: self.class)
self
end
else
self
end
end
def unwrap_value(value)
case value
when Array
value.map { |item| unwrap_value(item) }
when Hash
value.inject({}) do |h, (key, value)|
h.merge(key => unwrap_value(value))
end
when InputObject
value.to_h
else
value
end
end
# Lookup a key on this object, it accepts new-style underscored symbols
# Or old-style camelized identifiers.
# @param key [Symbol, String]
def [](key)
if @ruby_style_hash.key?(key)
@ruby_style_hash[key]
elsif @arguments
@arguments[key]
else
nil
end
end
def key?(key)
@ruby_style_hash.key?(key) || (@arguments && @arguments.key?(key)) || false
end
# A copy of the Ruby-style hash
def to_kwargs
@ruby_style_hash.dup
end
class << self
# @return [Class<GraphQL::Arguments>]
attr_accessor :arguments_class
def argument(*args, **kwargs, &block)
argument_defn = super(*args, **kwargs, &block)
# Add a method access
method_name = argument_defn.keyword
class_eval <<-RUBY, __FILE__, __LINE__
def #{method_name}
self[#{method_name.inspect}]
end
RUBY
end
def to_graphql
type_defn = GraphQL::InputObjectType.new
type_defn.name = graphql_name
type_defn.description = description
type_defn.metadata[:type_class] = self
type_defn.mutation = mutation
type_defn.ast_node = ast_node
arguments.each do |name, arg|
type_defn.arguments[arg.graphql_definition.name] = arg.graphql_definition
end
# Make a reference to a classic-style Arguments class
self.arguments_class = GraphQL::Query::Arguments.construct_arguments_class(type_defn)
# But use this InputObject class at runtime
type_defn.arguments_class = self
type_defn
end
def kind
GraphQL::TypeKinds::INPUT_OBJECT
end
# @api private
INVALID_OBJECT_MESSAGE = "Expected %{object} to be a key-value object responding to `to_h` or `to_unsafe_h`."
def validate_non_null_input(input, ctx)
result = GraphQL::Query::InputValidationResult.new
warden = ctx.warden
if input.is_a?(Array)
result.add_problem(INVALID_OBJECT_MESSAGE % { object: JSON.generate(input, quirks_mode: true) })
return result
end
if !(input.respond_to?(:to_h) || input.respond_to?(:to_unsafe_h))
# We're not sure it'll act like a hash, so reject it:
result.add_problem(INVALID_OBJECT_MESSAGE % { object: JSON.generate(input, quirks_mode: true) })
return result
end
# Inject missing required arguments
missing_required_inputs = self.arguments.reduce({}) do |m, (argument_name, argument)|
if !input.key?(argument_name) && argument.type.non_null? && warden.get_argument(self, argument_name)
m[argument_name] = nil
end
m
end
[input, missing_required_inputs].each do |args_to_validate|
args_to_validate.each do |argument_name, value|
argument = warden.get_argument(self, argument_name)
# Items in the input that are unexpected
unless argument
result.add_problem("Field is not defined on #{self.graphql_name}", [argument_name])
next
end
# Items in the input that are expected, but have invalid values
argument_result = argument.type.validate_input(value, ctx)
result.merge_result!(argument_name, argument_result) unless argument_result.valid?
end
end
result
end
def coerce_input(value, ctx)
if value.nil?
return nil
end
arguments = coerce_arguments(nil, value, ctx)
ctx.schema.after_lazy(arguments) do |resolved_arguments|
if resolved_arguments.is_a?(GraphQL::Error)
raise resolved_arguments
else
input_obj_instance = self.new(resolved_arguments, ruby_kwargs: resolved_arguments.keyword_arguments, context: ctx, defaults_used: nil)
input_obj_instance.prepare
end
end
end
# It's funny to think of a _result_ of an input object.
# This is used for rendering the default value in introspection responses.
def coerce_result(value, ctx)
# Allow the application to provide values as :snake_symbols, and convert them to the camelStrings
value = value.reduce({}) { |memo, (k, v)| memo[Member::BuildType.camelize(k.to_s)] = v; memo }
result = {}
arguments.each do |input_key, input_field_defn|
input_value = value[input_key]
if value.key?(input_key)
result[input_key] = if input_value.nil?
nil
else
input_field_defn.type.coerce_result(input_value, ctx)
end
end
end
result
end
end
private
def overwrite_argument(key, value)
# Argument keywords come in frozen from the interpreter, dup them before modifying them.
if @ruby_style_hash.frozen?
@ruby_style_hash = @ruby_style_hash.dup
end
@ruby_style_hash[key] = value
end
end
end
end
⚡️ Use merge! instead of merge
# frozen_string_literal: true
module GraphQL
class Schema
class InputObject < GraphQL::Schema::Member
extend GraphQL::Schema::Member::AcceptsDefinition
extend Forwardable
extend GraphQL::Schema::Member::HasArguments
extend GraphQL::Schema::Member::HasArguments::ArgumentObjectLoader
extend GraphQL::Schema::Member::ValidatesInput
extend GraphQL::Schema::Member::HasValidators
include GraphQL::Dig
# @param arguments [GraphQL::Query::Arguments, Hash, nil] pre-coerced arguments (interpreter) or raw input (legacy runtime)
# @param ruby_kwargs [Hash, nil] already-symbolized, underscored keyword args (interpreter runtime)
# @param context [GraphQL::Query::Context, nil]
# @param defaults_used [Object, nil] legacy-runtime bookkeeping for default values
def initialize(arguments = nil, ruby_kwargs: nil, context:, defaults_used:)
  @context = context
  if ruby_kwargs
    # Interpreter runtime: kwargs arrive ready to use.
    @ruby_style_hash = ruby_kwargs
    @arguments = arguments
  else
    # Legacy runtime: build a classic Arguments instance and derive kwargs from it.
    @arguments = self.class.arguments_class.new(arguments, context: context, defaults_used: defaults_used)
    # Symbolized, underscored hash:
    @ruby_style_hash = @arguments.to_kwargs
  end
  # Apply prepares, not great to have it duplicated here.
  maybe_lazies = []
  self.class.arguments.each_value do |arg_defn|
    ruby_kwargs_key = arg_defn.keyword
    if @ruby_style_hash.key?(ruby_kwargs_key)
      loads = arg_defn.loads
      # Resolvers do this loading themselves;
      # With the interpreter, it's done during `coerce_arguments`
      if loads && !arg_defn.from_resolver? && !context.interpreter?
        value = @ruby_style_hash[ruby_kwargs_key]
        loaded_value = if arg_defn.type.list?
          value.map { |val| load_application_object(arg_defn, loads, val, context) }
        else
          load_application_object(arg_defn, loads, value, context)
        end
        # Replace the raw id(s) with the loaded application object(s) once resolved.
        maybe_lazies << context.schema.after_lazy(loaded_value) do |loaded_value|
          overwrite_argument(ruby_kwargs_key, loaded_value)
        end
      end
      # Weirdly, procs are applied during coercion, but not methods.
      # Probably because these methods require a `self`.
      if arg_defn.prepare.is_a?(Symbol) || context.nil? || !context.interpreter?
        prepared_value = arg_defn.prepare_value(self, @ruby_style_hash[ruby_kwargs_key])
        overwrite_argument(ruby_kwargs_key, prepared_value)
      end
    end
  end
  # Consumed by #prepare, which waits on these before running validators.
  @maybe_lazies = maybe_lazies
end
# @return [GraphQL::Query::Context] The context for this query
attr_reader :context
# @return [GraphQL::Query::Arguments, GraphQL::Execution::Interpereter::Arguments] The underlying arguments instance
attr_reader :arguments
# Ruby-like hash behaviors, read-only
def_delegators :@ruby_style_hash, :keys, :values, :each, :map, :any?, :empty?
# Plain-Ruby hash representation: each value is recursively unwrapped
# (nested input objects become hashes, arrays are walked element-wise).
def to_h
  unwrapped = {}
  @ruby_style_hash.each do |key, value|
    unwrapped[key] = unwrap_value(value)
  end
  unwrapped
end
# Hash-conversion protocol alias for #to_h (supports implicit
# conversion, e.g. double-splat).
def to_hash
  to_h
end
# Runs class-level validators against the coerced arguments, after any
# pending lazy argument loads (@maybe_lazies) have resolved.
# @return [Object] self, possibly wrapped in a lazy when a context is present
def prepare
  if context
    context.schema.after_any_lazies(@maybe_lazies) do
      object = context[:current_object]
      # Pass this object's class with `as` so that messages are rendered correctly from inherited validators
      Schema::Validator.validate!(self.class.validators, object, context, @ruby_style_hash, as: self.class)
      self
    end
  else
    # No context (e.g. constructed directly) -- skip validation.
    self
  end
end
# Recursively converts a coerced value to plain Ruby data: arrays and
# hashes are walked element-by-element, nested InputObjects are
# flattened via #to_h, everything else passes through unchanged.
#
# @param value [Object]
# @return [Object] the unwrapped, plain-Ruby representation
def unwrap_value(value)
  case value
  when Array
    value.map { |item| unwrap_value(item) }
  when Hash
    # Fix: the original block parameter shadowed the method's `value`
    # argument; use distinct names and each_with_object to build the hash.
    value.each_with_object({}) do |(key, sub_value), unwrapped|
      unwrapped[key] = unwrap_value(sub_value)
    end
  when InputObject
    value.to_h
  else
    value
  end
end
# Hash-style reader for one argument value.
# Prefers the underscored ruby-style key; falls back to the underlying
# arguments instance (which also understands camelized identifiers);
# returns nil when neither recognizes the key.
# @param key [Symbol, String]
def [](key)
  return @ruby_style_hash[key] if @ruby_style_hash.key?(key)
  @arguments ? @arguments[key] : nil
end
# @return [Boolean] whether either backing store recognizes `key`
def key?(key)
  return true if @ruby_style_hash.key?(key)
  return true if @arguments && @arguments.key?(key)
  false
end
# A shallow copy of the Ruby-style (symbolized, underscored) hash --
# callers may mutate it without affecting this input object.
def to_kwargs
  @ruby_style_hash.dup
end
class << self
# @return [Class<GraphQL::Arguments>]
attr_accessor :arguments_class
# Defines an argument on this input object (delegating to HasArguments
# via super) and also generates an instance reader method named after
# the argument's keyword, so `obj.foo` works as well as `obj[:foo]`.
def argument(*args, **kwargs, &block)
  argument_defn = super(*args, **kwargs, &block)
  # Add a method access
  method_name = argument_defn.keyword
  # __FILE__/__LINE__ keep backtraces pointing at this definition site.
  class_eval <<-RUBY, __FILE__, __LINE__
    def #{method_name}
      self[#{method_name.inspect}]
    end
  RUBY
end
# Builds the legacy (define-API) InputObjectType counterpart of this class.
# @return [GraphQL::InputObjectType]
def to_graphql
  type_defn = GraphQL::InputObjectType.new
  type_defn.name = graphql_name
  type_defn.description = description
  # Back-reference so the legacy type can find this class again.
  type_defn.metadata[:type_class] = self
  type_defn.mutation = mutation
  type_defn.ast_node = ast_node
  arguments.each do |name, arg|
    type_defn.arguments[arg.graphql_definition.name] = arg.graphql_definition
  end
  # Make a reference to a classic-style Arguments class
  self.arguments_class = GraphQL::Query::Arguments.construct_arguments_class(type_defn)
  # But use this InputObject class at runtime
  type_defn.arguments_class = self
  type_defn
end
# @return [GraphQL::TypeKinds::TypeKind] the INPUT_OBJECT kind marker
def kind
  GraphQL::TypeKinds::INPUT_OBJECT
end
# @api private
INVALID_OBJECT_MESSAGE = "Expected %{object} to be a key-value object responding to `to_h` or `to_unsafe_h`."
# Validates a raw (pre-coercion) non-null input value against this
# input object's argument definitions: rejects non-hash-like values,
# flags unexpected keys, and validates each provided (or required but
# missing) argument's value.
# @param input [Object] the raw value from the query
# @param ctx [GraphQL::Query::Context]
# @return [GraphQL::Query::InputValidationResult]
def validate_non_null_input(input, ctx)
  result = GraphQL::Query::InputValidationResult.new
  warden = ctx.warden
  if input.is_a?(Array)
    result.add_problem(INVALID_OBJECT_MESSAGE % { object: JSON.generate(input, quirks_mode: true) })
    return result
  end
  if !(input.respond_to?(:to_h) || input.respond_to?(:to_unsafe_h))
    # We're not sure it'll act like a hash, so reject it:
    result.add_problem(INVALID_OBJECT_MESSAGE % { object: JSON.generate(input, quirks_mode: true) })
    return result
  end
  # Inject missing required arguments
  missing_required_inputs = self.arguments.reduce({}) do |m, (argument_name, argument)|
    # Only report arguments that are visible through the warden.
    if !input.key?(argument_name) && argument.type.non_null? && warden.get_argument(self, argument_name)
      m[argument_name] = nil
    end
    m
  end
  [input, missing_required_inputs].each do |args_to_validate|
    args_to_validate.each do |argument_name, value|
      argument = warden.get_argument(self, argument_name)
      # Items in the input that are unexpected
      unless argument
        result.add_problem("Field is not defined on #{self.graphql_name}", [argument_name])
        next
      end
      # Items in the input that are expected, but have invalid values
      argument_result = argument.type.validate_input(value, ctx)
      result.merge_result!(argument_name, argument_result) unless argument_result.valid?
    end
  end
  result
end
def coerce_input(value, ctx)
if value.nil?
return nil
end
arguments = coerce_arguments(nil, value, ctx)
ctx.schema.after_lazy(arguments) do |resolved_arguments|
if resolved_arguments.is_a?(GraphQL::Error)
raise resolved_arguments
else
input_obj_instance = self.new(resolved_arguments, ruby_kwargs: resolved_arguments.keyword_arguments, context: ctx, defaults_used: nil)
input_obj_instance.prepare
end
end
end
# It's funny to think of a _result_ of an input object.
# This is used for rendering the default value in introspection responses.
def coerce_result(value, ctx)
# Allow the application to provide values as :snake_symbols, and convert them to the camelStrings
value = value.reduce({}) { |memo, (k, v)| memo[Member::BuildType.camelize(k.to_s)] = v; memo }
result = {}
arguments.each do |input_key, input_field_defn|
input_value = value[input_key]
if value.key?(input_key)
result[input_key] = if input_value.nil?
nil
else
input_field_defn.type.coerce_result(input_value, ctx)
end
end
end
result
end
end
private
# Writes `value` under `key`, first taking an unfrozen copy of the hash
# if needed (the interpreter supplies frozen keyword hashes).
def overwrite_argument(key, value)
  @ruby_style_hash = @ruby_style_hash.dup if @ruby_style_hash.frozen?
  @ruby_style_hash[key] = value
end
end
end
end
|
#!/usr/bin/env ruby
# encoding: UTF-8
#
# Copyright 2012-2014, Continuuity, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
require 'net/scp'
class ShellAutomator < Automator
attr_accessor :credentials, :scripts_dir, :scripts_tar, :remote_cache_dir
def initialize(env, task)
super(env, task)
work_dir = @env[:work_dir]
tenant = @env[:tenant]
# local and remote top-level script directory name
@scripts_parent_dir = "scripts"
# local scripts dir
#@scripts_dir = "#{File.expand_path(File.dirname(__FILE__))}/#{@scripts_parent_dir}"
@scripts_dir = %W[ #{work_dir} #{tenant} automatortypes shell #{@scripts_parent_dir} ].join('/')
# name of tarball to generate
#@scripts_tar = "#{File.expand_path(File.dirname(__FILE__))}/scripts.tar.gz"
@scripts_tar = %W[ #{work_dir} #{tenant} automatortypes shell scripts.tar.gz ].join('/')
# remote storage directory
@remote_cache_dir = "/var/cache/loom"
# remote script location to be exported in $PATH
@remote_scripts_dir = "#{@remote_cache_dir}/#{@scripts_parent_dir}"
# loom wrapper for common functions
@wrapper_script = "#{@remote_scripts_dir}/.lib/loom_wrapper.sh"
end
def generate_scripts_tar
if !File.exist?(@scripts_tar) or ((Time.now - File.stat(@scripts_tar).mtime).to_i > 600)
log.debug "Generating #{@scripts_tar} from #{@scripts_dir}"
scripts_tar_path = File.dirname(@scripts_dir)
scripts_parent_dir = File.basename(@scripts_dir)
`tar -cLzf "#{@scripts_tar}.new" -C "#{scripts_tar_path}" #{scripts_parent_dir}`
`mv "#{@scripts_tar}.new" "#{@scripts_tar}"`
log.debug "Generation complete: #{@scripts_tar}"
end
end
# Builds the Net::SSH credentials hash (@credentials) from the task's
# sshauth map: keys matching /identityfile/ become :keys, keys matching
# /password/ become :password.
# NOTE(review): :paranoid is deprecated in newer Net::SSH releases in
# favor of :verify_host_key -- confirm the pinned gem version.
def set_credentials(sshauth)
  @credentials = Hash.new
  @credentials[:paranoid] = false
  sshauth.each do |k, v|
    if (k =~ /identityfile/)
      @credentials[:keys] = [ v ]
    elsif (k =~ /password/)
      @credentials[:password] = v
    end
  end
end
def runshell(inputmap)
sshauth = inputmap['sshauth']
ipaddress = inputmap['ipaddress']
fields = inputmap['fields']
raise "required parameter \"script\" not found in input: #{fields}" if fields['script'].nil?
shellscript = fields['script']
shellargs = fields['args']
# do we need sudo bash?
sudo = 'sudo' unless sshauth['user'] == 'root'
set_credentials(sshauth)
jsondata = JSON.generate(task)
# copy the task json data to the cache dir on the remote machine
begin
# write json task data to a local tmp file
tmpjson = Tempfile.new("loom")
tmpjson.write(jsondata)
tmpjson.close
# scp task.json to remote
log.debug "Copying json attributes to remote"
begin
Net::SCP.upload!(ipaddress, sshauth['user'], tmpjson.path, "#{@remote_cache_dir}/#{@task['taskId']}.json", :ssh =>
@credentials)
rescue Net::SSH::AuthenticationFailed
raise $!, "SSH Authentication failure for #{ipaddress}: #{$!}", $!.backtrace
end
log.debug "Copy json attributes complete"
ensure
tmpjson.close
tmpjson.unlink
end
# execute the defined shell script
begin
Net::SSH.start(ipaddress, sshauth['user'], @credentials) do |ssh|
ssh_exec!(ssh,
"cd #{@remote_scripts_dir}; export PATH=$PATH:#{@remote_scripts_dir}; #{sudo} #{@wrapper_script} #{@remote_cache_dir}/#{@task['taskId']}.json #{shellscript} #{shellargs}",
"Running shell command #{shellscript} #{shellargs}")
end
rescue Net::SSH::AuthenticationFailed
raise $!, "SSH Authentication failure for #{ipaddress}: #{$!}", $!.backtrace
end
@result['status'] = 0
log.debug "Result of shell command: #{@result}"
@result
end
def bootstrap(inputmap)
sshauth = inputmap['sshauth']
ipaddress = inputmap['ipaddress']
# do we need sudo bash?
sudo = 'sudo' unless sshauth['user'] == 'root'
set_credentials(sshauth)
generate_scripts_tar()
# check to ensure scp is installed and attempt to install it
begin
Net::SSH.start(ipaddress, sshauth['user'], @credentials) do |ssh|
log.debug "Checking for scp installation"
begin
ssh_exec!(ssh, "which scp", "Checking for scp")
rescue CommandExecutionException
log.warn "scp not found, attempting to install openssh-client"
scp_install_cmd = "#{sudo} yum -qy install openssh-clients"
begin
ssh_exec!(ssh, "which yum", "Checking for yum")
rescue CommandExecutionException
scp_install_cmd = "#{sudo} apt-get -qy install openssh-client"
end
ssh_exec!(ssh, scp_install_cmd, "installing openssh-client via #{scp_install_cmd}")
else
log.debug "scp found on remote"
end
end
rescue Net::SSH::AuthenticationFailed
raise $!, "SSH Authentication failure for #{ipaddress}: #{$!}", $!.backtrace
end
begin
Net::SSH.start(ipaddress, sshauth['user'], @credentials) do |ssh|
ssh_exec!(ssh, "#{sudo} mkdir -p #{@remote_cache_dir}", "Creating remote cache dir")
ssh_exec!(ssh, "#{sudo} chown -R #{sshauth['user']} #{@remote_cache_dir}", "Changing cache dir owner to #{sshauth['user']}")
end
rescue Net::SSH::AuthenticationFailed
raise $!, "SSH Authentication failure for #{ipaddress}: #{$!}", $!.backtrace
end
log.debug "ShellAutomator bootstrap uploading scripts to #{ipaddress}"
# scp tarball to target machine
begin
Net::SCP.upload!(ipaddress, sshauth['user'], "#{@scripts_tar}", "#{@remote_cache_dir}/scripts.tar.gz", :ssh =>
@credentials, :verbose => true)
rescue Net::SSH::AuthenticationFailed
raise $!, "SSH Authentication failure for #{ipaddress}: #{$!}", $!.backtrace
end
# extract scripts tarball on remote machine
begin
Net::SSH.start(ipaddress, sshauth['user'], @credentials) do |ssh|
ssh_exec!(ssh, "tar xf #{@remote_cache_dir}/scripts.tar.gz -C #{@remote_cache_dir}", "Extract remote #{@remote_cache_dir}/scripts.tar.gz")
end
rescue Net::SSH::AuthenticationFailed
raise $!, "SSH Authentication failure for #{ipaddress}: #{$!}", $!.backtrace
end
@result['status'] = 0
log.info "ShellAutomator bootstrap completed successfully: #{@result}"
end
# Task-type entry points. Every lifecycle action (install/configure/
# init/start/stop/remove) is handled identically by the generic shell
# runner, which executes the script named in the task's fields.
def install(inputmap)
  runshell(inputmap)
end

def configure(inputmap)
  runshell(inputmap)
end

def init(inputmap)
  runshell(inputmap)
end

def start(inputmap)
  runshell(inputmap)
end

def stop(inputmap)
  runshell(inputmap)
end

def remove(inputmap)
  runshell(inputmap)
end
end
Remove commented-out legacy path definitions from the ShellAutomator initializer
#!/usr/bin/env ruby
# encoding: UTF-8
#
# Copyright 2012-2014, Continuuity, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
require 'net/scp'
class ShellAutomator < Automator
attr_accessor :credentials, :scripts_dir, :scripts_tar, :remote_cache_dir
# Sets up the per-tenant local script/tarball paths (under the work dir)
# and the fixed remote locations used by bootstrap/runshell.
def initialize(env, task)
  super(env, task)
  tenant_base = "#{@env[:work_dir]}/#{@env[:tenant]}/automatortypes/shell"
  # local and remote top-level script directory name
  @scripts_parent_dir = "scripts"
  # local scripts dir
  @scripts_dir = "#{tenant_base}/#{@scripts_parent_dir}"
  # name of tarball to generate
  @scripts_tar = "#{tenant_base}/scripts.tar.gz"
  # remote storage directory
  @remote_cache_dir = "/var/cache/loom"
  # remote script location to be exported in $PATH
  @remote_scripts_dir = "#{@remote_cache_dir}/#{@scripts_parent_dir}"
  # loom wrapper for common functions
  @wrapper_script = "#{@remote_scripts_dir}/.lib/loom_wrapper.sh"
end
# Regenerates the local scripts tarball when it is missing or older than
# ten minutes. Builds into "<tar>.new" first and then renames, so a
# concurrent reader never sees a half-written archive.
def generate_scripts_tar
  if !File.exist?(@scripts_tar) or ((Time.now - File.stat(@scripts_tar).mtime).to_i > 600)
    log.debug "Generating #{@scripts_tar} from #{@scripts_dir}"
    scripts_tar_path = File.dirname(@scripts_dir)
    scripts_parent_dir = File.basename(@scripts_dir)
    # -L dereferences symlinks; note the backtick exit status is not checked.
    `tar -cLzf "#{@scripts_tar}.new" -C "#{scripts_tar_path}" #{scripts_parent_dir}`
    `mv "#{@scripts_tar}.new" "#{@scripts_tar}"`
    log.debug "Generation complete: #{@scripts_tar}"
  end
end
# Builds the Net::SSH credentials hash from the task's sshauth map.
# Keys matching /identityfile/ become :keys, /password/ becomes :password.
# NOTE(review): :paranoid is deprecated in newer Net::SSH releases in
# favor of :verify_host_key -- confirm the pinned gem version.
def set_credentials(sshauth)
  creds = { :paranoid => false }
  sshauth.each_pair do |key, value|
    case key
    when /identityfile/ then creds[:keys] = [value]
    when /password/     then creds[:password] = value
    end
  end
  @credentials = creds
end
# Runs one shell task on the remote host:
# 1. writes the task JSON to a tempfile and scp's it to the remote cache dir,
# 2. executes the requested script through the loom wrapper over SSH,
# 3. returns @result with status 0 on success.
# Raises when the required 'script' field is missing or SSH auth fails.
def runshell(inputmap)
  sshauth = inputmap['sshauth']
  ipaddress = inputmap['ipaddress']
  fields = inputmap['fields']
  raise "required parameter \"script\" not found in input: #{fields}" if fields['script'].nil?
  shellscript = fields['script']
  shellargs = fields['args']
  # do we need sudo bash?
  sudo = 'sudo' unless sshauth['user'] == 'root'
  set_credentials(sshauth)
  jsondata = JSON.generate(task)
  # copy the task json data to the cache dir on the remote machine
  begin
    # write json task data to a local tmp file
    tmpjson = Tempfile.new("loom")
    tmpjson.write(jsondata)
    tmpjson.close
    # scp task.json to remote
    log.debug "Copying json attributes to remote"
    begin
      Net::SCP.upload!(ipaddress, sshauth['user'], tmpjson.path, "#{@remote_cache_dir}/#{@task['taskId']}.json", :ssh =>
        @credentials)
    rescue Net::SSH::AuthenticationFailed
      # Re-raise with the host in the message, keeping the original backtrace.
      raise $!, "SSH Authentication failure for #{ipaddress}: #{$!}", $!.backtrace
    end
    log.debug "Copy json attributes complete"
  ensure
    # second close is a harmless no-op when the happy path already closed it
    tmpjson.close
    tmpjson.unlink
  end
  # execute the defined shell script
  begin
    Net::SSH.start(ipaddress, sshauth['user'], @credentials) do |ssh|
      ssh_exec!(ssh,
        "cd #{@remote_scripts_dir}; export PATH=$PATH:#{@remote_scripts_dir}; #{sudo} #{@wrapper_script} #{@remote_cache_dir}/#{@task['taskId']}.json #{shellscript} #{shellargs}",
        "Running shell command #{shellscript} #{shellargs}")
    end
  rescue Net::SSH::AuthenticationFailed
    raise $!, "SSH Authentication failure for #{ipaddress}: #{$!}", $!.backtrace
  end
  @result['status'] = 0
  log.debug "Result of shell command: #{@result}"
  @result
end
def bootstrap(inputmap)
sshauth = inputmap['sshauth']
ipaddress = inputmap['ipaddress']
# do we need sudo bash?
sudo = 'sudo' unless sshauth['user'] == 'root'
set_credentials(sshauth)
generate_scripts_tar()
# check to ensure scp is installed and attempt to install it
begin
Net::SSH.start(ipaddress, sshauth['user'], @credentials) do |ssh|
log.debug "Checking for scp installation"
begin
ssh_exec!(ssh, "which scp", "Checking for scp")
rescue CommandExecutionException
log.warn "scp not found, attempting to install openssh-client"
scp_install_cmd = "#{sudo} yum -qy install openssh-clients"
begin
ssh_exec!(ssh, "which yum", "Checking for yum")
rescue CommandExecutionException
scp_install_cmd = "#{sudo} apt-get -qy install openssh-client"
end
ssh_exec!(ssh, scp_install_cmd, "installing openssh-client via #{scp_install_cmd}")
else
log.debug "scp found on remote"
end
end
rescue Net::SSH::AuthenticationFailed
raise $!, "SSH Authentication failure for #{ipaddress}: #{$!}", $!.backtrace
end
begin
Net::SSH.start(ipaddress, sshauth['user'], @credentials) do |ssh|
ssh_exec!(ssh, "#{sudo} mkdir -p #{@remote_cache_dir}", "Creating remote cache dir")
ssh_exec!(ssh, "#{sudo} chown -R #{sshauth['user']} #{@remote_cache_dir}", "Changing cache dir owner to #{sshauth['user']}")
end
rescue Net::SSH::AuthenticationFailed
raise $!, "SSH Authentication failure for #{ipaddress}: #{$!}", $!.backtrace
end
log.debug "ShellAutomator bootstrap uploading scripts to #{ipaddress}"
# scp tarball to target machine
begin
Net::SCP.upload!(ipaddress, sshauth['user'], "#{@scripts_tar}", "#{@remote_cache_dir}/scripts.tar.gz", :ssh =>
@credentials, :verbose => true)
rescue Net::SSH::AuthenticationFailed
raise $!, "SSH Authentication failure for #{ipaddress}: #{$!}", $!.backtrace
end
# extract scripts tarball on remote machine
begin
Net::SSH.start(ipaddress, sshauth['user'], @credentials) do |ssh|
ssh_exec!(ssh, "tar xf #{@remote_cache_dir}/scripts.tar.gz -C #{@remote_cache_dir}", "Extract remote #{@remote_cache_dir}/scripts.tar.gz")
end
rescue Net::SSH::AuthenticationFailed
raise $!, "SSH Authentication failure for #{ipaddress}: #{$!}", $!.backtrace
end
@result['status'] = 0
log.info "ShellAutomator bootstrap completed successfully: #{@result}"
end
# Task-type entry points. Every lifecycle action (install/configure/
# init/start/stop/remove) is handled identically by the generic shell
# runner, which executes the script named in the task's fields.
def install(inputmap)
  runshell(inputmap)
end

def configure(inputmap)
  runshell(inputmap)
end

def init(inputmap)
  runshell(inputmap)
end

def start(inputmap)
  runshell(inputmap)
end

def stop(inputmap)
  runshell(inputmap)
end

def remove(inputmap)
  runshell(inputmap)
end
end
|
module Wice
module JsCalendarHelpers
include ActionView::Helpers::AssetTagHelper
include ActionView::Helpers::TagHelper
include ActionView::Helpers::JavaScriptHelper
include ActionView::Helpers::FormTagHelper
# Jquery
def date_calendar_jquery(initial_date, opts = {}, html_opts = {}) #:nodoc:
date_format = Wice::Defaults::DATE_FORMAT
options, name, date_string, dom_id, datepicker_placeholder_id, date_span_id =
prepare_data_for_calendar(opts, date_format, initial_date)
remove_date_function = %! $('##{date_span_id}').html(''); $('##{dom_id}')[0].value = ''; !
date_picker =
hidden_field_tag(name, date_string, :id => dom_id) + ' ' +
link_to_function(
content_tag(:span, date_string, :id => date_span_id),
remove_date_function,
:class => 'date_label',
:title => ::Wice::WiceGridNlMessageProvider.get_message(:DATE_STRING_TOOLTIP))
html = "<span id=\"#{datepicker_placeholder_id}\">#{date_picker}</span>"
javascript = calendar_constructor_jquery(dom_id, Wice::Defaults::DATE_FORMAT_JQUERY,
date_span_id, opts[:fire_event], html_opts[:title], datepicker_placeholder_id)
[html, javascript]
end
# Prototype
def date_calendar_prototype(initial_date, opts = {}, html_opts = {}) #:nodoc:
select_date_datetime_common_prototype(initial_date, opts, html_opts, false, Wice::Defaults::DATE_FORMAT)
end
def datetime_calendar_prototype(initial_date, opts = {}, html_opts = {}) #:nodoc:
select_date_datetime_common_prototype(initial_date, opts, html_opts, true, Wice::Defaults::DATETIME_FORMAT)
end
protected
# common
# Normalizes calendar options and derives the DOM ids used by both the
# jQuery and Prototype calendar builders.
#
# Returns [options, input name, formatted date string, input dom id,
# placeholder span id, visible-date span id].
def prepare_data_for_calendar(opts, date_format, initial_date) #:nodoc:
  options = { :prefix => 'date' }.merge(opts)
  name = options[:prefix]
  date_string = initial_date.nil? ? '' : initial_date.strftime(date_format)
  # Collapse brackets/dots in the field name into a clean DOM id,
  # unless the caller supplied an explicit :id.
  dom_id = options[:id]
  dom_id ||= name.gsub(/([\[\(])|(\]\[)/, '_').gsub(/[\]\)]/, '').gsub(/\./, '_').gsub(/_+/, '_')
  [options, name, date_string, dom_id, "#{dom_id}_date_placeholder", "#{dom_id}_date_view"]
end
# jquery
def calendar_constructor_jquery(dom_id, date_format, date_span_id, fireEvent, title, datepicker_placeholder_id)
javascript = %| $( "##{dom_id}" ).datepicker({\n|
javascript << %| firstDay: 1,\n|
javascript << %| showOn: "button",\n|
javascript << %| dateFormat: "#{date_format}",\n|
javascript << %| buttonImage: "#{::Wice::Defaults::CALENDAR_ICON}",\n|
javascript << %| buttonImageOnly: true,\n|
javascript << %| buttonText: "#{title}",\n|
javascript << %| onSelect: function(dateText, inst) {\n|
javascript << %| $("##{date_span_id}").html(dateText);\n|
if fireEvent
javascript << %| $("##{dom_id}").trigger('wg:calendarChanged');\n|
end
javascript << %| }\n|
javascript << %| });\n|
lang = Object.const_defined?(:I18n) ? I18n.locale : nil
if lang
javascript << %| if ($.datepicker.regional['#{lang}']){\n|
javascript << %| var regionalOptions = $.datepicker.regional['#{lang}'] ;\n|
javascript << %| delete regionalOptions.dateFormat ;\n|
javascript << %| delete regionalOptions.firstDate ;\n|
javascript << %| $( "##{dom_id}" ).datepicker( "option", $.datepicker.regional['#{lang}'] );\n|
javascript << %| }\n|
end
javascript += %| $('##{datepicker_placeholder_id} .ui-datepicker-trigger').addClass('clickable');\n|
javascript
end
# prortotype
def calendar_constructor_prototype(popup_trigger_icon_id, dom_id, date_format, date_span_id, with_time, fireEvent)
javascript = ''
unless @_wg_date_picker_language_initialized
lang = Object.const_defined?(:I18n) ? I18n.locale : nil
javascript << %| Calendar.language = '#{lang}';\n| unless lang.blank?
@_wg_date_picker_language_initialized = true
end
javascript << %| new Calendar({\n |
javascript << %| popupTriggerElement : "#{popup_trigger_icon_id}",\n |
javascript << %| initialDate : $('#{dom_id}').value,\n |
if fireEvent
javascript << %| onHideCallback : function(){Event.fire($(#{dom_id}), 'wg:calendarChanged')},\n |
end
javascript << %| dateFormat : "#{date_format}",\n|
unless Wice::Defaults::POPUP_PLACEMENT_STRATEGY == :trigger
javascript << %| popupPositioningStrategy : "#{Wice::Defaults::POPUP_PLACEMENT_STRATEGY}",\n|
end
if with_time
javascript << %| withTime : true,\n|
end
javascript << %| outputFields : $A(['#{date_span_id}', '#{dom_id}'])\n |
javascript << %| });\n|
javascript
end
def select_date_datetime_common_prototype(initial_date, opts, html_opts, with_time, date_format) #:nodoc:
options, name, date_string, dom_id, datepicker_placeholder_id, date_span_id =
prepare_data_for_calendar(opts, date_format, initial_date)
popup_trigger_icon_id = dom_id + '_trigger'
function = %! $('#{date_span_id}').innerHTML = ''; $('#{dom_id}').value = ''; !
if opts[:fire_event]
function += "Event.fire($(#{dom_id}), 'wg:calendarChanged')"
end
date_picker = image_tag(Defaults::CALENDAR_ICON,
:id => popup_trigger_icon_id,
:class => 'clickable',
:title => html_opts[:title]) +
link_to_function(
content_tag(:span, date_string, :id => date_span_id),
function,
:class => 'date_label',
:title => WiceGridNlMessageProvider.get_message(:DATE_STRING_TOOLTIP)) + ' ' +
hidden_field_tag(name, date_string, :class => 'text-input', :id => dom_id)
html = "<span id=\"#{datepicker_placeholder_id}\">#{date_picker}</span>"
javascript = calendar_constructor_prototype(popup_trigger_icon_id, dom_id, date_format, date_span_id, with_time, opts[:fire_event])
[html, javascript]
end
end
end
Add month and year dropdown selectors (changeMonth/changeYear) to the jQuery datepicker
module Wice
module JsCalendarHelpers
include ActionView::Helpers::AssetTagHelper
include ActionView::Helpers::TagHelper
include ActionView::Helpers::JavaScriptHelper
include ActionView::Helpers::FormTagHelper
# Jquery
def date_calendar_jquery(initial_date, opts = {}, html_opts = {}) #:nodoc:
date_format = Wice::Defaults::DATE_FORMAT
options, name, date_string, dom_id, datepicker_placeholder_id, date_span_id =
prepare_data_for_calendar(opts, date_format, initial_date)
remove_date_function = %! $('##{date_span_id}').html(''); $('##{dom_id}')[0].value = ''; !
date_picker =
hidden_field_tag(name, date_string, :id => dom_id) + ' ' +
link_to_function(
content_tag(:span, date_string, :id => date_span_id),
remove_date_function,
:class => 'date_label',
:title => ::Wice::WiceGridNlMessageProvider.get_message(:DATE_STRING_TOOLTIP))
html = "<span id=\"#{datepicker_placeholder_id}\">#{date_picker}</span>"
javascript = calendar_constructor_jquery(dom_id, Wice::Defaults::DATE_FORMAT_JQUERY,
date_span_id, opts[:fire_event], html_opts[:title], datepicker_placeholder_id)
[html, javascript]
end
# Prototype
def date_calendar_prototype(initial_date, opts = {}, html_opts = {}) #:nodoc:
select_date_datetime_common_prototype(initial_date, opts, html_opts, false, Wice::Defaults::DATE_FORMAT)
end
def datetime_calendar_prototype(initial_date, opts = {}, html_opts = {}) #:nodoc:
select_date_datetime_common_prototype(initial_date, opts, html_opts, true, Wice::Defaults::DATETIME_FORMAT)
end
protected
# common
# Normalizes calendar options and derives the DOM ids used by both the
# jQuery and Prototype calendar builders.
#
# Returns [options, input name, formatted date string, input dom id,
# placeholder span id, visible-date span id].
def prepare_data_for_calendar(opts, date_format, initial_date) #:nodoc:
  options = { :prefix => 'date' }.merge(opts)
  name = options[:prefix]
  date_string = initial_date.nil? ? '' : initial_date.strftime(date_format)
  # Collapse brackets/dots in the field name into a clean DOM id,
  # unless the caller supplied an explicit :id.
  dom_id = options[:id]
  dom_id ||= name.gsub(/([\[\(])|(\]\[)/, '_').gsub(/[\]\)]/, '').gsub(/\./, '_').gsub(/_+/, '_')
  [options, name, date_string, dom_id, "#{dom_id}_date_placeholder", "#{dom_id}_date_view"]
end
# jquery
def calendar_constructor_jquery(dom_id, date_format, date_span_id, fireEvent, title, datepicker_placeholder_id)
javascript = %| $( "##{dom_id}" ).datepicker({\n|
javascript << %| firstDay: 1,\n|
javascript << %| showOn: "button",\n|
javascript << %| dateFormat: "#{date_format}",\n|
javascript << %| buttonImage: "#{::Wice::Defaults::CALENDAR_ICON}",\n|
javascript << %| buttonImageOnly: true,\n|
javascript << %| buttonText: "#{title}",\n|
javascript << %| changeMonth: true,\n|
javascript << %| changeYear: true,\n|
javascript << %| onSelect: function(dateText, inst) {\n|
javascript << %| $("##{date_span_id}").html(dateText);\n|
if fireEvent
javascript << %| $("##{dom_id}").trigger('wg:calendarChanged');\n|
end
javascript << %| }\n|
javascript << %| });\n|
lang = Object.const_defined?(:I18n) ? I18n.locale : nil
if lang
javascript << %| if ($.datepicker.regional['#{lang}']){\n|
javascript << %| var regionalOptions = $.datepicker.regional['#{lang}'] ;\n|
javascript << %| delete regionalOptions.dateFormat ;\n|
javascript << %| delete regionalOptions.firstDate ;\n|
javascript << %| $( "##{dom_id}" ).datepicker( "option", $.datepicker.regional['#{lang}'] );\n|
javascript << %| }\n|
end
javascript += %| $('##{datepicker_placeholder_id} .ui-datepicker-trigger').addClass('clickable');\n|
javascript
end
# prortotype
def calendar_constructor_prototype(popup_trigger_icon_id, dom_id, date_format, date_span_id, with_time, fireEvent)
javascript = ''
unless @_wg_date_picker_language_initialized
lang = Object.const_defined?(:I18n) ? I18n.locale : nil
javascript << %| Calendar.language = '#{lang}';\n| unless lang.blank?
@_wg_date_picker_language_initialized = true
end
javascript << %| new Calendar({\n |
javascript << %| popupTriggerElement : "#{popup_trigger_icon_id}",\n |
javascript << %| initialDate : $('#{dom_id}').value,\n |
if fireEvent
javascript << %| onHideCallback : function(){Event.fire($(#{dom_id}), 'wg:calendarChanged')},\n |
end
javascript << %| dateFormat : "#{date_format}",\n|
unless Wice::Defaults::POPUP_PLACEMENT_STRATEGY == :trigger
javascript << %| popupPositioningStrategy : "#{Wice::Defaults::POPUP_PLACEMENT_STRATEGY}",\n|
end
if with_time
javascript << %| withTime : true,\n|
end
javascript << %| outputFields : $A(['#{date_span_id}', '#{dom_id}'])\n |
javascript << %| });\n|
javascript
end
def select_date_datetime_common_prototype(initial_date, opts, html_opts, with_time, date_format) #:nodoc:
options, name, date_string, dom_id, datepicker_placeholder_id, date_span_id =
prepare_data_for_calendar(opts, date_format, initial_date)
popup_trigger_icon_id = dom_id + '_trigger'
function = %! $('#{date_span_id}').innerHTML = ''; $('#{dom_id}').value = ''; !
if opts[:fire_event]
function += "Event.fire($(#{dom_id}), 'wg:calendarChanged')"
end
date_picker = image_tag(Defaults::CALENDAR_ICON,
:id => popup_trigger_icon_id,
:class => 'clickable',
:title => html_opts[:title]) +
link_to_function(
content_tag(:span, date_string, :id => date_span_id),
function,
:class => 'date_label',
:title => WiceGridNlMessageProvider.get_message(:DATE_STRING_TOOLTIP)) + ' ' +
hidden_field_tag(name, date_string, :class => 'text-input', :id => dom_id)
html = "<span id=\"#{datepicker_placeholder_id}\">#{date_picker}</span>"
javascript = calendar_constructor_prototype(popup_trigger_icon_id, dom_id, date_format, date_span_id, with_time, opts[:fire_event])
[html, javascript]
end
end
end
|
module Importers
class ProfilesImporter < BaseImporter
JOB_LEVEL_CODES = {
'C'=> 'councillor',
'D' => 'director',
'E' => 'temporary_worker'
}
# Imports one Person profile per CSV row (';'-separated columns).
# Rows with an :n_personal code update an existing councillor record;
# other rows create a new Person. A profile is only (re)imported when
# the row's :fecha timestamp is newer than the person's profiled_at.
def import!
  each_row(col_sep: ";") do |row|
    person = Person.new
    if row[:n_personal].present?
      # Councillors are pre-registered; look them up by personal code
      # (first! raises when no match is found).
      person = Person.where(councillor_code: row[:n_personal]).first!
    else
      person.first_name = row[:nombre]
      person.last_name = row[:apellidos]
      person.admin_first_name = transliterate(row[:nombre])
      person.admin_last_name = transliterate(row[:apellidos])
      person.role = row[:cargo]
      person.job_level = JOB_LEVEL_CODES[row[:codigo_cargo]]
      # councillors should have a personal code and be managed on the other side of this if/else
      raise person if person.job_level == 'councillor'
    end
    # NOTE(review): DateTime is the legacy date/time class -- consider Time.
    profiled_at = DateTime.parse(row[:fecha])
    if person.profiled_at.blank? || person.profiled_at < profiled_at
      person.profiled_at = profiled_at
      puts "Importing profile for #{person.name}"
      person.twitter = row[:cuenta_de_twitter]
      person.facebook = row[:cuenta_de_facebook]
      person.unit = row[:unidad]
      # Structured sections are parsed from repeated column groups.
      parse_studies(person, row)
      parse_courses(person, row)
      parse_languages(person, row)
      parse_career(person, row)
      parse_political_posts(person, row)
      person.publications = row[:publicaciones]
      person.teaching_activity = row[:actividad]
      person.special_mentions = row[:distinciones]
      person.other = row[:otra_informacion]
      # NOTE(review): save's return value is ignored, so validation
      # failures are silently dropped -- consider save! if that matters.
      person.save
    end
  end
end
private
# Reads up to four official degrees from the row's repeated column
# groups ("1_titulacion_oficial".."4_titulacion_oficial").
# NOTE(review): assumes `row` behaves like CSV::Row, where #index returns
# the column position and integer indexing reads by position, and that
# each group spans 4 consecutive columns -- confirm against BaseImporter.
def parse_studies(person, row)
  person.profile['studies'] = []
  (1..4).each do |index|
    col = row.index("#{index}_titulacion_oficial".to_sym)
    person.add_study(row[col], row[col+1], row[col+2], row[col+3])
  end
  # The free-text comment sits 4 columns after the last group's start.
  studies_comment_col = row.index(:"4_titulacion_oficial")+4
  person.studies_comment = row[studies_comment_col]
end
def parse_courses(person, row)
person.profile['courses'] = []
(1..4).each do |index|
col = row.index("#{index}_nombre_del_curso".to_sym)
person.add_course(row[col], row[col+1], row[col+2], row[col+3])
end
courses_comment_col = row.index(:"4_nombre_del_curso")+4
person.courses_comment = row[courses_comment_col]
end
# Populates the person's language list from the CSV row.
# Only languages with a non-blank value are recorded.
def parse_languages(person, row)
  person.profile['languages'] = []
  person.add_language('Inglés', row[:ingles]) if row[:ingles].present?
  person.add_language('Francés', row[:frances]) if row[:frances].present?
  person.add_language('Alemán', row[:aleman]) if row[:aleman].present?
  person.add_language('Italiano', row[:italiano]) if row[:italiano].present?
  # Fix: guard the free-form language like the fixed ones above, so blank
  # rows don't add an empty language entry.
  person.add_language(row[:otro_idioma], row[:nivel_otro_idioma]) if row[:otro_idioma].present?
end
# Reads public-job and private-job history groups, the career comment, and
# the person's civil-service details.
def parse_career(person, row)
person.profile['public_jobs'] = []
(1..4).each do |index|
col = row.index("#{index}_puesto_desempenado".to_sym)
person.add_public_job(row[col], row[col+1], row[col+2], row[col+3])
end
person.profile['private_jobs'] = []
(1..4).each do |index|
col = row.index("#{index}_cargoactividad".to_sym)
person.add_private_job(row[col], row[col+1], row[col+2], row[col+3])
end
# Free-text comment sits four columns after the last private-job header
career_comment_col = row.index(:"4_cargoactividad")+4
person.career_comment = row[career_comment_col]
person.public_jobs_level = row[:grado_consolidado]
person.public_jobs_body = row[:cuerpo_o_escala_de_la_administracion]
person.public_jobs_start_year = row[:ano_de_ingreso]
end
# Reads the four "N_cargo" column groups into
# person.profile['political_posts'], plus the trailing free-text comment.
def parse_political_posts(person, row)
person.profile['political_posts'] = []
(1..4).each do |index|
col = row.index("#{index}_cargo".to_sym)
person.add_political_post(row[col], row[col+1], row[col+2], row[col+3])
end
# Free-text comment sits four columns after the last group's header
political_posts_comment_col = row.index(:"4_cargo")+4
person.political_posts_comment = row[political_posts_comment_col]
end
end
end
Updates profiles, non-profiles and calendars
module Importers
  # Imports person profiles from a CSV export. Each row either references an
  # existing councillor (via the :n_personal code) or describes a person to be
  # created from scratch; profile data is only applied when the row is newer
  # than the person's stored profile timestamp.
  class ProfilesImporter < BaseImporter
    # CSV :codigo_cargo codes mapped to internal job levels.
    JOB_LEVEL_CODES = {
      'C'=> 'councillor',
      'D' => 'director',
      'E' => 'temporary_worker'
    }

    # Processes every CSV row, creating/updating Person records and their
    # profile sections (studies, courses, languages, career, political posts).
    def import!
      each_row(col_sep: ",") do |row|
        person = Person.new
        if row[:n_personal].present?
          # Councillors must already exist; first! raises if no record
          # matches the personal code.
          person = Person.where(councillor_code: row[:n_personal]).first!
        else
          person.first_name = row[:nombre]
          person.last_name = row[:apellidos]
          person.admin_first_name = transliterate(row[:nombre])
          person.admin_last_name = transliterate(row[:apellidos])
          person.role = row[:cargo]
          person.job_level = JOB_LEVEL_CODES[row[:codigo_cargo]]
          # Councillors should have a personal code and be managed on the
          # other side of this if/else. Fix: the previous `raise person`
          # passed a non-exception object to Kernel#raise, which produces an
          # unhelpful TypeError instead of explaining the problem.
          if person.job_level == 'councillor'
            raise "Councillor row without :n_personal code: #{row[:nombre]} #{row[:apellidos]}"
          end
        end
        profiled_at = DateTime.parse(row[:fecha])
        # Only apply this row when it is newer than the stored profile
        if person.profiled_at.blank? || person.profiled_at < profiled_at
          person.profiled_at = profiled_at
          puts "Importing profile for #{person.name}"
          person.twitter = row[:cuenta_de_twitter]
          person.facebook = row[:cuenta_de_facebook]
          person.unit = row[:unidad]
          parse_studies(person, row)
          parse_courses(person, row)
          parse_languages(person, row)
          parse_career(person, row)
          parse_political_posts(person, row)
          person.publications = row[:publicaciones]
          person.teaching_activity = row[:actividad]
          person.special_mentions = row[:distinciones]
          person.other = row[:otra_informacion]
          # NOTE(review): save failures are silently ignored — consider save!
          person.save
        end
      end
    end

    private

    # Reads the four "N_titulacion_oficial" column groups into
    # person.profile['studies'], plus the free-text comment that sits four
    # columns after the last group's header.
    def parse_studies(person, row)
      person.profile['studies'] = []
      (1..4).each do |index|
        col = row.index("#{index}_titulacion_oficial".to_sym)
        person.add_study(row[col], row[col+1], row[col+2], row[col+3])
      end
      studies_comment_col = row.index(:"4_titulacion_oficial")+4
      person.studies_comment = row[studies_comment_col]
    end

    # Same column layout as parse_studies, for "N_nombre_del_curso" groups.
    def parse_courses(person, row)
      person.profile['courses'] = []
      (1..4).each do |index|
        col = row.index("#{index}_nombre_del_curso".to_sym)
        person.add_course(row[col], row[col+1], row[col+2], row[col+3])
      end
      courses_comment_col = row.index(:"4_nombre_del_curso")+4
      person.courses_comment = row[courses_comment_col]
    end

    # Language skills: the four fixed languages are only added when present;
    # the free-form "other language" pair is passed through unconditionally.
    def parse_languages(person, row)
      person.profile['languages'] = []
      person.add_language('Inglés', row[:ingles]) if row[:ingles].present?
      person.add_language('Francés', row[:frances]) if row[:frances].present?
      person.add_language('Alemán', row[:aleman]) if row[:aleman].present?
      person.add_language('Italiano', row[:italiano]) if row[:italiano].present?
      person.add_language(row[:otro_idioma], row[:nivel_otro_idioma])
    end

    # Public and private job history plus civil-service details.
    def parse_career(person, row)
      person.profile['public_jobs'] = []
      (1..4).each do |index|
        col = row.index("#{index}_puesto_desempenado".to_sym)
        person.add_public_job(row[col], row[col+1], row[col+2], row[col+3])
      end
      person.profile['private_jobs'] = []
      (1..4).each do |index|
        col = row.index("#{index}_cargoactividad".to_sym)
        person.add_private_job(row[col], row[col+1], row[col+2], row[col+3])
      end
      career_comment_col = row.index(:"4_cargoactividad")+4
      person.career_comment = row[career_comment_col]
      person.public_jobs_level = row[:grado_consolidado]
      person.public_jobs_body = row[:cuerpo_o_escala_de_la_administracion]
      person.public_jobs_start_year = row[:ano_de_ingreso]
    end

    # Political posts held, with trailing free-text comment.
    def parse_political_posts(person, row)
      person.profile['political_posts'] = []
      (1..4).each do |index|
        col = row.index("#{index}_cargo".to_sym)
        person.add_political_post(row[col], row[col+1], row[col+2], row[col+3])
      end
      political_posts_comment_col = row.index(:"4_cargo")+4
      person.political_posts_comment = row[political_posts_comment_col]
    end
  end
end
|
# vim: fileencoding=utf-8
require_relative 'base'
module LogArchiver
  module Plugin
    # IRC user-interface commands for the log archiver (".log" prefix).
    class UserInterface < Base
      include Cinch::Plugin

      set(plugin_name: 'UserInterface')
      self.prefix = '.log '

      # Commands that answer with public log URLs
      match('url', method: :url_list)
      match(/url list\b/, method: :url_list)
      match(/url today\b/, method: :url_today)
      match(/url yesterday\b/, method: :url_yesterday)
      match(%r(url ((?:19|20)\d{2}[-/][01]\d[-/][0-3]\d)), method: :url_date)
      # Reports whether logging is currently enabled
      match(/status/, method: :status)
      # Reports the site name and description
      match(/site desc/, method: :site_description)

      def initialize(*)
        super
        @config = config
        # Host used when building public log URLs
        @base_url = config['URL']
      end

      # Announces the channel's public log page URL.
      # @param [Cinch::Message] m
      # @return [void]
      def url_list(m)
        header = ui_header('URL')
        channel = Channel.from_cinch_message(m)
        unless channel
          send_and_record_channel_not_registered(m, 'URL')
          return
        end
        channel_url =
          Rails.application.routes.url_helpers.channel_url(channel,
                                                           host: @base_url)
        send_and_record(m, "#{header}#{channel_url}")
      end

      # Announces the URL of today's log.
      # @param [Cinch::Message] m
      # @return [void]
      def url_today(m)
        send_and_record_day_url(m) do |channel|
          ChannelBrowse::Day.today(channel)
        end
      end

      # Announces the URL of yesterday's log.
      # @param [Cinch::Message] m
      # @return [void]
      def url_yesterday(m)
        send_and_record_day_url(m) do |channel|
          ChannelBrowse::Day.yesterday(channel)
        end
      end

      # Announces the log URL for the given date.
      # @param [Cinch::Message] m
      # @param [String] date the requested date
      # @return [void]
      def url_date(m, date)
        send_and_record_day_url(m) do |channel|
          ChannelBrowse::Day.new(channel: channel, date: date)
        end
      end

      # Announces the channel's current logging status.
      # @param [Cinch::Message] m
      # @return [void]
      def status(m)
        channel = Channel.from_cinch_message(m)
        unless channel
          send_and_record_channel_not_registered(m, 'status')
          return
        end
        header = "#{ui_header('status')}#{m.channel} は"
        message =
          if channel.logging_enabled?
            "#{header}ログを記録しています"
          else
            "#{header}ログの記録を停止しています"
          end
        send_and_record(m, message)
      end

      # Announces the site description.
      # Fix: the `match(/site desc/, ...)` registration above points at
      # :site_description, but this method was named site_desc, so the
      # ".log site desc" command failed; the name now matches.
      # @param [Cinch::Message] m
      # @return [void]
      def site_description(m)
        header = ui_header('desc')
        send_and_record(m, header + Setting.first.text_on_homepage)
      end

      private

      # Builds the common message header.
      # @param [String] subcommand the subcommand label
      # @return [String]
      def ui_header(subcommand)
        # Settings can change from the admin screen, so re-read every time
        setting = Setting.first
        "#{setting.site_title}<#{subcommand}>: "
      end

      # Announces and records that the channel is not registered.
      # @param [Cinch::Message] m the received message
      # @param [String] subcommand
      # @return [void]
      def send_and_record_channel_not_registered(m, subcommand)
        header = ui_header(subcommand)
        send_and_record(m, "#{header}#{m.channel} は登録されていません")
      end

      # Announces and records the URL of one day's log.
      # @param [Cinch::Message] m the received message
      # @yieldparam channel [::Channel] the channel
      # @yieldreturn [ChannelBrowse::Day] a one-day browse object
      # @return [void]
      #
      # The block receives the channel and must return a ChannelBrowse::Day;
      # the block form lets the channel-registration check run first.
      #
      # If the day's attributes are invalid, an error about the date
      # specification is announced and recorded instead.
      def send_and_record_day_url(m, &block)
        channel = Channel.from_cinch_message(m)
        unless channel
          send_and_record_channel_not_registered(m, 'URL')
          return
        end
        header = ui_header('URL')
        browse_day = block[channel]
        unless browse_day.valid?
          send_and_record(m, "#{header}日付指定が間違っています")
          return
        end
        send_and_record(m, "#{header}#{browse_day.url(@base_url)}")
      end
    end
  end
end
IRCボット: .log site descに反応しなかった問題を修正する
matchで指定した名前のメソッドが存在しなかった。
対応するメソッドの名前を指定された名前に合わせた。
# vim: fileencoding=utf-8
require_relative 'base'
module LogArchiver
module Plugin
# IRC user-interface commands for the log archiver (".log" prefix)
class UserInterface < Base
include Cinch::Plugin
set(plugin_name: 'UserInterface')
self.prefix = '.log '
# Commands that answer with public log URLs
match('url', method: :url_list)
match(/url list\b/, method: :url_list)
match(/url today\b/, method: :url_today)
match(/url yesterday\b/, method: :url_yesterday)
match(%r(url ((?:19|20)\d{2}[-/][01]\d[-/][0-3]\d)), method: :url_date)
# Reports whether logging is currently enabled
match(/status/, method: :status)
# Reports the site name and description
match(/site desc/, method: :site_description)
def initialize(*)
super
@config = config
# Host used when building public log URLs
@base_url = config['URL']
end
# Announces the channel's public log page URL
# @param [Cinch::Message] m
# @return [void]
def url_list(m)
header = ui_header('URL')
channel = Channel.from_cinch_message(m)
unless channel
send_and_record_channel_not_registered(m, 'URL')
return
end
channel_url =
Rails.application.routes.url_helpers.channel_url(channel,
host: @base_url)
send_and_record(m, "#{header}#{channel_url}")
end
# Announces the URL of today's log
# @param [Cinch::Message] m
# @return [void]
def url_today(m)
send_and_record_day_url(m) do |channel|
ChannelBrowse::Day.today(channel)
end
end
# Announces the URL of yesterday's log
# @param [Cinch::Message] m
# @return [void]
def url_yesterday(m)
send_and_record_day_url(m) do |channel|
ChannelBrowse::Day.yesterday(channel)
end
end
# Announces the log URL for the given date
# @param [Cinch::Message] m
# @param [String] date the requested date
# @return [void]
def url_date(m, date)
send_and_record_day_url(m) do |channel|
ChannelBrowse::Day.new(channel: channel, date: date)
end
end
# Announces the channel's current logging status
# @param [Cinch::Message] m
# @return [void]
def status(m)
channel = Channel.from_cinch_message(m)
unless channel
send_and_record_channel_not_registered(m, 'status')
return
end
header = "#{ui_header('status')}#{m.channel} は"
message =
if channel.logging_enabled?
"#{header}ログを記録しています"
else
"#{header}ログの記録を停止しています"
end
send_and_record(m, message)
end
# Announces the site description
# @param [Cinch::Message] m
# @return [void]
def site_description(m)
header = ui_header('desc')
send_and_record(m, header + Setting.first.text_on_homepage)
end
private
# Builds the common message header
# @param [String] subcommand the subcommand label
# @return [String]
def ui_header(subcommand)
# Settings can change from the admin screen, so re-read every time
setting = Setting.first
"#{setting.site_title}<#{subcommand}>: "
end
# Announces and records that the channel is not registered
# @param [Cinch::Message] m the received message
# @param [String] subcommand
# @return [void]
def send_and_record_channel_not_registered(m, subcommand)
header = ui_header(subcommand)
send_and_record(m, "#{header}#{m.channel} は登録されていません")
end
# Announces and records the URL of one day's log
# @param [Cinch::Message] m the received message
# @yieldparam channel [::Channel] the channel
# @yieldreturn [ChannelBrowse::Day] a one-day browse object
# @return [void]
#
# The block receives the channel and must return a ChannelBrowse::Day;
# the block form lets the channel-registration check run first.
#
# If the day's attributes are invalid, an error about the date
# specification is announced and recorded instead.
def send_and_record_day_url(m, &block)
channel = Channel.from_cinch_message(m)
unless channel
send_and_record_channel_not_registered(m, 'URL')
return
end
header = ui_header('URL')
browse_day = block[channel]
unless browse_day.valid?
send_and_record(m, "#{header}日付指定が間違っています")
return
end
send_and_record(m, "#{header}#{browse_day.url(@base_url)}")
end
end
end
end
|
module Jekyll
  module Assets
    # Per-environment default settings for jekyll-assets, plus a recursive
    # merge helper that fills user config with any missing defaults.
    module Configuration
      # Development/test defaults: no digests, no compression.
      DEVELOPMENT = {
        "skip_prefix_with_cdn" => false,
        "prefix" => "/assets",
        "digest" => false,
        "assets" => [],
        "compress" => {
          "css" => false,
          "js" => false
        },
        "sources" => [
          "_assets/css", "_assets/stylesheets",
          "_assets/images", "_assets/img", "_assets/fonts",
          "_assets/javascripts", "_assets/js"
        ]
      }

      # Production overrides: fingerprinted assets, compression enabled.
      PRODUCTION = DEVELOPMENT.merge({
        "digest" => true,
        "compress" => {
          "css" => true,
          "js" => true
        },
      })

      # Picks the default set for the current Jekyll environment.
      def self.defaults
        ["development", "test"].include?(Jekyll.env) ? DEVELOPMENT : PRODUCTION
      end

      # Recursively fills `merge_into` with keys it is missing from `config`;
      # existing non-hash values win, nested hashes are merged key by key.
      # Works on (and returns) a shallow copy, so the caller's top-level hash
      # is not reassigned.
      def self.merge(merge_into, config = self.defaults)
        result = merge_into.dup
        config.each do |key, value|
          value_is_hash = value.is_a?(Hash)
          if !result.has_key?(key) || (value_is_hash && !result[key])
            result[key] = value
          elsif value_is_hash
            result[key] = merge(result[key], value)
          end
        end
        result
      end
    end
  end
end
Reformatting only: wrap the `defaults` ternary across two lines for readability.
module Jekyll
module Assets
# Per-environment default settings for jekyll-assets, plus a recursive
# merge helper that fills user config with any missing defaults
module Configuration
# Development/test defaults: no digests, no compression
DEVELOPMENT = {
"skip_prefix_with_cdn" => false,
"prefix" => "/assets",
"digest" => false,
"assets" => [],
"compress" => {
"css" => false,
"js" => false
},
"sources" => [
"_assets/css", "_assets/stylesheets",
"_assets/images", "_assets/img", "_assets/fonts",
"_assets/javascripts", "_assets/js"
]
}
# Production overrides: fingerprinted assets, compression enabled
PRODUCTION = DEVELOPMENT.merge({
"digest" => true,
"compress" => {
"css" => true,
"js" => true
},
})
# Picks the default set for the current Jekyll environment
def self.defaults
%W(development test).include?(Jekyll.env) ? \
DEVELOPMENT : PRODUCTION
end
# Recursively fills `merge_into` with keys it is missing from `config`;
# existing non-hash values win, nested hashes are merged key by key.
# Works on (and returns) a shallow copy of `merge_into`.
def self.merge(merge_into, config = self.defaults)
merge_into = merge_into.dup
config.each_with_object(merge_into) do |(k, v), h|
if !h.has_key?(k) || (v.is_a?(Hash) && !h[k])
h[k] = v
elsif v.is_a?(Hash)
h[k] = merge h[k], \
v
end
end
merge_into
end
end
end
end
|
# Cookbook Name:: overseer
# Library:: Chef::Overseer
#
# Author:: Aaron Kalin <akalin@martinisoftware.com>
#
# Copyright 2012, Aaron Kalin
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
module Overseer
  # Public: Set up application default settings.
  #
  # apps - data bag hash of applications to be set up
  #
  # Fills in derived defaults (user/name, env, HTTP settings) for each app;
  # every app without an explicit PORT gets a unique one, spaced 100 apart.
  def initialize_app_defaults(apps)
    http_port = node['overseer']['first_http_port']
    apps.each do |app|
      app['user'] ||= app['id']
      app['name'] ||= app['id']
      app['env'] ||= Hash.new
      app['http'] ||= Hash.new
      app['http']['host_name'] ||= "_"
      app['http']['http_port'] ||= 80
      deploy_to = "#{node['overseer']['root_path']}/#{app['name']}"
      app['http']['upstream_server'] ||=
        "unix:#{deploy_to}/shared/sockets/unicorn.sock"
      app['vhost_template'] ||= "overseer::nginx_vhost.conf.erb"
      if app['env']['PORT'].nil?
        app['env']['PORT'] = http_port.to_s
        http_port += 100
      end
      if app['env']['RAILS_ENV'].nil?
        app['env']['RAILS_ENV'] = "production"
      end
    end
  end

  # Installs an app-local RVM under the app's root directory.
  def install_rvm_environment(app, user)
    include_recipe 'rvm'
    version = node['rvm']['version']
    branch = node['rvm']['branch']
    script_flags = build_script_flags(version, branch)
    installer_url = node['rvm']['installer_url']
    rvm_prefix = "#{node['overseer']['root_path']}/#{app['name']}"
    install_rvm rvm_prefix: rvm_prefix,
                installer_url: installer_url,
                script_flags: script_flags,
                user: app['name']
  end

  # Creates the app's system account and returns the user hash
  # ({'id' => name, 'gid' => group, 'home' => path}) that the other
  # helpers below consume.
  def create_app_user(user)
    user_home = "#{node['overseer']['root_path']}/#{user}"
    user_account user do
      home user_home
      system_user false
      manage_home true
      create_group true
      action :create
    end
    { 'id' => user, 'gid' => user, 'home' => user_home }
  end

  # Installs the foreman runit templates into the user's home.
  def create_app_user_foreman_templates(user)
    directory "#{user['home']}/.foreman/templates" do
      owner user['id']
      group user['gid']
      mode "2755"
      recursive true
    end
    cookbook_file "#{user['home']}/.foreman/templates/run.erb" do
      source "foreman/runit/run.erb"
      owner user['id']
      group user['gid']
      mode "0644"
    end
    cookbook_file "#{user['home']}/.foreman/templates/log_run.erb" do
      source "foreman/runit/log_run.erb"
      owner user['id']
      group user['gid']
      mode "0644"
    end
  end

  # Sets up the per-user runit service tree and its log directory.
  def create_app_user_runit_service(user)
    directory "#{user['home']}/service" do
      owner user['id']
      group user['gid']
      mode "2755"
      recursive true
    end
    directory "/var/log/user-#{user['id']}" do
      owner "root"
      group "root"
      mode "755"
      recursive true
    end
    runit_service "user-#{user['id']}" do
      template_name "user"
      options({ user: user['id'] })
    end
  end

  # Creates the capistrano-style deploy directories for the app.
  def create_app_dirs(config, user)
    root_path = node['overseer']['root_path']
    app_home = "#{root_path}/#{config['name']}"
    directory root_path
    [app_home, "#{app_home}/shared", "#{app_home}/shared/config"].each do |dir|
      directory dir do
        owner user['id']
        group user['gid']
        mode "2775"
        recursive true
      end
    end
  end

  # Writes the app's env file and (optionally) manages its SSH keys.
  def configure_app_environment(config, user)
    root_path = node['overseer']['root_path']
    app_home = "#{root_path}/#{config['name']}"
    template "#{app_home}/shared/env" do
      source "env.erb"
      owner user['id']
      group user['gid']
      mode "0664"
      variables({ config: config })
    end
    if config['ssh_keys']
      # Fix: `user` here is the hash returned by create_app_user, not an
      # account name — the user_account resource needs the name, user['id']
      # (every other use of `user` in this module indexes the hash).
      user_account user['id'] do
        ssh_keys config['ssh_keys']
        action :manage
      end
    end
  end

  # Renders and enables the app's nginx vhost (skipped when template is "none").
  def create_app_vhost(app, user)
    template_cookbook, template_source = app['vhost_template'].split('::')
    template "#{node['nginx']['dir']}/sites-available/#{app['name']}.conf" do
      cookbook template_cookbook
      source template_source
      owner "root"
      mode "0644"
      variables({
        app: app,
        deploy_to_path: "#{node['overseer']['root_path']}/#{app['name']}",
        log_path: node['nginx']['log_dir'],
      })
      not_if { template_cookbook == "none" }
      notifies :reload, "service[nginx]"
    end
    nginx_site "#{app['name']}.conf"
  end
end
User is a hash, use id for account config
# Cookbook Name:: overseer
# Library:: Chef::Overseer
#
# Author:: Aaron Kalin <akalin@martinisoftware.com>
#
# Copyright 2012, Aaron Kalin
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
module Overseer
# Public: Setup application default settings
#
# apps - data bag hash of applications to be setup
#
# Sets up app data: fills in derived defaults (user/name, env, HTTP
# settings); each app without an explicit PORT gets a unique one,
# spaced 100 apart
def initialize_app_defaults(apps)
http_port = node['overseer']['first_http_port']
apps.each do |app|
app['user'] ||= app['id']
app['name'] ||= app['id']
app['env'] ||= Hash.new
app['http'] ||= Hash.new
app['http']['host_name'] ||= "_"
app['http']['http_port'] ||= 80
deploy_to = "#{node['overseer']['root_path']}/#{app['name']}"
app['http']['upstream_server'] ||=
"unix:#{deploy_to}/shared/sockets/unicorn.sock"
app['vhost_template'] ||= "overseer::nginx_vhost.conf.erb"
if app['env']['PORT'].nil?
app['env']['PORT'] = http_port.to_s
http_port += 100
end
if app['env']['RAILS_ENV'].nil?
app['env']['RAILS_ENV'] = "production"
end
end
end
# Installs an app-local RVM under the app's root directory
def install_rvm_environment(app, user)
include_recipe 'rvm'
version = node['rvm']['version']
branch = node['rvm']['branch']
script_flags = build_script_flags(version, branch)
installer_url = node['rvm']['installer_url']
rvm_prefix = "#{node['overseer']['root_path']}/#{app['name']}"
install_rvm rvm_prefix: rvm_prefix,
installer_url: installer_url,
script_flags: script_flags,
user: app['name']
end
# Creates the app's system account and returns the user hash
# ({'id' => name, 'gid' => group, 'home' => path}) consumed below
def create_app_user(user)
user_home = "#{node['overseer']['root_path']}/#{user}"
user_account user do
home user_home
system_user false
manage_home true
create_group true
action :create
end
{ 'id' => user, 'gid' => user, 'home' => user_home }
end
# Installs the foreman runit templates into the user's home
def create_app_user_foreman_templates(user)
directory "#{user['home']}/.foreman/templates" do
owner user['id']
group user['gid']
mode "2755"
recursive true
end
cookbook_file "#{user['home']}/.foreman/templates/run.erb" do
source "foreman/runit/run.erb"
owner user['id']
group user['gid']
mode "0644"
end
cookbook_file "#{user['home']}/.foreman/templates/log_run.erb" do
source "foreman/runit/log_run.erb"
owner user['id']
group user['gid']
mode "0644"
end
end
# Sets up the per-user runit service tree and its log directory
def create_app_user_runit_service(user)
directory "#{user['home']}/service" do
owner user['id']
group user['gid']
mode "2755"
recursive true
end
directory "/var/log/user-#{user['id']}" do
owner "root"
group "root"
mode "755"
recursive true
end
runit_service "user-#{user['id']}" do
template_name "user"
options({ user: user['id'] })
end
end
# Creates the capistrano-style deploy directories for the app
def create_app_dirs(config, user)
root_path = node['overseer']['root_path']
app_home = "#{root_path}/#{config['name']}"
directory root_path
[app_home, "#{app_home}/shared", "#{app_home}/shared/config"].each do |dir|
directory dir do
owner user['id']
group user['gid']
mode "2775"
recursive true
end
end
end
# Writes the app's env file and (optionally) manages its SSH keys
def configure_app_environment(config, user)
root_path = node['overseer']['root_path']
app_home = "#{root_path}/#{config['name']}"
template "#{app_home}/shared/env" do
source "env.erb"
owner user['id']
group user['gid']
mode "0664"
variables({ config: config })
end
if config['ssh_keys']
# `user` is the hash from create_app_user; the resource takes the name
user_account user['id'] do
ssh_keys config['ssh_keys']
action :manage
end
end
end
# Renders and enables the app's nginx vhost (skipped when template is "none")
def create_app_vhost(app, user)
template_cookbook, template_source = app['vhost_template'].split('::')
template "#{node['nginx']['dir']}/sites-available/#{app['name']}.conf" do
cookbook template_cookbook
source template_source
owner "root"
mode "0644"
variables({
app: app,
deploy_to_path: "#{node['overseer']['root_path']}/#{app['name']}",
log_path: node['nginx']['log_dir'],
})
not_if { template_cookbook == "none" }
notifies :reload, "service[nginx]"
end
nginx_site "#{app['name']}.conf"
end
end
|
module JSound
  module Midi
    # A collection of methods for building MIDI messages.
    module MessageBuilder
      # Note-on for the given pitch; velocity defaults to maximum (127).
      def note_on(pitch, velocity=127, channel=0)
        Messages::NoteOn.new(pitch,velocity,channel)
      end

      # Note-off for the given pitch.
      def note_off(pitch, velocity=127, channel=0)
        Messages::NoteOff.new(pitch,velocity,channel)
      end

      # Most methods in here take 7-bit ints for their args, but this one takes a 14-bit
      # The value can be an int in the range 0-16383 (8192 is no bend)
      # or it can be a float, which is assumed to be in the range -1.0 to 1.0
      def pitch_bend(value, channel=0)
        Messages::PitchBend.new(value, channel)
      end

      # Control-change (CC) message.
      def control_change(control, value, channel=0)
        Messages::ControlChange.new(control, value, channel)
      end

      # Convenience addition: the MIDI "All Notes Off" channel-mode message
      # (controller 123, value 0), which silences every sounding note on the
      # channel. Backward-compatible: no existing method changes.
      def all_notes_off(channel=0)
        control_change(123, 0, channel)
      end

      # Channel pressure (mono aftertouch).
      def channel_pressure(pressure, channel=0)
        Messages::ChannelPressure.new(pressure, channel)
      end
      alias channel_aftertouch channel_pressure

      # Polyphonic key pressure (per-pitch aftertouch).
      def poly_pressure(pitch, pressure, channel=0)
        Messages::PolyPressure.new(pitch, pressure, channel)
      end
      alias poly_aftertouch poly_pressure

      # Program (patch) change.
      def program_change(program, channel=0)
        Messages::ProgramChange.new(program, channel)
      end

      # Make all methods be module functions (accessible by sending the method name to module directly)
      instance_methods.each do |method|
        module_function method
      end
    end
  end
end
added all_notes_off method to MessageBuilder
module JSound
module Midi
# A collection of methods for building MIDI messages.
module MessageBuilder
# Note-on for the given pitch; velocity defaults to maximum (127)
def note_on(pitch, velocity=127, channel=0)
Messages::NoteOn.new(pitch,velocity,channel)
end
# Note-off for the given pitch
def note_off(pitch, velocity=127, channel=0)
Messages::NoteOff.new(pitch,velocity,channel)
end
# Most methods in here take 7-bit ints for their args, but this one takes a 14-bit
# The value can be an int in the range 0-16383 (8192 is no bend)
# or it can be a float, which is assumed to be in the range -1.0 to 1.0
def pitch_bend(value, channel=0)
Messages::PitchBend.new(value, channel)
end
# Control-change (CC) message
def control_change(control, value, channel=0)
Messages::ControlChange.new(control, value, channel)
end
# "All Notes Off" channel-mode message (CC 123, value 0)
def all_notes_off(channel=0)
control_change(123, 0, channel)
end
# Channel pressure (mono aftertouch)
def channel_pressure(pressure, channel=0)
Messages::ChannelPressure.new(pressure, channel)
end
alias channel_aftertouch channel_pressure
# Polyphonic key pressure (per-pitch aftertouch)
def poly_pressure(pitch, pressure, channel=0)
Messages::PolyPressure.new(pitch, pressure, channel)
end
alias poly_aftertouch poly_pressure
# Program (patch) change
def program_change(program, channel=0)
Messages::ProgramChange.new(program, channel)
end
# Make all methods be module functions (accessible by sending the method name to module directly)
instance_methods.each do |method|
module_function method
end
end
end
end |
module Keisan
  module AST
    # Base class for literal constant nodes (numbers, strings, booleans, null).
    class ConstantLiteral < Literal
      # Wraps a plain Ruby value in the matching AST literal node.
      # Raises TypeError for unsupported value types.
      def self.from_value(value)
        case value
        when Numeric
          AST::Number.new(value)
        when ::String
          AST::String.new(value)
        when TrueClass, FalseClass
          AST::Boolean.new(value)
        when NilClass
          AST::Null.new
        else
          raise TypeError.new("#{value}'s type is invalid, #{value.class}")
        end
      end

      # Coerces `other` into an AST node. Fix: keep the receiver first —
      # the previous [wrapped_other, self] ordering swapped the operands,
      # which is wrong for non-commutative operations.
      def coerce(other)
        [self, self.class.from_value(other)]
      end

      def to_s
        value.to_s
      end
    end
  end
end
Do not change order when coercing
module Keisan
module AST
# Base class for literal constant nodes (numbers, strings, booleans, null)
class ConstantLiteral < Literal
# Wraps a plain Ruby value in the matching AST literal node.
# Raises TypeError for unsupported value types.
def self.from_value(value)
case value
when Numeric
AST::Number.new(value)
when ::String
AST::String.new(value)
when TrueClass, FalseClass
AST::Boolean.new(value)
when NilClass
AST::Null.new
else
raise TypeError.new("#{value}'s type is invalid, #{value.class}")
end
end
# Coerces `other` into an AST node, keeping the receiver first so
# operand order is preserved for non-commutative operations
def coerce(other)
[self, self.class.from_value(other)]
end
def to_s
value.to_s
end
end
end
end
|
Add a package for Firespitter.
# Kosmos package definition for the Firespitter mod.
class Firespitter < Kosmos::Package
title 'Firespitter'
url 'http://addons.cursecdn.com/files/2202/682/Firespitter.zip'
# Merges the downloaded archive's contents into KSP's GameData directory.
def install
merge_directory '.', into: 'GameData'
end
end
|
require 'larrow/qingcloud'
module Larrow
  module Runner
    module Service
      # Thin wrapper around the Qingcloud API used to provision build nodes.
      class Cloud
        include Qingcloud

        # Establishes a fresh Qingcloud connection from the given credentials.
        def initialize args={}
          # Fix: drop any connection left over from a previous Cloud instance
          # so the new credentials/zone always take effect (same cleanup
          # check_available performs on failure).
          Qingcloud.remove_connection
          access_id = args[:qy_access_key_id]
          secret_key = args[:qy_secret_access_key]
          zone_id = args[:zone_id]
          @keypair_id = args[:keypair_id]
          Qingcloud.establish_connection access_id,secret_key,zone_id
        end

        # return: Array< [ instance,eip ] >
        # WARN: eips contains promise object, so it should be force
        def create image_id:nil,count:1
          RunLogger.level(1).detail "assign node"
          instances = Instance.create(image_id: image_id,
                                      count:count,
                                      login_mode:'keypair',
                                      keypair_id: @keypair_id
                                     )
          eips = Eip.create(count:count)
          (0...count).map do |i|
            RunLogger.level(1).detail "bind ip: #{eips[i].address}"
            eips[i] = eips[i].associate instances[i].id
            [ instances[i], eips[i] ]
          end
        end

        # Kicks off image creation; return image future
        def create_image instance_id
          Image.create instance_id
        end

        # True when exactly one image with the given id exists.
        def image? image_id
          Image.list(:self, ids: [image_id]).size == 1
        end

        # Probes the connection; on failure tears it down and re-raises the
        # original error (bare `raise` re-raises $! — same behavior, idiomatic).
        def check_available
          KeyPair.list
        rescue
          Qingcloud.remove_connection
          raise
        end
      end
    end
  end
end
Creating a new Cloud instance now removes any existing Qingcloud connection before establishing a new one
require 'larrow/qingcloud'
module Larrow
module Runner
module Service
# Thin wrapper around the Qingcloud API used to provision build nodes
class Cloud
include Qingcloud
# Establishes a fresh Qingcloud connection from the given credentials;
# any connection left over from a previous instance is dropped first
def initialize args={}
Qingcloud.remove_connection
access_id = args[:qy_access_key_id]
secret_key = args[:qy_secret_access_key]
zone_id = args[:zone_id]
@keypair_id = args[:keypair_id]
Qingcloud.establish_connection access_id,secret_key,zone_id
end
# return: Array< [ instance,eip ] >
# WARN: eips contains promise object, so it should be force
def create image_id:nil,count:1
RunLogger.level(1).detail "assign node"
instances = Instance.create(image_id: image_id,
count:count,
login_mode:'keypair',
keypair_id: @keypair_id
)
eips = Eip.create(count:count)
(0...count).map do |i|
RunLogger.level(1).detail "bind ip: #{eips[i].address}"
eips[i] = eips[i].associate instances[i].id
[ instances[i], eips[i] ]
end
end
# Kicks off image creation; return image future
def create_image instance_id
Image.create instance_id
end
# True when exactly one image with the given id exists
def image? image_id
Image.list(:self, ids: [image_id]).size == 1
end
# Probes the connection; on failure tears it down and re-raises
def check_available
KeyPair.list
rescue
Qingcloud.remove_connection
raise $!
end
end
end
end
end
|
require 'ui'
require 'timehelper'
require 'player'
require 'yaml'
require 'gameplay'
module MasterMind
  module Tobi
    # Single-player game loop: reads guesses, gives feedback, keeps a guess
    # history, and records winning games to the top-scores file.
    class SinglePlayer
      include TimeHelper

      attr_reader :start_time
      attr_reader :history
      attr_reader :sequence
      attr_reader :game_logic
      attr_accessor :end_guess

      # Inputs treated as commands rather than guesses.
      ALLOWED = ['c', 'h', 'q', 'cheat', 'history', 'quit']

      def initialize(sequence, game_logic)
        @start_time = Time.now
        @history = []
        @sequence = sequence
        @game_logic = game_logic
        @end_guess = 13 # sentinel: one past GUESS_MAX, ends the guess loop
      end

      # generate game sequence and start game play
      def start_game
        print UI::GENERATE_MESSAGE % [game_logic.sequence_type, game_logic.length, UI::COLOR_STRINGS[game_logic.level]]
        print UI::INPUT_PROMPT
        guesses = 0
        # allow the user guess up to twelve times before ending game
        while guesses < UI::GUESS_MAX
          input = gets.chomp.downcase
          next if invalid_length?(input)
          next if treat_option?(input, history)
          guesses = treat_guess(input, guesses, history)
        end
        puts UI::SORRY_SINGLE_MESSAGE % sequence.join.upcase if guesses == UI::GUESS_MAX
      end

      # check if user's guess is longer or fewer than the required length
      # (commands in ALLOWED are exempt)
      def invalid_length?(input)
        if input.length < game_logic.length && !(ALLOWED.include?(input))
          print UI::INPUT_SHORT_MESSAGE
          return true
        elsif input.length > game_logic.length && !(ALLOWED.include?(input))
          print UI::INPUT_LONG_MESSAGE
          return true
        end
        return false
      end

      # check if user selects an option; true when input was handled as one
      def treat_option?(input, history)
        case input
        when "h", "history" then print_history(history)
        when "q", "quit" then exit(0)
        when "c", "cheat" then print UI::SEQUENCE_MESSAGE % sequence.join.upcase
        else return false
        end
        return true
      end

      # treat guesses entered by user; returns the updated guess count
      def treat_guess(input, guesses, history)
        guesses += 1
        if input == sequence.join # right guess entered
          right_guess(start_time, sequence, guesses)
          guesses = end_guess # sentinel value to end guess loop
        else
          wrong_guess(sequence, guesses, input, history) # wrong guess entered
        end
        return guesses
      end

      # Prints the player's standing versus the average plus the top-ten list.
      def print_top_ten(current_player)
        top_ten_list = YAML.load_stream(File.open(UI::DB_STORE)).sort{|player1, player2| # load player objects from db and sort by guesses/time
          by_guess = player1.guesses <=> player2.guesses # first sort by guesses
          by_guess == 0 ? player1.time <=> player2.time : by_guess # then sort by time
        }[0...10] if File.file?(UI::DB_STORE) # pick out top ten
        # Fix: top_ten_list is nil when the scores file is missing; guard
        # before calling #length to avoid NoMethodError on nil.
        puts average_string(top_ten_list, current_player) if top_ten_list && top_ten_list.length > 1 # print out user's performance compared to average
        # print out top ten results
        if !top_ten_list.nil?
          puts ""
          puts UI::TOP_TEN
          top_ten_list.each_with_index{|player, index| puts "#{index+1}. " + player.to_s }
        end
      end

      # Handles a winning guess: stores the game and prints the summary.
      def right_guess(start_time, sequence, guesses)
        time_elapsed = (Time.now - start_time).to_i # time used by user in seconds
        current_player = store_game(sequence, guesses, time_elapsed) # store user data to top-scores file
        puts UI::CONGRATS_MESSAGE % [current_player.name, sequence.join.upcase, guesses, guesses > 1 ? "guesses" : "guess",
          time_convert(time_elapsed) << '.']
        print_top_ten(current_player)
      end

      # generates user's performance compared to average
      def average_string(top_ten_list, current_player)
        # time difference obtained
        time_diff = (top_ten_list.inject(0){ |sum, player| sum += player.time } / top_ten_list.length.to_f).round - current_player.time
        # guess difference obtained
        guess_diff = (top_ten_list.inject(0){ |sum, player| sum += player.guesses } / top_ten_list.length.to_f).round - current_player.guesses
        "That's %s %s and %s %s %s the average\n" % [time_convert(time_diff.abs), time_diff < 0 ? "slower" : "faster",
          guess_diff.abs, guess_diff.abs == 1 ? "guess" : "guesses", guess_diff < 0 ? "more" : "fewer"]
      end

      # Handles a wrong guess: feedback, history entry, and re-prompt.
      def wrong_guess(sequence, guesses, input, history)
        result = GameLogic.check_input(sequence, input) # get results from input
        history << GamePlay.new(input, result[:correct_elements], result[:correct_position]) # add game play to history
        puts UI::INFO_MESSAGE % [input.upcase, result[:correct_elements], result[:correct_position]]
        puts UI::GUESSES_MESSAGE % [guesses, guesses > 1 ? "guesses" : "guess"]
        print UI::INPUT_PROMPT
      end

      # get player name and store details to file
      def store_game(sequence, guesses, time)
        print UI::NAME_MESSAGE
        name = get_name
        current_player = Player.new(name, sequence, time, guesses) # create new player object
        # write player object to file if file does not exist, or verify whether to add record from user, and write if it exists
        File.open(UI::DB_STORE, 'a'){|file| file.write(YAML.dump(current_player))} if user_permits_store?
        current_player
      end

      # confirm from user to add record to top-scores if file exists
      # (fix: dropped the unused `option_chosen` local)
      def user_permits_store?
        return true if !File.exist?(UI::DB_STORE) # if file does not exist, return true
        print UI::OVERWRITE_MESSAGE
        print UI::INPUT_PROMPT
        return MasterMind::Tobi::GameHelper.yes_or_no?
      end

      # Prompts until a non-empty name is entered; returns it capitalized.
      def get_name
        name = ""
        while name.eql?("")
          name = gets.chomp.capitalize
          print UI::INVALID_MESSAGE if name.eql?("")
        end
        name
      end

      # Prints the guess history (or a hint when it is still empty).
      def print_history(history)
        if history.empty?
          print "No history yet. Enter a guess" + UI::INPUT_PROMPT
        else
          puts ""
          puts history
          print UI::INPUT_PROMPT
        end
      end
    end
  end
end
Refactor SinglePlayer: extract start_game's input handling into a process_input method
require 'ui'
require 'timehelper'
require 'player'
require 'yaml'
require 'gameplay'
module MasterMind
module Tobi
class SinglePlayer
include TimeHelper
attr_reader :start_time
attr_reader :history
attr_reader :sequence
attr_reader :game_logic
attr_accessor :end_guess
ALLOWED = ['c', 'h', 'q', 'cheat', 'history', 'quit']
# Set up a single-player round: remember when it started, the secret
# sequence, the rules helper, and an empty play history.
def initialize(sequence, game_logic)
  @start_time = Time.now   # wall clock at game start, used for scoring
  @history    = []         # GamePlay records, one per wrong guess
  @sequence   = sequence   # the secret sequence to be guessed
  @game_logic = game_logic # rules helper (length, level, input checking)
  @end_guess  = 13         # sentinel that terminates the guess loop early
end
# Announce the generated sequence parameters and run the guess loop until
# the player wins or exhausts UI::GUESS_MAX attempts.
#
# Fix: the original called process_input(input, ...) with an undefined
# local `input`, raising NameError on the first iteration. process_input
# reads from stdin itself, so nil is passed for the unused first argument.
def start_game
  print UI::GENERATE_MESSAGE % [game_logic.sequence_type, game_logic.length, UI::COLOR_STRINGS[game_logic.level]]
  print UI::INPUT_PROMPT
  guesses = 0
  # allow the user guess up to twelve times before ending game
  while guesses < UI::GUESS_MAX
    guesses = process_input(nil, guesses, history)
  end
  puts UI::SORRY_SINGLE_MESSAGE % sequence.join.upcase if guesses == UI::GUESS_MAX
end
# Read one line from the player and dispatch it. Invalid-length input and
# menu options consume the prompt without counting as a guess; anything
# else is scored by treat_guess. Returns the updated guess count.
#
# Fixes vs the original: the method was missing its closing `end`; it
# never returned the updated count back to start_game's loop; and the
# invalid_length? result was overwritten by treat_option?, letting
# invalid-length input fall through and be counted as a guess.
def process_input(input, guesses, history)
  input = gets.chomp.downcase
  handled = invalid_length?(input) || treat_option?(input, history)
  handled ? guesses : treat_guess(input, guesses, history)
end
# True (after printing the matching warning) when the guess is shorter or
# longer than the required sequence length; menu commands are exempt.
def invalid_length?(input)
  return false if ALLOWED.include?(input)
  if input.length < game_logic.length
    print UI::INPUT_SHORT_MESSAGE
    true
  elsif input.length > game_logic.length
    print UI::INPUT_LONG_MESSAGE
    true
  else
    false
  end
end
# Handle a menu command (history, quit, cheat). Returns true when the
# input was a command, false when it should be treated as a guess.
def treat_option?(input, history)
  if %w(h history).include?(input)
    print_history(history)
  elsif %w(q quit).include?(input)
    exit(0)
  elsif %w(c cheat).include?(input)
    print UI::SEQUENCE_MESSAGE % sequence.join.upcase
  else
    return false
  end
  true
end
# Count this attempt and score it: a correct guess ends the game (the
# sentinel end_guess is returned to break the loop), a wrong one is
# recorded with feedback. Returns the updated guess count.
def treat_guess(input, guesses, history)
  guesses += 1
  if input == sequence.join
    right_guess(start_time, sequence, guesses)
    end_guess
  else
    wrong_guess(sequence, guesses, input, history)
    guesses
  end
end
# Load the stored players, rank them (fewest guesses first, fastest time
# as tie-break), and print the top ten plus the current player's standing
# relative to the average.
#
# Fix: the original dereferenced top_ten_list.length even when the scores
# file did not exist (top_ten_list nil), raising NoMethodError.
def print_top_ten(current_player)
  top_ten_list = nil
  if File.file?(UI::DB_STORE)
    top_ten_list = YAML.load_stream(File.open(UI::DB_STORE)).sort { |player1, player2|
      by_guess = player1.guesses <=> player2.guesses # primary: fewest guesses
      by_guess == 0 ? player1.time <=> player2.time : by_guess # tie-break: fastest time
    }[0...10]
  end
  return if top_ten_list.nil?
  # print out user's performance compared to average
  puts average_string(top_ten_list, current_player) if top_ten_list.length > 1
  # print out top ten results
  puts ""
  puts UI::TOP_TEN
  top_ten_list.each_with_index { |player, index| puts "#{index + 1}. " + player.to_s }
end
# Celebrate a winning guess: compute the elapsed time, persist the player
# to the top-scores file, congratulate them, and show the leaderboard.
def right_guess(start_time, sequence, guesses)
  elapsed = (Time.now - start_time).to_i
  player = store_game(sequence, guesses, elapsed)
  label = guesses > 1 ? "guesses" : "guess"
  puts UI::CONGRATS_MESSAGE % [player.name, sequence.join.upcase, guesses, label,
    time_convert(elapsed) << '.']
  print_top_ten(player)
end
# Describe the current player's time and guess count relative to the
# rounded averages of the top-ten list.
def average_string(top_ten_list, current_player)
  player_count = top_ten_list.length.to_f
  time_diff = (top_ten_list.map(&:time).inject(0, :+) / player_count).round - current_player.time
  guess_diff = (top_ten_list.map(&:guesses).inject(0, :+) / player_count).round - current_player.guesses
  "That's %s %s and %s %s %s the average\n" % [time_convert(time_diff.abs), time_diff < 0 ? "slower" : "faster",
  guess_diff.abs, guess_diff.abs == 1 ? "guess" : "guesses", guess_diff < 0 ? "more" : "fewer"]
end
# Record and report an incorrect guess, then reprint the input prompt.
def wrong_guess(sequence, guesses, input, history)
  scored = GameLogic.check_input(sequence, input)
  history << GamePlay.new(input, scored[:correct_elements], scored[:correct_position])
  puts UI::INFO_MESSAGE % [input.upcase, scored[:correct_elements], scored[:correct_position]]
  puts UI::GUESSES_MESSAGE % [guesses, guesses > 1 ? "guesses" : "guess"]
  print UI::INPUT_PROMPT
end
# Collect the player's name and append their record to the top-scores
# file (when permitted). Returns the new Player.
def store_game(sequence, guesses, time)
  print UI::NAME_MESSAGE
  player = Player.new(get_name, sequence, time, guesses)
  File.open(UI::DB_STORE, 'a') { |file| file.write(YAML.dump(player)) } if user_permits_store?
  player
end
# Returns true when the score may be stored: the file does not exist yet,
# or the user answers yes to the overwrite prompt.
#
# Fix: removed the unused local `option_chosen` the original assigned
# and never read.
def user_permits_store?
  return true unless File.exist?(UI::DB_STORE)
  print UI::OVERWRITE_MESSAGE
  print UI::INPUT_PROMPT
  MasterMind::Tobi::GameHelper.yes_or_no?
end
# Read lines until a non-blank name arrives; returns it capitalized.
def get_name
  entered = ""
  until !entered.empty?
    entered = gets.chomp.capitalize
    print UI::INVALID_MESSAGE if entered.empty?
  end
  entered
end
# Show every recorded play, or a hint when the history is still empty,
# ending with the input prompt either way.
def print_history(history)
  if history.empty?
    print "No history yet. Enter a guess" + UI::INPUT_PROMPT
    return
  end
  puts ""
  puts history
  print UI::INPUT_PROMPT
end
end
end
end |
# rspec-rails mock_model extensions to add the stubs
# for all attributes and the errors method
module MockedFixtures
  module Mocks
    module Rspec
      # Hook: chain the attribute-aware wrapper in front of rspec's own
      # mock_model, keeping the original reachable as mock_model_with_rspec.
      def self.included(base)
        base.class_eval do
          include InstanceMethods
          alias_method_chain :mock_model, :attributes
          alias_method :mock_model_with_rspec, :mock_model_with_attributes
        end
      end

      module InstanceMethods
        # mock_model wrapper. Extra options:
        #   :all_attributes - stub every column of the model's table with
        #     nil unless a stub was already supplied.
        #   :add_errors - stub an empty, error-free errors collection.
        def mock_model_with_attributes(model_class, options_and_stubs = {})
          if options_and_stubs.delete(:all_attributes)
            schema = MockedFixtures::SchemaParser.load_schema
            table = model_class.table_name
            # Fix: skip the primary key column — stubbing it with nil
            # would clobber the id that mock_model itself provides.
            schema[table][:columns].each { |column|
              unless options_and_stubs.has_key?(column[0].to_sym) || column[0] == model_class.primary_key
                options_and_stubs[column[0].to_sym] = nil
              end
            }
          end
          if options_and_stubs.delete(:add_errors)
            errors = []
            errors.stub!(:count).and_return(0)
            errors.stub!(:on).and_return(nil)
            options_and_stubs.reverse_merge!(:errors => errors)
          end
          mock_model_without_attributes(model_class, options_and_stubs)
        end
      end
    end
  end
end
Spec::Rails::Example::RailsExampleGroup.send(:include, MockedFixtures::Mocks::Rspec)
fixed overwriting stub value if nil when already defined
# rspec-rails mock_model extensions to add the stubs
# for all attributes and the errors method
module MockedFixtures
module Mocks
module Rspec
# Hook run when mixed into the example group: chains the attribute-aware
# wrapper in front of rspec's own mock_model and keeps the original
# reachable as mock_model_with_rspec.
def self.included(base)
base.class_eval do
include InstanceMethods
alias_method_chain :mock_model, :attributes
alias_method :mock_model_with_rspec, :mock_model_with_attributes
end
end
module InstanceMethods
# mock_model wrapper. Supported extra options:
#   :all_attributes - stub every column of the model's table with nil,
#     except stubs already supplied and the primary key (mock_model
#     provides the id itself).
#   :add_errors - stub an empty, error-free errors collection unless an
#     :errors stub was already given.
def mock_model_with_attributes(model_class, options_and_stubs = {})
if options_and_stubs.delete(:all_attributes)
schema = MockedFixtures::SchemaParser.load_schema
table = model_class.table_name
schema[table][:columns].each { |column|
unless options_and_stubs.has_key?(column[0].to_sym) || column[0] == model_class.primary_key
options_and_stubs[column[0].to_sym] = nil
end
}
end
if options_and_stubs.delete(:add_errors)
errors = []
errors.stub!(:count).and_return(0)
errors.stub!(:on).and_return(nil)
options_and_stubs.reverse_merge!(:errors => errors)
end
# Delegate to rspec's original implementation with the enriched stubs.
mock_model_without_attributes(model_class, options_and_stubs)
end
end
end
end
end
Spec::Rails::Example::RailsExampleGroup.send(:include, MockedFixtures::Mocks::Rspec)
|
require "zlib"
require 'pp'
# Methods used in the ApiLoc publication
class BScript
# Print, per apicomplexan species (alphabetical), the number of localised
# genes and curated publications in ApiLoc as tab-separated rows, followed
# by a grand-total row.
def apiloc_stats
  puts "For each species, how many genes, publications"
  totals = [0, 0]
  Species.apicomplexan.all.sort { |a, b| a.name <=> b.name }.each do |species|
    counts = [
      species.number_of_proteins_localised_in_apiloc,
      species.number_of_publications_in_apiloc
    ]
    puts [species.name, *counts].join("\t")
    totals[0] += counts[0]
    totals[1] += counts[1]
  end
  puts ['Total', *totals].join("\t")
end
# Like apiloc_stats, but emits an HTML table for the version-information
# part of the ApiLoc website.
#
# Fix: removed the stray argument-less `.push` in the species enumeration
# (Array#push with no arguments is a no-op that just returns the array).
def apiloc_html_stats
  total_proteins = 0
  total_publications = 0
  puts '<table>'
  puts '<tr><th>Species</th><th>Localised genes</th><th>Publications curated</th></tr>'
  Species.apicomplexan.all.sort{|a,b| a.name <=> b.name}.each do |s|
    protein_count = s.number_of_proteins_localised_in_apiloc
    publication_count = s.number_of_publications_in_apiloc
    puts "<tr><td><i>#{s.name}</i></td><td>#{protein_count}</td><td>#{publication_count}</td></tr>"
    total_proteins += protein_count
    total_publications += publication_count
  end
  print [
    '<tr><td><b>Total</b>',
    total_proteins,
    total_publications
  ].join("</b></td><td><b>")
  puts '</b></td></tr>'
  puts '</table>'
end
# Print a matrix of localised-gene counts: one row per top-level
# localisation, one column per species of interest. The 'other' category
# only counts genes whose sole top-level localisation is 'other'.
def species_localisation_breakdown
# names = Localisation.all(:joins => :apiloc_top_level_localisation).reach.name.uniq.push(nil)
# print "species\t"
# puts names.join("\t")
# Row order for the output table.
top_names = [
'apical',
'inner membrane complex',
'merozoite surface',
'parasite plasma membrane',
'parasitophorous vacuole',
'exported',
'cytoplasm',
'food vacuole',
'mitochondrion',
'apicoplast',
'golgi',
'endoplasmic reticulum',
'other',
'nucleus'
]
# Column order: the species being compared.
interests = [
'Plasmodium falciparum',
'Toxoplasma gondii',
'Plasmodium berghei',
'Cryptosporidium parvum'
]
# Header row: empty corner cell then the species names.
puts [nil].push(interests).flatten.join("\t")
top_names.each do |top_name|
top = TopLevelLocalisation.find_by_name(top_name)
print top_name
interests.each do |name|
s = Species.find_by_name(name)
if top.name == 'other'
# 'other' is special-cased: only count genes localised EXCLUSIVELY
# to 'other' (genes with additional top-level localisations are
# already counted in their other rows).
count = 0
CodingRegion.all(
:select => 'distinct(coding_regions.id)',
:joins => {:expression_contexts => {:localisation => :apiloc_top_level_localisation}},
:conditions => ['top_level_localisation_id = ? and species_id = ?', top.id, s.id]
).each do |code|
tops = code.expressed_localisations.reach.apiloc_top_level_localisation.flatten
if tops.length == 1
raise unless tops[0].name == 'other'
count += 1
end
end
print "\t#{count}"
else
# Ordinary rows: a straight distinct count of genes with this
# top-level localisation in this species.
count = CodingRegion.count(
:select => 'distinct(coding_regions.id)',
:joins => {:expression_contexts => {:localisation => :apiloc_top_level_localisation}},
:conditions => ['top_level_localisation_id = ? and species_id = ?', top.id, s.id]
)
print "\t#{count}"
end
end
puts
end
end
# Summarise, for the official newest OrthoMCL run, how P. falciparum
# groups map onto T. gondii: total falciparum-containing groups, groups
# with at least one toxo member, and groups with a 1:1 pfa/tgo mapping.
#
# Fixes vs the original:
#  * `each` was used where `collect` was needed, so numbers_of_orthologs
#    held the groups themselves (not toxo counts) and the `num == 0`
#    filter never matched anything;
#  * the 1-to-1 check compared the association to the Integer 1 instead
#    of comparing its count, and the final "how many" printed the group
#    list instead of its length.
def how_many_falciparum_genes_have_toxo_orthologs
  puts ".. all according to orthomcl #{OrthomclRun::ORTHOMCL_OFFICIAL_NEWEST_NAME}"
  all_orthomcl_groups_with_falciparum = OrthomclRun.find_by_name(OrthomclRun::ORTHOMCL_OFFICIAL_NEWEST_NAME).orthomcl_groups.select {|group|
    group.orthomcl_genes.code('pfa').count > 0
  }
  puts "How many P. falciparum orthomcl groups?"
  puts all_orthomcl_groups_with_falciparum.length
  numbers_of_orthologs = all_orthomcl_groups_with_falciparum.collect do |group|
    group.orthomcl_genes.code('tgo').count
  end
  puts
  puts "How many P. falciparum genes have any toxo orthomcl orthologs?"
  puts numbers_of_orthologs.reject {|num|
    num == 0
  }.length
  puts
  puts "How many P. falciparum genes have 1 to 1 mapping with toxo?"
  puts all_orthomcl_groups_with_falciparum.select {|group|
    group.orthomcl_genes.code('pfa').count == 1 and group.orthomcl_genes.code('tgo').count == 1
  }.length
end
# Compare localisation coverage between P. falciparum and T. gondii using
# pre-computed reciprocal blast hits (1e-5 cutoff): tallies genes that are
# localised in both, only one, or neither species, and writes the paired
# localisations of doubly-localised genes to a comparison file.
#
# Fixes vs the original: File.read replaces File.open(..).read (the file
# handles were never closed), and the dead `blast_hits =` assignment is
# gone — the enumeration is run purely for its side effects.
def distribution_of_falciparum_hits_given_toxo
  toxo_only = []
  falc_only = []
  no_hits = []
  hits_not_localised = []
  falc_and_toxo = []
  # why the hell doesn't bioruby do this for me?
  falciparum_blasts = {}
  toxo_blasts = {}
  # convert the blast file as it currently exists into a hash of plasmodb => blast_hits
  Bio::Blast::Report.new(
    File.read("#{PHD_DIR}/apiloc/experiments/falciparum_vs_toxo_blast/falciparum_v_toxo.1e-5.tab.out"),
    :tab
  ).iterations[0].hits.each do |hit|
    q = hit.query_id.gsub(/.*\|/,'')
    s = hit.definition.gsub(/.*\|/,'')
    falciparum_blasts[q] ||= []
    falciparum_blasts[q].push s
  end
  Bio::Blast::Report.new(
    File.read("#{PHD_DIR}/apiloc/experiments/falciparum_vs_toxo_blast/toxo_v_falciparum.1e-5.tab.out"),
    :tab
  ).iterations[0].hits.each do |hit|
    q = hit.query_id.gsub(/.*\|/,'')
    s = hit.definition.gsub(/.*\|/,'')
    toxo_blasts[q] ||= []
    toxo_blasts[q].push s
  end
  File.open("#{PHD_DIR}/apiloc/experiments/falciparum_to_toxo_best_evalue.csv", 'w') do |loc_comparison|
    CodingRegion.s(Species::FALCIPARUM_NAME).all(
      :joins => :amino_acid_sequence,
      :include => {:expression_contexts => :localisation}
    ).each do |falciparum|
      # does this falciparum have a hit?
      # compare localisation of the falciparum and toxo protein
      falciparum_locs = falciparum.expression_contexts.reach.localisation.reject{|l| l.nil?}
      toxo_ids = falciparum_blasts[falciparum.string_id]
      toxo_ids ||= []
      toxos = toxo_ids.collect do |toxo_id|
        t = CodingRegion.find_by_name_or_alternate_and_species(toxo_id, Species::TOXOPLASMA_GONDII)
        raise unless t
        t
      end
      toxo_locs = toxos.collect {|toxo|
        toxo.expression_contexts.reach.localisation.retract
      }.flatten.reject{|l| l.nil?}
      if toxos.length > 0
        # localised in BOTH species: write the pairing for manual comparison
        # (the original comment here was misleading)
        if !falciparum_locs.empty? and !toxo_locs.empty?
          loc_comparison.puts [
            falciparum.string_id,
            falciparum.annotation.annotation,
            falciparum.localisation_english
          ].join("\t")
          toxos.each do |toxo|
            loc_comparison.puts [
              toxo.string_id,
              toxo.annotation.annotation,
              toxo.localisation_english
            ].join("\t")
          end
          loc_comparison.puts
          falc_and_toxo.push [falciparum, toxos]
        end
        # stats about how well the protein is localised
        if toxo_locs.empty? and !falciparum_locs.empty?
          falc_only.push [falciparum, toxos]
        end
        if !toxo_locs.empty? and falciparum_locs.empty?
          toxo_only.push [falciparum, toxos]
        end
        if toxo_locs.empty? and falciparum_locs.empty?
          hits_not_localised.push falciparum.string_id
        end
      else
        no_hits.push falciparum.string_id
      end
    end
  end
  puts "How many genes are localised in toxo and falciparum?"
  puts falc_and_toxo.length
  puts
  puts "How many genes are localised in toxo but not in falciparum?"
  puts toxo_only.length
  puts
  puts "How many genes are localised in falciparum but not in toxo?"
  puts falc_only.length
  puts
  puts "How many falciparum genes have no toxo hit?"
  puts no_hits.length
  puts
  puts "How many have hits but are not localised?"
  puts hits_not_localised.length
  puts
end
# Report OrthoMCL group/gene overlap between T. gondii ('tgo') and
# P. falciparum ('pfa'): shared groups plus each species' specific
# remainder, in both group and gene counts.
def tgo_v_pfa_crossover_count
  shared = OrthomclGroup.all_overlapping_groups(%w(tgo pfa))
  pfa_groups = OrthomclGroup.all_overlapping_groups(%w(pfa))
  tgo_groups = OrthomclGroup.all_overlapping_groups(%w(tgo))
  # Distinct genes of one species code held inside the shared groups.
  count_genes = lambda do |group, species_code|
    group.orthomcl_genes.codes([species_code]).count(:select => 'distinct(orthomcl_genes.id)')
  end
  shared_pfa_genes = shared.collect { |group| count_genes.call(group, 'pfa') }.sum
  shared_tgo_genes = shared.collect { |group| count_genes.call(group, 'tgo') }.sum
  pfa_total = CodingRegion.s(Species::FALCIPARUM).count(:joins => :amino_acid_sequence)
  tgo_total = CodingRegion.s(Species::TOXOPLASMA_GONDII).count(:joins => :amino_acid_sequence)
  puts "How many OrthoMCL groups have at least one protein in pfa and tgo?"
  puts "#{shared.length} groups, #{shared_pfa_genes} falciparum genes, #{shared_tgo_genes} toxo genes"
  puts
  puts "How many OrthoMCL groups are specific to falciparum?"
  puts "#{pfa_groups.length - shared.length} groups, #{pfa_total - shared_pfa_genes} genes"
  puts
  puts "How many OrthoMCL groups are specific to toxo?"
  puts "#{tgo_groups.length - shared.length} groups, #{tgo_total - shared_tgo_genes} genes"
  puts
end
# Print out a fasta file of all the sequences that are in apiloc.
# If a block is given it takes each coding region so that it can be
# transformed into a fasta sequence header (as in AminoAcidSequence#fasta),
# otherwise a "species | id | annotation" default header is used.
def apiloc_fasta(io = $stdout)
  CodingRegion.all(
    :joins => :expression_contexts
  ).uniq.each do |code|
    aas = code.amino_acid_sequence
    unless aas and aas.sequence.length > 0
      $stderr.puts "Couldn't find amino acid sequence for #{code.string_id}/#{code.id}"
      next
    end
    io.print ">"
    header = if block_given?
      yield(code)
    else
      [
        code.species.name,
        code.string_id,
        code.annotation ? code.annotation.annotation : nil
      ].join(' | ')
    end
    io.puts header
    io.puts aas.sequence
  end
end
# For every localised P. falciparum gene, print its official OrthoMCL v3
# group mates (apicomplexan only) together with their localisations where
# known, one tab-separated paragraph per gene, for manual inspection.
def apiloc_mapping_orthomcl_v3
# Starting with falciparum, how many genes have localised orthologues?
CodingRegion.falciparum.all(
:joins => {:expression_contexts => :localisation},
:select => 'distinct(coding_regions.*)'
).each do |code|
next if ["PF14_0078",'PF13_0011'].include?(code.string_id) #fair enough there is no orthomcl for this - just the way v3 is.
# Is this in orthomcl
ogene = nil
begin
ogene = code.single_orthomcl
rescue CodingRegion::UnexpectedOrthomclGeneCount
# ambiguous mapping to orthomcl - skip this gene
next
end
if ogene
groups = ogene.orthomcl_groups
raise unless groups.length == 1
group = groups[0]
# group mates other than the query gene itself
others = group.orthomcl_genes.apicomplexan.all.reject{|r| r.id==ogene.id}
next if others.empty?
# group mates that are themselves localised in ApiLoc
orthologues = CodingRegion.all(
:joins => [
{:expression_contexts => :localisation},
:orthomcl_genes,
],
:conditions => "orthomcl_genes.id in (#{others.collect{|o|o.id}.join(',')})",
:select => 'distinct(coding_regions.*)'
)
if orthologues.empty?
$stderr.puts "Nothing useful found for #{code.names.join(', ')}"
else
# output the whole group, including localisations where known
puts [
code.string_id,
code.case_sensitive_literature_defined_coding_region_alternate_string_ids.reach.name.join(', '),
code.annotation.annotation,
code.localisation_english
].join("\t")
group.orthomcl_genes.apicomplexan.all.each do |oge|
# skip species not of interest here
next if %w(cmur chom).include?(oge.official_split[0])
c = nil
if oge.official_split[1] == 'TGGT1_036620' #stupid v3
c = CodingRegion.find_by_name_or_alternate("TGME49_084810")
else
c = oge.single_code!
end
if c.nil?
# if no coding region is returned, then don't complain too much,
# but I will check these manually later
puts oge.orthomcl_name
else
next if c.id == code.id #don't duplicate the query
print c.string_id
puts [
nil,
c.case_sensitive_literature_defined_coding_region_alternate_string_ids.reach.name.join(', '),
c.annotation.annotation,
c.localisation_english
].join("\t")
end
end
puts
end
end
end
end
# Dump all ApiLoc sequences into a protein FASTA whose headers are
# "Species_name|gene_id" (so hits are identifiable from -m 8 blast
# output), run formatdb on it, and move the database files to /blastdb.
#
# Fix: restored the `#{filename}` interpolation in the mv command — the
# original line had been mangled to a literal `#(unknown)`, so nothing
# was ever moved.
def create_apiloc_m8_ready_blast_database
  File.open('/tmp/apiloc_m8_ready.protein.fa','w') do |file|
    BScript.new.apiloc_fasta(file) do |code|
      "#{code.species.name.gsub(' ','_')}|#{code.string_id}"
    end
  end
  Dir.chdir('/tmp') do
    `formatdb -i apiloc_m8_ready.protein.fa`
    %w(
      apiloc_m8_ready.protein.fa
      apiloc_m8_ready.protein.fa.phr
      apiloc_m8_ready.protein.fa.pin
      apiloc_m8_ready.protein.fa.psq
    ).each do |filename|
      `mv #{filename} /blastdb`
    end
  end
end
# Taking all the falciparum proteins, where are the orthologues localised?
# For each localised falciparum gene (first 20 only), prints the gene id,
# its annotation, and the useful GO terms of its yeast ('scer') OrthoMCL
# group mates, pipe-separated.
def orthomcl_localisation_annotations
CodingRegion.falciparum.all(
:joins => :expressed_localisations,
:limit => 20,
:select => 'distinct(coding_regions.*)'
).each do |code|
begin
falciparum_orthomcl_gene = code.single_orthomcl
puts [
code.string_id,
code.annotation.annotation,
# one entry per yeast group mate: its useful GO term names
falciparum_orthomcl_gene.official_group.orthomcl_genes.code('scer').all.collect { |sceg|
sceg.single_code.coding_region_go_terms.useful.all.reach.go_term.term.join(', ')
}.join(' | ')
].join("\t")
rescue CodingRegion::UnexpectedOrthomclGeneCount => e
# ambiguous orthomcl mapping - report and continue
$stderr.puts "Couldn't map #{code.string_id}/#{code.annotation.annotation} to orthomcl"
end
end
end
# For human/mouse/yeast/worm genes grouped (OrthoMCL official v3) with a
# localised apicomplexan gene, fetch each gene's UniProt entry via its
# Ensembl translation so GO terms can be compared later. Incomplete: the
# UniProt entry is parsed and printed but not yet persisted (see TODO).
#
# Fixes vs the original: the Ensembl id is taken from the orthomcl_gene
# being iterated (the original called official_split on a fresh
# OrthomclGene.new — compare map_using_uniprot_mapper), and the
# `#{filename}` interpolation in the wget command is restored (it had
# been mangled to a literal `#(unknown)`).
def upload_apiloc_relevant_go_terms
  require 'ensembl'
  # yeast/human/mouse species records should already be uploaded
  gene = Gene.new.create_dummy('apiloc conservation dummy gene for multiple species')
  ensembl_uniprot_db = ExternalDb.find_by_db_name("Uniprot/SWISSPROT")
  # for each human, mouse, yeast gene in a group with a localised apicomplexan
  # gene, get the go terms from Ensembl so we can start to compare them later
  ogroup = OrthomclGroup.first(
    :joins => {
      :orthomcl_gene_orthomcl_group_orthomcl_runs => [
        :orthomcl_run,
        {:orthomcl_gene => {:coding_regions => :expressed_localisations}}
      ]
    },
    :conditions => {
      :orthomcl_runs => {:name => OrthomclRun::ORTHOMCL_OFFICIAL_VERSION_3_NAME}
    }
  )
  ogroup.orthomcl_genes.codes(%w(hsap mmus scer cele)).all.each do |orthomcl_gene|
    ensembl = orthomcl_gene.official_split[1]
    # fetch the uniprot ID from Ensembl
    ensp = Ensembl::Core::Translation.find_by_stable_id(ensembl)
    unless ensp
      $stderr.puts "Couldn't find ensembl gene to match #{ensembl}, skipping"
      next
    end
    # extract the uniprot id
    uniprots = ensp.xrefs.select{|x| ensembl_uniprot_db.id == x.id}.collect{|x| x.db_primaryacc}.uniq
    uniprot = uniprots[0]
    unless uniprots.length == 1
      $stderr.puts "Unexpected number of uniprot IDs found: #{uniprots.inspect}"
      next if uniprots.empty?
    end
    # wget the uniprot txt file entry
    filename = "/tmp/uniprot#{uniprot}.txt"
    `wget http://www.uniprot.org/#{uniprot}.txt -O #{filename}`
    # parse the uniprot entry
    bio = Bio::Uniprot.new(File.open(filename).read)
    p bio
    # TODO: create the gene, find the GO term I've annotated (or add a
    # new one to be filled in), then attach it with its evidence code.
  end
end
# Not really working - too slow for me. Intended to map mouse OrthoMCL
# genes (from groups containing a localised ApiLoc gene) to UniProt ids;
# currently only prints each Ensembl protein id, the mapper calls are
# commented out.
def map_using_uniprot_mapper
# require 'uni_prot_i_d_mapping_selected'
# mapper = Bio::UniProtIDMappingSelected.new
# ogroup =
OrthomclGroup.all(
# :limit => 5,
:joins => {
:orthomcl_gene_orthomcl_group_orthomcl_runs => [
:orthomcl_run,
{:orthomcl_gene => {:coding_regions => :expressed_localisations}}
]
},
:conditions => {
:orthomcl_runs => {:name => OrthomclRun::ORTHOMCL_OFFICIAL_VERSION_3_NAME}
}
# )
).each do |ogroup|
ogroup.orthomcl_genes.codes(%w(mmus)).all.each do |orthomcl_gene|
# second element of the official name is the Ensembl protein id
ensembl = orthomcl_gene.official_split[1]
puts ensembl
# mapped = mapper.find_by_ensembl_protein_id(ensembl)
# p mapped
end
end
end
# For each model organism, write the identifiers of every OrthoMCL
# (official v3) gene whose group contains a localised ApiLoc gene — one
# file per species under species_orthologue_folder — as input for
# BioMart/GO-term lookups.
#
# Fix: the output File was opened but never closed (handle leak); the
# block form of File.open now guarantees flush-and-close per species.
def generate_biomart_to_go_input
  {
    'hsap' => 'human',
    'mmus' => 'mouse',
    'atha' => 'arabidopsis',
    'dmel' => 'fly',
    'cele' => 'worm',
    'scer' => 'yeast',
    'crei' => 'chlamydomonas',
    'tthe' => 'tetrahymena',
    'rnor' => 'rat',
    'spom' => 'pombe',
  }.each do |code, name|
    $stderr.puts name
    File.open("#{species_orthologue_folder}/#{name}.txt",'w') do |out|
      OrthomclGroup.all(
        :joins => {
          :orthomcl_gene_orthomcl_group_orthomcl_runs => [
            :orthomcl_run,
            {:orthomcl_gene => {:coding_regions => :expressed_localisations}}
          ]
        },
        :conditions => {
          :orthomcl_runs => {:name => OrthomclRun::ORTHOMCL_OFFICIAL_VERSION_3_NAME}
        }
      ).uniq.each do |ogroup|
        ogroup.orthomcl_genes.code(code).all.each do |orthomcl_gene|
          ensembl = orthomcl_gene.official_split[1]
          out.puts ensembl
        end
      end
    end
  end
end
# Directory holding the per-species orthologue export files used by the
# GO-mapping methods below.
def species_orthologue_folder
  "#{PHD_DIR}/apiloc/species_orthologues3"
end
# all the methods required to get from the biomart and uniprot
# id to GO term mappings to a spreadsheet that can be inspected for the
# localisations required. Runs the upload steps, links OrthoMCL genes to
# coding regions, then prints the inspection spreadsheet.
def apiloc_gathered_output_to_generated_spreadsheet_for_inspection
upload_apiloc_ensembl_go_terms
upload_apiloc_uniprot_go_terms
upload_apiloc_uniprot_mappings
upload_apiloc_flybase_mappings
# for some reason a single refseq sequence can be linked to multiple uniprot sequences,
# which is stupid but something I'll have to live with
OrthomclGene.new.link_orthomcl_and_coding_regions(%w(atha), :accept_multiple_coding_regions=>true)
OrthomclGene.new.link_orthomcl_and_coding_regions(%w(hsap mmus dmel cele))
generate_apiloc_orthomcl_groups_for_inspection
end
# Import cellular-component GO annotations exported from BioMart for
# human, mouse and rat: one dummy coding region per Ensembl protein id,
# with one CodingRegionGoTerm per (protein, GO id, evidence) row.
def upload_apiloc_ensembl_go_terms
{
'human' => Species::HUMAN_NAME,
'mouse' => Species::MOUSE_NAME,
'rat' => Species::RAT_NAME,
}.each do |this_name, proper_name|
$stderr.puts this_name
FasterCSV.foreach("#{species_orthologue_folder}/biomart_results/#{this_name}.csv",
:col_sep => "\t",
:headers => true
) do |row|
protein_name = row['Ensembl Protein ID']
go_id = row['GO Term Accession (cc)']
evidence = row['GO Term Evidence Code (cc)']
next if go_id.nil? #ignore empty columns
code = CodingRegion.find_or_create_dummy(protein_name, proper_name)
go = GoTerm.find_by_go_identifier_or_alternate go_id
unless go
# unknown GO id - report and keep going
$stderr.puts "Couldn't find GO id #{go_id}"
next
end
CodingRegionGoTerm.find_or_create_by_coding_region_id_and_go_term_id_and_evidence_code(
code.id, go.id, evidence
) or raise
end
end
end
# Import GO annotations from saved UniProt flat files for arabidopsis,
# worm and fly: the primary accession becomes a dummy coding region, the
# secondary accessions become alternate names, and each DR GO line adds a
# CodingRegionGoTerm with its parsed evidence code.
def upload_apiloc_uniprot_go_terms
{
'arabidopsis' => Species::ARABIDOPSIS_NAME,
'worm' => Species::ELEGANS_NAME,
'fly' => Species::DROSOPHILA_NAME
}.each do |this_name, proper_name|
# entries in the flat file are separated by '//' lines
File.open("#{species_orthologue_folder}/uniprot_results/#{this_name}.uniprot.txt").read.split("//\n").each do |uniprot|
u = Bio::UniProt.new(uniprot)
axes = u.ac
protein_name = axes[0]
raise unless protein_name
code = CodingRegion.find_or_create_dummy(protein_name, proper_name)
# remaining accessions are recorded as alternate names
protein_alternate_names = axes[1..(axes.length-1)].no_nils
protein_alternate_names.each do |name|
CodingRegionAlternateStringId.find_or_create_by_coding_region_id_and_name(
code.id, name
) or raise
end
goes = u.dr["GO"]
next if goes.nil? #no go terms associated
goes.each do |go_array|
go_id = go_array[0]
evidence_almost = go_array[2]
evidence = nil
# evidence code is the leading 2-3 capitals before the colon, e.g. "IDA:..."
if (matches = evidence_almost.match(/^([A-Z]{2,3})\:.*$/))
evidence = matches[1]
end
# error checking
if evidence.nil?
raise Exception, "No evidence code found in #{go_array.inspect} from #{evidence_almost}!"
end
go = GoTerm.find_by_go_identifier_or_alternate go_id
unless go
$stderr.puts "Couldn't find GO id #{go_id}"
next
end
CodingRegionGoTerm.find_or_create_by_coding_region_id_and_go_term_id_and_evidence_code(
code.id, go.id, evidence
).save!
end
end
end
end
# Record UniProt id mappings (column 0) as alternate names against the
# coding regions named in column 1, for arabidopsis, worm and fly.
# Raises when the mapped coding region is unknown.
def upload_apiloc_uniprot_mappings
{
'arabidopsis' => Species::ARABIDOPSIS_NAME,
'worm' => Species::ELEGANS_NAME,
'fly' => Species::DROSOPHILA_NAME
}.each do |this_name, proper_name|
FasterCSV.foreach("#{species_orthologue_folder}/uniprot_results/#{this_name}.mapping.tab",
:col_sep => "\t", :headers => true
) do |row|
# row[1] = known coding region name, row[0] = uniprot id to attach
code = CodingRegion.fs(row[1], proper_name) or raise Exception, "Don't know #{row[1]}"
CodingRegionAlternateStringId.find_or_create_by_coding_region_id_and_name(
code.id, row[0]
)
end
end
end
# Drosophila won't match well to orthomcl because orthomcl uses protein IDs whereas
# uniprot uses gene ids.
# This file was created by using the (useful and working) ID converter in flybase.
# Attaches each FlyBase protein id as an alternate name of the coding
# region identified by the gene id column.
def upload_apiloc_flybase_mappings
FasterCSV.foreach("#{species_orthologue_folder}/uniprot_results/flybase.mapping.tab",
:col_sep => "\t"
) do |row|
next if row[1] == 'unknown ID' #ignore rubbish
gene_id = row[3]
next if gene_id == '-' # not sure what this is, but I'll ignore for the moment
protein_id = row[1]
code = CodingRegion.fs(gene_id, Species::DROSOPHILA_NAME)
if code.nil?
$stderr.puts "Couldn't find gene #{gene_id}, skipping"
next
end
CodingRegionAlternateStringId.find_or_create_by_name_and_coding_region_id(
protein_id, code.id
)
end
end
# Return a list of orthomcl groups that fulfil these conditions:
# 1. It has a localised apicomplexan gene in it, as recorded by ApiLoc
# 2. It has a localised non-apicomplexan gene in it, as recorded by GO CC IDA annotation
def apiloc_orthomcl_groups_of_interest
OrthomclGroup.all(
:select => 'distinct(orthomcl_groups.*)',
:joins => {
:orthomcl_gene_orthomcl_group_orthomcl_runs => [
:orthomcl_run,
{:orthomcl_gene => {:coding_regions => [
:expressed_localisations
]}}
]
},
:conditions => {
:orthomcl_runs => {:name => OrthomclRun::ORTHOMCL_OFFICIAL_VERSION_3_NAME},
}
).select do |ogroup|
# only select those groups that have go terms annotated in non-apicomplexan species
# (condition 2: at least one IDA cellular-component GO annotation)
OrthomclGroup.count(
:joins => {:coding_regions =>[
:go_terms
]},
:conditions => ['orthomcl_groups.id = ? and coding_region_go_terms.evidence_code = ? and go_terms.partition = ?',
ogroup.id, 'IDA', GoTerm::CELLULAR_COMPONENT
]
) > 0
end
end
# Print, one paragraph per interesting OrthoMCL group, the apicomplexan
# members (with their ApiLoc localisations) followed by model-organism
# members with their useful cellular-component GO annotations, for manual
# review of localisation conservation.
#
# Fix: removed the unused local `worthwhile` that was assigned and never
# read.
def generate_apiloc_orthomcl_groups_for_inspection
  interestings = %w(hsap mmus scer drer osat crei atha dmel cele)
  apiloc_orthomcl_groups_of_interest.each do |ogroup|
    paragraph = []
    ogroup.orthomcl_genes.all.each do |orthomcl_gene|
      four = orthomcl_gene.official_split[0]
      # Possible to have many coding regions now - using all of them just together, though there is
      # probably one good one and other useless and IEA if anything annotated. Actually
      # not necesssarilly, due to strain problems.
      #
      # Only print out one entry for each OrthoMCL gene, to condense things
      # but that line should have all the (uniq) go terms associated
      orthomcl_gene.coding_regions.uniq.each do |code|
        if OrthomclGene::OFFICIAL_ORTHOMCL_APICOMPLEXAN_CODES.include?(four)
          paragraph.push [
            orthomcl_gene.orthomcl_name,
            code.nil? ? nil : code.annotation.annotation,
            code.nil? ? nil : code.localisation_english,
          ].join("\t")
        elsif interestings.include?(four)
          unless code.nil?
            goes = code.coding_region_go_terms.cc.useful.all
            unless goes.empty?
              orig = orthomcl_gene.orthomcl_name
              goes.each do |code_go|
                paragraph.push [
                  orig,
                  code_go.go_term.go_identifier,
                  code_go.go_term.term,
                  code_go.evidence_code
                ].join("\t")
                orig = '' # label only the first GO line for this gene
              end
            end
          end
        end
      end
    end
    puts paragraph.uniq.join("\n")
    puts
  end
end
# Import the manually-curated localisation-conservation spreadsheet: each
# usable row holds a single-letter conservation code and an example gene,
# and the conservation call is recorded against that gene's official
# OrthoMCL group.
#
# Fixes: corrected the "Coun't" typo in the error message and removed the
# pointless trailing `.id` on the find_or_create result.
def apiloc_go_localisation_conservation_groups_to_database
  FasterCSV.foreach("#{PHD_DIR}/apiloc/species_orthologues4/breakdown3.manual.csv",
    :col_sep => "\t"
  ) do |row|
    # ignore lines that have nothing first or are the header line
    next unless row[0] and row[0].length > 0 and row[3]
    single = row[0]
    eg = row[1]
    full = OrthomclLocalisationConservations.single_letter_to_full_name(single)
    raise Exception, "Couldn't understand single letter '#{single}'" unless full
    # find the orthomcl group by using the gene in the first line (the example)
    ogene = OrthomclGene.official.find_by_orthomcl_name(eg)
    raise Exception, "Couldn't find orthomcl gene '#{eg}' as expected" if ogene.nil?
    # create the record
    OrthomclLocalisationConservations.find_or_create_by_orthomcl_group_id_and_conservation(
      ogene.official_group.id, full
    )
  end
end
# For each recorded localisation-conservation call, print the call next to
# the names of the group's most GO-annotated human gene (the human member
# with the most useful cellular-component GO terms).
def yes_vs_no_human_examples
OrthomclLocalisationConservations.all.collect do |l|
# human genes in this conservation call's orthomcl group
max_human = OrthomclGene.code('hsap').all(
:joins =>[
[:coding_regions => :go_terms],
:orthomcl_gene_orthomcl_group_orthomcl_runs
],
:conditions => {:orthomcl_gene_orthomcl_group_orthomcl_runs => {:orthomcl_group_id => l.orthomcl_group_id}}
).max do |h1, h2|
# rank human genes by their number of useful CC GO annotations
counter = lambda {|h|
CodingRegionGoTerm.cc.useful.count(
:joins => {:coding_region => :orthomcl_genes},
:conditions => {:orthomcl_genes => {:id => h.id}}
)
}
counter.call(h1) <=> counter.call(h2)
end
next unless max_human
puts [
l.conservation,
max_human.coding_regions.first.names.sort
].flatten.join("\t")
end
end
# Attach UniProt/SwissProt accessions (from a BioMart export) as alternate
# names to the coding regions identified by their Ensembl protein ids.
# Rows without a UniProt accession are skipped; unknown Ensembl ids raise.
def upload_uniprot_identifiers_for_ensembl_ids
FasterCSV.foreach("#{species_orthologue_folder}/gostat/human_ensembl_uniprot_ids.txt",
:col_sep => "\t", :headers => true
) do |row|
ens = row['Ensembl Protein ID']
uni = row['UniProt/SwissProt Accession']
raise unless ens
next unless uni
code = CodingRegion.f(ens)
raise unless code
CodingRegionAlternateStringId.find_or_create_by_coding_region_id_and_name_and_source(
code.id, uni, CodingRegionAlternateStringId::UNIPROT_SOURCE_NAME
) or raise
end
end
# Download the compressed UniProt flat file for each species of interest
# into the local knowledgebase directory, skipping files already present.
#
# Fix: File.exists? is deprecated in favour of File.exist?.
def download_uniprot_data
  UNIPROT_SPECIES_ID_NAME_HASH.each do |taxon_id, species_name|
    # Download the data into the expected name
    Dir.chdir("#{DATA_DIR}/UniProt/knowledgebase") do
      unless File.exist?("#{species_name}.gz")
        cmd = "wget -O '#{species_name}.gz' 'http://www.uniprot.org/uniprot/?query=taxonomy%3a#{taxon_id}&compress=yes&format=txt'"
        p cmd
        `#{cmd}`
      end
    end
  end
end
# Delete all the data associated with the uniprot species so the upload
# can start again.
#
# Fix: use destroy (runs callbacks and dependent associations) instead of
# delete, which removed only the species row itself and stranded the
# associated data — contradicting this method's stated purpose.
def destroy_all_uniprot_species
  APILOC_UNIPROT_SPECIES_NAMES.each do |species_name|
    s = Species.find_by_name(species_name)
    puts "#{species_name}..."
    s.destroy unless s.nil?
  end
end
# NCBI taxonomy id => canonical species name, for the model organisms
# whose UniProt knowledgebase entries are mirrored locally
# (see download_uniprot_data).
UNIPROT_SPECIES_ID_NAME_HASH = {
3702 => Species::ARABIDOPSIS_NAME,
9606 => Species::HUMAN_NAME,
10090 => Species::MOUSE_NAME,
4932 => Species::YEAST_NAME,
4896 => Species::POMBE_NAME,
10116 => Species::RAT_NAME,
7227 => Species::DROSOPHILA_NAME,
6239 => Species::ELEGANS_NAME,
44689 => Species::DICTYOSTELIUM_DISCOIDEUM_NAME,
7955 => Species::DANIO_RERIO_NAME,
5691 => Species::TRYPANOSOMA_BRUCEI_NAME,
}
# Convenience list of just the species names above.
APILOC_UNIPROT_SPECIES_NAMES = UNIPROT_SPECIES_ID_NAME_HASH.values
# Given that the species of interest are already downloaded from uniprot
# (using download_uniprot_data for instance), upload this data
# to the database, including GO terms. Other things need to be run afterwards
# to be able to link to OrthoMCL.
#
# This method could be more DRY - UniProtIterator could replace
# much of the code here. But eh for the moment.
# Upload already-downloaded UniProt flat files (see download_uniprot_data)
# into the database: one CodingRegion per entry (attached to the species'
# dummy gene), all accessions as alternate IDs, and all GO annotations with
# their evidence codes.
#
# species_names - a species name String, an Array of them, or nil for all
#                 APILOC_UNIPROT_SPECIES_NAMES.
def uniprot_to_database(species_names=nil)
  species_names ||= APILOC_UNIPROT_SPECIES_NAMES
  species_names = [species_names] unless species_names.kind_of?(Array)
  species_names.each do |species_name|
    count = 0
    current_uniprot_string = ''
    complete_filename = "#{DATA_DIR}/UniProt/knowledgebase/#{species_name}.gz"
    # Convert the whole gzip into a smaller file of just the lines we parse,
    # so parsing is faster. Use a Tempfile, not a static name, so two
    # concurrent instances don't clash.
    Tempfile.open("#{species_name}_reduced") do |tempfile|
      filename = tempfile.path
      # Fix: the redirection target was a corrupted literal instead of the
      # interpolated tempfile path, so the reduced file was never written
      # where it is read from below.
      cmd = "zcat '#{complete_filename}' |egrep '^(AC|DR GO|//)' >'#{filename}'"
      `#{cmd}`
      dummy_gene = Gene.find_or_create_dummy(species_name)
      # Fix: count '//' record terminators in the reduced file itself.
      progress = ProgressBar.new(species_name, `grep '^//' '#{filename}' |wc -l`.to_i)
      File.foreach(filename) do |line|
        if line == "//\n"
          count += 1
          progress.inc
          # current uniprot entry is finished - upload it
          u = Bio::UniProt.new(current_uniprot_string)
          # The first accession becomes the coding region's primary string id.
          axes = u.ac
          protein_name = axes[0]
          raise unless protein_name
          code = CodingRegion.find_or_create_by_gene_id_and_string_id(
            dummy_gene.id,
            protein_name
          )
          raise unless code.save!
          # Every accession (primary included) is also stored as an alternate id.
          protein_alternate_names = axes.no_nils
          protein_alternate_names.each do |name|
            CodingRegionAlternateStringId.find_or_create_by_coding_region_id_and_name_and_source(
              code.id, name, 'UniProt'
            ) or raise
          end
          goes = u.dr["GO"]
          goes ||= [] #no go terms associated - best to still make it to the end of the method, because it is too complex here for such hackery
          goes.each do |go_array|
            go_id = go_array[0]
            # The evidence code is the prefix of the third field, e.g. 'IDA:...'
            evidence_almost = go_array[2]
            evidence = nil
            if (matches = evidence_almost.match(/^([A-Z]{2,3})\:.*$/))
              evidence = matches[1]
            end
            # error checking
            if evidence.nil?
              raise Exception, "No evidence code found in #{go_array.inspect} from #{evidence_almost}!"
            end
            go = GoTerm.find_by_go_identifier_or_alternate go_id
            if go
              CodingRegionGoTerm.find_or_create_by_coding_region_id_and_go_term_id_and_evidence_code(
                code.id, go.id, evidence
              ).save!
            else
              $stderr.puts "Couldn't find GO id #{go_id}"
            end
          end
          current_uniprot_string = ''
        else
          current_uniprot_string += line
        end
      end
      progress.finish
    end #tempfile
    $stderr.puts "Uploaded #{count} from #{species_name}, now there is #{CodingRegion.s(species_name).count} coding regions in #{species_name}."
  end
  # Uploading the last entry separately is not required because the final
  # line of the file is always '//' - making it easy.
end
# Parse the Tetrahymena UniProt file and record each entry's ORF names
# (from the first GN line) as alternate identifiers on the coding region
# matching the primary accession.
def tetrahymena_orf_names_to_database
  species_name = Species::TETRAHYMENA_NAME
  current_uniprot_string = ''
  filename = "#{DATA_DIR}/UniProt/knowledgebase/#{Species::TETRAHYMENA_NAME}.gz"
  # Fix: the shell command contained a corrupted literal rather than the
  # interpolated filename, so the progress total was always zero.
  progress = ProgressBar.new(Species::TETRAHYMENA_NAME, `gunzip -c '#{filename}' |grep '^//' |wc -l`.to_i)
  Zlib::GzipReader.open(filename).each do |line|
    if line == "//\n"
      progress.inc
      # current uniprot entry is finished - upload it
      u = Bio::UniProt.new(current_uniprot_string)
      axes = u.ac
      protein_name = axes[0]
      raise unless protein_name
      code = CodingRegion.fs(protein_name, species_name)
      raise unless code
      u.gn[0][:orfs].each do |orfname|
        CodingRegionAlternateStringId.find_or_create_by_coding_region_id_and_name(
          code.id, orfname
        )
      end
      current_uniprot_string = ''
    else
      current_uniprot_string += line
    end
  end
end
# upload aliases so that orthomcl entries can be linked to uniprot ones.
# have to run tetrahymena_orf_names_to_database first though.
# Upload TGD aliases so OrthoMCL entries can be linked to UniProt ones.
# tetrahymena_orf_names_to_database must have been run first. Reports how
# many UniProt IDs were matched (goods) vs not found (bads).
def tetrahymena_gene_aliases_to_database
  bads = 0
  goods = 0
  filename = "#{DATA_DIR}/Tetrahymena thermophila/genome/TGD/Tt_ID_Mapping_File.txt"
  # Fix: the wc -l command contained a corrupted literal rather than the
  # interpolated filename, so the progress total was always zero.
  progress = ProgressBar.new(Species::TETRAHYMENA_NAME, `wc -l '#{filename}'`.to_i)
  FasterCSV.foreach(filename,
    :col_sep => "\t"
  ) do |row|
    progress.inc
    uniprot = row[0]
    orthomcl = row[1]
    code = CodingRegion.fs(uniprot, Species::TETRAHYMENA_NAME)
    if code.nil?
      bads += 1
    else
      goods += 1
      a = CodingRegionAlternateStringId.find_or_create_by_coding_region_id_and_source_and_name(
        code.id, 'TGD', orthomcl
      )
      raise unless a
    end
  end
  progress.finish
  $stderr.puts "Found #{goods}, failed #{bads}"
end
# Parse the yeast UniProt file and record each entry's locus names (from
# the first GN line) as alternate identifiers on the coding region matching
# the primary accession. Unknown accessions are reported, not fatal.
def yeastgenome_ids_to_database
  species_name = Species::YEAST_NAME
  current_uniprot_string = ''
  filename = "#{DATA_DIR}/UniProt/knowledgebase/#{species_name}.gz"
  # Fix: the shell command contained a corrupted literal rather than the
  # interpolated filename, so the progress total was always zero.
  progress = ProgressBar.new(species_name, `gunzip -c '#{filename}' |grep '^//' |wc -l`.to_i)
  Zlib::GzipReader.open(filename).each do |line|
    if line == "//\n"
      progress.inc
      # current uniprot entry is finished - upload it
      u = Bio::UniProt.new(current_uniprot_string)
      axes = u.ac
      protein_name = axes[0]
      raise unless protein_name
      code = CodingRegion.fs(protein_name, species_name)
      if code
        unless u.gn.empty?
          u.gn[0][:loci].each do |orfname|
            CodingRegionAlternateStringId.find_or_create_by_coding_region_id_and_name(
              code.id, orfname
            )
          end
        end
      else
        $stderr.puts "Unable to find protein `#{protein_name}'"
      end
      current_uniprot_string = ''
    else
      current_uniprot_string += line
    end
  end
  progress.finish
end
# Record WormBase identifiers ('DR WormBase' lines, e.g. WBGene ids and
# locus names) as alternate IDs for C. elegans coding regions.
def elegans_wormbase_identifiers
  species_name = Species::ELEGANS_NAME
  current_uniprot_string = ''
  complete_filename = "#{DATA_DIR}/UniProt/knowledgebase/#{species_name}.gz"
  # Convert the whole gzip into a smaller file of just the lines we parse,
  # so parsing is faster. NOTE(review): unlike uniprot_to_database this uses
  # a static reduced filename, so concurrent runs would clash.
  filename = "#{DATA_DIR}/UniProt/knowledgebase/#{species_name}_reduced"
  # Fix: both shell commands contained a corrupted literal rather than the
  # interpolated reduced filename.
  `zcat '#{complete_filename}' |egrep '^(AC|DR WormBase|//)' >'#{filename}'`
  progress = ProgressBar.new(species_name, `grep '^//' '#{filename}' |wc -l`.to_i)
  File.foreach(filename) do |line|
    if line == "//\n"
      progress.inc
      u = Bio::UniProt.new(current_uniprot_string)
      code = CodingRegion.fs(u.ac[0], Species::ELEGANS_NAME)
      raise unless code
      # DR WormBase; WBGene00000467; cep-1.
      ides = u.dr['WormBase']
      ides ||= []
      ides.flatten.each do |ident|
        a = CodingRegionAlternateStringId.find_or_create_by_coding_region_id_and_name_and_source(
          code.id, ident, 'WormBase'
        )
        raise unless a.save!
      end
      current_uniprot_string = ''
    else
      current_uniprot_string += line
    end
  end
  # Quote the path for the shell, consistent with the commands above.
  `rm '#{filename}'`
end
# Record Dictyostelium gene names and ORF names (from GN lines, e.g.
# "GN Name=myoJ; ...; ORFNames=DDB_G0272112;") as alternate IDs, and report
# how many entries gained names vs were skipped.
def dicystelium_names_to_database
  species_name = Species::DICTYOSTELIUM_DISCOIDEUM_NAME
  current_uniprot_string = ''
  complete_filename = "#{DATA_DIR}/UniProt/knowledgebase/#{species_name}.gz"
  # Convert the whole gzip into a smaller one, so parsing is faster.
  filename = "#{DATA_DIR}/UniProt/knowledgebase/#{species_name}_reduced"
  # Fix: these shell commands contained a corrupted literal rather than the
  # interpolated reduced filename.
  `zcat '#{complete_filename}' |egrep '^(AC|GN|//)' >'#{filename}'`
  progress = ProgressBar.new(species_name, `grep '^//' '#{filename}' |wc -l`.to_i)
  skipped_count = 0   # entries with no GN line at all
  skipped_count2 = 0  # entries with a GN line but no usable names
  added_count = 0
  File.foreach(filename) do |line|
    if line == "//\n"
      progress.inc
      u = Bio::UniProt.new(current_uniprot_string)
      code = CodingRegion.fs(u.ac[0], species_name)
      raise unless code
      # GN Name=myoJ; Synonyms=myo5B; ORFNames=DDB_G0272112;
      unless u.gn.empty? # for some reason using u.gn when there is nothing there returns an array, not a hash. Annoying.
        ides = []
        u.gn.each do |g|
          ides.push g[:name] unless g[:name].nil?
          ides.push g[:orfs] unless g[:orfs].nil?
        end
        ides = ides.flatten.no_nils
        ides ||= []
        ides.flatten.each do |ident|
          a = CodingRegionAlternateStringId.find_or_create_by_coding_region_id_and_name_and_source(
            code.id, ident, 'UniProtName'
          )
          raise unless a.save!
        end
        if ides.empty?
          skipped_count2 += 1
        else
          added_count += 1
        end
      else
        skipped_count += 1
      end
      current_uniprot_string = ''
    else
      current_uniprot_string += line
    end
  end
  `rm '#{filename}'`
  progress.finish
  $stderr.puts "Added names for #{added_count}, skipped #{skipped_count} and #{skipped_count2}"
end
# Record T. brucei ORF names (from GN lines) as alternate IDs. Same shape as
# dicystelium_names_to_database, except plain gene names are deliberately
# not uploaded (only ORF names).
def tbrucei_names_to_database
  species_name = Species::TRYPANOSOMA_BRUCEI_NAME
  current_uniprot_string = ''
  complete_filename = "#{DATA_DIR}/UniProt/knowledgebase/#{species_name}.gz"
  # Convert the whole gzip into a smaller one, so parsing is faster.
  filename = "#{DATA_DIR}/UniProt/knowledgebase/#{species_name}_reduced"
  # Fix: these shell commands contained a corrupted literal rather than the
  # interpolated reduced filename.
  `zcat '#{complete_filename}' |egrep '^(AC|GN|//)' >'#{filename}'`
  progress = ProgressBar.new(species_name, `grep '^//' '#{filename}' |wc -l`.to_i)
  skipped_count = 0   # entries with no GN line at all
  skipped_count2 = 0  # entries with a GN line but no ORF names
  added_count = 0
  File.foreach(filename) do |line|
    if line == "//\n"
      progress.inc
      u = Bio::UniProt.new(current_uniprot_string)
      code = CodingRegion.fs(u.ac[0], species_name)
      raise unless code
      # GN Name=myoJ; Synonyms=myo5B; ORFNames=DDB_G0272112;
      unless u.gn.empty? # for some reason using u.gn when there is nothing there returns an array, not a hash. Annoying.
        ides = []
        u.gn.each do |g|
          #ides.push g[:name] unless g[:name].nil?
          ides.push g[:orfs] unless g[:orfs].nil?
        end
        ides = ides.flatten.no_nils
        ides ||= []
        ides.flatten.each do |ident|
          a = CodingRegionAlternateStringId.find_or_create_by_coding_region_id_and_name_and_source(
            code.id, ident, 'UniProtName'
          )
          raise unless a.save!
        end
        if ides.empty?
          skipped_count2 += 1
        else
          added_count += 1
        end
      else
        skipped_count += 1
      end
      current_uniprot_string = ''
    else
      current_uniprot_string += line
    end
  end
  `rm '#{filename}'`
  progress.finish
  $stderr.puts "Added names for #{added_count}, skipped #{skipped_count} and #{skipped_count2}"
end
# For each Ensembl-backed species, attach the identifiers found on UniProt
# 'DR Ensembl' lines as 'Ensembl' alternate IDs. Only ENS-prefixed ids are
# kept, except for Drosophila which uses FBpp identifiers.
def uniprot_ensembl_databases
  species_list = [
    Species::MOUSE_NAME,
    Species::HUMAN_NAME,
    Species::DANIO_RERIO_NAME,
    Species::DROSOPHILA_NAME,
    Species::RAT_NAME,
  ]
  species_list.each do |species_name|
    Bio::UniProtIterator.foreach("#{DATA_DIR}/UniProt/knowledgebase/#{species_name}.gz", 'DR Ensembl') do |u|
      code = CodingRegion.fs(u.ac[0], species_name) or raise
      identifiers = u.dr['Ensembl'] || []
      identifiers.flatten.each do |ident|
        wanted = ident.match(/^ENS/) ||
          (species_name == Species::DROSOPHILA_NAME and ident.match(/^FBpp/))
        next unless wanted
        CodingRegionAlternateStringId.find_or_create_by_coding_region_id_and_name_and_source(
          code.id, ident, 'Ensembl'
        )
      end
    end
  end
end
# Attach RefSeq accessions (first field of each 'DR RefSeq' entry, with the
# version suffix stripped) as 'Refseq' alternate IDs for the listed species.
def uniprot_refseq_databases
  [
    Species::ARABIDOPSIS_NAME,
    Species::RICE_NAME,
    Species::POMBE_NAME,
  ].each do |species_name|
    Bio::UniProtIterator.foreach("#{DATA_DIR}/UniProt/knowledgebase/#{species_name}.gz", 'DR RefSeq') do |u|
      code = CodingRegion.fs(u.ac[0], species_name) or raise
      accessions = (u.dr['RefSeq'] || []).collect { |entry| entry[0] }
      accessions.each do |accession|
        # e.g. NP_001234.2 -> NP_001234
        versionless = accession.gsub(/\..*/,'')
        CodingRegionAlternateStringId.find_or_create_by_coding_region_id_and_name_and_source(
          code.id, versionless, 'Refseq'
        )
      end
    end
  end
end
# Attach every ORF name from the UniProt GN lines as a 'UniProtGeneName'
# alternate ID for the listed species.
def uniprot_gene_names
  [
    Species::TBRUCEI_NAME,
  ].each do |species_name|
    Bio::UniProtIterator.foreach("#{DATA_DIR}/UniProt/knowledgebase/#{species_name}.gz", 'GN ORFNames=') do |u|
      code = CodingRegion.fs(u.ac[0], species_name) or raise
      collected = []
      u.gn.each do |gn_entry|
        gn_entry[:orfs].each { |orf_name| collected.push orf_name }
      end
      collected.each do |orf_name|
        CodingRegionAlternateStringId.find_or_create_by_coding_region_id_and_name_and_source(
          code.id, orf_name, 'UniProtGeneName'
        )
      end
    end
  end
end
# Attach EuPathDB identifiers from UniProt 'DR EuPathDB' lines as alternate
# IDs for the listed species, so they can be linked to EuPathDB gene models.
def uniprot_eupathdb_databases
[
Species::TBRUCEI_NAME,
].each do |species_name|
Bio::UniProtIterator.foreach("#{DATA_DIR}/UniProt/knowledgebase/#{species_name}.gz", 'DR EuPathDB') do |u|
code = CodingRegion.fs(u.ac[0], species_name) or raise
# p u.dr
next if u.dr.empty?
# If the iterator matched a 'DR EuPathDB' line but the parsed hash has no
# such key, the parse went wrong - warn and stop processing this species.
if (u.dr['EuPathDB'].nil?); $stderr.puts "Incorrectly parsed line? #{u.dr.inspect}"; break; end
refseqs = u.dr['EuPathDB'].flatten
# NOTE(review): the stripped prefix 'EupathDB:' differs in case from the
# 'EuPathDB' key used above - confirm against the actual DR line format
# that this gsub ever matches.
refseqs = refseqs.collect{|r| r.gsub(/^EupathDB:/,'')}
refseqs.each do |r|
CodingRegionAlternateStringId.find_or_create_by_coding_region_id_and_name_and_source(
code.id, r, 'EuPathDB'
)
end
end
end
end
# Link Chlamydomonas UniProt entries to JGI identifiers via the GN ORF
# fields. ORF names look like CHLREDRAFT_123456 or CHLRE_123456; the prefix
# is stripped and the numeric remainder stored as a 'JGI' alternate ID.
def chlamydomonas_link_to_orthomcl_ids
  species_name = Species::CHLAMYDOMONAS_NAME
  Bio::UniProtIterator.foreach("#{DATA_DIR}/UniProt/knowledgebase/#{species_name}.gz", 'GN') do |u|
    code = CodingRegion.fs(u.ac[0], species_name) or raise
    gn = u.gn
    unless gn.empty?
      orfs = gn.collect{|g| g[:orfs]}
      unless orfs.empty?
        orfs.flatten.each do |orf|
          # Fix: a manual correction ("o = 'CHLREDRAFT_168484' if orf ==
          # 'CHLRE_168484'") was dead code here - its result was immediately
          # overwritten below. It is removed: the generic CHLRE_ prefix strip
          # already yields the same numeric id for that entry.
          raise Exception, "Unexpected orf: #{orf}" unless orf.match(/^CHLREDRAFT_/) or orf.match(/^CHLRE_/)
          o = orf.gsub(/^CHLREDRAFT_/, '')
          o = o.gsub(/^CHLRE_/,'')
          CodingRegionAlternateStringId.find_or_create_by_coding_region_id_and_name_and_source(
            code.id, o, 'JGI'
          )
        end
      end
    end
  end
end
# OrthoMCL gene IDs for Drosophila are encoded in the 'DR EnsemblMetazoa;' lines,
# such as
# DR EnsemblMetazoa; FBtr0075201; FBpp0074964; FBgn0036740.
# (and in particular the FBpp ones). Upload these pp ones as synonyms
# Upload the FBpp protein identifiers from 'DR EnsemblMetazoa' lines
# (e.g. "DR EnsemblMetazoa; FBtr0075201; FBpp0074964; FBgn0036740.") as
# 'EnsemblMetazoa' alternate IDs for Drosophila, then report totals.
def drosophila_ensembl_metazoa
  genes_with_ids = 0
  genes_without = 0
  ids_uploaded = 0
  Bio::UniProtIterator.foreach("#{DATA_DIR}/UniProt/knowledgebase/#{Species::DROSOPHILA_NAME}.gz", 'DR EnsemblMetazoa;') do |u|
    entries = u.dr['EnsemblMetazoa']
    if entries.nil?
      genes_without += 1
      next
    end
    code = CodingRegion.fs(u.ac[0], Species::DROSOPHILA_NAME) or raise
    fbpp_ids = entries.flatten.select { |ident| ident.match /^FBpp/ }
    fbpp_ids.each do |ident|
      ids_uploaded += 1
      CodingRegionAlternateStringId.find_or_create_by_coding_region_id_and_name_and_source(
        code.id, ident, 'EnsemblMetazoa'
      )
    end
    genes_with_ids += 1 unless fbpp_ids.empty?
  end
  $stderr.puts "Uploaded #{ids_uploaded} IDs for #{genes_with_ids} different genes, missed #{genes_without}"
end
# Print, for each UniProt species, a rough count of non-electronic
# (non-IEA, non-ISS) GO cellular component annotations in its flat file.
def uniprot_go_annotation_species_stats
  APILOC_UNIPROT_SPECIES_NAMES.each do |species_name|
    filename = "#{DATA_DIR}/UniProt/knowledgebase/#{species_name}.gz"
    if File.exists?(filename)
      # Fix: the zcat argument was a corrupted literal rather than the
      # interpolated filename, so every count came from a missing file.
      puts [
        species_name,
        `zcat '#{filename}'|grep ' GO' |grep -v IEA |grep -v ISS |grep 'C\:' |wc -l`
      ].join("\t")
    else
      puts "Couldn't find #{species_name} uniprot file"
    end
  end
end
# Create a spreadsheet that encapsulates all of the localisation
# information from apiloc, so that large scale analysis is simpler
# Emit (to stdout) one tab-separated spreadsheet row per localised coding
# region: species, IDs, annotation, localisation summary, OrthoMCL group
# context (localised/unlocalised apicomplexan orthologues, GO-annotated
# non-apicomplexan orthologues), publications, microscopy methods, strains
# and quotes. Genes with several organellar localisations are emitted once
# per localisation (column 6 is overwritten per repeat).
def create_apiloc_spreadsheet
nil_char = nil #because I'm not sure how the joins will work
microscopy_method_names = LocalisationAnnotation::POPULAR_MICROSCOPY_TYPE_NAME_SCOPE.keys.sort.reverse
small_split_string = '#' #use when only 1 delimiter within a cell is needed
big_split_string = ';' #use when 2 delimiters in one cell are needed
orthomcl_split_char = '_'
# Headings
puts [
'Species',
'Gene ID',
'Abbreviations',
'Official Gene Annotation',
'Localisation Summary',
'Cellular Localisation',
'Total Number of Cellular Localisations',
'OrthoMCL Group Identifier',
'Apicomplexan Orthologues with Recorded Localisation',
'Apicomplexan Orthologues without Recorded Localisation',
'Non-Apicomplexan Orthologues with IDA GO Cellular Component Annotation',
'Consensus Localisation of Orthology Group',
'PubMed IDs of Publications with Localisation',
microscopy_method_names,
'All Localisation Methods Used',
'Strains',
'Gene Model Mapping Comments',
'Quotes'
].flatten.join("\t")
# Only coding regions that actually have expressed localisations.
codes = CodingRegion.all(:joins => :expressed_localisations).uniq
progress = ProgressBar.new('apiloc_spreadsheet', codes.length)
codes.each do |code|
$stderr.puts code.string_id
progress.inc
to_print = []
organellar_locs = []
# species
to_print.push code.species.name
#EuPath or GenBank ID
to_print.push code.string_id
#common names
to_print.push code.literature_defined_names.join(small_split_string)
#annotation
a1 = code.annotation
to_print.push(a1.nil? ? nil_char : a1.annotation)
#full localisation description
to_print.push code.localisation_english
#'organellar' localisation (one per record,
#if there is more repeat the whole record)
#this might more sensibly be GO-oriented, but eh for the moment
organellar_locs = code.topsa.uniq
# Placeholder column; filled in per-localisation at the bottom of the loop.
to_print.push nil_char
# number of organellar localisations (ie number of records for this gene)
to_print.push organellar_locs.length
# OrthoMCL-related stuffs
ogene = code.single_orthomcl!
ogroup = (ogene.nil? ? nil : ogene.official_group)
if ogroup.nil?
# No group: pad the five OrthoMCL columns so later columns stay aligned.
5.times do
to_print.push nil_char
end
else
#orthomcl group
to_print.push ogroup.orthomcl_name
#localised apicomplexans in orthomcl group
locked = CodingRegion.all(
:joins => [
{:orthomcl_genes => :orthomcl_groups},
:expression_contexts
],
:conditions => [
'orthomcl_groups.id = ? and coding_regions.id != ?',
ogroup.id, code.id
],
:select => 'distinct(coding_regions.*)'
)
# Cell is quoted because the joined sub-fields may contain whitespace.
to_print.push "\"#{locked.collect{|a|
[
a.string_id,
a.annotation.annotation,
a.localisation_english
].join(small_split_string)
}.join(big_split_string)}\""
#unlocalised apicomplexans in orthomcl group
to_print.push ogroup.orthomcl_genes.apicomplexan.all.reject {|a|
a.coding_regions.select { |c|
c.expressed_localisations.count > 0
}.length > 0
}.reach.orthomcl_name.join(', ').gsub('|',orthomcl_split_char)
#non-apicomplexans with useful GO annotations in orthomcl group
#species, orthomcl id, uniprot id(s), go annotations
go_codes = CodingRegion.go_cc_usefully_termed.not_apicomplexan.all(
:joins => {:orthomcl_genes => :orthomcl_groups},
:conditions =>
["orthomcl_groups.id = ?", ogroup.id],
:select => 'distinct(coding_regions.*)',
:order => 'coding_regions.id'
)
to_print.push "\"#{go_codes.collect { |g|
[
g.species.name,
g.orthomcl_genes.reach.orthomcl_name.join(', ').gsub('|',orthomcl_split_char),
g.names.join(', '),
g.coding_region_go_terms.useful.cc.all.reach.go_term.term.join(', ')
].join(small_split_string)
}.join(big_split_string)}\""
# consensus of orthology group.
to_print.push 'consensus - TODO'
end
contexts = code.expression_contexts
annotations = code.localisation_annotations
# pubmed ids that localise the gene
to_print.push contexts.reach.publication.definition.no_nils.uniq.join(small_split_string)
# Categorise the microscopy methods
# One yes/no column per popular microscopy method name, in the same order
# as the headings emitted above.
microscopy_method_names.each do |name|
scopes =
LocalisationAnnotation::POPULAR_MICROSCOPY_TYPE_NAME_SCOPE[name]
done = LocalisationAnnotation
scopes.each do |scope|
done = done.send(scope)
end
if done.find_by_coding_region_id(code.id)
to_print.push 'yes'
else
to_print.push 'no'
end
end
# localisation methods used (assume different methods never give different results for the same gene)
to_print.push annotations.reach.microscopy_method.no_nils.uniq.join(small_split_string)
# strains
to_print.push annotations.reach.strain.no_nils.uniq.join(small_split_string)
# mapping comments
to_print.push annotations.reach.gene_mapping_comments.no_nils.uniq.join(small_split_string).gsub(/\"/,'')
# quotes
# have to escape quote characters otherwise I get rows joined together
to_print.push "\"#{annotations.reach.quote.uniq.join(small_split_string).gsub(/\"/,'\"')}\""
# Emit one row per organellar localisation, overwriting the placeholder
# 'Cellular Localisation' column (index 5) each time.
if organellar_locs.empty?
puts to_print.join("\t")
else
organellar_locs.each do |o|
to_print[5] = o.name
puts to_print.join("\t")
end
end
end
progress.finish
end
# The big GOA file has not been 'redundancy reduced', a process which is buggy,
# like the species level ones. Here I upload the species that I'm interested
# in using that big file, not the small one
# Upload GO annotations from the big (non-redundancy-reduced) GOA uniprot
# gene_association file, filtered by taxon, for every UniProt species of
# interest. Missing coding regions or GO terms are counted and reported,
# not fatal.
def goa_all_species_to_database
  require 'gene_association'
  UNIPROT_SPECIES_ID_NAME_HASH.each do |species_id, species_name|
    bad_codes_count = 0
    bad_go_count = 0
    good_count = 0
    Bio::GzipAndFilterGeneAssociation.foreach(
      "#{DATA_DIR}/GOA/gene_association.goa_uniprot.gz",
      "\ttaxon:#{species_id}\t"
    ) do |go|
      name = go.primary_id
      code = CodingRegion.fs(name, species_name)
      unless code
        $stderr.puts "Couldn't find coding region #{name}"
        bad_codes_count += 1
        next
      end
      go_term = GoTerm.find_by_go_identifier(go.go_identifier)
      unless go_term
        # Fix: this message previously said "coding region", which was
        # misleading - it is the GO term that is missing here.
        $stderr.puts "Couldn't find GO term #{go.go_identifier}"
        bad_go_count += 1
        next
      end
      CodingRegionGoTerm.find_or_create_by_coding_region_id_and_go_term_id(
        code.id, go_term.id
      )
      good_count += 1
    end
    $stderr.puts "#{good_count} all good, failed to find #{bad_codes_count} coding regions and #{bad_go_count} go terms"
  end
end
# For each apicomplexan species, tally how many localised genes have 1, 2,
# 3... distinct top-level localisations, both raw and with nucleus+cytoplasm
# collapsed into one. Also writes a per-gene CSV (duals.csv) including the
# P. falciparum orthologues' localisations for comparison.
def how_many_genes_have_dual_localisation?
dual_loc_folder = "#{PHD_DIR}/apiloc/experiments/dual_localisations"
raise unless File.exists?(dual_loc_folder)
file = File.open(File.join(dual_loc_folder, 'duals.csv'),'w')
Species.apicomplexan.each do |species|
species_name = species.name
codes = CodingRegion.s(species_name).all(
:joins => :expressed_localisations,
:select => 'distinct(coding_regions.*)'
)
# counts[n]           = number of genes with n top-level localisations
# nuc_aware_counts[n] = same, but nucleus+cytoplasm counted as one
# codes_per_count[n]  = the gene ids behind counts[n]
counts = []
nuc_aware_counts = []
codes_per_count = []
# write the results to the species-specific file
codes.each do |code|
next if code.string_id == CodingRegion::UNANNOTATED_CODING_REGIONS_DUMMY_GENE_NAME
tops = TopLevelLocalisation.positive.all(
:joins => {:apiloc_localisations => :expressed_coding_regions},
:conditions => ['coding_regions.id = ?',code.id],
:select => 'distinct(top_level_localisations.*)'
)
count = tops.length
# NOTE: the raw tally uses the un-adjusted count...
counts[count] ||= 0
counts[count] += 1
codes_per_count[count] ||= []
codes_per_count[count].push code.string_id
# nucleus and cytoplasm as a single localisation if both are included
# ...and only the nuc-aware tally (and the CSV) see this decrement.
names = tops.reach.name.retract
if names.include?('nucleus') and names.include?('cytoplasm')
count -= 1
end
# Write out the coding regions to a file
# gather the falciparum data
og = code.single_orthomcl!
fals = []
if og and og.official_group
fals = og.official_group.orthomcl_genes.code('pfal').all.collect do |ogene|
ogene.single_code
end
end
file.puts [
code.species.name,
code.string_id,
code.names,
count,
code.compartments.join('|'),
fals.reach.compartments.join('|'),
fals.reach.localisation_english.join('|')
].join("\t")
nuc_aware_counts[count] ||= 0
nuc_aware_counts[count] += 1
end
puts species_name
# p codes_per_count
p counts
p nuc_aware_counts
end
file.close
end
# Evaluate localisation prediction by orthology for P. falciparum: for each
# localised falciparum gene that is the sole falciparum member of its
# OrthoMCL group, predict a localisation from the group's most common
# localisation, compare it to the recorded one, print per-gene rows and
# a final tally of comparison outcomes (bins).
def falciparum_test_prediction_by_orthology_to_non_apicomplexans
bins = {}
puts [
'PlasmoDB ID',
'Names',
'Compartments',
'Prediction',
'Comparison',
'Full P. falciparum Localisation Information'
].join("\t")
CodingRegion.localised.falciparum.all(
:select => 'distinct(coding_regions.*)'
).each do |code|
# Unassigned genes just cause problems for orthomcl
next if code.string_id == CodingRegion::NO_MATCHING_GENE_MODEL
# When there is more than 1 P. falciparum protein in the group, then ignore this
group = code.single_orthomcl.official_group
if group.nil?
$stderr.puts "#{code.names.join(', ')} has no OrthoMCL group, ignoring."
next
end
num = group.orthomcl_genes.code(code.species.orthomcl_three_letter).count
if num != 1
$stderr.puts "#{code.names.join(', ')} has #{num} genes in its localisation group, ignoring"
next
end
# Genes whose group yields no prediction are silently skipped.
pred = code.apicomplexan_localisation_prediction_by_most_common_localisation
next if pred.nil?
goodness = code.compare_localisation_to_list(pred)
puts [
code.string_id,
code.names.join('|'),
code.compartments.join('|'),
pred,
goodness,
code.localisation_english,
].join("\t")
bins[goodness] ||= 0
bins[goodness] += 1
end
# Print the results of the analysis
p bins
end
# Looking through all the genes in the database, cache of the compartments so that things are easier to compare
# Walk every coding region and persist its computed compartments into
# CodingRegionCompartmentCache, so later comparisons don't recompute them.
# Apicomplexans are cached first, then all non-apicomplexan eukaryotes with
# useful GO CC terms.
def cache_all_compartments
  # Cache all apicomplexan compartments
  codes = CodingRegion.apicomplexan.all
  progress = ProgressBar.new('apicomplexans', codes.length)
  codes.each do |code|
    progress.inc
    comps = code.compartments
    comps.each do |comp|
      CodingRegionCompartmentCache.find_or_create_by_coding_region_id_and_compartment(
        code.id, comp
      )
    end
  end
  progress.finish
  # Cache all non-apicomplexan compartments
  # Fix: removed leftover debugging `p` calls (p code / p comp / p g) that
  # flooded stdout on every record; they served no purpose in normal runs.
  codes = CodingRegion.go_cc_usefully_termed.all(:select => 'distinct(coding_regions.*)')
  progress = ProgressBar.new('eukaryotes', codes.length)
  codes.each do |code|
    progress.inc
    comps = code.compartments
    comps.each do |comp|
      g = CodingRegionCompartmentCache.find_or_create_by_coding_region_id_and_compartment(
        code.id, comp
      )
      g.save!
    end
  end
  progress.finish
end
# How conserved is localisation between the three branches of life with significant
# data known about them?
# This method FAILS due to memory and compute time issues - I ended up
# essentially abandoning rails for this effort.
# For every OrthoMCL group with cached compartments, measure localisation
# agreement within one kingdom, each pair of kingdoms, and each triple of
# kingdoms, accumulating OntologyComparison outcomes per kingdom combination
# and printing the final tally.
# NOTE: this Rails-heavy version fails in practice on memory/compute; see
# conservation_of_eukaryotic_sub_cellular_localisation_slimmer.
def conservation_of_eukaryotic_sub_cellular_localisation(debug = false)
  groups_to_counts = {}
  # For each orthomcl group that has a connection to coding region, and
  # that coding region has a cached compartment
  groups = OrthomclGroup.all(
    # :select => 'distinct(orthomcl_groups.*)',
    :joins => {:orthomcl_genes => {:coding_regions => :coding_region_compartment_caches}}
    # :limit => 10,
    # :include => {:orthomcl_genes => {:coding_regions => :coding_region_compartment_caches}}
  )
  # ProgressBar on stdout, because debug is on stderr
  progress = ProgressBar.new('conservation', groups.length, STDOUT)
  groups.each do |ortho_group|
    progress.inc
    $stderr.puts "---------------------------------------------" if debug
    # For each non-Apicomplexan gene with localisation information in this group,
    # assign it compartments.
    # For each apicomplexan, get the compartments from apiloc
    # This is nicely abstracted already!
    # However, a single orthomcl gene can have multiple CodingRegion's associated.
    # Therefore each has to be analysed as an array, frustratingly.
    # reject the orthomcl gene if it has no coding regions associated with it.
    orthomcl_genes = OrthomclGene.all(
      :joins => [:coding_regions, :orthomcl_groups],
      :conditions => {:orthomcl_groups => {:id => ortho_group.id}}
    )
    # ortho_group.orthomcl_genes.uniq.reject do |s|
    #  # reject the orthomcl gene if it has no coding regions associated with it.
    #  s.coding_regions.empty?
    # end
    # Setup data structures
    kingdom_orthomcls = {} #array of kingdoms to orthomcl genes
    orthomcl_locs = {} #array of orthomcl_genes to localisations, cached for convenience and speed
    orthomcl_genes.each do |orthomcl_gene|
      # Localisations from all coding regions associated with an orthomcl gene are used.
      locs = CodingRegionCompartmentCache.all(
        :joins => {:coding_region => :orthomcl_genes},
        :conditions => {:orthomcl_genes => {:id => orthomcl_gene.id}}
      ).reach.compartment.uniq
      # locs = orthomcl_gene.coding_regions.reach.cached_compartments.flatten.uniq
      next if locs.empty? #ignore unlocalised genes completely from hereafter
      name = orthomcl_gene.orthomcl_name
      orthomcl_locs[name] = locs
      # no one orthomcl gene will have coding regions from 2 different species,
      # so using the first element of the array is fine
      species = orthomcl_gene.coding_regions[0].species
      kingdom_orthomcls[species.kingdom] ||= []
      kingdom_orthomcls[species.kingdom].push name
    end
    $stderr.puts kingdom_orthomcls.inspect if debug
    $stderr.puts orthomcl_locs.inspect if debug
    $stderr.puts "Kingdoms: #{kingdom_orthomcls.to_a.collect{|k| k[0]}.sort.join(', ')}" if debug
    # within the one kingdom, do they agree?
    kingdom_orthomcls.each do |kingdom, orthomcls|
      # If there is only a single coding region, then don't record
      number_in_kingdom_localised = orthomcls.length
      if number_in_kingdom_localised < 2
        $stderr.puts "#{ortho_group.orthomcl_name}, #{kingdom}, skipping (#{orthomcls.join(', ')})" if debug
        next
      end
      # convert orthomcl genes to localisation arrays
      locs = orthomcls.collect {|orthomcl|
        orthomcl_locs[orthomcl]
      }
      # OK, so now we are on. Let's do this
      agreement = OntologyComparison.new.agreement_of_group(locs)
      index = [kingdom]
      $stderr.puts "#{ortho_group.orthomcl_name}, #{index.inspect}, #{agreement}, #{orthomcls.join(' ')}" if debug
      groups_to_counts[index] ||= {}
      groups_to_counts[index][agreement] ||= 0
      groups_to_counts[index][agreement] += 1
    end
    # within two kingdoms, do they agree?
    kingdom_orthomcls.to_a.each_lower_triangular_matrix do |array1, array2|
      kingdom1 = array1[0]
      kingdom2 = array2[0]
      orthomcl_array1 = array1[1]
      orthomcl_array2 = array2[1]
      orthomcl_arrays = [orthomcl_array1, orthomcl_array2]
      # don't include unless there is an orthomcl in each kingdom
      zero_entriers = orthomcl_arrays.select{|o| o.length==0}
      if zero_entriers.length > 0
        # Fix: this referenced an undefined local `kingdoms` (NameError when
        # hit) and, unlike the three-kingdom branch, was not debug-guarded.
        $stderr.puts "#{ortho_group.orthomcl_name}, #{[kingdom1, kingdom2].join(' ')}, skipping" if debug
        next
      end
      locs_for_all = orthomcl_arrays.flatten.collect {|orthomcl| orthomcl_locs[orthomcl]}
      agreement = OntologyComparison.new.agreement_of_group(locs_for_all)
      index = [kingdom1, kingdom2].sort
      $stderr.puts "#{ortho_group.orthomcl_name}, #{index.inspect}, #{agreement}" if debug
      groups_to_counts[index] ||= {}
      groups_to_counts[index][agreement] ||= 0
      groups_to_counts[index][agreement] += 1
    end
    # within three kingdoms, do they agree?
    kingdom_orthomcls.to_a.each_lower_triangular_3d_matrix do |a1, a2, a3|
      kingdom1 = a1[0]
      kingdom2 = a2[0]
      kingdom3 = a3[0]
      orthomcl_array1 = a1[1]
      orthomcl_array2 = a2[1]
      orthomcl_array3 = a3[1]
      kingdoms = [kingdom1, kingdom2, kingdom3]
      orthomcl_arrays = [orthomcl_array1, orthomcl_array2, orthomcl_array3]
      # don't include unless there is an orthomcl in each kingdom
      zero_entriers = orthomcl_arrays.select{|o| o.length==0}
      if zero_entriers.length > 0
        $stderr.puts "#{ortho_group.orthomcl_name}, #{kingdoms.join(' ')}, skipping" if debug
        next
      end
      locs_for_all = orthomcl_arrays.flatten.collect {|orthomcl| orthomcl_locs[orthomcl]}
      agreement = OntologyComparison.new.agreement_of_group locs_for_all
      index = kingdoms.sort
      $stderr.puts "#{ortho_group.orthomcl_name}, #{index.inspect}, #{agreement}" if debug
      groups_to_counts[index] ||= {}
      groups_to_counts[index][agreement] ||= 0
      groups_to_counts[index][agreement] += 1
    end
  end
  progress.finish
  # print out the counts for each group of localisations
  p groups_to_counts
end
# An attempt to make conservation_of_eukaryotic_sub_cellular_localisation faster
# as well as using less memory. In the end the easiest way was to stay away from Rails
# almost completely, and just use find_by_sql for the big database dump to a csv file,
# and then parse that csv file one line at a time.
# Memory-friendly rework of conservation_of_eukaryotic_sub_cellular_localisation:
# dump (group, gene, compartment) triples straight from Postgres via COPY to
# a flat file, parse that file one row at a time into a per-group hash, then
# classify each group's agreement and print a percentage summary table.
def conservation_of_eukaryotic_sub_cellular_localisation_slimmer
  # Cache all of the kingdom information as orthomcl abbreviation to kingdom
  orthomcl_abbreviation_to_kingdom = {}
  Species.all(:conditions => 'orthomcl_three_letter is not null').each do |sp|
    orthomcl_abbreviation_to_kingdom[sp.orthomcl_three_letter] = Species::FOUR_WAY_NAME_TO_KINGDOM[sp.name]
  end
  # Copy the data out of the database to a csv file. There shouldn't be any duplicates
  tempfile = File.open('/tmp/eukaryotic_conservation','w')
  `chmod go+w #{tempfile.path}` #so postgres can write to this file as well
  OrthomclGene.find_by_sql "copy (select groupa.orthomcl_name, gene.orthomcl_name, cache.compartment from orthomcl_groups groupa inner join orthomcl_gene_orthomcl_group_orthomcl_runs ogogor on groupa.id=ogogor.orthomcl_group_id inner join orthomcl_genes gene on ogogor.orthomcl_gene_id=gene.id inner join orthomcl_gene_coding_regions ogc on ogc.orthomcl_gene_id=gene.id inner join coding_regions code on ogc.coding_region_id=code.id inner join coding_region_compartment_caches cache on code.id=cache.coding_region_id order by groupa.orthomcl_name) to '#{tempfile.path}'"
  tempfile.close
  # Parse the csv file to get the answers I'm looking for.
  # data[group]['kingdom_orthomcls'] maps kingdom -> gene names;
  # data[group]['orthomcl_locs'] maps gene name -> compartments.
  # (Fix: removed two dead top-level locals of those names that were
  # assigned here but never used - the per-group hashes below are the real
  # data structures.)
  data = {}
  FasterCSV.foreach(tempfile.path, :col_sep => "\t") do |row|
    # name columns
    raise unless row.length == 3
    group = row[0]
    gene = row[1]
    compartment = row[2]
    data[group] ||= {}
    # The kingdom comes from the gene name's species abbreviation prefix.
    kingdom = orthomcl_abbreviation_to_kingdom[OrthomclGene.new.official_split(gene)[0]]
    data[group]['kingdom_orthomcls'] ||= {}
    data[group]['kingdom_orthomcls'][kingdom] ||= []
    data[group]['kingdom_orthomcls'][kingdom].push gene
    data[group]['kingdom_orthomcls'][kingdom].uniq!
    data[group]['orthomcl_locs'] ||= {}
    data[group]['orthomcl_locs'][gene] ||= []
    data[group]['orthomcl_locs'][gene].push compartment
    data[group]['orthomcl_locs'][gene].uniq!
  end
  # Classify each of the groups into the different categories where possible
  groups_to_counts = {}
  data.each do |group, data2|
    $stderr.puts
    $stderr.puts '============================'
    classify_eukaryotic_conservation_of_single_orthomcl_group(
      data2['kingdom_orthomcls'],
      data2['orthomcl_locs'],
      groups_to_counts
    )
  end
  # Summary table: one row per kingdom combination, smallest combinations
  # first, with raw counts and rounded percentages.
  groups_to_counts.to_a.sort{|a,b| a[0].length<=>b[0].length}.each do |king_array, agrees|
    yes = agrees[OntologyComparison::COMPLETE_AGREEMENT]
    no = agrees[OntologyComparison::DISAGREEMENT]
    maybe = agrees[OntologyComparison::INCOMPLETE_AGREEMENT]
    yes ||= 0; no ||= 0; maybe ||= 0;
    total = (yes+no+maybe).to_f
    puts [
      king_array.join(','),
      yes, no, maybe,
      agrees[OntologyComparison::UNKNOWN_AGREEMENT],
      ((yes.to_f/total)*100).round,
      ((no.to_f/total)*100).round,
      ((maybe.to_f/total)*100).round,
    ].join("\t")
  end
end
# This is a modularisation of conservation_of_eukaryotic_sub_cellular_localisation,
# and does the calculations on the already transformed data (kingdom_orthomcls, orthomcl_locs).
# More details in conservation_of_eukaryotic_sub_cellular_localisation
#
# kingdom_orthomcls - Hash of kingdom name => Array of orthomcl gene names
# orthomcl_locs     - Hash of orthomcl gene name => Array of localisations
# groups_to_counts  - accumulator Hash, mutated in place:
#                     sorted Array of kingdom names => {agreement => count}
# debug             - when true, progress information is written to $stderr
def classify_eukaryotic_conservation_of_single_orthomcl_group(kingdom_orthomcls, orthomcl_locs, groups_to_counts, debug = true)
  $stderr.puts kingdom_orthomcls.inspect if debug
  $stderr.puts orthomcl_locs.inspect if debug
  $stderr.puts "Kingdoms: #{kingdom_orthomcls.to_a.collect{|k| k[0]}.sort.join(', ')}" if debug

  # Tally one agreement observation under the given (sorted) kingdom index.
  record = lambda do |index, agreement|
    groups_to_counts[index] ||= {}
    groups_to_counts[index][agreement] ||= 0
    groups_to_counts[index][agreement] += 1
  end

  # within the one kingdom, do they agree?
  kingdom_orthomcls.each do |kingdom, orthomcls|
    # If there is only a single localised gene, there is nothing to compare
    if orthomcls.length < 2
      $stderr.puts "One kingdom: #{kingdom}, skipping (#{orthomcls.join(', ')})" if debug
      next
    end
    # convert orthomcl genes to localisation arrays
    locs = orthomcls.collect {|orthomcl| orthomcl_locs[orthomcl]}
    agreement = OntologyComparison.new.agreement_of_group(locs)
    index = [kingdom]
    $stderr.puts "One kingdom: #{index.inspect}, #{agreement}, #{orthomcls.join(' ')}" if debug
    record.call(index, agreement)
  end

  # Shared logic for the 2/3/4-kingdom combinations. pairs is an Array of
  # [kingdom_name, orthomcl_gene_array] pairs; label names the combination
  # size in the debug output.
  #
  # BUGFIX: the original two-kingdom branch referenced an undefined local
  # `kingdoms` in its skip message (raising NameError whenever one side of a
  # pair had no localised genes) and printed it without the `if debug` guard.
  compare_combination = lambda do |pairs, label|
    kingdoms = pairs.collect{|p| p[0]}
    orthomcl_arrays = pairs.collect{|p| p[1]}
    # don't include unless there is a localised orthomcl in each kingdom
    if orthomcl_arrays.any?{|o| o.empty?}
      $stderr.puts "#{label}: #{kingdoms.join(' ')}, skipping" if debug
      next
    end
    locs_for_all = orthomcl_arrays.flatten.collect {|orthomcl| orthomcl_locs[orthomcl]}
    agreement = OntologyComparison.new.agreement_of_group(locs_for_all)
    index = kingdoms.sort
    $stderr.puts "#{label}: #{index.inspect}, #{agreement}" if debug
    record.call(index, agreement)
  end

  # within two kingdoms, do they agree?
  kingdom_orthomcls.to_a.each_lower_triangular_matrix do |a1, a2|
    compare_combination.call([a1, a2], 'Two kingdoms')
  end
  # within three kingdoms, do they agree?
  kingdom_orthomcls.to_a.each_lower_triangular_3d_matrix do |a1, a2, a3|
    compare_combination.call([a1, a2, a3], 'Three kingdoms')
  end
  # within four kingdoms, do they agree?
  kingdom_orthomcls.to_a.each_lower_triangular_4d_matrix do |a1, a2, a3, a4|
    compare_combination.call([a1, a2, a3, a4], 'Four kingdoms')
  end
end
# Using the assumption that the yeast-mouse, yeast-human and falciparum-toxo divergences are approximately
# equivalent, whatever that means, work out the conservation of localisation between each of those groups.
# Does yeast/mouse exhibit the same problems as falciparum/toxo when comparing localisations?
#
# species1, species2 - names of the two species to compare (defaults:
#                      P. falciparum and T. gondii).
# Prints a progress bar while working and pp's the final tally hash
# (kingdom/species index => {agreement => count}).
def localisation_conservation_between_pairs_of_species(species1 = Species::FALCIPARUM_NAME, species2 = Species::TOXOPLASMA_GONDII_NAME)
  groups_to_counts = {} #this array ends up holding all the answers after we have finished going through everything
  # All OrthoMCL groups that have at least one member gene in each of the two species.
  toxo_fal_groups = OrthomclGroup.with_species(Species::ORTHOMCL_CURRENT_LETTERS[species1]).with_species(Species::ORTHOMCL_CURRENT_LETTERS[species2]).all(
    :joins => {:orthomcl_genes => {:coding_regions => :coding_region_compartment_caches}},
    # :limit => 10,
    :select => 'distinct(orthomcl_groups.*)'
    # :conditions => ['orthomcl_groups.orthomcl_name = ? or orthomcl_groups.orthomcl_name = ?','OG3_10042','OG3_10032']
  )
  $stderr.puts "Found #{toxo_fal_groups.length} groups containing proteins from #{species1} and #{species2}"
  progress = ProgressBar.new('tfal', toxo_fal_groups.length, STDOUT)
  toxo_fal_groups.each do |tfgroup|
    progress.inc
    orthomcl_locs = {} # orthomcl gene name => Array of localisations
    species_orthomcls = {} #used like kingdom_locs in previous methods
    # collect the orthomcl_locs array for each species
    # (the collect result itself is unused; the block populates the two hashes above)
    arrays = [species1, species2].collect do |species_name|
      # collect compartments for each of the toxos
      genes = tfgroup.orthomcl_genes.code(Species::ORTHOMCL_CURRENT_LETTERS[species_name]).all
      gene_locs = {}
      # add all the locs for a given gene
      genes.each do |gene|
        locs = gene.coding_regions.collect{|c| c.coding_region_compartment_caches.reach.compartment.retract}.flatten.uniq #all compartments associated with the gene
        unless locs.empty?
          gene_locs[gene.orthomcl_name] = locs
        end
      end
      # $stderr.puts "Found #{genes.length} orthomcl genes in #{species_name} from #{tfgroup.orthomcl_name}, of those, #{gene_locs.length} had localisations"
      gene_locs.each do |gene, locs|
        species_orthomcls[species_name] ||= []
        species_orthomcls[species_name].push gene
        orthomcl_locs[gene] = locs
      end
    end
    # pp species_orthomcls
    # pp orthomcl_locs
    # Tally agreement for this group into groups_to_counts (species names play
    # the role of kingdoms here).
    classify_eukaryotic_conservation_of_single_orthomcl_group(species_orthomcls, orthomcl_locs, groups_to_counts)
  end
  progress.finish
  pp groups_to_counts
end
# Run localisation_conservation_between_pairs_of_species over every unique
# pair drawn from the species of interest, printing a banner between pairs.
def exhaustive_localisation_conservation_between_pairs_of_species
  species_of_interest = [
    Species::YEAST_NAME,
    Species::MOUSE_NAME,
    Species::HUMAN_NAME,
    Species::ARABIDOPSIS_NAME,
    Species::FALCIPARUM_NAME,
    Species::TOXOPLASMA_GONDII_NAME,
  ]
  species_of_interest.each_lower_triangular_matrix do |first_species, second_species|
    puts '=============================================================='
    localisation_conservation_between_pairs_of_species(first_species, second_species)
  end
end
# Print a species-by-species matrix of the fraction of OrthoMCL groups whose
# localisations are in complete agreement for each species pair, reading the
# pre-computed pair tallies from the pairs/results.ruby file.
def localisation_pairs_as_matrix
  results = {}
  File.foreach("#{PHD_DIR}/apiloc/pairs/results.ruby") do |line|
    # NOTE(review): eval of file contents — acceptable only because this file
    # is generated locally and trusted; never point this at untrusted input.
    results = results.merge(eval "{#{line}}")
  end
  species_names = [
    Species::YEAST_NAME,
    Species::MOUSE_NAME,
    Species::HUMAN_NAME,
    Species::ARABIDOPSIS_NAME,
    Species::FALCIPARUM_NAME,
    Species::TOXOPLASMA_GONDII_NAME,
  ]
  # Header row, then one row per species with a tab-separated fraction per column.
  print "\t"
  puts species_names.join("\t")
  species_names.each do |row_species|
    print row_species
    species_names.each do |column_species|
      print "\t"
      next if row_species == column_species
      pair_result = results[[row_species, column_species].sort]
      raise Exception, "Couldn't find #{[row_species, column_species].sort}" if pair_result.nil?
      print pair_result['complete agreement'].to_f / pair_result.values.sum
    end
    puts
  end
end
# If you take only localised falciparum proteins with localised yeast and mouse orthologues,
# what are the chances that they are conserved
#
# predicting_species - names of species whose (agreeing) localisations form the prediction
# test_species       - species whose protein is tested against that prediction
# Prints progress to STDOUT/$stderr and pp's a tally of agreement outcomes.
# NOTE(review): uses rand() without a local seed, so repeated runs can choose
# different test proteins and give slightly different tallies.
def falciparum_predicted_by_yeast_mouse(predicting_species=[Species::YEAST_NAME, Species::MOUSE_NAME],
    test_species=Species::FALCIPARUM_NAME)
  answer = {}
  # Build up the query using the with_species named_scope,
  # retrieving all groups that have members in each species
  fal_groups = OrthomclGroup.with_species(Species::ORTHOMCL_CURRENT_LETTERS[test_species])
  predicting_species.each do |sp|
    fal_groups = fal_groups.send(:with_species, Species::ORTHOMCL_CURRENT_LETTERS[sp])
  end
  fal_groups = fal_groups.all(:select => 'distinct(orthomcl_groups.*)')#, :limit => 20)
  $stderr.puts "Found #{fal_groups.length} groups with #{predicting_species.join(', ')} and #{test_species} proteins"
  progress = ProgressBar.new('predictionByTwo', fal_groups.length, STDOUT)
  fal_groups.each do |fal_group|
    progress.inc
    $stderr.puts
    # get the localisations from each of the predicting species
    # (one hash of orthomcl gene name => localisation Array per species)
    predicting_array = predicting_species.collect do |species_name|
      genes = fal_group.orthomcl_genes.code(Species::ORTHOMCL_CURRENT_LETTERS[species_name]).all
      gene_locs = {}
      # add all the locs for a given gene
      genes.each do |gene|
        locs = gene.coding_regions.collect{|c| c.coding_region_compartment_caches.reach.compartment.retract}.flatten.uniq #all compartments associated with the gene
        unless locs.empty?
          gene_locs[gene.orthomcl_name] = locs
        end
      end
      gene_locs
    end
    $stderr.puts "OGroup #{fal_group.orthomcl_name} gave #{predicting_array.inspect}"
    # only consider cases where there is localisations in each of the predicting species
    next if predicting_array.select{|a| a.empty?}.length > 0
    # only consider genes where the localisations from the predicting species agree
    flattened = predicting_array.inject{|a,b| a.merge(b)}.values
    $stderr.puts "flattened: #{flattened.inspect}"
    agreement = OntologyComparison.new.agreement_of_group(flattened)
    next unless agreement == OntologyComparison::COMPLETE_AGREEMENT
    $stderr.puts "They agree..."
    # Now compare the agreement between a random falciparum hit and the locs from the predicting
    # species (any one localisation array serves as the prediction since they all agree).
    prediction = flattened.to_a[0]
    $stderr.puts "Prediction: #{prediction}"
    all_fals = CodingRegion.falciparum.all(
      :joins => [:coding_region_compartment_caches, {:orthomcl_genes => :orthomcl_groups}],
      :conditions => ['orthomcl_groups.id = ?', fal_group.id]
    )
    next if all_fals.empty?
    # pick a single localised falciparum protein at random from this group
    fal = all_fals[rand(all_fals.length)]
    fal_compartments = fal.cached_compartments
    $stderr.puts "fal: #{fal.string_id} #{fal_compartments}"
    agreement = OntologyComparison.new.agreement_of_group([prediction, fal_compartments])
    $stderr.puts "Final agreement #{agreement}"
    answer[agreement] ||= 0
    answer[agreement] += 1
  end
  progress.finish
  pp answer
end
# For every species in the database, report how many OrthoMCL genes and how
# many official OrthoMCL groups carry a cached compartment (localisation)
# annotation. Output is one tab-separated line per species and metric.
def how_many_genes_are_localised_in_each_species
  species_names = Species.all.reach.name.retract

  # Localised OrthoMCL genes per species.
  species_names.each do |species_name|
    gene_count = OrthomclGene.count(
      :joins => {:coding_regions => [:coding_region_compartment_caches, {:gene => {:scaffold => :species}}]},
      :select => 'distinct(orthomcl_genes.id)',
      :conditions => {:species => {:name => species_name}}
    )
    puts ['OrthoMCL genes', species_name, gene_count].join("\t")
  end

  # Official OrthoMCL groups with a localised member, per species.
  species_names.each do |species_name|
    group_count = OrthomclGroup.official.count(
      :joins => {:orthomcl_genes => {:coding_regions => [:coding_region_compartment_caches, {:gene => {:scaffold => :species}}]}},
      :conditions => ['orthomcl_genes.orthomcl_name like ? and species.name = ?', "#{Species::ORTHOMCL_CURRENT_LETTERS[species_name]}|%", species_name],
      :select => 'distinct(orthomcl_groups.id)'
    )
    puts ['OrthoMCL groups', species_name, group_count].join("\t")
  end
end
# Predict the localisation of each test-species protein from the most common
# localisation of its orthologues in each predicting species, then tally how
# well the actual (cached) localisation agrees with that prediction.
# pp's a hash of outcome/reason => count.
def prediction_by_most_common_localisation(predicting_species=[Species::YEAST_NAME, Species::MOUSE_NAME],
    test_species=Species::FALCIPARUM_NAME)
  answer = {}
  # Build up the query using the with_species named_scope,
  # retrieving all groups that have members in each species
  fal_groups = OrthomclGroup.with_species(Species::ORTHOMCL_CURRENT_LETTERS[test_species])
  predicting_species.each do |sp|
    fal_groups = fal_groups.send(:with_species, Species::ORTHOMCL_CURRENT_LETTERS[sp])
  end
  fal_groups = fal_groups.all(:select => 'distinct(orthomcl_groups.*)')#, :limit => 20)
  $stderr.puts "Found #{fal_groups.length} groups with #{predicting_species.join(', ')} and #{test_species} proteins"
  progress = ProgressBar.new('predictionByCommon', fal_groups.length, STDOUT)
  fal_groups.each do |fal_group|
    progress.inc
    # Only include gene that have exactly 1 gene from that species, otherwise it is harder to
    # work out what is going on.
    all_tests = fal_group.orthomcl_genes.code(Species::ORTHOMCL_CURRENT_LETTERS[test_species]).all
    if all_tests.length > 1
      answer['Too many orthomcl genes found'] ||= 0
      answer['Too many orthomcl genes found'] += 1
      next
    end
    # gather the actual coding region - discard if there is not exactly 1
    codes = all_tests[0].coding_regions
    unless codes.length == 1
      answer["#{codes.length} coding regions for the 1 orthomcl gene"] ||= 0
      answer["#{codes.length} coding regions for the 1 orthomcl gene"] += 1
      next
    end
    code = codes[0]
    # Find the most common localisation in each predicting species.
    # (Previously this loop's collect result was assigned to an unused
    # `commons` variable — only the preds accumulator matters.)
    preds = [] # the prediction of the most common localisations
    predicting_species.each do |s|
      common = code.localisation_prediction_by_most_common_localisation(s)
      # Ignore when no loc is found or it is confusing
      if common.nil?
        answer["No localisation found when trying to find common"] ||= 0
        answer["No localisation found when trying to find common"] += 1
        next
      end
      # add the commonest localisation to the prediction array
      preds.push [common]
    end
    # Don't predict unless all species are present
    if preds.length == predicting_species.length
      # Only predict if the top 2 species are in agreement
      if OntologyComparison.new.agreement_of_group(preds) == OntologyComparison::COMPLETE_AGREEMENT
        final_locs = code.cached_compartments
        if final_locs.empty?
          answer["No test species localisation"] ||= 0
          answer["No test species localisation"] += 1
        else
          # Add the final localisation compartments and score the agreement
          preds.push final_locs
          acc = OntologyComparison.new.agreement_of_group(preds)
          answer[acc] ||= 0
          answer[acc] += 1
        end
      else
        answer["Predicting species don't agree"] ||= 0
        answer["Predicting species don't agree"] += 1
      end
    else
      answer["Not enough localisation info in predicting groups"] ||= 0
      answer["Not enough localisation info in predicting groups"] += 1
    end
  end
  progress.finish
  pp answer
end
# One-off setup for stuarts_basel_spreadsheet: cache a compartment entry for
# every usefully GO-CC-annotated yeast coding region.
def stuarts_basel_spreadsheet_yeast_setup
  # Earlier setup steps, retained for reference:
  # uniprot_to_database(Species::YEAST_NAME)
  # yeastgenome_ids_to_database
  # OrthomclGene.new.link_orthomcl_and_coding_regions(
  #   "scer",
  #   :accept_multiple_coding_regions => true
  # )
  # cache compartments
  yeast_codes = CodingRegion.s(Species::YEAST_NAME).go_cc_usefully_termed.all
  progress = ProgressBar.new('eukaryotes', yeast_codes.length)
  yeast_codes.each do |coding_region|
    progress.inc
    coding_region.compartments.each do |compartment|
      CodingRegionCompartmentCache.find_or_create_by_coding_region_id_and_compartment(
        coding_region.id, compartment
      )
    end
  end
  progress.finish
end
# Generate a tab-separated spreadsheet (one row per OrthoMCL group) showing up
# to two localised genes per species of interest, for manual inspection of
# cross-species localisation conservation.
#
# accept_multiples - when true, genes with several localisations are printed
#                    with the localisations joined; when false such genes
#                    cause the whole row to be suppressed.
def stuarts_basel_spreadsheet(accept_multiples = false)
  species_of_interest = [
    Species::ARABIDOPSIS_NAME,
    # BUGFIX: these two previously used Species::FALCIPARUM and
    # Species::TOXOPLASMA_GONDII, inconsistent with the *_NAME constants used
    # throughout this file, so the include? check on species names below
    # would never match genes from these two species.
    Species::FALCIPARUM_NAME,
    Species::TOXOPLASMA_GONDII_NAME,
    Species::YEAST_NAME,
    Species::MOUSE_NAME,
    Species::HUMAN_NAME
  ]
  $stderr.puts "Copying data to tempfile.."
  # Copy the data out of the database to a csv file. Beware that there is duplicates in this file
  tempfile = File.open('/tmp/eukaryotic_conservation','w')
  # Tempfile.open('eukaryotic_conservation') do |tempfile|
  `chmod go+w #{tempfile.path}` #so postgres can write to this file as well
  OrthomclGene.find_by_sql "copy (select groupa.orthomcl_name, gene.orthomcl_name, cache.compartment from orthomcl_groups groupa inner join orthomcl_gene_orthomcl_group_orthomcl_runs ogogor on groupa.id=ogogor.orthomcl_group_id inner join orthomcl_genes gene on ogogor.orthomcl_gene_id=gene.id inner join orthomcl_gene_coding_regions ogc on ogc.orthomcl_gene_id=gene.id inner join coding_regions code on ogc.coding_region_id=code.id inner join coding_region_compartment_caches cache on code.id=cache.coding_region_id order by groupa.orthomcl_name) to '#{tempfile.path}'"
  tempfile.close
  groups_genes = {}        # group name => Array of member orthomcl gene names
  genes_localisations = {} # orthomcl gene name => Array of localisations
  # Read groups, genes, and locs into memory
  $stderr.puts "Reading into memory sql results.."
  FasterCSV.foreach(tempfile.path, :col_sep => "\t") do |row|
    #FasterCSV.foreach('/tmp/eukaryotic_conservation_test', :col_sep => "\t") do |row|
    # name columns
    raise unless row.length == 3
    group = row[0]
    gene = row[1]
    compartment = row[2]
    groups_genes[group] ||= []
    groups_genes[group].push gene
    groups_genes[group].uniq!
    genes_localisations[gene] ||= []
    genes_localisations[gene].push compartment
    genes_localisations[gene].uniq!
  end
  # Print headers
  header = ['']
  species_of_interest.each do |s|
    header.push "#{s} ID 1"
    header.push "#{s} loc 1"
    header.push "#{s} ID 2"
    header.push "#{s} loc 2"
  end
  puts header.join("\t")
  # Iterate through each OrthoMCL group, printing them out if they fit the criteria
  $stderr.puts "Iterating through groups.."
  groups_genes.each do |group, ogenes|
    $stderr.puts "looking at group #{group}"
    # associate genes with species
    species_gene = {}
    ogenes.each do |ogene|
      sp = Species.four_letter_to_species_name(OrthomclGene.new.official_split(ogene)[0])
      unless species_of_interest.include?(sp)
        $stderr.puts "Ignoring info for #{sp}"
        next
      end
      species_gene[sp] ||= []
      species_gene[sp].push ogene
      species_gene[sp].uniq!
    end
    # skip groups that are only localised in a single species
    if species_gene.length == 1
      $stderr.puts "Rejecting #{group} because it only has localised genes in 1 species of interest"
      next
    end
    # skip groups that have more than 2 localised genes in each group.
    failed = false
    species_gene.each do |species, genes|
      if genes.length > 2
        $stderr.puts "Rejecting #{group}, because there are >2 genes with localisation info in #{species}.."
        failed = true
      end
    end
    next if failed
    # procedure for making printing easier: returns [gene id, localisation]
    # or nil when the gene has multiple localisations and they are not accepted
    generate_cell = lambda do |gene|
      locs = genes_localisations[gene]
      # cytoplasm+nucleus dual annotations collapse to nucleus
      if locs.include?('cytoplasm') and locs.include?('nucleus')
        locs.reject!{|l| l=='cytoplasm'}
      end
      if locs.length == 1
        [OrthomclGene.new.official_split(gene)[1], locs[0]]
      elsif locs.length == 0
        raise Exception, "Unexpected lack of loc information"
      else
        if accept_multiples
          [OrthomclGene.new.official_split(gene)[1], locs.sort.join(', ')]
        else
          $stderr.puts "Returning nil for #{gene} because there is #{locs.length} localisations"
          nil
        end
      end
    end
    row = [group]
    failed = false #fail if genes have >1 localisation
    species_of_interest.each do |s|
      $stderr.puts "What's in #{s}? #{species_gene[s].inspect}"
      if species_gene[s].nil? or species_gene[s].length == 0
        row.push ['','']
        row.push ['','']
      elsif species_gene[s].length == 1
        r = generate_cell.call species_gene[s][0]
        failed = true if r.nil?
        row.push r
        row.push ['','']
      else
        species_gene[s].each do |g|
          r = generate_cell.call g
          failed = true if r.nil?
          row.push r
        end
      end
    end
    puts row.join("\t") unless failed
  end
end
# Generate the data for a publications-per-year graph: for each year, count
# the publications that contributed at least one localisation.
def publication_per_year_graphing
  publications_per_year = {}
  unparseable = 0
  Publication.all(:joins => {:expression_contexts => :localisation}).uniq.each do |publication|
    year = publication.year
    if year.nil?
      unparseable += 1
      $stderr.puts "Failed: #{publication.inspect}"
    else
      publications_per_year[year] ||= 0
      publications_per_year[year] += 1
    end
  end
  puts ['Year','Number of Publications'].join("\t")
  publications_per_year.sort.each do |year, count|
    puts [year, count].join("\t")
  end
  $stderr.puts "Failed to year-ify #{unparseable} publications."
end
# Tally, per year, how many proteins were newly localised that year — each
# coding region counts only in the year of its earliest year-parseable
# publication. Prints a tab-separated table of year => new localisations.
def localisation_per_year_graphing
  counted_coding_region_ids = {} # coding region id => true once counted
  years = {}
  fails = 0
  # Get all the publications that have localisations in order of year
  # (publications without a parseable year sort first and are skipped below)
  Publication.all(:joins => {:expression_contexts => :localisation}).uniq.sort {|p1,p2|
    if p1.year.nil?
      -1
    elsif p2.year.nil?
      1
    else
      p1.year <=> p2.year
    end
  }.each do |pub|
    y = pub.year
    if y.nil? #ignore publications with improperly parsed years
      fails += 1
      next
    end
    ids = CodingRegion.all(:select => 'coding_regions.id',
      :joins => {
        :expression_contexts => [:localisation, :publication]
      },
      :conditions => {:publications => {:id => pub.id}}
    )
    ids.each do |i|
      # Constant-time Hash lookup keyed on record id — replaces the original
      # Array#include? of whole records, which made this loop O(n^2) in the
      # total number of localised proteins.
      next if counted_coding_region_ids[i.id]
      counted_coding_region_ids[i.id] = true
      years[y] ||= 0
      years[y] += 1
    end
  end
  puts ['Year','Number of New Protein Localisations'].join("\t")
  years.sort.each do |a,b|
    puts [a,b].join("\t")
  end
  $stderr.puts "Failed to year-ify #{fails} publications."
end
# How many and which genes are recorded in the malaria metabolic pathways
# database (Hagai's screen-scraped list), but aren't recorded in ApiLoc?
def comparison_with_hagai
  File.open("#{PHD_DIR}/screenscraping_hagai/localised_genes_and_links.txt").each_line do |line|
    line.strip!
    columns = line.split(' ')
    #next unless columns[0].match(/#{columns[1]}/) #ignore possibly incorrect links
    plasmodb_id = columns[1]
    coding_region = CodingRegion.ff(plasmodb_id)
    unless coding_region
      puts "Couldn't find plasmodb id #{plasmodb_id}"
      next
    end
    if coding_region.expressed_localisations.count == 0
      puts "Not found in ApiLoc: #{plasmodb_id}"
    else
      puts "Found in ApiLoc: #{plasmodb_id}"
    end
  end
end
# Create a spreadsheet with all the synonyms, so it can be attached as supplementary
# material. Prints a tab-separated table with one row per known localisation
# or developmental-stage synonym.
def synonyms_spreadsheet
  sep = "\t"
  # Print titles
  puts [
    # BUGFIX: header was previously misspelt "Localistion".
    "Localisation or Developmental Stage?",
    "Species",
    "Full name(s)",
    "Synonym"
  ].join(sep)
  # Procedure for printing out each of the hits (actual may be one name or an
  # Array of names)
  printer = lambda do |species_name, actual, synonym, cv_name|
    if actual.kind_of?(Array)
      puts [cv_name, species_name, actual.join(","), synonym].join(sep)
    else
      puts [cv_name, species_name, actual, synonym].join(sep)
    end
  end
  # Print all the synonyms
  [
    LocalisationConstants::KNOWN_LOCALISATION_SYNONYMS,
    DevelopmentalStageConstants::KNOWN_DEVELOPMENTAL_STAGE_SYNONYMS,
  ].each do |cv|
    cv_name = {
      DevelopmentalStageConstants::KNOWN_DEVELOPMENTAL_STAGE_SYNONYMS => 'Developmental Stage',
      LocalisationConstants::KNOWN_LOCALISATION_SYNONYMS => 'Localisation'
    }[cv]
    cv.each do |sp, hash|
      if sp == Species::OTHER_SPECIES #for species not with a genome project
        # Shape of this branch, e.g.:
        # Species::OTHER_SPECIES => {
        #   'Sarcocystis muris' => {
        #     'surface' => 'cell surface'
        #   },
        #   'Babesia gibsoni' => {
        #     'surface' => 'cell surface',
        #     ...
        #   },
        hash.each do |species_name, hash2|
          hash2.each do |synonym, actual|
            printer.call(species_name, actual, synonym, cv_name)
          end
        end
      else #normal species
        hash.each do |synonym, actual|
          printer.call(sp, actual, synonym, cv_name)
        end
      end
    end
  end
end
# Print the umbrella ("top level") term to specific-term mappings of the
# localisation and developmental-stage controlled vocabularies as a
# tab-separated table.
def umbrella_localisations_controlled_vocabulary
  sep = "\t"
  # Print titles
  puts [
    # BUGFIX: header was previously misspelt "Localistion".
    "Localisation or Developmental Stage?",
    "Umbrella",
    "Specific Localisation Name"
  ].join(sep)
  # Localisation mapping is umbrella => Array of specific terms
  ApilocLocalisationTopLevelLocalisation::APILOC_TOP_LEVEL_LOCALISATION_HASH.each do |umbrella, unders|
    unders.each do |under|
      puts ["Localisation", umbrella, under].join(sep)
    end
  end
  # Developmental-stage mapping is specific term => umbrella
  DevelopmentalStageTopLevelDevelopmentalStage::APILOC_DEVELOPMENTAL_STAGE_TOP_LEVEL_DEVELOPMENTAL_STAGES.each do |under, umbrella|
    puts ["Developmental Stage", umbrella, under].join(sep)
  end
end
# For each sequenced apicomplexan species, count how many OrthoMCL genes have
# at least one orthologue with a known localisation. Two passes: first where
# "localised" means a cached GO CC (IDA) compartment from any eukaryote, then
# where it means a curated/expressed localisation only.
def how_many_apicomplexan_genes_have_localised_orthologues
  # Return the ids of official OrthoMCL groups containing at least one gene
  # localised according to the given join chain.
  localised_group_ids = lambda do |joins|
    $stderr.puts "starting group search"
    groups = OrthomclGroup.official.all(
      :joins => joins,
      :select => 'distinct(orthomcl_groups.id)'
    )
    $stderr.puts "finished group search, found #{groups.length} groups"
    ids = groups.collect{|g| g.id}
    $stderr.puts "finished group id transform"
    ids
  end

  # Print, per sequenced apicomplexan species, the total number of OrthoMCL
  # genes and how many of them belong to one of the given localised groups.
  report = lambda do |group_ids|
    Species.sequenced_apicomplexan.all.each do |sp|
      num_orthomcl_genes = OrthomclGene.code(sp.orthomcl_three_letter).count(
        :select => 'distinct(orthomcl_genes.id)'
      )
      # go through the groups and work out how many genes there are in those groups from this species
      num_with_a_localised_orthologue = OrthomclGene.code(sp.orthomcl_three_letter).count(
        :select => 'distinct(orthomcl_genes.id)',
        :joins => :orthomcl_groups,
        :conditions => "orthomcl_gene_orthomcl_group_orthomcl_runs.orthomcl_group_id in #{group_ids.to_sql_in_string}"
      )
      puts [
        sp.name,
        num_orthomcl_genes,
        num_with_a_localised_orthologue
      ].join("\t")
    end
  end

  # BUGFIX: the second section previously re-printed the first section's
  # "if you consider" header (copy-paste error), and "orthologues" was
  # misspelt "ortholgues" in both headers.
  puts '# Genes that have localised orthologues, if you consider GO CC IDA terms from all Eukaryotes'
  report.call(localised_group_ids.call({:orthomcl_genes => {:coding_regions => :coding_region_compartment_caches}}))

  puts
  puts '# Genes that have localised orthologues, if you don\'t consider GO CC IDA terms from all Eukaryotes'
  report.call(localised_group_ids.call({:orthomcl_genes => {:coding_regions => :expressed_localisations}}))
end
# Print, grouped by OrthoMCL group, the localisation annotations of
# apicomplexan genes whenever a group has localised genes from more than one
# apicomplexan species — a manual-inspection aid for conservation of
# localisation across the Apicomplexa.
def conservation_of_localisation_in_apicomplexa
  groups_skipped_because_less_than_2_different_species = 0
  # For each OrthoMCL group that contains 2 or more proteins localised,
  # When there is at least 2 different species involved
  groups = OrthomclGroup.all(
    :joins => {:orthomcl_genes => {:coding_regions => :expressed_localisations}}
  ).uniq
  groups.each do |group|
    $stderr.puts "Inspecting #{group.orthomcl_name}.."
    genes = group.orthomcl_genes.apicomplexan.all.uniq
    # If there is more than 1 species involved
    # (official_split[0] is the species prefix of the orthomcl gene name)
    outputs = []
    if genes.collect{|g| g.official_split[0]}.uniq.length > 1
      genes.each do |g|
        codes = g.coding_regions.all(:joins => :coding_region_compartment_caches).uniq
        # require exactly one localised coding region per orthomcl gene
        if codes.length != 1
          $stderr.puts "Skipping coding regions for #{g.orthomcl_name}, since only #{codes.length} genes with loc were linked"
          next
        end
        code = codes[0]
        # one output row: species, id, annotation, compartments, english description
        outputs.push [
          code.species.name,
          code.string_id,
          code.annotation.annotation,
          code.coding_region_compartment_caches.reach.compartment.join(', '),
          code.localisation_english,
        ]
      end
    else
      groups_skipped_because_less_than_2_different_species += 1
    end
    # NOTE(review): when the single-species branch above was taken, outputs is
    # empty and this falls through to the "lack of annotation" message, which
    # is slightly misleading for those groups.
    if outputs.collect{|o| o[0]}.uniq.length > 1 #if there is >1 species involved
      puts
      puts '#####################################'
      puts outputs.collect{|d| d.join("\t")}.join("\n")
    else
      $stderr.puts "Skipped group #{group.orthomcl_name} because of lack of annotation, only found #{outputs.collect{|d| d.join(",")}.join(" ### ")}"
    end
  end
  $stderr.puts "Skipped #{groups_skipped_because_less_than_2_different_species} groups due to lack of >1 species having loc information"
end
# the idea is to find how many genes have annotations that fall into these 2 categories:
# * Fall under the current definition of what is an organelle
# * Don't fall under any organelle, and aren't (exclusively) annotated by GO terms that are ancestors of the organelle terms.
#
# For each non-apicomplexan species, classifies every GO CC annotation of every
# coding region as subsumed (under a recognised organelle term), ancestral
# (an ancestor of a recognised organelle term), or wayward (neither), and
# prints per-species counts plus a tally of wayward terms to $stderr.
def how_many_non_organelle_cc_annotations
  # Create a list of all the GO terms that are included in the various compartments
  # this is a list of subsumers
  compartment_go_terms = CodingRegion.new.create_organelle_go_term_mappers
  # Create a list of ancestors of compartment GO terms.
  ancestors = OntologyComparison::RECOGNIZED_LOCATIONS.collect {|loc|
    go_entry = GoTerm.find_by_term(loc)
    raise Exception, "Unable to find GO term in database: #{loc}" unless go_entry
    anc = Bio::Go.new.ancestors_cc(go_entry.go_identifier)
    $stderr.puts "Found #{anc.length} ancestors for #{go_entry.go_identifier} #{go_entry.term}"
    anc
  }.flatten.sort.uniq
  # For each non-apicomplexan species with a orthomcl code
  Species.not_apicomplexan.all.each do |sp|
    $stderr.puts sp.name
    # get all the different GO terms for each of the different genes in the species
    count_subsumed = 0
    count_ancestral = 0
    count_wayward = 0
    wayward_ids = {} # wayward GO term name => occurrence count across the species
    codes = CodingRegion.s(sp.name).all(:joins => [:orthomcl_genes, :go_terms], :include => :go_terms).uniq
    progress = ProgressBar.new(sp.name,codes.length)
    codes.each do |code|
      progress.inc
      local_wayward_ids = {} # per-gene wayward tally (used in the commented debug output)
      # per-gene flags: a gene may be counted in several categories at once
      subsumed = false
      ancestral = false
      wayward = false
      code.go_terms.each do |g|
        next unless g.aspect == GoTerm::CELLULAR_COMPONENT
        anc = false
        sub = false
        #ancestral?
        if ancestors.include?(g.go_identifier)
          anc = true
          ancestral = true
        end
        #subsumed?
        compartment_go_terms.each do |subsumer|
          if subsumer.subsume?(g.go_identifier, false)
            sub = true
            subsumed = true
          end
        end
        # else wayward
        if !anc and !sub
          local_wayward_ids[g.term] = 0 if local_wayward_ids[g.term].nil?
          local_wayward_ids[g.term] += 1
          wayward_ids[g.term] = 0 if wayward_ids[g.term].nil?
          wayward_ids[g.term] += 1
          wayward = true
        end
      end
      # $stderr.puts "#{code.string_id}: ancestral: #{ancestral}, subsumed: #{subsumed}, wayward: #{wayward}: "+
      # "#{local_wayward_ids.collect{|term, count| "#{count} #{term}"}.join("\t")}"
      #error check
      count_subsumed += 1 if subsumed
      count_ancestral += 1 if ancestral
      count_wayward += 1 if wayward
    end
    progress.finish
    to_print = [
      sp.name,
      count_ancestral,
      count_wayward,
      count_subsumed,
    ]
    puts to_print.join("\t")
    # Wayward terms sorted by descending occurrence count
    $stderr.puts "Found these wayward from #{sp.name}:\n"
    strings = wayward_ids.to_a.sort{|a,b|
      b[1]<=>a[1]
    }.collect{|a|
      "wayward\t#{a[1]}\t#{a[0]}"
    }.join("\n")
    $stderr.puts strings
    $stderr.puts
  end
end
# Tally, per author last name, how many proteins that author's publications
# localised first (each coding region is credited only to the authors of its
# earliest year-parseable publication). Prints a tab-separated table sorted
# by descending count.
def most_localisations_by_authorship
  counted_coding_region_ids = {} # coding region id => true once credited
  authors_localisations = {}
  fails = 0
  # Get all the publications that have localisations in order of year
  # (publications without a parseable year sort first and are skipped below)
  Publication.all(:joins => {:expression_contexts => :localisation}).uniq.sort {|p1,p2|
    if p1.year.nil?
      -1
    elsif p2.year.nil?
      1
    else
      p1.year <=> p2.year
    end
  }.each do |pub|
    y = pub.year
    if y.nil? #ignore publications with improperly parsed years
      fails += 1
      next
    end
    ids = CodingRegion.all(:select => 'distinct(coding_regions.id)',
      :joins => {
        :expression_contexts => [:localisation, :publication]
      },
      :conditions => {:publications => {:id => pub.id}}
    )
    ids.each do |i|
      # Constant-time Hash lookup keyed on record id — replaces the original
      # Array#include? of whole records, which made this loop O(n^2) in the
      # total number of localised proteins.
      next if counted_coding_region_ids[i.id]
      counted_coding_region_ids[i.id] = true
      # split the author list and credit each author's last name
      authors = pub.authors.split('., ')
      authors.each do |author|
        last_name = author.split(' ')[0].gsub(/,/,'')
        authors_localisations[last_name] ||= 0
        authors_localisations[last_name] += 1
      end
    end
  end
  puts ['Last name','Number of New Protein Localisations'].join("\t")
  authors_localisations.to_a.sort{|a,b| b[1]<=>a[1]}.each do |a,b|
    puts [a,b].join("\t")
  end
  $stderr.puts "Failed to parse #{fails} publications properly"
end
# upload the IDA annotations from geneontology.org from there
# (the GeneDB T. brucei gene_association file) into the database, and report
# totals for attempted, uploaded and failed annotations.
def tbrucei_amigo_gene_associations_to_database
  require 'gene_association'
  failed_to_find_id_count = 0
  failed_to_find_go_term_count = 0
  ida_annotation_count = 0
  upload_annotations = 0
  Bio::GzipAndFilterGeneAssociation.foreach(
    "#{DATA_DIR}/GO/cvs/go/gene-associations/gene_association.GeneDB_Tbrucei.gz", #all T. brucei annotations are from GeneDB
    "\tIDA\t"
  ) do |go|
    ida_annotation_count += 1
    puts "Trying GO term #{go.go_identifier} for #{go.primary_id}"
    code = CodingRegion.fs(go.primary_id, Species::TBRUCEI_NAME)
    if code
      go_term = GoTerm.find_by_go_identifier_or_alternate(go.go_identifier)
      if go_term
        puts "Uploading GO term #{go.go_identifier} for #{code.string_id}"
        a = CodingRegionGoTerm.find_or_create_by_go_term_id_and_coding_region_id_and_evidence_code(
          go_term.id, code.id, go.evidence_code
        )
        raise unless a.save!
        upload_annotations += 1
      else
        failed_to_find_go_term_count += 1
      end
    else
      # BUGFIX: previously this reset the counter to zero (`= 0`) instead of
      # incrementing it, so the "gene not found" total always printed as 0.
      failed_to_find_id_count += 1
    end
  end
  $stderr.puts "Found #{ida_annotation_count} annotations attempted to be uploaded"
  $stderr.puts "Uploaded #{upload_annotations} annotations"
  $stderr.puts "Failed to upload #{failed_to_find_id_count} annotations since the gene was not found in ApiLoc"
  $stderr.puts "Failed to upload #{failed_to_find_go_term_count} annotations since the go term was not found in ApiLoc"
end
# Which organelle has the most conserved localisation?
# For each pair of model species, repeatably-randomly chooses one gene per
# species from each shared OrthoMCL group, scores localisation agreement
# with OntologyComparison, and prints a per-organelle agreement tally.
def conservation_of_localisation_stratified_by_organelle_pairings
  srand 47 #set random number generator to be a deterministic series of random numbers so I don't get differences between runs
  # Define list of species to pair up
  species_names = [
    Species::ARABIDOPSIS_NAME,
    Species::HUMAN_NAME,
    Species::MOUSE_NAME,
    Species::YEAST_NAME,
    Species::POMBE_NAME,
    Species::RAT_NAME,
    Species::DROSOPHILA_NAME,
    Species::ELEGANS_NAME,
    Species::DICTYOSTELIUM_DISCOIDEUM_NAME,
    Species::DANIO_RERIO_NAME,
    Species::TRYPANOSOMA_BRUCEI_NAME,
    Species::PLASMODIUM_FALCIPARUM_NAME,
    Species::TOXOPLASMA_GONDII_NAME,
  ]
  # for each pair
  species_names.pairs.each do |pair|
    p1 = pair[0]
    p2 = pair[1]
    $stderr.puts "SQLing #{p1} versus #{p2}.."
    # for each group, choose a protein (repeatably) randomly from each species, so we have a pair of genes
    # not sure how to do this the rails way
    # Copy the data out of the database to a csv file.
    # tempfile = File.new("#{PHD_DIR}/apiloc/experiments/organelle_conservation/dummy.csv") #debug
    # csv_path = "/home/ben/phd/gnr2/apiloc_logs/organelle_conservation/#{p1} and #{p2}.csv".gsub(' ','_')
    # tempfile = File.open(csv_path)
    # tempfile = File.open("/home/ben/phd/gnr2/apiloc_logs/organelle_conservation/#{p1} and #{p2}.csv".gsub(' ','_'),'w')
    # `chmod go+w #{tempfile.path}` #so postgres can write to this file as well
    # OrthomclGene.find_by_sql "copy(select distinct(groups.orthomcl_name,codes1.string_id,codes2.string_id, ogenes1.orthomcl_name, ogenes2.orthomcl_name, caches1.compartment, caches2.compartment) from orthomcl_groups groups,
    #
    # orthomcl_gene_orthomcl_group_orthomcl_runs ogogor1,
    # orthomcl_genes ogenes1,
    # orthomcl_gene_coding_regions ogcr1,
    # coding_regions codes1,
    # coding_region_compartment_caches caches1,
    # genes genes1,
    # scaffolds scaffolds1,
    # species species1,
    #
    # orthomcl_gene_orthomcl_group_orthomcl_runs ogogor2,
    # orthomcl_genes ogenes2,
    # orthomcl_gene_coding_regions ogcr2,
    # coding_regions codes2,
    # coding_region_compartment_caches caches2,
    # genes genes2,
    # scaffolds scaffolds2,
    # species species2
    #
    # where
    # species1.name = '#{p1}' and
    # groups.id = ogogor1.orthomcl_group_id and
    # ogogor1.orthomcl_gene_id = ogenes1.id and
    # ogcr1.orthomcl_gene_id = ogenes1.id and
    # ogcr1.coding_region_id = codes1.id and
    # caches1.coding_region_id = codes1.id and
    # codes1.gene_id = genes1.id and
    # genes1.scaffold_id = scaffolds1.id and
    # scaffolds1.species_id = species1.id
    #
    # and
    # species2.name = '#{p2}' and
    # groups.id = ogogor2.orthomcl_group_id and
    # ogogor2.orthomcl_gene_id = ogenes2.id and
    # ogcr2.orthomcl_gene_id = ogenes2.id and
    # ogcr2.coding_region_id = codes2.id and
    # caches2.coding_region_id = codes2.id and
    # codes2.gene_id = genes2.id and
    # genes2.scaffold_id = scaffolds2.id and
    # scaffolds2.species_id = species2.id) to '#{tempfile.path}'"
    # tempfile.close
    # next #just create the CSVs at this point
    orth1 = Species::ORTHOMCL_FOUR_LETTERS[p1]
    orth2 = Species::ORTHOMCL_FOUR_LETTERS[p2]
    $stderr.puts "Groups of #{orth1}"
    # BUG FIX: class name was misspelt 'OrhtomclGroup' (NameError), and the
    # SQL placeholder was quoted ("like '?'"), which breaks ActiveRecord's
    # bind substitution of the pattern.
    groups1 = OrthomclGroup.all(
      :joins => {:orthomcl_gene => {:coding_regions => :coding_region_compartment_caches}},
      :conditions => ["orthomcl_genes.orthomcl_name like ?","#{orth1}%"]
    )
    $stderr.puts "Groups of #{orth2}"
    groups2 = OrthomclGroup.all(
      :joins => {:orthomcl_gene => {:coding_regions => :coding_region_compartment_caches}},
      :conditions => ["orthomcl_genes.orthomcl_name like ?","#{orth2}%"]
    )
    # convert it all to a big useful hash, partly for historical reasons
    dat = {}
    progress = ProgressBar.new('hashing',groups1.length)
    groups1.each do |group|
      progress.inc
      # BUG FIX: was groups2.include?(group1) — 'group1' was never defined
      # (NameError); the group being iterated is what should be tested.
      if groups2.include?(group)
        ogenes1 = OrthomclGene.all(
          :include => [:orthomcl_groups,
            :coding_regions => :coding_region_compartment_caches],
          :joins => {:coding_regions => :coding_region_compartment_caches},
          :conditions => ["orthomcl_genes.orthomcl_name like ? and orthomcl_group_id = ?","#{orth1}%",group.id]
        )
        ogenes2 = OrthomclGene.all(
          :include => [:orthomcl_groups,
            :coding_regions => :coding_region_compartment_caches],
          :joins => {:coding_regions => :coding_region_compartment_caches},
          :conditions => ["orthomcl_genes.orthomcl_name like ? and orthomcl_group_id = ?","#{orth2}%",group.id]
        )
        ogenes1.each do |ogene1|
          caches = ogene1.coding_regions.all.reach.coding_region_compartment_caches.compartment.retract
          dat[group.orthomcl_name] ||= {}
          dat[group.orthomcl_name][p1] ||= {}
          dat[group.orthomcl_name][p1][ogene1.orthomcl_name] = caches.uniq
        end
        ogenes2.each do |ogene2|
          caches = ogene2.coding_regions.all.reach.coding_region_compartment_caches.compartment.retract
          dat[group.orthomcl_name][p2] ||= {}
          dat[group.orthomcl_name][p2][ogene2.orthomcl_name] = caches.uniq
        end
        # BUG FIX: removed a leftover debugging 'break' here that stopped
        # the loop after the first shared group, so the tally below only
        # ever covered a single group.
      end
    end
    progress.finish
    p dat #debug dump of the collected group => species => gene => compartments hash
    # Read in the CSV, converting it all to a hash
    # of orthomcl_group => Array of arrays of the rest of the recorded info
    # group => species => gene => compartments
    # dat = {}
    # File.open(csv_path).each_line do |line|
    # row = line.strip.split(',')
    # unless row.length == 7
    # raise Exception, "failed to parse line #{line}"
    # end
    # # groups.orthomcl_name,codes1.string_id,codes2.string_id, ogenes1.orthomcl_name,
    # # ogenes2.orthomcl_name, caches1.compartment, caches2.compartment
    # group = row[0].gsub('(','')
    # code1 = row[1]
    # code2 = row[2]
    # ogene1 = row[3]
    # ogene2 = row[4]
    # cache1 = row[5]
    # cache2 = row[6].gsub(')','')
    #
    # dat[group] ||= {}
    # dat[group][p1] ||= {}
    # dat[group][p1][ogene1] ||= []
    # dat[group][p1][ogene1].push cache1
    #
    # dat[group][p2] ||= {}
    # dat[group][p2][ogene2] ||= []
    # dat[group][p2][ogene2].push cache2
    # end
    # for each of the orthomcl groups
    tally = {}
    dat.each do |group, other|
      raise Exception, "Found unexpected number of species in hash group => #{other.inspect}" unless other.keys.length == 2
      # choose one gene (repeatably) randomly from each species
      p_ones = other[p1].to_a
      p_twos = other[p2].to_a
      rand1 = p_ones[rand(p_ones.size)]
      rand2 = p_twos[rand(p_twos.size)]
      g1 = {rand1[0] => rand1[1]}
      g2 = {rand2[0] => rand2[1]}
      locs1 = g1.values.flatten.uniq
      locs2 = g2.values.flatten.uniq
      # work out whether the two genes are conserved in their localisation
      agree = OntologyComparison.new.agreement_of_pair(locs1,locs2)
      # debug out genes involved, compartments, group_id, species,
      $stderr.puts "From group #{group}, chose #{g1.inspect} from #{p1} and #{g2.inspect} from #{p2}. Agreement: #{agree}"
      # record conservation, organelles involved, within the species pairing
      [g1.values, g2.values].flatten.uniq.each do |org|
        tally[org] ||= {}
        tally[org][agree] ||= 0
        tally[org][agree] += 1
      end
    end
    #puts "From #{p1} and #{p2},"
    OntologyComparison::RECOGNIZED_LOCATIONS.each do |loc|
      if tally[loc]
        puts [
          p1,p2,loc,
          tally[loc][OntologyComparison::COMPLETE_AGREEMENT],
          tally[loc][OntologyComparison::INCOMPLETE_AGREEMENT],
          tally[loc][OntologyComparison::DISAGREEMENT],
        ].join("\t")
      else
        puts [
          p1,p2,loc,
          0,0,0
        ].join("\t")
      end
    end
  end
  srand #revert to regular random number generation in case anything else happens after this method
end
end
small
require "zlib"
require 'pp'
# Methods used in the ApiLoc publication
class BScript
# Print, for each apicomplexan species, the number of localised genes and
# curated publications as tab-separated rows, followed by a grand total.
def apiloc_stats
  puts "For each species, how many genes, publications"
  protein_total = 0
  publication_total = 0
  Species.apicomplexan.all.sort_by(&:name).each do |species|
    proteins = species.number_of_proteins_localised_in_apiloc
    publications = species.number_of_publications_in_apiloc
    puts [species.name, proteins, publications].join("\t")
    protein_total += proteins
    publication_total += publications
  end
  puts ['Total', protein_total, publication_total].join("\t")
end
# Like HTML stats, but used for the version information part
# of the ApiLoc website.
# Prints an HTML table of localised-gene and publication counts per
# apicomplexan species, with a bolded totals row.
def apiloc_html_stats
  total_proteins = 0
  total_publications = 0
  puts '<table>'
  puts '<tr><th>Species</th><th>Localised genes</th><th>Publications curated</th></tr>'
  # FIX: removed a stray argument-less '.push' from the method chain
  # (a no-op leftover that only obscured the code).
  Species.apicomplexan.all.sort{|a,b| a.name <=> b.name}.each do |s|
    protein_count = s.number_of_proteins_localised_in_apiloc
    publication_count = s.number_of_publications_in_apiloc
    puts "<tr><td><i>#{s.name}</i></td><td>#{protein_count}</td><td>#{publication_count}</td></tr>"
    total_proteins += protein_count
    total_publications += publication_count
  end
  print [
    '<tr><td><b>Total</b>',
    total_proteins,
    total_publications
  ].join("</b></td><td><b>")
  puts '</b></td></tr>'
  puts '</table>'
end
# Print a tab-separated table of localised-gene counts: one row per
# top-level localisation, one column per species of interest.
def species_localisation_breakdown
  # names = Localisation.all(:joins => :apiloc_top_level_localisation).reach.name.uniq.push(nil)
  # print "species\t"
  # puts names.join("\t")
  top_names = [
    'apical',
    'inner membrane complex',
    'merozoite surface',
    'parasite plasma membrane',
    'parasitophorous vacuole',
    'exported',
    'cytoplasm',
    'food vacuole',
    'mitochondrion',
    'apicoplast',
    'golgi',
    'endoplasmic reticulum',
    'other',
    'nucleus'
  ]
  interests = [
    'Plasmodium falciparum',
    'Toxoplasma gondii',
    'Plasmodium berghei',
    'Cryptosporidium parvum'
  ]
  # Header row: empty first cell, then the species names.
  puts [nil].push(interests).flatten.join("\t")
  top_names.each do |top_name|
    top = TopLevelLocalisation.find_by_name(top_name)
    print top_name
    interests.each do |name|
      s = Species.find_by_name(name)
      if top.name == 'other'
        # 'other' is special-cased: only count genes whose *only*
        # top-level localisation is 'other'.
        count = 0
        CodingRegion.all(
          :select => 'distinct(coding_regions.id)',
          :joins => {:expression_contexts => {:localisation => :apiloc_top_level_localisation}},
          :conditions => ['top_level_localisation_id = ? and species_id = ?', top.id, s.id]
        ).each do |code|
          tops = code.expressed_localisations.reach.apiloc_top_level_localisation.flatten
          if tops.length == 1
            raise unless tops[0].name == 'other'
            count += 1
          end
        end
        print "\t#{count}"
      else
        # Simple distinct count of coding regions with this top-level
        # localisation in this species.
        count = CodingRegion.count(
          :select => 'distinct(coding_regions.id)',
          :joins => {:expression_contexts => {:localisation => :apiloc_top_level_localisation}},
          :conditions => ['top_level_localisation_id = ? and species_id = ?', top.id, s.id]
        )
        print "\t#{count}"
      end
    end
    puts
  end
end
# Report, using the official OrthoMCL groupings, how many P. falciparum
# gene groups have T. gondii orthologues (any, and strictly 1-to-1).
def how_many_falciparum_genes_have_toxo_orthologs
  puts ".. all according to orthomcl #{OrthomclRun::ORTHOMCL_OFFICIAL_NEWEST_NAME}"
  all_orthomcl_groups_with_falciparum = OrthomclRun.find_by_name(OrthomclRun::ORTHOMCL_OFFICIAL_NEWEST_NAME).orthomcl_groups.select {|group|
    group.orthomcl_genes.code('pfa').count > 0
  }
  puts "How many P. falciparum orthomcl groups?"
  puts all_orthomcl_groups_with_falciparum.length
  # BUG FIX: was '.each', which returns the groups themselves rather than
  # the per-group toxo counts, so the reject{|num| num == 0} below never
  # removed anything and the reported number was wrong.
  numbers_of_orthologs = all_orthomcl_groups_with_falciparum.collect do |group|
    group.orthomcl_genes.code('tgo').count
  end
  puts
  puts "How many P. falciparum genes have any toxo orthomcl orthologs?"
  puts numbers_of_orthologs.reject {|num|
    num == 0
  }.length
  puts
  puts "How many P. falciparum genes have 1 to 1 mapping with toxo?"
  # BUG FIX: compare the gene *counts* to 1 (the association itself was
  # being compared to 1, which is never true), and print the number of
  # such groups rather than the group objects themselves.
  puts all_orthomcl_groups_with_falciparum.select {|group|
    group.orthomcl_genes.code('pfa').count == 1 and group.orthomcl_genes.code('tgo').count == 1
  }.length
end
# Compare localisation knowledge between P. falciparum and T. gondii using
# reciprocal BLAST results (1e-5 cutoff, tabular output). Tallies how many
# falciparum genes are localised in both / either / neither species, and
# writes paired localisation details to a CSV for manual comparison.
def distribution_of_falciparum_hits_given_toxo
  toxo_only = []            # localised in a toxo hit but not in falciparum
  falc_only = []            # localised in falciparum but not in any toxo hit
  no_hits = []              # falciparum genes with no toxo BLAST hit
  hits_not_localised = []   # hits exist but neither side is localised
  falc_and_toxo = []        # localised on both sides
  # why the hell doesn't bioruby do this for me?
  falciparum_blasts = {}
  toxo_blasts = {}
  # convert the blast file as it currently exists into a hash of plasmodb => blast_hits
  Bio::Blast::Report.new(
    File.open("#{PHD_DIR}/apiloc/experiments/falciparum_vs_toxo_blast/falciparum_v_toxo.1e-5.tab.out",'r').read,
    :tab
  ).iterations[0].hits.each do |hit|
    # Strip the "source|" prefix to get bare gene identifiers.
    q = hit.query_id.gsub(/.*\|/,'')
    s = hit.definition.gsub(/.*\|/,'')
    falciparum_blasts[q] ||= []
    falciparum_blasts[q].push s
  end
  # NOTE(review): toxo_blasts is populated here but never read again in
  # this method — possibly a leftover from the reciprocal-best-hit plan.
  Bio::Blast::Report.new(
    File.open("#{PHD_DIR}/apiloc/experiments/falciparum_vs_toxo_blast/toxo_v_falciparum.1e-5.tab.out",'r').read,
    :tab
  ).iterations[0].hits.each do |hit|
    q = hit.query_id.gsub(/.*\|/,'')
    s = hit.definition.gsub(/.*\|/,'')
    toxo_blasts[q] ||= []
    toxo_blasts[q].push s
  end
  # On average, how many hits does the toxo gene have to falciparum given
  # arbitrary 1e-5 cutoff?
  # File.open("#{PHD_DIR}/apiloc/experiments/falciparum_to_toxo_how_many_hits.csv",'w') do |how_many_hits|
  # File.open("#{PHD_DIR}/apiloc/experiments/falciparum_to_toxo_best_evalue.csv",'w') do |best_evalue|
  File.open("#{PHD_DIR}/apiloc/experiments/falciparum_to_toxo_best_evalue.csv", 'w') do |loc_comparison|
    blast_hits = CodingRegion.s(Species::FALCIPARUM_NAME).all(
      :joins => :amino_acid_sequence,
      :include => {:expression_contexts => :localisation}
      # :limit => 10,
      # :conditions => ['string_id = ?', 'PF13_0280']
    ).collect do |falciparum|
      # does this falciparum have a hit?
      # compare localisation of the falciparum and toxo protein
      falciparum_locs = falciparum.expression_contexts.reach.localisation.reject{|l| l.nil?}
      toxo_ids = falciparum_blasts[falciparum.string_id]
      toxo_ids ||= []
      toxos = toxo_ids.collect do |toxo_id|
        # NOTE(review): other methods here pass Species::TOXOPLASMA_GONDII_NAME —
        # confirm Species::TOXOPLASMA_GONDII is the intended constant form.
        t = CodingRegion.find_by_name_or_alternate_and_species(toxo_id, Species::TOXOPLASMA_GONDII)
        raise unless t
        t
      end
      toxo_locs = toxos.collect {|toxo|
        toxo.expression_contexts.reach.localisation.retract
      }.flatten.reject{|l| l.nil?}
      if toxos.length > 0
        # protein localised in falciparum but not in toxo
        if !falciparum_locs.empty? and !toxo_locs.empty?
          # Write the falciparum gene, then each toxo hit, then a blank line.
          loc_comparison.puts [
            falciparum.string_id,
            falciparum.annotation.annotation,
            falciparum.localisation_english
          ].join("\t")
          toxos.each do |toxo|
            loc_comparison.puts [
              toxo.string_id,
              toxo.annotation.annotation,
              toxo.localisation_english
            ].join("\t")
          end
          loc_comparison.puts
          falc_and_toxo.push [falciparum, toxos]
        end
        # stats about how well the protein is localised
        if toxo_locs.empty? and !falciparum_locs.empty?
          falc_only.push [falciparum, toxos]
        end
        if !toxo_locs.empty? and falciparum_locs.empty?
          toxo_only.push [falciparum, toxos]
        end
        if toxo_locs.empty? and falciparum_locs.empty?
          hits_not_localised.push falciparum.string_id
        end
      else
        no_hits.push falciparum.string_id
      end
    end
  end
  puts "How many genes are localised in toxo and falciparum?"
  puts falc_and_toxo.length
  puts
  puts "How many genes are localised in toxo but not in falciparum?"
  puts toxo_only.length
  puts
  puts "How many genes are localised in falciparum but not in toxo?"
  puts falc_only.length
  puts
  puts "How many falciparum genes have no toxo hit?"
  puts no_hits.length
  puts
  puts "How many have hits but are not localised?"
  puts hits_not_localised.length
  puts
end
# Count OrthoMCL group/gene overlap between T. gondii ('tgo') and
# P. falciparum ('pfa'): shared groups, and groups/genes specific to each.
def tgo_v_pfa_crossover_count
  both = OrthomclGroup.all_overlapping_groups(%w(tgo pfa))
  pfa = OrthomclGroup.all_overlapping_groups(%w(pfa))
  tgo = OrthomclGroup.all_overlapping_groups(%w(tgo))
  # Distinct gene counts per species within the shared groups.
  both_genes_pfa = both.collect{|b| b.orthomcl_genes.codes(%w(pfa)).count(:select => 'distinct(orthomcl_genes.id)')}.sum
  both_genes_tgo = both.collect{|b| b.orthomcl_genes.codes(%w(tgo)).count(:select => 'distinct(orthomcl_genes.id)')}.sum
  # NOTE(review): other methods in this file use Species::FALCIPARUM_NAME /
  # Species::TOXOPLASMA_GONDII_NAME with CodingRegion.s — confirm these
  # bare constant forms resolve to the same species records.
  pfa_genes = CodingRegion.s(Species::FALCIPARUM).count(:joins => :amino_acid_sequence)
  tgo_genes = CodingRegion.s(Species::TOXOPLASMA_GONDII).count(:joins => :amino_acid_sequence)
  puts "How many OrthoMCL groups have at least one protein in pfa and tgo?"
  puts "#{both.length} groups, #{both_genes_pfa} falciparum genes, #{both_genes_tgo} toxo genes"
  puts
  puts "How many OrthoMCL groups are specific to falciparum?"
  puts "#{pfa.length - both.length} groups, #{pfa_genes - both_genes_pfa} genes"
  puts
  puts "How many OrthoMCL groups are specific to toxo?"
  puts "#{tgo.length - both.length} groups, #{tgo_genes - both_genes_tgo} genes"
  puts
end
# Print out a fasta file of all the sequences that are in apiloc.
# If a block is given it takes each coding region so that it can be transformed
# into a fasta sequence header as in AminoAcidSequence#fasta, otherwise
# a default is used.
#
# io - destination IO object (defaults to $stdout).
def apiloc_fasta(io = $stdout)
  # Every coding region with at least one expression context (i.e. appears
  # in ApiLoc); uniq because the join can return duplicate rows.
  CodingRegion.all(
    :joins => :expression_contexts
  ).uniq.each do |code|
    if code.amino_acid_sequence and code.amino_acid_sequence.sequence.length > 0
      io.print ">"
      if block_given?
        io.puts yield(code)
      else
        # Default header: "Species name | gene id | annotation".
        io.puts [
          code.species.name,
          code.string_id,
          code.annotation ? code.annotation.annotation : nil
        ].join(' | ')
      end
      io.puts code.amino_acid_sequence.sequence
    else
      $stderr.puts "Couldn't find amino acid sequence for #{code.string_id}/#{code.id}"
    end
  end
end
# For every localised P. falciparum gene, print its OrthoMCL v3 group as a
# tab-separated "paragraph": the falciparum gene with names/annotation/
# localisation, then each apicomplexan orthologue's details.
def apiloc_mapping_orthomcl_v3
  # Starting with falciparum, how many genes have localised orthologues?
  CodingRegion.falciparum.all(
    :joins => {:expression_contexts => :localisation},
    :select => 'distinct(coding_regions.*)'
  ).each do |code|
    next if ["PF14_0078",'PF13_0011'].include?(code.string_id) #fair enough there is no orthomcl for this - just the way v3 is.
    # Is this in orthomcl
    ogene = nil
    begin
      ogene = code.single_orthomcl
    rescue CodingRegion::UnexpectedOrthomclGeneCount
      # Ambiguous mapping to OrthoMCL — skip this gene entirely.
      next
    end
    if ogene
      groups = ogene.orthomcl_groups
      raise unless groups.length == 1
      group = groups[0]
      # Other apicomplexan genes in the same group (the orthologues).
      others = group.orthomcl_genes.apicomplexan.all.reject{|r| r.id==ogene.id}
      next if others.empty?
      # Of those orthologues, which have a recorded ApiLoc localisation?
      orthologues = CodingRegion.all(
        :joins => [
          {:expression_contexts => :localisation},
          :orthomcl_genes,
        ],
        :conditions => "orthomcl_genes.id in (#{others.collect{|o|o.id}.join(',')})",
        :select => 'distinct(coding_regions.*)'
      )
      if orthologues.empty?
        $stderr.puts "Nothing useful found for #{code.names.join(', ')}"
      else
        # output the whole group, including localisations where known
        puts [
          code.string_id,
          code.case_sensitive_literature_defined_coding_region_alternate_string_ids.reach.name.join(', '),
          code.annotation.annotation,
          code.localisation_english
        ].join("\t")
        group.orthomcl_genes.apicomplexan.all.each do |oge|
          next if %w(cmur chom).include?(oge.official_split[0])
          c = nil
          if oge.official_split[1] == 'TGGT1_036620' #stupid v3
            c = CodingRegion.find_by_name_or_alternate("TGME49_084810")
          else
            c = oge.single_code!
          end
          if c.nil?
            # if no coding region is returned, then don't complain too much,
            # but I will check these manually later
            puts oge.orthomcl_name
          else
            next if c.id == code.id #don't duplicate the query
            # print + leading nil in the array: the orthologue line starts
            # with its id followed by a tab (from the nil join element).
            print c.string_id
            puts [
              nil,
              c.case_sensitive_literature_defined_coding_region_alternate_string_ids.reach.name.join(', '),
              c.annotation.annotation,
              c.localisation_english
            ].join("\t")
          end
        end
        puts
      end
    end
  end
end
# Get all of the sequences that are recorded in ApiLoc and put them into
# a blast file where the hits can be identified using a -m 8 blast output,
# then formatdb it and move the database files into /blastdb.
def create_apiloc_m8_ready_blast_database
  # Headers are written as "Species_name|gene_id" so -m 8 ids are parseable.
  File.open('/tmp/apiloc_m8_ready.protein.fa','w') do |file|
    BScript.new.apiloc_fasta(file) do |code|
      "#{code.species.name.gsub(' ','_')}|#{code.string_id}"
    end
  end
  Dir.chdir('/tmp') do
    `formatdb -i apiloc_m8_ready.protein.fa`
    %w(
      apiloc_m8_ready.protein.fa
      apiloc_m8_ready.protein.fa.phr
      apiloc_m8_ready.protein.fa.pin
      apiloc_m8_ready.protein.fa.psq
    ).each do |filename|
      # BUG FIX: the filename was not interpolated into the mv command,
      # so nothing was actually moved.
      `mv #{filename} /blastdb`
    end
  end
end
# Taking all the falciparum proteins, where are the orthologues localised?
# Prints each localised P. falciparum gene alongside the useful GO terms of
# its yeast ('scer') OrthoMCL orthologues, tab separated.
def orthomcl_localisation_annotations
  # NOTE(review): :limit => 20 looks like a debugging leftover — it
  # contradicts the "all the falciparum proteins" intent above; confirm.
  CodingRegion.falciparum.all(
    :joins => :expressed_localisations,
    :limit => 20,
    :select => 'distinct(coding_regions.*)'
  ).each do |code|
    begin
      falciparum_orthomcl_gene = code.single_orthomcl
      puts [
        code.string_id,
        code.annotation.annotation,
        # Yeast orthologues' GO terms, one "|"-separated field per gene.
        falciparum_orthomcl_gene.official_group.orthomcl_genes.code('scer').all.collect { |sceg|
          sceg.single_code.coding_region_go_terms.useful.all.reach.go_term.term.join(', ')
        }.join(' | ')
      ].join("\t")
    rescue CodingRegion::UnexpectedOrthomclGeneCount => e
      $stderr.puts "Couldn't map #{code.string_id}/#{code.annotation.annotation} to orthomcl"
    end
  end
end
# Fetch GO information for model-organism orthologues of localised
# apicomplexan genes, going via Ensembl to UniProt.
# NOTE(review): clearly a work in progress — only the first matching
# OrthoMCL group is processed (OrthomclGroup.first), and the final
# upload steps at the bottom are still unimplemented.
def upload_apiloc_relevant_go_terms
  require 'ensembl'
  # create the species and dummy scaffolds, genes, etc.
  # yeast should already be uploaded
  # yeast = Species.find_or_create_by_name_and_orthomcl_three_letter(Species::YEAST_NAME, 'scer')
  # human = Species.find_or_create_by_name_and_orthomcl_three_letter(Species::HUMAN_NAME, 'hsap')
  # mouse = Species.find_or_create_by_name_and_orthomcl_three_letter(Species::MOUSE_NAME, 'mmus')
  # mouse = Species.find_or_create_by_name_and_orthomcl_three_letter(Species::ELEGANS_NAME, 'cele')
  gene = Gene.new.create_dummy('apiloc conservation dummy gene for multiple species')
  ensembl_uniprot_db = ExternalDb.find_by_db_name("Uniprot/SWISSPROT")
  # for each human, mouse, yeast gene in a group with a localised apicomplexan
  # gene, get the go terms from Ensembl so we can start to compare them later
  # OrthomclGroup.all(
  ogroup = OrthomclGroup.first(
    :joins => {
      :orthomcl_gene_orthomcl_group_orthomcl_runs => [
        :orthomcl_run,
        {:orthomcl_gene => {:coding_regions => :expressed_localisations}}
      ]
    },
    :conditions => {
      :orthomcl_runs => {:name => OrthomclRun::ORTHOMCL_OFFICIAL_VERSION_3_NAME}
    }
  )
  # ).each do |ogroup|
  ogroup.orthomcl_genes.codes(%w(hsap mmus scer cele)).all.each do |orthomcl_gene|
    # BUG FIX: was OrthomclGene.new.official_split[1] — that splits a
    # freshly-built empty record instead of the gene being iterated over.
    ensembl = orthomcl_gene.official_split[1]
    # fetch the uniprot ID from Ensembl
    ensp = Ensembl::Core::Translation.find_by_stable_id(ensembl)
    unless ensp
      $stderr.puts "Couldn't find ensembl gene to match #{ensembl}, skipping"
      next
    end
    # extract the uniprot id
    # NOTE(review): comparing ensembl_uniprot_db.id against x.id (the
    # xref's own id) looks suspect — presumably this should match the
    # xref's external-db id column; confirm against the Ensembl schema.
    uniprots = ensp.xrefs.select{|x| ensembl_uniprot_db.id == x.id}.collect{|x| x.db_primaryacc}.uniq
    uniprot = uniprots[0]
    unless uniprots.length == 1
      $stderr.puts "Unexpected number of uniprot IDs found: #{uniprots.inspect}"
      next if uniprots.empty?
    end
    # wget the uniprot txt file entry
    filename = "/tmp/uniprot#{uniprot}.txt"
    # BUG FIX: the output path was not interpolated into the wget command.
    `wget http://www.uniprot.org/#{uniprot}.txt -O #{filename}`
    # parse the uniprot entry
    bio = Bio::Uniprot.new(File.open(filename).read)
    p bio
    # create the gene
    # find the GO term that I've annnotated, otherwise add a new one, which
    # will need to be filled in with the term
    # add the relevant GO term and evidence code
    # end
  end
end
# not really working - too slow for me.
# Intended to map mouse OrthoMCL genes to UniProt via an id-mapping file;
# currently it just prints the Ensembl id of each mouse gene belonging to
# an official-v3 OrthoMCL group that contains a localised gene.
def map_using_uniprot_mapper
  localised_group_options = {
    :joins => {
      :orthomcl_gene_orthomcl_group_orthomcl_runs => [
        :orthomcl_run,
        {:orthomcl_gene => {:coding_regions => :expressed_localisations}}
      ]
    },
    :conditions => {
      :orthomcl_runs => {:name => OrthomclRun::ORTHOMCL_OFFICIAL_VERSION_3_NAME}
    }
  }
  OrthomclGroup.all(localised_group_options).each do |group|
    group.orthomcl_genes.codes(%w(mmus)).all.each do |mouse_gene|
      puts mouse_gene.official_split[1]
    end
  end
end
# For each model-organism OrthoMCL species code, write out the Ensembl
# protein ids of genes that share an official-v3 OrthoMCL group with a
# localised apicomplexan gene — one file per species, for BioMart/GO input.
def generate_biomart_to_go_input
  {
    'hsap' => 'human',
    'mmus' => 'mouse',
    'atha' => 'arabidopsis',
    'dmel' => 'fly',
    'cele' => 'worm',
    'scer' => 'yeast',
    'crei' => 'chlamydomonas',
    'tthe' => 'tetrahymena',
    'rnor' => 'rat',
    'spom' => 'pombe',
  }.each do |code, name|
    $stderr.puts name
    # FIX: open with a block so the output file handle is reliably closed
    # (it was previously opened and never closed — a descriptor leak and
    # potentially unflushed output).
    File.open("#{species_orthologue_folder}/#{name}.txt",'w') do |out|
      OrthomclGroup.all(
        :joins => {
          :orthomcl_gene_orthomcl_group_orthomcl_runs => [
            :orthomcl_run,
            {:orthomcl_gene => {:coding_regions => :expressed_localisations}}
          ]
        },
        :conditions => {
          :orthomcl_runs => {:name => OrthomclRun::ORTHOMCL_OFFICIAL_VERSION_3_NAME}
        }
      ).uniq.each do |ogroup|
        ogroup.orthomcl_genes.code(code).all.each do |orthomcl_gene|
          ensembl = orthomcl_gene.official_split[1]
          out.puts ensembl
        end
      end
    end
  end
end
# Directory holding the species-orthologue data files used by the
# apiloc GO-comparison methods in this file.
def species_orthologue_folder
  "#{PHD_DIR}/apiloc/species_orthologues3"
end
# all the methods required to get from the biomart and uniprot
# id to GO term mappings to a spreadsheet that can be inspected for the
# localisations required.
# Orchestration method: runs the upload steps in dependency order, links
# OrthoMCL genes to coding regions, then writes the inspection output.
def apiloc_gathered_output_to_generated_spreadsheet_for_inspection
  upload_apiloc_ensembl_go_terms
  upload_apiloc_uniprot_go_terms
  upload_apiloc_uniprot_mappings
  upload_apiloc_flybase_mappings
  # for some reason a single refseq sequence can be linked to multiple uniprot sequences,
  # which is stupid but something I'll have to live with
  OrthomclGene.new.link_orthomcl_and_coding_regions(%w(atha), :accept_multiple_coding_regions=>true)
  OrthomclGene.new.link_orthomcl_and_coding_regions(%w(hsap mmus dmel cele))
  generate_apiloc_orthomcl_groups_for_inspection
end
# Upload GO cellular-component annotations exported from Ensembl BioMart
# (one TSV per species) as CodingRegionGoTerm records, creating dummy
# coding regions keyed by Ensembl protein id as needed.
def upload_apiloc_ensembl_go_terms
  {
    'human' => Species::HUMAN_NAME,
    'mouse' => Species::MOUSE_NAME,
    'rat' => Species::RAT_NAME,
  }.each do |this_name, proper_name|
    $stderr.puts this_name
    FasterCSV.foreach("#{species_orthologue_folder}/biomart_results/#{this_name}.csv",
      :col_sep => "\t",
      :headers => true
    ) do |row|
      protein_name = row['Ensembl Protein ID']
      go_id = row['GO Term Accession (cc)']
      evidence = row['GO Term Evidence Code (cc)']
      next if go_id.nil? #ignore empty columns
      code = CodingRegion.find_or_create_dummy(protein_name, proper_name)
      go = GoTerm.find_by_go_identifier_or_alternate go_id
      unless go
        # The GO id isn't in the local database — report and keep going.
        $stderr.puts "Couldn't find GO id #{go_id}"
        next
      end
      CodingRegionGoTerm.find_or_create_by_coding_region_id_and_go_term_id_and_evidence_code(
        code.id, go.id, evidence
      ) or raise
    end
  end
end
# Upload GO annotations parsed directly from downloaded UniProt flat files
# for species not covered by the BioMart exports, also recording secondary
# UniProt accessions as alternate string ids.
def upload_apiloc_uniprot_go_terms
  {
    'arabidopsis' => Species::ARABIDOPSIS_NAME,
    'worm' => Species::ELEGANS_NAME,
    'fly' => Species::DROSOPHILA_NAME
  }.each do |this_name, proper_name|
    # Entries in a UniProt flat file are separated by "//" lines.
    File.open("#{species_orthologue_folder}/uniprot_results/#{this_name}.uniprot.txt").read.split("//\n").each do |uniprot|
      u = Bio::UniProt.new(uniprot)
      axes = u.ac # accession list; the first is the primary accession
      protein_name = axes[0]
      raise unless protein_name
      code = CodingRegion.find_or_create_dummy(protein_name, proper_name)
      # Secondary accessions become alternate string ids.
      protein_alternate_names = axes[1..(axes.length-1)].no_nils
      protein_alternate_names.each do |name|
        CodingRegionAlternateStringId.find_or_create_by_coding_region_id_and_name(
          code.id, name
        ) or raise
      end
      goes = u.dr["GO"]
      next if goes.nil? #no go terms associated
      goes.each do |go_array|
        go_id = go_array[0]
        evidence_almost = go_array[2]
        evidence = nil
        # The evidence code is the 2-3 capital letters before the colon,
        # e.g. "IDA:..." => "IDA".
        if (matches = evidence_almost.match(/^([A-Z]{2,3})\:.*$/))
          evidence = matches[1]
        end
        # error checking
        if evidence.nil?
          raise Exception, "No evidence code found in #{go_array.inspect} from #{evidence_almost}!"
        end
        go = GoTerm.find_by_go_identifier_or_alternate go_id
        unless go
          $stderr.puts "Couldn't find GO id #{go_id}"
          next
        end
        CodingRegionGoTerm.find_or_create_by_coding_region_id_and_go_term_id_and_evidence_code(
          code.id, go.id, evidence
        ).save!
      end
    end
  end
end
# Record UniProt accessions (column 0 of the mapping files) as alternate
# string ids on the coding regions named in column 1, per species.
def upload_apiloc_uniprot_mappings
  species_map = {
    'arabidopsis' => Species::ARABIDOPSIS_NAME,
    'worm' => Species::ELEGANS_NAME,
    'fly' => Species::DROSOPHILA_NAME
  }
  species_map.each do |file_stem, species_name|
    mapping_path = "#{species_orthologue_folder}/uniprot_results/#{file_stem}.mapping.tab"
    FasterCSV.foreach(mapping_path, :col_sep => "\t", :headers => true) do |row|
      uniprot_accession = row[0]
      gene_id = row[1]
      code = CodingRegion.fs(gene_id, species_name) or raise Exception, "Don't know #{row[1]}"
      CodingRegionAlternateStringId.find_or_create_by_coding_region_id_and_name(
        code.id, uniprot_accession
      )
    end
  end
end
# Drosophila won't match well to orthomcl because orthomcl uses protein IDs whereas
# uniprot uses gene ids.
# This file was created by using the (useful and working) ID converter in flybase
# Records FlyBase protein ids as alternate string ids on the corresponding
# gene-id coding regions, so Drosophila genes can be linked to OrthoMCL.
def upload_apiloc_flybase_mappings
  FasterCSV.foreach("#{species_orthologue_folder}/uniprot_results/flybase.mapping.tab",
    :col_sep => "\t"
  ) do |row|
    next if row[1] == 'unknown ID' #ignore rubbish
    gene_id = row[3]
    next if gene_id == '-' # not sure what this is, but I'll ignore for the moment
    protein_id = row[1]
    code = CodingRegion.fs(gene_id, Species::DROSOPHILA_NAME)
    if code.nil?
      $stderr.puts "Couldn't find gene #{gene_id}, skipping"
      next
    end
    CodingRegionAlternateStringId.find_or_create_by_name_and_coding_region_id(
      protein_id, code.id
    )
  end
end
# Return a list of orthomcl groups that fulfil these conditions:
# 1. It has a localised apicomplexan gene in it, as recorded by ApiLoc
# 2. It has a localised non-apicomplexan gene in it, as recorded by GO CC IDA annotation
def apiloc_orthomcl_groups_of_interest
  # Groups in the official v3 run that contain at least one coding region
  # with a recorded ApiLoc localisation.
  OrthomclGroup.all(
    :select => 'distinct(orthomcl_groups.*)',
    :joins => {
      :orthomcl_gene_orthomcl_group_orthomcl_runs => [
        :orthomcl_run,
        {:orthomcl_gene => {:coding_regions => [
          :expressed_localisations
        ]}}
      ]
    },
    :conditions => {
      :orthomcl_runs => {:name => OrthomclRun::ORTHOMCL_OFFICIAL_VERSION_3_NAME},
    }
  ).select do |ogroup|
    # only select those groups that have go terms annotated in non-apicomplexan species
    OrthomclGroup.count(
      :joins => {:coding_regions =>[
        :go_terms
      ]},
      :conditions => ['orthomcl_groups.id = ? and coding_region_go_terms.evidence_code = ? and go_terms.partition = ?',
        ogroup.id, 'IDA', GoTerm::CELLULAR_COMPONENT
      ]
    ) > 0
  end
end
# Print, for manual inspection, each OrthoMCL group of interest as a
# tab-separated "paragraph": apicomplexan members with their ApiLoc
# localisations, then model-organism members with useful GO CC terms.
def generate_apiloc_orthomcl_groups_for_inspection
  interestings = %w(hsap mmus scer drer osat crei atha dmel cele)
  apiloc_orthomcl_groups_of_interest.each do |ogroup|
    paragraph = []
    ogroup.orthomcl_genes.all.each do |orthomcl_gene|
      four = orthomcl_gene.official_split[0]
      # Possible to have many coding regions now - using all of them just together, though there is
      # probably one good one and other useless and IEA if anything annotated. Actually
      # not necesssarilly, due to strain problems.
      #
      # Only print out one entry for each OrthoMCL gene, to condense things
      # but that line should have all the (uniq) go terms associated
      orthomcl_gene.coding_regions.uniq.each do |code|
        if OrthomclGene::OFFICIAL_ORTHOMCL_APICOMPLEXAN_CODES.include?(four)
          paragraph.push [
            orthomcl_gene.orthomcl_name,
            code.nil? ? nil : code.annotation.annotation,
            code.nil? ? nil : code.localisation_english,
          ].join("\t")
        elsif interestings.include?(four)
          unless code.nil?
            goes = code.coding_region_go_terms.cc.useful.all
            unless goes.empty?
              # FIX: removed an unused local ('worthwhile = true') that was
              # assigned here but never read.
              # Print the gene name only on the first GO line of this gene.
              orig = orthomcl_gene.orthomcl_name
              goes.each do |code_go|
                paragraph.push [
                  orig,
                  code_go.go_term.go_identifier,
                  code_go.go_term.term,
                  code_go.evidence_code
                ].join("\t")
                orig = ''
              end
            end
          end
        end
      end
    end
    puts paragraph.uniq.join("\n")
    puts
  end
end
# Read the manually-curated localisation-conservation spreadsheet and
# record each group's conservation call in the database.
def apiloc_go_localisation_conservation_groups_to_database
  # FasterCSV.foreach("#{PHD_DIR}/apiloc/species_orthologues2/breakdown.manual.xls",
  # FasterCSV.foreach("#{PHD_DIR}/apiloc/species_orthologues4/breakdown2.manual.csv",
  FasterCSV.foreach("#{PHD_DIR}/apiloc/species_orthologues4/breakdown3.manual.csv",
    :col_sep => "\t"
  ) do |row|
    # ignore lines that have nothing first or are the header line
    next unless row[0] and row[0].length > 0 and row[3]
    single = row[0] # single-letter conservation code
    eg = row[1]     # an example orthomcl gene name identifying the group
    full = OrthomclLocalisationConservations.single_letter_to_full_name(single)
    raise Exception, "Couldn't understand single letter '#{single}'" unless full
    # find the orthomcl group by using the gene in the first line (the example)
    ogene = OrthomclGene.official.find_by_orthomcl_name(eg)
    # FIX: corrected the typo in the error message ("Coun't" -> "Couldn't").
    raise Exception, "Couldn't find orthomcl gene '#{eg}' as expected" if ogene.nil?
    # create the record (FIX: dropped a pointless trailing '.id' whose
    # value was discarded).
    OrthomclLocalisationConservations.find_or_create_by_orthomcl_group_id_and_conservation(
      ogene.official_group.id, full
    )
  end
end
# For each recorded localisation-conservation call, find the human ('hsap')
# OrthoMCL gene in that group with the most useful GO CC annotations and
# print the conservation call alongside that gene's names.
def yes_vs_no_human_examples
  OrthomclLocalisationConservations.all.collect do |l|
    max_human = OrthomclGene.code('hsap').all(
      :joins =>[
        [:coding_regions => :go_terms],
        :orthomcl_gene_orthomcl_group_orthomcl_runs
      ],
      :conditions => {:orthomcl_gene_orthomcl_group_orthomcl_runs => {:orthomcl_group_id => l.orthomcl_group_id}}
    ).max do |h1, h2|
      # Rank human genes by their count of useful GO CC annotations.
      counter = lambda {|h|
        CodingRegionGoTerm.cc.useful.count(
          :joins => {:coding_region => :orthomcl_genes},
          :conditions => {:orthomcl_genes => {:id => h.id}}
        )
      }
      counter.call(h1) <=> counter.call(h2)
    end
    next unless max_human # no annotated human gene in this group
    puts [
      l.conservation,
      max_human.coding_regions.first.names.sort
    ].flatten.join("\t")
  end
end
# Attach UniProt/SwissProt accessions (from a BioMart export) as alternate
# string ids to the coding regions identified by Ensembl protein id.
def upload_uniprot_identifiers_for_ensembl_ids
  mapping_path = "#{species_orthologue_folder}/gostat/human_ensembl_uniprot_ids.txt"
  FasterCSV.foreach(mapping_path, :col_sep => "\t", :headers => true) do |row|
    ensembl_id = row['Ensembl Protein ID']
    uniprot_accession = row['UniProt/SwissProt Accession']
    raise unless ensembl_id
    next unless uniprot_accession # rows with no uniprot mapping are skipped
    coding_region = CodingRegion.f(ensembl_id)
    raise unless coding_region
    CodingRegionAlternateStringId.find_or_create_by_coding_region_id_and_name_and_source(
      coding_region.id, uniprot_accession, CodingRegionAlternateStringId::UNIPROT_SOURCE_NAME
    ) or raise
  end
end
# Download the UniProt knowledgebase flat files (gzipped txt format) for
# each species of interest, skipping files that are already present.
def download_uniprot_data
  UNIPROT_SPECIES_ID_NAME_HASH.each do |taxon_id, species_name|
    # Download the data into the expected name
    Dir.chdir("#{DATA_DIR}/UniProt/knowledgebase") do
      # NOTE(review): File.exists? is a deprecated alias (removed in
      # Ruby 3.2) — File.exist? is the modern spelling.
      unless File.exists?("#{species_name}.gz")
        cmd = "wget -O '#{species_name}.gz' 'http://www.uniprot.org/uniprot/?query=taxonomy%3a#{taxon_id}&compress=yes&format=txt'"
        p cmd
        `#{cmd}`
      end
    end
  end
end
# Delete all the data associated with the uniprot species so
# I can start again.
def destroy_all_uniprot_species
  APILOC_UNIPROT_SPECIES_NAMES.each do |species_name|
    s = Species.find_by_name(species_name)
    puts "#{species_name}..."
    # FIX: use destroy rather than delete — delete skips ActiveRecord
    # callbacks and :dependent association cleanup, so the associated
    # data this method is documented to remove would have been orphaned.
    s.destroy unless s.nil?
  end
end
# NCBI taxonomy id => ApiLoc species name, for the model organisms whose
# UniProt data is downloaded and compared against apicomplexan localisations.
UNIPROT_SPECIES_ID_NAME_HASH = {
  3702 => Species::ARABIDOPSIS_NAME,
  9606 => Species::HUMAN_NAME,
  10090 => Species::MOUSE_NAME,
  4932 => Species::YEAST_NAME,
  4896 => Species::POMBE_NAME,
  10116 => Species::RAT_NAME,
  7227 => Species::DROSOPHILA_NAME,
  6239 => Species::ELEGANS_NAME,
  44689 => Species::DICTYOSTELIUM_DISCOIDEUM_NAME,
  7955 => Species::DANIO_RERIO_NAME,
  5691 => Species::TRYPANOSOMA_BRUCEI_NAME,
}
# Convenience list of just the species names from the hash above.
APILOC_UNIPROT_SPECIES_NAMES = UNIPROT_SPECIES_ID_NAME_HASH.values
# Given that the species of interest are already downloaded from uniprot
# (using download_uniprot_data for instance), upload this data
# to the database, including GO terms. Other things need to be run afterwards
# to be able to link to OrthoMCL.
#
# This method could be more DRY - UniProtIterator could replace
# much of the code here. But eh for the moment.
#
# species_names may be a single name, an array of names, or nil (meaning
# all of APILOC_UNIPROT_SPECIES_NAMES).
def uniprot_to_database(species_names=nil)
  species_names ||= APILOC_UNIPROT_SPECIES_NAMES
  species_names = [species_names] unless species_names.kind_of?(Array)
  species_names.each do |species_name|
    count = 0
    current_uniprot_string = ''
    complete_filename = "#{DATA_DIR}/UniProt/knowledgebase/#{species_name}.gz"
    # Convert the whole gzip in to a smaller one, so parsing is faster:
    # Don't use a static name because if two instances are running clashes occur.
    Tempfile.open("#{species_name}_reduced") do |tempfile|
      filename = tempfile.path
      # Bug fix: the redirect target was a garbled literal ('#(unknown)');
      # write the filtered records to the tempfile that is parsed below.
      cmd = "zcat '#{complete_filename}' |egrep '^(AC|DR GO|//)' >'#{filename}'"
      `#{cmd}`
      dummy_gene = Gene.find_or_create_dummy(species_name)
      # One '//' terminator per UniProt record gives the progress bar total.
      progress = ProgressBar.new(species_name, `grep '^//' '#{filename}' |wc -l`.to_i)
      File.foreach(filename) do |line|
        if line == "//\n"
          count += 1
          progress.inc
          # current uniprot record is finished - upload it
          u = Bio::UniProt.new(current_uniprot_string)
          # The first accession becomes the coding region's primary string id.
          axes = u.ac
          protein_name = axes[0]
          raise unless protein_name
          code = CodingRegion.find_or_create_by_gene_id_and_string_id(
            dummy_gene.id,
            protein_name
          )
          raise unless code.save!
          # Every (non-nil) accession is also recorded as an alternate name.
          protein_alternate_names = axes.no_nils
          protein_alternate_names.each do |name|
            CodingRegionAlternateStringId.find_or_create_by_coding_region_id_and_name_and_source(
              code.id, name, 'UniProt'
            ) or raise
          end
          goes = u.dr["GO"]
          goes ||= [] #no go terms associated - best to still make it to the end of the method, because it is too complex here for such hackery
          goes.each do |go_array|
            go_id = go_array[0]
            evidence_almost = go_array[2]
            evidence = nil
            # e.g. "IDA:UniProtKB" - the leading 2-3 letter code is the evidence.
            if (matches = evidence_almost.match(/^([A-Z]{2,3})\:.*$/))
              evidence = matches[1]
            end
            # error checking
            if evidence.nil?
              raise Exception, "No evidence code found in #{go_array.inspect} from #{evidence_almost}!"
            end
            go = GoTerm.find_by_go_identifier_or_alternate go_id
            if go
              CodingRegionGoTerm.find_or_create_by_coding_region_id_and_go_term_id_and_evidence_code(
                code.id, go.id, evidence
              ).save!
            else
              $stderr.puts "Couldn't find GO id #{go_id}"
            end
          end
          current_uniprot_string = ''
        else
          current_uniprot_string += line
        end
      end
      progress.finish
    end #tempfile
    $stderr.puts "Uploaded #{count} from #{species_name}, now there is #{CodingRegion.s(species_name).count} coding regions in #{species_name}."
  end
  # uploading the last record explicitly is not required because the last
  # line is always '//' already - making it easy.
end
# Parse the Tetrahymena UniProt flat file and attach each entry's GN ORF
# names to the matching coding region (looked up by primary accession) as
# alternate string IDs. A missing coding region is fatal.
def tetrahymena_orf_names_to_database
  species_name = Species::TETRAHYMENA_NAME
  current_uniprot_string = ''
  filename = "#{DATA_DIR}/UniProt/knowledgebase/#{Species::TETRAHYMENA_NAME}.gz"
  # Bug fix: the shell command contained a garbled literal ('#(unknown)');
  # count record terminators in the actual gzip for the progress bar total.
  progress = ProgressBar.new(Species::TETRAHYMENA_NAME, `gunzip -c '#{filename}' |grep '^//' |wc -l`.to_i)
  Zlib::GzipReader.open(filename).each do |line|
    if line == "//\n"
      progress.inc
      # current uniprot record is finished - upload it
      u = Bio::UniProt.new(current_uniprot_string)
      axes = u.ac
      protein_name = axes[0]
      raise unless protein_name
      code = CodingRegion.fs(protein_name, species_name)
      raise unless code
      # NOTE(review): assumes every record has a GN line with :orfs - confirm.
      u.gn[0][:orfs].each do |orfname|
        CodingRegionAlternateStringId.find_or_create_by_coding_region_id_and_name(
          code.id, orfname
        )
      end
      current_uniprot_string = ''
    else
      current_uniprot_string += line
    end
  end
end
# upload aliases so that orthomcl entries can be linked to uniprot ones.
# have to run tetrahymena_orf_names_to_database first though.
# Reads the TGD mapping file (column 0: UniProt-side ID, column 1: TGD id)
# and records column 1 as a 'TGD'-sourced alternate string id.
def tetrahymena_gene_aliases_to_database
  bads = 0
  goods = 0
  filename = "#{DATA_DIR}/Tetrahymena thermophila/genome/TGD/Tt_ID_Mapping_File.txt"
  # Bug fix: garbled literal ('#(unknown)'); size the progress bar from the
  # mapping file itself.
  progress = ProgressBar.new(Species::TETRAHYMENA_NAME, `wc -l '#{filename}'`.to_i)
  FasterCSV.foreach(filename,
    :col_sep => "\t"
  ) do |row|
    progress.inc
    uniprot = row[0]
    orthomcl = row[1]
    code = CodingRegion.fs(uniprot, Species::TETRAHYMENA_NAME)
    if code.nil?
      bads += 1
    else
      goods += 1
      a = CodingRegionAlternateStringId.find_or_create_by_coding_region_id_and_source_and_name(
        code.id, 'TGD', orthomcl
      )
      raise unless a
    end
  end
  progress.finish
  $stderr.puts "Found #{goods}, failed #{bads}"
end
# Parse the yeast UniProt flat file and attach each entry's GN loci names
# to the matching coding region as alternate string IDs. Proteins that
# cannot be found are reported to stderr and skipped.
def yeastgenome_ids_to_database
  species_name = Species::YEAST_NAME
  current_uniprot_string = ''
  filename = "#{DATA_DIR}/UniProt/knowledgebase/#{species_name}.gz"
  # Bug fix: garbled literal ('#(unknown)'); count record terminators in the
  # actual gzip for the progress bar total.
  progress = ProgressBar.new(species_name, `gunzip -c '#{filename}' |grep '^//' |wc -l`.to_i)
  Zlib::GzipReader.open(filename).each do |line|
    if line == "//\n"
      progress.inc
      # current uniprot record is finished - upload it
      u = Bio::UniProt.new(current_uniprot_string)
      axes = u.ac
      protein_name = axes[0]
      raise unless protein_name
      code = CodingRegion.fs(protein_name, species_name)
      if code
        unless u.gn.empty?
          u.gn[0][:loci].each do |orfname|
            CodingRegionAlternateStringId.find_or_create_by_coding_region_id_and_name(
              code.id, orfname
            )
          end
        end
      else
        $stderr.puts "Unable to find protein `#{protein_name}'"
      end
      current_uniprot_string = ''
    else
      current_uniprot_string += line
    end
  end
  progress.finish
end
# Record WormBase cross-references (e.g. "DR WormBase; WBGene00000467; cep-1.")
# from the C. elegans UniProt flat file as 'WormBase'-sourced alternate IDs.
# A missing coding region is fatal.
def elegans_wormbase_identifiers
  species_name = Species::ELEGANS_NAME
  current_uniprot_string = ''
  complete_filename = "#{DATA_DIR}/UniProt/knowledgebase/#{species_name}.gz"
  # Convert the whole gzip in to a smaller one, so parsing is faster:
  filename = "#{DATA_DIR}/UniProt/knowledgebase/#{species_name}_reduced"
  # Bug fix: the redirect target and grep/rm arguments were garbled
  # literals ('#(unknown)'); use the reduced filename throughout.
  `zcat '#{complete_filename}' |egrep '^(AC|DR WormBase|//)' >'#{filename}'`
  progress = ProgressBar.new(species_name, `grep '^//' '#{filename}' |wc -l`.to_i)
  File.foreach(filename) do |line|
    if line == "//\n"
      progress.inc
      u = Bio::UniProt.new(current_uniprot_string)
      code = CodingRegion.fs(u.ac[0], Species::ELEGANS_NAME)
      raise unless code
      # DR WormBase; WBGene00000467; cep-1.
      ides = u.dr['WormBase']
      ides ||= []
      ides.flatten.each do |ident|
        a = CodingRegionAlternateStringId.find_or_create_by_coding_region_id_and_name_and_source(
          code.id, ident, 'WormBase'
        )
        raise unless a.save!
      end
      current_uniprot_string = ''
    else
      current_uniprot_string += line
    end
  end
  progress.finish # was missing - sibling methods finalise the bar
  `rm '#{filename}'` # quote the path; the original rm was unquoted
end
# Record Dictyostelium gene names and ORF names from UniProt GN lines
# (e.g. "GN Name=myoJ; Synonyms=myo5B; ORFNames=DDB_G0272112;") as
# 'UniProtName'-sourced alternate string IDs.
def dicystelium_names_to_database
  species_name = Species::DICTYOSTELIUM_DISCOIDEUM_NAME
  current_uniprot_string = ''
  complete_filename = "#{DATA_DIR}/UniProt/knowledgebase/#{species_name}.gz"
  # Convert the whole gzip in to a smaller one, so parsing is faster:
  filename = "#{DATA_DIR}/UniProt/knowledgebase/#{species_name}_reduced"
  # Bug fix: the redirect target and grep/rm arguments were garbled
  # literals ('#(unknown)'); use the reduced filename throughout.
  `zcat '#{complete_filename}' |egrep '^(AC|GN|//)' >'#{filename}'`
  progress = ProgressBar.new(species_name, `grep '^//' '#{filename}' |wc -l`.to_i)
  skipped_count = 0
  skipped_count2 = 0
  added_count = 0
  File.foreach(filename) do |line|
    if line == "//\n"
      progress.inc
      u = Bio::UniProt.new(current_uniprot_string)
      code = CodingRegion.fs(u.ac[0], species_name)
      raise unless code
      # GN Name=myoJ; Synonyms=myo5B; ORFNames=DDB_G0272112;
      unless u.gn.empty? # for some reason using u.gn when there is nothing there returns an array, not a hash. Annoying.
        ides = []
        u.gn.each do |g|
          ides.push g[:name] unless g[:name].nil?
          ides.push g[:orfs] unless g[:orfs].nil?
        end
        ides = ides.flatten.no_nils
        ides ||= []
        ides.flatten.each do |ident|
          a = CodingRegionAlternateStringId.find_or_create_by_coding_region_id_and_name_and_source(
            code.id, ident, 'UniProtName'
          )
          raise unless a.save!
        end
        if ides.empty?
          skipped_count2 += 1
        else
          added_count += 1
        end
      else
        skipped_count += 1
      end
      current_uniprot_string = ''
    else
      current_uniprot_string += line
    end
  end
  `rm '#{filename}'`
  progress.finish
  $stderr.puts "Added names for #{added_count}, skipped #{skipped_count} and #{skipped_count2}"
end
# Record Trypanosoma brucei ORF names from UniProt GN lines as
# 'UniProtName'-sourced alternate string IDs. Unlike the Dictyostelium
# version, Name= values are deliberately excluded (see the kept comment).
def tbrucei_names_to_database
  species_name = Species::TRYPANOSOMA_BRUCEI_NAME
  current_uniprot_string = ''
  complete_filename = "#{DATA_DIR}/UniProt/knowledgebase/#{species_name}.gz"
  # Convert the whole gzip in to a smaller one, so parsing is faster:
  filename = "#{DATA_DIR}/UniProt/knowledgebase/#{species_name}_reduced"
  # Bug fix: the redirect target and grep/rm arguments were garbled
  # literals ('#(unknown)'); use the reduced filename throughout.
  `zcat '#{complete_filename}' |egrep '^(AC|GN|//)' >'#{filename}'`
  progress = ProgressBar.new(species_name, `grep '^//' '#{filename}' |wc -l`.to_i)
  skipped_count = 0
  skipped_count2 = 0
  added_count = 0
  File.foreach(filename) do |line|
    if line == "//\n"
      progress.inc
      u = Bio::UniProt.new(current_uniprot_string)
      code = CodingRegion.fs(u.ac[0], species_name)
      raise unless code
      # GN Name=myoJ; Synonyms=myo5B; ORFNames=DDB_G0272112;
      unless u.gn.empty? # for some reason using u.gn when there is nothing there returns an array, not a hash. Annoying.
        ides = []
        u.gn.each do |g|
          #ides.push g[:name] unless g[:name].nil?
          ides.push g[:orfs] unless g[:orfs].nil?
        end
        ides = ides.flatten.no_nils
        ides ||= []
        ides.flatten.each do |ident|
          a = CodingRegionAlternateStringId.find_or_create_by_coding_region_id_and_name_and_source(
            code.id, ident, 'UniProtName'
          )
          raise unless a.save!
        end
        if ides.empty?
          skipped_count2 += 1
        else
          added_count += 1
        end
      else
        skipped_count += 1
      end
      current_uniprot_string = ''
    else
      current_uniprot_string += line
    end
  end
  `rm '#{filename}'`
  progress.finish
  $stderr.puts "Added names for #{added_count}, skipped #{skipped_count} and #{skipped_count2}"
end
# Record Ensembl cross-references (plus FlyBase FBpp polypeptide IDs for
# Drosophila) from UniProt 'DR Ensembl' lines as 'Ensembl'-sourced
# alternate string IDs. A missing coding region is fatal.
def uniprot_ensembl_databases
  species_list = [
    Species::MOUSE_NAME,
    Species::HUMAN_NAME,
    Species::DANIO_RERIO_NAME,
    Species::DROSOPHILA_NAME,
    Species::RAT_NAME,
  ]
  species_list.each do |species_name|
    path = "#{DATA_DIR}/UniProt/knowledgebase/#{species_name}.gz"
    Bio::UniProtIterator.foreach(path, 'DR Ensembl') do |entry|
      code = CodingRegion.fs(entry.ac[0], species_name) or raise
      xrefs = entry.dr['Ensembl'] || []
      xrefs.flatten.each do |xref|
        fly_polypeptide = species_name == Species::DROSOPHILA_NAME && xref.match(/^FBpp/)
        next unless xref.match(/^ENS/) or fly_polypeptide
        CodingRegionAlternateStringId.find_or_create_by_coding_region_id_and_name_and_source(
          code.id, xref, 'Ensembl'
        )
      end
    end
  end
end
# Record RefSeq cross-references (with the trailing ".N" version suffix
# stripped) from UniProt 'DR RefSeq' lines as 'Refseq'-sourced alternate
# string IDs. A missing coding region is fatal.
def uniprot_refseq_databases
  species_list = [
    Species::ARABIDOPSIS_NAME,
    Species::RICE_NAME,
    Species::POMBE_NAME,
  ]
  species_list.each do |species_name|
    path = "#{DATA_DIR}/UniProt/knowledgebase/#{species_name}.gz"
    Bio::UniProtIterator.foreach(path, 'DR RefSeq') do |entry|
      code = CodingRegion.fs(entry.ac[0], species_name) or raise
      accessions = (entry.dr['RefSeq'] || []).collect { |row| row[0] }
      accessions.each do |accession|
        bare = accession.gsub(/\..*/, '') # drop the version suffix
        CodingRegionAlternateStringId.find_or_create_by_coding_region_id_and_name_and_source(
          code.id, bare, 'Refseq'
        )
      end
    end
  end
end
# Record UniProt GN ORF names as 'UniProtGeneName'-sourced alternate
# string IDs for the listed species. A missing coding region is fatal.
def uniprot_gene_names
  species_list = [
    Species::TBRUCEI_NAME,
  ]
  species_list.each do |species_name|
    path = "#{DATA_DIR}/UniProt/knowledgebase/#{species_name}.gz"
    Bio::UniProtIterator.foreach(path, 'GN ORFNames=') do |entry|
      code = CodingRegion.fs(entry.ac[0], species_name) or raise
      collected_orfs = []
      entry.gn.each do |gn|
        gn[:orfs].each do |orf|
          collected_orfs << orf
        end
      end
      collected_orfs.each do |orf|
        CodingRegionAlternateStringId.find_or_create_by_coding_region_id_and_name_and_source(
          code.id, orf, 'UniProtGeneName'
        )
      end
    end
  end
end
# Record EuPathDB cross-references from UniProt 'DR EuPathDB' lines as
# 'EuPathDB'-sourced alternate string IDs for the listed species.
def uniprot_eupathdb_databases
  [
    Species::TBRUCEI_NAME,
  ].each do |species_name|
    Bio::UniProtIterator.foreach("#{DATA_DIR}/UniProt/knowledgebase/#{species_name}.gz", 'DR EuPathDB') do |u|
      code = CodingRegion.fs(u.ac[0], species_name) or raise
      # p u.dr
      next if u.dr.empty?
      # Abort the whole iteration (break, not next) on an unexpected parse.
      if (u.dr['EuPathDB'].nil?); $stderr.puts "Incorrectly parsed line? #{u.dr.inspect}"; break; end
      refseqs = u.dr['EuPathDB'].flatten
      # NOTE(review): gsub prefix is 'EupathDB:' (lowercase 'p') while the DR
      # key is 'EuPathDB' - confirm which capitalisation the data actually uses.
      refseqs = refseqs.collect{|r| r.gsub(/^EupathDB:/,'')}
      refseqs.each do |r|
        CodingRegionAlternateStringId.find_or_create_by_coding_region_id_and_name_and_source(
          code.id, r, 'EuPathDB'
        )
      end
    end
  end
end
# Link Chlamydomonas coding regions to their JGI protein IDs, which are
# embedded in UniProt GN ORF names as CHLREDRAFT_<id> or CHLRE_<id>;
# the numeric suffix is stored as a 'JGI'-sourced alternate string id.
def chlamydomonas_link_to_orthomcl_ids
  species_name = Species::CHLAMYDOMONAS_NAME
  Bio::UniProtIterator.foreach("#{DATA_DIR}/UniProt/knowledgebase/#{species_name}.gz", 'GN') do |u|
    code = CodingRegion.fs(u.ac[0], species_name) or raise
    gn = u.gn
    unless gn.empty?
      orfs = gn.collect{|g| g[:orfs]}
      unless orfs.empty?
        orfs.flatten.each do |orf|
          # (Removed a dead store: a "manual fix" assigned
          # o = 'CHLREDRAFT_168484' for orf == 'CHLRE_168484', but o was
          # immediately overwritten below, and both prefixes are stripped
          # anyway, so the final value was unchanged.)
          raise Exception, "Unexpected orf: #{orf}" unless orf.match(/^CHLREDRAFT_/) or orf.match(/^CHLRE_/)
          o = orf.gsub(/^CHLREDRAFT_/, '')
          o = o.gsub(/^CHLRE_/,'')
          CodingRegionAlternateStringId.find_or_create_by_coding_region_id_and_name_and_source(
            code.id, o, 'JGI'
          )
        end
      end
    end
  end
end
# OrthoMCL gene IDs for Drosophila are encoded in the 'DR EnsemblMetazoa;'
# lines, such as
#   DR EnsemblMetazoa; FBtr0075201; FBpp0074964; FBgn0036740.
# (and in particular the FBpp ones). Upload these pp ones as synonyms.
def drosophila_ensembl_metazoa
  genes_with_ids = 0
  genes_without_ids = 0
  total_ids = 0
  path = "#{DATA_DIR}/UniProt/knowledgebase/#{Species::DROSOPHILA_NAME}.gz"
  Bio::UniProtIterator.foreach(path, 'DR EnsemblMetazoa;') do |entry|
    xrefs = entry.dr['EnsemblMetazoa']
    if xrefs.nil?
      genes_without_ids += 1
      next
    end
    code = CodingRegion.fs(entry.ac[0], Species::DROSOPHILA_NAME) or raise
    polypeptides = xrefs.flatten.select { |x| x.match /^FBpp/ }
    polypeptides.each do |fbpp|
      total_ids += 1
      CodingRegionAlternateStringId.find_or_create_by_coding_region_id_and_name_and_source(
        code.id, fbpp, 'EnsemblMetazoa'
      )
    end
    genes_with_ids += 1 unless polypeptides.empty?
  end
  $stderr.puts "Uploaded #{total_ids} IDs for #{genes_with_ids} different genes, missed #{genes_without_ids}"
end
# Print, per UniProt species, a rough count of GO cellular component
# annotation lines excluding IEA and ISS evidence, straight from the
# downloaded flat file via shell grep.
def uniprot_go_annotation_species_stats
  APILOC_UNIPROT_SPECIES_NAMES.each do |species_name|
    filename = "#{DATA_DIR}/UniProt/knowledgebase/#{species_name}.gz"
    if File.exists?(filename)
      puts [
        species_name,
        # Bug fix: the zcat argument was a garbled literal ('#(unknown)');
        # interpolate the species file being counted.
        `zcat '#{filename}'|grep ' GO' |grep -v IEA |grep -v ISS |grep 'C\:' |wc -l`
      ].join("\t")
    else
      puts "Couldn't find #{species_name} uniprot file"
    end
  end
end
# Create a spreadsheet that encapsulates all of the localisation
# information from apiloc, so that large scale analysis is simpler.
# Writes a tab-separated table to stdout: a header row, then one row per
# (localised coding region x organellar localisation). Cells that hold
# lists use '#' as the inner delimiter and ';' when two levels are needed.
def create_apiloc_spreadsheet
  nil_char = nil #because I'm not sure how the joins will work
  microscopy_method_names = LocalisationAnnotation::POPULAR_MICROSCOPY_TYPE_NAME_SCOPE.keys.sort.reverse
  small_split_string = '#' #use when only 1 delimiter within a cell is needed
  big_split_string = ';' #use when 2 delimiters in one cell are needed
  orthomcl_split_char = '_'
  # Headings
  puts [
    'Species',
    'Gene ID',
    'Abbreviations',
    'Official Gene Annotation',
    'Localisation Summary',
    'Cellular Localisation',
    'Total Number of Cellular Localisations',
    'OrthoMCL Group Identifier',
    'Apicomplexan Orthologues with Recorded Localisation',
    'Apicomplexan Orthologues without Recorded Localisation',
    'Non-Apicomplexan Orthologues with IDA GO Cellular Component Annotation',
    'Consensus Localisation of Orthology Group',
    'PubMed IDs of Publications with Localisation',
    microscopy_method_names,
    'All Localisation Methods Used',
    'Strains',
    'Gene Model Mapping Comments',
    'Quotes'
  ].flatten.join("\t")
  # Only coding regions that have at least one expressed localisation.
  codes = CodingRegion.all(:joins => :expressed_localisations).uniq
  progress = ProgressBar.new('apiloc_spreadsheet', codes.length)
  codes.each do |code|
    $stderr.puts code.string_id
    progress.inc
    to_print = []
    organellar_locs = []
    # species
    to_print.push code.species.name
    #EuPath or GenBank ID
    to_print.push code.string_id
    #common names
    to_print.push code.literature_defined_names.join(small_split_string)
    #annotation
    a1 = code.annotation
    to_print.push(a1.nil? ? nil_char : a1.annotation)
    #full localisation description
    to_print.push code.localisation_english
    #'organellar' localisation (one per record,
    #if there is more repeat the whole record)
    #this might more sensibly be GO-oriented, but eh for the moment
    organellar_locs = code.topsa.uniq
    # Placeholder column (index 5); overwritten per-localisation at the end.
    to_print.push nil_char
    # number of organellar localisations (ie number of records for this gene)
    to_print.push organellar_locs.length
    # OrthoMCL-related stuffs
    ogene = code.single_orthomcl!
    ogroup = (ogene.nil? ? nil : ogene.official_group)
    if ogroup.nil?
      # No group: pad the five OrthoMCL-derived columns with blanks.
      5.times do
        to_print.push nil_char
      end
    else
      #orthomcl group
      to_print.push ogroup.orthomcl_name
      #localised apicomplexans in orthomcl group
      locked = CodingRegion.all(
        :joins => [
          {:orthomcl_genes => :orthomcl_groups},
          :expression_contexts
        ],
        :conditions => [
          'orthomcl_groups.id = ? and coding_regions.id != ?',
          ogroup.id, code.id
        ],
        :select => 'distinct(coding_regions.*)'
      )
      to_print.push "\"#{locked.collect{|a|
        [
          a.string_id,
          a.annotation.annotation,
          a.localisation_english
        ].join(small_split_string)
      }.join(big_split_string)}\""
      #unlocalised apicomplexans in orthomcl group
      to_print.push ogroup.orthomcl_genes.apicomplexan.all.reject {|a|
        a.coding_regions.select { |c|
          c.expressed_localisations.count > 0
        }.length > 0
      }.reach.orthomcl_name.join(', ').gsub('|',orthomcl_split_char)
      #non-apicomplexans with useful GO annotations in orthomcl group
      #species, orthomcl id, uniprot id(s), go annotations
      go_codes = CodingRegion.go_cc_usefully_termed.not_apicomplexan.all(
        :joins => {:orthomcl_genes => :orthomcl_groups},
        :conditions =>
          ["orthomcl_groups.id = ?", ogroup.id],
        :select => 'distinct(coding_regions.*)',
        :order => 'coding_regions.id'
      )
      to_print.push "\"#{go_codes.collect { |g|
        [
          g.species.name,
          g.orthomcl_genes.reach.orthomcl_name.join(', ').gsub('|',orthomcl_split_char),
          g.names.join(', '),
          g.coding_region_go_terms.useful.cc.all.reach.go_term.term.join(', ')
        ].join(small_split_string)
      }.join(big_split_string)}\""
      # consensus of orthology group.
      to_print.push 'consensus - TODO'
    end
    contexts = code.expression_contexts
    annotations = code.localisation_annotations
    # pubmed ids that localise the gene
    to_print.push contexts.reach.publication.definition.no_nils.uniq.join(small_split_string)
    # Categorise the microscopy methods: one yes/no column per method name.
    microscopy_method_names.each do |name|
      scopes =
        LocalisationAnnotation::POPULAR_MICROSCOPY_TYPE_NAME_SCOPE[name]
      done = LocalisationAnnotation
      scopes.each do |scope|
        done = done.send(scope)
      end
      if done.find_by_coding_region_id(code.id)
        to_print.push 'yes'
      else
        to_print.push 'no'
      end
    end
    # localisation methods used (assume different methods never give different results for the same gene)
    to_print.push annotations.reach.microscopy_method.no_nils.uniq.join(small_split_string)
    # strains
    to_print.push annotations.reach.strain.no_nils.uniq.join(small_split_string)
    # mapping comments
    to_print.push annotations.reach.gene_mapping_comments.no_nils.uniq.join(small_split_string).gsub(/\"/,'')
    # quotes
    # have to escape quote characters otherwise I get rows joined together
    to_print.push "\"#{annotations.reach.quote.uniq.join(small_split_string).gsub(/\"/,'\"')}\""
    # Emit one row per organellar localisation, rewriting column 5 each time.
    if organellar_locs.empty?
      puts to_print.join("\t")
    else
      organellar_locs.each do |o|
        to_print[5] = o.name
        puts to_print.join("\t")
      end
    end
  end
  progress.finish
end
# The big GOA file has not been 'redundancy reduced', a process which is buggy,
# like the species level ones. Here I upload the species that I'm interested
# in using that big file, not the small one.
# For each species, filter the combined GOA gene_association file by taxon
# and link each annotation's coding region to its GO term.
def goa_all_species_to_database
  require 'gene_association'
  UNIPROT_SPECIES_ID_NAME_HASH.each do |species_id, species_name|
    bad_codes_count = 0
    bad_go_count = 0
    good_count = 0
    Bio::GzipAndFilterGeneAssociation.foreach(
      "#{DATA_DIR}/GOA/gene_association.goa_uniprot.gz",
      "\ttaxon:#{species_id}\t"
    ) do |go|
      name = go.primary_id
      code = CodingRegion.fs(name, species_name)
      unless code
        $stderr.puts "Couldn't find coding region #{name}"
        bad_codes_count += 1
        next
      end
      go_term = GoTerm.find_by_go_identifier(go.go_identifier)
      unless go_term
        # Bug fix: this message wrongly said "coding region" for a missing GO term.
        $stderr.puts "Couldn't find GO term #{go.go_identifier}"
        bad_go_count += 1
        next
      end
      CodingRegionGoTerm.find_or_create_by_coding_region_id_and_go_term_id(
        code.id, go_term.id
      )
      good_count += 1
    end
    $stderr.puts "#{good_count} all good, failed to find #{bad_codes_count} coding regions and #{bad_go_count} go terms"
  end
end
# For each apicomplexan species, tally how many localised genes have
# 1, 2, 3... distinct positive top-level localisations (both raw and with
# nucleus+cytoplasm merged into one), printing the tallies and writing a
# per-gene TSV (with P. falciparum orthologue data) to duals.csv.
def how_many_genes_have_dual_localisation?
  dual_loc_folder = "#{PHD_DIR}/apiloc/experiments/dual_localisations"
  raise unless File.exists?(dual_loc_folder)
  file = File.open(File.join(dual_loc_folder, 'duals.csv'),'w')
  Species.apicomplexan.each do |species|
    species_name = species.name
    codes = CodingRegion.s(species_name).all(
      :joins => :expressed_localisations,
      :select => 'distinct(coding_regions.*)'
    )
    counts = []
    nuc_aware_counts = []
    codes_per_count = []
    # write the results to the species-specific file
    codes.each do |code|
      next if code.string_id == CodingRegion::UNANNOTATED_CODING_REGIONS_DUMMY_GENE_NAME
      tops = TopLevelLocalisation.positive.all(
        :joins => {:apiloc_localisations => :expressed_coding_regions},
        :conditions => ['coding_regions.id = ?',code.id],
        :select => 'distinct(top_level_localisations.*)'
      )
      count = tops.length
      # Raw tallies are recorded BEFORE the nucleus/cytoplasm merge below.
      counts[count] ||= 0
      counts[count] += 1
      codes_per_count[count] ||= []
      codes_per_count[count].push code.string_id
      # nucleus and cytoplasm as a single localisation if both are included
      names = tops.reach.name.retract
      if names.include?('nucleus') and names.include?('cytoplasm')
        count -= 1
      end
      # Write out the coding regions to a file
      # gather the falciparum data
      og = code.single_orthomcl!
      fals = []
      if og and og.official_group
        fals = og.official_group.orthomcl_genes.code('pfal').all.collect do |ogene|
          ogene.single_code
        end
      end
      file.puts [
        code.species.name,
        code.string_id,
        code.names,
        count,
        code.compartments.join('|'),
        fals.reach.compartments.join('|'),
        fals.reach.localisation_english.join('|')
      ].join("\t")
      # This tally uses the merged (nucleus+cytoplasm-aware) count.
      nuc_aware_counts[count] ||= 0
      nuc_aware_counts[count] += 1
    end
    puts species_name
    # p codes_per_count
    p counts
    p nuc_aware_counts
  end
  file.close
end
# Test how well localisation can be predicted for localised P. falciparum
# genes from their orthologues' most common localisation. Prints one TSV
# row per testable gene and finally a tally of comparison outcomes.
# Genes are skipped when: they are the unmatched-gene-model dummy, they
# have no OrthoMCL group, the group holds more than one falciparum gene,
# or no prediction can be made.
def falciparum_test_prediction_by_orthology_to_non_apicomplexans
  bins = {}
  puts [
    'PlasmoDB ID',
    'Names',
    'Compartments',
    'Prediction',
    'Comparison',
    'Full P. falciparum Localisation Information'
  ].join("\t")
  CodingRegion.localised.falciparum.all(
    :select => 'distinct(coding_regions.*)'
  ).each do |code|
    # Unassigned genes just cause problems for orthomcl
    next if code.string_id == CodingRegion::NO_MATCHING_GENE_MODEL
    # When there is more than 1 P. falciparum protein in the group, then ignore this
    group = code.single_orthomcl.official_group
    if group.nil?
      $stderr.puts "#{code.names.join(', ')} has no OrthoMCL group, ignoring."
      next
    end
    num = group.orthomcl_genes.code(code.species.orthomcl_three_letter).count
    if num != 1
      $stderr.puts "#{code.names.join(', ')} has #{num} genes in its localisation group, ignoring"
      next
    end
    pred = code.apicomplexan_localisation_prediction_by_most_common_localisation
    next if pred.nil?
    # goodness categorises how well the prediction matched the known localisation.
    goodness = code.compare_localisation_to_list(pred)
    puts [
      code.string_id,
      code.names.join('|'),
      code.compartments.join('|'),
      pred,
      goodness,
      code.localisation_english,
    ].join("\t")
    bins[goodness] ||= 0
    bins[goodness] += 1
  end
  # Print the results of the analysis
  p bins
end
# Looking through all the genes in the database, cache the compartments so
# that things are easier to compare. Populates CodingRegionCompartmentCache
# for all apicomplexan coding regions, then for all non-apicomplexan
# coding regions with useful GO CC terms.
def cache_all_compartments
  # Cache all apicomplexan compartments
  codes = CodingRegion.apicomplexan.all
  progress = ProgressBar.new('apicomplexans', codes.length)
  codes.each do |code|
    progress.inc
    comps = code.compartments
    comps.each do |comp|
      CodingRegionCompartmentCache.find_or_create_by_coding_region_id_and_compartment(
        code.id, comp
      )
    end
  end
  progress.finish
  # Cache all non-apicomplexan compartments
  codes = CodingRegion.go_cc_usefully_termed.all(:select => 'distinct(coding_regions.*)')
  progress = ProgressBar.new('eukaryotes', codes.length)
  codes.each do |code|
    progress.inc
    comps = code.compartments
    comps.each do |comp|
      # (Removed leftover `p code`/`p comp`/`p g` debug prints that flooded
      # stdout for every eukaryote gene and garbled the progress bar.)
      g = CodingRegionCompartmentCache.find_or_create_by_coding_region_id_and_compartment(
        code.id, comp
      )
      g.save!
    end
  end
  progress.finish
end
# How conserved is localisation between the three branches of life with significant
# data known about them?
# This method FAILS due to memory and compute time issues - I ended up
# essentially abandoning rails for this effort (see the _slimmer variant).
# For every OrthoMCL group with cached compartments, tallies localisation
# agreement within single kingdoms, kingdom pairs, and kingdom triples,
# then prints the tallies.
def conservation_of_eukaryotic_sub_cellular_localisation(debug = false)
  groups_to_counts = {}
  # For each orthomcl group that has a connection to coding region, and
  # that coding region has a cached compartment
  groups = OrthomclGroup.all(
    # :select => 'distinct(orthomcl_groups.*)',
    :joins => {:orthomcl_genes => {:coding_regions => :coding_region_compartment_caches}}
    # :limit => 10,
    # :include => {:orthomcl_genes => {:coding_regions => :coding_region_compartment_caches}}
  )
  # ProgressBar on stdout, because debug is on stderr
  progress = ProgressBar.new('conservation', groups.length, STDOUT)
  groups.each do |ortho_group|
    progress.inc
    $stderr.puts "---------------------------------------------" if debug
    # For each non-Apicomplexan gene with localisation information in this group,
    # assign it compartments.
    # For each apicomplexan, get the compartments from apiloc
    # This is nicely abstracted already!
    # However, a single orthomcl gene can have multiple CodingRegion's associated.
    # Therefore each has to be analysed as an array, frustratingly.
    # reject the orthomcl gene if it has no coding regions associated with it.
    orthomcl_genes = OrthomclGene.all(
      :joins => [:coding_regions, :orthomcl_groups],
      :conditions => {:orthomcl_groups => {:id => ortho_group.id}}
    )
    # Setup data structures
    kingdom_orthomcls = {} #array of kingdoms to orthomcl genes
    orthomcl_locs = {} #array of orthomcl_genes to localisations, cached for convenience and speed
    orthomcl_genes.each do |orthomcl_gene|
      # Localisations from all coding regions associated with an orthomcl gene are used.
      locs = CodingRegionCompartmentCache.all(
        :joins => {:coding_region => :orthomcl_genes},
        :conditions => {:orthomcl_genes => {:id => orthomcl_gene.id}}
      ).reach.compartment.uniq
      next if locs.empty? #ignore unlocalised genes completely from hereafter
      name = orthomcl_gene.orthomcl_name
      orthomcl_locs[name] = locs
      # no one orthomcl gene will have coding regions from 2 different species,
      # so using the first element of the array is fine
      species = orthomcl_gene.coding_regions[0].species
      kingdom_orthomcls[species.kingdom] ||= []
      kingdom_orthomcls[species.kingdom].push name
    end
    $stderr.puts kingdom_orthomcls.inspect if debug
    $stderr.puts orthomcl_locs.inspect if debug
    $stderr.puts "Kingdoms: #{kingdom_orthomcls.to_a.collect{|k| k[0]}.sort.join(', ')}" if debug
    # within the one kingdom, do they agree?
    kingdom_orthomcls.each do |kingdom, orthomcls|
      # If there is only a single coding region, then don't record
      number_in_kingdom_localised = orthomcls.length
      if number_in_kingdom_localised < 2
        $stderr.puts "#{ortho_group.orthomcl_name}, #{kingdom}, skipping (#{orthomcls.join(', ')})" if debug
        next
      end
      # convert orthomcl genes to localisation arrays
      locs = orthomcls.collect {|orthomcl|
        orthomcl_locs[orthomcl]
      }
      # OK, so now we are on. Let's do this
      agreement = OntologyComparison.new.agreement_of_group(locs)
      index = [kingdom]
      $stderr.puts "#{ortho_group.orthomcl_name}, #{index.inspect}, #{agreement}, #{orthomcls.join(' ')}" if debug
      groups_to_counts[index] ||= {}
      groups_to_counts[index][agreement] ||= 0
      groups_to_counts[index][agreement] += 1
    end
    # within two kingdoms, do they agree?
    kingdom_orthomcls.to_a.each_lower_triangular_matrix do |array1, array2|
      kingdom1 = array1[0]
      kingdom2 = array2[0]
      # Bug fix: `kingdoms` was referenced below but never defined here,
      # raising NameError whenever a kingdom had no localised orthomcls.
      kingdoms = [kingdom1, kingdom2]
      orthomcl_array1 = array1[1]
      orthomcl_array2 = array2[1]
      orthomcl_arrays = [orthomcl_array1, orthomcl_array2]
      # don't include unless there is an orthomcl in each kingdom
      zero_entriers = orthomcl_arrays.select{|o| o.length==0}
      if zero_entriers.length > 0
        # guarded with `if debug` for consistency with the 1- and 3-kingdom branches
        $stderr.puts "#{ortho_group.orthomcl_name}, #{kingdoms.join(' ')}, skipping" if debug
        next
      end
      locs_for_all = orthomcl_arrays.flatten.collect {|orthomcl| orthomcl_locs[orthomcl]}
      agreement = OntologyComparison.new.agreement_of_group(locs_for_all)
      index = kingdoms.sort
      $stderr.puts "#{ortho_group.orthomcl_name}, #{index.inspect}, #{agreement}" if debug
      groups_to_counts[index] ||= {}
      groups_to_counts[index][agreement] ||= 0
      groups_to_counts[index][agreement] += 1
    end
    # within three kingdoms, do they agree?
    kingdom_orthomcls.to_a.each_lower_triangular_3d_matrix do |a1, a2, a3|
      kingdom1 = a1[0]
      kingdom2 = a2[0]
      kingdom3 = a3[0]
      orthomcl_array1 = a1[1]
      orthomcl_array2 = a2[1]
      orthomcl_array3 = a3[1]
      kingdoms = [kingdom1, kingdom2, kingdom3]
      orthomcl_arrays = [orthomcl_array1, orthomcl_array2, orthomcl_array3]
      # don't include unless there is an orthomcl in each kingdom
      zero_entriers = orthomcl_arrays.select{|o| o.length==0}
      if zero_entriers.length > 0
        $stderr.puts "#{ortho_group.orthomcl_name}, #{kingdoms.join(' ')}, skipping" if debug
        next
      end
      locs_for_all = orthomcl_arrays.flatten.collect {|orthomcl| orthomcl_locs[orthomcl]}
      agreement = OntologyComparison.new.agreement_of_group locs_for_all
      index = kingdoms.sort
      $stderr.puts "#{ortho_group.orthomcl_name}, #{index.inspect}, #{agreement}" if debug
      groups_to_counts[index] ||= {}
      groups_to_counts[index][agreement] ||= 0
      groups_to_counts[index][agreement] += 1
    end
  end
  progress.finish
  # print out the counts for each group of localisations
  p groups_to_counts
end
# An attempt to make conservation_of_eukaryotic_sub_cellular_localisation faster
# as well as using less memory. In the end the easiest way was to stay away from Rails
# almost completely, and just use find_by_sql for the big database dump to a csv file,
# and then parse that csv file one line at a time.
def conservation_of_eukaryotic_sub_cellular_localisation_slimmer
  # Cache all of the kingdom information as orthomcl_split to kingdom
  orthomcl_abbreviation_to_kingdom = {}
  Species.all(:conditions => 'orthomcl_three_letter is not null').each do |sp|
    orthomcl_abbreviation_to_kingdom[sp.orthomcl_three_letter] = Species::FOUR_WAY_NAME_TO_KINGDOM[sp.name]
  end
  # Copy the data out of the database to a csv file. There shouldn't be any duplicates
  tempfile = File.open('/tmp/eukaryotic_conservation','w')
  # Tempfile.open('eukaryotic_conservation') do |tempfile|
  `chmod go+w #{tempfile.path}` #so postgres can write to this file as well
  # Server-side COPY: group name, gene name, cached compartment - one row
  # per (group, gene, compartment), ordered by group.
  OrthomclGene.find_by_sql "copy (select groupa.orthomcl_name, gene.orthomcl_name, cache.compartment from orthomcl_groups groupa inner join orthomcl_gene_orthomcl_group_orthomcl_runs ogogor on groupa.id=ogogor.orthomcl_group_id inner join orthomcl_genes gene on ogogor.orthomcl_gene_id=gene.id inner join orthomcl_gene_coding_regions ogc on ogc.orthomcl_gene_id=gene.id inner join coding_regions code on ogc.coding_region_id=code.id inner join coding_region_compartment_caches cache on code.id=cache.coding_region_id order by groupa.orthomcl_name) to '#{tempfile.path}'"
  tempfile.close
  # Parse the csv file to get the answers I'm looking for
  data = {}
  kingdom_orthomcls = {} #array of kingdoms to orthomcl genes
  orthomcl_locs = {} #array of orthomcl_genes to localisations, cached for convenience and speed
  FasterCSV.foreach(tempfile.path, :col_sep => "\t") do |row|
    # name columns
    raise unless row.length == 3
    group = row[0]
    gene = row[1]
    compartment = row[2]
    data[group] ||= {}
    # The gene's 3-letter species prefix determines its kingdom.
    kingdom = orthomcl_abbreviation_to_kingdom[OrthomclGene.new.official_split(gene)[0]]
    data[group]['kingdom_orthomcls'] ||= {}
    data[group]['kingdom_orthomcls'][kingdom] ||= []
    data[group]['kingdom_orthomcls'][kingdom].push gene
    data[group]['kingdom_orthomcls'][kingdom].uniq!
    data[group]['orthomcl_locs'] ||= {}
    data[group]['orthomcl_locs'][gene] ||= []
    data[group]['orthomcl_locs'][gene].push compartment
    data[group]['orthomcl_locs'][gene].uniq!
  end
  # Classify each of the groups into the different categories where possible
  groups_to_counts = {}
  data.each do |group, data2|
    $stderr.puts
    $stderr.puts '============================'
    classify_eukaryotic_conservation_of_single_orthomcl_group(
      data2['kingdom_orthomcls'],
      data2['orthomcl_locs'],
      groups_to_counts
    )
  end
  # Print one summary line per kingdom combination: raw counts then
  # percentage breakdown of agreement categories.
  groups_to_counts.to_a.sort{|a,b| a[0].length<=>b[0].length}.each do |king_array, agrees|
    yes = agrees[OntologyComparison::COMPLETE_AGREEMENT]
    no = agrees[OntologyComparison::DISAGREEMENT]
    maybe = agrees[OntologyComparison::INCOMPLETE_AGREEMENT]
    yes ||= 0; no||= 0; maybe ||= 0;
    total = (yes+no+maybe).to_f
    puts [
      king_array.join(','),
      yes, no, maybe,
      agrees[OntologyComparison::UNKNOWN_AGREEMENT],
      ((yes.to_f/total)*100).round,
      ((no.to_f/total)*100).round,
      ((maybe.to_f/total)*100).round,
    ].join("\t")
  end
end
# This is a modularisation of conservation_of_eukaryotic_sub_cellular_localisation,
# and does the calculations on the already transformed data (kingdom_orthomcls, orthomcl_locs).
# More details in conservation_of_eukaryotic_sub_cellular_localisation
def classify_eukaryotic_conservation_of_single_orthomcl_group(kingdom_orthomcls, orthomcl_locs, groups_to_counts, debug = true)
  # Tally localisation agreement for a single OrthoMCL group across every
  # combination of 1, 2, 3 and 4 kingdoms that have localised members.
  #
  # kingdom_orthomcls: hash of kingdom => array of orthomcl gene names localised in that kingdom
  # orthomcl_locs:     hash of orthomcl gene name => array of localisations for that gene
  # groups_to_counts:  accumulator, keyed by sorted array of kingdom names; values are
  #                    hashes of OntologyComparison agreement class => count. Mutated in place.
  # debug:             when true, decision/progress info is written to $stderr
  $stderr.puts kingdom_orthomcls.inspect if debug
  $stderr.puts orthomcl_locs.inspect if debug
  $stderr.puts "Kingdoms: #{kingdom_orthomcls.to_a.collect{|k| k[0]}.sort.join(', ')}" if debug
  # within the one kingdom, do they agree?
  kingdom_orthomcls.each do |kingdom, orthomcls|
    # If there is only a single coding region, then don't record
    number_in_kingdom_localised = orthomcls.length
    if number_in_kingdom_localised < 2
      $stderr.puts "One kingdom: #{kingdom}, skipping (#{orthomcls.join(', ')})" if debug
      next
    end
    # convert orthomcl genes to localisation arrays
    locs = orthomcls.collect {|orthomcl|
      orthomcl_locs[orthomcl]
    }
    # OK, so now we are on. Let's do this
    agreement = OntologyComparison.new.agreement_of_group(locs)
    index = [kingdom]
    $stderr.puts "One kingdom: #{index.inspect}, #{agreement}, #{orthomcls.join(' ')}" if debug
    groups_to_counts[index] ||= {}
    groups_to_counts[index][agreement] ||= 0
    groups_to_counts[index][agreement] += 1
  end
  # within two kingdoms, do they agree?
  kingdom_orthomcls.to_a.each_lower_triangular_matrix do |array1, array2|
    kingdom1 = array1[0]
    kingdom2 = array2[0]
    orthomcl_array1 = array1[1]
    orthomcl_array2 = array2[1]
    kingdoms = [kingdom1, kingdom2]
    orthomcl_arrays = [orthomcl_array1, orthomcl_array2]
    # don't include unless there is an orthomcl in each kingdom
    zero_entriers = orthomcl_arrays.select{|o| o.length==0}
    if zero_entriers.length > 0
      # BUGFIX: `kingdoms` was previously undefined in this loop, so reaching this
      # branch raised NameError. Also guard with `debug`, consistent with the
      # skip messages in the 3- and 4-kingdom loops below.
      $stderr.puts "Two kingdoms: #{kingdoms.join(' ')}, #{orthomcl_arrays}, skipping" if debug
      next
    end
    locs_for_all = orthomcl_arrays.flatten.collect {|orthomcl| orthomcl_locs[orthomcl]}
    agreement = OntologyComparison.new.agreement_of_group(locs_for_all)
    index = [kingdom1, kingdom2].sort
    $stderr.puts "Two kingdoms: #{index.inspect}, #{agreement}" if debug
    groups_to_counts[index] ||= {}
    groups_to_counts[index][agreement] ||= 0
    groups_to_counts[index][agreement] += 1
  end
  # within three kingdoms, do they agree?
  kingdom_orthomcls.to_a.each_lower_triangular_3d_matrix do |a1, a2, a3|
    kingdom1 = a1[0]
    kingdom2 = a2[0]
    kingdom3 = a3[0]
    orthomcl_array1 = a1[1]
    orthomcl_array2 = a2[1]
    orthomcl_array3 = a3[1]
    kingdoms = [kingdom1, kingdom2, kingdom3]
    orthomcl_arrays = [orthomcl_array1, orthomcl_array2, orthomcl_array3]
    # don't include unless there is an orthomcl in each kingdom
    zero_entriers = orthomcl_arrays.select{|o| o.length==0}
    if zero_entriers.length > 0
      $stderr.puts "Three kingdoms: #{kingdoms.join(' ')}, skipping" if debug
      next
    end
    locs_for_all = orthomcl_arrays.flatten.collect {|orthomcl| orthomcl_locs[orthomcl]}
    agreement = OntologyComparison.new.agreement_of_group locs_for_all
    index = kingdoms.sort
    $stderr.puts "Three kingdoms: #{index.inspect}, #{agreement}" if debug
    groups_to_counts[index] ||= {}
    groups_to_counts[index][agreement] ||= 0
    groups_to_counts[index][agreement] += 1
  end
  #within 4 kingdoms, do they agree?
  kingdom_orthomcls.to_a.each_lower_triangular_4d_matrix do |a1, a2, a3, a4|
    kingdom1 = a1[0]
    kingdom2 = a2[0]
    kingdom3 = a3[0]
    kingdom4 = a4[0]
    orthomcl_array1 = a1[1]
    orthomcl_array2 = a2[1]
    orthomcl_array3 = a3[1]
    orthomcl_array4 = a4[1]
    kingdoms = [kingdom1, kingdom2, kingdom3, kingdom4]
    orthomcl_arrays = [orthomcl_array1, orthomcl_array2, orthomcl_array3, orthomcl_array4]
    # don't include unless there is an orthomcl in each kingdom
    zero_entriers = orthomcl_arrays.select{|o| o.length==0}
    if zero_entriers.length > 0
      $stderr.puts "Four kingdoms: #{kingdoms.join(' ')}, skipping cos #{zero_entriers} have no entries" if debug
      next
    end
    locs_for_all = orthomcl_arrays.flatten.collect {|orthomcl| orthomcl_locs[orthomcl]}
    agreement = OntologyComparison.new.agreement_of_group locs_for_all
    index = kingdoms.sort
    $stderr.puts "Four kingdoms: #{index.inspect}, #{agreement}" if debug
    groups_to_counts[index] ||= {}
    groups_to_counts[index][agreement] ||= 0
    groups_to_counts[index][agreement] += 1
  end
end
# Using the assumption that the yeast-mouse, yeast-human and falciparum-toxo divergences are approximately
# equivalent, whatever that means, work out the conservation of localisation between each of those groups.
# Does yeast/mouse exhibit the same problems as falciparum/toxo when comparing localisations?
def localisation_conservation_between_pairs_of_species(species1 = Species::FALCIPARUM_NAME, species2 = Species::TOXOPLASMA_GONDII_NAME)
  # Compare localisation agreement between two species across all OrthoMCL groups
  # that contain localised members of both; results are pretty-printed at the end.
  groups_to_counts = {} #this array ends up holding all the answers after we have finished going through everything
  # All OrthoMCL groups with at least one gene from each species, restricted (via
  # the join) to groups having at least one cached compartment somewhere.
  toxo_fal_groups = OrthomclGroup.with_species(Species::ORTHOMCL_CURRENT_LETTERS[species1]).with_species(Species::ORTHOMCL_CURRENT_LETTERS[species2]).all(
    :joins => {:orthomcl_genes => {:coding_regions => :coding_region_compartment_caches}},
    #      :limit => 10,
    :select => 'distinct(orthomcl_groups.*)'
    #      :conditions => ['orthomcl_groups.orthomcl_name = ? or orthomcl_groups.orthomcl_name = ?','OG3_10042','OG3_10032']
  )
  $stderr.puts "Found #{toxo_fal_groups.length} groups containing proteins from #{species1} and #{species2}"
  progress = ProgressBar.new('tfal', toxo_fal_groups.length, STDOUT)
  toxo_fal_groups.each do |tfgroup|
    progress.inc
    orthomcl_locs = {} # orthomcl gene name => array of localisations
    species_orthomcls = {} #used like kingdom_locs in previous methods
    # collect the orthomcl_locs array for each species
    arrays = [species1, species2].collect do |species_name|
      # collect compartments for each of the toxos
      genes = tfgroup.orthomcl_genes.code(Species::ORTHOMCL_CURRENT_LETTERS[species_name]).all
      gene_locs = {}
      # add all the locs for a given gene
      genes.each do |gene|
        locs = gene.coding_regions.collect{|c| c.coding_region_compartment_caches.reach.compartment.retract}.flatten.uniq #all compartments associated with the gene
        unless locs.empty?
          gene_locs[gene.orthomcl_name] = locs
        end
      end
      #        $stderr.puts "Found #{genes.length} orthomcl genes in #{species_name} from #{tfgroup.orthomcl_name}, of those, #{gene_locs.length} had localisations"
      # fold this species' localised genes into the accumulators shared by
      # classify_eukaryotic_conservation_of_single_orthomcl_group
      gene_locs.each do |gene, locs|
        species_orthomcls[species_name] ||= []
        species_orthomcls[species_name].push gene
        orthomcl_locs[gene] = locs
      end
    end
    #      pp species_orthomcls
    #      pp orthomcl_locs
    classify_eukaryotic_conservation_of_single_orthomcl_group(species_orthomcls, orthomcl_locs, groups_to_counts)
  end
  progress.finish
  pp groups_to_counts
end
# Run localisation_conservation_between_pairs_of_species for each pair of species
# that I care about
# Run localisation_conservation_between_pairs_of_species over every unordered
# pair drawn from the species of interest, printing a separator before each run.
def exhaustive_localisation_conservation_between_pairs_of_species
  species_of_interest = [
    Species::YEAST_NAME,
    Species::MOUSE_NAME,
    Species::HUMAN_NAME,
    Species::ARABIDOPSIS_NAME,
    Species::FALCIPARUM_NAME,
    Species::TOXOPLASMA_GONDII_NAME,
  ]
  species_of_interest.each_lower_triangular_matrix do |first_species, second_species|
    puts '=============================================================='
    localisation_conservation_between_pairs_of_species(first_species, second_species)
  end
end
# Read the cached pairwise-conservation results and print them as a
# tab-separated species x species matrix of complete-agreement fractions.
def localisation_pairs_as_matrix
  master = {}
  # NOTE(security): each line of results.ruby is eval'd as a Hash literal. This
  # is only acceptable because the file is generated locally by this codebase;
  # never point this at untrusted input.
  # (Removed redundant `.each` — File.foreach with a block iterates directly.)
  File.foreach("#{PHD_DIR}/apiloc/pairs/results.ruby") do |line|
    hash = eval "{#{line}}"
    master = master.merge hash
  end
  organisms = [
    Species::YEAST_NAME,
    Species::MOUSE_NAME,
    Species::HUMAN_NAME,
    Species::ARABIDOPSIS_NAME,
    Species::FALCIPARUM_NAME,
    Species::TOXOPLASMA_GONDII_NAME,
  ]
  # Header row: empty corner cell, then one column per organism.
  print "\t"
  puts organisms.join("\t")
  organisms.each do |o1|
    print o1
    organisms.each do |o2|
      print "\t"
      next if o1 == o2 # diagonal is left blank
      # master is keyed by the sorted pair of species names
      result = master[[o1,o2].sort]
      raise Exception, "Couldn't find #{[o1,o2].sort}" if result.nil?
      print result['complete agreement'].to_f/result.values.sum
    end
    puts
  end
end
# If you take only localised falciparum proteins with localised yeast and mouse orthologues,
# what are the chances that they are conserved
def falciparum_predicted_by_yeast_mouse(predicting_species=[Species::YEAST_NAME, Species::MOUSE_NAME],
    test_species=Species::FALCIPARUM_NAME)
  # For each OrthoMCL group with members in every predicting species AND the test
  # species: when the predicting species' localisations completely agree, compare
  # that consensus against one randomly chosen localised gene of the test species.
  # Tallies of the final agreement classes are pretty-printed at the end.
  answer = {}
  # Build up the query using the with_species named_scope,
  # retrieving all groups that have members in each species
  fal_groups = OrthomclGroup.with_species(Species::ORTHOMCL_CURRENT_LETTERS[test_species])
  predicting_species.each do |sp|
    fal_groups = fal_groups.send(:with_species, Species::ORTHOMCL_CURRENT_LETTERS[sp])
  end
  fal_groups = fal_groups.all(:select => 'distinct(orthomcl_groups.*)')#, :limit => 20)
  $stderr.puts "Found #{fal_groups.length} groups with #{predicting_species.join(', ')} and #{test_species} proteins"
  progress = ProgressBar.new('predictionByTwo', fal_groups.length, STDOUT)
  fal_groups.each do |fal_group|
    progress.inc
    $stderr.puts
    # get the localisations from each of the predicting species
    predicting_array = predicting_species.collect do |species_name|
      genes = fal_group.orthomcl_genes.code(Species::ORTHOMCL_CURRENT_LETTERS[species_name]).all
      gene_locs = {} # orthomcl gene name => array of localisations
      # add all the locs for a given gene
      genes.each do |gene|
        locs = gene.coding_regions.collect{|c| c.coding_region_compartment_caches.reach.compartment.retract}.flatten.uniq #all compartments associated with the gene
        unless locs.empty?
          gene_locs[gene.orthomcl_name] = locs
        end
      end
      gene_locs
    end
    $stderr.puts "OGroup #{fal_group.orthomcl_name} gave #{predicting_array.inspect}"
    # only consider cases where there is localisations in each of the predicting species
    next if predicting_array.select{|a| a.empty?}.length > 0
    # only consider genes where the localisations from the predicting species agree
    flattened = predicting_array.inject{|a,b| a.merge(b)}.values
    $stderr.puts "flattened: #{flattened.inspect}"
    agreement = OntologyComparison.new.agreement_of_group(flattened)
    next unless agreement == OntologyComparison::COMPLETE_AGREEMENT
    $stderr.puts "They agree..."
    # Now compare the agreement between a random falciparum hit and the locs from the predicting
    prediction = flattened.to_a[0]
    $stderr.puts "Prediction: #{prediction}"
    all_fals = CodingRegion.falciparum.all(
      :joins => [:coding_region_compartment_caches, {:orthomcl_genes => :orthomcl_groups}],
      :conditions => ['orthomcl_groups.id = ?', fal_group.id]
    )
    next if all_fals.empty?
    # NOTE(review): rand() is not seeded here, so repeated runs may pick
    # different test genes and give slightly different tallies.
    fal = all_fals[rand(all_fals.length)]
    fal_compartments = fal.cached_compartments
    $stderr.puts "fal: #{fal.string_id} #{fal_compartments}"
    agreement = OntologyComparison.new.agreement_of_group([prediction, fal_compartments])
    $stderr.puts "Final agreement #{agreement}"
    answer[agreement] ||= 0
    answer[agreement] += 1
  end
  progress.finish
  pp answer
end
# Report, for every species, how many OrthoMCL genes and how many official
# OrthoMCL groups carry a cached compartment, as tab-separated rows on stdout.
def how_many_genes_are_localised_in_each_species
  species_names = Species.all.reach.name.retract
  # How many genes?
  species_names.each do |species_name|
    gene_count = OrthomclGene.count(
      :joins => {:coding_regions => [:coding_region_compartment_caches, {:gene => {:scaffold => :species}}]},
      :select => 'distinct(orthomcl_genes.id)',
      :conditions => {:species => {:name => species_name}}
    )
    puts ['OrthoMCL genes', species_name, gene_count].join("\t")
  end
  # how many orthomcl groups?
  species_names.each do |species_name|
    group_count = OrthomclGroup.official.count(
      :joins => {:orthomcl_genes => {:coding_regions => [:coding_region_compartment_caches, {:gene => {:scaffold => :species}}]}},
      :conditions => ['orthomcl_genes.orthomcl_name like ? and species.name = ?', "#{Species::ORTHOMCL_CURRENT_LETTERS[species_name]}|%", species_name],
      :select => 'distinct(orthomcl_groups.id)'
    )
    puts ['OrthoMCL groups', species_name, group_count].join("\t")
  end
end
# Predict the localisation of a protein by determining the amount
def prediction_by_most_common_localisation(predicting_species=[Species::YEAST_NAME, Species::MOUSE_NAME],
    test_species=Species::FALCIPARUM_NAME)
  # Predict the test species' localisation from the most common localisation of
  # each predicting species' orthologues, then score the prediction against the
  # test species' cached compartments. Tallies (including reasons a group was
  # excluded) are pretty-printed at the end.
  answer = {}
  # Build up the query using the with_species named_scope,
  # retrieving all groups that have members in each species
  fal_groups = OrthomclGroup.with_species(Species::ORTHOMCL_CURRENT_LETTERS[test_species])
  predicting_species.each do |sp|
    fal_groups = fal_groups.send(:with_species, Species::ORTHOMCL_CURRENT_LETTERS[sp])
  end
  fal_groups = fal_groups.all(:select => 'distinct(orthomcl_groups.*)')#, :limit => 20)
  $stderr.puts "Found #{fal_groups.length} groups with #{predicting_species.join(', ')} and #{test_species} proteins"
  progress = ProgressBar.new('predictionByCommon', fal_groups.length, STDOUT)
  fal_groups.each do |fal_group|
    progress.inc
    # Only include gene that have exactly 1 gene from that species, otherwise it is harder to
    # work out what is going on.
    all_tests = fal_group.orthomcl_genes.code(Species::ORTHOMCL_CURRENT_LETTERS[test_species]).all
    if all_tests.length > 1
      answer['Too many orthomcl genes found'] ||= 0
      answer['Too many orthomcl genes found'] += 1
      next
    end
    # gather the actual coding region - discard if there is not exactly 1
    codes = all_tests[0].coding_regions
    unless codes.length == 1
      answer["#{codes.length} coding regions for the 1 orthomcl gene"] ||= 0
      answer["#{codes.length} coding regions for the 1 orthomcl gene"] += 1
      next
    end
    code = codes[0]
    # Find the most common localisation in each species predicting
    preds = [] # the prediction of the most common localisations
    # CLEANUP: this was `commons = predicting_species.collect` but `commons`
    # was never read; plain each avoids the dead array.
    predicting_species.each do |s|
      common = code.localisation_prediction_by_most_common_localisation(s)
      # Ignore when no loc is found or it is confusing
      if common.nil?
        answer["No localisation found when trying to find common"] ||= 0
        answer["No localisation found when trying to find common"] += 1
        next
      end
      # add the commonest localisation to the prediction array
      preds.push [common]
    end
    # Don't predict unless all species are present
    if preds.length == predicting_species.length
      # Only predict if the top 2 species are in agreement
      if OntologyComparison.new.agreement_of_group(preds) == OntologyComparison::COMPLETE_AGREEMENT
        final_locs = code.cached_compartments
        if final_locs.empty?
          answer["No test species localisation"] ||= 0
          answer["No test species localisation"] += 1
        else
          # Add the final localisation compartments
          preds.push final_locs
          acc = OntologyComparison.new.agreement_of_group(preds)
          answer[acc] ||= 0
          answer[acc] += 1
        end
      else
        answer["Predicting species don't agree"] ||= 0
        answer["Predicting species don't agree"] += 1
      end
    else
      answer["Not enough localisation info in predicting groups"] ||= 0
      answer["Not enough localisation info in predicting groups"] += 1
    end
  end
  progress.finish
  pp answer
end
def stuarts_basel_spreadsheet_yeast_setup
  # One-off setup for stuarts_basel_spreadsheet: populate the compartment cache
  # for all usefully GO-CC-annotated yeast coding regions. The commented-out
  # calls below are earlier setup steps that were presumably run once already.
  #    uniprot_to_database(Species::YEAST_NAME)
  #    yeastgenome_ids_to_database
  #    OrthomclGene.new.link_orthomcl_and_coding_regions(
  #      "scer",
  #      :accept_multiple_coding_regions => true
  #    )
  # cache compartments
  codes = CodingRegion.s(Species::YEAST_NAME).go_cc_usefully_termed.all
  progress = ProgressBar.new('eukaryotes', codes.length)
  codes.each do |code|
    progress.inc
    comps = code.compartments
    comps.each do |comp|
      # find_or_create keeps the operation idempotent across re-runs
      CodingRegionCompartmentCache.find_or_create_by_coding_region_id_and_compartment(
        code.id, comp
      )
    end
  end
  progress.finish
end
def stuarts_basel_spreadsheet(accept_multiples = false)
  # Dump a per-OrthoMCL-group spreadsheet (TSV on stdout) of gene IDs and their
  # localisations, up to 2 localised genes per species of interest.
  # accept_multiples: when true, genes with several localisations are printed
  # joined with ', '; when false such rows are dropped.
  species_of_interest = [
    Species::ARABIDOPSIS_NAME,
    # BUGFIX: these two entries previously used Species::FALCIPARUM and
    # Species::TOXOPLASMA_GONDII, inconsistent with the *_NAME constants used
    # for every other entry (and elsewhere in this file). The list is compared
    # against species *names* returned by Species.four_letter_to_species_name
    # below, so the _NAME constants are required.
    Species::FALCIPARUM_NAME,
    Species::TOXOPLASMA_GONDII_NAME,
    Species::YEAST_NAME,
    Species::MOUSE_NAME,
    Species::HUMAN_NAME
  ]
  $stderr.puts "Copying data to tempfile.."
  # Copy the data out of the database to a csv file. Beware that there is duplicates in this file
  tempfile = File.open('/tmp/eukaryotic_conservation','w')
  #    Tempfile.open('eukaryotic_conservation') do |tempfile|
  `chmod go+w #{tempfile.path}` #so postgres can write to this file as well
  OrthomclGene.find_by_sql "copy (select groupa.orthomcl_name, gene.orthomcl_name, cache.compartment from orthomcl_groups groupa inner join orthomcl_gene_orthomcl_group_orthomcl_runs ogogor on groupa.id=ogogor.orthomcl_group_id inner join orthomcl_genes gene on ogogor.orthomcl_gene_id=gene.id inner join orthomcl_gene_coding_regions ogc on ogc.orthomcl_gene_id=gene.id inner join coding_regions code on ogc.coding_region_id=code.id inner join coding_region_compartment_caches cache on code.id=cache.coding_region_id order by groupa.orthomcl_name) to '#{tempfile.path}'"
  tempfile.close
  groups_genes = {}        # group name => array of orthomcl gene names
  genes_localisations = {} # orthomcl gene name => array of compartments
  # Read groups, genes, and locs into memory
  $stderr.puts "Reading into memory sql results.."
  FasterCSV.foreach(tempfile.path, :col_sep => "\t") do |row|
    #FasterCSV.foreach('/tmp/eukaryotic_conservation_test', :col_sep => "\t") do |row|
    # name columns
    raise unless row.length == 3
    group = row[0]
    gene = row[1]
    compartment = row[2]
    groups_genes[group] ||= []
    groups_genes[group].push gene
    groups_genes[group].uniq!
    genes_localisations[gene] ||= []
    genes_localisations[gene].push compartment
    genes_localisations[gene].uniq!
  end
  # Print headers
  header = ['']
  species_of_interest.each do |s|
    header.push "#{s} ID 1"
    header.push "#{s} loc 1"
    header.push "#{s} ID 2"
    header.push "#{s} loc 2"
  end
  puts header.join("\t")
  # Iterate through each OrthoMCL group, printing them out if they fit the criteria
  $stderr.puts "Iterating through groups.."
  groups_genes.each do |group, ogenes|
    $stderr.puts "looking at group #{group}"
    # associate genes with species
    species_gene = {}
    ogenes.each do |ogene|
      sp = Species.four_letter_to_species_name(OrthomclGene.new.official_split(ogene)[0])
      unless species_of_interest.include?(sp)
        $stderr.puts "Ignoring info for #{sp}"
        next
      end
      species_gene[sp] ||= []
      species_gene[sp].push ogene
      species_gene[sp].uniq!
    end
    # skip groups that are only localised in a single species
    if species_gene.length == 1
      $stderr.puts "Rejecting #{group} because it only has localised genes in 1 species of interest"
      next
    end
    # skip groups that have more than 2 localised genes in each group.
    failed = false
    species_gene.each do |species, genes|
      if genes.length > 2
        $stderr.puts "Rejecting #{group}, because there are >2 genes with localisation info in #{species}.."
        failed = true
      end
    end
    next if failed
    # procedure for making printing easier: returns [id, loc] pair, or nil when
    # the gene has multiple localisations and accept_multiples is off
    generate_cell = lambda do |gene|
      locs = genes_localisations[gene]
      # cytoplasm+nucleus is collapsed to just nucleus
      if locs.include?('cytoplasm') and locs.include?('nucleus')
        locs.reject!{|l| l=='cytoplasm'}
      end
      if locs.length == 1
        [OrthomclGene.new.official_split(gene)[1], locs[0]]
      elsif locs.length == 0
        raise Exception, "Unexpected lack of loc information"
      else
        if accept_multiples
          [OrthomclGene.new.official_split(gene)[1], locs.sort.join(', ')]
        else
          $stderr.puts "Returning nil for #{gene} because there is #{locs.length} localisations"
          nil
        end
      end
    end
    row = [group]
    failed = false #fail if genes have >1 localisation
    species_of_interest.each do |s|
      $stderr.puts "What's in #{s}? #{species_gene[s].inspect}"
      if species_gene[s].nil? or species_gene[s].length == 0
        row.push ['','']
        row.push ['','']
      elsif species_gene[s].length == 1
        r = generate_cell.call species_gene[s][0]
        failed = true if r.nil?
        row.push r
        row.push ['','']
      else
        species_gene[s].each do |g|
          r = generate_cell.call g
          failed = true if r.nil?
          row.push r
        end
      end
    end
    puts row.join("\t") unless failed
  end
end
# Generate the data for
# Tally localisation-bearing publications by year and print a tab-separated
# Year / count table on stdout; publications whose year could not be parsed
# are reported on stderr instead of being counted.
def publication_per_year_graphing
  counts_by_year = Hash.new(0)
  unparseable = 0
  Publication.all(:joins => {:expression_contexts => :localisation}).uniq.each do |pub|
    year = pub.year
    if year
      counts_by_year[year] += 1
    else
      unparseable += 1
      $stderr.puts "Failed: #{pub.inspect}"
    end
  end
  puts ['Year','Number of Publications'].join("\t")
  counts_by_year.sort.each { |year, count| puts [year, count].join("\t") }
  $stderr.puts "Failed to year-ify #{unparseable} publications."
end
def localisation_per_year_graphing
  # Count, per year, how many proteins were localised for the FIRST time that
  # year (publications are processed in year order; a coding region is only
  # counted once, at its earliest publication).
  # PERF: use a hash keyed by coding region id as a set — the previous
  # Array#include? scan was O(n) per lookup, O(n^2) overall.
  already_localised = {}
  years = {}
  fails = 0
  # Get all the publications that have localisations in order
  Publication.all(:joins => {:expression_contexts => :localisation}).uniq.sort {|p1,p2|
    if p1.year.nil?
      -1
    elsif p2.year.nil?
      1
    else
      p1.year <=> p2.year
    end
  }.each do |pub|
    y = pub.year
    if y.nil? #ignore publications with improperly parsed years
      fails += 1
      next
    end
    ids = CodingRegion.all(:select => 'coding_regions.id',
      :joins => {
        :expression_contexts => [:localisation, :publication]
      },
      :conditions => {:publications => {:id => pub.id}}
    )
    ids.each do |i|
      unless already_localised[i.id]
        already_localised[i.id] = true
        years[y] ||= 0
        years[y] += 1
      end
    end
  end
  puts ['Year','Number of New Protein Localisations'].join("\t")
  years.sort.each do |a,b|
    puts [a,b].join("\t")
  end
  $stderr.puts "Failed to year-ify #{fails} publications."
end
# How many and which genes are recorded in the malaria metabolic pathways database,
# but aren't recorded in ApiLoc?
def comparison_with_hagai
  # For each gene+link line scraped from the malaria metabolic pathways site,
  # report whether the gene is present in ApiLoc (by PlasmoDB id in column 2).
  # FIX: File.foreach closes the file when iteration finishes; the previous
  # File.open(...).each_line leaked the file handle.
  File.foreach("#{PHD_DIR}/screenscraping_hagai/localised_genes_and_links.txt") do |line|
    line.strip!
    splits = line.split(' ')
    #next unless splits[0].match(/#{splits[1]}/) #ignore possibly incorrect links
    code = CodingRegion.ff(splits[1])
    unless code
      puts "Couldn't find plasmodb id #{splits[1]}"
      next
    end
    if code.expressed_localisations.count == 0
      puts "Not found in ApiLoc: #{splits[1]}"
    else
      puts "Found in ApiLoc: #{splits[1]}"
    end
  end
end
# Create a spreadsheet with all the synonyms, so it can be attached as supplementary
def synonyms_spreadsheet
  # Print every known localisation and developmental-stage synonym as a TSV
  # table (vocabulary, species, canonical name(s), synonym) on stdout.
  sep = "\t"
  # Print titles
  puts [
    "Localistion or Developmental Stage?",
    "Species",
    "Full name(s)",
    "Synonym"
  ].join(sep)
  # Procedure for printing out each of the hits
  # `actual` may be a single canonical name or an array of them.
  printer = lambda do |species_name, actual, synonym, cv_name|
    if actual.kind_of?(Array)
      puts [cv_name, species_name, actual.join(","), synonym].join(sep)
    else
      puts [cv_name, species_name, actual, synonym].join(sep)
    end
  end
  # Print all the synonyms
  [
    LocalisationConstants::KNOWN_LOCALISATION_SYNONYMS,
    DevelopmentalStageConstants::KNOWN_DEVELOPMENTAL_STAGE_SYNONYMS,
  ].each do |cv|
    # Human-readable label for the controlled vocabulary being printed
    cv_name = {
      DevelopmentalStageConstants::KNOWN_DEVELOPMENTAL_STAGE_SYNONYMS => 'Developmental Stage',
      LocalisationConstants::KNOWN_LOCALISATION_SYNONYMS => 'Localisation'
    }[cv]
    cv.each do |sp, hash|
      if sp == Species::OTHER_SPECIES #for species not with a genome project
        # Under OTHER_SPECIES the hash is nested one level deeper, e.g.:
        # Species::OTHER_SPECIES => {
        #   'Sarcocystis muris' => {
        #     'surface' => 'cell surface'
        #   },
        #   'Babesia gibsoni' => {
        #     'surface' => 'cell surface',
        #     'erythrocyte cytoplasm' => 'host cell cytoplasm',
        #     'pm' => 'plasma membrane',
        #     'membrane' => 'plasma membrane'
        #   },
        hash.each do |species_name, hash2|
          hash2.each do |synonym, actual|
            printer.call(species_name, actual, synonym, cv_name)
          end
        end
      else #normal species
        hash.each do |synonym, actual|
          printer.call(sp, actual, synonym, cv_name)
        end
      end
    end
  end
end
# Print the controlled vocabulary mapping umbrella (top-level) terms to their
# specific terms, for both localisations and developmental stages, as TSV.
def umbrella_localisations_controlled_vocabulary
  delimiter = "\t"
  # Column headers
  puts [
    "Localistion or Developmental Stage?",
    "Umbrella",
    "Specific Localisation Name"
  ].join(delimiter)
  # Localisations: hash of umbrella => array of specific terms
  ApilocLocalisationTopLevelLocalisation::APILOC_TOP_LEVEL_LOCALISATION_HASH.each do |top_level, specifics|
    specifics.each do |specific|
      puts ["Localisation", top_level, specific].join(delimiter)
    end
  end
  # Developmental stages: hash of specific term => umbrella
  DevelopmentalStageTopLevelDevelopmentalStage::APILOC_DEVELOPMENTAL_STAGE_TOP_LEVEL_DEVELOPMENTAL_STAGES.each do |specific, top_level|
    puts ["Developmental Stage", top_level, specific].join(delimiter)
  end
end
def how_many_apicomplexan_genes_have_localised_orthologues
  # For each sequenced apicomplexan species, print (TSV) the total OrthoMCL gene
  # count and how many of those genes sit in a group containing a localised
  # member — first counting cached compartments (which include GO CC IDA),
  # then counting only expressed (ApiLoc) localisations.
  $stderr.puts "starting group search"
  groups = OrthomclGroup.official.all(
    :joins => {:orthomcl_genes => {:coding_regions => :coding_region_compartment_caches}},
    :select => 'distinct(orthomcl_groups.id)'
  )
  $stderr.puts "finished group search, found #{groups.length} groups"
  group_ids = groups.collect{|g| g.id}
  $stderr.puts "finished group id transform"
  puts '# Genes that have localised ortholgues, if you consider GO CC IDA terms from all Eukaryotes'
  Species.sequenced_apicomplexan.all.each do |sp|
    num_orthomcl_genes = OrthomclGene.code(sp.orthomcl_three_letter).count(
      :select => 'distinct(orthomcl_genes.id)'
    )
    # go through the groups and work out how many coding regions there are in those groups from this species
    num_with_a_localised_orthologue = OrthomclGene.code(sp.orthomcl_three_letter).count(
      :select => 'distinct(orthomcl_genes.id)',
      :joins => :orthomcl_groups,
      :conditions => "orthomcl_gene_orthomcl_group_orthomcl_runs.orthomcl_group_id in #{group_ids.to_sql_in_string}"
    )
    puts [
      sp.name,
      num_orthomcl_genes,
      num_with_a_localised_orthologue
    ].join("\t")
  end
  puts
  puts '# Genes that have localised ortholgues, if you don\'t consider GO CC IDA terms from all Eukaryotes'
  $stderr.puts "starting group search"
  groups = OrthomclGroup.official.all(
    :joins => {:orthomcl_genes => {:coding_regions => :expressed_localisations}},
    :select => 'distinct(orthomcl_groups.id)'
  )
  $stderr.puts "finished group search, found #{groups.length} groups"
  group_ids = groups.collect{|g| g.id}
  $stderr.puts "finished group id transform"
  # BUGFIX: removed a copy-pasted header here that re-printed the FIRST
  # section's '...if you consider...' title inside this second ("don't
  # consider") section, contradicting the header printed just above.
  Species.sequenced_apicomplexan.all.each do |sp|
    num_orthomcl_genes = OrthomclGene.code(sp.orthomcl_three_letter).count(
      :select => 'distinct(orthomcl_genes.id)'
    )
    # go through the groups and work out how many coding regions there are in those groups from this species
    num_with_a_localised_orthologue = OrthomclGene.code(sp.orthomcl_three_letter).count(
      :select => 'distinct(orthomcl_genes.id)',
      :joins => :orthomcl_groups,
      :conditions => "orthomcl_gene_orthomcl_group_orthomcl_runs.orthomcl_group_id in #{group_ids.to_sql_in_string}"
    )
    puts [
      sp.name,
      num_orthomcl_genes,
      num_with_a_localised_orthologue
    ].join("\t")
  end
end
def conservation_of_localisation_in_apicomplexa
  # Print, per OrthoMCL group, a block of annotated localisation rows for every
  # apicomplexan gene in the group — but only for groups whose localised genes
  # span more than one species. Skip statistics go to stderr.
  groups_skipped_because_less_than_2_different_species = 0
  # For each OrthoMCL group that contains 2 or more proteins localised,
  # When there is at least 2 different species involved
  groups = OrthomclGroup.all(
    :joins => {:orthomcl_genes => {:coding_regions => :expressed_localisations}}
  ).uniq
  groups.each do |group|
    $stderr.puts "Inspecting #{group.orthomcl_name}.."
    genes = group.orthomcl_genes.apicomplexan.all.uniq
    # If there is more than 1 species involved
    outputs = [] # one row per gene: [species, id, annotation, compartments, english]
    if genes.collect{|g| g.official_split[0]}.uniq.length > 1
      genes.each do |g|
        codes = g.coding_regions.all(:joins => :coding_region_compartment_caches).uniq
        # only genes unambiguously linked to exactly one localised coding region
        if codes.length != 1
          $stderr.puts "Skipping coding regions for #{g.orthomcl_name}, since only #{codes.length} genes with loc were linked"
          next
        end
        code = codes[0]
        outputs.push [
          code.species.name,
          code.string_id,
          code.annotation.annotation,
          code.coding_region_compartment_caches.reach.compartment.join(', '),
          code.localisation_english,
        ]
      end
    else
      groups_skipped_because_less_than_2_different_species += 1
    end
    if outputs.collect{|o| o[0]}.uniq.length > 1 #if there is >1 species involved
      puts
      puts '#####################################'
      puts outputs.collect{|d| d.join("\t")}.join("\n")
    else
      $stderr.puts "Skipped group #{group.orthomcl_name} because of lack of annotation, only found #{outputs.collect{|d| d.join(",")}.join(" ### ")}"
    end
  end
  $stderr.puts "Skipped #{groups_skipped_because_less_than_2_different_species} groups due to lack of >1 species having loc information"
end
# the idea is to find how many genes have annotations that fall into these 2 categories:
# * Fall under the current definition of what is an organelle
# * Don't fall under any organelle, and aren't (exclusively) annotated by GO terms that are ancestors of the organelle terms.
def how_many_non_organelle_cc_annotations
  # For each non-apicomplexan species, classify every GO cellular-component
  # annotation of every gene as: subsumed (under a recognised organelle term),
  # ancestral (an ancestor of an organelle term), or wayward (neither), and
  # print per-species counts of genes in each class. Wayward term tallies go
  # to stderr.
  # Create a list of all the GO terms that are included in the various compartments
  # this is a list of subsumers
  compartment_go_terms = CodingRegion.new.create_organelle_go_term_mappers
  # Create a list of ancestors of compartment GO terms.
  ancestors = OntologyComparison::RECOGNIZED_LOCATIONS.collect {|loc|
    go_entry = GoTerm.find_by_term(loc)
    raise Exception, "Unable to find GO term in database: #{loc}" unless go_entry
    anc = Bio::Go.new.ancestors_cc(go_entry.go_identifier)
    $stderr.puts "Found #{anc.length} ancestors for #{go_entry.go_identifier} #{go_entry.term}"
    anc
  }.flatten.sort.uniq
  # For each non-apicomplexan species with a orthomcl code
  Species.not_apicomplexan.all.each do |sp|
    $stderr.puts sp.name
    # get all the different GO terms for each of the different genes in the species
    count_subsumed = 0
    count_ancestral = 0
    count_wayward = 0
    wayward_ids = {} # GO term name => number of times seen wayward (whole species)
    codes = CodingRegion.s(sp.name).all(:joins => [:orthomcl_genes, :go_terms], :include => :go_terms).uniq
    progress = ProgressBar.new(sp.name,codes.length)
    codes.each do |code|
      progress.inc
      local_wayward_ids = {} # wayward tallies for this gene only (debug output)
      # per-gene flags: a gene can be counted in several classes at once
      subsumed = false
      ancestral = false
      wayward = false
      code.go_terms.each do |g|
        next unless g.aspect == GoTerm::CELLULAR_COMPONENT
        anc = false
        sub = false
        #ancestral?
        if ancestors.include?(g.go_identifier)
          anc = true
          ancestral = true
        end
        #subsumed?
        compartment_go_terms.each do |subsumer|
          if subsumer.subsume?(g.go_identifier, false)
            sub = true
            subsumed = true
          end
        end
        # else wayward
        if !anc and !sub
          local_wayward_ids[g.term] = 0 if local_wayward_ids[g.term].nil?
          local_wayward_ids[g.term] += 1
          wayward_ids[g.term] = 0 if wayward_ids[g.term].nil?
          wayward_ids[g.term] += 1
          wayward = true
        end
      end
      #        $stderr.puts "#{code.string_id}: ancestral: #{ancestral}, subsumed: #{subsumed}, wayward: #{wayward}: "+
      #          "#{local_wayward_ids.collect{|term, count| "#{count} #{term}"}.join("\t")}"
      #error check
      count_subsumed += 1 if subsumed
      count_ancestral += 1 if ancestral
      count_wayward += 1 if wayward
    end
    progress.finish
    to_print = [
      sp.name,
      count_ancestral,
      count_wayward,
      count_subsumed,
    ]
    puts to_print.join("\t")
    # Wayward terms, most frequent first
    $stderr.puts "Found these wayward from #{sp.name}:\n"
    strings = wayward_ids.to_a.sort{|a,b|
      b[1]<=>a[1]
    }.collect{|a|
      "wayward\t#{a[1]}\t#{a[0]}"
    }.join("\n")
    $stderr.puts strings
    $stderr.puts
  end
end
def most_localisations_by_authorship
  # Credit each author (by last name) with every protein FIRST localised in one
  # of their publications (publications processed in year order), then print a
  # TSV leaderboard sorted by count descending.
  # PERF: hash keyed by coding region id used as a set — the previous
  # Array#include? scan was O(n) per lookup, O(n^2) overall.
  already_localised = {}
  authors_localisations = {}
  fails = 0
  # Get all the publications that have localisations in order
  Publication.all(:joins => {:expression_contexts => :localisation}).uniq.sort {|p1,p2|
    if p1.year.nil?
      -1
    elsif p2.year.nil?
      1
    else
      p1.year <=> p2.year
    end
  }.each do |pub|
    y = pub.year
    if y.nil? #ignore publications with improperly parsed years
      fails += 1
      next
    end
    ids = CodingRegion.all(:select => 'distinct(coding_regions.id)',
      :joins => {
        :expression_contexts => [:localisation, :publication]
      },
      :conditions => {:publications => {:id => pub.id}}
    )
    ids.each do |i|
      unless already_localised[i.id]
        already_localised[i.id] = true
        # authors string looks like "Smith, J., Jones, K." — split on '., '
        authors = pub.authors.split('., ')
        authors.each do |author|
          last_name = author.split(' ')[0].gsub(/,/,'')
          authors_localisations[last_name] ||= 0
          authors_localisations[last_name] += 1
        end
      end
    end
  end
  puts ['Last name','Number of New Protein Localisations'].join("\t")
  authors_localisations.to_a.sort{|a,b| b[1]<=>a[1]}.each do |a,b|
    puts [a,b].join("\t")
  end
  $stderr.puts "Failed to parse #{fails} publications properly"
end
# upload the IDA annotations from geneontology.org from there
def tbrucei_amigo_gene_associations_to_database
  # Upload IDA-evidenced GO annotations for T. brucei from the GeneDB gene
  # association file into the database, reporting upload/failure counts.
  require 'gene_association'
  failed_to_find_id_count = 0
  failed_to_find_go_term_count = 0
  ida_annotation_count = 0
  upload_annotations = 0
  Bio::GzipAndFilterGeneAssociation.foreach(
    "#{DATA_DIR}/GO/cvs/go/gene-associations/gene_association.GeneDB_Tbrucei.gz", #all T. brucei annotations are from GeneDB
    "\tIDA\t"
  ) do |go|
    ida_annotation_count += 1
    puts "Trying GO term #{go.go_identifier} for #{go.primary_id}"
    code = CodingRegion.fs(go.primary_id, Species::TBRUCEI_NAME)
    if code
      go_term = GoTerm.find_by_go_identifier_or_alternate(go.go_identifier)
      if go_term
        puts "Uploading GO term #{go.go_identifier} for #{code.string_id}"
        a = CodingRegionGoTerm.find_or_create_by_go_term_id_and_coding_region_id_and_evidence_code(
          go_term.id, code.id, go.evidence_code
        )
        raise unless a.save!
        upload_annotations += 1
      else
        failed_to_find_go_term_count += 1
      end
    else
      # BUGFIX: this previously did `failed_to_find_id_count = 0`, resetting the
      # counter instead of incrementing it, so the summary always reported 0.
      failed_to_find_id_count += 1
    end
  end
  $stderr.puts "Found #{ida_annotation_count} annotations attempted to be uploaded"
  $stderr.puts "Uploaded #{upload_annotations} annotations"
  $stderr.puts "Failed to upload #{failed_to_find_id_count} annotations since the gene was not found in ApiLoc"
  $stderr.puts "Failed to upload #{failed_to_find_go_term_count} annotations since the go term was not found in ApiLoc"
end
# Which organelle has the most conserved localisation?
def conservation_of_localisation_stratified_by_organelle_pairings
srand 47 #set random number generator to be a deterministic series of random numbers so I don't get differences between runs
# Define list of species to pair up
specees = [
Species::ARABIDOPSIS_NAME,
Species::HUMAN_NAME,
Species::MOUSE_NAME,
Species::YEAST_NAME,
Species::POMBE_NAME,
Species::RAT_NAME,
Species::DROSOPHILA_NAME,
Species::ELEGANS_NAME,
Species::DICTYOSTELIUM_DISCOIDEUM_NAME,
Species::DANIO_RERIO_NAME,
Species::TRYPANOSOMA_BRUCEI_NAME,
Species::PLASMODIUM_FALCIPARUM_NAME,
Species::TOXOPLASMA_GONDII_NAME,
]
# for each pair
specees.pairs.each do |pair|
p1 = pair[0]
p2 = pair[1]
$stderr.puts "SQLing #{p1} versus #{p2}.."
# for each group, choose a protein (repeatably) randomly from each species, so we have a pair of genes
# not sure how to do this the rails way
# Copy the data out of the database to a csv file.
# tempfile = File.new("#{PHD_DIR}/apiloc/experiments/organelle_conservation/dummy.csv") #debug
# csv_path = "/home/ben/phd/gnr2/apiloc_logs/organelle_conservation/#{p1} and #{p2}.csv".gsub(' ','_')
# tempfile = File.open(csv_path)
# tempfile = File.open("/home/ben/phd/gnr2/apiloc_logs/organelle_conservation/#{p1} and #{p2}.csv".gsub(' ','_'),'w')
# `chmod go+w #{tempfile.path}` #so postgres can write to this file as well
# OrthomclGene.find_by_sql "copy(select distinct(groups.orthomcl_name,codes1.string_id,codes2.string_id, ogenes1.orthomcl_name, ogenes2.orthomcl_name, caches1.compartment, caches2.compartment) from orthomcl_groups groups,
#
# orthomcl_gene_orthomcl_group_orthomcl_runs ogogor1,
# orthomcl_genes ogenes1,
# orthomcl_gene_coding_regions ogcr1,
# coding_regions codes1,
# coding_region_compartment_caches caches1,
# genes genes1,
# scaffolds scaffolds1,
# species species1,
#
# orthomcl_gene_orthomcl_group_orthomcl_runs ogogor2,
# orthomcl_genes ogenes2,
# orthomcl_gene_coding_regions ogcr2,
# coding_regions codes2,
# coding_region_compartment_caches caches2,
# genes genes2,
# scaffolds scaffolds2,
# species species2
#
# where
# species1.name = '#{p1}' and
# groups.id = ogogor1.orthomcl_group_id and
# ogogor1.orthomcl_gene_id = ogenes1.id and
# ogcr1.orthomcl_gene_id = ogenes1.id and
# ogcr1.coding_region_id = codes1.id and
# caches1.coding_region_id = codes1.id and
# codes1.gene_id = genes1.id and
# genes1.scaffold_id = scaffolds1.id and
# scaffolds1.species_id = species1.id
#
# and
# species2.name = '#{p2}' and
# groups.id = ogogor2.orthomcl_group_id and
# ogogor2.orthomcl_gene_id = ogenes2.id and
# ogcr2.orthomcl_gene_id = ogenes2.id and
# ogcr2.coding_region_id = codes2.id and
# caches2.coding_region_id = codes2.id and
# codes2.gene_id = genes2.id and
# genes2.scaffold_id = scaffolds2.id and
# scaffolds2.species_id = species2.id) to '#{tempfile.path}'"
# tempfile.close
# next #just create the CSVs at this point
orth1 = Species::ORTHOMCL_FOUR_LETTERS[p1]
orth2 = Species::ORTHOMCL_FOUR_LETTERS[p2]
$stderr.puts "Groups of #{orth1}"
groups1 = OrhtomclGroup.all(
:joins => {:orthomcl_gene => {:coding_regions => :coding_region_compartment_caches}},
:conditions => ["orthomcl_genes.orthomcl_name like '?'","#{orth1}%"]
)
$stderr.puts "Groups of #{orth2}"
groups2 = OrhtomclGroup.all(
:joins => {:orthomcl_gene => {:coding_regions => :coding_region_compartment_caches}},
:conditions => ["orthomcl_genes.orthomcl_name like '?'","#{orth2}%"]
)
# convert it all to a big useful hash, partly for historical reasons
dat = {}
progress = ProgressBar.new('hashing',groups1.length)
groups1.each do |group|
progress.inc
if groups2.include?(group1)
ogenes1 = OrthomclGene.all(
:include => [:orthomcl_groups,
{:coding_regions => :coding_region_compartment_caches}],
:joins => {:coding_regions => :coding_region_compartment_caches},
:conditions => ["orthomcl_genes.orthomcl_name like '?' and orthomcl_group_id = ?","#{orth1}%",group.id]
)
ogenes2 = OrthomclGene.all(
:include => [:orthomcl_groups,
{:coding_regions => :coding_region_compartment_caches}],
:joins => {:coding_regions => :coding_region_compartment_caches},
:conditions => ["orthomcl_genes.orthomcl_name like '?' and orthomcl_group_id = ?","#{orth2}%",group.id]
)
ogenes1.each do |ogene1|
caches = ogene1.coding_regions.all.reach.coding_region_compartment_caches.compartment.retract
dat[group.orthomcl_name] ||= {}
dat[group.orthomcl_name][p1] ||= {}
dat[group.orthomcl_name][p1][ogene1.orthomcl_name] = caches.uniq
end
ogenes2.each do |ogene2|
caches = ogene2.coding_regions.all.reach.coding_region_compartment_caches.compartment.retract
dat[group.orthomcl_name][p2] ||= {}
dat[group.orthomcl_name][p2][ogene2.orthomcl_name] = caches.uniq
end
break
end
end
progress.finish
p dat
# Read in the CSV, converting it all to a hash
# of orthomcl_group => Array of arrays of the rest of the recorded info
# group => species => gene => compartments
# dat = {}
# File.open(csv_path).each_line do |line|
# row = line.strip.split(',')
# unless row.length == 7
# raise Exception, "failed to parse line #{line}"
# end
# # groups.orthomcl_name,codes1.string_id,codes2.string_id, ogenes1.orthomcl_name,
# # ogenes2.orthomcl_name, caches1.compartment, caches2.compartment
# group = row[0].gsub('(','')
# code1 = row[1]
# code2 = row[2]
# ogene1 = row[3]
# ogene2 = row[4]
# cache1 = row[5]
# cache2 = row[6].gsub(')','')
#
# dat[group] ||= {}
# dat[group][p1] ||= {}
# dat[group][p1][ogene1] ||= []
# dat[group][p1][ogene1].push cache1
#
# dat[group][p2] ||= {}
# dat[group][p2][ogene2] ||= []
# dat[group][p2][ogene2].push cache2
# end
# for each of the orthomcl groups
tally = {}
dat.each do |group, other|
raise Exception, "Found unexpected number of species in hash group => #{other.inspect}" unless other.keys.length == 2
# choose one gene (repeatably) randomly from each species
p_ones = other[p1].to_a
p_twos = other[p2].to_a
rand1 = p_ones[rand(p_ones.size)]
rand2 = p_twos[rand(p_twos.size)]
g1 = {rand1[0] => rand1[1]}
g2 = {rand2[0] => rand2[1]}
locs1 = g1.values.flatten.uniq
locs2 = g2.values.flatten.uniq
# work out whether the two genes are conserved in their localisation
agree = OntologyComparison.new.agreement_of_pair(locs1,locs2)
# debug out genes involved, compartments, group_id, species,
$stderr.puts "From group #{group}, chose #{g1.inspect} from #{p1} and #{g2.inspect} from #{p2}. Agreement: #{agree}"
# record conservation, organelles involved, within the species pairing
[g1.values, g2.values].flatten.uniq.each do |org|
tally[org] ||= {}
tally[org][agree] ||= 0
tally[org][agree] += 1
end
end
#puts "From #{p1} and #{p2},"
OntologyComparison::RECOGNIZED_LOCATIONS.each do |loc|
if tally[loc]
puts [
p1,p2,loc,
tally[loc][OntologyComparison::COMPLETE_AGREEMENT],
tally[loc][OntologyComparison::INCOMPLETE_AGREEMENT],
tally[loc][OntologyComparison::DISAGREEMENT],
].join("\t")
else
puts [
p1,p2,loc,
0,0,0
].join("\t")
end
end
end
srand #revert to regular random number generation in case anything else happens after this method
end
end
|
require 'gnuplot'
require 'rational'
require 'matrix'
require 'mapcar'
require 'block'
require 'tempfile'
require 'nodel'
class Vector
include Enumerable
module Norm
def Norm.sqnorm(obj, p)
sum = 0
obj.each{|x| sum += x ** p}
sum
end
end
alias :length :size
alias :index :[]
def [](i)
case i
when Range
Vector[*to_a.slice(i)]
else
index(i)
end
end
def []=(i, v)
case i
when Range
#i.each{|e| self[e] = v[e - i.begin]}
(self.size..i.begin - 1).each{|e| self[e] = 0} # self.size must be in the first place because the size of self can be modified
[v.size, i.entries.size].min.times {|e| self[e + i.begin] = v[e]}
(v.size + i.begin .. i.end).each {|e| self[e] = 0}
else
@elements[i]=v
end
end
class << self
def add(*args)
v = []
args.each{|x| v += x.to_a}
Vector[*v]
end
end
def collect!
els = @elements.collect! {|v| yield(v)}
Vector.elements(els, false)
end
def each
(0...size).each {|i| yield(self[i])}
nil
end
def max
to_a.max
end
def min
to_a.min
end
def norm(p = 2)
Norm.sqnorm(self, p) ** (Float(1)/p)
end
def norm_inf
[min.abs, max.abs].max
end
def slice(*args)
Vector[*to_a.slice(*args)]
end
def slice_set(v, b, e)
for i in b..e
self[i] = v[i-b]
end
end
def slice=(args)
case args[1]
when Range
slice_set(args[0], args[1].begin, args[1].last)
else
slice_set(args[0], args[1], args[2])
end
end
def /(c)
map {|e| e.quo(c)}
end
def transpose
Matrix[self.to_a]
end
alias :t :transpose
# Computes the Householder vector (MC, Golub, p. 210, algorithm 5.1.1)
def house
s = self[1..length-1]
sigma = s.inner_product(s)
v = clone; v[0] = 1
if sigma == 0
beta = 0
else
mu = Math.sqrt(self[0] ** 2 + sigma)
if self[0] <= 0
v[0] = self[0] - mu
else
v[0] = - sigma.quo(self[0] + mu)
end
v2 = v[0] ** 2
beta = 2 * v2.quo(sigma + v2)
v /= v[0]
end
return v, beta
end
# Projection operator (http://en.wikipedia.org/wiki/Gram-Schmidt_process#The_Gram.E2.80.93Schmidt_process)
def proj(v)
vp = v.inner_product(self)
vp = Float vp if vp.is_a?(Integer)
self * (vp / inner_product(self))
end
def normalize
self / self.norm
end
# Stabilized Gram-Schmidt process (http://en.wikipedia.org/wiki/Gram-Schmidt_process#Algorithm)
def Vector.gram_schmidt(*vectors)
v = vectors.clone
for j in 0...v.size
for i in 0..j-1
v[j] -= v[i] * v[j].inner_product(v[i])
end
v[j] /= v[j].norm
end
v
end
end
class Matrix
include Enumerable
public_class_method :new
attr_reader :rows, :wrap
@wrap = nil
def initialize(*argv)
return initialize_old(*argv) if argv[0].is_a?(Symbol)
n, m, val = argv; val = 0 if not val
f = (block_given?)? lambda {|i,j| yield(i, j)} : lambda {|i,j| val}
init_rows((0...n).collect {|i| (0...m).collect {|j| f.call(i,j)}}, true)
end
def initialize_old(init_method, *argv)
self.funcall(init_method, *argv)
end
alias :ids :[]
def [](i, j)
case i
when Range
case j
when Range
Matrix[*i.collect{|l| self.row(l)[j].to_a}]
else
column(j)[i]
end
else
case j
when Range
row(i)[j]
else
ids(i, j)
end
end
end
def []=(i, j, v)
case i
when Range
if i.entries.size == 1
self[i.begin, j] = (v.is_a?(Matrix) ? v.row(0) : v)
else
case j
when Range
if j.entries.size == 1
self[i, j.begin] = (v.is_a?(Matrix) ? v.column(0) : v)
else
i.each{|l| self.row= l, v.row(l - i.begin), j}
end
else
self.column= j, v, i
end
end
else
case j
when Range
if j.entries.size == 1
self[i, j.begin] = (v.is_a?(Vector) ? v[0] : v)
else
self.row= i, v, j
end
else
@rows[i][j] = (v.is_a?(Vector) ? v[0] : v)
end
end
end
def clone
super
end
def initialize_copy(orig)
init_rows(orig.rows, true)
self.wrap=(orig.wrap)
end
class << self
def diag(*args)
dsize = 0
sizes = args.collect{|e| x = (e.is_a?(Matrix)) ? e.row_size : 1; dsize += x; x}
m = Matrix.zero(dsize)
count = 0
sizes.size.times{|i|
range = count..(count+sizes[i]-1)
m[range, range] = args[i]
count += sizes[i]
}
m
end
end
# Division by a scalar
def quo(v)
map {|e| e.quo(v)}
end
# quo seems always desirable
alias :/ :quo
def set(m)
0.upto(m.row_size - 1) do |i|
0.upto(m.column_size - 1) do |j|
self[i, j] = m[i, j]
end
end
self.wrap = m.wrap
end
def wraplate(ijwrap = "")
"class << self
def [](i, j)
#{ijwrap}; @rows[i][j]
end
def []=(i, j, v)
#{ijwrap}; @rows[i][j] = v
end
end"
end
def wrap=(mode = :torus)
case mode
when :torus then eval(wraplate("i %= row_size; j %= column_size"))
when :h_cylinder then eval(wraplate("i %= row_size"))
when :v_cylinder then eval(wraplate("j %= column_size"))
when :nil then eval(wraplate)
end
@wrap = mode
end
def max_len_column(j)
column_collect(j) {|x| x.to_s.length}.max
end
def cols_len
(0...column_size).collect {|j| max_len_column(j)}
end
def to_s(mode = :pretty, len_col = 3)
return super if empty?
if mode == :pretty
clen = cols_len
to_a.collect {|r| mapcar(r, clen) {|x, l| format("%#{l}s ",x.to_s)} << "\n"}.join("")
else
i = 0; s = ""; cs = column_size
each do |e|
i = (i + 1) % cs
s += format("%#{len_col}s ", e.to_s)
s += "\n" if i == 0
end
s
# to_a.each {|r| r.each {|x| print format("%#{len_col}s ", x.to_s)}; print "\n"}
end
end
def each
@rows.each {|x| x.each {|e| yield(e)}}
nil
end
def row!(i)
if block_given?
@rows[i].collect! {|e| yield(e)}
else
Vector.elements(@rows[i], false)
end
end
def row_collect(row, &block)
f = default_block(block)
@rows[row].collect {|e| f.call(e)}
end
def column_collect(col, &block)
f = default_block(block)
(0...row_size).collect {|r| f.call(self[r, col])}
end
alias :row_collect! :row!
def column!(j)
return (0...row_size).collect { |i| @rows[i][j] = yield(@rows[i][j])} if block_given?
end
alias :column_collect! :column!
def column=(args)
c = args[0] # the column to be change
v = args[1] #the values vector
case args.size
when 3 then r = args[2] # the range 2..4
when 4 then r = args[2]..args[3] #the range by borders
else r = 0...row_size
end
#r.each{|e| self[e, c] = v[e - r.begin]}
(self.row_size..r.begin - 1).each{|e| self[e, c] = 0}
[v.size, r.entries.size].min.times{|e| self[e + r.begin, c] = v[e]}
((v.size + r.begin)..r.end).each {|e| self[e, c] = 0}
end
def row=(args)
case args.size
when 3 then range = args[2]
when 4 then range = args[2]..args[3]
else range = 0...column_size
end
row!(args[0])[range]=args[1]
end
def norm(p = 2)
Vector::Norm.sqnorm(self, p) ** (Float(1)/p)
end
def to_plot
gplot = Tempfile.new('plot', Dir::tmpdir, false) # do not unlink
gplot.puts(to_s)
gplot.close
gplot.path
end
def plot(back = true)
Gnuplot.plot("splot '#{to_plot}' matrix with lines; pause -1", back)
end
def Matrix.mplot(*matrices)
s = "splot "
matrices.each {|x| s += "'#{x.to_plot}' matrix with lines,"}
s = s[0..-2] + "; pause -1"
Gnuplot.plot(s)
end
def empty?
@rows.empty? if @rows
end
# some new features
def to_matrix(method, arg)
a = self.send(method, arg).to_a
(arg.is_a?(Range)) ? Matrix[*a] : Matrix[a]
end
def row2matrix(r) # return the row/s of matrix as a matrix
to_matrix(:row, r)
end
def column2matrix(c) # return the colomn/s of matrix as a matrix
to_matrix(:column, c).t
end
module LU
def LU.tau(m, k) # calculate the
t = m.column2matrix(k)
tk = t[k, 0]
(0..k).each{|i| t[i, 0] = 0}
return t if tk == 0
(k+1...m.row_size).each{|i| t[i, 0] = t[i, 0].to_f / tk}
t
end
def LU.M(m, k)
i = Matrix.I(m.row_size)
t = tau(m, k)
e = i.row2matrix(k)
i - t * e
end
def LU.gauss(m)
a = m.clone
(0..m.column_size-2).collect {|i| mi = M(a, i); a = mi * a; mi }
end
end
def U
u = self.clone
LU.gauss(self).each{|m| u = m * u}
u
end
def L
trans = LU.gauss(self)
l = trans[0].inv
(1...trans.size).each{|i| p trans[i].inv; l *= trans[i].inv}
l
end
def hQR #Householder QR
h = []
mat = self.clone
m = row_size - 1
n = column_size - 1
(n+1).times{|j|
v, beta = mat[j..m, j].house
h[j] = Matrix.diag(Matrix.I(j), Matrix.I(m-j+1)- beta * (v * v.t))
mat[j..m, j..n] = (Matrix.I(m-j+1) - beta * (v * v.t)) * mat[j..m, j..n]
mat[(j+1)..m,j] = v[2..(m-j+1)] if j < m }
h
end
def hBidiag #Householder Bidiagonalization
u = []
w = []
mat = self.clone
m = row_size - 1
n = column_size - 1
(n+1).times{|j|
v, beta = mat[j..m,j].house
mat[j..m, j..n] = (Matrix.I(m-j+1) - beta * (v * v.t)) * mat[j..m, j..n]
mat[j+1..m, j] = v[1..(m-j)]
# uj = [1 mat[j+1..m,j]] U_j's Householder vector
uj = Vector.add(Vector[1], mat[j+1..m, j])
u[j] = Matrix.diag(Matrix.I(j), Matrix.I(m-j+1)- beta * (uj * uj.t))
if j <= n - 2
v, beta = (mat[j, j+1..n]).house
mat[j..m, j+1..n] = mat[j..m, j+1..n] * (Matrix.I(n-j) - beta * (v * v.t))
mat[j, j+2..n] = v[1..n-j-1]
vj = Vector.add(Vector[1], mat[j, j+2..n])
w[j] = Matrix.diag(Matrix.I(j+1), Matrix.I(n-j)- beta * (vj * vj.t))
end }
return u, w
end
# the bidiagonal matrix obtained with
# Householder Bidiagonalization algorithm
def bidiagonal
u,v = self.hBidiag
ub = Matrix.I(row_size)
u.each{|x| ub *= x}
vb = Matrix.I(column_size)
v.each{|x| vb *= x}
ub.t * self * vb
end
#householder Q = H_1 * H_2 * H_3 * ... * H_n
def hQ
h = self.hQR
q = h[0]
(1...h.size).each{|i| q *= h[i]}
q
end
# R = H_n * H_n-1 * ... * H_1 * A
def hR
h = self.hQR
r = self.clone
h.size.times{|i| r = h[i] * r}
r
end
# Modified Gram Schmidt QR factorization (MC, Golub, p. 232)
def gram_schmidt
r = clone
q = clone
n = column_size
m = row_size
for k in 0...n
r[k,k] = self[0...m, k].norm
q[0...m, k] = self[0...m, k] / r[k, k]
for j in (k+1)...n
r[k, j] = q[0...m, k].t * self[0...m, j]
self[0...m, j] -= q[0...m, k] * r[k, j]
end
end
end
def givens(a, b)
if b == 0
c = 0; s = 0
else
if b.abs > a.abs
theta = Float(-a)/b; s = 1/Math.sqrt(1+theta**2); c = s * theta
else
theta = Float(-b)/a; c = 1/Math.sqrt(1+theta**2); s = c * theta
end
end
return c, s
end
def givensQR
q = []
mat = self.clone
m = row_size - 1
n = column_size - 1
(n+1).times{|j|
m.downto(j+1){|i|
c, s = givens(mat[i - 1, j], mat[i, j])
qt = Matrix.I(m+1)
qt[i,i] = c; qt[i,j] = s
qt[j,i] = -s; qt[j,j] = c
q[q.size] = qt
mat[i-1..i, j..n] = Matrix[[c, -s],[s, c]] * mat[i-1..i, j..n] }}
return mat, q
end
def givensQ
r, qt = givensQR
q = Matrix.I(row_size)
qt.each{|x| q *= x}
q
end
#the matrix must be an upper Hessenberg matrix
def hessenbergQR
q = []
mat = self.clone
n = row_size - 1
n.times{|j|
c, s = givens(mat[j,j], mat[j+1, j])
cs = Matrix[[c, s], [-s, c]]
q[j] = Matrix.diag(Matrix.I(j), cs, Matrix.I(n - j - 1))
mat[j..j+1, j..n] = cs.t * mat[j..j+1, j..n] }
return mat, q
end
def hessenbergQ
r, qj = hessenbergQR
q = Matrix.I(row_size)
qj.each{|x| q *= x}
q
end
end
Householder Reduction to Hessenberg Form
require 'gnuplot'
require 'rational'
require 'matrix'
require 'mapcar'
require 'block'
require 'tempfile'
require 'nodel'
class Vector
include Enumerable
module Norm
def Norm.sqnorm(obj, p)
sum = 0
obj.each{|x| sum += x ** p}
sum
end
end
alias :length :size
alias :index :[]
def [](i)
case i
when Range
Vector[*to_a.slice(i)]
else
index(i)
end
end
def []=(i, v)
case i
when Range
#i.each{|e| self[e] = v[e - i.begin]}
(self.size..i.begin - 1).each{|e| self[e] = 0} # self.size must be in the first place because the size of self can be modified
[v.size, i.entries.size].min.times {|e| self[e + i.begin] = v[e]}
(v.size + i.begin .. i.end).each {|e| self[e] = 0}
else
@elements[i]=v
end
end
class << self
def add(*args)
v = []
args.each{|x| v += x.to_a}
Vector[*v]
end
end
def collect!
els = @elements.collect! {|v| yield(v)}
Vector.elements(els, false)
end
def each
(0...size).each {|i| yield(self[i])}
nil
end
def max
to_a.max
end
def min
to_a.min
end
def norm(p = 2)
Norm.sqnorm(self, p) ** (Float(1)/p)
end
def norm_inf
[min.abs, max.abs].max
end
def slice(*args)
Vector[*to_a.slice(*args)]
end
def slice_set(v, b, e)
for i in b..e
self[i] = v[i-b]
end
end
def slice=(args)
case args[1]
when Range
slice_set(args[0], args[1].begin, args[1].last)
else
slice_set(args[0], args[1], args[2])
end
end
def /(c)
map {|e| e.quo(c)}
end
def transpose
Matrix[self.to_a]
end
alias :t :transpose
# Computes the Householder vector (MC, Golub, p. 210, algorithm 5.1.1)
def house
s = self[1..length-1]
sigma = s.inner_product(s)
v = clone; v[0] = 1
if sigma == 0
beta = 0
else
mu = Math.sqrt(self[0] ** 2 + sigma)
if self[0] <= 0
v[0] = self[0] - mu
else
v[0] = - sigma.quo(self[0] + mu)
end
v2 = v[0] ** 2
beta = 2 * v2.quo(sigma + v2)
v /= v[0]
end
return v, beta
end
# Projection operator (http://en.wikipedia.org/wiki/Gram-Schmidt_process#The_Gram.E2.80.93Schmidt_process)
def proj(v)
vp = v.inner_product(self)
vp = Float vp if vp.is_a?(Integer)
self * (vp / inner_product(self))
end
def normalize
self / self.norm
end
# Stabilized Gram-Schmidt process (http://en.wikipedia.org/wiki/Gram-Schmidt_process#Algorithm)
def Vector.gram_schmidt(*vectors)
v = vectors.clone
for j in 0...v.size
for i in 0..j-1
v[j] -= v[i] * v[j].inner_product(v[i])
end
v[j] /= v[j].norm
end
v
end
end
class Matrix
include Enumerable
public_class_method :new
attr_reader :rows, :wrap
@wrap = nil
def initialize(*argv)
return initialize_old(*argv) if argv[0].is_a?(Symbol)
n, m, val = argv; val = 0 if not val
f = (block_given?)? lambda {|i,j| yield(i, j)} : lambda {|i,j| val}
init_rows((0...n).collect {|i| (0...m).collect {|j| f.call(i,j)}}, true)
end
def initialize_old(init_method, *argv)
self.funcall(init_method, *argv)
end
alias :ids :[]
def [](i, j)
case i
when Range
case j
when Range
Matrix[*i.collect{|l| self.row(l)[j].to_a}]
else
column(j)[i]
end
else
case j
when Range
row(i)[j]
else
ids(i, j)
end
end
end
def []=(i, j, v)
case i
when Range
if i.entries.size == 1
self[i.begin, j] = (v.is_a?(Matrix) ? v.row(0) : v)
else
case j
when Range
if j.entries.size == 1
self[i, j.begin] = (v.is_a?(Matrix) ? v.column(0) : v)
else
i.each{|l| self.row= l, v.row(l - i.begin), j}
end
else
self.column= j, v, i
end
end
else
case j
when Range
if j.entries.size == 1
self[i, j.begin] = (v.is_a?(Vector) ? v[0] : v)
else
self.row= i, v, j
end
else
@rows[i][j] = (v.is_a?(Vector) ? v[0] : v)
end
end
end
def clone
super
end
def initialize_copy(orig)
init_rows(orig.rows, true)
self.wrap=(orig.wrap)
end
class << self
def diag(*args)
dsize = 0
sizes = args.collect{|e| x = (e.is_a?(Matrix)) ? e.row_size : 1; dsize += x; x}
m = Matrix.zero(dsize)
count = 0
sizes.size.times{|i|
range = count..(count+sizes[i]-1)
m[range, range] = args[i]
count += sizes[i]
}
m
end
end
# Division by a scalar
def quo(v)
map {|e| e.quo(v)}
end
# quo seems always desirable
alias :/ :quo
def set(m)
0.upto(m.row_size - 1) do |i|
0.upto(m.column_size - 1) do |j|
self[i, j] = m[i, j]
end
end
self.wrap = m.wrap
end
def wraplate(ijwrap = "")
"class << self
def [](i, j)
#{ijwrap}; @rows[i][j]
end
def []=(i, j, v)
#{ijwrap}; @rows[i][j] = v
end
end"
end
def wrap=(mode = :torus)
case mode
when :torus then eval(wraplate("i %= row_size; j %= column_size"))
when :h_cylinder then eval(wraplate("i %= row_size"))
when :v_cylinder then eval(wraplate("j %= column_size"))
when :nil then eval(wraplate)
end
@wrap = mode
end
def max_len_column(j)
column_collect(j) {|x| x.to_s.length}.max
end
def cols_len
(0...column_size).collect {|j| max_len_column(j)}
end
def to_s(mode = :pretty, len_col = 3)
return super if empty?
if mode == :pretty
clen = cols_len
to_a.collect {|r| mapcar(r, clen) {|x, l| format("%#{l}s ",x.to_s)} << "\n"}.join("")
else
i = 0; s = ""; cs = column_size
each do |e|
i = (i + 1) % cs
s += format("%#{len_col}s ", e.to_s)
s += "\n" if i == 0
end
s
# to_a.each {|r| r.each {|x| print format("%#{len_col}s ", x.to_s)}; print "\n"}
end
end
def each
@rows.each {|x| x.each {|e| yield(e)}}
nil
end
def row!(i)
if block_given?
@rows[i].collect! {|e| yield(e)}
else
Vector.elements(@rows[i], false)
end
end
def row_collect(row, &block)
f = default_block(block)
@rows[row].collect {|e| f.call(e)}
end
def column_collect(col, &block)
f = default_block(block)
(0...row_size).collect {|r| f.call(self[r, col])}
end
alias :row_collect! :row!
def column!(j)
return (0...row_size).collect { |i| @rows[i][j] = yield(@rows[i][j])} if block_given?
end
alias :column_collect! :column!
def column=(args)
c = args[0] # the column to be change
v = args[1] #the values vector
case args.size
when 3 then r = args[2] # the range 2..4
when 4 then r = args[2]..args[3] #the range by borders
else r = 0...row_size
end
#r.each{|e| self[e, c] = v[e - r.begin]}
(self.row_size..r.begin - 1).each{|e| self[e, c] = 0}
[v.size, r.entries.size].min.times{|e| self[e + r.begin, c] = v[e]}
((v.size + r.begin)..r.end).each {|e| self[e, c] = 0}
end
def row=(args)
case args.size
when 3 then range = args[2]
when 4 then range = args[2]..args[3]
else range = 0...column_size
end
row!(args[0])[range]=args[1]
end
def norm(p = 2)
Vector::Norm.sqnorm(self, p) ** (Float(1)/p)
end
def to_plot
gplot = Tempfile.new('plot', Dir::tmpdir, false) # do not unlink
gplot.puts(to_s)
gplot.close
gplot.path
end
def plot(back = true)
Gnuplot.plot("splot '#{to_plot}' matrix with lines; pause -1", back)
end
def Matrix.mplot(*matrices)
s = "splot "
matrices.each {|x| s += "'#{x.to_plot}' matrix with lines,"}
s = s[0..-2] + "; pause -1"
Gnuplot.plot(s)
end
def empty?
@rows.empty? if @rows
end
# some new features
def to_matrix(method, arg)
a = self.send(method, arg).to_a
(arg.is_a?(Range)) ? Matrix[*a] : Matrix[a]
end
def row2matrix(r) # return the row/s of matrix as a matrix
to_matrix(:row, r)
end
def column2matrix(c) # return the colomn/s of matrix as a matrix
to_matrix(:column, c).t
end
module LU
def LU.tau(m, k) # calculate the
t = m.column2matrix(k)
tk = t[k, 0]
(0..k).each{|i| t[i, 0] = 0}
return t if tk == 0
(k+1...m.row_size).each{|i| t[i, 0] = t[i, 0].to_f / tk}
t
end
def LU.M(m, k)
i = Matrix.I(m.row_size)
t = tau(m, k)
e = i.row2matrix(k)
i - t * e
end
def LU.gauss(m)
a = m.clone
(0..m.column_size-2).collect {|i| mi = M(a, i); a = mi * a; mi }
end
end
def U
u = self.clone
LU.gauss(self).each{|m| u = m * u}
u
end
def L
trans = LU.gauss(self)
l = trans[0].inv
(1...trans.size).each{|i| p trans[i].inv; l *= trans[i].inv}
l
end
def hQR #Householder QR
h = []
mat = self.clone
m = row_size - 1
n = column_size - 1
(n+1).times{|j|
v, beta = mat[j..m, j].house
h[j] = Matrix.diag(Matrix.I(j), Matrix.I(m-j+1)- beta * (v * v.t))
mat[j..m, j..n] = (Matrix.I(m-j+1) - beta * (v * v.t)) * mat[j..m, j..n]
mat[(j+1)..m,j] = v[2..(m-j+1)] if j < m }
h
end
def hBidiag #Householder Bidiagonalization
u = []
w = []
mat = self.clone
m = row_size - 1
n = column_size - 1
(n+1).times{|j|
v, beta = mat[j..m,j].house
mat[j..m, j..n] = (Matrix.I(m-j+1) - beta * (v * v.t)) * mat[j..m, j..n]
mat[j+1..m, j] = v[1..(m-j)]
# uj = [1 mat[j+1..m,j]] U_j's Householder vector
uj = Vector.add(Vector[1], mat[j+1..m, j])
u[j] = Matrix.diag(Matrix.I(j), Matrix.I(m-j+1)- beta * (uj * uj.t))
if j <= n - 2
v, beta = (mat[j, j+1..n]).house
mat[j..m, j+1..n] = mat[j..m, j+1..n] * (Matrix.I(n-j) - beta * (v * v.t))
mat[j, j+2..n] = v[1..n-j-1]
vj = Vector.add(Vector[1], mat[j, j+2..n])
w[j] = Matrix.diag(Matrix.I(j+1), Matrix.I(n-j)- beta * (vj * vj.t))
end }
return u, w
end
# the bidiagonal matrix obtained with
# Householder Bidiagonalization algorithm
def bidiagonal
u,v = self.hBidiag
ub = Matrix.I(row_size)
u.each{|x| ub *= x}
vb = Matrix.I(column_size)
v.each{|x| vb *= x}
ub.t * self * vb
end
#householder Q = H_1 * H_2 * H_3 * ... * H_n
def hQ
h = self.hQR
q = h[0]
(1...h.size).each{|i| q *= h[i]}
q
end
# R = H_n * H_n-1 * ... * H_1 * A
def hR
h = self.hQR
r = self.clone
h.size.times{|i| r = h[i] * r}
r
end
# Modified Gram Schmidt QR factorization (MC, Golub, p. 232)
def gram_schmidt
r = clone
q = clone
n = column_size
m = row_size
for k in 0...n
r[k,k] = self[0...m, k].norm
q[0...m, k] = self[0...m, k] / r[k, k]
for j in (k+1)...n
r[k, j] = q[0...m, k].t * self[0...m, j]
self[0...m, j] -= q[0...m, k] * r[k, j]
end
end
end
def givens(a, b)
if b == 0
c = 0; s = 0
else
if b.abs > a.abs
theta = Float(-a)/b; s = 1/Math.sqrt(1+theta**2); c = s * theta
else
theta = Float(-b)/a; c = 1/Math.sqrt(1+theta**2); s = c * theta
end
end
return c, s
end
def givensQR
q = []
mat = self.clone
m = row_size - 1
n = column_size - 1
(n+1).times{|j|
m.downto(j+1){|i|
c, s = givens(mat[i - 1, j], mat[i, j])
qt = Matrix.I(m+1)
qt[i-1,i-1] = c; qt[i-1,i] = s
qt[i,i-1] = -s; qt[i,i] = c
q[q.size] = qt
mat[i-1..i, j..n] = Matrix[[c, -s],[s, c]] * mat[i-1..i, j..n] }}
return mat, q
end
def givensQ
r, qt = givensQR
q = Matrix.I(row_size)
qt.each{|x| q *= x}
q
end
#the matrix must be an upper Hessenberg matrix
def hessenbergQR
q = []
mat = self.clone
n = row_size - 1
n.times{|j|
c, s = givens(mat[j,j], mat[j+1, j])
cs = Matrix[[c, s], [-s, c]]
q[j] = Matrix.diag(Matrix.I(j), cs, Matrix.I(n - j - 1))
mat[j..j+1, j..n] = cs.t * mat[j..j+1, j..n] }
return mat, q
end
def hessenbergQ
r, qj = hessenbergQR
q = Matrix.I(row_size)
qj.each{|x| q *= x}
q
end
#Householder Reduction to Hessenberg Form
def hr2hf
u = []
mat = self.clone
n = row_size - 1
(n - 1).times{|k|
v, beta = mat[k+1..n,k].house #the householder matrice part
hhu = Matrix.I(n-k) - beta * (v * v.t)
u[k] = Matrix.diag(Matrix.I(k+1), hhu)
mat[k+1..n, k..n] = hhu * mat[k+1..n, k..n]
mat[0..n, k+1..n] = mat[0..n, k+1..n] * hhu}
return mat, u
end
def hr2hf_u0
mat, u = hr2hf
u0 = Matrix.I(row_size)
u.each{|x| u0 *= x}
u0
end
end
|
# encoding: utf-8
# Copyright (c) 2012, HipByte SPRL and contributors
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
require 'motion/project/config'
require 'motion/util/code_sign'
require 'motion/project/target'
require 'socket'
module Motion; module Project;
class XcodeConfig < Config
variable :xcode_dir, :sdk_version, :deployment_target, :frameworks,
:weak_frameworks, :embedded_frameworks, :external_frameworks, :framework_search_paths,
:libs, :identifier, :codesign_certificate, :short_version, :entitlements, :delegate_class, :embed_dsym,
:version
def initialize(project_dir, build_mode)
super
@info_plist = {}
@frameworks = []
@weak_frameworks = []
@embedded_frameworks = []
@external_frameworks = []
@framework_search_paths = []
@libs = []
@targets = []
@bundle_signature = '????'
@short_version = nil
@entitlements = {}
@delegate_class = 'AppDelegate'
@spec_mode = false
@embed_dsym = (development? ? true : false)
@vendor_projects = []
@version = '1.0'
end
def xcode_dir=(xcode_dir)
@xcode_version = nil
@xcode_dir = unescape_path(File.path(xcode_dir))
end
def xcode_dir
@xcode_dir ||= begin
if ENV['RM_TARGET_XCODE_DIR']
ENV['RM_TARGET_XCODE_DIR']
else
xcodeselect = '/usr/bin/xcode-select'
xcode_dir = unescape_path(`#{xcodeselect} -print-path`.strip)
App.fail "Can't locate any version of Xcode on the system." unless File.exist?(xcode_dir)
xcode_dir
end
end
end
def xcode_version
@xcode_version ||= begin
failed = false
vers = `/usr/libexec/PlistBuddy -c "Print :CFBundleShortVersionString" "#{xcode_dir}/../Info.plist"`.strip
failed = true if !$?.success?
build = `/usr/libexec/PlistBuddy -c "Print :ProductBuildVersion" "#{xcode_dir}/../version.plist"`.strip
failed = true if !$?.success?
if failed
txt = `#{locate_binary('xcodebuild')} -version`
vers = txt.scan(/Xcode\s(.+)/)[0][0]
build = txt.scan(/(BuildVersion:|Build version)\s(.+)/)[0][1]
end
[vers, build]
end
end
def platforms; raise; end
def local_platform; raise; end
def deploy_platform; raise; end
def validate
# Xcode version
App.fail "Xcode 6.x or greater is required" if Util::Version.new(xcode_version[0]) < Util::Version.new('6.0')
# sdk_version
platforms.each do |platform|
sdk_path = File.join(platforms_dir, platform + '.platform',
"Developer/SDKs/#{platform}#{sdk_version}.sdk")
unless File.exist?(sdk_path)
App.fail "Can't locate #{platform} SDK #{sdk_version} at `#{sdk_path}'"
end
end
# deployment_target
if Util::Version.new(deployment_target) > Util::Version.new(sdk_version)
App.fail "Deployment target `#{deployment_target}' must be equal or lesser than SDK version `#{sdk_version}'"
end
unless File.exist?(datadir)
App.fail "iOS deployment target #{deployment_target} is not supported by this version of RubyMotion"
end
# embedded_frameworks
%w(embedded_frameworks external_frameworks).each do |attr|
value = send(attr)
if !(value.is_a?(Array) and value.all? { |x| File.exist?(x) and File.extname(x) == '.framework' })
App.fail "app.#{attr} should be an array of framework paths"
end
end
super
end
# Directory containing the platform bundles inside the Xcode install.
def platforms_dir
File.join(xcode_dir, 'Platforms')
end
# Path to a single platform bundle, e.g. '.../Platforms/iPhoneOS.platform'.
def platform_dir(platform)
File.join(platforms_dir, platform + '.platform')
end
# Determines the SDK version to build against. Scans the installed SDKs
# for the deploy platform and picks one supported by the shipped
# BridgeSupport metadata; when only newer SDKs are installed, falls back
# to the newest SDK that shares a major version with the newest
# supported one (also adjusting `deployment_target` unless the user set
# it) and warns. Fails when nothing usable is found. Memoized.
def sdk_version
@sdk_version ||= begin
versions = Dir.glob(File.join(platforms_dir, "#{deploy_platform}.platform/Developer/SDKs/#{deploy_platform}[1-9]*.sdk")).map do |path|
File.basename(path).scan(/#{deploy_platform}(.*)\.sdk/)[0][0]
end
if versions.size == 0
App.fail "Can't find an iOS SDK in `#{platforms_dir}'"
end
supported_version = supported_sdk_versions(versions)
unless supported_version
# We don't have BridgeSupport data for any of the available SDKs. So
# use the latest available SDK of which the major version is the same
# as the latest available BridgeSupport version.
supported_sdks = supported_versions.map do |version|
Util::Version.new(version)
end.sort.reverse
available_sdks = versions.map do |version|
Util::Version.new(version)
end.sort.reverse
available_sdks.each do |available_sdk|
major_version = available_sdk.segments.first
compatible_sdk = supported_sdks.find do |supported_sdk|
supported_sdk.segments.first == major_version
end
if compatible_sdk
# Never override a user's setting!
@deployment_target ||= compatible_sdk.to_s
supported_version = available_sdk.to_s
App.warn("The available SDK (#{available_sdk}) is newer than " \
"the latest available RubyMotion BridgeSupport " \
"metadata (#{compatible_sdk}). The `sdk_version` and " \
"`deployment_target` settings will be configured " \
"accordingly.")
break
end
end
end
supported_version || App.fail("The requested deployment target SDK " \
"is not available or supported by " \
"RubyMotion at this time.")
end
end
# Returns the ProductBuildVersion of the given platform's SDK, read from
# the SDK's SystemVersion.plist via PlistBuddy, falling back to
# `xcodebuild -version -sdk` when that read fails.
#
# Memoized per platform. The original memoized a single value
# (`@sdk_build_version ||= ...`) even though the method takes a
# `platform` argument, so a second call for a different platform
# silently returned the first platform's build version.
def sdk_build_version(platform)
@sdk_build_version ||= {}
@sdk_build_version[platform] ||= begin
sdk_path = sdk(platform)
plist_path = "#{sdk_path}/System/Library/CoreServices/SystemVersion.plist"
sdk_build_version = `/usr/libexec/PlistBuddy -c 'Print :ProductBuildVersion' "#{plist_path}" 2>&1`.strip
if !$?.success?
`#{locate_binary('xcodebuild')} -version -sdk '#{sdk_path}' ProductBuildVersion`.strip
else
sdk_build_version
end
end
end
# Deployment target defaults to the SDK version unless explicitly set.
def deployment_target
@deployment_target ||= sdk_version
end
# Shell-escaped path to the SDK of the given platform.
def sdk(platform)
path = File.join(platform_dir(platform), 'Developer/SDKs',
platform + sdk_version + '.sdk')
escape_path(path)
end
# Paths of the precompiled '<Framework>_stubs.o' objects shipped with
# RubyMotion for every (weak) framework the project links against.
def frameworks_stubs_objects(platform)
stubs = []
deps = frameworks + weak_frameworks
# Look in the 'bridgesupport_files' method for explanation
if deps.include?('ApplicationServices') && deployment_target == '10.7' && sdk_version != '10.7'
deps << 'CoreGraphics'
end
deps.uniq.each do |framework|
stubs_obj = File.join(datadir(sdk_version), platform, "#{framework}_stubs.o")
stubs << stubs_obj if File.exist?(stubs_obj)
end
stubs
end
# BridgeSupport metadata files for RubyMotion itself plus every (weak)
# framework in use; frameworks without metadata are dropped from the
# frameworks list with a warning. Memoized.
def bridgesupport_files
@bridgesupport_files ||= begin
bs_files = []
deps = ['RubyMotion'] + (frameworks + weak_frameworks).uniq
# In 10.7 CoreGraphics is a subframework of ApplicationServices. In 10.8 and up
# it is a system framework too. Since in 10.8 and up we ignore the subframework
# version of CoreGraphics and do not generate stubs or BS files for it, we have
# to add them manually if we use the ApplicationServices framework and target 10.7
if deps.include?('ApplicationServices') && deployment_target == '10.7' && sdk_version != '10.7'
deps << 'CoreGraphics'
end
deps << 'UIAutomation' if spec_mode
deps.each do |framework|
bs_path = File.join(datadir(sdk_version), 'BridgeSupport', framework + '.bridgesupport')
if File.exist?(bs_path)
bs_files << bs_path
elsif frameworks.include?(framework)
self.frameworks.delete(framework)
App.warn("Could not find .bridgesupport file for framework \"#{framework}\".")
end
end
bs_files
end
end
# Default build architectures per platform, derived from the kernel
# bitcode files ('kernel-<arch>.bc') shipped in the RubyMotion data dir.
def default_archs
h = {}
platforms.each do |platform|
h[platform] = Dir.glob(File.join(datadir, platform, '*.bc')).map do |path|
path.scan(/kernel-(.+).bc$/)[0][0]
end
end
h
end
# Architectures to build, keyed by platform name. Lazily defaulted;
# the returned hash is mutable (see `target`, which appends 'arm64').
def archs
@archs ||= default_archs
end
# '-arch <a>' compiler flags for every architecture of the platform.
def arch_flags(platform)
archs[platform].map { |x| "-arch #{x}" }.join(' ')
end
# Flags shared by compiler and linker: archs, sysroot, framework path.
def common_flags(platform)
"#{arch_flags(platform)} -isysroot \"#{unescape_path(sdk(platform))}\" -F#{sdk(platform)}/System/Library/Frameworks"
end
# C/C++ compiler flags: -O0 in development, -O3 otherwise; C99 for C.
def cflags(platform, cplusplus)
optz_level = development? ? '-O0' : '-O3'
"#{common_flags(platform)} #{optz_level} -fexceptions -fblocks" + (cplusplus ? '' : ' -std=c99') + ' -fmodules'
end
# Linker flags; position-independent executables are disabled.
def ldflags(platform)
common_flags(platform) + ' -Wl,-no_pie'
end
# @return [String] The application bundle name, excluding extname.
#
def bundle_name
name + (spec_mode ? '_spec' : '')
end
# @return [String] The application bundle filename, including extname.
#
def bundle_filename
bundle_name + '.app'
end
# Build products directory for a platform/deployment-target/build-mode
# combination, e.g. 'build/iPhoneOS-9.0-development'.
def versionized_build_dir(platform)
File.join(build_dir, platform + '-' + deployment_target + '-' + build_mode_name)
end
# Path of the dSYM bundle generated next to the application bundle.
def app_bundle_dsym(platform)
File.join(versionized_build_dir(platform), bundle_filename + '.dSYM')
end
# Abstract: subclasses return the archive file extension (e.g. '.ipa').
def archive_extension
raise "not implemented"
end
# Path of the distributable archive for the deploy platform.
def archive
File.join(versionized_build_dir(deploy_platform), bundle_name + archive_extension)
end
# Bundle identifier. Defaults to 'com.yourcompany.' plus the
# whitespace-stripped app name; gains a '_spec' suffix in spec mode.
def identifier
@identifier ||= "com.yourcompany.#{name.gsub(/\s/, '')}"
spec_mode ? @identifier + '_spec' : @identifier
end
# User-provided Info.plist entries; merged last, so they win.
def info_plist
@info_plist
end
# Developer-tools Info.plist entries; subclasses may override.
def dt_info_plist
{}
end
# Baseline Info.plist entries common to all Xcode-based projects.
def generic_info_plist
{
'BuildMachineOSBuild' => osx_host_build_version,
'CFBundleDevelopmentRegion' => 'en',
'CFBundleName' => name,
'CFBundleDisplayName' => name,
'CFBundleIdentifier' => identifier,
'CFBundleExecutable' => name,
'CFBundleInfoDictionaryVersion' => '6.0',
'CFBundlePackageType' => 'APPL',
'CFBundleShortVersionString' => (@short_version || @version),
'CFBundleSignature' => @bundle_signature,
'CFBundleVersion' => @version
}
end
end
# @return [Hash] A hash that contains all the various `Info.plist` data
# merged into one hash.
#
def merged_info_plist(platform)
generic_info_plist.merge(dt_info_plist).merge(info_plist)
end
# @param [String] platform
# The platform identifier that's being build for, such as
# `iPhoneSimulator`, `iPhoneOS`, or `MacOSX`.
#
#
# @return [String] A serialized version of the `merged_info_plist` hash.
#
def info_plist_data(platform)
Motion::PropertyList.to_s(merged_info_plist(platform))
end
# TODO
# * Add env vars from user.
# * Add optional Instruments template to use.
#
# Builds the property-list hash handed to Instruments for `rake profile`:
# launchable/bundle info, working directory, environment variables and,
# optionally, the trace template to use.
def profiler_config_plist(platform, args, template, builtin_templates, set_build_env = true)
working_dir = File.expand_path(versionized_build_dir(platform))
optional_data = {}
if template
template_path = nil
if File.exist?(template)
template_path = template
elsif !builtin_templates.grep(/#{template}/i).empty?
template = template.downcase
template_path = profiler_known_templates.find do |path|
File.basename(path, File.extname(path)).downcase == template
end
else
App.fail("Invalid Instruments template path or name.")
end
# Bug fix: the beta bundle is named 'Xcode-Beta.app' up to Xcode 7 but
# 'Xcode-beta.app' (lowercase 'b') since Xcode 8, so the check must be
# case-insensitive or the workaround below wrongly fires for Xcode 8
# betas.
unless xcode_dir.downcase.include?("xcode-beta.app")
# workaround for RM-599, RM-672 and RM-832. Xcode 6.x beta doesn't need this workaround
template_path = File.expand_path("#{xcode_dir}/../Applications/Instruments.app/Contents/Resources/templates/#{template_path}.tracetemplate")
end
optional_data['XrayTemplatePath'] = template_path
end
env = ENV.to_hash
if set_build_env
# Make the freshly built products discoverable by the profiled process.
env.merge!({
'DYLD_FRAMEWORK_PATH' => working_dir,
'DYLD_LIBRARY_PATH' => working_dir,
'__XCODE_BUILT_PRODUCTS_DIR_PATHS' => working_dir,
'__XPC_DYLD_FRAMEWORK_PATH' => working_dir,
'__XPC_DYLD_LIBRARY_PATH' => working_dir,
})
end
{
'CFBundleIdentifier' => identifier,
'absolutePathOfLaunchable' => File.expand_path(app_bundle_executable(platform)),
'argumentEntries' => (args || ''),
'workingDirectory' => working_dir,
'workspacePath' => '', # Normally: /path/to/Project.xcodeproj/project.xcworkspace
'environmentEntries' => env,
'optionalData' => {
'launchOptions' => {
'architectureType' => 1,
},
}.merge(optional_data),
}
end
# Lists the Instruments template names reported by `instruments -s`.
# NOTE(review): if the output lacks a 'Known Templates:' line, `index`
# returns nil and the `+ 1` raises NoMethodError — confirm this crash
# is acceptable for unexpected tool output.
def profiler_known_templates
# Get a list of just the templates (ignoring devices)
list = `#{locate_binary('instruments')} -s 2>&1`.strip.split("\n")
start = list.index('Known Templates:') + 1
list = list[start..-1]
# Only interested in the template (file base) names
list.map { |line| line.sub(/^\s*"/, '').sub(/",*$/, '') }
end
# Looks up the identifier (UDID) of the device named `device_name`
# running `target` (an OS-version prefix) in `instruments -s` output.
#
# @return [String, nil] The identifier, or nil when no device matches.
# The original returned the result of `.each` — the full array of
# output lines — on a miss, a truthy but meaningless value.
def profiler_config_device_identifier(device_name, target)
re = /#{device_name} \(#{target}.*\) \[(.+)\]/
`#{locate_binary('instruments')} -s 2>&1`.strip.split("\n").each do |line|
if m = re.match(line)
return m[1]
end
end
nil
end
# Contents of the PkgInfo file: the 'AAPL' package type followed by the
# four-character bundle signature.
def pkginfo_data
format('AAPL%s', @bundle_signature)
end
# Unless a certificate has been assigned by the user, this method tries to
# find the certificate for the current configuration, based on the platform
# prefix used in the certificate name and whether or not the current mode is
# set to release.
#
# @param [Array<String>] platform_prefixes
# The prefixes used in the certificate name, specified in the
# preferred order.
#
# @return [String] The name of the certificate.
#
def codesign_certificate(*platform_prefixes)
@codesign_certificate ||= begin
type = (distribution_mode ? 'Distribution' : 'Developer')
regex = /(#{platform_prefixes.join('|')}) #{type}/
certs = Util::CodeSign.identity_names(release?).grep(regex)
if platform_prefixes.size > 1
# Prefer certificates whose prefix appears earlier in the argument list.
certs = certs.sort do |x, y|
x_index = platform_prefixes.index(x.match(regex)[1])
y_index = platform_prefixes.index(y.match(regex)[1])
x_index <=> y_index
end
end
if certs.size == 0
App.fail "Cannot find any #{platform_prefixes.join('/')} #{type} " \
"certificate in the keychain."
elsif certs.size > 1
App.warn "Found #{certs.size} #{platform_prefixes.join('/')} " \
"#{type} certificates in the keychain. Set the " \
"`codesign_certificate' project setting to explicitly " \
"use one of (defaults to the first): #{certs.join(', ')}"
end
certs.first
end
end
# Generates a BridgeSupport metadata file for the given headers by
# invoking the bundled `gen_bridge_metadata` tool. NOTE(review): the
# `platform` argument is unused; the local platform's SDK is always
# used as sysroot — confirm intended.
def gen_bridge_metadata(platform, headers, bs_file, c_flags, exceptions=[])
# Instead of potentially passing hundreds of arguments to the
# `gen_bridge_metadata` command, which can lead to a 'too many arguments'
# error, we list them in a temp file and pass that to the command.
require 'tempfile'
headers_file = Tempfile.new('gen_bridge_metadata-headers-list')
headers.each { |header| headers_file.puts(header) }
headers_file.close # flush
# Prepare rest of options.
sdk_path = self.sdk(local_platform)
includes = ['-I.'] + headers.map { |header| "-I'#{File.dirname(header)}'" }.uniq
exceptions = exceptions.map { |x| "\"#{x}\"" }.join(' ')
c_flags = "#{c_flags} -isysroot '#{sdk_path}' #{bridgesupport_cflags} #{includes.join(' ')}"
sh "RUBYOPT='' '#{File.join(bindir, 'gen_bridge_metadata')}' #{bridgesupport_flags} --cflags \"#{c_flags}\" --headers \"#{headers_file.path}\" -o '#{bs_file}' #{ "-e #{exceptions}" if exceptions.length != 0}"
end
# Objective-C snippet defining the RUBYMOTION_ENV and RUBYMOTION_VERSION
# global constants at application boot.
def define_global_env_txt
"rb_define_global_const(\"RUBYMOTION_ENV\", @\"#{rubymotion_env_value}\");\nrb_define_global_const(\"RUBYMOTION_VERSION\", @\"#{Motion::Version}\");\n"
end
# Path of Xcode's SpriteKit TextureAtlas compiler, or nil when missing.
def spritekit_texture_atlas_compiler
path = File.join(xcode_dir, 'usr/bin/TextureAtlas')
File.exist?(path) ? path : nil
end
# All '*.xcassets' bundles found in the resources directories.
def assets_bundles
xcassets_bundles = []
resources_dirs.each do |dir|
if File.exist?(dir)
xcassets_bundles.concat(Dir.glob(File.join(dir, '*.xcassets')))
end
end
xcassets_bundles
end
# @return [String] The path to the `Info.plist` file that gets generated by
# compiling the asset bundles and contains the data that should be
# merged into the final `Info.plist` file.
#
def asset_bundle_partial_info_plist_path(platform)
File.expand_path(File.join(versionized_build_dir(platform), 'AssetCatalog-Info.plist'))
end
# @return [String, nil] The path to the asset bundle that contains
# application icons, if any.
#
def app_icons_asset_bundle
app_icons_asset_bundles = assets_bundles.map { |b| Dir.glob(File.join(b, '*.appiconset')) }.flatten
if app_icons_asset_bundles.size > 1
App.warn "Found #{app_icons_asset_bundles.size} app icon sets across all " \
"xcasset bundles. Only the first one (alphabetically) " \
"will be used."
end
app_icons_asset_bundles.sort.first
end
# @return [String, nil] The name of the application icon set, without any
# extension.
#
def app_icon_name_from_asset_bundle
if bundle = app_icons_asset_bundle
File.basename(bundle, '.appiconset')
end
end
# Assigns the application icon information, found in the `Info.plist`
# generated by compiling the asset bundles, to the configuration's `icons`.
#
# @return [void]
#
def add_images_from_asset_bundles(platform)
if app_icons_asset_bundle
path = asset_bundle_partial_info_plist_path(platform)
if File.exist?(path)
content = `/usr/libexec/PlistBuddy -c 'Print :CFBundleIcons:CFBundlePrimaryIcon:CFBundleIconFiles' "#{path}" 2>&1`.strip
if $?.success?
# Drop the 'Array {' / '}' wrapper lines PlistBuddy prints and keep
# the trimmed entries in between.
self.icons = content.split("\n")[1..-2].map(&:strip)
end
end
end
end
attr_reader :vendor_projects
# Registers a vendored (third-party) project to build and link in.
# Force-loads it unless :force_load is explicitly false.
def vendor_project(path, type, opts={})
opts[:force_load] = true unless opts[:force_load] == false
@vendor_projects << Motion::Project::Vendor.new(path, type, self, opts)
end
# Removes a previously registered vendored project by path.
def unvendor_project(path)
@vendor_projects.delete_if { |x| x.path == path }
end
# Cleans the project's build products, including those of vendored
# projects and sub-targets.
def clean_project
super
@vendor_projects.each { |vendor| vendor.clean(platforms) }
@targets.each { |target| target.clean }
end
attr_accessor :targets
# App Extensions are required to include a 64-bit slice for App Store
# submission, so do not exclude `arm64` by default.
#
# From https://developer.apple.com/library/prerelease/iOS/documentation/General/Conceptual/ExtensibilityPG/ExtensionCreation.html:
#
# NOTE ABOUT 64-BIT ARCHITECTURE
#
# An app extension target must include the arm64 (iOS) or x86_64
# architecture (OS X) in its Architectures build settings or it will be
# rejected by the App Store. Xcode includes the appropriate 64-bit
# architecture with its "Standard architectures" setting when you create a
# new app extension target.
#
# If your containing app target links to an embedded framework, the app
# must also include 64-bit architecture or it will be rejected by the App
# Store.
#
# From https://developer.apple.com/library/ios/documentation/General/Conceptual/ExtensibilityPG/ExtensionScenarios.html#//apple_ref/doc/uid/TP40014214-CH21-SW5
#
# A containing app that links to an embedded framework must include the
# arm64 (iOS) or x86_64 (OS X) architecture build setting or it will be
# rejected by the App Store.
#
# Registers a sub-target (:framework, :extension or :watchapp) located
# at `path`, forcing the arm64 slice for device builds as per the
# App Store requirements quoted above.
def target(path, type, opts={})
unless File.exist?(path)
App.fail "Could not find target of type '#{type}' at '#{path}'"
end
unless archs['iPhoneOS'].include?('arm64')
App.warn "Device builds of App Extensions and Frameworks are " \
"required to have a 64-bit slice for App Store submissions " \
"to be accepted."
App.warn "Your application will now have 64-bit enabled by default, " \
"be sure to properly test it on a 64-bit device."
archs['iPhoneOS'] << 'arm64'
end
case type
when :framework
opts[:load] = true unless opts[:load] == false
@targets << Motion::Project::FrameworkTarget.new(path, type, self, opts)
when :extension
@targets << Motion::Project::ExtensionTarget.new(path, type, self, opts)
when :watchapp
opts = { env: { "WATCHV2" => "1" } }.merge(opts)
@targets << Motion::Project::WatchTarget.new(path, type, self, opts)
else
App.fail("Unsupported target type '#{type}'")
end
end
# Creates a temporary file that lists all the symbols that the application
# (or extension) should not strip.
#
# At the moment these are only symbols that an iOS framework depends on.
#
# @return [String] Extra arguments for the `strip` command.
#
def strip_args
args = super
args << " -x"
frameworks = targets.select { |t| t.type == :framework }
required_symbols = frameworks.map(&:required_symbols).flatten.uniq.sort
unless required_symbols.empty?
require 'tempfile'
required_symbols_file = Tempfile.new('required-framework-symbols')
required_symbols.each { |symbol| required_symbols_file.puts(symbol) }
required_symbols_file.close
# Note: If the symbols file contains a symbol that is not present, or
# is present but undefined (U) in the executable to strip, the command
# fails. The '-i' option ignores this error.
args << " -i -s '#{required_symbols_file.path}'"
end
args
end
# Files fed to ctags: core and vendored BridgeSupport metadata plus the
# project's source files.
def ctags_files
ctags_files = bridgesupport_files
ctags_files += vendor_projects.map { |p| Dir.glob(File.join(p.path, '*.bridgesupport')) }.flatten
ctags_files + files.flatten
end
# ctags configuration shipped with RubyMotion for BridgeSupport files.
def ctags_config_file
File.join(motiondir, 'data', 'bridgesupport-ctags.cfg')
end
# Returns the local TCP port used by the REPL for the given platform,
# persisting it under the versionized build directory so subsequent
# builds reuse the same port.
#
# @return [Integer] The REPL port.
#
# Fixes two defects in the original: the port read back from the file
# was returned as a String (while a freshly allocated one was an
# Integer), and the memoized value ignored the `platform` argument.
def local_repl_port(platform)
@local_repl_port ||= {}
@local_repl_port[platform] ||= begin
ports_file = File.join(versionized_build_dir(platform), 'repl_ports.txt')
if File.exist?(ports_file)
File.read(ports_file).to_i
else
# Bind to port 0 so the kernel picks a free ephemeral port.
local_repl_port = TCPServer.new('localhost', 0).addr[1]
File.open(ports_file, 'w') { |io| io.write(local_repl_port.to_s) }
local_repl_port
end
end
end
end
end; end
[Xcode 8] recognize the path of the Xcode 8 beta for 'rake profile'
Since the Xcode 8 beta, the Xcode beta path changed from Xcode-Beta.app to Xcode-beta.app.
# encoding: utf-8
# Copyright (c) 2012, HipByte SPRL and contributors
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
require 'motion/project/config'
require 'motion/util/code_sign'
require 'motion/project/target'
require 'socket'
module Motion; module Project;
class XcodeConfig < Config
variable :xcode_dir, :sdk_version, :deployment_target, :frameworks,
:weak_frameworks, :embedded_frameworks, :external_frameworks, :framework_search_paths,
:libs, :identifier, :codesign_certificate, :short_version, :entitlements, :delegate_class, :embed_dsym,
:version
# Initializes an Xcode-based project configuration with empty framework,
# library and target lists and sensible defaults (version '1.0',
# delegate class 'AppDelegate', dSYM embedding only in development).
def initialize(project_dir, build_mode)
super
@info_plist = {}
@frameworks = []
@weak_frameworks = []
@embedded_frameworks = []
@external_frameworks = []
@framework_search_paths = []
@libs = []
@targets = []
@bundle_signature = '????'
@short_version = nil
@entitlements = {}
@delegate_class = 'AppDelegate'
@spec_mode = false
@embed_dsym = (development? ? true : false)
@vendor_projects = []
@version = '1.0'
end
# Overrides the Xcode directory and resets the memoized Xcode version so
# it is re-read from the new location.
def xcode_dir=(xcode_dir)
@xcode_version = nil
@xcode_dir = unescape_path(File.path(xcode_dir))
end
def xcode_dir
@xcode_dir ||= begin
if ENV['RM_TARGET_XCODE_DIR']
ENV['RM_TARGET_XCODE_DIR']
else
xcodeselect = '/usr/bin/xcode-select'
xcode_dir = unescape_path(`#{xcodeselect} -print-path`.strip)
App.fail "Can't locate any version of Xcode on the system." unless File.exist?(xcode_dir)
xcode_dir
end
end
end
def xcode_version
@xcode_version ||= begin
failed = false
vers = `/usr/libexec/PlistBuddy -c "Print :CFBundleShortVersionString" "#{xcode_dir}/../Info.plist"`.strip
failed = true if !$?.success?
build = `/usr/libexec/PlistBuddy -c "Print :ProductBuildVersion" "#{xcode_dir}/../version.plist"`.strip
failed = true if !$?.success?
if failed
txt = `#{locate_binary('xcodebuild')} -version`
vers = txt.scan(/Xcode\s(.+)/)[0][0]
build = txt.scan(/(BuildVersion:|Build version)\s(.+)/)[0][1]
end
[vers, build]
end
end
def platforms; raise; end
def local_platform; raise; end
def deploy_platform; raise; end
def validate
# Xcode version
App.fail "Xcode 6.x or greater is required" if Util::Version.new(xcode_version[0]) < Util::Version.new('6.0')
# sdk_version
platforms.each do |platform|
sdk_path = File.join(platforms_dir, platform + '.platform',
"Developer/SDKs/#{platform}#{sdk_version}.sdk")
unless File.exist?(sdk_path)
App.fail "Can't locate #{platform} SDK #{sdk_version} at `#{sdk_path}'"
end
end
# deployment_target
if Util::Version.new(deployment_target) > Util::Version.new(sdk_version)
App.fail "Deployment target `#{deployment_target}' must be equal or lesser than SDK version `#{sdk_version}'"
end
unless File.exist?(datadir)
App.fail "iOS deployment target #{deployment_target} is not supported by this version of RubyMotion"
end
# embedded_frameworks
%w(embedded_frameworks external_frameworks).each do |attr|
value = send(attr)
if !(value.is_a?(Array) and value.all? { |x| File.exist?(x) and File.extname(x) == '.framework' })
App.fail "app.#{attr} should be an array of framework paths"
end
end
super
end
def platforms_dir
File.join(xcode_dir, 'Platforms')
end
def platform_dir(platform)
File.join(platforms_dir, platform + '.platform')
end
def sdk_version
@sdk_version ||= begin
versions = Dir.glob(File.join(platforms_dir, "#{deploy_platform}.platform/Developer/SDKs/#{deploy_platform}[1-9]*.sdk")).map do |path|
File.basename(path).scan(/#{deploy_platform}(.*)\.sdk/)[0][0]
end
if versions.size == 0
App.fail "Can't find an iOS SDK in `#{platforms_dir}'"
end
supported_version = supported_sdk_versions(versions)
unless supported_version
# We don't have BridgeSupport data for any of the available SDKs. So
# use the latest available SDK of which the major version is the same
# as the latest available BridgeSupport version.
supported_sdks = supported_versions.map do |version|
Util::Version.new(version)
end.sort.reverse
available_sdks = versions.map do |version|
Util::Version.new(version)
end.sort.reverse
available_sdks.each do |available_sdk|
major_version = available_sdk.segments.first
compatible_sdk = supported_sdks.find do |supported_sdk|
supported_sdk.segments.first == major_version
end
if compatible_sdk
# Never override a user's setting!
@deployment_target ||= compatible_sdk.to_s
supported_version = available_sdk.to_s
App.warn("The available SDK (#{available_sdk}) is newer than " \
"the latest available RubyMotion BridgeSupport " \
"metadata (#{compatible_sdk}). The `sdk_version` and " \
"`deployment_target` settings will be configured " \
"accordingly.")
break
end
end
end
supported_version || App.fail("The requested deployment target SDK " \
"is not available or supported by " \
"RubyMotion at this time.")
end
end
# Returns the ProductBuildVersion of the given platform's SDK, read from
# the SDK's SystemVersion.plist via PlistBuddy, falling back to
# `xcodebuild -version -sdk` when that read fails.
#
# Memoized per platform. The original memoized a single value
# (`@sdk_build_version ||= ...`) even though the method takes a
# `platform` argument, so a second call for a different platform
# silently returned the first platform's build version.
def sdk_build_version(platform)
@sdk_build_version ||= {}
@sdk_build_version[platform] ||= begin
sdk_path = sdk(platform)
plist_path = "#{sdk_path}/System/Library/CoreServices/SystemVersion.plist"
sdk_build_version = `/usr/libexec/PlistBuddy -c 'Print :ProductBuildVersion' "#{plist_path}" 2>&1`.strip
if !$?.success?
`#{locate_binary('xcodebuild')} -version -sdk '#{sdk_path}' ProductBuildVersion`.strip
else
sdk_build_version
end
end
end
def deployment_target
@deployment_target ||= sdk_version
end
def sdk(platform)
path = File.join(platform_dir(platform), 'Developer/SDKs',
platform + sdk_version + '.sdk')
escape_path(path)
end
def frameworks_stubs_objects(platform)
stubs = []
deps = frameworks + weak_frameworks
# Look in the 'bridgesupport_files' method for explanation
if deps.include?('ApplicationServices') && deployment_target == '10.7' && sdk_version != '10.7'
deps << 'CoreGraphics'
end
deps.uniq.each do |framework|
stubs_obj = File.join(datadir(sdk_version), platform, "#{framework}_stubs.o")
stubs << stubs_obj if File.exist?(stubs_obj)
end
stubs
end
def bridgesupport_files
@bridgesupport_files ||= begin
bs_files = []
deps = ['RubyMotion'] + (frameworks + weak_frameworks).uniq
# In 10.7 CoreGraphics is a subframework of ApplicationServices. In 10.8 and up
# it is a system framework too. Since in 10.8 and up we ignore the subframework
# version of CoreGraphics and do not generate stubs or BS files for it, we have
# to add them manually if we use the ApplicationServices framework and target 10.7
if deps.include?('ApplicationServices') && deployment_target == '10.7' && sdk_version != '10.7'
deps << 'CoreGraphics'
end
deps << 'UIAutomation' if spec_mode
deps.each do |framework|
bs_path = File.join(datadir(sdk_version), 'BridgeSupport', framework + '.bridgesupport')
if File.exist?(bs_path)
bs_files << bs_path
elsif frameworks.include?(framework)
self.frameworks.delete(framework)
App.warn("Could not find .bridgesupport file for framework \"#{framework}\".")
end
end
bs_files
end
end
def default_archs
h = {}
platforms.each do |platform|
h[platform] = Dir.glob(File.join(datadir, platform, '*.bc')).map do |path|
path.scan(/kernel-(.+).bc$/)[0][0]
end
end
h
end
def archs
@archs ||= default_archs
end
def arch_flags(platform)
archs[platform].map { |x| "-arch #{x}" }.join(' ')
end
def common_flags(platform)
"#{arch_flags(platform)} -isysroot \"#{unescape_path(sdk(platform))}\" -F#{sdk(platform)}/System/Library/Frameworks"
end
def cflags(platform, cplusplus)
optz_level = development? ? '-O0' : '-O3'
"#{common_flags(platform)} #{optz_level} -fexceptions -fblocks" + (cplusplus ? '' : ' -std=c99') + ' -fmodules'
end
def ldflags(platform)
common_flags(platform) + ' -Wl,-no_pie'
end
# @return [String] The application bundle name, excluding extname.
#
def bundle_name
name + (spec_mode ? '_spec' : '')
end
# @return [String] The application bundle filename, including extname.
#
def bundle_filename
bundle_name + '.app'
end
def versionized_build_dir(platform)
File.join(build_dir, platform + '-' + deployment_target + '-' + build_mode_name)
end
def app_bundle_dsym(platform)
File.join(versionized_build_dir(platform), bundle_filename + '.dSYM')
end
def archive_extension
raise "not implemented"
end
def archive
File.join(versionized_build_dir(deploy_platform), bundle_name + archive_extension)
end
def identifier
@identifier ||= "com.yourcompany.#{name.gsub(/\s/, '')}"
spec_mode ? @identifier + '_spec' : @identifier
end
def info_plist
@info_plist
end
def dt_info_plist
{}
end
def generic_info_plist
{
'BuildMachineOSBuild' => osx_host_build_version,
'CFBundleDevelopmentRegion' => 'en',
'CFBundleName' => name,
'CFBundleDisplayName' => name,
'CFBundleIdentifier' => identifier,
'CFBundleExecutable' => name,
'CFBundleInfoDictionaryVersion' => '6.0',
'CFBundlePackageType' => 'APPL',
'CFBundleShortVersionString' => (@short_version || @version),
'CFBundleSignature' => @bundle_signature,
'CFBundleVersion' => @version
}
end
# @return [Hash] A hash that contains all the various `Info.plist` data
# merged into one hash.
#
def merged_info_plist(platform)
generic_info_plist.merge(dt_info_plist).merge(info_plist)
end
# @param [String] platform
# The platform identifier that's being build for, such as
# `iPhoneSimulator`, `iPhoneOS`, or `MacOSX`.
#
#
# @return [String] A serialized version of the `merged_info_plist` hash.
#
def info_plist_data(platform)
Motion::PropertyList.to_s(merged_info_plist(platform))
end
# TODO
# * Add env vars from user.
# * Add optional Instruments template to use.
#
# Builds the property-list hash handed to Instruments for `rake profile`:
# launchable/bundle info, working directory, environment variables and,
# optionally, the trace template to use.
def profiler_config_plist(platform, args, template, builtin_templates, set_build_env = true)
working_dir = File.expand_path(versionized_build_dir(platform))
optional_data = {}
if template
template_path = nil
if File.exist?(template)
template_path = template
elsif !builtin_templates.grep(/#{template}/i).empty?
template = template.downcase
template_path = profiler_known_templates.find do |path|
File.basename(path, File.extname(path)).downcase == template
end
else
App.fail("Invalid Instruments template path or name.")
end
# Case-insensitive check: the beta bundle is 'Xcode-Beta.app' up to
# Xcode 7 but 'Xcode-beta.app' (lowercase 'b') since Xcode 8.
unless xcode_dir.downcase.include?("xcode-beta.app")
# workaround for RM-599, RM-672 and RM-832. Xcode 6.x beta doesn't need this workaround
template_path = File.expand_path("#{xcode_dir}/../Applications/Instruments.app/Contents/Resources/templates/#{template_path}.tracetemplate")
end
optional_data['XrayTemplatePath'] = template_path
end
env = ENV.to_hash
if set_build_env
# Make the freshly built products discoverable by the profiled process.
env.merge!({
'DYLD_FRAMEWORK_PATH' => working_dir,
'DYLD_LIBRARY_PATH' => working_dir,
'__XCODE_BUILT_PRODUCTS_DIR_PATHS' => working_dir,
'__XPC_DYLD_FRAMEWORK_PATH' => working_dir,
'__XPC_DYLD_LIBRARY_PATH' => working_dir,
})
end
{
'CFBundleIdentifier' => identifier,
'absolutePathOfLaunchable' => File.expand_path(app_bundle_executable(platform)),
'argumentEntries' => (args or ''),
'workingDirectory' => working_dir,
'workspacePath' => '', # Normally: /path/to/Project.xcodeproj/project.xcworkspace
'environmentEntries' => env,
'optionalData' => {
'launchOptions' => {
'architectureType' => 1,
},
}.merge(optional_data),
}
end
def profiler_known_templates
# Get a list of just the templates (ignoring devices)
list = `#{locate_binary('instruments')} -s 2>&1`.strip.split("\n")
start = list.index('Known Templates:') + 1
list = list[start..-1]
# Only interested in the template (file base) names
list.map { |line| line.sub(/^\s*"/, '').sub(/",*$/, '') }
end
# Looks up the identifier (UDID) of the device named `device_name`
# running `target` (an OS-version prefix) in `instruments -s` output.
#
# @return [String, nil] The identifier, or nil when no device matches.
# The original returned the result of `.each` — the full array of
# output lines — on a miss, a truthy but meaningless value.
def profiler_config_device_identifier(device_name, target)
re = /#{device_name} \(#{target}.*\) \[(.+)\]/
`#{locate_binary('instruments')} -s 2>&1`.strip.split("\n").each do |line|
if m = re.match(line)
return m[1]
end
end
nil
end
def pkginfo_data
"AAPL#{@bundle_signature}"
end
# Unless a certificate has been assigned by the user, this method tries to
# find the certificate for the current configuration, based on the platform
# prefix used in the certificate name and whether or not the current mode is
# set to release.
#
# @param [Array<String>] platform_prefixes
# The prefixes used in the certificate name, specified in the
# preferred order.
#
# @return [String] The name of the certificate.
#
def codesign_certificate(*platform_prefixes)
@codesign_certificate ||= begin
type = (distribution_mode ? 'Distribution' : 'Developer')
regex = /(#{platform_prefixes.join('|')}) #{type}/
certs = Util::CodeSign.identity_names(release?).grep(regex)
if platform_prefixes.size > 1
certs = certs.sort do |x, y|
x_index = platform_prefixes.index(x.match(regex)[1])
y_index = platform_prefixes.index(y.match(regex)[1])
x_index <=> y_index
end
end
if certs.size == 0
App.fail "Cannot find any #{platform_prefixes.join('/')} #{type} " \
"certificate in the keychain."
elsif certs.size > 1
App.warn "Found #{certs.size} #{platform_prefixes.join('/')} " \
"#{type} certificates in the keychain. Set the " \
"`codesign_certificate' project setting to explicitly " \
"use one of (defaults to the first): #{certs.join(', ')}"
end
certs.first
end
end
def gen_bridge_metadata(platform, headers, bs_file, c_flags, exceptions=[])
# Instead of potentially passing hundreds of arguments to the
# `gen_bridge_metadata` command, which can lead to a 'too many arguments'
# error, we list them in a temp file and pass that to the command.
require 'tempfile'
headers_file = Tempfile.new('gen_bridge_metadata-headers-list')
headers.each { |header| headers_file.puts(header) }
headers_file.close # flush
# Prepare rest of options.
sdk_path = self.sdk(local_platform)
includes = ['-I.'] + headers.map { |header| "-I'#{File.dirname(header)}'" }.uniq
exceptions = exceptions.map { |x| "\"#{x}\"" }.join(' ')
c_flags = "#{c_flags} -isysroot '#{sdk_path}' #{bridgesupport_cflags} #{includes.join(' ')}"
sh "RUBYOPT='' '#{File.join(bindir, 'gen_bridge_metadata')}' #{bridgesupport_flags} --cflags \"#{c_flags}\" --headers \"#{headers_file.path}\" -o '#{bs_file}' #{ "-e #{exceptions}" if exceptions.length != 0}"
end
def define_global_env_txt
"rb_define_global_const(\"RUBYMOTION_ENV\", @\"#{rubymotion_env_value}\");\nrb_define_global_const(\"RUBYMOTION_VERSION\", @\"#{Motion::Version}\");\n"
end
def spritekit_texture_atlas_compiler
path = File.join(xcode_dir, 'usr/bin/TextureAtlas')
File.exist?(path) ? path : nil
end
def assets_bundles
xcassets_bundles = []
resources_dirs.each do |dir|
if File.exist?(dir)
xcassets_bundles.concat(Dir.glob(File.join(dir, '*.xcassets')))
end
end
xcassets_bundles
end
# @return [String] The path to the `Info.plist` file that gets generated by
# compiling the asset bundles and contains the data that should be
# merged into the final `Info.plist` file.
#
def asset_bundle_partial_info_plist_path(platform)
File.expand_path(File.join(versionized_build_dir(platform), 'AssetCatalog-Info.plist'))
end
# @return [String, nil] The path to the asset bundle that contains
# application icons, if any.
#
def app_icons_asset_bundle
app_icons_asset_bundles = assets_bundles.map { |b| Dir.glob(File.join(b, '*.appiconset')) }.flatten
if app_icons_asset_bundles.size > 1
App.warn "Found #{app_icons_asset_bundles.size} app icon sets across all " \
"xcasset bundles. Only the first one (alphabetically) " \
"will be used."
end
app_icons_asset_bundles.sort.first
end
# @return [String, nil] The name of the application icon set, without any
# extension.
#
def app_icon_name_from_asset_bundle
if bundle = app_icons_asset_bundle
File.basename(bundle, '.appiconset')
end
end
# Assigns the application icon information, found in the `Info.plist`
# generated by compiling the asset bundles, to the configuration's `icons`.
#
# @return [void]
#
def add_images_from_asset_bundles(platform)
if app_icons_asset_bundle
path = asset_bundle_partial_info_plist_path(platform)
if File.exist?(path)
content = `/usr/libexec/PlistBuddy -c 'Print :CFBundleIcons:CFBundlePrimaryIcon:CFBundleIconFiles' "#{path}" 2>&1`.strip
if $?.success?
self.icons = content.split("\n")[1..-2].map(&:strip)
end
end
end
end
attr_reader :vendor_projects
def vendor_project(path, type, opts={})
opts[:force_load] = true unless opts[:force_load] == false
@vendor_projects << Motion::Project::Vendor.new(path, type, self, opts)
end
def unvendor_project(path)
@vendor_projects.delete_if { |x| x.path == path }
end
def clean_project
super
@vendor_projects.each { |vendor| vendor.clean(platforms) }
@targets.each { |target| target.clean }
end
attr_accessor :targets
# App Extensions are required to include a 64-bit slice for App Store
# submission, so do not exclude `arm64` by default.
#
# From https://developer.apple.com/library/prerelease/iOS/documentation/General/Conceptual/ExtensibilityPG/ExtensionCreation.html:
#
# NOTE ABOUT 64-BIT ARCHITECTURE
#
# An app extension target must include the arm64 (iOS) or x86_64
# architecture (OS X) in its Architectures build settings or it will be
# rejected by the App Store. Xcode includes the appropriate 64-bit
# architecture with its "Standard architectures" setting when you create a
# new app extension target.
#
# If your containing app target links to an embedded framework, the app
# must also include 64-bit architecture or it will be rejected by the App
# Store.
#
# From https://developer.apple.com/library/ios/documentation/General/Conceptual/ExtensibilityPG/ExtensionScenarios.html#//apple_ref/doc/uid/TP40014214-CH21-SW5
#
# A containing app that links to an embedded framework must include the
# arm64 (iOS) or x86_64 (OS X) architecture build setting or it will be
# rejected by the App Store.
#
def target(path, type, opts={})
unless File.exist?(path)
App.fail "Could not find target of type '#{type}' at '#{path}'"
end
unless archs['iPhoneOS'].include?('arm64')
App.warn "Device builds of App Extensions and Frameworks are " \
"required to have a 64-bit slice for App Store submissions " \
"to be accepted."
App.warn "Your application will now have 64-bit enabled by default, " \
"be sure to properly test it on a 64-bit device."
archs['iPhoneOS'] << 'arm64'
end
case type
when :framework
opts[:load] = true unless opts[:load] == false
@targets << Motion::Project::FrameworkTarget.new(path, type, self, opts)
when :extension
@targets << Motion::Project::ExtensionTarget.new(path, type, self, opts)
when :watchapp
opts = { env: { "WATCHV2" => "1" } }.merge(opts)
@targets << Motion::Project::WatchTarget.new(path, type, self, opts)
else
App.fail("Unsupported target type '#{type}'")
end
end
# Creates a temporary file that lists all the symbols that the application
# (or extension) should not strip.
#
# At the moment these are only symbols that an iOS framework depends on.
#
# @return [String] Extra arguments for the `strip` command.
#
def strip_args
args = super
args << " -x"
frameworks = targets.select { |t| t.type == :framework }
required_symbols = frameworks.map(&:required_symbols).flatten.uniq.sort
unless required_symbols.empty?
require 'tempfile'
required_symbols_file = Tempfile.new('required-framework-symbols')
required_symbols.each { |symbol| required_symbols_file.puts(symbol) }
required_symbols_file.close
# Note: If the symbols file contains a symbol that is not present, or
# is present but undefined (U) in the executable to strip, the command
# fails. The '-i' option ignores this error.
args << " -i -s '#{required_symbols_file.path}'"
end
args
end
def ctags_files
ctags_files = bridgesupport_files
ctags_files += vendor_projects.map { |p| Dir.glob(File.join(p.path, '*.bridgesupport')) }.flatten
ctags_files + files.flatten
end
def ctags_config_file
File.join(motiondir, 'data', 'bridgesupport-ctags.cfg')
end
def local_repl_port(platform)
@local_repl_port ||= begin
ports_file = File.join(versionized_build_dir(platform), 'repl_ports.txt')
if File.exist?(ports_file)
File.read(ports_file)
else
local_repl_port = TCPServer.new('localhost', 0).addr[1]
File.open(ports_file, 'w') { |io| io.write(local_repl_port.to_s) }
local_repl_port
end
end
end
end
end; end
|
module Mutant
class Reporter
class CLI
# CLI runner status printer base class
class Printer
include AbstractType, Delegator, Adamantium::Flat, Concord.new(:output, :object)
delegate(:success?)
NL = "\n".freeze
# Run printer on object to output
#
# @param [IO] output
# @param [Object] object
#
# @return [self]
#
# @api private
#
def self.run(output, object)
new(output, object).run
end
# Run printer
#
# @return [self]
#
# @api private
#
abstract_method :run
private
# Return status color
#
# @return [Color]
#
# @api private
#
def status_color
success? ? Color::GREEN : Color::RED
end
# Visit a collection of objects
#
# @return [Class::Printer] printer
# @return [Enumerable<Object>] collection
#
# @return [undefined]
#
# @api private
#
def visit_collection(printer, collection)
collection.each do |object|
visit(printer, object)
end
end
# Visit object
#
# @param [Class::Printer] printer
# @param [Object] object
#
# @return [undefined]
#
# @api private
#
def visit(printer, object)
printer.run(output, object)
end
# Print an info line to output
#
# @return [undefined]
#
# @api private
#
def info(string, *arguments)
puts(format(string, *arguments))
end
# Print a status line to output
#
# @return [undefined]
#
# @api private
#
def status(string, *arguments)
puts(colorize(status_color, format(string, *arguments)))
end
# Print a line to output
#
# @return [undefined]
#
# @api private
#
def puts(string)
output.puts(string)
end
# Colorize message
#
# @param [Color] color
# @param [String] message
#
# @api private
#
# @return [String]
# if color is enabled
# unmodified message otherwise
#
def colorize(color, message)
color = Color::NONE unless tty?
color.format(message)
end
# Test if output is a tty
#
# @return [Boolean]
#
# @api private
#
def tty?
output.tty?
end
# Test if output can be colored
#
# @return [Boolean]
#
# @api private
#
alias_method :color?, :tty?
# Printer for runner status
class Status < self
delegate(:active_jobs, :payload)
# Print progress for collector
#
# @return [self]
#
# @api private
#
def run
visit(EnvProgress, payload)
info('Active subjects: %d', active_subject_results.length)
visit_collection(SubjectProgress, active_subject_results)
job_status
self
end
private
# Print worker status
#
# @return [undefined]
#
# @api private
#
def job_status
return if active_jobs.empty?
info('Active Jobs:')
active_jobs.sort_by(&:index).each do |job|
info('%d: %s', job.index, job.payload.identification)
end
end
# Return active subject results
#
# @return [Array<Result::Subject>]
#
# @api private
#
def active_subject_results
active_mutation_jobs = active_jobs.select { |job| job.payload.kind_of?(Mutation) }
active_subjects = active_mutation_jobs.map(&:payload).flat_map(&:subject).to_set
payload.subject_results.select do |subject_result|
active_subjects.include?(subject_result.subject)
end
end
end # Status
# Progress printer for configuration
class Config < self
# Report configuration
#
# @param [Mutant::Config] config
#
# @return [self]
#
# @api private
#
# rubocop:disable AbcSize
#
def run
info 'Mutant configuration:'
info 'Matcher: %s', object.matcher.inspect
info 'Integration: %s', object.integration.name
info 'Expect Coverage: %0.2f%%', (object.expected_coverage * 100)
info 'Jobs: %d', object.jobs
info 'Includes: %s', object.includes.inspect
info 'Requires: %s', object.requires.inspect
self
end
end # Config
# Env progress printer
class EnvProgress < self
delegate(
:coverage,
:amount_subjects,
:amount_mutations,
:amount_mutations_alive,
:amount_mutations_killed,
:runtime,
:killtime,
:overhead,
:env
)
# Run printer
#
# @return [self]
#
# @api private
#
# rubocop:disable MethodLength
#
def run
visit(Config, env.config)
info 'Subjects: %s', amount_subjects
info 'Mutations: %s', amount_mutations
info 'Kills: %s', amount_mutations_killed
info 'Alive: %s', amount_mutations_alive
info 'Runtime: %0.2fs', runtime
info 'Killtime: %0.2fs', killtime
info 'Overhead: %0.2f%%', overhead_percent
status 'Coverage: %0.2f%%', coverage_percent
status 'Expected: %0.2f%%', (env.config.expected_coverage * 100)
self
end
private
# Return coverage percent
#
# @return [Float]
#
# @api private
#
def coverage_percent
(coverage * 100).to_f
end
# Return overhead percent
#
# @return [Float]
#
# @api private
#
def overhead_percent
(overhead / killtime) * 100
end
end # EnvProgress
# Full env result reporter
class EnvResult < self
delegate(:failed_subject_results)
# Run printer
#
# @return [self]
#
# @api private
#
def run
visit_collection(SubjectResult, failed_subject_results)
visit(EnvProgress, object)
self
end
end # EnvResult
# Subject report printer
class SubjectResult < self
delegate :subject, :failed_mutations, :tests
# Run report printer
#
# @return [self]
#
# @api private
#
def run
status(subject.identification)
tests.each do |test|
puts("- #{test.identification}")
end
visit_collection(MutationResult, object.alive_mutation_results)
self
end
end # Subject
# Printer for mutation progress results
class MutationProgressResult < self
SUCCESS = '.'.freeze
FAILURE = 'F'.freeze
# Run printer
#
# @return [self]
#
# @api private
#
def run
char(success? ? SUCCESS : FAILURE)
end
private
# Write colorized char
#
# @param [String] char
#
# @return [undefined]
#
# @api private
#
def char(char)
output.write(colorize(status_color, char))
end
end # MutationProgressResult
# Reporter for progressive output format on scheduler Status objects
class StatusProgressive < self
FORMAT = '(%02d/%02d) %3d%% - killtime: %0.02fs runtime: %0.02fs overhead: %0.02fs'.freeze
delegate(
:coverage,
:runtime,
:amount_mutations_killed,
:amount_mutations,
:amount_mutation_results,
:killtime,
:overhead
)
# Run printer
#
# @return [self]
#
# @api private
#
def run
status(
FORMAT,
amount_mutations_killed,
amount_mutations,
coverage * 100,
killtime,
runtime,
overhead
)
self
end
private
# Return object being printed
#
# @return [Result::Env]
#
# @api private
#
def object
super().payload
end
end
# Reporter for subject progress
class SubjectProgress < self
FORMAT = '(%02d/%02d) %3d%% - killtime: %0.02fs runtime: %0.02fs overhead: %0.02fs'.freeze
delegate(
:tests,
:subject,
:coverage,
:runtime,
:amount_mutations_killed,
:amount_mutations,
:amount_mutation_results,
:killtime,
:overhead
)
# Run printer
#
# @return [self]
#
# @api private
#
def run
puts("#{subject.identification} mutations: #{amount_mutations}")
print_tests
print_mutation_results
print_progress_bar_finish
print_stats
self
end
private
# Print stats
#
# @return [undefined]
#
# @api private
#
def print_stats
status(
FORMAT,
amount_mutations_killed,
amount_mutations,
coverage * 100,
killtime,
runtime,
overhead
)
end
# Print tests
#
# @return [undefined]
#
# @api private
#
def print_tests
tests.each do |test|
puts "- #{test.identification}"
end
end
# Print progress bar finish
#
# @return [undefined]
#
# @api private
#
def print_progress_bar_finish
puts(NL) unless amount_mutation_results.zero?
end
# Print mutation results
#
# @return [undefined]
#
# @api private
#
def print_mutation_results
visit_collection(MutationProgressResult, object.mutation_results)
end
end # Subject
# Reporter for mutation results
class MutationResult < self
delegate :mutation, :test_result
DIFF_ERROR_MESSAGE =
'BUG: Mutation NOT resulted in exactly one diff hunk. Please report a reproduction!'.freeze
MAP = {
Mutant::Mutation::Evil => :evil_details,
Mutant::Mutation::Neutral => :neutral_details,
Mutant::Mutation::Noop => :noop_details
}.freeze
NEUTRAL_MESSAGE =
"--- Neutral failure ---\n" \
"Original code was inserted unmutated. And the test did NOT PASS.\n" \
"Your tests do not pass initially or you found a bug in mutant / unparser.\n" \
"Subject AST:\n" \
"%s\n" \
"Unparsed Source:\n" \
"%s\n" \
"Test Result:\n".freeze
NOOP_MESSAGE =
"---- Noop failure -----\n" \
"No code was inserted. And the test did NOT PASS.\n" \
"This is typically a problem of your specs not passing unmutated.\n" \
"Test Result:\n".freeze
FOOTER = '-----------------------'.freeze
# Run report printer
#
# @return [self]
#
# @api private
#
def run
puts(mutation.identification)
print_details
puts(FOOTER)
self
end
private
# Return details
#
# @return [undefined]
#
# @api private
#
def print_details
send(MAP.fetch(mutation.class))
end
# Return evil details
#
# @return [String]
#
# @api private
#
def evil_details
original, current = mutation.original_source, mutation.source
diff = Mutant::Diff.build(original, current)
diff = color? ? diff.colorized_diff : diff.diff
puts(diff || ['Original source:', original, 'Mutated Source:', current, DIFF_ERROR_MESSAGE])
end
# Noop details
#
# @return [String]
#
# @api private
#
def noop_details
info(NOOP_MESSAGE)
visit_test_result
end
# Neutral details
#
# @return [String]
#
# @api private
#
def neutral_details
info(NEUTRAL_MESSAGE, mutation.subject.node.inspect, mutation.source)
visit_test_result
end
# Visit failed test results
#
# @return [undefined]
#
# @api private
#
def visit_test_result
visit(TestResult, test_result)
end
end # MutationResult
# Test result reporter
class TestResult < self
delegate :tests, :runtime
# Run test result reporter
#
# @return [self]
#
# @api private
#
def run
status('- %d @ runtime: %s', tests.length, runtime)
tests.each do |test|
puts(" - #{test.identification}")
end
puts('Test Output:')
puts(object.output)
end
# Test if test result is successful
#
# Only used to determine color.
#
# @return [false]
#
# @api private
#
def success?
false
end
end # TestResult
end # Printer
end # CLI
end # Reporter
end # Mutant
Remove unnecessary #to_f call from coverage percent method
module Mutant
class Reporter
class CLI
# CLI runner status printer base class
class Printer
include AbstractType, Delegator, Adamantium::Flat, Concord.new(:output, :object)
delegate(:success?)
NL = "\n".freeze
# Run printer on object to output
#
# @param [IO] output
# @param [Object] object
#
# @return [self]
#
# @api private
#
def self.run(output, object)
new(output, object).run
end
# Run printer
#
# @return [self]
#
# @api private
#
abstract_method :run
private
# Return status color
#
# @return [Color]
#
# @api private
#
def status_color
success? ? Color::GREEN : Color::RED
end
# Visit a collection of objects
#
# @return [Class::Printer] printer
# @return [Enumerable<Object>] collection
#
# @return [undefined]
#
# @api private
#
def visit_collection(printer, collection)
collection.each do |object|
visit(printer, object)
end
end
# Visit object
#
# @param [Class::Printer] printer
# @param [Object] object
#
# @return [undefined]
#
# @api private
#
def visit(printer, object)
printer.run(output, object)
end
# Print an info line to output
#
# @return [undefined]
#
# @api private
#
def info(string, *arguments)
puts(format(string, *arguments))
end
# Print a status line to output
#
# @return [undefined]
#
# @api private
#
def status(string, *arguments)
puts(colorize(status_color, format(string, *arguments)))
end
# Print a line to output
#
# @return [undefined]
#
# @api private
#
def puts(string)
output.puts(string)
end
# Colorize message
#
# @param [Color] color
# @param [String] message
#
# @api private
#
# @return [String]
# if color is enabled
# unmodified message otherwise
#
def colorize(color, message)
color = Color::NONE unless tty?
color.format(message)
end
# Test if output is a tty
#
# @return [Boolean]
#
# @api private
#
def tty?
output.tty?
end
# Test if output can be colored
#
# @return [Boolean]
#
# @api private
#
alias_method :color?, :tty?
# Printer for runner status
class Status < self
delegate(:active_jobs, :payload)
# Print progress for collector
#
# @return [self]
#
# @api private
#
def run
visit(EnvProgress, payload)
info('Active subjects: %d', active_subject_results.length)
visit_collection(SubjectProgress, active_subject_results)
job_status
self
end
private
# Print worker status
#
# @return [undefined]
#
# @api private
#
def job_status
return if active_jobs.empty?
info('Active Jobs:')
active_jobs.sort_by(&:index).each do |job|
info('%d: %s', job.index, job.payload.identification)
end
end
# Return active subject results
#
# @return [Array<Result::Subject>]
#
# @api private
#
def active_subject_results
active_mutation_jobs = active_jobs.select { |job| job.payload.kind_of?(Mutation) }
active_subjects = active_mutation_jobs.map(&:payload).flat_map(&:subject).to_set
payload.subject_results.select do |subject_result|
active_subjects.include?(subject_result.subject)
end
end
end # Status
# Progress printer for configuration
class Config < self
# Report configuration
#
# @param [Mutant::Config] config
#
# @return [self]
#
# @api private
#
# rubocop:disable AbcSize
#
def run
info 'Mutant configuration:'
info 'Matcher: %s', object.matcher.inspect
info 'Integration: %s', object.integration.name
info 'Expect Coverage: %0.2f%%', (object.expected_coverage * 100)
info 'Jobs: %d', object.jobs
info 'Includes: %s', object.includes.inspect
info 'Requires: %s', object.requires.inspect
self
end
end # Config
# Env progress printer
class EnvProgress < self
delegate(
:coverage,
:amount_subjects,
:amount_mutations,
:amount_mutations_alive,
:amount_mutations_killed,
:runtime,
:killtime,
:overhead,
:env
)
# Run printer
#
# @return [self]
#
# @api private
#
# rubocop:disable MethodLength
#
def run
visit(Config, env.config)
info 'Subjects: %s', amount_subjects
info 'Mutations: %s', amount_mutations
info 'Kills: %s', amount_mutations_killed
info 'Alive: %s', amount_mutations_alive
info 'Runtime: %0.2fs', runtime
info 'Killtime: %0.2fs', killtime
info 'Overhead: %0.2f%%', overhead_percent
status 'Coverage: %0.2f%%', coverage_percent
status 'Expected: %0.2f%%', (env.config.expected_coverage * 100)
self
end
private
# Return coverage percent
#
# @return [Float]
#
# @api private
#
def coverage_percent
coverage * 100
end
# Return overhead percent
#
# @return [Float]
#
# @api private
#
def overhead_percent
(overhead / killtime) * 100
end
end # EnvProgress
# Full env result reporter
class EnvResult < self
delegate(:failed_subject_results)
# Run printer
#
# @return [self]
#
# @api private
#
def run
visit_collection(SubjectResult, failed_subject_results)
visit(EnvProgress, object)
self
end
end # EnvResult
# Subject report printer
class SubjectResult < self
delegate :subject, :failed_mutations, :tests
# Run report printer
#
# @return [self]
#
# @api private
#
def run
status(subject.identification)
tests.each do |test|
puts("- #{test.identification}")
end
visit_collection(MutationResult, object.alive_mutation_results)
self
end
end # Subject
# Printer for mutation progress results
class MutationProgressResult < self
SUCCESS = '.'.freeze
FAILURE = 'F'.freeze
# Run printer
#
# @return [self]
#
# @api private
#
def run
char(success? ? SUCCESS : FAILURE)
end
private
# Write colorized char
#
# @param [String] char
#
# @return [undefined]
#
# @api private
#
def char(char)
output.write(colorize(status_color, char))
end
end # MutationProgressResult
# Reporter for progressive output format on scheduler Status objects
class StatusProgressive < self
FORMAT = '(%02d/%02d) %3d%% - killtime: %0.02fs runtime: %0.02fs overhead: %0.02fs'.freeze
delegate(
:coverage,
:runtime,
:amount_mutations_killed,
:amount_mutations,
:amount_mutation_results,
:killtime,
:overhead
)
# Run printer
#
# @return [self]
#
# @api private
#
def run
status(
FORMAT,
amount_mutations_killed,
amount_mutations,
coverage * 100,
killtime,
runtime,
overhead
)
self
end
private
# Return object being printed
#
# @return [Result::Env]
#
# @api private
#
def object
super().payload
end
end
# Reporter for subject progress
class SubjectProgress < self
FORMAT = '(%02d/%02d) %3d%% - killtime: %0.02fs runtime: %0.02fs overhead: %0.02fs'.freeze
delegate(
:tests,
:subject,
:coverage,
:runtime,
:amount_mutations_killed,
:amount_mutations,
:amount_mutation_results,
:killtime,
:overhead
)
# Run printer
#
# @return [self]
#
# @api private
#
def run
puts("#{subject.identification} mutations: #{amount_mutations}")
print_tests
print_mutation_results
print_progress_bar_finish
print_stats
self
end
private
# Print stats
#
# @return [undefined]
#
# @api private
#
def print_stats
status(
FORMAT,
amount_mutations_killed,
amount_mutations,
coverage * 100,
killtime,
runtime,
overhead
)
end
# Print tests
#
# @return [undefined]
#
# @api private
#
def print_tests
tests.each do |test|
puts "- #{test.identification}"
end
end
# Print progress bar finish
#
# @return [undefined]
#
# @api private
#
def print_progress_bar_finish
puts(NL) unless amount_mutation_results.zero?
end
# Print mutation results
#
# @return [undefined]
#
# @api private
#
def print_mutation_results
visit_collection(MutationProgressResult, object.mutation_results)
end
end # Subject
# Reporter for mutation results
class MutationResult < self
delegate :mutation, :test_result
DIFF_ERROR_MESSAGE =
'BUG: Mutation NOT resulted in exactly one diff hunk. Please report a reproduction!'.freeze
MAP = {
Mutant::Mutation::Evil => :evil_details,
Mutant::Mutation::Neutral => :neutral_details,
Mutant::Mutation::Noop => :noop_details
}.freeze
NEUTRAL_MESSAGE =
"--- Neutral failure ---\n" \
"Original code was inserted unmutated. And the test did NOT PASS.\n" \
"Your tests do not pass initially or you found a bug in mutant / unparser.\n" \
"Subject AST:\n" \
"%s\n" \
"Unparsed Source:\n" \
"%s\n" \
"Test Result:\n".freeze
NOOP_MESSAGE =
"---- Noop failure -----\n" \
"No code was inserted. And the test did NOT PASS.\n" \
"This is typically a problem of your specs not passing unmutated.\n" \
"Test Result:\n".freeze
FOOTER = '-----------------------'.freeze
# Run report printer
#
# @return [self]
#
# @api private
#
def run
puts(mutation.identification)
print_details
puts(FOOTER)
self
end
private
# Return details
#
# @return [undefined]
#
# @api private
#
def print_details
send(MAP.fetch(mutation.class))
end
# Return evil details
#
# @return [String]
#
# @api private
#
def evil_details
original, current = mutation.original_source, mutation.source
diff = Mutant::Diff.build(original, current)
diff = color? ? diff.colorized_diff : diff.diff
puts(diff || ['Original source:', original, 'Mutated Source:', current, DIFF_ERROR_MESSAGE])
end
# Noop details
#
# @return [String]
#
# @api private
#
def noop_details
info(NOOP_MESSAGE)
visit_test_result
end
# Neutral details
#
# @return [String]
#
# @api private
#
def neutral_details
info(NEUTRAL_MESSAGE, mutation.subject.node.inspect, mutation.source)
visit_test_result
end
# Visit failed test results
#
# @return [undefined]
#
# @api private
#
def visit_test_result
visit(TestResult, test_result)
end
end # MutationResult
# Test result reporter
class TestResult < self
delegate :tests, :runtime
# Run test result reporter
#
# @return [self]
#
# @api private
#
def run
status('- %d @ runtime: %s', tests.length, runtime)
tests.each do |test|
puts(" - #{test.identification}")
end
puts('Test Output:')
puts(object.output)
end
# Test if test result is successful
#
# Only used to determine color.
#
# @return [false]
#
# @api private
#
def success?
false
end
end # TestResult
end # Printer
end # CLI
end # Reporter
end # Mutant
|
module NeqaHighCharts
  # Plain data holder for a Highcharts configuration; each accessor maps to a
  # top-level Highcharts option.
  class HighChart
    attr_accessor :chart, :title, :subtitle, :xAxis, :yAxis, :tooltip, :legend, :labels, :plotOptions, :series, :scrollbar, :credits, :colors
    # Yields the freshly created chart so callers can configure it inline:
    #   HighChart.new { |hc| hc.title = {text: 'Sales'} }
    # Defaults are opt-in via #defaults_options (deliberately not applied here).
    def initialize
      yield self if block_given?
    end
    # Seeds the most commonly customized options with sensible defaults.
    def defaults_options
      self.title = {text: nil}
      self.legend = {layout: "vertical", labels: {}}
      self.xAxis = {}
      self.yAxis = {title: {text: nil}, labels: {}}
      self.tooltip = {enabled: true}
      self.subtitle = {}
    end
  end
end
Add accessors for all top-level Highcharts options
module NeqaHighCharts
  # Plain data holder covering the full Highcharts configuration surface; each
  # accessor maps to a top-level Highcharts option.
  class HighChart
    attr_accessor :chart, :colors, :credits, :drilldown, :exporting, :labels, :legend, :loading, :navigation, :noData, :pane, :plotOptions, :series, :subtitle, :title, :tooltip, :xAxis, :yAxis, :scrollbar
    # Yields the freshly created chart so callers can configure it inline.
    # Defaults are opt-in via #defaults_options (deliberately not applied here).
    def initialize
      yield self if block_given?
    end
    # Seeds the most commonly customized options with sensible defaults.
    def defaults_options
      self.title = {text: nil}
      self.legend = {layout: "vertical", labels: {}}
      self.xAxis = {}
      self.yAxis = {title: {text: nil}, labels: {}}
      self.tooltip = {enabled: true}
      self.subtitle = {}
    end
  end
end
|
# Generated by jeweler
# DO NOT EDIT THIS FILE DIRECTLY
# Instead, edit Jeweler::Tasks in Rakefile, and run 'rake gemspec'
# -*- encoding: utf-8 -*-
Gem::Specification.new do |s|
s.name = "extract"
s.version = "0.1.2"
s.required_rubygems_version = Gem::Requirement.new(">= 0") if s.respond_to? :required_rubygems_version=
s.authors = ["Mike Harris"]
s.date = "2013-03-25"
s.description = "extract"
s.email = "mharris717@gmail.com"
s.extra_rdoc_files = [
"LICENSE.txt",
"README.rdoc"
]
s.files = [
".document",
".lre",
".rspec",
"Gemfile",
"Gemfile.lock",
"Guardfile",
"LICENSE.txt",
"README.rdoc",
"Rakefile",
"VERSION",
"extract.gemspec",
"lib/extract.rb",
"lib/extract/cell.rb",
"lib/extract/excel_formulas.rb",
"lib/extract/export/ddl.rb",
"lib/extract/export/table.rb",
"lib/extract/formula.treetop",
"lib/extract/inline_def.rb",
"lib/extract/math.treetop",
"lib/extract/math_calc.rb",
"lib/extract/parser.rb",
"lib/extract/persist/sheet.rb",
"lib/extract/sheet.rb",
"lib/extract/sheet_comp.rb",
"lib/extract/sheet_definition.rb",
"lib/extract/table.rb",
"lib/extract/tables.rb",
"lib/extract/tree/base.rb",
"lib/extract/tree/cell.rb",
"lib/extract/tree/cond_exp.rb",
"lib/extract/tree/formula.rb",
"lib/extract/tree/formula_args.rb",
"lib/extract/tree/math.rb",
"lib/extract/tree/num.rb",
"lib/extract/tree/operator.rb",
"lib/extract/tree/range.rb",
"lib/extract/tree/string.rb",
"samples/baseball.xlsx",
"samples/div.xlsx",
"samples/salescalls.xlsx",
"spec/cell_spec.rb",
"spec/config/mongoid.yml",
"spec/deps_spec.rb",
"spec/export/table_spec.rb",
"spec/extract_spec.rb",
"spec/inline_def_spec.rb",
"spec/math_spec.rb",
"spec/parser_spec.rb",
"spec/persist_spec.rb",
"spec/sheet_definition_spec.rb",
"spec/sheet_spec.rb",
"spec/spec_helper.rb",
"spec/table_spec.rb",
"vol/excel_test.rb",
"vol/parse_test.rb",
"vol/scratch.rb",
"vol/web.rb",
"vol/yaml_test.rb",
"web/file.tmp",
"web/file.xlsx",
"web/main.rb",
"web/mongoid.yml",
"web/views/index.haml",
"web/views/upload.haml"
]
s.homepage = "http://github.com/mharris717/extract"
s.licenses = ["MIT"]
s.require_paths = ["lib"]
s.rubygems_version = "1.8.23"
s.summary = "extract"
if s.respond_to? :specification_version then
s.specification_version = 3
if Gem::Version.new(Gem::VERSION) >= Gem::Version.new('1.2.0') then
s.add_runtime_dependency(%q<guard>, [">= 0"])
s.add_runtime_dependency(%q<guard-rspec>, [">= 0"])
s.add_runtime_dependency(%q<guard-spork>, [">= 0"])
s.add_runtime_dependency(%q<mharris_ext>, [">= 0"])
s.add_runtime_dependency(%q<treetop>, [">= 0"])
s.add_runtime_dependency(%q<lre>, [">= 0"])
s.add_runtime_dependency(%q<roo>, [">= 0"])
s.add_runtime_dependency(%q<rb-fsevent>, ["~> 0.9.1"])
s.add_runtime_dependency(%q<mongoid>, [">= 0"])
s.add_development_dependency(%q<rspec>, ["~> 2.8.0"])
s.add_development_dependency(%q<rdoc>, ["~> 3.12"])
s.add_development_dependency(%q<bundler>, ["~> 1.2"])
s.add_development_dependency(%q<jeweler>, ["~> 1.8.4"])
else
s.add_dependency(%q<guard>, [">= 0"])
s.add_dependency(%q<guard-rspec>, [">= 0"])
s.add_dependency(%q<guard-spork>, [">= 0"])
s.add_dependency(%q<mharris_ext>, [">= 0"])
s.add_dependency(%q<treetop>, [">= 0"])
s.add_dependency(%q<lre>, [">= 0"])
s.add_dependency(%q<roo>, [">= 0"])
s.add_dependency(%q<rb-fsevent>, ["~> 0.9.1"])
s.add_dependency(%q<mongoid>, [">= 0"])
s.add_dependency(%q<rspec>, ["~> 2.8.0"])
s.add_dependency(%q<rdoc>, ["~> 3.12"])
s.add_dependency(%q<bundler>, ["~> 1.2"])
s.add_dependency(%q<jeweler>, ["~> 1.8.4"])
end
else
s.add_dependency(%q<guard>, [">= 0"])
s.add_dependency(%q<guard-rspec>, [">= 0"])
s.add_dependency(%q<guard-spork>, [">= 0"])
s.add_dependency(%q<mharris_ext>, [">= 0"])
s.add_dependency(%q<treetop>, [">= 0"])
s.add_dependency(%q<lre>, [">= 0"])
s.add_dependency(%q<roo>, [">= 0"])
s.add_dependency(%q<rb-fsevent>, ["~> 0.9.1"])
s.add_dependency(%q<mongoid>, [">= 0"])
s.add_dependency(%q<rspec>, ["~> 2.8.0"])
s.add_dependency(%q<rdoc>, ["~> 3.12"])
s.add_dependency(%q<bundler>, ["~> 1.2"])
s.add_dependency(%q<jeweler>, ["~> 1.8.4"])
end
end
gemspec
# Generated by jeweler
# DO NOT EDIT THIS FILE DIRECTLY
# Instead, edit Jeweler::Tasks in Rakefile, and run 'rake gemspec'
# -*- encoding: utf-8 -*-
Gem::Specification.new do |s|
s.name = "extract"
s.version = "0.1.2"
s.required_rubygems_version = Gem::Requirement.new(">= 0") if s.respond_to? :required_rubygems_version=
s.authors = ["Mike Harris"]
s.date = "2013-03-25"
s.description = "extract"
s.email = "mharris717@gmail.com"
s.extra_rdoc_files = [
"LICENSE.txt",
"README.rdoc"
]
s.files = [
".document",
".lre",
".rspec",
"Gemfile",
"Gemfile.lock",
"Guardfile",
"LICENSE.txt",
"README.rdoc",
"Rakefile",
"VERSION",
"extract.gemspec",
"lib/extract.rb",
"lib/extract/cell.rb",
"lib/extract/excel_formulas.rb",
"lib/extract/export/ddl.rb",
"lib/extract/export/table.rb",
"lib/extract/formula.treetop",
"lib/extract/inline_def.rb",
"lib/extract/math.treetop",
"lib/extract/math_calc.rb",
"lib/extract/parser.rb",
"lib/extract/persist/sheet.rb",
"lib/extract/sheet.rb",
"lib/extract/sheet_comp.rb",
"lib/extract/sheet_definition.rb",
"lib/extract/table.rb",
"lib/extract/tables.rb",
"lib/extract/tree/base.rb",
"lib/extract/tree/cell.rb",
"lib/extract/tree/cond_exp.rb",
"lib/extract/tree/formula.rb",
"lib/extract/tree/formula_args.rb",
"lib/extract/tree/math.rb",
"lib/extract/tree/num.rb",
"lib/extract/tree/operator.rb",
"lib/extract/tree/range.rb",
"lib/extract/tree/string.rb",
"samples/baseball.xlsx",
"samples/div.xlsx",
"samples/salescalls.xlsx",
"spec/cell_spec.rb",
"spec/config/mongoid.yml",
"spec/deps_spec.rb",
"spec/export/table_spec.rb",
"spec/extract_spec.rb",
"spec/inline_def_spec.rb",
"spec/math_spec.rb",
"spec/parser_spec.rb",
"spec/persist_spec.rb",
"spec/sheet_definition_spec.rb",
"spec/sheet_spec.rb",
"spec/spec_helper.rb",
"spec/table_spec.rb",
"vol/excel_test.rb",
"vol/parse_test.rb",
"vol/scratch.rb",
"vol/web.rb",
"vol/yaml_test.rb",
"web/file.tmp",
"web/file.xlsx",
"web/main.rb",
"web/mongoid.yml",
"web/views/index.haml",
"web/views/upload.haml"
]
s.homepage = "http://github.com/mharris717/extract"
s.licenses = ["MIT"]
s.require_paths = ["lib"]
s.rubygems_version = "1.8.23"
s.summary = "extract"
if s.respond_to? :specification_version then
s.specification_version = 3
if Gem::Version.new(Gem::VERSION) >= Gem::Version.new('1.2.0') then
s.add_runtime_dependency(%q<mharris_ext>, [">= 0"])
s.add_runtime_dependency(%q<treetop>, [">= 0"])
s.add_runtime_dependency(%q<roo>, [">= 0"])
s.add_runtime_dependency(%q<rb-fsevent>, ["~> 0.9.1"])
s.add_runtime_dependency(%q<mongoid>, [">= 0"])
s.add_development_dependency(%q<rspec>, ["~> 2.8.0"])
s.add_development_dependency(%q<rdoc>, ["~> 3.12"])
s.add_development_dependency(%q<bundler>, ["~> 1.2"])
s.add_development_dependency(%q<jeweler>, ["~> 1.8.4"])
s.add_development_dependency(%q<guard>, [">= 0"])
s.add_development_dependency(%q<guard-rspec>, [">= 0"])
s.add_development_dependency(%q<guard-spork>, [">= 0"])
s.add_development_dependency(%q<lre>, [">= 0"])
else
s.add_dependency(%q<mharris_ext>, [">= 0"])
s.add_dependency(%q<treetop>, [">= 0"])
s.add_dependency(%q<roo>, [">= 0"])
s.add_dependency(%q<rb-fsevent>, ["~> 0.9.1"])
s.add_dependency(%q<mongoid>, [">= 0"])
s.add_dependency(%q<rspec>, ["~> 2.8.0"])
s.add_dependency(%q<rdoc>, ["~> 3.12"])
s.add_dependency(%q<bundler>, ["~> 1.2"])
s.add_dependency(%q<jeweler>, ["~> 1.8.4"])
s.add_dependency(%q<guard>, [">= 0"])
s.add_dependency(%q<guard-rspec>, [">= 0"])
s.add_dependency(%q<guard-spork>, [">= 0"])
s.add_dependency(%q<lre>, [">= 0"])
end
else
s.add_dependency(%q<mharris_ext>, [">= 0"])
s.add_dependency(%q<treetop>, [">= 0"])
s.add_dependency(%q<roo>, [">= 0"])
s.add_dependency(%q<rb-fsevent>, ["~> 0.9.1"])
s.add_dependency(%q<mongoid>, [">= 0"])
s.add_dependency(%q<rspec>, ["~> 2.8.0"])
s.add_dependency(%q<rdoc>, ["~> 3.12"])
s.add_dependency(%q<bundler>, ["~> 1.2"])
s.add_dependency(%q<jeweler>, ["~> 1.8.4"])
s.add_dependency(%q<guard>, [">= 0"])
s.add_dependency(%q<guard-rspec>, [">= 0"])
s.add_dependency(%q<guard-spork>, [">= 0"])
s.add_dependency(%q<lre>, [">= 0"])
end
end
|
module ObjectPatch
  module Operations
    # JSON-Patch "add" operation: inserts a value into a document at the
    # location named by a decoded JSON-pointer path.
    class Add
      # @param patch_hash [Hash] raw patch entry; must contain "path",
      #   may contain "value" (defaults to nil, matching a JSON null)
      def initialize(patch_hash)
        @path = ObjectPatch::Pointer.decode(patch_hash.fetch("path"))
        @value = patch_hash.fetch("value", nil)
      end

      # Applies this add operation to the given document in place.
      #
      # @param source_hash [Hash, Array] document to modify
      # @return [Object] the modified document, or the value itself when
      #   the path addresses the document root
      def apply(source_hash)
        recursive_set(source_hash, @path, @value)
      end

      # Walks +path+ into +obj+, creating intermediate containers as
      # needed, and stores +new_value+ at the final location.
      #
      # Fixes over the previous revision:
      # * an empty path now replaces the target with +new_value+; the old
      #   guard returned the untouched object, making root adds a no-op
      # * the "-" (append) case recursed on the array itself, so an empty
      #   remaining path pushed the array into itself (self-reference);
      #   it now recurses on a fresh slot and pushes the resolved value
      # * Fixnum is deprecated since Ruby 2.4 — use Integer
      def recursive_set(obj, path, new_value)
        return new_value if path.nil? || path.empty?

        # Grab our key off the stack
        key = path.shift

        # "-" and numeric keys address arrays; everything else a hash.
        key_type = (key == "-" || key.is_a?(Integer)) ? Array : Hash
        obj = key_type.new if obj.nil?

        if key == "-"
          # Hyphen is a special case where we append to the array
          obj.push(recursive_set(nil, path, new_value))
        else
          recursion_result = recursive_set(obj[key], path, new_value)
          obj[key] = recursion_result if key_type == Hash
          obj.insert(key, recursion_result) if key_type == Array
        end

        obj
      end
    end
  end
end
Refactor of the Add operation; 55 of 107 tests still failing
module ObjectPatch
  module Operations
    # JSON-Patch "add" operation: inserts a value into an existing document
    # at the location named by a decoded JSON-pointer path.
    class Add
      # @param patch_hash [Hash] raw patch entry; must contain "path",
      #   may contain "value" (defaults to nil, matching a JSON null)
      def initialize(patch_hash)
        @path = ObjectPatch::Pointer.decode(patch_hash.fetch("path"))
        @value = patch_hash.fetch("value", nil)
      end

      # Applies this add operation to the given document in place.
      #
      # @param source_hash [Hash, Array] document to modify
      # @return [Object] the modified document
      def apply(source_hash)
        recursive_set(source_hash, @path, @value)
      end

      # Walks +path+ into +obj+ and stores +new_value+ at the final
      # location, validating each step.
      #
      # Fixes over the previous revision:
      # * L25075 referenced undefined +test_value+ (NameError at runtime)
      # * the leftover copy of the old algorithm after the if/else ran
      #   unconditionally and corrupted the already-updated document
      # * the array-bounds raise mixed && and || without parens, so it
      #   compared Integer size against a String key (TypeError) and
      #   wrongly rejected the "-" append token
      # * Fixnum is deprecated since Ruby 2.4
      #
      # @raise [ArgumentError] on an empty path, an out-of-range array
      #   index, a missing intermediate hash key, or a non-container target
      def recursive_set(obj, path, new_value)
        key = path.shift
        raise ArgumentError, "path must not be empty" if key.nil?

        case obj
        when Array
          if key == "-"
            # "-" appends: treat it as the index one past the end.
            key = obj.size
          else
            key = Integer(key)
            raise ArgumentError, "index #{key} out of range" if key > obj.size
          end
        when Hash
          # Hash keys need no normalization.
        else
          raise ArgumentError, "cannot add into #{obj.class}"
        end

        if path.empty?
          if obj.is_a?(Array)
            obj.insert(key, new_value)
          else
            # Adds may create a brand-new key at the leaf.
            obj[key] = new_value
          end
        else
          # Intermediate path elements must already exist.
          raise ArgumentError, "missing intermediate key #{key}" if obj.is_a?(Hash) && !obj.key?(key)
          recursive_set(obj[key], path, new_value)
        end

        obj
      end
    end
  end
end
|
module Octokit
class Client
module Repositories
# Legacy repository search
#
# @see http://developer.github.com/v3/search/#search-repositories
# @param q [String] Search keyword
# @return [Array<Hashie::Mash>] List of repositories found
def search_repositories(q, options={})
get("legacy/repos/search/#{q}", options, 3)['repositories']
end
alias :search_repos :search_repositories
# Get a single repository
#
# @see http://developer.github.com/v3/repos/#get
# @param repo [String, Hash, Repository] A GitHub repository
# @return [Hashie::Mash] Repository information
def repository(repo, options={})
get "repos/#{Repository.new repo}", options, 3
end
alias :repo :repository
# Edit a repository
#
# @see http://developer.github.com/v3/repos/#edit
# @param repo [String, Hash, Repository] A GitHub repository
# @param options [Hash] Repository information to update
# @option options [String] :name Name of the repo
# @option options [String] :description Description of the repo
# @option options [String] :homepage Home page of the repo
# @option options [String] :private `true` makes the repository private, and `false` makes it public.
# @option options [String] :has_issues `true` enables issues for this repo, `false` disables issues.
# @option options [String] :has_wiki `true` enables wiki for this repo, `false` disables wiki.
# @option options [String] :has_downloads `true` enables downloads for this repo, `false` disables downloads.
# @option options [String] :default_branch Update the default branch for this repository.
# @return [Hashie::Mash] Repository information
def edit_repository(repo, options={})
patch "repos/#{Repository.new repo}", options, 3
end
alias :edit :edit_repository
alias :update_repository :edit_repository
alias :update :edit_repository
# List repositories
#
# If username is not supplied, repositories for the current
# authenticated user are returned
#
# @see http://developer.github.com/v3/repos/#list-your-repositories
# @param username [String] Optional username for which to list repos
# @return [Array<Hashie::Mash>] List of repositories
def repositories(username=nil, options={})
path = username.nil? ? 'user/repos' : "users/#{username}/repos"
get path, options, 3
end
alias :list_repositories :repositories
alias :list_repos :repositories
alias :repos :repositories
# Star a repository
#
# @param repo [String, Hash, Repository] A GitHub repository
# @return [Boolean] `true` if successfully starred
def star(repo, options={})
put "user/starred/#{Repository.new repo}", options, 3
true
rescue Octokit::NotFound
false
end
# Unstar a repository
#
# @param repo [String, Hash, Repository] A GitHub repository
# @return [Boolean] `true` if successfully unstarred
def unstar(repo, options={})
begin
delete "user/starred/#{Repository.new repo}", options, 3
return true
rescue Octokit::NotFound
return false
end
end
# Watch a repository
#
# @param repo [String, Hash, Repository] A GitHub repository
# @return [Boolean] `true` if successfully watched
# @deprecated Use #star instead
def watch(repo, options={})
begin
put "user/watched/#{Repository.new repo}", options, 3
return true
rescue Octokit::NotFound
return false
end
end
# Unwatch a repository
#
# @param repo [String, Hash, Repository] A GitHub repository
# @return [Boolean] `true` if successfully unwatched
# @deprecated Use #unstar instead
def unwatch(repo, options={})
begin
delete "user/watched/#{Repository.new repo}", options, 3
return true
rescue Octokit::NotFound
return false
end
end
# Fork a repository
#
# @param repo [String, Hash, Repository] A GitHub repository
# @return [Hashie::Mash] Repository info for the new fork
def fork(repo, options={})
post "repos/#{Repository.new repo}/forks", options, 3
end
# Create a repository for a user or organization
#
# @param name [String] Name of the new repo
# @option options [String] :description Description of the repo
# @option options [String] :homepage Home page of the repo
# @option options [String] :private `true` makes the repository private, and `false` makes it public.
# @option options [String] :has_issues `true` enables issues for this repo, `false` disables issues.
# @option options [String] :has_wiki `true` enables wiki for this repo, `false` disables wiki.
# @option options [String] :has_downloads `true` enables downloads for this repo, `false` disables downloads.
# @option options [String] :organization Short name for the org under which to create the repo.
# @option options [Integer] :team_id The id of the team that will be granted access to this repository. This is only valid when creating a repo in an organization.
# @option options [Boolean] :auto_init `true` to create an initial commit with empty README. Default is `false`.
# @option options [String] :gitignore_template Desired language or platform .gitignore template to apply. Ignored if auto_init parameter is not provided.
# @return [Hashie::Mash] Repository info for the new repository
# @see http://developer.github.com/v3/repos/#create
def create_repository(name, options={})
organization = options.delete :organization
options.merge! :name => name
path = organization.nil? ? 'user/repos' : "orgs/#{organization}/repos"
post path, options, 3
end
alias :create_repo :create_repository
alias :create :create_repository
# Delete repository
#
# Note: If OAuth is used, 'delete_repo' scope is required
#
# @see http://developer.github.com/v3/repos/#delete-a-repository
# @param repo [String, Hash, Repository] A GitHub repository
# @return [Boolean] `true` if repository was deleted
def delete_repository(repo, options={})
begin
delete "repos/#{Repository.new repo}", options, 3
return true
rescue Octokit::NotFound
return false
end
end
alias :delete_repo :delete_repository
# Hide a public repository
#
# @param repo [String, Hash, Repository] A GitHub repository
# @return [Hashie::Mash] Updated repository info
def set_private(repo, options={})
# GitHub Api for setting private updated to use private attr, rather than public
update_repository repo, options.merge({ :private => true })
end
# Unhide a private repository
#
# @param repo [String, Hash, Repository] A GitHub repository
# @return [Hashie::Mash] Updated repository info
def set_public(repo, options={})
# GitHub Api for setting private updated to use private attr, rather than public
update_repository repo, options.merge({ :private => false })
end
# Get deploy keys on a repo
#
# Requires authenticated client.
#
# @param repo [String, Hash, Repository] A GitHub repository
# @return [Array<Hashie::Mash>] Array of hashes representing deploy keys.
# @see Octokit::Client
# @see http://developer.github.com/v3/repos/keys/#get
# @example
# @client.deploy_keys('pengwynn/octokit')
# @example
# @client.list_deploy_keys('pengwynn/octokit')
def deploy_keys(repo, options={})
get "repos/#{Repository.new repo}/keys", options, 3
end
alias :list_deploy_keys :deploy_keys
# Add deploy key to a repo
#
# Requires authenticated client.
#
# @param repo [String, Hash, Repository] A GitHub repository.
# @param title [String] Title reference for the deploy key.
# @param key [String] Public key.
# @return [Hashie::Mash] Hash representing newly added key.
# @see Octokit::Client
# @see http://developer.github.com/v3/repos/keys/#create
# @example
# @client.add_deploy_key('pengwynn/octokit', 'Staging server', 'ssh-rsa AAA...')
def add_deploy_key(repo, title, key, options={})
post "repos/#{Repository.new repo}/keys", options.merge(:title => title, :key => key), 3
end
# Remove deploy key from a repo
#
# Requires authenticated client.
#
# @param repo [String, Hash, Repository] A GitHub repository.
# @param id [Integer] Id of the deploy key to remove.
# @return [Boolean] True if key removed, false otherwise.
# @see Octokit::Client
# @see http://developer.github.com/v3/repos/keys/#delete
# @example
# @client.remove_deploy_key('pengwynn/octokit', 100000)
def remove_deploy_key(repo, id, options={})
delete "repos/#{Repository.new repo}/keys/#{id}", options, 3
end
# List collaborators
#
# Requires authenticated client for private repos.
#
# @param repo [String, Hash, Repository] A GitHub repository.
# @return [Array<Hashie::Mash>] Array of hashes representing collaborating users.
# @see Octokit::Client
# @see http://developer.github.com/v3/repos/collaborators/#list
# @example
# Octokit.collaborators('pengwynn/octokit')
# @example
# Octokit.collabs('pengwynn/octokit')
# @example
# @client.collabs('pengwynn/octokit')
def collaborators(repo, options={})
get "repos/#{Repository.new repo}/collaborators", options, 3
end
alias :collabs :collaborators
# Add collaborator to repo
#
# Requires authenticated client.
#
# @param repo [String, Hash, Repository] A GitHub repository.
# @param collaborator [String] Collaborator GitHub username to add.
# @return [Boolean] True if collaborator added, false otherwise.
# @see Octokit::Client
# @see http://developer.github.com/v3/repos/collaborators/#add-collaborator
# @example
# @client.add_collaborator('pengwynn/octokit', 'holman')
# @example
# @client.add_collab('pengwynn/octokit', 'holman')
def add_collaborator(repo, collaborator, options={})
put "repos/#{Repository.new repo}/collaborators/#{collaborator}", options, 3
end
alias :add_collab :add_collaborator
# Remove collaborator from repo.
#
# Requires authenticated client.
#
# @param repo [String, Hash, Repository] A GitHub repository.
# @param collaborator [String] Collaborator GitHub username to remove.
# @return [Boolean] True if collaborator removed, false otherwise.
# @see Octokit::Client
# @see http://developer.github.com/v3/repos/collaborators/#remove-collaborator
# @example
# @client.remove_collaborator('pengwynn/octokit', 'holman')
# @example
# @client.remove_collab('pengwynn/octokit', 'holman')
def remove_collaborator(repo, collaborator, options={})
delete "repos/#{Repository.new repo}/collaborators/#{collaborator}", options, 3
end
alias :remove_collab :remove_collaborator
# List teams for a repo
#
# Requires authenticated client that is an owner or collaborator of the repo.
#
# @param repo [String, Hash, Repository] A GitHub repository.
# @return [Array<Hashie::Mash>] Array of hashes representing teams.
# @see Octokit::Client
# @see http://developer.github.com/v3/repos/#list-teams
# @example
# @client.repository_teams('octokit/pengwynn')
# @example
# @client.repo_teams('octokit/pengwynn')
# @example
# @client.teams('octokit/pengwynn')
def repository_teams(repo, options={})
get "repos/#{Repository.new repo}/teams", options, 3
end
alias :repo_teams :repository_teams
alias :teams :repository_teams
# List contributors to a repo
#
# Requires authenticated client for private repos.
#
# @param repo [String, Hash, Repository] A GitHub repository.
# @param anon [Boolean] Set true to include anonymous contributors.
# @return [Array<Hashie::Mash>] Array of hashes representing users.
# @see Octokit::Client
# @see http://developer.github.com/v3/repos/#list-contributors
# @example
# Octokit.contributors('pengwynn/octokit', true)
# @example
# Octokit.contribs('pengwynn/octokit')
# @example
# @client.contribs('pengwynn/octokit')
def contributors(repo, anon=false, options={})
get "repos/#{Repository.new repo}/contributors", options.merge(:anon => anon), 3
end
alias :contribs :contributors
# List stargazers of a repo
#
# Requires authenticated client for private repos.
#
# @param repo [String, Hash, Repository] A GitHub repository.
# @return [Array<Hashie::Mash>] Array of hashes representing users.
# @see Octokit::Client
# @see http://developer.github.com/v3/repos/starring/#list-stargazers
# @example
# Octokit.stargazers('pengwynn/octokit')
# @example
# @client.stargazers('pengwynn/octokit')
def stargazers(repo, options={})
get "repos/#{Repository.new repo}/stargazers", options, 3
end
# @deprecated Use #stargazers instead
#
# List watchers of repo.
#
# Requires authenticated client for private repos.
#
# @param repo [String, Hash, Repository] A GitHub repository.
# @return [Array<Hashie::Mash>] Array of hashes representing users.
# @see Octokit::Client::Repositories#stargazers
# @see Octokit::Client
# @see http://developer.github.com/v3/repos/watching/#list-watchers
# @example
# Octokit.watchers('pengwynn/octokit')
# @example
# @client.watchers('pengwynn/octokit')
def watchers(repo, options={})
get "repos/#{Repository.new repo}/watchers", options, 3
end
# List forks
#
# Requires authenticated client for private repos.
#
# @param repo [String, Hash, Repository] A GitHub repository.
# @return [Array<Hashie::Mash>] Array of hashes representing repos.
# @see Octokit::Client
# @see http://developer.github.com/v3/repos/forks/#list-forks
# @example
# Octokit.forks('pengwynn/octokit')
# @example
# Octokit.network('pengwynn/octokit')
# @example
# @client.forks('pengwynn/octokit')
def forks(repo, options={})
get "repos/#{Repository.new repo}/forks", options, 3
end
alias :network :forks
# List languages of code in the repo.
#
# Requires authenticated client for private repos.
#
# @param repo [String, Hash, Repository] A GitHub repository.
# @return [Array<Hashie::Mash>] Array of Hashes representing languages.
# @see Octokit::Client
# @see http://developer.github.com/v3/repos/#list-languages
# @example
# Octokit.languages('pengwynn/octokit')
# @example
# @client.languages('pengwynn/octokit')
def languages(repo, options={})
get "repos/#{Repository.new repo}/languages", options, 3
end
# List tags
#
# Requires authenticated client for private repos.
#
# @param repo [String, Hash, Repository] A GitHub repository.
# @return [Array<Hashie::Mash>] Array of hashes representing tags.
# @see Octokit::Client
# @see http://developer.github.com/v3/repos/#list-tags
# @example
# Octokit.tags('pengwynn/octokit')
# @example
# @client.tags('pengwynn/octokit')
def tags(repo, options={})
get "repos/#{Repository.new repo}/tags", options, 3
end
# List branches
#
# Requires authenticated client for private repos.
#
# @param repo [String, Hash, Repository] A GitHub repository.
# @return [Array<Hashie::Mash>] Array of hashes representing branches.
# @see Octokit::Client
# @see http://developer.github.com/v3/repos/#list-branches
# @example
# Octokit.branches('pengwynn/octokit')
# @example
# @client.branches('pengwynn/octokit')
def branches(repo, options={})
get "repos/#{Repository.new repo}/branches", options, 3
end
# Get a single branch from a repository
#
# @param repo [String, Hash, Repository] A GitHub repository.
# @param branch [String] Branch name
# @return [Branch] The branch requested, if it exists
# @see http://developer.github.com/v3/repos/#get-branch
# @example Get branch `master` from pengwynn/octokit
# Octokit.branch("pengwynn/octokit", "master")
def branch(repo, branch, options={})
get "repos/#{Repository.new repo}/branches/#{branch}", options, 3
end
alias :get_branch :branch
# List repo hooks
#
# Requires authenticated client.
#
# @param repo [String, Hash, Repository] A GitHub repository.
# @return [Array<Hashie::Mash>] Array of hashes representing hooks.
# @see Octokit::Client
# @see http://developer.github.com/v3/repos/hooks/#list
# @example
# @client.hooks('pengwynn/octokit')
def hooks(repo, options={})
get "repos/#{Repository.new repo}/hooks", options, 3
end
# Get single hook
#
# Requires authenticated client.
#
# @param repo [String, Hash, Repository] A GitHub repository.
# @param id [Integer] Id of the hook to get.
# @return [Hashie::Mash] Hash representing hook.
# @see Octokit::Client
# @see http://developer.github.com/v3/repos/hooks/#get-single-hook
# @example
# @client.hook('pengwynn/octokit', 100000)
def hook(repo, id, options={})
get "repos/#{Repository.new repo}/hooks/#{id}", options, 3
end
# Create a hook
#
# Requires authenticated client.
#
# @param repo [String, Hash, Repository] A GitHub repository.
# @param name [String] The name of the service that is being called. See
# {https://api.github.com/hooks Hooks} for the possible names.
# @param config [Hash] A Hash containing key/value pairs to provide
# settings for this hook. These settings vary between the services and
# are defined in the {https://github.com/github/github-services github-services} repo.
# @option options [Array<String>] :events ('["push"]') Determines what
# events the hook is triggered for.
# @option options [Boolean] :active Determines whether the hook is
# actually triggered on pushes.
# @see Octokit::Client
# @see https://api.github.com/hooks
# @see https://github.com/github/github-services
# @see http://developer.github.com/v3/repos/hooks/#create-a-hook
# @example
# @client.create_hook(
# 'pengwynn/octokit',
# 'web',
# {
# :url => 'http://something.com/webhook',
# :content_type => 'json'
# },
# {
# :events => ['push', 'pull_request'],
# :active => true
# }
# )
def create_hook(repo, name, config, options={})
options = {:name => name, :config => config, :events => ["push"], :active => true}.merge(options)
post "repos/#{Repository.new repo}/hooks", options, 3
end
# Edit a hook
#
# Requires authenticated client.
#
# @param repo [String, Hash, Repository] A GitHub repository.
# @param id [Integer] Id of the hook being updated.
# @param name [String] The name of the service that is being called. See
# {https://api.github.com/hooks Hooks} for the possible names.
# @param config [Hash] A Hash containing key/value pairs to provide
# settings for this hook. These settings vary between the services and
# are defined in the {https://github.com/github/github-services github-services} repo.
# @option options [Array<String>] :events ('["push"]') Determines what
# events the hook is triggered for.
# @option options [Array<String>] :add_events Determines a list of events
# to be added to the list of events that the Hook triggers for.
# @option options [Array<String>] :remove_events Determines a list of events
# to be removed from the list of events that the Hook triggers for.
# @option options [Boolean] :active Determines whether the hook is
# actually triggered on pushes.
# @see Octokit::Client
# @see https://api.github.com/hooks
# @see https://github.com/github/github-services
# @see http://developer.github.com/v3/repos/hooks/#edit-a-hook
# @example
# @client.edit_hook(
# 'pengwynn/octokit',
# 'web',
# {
# :url => 'http://something.com/webhook',
# :content_type => 'json'
# },
# {
# :add_events => ['status'],
# :remove_events => ['pull_request'],
# :active => true
# }
# )
def edit_hook(repo, id, name, config, options={})
options = {:name => name, :config => config, :events => ["push"], :active => true}.merge(options)
patch "repos/#{Repository.new repo}/hooks/#{id}", options, 3
end
# Delete hook
#
# Requires authenticated client.
#
# @param repo [String, Hash, Repository] A GitHub repository.
# @param id [Integer] Id of the hook to remove.
# @return [Boolean] True if hook removed, false otherwise.
# @see Octokit::Client
# @see http://developer.github.com/v3/repos/hooks/#delete-a-hook
# @example
# @client.remove_hook('pengwynn/octokit', 1000000)
def remove_hook(repo, id, options={})
delete "repos/#{Repository.new repo}/hooks/#{id}", options, 3
end
# Test hook
#
# Requires authenticated client.
#
# @param repo [String, Hash, Repository] A GitHub repository.
# @param id [Integer] Id of the hook to test.
# @return [nil]
# @see Octokit::Client
# @see http://developer.github.com/v3/repos/hooks/#test-a-hook
# @example
# @client.test_hook('pengwynn/octokit', 1000000)
def test_hook(repo, id, options={})
post "repos/#{Repository.new repo}/hooks/#{id}/test", options, 3
end
# Get all Issue Events for a given Repository
#
# @param repo [String, Repository, Hash] A GitHub repository
#
# @return [Array] Array of all Issue Events for this Repository
# @see http://developer.github.com/v3/issues/events/#list-events-for-a-repository
# @example Get all Issue Events for Octokit
# Octokit.repository_issue_events("pengwynn/octokit")
def repository_issue_events(repo, options={})
get "repos/#{Repository.new repo}/issues/events", options, 3
end
alias :repo_issue_events :repository_issue_events
# List users available for assigning to issues.
#
# Requires authenticated client for private repos.
#
# @param repo [String, Hash, Repository] A GitHub repository.
# @return [Array<Hashie::Mash>] Array of hashes representing users.
# @see Octokit::Client
# @see http://developer.github.com/v3/issues/assignees/#list-assignees
# @example
# Octokit.repository_assignees('pengwynn/octokit')
# @example
# Octokit.repo_assignees('pengwynn/octokit')
# @example
# @client.repository_assignees('pengwynn/octokit')
def repository_assignees(repo, options={})
get "repos/#{Repository.new repo}/assignees", options, 3
end
alias :repo_assignees :repository_assignees
end
end
end
Document default_branch parameter for repos
module Octokit
class Client
module Repositories
# Legacy repository search
#
# @see http://developer.github.com/v3/search/#search-repositories
# @param q [String] Search keyword
# @return [Array<Hashie::Mash>] List of repositories found
def search_repositories(q, options={})
get("legacy/repos/search/#{q}", options, 3)['repositories']
end
alias :search_repos :search_repositories
# Get a single repository
#
# @see http://developer.github.com/v3/repos/#get
# @param repo [String, Hash, Repository] A GitHub repository
# @return [Hashie::Mash] Repository information
def repository(repo, options={})
get "repos/#{Repository.new repo}", options, 3
end
alias :repo :repository
# Edit a repository
#
# @see http://developer.github.com/v3/repos/#edit
# @param repo [String, Hash, Repository] A GitHub repository
# @param options [Hash] Repository information to update
# @option options [String] :name Name of the repo
# @option options [String] :description Description of the repo
# @option options [String] :homepage Home page of the repo
# @option options [String] :private `true` makes the repository private, and `false` makes it public.
# @option options [String] :has_issues `true` enables issues for this repo, `false` disables issues.
# @option options [String] :has_wiki `true` enables wiki for this repo, `false` disables wiki.
# @option options [String] :has_downloads `true` enables downloads for this repo, `false` disables downloads.
# @option options [String] :default_branch Update the default branch for this repository.
# @return [Hashie::Mash] Repository information
def edit_repository(repo, options={})
patch "repos/#{Repository.new repo}", options, 3
end
alias :edit :edit_repository
alias :update_repository :edit_repository
alias :update :edit_repository
# List repositories
#
# If username is not supplied, repositories for the current
# authenticated user are returned
#
# @see http://developer.github.com/v3/repos/#list-your-repositories
# @param username [String] Optional username for which to list repos
# @return [Array<Hashie::Mash>] List of repositories
def repositories(username=nil, options={})
if username.nil?
get 'user/repos', options, 3
else
get "users/#{username}/repos", options, 3
end
end
alias :list_repositories :repositories
alias :list_repos :repositories
alias :repos :repositories
# Star a repository
#
# @param repo [String, Hash, Repository] A GitHub repository
# @return [Boolean] `true` if successfully starred
def star(repo, options={})
begin
put "user/starred/#{Repository.new repo}", options, 3
return true
rescue Octokit::NotFound
return false
end
end
# Unstar a repository
#
# @param repo [String, Hash, Repository] A GitHub repository
# @return [Boolean] `true` if successfully unstarred
def unstar(repo, options={})
begin
delete "user/starred/#{Repository.new repo}", options, 3
return true
rescue Octokit::NotFound
return false
end
end
# Watch a repository
#
# @param repo [String, Hash, Repository] A GitHub repository
# @return [Boolean] `true` if successfully watched
# @deprecated Use #star instead
def watch(repo, options={})
begin
put "user/watched/#{Repository.new repo}", options, 3
return true
rescue Octokit::NotFound
return false
end
end
# Unwatch a repository
#
# @param repo [String, Hash, Repository] A GitHub repository
# @return [Boolean] `true` if successfully unwatched
# @deprecated Use #unstar instead
def unwatch(repo, options={})
begin
delete "user/watched/#{Repository.new repo}", options, 3
return true
rescue Octokit::NotFound
return false
end
end
# Fork a repository
#
# @param repo [String, Hash, Repository] A GitHub repository
# @return [Hashie::Mash] Repository info for the new fork
def fork(repo, options={})
post "repos/#{Repository.new repo}/forks", options, 3
end
# Create a repository for a user or organization
#
# @param name [String] Name of the new repo
# @option options [String] :description Description of the repo
# @option options [String] :homepage Home page of the repo
# @option options [String] :private `true` makes the repository private, and `false` makes it public.
# @option options [String] :has_issues `true` enables issues for this repo, `false` disables issues.
# @option options [String] :has_wiki `true` enables wiki for this repo, `false` disables wiki.
# @option options [String] :has_downloads `true` enables downloads for this repo, `false` disables downloads.
# @option options [String] :organization Short name for the org under which to create the repo.
# @option options [Integer] :team_id The id of the team that will be granted access to this repository. This is only valid when creating a repo in an organization.
# @option options [Boolean] :auto_init `true` to create an initial commit with empty README. Default is `false`.
# @option options [String] :gitignore_template Desired language or platform .gitignore template to apply. Ignored if auto_init parameter is not provided.
# @return [Hashie::Mash] Repository info for the new repository
# @see http://developer.github.com/v3/repos/#create
def create_repository(name, options={})
organization = options.delete :organization
options.merge! :name => name
if organization.nil?
post 'user/repos', options, 3
else
post "orgs/#{organization}/repos", options, 3
end
end
alias :create_repo :create_repository
alias :create :create_repository
# Delete repository
#
# Note: If OAuth is used, 'delete_repo' scope is required
#
# @see http://developer.github.com/v3/repos/#delete-a-repository
# @param repo [String, Hash, Repository] A GitHub repository
# @return [Boolean] `true` if repository was deleted
def delete_repository(repo, options={})
begin
delete "repos/#{Repository.new repo}", options, 3
return true
rescue Octokit::NotFound
return false
end
end
alias :delete_repo :delete_repository
# Hide a public repository
#
# @param repo [String, Hash, Repository] A GitHub repository
# @return [Hashie::Mash] Updated repository info
def set_private(repo, options={})
# GitHub Api for setting private updated to use private attr, rather than public
update_repository repo, options.merge({ :private => true })
end
# Unhide a private repository
#
# @param repo [String, Hash, Repository] A GitHub repository
# @return [Hashie::Mash] Updated repository info
def set_public(repo, options={})
# GitHub Api for setting private updated to use private attr, rather than public
update_repository repo, options.merge({ :private => false })
end
# Get deploy keys on a repo
#
# Requires authenticated client.
#
# @param repo [String, Hash, Repository] A GitHub repository
# @return [Array<Hashie::Mash>] Array of hashes representing deploy keys.
# @see Octokit::Client
# @see http://developer.github.com/v3/repos/keys/#get
# @example
# @client.deploy_keys('pengwynn/octokit')
# @example
# @client.list_deploy_keys('pengwynn/octokit')
def deploy_keys(repo, options={})
get "repos/#{Repository.new repo}/keys", options, 3
end
alias :list_deploy_keys :deploy_keys
# Add deploy key to a repo
#
# Requires authenticated client.
#
# @param repo [String, Hash, Repository] A GitHub repository.
# @param title [String] Title reference for the deploy key.
# @param key [String] Public key.
# @return [Hashie::Mash] Hash representing newly added key.
# @see Octokit::Client
# @see http://developer.github.com/v3/repos/keys/#create
# @example
# @client.add_deploy_key('pengwynn/octokit', 'Staging server', 'ssh-rsa AAA...')
def add_deploy_key(repo, title, key, options={})
post "repos/#{Repository.new repo}/keys", options.merge(:title => title, :key => key), 3
end
# Remove deploy key from a repo
#
# Requires authenticated client.
#
# @param repo [String, Hash, Repository] A GitHub repository.
# @param id [Integer] Id of the deploy key to remove.
# @return [Boolean] True if key removed, false otherwise.
# @see Octokit::Client
# @see http://developer.github.com/v3/repos/keys/#delete
# @example
# @client.remove_deploy_key('pengwynn/octokit', 100000)
def remove_deploy_key(repo, id, options={})
delete "repos/#{Repository.new repo}/keys/#{id}", options, 3
end
# List collaborators
#
# Requires authenticated client for private repos.
#
# @param repo [String, Hash, Repository] A GitHub repository.
# @return [Array<Hashie::Mash>] Array of hashes representing collaborating users.
# @see Octokit::Client
# @see http://developer.github.com/v3/repos/collaborators/#list
# @example
# Octokit.collaborators('pengwynn/octokit')
# @example
# Octokit.collabs('pengwynn/octokit')
# @example
# @client.collabs('pengwynn/octokit')
def collaborators(repo, options={})
get "repos/#{Repository.new repo}/collaborators", options, 3
end
alias :collabs :collaborators
# Add collaborator to repo
#
# Requires authenticated client.
#
# @param repo [String, Hash, Repository] A GitHub repository.
# @param collaborator [String] Collaborator GitHub username to add.
# @return [Boolean] True if collaborator added, false otherwise.
# @see Octokit::Client
# @see http://developer.github.com/v3/repos/collaborators/#add-collaborator
# @example
# @client.add_collaborator('pengwynn/octokit', 'holman')
# @example
# @client.add_collab('pengwynn/octokit', 'holman')
def add_collaborator(repo, collaborator, options={})
put "repos/#{Repository.new repo}/collaborators/#{collaborator}", options, 3
end
alias :add_collab :add_collaborator
# Remove a collaborator from a repository.
#
# Requires an authenticated client.
#
# @param repo [String, Hash, Repository] A GitHub repository.
# @param collaborator [String] Collaborator GitHub username to remove.
# @return [Boolean] True if collaborator removed, false otherwise.
# @see http://developer.github.com/v3/repos/collaborators/#remove-collaborator
# @example
#   @client.remove_collaborator('pengwynn/octokit', 'holman')
# @example
#   @client.remove_collab('pengwynn/octokit', 'holman')
def remove_collaborator(repo, collaborator, options={})
  path = "repos/#{Repository.new(repo)}/collaborators/#{collaborator}"
  delete path, options, 3
end
alias :remove_collab :remove_collaborator
# List the teams with access to a repository.
#
# Requires an authenticated client that is an owner or collaborator of the repo.
#
# @param repo [String, Hash, Repository] A GitHub repository.
# @return [Array<Hashie::Mash>] Array of hashes representing teams.
# @see http://developer.github.com/v3/repos/#list-teams
# @example
#   @client.repository_teams('octokit/pengwynn')
# @example
#   @client.repo_teams('octokit/pengwynn')
# @example
#   @client.teams('octokit/pengwynn')
def repository_teams(repo, options={})
  path = "repos/#{Repository.new(repo)}/teams"
  get path, options, 3
end
alias :repo_teams :repository_teams
alias :teams :repository_teams
# List the contributors to a repository.
#
# Requires an authenticated client for private repos.
#
# @param repo [String, Hash, Repository] A GitHub repository.
# @param anon [Boolean] Set true to include anonymous contributors.
# @return [Array<Hashie::Mash>] Array of hashes representing users.
# @see http://developer.github.com/v3/repos/#list-contributors
# @example
#   Octokit.contributors('pengwynn/octokit', true)
# @example
#   Octokit.contribs('pengwynn/octokit')
# @example
#   @client.contribs('pengwynn/octokit')
def contributors(repo, anon=false, options={})
  params = options.merge(:anon => anon)
  get "repos/#{Repository.new(repo)}/contributors", params, 3
end
alias :contribs :contributors
# List the users who have starred a repository.
#
# Requires an authenticated client for private repos.
#
# @param repo [String, Hash, Repository] A GitHub repository.
# @return [Array<Hashie::Mash>] Array of hashes representing users.
# @see http://developer.github.com/v3/repos/starring/#list-stargazers
# @example
#   Octokit.stargazers('pengwynn/octokit')
# @example
#   @client.stargazers('pengwynn/octokit')
def stargazers(repo, options={})
  path = "repos/#{Repository.new(repo)}/stargazers"
  get path, options, 3
end
# @deprecated Use #stargazers instead
#
# List the watchers of a repository.
#
# Requires an authenticated client for private repos.
#
# @param repo [String, Hash, Repository] A GitHub repository.
# @return [Array<Hashie::Mash>] Array of hashes representing users.
# @see Octokit::Client::Repositories#stargazers
# @see http://developer.github.com/v3/repos/watching/#list-watchers
# @example
#   Octokit.watchers('pengwynn/octokit')
# @example
#   @client.watchers('pengwynn/octokit')
def watchers(repo, options={})
  path = "repos/#{Repository.new(repo)}/watchers"
  get path, options, 3
end
# List the forks of a repository.
#
# Requires an authenticated client for private repos.
#
# @param repo [String, Hash, Repository] A GitHub repository.
# @return [Array<Hashie::Mash>] Array of hashes representing repos.
# @see http://developer.github.com/v3/repos/forks/#list-forks
# @example
#   Octokit.forks('pengwynn/octokit')
# @example
#   Octokit.network('pengwynn/octokit')
# @example
#   @client.forks('pengwynn/octokit')
def forks(repo, options={})
  path = "repos/#{Repository.new(repo)}/forks"
  get path, options, 3
end
alias :network :forks
# List languages of code in the repo.
#
# Requires authenticated client for private repos.
#
# @param repo [String, Hash, Repository] A GitHub repository.
# @return [Array<Hashie::Mash>] Array of Hashes representing languages.
# @see Octokit::Client
# @see http://developer.github.com/v3/repos/#list-languages
# @example
#   Octokit.languages('pengwynn/octokit')
# @example
#   @client.languages('pengwynn/octokit')
def languages(repo, options={})
get "repos/#{Repository.new repo}/languages", options, 3
end
# List the tags of a repository.
#
# Requires an authenticated client for private repos.
#
# @param repo [String, Hash, Repository] A GitHub repository.
# @return [Array<Hashie::Mash>] Array of hashes representing tags.
# @see http://developer.github.com/v3/repos/#list-tags
# @example
#   Octokit.tags('pengwynn/octokit')
# @example
#   @client.tags('pengwynn/octokit')
def tags(repo, options={})
  path = "repos/#{Repository.new(repo)}/tags"
  get path, options, 3
end
# List the branches of a repository.
#
# Requires an authenticated client for private repos.
#
# @param repo [String, Hash, Repository] A GitHub repository.
# @return [Array<Hashie::Mash>] Array of hashes representing branches.
# @see http://developer.github.com/v3/repos/#list-branches
# @example
#   Octokit.branches('pengwynn/octokit')
# @example
#   @client.branches('pengwynn/octokit')
def branches(repo, options={})
  path = "repos/#{Repository.new(repo)}/branches"
  get path, options, 3
end
# Get a single branch from a repository
#
# @param repo [String, Hash, Repository] A GitHub repository.
# @param branch [String] Branch name
# @return [Branch] The branch requested, if it exists
# @see http://developer.github.com/v3/repos/#get-branch
# @example Get branch 'master' from pengwynn/octokit
#   Octokit.branch("pengwynn/octokit", "master")
def branch(repo, branch, options={})
get "repos/#{Repository.new repo}/branches/#{branch}", options, 3
end
alias :get_branch :branch
# List the hooks configured on a repository.
#
# Requires an authenticated client.
#
# @param repo [String, Hash, Repository] A GitHub repository.
# @return [Array<Hashie::Mash>] Array of hashes representing hooks.
# @see http://developer.github.com/v3/repos/hooks/#list
# @example
#   @client.hooks('pengwynn/octokit')
def hooks(repo, options={})
  path = "repos/#{Repository.new(repo)}/hooks"
  get path, options, 3
end
# Fetch a single hook by id.
#
# Requires an authenticated client.
#
# @param repo [String, Hash, Repository] A GitHub repository.
# @param id [Integer] Id of the hook to get.
# @return [Hashie::Mash] Hash representing hook.
# @see http://developer.github.com/v3/repos/hooks/#get-single-hook
# @example
#   @client.hook('pengwynn/octokit', 100000)
def hook(repo, id, options={})
  path = "repos/#{Repository.new(repo)}/hooks/#{id}"
  get path, options, 3
end
# Create a hook on a repository.
#
# Requires an authenticated client.
#
# @param repo [String, Hash, Repository] A GitHub repository.
# @param name [String] The name of the service that is being called. See
#   {https://api.github.com/hooks Hooks} for the possible names.
# @param config [Hash] A Hash containing key/value pairs to provide
#   settings for this hook. These settings vary between the services and
#   are defined in the {https://github.com/github/github-services github-services} repo.
# @option options [Array<String>] :events ('["push"]') Determines what
#   events the hook is triggered for.
# @option options [Boolean] :active Determines whether the hook is
#   actually triggered on pushes.
# @see https://api.github.com/hooks
# @see https://github.com/github/github-services
# @see http://developer.github.com/v3/repos/hooks/#create-a-hook
# @example
#   @client.create_hook(
#     'pengwynn/octokit',
#     'web',
#     {
#       :url => 'http://something.com/webhook',
#       :content_type => 'json'
#     },
#     {
#       :events => ['push', 'pull_request'],
#       :active => true
#     }
#   )
def create_hook(repo, name, config, options={})
  # Caller-supplied options override these defaults.
  defaults = {:name => name, :config => config, :events => ["push"], :active => true}
  post "repos/#{Repository.new(repo)}/hooks", defaults.merge(options), 3
end
# Edit a hook
#
# Requires authenticated client.
#
# @param repo [String, Hash, Repository] A GitHub repository.
# @param id [Integer] Id of the hook being updated.
# @param name [String] The name of the service that is being called. See
#   {https://api.github.com/hooks Hooks} for the possible names.
# @param config [Hash] A Hash containing key/value pairs to provide
#   settings for this hook. These settings vary between the services and
#   are defined in the {https://github.com/github/github-services github-services} repo.
# @option options [Array<String>] :events ('["push"]') Determines what
#   events the hook is triggered for.
# @option options [Array<String>] :add_events Determines a list of events
#   to be added to the list of events that the Hook triggers for.
# @option options [Array<String>] :remove_events Determines a list of events
#   to be removed from the list of events that the Hook triggers for.
# @option options [Boolean] :active Determines whether the hook is
#   actually triggered on pushes.
# @see https://api.github.com/hooks
# @see https://github.com/github/github-services
# @see http://developer.github.com/v3/repos/hooks/#edit-a-hook
# @example
#   @client.edit_hook(
#     'pengwynn/octokit',
#     100000,
#     'web',
#     {
#       :url => 'http://something.com/webhook',
#       :content_type => 'json'
#     },
#     {
#       :add_events => ['status'],
#       :remove_events => ['pull_request'],
#       :active => true
#     }
#   )
def edit_hook(repo, id, name, config, options={})
options = {:name => name, :config => config, :events => ["push"], :active => true}.merge(options)
patch "repos/#{Repository.new repo}/hooks/#{id}", options, 3
end
# Delete a hook from a repository.
#
# Requires an authenticated client.
#
# @param repo [String, Hash, Repository] A GitHub repository.
# @param id [Integer] Id of the hook to remove.
# @return [Boolean] True if hook removed, false otherwise.
# @see http://developer.github.com/v3/repos/hooks/#delete-a-hook
# @example
#   @client.remove_hook('pengwynn/octokit', 1000000)
def remove_hook(repo, id, options={})
  path = "repos/#{Repository.new(repo)}/hooks/#{id}"
  delete path, options, 3
end
# Trigger a test delivery of a hook.
#
# Requires an authenticated client.
#
# @param repo [String, Hash, Repository] A GitHub repository.
# @param id [Integer] Id of the hook to test.
# @return [nil]
# @see http://developer.github.com/v3/repos/hooks/#test-a-hook
# @example
#   @client.test_hook('pengwynn/octokit', 1000000)
def test_hook(repo, id, options={})
  path = "repos/#{Repository.new(repo)}/hooks/#{id}/test"
  post path, options, 3
end
# Get all Issue Events for a given Repository.
#
# @param repo [String, Repository, Hash] A GitHub repository
# @return [Array] Array of all Issue Events for this Repository
# @see http://developer.github.com/v3/issues/events/#list-events-for-a-repository
# @example Get all Issue Events for Octokit
#   Octokit.repository_issue_events("pengwynn/octokit")
def repository_issue_events(repo, options={})
  path = "repos/#{Repository.new(repo)}/issues/events"
  get path, options, 3
end
alias :repo_issue_events :repository_issue_events
# List users available for assigning to issues.
#
# Requires authenticated client for private repos.
#
# @param repo [String, Hash, Repository] A GitHub repository.
# @return [Array<Hashie::Mash>] Array of hashes representing users.
# @see Octokit::Client
# @see http://developer.github.com/v3/issues/assignees/#list-assignees
# @example
#   Octokit.repository_assignees('pengwynn/octokit')
# @example
#   Octokit.repo_assignees('pengwynn/octokit')
# @example
#   @client.repository_assignees('pengwynn/octokit')
def repository_assignees(repo, options={})
get "repos/#{Repository.new repo}/assignees", options, 3
end
alias :repo_assignees :repository_assignees
end
end
end
|
#
# Author:: Benjamin Black (<bb@opscode.com>)
# Copyright:: Copyright (c) 2009 Opscode, Inc.
# License:: Apache License, Version 2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

require_plugin "#{os}::virtualization"

# Only collect libvirt data when this node acts as a virtualization host.
unless virtualization.nil? || !(virtualization[:role].eql?("host"))
  require 'libvirt'

  # Connect to the default hypervisor for the detected system.
  virtconn = Libvirt::open("#{virtualization[:system]}:///system")
  virtualization[:uri] = virtconn.uri
  virtualization[:capabilities] = virtconn.capabilities
  virtualization[:nodeinfo] = virtconn.nodeinfo

  # num_of_* return Integers, and every Integer (including 0) is truthy in
  # Ruby, so compare against zero explicitly instead of testing truthiness.
  if virtconn.num_of_domains > 0
    virtualization[:domains] = Mash.new
    virtconn.list_domains.each do |d|
      dom = virtconn.lookup_domain_by_id(d)
      virtualization[:domains][d] = dom.info.attributes
      virtualization[:domains][d]["xml_desc"] = dom.xml_desc
    end
  end
  if virtconn.num_of_networks > 0
    virtualization[:networks] = Mash.new
    virtconn.list_networks.each do |n|
      net = virtconn.lookup_network_by_name(n)
      virtualization[:networks][n] = Mash.new
      virtualization[:networks][n]["xml_desc"] = net.xml_desc
      virtualization[:networks][n]["bridge"] = net.bridge_name
    end
  end
  if virtconn.num_of_storage_pools > 0
    virtualization[:storage] = Mash.new
    virtconn.list_storage_pools.each do |pool|
      pool_obj = virtconn.lookup_storage_pool_by_name(pool)
      virtualization[:storage][pool] = Mash.new
      virtualization[:storage][pool][:info] = pool_obj.info.attributes
      virtualization[:storage][pool][:volumes] = Mash.new
      # Volumes belong to the pool, not the connection: the original called
      # virtconn.list_volumes / virtconn.list_volume_by_name, neither of
      # which exists on the connection object.
      pool_obj.list_volumes.each do |v|
        virtualization[:storage][pool][:volumes][v] = pool_obj.lookup_volume_by_name(v).info.attributes
      end
    end
  end
  virtconn.close
end
[OHAI-36] proper collection of node info
#
# Author:: Benjamin Black (<bb@opscode.com>)
# Copyright:: Copyright (c) 2009 Opscode, Inc.
# License:: Apache License, Version 2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

require_plugin "#{os}::virtualization"

# Only collect libvirt data when this node acts as a virtualization host.
unless virtualization.nil? || !(virtualization[:role].eql?("host"))
  require 'libvirt'

  # libvirt exposes KVM through the 'qemu' driver URI.
  virtconn = Libvirt::open("#{(virtualization[:system].eql?('kvm') ? 'qemu' : virtualization[:system])}:///system")
  virtualization[:uri] = virtconn.uri
  virtualization[:capabilities] = virtconn.capabilities
  virtualization[:nodeinfo] = Mash.new
  # why doesn't the NodeInfo object respond to attributes? argh.
  nodeinfo = virtconn.node_get_info
  ['cores','cpus','memory','mhz','model','nodes','sockets','threads'].each {|a| virtualization[:nodeinfo][a] = nodeinfo.send(a)}

  if virtconn.num_of_domains > 0
    virtualization[:domains] = Mash.new
    virtconn.list_domains.each do |d|
      dom = virtconn.lookup_domain_by_id(d)
      virtualization[:domains][d] = dom.info.attributes
      virtualization[:domains][d]["xml_desc"] = dom.xml_desc
    end
  end
  if virtconn.num_of_networks > 0
    virtualization[:networks] = Mash.new
    virtconn.list_networks.each do |n|
      net = virtconn.lookup_network_by_name(n)
      virtualization[:networks][n] = Mash.new
      virtualization[:networks][n]["xml_desc"] = net.xml_desc
      virtualization[:networks][n]["bridge"] = net.bridge_name
    end
  end
  if virtconn.num_of_storage_pools > 0
    virtualization[:storage] = Mash.new
    virtconn.list_storage_pools.each do |pool|
      pool_obj = virtconn.lookup_storage_pool_by_name(pool)
      virtualization[:storage][pool] = Mash.new
      virtualization[:storage][pool][:info] = pool_obj.info.attributes
      virtualization[:storage][pool][:volumes] = Mash.new
      # Volumes belong to the pool, not the connection: the original called
      # virtconn.list_volumes / virtconn.list_volume_by_name, neither of
      # which exists on the connection object.
      pool_obj.list_volumes.each do |v|
        virtualization[:storage][pool][:volumes][v] = pool_obj.lookup_volume_by_name(v).info.attributes
      end
    end
  end
  virtconn.close
end
|
require 'omniauth'
require 'uri'
require 'yaml'
require 'httparty'
module OmniAuth
module Strategies
#
# Authentication to CASPORT
#
# @example Basic Usage
#
#   use OmniAuth::Strategies::Casport
#
# @example Full Options Usage
#
#   use OmniAuth::Strategies::Casport, {
#     :cas_server    => 'http://cas.slkdemos.com/users/',
#     :format        => 'json', 'xml', 'html', etc. || Defaults to 'xml'
#     :format_header => 'application/json', 'application/xml' || Defaults to 'application/xml'
#     :ssl_ca_file   => 'path/to/ca_file.crt',
#     :pem_cert      => '/path/to/cert.pem',
#     :pem_cert_pass => 'keep it secret, keep it safe.',
#   }
class Casport
  include OmniAuth::Strategy

  option :uid_field, 'dn'
  option :setup, true
  option :cas_server, 'http://default_setting_changeme.casport.dev'
  option :ssl_ca_file, nil
  option :pem_cert, 'default_path_changeme/path/to/cert.pem'
  option :pem_cert_pass, nil
  option :format_header, 'application/json'
  option :format, 'json'
  option :dn_header, 'HTTP_SSL_CLIENT_S_DN'
  option :debug, nil
  option :log_file, nil
  option :fake_dn, nil

  # Default shape of a CASPORT user record.
  CASPORT_DEFAULTS = {
    :dn               => nil,
    :full_name        => nil,
    :last_name        => nil,
    :uid              => nil,
    :first_name       => "",
    :display_name     => "",
    :title            => "",
    :email            => "",
    :employee_id      => "",
    :personal_title   => "",
    :telephone_number => "",
  }

  # NOTE(review): these assign class-level instance variables, not per-request
  # state; inside instance methods @user/@user_uid start out nil.
  @user = {}
  @user_uid = ""

  # Request phase: look the user up in CASPORT (by client-cert DN) and
  # bounce straight to the callback URL — there is no interactive login.
  def request_phase
    if !$LOG && @options[:debug] && @options[:log_file]
      require 'logger'
      $LOG ||= Logger.new(@options[:log_file])
    end
    $LOG.debug "#request_phase IN, user_uid: '#{@user_uid}', reqenv: #{request.env[@options[:dn_header]]}" if $LOG
    # Setup HTTParty
    $LOG.debug "Setting up HTTParty" if $LOG
    CasportHTTParty.setup_httparty @options
    # Call to fill the user object
    get_user
    # Return response to the callback_url
    $LOG.debug "#request_phase OUT" if $LOG
    redirect callback_url
  end

  # Build the OmniAuth auth hash from the CASPORT user record.
  def auth_hash
    $LOG.debug "#auth_hash IN, user_uid: '#{@user_uid}', reqenv: #{request.env[@options[:dn_header]]}" if $LOG
    user_obj = get_user
    $LOG.debug "#auth_hash OUT" if $LOG
    OmniAuth::Utils.deep_merge(super, {
      'uid'   => user_obj[@options[:uid_field]],
      'info'  => {
        'name'  => user_obj['full_name'],
        'email' => user_obj['email']
      },
      'extra' => {'user_hash' => user_obj}
    })
  end

  # Query for the user against CASPORT; memoizes into @user.
  # @return [Hash, nil] parsed user record, or nil when the lookup failed
  def get_user
    $LOG.debug "#get_user IN, user_uid: '#{@user_uid}', reqenv: #{request.env[@options[:dn_header]]}" if $LOG
    # FIX: previously `return if @user`, which returned nil on the memoized
    # path and made auth_hash crash on user_obj[...] after request_phase.
    return @user if @user # no extra http calls
    $LOG.debug "Must get user from CASPORT" if $LOG
    if @user_uid.nil? or @user_uid.empty?
      # Checking for DN
      if request.env[@options[:dn_header]].nil? or request.env[@options[:dn_header]].empty? and @options[:fake_dn].nil?
        # No clue what the DN or UID is...
        $LOG.debug @options[:fake_dn]
        $LOG.debug "#request_phase Error: No DN provided for UID in request.env[#{@options[:dn_header]}]" if $LOG
        raise "#request_phase Error: No DN provided for UID"
      else
        # Set UID to DN (fake_dn takes precedence, for testing)
        if !@options[:fake_dn].nil?
          @user_uid=@options[:fake_dn]
        else
          @user_uid = request.env[@options[:dn_header]]
        end
      end
    end
    # Fix DN order (if we have a DN) for CASPORT to work properly
    if @user_uid.include?('/') or @user_uid.include?(',')
      # Convert '/' to ',' and split on ','
      @user_uid = @user_uid.gsub('/',',').split(',').reject{|array| array.empty? }
      # See if the DN is in the order CASPORT expects (and fix it if needed)
      @user_uid = @user_uid.reverse if @user_uid.first.downcase.include? 'c='
      # Join our array of DN elements back together with a comma as expected by CASPORT
      @user_uid = @user_uid.join(',')
    end
    url = URI.escape("#{@options[:cas_server]}/#{@user_uid}")
    puts "#get_user Requesting URI: #{url}"
    $LOG.debug "#get_user Requesting URI: #{url}" if $LOG
    response = CasportHTTParty.get(url)
    if response.success?
      $LOG.debug "#get_user Response: Success!" if $LOG
      $LOG.debug "#get_user response contents: #{response}" if $LOG
      $LOG.debug "#get_user OUT" if $LOG
      @user = response.parsed_response
    else
      $LOG.error "#get_user Response: failure." if $LOG
      @user = nil
    end
  end
end
# Helper class that owns the HTTParty configuration, since OmniAuth 1.0+
# appears to conflict with mixing HTTParty directly into the strategy.
class CasportHTTParty
  include HTTParty

  # Apply strategy options (format, headers, SSL/PEM material) to HTTParty.
  def self.setup_httparty(options)
    options[:format] ||= 'json'
    options[:format_header] ||= 'application/json'
    accept = options[:format_header]
    headers 'Accept' => accept
    headers 'Content-Type' => accept
    headers 'X-XSRF-UseProtection' => 'false' if accept
    return unless options[:ssl_ca_file]
    ssl_ca_file options[:ssl_ca_file]
    cert = File.read(options[:pem_cert])
    if options[:pem_cert_pass]
      pem cert, options[:pem_cert_pass]
    else
      pem cert
    end
  end
end
end
end
ADVSVC-385: Correct the default user name reported to GitLab so it matches the name returned by the official govport service
require 'omniauth'
require 'uri'
require 'yaml'
require 'httparty'
module OmniAuth
module Strategies
#
# Authentication to CASPORT
#
# @example Basic Usage
#
#   use OmniAuth::Strategies::Casport
#
# @example Full Options Usage
#
#   use OmniAuth::Strategies::Casport, {
#     :cas_server    => 'http://cas.slkdemos.com/users/',
#     :format        => 'json', 'xml', 'html', etc. || Defaults to 'xml'
#     :format_header => 'application/json', 'application/xml' || Defaults to 'application/xml'
#     :ssl_ca_file   => 'path/to/ca_file.crt',
#     :pem_cert      => '/path/to/cert.pem',
#     :pem_cert_pass => 'keep it secret, keep it safe.',
#   }
class Casport
  include OmniAuth::Strategy

  option :uid_field, 'dn'
  option :setup, true
  option :cas_server, 'http://default_setting_changeme.casport.dev'
  option :ssl_ca_file, nil
  option :pem_cert, 'default_path_changeme/path/to/cert.pem'
  option :pem_cert_pass, nil
  option :format_header, 'application/json'
  option :format, 'json'
  option :dn_header, 'HTTP_SSL_CLIENT_S_DN'
  option :debug, nil
  option :log_file, nil
  option :fake_dn, nil

  # Default shape of a CASPORT user record (camelCase keys per the service).
  CASPORT_DEFAULTS = {
    :dn               => nil,
    :fullName         => nil,
    :lastName         => nil,
    :uid              => nil,
    :firstName        => "",
    :displayName      => "",
    :title            => "",
    :email            => "",
    :employee_id      => "",
    :personal_title   => "",
    :telephone_number => "",
  }

  # NOTE(review): these assign class-level instance variables, not per-request
  # state; inside instance methods @user/@user_uid start out nil.
  @user = {}
  @user_uid = ""

  # Request phase: look the user up in CASPORT (by client-cert DN) and
  # bounce straight to the callback URL — there is no interactive login.
  def request_phase
    if !$LOG && @options[:debug] && @options[:log_file]
      require 'logger'
      $LOG ||= Logger.new(@options[:log_file])
    end
    $LOG.debug "#request_phase IN, user_uid: '#{@user_uid}', reqenv: #{request.env[@options[:dn_header]]}" if $LOG
    # Setup HTTParty
    $LOG.debug "Setting up HTTParty" if $LOG
    CasportHTTParty.setup_httparty @options
    # Call to fill the user object
    get_user
    # Return response to the callback_url
    $LOG.debug "#request_phase OUT" if $LOG
    redirect callback_url
  end

  # Build the OmniAuth auth hash from the CASPORT user record.
  def auth_hash
    $LOG.debug "#auth_hash IN, user_uid: '#{@user_uid}', reqenv: #{request.env[@options[:dn_header]]}" if $LOG
    user_obj = get_user
    $LOG.debug "#auth_hash OUT" if $LOG
    # Display name comes from the camelCase fields the service returns.
    username = user_obj['firstName']+" "+user_obj['lastName']
    OmniAuth::Utils.deep_merge(super, {
      'uid'   => user_obj[@options[:uid_field]],
      'info'  => {
        'name'  => username,
        'email' => user_obj['email']
      },
      'extra' => {'user_hash' => user_obj}
    })
  end

  # Query for the user against CASPORT; memoizes into @user.
  # @return [Hash, nil] parsed user record, or nil when the lookup failed
  def get_user
    $LOG.debug "#get_user IN, user_uid: '#{@user_uid}', reqenv: #{request.env[@options[:dn_header]]}" if $LOG
    # FIX: previously `return if @user`, which returned nil on the memoized
    # path and made auth_hash crash building the username after request_phase.
    return @user if @user # no extra http calls
    $LOG.debug "Must get user from CASPORT" if $LOG
    if @user_uid.nil? or @user_uid.empty?
      # Checking for DN
      if request.env[@options[:dn_header]].nil? or request.env[@options[:dn_header]].empty? and @options[:fake_dn].nil?
        # No clue what the DN or UID is...
        $LOG.debug @options[:fake_dn]
        $LOG.debug "#request_phase Error: No DN provided for UID in request.env[#{@options[:dn_header]}]" if $LOG
        raise "#request_phase Error: No DN provided for UID"
      else
        # Set UID to DN (fake_dn takes precedence, for testing)
        if !@options[:fake_dn].nil?
          @user_uid=@options[:fake_dn]
        else
          @user_uid = request.env[@options[:dn_header]]
        end
      end
    end
    # Fix DN order (if we have a DN) for CASPORT to work properly
    if @user_uid.include?('/') or @user_uid.include?(',')
      # Convert '/' to ',' and split on ','
      @user_uid = @user_uid.gsub('/',',').split(',').reject{|array| array.empty? }
      # See if the DN is in the order CASPORT expects (and fix it if needed)
      @user_uid = @user_uid.reverse if @user_uid.first.downcase.include? 'c='
      # Join our array of DN elements back together with a comma as expected by CASPORT
      @user_uid = @user_uid.join(',')
    end
    url = URI.escape("#{@options[:cas_server]}/#{@user_uid}")
    puts "#get_user Requesting URI: #{url}"
    $LOG.debug "#get_user Requesting URI: #{url}" if $LOG
    response = CasportHTTParty.get(url)
    if response.success?
      $LOG.debug "#get_user Response: Success!" if $LOG
      $LOG.debug "#get_user response contents: #{response}" if $LOG
      $LOG.debug "#get_user OUT" if $LOG
      @user = response.parsed_response
    else
      $LOG.error "#get_user Response: failure." if $LOG
      @user = nil
    end
  end
end
# Helper class to setup HTTParty, as OmniAuth 1.0+ seems to conflict with HTTParty.
class CasportHTTParty
include HTTParty
# Apply strategy options to HTTParty: response format, Accept/Content-Type
# headers, and (when :ssl_ca_file is set) the CA file plus client PEM cert.
# Mutates `options` by filling in format defaults.
def self.setup_httparty(options)
options[:format] ||= 'json'
options[:format_header] ||= 'application/json'
headers 'Accept' => options[:format_header]
headers 'Content-Type' => options[:format_header]
# format_header is always set by the defaults above, so this header is
# effectively unconditional.
headers 'X-XSRF-UseProtection' => 'false' if options[:format_header]
if options[:ssl_ca_file]
ssl_ca_file options[:ssl_ca_file]
# Client certificate, with or without a passphrase.
if options[:pem_cert_pass]
pem File.read(options[:pem_cert]), options[:pem_cert_pass]
else
pem File.read(options[:pem_cert])
end
end
end
end
end
end
|
# Generated by jeweler
# DO NOT EDIT THIS FILE DIRECTLY
# Instead, edit Jeweler::Tasks in Rakefile, and run 'rake gemspec'
# -*- encoding: utf-8 -*-

Gem::Specification.new do |s|
  s.name = "faceted"
  s.version = "0.8.1"

  s.required_rubygems_version = Gem::Requirement.new(">= 0") if s.respond_to? :required_rubygems_version=
  s.authors = ["Corey Ehmke", "Max Thom Stahl"]
  s.date = "2012-09-27"
  s.description = "Faceted provides set of tools, patterns, and modules for use in API implementations."
  s.email = "corey@trunkclub.com"
  s.extra_rdoc_files = ["LICENSE.txt", "README.md"]
  s.files = %w[
    .rspec
    Gemfile
    Gemfile.lock
    LICENSE.txt
    README.md
    Rakefile
    VERSION
    faceted.gemspec
    lib/faceted.rb
    lib/faceted/collector.rb
    lib/faceted/controller.rb
    lib/faceted/has_object.rb
    lib/faceted/interface.rb
    lib/faceted/model.rb
    lib/faceted/presenter.rb
    spec/collector_spec.rb
    spec/presenter_spec.rb
    spec/spec_helper.rb
  ]
  s.homepage = "http://github.com/trunkclub/faceted"
  s.licenses = ["MIT"]
  s.require_paths = ["lib"]
  s.rubygems_version = "1.8.24"
  s.summary = "Faceted provides set of tools, patterns, and modules for use in API implementations."

  # Dependency tables; applied below according to the RubyGems capabilities
  # detected at load time, exactly as the expanded jeweler output did.
  runtime_deps = { "activesupport" => [">= 2.3.5"], "activemodel" => [">= 0"] }
  dev_deps = {
    "shoulda"   => [">= 0"],
    "rdoc"      => ["~> 3.12"],
    "rspec"     => [">= 0"],
    "bundler"   => [">= 0"],
    "jeweler"   => ["~> 1.8.4"],
    "simplecov" => [">= 0"],
  }

  if s.respond_to? :specification_version then
    s.specification_version = 3

    if Gem::Version.new(Gem::VERSION) >= Gem::Version.new('1.2.0') then
      runtime_deps.each { |name, req| s.add_runtime_dependency(name, req) }
      dev_deps.each { |name, req| s.add_development_dependency(name, req) }
    else
      runtime_deps.merge(dev_deps).each { |name, req| s.add_dependency(name, req) }
    end
  else
    runtime_deps.merge(dev_deps).each { |name, req| s.add_dependency(name, req) }
  end
end
Regenerate gemspec for version 0.8.2
# Generated by jeweler
# DO NOT EDIT THIS FILE DIRECTLY
# Instead, edit Jeweler::Tasks in Rakefile, and run 'rake gemspec'
# -*- encoding: utf-8 -*-

Gem::Specification.new do |s|
  s.name = "faceted"
  s.version = "0.8.2"

  s.required_rubygems_version = Gem::Requirement.new(">= 0") if s.respond_to? :required_rubygems_version=
  s.authors = ["Corey Ehmke", "Max Thom Stahl"]
  s.date = "2012-09-27"
  s.description = "Faceted provides set of tools, patterns, and modules for use in API implementations."
  s.email = "corey@trunkclub.com"
  s.extra_rdoc_files = ["LICENSE.txt", "README.md"]
  s.files = %w[
    .rspec
    Gemfile
    Gemfile.lock
    LICENSE.txt
    README.md
    Rakefile
    VERSION
    faceted.gemspec
    lib/faceted.rb
    lib/faceted/collector.rb
    lib/faceted/controller.rb
    lib/faceted/has_object.rb
    lib/faceted/interface.rb
    lib/faceted/model.rb
    lib/faceted/presenter.rb
    spec/collector_spec.rb
    spec/presenter_spec.rb
    spec/spec_helper.rb
  ]
  s.homepage = "http://github.com/trunkclub/faceted"
  s.licenses = ["MIT"]
  s.require_paths = ["lib"]
  s.rubygems_version = "1.8.24"
  s.summary = "Faceted provides set of tools, patterns, and modules for use in API implementations."

  # Dependency tables; applied below according to the RubyGems capabilities
  # detected at load time, exactly as the expanded jeweler output did.
  runtime_deps = { "activesupport" => [">= 2.3.5"], "activemodel" => [">= 0"] }
  dev_deps = {
    "shoulda"   => [">= 0"],
    "rdoc"      => ["~> 3.12"],
    "rspec"     => [">= 0"],
    "bundler"   => [">= 0"],
    "jeweler"   => ["~> 1.8.4"],
    "simplecov" => [">= 0"],
  }

  if s.respond_to? :specification_version then
    s.specification_version = 3

    if Gem::Version.new(Gem::VERSION) >= Gem::Version.new('1.2.0') then
      runtime_deps.each { |name, req| s.add_runtime_dependency(name, req) }
      dev_deps.each { |name, req| s.add_development_dependency(name, req) }
    else
      runtime_deps.merge(dev_deps).each { |name, req| s.add_dependency(name, req) }
    end
  else
    runtime_deps.merge(dev_deps).each { |name, req| s.add_dependency(name, req) }
  end
end
require 'omniauth-oauth2'
require 'omniauth/swagger/oauth2_definition'
require 'omniauth/swagger/default_provider_lookup'
require 'omniauth/swagger/uid_options'
require 'diesel'
module OmniAuth
module Strategies
class Swagger < OmniAuth::Strategies::OAuth2
OPTION_UID = 'uid'.freeze
OPTION_SPECIFICATION = 'specification'.freeze
OPTION_SUBDOMAIN = 'subdomain'.freeze
option :providers, nil
option :provider_lookup, nil
option :provider_param, 'provider'
def setup_phase
load_definition
@definition.load_options(options)
super
end
def authorize_params
super.tap do |params|
passthru_params = @definition.authorize_params || []
if @definition.scopes != nil && @definition.scopes.any?
passthru_params << 'scope'
end
passthru_params.each do |v|
if request.params[v]
params[v.to_sym] = request.params[v]
end
end
end
end
def callback_url
url = super
url + (url.index('?') ? '&' : '?') + "#{options[:provider_param]}=#{provider_name}"
end
uid do
if uid_options.nil?
raise "Missing #{OPTION_UID} setting for provider '#{provider_name}'"
elsif uid_options.api?
uid_options.
api_value_path.
split('.').
reduce(raw_info) { |memo, key| memo[key] }.
to_s
elsif uid_options.access_token_param?
access_token.params[uid_options.param]
else
raise "Unsupported UID option: #{uid_options.inspect}"
end
end
extra do
{ "raw_info" => raw_info }
end
protected
def provider_name
@provider_name ||= request.params[options[:provider_param]].to_sym
end
def provider_options
@provider_options ||= provider_lookup.get(provider_name, env)
end
def provider_lookup
@provider_lookup ||= begin
if lookup_opt = options[:provider_lookup]
if lookup_opt.kind_of? Class
lookup_opt.new
else
lookup_opt
end
else
OmniAuth::Swagger::DefaultProviderLookup.new(options[:providers])
end
end
end
def uid_options
@uid_options ||= OmniAuth::Swagger::UIDOptions.from_options(provider_options[OPTION_UID])
end
def raw_info
if uid_options
api_options = {@definition.oauth2_key => {token: access_token.token}}
if provider_options[OPTION_SUBDOMAIN]
api_options[:subdomain] = provider_options[OPTION_SUBDOMAIN]
end
api_class = Diesel.build_api(specification)
api = api_class.new(api_options)
api.__send__(uid_options.api_operation, uid_options.api_params)
else
{}
end
end
def load_definition
specification.security_definitions.each_pair do |name, definition|
if definition.type == 'oauth2'
@definition = OmniAuth::Swagger::OAuth2Definition.new(definition, specification, provider_options)
end
end
nil
end
def specification
@specification ||= load_specification
end
# The 'specification' provider option is a callable so that the
# (possibly expensive) spec load is deferred until actually needed.
def load_specification
  provider_options[OPTION_SPECIFICATION].call
end
end
end
end
Report UID lookup errors better
require 'omniauth-oauth2'
require 'omniauth/swagger/oauth2_definition'
require 'omniauth/swagger/default_provider_lookup'
require 'omniauth/swagger/uid_options'
require 'diesel'
module OmniAuth
module Strategies
# OmniAuth strategy whose OAuth2 endpoints, scopes and UID lookup are
# derived from a Swagger (OpenAPI) specification's security definitions.
# The concrete provider is selected per request via a request parameter
# (options[:provider_param], "provider" by default).
class Swagger < OmniAuth::Strategies::OAuth2
  OPTION_UID = 'uid'.freeze                      # provider config key: UID resolution settings
  OPTION_SPECIFICATION = 'specification'.freeze  # provider config key: callable returning the spec
  OPTION_SUBDOMAIN = 'subdomain'.freeze          # provider config key: optional API subdomain

  option :providers, nil
  option :provider_lookup, nil
  option :provider_param, 'provider'

  # Resolve the provider's oauth2 security definition before OmniAuth
  # runs its own setup.
  def setup_phase
    load_definition
    @definition.load_options(options)
    super
  end

  # Merges whitelisted request params into the OAuth2 authorize params;
  # 'scope' is also passed through when the definition declares scopes.
  def authorize_params
    super.tap do |params|
      passthru_params = @definition.authorize_params || []
      if @definition.scopes != nil && @definition.scopes.any?
        passthru_params << 'scope'
      end
      passthru_params.each do |v|
        if request.params[v]
          params[v.to_sym] = request.params[v]
        end
      end
    end
  end

  # Appends the provider name to the callback URL so the callback phase
  # can identify which provider is being completed.
  def callback_url
    url = super
    url + (url.index('?') ? '&' : '?') + "#{options[:provider_param]}=#{provider_name}"
  end

  # Resolves the user's unique id per the provider's 'uid' settings:
  # either an API lookup walking a dotted value path into the fetched
  # user info, or a parameter returned alongside the access token.
  uid do
    if uid_options.nil?
      raise "Missing #{OPTION_UID} setting for provider '#{provider_name}'"
    elsif uid_options.api?
      # nil-guard each step so a missing key reports a clear error below
      val = uid_options.
        api_value_path.
        split('.').
        reduce(raw_info) { |memo, key| memo && memo[key] }
      if val
        val.to_s
      else
        raise "Invalid UID value path #{uid_options.api_value_path}: #{raw_info.inspect}"
      end
    elsif uid_options.access_token_param?
      access_token.params[uid_options.param]
    else
      raise "Unsupported UID option: #{provider_options[OPTION_UID].inspect}"
    end
  end

  # Expose the fetched user info under the auth hash's extra section.
  extra do
    { "raw_info" => raw_info }
  end

  protected

  # Provider name taken from the configured request parameter.
  def provider_name
    @provider_name ||= request.params[options[:provider_param]].to_sym
  end

  # Configuration hash for the current provider.
  def provider_options
    @provider_options ||= provider_lookup.get(provider_name, env)
  end

  # Lookup translating a provider name into its configuration; accepts
  # a class (instantiated), an instance, or defaults to
  # DefaultProviderLookup over options[:providers].
  def provider_lookup
    @provider_lookup ||= begin
      if lookup_opt = options[:provider_lookup]
        if lookup_opt.kind_of? Class
          lookup_opt.new
        else
          lookup_opt
        end
      else
        OmniAuth::Swagger::DefaultProviderLookup.new(options[:providers])
      end
    end
  end

  # Parsed 'uid' settings for the current provider (may be nil).
  def uid_options
    @uid_options ||= OmniAuth::Swagger::UIDOptions.from_options(provider_options[OPTION_UID])
  end

  # Remote user info fetched via a Diesel-built API client using the
  # access token; {} when no UID lookup is configured.
  def raw_info
    if uid_options
      api_options = {@definition.oauth2_key => {token: access_token.token}}
      if provider_options[OPTION_SUBDOMAIN]
        api_options[:subdomain] = provider_options[OPTION_SUBDOMAIN]
      end
      api_class = Diesel.build_api(specification)
      api = api_class.new(api_options)
      api.__send__(uid_options.api_operation, uid_options.api_params)
    else
      {}
    end
  end

  # Picks the oauth2 security definition from the specification and
  # stores it in @definition. Always returns nil.
  def load_definition
    specification.security_definitions.each_pair do |name, definition|
      if definition.type == 'oauth2'
        @definition = OmniAuth::Swagger::OAuth2Definition.new(definition, specification, provider_options)
      end
    end
    nil
  end

  # Memoized Swagger specification for the current provider.
  def specification
    @specification ||= load_specification
  end

  # The specification option is a callable so the (possibly expensive)
  # spec load is deferred until needed.
  def load_specification
    provider_options[OPTION_SPECIFICATION].call
  end
end
end
end
|
# encoding: utf-8
require 'open_classes/string/justify_table'
# Array
class Array
  # Array(Array, Array...) to table format.
  #
  # === Example
  #
  #   [['header1', 'header2', 'header3'],['line1_1', 'line1_2', 'line1_3']].to_table
  #
  # result
  #   |header1|header2|header3|
  #   |line1_1|line1_2|line1_3|
  #
  def to_table
    # Build the full table string first, THEN center-justify it.
    # Previously justify_table was chained onto the trailing "\n"
    # literal (method calls bind tighter than +), so only "\n" was
    # justified and the table itself came back unformatted.
    table = reduce([]) do |rets, lines|
      ret = lines.reduce([]) { |ret, column|ret << column; ret }
      rets << "|#{ret.join("|")}|"
    end.join("\n") + "\n"
    table.justify_table(:center)
  end
end
fix Array#to_table
# encoding: utf-8
require 'open_classes/string/justify_table'
# Array
class Array
  # Render an array of row arrays as a pipe-delimited table whose
  # columns are centered via String#justify_table.
  #
  # === Example
  #
  #   [['header1', 'header2', 'header3'],['line1_1', 'line1_2', 'line1_3']].to_table
  #
  # result
  #   |header1|header2|header3|
  #   |line1_1|line1_2|line1_3|
  #
  def to_table
    rows = map { |columns| "|#{columns.join("|")}|" }
    table = rows.join("\n") + "\n"
    table.justify_table(:center)
  end
end
|
# encoding: utf-8
module PhatPgsearch
# Builds and stores a PostgreSQL tsvector search index value for an
# ActiveRecord model instance, based on a PhatPgsearch definition.
class IndexBuilder
  attr_reader :base, :definition, :build, :connection

  # Computes the index immediately on construction.
  def initialize(base, definition)
    @base = base
    @definition = definition
    @connection = base.class.connection
    build_index
  end

  protected

  # Assembles a `to_tsvector(...) || to_tsvector(...)` expression from
  # the configured fields (optionally weighted via setweight) and stores
  # the database-computed value into the record's index field.
  def build_index
    partials = []
    definition.fields.each do |field_definition|
      field = field_definition.first
      field_options = field_definition.extract_options!
      field_content = base.respond_to?(field) ? base.send(field.to_s) : ''
      if not field_options[:weight].nil? and [:a, :b, :c, :d].include? field_options[:weight].to_sym
        # Cast to String here too: non-string attributes (dates,
        # integers, nil) previously reached sanitize un-cast in the
        # weighted branch only, unlike the unweighted branch below.
        partial = "setweight(to_tsvector(#{base.class.sanitize(definition.catalog)}, #{base.class.sanitize(field_content.to_s)}), '#{field_options[:weight].to_s.upcase}')"
      else
        partial = "to_tsvector(#{base.class.sanitize(definition.catalog)}, #{base.class.sanitize(field_content.to_s)})"
      end
      partials << partial
    end
    base.send("#{definition.index_field}=", base.class.connection.select_value("SELECT #{partials.join(' || ')}"))
  end
end
end
cast search to string
# encoding: utf-8
module PhatPgsearch
# Builds and stores a PostgreSQL tsvector search index value for an
# ActiveRecord model instance, based on a PhatPgsearch definition.
class IndexBuilder
  attr_reader :base, :definition, :build, :connection

  # Computes the index immediately on construction.
  def initialize(base, definition)
    @base = base
    @definition = definition
    @connection = base.class.connection
    build_index
  end

  protected

  # Assembles a `to_tsvector(...) || to_tsvector(...)` expression from
  # the configured fields (optionally weighted via setweight) and stores
  # the database-computed value into the record's index field.
  def build_index
    partials = []
    definition.fields.each do |field_definition|
      field = field_definition.first
      field_options = field_definition.extract_options!
      # Missing accessors fall back to an empty string.
      field_content = base.respond_to?(field) ? base.send(field.to_s) : ''
      if not field_options[:weight].nil? and [:a, :b, :c, :d].include? field_options[:weight].to_sym
        partial = "setweight(to_tsvector(#{base.class.sanitize(definition.catalog)}, #{base.class.sanitize(field_content.to_s)}), '#{field_options[:weight].to_s.upcase}')"
      else
        partial = "to_tsvector(#{base.class.sanitize(definition.catalog)}, #{base.class.sanitize(field_content.to_s)})"
      end
      partials << partial
    end
    base.send("#{definition.index_field}=", base.class.connection.select_value("SELECT #{partials.join(' || ')}"))
  end
end
end |
# -*- encoding : utf-8 -*-
require 'pragmatic_segmenter/list'
require 'pragmatic_segmenter/abbreviation_replacer'
require 'pragmatic_segmenter/number'
require 'pragmatic_segmenter/rules/ellipsis'
require 'pragmatic_segmenter/exclamation_words'
require 'pragmatic_segmenter/punctuation_replacer'
require 'pragmatic_segmenter/between_punctuation'
module PragmaticSegmenter
# This class handles segmenting the text.
# Segments text into sentences: the text is normalized (lists,
# abbreviations, numbers, punctuation placeholders) and then split on
# language-specific sentence boundaries.
class Process
  attr_reader :text

  # @param text [String] text to segment
  # @param language [Module] language rule set (defaults to Common)
  def initialize(text:, language: Languages::Common)
    @text = text
    @language = language
  end

  # Returns the array of sentence segments found in the text.
  def process
    reformatted_text = List.new(text: text).add_line_break
    reformatted_text = replace_abbreviations(reformatted_text)
    reformatted_text = replace_numbers(reformatted_text)
    reformatted_text = replace_continuous_punctuation(reformatted_text)
    reformatted_text.apply(@language::AbbreviationsWithMultiplePeriodsAndEmailRule)
    reformatted_text.apply(@language::GeoLocationRule)
    split_into_segments(reformatted_text)
  end

  private

  def split_into_segments(txt)
    check_for_parens_between_quotes(txt).split("\r")
      .map! { |segment| segment.apply(@language::SingleNewLineRule, @language::EllipsisRules::All) }
      .map { |segment| check_for_punctuation(segment) }.flatten
      .map! { |segment| segment.apply(@language::SubSymbolsRules::All) }
      .map { |segment| post_process_segments(segment) }
      .flatten.compact.delete_if(&:empty?)
      .map! { |segment| segment.apply(@language::SubSingleQuoteRule) }
  end

  def post_process_segments(txt)
    return if consecutive_underscore?(txt) || txt.length < 2
    txt.apply(@language::ReinsertEllipsisRules::All).apply(@language::ExtraWhiteSpaceRule)
    if txt =~ @language::QUOTATION_AT_END_OF_SENTENCE_REGEX
      txt.split(@language::SPLIT_SPACE_QUOTATION_AT_END_OF_SENTENCE_REGEX)
    else
      txt.tr("\n", '').strip
    end
  end

  # Marks parenthesised spans inside double quotes with \r so they
  # split as their own segments.
  def check_for_parens_between_quotes(txt)
    return txt unless txt =~ @language::PARENS_BETWEEN_DOUBLE_QUOTES_REGEX
    txt.gsub!(@language::PARENS_BETWEEN_DOUBLE_QUOTES_REGEX) do |match|
      match.gsub(/\s(?=\()/, "\r").gsub(/(?<=\))\s/, "\r")
    end
  end

  # Masks runs of !/? with placeholder tokens so they don't split early.
  def replace_continuous_punctuation(txt)
    return txt unless txt =~ @language::CONTINUOUS_PUNCTUATION_REGEX
    txt.gsub!(@language::CONTINUOUS_PUNCTUATION_REGEX) do |match|
      match.gsub(/!/, '&ᓴ&').gsub(/\?/, '&ᓷ&')
    end
  end

  def consecutive_underscore?(txt)
    # Rubular: http://rubular.com/r/fTF2Ff3WBL
    txt.gsub(/_{3,}/, '').length.eql?(0)
  end

  def check_for_punctuation(txt)
    if @language::Punctuations.any? { |p| txt.include?(p) }
      process_text(txt)
    else
      txt
    end
  end

  def process_text(txt)
    txt << 'ȸ' unless @language::Punctuations.any? { |p| txt[-1].include?(p) }
    ExclamationWords.apply_rules(txt)
    between_punctuation(txt)
    txt = txt.apply(
      @language::DoublePunctuationRules::All,
      @language::QuestionMarkInQuotationRule,
      @language::ExclamationPointRules::All
    )
    txt = List.new(text: txt).replace_parens
    sentence_boundary_punctuation(txt)
  end

  def replace_numbers(txt)
    Number.new(text: txt).replace
  end

  # Pass the configured language through so language-specific
  # abbreviation rules are used; previously the replacer always fell
  # back to its default language regardless of @language.
  def replace_abbreviations(txt)
    AbbreviationReplacer.new(text: txt, language: @language).replace
  end

  def between_punctuation(txt)
    BetweenPunctuation.new(text: txt).replace
  end

  def sentence_boundary_punctuation(txt)
    txt.scan(@language::SENTENCE_BOUNDARY_REGEX)
  end
end
end
Use language param in process for abb.replacer
# -*- encoding : utf-8 -*-
require 'pragmatic_segmenter/list'
require 'pragmatic_segmenter/abbreviation_replacer'
require 'pragmatic_segmenter/number'
require 'pragmatic_segmenter/rules/ellipsis'
require 'pragmatic_segmenter/exclamation_words'
require 'pragmatic_segmenter/punctuation_replacer'
require 'pragmatic_segmenter/between_punctuation'
module PragmaticSegmenter
# This class handles segmenting the text.
# Segments text into sentences: the text is normalized (lists,
# abbreviations, numbers, punctuation placeholders) and then split on
# language-specific sentence boundaries.
class Process
  attr_reader :text

  # @param text [String] text to segment
  # @param language [Module] language rule set (defaults to Common)
  def initialize(text:, language: Languages::Common)
    @text = text
    @language = language
  end

  # Returns the array of sentence segments found in the text.
  def process
    reformatted_text = List.new(text: text).add_line_break
    reformatted_text = replace_abbreviations(reformatted_text)
    reformatted_text = replace_numbers(reformatted_text)
    reformatted_text = replace_continuous_punctuation(reformatted_text)
    reformatted_text.apply(@language::AbbreviationsWithMultiplePeriodsAndEmailRule)
    reformatted_text.apply(@language::GeoLocationRule)
    split_into_segments(reformatted_text)
  end

  private

  def split_into_segments(txt)
    check_for_parens_between_quotes(txt).split("\r")
      .map! { |segment| segment.apply(@language::SingleNewLineRule, @language::EllipsisRules::All) }
      .map { |segment| check_for_punctuation(segment) }.flatten
      .map! { |segment| segment.apply(@language::SubSymbolsRules::All) }
      .map { |segment| post_process_segments(segment) }
      .flatten.compact.delete_if(&:empty?)
      .map! { |segment| segment.apply(@language::SubSingleQuoteRule) }
  end

  def post_process_segments(txt)
    return if consecutive_underscore?(txt) || txt.length < 2
    txt.apply(@language::ReinsertEllipsisRules::All).apply(@language::ExtraWhiteSpaceRule)
    if txt =~ @language::QUOTATION_AT_END_OF_SENTENCE_REGEX
      txt.split(@language::SPLIT_SPACE_QUOTATION_AT_END_OF_SENTENCE_REGEX)
    else
      txt.tr("\n", '').strip
    end
  end

  # Marks parenthesised spans inside double quotes with \r so they
  # split as their own segments.
  def check_for_parens_between_quotes(txt)
    return txt unless txt =~ @language::PARENS_BETWEEN_DOUBLE_QUOTES_REGEX
    txt.gsub!(@language::PARENS_BETWEEN_DOUBLE_QUOTES_REGEX) do |match|
      match.gsub(/\s(?=\()/, "\r").gsub(/(?<=\))\s/, "\r")
    end
  end

  # Masks runs of !/? with placeholder tokens so they don't split early.
  def replace_continuous_punctuation(txt)
    return txt unless txt =~ @language::CONTINUOUS_PUNCTUATION_REGEX
    txt.gsub!(@language::CONTINUOUS_PUNCTUATION_REGEX) do |match|
      match.gsub(/!/, '&ᓴ&').gsub(/\?/, '&ᓷ&')
    end
  end

  def consecutive_underscore?(txt)
    # Rubular: http://rubular.com/r/fTF2Ff3WBL
    txt.gsub(/_{3,}/, '').length.eql?(0)
  end

  def check_for_punctuation(txt)
    if @language::Punctuations.any? { |p| txt.include?(p) }
      process_text(txt)
    else
      txt
    end
  end

  def process_text(txt)
    txt << 'ȸ' unless @language::Punctuations.any? { |p| txt[-1].include?(p) }
    ExclamationWords.apply_rules(txt)
    between_punctuation(txt)
    txt = txt.apply(
      @language::DoublePunctuationRules::All,
      @language::QuestionMarkInQuotationRule,
      @language::ExclamationPointRules::All
    )
    txt = List.new(text: txt).replace_parens
    sentence_boundary_punctuation(txt)
  end

  def replace_numbers(txt)
    Number.new(text: txt).replace
  end

  # Language is passed through so language-specific abbreviation rules
  # are used instead of the replacer's default.
  def replace_abbreviations(txt)
    AbbreviationReplacer.new(text: txt, language: @language).replace
  end

  def between_punctuation(txt)
    BetweenPunctuation.new(text: txt).replace
  end

  def sentence_boundary_punctuation(txt)
    txt.scan(@language::SENTENCE_BOUNDARY_REGEX)
  end
end
end
|
# frozen_string_literal: true
module ProxyFetcher
  class Document
    # Abstract class for storing HTML elements that was parsed by
    # one of the <code>ProxyFetcher::Document</code> adapters class.
    class Node
      # @!attribute [r] node
      #   @return [Object] original DOM node, parsed by adapter backend
      attr_reader :node

      # Initialize new HTML node
      #
      # @return [Node]
      #
      def initialize(node)
        @node = node
      end

      # Searches for node in children using some selector (CSS or XPath).
      #
      # @param selector [String] selector (CSS or XPath)
      # @param method [Symbol] backend lookup method (default :at_xpath)
      #
      # @return [Node] child node
      #
      def find(selector, method = :at_xpath)
        self.class.new(node.public_send(method, selector))
      end

      # Searches exact HTML element by XPath. Returns only one element.
      #
      # @return [ProxyFetcher::Document::Node]
      #   node
      #
      def at_xpath(*args)
        self.class.new(node.at_xpath(*args))
      end

      # Searches exact HTML element by CSS. Returns only one element.
      #
      # @return [ProxyFetcher::Document::Node]
      #   node
      #
      def at_css(*args)
        self.class.new(node.at_css(*args))
      end

      # Returns clean content (text) for the specific element.
      #
      # @return [String]
      #   HTML node content
      #
      def content_at(*args)
        clear(find(*args).content)
      end

      # Returns HTML node content.
      #
      # Abstract method, must be implemented for specific adapter class.
      #
      def content
        raise "`#{__method__}` must be implemented for specific adapter class!"
      end

      # Returns HTML node inner HTML.
      #
      # Abstract method, must be implemented for specific adapter class.
      #
      def html
        raise "`#{__method__}` must be implemented for specific adapter class!"
      end

      protected

      # Removes whitespaces, tabulation and other "garbage" for the text.
      #
      # @param text [String]
      #   text to clear
      #
      # @return [String, nil]
      #   clean text, or nil for nil/empty input
      #
      def clear(text)
        return if text.nil? || text.empty?
        text.strip.gsub(/[ \t]/i, '')
      end
    end
  end
end
[ci skip] Small docs skip
# frozen_string_literal: true
module ProxyFetcher
  class Document
    # Abstract wrapper around a single HTML element produced by one of
    # the <code>ProxyFetcher::Document</code> adapter backends.
    class Node
      # @!attribute [r] node
      #   @return [Object] original DOM node, parsed by adapter backend
      attr_reader :node

      # Wraps a backend-specific DOM node.
      #
      # @return [Node]
      #
      def initialize(node)
        @node = node
      end

      # Looks up a child element with a CSS or XPath selector using the
      # given backend method (+:at_xpath+ by default).
      #
      # @param selector [String] selector (CSS or XPath)
      #
      # @return [Node] wrapped child node
      #
      def find(selector, method = :at_xpath)
        child = node.public_send(method, selector)
        self.class.new(child)
      end

      # Finds exactly one element by XPath expression.
      #
      # @return [ProxyFetcher::Document::Node] wrapped node
      #
      def at_xpath(*args)
        self.class.new(node.at_xpath(*args))
      end

      # Finds exactly one element by CSS selector.
      #
      # @return [ProxyFetcher::Document::Node] wrapped node
      #
      def at_css(*args)
        self.class.new(node.at_css(*args))
      end

      # Cleaned text content of the element matched by +find+.
      #
      # @return [String] HTML node content
      #
      def content_at(*args)
        clear(find(*args).content)
      end

      # Text extraction is backend-specific; adapters must override.
      def content
        raise "`#{__method__}` must be implemented for specific adapter class!"
      end

      # Inner-HTML extraction is backend-specific; adapters must override.
      def html
        raise "`#{__method__}` must be implemented for specific adapter class!"
      end

      protected

      # Strips surrounding whitespace and removes every space/tab from
      # +text+.
      #
      # @param text [String] text to clear
      #
      # @return [String, nil] clean text, or nil for nil/empty input
      #
      def clear(text)
        return if text.nil? || text.empty?
        text.strip.gsub(/[ \t]/i, '')
      end
    end
  end
end
|
class Puppet::Provider::Rabbitmqctl < Puppet::Provider
  initvars
  commands :rabbitmqctl => 'rabbitmqctl'

  # Extracts the RabbitMQ version string from `rabbitmqctl -q status`,
  # or nil when the status output does not match.
  def self.rabbitmq_version
    output = rabbitmqctl('-q', 'status')
    version = output.match(/\{rabbit,"RabbitMQ","([\d\.]+)"\}/)
    version[1] if version
  end

  # Retry the given code block 'count' retries or until the
  # command succeeds. Use 'step' delay between retries.
  # Limit each query time by 'timeout'.
  # For example:
  #   users = self.class.run_with_retries { rabbitmqctl 'list_users' }
  def self.run_with_retries(count=30, step=6, timeout=10)
    count.times do
      begin
        output = Timeout::timeout(timeout) do
          yield
        end
      rescue Puppet::ExecutionFailure, Timeout::Error
        # NOTE: `rescue Timeout` (the module) never matched anything —
        # Timeout::Error does not include the Timeout module — so
        # timed-out commands escaped the retry loop entirely.
        Puppet.debug 'Command failed, retrying'
        sleep step
      else
        Puppet.debug 'Command succeeded'
        return output
      end
    end
    raise Puppet::Error, "Command is still failing after #{count * step} seconds expired!"
  end
end
Fix timeout(10 to 60)
class Puppet::Provider::Rabbitmqctl < Puppet::Provider
  initvars
  commands :rabbitmqctl => 'rabbitmqctl'

  # Extracts the RabbitMQ version string from `rabbitmqctl -q status`,
  # or nil when the status output does not match.
  def self.rabbitmq_version
    output = rabbitmqctl('-q', 'status')
    version = output.match(/\{rabbit,"RabbitMQ","([\d\.]+)"\}/)
    version[1] if version
  end

  # Retry the given code block 'count' retries or until the
  # command succeeds. Use 'step' delay between retries.
  # Limit each query time by 'timeout'.
  # For example:
  #   users = self.class.run_with_retries { rabbitmqctl 'list_users' }
  def self.run_with_retries(count=30, step=6, timeout=60)
    count.times do
      begin
        output = Timeout::timeout(timeout) do
          yield
        end
      rescue Puppet::ExecutionFailure, Timeout::Error
        # NOTE: `rescue Timeout` (the module) never matched anything —
        # Timeout::Error does not include the Timeout module — so
        # timed-out commands escaped the retry loop entirely.
        Puppet.debug 'Command failed, retrying'
        sleep step
      else
        Puppet.debug 'Command succeeded'
        return output
      end
    end
    raise Puppet::Error, "Command is still failing after #{count * step} seconds expired!"
  end
end
|
# Homebrew formula for the X.Org "Winitzki" cyrillic bitmap fonts.
class FontWinitzkiCyrillic < Formula
  desc "X.Org Fonts: font winitzki cyrillic"
  homepage "https://www.x.org/"
  ### http://www.linuxfromscratch.org/blfs/view/svn/x/x7font.html
  url "https://www.x.org/pub/individual/font/font-winitzki-cyrillic-1.0.3.tar.bz2"
  mirror "https://xorg.freedesktop.org/archive/individual/font/font-winitzki-cyrillic-1.0.3.tar.bz2"
  mirror "https://ftp.x.org/archive/individual/font/font-winitzki-cyrillic-1.0.3.tar.bz2"
  sha256 "abd13b63d02fcaec488686c23683e5cf640b43bd32f8ca22eeae6f84df0a36a0"
  revision 2

  depends_on "bdftopcf" => :build
  depends_on "bzip2" => [:build, :recommended]
  depends_on "font-util" => :build
  depends_on "fontconfig" => :build
  depends_on "mkfontscale" => :build
  depends_on "pkg-config" => :build

  def install
    # Standard X.Org font autotools build; fonts land under share/fonts/X11.
    args = %W[
      --prefix=#{prefix}
      --sysconfdir=#{etc}
      --localstatedir=#{var}
      --disable-dependency-tracking
      --disable-silent-rules
      --with-fontrootdir=#{share}/fonts/X11
    ]
    args << "--with-compression=bzip2" if build.with? "bzip2"

    system "./configure", *args
    system "make"
    system "make", "install"
  end
end
font-winitzki-cyrillic: add 1.0.3_2 bottle.
# Homebrew formula for the X.Org "Winitzki" cyrillic bitmap fonts.
class FontWinitzkiCyrillic < Formula
  desc "X.Org Fonts: font winitzki cyrillic"
  homepage "https://www.x.org/"
  ### http://www.linuxfromscratch.org/blfs/view/svn/x/x7font.html
  url "https://www.x.org/pub/individual/font/font-winitzki-cyrillic-1.0.3.tar.bz2"
  mirror "https://xorg.freedesktop.org/archive/individual/font/font-winitzki-cyrillic-1.0.3.tar.bz2"
  mirror "https://ftp.x.org/archive/individual/font/font-winitzki-cyrillic-1.0.3.tar.bz2"
  sha256 "abd13b63d02fcaec488686c23683e5cf640b43bd32f8ca22eeae6f84df0a36a0"
  revision 2

  # Prebuilt binary package for this revision.
  bottle do
    root_url "https://github.com/maxim-belkin/homebrew-xorg/releases/download/font-winitzki-cyrillic-1.0.3_2"
    rebuild 1
    sha256 cellar: :any_skip_relocation, x86_64_linux: "f55accd3fa5e0aeeb9802137f63bff5c75b7519b4d925c727bf6d9d4c2a12c13"
  end

  depends_on "bdftopcf" => :build
  depends_on "bzip2" => [:build, :recommended]
  depends_on "font-util" => :build
  depends_on "fontconfig" => :build
  depends_on "mkfontscale" => :build
  depends_on "pkg-config" => :build

  def install
    # Standard X.Org font autotools build; fonts land under share/fonts/X11.
    args = %W[
      --prefix=#{prefix}
      --sysconfdir=#{etc}
      --localstatedir=#{var}
      --disable-dependency-tracking
      --disable-silent-rules
      --with-fontrootdir=#{share}/fonts/X11
    ]
    args << "--with-compression=bzip2" if build.with? "bzip2"

    system "./configure", *args
    system "make"
    system "make", "install"
  end
end
|
# Homebrew formula for Google's DevTools proxy for iOS devices.
class IosWebkitDebugProxy < Formula
  desc "DevTools proxy for iOS devices"
  homepage "https://github.com/google/ios-webkit-debug-proxy"
  url "https://github.com/google/ios-webkit-debug-proxy/archive/v1.8.4.tar.gz"
  sha256 "4e919a4d3dae329e3b4db7bfb2bd1b22c938fa86d59acaa6e99ef4eb65793dae"
  head "https://github.com/google/ios-webkit-debug-proxy.git"

  bottle do
    cellar :any
    sha256 "c16647d7595ca6e09c09a6e6d4c7399326d88b1c1da5697a2ace225730550486" => :mojave
    sha256 "30ed9c9006d712cf6b550966978006323505840fff8522449664733d72f9600b" => :high_sierra
    sha256 "deb979ca5f8df37e7323f8d0169e51f0fb4781936e15e9f7135811e1fe6f152e" => :sierra
  end

  depends_on "autoconf" => :build
  depends_on "automake" => :build
  depends_on "libtool" => :build
  depends_on "pkg-config" => :build
  depends_on "libimobiledevice"
  depends_on "libplist"
  depends_on "usbmuxd"

  def install
    # Source tarball ships no configure script; generate it first.
    system "./autogen.sh"
    system "./configure", "--disable-dependency-tracking", "--prefix=#{prefix}"
    system "make", "install"
  end

  test do
    system "#{bin}/ios_webkit_debug_proxy", "--help"
  end
end
ios-webkit-debug-proxy: update 1.8.4 bottle.
# Homebrew formula for Google's DevTools proxy for iOS devices.
class IosWebkitDebugProxy < Formula
  desc "DevTools proxy for iOS devices"
  homepage "https://github.com/google/ios-webkit-debug-proxy"
  url "https://github.com/google/ios-webkit-debug-proxy/archive/v1.8.4.tar.gz"
  sha256 "4e919a4d3dae329e3b4db7bfb2bd1b22c938fa86d59acaa6e99ef4eb65793dae"
  head "https://github.com/google/ios-webkit-debug-proxy.git"

  bottle do
    cellar :any
    sha256 "b8058d0cc84cd94c50c1e0c66a783f5ea4328d10ef1a166dd87e6ca236e533d9" => :mojave
    sha256 "e7b603f9c7beb0d3a30cc770596ecb1d56dadfa7cea6c3b299119dfa00b868b6" => :high_sierra
    sha256 "8f25c038350c5a577589d5a91b2f7a0a2f4a6823c0e385a6cf5ec90836bf4b50" => :sierra
  end

  depends_on "autoconf" => :build
  depends_on "automake" => :build
  depends_on "libtool" => :build
  depends_on "pkg-config" => :build
  depends_on "libimobiledevice"
  depends_on "libplist"
  depends_on "usbmuxd"

  def install
    # Source tarball ships no configure script; generate it first.
    system "./autogen.sh"
    system "./configure", "--disable-dependency-tracking", "--prefix=#{prefix}"
    system "make", "install"
  end

  test do
    system "#{bin}/ios_webkit_debug_proxy", "--help"
  end
end
|
Added Amazon Relational Database Service (RDS) command line tools
Signed-off-by: Adam Vandenberg <flangy@gmail.com>
* Updated to use aws tools base class.
require 'formula'
# Require ec2-api-tools to get the base class
require "#{File.dirname __FILE__}/ec2-api-tools.rb"

# Amazon RDS command line tools, installed via the shared AWS tools
# base class defined in the ec2-api-tools formula.
class RdsCommandLineTools <AmazonWebServicesFormula
  homepage 'http://developer.amazonwebservices.com/connect/entry.jspa?externalID=2928'
  url 'http://s3.amazonaws.com/rds-downloads/RDSCli.zip'
  md5 'a4c7f9efca4c19b9f9073945a5bbc7b9'
  version '1.1.005'

  def install
    standard_install
  end

  # Extends the base class's env-var instructions with an RDS smoke test.
  def caveats
    s = standard_instructions "AWS_RDS_HOME"
    s += <<-EOS.undent
    To check that your setup works properly, run the following command:
    rds-describe-db-instances --headers
    You should see a header line. If you have database instances already configured,
    you will see a description line for each database instance.
    EOS
    return s
  end
end
|
# Amazon RDS command-line toolkit (Java-based); each bundled script is
# wrapped so it runs with JAVA_HOME and AWS_RDS_HOME preset.
class RdsCommandLineTools < Formula
  desc "Amazon RDS command-line toolkit"
  homepage "https://aws.amazon.com/developertools/2928"
  url "https://rds-downloads.s3.amazonaws.com/RDSCli-1.19.004.zip"
  sha256 "298c15ccd04bd91f1be457645d233455364992e7dd27e09c48230fbc20b5950c"

  bottle :unneeded

  depends_on :java

  def install
    env = Language::Java.java_home_env.merge(:AWS_RDS_HOME => libexec)
    rm Dir["bin/*.cmd"] # Remove Windows versions
    etc.install "credential-file-path.template"
    libexec.install Dir["*"]
    Pathname.glob("#{libexec}/bin/*") do |file|
      next if file.directory?
      basename = file.basename
      next if basename.to_s == "service"
      # Place an env-setting shim for each CLI script on PATH.
      (bin/basename).write_env_script file, env
    end
  end

  def caveats
    <<~EOS
      Before you can use these tools you must export a variable to your $SHELL.
      export AWS_CREDENTIAL_FILE="<Path to the credentials file>"
      To check that your setup works properly, run the following command:
      rds-describe-db-instances --headers
      You should see a header line. If you have database instances already configured,
      you will see a description line for each database instance.
    EOS
  end

  test do
    assert_match version.to_s, shell_output("#{bin}/rds-version")
  end
end
rds-command-line-tools: depend on openjdk
# Amazon RDS command-line toolkit (Java-based); each bundled script is
# wrapped so it runs with JAVA_HOME and AWS_RDS_HOME preset.
class RdsCommandLineTools < Formula
  desc "Amazon RDS command-line toolkit"
  homepage "https://aws.amazon.com/developertools/2928"
  url "https://rds-downloads.s3.amazonaws.com/RDSCli-1.19.004.zip"
  sha256 "298c15ccd04bd91f1be457645d233455364992e7dd27e09c48230fbc20b5950c"
  revision 1

  bottle :unneeded

  depends_on "openjdk"

  def install
    # Point the scripts at Homebrew's openjdk rather than the system JVM.
    env = { :JAVA_HOME => Formula["openjdk"].opt_prefix, :AWS_RDS_HOME => libexec }
    rm Dir["bin/*.cmd"] # Remove Windows versions
    etc.install "credential-file-path.template"
    libexec.install Dir["*"]
    Pathname.glob("#{libexec}/bin/*") do |file|
      next if file.directory?
      basename = file.basename
      next if basename.to_s == "service"
      # Place an env-setting shim for each CLI script on PATH.
      (bin/basename).write_env_script file, env
    end
  end

  def caveats
    <<~EOS
      Before you can use these tools you must export a variable to your $SHELL.
      export AWS_CREDENTIAL_FILE="<Path to the credentials file>"
      To check that your setup works properly, run the following command:
      rds-describe-db-instances --headers
      You should see a header line. If you have database instances already configured,
      you will see a description line for each database instance.
    EOS
  end

  test do
    assert_match version.to_s, shell_output("#{bin}/rds-version")
  end
end
|
module Rails
  module API
    # Rack endpoint that renders public error responses for exceptions,
    # honoring the requested format (JSON/XML/...) when possible and
    # falling back to static HTML pages under +public_path+.
    class PublicExceptions
      attr_accessor :public_path

      def initialize(public_path)
        @public_path = public_path
      end

      # Renders the error response for the exception stored in +env+;
      # PATH_INFO carries the HTTP status (e.g. "/404").
      def call(env)
        exception = env["action_dispatch.exception"]
        status = env["PATH_INFO"][1..-1]
        request = ActionDispatch::Request.new(env)
        content_type = request.formats.first
        format = content_type && "to_#{content_type.to_sym}"
        body = { :status => status, :error => exception.message }

        render(status, body, :format => format, :content_type => content_type)
      end

      private

      # Serializes +body+ with the requested format when supported,
      # otherwise falls back to a static HTML error page.
      def render(status, body, options)
        format = options[:format]
        if format && body.respond_to?(format)
          render_format(status, body.public_send(format), options)
        else
          render_html(status)
        end
      end

      # Builds the Rack triple for an already-serialized body.
      def render_format(status, body, options)
        [status, {'Content-Type' => "#{options[:content_type]}; charset=#{ActionDispatch::Response.default_charset}",
          'Content-Length' => body.bytesize.to_s}, [body]]
      end

      # Serves public/<status>.<locale>.html or public/<status>.html,
      # cascading (404 + X-Cascade) when neither file exists.
      def render_html(status)
        found = false
        path = "#{public_path}/#{status}.#{I18n.locale}.html" if I18n.locale
        path = "#{public_path}/#{status}.html" unless path && (found = File.exist?(path))

        if found || File.exist?(path)
          body = File.read(path)
          [status, {'Content-Type' => "text/html; charset=#{ActionDispatch::Response.default_charset}", 'Content-Length' => body.bytesize.to_s}, [body]]
        else
          [404, { "X-Cascade" => "pass" }, []]
        end
      end
    end
  end
end
Refactor public exceptions to reuse render format method
module Rails
  module API
    # Rack endpoint that renders public error responses for exceptions,
    # honoring the requested format (JSON/XML/...) when possible and
    # falling back to static HTML pages under +public_path+.
    class PublicExceptions
      attr_accessor :public_path

      def initialize(public_path)
        @public_path = public_path
      end

      # Renders the error response for the exception stored in +env+;
      # PATH_INFO carries the HTTP status (e.g. "/404").
      def call(env)
        exception = env["action_dispatch.exception"]
        status = env["PATH_INFO"][1..-1]
        request = ActionDispatch::Request.new(env)
        content_type = request.formats.first
        format = content_type && "to_#{content_type.to_sym}"
        body = { :status => status, :error => exception.message }
        render(status, body, format, content_type)
      end

      private

      # Serializes +body+ with the requested format when supported,
      # otherwise falls back to a static HTML error page.
      def render(status, body, format, content_type)
        return render_html(status) unless format && body.respond_to?(format)
        render_format(status, content_type, body.public_send(format))
      end

      # Builds the Rack triple for an already-serialized body.
      def render_format(status, content_type, body)
        headers = {
          'Content-Type' => "#{content_type}; charset=#{ActionDispatch::Response.default_charset}",
          'Content-Length' => body.bytesize.to_s
        }
        [status, headers, [body]]
      end

      # Serves public/<status>.<locale>.html when present, otherwise
      # public/<status>.html; cascades (404 + X-Cascade) when neither
      # file exists.
      def render_html(status)
        candidate = "#{public_path}/#{status}.#{I18n.locale}.html" if I18n.locale
        candidate = "#{public_path}/#{status}.html" unless candidate && File.exist?(candidate)

        if File.exist?(candidate)
          render_format(status, 'text/html', File.read(candidate))
        else
          [404, { "X-Cascade" => "pass" }, []]
        end
      end
    end
  end
end
|
module RakutenWebService
  # Gem version.
  VERSION = "0.6.3".freeze
end
1.0.0.rc1 :tada:
module RakutenWebService
  # Gem version (1.0.0 release candidate).
  VERSION = "1.0.0.rc1".freeze
end
|
# -*- coding:utf-8; mode:ruby; -*-
require "rbindkeys/key_event_handler/configurer"
module Rbindkeys
  # Retrieves key binds for incoming key events and translates them
  # before forwarding to the virtual output device.
  class KeyEventHandler
    include Revdev

    LOG = LogUtils.get_logger name

    # device operator
    attr_reader :operator
    # default key bind set which retrieves key binds with a key event
    attr_reader :default_bind_resolver
    # current key bind set which retrieves key binds with a key event
    attr_reader :bind_resolver
    # resolver processed before bind_resolver (code-to-code premap)
    attr_reader :pre_bind_resolver
    # code set of pressed keys on the event device
    attr_reader :pressed_key_set
    # pressed key binds
    attr_reader :active_bind_set

    def initialize device_operator
      @operator = device_operator
      @default_bind_resolver = BindResolver.new
      @bind_resolver = @default_bind_resolver
      @pre_bind_resolver = {}
      @pressed_key_set = []
      @active_bind_set = []
    end

    # Evaluates a Ruby config file in the context of this handler so the
    # binding DSL can register binds.
    def load_config file
      code = File.read file
      instance_eval code, file
    end

    # Dispatches one input event to the press/release/repeat handler and
    # forwards it unchanged when the result is :through.
    def handle event
      if LOG.info?
        LOG.info ""
        LOG.info "read\t#{KeyEventHandler.get_state_by_value event} "+
          "#{event.hr_code}(#{event.code})"
      end

      # handle pre_key_bind_set (remap the code before resolution)
      event.code = (@pre_bind_resolver[event.code] or event.code)

      # switch to handle event with event.value
      result =
        case event.value
        when 0; handle_release_event event
        when 1; handle_press_event event
        when 2; handle_pressing_event event
        else raise UnknownKeyValue, "expect 0, 1 or 2 as event.value(#{event.value})"
        end
      LOG.debug "handle result: #{result}" if LOG.debug?

      if result == :through
        fill_gap_pressed_state
        @operator.send_event event
      end
      handle_pressed_keys event
      LOG.info "pressed_keys real:#{@pressed_key_set.inspect} "+
        "virtual:#{@operator.pressed_key_set.inspect}" if LOG.info?
    end

    # Releases any active binds that involve the released key; restores
    # swallowed input keys when the bind requested input recovery.
    def handle_release_event event
      release_bind_set = []
      @active_bind_set.reject! do |key_bind|
        if key_bind.input.include? event.code
          release_bind_set << key_bind
          true
        else
          false
        end
      end

      if release_bind_set.empty?
        @bind_resolver.default_value
      else
        release_bind_set.each do |kb|
          kb.output.each {|c|@operator.release_key c}
          if kb.input_recovery
            kb.input.clone.delete_if {|c|c==event.code}.each {|c|@operator.press_key c}
          end
        end
        :ignore
      end
    end

    # TODO fix bug: on Google Chrome, pressing C-fn invoke new window creation.
    # (C-fn mean pressing the n key with pressing C-f)
    def handle_press_event event
      r = @bind_resolver.resolve event.code, @pressed_key_set
      if r.kind_of? KeyBind
        if r.output.kind_of? Array
          r.input.clone.delete_if{|c|c==event.code}.each {|c| @operator.release_key c}
          r.output.each {|c| @operator.press_key c}
          @active_bind_set << r
        elsif r.output.kind_of? BindResolver
          # the bind switches to a nested resolver (two-stroke binds)
          @bind_resolver = r
        end
        :ignore
      else
        r
      end
    end

    # Key repeat: forward repeats of active bind outputs; otherwise pass
    # the raw event through.
    def handle_pressing_event event
      if @active_bind_set.empty?
        :through
      else
        @active_bind_set.each {|kb| kb.output.each {|c| @operator.pressing_key c}}
        :ignore
      end
    end

    # Presses, on the virtual device, any keys that are physically held
    # but not yet pressed virtually.
    def fill_gap_pressed_state
      return if @operator.pressed_key_set == @pressed_key_set
      sub = @pressed_key_set - @operator.pressed_key_set
      sub.each {|code| @operator.press_key code}
    end

    # Tracks the set of physically pressed key codes.
    def handle_pressed_keys event
      if event.value == 1
        @pressed_key_set << event.code
        @pressed_key_set.sort! # TODO do not sort. implement an insertion like bubble
      elsif event.value == 0
        if @pressed_key_set.delete(event.code).nil?
          LOG.warn "#{event.code} does not exists on @pressed_keys" if LOG.warn?
        end
      end
    end

    class << self
      # parse and normalize to Fixnum/Array
      def parse_code code, depth = 0
        if code.kind_of? Symbol
          code = parse_symbol code
        elsif code.kind_of? Array
          raise ArgumentError, "expect Array is the depth less than 1" if depth >= 1
          code.map!{|c| parse_code c, (depth+1)}
        elsif code.kind_of? Fixnum and depth == 0
          code = [code]
        elsif not code.kind_of? Fixnum
          raise ArgumentError, "expect Symbol / Fixnum / Array"
        end
        code
      end

      # TODO convert :j -> KEY_J, :ctrl -> KEY_LEFTCTRL
      # Maps a symbol such as :KEY_A to its Revdev key-code constant.
      def parse_symbol sym
        if not sym.kind_of? Symbol
          raise ArgumentError, "expect Symbol / Fixnum / Array"
        end
        Revdev.const_get sym
      end

      # Human-readable event state for logging.
      def get_state_by_value ev
        case ev.value
        when 0; 'released '
        when 1; 'pressed '
        when 2; 'pressing '
        end
      end
    end
  end
end
fix bug
# -*- coding:utf-8; mode:ruby; -*-
require "rbindkeys/key_event_handler/configurer"
module Rbindkeys
# retrive key binds with key event
class KeyEventHandler
include Revdev
LOG = LogUtils.get_logger name
# device operator
attr_reader :operator
# defaulut key bind set which retrive key binds with a key event
attr_reader :default_bind_resolver
# current key bind set which retrive key binds with a key event
attr_reader :bind_resolver
# proccessed resolver before bind_resolver
attr_reader :pre_bind_resolver
# code set of pressed key on the event device
attr_reader :pressed_key_set
# pressed key binds
attr_reader :active_bind_set
# Builds a handler bound to the given virtual device operator.
#
# device_operator - object used to emit key events
#                   (press_key / release_key / pressing_key / send_event).
#
# Starts with an empty default BindResolver, no per-key pre-translations,
# and empty pressed-key / active-bind bookkeeping.
def initialize device_operator
  @operator = device_operator
  # resolver used when no two-stroke prefix is active
  @default_bind_resolver = BindResolver.new
  @bind_resolver = @default_bind_resolver
  # maps an input key code to a replacement code, applied before resolving
  @pre_bind_resolver = {}
  @pressed_key_set = []
  @active_bind_set = []
end
# Evaluates a Ruby configuration file in the context of this handler so
# the bind-definition DSL runs against this instance. The path is passed
# to instance_eval so error backtraces point at the config file.
def load_config file
  instance_eval File.read(file), file
end
# Entry point: processes one input event read from the real device.
#
# Pipeline:
# 1. optional logging of the raw event,
# 2. per-key pre-translation via @pre_bind_resolver,
# 3. value-based dispatch to the press/release/repeat handlers,
# 4. if the handler answered :through, sync the virtual device's pressed
#    state and forward the raw event,
# 5. update the @pressed_key_set bookkeeping.
#
# Raises UnknownKeyValue for event values other than 0, 1 or 2.
def handle event
  if LOG.info?
    LOG.info ""
    LOG.info "read\t#{KeyEventHandler.get_state_by_value event} "+
      "#{event.hr_code}(#{event.code})"
  end
  # handle pre_key_bind_set: rewrite the incoming code before resolving
  event.code = (@pre_bind_resolver[event.code] or event.code)
  # switch to the handler matching event.value
  result =
    case event.value
    when 0; handle_release_event event
    when 1; handle_press_event event
    when 2; handle_pressing_event event
    else raise UnknownKeyValue, "expect 0, 1 or 2 as event.value(#{event.value})"
    end
  LOG.debug "handle result: #{result}" if LOG.debug?
  if result == :through
    fill_gap_pressed_state event
    @operator.send_event event
  end
  handle_pressed_keys event
  LOG.info "pressed_keys real:#{@pressed_key_set.inspect} "+
    "virtual:#{@operator.pressed_key_set.inspect}" if LOG.info?
end
# Handles a key-release event (event.value == 0).
#
# Removes from @active_bind_set every bind whose input contains the
# released code and releases those binds' output keys. For binds created
# with input_recovery, the remaining (still physically held) input keys
# are pressed again so their state is restored.
#
# Returns the resolver's default_value (usually :through) when no active
# bind was affected, :ignore otherwise.
def handle_release_event event
  release_bind_set = []
  # collect & drop the active binds triggered by this key in one pass
  @active_bind_set.reject! do |key_bind|
    if key_bind.input.include? event.code
      release_bind_set << key_bind
      true
    else
      false
    end
  end
  if release_bind_set.empty?
    @bind_resolver.default_value
  else
    release_bind_set.each do |kb|
      kb.output.each {|c|@operator.release_key c}
      if kb.input_recovery
        # re-press the other keys of the chord, minus the one just released
        kb.input.clone.delete_if {|c|c==event.code}.each {|c|@operator.press_key c}
      end
    end
    :ignore
  end
end
# TODO fix bug: on Google Chrome, pressing C-fn invoke new window creation.
# (C-fn mean pressing the n key with pressing C-f)
#
# Handles a key-press event (event.value == 1) by asking the current
# BindResolver for a binding of event.code given the currently held keys.
#
# * KeyBind with an Array output: release the bind's other input keys,
#   press the mapped output keys, and remember the bind in
#   @active_bind_set so its outputs can be released later -> :ignore.
# * KeyBind with a BindResolver output: switch @bind_resolver so the next
#   key is resolved against it (two-stroke binding) -> :ignore.
#   NOTE(review): assigns the KeyBind `r` itself rather than `r.output` —
#   verify this matches BindResolver#resolve's contract.
# * Anything else (e.g. :through): returned unchanged so the caller can
#   forward the raw event.
def handle_press_event event
  r = @bind_resolver.resolve event.code, @pressed_key_set
  if r.kind_of? KeyBind
    if r.output.kind_of? Array
      r.input.clone.delete_if{|c|c==event.code}.each {|c| @operator.release_key c}
      r.output.each {|c| @operator.press_key c}
      @active_bind_set << r
    elsif r.output.kind_of? BindResolver
      @bind_resolver = r
    end
    :ignore
  else
    r
  end
end
# Handles a key-repeat event (event.value == 2): with no active binds the
# raw event is forwarded untouched, otherwise each active bind's output
# codes are re-sent as repeating keys and the raw event is consumed.
def handle_pressing_event event
  return :through if @active_bind_set.empty?
  @active_bind_set.flat_map(&:output).each do |code|
    @operator.pressing_key code
  end
  :ignore
end
# Brings the virtual device's pressed-key state in line with the real one
# before an event is forwarded: presses every key held on the real device
# but missing on the virtual one. For a release event the key being
# released is excluded so it is not spuriously re-pressed.
def fill_gap_pressed_state event
  return if @pressed_key_set == @operator.pressed_key_set
  missing = @pressed_key_set - @operator.pressed_key_set
  missing.delete event.code if event.value == 0
  missing.each {|code| @operator.press_key code}
end
# Maintains @pressed_key_set from the raw event stream: a press
# (value == 1) records the code, a release (value == 0) removes it.
# Repeat events (value == 2) are intentionally ignored.
def handle_pressed_keys event
  if event.value == 1
    @pressed_key_set << event.code
    @pressed_key_set.sort! # TODO do not sort. implement an insertion like bubble
  elsif event.value == 0
    if @pressed_key_set.delete(event.code).nil?
      # release for a key never seen pressed (e.g. held before startup)
      LOG.warn "#{event.code} does not exists on @pressed_keys" if LOG.warn?
    end
  end
end
class << self
  # Parses and normalizes a user-supplied key code to an Integer/Array.
  #
  # code  - a Symbol (resolved via Revdev constants), an Integer
  #         (wrapped in a one-element Array at the top level), or an
  #         Array of the former two (only one level of nesting allowed).
  # depth - internal recursion depth; callers pass 0.
  #
  # Uses Integer instead of the legacy Fixnum class so this keeps working
  # on Ruby >= 3.2, where Fixnum/Bignum were folded into Integer.
  def parse_code code, depth = 0
    if code.kind_of? Symbol
      code = parse_symbol code
    elsif code.kind_of? Array
      raise ArgumentError, "expect Array is the depth less than 1" if depth >= 1
      code.map!{|c| parse_code c, (depth+1)}
    elsif code.kind_of? Integer and depth == 0
      code = [code]
    elsif not code.kind_of? Integer
      raise ArgumentError, "expect Symbol / Fixnum / Array"
    end
    code
  end

  # TODO convert :j -> KEY_J, :ctrl -> KEY_LEFTCTRL
  # Resolves a key Symbol to its Revdev key-code constant.
  def parse_symbol sym
    if not sym.kind_of? Symbol
      raise ArgumentError, "expect Symbol / Fixnum / Array"
    end
    Revdev.const_get sym
  end

  # Human-readable label for an event's value field (used for logging).
  def get_state_by_value ev
    case ev.value
    when 0; 'released '
    when 1; 'pressed '
    when 2; 'pressing '
    end
  end
end
end
end
|
module Remotipart
  # Responder used to automagically wrap any non-xml replies in a text-area
  # as expected by iframe-transport.
  module RenderOverrides
    include ERB::Util

    # On include, wrap the host controller's #render via alias_method_chain.
    def self.included(base)
      base.class_eval do
        alias_method_chain :render, :remotipart
      end
    end

    # Renders normally, then for remotipart (iframe upload) requests
    # rewrites the body for jquery.iframe-transport:
    # * a small script relaxes document.domain so the parent page can read
    #   the iframe across subdomains,
    # * the original body is wrapped in a <textarea> annotated with the
    #   real content type, status code and status text — the module's
    #   stated contract; previously the escaped body was returned without
    #   the <textarea> wrapper, so the transport could not parse it (bug),
    # * HTML bodies are escaped so markup survives inside the textarea,
    # * the response is re-labelled as HTML so browsers render the iframe.
    def render_with_remotipart *args
      render_without_remotipart *args
      if remotipart_submitted?
        textarea_body = response.content_type == 'text/html' ? html_escape(response.body) : response.body
        response.body = %{<script type=\"text/javascript\">try{window.parent.document;}catch(err){document.domain=document.domain;}</script> <textarea data-type=\"#{response.content_type}\" data-status=\"#{response.response_code}\" data-statusText=\"#{response.message}\">#{textarea_body}</textarea>}
        response.content_type = Mime::HTML
      end
      response_body
    end
  end
end
Restore response textarea wrapping
module Remotipart
  # Responder used to automagically wrap any non-xml replies in a text-area
  # as expected by iframe-transport.
  module RenderOverrides
    include ERB::Util

    # On include, wrap the host controller's #render via alias_method_chain.
    def self.included(base)
      base.class_eval do
        alias_method_chain :render, :remotipart
      end
    end

    # Renders normally, then for remotipart (iframe upload) requests
    # rewrites the body for jquery.iframe-transport: a domain-relaxation
    # script guards against cross-subdomain access errors, and the original
    # body is wrapped in a <textarea> carrying the real content type,
    # status code and status text. HTML bodies are escaped so markup
    # survives inside the textarea; the response is re-labelled as HTML.
    def render_with_remotipart *args
      render_without_remotipart *args
      if remotipart_submitted?
        textarea_body = response.content_type == 'text/html' ? html_escape(response.body) : response.body
        response.body = %{<script type=\"text/javascript\">try{window.parent.document;}catch(err){document.domain=document.domain;}</script> <textarea data-type=\"#{response.content_type}\" data-status=\"#{response.response_code}\" data-statusText=\"#{response.message}\">#{textarea_body}</textarea>}
        response.content_type = Mime::HTML
      end
      response_body
    end
  end
end
|
module RequirejsOptimizer
  # Gem version string.
  VERSION = "0.4.0"
end
Patch version bump
module RequirejsOptimizer
  # Gem version string.
  VERSION = "0.4.1"
end
|
module RestPack
  module Serializer
    # Gem version string.
    VERSION = '0.4.24'
  end
end
v0.4.25
module RestPack
  module Serializer
    # Gem version string.
    VERSION = '0.4.25'
  end
end
|
module RestPack
  module Serializer
    # Gem version string.
    VERSION = '0.4.10'
  end
end
v0.4.11
module RestPack
  module Serializer
    # Gem version string.
    VERSION = '0.4.11'
  end
end
|
# encoding: UTF-8
require 'stringio'
class Riddle::Configuration::Parser
SOURCE_CLASSES = {
'mysql' => Riddle::Configuration::SQLSource,
'pgsql' => Riddle::Configuration::SQLSource,
'mssql' => Riddle::Configuration::SQLSource,
'xmlpipe' => Riddle::Configuration::XMLSource,
'xmlpipe2' => Riddle::Configuration::XMLSource,
'odbc' => Riddle::Configuration::SQLSource
}
INDEX_CLASSES = {
'plain' => Riddle::Configuration::Index,
'distributed' => Riddle::Configuration::DistributedIndex,
'rt' => Riddle::Configuration::RealtimeIndex
}
def initialize(input)
@input = input
end
# Parses the configuration text and returns a populated
# Riddle::Configuration (indexer and searchd settings, sources, indices).
def parse!
  set_indexer
  set_searchd
  set_sources
  set_indices
  configuration
end
private
def inner
@inner ||= InnerParser.new(@input).parse!
end
def configuration
@configuration ||= Riddle::Configuration.new
end
def sources
@sources ||= {}
end
def each_with_prefix(prefix)
inner.keys.select { |key| key[/^#{prefix}\s+/] }.each do |key|
yield key.gsub(/^#{prefix}\s+/, ''), inner[key]
end
end
def set_indexer
set_settings configuration.indexer, inner['indexer']
end
def set_searchd
set_settings configuration.searchd, inner['searchd']
end
# Builds a source object for every "source NAME [: PARENT]" section,
# using the section's "type" setting to pick the class, and registers it
# by name so indices can attach it later.
def set_sources
  each_with_prefix 'source' do |name, settings|
    names = name.split(/\s*:\s*/)
    # type picks the SQL vs XML source class; deleted so it is not
    # re-applied as an ordinary setting below
    type = settings.delete('type').first
    source = SOURCE_CLASSES[type].new names.first, type
    source.parent = names.last if names.length > 1
    set_settings source, settings
    sources[source.name] = source
  end
end
def set_indices
each_with_prefix 'index' do |name, settings|
names = name.split(/\s*:\s*/)
type = (settings.delete('type') || ['plain']).first
index = INDEX_CLASSES[type].new names.first
index.parent = names.last if names.length > 1
(settings.delete('source') || []).each do |source_name|
index.sources << sources[source_name]
end
set_settings index, settings
configuration.indices << index
end
end
def set_settings(object, hash)
hash.each do |key, values|
values.each do |value|
set_setting object, key, value
end
end
end
# Applies one parsed setting to a configuration object. Array-typed
# attributes (multi-value settings such as repeated keys) are appended
# to; scalar attributes are assigned through their writer.
def set_setting(object, key, value)
  if object.send(key).is_a?(Array)
    object.send(key) << value
  else
    object.send "#{key}=", value
  end
end
# Splits a sphinx configuration string into sections:
# {"section label" => {"setting" => ["value", ...]}}.
class InnerParser
  # setting lines look like "key = value"
  SETTING_PATTERN = /^(\w+)\s*=\s*(.*)$/

  # input - the full configuration file contents as a String.
  def initialize(input)
    @stream = StringIO.new(input)
    @sections = {}
  end

  # Reads section after section until the stream is exhausted and
  # returns the accumulated {label => settings} hash.
  def parse!
    while label = next_line do
      @sections[label] = next_settings
    end
    @sections
  end

  private

  # Returns the next non-blank line, stripped, or nil at end of stream.
  def next_line
    line = @stream.gets
    return line if line.nil?
    line = line.strip
    line.empty? ? next_line : line
  end

  # Parses one "{ ... }" settings body. Repeated keys accumulate their
  # values into an array; a value ending in a backslash continues on the
  # following line.
  def next_settings
    settings = Hash.new { |hash, key| hash[key] = [] }
    line = ''
    while line.empty? || line == '{' do
      line = next_line
    end
    while line != '}' do
      match = SETTING_PATTERN.match(line)
      # fail with context instead of a bare NoMethodError on nil when the
      # input contains a line that is neither a setting nor a brace
      raise ArgumentError, "malformed configuration setting: #{line.inspect}" if match.nil?
      key, value = *match.captures
      settings[key] << value
      while value[/\\$/] do
        value = next_line
        settings[key].last << "\n" << value
      end
      line = next_line
    end
    settings
  end
end
end
Handle UTF-8 when parsing configuration
# encoding: UTF-8
require 'stringio'
class Riddle::Configuration::Parser
SOURCE_CLASSES = {
'mysql' => Riddle::Configuration::SQLSource,
'pgsql' => Riddle::Configuration::SQLSource,
'mssql' => Riddle::Configuration::SQLSource,
'xmlpipe' => Riddle::Configuration::XMLSource,
'xmlpipe2' => Riddle::Configuration::XMLSource,
'odbc' => Riddle::Configuration::SQLSource
}
INDEX_CLASSES = {
'plain' => Riddle::Configuration::Index,
'distributed' => Riddle::Configuration::DistributedIndex,
'rt' => Riddle::Configuration::RealtimeIndex
}
def initialize(input)
@input = input
end
def parse!
set_indexer
set_searchd
set_sources
set_indices
configuration
end
private
def inner
@inner ||= InnerParser.new(@input).parse!
end
def configuration
@configuration ||= Riddle::Configuration.new
end
def sources
@sources ||= {}
end
def each_with_prefix(prefix)
inner.keys.select { |key| key[/^#{prefix}\s+/] }.each do |key|
yield key.gsub(/^#{prefix}\s+/, ''), inner[key]
end
end
def set_indexer
set_settings configuration.indexer, inner['indexer']
end
def set_searchd
set_settings configuration.searchd, inner['searchd']
end
def set_sources
each_with_prefix 'source' do |name, settings|
names = name.split(/\s*:\s*/)
type = settings.delete('type').first
source = SOURCE_CLASSES[type].new names.first, type
source.parent = names.last if names.length > 1
set_settings source, settings
sources[source.name] = source
end
end
# Builds an index object for every "index NAME [: PARENT]" section.
# The "type" setting (default: plain) selects the index class; any
# "source" settings are resolved to the source objects parsed earlier.
def set_indices
  each_with_prefix 'index' do |name, settings|
    names = name.split(/\s*:\s*/)
    type = (settings.delete('type') || ['plain']).first
    index = INDEX_CLASSES[type].new names.first
    index.parent = names.last if names.length > 1
    (settings.delete('source') || []).each do |source_name|
      index.sources << sources[source_name]
    end
    set_settings index, settings
    configuration.indices << index
  end
end
def set_settings(object, hash)
hash.each do |key, values|
values.each do |value|
set_setting object, key, value
end
end
end
def set_setting(object, key, value)
if object.send(key).is_a?(Array)
object.send(key) << value
else
object.send "#{key}=", value
end
end
# Breaks a sphinx configuration string into sections:
# {"section label" => {"setting" => [values...]}}.
class InnerParser
  SETTING_PATTERN = /^(\w+)\s*=\s*(.*)$/

  def initialize(input)
    @stream = StringIO.new(input)
    @sections = {}
  end

  # Consumes the stream section by section; returns the label->settings hash.
  def parse!
    loop do
      label = next_line
      break if label.nil?
      @sections[label] = next_settings
    end
    @sections
  end

  private

  # Next non-empty line, stripped; nil once the stream is exhausted.
  def next_line
    raw = @stream.gets
    return nil if raw.nil?
    stripped = raw.strip
    stripped.empty? ? next_line : stripped
  end

  # Parses a "{ ... }" body; repeated keys accumulate values, and a value
  # ending in a backslash continues on the following line.
  def next_settings
    settings = Hash.new { |hash, key| hash[key] = [] }
    current = ''
    current = next_line while current.empty? || current == '{'
    until current == '}'
      key, value = SETTING_PATTERN.match(current).captures
      settings[key] << value
      while value[/\\$/]
        value = next_line
        settings[key].last << "\n" << value
      end
      current = next_line
    end
    settings
  end
end
end
|
namespace :db do
  namespace :schema do
    #FIXME
    SCHEMA_FILE = "db/schema/schema.rb"

    desc "Show diff between schema file and table configuration"
    task :diff => :environment do
      configs.each do |connection_name, config|
        ridgepole_diff(SCHEMA_FILE, connection_name, config)
      end
    end

    desc "Apply schema files to databases"
    task :apply => :environment do
      execute_ridgepole(configs, "--apply")
    end

    desc "Dry run apply schema"
    task :apply_dry_run => :environment do
      execute_ridgepole(configs, "--apply", dry_run: true)
    end

    desc "Merge schema file and table configuration"
    task :merge => :environment do
      execute_ridgepole(configs, "--merge")
    end

    desc "Dry run merge"
    task :merge_dry_run => :environment do
      execute_ridgepole(configs, "--merge", dry_run: true)
    end

    # Runs ridgepole in the given mode against every configured connection.
    def execute_ridgepole(configs, mode, dry_run: false)
      configs.each do |connection_name, config|
        ridgepole_apply(SCHEMA_FILE, connection_name, config, mode, dry_run: dry_run)
      end
    end

    # Prints the schema/database diff for one connection. Fails the rake
    # task when ridgepole itself exits non-zero — previously a failed
    # command was silently treated as success.
    def ridgepole_diff(schema_file, connection_name, config)
      puts format_label("CONNECTION [#{ connection_name } (#{ config['host'] })] BEGIN")
      output = `RAILS_ENV=#{ Rails.env } CONNECTION=#{ connection_name } bundle exec ridgepole --enable-mysql-awesome --diff '#{ config.to_json }' #{ schema_file }`
      fail_if_last_command_failed
      puts highlight_sql(output)
      puts format_label("CONNECTION [#{ connection_name } (#{ config['host'] })] END")
    end

    # Applies (or merges) the schema file to one connection, optionally as
    # a dry run. Fails the task when the ridgepole command exits non-zero.
    def ridgepole_apply(schema_file, connection_name, config, mode, dry_run: false)
      puts format_label("CONNECTION [#{ connection_name } (#{ config['host'] })] BEGIN")
      command = "RAILS_ENV=#{ Rails.env } CONNECTION=#{ connection_name } bundle exec ridgepole --enable-mysql-awesome #{ mode } -c '#{ config.to_json }' -f #{ schema_file }"
      command += " --dry-run" if dry_run
      output = `#{ command }`
      fail_if_last_command_failed
      puts highlight_sql(output)
      puts format_label("CONNECTION [#{ connection_name } (#{ config['host'] })] END")
    end

    # Aborts the task when the most recent shelled-out command exited
    # unsuccessfully (or when no child process has run at all).
    def fail_if_last_command_failed
      fail "ridgepole execution failed" unless $? && $?.success?
    end

    # Crude ANSI highlighting of DDL in ridgepole output:
    # additions green, drops red, alterations yellow.
    def highlight_sql(text)
      require "colorize"
      text
        .gsub(/CREATE\s+.+(?=\()/, "\\0".colorize(:light_green))
        .gsub(/ALTER\s+TABLE\s+.+\s+ADD.+/, "\\0".colorize(:light_green))
        .gsub(/ALTER\s+TABLE\s+.+\s+DROP.+/, "\\0".colorize(:light_red))
        .gsub(/ALTER\s+TABLE\s+.+\s+(CHANGE|MODIFY).+/, "\\0".colorize(:light_yellow))
        .gsub(/DROP\s+.+/, "\\0".colorize(:light_red))
    end

    # Pads a label to a fixed-width "=== label ===..." banner (70 columns).
    def format_label(text)
      result = "=== #{ text } "
      result + "=" * [0, 70 - result.size].max
    end

    # Database configurations, one entry per connection name.
    def configs
      @configs ||= ActiveRecord::Base.configurations.dup
    end
  end
end
Fix to fail rake task if ridgepole execution failed
namespace :db do
namespace :schema do
#FIXME
SCHEMA_FILE = "db/schema/schema.rb"
desc "Show diff between schema file and table configuration"
task :diff => :environment do
configs.each do |connection_name, config|
ridgepole_diff(SCHEMA_FILE, connection_name, config)
end
end
desc "Apply schema files to databases"
task :apply => :environment do
execute_ridgepole(configs, "--apply")
end
desc "Dry run apply schema"
task :apply_dry_run => :environment do
execute_ridgepole(configs, "--apply", dry_run: true)
end
# Fixed user-facing typo in the task description ("configutation").
desc "Merge schema file and table configuration"
task :merge => :environment do
  execute_ridgepole(configs, "--merge")
end
desc "Dry run merge"
task :merge_dry_run => :environment do
execute_ridgepole(configs, "--merge", dry_run: true)
end
def execute_ridgepole(configs, mode, dry_run: false)
configs.each do |connection_name, config|
ridgepole_apply(SCHEMA_FILE, connection_name, config, mode, dry_run: dry_run)
end
end
def ridgepole_diff(schema_file, connection_name, config)
puts format_label("CONNECTION [#{ connection_name } (#{ config['host'] })] BEGIN")
output = `RAILS_ENV=#{ Rails.env } CONNECTION=#{ connection_name } bundle exec ridgepole --enable-mysql-awesome --diff '#{ config.to_json }' #{ schema_file }`
fail_if_last_command_failed
puts highlight_sql(output)
puts format_label("CONNECTION [#{ connection_name } (#{ config['host'] })] END")
end
def ridgepole_apply(schema_file, connection_name, config, mode, dry_run: false)
puts format_label("CONNECTION [#{ connection_name } (#{ config['host'] })] BEGIN")
command = "RAILS_ENV=#{ Rails.env } CONNECTION=#{ connection_name } bundle exec ridgepole --enable-mysql-awesome #{ mode } -c '#{ config.to_json }' -f #{ schema_file }"
command += " --dry-run" if dry_run
output = `#{ command }`
fail_if_last_command_failed
puts highlight_sql(output)
puts format_label("CONNECTION [#{ connection_name } (#{ config['host'] })] END")
end
# Aborts the rake task when the most recent child process (backtick or
# system call) exited unsuccessfully. Uses Process::Status#success?
# instead of the deprecated integer comparison, and also fails when no
# child process has run at all ($? is nil), matching the old behaviour.
def fail_if_last_command_failed
  fail unless $? && $?.success?
end
def highlight_sql(text)
require "colorize"
text
.gsub(/CREATE\s+.+(?=\()/, "\\0".colorize(:light_green))
.gsub(/ALTER\s+TABLE\s+.+\s+ADD.+/, "\\0".colorize(:light_green))
.gsub(/ALTER\s+TABLE\s+.+\s+DROP.+/, "\\0".colorize(:light_red))
.gsub(/ALTER\s+TABLE\s+.+\s+(CHANGE|MODIFY).+/, "\\0".colorize(:light_yellow))
.gsub(/DROP\s+.+/, "\\0".colorize(:light_red))
end
# Renders "=== text ===…" padded with '=' to a 70-column banner; longer
# labels simply get no trailing padding.
def format_label(text)
  prefix = "=== #{ text } "
  padding = 70 - prefix.size
  padding = 0 if padding < 0
  prefix + ("=" * padding)
end
def configs
@configs ||= ActiveRecord::Base.configurations.dup
end
end
end
|
require 'base64'
require 'mime'
require 'mime/types'
require 'multi_json'
module RingCentralSdk::Helpers
class CreateFaxRequest < RingCentralSdk::Helpers::Request
attr_reader :msg
def initialize(path_params=nil,metadata=nil,options=nil)
@msg = MIME::Multipart::Mixed.new
@msg.headers.delete('Content-Id')
@path_params = path_params
if metadata.is_a?(Hash) || metadata.is_a?(String)
add_metadata(metadata)
end
if options.is_a?(Hash)
if options.has_key?(:file_name)
add_file(options[:file_name], options[:file_content_type], options[:base64_encode])
elsif options.has_key?(:text)
add_file_text(options[:text])
end
end
end
def add_metadata(meta=nil)
meta = inflate_metadata(meta)
json = MultiJson.encode(meta)
if json.is_a?(String)
json_part = MIME::Text.new(json)
json_part.headers.delete('Content-Id')
json_part.headers.set('Content-Type','application/json')
@msg.add(json_part)
return true
end
return false
end
# Normalizes fax metadata into a Hash whose recipient list is inflated
# into the API's expected array-of-contact form.
#
# meta - a JSON String (decoded with symbolized keys) or a Hash; any
#        other type is returned unchanged.
#
# Preserves whichever key style the caller used (:to or "to"); when no
# recipient key exists, :to is set to the inflation of nil.
def inflate_metadata(meta=nil)
  if meta.is_a?(String)
    meta = MultiJson.decode(meta,:symbolize_keys=>true)
  end
  if meta.is_a?(Hash)
    inf = RingCentralSdk::Helpers::Inflator::ContactInfo.new
    if meta.has_key?(:to)
      meta[:to] = inf.inflate_to_array( meta[:to] )
    elsif meta.has_key?("to")
      meta["to"] = inf.inflate_to_array( meta["to"] )
    else
      meta[:to] = inf.inflate_to_array( nil )
    end
  end
  return meta
end
def add_file_text(text=nil, charset='UTF-8')
return unless text.is_a?(String)
text_part = MIME::Text.new(text,'plain')
text_part.headers.delete('Content-Id')
@msg.add(text_part)
end
# Attaches a file to the fax as a MIME part.
#
# file_name     - path to an existing, readable file (raises otherwise).
# content_type  - explicit MIME type; when absent or malformed the type
#                 is guessed from the file name, falling back to
#                 application/octet-stream. Previously a file with no
#                 registered MIME type raised NoMethodError because
#                 `.first` returned nil before `.content_type` ran.
# base64_encode - when true the content is base64-encoded and flagged
#                 with a Content-Transfer-Encoding header.
def add_file(file_name=nil, content_type=nil, base64_encode=false)
  unless File.file?(file_name.to_s)
    raise "File \"#{file_name.to_s}\" does not exist or cannot be read"
  end
  unless content_type.is_a?(String) && content_type =~ /^[^\/\s]+\/[^\/\s]+/
    guessed = MIME::Types.type_for(file_name).first
    content_type = guessed ? guessed.content_type : 'application/octet-stream'
  end
  file_part = base64_encode \
    ? MIME::Text.new(Base64.encode64(File.binread(file_name))) \
    : MIME::Application.new(File.binread(file_name))
  file_part.headers.delete('Content-Id')
  file_part.headers.set('Content-Type', content_type)
  # Advertise the original file name when available
  base_name = File.basename(file_name)
  if base_name.is_a?(String) && base_name.length>0
    file_part.headers.set('Content-Disposition', "attachment; filename=\"#{base_name}\"")
  else
    file_part.headers.set('Content-Disposition', 'attachment')
  end
  # Base64 Encoding
  if base64_encode
    file_part.headers.set('Content-Transfer-Encoding','base64')
  end
  @msg.add(file_part)
  return true
end
def method()
return 'post'
end
# Builds the relative API path for the fax endpoint, e.g.
# "account/~/extension/~/fax". Account and extension ids default to "~"
# (the authorized session's own account/extension) and are overridden by
# :account_id / :extension_id entries in @path_params when they are
# non-empty Strings or positive Integers. (Removed the unused
# account_id/extension_id locals that shadowed the vals hash.)
def url()
  vals = {:account_id => '~', :extension_id => '~'}
  if @path_params.is_a?(Hash)
    vals.keys.each do |key|
      next unless @path_params.has_key?(key)
      if @path_params[key].is_a?(String) && @path_params[key].length>0
        vals[key] = @path_params[key]
      elsif @path_params[key].is_a?(Integer) && @path_params[key]>0
        vals[key] = @path_params[key].to_s
      end
    end
  end
  "account/#{vals[:account_id]}/extension/#{vals[:extension_id]}/fax"
end
def content_type()
return @msg.headers.get('Content-Type').to_s
end
def body()
return @msg.body.to_s
end
# Experimental
def _add_file(file_name=nil)
if file_name.is_a?(String) && File.file?(file_name)
file_msg = MIME::DiscreteMediaFactory.create(file_name)
file_msg.headers.delete('Content-Id')
@msg.add(file_msg)
return true
end
return false
end
private :_add_file
end
end
improve climate
require 'base64'
require 'mime'
require 'mime/types'
require 'multi_json'
module RingCentralSdk::Helpers
class CreateFaxRequest < RingCentralSdk::Helpers::Request
attr_reader :msg
def initialize(path_params=nil,metadata=nil,options=nil)
@msg = MIME::Multipart::Mixed.new
@msg.headers.delete('Content-Id')
@path_params = path_params
if metadata.is_a?(Hash) || metadata.is_a?(String)
add_metadata(metadata)
end
if options.is_a?(Hash)
if options.has_key?(:file_name)
add_file(options[:file_name], options[:file_content_type], options[:base64_encode])
elsif options.has_key?(:text)
add_file_text(options[:text])
end
end
end
def add_metadata(meta=nil)
meta = inflate_metadata(meta)
json = MultiJson.encode(meta)
if json.is_a?(String)
json_part = MIME::Text.new(json)
json_part.headers.delete('Content-Id')
json_part.headers.set('Content-Type','application/json')
@msg.add(json_part)
return true
end
return false
end
def inflate_metadata(meta=nil)
if meta.is_a?(String)
meta = MultiJson.decode(meta,:symbolize_keys=>true)
end
if meta.is_a?(Hash)
inf = RingCentralSdk::Helpers::Inflator::ContactInfo.new
if meta.has_key?(:to)
meta[:to] = inf.inflate_to_array( meta[:to] )
elsif meta.has_key?("to")
meta["to"] = inf.inflate_to_array( meta["to"] )
else
meta[:to] = inf.inflate_to_array( nil )
end
end
return meta
end
# Adds a plain-text part (e.g. a cover page) to the fax message.
# Non-String input is silently ignored.
# NOTE(review): the charset parameter is currently unused — the part is
# built without it; confirm whether it should be passed to MIME::Text.
def add_file_text(text=nil, charset='UTF-8')
  return unless text.is_a?(String)
  text_part = MIME::Text.new(text,'plain')
  text_part.headers.delete('Content-Id')
  @msg.add(text_part)
end
# Attaches a file to the fax as a MIME part.
#
# file_name     - path to an existing, readable file (raises otherwise).
# content_type  - explicit MIME type; when absent or malformed the type
#                 is guessed from the file name, falling back to
#                 application/octet-stream. Previously a file with no
#                 registered MIME type raised NoMethodError because
#                 `.first` returned nil before `.content_type` ran.
# base64_encode - when true the content is base64-encoded and flagged
#                 with a Content-Transfer-Encoding header.
def add_file(file_name=nil, content_type=nil, base64_encode=false)
  unless File.file?(file_name.to_s)
    raise "File \"#{file_name.to_s}\" does not exist or cannot be read"
  end
  unless content_type.is_a?(String) && content_type =~ /^[^\/\s]+\/[^\/\s]+/
    guessed = MIME::Types.type_for(file_name).first
    content_type = guessed ? guessed.content_type : 'application/octet-stream'
  end
  file_part = base64_encode \
    ? MIME::Text.new(Base64.encode64(File.binread(file_name))) \
    : MIME::Application.new(File.binread(file_name))
  file_part.headers.delete('Content-Id')
  file_part.headers.set('Content-Type', content_type)
  # Add file name
  base_name = File.basename(file_name)
  if base_name.is_a?(String) && base_name.length>0
    file_part.headers.set('Content-Disposition', "attachment; filename=\"#{base_name}\"")
  else
    file_part.headers.set('Content-Disposition', 'attachment')
  end
  # Base64 Encoding
  if base64_encode
    file_part.headers.set('Content-Transfer-Encoding','base64')
  end
  @msg.add(file_part)
  return true
end
# HTTP verb for this request ("post").
# NOTE(review): this shadows Object#method on instances — presumably
# intentional for the Request interface, but verify nothing relies on
# the builtin Object#method for these objects.
def method()
  return 'post'
end
# Builds the relative API path for the fax endpoint, e.g.
# "account/~/extension/~/fax". Account and extension ids default to "~"
# (the authorized session's own account/extension) and are overridden by
# :account_id / :extension_id entries in @path_params when they are
# non-empty Strings or positive Integers. (Removed the unused
# account_id/extension_id locals that shadowed the vals hash.)
def url()
  vals = {:account_id => '~', :extension_id => '~'}
  if @path_params.is_a?(Hash)
    vals.keys.each do |key|
      next unless @path_params.has_key?(key)
      if @path_params[key].is_a?(String) && @path_params[key].length>0
        vals[key] = @path_params[key]
      elsif @path_params[key].is_a?(Integer) && @path_params[key]>0
        vals[key] = @path_params[key].to_s
      end
    end
  end
  "account/#{vals[:account_id]}/extension/#{vals[:extension_id]}/fax"
end
def content_type()
return @msg.headers.get('Content-Type').to_s
end
def body()
return @msg.body.to_s
end
# Experimental
def _add_file(file_name=nil)
if file_name.is_a?(String) && File.file?(file_name)
file_msg = MIME::DiscreteMediaFactory.create(file_name)
file_msg.headers.delete('Content-Id')
@msg.add(file_msg)
return true
end
return false
end
private :_add_file
end
end |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.