CombinedText stringlengths 4 3.42M |
|---|
module ETSource
  module Util
    # Links ------------------------------------------------------------------

    # Extracts data about a link from the raw link string.
    LINK_RE = /
      (?<child>[\w_]+)-        # Child node key
      \([^)]+\)\s              # Carrier key (ignored)
      (?<reversed><)?          # Arrow indicating a reversed link?
      --\s(?<type>\w)\s-->?\s  # Link type and arrow
      \((?<carrier>[^)]+)\)-   # Carrier key
      (?<parent>[\w_]+)        # Parent node key
    /xi

    # Public: Given a path to a directory, yields each non-blank line (a link
    # definition string) from every ".links" file in that directory, together
    # with the name of the file it came from. Does not recurse into
    # subdirectories.
    #
    # directory - Path to a directory in which there are ".links" files.
    #
    # For example:
    #
    #   ETSource::Util.foreach_link(path) do |link, filename|
    #     link # => "..."
    #   end
    #
    # Returns nothing.
    def self.foreach_link(directory)
      # `each` rather than `map`: the intermediate array built by `map` was
      # never used, since this method always returns nil.
      Dir["#{ directory }/*.links"].each do |filename|
        File.foreach(filename) do |line, *rest|
          # Skip blank lines; strip the trailing newline before yielding.
          yield(line.strip, *rest, filename) if line.match(/\S/)
        end
      end
      nil
    end

    # Public: Given an array of nodes, sets up Turbine edges between them in
    # accordance with the +links+ strings held in the nodes.
    #
    # links - An array containing all the link definitions (as strings).
    # nodes - An array containing zero or more Nodes.
    #
    # Returns nothing.
    def self.establish_links!(nodes, links)
      nodes    = Collection.new(nodes)
      carriers = Collection.new(Carrier.all)

      links.each { |link| establish_link(link, nodes, carriers) }
      nil
    end

    # Internal: Given a string defining a link, sets up a Turbine::Edge
    # between the nodes.
    #
    # link     - The raw link definition string.
    # nodes    - All the nodes defined in the graph in a Collection.
    # carriers - A list of all carriers in the dataset, in a Collection
    #
    # Raises Turbine::DuplicateEdgeError if the edge already exists.
    #
    # Returns the edge which was created.
    def self.establish_link(link, nodes, carriers)
      data = LINK_RE.match(link)

      raise InvalidLinkError.new(link) if data.nil?

      # Single-letter type code from the link string -> semantic link type.
      type = case data['type']
        when 's' then :share
        when 'f' then :flexible
        when 'c' then :constant
        when 'd' then :dependent
        when 'i' then :inverse_flexible
      end

      parent  = nodes.find(data['parent'])
      child   = nodes.find(data['child'])
      props   = { type: type, reversed: ! data['reversed'].nil? }
      carrier = carriers.find(data['carrier'])

      raise UnknownLinkNodeError.new(link, data['parent'])    if parent.nil?
      raise UnknownLinkNodeError.new(link, data['child'])     if child.nil?
      raise UnknownLinkTypeError.new(link, data['type'])      if type.nil?
      raise UnknownLinkCarrierError.new(link, data['carrier']) if carrier.nil?

      parent.turbine.connect_to(child.turbine, carrier.key, props)
    end
  end # Util
end # ETSource
Trying to debug the Semaphore failures.
Tests are passing locally, but failing on Semaphore.
module ETSource
  module Util
    # Links ------------------------------------------------------------------

    # Extracts data about a link from the raw link string.
    LINK_RE = /
      (?<child>[\w_]+)-        # Child node key
      \([^)]+\)\s              # Carrier key (ignored)
      (?<reversed><)?          # Arrow indicating a reversed link?
      --\s(?<type>\w)\s-->?\s  # Link type and arrow
      \((?<carrier>[^)]+)\)-   # Carrier key
      (?<parent>[\w_]+)        # Parent node key
    /xi

    # Public: Given a path to a directory, yields each non-blank line (a link
    # definition string) from every ".links" file in that directory, together
    # with the name of the file it came from. Does not recurse into
    # subdirectories.
    #
    # directory - Path to a directory in which there are ".links" files.
    #
    # For example:
    #
    #   ETSource::Util.foreach_link(path) do |link, filename|
    #     link # => "..."
    #   end
    #
    # Returns nothing.
    def self.foreach_link(directory)
      # `each` rather than `map`: the intermediate array built by `map` was
      # never used, since this method always returns nil.
      Dir["#{ directory }/*.links"].each do |filename|
        File.foreach(filename) do |line, *rest|
          # Skip blank lines; strip the trailing newline before yielding.
          yield(line.strip, *rest, filename) if line.match(/\S/)
        end
      end
      nil
    end

    # Public: Given an array of nodes, sets up Turbine edges between them in
    # accordance with the +links+ strings held in the nodes.
    #
    # links - An array containing all the link definitions (as strings).
    # nodes - An array containing zero or more Nodes.
    #
    # Returns nothing.
    def self.establish_links!(nodes, links)
      nodes    = Collection.new(nodes)
      carriers = Collection.new(Carrier.all)

      # (Removed leftover CI-debugging output that printed the first
      # carrier's file path on every call.)
      links.each { |link| establish_link(link, nodes, carriers) }
      nil
    end

    # Internal: Given a string defining a link, sets up a Turbine::Edge
    # between the nodes.
    #
    # link     - The raw link definition string.
    # nodes    - All the nodes defined in the graph in a Collection.
    # carriers - A list of all carriers in the dataset, in a Collection
    #
    # Raises Turbine::DuplicateEdgeError if the edge already exists.
    #
    # Returns the edge which was created.
    def self.establish_link(link, nodes, carriers)
      data = LINK_RE.match(link)

      raise InvalidLinkError.new(link) if data.nil?

      # Single-letter type code from the link string -> semantic link type.
      type = case data['type']
        when 's' then :share
        when 'f' then :flexible
        when 'c' then :constant
        when 'd' then :dependent
        when 'i' then :inverse_flexible
      end

      parent  = nodes.find(data['parent'])
      child   = nodes.find(data['child'])
      props   = { type: type, reversed: ! data['reversed'].nil? }
      carrier = carriers.find(data['carrier'])

      raise UnknownLinkNodeError.new(link, data['parent'])    if parent.nil?
      raise UnknownLinkNodeError.new(link, data['child'])     if child.nil?
      raise UnknownLinkTypeError.new(link, data['type'])      if type.nil?
      raise UnknownLinkCarrierError.new(link, data['carrier']) if carrier.nil?

      parent.turbine.connect_to(child.turbine, carrier.key, props)
    end
  end # Util
end # ETSource
|
# ChefSpec matchers for the +java_service+ custom resource. Defined only when
# ChefSpec is loaded, so requiring this file during a normal Chef run is a
# no-op. Each matcher asserts that the named java_service resource was
# converged with the corresponding action.
if defined?(ChefSpec)
  def create_java_service(resource_name)
    ChefSpec::Matchers::ResourceMatcher.new(:java_service, :create, resource_name)
  end

  def start_java_service(resource_name)
    ChefSpec::Matchers::ResourceMatcher.new(:java_service, :start, resource_name)
  end

  def stop_java_service(resource_name)
    ChefSpec::Matchers::ResourceMatcher.new(:java_service, :stop, resource_name)
  end

  def enable_java_service(resource_name)
    ChefSpec::Matchers::ResourceMatcher.new(:java_service, :enable, resource_name)
  end

  def disable_java_service(resource_name)
    ChefSpec::Matchers::ResourceMatcher.new(:java_service, :disable, resource_name)
  end

  def load_java_service(resource_name)
    ChefSpec::Matchers::ResourceMatcher.new(:java_service, :load, resource_name)
  end

  def restart_java_service(resource_name)
    ChefSpec::Matchers::ResourceMatcher.new(:java_service, :restart, resource_name)
  end

  def reload_java_service(resource_name)
    ChefSpec::Matchers::ResourceMatcher.new(:java_service, :reload, resource_name)
  end
end
Add define_runner_method to the ChefSpec matchers, which allows us to do things like chef_run.java_service
# ChefSpec matchers for the +java_service+ custom resource. Defined only when
# ChefSpec is loaded, so requiring this file during a normal Chef run is a
# no-op. Each matcher asserts that the named java_service resource was
# converged with the corresponding action.
if defined?(ChefSpec)
  # Register java_service with the ChefSpec runner, allowing specs to call
  # chef_run.java_service(...) directly.
  ChefSpec::Runner.define_runner_method(:java_service)

  def create_java_service(resource_name)
    ChefSpec::Matchers::ResourceMatcher.new(:java_service, :create, resource_name)
  end

  def start_java_service(resource_name)
    ChefSpec::Matchers::ResourceMatcher.new(:java_service, :start, resource_name)
  end

  def stop_java_service(resource_name)
    ChefSpec::Matchers::ResourceMatcher.new(:java_service, :stop, resource_name)
  end

  def enable_java_service(resource_name)
    ChefSpec::Matchers::ResourceMatcher.new(:java_service, :enable, resource_name)
  end

  def disable_java_service(resource_name)
    ChefSpec::Matchers::ResourceMatcher.new(:java_service, :disable, resource_name)
  end

  def load_java_service(resource_name)
    ChefSpec::Matchers::ResourceMatcher.new(:java_service, :load, resource_name)
  end

  def restart_java_service(resource_name)
    ChefSpec::Matchers::ResourceMatcher.new(:java_service, :restart, resource_name)
  end

  def reload_java_service(resource_name)
    ChefSpec::Matchers::ResourceMatcher.new(:java_service, :reload, resource_name)
  end
end
|
module MiniTest
  module Reporters
    # Gem version. Frozen so the shared constant string cannot be mutated.
    VERSION = '0.3.0'.freeze
  end
end
Bump version.
module MiniTest
  module Reporters
    # Gem version. Frozen so the shared constant string cannot be mutated.
    VERSION = '0.4.0'.freeze
  end
end
# ChefSpec matchers for the php cookbook's custom resources (php_pear,
# php_pear_channel, php_fpm_pool). Defined only when ChefSpec is loaded, so a
# normal Chef run is unaffected. Each matcher asserts that the named resource
# was converged with the corresponding action.
if defined?(ChefSpec)
  # -- php_pear ------------------------------------------------------------
  ChefSpec.define_matcher :php_pear

  def install_php_pear(resource_name)
    ChefSpec::Matchers::ResourceMatcher.new(:php_pear, :install, resource_name)
  end

  def remove_php_pear(resource_name)
    ChefSpec::Matchers::ResourceMatcher.new(:php_pear, :remove, resource_name)
  end

  def upgrade_php_pear(resource_name)
    ChefSpec::Matchers::ResourceMatcher.new(:php_pear, :upgrade, resource_name)
  end

  def purge_php_pear(resource_name)
    ChefSpec::Matchers::ResourceMatcher.new(:php_pear, :purge, resource_name)
  end

  # -- php_pear_channel ----------------------------------------------------
  ChefSpec.define_matcher :php_pear_channel

  def discover_php_pear_channel(resource_name)
    ChefSpec::Matchers::ResourceMatcher.new(:php_pear_channel, :discover, resource_name)
  end

  def remove_php_pear_channel(resource_name)
    ChefSpec::Matchers::ResourceMatcher.new(:php_pear_channel, :remove, resource_name)
  end

  def update_php_pear_channel(resource_name)
    ChefSpec::Matchers::ResourceMatcher.new(:php_pear_channel, :update, resource_name)
  end

  def add_php_pear_channel(resource_name)
    ChefSpec::Matchers::ResourceMatcher.new(:php_pear_channel, :add, resource_name)
  end

  # -- php_fpm_pool --------------------------------------------------------
  ChefSpec.define_matcher :php_fpm_pool

  def install_php_fpm_pool(resource_name)
    ChefSpec::Matchers::ResourceMatcher.new(:php_fpm_pool, :install, resource_name)
  end

  def uninstall_php_fpm_pool(resource_name)
    ChefSpec::Matchers::ResourceMatcher.new(:php_fpm_pool, :uninstall, resource_name)
  end
end
Add an 'option' action to php_pear. This might help if we want to specify an option in the installation process, like "-Z" for example.
# ChefSpec matchers for the php cookbook's custom resources (php_pear,
# php_pear_channel, php_fpm_pool). Defined only when ChefSpec is loaded, so a
# normal Chef run is unaffected. Each matcher asserts that the named resource
# was converged with the corresponding action.
if defined?(ChefSpec)
  # -- php_pear ------------------------------------------------------------
  ChefSpec.define_matcher :php_pear

  def install_php_pear(resource_name)
    ChefSpec::Matchers::ResourceMatcher.new(:php_pear, :install, resource_name)
  end

  def remove_php_pear(resource_name)
    ChefSpec::Matchers::ResourceMatcher.new(:php_pear, :remove, resource_name)
  end

  def upgrade_php_pear(resource_name)
    ChefSpec::Matchers::ResourceMatcher.new(:php_pear, :upgrade, resource_name)
  end

  def purge_php_pear(resource_name)
    ChefSpec::Matchers::ResourceMatcher.new(:php_pear, :purge, resource_name)
  end

  # Matcher for the new :option action. Previously this was mistakenly
  # defined as a second `purge_php_pear`, which silently replaced the purge
  # matcher above; it is renamed to follow the <action>_<resource> pattern.
  def option_php_pear(resource_name)
    ChefSpec::Matchers::ResourceMatcher.new(:php_pear, :option, resource_name)
  end

  # -- php_pear_channel ----------------------------------------------------
  ChefSpec.define_matcher :php_pear_channel

  def discover_php_pear_channel(resource_name)
    ChefSpec::Matchers::ResourceMatcher.new(:php_pear_channel, :discover, resource_name)
  end

  def remove_php_pear_channel(resource_name)
    ChefSpec::Matchers::ResourceMatcher.new(:php_pear_channel, :remove, resource_name)
  end

  def update_php_pear_channel(resource_name)
    ChefSpec::Matchers::ResourceMatcher.new(:php_pear_channel, :update, resource_name)
  end

  def add_php_pear_channel(resource_name)
    ChefSpec::Matchers::ResourceMatcher.new(:php_pear_channel, :add, resource_name)
  end

  # -- php_fpm_pool --------------------------------------------------------
  ChefSpec.define_matcher :php_fpm_pool

  def install_php_fpm_pool(resource_name)
    ChefSpec::Matchers::ResourceMatcher.new(:php_fpm_pool, :install, resource_name)
  end

  def uninstall_php_fpm_pool(resource_name)
    ChefSpec::Matchers::ResourceMatcher.new(:php_fpm_pool, :uninstall, resource_name)
  end
end
|
require 'event_counter/version'
# This class defines the model that stores all counters.
class EventCounter < ActiveRecord::Base
  include EventCounterVersion

  belongs_to :countable, polymorphic: true

  # Limits counters to those created within the given time range. Returns the
  # unmodified scope when +range+ is nil.
  scope :within, ->(range) { range && where(created_at: range) }

  # Atomically increases the counter by +val+ in the database and mirrors the
  # change on the in-memory attribute. Returns self.
  def increase_by(val)
    self.class.where(id: id).update_all(['value = value + ?', val])
    increment(:value, val)
    self
  end

  # Atomically decreases the counter by +decrement+ in the database and
  # mirrors the change on the in-memory attribute. Returns self.
  def decrease_by(decrement)
    self.class.where(id: id).update_all(['value = value - ?', decrement])
    decrement(:value, decrement)
    self
  end

  # Sets the counter to +val+ (default 0) in the database and in memory.
  # Returns self.
  def reset_value(val = 0)
    self.class.where(id: id).update_all(['value = ?', val])
    self.value = val
    self
  end

  # Creates a counter record for the normalized +on_time+ slot, or — when
  # +force+ is set and a record already exists for that slot — resets it.
  def self.make(val = 1, on_time: nil, force: false)
    on_time = normalize_on_time!(on_time)
    attrs = { created_at: on_time }
    if force && (found = scoped_relation.where(attrs).first)
      found.reset_value(val)
    else
      attrs.merge!(value: val)
      scoped_relation.create!(attrs)
    end
  end

  # Interval configured for this counter on the owning (countable) model.
  def self.current_interval
    scoped_relation.proxy_association.owner.event_counters[counter_name]
  end

  # Name of the association through which this counter is accessed.
  def self.counter_name
    scoped_relation.proxy_association.reflection.name
  end

  # Changes the counter for the normalized time slot by +val+, moving in the
  # +vector+ direction (:up or :down). Raises CounterError for any other
  # vector. Creates the slot when it does not exist yet.
  def self.change(val = 1, vector: :up, on_time: nil, force: nil)
    counter_error!(:direction) unless [:up, :down].include?(vector)
    val ||= 1
    on_time = normalize_on_time!(on_time)
    counter = where(created_at: on_time).first
    return counter.update!(vector, val, force) if counter
    val = -val if vector == :down
    make(val, on_time: on_time, force: force)
  end

  # Applies a change to an existing counter row.
  # NOTE(review): this shadows ActiveRecord's own #update! on this model;
  # callers must not expect the AR attribute-update semantics here.
  def update!(vector, val = 1, force = false)
    if force
      val = -val if vector == :down
      reset_value(val)
    else
      vector == :up ? increase_by(val) : decrease_by(val)
    end
  end

  # Current relation, compatible with both ActiveRecord 3 (scoped) and 4+
  # (where(nil)).
  def self.scoped_relation
    ActiveRecord::VERSION::MAJOR > 3 ? where(nil) : scoped
  end

  class << self
    # Backward-compatible alias for the original (misspelled) method name.
    alias_method :scoped_relatiion, :scoped_relation
  end

  # Increases the counter. Previously this called change(:up, *args), which
  # passed :up positionally into +val+ rather than setting the +vector+
  # keyword; fixed to forward the value and options with vector: :up.
  def self.up!(val = 1, **opts)
    change(val, vector: :up, **opts)
  end

  # Decreases the counter (see .up! for the forwarding fix).
  def self.down!(val = 1, **opts)
    change(val, vector: :down, **opts)
  end

  # Raises CounterError with the given message key and optional extra data.
  def self.counter_error!(*args)
    fail CounterError, args
  end

  # Validates that +on_time+ carries a time zone and snaps it to the start of
  # the counter's configured interval (a Symbol such as :day, or a number of
  # seconds).
  def self.normalize_on_time!(on_time)
    on_time ||= Time.zone.now
    counter_error!(:time_zone) unless on_time.is_a?(ActiveSupport::TimeWithZone)
    on_time =
      case current_interval
      when Symbol
        on_time.in_time_zone.send(:"beginning_of_#{current_interval}")
      else
        on_time.in_time_zone.floor(current_interval)
      end
    on_time
  end

  # Default error class
  class CounterError < StandardError
    MESSAGES = {
      not_found: 'Unable to find counter (%{name}).',
      direction: 'Wrong direction for counter. ' \
        'Possible values are :up and :down as symbols.',
      less: 'Specified interval (%{interval}) could not be less then ' \
        'a defined (%{default_interval}) in a countable model (%{model}).',
      multiple: 'Specified interval (%{interval}) should be a multiple of ' \
        'a defined (%{default_interval}) in a countable model (%{model}).',
      time_zone: 'The :on_time option should be defined with time zone, e.x.: ' \
        'Time.zone.local(2014, 1, 1, 1, 1)'
    }.freeze

    attr_accessor :extra

    def initialize(*args)
      # `flatten` (not `flatten!`): flatten! returns nil when the array is
      # already flat, which would silently wipe out the message.
      @msg, self.extra = args.flatten
      super(@msg)
    end

    def to_s
      @msg.is_a?(Symbol) ? MESSAGES[@msg] % extra : super
    end
  end
end
require 'event_counter/active_record_extension'

# Mix the counter extension into every ActiveRecord model.
ActiveRecord::Base.send(:include, EventCounter::ActiveRecordExtension)

# ActiveSupport 4+ exposes the time core extensions under a different path.
if ActiveSupport::VERSION::MAJOR > 3
  require 'active_support/core_ext/time'
else
  require 'active_support/time'
end
# :nodoc:
class ActiveSupport::TimeWithZone
  # Round this time to the nearest multiple of +seconds+ (default: minute).
  def round_off(seconds = 60)
    multiple = (to_f / seconds).round
    Time.zone.at(multiple * seconds)
  end

  # Truncate this time down to the previous multiple of +seconds+.
  def floor(seconds = 60)
    multiple = (to_f / seconds).floor
    Time.zone.at(multiple * seconds)
  end
end
# :nodoc:
class String
  unless method_defined?(:squish!)
    # Backport of Rails' String#squish!: collapses every run of whitespace to
    # a single space and trims the ends, mutating the receiver in place.
    def squish!
      gsub!(/[[:space:]]+/, ' ')
      sub!(/\A /, '')
      sub!(/ \z/, '')
      self
    end
  end
end
Fixed typo
require 'event_counter/version'
# This class defines the model that stores all counters.
class EventCounter < ActiveRecord::Base
  include EventCounterVersion

  belongs_to :countable, polymorphic: true

  # Limits counters to those created within the given time range. Returns the
  # unmodified scope when +range+ is nil.
  scope :within, ->(range) { range && where(created_at: range) }

  # Atomically increases the counter by +val+ in the database and mirrors the
  # change on the in-memory attribute. Returns self.
  def increase_by(val)
    self.class.where(id: id).update_all(['value = value + ?', val])
    increment(:value, val)
    self
  end

  # Atomically decreases the counter by +decrement+ in the database and
  # mirrors the change on the in-memory attribute. Returns self.
  def decrease_by(decrement)
    self.class.where(id: id).update_all(['value = value - ?', decrement])
    decrement(:value, decrement)
    self
  end

  # Sets the counter to +val+ (default 0) in the database and in memory.
  # Returns self.
  def reset_value(val = 0)
    self.class.where(id: id).update_all(['value = ?', val])
    self.value = val
    self
  end

  # Creates a counter record for the normalized +on_time+ slot, or — when
  # +force+ is set and a record already exists for that slot — resets it.
  def self.make(val = 1, on_time: nil, force: false)
    on_time = normalize_on_time!(on_time)
    attrs = { created_at: on_time }
    if force && (found = scoped_relation.where(attrs).first)
      found.reset_value(val)
    else
      attrs.merge!(value: val)
      scoped_relation.create!(attrs)
    end
  end

  # Interval configured for this counter on the owning (countable) model.
  def self.current_interval
    scoped_relation.proxy_association.owner.event_counters[counter_name]
  end

  # Name of the association through which this counter is accessed.
  def self.counter_name
    scoped_relation.proxy_association.reflection.name
  end

  # Changes the counter for the normalized time slot by +val+, moving in the
  # +vector+ direction (:up or :down). Raises CounterError for any other
  # vector. Creates the slot when it does not exist yet.
  def self.change(val = 1, vector: :up, on_time: nil, force: nil)
    counter_error!(:direction) unless [:up, :down].include?(vector)
    val ||= 1
    on_time = normalize_on_time!(on_time)
    counter = where(created_at: on_time).first
    return counter.update!(vector, val, force) if counter
    val = -val if vector == :down
    make(val, on_time: on_time, force: force)
  end

  # Applies a change to an existing counter row.
  # NOTE(review): this shadows ActiveRecord's own #update! on this model;
  # callers must not expect the AR attribute-update semantics here.
  def update!(vector, val = 1, force = false)
    if force
      val = -val if vector == :down
      reset_value(val)
    else
      vector == :up ? increase_by(val) : decrease_by(val)
    end
  end

  # Current relation, compatible with both ActiveRecord 3 (scoped) and 4+
  # (where(nil)).
  def self.scoped_relation
    ActiveRecord::VERSION::MAJOR > 3 ? where(nil) : scoped
  end

  # Increases the counter. Previously this called change(:up, *args), which
  # passed :up positionally into +val+ rather than setting the +vector+
  # keyword; fixed to forward the value and options with vector: :up.
  def self.up!(val = 1, **opts)
    change(val, vector: :up, **opts)
  end

  # Decreases the counter (see .up! for the forwarding fix).
  def self.down!(val = 1, **opts)
    change(val, vector: :down, **opts)
  end

  # Raises CounterError with the given message key and optional extra data.
  def self.counter_error!(*args)
    fail CounterError, args
  end

  # Validates that +on_time+ carries a time zone and snaps it to the start of
  # the counter's configured interval (a Symbol such as :day, or a number of
  # seconds).
  def self.normalize_on_time!(on_time)
    on_time ||= Time.zone.now
    counter_error!(:time_zone) unless on_time.is_a?(ActiveSupport::TimeWithZone)
    on_time =
      case current_interval
      when Symbol
        on_time.in_time_zone.send(:"beginning_of_#{current_interval}")
      else
        on_time.in_time_zone.floor(current_interval)
      end
    on_time
  end

  # Default error class
  class CounterError < StandardError
    MESSAGES = {
      not_found: 'Unable to find counter (%{name}).',
      direction: 'Wrong direction for counter. ' \
        'Possible values are :up and :down as symbols.',
      less: 'Specified interval (%{interval}) could not be less then ' \
        'a defined (%{default_interval}) in a countable model (%{model}).',
      multiple: 'Specified interval (%{interval}) should be a multiple of ' \
        'a defined (%{default_interval}) in a countable model (%{model}).',
      time_zone: 'The :on_time option should be defined with time zone, e.x.: ' \
        'Time.zone.local(2014, 1, 1, 1, 1)'
    }.freeze

    attr_accessor :extra

    def initialize(*args)
      # `flatten` (not `flatten!`): flatten! returns nil when the array is
      # already flat, which would silently wipe out the message.
      @msg, self.extra = args.flatten
      super(@msg)
    end

    def to_s
      @msg.is_a?(Symbol) ? MESSAGES[@msg] % extra : super
    end
  end
end
require 'event_counter/active_record_extension'

# Mix the counter extension into every ActiveRecord model.
ActiveRecord::Base.send(:include, EventCounter::ActiveRecordExtension)

# ActiveSupport 4+ exposes the time core extensions under a different path.
if ActiveSupport::VERSION::MAJOR > 3
  require 'active_support/core_ext/time'
else
  require 'active_support/time'
end
# :nodoc:
class ActiveSupport::TimeWithZone
  # Round this time to the nearest multiple of +seconds+ (default: minute).
  def round_off(seconds = 60)
    multiple = (to_f / seconds).round
    Time.zone.at(multiple * seconds)
  end

  # Truncate this time down to the previous multiple of +seconds+.
  def floor(seconds = 60)
    multiple = (to_f / seconds).floor
    Time.zone.at(multiple * seconds)
  end
end
# :nodoc:
class String
  unless method_defined?(:squish!)
    # Backport of Rails' String#squish!: collapses every run of whitespace to
    # a single space and trims the ends, mutating the receiver in place.
    def squish!
      gsub!(/[[:space:]]+/, ' ')
      sub!(/\A /, '')
      sub!(/ \z/, '')
      self
    end
  end
end
|
# ChefSpec matchers for the +iptables_rule+ custom resource. Defined only when
# ChefSpec is loaded, so a normal Chef run is unaffected.
if defined?(ChefSpec)
  # Asserts that the named iptables_rule resource was converged with :enable.
  def enable_iptables_rule(resource_name)
    ChefSpec::Matchers::ResourceMatcher.new(:iptables_rule, :enable, resource_name)
  end

  # Asserts that the named iptables_rule resource was converged with :disable.
  def disable_iptables_rule(resource_name)
    ChefSpec::Matchers::ResourceMatcher.new(:iptables_rule, :disable, resource_name)
  end
end
Remove ChefSpec matchers that are no longer needed.
ChefSpec auto-generates these now.
Signed-off-by: Tim Smith <764ef62106582a09ed09dfa0b6bff7c05fd7d1e4@chef.io>
|
# just an error to ensure this doesn't get used while it's crashing the system
#1/0

# Be sure to restart your web server when you modify this file.

# Uncomment below to force Rails into production mode when
# you don't control web/app server and can't set it the proper way
# ENV['RAILS_ENV'] ||= 'production'

# Specifies gem version of Rails to use when vendor/rails is not present
RAILS_GEM_VERSION = '2.3.4' unless defined? RAILS_GEM_VERSION

# Bootstrap the Rails environment, frameworks, and default configuration
require File.join(File.dirname(__FILE__), 'boot')
require File.join(File.dirname(__FILE__), '../lib/localization.rb')
Localization.load

# Truthy (match index) when running on JRuby, nil otherwise.
JAVA = RUBY_PLATFORM =~ /java/
if JAVA
  # JRuby uses the JDBC adapter instead of the native MySQL driver.
  require 'rubygems'
  gem 'activerecord-jdbc-adapter'
  require 'jdbc_adapter'
end

Rails::Initializer.run do |config|
  # Settings in config/environments/* take precedence over those specified here.
  # Application configuration should go into files in config/initializers
  # -- all .rb files in that directory are automatically loaded.

  # Add additional load paths for your own custom dirs
  # config.load_paths += %W( #{RAILS_ROOT}/extras )

  # Only load the plugins named here, in the order given (default is alphabetical).
  # :all can be used as a placeholder for all plugins not explicitly named
  # config.plugins = [ :exception_notification, :ssl_requirement, :all ]

  # Skip frameworks you're not going to use. To use Rails without a database,
  # you must remove the Active Record framework.
  # config.frameworks -= [ :active_record, :active_resource, :action_mailer ]

  # Use the database for sessions instead of the file system
  # (create the session table with 'rake create_sessions_table')
  config.action_controller.session_store = :active_record_store

  # Enable page/fragment caching by setting a file-based store
  # (remember to create the caching directory and make it readable to the application)
  config.action_controller.cache_store = :file_store, "#{RAILS_ROOT}/tmp/cache"

  # Activate observers that should always be running
  # config.active_record.observers = :cacher, :garbage_collector

  # Set Time.zone default to the specified zone and make Active Record auto-convert to this zone.
  # Run "rake -D time" for a list of tasks for finding time zone names.
  config.time_zone = 'UTC'

  # The default locale is :en and all translations from config/locales/*.rb,yml are auto loaded.
  # config.i18n.load_path += Dir[Rails.root.join('my', 'locales', '*.{rb,yml}')]
  # config.i18n.default_locale = :de

  # Rotate logs when they reach 50Mb and keep 5 old logs
  config.logger = Logger.new(config.log_path, 5, 50*1024*1024)

  config.gem 'splattael-activerecord_base_without_table', :lib => 'activerecord_base_without_table', :source => 'http://gems.github.com'
  config.gem 'daemons', :version => '1.0.10'
  config.gem 'eventmachine', :version => '0.12.8'
  config.gem 'mislav-will_paginate', :version => '2.3.11', :lib => 'will_paginate', :source => 'http://gems.github.com'
  config.gem 'icalendar', :version => '1.1.0'
  config.gem 'tzinfo'
  config.gem 'RedCloth', :version => '4.2.2'
  config.gem 'gchartrb', :version => '0.8', :lib => 'google_chart'

  # Native-extension gems that are skipped on JRuby (see JAVA above).
  if !JAVA
    config.gem 'mysql'
    config.gem 'rmagick', :lib => 'RMagick'
    config.gem 'json', :version => '1.1.9'
  end

  if RUBY_VERSION < "1.9"
    # fastercsv has been moved in as default csv engine in 1.9
    config.gem 'fastercsv', :version => '1.5.0'
  else
    require "csv"
    # On 1.9+, expose the stdlib CSV under the FasterCSV name so existing
    # call sites keep working.
    if !defined?(FasterCSV)
      class Object
        FasterCSV = CSV
        alias_method :FasterCSV, :CSV
      end
    end
  end

  # Gems used for automated testing
  config.gem "thoughtbot-shoulda", :lib => "shoulda", :source => "http://gems.github.com"
  config.gem "nokogiri"
  config.gem "webrat"
  config.gem "faker"
  config.gem "notahat-machinist", :lib => "machinist", :source => "http://gems.github.com"

  # Juggernaut is installed as a plugin and heavily customised, therefore it cannot be listed here.

  # CUSTOM GEMS
  # Any gem files which aren't needed for the system to work, but may
  # be required for your own development should be in this file:
  custom_gems_file = "#{ RAILS_ROOT }/config/custom.gems.rb"
  load custom_gems_file if File.exist?(custom_gems_file)
  load_custom_gems(config) if respond_to?(:load_custom_gems)
end

# Fixed session expiry date rather than expiring with the browser session.
ActionController::Base.session_options[:session_expires]= Time.local(2015,"jan")

#
# Add new inflection rules using the following format
# (all these examples are active by default):
# Inflector.inflections do |inflect|
#   inflect.plural /^(ox)$/i, '\1en'
#   inflect.singular /^(ox)en/i, '\1'
#   inflect.irregular 'person', 'people'
#   inflect.uncountable %w( fish sheep )
# end

require File.join(File.dirname(__FILE__), '../lib/rails_extensions')
load File.join(File.dirname(__FILE__), 'environment.local.rb')
require File.join(File.dirname(__FILE__), '../lib/misc.rb')

require_dependency 'tzinfo'
include TZInfo
Allow any version of json to be used since this is needed for Heroku.
# just an error to ensure this doesn't get used while it's crashing the system
#1/0

# Be sure to restart your web server when you modify this file.

# Uncomment below to force Rails into production mode when
# you don't control web/app server and can't set it the proper way
# ENV['RAILS_ENV'] ||= 'production'

# Specifies gem version of Rails to use when vendor/rails is not present
RAILS_GEM_VERSION = '2.3.4' unless defined? RAILS_GEM_VERSION

# Bootstrap the Rails environment, frameworks, and default configuration
require File.join(File.dirname(__FILE__), 'boot')
require File.join(File.dirname(__FILE__), '../lib/localization.rb')
Localization.load

# Truthy (match index) when running on JRuby, nil otherwise.
JAVA = RUBY_PLATFORM =~ /java/
if JAVA
  # JRuby uses the JDBC adapter instead of the native MySQL driver.
  require 'rubygems'
  gem 'activerecord-jdbc-adapter'
  require 'jdbc_adapter'
end

Rails::Initializer.run do |config|
  # Settings in config/environments/* take precedence over those specified here.
  # Application configuration should go into files in config/initializers
  # -- all .rb files in that directory are automatically loaded.

  # Add additional load paths for your own custom dirs
  # config.load_paths += %W( #{RAILS_ROOT}/extras )

  # Use the database for sessions instead of the file system
  # (create the session table with 'rake create_sessions_table')
  config.action_controller.session_store = :active_record_store

  # Enable page/fragment caching by setting a file-based store
  # (remember to create the caching directory and make it readable to the application)
  config.action_controller.cache_store = :file_store, "#{RAILS_ROOT}/tmp/cache"

  # Activate observers that should always be running
  # config.active_record.observers = :cacher, :garbage_collector

  # Set Time.zone default to the specified zone and make Active Record auto-convert to this zone.
  # Run "rake -D time" for a list of tasks for finding time zone names.
  config.time_zone = 'UTC'

  # The default locale is :en and all translations from config/locales/*.rb,yml are auto loaded.
  # config.i18n.load_path += Dir[Rails.root.join('my', 'locales', '*.{rb,yml}')]
  # config.i18n.default_locale = :de

  # Rotate logs when they reach 50Mb and keep 5 old logs
  config.logger = Logger.new(config.log_path, 5, 50*1024*1024)

  config.gem 'splattael-activerecord_base_without_table', :lib => 'activerecord_base_without_table', :source => 'http://gems.github.com'
  config.gem 'daemons', :version => '1.0.10'
  config.gem 'eventmachine', :version => '0.12.8'
  config.gem 'mislav-will_paginate', :version => '2.3.11', :lib => 'will_paginate', :source => 'http://gems.github.com'
  config.gem 'icalendar', :version => '1.1.0'
  config.gem 'tzinfo'
  config.gem 'RedCloth', :version => '4.2.2'
  config.gem 'gchartrb', :version => '0.8', :lib => 'google_chart'

  # Native-extension gems that are skipped on JRuby (see JAVA above).
  if !JAVA
    config.gem 'mysql'
    config.gem 'rmagick', :lib => 'RMagick'
    # json is intentionally unpinned so the platform (e.g. Heroku) can
    # supply any compatible version.
    config.gem 'json'
  end

  if RUBY_VERSION < "1.9"
    # fastercsv has been moved in as default csv engine in 1.9
    config.gem 'fastercsv', :version => '1.5.0'
  else
    require "csv"
    # On 1.9+, expose the stdlib CSV under the FasterCSV name so existing
    # call sites keep working.
    if !defined?(FasterCSV)
      class Object
        FasterCSV = CSV
        alias_method :FasterCSV, :CSV
      end
    end
  end

  # Gems used for automated testing
  config.gem "thoughtbot-shoulda", :lib => "shoulda", :source => "http://gems.github.com"
  config.gem "nokogiri"
  config.gem "webrat"
  config.gem "faker"
  config.gem "notahat-machinist", :lib => "machinist", :source => "http://gems.github.com"

  # Juggernaut is installed as a plugin and heavily customised, therefore it cannot be listed here.

  # CUSTOM GEMS
  # Any gem files which aren't needed for the system to work, but may
  # be required for your own development should be in this file:
  custom_gems_file = "#{ RAILS_ROOT }/config/custom.gems.rb"
  load custom_gems_file if File.exist?(custom_gems_file)
  load_custom_gems(config) if respond_to?(:load_custom_gems)
end

# Fixed session expiry date rather than expiring with the browser session.
ActionController::Base.session_options[:session_expires]= Time.local(2015,"jan")

#
# Add new inflection rules using the following format
# (all these examples are active by default):
# Inflector.inflections do |inflect|
#   inflect.plural /^(ox)$/i, '\1en'
#   inflect.singular /^(ox)en/i, '\1'
#   inflect.irregular 'person', 'people'
#   inflect.uncountable %w( fish sheep )
# end

require File.join(File.dirname(__FILE__), '../lib/rails_extensions')
load File.join(File.dirname(__FILE__), 'environment.local.rb')
require File.join(File.dirname(__FILE__), '../lib/misc.rb')

require_dependency 'tzinfo'
include TZInfo
|
module Exact
  # Gem version. Frozen so the shared constant string cannot be mutated.
  VERSION = '0.4.22'.freeze
end
Updated version number
module Exact
  # Gem version. Frozen so the shared constant string cannot be mutated.
  VERSION = '0.4.23'.freeze
end
|
# Environment the app runs in; defaults to development when RACK_ENV is unset.
RACK_ENV = ENV['RACK_ENV'] || 'development'
# Absolute path of the project root (two directories up from this file).
BASE_PATH = File.expand_path('../..', __FILE__)

require 'bundler'
Bundler.setup(:default, RACK_ENV)

puts "Initializing App in #{RACK_ENV} mode..."

# Load App Path
$LOAD_PATH.unshift(File.join(BASE_PATH, 'app'))
$LOAD_PATH.unshift(File.join(BASE_PATH, 'config'))

# Database connection: production reads the URL from the environment; other
# environments read config/database.yml, keyed by RACK_ENV.
require 'sequel'
require 'yaml'
db_config = if RACK_ENV == 'production'
  ENV['DATABASE_URL']
else
  YAML.load_file(File.join(BASE_PATH, 'config/database.yml'))[RACK_ENV]
end
DB = Sequel.connect(db_config)
Sequel.extension :migration
Heroku will automatically fill in the database info via DATABASE_URL.
# Environment the app runs in; defaults to development when RACK_ENV is unset.
RACK_ENV = ENV['RACK_ENV'] || 'development'
# Absolute path of the project root (two directories up from this file).
BASE_PATH = File.expand_path('../..', __FILE__)

require 'bundler'
Bundler.setup(:default, RACK_ENV)

puts "Initializing App in #{RACK_ENV} mode..."

# Load App Path
$LOAD_PATH.unshift(File.join(BASE_PATH, 'app'))
$LOAD_PATH.unshift(File.join(BASE_PATH, 'config'))

# Database connection: prefer DATABASE_URL (set automatically on Heroku),
# falling back to config/database.yml keyed by RACK_ENV.
require 'sequel'
require 'yaml'
db_config = ENV['DATABASE_URL']
db_config ||= YAML.load_file(File.join(BASE_PATH, 'config/database.yml'))[RACK_ENV]
DB = Sequel.connect(db_config)
Sequel.extension :migration
|
$KCODE='u'
AUTHORIZATION_MIXIN = "object roles"
DEFAULT_REDIRECTION_HASH = { :controller => '/manage/access', :action => 'denied' }
# Be sure to restart your web server when you modify this file.
# Uncomment below to force Rails into production mode when
# you don't control web/app server and can't set it the proper way
ENV['RAILS_ENV'] ||= 'production'
ENV['HOME'] ||= '/home/webiva'
# Specifies gem version of Rails to use when vendor/rails is not present
# RAILS_GEM_VERSION = '2.3.4'
require 'yaml'
# Set up some constants
defaults_config_file = YAML.load_file(File.join(File.dirname(__FILE__), "defaults.yml"))
CMS_DEFAULTS = defaults_config_file
WEBIVA_LOGO_FILE = defaults_config_file['logo_override'] || nil
CMS_DEFAULT_LANGUAGE = defaults_config_file['default_language'] || 'en'
CMS_DEFAULT_CONTRY = defaults_config_file['default_country'] || 'US'
CMS_CACHE_ACTIVE = defaults_config_file['active_cache'] || true
CMS_DEFAULT_DOMAIN = defaults_config_file['domain']
CMS_SYSTEM_ADMIN_EMAIL = defaults_config_file['system_admin']
CMS_EDITOR_LOGIN_SUPPORT = defaults_config_file['editor_login'] || false
DEFAULT_DATETIME_FORMAT = defaults_config_file['default_datetime_format'] || "%m/%d/%Y %I:%M %p"
DEFAULT_DATE_FORMAT = defaults_config_file['default_date_format'] || "%m/%d/%Y"
BETA_CODE = defaults_config_file['enable_beta_code'] || false
GIT_REPOSITORY = defaults_config_file['git_repository'] || nil
CMS_DEFAULT_TIME_ZONE = defaults_config_file['time_zone'] || 'Eastern Time (US & Canada)'
#RAILS_ROOT = File.dirname(__FILE__) + "../" unless defined?(RAILS_ROOT)
require File.join(File.dirname(__FILE__), 'boot')
class Rails::Plugin
def webiva_remove_load_paths(file)
dir = File.dirname(file)
begin
load_paths.each do |path|
ActiveSupport::Dependencies.load_once_paths.delete(path) if path.include?(dir)
end
rescue Exception => e
load_paths.each do |path|
Dependencies.load_once_paths.delete(path) if path.include?(dir)
end
end
end
end
# Rails 2.x initializer: database config lives in config/cms.yml,
# sessions are stored in memcached, and Webiva components under
# vendor/modules are treated as plugins.
Rails::Initializer.run do |config|
  config.database_configuration_file = "#{RAILS_ROOT}/config/cms.yml"
  config.action_controller.session_store = :mem_cache_store
  config.plugin_paths = ["#{RAILS_ROOT}/vendor/plugins", "#{RAILS_ROOT}/vendor/modules" ]
  config.time_zone = CMS_DEFAULT_TIME_ZONE
  #config.load_paths += Dir["#{RAILS_ROOT}/vendor/gems/**"].map do |dir|
  # File.directory?(lib = "#{dir}/lib") ? lib : dir
  #end
  # Runtime gem dependencies (installable via `rake gems:install`).
  config.gem 'mysql'
  config.gem 'mime-types', :lib => 'mime/types'
  config.gem 'radius'
  config.gem 'RedCloth', :lib => 'redcloth'
  config.gem 'BlueCloth', :lib => 'bluecloth'
  config.gem 'gruff'
  config.gem 'slave'
  config.gem 'hpricot'
  config.gem 'daemons'
  config.gem 'maruku'
  config.gem 'net-ssh', :lib => 'net/ssh'
  config.gem 'rmagick', :lib => 'RMagick'
  config.gem 'libxml-ruby', :lib => 'xml'
  config.gem 'soap4r', :lib => 'soap/soap'
  config.gem "json"
  # The memcache client gem is only needed when caching is enabled.
  if CMS_CACHE_ACTIVE
    config.gem 'memcache-client', :lib => 'memcache'
  end
end
# Only use X_SEND_FILE if it's enabled and we're not in test mode
USE_X_SEND_FILE = (RAILS_ENV == 'test' || RAILS_ENV == 'cucumber' || RAILS_ENV == 'selenium') ? false : (defaults_config_file['use_x_send_file'] || false)
# Shared memcached client; used for page caching, sessions and Workling
# return values.
memcache_options = {
  :c_threshold => 10_000,
  :compression => true,
  :debug => false,
  :namespace => 'Webiva',
  :readonly => false,
  :urlencode => false
}
CACHE = MemCache.new memcache_options
# Workling::Remote.dispatcher = Workling::Remote::Runners::StarlingRunner.new
# Background jobs are dispatched through Starling; return values are
# stored in the shared memcache instance.
Workling::Remote.dispatcher = Workling::Remote::Runners::StarlingRunner.new
Workling::Return::Store::Base # Load the base module first
Workling::Return::Store.instance = CACHE
ActionMailer::Base.logger = nil unless RAILS_ENV == 'development'
# Copy Assets over
# Each module under vendor/modules gets its public/ assets mirrored into
# public/components/<module> so the web server can serve them directly.
Dir.glob("#{RAILS_ROOT}/vendor/modules/[a-z]*") do |file|
  if file =~ /\/([a-z_-]+)\/{0,1}$/
    mod_name = $1
    if File.directory?(file + "/public")
      FileUtils.mkpath("#{RAILS_ROOT}/public/components/#{mod_name}")
      FileUtils.cp_r(Dir.glob(file + "/public/*"),"#{RAILS_ROOT}/public/components/#{mod_name}/")
    end
  end
end
# NOTE(review): duplicates the RAILS_ENV check above, this time via
# ENV['RAILS_ENV'] — one of the two is likely redundant.
ActionMailer::Base.logger = nil unless ENV['RAILS_ENV'] == 'development'
# Test runs connect to the dedicated 'test' database and activate the
# domain configured as 'testing_domain' in defaults.yml.
if RAILS_ENV == 'test'
  if defaults_config_file['testing_domain']
    ActiveRecord::Base.establish_connection(YAML.load_file("#{RAILS_ROOT}/config/cms.yml")['test'])
    SystemModel.establish_connection(YAML.load_file("#{RAILS_ROOT}/config/cms.yml")['test'])
    DomainModel.activate_domain(Domain.find(defaults_config_file['testing_domain']).attributes,'production',false)
  else
    raise 'No Available Testing Database!'
  end
end
# Cucumber/Selenium runs use the 'cucumber' database and domain.
if RAILS_ENV == 'cucumber' || RAILS_ENV == 'selenium'
  if defaults_config_file['cucumber_domain']
    ActiveRecord::Base.establish_connection(YAML.load_file("#{RAILS_ROOT}/config/cms.yml")['cucumber'])
    SystemModel.establish_connection(YAML.load_file("#{RAILS_ROOT}/config/cms.yml")['cucumber'])
    dmn = Domain.find(defaults_config_file['cucumber_domain']).attributes
    DomainModel.activate_domain(dmn,'production',false)
  else
    raise 'No Available Cucumber Database!'
  end
end
# Route Globalize's translation records through the active domain's
# database connection rather than ActiveRecord's default one.
module Globalize
  class ModelTranslation
    class << self
      # Delegate connection lookup to DomainModel so translations live
      # in the per-domain database.
      def connection
        DomainModel.connection
      end
    end
  end
end
Globalize::ModelTranslation.set_table_name('globalize_translations')
CACHE.servers = [ 'localhost:11211' ]
ActionController::Base.session_options[:cache] = CACHE
# Globalize Setup
include Globalize
# Load up some monkey patches
# For: Globalize and Date and Time classes
require 'webiva_monkey_patches'
# Base Language is always en-US - Language application was written in
Locale.set_base_language('en-US')
gem 'soap4r'
Added a 30-minute expiry limit on sessions.
# Force UTF-8 string handling (Ruby 1.8).
$KCODE='u'
AUTHORIZATION_MIXIN = "object roles"
DEFAULT_REDIRECTION_HASH = { :controller => '/manage/access', :action => 'denied' }
# Be sure to restart your web server when you modify this file.
# Uncomment below to force Rails into production mode when
# you don't control web/app server and can't set it the proper way
ENV['RAILS_ENV'] ||= 'production'
ENV['HOME'] ||= '/home/webiva'
# Specifies gem version of Rails to use when vendor/rails is not present
# RAILS_GEM_VERSION = '2.3.4'
require 'yaml'
# Set up some constants from config/defaults.yml; `||` fallbacks supply
# defaults when a key is missing or blank.
defaults_config_file = YAML.load_file(File.join(File.dirname(__FILE__), "defaults.yml"))
CMS_DEFAULTS = defaults_config_file
WEBIVA_LOGO_FILE = defaults_config_file['logo_override'] || nil
CMS_DEFAULT_LANGUAGE = defaults_config_file['default_language'] || 'en'
# NOTE(review): constant name 'CONTRY' is a typo, kept because other
# files may reference it by this name.
CMS_DEFAULT_CONTRY = defaults_config_file['default_country'] || 'US'
# FIX: the original `defaults_config_file['active_cache'] || true` could
# never yield false — an explicit `active_cache: false` in defaults.yml
# was silently overridden to true. Only a missing/blank key should fall
# back to the enabled default.
CMS_CACHE_ACTIVE = defaults_config_file['active_cache'].nil? ? true : defaults_config_file['active_cache']
CMS_DEFAULT_DOMAIN = defaults_config_file['domain']
CMS_SYSTEM_ADMIN_EMAIL = defaults_config_file['system_admin']
CMS_EDITOR_LOGIN_SUPPORT = defaults_config_file['editor_login'] || false
DEFAULT_DATETIME_FORMAT = defaults_config_file['default_datetime_format'] || "%m/%d/%Y %I:%M %p"
DEFAULT_DATE_FORMAT = defaults_config_file['default_date_format'] || "%m/%d/%Y"
BETA_CODE = defaults_config_file['enable_beta_code'] || false
GIT_REPOSITORY = defaults_config_file['git_repository'] || nil
CMS_DEFAULT_TIME_ZONE = defaults_config_file['time_zone'] || 'Eastern Time (US & Canada)'
#RAILS_ROOT = File.dirname(__FILE__) + "../" unless defined?(RAILS_ROOT)
require File.join(File.dirname(__FILE__), 'boot')
# Reopens Rails::Plugin so Webiva modules can remove their own paths
# from the "load once" set, making their code reloadable in development.
class Rails::Plugin
  # Removes any of this plugin's load paths located under the directory
  # of +file+ from Dependencies.load_once_paths.
  def webiva_remove_load_paths(file)
    dir = File.dirname(file)
    begin
      load_paths.each do |path|
        ActiveSupport::Dependencies.load_once_paths.delete(path) if path.include?(dir)
      end
    rescue Exception => e
      # Fallback for older Rails where the constant was the top-level
      # Dependencies module rather than ActiveSupport::Dependencies.
      load_paths.each do |path|
        Dependencies.load_once_paths.delete(path) if path.include?(dir)
      end
    end
  end
end
# Rails 2.x initializer: database config lives in config/cms.yml,
# sessions are stored in memcached, and Webiva components under
# vendor/modules are treated as plugins.
Rails::Initializer.run do |config|
  config.database_configuration_file = "#{RAILS_ROOT}/config/cms.yml"
  config.action_controller.session_store = :mem_cache_store
  config.plugin_paths = ["#{RAILS_ROOT}/vendor/plugins", "#{RAILS_ROOT}/vendor/modules" ]
  config.time_zone = CMS_DEFAULT_TIME_ZONE
  #config.load_paths += Dir["#{RAILS_ROOT}/vendor/gems/**"].map do |dir|
  # File.directory?(lib = "#{dir}/lib") ? lib : dir
  #end
  # Runtime gem dependencies (installable via `rake gems:install`).
  config.gem 'mysql'
  config.gem 'mime-types', :lib => 'mime/types'
  config.gem 'radius'
  config.gem 'RedCloth', :lib => 'redcloth'
  config.gem 'BlueCloth', :lib => 'bluecloth'
  config.gem 'gruff'
  config.gem 'slave'
  config.gem 'hpricot'
  config.gem 'daemons'
  config.gem 'maruku'
  config.gem 'net-ssh', :lib => 'net/ssh'
  config.gem 'rmagick', :lib => 'RMagick'
  config.gem 'libxml-ruby', :lib => 'xml'
  config.gem 'soap4r', :lib => 'soap/soap'
  config.gem "json"
  # The memcache client gem is only needed when caching is enabled.
  if CMS_CACHE_ACTIVE
    config.gem 'memcache-client', :lib => 'memcache'
  end
end
# Only use X_SEND_FILE if it's enabled and we're not in test mode
USE_X_SEND_FILE = (RAILS_ENV == 'test' || RAILS_ENV == 'cucumber' || RAILS_ENV == 'selenium') ? false : (defaults_config_file['use_x_send_file'] || false)
# Shared memcached client; used for page caching, sessions and Workling
# return values.
memcache_options = {
  :c_threshold => 10_000,
  :compression => true,
  :debug => false,
  :namespace => 'Webiva',
  :readonly => false,
  :urlencode => false
}
CACHE = MemCache.new memcache_options
# Workling::Remote.dispatcher = Workling::Remote::Runners::StarlingRunner.new
# Background jobs are dispatched through Starling; return values are
# stored in the shared memcache instance.
Workling::Remote.dispatcher = Workling::Remote::Runners::StarlingRunner.new
Workling::Return::Store::Base # Load the base module first
Workling::Return::Store.instance = CACHE
ActionMailer::Base.logger = nil unless RAILS_ENV == 'development'
# Copy Assets over
# Each module under vendor/modules gets its public/ assets mirrored into
# public/components/<module> so the web server can serve them directly.
Dir.glob("#{RAILS_ROOT}/vendor/modules/[a-z]*") do |file|
  if file =~ /\/([a-z_-]+)\/{0,1}$/
    mod_name = $1
    if File.directory?(file + "/public")
      FileUtils.mkpath("#{RAILS_ROOT}/public/components/#{mod_name}")
      FileUtils.cp_r(Dir.glob(file + "/public/*"),"#{RAILS_ROOT}/public/components/#{mod_name}/")
    end
  end
end
# NOTE(review): duplicates the RAILS_ENV check above, this time via
# ENV['RAILS_ENV'] — one of the two is likely redundant.
ActionMailer::Base.logger = nil unless ENV['RAILS_ENV'] == 'development'
# Test runs connect to the dedicated 'test' database and activate the
# domain configured as 'testing_domain' in defaults.yml.
if RAILS_ENV == 'test'
  if defaults_config_file['testing_domain']
    ActiveRecord::Base.establish_connection(YAML.load_file("#{RAILS_ROOT}/config/cms.yml")['test'])
    SystemModel.establish_connection(YAML.load_file("#{RAILS_ROOT}/config/cms.yml")['test'])
    DomainModel.activate_domain(Domain.find(defaults_config_file['testing_domain']).attributes,'production',false)
  else
    raise 'No Available Testing Database!'
  end
end
# Cucumber/Selenium runs use the 'cucumber' database and domain.
if RAILS_ENV == 'cucumber' || RAILS_ENV == 'selenium'
  if defaults_config_file['cucumber_domain']
    ActiveRecord::Base.establish_connection(YAML.load_file("#{RAILS_ROOT}/config/cms.yml")['cucumber'])
    SystemModel.establish_connection(YAML.load_file("#{RAILS_ROOT}/config/cms.yml")['cucumber'])
    dmn = Domain.find(defaults_config_file['cucumber_domain']).attributes
    DomainModel.activate_domain(dmn,'production',false)
  else
    raise 'No Available Cucumber Database!'
  end
end
# Globalize translations must read/write through the per-domain
# database, so point the translation model at DomainModel's connection.
module Globalize
  class ModelTranslation
    class << self
      # The active domain's connection backs all translation queries.
      def connection
        DomainModel.connection
      end
    end
  end
end
# Translations table name + session storage wiring.
Globalize::ModelTranslation.set_table_name('globalize_translations')
CACHE.servers = [ 'localhost:11211' ]
# Sessions expire after 1800 seconds (30 minutes).
ActionController::Base.session_options[:expires] = 1800
ActionController::Base.session_options[:cache] = CACHE
# Globalize Setup
include Globalize
# Load up some monkey patches
# For: Globalize and Date and Time classes
require 'webiva_monkey_patches'
# Base Language is always en-US - Language application was written in
Locale.set_base_language('en-US')
gem 'soap4r'
|
module Excon
  # Hash subclass for HTTP headers: writes keep their original casing,
  # while lookups fall back to a lazily built index of downcased keys so
  # header access is case-insensitive.
  class Headers < Hash
    # Used only for feature detection against the running Ruby's Hash.
    SENTINEL = {}

    alias_method :raw_writer, :[]=
    alias_method :raw_reader, :[]
    alias_method :raw_assoc, :assoc if SENTINEL.respond_to? :assoc
    alias_method :raw_delete, :delete
    alias_method :raw_fetch, :fetch
    alias_method :raw_has_key?, :has_key?
    alias_method :raw_include?, :include?
    alias_method :raw_key?, :key?
    alias_method :raw_member?, :member?
    alias_method :raw_rehash, :rehash
    alias_method :raw_store, :store
    alias_method :raw_values_at, :values_at

    # Case-insensitive read: exact-case hits use the raw reader, misses
    # consult the downcased index.
    def [](key)
      should_delegate?(key) ? @downcased[key.downcase] : raw_reader(key)
    end

    # Write through to the hash and keep the downcased index in sync
    # (once it has been built).
    def []=(key, value)
      raw_writer(key, value)
      @downcased[key.downcase] = value unless @downcased.nil?
    end
    # FIX: alias_method takes (new_name, old_name); the original
    # `alias_method :[]=, :store` pointed the wrong way (and was then
    # clobbered by the def), so #store bypassed the downcased index.
    alias_method :store, :[]=

    # FIX: only define #assoc when the underlying Hash provides it
    # (Ruby >= 1.9); otherwise raw_assoc was never aliased and calling
    # assoc raised NoMethodError.
    if SENTINEL.respond_to? :assoc
      def assoc(obj)
        should_delegate?(obj) ? @downcased.assoc(obj.downcase) : raw_assoc(obj)
      end
    end

    def delete(key, &proc)
      should_delegate?(key) ? @downcased.delete(key.downcase, &proc) : raw_delete(key, &proc)
    end

    def fetch(key, default = nil, &proc)
      if should_delegate?(key)
        proc ? @downcased.fetch(key.downcase, &proc) : @downcased.fetch(key.downcase, default)
      else
        proc ? raw_fetch(key, &proc) : raw_fetch(key, default)
      end
    end

    # Case-insensitive key test.
    def has_key?(key)
      raw_has_key?(key) || begin
        index_case_insensitive if @downcased.nil?
        @downcased.has_key?(key.downcase)
      end
    end
    # FIX: the original `alias_method :has_key?, :key?` /:member? pairs
    # had the arguments reversed (and were then overwritten by the def
    # below them), leaving #key?, #member? and #include? case-sensitive.
    alias_method :key?, :has_key?
    alias_method :member?, :has_key?
    alias_method :include?, :has_key?

    def rehash
      raw_rehash
      @downcased.rehash if @downcased
    end

    # FIX: the original block returned nil for keys found with their
    # exact casing (the `if v.nil?` branch had no else); only fall back
    # to the downcased index when the raw lookup misses.
    def values_at(*keys)
      raw_values_at(*keys).zip(keys).map do |v, k|
        if v.nil?
          index_case_insensitive if @downcased.nil?
          @downcased[k.downcase]
        else
          v
        end
      end
    end

    private

    # True when the exact-case key is absent and lookup should go
    # through the downcased index (building it on first use).
    def should_delegate?(key)
      if raw_has_key?(key)
        false
      else
        index_case_insensitive if @downcased.nil?
        true
      end
    end

    # Build the downcased-key index from the current contents.
    def index_case_insensitive
      @downcased = {}
      each_pair do |key, value|
        @downcased[key.downcase] = value
      end
    end
  end
end
Conditionally define `:assoc` only when the underlying Hash supports it.
module Excon
  # Hash subclass for HTTP headers: writes keep their original casing,
  # while lookups fall back to a lazily built index of downcased keys so
  # header access is case-insensitive.
  class Headers < Hash
    # Used only for feature detection against the running Ruby's Hash.
    SENTINEL = {}

    alias_method :raw_writer, :[]=
    alias_method :raw_reader, :[]
    alias_method :raw_assoc, :assoc if SENTINEL.respond_to? :assoc
    alias_method :raw_delete, :delete
    alias_method :raw_fetch, :fetch
    alias_method :raw_has_key?, :has_key?
    alias_method :raw_include?, :include?
    alias_method :raw_key?, :key?
    alias_method :raw_member?, :member?
    alias_method :raw_rehash, :rehash
    alias_method :raw_store, :store
    alias_method :raw_values_at, :values_at

    # Case-insensitive read: exact-case hits use the raw reader, misses
    # consult the downcased index.
    def [](key)
      should_delegate?(key) ? @downcased[key.downcase] : raw_reader(key)
    end

    # Write through to the hash and keep the downcased index in sync
    # (once it has been built).
    def []=(key, value)
      raw_writer(key, value)
      @downcased[key.downcase] = value unless @downcased.nil?
    end
    # FIX: alias_method takes (new_name, old_name); the original
    # `alias_method :[]=, :store` pointed the wrong way (and was then
    # clobbered by the def), so #store bypassed the downcased index.
    alias_method :store, :[]=

    # Hash#assoc only exists on Ruby >= 1.9; define the wrapper only
    # when raw_assoc was aliased above.
    if SENTINEL.respond_to? :assoc
      def assoc(obj)
        should_delegate?(obj) ? @downcased.assoc(obj.downcase) : raw_assoc(obj)
      end
    end

    def delete(key, &proc)
      should_delegate?(key) ? @downcased.delete(key.downcase, &proc) : raw_delete(key, &proc)
    end

    def fetch(key, default = nil, &proc)
      if should_delegate?(key)
        proc ? @downcased.fetch(key.downcase, &proc) : @downcased.fetch(key.downcase, default)
      else
        proc ? raw_fetch(key, &proc) : raw_fetch(key, default)
      end
    end

    # Case-insensitive key test.
    def has_key?(key)
      raw_has_key?(key) || begin
        index_case_insensitive if @downcased.nil?
        @downcased.has_key?(key.downcase)
      end
    end
    # FIX: the original `alias_method :has_key?, :key?` /:member? pairs
    # had the arguments reversed (and were then overwritten by the def
    # below them), leaving #key?, #member? and #include? case-sensitive.
    alias_method :key?, :has_key?
    alias_method :member?, :has_key?
    alias_method :include?, :has_key?

    def rehash
      raw_rehash
      @downcased.rehash if @downcased
    end

    # FIX: the original block returned nil for keys found with their
    # exact casing (the `if v.nil?` branch had no else); only fall back
    # to the downcased index when the raw lookup misses.
    def values_at(*keys)
      raw_values_at(*keys).zip(keys).map do |v, k|
        if v.nil?
          index_case_insensitive if @downcased.nil?
          @downcased[k.downcase]
        else
          v
        end
      end
    end

    private

    # True when the exact-case key is absent and lookup should go
    # through the downcased index (building it on first use).
    def should_delegate?(key)
      if raw_has_key?(key)
        false
      else
        index_case_insensitive if @downcased.nil?
        true
      end
    end

    # Build the downcased-key index from the current contents.
    def index_case_insensitive
      @downcased = {}
      each_pair do |key, value|
        @downcased[key.downcase] = value
      end
    end
  end
end
|
# Be sure to restart your server when you modify this file
# Specifies gem version of Rails to use when vendor/rails is not present
RAILS_GEM_VERSION = '2.3.11' unless defined? RAILS_GEM_VERSION
# Bootstrap the Rails environment, frameworks, and default configuration
require File.join(File.dirname(__FILE__), 'boot')
# MySociety specific helper functions
$:.push(File.join(File.dirname(__FILE__), '../commonlib/rblib'))
# ... if these fail to include, you need the commonlib submodule from git
# Load the mySociety commonlib helpers via plain `load` (not require)
# from the path pushed above.
load "config.rb"
# load "email.rb"
load "format.rb"
load "mapit.rb"
load "mask.rb"
load "url_mapper.rb"
load "util.rb"
load "validate.rb"
load "voting_area.rb"
# Rails 2.3 initializer: loads the mySociety config, declares gem
# dependencies, and sets schema format / time zone / i18n / cache store.
Rails::Initializer.run do |config|
  # Load intial mySociety config
  MySociety::Config.set_file(File.join(config.root_path, 'config', 'general'), true)
  MySociety::Config.load_default
  # Settings in config/environments/* take precedence over those specified here.
  # Application configuration should go into files in config/initializers
  # -- all .rb files in that directory are automatically loaded.
  # Add additional load paths for your own custom dirs
  # NOTE(review): Rails 2.3 conventionally uses config.load_paths;
  # confirm autoload_paths is available in 2.3.11.
  config.autoload_paths += %W( #{RAILS_ROOT}/app/sweepers )
  # Specify gems that this application depends on and have them installed with rake gems:install
  # config.gem "bj"
  # config.gem "hpricot", :version => '0.6', :source => "http://code.whytheluckystiff.net"
  # config.gem "sqlite3-ruby", :lib => "sqlite3"
  # config.gem "aws-s3", :lib => "aws/s3"
  config.gem "rack", :version => '1.1.0'
  config.gem "erubis", :version => '2.6.6'
  config.gem "fastercsv", :version => '1.5.3'
  config.gem 'will_paginate', :version => '2.3.15'
  config.gem 'foreigner', :version => '0.9.1'
  config.gem "friendly_id", :version => '3.1.7'
  config.gem 'paper_trail', :version => '1.5.1'
  config.gem 'authlogic', :version => '2.1.6'
  config.gem 'session', :version => '3.1.0'
  config.gem 'text', :version => '0.2.0'
  config.gem 'rspec', :lib => false, :version => '1.3.1'
  config.gem 'rspec-rails', :lib => false, :version => '1.3.3'
  config.gem "paperclip", :version => "~> 2.3"
  # Only load the plugins named here, in the order given (default is alphabetical).
  # :all can be used as a placeholder for all plugins not explicitly named
  # config.plugins = [ :exception_notification, :ssl_requirement, :all ]
  # Skip frameworks you're not going to use. To use Rails without a database,
  # you must remove the Active Record framework.
  # config.frameworks -= [ :active_record, :active_resource, :action_mailer ]
  # Activate observers that should always be running
  # config.active_record.observers = :cacher, :garbage_collector, :forum_observer
  # Set the schema format to sql
  # FIX: the original `config.active_record.schema_format :sql` was a
  # method call with no assignment, so the setting was never applied.
  config.active_record.schema_format = :sql
  # Set Time.zone default to the specified zone and make Active Record auto-convert to this zone.
  # Run "rake -D time" for a list of tasks for finding time zone names.
  config.time_zone = 'UTC'
  # The default locale is :en and all translations from config/locales/*.rb,yml are auto loaded.
  # config.i18n.load_path += Dir[Rails.root.join('my', 'locales', '*.{rb,yml}')]
  # config.i18n.default_locale = :de
  config.i18n.load_path += Dir[File.join(RAILS_ROOT, 'config', 'locales', '**', '*.{rb,yml}')]
  # Set the cache store
  config.cache_store = :file_store, File.join(RAILS_ROOT, 'cache')
  # override default fieldWithError divs in model-associated forms
  config.action_view.field_error_proc = Proc.new{ |html_tag, instance| html_tag }
end
# Use an asset host setting so that the admin interface can always get css, images, js.
if (MySociety::Config.get("DOMAIN", "") != "")
  ActionController::Base.asset_host = MySociety::Config.get("DOMAIN", 'localhost:3000')
end
# Domain for URLs (so can work for scripts, not just web pages)
# FIX: the fallback host was a committed personal development IP
# (192.168.0.222:3000); default to localhost like the asset host above.
ActionMailer::Base.default_url_options[:host] = MySociety::Config.get("DOMAIN", 'localhost:3000')
# settings for exception notification
ExceptionNotification::Notifier.exception_recipients = MySociety::Config.get("BUGS_EMAIL", "")
ExceptionNotification::Notifier.sender_address = [MySociety::Config.get("BUGS_EMAIL", "")]
ExceptionNotification::Notifier.email_prefix = "[FixMyTransport] "
Whoops — revert the mailer's default host from my development machine's IP back to localhost.
# Be sure to restart your server when you modify this file
# Specifies gem version of Rails to use when vendor/rails is not present
RAILS_GEM_VERSION = '2.3.11' unless defined? RAILS_GEM_VERSION
# Bootstrap the Rails environment, frameworks, and default configuration
require File.join(File.dirname(__FILE__), 'boot')
# MySociety specific helper functions
$:.push(File.join(File.dirname(__FILE__), '../commonlib/rblib'))
# ... if these fail to include, you need the commonlib submodule from git
# Load the mySociety commonlib helpers via plain `load` (not require)
# from the path pushed above.
load "config.rb"
# load "email.rb"
load "format.rb"
load "mapit.rb"
load "mask.rb"
load "url_mapper.rb"
load "util.rb"
load "validate.rb"
load "voting_area.rb"
# Rails 2.3 initializer: loads the mySociety config, declares gem
# dependencies, and sets schema format / time zone / i18n / cache store.
Rails::Initializer.run do |config|
  # Load intial mySociety config
  MySociety::Config.set_file(File.join(config.root_path, 'config', 'general'), true)
  MySociety::Config.load_default
  # Settings in config/environments/* take precedence over those specified here.
  # Application configuration should go into files in config/initializers
  # -- all .rb files in that directory are automatically loaded.
  # Add additional load paths for your own custom dirs
  # NOTE(review): Rails 2.3 conventionally uses config.load_paths;
  # confirm autoload_paths is available in 2.3.11.
  config.autoload_paths += %W( #{RAILS_ROOT}/app/sweepers )
  # Specify gems that this application depends on and have them installed with rake gems:install
  # config.gem "bj"
  # config.gem "hpricot", :version => '0.6', :source => "http://code.whytheluckystiff.net"
  # config.gem "sqlite3-ruby", :lib => "sqlite3"
  # config.gem "aws-s3", :lib => "aws/s3"
  config.gem "rack", :version => '1.1.0'
  config.gem "erubis", :version => '2.6.6'
  config.gem "fastercsv", :version => '1.5.3'
  config.gem 'will_paginate', :version => '2.3.15'
  config.gem 'foreigner', :version => '0.9.1'
  config.gem "friendly_id", :version => '3.1.7'
  config.gem 'paper_trail', :version => '1.5.1'
  config.gem 'authlogic', :version => '2.1.6'
  config.gem 'session', :version => '3.1.0'
  config.gem 'text', :version => '0.2.0'
  config.gem 'rspec', :lib => false, :version => '1.3.1'
  config.gem 'rspec-rails', :lib => false, :version => '1.3.3'
  config.gem "paperclip", :version => "~> 2.3"
  # Only load the plugins named here, in the order given (default is alphabetical).
  # :all can be used as a placeholder for all plugins not explicitly named
  # config.plugins = [ :exception_notification, :ssl_requirement, :all ]
  # Skip frameworks you're not going to use. To use Rails without a database,
  # you must remove the Active Record framework.
  # config.frameworks -= [ :active_record, :active_resource, :action_mailer ]
  # Activate observers that should always be running
  # config.active_record.observers = :cacher, :garbage_collector, :forum_observer
  # Set the schema format to sql
  # FIX: the original `config.active_record.schema_format :sql` was a
  # method call with no assignment, so the setting was never applied.
  config.active_record.schema_format = :sql
  # Set Time.zone default to the specified zone and make Active Record auto-convert to this zone.
  # Run "rake -D time" for a list of tasks for finding time zone names.
  config.time_zone = 'UTC'
  # The default locale is :en and all translations from config/locales/*.rb,yml are auto loaded.
  # config.i18n.load_path += Dir[Rails.root.join('my', 'locales', '*.{rb,yml}')]
  # config.i18n.default_locale = :de
  config.i18n.load_path += Dir[File.join(RAILS_ROOT, 'config', 'locales', '**', '*.{rb,yml}')]
  # Set the cache store
  config.cache_store = :file_store, File.join(RAILS_ROOT, 'cache')
  # override default fieldWithError divs in model-associated forms
  config.action_view.field_error_proc = Proc.new{ |html_tag, instance| html_tag }
end
# Use an asset host setting so that the admin interface can always get css, images, js.
if (MySociety::Config.get("DOMAIN", "") != "")
  ActionController::Base.asset_host = MySociety::Config.get("DOMAIN", 'localhost:3000')
end
# Domain for URLs (so can work for scripts, not just web pages)
ActionMailer::Base.default_url_options[:host] = MySociety::Config.get("DOMAIN", 'localhost:3000')
# settings for exception notification
ExceptionNotification::Notifier.exception_recipients = MySociety::Config.get("BUGS_EMAIL", "")
ExceptionNotification::Notifier.sender_address = [MySociety::Config.get("BUGS_EMAIL", "")]
ExceptionNotification::Notifier.email_prefix = "[FixMyTransport] "
Modify the playlist example: append new tracks to an existing playlist.
#!/usr/bin/env ruby
# Example script: add tracks to an existing playlist through the local
# playlist REST API — GET the playlist, then PUT back the merged track
# list as JSON (the PUT replaces the whole list).
require 'rubygems'
require 'rest_client'
require 'json'
require 'pp'
# Sample playlist URIs used while testing:
#spotify:user:jberkel:playlist:51QyZ8kHWdx1wuetLfs571
#spotify:user:jberkel:playlist:52vlrAr9RdphpgVtP2SAGP (empty)
#spotify:user:jberkel:playlist:5aZg2QhSogW4Ukw7Q6kjRO
#spotify:user:jberkel:playlist:1G1BCuefz7bvZdQ5QbGLPR
#spotify:user:jberkel:playlist:28C9HrzCMmlSReG4mSQeuQ
#spotify:user:jberkel:playlist:4h9wgIOBjlqMoMD0C7LdDs
p_id = '4h9wgIOBjlqMoMD0C7LdDs'
p_url = "http://localhost:3000/playlists/#{p_id}"
puts "getting playlist #{p_id}"
resp = RestClient.get p_url
playlist = JSON.parse(resp)
pp playlist
# Ids of tracks already on the playlist, re-submitted below so they are
# preserved when the list is replaced.
existing_songs = playlist['result']['tracks'].map { |t| t['id'] }
# New tracks to append — NOTE(review): appears to accept both bare ids
# and full spotify:track: URIs; confirm against the API.
add_songs = ['1VaucR6Bsks5Q9bYBsXvuF', 'spotify:track:3RIgfgKZm7khbOokcYeFn0']
#add_songs = ['spotify:track:2jpVApJaYkYGYRL7WQHnvu']
#add_songs = [ 'spotify:track:4hXA0NkPLFf6mXMxzsQicd' ]
RestClient.put p_url, {
  'tracks' => (existing_songs + add_songs).map { |s| { 'id'=>s } }
}.to_json
|
# Be sure to restart your server when you modify this file
# Specifies gem version of Rails to use when vendor/rails is not present
RAILS_GEM_VERSION = '2.3.4' unless defined? RAILS_GEM_VERSION
# Bootstrap the Rails environment, frameworks, and default configuration
require File.join(File.dirname(__FILE__), 'boot')
Rails::Initializer.run do |config|
  # Settings in config/environments/* take precedence over those specified here.
  # Application configuration should go into files in config/initializers
  # -- all .rb files in that directory are automatically loaded.
  # Add additional load paths for your own custom dirs
  # config.load_paths += %W( #{RAILS_ROOT}/extras )
  # Specify gems that this application depends on and have them installed with rake gems:install
  # config.gem "bj"
  # config.gem "hpricot", :version => '0.6', :source => "http://code.whytheluckystiff.net"
  # config.gem "sqlite3-ruby", :lib => "sqlite3"
  # config.gem "aws-s3", :lib => "aws/s3"
  config.gem 'bcdatabase', :version => '>= 0.6.3'
  config.gem 'calendar_date_select', :version => '>= 1.1.5'
  config.gem 'rubycas-client', :version => '>= 2.1.0'
  config.gem 'will_paginate', :version => '>= 2.3.11'
  # Only load the plugins named here, in the order given (default is alphabetical).
  # :all can be used as a placeholder for all plugins not explicitly named
  # config.plugins = [ :exception_notification, :ssl_requirement, :all ]
  # Skip frameworks you're not going to use. To use Rails without a database,
  # you must remove the Active Record framework.
  # config.frameworks -= [ :active_record, :active_resource, :action_mailer ]
  # Activate observers that should always be running
  # config.active_record.observers = :cacher, :garbage_collector, :forum_observer
  config.active_record.observers = :audit_observer
  # Set Time.zone default to the specified zone and make Active Record auto-convert to this zone.
  # Run "rake -D time" for a list of tasks for finding time zone names.
  config.time_zone = 'UTC'
  # The default locale is :en and all translations from config/locales/*.rb,yml are auto loaded.
  # config.i18n.load_path += Dir[Rails.root.join('my', 'locales', '*.{rb,yml}')]
  # config.i18n.default_locale = :de
  config.active_record.schema_format = :sql
  # Locate the iNav YAML config: local file in dev/test, Tomcat's conf
  # directory when deployed to JRuby in staging/production.
  inav_config_file =
    case RAILS_ENV
    # FIX: 'produciton' was misspelled, so the production environment
    # fell through the case and inav_config_file was nil, crashing the
    # File.read below at boot.
    when 'development','test'; "config/inav.yml"
    when 'staging', 'production';
      if defined?(JRUBY_VERSION)
        require 'jruby'
        catalina_home = Java::JavaLang::System.getProperty('catalina.home')
        "#{catalina_home}/conf/inav/inav.yml"
      else
        "config/inav.yml"
      end
    end
  require 'erb'
  # The config file is ERB-processed before YAML parsing.
  INAV_CONFIG = YAML.load(ERB.new(File.read(inav_config_file)).result)
  config.action_mailer.delivery_method = :smtp
  config.action_mailer.smtp_settings = {
    :address => INAV_CONFIG[:smtp][:address],
    :port => INAV_CONFIG[:smtp][:port],
    :domain => INAV_CONFIG[:smtp][:domain]
  }
end
Fix typo: 'produciton' → 'production' in the environment case statement.
# Be sure to restart your server when you modify this file
# Specifies gem version of Rails to use when vendor/rails is not present
RAILS_GEM_VERSION = '2.3.4' unless defined? RAILS_GEM_VERSION
# Bootstrap the Rails environment, frameworks, and default configuration
require File.join(File.dirname(__FILE__), 'boot')
Rails::Initializer.run do |config|
  # Settings in config/environments/* take precedence over those specified here.
  # Application configuration should go into files in config/initializers
  # -- all .rb files in that directory are automatically loaded.
  # Add additional load paths for your own custom dirs
  # config.load_paths += %W( #{RAILS_ROOT}/extras )
  # Specify gems that this application depends on and have them installed with rake gems:install
  # config.gem "bj"
  # config.gem "hpricot", :version => '0.6', :source => "http://code.whytheluckystiff.net"
  # config.gem "sqlite3-ruby", :lib => "sqlite3"
  # config.gem "aws-s3", :lib => "aws/s3"
  config.gem 'bcdatabase', :version => '>= 0.6.3'
  config.gem 'calendar_date_select', :version => '>= 1.1.5'
  config.gem 'rubycas-client', :version => '>= 2.1.0'
  config.gem 'will_paginate', :version => '>= 2.3.11'
  # Only load the plugins named here, in the order given (default is alphabetical).
  # :all can be used as a placeholder for all plugins not explicitly named
  # config.plugins = [ :exception_notification, :ssl_requirement, :all ]
  # Skip frameworks you're not going to use. To use Rails without a database,
  # you must remove the Active Record framework.
  # config.frameworks -= [ :active_record, :active_resource, :action_mailer ]
  # Activate observers that should always be running
  # config.active_record.observers = :cacher, :garbage_collector, :forum_observer
  config.active_record.observers = :audit_observer
  # Set Time.zone default to the specified zone and make Active Record auto-convert to this zone.
  # Run "rake -D time" for a list of tasks for finding time zone names.
  config.time_zone = 'UTC'
  # The default locale is :en and all translations from config/locales/*.rb,yml are auto loaded.
  # config.i18n.load_path += Dir[Rails.root.join('my', 'locales', '*.{rb,yml}')]
  # config.i18n.default_locale = :de
  config.active_record.schema_format = :sql
  # Locate the iNav YAML config: local file in dev/test, Tomcat's conf
  # directory when deployed to JRuby in staging/production.
  inav_config_file =
    case RAILS_ENV
    when 'development','test'; "config/inav.yml"
    when 'staging', 'production';
      if defined?(JRUBY_VERSION)
        require 'jruby'
        catalina_home = Java::JavaLang::System.getProperty('catalina.home')
        "#{catalina_home}/conf/inav/inav.yml"
      else
        "config/inav.yml"
      end
    end
  require 'erb'
  # The config file is ERB-processed before YAML parsing.
  INAV_CONFIG = YAML.load(ERB.new(File.read(inav_config_file)).result)
  config.action_mailer.delivery_method = :smtp
  config.action_mailer.smtp_settings = {
    :address => INAV_CONFIG[:smtp][:address],
    :port => INAV_CONFIG[:smtp][:port],
    :domain => INAV_CONFIG[:smtp][:domain]
  }
end
|
# Lightweight wrapper around a Facebook Graph API user session.
# Profile attributes, albums and profile pictures are fetched lazily
# through FacebookRequest and memoized per instance.
class FacebookUser
  attr_accessor :uid, :session_key, :secret, :expires, :base_domain, :access_token

  # Each reader pulls one field out of the memoized Graph payload.
  %w(name timezone birthday last_name first_name verified updated_time gender email).each do |field|
    define_method(field) { get_attributes[field] }
  end

  # Default picture is the square variant.
  def picture
    picture_square
  end

  def picture_large
    graph_picture_url("large")
  end

  def picture_square
    graph_picture_url("square")
  end

  def picture_small
    graph_picture_url("small")
  end

  def albums
    get_albums
  end

  def profile_pictures
    get_profile_pictures
  end

  # Fetch-and-memoize the /me attribute hash.
  def get_attributes
    @cached_attributes ||= FacebookRequest.new.get_userdata(access_token)
  end

  # Fetch-and-memoize the user's photo albums.
  def get_albums
    @user_albums ||= FacebookRequest.new.get_albums(access_token)
  end

  # Fetch-and-memoize photos from the user's profile album.
  def get_profile_pictures
    @user_profile_pictures ||= FacebookRequest.new.get_profile_pictures(access_token, uid)
  end

  # Graph picture endpoint for this user at the given size.
  def graph_picture_url(size)
    "https://graph.facebook.com/#{uid}/picture?type=#{size}"
  end
end
# Thin HTTParty wrapper around the Facebook Graph / legacy REST APIs.
# Each call passes the OAuth access token as a query parameter.
class FacebookRequest
  require 'rubygems'
  require 'httparty'
  include HTTParty
  # Fetch the authenticated user's profile attributes (/me).
  def get_userdata(access_token)
    self.class.get("https://graph.facebook.com/me?access_token=#{CGI.escape(access_token)}&locale=en_US")
  end
  # Fetch the user's profile photo endpoint.
  def get_userpicture(access_token)
    self.class.get("https://graph.facebook.com/me/photo?access_token=#{CGI.escape(access_token)}")
  end
  # List the user's photo albums, including album metadata.
  def get_albums(access_token)
    self.class.get("https://graph.facebook.com/me/albums?access_token=#{CGI.escape(access_token)}&metadata=1")
  end
  # Fetch profile-album photo URLs via an FQL query.
  # NOTE(review): user_id is interpolated into the FQL string directly —
  # safe for numeric uids, but confirm it cannot be attacker-controlled.
  def get_profile_pictures(access_token,user_id)
    query = "SELECT src_small, src_big FROM photo where aid IN (SELECT aid FROM album WHERE owner = #{user_id} AND type = 'profile')"
    self.class.get("https://api.facebook.com/method/fql.query?access_token=#{CGI.escape(access_token)}&query=#{CGI.escape(query)}&format=json")
  end
end
Add support for posting to the user's wall (wall posts).
# Lightweight wrapper around a Facebook Graph API user session.
# Profile attributes, albums and profile pictures are fetched lazily
# through FacebookRequest and memoized per instance.
class FacebookUser
  attr_accessor :uid, :session_key, :secret, :expires, :base_domain, :access_token

  # Each reader pulls one field out of the memoized Graph payload.
  %w(name timezone birthday last_name first_name verified updated_time gender email).each do |field|
    define_method(field) { get_attributes[field] }
  end

  # Default picture is the square variant.
  def picture
    picture_square
  end

  def picture_large
    graph_picture_url("large")
  end

  def picture_square
    graph_picture_url("square")
  end

  def picture_small
    graph_picture_url("small")
  end

  def albums
    get_albums
  end

  def profile_pictures
    get_profile_pictures
  end

  # Fetch-and-memoize the /me attribute hash.
  def get_attributes
    @cached_attributes ||= FacebookRequest.new.get_userdata(access_token)
  end

  # Fetch-and-memoize the user's photo albums.
  def get_albums
    @user_albums ||= FacebookRequest.new.get_albums(access_token)
  end

  # Fetch-and-memoize photos from the user's profile album.
  def get_profile_pictures
    @user_profile_pictures ||= FacebookRequest.new.get_profile_pictures(access_token, uid)
  end

  # Publish a post to the user's wall; options become the post fields.
  def wallpost(options = {})
    FacebookRequest.new.wallpost(access_token, options)
  end

  # Graph picture endpoint for this user at the given size.
  def graph_picture_url(size)
    "https://graph.facebook.com/#{uid}/picture?type=#{size}"
  end
end
# Thin HTTParty wrapper around the Facebook Graph / legacy REST APIs.
# Each call passes the OAuth access token as a query parameter.
class FacebookRequest
  require 'rubygems'
  require 'httparty'
  include HTTParty
  # Fetch the authenticated user's profile attributes (/me).
  def get_userdata(access_token)
    self.class.get("https://graph.facebook.com/me?access_token=#{CGI.escape(access_token)}&locale=en_US")
  end
  # Fetch the user's profile photo endpoint.
  def get_userpicture(access_token)
    self.class.get("https://graph.facebook.com/me/photo?access_token=#{CGI.escape(access_token)}")
  end
  # List the user's photo albums, including album metadata.
  def get_albums(access_token)
    self.class.get("https://graph.facebook.com/me/albums?access_token=#{CGI.escape(access_token)}&metadata=1")
  end
  # POST to the user's wall (/me/feed); options are merged with the
  # access token into the form body.
  def wallpost(access_token, options = {})
    query = {:body => options.merge({:access_token => access_token})}
    self.class.post("https://graph.facebook.com/me/feed", query)
  end
  # Fetch profile-album photo URLs via an FQL query.
  # NOTE(review): user_id is interpolated into the FQL string directly —
  # safe for numeric uids, but confirm it cannot be attacker-controlled.
  def get_profile_pictures(access_token,user_id)
    query = "SELECT src_small, src_big FROM photo where aid IN (SELECT aid FROM album WHERE owner = #{user_id} AND type = 'profile')"
    self.class.get("https://api.facebook.com/method/fql.query?access_token=#{CGI.escape(access_token)}&query=#{CGI.escape(query)}&format=json")
  end
end
|
# Load the Rails application.
require File.expand_path('../application', __FILE__)
# Initialize the Rails application.
Documentreview::Application.initialize!
# Mailer links point at the Heroku demo host.
ActionMailer::Base.default_url_options = { :host => 'artygeek-rails-demo1.heroku.com' }
ActionMailer::Base.default from: 'rails-demo@artygeek.com'
# Deliver through Gmail SMTP; credentials come from the environment.
# FIX: smtp_settings is defined on ActionMailer::Base, not on the
# ActionMailer module — the original `ActionMailer.smtp_settings = ...`
# raised NoMethodError at boot.
ActionMailer::Base.smtp_settings = {
  address: "smtp.gmail.com",
  port: 587,
  domain: ENV["DOMAIN_NAME"],
  authentication: "plain",
  enable_starttls_auto: true,
  user_name: ENV["GMAIL_USERNAME"],
  password: ENV["GMAIL_PASSWORD"]
}
Mailer configuration, take 2: set smtp_settings on ActionMailer::Base rather than the ActionMailer module.
# Load the Rails application.
require File.expand_path('../application', __FILE__)
# Initialize the Rails application.
Documentreview::Application.initialize!
# Mailer links point at the Heroku demo host.
ActionMailer::Base.default_url_options = { :host => 'artygeek-rails-demo1.heroku.com' }
ActionMailer::Base.default from: 'rails-demo@artygeek.com'
# Deliver through Gmail SMTP; credentials come from the environment.
ActionMailer::Base.smtp_settings = {
  address: "smtp.gmail.com",
  port: 587,
  domain: ENV["DOMAIN_NAME"],
  authentication: "plain",
  enable_starttls_auto: true,
  user_name: ENV["GMAIL_USERNAME"],
  password: ENV["GMAIL_PASSWORD"]
}
require 'reviewed/cache'

module Faraday
  # Faraday middleware that caches successful JSON API responses in the
  # Reviewed cache store, keyed by website id + request URI. A query
  # string containing skip-cache or reset-cache bypasses the cache.
  class Cache
    def initialize(app)
      @app = app
    end

    # Backing cache store, shared via Reviewed::Cache.
    def store
      Reviewed::Cache.store
    end

    # Serve from cache when allowed and present; otherwise forward to
    # the app and persist a 200 response for subsequent calls.
    def call(env)
      @url = env[:url]
      @website_id = env[:request_headers]['x-reviewed-website']
      if serve_from_cache? && store.exist?(cache_key)
        begin
          Hashie::Mash.new(MultiJson.load( store.read(cache_key) ))
        rescue => e
          raise e.message + ": #{cache_key}"
        end
      else
        @app.call(env).on_complete do |response|
          if store_response?(response)
            store.delete(cache_key)
            # FIX: persist only the fields a cached response needs.
            # Dumping the entire Faraday env also serialized request
            # machinery (method, request headers, url object), bloating
            # the cache and giving cache hits a different shape than
            # fresh responses.
            storeable_response = MultiJson.dump(response.slice(:status, :body, :response_headers))
            store.write(cache_key, storeable_response, write_options)
          end
        end
      end
    end

    private

    # Cacheable unless the query string opts out via skip/reset-cache.
    def serve_from_cache?
      @url.query.blank? || !@url.query.match(/\b(skip|reset)-cache\b/)
    end

    # Only store successful (200) responses; skip-cache suppresses.
    def store_response?(resp)
      return false if resp[:status] != 200
      @url.query.blank? || !@url.query.match(/\bskip-cache\b/)
    end

    def cache_key
      [@website_id, @url.request_uri].join(':')
    end

    # TTL from REVIEWED_CACHE_TIMEOUT (minutes), defaulting to 90.
    def write_options
      { expires_in: Integer(ENV['REVIEWED_CACHE_TIMEOUT'] || 90).minutes }
    end
  end
end
slice response
require 'reviewed/cache'

module Faraday
  # Faraday middleware that caches successful JSON responses in the
  # application-wide Reviewed::Cache store, keyed by website id + request URI.
  class Cache
    def initialize(app)
      @app = app
    end

    # The backing cache store.
    def store
      Reviewed::Cache.store
    end

    # Serve from cache when allowed and present; otherwise forward the
    # request and cache the (sliced) response once it completes.
    def call(env)
      @url = env[:url]
      @website_id = env[:request_headers]['x-reviewed-website']
      if serve_from_cache? && store.exist?(cache_key)
        begin
          Hashie::Mash.new(MultiJson.load( store.read(cache_key) ))
        rescue => e
          # Surface the offending cache key alongside the parse error.
          raise e.message + ": #{cache_key}"
        end
      else
        @app.call(env).on_complete do |response|
          if store_response?(response)
            store.delete(cache_key)
            # Persist only the JSON-serializable parts of the response.
            storeable_response = MultiJson.dump(response.slice(:status, :body, :response_headers))
            store.write(cache_key, storeable_response, write_options)
          end
        end
      end
    end

    private

    # Cache reads are skipped when the query string carries skip-cache or
    # reset-cache.
    def serve_from_cache?
      @url.query.blank? || !@url.query.match(/\b(skip|reset)-cache\b/)
    end

    # Only HTTP 200 responses are cached; skip-cache disables writes too.
    def store_response?(resp)
      return false if resp[:status] != 200
      @url.query.blank? || !@url.query.match(/\bskip-cache\b/)
    end

    def cache_key
      [@website_id, @url.request_uri].join(':')
    end

    # TTL in minutes, configurable via REVIEWED_CACHE_TIMEOUT (default 90).
    def write_options
      { expires_in: Integer(ENV['REVIEWED_CACHE_TIMEOUT'] || 90).minutes }
    end
  end
end
|
# Load the rails application
require File.expand_path('../application', __FILE__)
# Load SRNSW module
require 'srnsw.rb'
require 'magic_multi_connections'
# Pick out every "<env>_clone" database configuration for this environment.
connection_names = ActiveRecord::Base.configurations.keys.select do |name|
  name =~ /^#{ENV['RAILS_ENV']}_clone/
end
# For each clone config, eval a module that establishes its own ActiveRecord
# connection (magic_multi_connections style), and keep the resulting module
# constants in a pool.
# NOTE(review): @@connection_pool at the top level is a class variable on
# Object — presumably intentional so the pool is reachable app-wide; verify.
@@connection_pool = connection_names.map do |connection_name|
  Object.class_eval <<-EOS
    module #{connection_name.camelize}
      establish_connection :#{connection_name}
    end
  EOS
  connection_name.camelize.constantize
end
# Initialize the rails application
Collection::Application.initialize!
Dereference multi connections
PILOT
# Load the rails application
require File.expand_path('../application', __FILE__)
# Load SRNSW module
require 'srnsw.rb'
# Multi-connection support has been retired; keep the old code commented
# out for reference.
#require 'magic_multi_connections'
#connection_names = ActiveRecord::Base.configurations.keys.select do |name|
#name =~ /^#{ENV['RAILS_ENV']}_clone/
#end
# BUG FIX: the pool-building block below still referenced +connection_names+,
# whose assignment is commented out above, so booting raised NameError.
# The whole block must be disabled together.
#@@connection_pool = connection_names.map do |connection_name|
#  Object.class_eval <<-EOS
#    module #{connection_name.camelize}
#      establish_connection :#{connection_name}
#    end
#  EOS
#  connection_name.camelize.constantize
#end
# Initialize the rails application
Collection::Application.initialize!
|
# frozen_string_literal: true
require 'json'
require 'cgi'
require 'net/http' # also requires uri
require 'openssl'
class Fastly
  # Low-level HTTP client used to talk to the Fastly API.
  class Client #:nodoc: all
    DEFAULT_URL = 'https://api.fastly.com'.freeze

    attr_accessor :api_key, :base_url, :debug, :user, :password, :customer

    # Builds a client from an options hash.
    #
    # opts - :api_key (required), :base_url, :customer,
    #        :use_old_purge_method, :password, :user, :debug
    #
    # Raises Unauthorized when :api_key is missing.
    def initialize(opts)
      @api_key  = opts.fetch(:api_key, nil)
      @base_url = opts.fetch(:base_url, DEFAULT_URL)
      @customer = opts.fetch(:customer, nil)
      @oldpurge = opts.fetch(:use_old_purge_method, false)
      @password = opts.fetch(:password, nil)
      @user     = opts.fetch(:user, nil)
      @debug    = opts.fetch(:debug, nil)

      # Use one Net::HTTP client per thread when concurrent-ruby is loaded.
      @thread_http_client = if defined?(Concurrent::ThreadLocalVar)
        Concurrent::ThreadLocalVar.new { build_http_client }
      end

      # BUG FIX: this warning used to fire unconditionally, nagging users
      # who only supplied an API key. Only warn callers actually using the
      # deprecated username/password credentials.
      if fully_authed?
        warn('DEPRECATION WARNING: Username/password authentication is deprecated ' \
             'and will not be available starting September 2020; ' \
             'please migrate to API tokens as soon as possible.')
      end

      if api_key.nil?
        fail Unauthorized, "Invalid auth credentials. Check api_key."
      end

      self
    end

    # Flags the client as requiring API-key auth; raises if none is set.
    def require_key!
      raise Fastly::KeyAuthRequired.new("This request requires an API key") if api_key.nil?
      @require_key = true
    end

    def require_key?
      !!@require_key
    end

    def authed?
      !api_key.nil? || fully_authed?
    end

    # Some methods require full username and password rather than just auth token
    def fully_authed?
      !(user.nil? || password.nil?)
    end

    # GET +path+ and return the parsed JSON body. Raises Error on non-2xx.
    def get(path, params = {})
      extras = params.delete(:headers) || {}
      include_auth = params.key?(:include_auth) ? params.delete(:include_auth) : true
      path += "?#{make_params(params)}" unless params.empty?
      resp = http.get(path, headers(extras, include_auth))
      fail Error, resp.body unless resp.kind_of?(Net::HTTPSuccess)
      JSON.parse(resp.body)
    end

    def get_stats(path, params = {})
      resp = get(path, params)
      # return meta data, not just the actual stats data
      if resp['status'] == 'success'
        resp
      else
        fail Error, resp['msg']
      end
    end

    def post(path, params = {})
      post_and_put(:post, path, params)
    end

    def put(path, params = {})
      post_and_put(:put, path, params)
    end

    # DELETE +path+; returns true on HTTP success, false otherwise.
    def delete(path, params = {})
      extras = params.delete(:headers) || {}
      include_auth = params.key?(:include_auth) ? params.delete(:include_auth) : true
      resp = http.delete(path, headers(extras, include_auth))
      resp.kind_of?(Net::HTTPSuccess)
    end

    # Issues a PURGE request against +url+ (or POSTs /purge/<url> when the
    # legacy purge method was requested at construction time).
    def purge(url, params = {})
      return post("/purge/#{url}", params) if @oldpurge
      extras = params.delete(:headers) || {}
      uri = URI.parse(url)
      # Dedicated client: the purge target host may differ from base_url.
      http = Net::HTTP.new(uri.host, uri.port)
      if uri.is_a? URI::HTTPS
        http.use_ssl = true
      end
      resp = http.request Net::HTTP::Purge.new(uri.request_uri, headers(extras))
      fail Error, resp.body unless resp.kind_of?(Net::HTTPSuccess)
      JSON.parse(resp.body)
    end

    # Memoized per-thread Net::HTTP client.
    def http
      return @thread_http_client.value if @thread_http_client
      return Thread.current[:fastly_net_http] if Thread.current[:fastly_net_http]
      Thread.current[:fastly_net_http] = build_http_client
    end

    private

    def build_http_client
      uri = URI.parse(base_url)
      net_http = Net::HTTP.new(uri.host, uri.port, :ENV, nil, nil, nil)
      # handle TLS connections outside of development
      net_http.verify_mode = OpenSSL::SSL::VERIFY_PEER
      net_http.use_ssl = uri.scheme.downcase == 'https'
      # debug http interactions if specified
      net_http.set_debug_output(debug) if debug
      net_http
    end

    def post_and_put(method, path, params = {})
      extras = params.delete(:headers) || {}
      include_auth = params.key?(:include_auth) ? params.delete(:include_auth) : true
      query = make_params(params)
      resp = http.send(method, path, query, headers(extras, include_auth).merge('Content-Type' => 'application/x-www-form-urlencoded'))
      fail Error, resp.body unless resp.kind_of?(Net::HTTPSuccess)
      JSON.parse(resp.body)
    end

    # Default headers, optionally carrying the Fastly-Key auth header.
    def headers(extras = {}, include_auth = true)
      headers = {}
      if include_auth
        headers['Fastly-Key'] = api_key if api_key
      end
      headers.merge('Content-Accept' => 'application/json', 'User-Agent' => "fastly-ruby-v#{Fastly::VERSION}").merge(extras.keep_if { |k, v| !v.nil? })
    end

    # URL-encodes params; one level of hash nesting becomes key[sub_key]=v.
    def make_params(params)
      param_ary = params.map do |key, value|
        next if value.nil?
        key = key.to_s
        if value.is_a?(Hash)
          value.map do |sub_key, sub_value|
            "#{CGI.escape("#{key}[#{sub_key}]")}=#{CGI.escape(sub_value.to_s)}"
          end
        else
          "#{CGI.escape(key)}=#{CGI.escape(value.to_s)}"
        end
      end
      param_ary.flatten.delete_if { |v| v.nil? }.join('&')
    end
  end
end
# See Net::HTTPGenericRequest for attributes and methods.
# Custom Net::HTTP request class implementing Fastly's PURGE verb:
# the request carries no body, but the response does.
class Net::HTTP::Purge < Net::HTTPRequest
  METHOD = 'PURGE'
  REQUEST_HAS_BODY = false
  RESPONSE_HAS_BODY = true
end
[fix] Remove deprecation warning about Username and password signin in the client
**What**
Now that the code pretaining to Authing in with Username and Password is removed, the deprecation warning in the client is not needed. To avoid further confusion, the warning is removed.
# frozen_string_literal: true
require 'json'
require 'cgi'
require 'net/http' # also requires uri
require 'openssl'
class Fastly
  # The UserAgent to communicate with the API
  class Client #:nodoc: all
    DEFAULT_URL = 'https://api.fastly.com'.freeze

    attr_accessor :api_key, :base_url, :debug, :user, :password, :customer

    # Builds a client from an options hash.
    #
    # opts - :api_key (required), :base_url, :customer,
    #        :use_old_purge_method, :password, :user, :debug
    #
    # Raises Unauthorized when :api_key is missing.
    def initialize(opts)
      @api_key = opts.fetch(:api_key, nil)
      @base_url = opts.fetch(:base_url, DEFAULT_URL)
      @customer = opts.fetch(:customer, nil)
      @oldpurge = opts.fetch(:use_old_purge_method, false)
      @password = opts.fetch(:password, nil)
      @user = opts.fetch(:user, nil)
      @debug = opts.fetch(:debug, nil)
      # Use one Net::HTTP client per thread when concurrent-ruby is loaded.
      @thread_http_client = if defined?(Concurrent::ThreadLocalVar)
        Concurrent::ThreadLocalVar.new { build_http_client }
      end
      if api_key.nil?
        fail Unauthorized, "Invalid auth credentials. Check api_key."
      end
      self
    end

    # Flags the client as requiring API-key auth; raises if none is set.
    def require_key!
      raise Fastly::KeyAuthRequired.new("This request requires an API key") if api_key.nil?
      @require_key = true
    end

    def require_key?
      !!@require_key
    end

    def authed?
      !api_key.nil? || fully_authed?
    end

    # Some methods require full username and password rather than just auth token
    def fully_authed?
      !(user.nil? || password.nil?)
    end

    # GET +path+ and return the parsed JSON body. Raises Error on non-2xx.
    def get(path, params = {})
      extras = params.delete(:headers) || {}
      include_auth = params.key?(:include_auth) ? params.delete(:include_auth) : true
      path += "?#{make_params(params)}" unless params.empty?
      resp = http.get(path, headers(extras, include_auth))
      fail Error, resp.body unless resp.kind_of?(Net::HTTPSuccess)
      JSON.parse(resp.body)
    end

    def get_stats(path, params = {})
      resp = get(path, params)
      # return meta data, not just the actual stats data
      if resp['status'] == 'success'
        resp
      else
        fail Error, resp['msg']
      end
    end

    def post(path, params = {})
      post_and_put(:post, path, params)
    end

    def put(path, params = {})
      post_and_put(:put, path, params)
    end

    # DELETE +path+; returns true on HTTP success, false otherwise.
    def delete(path, params = {})
      extras = params.delete(:headers) || {}
      include_auth = params.key?(:include_auth) ? params.delete(:include_auth) : true
      resp = http.delete(path, headers(extras, include_auth))
      resp.kind_of?(Net::HTTPSuccess)
    end

    # Issues a PURGE request against +url+ (or POSTs /purge/<url> when the
    # legacy purge method was requested at construction time).
    def purge(url, params = {})
      return post("/purge/#{url}", params) if @oldpurge
      extras = params.delete(:headers) || {}
      uri = URI.parse(url)
      # Dedicated client: the purge target host may differ from base_url.
      http = Net::HTTP.new(uri.host, uri.port)
      if uri.is_a? URI::HTTPS
        http.use_ssl = true
      end
      resp = http.request Net::HTTP::Purge.new(uri.request_uri, headers(extras))
      fail Error, resp.body unless resp.kind_of?(Net::HTTPSuccess)
      JSON.parse(resp.body)
    end

    # Memoized per-thread Net::HTTP client.
    def http
      return @thread_http_client.value if @thread_http_client
      return Thread.current[:fastly_net_http] if Thread.current[:fastly_net_http]
      Thread.current[:fastly_net_http] = build_http_client
    end

    private

    def build_http_client
      uri = URI.parse(base_url)
      net_http = Net::HTTP.new(uri.host, uri.port, :ENV, nil, nil, nil)
      # handle TLS connections outside of development
      net_http.verify_mode = OpenSSL::SSL::VERIFY_PEER
      net_http.use_ssl = uri.scheme.downcase == 'https'
      # debug http interactions if specified
      net_http.set_debug_output(debug) if debug
      net_http
    end

    def post_and_put(method, path, params = {})
      extras = params.delete(:headers) || {}
      include_auth = params.key?(:include_auth) ? params.delete(:include_auth) : true
      query = make_params(params)
      resp = http.send(method, path, query, headers(extras, include_auth).merge('Content-Type' => 'application/x-www-form-urlencoded'))
      fail Error, resp.body unless resp.kind_of?(Net::HTTPSuccess)
      JSON.parse(resp.body)
    end

    # Default headers, optionally carrying the Fastly-Key auth header.
    def headers(extras={}, include_auth=true)
      headers = {}
      if include_auth
        headers['Fastly-Key'] = api_key if api_key
      end
      headers.merge('Content-Accept' => 'application/json', 'User-Agent' => "fastly-ruby-v#{Fastly::VERSION}").merge(extras.keep_if {|k,v| !v.nil? })
    end

    # URL-encodes params; one level of hash nesting becomes key[sub_key]=v.
    def make_params(params)
      param_ary = params.map do |key, value|
        next if value.nil?
        key = key.to_s
        if value.is_a?(Hash)
          value.map do |sub_key, sub_value|
            "#{CGI.escape("#{key}[#{sub_key}]")}=#{CGI.escape(sub_value.to_s)}"
          end
        else
          "#{CGI.escape(key)}=#{CGI.escape(value.to_s)}"
        end
      end
      param_ary.flatten.delete_if { |v| v.nil? }.join('&')
    end
  end
end
# See Net::HTTPGenericRequest for attributes and methods.
# Custom Net::HTTP request class implementing Fastly's PURGE verb:
# the request carries no body, but the response does.
class Net::HTTP::Purge < Net::HTTPRequest
  METHOD = 'PURGE'
  REQUEST_HAS_BODY = false
  RESPONSE_HAS_BODY = true
end
|
# Be sure to restart your server when you modify this file
# Uncomment below to force Rails into production mode when
# you don't control web/app server and can't set it the proper way
# ENV['RAILS_ENV'] ||= 'production'
# Specifies gem version of Rails to use when vendor/rails is not present
RAILS_GEM_VERSION = '2.3.4' unless defined? RAILS_GEM_VERSION
# Bootstrap the Rails environment, frameworks, and default configuration
require File.join(File.dirname(__FILE__), 'boot')
Rails::Initializer.run do |config|
  # Settings in config/environments/* take precedence over those specified here.
  # Application configuration should go into files in config/initializers
  # -- all .rb files in that directory are automatically loaded.
  # See Rails::Configuration for more options.
  # Skip frameworks you're not going to use. To use Rails without a database
  # you must remove the Active Record framework.
  # config.frameworks -= [ :active_record, :active_resource, :action_mailer ]
  # Specify gems that this application depends on.
  # They can then be installed with "rake gems:install" on new installations.
  # You have to specify the :lib option for libraries, where the Gem name (sqlite3-ruby) differs from the file itself (sqlite3)
  # config.gem "bj"
  # config.gem "hpricot", :version => '0.6', :source => "http://code.whytheluckystiff.net"
  # config.gem "sqlite3-ruby", :lib => "sqlite3"
  # config.gem "aws-s3", :lib => "aws/s3"
  # Application gem dependencies (auth, permissions, tagging, templates, etc.)
  config.gem "authlogic"
  config.gem "lockdown", :version => "1.3.1"
  config.gem "mbleigh-acts-as-taggable-on", :source => "http://gems.github.com", :lib => "acts-as-taggable-on"
  config.gem "settingslogic"
  config.gem "haml"
  config.gem "ancestry"
  config.gem "searchlogic"
  config.gem "liquid"
  # Only load the plugins named here, in the order given. By default, all plugins
  # in vendor/plugins are loaded in alphabetical order.
  # :all can be used as a placeholder for all plugins not explicitly named
  # config.plugins = [ :exception_notification, :ssl_requirement, :all ]
  # Add additional load paths for your own custom dirs
  # config.load_paths += %W( #{RAILS_ROOT}/extras )
  # Force all environments to use the same logger level
  # (by default production uses :info, the others :debug)
  # config.log_level = :debug
  # Make Time.zone default to the specified zone, and make Active Record store time values
  # in the database in UTC, and return them converted to the specified local zone.
  # Run "rake -D time" for a list of tasks for finding time zone names. Comment line to use default local time.
  config.time_zone = 'Berlin'
  # The internationalization framework can be changed to have another default locale (standard is :en) or more load paths.
  # All files from config/locales/*.rb,yml are added automatically.
  # add config/locales/admin to the load path
  config.i18n.load_path << Dir[ File.join(RAILS_ROOT, 'config', 'locales', 'admin', '*.{rb,yml}') ]
  config.i18n.load_path << Dir[ File.join(RAILS_ROOT, 'config', 'locales', 'admin', 'pages', '*.{rb,yml}') ]
  config.i18n.load_path << Dir[ File.join(RAILS_ROOT, 'config', 'locales', 'admin', 'content_elements', '*.{rb,yml}') ]
  config.i18n.default_locale = :de
  # Your secret key for verifying cookie session data integrity.
  # If you change this key, all old sessions will become invalid!
  # Make sure the secret is at least 30 characters and all random,
  # no regular words or you'll be exposed to dictionary attacks.
  # SECURITY NOTE(review): this session secret is committed to source
  # control — rotate it and load it from the environment instead.
  config.action_controller.session = {
    :session_key => '_palani_session',
    :secret => 'a201e13c5007f51e7856e6f68d403b09eb3bb01e8ec71422374bfb03612be4aec762cf59a60da43f57965d86869826d7e8c280795c78553f5ff7c37437cd1542'
  }
  # Use the database for sessions instead of the cookie-based default,
  # which shouldn't be used to store highly confidential information
  # (create the session table with "rake db:sessions:create")
  # config.action_controller.session_store = :active_record_store
  # Use SQL instead of Active Record's schema dumper when creating the test database.
  # This is necessary if your schema can't be completely dumped by the schema dumper,
  # like if you have constraints or database-specific column types
  # config.active_record.schema_format = :sql
  # Activate observers that should always be running
  # Please note that observers generated using script/generate observer need to have an _observer suffix
  # config.active_record.observers = :cacher, :garbage_collector, :forum_observer
end
updated to Rails 2.3.5
# Be sure to restart your server when you modify this file
# Uncomment below to force Rails into production mode when
# you don't control web/app server and can't set it the proper way
# ENV['RAILS_ENV'] ||= 'production'
# Specifies gem version of Rails to use when vendor/rails is not present
RAILS_GEM_VERSION = '2.3.5' unless defined? RAILS_GEM_VERSION
# Bootstrap the Rails environment, frameworks, and default configuration
require File.join(File.dirname(__FILE__), 'boot')
Rails::Initializer.run do |config|
  # Settings in config/environments/* take precedence over those specified here.
  # Application configuration should go into files in config/initializers
  # -- all .rb files in that directory are automatically loaded.
  # See Rails::Configuration for more options.
  # Skip frameworks you're not going to use. To use Rails without a database
  # you must remove the Active Record framework.
  # config.frameworks -= [ :active_record, :active_resource, :action_mailer ]
  # Specify gems that this application depends on.
  # They can then be installed with "rake gems:install" on new installations.
  # You have to specify the :lib option for libraries, where the Gem name (sqlite3-ruby) differs from the file itself (sqlite3)
  # config.gem "bj"
  # config.gem "hpricot", :version => '0.6', :source => "http://code.whytheluckystiff.net"
  # config.gem "sqlite3-ruby", :lib => "sqlite3"
  # config.gem "aws-s3", :lib => "aws/s3"
  # Application gem dependencies (auth, permissions, tagging, templates, etc.)
  config.gem "authlogic"
  config.gem "lockdown", :version => "1.3.1"
  config.gem "mbleigh-acts-as-taggable-on", :source => "http://gems.github.com", :lib => "acts-as-taggable-on"
  config.gem "settingslogic"
  config.gem "haml"
  config.gem "ancestry"
  config.gem "searchlogic"
  config.gem "liquid"
  # Only load the plugins named here, in the order given. By default, all plugins
  # in vendor/plugins are loaded in alphabetical order.
  # :all can be used as a placeholder for all plugins not explicitly named
  # config.plugins = [ :exception_notification, :ssl_requirement, :all ]
  # Add additional load paths for your own custom dirs
  # config.load_paths += %W( #{RAILS_ROOT}/extras )
  # Force all environments to use the same logger level
  # (by default production uses :info, the others :debug)
  # config.log_level = :debug
  # Make Time.zone default to the specified zone, and make Active Record store time values
  # in the database in UTC, and return them converted to the specified local zone.
  # Run "rake -D time" for a list of tasks for finding time zone names. Comment line to use default local time.
  config.time_zone = 'Berlin'
  # The internationalization framework can be changed to have another default locale (standard is :en) or more load paths.
  # All files from config/locales/*.rb,yml are added automatically.
  # add config/locales/admin to the load path
  config.i18n.load_path << Dir[ File.join(RAILS_ROOT, 'config', 'locales', 'admin', '*.{rb,yml}') ]
  config.i18n.load_path << Dir[ File.join(RAILS_ROOT, 'config', 'locales', 'admin', 'pages', '*.{rb,yml}') ]
  config.i18n.load_path << Dir[ File.join(RAILS_ROOT, 'config', 'locales', 'admin', 'content_elements', '*.{rb,yml}') ]
  config.i18n.default_locale = :de
  # Your secret key for verifying cookie session data integrity.
  # If you change this key, all old sessions will become invalid!
  # Make sure the secret is at least 30 characters and all random,
  # no regular words or you'll be exposed to dictionary attacks.
  # SECURITY NOTE(review): this session secret is committed to source
  # control — rotate it and load it from the environment instead.
  config.action_controller.session = {
    :session_key => '_palani_session',
    :secret => 'a201e13c5007f51e7856e6f68d403b09eb3bb01e8ec71422374bfb03612be4aec762cf59a60da43f57965d86869826d7e8c280795c78553f5ff7c37437cd1542'
  }
  # Use the database for sessions instead of the cookie-based default,
  # which shouldn't be used to store highly confidential information
  # (create the session table with "rake db:sessions:create")
  # config.action_controller.session_store = :active_record_store
  # Use SQL instead of Active Record's schema dumper when creating the test database.
  # This is necessary if your schema can't be completely dumped by the schema dumper,
  # like if you have constraints or database-specific column types
  # config.active_record.schema_format = :sql
  # Activate observers that should always be running
  # Please note that observers generated using script/generate observer need to have an _observer suffix
  # config.active_record.observers = :cacher, :garbage_collector, :forum_observer
end
|
# encoding: UTF-8
module Mojito::Rendering
  module Templates
    require 'tilt'
    require 'where'
    require 'mime/types'

    # Renders a template and writes the result to the response.
    #
    # Two call shapes:
    #   template(engine, source) — compile +source+ with the Tilt engine
    #                              registered for +engine+
    #   template(path)           — load the template file relative to the caller
    # An optional trailing Hash supplies template locals (defaults to
    # request.locals).
    def template(*args, &block)
      locals = Hash === args.last ? args.pop : self.request.locals
      template = if args.size == 2
        Tilt[args.first].new { args.last }
      elsif args.size == 1
        file = Where.cdir(1) + args.first
        # Respond 404 when the template file is missing.
        Mojito::R::StatusCodes.instance_method(:not_found!).bind(self).call unless file.exist?
        # BUG FIX: only guess the Content-Type from the inner extension
        # (e.g. "page.html.erb" => html) when none was already set — the
        # previous code clobbered a Content-Type chosen by the handler.
        # NOTE(review): MIME::Types.type_for returns an Array — confirm the
        # header is meant to receive it rather than a single string.
        if not response.include?('Content-Type') and %r{\.(?<extension>\w+)\.\w+$} =~ file.to_s
          response['Content-Type'] = MIME::Types.type_for(extension)
        end
        Tilt[file.to_s].new file.to_s
      end
      response.write template.render(self, locals, &block)
    end
  end
end
check for previously set content-type when using type-guessing
# encoding: UTF-8
module Mojito::Rendering
  module Templates
    require 'tilt'
    require 'where'
    require 'mime/types'

    # Renders a template and writes the result to the response.
    #
    # Two call shapes:
    #   template(engine, source) — compile +source+ with the Tilt engine
    #                              registered for +engine+
    #   template(path)           — load the template file relative to the caller
    # An optional trailing Hash supplies template locals (defaults to
    # request.locals).
    def template(*args, &block)
      locals = Hash === args.last ? args.pop : self.request.locals
      template = if args.size == 2
        Tilt[args.first].new { args.last }
      elsif args.size == 1
        file = Where.cdir(1) + args.first
        # Respond 404 when the template file is missing.
        Mojito::R::StatusCodes.instance_method(:not_found!).bind(self).call unless file.exist?
        # Guess the Content-Type from the inner extension (e.g.
        # "page.html.erb" => html), but never clobber one already set.
        # NOTE(review): MIME::Types.type_for returns an Array — confirm the
        # header is meant to receive it rather than a single string.
        if not response.include?('Content-Type') and %r{\.(?<extension>\w+)\.\w+$} =~ file.to_s
          response['Content-Type'] = MIME::Types.type_for(extension)
        end
        Tilt[file.to_s].new file.to_s
      end
      response.write template.render(self, locals, &block)
    end
  end
end
# BUG FIX: the OPML parser code relies on URI but nothing here loaded it
# explicitly, so availability depended on load order; require it up front.
require 'uri'
require 'feedjira'
require 'feedjira/opml/version'
require 'feedjira/parser/opml_outline'
require 'feedjira/parser/opml_head'
require 'feedjira/parser/opml_body'
require 'feedjira/parser/opml'

# Register the OPML parser so Feedjira can auto-detect OPML documents.
Feedjira::Feed.add_feed_class(Feedjira::Parser::OPML)
Require URI
# OPML support for Feedjira. URI is required explicitly because the parser
# code uses it and must not depend on another gem loading it first.
require 'uri'
require 'feedjira'
require 'feedjira/opml/version'
require 'feedjira/parser/opml_outline'
require 'feedjira/parser/opml_head'
require 'feedjira/parser/opml_body'
require 'feedjira/parser/opml'

# Register the OPML parser so Feedjira can auto-detect OPML documents.
Feedjira::Feed.add_feed_class(Feedjira::Parser::OPML)
# Be sure to restart your server when you modify this file
# Uncomment below to force Rails into production mode when
# you don't control web/app server and can't set it the proper way
# ENV['RAILS_ENV'] ||= 'production'
# Specifies gem version of Rails to use when vendor/rails is not present
RAILS_GEM_VERSION = '2.3.2' unless defined? RAILS_GEM_VERSION
# Bootstrap the Rails environment, frameworks, and default configuration
require File.join(File.dirname(__FILE__), 'boot')
Rails::Initializer.run do |config|
  # Settings in config/environments/* take precedence over those specified here.
  # Application configuration should go into files in config/initializers
  # -- all .rb files in that directory are automatically loaded.
  # See Rails::Configuration for more options.
  # Skip frameworks you're not going to use. To use Rails without a database
  # you must remove the Active Record framework.
  # config.frameworks -= [ :active_record, :active_resource, :action_mailer ]
  # Specify gems that this application depends on.
  # They can then be installed with "rake gems:install" on new installations.
  # You have to specify the :lib option for libraries, where the Gem name (sqlite3-ruby) differs from the file itself (sqlite3)
  # config.gem "bj"
  # config.gem "hpricot", :version => '0.6', :source => "http://code.whytheluckystiff.net"
  # config.gem "sqlite3-ruby", :lib => "sqlite3"
  # config.gem "aws-s3", :lib => "aws/s3"
  config.gem "sqlite3-ruby", :lib => "sqlite3"
  # BUG FIX: pin searchlogic with a pessimistic constraint. The open-ended
  # ">= 1.6.0" also matched the 2.x series, whose API is incompatible with
  # this code.
  config.gem "searchlogic", :version => "~> 1.6.6"
  config.gem "populator", :version => ">= 0.2.4"
  config.gem "faker", :version => ">= 0.3.1"
  # Only load the plugins named here, in the order given. By default, all plugins
  # in vendor/plugins are loaded in alphabetical order.
  # :all can be used as a placeholder for all plugins not explicitly named
  # config.plugins = [ :exception_notification, :ssl_requirement, :all ]
  # Add additional load paths for your own custom dirs
  # config.load_paths += %W( #{RAILS_ROOT}/extras )
  # Force all environments to use the same logger level
  # (by default production uses :info, the others :debug)
  # config.log_level = :debug
  # Make Time.zone default to the specified zone, and make Active Record store time values
  # in the database in UTC, and return them converted to the specified local zone.
  # Run "rake -D time" for a list of tasks for finding time zone names. Comment line to use default local time.
  config.time_zone = 'UTC'
  # The internationalization framework can be changed to have another default locale (standard is :en) or more load paths.
  # All files from config/locales/*.rb,yml are added automatically.
  # config.i18n.load_path << Dir[File.join(RAILS_ROOT, 'my', 'locales', '*.{rb,yml}')]
  # config.i18n.default_locale = :de
  # Your secret key for verifying cookie session data integrity.
  # If you change this key, all old sessions will become invalid!
  # Make sure the secret is at least 30 characters and all random,
  # no regular words or you'll be exposed to dictionary attacks.
  config.action_controller.session = {
    :session_key => '_cute_admin_example_session',
    :secret => 'd26da9cff391ec45f01d8bca6152ea311d6500d69138385dc41c4147640fd9d52ecbfa3165ee82d2e222eb9f8418e40f4eb1180b45f88fd0b60589ada529c67c'
  }
  # Use the database for sessions instead of the cookie-based default,
  # which shouldn't be used to store highly confidential information
  # (create the session table with "rake db:sessions:create")
  # config.action_controller.session_store = :active_record_store
  # Use SQL instead of Active Record's schema dumper when creating the test database.
  # This is necessary if your schema can't be completely dumped by the schema dumper,
  # like if you have constraints or database-specific column types
  # config.active_record.schema_format = :sql
  # Activate observers that should always be running
  # Please note that observers generated using script/generate observer need to have an _observer suffix
  # config.active_record.observers = :cacher, :garbage_collector, :forum_observer
end
correct searchlogic gem version requirement
# Be sure to restart your server when you modify this file
# Uncomment below to force Rails into production mode when
# you don't control web/app server and can't set it the proper way
# ENV['RAILS_ENV'] ||= 'production'
# Specifies gem version of Rails to use when vendor/rails is not present
RAILS_GEM_VERSION = '2.3.2' unless defined? RAILS_GEM_VERSION
# Bootstrap the Rails environment, frameworks, and default configuration
require File.join(File.dirname(__FILE__), 'boot')
Rails::Initializer.run do |config|
  # Settings in config/environments/* take precedence over those specified here.
  # Application configuration should go into files in config/initializers
  # -- all .rb files in that directory are automatically loaded.
  # See Rails::Configuration for more options.
  # Skip frameworks you're not going to use. To use Rails without a database
  # you must remove the Active Record framework.
  # config.frameworks -= [ :active_record, :active_resource, :action_mailer ]
  # Specify gems that this application depends on.
  # They can then be installed with "rake gems:install" on new installations.
  # You have to specify the :lib option for libraries, where the Gem name (sqlite3-ruby) differs from the file itself (sqlite3)
  # config.gem "bj"
  # config.gem "hpricot", :version => '0.6', :source => "http://code.whytheluckystiff.net"
  # config.gem "sqlite3-ruby", :lib => "sqlite3"
  # config.gem "aws-s3", :lib => "aws/s3"
  config.gem "sqlite3-ruby", :lib => "sqlite3"
  # Pessimistic pin keeps us on the 1.6.x series (2.x changed the API).
  config.gem "searchlogic", :version => "~> 1.6.6"
  config.gem "populator", :version => ">= 0.2.4"
  config.gem "faker", :version => ">= 0.3.1"
  # Only load the plugins named here, in the order given. By default, all plugins
  # in vendor/plugins are loaded in alphabetical order.
  # :all can be used as a placeholder for all plugins not explicitly named
  # config.plugins = [ :exception_notification, :ssl_requirement, :all ]
  # Add additional load paths for your own custom dirs
  # config.load_paths += %W( #{RAILS_ROOT}/extras )
  # Force all environments to use the same logger level
  # (by default production uses :info, the others :debug)
  # config.log_level = :debug
  # Make Time.zone default to the specified zone, and make Active Record store time values
  # in the database in UTC, and return them converted to the specified local zone.
  # Run "rake -D time" for a list of tasks for finding time zone names. Comment line to use default local time.
  config.time_zone = 'UTC'
  # The internationalization framework can be changed to have another default locale (standard is :en) or more load paths.
  # All files from config/locales/*.rb,yml are added automatically.
  # config.i18n.load_path << Dir[File.join(RAILS_ROOT, 'my', 'locales', '*.{rb,yml}')]
  # config.i18n.default_locale = :de
  # Your secret key for verifying cookie session data integrity.
  # If you change this key, all old sessions will become invalid!
  # Make sure the secret is at least 30 characters and all random,
  # no regular words or you'll be exposed to dictionary attacks.
  # SECURITY NOTE(review): this session secret is committed to source
  # control — rotate it and load it from the environment instead.
  config.action_controller.session = {
    :session_key => '_cute_admin_example_session',
    :secret => 'd26da9cff391ec45f01d8bca6152ea311d6500d69138385dc41c4147640fd9d52ecbfa3165ee82d2e222eb9f8418e40f4eb1180b45f88fd0b60589ada529c67c'
  }
  # Use the database for sessions instead of the cookie-based default,
  # which shouldn't be used to store highly confidential information
  # (create the session table with "rake db:sessions:create")
  # config.action_controller.session_store = :active_record_store
  # Use SQL instead of Active Record's schema dumper when creating the test database.
  # This is necessary if your schema can't be completely dumped by the schema dumper,
  # like if you have constraints or database-specific column types
  # config.active_record.schema_format = :sql
  # Activate observers that should always be running
  # Please note that observers generated using script/generate observer need to have an _observer suffix
  # config.active_record.observers = :cacher, :garbage_collector, :forum_observer
end
|
# -*- encoding : utf-8 -*-
module Mongoid
  module Userstamps
    # Gem version string (semantic versioning).
    VERSION = '1.0.0'
  end
end
v2.0.0
# -*- encoding : utf-8 -*-
module Mongoid
  module Userstamps
    # Gem version string (semantic versioning).
    VERSION = '2.0.0'
  end
end
|
module FilmBuff
  class IMDb
    # @return [String] The locale currently used by the IMDb instance
    attr_accessor :locale

    # Create a new IMDb instance
    #
    # @param [String] locale
    #   The locale to search with. The IMDb instance will also return
    #   results in the language matching the given locale. Defaults to `en_US`
    #
    # @param [Boolean] ssl
    #   Whether or not to use SSL when searching by IMDb ID (IMDb does not
    #   currently support SSL when searching by title). Defaults to `true`
    def initialize(locale: 'en_US', ssl: true)
      @locale = locale
      @protocol = ssl ? 'https' : 'http'
    end

    private

    # Memoized Faraday connection to IMDb's app API.
    def connection
      # BUG FIX: the previous `connection ||= ...` assigned a method-local
      # variable (shadowing the method name), so memoization never took
      # effect and a fresh Faraday connection was built on every call.
      # Memoize in an instance variable instead.
      @connection ||= Faraday.new(:url => "#{@protocol}://app.imdb.com") do |c|
        c.response :json
        c.adapter Faraday.default_adapter
      end
    end

    # Normalizes a raw search-result row into a result hash; the release
    # year is the leading four digits of the description, if present.
    def build_hash(type, value)
      {
        :type => type,
        :imdb_id => value['id'],
        :title => value['title'],
        :release_year => value['description'][/\A\d{4}/]
      }
    end

    public

    # Looks up the title with the IMDb ID imdb_id and returns a
    # FilmBuff::Title object with information on that title
    #
    # @param [String] imdb_id
    #   The IMDb ID for the title to look up
    #
    # @return [Title]
    #   The FilmBuff::Title object containing information on the title
    #
    # @example Basic usage
    #   movie = imdb_instance.find_by_id('tt0032138')
    def find_by_id(imdb_id)
      response = connection.get '/title/maindetails', {
        :tconst => imdb_id, :locale => @locale
      }
      Title.new(response.body['data'])
    end

    # Searches IMDb for the title provided and returns an array with results
    #
    # @param [String] title The title to search for
    #
    # @param [Integer] limit The maximum number of results to return
    #
    # @param [Array] types The types of matches to search for.
    #   These types will be searched in the provided order. Can be
    #   `title_popular`, `title_exact`, `title_approx`, and `title_substring`
    #
    # @return [Array<Hash>] An array of hashes, each representing a search
    #   result
    #
    # @example Basic usage
    #   movie = imdb_instance.find_by_title('The Wizard of Oz')
    #
    # @example Only return 3 results
    #   movie = imdb_instance.find_by_title('The Wizard of Oz', limit: 3)
    #
    # @example Only return results containing the exact title provided
    #   movie = imdb_instance.find_by_title('The Wizard of Oz',
    #                                       types: %w(title_exact))
    def find_by_title(title, limit: nil, types: %w(title_popular
                                                   title_exact
                                                   title_approx
                                                   title_substring))
      response = connection.get 'http://www.imdb.com/xml/find', {
        :q => title,
        :json => '1',
        :tt => 'on'
      }
      output = []
      # Only consider the requested result groups, in response order.
      results = response.body.select { |key| types.include? key }
      results.each_key do |key|
        response.body[key].each do |row|
          # Stop once the optional result limit has been reached.
          break unless output.size < limit if limit
          # Skip rows missing any field build_hash needs.
          next unless row['id'] && row['title'] && row['description']
          output << build_hash(key, row)
        end
      end
      output
    end
  end
end
Stupid git...
module FilmBuff
  # Client for IMDb's unofficial JSON endpoints: look a title up by IMDb ID
  # or search titles by name.
  class IMDb
    # @return [String] The locale currently used by the IMDb instance
    attr_accessor :locale

    # Create a new IMDb instance
    #
    # @param [String] locale
    #   The locale to search with. The IMDb instance will also return
    #   results in the language matching the given locale. Defaults to `en_US`
    #
    # @param [Boolean] ssl
    #   Whether or not to use SSL when searching by IMDb ID (IMDb does not
    #   currently support SSL when searching by title). Defaults to `true`
    def initialize(locale: 'en_US', ssl: true)
      @locale = locale
      @protocol = ssl ? 'https' : 'http'
    end

    private

    # Lazily-built Faraday connection to IMDb's JSON app API.
    #
    # Fixed: the original wrote `connection ||= ...`, which assigns a *local*
    # variable, so the memoization never took effect and every call built a
    # fresh Faraday connection. Memoize in @connection instead.
    #
    # @return [Faraday::Connection]
    def connection
      @connection ||= Faraday.new(:url => "#{@protocol}://app.imdb.com") do |c|
        c.response :json
        c.adapter Faraday.default_adapter
      end
    end

    # Build one search-result hash from a raw result row.
    #
    # @param [String] type The result bucket the row came from
    # @param [Hash] value Raw row; expects the 'id', 'title' and
    #   'description' keys to be present
    # @return [Hash] :type, :imdb_id, :title and :release_year (the leading
    #   four digits of the description, or nil when absent)
    def build_hash(type, value)
      {
        :type => type,
        :imdb_id => value['id'],
        :title => value['title'],
        :release_year => value['description'][/\A\d{4}/]
      }
    end

    public

    # Looks up the title with the IMDb ID imdb_id and returns a
    # FilmBuff::Title object with information on that title
    #
    # @param [String] imdb_id
    #   The IMDb ID for the title to look up
    #
    # @return [Title]
    #   The FilmBuff::Title object containing information on the title
    #
    # @example Basic usage
    #   movie = imdb_instance.find_by_id('tt0032138')
    def find_by_id(imdb_id)
      response = connection.get '/title/maindetails', {
        :tconst => imdb_id, :locale => @locale
      }
      Title.new(response.body['data'])
    end

    # Searches IMDb for the title provided and returns an array with results
    #
    # @param [String] title The title to search for
    #
    # @param [Integer] limit The maximum number of results to return
    #
    # @param [Array] types The types of matches to search for.
    #   These types will be searched in the provided order. Can be
    #   `title_popular`, `title_exact`, `title_approx`, and `title_substring`
    #
    # @return [Array<Hash>] An array of hashes, each representing a search
    #   result
    #
    # @example Basic usage
    #   movie = imdb_instance.find_by_title('The Wizard of Oz')
    #
    # @example Only return 3 results
    #   movie = imdb_instance.find_by_title('The Wizard of Oz', limit: 3)
    #
    # @example Only return results containing the exact title provided
    #   movie = imdb_instance.find_by_title('The Wizard of Oz',
    #                                       types: %w(title_exact))
    def find_by_title(title, limit: nil, types: %w(title_popular
                                                   title_exact
                                                   title_approx
                                                   title_substring))
      response = connection.get 'http://www.imdb.com/xml/find', {
        :q => title,
        :json => '1',
        :tt => 'on'
      }
      output = []
      # Keep only the result buckets the caller asked for, in hash order.
      results = response.body.select { |key| types.include? key }
      results.each_key do |key|
        response.body[key].each do |row|
          break unless output.size < limit if limit
          # Skip rows missing any of the fields build_hash needs.
          next unless row['id'] && row['title'] && row['description']
          output << build_hash(key, row)
        end
      end
      output
    end
  end
end
|
# Gitorious Rails 2.3 environment configuration (config/environment.rb style).
# Be sure to restart your server when you modify this file.
# Uncomment below to force Rails into production mode when
# you don't control web/app server and can't set it the proper way
# ENV['RAILS_ENV'] ||= 'production'
# Specifies gem version of Rails to use when vendor/rails is not present
RAILS_GEM_VERSION = '2.3.14' unless defined? RAILS_GEM_VERSION
# Bootstrap the Rails environment, frameworks, and default configuration
require File.join(File.dirname(__FILE__), 'boot')
# Monkey-patch Rails' gem-dependency handling for newer RubyGems: when a
# dependency carries only the default ("anything goes") requirement, report
# it as nil instead of passing the default requirement through.
# NOTE(review): Gem::VERSION >= "1.3.6" is a *string* comparison, so e.g.
# "1.10.0" would compare lower than "1.3.6" -- confirm this is acceptable
# for the RubyGems versions this app targets.
if Gem::VERSION >= "1.3.6"
  module Rails
    class GemDependency
      def requirement
        r = super
        (r == Gem::Requirement.default) ? nil : r
      end
    end
  end
end
Rails::Initializer.run do |config|
  # Fail fast if config/gitorious.yml has no section for the current env.
  gitorious_yaml = YAML.load_file(File.join(RAILS_ROOT, "config/gitorious.yml"))[RAILS_ENV]
  raise "Your config/gitorious.yml does not have an entry for your current Rails environment. Please consult config/gitorious.sample.yml for instructions." unless gitorious_yaml
  # Settings in config/environments/* take precedence over those specified here.
  # Application configuration should go into files in config/initializers
  # -- all .rb files in that directory are automatically loaded.
  # See Rails::Configuration for more options.
  # Skip frameworks you're not going to use. To use Rails without a database
  # you must remove the Active Record framework.
  # config.frameworks -= [ :active_record, :active_resource, :action_mailer ]
  # Only load the plugins named here, in the order given. By default, all plugins
  # in vendor/plugins are loaded in alphabetical order.
  # :all can be used as a placeholder for all plugins not explicitly named
  # config.plugins = [ :exception_notification, :ssl_requirement, :all ]
  # Add additional load paths for your own custom dirs
  # NOTE(review): load_paths/load_once_paths are the Rails 2.3 names; later
  # Rails renamed these to autoload_paths/autoload_once_paths.
  config.load_paths += %W( #{RAILS_ROOT}/lib/gitorious )
  config.load_once_paths << File.expand_path("#{RAILS_ROOT}/lib/gitorious")
  # Force all environments to use the same logger level
  # (by default production uses :info, the others :debug)
  # config.log_level = :debug
  # Make Time.zone default to the specified zone, and make Active Record store time values
  # in the database in UTC, and return them converted to the specified local zone.
  # Run "rake -D time" for a list of tasks for finding time zone names. Comment line to use default local time.
  config.time_zone = 'UTC'
  # The internationalization framework can be changed to have another default locale (standard is :en) or more load paths.
  # All files from config/locales/*.rb,yml are added automatically.
  # config.i18n.load_path << Dir[File.join(RAILS_ROOT, 'my', 'locales', '*.{rb,yml}')]
  # config.i18n.default_locale = :de
  # Your secret key for verifying cookie session data integrity.
  # If you change this key, all old sessions will become invalid!
  # Make sure the secret is at least 30 characters and all random,
  # no regular words or you'll be exposed to dictionary attacks.
  # Use the database for sessions instead of the cookie-based default,
  # which shouldn't be used to store highly confidential information
  # (create the session table with "rake db:sessions:create")
  #config.action_controller.session_store = :active_record_store
  # Use SQL instead of Active Record's schema dumper when creating the test database.
  # This is necessary if your schema can't be completely dumped by the schema dumper,
  # like if you have constraints or database-specific column types
  # config.active_record.schema_format = :sql
  # Activate observers that should always be running
  # Please note that observers generated using script/generate observer need to have an _observer suffix
  # config.active_record.observers = :cacher, :garbage_collector, :forum_observer
  # Activate observers that should always be running
  config.active_record.observers = [
    :user_observer
  ]
  config.after_initialize do
    # Drop OAuth's bundled CA file so the library falls back to system SSL
    # defaults -- NOTE(review): presumably works around stale CA bundles in
    # the oauth gem; confirm against the deployed oauth version.
    OAuth::Consumer.class_eval {
      remove_const(:CA_FILE) if const_defined?(:CA_FILE)
    }
    OAuth::Consumer::CA_FILE = nil
    Gitorious::Plugin::post_load
    Grit::Git.git_binary = GitoriousConfig["git_binary"]
  end
end
Use autoload_paths in place of load_paths
# Gitorious Rails 2.3 environment configuration (config/environment.rb style).
# Be sure to restart your server when you modify this file.
# Uncomment below to force Rails into production mode when
# you don't control web/app server and can't set it the proper way
# ENV['RAILS_ENV'] ||= 'production'
# Specifies gem version of Rails to use when vendor/rails is not present
RAILS_GEM_VERSION = '2.3.14' unless defined? RAILS_GEM_VERSION
# Bootstrap the Rails environment, frameworks, and default configuration
require File.join(File.dirname(__FILE__), 'boot')
# Monkey-patch Rails' gem-dependency handling for newer RubyGems: when a
# dependency carries only the default ("anything goes") requirement, report
# it as nil instead of passing the default requirement through.
# NOTE(review): Gem::VERSION >= "1.3.6" is a *string* comparison, so e.g.
# "1.10.0" would compare lower than "1.3.6" -- confirm this is acceptable
# for the RubyGems versions this app targets.
if Gem::VERSION >= "1.3.6"
  module Rails
    class GemDependency
      def requirement
        r = super
        (r == Gem::Requirement.default) ? nil : r
      end
    end
  end
end
Rails::Initializer.run do |config|
  # Fail fast if config/gitorious.yml has no section for the current env.
  gitorious_yaml = YAML.load_file(File.join(RAILS_ROOT, "config/gitorious.yml"))[RAILS_ENV]
  raise "Your config/gitorious.yml does not have an entry for your current Rails environment. Please consult config/gitorious.sample.yml for instructions." unless gitorious_yaml
  # Settings in config/environments/* take precedence over those specified here.
  # Application configuration should go into files in config/initializers
  # -- all .rb files in that directory are automatically loaded.
  # See Rails::Configuration for more options.
  # Skip frameworks you're not going to use. To use Rails without a database
  # you must remove the Active Record framework.
  # config.frameworks -= [ :active_record, :active_resource, :action_mailer ]
  # Only load the plugins named here, in the order given. By default, all plugins
  # in vendor/plugins are loaded in alphabetical order.
  # :all can be used as a placeholder for all plugins not explicitly named
  # config.plugins = [ :exception_notification, :ssl_requirement, :all ]
  # Add additional load paths for your own custom dirs
  config.autoload_paths += %W( #{RAILS_ROOT}/lib/gitorious )
  # Avoid class cache errors like "A copy of Gitorious::XYZ has been removed
  # from the module tree but is still active!"
  config.autoload_once_paths << File.expand_path("#{RAILS_ROOT}/lib/gitorious")
  # Force all environments to use the same logger level
  # (by default production uses :info, the others :debug)
  # config.log_level = :debug
  # Make Time.zone default to the specified zone, and make Active Record store time values
  # in the database in UTC, and return them converted to the specified local zone.
  # Run "rake -D time" for a list of tasks for finding time zone names. Comment line to use default local time.
  config.time_zone = 'UTC'
  # The internationalization framework can be changed to have another default locale (standard is :en) or more load paths.
  # All files from config/locales/*.rb,yml are added automatically.
  # config.i18n.load_path << Dir[File.join(RAILS_ROOT, 'my', 'locales', '*.{rb,yml}')]
  # config.i18n.default_locale = :de
  # Your secret key for verifying cookie session data integrity.
  # If you change this key, all old sessions will become invalid!
  # Make sure the secret is at least 30 characters and all random,
  # no regular words or you'll be exposed to dictionary attacks.
  # Use the database for sessions instead of the cookie-based default,
  # which shouldn't be used to store highly confidential information
  # (create the session table with "rake db:sessions:create")
  #config.action_controller.session_store = :active_record_store
  # Use SQL instead of Active Record's schema dumper when creating the test database.
  # This is necessary if your schema can't be completely dumped by the schema dumper,
  # like if you have constraints or database-specific column types
  # config.active_record.schema_format = :sql
  # Activate observers that should always be running
  # Please note that observers generated using script/generate observer need to have an _observer suffix
  # config.active_record.observers = :cacher, :garbage_collector, :forum_observer
  # Activate observers that should always be running
  config.active_record.observers = [
    :user_observer
  ]
  config.after_initialize do
    # Drop OAuth's bundled CA file so the library falls back to system SSL
    # defaults -- NOTE(review): presumably works around stale CA bundles in
    # the oauth gem; confirm against the deployed oauth version.
    OAuth::Consumer.class_eval {
      remove_const(:CA_FILE) if const_defined?(:CA_FILE)
    }
    OAuth::Consumer::CA_FILE = nil
    Gitorious::Plugin::post_load
    Grit::Git.git_binary = GitoriousConfig["git_binary"]
  end
end
|
module MultiArmedBandit
  # Softmax (Boltzmann-exploration) multi-armed bandit strategy: each arm's
  # selection probability is proportional to exp(value / temperature).
  class Softmax
    attr_accessor :temperature, :counts, :values, :probs

    # temperature - exploration parameter (higher => closer to uniform).
    # n_arms      - number of arms to track.
    def initialize(temperature, n_arms)
      @temperature = temperature
      reset(n_arms)
    end

    # Zero out the per-arm trial counts, value estimates and probabilities.
    def reset(n_arms)
      @counts = Array.new(n_arms, 0)
      @values = Array.new(n_arms, 0.0)
      @probs = Array.new(n_arms, 0.0)
    end

    # Batch update: new_counts holds each arm's number of new trials and
    # new_rewards the (summed) reward observed per arm. Returns the
    # recomputed softmax probabilities.
    def bulk_update(new_counts, new_rewards)
      # Fold the batch rewards into each arm's running expectation.
      @values = @values.each_index.map do |arm|
        trials = @counts[arm]
        (trials * @values[arm] + new_rewards[arm]) / (trials + new_counts[arm])
      end
      # Bump the per-arm trial counters.
      @counts = @counts.each_index.map { |arm| @counts[arm] + new_counts[arm] }
      # Recompute the softmax distribution over the updated expectations.
      weights = @values.map { |v| Math.exp(v / @temperature) }
      normalizer = weights.reduce(:+)
      @probs = weights.map { |w| w / normalizer }
      probs
    end
  end
end
add update method
module MultiArmedBandit
  # Softmax (Boltzmann-exploration) multi-armed bandit strategy: each arm's
  # selection probability is proportional to exp(value / temperature).
  class Softmax
    attr_accessor :temperature, :counts, :values, :probs

    # Initialize an object.
    # temperature - exploration parameter (higher => closer to uniform).
    # n_arms      - number of arms to track.
    def initialize(temperature, n_arms)
      @temperature = temperature
      reset(n_arms)
    end

    # Reset instance variables: per-arm counts, value estimates, probabilities.
    def reset(n_arms)
      @counts = Array.new(n_arms, 0)
      @values = Array.new(n_arms, 0.0)
      @probs = Array.new(n_arms, 0.0)
    end

    # Update in a lump. new_counts is a list of each arm's trial number and
    # new_rewards means a list of (summed) rewards per arm.
    # Returns the recomputed softmax probabilities.
    def bulk_update(new_counts, new_rewards)
      # update expectations of each arm
      new_values = []
      @counts.zip(@values, new_counts, new_rewards).each do |n, value, nn, reward|
        new_values << (n * value + reward) / (n + nn)
      end
      @values = new_values
      # update the numbers of each arm's trial
      @counts = @counts.zip(new_counts).map { |f, s| f + s }
      # calculate probabilities
      z = @values.collect { |i| Math.exp(i / @temperature) }.reduce(:+)
      @probs = @values.collect { |i| Math.exp(i / @temperature) / z }
      return probs
    end

    # Record a single pull of chosen_arm with the observed reward, keeping
    # values[chosen_arm] as the running average reward of that arm.
    def update(chosen_arm, reward)
      @counts[chosen_arm] = @counts[chosen_arm] + 1
      n = @counts[chosen_arm]
      value = @values[chosen_arm]
      new_value = ((n - 1) / n.to_f) * value + (1 / n.to_f) * reward
      @values[chosen_arm] = new_value
      return
    end

    # Draw an arm index from the softmax distribution over current values.
    def select_arm
      z = @values.collect { |i| Math.exp(i / @temperature) }.reduce(:+)
      # Fixed: the original called Map.exp here, which raises NameError
      # (there is no Map constant); it must be Math.exp.
      @probs = @values.collect { |i| Math.exp(i / @temperature) / z }
      return categorical_draw(@probs)
    end

    private

    # Sample an index from the categorical distribution probs via inverse CDF.
    def categorical_draw(probs)
      z = rand()
      cum_prob = 0.0
      probs.size().times do |i|
        prob = probs[i]
        cum_prob += prob
        if cum_prob > z
          return i
        end
      end
      return probs.size() - 1
    end
  end
end
|
# WhatDoTheyKnow (Alaveteli) Rails 2.3 environment configuration.
# Be sure to restart your web server when you modify this file.
# Uncomment below to force Rails into production mode when
# you don't control web/app server and can't set it the proper way
# ENV['RAILS_ENV'] ||= 'production'
# Specifies gem version of Rails to use when vendor/rails is not present
RAILS_GEM_VERSION = '2.3.11' unless defined? RAILS_GEM_VERSION
# Bootstrap the Rails environment, frameworks, and default configuration
require File.join(File.dirname(__FILE__), 'boot')
# MySociety specific helper functions
$:.push(File.join(File.dirname(__FILE__), '../commonlib/rblib'))
# ... if these fail to include, you need the commonlib submodule from git
# (type "git submodule update --init" in the whatdotheyknow directory)
# ruby-ole and ruby-msg. We use a custom ruby-msg to avoid a name conflict
$:.unshift(File.join(File.dirname(__FILE__), '../vendor/ruby-ole/lib'))
$:.unshift(File.join(File.dirname(__FILE__), '../vendor/ruby-msg/lib'))
# FIXME: These gems needed to be installed using 'sudo gem install ...' -
# apt-get install does not seem to install them where they can be found:
#
# fcgi
# memcache-client
# mongrel
# thin
require 'memcache'
# load (not require) so these helper scripts from commonlib re-run on reload.
load "validate.rb"
load "config.rb"
load "format.rb"
load "debug_helpers.rb"
load "util.rb"
Rails::Initializer.run do |config|
  # Load initial mySociety config
  MySociety::Config.set_file(File.join(config.root_path, 'config', 'general'), true)
  MySociety::Config.load_default
  # Settings in config/environments/* take precedence over those specified here
  # Skip frameworks you're not going to use (only works if using vendor/rails)
  # config.frameworks -= [ :action_web_service, :action_mailer ]
  # Only load the plugins named here, by default all plugins in vendor/plugins are loaded
  # config.plugins = %W( exception_notification ssl_requirement )
  # Add additional load paths for your own custom dirs
  # config.load_paths += %W( #{RAILS_ROOT}/extras )
  # Force all environments to use the same logger level
  # (by default production uses :info, the others :debug)
  # TEMP: uncomment this to turn on logging in production environments
  # config.log_level = :debug
  #
  # Specify gems that this application depends on and have them installed with rake gems:install
  config.gem "rack", :version => '1.1.0'
  config.gem 'rspec-rails', :lib => false, :version => '1.3.3'
  config.gem "recaptcha", :lib => "recaptcha/rails"
  config.gem 'will_paginate', :version => '~> 2.3.11', :source => 'http://gemcutter.org'
  # Your secret key for verifying cookie session data integrity.
  # If you change this key, all old sessions will become invalid!
  # Make sure the secret is at least 30 characters and all random,
  # no regular words or you'll be exposed to dictionary attacks.
  config.action_controller.session = {
    :key => '_wdtk_cookie_session',
    :secret => MySociety::Config.get("COOKIE_STORE_SESSION_SECRET", 'this default is insecure as code is open source, please override for live sites in config/general; this will do for local development')
  }
  config.action_controller.session_store = :cookie_store
  # Use SQL instead of Active Record's schema dumper when creating the test database.
  # This is necessary if your schema can't be completely dumped by the schema dumper,
  # like if you have constraints or database-specific column types
  config.active_record.schema_format = :sql
  # Activate observers that should always be running
  # config.active_record.observers = :cacher, :garbage_collector
  # Make Active Record use UTC-base instead of local time
  config.active_record.default_timezone = :utc
  # See Rails::Configuration for more options
  ENV['RECAPTCHA_PUBLIC_KEY'] = MySociety::Config::get("RECAPTCHA_PUBLIC_KEY", 'x');
  ENV['RECAPTCHA_PRIVATE_KEY'] = MySociety::Config::get("RECAPTCHA_PRIVATE_KEY", 'x');
end
# Add new inflection rules using the following format
# (all these examples are active by default):
# Inflector.inflections do |inflect|
#   inflect.plural /^(ox)$/i, '\1en'
#   inflect.singular /^(ox)en/i, '\1'
#   inflect.irregular 'person', 'people'
#   inflect.uncountable %w( fish sheep )
# end
# Add new mime types for use in respond_to blocks:
# Mime::Type.register "text/richtext", :rtf
# Mime::Type.register "application/x-mobile", :mobile
# The Rails cache is set up by the Interlock plugin to use memcached
# Domain for URLs (so can work for scripts, not just web pages)
ActionMailer::Base.default_url_options[:host] = MySociety::Config.get("DOMAIN", 'localhost:3000')
# So that javascript assets use full URL, so proxied admin URLs read javascript OK
if (MySociety::Config.get("DOMAIN", "") != "")
  ActionController::Base.asset_host = Proc.new { |source, request|
    # Serve admin-page assets from the admin host so reverse-proxied admin
    # pages resolve their javascript correctly.
    if request.fullpath.match(/^\/admin\//)
      MySociety::Config.get("ADMIN_PUBLIC_URL", "/")
    else
      MySociety::Config.get("DOMAIN", 'localhost:3000')
    end
  }
end
# Load monkey patches and other things from lib/
require 'tmail_extensions.rb'
require 'activesupport_cache_extensions.rb'
require 'public_body_categories.rb'
require 'timezone_fixes.rb'
require 'use_spans_for_errors.rb'
require 'make_html_4_compliant.rb'
require 'activerecord_errors_extensions.rb'
require 'willpaginate_hack.rb'
require 'sendmail_return_path.rb'
require 'tnef.rb'
Remove TODO - extra gems not needed in deployment environment.
# WhatDoTheyKnow (Alaveteli) Rails 2.3 environment configuration.
# Be sure to restart your web server when you modify this file.
# Uncomment below to force Rails into production mode when
# you don't control web/app server and can't set it the proper way
# ENV['RAILS_ENV'] ||= 'production'
# Specifies gem version of Rails to use when vendor/rails is not present
RAILS_GEM_VERSION = '2.3.11' unless defined? RAILS_GEM_VERSION
# Bootstrap the Rails environment, frameworks, and default configuration
require File.join(File.dirname(__FILE__), 'boot')
# MySociety specific helper functions
$:.push(File.join(File.dirname(__FILE__), '../commonlib/rblib'))
# ... if these fail to include, you need the commonlib submodule from git
# (type "git submodule update --init" in the whatdotheyknow directory)
# ruby-ole and ruby-msg. We use a custom ruby-msg to avoid a name conflict
$:.unshift(File.join(File.dirname(__FILE__), '../vendor/ruby-ole/lib'))
$:.unshift(File.join(File.dirname(__FILE__), '../vendor/ruby-msg/lib'))
require 'memcache'
# load (not require) so these helper scripts from commonlib re-run on reload.
load "validate.rb"
load "config.rb"
load "format.rb"
load "debug_helpers.rb"
load "util.rb"
Rails::Initializer.run do |config|
  # Load initial mySociety config
  MySociety::Config.set_file(File.join(config.root_path, 'config', 'general'), true)
  MySociety::Config.load_default
  # Settings in config/environments/* take precedence over those specified here
  # Skip frameworks you're not going to use (only works if using vendor/rails)
  # config.frameworks -= [ :action_web_service, :action_mailer ]
  # Only load the plugins named here, by default all plugins in vendor/plugins are loaded
  # config.plugins = %W( exception_notification ssl_requirement )
  # Add additional load paths for your own custom dirs
  # config.load_paths += %W( #{RAILS_ROOT}/extras )
  # Force all environments to use the same logger level
  # (by default production uses :info, the others :debug)
  # TEMP: uncomment this to turn on logging in production environments
  # config.log_level = :debug
  #
  # Specify gems that this application depends on and have them installed with rake gems:install
  config.gem "rack", :version => '1.1.0'
  config.gem 'rspec-rails', :lib => false, :version => '1.3.3'
  config.gem "recaptcha", :lib => "recaptcha/rails"
  config.gem 'will_paginate', :version => '~> 2.3.11', :source => 'http://gemcutter.org'
  # Your secret key for verifying cookie session data integrity.
  # If you change this key, all old sessions will become invalid!
  # Make sure the secret is at least 30 characters and all random,
  # no regular words or you'll be exposed to dictionary attacks.
  config.action_controller.session = {
    :key => '_wdtk_cookie_session',
    :secret => MySociety::Config.get("COOKIE_STORE_SESSION_SECRET", 'this default is insecure as code is open source, please override for live sites in config/general; this will do for local development')
  }
  config.action_controller.session_store = :cookie_store
  # Use SQL instead of Active Record's schema dumper when creating the test database.
  # This is necessary if your schema can't be completely dumped by the schema dumper,
  # like if you have constraints or database-specific column types
  config.active_record.schema_format = :sql
  # Activate observers that should always be running
  # config.active_record.observers = :cacher, :garbage_collector
  # Make Active Record use UTC-base instead of local time
  config.active_record.default_timezone = :utc
  # See Rails::Configuration for more options
  ENV['RECAPTCHA_PUBLIC_KEY'] = MySociety::Config::get("RECAPTCHA_PUBLIC_KEY", 'x');
  ENV['RECAPTCHA_PRIVATE_KEY'] = MySociety::Config::get("RECAPTCHA_PRIVATE_KEY", 'x');
end
# Add new inflection rules using the following format
# (all these examples are active by default):
# Inflector.inflections do |inflect|
#   inflect.plural /^(ox)$/i, '\1en'
#   inflect.singular /^(ox)en/i, '\1'
#   inflect.irregular 'person', 'people'
#   inflect.uncountable %w( fish sheep )
# end
# Add new mime types for use in respond_to blocks:
# Mime::Type.register "text/richtext", :rtf
# Mime::Type.register "application/x-mobile", :mobile
# The Rails cache is set up by the Interlock plugin to use memcached
# Domain for URLs (so can work for scripts, not just web pages)
ActionMailer::Base.default_url_options[:host] = MySociety::Config.get("DOMAIN", 'localhost:3000')
# So that javascript assets use full URL, so proxied admin URLs read javascript OK
if (MySociety::Config.get("DOMAIN", "") != "")
  ActionController::Base.asset_host = Proc.new { |source, request|
    # Serve admin-page assets from the admin host so reverse-proxied admin
    # pages resolve their javascript correctly.
    if request.fullpath.match(/^\/admin\//)
      MySociety::Config.get("ADMIN_PUBLIC_URL", "/")
    else
      MySociety::Config.get("DOMAIN", 'localhost:3000')
    end
  }
end
# Load monkey patches and other things from lib/
require 'tmail_extensions.rb'
require 'activesupport_cache_extensions.rb'
require 'public_body_categories.rb'
require 'timezone_fixes.rb'
require 'use_spans_for_errors.rb'
require 'make_html_4_compliant.rb'
require 'activerecord_errors_extensions.rb'
require 'willpaginate_hack.rb'
require 'sendmail_return_path.rb'
require 'tnef.rb'
|
# Version constant for the mumukit-inspection gem.
module Mumukit
  module Inspection
    # Current released version of the gem.
    VERSION = '5.0.0'
  end
end
Welcome 5.1.0!
# Version constant for the mumukit-inspection gem.
module Mumukit
  module Inspection
    # Current released version of the gem.
    VERSION = '5.1.0'
  end
end
|
require 'serialport'
module Firmata
  # Driver for a microcontroller board speaking the Firmata serial protocol.
  #
  # Fixed here: (1) the module was never closed -- the final `end` was
  # missing, which made the file a syntax error; (2) initialize referenced
  # the `serial_port` reader (still nil at that point) instead of the `port`
  # argument when given an already-open port object, so it crashed on
  # `read_timeout=`.
  class Board
    # Per-pin record: supported modes, current mode, last value, analog channel.
    Pin = Struct.new(:supportedModes, :mode, :value, :analog_channel)

    # pin modes
    INPUT = 0x00
    OUTPUT = 0x01
    ANALOG = 0x02
    PWM = 0x03
    SERVO = 0x04

    LOW = 0
    HIGH = 1

    # Firmata protocol command bytes
    DIGITAL_MESSAGE = 0x90
    ANALOG_MESSAGE = 0xE0
    ANALOG_MESSAGE_RANGE = 0xE0..0xEF
    SET_PIN_MODE = 0xF4
    REPORT_ANALOG = 0xC0
    REPORT_DIGITAL = 0xD0
    REPORT_VERSION = 0xF9
    CAPABILITY_QUERY = 0x6B
    CAPABILITY_RESPONSE = 0x6C
    START_SYSEX = 0xF0
    END_SYSEX = 0xF7
    PIN_STATE_QUERY = 0x6D
    PIN_STATE_RESPONSE = 0x6E
    SYSTEM_RESET = 0xFF
    ANALOG_MAPPING_QUERY = 0x69
    ANALOG_MAPPING_RESPONSE = 0x6A
    FIRMWARE_QUERY = 0x79
    PIN_MODE = 0xF4

    attr_reader :serial_port, :pins, :analog_pins

    # port - a device path (String, opened at 57600 baud 8N1) or an
    #        already-open serial-port-like object.
    def initialize(port)
      # Fixed: use the `port` argument (not the nil `serial_port` reader)
      # when the caller passes in a ready-made port object.
      @serial_port = port.is_a?(String) ? SerialPort.new(port, 57600, 8, 1, SerialPort::NONE) : port
      @serial_port.read_timeout = 2
      @major_version = 0
      @minor_version = 0
      @pins = []
      @analog_pins = []
      @started = false
      start_up
    end

    # One-time board handshake: query capabilities and analog mapping,
    # enable pin reporting and reset. Idempotent; returns self.
    def start_up
      unless @started
        delay 3
        query_capabilities
        query_analog_mapping
        turn_pins_on
        reset
        delay 1
        reset
        @started = true
      end
      self
    end

    # Drain and decode bytes currently available on the serial port,
    # updating version info and pin state. Stops quietly when the byte
    # stream runs dry (StopIteration from the enumerator).
    def process
      bytes = serial_port.bytes
      bytes.each do |byte|
        case byte
        when REPORT_VERSION
          @major_version = bytes.next
          @minor_version = bytes.next
        when ANALOG_MESSAGE_RANGE
          # 14-bit value arrives as two 7-bit bytes, LSB first.
          least_significant_byte = bytes.next
          most_significant_byte = bytes.next
          value = least_significant_byte | (most_significant_byte << 7)
          pin = byte & 0x0F
          if analog_pin = analog_pins[pin]
            pins[analog_pin].value = value
          end
        when START_SYSEX
          # Buffer everything up to END_SYSEX, then dispatch on the command.
          current_buffer = [byte]
          begin
            current_buffer.push(bytes.next)
          end until current_buffer.last == END_SYSEX
          command = current_buffer[1]
          case command
          when CAPABILITY_RESPONSE
            supportedModes = 0
            n = 0
            # Payload alternates mode/resolution pairs; 127 ends one pin.
            current_buffer.slice(2, current_buffer.length - 3).each do |byte|
              if byte == 127
                modesArray = []
                # the pin modes
                [ INPUT, OUTPUT, ANALOG, PWM, SERVO ].each do |mode|
                  modesArray.push(mode) unless (supportedModes & (1 << mode)).zero?
                end
                @pins.push(Pin.new(modesArray, OUTPUT, 0))
                supportedModes = 0
                n = 0
                next
              end
              supportedModes |= (1 << byte) if n.zero?
              n ^= 1
            end
          when ANALOG_MAPPING_RESPONSE
            pin_index = 0
            # 127 marks a pin with no analog channel.
            current_buffer.slice(2, current_buffer.length - 3).each do |byte|
              @pins[pin_index].analog_channel = byte
              @analog_pins.push(pin_index) unless byte == 127
              pin_index += 1
            end
          when PIN_STATE_RESPONSE
            pin = pins[current_buffer[2]]
            pin.mode = current_buffer[3]
            pin.value = current_buffer[4]
            pin.value |= (current_buffer[5] << 7) if current_buffer.size > 6
            pin.value |= (current_buffer[6] << 14) if current_buffer.size > 7
          when FIRMWARE_QUERY
          else
            # TODO decide what to do with unknown message
          end
        end
      end
    rescue StopIteration
      # do nadda
    end

    # Send a system-reset command to the board.
    def reset
      write(SYSTEM_RESET)
    end

    # Write raw command bytes to the serial port as a single string.
    def write(*commands)
      @serial_port.write(commands.map(&:chr).join)
    end

    # Set a pin's mode locally and on the board.
    def pin_mode(pin, mode)
      pins[pin].mode = mode
      write(PIN_MODE, pin, mode)
    end

    # Write a digital value to a pin, re-sending the whole 8-pin port the
    # pin belongs to (the protocol works per port, not per pin).
    def digital_write(pin, value)
      port = (pin / 8).floor
      port_value = 0
      @pins[pin].value = value
      8.times do |i|
        port_value |= (1 << i) unless @pins[8 * port + i].value.zero?
      end
      write(DIGITAL_MESSAGE | port, port_value & 0x7F, (port_value >> 7) & 0x7F)
    end

    # Sleep helper (kept as a method so it can be stubbed in tests).
    def delay(seconds)
      sleep(seconds)
    end

    # Firmware version as "major.minor".
    def version
      [@major_version, @minor_version].join('.')
    end

    # Ask the board to report its protocol version.
    def report_version
      write(REPORT_VERSION)
    end

    # Query the current state of a single pin.
    def query_pin_state(pin)
      write(START_SYSEX, PIN_STATE_QUERY, pin.to_i, END_SYSEX)
    end

    # Query the board's per-pin capabilities.
    def query_capabilities
      write(START_SYSEX, CAPABILITY_QUERY, END_SYSEX)
    end
    alias_method :qc, :query_capabilities

    # Query which pins map to analog channels.
    def query_analog_mapping
      write(START_SYSEX, ANALOG_MAPPING_QUERY, END_SYSEX)
    end
    alias_method :qam, :query_analog_mapping

    # Enable digital and analog reporting on all 16 ports/channels.
    def turn_pins_on
      16.times do |i|
        write(REPORT_DIGITAL | i, 1)
        write(REPORT_ANALOG | i, 1)
      end
    end

    # Disable digital and analog reporting on all 16 ports/channels.
    def turn_pins_off
      16.times do |i|
        write(REPORT_DIGITAL | i, 0)
        write(REPORT_ANALOG | i, 0)
      end
    end
  end
end
Fix missing "end".
require 'serialport'
module Firmata
  # Driver for a microcontroller board speaking the Firmata serial protocol.
  #
  # Fixed here: initialize referenced the `serial_port` reader (still nil at
  # that point) instead of the `port` argument when given an already-open
  # port object, so it crashed on `read_timeout=`.
  class Board
    # Per-pin record: supported modes, current mode, last value, analog channel.
    Pin = Struct.new(:supportedModes, :mode, :value, :analog_channel)

    # pin modes
    INPUT = 0x00
    OUTPUT = 0x01
    ANALOG = 0x02
    PWM = 0x03
    SERVO = 0x04

    LOW = 0
    HIGH = 1

    # Firmata protocol command bytes
    DIGITAL_MESSAGE = 0x90
    ANALOG_MESSAGE = 0xE0
    ANALOG_MESSAGE_RANGE = 0xE0..0xEF
    SET_PIN_MODE = 0xF4
    REPORT_ANALOG = 0xC0
    REPORT_DIGITAL = 0xD0
    REPORT_VERSION = 0xF9
    CAPABILITY_QUERY = 0x6B
    CAPABILITY_RESPONSE = 0x6C
    START_SYSEX = 0xF0
    END_SYSEX = 0xF7
    PIN_STATE_QUERY = 0x6D
    PIN_STATE_RESPONSE = 0x6E
    SYSTEM_RESET = 0xFF
    ANALOG_MAPPING_QUERY = 0x69
    ANALOG_MAPPING_RESPONSE = 0x6A
    FIRMWARE_QUERY = 0x79
    PIN_MODE = 0xF4

    attr_reader :serial_port, :pins, :analog_pins

    # port - a device path (String, opened at 57600 baud 8N1) or an
    #        already-open serial-port-like object.
    def initialize(port)
      # Fixed: use the `port` argument (not the nil `serial_port` reader)
      # when the caller passes in a ready-made port object.
      @serial_port = port.is_a?(String) ? SerialPort.new(port, 57600, 8, 1, SerialPort::NONE) : port
      @serial_port.read_timeout = 2
      @major_version = 0
      @minor_version = 0
      @pins = []
      @analog_pins = []
      @started = false
      start_up
    end

    # One-time board handshake: query capabilities and analog mapping,
    # enable pin reporting and reset. Idempotent; returns self.
    def start_up
      unless @started
        delay 3
        query_capabilities
        query_analog_mapping
        turn_pins_on
        reset
        delay 1
        reset
        @started = true
      end
      self
    end

    # Drain and decode bytes currently available on the serial port,
    # updating version info and pin state. Stops quietly when the byte
    # stream runs dry (StopIteration from the enumerator).
    def process
      bytes = serial_port.bytes
      bytes.each do |byte|
        case byte
        when REPORT_VERSION
          @major_version = bytes.next
          @minor_version = bytes.next
        when ANALOG_MESSAGE_RANGE
          # 14-bit value arrives as two 7-bit bytes, LSB first.
          least_significant_byte = bytes.next
          most_significant_byte = bytes.next
          value = least_significant_byte | (most_significant_byte << 7)
          pin = byte & 0x0F
          if analog_pin = analog_pins[pin]
            pins[analog_pin].value = value
          end
        when START_SYSEX
          # Buffer everything up to END_SYSEX, then dispatch on the command.
          current_buffer = [byte]
          begin
            current_buffer.push(bytes.next)
          end until current_buffer.last == END_SYSEX
          command = current_buffer[1]
          case command
          when CAPABILITY_RESPONSE
            supportedModes = 0
            n = 0
            # Payload alternates mode/resolution pairs; 127 ends one pin.
            current_buffer.slice(2, current_buffer.length - 3).each do |byte|
              if byte == 127
                modesArray = []
                # the pin modes
                [ INPUT, OUTPUT, ANALOG, PWM, SERVO ].each do |mode|
                  modesArray.push(mode) unless (supportedModes & (1 << mode)).zero?
                end
                @pins.push(Pin.new(modesArray, OUTPUT, 0))
                supportedModes = 0
                n = 0
                next
              end
              supportedModes |= (1 << byte) if n.zero?
              n ^= 1
            end
          when ANALOG_MAPPING_RESPONSE
            pin_index = 0
            # 127 marks a pin with no analog channel.
            current_buffer.slice(2, current_buffer.length - 3).each do |byte|
              @pins[pin_index].analog_channel = byte
              @analog_pins.push(pin_index) unless byte == 127
              pin_index += 1
            end
          when PIN_STATE_RESPONSE
            pin = pins[current_buffer[2]]
            pin.mode = current_buffer[3]
            pin.value = current_buffer[4]
            pin.value |= (current_buffer[5] << 7) if current_buffer.size > 6
            pin.value |= (current_buffer[6] << 14) if current_buffer.size > 7
          when FIRMWARE_QUERY
          else
            # TODO decide what to do with unknown message
          end
        end
      end
    rescue StopIteration
      # do nadda
    end

    # Send a system-reset command to the board.
    def reset
      write(SYSTEM_RESET)
    end

    # Write raw command bytes to the serial port as a single string.
    def write(*commands)
      @serial_port.write(commands.map(&:chr).join)
    end

    # Set a pin's mode locally and on the board.
    def pin_mode(pin, mode)
      pins[pin].mode = mode
      write(PIN_MODE, pin, mode)
    end

    # Write a digital value to a pin, re-sending the whole 8-pin port the
    # pin belongs to (the protocol works per port, not per pin).
    def digital_write(pin, value)
      port = (pin / 8).floor
      port_value = 0
      @pins[pin].value = value
      8.times do |i|
        port_value |= (1 << i) unless @pins[8 * port + i].value.zero?
      end
      write(DIGITAL_MESSAGE | port, port_value & 0x7F, (port_value >> 7) & 0x7F)
    end

    # Sleep helper (kept as a method so it can be stubbed in tests).
    def delay(seconds)
      sleep(seconds)
    end

    # Firmware version as "major.minor".
    def version
      [@major_version, @minor_version].join('.')
    end

    # Ask the board to report its protocol version.
    def report_version
      write(REPORT_VERSION)
    end

    # Query the current state of a single pin.
    def query_pin_state(pin)
      write(START_SYSEX, PIN_STATE_QUERY, pin.to_i, END_SYSEX)
    end

    # Query the board's per-pin capabilities.
    def query_capabilities
      write(START_SYSEX, CAPABILITY_QUERY, END_SYSEX)
    end
    alias_method :qc, :query_capabilities

    # Query which pins map to analog channels.
    def query_analog_mapping
      write(START_SYSEX, ANALOG_MAPPING_QUERY, END_SYSEX)
    end
    alias_method :qam, :query_analog_mapping

    # Enable digital and analog reporting on all 16 ports/channels.
    def turn_pins_on
      16.times do |i|
        write(REPORT_DIGITAL | i, 1)
        write(REPORT_ANALOG | i, 1)
      end
    end

    # Disable digital and analog reporting on all 16 ports/channels.
    def turn_pins_off
      16.times do |i|
        write(REPORT_DIGITAL | i, 0)
        write(REPORT_ANALOG | i, 0)
      end
    end
  end
end
RAILS_GEM_VERSION = '2.3.2' unless defined? RAILS_GEM_VERSION
require File.join(File.dirname(__FILE__), 'boot')
Rails::Initializer.run do |config|
config.time_zone = 'UTC'
config.gem 'haml',
:version => '2.1.0'
config.gem "thoughtbot-clearance",
:lib => 'clearance',
:source => 'http://gems.github.com',
:version => '0.6.6'
config.gem "thoughtbot-pacecar",
:lib => 'pacecar',
:source => 'http://gems.github.com',
:version => '1.1.5'
config.gem "ismasan-sluggable_finder",
:lib => 'sluggable_finder',
:version => '2.0.6'
config.gem 'mislav-will_paginate',
:version => '~> 2.3.11',
:lib => 'will_paginate',
:source => 'http://gems.github.com'
config.gem 'aws-s3',
:version => '0.6.2',
:lib => 'aws/s3'
config.gem "ambethia-smtp-tls",
:lib => "smtp-tls",
:version => "1.1.2",
:source => "http://gems.github.com"
config.action_mailer.delivery_method = :smtp
end
DO_NOT_REPLY = "donotreply@gemcutter.org"
require 'lib/rubygems'
require 'lib/rubygems/format'
require 'lib/rubygems/indexer'
require 'lib/indexer'
require 'lib/core_ext/string'
Gem.configuration.verbose = false
require 'vendor/gems/thoughtbot-clearance-0.6.6/app/controllers/clearance/sessions_controller'
require 'vendor/gems/thoughtbot-clearance-0.6.6/app/controllers/clearance/passwords_controller'
require 'vendor/gems/thoughtbot-clearance-0.6.6/app/controllers/clearance/confirmations_controller'
require 'vendor/gems/thoughtbot-clearance-0.6.6/app/controllers/clearance/users_controller'
Bring in the RubyGems source index as well
RAILS_GEM_VERSION = '2.3.2' unless defined? RAILS_GEM_VERSION
require File.join(File.dirname(__FILE__), 'boot')
Rails::Initializer.run do |config|
config.time_zone = 'UTC'
config.gem 'haml',
:version => '2.1.0'
config.gem "thoughtbot-clearance",
:lib => 'clearance',
:source => 'http://gems.github.com',
:version => '0.6.6'
config.gem "thoughtbot-pacecar",
:lib => 'pacecar',
:source => 'http://gems.github.com',
:version => '1.1.5'
config.gem "ismasan-sluggable_finder",
:lib => 'sluggable_finder',
:version => '2.0.6'
config.gem 'mislav-will_paginate',
:version => '~> 2.3.11',
:lib => 'will_paginate',
:source => 'http://gems.github.com'
config.gem 'aws-s3',
:version => '0.6.2',
:lib => 'aws/s3'
config.gem "ambethia-smtp-tls",
:lib => "smtp-tls",
:version => "1.1.2",
:source => "http://gems.github.com"
config.action_mailer.delivery_method = :smtp
end
DO_NOT_REPLY = "donotreply@gemcutter.org"
require 'lib/rubygems'
require 'lib/rubygems/format'
require 'lib/rubygems/indexer'
require 'lib/rubygems/source_index'
require 'lib/indexer'
require 'lib/core_ext/string'
Gem.configuration.verbose = false
require 'vendor/gems/thoughtbot-clearance-0.6.6/app/controllers/clearance/sessions_controller'
require 'vendor/gems/thoughtbot-clearance-0.6.6/app/controllers/clearance/passwords_controller'
require 'vendor/gems/thoughtbot-clearance-0.6.6/app/controllers/clearance/confirmations_controller'
require 'vendor/gems/thoughtbot-clearance-0.6.6/app/controllers/clearance/users_controller'
|
module Mumukit::Service
module Collection
def all
project
end
def count
mongo_collection.find.count
end
def exists?(id)
mongo_collection.find(id: id).count > 0
end
def delete!(id)
mongo_collection.delete_one(id: id)
end
def find(id)
find_by(id: id)
end
def find_by(args)
wrap _find_by(args)
end
def find_by!(args)
wrap _find_by(args).tap do |first|
raise Mumukit::Service::DocumentNotFoundError, "document #{args.to_json} not found" unless first
end
end
def insert!(guide)
guide.validate!
with_id new_id do |id|
mongo_collection.insert_one guide.raw.merge(id)
end
end
private
def _find_by(args)
mongo_collection.find(args).projection(_id: 0).first
end
def mongo_collection
mongo_database.client[mongo_collection_name]
end
def project(&block)
raw = mongo_collection.find.projection(_id: 0).map { |it| wrap it }
raw = raw.select(&block) if block_given?
wrap_array raw
end
def wrap(mongo_document)
Mumukit::Service::JsonWrapper.new mongo_document
end
def wrap_array(array)
Mumukit::Service::JsonArrayWrapper.new array
end
def new_id
Mumukit::Service::IdGenerator.next
end
def with_id(id)
id_object = {id: id}
yield id_object
id_object
end
end
end
Extract validation of document presence into a helper
module Mumukit::Service
module Collection
def all
project
end
def count
mongo_collection.find.count
end
def exists?(id)
mongo_collection.find(id: id).count > 0
end
def delete!(id)
mongo_collection.delete_one(id: id)
end
def find(id)
find_by(id: id)
end
def find_by(args)
_find_by(args).
try { |it| wrap it }
end
def find_by!(args)
_find_by(args).
tap { |first| validate_presence(args, first) }.
try { |it| wrap it }
end
def insert!(guide)
guide.validate!
with_id new_id do |id|
mongo_collection.insert_one guide.raw.merge(id)
end
end
private
def validate_presence(args, first)
raise Mumukit::Service::DocumentNotFoundError, "document #{args.to_json} not found" unless first
end
def _find_by(args)
mongo_collection.find(args).projection(_id: 0).first
end
def mongo_collection
mongo_database.client[mongo_collection_name]
end
def project(&block)
raw = mongo_collection.find.projection(_id: 0).map { |it| wrap it }
raw = raw.select(&block) if block_given?
wrap_array raw
end
def wrap(mongo_document)
Mumukit::Service::JsonWrapper.new mongo_document
end
def wrap_array(array)
Mumukit::Service::JsonArrayWrapper.new array
end
def new_id
Mumukit::Service::IdGenerator.next
end
def with_id(id)
id_object = {id: id}
yield id_object
id_object
end
end
end |
module FlagShihTzu
TRUE_VALUES = [true, 1, '1', 't', 'T', 'true', 'TRUE'] # taken from ActiveRecord::ConnectionAdapters::Column
def self.included(base)
base.extend(ClassMethods)
end
module ClassMethods
def has_flags(flag_hash, options = {})
options = {:named_scopes => true, :column => 'flags'}.update(options)
@flag_column = options[:column]
check_flag_column
@flag_mapping = {}
flag_hash.each do |flag_key, flag_name|
raise ArgumentError, "has_flags: flag keys should be positive integers, and #{flag_key} is not" unless is_valid_flag_key(flag_key)
raise ArgumentError, "has_flags: flag names should be symbols, and #{flag_name} is not" unless is_valid_flag_name(flag_name)
@flag_mapping[flag_name] = 2**(flag_key - 1)
class_eval <<-EVAL
def #{flag_name}
flag_enabled?(:#{flag_name})
end
def #{flag_name}?
flag_enabled?(:#{flag_name})
end
def #{flag_name}=(value)
FlagShihTzu::TRUE_VALUES.include?(value) ? enable_flag(:#{flag_name}) : disable_flag(:#{flag_name})
end
def self.#{flag_name}_condition
sql_condition_for_flag(:#{flag_name}, true)
end
def self.not_#{flag_name}_condition
sql_condition_for_flag(:#{flag_name}, false)
end
EVAL
if options[:named_scopes]
class_eval <<-EVAL
named_scope :#{flag_name}, lambda { { :conditions => #{flag_name}_condition } }
named_scope :not_#{flag_name}, lambda { { :conditions => not_#{flag_name}_condition } }
EVAL
end
end
end
def flag_mapping
@flag_mapping
end
def flag_column
@flag_column
end
def check_flag(flag)
raise ArgumentError, "Invalid flag '#{flag}'" unless flag_mapping.include?(flag)
end
private
def check_flag_column
if not ActiveRecord::Base.connection.tables.include?(table_name)
puts "Error: Table '#{table_name}' doesn't exist"
elsif not columns.any? { |column| column.name == flag_column && column.type == :integer }
puts "Error: Table '#{table_name}' must have an integer column named '#{flag_column}' in order to use FlagShihTzu"
end
end
def sql_condition_for_flag(flag, enabled = true)
check_flag(flag)
"(#{table_name}.#{flag_column} & #{flag_mapping[flag]} = #{enabled ? flag_mapping[flag] : 0})"
end
def is_valid_flag_key(flag_key)
flag_key > 0 && flag_key == flag_key.to_i
end
def is_valid_flag_name(flag_name)
flag_name.is_a?(Symbol)
end
end
def enable_flag(flag)
self.class.check_flag(flag)
self.flags = self.flags | self.class.flag_mapping[flag]
end
def disable_flag(flag)
self.class.check_flag(flag)
self.flags = self.flags & ~self.class.flag_mapping[flag]
end
def flag_enabled?(flag)
self.class.check_flag(flag)
get_bit_for(flag) == 0 ? false : true
end
def flag_disabled?(flag)
self.class.check_flag(flag)
!flag_enabled?(flag)
end
def flags
self[self.class.flag_column] || 0
end
def flags=(value)
self[self.class.flag_column] = value
end
private
def get_bit_for(flag)
self.flags & self.class.flag_mapping[flag]
end
end
Use the table_exists? helper instead of querying connection.tables in check_flag_column
module FlagShihTzu
TRUE_VALUES = [true, 1, '1', 't', 'T', 'true', 'TRUE'] # taken from ActiveRecord::ConnectionAdapters::Column
def self.included(base)
base.extend(ClassMethods)
end
module ClassMethods
def has_flags(flag_hash, options = {})
options = {:named_scopes => true, :column => 'flags'}.update(options)
@flag_column = options[:column]
check_flag_column
@flag_mapping = {}
flag_hash.each do |flag_key, flag_name|
raise ArgumentError, "has_flags: flag keys should be positive integers, and #{flag_key} is not" unless is_valid_flag_key(flag_key)
raise ArgumentError, "has_flags: flag names should be symbols, and #{flag_name} is not" unless is_valid_flag_name(flag_name)
@flag_mapping[flag_name] = 2**(flag_key - 1)
class_eval <<-EVAL
def #{flag_name}
flag_enabled?(:#{flag_name})
end
def #{flag_name}?
flag_enabled?(:#{flag_name})
end
def #{flag_name}=(value)
FlagShihTzu::TRUE_VALUES.include?(value) ? enable_flag(:#{flag_name}) : disable_flag(:#{flag_name})
end
def self.#{flag_name}_condition
sql_condition_for_flag(:#{flag_name}, true)
end
def self.not_#{flag_name}_condition
sql_condition_for_flag(:#{flag_name}, false)
end
EVAL
if options[:named_scopes]
class_eval <<-EVAL
named_scope :#{flag_name}, lambda { { :conditions => #{flag_name}_condition } }
named_scope :not_#{flag_name}, lambda { { :conditions => not_#{flag_name}_condition } }
EVAL
end
end
end
def flag_mapping
@flag_mapping
end
def flag_column
@flag_column
end
def check_flag(flag)
raise ArgumentError, "Invalid flag '#{flag}'" unless flag_mapping.include?(flag)
end
private
def check_flag_column
if not table_exists?
puts "Error: Table '#{table_name}' doesn't exist"
elsif not columns.any? { |column| column.name == flag_column && column.type == :integer }
puts "Error: Table '#{table_name}' must have an integer column named '#{flag_column}' in order to use FlagShihTzu"
end
end
def sql_condition_for_flag(flag, enabled = true)
check_flag(flag)
"(#{table_name}.#{flag_column} & #{flag_mapping[flag]} = #{enabled ? flag_mapping[flag] : 0})"
end
def is_valid_flag_key(flag_key)
flag_key > 0 && flag_key == flag_key.to_i
end
def is_valid_flag_name(flag_name)
flag_name.is_a?(Symbol)
end
end
def enable_flag(flag)
self.class.check_flag(flag)
self.flags = self.flags | self.class.flag_mapping[flag]
end
def disable_flag(flag)
self.class.check_flag(flag)
self.flags = self.flags & ~self.class.flag_mapping[flag]
end
def flag_enabled?(flag)
self.class.check_flag(flag)
get_bit_for(flag) == 0 ? false : true
end
def flag_disabled?(flag)
self.class.check_flag(flag)
!flag_enabled?(flag)
end
def flags
self[self.class.flag_column] || 0
end
def flags=(value)
self[self.class.flag_column] = value
end
private
def get_bit_for(flag)
self.flags & self.class.flag_mapping[flag]
end
end
|
# Be sure to restart your server when you modify this file
# Uncomment below to force Rails into production mode when
# you don't control web/app server and can't set it the proper way
# ENV['RAILS_ENV'] ||= 'production'
# Specifies gem version of Rails to use when vendor/rails is not present
RAILS_GEM_VERSION = '2.1.0' unless defined? RAILS_GEM_VERSION
# Bootstrap the Rails environment, frameworks, and default configuration
require File.join(File.dirname(__FILE__), 'boot')
Rails::Initializer.run do |config|
# Settings in config/environments/* take precedence over those specified here.
# Application configuration should go into files in config/initializers
# -- all .rb files in that directory are automatically loaded.
# See Rails::Configuration for more options.
# Skip frameworks you're not going to use. To use Rails without a database
# you must remove the Active Record framework.
# config.frameworks -= [ :active_record, :active_resource, :action_mailer ]
# Specify gems that this application depends on.
# They can then be installed with "rake gems:install" on new installations.
# config.gem "bj"
# config.gem "hpricot", :version => '0.6', :source => "http://code.whytheluckystiff.net"
# config.gem "aws-s3", :lib => "aws/s3"
# Only load the plugins named here, in the order given. By default, all plugins
# in vendor/plugins are loaded in alphabetical order.
# :all can be used as a placeholder for all plugins not explicitly named
# config.plugins = [ :exception_notification, :ssl_requirement, :all ]
# Add additional load paths for your own custom dirs
# config.load_paths += %W( #{RAILS_ROOT}/extras )
# Force all environments to use the same logger level
# (by default production uses :info, the others :debug)
# config.log_level = :debug
# Make Time.zone default to the specified zone, and make Active Record store time values
# in the database in UTC, and return them converted to the specified local zone.
# Run "rake -D time" for a list of tasks for finding time zone names. Comment line to use default local time.
config.time_zone = 'UTC'
# Your secret key for verifying cookie session data integrity.
# If you change this key, all old sessions will become invalid!
# Make sure the secret is at least 30 characters and all random,
# no regular words or you'll be exposed to dictionary attacks.
config.action_controller.session = {
:session_key => '_mevo_session',
:secret => 'd742108271488dfdc3c8d055f5394778cf9f7522ac0334921ec3578cc03f16a3bd477c2bd79eb83d30f74025948c887426fb04af12d2a94d4c69444e2000d191'
}
# Use the database for sessions instead of the cookie-based default,
# which shouldn't be used to store highly confidential information
# (create the session table with "rake db:sessions:create")
# config.action_controller.session_store = :active_record_store
# Use SQL instead of Active Record's schema dumper when creating the test database.
# This is necessary if your schema can't be completely dumped by the schema dumper,
# like if you have constraints or database-specific column types
# config.active_record.schema_format = :sql
# Activate observers that should always be running
# config.active_record.observers = :cacher, :garbage_collector
end
Set up the mislav-will_paginate and mattetti-googlecharts gem dependencies
# Be sure to restart your server when you modify this file
# Uncomment below to force Rails into production mode when
# you don't control web/app server and can't set it the proper way
# ENV['RAILS_ENV'] ||= 'production'
# Specifies gem version of Rails to use when vendor/rails is not present
RAILS_GEM_VERSION = '2.1.0' unless defined? RAILS_GEM_VERSION
# Bootstrap the Rails environment, frameworks, and default configuration
require File.join(File.dirname(__FILE__), 'boot')
Rails::Initializer.run do |config|
# Settings in config/environments/* take precedence over those specified here.
# Application configuration should go into files in config/initializers
# -- all .rb files in that directory are automatically loaded.
# See Rails::Configuration for more options.
# Skip frameworks you're not going to use. To use Rails without a database
# you must remove the Active Record framework.
# config.frameworks -= [ :active_record, :active_resource, :action_mailer ]
# Specify gems that this application depends on.
# They can then be installed with "rake gems:install" on new installations.
# config.gem "bj"
# config.gem "hpricot", :version => '0.6', :source => "http://code.whytheluckystiff.net"
# config.gem "aws-s3", :lib => "aws/s3"
config.gem 'mislav-will_paginate', :version => '~> 2.3.2', :lib => 'will_paginate', :source => 'http://gems.github.com'
config.gem 'mattetti-googlecharts', :version => '~> 1.3.5', :lib => 'gchart', :source => 'http://gems.github.com'
# Only load the plugins named here, in the order given. By default, all plugins
# in vendor/plugins are loaded in alphabetical order.
# :all can be used as a placeholder for all plugins not explicitly named
# config.plugins = [ :exception_notification, :ssl_requirement, :all ]
# Add additional load paths for your own custom dirs
# config.load_paths += %W( #{RAILS_ROOT}/extras )
# Force all environments to use the same logger level
# (by default production uses :info, the others :debug)
# config.log_level = :debug
# Make Time.zone default to the specified zone, and make Active Record store time values
# in the database in UTC, and return them converted to the specified local zone.
# Run "rake -D time" for a list of tasks for finding time zone names. Comment line to use default local time.
config.time_zone = 'UTC'
# Your secret key for verifying cookie session data integrity.
# If you change this key, all old sessions will become invalid!
# Make sure the secret is at least 30 characters and all random,
# no regular words or you'll be exposed to dictionary attacks.
config.action_controller.session = {
:session_key => '_mevo_session',
:secret => 'd742108271488dfdc3c8d055f5394778cf9f7522ac0334921ec3578cc03f16a3bd477c2bd79eb83d30f74025948c887426fb04af12d2a94d4c69444e2000d191'
}
# Use the database for sessions instead of the cookie-based default,
# which shouldn't be used to store highly confidential information
# (create the session table with "rake db:sessions:create")
# config.action_controller.session_store = :active_record_store
# Use SQL instead of Active Record's schema dumper when creating the test database.
# This is necessary if your schema can't be completely dumped by the schema dumper,
# like if you have constraints or database-specific column types
# config.active_record.schema_format = :sql
# Activate observers that should always be running
# config.active_record.observers = :cacher, :garbage_collector
end
|
require 'rubygems'
require 'dm-core'
require 'dm-types'
require 'dm-validations'
require 'omf_common/lobject'
require 'set'
#require 'omf-sfa/resource/oproperty'
autoload :OProperty, 'omf-sfa/resource/oproperty'
#require 'omf-sfa/resource/group_membership'
autoload :GroupMembership, 'omf-sfa/resource/group_membership'
autoload :OAccount, 'omf-sfa/resource/oaccount'
autoload :OGroup, 'omf-sfa/resource/ogroup'
autoload :OLease, 'omf-sfa/resource/olease'
# module OMF::SFA::Resource
# class OResource; end
# end
#require 'omf-sfa/resource/oaccount'
module OMF::SFA::Resource
# This is the basic resource from which all other
# resources descend.
#
# Note: Can't call it 'Resource' to avoid any confusion
# with DataMapper::Resource
#
class OResource
include OMF::Common::Loggable
extend OMF::Common::Loggable
include DataMapper::Resource
include DataMapper::Validations
#@@default_href_prefix = 'http://somehost/resources/'
@@default_href_prefix = '/resources'
@@oprops = {}
# managing dm object
property :id, Serial
property :type, Discriminator
property :uuid, UUID
property :name, String
#property :href, String, :length => 255, :default => lambda {|r, m| r.def_href() }
property :urn, String, :length => 255
property :resource_type, String
has n, :o_properties, 'OProperty'
alias oproperties o_properties
#has n, :contained_in_groups, :model => :Group, :through => GroupMembership
#has n, :contained_in_groups, 'Group' #, :through => :group_membership #GroupMembership
#has n, :group_memberships
#has n, :groups, 'Group', :through => :group_membership #, :via => :groups
has n, :group_memberships, :child_key => [ :o_resource_id ]
has n, :included_in_groups, 'OGroup', :through => :group_memberships, :via => :o_group
belongs_to :account, :model => 'OAccount', :child_key => [ :account_id ], :required => false
def self.oproperty(name, type, opts = {})
name = name.to_s
# should check if +name+ is already used
op = @@oprops[self] ||= {}
opts[:__type__] = type
if opts[:functional] == false
# property is an array
pname = DataMapper::Inflector.pluralize(name)
op[pname] = opts
define_method pname do
res = oproperty_get(pname)
if res == nil
oproperty_set(pname, res = [])
# We make a oproperty_get in order to get the extended Array with
# the overidden '<<' method. Check module ArrayProxy in oproperty.rb
res = oproperty_get(pname)
end
#puts "PROPERTY_GET #{res}"
res
end
define_method "#{pname}=" do |v|
#unless v.kind_of? Enumerable
# raise "property '#{pname}' expects a value of type Enumerable"
#end
#val = self.eval("#{pname}")
#puts "RESPOND: '#{respond_to?(pname.to_sym)}' self:'#{self.inspect}'"
#val = send(pname.to_sym).value#.dup
#val = oproperty_get(pname)
#unless v.is_a? PropValueArray
unless v.is_a? Array
# we really want to store it as a PropValueArray
#c = PropValueArray.new
#if v.respond_to?(:each)
# v.each {|e| c << e}
#else
# c << v
#end
#v = c
v = [v]
#puts "VAL is '#{val}'"
end
#puts "NAME is '#{name}'"
#puts "V is '#{v}'"
oproperty_set(pname, v)
end
else
op[name] = opts
define_method name do
res = oproperty_get(name)
if res.nil?
res = opts[:default]
if res.nil? && (self.respond_to?(m = "default_#{name}".to_sym))
res = send(m)
end
end
res
end
define_method "#{name}=" do |v|
oproperty_set(name, v)
end
end
end
# Clone this resource this resource. However, the clone will have a unique UUID
#
def clone()
clone = self.class.new
attributes.each do |k, v|
next if k == :id || k == :uuid
clone.attribute_set(k, DataMapper::Ext.try_dup(v))
end
oproperties.each do |p|
clone.oproperty_set(p.name, DataMapper::Ext.try_dup(p.value))
end
clone.uuid = UUIDTools::UUID.random_create
return clone
end
def uuid()
unless uuid = attribute_get(:uuid)
uuid = self.uuid = UUIDTools::UUID.random_create
end
uuid
end
def href(opts = {})
if prefix = opts[:name_prefix]
href = "#{prefix}/#{self.name || self.uuid.to_s}"
# if self.name.start_with? '_'
# h[:href] = prefix
# else
# h[:href] = "#{prefix}/#{self.name || uuid}"
# end
elsif prefix = opts[:href_prefix] || @@default_href_prefix
href = "#{prefix}/#{self.uuid.to_s}"
end
href
end
# Return the status of the resource. Should be
# one of: _configuring_, _ready_, _failed_, and _unknown_
#
def status
'unknown'
end
def oproperty(pname)
self.oproperties.first(:name => pname.to_sym)
end
def oproperty_get(pname)
#puts "OPROPERTY_GET: pname:'#{pname}'"
pname = pname.to_sym
return self.name if pname == :name
prop = self.oproperties.first(:name => pname)
prop.nil? ? nil : prop.value
end
alias_method :[], :oproperty_get
def oproperty_set(pname, value)
#puts "OPROPERTY_SET pname:'#{pname}', value:'#{value.class}', self:'#{self.inspect}'"
pname = pname.to_sym
if pname == :name
self.name = value
else
self.save
prop = self.oproperties.first_or_create(:name => pname)
prop.value = value
end
value
end
alias_method :[]=, :oproperty_set
def oproperties_as_hash
res = {}
oproperties.each do |p|
res[p.name] = p.value
end
res
end
def each_resource(&block)
# resources don't contain other resources, groups do'
end
# alias_method :_dirty_children?, :dirty_children?
# def dirty_children?
# puts "CHECKING CHILDREN DIRTY: #{_dirty_children?}"
# _dirty_children?
# end
alias_method :_dirty_self?, :dirty_self?
def dirty_self?
#puts "CHECKING DIRTY #{_dirty_self?}"
return true if _dirty_self?
o_properties.each do |p|
return true if p.dirty_self?
end
false
end
# alias_method :_dirty_attributes, :dirty_attributes
# def dirty_attributes
# dirty = _dirty_attributes
# puts "DIRTY ATTRIBUTE #{dirty.inspect}"
# dirty
# end
# Return true if this resource is a Group
def group?
false
end
# Remove this resource from all groups it currently belongs.
#
def remove_from_all_groups
self.group_memberships.each {|m| m.destroy}
end
# Add this resource and all contained to +set+.
def all_resources(set = Set.new)
set << self
set
end
before :save do
unless self.uuid
self.uuid = UUIDTools::UUID.random_create
end
unless self.name
self.name = self.urn ? GURN.create(self.urn).short_name : "r#{self.object_id}"
end
unless self.urn
# The purpose or function of a URN is to provide a globally unique,
# persistent identifier used for recognition, for access to
# characteristics of the resource or for access to the resource
# itself.
# source: http://tools.ietf.org/html/rfc1737
#
#name = self.name
self.urn = GURN.create(self.uuid.to_s, self.class).to_s
end
end
def destroy
#debug "ORESOURCE destroy #{self}"
self.remove_from_all_groups
#if p = self.provided_by
# pa = p.provides
# pa.delete self
# r = p.save
# i = 0
#end
# first destroy all properties
self.oproperties.all().each do |p|
#debug "ORESOURCE destroying property '#{p.inspect}'"
r = p.destroy
r
end
#p = self.oproperties.all()
super
end
def destroy!
#debug "ORESOURCE destroy! #{self}"
destroy
super
end
def to_json(*a)
unless self.id
# need an id, means I haven't been saved yet
save
end
{
'json_class' => self.class.name,
'id' => self.id
}.to_json(*a)
end
#def self.from_json(o)
# puts "FROM_JSON"
# klass = o['json_class']
# id = o['id']
# eval(klass).first(:id => id)
#end
def self.json_create(o)
klass = o['json_class']
id = o['id']
r = eval(klass).first(:id => id)
r
end
def to_hash(objs = {}, opts = {})
#debug "to_hash:opts: #{opts.keys.inspect}::#{objs.keys.inspect}::"
h = {}
uuid = h[:uuid] = self.uuid.to_s
h[:href] = self.href(opts)
name = self.name
if name && ! name.start_with?('_')
h[:name] = self.name
end
h[:type] = self.resource_type || 'unknown'
return h if objs.key?(self)
objs[self] = true
_oprops_to_hash(h)
h
end
def default_href_prefix
@@default_href_prefix
end
def _oprops_to_hash(h)
klass = self.class
while klass
if op = @@oprops[klass]
op.each do |k, v|
k = k.to_sym
unless (value = send(k)).nil?
if value.kind_of? OResource
value = value.uuid.to_s
end
if value.kind_of? Array
next if value.empty?
value = value.collect do |e|
(e.kind_of? OResource) ? e.uuid.to_s : e
end
end
h[k] = value
end
end
end
klass = klass.superclass
end
h
end
end
# Extend array to add functionality dealing with property values
#class PropValueArray < Array
# def to_json(*a)
# {
# 'json_class' => self.class.name,
# 'els' => self.to_a.to_json
# }.to_json(*a)
# end
# def self.json_create(o)
# # http://www.ruby-lang.org/en/news/2013/02/22/json-dos-cve-2013-0269/
# v = JSON.load(o['els'])
# v
# end
#end
end # OMF::SFA
Fix JSON serialization by overriding as_json, which is used by ActiveSupport
require 'rubygems'
require 'dm-core'
require 'dm-types'
require 'dm-validations'
require 'omf_common/lobject'
require 'set'
#require 'omf-sfa/resource/oproperty'
autoload :OProperty, 'omf-sfa/resource/oproperty'
#require 'omf-sfa/resource/group_membership'
autoload :GroupMembership, 'omf-sfa/resource/group_membership'
autoload :OAccount, 'omf-sfa/resource/oaccount'
autoload :OGroup, 'omf-sfa/resource/ogroup'
autoload :OLease, 'omf-sfa/resource/olease'
# module OMF::SFA::Resource
# class OResource; end
# end
#require 'omf-sfa/resource/oaccount'
module OMF::SFA::Resource
# This is the basic resource from which all other
# resources descend.
#
# Note: Can't call it 'Resource' to avoid any confusion
# with DataMapper::Resource
#
class OResource
include OMF::Common::Loggable
extend OMF::Common::Loggable
include DataMapper::Resource
include DataMapper::Validations
#@@default_href_prefix = 'http://somehost/resources/'
@@default_href_prefix = '/resources'
@@oprops = {}
# managing dm object
property :id, Serial
property :type, Discriminator
property :uuid, UUID
property :name, String
#property :href, String, :length => 255, :default => lambda {|r, m| r.def_href() }
property :urn, String, :length => 255
property :resource_type, String
has n, :o_properties, 'OProperty'
alias oproperties o_properties
#has n, :contained_in_groups, :model => :Group, :through => GroupMembership
#has n, :contained_in_groups, 'Group' #, :through => :group_membership #GroupMembership
#has n, :group_memberships
#has n, :groups, 'Group', :through => :group_membership #, :via => :groups
has n, :group_memberships, :child_key => [ :o_resource_id ]
has n, :included_in_groups, 'OGroup', :through => :group_memberships, :via => :o_group
belongs_to :account, :model => 'OAccount', :child_key => [ :account_id ], :required => false
def self.oproperty(name, type, opts = {})
name = name.to_s
# should check if +name+ is already used
op = @@oprops[self] ||= {}
opts[:__type__] = type
if opts[:functional] == false
# property is an array
pname = DataMapper::Inflector.pluralize(name)
op[pname] = opts
define_method pname do
res = oproperty_get(pname)
if res == nil
oproperty_set(pname, res = [])
# We make a oproperty_get in order to get the extended Array with
# the overidden '<<' method. Check module ArrayProxy in oproperty.rb
res = oproperty_get(pname)
end
#puts "PROPERTY_GET #{res}"
res
end
define_method "#{pname}=" do |v|
#unless v.kind_of? Enumerable
# raise "property '#{pname}' expects a value of type Enumerable"
#end
#val = self.eval("#{pname}")
#puts "RESPOND: '#{respond_to?(pname.to_sym)}' self:'#{self.inspect}'"
#val = send(pname.to_sym).value#.dup
#val = oproperty_get(pname)
#unless v.is_a? PropValueArray
unless v.is_a? Array
# we really want to store it as a PropValueArray
#c = PropValueArray.new
#if v.respond_to?(:each)
# v.each {|e| c << e}
#else
# c << v
#end
#v = c
v = [v]
#puts "VAL is '#{val}'"
end
#puts "NAME is '#{name}'"
#puts "V is '#{v}'"
oproperty_set(pname, v)
end
else
op[name] = opts
define_method name do
res = oproperty_get(name)
if res.nil?
res = opts[:default]
if res.nil? && (self.respond_to?(m = "default_#{name}".to_sym))
res = send(m)
end
end
res
end
define_method "#{name}=" do |v|
oproperty_set(name, v)
end
end
end
# Clone this resource this resource. However, the clone will have a unique UUID
#
def clone()
clone = self.class.new
attributes.each do |k, v|
next if k == :id || k == :uuid
clone.attribute_set(k, DataMapper::Ext.try_dup(v))
end
oproperties.each do |p|
clone.oproperty_set(p.name, DataMapper::Ext.try_dup(p.value))
end
clone.uuid = UUIDTools::UUID.random_create
return clone
end
def uuid()
unless uuid = attribute_get(:uuid)
uuid = self.uuid = UUIDTools::UUID.random_create
end
uuid
end
def href(opts = {})
if prefix = opts[:name_prefix]
href = "#{prefix}/#{self.name || self.uuid.to_s}"
# if self.name.start_with? '_'
# h[:href] = prefix
# else
# h[:href] = "#{prefix}/#{self.name || uuid}"
# end
elsif prefix = opts[:href_prefix] || @@default_href_prefix
href = "#{prefix}/#{self.uuid.to_s}"
end
href
end
# Return the status of the resource. Should be
# one of: _configuring_, _ready_, _failed_, and _unknown_
#
def status
'unknown'
end
def oproperty(pname)
self.oproperties.first(:name => pname.to_sym)
end
def oproperty_get(pname)
#puts "OPROPERTY_GET: pname:'#{pname}'"
pname = pname.to_sym
return self.name if pname == :name
prop = self.oproperties.first(:name => pname)
prop.nil? ? nil : prop.value
end
alias_method :[], :oproperty_get
def oproperty_set(pname, value)
#puts "OPROPERTY_SET pname:'#{pname}', value:'#{value.class}', self:'#{self.inspect}'"
pname = pname.to_sym
if pname == :name
self.name = value
else
self.save
prop = self.oproperties.first_or_create(:name => pname)
prop.value = value
end
value
end
alias_method :[]=, :oproperty_set
def oproperties_as_hash
res = {}
oproperties.each do |p|
res[p.name] = p.value
end
res
end
def each_resource(&block)
# resources don't contain other resources, groups do'
end
# alias_method :_dirty_children?, :dirty_children?
# def dirty_children?
# puts "CHECKING CHILDREN DIRTY: #{_dirty_children?}"
# _dirty_children?
# end
alias_method :_dirty_self?, :dirty_self?
def dirty_self?
#puts "CHECKING DIRTY #{_dirty_self?}"
return true if _dirty_self?
o_properties.each do |p|
return true if p.dirty_self?
end
false
end
# alias_method :_dirty_attributes, :dirty_attributes
# def dirty_attributes
# dirty = _dirty_attributes
# puts "DIRTY ATTRIBUTE #{dirty.inspect}"
# dirty
# end
# Return true if this resource is a Group
def group?
false
end
# Remove this resource from all groups it currently belongs.
#
def remove_from_all_groups
self.group_memberships.each {|m| m.destroy}
end
# Add this resource (and, for group subclasses, everything contained)
# to +set+.
#
# set - a Set-like accumulator (defaults to a new Set)
#
# Returns the accumulator.
def all_resources(set = Set.new)
  set.add(self)
end
# Before every save make sure the identifying triple (uuid, name,
# urn) is populated.
before :save do
  # Assign a random UUID when none was given.
  unless self.uuid
    self.uuid = UUIDTools::UUID.random_create
  end
  # Derive a name from the URN when possible, otherwise fall back to
  # a throwaway name based on the Ruby object id.
  unless self.name
    self.name = self.urn ? GURN.create(self.urn).short_name : "r#{self.object_id}"
  end
  unless self.urn
    # The purpose or function of a URN is to provide a globally unique,
    # persistent identifier used for recognition, for access to
    # characteristics of the resource or for access to the resource
    # itself.
    # source: http://tools.ietf.org/html/rfc1737
    self.urn = GURN.create(self.uuid.to_s, self.class).to_s
  end
end
# Destroy this resource: detach it from all groups, destroy all of
# its oproperty children so no orphan rows remain, then destroy the
# record itself.
def destroy
  self.remove_from_all_groups
  # Destroy dependent properties first.
  self.oproperties.all.each do |prop|
    prop.destroy
  end
  super
end
# Forcibly destroy this resource. Runs the normal #destroy cleanup
# (group memberships and oproperties) before delegating to the
# datastore's destroy!.
def destroy!
  #debug "ORESOURCE destroy! #{self}"
  destroy
  super
end
# Serialise this resource as a JSON reference ({json_class, id}).
# Saves first when the record has no id yet, since the id is the
# handle used to rehydrate it later (see .json_create).
def to_json(*a)
  save unless self.id
  {
    'json_class' => self.class.name,
    'id'         => self.id
  }.to_json(*a)
end
# Hash representation used by JSON serialisers: a {json_class, id}
# reference rather than the full record.
def as_json(options = {})
  { "json_class" => self.class.name, "id" => self.id }
end
#def self.from_json(o)
# puts "FROM_JSON"
# klass = o['json_class']
# id = o['id']
# eval(klass).first(:id => id)
#end
# Rehydrate a resource from its JSON reference {json_class, id}
# (the counterpart of #to_json / #as_json).
#
# o - Hash decoded from JSON.
#
# Returns the first record of the referenced class with that id.
def self.json_create(o)
  # SECURITY FIX: the class name comes from serialized data; resolve
  # it with const_get instead of eval so arbitrary code embedded in a
  # payload cannot be executed.
  klass = o['json_class']
  Object.const_get(klass).first(:id => o['id'])
end
# Serialise this resource to a Hash suitable for the REST API.
#
# objs - cycle guard: resources already serialised in this pass; when
#        this resource is present only the basic fields are returned.
# opts - href options, passed through to #href.
#
# Returns the Hash.
def to_hash(objs = {}, opts = {})
  #debug "to_hash:opts: #{opts.keys.inspect}::#{objs.keys.inspect}::"
  h = {}
  uuid = h[:uuid] = self.uuid.to_s
  h[:href] = self.href(opts)
  name = self.name
  # Names starting with '_' are internal and not exposed.
  if name && ! name.start_with?('_')
    h[:name] = self.name
  end
  h[:type] = self.resource_type || 'unknown'
  # Break cycles: stop before expanding oproperties a second time.
  return h if objs.key?(self)
  objs[self] = true
  _oprops_to_hash(h)
  h
end
# The class-wide default prefix used by #href when no explicit
# :href_prefix option is supplied.
def default_href_prefix
  @@default_href_prefix
end
# Merge declared oproperty values into +h+.
#
# Walks up the class hierarchy so properties declared on superclasses
# (registered per class in @@oprops) are included as well. Referenced
# OResource values are flattened to their uuid string; empty arrays
# are skipped entirely.
#
# Returns +h+.
def _oprops_to_hash(h)
  klass = self.class
  while klass
    if op = @@oprops[klass]
      op.each do |k, v|
        k = k.to_sym
        unless (value = send(k)).nil?
          # Represent referenced resources by uuid only.
          if value.kind_of? OResource
            value = value.uuid.to_s
          end
          if value.kind_of? Array
            next if value.empty?
            value = value.collect do |e|
              (e.kind_of? OResource) ? e.uuid.to_s : e
            end
          end
          h[k] = value
        end
      end
    end
    klass = klass.superclass
  end
  h
end
end
# Extend array to add functionality dealing with property values
#class PropValueArray < Array
# def to_json(*a)
# {
# 'json_class' => self.class.name,
# 'els' => self.to_a.to_json
# }.to_json(*a)
# end
# def self.json_create(o)
# # http://www.ruby-lang.org/en/news/2013/02/22/json-dos-cve-2013-0269/
# v = JSON.load(o['els'])
# v
# end
#end
end # OMF::SFA
|
java_import org.lwjgl.opengl.GL11
java_import org.lwjgl.opengl.GL15
java_import org.lwjgl.opengl.GL20
java_import org.lwjgl.opengl.GL30
java_import org.lwjgl.opengl.GL32
java_import org.lwjgl.BufferUtils
java_import org.lwjgl.input.Keyboard
java_import org.lwjgl.util.vector.Matrix4f
java_import org.lwjgl.util.vector.Vector3f
require "opengl/gl_utils"
require "pry"
#
# Attempting to do a pyramid that is interactive.
#
class OpenGL::InteractivePyramid
include OpenGL::GLUtils
add_start
#position constants (extents of the pyramid base, in model units)
RIGHT_EXTENT = 0.5
LEFT_EXTENT = -RIGHT_EXTENT
TOP_EXTENT = 0.5
BOTTOM_EXTENT = -TOP_EXTENT
# Depth: the pyramid sits between z = -3 (front) and z = -3.5 (rear).
FRONT_EXTENT = -3
REAR_EXTENT = -3.5
#colour constants (RGBA, 0..1)
# NOTE(review): GREEN_COLOUR holds a pale blue and BLUE_COLOUR a dark
# green -- the names and values look swapped (the values match the
# arcsynthesis tutorial data); confirm before relying on the names.
GREEN_COLOUR = [0.75, 0.75, 1.0, 1.0]
BLUE_COLOUR = [0.0, 0.5, 0.0, 1.0]
RED_COLOUR = [1.0, 0.0, 0.0, 1.0]
GREY_COLOUR = [0.8, 0.8, 0.8, 1.0]
BROWN_COLOUR = [0.5, 0.5, 0.0, 1.0]
# Constructor -- builds all GL state, runs the render loop until the
# window is closed, then tears the display down.
def initialize
  init_vertex_data
  create_display("Interactive Pyramid");
  #initialise the viewport
  GL11.gl_viewport(0, 0, Display.width, Display.height)
  init_program
  init_vertex_buffer
  init_vertex_array_objects
  # Cull back faces; the vertex data is wound clockwise.
  GL11.gl_enable(GL11::GL_CULL_FACE)
  GL11.gl_cull_face(GL11::GL_BACK)
  GL11.gl_front_face(GL11::GL_CW)
  # Standard depth testing over the full depth range.
  GL11.gl_enable(GL11::GL_DEPTH_TEST)
  GL11.gl_depth_mask(true)
  GL11.gl_depth_func(GL11::GL_LEQUAL)
  GL11.gl_depth_range(0.0, 1.0)
  # Rotation state (radians) and reusable matrices/buffers so the
  # render loop does not allocate per frame.
  @y_rotation = 0
  @x_rotation = 0
  @x_rotation_matrix = Matrix4f.new
  @x_rotation_buffer = BufferUtils.create_float_buffer 16
  @y_rotation_matrix = Matrix4f.new
  @y_rotation_buffer = BufferUtils.create_float_buffer 16
  render_loop do
    input
    display
  end
  destroy_display
end
#initialise the vertex buffer
#
# Uploads the interleaved position/colour floats into a
# GL_ARRAY_BUFFER and the triangle indices into a
# GL_ELEMENT_ARRAY_BUFFER; both targets are unbound afterwards.
def init_vertex_buffer
  @vertex_buffer_id = GL15.gl_gen_buffers
  GL15.gl_bind_buffer(GL15::GL_ARRAY_BUFFER, @vertex_buffer_id)
  buffer = BufferUtils.create_float_buffer(@vertex_data.size).put(@vertex_data.to_java(:float)).flip
  GL15.gl_buffer_data(GL15::GL_ARRAY_BUFFER, buffer, GL15::GL_STATIC_DRAW)
  GL15.gl_bind_buffer(GL15::GL_ARRAY_BUFFER, 0)
  @index_buffer_id = GL15.gl_gen_buffers
  GL15.gl_bind_buffer(GL15::GL_ELEMENT_ARRAY_BUFFER, @index_buffer_id)
  buffer = BufferUtils.create_short_buffer(@index_data.size).put(@index_data.to_java(:short)).flip
  GL15.gl_buffer_data(GL15::GL_ELEMENT_ARRAY_BUFFER, buffer, GL15::GL_STATIC_DRAW)
  GL15.gl_bind_buffer(GL15::GL_ELEMENT_ARRAY_BUFFER, 0)
end
# initialise the vertex array objects
#
# The VAO captures: attribute 0 = position (3 floats/vertex from the
# start of the buffer), attribute 1 = colour (4 floats/vertex,
# starting after the 5 * 3 position floats), plus the index buffer.
def init_vertex_array_objects
  #first object
  @vao_id = GL30.gl_gen_vertex_arrays
  GL30.gl_bind_vertex_array(@vao_id)
  GL15.gl_bind_buffer(GL15::GL_ARRAY_BUFFER, @vertex_buffer_id)
  GL20.gl_enable_vertex_attrib_array(0)
  GL20.gl_enable_vertex_attrib_array(1)
  GL20.gl_vertex_attrib_pointer(0, 3, GL11::GL_FLOAT, false, 0, 0)
  GL20.gl_vertex_attrib_pointer(1, 4, GL11::GL_FLOAT, false, 0, 5 * 3 * FLOAT_SIZE)
  GL15.gl_bind_buffer(GL15::GL_ELEMENT_ARRAY_BUFFER, @index_buffer_id)
  GL30.gl_bind_vertex_array(0)
end
#
# Manage the input for this program
#
# Left/right arrow keys spin the pyramid about the Y axis
# (0.01 rad per frame while held); the model matrix is rebuilt
# every frame.
def input
  @y_rotation -= 0.01 if Keyboard.is_key_down Keyboard::KEY_LEFT
  @y_rotation += 0.01 if Keyboard.is_key_down Keyboard::KEY_RIGHT
  calc_y_rotation
end
# render a frame
#
# Clears colour+depth, uploads the current Y-rotation matrix as the
# modelToCameraMatrix uniform, and draws the indexed pyramid.
def display
  #set the colour to clear.
  GL11.gl_clear_color(0.0, 0.0, 0.0, 0.0)
  #clear the buffer. Remember that Java static types come back as Ruby Constants.
  GL11.gl_clear(GL11::GL_COLOR_BUFFER_BIT | GL11::GL_DEPTH_BUFFER_BIT)
  GL20.gl_use_program(@program_id)
  GL20.gl_uniform_matrix4(@transform_matrix_location, false, @y_rotation_buffer)
  GL30.gl_bind_vertex_array(@vao_id)
  GL11.gl_draw_elements(GL11::GL_TRIANGLES, @index_data.size, GL11::GL_UNSIGNED_SHORT, 0)
  #cleanup
  GL30.gl_bind_vertex_array(0)
  GL20.gl_use_program(0)
end
# initialise the program
#
# Compiles the shader program, looks up the two matrix uniforms and
# uploads a one-off perspective projection (cameraToClipMatrix) for a
# 45 degree field of view with z in [1, 10].
def init_program
  @program_id = compile_program('perspective_matrix_vertex_basic.glsl', 'colour_passthrough.glsl')
  @perspective_matrix_location = GL20.gl_get_uniform_location(@program_id, "cameraToClipMatrix")
  @transform_matrix_location = GL20.gl_get_uniform_location(@program_id, "modelToCameraMatrix")
  #set up the perspective matrix
  z_near = 1.0
  z_far = 10.0
  @frustrum_scale = calculate_frustum_scale(45.0)
  perspective_matrix_buffer = BufferUtils.create_float_buffer(16);
  perspective_matrix = Matrix4f.new
  perspective_matrix.m00 = @frustrum_scale
  perspective_matrix.m11 = @frustrum_scale
  perspective_matrix.m22 = (z_far + z_near) / (z_near - z_far)
  perspective_matrix.m32 = (2.0 * z_far * z_near) / (z_near - z_far)
  # Copy -z into w so GL performs the perspective divide.
  perspective_matrix.m23 = -1.0
  # make sure to make this 0, as this is an identity matrix to start.
  perspective_matrix.m33 = 0.0
  puts perspective_matrix.to_s
  perspective_matrix.store(perspective_matrix_buffer)
  GL20.gl_use_program(@program_id)
  GL20.gl_uniform_matrix4(@perspective_matrix_location, false, perspective_matrix_buffer.flip)
  GL20.gl_use_program(0)
end
# Frustum scale factor for a given vertical field of view.
# @param [Float] angle field of view in degrees.
# @return [Float] 1 / tan(fov / 2)
def calculate_frustum_scale(angle)
  half_angle_rad = angle * (Math::PI / 180) / 2.0
  1.0 / Math.tan(half_angle_rad)
end
#initialise the vertex data
#
# Builds the interleaved vertex list (4 base corners + apex, followed
# by per-vertex RGBA colours) and the triangle index list for the
# four sides of the pyramid (the base is left open).
def init_vertex_data
  @vertex_data = [
    #pyramid positions
    # -- bottom square
    LEFT_EXTENT, BOTTOM_EXTENT, FRONT_EXTENT,
    RIGHT_EXTENT, BOTTOM_EXTENT, FRONT_EXTENT,
    RIGHT_EXTENT, BOTTOM_EXTENT, REAR_EXTENT,
    LEFT_EXTENT, BOTTOM_EXTENT, REAR_EXTENT,
    # -- apex, centred over the base
    ((LEFT_EXTENT + RIGHT_EXTENT) / 2), TOP_EXTENT, ((FRONT_EXTENT + REAR_EXTENT) / 2),
    # per-vertex colours (nested RGBA arrays, flattened below)
    RED_COLOUR,
    RED_COLOUR,
    RED_COLOUR,
    RED_COLOUR,
    RED_COLOUR
  ]
  # Flatten the nested colour arrays into one flat float list.
  @vertex_data.flatten!
  # BUG FIX: the original line was
  #   puts "Vertex Data", #{@vertex_data}
  # where `#{...}` outside a string literal is just a comment; the
  # trailing comma then made the parser swallow the following
  # statement as a second argument. Print the intended debug string.
  puts "Vertex Data: #{@vertex_data.inspect}"
  # Four triangles, each sharing the apex (vertex 4).
  @index_data = [
    0, 4, 1, #front
    3, 4, 0, #left
    2, 1, 4, #right
    3, 2, 4  #rear
  ]
end
=begin
m00[0] m10[4] m20[8] m30[12]
m01[1] m12[5] m21[9] m31[13]
m02[2] m13[6] m22[10] m32[14]
m03[3] m14[7] m23[11] m33[15]
=end
# calculate the y rotation
#
# Builds modelToCamera = T(z) * R_y(@y_rotation) * T(-z): the pyramid
# is translated to the origin, spun about the Y axis, then moved back
# out to its resting depth (@vertex_data[14] is the apex z). The
# result is stored into @y_rotation_buffer and the buffer flipped,
# ready to be uploaded as a uniform by #display.
def calc_y_rotation
  #translate in on the z axis
  translation_to_origin = Matrix4f.new
  translation_to_origin.m32 = -(@vertex_data[14])
  #the rotate it on the Y axis.
  cos = Math.cos @y_rotation
  sin = Math.sin @y_rotation
  @y_rotation_matrix.m00 = cos
  @y_rotation_matrix.m02 = sin
  @y_rotation_matrix.m20 = -sin
  @y_rotation_matrix.m22 = cos
  #then translate it back out on the z axis.
  temp = Matrix4f.new
  Matrix4f.mul(@y_rotation_matrix, translation_to_origin, temp)
  # Reuse the same matrix object for the outward translation.
  translation_to_origin.m32 = (@vertex_data[14])
  result = Matrix4f.new
  Matrix4f.mul(translation_to_origin, temp, result)
  result.store(@y_rotation_buffer)
  @y_rotation_buffer.flip
end
end
(wip) rotation with logging.
java_import org.lwjgl.opengl.GL11
java_import org.lwjgl.opengl.GL15
java_import org.lwjgl.opengl.GL20
java_import org.lwjgl.opengl.GL30
java_import org.lwjgl.opengl.GL32
java_import org.lwjgl.BufferUtils
java_import org.lwjgl.input.Keyboard
java_import org.lwjgl.util.vector.Matrix4f
java_import org.lwjgl.util.vector.Vector3f
require "opengl/gl_utils"
require "pry"
#
# Attempting to do a pyramid that is interactive.
#
class OpenGL::InteractivePyramid
include OpenGL::GLUtils
add_start
#position constants
RIGHT_EXTENT = 0.5
LEFT_EXTENT = -RIGHT_EXTENT
TOP_EXTENT = 0.5
BOTTOM_EXTENT = -TOP_EXTENT
FRONT_EXTENT = -3
REAR_EXTENT = -3.5
#colour constants
GREEN_COLOUR = [0.75, 0.75, 1.0, 1.0]
BLUE_COLOUR = [0.0, 0.5, 0.0, 1.0]
RED_COLOUR = [1.0, 0.0, 0.0, 1.0]
GREY_COLOUR = [0.8, 0.8, 0.8, 1.0]
BROWN_COLOUR = [0.5, 0.5, 0.0, 1.0]
# Constructor
def initialize
init_vertex_data
create_display("Interactive Pyramid");
#initialise the viewport
GL11.gl_viewport(0, 0, Display.width, Display.height)
init_program
init_vertex_buffer
init_vertex_array_objects
GL11.gl_enable(GL11::GL_CULL_FACE)
GL11.gl_cull_face(GL11::GL_BACK)
GL11.gl_front_face(GL11::GL_CW)
GL11.gl_enable(GL11::GL_DEPTH_TEST)
GL11.gl_depth_mask(true)
GL11.gl_depth_func(GL11::GL_LEQUAL)
GL11.gl_depth_range(0.0, 1.0)
@y_rotation = 0
@x_rotation = 0
@x_rotation_matrix = Matrix4f.new
@x_rotation_buffer = BufferUtils.create_float_buffer 16
@y_rotation_matrix = Matrix4f.new
@y_rotation_buffer = BufferUtils.create_float_buffer 16
render_loop do
input
display
end
destroy_display
end
#initialise the vertex buffer
def init_vertex_buffer
@vertex_buffer_id = GL15.gl_gen_buffers
GL15.gl_bind_buffer(GL15::GL_ARRAY_BUFFER, @vertex_buffer_id)
buffer = BufferUtils.create_float_buffer(@vertex_data.size).put(@vertex_data.to_java(:float)).flip
GL15.gl_buffer_data(GL15::GL_ARRAY_BUFFER, buffer, GL15::GL_STATIC_DRAW)
GL15.gl_bind_buffer(GL15::GL_ARRAY_BUFFER, 0)
@index_buffer_id = GL15.gl_gen_buffers
GL15.gl_bind_buffer(GL15::GL_ELEMENT_ARRAY_BUFFER, @index_buffer_id)
buffer = BufferUtils.create_short_buffer(@index_data.size).put(@index_data.to_java(:short)).flip
GL15.gl_buffer_data(GL15::GL_ELEMENT_ARRAY_BUFFER, buffer, GL15::GL_STATIC_DRAW)
GL15.gl_bind_buffer(GL15::GL_ELEMENT_ARRAY_BUFFER, 0)
end
# initialise the vertex array objects
def init_vertex_array_objects
#first object
@vao_id = GL30.gl_gen_vertex_arrays
GL30.gl_bind_vertex_array(@vao_id)
GL15.gl_bind_buffer(GL15::GL_ARRAY_BUFFER, @vertex_buffer_id)
GL20.gl_enable_vertex_attrib_array(0)
GL20.gl_enable_vertex_attrib_array(1)
GL20.gl_vertex_attrib_pointer(0, 3, GL11::GL_FLOAT, false, 0, 0)
GL20.gl_vertex_attrib_pointer(1, 4, GL11::GL_FLOAT, false, 0, 5 * 3 * FLOAT_SIZE)
GL15.gl_bind_buffer(GL15::GL_ELEMENT_ARRAY_BUFFER, @index_buffer_id)
GL30.gl_bind_vertex_array(0)
end
#
# Manage the input for this program
#
def input
@y_rotation -= 0.01 if Keyboard.is_key_down Keyboard::KEY_LEFT
@y_rotation += 0.01 if Keyboard.is_key_down Keyboard::KEY_RIGHT
calc_y_rotation
end
# render a frame
def display
#set the colour to clear.
GL11.gl_clear_color(0.0, 0.0, 0.0, 0.0)
#clear the buffer. Remember that Java static types come back as Ruby Constants.
GL11.gl_clear(GL11::GL_COLOR_BUFFER_BIT | GL11::GL_DEPTH_BUFFER_BIT)
GL20.gl_use_program(@program_id)
GL20.gl_uniform_matrix4(@transform_matrix_location, false, @y_rotation_buffer)
GL30.gl_bind_vertex_array(@vao_id)
GL11.gl_draw_elements(GL11::GL_TRIANGLES, @index_data.size, GL11::GL_UNSIGNED_SHORT, 0)
#cleanup
GL30.gl_bind_vertex_array(0)
GL20.gl_use_program(0)
end
# initialise the program
def init_program
@program_id = compile_program('perspective_matrix_vertex_basic.glsl', 'colour_passthrough.glsl')
@perspective_matrix_location = GL20.gl_get_uniform_location(@program_id, "cameraToClipMatrix")
@transform_matrix_location = GL20.gl_get_uniform_location(@program_id, "modelToCameraMatrix")
#set up the perspective matrix
z_near = 1.0
z_far = 10.0
@frustrum_scale = calculate_frustum_scale(45.0)
perspective_matrix_buffer = BufferUtils.create_float_buffer(16);
perspective_matrix = Matrix4f.new
perspective_matrix.m00 = @frustrum_scale
perspective_matrix.m11 = @frustrum_scale
perspective_matrix.m22 = (z_far + z_near) / (z_near - z_far)
perspective_matrix.m32 = (2.0 * z_far * z_near) / (z_near - z_far)
perspective_matrix.m23 = -1.0
# make sure to make this 0, as this is an identity matrix to start.
perspective_matrix.m33 = 0.0
puts perspective_matrix.to_s
perspective_matrix.store(perspective_matrix_buffer)
GL20.gl_use_program(@program_id)
GL20.gl_uniform_matrix4(@perspective_matrix_location, false, perspective_matrix_buffer.flip)
GL20.gl_use_program(0)
end
# calculate the frustrum scale
# @param [Float] angle in degrees.
def calculate_frustum_scale(angle)
return (1.0 / Math.tan((angle * (Math::PI / 180)) / 2.0))
end
#initialise the vertex data
#
# Builds the interleaved vertex list (4 base corners + apex, followed
# by per-vertex RGBA colours) and the triangle index list for the
# four sides of the pyramid (the base is left open).
def init_vertex_data
  @vertex_data = [
    #pyramid positions
    # -- bottom square
    LEFT_EXTENT, BOTTOM_EXTENT, FRONT_EXTENT,
    RIGHT_EXTENT, BOTTOM_EXTENT, FRONT_EXTENT,
    RIGHT_EXTENT, BOTTOM_EXTENT, REAR_EXTENT,
    LEFT_EXTENT, BOTTOM_EXTENT, REAR_EXTENT,
    # -- apex, centred over the base
    ((LEFT_EXTENT + RIGHT_EXTENT) / 2), TOP_EXTENT, ((FRONT_EXTENT + REAR_EXTENT) / 2),
    # per-vertex colours (nested RGBA arrays, flattened below)
    RED_COLOUR,
    RED_COLOUR,
    RED_COLOUR,
    RED_COLOUR,
    RED_COLOUR
  ]
  # Flatten the nested colour arrays into one flat float list.
  @vertex_data.flatten!
  # BUG FIX: the original line was
  #   puts "Vertex Data", #{@vertex_data}
  # where `#{...}` outside a string literal is just a comment; the
  # trailing comma then made the parser swallow the following
  # statement as a second argument. Print the intended debug string.
  puts "Vertex Data: #{@vertex_data.inspect}"
  # Four triangles, each sharing the apex (vertex 4).
  @index_data = [
    0, 4, 1, #front
    3, 4, 0, #left
    2, 1, 4, #right
    3, 2, 4  #rear
  ]
end
=begin
m00[0] m10[4] m20[8] m30[12]
m01[1] m12[5] m21[9] m31[13]
m02[2] m13[6] m22[10] m32[14]
m03[3] m14[7] m23[11] m33[15]
=end
# calculate the y rotation
#
# Builds modelToCamera = T(z) * R_y(@y_rotation) * T(-z) exactly like
# the non-logging variant, but additionally resets the shared
# rotation matrix and logs the resulting matrix each frame.
def calc_y_rotation
  #translate in on the z axis
  translation_to_origin = Matrix4f.new
  translation_to_origin.m32 = -(@vertex_data[14])
  #the rotate it on the Y axis.
  cos = Math.cos @y_rotation
  sin = Math.sin @y_rotation
  @y_rotation_matrix.m00 = cos
  @y_rotation_matrix.m02 = sin
  @y_rotation_matrix.m20 = -sin
  @y_rotation_matrix.m22 = cos
  #then translate it back out on the z axis.
  temp = Matrix4f.new
  Matrix4f.mul(@y_rotation_matrix, translation_to_origin, temp)
  translation_to_origin.m32 = (@vertex_data[14])
  result = Matrix4f.new
  # Reset the shared rotation matrix so no stale terms survive into
  # the next frame's assignments above (it is not used again below).
  @y_rotation_matrix.set_identity
  Matrix4f.mul(translation_to_origin, temp, result)
  # Debug logging of the composed matrix.
  puts "\\- MATRIX [#{@y_rotation}] -/"
  puts result.to_s
  puts '/- MATRIX -\\'
=begin
\- MATRIX [0.4000000000000002] -/
0.921061 0.0 -0.38941833 -1.2656096
0.0 1.0 0.0 0.0
0.38941833 0.0 0.921061 -0.25655174
0.0 0.0 0.0 1.0
/- MATRIX -\
=end
  result.store(@y_rotation_buffer)
  @y_rotation_buffer.flip
end
end |
require 'fog/core'
module Fog
module OpenStack
extend Fog::Provider
module Errors
class ServiceError < Fog::Errors::Error
attr_reader :response_data
# Build a ServiceError from an HTTP client error, extracting the
# JSON error message and keeping the decoded body in #response_data.
def self.slurp(error)
  if error.response.body.empty?
    data = nil
    message = nil
  else
    data = Fog::JSON.decode(error.response.body)
    message = data['message']
    # Some services nest the message one level down, e.g.
    # {"itemNotFound" => {"message" => "..."}}.
    if message.nil? and !data.values.first.nil?
      message = data.values.first['message']
    end
  end
  new_error = super(error, message)
  new_error.instance_variable_set(:@response_data, data)
  new_error
end
end
class InternalServerError < ServiceError; end
class Conflict < ServiceError; end
class NotFound < ServiceError; end
class ServiceUnavailable < ServiceError; end
class BadRequest < ServiceError
attr_reader :validation_errors
# Additionally expose validation errors from the 'badRequest'
# envelope of the response body, when present.
def self.slurp(error)
  new_error = super(error)
  unless new_error.response_data.nil? or new_error.response_data['badRequest'].nil?
    new_error.instance_variable_set(:@validation_errors, new_error.response_data['badRequest']['validationErrors'])
  end
  new_error
end
end
end
service(:compute , 'openstack/compute' , 'Compute' )
service(:identity, 'openstack/identity', 'Identity')
service(:network, 'openstack/network', 'Network')
# legacy v1.0 style auth
#
# Authenticates via X-Auth-Key / X-Auth-User headers against
# options[:openstack_auth_uri].
#
# Returns a hash with the :token, :server_management_url and
# :identity_public_endpoint taken from the response headers.
def self.authenticate_v1(options, connection_options = {})
  uri = options[:openstack_auth_uri]
  connection = Fog::Connection.new(uri.to_s, false, connection_options)
  @openstack_api_key = options[:openstack_api_key]
  @openstack_username = options[:openstack_username]
  response = connection.request({
    :expects => [200, 204],
    :headers => {
      'X-Auth-Key' => @openstack_api_key,
      'X-Auth-User' => @openstack_username
    },
    :host => uri.host,
    :method => 'GET',
    # Fall back to the default v1.0 path when the URI has none.
    :path => (uri.path and not uri.path.empty?) ? uri.path : 'v1.0'
  })
  return {
    :token => response.headers['X-Auth-Token'],
    :server_management_url => response.headers['X-Server-Management-Url'],
    :identity_public_endpoint => response.headers['X-Keystone']
  }
end
# Keystone Style Auth
#
# Authenticates against the Keystone v2 endpoint in
# options[:openstack_auth_uri], resolving a tenant when none was
# configured, and returns a hash with the token, the selected
# service/identity endpoints and the user details.
def self.authenticate_v2(options, connection_options = {})
  uri                   = options[:openstack_auth_uri]
  tenant_name           = options[:openstack_tenant]
  service_name          = options[:openstack_service_name]
  identity_service_name = options[:openstack_identity_service_name]
  endpoint_type         = (options[:openstack_endpoint_type] || 'publicURL').to_s
  openstack_region      = options[:openstack_region]

  body = retrieve_tokens_v2(options, connection_options)
  service = body['access']['serviceCatalog'].
    detect { |s| service_name.include?(s['type']) }

  options[:unscoped_token] = body['access']['token']['id']

  unless service
    # Possibly an unscoped token: resolve a tenant (the first one
    # available when none was configured) and re-authenticate scoped
    # to it.
    unless tenant_name
      response = Fog::Connection.new(
        "#{uri.scheme}://#{uri.host}:#{uri.port}/v2.0/tenants", false, connection_options).request({
        :expects => [200, 204],
        :headers => {'Content-Type' => 'application/json',
                     'Accept' => 'application/json',
                     'X-Auth-Token' => body['access']['token']['id']},
        :host => uri.host,
        :method => 'GET'
      })
      body = Fog::JSON.decode(response.body)
      if body['tenants'].empty?
        raise Errors::NotFound.new('No Tenant Found')
      else
        options[:openstack_tenant] = body['tenants'].first['name']
      end
    end
    body = retrieve_tokens_v2(options, connection_options)
    service = body['access']['serviceCatalog'].
      detect { |s| service_name.include?(s['type']) }
  end

  # BUG FIX: report the missing service *before* dereferencing
  # service['endpoints'] -- previously a nil service crashed with
  # NoMethodError before this friendly error could be raised.
  unless service
    available = body['access']['serviceCatalog'].map { |endpoint|
      endpoint['type']
    }.sort.join ', '
    missing = service_name.join ', '
    raise Errors::NotFound, "Could not find service #{missing}. Have #{available}"
  end

  if openstack_region
    service['endpoints'] = service['endpoints'].select do |endpoint|
      endpoint['region'] == openstack_region
    end
  end

  if service['endpoints'].count > 1
    regions = service['endpoints'].map { |e| e['region'] }.uniq.join(',')
    raise Errors::NotFound.new("Multiple regions available choose one of these '#{regions}'")
  end

  identity_service = body['access']['serviceCatalog'].
    detect { |x| identity_service_name.include?(x['type']) } if identity_service_name
  tenant = body['access']['token']['tenant']
  user = body['access']['user']

  management_url = service['endpoints'].detect { |s| s[endpoint_type] }[endpoint_type]
  identity_url = identity_service['endpoints'].detect { |s| s['publicURL'] }['publicURL'] if identity_service

  {
    :user => user,
    :tenant => tenant,
    :identity_public_endpoint => identity_url,
    :server_management_url => management_url,
    :token => body['access']['token']['id'],
    :expires => body['access']['token']['expires'],
    :current_user_id => body['access']['user']['id'],
    :unscoped_token => options[:unscoped_token]
  }
end
# Request a token from Keystone v2.
#
# Authenticates either with an existing token
# (options[:openstack_auth_token] / :unscoped_token) or with
# username/password credentials, optionally scoped to a tenant.
#
# Returns the decoded JSON response body.
def self.retrieve_tokens_v2(options, connection_options = {})
  api_key     = options[:openstack_api_key].to_s
  username    = options[:openstack_username].to_s
  tenant_name = options[:openstack_tenant].to_s
  auth_token  = options[:openstack_auth_token] || options[:unscoped_token]
  uri         = options[:openstack_auth_uri]

  connection = Fog::Connection.new(uri.to_s, false, connection_options)
  request_body = {:auth => Hash.new}
  if auth_token
    request_body[:auth][:token] = {
      :id => auth_token
    }
  else
    request_body[:auth][:passwordCredentials] = {
      :username => username,
      :password => api_key
    }
  end
  # BUG FIX: tenant_name was forced to a String above, so it is
  # always truthy -- `if tenant_name` used to send an empty
  # tenantName even for unscoped requests; only scope the request
  # when a tenant was actually supplied.
  request_body[:auth][:tenantName] = tenant_name unless tenant_name.empty?

  response = connection.request({
    :expects => [200, 204],
    :headers => {'Content-Type' => 'application/json'},
    :body => Fog::JSON.encode(request_body),
    :host => uri.host,
    :method => 'POST',
    :path => (uri.path and not uri.path.empty?) ? uri.path : 'v2.0'
  })
  Fog::JSON.decode(response.body)
end
end
end
[openstack] Show an error message when there aren't any endpoints available for a region
require 'fog/core'
module Fog
module OpenStack
extend Fog::Provider
module Errors
class ServiceError < Fog::Errors::Error
attr_reader :response_data
def self.slurp(error)
if error.response.body.empty?
data = nil
message = nil
else
data = Fog::JSON.decode(error.response.body)
message = data['message']
if message.nil? and !data.values.first.nil?
message = data.values.first['message']
end
end
new_error = super(error, message)
new_error.instance_variable_set(:@response_data, data)
new_error
end
end
class InternalServerError < ServiceError; end
class Conflict < ServiceError; end
class NotFound < ServiceError; end
class ServiceUnavailable < ServiceError; end
class BadRequest < ServiceError
attr_reader :validation_errors
def self.slurp(error)
new_error = super(error)
unless new_error.response_data.nil? or new_error.response_data['badRequest'].nil?
new_error.instance_variable_set(:@validation_errors, new_error.response_data['badRequest']['validationErrors'])
end
new_error
end
end
end
service(:compute , 'openstack/compute' , 'Compute' )
service(:identity, 'openstack/identity', 'Identity')
service(:network, 'openstack/network', 'Network')
# legacy v1.0 style auth
def self.authenticate_v1(options, connection_options = {})
uri = options[:openstack_auth_uri]
connection = Fog::Connection.new(uri.to_s, false, connection_options)
@openstack_api_key = options[:openstack_api_key]
@openstack_username = options[:openstack_username]
response = connection.request({
:expects => [200, 204],
:headers => {
'X-Auth-Key' => @openstack_api_key,
'X-Auth-User' => @openstack_username
},
:host => uri.host,
:method => 'GET',
:path => (uri.path and not uri.path.empty?) ? uri.path : 'v1.0'
})
return {
:token => response.headers['X-Auth-Token'],
:server_management_url => response.headers['X-Server-Management-Url'],
:identity_public_endpoint => response.headers['X-Keystone']
}
end
# Keystone Style Auth
#
# Authenticates against the Keystone v2 endpoint, resolving a tenant
# when none was configured. Endpoints are filtered by
# options[:openstack_region] when set, raising NotFound when the
# region leaves no endpoints.
def self.authenticate_v2(options, connection_options = {})
  uri                   = options[:openstack_auth_uri]
  tenant_name           = options[:openstack_tenant]
  service_name          = options[:openstack_service_name]
  identity_service_name = options[:openstack_identity_service_name]
  endpoint_type         = (options[:openstack_endpoint_type] || 'publicURL').to_s
  openstack_region      = options[:openstack_region]

  body = retrieve_tokens_v2(options, connection_options)
  service = body['access']['serviceCatalog'].
    detect { |s| service_name.include?(s['type']) }

  options[:unscoped_token] = body['access']['token']['id']

  unless service
    # Possibly an unscoped token: resolve a tenant (the first one
    # available when none was configured) and re-authenticate scoped
    # to it.
    unless tenant_name
      response = Fog::Connection.new(
        "#{uri.scheme}://#{uri.host}:#{uri.port}/v2.0/tenants", false, connection_options).request({
        :expects => [200, 204],
        :headers => {'Content-Type' => 'application/json',
                     'Accept' => 'application/json',
                     'X-Auth-Token' => body['access']['token']['id']},
        :host => uri.host,
        :method => 'GET'
      })
      body = Fog::JSON.decode(response.body)
      if body['tenants'].empty?
        raise Errors::NotFound.new('No Tenant Found')
      else
        options[:openstack_tenant] = body['tenants'].first['name']
      end
    end
    body = retrieve_tokens_v2(options, connection_options)
    service = body['access']['serviceCatalog'].
      detect { |s| service_name.include?(s['type']) }
  end

  # BUG FIX: report the missing service *before* dereferencing
  # service['endpoints'] -- previously a nil service crashed with
  # NoMethodError before this friendly error could be raised.
  unless service
    available = body['access']['serviceCatalog'].map { |endpoint|
      endpoint['type']
    }.sort.join ', '
    missing = service_name.join ', '
    raise Errors::NotFound, "Could not find service #{missing}. Have #{available}"
  end

  if openstack_region
    service['endpoints'] = service['endpoints'].select do |endpoint|
      endpoint['region'] == openstack_region
    end
    if service['endpoints'].empty?
      raise Errors::NotFound.new("No endpoints available for region '#{openstack_region}'")
    end
  end

  if service['endpoints'].count > 1
    regions = service['endpoints'].map { |e| e['region'] }.uniq.join(',')
    raise Errors::NotFound.new("Multiple regions available choose one of these '#{regions}'")
  end

  identity_service = body['access']['serviceCatalog'].
    detect { |x| identity_service_name.include?(x['type']) } if identity_service_name
  tenant = body['access']['token']['tenant']
  user = body['access']['user']

  management_url = service['endpoints'].detect { |s| s[endpoint_type] }[endpoint_type]
  identity_url = identity_service['endpoints'].detect { |s| s['publicURL'] }['publicURL'] if identity_service

  {
    :user => user,
    :tenant => tenant,
    :identity_public_endpoint => identity_url,
    :server_management_url => management_url,
    :token => body['access']['token']['id'],
    :expires => body['access']['token']['expires'],
    :current_user_id => body['access']['user']['id'],
    :unscoped_token => options[:unscoped_token]
  }
end
# Request a token from Keystone v2.
#
# Authenticates either with an existing token
# (options[:openstack_auth_token] / :unscoped_token) or with
# username/password credentials, optionally scoped to a tenant.
#
# Returns the decoded JSON response body.
def self.retrieve_tokens_v2(options, connection_options = {})
  api_key     = options[:openstack_api_key].to_s
  username    = options[:openstack_username].to_s
  tenant_name = options[:openstack_tenant].to_s
  auth_token  = options[:openstack_auth_token] || options[:unscoped_token]
  uri         = options[:openstack_auth_uri]

  connection = Fog::Connection.new(uri.to_s, false, connection_options)
  request_body = {:auth => Hash.new}
  if auth_token
    request_body[:auth][:token] = {
      :id => auth_token
    }
  else
    request_body[:auth][:passwordCredentials] = {
      :username => username,
      :password => api_key
    }
  end
  # BUG FIX: tenant_name was forced to a String above, so it is
  # always truthy -- `if tenant_name` used to send an empty
  # tenantName even for unscoped requests; only scope the request
  # when a tenant was actually supplied.
  request_body[:auth][:tenantName] = tenant_name unless tenant_name.empty?

  response = connection.request({
    :expects => [200, 204],
    :headers => {'Content-Type' => 'application/json'},
    :body => Fog::JSON.encode(request_body),
    :host => uri.host,
    :method => 'POST',
    :path => (uri.path and not uri.path.empty?) ? uri.path : 'v2.0'
  })
  Fog::JSON.decode(response.body)
end
end
end
|
module Origen
class Generator
class Compiler
module DocHelpers
# Helpers to create Yammer widgets
module Yammer
# Render an embedded Yammer comment feed + follow button for the
# current page.
#
# options[:prompt]   - placeholder text for the comment box
# options[:group_id] - Yammer group (defaults to the app's
#                      configured yammer_group)
#
# Returns the HTML/JS snippet as a String.
def yammer_comments(options = {})
  options = {
    prompt: 'Comment on this page'
  }.merge(options)
  options[:group_id] ||= Origen.app.config.yammer_group
  <<END
<div style="position: relative">
  <hr>
  <h4>Comments</h4>
  <div id="embedded-follow" style="position:absolute; top: 18px; left: 100px;"></div>
  <div id="embedded-feed" style="height:800px;width:600px;"></div>
</div>
<script type="text/javascript" src="https://c64.assets-yammer.com/assets/platform_embed.js"></script>
<script>
  yam.connect.actionButton({
    container: "#embedded-follow",
    network: "freescale.com",
    action: "follow"
  });
</script>
<script>
  yam.connect.embedFeed({
    container: "#embedded-feed",
    feedType: "open-graph",
    config: {
      header: false,
      footer: false,
      defaultGroupId: '#{options[:group_id]}',
      promptText: '#{options[:prompt]}'
    },
    objectProperties: {
      type: 'page',
      url: '#{current_latest_url}'
    }
  });
</script>
END
end
end
include Yammer
module Disqus
# Render an embedded Disqus comment thread for the current page.
# The thread title is derived client-side from the first h1/h2/h3
# (falling back to the page title).
#
# options[:disqus_shortname] - Disqus channel (defaults to the app's
#                              configured shortname or 'origen-sdk')
#
# Returns the HTML/JS snippet as a String.
def disqus_comments(options = {})
  options = {
    disqus_shortname: Origen.app.config.disqus_shortname || 'origen-sdk'
  }.merge(options)
  # Created this other channel in error, don't use it
  if options[:disqus_shortname].to_s == 'origensdk'
    options[:disqus_shortname] = 'origen-sdk'
  end
  <<END
<div style="position: relative">
  <hr>
  <h4>Comments</h4>
</div>
<div id="disqus_thread"></div>
<script type="text/javascript">
    /* * * CONFIGURATION VARIABLES * * */
    var disqus_shortname = '#{options[:disqus_shortname]}';
    var disqus_title;
    var disqus_url = 'http://' + window.location.hostname + window.location.pathname;
    disqus_title = $("h1").text();
    if (disqus_title.length == 0) {
      disqus_title = $("h2").text();
    }
    if (disqus_title.length == 0) {
      disqus_title = $("h3").text();
    }
    if (disqus_title.length == 0) {
      disqus_title = $("title").text();
    } else {
      disqus_title = disqus_title + ' (' + $("title").text() + ')';
    }
    /* * * DON'T EDIT BELOW THIS LINE * * */
    (function() {
        var dsq = document.createElement('script'); dsq.type = 'text/javascript'; dsq.async = true;
        dsq.src = '//' + disqus_shortname + '.disqus.com/embed.js';
        (document.getElementsByTagName('head')[0] || document.getElementsByTagName('body')[0]).appendChild(dsq);
    })();
</script>
<noscript>Please enable JavaScript to view the <a href="https://disqus.com/?ref_noscript" rel="nofollow">comments powered by Disqus.</a></noscript>
END
end
end
include Disqus
# Helpers for the register diagrams
module RegisterHelpers
# Returns true if some portion of the given bits falls within the
# inclusive bit range [min, max].
def _bit_in_range?(bits, max, min)
  lo = bits.position
  hi = lo + bits.size - 1
  lo <= max && hi >= min
end
# Returns the number of bits from the given bits that fall within
# the inclusive bit range [min, max].
def _num_bits_in_range(bits, max, min)
  lo = bits.position
  hi = lo + bits.size - 1
  [hi, max].min - [lo, min].max + 1
end
# Returns true if the given index lies within the inclusive
# range [min, max].
def _index_in_range?(i, max, min)
  i <= max && i >= min
end
# CSS class string describing the read/write access of +bits+,
# e.g. "readable not-writable".
def _bit_rw(bits)
  read  = bits.readable? ? 'readable' : 'not-readable'
  write = bits.writable? ? 'writable' : 'not-writable'
  "#{read} #{write}"
end
# Highest bit index (relative to the bits' own position) that still
# falls within the range capped at +max+.
def _max_bit_in_range(bits, max, _min)
  top = bits.position + bits.size - 1
  [top, max].min - bits.position
end
# Lowest bit index (relative to the bits' own position) that still
# falls within the range floored at +min+.
def _min_bit_in_range(bits, _max, min)
  [bits.position, min].max - bits.position
end
end
include RegisterHelpers
# Helpers for the test flow documentation
module TestFlowHelpers
# Anchor tag linking to the local heading for +test+
# (id "name_number", text = name).
def _test_to_local_link(test)
  anchor = "#{_test_name(test)}_#{_test_number(test)}"
  "<a href='##{anchor}'>#{_test_name(test)}</a>"
end
# Name of +test+: the flow-level name, falling back to the name of
# the first test instance.
def _test_name(test)
  flow_name = test[:flow][:name]
  flow_name || test[:instance].first[:name]
end
# Test number for +test+, checking the flow attributes :number,
# :test_number and :tnum in that order.
def _test_number(test)
  f = test[:flow]
  f[:number] || f[:test_number] || f[:tnum]
end
# Bin number for +test+: hard bin aliases first (:bin, :hard_bin,
# :hardbin), then soft bin aliases (:soft_bin, :softbin).
def _bin_number(test)
  f = test[:flow]
  f[:bin] || f[:hard_bin] || f[:hardbin] || f[:soft_bin] || f[:softbin]
end
# Soft bin number for +test+ (:soft_bin or :softbin).
def _sbin_number(test)
  f = test[:flow]
  f[:soft_bin] || f[:softbin]
end
# Open a collapsible Bootstrap accordion panel with the given
# heading; must be paired with a later _stop_accordion. Each call
# bumps @_accordion_index so the collapse targets stay unique.
#
# options[:panel] - Bootstrap panel style suffix (default :default)
#
# Returns the opening HTML as a String.
def _start_accordion(heading, options = {})
  options = {
    panel: :default
  }.merge(options)
  @_accordion_index ||= 0
  @_accordion_index += 1
  <<-END
<div class="panel panel-#{options[:panel]}">
  <a href="#_" class="expand-collapse-switch btn btn-xs pull-right btn-default" state="0"><i class='fa fa-plus'></i></a>
  <div class="panel-heading clickable" data-toggle="collapse" data-parent="#blah2" href="#collapseAccordion#{@_accordion_index}">
    #{heading}
  </div>
  <div id="collapseAccordion#{@_accordion_index}" class="panel-collapse collapse">
    <div class="panel-body" markdown="1">
  END
end
# Close the accordion panel opened by _start_accordion.
#
# Returns the closing HTML as a String.
def _stop_accordion
  <<-END
    </div>
  </div>
</div>
  END
end
# Emit the opening markup of the test-flow table, unless a table is
# already open (tracked via @_test_flow_table_open), in which case
# an empty string is returned so rows keep flowing into it.
def _start_test_flow_table
  if @_test_flow_table_open
    ''
  else
    @_test_flow_table_open = true
    <<-END
<table class="table table-condensed table-bordered flow-table">
  <thead>
    <tr>
      <th class="col1">Test</th>
      <th class="col2">Number</th>
      <th class="col3">HBin</th>
      <th class="col3">SBin</th>
      <th class="col5">Attributes</th>
      <th class="col6">Description</th>
    </tr>
  </thead>
  <tbody>
    END
  end
end
# Emit the closing markup of the test-flow table when one is open,
# resetting @_test_flow_table_open; returns an empty string when no
# table is currently open.
def _stop_test_flow_table
  if @_test_flow_table_open
    @_test_flow_table_open = false
    <<-END
  </tbody>
</table>
    END
  else
    ''
  end
end
end
include TestFlowHelpers
# Helpers for the searchable doc layout
module SearchableHelpers
# Resolve options[:root] to the documentation root directory as a
# Pathname, checking the literal path first and then falling back to
# templates/web/<root> under the app root. Results are memoized per
# root value in @_doc_root_dirs.
#
# Raises when neither location exists.
def _doc_root_dir(options)
  f = options[:root]
  @_doc_root_dirs ||= {}
  return @_doc_root_dirs[f] if @_doc_root_dirs[f]
  unless File.exist?(f)
    f = Pathname.new("#{Origen.root}/templates/web/#{f}")
    unless f.exist?
      fail "#{options[:root]} does not exist!"
    end
  end
  f = Pathname.new(f) if f.is_a?(String)
  @_doc_root_dirs[options[:root]] = f
end
def _resolve_tab(options)
tab = tab.to_s.downcase
active = false
if options[:tab]
options[:tab]
else
rel = options[:top_level_file].relative_path_from(_doc_root_dir(options)).sub_ext('').sub_ext('').to_s
# If the file lives outside of the current app (e.g. it comes from a plugin), then the above approach
# doesn't work, so let's just take the last dirname and the filename
if rel =~ /\.\./
dir = options[:top_level_file].dirname.basename
file = options[:top_level_file].basename('.*').basename('.*') # twice to allow for my_file.md.erb
rel = "#{dir}_#{file}"
end
rel.gsub(/(\/|\\)/, '_').downcase.to_sym
end
end
def _root_path(options)
root = Pathname.new("#{Origen.root}/templates/web")
_doc_root_dir(options).relative_path_from(root)
end
end
include SearchableHelpers
end
include DocHelpers
end
end
end
Rescue added for the mismatched-drive issue on Windows (relative_path_from raises when paths are on different drives)
module Origen
class Generator
class Compiler
module DocHelpers
# Helpers to create Yammer widgets
module Yammer
  # Renders an embedded Yammer comment feed (plus a follow button) for
  # the current page. options[:prompt] sets the comment prompt text;
  # options[:group_id] defaults to the app's configured Yammer group.
  # Returns the HTML/JS snippet as a String.
  def yammer_comments(options = {})
    options = {
      prompt: 'Comment on this page'
    }.merge(options)
    options[:group_id] ||= Origen.app.config.yammer_group
    <<END
<div style="position: relative">
<hr>
<h4>Comments</h4>
<div id="embedded-follow" style="position:absolute; top: 18px; left: 100px;"></div>
<div id="embedded-feed" style="height:800px;width:600px;"></div>
</div>
<script type="text/javascript" src="https://c64.assets-yammer.com/assets/platform_embed.js"></script>
<script>
yam.connect.actionButton({
container: "#embedded-follow",
network: "freescale.com",
action: "follow"
});
</script>
<script>
yam.connect.embedFeed({
container: "#embedded-feed",
feedType: "open-graph",
config: {
header: false,
footer: false,
defaultGroupId: '#{options[:group_id]}',
promptText: '#{options[:prompt]}'
},
objectProperties: {
type: 'page',
url: '#{current_latest_url}'
}
});
</script>
END
  end
end
include Yammer
module Disqus
  # Renders an embedded Disqus comment thread for the current page.
  # options[:disqus_shortname] defaults to the app's configured
  # shortname, else 'origen-sdk'. The thread title is derived
  # client-side from the first h1/h2/h3/title on the page.
  # Returns the HTML/JS snippet as a String.
  def disqus_comments(options = {})
    options = {
      disqus_shortname: Origen.app.config.disqus_shortname || 'origen-sdk'
    }.merge(options)
    # Created this other channel in error, don't use it
    if options[:disqus_shortname].to_s == 'origensdk'
      options[:disqus_shortname] = 'origen-sdk'
    end
    <<END
<div style="position: relative">
<hr>
<h4>Comments</h4>
</div>
<div id="disqus_thread"></div>
<script type="text/javascript">
/* * * CONFIGURATION VARIABLES * * */
var disqus_shortname = '#{options[:disqus_shortname]}';
var disqus_title;
var disqus_url = 'http://' + window.location.hostname + window.location.pathname;
disqus_title = $("h1").text();
if (disqus_title.length == 0) {
disqus_title = $("h2").text();
}
if (disqus_title.length == 0) {
disqus_title = $("h3").text();
}
if (disqus_title.length == 0) {
disqus_title = $("title").text();
} else {
disqus_title = disqus_title + ' (' + $("title").text() + ')';
}
/* * * DON'T EDIT BELOW THIS LINE * * */
(function() {
var dsq = document.createElement('script'); dsq.type = 'text/javascript'; dsq.async = true;
dsq.src = '//' + disqus_shortname + '.disqus.com/embed.js';
(document.getElementsByTagName('head')[0] || document.getElementsByTagName('body')[0]).appendChild(dsq);
})();
</script>
<noscript>Please enable JavaScript to view the <a href="https://disqus.com/?ref_noscript" rel="nofollow">comments powered by Disqus.</a></noscript>
END
  end
end
include Disqus
# Helpers for the register diagrams
# Helpers used by the register diagram templates to work out which
# bits of a bit collection fall inside a rendered range of bit
# positions. `bits` is expected to respond to #position and #size
# (and #readable?/#writable? for _bit_rw).
module RegisterHelpers
  # True if any part of the given bits overlaps the inclusive
  # position range [min, max].
  def _bit_in_range?(bits, max, min)
    lsb = bits.position
    msb = lsb + bits.size - 1
    lsb <= max && msb >= min
  end

  # Number of bits of +bits+ that fall within [min, max].
  def _num_bits_in_range(bits, max, min)
    lsb = bits.position
    msb = lsb + bits.size - 1
    top = msb < max ? msb : max
    bottom = lsb > min ? lsb : min
    top - bottom + 1
  end

  # True if the single index +i+ lies within [min, max].
  def _index_in_range?(i, max, min)
    (min..max).cover?(i)
  end

  # CSS-style access descriptor, e.g. "readable not-writable".
  def _bit_rw(bits)
    parts = []
    parts << (bits.readable? ? 'readable' : 'not-readable')
    parts << (bits.writable? ? 'writable' : 'not-writable')
    parts.join(' ')
  end

  # Highest bit of +bits+ visible in the range, relative to the
  # bits' own base position.
  def _max_bit_in_range(bits, max, _min)
    msb = bits.position + bits.size - 1
    (msb < max ? msb : max) - bits.position
  end

  # Lowest bit of +bits+ visible in the range, relative to the
  # bits' own base position.
  def _min_bit_in_range(bits, _max, min)
    lsb = bits.position
    (lsb > min ? lsb : min) - lsb
  end
end
include RegisterHelpers
# Helpers for the test flow documentation
module TestFlowHelpers
  # HTML link to the test's section on the current page, anchored by
  # "#<name>_<number>".
  def _test_to_local_link(test)
    name = _test_name(test)
    number = _test_number(test)
    "<a href='##{name}_#{number}'>#{name}</a>"
  end

  # Display name of a test: the flow entry's name, falling back to the
  # name of the first test instance.
  def _test_name(test)
    test[:flow][:name] || test[:instance].first[:name]
  end

  # Test number, accepting the common flow attribute aliases.
  def _test_number(test)
    flow = test[:flow]
    flow[:number] || flow[:test_number] || flow[:tnum]
  end

  # Hard bin number; note it falls back to the soft bin keys when no
  # hard bin attribute is present.
  def _bin_number(test)
    flow = test[:flow]
    flow[:bin] || flow[:hard_bin] || flow[:hardbin] || flow[:soft_bin] || flow[:softbin]
  end

  # Soft bin number, accepting both key spellings.
  def _sbin_number(test)
    flow = test[:flow]
    flow[:soft_bin] || flow[:softbin]
  end

  # Emits the opening HTML for a collapsible Bootstrap panel; each call
  # gets a unique collapse id via @_accordion_index. Close with
  # _stop_accordion.
  def _start_accordion(heading, options = {})
    options = {
      panel: :default
    }.merge(options)
    @_accordion_index ||= 0
    @_accordion_index += 1
    <<-END
<div class="panel panel-#{options[:panel]}">
<a href="#_" class="expand-collapse-switch btn btn-xs pull-right btn-default" state="0"><i class='fa fa-plus'></i></a>
<div class="panel-heading clickable" data-toggle="collapse" data-parent="#blah2" href="#collapseAccordion#{@_accordion_index}">
#{heading}
</div>
<div id="collapseAccordion#{@_accordion_index}" class="panel-collapse collapse">
<div class="panel-body" markdown="1">
    END
  end

  # Emits the closing tags for a panel opened by _start_accordion.
  def _stop_accordion
    <<-END
</div>
</div>
</div>
    END
  end

  # Emits the opening HTML for the test flow table, unless one is
  # already open (guarded by @_test_flow_table_open), in which case
  # returns ''.
  def _start_test_flow_table
    if @_test_flow_table_open
      ''
    else
      @_test_flow_table_open = true
      <<-END
<table class="table table-condensed table-bordered flow-table">
<thead>
<tr>
<th class="col1">Test</th>
<th class="col2">Number</th>
<th class="col3">HBin</th>
<th class="col3">SBin</th>
<th class="col5">Attributes</th>
<th class="col6">Description</th>
</tr>
</thead>
<tbody>
      END
    end
  end

  # Closes the test flow table if one is open; returns '' otherwise.
  def _stop_test_flow_table
    if @_test_flow_table_open
      @_test_flow_table_open = false
      <<-END
</tbody>
</table>
      END
    else
      ''
    end
  end
end
include TestFlowHelpers
# Helpers for the searchable doc layout
module SearchableHelpers
  # Resolves options[:root] to a Pathname, trying it first as given and
  # then relative to <app>/templates/web. Results are memoized per root
  # in @_doc_root_dirs. Fails if neither location exists.
  def _doc_root_dir(options)
    f = options[:root]
    @_doc_root_dirs ||= {}
    return @_doc_root_dirs[f] if @_doc_root_dirs[f]
    unless File.exist?(f)
      f = Pathname.new("#{Origen.root}/templates/web/#{f}")
      unless f.exist?
        fail "#{options[:root]} does not exist!"
      end
    end
    # f is still a String when the first File.exist? check succeeded
    f = Pathname.new(f) if f.is_a?(String)
    @_doc_root_dirs[options[:root]] = f
  end

  # Works out the tab id for the given page: an explicit options[:tab]
  # wins, otherwise the id is derived from the file's path relative to
  # the doc root (path separators become underscores).
  def _resolve_tab(options)
    # NOTE(review): the next two assignments are dead code — `tab` is
    # nil at this point so this yields "", and neither local is read.
    tab = tab.to_s.downcase
    active = false
    if options[:tab]
      options[:tab]
    else
      rel = '..'
      # relative_path_from raises ArgumentError when the paths cannot
      # be related (e.g. different drives on Windows); fall back to
      # '..' so the plugin branch below kicks in.
      begin
        rel = options[:top_level_file].relative_path_from(_doc_root_dir(options)).sub_ext('').sub_ext('').to_s
      rescue
        rel = '..'
      end
      # If the file lives outside of the current app (e.g. it comes from a plugin), then the above approach
      # doesn't work, so let's just take the last dirname and the filename
      if rel =~ /\.\./
        dir = options[:top_level_file].dirname.basename
        file = options[:top_level_file].basename('.*').basename('.*') # twice to allow for my_file.md.erb
        rel = "#{dir}_#{file}"
      end
      rel.gsub(/(\/|\\)/, '_').downcase.to_sym
    end
  end

  # Path of the doc root relative to <app>/templates/web.
  def _root_path(options)
    root = Pathname.new("#{Origen.root}/templates/web")
    _doc_root_dir(options).relative_path_from(root)
  end
end
include SearchableHelpers
end
include DocHelpers
end
end
end
|
# Freya gem namespace.
module Freya
  # Gem version string.
  VERSION = "0.3.0"
end
Bump version to 0.3.1
# Freya gem namespace.
module Freya
  # Gem version string.
  VERSION = "0.3.1"
end
|
module Pacer::Pipes
  # A pipe whose input can be extended while iterating: elements queued
  # via #add are emitted (with their metadata and path) before pulling
  # further elements from the wrapped starts iterator.
  class ExpandablePipe < RubyPipe
    def initialize
      super()
      # FIFO of [element, metadata, path] triples awaiting emission.
      @queue = java.util.LinkedList.new
    end

    # Queue an element for emission. metadata and path now default to
    # nil so callers with a bare element need not pass explicit nils;
    # existing 3-argument call sites are unaffected.
    def add(element, metadata = nil, path = nil)
      @queue.add [element, metadata, path]
    end

    # Metadata associated with the element most recently returned by #next.
    def metadata
      @metadata
    end

    # Advances the pipe, then promotes the staged metadata/path so they
    # describe the element just returned. The ensure runs even when
    # super raises (e.g. end of iteration).
    def next
      super
    ensure
      @path = @next_path
      @metadata = @next_metadata
    end

    protected

    # Core pipe step: drain the queue first, otherwise fall through to
    # the wrapped starts, staging path/metadata for the ensure in #next.
    def processNextStart
      if @queue.isEmpty
        @next_metadata = nil
        r = @starts.next
        if @path_enabled
          if @starts.respond_to? :path
            @next_path = @starts.path
          else
            @next_path = java.util.ArrayList.new
          end
        end
        r
      else
        element, @next_metadata, @next_path = @queue.remove
        element
      end
    end

    # Copy of the current path as a Java ArrayList (empty if no path).
    def getPathToHere
      path = java.util.ArrayList.new
      if @path
        @path.each do |e|
          path.add e
        end
      end
      path
    end
  end
end
Optional args when adding to expandable pipe.
module Pacer::Pipes
  # A pipe whose input can be extended while iterating: elements queued
  # via #add are emitted (with their metadata and path) before pulling
  # further elements from the wrapped starts iterator.
  class ExpandablePipe < RubyPipe
    def initialize
      super()
      # FIFO of [element, metadata, path] triples awaiting emission.
      @queue = java.util.LinkedList.new
    end

    # Queue an element for emission; metadata and path are optional.
    def add(element, metadata = nil, path = nil)
      @queue.add [element, metadata, path]
    end

    # Metadata associated with the element most recently returned by #next.
    def metadata
      @metadata
    end

    # Advances the pipe, then promotes the staged metadata/path so they
    # describe the element just returned. The ensure runs even when
    # super raises (e.g. end of iteration).
    def next
      super
    ensure
      @path = @next_path
      @metadata = @next_metadata
    end

    protected

    # Core pipe step: drain the queue first, otherwise fall through to
    # the wrapped starts, staging path/metadata for the ensure in #next.
    def processNextStart
      if @queue.isEmpty
        @next_metadata = nil
        r = @starts.next
        if @path_enabled
          if @starts.respond_to? :path
            @next_path = @starts.path
          else
            @next_path = java.util.ArrayList.new
          end
        end
        r
      else
        element, @next_metadata, @next_path = @queue.remove
        element
      end
    end

    # Copy of the current path as a Java ArrayList (empty if no path).
    def getPathToHere
      path = java.util.ArrayList.new
      if @path
        @path.each do |e|
          path.add e
        end
      end
      path
    end
  end
end
|
module GELF
  # Graylog2 notifier: builds GELF messages and sends them to a
  # graylog2-server over UDP, chunking payloads larger than
  # max_chunk_size.
  class Notifier
    @last_chunk_id = 0
    class << self
      # Class-wide counter used to build unique ids for chunked messages.
      attr_accessor :last_chunk_id
    end

    attr_accessor :host, :port, :default_options, :enabled
    attr_reader :max_chunk_size, :level

    # +host+ and +port+ are host/ip and port of graylog2-server.
    # +max_size+ is passed to max_chunk_size=.
    # +default_options+ is used in notify!
    def initialize(host = 'localhost', port = 12201, max_size = 'WAN', default_options = {})
      @enabled = true
      self.level = GELF::DEBUG
      self.host, self.port, self.max_chunk_size = host, port, max_size
      self.default_options = default_options
      self.default_options['version'] = SPEC_VERSION
      self.default_options['host'] ||= Socket.gethostname
      self.default_options['level'] ||= GELF::UNKNOWN
      self.default_options['facility'] ||= 'gelf-rb'
      @sender = RubyUdpSender.new(host, port)
    end

    # +size+ may be a number of bytes, 'WAN' (1420 bytes) or 'LAN' (8154).
    # Default (safe) value is 'WAN'.
    def max_chunk_size=(size)
      size_s = size.to_s.downcase
      if size_s == 'wan'
        @max_chunk_size = 1420
      elsif size_s == 'lan'
        @max_chunk_size = 8154
      else
        @max_chunk_size = size.to_int
      end
    end

    # Accepts a numeric severity or a level name (e.g. :debug, 'WARN').
    def level=(new_level)
      # Fix: Integer instead of Fixnum. Fixnum was deprecated in
      # Ruby 2.4 and removed in 3.2; Integer matches the same values
      # on every Ruby version.
      @level = if new_level.is_a?(Integer)
        new_level
      else
        GELF.const_get(new_level.to_s.upcase)
      end
    end

    def disable
      @enabled = false
    end

    def enable
      @enabled = true
    end

    # Same as notify!, but rescues all exceptions (including +ArgumentError+)
    # and sends them instead.
    def notify(*args)
      notify_with_level(nil, *args)
    end

    # Sends message to Graylog2 server.
    # +args+ can be:
    # - hash-like object (any object which responds to +to_hash+, including +Hash+ instance):
    #   notify!(:short_message => 'All your rebase are belong to us', :user => 'AlekSi')
    # - exception with optional hash-like object:
    #   notify!(SecurityError.new('ALARM!'), :trespasser => 'AlekSi')
    # - string-like object (anything which responds to +to_s+) with optional hash-like object:
    #   notify!('Plain olde text message', :scribe => 'AlekSi')
    # Resulted fields are merged with +default_options+, the latter will never overwrite the former.
    # This method will raise +ArgumentError+ if arguments are wrong. Consider using notify instead.
    def notify!(*args)
      notify_with_level!(nil, *args)
    end

    # Define one shortcut per severity: debug(...), info(...), etc.
    GELF::Levels.constants.each do |const|
      class_eval <<-EOT, __FILE__, __LINE__ + 1
def #{const.downcase}(*args) # def debug(*args)
notify_with_level(GELF::#{const}, *args) # notify_with_level(GELF::DEBUG, *args)
end # end
      EOT
    end

    private

    # Best-effort variant: any failure (even in argument handling) is
    # itself reported as an UNKNOWN-level message instead of raised.
    # Rescuing Exception is deliberate here — a logger must never kill
    # the host application.
    def notify_with_level(message_level, *args)
      notify_with_level!(message_level, *args)
    rescue Exception => exception
      notify_with_level!(GELF::UNKNOWN, exception)
    end

    # Builds @hash from args and sends it when the notifier is enabled
    # and the message level passes the threshold.
    def notify_with_level!(message_level, *args)
      return unless @enabled
      extract_hash(*args)
      @hash['level'] = message_level unless message_level.nil?
      if @hash['level'] >= level
        @sender.send_datagrams(datagrams_from_hash)
      end
    end

    # Normalises the supported argument shapes (hash / exception /
    # string) into the GELF @hash, merged over default_options.
    def extract_hash(object = nil, args = {})
      primary_data = if object.respond_to?(:to_hash)
        object.to_hash
      elsif object.is_a?(Exception)
        args['level'] ||= GELF::ERROR
        self.class.extract_hash_from_exception(object)
      else
        args['level'] ||= GELF::INFO
        { 'short_message' => object.to_s }
      end
      @hash = default_options.merge(args.merge(primary_data))
      stringify_hash_keys
      convert_hoptoad_keys_to_graylog2
      set_file_and_line
      set_timestamp
      check_presence_of_mandatory_attributes
      @hash
    end

    # Maps an exception to short_message/full_message fields.
    def self.extract_hash_from_exception(exception)
      bt = exception.backtrace || ["Backtrace is not available."]
      { 'short_message' => "#{exception.class}: #{exception.message}", 'full_message' => "Backtrace:\n" + bt.join("\n") }
    end

    # Converts Hoptoad-specific keys in +@hash+ to Graylog2-specific.
    def convert_hoptoad_keys_to_graylog2
      if @hash['short_message'].to_s.empty?
        if @hash.has_key?('error_class') && @hash.has_key?('error_message')
          @hash['short_message'] = @hash.delete('error_class') + ': ' + @hash.delete('error_message')
        end
      end
    end

    CALLER_REGEXP = /^(.*):(\d+).*/
    LIB_GELF_PATTERN = File.join('lib', 'gelf')

    # Records the first caller frame outside lib/gelf as the message origin.
    def set_file_and_line
      stack = caller
      begin
        frame = stack.shift
      end while frame.include?(LIB_GELF_PATTERN)
      match = CALLER_REGEXP.match(frame)
      @hash['file'] = match[1]
      @hash['line'] = match[2].to_i
    end

    def set_timestamp
      @hash['timestamp'] = Time.now.utc.to_f
    end

    # The GELF spec requires these three fields to be present.
    def check_presence_of_mandatory_attributes
      %w(version short_message host).each do |attribute|
        if @hash[attribute].to_s.empty?
          raise ArgumentError.new("#{attribute} is missing. Options version, short_message and host must be set.")
        end
      end
    end

    # Serialises @hash and splits it into GELF chunks of at most
    # max_chunk_size bytes when needed. Returns an array of datagrams.
    def datagrams_from_hash
      data = serialize_hash
      datagrams = []
      # Maximum total size is 8192 byte for UDP datagram. Split to chunks if bigger. (GELFv2 supports chunking)
      if data.count > @max_chunk_size
        id = self.class.last_chunk_id += 1
        msg_id = Digest::MD5.digest("#{Time.now.to_f}-#{id}")[0, 8]
        num, count = 0, (data.count.to_f / @max_chunk_size).ceil
        data.each_slice(@max_chunk_size) do |slice|
          datagrams << "\x1e\x0f" + msg_id + [num, count, *slice].pack('C*')
          num += 1
        end
      else
        datagrams << data.to_a.pack('C*')
      end
      datagrams
    end

    # Maps the symbolic level to its syslog number and returns the
    # deflated JSON payload as bytes.
    def serialize_hash
      raise ArgumentError.new("Hash is empty.") if @hash.nil? || @hash.empty?
      @hash['level'] = GELF::LEVELS_MAPPING[@hash['level']]
      Zlib::Deflate.deflate(@hash.to_json).bytes
    end

    def stringify_hash_keys
      @hash.keys.each do |key|
        value, key_s = @hash.delete(key), key.to_s
        @hash[key_s] = value
      end
    end
  end
end
updated docs
module GELF
  # Graylog2 notifier: builds GELF messages and sends them to a
  # graylog2-server over UDP, chunking payloads larger than
  # max_chunk_size.
  class Notifier
    @last_chunk_id = 0
    class << self
      # Class-wide counter used to build unique ids for chunked messages.
      attr_accessor :last_chunk_id
    end

    attr_accessor :host, :port, :default_options, :enabled
    attr_reader :max_chunk_size, :level

    # +host+ and +port+ are host/ip and port of graylog2-server.
    # +max_size+ is passed to max_chunk_size=.
    # +default_options+ is used in notify!
    def initialize(host = 'localhost', port = 12201, max_size = 'WAN', default_options = {})
      @enabled = true
      self.level = GELF::DEBUG
      self.host, self.port, self.max_chunk_size = host, port, max_size
      self.default_options = default_options
      self.default_options['version'] = SPEC_VERSION
      self.default_options['host'] ||= Socket.gethostname
      self.default_options['level'] ||= GELF::UNKNOWN
      self.default_options['facility'] ||= 'gelf-rb'
      @sender = RubyUdpSender.new(host, port)
    end

    # +size+ may be a number of bytes, 'WAN' (1420 bytes) or 'LAN' (8154).
    # Default (safe) value is 'WAN'.
    def max_chunk_size=(size)
      size_s = size.to_s.downcase
      if size_s == 'wan'
        @max_chunk_size = 1420
      elsif size_s == 'lan'
        @max_chunk_size = 8154
      else
        @max_chunk_size = size.to_int
      end
    end

    # Accepts a numeric severity or a level name (e.g. :debug, 'WARN').
    def level=(new_level)
      # Fix: Integer instead of Fixnum. Fixnum was deprecated in
      # Ruby 2.4 and removed in 3.2; Integer matches the same values
      # on every Ruby version.
      @level = if new_level.is_a?(Integer)
        new_level
      else
        GELF.const_get(new_level.to_s.upcase)
      end
    end

    def disable
      @enabled = false
    end

    def enable
      @enabled = true
    end

    # Same as notify!, but rescues all exceptions (including +ArgumentError+)
    # and sends them instead.
    def notify(*args)
      notify_with_level(nil, *args)
    end

    # Sends message to Graylog2 server.
    # +args+ can be:
    # - hash-like object (any object which responds to +to_hash+, including +Hash+ instance):
    #   notify!(:short_message => 'All your rebase are belong to us', :user => 'AlekSi')
    # - exception with optional hash-like object:
    #   notify!(SecurityError.new('ALARM!'), :trespasser => 'AlekSi')
    # - string-like object (anything which responds to +to_s+) with optional hash-like object:
    #   notify!('Plain olde text message', :scribe => 'AlekSi')
    # Resulted fields are merged with +default_options+, the latter will never overwrite the former.
    def notify!(*args)
      notify_with_level!(nil, *args)
    end

    # Define one shortcut per severity: debug(...), info(...), etc.
    GELF::Levels.constants.each do |const|
      class_eval <<-EOT, __FILE__, __LINE__ + 1
def #{const.downcase}(*args) # def debug(*args)
notify_with_level(GELF::#{const}, *args) # notify_with_level(GELF::DEBUG, *args)
end # end
      EOT
    end

    private

    # Best-effort variant: any failure (even in argument handling) is
    # itself reported as an UNKNOWN-level message instead of raised.
    # Rescuing Exception is deliberate here — a logger must never kill
    # the host application.
    def notify_with_level(message_level, *args)
      notify_with_level!(message_level, *args)
    rescue Exception => exception
      notify_with_level!(GELF::UNKNOWN, exception)
    end

    # Builds @hash from args and sends it when the notifier is enabled
    # and the message level passes the threshold.
    def notify_with_level!(message_level, *args)
      return unless @enabled
      extract_hash(*args)
      @hash['level'] = message_level unless message_level.nil?
      if @hash['level'] >= level
        @sender.send_datagrams(datagrams_from_hash)
      end
    end

    # Normalises the supported argument shapes (hash / exception /
    # string) into the GELF @hash, merged over default_options.
    def extract_hash(object = nil, args = {})
      primary_data = if object.respond_to?(:to_hash)
        object.to_hash
      elsif object.is_a?(Exception)
        args['level'] ||= GELF::ERROR
        self.class.extract_hash_from_exception(object)
      else
        args['level'] ||= GELF::INFO
        { 'short_message' => object.to_s }
      end
      @hash = default_options.merge(args.merge(primary_data))
      stringify_hash_keys
      convert_hoptoad_keys_to_graylog2
      set_file_and_line
      set_timestamp
      check_presence_of_mandatory_attributes
      @hash
    end

    # Maps an exception to short_message/full_message fields.
    def self.extract_hash_from_exception(exception)
      bt = exception.backtrace || ["Backtrace is not available."]
      { 'short_message' => "#{exception.class}: #{exception.message}", 'full_message' => "Backtrace:\n" + bt.join("\n") }
    end

    # Converts Hoptoad-specific keys in +@hash+ to Graylog2-specific.
    def convert_hoptoad_keys_to_graylog2
      if @hash['short_message'].to_s.empty?
        if @hash.has_key?('error_class') && @hash.has_key?('error_message')
          @hash['short_message'] = @hash.delete('error_class') + ': ' + @hash.delete('error_message')
        end
      end
    end

    CALLER_REGEXP = /^(.*):(\d+).*/
    LIB_GELF_PATTERN = File.join('lib', 'gelf')

    # Records the first caller frame outside lib/gelf as the message origin.
    def set_file_and_line
      stack = caller
      begin
        frame = stack.shift
      end while frame.include?(LIB_GELF_PATTERN)
      match = CALLER_REGEXP.match(frame)
      @hash['file'] = match[1]
      @hash['line'] = match[2].to_i
    end

    def set_timestamp
      @hash['timestamp'] = Time.now.utc.to_f
    end

    # The GELF spec requires these three fields to be present.
    def check_presence_of_mandatory_attributes
      %w(version short_message host).each do |attribute|
        if @hash[attribute].to_s.empty?
          raise ArgumentError.new("#{attribute} is missing. Options version, short_message and host must be set.")
        end
      end
    end

    # Serialises @hash and splits it into GELF chunks of at most
    # max_chunk_size bytes when needed. Returns an array of datagrams.
    def datagrams_from_hash
      data = serialize_hash
      datagrams = []
      # Maximum total size is 8192 byte for UDP datagram. Split to chunks if bigger. (GELFv2 supports chunking)
      if data.count > @max_chunk_size
        id = self.class.last_chunk_id += 1
        msg_id = Digest::MD5.digest("#{Time.now.to_f}-#{id}")[0, 8]
        num, count = 0, (data.count.to_f / @max_chunk_size).ceil
        data.each_slice(@max_chunk_size) do |slice|
          datagrams << "\x1e\x0f" + msg_id + [num, count, *slice].pack('C*')
          num += 1
        end
      else
        datagrams << data.to_a.pack('C*')
      end
      datagrams
    end

    # Maps the symbolic level to its syslog number and returns the
    # deflated JSON payload as bytes.
    def serialize_hash
      raise ArgumentError.new("Hash is empty.") if @hash.nil? || @hash.empty?
      @hash['level'] = GELF::LEVELS_MAPPING[@hash['level']]
      Zlib::Deflate.deflate(@hash.to_json).bytes
    end

    def stringify_hash_keys
      @hash.keys.each do |key|
        value, key_s = @hash.delete(key), key.to_s
        @hash[key_s] = value
      end
    end
  end
end
|
# Create a ruby project that uses RSpec and Cucumber for BDD.
module Genem
  class Project
    # +messenger+:: object responding to #puts for user feedback
    # +project_file_content_generator+:: supplies the content of each
    #   generated file
    def initialize(messenger, project_file_content_generator)
      @messenger = messenger
      @project_file_content_generator = project_file_content_generator
      @project_name = ''
    end

    # Create _project_name_ project containing a _class_name_.
    #
    # :call-seq:
    #   project = Genem::Project(STDOUT, Genem::ProjectFileContentGenerator.new(project_name))
    #   project.create(project_name, class_name) -> nil
    #
    def create(project_name, class_name)
      @project_name = project_name
      @class_name = class_name
      @project_dir = generate_path_from_name(@project_name)
      @class_file = generate_path_from_name(@class_name)
      # Report and bail out when the target directory already exists.
      if File.exist?(File.join(".", @project_dir))
        @messenger.puts("Project #{@project_name} already exists")
      else
        create_project
      end
    end

    # Convenience factory: builds the content generator and a Project,
    # then creates the project tree and files.
    #
    # :call-seq:
    #   Genem::Project.create(STDOUT, project_name, class_name) -> nil
    #
    def Project.create(messenger, project_name, class_name)
      project_file_content_generator = ProjectFileContentGenerator.new(project_name, class_name)
      project = Project.new(messenger, project_file_content_generator)
      project.create(project_name, class_name)
    end

    private

    # "MyGreatProject" -> "my_great_project" (CamelCase to snake_case).
    def generate_path_from_name(name)
      name.scan(/[A-Z][a-z0-9]+/).join('_').downcase
    end

    def create_project
      create_project_tree
      create_project_files
      @messenger.puts("Project #{@project_name} created")
    end

    # Builds the full directory skeleton under ./<project_dir>.
    def create_project_tree
      Dir.mkdir File.join(".", @project_dir)
      Dir.mkdir File.join(".", @project_dir, "features")
      Dir.mkdir File.join(".", @project_dir, "spec")
      Dir.mkdir File.join(".", @project_dir, "lib")
      Dir.mkdir File.join(".", @project_dir, "bin")
      Dir.mkdir File.join(".", @project_dir, "features/step_definitions")
      Dir.mkdir File.join(".", @project_dir, "features/support")
      Dir.mkdir File.join(".", @project_dir, "lib", @project_dir)
      Dir.mkdir File.join(".", @project_dir, "spec", @project_dir)
    end

    def create_project_files
      create_rakefile
      create_autotest_file
      create_cucumber_config_file
      create_cucumber_environment_file
      create_cucumber_step_definitions_file
      create_cucumber_feature_file
      create_project_file
      create_project_first_class_file
      create_project_spec_options_file
      create_project_spec_helper_file
      create_project_first_class_spec_file
      create_project_executable_file
    end

    def create_rakefile
      create_file(File.join(".", @project_dir, "Rakefile"),
                  @project_file_content_generator.generate_rakefile_content)
    end

    def create_autotest_file
      create_file(File.join(".", @project_dir, ".autotest"),
                  @project_file_content_generator.generate_autotest_file_content)
    end

    def create_cucumber_config_file
      create_file(File.join(".", @project_dir, "cucumber.yml"),
                  @project_file_content_generator.generate_cucumber_config_file_content)
    end

    def create_cucumber_environment_file
      create_file(File.join(".", @project_dir, "features/support", "env.rb"),
                  @project_file_content_generator.generate_cucumber_environment_file_content)
    end

    # Step definitions start out empty.
    def create_cucumber_step_definitions_file
      create_file(File.join(".", @project_dir, "features/step_definitions", "#{@project_dir}_steps.rb"))
    end

    def create_cucumber_feature_file
      create_file(File.join(".", @project_dir, "features", "first_#{@project_dir}.feature"),
                  @project_file_content_generator.generate_cucumber_feature_file_content)
    end

    def create_project_file
      create_file(File.join(".", @project_dir, "lib", "#{@project_dir}.rb"),
                  @project_file_content_generator.generate_project_file_content)
    end

    def create_project_first_class_file
      create_file(File.join(".", @project_dir, "lib", "#{@project_dir}", "#{@class_file}.rb"),
                  @project_file_content_generator.generate_project_first_class_file_content)
    end

    def create_project_spec_options_file
      create_file(File.join(".", @project_dir, "spec", "spec.opts"),
                  @project_file_content_generator.generate_project_spec_options_file_content)
    end

    def create_project_spec_helper_file
      create_file(File.join(".", @project_dir, "spec", "spec_helper.rb"),
                  @project_file_content_generator.generate_project_spec_helper_file_content)
    end

    def create_project_first_class_spec_file
      create_file(File.join(".", @project_dir, "spec", "#{@project_dir}", "#{@class_file}_spec.rb"),
                  @project_file_content_generator.generate_project_first_class_spec_file_content)
    end

    def create_project_executable_file
      create_file(File.join(".", @project_dir, "bin", "#{@project_dir}"),
                  @project_file_content_generator.generate_project_executable_file_content)
    end

    # Writes +content+ to +path+, creating/truncating the file.
    def create_file(path, content='')
      # Fix: File.open instead of Kernel#open — Kernel#open treats a
      # path beginning with '|' as a command to spawn.
      File.open(path, "w") do |f|
        f << content
      end
    end
  end
end
add missing RDoc comment
# Create a ruby project that uses RSpec and Cucumber for BDD.
module Genem
  class Project
    # +messenger+:: object responding to #puts for user feedback
    # +project_file_content_generator+:: supplies the content of each
    #   generated file
    def initialize(messenger, project_file_content_generator)
      @messenger = messenger
      @project_file_content_generator = project_file_content_generator
      @project_name = ''
    end

    # Create _project_name_ project containing a _class_name_.
    #
    # :call-seq:
    #   project = Genem::Project(STDOUT, Genem::ProjectFileContentGenerator.new(project_name))
    #   project.create(project_name, class_name) -> nil
    #
    def create(project_name, class_name)
      @project_name = project_name
      @class_name = class_name
      @project_dir = generate_path_from_name(@project_name)
      @class_file = generate_path_from_name(@class_name)
      # Report and bail out when the target directory already exists.
      if File.exist?(File.join(".", @project_dir))
        @messenger.puts("Project #{@project_name} already exists")
      else
        create_project
      end
    end

    # Create _project_name_ project containing a _class_name_.
    #
    # :call-seq:
    #   Project.create(STDOUT, project_name, class_name) -> nil
    #
    def Project.create(messenger, project_name, class_name)
      project_file_content_generator = ProjectFileContentGenerator.new(project_name, class_name)
      project = Project.new(messenger, project_file_content_generator)
      project.create(project_name, class_name)
    end

    private

    # "MyGreatProject" -> "my_great_project" (CamelCase to snake_case).
    def generate_path_from_name(name)
      name.scan(/[A-Z][a-z0-9]+/).join('_').downcase
    end

    def create_project
      create_project_tree
      create_project_files
      @messenger.puts("Project #{@project_name} created")
    end

    # Builds the full directory skeleton under ./<project_dir>.
    def create_project_tree
      Dir.mkdir File.join(".", @project_dir)
      Dir.mkdir File.join(".", @project_dir, "features")
      Dir.mkdir File.join(".", @project_dir, "spec")
      Dir.mkdir File.join(".", @project_dir, "lib")
      Dir.mkdir File.join(".", @project_dir, "bin")
      Dir.mkdir File.join(".", @project_dir, "features/step_definitions")
      Dir.mkdir File.join(".", @project_dir, "features/support")
      Dir.mkdir File.join(".", @project_dir, "lib", @project_dir)
      Dir.mkdir File.join(".", @project_dir, "spec", @project_dir)
    end

    def create_project_files
      create_rakefile
      create_autotest_file
      create_cucumber_config_file
      create_cucumber_environment_file
      create_cucumber_step_definitions_file
      create_cucumber_feature_file
      create_project_file
      create_project_first_class_file
      create_project_spec_options_file
      create_project_spec_helper_file
      create_project_first_class_spec_file
      create_project_executable_file
    end

    def create_rakefile
      create_file(File.join(".", @project_dir, "Rakefile"),
                  @project_file_content_generator.generate_rakefile_content)
    end

    def create_autotest_file
      create_file(File.join(".", @project_dir, ".autotest"),
                  @project_file_content_generator.generate_autotest_file_content)
    end

    def create_cucumber_config_file
      create_file(File.join(".", @project_dir, "cucumber.yml"),
                  @project_file_content_generator.generate_cucumber_config_file_content)
    end

    def create_cucumber_environment_file
      create_file(File.join(".", @project_dir, "features/support", "env.rb"),
                  @project_file_content_generator.generate_cucumber_environment_file_content)
    end

    # Step definitions start out empty.
    def create_cucumber_step_definitions_file
      create_file(File.join(".", @project_dir, "features/step_definitions", "#{@project_dir}_steps.rb"))
    end

    def create_cucumber_feature_file
      create_file(File.join(".", @project_dir, "features", "first_#{@project_dir}.feature"),
                  @project_file_content_generator.generate_cucumber_feature_file_content)
    end

    def create_project_file
      create_file(File.join(".", @project_dir, "lib", "#{@project_dir}.rb"),
                  @project_file_content_generator.generate_project_file_content)
    end

    def create_project_first_class_file
      create_file(File.join(".", @project_dir, "lib", "#{@project_dir}", "#{@class_file}.rb"),
                  @project_file_content_generator.generate_project_first_class_file_content)
    end

    def create_project_spec_options_file
      create_file(File.join(".", @project_dir, "spec", "spec.opts"),
                  @project_file_content_generator.generate_project_spec_options_file_content)
    end

    def create_project_spec_helper_file
      create_file(File.join(".", @project_dir, "spec", "spec_helper.rb"),
                  @project_file_content_generator.generate_project_spec_helper_file_content)
    end

    def create_project_first_class_spec_file
      create_file(File.join(".", @project_dir, "spec", "#{@project_dir}", "#{@class_file}_spec.rb"),
                  @project_file_content_generator.generate_project_first_class_spec_file_content)
    end

    def create_project_executable_file
      create_file(File.join(".", @project_dir, "bin", "#{@project_dir}"),
                  @project_file_content_generator.generate_project_executable_file_content)
    end

    # Writes +content+ to +path+, creating/truncating the file.
    def create_file(path, content='')
      # Fix: File.open instead of Kernel#open — Kernel#open treats a
      # path beginning with '|' as a command to spawn.
      File.open(path, "w") do |f|
        f << content
      end
    end
  end
end
require 'fileutils'
require 'bio-blastxmlparser'
require 'genevalidator/arg_validation'
require 'genevalidator/blast'
require 'genevalidator/exceptions'
require 'genevalidator/get_raw_sequences'
require 'genevalidator/json_to_gv_results'
require 'genevalidator/output'
require 'genevalidator/output_files'
require 'genevalidator/tabular_parser'
require 'genevalidator/validation'
# Top level module / namespace.
module GeneValidator
class << self
attr_accessor :opt, :config, :overview, :dirs
attr_reader :raw_seq_file_index
attr_reader :raw_seq_file_load
# array of indexes for the start offsets of each query in the fasta file
attr_reader :query_idx
attr_accessor :mutex, :mutex_array
# Validates the CLI options, indexes the input FASTA and initialises
# the shared config, output directories and mutexes.
# +opt+:: options hash (validated in place by GVArgValidation)
# +start_idx+:: index of the first query to analyse
def init(opt, start_idx = 1)
  warn '==> Analysing input arguments'
  @opt = opt
  GVArgValidation.validate_args # validates @opt
  number_of_sequences = index_the_input
  @config = setup_config(start_idx, number_of_sequences)
  @dirs = setup_dirnames(@opt[:input_fasta_file])
  @mutex = Mutex.new
  @mutex_array = Mutex.new
  resume_from_previous_run(opt[:resumable]) unless opt[:resumable].nil?
  RawSequences.index_raw_seq_file if @opt[:raw_sequences]
end
##
# Parse the blast output and run validations
# Full pipeline: runs BLAST if no output file was supplied, fetches
# raw hit sequences when the requested validations need them, runs
# all validations and writes the output files.
def run
  # Run BLAST on all sequences (generates @opt[:blast_xml_file])
  # if no BLAST OUTPUT file provided...
  unless @opt[:blast_xml_file] || @opt[:blast_tabular_file]
    BlastUtils.run_blast_on_input_file
  end
  # Obtain fasta file of all BLAST hits if running align or dup validations
  if @opt[:validations].include?('align') ||
     @opt[:validations].include?('dup')
    RawSequences.run unless @opt[:raw_sequences]
  end
  # Run Validations
  iterator = parse_blast_output_file
  Validations.new.run_validations(iterator)
  produce_output
  print_directories_locations
end
##
# Builds an iterator over the BLAST results: a Bio XML iterator when a
# BLAST XML file was supplied, otherwise a TabularParser.
# Returns an iterator.
def parse_blast_output_file
  if @opt[:blast_xml_file]
    Bio::BlastXMLParser::XmlIterator.new(@opt[:blast_xml_file]).to_enum
  else
    # TabularParser reads the tabular file/columns from @opt itself.
    TabularParser.new
  end
  ## TODO: Add a Rescue statement - e.g. if unable to create the Object...
end
# Also called by json_to_gv script
# Also called by json_to_gv script
# Derives every output file/directory path from the input filename and
# creates the output directory. Returns a hash of paths; note the
# :html_file entry is a glob pattern (results may span several files).
def setup_dirnames(input_file)
  fname = File.basename(input_file, File.extname(input_file))
  out_dir = setup_output_dir(fname)
  { filename: fname,
    output_dir: out_dir,
    tmp_dir: File.join(out_dir, 'tmp'),
    json_dir: File.join(out_dir, 'tmp/json'),
    html_file: File.join(out_dir, "#{fname}_results*.html"),
    json_file: File.join(out_dir, "#{fname}_results.json"),
    csv_file: File.join(out_dir, "#{fname}_results.csv"),
    summary_file: File.join(out_dir, "#{fname}_summary.csv"),
    fasta_file: File.join(out_dir, "#{fname}_results.fa"),
    aux_dir: File.expand_path('../aux', __dir__) }
end
# Random access to a single query record in the input FASTA, using the
# byte offsets collected by index_the_input. Returns the raw record
# (header line plus sequence) as a binary String.
def extract_input_fasta_sequence(index)
  # @query_idx[i] is the byte offset of query i; the following entry
  # marks the start of the next record, so the difference is the length.
  offset = @query_idx[index]
  length = @query_idx[index + 1] - offset
  IO.binread(@opt[:input_fasta_file], length, offset)
end
# Generates the overview from the collected JSON results and writes
# every output format (console footer, json, html, csv, summary and
# the best-hits fasta).
def produce_output
  @overview = Output.generate_overview(@config[:json_output],
                                       @opt[:min_blast_hits])
  eval_text = Output.generate_evaluation_text(@overview)
  Output.print_console_footer(eval_text, @opt)
  output_files = OutputFiles.new
  output_files.write_json
  output_files.write_html(eval_text)
  output_files.write_csv
  output_files.write_summary
  output_files.print_best_fasta
end
private
# Builds the shared run configuration.
# +start_idx+:: index of the first query to analyse
# +seq_length+:: number of queries in the input (sizes :json_output)
def setup_config(start_idx, seq_length)
  config = {}
  config[:idx] = 0
  config[:start_idx] = start_idx
  config[:type] = BlastUtils.guess_sequence_type_from_input_file
  config[:json_output] = Array.new(seq_length)
  config[:run_no] = 0
  config[:output_max] = 2500 # max no. of queries in the output html file
  config
end
##
# Creates the output folder (timestamped under the cwd unless
# @opt[:output_dir] overrides it) plus its tmp subdir, and copies the
# auxiliary html folders into it. Returns the output dir path.
def setup_output_dir(fname)
  dir_name = "#{fname}_" + Time.now.strftime('%Y_%m_%d_%H_%M_%S')
  default_outdir = File.join(Dir.pwd, dir_name)
  output_dir = @opt[:output_dir].nil? ? default_outdir : @opt[:output_dir]
  # Refuses to clobber an existing dir (may exit the process).
  assert_output_dir_does_not_exist(output_dir)
  Dir.mkdir(output_dir)
  Dir.mkdir(File.join(output_dir, 'tmp'))
  cp_html_files(output_dir)
  output_dir
end
# Guards against clobbering an existing output directory.
#
# With @opt[:force_rewrite] (the --force option) the stale directory is
# deleted and execution continues; otherwise an explanatory message is
# printed and the process exits with status 1.  The original message did
# not mention --force even though it is the supported way out.
def assert_output_dir_does_not_exist(output_dir)
  return unless Dir.exist?(output_dir)
  FileUtils.rm_r(output_dir) if @opt[:force_rewrite]
  return if @opt[:force_rewrite]
  warn "The output directory (#{output_dir}) already exists."
  warn ''
  warn 'Please remove this directory before continuing.'
  warn 'Alternatively, you can rerun GeneValidator with the `--force`'
  warn 'argument, which overwrites any previous output.'
  exit 1
end
# Populates the run directory with the bundled HTML assets when HTML
# output was requested; otherwise just creates the bare tmp/json
# directory.
def cp_html_files(output_dir)
  json_dir = File.join(output_dir, 'tmp', 'json')
  if @opt[:output_formats].include?('html')
    assets = File.expand_path('../aux/html_files/', __dir__)
    FileUtils.cp_r(assets, output_dir)
    # tmp/json is a symlink into the copied html_files so the JSON written
    # during the run is served alongside the HTML.
    FileUtils.ln_s(File.join('..', 'html_files', 'json'), json_dir)
  else
    Dir.mkdir(json_dir)
  end
end
##
# Records the byte offset of every query record in the input FASTA file
# (plus the total file size as a sentinel) into @query_idx, enabling
# random access to individual records later on.
# Returns the number of query sequences found.
def index_the_input
  contents = IO.binread(@opt[:input_fasta_file])
  offsets = []
  contents.scan(/(>[^>]+)/) { offsets << Regexp.last_match.begin(0) }
  offsets << contents.length
  @query_idx = offsets
  @query_idx.length - 1
end
# Tells the user (on STDERR) where this run's output directory is.
def print_directories_locations
  warn '==> GeneValidator output files have been saved to:'
  warn " #{File.expand_path(@dirs[:output_dir])}"
end
# Re-uses artefacts from a previous (interrupted) run directory, if its
# tmp/ subdirectory still exists: BLAST XML output, raw sequence files and
# already-computed per-query JSON results.
def resume_from_previous_run(prev_dir)
  prev_tmp_dir = File.join(prev_dir, 'tmp')
  return unless Dir.exist? prev_tmp_dir
  copy_blast_xml_files(prev_tmp_dir)
  copy_raw_seq_files(prev_tmp_dir)
  copy_prev_json_output(prev_tmp_dir)
end
# Carries a previously generated BLAST XML file over into the current run
# (skipped when the user already supplied BLAST output).
def copy_blast_xml_files(prev_tmp_dir)
  return if @opt[:blast_xml_file] || @opt[:blast_tabular_file]
  candidates = Dir[File.join(prev_tmp_dir, '*blast_xml')]
  return if candidates.empty?
  destination = File.join(@dirs[:tmp_dir], "#{@dirs[:filename]}.blast_xml")
  @opt[:blast_xml_file] = destination
  FileUtils.cp(candidates.first, destination)
end
# Carries a previously fetched raw-sequences file over into the current
# run; only needed when the align or dup validations will run and no raw
# sequence file was supplied.
def copy_raw_seq_files(prev_tmp_dir)
  return if @opt[:raw_sequences]
  wanted = @opt[:validations].include?('align') ||
           @opt[:validations].include?('dup')
  return unless wanted
  candidates = Dir[File.join(prev_tmp_dir, '*raw_seq')]
  return if candidates.empty?
  destination = File.join(@dirs[:tmp_dir],
                          "#{@dirs[:filename]}.blast_xml.raw_seq")
  @opt[:raw_sequences] = destination
  FileUtils.cp(candidates.first, destination)
end
# Copies per-query JSON results from a previous run into this run's json
# directory and replays them (console output plus @config bookkeeping).
# The aggregated overview.json is copied but excluded from the replay.
def copy_prev_json_output(prev_tmp_dir)
  prev_json_dir = File.join(prev_tmp_dir, 'json')
  return unless Dir.exist? prev_json_dir
  all_jsons = Dir[File.join(prev_json_dir, '*.json')]
  FileUtils.cp(all_jsons, @dirs[:json_dir])
  overview_json = Dir[File.join(prev_json_dir, 'overview.json')]
  data_jsons = all_jsons - overview_json
  parse_prev_json(data_jsons)
end
# Loads previously produced per-query JSON files back into @config and
# echoes each query's results to the console.
#
# data_jsons - paths of per-query "<n>.json" files (overview excluded).
def parse_prev_json(data_jsons)
  data_jsons.each do |json|
    json_contents = File.read(File.expand_path(json))
    data = JSON.parse(json_contents, symbolize_names: true)
    # The file name encodes the 1-based query number.  The dot is escaped
    # so only "<digits>.json" matches (the previous /(\d+).json/ also
    # matched names like "12xjson").
    idx = json.match(/(\d+)\.json/)[1].to_i - 1
    @config[:json_output][idx] = data
    print_prev_json_to_console(data)
  end
end
# Replays one query's saved JSON results on the console.
def print_prev_json_to_console(data)
  JsonToGVResults.print_console_header(data)
  JsonToGVResults.print_output_console(data)
end
end
end
Hint at the --force option if the output dir already exists
require 'fileutils'
require 'bio-blastxmlparser'
require 'genevalidator/arg_validation'
require 'genevalidator/blast'
require 'genevalidator/exceptions'
require 'genevalidator/get_raw_sequences'
require 'genevalidator/json_to_gv_results'
require 'genevalidator/output'
require 'genevalidator/output_files'
require 'genevalidator/tabular_parser'
require 'genevalidator/validation'
# Top level module / namespace.
module GeneValidator
class << self
attr_accessor :opt, :config, :overview, :dirs
attr_reader :raw_seq_file_index
attr_reader :raw_seq_file_load
# array of indexes for the start offsets of each query in the fasta file
attr_reader :query_idx
attr_accessor :mutex, :mutex_array
# Validates the CLI arguments, indexes the input FASTA and prepares all
# shared run state (@opt, @config, @dirs, mutexes).  When a resumable
# directory is given, previous artefacts are adopted before the run.
def init(opt, start_idx = 1)
  warn '==> Analysing input arguments'
  @opt = opt
  GVArgValidation.validate_args # validates @opt
  number_of_sequences = index_the_input
  @config = setup_config(start_idx, number_of_sequences)
  @dirs = setup_dirnames(@opt[:input_fasta_file])
  @mutex = Mutex.new
  @mutex_array = Mutex.new
  resume_from_previous_run(opt[:resumable]) unless opt[:resumable].nil?
  RawSequences.index_raw_seq_file if @opt[:raw_sequences]
end
##
# Parse the blast output and run validations.
# Full pipeline: run BLAST if needed, fetch hit sequences if needed,
# validate every query, then write all outputs.
def run
  # Run BLAST on all sequences (generates @opt[:blast_xml_file])
  # if no BLAST OUTPUT file provided...
  unless @opt[:blast_xml_file] || @opt[:blast_tabular_file]
    BlastUtils.run_blast_on_input_file
  end
  # Obtain fasta file of all BLAST hits if running align or dup validations
  if @opt[:validations].include?('align') ||
     @opt[:validations].include?('dup')
    RawSequences.run unless @opt[:raw_sequences]
  end
  # Run Validations
  iterator = parse_blast_output_file
  Validations.new.run_validations(iterator)
  produce_output
  print_directories_locations
end
##
# Builds an iterator over the BLAST results: an XML iterator when a BLAST
# XML file is available, otherwise a TabularParser.
# (Takes no parameters; reads @opt.  The previous comment documented
# +output+/+type+ parameters that do not exist.)
#
# Returns the iterator.
def parse_blast_output_file
  if @opt[:blast_xml_file]
    Bio::BlastXMLParser::XmlIterator.new(@opt[:blast_xml_file]).to_enum
  else
    TabularParser.new
  end
  ## TODO: Add a Rescue statement - e.g. if unable to create the Object...
end
# Also called by json_to_gv script
# Also called by json_to_gv script.
# Derives every input/output path for this run; creates the run directory.
def setup_dirnames(input_file)
  fname = File.basename(input_file, File.extname(input_file))
  out_dir = setup_output_dir(fname)
  { filename: fname,
    output_dir: out_dir,
    tmp_dir: File.join(out_dir, 'tmp'),
    json_dir: File.join(out_dir, 'tmp/json'),
    html_file: File.join(out_dir, "#{fname}_results*.html"),
    json_file: File.join(out_dir, "#{fname}_results.json"),
    csv_file: File.join(out_dir, "#{fname}_results.csv"),
    summary_file: File.join(out_dir, "#{fname}_summary.csv"),
    fasta_file: File.join(out_dir, "#{fname}_results.fa"),
    aux_dir: File.expand_path('../aux', __dir__) }
end

# Reads one query record from the input FASTA via the @query_idx offsets.
# NB: despite its name, start_offset holds the record *length* and
# end_offset the record's *start* offset (third and second args of
# IO.binread respectively).
def extract_input_fasta_sequence(index)
  start_offset = @query_idx[index + 1] - @query_idx[index]
  end_offset = @query_idx[index]
  IO.binread(@opt[:input_fasta_file], start_offset, end_offset)
end

# Builds the run overview and writes every requested output artefact
# (JSON, HTML, CSV, summary, best-hit FASTA).
def produce_output
  @overview = Output.generate_overview(@config[:json_output],
                                       @opt[:min_blast_hits])
  eval_text = Output.generate_evaluation_text(@overview)
  Output.print_console_footer(eval_text, @opt)
  output_files = OutputFiles.new
  output_files.write_json
  output_files.write_html(eval_text)
  output_files.write_csv
  output_files.write_summary
  output_files.print_best_fasta
end
private
# Builds the initial shared run-state hash.
def setup_config(start_idx, seq_length)
  {
    idx: 0,
    start_idx: start_idx,
    type: BlastUtils.guess_sequence_type_from_input_file,
    json_output: Array.new(seq_length),
    run_no: 0,
    output_max: 2500 # max no. of queries in the output html file
  }
end

##
# Creates the output folder and copies the auxiliar folders to this folder.
# Honours @opt[:output_dir] when given; otherwise a timestamped directory
# under the current working directory is used.
def setup_output_dir(fname)
  dir_name = "#{fname}_" + Time.now.strftime('%Y_%m_%d_%H_%M_%S')
  default_outdir = File.join(Dir.pwd, dir_name)
  output_dir = @opt[:output_dir].nil? ? default_outdir : @opt[:output_dir]
  assert_output_dir_does_not_exist(output_dir)
  Dir.mkdir(output_dir)
  Dir.mkdir(File.join(output_dir, 'tmp'))
  cp_html_files(output_dir)
  output_dir
end
# Guards against clobbering an existing output directory.  With --force
# (@opt[:force_rewrite]) the old directory is removed and the run carries
# on; otherwise an explanatory message is printed and the process exits
# with status 1.
def assert_output_dir_does_not_exist(output_dir)
  return unless Dir.exist?(output_dir)
  FileUtils.rm_r(output_dir) if @opt[:force_rewrite]
  return if @opt[:force_rewrite]
  warn "The output directory (#{output_dir}) already exists."
  warn ''
  warn 'Please remove this directory before continuing.'
  # Fixed wording: was "you rerun" (missing "can").
  warn 'Alternatively, you can rerun GeneValidator with the `--force` argument,'
  warn 'which rewrites over any previous output.'
  exit 1
end
# Copies the bundled HTML assets into the run directory (html output) or
# just creates the bare tmp/json directory otherwise.
def cp_html_files(output_dir)
  if @opt[:output_formats].include? 'html'
    aux_files = File.expand_path('../aux/html_files/', __dir__)
    FileUtils.cp_r(aux_files, output_dir)
    # tmp/json links into the copied assets so the HTML can load the JSON.
    FileUtils.ln_s(File.join('..', 'html_files', 'json'),
                   File.join(output_dir, 'tmp', 'json'))
  else
    Dir.mkdir(File.join(output_dir, 'tmp', 'json'))
  end
end

##
# create a list of index of the queries in the FASTA
# These offset can then be used to quickly read the input file using the
# start and end positions of each query.
# Returns the number of query sequences found.
def index_the_input
  fasta_content = IO.binread(@opt[:input_fasta_file])
  @query_idx = fasta_content.enum_for(:scan, /(>[^>]+)/).map do
    Regexp.last_match.begin(0)
  end
  # Sentinel: the file length marks the end of the final record.
  @query_idx.push(fasta_content.length)
  @query_idx.length - 1
end

# Tells the user (on STDERR) where this run's output was written.
def print_directories_locations
  warn '==> GeneValidator output files have been saved to:'
  warn " #{File.expand_path(@dirs[:output_dir])}"
end
# Re-uses artefacts (BLAST XML, raw sequences, per-query JSON) from a
# previous run directory, when its tmp/ subdirectory still exists.
def resume_from_previous_run(prev_dir)
  prev_tmp_dir = File.join(prev_dir, 'tmp')
  return unless Dir.exist? prev_tmp_dir
  copy_blast_xml_files(prev_tmp_dir)
  copy_raw_seq_files(prev_tmp_dir)
  copy_prev_json_output(prev_tmp_dir)
end

# Adopts a previous run's BLAST XML output unless BLAST output was
# already supplied on the command line.
def copy_blast_xml_files(prev_tmp_dir)
  return if @opt[:blast_xml_file] || @opt[:blast_tabular_file]
  prev_blast_xml = Dir[File.join(prev_tmp_dir, '*blast_xml')]
  return if prev_blast_xml.empty?
  blast_xml_fname = "#{@dirs[:filename]}.blast_xml"
  @opt[:blast_xml_file] = File.join(@dirs[:tmp_dir], blast_xml_fname)
  FileUtils.cp(prev_blast_xml[0], @opt[:blast_xml_file])
end

# Adopts a previous run's raw-sequences file (only needed by the align
# and dup validations).
def copy_raw_seq_files(prev_tmp_dir)
  return if @opt[:raw_sequences]
  return unless @opt[:validations].include?('align') ||
                @opt[:validations].include?('dup')
  prev_raw_seq = Dir[File.join(prev_tmp_dir, '*raw_seq')]
  return if prev_raw_seq.empty?
  raw_seq_fname = "#{@dirs[:filename]}.blast_xml.raw_seq"
  @opt[:raw_sequences] = File.join(@dirs[:tmp_dir], raw_seq_fname)
  FileUtils.cp(prev_raw_seq[0], @opt[:raw_sequences])
end

# Copies all per-query JSON results into this run and replays them;
# overview.json is copied but excluded from the replay.
def copy_prev_json_output(prev_tmp_dir)
  prev_json_dir = File.join(prev_tmp_dir, 'json')
  return unless Dir.exist? prev_json_dir
  all_jsons = Dir[File.join(prev_json_dir, '*.json')]
  FileUtils.cp(all_jsons, @dirs[:json_dir])
  overview_json = Dir[File.join(prev_json_dir, 'overview.json')]
  data_jsons = all_jsons - overview_json
  parse_prev_json(data_jsons)
end

# Loads each per-query JSON back into @config and echoes it.
def parse_prev_json(data_jsons)
  data_jsons.each do |json|
    json_contents = File.read(File.expand_path(json))
    data = JSON.parse(json_contents, symbolize_names: true)
    # NOTE(review): the dot is unescaped, so /(\d+).json/ also matches
    # e.g. "12xjson" — consider /(\d+)\.json/.
    idx = json.match(/(\d+).json/)[1].to_i - 1
    @config[:json_output][idx] = data
    print_prev_json_to_console(data)
  end
end

# Replays one query's saved JSON results on the console.
def print_prev_json_to_console(data)
  JsonToGVResults.print_console_header(data)
  JsonToGVResults.print_output_console(data)
end
end
end
|
module Killbill #:nodoc:
  module PaypalExpress #:nodoc:
    # Plugin-private helper endpoints that are not part of the standard
    # Kill Bill payment plugin API: kicking off the PayPal Express
    # Checkout flow and repairing payments stuck in the UNDEFINED state.
    class PrivatePaymentPlugin < ::Killbill::Plugin::ActiveMerchant::PrivatePaymentPlugin
      # Look-back window, in seconds, used when searching PayPal for a
      # transaction whose outcome is unknown.
      ONE_HOUR_AGO = 3600

      # Expected PayPal status/type per Kill Bill transaction type.
      # Frozen: shared constant data should not be mutable.
      STATUS = {:CAPTURE => {:success_status => 'Completed', :type => 'Payment'},
                :AUTHORIZE => {:success_status => 'Pending', :type => 'Authorization'},
                :REFUND => {:success_status => 'Completed', :type => 'Refund'}}.freeze

      def initialize(session = {})
        super(:paypal_express,
              ::Killbill::PaypalExpress::PaypalExpressPaymentMethod,
              ::Killbill::PaypalExpress::PaypalExpressTransaction,
              ::Killbill::PaypalExpress::PaypalExpressResponse,
              session)
      end

      # Starts the Express Checkout flow (SetExpressCheckout call) and
      # persists the gateway response.
      # See https://cms.paypal.com/uk/cgi-bin/?cmd=_render-content&content_ID=developer/e_howto_api_ECReferenceTxns
      #
      # Returns the saved PaypalExpressResponse (carrying the PayPal token).
      def initiate_express_checkout(kb_account_id, kb_tenant_id, amount_in_cents=0, currency='USD', with_baid=true, options = {})
        options[:currency] ||= currency

        # Required arguments
        options[:return_url] ||= 'http://www.example.com/success'
        options[:cancel_return_url] ||= 'http://www.example.com/sad_panda'

        if with_baid
          options[:billing_agreement] ||= {}
          options[:billing_agreement][:type] ||= 'MerchantInitiatedBilling'
          options[:billing_agreement][:description] ||= 'Kill Bill billing agreement'
        end

        # Go to Paypal (SetExpressCheckout call)
        payment_processor_account_id = options[:payment_processor_account_id] || :default
        paypal_express_response = gateway(payment_processor_account_id, kb_tenant_id).setup_authorization(amount_in_cents, options)
        # Only the response is needed by callers; the persisted transaction
        # record is intentionally discarded (was an unused local).
        response, _transaction = save_response_and_transaction(paypal_express_response, :initiate_express_checkout, kb_account_id, kb_tenant_id, payment_processor_account_id)

        response
      end

      # Builds the PayPal redirect URL for a previously obtained token.
      def to_express_checkout_url(response, kb_tenant_id = nil, options = {})
        payment_processor_account_id = options[:payment_processor_account_id] || :default
        gateway = gateway(payment_processor_account_id, kb_tenant_id)
        review = ::Killbill::Plugin::ActiveMerchant::Utils.normalized(options, :review)
        gateway.redirect_url_for(response.token, :review => review)
      end

      # Maps Kill Bill account ids to their external keys.
      def get_external_keys_for_accounts(kb_account_ids, kb_tenant_id)
        context = kb_apis.create_context(kb_tenant_id)
        kb_account_ids.map {|id| kb_apis.account_user_api.get_account_by_id(id, context).external_key }
      end

      # Tries to resolve a payment stuck in the UNDEFINED state: searches
      # PayPal for a matching transaction and, when its status/type match
      # the expected success values, transitions it to PROCESSED.
      #
      # Returns true when the transaction was repaired, false otherwise.
      def fix_unknown_transaction(plugin_response, trx_plugin_info, gateway, kb_account_id, kb_tenant_id)
        status, transaction_id, type = search_transaction(trx_plugin_info.created_date - ONE_HOUR_AGO,
                                                         trx_plugin_info.amount,
                                                         trx_plugin_info.currency,
                                                         gateway,
                                                         trx_plugin_info.kb_transaction_payment_id)
        return false if status.blank? || transaction_id.blank? || type.blank?

        if type == STATUS[trx_plugin_info.transaction_type][:type] &&
           status == STATUS[trx_plugin_info.transaction_type][:success_status]
          plugin_response.transition_to_success transaction_id, trx_plugin_info
          # Past tense: the repair has already happened at this point.
          logger.info("Fixed UNDEFINED kb_transaction_id='#{trx_plugin_info.kb_transaction_payment_id}' to PROCESSED")
          return true
        end
        false
      end

      # Searches PayPal for a transaction by invoice id / amount / currency
      # starting from start_time.
      # Returns [status, authorization, type] (elements may be blank).
      def search_transaction(start_time, amount, currency, gateway, kb_payment_transaction_id)
        options = {:start_date => start_time, :invoice_id => kb_payment_transaction_id, :amount => amount, :currency => currency}
        response = gateway.transaction_search options
        [response.params['status'], response.authorization, response.params['type']]
      end
    end
  end
end
Update the success log message in fix_unknown_transaction ("Fix" → "Fixed")
module Killbill #:nodoc:
  module PaypalExpress #:nodoc:
    # Plugin-private helper endpoints outside the standard Kill Bill
    # payment plugin API: starting the Express Checkout flow and repairing
    # payments stuck in the UNDEFINED state.
    class PrivatePaymentPlugin < ::Killbill::Plugin::ActiveMerchant::PrivatePaymentPlugin
      # Look-back window, in seconds, when searching PayPal for a
      # transaction whose outcome is unknown.
      ONE_HOUR_AGO = 3600

      # Expected PayPal status/type per Kill Bill transaction type.
      STATUS = {:CAPTURE => {:success_status => 'Completed', :type => 'Payment'},
                :AUTHORIZE => {:success_status => 'Pending', :type => 'Authorization'},
                :REFUND => {:success_status => 'Completed', :type => 'Refund'}}

      def initialize(session = {})
        super(:paypal_express,
              ::Killbill::PaypalExpress::PaypalExpressPaymentMethod,
              ::Killbill::PaypalExpress::PaypalExpressTransaction,
              ::Killbill::PaypalExpress::PaypalExpressResponse,
              session)
      end

      # Starts the Express Checkout flow (SetExpressCheckout call) and
      # persists the gateway response; returns the saved response
      # (carrying the PayPal token).
      # See https://cms.paypal.com/uk/cgi-bin/?cmd=_render-content&content_ID=developer/e_howto_api_ECReferenceTxns
      def initiate_express_checkout(kb_account_id, kb_tenant_id, amount_in_cents=0, currency='USD', with_baid=true, options = {})
        options[:currency] ||= currency

        # Required arguments
        options[:return_url] ||= 'http://www.example.com/success'
        options[:cancel_return_url] ||= 'http://www.example.com/sad_panda'

        if with_baid
          options[:billing_agreement] ||= {}
          options[:billing_agreement][:type] ||= 'MerchantInitiatedBilling'
          options[:billing_agreement][:description] ||= 'Kill Bill billing agreement'
        end

        # Go to Paypal (SetExpressCheckout call)
        payment_processor_account_id = options[:payment_processor_account_id] || :default
        paypal_express_response = gateway(payment_processor_account_id, kb_tenant_id).setup_authorization(amount_in_cents, options)
        response, transaction = save_response_and_transaction(paypal_express_response, :initiate_express_checkout, kb_account_id, kb_tenant_id, payment_processor_account_id)

        response
      end

      # Builds the PayPal redirect URL for a previously obtained token.
      def to_express_checkout_url(response, kb_tenant_id = nil, options = {})
        payment_processor_account_id = options[:payment_processor_account_id] || :default
        gateway = gateway(payment_processor_account_id, kb_tenant_id)
        review = ::Killbill::Plugin::ActiveMerchant::Utils.normalized(options, :review)
        gateway.redirect_url_for(response.token, :review => review)
      end

      # Maps Kill Bill account ids to their external keys.
      def get_external_keys_for_accounts(kb_account_ids, kb_tenant_id)
        context = kb_apis.create_context(kb_tenant_id)
        kb_account_ids.map {|id| kb_apis.account_user_api.get_account_by_id(id, context).external_key }
      end

      # Tries to resolve a payment stuck in the UNDEFINED state: searches
      # PayPal for a matching transaction and, when its status/type match
      # the expected success values, transitions it to PROCESSED.
      # Returns true when repaired, false otherwise.
      def fix_unknown_transaction(plugin_response, trx_plugin_info, gateway, kb_account_id, kb_tenant_id)
        status, transaction_id, type = search_transaction(trx_plugin_info.created_date - ONE_HOUR_AGO,
                                                         trx_plugin_info.amount,
                                                         trx_plugin_info.currency,
                                                         gateway,
                                                         trx_plugin_info.kb_transaction_payment_id)
        return false if status.blank? || transaction_id.blank? || type.blank?

        if type == STATUS[trx_plugin_info.transaction_type][:type] &&
           status == STATUS[trx_plugin_info.transaction_type][:success_status]
          plugin_response.transition_to_success transaction_id, trx_plugin_info
          logger.info("Fixed UNDEFINED kb_transaction_id='#{trx_plugin_info.kb_transaction_payment_id}' to PROCESSED")
          return true
        end
        false
      end

      # Searches PayPal for a transaction by invoice id / amount / currency
      # starting from start_time.
      # Returns [status, authorization, type] (elements may be blank).
      def search_transaction(start_time, amount, currency, gateway, kb_payment_transaction_id)
        options = {:start_date => start_time, :invoice_id => kb_payment_transaction_id, :amount => amount, :currency => currency}
        response = gateway.transaction_search options
        [response.params['status'], response.authorization, response.params['type']]
      end
    end
  end
end
|
require 'gh'
require 'time'
module GH
  # Public: A Wrapper class that deals with normalizing Github responses.
  class Normalizer < Wrapper
    # Public: Fetches and normalizes a github entity.
    #
    # Returns normalized Response.
    def [](key)
      result = super
      links(result)['self'] ||= { 'href' => full_url(key).to_s } if result.respond_to? :to_hash
      result
    end

    private

    double_dispatch

    # Returns the "_links" hash of a resource, creating it when absent.
    def links(hash)
      hash = hash.data if hash.respond_to? :data
      hash["_links"] ||= {}
    end

    # Records a link of the given type under "_links".
    def set_link(hash, type, href)
      links(hash)[type] = {"href" => href}
    end

    # Normalizes a Response by normalizing its payload, non-destructively.
    def modify_response(response)
      response = response.dup
      response.data = modify response.data
      response
    end

    # Normalizes every key/value pair of a hash, then fixes up user fields.
    def modify_hash(hash)
      corrected = {}
      corrected.default_proc = hash.default_proc if hash.default_proc

      hash.each_pair do |key, value|
        key = modify_key(key, value)
        next if modify_url(corrected, key, value)
        next if modify_time(corrected, key, value)
        corrected[key] = modify(value)
      end

      modify_user(corrected)
      corrected
    end

    # Converts a "timestamp" entry (epoch seconds or a parsable string)
    # into an ISO 8601 "date" entry.
    def modify_time(hash, key, value)
      return unless key == 'timestamp'
      time = Time.at(value)
    rescue TypeError
      # Time.at rejects strings; fall back to parsing the textual form.
      time = Time.parse(value.to_s)
    ensure
      hash['date'] = time.xmlschema if time
    end

    # Renames "user" to "owner"/"author" depending on context and mirrors
    # author/committer onto each other when only one is present.
    def modify_user(hash)
      hash['owner'] ||= hash.delete('user') if hash['created_at'] and hash['user']
      hash['author'] ||= hash.delete('user') if hash['committed_at'] and hash['user']

      hash['committer'] ||= hash['author'] if hash['author']
      hash['author'] ||= hash['committer'] if hash['committer']

      modify_user_fields hash['owner']
      modify_user_fields hash['user']
    end

    # Normalizes a single user resource: some payloads call the login
    # "name"; expose it consistently as "login" and make sure a self link
    # exists.
    #
    # Fixes two defects of the previous version: user entries that are
    # plain strings (just the login) crashed on hash['login'] ||= ..., and
    # a missing "name" key created a spurious 'login' => nil entry.
    def modify_user_fields(hash)
      return unless Hash === hash
      hash['login'] = hash.delete('name') if hash['name']
      set_link hash, 'self', "users/#{hash['login']}" unless links(hash).include? 'self'
    end

    # Moves URL-ish values into "_links" instead of keeping them inline.
    # Returns truthy when the pair was consumed.
    def modify_url(hash, key, value)
      case key
      when "blog"
        set_link(hash, key, value)
      when "url"
        # API URLs become the self link; anything else is an html link.
        type = Addressable::URI.parse(value).host == api_host.host ? "self" : "html"
        set_link(hash, type, value)
      when /^(.+)_url$/
        set_link(hash, $1, value)
      end
    end

    # Maps legacy/abbreviated keys onto their canonical names.
    def modify_key(key, value = nil)
      case key
      when 'gravatar_url' then 'avatar_url'
      when 'org' then 'organization'
      when 'orgs' then 'organizations'
      when 'username' then 'login'
      when 'repo' then 'repository'
      when 'repos' then modify_key('repositories', value)
      when /^repos?_(.*)$/ then "repository_#{$1}"
      when /^(.*)_repo$/ then "#{$1}_repository"
      when /^(.*)_repos$/ then "#{$1}_repositories"
      when 'commit', 'commit_id', 'id' then value =~ /^\w{40}$/ ? 'sha' : key
      when 'comments' then Numeric === value ? 'comment_count' : key
      when 'forks' then Numeric === value ? 'fork_count' : key
      when 'repositories' then Numeric === value ? 'repository_count' : key
      when /^(.*)s_count$/ then "#{$1}_count"
      else key
      end
    end
  end
end
Fix user normalization: guard modify_user_fields against non-Hash users and only rename "name" to "login" when present
require 'gh'
require 'time'
module GH
  # Public: A Wrapper class that deals with normalizing Github responses.
  class Normalizer < Wrapper
    # Public: Fetches and normalizes a github entity.
    #
    # Returns normalized Response.
    def [](key)
      result = super
      links(result)['self'] ||= { 'href' => full_url(key).to_s } if result.respond_to? :to_hash
      result
    end

    private

    double_dispatch

    # Returns the "_links" hash of a resource, creating it when absent.
    def links(hash)
      hash = hash.data if hash.respond_to? :data
      hash["_links"] ||= {}
    end

    # Records a link of the given type under "_links".
    def set_link(hash, type, href)
      links(hash)[type] = {"href" => href}
    end

    # Normalizes a Response by normalizing its payload, non-destructively.
    def modify_response(response)
      response = response.dup
      response.data = modify response.data
      response
    end

    # Normalizes every key/value pair of a hash, then fixes up user fields.
    def modify_hash(hash)
      corrected = {}
      corrected.default_proc = hash.default_proc if hash.default_proc
      hash.each_pair do |key, value|
        key = modify_key(key, value)
        next if modify_url(corrected, key, value)
        next if modify_time(corrected, key, value)
        corrected[key] = modify(value)
      end
      modify_user(corrected)
      corrected
    end

    # Converts a "timestamp" entry (epoch seconds or a parsable string)
    # into an ISO 8601 "date" entry.
    def modify_time(hash, key, value)
      return unless key == 'timestamp'
      time = Time.at(value)
    rescue TypeError
      # Time.at rejects strings; fall back to parsing the textual form.
      time = Time.parse(value.to_s)
    ensure
      hash['date'] = time.xmlschema if time
    end

    # Renames "user" to "owner"/"author" depending on context and mirrors
    # author/committer onto each other when only one is present.
    def modify_user(hash)
      hash['owner'] ||= hash.delete('user') if hash['created_at'] and hash['user']
      hash['author'] ||= hash.delete('user') if hash['committed_at'] and hash['user']
      hash['committer'] ||= hash['author'] if hash['author']
      hash['author'] ||= hash['committer'] if hash['committer']
      modify_user_fields hash['owner']
      modify_user_fields hash['user']
    end

    # Normalizes a single user hash: exposes "name" as "login" and makes
    # sure a self link exists.  Non-Hash users (e.g. plain login strings)
    # are left untouched.
    def modify_user_fields(hash)
      return unless Hash === hash
      hash['login'] = hash.delete('name') if hash['name']
      set_link hash, 'self', "users/#{hash['login']}" unless links(hash).include? 'self'
    end

    # Moves URL-ish values into "_links" instead of keeping them inline.
    # Returns truthy when the pair was consumed.
    def modify_url(hash, key, value)
      case key
      when "blog"
        set_link(hash, key, value)
      when "url"
        # API URLs become the self link; anything else is an html link.
        type = Addressable::URI.parse(value).host == api_host.host ? "self" : "html"
        set_link(hash, type, value)
      when /^(.+)_url$/
        set_link(hash, $1, value)
      end
    end

    # Maps legacy/abbreviated keys onto their canonical names.
    def modify_key(key, value = nil)
      case key
      when 'gravatar_url' then 'avatar_url'
      when 'org' then 'organization'
      when 'orgs' then 'organizations'
      when 'username' then 'login'
      when 'repo' then 'repository'
      when 'repos' then modify_key('repositories', value)
      when /^repos?_(.*)$/ then "repository_#{$1}"
      when /^(.*)_repo$/ then "#{$1}_repository"
      when /^(.*)_repos$/ then "#{$1}_repositories"
      when 'commit', 'commit_id', 'id' then value =~ /^\w{40}$/ ? 'sha' : key
      when 'comments' then Numeric === value ? 'comment_count' : key
      when 'forks' then Numeric === value ? 'fork_count' : key
      when 'repositories' then Numeric === value ? 'repository_count' : key
      when /^(.*)s_count$/ then "#{$1}_count"
      else key
      end
    end
  end
end
|
desc "initialize named durable queues"
namespace :philotic do
  # Declares every queue listed in a YAML file (queue name => options).
  # Usage: rake philotic:init_queues[FILENAME]
  task :init_queues, :filename do |t, args|
    raise "You must specify a file name for #{t.name}: rake #{t.name}[FILENAME] #yes, you need the brackets, no space." if !args[:filename]
    require 'philotic'
    philotic = Philotic::Connection.new
    # philotic.config.initialize_named_queues must be truthy to run Philotic.initialize_named_queue!
    philotic.config.initialize_named_queues = true
    @filename = args[:filename]
    queues = YAML.load_file(@filename)
    philotic.connect!
    queues.each_pair do |queue_name, queue_options|
      philotic.initialize_named_queue!(queue_name, queue_options)
    end
  end
end
Update the init_queues rake task to use the module-level Philotic API instead of an explicit Connection instance
desc "initialize named durable queues"
namespace :philotic do
  # Declares every queue listed in a YAML file (queue name => options)
  # through the module-level Philotic API.
  # Usage: rake philotic:init_queues[FILENAME]
  task :init_queues, :filename do |t, args|
    raise "You must specify a file name for #{t.name}: rake #{t.name}[FILENAME] #yes, you need the brackets, no space." unless args[:filename]
    require 'philotic'
    # Philotic only honours initialize_named_queue! when this flag is truthy.
    Philotic.config.initialize_named_queues = true
    @filename = args[:filename]
    definitions = YAML.load_file(@filename)
    definitions.each_pair do |name, options|
      Philotic.initialize_named_queue!(name, options)
    end
  end
end
|
module GitFlow
# This class represents a node in the structure of a legal document.
# A node consists of a metadata JSON file and any other files in the same
# directory that share the name of that metadata file but have a different
# extension.
class Node < WorkingFile
include ActiveModel::Validations
before_create :initialize_container_file, :initialize_text_file,
:attributes_to_content
before_update :attributes_to_content
before_destroy :remove_child_nodes
after_destroy :remove_text_file, :remove_container_file
# Formatting options used when serialising node metadata to JSON
# (pretty-printed output).  Frozen so the shared constant cannot be
# mutated at runtime.
JSON_WRITE_OPTIONS = {
  indent: ' ',
  space: ' ',
  object_nl: "\n",
  array_nl: "\n"
}.freeze
# Generate appropriate file_name for a sibling node with new attributes
# that are different from the present node.
#
# attributes - Hash with "number" and/or "title" (string keys).
# node_type  - type Hash with a "label", or nil.
#
# Returns a slugged "<name>.json" file name.  Unlike the previous
# implementation, the caller's title string is no longer mutated in place
# (downcase!/gsub! used to modify attributes["title"] destructively).
def self.file_name( attributes, node_type )
  name =
    if attributes["number"] && node_type && node_type["label"]
      "#{node_type['label']}-#{attributes['number']}"
    else
      attributes["title"]
    end
  slug = name.downcase
             .gsub( /[^a-z0-9]+/, '-' )
             .sub( /\A-/, '' )
             .sub( /-\z/, '' )
  "#{slug}.json"
end
# Orders nodes by type label, then numerically by their "number"
# attribute, then alphabetically by title (used by Comparable/sort).
def <=>(other)
  comp = node_type["label"] <=> other.node_type["label"]
  return comp unless comp == 0
  comp = attributes["number"].to_i <=> other.attributes["number"].to_i
  return comp unless comp == 0
  attributes["title"] <=> other.attributes["title"]
end
# Create a new child node of the current node, assigning it the next
# available number
def new_child_node( attributes )
  child_nodes.sort!
  last_node = child_nodes.last
  # NOTE(review): numbering continues from the highest-sorted sibling;
  # assumes siblings are numbered consecutively — confirm.
  number = ( last_node ? last_node.attributes["number"].to_i : 0 )
  intrinsic_attributes = {
    "number" => "#{number + 1}"
  }
  # New children take the first allowed type from this node's structure.
  node_type = allowed_child_node_types.first
  node = git_flow_repo.working_file(
    File.join( child_container_file.tree,
      GitFlow::Node.file_name( intrinsic_attributes, node_type )
    )
  ).node
  node.attributes = intrinsic_attributes.merge( attributes )
  node
end
# NOTE(review): this override only delegates to the superclass and could
# be removed without changing behavior.
def initialize( git_flow_repo, tree )
  super( git_flow_repo, tree )
end
# Converts a tree path to a flat reference string: strips the trailing
# ".json" and replaces path separators with underscores.
def self.to_reference( tree )
  tree.chomp('.json').tr('/', '_')
end

# Reference string for this node, derived from its tree path.
def to_reference
  self.class.to_reference tree
end
# Takes a reference (usually to another node)
# Returns array of the full reference and the tree
# The tree can be used to pull up metadata for referenced node
def to_interpolated_reference( target )
  current = tree.split("/")
  target_parts = target.split("/")
  parts = []
  # Walk down this node's own path until it converges with the target's
  # first segment, accumulating the shared prefix.
  while current.any? && current.first != target_parts.first do
    parts << current.shift
  end
  parts += target_parts
  target_tree = parts.join "/"
  target_node = git_flow_repo.working_file( target_tree + ".json" ).node
  if parts.length > 1
    if target_node.exists?
      [ self.class.to_reference( parts.join('/') ), target_node ]
    else
      # A multi-segment reference must resolve to an existing node.
      raise "Target node does not exist (#{tree}): #{target_node.tree}"
    end
  else
    # Single-segment targets are returned untouched (no node lookup).
    [ target ]
  end
end
# True when +node+ lives underneath this node's subtree, judged by
# tree-path prefix.
def ancestor_of_node?( node )
  # start_with? anchors at the real beginning of the string; the previous
  # /^#{...}/ regex also matched after embedded newlines (Ruby's ^ is
  # per-line) and returned an index rather than a boolean.
  node.tree.start_with?( tree_base )
end
# Every node below this one, depth-first (each child followed by its own
# descendants).  Memoized.
def descendent_nodes
  @descendent_nodes ||= child_nodes.flat_map do |child|
    [ child ] + child.descendent_nodes
  end
end
# Find child nodes of this node with an attribute matching this node
# (descendants whose +key+ attribute matches /value/).
def find( key, value )
  pattern = /#{value}/
  descendent_nodes.select do |node|
    candidate = node.attributes[key]
    candidate ? candidate =~ pattern : false
  end
end
# Returns textual content file associated with the node
def text_file
  return @text_file unless @text_file.nil?
  @text_file = git_flow_repo.working_file tree_text_file
end

# Initialize the text file associated with this node, if applicable
# (only node types declaring "text" carry a content file).
def initialize_text_file
  text_file.create if node_type && node_type["text"] && !text_file.exists?
end

# Remove the text file associated with this node
def remove_text_file
  text_file.destroy if text_file.exists?
end

# Get container file in which this node is located
# Memoized; false (not nil) is cached when there is no container, so the
# negative result is not recomputed.
def container_file
  return @container_file unless @container_file.nil?
  @container_file =
    if parent_node && !parent_node.root?
      git_flow_repo.working_file( parent_node.tree_base )
    else
      false
    end
end

# Get the working file where children of this node are located
# (the repository root for root nodes).
def child_container_file
  return @child_container_file unless @child_container_file.nil?
  @child_container_file =
    if !root?
      git_flow_repo.working_file( tree_base )
    else
      git_flow_repo.working_file( '' )
    end
end

# Initialize the directory in which this node is placed, if applicable
def initialize_container_file
  container_file.create true if container_file && !container_file.exists?
end

# Removes the container directory for this node, if applicable
# (only when it exists and holds no other files).
def remove_container_file
  if container_file && container_file.exists? && container_file.children.empty?
    container_file.destroy
  end
end

# Removes child nodes of this node
def remove_child_nodes
  child_nodes.each { |node| node.destroy }
end
# Moves node and associated files to new tree location
# Returns reference to the moved node (false when the target is occupied
# or cannot be created).
def move( to_tree )
  return false unless to_node = move_to_node( to_tree )
  new_file = super( to_tree, force: true )
  if text_file.exists?
    text_file.move to_node.text_file.tree, force: true
  end
  if child_container_file.exists?
    child_container_file.move to_node.child_container_file.tree
  end
  new_file.node
end

# Set up the node to which this node is being moved
# Returns false when any target artefact (node, text file, container)
# already exists, or when creating the target node fails.
def move_to_node( to_tree )
  to_node = git_flow_repo.working_file( to_tree ).node
  return false if to_node.exists?
  return false if to_node.text_file.exists?
  return false if to_node.child_container_file.exists?
  return false unless to_node.create
  to_node
end
# TODO destroy vs. repeal
# Destroy should simply delete all files for this and child nodes
# Destroy should only be an option for nodes not in current law
# Repeal should remove content and add repeal metadata to this and child nodes
# Repeal should only be an option for nodes in current law
# This must be figured out before submission, so legislature votes on repeals
# only.
# What structure do child nodes of this node have?
# Pulls from custom settings of this node or pulls down settings from above
# Returns empty array when no children are supported
def child_node_structure
  return @child_node_structure unless @child_node_structure.nil?
  @child_node_structure = compute_child_node_structure
end

# Compute the structure for child nodes of this node
def compute_child_node_structure
  # Root node children are always codes
  if root?
    [ { "label" => "code",
        "number" => false,
        "title" => true,
        "text" => false } ]
  # Otherwise, check if this node defines a structure for its children
  elsif attributes["structure"]
    attributes["structure"]
  # Otherwise, compute structure from parent
  else
    compute_child_node_structure_from_parent
  end
end

# Where does this node appear in its parent's structure?
# Returns nil if it does not (the code returns nil, not false as the
# previous comments claimed).
def compute_child_node_index_from_parent
  # The node must have a type to check against parent structure
  if attributes['type']
    parent_node.child_node_structure.index do |s|
      s['label'] == attributes['type']
    end
  # If not, return nil -- we cannot use the parent structure
  else
    nil
  end
end
# Compute the child node structure from the parent: everything to the
# right of this node's own slot in the parent's structure, or [] when the
# node has no slot there (or nothing follows it).
def compute_child_node_structure_from_parent
  own_index = compute_child_node_index_from_parent
  return [] unless own_index
  # drop returns [] when the count runs past the end of the array,
  # which matches the "nothing to inherit" case.
  parent_node.child_node_structure.drop( own_index + 1 )
end
# Given an array representing structure of a node, return the members that
# are allowed as children.
# Includes every leading optional level plus the first required level
# (when one exists); an all-optional structure is returned in full.
def self.allowed_node_types( structure )
  required_at = structure.index { |type| !type['optional'] }
  required_at ? structure.take( required_at + 1 ) : structure.dup
end
# Returns the allowed types for children of this node
def allowed_child_node_types
  @allowed_child_node_types ||=
    self.class.allowed_node_types( child_node_structure )
end

# Returns the node types allowed for this node
def allowed_node_types
  @allowed_node_types ||=
    self.class.allowed_node_types( node_structure )
end

# The type of this node
# Memoized; false is cached when the type cannot be determined
# unambiguously.
def node_type
  return @node_type unless @node_type.nil?
  @node_type =
    if root?
      # NOTE(review): uses a symbol key (:title) while every other type
      # hash uses string keys ("label", "number", ...) — confirm intended.
      { title: true }
    elsif attributes["type"]
      allowed_node_types.find { |type| type["label"] == attributes["type"] }
    elsif allowed_node_types.length == 1
      allowed_node_types.first
    else
      false
    end
end
# Formats this node's "number" attribute according to its type's number
# style: Roman numerals ('R'/'r'), alphabetic ('A'/'a'), or the plain
# integer otherwise.  Returns nil when the node has no type or no number.
def node_number
  return unless node_type && attributes["number"]
  value = attributes["number"].to_i
  case node_type["number"]
  when 'R' then value.to_roman
  when 'r' then value.to_roman.downcase
  when 'A' then value.to_alpha.upcase
  when 'a' then value.to_alpha
  else value
  end
end
# Render the label for the node type
def node_label
node_type['label']
end
# Render the full title of the node, e.g. "Section 2. Definitions".
# Either part (numbered prefix / title attribute) may be absent.
def node_title
  parts = []
  parts << "#{node_label.capitalize} #{node_number}. " if node_number
  parts << attributes['title'] if attributes['title']
  parts.join
end
# Short title for node: "Section 2" when numbered, otherwise the raw title
# (coerced to a String, so nil becomes "").
def node_short_title
  return "#{node_label.capitalize} #{node_number}" if node_number
  "#{attributes['title']}"
end
# Render titles of parent nodes as a comma-separated breadcrumb.
# Drops the root node and any ancestor sharing this node's tree
# (i.e. the node itself, since ancestor_nodes includes self).
def node_title_context
  ancestor_nodes.reject(&:root?).reject { |ancestor|
    ancestor.tree == tree
  }.map(&:node_short_title).join(', ')
end
# Returns structural configuration for this node (memoized).
# If the node has no parent, it is a root-level "code".
#
# The node's own structure is simply its parent's child-node structure;
# [] when there is no parent or the parent defines none.
def node_structure
  return @node_structure unless @node_structure.nil?
  @node_structure =
    if parent_node && !parent_node.child_node_structure.empty?
      parent_node.child_node_structure
    else
      []
    end
end
# Returns child nodes of this node (memoized).
#
# Children are the node-bearing entries of the child container directory;
# [] when the container is absent on disk.
def child_nodes
  return @child_nodes unless @child_nodes.nil?
  @child_nodes =
    if child_container_file && child_container_file.exists?
      child_container_file.children.select(&:is_node?).map(&:node)
    else
      []
    end
end
# Return ancestor nodes, including self.
#
# Returns an Array ordered root-most first, ending with self. Memoized.
def ancestor_nodes
  return @ancestor_nodes unless @ancestor_nodes.nil?
  @ancestor_nodes =
    if parent_node
      # Build a fresh array here: the previous `parent_node.ancestor_nodes
      # << self` appended self directly into the parent's memoized
      # @ancestor_nodes array, corrupting the parent's cached ancestry for
      # every subsequent caller (and accumulating one sibling per call).
      parent_node.ancestor_nodes + [ self ]
    else
      [ self ]
    end
end
# Retrieves the parent node (memoized; false is cached for "no parent").
#
# A non-root node at the top of the tree (empty tree_parent) is parented to
# the repository root working file. Otherwise the parent is loaded only when
# its metadata JSON exists on disk; false when it does not.
def parent_node
  return @parent_node unless @parent_node.nil?
  @parent_node =
    if tree_parent.empty? && !root?
      git_flow_repo.working_file( '' ).node
    elsif File.exist? absolute_parent_node_path
      git_flow_repo.working_file( tree_parent_node ).node
    else
      false
    end
  parent_node
end
# Retrieves the attributes of this node (memoized).
# JSON contents of node are parsed and returned as a hash.
#
# The root node has a synthetic title; a node with no file and no content
# yields an empty hash. NOTE(review): JSON.parse will raise if the file
# exists but holds blank/invalid JSON — confirm that is acceptable here.
def attributes
  return @attributes unless @attributes.nil?
  @attributes =
    if root?
      { "title" => "/" }
    elsif exists? || content.present?
      JSON.parse content
    else
      { }
    end
end
# Recursive sorting algorithm for hash, array, value nest structures.
#
# Hash keys are emitted in sorted order; arrays are sorted only when every
# entry is Comparable (e.g. an array of hashes keeps its order); scalar
# values pass through untouched.
def self.sorted_attributes(attributes)
  case attributes
  when Hash
    attributes.keys.sort.each_with_object({}) do |key, sorted|
      sorted[key] = sorted_attributes(attributes[key])
    end
  when Array
    sortable = attributes.all? do |entry|
      entry.class.included_modules.include?(Comparable)
    end
    ordered = sortable ? attributes.sort : attributes
    ordered.map { |entry| sorted_attributes(entry) }
  else
    attributes
  end
end
# Returns attributes sorted (deep key/array ordering).
# Useful for writing attributes to JSON in predictable order.
def sorted_attributes
  self.class.sorted_attributes attributes
end
# Returns sorted_attributes as JSON content suitable to write to file.
# Serializes with JSON_WRITE_OPTIONS (pretty, newline-separated) and
# assigns the result to this file's content as a side effect.
def attributes_to_content
  self.content = JSON.generate( sorted_attributes, JSON_WRITE_OPTIONS )
end
# Set attribute values from hash.
#
# NOTE: despite the assignment-style name, this MERGES the given values
# into the existing attributes (in place) rather than replacing them.
# Returns the merged attributes hash.
def attributes=(values)
  attributes.merge! values
  attributes
end
# What is the path to the parent node's metadata file in the git repo?
def tree_parent_node
  "#{tree_parent}.json"
end
# Absolute path to the parent node's metadata file in the working directory.
def absolute_parent_node_path
  "#{absolute_parent_path}.json"
end
# What is the path to the parent directory in the git repo?
# Memoized; the repository root is represented as '' rather than '.'.
def tree_parent
  @tree_parent ||= begin
    dirname = File.dirname tree
    dirname == '.' ? '' : dirname
  end
end
# What is the path to the parent directory in the working directory?
def absolute_parent_path
  File.dirname absolute_path
end
# In the git repo, what is the path to the node without extension?
#
# Uses chomp so only the trailing extension is removed. The previous
# `gsub File.extname(tree), ''` deleted every occurrence of the extension
# substring, which would also mangle any directory component containing it
# (e.g. "notes.json/a.json" became "notes/a" instead of "notes.json/a").
def tree_base
  tree.chomp File.extname( tree )
end
# In absolute path, where is the working file for the node, without extension?
#
# chomp removes only the trailing extension; the previous gsub deleted every
# occurrence of the extension substring anywhere in the path, which could
# corrupt directory components that happened to contain it.
def absolute_path_base
  absolute_path.chomp File.extname( absolute_path )
end
# In git repo, where is the textual content (.asc) file?
def tree_text_file
  "#{tree_base}.asc"
end
# Compile node and children using specified compiler.
# If no base is specified, create a base in the build location root.
#
# compiler_type - currently only :node is supported.
#
# Returns a new compiler instance wrapping this node (compilation itself
# is driven by the returned object).
# Raises ArgumentError for unknown compiler types.
def compile(compiler_type)
  compiler =
    case compiler_type
    when :node
      GitLaw::Compilers::NodeCompiler
    else
      raise ArgumentError, "Unsupported compiler type: #{compiler_type}"
    end
  compiler.new( self )
end
end
end
Do not combine assignment and error checking in a single expression.
module GitFlow
  # This class represents a node in the structure of a legal document.
  # A node consists of a metadata JSON file and any other files in the same
  # directory that share the name of that metadata file but have a different
  # extension.
  class Node < WorkingFile
    include ActiveModel::Validations

    # Lifecycle hooks: on create, the container directory and text file are
    # set up and the attributes hash is serialized into the file content;
    # on update, attributes are re-serialized; destroying a node first
    # destroys its children, then cleans up its text file and container.
    before_create :initialize_container_file, :initialize_text_file,
      :attributes_to_content
    before_update :attributes_to_content
    before_destroy :remove_child_nodes
    after_destroy :remove_text_file, :remove_container_file

    # Pretty-printing options for JSON.generate, giving stable, diff-friendly
    # metadata files.
    JSON_WRITE_OPTIONS = {
      indent: ' ',
      space: ' ',
      object_nl: "\n",
      array_nl: "\n"
    }
# Generate appropriate file_name for a sibling node with new attributes
# that are different from the present node.
#
# attributes - Hash with "number" and/or "title" string keys.
# node_type  - Hash describing the node type (needs "label"), or nil/false.
#
# Returns a slug like "section-2.json" or "my-title.json".
def self.file_name( attributes, node_type )
  name =
    if attributes["number"] && node_type && node_type["label"]
      "#{node_type['label']}-#{attributes['number']}"
    else
      # to_s guards the untitled case: previously a missing "title"
      # crashed with NoMethodError on downcase!.
      attributes["title"].to_s
    end
  # Non-destructive chain instead of the old gsub!/sub! sequence: the bang
  # variants return nil when nothing changes, which makes them fragile.
  slug = name.downcase
             .gsub( /[^a-z0-9]+/, '-' )
             .sub( /\A-/, '' )
             .sub( /-\z/, '' )
  "#{slug}.json"
end
# Order nodes by type label, then by numeric "number", then by title.
def <=>(other)
  by_label = node_type["label"] <=> other.node_type["label"]
  return by_label unless by_label == 0

  by_number = attributes["number"].to_i <=> other.attributes["number"].to_i
  return by_number unless by_number == 0

  attributes["title"] <=> other.attributes["title"]
end
# Create a new child node of the current node, assigning it the next
# available number.
#
# attributes - Hash of attribute overrides merged over the generated number.
#
# Returns the new (unsaved) node.
# NOTE(review): child_nodes.sort! mutates the memoized @child_nodes array
# in place; the next number is derived from the last (highest-sorted)
# child's "number", defaulting to 0 when there are no children.
def new_child_node( attributes )
  child_nodes.sort!
  last_node = child_nodes.last
  number = ( last_node ? last_node.attributes["number"].to_i : 0 )
  intrinsic_attributes = {
    "number" => "#{number + 1}"
  }
  # New children take the first allowed child type.
  node_type = allowed_child_node_types.first
  node = git_flow_repo.working_file(
    File.join( child_container_file.tree,
      GitFlow::Node.file_name( intrinsic_attributes, node_type )
    )
  ).node
  # attributes= merges, so explicit overrides win over the generated number.
  node.attributes = intrinsic_attributes.merge( attributes )
  node
end
# NOTE(review): this override adds nothing over the inherited
# WorkingFile#initialize (it only forwards to super); kept for explicitness.
def initialize( git_flow_repo, tree )
  super( git_flow_repo, tree )
end
# Convert a tree path to a flat reference id: strip the trailing ".json"
# and turn path separators into underscores.
def self.to_reference( tree )
  tree.chomp( '.json' ).tr( '/', '_' )
end
# Flat reference id for this node, derived from its tree path.
def to_reference
  self.class.to_reference( tree )
end
# Takes a reference (usually to another node).
# Returns array of the full reference and the tree.
# The tree can be used to pull up metadata for referenced node.
#
# Walks down this node's own path until it reaches the component where the
# target path attaches, then appends the target — effectively resolving a
# relative reference against the nearest shared ancestor.
# Raises when the resolved target metadata file does not exist.
# NOTE(review): when the resolved path is a single component, the target
# node's existence is NOT verified and only [ target ] is returned —
# confirm that asymmetry is intentional.
def to_interpolated_reference( target )
  current = tree.split("/")
  target_parts = target.split("/")
  parts = []
  # Keep the leading components of our own path that precede the point
  # where the target path branches off.
  while current.any? && current.first != target_parts.first do
    parts << current.shift
  end
  parts += target_parts
  target_tree = parts.join "/"
  target_node = git_flow_repo.working_file( target_tree + ".json" ).node
  if parts.length > 1
    if target_node.exists?
      [ self.class.to_reference( parts.join('/') ), target_node ]
    else
      raise "Target node does not exist (#{tree}): #{target_node.tree}"
    end
  else
    [ target ]
  end
end
# True when +node+ lives at or beneath this node's base path.
#
# Equivalent to the previous anchored-regexp match
# (node.tree =~ /^#{Regexp.escape tree_base}/) but without compiling a
# regexp per call, and it returns a real boolean instead of a character
# offset / nil.
def ancestor_of_node?( node )
  node.tree.start_with?( tree_base )
end
# Depth-first list of every node underneath this one (memoized).
def descendent_nodes
  @descendent_nodes ||= child_nodes.flat_map do |child|
    [ child ] + child.descendent_nodes
  end
end
# Find descendant nodes whose value for +key+ matches +value+ (regexp match).
# Nodes lacking the attribute are excluded.
# NOTE(review): +value+ is interpolated into the regexp unescaped, so
# metacharacters are interpreted — confirm callers pass trusted input.
def find( key, value )
  pattern = /#{value}/
  descendent_nodes.select do |node|
    attribute = node.attributes[key]
    attribute && attribute =~ pattern
  end
end
# Returns the working file holding this node's textual content (memoized).
def text_file
  @text_file ||= git_flow_repo.working_file( tree_text_file )
end
# Initialize the text file associated with this node, if applicable.
# Only node types flagged with "text" get a content file, and an existing
# file is never recreated. Runs as a before_create callback.
def initialize_text_file
  text_file.create if node_type && node_type["text"] && !text_file.exists?
end
# Remove the text file associated with this node, if it exists.
# Runs as an after_destroy callback.
def remove_text_file
  text_file.destroy if text_file.exists?
end
# Get container file (directory) in which this node is located (memoized).
#
# The directory is the parent node's tree_base; false when the node has no
# parent or sits directly under the root.
def container_file
  return @container_file unless @container_file.nil?
  @container_file =
    if parent_node && !parent_node.root?
      git_flow_repo.working_file( parent_node.tree_base )
    else
      false
    end
end
# Get the working file (directory) where children of this node live
# (memoized). The root node's children live at the repository root ('').
def child_container_file
  return @child_container_file unless @child_container_file.nil?
  @child_container_file = git_flow_repo.working_file( root? ? '' : tree_base )
end
# Initialize the directory in which this node is placed, if applicable.
# Runs as a before_create callback; no-op when the node has no container
# or the directory already exists.
def initialize_container_file
  container_file.create true if container_file && !container_file.exists?
end
# Removes the container directory for this node, if applicable.
# Only an existing directory with no remaining children is destroyed.
def remove_container_file
  return unless container_file && container_file.exists?
  container_file.destroy if container_file.children.empty?
end
# Removes child nodes of this node; each child's own destroy callbacks
# cascade further down the tree.
def remove_child_nodes
  child_nodes.each(&:destroy)
end
# Moves node and associated files to new tree location.
# Returns reference to the moved node, or false when the destination is
# already occupied or could not be prepared (see move_to_node).
#
# The metadata file, text file, and child container directory each move
# separately. NOTE(review): the child_container_file move does not pass
# force: true, unlike the other two — confirm whether that is intentional.
def move( to_tree )
  to_node = move_to_node( to_tree )
  return false unless to_node
  new_file = super( to_tree, force: true )
  if text_file.exists?
    text_file.move to_node.text_file.tree, force: true
  end
  if child_container_file.exists?
    child_container_file.move to_node.child_container_file.tree
  end
  new_file.node
end
# Set up the node to which this node is being moved.
#
# Returns false when any destination artifact (metadata file, text file,
# or child container) already exists, or when the destination node could
# not be created; otherwise returns the freshly created destination node.
def move_to_node( to_tree )
  to_node = git_flow_repo.working_file( to_tree ).node
  return false if to_node.exists?
  return false if to_node.text_file.exists?
  return false if to_node.child_container_file.exists?
  return false unless to_node.create
  to_node
end
# TODO destroy vs. repeal
# Destroy should simply delete all files for this and child nodes
# Destroy should only be an option for nodes not in current law
# Repeal should remove content and add repeal metadata to this and child nodes
# Repeal should only be an option for nodes in current law
# This must be figured out before submission, so legislature votes on repeals
# only.
# What structure do child nodes of this node have? (memoized)
# Pulls from custom settings of this node or pulls down settings from above.
# Returns empty array when no children are supported.
def child_node_structure
  @child_node_structure ||= compute_child_node_structure
end
# Compute the structure for child nodes of this node.
#
# Resolution order: the root's children are always codes; an explicit
# "structure" attribute on this node wins next; otherwise the structure is
# inherited from the parent (the tail of the parent's structure after this
# node's own entry).
def compute_child_node_structure
  # Root node children are always codes
  if root?
    [ { "label" => "code",
        "number" => false,
        "title" => true,
        "text" => false } ]
  # Otherwise, check if this node defines a structure for its children
  elsif attributes["structure"]
    attributes["structure"]
  # Otherwise, compute structure from parent
  else
    compute_child_node_structure_from_parent
  end
end
# Where does this node appear in its parent's structure?
# Returns the integer index, or nil when it does not: either this node has
# no "type" attribute, or the type's label is absent from the parent's
# structure (Array#index returns nil). (The old comments said "false",
# but the method has always returned nil.)
def compute_child_node_index_from_parent
  # The node must have a type to check against parent structure
  if attributes['type']
    parent_node.child_node_structure.index do |s|
      s['label'] == attributes['type']
    end
  # If not, return nil -- we cannot use the parent structure
  else
    nil
  end
end
# Compute the child node structure from the parent
def compute_child_node_structure_from_parent
parent_index = compute_child_node_index_from_parent
return [] unless parent_index
# Child node structure starts to right of parent's child node structure
start = parent_index + 1
# If a child structure exists to the right of the parent's child node
# structure, grab that for this node
if parent_node.child_node_structure.length > start
return parent_node.child_node_structure[
start..(parent_node.child_node_structure.length - 1)
]
# Otherwise, there is no structure from the parent to inherit
else
return []
end
end
# Given an array representing structure of a node, return the members that
# are allowed as children
# This will return the first entry if it is not optional
# Otherwise, it will continue until it reaches the end of the structure
# or a required level
def self.allowed_node_types( structure )
out = []
structure.each do |type|
out << type
break unless type['optional']
end
out
end
# Returns the allowed types for children of this node
def allowed_child_node_types
@allowed_child_node_types ||=
self.class.allowed_node_types( child_node_structure )
end
# Returns the node types allowed for this node
def allowed_node_types
@allowed_node_types ||=
self.class.allowed_node_types( node_structure )
end
# The type of this node
def node_type
return @node_type unless @node_type.nil?
@node_type =
if root?
{ title: true }
elsif attributes["type"]
allowed_node_types.find { |type| type["label"] == attributes["type"] }
elsif allowed_node_types.length == 1
allowed_node_types.first
else
false
end
end
# Get the properly formatted number for this node
def node_number
return unless node_type && attributes["number"]
number = attributes["number"].to_i
case node_type["number"]
when 'R'
number.to_roman
when 'r'
number.to_roman.downcase
when 'A'
number.to_alpha.upcase
when 'a'
number.to_alpha
else
number
end
end
# Render the label for the node type
def node_label
node_type['label']
end
# Render the full title of the node
def node_title
title = node_number ? "#{node_label.capitalize} #{node_number}. " : ""
title = "#{title}#{attributes['title']}" if attributes['title']
title
end
# Short title for node
def node_short_title
if node_number
"#{node_label.capitalize} #{node_number}"
else
"#{attributes['title']}"
end
end
# Render titles of parent nodes
def node_title_context
ancestor_nodes.reject(&:root?).reject { |ancestor|
ancestor.tree == tree
}.map(&:node_short_title).join(', ')
end
# Returns structural configuration for this node
# If the node has no parent, it is a root-level "code"
def node_structure
return @node_structure unless @node_structure.nil?
@node_structure =
if parent_node && !parent_node.child_node_structure.empty?
parent_node.child_node_structure
else
[]
end
end
# Returns child nodes of this node
def child_nodes
return @child_nodes unless @child_nodes.nil?
@child_nodes =
if child_container_file && child_container_file.exists?
child_container_file.children.select(&:is_node?).map(&:node)
else
[]
end
end
# Return ancestor nodes, including self.
#
# Returns an Array ordered root-most first, ending with self. Memoized.
def ancestor_nodes
  return @ancestor_nodes unless @ancestor_nodes.nil?
  @ancestor_nodes =
    if parent_node
      # Build a fresh array here: the previous `parent_node.ancestor_nodes
      # << self` appended self directly into the parent's memoized
      # @ancestor_nodes array, corrupting the parent's cached ancestry for
      # every subsequent caller (and accumulating one sibling per call).
      parent_node.ancestor_nodes + [ self ]
    else
      [ self ]
    end
end
# Retrieves the parent node
def parent_node
return @parent_node unless @parent_node.nil?
@parent_node =
if tree_parent.empty? && !root?
git_flow_repo.working_file( '' ).node
elsif File.exist? absolute_parent_node_path
git_flow_repo.working_file( tree_parent_node ).node
else
false
end
parent_node
end
# Retrieves the attributes of this node
# JSON contents of node are parsed and returned as a hash
def attributes
return @attributes unless @attributes.nil?
@attributes =
if root?
{ "title" => "/" }
elsif exists? || content.present?
JSON.parse content
else
{ }
end
end
# Recursive sorting algorithm for hash, array, value nest structures
def self.sorted_attributes(attributes)
if attributes.is_a? Hash
attributes.keys.sort.inject({}) do |memo, key|
memo[key] = sorted_attributes( attributes[key] )
memo
end
elsif attributes.is_a? Array
# Do not sort an array if the entries are not comparable
attributes.each do |attribute|
unless attribute.class.included_modules.include? Comparable
return attributes.map { |value| sorted_attributes( value ) }
end
end
attributes.sort.map { |value| sorted_attributes( value ) }
else
attributes
end
end
# Returns attributes sorted
# Useful for writing attributes to JSON in predictable order
def sorted_attributes
self.class.sorted_attributes attributes
end
# Returns sorted_attributes as JSON content suitable to write to file
def attributes_to_content
self.content = JSON.generate( sorted_attributes, JSON_WRITE_OPTIONS )
end
# Set attribute values from hash
def attributes=(values)
attributes.merge! values
attributes
end
# What is the path to the parent node in the git repo?
def tree_parent_node
tree_parent + ".json"
end
# What is the path to the parent node in the git repo?
def absolute_parent_node_path
absolute_parent_path + ".json"
end
# What is the path to the parent directory in the git repo?
def tree_parent
return @tree_parent if @tree_parent
@tree_parent = File.dirname tree
@tree_parent = '' if @tree_parent == '.'
tree_parent
end
# What is the path to the parent directory in the working directory?
def absolute_parent_path
File.dirname absolute_path
end
# In the git repo, what is the path to the node without extension?
#
# Uses chomp so only the trailing extension is removed. The previous
# `gsub File.extname(tree), ''` deleted every occurrence of the extension
# substring, which would also mangle any directory component containing it
# (e.g. "notes.json/a.json" became "notes/a" instead of "notes.json/a").
def tree_base
  tree.chomp File.extname( tree )
end
# In absolute path, where is the working file for the node, without extension?
#
# chomp removes only the trailing extension; the previous gsub deleted every
# occurrence of the extension substring anywhere in the path, which could
# corrupt directory components that happened to contain it.
def absolute_path_base
  absolute_path.chomp File.extname( absolute_path )
end
# In git repo, where is the content file?
def tree_text_file
tree_base + ".asc"
end
# Compile node and children using specified compiler
# If no base is specified, create a base in the build location root
def compile(compiler_type)
compiler =
case compiler_type
when :node
GitLaw::Compilers::NodeCompiler
else
raise ArgumentError, "Unsupported compiler type: #{compiler_type}"
end
compiler.new( self )
end
end
end
|
require 'photish/gallery/album'
require 'photish/gallery/traits/albumable'
module Photish
  module Gallery
    # The top-level gallery object: the root that owns every album.
    class Collection
      include ::Photish::Gallery::Traits::Urlable
      include ::Photish::Gallery::Traits::Albumable

      attr_reader :qualities

      # path      - filesystem root of the gallery
      # qualities - configured image quality variants
      def initialize(path, qualities)
        @path = path
        @qualities = qualities
      end

      # The collection sits at the site root, so it contributes no URL parts.
      def base_url_parts
        []
      end

      # Every photo from every album, as one flat list.
      def all_photos
        all_albums.map(&:photos).flatten
      end

      private

      attr_reader :path

      # The collection page renders as the site index.
      def url_end
        'index.html'
      end

      # Albums under a collection are plain Album instances.
      def album_class
        Album
      end
    end
  end
end
Add an `all_images` method that returns every image across all photos.
require 'photish/gallery/album'
require 'photish/gallery/traits/albumable'
module Photish
  module Gallery
    # The top-level gallery object: the root that owns every album.
    class Collection
      include ::Photish::Gallery::Traits::Urlable
      include ::Photish::Gallery::Traits::Albumable

      attr_reader :qualities

      # path      - filesystem root of the gallery
      # qualities - configured image quality variants
      def initialize(path, qualities)
        @path = path
        @qualities = qualities
      end

      # The collection sits at the site root, so it contributes no URL parts.
      def base_url_parts
        []
      end

      # Every photo from every album, as one flat list.
      def all_photos
        all_albums.map(&:photos).flatten
      end

      # Every image (each photo's quality variants), as one flat list.
      def all_images
        all_photos.map(&:images).flatten
      end

      private

      attr_reader :path

      # The collection page renders as the site index.
      def url_end
        'index.html'
      end

      # Albums under a collection are plain Album instances.
      def album_class
        Album
      end
    end
  end
end
|
module Github
  class Import
    # Lightweight ActiveRecord proxies used only during bulk import. They
    # target the real tables but with save/commit/update/validation
    # callbacks stripped — presumably so inserting imported rows does not
    # trigger the application's normal side effects (confirm).
    class MergeRequest < ::MergeRequest
      self.table_name = 'merge_requests'

      self.reset_callbacks :save
      self.reset_callbacks :commit
      self.reset_callbacks :update
      self.reset_callbacks :validate
    end

    class Issue < ::Issue
      self.table_name = 'issues'

      self.reset_callbacks :save
      self.reset_callbacks :commit
      self.reset_callbacks :update
      self.reset_callbacks :validate
    end

    class Note < ::Note
      self.table_name = 'notes'

      self.reset_callbacks :save
      self.reset_callbacks :commit
      self.reset_callbacks :update
      self.reset_callbacks :validate
    end

    # errors accumulates { type:, url:, error: } hashes; the cached_* hashes
    # memoize label ids, GitHub-user presence, and user id lookups.
    attr_reader :project, :repository, :options, :cached_label_ids,
      :cached_gitlab_users, :cached_user_ids, :errors
# project - the GitLab project receiving the import.
# options - passed through to Github::Client (e.g. credentials).
def initialize(project, options)
  @project = project
  @repository = project.repository
  @options = options
  # Lookup caches shared across the whole import run.
  @cached_label_ids = {}
  @cached_user_ids = {}
  @cached_gitlab_users = {}
  @errors = []
end
# Imports the GitHub repository owner/repo into the GitLab project:
# the git data itself, then labels, milestones, pull requests (with
# comments), and issues (with comments).
#
# Returns the accumulated array of error hashes (possibly empty).
def execute(owner, repo)
  fetch_repository(owner, repo)
  fetch_labels(owner, repo)
  cache_labels!
  fetch_milestones(owner, repo)
  fetch_pull_requests(owner, repo)
  fetch_issues(owner, repo)

  repository.expire_content_cache
  errors
end

# Iterate every page of a paginated GitHub endpoint, yielding each page's
# response. Uses an explicit `while` on the :next link instead of the old
# `loop do ... break unless url = response.rels[:next]` shape, which buried
# an assignment inside the break condition.
def each_page(url, params = nil)
  while url
    client = Github::Client.new(options)
    response = params ? client.get(url, params) : client.get(url)
    yield response
    url = response.rels[:next]
  end
end

# Create the project repository and mirror-fetch the git data from GitHub.
def fetch_repository(owner, repo)
  project.create_repository
  project.repository.add_remote('github', "https://{token}@github.com/#{owner}/#{repo}.git")
  project.repository.set_remote_as_mirror('github')
  project.repository.fetch_remote('github', forced: true)
rescue Gitlab::Shell::Error => e
  error(:project, "https://github.com/#{owner}/#{repo}.git", e.message)
end

# Import labels that do not exist on the project yet.
def fetch_labels(owner, repo)
  each_page("/repos/#{owner}/#{repo}/labels") do |response|
    response.body.each do |raw|
      label = nil
      begin
        label = Github::Representation::Label.new(raw)
        # TODO: we should take group labels in account
        next if project.labels.where(title: label.title).exists?
        project.labels.create!(title: label.title, color: label.color)
      rescue => e
        # label is nil when building the representation itself failed;
        # the old `label.url` raised NoMethodError inside this rescue.
        error(:label, label&.url, e.message)
      end
    end
  end
end

# Cache project label ids by title for quick lookup during the import.
def cache_labels!
  # TODO: we should take group labels in account
  project.labels.select(:id, :title).find_each do |label|
    @cached_label_ids[label.title] = label.id
  end
end

# Import milestones that do not exist on the project yet.
def fetch_milestones(owner, repo)
  each_page("/repos/#{owner}/#{repo}/milestones", state: :all) do |response|
    response.body.each do |raw|
      milestone = nil
      begin
        milestone = Github::Representation::Milestone.new(raw)
        next if project.milestones.where(iid: milestone.iid).exists?

        project.milestones.create!(
          iid: milestone.iid,
          title: milestone.title,
          description: milestone.description,
          due_date: milestone.due_date,
          state: milestone.state,
          created_at: milestone.created_at,
          updated_at: milestone.updated_at
        )
      rescue => e
        # milestone may be nil when the representation failed to build.
        error(:milestone, milestone&.url, e.message)
      end
    end
  end
end

# Import pull requests as merge requests, restoring deleted source/target
# branches long enough to build the diffs, then cleaning them up.
def fetch_pull_requests(owner, repo)
  each_page("/repos/#{owner}/#{repo}/pulls", state: :all, sort: :created, direction: :asc) do |response|
    response.body.each do |raw|
      pull_request = Github::Representation::PullRequest.new(project, raw, options)
      merge_request = MergeRequest.find_or_initialize_by(iid: pull_request.iid, source_project_id: project.id)
      next unless merge_request.new_record? && pull_request.valid?

      begin
        restore_source_branch(pull_request) unless pull_request.source_branch_exists?
        restore_target_branch(pull_request) unless pull_request.target_branch_exists?

        author_id = user_id(pull_request.author, project.creator_id)
        merge_request.iid = pull_request.iid
        merge_request.title = pull_request.title
        merge_request.description = format_description(pull_request.description, pull_request.author)
        merge_request.source_project = pull_request.source_project
        merge_request.source_branch = pull_request.source_branch_name
        merge_request.source_branch_sha = pull_request.source_branch_sha
        merge_request.target_project = pull_request.target_project
        merge_request.target_branch = pull_request.target_branch_name
        merge_request.target_branch_sha = pull_request.target_branch_sha
        merge_request.state = pull_request.state
        merge_request.milestone_id = milestone_id(pull_request.milestone)
        merge_request.author_id = author_id
        merge_request.assignee_id = user_id(pull_request.assignee)
        merge_request.created_at = pull_request.created_at
        merge_request.updated_at = pull_request.updated_at
        merge_request.save!(validate: false)
        merge_request.merge_request_diffs.create

        # Fetch review comments
        review_comments_url = "/repos/#{owner}/#{repo}/pulls/#{pull_request.iid}/comments"
        fetch_comments(merge_request, :review_comment, review_comments_url)

        # Fetch comments
        comments_url = "/repos/#{owner}/#{repo}/issues/#{pull_request.iid}/comments"
        fetch_comments(merge_request, :comment, comments_url)
      rescue => e
        error(:pull_request, pull_request.url, e.message)
      ensure
        clean_up_restored_branches(pull_request)
      end
    end
  end
end

# Import issues; for issue records that are actually pull requests, only
# label assignments are applied to the already-imported merge request.
def fetch_issues(owner, repo)
  each_page("/repos/#{owner}/#{repo}/issues", state: :all, sort: :created, direction: :asc) do |response|
    response.body.each do |raw|
      representation = Github::Representation::Issue.new(raw, options)

      begin
        # Every pull request is an issue, but not every issue
        # is a pull request. For this reason, "shared" actions
        # for both features, like manipulating assignees, labels
        # and milestones, are provided within the Issues API.
        if representation.pull_request?
          next unless representation.has_labels?

          merge_request = MergeRequest.find_by!(target_project_id: project.id, iid: representation.iid)
          merge_request.update_attribute(:label_ids, label_ids(representation.labels))
        else
          next if Issue.where(iid: representation.iid, project_id: project.id).exists?

          author_id = user_id(representation.author, project.creator_id)
          issue = Issue.new
          issue.iid = representation.iid
          issue.project_id = project.id
          issue.title = representation.title
          issue.description = format_description(representation.description, representation.author)
          issue.state = representation.state
          issue.label_ids = label_ids(representation.labels)
          issue.milestone_id = milestone_id(representation.milestone)
          issue.author_id = author_id
          issue.assignee_id = user_id(representation.assignee)
          issue.created_at = representation.created_at
          issue.updated_at = representation.updated_at
          issue.save!(validate: false)

          if representation.has_comments?
            # Fetch comments
            comments_url = "/repos/#{owner}/#{repo}/issues/#{issue.iid}/comments"
            fetch_comments(issue, :comment, comments_url)
          end
        end
      rescue => e
        error(:issue, representation.url, e.message)
      end
    end
  end
end
private
# Import every page of comments for the given noteable.
#
# noteable - the Issue or MergeRequest the notes belong to.
# type     - error-reporting tag (:comment or :review_comment).
# url      - first page of the GitHub comments endpoint.
def fetch_comments(noteable, type, url)
  # Explicit `while` on the :next link instead of the old `loop` with an
  # assignment buried in the break condition.
  while url
    comments = Github::Client.new(options).get(url)

    # no_touching keeps the imported notes from bumping their noteable's
    # updated_at timestamps.
    ActiveRecord::Base.no_touching do
      comments.body.each do |raw|
        representation = nil
        begin
          representation = Github::Representation::Comment.new(raw, options)
          author_id = user_id(representation.author, project.creator_id)

          note = Note.new
          note.project_id = project.id
          note.noteable = noteable
          note.note = format_description(representation.note, representation.author)
          note.commit_id = representation.commit_id
          note.line_code = representation.line_code
          note.author_id = author_id
          note.type = representation.type
          note.created_at = representation.created_at
          note.updated_at = representation.updated_at
          note.save!(validate: false)
        rescue => e
          # representation is nil when building it failed; the old
          # `representation.url` raised NoMethodError inside this rescue.
          error(type, representation&.url, e.message)
        end
      end
    end

    url = comments.rels[:next]
  end
end
# Recreate the (deleted) source branch at the PR's recorded head SHA so the
# merge request diff can be generated.
def restore_source_branch(pull_request)
  repository.create_branch(pull_request.source_branch_name, pull_request.source_branch_sha)
end
# Recreate the (deleted) target branch at the PR's recorded base SHA so the
# merge request diff can be generated.
def restore_target_branch(pull_request)
  repository.create_branch(pull_request.target_branch_name, pull_request.target_branch_sha)
end
# Delete a temporary branch; a failed delete is recorded as an error
# rather than aborting the import.
def remove_branch(name)
  repository.delete_branch(name)
rescue Rugged::ReferenceError
  errors << { type: :branch, url: nil, error: "Could not clean up restored branch: #{name}" }
end
# Remove branches that were only restored for diff generation. Open PRs
# keep their branches; branches that still exist upstream are untouched.
def clean_up_restored_branches(pull_request)
  return if pull_request.opened?

  remove_branch(pull_request.source_branch_name) unless pull_request.source_branch_exists?
  remove_branch(pull_request.target_branch_name) unless pull_request.target_branch_exists?
end
# Map GitHub label hashes to cached GitLab label ids, dropping labels
# that were not imported (compact removes the nil lookups).
def label_ids(issuable)
  issuable.map { |attrs| cached_label_ids[attrs.fetch('name')] }.compact
end
# GitLab milestone id matching a GitHub milestone's iid; nil when the
# milestone is absent or was not imported.
def milestone_id(milestone)
  return unless milestone.present?

  project.milestones.select(:id).find_by(iid: milestone.iid)&.id
end
# Resolve a GitHub user to a GitLab user id, memoized per GitHub user id.
#
# Falls back to fallback_id (e.g. the project creator) when no GitLab user
# matches by external uid or email. cached_gitlab_users records whether a
# real match was found — format_description uses it to decide whether to
# prefix an attribution line.
def user_id(user, fallback_id = nil)
  return unless user.present?
  return cached_user_ids[user.id] if cached_user_ids.key?(user.id)

  gitlab_user_id = find_by_external_uid(user.id) || find_by_email(user.email)

  cached_gitlab_users[user.id] = gitlab_user_id.present?
  cached_user_ids[user.id] = gitlab_user_id || fallback_id
end
# GitLab user id for any user matching the given email; nil when the email
# is missing or unknown.
def find_by_email(email)
  return nil unless email

  ::User.find_by_any_email(email)&.id
end
# GitLab user id for a user whose github identity has the given external
# uid; nil when the id is missing or no identity matches.
def find_by_external_uid(id)
  return nil unless id

  identities = ::Identity.arel_table

  ::User.select(:id)
        .joins(:identities)
        .where(identities[:provider].eq(:github).and(identities[:extern_uid].eq(id)))
        .first&.id
end
# Prefix the body with a "Created by" attribution when the original GitHub
# author has no matching GitLab account (see user_id's cached_gitlab_users).
def format_description(body, author)
  return body if cached_gitlab_users[author.id]

  "*Created by: #{author.username}*\n\n#{body}"
end
# Record a non-fatal import error; the URL is sanitized so credentials
# embedded in it are not leaked into the error report.
def error(type, url, message)
  errors << { type: type, url: Gitlab::UrlSanitizer.sanitize(url), error: message }
end
end
end
Use `while` instead of `loop` with `break`.
module Github
class Import
class MergeRequest < ::MergeRequest
self.table_name = 'merge_requests'
self.reset_callbacks :save
self.reset_callbacks :commit
self.reset_callbacks :update
self.reset_callbacks :validate
end
class Issue < ::Issue
self.table_name = 'issues'
self.reset_callbacks :save
self.reset_callbacks :commit
self.reset_callbacks :update
self.reset_callbacks :validate
end
class Note < ::Note
self.table_name = 'notes'
self.reset_callbacks :save
self.reset_callbacks :commit
self.reset_callbacks :update
self.reset_callbacks :validate
end
attr_reader :project, :repository, :options, :cached_label_ids,
:cached_gitlab_users, :cached_user_ids, :errors
def initialize(project, options)
@project = project
@repository = project.repository
@options = options
@cached_label_ids = {}
@cached_user_ids = {}
@cached_gitlab_users = {}
@errors = []
end
# Imports the GitHub repository owner/repo into the GitLab project:
# the git data itself, then labels, milestones, pull requests (with
# comments), and issues (with comments).
#
# Returns the accumulated array of error hashes (possibly empty).
def execute(owner, repo)
  # Fetch repository
  begin
    project.create_repository
    project.repository.add_remote('github', "https://{token}@github.com/#{owner}/#{repo}.git")
    project.repository.set_remote_as_mirror('github')
    project.repository.fetch_remote('github', forced: true)
  rescue Gitlab::Shell::Error => e
    error(:project, "https://github.com/#{owner}/#{repo}.git", e.message)
  end

  # Fetch labels
  url = "/repos/#{owner}/#{repo}/labels"

  while url
    response = Github::Client.new(options).get(url)

    response.body.each do |raw|
      label = nil
      begin
        label = Github::Representation::Label.new(raw)
        next if project.labels.where(title: label.title).exists?

        project.labels.create!(title: label.title, color: label.color)
      rescue => e
        # label is nil when building the representation itself failed;
        # calling label.url here raised NoMethodError inside the rescue.
        error(:label, label&.url, e.message)
      end
    end

    url = response.rels[:next]
  end

  # Cache labels
  # TODO: we should take group labels in account
  project.labels.select(:id, :title).find_each do |label|
    @cached_label_ids[label.title] = label.id
  end

  # Fetch milestones
  url = "/repos/#{owner}/#{repo}/milestones"

  while url
    response = Github::Client.new(options).get(url, state: :all)

    response.body.each do |raw|
      milestone = nil
      begin
        milestone = Github::Representation::Milestone.new(raw)
        next if project.milestones.where(iid: milestone.iid).exists?

        project.milestones.create!(
          iid: milestone.iid,
          title: milestone.title,
          description: milestone.description,
          due_date: milestone.due_date,
          state: milestone.state,
          created_at: milestone.created_at,
          updated_at: milestone.updated_at
        )
      rescue => e
        # milestone may be nil when the representation failed to build.
        error(:milestone, milestone&.url, e.message)
      end
    end

    url = response.rels[:next]
  end

  # Fetch pull requests
  url = "/repos/#{owner}/#{repo}/pulls"

  while url
    response = Github::Client.new(options).get(url, state: :all, sort: :created, direction: :asc)

    response.body.each do |raw|
      pull_request = Github::Representation::PullRequest.new(project, raw, options)
      merge_request = MergeRequest.find_or_initialize_by(iid: pull_request.iid, source_project_id: project.id)
      next unless merge_request.new_record? && pull_request.valid?

      begin
        # Deleted branches are restored temporarily so diffs can be built.
        restore_source_branch(pull_request) unless pull_request.source_branch_exists?
        restore_target_branch(pull_request) unless pull_request.target_branch_exists?

        author_id = user_id(pull_request.author, project.creator_id)
        merge_request.iid = pull_request.iid
        merge_request.title = pull_request.title
        merge_request.description = format_description(pull_request.description, pull_request.author)
        merge_request.source_project = pull_request.source_project
        merge_request.source_branch = pull_request.source_branch_name
        merge_request.source_branch_sha = pull_request.source_branch_sha
        merge_request.target_project = pull_request.target_project
        merge_request.target_branch = pull_request.target_branch_name
        merge_request.target_branch_sha = pull_request.target_branch_sha
        merge_request.state = pull_request.state
        merge_request.milestone_id = milestone_id(pull_request.milestone)
        merge_request.author_id = author_id
        merge_request.assignee_id = user_id(pull_request.assignee)
        merge_request.created_at = pull_request.created_at
        merge_request.updated_at = pull_request.updated_at
        merge_request.save!(validate: false)
        merge_request.merge_request_diffs.create

        # Fetch review comments
        review_comments_url = "/repos/#{owner}/#{repo}/pulls/#{pull_request.iid}/comments"
        fetch_comments(merge_request, :review_comment, review_comments_url)

        # Fetch comments
        comments_url = "/repos/#{owner}/#{repo}/issues/#{pull_request.iid}/comments"
        fetch_comments(merge_request, :comment, comments_url)
      rescue => e
        error(:pull_request, pull_request.url, e.message)
      ensure
        clean_up_restored_branches(pull_request)
      end
    end

    url = response.rels[:next]
  end

  # Fetch issues
  url = "/repos/#{owner}/#{repo}/issues"

  while url
    response = Github::Client.new(options).get(url, state: :all, sort: :created, direction: :asc)

    response.body.each do |raw|
      representation = Github::Representation::Issue.new(raw, options)

      begin
        # Every pull request is an issue, but not every issue
        # is a pull request. For this reason, "shared" actions
        # for both features, like manipulating assignees, labels
        # and milestones, are provided within the Issues API.
        if representation.pull_request?
          next unless representation.has_labels?

          merge_request = MergeRequest.find_by!(target_project_id: project.id, iid: representation.iid)
          merge_request.update_attribute(:label_ids, label_ids(representation.labels))
        else
          next if Issue.where(iid: representation.iid, project_id: project.id).exists?

          author_id = user_id(representation.author, project.creator_id)
          issue = Issue.new
          issue.iid = representation.iid
          issue.project_id = project.id
          issue.title = representation.title
          issue.description = format_description(representation.description, representation.author)
          issue.state = representation.state
          issue.label_ids = label_ids(representation.labels)
          issue.milestone_id = milestone_id(representation.milestone)
          issue.author_id = author_id
          issue.assignee_id = user_id(representation.assignee)
          issue.created_at = representation.created_at
          issue.updated_at = representation.updated_at
          issue.save!(validate: false)

          if representation.has_comments?
            # Fetch comments
            comments_url = "/repos/#{owner}/#{repo}/issues/#{issue.iid}/comments"
            fetch_comments(issue, :comment, comments_url)
          end
        end
      rescue => e
        error(:issue, representation.url, e.message)
      end
    end

    url = response.rels[:next]
  end

  repository.expire_content_cache
  errors
end
private
# Import every comment from the paginated GitHub endpoint at +url+ and
# attach each one as a Note to +noteable+ (an Issue or MergeRequest).
#
# noteable - the ActiveRecord record the notes belong to
# type     - symbol used when recording errors (e.g. :comment)
# url      - first page of the GitHub comments API endpoint
def fetch_comments(noteable, type, url)
  while url
    comments = Github::Client.new(options).get(url)
    # no_touching prevents the noteable's updated_at from being bumped
    # once per imported note.
    ActiveRecord::Base.no_touching do
      comments.body.each do |raw|
        begin
          representation = Github::Representation::Comment.new(raw, options)
          author_id = user_id(representation.author, project.creator_id)
          note = Note.new
          note.project_id = project.id
          note.noteable = noteable
          note.note = format_description(representation.note, representation.author)
          note.commit_id = representation.commit_id
          note.line_code = representation.line_code
          note.author_id = author_id
          note.type = representation.type
          note.created_at = representation.created_at
          note.updated_at = representation.updated_at
          # Skip validations: historical data may not satisfy current rules.
          note.save!(validate: false)
        rescue => e
          # NOTE(review): if Comment.new itself raises, `representation` is
          # nil here and this rescue would raise NoMethodError — confirm.
          error(type, representation.url, e.message)
        end
      end
    end
    # Follow the pagination Link header; nil ends the loop.
    url = comments.rels[:next]
  end
end
# Recreate the pull request's source branch from its recorded SHA so the
# merge request can be imported even if the branch was deleted upstream.
def restore_source_branch(pull_request)
  repository.create_branch(pull_request.source_branch_name, pull_request.source_branch_sha)
end
# Recreate the pull request's target branch from its recorded SHA (see
# restore_source_branch).
def restore_target_branch(pull_request)
  repository.create_branch(pull_request.target_branch_name, pull_request.target_branch_sha)
end
# Delete a branch that was restored during import. A missing/unresolvable
# reference is recorded as an error rather than raised, so cleanup of the
# remaining branches continues.
def remove_branch(name)
  repository.delete_branch(name)
rescue Rugged::ReferenceError
  errors << { type: :branch, url: nil, error: "Could not clean up restored branch: #{name}" }
end
# Remove branches that were restored only for the purpose of importing a
# closed/merged pull request. Open pull requests keep their branches; a
# branch that legitimately exists upstream is also left alone.
def clean_up_restored_branches(pull_request)
  return if pull_request.opened?

  unless pull_request.source_branch_exists?
    remove_branch(pull_request.source_branch_name)
  end
  unless pull_request.target_branch_exists?
    remove_branch(pull_request.target_branch_name)
  end
end
# Translate the GitHub label hashes on +issuable+ into cached GitLab label
# ids, dropping labels with no local counterpart. Raises KeyError if a raw
# label is missing its 'name' key (same as the original fetch behaviour).
def label_ids(issuable)
  names = issuable.map { |attrs| attrs.fetch('name') }
  names.map { |label_name| cached_label_ids[label_name] }.compact
end
# Map a GitHub milestone representation to the local milestone id, matching
# on iid. Returns nil when no milestone is given or no local match exists.
def milestone_id(milestone)
  return unless milestone.present?
  project.milestones.select(:id).find_by(iid: milestone.iid)&.id
end
# Resolve a GitHub user to a GitLab user id, memoising the result.
#
# user        - GitHub user representation (may be nil/blank)
# fallback_id - id used when no matching GitLab user exists
#
# Side effects: records whether a real GitLab user was found in
# cached_gitlab_users (consumed by format_description) and the resolved id
# (or fallback) in cached_user_ids.
# NOTE(review): the fallback is cached as well, so a later call with a
# different fallback_id returns the first cached value — confirm intended.
def user_id(user, fallback_id = nil)
  return unless user.present?
  return cached_user_ids[user.id] if cached_user_ids.key?(user.id)
  gitlab_user_id = find_by_external_uid(user.id) || find_by_email(user.email)
  cached_gitlab_users[user.id] = gitlab_user_id.present?
  cached_user_ids[user.id] = gitlab_user_id || fallback_id
end
# Look up a GitLab user id by any of the user's email addresses; nil when
# no email is given or no user matches.
def find_by_email(email)
  email ? ::User.find_by_any_email(email)&.id : nil
end
# Find the id of the GitLab user whose 'github' OAuth identity carries the
# given external uid. Returns nil when id is blank or nothing matches.
def find_by_external_uid(id)
  return nil unless id
  identities = ::Identity.arel_table
  ::User.select(:id)
        .joins(:identities)
        .where(identities[:provider].eq(:github).and(identities[:extern_uid].eq(id)))
        .first&.id
end
# Return +body+ unchanged when the author maps to a real GitLab user;
# otherwise prefix it with an attribution line naming the GitHub author.
def format_description(body, author)
  if cached_gitlab_users[author.id]
    body
  else
    "*Created by: #{author.username}*\n\n#{body}"
  end
end
# Record a single import error (with credentials stripped from the URL)
# for reporting at the end of the import run.
def error(type, url, message)
  errors << { type: type, url: Gitlab::UrlSanitizer.sanitize(url), error: message }
end
end
end
|
module PkgForge
  ##
  # Add upload methods to Forge
  class Forge
    # Publish the forge: uploads every recorded artifact.
    # (Each `Contract` line applies to the very next `def`.)
    Contract None => nil
    def push!
      upload_artifacts!
    end

    private

    # Record an artifact (:name, optional :long_name, :source path) in the
    # shared state hash for later exposure/upload.
    Contract HashOf[Symbol => String] => nil
    def add_artifact(params)
      state[:artifacts] ||= []
      state[:artifacts] << params
      nil
    end

    # Copy each recorded artifact into ./pkg with world-readable perms.
    Contract None => nil
    def expose_artifacts!
      FileUtils.mkdir_p 'pkg'
      return unless state[:artifacts]
      state[:artifacts].each do |artifact|
        dest = File.join('pkg', artifact[:long_name] || artifact[:name])
        FileUtils.cp artifact[:source], dest
        FileUtils.chmod 0o0644, dest
      end
      nil
    end

    # Latest annotated git tag, memoised for the life of this Forge.
    # NOTE(review): no handling if `git describe` fails (an empty string
    # would be cached) — confirm acceptable.
    Contract None => String
    def version
      @version ||= `git describe --abbrev=0 --tags`.rstrip
    end

    # Upload each artifact as a GitHub release asset via the targit CLI.
    Contract None => nil
    def upload_artifacts!
      return unless state[:artifacts]
      state[:artifacts].each do |artifact|
        run_local ['targit', '--authfile', '.github', '--create',
                   '--name', artifact[:name],
                   "#{org}/#{name}", version, artifact[:source]]
      end
      nil
    end
  end
end
Support passing an endpoint to upload
module PkgForge
  ##
  # Add upload methods to Forge
  class Forge
    # Optional alternate GitHub API endpoint forwarded to targit
    # (e.g. for GitHub Enterprise); nil means targit's default.
    attr_accessor :endpoint

    # Publish the forge: uploads every recorded artifact.
    # (Each `Contract` line applies to the very next `def`.)
    Contract None => nil
    def push!
      upload_artifacts!
    end

    private

    # Record an artifact (:name, optional :long_name, :source path) in the
    # shared state hash for later exposure/upload.
    Contract HashOf[Symbol => String] => nil
    def add_artifact(params)
      state[:artifacts] ||= []
      state[:artifacts] << params
      nil
    end

    # Copy each recorded artifact into ./pkg with world-readable perms.
    Contract None => nil
    def expose_artifacts!
      FileUtils.mkdir_p 'pkg'
      return unless state[:artifacts]
      state[:artifacts].each do |artifact|
        dest = File.join('pkg', artifact[:long_name] || artifact[:name])
        FileUtils.cp artifact[:source], dest
        FileUtils.chmod 0o0644, dest
      end
      nil
    end

    # Latest annotated git tag, memoised for the life of this Forge.
    Contract None => String
    def version
      @version ||= `git describe --abbrev=0 --tags`.rstrip
    end

    # Upload each artifact as a release asset via targit, passing the
    # custom --endpoint only when one has been configured.
    Contract None => nil
    def upload_artifacts!
      return unless state[:artifacts]
      state[:artifacts].each do |artifact|
        args = ['targit', '--authfile', '.github', '--create']
        args += ['--name', artifact[:name]]
        args += ['--endpoint', endpoint] if endpoint
        args += ["#{org}/#{name}", version, artifact[:source]]
        run_local args
      end
      nil
    end
  end

  module DSL
    ##
    # Add upload methods to Forge DSL
    class Forge
      # DSL setter: `endpoint 'https://...'` assigns the underlying
      # Forge's endpoint accessor.
      Contract String => nil
      def endpoint(value)
        @forge.endpoint = value
        nil
      end
    end
  end
end
|
#
# Cookbook: poise-service-aix
# License: Apache 2.0
#
# Copyright 2015, Noah Kantrowitz
# Copyright 2015, Bloomberg Finance L.P.
#
require 'chef/mash'
require 'poise_service/error'
require 'poise_service/service_providers/base'
module PoiseService
  module Aix
    # Poise-service provider for AIX.
    # @since 1.0.0
    class Provider
      provides(:aix_service, os: 'aix')

      # The reload action for the AIX service provider.
      # No-op when options['never_reload'] is set; real reload still TODO.
      def action_reload
        return if options['never_reload']
        # TODO: not sure?
      end

      # Parse the PID from `lssrc -s <name>` output.
      # @return [Integer]
      def pid
        # TODO: implement this
      end

      private

      # Register the SRC subsystem that runs the service command.
      def create_service
        command = new_resource.command.split(' ')
        aix_subsystem "create #{new_resource.service_name}" do
          subsystem_name new_resource.service_name
          program command.first
          # FIX: was `command.shift`, which returns the *first* token (the
          # program itself), so the subsystem received the program name as
          # its arguments. Pass the remaining tokens instead.
          # NOTE(review): assumes the aix_subsystem `arguments` property
          # takes a space-joined string (the original passed a String) —
          # confirm against the resource's API.
          arguments command.drop(1).join(' ')
          user new_resource.user
        end
      end

      # Add an inittab entry so the subsystem is started at boot.
      def enable_service
        options['inittab']['runlevel'] ||= 2
        aix_inittab "enable #{new_resource.service_name}" do
          runlevel options['inittab']['runlevel']
          command "/usr/bin/startsrc -s #{new_resource.service_name} >/dev/console 2>&1"
        end
      end

      # Remove the inittab entry (action :disable removes it; the command
      # text is only the entry's recorded command).
      def disable_service
        options['inittab']['runlevel'] ||= 2
        aix_inittab "disable #{new_resource.service_name}" do
          runlevel options['inittab']['runlevel']
          command "/usr/bin/startsrc -s #{new_resource.service_name} >/dev/console 2>&1"
          action :disable
        end
      end

      # Delete the SRC subsystem definition entirely.
      def destroy_service
        # NOTE(review): resource name says "disable" while the action is
        # :delete — likely copy/paste; kept as-is since the name is only a
        # resource identifier.
        aix_subsystem "disable #{new_resource.service_name}" do
          subsystem_name new_resource.service_name
          action :delete
        end
      end

      # Force the underlying Chef service resource to use the AIX provider.
      def service_provider
        super.tap do |r|
          r.provider(Chef::Provider::Service::Aix)
        end
      end
    end
  end
end
Implemented pid function for poise aix service provider
#
# Cookbook: poise-service-aix
# License: Apache 2.0
#
# Copyright 2015, Noah Kantrowitz
# Copyright 2015, Bloomberg Finance L.P.
#
require 'chef/mash'
require 'poise_service/error'
require 'poise_service/service_providers/base'
module PoiseService
  module ServiceProviders
    # Poise-service provider for AIX.
    # @since 1.0.0
    class Provider < Base
      include Chef::Mixin::ShellOut
      provides(:aix_service, os: 'aix')

      # Parse the PID from `lssrc -s <name>` output.
      # NOTE(review): the last whitespace-separated token of lssrc output is
      # the PID only when the subsystem is active; for an inoperative
      # subsystem the last token is the status word and to_i yields 0 —
      # confirm callers tolerate that.
      # @return [Integer]
      def pid
        service = shell_out!("lssrc -s #{@new_resource.service_name}").stdout
        service.split(' ')[-1].to_i
      end

      private

      # Register the SRC subsystem that runs the service command.
      def create_service
        Chef::Log.debug("Creating aix service #{new_resource.service_name}")
        command = new_resource.command.split(' ')
        aix_subsystem "create #{new_resource.service_name}" do
          subsystem_name new_resource.service_name
          program command.first
          # FIX: was `command.shift`, which returns the *first* token (the
          # program itself), so the subsystem received the program name as
          # its arguments. Pass the remaining tokens instead.
          # NOTE(review): assumes the aix_subsystem `arguments` property
          # takes a space-joined string (the original passed a String) —
          # confirm against the resource's API.
          arguments command.drop(1).join(' ')
          user new_resource.user
        end
      end

      # Add an inittab entry so the subsystem is started at boot.
      def enable_service
        Chef::Log.debug("Enabling aix service #{new_resource.service_name}")
        options['inittab']['runlevel'] ||= 2
        aix_inittab "enable #{new_resource.service_name}" do
          runlevel options['inittab']['runlevel']
          command "/usr/bin/startsrc -s #{new_resource.service_name} >/dev/console 2>&1"
        end
      end

      # Remove the inittab entry (action :disable removes it).
      def disable_service
        Chef::Log.debug("Disabling aix service #{new_resource.service_name}")
        options['inittab']['runlevel'] ||= 2
        aix_inittab "disable #{new_resource.service_name}" do
          runlevel options['inittab']['runlevel']
          command "/usr/bin/startsrc -s #{new_resource.service_name} >/dev/console 2>&1"
          action :disable
        end
      end

      # Delete the SRC subsystem definition entirely.
      def destroy_service
        Chef::Log.debug("Destroying aix service #{new_resource.service_name}")
        aix_subsystem "delete #{new_resource.service_name}" do
          subsystem_name new_resource.service_name
          action :delete
        end
      end

      # Force the underlying Chef service resource to use the AIX provider.
      def service_provider
        super.tap do |r|
          r.provider(Chef::Provider::Service::Aix)
        end
      end
    end
  end
end
|
require 'project_compat'
module Acts
module Authorized
module PolicyBasedAuthorization
# Hook run when this module is mixed into a model class: installs the
# class-level auth-lookup API and wires up associations and callbacks for
# policy-based authorization.
def self.included klass
  klass.extend ClassMethods
  klass.extend AuthLookupClassMethods
  klass.class_eval do
    attr_accessor :is_published_before_save #this is for logging purpose, to store the publish status of an item before it is updated
    belongs_to :contributor, :polymorphic => true unless method_defined? :contributor
    after_initialize :contributor_or_default_if_new
    #checks a policy exists, and if missing resorts to using a private policy
    after_initialize :policy_or_default_if_new, :assign_is_published_before_save
    include ProjectCompat unless method_defined? :projects
    belongs_to :policy, :required_access_to_owner => :manage, :autosave => true
    # NOTE: this `unless` is evaluated once at class-definition time, so the
    # callbacks are skipped entirely on virtualliver deployments.
    before_validation :change_policy_if_cannot_publish, :publishing_auth unless Seek::Config.is_virtualliver
    after_save :queue_update_auth_table
    after_destroy :remove_from_lookup_table
  end
end
module ClassMethods
end

# Class-level helpers for the per-type "<type>_auth_lookup" table that
# caches can_view/can_edit/... flags per (user_id, asset_id). user_id 0
# represents the anonymous user. SQL here interpolates values directly;
# ids come from trusted records and user_id is coerced to numeric first.
module AuthLookupClassMethods
  #returns all the authorised items for a given action and optionally a user and set of projects. If user is nil, the items authorised for an
  #anonymous user are returned. If one or more projects are provided, then only the assets linked to those projects are included.
  def all_authorized_for action, user=User.current_user, projects=nil
    projects=Array(projects) unless projects.nil?
    user_id = user.nil? ? 0 : user.id
    assets = []
    programatic_project_filter = !projects.nil? && (!Seek::Config.auth_lookup_enabled || (self==Assay || self==Study))
    if Seek::Config.auth_lookup_enabled
      if (lookup_table_consistent?(user_id))
        Rails.logger.info("Lookup table #{lookup_table_name} is complete for user_id = #{user_id}")
        assets = lookup_for_action_and_user action, user_id,projects
      else
        Rails.logger.info("Lookup table #{lookup_table_name} is incomplete for user_id = #{user_id} - doing things the slow way")
        assets = all.select { |df| df.send("can_#{action}?") }
        programatic_project_filter = !projects.nil?
      end
    else
      assets = all.select { |df| df.send("can_#{action}?") }
    end
    if programatic_project_filter
      assets.select{|a| !(a.projects & projects).empty?}
    else
      assets
    end
  end

  #determines whether the lookup table records are consistent with the number of asset items in the database and the last id of the item added
  def lookup_table_consistent? user_id
    unless user_id.is_a?(Numeric)
      user_id = user_id.nil? ? 0 : user_id.id
    end
    #cannot rely purely on the count, since an item could have been deleted and a new one added
    c = lookup_count_for_user user_id
    last_stored_asset_id = last_asset_id_for_user user_id
    last_asset_id = self.last(:order=>:id).try(:id)
    #trigger off a full update for that user if the count is zero and items should exist for that type
    if (c==0 && !last_asset_id.nil?)
      AuthLookupUpdateJob.add_items_to_queue User.find_by_id(user_id)
    end
    c==count && (count==0 || (last_stored_asset_id == last_asset_id))
  end

  #the name of the lookup table, holding authorisation lookup information, for this given authorised type
  def lookup_table_name
    "#{self.name.underscore}_auth_lookup"
  end

  #removes all entries from the authorization lookup type for this authorized type
  def clear_lookup_table
    ActiveRecord::Base.connection.execute("delete from #{lookup_table_name}")
  end

  #the record count for entries within the authorization lookup table for a given user_id or user. Used to determine if the table is complete
  def lookup_count_for_user user_id
    unless user_id.is_a?(Numeric)
      user_id = user_id.nil? ? 0 : user_id.id
    end
    ActiveRecord::Base.connection.select_one("select count(*) from #{lookup_table_name} where user_id = #{user_id}").values[0].to_i
  end

  # Fetch the authorised assets of this type for action/user_id straight
  # from the lookup table, optionally joined against the project map table.
  def lookup_for_action_and_user action,user_id,projects
    #Study's and Assays have to be treated differently, as they are linked to a project through the investigation'
    if (projects.nil? || (self == Study || self == Assay))
      sql = "select asset_id from #{lookup_table_name} where user_id = #{user_id} and can_#{action}=#{ActiveRecord::Base.connection.quoted_true}"
      ids = ActiveRecord::Base.connection.select_all(sql).collect{|k| k["asset_id"]}
    else
      project_map_table = ["#{self.name.underscore.pluralize}", 'projects'].sort.join('_')
      project_map_asset_id = "#{self.name.underscore}_id"
      project_clause = projects.collect{|p| "#{project_map_table}.project_id = #{p.id}"}.join(" or ")
      sql = "select asset_id,#{project_map_asset_id} from #{lookup_table_name}"
      sql << " inner join #{project_map_table}"
      sql << " on #{lookup_table_name}.asset_id = #{project_map_table}.#{project_map_asset_id}"
      sql << " where #{lookup_table_name}.user_id = #{user_id} and (#{project_clause})"
      sql << " and can_#{action}=#{ActiveRecord::Base.connection.quoted_true}"
      ids = ActiveRecord::Base.connection.select_all(sql).collect{|k| k["asset_id"]}
    end
    # legacy dynamic finder (pre-Rails 4)
    find_all_by_id(ids)
  end

  #the highest asset id recorded in authorization lookup table for a given user_id or user. Used to determine if the table is complete
  def last_asset_id_for_user user_id
    unless user_id.is_a?(Numeric)
      user_id = user_id.nil? ? 0 : user_id.id
    end
    v = ActiveRecord::Base.connection.select_one("select max(asset_id) from #{lookup_table_name} where user_id = #{user_id}").values[0]
    v.nil? ? -1 : v.to_i
  end

  #looks up the entry in the authorization lookup table for a single authorised type, for a given action, user_id and asset_id. A user id of zero
  #indicates an anonymous user. Returns nil if there is no record available
  def lookup_for_asset action,user_id,asset_id
    attribute = "can_#{action}"
    # class variable is shared by every class extending this module; safe
    # because quoted_true is the same for the whole connection adapter.
    @@expected_true_value ||= ActiveRecord::Base.connection.quoted_true.gsub("'","")
    res = ActiveRecord::Base.connection.select_one("select #{attribute} from #{lookup_table_name} where user_id=#{user_id} and asset_id=#{asset_id}")
    if res.nil?
      nil
    else
      res[attribute]==@@expected_true_value
    end
  end
end
#removes all entries related to this item from the authorization lookup table
def remove_from_lookup_table
  id=self.id
  ActiveRecord::Base.connection.execute("delete from #{self.class.lookup_table_name} where asset_id=#{id}")
end

#triggers a background task to update or create the authorization lookup table records for this item
def queue_update_auth_table
  #FIXME: somewhat aggressively does this after every save can be refined in the future
  # timestamps-only saves are ignored to avoid pointless re-queues
  unless (self.changed - ["updated_at", "last_used_at"]).empty?
    AuthLookupUpdateJob.add_items_to_queue self
  end
end

#updates or creates the authorization lookup entries for this item and the provided user (nil indicating anonymous user)
def update_lookup_table user=nil
  user_id = user.nil? ? 0 : user.id
  can_view = ActiveRecord::Base.connection.quote perform_auth(user,"view")
  can_edit = ActiveRecord::Base.connection.quote perform_auth(user,"edit")
  can_download = ActiveRecord::Base.connection.quote perform_auth(user,"download")
  can_manage = ActiveRecord::Base.connection.quote perform_auth(user,"manage")
  can_delete = ActiveRecord::Base.connection.quote perform_auth(user,"delete")
  #check to see if an insert of update is needed, action used is arbitary
  lookup = self.class.lookup_for_asset("view",user_id,self.id)
  insert = lookup.nil?
  if insert
    sql = "insert into #{self.class.lookup_table_name} (user_id,asset_id,can_view,can_edit,can_download,can_manage,can_delete) values (#{user_id},#{id},#{can_view},#{can_edit},#{can_download},#{can_manage},#{can_delete});"
  else
    sql = "update #{self.class.lookup_table_name} set can_view=#{can_view}, can_edit=#{can_edit}, can_download=#{can_download},can_manage=#{can_manage},can_delete=#{can_delete} where user_id=#{user_id} and asset_id=#{id}"
  end
  ActiveRecord::Base.connection.execute(sql)
end
# Defines one predicate per authorization action (can_view?, can_edit?,
# can_download?, can_manage?, can_delete?). New records are always allowed;
# otherwise the lookup table is consulted when enabled, falling back to a
# full perform_auth check when no cached entry exists.
AUTHORIZATION_ACTIONS.each do |action|
  eval <<-END_EVAL
    def can_#{action}? user = User.current_user
      return true if new_record?
      user_id = user.nil? ? 0 : user.id
      if Seek::Config.auth_lookup_enabled
        lookup = self.class.lookup_for_asset("#{action}", user_id,self.id)
      else
        lookup=nil
      end
      if lookup.nil?
        perform_auth(user,"#{action}")
      else
        lookup
      end
    end
  END_EVAL
end
# Whether the contributor should be credited for this item type.
def contributor_credited?
  true
end

def private?
  policy.private?
end

def public?
  policy.public?
end

def default_policy
  Policy.default
end

# Assign the default policy when none is present.
def policy_or_default
  if self.policy.nil?
    self.policy = default_policy
  end
end

# after_initialize callback: only new records get a default policy.
def policy_or_default_if_new
  if self.new_record?
    policy_or_default
  end
end

def default_contributor
  User.current_user
end

#when having a sharing_scope policy of Policy::ALL_SYSMO_USERS it is concidered to have advanced permissions if any of the permissions do not relate to the projects associated with the resource (ISA or Asset))
#this is a temporary work-around for the loss of the custom_permissions flag when defining a pre-canned permission of shared with sysmo, but editable/downloadable within mhy project
#other policy sharing scopes are simpler, and are concidered to have advanced permissions if there are more than zero permissions defined
def has_advanced_permissions?
  if policy.sharing_scope==Policy::ALL_SYSMO_USERS
    !(policy.permissions.collect{|p| p.contributor} - projects).empty?
  else
    policy.permissions.count > 0
  end
end

# after_initialize callback: new records default to the current user.
def contributor_or_default_if_new
  if self.new_record? && contributor.nil?
    self.contributor = default_contributor
  end
end
#(gatekeeper also manager) or (manager and projects have no gatekeeper) or (manager and the item was published)
def can_publish? user=User.current_user
  if self.new_record?
    (Ability.new(user).can? :publish, self) || (self.can_manage? && self.gatekeepers.empty?) || Seek::Config.is_virtualliver
  else
    (Ability.new(user).can? :publish, self) || (self.can_manage? && self.gatekeepers.empty?) || (self.can_manage? && (self.policy.sharing_scope_was == Policy::EVERYONE)) || Seek::Config.is_virtualliver
  end
end

#use request_permission_summary to retrieve who can manage the item
def people_can_manage
  contributor = self.contributor.kind_of?(Person) ? self.contributor : self.contributor.try(:person)
  return [[contributor.id, "#{contributor.first_name} #{contributor.last_name}", Policy::MANAGING]] if policy.blank?
  creators = is_downloadable? ? self.creators : []
  asset_managers = projects.collect(&:asset_managers).flatten
  grouped_people_by_access_type = policy.summarize_permissions creators,asset_managers, contributor
  grouped_people_by_access_type[Policy::MANAGING]
end

# Full (uncached) authorization check: policy-based authorization OR a
# CanCan ability for the action itself or its "<action>_asset" variant.
def perform_auth user,action
  (Authorization.is_authorized? action, nil, self, user) || (Ability.new(user).can? action.to_sym, self) || (Ability.new(user).can? "#{action}_asset".to_sym, self)
end

#returns a list of the people that can manage this file
#which will be the contributor, and those that have manage permissions
def managers
  #FIXME: how to handle projects as contributors - return all people or just specific people (pals or other role)?
  people=[]
  unless self.contributor.nil?
    people << self.contributor.person if self.contributor.kind_of?(User)
    people << self.contributor if self.contributor.kind_of?(Person)
  end
  self.policy.permissions.each do |perm|
    unless perm.contributor.nil? || perm.access_type!=Policy::MANAGING
      people << (perm.contributor) if perm.contributor.kind_of?(Person)
      people << (perm.contributor.person) if perm.contributor.kind_of?(User)
    end
  end
  people.uniq
end

# The User who contributed this item; Assays delegate to their owner.
def contributing_user
  unless self.kind_of?(Assay)
    if contributor.kind_of?Person
      contributor.try(:user)
    elsif contributor.kind_of?User
      contributor
    else
      nil
    end
  else
    owner.try(:user)
  end
end

# All gatekeepers across this item's projects.
def gatekeepers
  self.projects.collect(&:gatekeepers).flatten
end

# before_validation callback: blocks publishing by unauthorised users.
def publishing_auth
  return true if $authorization_checks_disabled
  #only check if doing publishing
  if self.policy.sharing_scope == Policy::EVERYONE && !self.kind_of?(Publication)
    unless self.can_publish?
      # NOTE(review): errors.add_to_base is the pre-Rails-3 API — confirm
      # against the Rails version in use.
      errors.add_to_base("You are not permitted to publish this #{self.class.name.underscore.humanize}")
      return false
    end
  end
end

# after_initialize callback: remembers whether the item was already
# published, for logging after updates.
def assign_is_published_before_save
  if self.policy.try(:sharing_scope_was) == Policy::EVERYONE
    self.is_published_before_save=true
  else
    self.is_published_before_save=false
  end
end

#this is for changing policy from public to sysmo_and_projects_policy, if the item can not be published
def change_policy_if_cannot_publish
  if self.new_record? && self.policy.sharing_scope == Policy::EVERYONE && !self.kind_of?(Publication) && !self.can_publish?
    self.policy = Policy.sysmo_and_projects_policy self.projects
  elsif !self.new_record? && self.policy.sharing_scope == Policy::EVERYONE && !self.kind_of?(Publication) && !self.can_publish?
    # reload the persisted policy, discarding the attempted change
    self.policy = Policy.find_by_id(self.policy.id)
  end
end
end
end
end
Rename a function
require 'project_compat'
module Acts
module Authorized
module PolicyBasedAuthorization
# Hook run when this module is mixed into a model class: installs the
# class-level auth-lookup API and wires up associations and callbacks for
# policy-based authorization.
def self.included klass
  klass.extend ClassMethods
  klass.extend AuthLookupClassMethods
  klass.class_eval do
    attr_accessor :is_published_before_save #this is for logging purpose, to store the publish status of an item before it is updated
    belongs_to :contributor, :polymorphic => true unless method_defined? :contributor
    after_initialize :contributor_or_default_if_new
    #checks a policy exists, and if missing resorts to using a private policy
    after_initialize :policy_or_default_if_new, :assign_is_published_before_save
    include ProjectCompat unless method_defined? :projects
    belongs_to :policy, :required_access_to_owner => :manage, :autosave => true
    # NOTE: this `unless` is evaluated once at class-definition time, so the
    # callbacks are skipped entirely on virtualliver deployments.
    before_validation :temporary_policy_while_waiting_for_publishing_approval, :publishing_auth unless Seek::Config.is_virtualliver
    after_save :queue_update_auth_table
    after_destroy :remove_from_lookup_table
  end
end
module ClassMethods
end

# Class-level helpers for the per-type "<type>_auth_lookup" table that
# caches can_view/can_edit/... flags per (user_id, asset_id). user_id 0
# represents the anonymous user. SQL here interpolates values directly;
# ids come from trusted records and user_id is coerced to numeric first.
module AuthLookupClassMethods
  #returns all the authorised items for a given action and optionally a user and set of projects. If user is nil, the items authorised for an
  #anonymous user are returned. If one or more projects are provided, then only the assets linked to those projects are included.
  def all_authorized_for action, user=User.current_user, projects=nil
    projects=Array(projects) unless projects.nil?
    user_id = user.nil? ? 0 : user.id
    assets = []
    programatic_project_filter = !projects.nil? && (!Seek::Config.auth_lookup_enabled || (self==Assay || self==Study))
    if Seek::Config.auth_lookup_enabled
      if (lookup_table_consistent?(user_id))
        Rails.logger.info("Lookup table #{lookup_table_name} is complete for user_id = #{user_id}")
        assets = lookup_for_action_and_user action, user_id,projects
      else
        Rails.logger.info("Lookup table #{lookup_table_name} is incomplete for user_id = #{user_id} - doing things the slow way")
        assets = all.select { |df| df.send("can_#{action}?") }
        programatic_project_filter = !projects.nil?
      end
    else
      assets = all.select { |df| df.send("can_#{action}?") }
    end
    if programatic_project_filter
      assets.select{|a| !(a.projects & projects).empty?}
    else
      assets
    end
  end

  #determines whether the lookup table records are consistent with the number of asset items in the database and the last id of the item added
  def lookup_table_consistent? user_id
    unless user_id.is_a?(Numeric)
      user_id = user_id.nil? ? 0 : user_id.id
    end
    #cannot rely purely on the count, since an item could have been deleted and a new one added
    c = lookup_count_for_user user_id
    last_stored_asset_id = last_asset_id_for_user user_id
    last_asset_id = self.last(:order=>:id).try(:id)
    #trigger off a full update for that user if the count is zero and items should exist for that type
    if (c==0 && !last_asset_id.nil?)
      AuthLookupUpdateJob.add_items_to_queue User.find_by_id(user_id)
    end
    c==count && (count==0 || (last_stored_asset_id == last_asset_id))
  end

  #the name of the lookup table, holding authorisation lookup information, for this given authorised type
  def lookup_table_name
    "#{self.name.underscore}_auth_lookup"
  end

  #removes all entries from the authorization lookup type for this authorized type
  def clear_lookup_table
    ActiveRecord::Base.connection.execute("delete from #{lookup_table_name}")
  end

  #the record count for entries within the authorization lookup table for a given user_id or user. Used to determine if the table is complete
  def lookup_count_for_user user_id
    unless user_id.is_a?(Numeric)
      user_id = user_id.nil? ? 0 : user_id.id
    end
    ActiveRecord::Base.connection.select_one("select count(*) from #{lookup_table_name} where user_id = #{user_id}").values[0].to_i
  end

  # Fetch the authorised assets of this type for action/user_id straight
  # from the lookup table, optionally joined against the project map table.
  def lookup_for_action_and_user action,user_id,projects
    #Study's and Assays have to be treated differently, as they are linked to a project through the investigation'
    if (projects.nil? || (self == Study || self == Assay))
      sql = "select asset_id from #{lookup_table_name} where user_id = #{user_id} and can_#{action}=#{ActiveRecord::Base.connection.quoted_true}"
      ids = ActiveRecord::Base.connection.select_all(sql).collect{|k| k["asset_id"]}
    else
      project_map_table = ["#{self.name.underscore.pluralize}", 'projects'].sort.join('_')
      project_map_asset_id = "#{self.name.underscore}_id"
      project_clause = projects.collect{|p| "#{project_map_table}.project_id = #{p.id}"}.join(" or ")
      sql = "select asset_id,#{project_map_asset_id} from #{lookup_table_name}"
      sql << " inner join #{project_map_table}"
      sql << " on #{lookup_table_name}.asset_id = #{project_map_table}.#{project_map_asset_id}"
      sql << " where #{lookup_table_name}.user_id = #{user_id} and (#{project_clause})"
      sql << " and can_#{action}=#{ActiveRecord::Base.connection.quoted_true}"
      ids = ActiveRecord::Base.connection.select_all(sql).collect{|k| k["asset_id"]}
    end
    # legacy dynamic finder (pre-Rails 4)
    find_all_by_id(ids)
  end

  #the highest asset id recorded in authorization lookup table for a given user_id or user. Used to determine if the table is complete
  def last_asset_id_for_user user_id
    unless user_id.is_a?(Numeric)
      user_id = user_id.nil? ? 0 : user_id.id
    end
    v = ActiveRecord::Base.connection.select_one("select max(asset_id) from #{lookup_table_name} where user_id = #{user_id}").values[0]
    v.nil? ? -1 : v.to_i
  end

  #looks up the entry in the authorization lookup table for a single authorised type, for a given action, user_id and asset_id. A user id of zero
  #indicates an anonymous user. Returns nil if there is no record available
  def lookup_for_asset action,user_id,asset_id
    attribute = "can_#{action}"
    # class variable is shared by every class extending this module; safe
    # because quoted_true is the same for the whole connection adapter.
    @@expected_true_value ||= ActiveRecord::Base.connection.quoted_true.gsub("'","")
    res = ActiveRecord::Base.connection.select_one("select #{attribute} from #{lookup_table_name} where user_id=#{user_id} and asset_id=#{asset_id}")
    if res.nil?
      nil
    else
      res[attribute]==@@expected_true_value
    end
  end
end
#removes all entries related to this item from the authorization lookup table
def remove_from_lookup_table
  id=self.id
  ActiveRecord::Base.connection.execute("delete from #{self.class.lookup_table_name} where asset_id=#{id}")
end

#triggers a background task to update or create the authorization lookup table records for this item
def queue_update_auth_table
  #FIXME: somewhat aggressively does this after every save can be refined in the future
  # timestamps-only saves are ignored to avoid pointless re-queues
  unless (self.changed - ["updated_at", "last_used_at"]).empty?
    AuthLookupUpdateJob.add_items_to_queue self
  end
end

#updates or creates the authorization lookup entries for this item and the provided user (nil indicating anonymous user)
def update_lookup_table user=nil
  user_id = user.nil? ? 0 : user.id
  can_view = ActiveRecord::Base.connection.quote perform_auth(user,"view")
  can_edit = ActiveRecord::Base.connection.quote perform_auth(user,"edit")
  can_download = ActiveRecord::Base.connection.quote perform_auth(user,"download")
  can_manage = ActiveRecord::Base.connection.quote perform_auth(user,"manage")
  can_delete = ActiveRecord::Base.connection.quote perform_auth(user,"delete")
  #check to see if an insert of update is needed, action used is arbitary
  lookup = self.class.lookup_for_asset("view",user_id,self.id)
  insert = lookup.nil?
  if insert
    sql = "insert into #{self.class.lookup_table_name} (user_id,asset_id,can_view,can_edit,can_download,can_manage,can_delete) values (#{user_id},#{id},#{can_view},#{can_edit},#{can_download},#{can_manage},#{can_delete});"
  else
    sql = "update #{self.class.lookup_table_name} set can_view=#{can_view}, can_edit=#{can_edit}, can_download=#{can_download},can_manage=#{can_manage},can_delete=#{can_delete} where user_id=#{user_id} and asset_id=#{id}"
  end
  ActiveRecord::Base.connection.execute(sql)
end
# Defines one predicate per authorization action (can_view?, can_edit?,
# can_download?, can_manage?, can_delete?). New records are always allowed;
# otherwise the lookup table is consulted when enabled, falling back to a
# full perform_auth check when no cached entry exists.
AUTHORIZATION_ACTIONS.each do |action|
  eval <<-END_EVAL
    def can_#{action}? user = User.current_user
      return true if new_record?
      user_id = user.nil? ? 0 : user.id
      if Seek::Config.auth_lookup_enabled
        lookup = self.class.lookup_for_asset("#{action}", user_id,self.id)
      else
        lookup=nil
      end
      if lookup.nil?
        perform_auth(user,"#{action}")
      else
        lookup
      end
    end
  END_EVAL
end
# Whether the contributor should be credited for this item type.
def contributor_credited?
  true
end

def private?
  policy.private?
end

def public?
  policy.public?
end

def default_policy
  Policy.default
end

# Assign the default policy when none is present.
def policy_or_default
  if self.policy.nil?
    self.policy = default_policy
  end
end

# after_initialize callback: only new records get a default policy.
def policy_or_default_if_new
  if self.new_record?
    policy_or_default
  end
end

def default_contributor
  User.current_user
end

#when having a sharing_scope policy of Policy::ALL_SYSMO_USERS it is concidered to have advanced permissions if any of the permissions do not relate to the projects associated with the resource (ISA or Asset))
#this is a temporary work-around for the loss of the custom_permissions flag when defining a pre-canned permission of shared with sysmo, but editable/downloadable within mhy project
#other policy sharing scopes are simpler, and are concidered to have advanced permissions if there are more than zero permissions defined
def has_advanced_permissions?
  if policy.sharing_scope==Policy::ALL_SYSMO_USERS
    !(policy.permissions.collect{|p| p.contributor} - projects).empty?
  else
    policy.permissions.count > 0
  end
end

# after_initialize callback: new records default to the current user.
def contributor_or_default_if_new
  if self.new_record? && contributor.nil?
    self.contributor = default_contributor
  end
end
#(gatekeeper also manager) or (manager and projects have no gatekeeper) or (manager and the item was published)
def can_publish? user=User.current_user
if self.new_record?
(Ability.new(user).can? :publish, self) || (self.can_manage? && self.gatekeepers.empty?) || Seek::Config.is_virtualliver
else
(Ability.new(user).can? :publish, self) || (self.can_manage? && self.gatekeepers.empty?) || (self.can_manage? && (self.policy.sharing_scope_was == Policy::EVERYONE)) || Seek::Config.is_virtualliver
end
end
#use request_permission_summary to retrieve who can manage the item
def people_can_manage
contributor = self.contributor.kind_of?(Person) ? self.contributor : self.contributor.try(:person)
return [[contributor.id, "#{contributor.first_name} #{contributor.last_name}", Policy::MANAGING]] if policy.blank?
creators = is_downloadable? ? self.creators : []
asset_managers = projects.collect(&:asset_managers).flatten
grouped_people_by_access_type = policy.summarize_permissions creators,asset_managers, contributor
grouped_people_by_access_type[Policy::MANAGING]
end
def perform_auth user,action
(Authorization.is_authorized? action, nil, self, user) || (Ability.new(user).can? action.to_sym, self) || (Ability.new(user).can? "#{action}_asset".to_sym, self)
end
#returns a list of the people that can manage this file
#which will be the contributor, and those that have manage permissions
def managers
#FIXME: how to handle projects as contributors - return all people or just specific people (pals or other role)?
people=[]
unless self.contributor.nil?
people << self.contributor.person if self.contributor.kind_of?(User)
people << self.contributor if self.contributor.kind_of?(Person)
end
self.policy.permissions.each do |perm|
unless perm.contributor.nil? || perm.access_type!=Policy::MANAGING
people << (perm.contributor) if perm.contributor.kind_of?(Person)
people << (perm.contributor.person) if perm.contributor.kind_of?(User)
end
end
people.uniq
end
def contributing_user
unless self.kind_of?(Assay)
if contributor.kind_of?Person
contributor.try(:user)
elsif contributor.kind_of?User
contributor
else
nil
end
else
owner.try(:user)
end
end
def gatekeepers
self.projects.collect(&:gatekeepers).flatten
end
# Validation hook: blocks saving with a public (EVERYONE) sharing scope
# unless the current user is allowed to publish. Publications are exempt.
# Returns true immediately when the global switch disables auth checks.
def publishing_auth
return true if $authorization_checks_disabled
#only check if doing publishing
if self.policy.sharing_scope == Policy::EVERYONE && !self.kind_of?(Publication)
unless self.can_publish?
# NOTE(review): errors.add_to_base is the Rails 2 API; Rails 3+ needs
# errors.add(:base, ...) — confirm the framework version in use.
errors.add_to_base("You are not permitted to publish this #{self.class.name.underscore.humanize}")
return false
end
end
end
# Record, before save, whether the item was already published (i.e. its
# persisted sharing scope was EVERYONE) so later callbacks can compare
# the old and new publication state.
def assign_is_published_before_save
  self.is_published_before_save = (self.policy.try(:sharing_scope_was) == Policy::EVERYONE)
end
#while item is waiting for publishing approval,set the policy of the item to:
#new item: sysmo_and_project_policy
#updated item: keep the policy as before
def temporary_policy_while_waiting_for_publishing_approval
  # Only applies when attempting to publish (EVERYONE scope) without the
  # right to do so; Publications are exempt. The compound condition was
  # previously duplicated across both branches — compute it once.
  return unless self.policy.sharing_scope == Policy::EVERYONE &&
                !self.kind_of?(Publication) && !self.can_publish?

  if self.new_record?
    self.policy = Policy.sysmo_and_projects_policy self.projects
  else
    # Reload the persisted policy, discarding the unsaved public scope.
    self.policy = Policy.find_by_id(self.policy.id)
  end
end
end
end
end
|
# encoding: utf-8
module GithubCLI
  # Thin presentation layer over a Thor-style shell. Each severity
  # helper forwards the message to Shell#say with a fixed colour.
  class UI
    attr_writer :shell

    def initialize(shell)
      @shell = shell
    end

    # Generate confirm/info/warn/error, each bound to its colour.
    { confirm: :green, info: nil, warn: :yellow, error: :red }.each do |level, color|
      define_method(level) { |message| @shell.say(message, color) }
    end

    # Debug output; colour selection is left entirely to the shell.
    def debug(message)
      @shell.say message
    end

    # Turn on the debug flag.
    def debug!
      @debug = true
    end

    def print_table(table, options = {})
      @shell.print_table(table, options)
    end

    def print_wrapped(message, options = {})
      @shell.print_wrapped(message, options)
    end

    def terminal_width
      @shell.terminal_width
    end
  end # UI
end # GithubCLI
Allow for new line.
# encoding: utf-8
module GithubCLI
  # Presentation helper wrapping a Thor-style shell. Each severity
  # method forwards to Shell#say with an appropriate colour; +newline+
  # is passed through so callers can suppress the trailing line break.
  class UI
    attr_writer :shell

    def initialize(shell)
      @shell = shell
      # NOTE(review): these two flags are written but never read inside
      # this class; presumably callers or subclasses consult them — confirm.
      @quite = false
      @debug = ENV['DEBUG']
    end

    def confirm(message, newline=nil)
      @shell.say message, :green, newline
    end

    def info(message, newline=nil)
      @shell.say message, nil, newline
    end

    def warn(message, newline=nil)
      @shell.say message, :yellow, newline
    end

    def error(message, newline=nil)
      @shell.say message, :red, newline
    end

    def debug(message, newline=nil)
      @shell.say message, nil, newline
    end

    # Silence the UI. Kept under its original (misspelled) name for
    # backward compatibility; prefer the #quiet! alias in new code.
    def quite!
      @quite = true
    end
    alias quiet! quite!

    def debug!
      @debug = true
    end

    def print_table(table, options={})
      @shell.print_table table, options
    end

    def print_wrapped(message, options={})
      @shell.print_wrapped message, options
    end

    def terminal_width
      @shell.terminal_width
    end
  end # UI
end # GithubCLI
|
require 'redis'
module PrivatePub
# This class is an extension for the Faye::RackAdapter.
# It is used inside of PrivatePub.faye_app.
class FayeExtension
Redis.current = Redis.new(:host => '127.0.0.1', :port => 6379)
# Callback to handle incoming Faye messages. This authenticates both
# subscribe and publish calls.
def incoming(message, callback)
# Refresh the stored subscription entry for this client, if one exists.
hash_string = Redis.current.hgetall('subscriptions')
if hash_string && !hash_string.empty?
# subscriptions = eval(hash_string)
# SECURITY(review): eval on data read back from Redis will execute
# arbitrary Ruby if the store is ever tampered with — prefer a safe
# serialisation such as JSON. Flagged, not changed here.
key = hash_string.find{|k, v| eval(v)[:client_id] == message['clientId']}
if key
# Touch the entry's timestamp, preserving the stored client id.
Redis.current.hset('subscriptions', key.first, {time: Time.now.to_i, client_id: eval(hash_string[key.first])[:client_id]})
end
end
# Redis.current.set(channel, client_id)
if message["channel"] == "/meta/subscribe"
authenticate_subscribe(message)
elsif message["channel"] !~ %r{^/meta/}
authenticate_publish(message)
end
# Mirror the channel into the payload so clients can tell where it came from.
message['data']['channel'] ||= message['channel'] if message['data']
callback.call(message)
end
private
# Ensure the subscription signature is correct and that it has not expired.
def authenticate_subscribe(message)
subscription = PrivatePub.subscription(:channel => message["subscription"], :timestamp => message["ext"]["private_pub_timestamp"])
if message["ext"]["private_pub_signature"] != subscription[:signature]
message["error"] = "Incorrect signature."
elsif PrivatePub.signature_expired? message["ext"]["private_pub_timestamp"].to_i
message["error"] = "Signature has expired."
else
# Successful subscribe: remember it in Redis, keyed by channel name.
Redis.current.hset('subscriptions', message["subscription"], {time: Time.now.to_i, client_id: message['clientId']})
end
end
# Ensures the secret token is correct before publishing.
def authenticate_publish(message)
if PrivatePub.config[:secret_token].nil?
raise Error, "No secret_token config set, ensure private_pub.yml is loaded properly."
elsif message["ext"].nil? || (message["ext"]["private_pub_token"] != PrivatePub.config[:secret_token] && !credentials_valid?(message))
message["error"] = "Incorrect or no token."
else
# Strip the token so it is never broadcast on to subscribers.
message["ext"]["private_pub_token"] = nil
end
end
# True when the message carries a SHA1 signature over secret token,
# channel and timestamp (an alternative to sending the raw token).
def credentials_valid?(message)
return message['ext']['private_pub_signature'] == Digest::SHA1.hexdigest([PrivatePub.config[:secret_token], message['channel'], message['ext']['private_pub_timestamp']].join)
end
end
end
del commented line
require 'redis'
module PrivatePub
# This class is an extension for the Faye::RackAdapter.
# It is used inside of PrivatePub.faye_app.
class FayeExtension
Redis.current = Redis.new(:host => '127.0.0.1', :port => 6379)
# Callback to handle incoming Faye messages. This authenticates both
# subscribe and publish calls.
def incoming(message, callback)
# Refresh the stored subscription entry for this client, if one exists.
hash_string = Redis.current.hgetall('subscriptions')
if hash_string && !hash_string.empty?
# SECURITY(review): eval on data read back from Redis will execute
# arbitrary Ruby if the store is ever tampered with — prefer a safe
# serialisation such as JSON. Flagged, not changed here.
key = hash_string.find{|k, v| eval(v)[:client_id] == message['clientId']}
if key
# Touch the entry's timestamp, preserving the stored client id.
Redis.current.hset('subscriptions', key.first, {time: Time.now.to_i, client_id: eval(hash_string[key.first])[:client_id]})
end
end
# Redis.current.set(channel, client_id)
if message["channel"] == "/meta/subscribe"
authenticate_subscribe(message)
elsif message["channel"] !~ %r{^/meta/}
authenticate_publish(message)
end
# Mirror the channel into the payload so clients can tell where it came from.
message['data']['channel'] ||= message['channel'] if message['data']
callback.call(message)
end
private
# Ensure the subscription signature is correct and that it has not expired.
def authenticate_subscribe(message)
subscription = PrivatePub.subscription(:channel => message["subscription"], :timestamp => message["ext"]["private_pub_timestamp"])
if message["ext"]["private_pub_signature"] != subscription[:signature]
message["error"] = "Incorrect signature."
elsif PrivatePub.signature_expired? message["ext"]["private_pub_timestamp"].to_i
message["error"] = "Signature has expired."
else
# Successful subscribe: remember it in Redis, keyed by channel name.
Redis.current.hset('subscriptions', message["subscription"], {time: Time.now.to_i, client_id: message['clientId']})
end
end
# Ensures the secret token is correct before publishing.
def authenticate_publish(message)
if PrivatePub.config[:secret_token].nil?
raise Error, "No secret_token config set, ensure private_pub.yml is loaded properly."
elsif message["ext"].nil? || (message["ext"]["private_pub_token"] != PrivatePub.config[:secret_token] && !credentials_valid?(message))
message["error"] = "Incorrect or no token."
else
# Strip the token so it is never broadcast on to subscribers.
message["ext"]["private_pub_token"] = nil
end
end
# True when the message carries a SHA1 signature over secret token,
# channel and timestamp (an alternative to sending the raw token).
def credentials_valid?(message)
return message['ext']['private_pub_signature'] == Digest::SHA1.hexdigest([PrivatePub.config[:secret_token], message['channel'], message['ext']['private_pub_timestamp']].join)
end
end
end
require 'mime/message'
module Gmail
class Message
# Raised when given label doesn't exists.
class NoLabelError < Exception; end
attr_reader :uid
def initialize(mailbox, uid)
@uid = uid
@mailbox = mailbox
@gmail = mailbox.instance_variable_get("@gmail") if mailbox
end
def labels
@gmail.conn.uid_fetch(uid, "X-GM-LABELS")[0].attr["X-GM-LABELS"]
end
def uid
@uid ||= @gmail.conn.uid_search(['HEADER', 'Message-ID', message_id])[0]
end
# Mark message with given flag.
def flag(name)
!!@gmail.mailbox(@mailbox.name) { @gmail.conn.uid_store(uid, "+FLAGS", [name]) }
end
# Unmark message.
def unflag(name)
!!@gmail.mailbox(@mailbox.name) { @gmail.conn.uid_store(uid, "-FLAGS", [name]) }
end
# Do commonly used operations on message.
# :read/:unread/:deleted/:spam dispatch to the matching helper; any
# other symbol is applied verbatim as an IMAP flag.
def mark(flag)
  shortcut = { read: :read!, unread: :unread!, deleted: :delete!, spam: :spam! }[flag]
  shortcut ? send(shortcut) : flag(flag)
end
# Mark this message as a spam.
def spam!
move_to('[Gmail]/Spam')
end
# Mark as read.
def read!
flag(:Seen)
end
# Mark as unread.
def unread!
unflag(:Seen)
end
# Mark message with star.
def star!
flag('[Gmail]/Starred')
end
# Remove message from list of starred.
def unstar!
unflag('[Gmail]/Starred')
end
# Move to trash / bin.
# Removes the message from the mailbox's cache, sets the deleted flag,
# then files the message into Trash/Bin unless it already lives in
# Spam/Bin/Trash.
def delete!
@mailbox.messages.delete(uid)
# NOTE(review): IMAP's standard flag is :Deleted (capitalised); confirm
# the lowercase :deleted symbol here is intentional.
flag(:deleted)
# For some, it's called "Trash", for others, it's called "Bin". Support both.
trash = @gmail.labels.exist?('[Gmail]/Bin') ? '[Gmail]/Bin' : '[Gmail]/Trash'
move_to(trash) unless %w[[Gmail]/Spam [Gmail]/Bin [Gmail]/Trash].include?(@mailbox.name)
end
# Archive this message.
def archive!
move_to('[Gmail]/All Mail')
end
# Move to given box and delete from others.
def move_to(name, from=nil)
label(name, from)
delete! if !%w[[Gmail]/Bin [Gmail]/Trash].include?(name)
end
alias :move :move_to
# Move message to given and delete from others. When given mailbox doesn't
# exist then it will be automaticaly created.
def move_to!(name, from=nil)
label!(name, from) && delete!
end
alias :move! :move_to!
# Mark this message with given label. When given label doesn't exist then
# it will raise <tt>NoLabelError</tt>.
#
# See also <tt>Gmail::Message#label!</tt>.
def label(name, from=nil)
@gmail.mailbox(Net::IMAP.encode_utf7(from || @mailbox.external_name)) { @gmail.conn.uid_copy(uid, Net::IMAP.encode_utf7(name)) }
rescue Net::IMAP::NoResponseError
raise NoLabelError, "Label '#{name}' doesn't exist!"
end
# Mark this message with given label. When given label doesn't exist then
# it will be automaticaly created.
#
# See also <tt>Gmail::Message#label</tt>.
def label!(name, from=nil)
label(name, from)
rescue NoLabelError
@gmail.labels.add(Net::IMAP.encode_utf7(name))
label(name, from)
end
alias :add_label :label!
alias :add_label! :label!
# Remove given label from this message.
def remove_label!(name)
move_to('[Gmail]/All Mail', name)
end
alias :delete_label! :remove_label!
def inspect
"#<Gmail::Message#{'0x%04x' % (object_id << 1)} mailbox=#{@mailbox.external_name}#{' uid='+@uid.to_s if @uid}#{' message_id='+@message_id.to_s if @message_id}>"
end
# Delegate unknown calls to the IMAP envelope first, then to the parsed
# Mail message; anything neither responds to raises NoMethodError via super.
def method_missing(meth, *args, &block)
# Delegate rest directly to the message.
if envelope.respond_to?(meth)
envelope.send(meth, *args, &block)
elsif message.respond_to?(meth)
message.send(meth, *args, &block)
else
super(meth, *args, &block)
end
end
# Mirrors method_missing's delegation for respond_to? queries.
# NOTE(review): overriding respond_to? directly is discouraged — the
# conventional hook is respond_to_missing?; confirm before changing,
# as both envelope and message trigger IMAP fetches when first called.
def respond_to?(meth, *args, &block)
if envelope.respond_to?(meth)
return true
elsif message.respond_to?(meth)
return true
else
super(meth, *args, &block)
end
end
def envelope
@envelope ||= @gmail.mailbox(@mailbox.name) {
@gmail.conn.uid_fetch(uid, "ENVELOPE")[0].attr["ENVELOPE"]
}
end
def message
@message ||= Mail.new(@gmail.mailbox(@mailbox.name) {
@gmail.conn.uid_fetch(uid, "RFC822")[0].attr["RFC822"] # RFC822
})
end
alias_method :raw_message, :message
end # Message
end # Gmail
removing faulty require
module Gmail
class Message
# Raised when given label doesn't exists.
class NoLabelError < Exception; end
attr_reader :uid
def initialize(mailbox, uid)
@uid = uid
@mailbox = mailbox
@gmail = mailbox.instance_variable_get("@gmail") if mailbox
end
def labels
@gmail.conn.uid_fetch(uid, "X-GM-LABELS")[0].attr["X-GM-LABELS"]
end
def uid
@uid ||= @gmail.conn.uid_search(['HEADER', 'Message-ID', message_id])[0]
end
# Mark message with given flag.
def flag(name)
!!@gmail.mailbox(@mailbox.name) { @gmail.conn.uid_store(uid, "+FLAGS", [name]) }
end
# Unmark message.
def unflag(name)
!!@gmail.mailbox(@mailbox.name) { @gmail.conn.uid_store(uid, "-FLAGS", [name]) }
end
# Do commonly used operations on message.
def mark(flag)
case flag
when :read then read!
when :unread then unread!
when :deleted then delete!
when :spam then spam!
else
flag(flag)
end
end
# Mark this message as a spam.
def spam!
move_to('[Gmail]/Spam')
end
# Mark as read.
def read!
flag(:Seen)
end
# Mark as unread.
def unread!
unflag(:Seen)
end
# Mark message with star.
def star!
flag('[Gmail]/Starred')
end
# Remove message from list of starred.
def unstar!
unflag('[Gmail]/Starred')
end
# Move to trash / bin.
def delete!
@mailbox.messages.delete(uid)
flag(:deleted)
# For some, it's called "Trash", for others, it's called "Bin". Support both.
trash = @gmail.labels.exist?('[Gmail]/Bin') ? '[Gmail]/Bin' : '[Gmail]/Trash'
move_to(trash) unless %w[[Gmail]/Spam [Gmail]/Bin [Gmail]/Trash].include?(@mailbox.name)
end
# Archive this message.
def archive!
move_to('[Gmail]/All Mail')
end
# Move to given box and delete from others.
# Copies the message to +name+ (from +from+ when given) and then
# removes the original copy unless the target itself is Bin/Trash.
def move_to(name, from=nil)
label(name, from)
delete! if !%w[[Gmail]/Bin [Gmail]/Trash].include?(name)
end
alias :move :move_to
# Move message to given and delete from others. When given mailbox doesn't
# exist then it will be automaticaly created.
def move_to!(name, from=nil)
label!(name, from) && delete!
end
alias :move! :move_to!
# Mark this message with given label. When given label doesn't exist then
# it will raise <tt>NoLabelError</tt>.
#
# See also <tt>Gmail::Message#label!</tt>.
def label(name, from=nil)
@gmail.mailbox(Net::IMAP.encode_utf7(from || @mailbox.external_name)) { @gmail.conn.uid_copy(uid, Net::IMAP.encode_utf7(name)) }
rescue Net::IMAP::NoResponseError
raise NoLabelError, "Label '#{name}' doesn't exist!"
end
# Mark this message with given label. When given label doesn't exist then
# it will be automaticaly created.
#
# See also <tt>Gmail::Message#label</tt>.
# Retries the labelling once after creating the missing label/mailbox.
def label!(name, from=nil)
label(name, from)
rescue NoLabelError
@gmail.labels.add(Net::IMAP.encode_utf7(name))
label(name, from)
end
alias :add_label :label!
alias :add_label! :label!
# Remove given label from this message.
def remove_label!(name)
move_to('[Gmail]/All Mail', name)
end
alias :delete_label! :remove_label!
def inspect
"#<Gmail::Message#{'0x%04x' % (object_id << 1)} mailbox=#{@mailbox.external_name}#{' uid='+@uid.to_s if @uid}#{' message_id='+@message_id.to_s if @message_id}>"
end
def method_missing(meth, *args, &block)
# Delegate rest directly to the message.
if envelope.respond_to?(meth)
envelope.send(meth, *args, &block)
elsif message.respond_to?(meth)
message.send(meth, *args, &block)
else
super(meth, *args, &block)
end
end
def respond_to?(meth, *args, &block)
if envelope.respond_to?(meth)
return true
elsif message.respond_to?(meth)
return true
else
super(meth, *args, &block)
end
end
def envelope
@envelope ||= @gmail.mailbox(@mailbox.name) {
@gmail.conn.uid_fetch(uid, "ENVELOPE")[0].attr["ENVELOPE"]
}
end
def message
@message ||= Mail.new(@gmail.mailbox(@mailbox.name) {
@gmail.conn.uid_fetch(uid, "RFC822")[0].attr["RFC822"] # RFC822
})
end
alias_method :raw_message, :message
end # Message
end # Gmail
|
require 'socket'
module Progenitor
  # Announces a Maestro endpoint by UDP broadcast on every private IPv4
  # interface of the host.
  class UdpBroadcaster
    # port - Integer port the announcement advertises and is sent to.
    def initialize(port)
      @port = port
      @socket = UDPSocket.new
      @socket.setsockopt(Socket::SOL_SOCKET, Socket::SO_BROADCAST, true)
    end

    # Broadcast "[Maestro@<ip>:<port>]" to each private interface's
    # /24 broadcast address.
    def go
      network_interfaces.each do |network|
        ip = network.ip_address
        broadcast_ip = to_broadcast( ip )
        message_to_broadcast = "[Maestro@#{ip}:#{@port}]"
        send_message_to( message_to_broadcast, broadcast_ip, @port )
      end
    end

    private

    # Best-effort send: network errors are deliberately ignored so one
    # unreachable interface cannot abort the announcement loop.
    # (Was `rescue Exception`, which also swallowed SignalException and
    # SystemExit; StandardError preserves the best-effort intent safely.)
    def send_message_to( message, ip, port )
      @socket.send(message, 0, ip, port)
    rescue StandardError
      nil
    end

    def network_interfaces
      Socket.ip_address_list.select{|interface| interface.ipv4_private?}
    end

    # "a.b.c.d" -> "a.b.c.255" (/24 broadcast address).
    def to_broadcast( ip )
      ip.gsub(/^(\d+\.\d+\.\d+\.)\d+$/, '\1255')
    end
  end
end
Increasing broadcast mask.
require 'socket'
module Progenitor
  # Announces a Maestro endpoint by UDP broadcast on every private IPv4
  # interface of the host.
  class UdpBroadcaster
    # port - Integer port the announcement advertises and is sent to.
    def initialize(port)
      @port = port
      @socket = UDPSocket.new
      @socket.setsockopt(Socket::SOL_SOCKET, Socket::SO_BROADCAST, true)
    end

    # Broadcast "[Maestro@<ip>:<port>]" to each private interface's
    # /16 broadcast address.
    def go
      network_interfaces.each do |network|
        ip = network.ip_address
        broadcast_ip = to_broadcast( ip )
        message_to_broadcast = "[Maestro@#{ip}:#{@port}]"
        send_message_to( message_to_broadcast, broadcast_ip, @port )
      end
    end

    private

    # Best-effort send: network errors are deliberately ignored so one
    # unreachable interface cannot abort the announcement loop.
    # (Was `rescue Exception`, which also swallowed SignalException and
    # SystemExit; StandardError preserves the best-effort intent safely.)
    def send_message_to( message, ip, port )
      @socket.send(message, 0, ip, port)
    rescue StandardError
      nil
    end

    def network_interfaces
      Socket.ip_address_list.select{|interface| interface.ipv4_private?}
    end

    # "a.b.c.d" -> "a.b.255.255" (/16 broadcast address).
    def to_broadcast( ip )
      ip.gsub(/^(\d+\.\d+\.)\d+\.\d+$/, '\1255.255')
    end
  end
end
|
module Gmail
class Message
PREFETCH_ATTRS = ["UID", "ENVELOPE", "BODY.PEEK[]", "FLAGS", "X-GM-LABELS", "X-GM-MSGID"]
# Raised when given label doesn't exists.
class NoLabelError < Exception; end
attr_reader :uid, :envelope, :message, :flags, :labels
def initialize(mailbox, uid, _attrs = nil)
@uid = uid
@mailbox = mailbox
@gmail = mailbox.instance_variable_get("@gmail") if mailbox
@_attrs = _attrs
end
def uid
@uid ||= fetch("UID")
end
def msg_id
@msg_id ||= fetch("X-GM-MSGID")
end
def envelope
@envelope ||= fetch("ENVELOPE")
end
def message
@message ||= Mail.new(fetch("BODY[]"))
end
alias_method :raw_message, :message
def flags
@flags ||= fetch("FLAGS")
end
def labels
@labels ||= fetch("X-GM-LABELS")
end
# Mark message with given flag.
def flag(name)
!!@gmail.mailbox(@mailbox.name) {
@gmail.conn.uid_store(uid, "+FLAGS", [name])
}
end
# Unmark message.
def unflag(name)
!!@gmail.mailbox(@mailbox.name) {
@gmail.conn.uid_store(uid, "-FLAGS", [name])
}
end
# Do commonly used operations on message.
def mark(flag)
case flag
when :read then read!
when :unread then unread!
when :deleted then delete!
when :spam then spam!
else
flag(flag)
end
end
# Mark this message as a spam.
def spam!
move_to('[Gmail]/Spam')
end
# Check whether message is read
def read?
flags.include?(:Seen)
end
# Mark as read.
def read!
flag(:Seen)
end
# Mark as unread.
def unread!
unflag(:Seen)
end
# Check whether message is starred
def starred?
flags.include?(:Flagged)
end
# Mark message with star.
def star!
flag('[Gmail]/Starred')
end
# Remove message from list of starred.
def unstar!
unflag('[Gmail]/Starred')
end
# Move to trash / bin.
def delete!
@mailbox.messages.delete(uid)
flag(:deleted)
# For some, it's called "Trash", for others, it's called "Bin". Support both.
trash = @gmail.labels.exist?('[Gmail]/Bin') ? '[Gmail]/Bin' : '[Gmail]/Trash'
move_to(trash) unless %w[[Gmail]/Spam [Gmail]/Bin [Gmail]/Trash].include?(@mailbox.name)
end
# Archive this message.
def archive!
move_to('[Gmail]/All Mail')
end
# Move to given box and delete from others.
def move_to(name, from=nil)
label(name, from)
delete! if !%w[[Gmail]/Bin [Gmail]/Trash].include?(name)
end
alias :move :move_to
# Move message to given and delete from others. When given mailbox doesn't
# exist then it will be automaticaly created.
def move_to!(name, from=nil)
label!(name, from) && delete!
end
alias :move! :move_to!
# Mark this message with given label. When given label doesn't exist then
# it will raise <tt>NoLabelError</tt>.
#
# See also <tt>Gmail::Message#label!</tt>.
def label(name, from=nil)
@gmail.mailbox(Net::IMAP.encode_utf7(from || @mailbox.external_name)) {
@gmail.conn.uid_copy(uid, Net::IMAP.encode_utf7(name))
}
rescue Net::IMAP::NoResponseError
raise NoLabelError, "Label '#{name}' doesn't exist!"
end
# Mark this message with given label. When given label doesn't exist then
# it will be automaticaly created.
#
# See also <tt>Gmail::Message#label</tt>.
def label!(name, from=nil)
label(name, from)
rescue NoLabelError
@gmail.labels.add(Net::IMAP.encode_utf7(name))
label(name, from)
end
alias :add_label :label!
alias :add_label! :label!
# Remove given label from this message by re-filing it into the
# all-mail archive ("[Gmail]/All Mail", falling back to
# "[Google Mail]/All Mail" on accounts with the Google Mail branding,
# and finally creating the archive label if neither exists).
#
# The previous version chained +rescue+ clauses at the method level —
# a method-level rescue cannot itself be rescued by a following clause —
# and carried two stray +end+s that closed the class and module early,
# breaking the file. Nested begin/rescue restores the intended fallback.
def remove_label!(name)
  move_to('[Gmail]/All Mail', name)
rescue NoLabelError
  begin
    move_to('[Google Mail]/All Mail', name)
  rescue NoLabelError
    @gmail.labels.add(Net::IMAP.encode_utf7('[Gmail]/All Mail'))
    label('[Gmail]/All Mail')
  end
end
alias :delete_label! :remove_label!
def inspect
"#<Gmail::Message#{'0x%04x' % (object_id << 1)} mailbox=#{@mailbox.external_name}#{' uid='+@uid.to_s if @uid}#{' message_id='+@message_id.to_s if @message_id}>"
end
def method_missing(meth, *args, &block)
# Delegate rest directly to the message.
if envelope.respond_to?(meth)
envelope.send(meth, *args, &block)
elsif message.respond_to?(meth)
message.send(meth, *args, &block)
else
super(meth, *args, &block)
end
end
def respond_to?(meth, *args, &block)
if envelope.respond_to?(meth)
return true
elsif message.respond_to?(meth)
return true
else
super(meth, *args, &block)
end
end
private
# Lazily fetch and memoise all prefetch attributes (UID, ENVELOPE,
# body, flags, labels, msg id) in a single IMAP round-trip, then
# return the one requested by +value+.
def fetch(value)
@_attrs ||= begin
@gmail.mailbox(@mailbox.name) {
@gmail.conn.uid_fetch(uid, PREFETCH_ATTRS)[0]
}
end
@_attrs.attr[value]
end
end # Message
end # Gmail
fix remove_label exceptions
module Gmail
class Message
PREFETCH_ATTRS = ["UID", "ENVELOPE", "BODY.PEEK[]", "FLAGS", "X-GM-LABELS", "X-GM-MSGID"]
# Raised when given label doesn't exists.
class NoLabelError < Exception; end
attr_reader :uid, :envelope, :message, :flags, :labels
def initialize(mailbox, uid, _attrs = nil)
@uid = uid
@mailbox = mailbox
@gmail = mailbox.instance_variable_get("@gmail") if mailbox
@_attrs = _attrs
end
def uid
@uid ||= fetch("UID")
end
def msg_id
@msg_id ||= fetch("X-GM-MSGID")
end
def envelope
@envelope ||= fetch("ENVELOPE")
end
def message
@message ||= Mail.new(fetch("BODY[]"))
end
alias_method :raw_message, :message
def flags
@flags ||= fetch("FLAGS")
end
def labels
@labels ||= fetch("X-GM-LABELS")
end
# Mark message with given flag.
def flag(name)
!!@gmail.mailbox(@mailbox.name) {
@gmail.conn.uid_store(uid, "+FLAGS", [name])
}
end
# Unmark message.
def unflag(name)
!!@gmail.mailbox(@mailbox.name) {
@gmail.conn.uid_store(uid, "-FLAGS", [name])
}
end
# Do commonly used operations on message.
def mark(flag)
case flag
when :read then read!
when :unread then unread!
when :deleted then delete!
when :spam then spam!
else
flag(flag)
end
end
# Mark this message as a spam.
def spam!
move_to('[Gmail]/Spam')
end
# Check whether message is read
def read?
flags.include?(:Seen)
end
# Mark as read.
def read!
flag(:Seen)
end
# Mark as unread.
def unread!
unflag(:Seen)
end
# Check whether message is starred
def starred?
flags.include?(:Flagged)
end
# Mark message with star.
def star!
flag('[Gmail]/Starred')
end
# Remove message from list of starred.
def unstar!
unflag('[Gmail]/Starred')
end
# Move to trash / bin.
def delete!
@mailbox.messages.delete(uid)
flag(:deleted)
# For some, it's called "Trash", for others, it's called "Bin". Support both.
trash = @gmail.labels.exist?('[Gmail]/Bin') ? '[Gmail]/Bin' : '[Gmail]/Trash'
move_to(trash) unless %w[[Gmail]/Spam [Gmail]/Bin [Gmail]/Trash].include?(@mailbox.name)
end
# Archive this message.
def archive!
move_to('[Gmail]/All Mail')
end
# Move to given box and delete from others.
def move_to(name, from=nil)
label(name, from)
delete! if !%w[[Gmail]/Bin [Gmail]/Trash].include?(name)
end
alias :move :move_to
# Move message to given and delete from others. When given mailbox doesn't
# exist then it will be automaticaly created.
def move_to!(name, from=nil)
label!(name, from) && delete!
end
alias :move! :move_to!
# Mark this message with given label. When given label doesn't exist then
# it will raise <tt>NoLabelError</tt>.
#
# See also <tt>Gmail::Message#label!</tt>.
def label(name, from=nil)
@gmail.mailbox(Net::IMAP.encode_utf7(from || @mailbox.external_name)) {
@gmail.conn.uid_copy(uid, Net::IMAP.encode_utf7(name))
}
rescue Net::IMAP::NoResponseError
raise NoLabelError, "Label '#{name}' doesn't exist!"
end
# Mark this message with given label. When given label doesn't exist then
# it will be automaticaly created.
#
# See also <tt>Gmail::Message#label</tt>.
def label!(name, from=nil)
label(name, from)
rescue NoLabelError
@gmail.labels.add(Net::IMAP.encode_utf7(name))
label(name, from)
end
alias :add_label :label!
alias :add_label! :label!
# Remove given label from this message.
# Re-files the message into the all-mail archive: "[Gmail]/All Mail"
# first, then "[Google Mail]/All Mail", finally creating the archive
# label if neither exists. The method-level rescue replaces the
# redundant begin/end wrapper around the whole body.
def remove_label!(name)
  move_to('[Gmail]/All Mail', name)
rescue NoLabelError
  begin
    move_to('[Google Mail]/All Mail', name)
  rescue NoLabelError
    @gmail.labels.add(Net::IMAP.encode_utf7('[Gmail]/All Mail'))
    label('[Gmail]/All Mail')
  end
end
alias :delete_label! :remove_label!
def inspect
"#<Gmail::Message#{'0x%04x' % (object_id << 1)} mailbox=#{@mailbox.external_name}#{' uid='+@uid.to_s if @uid}#{' message_id='+@message_id.to_s if @message_id}>"
end
def method_missing(meth, *args, &block)
# Delegate rest directly to the message.
if envelope.respond_to?(meth)
envelope.send(meth, *args, &block)
elsif message.respond_to?(meth)
message.send(meth, *args, &block)
else
super(meth, *args, &block)
end
end
def respond_to?(meth, *args, &block)
if envelope.respond_to?(meth)
return true
elsif message.respond_to?(meth)
return true
else
super(meth, *args, &block)
end
end
private
def fetch(value)
@_attrs ||= begin
@gmail.mailbox(@mailbox.name) {
@gmail.conn.uid_fetch(uid, PREFETCH_ATTRS)[0]
}
end
@_attrs.attr[value]
end
end # Message
end # Gmail
|
module Gnucash
  # A monetary amount stored as an integer number of hundredths, as
  # serialised by GnuCash ("<cents>/100").
  class Value
    include Comparable

    # Integer count of hundredths (e.g. 150 represents 1.50).
    attr_accessor :val

    # val - either a "n/100" String (as found in GnuCash XML) or an
    #       Integer number of hundredths.
    # Raises a RuntimeError on any other input.
    # (Fixnum was replaced by Integer: Fixnum has been deprecated since
    # Ruby 2.4 and removed in 3.2, so the old check raises NameError.)
    def initialize(val)
      case val
      when String
        # \A/\z anchor the whole string (^/$ only anchor a line).
        if val =~ %r{\A(-?\d+)/100\z}
          @val = Regexp.last_match(1).to_i
        else
          raise "Unexpected value string: #{val.inspect}"
        end
      when Integer
        @val = val
      else
        raise "Unexpected value type: #{val.class}"
      end
    end

    def +(other)
      Value.new(@val + other.val)
    end

    def -(other)
      Value.new(@val - other.val)
    end

    # Render with two decimal places, e.g. "1.50".
    def to_s
      sprintf("%.02f", @val / 100.0)
    end

    def <=>(other)
      @val <=> other.val
    end
  end
end
Value: keep track of divisor
module Gnucash
  # A fractional amount as serialised by GnuCash: an integer numerator
  # over an integer divisor ("n/d").
  class Value
    include Comparable

    # Integer numerator.
    attr_accessor :val
    # Integer divisor; exposed read-only so results can be inspected.
    attr_reader :div

    # val - a "n/d" String, or an Integer numerator.
    # div - divisor used when +val+ is an Integer (default 100).
    # Raises a RuntimeError on any other input.
    # (Integer replaces the deprecated Fixnum, removed in Ruby 3.2.)
    def initialize(val, div = 100)
      case val
      when String
        if val =~ %r{\A(-?\d+)/(\d+)\z}
          @val = Regexp.last_match(1).to_i
          @div = Regexp.last_match(2).to_i
        else
          raise "Unexpected value string: #{val.inspect}"
        end
      when Integer
        @val = val
        @div = div
      else
        raise "Unexpected value type: #{val.class}"
      end
    end

    # Arithmetic keeps the receiver's divisor. Previously the result's
    # divisor silently reset to 100, corrupting any non-cent denomination.
    def +(other)
      Value.new(@val + other.val, @div)
    end

    def -(other)
      Value.new(@val - other.val, @div)
    end

    # Render with two decimal places, e.g. "1.50".
    def to_s
      sprintf("%.02f", to_f)
    end

    def to_f
      @val / @div.to_f
    end

    # NOTE(review): compares raw numerators, ignoring divisors — only
    # meaningful between values sharing a divisor; confirm callers.
    def <=>(other)
      @val <=> other.val
    end
  end
end
|
require 'net/http'
require 'json'
class GoogleClient
# NOTE: this is currently unused because data recorded by Google Fit
# (instead of 3rd-party apps) is not saved in sessions.
# def self.fit_sessions(refresh_token)
# auth_token = fetch_new_auth_token(refresh_token)
# return { error: 'internal error' } if auth_token.nil?
# uri = URI('https://www.googleapis.com/fitness/v1/users/me/sessions')
# http = Net::HTTP.new(uri.host, uri.port)
# http.use_ssl = true
# http.verify_mode = OpenSSL::SSL::VERIFY_PEER
# req = Net::HTTP::Get.new(uri.request_uri)
# req.content_type = 'application/json;encoding=utf-8'
# req['Authorization'] = 'Bearer ' + auth_token
# response = http.request(req)
# JSON.parse(response.body)
# end
# Fetch activity segments from the Google Fit aggregate API between
# params[:start_time] (ms since epoch) and the start of today.
#
# params - Hash with :start_time (Integer, ms) and :refresh_token.
#
# Returns the formatted activity list, or false on any failure (bad
# time range, expired refresh token, non-200 API response).
#
# The previous version raised an exception only to rescue it on the
# next line and print it — exceptions as control flow; the messages are
# now logged directly, output unchanged.
def self.fit_segments(params)
  # NOTE(review): Time#beginning_of_day is an ActiveSupport extension.
  end_time = (Time.now.beginning_of_day.to_r * 1000).round
  if params[:start_time] > end_time
    puts "Start time (#{params[:start_time]}) is after the end time(#{end_time})."
    return false
  end
  auth_token = fetch_new_auth_token(params[:refresh_token])
  if auth_token.nil?
    puts 'Auth token fetch failed. Refresh token has likely expired.'
    return false
  end
  uri = URI('https://www.googleapis.com/fitness/v1/users/me/dataset:aggregate')
  http = Net::HTTP.new(uri.host, uri.port)
  http.use_ssl = true
  http.verify_mode = OpenSSL::SSL::VERIFY_PEER
  req_body = {
    aggregateBy: [
      {
        dataSourceId: "derived:com.google.activity.segment:com.google.android.gms:merge_activity_segments"
      }
    ],
    startTimeMillis: params[:start_time],
    endTimeMillis: end_time,
    bucketByActivitySegment: {
      minDurationMillis: 300000 # will only return activities 5+ minutes long
    }
  }
  req = Net::HTTP::Post.new(uri.request_uri)
  req.content_type = 'application/json;encoding=utf-8'
  req['Authorization'] = 'Bearer ' + auth_token
  req.body = req_body.to_json
  response = http.request(req)
  if response.code == '200'
    format_segments(JSON.parse(response.body))
  else
    puts "Google Fit API error: #{response.body}"
    false
  end
end
# OPTIMIZE: could cache the auth token
# Exchange a long-lived OAuth refresh token for a fresh access token.
# Returns the access token String, or nil when the grant fails.
# NOTE(review): the grant parameters are sent in the URL query string of
# a POST with an empty body; Google's token endpoint normally expects a
# form-encoded body — confirm this works as deployed. The response code
# is never checked, so failures surface only as a nil access_token.
def self.fetch_new_auth_token(refresh_token)
uri = URI('https://www.googleapis.com/oauth2/v3/token')
params = {
refresh_token: refresh_token,
client_id: ENV['GOOGLE_OAUTH_CLIENT_ID'],
client_secret: ENV['GOOGLE_CLIENT_SECRET'],
grant_type: 'refresh_token'
}
uri.query = URI.encode_www_form(params)
http = Net::HTTP.new(uri.host, uri.port)
http.use_ssl = true
http.verify_mode = OpenSSL::SSL::VERIFY_PEER
req = Net::HTTP::Post.new(uri.request_uri)
req.content_type = 'application/json;encoding=utf-8'
response = http.request(req)
response = JSON.parse(response.body)
response['access_token']
end
# Convert a Google Fit aggregate response into plain activity hashes
# (:start_time, :end_time, :data_source, :activity_type_id), skipping
# excluded activity codes. ActivityType DB lookups are memoised per call.
# NOTE(review): the exclusion check compares record['activity'] without
# to_i while the lookup below does call to_i — confirm the API returns
# Integers here, otherwise the exclude list never matches.
def self.format_segments(data)
activity_type_map = {}
activities = []
if data['bucket']
data['bucket'].each do |record|
unless [0, 3, 4, 72, 109, 110, 111].include?(record['activity']) # OPTIMIZE: the 'exclude' list should be defined elsewhere
activity = {}
activity[:start_time] = record['startTimeMillis'].to_i
activity[:end_time] = record['endTimeMillis'].to_i
activity[:data_source] = record['dataset'][0]['point'][0]['originDataSourceId'][36..-1] # trims 'derived:com.google.activity.segment:'
# Hash#fetch with a block both looks up and caches the DB-backed id.
activity[:activity_type_id] = activity_type_map.fetch(record['activity'].to_i) { |google_id|
activity_type_map[google_id] = ActivityType.find_by(googleID: google_id).id
}
activities << activity
end
end
end
return activities
end
end
Make a tweak to a comment.
require 'net/http'
require 'json'
class GoogleClient
# # NOTE: this is currently unused because data recorded by Google Fit
# # (instead of 3rd-party apps) is not saved in sessions.
# def self.fit_sessions(refresh_token)
# auth_token = fetch_new_auth_token(refresh_token)
# return { error: 'internal error' } if auth_token.nil?
# uri = URI('https://www.googleapis.com/fitness/v1/users/me/sessions')
# http = Net::HTTP.new(uri.host, uri.port)
# http.use_ssl = true
# http.verify_mode = OpenSSL::SSL::VERIFY_PEER
# req = Net::HTTP::Get.new(uri.request_uri)
# req.content_type = 'application/json;encoding=utf-8'
# req['Authorization'] = 'Bearer ' + auth_token
# response = http.request(req)
# JSON.parse(response.body)
# end
def self.fit_segments(params)
end_time = (Time.now.beginning_of_day.to_r * 1000).round
if params[:start_time] > end_time
begin
raise "Start time (#{params[:start_time]}) is after the end time(#{end_time})."
rescue StandardError => e
puts e
return false
end
end
auth_token = fetch_new_auth_token(params[:refresh_token])
if auth_token.nil?
begin
raise 'Auth token fetch failed. Refresh token has likely expired.'
rescue StandardError => e
puts e
return false
end
end
uri = URI('https://www.googleapis.com/fitness/v1/users/me/dataset:aggregate')
http = Net::HTTP.new(uri.host, uri.port)
http.use_ssl = true
http.verify_mode = OpenSSL::SSL::VERIFY_PEER
req_body = {
aggregateBy: [
{
dataSourceId: "derived:com.google.activity.segment:com.google.android.gms:merge_activity_segments"
}
],
startTimeMillis: params[:start_time],
endTimeMillis: end_time,
bucketByActivitySegment: {
minDurationMillis: 300000 # will only return activities 5+ minutes long
}
}
req = Net::HTTP::Post.new(uri.request_uri)
req.content_type = 'application/json;encoding=utf-8'
req['Authorization'] = 'Bearer ' + auth_token
req.body = req_body.to_json
response = http.request(req)
if response.code == '200'
return format_segments(JSON.parse(response.body))
else
begin
raise "Google Fit API error: #{response.body}"
rescue StandardError => e
puts e
return false
end
end
end
# OPTIMIZE: could cache the auth token
# Exchange a long-lived OAuth refresh token for a fresh access token.
# Returns the access token String, or nil when the grant fails.
# NOTE(review): the grant parameters are sent in the URL query string of
# a POST with an empty body; Google's token endpoint normally expects a
# form-encoded body — confirm this works as deployed. The response code
# is never checked, so failures surface only as a nil access_token.
def self.fetch_new_auth_token(refresh_token)
uri = URI('https://www.googleapis.com/oauth2/v3/token')
params = {
refresh_token: refresh_token,
client_id: ENV['GOOGLE_OAUTH_CLIENT_ID'],
client_secret: ENV['GOOGLE_CLIENT_SECRET'],
grant_type: 'refresh_token'
}
uri.query = URI.encode_www_form(params)
http = Net::HTTP.new(uri.host, uri.port)
http.use_ssl = true
http.verify_mode = OpenSSL::SSL::VERIFY_PEER
req = Net::HTTP::Post.new(uri.request_uri)
req.content_type = 'application/json;encoding=utf-8'
response = http.request(req)
response = JSON.parse(response.body)
response['access_token']
end
# Converts a parsed Google Fit aggregate response into an Array of activity
# hashes with :start_time, :end_time, :data_source and :activity_type_id.
#
# data - Hash parsed from the dataset:aggregate JSON response; activity
#        buckets are read from data['bucket'].
#
# Buckets whose 'activity' code is in the exclusion list below are skipped.
# Returns an empty Array when 'bucket' is absent.
def self.format_segments(data)
  # Memoizes googleID -> ActivityType id lookups so each distinct activity
  # code hits the database at most once.
  activity_type_map = {}
  activities = []
  if data['bucket']
    data['bucket'].each do |record|
      unless [0, 3, 4, 72, 109, 110, 111].include?(record['activity']) # OPTIMIZE: the 'exclude' list should be defined elsewhere
        activity = {}
        activity[:start_time] = record['startTimeMillis'].to_i
        activity[:end_time] = record['endTimeMillis'].to_i
        activity[:data_source] = record['dataset'][0]['point'][0]['originDataSourceId'][36..-1] # trims 'derived:com.google.activity.segment:'
        # fetch's default block runs only on a cache miss and stores the
        # looked-up id for subsequent records with the same code.
        activity[:activity_type_id] = activity_type_map.fetch(record['activity'].to_i) { |google_id|
          activity_type_map[google_id] = ActivityType.find_by(googleID: google_id).id
        }
        activities << activity
      end
    end
  end
  return activities
end
end
|
require 'kramdown'
module Grape
class Entity
  class << self
    # NOTE(review): the reader generated here is immediately overridden by
    # the method below; only the generated writer remains in effect.
    attr_accessor :class_name

    # Gets/sets the swagger model name for this entity: with an argument it
    # assigns the name, and it always returns the current value.
    def class_name(name = nil)
      @class_name = name if name
      @class_name
    end
  end
end
class API
  class << self
    # Routes of every mounted API, grouped by their first path segment.
    attr_reader :combined_routes

    alias original_mount mount

    # Wraps Grape's mount so each mounted API's routes are also indexed
    # into @combined_routes for the swagger endpoints below.
    def mount(mounts)
      original_mount mounts
      @combined_routes ||= {}
      mounts::routes.each do |route|
        # Resource name = text between the leading '/' and the next
        # '.', '/' or '(' in the route path.
        resource = route.route_path.match('\/(\w*?)[\.\/\(]').captures.first || '/'
        @combined_routes[resource.downcase] ||= []
        @combined_routes[resource.downcase] << route
      end
    end

    # Creates, configures and mounts the swagger documentation endpoint.
    def add_swagger_documentation(options={})
      documentation_class = create_documentation_class
      documentation_class.setup({:target_class => self}.merge(options))
      mount(documentation_class)
    end

    private

    # Builds an anonymous Grape::API subclass serving the swagger JSON:
    # an index at the mount path and per-resource docs at <mount_path>/:name.
    def create_documentation_class
      Class.new(Grape::API) do
        class << self
          # Anonymous classes have no name; report the configured one.
          def name
            @@class_name
          end
        end

        # NOTE(review): configuration lives in class variables (@@...) which
        # are shared across every class produced by this factory — a second
        # add_swagger_documentation call would overwrite earlier settings.
        def self.setup(options)
          defaults = {
            :target_class => nil,
            :mount_path => '/swagger_doc',
            :base_path => nil,
            :api_version => '0.1',
            :markdown => false,
            :hide_documentation_path => false
          }
          options = defaults.merge(options)
          @@target_class = options[:target_class]
          @@mount_path = options[:mount_path]
          @@class_name = options[:class_name] || options[:mount_path].gsub('/','')
          @@markdown = options[:markdown]
          @@hide_documentation_path = options[:hide_documentation_path]
          api_version = options[:api_version]
          base_path = options[:base_path]

          desc 'Swagger compatible API description'
          get @@mount_path do
            header['Access-Control-Allow-Origin'] = '*'
            header['Access-Control-Request-Method'] = '*'
            routes = @@target_class::combined_routes
            if @@hide_documentation_path
              # NOTE(review): reject! mutates the shared combined_routes
              # hash, so documentation routes stay hidden for all later
              # requests too.
              routes.reject!{ |route, value| "/#{route}/".index(parse_path(@@mount_path, nil) << '/') == 0 }
            end
            routes_array = routes.keys.map do |local_route|
              # FIXME(review): `route` is not defined in this block (the
              # parameter is `local_route`); route.route_path here looks
              # like it raises NameError — confirm and fix.
              { :path => "#{parse_path(route.route_path.gsub('(.:format)', ''),route.route_version)}/#{local_route}.{format}" }
            end
            {
              apiVersion: api_version,
              swaggerVersion: "1.1",
              basePath: base_path || request.base_url,
              operations:[],
              apis: routes_array
            }
          end

          desc 'Swagger compatible API description for specific API', :params =>
            {
              "name" => { :desc => "Resource name of mounted API", :type => "string", :required => true },
            }
          get "#{@@mount_path}/:name" do
            header['Access-Control-Allow-Origin'] = '*'
            header['Access-Control-Request-Method'] = '*'
            routes = @@target_class::combined_routes[params[:name]]
            # One swagger API entry (with a single operation) per route.
            routes_array = routes.map do |route|
              # Optionally render notes from markdown to HTML.
              notes = route.route_notes && @@markdown ? Kramdown::Document.new(route.route_notes.strip_heredoc).to_html : route.route_notes
              if entity = route.route_entity
                if entity.is_a?(Array)
                  entity = entity.first
                  response_class = "List[#{entity.class_name}]"
                else
                  response_class = entity.class_name
                end
              end
              route_hash = {
                :path => parse_path(route.route_path, api_version),
                :operations => [{
                  :notes => notes,
                  :summary => route.route_description || '',
                  :nickname => route.route_method + route.route_path.gsub(/[\/:\(\)\.]/,'-'),
                  :httpMethod => route.route_method,
                  :parameters => parse_header_params(route.route_headers) +
                    parse_params(route.route_params, route.route_path, route.route_method)
                }]
              }
              route_hash[:operations].first[:responseClass] = response_class if response_class
              #route_hash[:operations].first[:responseClass] = 'Pet'
              route_hash
            end
            # Collect entity models (plus entities referenced from
            # array-typed fields) into the swagger "models" section.
            routes_entities = {}
            entities = []
            routes.each do |route|
              entity = route.route_entity
              entities << entity if entity
              entities += route.route_entities if route.route_entities
            end
            while entities.any? do
              entity = entities.pop
              entity = entity.first if entity.is_a?(Array)
              class_name = entity.class_name
              properties = {}
              exposures = entity.exposures
              entity.documentation.each_pair do |field, documentation|
                # Respect `expose ..., :as => ...` renames.
                field = exposures[field][:as] if exposures[field][:as]
                properties[field] = {
                  'description' => documentation[:desc]
                }
                if documentation[:type].is_a?(Array)
                  properties[field]['type'] = 'Array'
                  refclass = documentation[:type].first
                  refclass_name = refclass.class_name
                  properties[field]['items'] = { '$ref' => refclass_name }
                  # Queue referenced entities not documented yet.
                  unless routes_entities[refclass_name]
                    entities << refclass
                  end
                else
                  properties[field]['type'] = documentation[:type]
                end
              end
              routes_entities[class_name] = {
                'id' => class_name,
                'properties' => properties
              }
            end
            {
              apiVersion: api_version,
              swaggerVersion: "1.1",
              basePath: base_path || request.base_url,
              resourcePath: "",
              apis: routes_array,
              models: routes_entities
            }
          end
        end

        helpers do
          # Converts Grape route params into swagger parameter hashes.
          def parse_params(params, path, method)
            if params
              params.map do |param, value|
                value[:type] = 'file' if value.is_a?(Hash) && value[:type] == 'Rack::Multipart::UploadedFile'
                dataType = value.is_a?(Hash) ? value[:type]||'String' : 'String'
                description = value.is_a?(Hash) ? value[:desc] : ''
                required = value.is_a?(Hash) ? !!value[:required] : false
                # Params appearing in the path are 'path', POST params are
                # 'body', everything else is 'query'.
                paramType = path.match(":#{param}") ? 'path' : (method == 'POST') ? 'body' : 'query'
                name = (value.is_a?(Hash) && value[:full_name]) || param
                {
                  paramType: paramType,
                  name: name,
                  description: description,
                  dataType: dataType,
                  required: required
                }
              end
            else
              []
            end
          end

          # Converts declared request headers into swagger parameter hashes.
          def parse_header_params(params)
            if params
              params.map do |param, value|
                dataType = 'String'
                description = value.is_a?(Hash) ? value[:description] : ''
                required = value.is_a?(Hash) ? !!value[:required] : false
                paramType = "header"
                {
                  paramType: paramType,
                  name: param,
                  description: description,
                  dataType: dataType,
                  required: required
                }
              end
            else
              []
            end
          end

          # Rewrites a Grape route path into swagger path syntax.
          def parse_path(path, version)
            # adapt format to swagger format
            parsed_path = path.gsub('(.:format)', '.{format}')
            # This is attempting to emulate the behavior of
            # Rack::Mount::Strexp. We cannot use Strexp directly because
            # all it does is generate regular expressions for parsing URLs.
            # TODO: Implement a Racc tokenizer to properly generate the
            # parsed path.
            parsed_path = parsed_path.gsub(/:([a-zA-Z_]\w*)/, '{\1}')
            # add the version
            parsed_path = parsed_path.gsub('{version}', version) if version
            parsed_path
          end
        end
      end
    end
  end
end
end
class Object
  ##
  # Minimal ActiveSupport-style Object#try:
  #
  #   @person ? @person.name : nil
  # vs
  #   @person.try(:name)
  #
  # File activesupport/lib/active_support/core_ext/object/try.rb#L32
  def try(*args, &block)
    # With no arguments and a block, yield the receiver to the block;
    # otherwise forward the call (including private methods) via __send__.
    return yield(self) if args.empty? && block_given?
    __send__(*args, &block)
  end
end
class String
  # strip_heredoc from rails
  # File activesupport/lib/active_support/core_ext/string/strip.rb, line 22
  #
  # Removes the smallest leading indentation found on any non-blank line
  # from every line, so indented heredocs read naturally.
  #
  # Returns a new String; the receiver is not modified.
  def strip_heredoc
    # BUGFIX: for a string with no non-blank lines, scan(...).min is nil and
    # the previous nil.try(:size) raised NoMethodError (this file's Object#try
    # forwards via __send__), making the `|| 0` fallback unreachable.
    # Defaulting to an empty string yields indent 0 instead.
    indent = (scan(/^[ \t]*(?=\S)/).min || '').size
    gsub(/^[ \t]{#{indent}}/, '')
  end
end
Hack to filter paths in a different way: optionally group routes by the mount's prefix instead of the first segment of the route path.
require 'kramdown'
module Grape
class Entity
  class << self
    # NOTE(review): the reader generated here is immediately overridden by
    # the method below; only the generated writer remains in effect.
    attr_accessor :class_name

    # Gets/sets the swagger model name for this entity: with an argument it
    # assigns the name, and it always returns the current value.
    def class_name(name = nil)
      @class_name = name if name
      @class_name
    end
  end
end
class API
  class << self
    # Every API mounted into this one, in mount order.
    attr_reader :mounts

    alias original_mount mount

    # Wraps Grape's mount to remember each mounted API; route indexing is
    # deferred to the documentation class's setup (unlike the eager variant).
    def mount(mounts)
      original_mount mounts
      (@mounts ||= []) << mounts
    end

    # Creates, configures and mounts the swagger documentation endpoint.
    def add_swagger_documentation(options={})
      documentation_class = create_documentation_class
      documentation_class.setup({:target_class => self}.merge(options))
      mount(documentation_class)
    end

    private

    # Builds an anonymous Grape::API subclass serving the swagger JSON:
    # an index at the mount path and per-resource docs at <mount_path>/:name.
    def create_documentation_class
      Class.new(Grape::API) do
        class << self
          # Anonymous classes have no name; report the configured one.
          def name
            @@class_name
          end
        end

        # NOTE(review): configuration lives in class variables (@@...) which
        # are shared across every class produced by this factory — a second
        # add_swagger_documentation call would overwrite earlier settings.
        def self.setup(options)
          defaults = {
            :target_class => nil,
            :mount_path => '/swagger_doc',
            :base_path => nil,
            :api_version => '0.1',
            :markdown => false,
            :hide_documentation_path => false
          }
          options = defaults.merge(options)
          @@target_class = options[:target_class]
          @@mount_path = options[:mount_path]
          @@class_name = options[:class_name] || options[:mount_path].gsub('/','')
          @@markdown = options[:markdown]
          @@hide_documentation_path = options[:hide_documentation_path]
          api_version = options[:api_version]
          base_path = options[:base_path]
          @@combined_routes ||= {}
          # Group every mounted API's routes by resource: with :prefix_hack
          # the mount prefix's last segment is used (APIs without a prefix
          # are skipped), otherwise the first segment of the route path.
          @@target_class.mounts.each do |mount|
            mount::routes.each do |route|
              if options[:prefix_hack]
                next unless mount.prefix
                resource = mount.prefix.split('/').last
              else
                resource = route.route_path.match('\/(\w*?)[\.\/\(]').captures.first || '/'
              end
              @@combined_routes[resource.downcase] ||= []
              @@combined_routes[resource.downcase] << route
            end
          end

          desc 'Swagger compatible API description'
          get @@mount_path do
            header['Access-Control-Allow-Origin'] = '*'
            header['Access-Control-Request-Method'] = '*'
            routes = @@combined_routes
            if @@hide_documentation_path
              # NOTE(review): reject! mutates the shared @@combined_routes
              # hash, so documentation routes stay hidden for all later
              # requests too.
              routes.reject!{ |route, value| "/#{route}/".index(parse_path(@@mount_path, nil) << '/') == 0 }
            end
            routes_array = routes.keys.map do |local_route|
              # FIXME(review): `route` is not defined in this block (the
              # parameter is `local_route`); route.route_path here looks
              # like it raises NameError — confirm and fix.
              { :path => "#{parse_path(route.route_path.gsub('(.:format)', ''),route.route_version)}/#{local_route}.{format}" }
            end
            {
              apiVersion: api_version,
              swaggerVersion: "1.1",
              basePath: base_path || request.base_url,
              operations:[],
              apis: routes_array
            }
          end

          desc 'Swagger compatible API description for specific API', :params =>
            {
              "name" => { :desc => "Resource name of mounted API", :type => "string", :required => true },
            }
          get "#{@@mount_path}/:name" do
            header['Access-Control-Allow-Origin'] = '*'
            header['Access-Control-Request-Method'] = '*'
            routes = @@combined_routes[params[:name]]
            # One swagger API entry (with a single operation) per route.
            routes_array = routes.map do |route|
              # Optionally render notes from markdown to HTML.
              notes = route.route_notes && @@markdown ? Kramdown::Document.new(route.route_notes.strip_heredoc).to_html : route.route_notes
              if entity = route.route_entity
                if entity.is_a?(Array)
                  entity = entity.first
                  response_class = "List[#{entity.class_name}]"
                else
                  response_class = entity.class_name
                end
              end
              route_hash = {
                :path => parse_path(route.route_path, api_version),
                :operations => [{
                  :notes => notes,
                  :summary => route.route_description || '',
                  :nickname => route.route_method + route.route_path.gsub(/[\/:\(\)\.]/,'-'),
                  :httpMethod => route.route_method,
                  :parameters => parse_header_params(route.route_headers) +
                    parse_params(route.route_params, route.route_path, route.route_method)
                }]
              }
              route_hash[:operations].first[:responseClass] = response_class if response_class
              #route_hash[:operations].first[:responseClass] = 'Pet'
              route_hash
            end
            # Collect entity models (plus entities referenced from
            # array-typed fields) into the swagger "models" section.
            routes_entities = {}
            entities = []
            routes.each do |route|
              entity = route.route_entity
              entities << entity if entity
              entities += route.route_entities if route.route_entities
            end
            while entities.any? do
              entity = entities.pop
              entity = entity.first if entity.is_a?(Array)
              class_name = entity.class_name
              properties = {}
              exposures = entity.exposures
              entity.documentation.each_pair do |field, documentation|
                # Respect `expose ..., :as => ...` renames.
                field = exposures[field][:as] if exposures[field][:as]
                properties[field] = {
                  'description' => documentation[:desc]
                }
                if documentation[:type].is_a?(Array)
                  properties[field]['type'] = 'Array'
                  refclass = documentation[:type].first
                  refclass_name = refclass.class_name
                  properties[field]['items'] = { '$ref' => refclass_name }
                  # Queue referenced entities not documented yet.
                  unless routes_entities[refclass_name]
                    entities << refclass
                  end
                else
                  properties[field]['type'] = documentation[:type]
                end
              end
              routes_entities[class_name] = {
                'id' => class_name,
                'properties' => properties
              }
            end
            {
              apiVersion: api_version,
              swaggerVersion: "1.1",
              basePath: base_path || request.base_url,
              resourcePath: "",
              apis: routes_array,
              models: routes_entities
            }
          end
        end

        helpers do
          # Converts Grape route params into swagger parameter hashes.
          def parse_params(params, path, method)
            if params
              params.map do |param, value|
                value[:type] = 'file' if value.is_a?(Hash) && value[:type] == 'Rack::Multipart::UploadedFile'
                dataType = value.is_a?(Hash) ? value[:type]||'String' : 'String'
                description = value.is_a?(Hash) ? value[:desc] : ''
                required = value.is_a?(Hash) ? !!value[:required] : false
                # Params appearing in the path are 'path', POST params are
                # 'body', everything else is 'query'.
                paramType = path.match(":#{param}") ? 'path' : (method == 'POST') ? 'body' : 'query'
                name = (value.is_a?(Hash) && value[:full_name]) || param
                {
                  paramType: paramType,
                  name: name,
                  description: description,
                  dataType: dataType,
                  required: required
                }
              end
            else
              []
            end
          end

          # Converts declared request headers into swagger parameter hashes.
          def parse_header_params(params)
            if params
              params.map do |param, value|
                dataType = 'String'
                description = value.is_a?(Hash) ? value[:description] : ''
                required = value.is_a?(Hash) ? !!value[:required] : false
                paramType = "header"
                {
                  paramType: paramType,
                  name: param,
                  description: description,
                  dataType: dataType,
                  required: required
                }
              end
            else
              []
            end
          end

          # Rewrites a Grape route path into swagger path syntax.
          def parse_path(path, version)
            # adapt format to swagger format
            parsed_path = path.gsub('(.:format)', '.{format}')
            # This is attempting to emulate the behavior of
            # Rack::Mount::Strexp. We cannot use Strexp directly because
            # all it does is generate regular expressions for parsing URLs.
            # TODO: Implement a Racc tokenizer to properly generate the
            # parsed path.
            parsed_path = parsed_path.gsub(/:([a-zA-Z_]\w*)/, '{\1}')
            # add the version
            parsed_path = parsed_path.gsub('{version}', version) if version
            parsed_path
          end
        end
      end
    end
  end
end
end
class Object
  ##
  # Minimal ActiveSupport-style Object#try:
  #
  #   @person ? @person.name : nil
  # vs
  #   @person.try(:name)
  #
  # File activesupport/lib/active_support/core_ext/object/try.rb#L32
  def try(*args, &block)
    # With no arguments and a block, yield the receiver to the block;
    # otherwise forward the call (including private methods) via __send__.
    return yield(self) if args.empty? && block_given?
    __send__(*args, &block)
  end
end
class String
  # strip_heredoc from rails
  # File activesupport/lib/active_support/core_ext/string/strip.rb, line 22
  #
  # Removes the smallest leading indentation found on any non-blank line
  # from every line, so indented heredocs read naturally.
  #
  # Returns a new String; the receiver is not modified.
  def strip_heredoc
    # BUGFIX: for a string with no non-blank lines, scan(...).min is nil and
    # the previous nil.try(:size) raised NoMethodError (this file's Object#try
    # forwards via __send__), making the `|| 0` fallback unreachable.
    # Defaulting to an empty string yields indent 0 instead.
    indent = (scan(/^[ \t]*(?=\S)/).min || '').size
    gsub(/^[ \t]{#{indent}}/, '')
  end
end
|
module PulseMeter
module Sensor
# Simple monotonically-adjustable counter persisted in a single Redis key.
class Counter < Base
  # Deletes the persisted value, then runs Base's cleanup.
  def cleanup
    redis.del(value_key)
    super
  end

  # Increments the counter by one.
  def incr
    event(1)
  end

  # Adds +value+ (coerced to Integer) to the counter.
  def event(value)
    redis.incrby(value_key, value.to_i)
  end

  # Current counter value; 0 when the key does not exist.
  def value
    # nil.to_i is 0, so the previous `val.nil? ? 0 : val.to_i` guard
    # was redundant.
    redis.get(value_key).to_i
  end

  # Redis key under which this sensor's value is stored.
  def value_key
    @value_key ||= "#{name}:value"
  end
end
end
end
`val.nil? ? 0 : val.to_i` is just the same as `val.to_i`, since `nil.to_i` is 0.
module PulseMeter
module Sensor
# Simple counter sensor persisted in a single Redis key.
class Counter < Base
  # Redis key under which this sensor's value is stored.
  def value_key
    @value_key ||= "#{name}:value"
  end

  # Deletes the persisted value, then runs Base's cleanup.
  def cleanup
    redis.del(value_key)
    super
  end

  # Increments the counter by one.
  def incr
    event(1)
  end

  # Adds +value+ (coerced to Integer) to the counter.
  def event(value)
    redis.incrby(value_key, value.to_i)
  end

  # Current counter value; 0 when the key does not exist.
  def value
    redis.get(value_key).to_i
  end
end
end
end
|
module Grit
class Blob
  # Resolves this blob's stored id to a full SHA via `git rev-parse`.
  def id
    @repo.git.rev_parse({}, @id)
  end
end
end
Add a create_tempfile method to Grit::Blob.
module Grit
class Blob
  # Resolves this blob's stored id to a full SHA via `git rev-parse`.
  def id
    @repo.git.rev_parse({}, @id)
  end

  # Writes the blob's data to a closed Tempfile (named after the blob id)
  # and returns the Tempfile.
  def create_tempfile
    Tempfile.new(id).tap do |tempfile|
      tempfile.write(data)
      tempfile.close
    end
  end
end
end
|
# QBWC session persisted via ActiveRecord so state survives between
# Web Connector SOAP requests.
class QBWC::ActiveRecord::Session < QBWC::Session
  # Backing ActiveRecord model for the persisted session row.
  class QbwcSession < ActiveRecord::Base
    attr_accessible :company, :ticket, :user
  end

  # Looks up a persisted session by its ticket.
  # Returns a Session wrapper, or nil when the ticket is unknown.
  def self.get(ticket)
    session = QbwcSession.find_by_ticket(ticket)
    self.new(session) if session
  end

  # Either wraps an existing QbwcSession record (restoring job state) or
  # creates and persists a new record from user/company/ticket.
  def initialize(session_or_user = nil, company = nil, ticket = nil)
    if session_or_user.is_a? QbwcSession
      @session = session_or_user
      # Restore current job from saved one on QbwcSession
      @current_job = QBWC.jobs[@session.current_job.to_sym] if @session.current_job
      # Restore pending jobs from saved list on QbwcSession
      @pending_jobs = @session.pending_jobs.split(',').map { |job| QBWC.jobs[job.to_sym] }
      super(@session.user, @session.company, @session.ticket)
    else
      super
      @session = QbwcSession.new(:user => self.user, :company => self.company, :ticket => self.ticket)
      self.save
      @session
    end
  end

  # Serializes job state onto the backing record, saves it, then runs the
  # parent's save.
  def save
    @session.pending_jobs = pending_jobs.map(&:name).join(',')
    @session.current_job = current_job.try(:name)
    @session.save
    super
  end

  # Removes the backing record along with the in-memory session.
  def destroy
    @session.destroy
    super
  end

  # Delegate error/progress/iterator_id readers and writers to the
  # backing ActiveRecord record.
  [:error, :progress, :iterator_id].each do |method|
    define_method method do
      @session.send(method)
    end
    define_method "#{method}=" do |value|
      @session.send("#{method}=", value)
    end
  end
  protected :progress=, :iterator_id=, :iterator_id
end
Fix for Rails 4: `attr_accessible` was removed in Rails 4 (replaced by strong parameters), so only call it on earlier versions.
# QBWC session persisted via ActiveRecord so state survives between
# Web Connector SOAP requests.
class QBWC::ActiveRecord::Session < QBWC::Session
  # Backing ActiveRecord model for the persisted session row.
  class QbwcSession < ActiveRecord::Base
    # attr_accessible was removed in Rails 4 (strong parameters); only
    # declare it on Rails 3 and earlier.
    attr_accessible :company, :ticket, :user unless Rails::VERSION::MAJOR >= 4
  end

  # Looks up a persisted session by its ticket.
  # Returns a Session wrapper, or nil when the ticket is unknown.
  def self.get(ticket)
    session = QbwcSession.find_by_ticket(ticket)
    self.new(session) if session
  end

  # Either wraps an existing QbwcSession record (restoring job state) or
  # creates and persists a new record from user/company/ticket.
  def initialize(session_or_user = nil, company = nil, ticket = nil)
    if session_or_user.is_a? QbwcSession
      @session = session_or_user
      # Restore current job from saved one on QbwcSession
      @current_job = QBWC.jobs[@session.current_job.to_sym] if @session.current_job
      # Restore pending jobs from saved list on QbwcSession
      @pending_jobs = @session.pending_jobs.split(',').map { |job| QBWC.jobs[job.to_sym] }
      super(@session.user, @session.company, @session.ticket)
    else
      super
      @session = QbwcSession.new(:user => self.user, :company => self.company, :ticket => self.ticket)
      self.save
      @session
    end
  end

  # Serializes job state onto the backing record, saves it, then runs the
  # parent's save.
  def save
    @session.pending_jobs = pending_jobs.map(&:name).join(',')
    @session.current_job = current_job.try(:name)
    @session.save
    super
  end

  # Removes the backing record along with the in-memory session.
  def destroy
    @session.destroy
    super
  end

  # Delegate error/progress/iterator_id readers and writers to the
  # backing ActiveRecord record.
  [:error, :progress, :iterator_id].each do |method|
    define_method method do
      @session.send(method)
    end
    define_method "#{method}=" do |value|
      @session.send("#{method}=", value)
    end
  end
  protected :progress=, :iterator_id=, :iterator_id
end
|
module Grunt
# IRC bot handler: runs stored user-defined commands in response to
# messages and exposed IRC events.
class Handler < Wheaties::Handler
  include Grunt::Concerns::Commands
  include Grunt::Responses::Channel
  include Grunt::Responses::Messages

  # IRC events that stored commands may subscribe to.
  EXPOSED_EVENTS = [ :on_ctcp, :on_join, :on_nick, :on_part, :on_privmsg ]

  alias :original_handle :handle

  # Runs the normal Wheaties handling, then dispatches event-bound
  # commands when the current event is exposed.
  def handle
    original_handle
    if EXPOSED_EVENTS.include?(response.method_name)
      handle_event(response.method_name)
    end
  end

  protected

  # Evaluates a parsed command hash (:name, :args, optional :locals) under
  # a timeout, sending the result back to the requester via privmsg.
  def handle_command(command)
    return unless command.is_a?(Hash)
    begin
      locals = {
        :event => response.method_name,
        :response => response.dup,
        :sender => response.sender.dup,
        :from => response.from,
        # NOTE(review): channel is dup'ed unconditionally here; presumably
        # response.channel can be nil for private messages — confirm
        # against Wheaties (the later revision guards this with pm?).
        :channel => response.channel.dup.tap { |c| c.users.sender = response.sender.dup },
        :history => Grunt.history[response.channel] || []
      }.merge(command[:locals] || {})
      timeout = (Grunt.config["timeout"] || 10).to_i
      GruntTimeout.timeout(timeout) do
        result = Evaluator.new(command[:name], command[:args], locals).eval!
        privmsg(result, response.from) if result
      end
    rescue NoCommandError
      # Unknown commands are silently ignored.
    rescue ArgumentParseError => e
      notice(%{You made a mistake somewhere in your arguments for "#{e.command}"!}, response.sender.nick)
    rescue Timeout::Error
      notice(%{"#{command[:name]}" timed out after #{timeout} seconds!}, response.sender.nick)
    rescue => e
      notice(%{Error in "#{command[:name]}": #{e.message}}, response.sender.nick)
      log(:debug, e.message)
      log(:debug, e.backtrace.join("\n"))
    end
  end

  # Runs every stored command subscribed to +event+.
  def handle_event(event)
    locals = { :is_event => true }
    command_hash = { :args => "", :locals => locals }
    Models::Command.all(:events => event).each do |command|
      command_hash[:name] = command.name
      handle_command(command_hash)
    end
  end

  # Creates or appends to a plain-text command from an assignment hash
  # (:name, :text); non-plain-text command types may not be modified.
  def handle_assignment(assignment)
    return unless assignment.is_a?(Hash)
    command = Models::Command.first_or_new(:name => /^#{assignment[:name]}$/i)
    if command.new?
      command.name = assignment[:name]
      command.type = "plain_text"
      command.body = ""
      command.created_by = response.sender.nick
    else
      if %w(plain_text plain_text_random).include?(command.type)
        command.updated_by = response.sender.nick
      else
        notice(%{"#{command.name}" is a #{command.type.capitalize} command and may not be modified.}, response.sender.nick)
        return
      end
    end
    command.body << "#{command.body.empty? ? "" : "\n"}#{normalize(assignment[:text])}"
    begin
      command.save!
      notice(%{Saved "#{command.name}"!}, response.sender.nick)
    rescue MongoMapper::DocumentNotValid
      command.errors.each do |field, error|
        notice("Command #{field} #{error}!", response.sender.nick)
      end
    end
  end

  # Strips a leading "<nick>" prefix (unless escaped with a backslash)
  # and converts literal \n sequences into real newlines.
  def normalize(body)
    body.gsub(/^\s*(\\)?(<.*?>)/i) do |match|
      $~[1].nil? ? "" : match
    end.gsub('\n', "\n")
  end
end
end
Only set `locals[:channel]` if the command originates from a channel (as opposed to a private message).
module Grunt
# IRC bot handler: runs stored user-defined commands in response to
# messages and exposed IRC events.
class Handler < Wheaties::Handler
  include Grunt::Concerns::Commands
  include Grunt::Responses::Channel
  include Grunt::Responses::Messages

  # IRC events that stored commands may subscribe to.
  EXPOSED_EVENTS = [ :on_ctcp, :on_join, :on_nick, :on_part, :on_privmsg ]

  alias :original_handle :handle

  # Runs the normal Wheaties handling, then dispatches event-bound
  # commands when the current event is exposed.
  def handle
    original_handle
    if EXPOSED_EVENTS.include?(response.method_name)
      handle_event(response.method_name)
    end
  end

  protected

  # Evaluates a parsed command hash (:name, :args, optional :locals) under
  # a timeout, sending the result back to the requester via privmsg.
  def handle_command(command)
    return unless command.is_a?(Hash)
    begin
      locals = {
        :event => response.method_name,
        :response => response.dup,
        :sender => response.sender.dup,
        :from => response.from,
        :history => Grunt.history[response.channel] || []
      }.merge(command[:locals] || {})
      # Only expose a channel when the command came from one, not from a
      # private message.
      unless response.pm?
        locals[:channel] = response.channel.dup.tap do |c|
          c.users.sender = response.sender.dup
        end
      end
      timeout = (Grunt.config["timeout"] || 10).to_i
      GruntTimeout.timeout(timeout) do
        result = Evaluator.new(command[:name], command[:args], locals).eval!
        privmsg(result, response.from) if result
      end
    rescue NoCommandError
      # Unknown commands are silently ignored.
    rescue ArgumentParseError => e
      notice(%{You made a mistake somewhere in your arguments for "#{e.command}"!}, response.sender.nick)
    rescue Timeout::Error
      notice(%{"#{command[:name]}" timed out after #{timeout} seconds!}, response.sender.nick)
    rescue => e
      notice(%{Error in "#{command[:name]}": #{e.message}}, response.sender.nick)
      log(:debug, e.message)
      log(:debug, e.backtrace.join("\n"))
    end
  end

  # Runs every stored command subscribed to +event+.
  def handle_event(event)
    locals = { :is_event => true }
    command_hash = { :args => "", :locals => locals }
    Models::Command.all(:events => event).each do |command|
      command_hash[:name] = command.name
      handle_command(command_hash)
    end
  end

  # Creates or appends to a plain-text command from an assignment hash
  # (:name, :text); non-plain-text command types may not be modified.
  def handle_assignment(assignment)
    return unless assignment.is_a?(Hash)
    command = Models::Command.first_or_new(:name => /^#{assignment[:name]}$/i)
    if command.new?
      command.name = assignment[:name]
      command.type = "plain_text"
      command.body = ""
      command.created_by = response.sender.nick
    else
      if %w(plain_text plain_text_random).include?(command.type)
        command.updated_by = response.sender.nick
      else
        notice(%{"#{command.name}" is a #{command.type.capitalize} command and may not be modified.}, response.sender.nick)
        return
      end
    end
    command.body << "#{command.body.empty? ? "" : "\n"}#{normalize(assignment[:text])}"
    begin
      command.save!
      notice(%{Saved "#{command.name}"!}, response.sender.nick)
    rescue MongoMapper::DocumentNotValid
      command.errors.each do |field, error|
        notice("Command #{field} #{error}!", response.sender.nick)
      end
    end
  end

  # Strips a leading "<nick>" prefix (unless escaped with a backslash)
  # and converts literal \n sequences into real newlines.
  def normalize(body)
    body.gsub(/^\s*(\\)?(<.*?>)/i) do |match|
      $~[1].nil? ? "" : match
    end.gsub('\n', "\n")
  end
end
end
|
# -*- encoding: utf-8 -*-
module Rack
module Policy
# This is the class for limiting cookie storage on client machine.
# This is the class for limiting cookie storage on client machine.
class CookieLimiter
  include ::Rack::Utils

  HTTP_COOKIE = "HTTP_COOKIE".freeze
  SET_COOKIE = "Set-Cookie".freeze
  CACHE_CONTROL = "Cache-Control".freeze
  CONSENT_TOKEN = "cookie_limiter".freeze

  attr_reader :app, :options
  attr_accessor :status, :headers, :body

  # @option options [String] :consent_token
  #
  def initialize(app, options={})
    @app, @options = app, options
  end

  # Name of the cookie whose presence signals the client's consent.
  def consent_token
    @consent_token ||= options[:consent_token] || CONSENT_TOKEN
  end

  def expires
    Time.parse(options[:expires]) if options[:expires]
  end

  # Rack entry point.
  #
  # BUGFIX (thread safety): a middleware instance is shared between
  # concurrent requests, yet status/headers/body are kept in instance
  # variables. Process each request on a shallow copy so parallel
  # requests cannot clobber each other's response state.
  def call(env)
    dup.call!(env)
  end

  # Runs the actual request processing on this per-request copy.
  def call!(env)
    self.status, self.headers, self.body = @app.call(env)
    request = Rack::Request.new(env)
    response = Rack::Response.new body, status, headers
    clear_cookies!(request, response) unless allowed?(request)
    finish(env)
  end

  # Returns `false` if the cookie policy disallows cookie storage
  # for a given request, or `true` otherwise.
  #
  def allowed?(request)
    if ( request.cookies.has_key?(consent_token.to_s) ||
         parse_cookies.has_key?(consent_token.to_s) )
      true
    else
      false
    end
  end

  # Finish http response with proper headers
  def finish(env)
    if [204, 304].include?(status.to_i)
      headers.delete "Content-Type"
      [status.to_i, headers, []]
    elsif env['REQUEST_METHOD'] == 'HEAD'
      [status.to_i, headers, []]
    else
      [status.to_i, headers, body]
    end
  end

  protected

  # Returns the response cookies converted to Hash
  #
  def parse_cookies
    cookies = {}
    if header = headers[SET_COOKIE]
      header = header.split("\n") if header.respond_to?(:to_str)
      header.each do |cookie|
        if pair = cookie.split(';').first
          key, value = pair.split('=').map { |v| ::Rack::Utils.unescape(v) }
          cookies[key] = value
        end
      end
    end
    cookies
  end

  # Removes cookies from both the response and the incoming request, and
  # asks clients to drop any they already hold.
  def clear_cookies!(request, response)
    cookies = parse_cookies
    headers.delete(SET_COOKIE)
    request.env.delete(HTTP_COOKIE)
    revalidate_cache!
    cookies.merge(request.cookies).each do |key, value|
      response.delete_cookie key.to_sym
    end
    headers
  end

  def revalidate_cache!
    headers.merge!({ CACHE_CONTROL => 'must-revalidate, max-age=0' })
  end

  def set_cookie(key, value)
    ::Rack::Utils.set_cookie_header!(headers, key, value)
  end

  def delete_cookie(key, value)
    ::Rack::Utils.delete_cookie_header!(headers, key, value)
  end
end # CookieLimiter
end # Policy
end # Rack
Ensure thread safety.
# -*- encoding: utf-8 -*-
module Rack
module Policy
# This is the class for limiting cookie storage on client machine.
# This is the class for limiting cookie storage on client machine.
class CookieLimiter
  include ::Rack::Utils

  HTTP_COOKIE = "HTTP_COOKIE".freeze
  SET_COOKIE = "Set-Cookie".freeze
  CACHE_CONTROL = "Cache-Control".freeze
  CONSENT_TOKEN = "cookie_limiter".freeze

  attr_reader :app, :options
  attr_accessor :status, :headers, :body

  # @option options [String] :consent_token
  #
  def initialize(app, options={})
    @app, @options = app, options
  end

  # Name of the cookie whose presence signals the client's consent.
  def consent_token
    @consent_token ||= options[:consent_token] || CONSENT_TOKEN
  end

  def expires
    Time.parse(options[:expires]) if options[:expires]
  end

  # Rack entry point. Works on a shallow copy of the middleware so that
  # the instance state below stays per-request (thread safety).
  def call(env)
    dup.call!(env)
  end

  # Runs the actual request processing on the per-request copy.
  def call!(env)
    self.status, self.headers, self.body = @app.call(env)
    request = Rack::Request.new(env)
    response = Rack::Response.new body, status, headers
    clear_cookies!(request, response) unless allowed?(request)
    finish(env)
  end

  # Returns `false` if the cookie policy disallows cookie storage
  # for a given request, or `true` otherwise.
  #
  def allowed?(request)
    if ( request.cookies.has_key?(consent_token.to_s) ||
         parse_cookies.has_key?(consent_token.to_s) )
      true
    else
      false
    end
  end

  # Finish http response with proper headers
  def finish(env)
    if [204, 304].include?(status.to_i)
      headers.delete "Content-Type"
      [status.to_i, headers, []]
    elsif env['REQUEST_METHOD'] == 'HEAD'
      [status.to_i, headers, []]
    else
      [status.to_i, headers, body]
    end
  end

  protected

  # Returns the response cookies converted to Hash
  #
  def parse_cookies
    cookies = {}
    if header = headers[SET_COOKIE]
      header = header.split("\n") if header.respond_to?(:to_str)
      header.each do |cookie|
        if pair = cookie.split(';').first
          key, value = pair.split('=').map { |v| ::Rack::Utils.unescape(v) }
          cookies[key] = value
        end
      end
    end
    cookies
  end

  # Removes cookies from both the response and the incoming request, and
  # asks clients to drop any they already hold.
  def clear_cookies!(request, response)
    cookies = parse_cookies
    headers.delete(SET_COOKIE)
    request.env.delete(HTTP_COOKIE)
    revalidate_cache!
    cookies.merge(request.cookies).each do |key, value|
      response.delete_cookie key.to_sym
    end
    headers
  end

  def revalidate_cache!
    headers.merge!({ CACHE_CONTROL => 'must-revalidate, max-age=0' })
  end

  def set_cookie(key, value)
    ::Rack::Utils.set_cookie_header!(headers, key, value)
  end

  def delete_cookie(key, value)
    ::Rack::Utils.delete_cookie_header!(headers, key, value)
  end
end # CookieLimiter
end # Policy
end # Rack
|
require 'guard'
require 'guard/plugin'
module Guard
# Guard plugin that (re)starts a foreman process when watched files change.
class Foreman < Plugin
  # Default log location (Rails in mind)
  DEFAULT_LOG_LOCATION = "log/foreman.log"

  # Initialize a Guard.
  # @param [Array<Guard::Watcher>] watchers the Guard file watchers
  # @param [Hash] options the custom Guard options
  def initialize(options = {})
    @log_file = options.fetch(:log_file, DEFAULT_LOG_LOCATION)
    @concurrency = options[:concurrency]
    @env = options[:env]
    @procfile = options[:procfile]
    @port = options[:port]
    @root = options[:root]
    super
  end

  # Call once when Guard starts. Please override initialize method to init stuff.
  # @raise [:task_has_failed] when start has failed
  def start
    # Stop if running
    stop if @pid
    cmd = []
    cmd << "foreman start"
    cmd << "-c #{@concurrency}" if @concurrency
    cmd << "-e #{@env}" if @env
    cmd << "-f #{@procfile}" if @procfile
    cmd << "-p #{@port}" if @port
    cmd << "-d #{@root}" if @root
    cmd << "> #{@log_file}"
    # NOTE(review): @pid is the forked wrapper process that runs `system`,
    # not foreman itself; #stop signals that wrapper — confirm foreman
    # actually terminates with it.
    @pid = ::Process.fork do
      system "#{cmd.join " "}"
    end
    # NOTE(review): reported unconditionally, even if the command fails.
    success "Foreman started."
  end

  # Called when `stop|quit|exit|s|q|e + enter` is pressed (when Guard quits).
  # @raise [:task_has_failed] when stop has failed
  def stop
    begin
      ::Process.kill("QUIT", @pid) if ::Process.getpgid(@pid)
      # foreman won't always shut down right away, so we're waiting for
      # the getpgid method to raise an Errno::ESRCH that will tell us
      # the process is not longer active.
      sleep 1 while ::Process.getpgid(@pid)
      success "Foreman stopped."
    rescue Errno::ESRCH
      # Don't do anything, the process does not exist
    end
  end

  # Called when `reload|r|z + enter` is pressed.
  # This method should be mainly used for "reload" (really!) actions like
  # reloading passenger/spork/bundler/...
  # @raise [:task_has_failed] when reload has failed
  def reload
    UI.info "Restarting Foreman..."
    stop
    start
  end

  # Called when just `enter` is pressed
  # This method should be principally used for long action like running all specs/tests/...
  # @raise [:task_has_failed] when run_all has failed
  def run_all
    start
  end

  # Called on file(s) modifications that the Guard watches.
  # @param [Array<String>] paths the changes files or paths
  # @raise [:task_has_failed] when run_on_change has failed
  def run_on_changes(paths)
    reload
  end

  def run_on_additions(paths)
    reload
  end

  def run_on_modifications(paths)
    reload
  end

  def run_on_removals(paths)
    reload
  end

  private

  # Plain informational message in the Guard console.
  def info(msg)
    UI.info(msg)
  end

  # Notification helpers wrapping Guard's Notifier with status images.
  def pending message
    notify message, :image => :pending
  end

  def success message
    notify message, :image => :success
  end

  def failed message
    notify message, :image => :failed
  end

  def notify(message, options = {})
    Notifier.notify(message, options)
  end
end
end
Use info dialogs that actually work (plain `UI.info` messages instead of the image-based notifications).
require 'guard'
require 'guard/plugin'
module Guard
# Guard plugin that (re)starts a foreman process when watched files change.
class Foreman < Plugin
  # Default log location (Rails in mind)
  DEFAULT_LOG_LOCATION = "log/foreman.log"

  # Initialize a Guard.
  # @param [Array<Guard::Watcher>] watchers the Guard file watchers
  # @param [Hash] options the custom Guard options
  def initialize(options = {})
    @log_file = options.fetch(:log_file, DEFAULT_LOG_LOCATION)
    @concurrency = options[:concurrency]
    @env = options[:env]
    @procfile = options[:procfile]
    @port = options[:port]
    @root = options[:root]
    super
  end

  # Call once when Guard starts. Please override initialize method to init stuff.
  # @raise [:task_has_failed] when start has failed
  def start
    # Stop if running
    stop if @pid
    cmd = []
    cmd << "foreman start"
    cmd << "-c #{@concurrency}" if @concurrency
    cmd << "-e #{@env}" if @env
    cmd << "-f #{@procfile}" if @procfile
    cmd << "-p #{@port}" if @port
    cmd << "-d #{@root}" if @root
    cmd << "> #{@log_file}"
    # NOTE(review): @pid is the forked wrapper process that runs `system`,
    # not foreman itself; #stop signals that wrapper — confirm foreman
    # actually terminates with it.
    @pid = ::Process.fork do
      system "#{cmd.join " "}"
    end
    # NOTE(review): reported unconditionally, even if the command fails.
    info "Foreman started."
  end

  # Called when `stop|quit|exit|s|q|e + enter` is pressed (when Guard quits).
  # @raise [:task_has_failed] when stop has failed
  def stop
    begin
      ::Process.kill("QUIT", @pid) if ::Process.getpgid(@pid)
      # foreman won't always shut down right away, so we're waiting for
      # the getpgid method to raise an Errno::ESRCH that will tell us
      # the process is not longer active.
      sleep 1 while ::Process.getpgid(@pid)
      info "Foreman stopped."
    rescue Errno::ESRCH
      # Don't do anything, the process does not exist
    end
  end

  # Called when `reload|r|z + enter` is pressed.
  # This method should be mainly used for "reload" (really!) actions like
  # reloading passenger/spork/bundler/...
  # @raise [:task_has_failed] when reload has failed
  def reload
    UI.info "Restarting Foreman..."
    stop
    start
  end

  # Called when just `enter` is pressed
  # This method should be principally used for long action like running all specs/tests/...
  # @raise [:task_has_failed] when run_all has failed
  def run_all
    start
  end

  # Called on file(s) modifications that the Guard watches.
  # @param [Array<String>] paths the changes files or paths
  # @raise [:task_has_failed] when run_on_change has failed
  def run_on_changes(paths)
    reload
  end

  def run_on_additions(paths)
    reload
  end

  def run_on_modifications(paths)
    reload
  end

  def run_on_removals(paths)
    reload
  end

  private

  # Plain informational message in the Guard console.
  def info(msg)
    UI.info(msg)
  end
end
end
|
# Bootstraps the application on a fresh server: clones (or refreshes) the git
# checkout, installs systemd units and logrotate config, prepares the
# database, obtains a Let's Encrypt certificate and wires up nginx.
desc 'Setup rails application for the first time on a server'
task :setup do
on roles(:all) do
with fetch(:environment) do
# Reuse an existing checkout when the deploy directory is already present.
if test "[ -d #{fetch(:deploy_to)} ]"
invoke :fetch_and_reset_git_repository
else
execute :git, :clone, fetch(:repo_url), fetch(:deploy_to)
invoke :sync_local_dirs_to_server
end
server_conf_dir = "#{fetch(:deploy_to)}/config/server"
# su_cp/su_ln/su_rm are presumably sudo-wrapped commands -- TODO confirm
# against the host's command map.
execute :su_cp, "#{server_conf_dir}/puma.service /lib/systemd/system/#{fetch(:application)}.service"
execute :su_cp, "#{server_conf_dir}/sidekiq.service /lib/systemd/system/#{fetch(:application)}_sidekiq.service"
execute :su_ln, "-s -f #{server_conf_dir}/logrotate.conf /etc/logrotate.d/#{fetch(:application)}"
within fetch(:deploy_to) do
# master.key is not in git; upload it from the local checkout.
upload! './config/master.key', "#{fetch(:deploy_to)}/config/master.key"
execute :bundle, :install, '--without development test'
invoke :create_database_from_sql_file
execute :rake, 'assets:precompile'
execute :systemctl, 'daemon-reload'
execute :systemctl, :start, fetch(:application)
execute :systemctl, :start, "#{fetch(:application)}_sidekiq"
execute :systemctl, :enable, fetch(:application)
execute :systemctl, :enable, "#{fetch(:application)}_sidekiq"
# copy temporary simple nginx.conf only for getting letsencrypt certificate
nginx_conf = File.read(File.join(File.dirname(__FILE__), 'nginx.conf'))
nginx_conf.gsub!('DOMAINS', fetch(:domains).join(' '))
nginx_conf.gsub!('APPLICATION', fetch(:application))
upload! StringIO.new(nginx_conf), "#{fetch(:deploy_to)}/tmp/nginx.conf"
execute :su_cp, "#{fetch(:deploy_to)}/tmp/nginx.conf /etc/nginx/conf.d/#{fetch(:application)}.conf"
execute :systemctl, :restart, :nginx
execute :certbot, "certonly --webroot -w /home/deploy/apps/#{fetch(:application)}/public #{fetch(:domains).collect { |d| '-d ' + d }.join(' ')} -n --agree-tos -m #{fetch(:certbot_email)} --deploy-hook 'systemctl reload nginx'"
# remove temporary nginx.conf and link config/server/nginx.conf to /etc/nginx/conf.d
execute :su_rm, "/etc/nginx/conf.d/#{fetch(:application)}.conf"
execute :su_ln, "-s -f #{server_conf_dir}/nginx.conf /etc/nginx/conf.d/#{fetch(:application)}.conf"
execute :systemctl, :restart, :nginx
end
end
end
end
# Tears down everything :setup created: systemd units, database, deploy
# directory, nginx and logrotate configuration.
desc 'Remove the application completely from the server'
task :remove do
on roles(:all) do
with fetch(:environment) do
# stop, disable and remove systemd service files
execute :systemctl, :stop, fetch(:application)
execute :systemctl, :stop, "#{fetch(:application)}_sidekiq"
execute :systemctl, :disable, fetch(:application)
execute :systemctl, :disable, "#{fetch(:application)}_sidekiq"
execute :su_rm, "-f /lib/systemd/system/#{fetch(:application)}.service"
execute :su_rm, "-f /lib/systemd/system/#{fetch(:application)}_sidekiq.service"
# drop the database and remove the application directory from /home/deploy/apps
within fetch(:deploy_to) do
execute :rake, 'db:drop'
execute :su_rm, "-rf #{fetch(:deploy_to)}"
end if test "[ -d #{fetch(:deploy_to)} ]"
# remove application nginx configuration
execute :su_rm, "-f /etc/nginx/conf.d/#{fetch(:application)}.conf"
execute :systemctl, :restart, :nginx
# remove logrotate configuration
execute :su_rm, "-f /etc/logrotate.d/#{fetch(:application)}"
end
end
end
# Standard deploy: update code, install gems, migrate, precompile assets and
# restart the app, sidekiq and nginx services.
desc 'Deploy rails application'
task :deploy do
on roles(:all) do
with fetch(:environment) do
within fetch(:deploy_to) do
invoke :fetch_and_reset_git_repository
execute :bundle, :install
execute :rake, 'db:migrate'
execute :rake, 'assets:precompile'
execute :systemctl, :restart, fetch(:application)
execute :systemctl, :restart, "#{fetch(:application)}_sidekiq"
execute :systemctl, :restart, :nginx
end
end
end
end
# Dumps the production database on the server, downloads the dump and imports
# it into the local development database, then pulls the synced directories.
desc 'Copy database from the server to the local machine'
task :update do
  on roles(:all) do
    within fetch(:deploy_to) do
      # --clean drops objects before recreating them on import; --no-owner
      # omits ownership commands so the dump can be imported by a local user
      # different from the production database owner.
      execute :pg_dump, "-U rails -h localhost --clean --no-owner #{fetch(:application)}_production > db/#{fetch(:application)}.sql"
      download! "#{fetch(:deploy_to)}/db/#{fetch(:application)}.sql", 'db'
    end
  end
  run_locally do
    execute "psql -d #{fetch(:application)}_development -f db/#{fetch(:application)}.sql"
    invoke :sync_local_dirs_from_server
  end
end
# Pushes each configured sync dir (uploads etc.) from the local machine to
# the server with rsync. Runs only for dirs that exist locally.
task :sync_local_dirs_to_server do
  on roles(:all) do
    fetch(:sync_dirs, []).each do |sync_dir|
      # File.exist? replaces File.exists?, which is deprecated and removed
      # in Ruby 3.2.
      if File.exist?("./#{sync_dir}")
        run_locally do
          execute "rsync -avz --delete -e ssh ./#{sync_dir}/ #{fetch(:user)}@#{fetch(:server)}:#{fetch(:deploy_to)}/#{sync_dir}/"
        end
      end
    end
  end
end
# Pulls each configured sync dir from the server back to the local machine.
task :sync_local_dirs_from_server do
  on roles(:all) do
    fetch(:sync_dirs, []).each do |sync_dir|
      # Sync dirs are directories, so test with -d: the previous "-f" file
      # test never matched a directory and silently skipped the rsync.
      # Also drop the accidental double slash in the remote path.
      if test "[ -d #{fetch(:deploy_to)}/#{sync_dir} ]"
        run_locally do
          execute "rsync -avzm --delete --force -e ssh #{fetch(:user)}@#{fetch(:server)}:#{fetch(:deploy_to)}/#{sync_dir}/ ./#{sync_dir}/"
        end
      end
    end
  end
end
# Updates the server checkout to the tip of the deploy branch, discarding any
# local changes (hard reset).
task :fetch_and_reset_git_repository do
on roles(:all) do
with fetch(:environment) do
within fetch(:deploy_to) do
execute :git, :fetch, 'origin'
execute :git, :reset, "--hard origin/#{fetch(:deploy_branch, 'master')}"
end
end
end
end
# Creates and seeds the database; when a dump file was shipped with the repo
# it is imported on top and migrations are re-run against the imported data.
task :create_database_from_sql_file do
on roles(:all) do
with fetch(:environment) do
within fetch(:deploy_to) do
execute :rake, 'db:create'
execute :rake, 'db:migrate'
execute :rake, 'db:seed'
if test "[ -f #{fetch(:deploy_to)}/db/#{fetch(:application)}.sql ]"
execute :psql, "-U rails -h localhost -d #{fetch(:application)}_production", "-f db/#{fetch(:application)}.sql"
execute :rake, 'db:migrate'
end
end
end
end
end
Use --no-owner when exporting the database so it can be imported by a different user
# Bootstraps the application on a fresh server: clones (or refreshes) the git
# checkout, installs systemd units and logrotate config, prepares the
# database, obtains a Let's Encrypt certificate and wires up nginx.
desc 'Setup rails application for the first time on a server'
task :setup do
on roles(:all) do
with fetch(:environment) do
# Reuse an existing checkout when the deploy directory is already present.
if test "[ -d #{fetch(:deploy_to)} ]"
invoke :fetch_and_reset_git_repository
else
execute :git, :clone, fetch(:repo_url), fetch(:deploy_to)
invoke :sync_local_dirs_to_server
end
server_conf_dir = "#{fetch(:deploy_to)}/config/server"
# su_cp/su_ln/su_rm are presumably sudo-wrapped commands -- TODO confirm
# against the host's command map.
execute :su_cp, "#{server_conf_dir}/puma.service /lib/systemd/system/#{fetch(:application)}.service"
execute :su_cp, "#{server_conf_dir}/sidekiq.service /lib/systemd/system/#{fetch(:application)}_sidekiq.service"
execute :su_ln, "-s -f #{server_conf_dir}/logrotate.conf /etc/logrotate.d/#{fetch(:application)}"
within fetch(:deploy_to) do
# master.key is not in git; upload it from the local checkout.
upload! './config/master.key', "#{fetch(:deploy_to)}/config/master.key"
execute :bundle, :install, '--without development test'
invoke :create_database_from_sql_file
execute :rake, 'assets:precompile'
execute :systemctl, 'daemon-reload'
execute :systemctl, :start, fetch(:application)
execute :systemctl, :start, "#{fetch(:application)}_sidekiq"
execute :systemctl, :enable, fetch(:application)
execute :systemctl, :enable, "#{fetch(:application)}_sidekiq"
# copy temporary simple nginx.conf only for getting letsencrypt certificate
nginx_conf = File.read(File.join(File.dirname(__FILE__), 'nginx.conf'))
nginx_conf.gsub!('DOMAINS', fetch(:domains).join(' '))
nginx_conf.gsub!('APPLICATION', fetch(:application))
upload! StringIO.new(nginx_conf), "#{fetch(:deploy_to)}/tmp/nginx.conf"
execute :su_cp, "#{fetch(:deploy_to)}/tmp/nginx.conf /etc/nginx/conf.d/#{fetch(:application)}.conf"
execute :systemctl, :restart, :nginx
execute :certbot, "certonly --webroot -w /home/deploy/apps/#{fetch(:application)}/public #{fetch(:domains).collect { |d| '-d ' + d }.join(' ')} -n --agree-tos -m #{fetch(:certbot_email)} --deploy-hook 'systemctl reload nginx'"
# remove temporary nginx.conf and link config/server/nginx.conf to /etc/nginx/conf.d
execute :su_rm, "/etc/nginx/conf.d/#{fetch(:application)}.conf"
execute :su_ln, "-s -f #{server_conf_dir}/nginx.conf /etc/nginx/conf.d/#{fetch(:application)}.conf"
execute :systemctl, :restart, :nginx
end
end
end
end
# Tears down everything :setup created: systemd units, database, deploy
# directory, nginx and logrotate configuration.
desc 'Remove the application completely from the server'
task :remove do
on roles(:all) do
with fetch(:environment) do
# stop, disable and remove systemd service files
execute :systemctl, :stop, fetch(:application)
execute :systemctl, :stop, "#{fetch(:application)}_sidekiq"
execute :systemctl, :disable, fetch(:application)
execute :systemctl, :disable, "#{fetch(:application)}_sidekiq"
execute :su_rm, "-f /lib/systemd/system/#{fetch(:application)}.service"
execute :su_rm, "-f /lib/systemd/system/#{fetch(:application)}_sidekiq.service"
# drop the database and remove the application directory from /home/deploy/apps
within fetch(:deploy_to) do
execute :rake, 'db:drop'
execute :su_rm, "-rf #{fetch(:deploy_to)}"
end if test "[ -d #{fetch(:deploy_to)} ]"
# remove application nginx configuration
execute :su_rm, "-f /etc/nginx/conf.d/#{fetch(:application)}.conf"
execute :systemctl, :restart, :nginx
# remove logrotate configuration
execute :su_rm, "-f /etc/logrotate.d/#{fetch(:application)}"
end
end
end
# Standard deploy: update code, install gems, migrate, precompile assets and
# restart the app, sidekiq and nginx services.
desc 'Deploy rails application'
task :deploy do
on roles(:all) do
with fetch(:environment) do
within fetch(:deploy_to) do
invoke :fetch_and_reset_git_repository
execute :bundle, :install
execute :rake, 'db:migrate'
execute :rake, 'assets:precompile'
execute :systemctl, :restart, fetch(:application)
execute :systemctl, :restart, "#{fetch(:application)}_sidekiq"
execute :systemctl, :restart, :nginx
end
end
end
end
# Dumps the production database on the server, downloads the dump and imports
# it into the local development database, then pulls the synced directories.
desc 'Copy database from the server to the local machine'
task :update do
on roles(:all) do
within fetch(:deploy_to) do
# --no-owner omits ownership commands so the dump can be imported by a
# local user different from the production database owner.
execute :pg_dump, "-U rails -h localhost --clean --no-owner #{fetch(:application)}_production > db/#{fetch(:application)}.sql"
download! "#{fetch(:deploy_to)}/db/#{fetch(:application)}.sql", 'db'
end
end
run_locally do
execute "psql -d #{fetch(:application)}_development -f db/#{fetch(:application)}.sql"
invoke :sync_local_dirs_from_server
end
end
# Pushes each configured sync dir (uploads etc.) from the local machine to
# the server with rsync. Runs only for dirs that exist locally.
task :sync_local_dirs_to_server do
  on roles(:all) do
    fetch(:sync_dirs, []).each do |sync_dir|
      # File.exist? replaces File.exists?, which is deprecated and removed
      # in Ruby 3.2.
      if File.exist?("./#{sync_dir}")
        run_locally do
          execute "rsync -avz --delete -e ssh ./#{sync_dir}/ #{fetch(:user)}@#{fetch(:server)}:#{fetch(:deploy_to)}/#{sync_dir}/"
        end
      end
    end
  end
end
# Pulls each configured sync dir from the server back to the local machine.
task :sync_local_dirs_from_server do
  on roles(:all) do
    fetch(:sync_dirs, []).each do |sync_dir|
      # Sync dirs are directories, so test with -d: the previous "-f" file
      # test never matched a directory and silently skipped the rsync.
      # Also drop the accidental double slash in the remote path.
      if test "[ -d #{fetch(:deploy_to)}/#{sync_dir} ]"
        run_locally do
          execute "rsync -avzm --delete --force -e ssh #{fetch(:user)}@#{fetch(:server)}:#{fetch(:deploy_to)}/#{sync_dir}/ ./#{sync_dir}/"
        end
      end
    end
  end
end
# Updates the server checkout to the tip of the deploy branch, discarding any
# local changes (hard reset).
task :fetch_and_reset_git_repository do
on roles(:all) do
with fetch(:environment) do
within fetch(:deploy_to) do
execute :git, :fetch, 'origin'
execute :git, :reset, "--hard origin/#{fetch(:deploy_branch, 'master')}"
end
end
end
end
# Creates and seeds the database; when a dump file was shipped with the repo
# it is imported on top and migrations are re-run against the imported data.
task :create_database_from_sql_file do
on roles(:all) do
with fetch(:environment) do
within fetch(:deploy_to) do
execute :rake, 'db:create'
execute :rake, 'db:migrate'
execute :rake, 'db:seed'
if test "[ -f #{fetch(:deploy_to)}/db/#{fetch(:application)}.sql ]"
execute :psql, "-U rails -h localhost -d #{fetch(:application)}_production", "-f db/#{fetch(:application)}.sql"
execute :rake, 'db:migrate'
end
end
end
end
end
module Guard
  # Pairs a filename pattern with an optional action block.
  class Watcher
    attr_accessor :pattern, :action

    # pattern - Regexp matched against watched file paths.
    # action  - optional callable invoked on a match (arity -1: no args,
    #           arity 1: receives the MatchData).
    def initialize(pattern, action = nil)
      @pattern, @action = pattern, action
    end

    # Collects the paths produced by every watcher of +guard+ matching one of
    # +files+. Without an action the full match (matches[0]) is collected;
    # with an action, its non-empty String result is collected. Errors raised
    # by an action are logged and that file contributes nothing.
    def self.match_files(guard, files)
      guard.watchers.inject([]) do |paths, watcher|
        files.each do |file|
          if matches = file.match(watcher.pattern)
            if watcher.action
              # Reset per file: previously a failing (or unhandled-arity)
              # action left the previous file's result in place and pushed
              # it again.
              result = nil
              begin
                case watcher.action.arity
                when -1
                  result = watcher.action.call
                when 1
                  result = watcher.action.call(matches)
                end
              rescue
                UI.info "Problem with watch action"
              end
              paths << result if result.is_a?(String) && result != ''
            else
              paths << matches[0]
            end
          end
        end
        paths
      end
    end
  end
end
Fixed watch blocks without params on Ruby 1.9.2
module Guard
  # Pairs a filename pattern with an optional action block.
  class Watcher
    attr_accessor :pattern, :action

    # pattern - Regexp matched against watched file paths.
    # action  - optional callable invoked on a match (arity 1 receives the
    #           MatchData, anything else is called without arguments).
    def initialize(pattern, action = nil)
      @pattern, @action = pattern, action
    end

    # Collects the paths produced by every watcher of +guard+ matching one of
    # +files+. Without an action the full match (matches[0]) is collected;
    # with an action, its non-empty String result is collected. Errors raised
    # by an action are logged and that file contributes nothing.
    def self.match_files(guard, files)
      guard.watchers.inject([]) do |paths, watcher|
        files.each do |file|
          if matches = file.match(watcher.pattern)
            if watcher.action
              # Reset per file: previously a failing action left the
              # previous file's result in place and pushed it again.
              result = nil
              begin
                if watcher.action.arity == 1
                  result = watcher.action.call(matches)
                else
                  result = watcher.action.call
                end
              rescue
                UI.info "Problem with watch action"
              end
              paths << result if result.is_a?(String) && result != ''
            else
              paths << matches[0]
            end
          end
        end
        paths
      end
    end
  end
end
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
module Redfish
module Tasks
# Abstract base for tasks driven through the asadmin CLI; provides property
# loading/parsing and escaping helpers for subclasses.
class AsadminTask < Redfish::Task
self.mark_as_abstract!
protected
# Return a struct representing the domain version
# NOTE(review): the ternary branches look inverted -- when a property cache
# exists one would expect to read 'domain.version' from it; verify intent.
def domain_version
context.property_cache? ?
context.domain_version :
context.domain_version(get_property('domain.version'))
end
#
# Many glassfish resources have an "extensible" set of properties with declared under the resource.
# This method assumes there is a method "properties" that returns a list of said properties. It then
# adds each these properties to a map with name relative to the resource. It also adds empty properties
# to the map for any properties that need to be removed.
#
# e.g.
# collect_property_sets('resources.jdbc-connection-pool.MyDbPool.', property_map)
#
def collect_property_sets(property_prefix, property_map)
self.properties.each_pair do |key, value|
property_map["property.#{key}"] = as_property_value(value)
end
full_prefix = "#{property_prefix}property."
# Keys present on the server but absent from self.properties get an empty
# value so they are removed.
extra_property_keys =
context.property_cache? ?
context.property_cache.get_keys_starting_with(full_prefix) :
load_properties("#{full_prefix}*").keys
self.properties.keys.each do |k|
extra_property_keys.delete("#{full_prefix}#{k}")
end
extra_property_keys.each do |key|
k = key[full_prefix.length..-1]
property_map["property.#{k}"] = ''
end
end
# Runs "asadmin get <pattern>" and returns the parsed key => value Hash.
def load_properties(pattern)
output = context.exec('get', [pattern], :terse => true, :echo => false)
parse_properties(output)
end
# Loads a single property; nil when the key does not exist.
def load_property(key)
result = load_properties(key)
result.empty? ? nil : result.values[0]
end
# Reads a property from the cache when available, otherwise from the server.
def get_property(key)
context.property_cache? ? context.property_cache[key] : load_property(key)
end
# Parses "key=value" lines into a Hash. Assumes every line contains '=';
# a line without '=' would raise (line.index returns nil).
def parse_properties(output)
properties = {}
output.split("\n").each do |line|
index = line.index('=')
key = line[0, index]
value = line[index + 1, line.size]
properties[key] = value
end
properties
end
# Converts nil to '' so asadmin receives an explicit empty value.
def as_property_value(value)
value.nil? ? '' : value.to_s
end
# Joins escaped values with ':' as asadmin expects for option lists.
def encode_options(options)
"#{options.collect { |v| escape_property(v) }.join(':')}"
end
# Encodes a Hash as colon-separated key=value pairs with escaped values.
def encode_parameters(properties)
"#{properties.collect { |k, v| "#{k}=#{escape_property(v)}" }.join(':')}"
end
# Backslash-escapes characters that are special to asadmin property syntax.
def escape_property(string)
string.to_s.gsub(/([#{Regexp.escape('+\/,=:.!$%^&*|{}[]"`~;')}])/) { |match| "\\#{match}" }
end
end
end
end
Make it possible to pass options when loading properties
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
module Redfish
  module Tasks
    # Abstract base for tasks driven through the asadmin CLI; provides
    # property loading/parsing and escaping helpers for subclasses.
    class AsadminTask < Redfish::Task
      self.mark_as_abstract!

      protected

      # Return a struct representing the domain version
      # NOTE(review): the ternary branches look inverted -- when a property
      # cache exists one would expect to read 'domain.version' from it;
      # left unchanged pending verification.
      def domain_version
        context.property_cache? ?
          context.domain_version :
          context.domain_version(get_property('domain.version'))
      end

      #
      # Many glassfish resources have an "extensible" set of properties with declared under the resource.
      # This method assumes there is a method "properties" that returns a list of said properties. It then
      # adds each these properties to a map with name relative to the resource. It also adds empty properties
      # to the map for any properties that need to be removed.
      #
      # e.g.
      #   collect_property_sets('resources.jdbc-connection-pool.MyDbPool.', property_map)
      #
      def collect_property_sets(property_prefix, property_map)
        self.properties.each_pair do |key, value|
          property_map["property.#{key}"] = as_property_value(value)
        end
        full_prefix = "#{property_prefix}property."
        # Keys present on the server but absent from self.properties get an
        # empty value so they are removed.
        extra_property_keys =
          context.property_cache? ?
            context.property_cache.get_keys_starting_with(full_prefix) :
            load_properties("#{full_prefix}*").keys
        self.properties.keys.each do |k|
          extra_property_keys.delete("#{full_prefix}#{k}")
        end
        extra_property_keys.each do |key|
          k = key[full_prefix.length..-1]
          property_map["property.#{k}"] = ''
        end
      end

      # Runs "asadmin get <pattern>" and returns the parsed key => value Hash.
      def load_properties(pattern, options = {})
        output = context.exec('get', [pattern], {:terse => true, :echo => false}.merge(options))
        parse_properties(output)
      end

      # Loads a single property; nil when the key does not exist.
      def load_property(key, options = {})
        result = load_properties(key, options)
        result.empty? ? nil : result.values[0]
      end

      # Reads a property from the cache when available, otherwise from the server.
      def get_property(key, options = {})
        context.property_cache? ? context.property_cache[key] : load_property(key, options)
      end

      # Parses "key=value" lines into a Hash, skipping any line without '='.
      # Previously such a line crashed with a TypeError (line[0, nil]).
      def parse_properties(output)
        properties = {}
        output.split("\n").each do |line|
          index = line.index('=')
          next unless index
          key = line[0, index]
          value = line[index + 1, line.size]
          properties[key] = value
        end
        properties
      end

      # Converts nil to '' so asadmin receives an explicit empty value.
      def as_property_value(value)
        value.nil? ? '' : value.to_s
      end

      # Joins escaped values with ':' as asadmin expects for option lists.
      def encode_options(options)
        "#{options.collect { |v| escape_property(v) }.join(':')}"
      end

      # Encodes a Hash as colon-separated key=value pairs with escaped values.
      def encode_parameters(properties)
        "#{properties.collect { |k, v| "#{k}=#{escape_property(v)}" }.join(':')}"
      end

      # Backslash-escapes characters that are special to asadmin property syntax.
      def escape_property(string)
        string.to_s.gsub(/([#{Regexp.escape('+\/,=:.!$%^&*|{}[]"`~;')}])/) { |match| "\\#{match}" }
      end
    end
  end
end
|
# TODO - test if this works when src_files and dest_dir contain whitespace
module Gusteau
# Mixin that pushes local files to a remote host via rsync, optionally
# driving the ssh password prompt through expect. Expects the including
# class to provide #user, #host, #log and @password.
module Rsync
# Default rsync flags: archive, verbose, compress.
def default_rsync_opts
"avz"
end
# Rsyncs +src_files+ (Array of paths) to +dest_dir+ on the remote host.
# opts[:rsync_opts] overrides the default flag string.
def sync_files(src_files, dest_dir, opts={})
rsync_cmd = "rsync -#{opts[:rsync_opts] || default_rsync_opts} #{src_files.join(' ')} #{user}@#{host}:#{dest_dir}"
log "#syncing local chef source files to remote dir ..." do
@password ? system_using_password(rsync_cmd , @password) : system(rsync_cmd)
end
end
private
# Runs +cmd+ under expect, answering the ssh password prompt with +password+.
# NOTE(review): cmd and password are interpolated unescaped into a shell
# string -- a quote in either breaks the expect script; treat inputs as
# trusted or escape them.
def system_using_password(cmd, password)
system %{
expect -c '
set timeout -1
set send_human {.05 0.1 1 .07 1.5}
eval spawn #{cmd}
match_max 100000
expect {
-re " password: "
{ sleep 0.1 ; send -- "#{password}\r" ; sleep 0.3 }
}
interact
'
}
end
end
end
Second attempt to fix rsync not working on Jenkins
# TODO - test if this works when src_files and dest_dir contain whitespace
module Gusteau
# Mixin that pushes local files to a remote host via rsync, optionally
# driving the ssh password prompt through expect. Expects the including
# class to provide #user, #host, #log and @password.
module Rsync
# Default rsync flags: archive, compress (no verbose -- it broke CI output).
def default_rsync_opts
"az"
end
# Rsyncs +src_files+ (Array of paths) to +dest_dir+ on the remote host.
# opts[:rsync_opts] overrides the default flag string.
def sync_files(src_files, dest_dir, opts={})
rsync_cmd = "rsync -#{opts[:rsync_opts] || default_rsync_opts} #{src_files.join(' ')} #{user}@#{host}:#{dest_dir}"
log "#syncing local chef source files to remote dir ..." do
@password ? system_using_password(rsync_cmd , @password) : system(rsync_cmd)
end
end
private
# Runs +cmd+ under expect, answering the ssh password prompt with +password+.
# NOTE(review): cmd and password are interpolated unescaped into a shell
# string -- a quote in either breaks the expect script; treat inputs as
# trusted or escape them.
def system_using_password(cmd, password)
system %{
expect -c '
set timeout -1
set send_human {.05 0.1 1 .07 1.5}
eval spawn #{cmd}
match_max 100000
expect {
-re " password: "
{ sleep 0.1 ; send -- "#{password}\r" ; sleep 0.3 }
}
interact
'
}
end
end
end
|
module RedmineGitHosting
# Facade over the plugin's settings: reads values (with defaults) and can
# reload the stored configuration from the plugin's declared defaults.
module Config
include Config::GitoliteAccess
include Config::GitoliteBase
include Config::GitoliteCache
include Config::GitoliteConfigTests
include Config::GitoliteHooks
include Config::GitoliteInfos
include Config::GitoliteNotifications
include Config::GitoliteStorage
include Config::Mirroring
include Config::RedmineConfig
GITHUB_ISSUE = 'https://github.com/jbox-web/redmine_git_hosting/issues'
GITHUB_WIKI = 'https://jbox-web.github.io/redmine_git_hosting/configuration/variables/'
GITOLITE_DEFAULT_CONFIG_FILE = 'gitolite.conf'
GITOLITE_IDENTIFIER_DEFAULT_PREFIX = 'redmine_'
###############################
## ##
## CONFIGURATION ACCESSORS ##
## ##
###############################
class << self
def logger
RedmineGitHosting.logger
end
# Reads a plugin setting; with bool = true the stored string is coerced
# to true/false ('true' => true, anything else => false).
def get_setting(setting, bool = false)
if bool
return_bool do_get_setting(setting)
else
return do_get_setting(setting)
end
end
# Re-applies the defaults declared in init.rb to the stored settings.
def reload_from_file!(opts = {})
reload!(nil, opts)
end
### PRIVATE ###
def return_bool(value)
value == 'true' ? true : false
end
def do_get_setting(setting)
setting = setting.to_sym
## Wrap this in a begin/rescue statement because Setting table
## may not exist on first migration
begin
value = Setting.plugin_redmine_git_hosting[setting]
rescue => e
value = Redmine::Plugin.find('redmine_git_hosting').settings[:default][setting]
else
## The Setting table exist but does not contain the value yet, fallback to default
if value.nil?
value = Redmine::Plugin.find('redmine_git_hosting').settings[:default][setting]
end
end
value
end
# Applies +config+ (or the plugin defaults when nil) on top of the stored
# settings, logging each change through a ConsoleLogger built from +opts+.
def reload!(config = nil, opts = {})
logger = ConsoleLogger.new(opts)
if !config.nil?
default_hash = config
else
## Get default config from init.rb
default_hash = Redmine::Plugin.find('redmine_git_hosting').settings[:default]
end
if default_hash.nil? || default_hash.empty?
logger.info('No defaults specified in init.rb!')
else
do_reload_config(default_hash, logger)
end
end
# Diffs default_hash against the stored values and persists only when at
# least one key changed.
def do_reload_config(default_hash, logger)
## Refresh Settings cache
Setting.check_cache
## Get actual values
valuehash = (Setting.plugin_redmine_git_hosting).clone
## Update!
changes = 0
default_hash.each do |key, value|
if valuehash[key] != value
logger.info("Changing '#{key}' : #{valuehash[key]} => #{value}")
valuehash[key] = value
changes += 1
end
end
if changes == 0
logger.info('No changes necessary.')
else
logger.info('Committing changes ... ')
begin
## Update Settings
Setting.plugin_redmine_git_hosting = valuehash
## Refresh Settings cache
Setting.check_cache
logger.info('Success!')
rescue => e
logger.error('Failure.')
logger.error(e.message)
end
end
end
end
private_class_method :return_bool,
:do_get_setting,
:reload!,
:do_reload_config
# Logger that writes to the plugin logger and optionally echoes to stdout
# (for rake-task usage).
class ConsoleLogger
attr_reader :console
attr_reader :logger
def initialize(opts = {})
@console = opts[:console] || false
@logger ||= RedmineGitHosting.logger
end
def info(message)
puts message if console
logger.info(message)
end
def error(message)
puts message if console
logger.error(message)
end
# Handle everything else with base object
def method_missing(m, *args, &block)
logger.send m, *args, &block
end
end
end
end
Update project's website address
module RedmineGitHosting
# Facade over the plugin's settings: reads values (with defaults) and can
# reload the stored configuration from the plugin's declared defaults.
module Config
include Config::GitoliteAccess
include Config::GitoliteBase
include Config::GitoliteCache
include Config::GitoliteConfigTests
include Config::GitoliteHooks
include Config::GitoliteInfos
include Config::GitoliteNotifications
include Config::GitoliteStorage
include Config::Mirroring
include Config::RedmineConfig
GITHUB_ISSUE = 'https://github.com/jbox-web/redmine_git_hosting/issues'
GITHUB_WIKI = 'http://redmine-git-hosting.io/configuration/variables/'
GITOLITE_DEFAULT_CONFIG_FILE = 'gitolite.conf'
GITOLITE_IDENTIFIER_DEFAULT_PREFIX = 'redmine_'
###############################
## ##
## CONFIGURATION ACCESSORS ##
## ##
###############################
class << self
def logger
RedmineGitHosting.logger
end
# Reads a plugin setting; with bool = true the stored string is coerced
# to true/false ('true' => true, anything else => false).
def get_setting(setting, bool = false)
if bool
return_bool do_get_setting(setting)
else
return do_get_setting(setting)
end
end
# Re-applies the defaults declared in init.rb to the stored settings.
def reload_from_file!(opts = {})
reload!(nil, opts)
end
### PRIVATE ###
def return_bool(value)
value == 'true' ? true : false
end
def do_get_setting(setting)
setting = setting.to_sym
## Wrap this in a begin/rescue statement because Setting table
## may not exist on first migration
begin
value = Setting.plugin_redmine_git_hosting[setting]
rescue => e
value = Redmine::Plugin.find('redmine_git_hosting').settings[:default][setting]
else
## The Setting table exist but does not contain the value yet, fallback to default
if value.nil?
value = Redmine::Plugin.find('redmine_git_hosting').settings[:default][setting]
end
end
value
end
# Applies +config+ (or the plugin defaults when nil) on top of the stored
# settings, logging each change through a ConsoleLogger built from +opts+.
def reload!(config = nil, opts = {})
logger = ConsoleLogger.new(opts)
if !config.nil?
default_hash = config
else
## Get default config from init.rb
default_hash = Redmine::Plugin.find('redmine_git_hosting').settings[:default]
end
if default_hash.nil? || default_hash.empty?
logger.info('No defaults specified in init.rb!')
else
do_reload_config(default_hash, logger)
end
end
# Diffs default_hash against the stored values and persists only when at
# least one key changed.
def do_reload_config(default_hash, logger)
## Refresh Settings cache
Setting.check_cache
## Get actual values
valuehash = (Setting.plugin_redmine_git_hosting).clone
## Update!
changes = 0
default_hash.each do |key, value|
if valuehash[key] != value
logger.info("Changing '#{key}' : #{valuehash[key]} => #{value}")
valuehash[key] = value
changes += 1
end
end
if changes == 0
logger.info('No changes necessary.')
else
logger.info('Committing changes ... ')
begin
## Update Settings
Setting.plugin_redmine_git_hosting = valuehash
## Refresh Settings cache
Setting.check_cache
logger.info('Success!')
rescue => e
logger.error('Failure.')
logger.error(e.message)
end
end
end
end
private_class_method :return_bool,
:do_get_setting,
:reload!,
:do_reload_config
# Logger that writes to the plugin logger and optionally echoes to stdout
# (for rake-task usage).
class ConsoleLogger
attr_reader :console
attr_reader :logger
def initialize(opts = {})
@console = opts[:console] || false
@logger ||= RedmineGitHosting.logger
end
def info(message)
puts message if console
logger.info(message)
end
def error(message)
puts message if console
logger.error(message)
end
# Handle everything else with base object
def method_missing(m, *args, &block)
logger.send m, *args, &block
end
end
end
end
|
require 'tempfile'
#require 'yaml'
#require 'marshal'
# Length-prefixed Marshal framing shared by the parse-tree client and server.
# Messages on the wire look like "<byte length>\n<marshal payload>\n".
module ParseTreeComm
  SERIALIZE=Marshal

  # Serializes +o+ and writes one framed message to @out, flushing
  # immediately. On any write error both stream ivars are cleared before the
  # error is re-raised, so the connection is not reused.
  def put o
    payload = SERIALIZE.dump o
    framed = payload.size.to_s + "\n" + payload + "\n"
    begin
      @out.write framed
      @out.flush
    rescue Exception
      @out = @in = nil
      raise
    end
  end

  # Reads one framed message from @in and returns the deserialized object.
  # On any read error both stream ivars are cleared before re-raising.
  def get
    begin
      length = @in.gets.to_i
      data = @in.read(length)
      @in.getc # consume trailing \n
    rescue Exception
      @in = @out = nil
      raise
    end
    SERIALIZE.load data
  end
end
# Subprocess that reads Ruby source strings over stdin (framed by
# ParseTreeComm), parses them with ParseTree and writes back the tree plus
# captured parser warnings. Meant to be spawned by a client on a 1.8 ruby.
class ParseTreeServer
include ParseTreeComm
# Path the client should exec to start this server.
def self.path_to_server_command
File.expand_path __FILE__
end
# On rubies newer than 1.8 re-execs the script under $RUBY1_8; otherwise
# loads parse_tree. Any failure is reported to the client (three puts match
# the tree/warnings protocol) before re-raising.
def ensure_parse_tree_and_1_8
if ::RUBY_VERSION[/^\d+\.\d+/].to_f>1.8
ruby18=ENV['RUBY1_8']||fail("ruby > 1.8 used and no RUBY1_8 with parse_tree to chain to")
exec ruby18, $0
else
require 'rubygems'
require 'parse_tree'
end
rescue Exception=>e
put e
put nil
put nil
raise
end
# Request loop: each request is either :exit!, :version, or a source string
# to parse. STDERR is redirected to a tempfile so parser warnings can be
# captured per request and sent back after each tree.
def main
si=STDIN
so=STDOUT
@out=so; @in=si
ensure_parse_tree_and_1_8
begin
warnstash=Tempfile.new "warnstash"
STDERR.reopen warnstash
instance=ParseTree.new
while true
str=get
exit! if str==:exit!
if str==:version
put ::RUBY_VERSION
next
end
# Remember where STDERR was so only this request's warnings are read.
pos=STDERR.pos
tree=
begin
instance.parse_tree_for_string(str) #tree
rescue Exception=>e;
tree=e
end
put tree
open(STDERR.path){|f|
f.pos=pos
put warnings=f.read.split #warnings
}
end
rescue Exception=>e; put e; raise
ensure exit!
end
end
end
# Run the server when executed directly (not when required).
ParseTreeServer.new.main if $0==__FILE__
Slightly better error message when Ruby 1.8 with parse_tree is not found
require 'tempfile'
#require 'yaml'
#require 'marshal'
# Length-prefixed Marshal framing shared by the parse-tree client and server.
# Messages on the wire look like "<byte length>\n<marshal payload>\n".
module ParseTreeComm
  SERIALIZE=Marshal

  # Serializes +o+ and writes one framed message to @out, flushing
  # immediately. On any write error both stream ivars are cleared before the
  # error is re-raised, so the connection is not reused.
  def put o
    payload = SERIALIZE.dump o
    framed = payload.size.to_s + "\n" + payload + "\n"
    begin
      @out.write framed
      @out.flush
    rescue Exception
      @out = @in = nil
      raise
    end
  end

  # Reads one framed message from @in and returns the deserialized object.
  # On any read error both stream ivars are cleared before re-raising.
  def get
    begin
      length = @in.gets.to_i
      data = @in.read(length)
      @in.getc # consume trailing \n
    rescue Exception
      @in = @out = nil
      raise
    end
    SERIALIZE.load data
  end
end
# Subprocess that reads Ruby source strings over stdin (framed by
# ParseTreeComm), parses them with ParseTree and writes back the tree plus
# captured parser warnings. Meant to be spawned by a client on a 1.8 ruby.
class ParseTreeServer
include ParseTreeComm
# Path the client should exec to start this server.
def self.path_to_server_command
File.expand_path __FILE__
end
# On rubies newer than 1.8 re-execs the script under $RUBY1_8; otherwise
# loads parse_tree. Any failure is reported to the client (three puts match
# the tree/warnings protocol) before re-raising.
def ensure_parse_tree_and_1_8
if ::RUBY_VERSION[/^\d+\.\d+/].to_f>1.8
ruby18=ENV['RUBY1_8']||fail("you must use ruby <= 1.8 (with parse_tree) or set RUBY1_8 env to a 1.8 interpreter")
exec ruby18, $0
else
require 'rubygems'
require 'parse_tree'
end
rescue Exception=>e
put e
put nil
put nil
raise
end
# Request loop: each request is either :exit!, :version, or a source string
# to parse. STDERR is redirected to a tempfile so parser warnings can be
# captured per request and sent back after each tree.
def main
si=STDIN
so=STDOUT
@out=so; @in=si
ensure_parse_tree_and_1_8
begin
warnstash=Tempfile.new "warnstash"
STDERR.reopen warnstash
instance=ParseTree.new
while true
str=get
exit! if str==:exit!
if str==:version
put ::RUBY_VERSION
next
end
# Remember where STDERR was so only this request's warnings are read.
pos=STDERR.pos
tree=
begin
instance.parse_tree_for_string(str) #tree
rescue Exception=>e;
tree=e
end
put tree
open(STDERR.path){|f|
f.pos=pos
put warnings=f.read.split #warnings
}
end
rescue Exception=>e; put e; raise
ensure exit!
end
end
end
# Run the server when executed directly (not when required).
ParseTreeServer.new.main if $0==__FILE__
|
module RedshiftConnector
# Gem version string.
VERSION = '5.6.0'
end
version 6.0.0
module RedshiftConnector
# Gem version string.
VERSION = '6.0.0'
end
|
module Helpi
# Gem version string.
VERSION = "0.0.1"
end
increase version number
module Helpi
# Gem version string.
VERSION = "0.0.2"
end
|
module Risu
  module Base
    # Discovers report template files under a path, loads them and keeps the
    # ones that look like valid templates (respond to #render).
    class ModuleManager
      attr_accessor :registered_templates

      # path - directory (relative to the gem root) to scan for templates.
      def initialize (path)
        @registered_templates = Array.new
        @templates = Array.new
        load_templates(path)
      end

      # Loads every .rb file under +path+ and registers each candidate class
      # from TemplateBase.possible_templates that validates.
      def load_templates(path)
        # NOTE(review): the gsub target names template_manager.rb while this
        # file defines ModuleManager -- verify the on-disk filename.
        base_dir = __FILE__.gsub("risu/base/template_manager.rb", "")
        Dir["#{base_dir + path}/**/*.rb"].each{ |x| load x }
        TemplateBase.possible_templates.each do |p|
          if validate(p) == true
            @registered_templates << p
          end
        end
      end

      # A template is valid when it can be instantiated and exposes #render.
      def validate(template)
        t = template.new
        if t == nil
          return false
        end
        if t.respond_to?(:render) == false
          return false
        end
        return true
      end

      # Walks +file_name+ recursively, registering every non-directory entry.
      def find_plugins(file_name)
        Dir.new("#{file_name}").each do |file|
          next if file.match(/^\.+/)
          path = "#{file_name}/#{file}"
          if FileTest.directory?("#{path}")
            list("#{path}")
          else
            self.register_template path
          end
        end
      end

      # Loads a template file and remembers its path.
      def register_template(plugin)
        load plugin
        @templates.push(plugin)
      end

      # Prints the name of every registered template.
      def display_templates
        # Fixed: previously iterated @registered_modules, an ivar that is
        # never assigned, so this always raised NoMethodError on nil.
        @registered_templates.each do |x|
          p = x.new
          printf "[*] Template: %s\n", p.template_info[:name]
        end
      end
    end
  end
end
Added error handling for malformed templates
module Risu
  module Base
    # Discovers report template files under a path, loads them and keeps the
    # ones that look like valid templates (respond to #render). Files that
    # fail to load are skipped instead of aborting the whole scan.
    class TemplateManager
      attr_accessor :registered_templates

      # path - directory (relative to the gem root) to scan for templates.
      def initialize (path)
        @registered_templates = Array.new
        @templates = Array.new
        load_templates(path)
      end

      # Loads every .rb file under +path+ (skipping malformed ones) and
      # registers each candidate class from TemplateBase.possible_templates
      # that validates.
      def load_templates(path)
        begin
          base_dir = __FILE__.gsub("risu/base/template_manager.rb", "")
          Dir["#{base_dir + path}/**/*.rb"].each do |x|
            begin
              load x
            rescue => e
              # A broken template file must not prevent loading the rest.
              next
            end
          end
          TemplateBase.possible_templates.each do |p|
            if validate(p) == true
              @registered_templates << p
            end
          end
        rescue => e
          puts "Bad plugin"
        end
      end

      # A template is valid when it can be instantiated and exposes #render.
      def validate(template)
        t = template.new
        if t == nil
          return false
        end
        if t.respond_to?(:render) == false
          return false
        end
        return true
      end

      # Walks +file_name+ recursively, registering every non-directory entry.
      def find_plugins(file_name)
        Dir.new("#{file_name}").each do |file|
          next if file.match(/^\.+/)
          path = "#{file_name}/#{file}"
          if FileTest.directory?("#{path}")
            list("#{path}")
          else
            self.register_template path
          end
        end
      end

      # Loads a template file and remembers its path.
      def register_template(plugin)
        load plugin
        @templates.push(plugin)
      end

      # Prints the name of every registered template.
      def display_templates
        @registered_templates.each do |x|
          p = x.new
          # Fixed: the previous version left a trailing comma after the puts
          # argument, which made the following `end` a parse error.
          puts "[*] Template: #{p.template_info[:name]}\n"
        end
      end
    end
  end
end
|
module Herd
  module Zip
    # Converts between class names and slash-separated path strings.
    class Base
      # "foo/bar" => Foo::Bar. Fixed: the previous version called #classify
      # on the result of String#split (an Array), which raised NoMethodError
      # since classify is an ActiveSupport String method.
      def class_from_path(path)
        path.classify.constantize
      end

      # Foo::Bar => "Foo/Bar".
      def path_from_class(klass)
        klass.to_s.split('::').join '/'
      end
    end
  end
end
oops, misused classify slightly
module Herd
module Zip
# Converts between class names and slash-separated path strings.
class Base
# "foo/bar" => Foo::Bar (relies on ActiveSupport's String#classify
# and #constantize being available).
def class_from_path(path)
path.classify.constantize
end
# Foo::Bar => "Foo/Bar".
def path_from_class(klass)
klass.to_s.split('::').join '/'
end
end
end
end
|
module Ruboty
module TrainDelay
# Gem version string.
VERSION = "0.1.0"
end
end
Bump to 0.1.1
module Ruboty
module TrainDelay
# Gem version string.
VERSION = "0.1.1"
end
end
|
require "hermes_beacon/version"
require 'ffi'
module Hermes
# FFI bridge to the hermes-beacon native scanner (macOS CoreBluetooth).
module Beacon
extend FFI::Library
# NOTE(review): 'bin/hermes-beacon' is resolved relative to the process's
# working directory, so loading fails unless run from the project root.
ffi_lib 'objc', '/System/Library/Frameworks/Foundation.framework/Foundation', '/System/Library/Frameworks/IOBluetooth.framework/IOBluetooth',
'bin/hermes-beacon'
# Scans for the first beacon and returns its attributes as a Hash.
# NOTE(review): the `return` inside the Proc performs a non-local return
# from #scan through the native callback -- this appears intentional (it is
# what makes #scan return the hash), but returning across an FFI/C frame is
# fragile; confirm the native side tolerates it.
def self.scan(interval=1.1)
scan = {}
callback = Proc.new do |uuid, major, minor, power, rssi|
scan = {uuid: uuid, major: major, minor: minor, power: power, rssi: rssi}
return scan
end
Beacon.startWithTimeInterval(interval,callback)
end
attach_function :print_version, [], :void
attach_function :version, [], :int
# callback(uuid, major, minor, power, rssi) -> int
callback :completion_function, [:string, :int, :int, :int, :int], :int
attach_function :startWithTimeInterval, [:double, :completion_function], :void
end
end
Made the bin path relative so we can actually use it in production
require "hermes_beacon/version"
require 'ffi'
module Hermes
# FFI bridge to the hermes-beacon native scanner (macOS CoreBluetooth).
module Beacon
extend FFI::Library
# The native binary is resolved relative to this file so it loads regardless
# of the process's working directory.
ffi_lib 'objc', '/System/Library/Frameworks/Foundation.framework/Foundation', '/System/Library/Frameworks/IOBluetooth.framework/IOBluetooth',
"#{File.dirname(__FILE__)}/../bin/hermes-beacon"
# Scans for the first beacon and returns its attributes as a Hash.
# NOTE(review): the `return` inside the Proc performs a non-local return
# from #scan through the native callback -- this appears intentional (it is
# what makes #scan return the hash), but returning across an FFI/C frame is
# fragile; confirm the native side tolerates it.
def self.scan(interval=1.1)
scan = {}
callback = Proc.new do |uuid, major, minor, power, rssi|
scan = {uuid: uuid, major: major, minor: minor, power: power, rssi: rssi}
return scan
end
Beacon.startWithTimeInterval(interval,callback)
end
attach_function :print_version, [], :void
attach_function :version, [], :int
# callback(uuid, major, minor, power, rssi) -> int
callback :completion_function, [:string, :int, :int, :int, :int], :int
attach_function :startWithTimeInterval, [:double, :completion_function], :void
end
end
|
require "ruby_es6_module_transpiler/version"
require 'execjs'
require 'json'
# Convert JavaScript files written using the ES6 draft specification
# module syntax to existing library-based module systems such as AMD,
# CommonJS, or simply globals. Based off of Square's ES6 Module
# Transpiler project
module RubyES6ModuleTranspiler
  class << self
    # JS code to be transpiled
    attr_accessor :js_code
    # ExecJS runtime used to execute JavaScript within Ruby.
    Node = ::ExecJS::ExternalRuntime.new(
      name: 'Node.js (V8)',
      command: ['nodejs', 'node'],
      runner_path: File.expand_path('../support/es6-node-runner.js', __FILE__),
      encoding: 'UTF-8'
    )
    # Transpiles given JS code into library-based module systems.
    # Defaults to AMD. Also allows optional parameters typically passed
    # to Square's ES6 Transpiler in options hash.
    #
    # @param code [String] the JavaScript code to be transpiled
    # @option options [String] :type specify whether to transpile into AMD, CommonJS or globals
    # @option options [String] :moduleName overriding default module name
    # @return [String] transpiled version of the input JavaScript
    def transpile(code, options = {})
      @js_code = code
      Node.exec(generate_source(options))
    end
    private
    # Absolute path to the bundled minified transpiler.
    def transpiler_js_path
      File.expand_path('../support/es6-module-transpiler.min.js', __FILE__)
    end
    # Builds the JS program that loads the transpiler and compiles
    # @js_code. (The previously unused local assignment was dropped; the
    # heredoc itself is the return value.)
    def generate_source(options)
      <<-SOURCE
        var Compiler, compiler, output;
        Compiler = require("#{transpiler_js_path}").Compiler;
        compiler = new Compiler(#{::JSON.generate(@js_code, quirks_mode: true)}, '#{module_name(options)}', #{options.to_json});
        return output = compiler.#{compiler_type(options)}();
      SOURCE
    end
    # Reads the file at +path+ in binary mode and returns its contents.
    def read_js_file(path)
      file = File.open(path, "rb")
      data = file.read
      file.close
      data
    end
    # Maps options[:type] to the compiler method name ("toAMD", "toCJS",
    # "toYUI" or "toGlobals"); unknown or missing types fall back to AMD.
    def compiler_type(options)
      available_types = {
        amd: 'AMD',
        cjs: 'CJS',
        yui: 'YUI',
        globals: 'Globals'
      }
      if options[:type]
        type = available_types[options[:type].downcase.to_sym] || 'AMD'
      else
        type = 'AMD'
      end
      "to#{type}"
    end
    # Caller-supplied module name override.
    def module_name(options)
      options[:moduleName]
    end
  end
end
document return for transpile method
require "ruby_es6_module_transpiler/version"
require 'execjs'
require 'json'
# Convert JavaScript files written using the ES6 draft specification
# module syntax to existing library-based module systems such as AMD,
# CommonJS, or simply globals. Based off of Square's ES6 Module
# Transpiler project
module RubyES6ModuleTranspiler
  class << self
    # JS code to be transpiled
    attr_accessor :js_code
    # ExecJS runtime used to run JavaScript from Ruby.
    Node = ::ExecJS::ExternalRuntime.new(
      name: 'Node.js (V8)',
      command: ['nodejs', 'node'],
      runner_path: File.expand_path('../support/es6-node-runner.js', __FILE__),
      encoding: 'UTF-8'
    )
    # Transpiles the given ES6 module code to AMD (the default), CommonJS,
    # YUI or globals, forwarding extra options to Square's transpiler.
    #
    # @param code [String] the JavaScript code to be transpiled
    # @option options [String] :type target module system (AMD, CJS, YUI, globals)
    # @option options [String] :moduleName overrides the default module name
    # @return [String] transpiled version of the input JavaScript
    def transpile(code, options = {})
      @js_code = code
      Node.exec(generate_source(options))
    end
    private
    # Path to the bundled minified transpiler.
    def transpiler_js_path
      File.expand_path('../support/es6-module-transpiler.min.js', __FILE__)
    end
    # Builds the JS program that loads the transpiler and compiles @js_code.
    def generate_source(options)
      source = <<-SOURCE
        var Compiler, compiler, output;
        Compiler = require("#{transpiler_js_path}").Compiler;
        compiler = new Compiler(#{::JSON.generate(@js_code, quirks_mode: true)}, '#{module_name(options)}', #{options.to_json});
        return output = compiler.#{compiler_type(options)}();
      SOURCE
    end
    # Reads a file's bytes.
    def read_js_file(path)
      File.open(path, "rb") { |f| f.read }
    end
    # Returns "toAMD" / "toCJS" / "toYUI" / "toGlobals"; unknown or missing
    # types fall back to AMD.
    def compiler_type(options)
      names = { amd: 'AMD', cjs: 'CJS', yui: 'YUI', globals: 'Globals' }
      chosen = options[:type] && names[options[:type].downcase.to_sym]
      "to#{chosen || 'AMD'}"
    end
    # Caller-supplied module name override.
    def module_name(options)
      options[:moduleName]
    end
  end
end
# -*- coding: utf-8 -*-
# (C) Copyright 2003-2017 by Masahiro TANAKA
# This program is free software under MIT license.
# NO WARRANTY.
require "date"
module HolidayJapan
  VERSION = "1.2.3"
  # Day-of-month lower bounds for "Nth Monday" (Happy Monday) rules:
  # the Nth week of a month starts no earlier than these days.
  WEEK1 = 1
  WEEK2 = 8
  WEEK3 = 15
  WEEK4 = 22
  # Weekday numbers matching Date#wday (0 = Sunday).
  # NOTE(review): FRU looks like a typo for FRI; it is unused here and kept
  # as-is for backward compatibility.
  SUN,MON,TUE,WED,THU,FRU,SAT = (0..6).to_a
  # Open-ended upper bound for year ranges (fallback for Rubies without
  # Float::INFINITY).
  INF = (defined? Float::INFINITY) ? Float::INFINITY : 1e34
  # Holiday data: valid for dates on or after 1948-07-20.
  # Entry layout: [name, applicable year range, month, day-or-proc, optional
  # weekday]. With a weekday, the holiday is the first such weekday on or
  # after the given day (Nth-week rule); a Proc computes the day of month
  # from the year (equinox formulas).
  DATA = [
    ["元日", 1949..INF , 1, 1 ],
    ["成人の日", 1949..1999, 1, 15 ],
    ["成人の日", 2000..INF , 1, WEEK2, MON ],
    ["建国記念の日",1967..INF , 2, 11 ],
    ["天皇誕生日", 2019..INF , 2, 23 ],
    ["天皇誕生日", 1949..1988, 4, 29 ],
    ["みどりの日", 1989..2006, 4, 29 ],
    ["昭和の日", 2007..INF , 4, 29 ],
    ["憲法記念日", 1949..INF , 5, 3 ],
    ["みどりの日", 2007..INF , 5, 4 ],
    ["こどもの日", 1949..INF , 5, 5 ],
    ["海の日", 1996..2002, 7, 20 ],
    ["海の日", 2003..INF , 7, WEEK3, MON ],
    ["山の日", 2016..INF , 8, 11 ],
    ["敬老の日", 1966..2002, 9, 15 ],
    ["敬老の日", 2003..INF , 9, WEEK3, MON ],
    ["体育の日", 1966..1999, 10, 10 ],
    ["体育の日", 2000..INF , 10, WEEK2, MON ],
    ["文化の日", 1948..INF , 11, 3 ],
    ["勤労感謝の日",1948..INF , 11, 23 ],
    ["天皇誕生日", 1989..2018, 12, 23 ],
    ["春分の日", 1949..1979, 3,
      proc{|y|Integer(20.8357+0.242194*(y-1980))-Integer((y-1983)/4.0)} ],
    ["春分の日", 1980..2099, 3,
      proc{|y|Integer(20.8431+0.242194*(y-1980))-Integer((y-1980)/4.0)} ],
    ["春分の日", 2100..2150, 3,
      proc{|y|Integer(21.8510+0.242194*(y-1980))-Integer((y-1980)/4.0)} ],
    ["秋分の日" , 1948..1979, 9,
      proc{|y|Integer(23.2588+0.242194*(y-1980))-Integer((y-1983)/4.0)} ],
    ["秋分の日" , 1980..2099, 9,
      proc{|y|Integer(23.2488+0.242194*(y-1980))-Integer((y-1980)/4.0)} ],
    ["秋分の日" , 2100..2150, 9,
      proc{|y|Integer(24.2488+0.242194*(y-1980))-Integer((y-1980)/4.0)} ],
    ["皇太子明仁親王の結婚の儀", 1959..1959, 4, 10 ],
    ["昭和天皇の大喪の礼", 1989..1989, 2, 24 ],
    ["即位礼正殿の儀", 1990..1990, 11, 12 ],
    ["皇太子徳仁親王の結婚の儀", 1993..1993, 6, 9 ]
  ]
  DATA.each{|x| x[0].freeze; x.freeze }
  DATA.freeze
  # Year => {Date => name} cache, filled lazily by create_table.
  TABLE = {}
  # First day the substitute-holiday law applied.
  FURIKAE_START = Date.new(1973,4,12).freeze
  module_function
  # Returns the Date of the holiday described by one DATA entry +data+ for
  # +year+, or nil when the rule does not apply to that year.
  def holiday_date(year,data)
    year_range,mon,day,wday = data[1..4]
    if year_range === year
      case day
      when Integer
        if wday
          # Shift forward to the requested weekday (Nth-week rule).
          wday0 = Date.new(year,mon,day).wday
          Date.new( year, mon, day+(wday-wday0+7)%7 )
        else
          Date.new( year, mon, day )
        end
      when Proc
        Date.new( year, mon, day.call(year) )
      end
    end
  end
  # Builds and freezes the {Date => holiday name} table for year +y+,
  # including substitute holidays and sandwiched "citizens' holidays".
  def create_table(y)
    h={}
    a=[]
    # list holidays
    DATA.each do |x|
      if d = holiday_date(y,x)
        h[d] = x[0]
        a << d
      end
    end
    # compensating holiday
    if y >= 2007
      a.each do |d|
        if d.wday==SUN
          # The substitute falls on the first non-holiday after the Sunday.
          d+=1 while h[d]
          h[d] = "振替休日"
        end
      end
    elsif y >= 1973
      a.each do |d|
        if d.wday==SUN and d>=FURIKAE_START
          h[d+1] = "振替休日"
        end
      end
    end
    # consecutive holiday
    if y >= 1986
      a.each do |d|
        # A weekday sandwiched between two holidays becomes a holiday.
        if h[d+2] and !h[d+1] and d.wday!=SAT
          h[d+1] = "国民の休日"
        end
      end
    end
    h.freeze
  end
  # Returns the holiday name for +date+, or nil when it is not a holiday.
  def name(date)
    y = date.year
    (TABLE[y] ||= create_table(y))[date]
  end
  # True when +date+ is a holiday.
  def check(date)
    !HolidayJapan.name(date).nil?
  end
  # Returns [Date, name] pairs for +year+, sorted by date.
  def list_year(year)
    year = Integer(year)
    TABLE[year] ||= create_table(year)
    TABLE[year].sort_by{|x| x[0]}
  end
  # Returns (and caches) the {Date => name} hash for +year+.
  def hash_year(year)
    TABLE[year] ||= create_table(year)
  end
  # Returns a {Date => name} hash of the holidays between the two dates,
  # inclusive. Raises ArgumentError when the range is inverted.
  def between(from_date,to_date)
    if from_date > to_date
      raise ArgumentError, "to_date is earlier than from_date"
    end
    y1 = from_date.year
    y2 = to_date.year
    if y1 == y2
      result = hash_year(y1).select{|d,n| d >= from_date && d <= to_date}
    else
      result = hash_year(y1).select{|d,n| d >= from_date}
      y = y1 + 1
      while y < y2
        result.merge!(hash_year(y))
        y += 1
      end
      hash_year(y).each{|d,n| result[d]=n if d <= to_date}
    end
    result
  end
  # Prints the holiday list of a single year (internal helper).
  def _print_year(year)
    puts "listing year #{year}..."
    list_year(year).each do |y|
      puts "#{y[0].strftime('%Y-%m-%d %a')} #{y[1]}"
    end
  end
  # Prints the holidays of a single year or of each year in a Range.
  def print_year(year)
    case year
    when Range
      year.each do |y|
        _print_year(y)
      end
    else
      _print_year(year)
    end
  end
end
# compatible with Funaba-san's holiday.rb
class Date
  # True when this date is a Japanese national holiday.
  def national_holiday?
    !HolidayJapan.name(self).nil?
  end
end
# command line
if __FILE__ == $0
  # Print the holiday list of the year (or year range) given on the
  # command line, e.g. `ruby holiday_japan.rb 2020` or `2020..2025`.
  #
  # Security fix: the original used eval(ARGV[0]), which executes arbitrary
  # Ruby supplied on the command line. Parse the two supported forms
  # (integer year, year range) explicitly instead.
  begin
    arg =
      if ARGV[0] =~ /\A(\d+)(\.{2,3})(\d+)\z/
        Range.new(Integer($1), Integer($3), $2 == '...')
      else
        Integer(ARGV[0])
      end
  rescue
    raise ArgumentError,"invalid argument : #{ARGV[0].inspect}
usage:
  ruby holiday_japan.rb year"
  end
  HolidayJapan.print_year(arg)
end
v1.2.4
# -*- coding: utf-8 -*-
# (C) Copyright 2003-2017 by Masahiro TANAKA
# This program is free software under MIT license.
# NO WARRANTY.
require "date"
module HolidayJapan
  VERSION = "1.2.4"
  # Day-of-month lower bounds for "Nth Monday" (Happy Monday) rules:
  # the Nth week of a month starts no earlier than these days.
  WEEK1 = 1
  WEEK2 = 8
  WEEK3 = 15
  WEEK4 = 22
  # Weekday numbers matching Date#wday (0 = Sunday).
  # NOTE(review): FRU looks like a typo for FRI; it is unused here and kept
  # as-is for backward compatibility.
  SUN,MON,TUE,WED,THU,FRU,SAT = (0..6).to_a
  # Open-ended upper bound for year ranges (fallback for Rubies without
  # Float::INFINITY).
  INF = (defined? Float::INFINITY) ? Float::INFINITY : 1e34
  # Holiday data: valid for dates on or after 1948-07-20.
  # Entry layout: [name, applicable year range, month, day-or-proc, optional
  # weekday]. With a weekday, the holiday is the first such weekday on or
  # after the given day (Nth-week rule); a Proc computes the day of month
  # from the year (equinox formulas).
  DATA = [
    ["元日", 1949..INF , 1, 1 ],
    ["成人の日", 1949..1999, 1, 15 ],
    ["成人の日", 2000..INF , 1, WEEK2, MON ],
    ["建国記念の日",1967..INF , 2, 11 ],
    ["天皇誕生日", 2019..INF , 2, 23 ],
    ["天皇誕生日", 1949..1988, 4, 29 ],
    ["みどりの日", 1989..2006, 4, 29 ],
    ["昭和の日", 2007..INF , 4, 29 ],
    ["憲法記念日", 1949..INF , 5, 3 ],
    ["みどりの日", 2007..INF , 5, 4 ],
    ["こどもの日", 1949..INF , 5, 5 ],
    ["海の日", 1996..2002, 7, 20 ],
    ["海の日", 2003..INF , 7, WEEK3, MON ],
    ["山の日", 2016..INF , 8, 11 ],
    ["敬老の日", 1966..2002, 9, 15 ],
    ["敬老の日", 2003..INF , 9, WEEK3, MON ],
    ["体育の日", 1966..1999, 10, 10 ],
    ["体育の日", 2000..INF , 10, WEEK2, MON ],
    ["文化の日", 1948..INF , 11, 3 ],
    ["勤労感謝の日",1948..INF , 11, 23 ],
    ["天皇誕生日", 1989..2018, 12, 23 ],
    ["春分の日", 1949..1979, 3,
      proc{|y|Integer(20.8357+0.242194*(y-1980))-Integer((y-1983)/4.0)} ],
    ["春分の日", 1980..2099, 3,
      proc{|y|Integer(20.8431+0.242194*(y-1980))-Integer((y-1980)/4.0)} ],
    ["春分の日", 2100..2150, 3,
      proc{|y|Integer(21.8510+0.242194*(y-1980))-Integer((y-1980)/4.0)} ],
    ["秋分の日" , 1948..1979, 9,
      proc{|y|Integer(23.2588+0.242194*(y-1980))-Integer((y-1983)/4.0)} ],
    ["秋分の日" , 1980..2099, 9,
      proc{|y|Integer(23.2488+0.242194*(y-1980))-Integer((y-1980)/4.0)} ],
    ["秋分の日" , 2100..2150, 9,
      proc{|y|Integer(24.2488+0.242194*(y-1980))-Integer((y-1980)/4.0)} ],
    ["皇太子明仁親王の結婚の儀", 1959..1959, 4, 10 ],
    ["昭和天皇の大喪の礼", 1989..1989, 2, 24 ],
    ["即位礼正殿の儀", 1990..1990, 11, 12 ],
    ["皇太子徳仁親王の結婚の儀", 1993..1993, 6, 9 ]
  ]
  DATA.each{|x| x[0].freeze; x.freeze }
  DATA.freeze
  # Year => {Date => name} cache, filled lazily by create_table.
  TABLE = {}
  # First day the substitute-holiday law applied.
  FURIKAE_START = Date.new(1973,4,12).freeze
  module_function
  # Returns the Date of the holiday described by one DATA entry +data+ for
  # +year+, or nil when the rule does not apply to that year.
  def holiday_date(year,data)
    year_range,mon,day,wday = data[1..4]
    if year_range === year
      case day
      when Integer
        if wday
          # Shift forward to the requested weekday (Nth-week rule).
          wday0 = Date.new(year,mon,day).wday
          Date.new( year, mon, day+(wday-wday0+7)%7 )
        else
          Date.new( year, mon, day )
        end
      when Proc
        Date.new( year, mon, day.call(year) )
      end
    end
  end
  # Builds and freezes the {Date => holiday name} table for year +y+,
  # including substitute holidays and sandwiched "citizens' holidays".
  def create_table(y)
    h={}
    a=[]
    # list holidays
    DATA.each do |x|
      if d = holiday_date(y,x)
        h[d] = x[0]
        a << d
      end
    end
    # compensating holiday
    if y >= 2007
      a.each do |d|
        if d.wday==SUN
          # The substitute falls on the first non-holiday after the Sunday.
          d+=1 while h[d]
          h[d] = "振替休日"
        end
      end
    elsif y >= 1973
      a.each do |d|
        if d.wday==SUN and d>=FURIKAE_START
          h[d+1] = "振替休日"
        end
      end
    end
    # consecutive holiday
    if y >= 1986
      a.each do |d|
        # A weekday sandwiched between two holidays becomes a holiday.
        if h[d+2] and !h[d+1] and d.wday!=SAT
          h[d+1] = "国民の休日"
        end
      end
    end
    h.freeze
  end
  # Returns the holiday name for +date+, or nil when it is not a holiday.
  def name(date)
    y = date.year
    (TABLE[y] ||= create_table(y))[date]
  end
  # True when +date+ is a holiday.
  def check(date)
    !HolidayJapan.name(date).nil?
  end
  # Returns [Date, name] pairs for +year+, sorted by date.
  def list_year(year)
    year = Integer(year)
    TABLE[year] ||= create_table(year)
    TABLE[year].sort_by{|x| x[0]}
  end
  # Returns (and caches) the {Date => name} hash for +year+.
  def hash_year(year)
    TABLE[year] ||= create_table(year)
  end
  # Returns a {Date => name} hash of the holidays between the two dates,
  # inclusive. Raises ArgumentError when the range is inverted.
  def between(from_date,to_date)
    if from_date > to_date
      raise ArgumentError, "to_date is earlier than from_date"
    end
    y1 = from_date.year
    y2 = to_date.year
    if y1 == y2
      result = hash_year(y1).select{|d,n| d >= from_date && d <= to_date}
    else
      result = hash_year(y1).select{|d,n| d >= from_date}
      y = y1 + 1
      while y < y2
        result.merge!(hash_year(y))
        y += 1
      end
      hash_year(y).each{|d,n| result[d]=n if d <= to_date}
    end
    result
  end
  # Prints the holiday list of a single year (internal helper).
  def _print_year(year)
    puts "listing year #{year}..."
    list_year(year).each do |y|
      puts "#{y[0].strftime('%Y-%m-%d %a')} #{y[1]}"
    end
  end
  # Prints the holidays of a single year or of each year in a Range.
  def print_year(year)
    case year
    when Range
      year.each do |y|
        _print_year(y)
      end
    else
      _print_year(year)
    end
  end
end
# compatible with Funaba-san's holiday.rb
class Date
  # Whether this date is a Japanese national holiday.
  def national_holiday?
    holiday_name = HolidayJapan.name(self)
    holiday_name ? true : false
  end
end
# command line
if __FILE__ == $0
  # Print the holiday list of the year (or year range) given on the
  # command line, e.g. `ruby holiday_japan.rb 2020` or `2020..2025`.
  #
  # Security fix: the original used eval(ARGV[0]), which executes arbitrary
  # Ruby supplied on the command line. Parse the two supported forms
  # (integer year, year range) explicitly instead.
  begin
    arg =
      if ARGV[0] =~ /\A(\d+)(\.{2,3})(\d+)\z/
        Range.new(Integer($1), Integer($3), $2 == '...')
      else
        Integer(ARGV[0])
      end
  rescue
    raise ArgumentError,"invalid argument : #{ARGV[0].inspect}
usage:
  ruby holiday_japan.rb year"
  end
  HolidayJapan.print_year(arg)
end
|
module RushAnalyticsApi
  # A single SOAP call against the Rush Analytics API (via Savon).
  class Request
    URL = 'https://api.rush-analytics.ru/api.php?wsdl'

    # action  - SOAP operation name.
    # api_key - API key; falls back to ENV['rush_api_key'].
    # message - extra SOAP message parameters (merged over the auth hash).
    def initialize(action, api_key = nil, message = {})
      @action = action
      @api_key = api_key || ENV['rush_api_key']
      auth = { 'hash' => @api_key }
      @message = auth.merge(message)
    end

    # Performs the SOAP request and returns the Savon response.
    def call
      @client = Savon.client(wsdl: URL, log_level: :debug, pretty_print_xml: true, log: true)
      @client.call(@action, message: @message)
    end
  end
end
Пробуем не падать с OpenSSL::SSL::SSLError: hostname "api.rush-analytics.ru" does not match the server certificate
module RushAnalyticsApi
  # A single SOAP call against the Rush Analytics API (via Savon).
  class Request
    URL = 'https://api.rush-analytics.ru/api.php?wsdl'
    # action  - SOAP operation name.
    # api_key - API key; falls back to ENV['rush_api_key'].
    # message - extra SOAP message parameters (merged over the auth hash).
    def initialize(action, api_key = nil, message = {})
      @action = action
      @api_key = api_key || ENV['rush_api_key']
      @message = { 'hash' => @api_key }.merge(message)
    end
    # Performs the SOAP request and returns the Savon response.
    #
    # SECURITY NOTE(review): ssl_verify_mode: :none disables TLS
    # certificate verification (added to work around a hostname-mismatch
    # OpenSSL::SSL::SSLError). This permits man-in-the-middle attacks;
    # prefer fixing the server certificate/hostname instead.
    def call
      @client = Savon.client(wsdl: URL, log_level: :debug, pretty_print_xml: true, log: true, ssl_verify_mode: :none)
      @client.call(@action, message: @message)
    end
  end
end
|
# -*- encoding: utf-8 -*-
require File.expand_path('../lib/light_service/version', __FILE__)

# Gem specification for light_service.
Gem::Specification.new do |gem|
  gem.authors = ["Attila Domokos"]
  gem.email = ["adomokos@gmail.com"]
  # Fix: replace the generator's "TODO" placeholders (RubyGems warns on
  # TODO in these fields) with the project's real description.
  gem.description = %q{A service skeleton with an emphasis on simplicity}
  gem.summary = %q{A service skeleton with an emphasis on simplicity}
  gem.homepage = ""
  gem.files = `git ls-files`.split($\)
  gem.executables = gem.files.grep(%r{^bin/}).map{ |f| File.basename(f) }
  gem.test_files = gem.files.grep(%r{^(test|spec|features)/})
  gem.name = "light_service"
  gem.require_paths = ["lib"]
  gem.version = LightService::VERSION
end
Adding description
# -*- encoding: utf-8 -*-
require File.expand_path('../lib/light_service/version', __FILE__)

# Gem specification for light_service.
Gem::Specification.new do |gem|
  gem.authors = ["Attila Domokos"]
  gem.email = ["adomokos@gmail.com"]
  # Fix: description/summary were each assigned twice; the later "TODO"
  # placeholder assignments clobbered the real values. Keep only the real
  # ones.
  gem.description = %q{A service skeleton with an emphasis on simplicity}
  gem.summary = %q{A service skeleton with an emphasis on simplicity}
  gem.homepage = ""
  gem.files = `git ls-files`.split($\)
  gem.executables = gem.files.grep(%r{^bin/}).map{ |f| File.basename(f) }
  gem.test_files = gem.files.grep(%r{^(test|spec|features)/})
  gem.name = "light_service"
  gem.require_paths = ["lib"]
  gem.version = LightService::VERSION
end
|
Sequel.require 'adapters/utils/emulate_offset_with_row_number'
module Sequel
module DB2
@use_clob_as_blob = false
class << self
# Whether to use clob as the generic File type, true by default.
attr_accessor :use_clob_as_blob
end
module DatabaseMethods
extend Sequel::Database::ResetIdentifierMangling
AUTOINCREMENT = 'GENERATED ALWAYS AS IDENTITY'.freeze
NOT_NULL = ' NOT NULL'.freeze
NULL = ''.freeze
# DB2 always uses :db2 as it's database type
def database_type
:db2
end
# Return the database version as a string. Don't rely on this,
# it may return an integer in the future.
def db2_version
return @db2_version if @db2_version
@db2_version = metadata_dataset.with_sql("select service_level from sysibmadm.env_inst_info").first[:service_level]
end
alias_method :server_version, :db2_version
# Use SYSIBM.SYSCOLUMNS to get the information on the tables.
def schema_parse_table(table, opts = OPTS)
m = output_identifier_meth(opts[:dataset])
im = input_identifier_meth(opts[:dataset])
metadata_dataset.with_sql("SELECT * FROM SYSIBM.SYSCOLUMNS WHERE TBNAME = #{literal(im.call(table))} ORDER BY COLNO").
collect do |column|
column[:db_type] = column.delete(:typename)
if column[:db_type] == "DECIMAL"
column[:db_type] << "(#{column[:longlength]},#{column[:scale]})"
end
column[:allow_null] = column.delete(:nulls) == 'Y'
column[:primary_key] = column.delete(:identity) == 'Y' || !column[:keyseq].nil?
column[:type] = schema_column_type(column[:db_type])
[ m.call(column.delete(:name)), column]
end
end
# Use SYSCAT.TABLES to get the tables for the database
def tables
metadata_dataset.
with_sql("SELECT TABNAME FROM SYSCAT.TABLES WHERE TYPE='T' AND OWNER = #{literal(input_identifier_meth.call(opts[:user]))}").
all.map{|h| output_identifier_meth.call(h[:tabname]) }
end
# Use SYSCAT.TABLES to get the views for the database
def views
metadata_dataset.
with_sql("SELECT TABNAME FROM SYSCAT.TABLES WHERE TYPE='V' AND OWNER = #{literal(input_identifier_meth.call(opts[:user]))}").
all.map{|h| output_identifier_meth.call(h[:tabname]) }
end
# Use SYSCAT.INDEXES to get the indexes for the table
def indexes(table, opts = OPTS)
m = output_identifier_meth
indexes = {}
metadata_dataset.
from(:syscat__indexes).
select(:indname, :uniquerule, :colnames).
where(:tabname=>input_identifier_meth.call(table), :system_required=>0).
each do |r|
indexes[m.call(r[:indname])] = {:unique=>(r[:uniquerule]=='U'), :columns=>r[:colnames][1..-1].split('+').map{|v| m.call(v)}}
end
indexes
end
# DB2 supports transaction isolation levels.
def supports_transaction_isolation_levels?
true
end
private
# Handle DB2 specific alter table operations.
def alter_table_sql(table, op)
case op[:op]
when :add_column
if op[:primary_key] && op[:auto_increment] && op[:type] == Integer
[
"ALTER TABLE #{quote_schema_table(table)} ADD #{column_definition_sql(op.merge(:auto_increment=>false, :primary_key=>false, :default=>0, :null=>false))}",
"ALTER TABLE #{quote_schema_table(table)} ALTER COLUMN #{literal(op[:name])} DROP DEFAULT",
"ALTER TABLE #{quote_schema_table(table)} ALTER COLUMN #{literal(op[:name])} SET #{AUTOINCREMENT}"
]
else
"ALTER TABLE #{quote_schema_table(table)} ADD #{column_definition_sql(op)}"
end
when :drop_column
"ALTER TABLE #{quote_schema_table(table)} DROP #{column_definition_sql(op)}"
when :rename_column # renaming is only possible after db2 v9.7
"ALTER TABLE #{quote_schema_table(table)} RENAME COLUMN #{quote_identifier(op[:name])} TO #{quote_identifier(op[:new_name])}"
when :set_column_type
"ALTER TABLE #{quote_schema_table(table)} ALTER COLUMN #{quote_identifier(op[:name])} SET DATA TYPE #{type_literal(op)}"
when :set_column_default
"ALTER TABLE #{quote_schema_table(table)} ALTER COLUMN #{quote_identifier(op[:name])} SET DEFAULT #{literal(op[:default])}"
when :add_constraint
if op[:type] == :unique
sqls = op[:columns].map{|c| ["ALTER TABLE #{quote_schema_table(table)} ALTER COLUMN #{quote_identifier(c)} SET NOT NULL", reorg_sql(table)]}
sqls << super
sqls.flatten
else
super
end
else
super
end
end
# DB2 uses an identity column for autoincrement.
def auto_increment_sql
AUTOINCREMENT
end
# Add null/not null SQL fragment to column creation SQL.
def column_definition_null_sql(sql, column)
null = column.fetch(:null, column[:allow_null])
null = false if column[:primary_key]
sql << NOT_NULL if null == false
sql << NULL if null == true
end
# Supply columns with NOT NULL if they are part of a composite
# primary key or unique constraint
def column_list_sql(g)
ks = []
g.constraints.each{|c| ks = c[:columns] if [:primary_key, :unique].include?(c[:type])}
g.columns.each{|c| c[:null] = false if ks.include?(c[:name]) }
super
end
# Insert data from the current table into the new table after
# creating the table, since it is not possible to do it in one step.
def create_table_as(name, sql, options)
super
from(name).insert(sql.is_a?(Dataset) ? sql : dataset.with_sql(sql))
end
# DB2 requires parens around the SELECT, and DEFINITION ONLY at the end.
def create_table_as_sql(name, sql, options)
"#{create_table_prefix_sql(name, options)} AS (#{sql}) DEFINITION ONLY"
end
# Here we use DGTT which has most backward compatibility, which uses
# DECLARE instead of CREATE. CGTT can only be used after version 9.7.
# http://www.ibm.com/developerworks/data/library/techarticle/dm-0912globaltemptable/
def create_table_prefix_sql(name, options)
if options[:temp]
"DECLARE GLOBAL TEMPORARY TABLE #{quote_identifier(name)}"
else
super
end
end
DATABASE_ERROR_REGEXPS = {
/DB2 SQL Error: SQLCODE=-803, SQLSTATE=23505|One or more values in the INSERT statement, UPDATE statement, or foreign key update caused by a DELETE statement are not valid because the primary key, unique constraint or unique index/ => UniqueConstraintViolation,
/DB2 SQL Error: (SQLCODE=-530, SQLSTATE=23503|SQLCODE=-532, SQLSTATE=23504)|The insert or update value of the FOREIGN KEY .+ is not equal to any value of the parent key of the parent table|A parent row cannot be deleted because the relationship .+ restricts the deletion/ => ForeignKeyConstraintViolation,
/DB2 SQL Error: SQLCODE=-545, SQLSTATE=23513|The requested operation is not allowed because a row does not satisfy the check constraint/ => CheckConstraintViolation,
/DB2 SQL Error: SQLCODE=-407, SQLSTATE=23502|Assignment of a NULL value to a NOT NULL column/ => NotNullConstraintViolation,
/DB2 SQL Error: SQLCODE=-911, SQLSTATE=40001|The current transaction has been rolled back because of a deadlock or timeout/ => SerializationFailure,
}.freeze
def database_error_regexps
DATABASE_ERROR_REGEXPS
end
# DB2 has issues with quoted identifiers, so
# turn off database quoting by default.
def quote_identifiers_default
false
end
# DB2 uses RENAME TABLE to rename tables.
def rename_table_sql(name, new_name)
"RENAME TABLE #{quote_schema_table(name)} TO #{quote_schema_table(new_name)}"
end
# Run the REORG TABLE command for the table, necessary when
# the table has been altered.
def reorg(table)
synchronize(opts[:server]){|c| c.execute(reorg_sql(table))}
end
# The SQL to use for REORGing a table.
def reorg_sql(table)
"CALL ADMIN_CMD(#{literal("REORG TABLE #{table}")})"
end
# Treat clob as blob if use_clob_as_blob is true
def schema_column_type(db_type)
(::Sequel::DB2::use_clob_as_blob && db_type.downcase == 'clob') ? :blob : super
end
# SQL to set the transaction isolation level
def set_transaction_isolation_sql(level)
"SET CURRENT ISOLATION #{Database::TRANSACTION_ISOLATION_LEVELS[level]}"
end
# We uses the clob type by default for Files.
# Note: if user select to use blob, then insert statement should use
# use this for blob value:
# cast(X'fffefdfcfbfa' as blob(2G))
def type_literal_generic_file(column)
::Sequel::DB2::use_clob_as_blob ? :clob : :blob
end
# DB2 uses smallint to store booleans.
def type_literal_generic_trueclass(column)
:smallint
end
alias type_literal_generic_falseclass type_literal_generic_trueclass
# DB2 uses clob for text types.
def uses_clob_for_text?
true
end
# DB2 supports views with check option.
def view_with_check_option_support
:local
end
end
module DatasetMethods
include EmulateOffsetWithRowNumber
PAREN_CLOSE = Dataset::PAREN_CLOSE
PAREN_OPEN = Dataset::PAREN_OPEN
BITWISE_METHOD_MAP = {:& =>:BITAND, :| => :BITOR, :^ => :BITXOR, :'B~'=>:BITNOT}
EMULATED_FUNCTION_MAP = {:char_length=>'length'.freeze}
BOOL_TRUE = '1'.freeze
BOOL_FALSE = '0'.freeze
CAST_STRING_OPEN = "RTRIM(CHAR(".freeze
CAST_STRING_CLOSE = "))".freeze
FETCH_FIRST_ROW_ONLY = " FETCH FIRST ROW ONLY".freeze
FETCH_FIRST = " FETCH FIRST ".freeze
ROWS_ONLY = " ROWS ONLY".freeze
EMPTY_FROM_TABLE = ' FROM "SYSIBM"."SYSDUMMY1"'.freeze
HSTAR = "H*".freeze
BLOB_OPEN = "BLOB(X'".freeze
BLOB_CLOSE = "')".freeze
# DB2 casts strings using RTRIM and CHAR instead of VARCHAR.
def cast_sql_append(sql, expr, type)
if(type == String)
sql << CAST_STRING_OPEN
literal_append(sql, expr)
sql << CAST_STRING_CLOSE
else
super
end
end
def complex_expression_sql_append(sql, op, args)
case op
when :&, :|, :^, :%, :<<, :>>
complex_expression_emulate_append(sql, op, args)
when :'B~'
literal_append(sql, SQL::Function.new(:BITNOT, *args))
when :extract
sql << args.at(0).to_s
sql << PAREN_OPEN
literal_append(sql, args.at(1))
sql << PAREN_CLOSE
else
super
end
end
def supports_cte?(type=:select)
type == :select
end
# DB2 supports GROUP BY CUBE
def supports_group_cube?
true
end
# DB2 supports GROUP BY ROLLUP
def supports_group_rollup?
true
end
# DB2 does not support IS TRUE.
def supports_is_true?
false
end
# DB2 supports lateral subqueries
def supports_lateral_subqueries?
true
end
# DB2 does not support multiple columns in IN.
def supports_multiple_column_in?
false
end
# DB2 only allows * in SELECT if it is the only thing being selected.
def supports_select_all_and_column?
false
end
# DB2 does not support fractional seconds in timestamps.
def supports_timestamp_usecs?
false
end
# DB2 supports window functions
def supports_window_functions?
true
end
# DB2 does not support WHERE 1.
def supports_where_true?
false
end
private
def empty_from_sql
EMPTY_FROM_TABLE
end
# DB2 needs the standard workaround to insert all default values into
# a table with more than one column.
def insert_supports_empty_values?
false
end
# Use 0 for false on DB2
def literal_false
BOOL_FALSE
end
# Use 1 for true on DB2
def literal_true
BOOL_TRUE
end
# DB2 uses a literal hexidecimal number for blob strings
def literal_blob_append(sql, v)
if ::Sequel::DB2.use_clob_as_blob
super
else
sql << BLOB_OPEN << v.unpack(HSTAR).first << BLOB_CLOSE
end
end
# DB2 can insert multiple rows using a UNION
def multi_insert_sql_strategy
:union
end
# DB2 does not require that ROW_NUMBER be ordered.
def require_offset_order?
false
end
# Modify the sql to limit the number of rows returned
# Note:
#
# After db2 v9.7, MySQL flavored "LIMIT X OFFSET Y" can be enabled using
#
# db2set DB2_COMPATIBILITY_VECTOR=MYSQL
# db2stop
# db2start
#
# Support for this feature is not used in this adapter however.
def select_limit_sql(sql)
if l = @opts[:limit]
if l == 1
sql << FETCH_FIRST_ROW_ONLY
else
sql << FETCH_FIRST
literal_append(sql, l)
sql << ROWS_ONLY
end
end
end
# DB2 supports quoted function names.
def supports_quoted_function_names?
true
end
def _truncate_sql(table)
# "TRUNCATE #{table} IMMEDIATE" is only for newer version of db2, so we
# use the following one
"ALTER TABLE #{quote_schema_table(table)} ACTIVATE NOT LOGGED INITIALLY WITH EMPTY TABLE"
end
end
end
end
Add max_length schema parsing support on DB2
Sequel.require 'adapters/utils/emulate_offset_with_row_number'
module Sequel
module DB2
@use_clob_as_blob = false
class << self
# Whether to use clob as the generic File type, true by default.
attr_accessor :use_clob_as_blob
end
module DatabaseMethods
extend Sequel::Database::ResetIdentifierMangling
AUTOINCREMENT = 'GENERATED ALWAYS AS IDENTITY'.freeze
NOT_NULL = ' NOT NULL'.freeze
NULL = ''.freeze
# DB2 always uses :db2 as it's database type
def database_type
:db2
end
# Return the database version as a string. Don't rely on this,
# it may return an integer in the future.
def db2_version
return @db2_version if @db2_version
@db2_version = metadata_dataset.with_sql("select service_level from sysibmadm.env_inst_info").first[:service_level]
end
alias_method :server_version, :db2_version
# Use SYSIBM.SYSCOLUMNS to get the information on the tables.
def schema_parse_table(table, opts = OPTS)
m = output_identifier_meth(opts[:dataset])
im = input_identifier_meth(opts[:dataset])
metadata_dataset.with_sql("SELECT * FROM SYSIBM.SYSCOLUMNS WHERE TBNAME = #{literal(im.call(table))} ORDER BY COLNO").
collect do |column|
column[:db_type] = column.delete(:typename)
if column[:db_type] == "DECIMAL"
column[:db_type] << "(#{column[:longlength]},#{column[:scale]})"
end
column[:allow_null] = column.delete(:nulls) == 'Y'
column[:primary_key] = column.delete(:identity) == 'Y' || !column[:keyseq].nil?
column[:type] = schema_column_type(column[:db_type])
column[:max_length] = column[:longlength] if column[:type] == :string
[ m.call(column.delete(:name)), column]
end
end
# Use SYSCAT.TABLES to get the tables for the database
def tables
metadata_dataset.
with_sql("SELECT TABNAME FROM SYSCAT.TABLES WHERE TYPE='T' AND OWNER = #{literal(input_identifier_meth.call(opts[:user]))}").
all.map{|h| output_identifier_meth.call(h[:tabname]) }
end
# Use SYSCAT.TABLES to get the views for the database
def views
metadata_dataset.
with_sql("SELECT TABNAME FROM SYSCAT.TABLES WHERE TYPE='V' AND OWNER = #{literal(input_identifier_meth.call(opts[:user]))}").
all.map{|h| output_identifier_meth.call(h[:tabname]) }
end
# Use SYSCAT.INDEXES to get the indexes for the table
def indexes(table, opts = OPTS)
m = output_identifier_meth
indexes = {}
metadata_dataset.
from(:syscat__indexes).
select(:indname, :uniquerule, :colnames).
where(:tabname=>input_identifier_meth.call(table), :system_required=>0).
each do |r|
indexes[m.call(r[:indname])] = {:unique=>(r[:uniquerule]=='U'), :columns=>r[:colnames][1..-1].split('+').map{|v| m.call(v)}}
end
indexes
end
# DB2 supports transaction isolation levels.
def supports_transaction_isolation_levels?
true
end
private
# Handle DB2 specific alter table operations.
def alter_table_sql(table, op)
case op[:op]
when :add_column
if op[:primary_key] && op[:auto_increment] && op[:type] == Integer
[
"ALTER TABLE #{quote_schema_table(table)} ADD #{column_definition_sql(op.merge(:auto_increment=>false, :primary_key=>false, :default=>0, :null=>false))}",
"ALTER TABLE #{quote_schema_table(table)} ALTER COLUMN #{literal(op[:name])} DROP DEFAULT",
"ALTER TABLE #{quote_schema_table(table)} ALTER COLUMN #{literal(op[:name])} SET #{AUTOINCREMENT}"
]
else
"ALTER TABLE #{quote_schema_table(table)} ADD #{column_definition_sql(op)}"
end
when :drop_column
"ALTER TABLE #{quote_schema_table(table)} DROP #{column_definition_sql(op)}"
when :rename_column # renaming is only possible after db2 v9.7
"ALTER TABLE #{quote_schema_table(table)} RENAME COLUMN #{quote_identifier(op[:name])} TO #{quote_identifier(op[:new_name])}"
when :set_column_type
"ALTER TABLE #{quote_schema_table(table)} ALTER COLUMN #{quote_identifier(op[:name])} SET DATA TYPE #{type_literal(op)}"
when :set_column_default
"ALTER TABLE #{quote_schema_table(table)} ALTER COLUMN #{quote_identifier(op[:name])} SET DEFAULT #{literal(op[:default])}"
when :add_constraint
if op[:type] == :unique
sqls = op[:columns].map{|c| ["ALTER TABLE #{quote_schema_table(table)} ALTER COLUMN #{quote_identifier(c)} SET NOT NULL", reorg_sql(table)]}
sqls << super
sqls.flatten
else
super
end
else
super
end
end
# DB2 uses an identity column for autoincrement.
def auto_increment_sql
AUTOINCREMENT
end
# Add null/not null SQL fragment to column creation SQL.
def column_definition_null_sql(sql, column)
null = column.fetch(:null, column[:allow_null])
null = false if column[:primary_key]
sql << NOT_NULL if null == false
sql << NULL if null == true
end
# Supply columns with NOT NULL if they are part of a composite
# primary key or unique constraint
def column_list_sql(g)
ks = []
g.constraints.each{|c| ks = c[:columns] if [:primary_key, :unique].include?(c[:type])}
g.columns.each{|c| c[:null] = false if ks.include?(c[:name]) }
super
end
# Insert data from the current table into the new table after
# creating the table, since it is not possible to do it in one step.
def create_table_as(name, sql, options)
super
from(name).insert(sql.is_a?(Dataset) ? sql : dataset.with_sql(sql))
end
# DB2 requires parens around the SELECT, and DEFINITION ONLY at the end.
def create_table_as_sql(name, sql, options)
"#{create_table_prefix_sql(name, options)} AS (#{sql}) DEFINITION ONLY"
end
# Here we use DGTT which has most backward compatibility, which uses
# DECLARE instead of CREATE. CGTT can only be used after version 9.7.
# http://www.ibm.com/developerworks/data/library/techarticle/dm-0912globaltemptable/
def create_table_prefix_sql(name, options)
if options[:temp]
"DECLARE GLOBAL TEMPORARY TABLE #{quote_identifier(name)}"
else
super
end
end
# Maps adapter error-message patterns to the Sequel exception class that
# should be raised. Each alternative matches either the JDBC-style
# "DB2 SQL Error: SQLCODE=..." prefix or the native client message text.
DATABASE_ERROR_REGEXPS = {
  /DB2 SQL Error: SQLCODE=-803, SQLSTATE=23505|One or more values in the INSERT statement, UPDATE statement, or foreign key update caused by a DELETE statement are not valid because the primary key, unique constraint or unique index/ => UniqueConstraintViolation,
  /DB2 SQL Error: (SQLCODE=-530, SQLSTATE=23503|SQLCODE=-532, SQLSTATE=23504)|The insert or update value of the FOREIGN KEY .+ is not equal to any value of the parent key of the parent table|A parent row cannot be deleted because the relationship .+ restricts the deletion/ => ForeignKeyConstraintViolation,
  /DB2 SQL Error: SQLCODE=-545, SQLSTATE=23513|The requested operation is not allowed because a row does not satisfy the check constraint/ => CheckConstraintViolation,
  /DB2 SQL Error: SQLCODE=-407, SQLSTATE=23502|Assignment of a NULL value to a NOT NULL column/ => NotNullConstraintViolation,
  /DB2 SQL Error: SQLCODE=-911, SQLSTATE=40001|The current transaction has been rolled back because of a deadlock or timeout/ => SerializationFailure,
}.freeze

# Expose the regexp table to Sequel's generic error-wrapping machinery.
def database_error_regexps
  DATABASE_ERROR_REGEXPS
end
# Identifier quoting is disabled by default, since DB2 has issues
# with quoted identifiers.
def quote_identifiers_default
  false
end
# DB2 renames tables with the RENAME TABLE statement.
def rename_table_sql(name, new_name)
  old_sql = quote_schema_table(name)
  new_sql = quote_schema_table(new_name)
  "RENAME TABLE #{old_sql} TO #{new_sql}"
end
# Run the REORG TABLE command for the table; DB2 requires this after
# certain table alterations.
def reorg(table)
  synchronize(opts[:server]) do |conn|
    conn.execute(reorg_sql(table))
  end
end
# The SQL used to REORG a table, issued through the ADMIN_CMD procedure.
def reorg_sql(table)
  command = "REORG TABLE #{table}"
  "CALL ADMIN_CMD(#{literal(command)})"
end
# Report clob columns as blob when use_clob_as_blob is enabled;
# otherwise defer to the default type mapping.
def schema_column_type(db_type)
  if ::Sequel::DB2::use_clob_as_blob && db_type.downcase == 'clob'
    :blob
  else
    super
  end
end
# SQL to set the transaction isolation level (DB2's SET CURRENT
# ISOLATION statement).
def set_transaction_isolation_sql(level)
  isolation = Database::TRANSACTION_ISOLATION_LEVELS[level]
  "SET CURRENT ISOLATION #{isolation}"
end
# Use clob for File values by default.
# Note: if the user opts into blob storage, inserts should cast the
# value explicitly, e.g. cast(X'fffefdfcfbfa' as blob(2G)).
def type_literal_generic_file(column)
  if ::Sequel::DB2::use_clob_as_blob
    :clob
  else
    :blob
  end
end
# DB2 has no native boolean type; booleans are stored as smallint.
def type_literal_generic_trueclass(column)
  :smallint
end
# FalseClass columns use the same storage type.
alias type_literal_generic_falseclass type_literal_generic_trueclass
# Text (unbounded string) columns are stored as clob on DB2.
def uses_clob_for_text?
  true
end
# DB2 supports CREATE VIEW ... WITH LOCAL CHECK OPTION.
def view_with_check_option_support
  :local
end
end
# Dataset methods shared by all DB2 adapters: SQL-generation quirks,
# capability flags, and literalization rules specific to DB2.
module DatasetMethods
  include EmulateOffsetWithRowNumber

  # Frozen SQL fragments, reused to avoid per-call string allocation.
  PAREN_CLOSE = Dataset::PAREN_CLOSE
  PAREN_OPEN = Dataset::PAREN_OPEN
  BITWISE_METHOD_MAP = {:& =>:BITAND, :| => :BITOR, :^ => :BITXOR, :'B~'=>:BITNOT}
  EMULATED_FUNCTION_MAP = {:char_length=>'length'.freeze}
  BOOL_TRUE = '1'.freeze
  BOOL_FALSE = '0'.freeze
  CAST_STRING_OPEN = "RTRIM(CHAR(".freeze
  CAST_STRING_CLOSE = "))".freeze
  FETCH_FIRST_ROW_ONLY = " FETCH FIRST ROW ONLY".freeze
  FETCH_FIRST = " FETCH FIRST ".freeze
  ROWS_ONLY = " ROWS ONLY".freeze
  EMPTY_FROM_TABLE = ' FROM "SYSIBM"."SYSDUMMY1"'.freeze
  HSTAR = "H*".freeze
  BLOB_OPEN = "BLOB(X'".freeze
  BLOB_CLOSE = "')".freeze

  # DB2 casts strings using RTRIM and CHAR instead of VARCHAR.
  def cast_sql_append(sql, expr, type)
    if(type == String)
      sql << CAST_STRING_OPEN
      literal_append(sql, expr)
      sql << CAST_STRING_CLOSE
    else
      super
    end
  end

  # Emulate bitwise operators via DB2's BITAND/BITOR/BITXOR/BITNOT
  # functions, and render :extract as FUNC(arg) rather than the
  # standard EXTRACT syntax.
  def complex_expression_sql_append(sql, op, args)
    case op
    when :&, :|, :^, :%, :<<, :>>
      complex_expression_emulate_append(sql, op, args)
    when :'B~'
      literal_append(sql, SQL::Function.new(:BITNOT, *args))
    when :extract
      sql << args.at(0).to_s
      sql << PAREN_OPEN
      literal_append(sql, args.at(1))
      sql << PAREN_CLOSE
    else
      super
    end
  end

  # DB2 supports common table expressions, but only in SELECT statements.
  def supports_cte?(type=:select)
    type == :select
  end

  # DB2 supports GROUP BY CUBE
  def supports_group_cube?
    true
  end

  # DB2 supports GROUP BY ROLLUP
  def supports_group_rollup?
    true
  end

  # DB2 does not support IS TRUE.
  def supports_is_true?
    false
  end

  # DB2 supports lateral subqueries
  def supports_lateral_subqueries?
    true
  end

  # DB2 does not support multiple columns in IN.
  def supports_multiple_column_in?
    false
  end

  # DB2 only allows * in SELECT if it is the only thing being selected.
  def supports_select_all_and_column?
    false
  end

  # DB2 does not support fractional seconds in timestamps.
  def supports_timestamp_usecs?
    false
  end

  # DB2 supports window functions
  def supports_window_functions?
    true
  end

  # DB2 does not support WHERE 1.
  def supports_where_true?
    false
  end

  private

  # DB2 requires a FROM clause, so select against SYSIBM.SYSDUMMY1
  # when the dataset has no source table.
  def empty_from_sql
    EMPTY_FROM_TABLE
  end

  # DB2 needs the standard workaround to insert all default values into
  # a table with more than one column.
  def insert_supports_empty_values?
    false
  end

  # Use 0 for false on DB2
  def literal_false
    BOOL_FALSE
  end

  # Use 1 for true on DB2
  def literal_true
    BOOL_TRUE
  end

  # DB2 uses a literal hexadecimal number for blob strings, unless
  # clobs are being used in place of blobs.
  def literal_blob_append(sql, v)
    if ::Sequel::DB2.use_clob_as_blob
      super
    else
      sql << BLOB_OPEN << v.unpack(HSTAR).first << BLOB_CLOSE
    end
  end

  # DB2 can insert multiple rows using a UNION
  def multi_insert_sql_strategy
    :union
  end

  # DB2 does not require that ROW_NUMBER be ordered.
  def require_offset_order?
    false
  end

  # Modify the sql to limit the number of rows returned using
  # FETCH FIRST ... ROWS ONLY.
  # Note:
  #
  #   After db2 v9.7, MySQL flavored "LIMIT X OFFSET Y" can be enabled using
  #
  #     db2set DB2_COMPATIBILITY_VECTOR=MYSQL
  #     db2stop
  #     db2start
  #
  #   Support for this feature is not used in this adapter however.
  def select_limit_sql(sql)
    if l = @opts[:limit]
      if l == 1
        sql << FETCH_FIRST_ROW_ONLY
      else
        sql << FETCH_FIRST
        literal_append(sql, l)
        sql << ROWS_ONLY
      end
    end
  end

  # DB2 supports quoted function names.
  def supports_quoted_function_names?
    true
  end

  # Empty the table without logging each deleted row.
  def _truncate_sql(table)
    # "TRUNCATE #{table} IMMEDIATE" is only for newer version of db2, so we
    # use the following one
    "ALTER TABLE #{quote_schema_table(table)} ACTIVATE NOT LOGGED INITIALLY WITH EMPTY TABLE"
  end
end
end
end
|
module HowIs
  # Raised when Report.export! is asked for a file extension that has
  # no corresponding report class.
  class UnsupportedExportFormat < StandardError
    def initialize(format)
      super("Unsupported export format: #{format}")
    end
  end

  ##
  # Represents a completed report.
  class BaseReport < Struct.new(:analysis, :file)
    # Returns the underlying analysis as a Hash.
    def to_h
      analysis.to_h
    end
    alias :to_hash :to_h

    # Accept and forward the standard JSON generator arguments so that
    # instances serialize correctly when nested inside other objects
    # passed to JSON.generate (previously the generator's state argument
    # raised ArgumentError).
    def to_json(*args)
      to_h.to_json(*args)
    end

    private

    # Builds a one-sentence English summary for issues or pull requests.
    # type is the analysis accessor prefix (e.g. "issue"), type_label the
    # human-readable noun (e.g. "issue").
    def issue_or_pr_summary(type, type_label)
      oldest_date_format = "%b %e, %Y"
      a = analysis
      number_of_type = a.send("number_of_#{type}s")
      "There are #{number_of_type} #{type_label}s open. " +
        "The average #{type_label} age is #{a.send("average_#{type}_age")}, and the " +
        "oldest was opened on #{a.send("oldest_#{type}_date").strftime(oldest_date_format)}."
    end
  end

  class Report
    require 'how_is/report/pdf'
    require 'how_is/report/json'

    # Exports +analysis+ to +file+, choosing the report class from the
    # file extension ("pdf" => PdfReport, "json" => JsonReport).
    #
    # Raises UnsupportedExportFormat when no matching class exists.
    #
    # Returns the report instance.
    def self.export!(analysis, file)
      extension = file.split('.').last
      class_name = "#{extension.capitalize}Report"
      raise UnsupportedExportFormat, extension unless HowIs.const_defined?(class_name)

      report = HowIs.const_get(class_name).new(analysis, file)
      report.export! {
        title "How is #{analysis.repository}?"
        header "Pull Requests"
        text issue_or_pr_summary "pull", "pull request"
        header "Issues"
        text issue_or_pr_summary "issue", "issue"
        header "Issues Per Label"
        # Sort labels by descending issue count.
        issues_per_label = analysis.issues_with_label.to_a.sort_by { |(_label, count)| count.to_i }.reverse
        horizontal_bar_graph issues_per_label
      }
      report
    end
  end
end
Add issues with no label to PDF report.
module HowIs
  # Raised when an export is requested in a format we cannot produce.
  class UnsupportedExportFormat < StandardError
    def initialize(format)
      super("Unsupported export format: #{format}")
    end
  end

  ##
  # Represents a completed report.
  class BaseReport < Struct.new(:analysis, :file)
    # The analysis data as a Hash.
    def to_h
      analysis.to_h
    end
    alias to_hash to_h

    # The analysis data serialized as JSON.
    def to_json
      to_h.to_json
    end

    private

    # One-sentence English summary of the open issues or pull requests.
    def issue_or_pr_summary(type, type_label)
      date_format = "%b %e, %Y"
      a = analysis
      count = a.send("number_of_#{type}s")
      "There are #{count} #{type_label}s open. " \
        "The average #{type_label} age is #{a.send("average_#{type}_age")}, and the " \
        "oldest was opened on #{a.send("oldest_#{type}_date").strftime(date_format)}."
    end
  end

  class Report
    require 'how_is/report/pdf'
    require 'how_is/report/json'

    # Export +analysis+ to +file+, picking the report class from the
    # file extension. Raises UnsupportedExportFormat for unknown ones.
    def self.export!(analysis, file)
      extension = file.split('.').last
      class_name = "#{extension.capitalize}Report"
      unless HowIs.const_defined?(class_name)
        raise UnsupportedExportFormat, extension
      end

      report = HowIs.const_get(class_name).new(analysis, file)
      report.export! do
        title "How is #{analysis.repository}?"
        header "Pull Requests"
        text issue_or_pr_summary("pull", "pull request")
        header "Issues"
        text issue_or_pr_summary("issue", "issue")
        header "Issues Per Label"
        label_counts = analysis.issues_with_label.to_a.sort_by { |(_label, count)| count.to_i }.reverse
        label_counts << ["(No label)", analysis.issues_with_no_label]
        horizontal_bar_graph label_counts
      end
      report
    end
  end
end
|
module Sequel
  class Database
    # ---------------------
    # :section: Methods relating to adapters, connecting, disconnecting, and sharding
    # These methods involve the Database's connection pool.
    # ---------------------

    # Array of supported database adapters
    ADAPTERS = %w'ado amalgalite db2 dbi do firebird informix jdbc mysql odbc openbase oracle postgres sqlite'.collect{|x| x.to_sym}

    # Whether to use the single threaded connection pool by default
    @@single_threaded = false

    # The Database subclass for the given adapter scheme.
    # Raises Sequel::AdapterNotFound if the adapter
    # could not be loaded.
    def self.adapter_class(scheme)
      scheme = scheme.to_s.gsub('-', '_').to_sym
      unless klass = ADAPTER_MAP[scheme]
        # attempt to load the adapter file
        begin
          Sequel.tsk_require "sequel/adapters/#{scheme}"
        rescue LoadError => e
          raise Sequel.convert_exception_class(e, AdapterNotFound)
        end
        # make sure we actually loaded the adapter
        unless klass = ADAPTER_MAP[scheme]
          raise AdapterNotFound, "Could not load #{scheme} adapter"
        end
      end
      klass
    end

    # Returns the scheme for the Database class.
    def self.adapter_scheme
      @scheme
    end

    # Connects to a database. See Sequel.connect.
    def self.connect(conn_string, opts = {})
      case conn_string
      when String
        if match = /\A(jdbc|do):/o.match(conn_string)
          # jdbc/do URIs are passed through whole; the sub-adapter parses them.
          c = adapter_class(match[1].to_sym)
          opts = {:uri=>conn_string}.merge(opts)
        else
          uri = URI.parse(conn_string)
          scheme = uri.scheme
          scheme = :dbi if scheme =~ /\Adbi-/
          c = adapter_class(scheme)
          uri_options = c.send(:uri_to_options, uri)
          # Merge query-string parameters, then URI-unescape all string values.
          uri.query.split('&').collect{|s| s.split('=')}.each{|k,v| uri_options[k.to_sym] = v if k && !k.empty?} unless uri.query.to_s.strip.empty?
          uri_options.entries.each{|k,v| uri_options[k] = URI.unescape(v) if v.is_a?(String)}
          opts = uri_options.merge(opts)
        end
      when Hash
        opts = conn_string.merge(opts)
        c = adapter_class(opts[:adapter] || opts['adapter'])
      else
        raise Error, "Sequel::Database.connect takes either a Hash or a String, given: #{conn_string.inspect}"
      end
      # process opts a bit: symbolize keys and map 'username' to :user
      opts = opts.inject({}) do |m, kv| k, v = *kv
        k = :user if k.to_s == 'username'
        m[k.to_sym] = v
        m
      end
      begin
        db = c.new(opts)
        db.test_connection if opts[:test] && db.send(:typecast_value_boolean, opts[:test])
        result = yield(db) if block_given?
      ensure
        # When used with a block, the database is disconnected and
        # deregistered after the block finishes (even on error).
        if block_given?
          db.disconnect if db
          ::Sequel::DATABASES.delete(db)
        end
      end
      block_given? ? result : db
    end

    # Sets the default single_threaded mode for new databases.
    # See Sequel.single_threaded=.
    def self.single_threaded=(value)
      @@single_threaded = value
    end

    # Sets the adapter scheme for the Database class. Call this method in
    # descendants of Database to allow connection using a URL. For example the
    # following:
    #
    #   class Sequel::MyDB::Database < Sequel::Database
    #     set_adapter_scheme :mydb
    #     ...
    #   end
    #
    # would allow connection using:
    #
    #   Sequel.connect('mydb://user:password@dbserver/mydb')
    def self.set_adapter_scheme(scheme) # :nodoc:
      @scheme = scheme
      ADAPTER_MAP[scheme.to_sym] = self
    end
    private_class_method :set_adapter_scheme

    # The connection pool for this database
    attr_reader :pool

    # Dynamically add new servers or modify server options at runtime. Also adds new
    # servers to the connection pool. Intended for use with master/slave or shard
    # configurations where it is useful to add new server hosts at runtime.
    #
    # servers argument should be a hash with server name symbol keys and hash or
    # proc values. If a servers key is already in use, its value is overridden
    # with the value provided.
    #
    #   DB.add_servers(:f=>{:host=>"hash_host_f"})
    def add_servers(servers)
      @opts[:servers] = @opts[:servers] ? @opts[:servers].merge(servers) : servers
      @pool.add_servers(servers.keys)
    end

    # Connects to the database. This method should be overridden by descendants.
    def connect(server)
      raise NotImplementedError, "#connect should be overridden by adapters"
    end

    # The database type for this database object, the same as the adapter scheme
    # by default. Should be overridden in adapters (especially shared adapters)
    # to be the correct type, so that even if two separate Database objects are
    # using different adapters you can tell that they are using the same database
    # type. Even better, you can tell that two Database objects that are using
    # the same adapter are connecting to different database types (think JDBC or
    # DataObjects).
    def database_type
      self.class.adapter_scheme
    end

    # Disconnects all available connections from the connection pool. Any
    # connections currently in use will not be disconnected. Options:
    # * :servers - Should be a symbol specifying the server to disconnect from,
    #   or an array of symbols to specify multiple servers.
    def disconnect(opts = {})
      pool.disconnect(opts)
    end

    # Yield a new database object for every server in the connection pool.
    # Intended for use in sharded environments where there is a need to make schema
    # modifications (DDL queries) on each shard.
    #
    #   DB.each_server{|db| db.create_table(:users){primary_key :id; String :name}}
    def each_server(&block)
      servers.each{|s| self.class.connect(server_opts(s), &block)}
    end

    # Dynamically remove existing servers from the connection pool. Intended for
    # use with master/slave or shard configurations where it is useful to remove
    # existing server hosts at runtime.
    #
    # servers should be symbols or arrays of symbols. If a nonexistent server
    # is specified, it is ignored. If no servers have been specified for
    # this database, no changes are made. If you attempt to remove the :default server,
    # an error will be raised.
    #
    #   DB.remove_servers(:f1, :f2)
    def remove_servers(*servers)
      if @opts[:servers] && !@opts[:servers].empty?
        servs = @opts[:servers].dup
        servers.flatten!
        servers.each{|s| servs.delete(s)}
        @opts[:servers] = servs
        @pool.remove_servers(servers)
      end
    end

    # An array of servers/shards for this Database object.
    def servers
      pool.servers
    end

    # Returns true if the database is using a single-threaded connection pool.
    def single_threaded?
      @single_threaded
    end

    # Acquires a database connection, yielding it to the passed block.
    def synchronize(server=nil, &block)
      @pool.hold(server || :default, &block)
    end

    # Attempts to acquire a database connection. Returns true if successful.
    # Will probably raise an error if unsuccessful.
    def test_connection(server=nil)
      synchronize(server){|conn|}
      true
    end

    private

    # The default options for the connection pool.
    def connection_pool_default_options
      {}
    end

    # Return the options for the given server by merging the generic
    # options for all servers with the specific options for the given
    # server specified in the :servers option.
    def server_opts(server)
      opts = if @opts[:servers] && server_options = @opts[:servers][server]
        case server_options
        when Hash
          @opts.merge(server_options)
        when Proc
          @opts.merge(server_options.call(self))
        else
          raise Error, 'Server opts should be a hash or proc'
        end
      else
        @opts.dup
      end
      # The merged hash must not carry the full :servers map along.
      opts.delete(:servers)
      opts
    end
  end
end
Remove extra .
module Sequel
  class Database
    # ---------------------
    # :section: Methods relating to adapters, connecting, disconnecting, and sharding
    # These methods involve the Database's connection pool.
    # ---------------------

    # Array of supported database adapters
    ADAPTERS = %w'ado amalgalite db2 dbi do firebird informix jdbc mysql odbc openbase oracle postgres sqlite'.collect{|x| x.to_sym}

    # Whether to use the single threaded connection pool by default
    @@single_threaded = false

    # The Database subclass for the given adapter scheme.
    # Raises Sequel::AdapterNotFound if the adapter
    # could not be loaded.
    def self.adapter_class(scheme)
      scheme = scheme.to_s.gsub('-', '_').to_sym
      unless klass = ADAPTER_MAP[scheme]
        # attempt to load the adapter file
        begin
          Sequel.tsk_require "sequel/adapters/#{scheme}"
        rescue LoadError => e
          raise Sequel.convert_exception_class(e, AdapterNotFound)
        end
        # make sure we actually loaded the adapter
        unless klass = ADAPTER_MAP[scheme]
          raise AdapterNotFound, "Could not load #{scheme} adapter"
        end
      end
      klass
    end

    # Returns the scheme for the Database class.
    def self.adapter_scheme
      @scheme
    end

    # Connects to a database. See Sequel.connect.
    def self.connect(conn_string, opts = {})
      case conn_string
      when String
        if match = /\A(jdbc|do):/o.match(conn_string)
          # jdbc/do URIs are passed through whole; the sub-adapter parses them.
          c = adapter_class(match[1].to_sym)
          opts = {:uri=>conn_string}.merge(opts)
        else
          uri = URI.parse(conn_string)
          scheme = uri.scheme
          scheme = :dbi if scheme =~ /\Adbi-/
          c = adapter_class(scheme)
          uri_options = c.send(:uri_to_options, uri)
          # Merge query-string parameters, then URI-unescape all string values.
          uri.query.split('&').collect{|s| s.split('=')}.each{|k,v| uri_options[k.to_sym] = v if k && !k.empty?} unless uri.query.to_s.strip.empty?
          uri_options.entries.each{|k,v| uri_options[k] = URI.unescape(v) if v.is_a?(String)}
          opts = uri_options.merge(opts)
        end
      when Hash
        opts = conn_string.merge(opts)
        c = adapter_class(opts[:adapter] || opts['adapter'])
      else
        raise Error, "Sequel::Database.connect takes either a Hash or a String, given: #{conn_string.inspect}"
      end
      # process opts a bit: symbolize keys and map 'username' to :user
      opts = opts.inject({}) do |m, kv| k, v = *kv
        k = :user if k.to_s == 'username'
        m[k.to_sym] = v
        m
      end
      begin
        db = c.new(opts)
        db.test_connection if opts[:test] && db.send(:typecast_value_boolean, opts[:test])
        result = yield(db) if block_given?
      ensure
        # When used with a block, the database is disconnected and
        # deregistered after the block finishes (even on error).
        if block_given?
          db.disconnect if db
          ::Sequel::DATABASES.delete(db)
        end
      end
      block_given? ? result : db
    end

    # Sets the default single_threaded mode for new databases.
    # See Sequel.single_threaded=.
    def self.single_threaded=(value)
      @@single_threaded = value
    end

    # Sets the adapter scheme for the Database class. Call this method in
    # descendants of Database to allow connection using a URL. For example the
    # following:
    #
    #   class Sequel::MyDB::Database < Sequel::Database
    #     set_adapter_scheme :mydb
    #     ...
    #   end
    #
    # would allow connection using:
    #
    #   Sequel.connect('mydb://user:password@dbserver/mydb')
    def self.set_adapter_scheme(scheme) # :nodoc:
      @scheme = scheme
      ADAPTER_MAP[scheme.to_sym] = self
    end
    private_class_method :set_adapter_scheme

    # The connection pool for this database
    attr_reader :pool

    # Dynamically add new servers or modify server options at runtime. Also adds new
    # servers to the connection pool. Intended for use with master/slave or shard
    # configurations where it is useful to add new server hosts at runtime.
    #
    # servers argument should be a hash with server name symbol keys and hash or
    # proc values. If a servers key is already in use, its value is overridden
    # with the value provided.
    #
    #   DB.add_servers(:f=>{:host=>"hash_host_f"})
    def add_servers(servers)
      @opts[:servers] = @opts[:servers] ? @opts[:servers].merge(servers) : servers
      @pool.add_servers(servers.keys)
    end

    # Connects to the database. This method should be overridden by descendants.
    def connect(server)
      raise NotImplementedError, "#connect should be overridden by adapters"
    end

    # The database type for this database object, the same as the adapter scheme
    # by default. Should be overridden in adapters (especially shared adapters)
    # to be the correct type, so that even if two separate Database objects are
    # using different adapters you can tell that they are using the same database
    # type. Even better, you can tell that two Database objects that are using
    # the same adapter are connecting to different database types (think JDBC or
    # DataObjects).
    def database_type
      self.class.adapter_scheme
    end

    # Disconnects all available connections from the connection pool. Any
    # connections currently in use will not be disconnected. Options:
    # * :servers - Should be a symbol specifying the server to disconnect from,
    #   or an array of symbols to specify multiple servers.
    def disconnect(opts = {})
      pool.disconnect(opts)
    end

    # Yield a new database object for every server in the connection pool.
    # Intended for use in sharded environments where there is a need to make schema
    # modifications (DDL queries) on each shard.
    #
    #   DB.each_server{|db| db.create_table(:users){primary_key :id; String :name}}
    def each_server(&block)
      servers.each{|s| self.class.connect(server_opts(s), &block)}
    end

    # Dynamically remove existing servers from the connection pool. Intended for
    # use with master/slave or shard configurations where it is useful to remove
    # existing server hosts at runtime.
    #
    # servers should be symbols or arrays of symbols. If a nonexistent server
    # is specified, it is ignored. If no servers have been specified for
    # this database, no changes are made. If you attempt to remove the :default server,
    # an error will be raised.
    #
    #   DB.remove_servers(:f1, :f2)
    def remove_servers(*servers)
      if @opts[:servers] && !@opts[:servers].empty?
        servs = @opts[:servers].dup
        servers.flatten!
        servers.each{|s| servs.delete(s)}
        @opts[:servers] = servs
        @pool.remove_servers(servers)
      end
    end

    # An array of servers/shards for this Database object.
    def servers
      pool.servers
    end

    # Returns true if the database is using a single-threaded connection pool.
    def single_threaded?
      @single_threaded
    end

    # Acquires a database connection, yielding it to the passed block.
    def synchronize(server=nil, &block)
      @pool.hold(server || :default, &block)
    end

    # Attempts to acquire a database connection. Returns true if successful.
    # Will probably raise an error if unsuccessful.
    def test_connection(server=nil)
      synchronize(server){|conn|}
      true
    end

    private

    # The default options for the connection pool.
    def connection_pool_default_options
      {}
    end

    # Return the options for the given server by merging the generic
    # options for all servers with the specific options for the given
    # server specified in the :servers option.
    def server_opts(server)
      opts = if @opts[:servers] && server_options = @opts[:servers][server]
        case server_options
        when Hash
          @opts.merge(server_options)
        when Proc
          @opts.merge(server_options.call(self))
        else
          raise Error, 'Server opts should be a hash or proc'
        end
      else
        @opts.dup
      end
      # The merged hash must not carry the full :servers map along.
      opts.delete(:servers)
      opts
    end
  end
end
|
#!/usr/bin/ruby
# Minimal version of the api.hsmty.org web service
require 'sinatra'
require 'json'
require 'sequel'

# Load configuration scheme to access the database
load 'conf.rb'

get '/' do
  "HSMTY API Web Service"
end

# Current space status as a JSON document.
get '/status.json' do
  headers "Content-type" => "application/json"
  get_status().to_json
end

# Record a state change; expects status=open or status=close.
post '/status' do
  case params[:status]
  when "open"
    is_open = true
  when "close"
    is_open = false
  else
    # Bad request
    halt 400
  end

  dbh = get_dbh()
  current = dbh[:status].reverse_order(:changed).get(:state)
  # Update the DB only if the state of the space has changed.
  if current != is_open
    dbh[:status].insert(
      :state => is_open,
      :changed => Time.now().to_i
    )
  end

  # Return the string 'updated' to the client
  {:status => "Updated"}.to_json
end
# Builds the status document: reads the cached status.json file and
# overlays the latest open/closed state from the database.
#
# Returns a Hash that always contains state.open (false when unknown).
def get_status()
  dbh = get_dbh()
  # Use the block form so the file handle is closed (the previous
  # version leaked it).
  status = File.open("status.json") { |f| JSON.parse(f.read) }
  status = {} unless status.is_a?(Hash)
  # Guard against a missing "state" object: the previous version
  # crashed with NoMethodError reading status["state"]["open"] here.
  state = (status["state"] ||= {})
  state["open"] = dbh[:status].reverse_order(:changed).get(:state) unless dbh.nil?
  # Default to closed when the DB has no rows or no DB is configured.
  state["open"] = false if state["open"].nil?
  return status
end
# Build a Sequel database handle for the configured engine
# (:sqlite or :postgres). Returns nil for any other engine.
def get_dbh()
  case settings.db_engine
  when :sqlite
    Sequel.sqlite(settings.db_file || "/tmp/hsmty.db")
  when :postgres
    Sequel.postgres(
      :host => settings.db_host || "localhost",
      :database => settings.db_name || "api",
      :user => settings.db_user || "postgres",
      :password => settings.db_pass || nil,
    )
  else
    nil
  end
end
Removed unnecessary line in insert; the database now uses now() as the default for the changed column
#!/usr/bin/ruby
# Minimal version of the api.hsmty.org web service
require 'sinatra'
require 'json'
require 'sequel'

# Load configuration scheme to access the database
load 'conf.rb'

get '/' do
  "HSMTY API Web Service"
end

# Current space status as a JSON document.
get '/status.json' do
  headers "Content-type" => "application/json"
  get_status().to_json
end

# Record a state change; expects status=open or status=close.
post '/status' do
  case params[:status]
  when "open"
    is_open = true
  when "close"
    is_open = false
  else
    # Bad request
    halt 400
  end

  dbh = get_dbh()
  current = dbh[:status].reverse_order(:changed).get(:state)
  # Update the DB only if the state of the space has changed
  # (the changed column defaults to now() in the database).
  if current != is_open
    dbh[:status].insert(
      :state => is_open
    )
  end

  # Return the string 'updated' to the client
  {:status => "Updated"}.to_json
end
# Builds the status document: reads the cached status.json file and
# overlays the latest open/closed state from the database.
#
# Returns a Hash that always contains state.open (false when unknown).
def get_status()
  dbh = get_dbh()
  # Use the block form so the file handle is closed (the previous
  # version leaked it).
  status = File.open("status.json") { |f| JSON.parse(f.read) }
  status = {} unless status.is_a?(Hash)
  # Guard against a missing "state" object: the previous version
  # crashed with NoMethodError reading status["state"]["open"] here.
  state = (status["state"] ||= {})
  state["open"] = dbh[:status].reverse_order(:changed).get(:state) unless dbh.nil?
  # Default to closed when the DB has no rows or no DB is configured.
  state["open"] = false if state["open"].nil?
  return status
end
# Build a Sequel database handle for the configured engine
# (:sqlite or :postgres). Returns nil for any other engine.
def get_dbh()
  case settings.db_engine
  when :sqlite
    Sequel.sqlite(settings.db_file || "/tmp/hsmty.db")
  when :postgres
    Sequel.postgres(
      :host => settings.db_host || "localhost",
      :database => settings.db_name || "api",
      :user => settings.db_user || "postgres",
      :password => settings.db_pass || nil,
    )
  else
    nil
  end
end
|
require "sequel_mapper/belongs_to_association_proxy"
require "sequel_mapper/association_proxy"

module SequelMapper
  module Associations
    # Base class for all association types. Holds the datastore handle,
    # the mapping registry, and the shared dirty-tracking map.
    class Association
      include MapperMethods

      def initialize(datastore:, mappings:, mapping:, dirty_map:)
        @datastore = datastore
        @mappings = mappings
        @mapping_name = mapping
        @dirty_map = dirty_map
      end

      # NOTE(review): the :mapping reader is shadowed by the private
      # #mapping method below, which resolves the name lazily from
      # @mappings — confirm the reader is intentional.
      attr_reader :datastore, :mapping, :dirty_map

      def load(_row)
        raise NotImplementedError
      end

      def dump(_source_object, _collection)
        raise NotImplementedError
      end

      # Associations that own a foreign key (BelongsTo) override this to
      # contribute the key/value pair to the owning row's dump.
      def foreign_key_field(_label, _object)
        {}
      end

      private

      # Resolve the target mapping lazily by name.
      def mapping
        @mappings.fetch(@mapping_name)
      end

      # Proxies report whether they have been materialized; plain
      # collections are always considered loaded.
      def loaded?(collection)
        if collection.respond_to?(:loaded?)
          collection.loaded?
        else
          true
        end
      end

      def added_nodes(collection)
        collection.respond_to?(:added_nodes) ? collection.added_nodes : collection
      end

      def removed_nodes(collection)
        collection.respond_to?(:removed_nodes) ? collection.removed_nodes : []
      end

      # Unloaded proxies only need their additions persisted; loaded
      # collections are persisted in full.
      def nodes_to_persist(collection)
        if loaded?(collection)
          collection
        else
          added_nodes(collection)
        end
      end
    end

    # Association loads the correct associated row from the database,
    # constructs the correct proxy delegating to the RowMapper
    class BelongsTo < Association
      def initialize(foreign_key:, **args)
        @foreign_key = foreign_key
        super(**args)
      end

      attr_reader :foreign_key
      private :foreign_key

      # Returns a proxy that lazily fetches the single associated row
      # (matched on its :id) and maps it to a domain object, recording
      # the raw row in the dirty map along the way.
      def load(row)
        BelongsToAssociationProxy.new(
          datastore[relation_name]
            .where(:id => row.fetch(foreign_key))
            .lazy
            .map { |row| dirty_map.store(row.fetch(:id), row) }
            .map { |row| mapping.load(row) }
            .public_method(:first)
        )
      end

      # Persist the associated object, but only if it was actually
      # loaded (untouched proxies are skipped).
      def dump(_source_object, object)
        unless_already_persisted(object) do |object|
          if loaded?(object)
            upsert_if_dirty(mapping.dump(object))
          end
        end
      end

      # Contributes {foreign_key => associated_object.id} to the
      # owning row's dump.
      def foreign_key_field(name, object)
        {
          foreign_key => object.public_send(name).public_send(:id)
        }
      end
    end

    # One-to-many association: rows in the target relation carry a
    # foreign key pointing back at the source row.
    class HasMany < Association
      # NOTE(review): order_by is accepted but never stored or applied —
      # confirm whether ordering support is still intended.
      def initialize(key:, foreign_key:, order_by: [], **args)
        @key = key
        @foreign_key = foreign_key
        super(**args)
      end

      attr_reader :key, :foreign_key
      private :key, :foreign_key

      # Returns a proxy over the lazily-mapped associated rows.
      def load(row)
        data_enum = datastore[relation_name]
          .where(foreign_key => row.fetch(key))

        AssociationProxy.new(
          data_enum
            .lazy
            .map { |row| dirty_map.store(row.fetch(:id), row) }
            .map { |row| mapping.load(row) }
        )
      end

      # Persist current members and delete rows removed from the
      # collection since it was loaded.
      def dump(_source_object, collection)
        unless_already_persisted(collection) do |collection_proxy|
          persist_nodes(collection)
          remove_deleted_nodes(collection_proxy)
        end
      end

      private

      def persist_nodes(collection)
        nodes_to_persist(collection).each do |object|
          upsert_if_dirty(mapping.dump(object))
        end
      end

      def remove_deleted_nodes(collection)
        removed_nodes(collection).each do |node|
          delete_node(node)
        end
      end

      # NOTE(review): #relation is not defined in this file — presumably
      # provided by MapperMethods; verify.
      def delete_node(node)
        relation.where(id: node.id).delete
      end
    end

    # Many-to-many association mediated by a join ("through") relation.
    class HasManyThrough < Association
      def initialize(through_relation_name:, foreign_key:, association_foreign_key:, **args)
        @through_relation_name = through_relation_name
        @foreign_key = foreign_key
        @association_foreign_key = association_foreign_key
        super(**args)
      end

      attr_reader :through_relation_name, :foreign_key, :association_foreign_key
      private :through_relation_name, :foreign_key, :association_foreign_key

      # Loads the associated rows by joining the target relation to the
      # join table and filtering on the source row's id.
      def load(row)
        AssociationProxy.new(
          datastore[relation_name]
            .join(through_relation_name, association_foreign_key => :id)
            .where(foreign_key => row.fetch(:id))
            .lazy
            .map { |row| dirty_map.store(row.fetch(:id), row) }
            .map { |row| mapping.load(row) }
        )
      end

      # Persist members, then reconcile the join table with additions
      # and removals.
      def dump(source_object, collection)
        unless_already_persisted(collection) do |collection|
          persist_nodes(collection)
          associate_new_nodes(source_object, collection)
          dissociate_removed_nodes(source_object, collection)
        end
      end

      private

      def persist_nodes(collection)
        nodes_to_persist(collection).each do |object|
          upsert_if_dirty(mapping.dump(object))
        end
      end

      # Insert join-table rows for objects newly added to the collection.
      def associate_new_nodes(source_object, collection)
        new_nodes = collection.respond_to?(:added_nodes) ?
          collection.added_nodes : []

        new_nodes.each do |node|
          through_relation.insert(
            foreign_key => source_object.public_send(:id),
            association_foreign_key => node.public_send(:id),
          )
        end
      end

      # Delete join-table rows whose target is no longer in the collection.
      def dissociate_removed_nodes(source_object, collection)
        through_relation
          .where(foreign_key => source_object.public_send(:id))
          .exclude(association_foreign_key => collection.map(&:id))
          .delete
      end

      def through_relation
        datastore[through_relation_name]
      end
    end
  end
end
Remove join in favour of subselect
require "sequel_mapper/belongs_to_association_proxy"
require "sequel_mapper/association_proxy"

module SequelMapper
  module Associations
    # Base class for all association types. Holds the datastore handle,
    # the mapping registry, and the shared dirty-tracking map.
    class Association
      include MapperMethods

      def initialize(datastore:, mappings:, mapping:, dirty_map:)
        @datastore = datastore
        @mappings = mappings
        @mapping_name = mapping
        @dirty_map = dirty_map
      end

      # NOTE(review): the :mapping reader is shadowed by the private
      # #mapping method below, which resolves the name lazily from
      # @mappings — confirm the reader is intentional.
      attr_reader :datastore, :mapping, :dirty_map

      def load(_row)
        raise NotImplementedError
      end

      def dump(_source_object, _collection)
        raise NotImplementedError
      end

      # Associations that own a foreign key (BelongsTo) override this to
      # contribute the key/value pair to the owning row's dump.
      def foreign_key_field(_label, _object)
        {}
      end

      private

      # Resolve the target mapping lazily by name.
      def mapping
        @mappings.fetch(@mapping_name)
      end

      # Proxies report whether they have been materialized; plain
      # collections are always considered loaded.
      def loaded?(collection)
        if collection.respond_to?(:loaded?)
          collection.loaded?
        else
          true
        end
      end

      def added_nodes(collection)
        collection.respond_to?(:added_nodes) ? collection.added_nodes : collection
      end

      def removed_nodes(collection)
        collection.respond_to?(:removed_nodes) ? collection.removed_nodes : []
      end

      # Unloaded proxies only need their additions persisted; loaded
      # collections are persisted in full.
      def nodes_to_persist(collection)
        if loaded?(collection)
          collection
        else
          added_nodes(collection)
        end
      end
    end

    # Association loads the correct associated row from the database,
    # constructs the correct proxy delegating to the RowMapper
    class BelongsTo < Association
      def initialize(foreign_key:, **args)
        @foreign_key = foreign_key
        super(**args)
      end

      attr_reader :foreign_key
      private :foreign_key

      # Returns a proxy that lazily fetches the single associated row
      # (matched on its :id) and maps it to a domain object, recording
      # the raw row in the dirty map along the way.
      def load(row)
        BelongsToAssociationProxy.new(
          datastore[relation_name]
            .where(:id => row.fetch(foreign_key))
            .lazy
            .map { |row| dirty_map.store(row.fetch(:id), row) }
            .map { |row| mapping.load(row) }
            .public_method(:first)
        )
      end

      # Persist the associated object, but only if it was actually
      # loaded (untouched proxies are skipped).
      def dump(_source_object, object)
        unless_already_persisted(object) do |object|
          if loaded?(object)
            upsert_if_dirty(mapping.dump(object))
          end
        end
      end

      # Contributes {foreign_key => associated_object.id} to the
      # owning row's dump.
      def foreign_key_field(name, object)
        {
          foreign_key => object.public_send(name).public_send(:id)
        }
      end
    end

    # One-to-many association: rows in the target relation carry a
    # foreign key pointing back at the source row.
    class HasMany < Association
      # NOTE(review): order_by is accepted but never stored or applied —
      # confirm whether ordering support is still intended.
      def initialize(key:, foreign_key:, order_by: [], **args)
        @key = key
        @foreign_key = foreign_key
        super(**args)
      end

      attr_reader :key, :foreign_key
      private :key, :foreign_key

      # Returns a proxy over the lazily-mapped associated rows.
      def load(row)
        data_enum = datastore[relation_name]
          .where(foreign_key => row.fetch(key))

        AssociationProxy.new(
          data_enum
            .lazy
            .map { |row| dirty_map.store(row.fetch(:id), row) }
            .map { |row| mapping.load(row) }
        )
      end

      # Persist current members and delete rows removed from the
      # collection since it was loaded.
      def dump(_source_object, collection)
        unless_already_persisted(collection) do |collection_proxy|
          persist_nodes(collection)
          remove_deleted_nodes(collection_proxy)
        end
      end

      private

      def persist_nodes(collection)
        nodes_to_persist(collection).each do |object|
          upsert_if_dirty(mapping.dump(object))
        end
      end

      def remove_deleted_nodes(collection)
        removed_nodes(collection).each do |node|
          delete_node(node)
        end
      end

      # NOTE(review): #relation is not defined in this file — presumably
      # provided by MapperMethods; verify.
      def delete_node(node)
        relation.where(id: node.id).delete
      end
    end

    # Many-to-many association mediated by a join ("through") relation.
    class HasManyThrough < Association
      def initialize(through_relation_name:, foreign_key:, association_foreign_key:, **args)
        @through_relation_name = through_relation_name
        @foreign_key = foreign_key
        @association_foreign_key = association_foreign_key
        super(**args)
      end

      attr_reader :through_relation_name, :foreign_key, :association_foreign_key
      private :through_relation_name, :foreign_key, :association_foreign_key

      # Loads the associated rows using a subselect of ids from the join
      # table rather than a JOIN, keeping the selected columns unambiguous.
      def load(row)
        ids = datastore[through_relation_name]
          .select(association_foreign_key)
          .where(foreign_key => row.fetch(:id))

        AssociationProxy.new(
          datastore[relation_name]
            .where(:id => ids)
            .lazy
            .map { |row| dirty_map.store(row.fetch(:id), row) }
            .map { |row| mapping.load(row) }
        )
      end

      # Persist members, then reconcile the join table with additions
      # and removals.
      def dump(source_object, collection)
        unless_already_persisted(collection) do |collection|
          persist_nodes(collection)
          associate_new_nodes(source_object, collection)
          dissociate_removed_nodes(source_object, collection)
        end
      end

      private

      def persist_nodes(collection)
        nodes_to_persist(collection).each do |object|
          upsert_if_dirty(mapping.dump(object))
        end
      end

      # Insert join-table rows for objects newly added to the collection.
      def associate_new_nodes(source_object, collection)
        new_nodes = collection.respond_to?(:added_nodes) ?
          collection.added_nodes : []

        new_nodes.each do |node|
          through_relation.insert(
            foreign_key => source_object.public_send(:id),
            association_foreign_key => node.public_send(:id),
          )
        end
      end

      # Delete join-table rows whose target is no longer in the collection.
      def dissociate_removed_nodes(source_object, collection)
        through_relation
          .where(foreign_key => source_object.public_send(:id))
          .exclude(association_foreign_key => collection.map(&:id))
          .delete
      end

      def through_relation
        datastore[through_relation_name]
      end
    end
  end
end
|
module HTTP
  # An HTTP response: status, headers, and a body supplied either
  # eagerly as a string or lazily through a chunk-producing proc.
  class Response
    # Canonical reason phrases keyed by numeric status code.
    STATUS_CODES = {
      100 => 'Continue',
      101 => 'Switching Protocols',
      102 => 'Processing',
      200 => 'OK',
      201 => 'Created',
      202 => 'Accepted',
      203 => 'Non-Authoritative Information',
      204 => 'No Content',
      205 => 'Reset Content',
      206 => 'Partial Content',
      207 => 'Multi-Status',
      226 => 'IM Used',
      300 => 'Multiple Choices',
      301 => 'Moved Permanently',
      302 => 'Found',
      303 => 'See Other',
      304 => 'Not Modified',
      305 => 'Use Proxy',
      306 => 'Reserved',
      307 => 'Temporary Redirect',
      400 => 'Bad Request',
      401 => 'Unauthorized',
      402 => 'Payment Required',
      403 => 'Forbidden',
      404 => 'Not Found',
      405 => 'Method Not Allowed',
      406 => 'Not Acceptable',
      407 => 'Proxy Authentication Required',
      408 => 'Request Timeout',
      409 => 'Conflict',
      410 => 'Gone',
      411 => 'Length Required',
      412 => 'Precondition Failed',
      413 => 'Request Entity Too Large',
      414 => 'Request-URI Too Long',
      415 => 'Unsupported Media Type',
      416 => 'Requested Range Not Satisfiable',
      417 => 'Expectation Failed',
      418 => "I'm a Teapot",
      422 => 'Unprocessable Entity',
      423 => 'Locked',
      424 => 'Failed Dependency',
      426 => 'Upgrade Required',
      500 => 'Internal Server Error',
      501 => 'Not Implemented',
      502 => 'Bad Gateway',
      503 => 'Service Unavailable',
      504 => 'Gateway Timeout',
      505 => 'HTTP Version Not Supported',
      506 => 'Variant Also Negotiates',
      507 => 'Insufficient Storage',
      510 => 'Not Extended'
    }.freeze

    # Maps snake_case symbols (e.g. :not_found) to numeric status codes.
    SYMBOL_TO_STATUS_CODE = Hash[STATUS_CODES.map { |code, msg| [msg.downcase.gsub(/\s|-/, '_').to_sym, code] }].freeze

    attr_reader :status
    attr_reader :headers

    # Status aliases! TIMTOWTDI!!! (Want to be idiomatic? Just use status :)
    alias_method :code, :status
    alias_method :status_code, :status

    # status    - numeric status code.
    # version   - HTTP version string, e.g. "1.1".
    # headers   - hash of header fields, canonicalized on the way in.
    # body      - the response body string, when already available.
    # body_proc - proc yielding successive body chunks, nil when done.
    def initialize(status = nil, version = "1.1", headers = {}, body = nil, &body_proc)
      @status, @version, @body, @body_proc = status, version, body, body_proc
      @headers = {}
      headers.each do |field, value|
        @headers[Http.canonicalize_header(field)] = value
      end
    end

    # Set a header. Setting the same header twice groups the values
    # into an array.
    def []=(name, value)
      # If we have a canonical header, we're done
      key = name[CANONICAL_HEADER]
      # Convert to canonical capitalization
      key ||= Http.canonicalize_header(name)
      # Check if the header has already been set and group
      old_value = @headers[key]
      if old_value
        # BUG FIX: append the new *value*; the original appended `key`,
        # so a repeated header was grouped as [old_value, "Header-Name"]
        # instead of [old_value, value].
        @headers[key] = [old_value].flatten << value
      else
        @headers[key] = value
      end
    end

    # Get a header value, trying the name as given and then in its
    # canonical capitalization.
    def [](name)
      @headers[name] || @headers[Http.canonicalize_header(name)]
    end

    # Obtain the response body. With a block, each chunk is yielded as
    # it arrives and nil is returned; without one, the chunks are
    # concatenated, memoized in @body, and returned.
    def body
      @body ||= begin
        raise "no body available for this response" unless @body_proc
        body = "" unless block_given?
        while (chunk = @body_proc.call)
          if block_given?
            yield chunk
          else
            body << chunk
          end
        end
        body unless block_given?
      end
    end

    # Parse the response body according to its content type; falls back
    # to the raw body when no MIME type handler is registered.
    def parse_body
      if @headers['Content-Type']
        mime_type = MimeType[@headers['Content-Type'].split(/;\s*/).first]
        return mime_type.parse(body) if mime_type
      end
      body
    end

    # Returns an Array ala Rack: `[status, headers, body]`
    def to_a
      [status, headers, parse_body]
    end
  end
end
Nicer string inspect for HTTP::Response objects
module HTTP
  # An HTTP response: status, headers, and a body supplied either
  # eagerly as a string or lazily through a chunk-producing proc.
  class Response
    # Canonical reason phrases keyed by numeric status code.
    STATUS_CODES = {
      100 => 'Continue',
      101 => 'Switching Protocols',
      102 => 'Processing',
      200 => 'OK',
      201 => 'Created',
      202 => 'Accepted',
      203 => 'Non-Authoritative Information',
      204 => 'No Content',
      205 => 'Reset Content',
      206 => 'Partial Content',
      207 => 'Multi-Status',
      226 => 'IM Used',
      300 => 'Multiple Choices',
      301 => 'Moved Permanently',
      302 => 'Found',
      303 => 'See Other',
      304 => 'Not Modified',
      305 => 'Use Proxy',
      306 => 'Reserved',
      307 => 'Temporary Redirect',
      400 => 'Bad Request',
      401 => 'Unauthorized',
      402 => 'Payment Required',
      403 => 'Forbidden',
      404 => 'Not Found',
      405 => 'Method Not Allowed',
      406 => 'Not Acceptable',
      407 => 'Proxy Authentication Required',
      408 => 'Request Timeout',
      409 => 'Conflict',
      410 => 'Gone',
      411 => 'Length Required',
      412 => 'Precondition Failed',
      413 => 'Request Entity Too Large',
      414 => 'Request-URI Too Long',
      415 => 'Unsupported Media Type',
      416 => 'Requested Range Not Satisfiable',
      417 => 'Expectation Failed',
      418 => "I'm a Teapot",
      422 => 'Unprocessable Entity',
      423 => 'Locked',
      424 => 'Failed Dependency',
      426 => 'Upgrade Required',
      500 => 'Internal Server Error',
      501 => 'Not Implemented',
      502 => 'Bad Gateway',
      503 => 'Service Unavailable',
      504 => 'Gateway Timeout',
      505 => 'HTTP Version Not Supported',
      506 => 'Variant Also Negotiates',
      507 => 'Insufficient Storage',
      510 => 'Not Extended'
    }.freeze

    # Maps snake_case symbols (e.g. :not_found) to numeric status codes.
    SYMBOL_TO_STATUS_CODE = Hash[STATUS_CODES.map { |code, msg| [msg.downcase.gsub(/\s|-/, '_').to_sym, code] }].freeze

    attr_reader :status
    attr_reader :headers

    # Status aliases! TIMTOWTDI!!! (Want to be idiomatic? Just use status :)
    alias_method :code, :status
    alias_method :status_code, :status

    # status    - numeric status code.
    # version   - HTTP version string, e.g. "1.1".
    # headers   - hash of header fields, canonicalized on the way in.
    # body      - the response body string, when already available.
    # body_proc - proc yielding successive body chunks, nil when done.
    def initialize(status = nil, version = "1.1", headers = {}, body = nil, &body_proc)
      @status, @version, @body, @body_proc = status, version, body, body_proc
      @headers = {}
      headers.each do |field, value|
        @headers[Http.canonicalize_header(field)] = value
      end
    end

    # Set a header. Setting the same header twice groups the values
    # into an array.
    def []=(name, value)
      # If we have a canonical header, we're done
      key = name[CANONICAL_HEADER]
      # Convert to canonical capitalization
      key ||= Http.canonicalize_header(name)
      # Check if the header has already been set and group
      old_value = @headers[key]
      if old_value
        # BUG FIX: append the new *value*; the original appended `key`,
        # so a repeated header was grouped as [old_value, "Header-Name"]
        # instead of [old_value, value].
        @headers[key] = [old_value].flatten << value
      else
        @headers[key] = value
      end
    end

    # Obtain the 'Reason-Phrase' for the response
    def reason
      # FIXME: should get the real reason phrase from the parser
      STATUS_CODES[@status]
    end

    # Get a header value, trying the name as given and then in its
    # canonical capitalization.
    def [](name)
      @headers[name] || @headers[Http.canonicalize_header(name)]
    end

    # Obtain the response body. With a block, each chunk is yielded as
    # it arrives and nil is returned; without one, the chunks are
    # concatenated, memoized in @body, and returned.
    def body
      @body ||= begin
        raise "no body available for this response" unless @body_proc
        body = "" unless block_given?
        while (chunk = @body_proc.call)
          if block_given?
            yield chunk
          else
            body << chunk
          end
        end
        body unless block_given?
      end
    end

    # Parse the response body according to its content type; falls back
    # to the raw body when no MIME type handler is registered.
    def parse_body
      if @headers['Content-Type']
        mime_type = MimeType[@headers['Content-Type'].split(/;\s*/).first]
        return mime_type.parse(body) if mime_type
      end
      body
    end

    # Returns an Array ala Rack: `[status, headers, body]`
    def to_a
      [status, headers, parse_body]
    end

    # Inspect a response
    def inspect
      "#<HTTP/#{@version} #{status} #{reason} @headers=#{@headers.inspect}>"
    end
  end
end
|
require 'hashie'
require 'json'
module SidekiqScheduler
  # Redis-backed schedule management. Mixed into the Sidekiq module at
  # the bottom of this file, so all methods are called as Sidekiq.xxx.
  module Schedule
    # Accepts a new schedule configuration of the form:
    #
    #   {
    #     "MakeTea" => {
    #       "every" => "1m" },
    #     "some_name" => {
    #       "cron" => "5/* * * *",
    #       "class" => "DoSomeWork",
    #       "args" => "work on this string",
    #       "description" => "this thing works its butter off" },
    #     ...
    #   }
    #
    # Hash keys can be anything and are used to describe and reference
    # the scheduled job. If the "class" argument is missing, the key
    # is used implicitly as "class" argument - in the "MakeTea" example,
    # "MakeTea" is used both as job name and sidekiq worker class.
    #
    # :cron can be any cron scheduling string
    #
    # :every can be used in lieu of :cron. see rufus-scheduler's 'every' usage
    # for valid syntax. If :cron is present it will take precedence over :every.
    #
    # :class must be a sidekiq worker class. If it is missing, the job name (hash key)
    # will be used as :class.
    #
    # :args can be any yaml which will be converted to a ruby literal and
    # passed in a params. (optional)
    #
    # :description is just that, a description of the job (optional). If
    # params is an array, each element in the array is passed as a separate
    # param, otherwise params is passed in as the only parameter to perform.
    def schedule=(schedule_hash)
      schedule_hash = prepare_schedule(schedule_hash)
      # Drop any persisted schedules that are no longer configured.
      to_remove = get_all_schedules.keys - schedule_hash.keys.map(&:to_s)
      schedule_hash.each do |name, job_spec|
        set_schedule(name, job_spec)
      end
      to_remove.each do |name|
        remove_schedule(name)
      end
      @schedule = schedule_hash
    end

    # The last schedule hash assigned or reloaded in this process.
    def schedule
      @schedule
    end

    # Reloads the schedule from Redis and return it.
    #
    # @return Hash
    def reload_schedule!
      @schedule = get_schedule
    end

    alias_method :schedule!, :reload_schedule!

    # Retrieve the schedule configuration for the given name;
    # if the name is nil it returns a hash with all the
    # names and their schedules.
    def get_schedule(name = nil)
      if name.nil?
        get_all_schedules
      else
        encoded_schedule = Sidekiq.redis { |r| r.hget(:schedules, name) }
        encoded_schedule.nil? ? nil : JSON(encoded_schedule)
      end
    end

    # Gets the schedule as it exists in redis, decoding each entry.
    def get_all_schedules
      schedules = {}
      if Sidekiq.redis { |r| r.exists(:schedules) }
        Sidekiq.redis { |r| r.hgetall(:schedules) }.tap do |h|
          h.each do |name, config|
            schedules[name] = JSON(config)
          end
        end
      end
      schedules
    end

    # Create or update a schedule with the provided name and configuration.
    #
    # Note: values for class and custom_job_class need to be strings,
    # not constants.
    #
    #   Sidekiq.set_schedule('some_job', { :class => 'SomeJob',
    #                                      :every => '15mins',
    #                                      :queue => 'high',
    #                                      :args => '/tmp/poop' })
    def set_schedule(name, config)
      existing_config = get_schedule(name)
      # NOTE(review): existing_config has string keys (parsed JSON), so
      # a symbol-keyed config never compares equal here -- the write is
      # then merely redundant, not harmful. TODO confirm intent.
      unless existing_config && existing_config == config
        # BUG FIX: serialize explicitly with JSON.generate. With the
        # yajl gem, Kernel#JSON does not dispatch on the argument type
        # and tries to *parse* the config hash instead of dumping it.
        Sidekiq.redis { |r| r.hset(:schedules, name, JSON.generate(config)) }
        Sidekiq.redis { |r| r.sadd(:schedules_changed, name) }
      end
      config
    end

    # Remove a given schedule by name and flag it as changed.
    def remove_schedule(name)
      Sidekiq.redis { |r| r.hdel(:schedules, name) }
      Sidekiq.redis { |r| r.sadd(:schedules_changed, name) }
    end

    private

    # Normalizes a schedule hash: stringifies keys, defaults the job
    # class to the schedule name, and fills in the queue inferred from
    # the worker class when none is given explicitly.
    def prepare_schedule(schedule_hash)
      schedule_hash = Hashie.stringify_keys(schedule_hash)
      prepared_hash = {}
      schedule_hash.each do |name, job_spec|
        job_spec = job_spec.dup
        job_class = job_spec.fetch('class', name)
        inferred_queue = infer_queue(job_class)
        job_spec['class'] ||= job_class
        job_spec['queue'] ||= inferred_queue unless inferred_queue.nil?
        prepared_hash[name] = job_spec
      end
      prepared_hash
    end

    # Infers the queue name from a worker class (Sidekiq worker options
    # or anything exposing #queue_name); returns nil when unknown.
    def infer_queue(klass)
      klass = try_to_constantize(klass)
      if klass.respond_to?(:sidekiq_options)
        klass.sidekiq_options['queue']
      elsif klass.respond_to?(:queue_name)
        klass.queue_name
      end
    end

    # Resolves a class-name string to its constant, returning the input
    # unchanged when it cannot be resolved.
    def try_to_constantize(klass)
      klass.is_a?(String) ? klass.constantize : klass
    rescue NameError
      klass
    end
  end
end
# Expose the schedule management API directly on the Sidekiq module.
Sidekiq.extend SidekiqScheduler::Schedule
Use JSON.generate on SidekiqScheduler::Schedule.set_schedule
The yajl gem doesn't generate or parse according to the type of the received
argument when the `Kernel#JSON` method is invoked; it just tries to parse the value.
In order to fix that, we explicitly invoke JSON.generate.
require 'hashie'
require 'json'
module SidekiqScheduler
  # Redis-backed schedule management. Mixed into the Sidekiq module at
  # the bottom of this file, so all methods are called as Sidekiq.xxx.
  module Schedule
    # Accepts a new schedule configuration of the form:
    #
    #   {
    #     "MakeTea" => {
    #       "every" => "1m" },
    #     "some_name" => {
    #       "cron" => "5/* * * *",
    #       "class" => "DoSomeWork",
    #       "args" => "work on this string",
    #       "description" => "this thing works its butter off" },
    #     ...
    #   }
    #
    # Hash keys can be anything and are used to describe and reference
    # the scheduled job. If the "class" argument is missing, the key
    # is used implicitly as "class" argument - in the "MakeTea" example,
    # "MakeTea" is used both as job name and sidekiq worker class.
    #
    # :cron can be any cron scheduling string
    #
    # :every can be used in lieu of :cron. see rufus-scheduler's 'every' usage
    # for valid syntax. If :cron is present it will take precedence over :every.
    #
    # :class must be a sidekiq worker class. If it is missing, the job name (hash key)
    # will be used as :class.
    #
    # :args can be any yaml which will be converted to a ruby literal and
    # passed in a params. (optional)
    #
    # :description is just that, a description of the job (optional). If
    # params is an array, each element in the array is passed as a separate
    # param, otherwise params is passed in as the only parameter to perform.
    def schedule=(schedule_hash)
      schedule_hash = prepare_schedule(schedule_hash)
      # Drop any persisted schedules that are no longer configured.
      to_remove = get_all_schedules.keys - schedule_hash.keys.map(&:to_s)
      schedule_hash.each do |name, job_spec|
        set_schedule(name, job_spec)
      end
      to_remove.each do |name|
        remove_schedule(name)
      end
      @schedule = schedule_hash
    end

    # The last schedule hash assigned or reloaded in this process.
    def schedule
      @schedule
    end

    # Reloads the schedule from Redis and return it.
    #
    # @return Hash
    def reload_schedule!
      @schedule = get_schedule
    end

    alias_method :schedule!, :reload_schedule!

    # Retrieve the schedule configuration for the given name;
    # if the name is nil it returns a hash with all the
    # names and their schedules.
    def get_schedule(name = nil)
      if name.nil?
        get_all_schedules
      else
        encoded_schedule = Sidekiq.redis { |r| r.hget(:schedules, name) }
        encoded_schedule.nil? ? nil : JSON(encoded_schedule)
      end
    end

    # Gets the schedule as it exists in redis, decoding each entry.
    def get_all_schedules
      schedules = {}
      if Sidekiq.redis { |r| r.exists(:schedules) }
        Sidekiq.redis { |r| r.hgetall(:schedules) }.tap do |h|
          h.each do |name, config|
            schedules[name] = JSON(config)
          end
        end
      end
      schedules
    end

    # Create or update a schedule with the provided name and configuration.
    #
    # Note: values for class and custom_job_class need to be strings,
    # not constants.
    #
    #   Sidekiq.set_schedule('some_job', { :class => 'SomeJob',
    #                                      :every => '15mins',
    #                                      :queue => 'high',
    #                                      :args => '/tmp/poop' })
    def set_schedule(name, config)
      existing_config = get_schedule(name)
      # NOTE(review): existing_config has string keys (parsed JSON), so
      # a symbol-keyed config never compares equal here -- the write is
      # then merely redundant, not harmful. TODO confirm intent.
      unless existing_config && existing_config == config
        # JSON.generate is used explicitly because with the yajl gem
        # Kernel#JSON would try to *parse* the config instead of
        # serializing it.
        Sidekiq.redis { |r| r.hset(:schedules, name, JSON.generate(config)) }
        Sidekiq.redis { |r| r.sadd(:schedules_changed, name) }
      end
      config
    end

    # Remove a given schedule by name and flag it as changed.
    def remove_schedule(name)
      Sidekiq.redis { |r| r.hdel(:schedules, name) }
      Sidekiq.redis { |r| r.sadd(:schedules_changed, name) }
    end

    private

    # Normalizes a schedule hash: stringifies keys, defaults the job
    # class to the schedule name, and fills in the queue inferred from
    # the worker class when none is given explicitly.
    def prepare_schedule(schedule_hash)
      schedule_hash = Hashie.stringify_keys(schedule_hash)
      prepared_hash = {}
      schedule_hash.each do |name, job_spec|
        job_spec = job_spec.dup
        job_class = job_spec.fetch('class', name)
        inferred_queue = infer_queue(job_class)
        job_spec['class'] ||= job_class
        job_spec['queue'] ||= inferred_queue unless inferred_queue.nil?
        prepared_hash[name] = job_spec
      end
      prepared_hash
    end

    # Infers the queue name from a worker class (Sidekiq worker options
    # or anything exposing #queue_name); returns nil when unknown.
    def infer_queue(klass)
      klass = try_to_constantize(klass)
      if klass.respond_to?(:sidekiq_options)
        klass.sidekiq_options['queue']
      elsif klass.respond_to?(:queue_name)
        klass.queue_name
      end
    end

    # Resolves a class-name string to its constant, returning the input
    # unchanged when it cannot be resolved.
    def try_to_constantize(klass)
      klass.is_a?(String) ? klass.constantize : klass
    rescue NameError
      klass
    end
  end
end

# Expose the schedule management API directly on the Sidekiq module.
Sidekiq.extend SidekiqScheduler::Schedule
|
require 'json'
class HTTPBench
  # Command-line entry point: reads the benchmark description from the
  # file named as the first argument and prints the result as JSON.
  class CLI
    def run
      results = HTTPBench.new(lines).execute
      puts JSON.pretty_generate(results)
    end

    private

    # Each line of the input file with surrounding whitespace stripped.
    def lines
      IO.readlines(path).map { |line| line.strip }
    end

    # Absolute path to the file named by the first CLI argument.
    def path
      File.expand_path(ARGV.first)
    end
  end
end
support STDIN reads
require 'json'
class HTTPBench
  # Command-line entry point: reads the benchmark description from the
  # file named as the first argument (or from STDIN when no argument is
  # given) and prints the result as JSON.
  class CLI
    def run
      puts JSON.pretty_generate HTTPBench.new(lines).execute
    end

    private

    # Every line of the input with surrounding whitespace stripped.
    # The source is closed afterwards unless it is a TTY (interactive
    # STDIN).
    def lines
      input = source
      stripped = input.readlines.map(&:strip)
      input.close unless input.tty?
      stripped
    end

    # The input source: the file named by the first CLI argument, or
    # STDIN when none was given.
    #
    # BUG FIX: memoized with ||=. The original assigned on every call,
    # so #lines re-opened the file three times, read from one handle,
    # closed a different freshly-opened one, and leaked the rest.
    def source
      @src ||= ARGV.first ? File.open(File.expand_path(ARGV.first)) : STDIN
    end
  end
end
|
module SmartAnswer
  # Registry of smart-answer flows loaded from *.rb files in a
  # directory.
  class FlowRegistry
    # Raised when a flow is requested that does not exist (or is an
    # unpublished draft while drafts are hidden).
    class NotFound < StandardError; end

    # options[:load_path]   - directory containing the flow files
    #                         (defaults to Rails' lib/flows).
    # options[:show_drafts] - when truthy, draft flows are returned too.
    #
    # In production, all flows are loaded eagerly so requests do not pay
    # the cost of evaluating flow files.
    def initialize(options={})
      @load_path = Pathname.new(options[:load_path] || Rails.root.join('lib', 'flows'))
      @show_drafts = options[:show_drafts]
      preload_flows! if Rails.env.production?
    end

    # Finds a flow by name, raising NotFound when it is unknown or
    # filtered out (a hidden draft).
    def find(name)
      raise NotFound unless available?(name)
      find_by_name(name) or raise NotFound
    end

    # Every available flow, instantiated.
    def flows
      available_flows.map { |s| find_by_name(s) }
    end

    private

    # Builds (or fetches the preloaded) Flow for +name+ by evaluating
    # its source file inside a new Flow. Returns nil for drafts unless
    # drafts are being shown.
    def find_by_name(name)
      absolute_path = @load_path.join("#{name}.rb").to_s
      flow = preloaded(name) || Flow.new {
        eval(File.read(absolute_path), binding, absolute_path)
        name(name)
      }
      return nil if flow && flow.draft? && !@show_drafts
      flow
    end

    def available?(name)
      available_flows.include?(name)
    end

    # Names of every flow file in the load path. File.basename strips
    # the extension directly, so no extra gsub is needed.
    def available_flows
      Dir[@load_path.join('*.rb')].map do |path|
        File.basename(path, '.rb')
      end
    end

    # Eagerly builds every flow and caches it by name.
    def preload_flows!
      @preloaded = {}
      available_flows.each do |flow_name|
        @preloaded[flow_name] = find_by_name(flow_name)
      end
    end

    def preloaded(name)
      @preloaded && @preloaded[name]
    end
  end
end
File.basename can already remove extensions.
module SmartAnswer
  # Registry of smart-answer flows loaded from *.rb files in a
  # directory.
  class FlowRegistry
    # Raised when the requested flow cannot be served.
    class NotFound < StandardError; end

    # Configure the registry. Flow files live in options[:load_path]
    # (defaulting to Rails' lib/flows); drafts stay hidden unless
    # options[:show_drafts] is truthy. Production eagerly preloads all
    # flows.
    def initialize(options={})
      @load_path = Pathname.new(options[:load_path] || Rails.root.join('lib', 'flows'))
      @show_drafts = options[:show_drafts]
      preload_flows! if Rails.env.production?
    end

    # Look a flow up by name; raise NotFound if unknown or hidden.
    def find(name)
      raise NotFound unless available?(name)
      find_by_name(name) or raise NotFound
    end

    # Every available flow, instantiated.
    def flows
      available_flows.map { |flow_name| find_by_name(flow_name) }
    end

    private

    # Builds (or fetches the preloaded) Flow for +name+ by evaluating
    # its source file inside a new Flow; nil for hidden drafts.
    def find_by_name(name)
      source_path = @load_path.join("#{name}.rb").to_s
      flow = preloaded(name) || Flow.new {
        eval(File.read(source_path), binding, source_path)
        name(name)
      }
      return nil if flow && flow.draft? && !@show_drafts
      flow
    end

    def available?(name)
      available_flows.include?(name)
    end

    # Flow names derived from the *.rb files in the load path.
    def available_flows
      Dir[@load_path.join('*.rb')].map { |file| File.basename(file, ".rb") }
    end

    # Eagerly builds every flow and caches it by name.
    def preload_flows!
      @preloaded = available_flows.each_with_object({}) do |flow_name, cache|
        cache[flow_name] = find_by_name(flow_name)
      end
    end

    def preloaded(name)
      @preloaded && @preloaded[name]
    end
  end
end
|
# ImportExport
# CSV import/export support for ActiveRecord models.
module ImportExport
  module ModelMethods
    def self.included(base)
      base.send :extend, ClassMethods
    end

    module ClassMethods
      # Declares the model importable/exportable. Field names are plain
      # attributes or dotted association paths ("association.attribute",
      # with a '!' suffix meaning "create the associated record if
      # missing").
      def acts_as_importable(options = {})
        cattr_accessor :import_fields, :export_fields
        self.import_fields = options[:import_fields]
        self.export_fields = options[:export_fields]
        send :include, InstanceMethods
      end

      # Imports rows from the CSV at +filename+ inside one transaction.
      #
      # context[:scoped]           - when present, records are built/found
      #                              through this object's association
      #                              named after the table.
      # context[:find_existing_by] - match and update existing records on
      #                              this field instead of always creating.
      #
      # The first bad row aborts the whole import with a line-numbered
      # error. Returns the list of saved records.
      def import(filename, context)
        collection = []
        headers, *data = self.read_csv(filename)
        scope_object = context[:scoped]
        ActiveRecord::Base.transaction do
          data.each_with_index do |data_row, index|
            data_row.map{|d| d.strip! if d}
            begin
              class_or_association = scope_object ? scope_object.send(self.table_name) : self
              if key_field = context[:find_existing_by]
                key_value = data_row[index_of(key_field)]
                element = class_or_association.send("find_by_#{key_field}", key_value) || class_or_association.new
              else
                element = class_or_association.new
              end
              Rails.logger.info "#{element.new_record? ? "Creating new" : "Updating existing"} record from #{data_row.inspect}"
              self.import_fields.each_with_index do |field_name, field_index|
                if field_name.include?('.')
                  assign_association(element, field_name, field_index, context, data_row)
                else
                  element.send "#{field_name}=", data_row[field_index]
                end
              end
              element.save!
              collection << element
            rescue Exception => e
              # Re-wrap with the CSV line number (+2 accounts for the
              # header row and 1-based numbering), keeping the original
              # backtrace, then re-raise to roll the transaction back.
              e1 = e.exception("Invalid data found at line #{index + 2} : " + e.message)
              e1.set_backtrace(e.backtrace)
              Rails.logger.error e1.message
              Rails.logger.error e1.backtrace.join("\n")
              raise e1
            end
          end
        end
        return collection
      end

      # Exports all records as a CSV string. Column headers are the
      # field names (association prefix only for dotted paths); values
      # that cannot be read are logged and exported as blank.
      def export
        export_fields = self.import_fields || self.export_fields
        FasterCSV.generate do |csv|
          csv << export_fields.map{|f| f.split('.')[0]}
          self.find_each(:batch_size => 2000) do |element|
            collection = []
            export_fields.each do |field_name|
              begin
                if field_name.include?('.')
                  method_names = field_name.gsub(/!/,'').split('.').compact
                  sub_element = element
                  method_names.each do |method_name|
                    # NOTE(review): `||` looks like it was meant to be
                    # `&&` -- as written, a truthy sub_element is sent
                    # the message regardless of respond_to?, relying on
                    # the rescue below. Kept as-is to preserve behavior;
                    # confirm intent before changing.
                    if sub_element || sub_element.respond_to?(method_name)
                      sub_element = sub_element.send(method_name)
                    else
                      break
                    end
                  end
                  collection << sub_element
                else
                  collection << element.send(field_name)
                end
              rescue Exception => e
                # Deliberate best-effort: a failed field becomes nil so
                # the rest of the export still succeeds.
                Rails.logger.info ">>>>>>>>> Exception Caught ImportExport >>>>>>>>>>>"
                Rails.logger.error e.message
                Rails.logger.error e.backtrace
                collection << nil
              end
            end
            csv << collection
          end
        end
      end

      # Parses the CSV file and returns its rows (an empty array when
      # the file parses to nothing). Raises ArgumentError when the file
      # does not exist. The previous `rescue MalformedCSVError; raise`
      # was a no-op and has been removed; the block form of File.open
      # now also guarantees the handle is closed.
      def read_csv(filename)
        raise ArgumentError, "File does not exist." unless File.exist?(filename)
        collection = File.open(filename, 'rb') { |file| FasterCSV.parse(file) }
        collection || []
      end

      # Memoized position of +fieldname+ within import_fields.
      def index_of(fieldname)
        @import_field_indices ||= {}
        @import_field_indices[fieldname] ||= self.import_fields.index{ |f| f.to_s == fieldname.to_s }
      end

      protected

      # Assigns an association value from a dotted field. Prefers a
      # model-defined assign_<association> hook; otherwise looks the
      # associated record up by the given attribute (optionally creating
      # it when the field carries a '!') and sets the foreign key.
      def assign_association(element, field_name, field_index, context, data_row)
        scope_object = context[:scoped]
        create_record = field_name.include?('!')
        association_name, association_attribute = field_name.gsub(/!/,'').split('.')
        assign_association_method = "assign_#{association_name}"
        association_fk = "#{association_name}_id"
        if element.respond_to?(assign_association_method)
          element.send assign_association_method, data_row, context
        elsif element.respond_to?(association_fk)
          association_class = association_name.classify.constantize
          if scope_object && scope_object.respond_to?(association_class.table_name)
            association_class = scope_object.send(association_class.table_name)
          end
          finder_method = "find_by_#{association_attribute}"
          if association_class and association_class.respond_to?(finder_method)
            e = association_class.send(finder_method, data_row[field_index])
            if e.nil? and create_record and !data_row[field_index].blank?
              e = association_class.create!(association_attribute => data_row[field_index])
            end
            element[association_fk] = e.id if e
          end
        end
      end
    end

    module InstanceMethods
      # Delegates to the class-level field index lookup.
      def index_of(fieldname)
        self.class.index_of(fieldname)
      end
    end
  end
end
Remove redundant rescue block.
# ImportExport: CSV import/export support for ActiveRecord models.
module ImportExport
  module ModelMethods
    def self.included(base)
      base.send :extend, ClassMethods
    end

    module ClassMethods
      # any method placed here will apply to classes
      # Declares the model importable/exportable. Field names are plain
      # attributes or dotted association paths ("association.attribute",
      # a '!' suffix meaning "create the associated record if missing").
      def acts_as_importable(options = {})
        cattr_accessor :import_fields, :export_fields
        self.import_fields = options[:import_fields]
        self.export_fields = options[:export_fields]
        send :include, InstanceMethods
      end

      # Imports rows from the CSV at +filename+ inside one transaction.
      # context[:scoped] routes creation/lookup through that object's
      # association named after the table; context[:find_existing_by]
      # updates existing records matched on that field. The first bad
      # row aborts everything with a line-numbered error. Returns the
      # saved records.
      def import(filename, context)
        collection = []
        headers, *data = self.read_csv(filename)
        scope_object = context[:scoped]
        ActiveRecord::Base.transaction do
          data.each_with_index do |data_row, index|
            data_row.map{|d| d.strip! if d}
            begin
              class_or_association = scope_object ? scope_object.send(self.table_name) : self
              if key_field = context[:find_existing_by]
                key_value = data_row[index_of(key_field)]
                element = class_or_association.send("find_by_#{key_field}", key_value) || class_or_association.new
              else
                element = class_or_association.new
              end
              Rails.logger.info "#{element.new_record? ? "Creating new" : "Updating existing"} record from #{data_row.inspect}"
              self.import_fields.each_with_index do |field_name, field_index|
                if field_name.include?('.')
                  assign_association(element, field_name, field_index, context, data_row)
                else
                  element.send "#{field_name}=", data_row[field_index]
                end
              end
              element.save!
              collection << element
            rescue Exception => e
              # Re-wrap with the CSV line number (+2: header row and
              # 1-based numbering), preserving the original backtrace,
              # then re-raise to roll the transaction back.
              e1 = e.exception("Invalid data found at line #{index + 2} : " + e.message)
              e1.set_backtrace(e.backtrace)
              Rails.logger.error e1.message
              Rails.logger.error e1.backtrace.join("\n")
              raise e1
            end
          end
        end
        return collection
      end

      # Exports all records as a CSV string. Headers are the field
      # names (association prefix only for dotted paths); unreadable
      # values are logged and exported as blank.
      def export
        export_fields = self.import_fields || self.export_fields
        FasterCSV.generate do |csv|
          csv << export_fields.map{|f| f.split('.')[0]}
          self.find_each(:batch_size => 2000) do |element|
            collection = []
            export_fields.each do |field_name|
              begin
                if field_name.include?('.')
                  method_names = field_name.gsub(/!/,'').split('.').compact
                  sub_element = element
                  method_names.each do |method_name|
                    # NOTE(review): `||` looks like it was meant to be
                    # `&&` -- a truthy sub_element is sent the message
                    # regardless of respond_to?, relying on the rescue
                    # below. Confirm intent before changing.
                    if sub_element || sub_element.respond_to?(method_name)
                      sub_element = sub_element.send(method_name)
                    else
                      break
                    end
                  end
                  collection << sub_element
                else
                  collection << element.send(field_name)
                end
              rescue Exception => e
                # Deliberate best-effort: a failed field becomes nil so
                # the rest of the export still succeeds.
                Rails.logger.info ">>>>>>>>> Exception Caught ImportExport >>>>>>>>>>>"
                Rails.logger.error e.message
                Rails.logger.error e.backtrace
                collection << nil
              end
            end
            csv << collection
          end
        end
      end

      # Parses the CSV file and returns its rows ([] when empty).
      # Raises ArgumentError when the file does not exist.
      # NOTE(review): the File handle opened here is never closed
      # explicitly -- it is only reclaimed by GC. Consider the block
      # form of File.open.
      def read_csv(filename)
        if File.exist?(filename)
          collection = FasterCSV.parse(File.open(filename, 'rb'))
          collection = collection.map{|w| w} unless collection.nil?
          collection = [] if collection.nil?
          return collection
        else
          raise ArgumentError, "File does not exist."
        end
      end

      # Memoized position of +fieldname+ within import_fields.
      def index_of(fieldname)
        @import_field_indices ||= {}
        @import_field_indices[fieldname] ||= self.import_fields.index{ |f| f.to_s == fieldname.to_s }
      end

      protected

      # Assigns an association value from a dotted field. Prefers a
      # model-defined assign_<association> hook; otherwise looks the
      # associated record up by the given attribute (optionally
      # creating it when the field carries a '!') and sets the FK.
      def assign_association(element, field_name, field_index, context, data_row)
        scope_object = context[:scoped]
        create_record = field_name.include?('!')
        association_name, association_attribute = field_name.gsub(/!/,'').split('.')
        assign_association_method = "assign_#{association_name}"
        association_fk = "#{association_name}_id"
        if element.respond_to?(assign_association_method)
          element.send assign_association_method, data_row, context
        elsif element.respond_to?(association_fk)
          association_class = association_name.classify.constantize
          if scope_object && scope_object.respond_to?(association_class.table_name)
            association_class = scope_object.send(association_class.table_name)
          end
          finder_method = "find_by_#{association_attribute}"
          if association_class and association_class.respond_to?(finder_method)
            e = association_class.send(finder_method, data_row[field_index])
            if e.nil? and create_record and !data_row[field_index].blank?
              e = association_class.create!(association_attribute => data_row[field_index])
            end
            element[association_fk] = e.id if e
          end
        end
      end
    end

    module InstanceMethods
      # Delegates to the class-level field index lookup.
      def index_of(fieldname)
        self.class.index_of(fieldname)
      end
    end
  end
end
require 'rubygems'
module Inochi
class << self
##
# Establishes your project in Ruby's runtime environment by defining
# the project module (which serves as a namespace for all code in the
# project) and providing a common configuration for the project module:
#
# * Adds the project lib/ directory to the Ruby load path.
#
# * Defines the INOCHI constant in the project module. This constant
# contains the effective configuration parameters (@see project_config).
#
# * Defines all configuration parameters as constants in the project module.
#
# This method must be invoked from immediately within (that is, not from
# within any of its descendant directories) the project lib/ directory.
# Ideally, this method would be invoked from the main project library.
#
# @param [Symbol] project_symbol
# Name of the Ruby constant which serves
# as a namespace for the entire project.
#
# @param [Hash] project_config
# Project configuration parameters:
#
# [String] :project =>
# Name of the project.
#
# The default value is the value of the project_symbol parameter.
#
# [String] :tagline =>
# An enticing, single line description of the project.
#
# The default value is an empty string.
#
# [String] :website =>
# URL of the published project website.
#
# The default value is an empty string.
#
# [String] :docsite =>
# URL of the published user manual.
#
# The default value is the same value as the :website parameter.
#
# [String] :program =>
# Name of the main project executable.
#
# The default value is the value of the :project parameter in lowercase.
#
# [String] :version =>
# Version of the project.
#
# The default value is "0.0.0".
#
# [String] :release =>
# Date when this version was released.
#
# The default value is the current time.
#
# [String] :display =>
# How the project name should be displayed.
#
# The default value is the project name and version together.
#
# [String] :install =>
# Path to the directory which contains the project.
#
# The default value is one directory above the parent
# directory of the file from which this method was called.
#
# [Hash] :require =>
# The names and version constraints of ruby gems required by
# this project. This information must be expressed as follows:
#
# * Each hash key must be the name of a ruby gem.
#
# * Each hash value must be either +nil+, a single version number
# requirement string (see Gem::Requirement) or an Array thereof.
#
# The default value is an empty Hash.
#
# @return [Module] The newly configured project module.
#
def init project_symbol, project_config = {}
  project_module = fetch_project_module(project_symbol)
  # this method is not re-entrant: each project module is configured
  # at most once per process
  @already_seen ||= []
  return project_module if @already_seen.include? project_module
  @already_seen << project_module
  # put project on Ruby load path
  # (first_caller_file is expected to sit directly inside the project
  # lib/ directory -- see the method documentation above -- so
  # project_libs is that lib/ directory)
  project_file = File.expand_path(first_caller_file)
  project_libs = File.dirname(project_file)
  $LOAD_PATH.unshift project_libs
  # supply configuration defaults
  project_config[:project] ||= project_symbol.to_s
  project_config[:tagline] ||= ''
  project_config[:version] ||= '0.0.0'
  project_config[:release] ||= Time.now.strftime('%F')
  project_config[:website] ||= ''
  project_config[:docsite] ||= project_config[:website]
  project_config[:display] ||= "#{project_config[:project]} #{project_config[:version]}"
  project_config[:program] ||= calc_program_name(project_symbol)
  project_config[:install] ||= File.dirname(project_libs)
  project_config[:require] ||= {}
  # establish gem version dependencies and
  # sanitize the values while we're at it
  # (keys become strings, values flat arrays of requirement strings;
  # the original hash object is reused via dup + clear so the config
  # still points at the same Hash instance)
  src = project_config[:require].dup
  dst = project_config[:require].clear
  src.each_pair do |gem_name, version_reqs|
    gem_name = gem_name.to_s
    version_reqs = [version_reqs].flatten.compact
    dst[gem_name] = version_reqs
    gem gem_name, *version_reqs
  end
  # make configuration parameters available as constants
  # (the :inochi entry exposes the whole config as the INOCHI constant;
  # note this makes the hash self-referential)
  project_config[:inochi] = project_config
  project_config.each_pair do |param, value|
    project_module.const_set param.to_s.upcase, value
  end
  project_module
end
##
# Provides a common configuration for the main project executable:
#
# * The program description (the sequence of non-blank lines at the
# top of the file in which this method is invoked) is properly
# formatted and displayed at the top of program's help information.
#
# * The program version information is fetched from the project module
# and formatted in YAML fashion for easy consumption by other tools.
#
# * A list of command-line options is displayed at
# the bottom of the program's help information.
#
# @param [Symbol] project_symbol
# Name of the Ruby constant which serves
# as a namespace for the entire project.
#
# @param trollop_args
# Optional arguments for Trollop::options().
#
# @param trollop_config
# Optional block argument for Trollop::options().
#
# @return The result of Trollop::options().
#
def main project_symbol, *trollop_args, &trollop_config
program_file = first_caller_file
# load the project module
program_name = calc_program_name(project_symbol)
require File.join(File.dirname(program_file), '..', 'lib', program_name)
project_module = fetch_project_module(project_symbol)
# parse command-line options
require 'trollop'
options = Trollop.options(*trollop_args) do
# show project description
text "#{project_module::PROJECT} - #{project_module::TAGLINE}"
text ''
# show program description
# (the description is the leading comment block of the program file:
# everything up to the first blank line; note that after the "# "
# markers are stripped, a shebang line is left over as "!..." text,
# which is what the final sub() removes)
text File.read(program_file)[/\A.*?^$\n/m]. # grab the header
gsub(/^# ?/, ''). # strip the comment markers
sub(/\A!.*?\n/, '').lstrip # omit the shebang line
text ''
instance_eval(&trollop_config) if trollop_config
# show version information
# (each listed project constant is emitted as a "key: value" line,
# YAML fashion, for easy consumption by other tools)
version %w[PROJECT VERSION RELEASE WEBSITE INSTALL].map {|c|
"#{c.downcase}: #{project_module.const_get c}"
}.join("\n")
opt :manual, 'Show the user manual'
end
# --manual opens the installed HTML user manual in a browser and exits
if options[:manual]
require 'launchy'
Launchy::Browser.run "#{project_module::INSTALL}/doc/index.xhtml"
exit
end
options
end
##
# Provides Rake tasks for packaging, publishing, and announcing your project.
#
# * An AUTHORS constant (which has the form "[[name, info]]"
# where "name" is the name of a copyright holder and "info" is
# their contact information) is added to the project module.
#
# This information is extracted from copyright notices in
# the project license file. NOTE that the first copyright
# notice must correspond to the primary project maintainer.
#
# Copyright notices must be in the following form:
#
# Copyright YEAR HOLDER <EMAIL>
#
# Where HOLDER is the name of the copyright holder, YEAR is the year
# when the copyright holder first began working on the project, and
# EMAIL is (optional) the email address of the copyright holder.
#
# @param [Symbol] project_symbol
# Name of the Ruby constant which serves
# as a namespace for the entire project.
#
# @param [Hash] options
# Additional method parameters, which are all optional:
#
# [String] :license_file =>
# Path (relative to the main project directory which contains the
# project Rakefile) to the file which contains the project license.
#
# The default value is "LICENSE".
#
# [String] :rubyforge_project =>
# Name of the RubyForge project where
# release packages will be published.
#
# The default value is the value of the PROGRAM constant.
#
# [String] :rubyforge_section =>
# Name of the RubyForge project's File Release System
# section where release packages will be published.
#
# The default value is the value of the :rubyforge_project parameter.
#
# [String] :raa_project =>
# Name of the RAA (Ruby Application Archive) entry for this project.
#
# The default value is the value of the PROGRAM constant.
#
# [String] :upload_target =>
# Where to upload the project documentation.
# See "destination" in the rsync manual.
#
# The default value is nil.
#
# [String] :upload_delete =>
# Delete unknown files at the upload target location?
#
# The default value is false.
#
# [Array] :upload_options =>
# Additional command-line arguments to the rsync command.
#
# The default value is an empty array.
#
# @param gem_config
# Block that is passed to Gem::specification.new()
# for additonal gem configuration.
#
# @yieldparam [Gem::Specification] gem_spec the gem specification
#
def rake project_symbol, options = {}, &gem_config
# load the project module
program_name = calc_program_name(project_symbol)
require File.join('lib', program_name)
project_module = fetch_project_module(project_symbol)
# supply default options
options[:rubyforge_project] ||= program_name
options[:rubyforge_section] ||= program_name
options[:raa_project] ||= program_name
options[:license_file] ||= 'LICENSE'
options[:upload_delete] ||= false
options[:upload_options] ||= []
# add AUTHORS constant to the project module
# (each scanned copyright line yields a [holder_name, email] pair;
# holders listed without an <email> get an empty string instead)
license = File.read(options[:license_file])
copyright_holders =
license.scan(/Copyright.*?\d+\s+(.*)/).flatten.
map {|s| (s =~ /\s*<(.*?)>/) ? [$`, $1] : [s, ''] }
project_module.const_set :AUTHORS, copyright_holders
require 'rake/clean'
# blanks a task's description so it is hidden from `rake -T` output
hide_rake_task = lambda do |name|
Rake::Task[name].instance_variable_set :@comment, nil
end
# documentation
desc 'Build all documentation.'
task :doc => %w[ doc:api doc:man ann:feed ]
# user manual
doc_man_src = 'doc/index.erb'
doc_man_dst = 'doc/index.xhtml'
doc_man_deps = FileList['doc/*.erb']
# memoized: the manual is parsed at most once per rake invocation
doc_man_doc = nil
task :doc_man_doc => doc_man_src do
unless doc_man_doc
require 'erbook' unless defined? ERBook
doc_man_txt = File.read(doc_man_src)
doc_man_doc = ERBook::Document.new(:xhtml, doc_man_txt, doc_man_src, :unindent => true)
end
end
desc 'Build the user manual.'
task 'doc:man' => doc_man_dst
file doc_man_dst => doc_man_deps do
Rake::Task[:doc_man_doc].invoke
File.write doc_man_dst, doc_man_doc
end
CLOBBER.include doc_man_dst
# API reference
doc_api_dst = 'doc/api'
desc 'Build API reference.'
task 'doc:api' => doc_api_dst
require 'yard'
YARD::Rake::YardocTask.new doc_api_dst do |t|
t.options.push '--protected',
'--output-dir', doc_api_dst,
'--readme', options[:license_file]
task doc_api_dst => options[:license_file]
end
hide_rake_task[doc_api_dst]
CLEAN.include '.yardoc'
CLOBBER.include doc_api_dst
# announcments
desc 'Build all release announcements.'
task :ann => %w[ ann:feed ann:html ann:text ann:mail ]
# it has long been a tradition to use an "[ANN]" prefix
# when announcing things on the ruby-talk mailing list
ann_prefix = '[ANN] '
ann_subject = ann_prefix + project_module::DISPLAY
ann_project = ann_prefix + project_module::PROJECT
# fetch the project summary from user manual
# ($project_summary_node is recorded by Inochi.book() while the
# manual's "project_summary" node renders)
ann_nfo_doc = nil
task :ann_nfo_doc => :doc_man_doc do
ann_nfo_doc = $project_summary_node
end
# fetch release notes from user manual
# (the first child of $project_history_node is the newest release)
ann_rel_doc = nil
task :ann_rel_doc => :doc_man_doc do
unless ann_rel_doc
if parent = $project_history_node
if child = parent.children.first
ann_rel_doc = child
else
raise 'The "project_history" node in the user manual lacks child nodes.'
end
else
raise 'The user manual lacks a "project_history" node.'
end
end
end
# build release notes in HTML and plain text
# converts the given HTML into plain text. we do this using
# lynx because (1) it outputs a list of all hyperlinks used
# in the HTML document and (2) it runs on all major platforms
# NOTE: requires the `lynx` executable to be available on the PATH
convert_html_to_text = lambda do |html|
require 'tempfile'
begin
# lynx's -dump option requires a .html file
tmp_file = Tempfile.new(Inochi::PROGRAM).path + '.html'
File.write tmp_file, html
text = `lynx -dump #{tmp_file} -width 70`
ensure
File.delete tmp_file
end
# improve readability of list items that span multiple
# lines by adding a blank line between such items
text.gsub! %r{^( *[^\*\s].*)(\r?\n)( *\* \S)}, '\1\2\2\3'
text
end
ann_html = nil
task :ann_html => [:doc_man_doc, :ann_nfo_doc, :ann_rel_doc] do
unless ann_html
ann_html = %{
<center>
<h1>#{project_module::DISPLAY}</h1>
<p>#{project_module::TAGLINE}</p>
<p>#{project_module::WEBSITE}</p>
</center>
#{ann_nfo_doc}
#{ann_rel_doc}
}
# remove heading navigation menus
ann_html.gsub! %r{<div class="nav"[^>]*>(.*?)</div>}, ''
# resolve relative URLs into absolute URLs
# see http://en.wikipedia.org/wiki/URI_scheme#Generic_syntax
require 'uri'
uri = URI.parse(project_module::DOCSITE)
doc_url = uri.to_s
dir_url = uri.path =~ %r{/$|^$} ? doc_url : File.dirname(doc_url)
# rewrite href/src attributes: absolute URLs are kept verbatim,
# "#fragment" links resolve against the manual page itself, and
# everything else resolves against the manual's parent directory
# ($2 captures the quote character, $3 the URL between the quotes)
ann_html.gsub! %r{(href=|src=)(.)(.*?)(\2)} do |match|
a, b = $1 + $2, $3.to_s << $4
case $3
when %r{^[[:alpha:]][[:alnum:]\+\.\-]*://} # already absolute
match
when /^#/
a << File.join(doc_url, b)
else
a << File.join(dir_url, b)
end
end
end
end
ann_text = nil
task :ann_text => :ann_html do
unless ann_text
ann_text = convert_html_to_text[ann_html]
end
end
ann_nfo_text = nil
task :ann_nfo_text => :ann_nfo_doc do
unless ann_nfo_text
ann_nfo_text = convert_html_to_text[ann_nfo_doc]
end
end
# HTML
ann_html_dst = 'ANN.html'
desc "Build HTML announcement: #{ann_html_dst}"
task 'ann:html' => ann_html_dst
file ann_html_dst => doc_man_deps do
Rake::Task[:ann_html].invoke
File.write ann_html_dst, ann_html
end
CLEAN.include ann_html_dst
# RSS feed
ann_feed_dst = 'doc/ann.xml'
desc "Build RSS announcement: #{ann_feed_dst}"
task 'ann:feed' => ann_feed_dst
file ann_feed_dst => doc_man_deps do
require 'time'
require 'rss/maker'
feed = RSS::Maker.make('2.0') do |feed|
feed.channel.title = ann_project
feed.channel.link = project_module::WEBSITE
feed.channel.description = project_module::TAGLINE
Rake::Task[:ann_rel_doc].invoke
Rake::Task[:ann_html].invoke
item = feed.items.new_item
item.title = ann_rel_doc.title
item.link = project_module::DOCSITE + '#' + ann_rel_doc.here_frag
# the release-notes node title is parsed as the release date
item.date = Time.parse(item.title)
item.description = ann_html
end
File.write ann_feed_dst, feed
end
CLOBBER.include ann_feed_dst
# plain text
ann_text_dst = 'ANN.txt'
desc "Build plain text announcement: #{ann_text_dst}"
task 'ann:text' => ann_text_dst
file ann_text_dst => doc_man_deps do
Rake::Task[:ann_text].invoke
File.write ann_text_dst, ann_text
end
CLEAN.include ann_text_dst
# e-mail
ann_mail_dst = 'ANN.eml'
desc "Build e-mail announcement: #{ann_mail_dst}"
task 'ann:mail' => ann_mail_dst
file ann_mail_dst => doc_man_deps do
File.open ann_mail_dst, 'w' do |f|
require 'time'
f.puts "Date: #{Time.now.rfc822}"
f.puts 'To: ruby-talk@ruby-lang.org'
# AUTHORS.first is the [name, email] pair of the primary maintainer
f.puts 'From: "%s" <%s>' % project_module::AUTHORS.first
f.puts "Subject: #{ann_subject}"
Rake::Task[:ann_text].invoke
f.puts '', ann_text
end
end
CLEAN.include ann_mail_dst
# packaging
desc 'Build a release.'
task :pak => [:clobber, :doc] do
# re-invoke this very Rakefile ($0) so that packaging happens in a
# fresh run, after the clobber above has taken effect
sh $0, 'package'
end
CLEAN.include 'pkg'
# ruby gem
require 'rake/gempackagetask'
# NOTE: the block parameter deliberately shadows the outer `gem`
# local so that the user-supplied gem_config block (yielded below)
# receives the Gem::Specification being built
gem = Gem::Specification.new do |gem|
authors = project_module::AUTHORS
if author = authors.first
gem.author, gem.email = author
end
if authors.length > 1
gem.authors = authors.map {|name, mail| name }
end
gem.rubyforge_project = options[:rubyforge_project]
# XXX: In theory, `gem.name` should be assigned to
# ::PROJECT instead of ::PROGRAM
#
# In practice, PROJECT may contain non-word
# characters and may also contain a mixture
# of lowercase and uppercase letters.
#
# This makes it difficult for people to
# install the project gem because they must
# remember the exact spelling used in
# `gem.name` when running `gem install ____`.
#
# For example, consider the "RedCloth" gem.
#
gem.name = project_module::PROGRAM
gem.version = project_module::VERSION
gem.summary = project_module::TAGLINE
gem.description = gem.summary
gem.homepage = project_module::WEBSITE
gem.files = FileList['**/*'].exclude('_darcs') - CLEAN
gem.executables = project_module::PROGRAM
gem.has_rdoc = true
unless project_module == Inochi
gem.add_dependency 'inochi', Inochi::VERSION
end
project_module::REQUIRE.each_pair do |gem_name, version_reqs|
gem.add_dependency gem_name, *version_reqs
end
# additional configuration is done by user
yield gem if gem_config
end
Rake::GemPackageTask.new(gem).define
# XXX: hide the tasks defined by the above gem packaging library
%w[gem package repackage clobber_package].each {|t| hide_rake_task[t] }
# releasing
desc 'Publish a new release.'
task 'pub' => %w[ pub:pak pub:doc pub:ann ]
# connect to RubyForge services
# (memoized handle shared by all pub:* tasks below)
pub_rubyforge = nil
task :pub_rubyforge do
require 'rubyforge'
pub_rubyforge = RubyForge.new
pub_rubyforge.configure 'release_date' => project_module::RELEASE
pub_rubyforge.login
end
# documentation
desc 'Publish documentation to project website.'
task 'pub:doc' => [:doc, :pub_rubyforge] do
target = options[:upload_target]
unless target
require 'uri'
docsite = URI.parse(project_module::DOCSITE)
# provide uploading capability to websites hosted on RubyForge
if docsite.host.include? '.rubyforge.org'
target = "#{pub_rubyforge.userconfig['username']}@rubyforge.org:#{ File.join '/var/www/gforge-projects', options[:rubyforge_project], docsite.path}"
end
end
if target
cmd = ['rsync', '-auvz', 'doc/', "#{target}/"]
cmd.push '--delete' if options[:upload_delete]
cmd.concat options[:upload_options]
sh(*cmd)
end
end
# announcement
desc 'Publish all release announcements.'
task 'pub:ann' => %w[ pub:ann:forge pub:ann:raa ]
desc 'Announce to RubyForge news.'
task 'pub:ann:forge' => [:pub_rubyforge, :ann_text] do
# TODO: post only if news item is not already there
pub_rubyforge.post_news options[:rubyforge_project], ann_subject, ann_text
end
desc 'Announce to RAA (Ruby Application Archive).'
task 'pub:ann:raa' => :ann_nfo_text do
# raises the error message scraped from a RAA result page
show_page_error = lambda do |page, message|
raise "#{message}: #{(page/'h2').text} -- #{(page/'p').first.text.strip}"
end
resource = "#{options[:raa_project].inspect} project entry on RAA"
require 'mechanize'
agent = WWW::Mechanize.new
page = agent.get "http://raa.ruby-lang.org/update.rhtml?name=#{options[:raa_project]}"
if form = page.forms[1]
resource << " (owned by #{form.owner.inspect})"
form.description_style = 'Pre-formatted'
form.description = ann_nfo_text
form.short_description = project_module::TAGLINE
form.version = project_module::VERSION
form.url = project_module::WEBSITE
# NOTE(review): `or` binds looser than `=`, so this line assigns
# only options[:raa_password] to form.pass; the password prompted
# for in the begin/end block is read but never assigned anywhere.
# Looks like a bug -- kept as-is here; confirm before changing.
form.pass = options[:raa_password] or begin
# ask for password
print "Password for #{resource}: "
gets.chomp
end
page = agent.submit form
if page.title =~ /error/i
show_page_error[page, "Could not update #{resource}"]
end
else
show_page_error[page, "Could not access #{resource}"]
end
end
# release packages
desc 'Publish release packages to RubyForge.'
task 'pub:pak' => [:pak, :pub_rubyforge] do
uploader = lambda do |command, *files|
pub_rubyforge.__send__ command, options[:rubyforge_project], options[:rubyforge_section], project_module::VERSION, *files
end
packages = Dir['pkg/*.[a-z]*']
unless packages.empty?
# NOTE: use the 'add_release' command ONLY for the first
# file because it creates a new sub-section on the
# RubyForge download page; we do not want one package
# per sub-section on the RubyForge download page!
#
uploader[:add_release, packages.shift]
unless packages.empty?
uploader[:add_file, *packages]
end
end
end
end
##
# Provides a common configuration for the project's user manual:
#
# * Assigns the title, subtitle, date, and authors for the document.
#
# You may override these assignments by reassigning these
# document parameters AFTER this method is invoked.
#
# Refer to the "document parameters" for the XHTML
# format in the "erbook" user manual for details.
#
# * Provides the project's configuration as global variables in the document.
#
# For example, <%= $version %> is the same as
# <%= project_module::VERSION %> in the document.
#
# * Defines a "project_summary" node for use in the document. The body
# of this node should contain a brief introduction to the project.
#
# * Defines a "project_history" node for use in the document. The body
# of this node should contain other nodes, each of which represent a
# single set of release notes for one of the project's releases.
#
# It is assumed that this method is called
# from within the Inochi.rake() environment.
#
# @param [Symbol] project_symbol
# Name of the Ruby constant which serves
# as a namespace for the entire project.
#
# @param [ERBook::Document::Template] book_template
# The eRuby template which serves as the documentation for the project.
#
def book project_symbol, book_template
project_module = fetch_project_module(project_symbol)
# provide project constants as global variables to the user manual
# (each configuration key in the INOCHI hash, e.g. :version, becomes
# a like-named global, e.g. $version, for use in the eRuby template)
project_module::INOCHI.each_pair do |param, value|
eval "$#{param} = value", binding
end
# set document parameters for the user manual
$title = project_module::DISPLAY
$subtitle = project_module::TAGLINE
$feeds = { File.join(project_module::DOCSITE, 'ann.xml') => :rss }
$authors = Hash[
*project_module::AUTHORS.map do |name, addr|
# convert raw e-mail addresses into URLs for the erbook XHTML format
addr = "mailto:#{addr}" unless addr =~ /^\w+:/
[name, addr]
end.flatten
]
# add two custom node types to this particular template's singleton
# class; each records the node it creates into a global variable so
# the announcement tasks in Inochi.rake() can locate the project
# summary and the release-notes history later on
class << book_template
def project_summary
raise ArgumentError, 'block must be given' unless block_given?
node do
$project_summary_node = @nodes.last
yield
end
end
def project_history
raise ArgumentError, 'block must be given' unless block_given?
node do
$project_history_node = @nodes.last
yield
end
end
end
end
##
# Returns the name of the main program executable, which
# is the same as the project name fully in lowercase.
#
def calc_program_name project_symbol
  # The executable name is simply the snake_case
  # form of the project name, folded to lowercase.
  snake = camel_to_snake_case(project_symbol)
  snake.downcase
end
##
# Calculates the name of the project module from the given project name.
#
def calc_project_symbol project_name
  # Collapse every run of non-word characters (and any pre-existing
  # runs of underscores) into a single underscore, then trim an
  # underscore left over at either end of the string.
  name = project_name.to_s
  name = name.gsub(/\W+/, '_')
  name = name.squeeze('_')
  name = name.sub(/\A_/, '').sub(/_\z/, '')
  # Upcase only the first character -- the remainder keeps its case --
  # so the result is usable as a Ruby constant name.
  (name[0, 1].upcase + name[1..-1]).to_sym
end
##
# Transforms the given input from CamelCase to snake_case.
#
def camel_to_snake_case input
  result = input.to_s.dup
  # Each substitution is repeated until it no longer changes the
  # string: gsub! returns nil once nothing was replaced, which
  # terminates the `nil while ...` loop.
  # handle camel case like FooBar => Foo_Bar
  nil while result.gsub!(/([a-z]+)([A-Z])(\w+)/, '\1_\2\3')
  # handle abbreviations like XMLParser => XML_Parser
  nil while result.gsub!(/([A-Z]+)([A-Z])([a-z]+)/, '\1_\2\3')
  result
end
private
##
# Returns the path of the file in which this method was called. Calls
# to this method from within *THIS* file are excluded from the search.
#
def first_caller_file
# `and` binds looser than the method calls, so this reads: for each
# stack frame, skip frames originating in this very file, and for the
# first foreign frame break out of the iteration with the leading
# "path" portion of the "path:line[:in ...]" frame string.
# NOTE(review): if every frame comes from this file, `each` returns
# the whole caller array instead of a file path -- callers appear to
# rely on a foreign frame always existing; confirm before changing.
caller.each {|s| !s.include? __FILE__ and s =~ /^(.*?):\d+/ and break $1 }
end
##
# Returns the project module corresponding to the given symbol.
# A new module is created if none already exists.
#
def fetch_project_module project_symbol
if Object.const_defined? project_symbol
project_module = Object.const_get(project_symbol)
else
project_module = Module.new
Object.const_set project_symbol, project_module
end
project_module
end
end
end
##
# utility methods
#
unless File.respond_to? :write
##
# Writes the given content to the given file.
#
# Compatibility shim: defined only when the running Ruby does not
# already provide File.write. Opens the file in binary mode ('wb')
# so no newline translation takes place on any platform.
#
# @return number of bytes written
#
def File.write path, content
File.open(path, 'wb') {|f| f.write content.to_s }
end
end
# NOTE: ann:feed is only needed when publishing to docsite
require 'rubygems'
module Inochi
class << self
##
# Establishes your project in Ruby's runtime environment by defining
# the project module (which serves as a namespace for all code in the
# project) and providing a common configuration for the project module:
#
# * Adds the project lib/ directory to the Ruby load path.
#
# * Defines the INOCHI constant in the project module. This constant
# contains the effective configuration parameters (@see project_config).
#
# * Defines all configuration parameters as constants in the project module.
#
# This method must be invoked from immediately within (that is, not from
# within any of its descendant directories) the project lib/ directory.
# Ideally, this method would be invoked from the main project library.
#
# @param [Symbol] project_symbol
# Name of the Ruby constant which serves
# as a namespace for the entire project.
#
# @param [Hash] project_config
# Project configuration parameters:
#
# [String] :project =>
# Name of the project.
#
# The default value is the value of the project_symbol parameter.
#
# [String] :tagline =>
# An enticing, single line description of the project.
#
# The default value is an empty string.
#
# [String] :website =>
# URL of the published project website.
#
# The default value is an empty string.
#
# [String] :docsite =>
# URL of the published user manual.
#
# The default value is the same value as the :website parameter.
#
# [String] :program =>
# Name of the main project executable.
#
# The default value is the value of the :project parameter in lowercase.
#
# [String] :version =>
# Version of the project.
#
# The default value is "0.0.0".
#
# [String] :release =>
# Date when this version was released.
#
# The default value is the current time.
#
# [String] :display =>
# How the project name should be displayed.
#
# The default value is the project name and version together.
#
# [String] :install =>
# Path to the directory which contains the project.
#
# The default value is one directory above the parent
# directory of the file from which this method was called.
#
# [Hash] :require =>
# The names and version constraints of ruby gems required by
# this project. This information must be expressed as follows:
#
# * Each hash key must be the name of a ruby gem.
#
# * Each hash value must be either +nil+, a single version number
# requirement string (see Gem::Requirement) or an Array thereof.
#
# The default value is an empty Hash.
#
# @return [Module] The newly configured project module.
#
def init project_symbol, project_config = {}
project_module = fetch_project_module(project_symbol)
# this method is not re-entrant
@already_seen ||= []
return project_module if @already_seen.include? project_module
@already_seen << project_module
# put project on Ruby load path
# (the caller is expected to invoke this from the project lib/
# directory, so the caller's own directory becomes the load path)
project_file = File.expand_path(first_caller_file)
project_libs = File.dirname(project_file)
$LOAD_PATH.unshift project_libs
# supply configuration defaults
project_config[:project] ||= project_symbol.to_s
project_config[:tagline] ||= ''
project_config[:version] ||= '0.0.0'
# %F formats the current date as ISO 8601 (YYYY-MM-DD)
project_config[:release] ||= Time.now.strftime('%F')
project_config[:website] ||= ''
project_config[:docsite] ||= project_config[:website]
project_config[:display] ||= "#{project_config[:project]} #{project_config[:version]}"
project_config[:program] ||= calc_program_name(project_symbol)
project_config[:install] ||= File.dirname(project_libs)
project_config[:require] ||= {}
# establish gem version dependencies and
# sanitize the values while we're at it
# (the hash is rebuilt in place -- dup then clear -- so that the
# INOCHI constant set below exposes the sanitized copy: string gem
# names mapped to arrays of version requirement strings)
src = project_config[:require].dup
dst = project_config[:require].clear
src.each_pair do |gem_name, version_reqs|
gem_name = gem_name.to_s
version_reqs = [version_reqs].flatten.compact
dst[gem_name] = version_reqs
# activate the gem now, at the required version, via Kernel#gem
gem gem_name, *version_reqs
end
# make configuration parameters available as constants
# (the :inochi self-reference exposes the entire configuration hash
# itself as the INOCHI constant, which Inochi.book() relies upon)
project_config[:inochi] = project_config
project_config.each_pair do |param, value|
project_module.const_set param.to_s.upcase, value
end
project_module
end
##
# Provides a common configuration for the main project executable:
#
# * The program description (the sequence of non-blank lines at the
# top of the file in which this method is invoked) is properly
# formatted and displayed at the top of program's help information.
#
# * The program version information is fetched from the project module
# and formatted in YAML fashion for easy consumption by other tools.
#
# * A list of command-line options is displayed at
# the bottom of the program's help information.
#
# @param [Symbol] project_symbol
# Name of the Ruby constant which serves
# as a namespace for the entire project.
#
# @param trollop_args
# Optional arguments for Trollop::options().
#
# @param trollop_config
# Optional block argument for Trollop::options().
#
# @return The result of Trollop::options().
#
def main project_symbol, *trollop_args, &trollop_config
program_file = first_caller_file
# load the project module
program_name = calc_program_name(project_symbol)
require File.join(File.dirname(program_file), '..', 'lib', program_name)
project_module = fetch_project_module(project_symbol)
# parse command-line options
require 'trollop'
options = Trollop.options(*trollop_args) do
# show project description
text "#{project_module::PROJECT} - #{project_module::TAGLINE}"
text ''
# show program description
# (the description is the leading comment block of the program file:
# everything up to the first blank line; note that after the "# "
# markers are stripped, a shebang line is left over as "!..." text,
# which is what the final sub() removes)
text File.read(program_file)[/\A.*?^$\n/m]. # grab the header
gsub(/^# ?/, ''). # strip the comment markers
sub(/\A!.*?\n/, '').lstrip # omit the shebang line
text ''
instance_eval(&trollop_config) if trollop_config
# show version information
# (each listed project constant is emitted as a "key: value" line,
# YAML fashion, for easy consumption by other tools)
version %w[PROJECT VERSION RELEASE WEBSITE INSTALL].map {|c|
"#{c.downcase}: #{project_module.const_get c}"
}.join("\n")
opt :manual, 'Show the user manual'
end
# --manual opens the installed HTML user manual in a browser and exits
if options[:manual]
require 'launchy'
Launchy::Browser.run "#{project_module::INSTALL}/doc/index.xhtml"
exit
end
options
end
##
# Provides Rake tasks for packaging, publishing, and announcing your project.
#
# * An AUTHORS constant (which has the form "[[name, info]]"
# where "name" is the name of a copyright holder and "info" is
# their contact information) is added to the project module.
#
# This information is extracted from copyright notices in
# the project license file. NOTE that the first copyright
# notice must correspond to the primary project maintainer.
#
# Copyright notices must be in the following form:
#
# Copyright YEAR HOLDER <EMAIL>
#
# Where HOLDER is the name of the copyright holder, YEAR is the year
# when the copyright holder first began working on the project, and
# EMAIL is (optional) the email address of the copyright holder.
#
# @param [Symbol] project_symbol
# Name of the Ruby constant which serves
# as a namespace for the entire project.
#
# @param [Hash] options
# Additional method parameters, which are all optional:
#
# [String] :license_file =>
# Path (relative to the main project directory which contains the
# project Rakefile) to the file which contains the project license.
#
# The default value is "LICENSE".
#
# [String] :rubyforge_project =>
# Name of the RubyForge project where
# release packages will be published.
#
# The default value is the value of the PROGRAM constant.
#
# [String] :rubyforge_section =>
# Name of the RubyForge project's File Release System
# section where release packages will be published.
#
# The default value is the value of the :rubyforge_project parameter.
#
# [String] :raa_project =>
# Name of the RAA (Ruby Application Archive) entry for this project.
#
# The default value is the value of the PROGRAM constant.
#
# [String] :upload_target =>
# Where to upload the project documentation.
# See "destination" in the rsync manual.
#
# The default value is nil.
#
# [String] :upload_delete =>
# Delete unknown files at the upload target location?
#
# The default value is false.
#
# [Array] :upload_options =>
# Additional command-line arguments to the rsync command.
#
# The default value is an empty array.
#
# @param gem_config
# Block that is passed to Gem::specification.new()
# for additonal gem configuration.
#
# @yieldparam [Gem::Specification] gem_spec the gem specification
#
def rake project_symbol, options = {}, &gem_config
# load the project module
program_name = calc_program_name(project_symbol)
require File.join('lib', program_name)
project_module = fetch_project_module(project_symbol)
# supply default options
options[:rubyforge_project] ||= program_name
options[:rubyforge_section] ||= program_name
options[:raa_project] ||= program_name
options[:license_file] ||= 'LICENSE'
options[:upload_delete] ||= false
options[:upload_options] ||= []
# add AUTHORS constant to the project module
license = File.read(options[:license_file])
copyright_holders =
license.scan(/Copyright.*?\d+\s+(.*)/).flatten.
map {|s| (s =~ /\s*<(.*?)>/) ? [$`, $1] : [s, ''] }
project_module.const_set :AUTHORS, copyright_holders
require 'rake/clean'
hide_rake_task = lambda do |name|
Rake::Task[name].instance_variable_set :@comment, nil
end
# documentation
desc 'Build all documentation.'
task :doc => %w[ doc:api doc:man ]
# user manual
doc_man_src = 'doc/index.erb'
doc_man_dst = 'doc/index.xhtml'
doc_man_deps = FileList['doc/*.erb']
doc_man_doc = nil
task :doc_man_doc => doc_man_src do
unless doc_man_doc
require 'erbook' unless defined? ERBook
doc_man_txt = File.read(doc_man_src)
doc_man_doc = ERBook::Document.new(:xhtml, doc_man_txt, doc_man_src, :unindent => true)
end
end
desc 'Build the user manual.'
task 'doc:man' => doc_man_dst
file doc_man_dst => doc_man_deps do
Rake::Task[:doc_man_doc].invoke
File.write doc_man_dst, doc_man_doc
end
CLOBBER.include doc_man_dst
# API reference
doc_api_dst = 'doc/api'
desc 'Build API reference.'
task 'doc:api' => doc_api_dst
require 'yard'
YARD::Rake::YardocTask.new doc_api_dst do |t|
t.options.push '--protected',
'--output-dir', doc_api_dst,
'--readme', options[:license_file]
task doc_api_dst => options[:license_file]
end
hide_rake_task[doc_api_dst]
CLEAN.include '.yardoc'
CLOBBER.include doc_api_dst
# announcments
desc 'Build all release announcements.'
task :ann => %w[ ann:feed ann:html ann:text ann:mail ]
# it has long been a tradition to use an "[ANN]" prefix
# when announcing things on the ruby-talk mailing list
ann_prefix = '[ANN] '
ann_subject = ann_prefix + project_module::DISPLAY
ann_project = ann_prefix + project_module::PROJECT
# fetch the project summary from user manual
ann_nfo_doc = nil
task :ann_nfo_doc => :doc_man_doc do
ann_nfo_doc = $project_summary_node
end
# fetch release notes from user manual
ann_rel_doc = nil
task :ann_rel_doc => :doc_man_doc do
unless ann_rel_doc
if parent = $project_history_node
if child = parent.children.first
ann_rel_doc = child
else
raise 'The "project_history" node in the user manual lacks child nodes.'
end
else
raise 'The user manual lacks a "project_history" node.'
end
end
end
# build release notes in HTML and plain text
# converts the given HTML into plain text. we do this using
# lynx because (1) it outputs a list of all hyperlinks used
# in the HTML document and (2) it runs on all major platforms
convert_html_to_text = lambda do |html|
require 'tempfile'
begin
# lynx's -dump option requires a .html file
tmp_file = Tempfile.new(Inochi::PROGRAM).path + '.html'
File.write tmp_file, html
text = `lynx -dump #{tmp_file} -width 70`
ensure
File.delete tmp_file
end
# improve readability of list items that span multiple
# lines by adding a blank line between such items
text.gsub! %r{^( *[^\*\s].*)(\r?\n)( *\* \S)}, '\1\2\2\3'
text
end
ann_html = nil
task :ann_html => [:doc_man_doc, :ann_nfo_doc, :ann_rel_doc] do
unless ann_html
ann_html = %{
<center>
<h1>#{project_module::DISPLAY}</h1>
<p>#{project_module::TAGLINE}</p>
<p>#{project_module::WEBSITE}</p>
</center>
#{ann_nfo_doc}
#{ann_rel_doc}
}
# remove heading navigation menus
ann_html.gsub! %r{<div class="nav"[^>]*>(.*?)</div>}, ''
# resolve relative URLs into absolute URLs
# see http://en.wikipedia.org/wiki/URI_scheme#Generic_syntax
require 'uri'
uri = URI.parse(project_module::DOCSITE)
doc_url = uri.to_s
dir_url = uri.path =~ %r{/$|^$} ? doc_url : File.dirname(doc_url)
ann_html.gsub! %r{(href=|src=)(.)(.*?)(\2)} do |match|
a, b = $1 + $2, $3.to_s << $4
case $3
when %r{^[[:alpha:]][[:alnum:]\+\.\-]*://} # already absolute
match
when /^#/
a << File.join(doc_url, b)
else
a << File.join(dir_url, b)
end
end
end
end
ann_text = nil
task :ann_text => :ann_html do
unless ann_text
ann_text = convert_html_to_text[ann_html]
end
end
ann_nfo_text = nil
task :ann_nfo_text => :ann_nfo_doc do
unless ann_nfo_text
ann_nfo_text = convert_html_to_text[ann_nfo_doc]
end
end
# HTML
ann_html_dst = 'ANN.html'
desc "Build HTML announcement: #{ann_html_dst}"
task 'ann:html' => ann_html_dst
file ann_html_dst => doc_man_deps do
Rake::Task[:ann_html].invoke
File.write ann_html_dst, ann_html
end
CLEAN.include ann_html_dst
# RSS feed
ann_feed_dst = 'doc/ann.xml'
desc "Build RSS announcement: #{ann_feed_dst}"
task 'ann:feed' => ann_feed_dst
file ann_feed_dst => doc_man_deps do
require 'time'
require 'rss/maker'
feed = RSS::Maker.make('2.0') do |feed|
feed.channel.title = ann_project
feed.channel.link = project_module::WEBSITE
feed.channel.description = project_module::TAGLINE
Rake::Task[:ann_rel_doc].invoke
Rake::Task[:ann_html].invoke
item = feed.items.new_item
item.title = ann_rel_doc.title
item.link = project_module::DOCSITE + '#' + ann_rel_doc.here_frag
item.date = Time.parse(item.title)
item.description = ann_html
end
File.write ann_feed_dst, feed
end
CLOBBER.include ann_feed_dst
# plain text
ann_text_dst = 'ANN.txt'
desc "Build plain text announcement: #{ann_text_dst}"
task 'ann:text' => ann_text_dst
file ann_text_dst => doc_man_deps do
Rake::Task[:ann_text].invoke
File.write ann_text_dst, ann_text
end
CLEAN.include ann_text_dst
# e-mail
ann_mail_dst = 'ANN.eml'
desc "Build e-mail announcement: #{ann_mail_dst}"
task 'ann:mail' => ann_mail_dst
file ann_mail_dst => doc_man_deps do
File.open ann_mail_dst, 'w' do |f|
require 'time'
f.puts "Date: #{Time.now.rfc822}"
f.puts 'To: ruby-talk@ruby-lang.org'
f.puts 'From: "%s" <%s>' % project_module::AUTHORS.first
f.puts "Subject: #{ann_subject}"
Rake::Task[:ann_text].invoke
f.puts '', ann_text
end
end
CLEAN.include ann_mail_dst
# packaging
desc 'Build a release.'
task :pak => [:clobber, :doc] do
sh $0, 'package'
end
CLEAN.include 'pkg'
# ruby gem
require 'rake/gempackagetask'
gem = Gem::Specification.new do |gem|
authors = project_module::AUTHORS
if author = authors.first
gem.author, gem.email = author
end
if authors.length > 1
gem.authors = authors.map {|name, mail| name }
end
gem.rubyforge_project = options[:rubyforge_project]
# XXX: In theory, `gem.name` should be assigned to
# ::PROJECT instead of ::PROGRAM
#
# In practice, PROJECT may contain non-word
# characters and may also contain a mixture
# of lowercase and uppercase letters.
#
# This makes it difficult for people to
# install the project gem because they must
# remember the exact spelling used in
# `gem.name` when running `gem install ____`.
#
# For example, consider the "RedCloth" gem.
#
gem.name = project_module::PROGRAM
gem.version = project_module::VERSION
gem.summary = project_module::TAGLINE
gem.description = gem.summary
gem.homepage = project_module::WEBSITE
gem.files = FileList['**/*'].exclude('_darcs') - CLEAN
gem.executables = project_module::PROGRAM
gem.has_rdoc = true
unless project_module == Inochi
gem.add_dependency 'inochi', Inochi::VERSION
end
project_module::REQUIRE.each_pair do |gem_name, version_reqs|
gem.add_dependency gem_name, *version_reqs
end
# additional configuration is done by user
yield gem if gem_config
end
Rake::GemPackageTask.new(gem).define
# XXX: hide the tasks defined by the above gem packaging library
%w[gem package repackage clobber_package].each {|t| hide_rake_task[t] }
# releasing
desc 'Publish a new release.'
task 'pub' => %w[ pub:pak pub:doc pub:ann ]
# connect to RubyForge services
pub_rubyforge = nil
task :pub_rubyforge do
require 'rubyforge'
pub_rubyforge = RubyForge.new
pub_rubyforge.configure 'release_date' => project_module::RELEASE
pub_rubyforge.login
end
# documentation
desc 'Publish documentation to project website.'
task 'pub:doc' => [:doc, 'ann:feed', :pub_rubyforge] do
target = options[:upload_target]
unless target
require 'uri'
docsite = URI.parse(project_module::DOCSITE)
# provide uploading capability to websites hosted on RubyForge
if docsite.host.include? '.rubyforge.org'
target = "#{pub_rubyforge.userconfig['username']}@rubyforge.org:#{ File.join '/var/www/gforge-projects', options[:rubyforge_project], docsite.path}"
end
end
if target
cmd = ['rsync', '-auvz', 'doc/', "#{target}/"]
cmd.push '--delete' if options[:upload_delete]
cmd.concat options[:upload_options]
sh(*cmd)
end
end
# announcement
desc 'Publish all release announcements.'
task 'pub:ann' => %w[ pub:ann:forge pub:ann:raa ]
desc 'Announce to RubyForge news.'
task 'pub:ann:forge' => [:pub_rubyforge, :ann_text] do
# TODO: post only if news item is not already there
pub_rubyforge.post_news options[:rubyforge_project], ann_subject, ann_text
end
desc 'Announce to RAA (Ruby Application Archive).'
task 'pub:ann:raa' => :ann_nfo_text do
show_page_error = lambda do |page, message|
raise "#{message}: #{(page/'h2').text} -- #{(page/'p').first.text.strip}"
end
resource = "#{options[:raa_project].inspect} project entry on RAA"
require 'mechanize'
agent = WWW::Mechanize.new
page = agent.get "http://raa.ruby-lang.org/update.rhtml?name=#{options[:raa_project]}"
if form = page.forms[1]
resource << " (owned by #{form.owner.inspect})"
form.description_style = 'Pre-formatted'
form.description = ann_nfo_text
form.short_description = project_module::TAGLINE
form.version = project_module::VERSION
form.url = project_module::WEBSITE
form.pass = options[:raa_password] or begin
# ask for password
print "Password for #{resource}: "
gets.chomp
end
page = agent.submit form
if page.title =~ /error/i
show_page_error[page, "Could not update #{resource}"]
end
else
show_page_error[page, "Could not access #{resource}"]
end
end
# release packages
desc 'Publish release packages to RubyForge.'
task 'pub:pak' => [:pak, :pub_rubyforge] do
uploader = lambda do |command, *files|
pub_rubyforge.__send__ command, options[:rubyforge_project], options[:rubyforge_section], project_module::VERSION, *files
end
packages = Dir['pkg/*.[a-z]*']
unless packages.empty?
# NOTE: use the 'add_release' command ONLY for the first
# file because it creates a new sub-section on the
# RubyForge download page; we do not want one package
# per sub-section on the RubyForge download page!
#
uploader[:add_release, packages.shift]
unless packages.empty?
uploader[:add_file, *packages]
end
end
end
end
##
# Provides a common configuration for the project's user manual:
#
# * Assigns the title, subtitle, date, and authors for the document.
#
# You may override these assignments by reassigning these
# document parameters AFTER this method is invoked.
#
# Refer to the "document parameters" for the XHTML
# format in the "erbook" user manual for details.
#
# * Provides the project's configuration as global variables in the document.
#
# For example, <%= $version %> is the same as
# <%= project_module::VERSION %> in the document.
#
# * Defines a "project_summary" node for use in the document. The body
# of this node should contain a brief introduction to the project.
#
# * Defines a "project_history" node for use in the document. The body
# of this node should contain other nodes, each of which represent a
# single set of release notes for one of the project's releases.
#
# It is assumed that this method is called
# from within the Inochi.rake() environment.
#
# @param [Symbol] project_symbol
# Name of the Ruby constant which serves
# as a namespace for the entire project.
#
# @param [ERBook::Document::Template] book_template
# The eRuby template which serves as the documentation for the project.
#
def book project_symbol, book_template
project_module = fetch_project_module(project_symbol)
# provide project constants as global variables to the user manual
project_module::INOCHI.each_pair do |param, value|
eval "$#{param} = value", binding
end
# set document parameters for the user manual
$title = project_module::DISPLAY
$subtitle = project_module::TAGLINE
$feeds = { File.join(project_module::DOCSITE, 'ann.xml') => :rss }
$authors = Hash[
*project_module::AUTHORS.map do |name, addr|
# convert raw e-mail addresses into URLs for the erbook XHTML format
addr = "mailto:#{addr}" unless addr =~ /^\w+:/
[name, addr]
end.flatten
]
class << book_template
def project_summary
raise ArgumentError, 'block must be given' unless block_given?
node do
$project_summary_node = @nodes.last
yield
end
end
def project_history
raise ArgumentError, 'block must be given' unless block_given?
node do
$project_history_node = @nodes.last
yield
end
end
end
end
##
# Returns the name of the main program executable, which
# is the same as the project name fully in lowercase.
#
def calc_program_name project_symbol
camel_to_snake_case(project_symbol).downcase
end
##
# Calculates the name of the project module from the given project name.
#
def calc_project_symbol project_name
name = project_name.to_s.gsub(/\W+/, '_').squeeze('_').gsub(/^_|_$/, '')
(name[0,1].upcase + name[1..-1]).to_sym
end
##
# Transforms the given input from CamelCase to snake_case.
#
def camel_to_snake_case input
input = input.to_s.dup
# handle camel case like FooBar => Foo_Bar
while input.gsub!(/([a-z]+)([A-Z])(\w+)/) { $1 + '_' + $2 + $3 }
end
# handle abbreviations like XMLParser => XML_Parser
while input.gsub!(/([A-Z]+)([A-Z])([a-z]+)/) { $1 + '_' + $2 + $3 }
end
input
end
private
##
# Returns the path of the file in which this method was called. Calls
# to this method from within *THIS* file are excluded from the search.
#
def first_caller_file
caller.each {|s| !s.include? __FILE__ and s =~ /^(.*?):\d+/ and break $1 }
end
##
# Returns the project module corresponding to the given symbol.
# A new module is created if none already exists.
#
def fetch_project_module project_symbol
if Object.const_defined? project_symbol
project_module = Object.const_get(project_symbol)
else
project_module = Module.new
Object.const_set project_symbol, project_module
end
project_module
end
end
end
##
# utility methods
#
unless File.respond_to? :write
  ##
  # Writes the given content to the given file.
  #
  # Backfill for old Ruby versions (pre-1.9.3) that lack File.write;
  # opens in binary mode so no newline translation occurs.
  #
  # @return number of bytes written
  #
  def File.write path, content
    File.open(path, 'wb') {|f| f.write content.to_s }
  end
end
|
module Sorcery
  module Providers
    # This class adds support for OAuth with Linkedin.com.
    #
    #   config.linkedin.key = <key>
    #   config.linkedin.secret = <secret>
    #   ...
    #
    class Linkedin < Base
      include Protocols::Oauth

      attr_accessor :authorize_path, :access_permissions, :access_token_path,
                    :request_token_path, :user_info_fields, :user_info_path

      # Seeds the OAuth endpoint configuration for LinkedIn's API.
      def initialize
        @configuration = {
          site: 'https://api.linkedin.com',
          authorize_path: '/uas/oauth/authenticate',
          request_token_path: '/uas/oauth/requestToken',
          access_token_path: '/uas/oauth/accessToken'
        }
        @user_info_path = '/v1/people/~'
      end

      # Override included get_consumer method to provide authorize_path
      def get_consumer
        # Add access permissions to request token path (only once).
        # FIX: parenthesize the include? argument -- the bare trailing
        # literal reads ambiguously inside the `unless ... or ...`
        # condition and trips parser warnings/linters.
        @configuration[:request_token_path] += '?scope=' + access_permissions.join('+') unless access_permissions.blank? or @configuration[:request_token_path].include?('?scope=')
        ::OAuth::Consumer.new(@key, @secret, @configuration)
      end

      # Fetches the authenticated user's profile (id plus the configured
      # user_info_fields) as JSON and merges it into the auth hash.
      def get_user_hash(access_token)
        fields = self.user_info_fields.join(',')
        response = access_token.get("#{@user_info_path}:(id,#{fields})", 'x-li-format' => 'json')
        auth_hash(access_token).tap do |h|
          h[:user_info] = JSON.parse(response.body)
          h[:uid] = h[:user_info]['id'].to_s
        end
      end

      # calculates and returns the url to which the user should be redirected,
      # to get authenticated at the external provider's site.
      def login_url(params, session)
        req_token = get_request_token
        # stash the token pair so process_callback can redeem it later
        session[:request_token] = req_token.token
        session[:request_token_secret] = req_token.secret
        authorize_url({ request_token: req_token.token, request_token_secret: req_token.secret })
      end

      # tries to login the user from access token
      def process_callback(params, session)
        args = {
          oauth_verifier: params[:oauth_verifier],
          request_token: session[:request_token],
          request_token_secret: session[:request_token_secret]
        }
        args.merge!({ code: params[:code] }) if params[:code]
        get_access_token(args)
      end
    end
  end
end
Fix ambiguous use of include?() in Linkedin#get_consumer by parenthesizing its argument
module Sorcery
  module Providers
    # OAuth (1.0a) provider for Linkedin.com.
    #
    #   config.linkedin.key = <key>
    #   config.linkedin.secret = <secret>
    #   ...
    #
    class Linkedin < Base
      include Protocols::Oauth

      attr_accessor :authorize_path, :access_permissions, :access_token_path,
                    :request_token_path, :user_info_fields, :user_info_path

      # Seeds the OAuth endpoint configuration for LinkedIn's API.
      def initialize
        @configuration = {
          site: 'https://api.linkedin.com',
          authorize_path: '/uas/oauth/authenticate',
          request_token_path: '/uas/oauth/requestToken',
          access_token_path: '/uas/oauth/accessToken'
        }
        @user_info_path = '/v1/people/~'
      end

      # Override included get_consumer method to provide authorize_path.
      def get_consumer
        # Append the configured access permissions to the request token
        # path, but never more than once.
        unless access_permissions.blank? || @configuration[:request_token_path].include?('?scope=')
          @configuration[:request_token_path] += '?scope=' + access_permissions.join('+')
        end
        ::OAuth::Consumer.new(@key, @secret, @configuration)
      end

      # Fetches the user's profile (id plus user_info_fields) as JSON and
      # merges it into the auth hash.
      def get_user_hash(access_token)
        requested_fields = user_info_fields.join(',')
        response = access_token.get("#{@user_info_path}:(id,#{requested_fields})", 'x-li-format' => 'json')
        auth_hash(access_token).tap do |hash|
          hash[:user_info] = JSON.parse(response.body)
          hash[:uid] = hash[:user_info]['id'].to_s
        end
      end

      # calculates and returns the url to which the user should be redirected,
      # to get authenticated at the external provider's site.
      def login_url(params, session)
        request_token = get_request_token
        # stash the token pair so process_callback can redeem it later
        session[:request_token] = request_token.token
        session[:request_token_secret] = request_token.secret
        authorize_url(request_token: request_token.token, request_token_secret: request_token.secret)
      end

      # tries to login the user from access token
      def process_callback(params, session)
        args = {
          oauth_verifier: params[:oauth_verifier],
          request_token: session[:request_token],
          request_token_secret: session[:request_token_secret]
        }
        args[:code] = params[:code] if params[:code]
        get_access_token(args)
      end
    end
  end
end
|
module Specinfra
  module Helper
    # Base class for OS-detection helpers. Concrete subclasses implement
    # #detect and use #run_command to probe the target host.
    class DetectOs
      # Convenience entry point: builds an instance wired to the
      # configured backend and runs its detection logic.
      def self.detect
        new(Specinfra.backend).detect
      end

      # backend - any object responding to #run_command.
      def initialize(backend)
        @backend = backend
      end

      # Delegates command execution to the backend.
      def run_command(cmd)
        @backend.run_command(cmd)
      end

      # Subclasses must override this with their detection logic.
      def detect
        raise NotImplementedError
      end
    end
  end
end
require 'specinfra/helper/detect_os/aix'
require 'specinfra/helper/detect_os/alpine'
require 'specinfra/helper/detect_os/arch'
require 'specinfra/helper/detect_os/coreos'
require 'specinfra/helper/detect_os/darwin'
require 'specinfra/helper/detect_os/debian'
require 'specinfra/helper/detect_os/esxi'
require 'specinfra/helper/detect_os/eos'
require 'specinfra/helper/detect_os/freebsd'
require 'specinfra/helper/detect_os/gentoo'
require 'specinfra/helper/detect_os/nixos'
require 'specinfra/helper/detect_os/openbsd'
require 'specinfra/helper/detect_os/plamo'
require 'specinfra/helper/detect_os/poky'
require 'specinfra/helper/detect_os/redhat'
require 'specinfra/helper/detect_os/solaris'
require 'specinfra/helper/detect_os/suse'
Add Photon OS to the detect_os helper requires
module Specinfra
  module Helper
    # Abstract OS-detection helper: subclasses run probe commands through
    # the backend and decide which operating system is present.
    class DetectOs
      # Builds a detector bound to the globally configured backend and
      # immediately runs it.
      def self.detect
        self.new(Specinfra.backend).detect
      end

      # backend - command-execution backend (responds to #run_command).
      def initialize(backend)
        @backend = backend
      end

      # Runs +cmd+ on the target via the backend and returns its result.
      def run_command(cmd)
        @backend.run_command(cmd)
      end

      # Abstract; concrete subclasses supply the detection logic.
      def detect
        raise NotImplementedError
      end
    end
  end
end
require 'specinfra/helper/detect_os/aix'
require 'specinfra/helper/detect_os/alpine'
require 'specinfra/helper/detect_os/arch'
require 'specinfra/helper/detect_os/coreos'
require 'specinfra/helper/detect_os/darwin'
require 'specinfra/helper/detect_os/debian'
require 'specinfra/helper/detect_os/esxi'
require 'specinfra/helper/detect_os/eos'
require 'specinfra/helper/detect_os/freebsd'
require 'specinfra/helper/detect_os/gentoo'
require 'specinfra/helper/detect_os/nixos'
require 'specinfra/helper/detect_os/openbsd'
require 'specinfra/helper/detect_os/photon'
require 'specinfra/helper/detect_os/plamo'
require 'specinfra/helper/detect_os/poky'
require 'specinfra/helper/detect_os/redhat'
require 'specinfra/helper/detect_os/solaris'
require 'specinfra/helper/detect_os/suse'
|
module Spree
  module Search
    # The following search options are available.
    #   * taxon
    #   * keywords in name or description
    #   * properties values
    class Elasticsearch < Spree::Core::Search::Base
      include ::Virtus.model

      attribute :query, String
      attribute :price_min, Float
      attribute :price_max, Float
      attribute :taxons, Array
      attribute :browse_mode, Boolean, default: true
      attribute :properties, Hash
      attribute :per_page, String
      attribute :page, String
      attribute :sorting, String

      def initialize(params)
        self.current_currency = Spree::Config[:currency]
        prepare(params)
      end

      # Runs the Elasticsearch query and returns the matching product
      # records, paginated according to per_page/page.
      def retrieve_products
        search_result = Spree::Product.__elasticsearch__.search(
          Spree::Product::ElasticsearchQuery.new(
            query: query,
            taxons: taxons,
            browse_mode: browse_mode,
            price_min: price_min,
            price_max: price_max,
            properties: properties || {},
            sorting: sorting
          ).to_hash
        )
        @result = search_result.limit(per_page).page(page)
        @result.records
      end

      # Aggregations of the most recent search.
      # NOTE: only valid after #retrieve_products has populated @result.
      def facets
        @result.response.aggregations
      end

      # Escapes user input so it is safe to embed in a Lucene query string.
      module Escaping
        LUCENE_SPECIAL_CHARACTERS = Regexp.new("(" + %w[
          + - && || ! ( ) { } [ ] ^ " ~ * ? : \\ /
        ].map { |s| Regexp.escape(s) }.join("|") + ")")

        LUCENE_BOOLEANS = /\b(AND|OR|NOT)\b/

        def self.escape(s)
          # 6 slashes =>
          #   ruby reads it as 3 backslashes =>
          #     the first 2 =>
          #       go into the regex engine which reads it as a single literal backslash
          #     the last one combined with the "1" to insert the first match group
          special_chars_escaped = s.gsub(LUCENE_SPECIAL_CHARACTERS, '\\\\\1')
          # Map something like 'fish AND chips' to 'fish "AND" chips', to avoid
          # Lucene trying to parse it as a query conjunction
          special_chars_escaped.gsub(LUCENE_BOOLEANS, '"\1"')
        end
      end

      protected

      # converts params to instance variables
      def prepare(params)
        # BUGFIX: params[:keywords] may be absent; escape an empty string
        # instead of crashing with NoMethodError on nil.gsub.
        @query = Escaping.escape(params[:keywords] || "")
        @sorting = params[:sorting]
        @taxons = params[:taxon] unless params[:taxon].nil?
        @browse_mode = params[:browse_mode] unless params[:browse_mode].nil?
        if params[:search]
          # price
          if params[:search][:price]
            @price_min = params[:search][:price][:min].to_f
            @price_max = params[:search][:price][:max].to_f
          end
          # properties
          @properties = params[:search][:properties]
        end
        # fall back to configured page size / first page on bad input
        @per_page = (params[:per_page].to_i <= 0) ? Spree::Config[:products_per_page] : params[:per_page].to_i
        @page = (params[:page].to_i <= 0) ? 1 : params[:page].to_i
      end
    end
  end
end
Handle a nil :keywords param by escaping an empty string instead of crashing
module Spree
  module Search
    # Elasticsearch-backed product search. Supported options:
    #   * taxon
    #   * keywords in name or description
    #   * properties values
    class Elasticsearch < Spree::Core::Search::Base
      include ::Virtus.model

      attribute :query, String
      attribute :price_min, Float
      attribute :price_max, Float
      attribute :taxons, Array
      attribute :browse_mode, Boolean, default: true
      attribute :properties, Hash
      attribute :per_page, String
      attribute :page, String
      attribute :sorting, String

      def initialize(params)
        self.current_currency = Spree::Config[:currency]
        prepare(params)
      end

      # Executes the search and returns the matching product records,
      # paginated by per_page/page.
      def retrieve_products
        es_query = Spree::Product::ElasticsearchQuery.new(
          query: query,
          taxons: taxons,
          browse_mode: browse_mode,
          price_min: price_min,
          price_max: price_max,
          properties: properties || {},
          sorting: sorting
        )
        search_result = Spree::Product.__elasticsearch__.search(es_query.to_hash)
        @result = search_result.limit(per_page).page(page)
        @result.records
      end

      # Aggregations of the most recent search; call after
      # #retrieve_products has populated @result.
      def facets
        @result.response.aggregations
      end

      # Escapes user input so it is safe inside a Lucene query string.
      module Escaping
        LUCENE_SPECIAL_CHARACTERS = Regexp.new("(" + %w[
          + - && || ! ( ) { } [ ] ^ " ~ * ? : \\ /
        ].map { |s| Regexp.escape(s) }.join("|") + ")")

        LUCENE_BOOLEANS = /\b(AND|OR|NOT)\b/

        def self.escape(s)
          # The replacement is 6 slashes: ruby reads 3 backslashes; the
          # first 2 reach the regex engine as one literal backslash, and
          # the last combines with "1" to insert the first match group.
          escaped = s.gsub(LUCENE_SPECIAL_CHARACTERS, '\\\\\1')
          # Quote bare AND/OR/NOT ('fish AND chips' => 'fish "AND" chips')
          # so Lucene does not parse them as query conjunctions.
          escaped.gsub(LUCENE_BOOLEANS, '"\1"')
        end
      end

      protected

      # Converts request params into instance variables.
      def prepare(params)
        @query = Escaping.escape(params[:keywords] || "")
        @sorting = params[:sorting]
        taxon = params[:taxon]
        @taxons = taxon unless taxon.nil?
        browse = params[:browse_mode]
        @browse_mode = browse unless browse.nil?
        search = params[:search]
        if search
          price = search[:price]
          if price
            @price_min = price[:min].to_f
            @price_max = price[:max].to_f
          end
          @properties = search[:properties]
        end
        requested_per_page = params[:per_page].to_i
        @per_page = requested_per_page > 0 ? requested_per_page : Spree::Config[:products_per_page]
        requested_page = params[:page].to_i
        @page = requested_page > 0 ? requested_page : 1
      end
    end
  end
end
|
module SpreeMultiVendor
  module_function

  # Returns the version of the currently loaded SpreeMultiVendor as a
  # <tt>Gem::Version</tt> for easy comparison.
  def version
    Gem::Version.new(VERSION::STRING)
  end

  # Semantic-version components; STRING joins the non-nil parts with
  # dots (PRE is only appended when present).
  module VERSION
    MAJOR = 0
    MINOR = 9
    TINY = 0
    PRE = nil

    STRING = [MAJOR, MINOR, TINY, PRE].compact.join('.')
  end
end
Bump version to 1.0.0 :tada:
module SpreeMultiVendor
  module_function

  # The currently loaded SpreeMultiVendor version, wrapped in a
  # <tt>Gem::Version</tt>.
  def version
    Gem::Version.new(VERSION::STRING)
  end

  # Individual semantic-version parts; STRING is the dotted form with
  # any nil components (e.g. PRE) omitted.
  module VERSION
    MAJOR = 1
    MINOR = 0
    TINY = 0
    PRE = nil

    STRING = [MAJOR, MINOR, TINY, PRE].compact.join('.')
  end
end
|
require 'ionian/extension/socket'
module Ionian
  # Wrapper around TCP/UDP/UNIX sockets that can stay open between writes
  # (persistent) or reconnect per command, and that matches incoming data
  # against a regular expression via the Ionian socket extension.
  class Socket
    ############
    # TODO NOTES
    ############
    # Always lazily instiantiate @socket, even when persistent?
    # May not work with forwarding method calls.
    # Oh! Unless the forwarded methods check for @socket to exist.
    # Will persistent methods have to check for the socket not to be
    # closed as well?

    # kvargs:
    #   host:       remote host, or socket path for :unix (required).
    #   port:       remote port; defaults to 23. Unused for :unix.
    #   expression: default match expression for read_match (optional).
    #   protocol:   :tcp (default), :udp, or :unix.
    #   persistent: keep the socket open between writes (default true).
    def initialize(**kvargs)
      @socket = nil
      @host = kvargs.fetch :host
      @port = kvargs.fetch :port, 23
      @expression = kvargs.fetch :expression, nil
      @protocol = kvargs.fetch :protocol, :tcp
      @persistent = kvargs.fetch :persistent, true
      create_socket if @persistent
    end

    # Returns a symbol of the type of protocol this socket uses:
    # :tcp, :udp, :unix
    def protocol?
      @protocol
    end

    # Returns true if the socket remains open after writing data.
    def persistent?
      @persistent == false || @persistent == nil ? false : true
    end

    # Send a command (data) to the socket. Returns received matches.
    # Block yields received match.
    # See Ionian::Extension::IO#read_match
    def cmd(string, **kvargs, &block)
      create_socket unless @persistent
      @socket.write string
      @socket.flush
      # BUGFIX: guard the yield -- the previous unconditional `yield match`
      # raised LocalJumpError whenever cmd was called without a block.
      matches = @socket.read_match(kvargs) {|match| yield match if block_given?}
      @socket.close unless @persistent
      matches
    end

    ### Methods Forwarded To @socket ###

    # Returns true if there is data in the receive buffer.
    # Args:
    #   Timeout: Number of seconds to wait for data until
    #     giving up. Set to nil for blocking.
    # Returns false when no socket has been created yet.
    def has_data?(**kvargs)
      return false unless @socket
      # NOTE(review): the kwargs hash is passed positionally; fine on the
      # Ruby versions this targets, but would need `**kvargs` on Ruby 3+.
      @socket.has_data? kvargs
    end

    # Returns true if the socket is closed (or never opened).
    def closed?
      return true unless @socket
      @socket.closed?
    end

    # Flushes buffered data to the operating system.
    # This method has no effect on non-persistent sockets.
    def flush
      @socket.flush if @persistent
    end

    # Writes the given string(s) to the socket and appends a
    # newline character to any string not already ending with one.
    def puts(*string)
      self.write string.map{|s| s.chomp}.join("\n") + "\n"
    end

    # Writes the given string to the socket. Returns the number of
    # bytes written. Non-persistent sockets are opened on demand and
    # closed again afterwards.
    def write(string)
      create_socket unless @persistent
      num_bytes = @socket.write string
      unless @persistent
        # Read in data to prevent RST packets.
        has_data = ::IO.select [@socket], nil, nil, 0
        @socket.readpartial 0xFFFF if has_data
        @socket.close
      end
      num_bytes
    end

    alias_method :<<, :write

    private

    # (Re)opens the underlying OS socket for the configured protocol and
    # decorates it with the Ionian socket extension.
    def create_socket
      @socket.close if @socket and not @socket.closed?
      case @protocol
      when :tcp
        @socket = ::TCPSocket.new @host, @port
      when :udp
        @socket = ::UDPSocket.new
        @socket.connect @host, @port
      when :unix
        @socket = ::UNIXSocket.new @host
      end
      @socket.extend Ionian::Extension::Socket
      @socket.expression = @expression if @expression
      initialize_socket_methods
    end

    # Expose the @socket methods that haven't been defined by this class.
    # Only do this once for performance -- when non-persistent sockets are
    # recreated, they should be of the same type of socket.
    def initialize_socket_methods
      # Only initialize once, lazily.
      # For non-persistent sockets, this forwards the socket methods
      # the first time data is sent -- when the new socket is created.
      return if @socket_methods_initialized
      # Forward undefined methods to @socket.
      # This was chosen over method_missing to avoid traversing the object
      # hierarchy on every method call, like transmitting data.
      @socket.methods
        .select {|m| @socket.respond_to? m}
        .select {|m| not self.respond_to? m}
        .each do |m|
          self.singleton_class.send :define_method, m do |*args, &block|
            @socket.__send__ m, *args, &block
          end
        end
      @socket_methods_initialized = true
    end
  end
end
Ionian::Socket#cmd appears to work; it now yields matches only when a block is given.
No automated tests yet because the test run blocks (hangs) when it exercises the socket.
require 'ionian/extension/socket'
module Ionian
  # Wrapper around TCP/UDP/UNIX sockets that can stay open between writes
  # (persistent) or reconnect per command, and that matches incoming data
  # against a regular expression via the Ionian socket extension.
  class Socket
    ############
    # TODO NOTES
    ############
    # Always lazily instiantiate @socket, even when persistent?
    # May not work with forwarding method calls.
    # Oh! Unless the forwarded methods check for @socket to exist.
    # Will persistent methods have to check for the socket not to be
    # closed as well?

    # kvargs:
    #   host:       remote host, or socket path for :unix (required).
    #   port:       remote port; defaults to 23. Unused for :unix.
    #   expression: default match expression for read_match (optional).
    #   protocol:   :tcp (default), :udp, or :unix.
    #   persistent: keep the socket open between writes (default true).
    def initialize(**kvargs)
      @socket = nil
      @host = kvargs.fetch :host
      @port = kvargs.fetch :port, 23
      @expression = kvargs.fetch :expression, nil
      @protocol = kvargs.fetch :protocol, :tcp
      @persistent = kvargs.fetch :persistent, true
      create_socket if @persistent
    end

    # Returns a symbol of the type of protocol this socket uses:
    # :tcp, :udp, :unix
    def protocol?
      @protocol
    end

    # Returns true if the socket remains open after writing data.
    def persistent?
      @persistent == false || @persistent == nil ? false : true
    end

    # Send a command (data) to the socket. Returns received matches.
    # Block yields received match (only when a block is given).
    # See Ionian::Extension::IO#read_match
    def cmd(string, **kvargs, &block)
      create_socket unless @persistent
      @socket.write string
      @socket.flush
      matches = @socket.read_match(kvargs) {|match| yield match if block_given?}
      @socket.close unless @persistent
      matches
    end

    ### Methods Forwarded To @socket ###

    # Returns true if there is data in the receive buffer.
    # Args:
    #   Timeout: Number of seconds to wait for data until
    #     giving up. Set to nil for blocking.
    # Returns false when no socket has been created yet.
    def has_data?(**kvargs)
      return false unless @socket
      # NOTE(review): the kwargs hash is passed positionally; fine on the
      # Ruby versions this targets, but would need `**kvargs` on Ruby 3+.
      @socket.has_data? kvargs
    end

    # Returns true if the socket is closed (or never opened).
    def closed?
      return true unless @socket
      @socket.closed?
    end

    # Flushes buffered data to the operating system.
    # This method has no effect on non-persistent sockets.
    def flush
      @socket.flush if @persistent
    end

    # Writes the given string(s) to the socket and appends a
    # newline character to any string not already ending with one.
    def puts(*string)
      self.write string.map{|s| s.chomp}.join("\n") + "\n"
    end

    # Writes the given string to the socket. Returns the number of
    # bytes written. Non-persistent sockets are opened on demand and
    # closed again afterwards.
    def write(string)
      create_socket unless @persistent
      num_bytes = @socket.write string
      unless @persistent
        # Read in data to prevent RST packets.
        has_data = ::IO.select [@socket], nil, nil, 0
        @socket.readpartial 0xFFFF if has_data
        @socket.close
      end
      num_bytes
    end

    alias_method :<<, :write

    private

    # (Re)opens the underlying OS socket for the configured protocol and
    # decorates it with the Ionian socket extension.
    def create_socket
      @socket.close if @socket and not @socket.closed?
      case @protocol
      when :tcp
        @socket = ::TCPSocket.new @host, @port
      when :udp
        @socket = ::UDPSocket.new
        @socket.connect @host, @port
      when :unix
        @socket = ::UNIXSocket.new @host
      end
      @socket.extend Ionian::Extension::Socket
      @socket.expression = @expression if @expression
      initialize_socket_methods
    end

    # Expose the @socket methods that haven't been defined by this class.
    # Only do this once for performance -- when non-persistent sockets are
    # recreated, they should be of the same type of socket.
    def initialize_socket_methods
      # Only initialize once, lazily.
      # For non-persistent sockets, this forwards the socket methods
      # the first time data is sent -- when the new socket is created.
      return if @socket_methods_initialized
      # Forward undefined methods to @socket.
      # This was chosen over method_missing to avoid traversing the object
      # hierarchy on every method call, like transmitting data.
      @socket.methods
        .select {|m| @socket.respond_to? m}
        .select {|m| not self.respond_to? m}
        .each do |m|
          self.singleton_class.send :define_method, m do |*args, &block|
            @socket.__send__ m, *args, &block
          end
        end
      @socket_methods_initialized = true
    end
  end
end
module Suspension
  # Compares strings and returns diffs
  class StringComparer
    # Compares string_1 with string_2 using diff_match_patch.
    # @param string_1 [String]
    # @param string_2 [String]
    # @param add_context_info [Boolean, optional] if true will add location and excerpt
    # @param different_only [Boolean, optional] if true will show -1 and 1 segments only
    # @param options [Hash, optional] with symbolized keys
    # @return[Array] An array of diffs like so:
    #   [[1, 'added', 'line 42', 'text_before text_after'], [-1, 'removed ', 'line 43', 'text_before removed text_after']]
    #   All information is relative to string_1. 1 means a string was added, -1 it was deleted.
    def self.compare(string_1, string_2, add_context_info=true, different_only=true, options={})
      options = {
        excerpt_window: 20, # how much context do we show before and after
      }.merge(options)
      if string_1 == string_2
        return []
      else
        diffs = Suspension::DiffAlgorithm.new.call(string_1, string_2)
        # Add context information to diffs
        deltas = []
        # We need to keep separate char counters for string_1 and string_2 so
        # that we can pull the excerpt for either of them.
        char_pos_1 = 0 # character counter on string_1
        char_pos_2 = 0 # character counter on string_2
        line_num_1 = 1 # line counter on string_1. We don't need one for string_2
        excerpt_window = options[:excerpt_window]
        # I have to do a manual loop since we're relying on idx for exception
        # rescue retries on invalid utf8 byte sequences
        idx = 0
        diffs.length.times {
          begin
            diff = diffs[idx]
            if add_context_info
              # Add location and excerpt
              excerpt = case diff.first
              when -1
                # use string_1 as context for deletions
                excerpt_start = [(char_pos_1 - excerpt_window), 0].max
                excerpt_end = [(char_pos_1 + excerpt_window), string_1.length].min - 1
                line_num_1 += diff.last.count("\n") # do first as it can raise exception
                char_pos_1 += diff.last.length
                string_1[excerpt_start..excerpt_end]
              when 1
                # use string_2 as context for additions
                excerpt_start = [(char_pos_2 - excerpt_window), 0].max
                excerpt_end = [(char_pos_2 + excerpt_window), string_2.length].min - 1
                char_pos_2 += diff.last.length
                string_2[excerpt_start..excerpt_end]
              when 0
                line_num_1 += diff.last.count("\n") # do first as it can raise exception
                char_pos_1 += diff.last.length
                char_pos_2 += diff.last.length
                nil
              else
                raise "Handle this: #{ diff.inspect }"
              end
              r = [
                diff.first, # type of modification
                diff.last, # diff string
                "line #{ line_num_1 }",
                excerpt
              ]
            else
              # Use diffs as returned by DMP
              diff.last.match(/./) # Trigger exception for invalid byte sequence in UTF-8
              r = diff
            end
            deltas << r
            # Increment at the end of rescue block so that retries are idempotent
            idx += 1
          # FIX: named `ex` so that the `|e|` block parameter further down
          # no longer shadows the rescue variable.
          rescue ArgumentError => ex
            if ex.message.index('invalid byte sequence')
              # Handles invalid UTF-8 byte sequences in diff
              # This is caused by two different multibyte characters at the
              # same position where the first bytes are identical, and a
              # subsequent one is different. DMP splits that multibyte char into
              # separate bytes and thus creates an invalid UTF8 byte sequence:
              #
              # Example: "word2—word3" and "word2…word3"
              #
              # [
              #   [0, "word2\xE2\x80"],
              #   [-1, "\x94word3"],
              #   [1, "\xA6word3"]
              # ]
              #
              # Here we re-combine the bytes into a valid UTF8 string.
              #
              # Strategy: Remove trailing invalid bytes from common prefix, and
              # prepend them to the two different suffixes and use the combined
              # strings as diffs.
              #
              invalid_diff = diffs[idx].last
              last_valid_byte_pos = -1
              until(
                (lvb = invalid_diff[last_valid_byte_pos]).nil? ||
                lvb.valid_encoding?
              )
                last_valid_byte_pos -= 1
                if last_valid_byte_pos < -5
                  # Stop after looking back for 5 bytes
                  raise "Handle this: #{ invalid_diff.inspect }"
                end
              end
              valid_prefix = invalid_diff[0..last_valid_byte_pos]
              invalid_suffix = invalid_diff[(last_valid_byte_pos + 1)..-1]
              # Prepend following diffs with invalid_suffix if:
              # * They exist
              # * Are invalid
              # * Don't have the bytes applied already. There are situations
              #   where the algorithm may apply twice. See test case:
              #   "word1 word2—word2…word3 word4 word5"
              #   "word1 word2…word3 word4"
              if(
                diffs[idx+1] &&
                !diffs[idx+1][1].valid_encoding? &&
                diffs[idx+1][1].byteslice(0,invalid_suffix.bytesize) != invalid_suffix
              )
                # Prepend invalid_suffix to idx+1
                diffs[idx+1][1].prepend(invalid_suffix)
              end
              if(
                diffs[idx+2] &&
                !diffs[idx+2][1].valid_encoding? &&
                diffs[idx+2][1].byteslice(0,invalid_suffix.bytesize) != invalid_suffix
              )
                # Prepend invalid_suffix to idx+2
                diffs[idx+2][1].prepend(invalid_suffix)
              end
              # Replace invalid_diff with valid_prefix
              diffs[idx] = [diffs[idx].first, valid_prefix]
              retry
            else
              valid_excerpt, valid_string = [excerpt, diff.last].map { |e|
                e.to_s.force_encoding('UTF-8') \
                 .encode('UTF-16', :invalid => :replace, :replace => '[invalid UTF-8 byte]') \
                 .encode('UTF-8')
              }
              $stderr.puts "Error details:"
              $stderr.puts " - line: #{ line_num_1 }"
              $stderr.puts " - diff: #{ valid_string.inspect }"
              # FIX: report the sanitized excerpt; valid_excerpt was
              # previously computed but never used.
              $stderr.puts " - excerpt: #{ valid_excerpt.inspect }"
              raise ex
            end
          end
        }
        if different_only
          deltas.find_all { |e| 0 != e.first }
        else
          deltas
        end
      end
    end
  end
end
Fixed issue where a block-local variable shadowed the outer rescue variable in StringComparer.compare
module Suspension
  # Compares strings and returns diffs.
  class StringComparer
    # Compares string_1 with string_2 using diff_match_patch.
    #
    # @param string_1 [String]
    # @param string_2 [String]
    # @param add_context_info [Boolean, optional] if true will add location and excerpt
    # @param different_only [Boolean, optional] if true will show -1 and 1 segments only
    # @param options [Hash, optional] with symbolized keys (:excerpt_window)
    # @return [Array] An array of diffs like so:
    #   [[1, 'added', 'line 42', 'text_before text_after'], [-1, 'removed ', 'line 43', 'text_before removed text_after']]
    #   All information is relative to string_1. 1 means a string was added, -1 it was deleted.
    def self.compare(string_1, string_2, add_context_info=true, different_only=true, options={})
      options = {
        excerpt_window: 20, # how much context do we show before and after
      }.merge(options)
      if string_1 == string_2
        return []
      else
        diffs = Suspension::DiffAlgorithm.new.call(string_1, string_2)
        # Add context information to diffs
        deltas = []
        # We need to keep separate char counters for string_1 and string_2 so
        # that we can pull the excerpt for either of them.
        char_pos_1 = 0 # character counter on string_1
        char_pos_2 = 0 # character counter on string_2
        line_num_1 = 1 # line counter on string_1. We don't need one for string_2
        excerpt_window = options[:excerpt_window]
        # I have to do a manual loop since we're relying on idx for exception
        # rescue retries on invalid utf8 byte sequences: idx is only advanced
        # at the end of the begin block, so a rescued-and-repaired diff is
        # re-processed at the same position via `retry`.
        idx = 0
        diffs.length.times {
          begin
            diff = diffs[idx]
            if add_context_info
              # Add location and excerpt
              excerpt = case diff.first
              when -1
                # use string_1 as context for deletions
                excerpt_start = [(char_pos_1 - excerpt_window), 0].max
                excerpt_end = [(char_pos_1 + excerpt_window), string_1.length].min - 1
                line_num_1 += diff.last.count("\n") # do first as it can raise exception
                char_pos_1 += diff.last.length
                string_1[excerpt_start..excerpt_end]
              when 1
                # use string_2 as context for additions
                excerpt_start = [(char_pos_2 - excerpt_window), 0].max
                excerpt_end = [(char_pos_2 + excerpt_window), string_2.length].min - 1
                char_pos_2 += diff.last.length
                string_2[excerpt_start..excerpt_end]
              when 0
                # equal segment: advance both counters; no excerpt is needed
                line_num_1 += diff.last.count("\n") # do first as it can raise exception
                char_pos_1 += diff.last.length
                char_pos_2 += diff.last.length
                nil
              else
                raise "Handle this: #{ diff.inspect }"
              end
              r = [
                diff.first, # type of modification
                diff.last, # diff string
                "line #{ line_num_1 }",
                excerpt
              ]
            else
              # Use diffs as returned by DMP
              diff.last.match(/./) # Trigger exception for invalid byte sequence in UTF-8
              r = diff
            end
            deltas << r
            # Increment at the end of the begin block so that retries are idempotent
            idx += 1
          rescue ArgumentError => ex
            if ex.message.index('invalid byte sequence')
              # Handles invalid UTF-8 byte sequences in diff
              # This is caused by two different multibyte characters at the
              # same position where the first bytes are identical, and a
              # subsequent one is different. DMP splits that multibyte char into
              # separate bytes and thus creates an invalid UTF8 byte sequence:
              #
              # Example: "word2—word3" and "word2…word3"
              #
              # [
              #   [0, "word2\xE2\x80"],
              #   [-1, "\x94word3"],
              #   [1, "\xA6word3"]
              # ]
              #
              # Here we re-combine the bytes into a valid UTF8 string.
              #
              # Strategy: Remove trailing invalid bytes from common prefix, and
              # prepend them to the two different suffixes and use the combined
              # strings as diffs.
              #
              invalid_diff = diffs[idx].last
              # Walk backwards (at most 5 bytes) to find the last valid char.
              last_valid_byte_pos = -1
              until(
                (lvb = invalid_diff[last_valid_byte_pos]).nil? ||
                lvb.valid_encoding?
              )
                last_valid_byte_pos -= 1
                if last_valid_byte_pos < -5
                  # Stop after looking back for 5 bytes
                  raise "Handle this: #{ invalid_diff.inspect }"
                end
              end
              valid_prefix = invalid_diff[0..last_valid_byte_pos]
              invalid_suffix = invalid_diff[(last_valid_byte_pos + 1)..-1]
              # Prepend following diffs with invalid_suffix if:
              # * They exist
              # * Are invalid
              # * Don't have the bytes applied already. There are situations
              #   where the algorithm may apply twice. See test case:
              #   "word1 word2—word2…word3 word4 word5"
              #   "word1 word2…word3 word4"
              if(
                diffs[idx+1] &&
                !diffs[idx+1][1].valid_encoding? &&
                diffs[idx+1][1].byteslice(0,invalid_suffix.bytesize) != invalid_suffix
              )
                # Prepend invalid_suffix to idx+1
                diffs[idx+1][1].prepend(invalid_suffix)
              end
              if(
                diffs[idx+2] &&
                !diffs[idx+2][1].valid_encoding? &&
                diffs[idx+2][1].byteslice(0,invalid_suffix.bytesize) != invalid_suffix
              )
                # Prepend invalid_suffix to idx+2
                diffs[idx+2][1].prepend(invalid_suffix)
              end
              # Replace invalid_diff with valid_prefix
              diffs[idx] = [diffs[idx].first, valid_prefix]
              retry
            else
              # Unexpected ArgumentError: print sanitized debug info (replace
              # any invalid bytes so .inspect itself can't raise), then re-raise.
              valid_excerpt, valid_string = [excerpt, diff.last].map { |e|
                e.to_s.force_encoding('UTF-8') \
                  .encode('UTF-16', :invalid => :replace, :replace => '[invalid UTF-8 byte]') \
                  .encode('UTF-8')
              }
              $stderr.puts "Error details:"
              $stderr.puts " - line: #{ line_num_1 }"
              $stderr.puts " - diff: #{ valid_string.inspect }"
              $stderr.puts " - excerpt: #{ valid_excerpt.inspect }"
              raise ex
            end
          end
        }
        if different_only
          deltas.find_all { |e| 0 != e.first }
        else
          deltas
        end
      end
    end
  end
end
|
module IRuby
  # History buffers: In[n] holds the code of the nth input cell, Out[n] its
  # result. Index 0 is a nil placeholder so indices line up with 1-based
  # cell numbering.
  In, Out = [nil], [nil]
  # Also expose the history arrays as top-level constants for user code
  # (IPython-style In/Out).
  ::In, ::Out = In, Out

  # Prepended onto backends to record inputs/outputs around #eval and to
  # maintain IPython-style convenience locals on TOPLEVEL_BINDING:
  # _, __, ___ (last three results), _i, _ii, _iii (last three inputs),
  # _ih/_oh (the history arrays) and numbered _<n>/_i<n> entries.
  module History
    def eval(code, store_history)
      b = TOPLEVEL_BINDING
      b.local_variable_set(:_ih, In) unless b.local_variable_defined?(:_ih)
      b.local_variable_set(:_oh, Out) unless b.local_variable_defined?(:_oh)
      out = super
      # TODO Add IRuby.cache_size which controls the size of the Out array
      # and sets the oldest entries and _<n> variables to nil.
      if store_history
        b.local_variable_set("_#{Out.size}", out)
        b.local_variable_set("_i#{In.size}", code)
        Out << out
        In << code
        b.local_variable_set(:___, Out[-3])
        b.local_variable_set(:__, Out[-2])
        b.local_variable_set(:_, Out[-1])
        b.local_variable_set(:_iii, In[-3])
        b.local_variable_set(:_ii, In[-2])
        b.local_variable_set(:_i, In[-1])
      end
      out
    end
  end

  # Simple backend: evaluates cells directly on the top-level binding.
  # Completion is provided by the Bond gem.
  class PlainBackend
    prepend History

    def initialize
      require 'bond'
      Bond.start(debug: true)
    end

    def eval(code, store_history)
      # Pass a pseudo-filename and starting line number so exceptions raised
      # by user code carry '(iruby):<line>' in their backtraces instead of an
      # opaque '(eval)' location.
      TOPLEVEL_BINDING.eval(code, '(iruby)', 1)
    end

    def complete(code)
      Bond.agent.call(code, code)
    end
  end

  # Backend that delegates evaluation and completion to an embedded Pry
  # instance bound to the top-level binding.
  class PryBackend
    prepend History

    def initialize
      require 'pry'
      Pry.memory_size = 3
      Pry.pager = false # Don't use the pager
      Pry.print = proc {|output, value|} # No result printing
      Pry.exception_handler = proc {|output, exception, _| }
      reset
    end

    def eval(code, store_history)
      @pry.last_result = nil
      unless @pry.eval(code)
        # Pry signalled that it wants to quit (e.g. `exit`); recycle it.
        reset
        raise SystemExit
      end
      unless @pry.eval_string.empty?
        # Incomplete input is left in Pry's buffer; flush it and evaluate it
        # directly so the resulting syntax error surfaces here.
        syntax_error = @pry.eval_string
        @pry.reset_eval_string
        @pry.evaluate_ruby syntax_error
      end
      raise @pry.last_exception if @pry.last_result_is_exception?
      @pry.push_initial_binding unless @pry.current_binding # ensure that we have a binding
      @pry.last_result
    end

    def complete(code)
      @pry.complete(code)
    end

    # Replace the embedded Pry with a fresh instance bound to top level.
    def reset
      @pry = Pry.new(output: $stdout, target: TOPLEVEL_BINDING)
    end
  end
end
fix: PlainBackend error message line number
module IRuby
  # Input/output history, one slot per cell; slot 0 is a nil placeholder so
  # that numbering starts with cell 1.
  In = [nil]
  Out = [nil]
  # Mirror the history arrays as top-level constants for user code.
  ::In = In
  ::Out = Out

  # Wraps a backend's #eval to capture per-cell history and keep the
  # IPython-style helper locals on the top-level binding up to date.
  module History
    def eval(code, store_history)
      tlb = TOPLEVEL_BINDING
      tlb.local_variable_set(:_ih, In) unless tlb.local_variable_defined?(:_ih)
      tlb.local_variable_set(:_oh, Out) unless tlb.local_variable_defined?(:_oh)
      result = super
      # TODO Add IRuby.cache_size which controls the size of the Out array
      # and sets the oldest entries and _<n> variables to nil.
      if store_history
        tlb.local_variable_set("_#{Out.size}", result)
        tlb.local_variable_set("_i#{In.size}", code)
        Out << result
        In << code
        # Refresh the "last three results / inputs" shortcuts.
        { _: Out[-1], __: Out[-2], ___: Out[-3],
          _i: In[-1], _ii: In[-2], _iii: In[-3] }.each do |name, value|
          tlb.local_variable_set(name, value)
        end
      end
      result
    end
  end

  # Plain backend: evaluates each cell directly on the top-level binding;
  # completion comes from the Bond gem.
  class PlainBackend
    prepend History

    def initialize
      require 'bond'
      Bond.start(debug: true)
    end

    def eval(code, store_history)
      # The pseudo-filename makes backtraces read '(iruby):<line>'.
      TOPLEVEL_BINDING.eval(code, '(iruby)', 1)
    end

    def complete(code)
      Bond.agent.call(code, code)
    end
  end

  # Backend that runs cells through an embedded Pry instance.
  class PryBackend
    prepend History

    def initialize
      require 'pry'
      Pry.memory_size = 3
      Pry.pager = false # Don't use the pager
      Pry.print = proc {|output, value|} # No result printing
      Pry.exception_handler = proc {|output, exception, _| }
      reset
    end

    def eval(code, store_history)
      @pry.last_result = nil
      unless @pry.eval(code)
        # Pry wants to quit (e.g. `exit`); recycle it and signal shutdown.
        reset
        raise SystemExit
      end
      unless @pry.eval_string.empty?
        # Incomplete input remains buffered; flush and evaluate it so the
        # syntax error is raised here.
        pending = @pry.eval_string
        @pry.reset_eval_string
        @pry.evaluate_ruby pending
      end
      raise @pry.last_exception if @pry.last_result_is_exception?
      @pry.push_initial_binding unless @pry.current_binding # ensure that we have a binding
      @pry.last_result
    end

    def complete(code)
      @pry.complete(code)
    end

    # Swap in a fresh Pry bound to the top-level binding.
    def reset
      @pry = Pry.new(output: $stdout, target: TOPLEVEL_BINDING)
    end
  end
end
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.