CombinedText stringlengths 4 3.42M |
|---|
# encoding: utf-8
require 'hocon'
require 'hocon/config_error'
require 'hocon/impl'
# Tracks the current resolution "source": the root object plus the chain of
# parent containers used to reach the value being resolved.
# Port of ResolveSource from the upstream typesafehub/config Java library.
class Hocon::Impl::ResolveSource
  ConfigBugOrBrokenError = Hocon::ConfigError::ConfigBugOrBrokenError

  # @param root the root config object resolution starts from
  # @param path_from_root [Node, nil] persistent list of parents (root last);
  #   nil means we should not assume we are a descendant of the root
  def initialize(root, path_from_root = nil)
    @root = root
    @path_from_root = path_from_root
  end

  # Returns a ResolveSource with `parent` pushed onto the parent chain.
  # When we have no path yet, only the root itself may start one; any other
  # parent is ignored (we are not proceeding from the root).
  # @raise [ConfigBugOrBrokenError] if parent is nil
  def push_parent(parent)
    unless parent
      raise ConfigBugOrBrokenError.new("can't push null parent")
    end
    if Hocon::Impl::ConfigImpl.trace_substitution_enabled
      Hocon::Impl::ConfigImpl.trace("pushing parent #{parent} ==root #{(parent == root)} onto #{self}")
    end
    if @path_from_root == nil
      if parent.equal?(@root)
        return self.class.new(@root, Node.new(parent))
      else
        if Hocon::Impl::ConfigImpl.trace_substitution_enabled
          # this hasDescendant check is super-expensive so it's a
          # trace message rather than an assertion
          if @root.has_descendant(parent)
            Hocon::Impl::ConfigImpl.trace(
                "***** BUG ***** tried to push parent #{parent} without having a path to it in #{self}")
          end
        end
        # ignore parents if we aren't proceeding from the
        # root
        return self
      end
    else
      parent_parent = @path_from_root.head
      if Hocon::Impl::ConfigImpl.trace_substitution_enabled
        # this hasDescendant check is super-expensive so it's a
        # trace message rather than an assertion
        if parent_parent != nil && !parent_parent.has_descendant(parent)
          Hocon::Impl::ConfigImpl.trace(
              "***** BUG ***** trying to push non-child of #{parent_parent}, non-child was #{parent}")
        end
      end
      ResolveSource.new(@root, @path_from_root.prepend(parent))
    end
  end

  # A persistent (immutable) singly-linked list used for the parent chain.
  class Node
    attr_reader :next_node, :value

    def initialize(value, next_node = nil)
      @value = value
      @next_node = next_node
    end

    # Returns a new list with `value` in front; receiver is unchanged.
    def prepend(value)
      Node.new(value, self)
    end

    def head
      @value
    end

    def tail
      @next_node
    end

    # Value of the final node in the chain.
    def last
      i = self
      while i.next_node != nil
        i = i.next_node
      end
      i.value
    end

    # Returns a new list with the elements in reverse order.
    def reverse
      if @next_node == nil
        self
      else
        reversed = Node.new(@value)
        i = @next_node
        while i != nil
          reversed = reversed.prepend(i.value)
          i = i.next_node
        end
        reversed
      end
    end

    def to_s
      sb = ""
      sb << "["
      to_append_value = self.reverse
      while to_append_value != nil
        sb << to_append_value.value.to_s
        # Fix: the accessor is `next_node`; calling the undefined `next`
        # raised NoMethodError whenever a node list was traced.
        if to_append_value.next_node != nil
          sb << " <= "
        end
        to_append_value = to_append_value.next_node
      end
      sb << "]"
      sb
    end
  end
end
(maint) sync ResolveSource with upstream version
# encoding: utf-8
require 'hocon'
require 'hocon/config_error'
require 'hocon/impl'
require 'hocon/impl/config_impl'
# Tracks the current resolution "source": the root object plus the chain of
# parent containers used to reach the value being resolved.
# Port of ResolveSource from the upstream typesafehub/config Java library.
class Hocon::Impl::ResolveSource
  ConfigBugOrBrokenError = Hocon::ConfigError::ConfigBugOrBrokenError
  ConfigNotResolvedError = Hocon::ConfigError::ConfigNotResolvedError

  # 'path_from_root' is used for knowing the chain of parents we used to get here.
  # nil if we should assume we are not a descendant of the root.
  # the root itself should be a node in this if non-nil.
  attr_accessor :root, :path_from_root

  def initialize(root, path_from_root = nil)
    @root = root
    @path_from_root = path_from_root
  end

  # As a side effect, find_in_object will have to resolve all parents of the
  # child being peeked, but NOT the child itself. Caller has to resolve
  # the child itself if needed. ValueWithPath.value can be nil but
  # the ValueWithPath instance itself should not be.
  # @return [ResultWithPath]
  def find_in_object(obj, context, path)
    # resolve ONLY portions of the object which are along our path
    if Hocon::Impl::ConfigImpl.trace_substitution_enabled
      # Fix: messages were built with String#+ on non-String objects,
      # which raises TypeError; interpolation calls #to_s as intended.
      Hocon::Impl::ConfigImpl.trace("*** finding '#{path}' in #{obj}")
    end
    restriction = context.restrict_to_child
    partially_resolved = context.restrict(path).resolve(obj, Hocon::Impl::ResolveSource.new(obj))
    new_context = partially_resolved.context.restrict(restriction)
    if partially_resolved.value.is_a?(Hocon::Impl::AbstractConfigObject)
      pair = self.class.find_in_object_impl(partially_resolved.value, path)
      # Fix: class is named ResultWithPath; `ResultWithPair` was undefined.
      ResultWithPath.new(Hocon::Impl::ResolveResult.make(new_context, pair.value), pair.path_from_root)
    else
      raise ConfigBugOrBrokenError.new("resolved object to non-object #{obj} to #{partially_resolved}")
    end
  end

  # Looks up a substitution (`${...}`): first relative to the file it occurred
  # in, then relative to the root file, then (optionally) in the system
  # environment.
  # @return [ResultWithPath]
  def lookup_subst(context, subst, prefix_length)
    if Hocon::Impl::ConfigImpl.trace_substitution_enabled
      # Fix: `depth` was an undefined bare name; the depth lives on the
      # resolve context.
      Hocon::Impl::ConfigImpl.trace("searching for #{subst}", context.depth)
      Hocon::Impl::ConfigImpl.trace("#{subst} - looking up relative to file it occurred in",
                                    context.depth)
    end
    # First we look up the full path, which means relative to the
    # included file if we were not a root file
    result = find_in_object(@root, context, subst.path)
    if result.result.value == nil
      # Then we want to check relative to the root file. We don't
      # want the prefix we were included at to be used when looking
      # up env variables either.
      # Fix: `subst.plath` was a typo for `subst.path` (see lookup above).
      unprefixed = subst.path.sub_path(prefix_length)
      if prefix_length > 0
        if Hocon::Impl::ConfigImpl.trace_substitution_enabled
          Hocon::Impl::ConfigImpl.trace(
              "#{unprefixed} - looking up relative to parent file",
              result.result.context.depth)
        end
        result = find_in_object(@root, result.result.context, unprefixed)
      end
      if result.result.value == nil && result.result.context.options.use_system_environment
        if Hocon::Impl::ConfigImpl.trace_substitution_enabled
          Hocon::Impl::ConfigImpl.trace(
              "#{unprefixed} - looking up in system environment",
              result.result.context.depth)
        end
        result = find_in_object(Hocon::Impl::ConfigImpl.env_variables_as_config_object, context, unprefixed)
      end
    end
    if Hocon::Impl::ConfigImpl.trace_substitution_enabled
      Hocon::Impl::ConfigImpl.trace(
          "resolved to #{result}",
          result.result.context.depth)
    end
    result
  end

  # Returns a ResolveSource with `parent` pushed onto the parent chain.
  # @raise [ConfigBugOrBrokenError] if parent is nil
  def push_parent(parent)
    unless parent
      raise ConfigBugOrBrokenError.new("can't push null parent")
    end
    if Hocon::Impl::ConfigImpl.trace_substitution_enabled
      Hocon::Impl::ConfigImpl.trace("pushing parent #{parent} ==root #{(parent == root)} onto #{self}")
    end
    if @path_from_root == nil
      if parent.equal?(@root)
        return self.class.new(@root, Node.new(parent))
      else
        if Hocon::Impl::ConfigImpl.trace_substitution_enabled
          # this hasDescendant check is super-expensive so it's a
          # trace message rather than an assertion
          if @root.has_descendant(parent)
            Hocon::Impl::ConfigImpl.trace(
                "***** BUG ***** tried to push parent #{parent} without having a path to it in #{self}")
          end
        end
        # ignore parents if we aren't proceeding from the
        # root
        return self
      end
    else
      parent_parent = @path_from_root.head
      if Hocon::Impl::ConfigImpl.trace_substitution_enabled
        # this hasDescendant check is super-expensive so it's a
        # trace message rather than an assertion
        if parent_parent != nil && !parent_parent.has_descendant(parent)
          Hocon::Impl::ConfigImpl.trace(
              "***** BUG ***** trying to push non-child of #{parent_parent}, non-child was #{parent}")
        end
      end
      ResolveSource.new(@root, @path_from_root.prepend(parent))
    end
  end

  # Drops the parent chain, keeping only the root.
  def reset_parents
    if @path_from_root == nil
      # Fix: `this` is a Java-ism and an undefined name in Ruby.
      self
    else
      Hocon::Impl::ResolveSource.new(@root)
    end
  end

  # Replaces the container currently on top of the parent chain (or the root
  # itself when there is no chain). `replacement` may be nil to delete.
  def replace_current_parent(old, replacement)
    if Hocon::Impl::ConfigImpl.trace_substitution_enabled
      # Fix: interpolation instead of String#+ on objects (TypeError), and
      # the second identity hash should be the replacement's, not old's
      # (matches upstream Java which prints identityHashCode of each).
      Hocon::Impl::ConfigImpl.trace("replaceCurrentParent old #{old}@#{old.hash} replacement " \
          "#{replacement}@#{replacement.hash} in #{self}")
    end
    if old.equal?(replacement)
      self
    elsif @path_from_root != nil
      # Fix: `replace` is a class method; the bare instance call raised
      # NoMethodError.
      new_path = self.class.replace(@path_from_root, old, replacement)
      if Hocon::Impl::ConfigImpl.trace_substitution_enabled
        Hocon::Impl::ConfigImpl.trace("replaced #{old} with #{replacement} in #{self}")
        Hocon::Impl::ConfigImpl.trace("path was: #{@path_from_root} is now #{new_path}")
      end
      # if we end up nuking the root object itself, we replace it with an
      # empty root
      if new_path != nil
        return ResolveSource.new(new_path.last, new_path)
      else
        return ResolveSource.new(Hocon::Impl::SimpleConfigObject.empty)
      end
    else
      if old.equal?(@root)
        # Fix: `rust_must_be_obj` was a typo for `root_must_be_obj`.
        return ResolveSource.new(root_must_be_obj(replacement))
      else
        raise ConfigBugOrBrokenError.new("attempt to replace root #{root} with #{replacement}")
      end
    end
  end

  # replacement may be nil to delete
  def replace_within_current_parent(old, replacement)
    if Hocon::Impl::ConfigImpl.trace_substitution_enabled
      Hocon::Impl::ConfigImpl.trace("replaceWithinCurrentParent old #{old}@#{old.hash}" \
          " replacement #{replacement}@#{replacement.hash} in #{self}")
    end
    if old.equal?(replacement)
      self
    elsif @path_from_root != nil
      parent = @path_from_root.head
      new_parent = parent.replace_child(old, replacement)
      return replace_current_parent(parent, new_parent.is_a?(Hocon::Impl::Container) ? new_parent : nil)
    else
      if old.equal?(@root) && replacement.is_a?(Hocon::Impl::Container)
        return ResolveSource.new(root_must_be_obj(replacement))
      else
        raise ConfigBugOrBrokenError.new("replace in parent not possible #{old} with #{replacement}" \
            " in #{self}")
      end
    end
  end

  def to_s
    # Fix: interpolation; String#+ with non-String raised TypeError.
    "ResolveSource(root=#{@root}, pathFromRoot=#{@path_from_root})"
  end

  # A persistent (immutable) singly-linked list used for the parent chain.
  class Node
    attr_reader :next_node, :value

    def initialize(value, next_node = nil)
      @value = value
      @next_node = next_node
    end

    # Returns a new list with `value` in front; receiver is unchanged.
    def prepend(value)
      Node.new(value, self)
    end

    def head
      @value
    end

    def tail
      @next_node
    end

    # Value of the final node in the chain.
    def last
      i = self
      while i.next_node != nil
        i = i.next_node
      end
      i.value
    end

    # Returns a new list with the elements in reverse order.
    def reverse
      if @next_node == nil
        self
      else
        reversed = Node.new(@value)
        i = @next_node
        while i != nil
          reversed = reversed.prepend(i.value)
          i = i.next_node
        end
        reversed
      end
    end

    def to_s
      sb = ""
      sb << "["
      to_append_value = self.reverse
      while to_append_value != nil
        sb << to_append_value.value.to_s
        # Fix: the accessor is `next_node`; the undefined `next` raised
        # NoMethodError whenever a node list was traced.
        if to_append_value.next_node != nil
          sb << " <= "
        end
        to_append_value = to_append_value.next_node
      end
      sb << "]"
      sb
    end
  end

  # value is allowed to be nil
  class ValueWithPath
    attr_reader :value, :path_from_root

    def initialize(value, path_from_root)
      @value = value
      @path_from_root = path_from_root
    end

    def to_s
      "ValueWithPath(value=#{@value}, pathFromRoot=#{@path_from_root})"
    end
  end

  # A ResolveResult paired with the chain of parents that produced it.
  class ResultWithPath
    attr_reader :result, :path_from_root

    def initialize(result, path_from_root)
      @result = result
      @path_from_root = path_from_root
    end

    def to_s
      "ResultWithPath(result=#{@result}, pathFromRoot=#{@path_from_root})"
    end
  end

  private

  # Coerces a replacement for the root back into an object; anything that is
  # not an AbstractConfigObject collapses to the empty root.
  def root_must_be_obj(value)
    if value.is_a?(Hocon::Impl::AbstractConfigObject)
      value
    else
      Hocon::Impl::SimpleConfigObject.empty
    end
  end

  # Wraps the recursive lookup so a not-resolved error carries the full path.
  def self.find_in_object_impl(obj, path, parents = nil)
    begin
      # we'll fail if anything along the path can't
      # be looked at without resolving.
      find_in_object_impl_impl(obj, path, nil)
    rescue ConfigNotResolvedError => e
      raise Hocon::Impl::ConfigImpl.improve_not_resolved(path, e)
    end
  end

  # Walks `path` one key at a time, accumulating the parent chain.
  # @return [ValueWithPath]
  def self.find_in_object_impl_impl(obj, path, parents)
    key = path.first
    remainder = path.remainder
    if Hocon::Impl::ConfigImpl.trace_substitution_enabled
      Hocon::Impl::ConfigImpl.trace("*** looking up '#{key}' in #{obj}")
    end
    v = obj.attempt_peek_with_partial_resolve(key)
    new_parents = parents == nil ? Node.new(obj) : parents.prepend(obj)
    if remainder == nil
      ValueWithPath.new(v, new_parents)
    else
      if v.is_a?(Hocon::Impl::AbstractConfigObject)
        find_in_object_impl_impl(v, remainder, new_parents)
      else
        ValueWithPath.new(nil, new_parents)
      end
    end
  end

  # Returns nil if the replacement results in deleting all the nodes.
  def self.replace(list, old, replacement)
    child = list.head
    unless child.equal?(old)
      raise ConfigBugOrBrokenError.new("Can only replace() the top node we're resolving; had #{child}" \
          " on top and tried to replace #{old} overall list was #{list}")
    end
    parent = list.tail == nil ? nil : list.tail.head
    if replacement == nil || !replacement.is_a?(Hocon::Impl::Container)
      if parent == nil
        return nil
      else
        # we are deleting the child from the stack of containers
        # because it's either going away or not a container
        new_parent = parent.replace_child(old, nil)
        return replace(list.tail, parent, new_parent)
      end
    else
      # we replaced the container with another container
      if parent == nil
        return Node.new(replacement)
      else
        new_parent = parent.replace_child(old, replacement)
        new_tail = replace(list.tail, parent, new_parent)
        if new_tail != nil
          return new_tail.prepend(replacement)
        else
          return Node.new(replacement)
        end
      end
    end
  end
end
|
module Invokr
  # Dispatches dependency injection for Procs and Classes, resolving each
  # declared parameter name through a resolver object.
  module DependencyInjection
    extend self

    # Inject dependencies into `obj`.
    # @param obj [Proc, Class] target to invoke/build
    # @param using [Hash, #resolve] dependency source; a Hash is wrapped in HashResolver
    # @raise [ArgumentError] if obj is neither a Proc nor a Class
    def inject obj, using
      meth = case obj
      when Proc then :inject_proc
      when Class then :inject_klass
      else raise ArgumentError, "can't inject #{obj.inspect}"
      end
      resolver = build_resolver using
      send meth, obj, resolver
    end

    private

    # Hashes become a HashResolver; anything else is assumed to already
    # respond to `resolve` and `could_resolve?`.
    def build_resolver using
      if using.is_a? Hash
        HashResolver.new using
      else
        using
      end
    end

    # Build an instance of `klass`, resolving its #initialize parameters.
    def inject_klass klass, resolver
      injector = KlassInjector.new resolver, klass
      injector.inject
    end

    # Invoke `proc`, resolving its parameters.
    def inject_proc proc, resolver
      injector = ProcInjector.new resolver, proc
      injector.inject
    end

    # Hash-like adapter handed to Invokr as the argument source; Invokr reads
    # it via #keys / #fetch / #has_key? (duck-typed Hash).
    Injector = Struct.new :resolver, :obj do
      # Parameter names of the method being injected.
      def keys
        method.parameters.map { |_, identifier| identifier }
      end

      def fetch arg, &default
        resolver.resolve arg, &default
      end

      def has_key? arg
        resolver.could_resolve? arg
      end
    end

    class KlassInjector < Injector
      def inject
        _method = Invokr.query_method method
        _method.invoke :method => :new, :with => self
      end

      # The method whose parameters define the dependencies.
      def method
        obj.instance_method :initialize
      end
    end

    class ProcInjector < Injector
      def inject
        Invokr.invoke :proc => obj, :with => self
      end

      def method
        obj
      end
    end

    # Resolves dependencies out of a plain Hash.
    class HashResolver
      def initialize hsh
        @hsh = hsh
      end

      # Convenience: inject this resolver's values into `klass`.
      # Fix: DependencyInjection.inject takes two positional arguments
      # (obj, using); the previous single-hash call always raised
      # ArgumentError (1 argument given, 2 expected).
      def inject klass
        DependencyInjection.inject klass, self
      end

      def resolve val, &block
        @hsh.fetch val, &block
      end

      def could_resolve? val
        @hsh.has_key? val
      end
    end
  end
end
Explicit receiver when injecting a class
module Invokr
  # Dispatches dependency injection for Procs and Classes, resolving each
  # declared parameter name through a resolver object.
  module DependencyInjection
    extend self

    # Inject dependencies into `obj`.
    # @param obj [Proc, Class] target to invoke/build
    # @param using [Hash, #resolve] dependency source; a Hash is wrapped in HashResolver
    # @raise [ArgumentError] if obj is neither a Proc nor a Class
    def inject obj, using
      meth = case obj
      when Proc then :inject_proc
      when Class then :inject_klass
      else raise ArgumentError, "can't inject #{obj.inspect}"
      end
      resolver = build_resolver using
      send meth, obj, resolver
    end

    private

    # Hashes become a HashResolver; anything else is assumed to already
    # respond to `resolve` and `could_resolve?`.
    def build_resolver using
      if using.is_a? Hash
        HashResolver.new using
      else
        using
      end
    end

    # Build an instance of `klass`, resolving its #initialize parameters.
    def inject_klass klass, resolver
      injector = KlassInjector.new resolver, klass
      injector.inject
    end

    # Invoke `proc`, resolving its parameters.
    def inject_proc proc, resolver
      injector = ProcInjector.new resolver, proc
      injector.inject
    end

    # Hash-like adapter handed to Invokr as the argument source; Invokr reads
    # it via #keys / #fetch / #has_key? (duck-typed Hash).
    Injector = Struct.new :resolver, :obj do
      # Parameter names of the method being injected.
      def keys
        method.parameters.map { |_, identifier| identifier }
      end

      def fetch arg, &default
        resolver.resolve arg, &default
      end

      def has_key? arg
        resolver.could_resolve? arg
      end
    end

    class KlassInjector < Injector
      # Invoke :new with an explicit receiver so injection targets the class.
      def inject
        _method = Invokr.query_method method
        _method.invoke :method => :new, :receiver => obj, :with => self
      end

      # The method whose parameters define the dependencies.
      def method
        obj.instance_method :initialize
      end
    end

    class ProcInjector < Injector
      def inject
        Invokr.invoke :proc => obj, :with => self
      end

      def method
        obj
      end
    end

    # Resolves dependencies out of a plain Hash.
    class HashResolver
      def initialize hsh
        @hsh = hsh
      end

      # Convenience: inject this resolver's values into `klass`.
      # Fix: DependencyInjection.inject takes two positional arguments
      # (obj, using); the previous single-hash call always raised
      # ArgumentError (1 argument given, 2 expected).
      def inject klass
        DependencyInjection.inject klass, self
      end

      def resolve val, &block
        @hsh.fetch val, &block
      end

      def could_resolve? val
        @hsh.has_key? val
      end
    end
  end
end
|
require 'itamae'
module Itamae
  module Resources
    # `mail_alias` resource: ensures `mail_alias` is aliased to `recipient`.
    class MailAlias < Base
      define_option :action, default: :create
      define_option :mail_alias, type: String, default_name: true
      # Fix: recipient is always dereferenced by create_action, so declare it
      # required to fail fast at option-validation time instead of crashing
      # inside the backend call.
      define_option :recipient, type: String, required: true

      # Idempotently add the alias unless it is already in place.
      def create_action
        if ! backend.check_mail_alias_is_aliased_to(mail_alias, recipient)
          backend.add_mail_alias(mail_alias, recipient)
        end
      end
    end
  end
end
The `recipient` option of the `mail_alias` resource is now always required.
require 'itamae'
module Itamae
  module Resources
    # `mail_alias` resource: ensures `mail_alias` is aliased to `recipient`.
    class MailAlias < Base
      define_option :action, default: :create
      define_option :mail_alias, type: String, default_name: true
      define_option :recipient, type: String, required: true

      # Idempotent: add the alias only when it is not already present.
      def create_action
        return if backend.check_mail_alias_is_aliased_to(mail_alias, recipient)
        backend.add_mail_alias(mail_alias, recipient)
      end
    end
  end
end
|
# Again largely inspired by http://brizzled.clapper.org/blog/2010/12/20/some-jekyll-hacks/
module Jekyll
  module RpLogs
    # Index page listing every RP that carries a given tag.
    class TagIndex < Jekyll::Page
      def initialize(site, base, dir, tag, pages)
        @site = site
        @base = base
        @dir = dir
        @name = 'index.html'
        self.process(@name)
        # Get tag_index filename
        tag_index = (site.config['rp_tag_index_layout'] || 'tag_index') + '.html'
        self.read_yaml(File.join(base, '_layouts'), tag_index)
        self.data['tag'] = tag # Set which tag this index is for
        # Sort tagged RPs by their start date
        self.data['pages'] = pages.sort_by { |p| p.data['start_date'] }
        tag_title_prefix = site.config['rp_tag_title_prefix'] || 'Tag: '
        self.data['title'] = "#{tag_title_prefix}#{tag}"
      end
    end

    # Generates one TagIndex page per tag found in the RP collection.
    class TagIndexGenerator < Jekyll::Generator
      safe true
      # Needs to run after RpLogGenerator
      priority :low

      def initialize(config)
        # Fix: `||= true` also overwrote an explicit `false`, making the tag
        # index impossible to disable from _config.yml; only default when the
        # key is truly unset.
        config['rp_tag_index'] = true if config['rp_tag_index'].nil?
        config['rp_tag_dir'] ||= '/tags'
      end

      def generate(site)
        return unless site.config['rp_tag_index']
        dir = site.config['rp_tag_dir']
        tags = rps_by_tag(site)
        tags.each_pair { |tag, pages|
          site.pages << TagIndex.new(site, site.source, File.join(dir, tag.dir), tag, pages)
        }
      end

      # Returns a hash of tags => [pages with tag]
      def rps_by_tag(site)
        tag_ref = Hash.new { |hash, key| hash[key] = Set.new }
        site.collections[RpLogGenerator::RP_KEY].docs.each { |page|
          page.data['rp_tags'].each { |tag| tag_ref[tag] << page }
        }
        return tag_ref
      end
    end
  end
end
Style edits
* Converted `'` to `"`
* Removed unnecessary uses of `self`
* Other style updates to bring it in line with the rest of the project
# Again largely inspired by http://brizzled.clapper.org/blog/2010/12/20/some-jekyll-hacks/
module Jekyll
  module RpLogs
    # Index page listing every RP that carries a given tag.
    class TagIndex < Jekyll::Page
      def initialize(site, base, dir, tag, pages)
        @site = site
        @base = base
        @dir = dir
        @name = "index.html"
        process(@name)
        # Get tag_index filename
        tag_index = (site.config["rp_tag_index_layout"] || "tag_index") + ".html"
        read_yaml(File.join(base, "_layouts"), tag_index)
        data["tag"] = tag # Set which tag this index is for
        # Sort tagged RPs by their start date
        data["pages"] = pages.sort_by { |p| p.data["start_date"] }
        tag_title_prefix = site.config["rp_tag_title_prefix"] || "Tag: "
        data["title"] = "#{tag_title_prefix}#{tag}"
      end
    end

    # Generates one TagIndex page per tag found in the RP collection.
    class TagIndexGenerator < Jekyll::Generator
      safe true
      # Needs to run after RpLogGenerator
      priority :low

      def initialize(config)
        # Fix: `||= true` also overwrote an explicit `false`, making the tag
        # index impossible to disable from _config.yml; only default when the
        # key is truly unset.
        config["rp_tag_index"] = true if config["rp_tag_index"].nil?
        config["rp_tag_dir"] ||= "/tags"
      end

      def generate(site)
        return unless site.config["rp_tag_index"]
        dir = site.config["rp_tag_dir"]
        tags = rps_by_tag(site)
        tags.each_pair { |tag, pages|
          site.pages << TagIndex.new(site, site.source, File.join(dir, tag.dir), tag, pages)
        }
      end

      # Returns a hash of tags => [pages with tag]
      def rps_by_tag(site)
        tag_ref = Hash.new { |hash, key| hash[key] = Set.new }
        site.collections[RpLogGenerator::RP_KEY].docs.each { |page|
          page.data["rp_tags"].each { |tag| tag_ref[tag] << page }
        }
        return tag_ref
      end
    end
  end
end
|
require "json/stream"
module Json
  module Streamer
    # Streams JSON from an IO in chunks with json-stream, building up
    # objects/arrays per nesting level and yielding the pieces the caller
    # asked for (by nesting level and/or key).
    class JsonStreamer
      # Hash of nesting level => partially built object/array at that level.
      attr_reader :aggregator
      # The underlying JSON::Stream::Parser.
      attr_reader :parser

      # @param file_io [IO, nil] input consumed in #get; nil means the caller
      #   feeds @parser directly
      # @param chunk_size [Integer] bytes read from file_io per chunk
      def initialize(file_io = nil, chunk_size = 1000)
        @parser = JSON::Stream::Parser.new
        @file_io = file_io
        @chunk_size = chunk_size
        # -1 = not inside any container yet; first start_object/array makes it 0.
        @current_nesting_level = -1
        @current_key = nil
        @aggregator = {}
        # nesting level => key under which that container was opened
        @aggregator_keys = {}
        @parser.start_object {start_object}
        @parser.start_array {start_array}
        @parser.key {|k| key(k)}
      end

      # Callbacks containing `yield` have to be defined in the method called via block otherwise yield won't work
      #
      # Parses the input, yielding every value/object/array that matches the
      # requested nesting level or key; everything else is merged upward into
      # its parent container.
      # NOTE(review): each call registers fresh value/end_object/end_array
      # callbacks on @parser — presumably #get is meant to be called once per
      # instance; confirm before reusing a streamer.
      def get(nesting_level:-1, key:nil, yield_values:true)
        yield_nesting_level = nesting_level
        wanted_key = key
        @parser.value do |v|
          if array_level?(@current_nesting_level)
            if yield_values and yield_value?(yield_nesting_level)
              yield v
            else
              @aggregator[@current_nesting_level] << v
            end
          else
            @aggregator[@current_nesting_level][@current_key] = v
            if yield_values and yield_value?(yield_nesting_level, wanted_key)
              yield v
            end
          end
        end
        @parser.end_object do
          if yield_object?(yield_nesting_level, wanted_key)
            # clone: the aggregator slot is reset and reused for siblings
            yield @aggregator[@current_nesting_level].clone
            @aggregator[@current_nesting_level] = {}
          else
            merge_up
          end
          @current_nesting_level -= 1
        end
        @parser.end_array do
          if yield_object?(yield_nesting_level, wanted_key)
            yield @aggregator[@current_nesting_level].clone
            @aggregator[@current_nesting_level] = []
          else
            merge_up
          end
          @current_nesting_level -= 1
        end
        if @file_io
          @file_io.each(@chunk_size) do |chunk|
            @parser << chunk
          end
        end
      end

      # True when the container just closed should be yielded: it sits at the
      # requested nesting level, or the key it was opened under matches.
      def yield_object?(yield_nesting_level, wanted_key)
        @current_nesting_level.eql? yield_nesting_level or (not wanted_key.nil? and wanted_key == @aggregator_keys[@current_nesting_level-1])
      end

      # True when a scalar value should be yielded (level + 1 because scalars
      # live one level below their container) or its key matches.
      def yield_value?(yield_nesting_level, wanted_key = nil)
        (@current_nesting_level + 1).eql? yield_nesting_level or (not wanted_key.nil? and wanted_key == @current_key)
      end

      # json-stream callback: a '{' was seen — open a fresh hash one level down.
      def start_object
        set_aggregator_key
        @current_nesting_level += 1
        @aggregator[@current_nesting_level] = {}
      end

      # json-stream callback: a '[' was seen — open a fresh array one level down.
      def start_array
        set_aggregator_key
        @current_nesting_level += 1
        @aggregator[@current_nesting_level] = []
      end

      # Remember which key the container being opened belongs to; containers
      # inside arrays have no key of their own.
      def set_aggregator_key
        reset_current_key if array_level?(@current_nesting_level)
        @aggregator_keys[@current_nesting_level] = @current_key
      end

      def reset_current_key
        @current_key = nil
      end

      # Whether the container at the given level is an array.
      def array_level?(nesting_level)
        @aggregator[nesting_level].is_a?(Array)
      end

      # json-stream callback: remember the most recent object key.
      def key(k)
        @current_key = k
      end

      # Fold the finished container at the current level into its parent
      # (append for arrays, store under its opening key for hashes).
      def merge_up
        return if @current_nesting_level == 0
        previous_nesting_level = @current_nesting_level - 1
        if array_level?(previous_nesting_level)
          @aggregator[previous_nesting_level] << @aggregator[@current_nesting_level]
        else
          @aggregator[previous_nesting_level][@aggregator_keys[previous_nesting_level]] = @aggregator[@current_nesting_level]
        end
        @aggregator.delete(@current_nesting_level)
      end
    end
  end
end
Extracts previous_nesting_level method
require "json/stream"
module Json
  module Streamer
    # Streams JSON from an IO in chunks with json-stream, building up
    # objects/arrays per nesting level and yielding the pieces the caller
    # asked for (by nesting level and/or key).
    class JsonStreamer
      # Hash of nesting level => partially built object/array at that level.
      attr_reader :aggregator
      # The underlying JSON::Stream::Parser.
      attr_reader :parser

      # @param file_io [IO, nil] input consumed in #get; nil means the caller
      #   feeds @parser directly
      # @param chunk_size [Integer] bytes read from file_io per chunk
      def initialize(file_io = nil, chunk_size = 1000)
        @parser = JSON::Stream::Parser.new
        @file_io = file_io
        @chunk_size = chunk_size
        # -1 = not inside any container yet; first start_object/array makes it 0.
        @current_nesting_level = -1
        @current_key = nil
        @aggregator = {}
        # nesting level => key under which that container was opened
        @aggregator_keys = {}
        @parser.start_object {start_object}
        @parser.start_array {start_array}
        @parser.key {|k| key(k)}
      end

      # Callbacks containing `yield` have to be defined in the method called via block otherwise yield won't work
      #
      # Parses the input, yielding every value/object/array that matches the
      # requested nesting level or key; everything else is merged upward into
      # its parent container.
      # NOTE(review): each call registers fresh value/end_object/end_array
      # callbacks on @parser — presumably #get is meant to be called once per
      # instance; confirm before reusing a streamer.
      def get(nesting_level:-1, key:nil, yield_values:true)
        yield_nesting_level = nesting_level
        wanted_key = key
        @parser.value do |v|
          if array_level?(@current_nesting_level)
            if yield_values and yield_value?(yield_nesting_level)
              yield v
            else
              @aggregator[@current_nesting_level] << v
            end
          else
            @aggregator[@current_nesting_level][@current_key] = v
            if yield_values and yield_value?(yield_nesting_level, wanted_key)
              yield v
            end
          end
        end
        @parser.end_object do
          if yield_object?(yield_nesting_level, wanted_key)
            # clone: the aggregator slot is reset and reused for siblings
            yield @aggregator[@current_nesting_level].clone
            @aggregator[@current_nesting_level] = {}
          else
            merge_up
          end
          @current_nesting_level -= 1
        end
        @parser.end_array do
          if yield_object?(yield_nesting_level, wanted_key)
            yield @aggregator[@current_nesting_level].clone
            @aggregator[@current_nesting_level] = []
          else
            merge_up
          end
          @current_nesting_level -= 1
        end
        if @file_io
          @file_io.each(@chunk_size) do |chunk|
            @parser << chunk
          end
        end
      end

      # True when the container just closed should be yielded: it sits at the
      # requested nesting level, or the key it was opened under matches.
      def yield_object?(yield_nesting_level, wanted_key)
        @current_nesting_level.eql? yield_nesting_level or (not wanted_key.nil? and wanted_key == @aggregator_keys[@current_nesting_level-1])
      end

      # True when a scalar value should be yielded (level + 1 because scalars
      # live one level below their container) or its key matches.
      def yield_value?(yield_nesting_level, wanted_key = nil)
        (@current_nesting_level + 1).eql? yield_nesting_level or (not wanted_key.nil? and wanted_key == @current_key)
      end

      # json-stream callback: a '{' was seen — open a fresh hash one level down.
      def start_object
        set_aggregator_key
        @current_nesting_level += 1
        @aggregator[@current_nesting_level] = {}
      end

      # json-stream callback: a '[' was seen — open a fresh array one level down.
      def start_array
        set_aggregator_key
        @current_nesting_level += 1
        @aggregator[@current_nesting_level] = []
      end

      # Remember which key the container being opened belongs to; containers
      # inside arrays have no key of their own.
      def set_aggregator_key
        reset_current_key if array_level?(@current_nesting_level)
        @aggregator_keys[@current_nesting_level] = @current_key
      end

      def reset_current_key
        @current_key = nil
      end

      # Whether the container at the given level is an array.
      def array_level?(nesting_level)
        @aggregator[nesting_level].is_a?(Array)
      end

      # json-stream callback: remember the most recent object key.
      def key(k)
        @current_key = k
      end

      # Fold the finished container at the current level into its parent
      # (append for arrays, store under its opening key for hashes).
      def merge_up
        return if @current_nesting_level == 0
        if array_level?(previous_nesting_level)
          @aggregator[previous_nesting_level] << @aggregator[@current_nesting_level]
        else
          @aggregator[previous_nesting_level][@aggregator_keys[previous_nesting_level]] = @aggregator[@current_nesting_level]
        end
        @aggregator.delete(@current_nesting_level)
      end

      # Level of the parent of the container currently being closed.
      def previous_nesting_level
        @current_nesting_level - 1
      end
    end
  end
end
|
module Less
  module Rails
    # Sprockets processor that registers every `@import`ed .less file as a
    # dependency so changes to imports invalidate the asset cache.
    class ImportProcessor
      # Matches @import 'path'; / @import "path"; capturing the path.
      IMPORT_SCANNER = /@import\s*['"]([^'"]+)['"]\s*;/.freeze

      # Resolve a logical path through Sprockets, or nil when not found.
      PATHNAME_FINDER = Proc.new { |scope, path|
        begin
          scope.resolve(path)
        rescue Sprockets::FileNotFound
          nil
        end
      }

      # Sprockets 2.x template interface.
      def initialize(filename, &block)
        @filename = filename
        @source = block.call
      end

      def render(scope, locals)
        self.class.evaluate(@filename, @source, scope)
      end

      # Registers import dependencies and returns the source unchanged.
      def self.evaluate(filename, source, scope)
        depend_on scope, source
        source
      end

      # Sprockets 3/4 processor interface.
      def self.call(input)
        filename = input[:filename]
        source = input[:data]
        scope = input[:environment].context_class.new(input)
        result = evaluate(filename, source, scope)
        scope.metadata.merge(data: result)
      end

      # Recursively declares every reachable .less import as a dependency.
      def self.depend_on(scope, source, base=File.dirname(scope.logical_path))
        import_paths = source.scan(IMPORT_SCANNER).flatten.compact.uniq
        import_paths.each do |path|
          pathname = PATHNAME_FINDER.call(scope,path) || PATHNAME_FINDER.call(scope, File.join(base, path))
          # Fix: `ends_with?` is an ActiveSupport extension; core Ruby's
          # String#end_with? works in every environment.
          scope.depend_on(pathname) if pathname && pathname.to_s.end_with?('.less')
          depend_on scope, File.read(pathname), File.dirname(path) if pathname
        end
        source
      end
    end
  end
end
Add a default mime type for import processor
module Less
  module Rails
    # Sprockets processor that registers every `@import`ed .less file as a
    # dependency so changes to imports invalidate the asset cache.
    class ImportProcessor
      # Matches @import 'path'; / @import "path"; capturing the path.
      IMPORT_SCANNER = /@import\s*['"]([^'"]+)['"]\s*;/.freeze

      # Resolve a logical path through Sprockets, or nil when not found.
      PATHNAME_FINDER = Proc.new { |scope, path|
        begin
          scope.resolve(path)
        rescue Sprockets::FileNotFound
          nil
        end
      }

      # Sprockets 2.x template interface.
      def initialize(filename, &block)
        @filename = filename
        @source = block.call
      end

      def render(scope, locals)
        self.class.evaluate(@filename, @source, scope)
      end

      # Registers import dependencies and returns the source unchanged.
      def self.evaluate(filename, source, scope)
        depend_on scope, source
        source
      end

      # Sprockets 3/4 processor interface.
      def self.call(input)
        filename = input[:filename]
        source = input[:data]
        scope = input[:environment].context_class.new(input)
        result = evaluate(filename, source, scope)
        scope.metadata.merge(data: result)
      end

      # Mime type Sprockets associates with this processor's output.
      def self.default_mime_type
        'text/css'
      end

      # Recursively declares every reachable .less import as a dependency.
      def self.depend_on(scope, source, base=File.dirname(scope.logical_path))
        import_paths = source.scan(IMPORT_SCANNER).flatten.compact.uniq
        import_paths.each do |path|
          pathname = PATHNAME_FINDER.call(scope,path) || PATHNAME_FINDER.call(scope, File.join(base, path))
          # Fix: `ends_with?` is an ActiveSupport extension; core Ruby's
          # String#end_with? works in every environment.
          scope.depend_on(pathname) if pathname && pathname.to_s.end_with?('.less')
          depend_on scope, File.read(pathname), File.dirname(path) if pathname
        end
        source
      end
    end
  end
end
|
module LetterOpener
  # Runtime configuration: where letters are written and which template
  # renders them.
  class Configuration
    attr_accessor :location, :message_template

    def initialize
      # Fix: also guard on Rails.root itself — in some frameworks (e.g. Ruby
      # on Jets) Rails is defined and responds to :root, yet root is nil,
      # which made the join call raise NoMethodError.
      @location = Rails.root.join('tmp', 'letter_opener') if defined?(Rails) && Rails.respond_to?(:root) && Rails.root
      @message_template = 'default'
    end
  end
end
Update configuration.rb
Re-add "extra check" that was removed from previous commit.
In some Ruby projects (such as Ruby on Jets apps), `Rails` may be defined and respond to `root`, yet `Rails.root` may still be nil.
Actually I don't see the point of removing that "extra check" and I think it is best to keep it...
module LetterOpener
  # Runtime configuration: where letters are written and which template
  # renders them.
  class Configuration
    attr_accessor :location, :message_template

    def initialize
      @message_template = 'default'
      @location = default_location
    end

    private

    # tmp/letter_opener under the Rails root, when a usable Rails root exists
    # (guarding Rails.root itself: e.g. Ruby on Jets defines Rails with a nil
    # root); nil otherwise.
    def default_location
      return unless defined?(Rails) && Rails.respond_to?(:root) && Rails.root
      Rails.root.join('tmp', 'letter_opener')
    end
  end
end
|
factored out code to fix subcommand help displays
# frozen_string_literal: true
require 'thor'
module Lkr
  module Commands
    # Base class for Thor subcommands that makes `help` display the full
    # invocation path, per
    # https://github.com/erikhuda/thor/issues/261#issuecomment-69327685
    class SubCommandBase < Thor
      # Thor calls banner to render usage lines; insert the dasherized
      # subcommand name between the program name and the command usage.
      def self.banner(command, namespace = nil, subcommand = false)
        "#{basename} #{subcommand_prefix} #{command.usage}"
      end

      # Demodulized class name converted to dashed lower case,
      # e.g. "MyThing" -> "my-thing".
      def self.subcommand_prefix
        demodulized = name.gsub(%r{.*::}, '')
        dashed = demodulized.gsub(%r{[A-Z]}) { |letter| "-#{letter.downcase}" }
        dashed.sub(%r{\A-}, '')
      end
    end
  end
end
|
require 'aws4'
require 'httparty'
module IvonaSpeechCloud
  # Client for the IVONA Speech Cloud API: holds credentials and builds the
  # AWS4-signed request headers.
  #
  # Fix: the original file closed the class before signed_headers/headers/etc.
  # and had a stray trailing `end`, which was a SyntaxError and would have
  # left those methods uncallable on Client instances; they belong inside the
  # class, where headers/signer/credentials are used.
  class Client
    attr_accessor :access_key, :secret_key, :region, :body, :path

    # Initializes a new Client object
    #
    # @param options [Hash] attribute values (access_key, secret_key, region, ...)
    # @return [IvonaSpeechCloud::Client]
    def initialize(options = {})
      options.each do |key, value|
        instance_variable_set("@#{key}", value)
      end
      yield(self) if block_given?
    end

    def create_speech(text, options={})
      speech(text, options).create
    end

    def speech(text, options)
      Speech.new(self, text, options)
    end

    # @return [Hash]
    def credentials
      {
        access_key: access_key,
        secret_key: secret_key,
        region: region
      }
    end

    # @return URI::HTTPS
    def uri
      @uri = begin
        uri = URI(endpoint)
        uri.path = path if path
        uri
      end
    end

    def endpoint
      "https://#{host}"
    end

    def host
      "tts.#{region}.ivonacloud.com"
    end

    # Headers with the AWS4 signature applied.
    def signed_headers
      signer.sign("POST", uri, headers, body, true)
    end

    def signer
      @signer ||= AWS4::Signer.new(credentials)
    end

    def headers
      @headers ||= {
        "Content-Type" => content_type,
        "Host" => host,
        "X-Amz-Content-SHA256" => x_amz_content_sha256,
        "X-Amz-Date" => x_amz_date
      }
    end

    # Request timestamp in AWS ISO-8601 basic format.
    def x_amz_date
      date.strftime("%Y%m%dT%H%M%SZ")
    end

    def date
      @date ||= Time.now.utc
    end

    # SHA-256 of the request body (the `body` attribute).
    def x_amz_content_sha256
      Digest::SHA256.new.update(body).hexdigest
    end

    def content_type
      @content_type ||= "application/json"
    end

    # @return [Boolean] true when access_key, secret_key and region are all set
    def credentials?
      credentials.values.all?
    end
  end
end
moved method down below
require 'aws4'
require 'httparty'
module IvonaSpeechCloud
  # Client for the IVONA Speech Cloud API: holds credentials and builds the
  # AWS4-signed request headers.
  class Client
    attr_accessor :access_key, :secret_key, :region, :body, :path

    # Initializes a new Client object
    #
    # @param options [Hash] attribute values (access_key, secret_key, region, ...)
    # @return [IvonaSpeechCloud::Client]
    def initialize(options = {})
      options.each do |key, value|
        instance_variable_set("@#{key}", value)
      end
      yield(self) if block_given?
    end

    def create_speech(text, options={})
      speech(text, options).create
    end

    def speech(text, options)
      Speech.new(self, text, options)
    end

    # @return [Hash]
    def credentials
      {
        access_key: access_key,
        secret_key: secret_key,
        region: region
      }
    end

    # @return URI::HTTPS
    def uri
      @uri = begin
        uri = URI(endpoint)
        uri.path = path if path
        uri
      end
    end

    def endpoint
      "https://#{host}"
    end

    def host
      "tts.#{region}.ivonacloud.com"
    end

    # Request timestamp in AWS ISO-8601 basic format.
    def x_amz_date
      date.strftime("%Y%m%dT%H%M%SZ")
    end

    def date
      @date ||= Time.now.utc
    end

    # SHA-256 hex digest of the given payload.
    def x_amz_content_sha256(body="")
      Digest::SHA256.new.update(body).hexdigest
    end

    def content_type
      "application/json"
    end

    # Headers with the AWS4 signature applied.
    def signed_headers
      signer.sign("POST", uri, headers, body, true)
    end

    def signer
      @signer ||= AWS4::Signer.new(credentials)
    end

    def headers
      @headers ||= {
        "Content-Type" => content_type,
        "Host" => host,
        # Fix: hash the actual request payload; the argument-less call hashed
        # the default "" so the content-sha256 header disagreed with the body
        # being signed. `to_s` keeps a nil body hashing as "".
        "X-Amz-Content-SHA256" => x_amz_content_sha256(body.to_s),
        "X-Amz-Date" => x_amz_date
      }
    end

    # @return [Boolean] true when access_key, secret_key and region are all set
    def credentials?
      credentials.values.all?
    end
  end
end
|
#!/usr/bin/env ruby
# encoding: utf-8
# File: base.rb, created 13/12/14
# extracted from notification, created: 21/08/13
#
# (c) Michel Demazure & Kenji Lefevre
module JacintheManagement
# Methods for e-subscriptions notification
module Notifications
# A notification target: one Jacinthe "tiers" (account) with its name,
# IP ranges, contact mails and Drupal identity.
Tiers = Struct.new(:tiers_id, :name, :ranges, :mails, :drupal, :drupal_mail)
# One electronic subscription still awaiting notification.
# noinspection RubyConstantNamingConvention
ToBeNotified = Struct.new(:id, :revue, :year, :ref, :billing, :tiers_id, :tiers_email)
# reopening class
# reopening the struct class to add presentation helpers
class ToBeNotified
  # Human-readable form of the subscription reference; the special
  # free/exchange/national patterns get explicit labels.
  # @return [String] explicit reference when special
  def reference
    if ref =~ /Abo..-GT/
      'gratuit/free'
    elsif ref =~ /Abo..-Ech/
      'échange/exchange'
    elsif ref =~ /Abo-Nat/
      'compris dans l\'abonnement national CNRS'
    else
      "ref:#{ref}"
    end
  end

  # One-line summary used in notification mails.
  # @return [String] report for mail
  def report
    "#{revue} (#{year}) [#{reference}]"
  end
end
# base methods for notifications
module Base
# sql to extract tiers (id, name, IP ranges, mails, Drupal identity)
SQL_TIERS = SQLFiles.script('tiers_ip_infos')
# sql to count electronic subscriptions to be notified
SQL_SUBSCRIPTION_NUMBER = SQLFiles.script('subscriptions_number_to_notify')
# sql to extract electronic subscriptions to be notified
SQL_SUBSCRIPTIONS = SQLFiles.script('subscriptions_to_notify')
# sql to update base after notification (placeholders ::abonnement_id::
# and ::time_stamp:: are substituted in Base.update)
SQL_UPDATE = SQLFiles.script('update_subscription_notified')
# @return [String] current date, formatted for file names (YYYY-MM-DD)
def self.time_stamp
  now = Time.now
  now.strftime('%Y-%m-%d')
end
# tell JacintheD that the subscription has been notified (stamps it with
# today's date)
# @param [String] subs_id subscription identity
def self.update(subs_id)
  substitutions = { /::abonnement_id::/ => subs_id, /::time_stamp::/ => time_stamp }
  query = substitutions.reduce(SQL_UPDATE) do |sql, (placeholder, text)|
    sql.sub(placeholder, text)
  end
  Sql.query(JACINTHE_MODE, query)
end
# caches, built lazily by the accessors below
@all_jacinthe_tiers = nil       # Array<Tiers>, indexed by tiers_id (sparse)
@classified_notifications = nil # Hash{[revue, year] => Array<ToBeNotified>}
## base of all Jacinthe Tiers
# Rebuilds the Tiers cache from SQL_TIERS; the cache array is indexed by
# tiers_id, so it may be sparse (nil holes).
# NOTE(review): the return value is the raw SQL line array (value of #each),
# not the cache — callers should read the cache via all_jacinthe_tiers.
# @return [Array<Tiers>] list of all Jacinthe Tiers
def self.build_jacinthe_tiers_list
  @all_jacinthe_tiers = []
  Sql.answer_to_query(JACINTHE_MODE, SQL_TIERS).drop(1).each do |line|
    items = line.split(TAB)
    parameters = format_items(items)
    @all_jacinthe_tiers[parameters[0]] = Tiers.new(*parameters)
  end
end
#
# @return [Array<Tiers>]all Jacinthe tiers
def self.all_jacinthe_tiers
build_jacinthe_tiers_list unless @all_jacinthe_tiers
@all_jacinthe_tiers
end
# @param [Array<String>] items split line form sql answer
# @return [Array] parameters for Tiers struct
def self.format_items(items)
number = items[0].to_i
name = items[2] == 'NULL' ? items[1] : items[2] + ' ' + items[1]
ranges = clean_split('\\n', items[3])
mails = clean_split(',', items[4].chomp)
drupal_id = items[5].to_i
drupal_mail = items[6].chomp
[number, name, ranges, mails, drupal_id, drupal_mail]
end
# @param [String] sep separator
# @param [String] string string to be split
# @return [Array<String|nil>] formatted splitting of string
def self.clean_split(sep, string)
string.split(sep).delete_if { |item| item == 'NULL' }
end
# @param [Integer|#to_i] tiers_id tiers identification
# @return [Tiers] this Tiers
def self.find_tiers(tiers_id)
all_jacinthe_tiers[tiers_id.to_i]
end
## base of all pending notifications
# count and return number of notifications to be done
# @return [Integer] number of notifications to be done
def self.notifications_number
Sql.answer_to_query(JACINTHE_MODE, SQL_SUBSCRIPTION_NUMBER)[1].to_i
end
# @return [ArrayToBeNotified>] all ToBeNotified objects
def self.all_notifications
Sql.answer_to_query(JACINTHE_MODE, SQL_SUBSCRIPTIONS).drop(1).map do |line|
items = line.chomp.split(Core::TAB)
ToBeNotified.new(*items)
end
end
# @return [Hash<ToBeNotified>] all ToBeNOtified objects by categories
def self.build_classified_notifications
table = {}
all_notifications.each do |item|
key = [item.revue, item.year]
(table[key] ||= []) << item
end
@classified_notifications = table
end
# @return [Hash<ToBeNotified>] all ToBeNOtified objects by categories
def self.classified_notifications
build_classified_notifications unless @classified_notifications
@classified_notifications
end
# @return [Array] all the categories of possible notifications
def self.notification_categories
classified_notifications.keys.sort
end
# @return [Array] all the categories of possible notifications
def self.filtered_classified_notifications
classified_notifications.select do |(_, year), _|
Notifications.filter(year)
end
end
end
end
end
# Manual check: print the number of pending notifications when run directly.
if __FILE__ == $PROGRAM_NAME
  include JacintheManagement
  # BUG FIX: the namespace is Notifications (plural); 'Notification::Base'
  # raised an uninitialized-constant NameError here.
  puts Notifications::Base.notifications_number
end
Fix bug with Drupal elements (keep the raw Drupal id instead of coercing it with to_i)
#!/usr/bin/env ruby
# encoding: utf-8
# File: base.rb, created 13/12/14
# extracted from notification, created: 21/08/13
#
# (c) Michel Demazure & Kenji Lefevre
module JacintheManagement
  # Methods for e-subscriptions notification
  module Notifications
    # tiers (third party) for notification
    Tiers = Struct.new(:tiers_id, :name, :ranges, :mails, :drupal, :drupal_mail)
    # subscription parameters to be notified
    # noinspection RubyConstantNamingConvention
    ToBeNotified = Struct.new(:id, :revue, :year, :ref, :billing, :tiers_id, :tiers_email)
    # reopening the struct class to add presentation helpers
    class ToBeNotified
      # Special subscription kinds are recognized by their reference pattern.
      # @return [String] explicit reference when special
      def reference
        case ref
        when /Abo..-GT/
          'gratuit/free'
        when /Abo..-Ech/
          'échange/exchange'
        when /Abo-Nat/
          'compris dans l\'abonnement national CNRS'
        else
          "ref:#{ref}"
        end
      end
      # @return [String] report line for notification mail
      def report
        "#{revue} (#{year}) [#{reference}]"
      end
    end
# base methods for notifications
module Base
# sql to extract tiers
SQL_TIERS = SQLFiles.script('tiers_ip_infos')
# sql to count electronic subscriptions to be notified
SQL_SUBSCRIPTION_NUMBER = SQLFiles.script('subscriptions_number_to_notify')
# sql to extract electronic subscriptions to be notified
SQL_SUBSCRIPTIONS = SQLFiles.script('subscriptions_to_notify')
# sql to update base after notification
SQL_UPDATE = SQLFiles.script('update_subscription_notified')
# @return [String] time stamp for files
def self.time_stamp
Time.now.strftime('%Y-%m-%d')
end
# tell JacintheD that subscription is notified
# @param [STRING] subs_id subscription identity
def self.update(subs_id)
query = SQL_UPDATE
.sub(/::abonnement_id::/, subs_id)
.sub(/::time_stamp::/, time_stamp)
Sql.query(JACINTHE_MODE, query)
end
# will be built and cached
@all_jacinthe_tiers = nil
@classified_notifications = nil
## base of all Jacinthe Tiers
# @return [Array<Tiers>] list o/f all Jacinthe Tiers
def self.build_jacinthe_tiers_list
@all_jacinthe_tiers = []
Sql.answer_to_query(JACINTHE_MODE, SQL_TIERS).drop(1).each do |line|
items = line.split(TAB)
parameters = format_items(items)
@all_jacinthe_tiers[parameters[0]] = Tiers.new(*parameters)
end
end
#
# @return [Array<Tiers>]all Jacinthe tiers
def self.all_jacinthe_tiers
build_jacinthe_tiers_list unless @all_jacinthe_tiers
@all_jacinthe_tiers
end
# @param [Array<String>] items split line form sql answer
# @return [Array] parameters for Tiers struct
def self.format_items(items)
number = items[0].to_i
name = items[2] == 'NULL' ? items[1] : items[2] + ' ' + items[1]
ranges = clean_split('\\n', items[3])
mails = clean_split(',', items[4].chomp)
drupal_id = items[5]
drupal_mail = items[6].chomp
[number, name, ranges, mails, drupal_id, drupal_mail]
end
# @param [String] sep separator
# @param [String] string string to be split
# @return [Array<String|nil>] formatted splitting of string
def self.clean_split(sep, string)
string.split(sep).delete_if { |item| item == 'NULL' }
end
# @param [Integer|#to_i] tiers_id tiers identification
# @return [Tiers] this Tiers
def self.find_tiers(tiers_id)
all_jacinthe_tiers[tiers_id.to_i]
end
## base of all pending notifications
# count and return number of notifications to be done
# @return [Integer] number of notifications to be done
def self.notifications_number
Sql.answer_to_query(JACINTHE_MODE, SQL_SUBSCRIPTION_NUMBER)[1].to_i
end
# @return [ArrayToBeNotified>] all ToBeNotified objects
def self.all_notifications
Sql.answer_to_query(JACINTHE_MODE, SQL_SUBSCRIPTIONS).drop(1).map do |line|
items = line.chomp.split(Core::TAB)
ToBeNotified.new(*items)
end
end
# @return [Hash<ToBeNotified>] all ToBeNOtified objects by categories
def self.build_classified_notifications
table = {}
all_notifications.each do |item|
key = [item.revue, item.year]
(table[key] ||= []) << item
end
@classified_notifications = table
end
# @return [Hash<ToBeNotified>] all ToBeNOtified objects by categories
def self.classified_notifications
build_classified_notifications unless @classified_notifications
@classified_notifications
end
# @return [Array] all the categories of possible notifications
def self.notification_categories
classified_notifications.keys.sort
end
# @return [Array] all the categories of possible notifications
def self.filtered_classified_notifications
classified_notifications.select do |(_, year), _|
Notifications.filter(year)
end
end
end
end
end
# Manual check: print the number of pending notifications when run directly.
if __FILE__ == $PROGRAM_NAME
  include JacintheManagement
  # BUG FIX: the namespace is Notifications (plural); 'Notification::Base'
  # raised an uninitialized-constant NameError here.
  puts Notifications::Base.notifications_number
end
|
module Magnum
  module Addons
    # Slack add-on for Magnum.
    class Slack
      # Gem version string.
      VERSION = "0.1.1"
    end
  end
end
Set version to 1.0.0
module Magnum
  module Addons
    # Slack add-on for Magnum.
    class Slack
      # Gem version string.
      VERSION = "1.0.0"
    end
  end
end
|
# frozen_string_literal: true
module Jekyll
  class SeoTag
    # Mixin to share common URL-related methods between classes
    module UrlHelper
      private

      # Determines if the given string is an absolute URL
      #
      # Returns true if an absolute URL.
      # Returns false if it's a relative URL
      # Returns nil if it is not a string or can't be parsed as a URL
      def absolute_url?(string)
        return unless string
        Addressable::URI.parse(string).absolute?
      rescue Addressable::URI::InvalidURIError
        nil
      end
    end
  end
end
docs: fix typo (#449)
Merge pull request 449
# frozen_string_literal: true
module Jekyll
  class SeoTag
    # Shared URL helpers mixed into classes that need URL inspection
    module UrlHelper
      private

      # Tests whether +string+ is an absolute URL.
      #
      # Returns true when the string parses to an absolute URL,
      # false when it parses to a relative one, and nil when the
      # argument is missing or cannot be parsed as a URL at all.
      def absolute_url?(string)
        return unless string
        parsed = Addressable::URI.parse(string)
        parsed.absolute?
      rescue Addressable::URI::InvalidURIError
        nil
      end
    end
  end
end
|
require "marionette_rails_generators/version"
# Namespace for the Marionette Rails generators gem.
module MarionetteRailsGenerators
  # Your code goes here...
end
require backbone and marionette gems
require "marionette_rails_generators/version"
require "backbone-rails"
require "marionette-rails"
# Namespace for the Marionette Rails generators gem.
module MarionetteRailsGenerators
  # Your code goes here...
end
|
#
# Copyright (c) 2012-2013 Kannan Manickam <arangamani.kannan@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
require 'rubygems'
require 'json'
require 'net/http'
require 'nokogiri'
#require 'active_support/core_ext'
#require 'active_support/builder'
require 'base64'
require 'mixlib/shellout'
require 'uri'
# The main module that contains the Client class and all subclasses that
# communicate with the Jenkins's Remote Access API.
module JenkinsApi
  # This is the client class that acts as the bridge between the subclasses and
  # Jenkins. This class contains methods that perform GET and POST requests
  # for various operations.
  class Client
    attr_accessor :debug, :timeout
    # Default port to be used to connect to Jenkins
    DEFAULT_SERVER_PORT = 8080
    # Default timeout in seconds to be used while performing operations
    DEFAULT_TIMEOUT = 120
    # Parameters that are permitted as options while initializing the client
    VALID_PARAMS = [
      "server_ip",
      "server_port",
      "jenkins_path",
      "username",
      "password",
      "password_base64",
      "debug",
      "timeout",
      "ssl"
    ].freeze
# Initialize a Client object with Jenkins CI server credentials
#
# @param [Hash] args
# * the +:server_ip+ param is the IP address of the Jenkins CI server
# * the +:server_port+ param is the port on which the Jenkins listens
# * the +:username+ param is the username used for connecting to the server
# * the +:password+ param is the password for connecting to the CI server
# * the +:ssl+ param indicates if Jenkins is accessible over HTTPS (defaults to false)
#
# @return [JenkinsApi::Client] a client object to Jenkins API
#
# @raise [ArgumentError] when required options are not provided.
#
def initialize(args)
args.each do |key, value|
if value && VALID_PARAMS.include?(key.to_s)
instance_variable_set("@#{key}", value)
end
end if args.is_a? Hash
raise "Server IP is required to connect to Jenkins" unless @server_ip
unless @username && (@password || @password_base64)
raise "Credentials are required to connect to te Jenkins Server"
end
@server_port = DEFAULT_SERVER_PORT unless @server_port
@timeout = DEFAULT_TIMEOUT unless @timeout
@ssl ||= false
@debug = false unless @debug
# Base64 decode inserts a newline character at the end. As a workaround
# added chomp to remove newline characters. I hope nobody uses newline
# characters at the end of their passwords :)
@password = Base64.decode64(@password_base64).chomp if @password_base64
@crumbs_enabled = use_crumbs?
end
# This method toggles the debug parameter in run time
#
def toggle_debug
@debug = @debug == false ? true : false
end
# Creates an instance to the Job class by passing a reference to self
#
# @return [JenkinsApi::Client::Job] An object to Job subclass
#
def job
JenkinsApi::Client::Job.new(self)
end
# Creates an instance to the System class by passing a reference to self
#
# @return [JenkinsApi::Client::System] An object to System subclass
#
def system
JenkinsApi::Client::System.new(self)
end
# Creates an instance to the Node class by passing a reference to self
#
# @return [JenkinsApi::Client::Node] An object to Node subclass
#
def node
JenkinsApi::Client::Node.new(self)
end
# Creates an instance to the View class by passing a reference to self
#
# @return [JenkinsApi::Client::View] An object to View subclass
#
def view
JenkinsApi::Client::View.new(self)
end
# Creates an instance to the BuildQueue by passing a reference to self
#
# @return [JenkinsApi::Client::BuildQueue] An object to BuildQueue subclass
#
def queue
JenkinsApi::Client::BuildQueue.new(self)
end
# Returns a string representing the class name
#
# @return [String] string representation of class name
#
def to_s
"#<JenkinsApi::Client>"
end
# Obtains the root of Jenkins server. This function is used to see if
# Jenkins is running
#
# @return [Net::HTTP::Response] Response from Jenkins for "/"
#
def get_root
http = Net::HTTP.start(@server_ip, @server_port, :use_ssl => @ssl)
request = Net::HTTP::Get.new("/")
request.basic_auth @username, @password
http.request(request)
end
# Sends a GET request to the Jenkins CI server with the specified URL
#
# @param [String] url_prefix The prefix to use in the URL
# @param [String] tree A specific JSON tree to optimize the API call
# @param [String] url_suffix The suffix to be used in the URL
#
# @return [String, JSON] JSON response from Jenkins
#
def api_get_request(url_prefix, tree = nil, url_suffix ="/api/json")
url_prefix = "#{@jenkins_path}#{url_prefix}"
http = Net::HTTP.start(@server_ip, @server_port, :use_ssl => @ssl)
to_get = ""
if tree
to_get = "#{url_prefix}#{url_suffix}?#{tree}"
else
to_get = "#{url_prefix}#{url_suffix}"
end
to_get = URI.escape(to_get)
request = Net::HTTP::Get.new(to_get)
puts "[INFO] GET #{to_get}" if @debug
request.basic_auth @username, @password
response = http.request(request)
handle_exception(response, "body", url_suffix =~ /json/)
end
# Sends a POST message to the Jenkins CI server with the specified URL
#
# @param [String] url_prefix The prefix to be used in the URL
# @param [Hash] form_data Form data to send with POST request
#
# @return [String] Response code form Jenkins Response
#
def api_post_request(url_prefix, form_data = {})
url_prefix = URI.escape("#{@jenkins_path}#{url_prefix}")
http = Net::HTTP.start(@server_ip, @server_port, :use_ssl => @ssl)
request = Net::HTTP::Post.new("#{url_prefix}")
if @crumbs_enabled
crumb_response = get_crumb
form_data.merge!(
{
crumb_response["crumbRequestField"] => crumb_response["crumb"],
}
)
end
puts "[INFO] PUT #{url_prefix}" if @debug
request.basic_auth @username, @password
request.content_type = 'application/json'
request.set_form_data(form_data) unless form_data.empty?
response = http.request(request)
handle_post_response(response)
end
# Obtains the configuration of a component from the Jenkins CI server
#
# @param [String] url_prefix The prefix to be used in the URL
#
# @return [String] XML configuration obtained from Jenkins
#
def get_config(url_prefix)
url_prefix = URI.escape("#{@jenkins_path}#{url_prefix}")
http = Net::HTTP.start(@server_ip, @server_port, :use_ssl => @ssl)
request = Net::HTTP::Get.new("#{url_prefix}/config.xml")
puts "[INFO] GET #{url_prefix}/config.xml" if @debug
request.basic_auth @username, @password
response = http.request(request)
handle_exception(response, "body")
end
# Posts the given xml configuration to the url given
#
# @param [String] url_prefix The prefix to be used in the URL
# @param [String] xml The XML configuration to be sent to Jenkins
#
# @return [String] Response code returned from Jenkins
#
def post_config(url_prefix, xml, form_data = {})
url_prefix = URI.escape(url_prefix)
http = Net::HTTP.start(@server_ip, @server_port, :use_ssl => @ssl)
puts "[INFO] PUT #{url_prefix}" if @debug
puts "POSTING: #{xml}"
if @crumbs_enabled
crumb_response = get_crumb
form_data.merge!(
{
"mode" => "hudson.model.FreeStyleProject",
#"mode" => "",
crumb_response["crumbRequestField"] => crumb_response["crumb"],
}
)
end
request = Net::HTTP::Post.new("#{url_prefix}?" + URI.encode_www_form( form_data ))
request.basic_auth @username, @password
request.body = xml
request.content_type = 'application/xml'
# request.set_form_data(form_data) unless form_data.empty?
puts "DEBUG: Crumb: #{form_data.inspect}"
response = http.request(request)
puts "DEBUG: response: #{response.inspect}"
handle_post_response(response)
end
def use_crumbs?
json = api_get_request("")
json["useCrumbs"]
end
def use_securit?
json = api_get_request("")
json["useSecurity"]
end
# Obtains the jenkins version from the API
#
# @return Jenkins version
#
def get_jenkins_version
response = get_root
response["X-Jenkins"]
end
# Obtain the Hudson version of the CI server
#
# @return [String] Version of Hudson on Jenkins server
#
def get_hudson_version
response = get_root
response["X-Hudson"]
end
# Obtain the date of the Jenkins server
#
# @return [String] Server date
#
def get_server_date
response = get_root
response["Date"]
end
#private
def get_crumb
begin
response = api_get_request("/crumbIssuer")
rescue Exceptions::NotFoundException
raise "You've asked to enable CSRF protection, but it looks like" +
" your Jenkins server doesn't have this setting enabled. Please" +
" change the Jenkins server setting or client configuration."
end
end
def handle_post_response(response)
msg = "HTTP Code: #{response.code}"
msg << " Response Body: #{response.body}" if @debug
case response.code.to_i
when 200, 302
return response.code
when 404
raise Exceptions::NotFoundException.new(msg)
when 500
raise Exceptions::InternelServerErrorException.new(msg)
else
raise Exceptions::ApiException.new(msg)
end
end
def handle_exception(response, to_send = "code", send_json = false)
msg = "HTTP Code: #{response.code}, Response Body: #{response.body}"
case response.code.to_i
when 200, 302
if to_send == "body" && send_json
return JSON.parse(response.body)
elsif to_send == "body"
return response.body
elsif to_send == "code"
return response.code
end
when 401
raise Exceptions::UnautherizedException.new
when 404
raise Exceptions::NotFoundException.new
when 500
raise Exceptions::InternelServerErrorException.new
else
raise Exceptions::ApiException.new
end
end
end
end
crumb as a header
#
# Copyright (c) 2012-2013 Kannan Manickam <arangamani.kannan@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
require 'rubygems'
require 'json'
require 'net/http'
require 'nokogiri'
#require 'active_support/core_ext'
#require 'active_support/builder'
require 'base64'
require 'mixlib/shellout'
require 'uri'
# The main module that contains the Client class and all subclasses that
# communicate with the Jenkins's Remote Access API.
module JenkinsApi
  # This is the client class that acts as the bridge between the subclasses and
  # Jenkins. This class contains methods that perform GET and POST requests
  # for various operations.
  class Client
    attr_accessor :debug, :timeout
    # Default port to be used to connect to Jenkins
    DEFAULT_SERVER_PORT = 8080
    # Default timeout in seconds to be used while performing operations
    DEFAULT_TIMEOUT = 120
    # Parameters that are permitted as options while initializing the client
    VALID_PARAMS = [
      "server_ip",
      "server_port",
      "jenkins_path",
      "username",
      "password",
      "password_base64",
      "debug",
      "timeout",
      "ssl"
    ].freeze
# Initialize a Client object with Jenkins CI server credentials
#
# @param [Hash] args
# * the +:server_ip+ param is the IP address of the Jenkins CI server
# * the +:server_port+ param is the port on which the Jenkins listens
# * the +:username+ param is the username used for connecting to the server
# * the +:password+ param is the password for connecting to the CI server
# * the +:ssl+ param indicates if Jenkins is accessible over HTTPS (defaults to false)
#
# @return [JenkinsApi::Client] a client object to Jenkins API
#
# @raise [ArgumentError] when required options are not provided.
#
def initialize(args)
args.each do |key, value|
if value && VALID_PARAMS.include?(key.to_s)
instance_variable_set("@#{key}", value)
end
end if args.is_a? Hash
raise "Server IP is required to connect to Jenkins" unless @server_ip
unless @username && (@password || @password_base64)
raise "Credentials are required to connect to te Jenkins Server"
end
@server_port = DEFAULT_SERVER_PORT unless @server_port
@timeout = DEFAULT_TIMEOUT unless @timeout
@ssl ||= false
@debug = false unless @debug
# Base64 decode inserts a newline character at the end. As a workaround
# added chomp to remove newline characters. I hope nobody uses newline
# characters at the end of their passwords :)
@password = Base64.decode64(@password_base64).chomp if @password_base64
@crumbs_enabled = use_crumbs?
end
# This method toggles the debug parameter in run time
#
def toggle_debug
@debug = @debug == false ? true : false
end
# Creates an instance to the Job class by passing a reference to self
#
# @return [JenkinsApi::Client::Job] An object to Job subclass
#
def job
JenkinsApi::Client::Job.new(self)
end
# Creates an instance to the System class by passing a reference to self
#
# @return [JenkinsApi::Client::System] An object to System subclass
#
def system
JenkinsApi::Client::System.new(self)
end
# Creates an instance to the Node class by passing a reference to self
#
# @return [JenkinsApi::Client::Node] An object to Node subclass
#
def node
JenkinsApi::Client::Node.new(self)
end
# Creates an instance to the View class by passing a reference to self
#
# @return [JenkinsApi::Client::View] An object to View subclass
#
def view
JenkinsApi::Client::View.new(self)
end
# Creates an instance to the BuildQueue by passing a reference to self
#
# @return [JenkinsApi::Client::BuildQueue] An object to BuildQueue subclass
#
def queue
JenkinsApi::Client::BuildQueue.new(self)
end
# Returns a string representing the class name
#
# @return [String] string representation of class name
#
def to_s
"#<JenkinsApi::Client>"
end
# Obtains the root of Jenkins server. This function is used to see if
# Jenkins is running
#
# @return [Net::HTTP::Response] Response from Jenkins for "/"
#
def get_root
http = Net::HTTP.start(@server_ip, @server_port, :use_ssl => @ssl)
request = Net::HTTP::Get.new("/")
request.basic_auth @username, @password
http.request(request)
end
# Sends a GET request to the Jenkins CI server with the specified URL
#
# @param [String] url_prefix The prefix to use in the URL
# @param [String] tree A specific JSON tree to optimize the API call
# @param [String] url_suffix The suffix to be used in the URL
#
# @return [String, JSON] JSON response from Jenkins
#
def api_get_request(url_prefix, tree = nil, url_suffix ="/api/json")
url_prefix = "#{@jenkins_path}#{url_prefix}"
http = Net::HTTP.start(@server_ip, @server_port, :use_ssl => @ssl)
to_get = ""
if tree
to_get = "#{url_prefix}#{url_suffix}?#{tree}"
else
to_get = "#{url_prefix}#{url_suffix}"
end
to_get = URI.escape(to_get)
request = Net::HTTP::Get.new(to_get)
puts "[INFO] GET #{to_get}" if @debug
request.basic_auth @username, @password
response = http.request(request)
handle_exception(response, "body", url_suffix =~ /json/)
end
# Sends a POST message to the Jenkins CI server with the specified URL
#
# @param [String] url_prefix The prefix to be used in the URL
# @param [Hash] form_data Form data to send with POST request
#
# @return [String] Response code form Jenkins Response
#
def api_post_request(url_prefix, form_data = {})
url_prefix = URI.escape("#{@jenkins_path}#{url_prefix}")
http = Net::HTTP.start(@server_ip, @server_port, :use_ssl => @ssl)
request = Net::HTTP::Post.new("#{url_prefix}")
if @crumbs_enabled
crumb_response = get_crumb
form_data.merge!(
{
crumb_response["crumbRequestField"] => crumb_response["crumb"],
}
)
end
puts "[INFO] PUT #{url_prefix}" if @debug
request.basic_auth @username, @password
request.content_type = 'application/json'
request.set_form_data(form_data) unless form_data.empty?
response = http.request(request)
handle_post_response(response)
end
# Obtains the configuration of a component from the Jenkins CI server
#
# @param [String] url_prefix The prefix to be used in the URL
#
# @return [String] XML configuration obtained from Jenkins
#
def get_config(url_prefix)
url_prefix = URI.escape("#{@jenkins_path}#{url_prefix}")
http = Net::HTTP.start(@server_ip, @server_port, :use_ssl => @ssl)
request = Net::HTTP::Get.new("#{url_prefix}/config.xml")
puts "[INFO] GET #{url_prefix}/config.xml" if @debug
request.basic_auth @username, @password
response = http.request(request)
handle_exception(response, "body")
end
# Posts the given xml configuration to the url given
#
# @param [String] url_prefix The prefix to be used in the URL
# @param [String] xml The XML configuration to be sent to Jenkins
#
# @return [String] Response code returned from Jenkins
#
def post_config(url_prefix, xml, form_data = {})
url_prefix = URI.escape(url_prefix)
http = Net::HTTP.start(@server_ip, @server_port, :use_ssl => @ssl)
puts "[INFO] PUT #{url_prefix}" if @debug
puts "POSTING: #{xml}"
request = Net::HTTP::Post.new("#{url_prefix}")
request.basic_auth @username, @password
request.body = xml
request.content_type = 'application/xml'
if @crumbs_enabled
crumb_response = get_crumb
request[crumb_response['crumbRequestField']] = crumb_response['crumb']
end
# request.set_form_data(form_data) unless form_data.empty?
puts "DEBUG: Crumb: #{form_data.inspect}"
response = http.request(request)
puts "DEBUG: response: #{response.inspect}"
handle_post_response(response)
end
def use_crumbs?
json = api_get_request("")
json["useCrumbs"]
end
def use_securit?
json = api_get_request("")
json["useSecurity"]
end
# Obtains the jenkins version from the API
#
# @return Jenkins version
#
def get_jenkins_version
response = get_root
response["X-Jenkins"]
end
# Obtain the Hudson version of the CI server
#
# @return [String] Version of Hudson on Jenkins server
#
def get_hudson_version
response = get_root
response["X-Hudson"]
end
# Obtain the date of the Jenkins server
#
# @return [String] Server date
#
def get_server_date
response = get_root
response["Date"]
end
#private
def get_crumb
begin
response = api_get_request("/crumbIssuer")
rescue Exceptions::NotFoundException
raise "You've asked to enable CSRF protection, but it looks like" +
" your Jenkins server doesn't have this setting enabled. Please" +
" change the Jenkins server setting or client configuration."
end
end
def handle_post_response(response)
msg = "HTTP Code: #{response.code}"
msg << " Response Body: #{response.body}" if @debug
case response.code.to_i
when 200, 302
return response.code
when 404
raise Exceptions::NotFoundException.new(msg)
when 500
raise Exceptions::InternelServerErrorException.new(msg)
else
raise Exceptions::ApiException.new(msg)
end
end
def handle_exception(response, to_send = "code", send_json = false)
msg = "HTTP Code: #{response.code}, Response Body: #{response.body}"
case response.code.to_i
when 200, 302
if to_send == "body" && send_json
return JSON.parse(response.body)
elsif to_send == "body"
return response.body
elsif to_send == "code"
return response.code
end
when 401
raise Exceptions::UnautherizedException.new
when 404
raise Exceptions::NotFoundException.new
when 500
raise Exceptions::InternelServerErrorException.new
else
raise Exceptions::ApiException.new
end
end
end
end
|
#--
# Copyright (C) 2006 Andrea Censi <andrea (at) rubyforge.org>
#
# This file is part of Maruku.
#
# Maruku is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Maruku is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Maruku; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#++
# Patch String so every line can report its markdown block-level type.
class String
  include MaRuKu::Strings
  # @return [Symbol] cached block-level type of this line (see #line_md_type)
  def md_type()
    @md_type ||= line_md_type(self)
  end
end
# A missing (nil) line has no markdown type.
class NilClass
  # @return [nil] nil never has a type
  def md_type
    nil
  end
end
# This code does the classification of lines for block-level parsing.
module MaRuKu; module Strings
  # Classifies a single line +l+ into a block-level type Symbol
  # (:text, :code, :empty, :ulist, :olist, :header1..3, :hrule, :quote, ...).
  def line_md_type(l)
    # The order of evaluation is important (:text is a catch-all)
    return :text if l =~ /^[a-zA-Z]/
    return :code if number_of_leading_spaces(l)>=4
    return :empty if l =~ /^\s*$/
    return :footnote_text if l =~ FootnoteText
    return :ref_definition if l =~ LinkRegex or l=~ IncompleteLink
    return :abbreviation if l =~ Abbreviation
    return :definition if l =~ Definition
    # I had a bug with emails and urls at the beginning of the
    # line that were mistaken for raw_html
    return :text if l=~ /^[ ]{0,3}#{EMailAddress}/
    return :text if l=~ /^[ ]{0,3}<http:/
    # raw html is like PHP Markdown Extra: at most three spaces before
    return :xml_instr if l =~ %r{^\s*<\?}
    return :raw_html if l =~ %r{^[ ]?[ ]?[ ]?</?\s*\w+}
    return :raw_html if l =~ %r{^[ ]?[ ]?[ ]?<\!\-\-}
    # Something is wrong with how we parse lists! :-(
    #return :ulist if l =~ /^[ ]{0,3}([\*\-\+])\s+.*\w+/
    #return :olist if l =~ /^[ ]{0,3}\d+\..*\w+/
    return :ulist if l =~ /^[ ]{0,1}([\*\-\+])\s+.*/
    return :olist if l =~ /^[ ]{0,1}\d+\..*/
    return :header1 if l =~ /^(=)+/
    return :header2 if l =~ /^([-\s])+$/
    return :header3 if l =~ /^(#)+\s*\S+/
    # at least three asterisks on a line, and only whitespace
    return :hrule if l =~ /^(\s*\*\s*){3,1000}$/
    return :hrule if l =~ /^(\s*-\s*){3,1000}$/ # or hyphens
    return :hrule if l =~ /^(\s*_\s*){3,1000}$/ # or underscores
    return :quote if l =~ /^>/
    return :metadata if l =~ /^@/
    # if @@new_meta_data?
    return :ald if l =~ AttributeDefinitionList
    return :ial if l =~ InlineAttributeList
    # end
    # return :equation_end if l =~ EquationEnd
    return :text # else, it's just text
  end
  # $1 = id $2 = attribute list
  AttributeDefinitionList = /^\s{0,3}\{([\w\d\s]+)\}:\s*(.*?)\s*$/
  # inline attribute list, e.g. {: .class}
  InlineAttributeList = /^\s{0,3}\{([:#\.].*?)\}\s*$/
  # Example:
  # ^:blah blah
  # ^: blah blah
  # ^ : blah blah
  Definition = %r{
    ^ # begin of line
    [ ]{0,3} # up to 3 spaces
    : # colon
    \s* # whitespace
    (\S.*) # the text = $1
    $ # end of line
  }x
  # Example:
  # *[HTML]: Hyper Text Markup Language
  Abbreviation = %r{
    ^ # begin of line
    [ ]{0,3} # up to 3 spaces
    \* # one asterisk
    \[ # opening bracket
    ([^\]]+) # any non-closing bracket: id = $1
    \] # closing bracket
    : # colon
    \s* # whitespace
    (\S.*\S)* # definition=$2
    \s* # strip this whitespace
    $ # end of line
  }x
  FootnoteText = %r{
    ^ # begin of line
    [ ]{0,3} # up to 3 spaces
    \[(\^.+)\]: # id = $1 (including '^')
    \s*(\S.*)?$ # text = $2 (not obb.)
  }x
  # This regex is taken from BlueCloth sources
  # Link defs are in the form: ^[id]: \n? url "optional title"
  LinkRegex = %r{
    ^[ ]{0,3}\[([^\[\]]+)\]: # id = $1
    [ ]*
    <?([^>\s]+)>? # url = $2
    [ ]*
    (?:# Titles are delimited by "quotes" or (parens).
    ["(']
    (.+?) # title = $3
    [")'] # Matching ) or "
    \s*(.+)? # stuff = $4
    )? # title is optional
  }x
  IncompleteLink = %r{^[ ]{0,3}\[([^\[\]]+?)\]:\s*$}
  HeaderWithId = /^(.*?)\{\#([\w_-]+)\}\s*$/
  HeaderWithAttributes = /^(.*?)\{(.*?)\}\s*$/
  # if contains a pipe, it could be a table header
  MightBeTableHeader = %r{\|}
  # -------------:
  Sep = /\s*(\:)?\s*-+\s*(\:)?\s*/
  # | -------------:| ------------------------------ |
  TableSeparator = %r{^(\|?#{Sep}\|?)+?\s*$}
  EMailAddress = /<([^:]+?@[^:]+?)>/
end end
Fix regression in ordered list processing
Reported by Andrew Stacey.
#--
# Copyright (C) 2006 Andrea Censi <andrea (at) rubyforge.org>
#
# This file is part of Maruku.
#
# Maruku is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Maruku is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Maruku; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#++
# Adds block-level Markdown classification to String via MaRuKu::Strings.
class String
  include MaRuKu::Strings

  # Classifies this line for block-level parsing and caches the answer,
  # since a given line's type never changes.
  def md_type
    @md_type ||= line_md_type(self)
  end
end
# A nil "line" has no Markdown type; mirrors String#md_type so callers can
# ask any line-or-nil for its type without checking for nil first.
class NilClass
  def md_type
    nil
  end
end
# This code does the classification of lines for block-level parsing.
module MaRuKu; module Strings
# Returns the block-level type of line +l+ as a symbol, e.g. :text, :code,
# :empty, :ulist, :olist, :header1..:header3, :hrule, :quote, :metadata.
# The result is decided by the FIRST matching rule below.
def line_md_type(l)
# The order of evaluation is important (:text is a catch-all)
return :text if l =~ /^[a-zA-Z]/
return :code if number_of_leading_spaces(l)>=4
return :empty if l =~ /^\s*$/
return :footnote_text if l =~ FootnoteText
return :ref_definition if l =~ LinkRegex or l=~ IncompleteLink
return :abbreviation if l =~ Abbreviation
return :definition if l =~ Definition
# I had a bug with emails and urls at the beginning of the
# line that were mistaken for raw_html
return :text if l=~ /^[ ]{0,3}#{EMailAddress}/
return :text if l=~ /^[ ]{0,3}<http:/
# raw html is like PHP Markdown Extra: at most three spaces before
return :xml_instr if l =~ %r{^\s*<\?}
return :raw_html if l =~ %r{^[ ]?[ ]?[ ]?</?\s*\w+}
return :raw_html if l =~ %r{^[ ]?[ ]?[ ]?<\!\-\-}
# Something is wrong with how we parse lists! :-(
#return :ulist if l =~ /^[ ]{0,3}([\*\-\+])\s+.*\w+/
#return :olist if l =~ /^[ ]{0,3}\d+\..*\w+/
# NOTE: list markers are only recognized with at most ONE leading space here
# (the commented-out rules above allowed three) -- deliberate regression fix.
return :ulist if l =~ /^[ ]{0,1}([\*\-\+])\s+.*/
return :olist if l =~ /^[ ]{0,1}\d+\.\s+.*/
return :header1 if l =~ /^(=)+/
return :header2 if l =~ /^([-\s])+$/
return :header3 if l =~ /^(#)+\s*\S+/
# at least three asterisks on a line, and only whitespace
return :hrule if l =~ /^(\s*\*\s*){3,1000}$/
return :hrule if l =~ /^(\s*-\s*){3,1000}$/ # or hyphens
return :hrule if l =~ /^(\s*_\s*){3,1000}$/ # or underscores
return :quote if l =~ /^>/
return :metadata if l =~ /^@/
# if @@new_meta_data?
return :ald if l =~ AttributeDefinitionList
return :ial if l =~ InlineAttributeList
# end
# return :equation_end if l =~ EquationEnd
return :text # else, it's just text
end
# $1 = id $2 = attribute list
AttributeDefinitionList = /^\s{0,3}\{([\w\d\s]+)\}:\s*(.*?)\s*$/
# Inline attribute list on a line of its own, e.g. "{: .class #id}"; $1 = list.
InlineAttributeList = /^\s{0,3}\{([:#\.].*?)\}\s*$/
# Example:
# ^:blah blah
# ^: blah blah
# ^ : blah blah
Definition = %r{
^ # begin of line
[ ]{0,3} # up to 3 spaces
: # colon
\s* # whitespace
(\S.*) # the text = $1
$ # end of line
}x
# Example:
# *[HTML]: Hyper Text Markup Language
Abbreviation = %r{
^ # begin of line
[ ]{0,3} # up to 3 spaces
\* # one asterisk
\[ # opening bracket
([^\]]+) # any non-closing bracket: id = $1
\] # closing bracket
: # colon
\s* # whitespace
(\S.*\S)* # definition=$2
\s* # strip this whitespace
$ # end of line
}x
# Matches a footnote text line, e.g. "[^id]: footnote body".
FootnoteText = %r{
^ # begin of line
[ ]{0,3} # up to 3 spaces
\[(\^.+)\]: # id = $1 (including '^')
\s*(\S.*)?$ # text = $2 (not obb.)
}x
# This regex is taken from BlueCloth sources
# Link defs are in the form: ^[id]: \n? url "optional title"
LinkRegex = %r{
^[ ]{0,3}\[([^\[\]]+)\]: # id = $1
[ ]*
<?([^>\s]+)>? # url = $2
[ ]*
(?:# Titles are delimited by "quotes" or (parens).
["(']
(.+?) # title = $3
[")'] # Matching ) or "
\s*(.+)? # stuff = $4
)? # title is optional
}x
# A link-reference line whose URL is missing (id followed only by whitespace).
IncompleteLink = %r{^[ ]{0,3}\[([^\[\]]+?)\]:\s*$}
# Header line carrying an explicit id, e.g. "## Title {#my-id}"; $1 = text, $2 = id.
HeaderWithId = /^(.*?)\{\#([\w_-]+)\}\s*$/
# Header line carrying an attribute list in braces; $1 = text, $2 = raw attributes.
HeaderWithAttributes = /^(.*?)\{(.*?)\}\s*$/
# if contains a pipe, it could be a table header
MightBeTableHeader = %r{\|}
# -------------:
Sep = /\s*(\:)?\s*-+\s*(\:)?\s*/
# | -------------:| ------------------------------ |
TableSeparator = %r{^(\|?#{Sep}\|?)+?\s*$}
# E-mail autolink, e.g. "<user@example.com>"; $1 = address without the brackets.
EMailAddress = /<([^:]+?@[^:]+?)>/
end end
|
# CocoaPods specification for MKUnits.
Pod::Spec.new do |s|
  s.name     = "MKUnits"
  s.version  = "1.2.1"
  s.summary  = "Unit conversion library for Objective-C. It provides units of measurement of physical quantities and simplifies manipulation of them."
  s.homepage = "https://github.com/michalkonturek/MKUnits"
  s.license  = 'MIT'
  s.author   = {
    "Michal Konturek" => "michal.konturek@gmail.com"
  }
  s.ios.deployment_target = '7.0'
  s.source = {
    :git => "https://github.com/michalkonturek/MKUnits.git",
    # Derive the tag from s.version so the two can never drift apart.
    :tag => s.version.to_s
  }
  s.source_files = 'Source/**/*.{h,m}'
  s.requires_arc = true
  s.dependency 'MKFoundationKit/NSNumber', '~> 1.2.0'
end
Added twitter handle to podspec.
# CocoaPods specification for MKUnits.
Pod::Spec.new do |s|
  s.name     = "MKUnits"
  s.version  = "1.2.1"
  s.summary  = "Unit conversion library for Objective-C. It provides units of measurement of physical quantities and simplifies manipulation of them."
  s.homepage = "https://github.com/michalkonturek/MKUnits"
  s.license  = 'MIT'
  s.author   = {
    "Michal Konturek" => "michal.konturek@gmail.com"
  }
  s.ios.deployment_target = '7.0'
  s.social_media_url = 'https://twitter.com/michalkonturek'
  s.source = {
    :git => "https://github.com/michalkonturek/MKUnits.git",
    # Derive the tag from s.version so the two can never drift apart.
    :tag => s.version.to_s
  }
  s.source_files = 'Source/**/*.{h,m}'
  s.requires_arc = true
  s.dependency 'MKFoundationKit/NSNumber', '~> 1.2.0'
end
|
module JSONAPI
  module Resources
    # Gem version string; frozen so the shared constant cannot be mutated in place.
    VERSION = '0.5.6'.freeze
  end
end
Bump to 0.5.7
module JSONAPI
  module Resources
    # Gem version string; frozen so the shared constant cannot be mutated in place.
    VERSION = '0.5.7'.freeze
  end
end
|
require 'maruto/base'
require 'nokogiri'
module Maruto::ModuleConfiguration
  # Parses a Magento module's config.xml and annotates the module hash +m+
  # with :version, :events and :warnings. Returns the mutated hash.
  def self.parse_module_configuration(m)
    # Block form guarantees the file handle is closed even when strict
    # parsing raises on malformed XML (the previous open/close pair leaked
    # the handle in that case).
    xml_root = File.open(m[:config_path]) do |f|
      Nokogiri::XML(f) { |config| config.strict }.root
    end
    version_warnings = parse_module_version(m, xml_root)
    event_warnings   = parse_all_event_observers(m, xml_root)
    config_warnings  = version_warnings + event_warnings
    all_module_warnings = m[:warnings] || []
    all_module_warnings.concat(config_warnings.map { |msg| { :file => m[:config_path], :message => msg } })
    m[:warnings] = all_module_warnings unless all_module_warnings.size == 0
    m
  end

  # Reads /config/modules/<name>/version into m[:version].
  # Returns an array of warning strings (possibly empty).
  def self.parse_module_version(m, xml_root)
    xml_node = xml_root.at_xpath('/config/modules')
    if xml_node.nil?
      return ["config.xml is missing a /config/modules node"]
    end
    warnings = []
    unless xml_node.at_xpath("./#{m[:name]}")
      warnings << "config.xml is missing a /config/modules/#{m[:name]} node"
    end
    xml_node.xpath("./*").each do |n|
      unless n.name.to_sym == m[:name]
        warnings << "config.xml contains configuration for a different module (/config/modules/#{n.name})"
      end
    end
    m[:version] = xml_node.at_xpath("./#{m[:name]}/version").content unless xml_node.at_xpath("./#{m[:name]}/version").nil?
    warnings
  end

  # Parses <events> under one configuration scope (e.g. /config/global).
  # Returns [events, warnings]; each event hash carries its observers, each
  # with a normalized :type of :disabled, :model or :singleton.
  def self.parse_scoped_event_observers(base_path, xml_node)
    return [],[] if xml_node.nil?
    events = []
    warnings = []
    if xml_node.size > 1
      warnings << "duplicate element in config.xml (#{base_path})"
    end
    xml_node.xpath('events/*').each do |e|
      event = {
        :name => e.name,
        :path => base_path + '/events/' + e.name,
        :observers => [],
      }
      e.xpath('observers/*').each do |o|
        observer = {
          :name => o.name,
          :path => event[:path] + '/observers/' + o.name,
        }
        type = o.at_xpath('type').content unless o.at_xpath('type').nil?
        observer[:class] = o.at_xpath('class').content unless o.at_xpath('class').nil?
        observer[:method] = o.at_xpath('method').content unless o.at_xpath('method').nil?
        # see Mage_Core_Model_App::dispatchEvent
        if type.nil?
          # default is singleton
          observer[:type] = :singleton
        elsif type == 'object'
          # object is an alias for model
          observer[:type] = :model
          warnings << "#{observer[:path]}/type 'object' is an alias for 'model'"
        elsif /^(disabled|model|singleton)$/ =~ type
          observer[:type] = type.to_sym
        else
          # everything else => default (with warning)
          observer[:type] = :singleton
          warnings << "#{observer[:path]}/type replaced with 'singleton', was '#{type}' (possible values: 'disabled', 'model', 'singleton', or nothing)"
        end
        event[:observers] << observer
      end
      events << event
    end
    return events, warnings
  end

  # Parses observers for every known area and stores them on m[:events].
  # Returns the collected warning strings.
  def self.parse_all_event_observers(m, xml_node)
    areas = [:global, :frontend, :adminhtml, :crontab]
    events = {}
    warnings = []
    areas.each do |area|
      e, w = parse_scoped_event_observers("/config/#{area}", xml_node.xpath("/config/#{area}"))
      events[area] = e if e.size > 0
      warnings.concat w
    end
    m[:events] = events if events.keys.size > 0
    warnings << "the 'admin' area should not contain events (/config/admin/events)" unless xml_node.at_xpath("/config/admin/events").nil?
    return warnings
  end

  # Merges the observers of all modules for one area, warning on the module
  # hash when a later module redefines an already-registered observer.
  def self.collect_scoped_event_observers(area, sorted_modules)
    events = Hash.new
    sorted_modules.each do |m|
      if m.include? :events and m[:events].include? area then
        m[:events][area].each do |event|
          event_name = event[:name]
          events[event_name] ||= Hash.new
          event[:observers].each do |observer|
            observer_name = observer[:name]
            if events[event_name].include? observer_name
              add_module_config_warning(m, "event_observer:#{area}/#{event_name}/#{observer_name} - defined in #{events[event_name][observer_name][:module]} and redefined in #{m[:name]}")
            end
            events[event_name][observer_name] = observer
            events[event_name][observer_name][:module] = m[:name]
          end
        end
      end
    end
    events
  end

  # Collects observers for all areas across the (load-order sorted) modules.
  def self.collect_event_observers(sorted_modules)
    areas = [:global, :frontend, :adminhtml, :crontab]
    events = {}
    areas.each do |area|
      events[area] = collect_scoped_event_observers(area, sorted_modules)
    end
    events
  end

  private

  # Appends a warning entry to the module hash.
  # NOTE(review): `private` has no effect on methods defined with `def self.`,
  # so this helper is still publicly reachable. Left unchanged to keep the
  # interface intact; use `private_class_method :add_module_config_warning`
  # if true privacy is desired.
  def self.add_module_config_warning(m, msg)
    m[:warnings] ||= []
    m[:warnings] << { :file => m[:config_path], :message => msg }
  end
end
Better warning message: suggest using type 'disabled' when an event observer is redefined.
require 'maruto/base'
require 'nokogiri'
module Maruto::ModuleConfiguration
  # Parses a Magento module's config.xml and annotates the module hash +m+
  # with :version, :events and :warnings. Returns the mutated hash.
  def self.parse_module_configuration(m)
    # Block form guarantees the file handle is closed even when strict
    # parsing raises on malformed XML (the previous open/close pair leaked
    # the handle in that case).
    xml_root = File.open(m[:config_path]) do |f|
      Nokogiri::XML(f) { |config| config.strict }.root
    end
    version_warnings = parse_module_version(m, xml_root)
    event_warnings   = parse_all_event_observers(m, xml_root)
    config_warnings  = version_warnings + event_warnings
    all_module_warnings = m[:warnings] || []
    all_module_warnings.concat(config_warnings.map { |msg| { :file => m[:config_path], :message => msg } })
    m[:warnings] = all_module_warnings unless all_module_warnings.size == 0
    m
  end

  # Reads /config/modules/<name>/version into m[:version].
  # Returns an array of warning strings (possibly empty).
  def self.parse_module_version(m, xml_root)
    xml_node = xml_root.at_xpath('/config/modules')
    if xml_node.nil?
      return ["config.xml is missing a /config/modules node"]
    end
    warnings = []
    unless xml_node.at_xpath("./#{m[:name]}")
      warnings << "config.xml is missing a /config/modules/#{m[:name]} node"
    end
    xml_node.xpath("./*").each do |n|
      unless n.name.to_sym == m[:name]
        warnings << "config.xml contains configuration for a different module (/config/modules/#{n.name})"
      end
    end
    m[:version] = xml_node.at_xpath("./#{m[:name]}/version").content unless xml_node.at_xpath("./#{m[:name]}/version").nil?
    warnings
  end

  # Parses <events> under one configuration scope (e.g. /config/global).
  # Returns [events, warnings]; each event hash carries its observers, each
  # with a normalized :type of :disabled, :model or :singleton.
  def self.parse_scoped_event_observers(base_path, xml_node)
    return [],[] if xml_node.nil?
    events = []
    warnings = []
    if xml_node.size > 1
      warnings << "duplicate element in config.xml (#{base_path})"
    end
    xml_node.xpath('events/*').each do |e|
      event = {
        :name => e.name,
        :path => base_path + '/events/' + e.name,
        :observers => [],
      }
      e.xpath('observers/*').each do |o|
        observer = {
          :name => o.name,
          :path => event[:path] + '/observers/' + o.name,
        }
        type = o.at_xpath('type').content unless o.at_xpath('type').nil?
        observer[:class] = o.at_xpath('class').content unless o.at_xpath('class').nil?
        observer[:method] = o.at_xpath('method').content unless o.at_xpath('method').nil?
        # see Mage_Core_Model_App::dispatchEvent
        if type.nil?
          # default is singleton
          observer[:type] = :singleton
        elsif type == 'object'
          # object is an alias for model
          observer[:type] = :model
          warnings << "#{observer[:path]}/type 'object' is an alias for 'model'"
        elsif /^(disabled|model|singleton)$/ =~ type
          observer[:type] = type.to_sym
        else
          # everything else => default (with warning)
          observer[:type] = :singleton
          warnings << "#{observer[:path]}/type replaced with 'singleton', was '#{type}' (possible values: 'disabled', 'model', 'singleton', or nothing)"
        end
        event[:observers] << observer
      end
      events << event
    end
    return events, warnings
  end

  # Parses observers for every known area and stores them on m[:events].
  # Returns the collected warning strings.
  def self.parse_all_event_observers(m, xml_node)
    areas = [:global, :frontend, :adminhtml, :crontab]
    events = {}
    warnings = []
    areas.each do |area|
      e, w = parse_scoped_event_observers("/config/#{area}", xml_node.xpath("/config/#{area}"))
      events[area] = e if e.size > 0
      warnings.concat w
    end
    m[:events] = events if events.keys.size > 0
    warnings << "the 'admin' area should not contain events (/config/admin/events)" unless xml_node.at_xpath("/config/admin/events").nil?
    return warnings
  end

  # Merges the observers of all modules for one area, warning on the module
  # hash when a later module redefines an already-registered observer.
  def self.collect_scoped_event_observers(area, sorted_modules)
    events = Hash.new
    sorted_modules.each do |m|
      if m.include? :events and m[:events].include? area then
        m[:events][area].each do |event|
          event_name = event[:name]
          events[event_name] ||= Hash.new
          event[:observers].each do |observer|
            observer_name = observer[:name]
            if events[event_name].include? observer_name
              add_module_config_warning(m, "event_observer:#{area}/#{event_name}/#{observer_name} - defined in #{events[event_name][observer_name][:module]} and redefined in #{m[:name]} (use type: disabled instead)")
            end
            events[event_name][observer_name] = observer
            events[event_name][observer_name][:module] = m[:name]
          end
        end
      end
    end
    events
  end

  # Collects observers for all areas across the (load-order sorted) modules.
  def self.collect_event_observers(sorted_modules)
    areas = [:global, :frontend, :adminhtml, :crontab]
    events = {}
    areas.each do |area|
      events[area] = collect_scoped_event_observers(area, sorted_modules)
    end
    events
  end

  private

  # Appends a warning entry to the module hash.
  # NOTE(review): `private` has no effect on methods defined with `def self.`,
  # so this helper is still publicly reachable. Left unchanged to keep the
  # interface intact; use `private_class_method :add_module_config_warning`
  # if true privacy is desired.
  def self.add_module_config_warning(m, msg)
    m[:warnings] ||= []
    m[:warnings] << { :file => m[:config_path], :message => msg }
  end
end
|
# CocoaPods specification for MTDates.
Pod::Spec.new do |s|
  s.name         = "MTDates"
  s.version      = "0.7.0"
  s.summary      = "A category on NSDate. 100+ date calculation methods."
  s.homepage     = "https://github.com/mysterioustrousers/MTDates"
  # Fixed: license identifier had a trailing space ('BSD ').
  s.license      = 'BSD'
  s.author       = { "Adam Kirk" => "atomkirk@gmail.com" }
  # Tag derived from s.version so the two cannot drift apart.
  s.source       = { :git => "https://github.com/mysterioustrousers/MTDates.git", :tag => s.version.to_s }
  s.source_files = 'MTDates/*.{h,m}'
  s.requires_arc = true
end
fixing a bug with the date/time style and adding another string method
# CocoaPods specification for MTDates.
Pod::Spec.new do |s|
  s.name         = "MTDates"
  s.version      = "0.7.1"
  s.summary      = "A category on NSDate. 100+ date calculation methods."
  s.homepage     = "https://github.com/mysterioustrousers/MTDates"
  # Fixed: license identifier had a trailing space ('BSD ').
  s.license      = 'BSD'
  s.author       = { "Adam Kirk" => "atomkirk@gmail.com" }
  # Tag derived from s.version so the two cannot drift apart.
  s.source       = { :git => "https://github.com/mysterioustrousers/MTDates.git", :tag => s.version.to_s }
  s.source_files = 'MTDates/*.{h,m}'
  s.requires_arc = true
end
|
module Librarian
  module Ansible
    # Gem version string; frozen so the shared constant cannot be mutated in place.
    VERSION = "3.0.0".freeze
  end
end
chore: bump version
module Librarian
  module Ansible
    # Gem version string; frozen so the shared constant cannot be mutated in place.
    VERSION = "3.0.1".freeze
  end
end
|
# View helpers that resolve public asset paths and emit the script/style
# tags for the MediaRocket engine.
module MediaRocket
  module Helpers
    module Assets
      def media_rocket_image_path(*segments)
        media_rocket_public_path_for(:image, *segments)
      end

      def media_rocket_javascript_path(*segments)
        media_rocket_public_path_for(:javascript, *segments)
      end

      def media_rocket_stylesheet_path(*segments)
        media_rocket_public_path_for(:stylesheet, *segments)
      end

      def media_rocket_flash_path(*segments)
        # Use String instead of Symbol
        # if type is not declared in app_dir_for
        media_rocket_public_path_for("flash", *segments)
      end

      def media_rocket_upload_path(*segments)
        media_rocket_public_path_for(:upload, *segments)
      end

      def media_rocket_public_path_for(type, *segments)
        ::MediaRocket.public_path_for(type, *segments)
      end

      # One <script> tag per bundled JavaScript file, concatenated in order.
      def media_rocket_js
        bundled = %w[
          jquery/jquery.js
          jquery/jquery.ui.js
          jquery/jquery.confirm.js
          jquery/jquery.form.js
          jquery/jquery.treetable.js
          jquery/jquery.validate.js
          jquery/jquery.livequery.js
          jquery/thickbox.js
          jquery/jquery.uploadify.js
          json2.js
          permissions.js
          master.js
        ]
        bundled.map { |asset| media_rocket_js_line(asset) }.join
      end

      def media_rocket_js_line(file)
        "<script src='#{media_rocket_javascript_path file}' type='text/javascript' charset='utf-8'></script>"
      end

      # IE-conditional stylesheet first, then one <link> tag per sheet.
      def media_rocket_css
        sheets = %w[
          master.css
          screen.css
          print.css
          jquery.treetable.css
          thickbox.css
        ]
        "<!--[if IE]>#{media_rocket_css_line 'ie.css'}<![endif]-->" +
          sheets.map { |sheet| media_rocket_css_line(sheet) }.join
      end

      def media_rocket_css_line(file)
        "<link rel='stylesheet' href='#{media_rocket_stylesheet_path file}' type='text/css' media='screen, projection'>"
      end
    end
  end
end
use full uploadify.js
# View helpers that resolve public asset paths and emit the script/style
# tags for the MediaRocket engine.
module MediaRocket
  module Helpers
    module Assets
      def media_rocket_image_path(*segments)
        media_rocket_public_path_for(:image, *segments)
      end

      def media_rocket_javascript_path(*segments)
        media_rocket_public_path_for(:javascript, *segments)
      end

      def media_rocket_stylesheet_path(*segments)
        media_rocket_public_path_for(:stylesheet, *segments)
      end

      def media_rocket_flash_path(*segments)
        # Use String instead of Symbol
        # if type is not declared in app_dir_for
        media_rocket_public_path_for("flash", *segments)
      end

      def media_rocket_upload_path(*segments)
        media_rocket_public_path_for(:upload, *segments)
      end

      def media_rocket_public_path_for(type, *segments)
        ::MediaRocket.public_path_for(type, *segments)
      end

      # One <script> tag per bundled JavaScript file, concatenated in order.
      def media_rocket_js
        bundled = %w[
          jquery/jquery.js
          jquery/jquery.ui.js
          jquery/jquery.confirm.js
          jquery/jquery.form.js
          jquery/jquery.treetable.js
          jquery/jquery.validate.js
          jquery/jquery.livequery.js
          jquery/thickbox.js
          jquery-full/jquery.uploadify.js
          json2.js
          permissions.js
          master.js
        ]
        bundled.map { |asset| media_rocket_js_line(asset) }.join
      end

      def media_rocket_js_line(file)
        "<script src='#{media_rocket_javascript_path file}' type='text/javascript' charset='utf-8'></script>"
      end

      # IE-conditional stylesheet first, then one <link> tag per sheet.
      def media_rocket_css
        sheets = %w[
          master.css
          screen.css
          print.css
          jquery.treetable.css
          thickbox.css
        ]
        "<!--[if IE]>#{media_rocket_css_line 'ie.css'}<![endif]-->" +
          sheets.map { |sheet| media_rocket_css_line(sheet) }.join
      end

      def media_rocket_css_line(file)
        "<link rel='stylesheet' href='#{media_rocket_stylesheet_path file}' type='text/css' media='screen, projection'>"
      end
    end
  end
end
require 'dpl/version'
require 'net/http'
require 'securerandom'
module Dpl
  module Providers
    # Uploads mobile app builds (APK/IPA) to TestFairy via its HTTP upload API.
    class Testfairy < Provider
      status :alpha

      full_name 'TestFairy'

      description sq(<<-str)
tbd
      str

      gem 'json', '~> 2.2.0'
      gem 'multipart-post', '~> 2.0.0', require: 'net/http/post/multipart'

      opt '--api_key KEY', 'TestFairy API key', required: true, secret: true
      opt '--app_file FILE', 'Path to the app file that will be generated after the build (APK/IPA)', required: true
      opt '--symbols_file FILE', 'Path to the symbols file'
      # Typo fix: example previously read 'group1,group1'.
      opt '--testers_groups GROUPS', 'Tester groups to be notified about this build', example: 'e.g. group1,group2'
      opt '--notify', 'Send an email with a changelog to your users'
      # Typo fix: previously 'Automaticall ... of this app this version'.
      opt '--auto_update', 'Automatically upgrade all the previous installations of this app to this version'
      # Typo fix: previously 'Comma_separated'.
      opt '--advanced_options OPTS', 'Comma-separated list of advanced options', example: 'option1,option2'

      URL = 'https://upload.testfairy.com/api/upload'
      UA = "Travis CI dpl version=#{Dpl::VERSION}"

      msgs deploy: 'Uploading to TestFairy: %s',
           done: 'Done. Check your build at %s'

      # Uploads the build and reports the resulting build URL.
      # Aborts (via +error+) when the API answers with status 'fail'.
      def deploy
        info :deploy, pretty_print(params)
        body = JSON.parse(http.request(request).body)
        error body['message'] if body['status'] == 'fail'
        info :done, body['build_url']
      end

      private

      # Multipart form fields for the upload; nil values are dropped by +compact+.
      def params
        @params ||= compact(
          'api_key': api_key,
          'apk_file': file(app_file),
          'symbols_file': file(symbols_file),
          'testers-groups': testers_groups,
          'notify': bool(notify),
          'auto-update': bool(auto_update),
          'advanced-options': advanced_options,
          'changelog': changelog
        )
      end

      # Changelog derived from the commit range Travis exposes, if any.
      def changelog
        git_log "--pretty=oneline --abbrev-commit #{commits}" if commits
      end

      def commits
        ENV['TRAVIS_COMMIT_RANGE']
      end

      def request
        Net::HTTP::Post::Multipart.new(uri.path, params, 'User-Agent' => UA)
      end

      # NOTE(review): connection is opened without a block and never explicitly
      # finished; acceptable for a one-shot deploy process.
      def http
        Net::HTTP.start(uri.host, uri.port, :use_ssl => true)
      end

      def uri
        @uri ||= URI.parse(URL)
      end

      # Wraps a path for multipart upload; nil when no path was given.
      def file(path)
        UploadIO.new(path, '', File.basename(path)) if path
      end

      # Maps truthy/false to the 'on'/'off' strings the API expects; nil stays nil.
      def bool(obj)
        obj ? 'on' : 'off' unless obj.nil?
      end

      # Pretty JSON of the params for logging, with the api_key obfuscated
      # and file uploads shown by their path.
      def pretty_print(params)
        params = params.map do |key, value|
          value = obfuscate(value) if key == :api_key
          value = value.path if value.respond_to?(:path)
          [key, value]
        end
        JSON.pretty_generate(params.to_h)
      end
    end
  end
end
remove json version requirement
require 'dpl/version'
require 'net/http'
require 'securerandom'
module Dpl
  module Providers
    # Uploads mobile app builds (APK/IPA) to TestFairy via its HTTP upload API.
    class Testfairy < Provider
      status :alpha

      full_name 'TestFairy'

      description sq(<<-str)
tbd
      str

      gem 'json'
      gem 'multipart-post', '~> 2.0.0', require: 'net/http/post/multipart'

      opt '--api_key KEY', 'TestFairy API key', required: true, secret: true
      opt '--app_file FILE', 'Path to the app file that will be generated after the build (APK/IPA)', required: true
      opt '--symbols_file FILE', 'Path to the symbols file'
      # Typo fix: example previously read 'group1,group1'.
      opt '--testers_groups GROUPS', 'Tester groups to be notified about this build', example: 'e.g. group1,group2'
      opt '--notify', 'Send an email with a changelog to your users'
      # Typo fix: previously 'Automaticall ... of this app this version'.
      opt '--auto_update', 'Automatically upgrade all the previous installations of this app to this version'
      # Typo fix: previously 'Comma_separated'.
      opt '--advanced_options OPTS', 'Comma-separated list of advanced options', example: 'option1,option2'

      URL = 'https://upload.testfairy.com/api/upload'
      UA = "Travis CI dpl version=#{Dpl::VERSION}"

      msgs deploy: 'Uploading to TestFairy: %s',
           done: 'Done. Check your build at %s'

      # Uploads the build and reports the resulting build URL.
      # Aborts (via +error+) when the API answers with status 'fail'.
      def deploy
        info :deploy, pretty_print(params)
        body = JSON.parse(http.request(request).body)
        error body['message'] if body['status'] == 'fail'
        info :done, body['build_url']
      end

      private

      # Multipart form fields for the upload; nil values are dropped by +compact+.
      def params
        @params ||= compact(
          'api_key': api_key,
          'apk_file': file(app_file),
          'symbols_file': file(symbols_file),
          'testers-groups': testers_groups,
          'notify': bool(notify),
          'auto-update': bool(auto_update),
          'advanced-options': advanced_options,
          'changelog': changelog
        )
      end

      # Changelog derived from the commit range Travis exposes, if any.
      def changelog
        git_log "--pretty=oneline --abbrev-commit #{commits}" if commits
      end

      def commits
        ENV['TRAVIS_COMMIT_RANGE']
      end

      def request
        Net::HTTP::Post::Multipart.new(uri.path, params, 'User-Agent' => UA)
      end

      # NOTE(review): connection is opened without a block and never explicitly
      # finished; acceptable for a one-shot deploy process.
      def http
        Net::HTTP.start(uri.host, uri.port, :use_ssl => true)
      end

      def uri
        @uri ||= URI.parse(URL)
      end

      # Wraps a path for multipart upload; nil when no path was given.
      def file(path)
        UploadIO.new(path, '', File.basename(path)) if path
      end

      # Maps truthy/false to the 'on'/'off' strings the API expects; nil stays nil.
      def bool(obj)
        obj ? 'on' : 'off' unless obj.nil?
      end

      # Pretty JSON of the params for logging, with the api_key obfuscated
      # and file uploads shown by their path.
      def pretty_print(params)
        params = params.map do |key, value|
          value = obfuscate(value) if key == :api_key
          value = value.path if value.respond_to?(:path)
          [key, value]
        end
        JSON.pretty_generate(params.to_h)
      end
    end
  end
end
|
# Bolt generators: each public method returns an array of glob patterns.
module Lightning::Generators
  desc "Directories of gems"
  def gem
    gem_paths = `gem environment path`.chomp.split(":")
    gem_paths.map { |dir| "#{dir}/gems/*" }
  end

  desc "System ruby files"
  def ruby
    system_ruby.map { |dir| "#{dir}/**/*.{rb,bundle,so,c}" }
  end

  desc "*ALL* local ruby files. Careful where you do this."
  def local_ruby
    %w[**/*.rb bin/*]
  end

  desc "Test or spec files in a ruby project"
  def test_ruby
    %w[{spec,test}/**/*_{test,spec}.rb {spec,test}/**/{test,spec}_*.rb spec/**/*.spec]
  end

  private

  # Candidate system ruby library directories (stdlib + site), deduplicated.
  def system_ruby
    require 'rbconfig'
    [RbConfig::CONFIG['rubylibdir'], RbConfig::CONFIG['sitelibdir']].compact.uniq
  end
end
added a rails generator
# Bolt generators: each public method returns an array of glob patterns.
module Lightning::Generators
  desc "Directories of gems"
  def gem
    gem_paths = `gem environment path`.chomp.split(":")
    gem_paths.map { |dir| "#{dir}/gems/*" }
  end

  desc "System ruby files"
  def ruby
    system_ruby.map { |dir| "#{dir}/**/*.{rb,bundle,so,c}" }
  end

  desc "Files in a rails project"
  def rails
    %w[{app,config,lib}/**/* {db}/**/*.rb]
  end

  desc "*ALL* local ruby files. Careful where you do this."
  def local_ruby
    %w[**/*.rb bin/*]
  end

  desc "Test or spec files in a ruby project"
  def test_ruby
    %w[{spec,test}/**/*_{test,spec}.rb {spec,test}/**/{test,spec}_*.rb spec/**/*.spec]
  end

  private

  # Candidate system ruby library directories (stdlib + site), deduplicated.
  def system_ruby
    require 'rbconfig'
    [RbConfig::CONFIG['rubylibdir'], RbConfig::CONFIG['sitelibdir']].compact.uniq
  end
end
# Copyright (c) 2012, HipByte SPRL and contributors
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
require 'motion/project/app'
# Shorthand used throughout this Rakefile.
App = Motion::Project::App
App.template = :ios
require 'motion/project'
require 'motion/project/template/ios/config'
require 'motion/project/template/ios/builder'
desc "Build the project, then run the simulator"
task :default => :simulator
desc "Build everything"
task :build => ['build:simulator', 'build:device']
namespace :build do
desc "Build the simulator version"
task :simulator do
App.build('iPhoneSimulator')
end
desc "Build the device version"
task :device do
App.build('iPhoneOS')
App.codesign('iPhoneOS')
end
end
desc "Run the simulator"
task :simulator do
# Build first unless the caller explicitly opted out (skip_build=1).
unless ENV["skip_build"]
Rake::Task["build:simulator"].invoke
end
app = App.config.app_bundle('iPhoneSimulator')
target = ENV['target'] || App.config.sdk_version
# Warn tmux users: without reattach-to-user-namespace the simulator may misbehave.
if ENV['TMUX']
tmux_default_command = `tmux show-options -g default-command`.strip
unless tmux_default_command.include?("reattach-to-user-namespace")
App.warn(<<END
It appears you are using tmux without 'reattach-to-user-namespace', the simulator might not work properly. You can either disable tmux or run the following commands:
$ brew install reattach-to-user-namespace
$ echo 'set-option -g default-command "reattach-to-user-namespace -l $SHELL"' >> ~/.tmux.conf
END
)
end
end
# Cleanup the simulator application sandbox, to avoid having old resource files there.
if ENV['clean']
sim_apps = File.expand_path("~/Library/Application Support/iPhone Simulator/#{target}/Applications")
Dir.glob("#{sim_apps}/**/*.app").each do |app_bundle|
if File.basename(app_bundle) == File.basename(app)
rm_rf File.dirname(app_bundle)
break
end
end
end
# Prepare the device family.
family_int =
if family = ENV['device_family']
App.config.device_family_int(family.downcase.intern)
else
App.config.device_family_ints[0]
end
retina = ENV['retina']
# Configure the SimulateDevice variable (the only way to specify if we want to run in retina mode or not).
simulate_device = App.config.device_family_string(family_int, target, retina)
default_simulator = `/usr/bin/defaults read com.apple.iphonesimulator "SimulateDevice"`.strip
# Restart the simulator only when the device setting actually changed.
if default_simulator != simulate_device && default_simulator != "'#{simulate_device}'"
system("/usr/bin/killall \"iPhone Simulator\" >& /dev/null")
system("/usr/bin/defaults write com.apple.iphonesimulator \"SimulateDevice\" \"'#{simulate_device}'\"")
end
# Launch the simulator.
xcode = App.config.xcode_dir
env = "DYLD_FRAMEWORK_PATH=\"#{xcode}/../Frameworks\":\"#{xcode}/../OtherFrameworks\""
env << ' SIM_SPEC_MODE=1' if App.config.spec_mode
sim = File.join(App.config.bindir, 'ios/sim')
# Debug level handed to the sim launcher: 1 when debug requested, '0' in spec
# mode, otherwise '2' -- semantics are defined by the sim binary (TODO confirm).
debug = (ENV['debug'] ? 1 : (App.config.spec_mode ? '0' : '2'))
app_args = (ENV['args'] or '')
App.info 'Simulate', app
at_exit { system("stty echo") } if $stdout.tty? # Just in case the simulator launcher crashes and leaves the terminal without echo.
Signal.trap(:INT) { } if ENV['debug']
system "#{env} #{sim} #{debug} #{family_int} #{target} \"#{xcode}\" \"#{app}\" #{app_args}"
App.config.print_crash_message if $?.exitstatus != 0 && !App.config.spec_mode
exit($?.exitstatus)
end
desc "Create an .ipa archive"
task :archive => ['build:device'] do
App.archive
end
namespace :archive do
desc "Create an .ipa archive for distribution (AppStore)"
task :distribution do
App.config_without_setup.build_mode = :release
App.config_without_setup.distribution_mode = true
Rake::Task["archive"].invoke
end
end
desc "Same as 'spec:simulator'"
task :spec => ['spec:simulator']
namespace :spec do
desc "Run the test/spec suite on the simulator"
task :simulator do
App.config_without_setup.spec_mode = true
Rake::Task["simulator"].invoke
end
desc "Run the test/spec suite on the device"
task :device do
App.config_without_setup.spec_mode = true
ENV['debug'] ||= '1'
Rake::Task["device"].invoke
end
end
desc "Deploy on the device"
task :device => :archive do
App.info 'Deploy', App.config.archive
device_id = (ENV['id'] or App.config.device_id)
unless App.config.provisioned_devices.include?(device_id)
App.fail "Device ID `#{device_id}' not provisioned in profile `#{App.config.provisioning_profile}'"
end
env = "XCODE_DIR=\"#{App.config.xcode_dir}\""
deploy = File.join(App.config.bindir, 'ios/deploy')
flags = Rake.application.options.trace ? '-d' : ''
Signal.trap(:INT) { } if ENV['debug']
sh "#{env} #{deploy} #{flags} \"#{device_id}\" \"#{App.config.archive}\""
end
desc "Create a .a static library"
task :static do
# Build both slices, then lipo them into one universal static library.
libs = %w{iPhoneSimulator iPhoneOS}.map do |platform|
'"' + App.build(platform, :static => true) + '"'
end
fat_lib = File.join(App.config.build_dir, App.config.name + '-universal.a')
App.info 'Create', fat_lib
sh "/usr/bin/lipo -create #{libs.join(' ')} -output \"#{fat_lib}\""
end
[iOS] Fix a case where the non-retina iPad simulator is not launched.
If the string that selects which iOS simulator device to launch contains a
space (like 'iPad 2'), it must be single-quoted, otherwise the non-retina iPad simulator fails to launch.
# Copyright (c) 2012, HipByte SPRL and contributors
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
require 'motion/project/app'
# Shorthand used throughout this Rakefile.
App = Motion::Project::App
App.template = :ios
require 'motion/project'
require 'motion/project/template/ios/config'
require 'motion/project/template/ios/builder'
desc "Build the project, then run the simulator"
task :default => :simulator
desc "Build everything"
task :build => ['build:simulator', 'build:device']
namespace :build do
desc "Build the simulator version"
task :simulator do
App.build('iPhoneSimulator')
end
desc "Build the device version"
task :device do
App.build('iPhoneOS')
App.codesign('iPhoneOS')
end
end
desc "Run the simulator"
task :simulator do
# Build first unless the caller explicitly opted out (skip_build=1).
unless ENV["skip_build"]
Rake::Task["build:simulator"].invoke
end
app = App.config.app_bundle('iPhoneSimulator')
target = ENV['target'] || App.config.sdk_version
# Warn tmux users: without reattach-to-user-namespace the simulator may misbehave.
if ENV['TMUX']
tmux_default_command = `tmux show-options -g default-command`.strip
unless tmux_default_command.include?("reattach-to-user-namespace")
App.warn(<<END
It appears you are using tmux without 'reattach-to-user-namespace', the simulator might not work properly. You can either disable tmux or run the following commands:
$ brew install reattach-to-user-namespace
$ echo 'set-option -g default-command "reattach-to-user-namespace -l $SHELL"' >> ~/.tmux.conf
END
)
end
end
# Cleanup the simulator application sandbox, to avoid having old resource files there.
if ENV['clean']
sim_apps = File.expand_path("~/Library/Application Support/iPhone Simulator/#{target}/Applications")
Dir.glob("#{sim_apps}/**/*.app").each do |app_bundle|
if File.basename(app_bundle) == File.basename(app)
rm_rf File.dirname(app_bundle)
break
end
end
end
# Prepare the device family.
family_int =
if family = ENV['device_family']
App.config.device_family_int(family.downcase.intern)
else
App.config.device_family_ints[0]
end
retina = ENV['retina']
# Configure the SimulateDevice variable (the only way to specify if we want to run in retina mode or not).
simulate_device = App.config.device_family_string(family_int, target, retina)
default_simulator = `/usr/bin/defaults read com.apple.iphonesimulator "SimulateDevice"`.strip
# Restart the simulator only when the device setting actually changed.
if default_simulator != simulate_device && default_simulator != "'#{simulate_device}'"
# Quote the device string only when it contains a space, so names like
# 'iPad' are written unquoted and the non-retina iPad simulator launches.
simulate_device = "'#{simulate_device}'" if simulate_device.include?(" ")
system("/usr/bin/killall \"iPhone Simulator\" >& /dev/null")
system("/usr/bin/defaults write com.apple.iphonesimulator \"SimulateDevice\" \"#{simulate_device}\"")
end
# Launch the simulator.
xcode = App.config.xcode_dir
env = "DYLD_FRAMEWORK_PATH=\"#{xcode}/../Frameworks\":\"#{xcode}/../OtherFrameworks\""
env << ' SIM_SPEC_MODE=1' if App.config.spec_mode
sim = File.join(App.config.bindir, 'ios/sim')
# Debug level handed to the sim launcher: 1 when debug requested, '0' in spec
# mode, otherwise '2' -- semantics are defined by the sim binary (TODO confirm).
debug = (ENV['debug'] ? 1 : (App.config.spec_mode ? '0' : '2'))
app_args = (ENV['args'] or '')
App.info 'Simulate', app
at_exit { system("stty echo") } if $stdout.tty? # Just in case the simulator launcher crashes and leaves the terminal without echo.
Signal.trap(:INT) { } if ENV['debug']
system "#{env} #{sim} #{debug} #{family_int} #{target} \"#{xcode}\" \"#{app}\" #{app_args}"
App.config.print_crash_message if $?.exitstatus != 0 && !App.config.spec_mode
exit($?.exitstatus)
end
desc "Create an .ipa archive"
task :archive => ['build:device'] do
App.archive
end
namespace :archive do
desc "Create an .ipa archive for distribution (AppStore)"
task :distribution do
App.config_without_setup.build_mode = :release
App.config_without_setup.distribution_mode = true
Rake::Task["archive"].invoke
end
end
desc "Same as 'spec:simulator'"
task :spec => ['spec:simulator']
namespace :spec do
desc "Run the test/spec suite on the simulator"
task :simulator do
App.config_without_setup.spec_mode = true
Rake::Task["simulator"].invoke
end
desc "Run the test/spec suite on the device"
task :device do
App.config_without_setup.spec_mode = true
ENV['debug'] ||= '1'
Rake::Task["device"].invoke
end
end
desc "Deploy on the device"
task :device => :archive do
App.info 'Deploy', App.config.archive
device_id = (ENV['id'] or App.config.device_id)
unless App.config.provisioned_devices.include?(device_id)
App.fail "Device ID `#{device_id}' not provisioned in profile `#{App.config.provisioning_profile}'"
end
env = "XCODE_DIR=\"#{App.config.xcode_dir}\""
deploy = File.join(App.config.bindir, 'ios/deploy')
flags = Rake.application.options.trace ? '-d' : ''
Signal.trap(:INT) { } if ENV['debug']
sh "#{env} #{deploy} #{flags} \"#{device_id}\" \"#{App.config.archive}\""
end
desc "Create a .a static library"
task :static do
# Build both slices, then lipo them into one universal static library.
libs = %w{iPhoneSimulator iPhoneOS}.map do |platform|
'"' + App.build(platform, :static => true) + '"'
end
fat_lib = File.join(App.config.build_dir, App.config.name + '-universal.a')
App.info 'Create', fat_lib
sh "/usr/bin/lipo -create #{libs.join(' ')} -output \"#{fat_lib}\""
end
|
# Capistrano recipes that install Phusion Passenger 2.0.3 on the "dwell"
# servers and configure an Apache vhost for the deployed application.
Capistrano::Configuration.instance(:must_exist).load do
namespace :dwell do
namespace :passenger do
desc "Enable Passenger"
task :enable_passenger do
# NOTE(review): `input` is assigned but never used — the literal "\n" is
# passed to sudo_with_input below.
input = "\n"
# Answer the installer's interactive "press Enter" prompts automatically.
dwell1.sudo_with_input "passenger-install-apache2-module", /enter/i, "\n"
# Apache module configuration pointing at the installed Passenger gem.
cfg =<<-EOF
LoadModule passenger_module /usr/lib/ruby/gems/1.8/gems/passenger-2.0.3/ext/apache2/mod_passenger.so
PassengerRoot /usr/lib/ruby/gems/1.8/gems/passenger-2.0.3
PassengerRuby /usr/bin/ruby1.8
EOF
# Upload to /tmp first, then move into place with sudo (put cannot write
# root-owned paths directly).
put cfg, "/tmp/passenger"
sudo "mv /tmp/passenger /etc/apache2/conf.d/passenger"
dwell1.record_install "apache2_mod_passenger"
end
desc "Setup vhost"
task :setup_vhost do
# Virtual-host snippet for the application being deployed.
cfg =<<-EOF
ServerName #{domain}
# ServerAlias #{application}.agilebox.com
DocumentRoot #{deploy_to}/public
EOF
put cfg, "/tmp/vhost"
sudo "mv /tmp/vhost /etc/apache2/sites-available/#{application}"
# Swap the default site for the application's vhost.
sudo "a2dissite default"
sudo "a2ensite #{application}"
end
desc "Install Passenger"
task :install do
enable_passenger
setup_vhost
end
end
end
end
Use the latest Passenger and don't set up the vhost as part of the install task
# Capistrano recipes that install Phusion Passenger 2.0.4 on the "dwell"
# servers; vhost setup is intentionally left out of the install task.
Capistrano::Configuration.instance(:must_exist).load do
namespace :dwell do
namespace :passenger do
desc "Enable Passenger"
task :enable_passenger do
# NOTE(review): `input` is assigned but never used — the literal "\n" is
# passed to sudo_with_input below.
input = "\n"
# Answer the installer's interactive "press Enter" prompts automatically.
dwell1.sudo_with_input "passenger-install-apache2-module", /enter/i, "\n"
# Apache module configuration pointing at the installed Passenger gem.
cfg =<<-EOF
LoadModule passenger_module /usr/lib/ruby/gems/1.8/gems/passenger-2.0.4/ext/apache2/mod_passenger.so
PassengerRoot /usr/lib/ruby/gems/1.8/gems/passenger-2.0.4
PassengerRuby /usr/bin/ruby1.8
EOF
# Upload to /tmp first, then move into place with sudo (put cannot write
# root-owned paths directly).
put cfg, "/tmp/passenger"
sudo "mv /tmp/passenger /etc/apache2/conf.d/passenger"
dwell1.record_install "apache2_mod_passenger"
end
desc "Setup vhost"
task :setup_vhost do
# Virtual-host snippet for the application being deployed.
cfg =<<-EOF
ServerName #{domain}
# ServerAlias #{application}.agilebox.com
DocumentRoot #{deploy_to}/public
EOF
put cfg, "/tmp/vhost"
sudo "mv /tmp/vhost /etc/apache2/sites-available/#{application}"
# Swap the default site for the application's vhost.
sudo "a2dissite default"
sudo "a2ensite #{application}"
end
desc "Install Passenger"
task :install do
enable_passenger
# Vhost setup is deliberately skipped; run dwell:passenger:setup_vhost
# manually when needed.
# setup_vhost
end
end
end
end |
module LovelyRufus class TextWrapper
  # Wraps +text+ to at most +width+ columns, searching for the narrowest
  # width that still produces the minimal number of lines.
  def self.wrap text, width: 72
    new(text, width: width).call
  end

  def initialize text, width: 72
    @text, @width = text, width
  end

  # Returns the best wrapping found: start from the full width and keep
  # narrowing while the line count does not grow.
  def call
    best = wrap_to width
    (width - 1).downto 1 do |size|
      wrap = wrap_to size
      return best if wrap.lines.count > best.lines.count
      best = wrap
    end
    # BUG FIX: without this the method returned the Integer receiver of
    # #downto whenever the loop ran to completion (e.g. for single-word
    # text that never gains a line); always return the wrapped String.
    best
  end

  attr_reader :text, :width
  private :text, :width

  private

  # Build the wrapper pipeline; reversed so the first listed wrapper runs
  # first when the composed lambda is called.
  def chain
    chain = [OneLetterGluer, BasicWrapper, HangoutWrapper].reverse
    chain.reduce(-> hash { hash }) { |inner, outer| outer.new inner }
  end

  # Paragraphs: split on blank lines, flatten internal newlines, and trim
  # surrounding whitespace.
  def paras
    @paras ||= text.split("\n\n").map { |para| para.tr("\n", ' ').strip }
  end

  # Wrap every paragraph to +size+ columns and restore non-breaking spaces.
  def wrap_to size
    paras.map do |para|
      chain.call(text: para, width: size)[:text].tr NBSP, ' '
    end.join "\n"
  end
end end
TextWrapper#paras: remove a redundant strip call
module LovelyRufus class TextWrapper
  # Wraps +text+ to at most +width+ columns, searching for the narrowest
  # width that still produces the minimal number of lines.
  def self.wrap text, width: 72
    new(text, width: width).call
  end

  def initialize text, width: 72
    @text, @width = text, width
  end

  # Returns the best wrapping found: start from the full width and keep
  # narrowing while the line count does not grow.
  def call
    best = wrap_to width
    (width - 1).downto 1 do |size|
      wrap = wrap_to size
      return best if wrap.lines.count > best.lines.count
      best = wrap
    end
    # BUG FIX: without this the method returned the Integer receiver of
    # #downto whenever the loop ran to completion (e.g. for single-word
    # text that never gains a line); always return the wrapped String.
    best
  end

  attr_reader :text, :width
  private :text, :width

  private

  # Build the wrapper pipeline; reversed so the first listed wrapper runs
  # first when the composed lambda is called.
  def chain
    chain = [OneLetterGluer, BasicWrapper, HangoutWrapper].reverse
    chain.reduce(-> hash { hash }) { |inner, outer| outer.new inner }
  end

  # Paragraphs: split on blank lines and flatten internal newlines
  # (no strip — trimming is left to the wrappers).
  def paras
    @paras ||= text.split("\n\n").map { |para| para.tr "\n", ' ' }
  end

  # Wrap every paragraph to +size+ columns and restore non-breaking spaces.
  def wrap_to size
    paras.map do |para|
      chain.call(text: para, width: size)[:text].tr NBSP, ' '
    end.join "\n"
  end
end end
|
require "middleman-core/cli"
require "middleman-deploy/extension"
require 'git'
module Middleman
  module Cli
    # This class provides a "deploy" command for the middleman CLI.
    class Deploy < Thor
      include Thor::Actions
      check_unknown_options!
      namespace :deploy

      # Tell Thor to exit with a nonzero exit code on failure
      def self.exit_on_failure?
        true
      end

      desc "deploy", "Copy build directory to a remote host"
      method_option "clean",
        :type => :boolean,
        :aliases => "-c",
        :desc => "Remove orphaned files or directories on the remote host"
      # Dispatch to the strategy configured in config.rb
      # (deploy_rsync or deploy_git).
      def deploy
        send("deploy_#{self.middleman_options.method}")
      end

      protected

      # Options of the activated middleman-deploy extension.
      def middleman_options
        ::Middleman::Application.server.inst.options
      end

      # Copy the build directory to a remote host via rsync over ssh.
      def deploy_rsync
        host = self.middleman_options.host
        port = self.middleman_options.port
        user = self.middleman_options.user
        path = self.middleman_options.path
        # These only exist when config.rb sets them!
        if (!host || !user || !path)
          raise Thor::Error.new "You need to activate the deploy extension in config.rb"
        end
        command = "rsync -avze '" + "ssh -p #{port}" + "' build/ #{user}@#{host}:#{path}"
        # The --clean command-line flag wins over the extension option.
        if options.has_key? "clean"
          clean = options.clean
        else
          # BUG FIX: was `shared_inst.options.clean`, but no `shared_inst`
          # helper exists in this class (NameError at runtime); read the
          # option from the extension like the other settings above.
          clean = self.middleman_options.clean
        end
        if clean
          command += " --delete"
        end
        run command
      end

      # Commit the build directory onto the gh-pages branch and push it
      # to origin and to GitHub.
      def deploy_git
        puts "## Deploying to Github Pages"
        Dir.mktmpdir do |tmp|
          # clone ./ with branch gh-pages to tmp
          repo = Git.clone(ENV['MM_ROOT'], tmp)
          repo.checkout('origin/gh-pages', :new_branch => 'gh-pages')
          # copy ./build/* to tmp
          FileUtils.cp_r(Dir.glob(File.join(ENV['MM_ROOT'], 'build', '*')), tmp)
          # git add and commit in tmp
          repo.add
          repo.commit("Automated commit at #{Time.now.utc}")
          # push from tmp to self
          repo.push('origin', 'gh-pages')
          # push to github
          github_url = Git.open(ENV['MM_ROOT']).remote.url
          repo.add_remote('github', github_url)
          repo.push('github', 'gh-pages')
        end
      end
    end

    # Alias "d" to "deploy"
    Base.map({ "d" => "deploy" })
  end
end
as reported at: http://forum.middlemanapp.com/discussion/comment/177
require "middleman-core/cli"
require "middleman-deploy/extension"
require 'git'
module Middleman
module Cli
# This class provides a "deploy" command for the middleman CLI.
class Deploy < Thor
include Thor::Actions
check_unknown_options!
namespace :deploy
# Tell Thor to exit with a nonzero exit code on failure
def self.exit_on_failure?
true
end
desc "deploy", "Copy build directory to a remote host"
method_option "clean",
:type => :boolean,
:aliases => "-c",
:desc => "Remove orphaned files or directories on the remote host"
# Dispatch to the strategy configured in config.rb (deploy_rsync/deploy_git).
def deploy
send("deploy_#{self.middleman_options.method}")
end
protected
# Options of the activated middleman-deploy extension.
def middleman_options
::Middleman::Application.server.inst.options
end
# Copy the build directory to a remote host via rsync over ssh.
def deploy_rsync
host = self.middleman_options.host
port = self.middleman_options.port
user = self.middleman_options.user
path = self.middleman_options.path
# These only exist when config.rb sets them!
if (!host || !user || !path)
raise Thor::Error.new "You need to activate the deploy extension in config.rb"
end
command = "rsync -avze '" + "ssh -p #{port}" + "' build/ #{user}@#{host}:#{path}"
# The --clean command-line flag wins over the extension option.
if options.has_key? "clean"
clean = options.clean
else
clean = self.middleman_options.clean
end
if clean
command += " --delete"
end
run command
end
# Commit the build directory onto the gh-pages branch and push it to
# origin and to GitHub.
def deploy_git
puts "## Deploying to Github Pages"
Dir.mktmpdir do |tmp|
# clone ./ with branch gh-pages to tmp
repo = Git.clone(ENV['MM_ROOT'], tmp)
repo.checkout('origin/gh-pages', :new_branch => 'gh-pages')
# copy ./build/* to tmp
FileUtils.cp_r(Dir.glob(File.join(ENV['MM_ROOT'], 'build', '*')), tmp)
# git add and commit in tmp
repo.add
repo.commit("Automated commit at #{Time.now.utc}")
# push from tmp to self
repo.push('origin', 'gh-pages')
# push to github
github_url = Git.open(ENV['MM_ROOT']).remote.url
repo.add_remote('github', github_url)
repo.push('github', 'gh-pages')
end
end
end
# Alias "d" to "deploy"
Base.map({ "d" => "deploy" })
end
end
|
module Ember
module Middleman
# Gem version of the Ember/Middleman integration.
VERSION = '0.0.2'
end
end
Now working on 0.0.3
module Ember
module Middleman
# Gem version of the Ember/Middleman integration.
VERSION = '0.0.3'
end
end
|
module Middleman
module Ratchet
# Gem version of middleman-ratchet.
VERSION = '0.1.4'
end
end
Bump to v0.1.5
module Middleman
module Ratchet
# Gem version of middleman-ratchet.
VERSION = '0.1.5'
end
end
|
class EventLister
  # Resque queue this job is processed on.
  @queue = :invoicing

  # List all events for the configured Eventbrite organizer and enqueue an
  # AttendeeLister job for every live event.
  def self.perform
    client = EventbriteClient.new({ :app_key => ENV["EVENTBRITE_API_KEY"],
                                    :user_key => ENV["EVENTBRITE_USER_KEY"] })
    response = client.organizer_list_events(id: ENV['EVENTBRITE_ORGANIZER_ID'])
    return unless response
    response.parsed_response['events'].each do |wrapper|
      details = wrapper['event']
      next unless details['id'] && details['status'] == 'Live'
      Resque.enqueue(AttendeeLister, details['id'].to_s)
    end
  end
end
Added doc block for EventLister
class EventLister
# Resque queue this job is processed on.
@queue = :invoicing
# Public: Inspect the list of events on Eventbrite
#
# Examples
#
# EventLister.perform
# # => nil
#
# Returns nil. Queues further jobs to handle inspection of attendee lists.
def self.perform
e = EventbriteClient.new ({ :app_key => ENV["EVENTBRITE_API_KEY"], :user_key => ENV["EVENTBRITE_USER_KEY"]})
if response = e.organizer_list_events(id: ENV['EVENTBRITE_ORGANIZER_ID'])
response.parsed_response['events'].each do |event|
# NOTE(review): `e` is reused here — from this point on it holds the
# event hash, not the Eventbrite client.
e = event['event']
# Only live events with an id get an AttendeeLister job queued.
if e['id'] && e['status'] == 'Live'
Resque.enqueue(AttendeeLister, e['id'].to_s)
end
end
end
end
end
require 'rbconfig'
require 'net/http'
require 'net/https'
require 'net/ftp'
require 'fileutils'
require 'tempfile'
require 'digest/md5'
# MiniPortile: a minimalistic port/recipe system that downloads, extracts,
# patches, configures, compiles and installs autoconf-style native libraries
# into a self-contained "ports" directory.
class MiniPortile
  attr_reader :name, :version, :original_host
  attr_writer :configure_options
  attr_accessor :host, :files, :patch_files, :target, :logger

  # name    - port name (e.g. "libiconv")
  # version - version string of the port
  def initialize(name, version)
    @name = name
    @version = version
    @target = 'ports'
    @files = []
    @patch_files = []
    @logger = STDOUT
    # Remember the detected build host so #activate can recognize
    # cross-compilation when @host is later changed.
    @original_host = @host = detect_host
  end

  # Download every configured archive URL into the archives directory.
  def download
    @files.each do |url|
      filename = File.basename(url)
      download_file(url, File.join(archives_path, filename))
    end
  end

  # Extract every downloaded archive into the temporary build directory.
  def extract
    @files.each do |url|
      filename = File.basename(url)
      extract_file(File.join(archives_path, filename), tmp_path)
    end
  end

  # Apply all configured patch files with `git apply`.
  def patch
    # Set GIT_DIR while applying patches to work around
    # git-apply doing nothing when started within another
    # git directory.
    ENV['GIT_DIR'], old_git = '.', ENV['GIT_DIR']
    begin
      @patch_files.each do |full_path|
        next unless File.exist?(full_path)
        output "Running git apply with #{full_path}... "
        execute('patch', %w(git apply) + [full_path], :initial_message => false)
      end
    ensure
      ENV['GIT_DIR'] = old_git
    end
  end

  def configure_options
    @configure_options ||= configure_defaults
  end

  # Run the configure script, recording an MD5 of the options used so a
  # changed configuration triggers a re-configure.
  def configure
    return if configured?
    md5_file = File.join(tmp_path, 'configure.md5')
    digest = Digest::MD5.hexdigest(computed_options.to_s)
    File.open(md5_file, "w") { |f| f.write digest }
    execute('configure', %w(sh configure) + computed_options)
  end

  def compile
    execute('compile', make_cmd)
  end

  def install
    return if installed?
    execute('install', %Q(#{make_cmd} install))
  end

  # True when every configured archive is present locally.
  def downloaded?
    missing = @files.detect do |url|
      filename = File.basename(url)
      !File.exist?(File.join(archives_path, filename))
    end
    missing ? false : true
  end

  # True when configure already ran with the currently computed options.
  def configured?
    configure = File.join(work_path, 'configure')
    makefile = File.join(work_path, 'Makefile')
    md5_file = File.join(tmp_path, 'configure.md5')
    stored_md5 = File.exist?(md5_file) ? File.read(md5_file) : ""
    current_md5 = Digest::MD5.hexdigest(computed_options.to_s)
    (current_md5 == stored_md5) && newer?(makefile, configure)
  end

  # True when the installed tree is newer than the Makefile.
  def installed?
    makefile = File.join(work_path, 'Makefile')
    target_dir = Dir.glob("#{port_path}/*").find { |d| File.directory?(d) }
    newer?(target_dir, makefile)
  end

  # Run the whole recipe: download, extract, patch, configure, compile,
  # install. Returns true.
  def cook
    download unless downloaded?
    extract
    patch
    configure unless configured?
    compile
    install unless installed?
    return true
  end

  # Export PATH/CPATH/LIBRARY_PATH (and LDFLAGS when cross-compiling) so the
  # installed port is picked up by subsequent builds.
  def activate
    lib_path = File.join(port_path, "lib")
    vars = {
      'PATH' => File.join(port_path, 'bin'),
      'CPATH' => File.join(port_path, 'include'),
      'LIBRARY_PATH' => lib_path
    }.reject { |env, path| !File.directory?(path) }
    output "Activating #{@name} #{@version} (from #{port_path})..."
    vars.each do |var, path|
      full_path = File.expand_path(path)
      # turn into a valid Windows path (if required)
      full_path.gsub!(File::SEPARATOR, File::ALT_SEPARATOR) if File::ALT_SEPARATOR
      # save current variable value
      old_value = ENV[var] || ''
      unless old_value.include?(full_path)
        ENV[var] = "#{full_path}#{File::PATH_SEPARATOR}#{old_value}"
      end
    end
    # rely on LDFLAGS when cross-compiling
    if File.exist?(lib_path) && (@host != @original_host)
      full_path = File.expand_path(lib_path)
      old_value = ENV.fetch("LDFLAGS", "")
      unless old_value.include?(full_path)
        ENV["LDFLAGS"] = "-L#{full_path} #{old_value}".strip
      end
    end
  end

  def path
    File.expand_path(port_path)
  end

  private

  def tmp_path
    "tmp/#{@host}/ports/#{@name}/#{@version}"
  end

  def port_path
    "#{@target}/#{@host}/#{@name}/#{@version}"
  end

  def archives_path
    "#{@target}/archives"
  end

  # First directory inside tmp_path: the unpacked source tree.
  def work_path
    Dir.glob("#{tmp_path}/*").find { |d| File.directory?(d) }
  end

  def configure_defaults
    [
      "--host=#{@host}",    # build for specific target (host)
      "--enable-static",    # build static library
      "--disable-shared"    # disable generation of shared object
    ]
  end

  def configure_prefix
    "--prefix=#{File.expand_path(port_path)}"
  end

  def computed_options
    [
      configure_options,   # customized or default options
      configure_prefix,    # installation target
    ].flatten
  end

  # Path of the log file for the given action (e.g. "configure").
  def log_file(action)
    File.join(tmp_path, "#{action}.log")
  end

  # First available tar executable, memoized class-wide.
  def tar_exe
    @@tar_exe ||= begin
      %w[gtar bsdtar tar basic-bsdtar].find { |c|
        which(c)
      }
    end
  end

  # Map an archive extension to the matching tar compression flag.
  def tar_compression_switch(filename)
    case File.extname(filename)
    when '.gz', '.tgz'
      'z'
    when '.bz2', '.tbz2'
      'j'
    when '.Z'
      'Z'
    else
      ''
    end
  end

  # From: http://stackoverflow.com/a/5471032/7672
  # Thanks, Mislav!
  #
  # Cross-platform way of finding an executable in the $PATH.
  #
  # which('ruby') #=> /usr/bin/ruby
  def which(cmd)
    exts = ENV['PATHEXT'] ? ENV['PATHEXT'].split(';') : ['']
    ENV['PATH'].split(File::PATH_SEPARATOR).each do |path|
      exts.each { |ext|
        exe = File.join(path, "#{cmd}#{ext}")
        return exe if File.executable? exe
      }
    end
    return nil
  end

  # Detect the build host triplet from the compiler's `-v` output.
  def detect_host
    return @detect_host if defined?(@detect_host)
    begin
      ENV["LC_ALL"], old_lc_all = "C", ENV["LC_ALL"]
      output = `#{gcc_cmd} -v 2>&1`
      if m = output.match(/^Target\: (.*)$/)
        @detect_host = m[1]
      end
      @detect_host
    ensure
      ENV["LC_ALL"] = old_lc_all
    end
  end

  # Unpack a single archive into target, raising on failure.
  def extract_file(file, target)
    filename = File.basename(file)
    FileUtils.mkdir_p target
    # BUG FIX: the message contained the literal "#(unknown)" (a broken
    # interpolation); report the archive actually being extracted.
    message "Extracting #{filename} into #{target}... "
    result = if RUBY_VERSION < "1.9"
      `#{tar_exe} #{tar_compression_switch(filename)}xf "#{file}" -C "#{target}" 2>&1`
    else
      IO.popen([tar_exe,
                "#{tar_compression_switch(filename)}xf", file,
                "-C", target,
                {:err=>[:child, :out]}], &:read)
    end
    if $?.success?
      output "OK"
    else
      output "ERROR"
      output result
      raise "Failed to complete extract task"
    end
  end

  # Run command inside the work directory, logging stdout/stderr to
  # "<action>.log". Raises when the command exits non-zero.
  def execute(action, command, options={})
    log = log_file(action)
    log_out = File.expand_path(log)
    Dir.chdir work_path do
      if options.fetch(:initial_message){ true }
        message "Running '#{action}' for #{@name} #{@version}... "
      end
      if Process.respond_to?(:spawn)
        args = [command].flatten + [{[:out, :err]=>[log_out, "w"]}]
        pid = spawn(*args)
        Process.wait(pid)
      else
        # Ruby-1.8 compatibility:
        if command.kind_of?(Array)
          system(*command)
        else
          redirected = "#{command} >#{log_out} 2>&1"
          system(redirected)
        end
      end
      if $?.success?
        output "OK"
        return true
      else
        output "ERROR, review '#{log_out}' to see what happened."
        raise "Failed to complete #{action} task"
      end
    end
  end

  # True when both paths exist and target is strictly newer than checkpoint.
  def newer?(target, checkpoint)
    if (target && File.exist?(target)) && (checkpoint && File.exist?(checkpoint))
      File.mtime(target) > File.mtime(checkpoint)
    else
      false
    end
  end

  # print out a message with the logger
  def message(text)
    @logger.print text
    @logger.flush
  end

  # print out a message using the logger but return to a new line
  def output(text = "")
    @logger.puts text
    @logger.flush
  end

  # Slightly modified from RubyInstaller uri_ext, Rubinius configure
  # and adaptations of Wayne's RailsInstaller
  def download_file(url, full_path, count = 3)
    return if File.exist?(full_path)
    uri = URI.parse(url)
    begin
      case uri.scheme.downcase
      when /ftp/
        download_file_ftp(uri, full_path)
      when /http|https/
        download_file_http(url, full_path, count)
      end
    # NOTE(review): rescuing Exception also swallows interrupts; kept as-is
    # to preserve existing behavior.
    rescue Exception => e
      File.unlink full_path if File.exist?(full_path)
      output "ERROR: #{e.message}"
      raise "Failed to complete download task"
    end
  end

  # HTTP(S) download with proxy support, redirects and a progress meter.
  def download_file_http(url, full_path, count = 3)
    filename = File.basename(full_path)
    uri = URI.parse(url)
    if ENV['http_proxy']
      _, userinfo, p_host, p_port = URI.split(ENV['http_proxy'])
      proxy_user, proxy_pass = userinfo.split(/:/) if userinfo
      http = Net::HTTP.new(uri.host, uri.port, p_host, p_port, proxy_user, proxy_pass)
    else
      http = Net::HTTP.new(uri.host, uri.port)
      if URI::HTTPS === uri
        http.use_ssl = true
        http.verify_mode = OpenSSL::SSL::VERIFY_PEER
        store = OpenSSL::X509::Store.new
        # Auto-include system-provided certificates
        store.set_default_paths
        if ENV.has_key?("SSL_CERT_FILE") && File.exist?(ENV["SSL_CERT_FILE"])
          store.add_file ENV["SSL_CERT_FILE"]
        end
        http.cert_store = store
      end
    end
    # BUG FIX: the message contained the literal "#(unknown)" (a broken
    # interpolation); report the file actually being downloaded.
    message "Downloading #{filename} "
    http.start do |h|
      h.request_get(uri.path, 'Accept-Encoding' => 'identity') do |response|
        case response
        when Net::HTTPNotFound
          output "404 - Not Found"
          return false
        when Net::HTTPClientError
          output "Error: Client Error: #{response.inspect}"
          return false
        when Net::HTTPRedirection
          raise "Too many redirections for the original URL, halting." if count <= 0
          url = response["location"]
          return download_file(url, full_path, count - 1)
        when Net::HTTPOK
          return with_tempfile(filename, full_path) do |temp_file|
            size = 0
            progress = 0
            total = response.header["Content-Length"].to_i
            response.read_body do |chunk|
              temp_file << chunk
              size += chunk.size
              new_progress = (size * 100) / total
              unless new_progress == progress
                message "\rDownloading %s (%3d%%) " % [filename, new_progress]
              end
              progress = new_progress
            end
            output
          end
        end
      end
    end
  end

  # Passive FTP download with a progress meter.
  def download_file_ftp(uri, full_path)
    filename = File.basename(uri.path)
    with_tempfile(filename, full_path) do |temp_file|
      size = 0
      progress = 0
      Net::FTP.open(uri.host, uri.user, uri.password) do |ftp|
        ftp.passive = true
        ftp.login
        remote_dir = File.dirname(uri.path)
        ftp.chdir(remote_dir) unless remote_dir == '.'
        total = ftp.size(filename)
        ftp.getbinaryfile(filename, temp_file.path, 8192) do |chunk|
          # Ruby 1.8.7 already wrote the chunk into the file
          unless RUBY_VERSION < "1.9"
            temp_file << chunk
          end
          size += chunk.size
          new_progress = (size * 100) / total
          unless new_progress == progress
            message "\rDownloading %s (%3d%%) " % [filename, new_progress]
          end
          progress = new_progress
        end
      end
      output
    end
  end

  # Download into a Tempfile, then move it into place.
  def with_tempfile(filename, full_path)
    # BUG FIX: the tempfile prefix contained the literal "#(unknown)"
    # (a broken interpolation); include the target filename instead.
    temp_file = Tempfile.new("download-#{filename}")
    temp_file.binmode
    yield temp_file
    temp_file.close
    File.unlink full_path if File.exist?(full_path)
    FileUtils.mkdir_p File.dirname(full_path)
    FileUtils.mv temp_file.path, full_path, :force => true
  end

  def gcc_cmd
    cc = ENV["CC"] || RbConfig::CONFIG["CC"] || "gcc"
    return cc.dup
  end

  def make_cmd
    m = ENV['MAKE'] || ENV['make'] || 'make'
    return m.dup
  end
end
Do not clobber the previously written content of a log file.
In the "patch" task execute() is called many times, so the log should be
written in append mode.
Based on the work of Akinori MUSHA <knu@idaemons.org> in
https://github.com/flavorjones/mini_portile/pull/34
require 'rbconfig'
require 'net/http'
require 'net/https'
require 'net/ftp'
require 'fileutils'
require 'tempfile'
require 'digest/md5'
# MiniPortile: a minimalistic port/recipe system that downloads, extracts,
# patches, configures, compiles and installs autoconf-style native libraries
# into a self-contained "ports" directory.
class MiniPortile
  attr_reader :name, :version, :original_host
  attr_writer :configure_options
  attr_accessor :host, :files, :patch_files, :target, :logger

  # name    - port name (e.g. "libiconv")
  # version - version string of the port
  def initialize(name, version)
    @name = name
    @version = version
    @target = 'ports'
    @files = []
    @patch_files = []
    # Cache of per-action log paths; see #log_file.
    @log_files = {}
    @logger = STDOUT
    # Remember the detected build host so #activate can recognize
    # cross-compilation when @host is later changed.
    @original_host = @host = detect_host
  end

  # Download every configured archive URL into the archives directory.
  def download
    @files.each do |url|
      filename = File.basename(url)
      download_file(url, File.join(archives_path, filename))
    end
  end

  # Extract every downloaded archive into the temporary build directory.
  def extract
    @files.each do |url|
      filename = File.basename(url)
      extract_file(File.join(archives_path, filename), tmp_path)
    end
  end

  # Apply all configured patch files with `git apply`.
  def patch
    # Set GIT_DIR while applying patches to work around
    # git-apply doing nothing when started within another
    # git directory.
    ENV['GIT_DIR'], old_git = '.', ENV['GIT_DIR']
    begin
      @patch_files.each do |full_path|
        next unless File.exist?(full_path)
        output "Running git apply with #{full_path}... "
        execute('patch', %w(git apply) + [full_path], :initial_message => false)
      end
    ensure
      ENV['GIT_DIR'] = old_git
    end
  end

  def configure_options
    @configure_options ||= configure_defaults
  end

  # Run the configure script, recording an MD5 of the options used so a
  # changed configuration triggers a re-configure.
  def configure
    return if configured?
    md5_file = File.join(tmp_path, 'configure.md5')
    digest = Digest::MD5.hexdigest(computed_options.to_s)
    File.open(md5_file, "w") { |f| f.write digest }
    execute('configure', %w(sh configure) + computed_options)
  end

  def compile
    execute('compile', make_cmd)
  end

  def install
    return if installed?
    execute('install', %Q(#{make_cmd} install))
  end

  # True when every configured archive is present locally.
  def downloaded?
    missing = @files.detect do |url|
      filename = File.basename(url)
      !File.exist?(File.join(archives_path, filename))
    end
    missing ? false : true
  end

  # True when configure already ran with the currently computed options.
  def configured?
    configure = File.join(work_path, 'configure')
    makefile = File.join(work_path, 'Makefile')
    md5_file = File.join(tmp_path, 'configure.md5')
    stored_md5 = File.exist?(md5_file) ? File.read(md5_file) : ""
    current_md5 = Digest::MD5.hexdigest(computed_options.to_s)
    (current_md5 == stored_md5) && newer?(makefile, configure)
  end

  # True when the installed tree is newer than the Makefile.
  def installed?
    makefile = File.join(work_path, 'Makefile')
    target_dir = Dir.glob("#{port_path}/*").find { |d| File.directory?(d) }
    newer?(target_dir, makefile)
  end

  # Run the whole recipe: download, extract, patch, configure, compile,
  # install. Returns true.
  def cook
    download unless downloaded?
    extract
    patch
    configure unless configured?
    compile
    install unless installed?
    return true
  end

  # Export PATH/CPATH/LIBRARY_PATH (and LDFLAGS when cross-compiling) so the
  # installed port is picked up by subsequent builds.
  def activate
    lib_path = File.join(port_path, "lib")
    vars = {
      'PATH' => File.join(port_path, 'bin'),
      'CPATH' => File.join(port_path, 'include'),
      'LIBRARY_PATH' => lib_path
    }.reject { |env, path| !File.directory?(path) }
    output "Activating #{@name} #{@version} (from #{port_path})..."
    vars.each do |var, path|
      full_path = File.expand_path(path)
      # turn into a valid Windows path (if required)
      full_path.gsub!(File::SEPARATOR, File::ALT_SEPARATOR) if File::ALT_SEPARATOR
      # save current variable value
      old_value = ENV[var] || ''
      unless old_value.include?(full_path)
        ENV[var] = "#{full_path}#{File::PATH_SEPARATOR}#{old_value}"
      end
    end
    # rely on LDFLAGS when cross-compiling
    if File.exist?(lib_path) && (@host != @original_host)
      full_path = File.expand_path(lib_path)
      old_value = ENV.fetch("LDFLAGS", "")
      unless old_value.include?(full_path)
        ENV["LDFLAGS"] = "-L#{full_path} #{old_value}".strip
      end
    end
  end

  def path
    File.expand_path(port_path)
  end

  private

  def tmp_path
    "tmp/#{@host}/ports/#{@name}/#{@version}"
  end

  def port_path
    "#{@target}/#{@host}/#{@name}/#{@version}"
  end

  def archives_path
    "#{@target}/archives"
  end

  # First directory inside tmp_path: the unpacked source tree.
  def work_path
    Dir.glob("#{tmp_path}/*").find { |d| File.directory?(d) }
  end

  def configure_defaults
    [
      "--host=#{@host}",    # build for specific target (host)
      "--enable-static",    # build static library
      "--disable-shared"    # disable generation of shared object
    ]
  end

  def configure_prefix
    "--prefix=#{File.expand_path(port_path)}"
  end

  def computed_options
    [
      configure_options,   # customized or default options
      configure_prefix,    # installation target
    ].flatten
  end

  # Absolute log path for the given action, memoized per action; any log
  # left over from a previous run is removed once, and subsequent writes
  # append (see #execute) so repeated calls don't clobber earlier output.
  def log_file(action)
    @log_files[action] ||=
      File.expand_path("#{action}.log", tmp_path).tap { |file|
        File.unlink(file) if File.exist?(file)
      }
  end

  # First available tar executable, memoized class-wide.
  def tar_exe
    @@tar_exe ||= begin
      %w[gtar bsdtar tar basic-bsdtar].find { |c|
        which(c)
      }
    end
  end

  # Map an archive extension to the matching tar compression flag.
  def tar_compression_switch(filename)
    case File.extname(filename)
    when '.gz', '.tgz'
      'z'
    when '.bz2', '.tbz2'
      'j'
    when '.Z'
      'Z'
    else
      ''
    end
  end

  # From: http://stackoverflow.com/a/5471032/7672
  # Thanks, Mislav!
  #
  # Cross-platform way of finding an executable in the $PATH.
  #
  # which('ruby') #=> /usr/bin/ruby
  def which(cmd)
    exts = ENV['PATHEXT'] ? ENV['PATHEXT'].split(';') : ['']
    ENV['PATH'].split(File::PATH_SEPARATOR).each do |path|
      exts.each { |ext|
        exe = File.join(path, "#{cmd}#{ext}")
        return exe if File.executable? exe
      }
    end
    return nil
  end

  # Detect the build host triplet from the compiler's `-v` output.
  def detect_host
    return @detect_host if defined?(@detect_host)
    begin
      ENV["LC_ALL"], old_lc_all = "C", ENV["LC_ALL"]
      output = `#{gcc_cmd} -v 2>&1`
      if m = output.match(/^Target\: (.*)$/)
        @detect_host = m[1]
      end
      @detect_host
    ensure
      ENV["LC_ALL"] = old_lc_all
    end
  end

  # Unpack a single archive into target, raising on failure.
  def extract_file(file, target)
    filename = File.basename(file)
    FileUtils.mkdir_p target
    # BUG FIX: the message contained the literal "#(unknown)" (a broken
    # interpolation); report the archive actually being extracted.
    message "Extracting #{filename} into #{target}... "
    result = if RUBY_VERSION < "1.9"
      `#{tar_exe} #{tar_compression_switch(filename)}xf "#{file}" -C "#{target}" 2>&1`
    else
      IO.popen([tar_exe,
                "#{tar_compression_switch(filename)}xf", file,
                "-C", target,
                {:err=>[:child, :out]}], &:read)
    end
    if $?.success?
      output "OK"
    else
      output "ERROR"
      output result
      raise "Failed to complete extract task"
    end
  end

  # Run command inside the work directory, appending stdout/stderr to the
  # action's log file. Raises when the command exits non-zero.
  def execute(action, command, options={})
    log_out = log_file(action)
    Dir.chdir work_path do
      if options.fetch(:initial_message){ true }
        message "Running '#{action}' for #{@name} #{@version}... "
      end
      if Process.respond_to?(:spawn)
        # Append ("a") so repeated calls (e.g. from #patch) keep earlier output.
        args = [command].flatten + [{[:out, :err]=>[log_out, "a"]}]
        pid = spawn(*args)
        Process.wait(pid)
      else
        # Ruby-1.8 compatibility:
        if command.kind_of?(Array)
          system(*command)
        else
          redirected = "#{command} >#{log_out} 2>&1"
          system(redirected)
        end
      end
      if $?.success?
        output "OK"
        return true
      else
        output "ERROR, review '#{log_out}' to see what happened."
        raise "Failed to complete #{action} task"
      end
    end
  end

  # True when both paths exist and target is strictly newer than checkpoint.
  def newer?(target, checkpoint)
    if (target && File.exist?(target)) && (checkpoint && File.exist?(checkpoint))
      File.mtime(target) > File.mtime(checkpoint)
    else
      false
    end
  end

  # print out a message with the logger
  def message(text)
    @logger.print text
    @logger.flush
  end

  # print out a message using the logger but return to a new line
  def output(text = "")
    @logger.puts text
    @logger.flush
  end

  # Slightly modified from RubyInstaller uri_ext, Rubinius configure
  # and adaptations of Wayne's RailsInstaller
  def download_file(url, full_path, count = 3)
    return if File.exist?(full_path)
    uri = URI.parse(url)
    begin
      case uri.scheme.downcase
      when /ftp/
        download_file_ftp(uri, full_path)
      when /http|https/
        download_file_http(url, full_path, count)
      end
    # NOTE(review): rescuing Exception also swallows interrupts; kept as-is
    # to preserve existing behavior.
    rescue Exception => e
      File.unlink full_path if File.exist?(full_path)
      output "ERROR: #{e.message}"
      raise "Failed to complete download task"
    end
  end

  # HTTP(S) download with proxy support, redirects and a progress meter.
  def download_file_http(url, full_path, count = 3)
    filename = File.basename(full_path)
    uri = URI.parse(url)
    if ENV['http_proxy']
      _, userinfo, p_host, p_port = URI.split(ENV['http_proxy'])
      proxy_user, proxy_pass = userinfo.split(/:/) if userinfo
      http = Net::HTTP.new(uri.host, uri.port, p_host, p_port, proxy_user, proxy_pass)
    else
      http = Net::HTTP.new(uri.host, uri.port)
      if URI::HTTPS === uri
        http.use_ssl = true
        http.verify_mode = OpenSSL::SSL::VERIFY_PEER
        store = OpenSSL::X509::Store.new
        # Auto-include system-provided certificates
        store.set_default_paths
        if ENV.has_key?("SSL_CERT_FILE") && File.exist?(ENV["SSL_CERT_FILE"])
          store.add_file ENV["SSL_CERT_FILE"]
        end
        http.cert_store = store
      end
    end
    # BUG FIX: the message contained the literal "#(unknown)" (a broken
    # interpolation); report the file actually being downloaded.
    message "Downloading #{filename} "
    http.start do |h|
      h.request_get(uri.path, 'Accept-Encoding' => 'identity') do |response|
        case response
        when Net::HTTPNotFound
          output "404 - Not Found"
          return false
        when Net::HTTPClientError
          output "Error: Client Error: #{response.inspect}"
          return false
        when Net::HTTPRedirection
          raise "Too many redirections for the original URL, halting." if count <= 0
          url = response["location"]
          return download_file(url, full_path, count - 1)
        when Net::HTTPOK
          return with_tempfile(filename, full_path) do |temp_file|
            size = 0
            progress = 0
            total = response.header["Content-Length"].to_i
            response.read_body do |chunk|
              temp_file << chunk
              size += chunk.size
              new_progress = (size * 100) / total
              unless new_progress == progress
                message "\rDownloading %s (%3d%%) " % [filename, new_progress]
              end
              progress = new_progress
            end
            output
          end
        end
      end
    end
  end

  # Passive FTP download with a progress meter.
  def download_file_ftp(uri, full_path)
    filename = File.basename(uri.path)
    with_tempfile(filename, full_path) do |temp_file|
      size = 0
      progress = 0
      Net::FTP.open(uri.host, uri.user, uri.password) do |ftp|
        ftp.passive = true
        ftp.login
        remote_dir = File.dirname(uri.path)
        ftp.chdir(remote_dir) unless remote_dir == '.'
        total = ftp.size(filename)
        ftp.getbinaryfile(filename, temp_file.path, 8192) do |chunk|
          # Ruby 1.8.7 already wrote the chunk into the file
          unless RUBY_VERSION < "1.9"
            temp_file << chunk
          end
          size += chunk.size
          new_progress = (size * 100) / total
          unless new_progress == progress
            message "\rDownloading %s (%3d%%) " % [filename, new_progress]
          end
          progress = new_progress
        end
      end
      output
    end
  end

  # Download into a Tempfile, then move it into place.
  def with_tempfile(filename, full_path)
    # BUG FIX: the tempfile prefix contained the literal "#(unknown)"
    # (a broken interpolation); include the target filename instead.
    temp_file = Tempfile.new("download-#{filename}")
    temp_file.binmode
    yield temp_file
    temp_file.close
    File.unlink full_path if File.exist?(full_path)
    FileUtils.mkdir_p File.dirname(full_path)
    FileUtils.mv temp_file.path, full_path, :force => true
  end

  def gcc_cmd
    cc = ENV["CC"] || RbConfig::CONFIG["CC"] || "gcc"
    return cc.dup
  end

  def make_cmd
    m = ENV['MAKE'] || ENV['make'] || 'make'
    return m.dup
  end
end
|
require 'faraday'
module FaradayMiddleware
  # Middleware to automatically decompress response bodies. If the
  # "Accept-Encoding" header wasn't set in the request, this sets it to
  # "gzip,deflate" and appropriately handles the compressed response from the
  # server. This resembles what Ruby 1.9+ does internally in Net::HTTP#get.
  #
  # This middleware is NOT necessary when these adapters are used:
  # - net_http on Ruby 1.9+
  # - net_http_persistent on Ruby 2.0+
  # - em_http
  class Gzip < Faraday::Middleware
    dependency 'zlib'

    ACCEPT_ENCODING = 'Accept-Encoding'.freeze
    CONTENT_ENCODING = 'Content-Encoding'.freeze
    CONTENT_LENGTH = 'Content-Length'.freeze
    SUPPORTED_ENCODINGS = 'gzip,deflate'.freeze
    # True on Ruby 1.9+, where Strings carry an encoding.
    RUBY_ENCODING = '1.9'.respond_to?(:force_encoding)

    # Advertise gzip/deflate support (unless the caller already set
    # Accept-Encoding) and decompress the response body according to its
    # Content-Encoding header.
    def call(env)
      env[:request_headers][ACCEPT_ENCODING] ||= SUPPORTED_ENCODINGS
      @app.call(env).on_complete do |response_env|
        case response_env[:response_headers][CONTENT_ENCODING]
        when 'gzip'
          reset_body(response_env, &method(:uncompress_gzip))
        when 'deflate'
          reset_body(response_env, &method(:inflate))
        end
      end
    end

    # Replace the body with the block's result and fix up the headers so the
    # response no longer looks encoded.
    def reset_body(env)
      env[:body] = yield(env[:body])
      env[:response_headers].delete(CONTENT_ENCODING)
      env[:response_headers][CONTENT_LENGTH] = env[:body].length
    end

    # Decompress a gzip-encoded body.
    def uncompress_gzip(body)
      io = StringIO.new(body)
      gzip_reader = if RUBY_ENCODING
        Zlib::GzipReader.new(io, :encoding => 'ASCII-8BIT')
      else
        Zlib::GzipReader.new(io)
      end
      gzip_reader.read
    end

    # Decompress a deflate-encoded body.
    def inflate(body)
      # Inflate as a DEFLATE (RFC 1950+RFC 1951) stream
      Zlib::Inflate.inflate(body)
    rescue Zlib::DataError
      # Fall back to inflating as a "raw" deflate stream which
      # Microsoft servers return
      # BUG FIX: construct the Inflate object *before* the begin/ensure
      # block; previously, if Zlib::Inflate.new raised, the ensure clause
      # called #close on nil.
      inflate = Zlib::Inflate.new(-Zlib::MAX_WBITS)
      begin
        inflate.inflate(body)
      ensure
        inflate.close
      end
    end
  end
end
Pull the initialization out of the begin block
require 'faraday'
module FaradayMiddleware
# Middleware to automatically decompress response bodies. If the
# "Accept-Encoding" header wasn't set in the request, this sets it to
# "gzip,deflate" and appropriately handles the compressed response from the
# server. This resembles what Ruby 1.9+ does internally in Net::HTTP#get.
#
# This middleware is NOT necessary when these adapters are used:
# - net_http on Ruby 1.9+
# - net_http_persistent on Ruby 2.0+
# - em_http
class Gzip < Faraday::Middleware
  dependency 'zlib'

  ACCEPT_ENCODING = 'Accept-Encoding'.freeze
  CONTENT_ENCODING = 'Content-Encoding'.freeze
  CONTENT_LENGTH = 'Content-Length'.freeze
  SUPPORTED_ENCODINGS = 'gzip,deflate'.freeze
  RUBY_ENCODING = '1.9'.respond_to?(:force_encoding)

  # Ask the server for compressed responses (unless the caller already
  # chose an Accept-Encoding) and decode whichever encoding comes back.
  def call(env)
    env[:request_headers][ACCEPT_ENCODING] ||= SUPPORTED_ENCODINGS
    @app.call(env).on_complete do |response_env|
      encoding = response_env[:response_headers][CONTENT_ENCODING]
      if encoding == 'gzip'
        reset_body(response_env, &method(:uncompress_gzip))
      elsif encoding == 'deflate'
        reset_body(response_env, &method(:inflate))
      end
    end
  end

  # Swap the body for its decoded form and correct the headers.
  def reset_body(env)
    decoded = yield(env[:body])
    env[:body] = decoded
    env[:response_headers].delete(CONTENT_ENCODING)
    env[:response_headers][CONTENT_LENGTH] = decoded.length
  end

  # Decode a gzip stream, forcing binary reads on encoding-aware Rubies.
  def uncompress_gzip(body)
    io = StringIO.new(body)
    reader =
      if RUBY_ENCODING
        Zlib::GzipReader.new(io, :encoding => 'ASCII-8BIT')
      else
        Zlib::GzipReader.new(io)
      end
    reader.read
  end

  # Decode a deflate stream, with a raw-deflate fallback.
  def inflate(body)
    # Inflate as a DEFLATE (RFC 1950+RFC 1951) stream
    Zlib::Inflate.inflate(body)
  rescue Zlib::DataError
    # Fall back to inflating as a "raw" deflate stream which
    # Microsoft servers return
    inflate = Zlib::Inflate.new(-Zlib::MAX_WBITS)
    begin
      inflate.inflate(body)
    ensure
      inflate.close
    end
  end
end
end
|
require 'net/https'
require 'addressable'
module Faria
module Launchpad
class Service
LAUNCHPAD_NAME = "Launchpad"
def self.noauth(endpoint, quiet: false)
unless quiet
puts "************************************************************************\n" \
"\007\007\007NOTICE: noauth is only intended as a somewhat easy way to call `ping`\n" \
"and `pubkey`. Nothing else is going to work since keys are required for\n" \
"general API usage.\n" \
"************************************************************************\n"
sleep 2
end
new(endpoint, keys: { local: nil, remote: nil }, source: {name: "No one"})
end
DEFAULTS = {
expires_in: 60 # 1 minute
}
def initialize(endpoint, options = {})
@endpoint = endpoint
@my_key = options[:keys][:local]
@remote_key = options[:keys][:remote]
@source = options[:source]
@app_name = options[:source][:name]
@options = DEFAULTS.merge(options)
end
# utils
def ping
get_without_auth "ping"
end
def pubkey
resp = raw_get_without_auth("pubkey")
return resp.body if resp.code == '200'
end
# utils requiring auth
def info
get "info"
end
def echo(params={})
put "echo", params
end
# sessions
def retrieve_session(session_id, params = {})
get "authentication_sessions/#{session_id}", params
end
# data is intended to be JSON encoded data if passed
def approve_session(session_id, data = {})
params = data.empty? ? {} : { data: data }
post "authentication_sessions/#{session_id}/approve", params
end
# data is intended to be JSON encoded data if passed
def decline_session(session_id, data = {})
params = data.empty? ? {} : { data: data }
post "authentication_sessions/#{session_id}/decline", params
end
# identities
def show_identity(uuid)
get "identities/#{uuid}"
end
def update_identity(identity_representation, uuid)
patch "identities/#{uuid}", identity: identity_representation
end
# by_value allows the unique pairing value to be used to perform
# queries or updates instead of Launchpad's internal UUID
def show_identity_by_pairing_value(pairing_value)
get "identities/by_pairing_value/#{pairing_value}"
end
def update_identity_by_pairing_value(identity_representation, pairing_value)
patch "identities/by_pairing_value/#{pairing_value}", identity: identity_representation
end
# final provisioning step (server side)
def provision(params = {})
raise "you need an :approval_code" if params[:approval_code].blank?
raise "you need an :identity" if params[:identity].blank?
post("pairing/provision", params)
end
# direct methods (for undocumented api?)
def post(url, params = {})
resp = raw_request(:post, url, params)
parse_response(resp)
end
def get(url, params = {})
resp = raw_request(:get, url, params)
parse_response(resp)
end
def put(url, params = {})
resp = raw_request(:put, url, params)
parse_response(resp)
end
def patch(url, params = {})
resp = raw_request(:patch, url, params)
parse_response(resp)
end
def parse_response(resp)
hash = JSON.parse(resp.body)
# be railsy if we can
hash = hash.with_indifferent_access if hash.respond_to?(:with_indifferent_access)
hash
rescue JSON::ParserError
raise JSON::ParserError, resp.body
end
# lower-level HTTP code
def get_without_auth(url, params={})
parse_response raw_get_without_auth(url, params)
end
def raw_get_without_auth(url, params={})
uri = full_url(url)
Net::HTTP.get_response(URI(uri))
end
# Performs an encrypted (JWE) HTTP request against the API endpoint.
# GET requests carry the payload in the Faria-JWE header; other verbs
# send it as the request body with an application/jwe content type.
#
# Fix: use_ssl must be configured on the Net::HTTP instance *before*
# the session is started — assigning it inside the Net::HTTP.start
# block raises IOError ("use_ssl value changed, but session already
# started") once the connection is open.
def raw_request(verb, url, params = {})
  uri = full_url(url)
  a = Addressable::URI.parse(uri)
  http = Net::HTTP.new(a.host, a.inferred_port)
  http.use_ssl = a.scheme == 'https'
  # http.verify_mode = OpenSSL::SSL::VERIFY_NONE
  http.start do
    request = verb_to_http_class(verb).new a.request_uri
    payload = encrypt_payload(params, a)
    if verb == :get
      request['Faria-JWE'] = payload
    else
      request['Content-Type'] = "application/jwe"
      request.body = payload
    end
    http.request request
  end
end
# url helpers
def pairing_request_url
rooted_url "third/pairing/request"
end
def pairing_complete_url
rooted_url "third/pairing/complete"
end
private

# HTTP verbs we can map onto Net::HTTP request classes.
# (Fixed: 'get' was listed twice.)
VALID_VERBS = %w(get put patch post delete).freeze

# can't guarantee we have Rails or AS, so map the verb onto its
# Net::HTTP request class with const_get vs constantize/classify, etc
def verb_to_http_class(verb)
  raise "#{verb} is not a valid HTTP verb." unless VALID_VERBS.include?(verb.to_s)
  Net::HTTP.const_get(verb.to_s.capitalize)
end
def encrypt_payload(params, address)
Faria::Launchpad::Packet.encrypt(
params,
{
api_url: address.normalize.to_s,
source: @source,
expires_in: @options[:expires_in]
},
remote_key: @remote_key,
local_key: @my_key
)
end
def rooted_url(url)
File.join(base_url(@endpoint), url)
end
def base_url(url)
url.gsub(%r{/api/v[^/]+/$},"")
end
def full_url(url)
File.join(@endpoint, url)
end
end
end
end
Fix: `use_ssl` must be set on the `Net::HTTP` instance before calling `start`; changing it inside the `start` block raises `IOError` ("use_ssl value changed, but session already started") once the session is open.
require 'net/https'
require 'addressable'
module Faria
module Launchpad
class Service
LAUNCHPAD_NAME = "Launchpad"
def self.noauth(endpoint, quiet: false)
unless quiet
puts "************************************************************************\n" \
"\007\007\007NOTICE: noauth is only intended as a somewhat easy way to call `ping`\n" \
"and `pubkey`. Nothing else is going to work since keys are required for\n" \
"general API usage.\n" \
"************************************************************************\n"
sleep 2
end
new(endpoint, keys: { local: nil, remote: nil }, source: {name: "No one"})
end
DEFAULTS = {
expires_in: 60 # 1 minute
}
def initialize(endpoint, options = {})
@endpoint = endpoint
@my_key = options[:keys][:local]
@remote_key = options[:keys][:remote]
@source = options[:source]
@app_name = options[:source][:name]
@options = DEFAULTS.merge(options)
end
# utils
def ping
get_without_auth "ping"
end
def pubkey
resp = raw_get_without_auth("pubkey")
return resp.body if resp.code == '200'
end
# utils requiring auth
def info
get "info"
end
def echo(params={})
put "echo", params
end
# sessions
def retrieve_session(session_id, params = {})
get "authentication_sessions/#{session_id}", params
end
# data is intended to be JSON encoded data if passed
def approve_session(session_id, data = {})
params = data.empty? ? {} : { data: data }
post "authentication_sessions/#{session_id}/approve", params
end
# data is intended to be JSON encoded data if passed
def decline_session(session_id, data = {})
params = data.empty? ? {} : { data: data }
post "authentication_sessions/#{session_id}/decline", params
end
# identities
def show_identity(uuid)
get "identities/#{uuid}"
end
def update_identity(identity_representation, uuid)
patch "identities/#{uuid}", identity: identity_representation
end
# by_value allows the unique pairing value to be used to perform
# queries or updates instead of Launchpad's internal UUID
def show_identity_by_pairing_value(pairing_value)
get "identities/by_pairing_value/#{pairing_value}"
end
def update_identity_by_pairing_value(identity_representation, pairing_value)
patch "identities/by_pairing_value/#{pairing_value}", identity: identity_representation
end
# final provisioning step (server side)
def provision(params = {})
raise "you need an :approval_code" if params[:approval_code].blank?
raise "you need an :identity" if params[:identity].blank?
post("pairing/provision", params)
end
# direct methods (for undocumented api?)
def post(url, params = {})
resp = raw_request(:post, url, params)
parse_response(resp)
end
def get(url, params = {})
resp = raw_request(:get, url, params)
parse_response(resp)
end
def put(url, params = {})
resp = raw_request(:put, url, params)
parse_response(resp)
end
def patch(url, params = {})
resp = raw_request(:patch, url, params)
parse_response(resp)
end
# Parses an HTTP response body as JSON. When ActiveSupport is loaded
# the hash is upgraded to indifferent access; otherwise a plain Hash is
# returned. A JSON::ParserError is re-raised carrying the raw body so
# callers can see what the server actually sent.
def parse_response(resp)
  parsed = JSON.parse(resp.body)
  # be railsy if we can
  if parsed.respond_to?(:with_indifferent_access)
    parsed = parsed.with_indifferent_access
  end
  parsed
rescue JSON::ParserError
  raise JSON::ParserError, resp.body
end
# lower-level HTTP code
def get_without_auth(url, params={})
parse_response raw_get_without_auth(url, params)
end
def raw_get_without_auth(url, params={})
uri = full_url(url)
Net::HTTP.get_response(URI(uri))
end
# Performs an encrypted (JWE) HTTP request against the API endpoint.
# GET requests carry the payload in the Faria-JWE header; other verbs
# send it as the request body with an application/jwe content type.
def raw_request(verb, url, params = {})
  uri = full_url(url)
  a = Addressable::URI.parse(uri)
  http = Net::HTTP.new(a.host, a.inferred_port)
  # use_ssl must be set before the session is started.
  http.use_ssl = a.scheme == 'https'
  # http.verify_mode = OpenSSL::SSL::VERIFY_NONE
  # Fix: the block previously declared a |http| parameter, shadowing the
  # outer `http` local (Net::HTTP#start yields the receiver, so both
  # named the same object — but the shadowing triggers Ruby warnings and
  # invites mistakes). The block now closes over the outer variable.
  http.start do
    request = verb_to_http_class(verb).new a.request_uri
    payload = encrypt_payload(params, a)
    if verb == :get
      request['Faria-JWE'] = payload
    else
      request['Content-Type'] = "application/jwe"
      request.body = payload
    end
    http.request request
  end
end
# url helpers
def pairing_request_url
rooted_url "third/pairing/request"
end
def pairing_complete_url
rooted_url "third/pairing/complete"
end
private

# HTTP verbs we can map onto Net::HTTP request classes.
# (Fixed: 'get' was listed twice.)
VALID_VERBS = %w(get put patch post delete).freeze

# can't guarantee we have Rails or AS, so map the verb onto its
# Net::HTTP request class with const_get vs constantize/classify, etc
def verb_to_http_class(verb)
  raise "#{verb} is not a valid HTTP verb." unless VALID_VERBS.include?(verb.to_s)
  Net::HTTP.const_get(verb.to_s.capitalize)
end
def encrypt_payload(params, address)
Faria::Launchpad::Packet.encrypt(
params,
{
api_url: address.normalize.to_s,
source: @source,
expires_in: @options[:expires_in]
},
remote_key: @remote_key,
local_key: @my_key
)
end
def rooted_url(url)
File.join(base_url(@endpoint), url)
end
def base_url(url)
url.gsub(%r{/api/v[^/]+/$},"")
end
def full_url(url)
File.join(@endpoint, url)
end
end
end
end
|
# TODO: Workaround, since hockeyapp.rb from shenzhen includes the code for commander
def command(_param)
end
module Fastlane
module Actions
module SharedValues
HOCKEY_DOWNLOAD_LINK = :HOCKEY_DOWNLOAD_LINK
HOCKEY_BUILD_INFORMATION = :HOCKEY_BUILD_INFORMATION # contains all keys/values from the HockeyApp API, like :title, :bundle_identifier
end
class HockeyAction < Action
# Uploads an ipa (and its dSYM, when available) to HockeyApp.
def self.run(options)
  # Available options: http://support.hockeyapp.net/kb/api/api-versions#upload-version
  require 'shenzhen'
  require 'shenzhen/plugins/hockeyapp'

  # Resolve the dSYM path: prefer an explicit :dsym option, otherwise
  # derive it from the ipa path.
  if options[:dsym]
    dsym_filename = options[:dsym]
  else
    dsym_path = options[:ipa].gsub('ipa', 'app.dSYM.zip')
    if File.exist?(dsym_path)
      dsym_filename = dsym_path
    else
      Helper.log.info "Symbols not found on path #{File.expand_path(dsym_path)}. Crashes won't be symbolicated properly".yellow
    end
  end

  raise "Symbols on path '#{File.expand_path(dsym_filename)}' not found".red if (dsym_filename && !File.exist?(dsym_filename))

  Helper.log.info 'Starting with ipa upload to HockeyApp... this could take some time.'.green
  client = Shenzhen::Plugins::HockeyApp::Client.new(options[:api_token])

  values = options.values
  # Fix: previously assigned dsym_path here, which is nil whenever the
  # :dsym option was passed explicitly — silently dropping the user's
  # dSYM. dsym_filename holds the value resolved and validated above.
  values[:dsym_filename] = dsym_filename
  values[:notes_type] = options[:notes_type]

  return values if Helper.test?

  response = client.upload_build(options[:ipa], values)
  case response.status
  when 200...300
    url = response.body['public_url']
    Actions.lane_context[SharedValues::HOCKEY_DOWNLOAD_LINK] = url
    Actions.lane_context[SharedValues::HOCKEY_BUILD_INFORMATION] = response.body
    Helper.log.info "Public Download URL: #{url}" if url
    Helper.log.info 'Build successfully uploaded to HockeyApp!'.green
  else
    Helper.log.fatal "Error uploading to HockeyApp: #{response.body}"
    raise 'Error when trying to upload ipa to HockeyApp'.red
  end
end
def self.description
"Upload a new build to HockeyApp"
end
# Configuration options accepted by the hockey action.
def self.available_options
  [
    FastlaneCore::ConfigItem.new(key: :api_token,
                                 env_name: "FL_HOCKEY_API_TOKEN",
                                 description: "API Token for Hockey Access",
                                 verify_block: Proc.new do |value|
                                   raise "No API token for Hockey given, pass using `api_token: 'token'`".red unless (value and not value.empty?)
                                 end),
    FastlaneCore::ConfigItem.new(key: :ipa,
                                 env_name: "FL_HOCKEY_IPA",
                                 description: "Path to your IPA file. Optional if you use the `ipa` or `xcodebuild` action",
                                 default_value: Actions.lane_context[SharedValues::IPA_OUTPUT_PATH],
                                 verify_block: Proc.new do |value|
                                   # Fix: File.exists? is a long-deprecated alias of
                                   # File.exist? and is removed entirely in Ruby 3.2.
                                   raise "Couldn't find ipa file at path '#{value}'".red unless File.exist?(value)
                                 end),
    FastlaneCore::ConfigItem.new(key: :dsym,
                                 env_name: "FL_HOCKEY_DSYM",
                                 description: "Path to your DSYM file",
                                 optional: true,
                                 verify_block: Proc.new do |value|
                                   # validation is done in the action
                                 end),
    FastlaneCore::ConfigItem.new(key: :notes,
                                 env_name: "FL_HOCKEY_NOTES",
                                 description: "Beta Notes",
                                 default_value: "No changelog given"),
    FastlaneCore::ConfigItem.new(key: :notify,
                                 env_name: "FL_HOCKEY_NOTIFY",
                                 description: "Notify testers? 1 for yes",
                                 default_value: "1"),
    FastlaneCore::ConfigItem.new(key: :status,
                                 env_name: "FL_HOCKEY_STATUS",
                                 description: "Download status: 1 = No user can download; 2 = Available for download",
                                 default_value: "2"),
    FastlaneCore::ConfigItem.new(key: :notes_type,
                                 env_name: "FL_HOCKEY_NOTES_TYPE",
                                 description: "Notes type for your :notes, 0 = Textile, 1 = Markdown (default)",
                                 default_value: "1")
  ]
end
def self.output
[
['HOCKEY_DOWNLOAD_LINK', 'The newly generated download link for this build'],
['HOCKEY_BUILD_INFORMATION', 'contains all keys/values from the HockeyApp API, like :title, :bundle_identifier']
]
end
def self.author
"KrauseFx"
end
def self.is_supported?(platform)
platform == :ios
end
end
end
end
Add release_type as a ConfigItem for the hockey action

This is useful when you need to support uploading to an app on
HockeyApp with a different release type. Because such apps usually
share the same bundle identifier, HockeyApp doesn’t know which app to
upload the new version to, and currently defaults to the Beta app.
# TODO: Workaround, since hockeyapp.rb from shenzhen includes the code for commander
def command(_param)
end
module Fastlane
module Actions
module SharedValues
HOCKEY_DOWNLOAD_LINK = :HOCKEY_DOWNLOAD_LINK
HOCKEY_BUILD_INFORMATION = :HOCKEY_BUILD_INFORMATION # contains all keys/values from the HockeyApp API, like :title, :bundle_identifier
end
class HockeyAction < Action
# Uploads an ipa (and its dSYM, when available) to HockeyApp.
def self.run(options)
  # Available options: http://support.hockeyapp.net/kb/api/api-versions#upload-version
  require 'shenzhen'
  require 'shenzhen/plugins/hockeyapp'

  # Resolve the dSYM path: prefer an explicit :dsym option, otherwise
  # derive it from the ipa path.
  if options[:dsym]
    dsym_filename = options[:dsym]
  else
    dsym_path = options[:ipa].gsub('ipa', 'app.dSYM.zip')
    if File.exist?(dsym_path)
      dsym_filename = dsym_path
    else
      Helper.log.info "Symbols not found on path #{File.expand_path(dsym_path)}. Crashes won't be symbolicated properly".yellow
    end
  end

  raise "Symbols on path '#{File.expand_path(dsym_filename)}' not found".red if (dsym_filename && !File.exist?(dsym_filename))

  Helper.log.info 'Starting with ipa upload to HockeyApp... this could take some time.'.green
  client = Shenzhen::Plugins::HockeyApp::Client.new(options[:api_token])

  values = options.values
  # Fix: previously assigned dsym_path here, which is nil whenever the
  # :dsym option was passed explicitly — silently dropping the user's
  # dSYM. dsym_filename holds the value resolved and validated above.
  values[:dsym_filename] = dsym_filename
  values[:notes_type] = options[:notes_type]

  return values if Helper.test?

  response = client.upload_build(options[:ipa], values)
  case response.status
  when 200...300
    url = response.body['public_url']
    Actions.lane_context[SharedValues::HOCKEY_DOWNLOAD_LINK] = url
    Actions.lane_context[SharedValues::HOCKEY_BUILD_INFORMATION] = response.body
    Helper.log.info "Public Download URL: #{url}" if url
    Helper.log.info 'Build successfully uploaded to HockeyApp!'.green
  else
    Helper.log.fatal "Error uploading to HockeyApp: #{response.body}"
    raise 'Error when trying to upload ipa to HockeyApp'.red
  end
end
def self.description
"Upload a new build to HockeyApp"
end
# Configuration options accepted by the hockey action.
def self.available_options
  [
    FastlaneCore::ConfigItem.new(key: :api_token,
                                 env_name: "FL_HOCKEY_API_TOKEN",
                                 description: "API Token for Hockey Access",
                                 verify_block: Proc.new do |value|
                                   raise "No API token for Hockey given, pass using `api_token: 'token'`".red unless (value and not value.empty?)
                                 end),
    FastlaneCore::ConfigItem.new(key: :ipa,
                                 env_name: "FL_HOCKEY_IPA",
                                 description: "Path to your IPA file. Optional if you use the `ipa` or `xcodebuild` action",
                                 default_value: Actions.lane_context[SharedValues::IPA_OUTPUT_PATH],
                                 verify_block: Proc.new do |value|
                                   # Fix: File.exists? is a long-deprecated alias of
                                   # File.exist? and is removed entirely in Ruby 3.2.
                                   raise "Couldn't find ipa file at path '#{value}'".red unless File.exist?(value)
                                 end),
    FastlaneCore::ConfigItem.new(key: :dsym,
                                 env_name: "FL_HOCKEY_DSYM",
                                 description: "Path to your DSYM file",
                                 optional: true,
                                 verify_block: Proc.new do |value|
                                   # validation is done in the action
                                 end),
    FastlaneCore::ConfigItem.new(key: :notes,
                                 env_name: "FL_HOCKEY_NOTES",
                                 description: "Beta Notes",
                                 default_value: "No changelog given"),
    FastlaneCore::ConfigItem.new(key: :notify,
                                 env_name: "FL_HOCKEY_NOTIFY",
                                 description: "Notify testers? 1 for yes",
                                 default_value: "1"),
    FastlaneCore::ConfigItem.new(key: :status,
                                 env_name: "FL_HOCKEY_STATUS",
                                 description: "Download status: 1 = No user can download; 2 = Available for download",
                                 default_value: "2"),
    FastlaneCore::ConfigItem.new(key: :notes_type,
                                 env_name: "FL_HOCKEY_NOTES_TYPE",
                                 description: "Notes type for your :notes, 0 = Textile, 1 = Markdown (default)",
                                 default_value: "1"),
    FastlaneCore::ConfigItem.new(key: :release_type,
                                 env_name: "FL_HOCKEY_RELEASE_TYPE",
                                 description: "Release type of the app",
                                 default_value: "0")
  ]
end
def self.output
[
['HOCKEY_DOWNLOAD_LINK', 'The newly generated download link for this build'],
['HOCKEY_BUILD_INFORMATION', 'contains all keys/values from the HockeyApp API, like :title, :bundle_identifier']
]
end
def self.author
"KrauseFx"
end
def self.is_supported?(platform)
platform == :ios
end
end
end
end
|
require 'model-api/api_context'
module ModelApi
module BaseController
module ClassMethods
  # Model class this controller serves; nil by default — includers bound
  # to a specific model are expected to override this.
  def model_class
    nil
  end
  # Base options merged into every API call; override to customize.
  def base_api_options
    {}
  end
  # base_api_options restricted to administrators.
  def base_admin_api_options
    base_api_options.merge(admin_only: true)
  end
end
class << self
  # Hook run when this module is included into a controller class: wires
  # in the class-level defaults, the instance behavior, shared HTTP
  # headers, a catch-all exception renderer, and JSON/XML responders.
  # NOTE(review): `before_filter` is removed in Rails 5.1+ (use
  # before_action there), and rescuing Exception is intentionally
  # broader than StandardError — confirm both against the Rails
  # versions this gem supports.
  def included(base)
    base.extend(ClassMethods)
    base.send(:include, InstanceMethods)
    base.send(:before_filter, :common_headers)
    base.send(:rescue_from, Exception, with: :unhandled_exception)
    base.send(:respond_to, :json, :xml)
  end
end
module InstanceMethods
SIMPLE_ID_REGEX = /\A[0-9]+\Z/
UUID_REGEX = /\A[0-9A-Za-z]{8}-?[0-9A-Za-z]{4}-?[0-9A-Za-z]{4}-?[0-9A-Za-z]{4}-?[0-9A-Za-z]\
{12}\Z/x
DEFAULT_PAGE_SIZE = 100
protected
def model_class
self.class.model_class
end
def api_context
@api_context ||= ModelApi::ApiContext.new(self)
end
def render_collection(collection, opts = {})
return unless ensure_admin_if_admin_only(opts)
opts = api_context.prepare_options(opts)
opts[:operation] ||= :index
return unless api_context.validate_read_operation(collection, opts[:operation], opts)
coll_route = opts[:collection_route] || self
collection_links = { self: coll_route }
collection = ModelApi::Utils.process_collection_includes(collection,
opts.merge(model_metadata: opts[:api_model_metadata] || opts[:model_metadata]))
collection, _result_filters = api_context.filter_collection(collection, find_filter_params,
opts)
collection, _result_sorts = api_context.sort_collection(collection, find_sort_params, opts)
collection, collection_links, opts = paginate_collection(collection, collection_links, opts,
coll_route)
opts[:collection_links] = collection_links.merge(opts[:collection_links] || {})
.reverse_merge(common_response_links(opts))
add_collection_object_route(opts)
ModelApi::Renderer.render(self, collection, opts)
end
def render_object(obj, opts = {})
return unless ensure_admin_if_admin_only(opts)
opts = api_context.prepare_options(opts)
klass = ModelApi::Utils.find_class(obj, opts)
object_route = opts[:object_route] || self
opts[:object_links] = { self: object_route }
if obj.is_a?(ActiveRecord::Base)
return unless api_context.validate_read_operation(obj, opts[:operation], opts)
unless obj.present?
return not_found(opts.merge(class: klass, field: :id))
end
opts[:object_links].merge!(opts[:object_links] || {})
else
return not_found(opts) if obj.nil?
obj = ModelApi::Utils.ext_value(obj, opts) unless opts[:raw_output]
opts[:object_links].merge!(opts[:links] || {})
end
opts[:operation] ||= :show
opts[:object_links].reverse_merge!(common_response_links(opts))
ModelApi::Renderer.render(self, obj, opts)
end
def do_create(opts = {})
klass = opts[:model_class] || model_class
return unless ensure_admin_if_admin_only(opts)
unless klass.is_a?(Class) && klass < ActiveRecord::Base
fail 'Unable to process object creation; Missing or invalid model class'
end
obj, opts = prepare_object_for_create(klass, opts)
return bad_payload(class: klass) if opts[:bad_payload]
create_and_render_object(obj, opts)
end
def prepare_object_for_create(klass, opts = {})
opts = api_context.prepare_options(opts)
req_body, format = parse_request_body
add_hateoas_links_for_update(opts)
api_context.get_updated_object(klass, get_operation(:create, opts), req_body,
opts.merge(format: format))
end
def create_and_render_object(obj, opts = {})
opts = api_context.prepare_options(opts)
object_link_options = opts[:object_link_options]
object_link_options[:action] = :show
save_and_render_object(obj, get_operation(:create, opts), opts.merge(location_header: true))
end
# Updates obj from the request payload and renders the result, exiting
# early (with an error response) on auth failure, bad payload, or a
# missing object.
def do_update(obj, opts = {})
  return unless ensure_admin_if_admin_only(opts)
  obj, opts = prepare_object_for_update(obj, opts)
  # Fix: this previously referenced an undefined local `klass`, raising
  # NameError whenever the payload was bad instead of rendering a 400.
  return bad_payload(class: ModelApi::Utils.find_class(obj, opts)) if opts[:bad_payload]
  unless obj.present?
    return not_found(opts.merge(class: ModelApi::Utils.find_class(obj, opts), field: :id))
  end
  update_and_render_object(obj, opts)
end
def prepare_object_for_update(obj, opts = {})
opts = api_context.prepare_options(opts)
req_body, format = parse_request_body
add_hateoas_links_for_update(opts)
api_context.get_updated_object(obj, get_operation(:update, opts), req_body,
opts.merge(format: format))
end
def update_and_render_object(obj, opts = {})
opts = api_context.prepare_options(opts)
save_and_render_object(obj, get_operation(:update, opts), opts)
end
def save_and_render_object(obj, operation, opts = {})
opts = api_context.prepare_options(opts)
status, msgs = Utils.process_updated_model_save(obj, operation, opts)
add_hateoas_links_for_updated_object(opts)
successful = ModelApi::Utils.response_successful?(status)
ModelApi::Renderer.render(self, successful ? obj : opts[:request_obj],
opts.merge(status: status, operation: :show, messages: msgs))
end
# Destroys obj (unwrapping an ActiveRecord::Relation first) and renders
# the outcome.
def do_destroy(obj, opts = {})
  return unless ensure_admin_if_admin_only(opts)
  opts = api_context.prepare_options(opts)
  obj = obj.first if obj.is_a?(ActiveRecord::Relation)
  add_hateoas_links_for_update(opts)
  # Fix: klass was previously computed *after* its first use in the
  # not_found call below, raising NameError whenever the object was
  # missing instead of rendering a 404.
  klass = ModelApi::Utils.find_class(obj, opts)
  unless obj.present?
    return not_found(opts.merge(class: klass, field: :id))
  end
  operation = opts[:operation] = get_operation(:destroy, opts)
  ModelApi::Utils.validate_operation(obj, operation,
      opts.merge(model_metadata: opts[:api_model_metadata] || opts[:model_metadata]))
  response_status, errs_or_msgs = Utils.process_object_destroy(obj, operation, opts)
  add_hateoas_links_for_updated_object(opts)
  ModelApi::Renderer.render(self, obj, opts.merge(status: response_status,
      root: ModelApi::Utils.model_name(klass).singular, messages: errs_or_msgs))
end
def common_response_links(_opts = {})
{}
end
def initialize_options(opts)
return opts if opts[:options_initialized]
opts = opts.symbolize_keys
opts[:api_context] ||= @api_context
opts[:model_class] ||= model_class
opts[:user] ||= filter_by_user
opts[:user_id] ||= opts[:user].try(:id)
opts[:admin_user] ||= admin_user?(opts)
opts[:admin] ||= admin?(opts)
unless opts.include?(:collection_link_options) && opts.include?(:object_link_options)
default_link_options = request.params.to_h.symbolize_keys
opts[:collection_link_options] ||= default_link_options
opts[:object_link_options] ||= default_link_options
if default_link_options[:exclude_associations].present?
opts[:exclude_associations] ||= default_link_options[:exclude_associations]
end
end
opts[:options_initialized] ||= true
opts
end
# Default implementation, can be hidden by API controller classes to include any
# application-specific options
def prepare_options(opts)
return opts if opts[:options_initialized]
initialize_options(opts)
end
def id_info(opts = {})
id_info = {}
id_info[:id_attribute] = (opts[:id_attribute] || :id).to_sym
id_info[:id_param] = (opts[:id_param] || :id).to_sym
id_info[:id_value] = (opts[:id_value] || params[id_info[:id_param]]).to_s
id_info
end
def common_object_query(opts = {})
opts = api_context.prepare_options(opts)
id_info = opts[:id_info] || id_info(opts)
api_context.common_object_query(id_info[:id_attribute], id_info[:id_value],
opts.merge(id_param: id_info[:id_param]))
end
def collection_query(opts = {})
opts = api_context.prepare_options(base_api_options.merge(opts))
klass = opts[:model_class] || model_class
query = api_context.api_query(klass, opts)
unless (opts.include?(:user_filter) && !opts[:user_filter]) ||
(admin? || filtered_by_foreign_key?(query)) || !opts[:user]
query = api_context.user_query(query, opts[:user], opts.merge(model_class: klass))
end
query
end
def object_query(opts = {})
common_object_query(api_context.prepare_options(base_api_options.merge(opts)))
end
def base_api_options
self.class.base_api_options
end
def base_admin_api_options
base_api_options.merge(admin: true, admin_only: true)
end
def ensure_admin
return true if admin_user?
# Mask presence of endpoint if user is not authorized to access it
not_found
false
end
def unhandled_exception(err)
return if handle_api_exceptions(err)
return if performed?
error_details = {}
if Rails.env == 'development'
error_details[:message] = "Exception: #{err.message}"
error_details[:backtrace] = err.backtrace
else
error_details[:message] = 'An internal server error has occurred ' \
'while processing your request.'
end
ModelApi::Renderer.render(self, error_details, root: :error_details,
status: :internal_server_error)
end
def handle_api_exceptions(err)
if err.is_a?(ModelApi::NotFoundException)
not_found(field: err.field, message: err.message)
elsif err.is_a?(ModelApi::UnauthorizedException)
unauthorized
else
return false
end
true
end
def doorkeeper_unauthorized_render_options(error: nil)
{ json: unauthorized(error: 'Not authorized to access resource', message: error.description,
format: :json, generate_body_only: true) }
end
# Indicates whether user has access to data they do not own.
def admin_user?(opts = {})
return opts[:admin_user] if opts.include?(:admin_user)
user = current_user
return nil if user.nil?
[:admin_api_user?, :admin_user?, :admin?].each do |method|
next unless user.respond_to?(method)
opts[:admin_user] = user.send(method) rescue next
break
end
opts[:admin_user] ||= false
end
# Indicates whether API should render administrator-only content in API responses
def admin?(opts = {})
  return opts[:admin] if opts.include?(:admin)
  param = request.params[:admin]
  # Fix: the 'false' comparison previously used `params` (the entire
  # params hash stringified) instead of the single `param` value, so the
  # guard could never match; compare the actual parameter value.
  param.present? && admin_user?(opts) &&
      (param.to_i != 0 && param.to_s.strip.downcase != 'false')
end
# Deprecated
def admin_content?(opts = {})
admin?(opts)
end
def resource_parent_id(parent_model_class, opts = {})
opts = api_context.prepare_options(opts)
id_info = id_info(opts.reverse_merge(id_param: "#{parent_model_class.name.underscore}_id"))
model_name = parent_model_class.model_name.human
if id_info[:id_value].blank?
unless opts[:optional]
fail ModelApi::NotFoundException.new(id_info[:id_param], "#{model_name} not found")
end
return nil
end
query = common_object_query(opts.merge(model_class: parent_model_class, id_info: id_info))
parent_id = query.pluck(:id).first
if parent_id.blank?
unless opts[:optional]
fail ModelApi::NotFoundException.new(id_info[:id_param],
"#{model_name} '#{id_info[:id_value]}' not found")
end
return nil
end
parent_id
end
def simple_error(status, error, opts = {})
opts = opts.dup
klass = opts[:class]
opts[:root] = ModelApi::Utils.model_name(klass).singular if klass.present?
if error.is_a?(Array)
errs_or_msgs = error.map do |e|
if e.is_a?(Hash)
next e if e.include?(:error) && e.include?(:message)
next e.reverse_merge(
error: e[:error] || 'Unspecified error',
message: e[:message] || e[:error] || 'Unspecified error')
end
{ error: e.to_s, message: e.to_s }
end
elsif error.is_a?(Hash)
errs_or_msgs = [error]
else
errs_or_msgs = [{ error: error, message: opts[:message] || error }]
end
errs_or_msgs[0][:field] = opts[:field] if opts.include?(:field)
ModelApi::Renderer.render(self, opts[:request_obj], opts.merge(status: status,
messages: errs_or_msgs))
end
def not_found(opts = {})
opts = opts.dup
opts[:message] ||= 'No resource found at the path provided or matching the criteria ' \
'specified'
simple_error(:not_found, opts.delete(:error) || 'No resource found', opts)
end
def bad_payload(opts = {})
opts = opts.dup
format = opts[:format] || identify_format
opts[:message] ||= "A properly-formatted #{format.to_s.upcase} " \
'payload was expected in the HTTP request body but not found'
simple_error(:bad_request, opts.delete(:error) || 'Missing/invalid request body (payload)',
opts)
end
def bad_request(error, message, opts = {})
opts[:message] = message || 'This request is invalid for the resource in its present state'
simple_error(:bad_request, error || 'Invalid API request', opts)
end
def unauthorized(opts = {})
opts = opts.dup
opts[:message] ||= 'Missing one or more privileges required to complete request'
simple_error(:unauthorized, opts.delete(:error) || 'Not authorized', opts)
end
def not_implemented(opts = {})
opts = opts.dup
opts[:message] ||= 'This API feature is presently unavailable'
simple_error(:not_implemented, opts.delete(:error) || 'Not implemented', opts)
end
def filter_by_user
current_user
end
def current_user
nil
end
def common_headers
ModelApi::Utils.common_http_headers.each do |k, v|
response.headers[k] = v
end
end
def identify_format
format = self.request.format.symbol rescue :json
format == :xml ? :xml : :json
end
def ensure_admin_if_admin_only(opts = {})
return true unless opts[:admin_only]
ensure_admin
end
# Determines the operation for the current request: an explicit
# opts[:operation] wins (even if nil), otherwise the controller action
# name implies it, otherwise the supplied default applies.
def get_operation(default_operation, opts = {})
  return opts[:operation] if opts.key?(:operation)
  implied = [:create, :update, :patch, :destroy].find do |op|
    action_name.start_with?(op.to_s)
  end
  implied || default_operation
end
# Parses the request body once and memoizes the [body, format] pair.
# Fix: the guard previously tested @request_body — an ivar that is never
# assigned anywhere — while results are stored in @req_body/@format, so
# the body was re-parsed on every call. The guard now matches the ivar
# actually used for storage.
def parse_request_body
  unless instance_variable_defined?(:@req_body)
    @req_body, @format = ModelApi::Utils.parse_request_body(request)
  end
  [@req_body, @format]
end
private
def find_filter_params
request.params.reject { |p, _v| %w(access_token sort_by admin).include?(p) }
end
def find_sort_params
sort_by = params[:sort_by]
return {} if sort_by.blank?
sort_by = sort_by.to_s.strip
if sort_by.starts_with?('{') || sort_by.starts_with?('[')
process_json_sort_params(sort_by)
else
process_simple_sort_params(sort_by)
end
end
# Parses a JSON sort spec — either {"field": "asc", ...} or a bare
# array of field names — into a hash of key => :asc/:desc/:default.
def process_json_sort_params(sort_by)
  sort_params = {}
  sort_json_obj =
    begin
      JSON.parse(sort_by)
    rescue StandardError
      # Malformed JSON (or a non-JSON string) simply yields no sorts;
      # explicit rescue replaces the old inline `rescue {}` modifier.
      {}
    end
  sort_json_obj = Hash[sort_json_obj.map { |v| [v, nil] }] if sort_json_obj.is_a?(Array)
  sort_json_obj.each do |key, value|
    # blank-key guard via stdlib (keys from JSON.parse are strings).
    next if key.to_s.strip.empty?
    value_lc = value.to_s.downcase
    order =
      if %w(a asc ascending).include?(value_lc)
        :asc
      elsif %w(d desc descending).include?(value_lc)
        :desc
      else
        :default
      end
    sort_params[key] = order
  end
  sort_params
end
def process_simple_sort_params(sort_by)
  # Parses a comma-separated sort expression ("name_asc, age desc") into
  # { key => :asc | :desc | :default }.
  #
  # Fix: the original re-looked each truncated key up via
  # `sort_by[key[0..-3]]` (String#[] substring search) — a no-op indirection
  # since the truncated key is always a substring of sort_by, but one that
  # obscures intent and invites nil surprises. Truncate the key directly,
  # and drive the six suffix forms from one table instead of six branches.
  # The recognized suffixes ('a'/'asc'/'ascending', 'd'/'desc'/'descending',
  # each preceded by '_' or ' ') are mutually exclusive, so check order
  # does not matter.
  suffix_orders = {
    'ascending' => :asc, 'descending' => :desc,
    'asc' => :asc, 'desc' => :desc,
    'a' => :asc, 'd' => :desc
  }
  sort_params = {}
  sort_by.split(',').each do |raw_key|
    key = raw_key.to_s.strip
    key_lc = key.downcase
    order = :default
    suffix_orders.each do |suffix, suffix_order|
      next unless key_lc.end_with?("_#{suffix}", " #{suffix}")
      key = key[0...-(suffix.length + 1)] # drop suffix and its separator
      order = suffix_order
      break
    end
    next if key.empty?
    sort_params[key] = order
  end
  sort_params
end
def paginate_collection(collection, collection_links, opts, coll_route)
collection_size = collection.count
page_size = (params[:page_size] || DEFAULT_PAGE_SIZE).to_i
page = [params[:page].to_i, 1].max
page_count = [(collection_size + page_size - 1) / page_size, 1].max
page = page_count if page > page_count
offset = (page - 1) * page_size
opts = opts.dup
opts[:count] ||= collection_size
opts[:page] ||= page
opts[:page_size] ||= page_size
opts[:page_count] ||= page_count
response.headers['X-Total-Count'] = collection_size.to_s
opts[:collection_link_options] = (opts[:collection_link_options] || {})
.reject { |k, _v| [:page].include?(k.to_sym) }
opts[:object_link_options] = (opts[:object_link_options] || {})
.reject { |k, _v| [:page, :page_size].include?(k.to_sym) }
if collection_size > page_size
opts[:collection_link_options][:page] = page
add_pagination_links(collection_links, coll_route, page, page_count)
collection = collection.limit(page_size).offset(offset)
end
[collection, collection_links, opts]
end
def add_pagination_links(collection_links, coll_route, page, last_page)
  # Populate RFC 5988-style pagination entries on the collection links
  # hash. :next/:prev only exist when there is somewhere to go;
  # :first/:last are always present.
  collection_links[:next] = [coll_route, { page: page + 1 }] if page < last_page
  if page > 1
    collection_links[:prev] = [coll_route, { page: page - 1 }]
  end
  collection_links[:first] = [coll_route, { page: 1 }]
  collection_links[:last] = [coll_route, { page: last_page }]
end
def add_collection_object_route(opts)
object_route = opts[:object_route]
unless object_route.present?
route_name = ModelApi::Utils.route_name(request)
if route_name.present?
if (singular_route_name = route_name.singularize) != route_name
object_route = singular_route_name
end
end
end
if object_route.present? && (object_route.is_a?(String) || object_route.is_a?(Symbol))
object_route = nil unless self.respond_to?("#{object_route}_url")
end
object_route = opts[:default_object_route] if object_route.blank?
return if object_route.blank?
opts[:object_links] = (opts[:object_links] || {}).merge(self: object_route)
end
def add_hateoas_links_for_update(opts)
  # Build the HATEOAS link set for an update response: the object's route
  # becomes :self, common response links fill in underneath, and any
  # caller-supplied :links take precedence.
  # Fix: removed a leftover `binding.pry` breakpoint and a debug `puts`,
  # which would halt/spam every request passing through this path.
  object_route = opts[:object_route] || self
  links = { self: object_route }.reverse_merge(common_response_links(opts))
  opts[:links] = links.merge(opts[:links] || {})
  opts
end
def add_hateoas_links_for_updated_object(opts)
  # Attach a :self link for the freshly-updated object; caller-supplied
  # :object_links override the default.
  # Fix: removed a leftover `binding.pry` breakpoint that would halt every
  # request through this path (and crash where pry is not loaded).
  object_route = opts[:object_route] || self
  object_links = { self: object_route }
  opts[:object_links] = object_links.merge(opts[:object_links] || {})
  opts
end
def filtered_by_foreign_key?(query)
  # True when the query's WHERE clause already constrains a belongs_to
  # foreign key (so additional per-user scoping can be skipped).
  fk_cache = self.class.instance_variable_get(:@foreign_key_cache)
  self.class.instance_variable_set(:@foreign_key_cache, fk_cache = {}) if fk_cache.nil?
  klass = query.klass
  # Memoize the belongs_to foreign-key column names per model class.
  foreign_keys = (fk_cache[klass] ||= klass.reflections.values
      .select { |a| a.macro == :belongs_to }.map { |a| a.foreign_key.to_s })
  (query.values[:where] || []).to_h.select { |v| v.is_a?(Arel::Nodes::Equality) }
      .map { |v| v.left.name }.each do |key|
    return true if foreign_keys.include?(key)
  end
  false
rescue StandardError => e
  # Fixes: (1) was `rescue Exception`, which also traps signals/exit;
  # (2) the rescue previously fell through with `logger.warn`'s truthy
  # return value, making the method report "already filtered" on error and
  # silently skip user scoping — return false explicitly instead;
  # (3) "encounterd" typo in the log message.
  Rails.logger.warn "Exception encountered determining if query is filtered: #{e.message}\n" \
      "#{e.backtrace.join("\n")}"
  false
end
end
class Utils
class << self
def process_updated_model_save(obj, operation, opts = {})
opts = opts.dup
opts[:operation] = operation
successful = ModelApi::Utils.save_obj(obj,
opts.merge(model_metadata: opts[:api_model_metadata]))
if successful
suggested_response_status = :ok
object_errors = []
else
suggested_response_status = :bad_request
attr_metadata = opts.delete(:api_attr_metadata) ||
ModelApi::Utils.filtered_attrs(obj, operation, opts)
object_errors = ModelApi::Utils.extract_error_msgs(obj,
opts.merge(api_attr_metadata: attr_metadata))
unless object_errors.present?
object_errors << {
error: 'Unspecified error',
message: "Unspecified error processing #{operation}: " \
'Please contact customer service for further assistance.'
}
end
end
[suggested_response_status, object_errors]
end
def process_object_destroy(obj, operation, opts)
soft_delete = obj.errors.present? ? false : object_destroy(obj, opts)
if obj.errors.blank? && (soft_delete || obj.destroyed?)
response_status = :ok
object_errors = []
else
object_errors = ModelApi::Utils.extract_error_msgs(obj, opts)
if object_errors.present?
response_status = :bad_request
else
response_status = :internal_server_error
object_errors << {
error: 'Unspecified error',
message: "Unspecified error processing #{operation}: " \
'Please contact customer service for further assistance.'
}
end
end
[response_status, object_errors]
end
def object_destroy(obj, opts = {})
  # Destroys obj, preferring a soft delete when the model exposes a
  # `deleted` column (boolean or numeric). Returns true only when a soft
  # delete was performed; hard destroys return false (callers inspect
  # obj.destroyed?), as do failures.
  klass = ModelApi::Utils.find_class(obj)
  object_id = obj.send(opts[:id_attribute] || :id)
  # Clear ActiveRecord's readonly flag so the update/destroy is permitted.
  obj.instance_variable_set(:@readonly, false) if obj.instance_variable_get(:@readonly)
  if (deleted_col = klass.columns_hash['deleted']).present?
    case deleted_col.type
    when :boolean
      obj.update_attribute(:deleted, true)
      return true
    when :integer, :decimal
      obj.update_attribute(:deleted, 1)
      return true
    else
      obj.destroy
    end
  else
    obj.destroy
  end
  false
rescue StandardError => e
  # Fix: was `rescue Exception`, which would also swallow SignalException,
  # SystemExit and NoMemoryError; StandardError covers ORM failures.
  Rails.logger.warn "Error destroying #{klass.name} \"#{object_id}\": \"#{e.message}\")."
  false
end
end
end
end
end
Sidestep HATEOAS links for now
require 'model-api/api_context'
module ModelApi
module BaseController
module ClassMethods
def model_class
nil
end
def base_api_options
{}
end
def base_admin_api_options
base_api_options.merge(admin_only: true)
end
end
class << self
def included(base)
base.extend(ClassMethods)
base.send(:include, InstanceMethods)
base.send(:before_filter, :common_headers)
base.send(:rescue_from, Exception, with: :unhandled_exception)
base.send(:respond_to, :json, :xml)
end
end
module InstanceMethods
SIMPLE_ID_REGEX = /\A[0-9]+\Z/
UUID_REGEX = /\A[0-9A-Za-z]{8}-?[0-9A-Za-z]{4}-?[0-9A-Za-z]{4}-?[0-9A-Za-z]{4}-?[0-9A-Za-z]\
{12}\Z/x
DEFAULT_PAGE_SIZE = 100
protected
def model_class
self.class.model_class
end
def api_context
@api_context ||= ModelApi::ApiContext.new(self)
end
def render_collection(collection, opts = {})
return unless ensure_admin_if_admin_only(opts)
opts = api_context.prepare_options(opts)
opts[:operation] ||= :index
return unless api_context.validate_read_operation(collection, opts[:operation], opts)
coll_route = opts[:collection_route] || self
collection_links = { self: coll_route }
collection = ModelApi::Utils.process_collection_includes(collection,
opts.merge(model_metadata: opts[:api_model_metadata] || opts[:model_metadata]))
collection, _result_filters = api_context.filter_collection(collection, find_filter_params,
opts)
collection, _result_sorts = api_context.sort_collection(collection, find_sort_params, opts)
collection, collection_links, opts = paginate_collection(collection, collection_links, opts,
coll_route)
opts[:collection_links] = collection_links.merge(opts[:collection_links] || {})
.reverse_merge(common_response_links(opts))
add_collection_object_route(opts)
ModelApi::Renderer.render(self, collection, opts)
end
def render_object(obj, opts = {})
return unless ensure_admin_if_admin_only(opts)
opts = api_context.prepare_options(opts)
klass = ModelApi::Utils.find_class(obj, opts)
object_route = opts[:object_route] || self
opts[:object_links] = { self: object_route }
if obj.is_a?(ActiveRecord::Base)
return unless api_context.validate_read_operation(obj, opts[:operation], opts)
unless obj.present?
return not_found(opts.merge(class: klass, field: :id))
end
opts[:object_links].merge!(opts[:object_links] || {})
else
return not_found(opts) if obj.nil?
obj = ModelApi::Utils.ext_value(obj, opts) unless opts[:raw_output]
opts[:object_links].merge!(opts[:links] || {})
end
opts[:operation] ||= :show
opts[:object_links].reverse_merge!(common_response_links(opts))
ModelApi::Renderer.render(self, obj, opts)
end
def do_create(opts = {})
klass = opts[:model_class] || model_class
return unless ensure_admin_if_admin_only(opts)
unless klass.is_a?(Class) && klass < ActiveRecord::Base
fail 'Unable to process object creation; Missing or invalid model class'
end
obj, opts = prepare_object_for_create(klass, opts)
return bad_payload(class: klass) if opts[:bad_payload]
create_and_render_object(obj, opts)
end
def prepare_object_for_create(klass, opts = {})
opts = api_context.prepare_options(opts)
req_body, format = parse_request_body
#add_hateoas_links_for_update(opts)
api_context.get_updated_object(klass, get_operation(:create, opts), req_body,
opts.merge(format: format))
end
def create_and_render_object(obj, opts = {})
opts = api_context.prepare_options(opts)
object_link_options = opts[:object_link_options]
object_link_options[:action] = :show
save_and_render_object(obj, get_operation(:create, opts), opts.merge(location_header: true))
end
def do_update(obj, opts = {})
  # Applies the request payload to obj and renders the updated object,
  # guarding admin-only endpoints and missing/invalid payloads first.
  return unless ensure_admin_if_admin_only(opts)
  obj, opts = prepare_object_for_update(obj, opts)
  if opts[:bad_payload]
    # Fix: this branch referenced a local `klass` that was never assigned
    # in this method, raising NameError instead of rendering a 400.
    return bad_payload(class: ModelApi::Utils.find_class(obj, opts))
  end
  unless obj.present?
    return not_found(opts.merge(class: ModelApi::Utils.find_class(obj, opts), field: :id))
  end
  update_and_render_object(obj, opts)
end
def prepare_object_for_update(obj, opts = {})
opts = api_context.prepare_options(opts)
req_body, format = parse_request_body
#add_hateoas_links_for_update(opts)
api_context.get_updated_object(obj, get_operation(:update, opts), req_body,
opts.merge(format: format))
end
def update_and_render_object(obj, opts = {})
opts = api_context.prepare_options(opts)
save_and_render_object(obj, get_operation(:update, opts), opts)
end
def save_and_render_object(obj, operation, opts = {})
opts = api_context.prepare_options(opts)
status, msgs = Utils.process_updated_model_save(obj, operation, opts)
#add_hateoas_links_for_updated_object(opts)
successful = ModelApi::Utils.response_successful?(status)
ModelApi::Renderer.render(self, successful ? obj : opts[:request_obj],
opts.merge(status: status, operation: :show, messages: msgs))
end
def do_destroy(obj, opts = {})
  # Destroys obj (soft- or hard-delete, per Utils.process_object_destroy)
  # and renders the outcome.
  return unless ensure_admin_if_admin_only(opts)
  opts = api_context.prepare_options(opts)
  obj = obj.first if obj.is_a?(ActiveRecord::Relation)
  # Fix: `klass` was previously assigned *after* its use in the not-found
  # branch below, raising NameError whenever the object was missing.
  # Resolve it up front (same pattern as do_update).
  klass = ModelApi::Utils.find_class(obj, opts)
  #add_hateoas_links_for_update(opts)
  unless obj.present?
    return not_found(opts.merge(class: klass, field: :id))
  end
  operation = opts[:operation] = get_operation(:destroy, opts)
  ModelApi::Utils.validate_operation(obj, operation,
      opts.merge(model_metadata: opts[:api_model_metadata] || opts[:model_metadata]))
  response_status, errs_or_msgs = Utils.process_object_destroy(obj, operation, opts)
  #add_hateoas_links_for_updated_object(opts)
  ModelApi::Renderer.render(self, obj, opts.merge(status: response_status,
      root: ModelApi::Utils.model_name(klass).singular, messages: errs_or_msgs))
end
def common_response_links(_opts = {})
{}
end
def initialize_options(opts)
return opts if opts[:options_initialized]
opts = opts.symbolize_keys
opts[:api_context] ||= @api_context
opts[:model_class] ||= model_class
opts[:user] ||= filter_by_user
opts[:user_id] ||= opts[:user].try(:id)
opts[:admin_user] ||= admin_user?(opts)
opts[:admin] ||= admin?(opts)
unless opts.include?(:collection_link_options) && opts.include?(:object_link_options)
default_link_options = request.params.to_h.symbolize_keys
opts[:collection_link_options] ||= default_link_options
opts[:object_link_options] ||= default_link_options
if default_link_options[:exclude_associations].present?
opts[:exclude_associations] ||= default_link_options[:exclude_associations]
end
end
opts[:options_initialized] ||= true
opts
end
# Default implementation, can be hidden by API controller classes to include any
# application-specific options
def prepare_options(opts)
return opts if opts[:options_initialized]
initialize_options(opts)
end
def id_info(opts = {})
id_info = {}
id_info[:id_attribute] = (opts[:id_attribute] || :id).to_sym
id_info[:id_param] = (opts[:id_param] || :id).to_sym
id_info[:id_value] = (opts[:id_value] || params[id_info[:id_param]]).to_s
id_info
end
def common_object_query(opts = {})
opts = api_context.prepare_options(opts)
id_info = opts[:id_info] || id_info(opts)
api_context.common_object_query(id_info[:id_attribute], id_info[:id_value],
opts.merge(id_param: id_info[:id_param]))
end
def collection_query(opts = {})
opts = api_context.prepare_options(base_api_options.merge(opts))
klass = opts[:model_class] || model_class
query = api_context.api_query(klass, opts)
unless (opts.include?(:user_filter) && !opts[:user_filter]) ||
(admin? || filtered_by_foreign_key?(query)) || !opts[:user]
query = api_context.user_query(query, opts[:user], opts.merge(model_class: klass))
end
query
end
def object_query(opts = {})
common_object_query(api_context.prepare_options(base_api_options.merge(opts)))
end
def base_api_options
self.class.base_api_options
end
def base_admin_api_options
base_api_options.merge(admin: true, admin_only: true)
end
def ensure_admin
return true if admin_user?
# Mask presence of endpoint if user is not authorized to access it
not_found
false
end
def unhandled_exception(err)
return if handle_api_exceptions(err)
return if performed?
error_details = {}
if Rails.env == 'development'
error_details[:message] = "Exception: #{err.message}"
error_details[:backtrace] = err.backtrace
else
error_details[:message] = 'An internal server error has occurred ' \
'while processing your request.'
end
ModelApi::Renderer.render(self, error_details, root: :error_details,
status: :internal_server_error)
end
def handle_api_exceptions(err)
if err.is_a?(ModelApi::NotFoundException)
not_found(field: err.field, message: err.message)
elsif err.is_a?(ModelApi::UnauthorizedException)
unauthorized
else
return false
end
true
end
def doorkeeper_unauthorized_render_options(error: nil)
{ json: unauthorized(error: 'Not authorized to access resource', message: error.description,
format: :json, generate_body_only: true) }
end
# Indicates whether user has access to data they do not own.
def admin_user?(opts = {})
return opts[:admin_user] if opts.include?(:admin_user)
user = current_user
return nil if user.nil?
[:admin_api_user?, :admin_user?, :admin?].each do |method|
next unless user.respond_to?(method)
opts[:admin_user] = user.send(method) rescue next
break
end
opts[:admin_user] ||= false
end
# Indicates whether API should render administrator-only content in API responses
def admin?(opts = {})
  # True when the caller both is an admin user and explicitly requested
  # admin content via the `admin` query parameter.
  return opts[:admin] if opts.include?(:admin)
  param = request.params[:admin]
  # Fix: the final comparison previously read `params.to_s` (stringifying
  # the whole params hash, which never equals 'false'), so the
  # "admin=false" opt-out could never work. Compare the single param.
  param.present? && admin_user?(opts) &&
      (param.to_i != 0 && param.to_s.strip.downcase != 'false')
end
# Deprecated
def admin_content?(opts = {})
admin?(opts)
end
def resource_parent_id(parent_model_class, opts = {})
opts = api_context.prepare_options(opts)
id_info = id_info(opts.reverse_merge(id_param: "#{parent_model_class.name.underscore}_id"))
model_name = parent_model_class.model_name.human
if id_info[:id_value].blank?
unless opts[:optional]
fail ModelApi::NotFoundException.new(id_info[:id_param], "#{model_name} not found")
end
return nil
end
query = common_object_query(opts.merge(model_class: parent_model_class, id_info: id_info))
parent_id = query.pluck(:id).first
if parent_id.blank?
unless opts[:optional]
fail ModelApi::NotFoundException.new(id_info[:id_param],
"#{model_name} '#{id_info[:id_value]}' not found")
end
return nil
end
parent_id
end
def simple_error(status, error, opts = {})
opts = opts.dup
klass = opts[:class]
opts[:root] = ModelApi::Utils.model_name(klass).singular if klass.present?
if error.is_a?(Array)
errs_or_msgs = error.map do |e|
if e.is_a?(Hash)
next e if e.include?(:error) && e.include?(:message)
next e.reverse_merge(
error: e[:error] || 'Unspecified error',
message: e[:message] || e[:error] || 'Unspecified error')
end
{ error: e.to_s, message: e.to_s }
end
elsif error.is_a?(Hash)
errs_or_msgs = [error]
else
errs_or_msgs = [{ error: error, message: opts[:message] || error }]
end
errs_or_msgs[0][:field] = opts[:field] if opts.include?(:field)
ModelApi::Renderer.render(self, opts[:request_obj], opts.merge(status: status,
messages: errs_or_msgs))
end
def not_found(opts = {})
opts = opts.dup
opts[:message] ||= 'No resource found at the path provided or matching the criteria ' \
'specified'
simple_error(:not_found, opts.delete(:error) || 'No resource found', opts)
end
def bad_payload(opts = {})
opts = opts.dup
format = opts[:format] || identify_format
opts[:message] ||= "A properly-formatted #{format.to_s.upcase} " \
'payload was expected in the HTTP request body but not found'
simple_error(:bad_request, opts.delete(:error) || 'Missing/invalid request body (payload)',
opts)
end
def bad_request(error, message, opts = {})
opts[:message] = message || 'This request is invalid for the resource in its present state'
simple_error(:bad_request, error || 'Invalid API request', opts)
end
def unauthorized(opts = {})
opts = opts.dup
opts[:message] ||= 'Missing one or more privileges required to complete request'
simple_error(:unauthorized, opts.delete(:error) || 'Not authorized', opts)
end
def not_implemented(opts = {})
opts = opts.dup
opts[:message] ||= 'This API feature is presently unavailable'
simple_error(:not_implemented, opts.delete(:error) || 'Not implemented', opts)
end
def filter_by_user
current_user
end
def current_user
nil
end
def common_headers
ModelApi::Utils.common_http_headers.each do |k, v|
response.headers[k] = v
end
end
def identify_format
format = self.request.format.symbol rescue :json
format == :xml ? :xml : :json
end
def ensure_admin_if_admin_only(opts = {})
return true unless opts[:admin_only]
ensure_admin
end
def get_operation(default_operation, opts = {})
if opts.key?(:operation)
return opts[:operation]
elsif action_name.start_with?('create')
return :create
elsif action_name.start_with?('update')
return :update
elsif action_name.start_with?('patch')
return :patch
elsif action_name.start_with?('destroy')
return :destroy
else
return default_operation
end
end
def parse_request_body
  # Parse the request payload once and memoize body + format.
  # Fix: the guard previously tested :@request_body, an ivar that is never
  # assigned (the assignment targets @req_body), so the body was re-parsed
  # on every call. Guard on the ivar that is actually set.
  unless instance_variable_defined?(:@req_body)
    @req_body, @format = ModelApi::Utils.parse_request_body(request)
  end
  [@req_body, @format]
end
private
def find_filter_params
request.params.reject { |p, _v| %w(access_token sort_by admin).include?(p) }
end
def find_sort_params
sort_by = params[:sort_by]
return {} if sort_by.blank?
sort_by = sort_by.to_s.strip
if sort_by.starts_with?('{') || sort_by.starts_with?('[')
process_json_sort_params(sort_by)
else
process_simple_sort_params(sort_by)
end
end
def process_json_sort_params(sort_by)
sort_params = {}
sort_json_obj = (JSON.parse(sort_by) rescue {})
sort_json_obj = Hash[sort_json_obj.map { |v| [v, nil] }] if sort_json_obj.is_a?(Array)
sort_json_obj.each do |key, value|
next if key.blank?
value_lc = value.to_s.downcase
if %w(a asc ascending).include?(value_lc)
order = :asc
elsif %w(d desc descending).include?(value_lc)
order = :desc
else
order = :default
end
sort_params[key] = order
end
sort_params
end
def process_simple_sort_params(sort_by)
sort_params = {}
sort_by.split(',').each do |key|
key = key.to_s.strip
key_lc = key.downcase
if key_lc.ends_with?('_a') || key_lc.ends_with?(' a')
key = sort_by[key[0..-3]]
order = :asc
elsif key_lc.ends_with?('_asc') || key_lc.ends_with?(' asc')
key = sort_by[key[0..-5]]
order = :asc
elsif key_lc.ends_with?('_ascending') || key_lc.ends_with?(' ascending')
key = sort_by[key[0..-11]]
order = :asc
elsif key_lc.ends_with?('_d') || key_lc.ends_with?(' d')
key = sort_by[key[0..-3]]
order = :desc
elsif key_lc.ends_with?('_desc') || key_lc.ends_with?(' desc')
key = sort_by[key[0..-6]]
order = :desc
elsif key_lc.ends_with?('_descending') || key_lc.ends_with?(' descending')
key = sort_by[key[0..-12]]
order = :desc
else
order = :default
end
next if key.blank?
sort_params[key] = order
end
sort_params
end
def paginate_collection(collection, collection_links, opts, coll_route)
collection_size = collection.count
page_size = (params[:page_size] || DEFAULT_PAGE_SIZE).to_i
page = [params[:page].to_i, 1].max
page_count = [(collection_size + page_size - 1) / page_size, 1].max
page = page_count if page > page_count
offset = (page - 1) * page_size
opts = opts.dup
opts[:count] ||= collection_size
opts[:page] ||= page
opts[:page_size] ||= page_size
opts[:page_count] ||= page_count
response.headers['X-Total-Count'] = collection_size.to_s
opts[:collection_link_options] = (opts[:collection_link_options] || {})
.reject { |k, _v| [:page].include?(k.to_sym) }
opts[:object_link_options] = (opts[:object_link_options] || {})
.reject { |k, _v| [:page, :page_size].include?(k.to_sym) }
if collection_size > page_size
opts[:collection_link_options][:page] = page
add_pagination_links(collection_links, coll_route, page, page_count)
collection = collection.limit(page_size).offset(offset)
end
[collection, collection_links, opts]
end
def add_pagination_links(collection_links, coll_route, page, last_page)
if page < last_page
collection_links[:next] = [coll_route, { page: (page + 1) }]
end
collection_links[:prev] = [coll_route, { page: (page - 1) }] if page > 1
collection_links[:first] = [coll_route, { page: 1 }]
collection_links[:last] = [coll_route, { page: last_page }]
end
def add_collection_object_route(opts)
object_route = opts[:object_route]
unless object_route.present?
route_name = ModelApi::Utils.route_name(request)
if route_name.present?
if (singular_route_name = route_name.singularize) != route_name
object_route = singular_route_name
end
end
end
if object_route.present? && (object_route.is_a?(String) || object_route.is_a?(Symbol))
object_route = nil unless self.respond_to?("#{object_route}_url")
end
object_route = opts[:default_object_route] if object_route.blank?
return if object_route.blank?
opts[:object_links] = (opts[:object_links] || {}).merge(self: object_route)
end
def add_hateoas_links_for_update(opts)
object_route = opts[:object_route] || self
links = { self: object_route }.reverse_merge(common_response_links(opts))
opts[:links] = links.merge(opts[:links] || {})
opts
end
def add_hateoas_links_for_updated_object(opts)
object_route = opts[:object_route] || self
object_links = { self: object_route }
opts[:object_links] = object_links.merge(opts[:object_links] || {})
opts
end
def filtered_by_foreign_key?(query)
  # True when the query's WHERE clause already constrains a belongs_to
  # foreign key (so additional per-user scoping can be skipped).
  fk_cache = self.class.instance_variable_get(:@foreign_key_cache)
  self.class.instance_variable_set(:@foreign_key_cache, fk_cache = {}) if fk_cache.nil?
  klass = query.klass
  # Memoize the belongs_to foreign-key column names per model class.
  foreign_keys = (fk_cache[klass] ||= klass.reflections.values
      .select { |a| a.macro == :belongs_to }.map { |a| a.foreign_key.to_s })
  (query.values[:where] || []).to_h.select { |v| v.is_a?(Arel::Nodes::Equality) }
      .map { |v| v.left.name }.each do |key|
    return true if foreign_keys.include?(key)
  end
  false
rescue StandardError => e
  # Fixes: (1) was `rescue Exception`, which also traps signals/exit;
  # (2) the rescue previously fell through with `logger.warn`'s truthy
  # return value, making the method report "already filtered" on error and
  # silently skip user scoping — return false explicitly instead;
  # (3) "encounterd" typo in the log message.
  Rails.logger.warn "Exception encountered determining if query is filtered: #{e.message}\n" \
      "#{e.backtrace.join("\n")}"
  false
end
end
class Utils
class << self
def process_updated_model_save(obj, operation, opts = {})
opts = opts.dup
opts[:operation] = operation
successful = ModelApi::Utils.save_obj(obj,
opts.merge(model_metadata: opts[:api_model_metadata]))
if successful
suggested_response_status = :ok
object_errors = []
else
suggested_response_status = :bad_request
attr_metadata = opts.delete(:api_attr_metadata) ||
ModelApi::Utils.filtered_attrs(obj, operation, opts)
object_errors = ModelApi::Utils.extract_error_msgs(obj,
opts.merge(api_attr_metadata: attr_metadata))
unless object_errors.present?
object_errors << {
error: 'Unspecified error',
message: "Unspecified error processing #{operation}: " \
'Please contact customer service for further assistance.'
}
end
end
[suggested_response_status, object_errors]
end
def process_object_destroy(obj, operation, opts)
soft_delete = obj.errors.present? ? false : object_destroy(obj, opts)
if obj.errors.blank? && (soft_delete || obj.destroyed?)
response_status = :ok
object_errors = []
else
object_errors = ModelApi::Utils.extract_error_msgs(obj, opts)
if object_errors.present?
response_status = :bad_request
else
response_status = :internal_server_error
object_errors << {
error: 'Unspecified error',
message: "Unspecified error processing #{operation}: " \
'Please contact customer service for further assistance.'
}
end
end
[response_status, object_errors]
end
def object_destroy(obj, opts = {})
klass = ModelApi::Utils.find_class(obj)
object_id = obj.send(opts[:id_attribute] || :id)
obj.instance_variable_set(:@readonly, false) if obj.instance_variable_get(:@readonly)
if (deleted_col = klass.columns_hash['deleted']).present?
case deleted_col.type
when :boolean
obj.update_attribute(:deleted, true)
return true
when :integer, :decimal
obj.update_attribute(:deleted, 1)
return true
else
obj.destroy
end
else
obj.destroy
end
false
rescue Exception => e
Rails.logger.warn "Error destroying #{klass.name} \"#{object_id}\": \"#{e.message}\")."
false
end
end
end
end
end
|
# TODO: Workaround, since hockeyapp.rb from shenzhen includes the code for commander
# No-op stand-in for Commander's top-level `command` DSL method:
# shenzhen's hockeyapp.rb invokes it at require time, so an empty
# definition keeps the require from failing outside a Commander app.
def command(param)
end
module Fastlane
module Actions
module SharedValues
HOCKEY_DOWNLOAD_LINK = :HOCKEY_DOWNLOAD_LINK
HOCKEY_BUILD_INFORMATION = :HOCKEY_BUILD_INFORMATION # contains all keys/values from the HockeyApp API, like :title, :bundle_identifier
end
class HockeyAction
  # Uploads an IPA build to HockeyApp via shenzhen and records the public
  # download URL and build information in the fastlane lane context.
  def self.run(params)
    # Available options: http://support.hockeyapp.net/kb/api/api-versions#upload-version
    options = {
      notes: "No changelog given",
      status: 2,
      notify: 1
    }.merge(params.first)
    require 'shenzhen'
    require 'shenzhen/plugins/hockeyapp'
    raise "No API Token for Hockey given, pass using `api_token: 'token'`".red unless options[:api_token].to_s.length > 0
    raise "No IPA file given or found, pass using `ipa: 'path.ipa'`".red unless options[:ipa]
    # Fix: File.exists? is deprecated; File.exist? is the supported form.
    raise "IPA file on path '#{File.expand_path(options[:ipa])}' not found".red unless File.exist?(options[:ipa])
    Helper.log.info "Starting with ipa upload to HockeyApp... this could take some time.".green
    client = Shenzhen::Plugins::HockeyApp::Client.new(options[:api_token])
    # Skip the network call entirely when running under fastlane's tests.
    return if Helper.is_test?
    response = client.upload_build(options[:ipa], options)
    case response.status
    when 200...300
      url = response.body['public_url']
      Actions.lane_context[SharedValues::HOCKEY_DOWNLOAD_LINK] = url
      Actions.lane_context[SharedValues::HOCKEY_BUILD_INFORMATION] = response.body
      Helper.log.info "Public Download URL: #{url}" if url
      Helper.log.info "Build successfully uploaded to HockeyApp!".green
    else
      Helper.log.fatal "Error uploading to HockeyApp: #{response.body}"
      raise "Error when trying to upload ipa to HockeyApp".red
    end
  end
end
end
end
Uploading symbols together with build to HockeyApp
# TODO: Workaround, since hockeyapp.rb from shenzhen includes the code for commander
def command(param)
end
module Fastlane
module Actions
module SharedValues
HOCKEY_DOWNLOAD_LINK = :HOCKEY_DOWNLOAD_LINK
HOCKEY_BUILD_INFORMATION = :HOCKEY_BUILD_INFORMATION # contains all keys/values from the HockeyApp API, like :title, :bundle_identifier
end
class HockeyAction
  # Uploads an IPA (and its dSYM symbols, when available) to HockeyApp via
  # shenzhen, then records the public download URL and build information in
  # the fastlane lane context.
  def self.run(params)
    # Available options: http://support.hockeyapp.net/kb/api/api-versions#upload-version
    options = {
      notes: "No changelog given",
      status: 2,
      notify: 1
    }.merge(params.first)
    require 'shenzhen'
    require 'shenzhen/plugins/hockeyapp'
    raise "No API Token for Hockey given, pass using `api_token: 'token'`".red unless options[:api_token].to_s.length > 0
    raise "No IPA file given or found, pass using `ipa: 'path.ipa'`".red unless options[:ipa]
    # Fix: File.exists? is deprecated; File.exist? is the supported form
    # (three occurrences in this method).
    raise "IPA file on path '#{File.expand_path(options[:ipa])}' not found".red unless File.exist?(options[:ipa])
    if options[:dsym]
      options[:dsym_filename] = options[:dsym]
    else
      # By convention the dSYM zip sits next to the ipa as "<name>.app.dSYM.zip".
      # Fix: was gsub("ipa", ...), which rewrote *every* occurrence of "ipa"
      # anywhere in the path (e.g. "/builds/my-ipa/app.ipa"); anchor the
      # replacement to the file extension instead.
      dsym_path = options[:ipa].sub(/\.ipa\z/, ".app.dSYM.zip")
      if File.exist?(dsym_path)
        options[:dsym_filename] = dsym_path
      else
        Helper.log.info "Symbols not found on path #{File.expand_path(dsym_path)}. Crashes won't be symbolicated properly".yellow
      end
    end
    raise "Symbols on path '#{File.expand_path(options[:dsym_filename])}' not found".red if (options[:dsym_filename] && !File.exist?(options[:dsym_filename]))
    Helper.log.info "Starting with ipa upload to HockeyApp... this could take some time.".green
    client = Shenzhen::Plugins::HockeyApp::Client.new(options[:api_token])
    # Skip the network call entirely when running under fastlane's tests.
    return if Helper.is_test?
    response = client.upload_build(options[:ipa], options)
    case response.status
    when 200...300
      url = response.body['public_url']
      Actions.lane_context[SharedValues::HOCKEY_DOWNLOAD_LINK] = url
      Actions.lane_context[SharedValues::HOCKEY_BUILD_INFORMATION] = response.body
      Helper.log.info "Public Download URL: #{url}" if url
      Helper.log.info "Build successfully uploaded to HockeyApp!".green
    else
      Helper.log.fatal "Error uploading to HockeyApp: #{response.body}"
      raise "Error when trying to upload ipa to HockeyApp".red
    end
  end
end
end
end |
# Namespacing for the Monban generators gem.
module Monban
  module Generators
    # Gem release version.
    VERSION = '0.0.2'
  end
end
Version bump 0.0.3
# Namespacing for the Monban generators gem.
module Monban
  module Generators
    # Gem release version.
    VERSION = '0.0.3'
  end
end
|
# frozen_string_literal: true
class Fastly::GetVersionDiff
  include Fastly::Request

  request_method :get
  request_path { |r| "/service/#{r.service_id}/diff/from/#{r.from}/to/#{r.to}" }
  parameter :service_id, :from, :to, :format

  # Canned diff payload returned by #mock. Hoisted out of the method body
  # so the 60-line fixture is built once at load time instead of being
  # re-evaluated on every call, and so #mock reads as a one-screen method.
  MOCK_DIFF = <<-'YAML-ISH'.strip
backends:
- name: My Backend
address: backend.example.com
auto_loadbalance: '0'
between_bytes_timeout: 10000
client_cert:
comment: ''
connect_timeout: 1000
error_threshold: 0
first_byte_timeout: 15000
healthcheck:
hostname: www.example.com
ipv4:
ipv6:
max_conn: 200
port: 80
request_condition: ''
shield:
ssl_ca_cert:
ssl_client_cert:
ssl_client_key:
ssl_hostname:
use_ssl: false
weight: 100
cache_settings: []
comment: ''
conditions: []
deployed:
directors: []
domains:
- name: www.example.com
comment: ''
gzips: []
-headers: []
+headers:
+- name: Debug
+ action: set
+ cache_condition:
+ dst: http.X-Test
+ ignore_if_set: '0'
+ priority: '10'
+ regex: ''
+ request_condition:
+ response_condition:
+ src: '"testing"'
+ substitution: ''
+ type: request
healthchecks: []
matches: []
origins: []
request_settings: []
response_objects: []
service_id: SU1Z0isxPaozGVKXdv0eY
settings:
general.default_host: ''
general.default_ttl: 3600
staging:
syslogs: []
testing:
vcls: []
wordpress: []
}
  YAML-ISH

  # Offline stand-in for the live diff endpoint: echoes the request
  # parameters and returns the canned diff body.
  def mock
    mock_response(
      'from' => from,
      'to' => to,
      'format' => format,
      'diff' => MOCK_DIFF
    )
  end
end
style(mock): move mock diff to a constant
# frozen_string_literal: true
class Fastly::GetVersionDiff
include Fastly::Request
request_method :get
request_path { |r| "/service/#{r.service_id}/diff/from/#{r.from}/to/#{r.to}" }
parameter :service_id, :from, :to, :format
MOCK_DIFF = <<-'YAML-ISH'.strip
backends:
- name: My Backend
address: backend.example.com
auto_loadbalance: '0'
between_bytes_timeout: 10000
client_cert:
comment: ''
connect_timeout: 1000
error_threshold: 0
first_byte_timeout: 15000
healthcheck:
hostname: www.example.com
ipv4:
ipv6:
max_conn: 200
port: 80
request_condition: ''
shield:
ssl_ca_cert:
ssl_client_cert:
ssl_client_key:
ssl_hostname:
use_ssl: false
weight: 100
cache_settings: []
comment: ''
conditions: []
deployed:
directors: []
domains:
- name: www.example.com
comment: ''
gzips: []
-headers: []
+headers:
+- name: Debug
+ action: set
+ cache_condition:
+ dst: http.X-Test
+ ignore_if_set: '0'
+ priority: '10'
+ regex: ''
+ request_condition:
+ response_condition:
+ src: '"testing"'
+ substitution: ''
+ type: request
healthchecks: []
matches: []
origins: []
request_settings: []
response_objects: []
service_id: SU1Z0isxPaozGVKXdv0eY
settings:
general.default_host: ''
general.default_ttl: 3600
staging:
syslogs: []
testing:
vcls: []
wordpress: []
}
YAML-ISH
# Build a canned API response for offline/test mode.
#
# Echoes back the requested from/to versions and format, and returns the
# fixture diff stored in MOCK_DIFF under the 'diff' key.
def mock
  mock_response(
    'from' => from,
    'to' => to,
    'format' => format,
    'diff' => MOCK_DIFF,
  )
end
end
|
# encoding: UTF-8
# --
# Copyright (C) 2008-2011 10gen Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ++
module Mongo
# Instantiates and manages connections to a MongoDB replica set.
class ReplSetConnection < Connection
REPL_SET_OPTS = [:read, :refresh_mode, :refresh_interval, :require_primary,
:read_secondary, :rs_name, :name]
attr_reader :replica_set_name, :seeds, :refresh_interval, :refresh_mode,
:refresh_version, :manager
# Create a connection to a MongoDB replica set.
#
# If no args are provided, it will check <code>ENV["MONGODB_URI"]</code>.
#
# Once connected to a replica set, you can find out which nodes are primary, secondary, and
# arbiters with the corresponding accessors: Connection#primary, Connection#secondaries, and
# Connection#arbiters. This is useful if your application needs to connect manually to nodes other
# than the primary.
#
# @param [Array] seeds "host:port" strings
#
# @option opts [String] :name (nil) The name of the replica set to connect to. You
# can use this option to verify that you're connecting to the right replica set.
# @option opts [Boolean, Hash] :safe (false) Set the default safe-mode options
# propagated to DB objects instantiated off of this Connection. This
# default can be overridden upon instantiation of any DB by explicitly setting a :safe value
# on initialization.
# @option opts [:primary, :secondary] :read (:primary) The default read preference for Mongo::DB
# objects created from this connection object. If +:secondary+ is chosen, reads will be sent
# to one of the closest available secondary nodes. If a secondary node cannot be located, the
# read will be sent to the primary.
# @option opts [Logger] :logger (nil) Logger instance to receive driver operation log.
# @option opts [Integer] :pool_size (1) The maximum number of socket connections allowed per
# connection pool. Note: this setting is relevant only for multi-threaded applications.
# @option opts [Float] :pool_timeout (5.0) When all of the connections a pool are checked out,
# this is the number of seconds to wait for a new connection to be released before throwing an exception.
# Note: this setting is relevant only for multi-threaded applications.
# @option opts [Float] :op_timeout (nil) The number of seconds to wait for a read operation to time out.
# @option opts [Float] :connect_timeout (30) The number of seconds to wait before timing out a
# connection attempt.
# @option opts [Boolean] :ssl (false) If true, create the connection to the server using SSL.
# @option opts [Boolean] :refresh_mode (false) Set this to :sync to periodically update the
# state of the connection every :refresh_interval seconds. Replica set connection failures
# will always trigger a complete refresh. This option is useful when you want to add new nodes
# or remove replica set nodes not currently in use by the driver.
# @option opts [Integer] :refresh_interval (90) If :refresh_mode is enabled, this is the number of seconds
# between calls to check the replica set's state.
# @option opts [Boolean] :require_primary (true) If true, require a primary node for the connection
# to succeed. Otherwise, connection will succeed as long as there's at least one secondary node.
# Note: that the number of seed nodes does not have to be equal to the number of replica set members.
# The purpose of seed nodes is to permit the driver to find at least one replica set member even if a member is down.
#
# @example Connect to a replica set and provide two seed nodes.
# Mongo::ReplSetConnection.new(['localhost:30000', 'localhost:30001'])
#
# @example Connect to a replica set providing two seed nodes and ensuring a connection to the replica set named 'prod':
# Mongo::ReplSetConnection.new(['localhost:30000', 'localhost:30001'], :name => 'prod')
#
# @example Connect to a replica set providing two seed nodes and allowing reads from a secondary node:
# Mongo::ReplSetConnection.new(['localhost:30000', 'localhost:30001'], :read => :secondary)
#
# @see http://api.mongodb.org/ruby/current/file.REPLICA_SETS.html Replica sets in Ruby
#
# @raise [MongoArgumentError] This is raised for usage errors.
#
# @raise [ConnectionFailure] This is raised for the various connection failures.
def initialize(*args)
if args.last.is_a?(Hash)
opts = args.pop
else
opts = {}
end
nodes = args
if nodes.empty? and ENV.has_key?('MONGODB_URI')
parser = URIParser.new ENV['MONGODB_URI']
if parser.direct?
raise MongoArgumentError, "Mongo::ReplSetConnection.new called with no arguments, but ENV['MONGODB_URI'] implies a direct connection."
end
opts = parser.connection_options.merge! opts
nodes = [parser.nodes]
end
unless nodes.length > 0
raise MongoArgumentError, "A ReplSetConnection requires at least one seed node."
end
# This is temporary until support for the old format is dropped
if nodes.first.last.is_a?(Integer)
warn "Initiating a ReplSetConnection with seeds passed as individual [host, port] array arguments is deprecated."
warn "Please specify hosts as an array of 'host:port' strings; the old format will be removed in v2.0"
@seeds = nodes
else
@seeds = nodes.first.map do |host_port|
host, port = host_port.split(":")
[ host, port.to_i ]
end
end
# TODO: add a method for replacing this list of node.
@seeds.freeze
# Refresh
@last_refresh = Time.now
@refresh_version = 0
# No connection manager by default.
@manager = nil
@old_managers = []
# Lock for request ids.
@id_lock = Mutex.new
@pool_mutex = Mutex.new
@connected = false
@safe_mutex_lock = Mutex.new
@safe_mutexes = Hash.new {|hash, key| hash[key] = Mutex.new}
@connect_mutex = Mutex.new
@refresh_mutex = Mutex.new
check_opts(opts)
setup(opts)
end
def valid_opts
GENERIC_OPTS + REPL_SET_OPTS
end
def inspect
"<Mongo::ReplSetConnection:0x#{self.object_id.to_s(16)} @seeds=#{@seeds.inspect} " +
"@connected=#{@connected}>"
end
# Initiate a connection to the replica set.
def connect
log(:info, "Connecting...")
@connect_mutex.synchronize do
return if @connected
discovered_seeds = @manager ? @manager.seeds : []
@manager = PoolManager.new(self, discovered_seeds)
Thread.current[:managers] ||= Hash.new
Thread.current[:managers][self] = @manager
@manager.connect
@refresh_version += 1
if @require_primary && @manager.primary.nil? #TODO: in v2.0, we'll let this be optional and do a lazy connect.
close
raise ConnectionFailure, "Failed to connect to primary node."
elsif @manager.read_pool.nil?
close
raise ConnectionFailure, "Failed to connect to any node."
else
@connected = true
end
end
end
# Determine whether a replica set refresh is
# required. If so, run a hard refresh. You can
# force a hard refresh by running
# ReplSetConnection#hard_refresh!
#
# @return [Boolean] +true+ unless a hard refresh
# is run and the refresh lock can't be acquired.
def refresh(opts={})
if !connected?
log(:info, "Trying to check replica set health but not " +
"connected...")
return hard_refresh!
end
log(:debug, "Checking replica set connection health...")
@manager.check_connection_health
if @manager.refresh_required?
return hard_refresh!
end
return true
end
# Force a hard refresh of this connection's view
# of the replica set.
#
# @return [Boolean] +true+ if hard refresh
# occurred. +false+ is returned when unable
# to get the refresh lock.
def hard_refresh!
log(:info, "Initiating hard refresh...")
discovered_seeds = @manager ? @manager.seeds : []
new_manager = PoolManager.new(self, discovered_seeds | @seeds)
new_manager.connect
Thread.current[:managers][self] = new_manager
# TODO: make sure that connect has succeeded
@old_managers << @manager
@manager = new_manager
@refresh_version += 1
return true
end
def connected?
@connected && (@manager.primary_pool || @manager.read_pool)
end
# @deprecated
def connecting?
warn "ReplSetConnection#connecting? is deprecated and will be removed in v2.0."
false
end
# The replica set primary's host name.
#
# @return [String]
def host
@manager.primary_pool.host
end
# The replica set primary's port.
#
# @return [Integer]
def port
@manager.primary_pool.port
end
def nodes
warn "ReplSetConnection#nodes is DEPRECATED and will be removed in v2.0. " +
"Please use ReplSetConnection#seeds instead."
@seeds
end
# Determine whether we're reading from a primary node. If false,
# this connection connects to a secondary node and @read_secondaries is true.
#
# @return [Boolean]
def read_primary?
@manager.read_pool == @manager.primary_pool
end
alias :primary? :read_primary?
def read_preference
@read
end
# Close the connection to the database.
def close(opts={})
if opts[:soft]
@manager.close(:soft => true) if @manager
else
@manager.close if @manager
end
# Clear the reference to this object.
if Thread.current[:managers]
Thread.current[:managers].delete(self)
end
@connected = false
end
# If a ConnectionFailure is raised, this method will be called
# to close the connection and reset connection values.
# @deprecated
def reset_connection
close
warn "ReplSetConnection#reset_connection is now deprecated and will be removed in v2.0. " +
"Use ReplSetConnection#close instead."
end
# Returns +true+ if it's okay to read from a secondary node.
# Since this is a replica set, this must always be true.
#
# This method exist primarily so that Cursor objects will
# generate query messages with a slaveOkay value of +true+.
#
# @return [Boolean] +true+
def slave_ok?
true
end
def authenticate_pools
if primary_pool
primary_pool.authenticate_existing
end
secondary_pools.each do |pool|
pool.authenticate_existing
end
end
def logout_pools(db)
if primary_pool
primary_pool.logout_existing(db)
end
secondary_pools.each do |pool|
pool.logout_existing(db)
end
end
# Generic socket checkout
# Takes a block that returns a socket from pool
def checkout(&block)
if connected?
sync_refresh
else
connect
end
begin
socket = block.call
rescue => ex
checkin(socket) if socket
raise ex
end
if socket
socket
else
@connected = false
raise ConnectionFailure.new("Could not checkout a socket.")
end
end
# Checkout best available socket by trying primary
# pool first and then falling back to secondary.
def checkout_best
checkout do
socket = get_socket_from_pool(:primary)
if !socket
connect
socket = get_socket_from_pool(:secondary)
end
socket
end
end
# Checkout a socket for reading (i.e., a secondary node).
# Note that @read_pool might point to the primary pool
# if no read pool has been defined.
def checkout_reader
checkout do
socket = get_socket_from_pool(:read)
if !socket
connect
socket = get_socket_from_pool(:primary)
end
socket
end
end
# Checkout a socket from a secondary
# For :read_preference => :secondary_only
def checkout_secondary
checkout do
get_socket_from_pool(:secondary)
end
end
# Checkout a socket for writing (i.e., a primary node).
def checkout_writer
checkout do
get_socket_from_pool(:primary)
end
end
# Checkin a socket used for reading.
def checkin(socket)
if socket
socket.pool.checkin(socket)
end
sync_refresh
end
def close_socket(socket)
begin
socket.close if socket
rescue IOError
log(:info, "Tried to close socket #{socket} but already closed.")
end
end
def ensure_manager
Thread.current[:managers] ||= Hash.new
if Thread.current[:managers][self] != @manager
Thread.current[:managers][self] = @manager
end
end
def get_socket_from_pool(pool_type)
ensure_manager
pool = case pool_type
when :primary
primary_pool
when :secondary
secondary_pool
when :read
read_pool
end
begin
if pool
pool.checkout
end
rescue ConnectionFailure => ex
log(:info, "Failed to checkout from #{pool} with #{ex.class}; #{ex.message}")
return nil
end
end
def local_manager
Thread.current[:managers][self] if Thread.current[:managers]
end
# Arbiter nodes known to the current thread-local pool manager.
#
# @return [Array] the discovered arbiters, or [] when no manager is
#   available yet or no arbiters were discovered.
def arbiters
  # Guard against a nil local_manager, consistent with #primary,
  # #secondaries, #hosts, etc. Previously this raised NoMethodError
  # when called before the first connect.
  return [] unless local_manager && local_manager.arbiters
  local_manager.arbiters
end
def primary
local_manager ? local_manager.primary : nil
end
# Note: might want to freeze these after connecting.
def secondaries
local_manager ? local_manager.secondaries : []
end
def hosts
local_manager ? local_manager.hosts : []
end
def primary_pool
local_manager ? local_manager.primary_pool : nil
end
def read_pool
local_manager ? local_manager.read_pool : nil
end
def secondary_pool
local_manager ? local_manager.secondary_pool : nil
end
def secondary_pools
local_manager ? local_manager.secondary_pools : []
end
def tag_map
local_manager ? local_manager.tag_map : {}
end
def max_bson_size
if local_manager && local_manager.max_bson_size
local_manager.max_bson_size
else
Mongo::DEFAULT_MAX_BSON_SIZE
end
end
private
# Parse option hash
# Parse the option hash and initialize refresh, read-preference, and
# replica-set-name settings, then delegate the rest to Connection#setup.
#
# @param [Hash] opts options passed to the connection constructor.
#
# @raise [MongoArgumentError] if :refresh_mode is neither :sync nor false.
def setup(opts)
  # Require a primary node to connect?
  @require_primary = opts.fetch(:require_primary, true)

  # Refresh
  @refresh_mode = opts.fetch(:refresh_mode, false)
  @refresh_interval = opts.fetch(:refresh_interval, 90)

  if @refresh_mode && @refresh_interval < 60
    # BUG FIX: was `ENV['TEST_MODE'] = 'TRUE'` (assignment), which mutated
    # the environment and made the condition always truthy, so the 60s
    # minimum was never enforced. Compare instead of assign.
    @refresh_interval = 60 unless ENV['TEST_MODE'] == 'TRUE'
  end

  if @refresh_mode == :async
    # Fixed warn message: the original string literal spanned two source
    # lines, embedding a newline and stray indentation in the output.
    # NOTE(review): the message says refresh will be disabled, but the
    # mode is not actually reset here — preserved as-is; confirm intent.
    warn ":async refresh mode has been deprecated. Refresh mode will be disabled."
  elsif ![:sync, false].include?(@refresh_mode)
    raise MongoArgumentError,
      "Refresh mode must be either :sync or false."
  end

  # Are we allowing reads from secondaries?
  if opts[:read_secondary]
    warn ":read_secondary options has now been deprecated and will " +
      "be removed in driver v2.0. Use the :read option instead."
    @read_secondary = opts.fetch(:read_secondary, false)
    @read = :secondary
  else
    @read = opts.fetch(:read, :primary)
    Mongo::Support.validate_read_preference(@read)
  end

  # Replica set name
  if opts[:rs_name]
    warn ":rs_name option has been deprecated and will be removed in v2.0. " +
      "Please use :name instead."
    @replica_set_name = opts[:rs_name]
  else
    @replica_set_name = opts[:name]
  end

  # Default connection timeout of 30s unless the caller supplied one.
  opts[:connect_timeout] ||= 30

  super opts
end
# Checkout a socket connected to a node with one of
# the provided tags. If no such node exists, raise
# an exception.
#
# NOTE: will be available in driver release v2.0.
def checkout_tagged(tags)
tags.each do |k, v|
pool = self.tag_map[{k.to_s => v}]
if pool
socket = pool.checkout
return socket
end
end
raise NodeWithTagsNotFound,
"Could not find a connection tagged with #{tags}."
end
# Close and discard stale pool managers left behind by hard refreshes.
#
# A stale manager is removed once it reports closed?; an open one is
# soft-closed now and removed on a later pass. The current @manager is
# never touched.
def prune_managers
  # BUG FIX: the previous implementation called @old_managers.delete
  # inside @old_managers.each — mutating an array while iterating it
  # skips elements, so some managers were never soft-closed. reject!
  # performs the removal safely.
  @old_managers.reject! do |manager|
    next false if manager == @manager
    if manager.closed?
      true
    else
      manager.close(:soft => true)
      false
    end
  end
end
def sync_refresh
if @refresh_mode == :sync &&
((Time.now - @last_refresh) > @refresh_interval)
@last_refresh = Time.now
if @refresh_mutex.try_lock
begin
refresh
prune_managers
ensure
@refresh_mutex.unlock
end
end
end
end
end
end
minor: ReplSetConnection#initialize cleanup
Clean up documentation
Shortened opts pop
# encoding: UTF-8
# --
# Copyright (C) 2008-2011 10gen Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ++
module Mongo
# Instantiates and manages connections to a MongoDB replica set.
class ReplSetConnection < Connection
REPL_SET_OPTS = [:read, :refresh_mode, :refresh_interval, :require_primary,
:read_secondary, :rs_name, :name]
attr_reader :replica_set_name, :seeds, :refresh_interval, :refresh_mode,
:refresh_version, :manager
# Create a connection to a MongoDB replica set.
#
# If no args are provided, it will check <code>ENV["MONGODB_URI"]</code>.
#
# Once connected to a replica set, you can find out which nodes are primary, secondary, and
# arbiters with the corresponding accessors: Connection#primary, Connection#secondaries, and
# Connection#arbiters. This is useful if your application needs to connect manually to nodes other
# than the primary.
#
# @overload initialize(seeds=ENV["MONGODB_URI"], opts={})
# @param [Array<String>, Array<Array(String, Integer)>] seeds
#
# @option opts [Boolean, Hash] :safe (false) Set the default safe-mode options
# propagated to DB objects instantiated off of this Connection. This
# default can be overridden upon instantiation of any DB by explicitly setting a :safe value
# on initialization.
# @option opts [:primary, :secondary, :secondary_only] :read (:primary) The default read preference for Mongo::DB
# objects created from this connection object. If +:secondary+ is chosen, reads will be sent
# to one of the closest available secondary nodes. If a secondary node cannot be located, the
# read will be sent to the primary.
# @option opts [Logger] :logger (nil) Logger instance to receive driver operation log.
# @option opts [Integer] :pool_size (1) The maximum number of socket connections allowed per
# connection pool. Note: this setting is relevant only for multi-threaded applications.
# @option opts [Float] :pool_timeout (5.0) When all of the connections a pool are checked out,
# this is the number of seconds to wait for a new connection to be released before throwing an exception.
# Note: this setting is relevant only for multi-threaded applications.
# @option opts [Float] :op_timeout (nil) The number of seconds to wait for a read operation to time out.
# @option opts [Float] :connect_timeout (30) The number of seconds to wait before timing out a
# connection attempt.
# @option opts [Boolean] :ssl (false) If true, create the connection to the server using SSL.
# @option opts [Boolean] :refresh_mode (false) Set this to :sync to periodically update the
# state of the connection every :refresh_interval seconds. Replica set connection failures
# will always trigger a complete refresh. This option is useful when you want to add new nodes
# or remove replica set nodes not currently in use by the driver.
# @option opts [Integer] :refresh_interval (90) If :refresh_mode is enabled, this is the number of seconds
# between calls to check the replica set's state.
# @option opts [Boolean] :require_primary (true) If true, require a primary node for the connection
# to succeed. Otherwise, connection will succeed as long as there's at least one secondary node.
# @note the number of seed nodes does not have to be equal to the number of replica set members.
# The purpose of seed nodes is to permit the driver to find at least one replica set member even if a member is down.
#
# @example Connect to a replica set and provide two seed nodes.
# Mongo::ReplSetConnection.new(['localhost:30000', 'localhost:30001'])
#
# @example Connect to a replica set providing two seed nodes and ensuring a connection to the replica set named 'prod':
# Mongo::ReplSetConnection.new(['localhost:30000', 'localhost:30001'], :name => 'prod')
#
# @example Connect to a replica set providing two seed nodes and allowing reads from a secondary node:
# Mongo::ReplSetConnection.new(['localhost:30000', 'localhost:30001'], :read => :secondary)
#
# @see http://api.mongodb.org/ruby/current/file.REPLICA_SETS.html Replica sets in Ruby
#
# @raise [MongoArgumentError] This is raised for usage errors.
#
# @raise [ConnectionFailure] This is raised for the various connection failures.
def initialize(*args)
opts = args.last.is_a?(Hash) ? args.pop : {}
nodes = args
if nodes.empty? and ENV.has_key?('MONGODB_URI')
parser = URIParser.new ENV['MONGODB_URI']
if parser.direct?
raise MongoArgumentError, "Mongo::ReplSetConnection.new called with no arguments, but ENV['MONGODB_URI'] implies a direct connection."
end
opts = parser.connection_options.merge! opts
nodes = [parser.nodes]
end
unless nodes.length > 0
raise MongoArgumentError, "A ReplSetConnection requires at least one seed node."
end
# This is temporary until support for the old format is dropped
if nodes.first.last.is_a?(Integer)
warn "Initiating a ReplSetConnection with seeds passed as individual [host, port] array arguments is deprecated."
warn "Please specify hosts as an array of 'host:port' strings; the old format will be removed in v2.0"
@seeds = nodes
else
@seeds = nodes.first.map do |host_port|
host, port = host_port.split(":")
[ host, port.to_i ]
end
end
@seeds.freeze
# Refresh
@last_refresh = Time.now
@refresh_version = 0
# No connection manager by default.
@manager = nil
@old_managers = []
# Lock for request ids.
@id_lock = Mutex.new
@pool_mutex = Mutex.new
@connected = false
@safe_mutex_lock = Mutex.new
@safe_mutexes = Hash.new {|hash, key| hash[key] = Mutex.new}
@connect_mutex = Mutex.new
@refresh_mutex = Mutex.new
check_opts(opts)
setup(opts)
end
def valid_opts
GENERIC_OPTS + REPL_SET_OPTS
end
def inspect
"<Mongo::ReplSetConnection:0x#{self.object_id.to_s(16)} @seeds=#{@seeds.inspect} " +
"@connected=#{@connected}>"
end
# Initiate a connection to the replica set.
def connect
log(:info, "Connecting...")
@connect_mutex.synchronize do
return if @connected
discovered_seeds = @manager ? @manager.seeds : []
@manager = PoolManager.new(self, discovered_seeds)
Thread.current[:managers] ||= Hash.new
Thread.current[:managers][self] = @manager
@manager.connect
@refresh_version += 1
if @require_primary && @manager.primary.nil? #TODO: in v2.0, we'll let this be optional and do a lazy connect.
close
raise ConnectionFailure, "Failed to connect to primary node."
elsif @manager.read_pool.nil?
close
raise ConnectionFailure, "Failed to connect to any node."
else
@connected = true
end
end
end
# Determine whether a replica set refresh is
# required. If so, run a hard refresh. You can
# force a hard refresh by running
# ReplSetConnection#hard_refresh!
#
# @return [Boolean] +true+ unless a hard refresh
# is run and the refresh lock can't be acquired.
def refresh(opts={})
if !connected?
log(:info, "Trying to check replica set health but not " +
"connected...")
return hard_refresh!
end
log(:debug, "Checking replica set connection health...")
@manager.check_connection_health
if @manager.refresh_required?
return hard_refresh!
end
return true
end
# Force a hard refresh of this connection's view
# of the replica set.
#
# @return [Boolean] +true+ if hard refresh
# occurred. +false+ is returned when unable
# to get the refresh lock.
def hard_refresh!
log(:info, "Initiating hard refresh...")
discovered_seeds = @manager ? @manager.seeds : []
new_manager = PoolManager.new(self, discovered_seeds | @seeds)
new_manager.connect
Thread.current[:managers][self] = new_manager
# TODO: make sure that connect has succeeded
@old_managers << @manager
@manager = new_manager
@refresh_version += 1
return true
end
def connected?
@connected && (@manager.primary_pool || @manager.read_pool)
end
# @deprecated
def connecting?
warn "ReplSetConnection#connecting? is deprecated and will be removed in v2.0."
false
end
# The replica set primary's host name.
#
# @return [String]
def host
@manager.primary_pool.host
end
# The replica set primary's port.
#
# @return [Integer]
def port
@manager.primary_pool.port
end
def nodes
warn "ReplSetConnection#nodes is DEPRECATED and will be removed in v2.0. " +
"Please use ReplSetConnection#seeds instead."
@seeds
end
# Determine whether we're reading from a primary node. If false,
# this connection connects to a secondary node and @read_secondaries is true.
#
# @return [Boolean]
def read_primary?
@manager.read_pool == @manager.primary_pool
end
alias :primary? :read_primary?
def read_preference
@read
end
# Close the connection to the database.
def close(opts={})
if opts[:soft]
@manager.close(:soft => true) if @manager
else
@manager.close if @manager
end
# Clear the reference to this object.
if Thread.current[:managers]
Thread.current[:managers].delete(self)
end
@connected = false
end
# If a ConnectionFailure is raised, this method will be called
# to close the connection and reset connection values.
# @deprecated
def reset_connection
close
warn "ReplSetConnection#reset_connection is now deprecated and will be removed in v2.0. " +
"Use ReplSetConnection#close instead."
end
# Returns +true+ if it's okay to read from a secondary node.
# Since this is a replica set, this must always be true.
#
# This method exist primarily so that Cursor objects will
# generate query messages with a slaveOkay value of +true+.
#
# @return [Boolean] +true+
def slave_ok?
true
end
def authenticate_pools
if primary_pool
primary_pool.authenticate_existing
end
secondary_pools.each do |pool|
pool.authenticate_existing
end
end
def logout_pools(db)
if primary_pool
primary_pool.logout_existing(db)
end
secondary_pools.each do |pool|
pool.logout_existing(db)
end
end
# Generic socket checkout
# Takes a block that returns a socket from pool
def checkout(&block)
if connected?
sync_refresh
else
connect
end
begin
socket = block.call
rescue => ex
checkin(socket) if socket
raise ex
end
if socket
socket
else
@connected = false
raise ConnectionFailure.new("Could not checkout a socket.")
end
end
# Checkout best available socket by trying primary
# pool first and then falling back to secondary.
def checkout_best
checkout do
socket = get_socket_from_pool(:primary)
if !socket
connect
socket = get_socket_from_pool(:secondary)
end
socket
end
end
# Checkout a socket for reading (i.e., a secondary node).
# Note that @read_pool might point to the primary pool
# if no read pool has been defined.
def checkout_reader
checkout do
socket = get_socket_from_pool(:read)
if !socket
connect
socket = get_socket_from_pool(:primary)
end
socket
end
end
# Checkout a socket from a secondary
# For :read_preference => :secondary_only
def checkout_secondary
checkout do
get_socket_from_pool(:secondary)
end
end
# Checkout a socket for writing (i.e., a primary node).
def checkout_writer
checkout do
get_socket_from_pool(:primary)
end
end
# Checkin a socket used for reading.
def checkin(socket)
if socket
socket.pool.checkin(socket)
end
sync_refresh
end
# Close the given socket, tolerating a nil argument and the race where
# another thread already closed it (IOError is logged, not raised).
def close_socket(socket)
  return unless socket
  socket.close
rescue IOError
  log(:info, "Tried to close socket #{socket} but already closed.")
end
def ensure_manager
Thread.current[:managers] ||= Hash.new
if Thread.current[:managers][self] != @manager
Thread.current[:managers][self] = @manager
end
end
def get_socket_from_pool(pool_type)
ensure_manager
pool = case pool_type
when :primary
primary_pool
when :secondary
secondary_pool
when :read
read_pool
end
begin
if pool
pool.checkout
end
rescue ConnectionFailure => ex
log(:info, "Failed to checkout from #{pool} with #{ex.class}; #{ex.message}")
return nil
end
end
def local_manager
Thread.current[:managers][self] if Thread.current[:managers]
end
# Arbiter nodes known to the current thread-local pool manager.
#
# @return [Array] the discovered arbiters, or [] when no manager is
#   available yet or no arbiters were discovered.
def arbiters
  # Guard against a nil local_manager, consistent with #primary,
  # #secondaries, #hosts, etc. Previously this raised NoMethodError
  # when called before the first connect.
  return [] unless local_manager && local_manager.arbiters
  local_manager.arbiters
end
def primary
local_manager ? local_manager.primary : nil
end
# Note: might want to freeze these after connecting.
def secondaries
local_manager ? local_manager.secondaries : []
end
def hosts
local_manager ? local_manager.hosts : []
end
def primary_pool
local_manager ? local_manager.primary_pool : nil
end
def read_pool
local_manager ? local_manager.read_pool : nil
end
def secondary_pool
local_manager ? local_manager.secondary_pool : nil
end
def secondary_pools
local_manager ? local_manager.secondary_pools : []
end
def tag_map
local_manager ? local_manager.tag_map : {}
end
def max_bson_size
if local_manager && local_manager.max_bson_size
local_manager.max_bson_size
else
Mongo::DEFAULT_MAX_BSON_SIZE
end
end
private
# Parse option hash
# Parse the option hash and initialize refresh, read-preference, and
# replica-set-name settings, then delegate the rest to Connection#setup.
#
# @param [Hash] opts options passed to the connection constructor.
#
# @raise [MongoArgumentError] if :refresh_mode is neither :sync nor false.
def setup(opts)
  # Require a primary node to connect?
  @require_primary = opts.fetch(:require_primary, true)

  # Refresh
  @refresh_mode = opts.fetch(:refresh_mode, false)
  @refresh_interval = opts.fetch(:refresh_interval, 90)

  if @refresh_mode && @refresh_interval < 60
    # BUG FIX: was `ENV['TEST_MODE'] = 'TRUE'` (assignment), which mutated
    # the environment and made the condition always truthy, so the 60s
    # minimum was never enforced. Compare instead of assign.
    @refresh_interval = 60 unless ENV['TEST_MODE'] == 'TRUE'
  end

  if @refresh_mode == :async
    # Fixed warn message: the original string literal spanned two source
    # lines, embedding a newline and stray indentation in the output.
    # NOTE(review): the message says refresh will be disabled, but the
    # mode is not actually reset here — preserved as-is; confirm intent.
    warn ":async refresh mode has been deprecated. Refresh mode will be disabled."
  elsif ![:sync, false].include?(@refresh_mode)
    raise MongoArgumentError,
      "Refresh mode must be either :sync or false."
  end

  # Are we allowing reads from secondaries?
  if opts[:read_secondary]
    warn ":read_secondary options has now been deprecated and will " +
      "be removed in driver v2.0. Use the :read option instead."
    @read_secondary = opts.fetch(:read_secondary, false)
    @read = :secondary
  else
    @read = opts.fetch(:read, :primary)
    Mongo::Support.validate_read_preference(@read)
  end

  # Replica set name
  if opts[:rs_name]
    warn ":rs_name option has been deprecated and will be removed in v2.0. " +
      "Please use :name instead."
    @replica_set_name = opts[:rs_name]
  else
    @replica_set_name = opts[:name]
  end

  # Default connection timeout of 30s unless the caller supplied one.
  opts[:connect_timeout] ||= 30

  super opts
end
# Checkout a socket connected to a node with one of
# the provided tags. If no such node exists, raise
# an exception.
#
# NOTE: will be available in driver release v2.0.
def checkout_tagged(tags)
tags.each do |k, v|
pool = self.tag_map[{k.to_s => v}]
if pool
socket = pool.checkout
return socket
end
end
raise NodeWithTagsNotFound,
"Could not find a connection tagged with #{tags}."
end
# Close and discard stale pool managers left behind by hard refreshes.
#
# A stale manager is removed once it reports closed?; an open one is
# soft-closed now and removed on a later pass. The current @manager is
# never touched.
def prune_managers
  # BUG FIX: the previous implementation called @old_managers.delete
  # inside @old_managers.each — mutating an array while iterating it
  # skips elements, so some managers were never soft-closed. reject!
  # performs the removal safely.
  @old_managers.reject! do |manager|
    next false if manager == @manager
    if manager.closed?
      true
    else
      manager.close(:soft => true)
      false
    end
  end
end
def sync_refresh
if @refresh_mode == :sync &&
((Time.now - @last_refresh) > @refresh_interval)
@last_refresh = Time.now
if @refresh_mutex.try_lock
begin
refresh
prune_managers
ensure
@refresh_mutex.unlock
end
end
end
end
end
end
|
# frozen_string_literal: true
require 'date'
require_relative '../utils/query_builder'
module FinApps
module REST
class Screenings < FinAppsCore::REST::Resources # :nodoc:
include FinApps::Utils::QueryBuilder
def show(id)
not_blank(id, :session_id)
path = "#{end_point}/#{ERB::Util.url_encode(id)}/resume"
super(nil, path)
end
def tenant_schemas
path = 'schemas'
send_request path, :get
end
def last(consumer_id)
not_blank(consumer_id, :consumer_id)
path = "#{end_point}/#{ERB::Util.url_encode(consumer_id)}/consumer"
send_request_for_id path, :get, nil
end
def create(params)
not_blank(params, :params)
super params
end
# List screening sessions, optionally filtered via a params hash.
#
# @param [Hash, nil] params query parameters; nil lists everything.
#
# @raise [FinAppsCore::InvalidArgumentsError] when params is not a Hash.
def list(params = nil)
  return super if params.nil?
  # `raise` is preferred over the `fail` alias (Style/SignalException).
  raise FinAppsCore::InvalidArgumentsError, 'Invalid argument: params' unless params.is_a? Hash

  super build_query_path(end_point, params)
end
def update(id, params)
not_blank(id, :session_id)
not_blank(params, :params)
path = "#{end_point}/#{ERB::Util.url_encode(id)}"
super params, path
end
def destroy(id)
not_blank(id, :session_id)
super
end
private
def build_filter(params)
term_filter(params[:searchTerm])
.merge(date_range_filter(params[:fromDate], params[:toDate]))
.merge(progress_filter(params[:progress]))
end
def term_filter(term)
return {} unless term
{'$or': term_array(term) + split_term_array(term)}
end
def term_array(term)
[
{'consumer.public_id': term},
{'consumer.email': term},
{'consumer.first_name': term},
{'consumer.last_name': term},
{'consumer.external_id': term}
]
end
def split_term_array(term)
return [] unless space?(term)
arr = []
term.split.each do |t|
arr.append('consumer.first_name': t)
arr.append('consumer.last_name': t)
end
arr
end
def date_range_filter(from_date, to_date)
return {} unless from_date || to_date
{'*date_created': from_filter(from_date).merge(to_filter(to_date))}
end
def from_filter(from_date)
return {} unless from_date
{'$gte': to_rfc_date(from_date.to_s)}
end
def to_filter(to_date)
return {} unless to_date
{'$lt': to_rfc_date(to_date.to_s)}
end
# Normalize an arbitrary parseable date string to RFC 3339 format.
def to_rfc_date(str)
  DateTime.parse(str).rfc3339
end
def progress_filter(progress)
return {} unless progress
{progress: progress.to_i}
end
def space?(string)
/\s/.match?(string)
end
end
end
end
Remove parsing and formatting of dates
Leave that responsibility to the client. This library will pass through
date values in the filter in the same format as they are received.
# frozen_string_literal: true
require 'date'
require_relative '../utils/query_builder'
module FinApps
module REST
# Screenings API resource: thin wrapper over FinAppsCore::REST::Resources
# adding id validation, path building and Mongo-style list filtering.
class Screenings < FinAppsCore::REST::Resources # :nodoc:
  include FinApps::Utils::QueryBuilder

  # Resumes an existing screening session.
  #
  # id - session identifier; must not be blank.
  def show(id)
    not_blank(id, :session_id)
    path = "#{end_point}/#{ERB::Util.url_encode(id)}/resume"
    super(nil, path)
  end

  # Fetches the tenant's screening schemas.
  def tenant_schemas
    path = 'schemas'
    send_request path, :get
  end

  # Fetches the most recent screening session for a consumer.
  def last(consumer_id)
    not_blank(consumer_id, :consumer_id)
    path = "#{end_point}/#{ERB::Util.url_encode(consumer_id)}/consumer"
    send_request_for_id path, :get, nil
  end

  # Creates a screening session from the given payload.
  def create(params)
    not_blank(params, :params)
    super params
  end

  # Lists sessions. params, when given, must be a Hash of query options
  # (supports :searchTerm, :fromDate, :toDate, :progress via build_filter).
  def list(params = nil)
    return super if params.nil?
    fail FinAppsCore::InvalidArgumentsError, 'Invalid argument: params' unless params.is_a? Hash

    super build_query_path(end_point, params)
  end

  # Updates an existing session identified by id.
  def update(id, params)
    not_blank(id, :session_id)
    not_blank(params, :params)
    path = "#{end_point}/#{ERB::Util.url_encode(id)}"
    super params, path
  end

  # Deletes a session by id.
  def destroy(id)
    not_blank(id, :session_id)
    super
  end

  private

  # Composes the filter document from supported params; helpers return {}
  # for absent params so the merges are no-ops.
  def build_filter(params)
    term_filter(params[:searchTerm])
      .merge(date_range_filter(params[:fromDate], params[:toDate]))
      .merge(progress_filter(params[:progress]))
  end

  # `$or` across consumer identity fields; {} when no term given.
  def term_filter(term)
    return {} unless term

    {'$or': term_array(term) + split_term_array(term)}
  end

  # Exact-match clauses for the whole term.
  def term_array(term)
    [
      {'consumer.public_id': term},
      {'consumer.email': term},
      {'consumer.first_name': term},
      {'consumer.last_name': term},
      {'consumer.external_id': term}
    ]
  end

  # For multi-word terms, match each word against first and last name.
  def split_term_array(term)
    return [] unless space?(term)

    arr = []
    term.split.each do |t|
      arr.append('consumer.first_name': t)
      arr.append('consumer.last_name': t)
    end
    arr
  end

  # Dates are passed through verbatim; formatting is the client's concern.
  def date_range_filter(from_date, to_date)
    return {} unless from_date || to_date

    {'*date_created': from_filter(from_date).merge(to_filter(to_date))}
  end

  # Inclusive lower bound; {} when absent.
  def from_filter(from_date)
    return {} unless from_date

    {'$gte': from_date}
  end

  # Exclusive upper bound; {} when absent.
  def to_filter(to_date)
    return {} unless to_date

    {'$lt': to_date}
  end

  # Exact integer progress match; {} when absent.
  def progress_filter(progress)
    return {} unless progress

    {progress: progress.to_i}
  end

  # True when the string contains whitespace.
  def space?(string)
    /\s/.match?(string)
  end
end
end
end
|
# encoding: UTF-8
require 'mongo_mapper/plugins/keys/key'
module MongoMapper
  module Plugins
    # Keys: typed key (field) definitions for documents — declaration,
    # generated accessors, Mongo (de)serialization, and key-driven
    # validations.
    module Keys
      extend ActiveSupport::Concern

      included do
        extend ActiveSupport::DescendantsTracker
        # Every document gets an _id defaulting to a freshly generated ObjectId.
        key :_id, ObjectId, :default => lambda { BSON::ObjectId.new }
      end

      module ClassMethods
        # Subclasses start from a copy of the parent's key definitions.
        def inherited(descendant)
          descendant.instance_variable_set(:@keys, keys.dup)
          super
        end

        # Hash of key name (String) => Key instance.
        def keys
          @keys ||= {}
        end

        # Declares a key: registers the Key, defines accessors, propagates
        # the declaration to already-loaded descendants, and wires up
        # indexes and validations derived from the key's options.
        def key(*args)
          Key.new(*args).tap do |key|
            keys[key.name] = key
            create_accessors_for(key)
            create_key_in_descendants(*args)
            create_indexes_for(key)
            create_validations_for(key)
          end
        end

        # True when a key with this name is declared (string comparison).
        def key?(key)
          keys.key?(key.to_s)
        end

        def using_object_id?
          object_id_key?(:_id)
        end

        # Names (Symbols) of all keys typed as ObjectId. Recomputed on
        # every call, so it always reflects the current key set.
        def object_id_keys
          keys.keys.select { |key| keys[key].type == ObjectId }.map { |k| k.to_sym }
        end

        def object_id_key?(name)
          object_id_keys.include?(name.to_sym)
        end

        # Serializes an instance for Mongo; nil passes through.
        def to_mongo(instance)
          return nil if instance.nil?
          instance.to_mongo
        end

        # Inflates a Mongo value into an instance; nil passes through.
        def from_mongo(value)
          return nil if value.nil?
          value.is_a?(self) ? value : load(value)
        end

        # load is overridden in identity map to ensure same objects are loaded
        # Picks the concrete class from the persisted _type (falling back to
        # self if the constant no longer resolves) and builds an instance
        # WITHOUT running #initialize.
        def load(attrs)
          return nil if attrs.nil?
          begin
            attrs['_type'] ? attrs['_type'].constantize : self
          rescue NameError
            self
          end.allocate.initialize_from_database(attrs)
        end

        private

        def key_accessors_module_defined?
          if method(:const_defined?).arity == 1 # Ruby 1.9 compat check
            const_defined?('MongoMapperKeys')
          else
            const_defined?('MongoMapperKeys', false)
          end
        end

        # Anonymous module holding generated accessors so user-defined
        # methods of the same name can override them and still call super.
        def accessors_module
          if key_accessors_module_defined?
            const_get 'MongoMapperKeys'
          else
            const_set 'MongoMapperKeys', Module.new
          end
        end

        # Generates reader, writer, predicate and *_before_type_cast
        # accessors for the key on the accessors module.
        def create_accessors_for(key)
          accessors_module.module_eval <<-end_eval
            attr_reader :#{key.name}_before_type_cast
            def #{key.name}
              read_key(:#{key.name})
            end
            def #{key.name}=(value)
              write_key(:#{key.name}, value)
            end
            def #{key.name}?
              read_key(:#{key.name}).present?
            end
          end_eval
          include accessors_module
        end

        # Replays the key declaration on every tracked descendant.
        def create_key_in_descendants(*args)
          descendants.each { |descendant| descendant.key(*args) }
        end

        def create_indexes_for(key)
          if key.options[:index] && !key.embeddable?
            warn "[DEPRECATION] :index option when defining key #{key.name.inspect} is deprecated. Put indexes in `db/indexes.rb`"
            ensure_index key.name
          end
        end

        # Maps key options (:required, :unique, :numeric, :format, :in,
        # :not_in, :length) onto the matching ActiveModel validations.
        def create_validations_for(key)
          attribute = key.name.to_sym

          if key.options[:required]
            # Boolean presence must be inclusion: `false` is "present" here.
            if key.type == Boolean
              validates_inclusion_of attribute, :in => [true, false]
            else
              validates_presence_of(attribute)
            end
          end

          if key.options[:unique]
            validates_uniqueness_of(attribute)
          end

          if key.options[:numeric]
            number_options = key.type == Integer ? {:only_integer => true} : {}
            validates_numericality_of(attribute, number_options)
          end

          if key.options[:format]
            validates_format_of(attribute, :with => key.options[:format])
          end

          if key.options[:in]
            validates_inclusion_of(attribute, :in => key.options[:in])
          end

          if key.options[:not_in]
            validates_exclusion_of(attribute, :in => key.options[:not_in])
          end

          if key.options[:length]
            length_options = case key.options[:length]
            when Integer
              {:minimum => 0, :maximum => key.options[:length]}
            when Range
              {:within => key.options[:length]}
            when Hash
              key.options[:length]
            end
            validates_length_of(attribute, length_options)
          end
        end
      end

      def initialize(attrs={})
        @_new = true
        initialize_default_values
        self.attributes = attrs
      end

      # Builds an instance from persisted attributes; skips writer-side
      # defaults handling done for new records.
      def initialize_from_database(attrs={})
        @_new = false
        initialize_default_values
        load_from_database(attrs)
        self
      end

      def persisted?
        !new? && !destroyed?
      end

      # Mass-assignment: prefers writer methods, falls back to []= (which
      # creates the key on the fly).
      def attributes=(attrs)
        return if attrs == nil or attrs.blank?

        attrs.each_pair do |key, value|
          if respond_to?(:"#{key}=")
            self.send(:"#{key}=", value)
          else
            self[key] = value
          end
        end
      end

      # Serializes keys and embedded associations into a Mongo-ready hash.
      # nil values are omitted except for ObjectId-typed keys.
      def attributes
        HashWithIndifferentAccess.new.tap do |attrs|
          keys.each do |name, key|
            if key.type == ObjectId || !self[key.name].nil?
              value = key.set(self[key.name])
              attrs[name] = value
            end
          end

          embedded_associations.each do |association|
            if documents = instance_variable_get(association.ivar)
              if association.is_a?(Associations::OneAssociation)
                attrs[association.name] = documents.to_mongo
              else
                attrs[association.name] = documents.map { |document| document.to_mongo }
              end
            end
          end
        end
      end
      alias :to_mongo :attributes

      def assign(attrs={})
        warn "[DEPRECATION] #assign is deprecated, use #attributes="
        self.attributes = attrs
      end

      def update_attributes(attrs={})
        self.attributes = attrs
        save
      end

      def update_attributes!(attrs={})
        self.attributes = attrs
        save!
      end

      # Sets a single attribute and saves WITHOUT validations.
      def update_attribute(name, value)
        self.send(:"#{name}=", value)
        save(:validate => false)
      end

      def id
        self[:_id]
      end

      def id=(value)
        if self.class.using_object_id?
          value = ObjectId.to_mongo(value)
        end

        self[:_id] = value
      end

      # Reads a key, typecasting lazily: the cast value is cached in @<name>
      # on first read from @<name>_before_type_cast.
      def read_key(key_name)
        instance_key = :"@#{key_name}"
        if instance_variable_defined? instance_key
          instance_variable_get instance_key
        elsif key = keys[key_name.to_s]
          value = key.get instance_variable_get(:"@#{key_name}_before_type_cast")
          instance_variable_set instance_key, value
        end
      end
      alias_method :[], :read_key

      # Hash-style writer; declares the key on the class if unknown.
      def []=(name, value)
        ensure_key_exists(name)
        write_key(name, value)
      end

      def keys
        self.class.keys
      end

      def key_names
        keys.keys
      end

      def non_embedded_keys
        keys.values.select { |key| !key.embeddable? }
      end

      def embedded_keys
        keys.values.select { |key| key.embeddable? }
      end

      private

      # Like #attributes= but skips writers for declared keys so raw Mongo
      # values go through the typecasting []= path.
      def load_from_database(attrs)
        return if attrs == nil or attrs.blank?

        attrs.each do |key, value|
          if respond_to?(:"#{key}=") && !self.class.key?(key)
            self.send(:"#{key}=", value)
          else
            self[key] = value
          end
        end
      end

      def ensure_key_exists(name)
        self.class.key(name) unless respond_to?(:"#{name}=")
      end

      # Wires the child document back to self for embeddable values.
      def set_parent_document(key, value)
        if value.respond_to?(:_parent_document) && value.is_a?(key.type) && key.embeddable?
          value._parent_document = self
        end
      end

      # Stores both the typecast value (@<name>) and the raw value
      # (@<name>_before_type_cast); invalidates the cached #attributes hash.
      def write_key(name, value)
        key = keys[name.to_s]

        as_mongo = key.set(value)
        as_typecast = key.get(as_mongo)

        set_parent_document(key, value)
        set_parent_document(key, as_typecast)

        instance_variable_set :"@#{key.name}", as_typecast
        instance_variable_set :"@#{key.name}_before_type_cast", value

        @attributes = nil
      end

      def initialize_default_values
        keys.values.select { |key| key.default? }.each do |key|
          write_key key.name, key.default_value
        end
      end
      #end private
    end
  end
end
Minor optimizations; memoize enum-build values and reduce iterations where possible
# encoding: UTF-8
require 'mongo_mapper/plugins/keys/key'
module MongoMapper
  module Plugins
    # Keys: typed key (field) definitions for documents. This revision adds
    # memoization of several derived key lists for speed; see the
    # NOTE(review) comments about cache staleness.
    module Keys
      extend ActiveSupport::Concern

      # Hoisted Ruby-version check used by key_accessors_module_defined?.
      # NOTE(review): evaluated once at load against this module's
      # const_defined? arity — presumably a 1.8.x vs newer distinction
      # despite the name; confirm against the original comment ("Ruby 1.9
      # compat check").
      IS_RUBY_1_9 = method(:const_defined?).arity == 1

      included do
        extend ActiveSupport::DescendantsTracker
        # Every document gets an _id defaulting to a freshly generated ObjectId.
        key :_id, ObjectId, :default => lambda { BSON::ObjectId.new }
      end

      module ClassMethods
        # Subclasses start from a copy of the parent's key definitions.
        def inherited(descendant)
          descendant.instance_variable_set(:@keys, keys.dup)
          super
        end

        # Hash of key name (String) => Key instance.
        def keys
          @keys ||= {}
        end

        # Declares a key: registers the Key, defines accessors, propagates
        # the declaration to descendants, and wires indexes/validations.
        # NOTE(review): does not invalidate the memoized object_id_keys
        # below (or per-instance key-list caches), so keys declared after
        # those caches are first populated will not be reflected — confirm
        # all keys are declared at class-definition time.
        def key(*args)
          Key.new(*args).tap do |key|
            keys[key.name] = key
            create_accessors_for(key)
            create_key_in_descendants(*args)
            create_indexes_for(key)
            create_validations_for(key)
          end
        end

        # True when a key with this name is declared (string comparison).
        def key?(key)
          keys.key?(key.to_s)
        end

        def using_object_id?
          object_id_key?(:_id)
        end

        # Names (Symbols) of all keys typed as ObjectId. Memoized; see the
        # staleness note on #key above.
        def object_id_keys
          @object_id_keys ||= keys.keys.select { |key| keys[key].type == ObjectId }.map(&:to_sym)
        end

        def object_id_key?(name)
          object_id_keys.include?(name.to_sym)
        end

        # Serializes an instance for Mongo; nil passes through.
        def to_mongo(instance)
          instance && instance.to_mongo
        end

        # Inflates a Mongo value into an instance; nil passes through.
        def from_mongo(value)
          value && (value.is_a?(self) ? value : load(value))
        end

        # load is overridden in identity map to ensure same objects are loaded
        # Picks the concrete class from the persisted _type (falling back to
        # self if the constant no longer resolves) and builds an instance
        # WITHOUT running #initialize.
        def load(attrs)
          return nil if attrs.nil?
          begin
            attrs['_type'] ? attrs['_type'].constantize : self
          rescue NameError
            self
          end.allocate.initialize_from_database(attrs)
        end

        private

        def key_accessors_module_defined?
          if IS_RUBY_1_9
            const_defined?('MongoMapperKeys')
          else
            const_defined?('MongoMapperKeys', false)
          end
        end

        # Anonymous module holding generated accessors so user-defined
        # methods of the same name can override them and still call super.
        def accessors_module
          if key_accessors_module_defined?
            const_get 'MongoMapperKeys'
          else
            const_set 'MongoMapperKeys', Module.new
          end
        end

        # Generates reader, writer, predicate and *_before_type_cast
        # accessors for the key on the accessors module.
        def create_accessors_for(key)
          accessors_module.module_eval <<-end_eval
            attr_reader :#{key.name}_before_type_cast
            def #{key.name}
              read_key(:#{key.name})
            end
            def #{key.name}=(value)
              write_key(:#{key.name}, value)
            end
            def #{key.name}?
              read_key(:#{key.name}).present?
            end
          end_eval
          include accessors_module
        end

        # Replays the key declaration on every tracked descendant.
        def create_key_in_descendants(*args)
          descendants.each { |descendant| descendant.key(*args) }
        end

        def create_indexes_for(key)
          if key.options[:index] && !key.embeddable?
            warn "[DEPRECATION] :index option when defining key #{key.name.inspect} is deprecated. Put indexes in `db/indexes.rb`"
            ensure_index key.name
          end
        end

        # Maps key options (:required, :unique, :numeric, :format, :in,
        # :not_in, :length) onto the matching ActiveModel validations.
        def create_validations_for(key)
          attribute = key.name.to_sym

          if key.options[:required]
            # Boolean presence must be inclusion: `false` is "present" here.
            if key.type == Boolean
              validates_inclusion_of attribute, :in => [true, false]
            else
              validates_presence_of(attribute)
            end
          end

          if key.options[:unique]
            validates_uniqueness_of(attribute)
          end

          if key.options[:numeric]
            number_options = key.type == Integer ? {:only_integer => true} : {}
            validates_numericality_of(attribute, number_options)
          end

          if key.options[:format]
            validates_format_of(attribute, :with => key.options[:format])
          end

          if key.options[:in]
            validates_inclusion_of(attribute, :in => key.options[:in])
          end

          if key.options[:not_in]
            validates_exclusion_of(attribute, :in => key.options[:not_in])
          end

          if key.options[:length]
            length_options = case key.options[:length]
            when Integer
              {:minimum => 0, :maximum => key.options[:length]}
            when Range
              {:within => key.options[:length]}
            when Hash
              key.options[:length]
            end
            validates_length_of(attribute, length_options)
          end
        end
      end

      def initialize(attrs={})
        @_new = true
        initialize_default_values
        self.attributes = attrs
      end

      # Builds an instance from persisted attributes; skips writer-side
      # defaults handling done for new records.
      def initialize_from_database(attrs={})
        @_new = false
        initialize_default_values
        load_from_database(attrs)
        self
      end

      def persisted?
        !new? && !destroyed?
      end

      # Mass-assignment: prefers writer methods, falls back to []= (which
      # creates the key on the fly).
      def attributes=(attrs)
        return if attrs == nil or attrs.blank?

        attrs.each_pair do |key, value|
          if respond_to?(:"#{key}=")
            self.send(:"#{key}=", value)
          else
            self[key] = value
          end
        end
      end

      # Serializes keys and embedded associations into a Mongo-ready hash.
      # nil values are omitted except for ObjectId-typed keys.
      def attributes
        HashWithIndifferentAccess.new.tap do |attrs|
          keys.each do |name, key|
            if key.type == ObjectId || !self[key.name].nil?
              value = key.set(self[key.name])
              attrs[name] = value
            end
          end

          embedded_associations.each do |association|
            if documents = instance_variable_get(association.ivar)
              if association.is_a?(Associations::OneAssociation)
                attrs[association.name] = documents.to_mongo
              else
                attrs[association.name] = documents.map &:to_mongo
              end
            end
          end
        end
      end
      alias :to_mongo :attributes

      def assign(attrs={})
        warn "[DEPRECATION] #assign is deprecated, use #attributes="
        self.attributes = attrs
      end

      def update_attributes(attrs={})
        self.attributes = attrs
        save
      end

      def update_attributes!(attrs={})
        self.attributes = attrs
        save!
      end

      # Sets a single attribute and saves WITHOUT validations.
      def update_attribute(name, value)
        self.send(:"#{name}=", value)
        save(:validate => false)
      end

      def id
        self[:_id]
      end

      def id=(value)
        if self.class.using_object_id?
          value = ObjectId.to_mongo(value)
        end

        self[:_id] = value
      end

      # Reads a key, typecasting lazily: the cast value is cached in @<name>
      # on first read from @<name>_before_type_cast.
      def read_key(key_name)
        instance_key = :"@#{key_name}"
        if instance_variable_defined? instance_key
          instance_variable_get instance_key
        elsif key = keys[key_name.to_s]
          value = key.get instance_variable_get(:"@#{key_name}_before_type_cast")
          instance_variable_set instance_key, value
        end
      end
      alias_method :[], :read_key

      # Hash-style writer; declares the key on the class if unknown.
      def []=(name, value)
        ensure_key_exists(name)
        write_key(name, value)
      end

      def keys
        self.class.keys
      end

      # NOTE(review): the three caches below are per-instance and are never
      # invalidated, so keys declared after the first call (e.g. via []= on
      # an unknown name) will not appear — confirm this is acceptable.
      def key_names
        @key_names ||= keys.keys
      end

      def non_embedded_keys
        @non_embedded_keys ||= keys.values.select { |key| !key.embeddable? }
      end

      def embedded_keys
        @embedded_keys ||= keys.values.select &:embeddable?
      end

      private

      # Like #attributes= but skips writers for declared keys so raw Mongo
      # values go through the typecasting []= path.
      def load_from_database(attrs)
        return if attrs == nil or attrs.blank?

        attrs.each do |key, value|
          if respond_to?(:"#{key}=") && !self.class.key?(key)
            self.send(:"#{key}=", value)
          else
            self[key] = value
          end
        end
      end

      def ensure_key_exists(name)
        self.class.key(name) unless respond_to?(:"#{name}=")
      end

      # Wires the child document back to self for embeddable values.
      def set_parent_document(key, value)
        if value.respond_to?(:_parent_document) && value.is_a?(key.type) && key.embeddable?
          value._parent_document = self
        end
      end

      # Stores both the typecast value (@<name>) and the raw value
      # (@<name>_before_type_cast); invalidates the cached #attributes hash.
      def write_key(name, value)
        key = keys[name.to_s]

        as_mongo = key.set(value)
        as_typecast = key.get(as_mongo)

        set_parent_document(key, value)
        set_parent_document(key, as_typecast)

        instance_variable_set :"@#{key.name}", as_typecast
        instance_variable_set :"@#{key.name}_before_type_cast", value

        @attributes = nil
      end

      # Single pass over all keys, applying defaults inline.
      def initialize_default_values
        keys.values.each do |key|
          write_key key.name, key.default_value if key.default?
        end
      end
      #end private
    end
  end
end
|
require 'solr_ead'
require 'fileutils'
##
# Ead Indexer
#
# This class will index a file or directory into a Solr index configured via solr.yml
# It essentially wraps the functionality of SolrEad::Indexer with some customizations
# mainly the ability to index directories and reindex changed files from a Git diff.
#
# The #index function takes in a file or directory and calls update on all the valid .xml files it finds.
# The #reindex_changed_since_last_commit function finds all the files changed since the previous commit and updates, adds or deletes accordingly.
# The #reindex_changed_since_yesterday function finds all the files changed since yesterday and updates, adds or deletes accordingly.
# The #reindex_changed_since_last_week function finds all the files changed since last week and updates, adds or deletes accordingly.
# The .delete_all convenience method wraps Blacklight.solr to easily clear the index
class Findingaids::Ead::Indexer
  # Convenience method: clears the entire Solr index.
  def self.delete_all
    Blacklight.solr.delete_by_query("*:*")
    Blacklight.solr.commit
  end

  attr_accessor :indexer, :data_path

  # data_path - directory holding the EAD git checkout (default "findingaids_eads").
  def initialize(data_path = "findingaids_eads")
    @data_path = data_path
    @indexer = SolrEad::Indexer.new(document: Findingaids::Ead::Document, component: Findingaids::Ead::Component)
  end

  # Indexes a single file, or every entry found directly inside a directory.
  #
  # Raises ArgumentError when file is blank.
  def index(file)
    raise ArgumentError, "Expecting #{file} to be a file or directory" if file.blank?

    if File.directory?(file)
      # NOTE(review): globs every entry, not only *.xml as the class comment
      # suggests — confirm intent. (Renamed the block variable so it no
      # longer shadows the method argument.)
      Dir.glob(File.join(file, "*")).each { |entry| update(entry) }
    else
      update(file)
    end
  end

  # Reindex files changed only since the last commit
  def reindex_changed_since_last_commit
    reindex_changed(commits)
  end

  # Reindex all files changed in the last day
  def reindex_changed_since_yesterday
    reindex_changed(commits('--since=1.day'))
  end

  # Reindex all files changed in the last week
  def reindex_changed_since_last_week
    reindex_changed(commits('--since=1.week'))
  end

  private

  # Reindexes files changed in the given list of commit SHAs. Each entry
  # from changed_files is "STATUS\tFILENAME\tCOMMIT_MESSAGE".
  def reindex_changed(last_commits)
    changed_files(last_commits).each do |file|
      status, filename, message = file.split("\t")
      fullpath = File.join(data_path, filename)
      update_or_delete(status, fullpath, message)
    end
  end

  # TODO: Make time range configurable by instance variable
  # and cascade through to rake jobs
  # Get the SHAs for the time range given
  #
  # time_range - git log option selecting commits; default is -1, just the
  #              last commit
  def commits(time_range = '-1')
    @commits ||= `cd #{data_path} && git log --pretty=format:'%h' #{time_range} && cd ..`.split("\n")
  end

  # Builds "STATUS\tFILENAME\tMESSAGE" entries for the given commits.
  def changed_files(last_commits)
    changed_files = []
    last_commits.each do |commit|
      files_in_commit = (`cd #{data_path} && git diff-tree --no-commit-id --name-status -r #{commit} && cd ..`).split("\n")
      commit_message = (`cd #{data_path} && git log --pretty=format:'%s' -1 -c #{commit} && cd ..`).gsub(/(\n+)$/, '')
      log.info "Data path: #{data_path}"
      log.info "Files in commit: #{files_in_commit}"
      log.info "Commit: #{commit}"
      log.info "Commit message: #{commit_message}"
      changed_files << [files_in_commit, commit_message].join("\t")
    end
    changed_files.flatten
  end

  # Update or delete depending on git status
  def update_or_delete(status, file, message)
    eadid = get_eadid_from_message(file, message)
    # File.exist? — File.exists? was deprecated and removed in Ruby 3.2.
    if File.exist?(file)
      update(file)
    # Status == D means the file was deleted
    elsif status.eql? "D"
      delete(file, eadid)
    end
  end

  # Extracts the EADID recorded in the commit message for this file, if any.
  def get_eadid_from_message(file, message)
    log.info "File: #{file}"
    log.info "Message: #{message}"
    # Strip out initial folder name to match filename in commit message
    file_without_data_path = file.gsub(/#{data_path}(\/)?/, '')
    eadid_matches = message.match(/#{file_without_data_path} EADID='(.+?)'/)
    log.info "Matches: #{eadid_matches.inspect}"
    eadid_matches.captures.first unless eadid_matches.nil?
  end

  # Wrapper method for SolrEad::Indexer#update(file)
  # => @file filename of EAD
  #
  # Returns false (after logging) when indexing fails.
  def update(file)
    raise ArgumentError, "Expecting #{file} to be a file or directory" if file.blank?

    begin
      # The document is built around a repository that relies on the folder structure
      # since it does not exist consistently in the EAD, so we pass in the full path to extract the repos.
      ENV["EAD"] = file
      indexer.update(file)
      log.info "Indexed #{file}."
    rescue StandardError => e
      # Was `rescue Exception`: rescuing StandardError avoids swallowing
      # signals, SystemExit and memory errors.
      log.info "Failed to index #{file}: #{e}."
      false
    end
  end

  # Wrapper method for SolrEad::Indexer#delete
  # => @id EAD id
  #
  # Returns false (after logging) when the delete fails.
  def delete(file, eadid)
    raise ArgumentError, "Expecting #{file} to be a file or directory" if file.blank?

    # If eadid was passed in, use it to delete;
    # if not, make a guess based on the filename
    id = (eadid || File.basename(file).split('.')[0])
    begin
      indexer.delete(id)
      log.info "Deleted #{file} with id #{id}."
    rescue StandardError => e
      log.info "Failed to delete #{file} with id #{id}: #{e}"
      false
    end
  end

  # Set FINDINGAIDS_LOG=STDOUT to view logs in standard out
  def log
    @log ||= (ENV['FINDINGAIDS_LOG']) ? Logger.new(ENV['FINDINGAIDS_LOG'].constantize) : Rails.logger
  end
end
Removed debugging statements
require 'solr_ead'
require 'fileutils'
##
# Ead Indexer
#
# This class will index a file or directory into a Solr index configured via solr.yml
# It essentially wraps the functionality of SolrEad::Indexer with some customizations
# mainly the ability to index directories and reindex changed files from a Git diff.
#
# The #index function takes in a file or directory and calls update on all the valid .xml files it finds.
# The #reindex_changed_since_last_commit function finds all the files changed since the previous commit and updates, adds or deletes accordingly.
# The #reindex_changed_since_yesterday function finds all the files changed since yesterday and updates, adds or deletes accordingly.
# The #reindex_changed_since_last_week function finds all the files changed since last week and updates, adds or deletes accordingly.
# The .delete_all convenience method wraps Blacklight.solr to easily clear the index
class Findingaids::Ead::Indexer
  # Convenience method: clears the entire Solr index.
  def self.delete_all
    Blacklight.solr.delete_by_query("*:*")
    Blacklight.solr.commit
  end

  attr_accessor :indexer, :data_path

  # data_path - directory holding the EAD git checkout (default "findingaids_eads").
  def initialize(data_path = "findingaids_eads")
    @data_path = data_path
    @indexer = SolrEad::Indexer.new(document: Findingaids::Ead::Document, component: Findingaids::Ead::Component)
  end

  # Indexes a single file, or every entry found directly inside a directory.
  #
  # Raises ArgumentError when file is blank.
  def index(file)
    raise ArgumentError, "Expecting #{file} to be a file or directory" if file.blank?

    if File.directory?(file)
      # NOTE(review): globs every entry, not only *.xml as the class comment
      # suggests — confirm intent. (Renamed the block variable so it no
      # longer shadows the method argument.)
      Dir.glob(File.join(file, "*")).each { |entry| update(entry) }
    else
      update(file)
    end
  end

  # Reindex files changed only since the last commit
  def reindex_changed_since_last_commit
    reindex_changed(commits)
  end

  # Reindex all files changed in the last day
  def reindex_changed_since_yesterday
    reindex_changed(commits('--since=1.day'))
  end

  # Reindex all files changed in the last week
  def reindex_changed_since_last_week
    reindex_changed(commits('--since=1.week'))
  end

  private

  # Reindexes files changed in the given list of commit SHAs. Each entry
  # from changed_files is "STATUS\tFILENAME\tCOMMIT_MESSAGE".
  def reindex_changed(last_commits)
    changed_files(last_commits).each do |file|
      status, filename, message = file.split("\t")
      fullpath = File.join(data_path, filename)
      update_or_delete(status, fullpath, message)
    end
  end

  # TODO: Make time range configurable by instance variable
  # and cascade through to rake jobs
  # Get the SHAs for the time range given
  #
  # time_range - git log option selecting commits; default is -1, just the
  #              last commit
  def commits(time_range = '-1')
    @commits ||= `cd #{data_path} && git log --pretty=format:'%h' #{time_range} && cd ..`.split("\n")
  end

  # Builds "STATUS\tFILENAME\tMESSAGE" entries for the given commits.
  def changed_files(last_commits)
    changed_files = []
    last_commits.each do |commit|
      files_in_commit = (`cd #{data_path} && git diff-tree --no-commit-id --name-status -r #{commit} && cd ..`).split("\n")
      commit_message = (`cd #{data_path} && git log --pretty=format:'%s' -1 -c #{commit} && cd ..`).gsub(/(\n+)$/, '')
      changed_files << [files_in_commit, commit_message].join("\t")
    end
    changed_files.flatten
  end

  # Update or delete depending on git status
  def update_or_delete(status, file, message)
    eadid = get_eadid_from_message(file, message)
    # File.exist? — File.exists? was deprecated and removed in Ruby 3.2.
    if File.exist?(file)
      update(file)
    # Status == D means the file was deleted
    elsif status.eql? "D"
      delete(file, eadid)
    end
  end

  # Extracts the EADID recorded in the commit message for this file, if any.
  def get_eadid_from_message(file, message)
    # Strip out initial folder name to match filename in commit message
    file_without_data_path = file.gsub(/#{data_path}(\/)?/, '')
    eadid_matches = message.match(/#{file_without_data_path} EADID='(.+?)'/)
    eadid_matches.captures.first unless eadid_matches.nil?
  end

  # Wrapper method for SolrEad::Indexer#update(file)
  # => @file filename of EAD
  #
  # Returns false (after logging) when indexing fails.
  def update(file)
    raise ArgumentError, "Expecting #{file} to be a file or directory" if file.blank?

    begin
      # The document is built around a repository that relies on the folder structure
      # since it does not exist consistently in the EAD, so we pass in the full path to extract the repos.
      ENV["EAD"] = file
      indexer.update(file)
      log.info "Indexed #{file}."
    rescue StandardError => e
      # Was `rescue Exception`: rescuing StandardError avoids swallowing
      # signals, SystemExit and memory errors.
      log.info "Failed to index #{file}: #{e}."
      false
    end
  end

  # Wrapper method for SolrEad::Indexer#delete
  # => @id EAD id
  #
  # Returns false (after logging) when the delete fails.
  def delete(file, eadid)
    raise ArgumentError, "Expecting #{file} to be a file or directory" if file.blank?

    # If eadid was passed in, use it to delete;
    # if not, make a guess based on the filename
    id = (eadid || File.basename(file).split('.')[0])
    begin
      indexer.delete(id)
      log.info "Deleted #{file} with id #{id}."
    rescue StandardError => e
      log.info "Failed to delete #{file} with id #{id}: #{e}"
      false
    end
  end

  # Set FINDINGAIDS_LOG=STDOUT to view logs in standard out
  def log
    @log ||= (ENV['FINDINGAIDS_LOG']) ? Logger.new(ENV['FINDINGAIDS_LOG'].constantize) : Rails.logger
  end
end
|
# encoding: utf-8
module Mongoid
  module Translate
    # Gem version. Frozen so the constant cannot be mutated in place.
    VERSION = "0.1.0".freeze
  end
end
Bump version.
Signed-off-by: chatgris <f9469d12bf3d131e7aae80be27ccfe58aa9db1f1@af83.com>
# encoding: utf-8
module Mongoid
  module Translate
    # Gem version. Frozen so the constant cannot be mutated in place.
    VERSION = "0.1.1".freeze
  end
end
|
# encoding: utf-8
require 'set'
module FiniteMachine
  # A class responsible for observing state changes
  class Observer
    include Threadable
    include Safety

    # The current state machine
    attr_threadsafe :machine

    # The hooks to trigger around the transition lifecycle.
    attr_threadsafe :hooks

    # Initialize an Observer
    #
    # Subscribes itself to the machine so #trigger is invoked on events.
    #
    # @api public
    def initialize(machine)
      @machine = machine
      @machine.subscribe(self)
      @hooks = FiniteMachine::Hooks.new
    end

    # Evaluate in current context
    #
    # @api private
    def call(&block)
      instance_eval(&block)
    end

    # Register callback for a given event type
    #
    # @param [Symbol, FiniteMachine::HookEvent] event_type
    # @param [Array] args
    #   optional hook name (defaults to ANY_EVENT) and :async marker
    # @param [Proc] callback
    #
    # @api public
    # TODO: throw error if event type isn't handled
    def on(event_type = HookEvent, *args, &callback)
      sync_exclusive do
        name, async, _ = args
        name = ANY_EVENT if name.nil?
        async = false if async.nil?
        ensure_valid_callback_name!(event_type, name)
        # Tagging with Async makes handle_callback defer via the event queue.
        callback.extend(Async) if async == :async
        hooks.register event_type, name, callback
      end
    end

    # Unregister callback for a given event
    #
    # @api public
    def off(event_type = ANY_EVENT, name = ANY_STATE, &callback)
      sync_exclusive do
        hooks.unregister event_type, name, callback
      end
    end

    # Marker: callback runs once, then is unregistered (see #trigger).
    module Once; end

    # Marker: callback is executed asynchronously on the event queue.
    module Async; end

    # Register a callback fired on entering a state.
    def on_enter(*args, &callback)
      on HookEvent::Enter, *args, &callback
    end

    # Register a callback fired on any transition.
    def on_transition(*args, &callback)
      on HookEvent::Transition, *args, &callback
    end

    # Register a callback fired on exiting a state.
    def on_exit(*args, &callback)
      on HookEvent::Exit, *args, &callback
    end

    # One-shot variant of #on_enter.
    def once_on_enter(*args, &callback)
      on HookEvent::Enter, *args, &callback.extend(Once)
    end

    # One-shot variant of #on_transition.
    def once_on_transition(*args, &callback)
      on HookEvent::Transition, *args, &callback.extend(Once)
    end

    # One-shot variant of #on_exit.
    def once_on_exit(*args, &callback)
      on HookEvent::Exit, *args, &callback.extend(Once)
    end

    # Register a callback fired before an event.
    def on_before(*args, &callback)
      on HookEvent::Before, *args, &callback
    end

    # Register a callback fired after an event.
    def on_after(*args, &callback)
      on HookEvent::After, *args, &callback
    end

    # One-shot variant of #on_before.
    def once_on_before(*args, &callback)
      on HookEvent::Before, *args, &callback.extend(Once)
    end

    # One-shot variant of #on_after.
    def once_on_after(*args, &callback)
      on HookEvent::After, *args, &callback.extend(Once)
    end

    # Trigger all listeners
    #
    # Runs hooks registered for the specific type/state as well as the
    # ANY_EVENT/ANY_STATE wildcards; Once-tagged hooks are unregistered
    # after their first run.
    #
    # @api public
    def trigger(event, *args, &block)
      sync_exclusive do
        [event.type, ANY_EVENT].each do |event_type|
          [event.state, ANY_STATE].each do |event_state|
            hooks.call(event_type, event_state) do |hook|
              handle_callback(hook, event)
              off(event_type, event_state, &hook) if hook.is_a?(Once)
            end
          end
        end
      end
    end

    private

    # Defer callback execution
    #
    # @api private
    def defer(callable, trans_event, *data)
      async_call = AsyncCall.build(machine, callable, trans_event, *data)
      machine.event_queue << async_call
    end

    # Create callable instance
    #
    # Wraps the hook so it executes in the machine's context.
    #
    # @api private
    def create_callable(hook)
      deferred_hook = proc do |_trans_event, *_data|
        machine.instance_exec(_trans_event, *_data, &hook)
      end
      Callable.new(deferred_hook)
    end

    # Handle callback and decide if run synchronously or asynchronously
    #
    # NOTE: the transition event is built from the transition only; the
    # event data is passed separately as callback arguments. A CANCELLED
    # result marks the transition cancelled.
    #
    # @api private
    def handle_callback(hook, event)
      trans_event = TransitionEvent.build(event.transition)
      data = event.data
      callable = create_callable(hook)

      if hook.is_a?(Async)
        defer(callable, trans_event, *data)
        result = nil
      else
        result = callable.call(trans_event, *data)
      end

      event.transition.cancelled = (result == CANCELLED)
    end

    # Callback names including all states and events
    #
    # @return [Array[Symbol]]
    #   valid callback names
    #
    # @api private
    def callback_names
      machine.states + machine.event_names + [ANY_EVENT]
    end

    # Forward the message to observer
    #
    # Supports dynamic helpers such as `on_enter_green` by splitting the
    # method name into the registration helper and the hook name.
    #
    # @param [String] method_name
    #
    # @param [Array] args
    #
    # @return [self]
    #
    # @api private
    def method_missing(method_name, *args, &block)
      _, event_name, callback_name = *method_name.to_s.match(/^(\w*?on_\w+?)_(\w+)$/)
      if callback_name && callback_names.include?(callback_name.to_sym)
        public_send(event_name, :"#{callback_name}", *args, &block)
      else
        super
      end
    end

    # Test if a message can be handled by observer
    #
    # @param [String] method_name
    #
    # @param [Boolean] include_private
    #
    # @return [Boolean]
    #
    # @api private
    def respond_to_missing?(method_name, include_private = false)
      *_, callback_name = *method_name.to_s.match(/^(\w*?on_\w+?)_(\w+)$/)
      callback_name && callback_names.include?(:"#{callback_name}")
    end
  end # Observer
end # FiniteMachine
Change to pass data to transition event.
# encoding: utf-8
require 'set'
module FiniteMachine
# A class responsible for observing state changes
class Observer
include Threadable
include Safety
# The current state machine
attr_threadsafe :machine
# The hooks to trigger around the transition lifecycle.
attr_threadsafe :hooks
# Initialize an Observer
#
# @api public
def initialize(machine)
@machine = machine
@machine.subscribe(self)
@hooks = FiniteMachine::Hooks.new
end
# Evaluate in current context
#
# @api private
def call(&block)
instance_eval(&block)
end
# Register callback for a given event type
#
# @param [Symbol, FiniteMachine::HookEvent] event_type
# @param [Array] args
# @param [Proc] callback
#
# @api public
# TODO: throw error if event type isn't handled
def on(event_type = HookEvent, *args, &callback)
sync_exclusive do
name, async, _ = args
name = ANY_EVENT if name.nil?
async = false if async.nil?
ensure_valid_callback_name!(event_type, name)
callback.extend(Async) if async == :async
hooks.register event_type, name, callback
end
end
# Unregister callback for a given event
#
# @api public
def off(event_type = ANY_EVENT, name = ANY_STATE, &callback)
sync_exclusive do
hooks.unregister event_type, name, callback
end
end
module Once; end
module Async; end
def on_enter(*args, &callback)
on HookEvent::Enter, *args, &callback
end
def on_transition(*args, &callback)
on HookEvent::Transition, *args, &callback
end
def on_exit(*args, &callback)
on HookEvent::Exit, *args, &callback
end
def once_on_enter(*args, &callback)
on HookEvent::Enter, *args, &callback.extend(Once)
end
def once_on_transition(*args, &callback)
on HookEvent::Transition, *args, &callback.extend(Once)
end
def once_on_exit(*args, &callback)
on HookEvent::Exit, *args, &callback.extend(Once)
end
def on_before(*args, &callback)
on HookEvent::Before, *args, &callback
end
def on_after(*args, &callback)
on HookEvent::After, *args, &callback
end
def once_on_before(*args, &callback)
on HookEvent::Before, *args, &callback.extend(Once)
end
def once_on_after(*args, &callback)
on HookEvent::After, *args, &callback.extend(Once)
end
# Trigger all listeners
#
# @api public
def trigger(event, *args, &block)
sync_exclusive do
[event.type, ANY_EVENT].each do |event_type|
[event.state, ANY_STATE].each do |event_state|
hooks.call(event_type, event_state) do |hook|
handle_callback(hook, event)
off(event_type, event_state, &hook) if hook.is_a?(Once)
end
end
end
end
end
private
# Defer callback execution
#
# @api private
def defer(callable, trans_event, *data)
async_call = AsyncCall.build(machine, callable, trans_event, *data)
machine.event_queue << async_call
end
# Create callable instance
#
# @api private
def create_callable(hook)
deferred_hook = proc do |_trans_event, *_data|
machine.instance_exec(_trans_event, *_data, &hook)
end
Callable.new(deferred_hook)
end
# Handle callback and decide if run synchronously or asynchronously
#
# @api private
def handle_callback(hook, event)
data = event.data
trans_event = TransitionEvent.build(event.transition, *data)
callable = create_callable(hook)
if hook.is_a?(Async)
defer(callable, trans_event, *data)
result = nil
else
result = callable.call(trans_event, *data)
end
event.transition.cancelled = (result == CANCELLED)
end
# Callback names including all states and events
#
# @return [Array[Symbol]]
# valid callback names
#
# @api private
def callback_names
machine.states + machine.event_names + [ANY_EVENT]
end
# Forward the message to observer
#
# @param [String] method_name
#
# @param [Array] args
#
# @return [self]
#
# @api private
# Route calls such as `on_enter_green(...)` to `on_enter(:green, ...)`.
# The regex splits the method name into the registration prefix
# (e.g. "on_enter") and the trailing state/event name; only trailing
# names listed in #callback_names are accepted, otherwise we fall
# through to the default NoMethodError behaviour.
def method_missing(method_name, *args, &block)
_, event_name, callback_name = *method_name.to_s.match(/^(\w*?on_\w+?)_(\w+)$/)
if callback_name && callback_names.include?(callback_name.to_sym)
public_send(event_name, :"#{callback_name}", *args, &block)
else
super
end
end
# Test if a message can be handled by observer
#
# @param [String] method_name
#
# @param [Boolean] include_private
#
# @return [Boolean]
#
# @api private
def respond_to_missing?(method_name, include_private = false)
  # Mirror the #method_missing pattern: the name responds only when its
  # trailing component is a registered state/event callback name.
  match_data = method_name.to_s.match(/^(\w*?on_\w+?)_(\w+)$/)
  callback_name = match_data && match_data[2]
  callback_name && callback_names.include?(callback_name.to_sym)
end
end # Observer
end # FiniteMachine
|
module Mongoid
  # Adds weighted star-rating behaviour to a Mongoid document:
  # embedded rating marks, cached aggregate fields, and query scopes.
  module Rateable
    extend ActiveSupport::Concern

    module Ext
      extend ActiveSupport::Concern

      module ClassMethods
        # Macro: include Mongoid::Rateable into the class and apply the
        # rating configuration (:range, :raters, :default_rater).
        def rateable options = {}
          class_eval do
            self.send :include, Mongoid::Rateable
            self.rate_config options
          end
        end
      end
    end

    included do
      field :rates, type: Integer, default: 0
      field :rating, type: Float, default: nil
      field :rating_previous, type: Float, default: nil
      field :rating_delta, type: Float, default: 0.0
      field :weighted_rate_count, type: Integer, default: 0

      embeds_many :rating_marks, :as => :rateable
      index({"rating_marks.rater_id" => 1, "rating_marks.rater_class" => 1})

      scope :unrated, where(:rating.exists => false)
      scope :rated, where(:rating.exists => true)
      scope :rated_by, ->(rater) { where("rating_marks.rater_id" => rater.id, "rating_marks.rater_class" => rater.class.to_s) }
      scope :with_rating, ->(range) { where(:rating.gte => range.begin, :rating.lte => range.end) }
      scope :highest_rated, ->(limit=10) { order_by([:rating, :desc]).limit(limit) }
    end

    module ClassMethods
      # Classes allowed to act as raters; empty means "any class".
      def rater_classes
        @rater_classes ||= []
      end

      def valid_rater_class? clazz
        return true if !rater_classes || rater_classes.empty?
        rater_classes.include? clazz
      end

      def in_rating_range?(value)
        range = rating_range if respond_to?(:rating_range)
        range ? range.include?(value.to_i) : true
      end

      # macro to create dynamic :rating_range class method!
      # can now even take an Array and find the range of values!
      def set_rating_range range = nil
        raterange = case range
        when Array
          arr = range.sort
          Range.new arr.first, arr.last
        when Range
          range
        when nil
          (1..5)
        else
          raise ArgumentError, "Must be a range, was: #{range}"
        end
        (class << self; self; end).send(:define_method, :rating_range) do
          raterange
        end
      end

      def rateable_by *clazzes
        @rater_classes = []
        return if clazzes.compact.empty?
        clazzes.each do |clazz|
          raise ArgumentError, "A rateable must be a class, was: #{clazz}" unless clazz.respond_to?(:new)
          @rater_classes << clazz
        end
      end

      def rate_config options = {}, &block
        set_rating_range options[:range]
        rateable_by options[:raters]
        default_rater options[:default_rater], &block
      end

      def default_rater rater=nil, &block
        case rater
        when Symbol, String
          define_method :default_rater do
            self.send(rater) # fx to use owner or user relation
          end
        when nil
          return unless block_given?
          define_method :default_rater do
            self.instance_eval(&block)
          end
        else
          raise ArgumentError, "Must take symbol or block argument"
        end
      end
    end # class methods

    # Rate this document with the given mark. Accepts a single rater or
    # an Array of raters; with no rater, falls back to #default_rater.
    def rate(mark, rater = nil, weight = 1)
      case rater
      when Array
        rater.each { |r| rate(mark, r, weight) }
      else
        if !rater
          unless respond_to?(:default_rater)
            raise ArgumentError, "No rater argument and no default_rater specified"
          end
          rater = default_rater
        end
        validate_rater!(rater)
        validate_rating!(mark)
        # Re-rating replaces any previous mark by the same rater.
        unrate_without_rating_update(rater)
        total_mark = mark.to_i * weight.to_i
        self.rates += total_mark
        self.rating_marks.new(:rater_id => rater.id, :mark => mark, :rater_class => rater.class.to_s, :weight => weight)
        self.weighted_rate_count += weight
        update_rating
      end
    end

    # Remove the mark(s) left by the given rater or Array of raters.
    # (Bug fix: the array branch previously forwarded undefined
    # mark/weight arguments to unrate.)
    def unrate(rater)
      case rater
      when Array
        rater.each { |r| unrate(r) }
      else
        unrate_without_rating_update(rater)
        update_rating
      end
    end

    def rate_and_save(mark, rater, weight = 1)
      case rater
      when Array
        rater.each { |r| rate_and_save(mark, r, weight) }
      else
        rate(mark, rater, weight)
        save
      end
    end

    # Bug fix: the array branch previously forwarded undefined
    # mark/weight arguments to unrate_and_save.
    def unrate_and_save(rater)
      case rater
      when Array
        rater.each { |r| unrate_and_save(r) }
      else
        unrate(rater)
        save
      end
    end

    def rated?
      rate_count != 0
    end

    # True when the rater has rated this document. For an Array of
    # raters, true only when every one of them has rated it.
    # (Bug fix: the array branch previously called a nonexistent
    # rated_by with undefined arguments.)
    def rated_by?(rater)
      case rater
      when Array
        rater.all? { |r| rated_by?(r) }
      else
        self.rating_marks.where(:rater_id => rater.id, :rater_class => rater.class.to_s).count == 1
      end
    end

    def rating
      read_attribute(:rating)
    end

    def previous_rating
      read_attribute(:rating_previous)
    end

    def rating_delta
      read_attribute(:rating_delta)
    end

    # Plain average of the marks, ignoring weights; nil when unrated.
    def unweighted_rating
      return nil if self.rating_marks.empty?
      total_sum = self.rating_marks.map(&:mark).sum
      total_sum.to_f / self.rating_marks.size
    end

    def rate_count
      self.rating_marks.size
    end

    def rate_weight
      check_weighted_rate_count
      read_attribute(:weighted_rate_count)
    end

    # Mark left by a single rater, or — for an Array of raters, which
    # must all share one class — a Hash of rater id => mark.
    # (Bug fix: previously stored the undefined `mark` instead of `e.mark`.)
    def user_mark(rater)
      case rater
      when Array
        if rater.map { |x| x.class }.uniq.count > 1
          raise ArgumentError, "Raters all must be of same class."
        end
        r = self.rating_marks.in(:rater_id => rater.map(&:id), :rater_class => rater.first.class.to_s)
        r ? r.inject(Hash.new(0)) { |h, e| h[e.rater_id] = e.mark; h } : nil
      else
        r = self.rating_marks.where(:rater_id => rater.id, :rater_class => rater.class.to_s).first
        r ? r.mark : nil
      end
    end

    protected

    def validate_rater!(rater)
      unless self.class.valid_rater_class?(rater.class)
        raise ArgumentError, "Not a valid rater: #{rater.class}, must be of one of #{self.class.rater_classes}"
      end
    end

    def validate_rating!(value)
      if !self.class.in_rating_range?(value)
        raise ArgumentError, "Rating not in range #{self.class.rating_range}. Rating provided was #{value}."
      end
    end

    # Remove the rater's existing mark and roll its contribution out of
    # the cached totals, without recomputing the average.
    def unrate_without_rating_update(rater)
      rmark = self.rating_marks.where(:rater_id => rater.id, :rater_class => rater.class.to_s).first
      return unless rmark
      weight = (rmark.weight ||= 1)
      total_mark = rmark.mark.to_i * weight.to_i
      self.rates -= total_mark
      self.weighted_rate_count -= weight
      rmark.delete
    end

    # Recompute the cached weighted average plus previous/delta fields.
    def update_rating
      check_weighted_rate_count
      write_attribute(:rating_previous, self.rating)
      rt = (self.rates.to_f / self.weighted_rate_count.to_f) unless self.rating_marks.blank?
      write_attribute(:rating, rt)
      delta = (self.rating && self.previous_rating) ? rating - previous_rating : 0.0
      write_attribute(:rating_delta, delta)
    end

    def check_weighted_rate_count
      # migration from old version
      wrc = read_attribute(:weighted_rate_count).to_i
      if wrc == 0 && rate_count != 0
        write_attribute(:weighted_rate_count, self.rating_marks.size)
      end
    end
  end
end
Allow user_mark to accept an array of raters.
module Mongoid
  # Adds weighted star-rating behaviour to a Mongoid document:
  # embedded rating marks, cached aggregate fields, and query scopes.
  module Rateable
    extend ActiveSupport::Concern

    module Ext
      extend ActiveSupport::Concern

      module ClassMethods
        # Macro: include Mongoid::Rateable into the class and apply the
        # rating configuration (:range, :raters, :default_rater).
        def rateable options = {}
          class_eval do
            self.send :include, Mongoid::Rateable
            self.rate_config options
          end
        end
      end
    end

    included do
      field :rates, type: Integer, default: 0
      field :rating, type: Float, default: nil
      field :rating_previous, type: Float, default: nil
      field :rating_delta, type: Float, default: 0.0
      field :weighted_rate_count, type: Integer, default: 0

      embeds_many :rating_marks, :as => :rateable
      index({"rating_marks.rater_id" => 1, "rating_marks.rater_class" => 1})

      scope :unrated, where(:rating.exists => false)
      scope :rated, where(:rating.exists => true)
      scope :rated_by, ->(rater) { where("rating_marks.rater_id" => rater.id, "rating_marks.rater_class" => rater.class.to_s) }
      scope :with_rating, ->(range) { where(:rating.gte => range.begin, :rating.lte => range.end) }
      scope :highest_rated, ->(limit=10) { order_by([:rating, :desc]).limit(limit) }
    end

    module ClassMethods
      # Classes allowed to act as raters; empty means "any class".
      def rater_classes
        @rater_classes ||= []
      end

      def valid_rater_class? clazz
        return true if !rater_classes || rater_classes.empty?
        rater_classes.include? clazz
      end

      def in_rating_range?(value)
        range = rating_range if respond_to?(:rating_range)
        range ? range.include?(value.to_i) : true
      end

      # macro to create dynamic :rating_range class method!
      # can now even take an Array and find the range of values!
      def set_rating_range range = nil
        raterange = case range
        when Array
          arr = range.sort
          Range.new arr.first, arr.last
        when Range
          range
        when nil
          (1..5)
        else
          raise ArgumentError, "Must be a range, was: #{range}"
        end
        (class << self; self; end).send(:define_method, :rating_range) do
          raterange
        end
      end

      def rateable_by *clazzes
        @rater_classes = []
        return if clazzes.compact.empty?
        clazzes.each do |clazz|
          raise ArgumentError, "A rateable must be a class, was: #{clazz}" unless clazz.respond_to?(:new)
          @rater_classes << clazz
        end
      end

      def rate_config options = {}, &block
        set_rating_range options[:range]
        rateable_by options[:raters]
        default_rater options[:default_rater], &block
      end

      def default_rater rater=nil, &block
        case rater
        when Symbol, String
          define_method :default_rater do
            self.send(rater) # fx to use owner or user relation
          end
        when nil
          return unless block_given?
          define_method :default_rater do
            self.instance_eval(&block)
          end
        else
          raise ArgumentError, "Must take symbol or block argument"
        end
      end
    end # class methods

    # Rate this document with the given mark. Accepts a single rater or
    # an Array of raters; with no rater, falls back to #default_rater.
    def rate(mark, rater = nil, weight = 1)
      case rater
      when Array
        rater.each { |r| rate(mark, r, weight) }
      else
        if !rater
          unless respond_to?(:default_rater)
            raise ArgumentError, "No rater argument and no default_rater specified"
          end
          rater = default_rater
        end
        validate_rater!(rater)
        validate_rating!(mark)
        # Re-rating replaces any previous mark by the same rater.
        unrate_without_rating_update(rater)
        total_mark = mark.to_i * weight.to_i
        self.rates += total_mark
        self.rating_marks.new(:rater_id => rater.id, :mark => mark, :rater_class => rater.class.to_s, :weight => weight)
        self.weighted_rate_count += weight
        update_rating
      end
    end

    # Remove the mark(s) left by the given rater or Array of raters.
    # (Bug fix: the array branch previously forwarded undefined
    # mark/weight arguments to unrate.)
    def unrate(rater)
      case rater
      when Array
        rater.each { |r| unrate(r) }
      else
        unrate_without_rating_update(rater)
        update_rating
      end
    end

    def rate_and_save(mark, rater, weight = 1)
      case rater
      when Array
        rater.each { |r| rate_and_save(mark, r, weight) }
      else
        rate(mark, rater, weight)
        save
      end
    end

    # Bug fix: the array branch previously forwarded undefined
    # mark/weight arguments to unrate_and_save.
    def unrate_and_save(rater)
      case rater
      when Array
        rater.each { |r| unrate_and_save(r) }
      else
        unrate(rater)
        save
      end
    end

    def rated?
      rate_count != 0
    end

    # True when the rater has rated this document. For an Array of
    # raters, true only when every one of them has rated it.
    # (Bug fix: the array branch previously called a nonexistent
    # rated_by with undefined arguments.)
    def rated_by?(rater)
      case rater
      when Array
        rater.all? { |r| rated_by?(r) }
      else
        self.rating_marks.where(:rater_id => rater.id, :rater_class => rater.class.to_s).count == 1
      end
    end

    def rating
      read_attribute(:rating)
    end

    def previous_rating
      read_attribute(:rating_previous)
    end

    def rating_delta
      read_attribute(:rating_delta)
    end

    # Plain average of the marks, ignoring weights; nil when unrated.
    def unweighted_rating
      return nil if self.rating_marks.empty?
      total_sum = self.rating_marks.map(&:mark).sum
      total_sum.to_f / self.rating_marks.size
    end

    def rate_count
      self.rating_marks.size
    end

    def rate_weight
      check_weighted_rate_count
      read_attribute(:weighted_rate_count)
    end

    # Mark left by a single rater, or — for an Array of raters, which
    # must all share one class — a Hash of rater id => mark.
    # (Removed leftover debug logging and unreachable return.)
    def user_mark(rater)
      case rater
      when Array
        if rater.map { |x| x.class }.uniq.count > 1
          raise ArgumentError, "Raters all must be of same class."
        end
        r = self.rating_marks.in(:rater_id => rater.map(&:id), :rater_class => rater.first.class.to_s)
        r ? r.inject(Hash.new(0)) { |h, e| h[e.rater_id] = e.mark; h } : nil
      else
        r = self.rating_marks.where(:rater_id => rater.id, :rater_class => rater.class.to_s).first
        r ? r.mark : nil
      end
    end

    protected

    def validate_rater!(rater)
      unless self.class.valid_rater_class?(rater.class)
        raise ArgumentError, "Not a valid rater: #{rater.class}, must be of one of #{self.class.rater_classes}"
      end
    end

    def validate_rating!(value)
      if !self.class.in_rating_range?(value)
        raise ArgumentError, "Rating not in range #{self.class.rating_range}. Rating provided was #{value}."
      end
    end

    # Remove the rater's existing mark and roll its contribution out of
    # the cached totals, without recomputing the average.
    def unrate_without_rating_update(rater)
      rmark = self.rating_marks.where(:rater_id => rater.id, :rater_class => rater.class.to_s).first
      return unless rmark
      weight = (rmark.weight ||= 1)
      total_mark = rmark.mark.to_i * weight.to_i
      self.rates -= total_mark
      self.weighted_rate_count -= weight
      rmark.delete
    end

    # Recompute the cached weighted average plus previous/delta fields.
    def update_rating
      check_weighted_rate_count
      write_attribute(:rating_previous, self.rating)
      rt = (self.rates.to_f / self.weighted_rate_count.to_f) unless self.rating_marks.blank?
      write_attribute(:rating, rt)
      delta = (self.rating && self.previous_rating) ? rating - previous_rating : 0.0
      write_attribute(:rating_delta, delta)
    end

    def check_weighted_rate_count
      # migration from old version
      wrc = read_attribute(:weighted_rate_count).to_i
      if wrc == 0 && rate_count != 0
        write_attribute(:weighted_rate_count, self.rating_marks.size)
      end
    end
  end
end
|
require 'set'
module Flipper
  module Adapters
    # In-memory feature-flag storage, primarily for tests/development.
    # All values live in a plain Hash keyed by each gate's adapter_key.
    class Memory
      # Public: source is an optional Hash to use as the backing store.
      def initialize(source = nil)
        @source = source || {}
      end

      # Public: Returns a Hash of gate => stored value for the feature.
      # Bug fix: read from adapter storage instead of gate.value, so the
      # result reflects what enable/disable actually persisted.
      def get(feature)
        result = {}
        feature.gates.each do |gate|
          result[gate] = case gate.data_type
                         when :boolean, :integer
                           read gate.adapter_key
                         when :set
                           set_members gate.adapter_key
                         else
                           raise "#{gate} is not supported by this adapter yet"
                         end
        end
        result
      end

      # Public: Persist thing's value for the gate.
      def enable(feature, gate, thing)
        case gate.data_type
        when :boolean, :integer
          write gate.adapter_key, thing.value.to_s
        when :set
          set_add gate.adapter_key, thing.value.to_s
        else
          raise "#{gate} is not supported by this adapter yet"
        end
      end

      # Public: Remove thing's value for the gate. Disabling the boolean
      # gate clears every gate for the feature.
      def disable(feature, gate, thing)
        case gate.data_type
        when :boolean
          # FIXME: Need to make boolean gate not need to delete everything
          feature.gates.each do |g|
            delete g.adapter_key
          end
        when :integer
          write gate.adapter_key, 0
        when :set
          set_delete gate.adapter_key, thing.value.to_s
        else
          raise "#{gate} is not supported by this adapter yet"
        end
      end

      # Private
      def read(key)
        @source[key.to_s]
      end

      # Private
      def write(key, value)
        @source[key.to_s] = value.to_s
      end

      # Private
      def delete(key)
        @source.delete(key.to_s)
      end

      # Private
      def set_add(key, value)
        ensure_set_initialized(key)
        @source[key.to_s].add(value.to_s)
      end

      # Private
      def set_delete(key, value)
        ensure_set_initialized(key)
        @source[key.to_s].delete(value.to_s)
      end

      # Private
      def set_members(key)
        ensure_set_initialized(key)
        @source[key.to_s]
      end

      private

      def ensure_set_initialized(key)
        @source[key.to_s] ||= Set.new
      end
    end
  end
end
Stop using gate.value in memory adapter.
require 'set'
module Flipper
  module Adapters
    # In-memory feature-flag storage, primarily for tests/development.
    # All values live in a plain Hash keyed by each gate's adapter_key.
    class Memory
      # Public: source is an optional Hash to use as the backing store.
      def initialize(source = nil)
        @source = source || {}
      end

      # Public: Returns a Hash of gate => stored value for the feature.
      # The duplicate :boolean/:integer branches are merged, matching
      # the style already used in #enable.
      def get(feature)
        result = {}
        feature.gates.each do |gate|
          result[gate] = case gate.data_type
                         when :boolean, :integer
                           read gate.adapter_key
                         when :set
                           set_members gate.adapter_key
                         else
                           raise "#{gate} is not supported by this adapter yet"
                         end
        end
        result
      end

      # Public: Persist thing's value for the gate.
      def enable(feature, gate, thing)
        case gate.data_type
        when :boolean, :integer
          write gate.adapter_key, thing.value.to_s
        when :set
          set_add gate.adapter_key, thing.value.to_s
        else
          raise "#{gate} is not supported by this adapter yet"
        end
      end

      # Public: Remove thing's value for the gate. Disabling the boolean
      # gate clears every gate for the feature.
      def disable(feature, gate, thing)
        case gate.data_type
        when :boolean
          # FIXME: Need to make boolean gate not need to delete everything
          feature.gates.each do |g|
            delete g.adapter_key
          end
        when :integer
          write gate.adapter_key, 0
        when :set
          set_delete gate.adapter_key, thing.value.to_s
        else
          raise "#{gate} is not supported by this adapter yet"
        end
      end

      # Private
      def read(key)
        @source[key.to_s]
      end

      # Private
      def write(key, value)
        @source[key.to_s] = value.to_s
      end

      # Private
      def delete(key)
        @source.delete(key.to_s)
      end

      # Private
      def set_add(key, value)
        ensure_set_initialized(key)
        @source[key.to_s].add(value.to_s)
      end

      # Private
      def set_delete(key, value)
        ensure_set_initialized(key)
        @source[key.to_s].delete(value.to_s)
      end

      # Private
      def set_members(key)
        ensure_set_initialized(key)
        @source[key.to_s]
      end

      private

      def ensure_set_initialized(key)
        @source[key.to_s] ||= Set.new
      end
    end
  end
end
|
module MorseContactable
# Gem version string, bumped on each release.
VERSION = "0.1.4"
end
Bump to 0.1.5
module MorseContactable
# Gem version string, bumped on each release.
VERSION = "0.1.5"
end
|
require 'set'
module Flipper
  module Adapters
    # In-memory feature-flag storage, primarily for tests/development.
    # Values live in a plain Hash keyed by "feature_key/gate_key".
    class Memory
      # Public: source is an optional Hash to use as the backing store.
      def initialize(source = nil)
        @source = source || {}
      end

      # Public: Returns a Hash of gate => stored value for the feature.
      # The duplicate :boolean/:integer branches are merged, matching
      # the style already used in #enable.
      def get(feature)
        result = {}
        feature.gates.each do |gate|
          result[gate] = case gate.data_type
                         when :boolean, :integer
                           read key(feature, gate)
                         when :set
                           set_members key(feature, gate)
                         else
                           raise "#{gate} is not supported by this adapter yet"
                         end
        end
        result
      end

      # Public: Persist thing's value for the gate.
      def enable(feature, gate, thing)
        case gate.data_type
        when :boolean, :integer
          write key(feature, gate), thing.value.to_s
        when :set
          set_add key(feature, gate), thing.value.to_s
        else
          raise "#{gate} is not supported by this adapter yet"
        end
      end

      # Public: Remove thing's value for the gate. Disabling the boolean
      # gate deliberately clears every gate for the feature.
      def disable(feature, gate, thing)
        case gate.data_type
        when :boolean
          feature.gates.each do |g|
            delete key(feature, g)
          end
        when :integer
          write key(feature, gate), 0
        when :set
          set_delete key(feature, gate), thing.value.to_s
        else
          raise "#{gate} is not supported by this adapter yet"
        end
      end

      # Storage key under which the gate's value for feature is kept.
      def key(feature, gate)
        "#{feature.key}/#{gate.key}"
      end

      # Private
      def read(key)
        @source[key.to_s]
      end

      # Private
      def write(key, value)
        @source[key.to_s] = value.to_s
      end

      # Private
      def delete(key)
        @source.delete(key.to_s)
      end

      # Private
      def set_add(key, value)
        ensure_set_initialized(key)
        @source[key.to_s].add(value.to_s)
      end

      # Private
      def set_delete(key, value)
        ensure_set_initialized(key)
        @source[key.to_s].delete(value.to_s)
      end

      # Private
      def set_members(key)
        ensure_set_initialized(key)
        @source[key.to_s]
      end

      private

      def ensure_set_initialized(key)
        @source[key.to_s] ||= Set.new
      end
    end
  end
end
Removing fixme. Changed my mind and think this is right.
The boolean disable should clear everything. Any other option I thought
through was a pain in the buns from a user experience standpoint and an
actual code standpoint. Adapters will just be required to clear
everything on boolean disable.
require 'set'
module Flipper
  module Adapters
    # In-memory feature-flag storage, primarily for tests/development.
    # Values live in a plain Hash keyed by "feature_key/gate_key".
    class Memory
      # Public: source is an optional Hash to use as the backing store.
      def initialize(source = nil)
        @source = source || {}
      end

      # Public: Returns a Hash of gate => stored value for the feature.
      # The duplicate :boolean/:integer branches are merged, matching
      # the style already used in #enable.
      def get(feature)
        result = {}
        feature.gates.each do |gate|
          result[gate] = case gate.data_type
                         when :boolean, :integer
                           read key(feature, gate)
                         when :set
                           set_members key(feature, gate)
                         else
                           raise "#{gate} is not supported by this adapter yet"
                         end
        end
        result
      end

      # Public: Persist thing's value for the gate.
      def enable(feature, gate, thing)
        case gate.data_type
        when :boolean, :integer
          write key(feature, gate), thing.value.to_s
        when :set
          set_add key(feature, gate), thing.value.to_s
        else
          raise "#{gate} is not supported by this adapter yet"
        end
      end

      # Public: Remove thing's value for the gate. Disabling the boolean
      # gate deliberately clears every gate for the feature.
      def disable(feature, gate, thing)
        case gate.data_type
        when :boolean
          feature.gates.each do |g|
            delete key(feature, g)
          end
        when :integer
          write key(feature, gate), 0
        when :set
          set_delete key(feature, gate), thing.value.to_s
        else
          raise "#{gate} is not supported by this adapter yet"
        end
      end

      # Storage key under which the gate's value for feature is kept.
      def key(feature, gate)
        "#{feature.key}/#{gate.key}"
      end

      # Private
      def read(key)
        @source[key.to_s]
      end

      # Private
      def write(key, value)
        @source[key.to_s] = value.to_s
      end

      # Private
      def delete(key)
        @source.delete(key.to_s)
      end

      # Private
      def set_add(key, value)
        ensure_set_initialized(key)
        @source[key.to_s].add(value.to_s)
      end

      # Private
      def set_delete(key, value)
        ensure_set_initialized(key)
        @source[key.to_s].delete(value.to_s)
      end

      # Private
      def set_members(key)
        ensure_set_initialized(key)
        @source[key.to_s]
      end

      private

      def ensure_set_initialized(key)
        @source[key.to_s] ||= Set.new
      end
    end
  end
end
|
class Object
  # Sentinel distinguishing "no matcher passed" from a falsy matcher.
  # Without it, `42.should respond_to?(:to_int)` passes a plain boolean
  # and the falsy case is silently treated as "no matcher given".
  NO_MATCHER_GIVEN = Object.new

  # Assert that the matcher matches self; with no matcher, return a
  # positive operator matcher so `x.should == y` style works.
  def should(matcher = NO_MATCHER_GIVEN)
    MSpec.expectation
    MSpec.actions :expectation, MSpec.current.state
    if matcher.equal?(NO_MATCHER_GIVEN)
      SpecPositiveOperatorMatcher.new(self)
    else
      unless matcher.matches?(self)
        SpecExpectation.fail_with(*matcher.failure_message)
      end
    end
  end

  # Negative counterpart of #should.
  def should_not(matcher = NO_MATCHER_GIVEN)
    MSpec.expectation
    MSpec.actions :expectation, MSpec.current.state
    if matcher.equal?(NO_MATCHER_GIVEN)
      SpecNegativeOperatorMatcher.new(self)
    else
      if matcher.matches?(self)
        SpecExpectation.fail_with(*matcher.negative_failure_message)
      end
    end
  end
end
Be strict about the argument passed to should, so that 42.should respond_to?(:to_int) generates an error
class Object
  # Sentinel distinguishing "no matcher passed" from a falsy matcher,
  # so `42.should respond_to?(:to_int)` raises instead of passing.
  NO_MATCHER_GIVEN = Object.new

  # Assert that the matcher matches self; with no matcher, return a
  # positive operator matcher so `x.should == y` style works.
  # (Idiom fix: `unless ... else` inverted to a plain if/else.)
  def should(matcher = NO_MATCHER_GIVEN)
    MSpec.expectation
    MSpec.actions :expectation, MSpec.current.state
    if matcher.equal?(NO_MATCHER_GIVEN)
      SpecPositiveOperatorMatcher.new(self)
    else
      unless matcher.matches?(self)
        SpecExpectation.fail_with(*matcher.failure_message)
      end
    end
  end

  # Negative counterpart of #should.
  def should_not(matcher = NO_MATCHER_GIVEN)
    MSpec.expectation
    MSpec.actions :expectation, MSpec.current.state
    if matcher.equal?(NO_MATCHER_GIVEN)
      SpecNegativeOperatorMatcher.new(self)
    else
      if matcher.matches?(self)
        SpecExpectation.fail_with(*matcher.negative_failure_message)
      end
    end
  end
end
|
require 'fortitude/rendering_context'
if defined?(ActiveSupport)
# Register the Fortitude template handler once ActionView is loaded,
# hooking in during Rails' pre-initialization phase. Guarded so this
# file stays loadable outside of a Rails/ActiveSupport environment.
ActiveSupport.on_load(:before_initialize) do
ActiveSupport.on_load(:action_view) do
require "fortitude/rails/template_handler"
end
end
end
module Fortitude
  class << self
    # Global switch controlling whether Rails helpers get Fortitude's
    # refined versions. Call with true/false to set; call with no
    # argument to read. Always returns a boolean.
    def refine_rails_helpers(on_or_off = :not_specified)
      if on_or_off != :not_specified
        @refine_rails_helpers = !!on_or_off
      end
      !!@refine_rails_helpers
    end
  end

  refine_rails_helpers true
end
module Fortitude
  module Rails
    class Railtie < ::Rails::Railtie
      config.after_initialize do
        if Fortitude.refine_rails_helpers
          require 'fortitude/rails/helpers'
          Fortitude::Rails::Helpers.apply_refined_helpers_to!(Fortitude::Widget)
        end

        # In development, make widget output easy to read and debug.
        if ::Rails.env.development?
          ::Fortitude::Widget.class_eval do
            format_output true
            start_and_end_comments true
            debug true
          end
        end
      end

      initializer :fortitude, :before => :set_autoload_paths do |app|
        # All of this code is involved in setting up autoload_paths to work with Fortitude.
        # Why so hard?
        #
        # We're trying to do something that ActiveSupport::Dependencies -- which is what Rails uses for
        # class autoloading -- doesn't really support. We want app/views to be on the autoload path,
        # because there are now Ruby classes living there. (It usually isn't just because all that's there
        # are template source files, not actual Ruby code.) That isn't an issue, though -- adding it
        # is trivial (just do
        # <tt>ActiveSupport::Dependencies.autoload_paths << File.join(Rails.root, 'app/views')</tt>).
        #
        # The real issue is that we want the class <tt>app/views/foo/bar.rb</tt> to define a class called
        # <tt>Views::Foo::Bar</tt>, not just plain <tt>Foo::Bar</tt>. This is what's different from what
        # ActiveSupport::Dependencies normally supports; it expects the filesystem path underneath the
        # root to be exactly identical to the fully-qualified class name.
        #
        # Why are we doing this crazy thing? Because we want you to be able to have a view called
        # <tt>app/views/user/password.rb</tt>, and _not_ have that conflict with a module you just happen to define
        # elsewhere called <tt>User::Password</tt>. If we don't prefix view classes with anything at all, then the
        # potential for conflicts is enormous.
        #
        # As such, we have this code. We'll walk through it step-by-step; note that at the end we *do*
        # add app/views/ to the autoload path, so all this code is doing is just dealing with the fact that
        # the fully-qualified classname (<tt>Views::Foo::Bar</tt>) has one extra component on the front of it
        # (<tt>Views::</tt>) when compared to the subpath (<tt>foo/bar.rb</tt>) underneath what's on the autoload
        # path (<tt>app/views</tt>).

        # Go compute our views root.
        views_root = File.expand_path(File.join(::Rails.root, 'app', 'views'))

        # Now, do all this work inside ::ActiveSupport::Dependencies...
        ::ActiveSupport::Dependencies.module_eval do
          @@_fortitude_views_root = views_root

          # This is the method that gets called to auto-generate namespacing empty
          # modules (_e.g._, the toplevel <tt>Views::</tt> module) for directories
          # under an autoload path.
          #
          # The original method says:
          #
          # "Does the provided path_suffix correspond to an autoloadable module?
          # Instead of returning a boolean, the autoload base for this module is
          # returned."
          #
          # So, we just need to strip off the leading +views/+ from the +path_suffix+,
          # and see if that maps to a directory underneath <tt>app/views/</tt>; if so,
          # we'll return the path to <tt>.../app/views/</tt>. Otherwise, we just
          # delegate back to the superclass method.
          def autoloadable_module_with_fortitude?(path_suffix)
            if path_suffix =~ %r{^views(/.*)?$}i
              # If we got here, then we were passed a subpath of views/....
              subpath = $1
              if subpath.blank? || File.directory?(File.join(@@_fortitude_views_root, subpath))
                return @@_fortitude_views_root
              end
            end

            with_fortitude_views_removed_from_autoload_path do
              autoloadable_module_without_fortitude?(path_suffix)
            end
          end

          alias_method_chain :autoloadable_module?, :fortitude

          # When we delegate back to original methods, we want them to act as if
          # <tt>app/views/</tt> is _not_ on the autoload path. In order to be thread-safe
          # about that, we couple this method with our override of the writer side of the
          # <tt>mattr_accessor :autoload_paths</tt>, which simply prefers the thread-local
          # that we set to the actual underlying variable.
          def with_fortitude_views_removed_from_autoload_path
            begin
              Thread.current[:_fortitude_autoload_paths_override] = autoload_paths - [ @@_fortitude_views_root ]
              yield
            ensure
              Thread.current[:_fortitude_autoload_paths_override] = nil
            end
          end

          # The use of 'class_eval' here may seem funny, and I think it is, but, without it,
          # the +@@autoload_paths+ gets interpreted as a class variable for this *Railtie*,
          # rather than for ::ActiveSupport::Dependencies. (Why is that? Got me...)
          class_eval <<-EOS
            def self.autoload_paths
              Thread.current[:_fortitude_autoload_paths_override] || @@autoload_paths
            end
          EOS

          # The original method says:
          #
          # "Search for a file in autoload_paths matching the provided suffix."
          #
          # So, we just look to see if the given +path_suffix+ is specifying something like
          # <tt>views/foo/bar</tt> or the fully-qualified version thereof; if so, we glue it together
          # properly, removing the initial <tt>views/</tt> first. (Otherwise, the mechanism would expect
          # <tt>Views::Foo::Bar</tt> to show up in <tt>app/views/views/foo/bar</tt> (yes, a double
          # +views+), since <tt>app/views</tt> is on the autoload path.)
          #
          # Bug fix: widgets named <tt>foo.html.rb</tt> (not just <tt>foo.rb</tt>) must be
          # autoloadable too, so we strip any trailing ".rb" and then probe both extensions.
          def search_for_file_with_fortitude(path_suffix)
            # Remove any ".rb" extension, if present...
            new_path_suffix = path_suffix.sub(/(\.rb)?$/, "")

            found_subpath = if new_path_suffix =~ %r{^views(/.*)$}i
              $1
            elsif new_path_suffix =~ %r{^#{Regexp.escape(@@_fortitude_views_root)}(/.*)$}i
              $1
            end

            if found_subpath
              # Prefer the more-specific ".html.rb" name over plain ".rb".
              [ ".html.rb", ".rb" ].each do |extension|
                path = File.join(@@_fortitude_views_root, "#{found_subpath}#{extension}")
                return path if File.file?(path)
              end
            end

            # Make sure that we remove the views autoload path before letting the rest of
            # the dependency mechanism go searching for files, or else <tt>app/views/foo/bar.rb</tt>
            # *will* be found when looking for just <tt>::Foo::Bar</tt>.
            with_fortitude_views_removed_from_autoload_path { search_for_file_without_fortitude(path_suffix) }
          end

          alias_method_chain :search_for_file, :fortitude
        end

        # And, finally, this is where we add our root to the set of autoload paths.
        ::ActiveSupport::Dependencies.autoload_paths << views_root

        # This is our support for partials. Fortitude doesn't really have a distinction between
        # partials and "full" templates -- everything is just a widget, which is much more elegant --
        # but we still want you to be able to render a widget <tt>Views::Foo::Bar</tt> by saying
        # <tt>render :partial => 'foo/bar'</tt> (from ERb, although you can do it from Fortitude if
        # you want for some reason, too).
        #
        # Normally, ActionView only looks for partials in files starting with an underscore. We
        # do want to allow this, too (in the above case, if you define the widget in the file
        # <tt>app/views/foo/_bar.rb</tt>, it will still work fine); however, we also want to allow
        # you to define it in a file that does _not_ start with an underscore ('cause these are
        # Ruby classes, and that's just plain weird).
        #
        # So, we patch #find_templates: if it's looking for a partial, doesn't find one, and is
        # searching Fortitude templates (the +.rb+ handler), then we try again, turning off the
        # +partial+ flag, and return that instead.
        ::ActionView::PathResolver.class_eval do
          def find_templates_with_fortitude(name, prefix, partial, details)
            templates = find_templates_without_fortitude(name, prefix, partial, details)
            if partial && templates.empty? && details[:handlers] && details[:handlers].include?(:rb)
              templates = find_templates_without_fortitude(name, prefix, false, details.merge(:handlers => [ :rb ]))
            end
            templates
          end

          alias_method_chain :find_templates, :fortitude
        end

        require "fortitude/rails/template_handler"
        require "fortitude/rails/rendering_methods"

        ::ActionController::Base.send(:include, ::Fortitude::Rails::RenderingMethods)
        ::ActionMailer::Base.send(:include, ::Fortitude::Rails::RenderingMethods)
      end
    end
  end
end
Make autoloading of widgets ending in .html.rb work properly, too.
require 'fortitude/rendering_context'
if defined?(ActiveSupport)
# Register the Fortitude template handler once ActionView is loaded,
# hooking in during Rails' pre-initialization phase. Guarded so this
# file stays loadable outside of a Rails/ActiveSupport environment.
ActiveSupport.on_load(:before_initialize) do
ActiveSupport.on_load(:action_view) do
require "fortitude/rails/template_handler"
end
end
end
module Fortitude
  class << self
    # Global switch controlling whether Rails helpers get Fortitude's
    # refined versions. Call with true/false to set; call with no
    # argument to read. Always returns a boolean.
    def refine_rails_helpers(on_or_off = :not_specified)
      if on_or_off != :not_specified
        @refine_rails_helpers = !!on_or_off
      end
      !!@refine_rails_helpers
    end
  end

  refine_rails_helpers true
end
module Fortitude
module Rails
class Railtie < ::Rails::Railtie
config.after_initialize do
if Fortitude.refine_rails_helpers
require 'fortitude/rails/helpers'
Fortitude::Rails::Helpers.apply_refined_helpers_to!(Fortitude::Widget)
end
if ::Rails.env.development?
::Fortitude::Widget.class_eval do
format_output true
start_and_end_comments true
debug true
end
end
end
initializer :fortitude, :before => :set_autoload_paths do |app|
# All of this code is involved in setting up autoload_paths to work with Fortitude.
# Why so hard?
#
# We're trying to do something that ActiveSupport::Dependencies -- which is what Rails uses for
# class autoloading -- doesn't really support. We want app/views to be on the autoload path,
# because there are now Ruby classes living there. (It usually isn't just because all that's there
# are template source files, not actual Ruby code.) That isn't an issue, though -- adding it
# is trivial (just do
# <tt>ActiveSupport::Dependencies.autoload_paths << File.join(Rails.root, 'app/views')</tt>).
#
# The real issue is that we want the class <tt>app/views/foo/bar.rb</tt> to define a class called
# <tt>Views::Foo::Bar</tt>, not just plain <tt>Foo::Bar</tt>. This is what's different from what
# ActiveSupport::Dependencies normally supports; it expects the filesystem path underneath the
# root to be exactly identical to the fully-qualified class name.
#
# Why are we doing this crazy thing? Because we want you to be able to have a view called
# <tt>app/views/user/password.rb</tt>, and _not_ have that conflict with a module you just happen to define
# elsewhere called <tt>User::Password</tt>. If we don't prefix view classes with anything at all, then the
# potential for conflicts is enormous.
#
# As such, we have this code. We'll walk through it step-by-step; note that at the end we *do*
# add app/views/ to the autoload path, so all this code is doing is just dealing with the fact that
# the fully-qualified classname (<tt>Views::Foo::Bar</tt>) has one extra component on the front of it
# (<tt>Views::</tt>) when compared to the subpath (<tt>foo/bar.rb</tt>) underneath what's on the autoload
# path (<tt>app/views</tt>).
# Go compute our views root.
views_root = File.expand_path(File.join(::Rails.root, 'app', 'views'))
# Now, do all this work inside ::ActiveSupport::Dependencies...
::ActiveSupport::Dependencies.module_eval do
@@_fortitude_views_root = views_root
# This is the method that gets called to auto-generate namespacing empty
# modules (_e.g._, the toplevel <tt>Views::</tt> module) for directories
# under an autoload path.
#
# The original method says:
#
# "Does the provided path_suffix correspond to an autoloadable module?
# Instead of returning a boolean, the autoload base for this module is
# returned."
#
# So, we just need to strip off the leading +views/+ from the +path_suffix+,
# and see if that maps to a directory underneath <tt>app/views/</tt>; if so,
# we'll return the path to <tt>.../app/views/</tt>. Otherwise, we just
# delegate back to the superclass method.
def autoloadable_module_with_fortitude?(path_suffix)
if path_suffix =~ %r{^views(/.*)?$}i
# If we got here, then we were passed a subpath of views/....
subpath = $1
if subpath.blank? || File.directory?(File.join(@@_fortitude_views_root, subpath))
return @@_fortitude_views_root
end
end
with_fortitude_views_removed_from_autoload_path do
autoloadable_module_without_fortitude?(path_suffix)
end
end
alias_method_chain :autoloadable_module?, :fortitude
# When we delegate back to original methods, we want them to act as if
# <tt>app/views/</tt> is _not_ on the autoload path. In order to be thread-safe
# about that, we couple this method with our override of the writer side of the
# <tt>mattr_accessor :autoload_paths</tt>, which simply prefers the thread-local
# that we set to the actual underlying variable.
# Runs the supplied block with the Fortitude views root hidden from the
# autoload path. The override is a thread-local (consumed by our
# autoload_paths reader override below it in this file's original layout),
# which keeps the removal thread-safe.
def with_fortitude_views_removed_from_autoload_path
  Thread.current[:_fortitude_autoload_paths_override] = autoload_paths - [ @@_fortitude_views_root ]
  yield
ensure
  # Always clear the override, even when the block raises.
  Thread.current[:_fortitude_autoload_paths_override] = nil
end
# The use of 'class_eval' here may seem funny, and I think it is, but, without it,
# the +@@autoload_paths+ gets interpreted as a class variable for this *Railtie*,
# rather than for ::ActiveSupport::Dependencies. (Why is that? Got me...)
class_eval <<-EOS
def self.autoload_paths
Thread.current[:_fortitude_autoload_paths_override] || @@autoload_paths
end
EOS
# The original method says:
#
# "Search for a file in autoload_paths matching the provided suffix."
#
# So, we just look to see if the given +path_suffix+ is specifying something like
# <tt>views/foo/bar</tt> or the fully-qualified version thereof; if so, we glue it together properly,
# removing the initial <tt>views/</tt> first. (Otherwise, the mechanism would expect
# <tt>Views::Foo::Bar</tt> to show up in <tt>app/views/views/foo/bar</tt> (yes, a double
# +views+), since <tt>app/views</tt> is on the autoload path.)
# Replacement for ActiveSupport::Dependencies#search_for_file.
# Resolves "views/foo/bar" (or its fully-qualified form under the views
# root) to a file under app/views, trying the Fortitude template
# extensions; otherwise delegates to the original search with the views
# root hidden from the autoload path.
def search_for_file_with_fortitude(path_suffix)
  # Remove any ".rb" extension, if present...
  new_path_suffix = path_suffix.sub(/(\.rb)?$/, "")
  # Accept either the short form ("views/foo/bar") or the fully-qualified
  # form ("<views root>/foo/bar"); capture the subpath below the views root.
  found_subpath = if new_path_suffix =~ %r{^views(/.*)$}i
    $1
  elsif new_path_suffix =~ %r{^#{Regexp.escape(@@_fortitude_views_root)}(/.*)$}i
    $1
  end
  if found_subpath
    # Prefer the ".html.rb" template extension over plain ".rb".
    [ ".html.rb", ".rb" ].each do |extension|
      path = File.join(@@_fortitude_views_root, "#{found_subpath}#{extension}")
      return path if File.file?(path)
    end
  end
  # Make sure that we remove the views autoload path before letting the rest of
  # the dependency mechanism go searching for files, or else <tt>app/views/foo/bar.rb</tt>
  # *will* be found when looking for just <tt>::Foo::Bar</tt>.
  with_fortitude_views_removed_from_autoload_path { search_for_file_without_fortitude(path_suffix) }
end
alias_method_chain :search_for_file, :fortitude
end
# And, finally, this is where we add our root to the set of autoload paths.
::ActiveSupport::Dependencies.autoload_paths << views_root
# This is our support for partials. Fortitude doesn't really have a distinction between
# partials and "full" templates -- everything is just a widget, which is much more elegant --
# but we still want you to be able to render a widget <tt>Views::Foo::Bar</tt> by saying
# <tt>render :partial => 'foo/bar'</tt> (from ERb, although you can do it from Fortitude if
# you want for some reason, too).
#
# Normally, ActionView only looks for partials in files starting with an underscore. We
# do want to allow this, too (in the above case, if you define the widget in the file
# <tt>app/views/foo/_bar.rb</tt>, it will still work fine); however, we also want to allow
# you to define it in a file that does _not_ start with an underscore ('cause these are
# Ruby classes, and that's just plain weird).
#
# So, we patch #find_templates: if it's looking for a partial, doesn't find one, and is
# searching Fortitude templates (the +.rb+ handler), then we try again, turning off the
# +partial+ flag, and return that instead.
::ActionView::PathResolver.class_eval do
# Partial-lookup fallback: when a search for a partial comes up empty and
# the Fortitude (.rb) handler is among those requested, retry the same
# lookup as a non-partial, so widgets defined in files without a leading
# underscore still resolve.
def find_templates_with_fortitude(name, prefix, partial, details)
  found = find_templates_without_fortitude(name, prefix, partial, details)
  return found unless partial && found.empty?
  handlers = details[:handlers]
  return found unless handlers && handlers.include?(:rb)
  find_templates_without_fortitude(name, prefix, false, details.merge(:handlers => [ :rb ]))
end
alias_method_chain :find_templates, :fortitude
end
require "fortitude/rails/template_handler"
require "fortitude/rails/rendering_methods"
::ActionController::Base.send(:include, ::Fortitude::Rails::RenderingMethods)
::ActionMailer::Base.send(:include, ::Fortitude::Rails::RenderingMethods)
end
end
end
end
|
require_relative 'conrefifier'
# Unsure why attr_accessor does not work here
# Module-level storage for the site variables hash, shared between the
# conref data source and anything else needing conref data at build time.
module VariableMixin
  class << self
    # Reader/writer for the shared variables hash. (Defined on the
    # singleton class because a plain attr_accessor in the module body
    # would create instance methods, not module-level accessors.)
    attr_accessor :variables
  end

  # Resolves a dotted +association+ such as "reference.article" against the
  # loaded site data, i.e. variables['site']['data']['reference']['article'].
  def self.fetch_data_file(association)
    keys = association.split('.')
    keys.reduce(VariableMixin.variables['site']['data']) { |node, key| node[key] }
  end
end
# A Nanoc data source that layers conref (Liquid {{ }}) variable
# substitution on top of the standard filesystem data source.
class ConrefFS < Nanoc::DataSource
  include Nanoc::DataSources::Filesystem
  include VariableMixin
  include NanocConrefFS::Ancestry
  identifier :'conref-fs'
  # Before iterating over the file objects, this method loads the data folder
  # and applies it to an ivar for later usage.
  def load_objects(dir_name, kind, klass)
    # Only load once, and only when items are being read; layouts and other
    # kinds do not trigger the (expensive) data-folder processing.
    load_data_folder if klass == Nanoc::Int::Item && @variables.nil?
    super
  end
  # Processes the site's data/ folder and publishes the result both on this
  # instance and on VariableMixin so helpers can reach it globally.
  def load_data_folder
    data = Datafiles.process(@site_config)
    config = @site_config.to_h
    @variables = { 'site' => { 'config' => config, 'data' => data } }
    VariableMixin.variables = @variables
  end
  # This function calls the parent super, then adds additional metadata to the item.
  def parse(content_filename, meta_filename, _kind)
    meta, content = super
    apply_attributes(meta, content_filename)
    [meta, content]
  end
  # Merges per-page variables (from the :page_variables site config) into
  # +meta+; when the page declares a :data_association, also computes
  # parent/child navigation entries from the referenced data file.
  def apply_attributes(meta, content_filename)
    page_vars = Conrefifier.file_variables(@site_config[:page_variables], content_filename)
    unless page_vars[:data_association].nil?
      association = page_vars[:data_association]
      toc = VariableMixin.fetch_data_file(association)
      meta[:parents] = create_parents(toc, meta)
      meta[:children] = create_children(toc, meta)
    end
    # Page variables are exposed to items under string keys.
    page_vars.each_pair do |name, value|
      meta[name.to_s] = value
    end
  end
  # This file reads each piece of content as it comes in. It also applies the conref variables
  # (demarcated by Liquid's {{ }} tags) using both the data/ folder and any variables defined
  # within the nanoc.yaml config file
  def read(filename)
    content = super
    # Only content/ and layouts/ are liquified; everything else passes through.
    return content unless filename.start_with?('content', 'layouts')
    # Keeps the most recently read file's raw (pre-Liquid) text; it is set
    # here but not consumed anywhere in this class as written.
    @unparsed_content = content
    Conrefifier.liquify(filename, content, @site_config)
  end
  # This method is extracted from the Nanoc default FS
  def filename_for(base_filename, ext)
    if ext.nil?
      nil
    elsif ext.empty?
      base_filename
    else
      base_filename + '.' + ext
    end
  end
  # This method is extracted from the Nanoc default FS
  def identifier_for_filename(filename)
    if config[:identifier_type] == 'full'
      return Nanoc::Identifier.new(filename)
    end
    # Legacy identifiers: strip index filenames and/or extensions, honoring
    # the :allow_periods_in_identifiers setting.
    if filename =~ /(^|\/)index(\.[^\/]+)?$/
      regex = @config && @config[:allow_periods_in_identifiers] ? /\/?(index)?(\.[^\/\.]+)?$/ : /\/?index(\.[^\/]+)?$/
    else
      regex = @config && @config[:allow_periods_in_identifiers] ? /\.[^\/\.]+$/ : /\.[^\/]+$/
    end
    Nanoc::Identifier.new(filename.sub(regex, ''), type: :legacy)
  end
end
Add `unparsed_content`
require_relative 'conrefifier'
# Unsure why attr_accessor does not work here
# Module-level storage for the site variables hash, shared between the
# conref data source and anything else needing conref data at build time.
module VariableMixin
  class << self
    # Reader/writer for the shared variables hash. (Defined on the
    # singleton class because a plain attr_accessor in the module body
    # would create instance methods, not module-level accessors.)
    attr_accessor :variables
  end

  # Resolves a dotted +association+ such as "reference.article" against the
  # loaded site data, i.e. variables['site']['data']['reference']['article'].
  def self.fetch_data_file(association)
    keys = association.split('.')
    keys.reduce(VariableMixin.variables['site']['data']) { |node, key| node[key] }
  end
end
# A Nanoc data source that layers conref (Liquid {{ }}) variable
# substitution on top of the standard filesystem data source.
class ConrefFS < Nanoc::DataSource
  include Nanoc::DataSources::Filesystem
  include VariableMixin
  include NanocConrefFS::Ancestry
  identifier :'conref-fs'
  # Raw (pre-Liquid) text of the most recently read content/layout file.
  attr_reader :unparsed_content
  # Before iterating over the file objects, this method loads the data folder
  # and applies it to an ivar for later usage.
  def load_objects(dir_name, kind, klass)
    # Only load once, and only when items are being read; layouts and other
    # kinds do not trigger the (expensive) data-folder processing.
    load_data_folder if klass == Nanoc::Int::Item && @variables.nil?
    super
  end
  # Processes the site's data/ folder and publishes the result both on this
  # instance and on VariableMixin so helpers can reach it globally.
  def load_data_folder
    data = Datafiles.process(@site_config)
    config = @site_config.to_h
    @variables = { 'site' => { 'config' => config, 'data' => data } }
    VariableMixin.variables = @variables
  end
  # This function calls the parent super, then adds additional metadata to the item.
  def parse(content_filename, meta_filename, _kind)
    meta, content = super
    apply_attributes(meta, content_filename)
    [meta, content]
  end
  # Merges per-page variables (from the :page_variables site config) into
  # +meta+; when the page declares a :data_association, also computes
  # parent/child navigation entries from the referenced data file.
  def apply_attributes(meta, content_filename)
    page_vars = Conrefifier.file_variables(@site_config[:page_variables], content_filename)
    unless page_vars[:data_association].nil?
      association = page_vars[:data_association]
      toc = VariableMixin.fetch_data_file(association)
      meta[:parents] = create_parents(toc, meta)
      meta[:children] = create_children(toc, meta)
    end
    # NOTE(review): @unparsed_content holds whatever file #read saw last;
    # this assumes #read(content_filename) runs immediately before #parse
    # for the same file — verify that ordering holds in the Nanoc version
    # in use, or items could receive another file's raw content.
    meta[:unparsed_content] = @unparsed_content
    # Page variables are exposed to items under string keys.
    page_vars.each_pair do |name, value|
      meta[name.to_s] = value
    end
  end
  # This file reads each piece of content as it comes in. It also applies the conref variables
  # (demarcated by Liquid's {{ }} tags) using both the data/ folder and any variables defined
  # within the nanoc.yaml config file
  def read(filename)
    content = super
    # Only content/ and layouts/ are liquified; everything else passes through.
    return content unless filename.start_with?('content', 'layouts')
    # Stash the raw text so apply_attributes can expose it on the item.
    @unparsed_content = content
    Conrefifier.liquify(filename, content, @site_config)
  end
  # This method is extracted from the Nanoc default FS
  def filename_for(base_filename, ext)
    if ext.nil?
      nil
    elsif ext.empty?
      base_filename
    else
      base_filename + '.' + ext
    end
  end
  # This method is extracted from the Nanoc default FS
  def identifier_for_filename(filename)
    if config[:identifier_type] == 'full'
      return Nanoc::Identifier.new(filename)
    end
    # Legacy identifiers: strip index filenames and/or extensions, honoring
    # the :allow_periods_in_identifiers setting.
    if filename =~ /(^|\/)index(\.[^\/]+)?$/
      regex = @config && @config[:allow_periods_in_identifiers] ? /\/?(index)?(\.[^\/\.]+)?$/ : /\/?index(\.[^\/]+)?$/
    else
      regex = @config && @config[:allow_periods_in_identifiers] ? /\.[^\/\.]+$/ : /\.[^\/]+$/
    end
    Nanoc::Identifier.new(filename.sub(regex, ''), type: :legacy)
  end
end
|
# FriendlyId Global Configuration
#
# Use this to set up shared configuration options for your entire application.
# Any of the configuration options shown here can also be applied to single
# models by passing arguments to the `friendly_id` class method or defining
# methods in your model.
#
# To learn more, check out the guide:
#
# http://norman.github.io/friendly_id/file.Guide.html
FriendlyId.defaults do |config|
# ## Reserved Words
#
# Some words could conflict with Rails's routes when used as slugs, or are
# undesirable to allow as slugs. Edit this list as needed for your app.
config.use :reserved
config.reserved_words = %w(new edit index session login logout users admin
stylesheets assets javascripts images)
# ## Friendly Finders
#
# Uncomment this to use friendly finders in all models. By default, if
# you wish to find a record by its friendly id, you must do:
#
# MyModel.friendly.find('foo')
#
# If you uncomment this, you can do:
#
# MyModel.find('foo')
#
# This is significantly more convenient but may not be appropriate for
# all applications, so you must explicitly opt-in to this behavior. You can
# always also configure it on a per-model basis if you prefer.
#
# Something else to consider is that using the :finders addon boosts
# performance because it will avoid Rails-internal code that makes runtime
# calls to `Module.extend`.
#
# config.use :finders
#
# ## Slugs
#
# Most applications will use the :slugged module everywhere. If you wish
# to do so, uncomment the following line.
#
# config.use :slugged
#
# By default, FriendlyId's :slugged addon expects the slug column to be named
# 'slug', but you can change it if you wish.
#
# config.slug_column = 'slug'
#
# When FriendlyId can not generate a unique ID from your base method, it appends
# a UUID, separated by a single dash. You can configure the character used as the
# separator. If you're upgrading from FriendlyId 4, you may wish to replace this
# with two dashes.
#
# config.sequence_separator = '-'
#
# ## Tips and Tricks
#
# ### Controlling when slugs are generated
#
# As of FriendlyId 5.0, new slugs are generated only when the slug field is
# nil, but if you're using a column as your base method you can change this
# behavior by overriding the `should_generate_new_friendly_id` method that
# FriendlyId adds to your model. The change below makes FriendlyId 5.0 behave
# more like 4.0.
#
# config.use Module.new {
# def should_generate_new_friendly_id?
# slug.blank? || <your_column_name_here>_changed?
# end
# }
#
# FriendlyId uses Rails's `parameterize` method to generate slugs, but for
# languages that don't use the Roman alphabet, that's not usually sufficient. Here
# we use the Babosa library to transliterate Russian Cyrillic slugs to ASCII. If
# you use this, don't forget to add "babosa" to your Gemfile.
#
# config.use Module.new {
# def normalize_friendly_id(text)
# text.to_slug.normalize! :transliterations => [:russian, :latin]
# end
# }
end
Fix typo in Initializer comments
# FriendlyId Global Configuration
#
# Use this to set up shared configuration options for your entire application.
# Any of the configuration options shown here can also be applied to single
# models by passing arguments to the `friendly_id` class method or defining
# methods in your model.
#
# To learn more, check out the guide:
#
# http://norman.github.io/friendly_id/file.Guide.html
FriendlyId.defaults do |config|
# ## Reserved Words
#
# Some words could conflict with Rails's routes when used as slugs, or are
# undesirable to allow as slugs. Edit this list as needed for your app.
config.use :reserved
config.reserved_words = %w(new edit index session login logout users admin
stylesheets assets javascripts images)
# ## Friendly Finders
#
# Uncomment this to use friendly finders in all models. By default, if
# you wish to find a record by its friendly id, you must do:
#
# MyModel.friendly.find('foo')
#
# If you uncomment this, you can do:
#
# MyModel.find('foo')
#
# This is significantly more convenient but may not be appropriate for
# all applications, so you must explicitly opt-in to this behavior. You can
# always also configure it on a per-model basis if you prefer.
#
# Something else to consider is that using the :finders addon boosts
# performance because it will avoid Rails-internal code that makes runtime
# calls to `Module.extend`.
#
# config.use :finders
#
# ## Slugs
#
# Most applications will use the :slugged module everywhere. If you wish
# to do so, uncomment the following line.
#
# config.use :slugged
#
# By default, FriendlyId's :slugged addon expects the slug column to be named
# 'slug', but you can change it if you wish.
#
# config.slug_column = 'slug'
#
# When FriendlyId can not generate a unique ID from your base method, it appends
# a UUID, separated by a single dash. You can configure the character used as the
# separator. If you're upgrading from FriendlyId 4, you may wish to replace this
# with two dashes.
#
# config.sequence_separator = '-'
#
# ## Tips and Tricks
#
# ### Controlling when slugs are generated
#
# As of FriendlyId 5.0, new slugs are generated only when the slug field is
# nil, but if you're using a column as your base method you can change this
# behavior by overriding the `should_generate_new_friendly_id` method that
# FriendlyId adds to your model. The change below makes FriendlyId 5.0 behave
# more like 4.0.
#
# config.use Module.new {
# def should_generate_new_friendly_id?
# slug.blank? || <your_column_name_here>_changed?
# end
# }
#
# FriendlyId uses Rails's `parameterize` method to generate slugs, but for
# languages that don't use the Roman alphabet, that's not usually sufficient. Here
# we use the Babosa library to transliterate Russian Cyrillic slugs to ASCII. If
# you use this, don't forget to add "babosa" to your Gemfile.
#
# config.use Module.new {
# def normalize_friendly_id(text)
# text.to_slug.normalize! :transliterations => [:russian, :latin]
# end
# }
end
|
module NationalHolidays
  # Gem version (SemVer). Frozen so the shared string constant cannot be
  # mutated by callers (RuboCop Style/MutableConstant).
  VERSION = "0.2.18".freeze
end
Version bump (7 new countries)
module NationalHolidays
  # Gem version (SemVer). Frozen so the shared string constant cannot be
  # mutated by callers (RuboCop Style/MutableConstant).
  VERSION = "0.3.1".freeze
end
|
module Fusuma
  # Executes the shell command (or deprecated xdotool shortcut) configured
  # for a recognized gesture vector.
  class CommandExecutor
    # The gesture vector this executor was built for (must respond to
    # #finger and #direction; its class defines TYPE).
    attr_reader :vector

    def initialize(vector)
      @vector = vector
    end

    # Forks a daemonized child process that replaces itself with the
    # configured command, then detaches so no zombie process is left behind.
    def execute
      pid = fork do
        Process.daemon(true)
        # Pass the command string directly; the string interpolation the
        # original wrapped around it ("#{...}") was redundant.
        exec(command_or_shortcut)
      end
      Process.detach(pid)
      MultiLogger.info("Execute: #{command_or_shortcut}")
    end

    # True when the configuration assigns either a command or a shortcut
    # to this vector.
    def executable?
      command || shortcut
    end

    private

    # Memoized command line: prefer :command, fall back to the deprecated
    # :shortcut, else a placeholder echo so #execute never receives nil.
    def command_or_shortcut
      @command_or_shortcut ||= command || shortcut || no_command
    end

    # The :command configured for this vector, if any.
    def command
      Config.command(vector)
    end

    # Translates a deprecated :shortcut entry into an xdotool invocation,
    # warning the user to migrate to :command.
    def shortcut
      s = Config.shortcut(vector)
      return unless s
      c = "xdotool key #{s}"
      MultiLogger.warn 'shortcut property is deprecated.'
      MultiLogger.warn "Use command: #{c} instead of shortcut: #{s}"
      c
    end

    # Placeholder command that surfaces the unassigned gesture in the logs.
    def no_command
      "echo \"Command is not assigned #{config_parameters}\""
    end

    # The config lookup key parts for this vector, used in diagnostics.
    def config_parameters
      {
        gesture: vector.class::TYPE,
        finger: vector.finger,
        direction: vector.direction
      }
    end
  end
end
Remove unused codes
|
module NationalHolidays
  # Gem version (SemVer). Frozen so the shared string constant cannot be
  # mutated by callers (RuboCop Style/MutableConstant).
  VERSION = "0.2.9".freeze
end
Version bump to 0.2.10
module NationalHolidays
  # Gem version (SemVer). Frozen so the shared string constant cannot be
  # mutated by callers (RuboCop Style/MutableConstant).
  VERSION = "0.2.10".freeze
end
|
module GeneValidator
  # Descriptive-statistics helpers intended to be mixed into Array
  # (treating the array as a numeric vector).
  module ExtraArrayMethods
    # Total of all elements; 0 for an empty array.
    def sum
      reduce(0) { |total, value| total + value }
    end

    # Arithmetic mean as a Float (NaN for an empty array).
    def mean
      sum.fdiv(length)
    end

    # Middle value (average of the two middle values for even lengths).
    def median
      ordered = sort
      size = ordered.length
      (ordered[(size - 1) / 2] + ordered[size / 2]) / 2.0
    end

    # Most frequent element; ties resolve to the last element in
    # frequency-sorted (stable) order.
    def mode
      counts = each_with_object(Hash.new(0)) { |value, tally| tally[value] += 1 }
      sort_by { |value| counts[value] }.last
    end

    # Unbiased (n - 1 denominator) sample variance.
    def sample_variance
      avg = mean
      squared_error = reduce(0) { |total, value| total + (value - avg)**2 }
      squared_error / (length - 1).to_f
    end

    # Sample standard deviation.
    def standard_deviation
      Math.sqrt(sample_variance)
    end
  end
end
# Globally monkey-patches Array with GeneValidator's statistical helpers
# (sum, mean, median, mode, sample_variance, standard_deviation).
class Array
  include GeneValidator::ExtraArrayMethods
end
add updated mean method to Array
module GeneValidator
  # Descriptive-statistics helpers intended to be mixed into Array
  # (treating the array as a numeric vector).
  module ExtraArrayMethods
    # Total of all elements; 0 for an empty array.
    def sum
      reduce(0) { |total, value| total + value }
    end

    # Arithmetic mean as a Float (NaN for an empty array).
    def mean
      sum.fdiv(length)
    end

    # Middle value (average of the two middle values for even lengths).
    def median
      ordered = sort
      size = ordered.length
      (ordered[(size - 1) / 2] + ordered[size / 2]) / 2.0
    end

    # Most frequent element; ties resolve to the last element in
    # frequency-sorted (stable) order.
    def mode
      counts = each_with_object(Hash.new(0)) { |value, tally| tally[value] += 1 }
      sort_by { |value| counts[value] }.last
    end

    # Unbiased (n - 1 denominator) sample variance.
    def sample_variance
      avg = mean
      squared_error = reduce(0) { |total, value| total + (value - avg)**2 }
      squared_error / (length - 1).to_f
    end

    # Sample standard deviation.
    def standard_deviation
      Math.sqrt(sample_variance)
    end
  end
end
# Globally monkey-patches Array with GeneValidator's statistical helpers,
# overriding #mean with a leaner implementation.
class Array
  include GeneValidator::ExtraArrayMethods
  # Mean as a Float, overriding the mixin version.
  # NOTE(review): inject(:+) returns nil for an empty array, so this relies
  # on NilClass#to_f (-> 0.0) and yields NaN for []; confirm callers never
  # pass empty arrays.
  def mean
    inject(:+).to_f / length
  end
end
|
module Net
# Make Socket calls resilient by adding timeouts, retries and specific
# exception categories
#
# TCP Client with:
# * Connection Timeouts
# Ability to timeout if a connect does not complete within a reasonable time
# For example, this can occur when the server is turned off without shutting down
# causing clients to hang creating new connections
#
# * Automatic retries on startup connection failure
# For example, the server is being restarted while the client is starting
# Gives the server a few seconds to restart to
#
# * Automatic retries on active connection failures
# If the server is restarted during
#
# Connection and Read Timeouts are fully configurable
#
# Raises Net::TCPClient::ConnectionTimeout when the connection timeout is exceeded
# Raises Net::TCPClient::ReadTimeout when the read timeout is exceeded
# Raises Net::TCPClient::ConnectionFailure when a network error occurs whilst reading or writing
#
# Note: Only the following methods currently have auto-reconnect enabled:
# * read
# * write
#
# Future:
# * Add auto-reconnect feature to sysread, syswrite, etc...
# * To be a drop-in replacement to TCPSocket should also need to implement the
# following TCPSocket instance methods: :addr, :peeraddr
#
# Design Notes:
# * Does not inherit from Socket or TCP Socket because the socket instance
# has to be completely destroyed and recreated after a connection failure
#
class TCPClient
include SemanticLogger::Loggable if defined?(SemanticLogger::Loggable)
attr_accessor :connect_timeout, :read_timeout, :write_timeout,
:connect_retry_count, :connect_retry_interval, :retry_count,
:policy, :close_on_error, :buffered, :ssl, :proxy_server, :keepalive
attr_reader :servers, :address, :socket, :ssl_handshake_timeout
# Supports embedding user supplied data along with this connection
# such as sequence number and other connection specific information
# Not used or modified by TCPClient
attr_accessor :user_data
# Class-level (class-instance variable, not @@) list of exception classes
# treated as transient network failures: any of these triggers an automatic
# reconnection attempt.
@reconnect_on_errors = [
  Errno::ECONNABORTED,
  Errno::ECONNREFUSED,
  Errno::ECONNRESET,
  Errno::EHOSTUNREACH,
  Errno::EIO,
  Errno::ENETDOWN,
  Errno::ENETRESET,
  Errno::EPIPE,
  Errno::ETIMEDOUT,
  EOFError,
  Net::TCPClient::ConnectionTimeout,
  IOError
]
# Return the array of errors that will result in an automatic connection retry
# To add any additional errors to the standard list:
#   Net::TCPClient.reconnect_on_errors << Errno::EPROTO
def self.reconnect_on_errors
  @reconnect_on_errors
end
# Create a connection, call the supplied block and close the connection on
# completion of the block
#
# See #initialize for the list of parameters
#
# Example
# Net::TCPClient.connect(
# server: 'server:3300',
# connect_retry_interval: 0.1,
# connect_retry_count: 5
# ) do |client|
# client.retry_on_connection_failure do
# client.send('Update the database')
# end
# response = client.read(20)
# puts "Received: #{response}"
# end
#
# Yields a newly created client connection to the supplied block and
# guarantees the connection is closed when the block finishes, whether it
# returns normally or raises. See #initialize for the accepted parameters.
def self.connect(params={})
  client = new(params)
  yield(client)
ensure
  # client is nil when construction itself failed; nothing to close then.
  client.close if client
end
# Create a new TCP Client connection
#
# Parameters:
# :server [String]
# URL of the server to connect to with port number
# 'localhost:2000'
# '192.168.1.10:80'
#
# :servers [Array of String]
# Array of URL's of servers to connect to with port numbers
# ['server1:2000', 'server2:2000']
#
# The second server will only be attempted once the first server
# cannot be connected to or has timed out on connect
# A read failure or timeout will not result in switching to the second
# server, only a connection failure or during an automatic reconnect
#
# :connect_timeout [Float]
# Time in seconds to timeout when trying to connect to the server
# A value of -1 will cause the connect wait time to be infinite
# Default: 10 seconds
#
# :read_timeout [Float]
# Time in seconds to timeout on read
# Can be overridden by supplying a timeout in the read call
# Default: 60
#
# :write_timeout [Float]
# Time in seconds to timeout on write
# Can be overridden by supplying a timeout in the write call
# Default: 60
#
# :buffered [true|false]
# Whether to use Nagle's Buffering algorithm (http://en.wikipedia.org/wiki/Nagle's_algorithm)
# Recommend disabling for RPC style invocations where we don't want to wait for an
# ACK from the server before sending the last partial segment
# Buffering is recommended in a browser or file transfer style environment
# where multiple sends are expected during a single response.
# Also sets sync to true if buffered is false so that all data is sent immediately without
# internal buffering.
# Default: true
#
# :keepalive [true|false]
# Makes the OS check connections even when not in use, so that failed connections fail immediately
# upon use instead of possibly taking considerable time to fail.
# Default: true
#
# :connect_retry_count [Fixnum]
# Number of times to retry connecting when a connection fails
# Default: 10
#
# :connect_retry_interval [Float]
# Number of seconds between connection retry attempts after the first failed attempt
# Default: 0.5
#
# :retry_count [Fixnum]
# Number of times to retry when calling #retry_on_connection_failure
# This is independent of :connect_retry_count which still applies with
# connection failures. This retry controls upto how many times to retry the
# supplied block should a connection failure occur during the block
# Default: 3
#
# :on_connect [Proc]
# Directly after a connection is established and before it is made available
# for use this Block is invoked.
# Typical Use Cases:
# - Initialize per connection session sequence numbers.
# - Pass authentication information to the server.
# - Perform a handshake with the server.
#
# :policy [Symbol|Proc]
# Specify the policy to use when connecting to servers.
# :ordered
# Select a server in the order supplied in the array, with the first
# having the highest priority. The second server will only be connected
# to if the first server is unreachable
# :random
# Randomly select a server from the list every time a connection
# is established, including during automatic connection recovery.
# :ping_time
# FUTURE - Not implemented yet - Pull request anyone?
# The server with the lowest ping time will be tried first
# Proc:
# When a Proc is supplied, it will be called passing in the list
# of servers. The Proc must return one server name
# Example:
# :policy => Proc.new do |servers|
# servers.last
# end
# Default: :ordered
#
# :close_on_error [True|False]
# To prevent the connection from going into an inconsistent state
# automatically close the connection if an error occurs
# This includes a Read Timeout
# Default: true
#
# :proxy_server [String]
# The host name and port in the form of 'host_name:1234' to forward
# socket connections though.
# Default: nil ( none )
#
# SSL Options
# :ssl [true|false|Hash]
# true: SSL is enabled using the SSL context defaults.
# false: SSL is not used.
# Hash:
# Keys from OpenSSL::SSL::SSLContext:
# ca_file, ca_path, cert, cert_store, ciphers, key, ssl_timeout, ssl_version
# verify_callback, verify_depth, verify_mode
# handshake_timeout: [Float]
# The number of seconds to timeout the SSL Handshake.
# Default: connect_timeout
# Default: false.
# See OpenSSL::SSL::SSLContext::DEFAULT_PARAMS for the defaults.
#
# Example:
# client = Net::TCPClient.new(
# server: 'server:3300',
# connect_retry_interval: 0.1,
# connect_retry_count: 5
# )
#
# client.retry_on_connection_failure do
# client.send('Update the database')
# end
#
# # Read upto 20 characters from the server
# response = client.read(20)
#
# puts "Received: #{response}"
# client.close
#
# SSL Example:
# client = Net::TCPClient.new(
# server: 'server:3300',
# connect_retry_interval: 0.1,
# connect_retry_count: 5,
# ssl: true
# )
#
# SSL with options Example:
# client = Net::TCPClient.new(
# server: 'server:3300',
# connect_retry_interval: 0.1,
# connect_retry_count: 5,
# ssl: {
# verify_mode: OpenSSL::SSL::VERIFY_NONE
# }
# )
# Builds the client configuration and eagerly opens the connection (see
# the option documentation above for each keyword). Raises ArgumentError
# when neither :server nor :servers is supplied.
def initialize(server: nil, servers: nil,
               policy: :ordered, buffered: true, keepalive: true,
               connect_timeout: 10.0, read_timeout: 60.0, write_timeout: 60.0,
               connect_retry_count: 10, retry_count: 3, connect_retry_interval: 0.5, close_on_error: true,
               on_connect: nil, proxy_server: nil, ssl: nil
)
  # Timeouts are coerced to Float so callers may pass Integers.
  @read_timeout = read_timeout.to_f
  @write_timeout = write_timeout.to_f
  @connect_timeout = connect_timeout.to_f
  @buffered = buffered
  @keepalive = keepalive
  @connect_retry_count = connect_retry_count
  @retry_count = retry_count
  @connect_retry_interval = connect_retry_interval.to_f
  @on_connect = on_connect
  @proxy_server = proxy_server
  @policy = policy
  @close_on_error = close_on_error
  if ssl
    # ssl: true means "use SSL with the default context options".
    @ssl = ssl == true ? {} : ssl
    # :handshake_timeout is our option, not OpenSSL's: remove it from the
    # hash before the rest is handed to the SSL context.
    @ssl_handshake_timeout = (@ssl.delete(:handshake_timeout) || @connect_timeout).to_f
  end
  # When both are supplied, :servers takes precedence over :server.
  @servers = [server] if server
  @servers = servers if servers
  raise(ArgumentError, 'Missing mandatory :server or :servers') unless @servers
  # Note: the connection is established here, in the constructor.
  connect
end
# Connect to the TCP server
#
# Raises Net::TCPClient::ConnectionTimeout when the time taken to create a connection
# exceeds the :connect_timeout
# Raises Net::TCPClient::ConnectionFailure whenever Socket raises an error such as Error::EACCESS etc, see Socket#connect for more information
#
# Error handling is implemented as follows:
# 1. TCP Socket Connect failure:
# Cannot reach server
# Server is being restarted, or is not running
# Retry 50 times every 100ms before raising a Net::TCPClient::ConnectionFailure
# - Means all calls to #connect will take at least 5 seconds before failing if the server is not running
# - Allows hot restart of server process if it restarts within 5 seconds
#
# 2. TCP Socket Connect timeout:
# Timed out after 5 seconds trying to connect to the server
# Usually means server is busy or the remote server disappeared off the network recently
# No retry, just raise a Net::TCPClient::ConnectionTimeout
#
# Note: When multiple servers are supplied it will only try to connect to
# the subsequent servers once the retry count has been exceeded
#
# Note: Calling #connect on an open connection will close the current connection
# and create a new connection
# Connect (or reconnect) to one of the configured servers, retrying up to
# connect_retry_count times with connect_retry_interval between attempts.
# Raises ConnectionFailure when every attempt is exhausted; see the
# documentation above for the full error-handling contract.
def connect
  start_time = Time.now
  retries = 0
  # Calling #connect on an open connection closes it first and creates a
  # brand-new connection.
  close
  # Number of times to try
  begin
    connect_to_server(servers, policy)
    logger.info(message: "Connected to #{address}", duration: (Time.now - start_time) * 1000) if respond_to?(:logger)
  rescue ConnectionFailure, ConnectionTimeout => exception
    # For ConnectionFailure the interesting error is the wrapped cause;
    # a ConnectionTimeout is its own cause.
    cause = exception.is_a?(ConnectionTimeout) ? exception : exception.cause
    # Retry-able?
    if self.class.reconnect_on_errors.include?(cause.class) && (retries < connect_retry_count.to_i)
      retries += 1
      logger.warn "#connect Failed to connect to any of #{servers.join(',')}. Sleeping:#{connect_retry_interval}s. Retry: #{retries}" if respond_to?(:logger)
      sleep(connect_retry_interval)
      retry
    else
      # Non-retryable cause, or retry budget exhausted: surface a single
      # ConnectionFailure wrapping the original cause.
      message = "#connect Failed to connect to any of #{servers.join(',')} after #{retries} retries. #{exception.class}: #{exception.message}"
      logger.benchmark_error(message, exception: exception, duration: (Time.now - start_time)) if respond_to?(:logger)
      raise ConnectionFailure.new(message, address.to_s, cause)
    end
  end
end
# Send data to the server
#
# Use #with_retry to add resilience to the #send method
#
# Raises Net::TCPClient::ConnectionFailure whenever the send fails
# For a description of the errors, see Socket#write
#
# Parameters
# timeout [Float]
# Optional: Override the default write timeout for this write
# Number of seconds before raising Net::TCPClient::WriteTimeout when no data has
# been written.
# A value of -1 will wait forever
# Default: :write_timeout supplied to #initialize
#
# Note: After a Net::TCPClient::ReadTimeout #read can be called again on
# the same socket to read the response later.
# If the application no longers want the connection after a
# Net::TCPClient::ReadTimeout, then the #close method _must_ be called
# before calling _connect_ or _retry_on_connection_failure_ to create
# a new connection
# Send +data+ to the server, optionally overriding the default write
# timeout for this call. Returns the number of bytes written (via
# socket_write). See the documentation above for the timeout contract.
def write(data, timeout = write_timeout)
  data = data.to_s
  if respond_to?(:logger)
    payload = {timeout: timeout}
    # With trace level also log the sent data
    payload[:data] = data if logger.trace?
    logger.benchmark_debug('#write', payload: payload) do
      payload[:bytes] = socket_write(data, timeout)
    end
  else
    socket_write(data, timeout)
  end
rescue Exception => exc
  # Deliberately rescues Exception (not just StandardError) so the socket
  # is closed on any failure when close_on_error is set; the exception is
  # always re-raised, never swallowed.
  close if close_on_error
  raise exc
end
# Returns a response from the server
#
# Raises Net::TCPClient::ConnectionTimeout when the time taken to create a connection
# exceeds the :connect_timeout
# Connection is closed
# Raises Net::TCPClient::ConnectionFailure whenever Socket raises an error such as
# Error::EACCESS etc, see Socket#connect for more information
# Connection is closed
# Raises Net::TCPClient::ReadTimeout if the timeout has been exceeded waiting for the
# requested number of bytes from the server
# Partial data will not be returned
# Connection is _not_ closed and #read can be called again later
# to read the respnse from the connection
#
# Parameters
# length [Fixnum]
# The number of bytes to return
# #read will not return until 'length' bytes have been received from
# the server
#
# buffer [String]
# Optional buffer into which to write the data that is read.
#
# timeout [Float]
# Optional: Override the default read timeout for this read
# Number of seconds before raising Net::TCPClient::ReadTimeout when no data has
# been returned
# A value of -1 will wait forever for a response on the socket
# Default: :read_timeout supplied to #initialize
#
# Note: After a Net::TCPClient::ReadTimeout #read can be called again on
# the same socket to read the response later.
# If the application no longers want the connection after a
# Net::TCPClient::ReadTimeout, then the #close method _must_ be called
# before calling _connect_ or _retry_on_connection_failure_ to create
# a new connection
def read(length, buffer = nil, timeout = read_timeout)
  # Without a logger just perform the raw read
  return socket_read(length, buffer, timeout) unless respond_to?(:logger)

  details = {bytes: length, timeout: timeout}
  logger.benchmark_debug('#read', payload: details) do
    received = socket_read(length, buffer, timeout)
    # With trace level also log the received data
    details[:data] = received if logger.trace?
    received
  end
rescue Exception => exc
  close if close_on_error
  raise exc
end
# Send and/or receive data with automatic retry on connection failure
#
# On a connection failure, it will create a new connection and retry the block.
# Returns immediately on exception Net::TCPClient::ReadTimeout
# The connection is always closed on Net::TCPClient::ConnectionFailure regardless of close_on_error
#
# 1. Example of a resilient _readonly_ request:
#
# When reading data from a server that does not change state on the server
# Wrap both the send and the read with #retry_on_connection_failure
# since it is safe to send the same data twice to the server
#
# # Since the send can be sent many times it is safe to also put the receive
# # inside the retry block
# value = client.retry_on_connection_failure do
# client.send("GETVALUE:count\n")
# client.read(20).strip.to_i
# end
#
# 2. Example of a resilient request that _modifies_ data on the server:
#
# When changing state on the server, for example when updating a value
# Wrap _only_ the send with #retry_on_connection_failure
# The read must be outside the #retry_on_connection_failure since we must
# not retry the send if the connection fails during the #read
#
# value = 45
# # Only the send is within the retry block since we cannot re-send once
# # the send was successful since the server may have made the change
# client.retry_on_connection_failure do
# client.send("SETVALUE:#{count}\n")
# end
#   # Server returns "SAVED" if the call was successful
# result = client.read(20).strip
#
# Error handling is implemented as follows:
#   If a network failure occurs during the block invocation the block
# will be called again with a new connection to the server.
# It will only be retried up to 3 times
# The re-connect will independently retry and timeout using all the
# rules of #connect
# Runs the supplied block, reconnecting and retrying on ConnectionFailure.
#
# Yields self to the block.
# Retries up to @retry_count times; failures whose root cause is not listed
# in .reconnect_on_errors are re-raised immediately without a retry.
# Raises ConnectionFailure once the retry count is exhausted.
def retry_on_connection_failure
  retries = 0
  begin
    connect if closed?
    yield(self)
  rescue ConnectionFailure => exception
    exc_str = exception.cause ? "#{exception.cause.class}: #{exception.cause.message}" : exception.message
    # Re-raise exceptions that should not be retried
    if !self.class.reconnect_on_errors.include?(exception.cause.class)
      logger.info "#retry_on_connection_failure not configured to retry: #{exc_str}" if respond_to?(:logger)
      raise exception
    elsif retries < @retry_count
      retries += 1
      logger.warn "#retry_on_connection_failure retry #{retries} due to #{exception.class}: #{exception.message}" if respond_to?(:logger)
      connect
      retry
    end
    logger.error "#retry_on_connection_failure Connection failure: #{exception.class}: #{exception.message}. Giving up after #{retries} retries" if respond_to?(:logger)
    # BUG FIX: `server` is not defined on this class (only `servers` is) and
    # previously raised NameError here; report the configured server list instead.
    raise ConnectionFailure.new("After #{retries} retries to host '#{servers.join(',')}': #{exc_str}", servers.join(','), exception.cause)
  end
end
# Close the socket only if it is not already closed
#
# Logs a warning if an error occurs trying to close the socket
def close
  open_socket = socket
  open_socket.close if open_socket && !open_socket.closed?
  # Drop the connection state regardless of the outcome of #close
  @socket  = nil
  @address = nil
  true
rescue IOError => exception
  logger.warn "IOError when attempting to close socket: #{exception.class}: #{exception.message}" if respond_to?(:logger)
  false
end
# Flush the socket buffer, instrumented when a logger is available.
def flush
  return unless socket

  if respond_to?(:logger)
    logger.benchmark_debug('#flush') { socket.flush }
  else
    socket.flush
  end
end
# Returns true when there is no socket, or the socket has been closed.
def closed?
  current = socket
  current.nil? || current.closed?
end
# Returns true when there is no socket, or the socket is at end of file.
def eof?
  current = socket
  current.nil? || current.eof?
end
# Returns whether the connection to the server is alive
#
# It is useful to call this method before making a call to the server
# that would change data on the server
#
# Note: This method is only useful if the server closed the connection or
# if a previous connection failure occurred.
# If the server is hard killed this will still return true until one
# or more writes are attempted
#
# Note: In testing the overhead of this call is rather low, with the ability to
# make about 120,000 calls per second against an active connection.
# I.e. About 8.3 micro seconds per call
def alive?
  # closed? already covers a nil socket
  return false if socket.nil? || closed?

  # Readable with zero timeout means either data is pending or the peer closed
  if IO.select([socket], nil, nil, 0)
    begin
      !socket.eof?
    rescue StandardError
      false
    end
  else
    true
  end
rescue IOError
  false
end
# Forwards the socket option to the underlying socket.
# Returns true when no socket is present.
def setsockopt(*args)
  current = socket
  current.nil? || current.setsockopt(*args)
end
private
# Connect to one of the servers in the list, per the current policy
# Returns [Socket] the socket connected to or raises an Exception
def connect_to_server(servers, policy)
  # Remember the most recent connection error so it can be raised once every
  # address has been tried
  failure = nil
  Policy::Base.factory(policy, servers).each do |address|
    begin
      return connect_to_address(address)
    rescue ConnectionTimeout, ConnectionFailure => exception
      failure = exception
    end
  end
  # Exhausted every address without connecting
  raise(failure) if failure
  raise(ArgumentError, "No servers supplied to connect to: #{servers.join(',')}")
end
# Establish a TCP connection to the supplied address and record it as the
# active connection (@socket / @address).
#
# address [Net::TCPClient::Address]
#   Host name, ip address and port of server to connect to
#
# NOTE(review): the header previously claimed this returns the socket, but the
# final expression is the @on_connect callback result (or nil when no callback
# was supplied); callers appear to rely on @socket rather than the return value.
def connect_to_address(address)
  socket =
    if proxy_server
      # Tunnel through the configured SOCKS proxy
      ::SOCKSSocket.new("#{address.ip_address}:#{address.port}", proxy_server)
    else
      ::Socket.new(Socket::AF_INET, Socket::SOCK_STREAM, 0)
    end
  unless buffered
    # Unbuffered: flush writes immediately and disable Nagle's algorithm
    socket.sync = true
    socket.setsockopt(Socket::IPPROTO_TCP, Socket::TCP_NODELAY, 1)
  end
  socket.setsockopt(Socket::SOL_SOCKET, Socket::SO_KEEPALIVE, true) if keepalive
  socket_connect(socket, address, connect_timeout)
  # Wrap with SSL after the TCP connect when ssl options were supplied
  @socket  = ssl ? ssl_connect(socket, address, ssl_handshake_timeout) : socket
  @address = address
  # Invoke user supplied Block every time a new connection has been established
  @on_connect.call(self) if @on_connect
end
# Connect the raw socket to the server.
#
# socket  [Socket] unconnected socket
# address [Net::TCPClient::Address] target host/port
# timeout [Float] seconds to allow for the connect; -1 waits forever
#
# Raises Net::TCPClient::ConnectionTimeout when the connection timeout has been exceeded
# Raises Net::TCPClient::ConnectionFailure on any SystemCallError / IOError
def socket_connect(socket, address, timeout)
  socket_address = Socket.pack_sockaddr_in(address.port, address.ip_address)
  # Timeout of -1 means wait forever for a connection
  return socket.connect(socket_address) if timeout == -1

  deadline = Time.now.utc + timeout
  begin
    # connect_nonblock + IO.select loop so the attempt can be abandoned at the deadline
    non_blocking(socket, deadline) { socket.connect_nonblock(socket_address) }
  rescue Errno::EISCONN
    # Connection was successful.
  rescue NonBlockingTimeout
    raise ConnectionTimeout.new("Timed out after #{timeout} seconds trying to connect to #{address}")
  rescue SystemCallError, IOError => exception
    # Wrap low-level socket errors so callers only need to rescue ConnectionFailure
    message = "#connect Connection failure connecting to '#{address.to_s}': #{exception.class}: #{exception.message}"
    logger.error message if respond_to?(:logger)
    raise ConnectionFailure.new(message, address.to_s, exception)
  end
end
# Write data to the socket, honoring the supplied write timeout.
#
# A timeout < 0 performs a plain blocking write with no deadline; otherwise
# write_nonblock is retried via #non_blocking until the deadline passes.
#
# Raises Net::TCPClient::WriteTimeout when the deadline is exceeded
# Raises Net::TCPClient::ConnectionFailure on any SystemCallError / IOError
def socket_write(data, timeout)
  if timeout < 0
    socket.write(data)
  else
    deadline = Time.now.utc + timeout
    non_blocking(socket, deadline) do
      socket.write_nonblock(data)
    end
  end
rescue NonBlockingTimeout
  logger.warn "#write Timeout after #{timeout} seconds" if respond_to?(:logger)
  raise WriteTimeout.new("Timed out after #{timeout} seconds trying to write to #{address}")
rescue SystemCallError, IOError => exception
  message = "#write Connection failure while writing to '#{address.to_s}': #{exception.class}: #{exception.message}"
  logger.error message if respond_to?(:logger)
  raise ConnectionFailure.new(message, address.to_s, exception)
end
# Read exactly `length` bytes from the socket, honoring the read timeout.
#
# A timeout < 0 performs a plain blocking read; otherwise read_nonblock is
# retried via #non_blocking until the deadline passes.
#
# Raises Net::TCPClient::ReadTimeout when the deadline is exceeded
# Raises Net::TCPClient::ConnectionFailure on EOF or any SystemCallError / IOError
#
# NOTE(review): in the timed path a single read_nonblock call may legitimately
# return fewer than `length` bytes; that case falls into the "connection lost"
# branch below even though the connection may still be open — confirm intended.
def socket_read(length, buffer, timeout)
  result =
    if timeout < 0
      buffer.nil? ? socket.read(length) : socket.read(length, buffer)
    else
      deadline = Time.now.utc + timeout
      non_blocking(socket, deadline) do
        buffer.nil? ? socket.read_nonblock(length) : socket.read_nonblock(length, buffer)
      end
    end
  # EOF before all the data was returned
  if result.nil? || (result.length < length)
    logger.warn "#read server closed the connection before #{length} bytes were returned" if respond_to?(:logger)
    raise ConnectionFailure.new('Connection lost while reading data', address.to_s, EOFError.new('end of file reached'))
  end
  result
rescue NonBlockingTimeout
  logger.warn "#read Timeout after #{timeout} seconds" if respond_to?(:logger)
  raise ReadTimeout.new("Timed out after #{timeout} seconds trying to read from #{address}")
rescue SystemCallError, IOError => exception
  message = "#read Connection failure while reading data from '#{address.to_s}': #{exception.class}: #{exception.message}"
  logger.error message if respond_to?(:logger)
  raise ConnectionFailure.new(message, address.to_s, exception)
end
# Internal sentinel raised when a non-blocking operation exceeds its deadline.
class NonBlockingTimeout < ::SocketError
end
# Run the supplied non-blocking socket operation, waiting on the socket with
# IO.select whenever the operation reports it would block, until it completes
# or the deadline is reached.
#
# Raises NonBlockingTimeout once the deadline passes.
def non_blocking(socket, deadline)
  yield
rescue IO::WaitReadable
  time_remaining = check_time_remaining(deadline)
  # Wait until readable (or the remaining time elapses), then retry the operation
  raise NonBlockingTimeout unless IO.select([socket], nil, nil, time_remaining)
  retry
rescue IO::WaitWritable
  time_remaining = check_time_remaining(deadline)
  # Wait until writable (or the remaining time elapses), then retry the operation
  raise NonBlockingTimeout unless IO.select(nil, [socket], nil, time_remaining)
  retry
end
# Returns the number of seconds left until `deadline`.
# Raises NonBlockingTimeout once the deadline has passed.
def check_time_remaining(deadline)
  remaining = deadline - Time.now.utc
  raise NonBlockingTimeout if remaining < 0

  remaining
end
# Perform the SSL handshake over an already-connected TCP socket.
# Returns the connected OpenSSL::SSL::SSLSocket.
#
# timeout [Float] seconds allowed for the handshake; -1 waits forever
#
# Raises Net::TCPClient::ConnectionTimeout when the handshake timeout has been exceeded
# Raises Net::TCPClient::ConnectionFailure on any SSL / socket error
def ssl_connect(socket, address, timeout)
  ssl_context = OpenSSL::SSL::SSLContext.new
  # ssl may be `true` (use defaults) or a Hash of SSLContext params
  ssl_context.set_params(ssl.is_a?(Hash) ? ssl : {})
  ssl_socket = OpenSSL::SSL::SSLSocket.new(socket, ssl_context)
  # Closing the SSL socket also closes the underlying TCP socket
  ssl_socket.sync_close = true
  begin
    if timeout == -1
      # Timeout of -1 means wait forever for a connection
      ssl_socket.connect
    else
      deadline = Time.now.utc + timeout
      begin
        non_blocking(socket, deadline) { ssl_socket.connect_nonblock }
      rescue Errno::EISCONN
        # Connection was successful.
      rescue NonBlockingTimeout
        raise ConnectionTimeout.new("SSL handshake Timed out after #{timeout} seconds trying to connect to #{address.to_s}")
      end
    end
  rescue SystemCallError, OpenSSL::SSL::SSLError, IOError => exception
    message = "#connect SSL handshake failure with '#{address.to_s}': #{exception.class}: #{exception.message}"
    logger.error message if respond_to?(:logger)
    raise ConnectionFailure.new(message, address.to_s, exception)
  end
  # Verify Peer certificate unless verification was explicitly disabled
  ssl_verify(ssl_socket, address) if ssl_context.verify_mode != OpenSSL::SSL::VERIFY_NONE
  ssl_socket
end
# Verifies that the peer certificate matches the host name connected to.
#
# Raises Net::TCPClient::ConnectionFailure if the peer certificate does not
# match its hostname. The error message now includes the domains the
# certificate is actually valid for, to simplify diagnosing
# mis-configured certificates.
def ssl_verify(ssl_socket, address)
  unless OpenSSL::SSL.verify_certificate_identity(ssl_socket.peer_cert, address.host_name)
    # Collect the certificate's domains before closing releases the socket
    domains = ssl_cert_domains(ssl_socket.peer_cert)
    ssl_socket.close
    message = "#connect SSL handshake failed due to a hostname mismatch with '#{address.to_s}'. Certificate valid for: #{domains.join(',')}"
    logger.error message if respond_to?(:logger)
    raise ConnectionFailure.new(message, address.to_s)
  end
end

# Returns [Array<String>] the domain names a certificate is valid for:
# the subject CN plus any DNS entries in the subjectAltName extension.
def ssl_cert_domains(cert)
  return [] unless cert

  domains = cert.subject.to_a.select { |oid, _value, _type| oid == 'CN' }.map { |_oid, value, _type| value }
  san     = cert.extensions.find { |ext| ext.oid == 'subjectAltName' }
  if san
    san.value.split(',').each do |entry|
      name = entry.strip
      # subjectAltName entries look like "DNS:example.com" or "IP:1.2.3.4"
      domains << name.sub(/\ADNS:/, '') if name.start_with?('DNS:')
    end
  end
  domains.uniq
end
end
end
# add cert domains to hostname mismatch error message
module Net
# Make Socket calls resilient by adding timeouts, retries and specific
# exception categories
#
# TCP Client with:
# * Connection Timeouts
# Ability to timeout if a connect does not complete within a reasonable time
# For example, this can occur when the server is turned off without shutting down
# causing clients to hang creating new connections
#
# * Automatic retries on startup connection failure
# For example, the server is being restarted while the client is starting
# Gives the server a few seconds to restart to
#
# * Automatic retries on active connection failures
# If the server is restarted during
#
# Connection and Read Timeouts are fully configurable
#
# Raises Net::TCPClient::ConnectionTimeout when the connection timeout is exceeded
# Raises Net::TCPClient::ReadTimeout when the read timeout is exceeded
# Raises Net::TCPClient::ConnectionFailure when a network error occurs whilst reading or writing
#
# Note: Only the following methods currently have auto-reconnect enabled:
# * read
# * write
#
# Future:
# * Add auto-reconnect feature to sysread, syswrite, etc...
# * To be a drop-in replacement to TCPSocket should also need to implement the
# following TCPSocket instance methods: :addr, :peeraddr
#
# Design Notes:
# * Does not inherit from Socket or TCP Socket because the socket instance
# has to be completely destroyed and recreated after a connection failure
#
class TCPClient
# Use SemanticLogger's instrumentation when the gem is loaded
include SemanticLogger::Loggable if defined?(SemanticLogger::Loggable)

# Tunable connection behavior; see #initialize for the meaning and default of each.
attr_accessor :connect_timeout, :read_timeout, :write_timeout,
              :connect_retry_count, :connect_retry_interval, :retry_count,
              :policy, :close_on_error, :buffered, :ssl, :proxy_server, :keepalive
# Read-only state: the configured server list, the currently connected
# address and socket, and the SSL handshake timeout derived from the ssl options.
attr_reader :servers, :address, :socket, :ssl_handshake_timeout

# Supports embedding user supplied data along with this connection
# such as sequence number and other connection specific information
# Not used or modified by TCPClient
attr_accessor :user_data
# Default set of error classes considered transient network failures and
# therefore safe to retry with a new connection (class instance variable,
# exposed via .reconnect_on_errors below).
@reconnect_on_errors = [
  Errno::ECONNABORTED,
  Errno::ECONNREFUSED,
  Errno::ECONNRESET,
  Errno::EHOSTUNREACH,
  Errno::EIO,
  Errno::ENETDOWN,
  Errno::ENETRESET,
  Errno::EPIPE,
  Errno::ETIMEDOUT,
  EOFError,
  Net::TCPClient::ConnectionTimeout,
  IOError
]

# Return the array of errors that will result in an automatic connection retry
# To add any additional errors to the standard list:
#   Net::TCPClient.reconnect_on_errors << Errno::EPROTO
def self.reconnect_on_errors
  @reconnect_on_errors
end
# Create a connection, call the supplied block and close the connection on
# completion of the block
#
# See #initialize for the list of parameters
#
# Example
# Net::TCPClient.connect(
# server: 'server:3300',
# connect_retry_interval: 0.1,
# connect_retry_count: 5
# ) do |client|
# client.retry_on_connection_failure do
# client.send('Update the database')
# end
# response = client.read(20)
# puts "Received: #{response}"
# end
#
# Create a connection, yield it to the supplied block and guarantee the
# connection is closed when the block completes.
# Returns the block's result. See #initialize for the supported params.
def self.connect(params={})
  client = self.new(params)
  begin
    yield(client)
  ensure
    client.close
  end
end
# Create a new TCP Client connection
#
# Parameters:
# :server [String]
# URL of the server to connect to with port number
# 'localhost:2000'
# '192.168.1.10:80'
#
# :servers [Array of String]
# Array of URL's of servers to connect to with port numbers
# ['server1:2000', 'server2:2000']
#
# The second server will only be attempted once the first server
# cannot be connected to or has timed out on connect
# A read failure or timeout will not result in switching to the second
# server, only a connection failure or during an automatic reconnect
#
# :connect_timeout [Float]
# Time in seconds to timeout when trying to connect to the server
# A value of -1 will cause the connect wait time to be infinite
# Default: 10 seconds
#
# :read_timeout [Float]
# Time in seconds to timeout on read
# Can be overridden by supplying a timeout in the read call
# Default: 60
#
# :write_timeout [Float]
# Time in seconds to timeout on write
# Can be overridden by supplying a timeout in the write call
# Default: 60
#
# :buffered [true|false]
# Whether to use Nagle's Buffering algorithm (http://en.wikipedia.org/wiki/Nagle's_algorithm)
# Recommend disabling for RPC style invocations where we don't want to wait for an
# ACK from the server before sending the last partial segment
# Buffering is recommended in a browser or file transfer style environment
# where multiple sends are expected during a single response.
# Also sets sync to true if buffered is false so that all data is sent immediately without
# internal buffering.
# Default: true
#
# :keepalive [true|false]
# Makes the OS check connections even when not in use, so that failed connections fail immediately
# upon use instead of possibly taking considerable time to fail.
# Default: true
#
# :connect_retry_count [Fixnum]
# Number of times to retry connecting when a connection fails
# Default: 10
#
# :connect_retry_interval [Float]
# Number of seconds between connection retry attempts after the first failed attempt
# Default: 0.5
#
# :retry_count [Fixnum]
# Number of times to retry when calling #retry_on_connection_failure
# This is independent of :connect_retry_count which still applies with
#     connection failures. This retry controls up to how many times to retry the
# supplied block should a connection failure occur during the block
# Default: 3
#
# :on_connect [Proc]
# Directly after a connection is established and before it is made available
# for use this Block is invoked.
# Typical Use Cases:
# - Initialize per connection session sequence numbers.
# - Pass authentication information to the server.
# - Perform a handshake with the server.
#
# :policy [Symbol|Proc]
# Specify the policy to use when connecting to servers.
# :ordered
# Select a server in the order supplied in the array, with the first
# having the highest priority. The second server will only be connected
# to if the first server is unreachable
# :random
# Randomly select a server from the list every time a connection
# is established, including during automatic connection recovery.
# :ping_time
# FUTURE - Not implemented yet - Pull request anyone?
# The server with the lowest ping time will be tried first
# Proc:
# When a Proc is supplied, it will be called passing in the list
# of servers. The Proc must return one server name
# Example:
# :policy => Proc.new do |servers|
# servers.last
# end
# Default: :ordered
#
# :close_on_error [True|False]
# To prevent the connection from going into an inconsistent state
# automatically close the connection if an error occurs
# This includes a Read Timeout
# Default: true
#
# :proxy_server [String]
# The host name and port in the form of 'host_name:1234' to forward
# socket connections though.
# Default: nil ( none )
#
# SSL Options
# :ssl [true|false|Hash]
# true: SSL is enabled using the SSL context defaults.
# false: SSL is not used.
# Hash:
# Keys from OpenSSL::SSL::SSLContext:
# ca_file, ca_path, cert, cert_store, ciphers, key, ssl_timeout, ssl_version
# verify_callback, verify_depth, verify_mode
# handshake_timeout: [Float]
# The number of seconds to timeout the SSL Handshake.
# Default: connect_timeout
# Default: false.
# See OpenSSL::SSL::SSLContext::DEFAULT_PARAMS for the defaults.
#
# Example:
# client = Net::TCPClient.new(
# server: 'server:3300',
# connect_retry_interval: 0.1,
# connect_retry_count: 5
# )
#
# client.retry_on_connection_failure do
# client.send('Update the database')
# end
#
# # Read upto 20 characters from the server
# response = client.read(20)
#
# puts "Received: #{response}"
# client.close
#
# SSL Example:
# client = Net::TCPClient.new(
# server: 'server:3300',
# connect_retry_interval: 0.1,
# connect_retry_count: 5,
# ssl: true
# )
#
# SSL with options Example:
# client = Net::TCPClient.new(
# server: 'server:3300',
# connect_retry_interval: 0.1,
# connect_retry_count: 5,
# ssl: {
# verify_mode: OpenSSL::SSL::VERIFY_NONE
# }
# )
# Create a new TCP Client connection.
# See the class-level documentation above for a full description of every
# keyword argument and its default.
#
# Connects immediately; raises Net::TCPClient::ConnectionFailure /
# ConnectionTimeout on failure and ArgumentError when neither :server nor
# :servers is supplied.
def initialize(server: nil, servers: nil,
               policy: :ordered, buffered: true, keepalive: true,
               connect_timeout: 10.0, read_timeout: 60.0, write_timeout: 60.0,
               connect_retry_count: 10, retry_count: 3, connect_retry_interval: 0.5, close_on_error: true,
               on_connect: nil, proxy_server: nil, ssl: nil
)
  @read_timeout           = read_timeout.to_f
  @write_timeout          = write_timeout.to_f
  @connect_timeout        = connect_timeout.to_f
  @buffered               = buffered
  @keepalive              = keepalive
  @connect_retry_count    = connect_retry_count
  @retry_count            = retry_count
  @connect_retry_interval = connect_retry_interval.to_f
  @on_connect             = on_connect
  @proxy_server           = proxy_server
  @policy                 = policy
  @close_on_error         = close_on_error
  if ssl
    # ssl: true selects context defaults; a Hash is passed through to SSLContext#set_params
    @ssl                   = ssl == true ? {} : ssl
    # :handshake_timeout is extracted here (note: mutates the supplied Hash)
    # and defaults to the connect timeout
    @ssl_handshake_timeout = (@ssl.delete(:handshake_timeout) || @connect_timeout).to_f
  end
  # :servers takes precedence over :server when both are supplied
  @servers = [server] if server
  @servers = servers if servers
  raise(ArgumentError, 'Missing mandatory :server or :servers') unless @servers
  connect
end
# Connect to the TCP server
#
# Raises Net::TCPClient::ConnectionTimeout when the time taken to create a connection
# exceeds the :connect_timeout
# Raises Net::TCPClient::ConnectionFailure whenever Socket raises an error such as Error::EACCESS etc, see Socket#connect for more information
#
# Error handling is implemented as follows:
# 1. TCP Socket Connect failure:
# Cannot reach server
# Server is being restarted, or is not running
# Retry 50 times every 100ms before raising a Net::TCPClient::ConnectionFailure
# - Means all calls to #connect will take at least 5 seconds before failing if the server is not running
# - Allows hot restart of server process if it restarts within 5 seconds
#
# 2. TCP Socket Connect timeout:
# Timed out after 5 seconds trying to connect to the server
# Usually means server is busy or the remote server disappeared off the network recently
# No retry, just raise a Net::TCPClient::ConnectionTimeout
#
# Note: When multiple servers are supplied it will only try to connect to
# the subsequent servers once the retry count has been exceeded
#
# Note: Calling #connect on an open connection will close the current connection
# and create a new connection
# Connect to the TCP server, retrying per connect_retry_count /
# connect_retry_interval. See the comment block above for the full contract.
#
# Raises Net::TCPClient::ConnectionFailure / ConnectionTimeout once retries
# are exhausted or the failure is not retry-able.
def connect
  start_time = Time.now
  retries    = 0
  # Calling #connect on an open connection closes the current connection first
  close
  # Number of times to try
  begin
    connect_to_server(servers, policy)
    logger.info(message: "Connected to #{address}", duration: (Time.now - start_time) * 1000) if respond_to?(:logger)
  rescue ConnectionFailure, ConnectionTimeout => exception
    # A ConnectionTimeout is its own root cause; a ConnectionFailure wraps one
    cause = exception.is_a?(ConnectionTimeout) ? exception : exception.cause
    # Retry-able?
    if self.class.reconnect_on_errors.include?(cause.class) && (retries < connect_retry_count.to_i)
      retries += 1
      logger.warn "#connect Failed to connect to any of #{servers.join(',')}. Sleeping:#{connect_retry_interval}s. Retry: #{retries}" if respond_to?(:logger)
      sleep(connect_retry_interval)
      retry
    else
      message = "#connect Failed to connect to any of #{servers.join(',')} after #{retries} retries. #{exception.class}: #{exception.message}"
      logger.benchmark_error(message, exception: exception, duration: (Time.now - start_time)) if respond_to?(:logger)
      raise ConnectionFailure.new(message, address.to_s, cause)
    end
  end
end
# Send data to the server
#
# Use #with_retry to add resilience to the #send method
#
# Raises Net::TCPClient::ConnectionFailure whenever the send fails
# For a description of the errors, see Socket#write
#
# Parameters
# timeout [Float]
# Optional: Override the default write timeout for this write
# Number of seconds before raising Net::TCPClient::WriteTimeout when no data has
# been written.
# A value of -1 will wait forever
# Default: :write_timeout supplied to #initialize
#
# Note: After a Net::TCPClient::ReadTimeout #read can be called again on
# the same socket to read the response later.
# If the application no longer wants the connection after a
# Net::TCPClient::ReadTimeout, then the #close method _must_ be called
# before calling _connect_ or _retry_on_connection_failure_ to create
# a new connection
def write(data, timeout = write_timeout)
  raw = data.to_s
  # Without a logger just perform the raw write
  return socket_write(raw, timeout) unless respond_to?(:logger)

  details = {timeout: timeout}
  # With trace level also log the sent data
  details[:data] = raw if logger.trace?
  logger.benchmark_debug('#write', payload: details) do
    details[:bytes] = socket_write(raw, timeout)
  end
rescue Exception => exc
  close if close_on_error
  raise exc
end
# Returns a response from the server
#
# Raises Net::TCPClient::ConnectionTimeout when the time taken to create a connection
# exceeds the :connect_timeout
# Connection is closed
# Raises Net::TCPClient::ConnectionFailure whenever Socket raises an error such as
# Error::EACCESS etc, see Socket#connect for more information
# Connection is closed
# Raises Net::TCPClient::ReadTimeout if the timeout has been exceeded waiting for the
# requested number of bytes from the server
# Partial data will not be returned
# Connection is _not_ closed and #read can be called again later
# to read the response from the connection
#
# Parameters
# length [Fixnum]
# The number of bytes to return
# #read will not return until 'length' bytes have been received from
# the server
#
# buffer [String]
# Optional buffer into which to write the data that is read.
#
# timeout [Float]
# Optional: Override the default read timeout for this read
# Number of seconds before raising Net::TCPClient::ReadTimeout when no data has
# been returned
# A value of -1 will wait forever for a response on the socket
# Default: :read_timeout supplied to #initialize
#
# Note: After a Net::TCPClient::ReadTimeout #read can be called again on
# the same socket to read the response later.
# If the application no longer wants the connection after a
# Net::TCPClient::ReadTimeout, then the #close method _must_ be called
# before calling _connect_ or _retry_on_connection_failure_ to create
# a new connection
def read(length, buffer = nil, timeout = read_timeout)
  # Without a logger just perform the raw read
  return socket_read(length, buffer, timeout) unless respond_to?(:logger)

  details = {bytes: length, timeout: timeout}
  logger.benchmark_debug('#read', payload: details) do
    received = socket_read(length, buffer, timeout)
    # With trace level also log the received data
    details[:data] = received if logger.trace?
    received
  end
rescue Exception => exc
  close if close_on_error
  raise exc
end
# Send and/or receive data with automatic retry on connection failure
#
# On a connection failure, it will create a new connection and retry the block.
# Returns immediately on exception Net::TCPClient::ReadTimeout
# The connection is always closed on Net::TCPClient::ConnectionFailure regardless of close_on_error
#
# 1. Example of a resilient _readonly_ request:
#
# When reading data from a server that does not change state on the server
# Wrap both the send and the read with #retry_on_connection_failure
# since it is safe to send the same data twice to the server
#
# # Since the send can be sent many times it is safe to also put the receive
# # inside the retry block
# value = client.retry_on_connection_failure do
# client.send("GETVALUE:count\n")
# client.read(20).strip.to_i
# end
#
# 2. Example of a resilient request that _modifies_ data on the server:
#
# When changing state on the server, for example when updating a value
# Wrap _only_ the send with #retry_on_connection_failure
# The read must be outside the #retry_on_connection_failure since we must
# not retry the send if the connection fails during the #read
#
# value = 45
# # Only the send is within the retry block since we cannot re-send once
# # the send was successful since the server may have made the change
# client.retry_on_connection_failure do
# client.send("SETVALUE:#{count}\n")
# end
# # Server returns "SAVED" if the call was successful
# result = client.read(20).strip
#
# Error handling is implemented as follows:
# If a network failure occurs during the block invocation the block
# will be called again with a new connection to the server.
# It will only be retried up to 3 times
# The re-connect will independently retry and timeout using all the
# rules of #connect
# Runs the supplied block, reconnecting and retrying on ConnectionFailure.
#
# Yields self to the block.
# Retries up to @retry_count times; failures whose root cause is not listed
# in .reconnect_on_errors are re-raised immediately without a retry.
# Raises ConnectionFailure once the retry count is exhausted.
def retry_on_connection_failure
  retries = 0
  begin
    connect if closed?
    yield(self)
  rescue ConnectionFailure => exception
    exc_str = exception.cause ? "#{exception.cause.class}: #{exception.cause.message}" : exception.message
    # Re-raise exceptions that should not be retried
    if !self.class.reconnect_on_errors.include?(exception.cause.class)
      logger.info "#retry_on_connection_failure not configured to retry: #{exc_str}" if respond_to?(:logger)
      raise exception
    elsif retries < @retry_count
      retries += 1
      logger.warn "#retry_on_connection_failure retry #{retries} due to #{exception.class}: #{exception.message}" if respond_to?(:logger)
      connect
      retry
    end
    logger.error "#retry_on_connection_failure Connection failure: #{exception.class}: #{exception.message}. Giving up after #{retries} retries" if respond_to?(:logger)
    # BUG FIX: `server` is not defined on this class (only `servers` is) and
    # previously raised NameError here; report the configured server list instead.
    raise ConnectionFailure.new("After #{retries} retries to host '#{servers.join(',')}': #{exc_str}", servers.join(','), exception.cause)
  end
end
# Close the socket only if it is not already closed
#
# Logs a warning if an error occurs trying to close the socket
def close
  open_socket = socket
  open_socket.close if open_socket && !open_socket.closed?
  # Drop the connection state regardless of the outcome of #close
  @socket  = nil
  @address = nil
  true
rescue IOError => exception
  logger.warn "IOError when attempting to close socket: #{exception.class}: #{exception.message}" if respond_to?(:logger)
  false
end
# Flush the socket buffer, instrumented when a logger is available.
def flush
  return unless socket

  if respond_to?(:logger)
    logger.benchmark_debug('#flush') { socket.flush }
  else
    socket.flush
  end
end
# Returns true when there is no socket, or the socket has been closed.
def closed?
  current = socket
  current.nil? || current.closed?
end
# Returns true when there is no socket, or the socket is at end of file.
def eof?
  current = socket
  current.nil? || current.eof?
end
# Returns whether the connection to the server is alive
#
# It is useful to call this method before making a call to the server
# that would change data on the server
#
# Note: This method is only useful if the server closed the connection or
# if a previous connection failure occurred.
# If the server is hard killed this will still return true until one
# or more writes are attempted
#
# Note: In testing the overhead of this call is rather low, with the ability to
# make about 120,000 calls per second against an active connection.
# I.e. About 8.3 micro seconds per call
def alive?
  # closed? already covers a nil socket
  return false if socket.nil? || closed?

  # Readable with zero timeout means either data is pending or the peer closed
  if IO.select([socket], nil, nil, 0)
    begin
      !socket.eof?
    rescue StandardError
      false
    end
  else
    true
  end
rescue IOError
  false
end
# Forwards the socket option to the underlying socket.
# Returns true when no socket is present.
def setsockopt(*args)
  current = socket
  current.nil? || current.setsockopt(*args)
end
private
# Connect to one of the servers in the list, per the current policy
# Returns [Socket] the socket connected to or raises an Exception
def connect_to_server(servers, policy)
  # Remember the most recent connection error so it can be raised once every
  # address has been tried
  failure = nil
  Policy::Base.factory(policy, servers).each do |address|
    begin
      return connect_to_address(address)
    rescue ConnectionTimeout, ConnectionFailure => exception
      failure = exception
    end
  end
  # Exhausted every address without connecting
  raise(failure) if failure
  raise(ArgumentError, "No servers supplied to connect to: #{servers.join(',')}")
end
# Establish a TCP connection to the supplied address and record it as the
# active connection (@socket / @address).
#
# address [Net::TCPClient::Address]
#   Host name, ip address and port of server to connect to
#
# NOTE(review): the header previously claimed this returns the socket, but the
# final expression is the @on_connect callback result (or nil when no callback
# was supplied); callers appear to rely on @socket rather than the return value.
def connect_to_address(address)
  socket =
    if proxy_server
      # Tunnel through the configured SOCKS proxy
      ::SOCKSSocket.new("#{address.ip_address}:#{address.port}", proxy_server)
    else
      ::Socket.new(Socket::AF_INET, Socket::SOCK_STREAM, 0)
    end
  unless buffered
    # Unbuffered: flush writes immediately and disable Nagle's algorithm
    socket.sync = true
    socket.setsockopt(Socket::IPPROTO_TCP, Socket::TCP_NODELAY, 1)
  end
  socket.setsockopt(Socket::SOL_SOCKET, Socket::SO_KEEPALIVE, true) if keepalive
  socket_connect(socket, address, connect_timeout)
  # Wrap with SSL after the TCP connect when ssl options were supplied
  @socket  = ssl ? ssl_connect(socket, address, ssl_handshake_timeout) : socket
  @address = address
  # Invoke user supplied Block every time a new connection has been established
  @on_connect.call(self) if @on_connect
end
# Connect the raw socket to the server.
#
# socket  [Socket] unconnected socket
# address [Net::TCPClient::Address] target host/port
# timeout [Float] seconds to allow for the connect; -1 waits forever
#
# Raises Net::TCPClient::ConnectionTimeout when the connection timeout has been exceeded
# Raises Net::TCPClient::ConnectionFailure on any SystemCallError / IOError
def socket_connect(socket, address, timeout)
  socket_address = Socket.pack_sockaddr_in(address.port, address.ip_address)
  # Timeout of -1 means wait forever for a connection
  return socket.connect(socket_address) if timeout == -1

  deadline = Time.now.utc + timeout
  begin
    # connect_nonblock + IO.select loop so the attempt can be abandoned at the deadline
    non_blocking(socket, deadline) { socket.connect_nonblock(socket_address) }
  rescue Errno::EISCONN
    # Connection was successful.
  rescue NonBlockingTimeout
    raise ConnectionTimeout.new("Timed out after #{timeout} seconds trying to connect to #{address}")
  rescue SystemCallError, IOError => exception
    # Wrap low-level socket errors so callers only need to rescue ConnectionFailure
    message = "#connect Connection failure connecting to '#{address.to_s}': #{exception.class}: #{exception.message}"
    logger.error message if respond_to?(:logger)
    raise ConnectionFailure.new(message, address.to_s, exception)
  end
end
# Write data to the socket, honoring the supplied write timeout.
#
# A timeout < 0 performs a plain blocking write with no deadline; otherwise
# write_nonblock is retried via #non_blocking until the deadline passes.
#
# Raises Net::TCPClient::WriteTimeout when the deadline is exceeded
# Raises Net::TCPClient::ConnectionFailure on any SystemCallError / IOError
def socket_write(data, timeout)
  if timeout < 0
    socket.write(data)
  else
    deadline = Time.now.utc + timeout
    non_blocking(socket, deadline) do
      socket.write_nonblock(data)
    end
  end
rescue NonBlockingTimeout
  logger.warn "#write Timeout after #{timeout} seconds" if respond_to?(:logger)
  raise WriteTimeout.new("Timed out after #{timeout} seconds trying to write to #{address}")
rescue SystemCallError, IOError => exception
  message = "#write Connection failure while writing to '#{address.to_s}': #{exception.class}: #{exception.message}"
  logger.error message if respond_to?(:logger)
  raise ConnectionFailure.new(message, address.to_s, exception)
end
def socket_read(length, buffer, timeout)
result =
if timeout < 0
buffer.nil? ? socket.read(length) : socket.read(length, buffer)
else
deadline = Time.now.utc + timeout
non_blocking(socket, deadline) do
buffer.nil? ? socket.read_nonblock(length) : socket.read_nonblock(length, buffer)
end
end
# EOF before all the data was returned
if result.nil? || (result.length < length)
logger.warn "#read server closed the connection before #{length} bytes were returned" if respond_to?(:logger)
raise ConnectionFailure.new('Connection lost while reading data', address.to_s, EOFError.new('end of file reached'))
end
result
rescue NonBlockingTimeout
logger.warn "#read Timeout after #{timeout} seconds" if respond_to?(:logger)
raise ReadTimeout.new("Timed out after #{timeout} seconds trying to read from #{address}")
rescue SystemCallError, IOError => exception
message = "#read Connection failure while reading data from '#{address.to_s}': #{exception.class}: #{exception.message}"
logger.error message if respond_to?(:logger)
raise ConnectionFailure.new(message, address.to_s, exception)
end
class NonBlockingTimeout< ::SocketError
end
def non_blocking(socket, deadline)
yield
rescue IO::WaitReadable
time_remaining = check_time_remaining(deadline)
raise NonBlockingTimeout unless IO.select([socket], nil, nil, time_remaining)
retry
rescue IO::WaitWritable
time_remaining = check_time_remaining(deadline)
raise NonBlockingTimeout unless IO.select(nil, [socket], nil, time_remaining)
retry
end
def check_time_remaining(deadline)
time_remaining = deadline - Time.now.utc
raise NonBlockingTimeout if time_remaining < 0
time_remaining
end
# Try connecting to a single server
# Returns the connected socket
#
# Raises Net::TCPClient::ConnectionTimeout when the connection timeout has been exceeded
# Raises Net::TCPClient::ConnectionFailure
def ssl_connect(socket, address, timeout)
ssl_context = OpenSSL::SSL::SSLContext.new
ssl_context.set_params(ssl.is_a?(Hash) ? ssl : {})
ssl_socket = OpenSSL::SSL::SSLSocket.new(socket, ssl_context)
ssl_socket.sync_close = true
begin
if timeout == -1
# Timeout of -1 means wait forever for a connection
ssl_socket.connect
else
deadline = Time.now.utc + timeout
begin
non_blocking(socket, deadline) { ssl_socket.connect_nonblock }
rescue Errno::EISCONN
# Connection was successful.
rescue NonBlockingTimeout
raise ConnectionTimeout.new("SSL handshake Timed out after #{timeout} seconds trying to connect to #{address.to_s}")
end
end
rescue SystemCallError, OpenSSL::SSL::SSLError, IOError => exception
message = "#connect SSL handshake failure with '#{address.to_s}': #{exception.class}: #{exception.message}"
logger.error message if respond_to?(:logger)
raise ConnectionFailure.new(message, address.to_s, exception)
end
# Verify Peer certificate
ssl_verify(ssl_socket, address) if ssl_context.verify_mode != OpenSSL::SSL::VERIFY_NONE
ssl_socket
end
# Raises Net::TCPClient::ConnectionFailure if the peer certificate does not match its hostname
def ssl_verify(ssl_socket, address)
unless OpenSSL::SSL.verify_certificate_identity(ssl_socket.peer_cert, address.host_name)
domains = extract_domains_from_cert(ssl_socket.peer_cert)
ssl_socket.close
message = "#connect SSL handshake failed due to a hostname mismatch. Request address was: '#{address.to_s}'" +
" Certificate valid for hostnames: #{domains.map { |d| "'#{d}'"}.join(',')}"
logger.error message if respond_to?(:logger)
raise ConnectionFailure.new(message, address.to_s)
end
end
def extract_domains_from_cert(cert)
cert.subject.to_a.each{|oid, value|
return [value] if oid == "CN"
}
end
end
end
|
require 'git-lib'
require 'uncommitted-changes-error'
require 'git-rebase-error'
require 'git-merge-error'
require 'parked-changes-error'
require 'pull-request'
require 'shellwords'
require 'highline/import'
module Git
  # High-level git workflow helper: rebasing against master, syncing with
  # the remote server, opening pull requests, and managing the
  # '_parking_' branch convention.
  class Process
    attr_reader :lib

    # NOTE(review): class variables are shared across the inheritance tree;
    # they are exposed read-only through the class methods below.
    @@server_name = 'origin'
    @@master_branch = 'master'

    # Either wraps an existing GitLib or builds one from +dir+.
    def initialize(dir = nil, gitlib = nil, options = {})
      @lib = gitlib || Git::GitLib.new(dir, options)
    end

    # e.g. "origin/master"
    def Process.remote_master_branch
      "#{@@server_name}/#{@@master_branch}"
    end

    def Process.server_name
      @@server_name
    end

    def Process.master_branch
      @@master_branch
    end

    # Rebases the current branch onto (remote) master; when a remote exists,
    # fetches first and pushes the result to the server's master branch.
    # Raises UncommittedChangesError / ParkedChangesError as preconditions.
    def rebase_to_master
      raise UncommittedChangesError.new unless lib.status.clean?
      raise ParkedChangesError.new(lib) if is_parked?
      if lib.has_a_remote?
        lib.fetch
        rebase(Process::remote_master_branch)
        lib.push(Process::server_name, lib.branches.current, Process::master_branch)
      else
        rebase("master")
      end
    end

    # Brings the current branch up to date with the server by rebase or
    # merge, then pushes it back (force-push when rebasing). If the remote
    # branch moved while we were working, recurses to sync again first.
    def sync_with_server(rebase)
      raise UncommittedChangesError.new unless lib.status.clean?
      raise ParkedChangesError.new(lib) if is_parked?
      current_branch = lib.branches.current
      remote_branch = "#{Process::server_name}/#{current_branch}"
      lib.fetch
      if rebase
        # rebase(remote_branch)
        rebase(Process::remote_master_branch)
        # Remember where the remote branch was; '' when it does not exist yet.
        old_sha = lib.command('rev-parse', remote_branch) rescue ''
      else
        # merge(remote_branch)
        merge(Process::remote_master_branch)
      end
      unless current_branch == Process::master_branch
        lib.fetch
        if rebase
          new_sha = lib.command('rev-parse', remote_branch) rescue ''
          unless old_sha == new_sha
            # Someone pushed while we were rebasing; sync again before pushing.
            logger.warn("'#{current_branch}' changed on '#{Process::server_name}'"+
              " [#{old_sha[0..5]}->#{new_sha[0..5]}]; trying sync again.")
            sync_with_server(rebase)
          end
        end
        lib.push(Process::server_name, current_branch, current_branch, :force => rebase)
      else
        logger.warn("Not pushing to the server because the current branch is the master branch.")
      end
    end

    # Red warning text shown when an old parking branch had to be renamed.
    def bad_parking_branch_msg
      hl = HighLine.new
      hl.color("\n***********************************************************************************************\n\n"+
        "There is an old '_parking_' branch with unacounted changes in it.\n"+
        "It has been renamed to '_parking_OLD_'.\n"+
        "Please rename the branch to what the changes are about (`git branch -m _parking_OLD_ my_fb_name`),\n"+
        " or remove it altogher (`git branch -D _parking_OLD_`).\n\n"+
        "***********************************************************************************************\n", :red, :bold)
    end

    # Deletes the current (fully merged) feature branch, locally and on the
    # server, and parks the work area on a fresh '_parking_' branch.
    # Raises GitProcessError if the branch is not merged into remote master.
    def remove_feature_branch
      branches = lib.branches
      remote_master = branches[Process::remote_master_branch]
      current_branch = branches.current
      unless remote_master.contains_all_of(current_branch.name)
        raise GitProcessError.new("Branch '#{current_branch.name}' has not been merged into '#{Process::remote_master_branch}'")
      end
      parking_branch = branches['_parking_']
      if parking_branch
        # Keep an old parking branch that still has unique commits by
        # renaming it rather than deleting it.
        if (parking_branch.is_ahead_of(remote_master.name) and
            !current_branch.contains_all_of(parking_branch.name))
          parking_branch.rename('_parking_OLD_')
          logger.warn {bad_parking_branch_msg}
        else
          parking_branch.delete
        end
      end
      remote_master.checkout_to_new('_parking_', :no_track => true)
      current_branch.delete
      # Remove the branch on the server as well.
      lib.command(:push, [Process.server_name, ":#{current_branch.name}"]) if lib.has_a_remote?
    end

    # True when the work area currently sits on the parking branch.
    def is_parked?
      branches = lib.branches
      branches.parking == branches.current
    end

    # Rebase onto +base+, wrapping git failures in RebaseError.
    def rebase(base)
      begin
        lib.rebase(base)
      rescue Git::GitExecuteError => rebase_error
        raise RebaseError.new(rebase_error.message, lib)
      end
    end

    # Merge +base+ in, wrapping git failures in MergeError.
    def merge(base)
      begin
        lib.merge(base)
      rescue Git::GitExecuteError => merge_error
        raise MergeError.new(merge_error.message, lib)
      end
    end

    # Opens a GitHub pull request, prompting interactively for any missing
    # fields and defaulting repo/base/head from the current repository.
    def pull_request(repo_name, base, head, title, body, opts = {})
      repo_name ||= lib.repo_name
      base ||= @@master_branch
      head ||= lib.branches.current
      title ||= ask_for_pull_title
      body ||= ask_for_pull_body
      GitHub::PullRequest.new(lib, repo_name, opts).pull_request(base, head, title, body)
    end

    # Prompt for a pull-request title; must start with a word character.
    def ask_for_pull_title
      ask("What <%= color('title', [:bold]) %> do you want to give the pull request? ") do |q|
        q.validate = /^\w+.*/
      end
    end

    # Prompt for a pull-request description (free-form).
    def ask_for_pull_body
      ask("What <%= color('description', [:bold]) %> do you want to give the pull request? ")
    end

    def logger
      @lib.logger
    end
  end
end
Adds remove_feature_branch to rebase_to_master.
require 'git-lib'
require 'uncommitted-changes-error'
require 'git-rebase-error'
require 'git-merge-error'
require 'parked-changes-error'
require 'pull-request'
require 'shellwords'
require 'highline/import'
module Git
  # High-level git workflow helper: rebasing against master, syncing with
  # the remote server, opening pull requests, and managing the
  # '_parking_' branch convention.
  class Process
    attr_reader :lib

    # NOTE(review): class variables are shared across the inheritance tree;
    # they are exposed read-only through the class methods below.
    @@server_name = 'origin'
    @@master_branch = 'master'

    # Either wraps an existing GitLib or builds one from +dir+.
    def initialize(dir = nil, gitlib = nil, options = {})
      @lib = gitlib || Git::GitLib.new(dir, options)
    end

    # e.g. "origin/master"
    def Process.remote_master_branch
      "#{@@server_name}/#{@@master_branch}"
    end

    def Process.server_name
      @@server_name
    end

    def Process.master_branch
      @@master_branch
    end

    # Rebases the current branch onto (remote) master; when a remote exists,
    # fetches first, pushes the result to the server's master branch, then
    # removes the (now merged) feature branch and parks the work area.
    # Raises UncommittedChangesError / ParkedChangesError as preconditions.
    def rebase_to_master
      raise UncommittedChangesError.new unless lib.status.clean?
      raise ParkedChangesError.new(lib) if is_parked?
      if lib.has_a_remote?
        lib.fetch
        rebase(Process::remote_master_branch)
        lib.push(Process::server_name, lib.branches.current, Process::master_branch)
        remove_feature_branch
      else
        rebase("master")
      end
    end

    # Brings the current branch up to date with the server by rebase or
    # merge, then pushes it back (force-push when rebasing). If the remote
    # branch moved while we were working, recurses to sync again first.
    def sync_with_server(rebase)
      raise UncommittedChangesError.new unless lib.status.clean?
      raise ParkedChangesError.new(lib) if is_parked?
      current_branch = lib.branches.current
      remote_branch = "#{Process::server_name}/#{current_branch}"
      lib.fetch
      if rebase
        # rebase(remote_branch)
        rebase(Process::remote_master_branch)
        # Remember where the remote branch was; '' when it does not exist yet.
        old_sha = lib.command('rev-parse', remote_branch) rescue ''
      else
        # merge(remote_branch)
        merge(Process::remote_master_branch)
      end
      unless current_branch == Process::master_branch
        lib.fetch
        if rebase
          new_sha = lib.command('rev-parse', remote_branch) rescue ''
          unless old_sha == new_sha
            # Someone pushed while we were rebasing; sync again before pushing.
            logger.warn("'#{current_branch}' changed on '#{Process::server_name}'"+
              " [#{old_sha[0..5]}->#{new_sha[0..5]}]; trying sync again.")
            sync_with_server(rebase)
          end
        end
        lib.push(Process::server_name, current_branch, current_branch, :force => rebase)
      else
        logger.warn("Not pushing to the server because the current branch is the master branch.")
      end
    end

    # Red warning text shown when an old parking branch had to be renamed.
    def bad_parking_branch_msg
      hl = HighLine.new
      hl.color("\n***********************************************************************************************\n\n"+
        "There is an old '_parking_' branch with unacounted changes in it.\n"+
        "It has been renamed to '_parking_OLD_'.\n"+
        "Please rename the branch to what the changes are about (`git branch -m _parking_OLD_ my_fb_name`),\n"+
        " or remove it altogher (`git branch -D _parking_OLD_`).\n\n"+
        "***********************************************************************************************\n", :red, :bold)
    end

    # Deletes the current (fully merged) feature branch, locally and on the
    # server, and parks the work area on a fresh '_parking_' branch.
    # Raises GitProcessError if the branch is not merged into remote master.
    def remove_feature_branch
      branches = lib.branches
      remote_master = branches[Process::remote_master_branch]
      current_branch = branches.current
      unless remote_master.contains_all_of(current_branch.name)
        raise GitProcessError.new("Branch '#{current_branch.name}' has not been merged into '#{Process::remote_master_branch}'")
      end
      parking_branch = branches['_parking_']
      if parking_branch
        # Keep an old parking branch that still has unique commits by
        # renaming it rather than deleting it.
        if (parking_branch.is_ahead_of(remote_master.name) and
            !current_branch.contains_all_of(parking_branch.name))
          parking_branch.rename('_parking_OLD_')
          logger.warn {bad_parking_branch_msg}
        else
          parking_branch.delete
        end
      end
      remote_master.checkout_to_new('_parking_', :no_track => true)
      current_branch.delete
      # Remove the branch on the server as well.
      lib.command(:push, [Process.server_name, ":#{current_branch.name}"]) if lib.has_a_remote?
    end

    # True when the work area currently sits on the parking branch.
    def is_parked?
      branches = lib.branches
      branches.parking == branches.current
    end

    # Rebase onto +base+, wrapping git failures in RebaseError.
    def rebase(base)
      begin
        lib.rebase(base)
      rescue Git::GitExecuteError => rebase_error
        raise RebaseError.new(rebase_error.message, lib)
      end
    end

    # Merge +base+ in, wrapping git failures in MergeError.
    def merge(base)
      begin
        lib.merge(base)
      rescue Git::GitExecuteError => merge_error
        raise MergeError.new(merge_error.message, lib)
      end
    end

    # Opens a GitHub pull request, prompting interactively for any missing
    # fields and defaulting repo/base/head from the current repository.
    def pull_request(repo_name, base, head, title, body, opts = {})
      repo_name ||= lib.repo_name
      base ||= @@master_branch
      head ||= lib.branches.current
      title ||= ask_for_pull_title
      body ||= ask_for_pull_body
      GitHub::PullRequest.new(lib, repo_name, opts).pull_request(base, head, title, body)
    end

    # Prompt for a pull-request title; must start with a word character.
    def ask_for_pull_title
      ask("What <%= color('title', [:bold]) %> do you want to give the pull request? ") do |q|
        q.validate = /^\w+.*/
      end
    end

    # Prompt for a pull-request description (free-form).
    def ask_for_pull_body
      ask("What <%= color('description', [:bold]) %> do you want to give the pull request? ")
    end

    def logger
      @lib.logger
    end
  end
end
# Thin wrapper around the `git` command line used by the commit notifier.
class GitCommitNotifier::Git
  class << self
    # Runs +cmd+ in a subshell and returns its standard output.
    # Raises ArgumentError when the command exits with a non-zero status.
    def from_shell(cmd)
      r = `#{cmd}`
      raise ArgumentError.new("#{cmd} failed") unless $?.exitstatus.zero?
      r
    end

    # `git show` output for +rev+; appends -w to ignore whitespace changes.
    def show(rev, ignore_whitespace)
      gitopt = ""
      gitopt += " -w" if ignore_whitespace
      from_shell("git show #{rev.strip}#{gitopt}")
    end

    # Commit log between two revisions.
    def log(rev1, rev2)
      from_shell("git log #{rev1}..#{rev2}").strip
    end

    # Unique name-status lines ("<status>\t<path>") for files changed
    # between the two revisions.
    def changed_files(rev1, rev2)
      lines = from_shell("git log #{rev1}..#{rev2} --name-status --oneline")
      lines = lines.lines if lines.respond_to?(:lines)
      lines = lines.select { |line| line =~ /^\w{1}\s+\w+/ } # grep out only filenames
      lines.uniq
    end

    # Commits reachable from +treeish+ but from no other branch head.
    def branch_commits(treeish)
      args = branch_heads - [ branch_head(treeish) ]
      args.map! { |tree| "^#{tree}" }
      args << treeish
      lines = from_shell("git rev-list #{args.join(' ')}")
      lines = lines.lines if lines.respond_to?(:lines)
      lines.to_a.map { |commit| commit.chomp }
    end

    # SHAs of all local branch heads.
    def branch_heads
      lines = from_shell("git rev-parse --branches")
      lines = lines.lines if lines.respond_to?(:lines)
      lines.to_a.map { |head| head.chomp }
    end

    def branch_head(treeish)
      from_shell("git rev-parse #{treeish}").strip
    end

    # Repository display name: the hooks.emailprefix config value when set,
    # otherwise the working directory name without a trailing ".git".
    def repo_name
      # `git config` exits with status 1 when the key is unset, which
      # from_shell raises as ArgumentError -- treat that as "no prefix".
      git_prefix = begin
        from_shell("git config hooks.emailprefix").strip
      rescue ArgumentError
        ''
      end
      return git_prefix unless git_prefix.empty?
      Dir.pwd.split("/").last.sub(/\.git$/, '')
    end

    def mailing_list_address
      from_shell("git config hooks.mailinglist").strip
    end
  end
end
`git config hooks.emailprefix` may exit with status 1; fixes #95
# Thin wrapper around the `git` command line used by the commit notifier.
class GitCommitNotifier::Git
  class << self
    # Runs +cmd+ in a subshell and returns its standard output.
    # Raises ArgumentError when the command exits with a non-zero status.
    def from_shell(cmd)
      r = `#{cmd}`
      raise ArgumentError.new("#{cmd} failed") unless $?.exitstatus.zero?
      r
    end

    # `git show` output for +rev+; appends -w to ignore whitespace changes.
    def show(rev, ignore_whitespace)
      gitopt = ""
      gitopt += " -w" if ignore_whitespace
      from_shell("git show #{rev.strip}#{gitopt}")
    end

    # Commit log between two revisions.
    def log(rev1, rev2)
      from_shell("git log #{rev1}..#{rev2}").strip
    end

    # Unique name-status lines ("<status>\t<path>") for files changed
    # between the two revisions. (Dropped the previously unused `output`
    # local.)
    def changed_files(rev1, rev2)
      lines = from_shell("git log #{rev1}..#{rev2} --name-status --oneline")
      lines = lines.lines if lines.respond_to?(:lines)
      lines = lines.select { |line| line =~ /^\w{1}\s+\w+/ } # grep out only filenames
      lines.uniq
    end

    # Commits reachable from +treeish+ but from no other branch head.
    def branch_commits(treeish)
      args = branch_heads - [ branch_head(treeish) ]
      args.map! { |tree| "^#{tree}" }
      args << treeish
      lines = from_shell("git rev-list #{args.join(' ')}")
      lines = lines.lines if lines.respond_to?(:lines)
      lines.to_a.map { |commit| commit.chomp }
    end

    # SHAs of all local branch heads.
    def branch_heads
      lines = from_shell("git rev-parse --branches")
      lines = lines.lines if lines.respond_to?(:lines)
      lines.to_a.map { |head| head.chomp }
    end

    def branch_head(treeish)
      from_shell("git rev-parse #{treeish}").strip
    end

    # Repository display name: the hooks.emailprefix config value when set,
    # otherwise the working directory name without a trailing ".git".
    # `git config` exits with status 1 when the key is unset, which
    # from_shell raises as ArgumentError -- treated as "no prefix".
    def repo_name
      git_prefix = begin
        from_shell("git config hooks.emailprefix").strip
      rescue ArgumentError
        ''
      end
      return git_prefix unless git_prefix.empty?
      Dir.pwd.split("/").last.sub(/\.git$/, '')
    end

    def mailing_list_address
      from_shell("git config hooks.mailinglist").strip
    end
  end
end
|
module Groupdate
  module QueryMethods
    # Define one group_by_<period> method per supported period
    # (e.g. group_by_day, group_by_month).
    Groupdate::PERIODS.each do |period|
      # time_zone / range may still be passed positionally for backwards
      # compatibility, but doing so is deprecated.
      define_method :"group_by_#{period}" do |field, time_zone = nil, range = nil, **options|
        warn "[groupdate] positional arguments are deprecated" if time_zone || range
        Groupdate::Magic::Relation.generate_relation(self,
          period: period,
          field: field,
          time_zone: time_zone,
          range: range,
          **options
        )
      end
    end

    # Dispatches to group_by_<period> after validating +period+ against the
    # supported list (optionally narrowed by +permit+).
    # Raises ArgumentError for anything else.
    def group_by_period(period, field, permit: nil, **options)
      # to_sym is unsafe on user input, so convert to strings
      permitted_periods = ((permit || Groupdate::PERIODS).map(&:to_sym) & Groupdate::PERIODS).map(&:to_s)
      if permitted_periods.include?(period.to_s)
        send("group_by_#{period}", field, **options)
      else
        raise ArgumentError, "Unpermitted period"
      end
    end
  end
end
Better deprecation message [skip ci]
module Groupdate
  module QueryMethods
    # Define one group_by_<period> method per supported period
    # (e.g. group_by_day, group_by_month). Positional time_zone/range are
    # still accepted for backwards compatibility, but deprecated.
    Groupdate::PERIODS.each do |period|
      define_method :"group_by_#{period}" do |field, time_zone = nil, range = nil, **options|
        warn "[groupdate] positional arguments for time zone and range are deprecated" if time_zone || range
        Groupdate::Magic::Relation.generate_relation(
          self,
          period: period,
          field: field,
          time_zone: time_zone,
          range: range,
          **options
        )
      end
    end

    # Dispatches to group_by_<period> after validating +period+ against the
    # supported list (optionally narrowed by +permit+).
    # Raises ArgumentError for anything else.
    def group_by_period(period, field, permit: nil, **options)
      # to_sym is unsafe on user input, so compare as strings
      allowed = ((permit || Groupdate::PERIODS).map(&:to_sym) & Groupdate::PERIODS).map(&:to_s)
      raise ArgumentError, "Unpermitted period" unless allowed.include?(period.to_s)
      send("group_by_#{period}", field, **options)
    end
  end
end
|
module Grunt
  module Concerns
    module Commands
      # Parses +string+ as a bot command. Returns { :name =>, :args => }
      # when the string starts with +prefix+ (defaulting to the configured
      # prefix, then "."), or nil otherwise. Note that :args keeps any
      # leading whitespace because the whitespace quantifier is lazy.
      def is_command?(string, prefix = nil)
        prefix ||= Grunt.config["prefix"] || "."
        pattern = /^#{Regexp.escape(prefix)}([a-zA-Z0-9_]+)\s*?(.*)$/
        match = pattern.match(string)
        match && { :name => match[1], :args => match[2] }
      end

      # Private messages need no prefix at all.
      def is_pm_command?(string)
        is_command?(string, "")
      end
    end
  end
end
Better command detection regex
module Grunt
  module Concerns
    module Commands
      # Parses +string+ as a bot command. Returns { :name =>, :args => }
      # when the string starts with +prefix+ (defaulting to the configured
      # prefix, then "."); :args is nil when no arguments follow the name.
      # Returns nil for non-commands. Hyphens are allowed in command names.
      def is_command?(string, prefix = nil)
        prefix ||= Grunt.config["prefix"] || "."
        pattern = /^#{Regexp.escape(prefix)}([a-zA-Z0-9_\-]+)(?: +(.*))?$/
        match = pattern.match(string)
        match ? { :name => match[1], :args => match[2] } : nil
      end

      # Private messages need no prefix at all.
      def is_pm_command?(string)
        is_command?(string, "")
      end
    end
  end
end
|
module Guard
  class Jasmine
    # Filters changed paths down to actual Jasmine spec files on disk.
    module Inspector
      class << self
        # Returns the unique, non-nil paths that are known Jasmine specs.
        # The cached spec list is dropped afterwards so the next call
        # re-reads the file system.
        def clean(paths)
          paths.uniq!
          paths.compact!
          paths = paths.select { |p| jasmine_spec?(p) }
          clear_jasmine_specs_list
          paths
        end

        private

        # True when +path+ is one of the spec files found on disk.
        def jasmine_spec?(path)
          jasmine_specs.include?(path)
        end

        # Memoized list of spec files. The previous pattern
        # 'spec/**/*_spec.js(.coffee)?' used regex syntax: Dir.glob treats
        # parentheses literally and '?' as a one-character wildcard, so it
        # never matched real specs. Brace expansion matches both plain .js
        # and .js.coffee specs.
        def jasmine_specs
          @jasmine_specs ||= Dir.glob('spec/**/*_spec.{js,js.coffee}')
        end

        # Drops the memoized spec list.
        def clear_jasmine_specs_list
          @jasmine_specs = nil
        end
      end
    end
  end
end
Smaller method name for clear spec.
module Guard
  class Jasmine
    # Filters changed paths down to actual Jasmine spec files on disk.
    module Inspector
      class << self
        # Returns the unique, non-nil paths that are known Jasmine specs.
        # The cached spec list is dropped afterwards so the next call
        # re-reads the file system.
        def clean(paths)
          paths.uniq!
          paths.compact!
          paths = paths.select { |p| jasmine_spec?(p) }
          clear_jasmine_specs
          paths
        end

        private

        # True when +path+ is one of the spec files found on disk.
        def jasmine_spec?(path)
          jasmine_specs.include?(path)
        end

        # Memoized list of spec files. The previous pattern
        # 'spec/**/*_spec.js(.coffee)?' used regex syntax: Dir.glob treats
        # parentheses literally and '?' as a one-character wildcard, so it
        # never matched real specs. Brace expansion matches both plain .js
        # and .js.coffee specs.
        def jasmine_specs
          @jasmine_specs ||= Dir.glob('spec/**/*_spec.{js,js.coffee}')
        end

        # Drops the memoized spec list.
        def clear_jasmine_specs
          @jasmine_specs = nil
        end
      end
    end
  end
end
|
module OmniAuth
  module Jinshuju
    # Gem version of the omniauth-jinshuju strategy.
    VERSION = '0.0.1'
  end
end
bump version
module OmniAuth
  module Jinshuju
    # Gem version of the omniauth-jinshuju strategy.
    VERSION = '0.0.2'
  end
end
|
# encoding: utf-8
require 'guard/notifier'
module Guard
  # Formats MiniTest run results and forwards them to Guard's notifier.
  class MinitestNotifier
    # Builds the human-readable summary: a counts line plus a timing line.
    def self.guard_message(test_count, assertion_count, failure_count, error_count, skip_count, duration)
      message = "#{test_count} examples, #{assertion_count} assertions, #{failure_count} failures, #{error_count} errors"
      if skip_count > 0
        message << " (#{skip_count} skips)"
      end
      # NOTE(review): counts are Integers, so this condition is always true
      # (0 is truthy in Ruby); presumably `> 0` checks were intended --
      # kept as-is to preserve behavior.
      if test_count && assertion_count
        message << "\nin %.6f seconds, %.4f tests/s, %.4f assertions/s." % [duration, test_count / duration, assertion_count / duration]
      end
      message
    end

    # failed | pending (skip) | success
    def self.guard_image(failure_count, skip_count)
      # Dropped the useless `icon =` assignment: the if-expression's value
      # is the method's return value.
      if failure_count > 0
        :failed
      elsif skip_count > 0
        :pending
      else
        :success
      end
    end

    # Builds message and icon and hands them to Guard's notifier.
    # Errors count as failures for icon purposes.
    def self.notify(test_count, assertion_count, failure_count, error_count, skip_count, duration)
      message = guard_message(test_count, assertion_count, failure_count, error_count, skip_count, duration)
      image = guard_image(failure_count + error_count, skip_count)
      ::Guard::Notifier.notify(message, :title => 'MiniTest results', :image => image)
    end
  end
end
Added ::Guard::Notifier.turn_on because notifications were not being displayed, possibly related to Spork forks.
# encoding: utf-8
require 'guard/notifier'
module Guard
  # Formats MiniTest run results and forwards them to Guard's notifier.
  class MinitestNotifier
    # Builds the human-readable summary: a counts line plus a timing line.
    def self.guard_message(test_count, assertion_count, failure_count, error_count, skip_count, duration)
      message = "#{test_count} examples, #{assertion_count} assertions, #{failure_count} failures, #{error_count} errors"
      if skip_count > 0
        message << " (#{skip_count} skips)"
      end
      # NOTE(review): counts are Integers, so this condition is always true
      # (0 is truthy in Ruby); presumably `> 0` checks were intended --
      # kept as-is to preserve behavior.
      if test_count && assertion_count
        message << "\nin %.6f seconds, %.4f tests/s, %.4f assertions/s." % [duration, test_count / duration, assertion_count / duration]
      end
      message
    end

    # failed | pending (skip) | success
    def self.guard_image(failure_count, skip_count)
      # Dropped the useless `icon =` assignment: the if-expression's value
      # is the method's return value.
      if failure_count > 0
        :failed
      elsif skip_count > 0
        :pending
      else
        :success
      end
    end

    # Builds message and icon and hands them to Guard's notifier.
    # Errors count as failures for icon purposes.
    def self.notify(test_count, assertion_count, failure_count, error_count, skip_count, duration)
      message = guard_message(test_count, assertion_count, failure_count, error_count, skip_count, duration)
      image = guard_image(failure_count + error_count, skip_count)
      # Ensure the notifier is enabled (it may be off in forked processes,
      # e.g. under Spork).
      ::Guard::Notifier.turn_on
      ::Guard::Notifier.notify(message, :title => 'MiniTest results', :image => image)
    end
  end
end
|
require "omniauth-oauth2"
module OmniAuth
  module Strategies
    # OmniAuth OAuth2 strategy for Sina Weibo.
    class Weibo < OmniAuth::Strategies::OAuth2
      option :client_options, {
        :site => "https://api.weibo.com",
        :authorize_url => "/oauth2/authorize",
        :token_url => "/oauth2/access_token",
        :token_method => :post
      }
      # Weibo returns the token endpoint response as JSON.
      option :token_params, {
        :parse => :json
      }

      # Weibo numeric user id.
      uid do
        raw_info['id']
      end

      info do
        {
          :nickname => raw_info['screen_name'],
          :name => raw_info['name'],
          :location => raw_info['location'],
          :image => image_url,
          :description => raw_info['description'],
          :urls => {
            'Blog' => raw_info['url'],
            # Users without a vanity domain get the /u/<id> form.
            'Weibo' => raw_info['domain'].empty? ? "http://weibo.com/u/#{raw_info['id']}" : "http://weibo.com/#{raw_info['domain']}",
          }
        }
      end

      extra do
        {
          :raw_info => raw_info
        }
      end

      # Fetches the user profile in two memoized API calls: first the uid,
      # then the full record. The access token is sent as a query parameter.
      def raw_info
        access_token.options[:mode] = :query
        access_token.options[:param_name] = 'access_token'
        @uid ||= access_token.get('/2/account/get_uid.json').parsed["uid"]
        @raw_info ||= access_token.get("/2/users/show.json", :params => {:uid => @uid}).parsed
      end

      # First avatar field that is present, in decreasing resolution.
      def find_image
        raw_info[%w(avatar_hd avatar_large profile_image_url).find { |e| raw_info[e].present? }]
      end

      #url: option: size:
      #avatar_hd original original_size
      #avatar_large large 180x180
      #profile_image_url middle 50x50
      # small 30x30
      #default is middle
      def image_url
        image_size = options[:image_size] || :middle
        case image_size.to_sym
        when :original
          url = raw_info['avatar_hd']
        when :large
          url = raw_info['avatar_large']
        when :small
          # Weibo encodes the pixel size in the URL path; swap 180 for 30.
          url = raw_info['avatar_large'].sub('/180/','/30/')
        else
          url = raw_info['profile_image_url']
        end
      end

      ##
      # You can pass +display+, +with_offical_account+ or +state+ params to the auth request, if
      # you need to set them dynamically. You can also set these options
      # in the OmniAuth config :authorize_params option.
      #
      # /auth/weibo?display=mobile&with_offical_account=1
      #
      def authorize_params
        super.tap do |params|
          %w[display with_offical_account forcelogin].each do |v|
            if request.params[v]
              params[v.to_sym] = request.params[v]
            end
          end
        end
      end

      protected

      # Builds the access token by POSTing the authorization code.
      # NOTE(review): this reads options.auth_token_params, but the option
      # declared above is :token_params -- verify which one is intended.
      def build_access_token
        params = {
          'client_id' => client.id,
          'client_secret' => client.secret,
          'code' => request.params['code'],
          'grant_type' => 'authorization_code',
          'redirect_uri' => options['redirect_uri']
        }.merge(token_params.to_hash(symbolize_keys: true))
        client.get_token(params, deep_symbolize(options.auth_token_params))
      end
    end
  end
end
OmniAuth.config.add_camelization "weibo", "Weibo"
fixed invalid_credentials issue
require "omniauth-oauth2"
module OmniAuth
  module Strategies
    # OmniAuth OAuth2 strategy for Sina Weibo.
    class Weibo < OmniAuth::Strategies::OAuth2
      option :client_options, {
        :site => "https://api.weibo.com",
        :authorize_url => "/oauth2/authorize",
        :token_url => "/oauth2/access_token",
        :token_method => :post
      }
      # Weibo returns the token endpoint response as JSON.
      option :token_params, {
        :parse => :json
      }

      # Weibo numeric user id.
      uid do
        raw_info['id']
      end

      info do
        {
          :nickname => raw_info['screen_name'],
          :name => raw_info['name'],
          :location => raw_info['location'],
          :image => image_url,
          :description => raw_info['description'],
          :urls => {
            'Blog' => raw_info['url'],
            # Users without a vanity domain get the /u/<id> form.
            'Weibo' => raw_info['domain'].empty? ? "http://weibo.com/u/#{raw_info['id']}" : "http://weibo.com/#{raw_info['domain']}",
          }
        }
      end

      extra do
        {
          :raw_info => raw_info
        }
      end

      # Callback URL without the query string (Weibo requires the
      # redirect_uri used in the token exchange to match exactly).
      def callback_url
        full_host + script_name + callback_path
      end

      # Fetches the user profile in two memoized API calls: first the uid,
      # then the full record. The access token is sent as a query parameter.
      def raw_info
        access_token.options[:mode] = :query
        access_token.options[:param_name] = 'access_token'
        @uid ||= access_token.get('/2/account/get_uid.json').parsed["uid"]
        @raw_info ||= access_token.get("/2/users/show.json", :params => {:uid => @uid}).parsed
      end

      # First avatar field that is present, in decreasing resolution.
      def find_image
        raw_info[%w(avatar_hd avatar_large profile_image_url).find { |e| raw_info[e].present? }]
      end

      #url: option: size:
      #avatar_hd original original_size
      #avatar_large large 180x180
      #profile_image_url middle 50x50
      # small 30x30
      #default is middle
      def image_url
        image_size = options[:image_size] || :middle
        case image_size.to_sym
        when :original
          url = raw_info['avatar_hd']
        when :large
          url = raw_info['avatar_large']
        when :small
          # Weibo encodes the pixel size in the URL path; swap 180 for 30.
          url = raw_info['avatar_large'].sub('/180/','/30/')
        else
          url = raw_info['profile_image_url']
        end
      end

      ##
      # You can pass +display+, +with_offical_account+ or +state+ params to the auth request, if
      # you need to set them dynamically. You can also set these options
      # in the OmniAuth config :authorize_params option.
      #
      # /auth/weibo?display=mobile&with_offical_account=1
      #
      def authorize_params
        super.tap do |params|
          %w[display with_offical_account forcelogin state].each do |v|
            if request.params[v]
              params[v.to_sym] = request.params[v]
            end
            # Keep OmniAuth's CSRF check in sync with the state actually sent.
            # NOTE(review): this runs even when no state request param was
            # given, writing params[:state] (possibly nil) to the session --
            # confirm this interaction with the CSRF check.
            session["omniauth.state"] = params[v.to_sym] if v == 'state'
          end
        end
      end

      protected

      # Builds the access token by POSTing the authorization code, merging
      # the declared :token_params option into the request.
      def build_access_token
        params = {
          'client_id' => client.id,
          'client_secret' => client.secret,
          'code' => request.params['code'],
          'grant_type' => 'authorization_code',
          'redirect_uri' => options['redirect_uri']
        }.merge(token_params.to_hash(symbolize_keys: true))
        client.get_token(params, deep_symbolize(options.token_params))
      end
    end
  end
end
OmniAuth.config.add_camelization "weibo", "Weibo"
|
require 'haml'
require 'haml/template'
##
# This plugin provides "magical translations" in your .haml files. What does it
# mean? It means that all your raw text in templates will be automatically
# translated by the GetText, FastGettext or Gettext backend from I18n. No more
# complicated translation keys and ugly translation methods in views. Now you can
# simply write in your language, nothing more. At the end of your work you can
# easily find all phrases to translate and generate .po files for them. This type
# of file is also more readable and easier to translate, so it saves you
# time with translations.
#
# === Examples
#
# Now you can write what you want, and at the end of your work you
# will easily find all phrases to translate. Check out the following example:
#
# %p This is my simple dummy text.
# %p And more lorem ipsum...
# %p= link_to _("This will be also translated"), "#"
#
# These translations also allow you to use standard Haml interpolation.
# You can easily write:
#
# %p This is my text with #{"interpolation".upcase}... Great, isn't it?
#
# And text from codes above will be stored in .po files as:
#
# # File test1.haml, line 1
# msgid "This is my simple dummy text"
# msgstr "This is my dummy translation of dummy text"
#
# # File test2.haml, line 1
# msgid "This is my text with %s... Great, isn't it?"
# msgstr "Next one %s translation!"
#
# Generator for .po files also includes information where your phrases are placed
# in filesystem. Thanks to it you don't forget about any even small word to
# translate.
#
module Haml::MagicTranslations
  # Hook: mixes the engine overrides into Haml and the template helpers
  # into Haml::Template.
  def self.included(haml) # :nodoc:
    haml.send(:include, EngineMethods)
    Haml::Template.send(:extend, TemplateMethods)
  end

  # Extended into Haml::Template.
  module TemplateMethods
    # Enables magic translations for one of the supported gettext backends
    # (:i18n, :gettext or :fast_gettext), mixing the backend's helpers into
    # Haml::Helpers and turning on the global :magic_translations option.
    # NOTE(review): `which` in the error message below is undefined, so an
    # unknown backend raises NameError instead of the intended
    # ArgumentError -- it should read `backend`.
    def enable_magic_translations(backend = :i18n)
      case backend
      when :i18n
        require 'i18n'
        require 'i18n/backend/gettext'
        require 'i18n/gettext/helpers'
        I18n::Backend::Simple.send(:include, I18n::Backend::Gettext)
        Haml::Helpers.send(:include, I18n::Gettext::Helpers)
      when :gettext
        require 'gettext'
        Haml::Helpers.send(:include, GetText)
      when :fast_gettext
        require 'fast_gettext'
        Haml::Helpers.send(:include, FastGettext::Translation)
      else
        raise ArgumentError, "Backend #{which.to_s} is not available in Haml::MagicTranslations"
      end
      Haml::Template.options[:magic_translations] = true
    end
  end

  # Included into the Haml engine.
  module EngineMethods
    # Overriden function that parses Haml tags. Injects gettext call for all plain
    # text lines.
    def parse_tag(line)
      tag_name, attributes, attributes_hashes, object_ref, nuke_outer_whitespace,
        nuke_inner_whitespace, action, value, last_line = super(line)
      # Engine-level option wins; fall back to the global template option.
      magic_translations = self.options[:magic_translations]
      magic_translations = Haml::Template.options[:magic_translations] if magic_translations.nil?
      if magic_translations
        # Only translate literal tag text: skip script actions and empty values.
        unless action && action != '!' || action == '!' && value[0] == '=' || value.empty?
          value, interpolation_arguments = prepare_i18n_interpolation(value)
          # Wrap the text in a gettext call, re-applying interpolation args.
          value = "\#{_('#{value.gsub(/'/, "\\\\'")}') % #{interpolation_arguments}\}\n"
        end
      end
      [tag_name, attributes, attributes_hashes, object_ref, nuke_outer_whitespace,
        nuke_inner_whitespace, action, value, last_line]
    end

    # Magical translations will be also used for plain text.
    def plain(text, escape_html = nil)
      if block_opened?
        raise SyntaxError.new("Illegal nesting: nesting within plain text is illegal.", @next_line.index)
      end
      options[:magic_translations] = self.options[:magic_translations] if options[:magic_translations].nil?
      options[:magic_translations] = Haml::Template.options[:magic_translations] if options[:magic_translations].nil?
      if options[:magic_translations]
        value, interpolation_arguments = prepare_i18n_interpolation(text, :escape_html => escape_html)
        value = "_('#{value.gsub(/'/, "\\\\'")}') % #{interpolation_arguments}\n"
        # !:escape_html is simply false: escaping is already applied to the
        # interpolation arguments above when requested.
        script(value, !:escape_html)
      else
        super
      end
    end

    # It discovers all fragments of code embeded in text and replacing with
    # simple string interpolation parameters.
    #
    # ==== Example:
    #
    #   Following line...
    #
    #     %p This is some #{'Interpolated'.upcase'} text
    #
    #   ... will be translated to:
    #
    #     [ "This is some %s text", "['Interpolated'.upcase]" ]
    #
    def prepare_i18n_interpolation(str, opts = {})
      args = []
      res = ''
      # Escape characters that would break the generated Ruby string.
      str = str.
        gsub(/\n/, '\n').
        gsub(/\r/, '\r').
        gsub(/\#/, '\#').
        gsub(/\"/, '\"').
        gsub(/\\/, '\\\\')
      rest = Haml::Shared.handle_interpolation '"' + str + '"' do |scan|
        escapes = (scan[2].size - 1) / 2
        res << scan.matched[0...-3 - escapes]
        if escapes % 2 == 1
          # A literal, escaped "#{" -- keep it as text.
          res << '#{'
        else
          # Real interpolation: replace with %s and keep the code as an arg.
          content = eval('"' + balance(scan, ?{, ?}, 1)[0][0...-1] + '"')
          content = "Haml::Helpers.html_escape(#{content.to_s})" if opts[:escape_html]
          args << content
          res << '%s'
        end
      end
      value = res+rest.gsub(/\\(.)/, '\1').chomp
      # Strip the surrounding double quotes added above.
      value = value[1..-2] unless value.to_s == ''
      args = "[#{args.join(', ')}]"
      [value, args]
    end
  end
end
Haml::Engine.send(:include, Haml::MagicTranslations)
Make prepare_i18n_interpolation arguments better mimic Haml >= 3.1
require 'haml'
require 'haml/template'
##
# This plugin provides "magical translations" in your .haml files. What does it
# mean? It means that all your raw text in templates will be automatically
# translated by the GetText, FastGettext or Gettext backend from I18n. No more
# complicated translation keys and ugly translation methods in views. Now you can
# simply write in your language, nothing more. At the end of your work you can
# easily find all phrases to translate and generate .po files for them. This type
# of file is also more readable and easier to translate, so it saves you
# time with translations.
#
# === Examples
#
# Now you can write what you want, and at the end of your work you
# will easily find all phrases to translate. Check out the following example:
#
# %p This is my simple dummy text.
# %p And more lorem ipsum...
# %p= link_to _("This will be also translated"), "#"
#
# These translations also allow you to use standard Haml interpolation.
# You can easily write:
#
# %p This is my text with #{"interpolation".upcase}... Great, isn't it?
#
# And text from codes above will be stored in .po files as:
#
# # File test1.haml, line 1
# msgid "This is my simple dummy text"
# msgstr "This is my dummy translation of dummy text"
#
# # File test2.haml, line 1
# msgid "This is my text with %s... Great, isn't it?"
# msgstr "Next one %s translation!"
#
# Generator for .po files also includes information where your phrases are placed
# in filesystem. Thanks to it you don't forget about any even small word to
# translate.
#
module Haml::MagicTranslations
def self.included(haml) # :nodoc:
haml.send(:include, EngineMethods)
Haml::Template.send(:extend, TemplateMethods)
end
module TemplateMethods
def enable_magic_translations(backend = :i18n)
case backend
when :i18n
require 'i18n'
require 'i18n/backend/gettext'
require 'i18n/gettext/helpers'
I18n::Backend::Simple.send(:include, I18n::Backend::Gettext)
Haml::Helpers.send(:include, I18n::Gettext::Helpers)
when :gettext
require 'gettext'
Haml::Helpers.send(:include, GetText)
when :fast_gettext
require 'fast_gettext'
Haml::Helpers.send(:include, FastGettext::Translation)
else
raise ArgumentError, "Backend #{which.to_s} is not available in Haml::MagicTranslations"
end
Haml::Template.options[:magic_translations] = true
end
end
module EngineMethods
# Overriden function that parses Haml tags. Injects gettext call for all plain
# text lines.
def parse_tag(line)
tag_name, attributes, attributes_hashes, object_ref, nuke_outer_whitespace,
nuke_inner_whitespace, action, value, last_line = super(line)
magic_translations = self.options[:magic_translations]
magic_translations = Haml::Template.options[:magic_translations] if magic_translations.nil?
if magic_translations
unless action && action != '!' || action == '!' && value[0] == '=' || value.empty?
value, interpolation_arguments = prepare_i18n_interpolation(value)
value = "\#{_('#{value.gsub(/'/, "\\\\'")}') % #{interpolation_arguments}\}\n"
end
end
[tag_name, attributes, attributes_hashes, object_ref, nuke_outer_whitespace,
nuke_inner_whitespace, action, value, last_line]
end
# Magical translations will be also used for plain text.
def plain(text, escape_html = nil)
if block_opened?
raise SyntaxError.new("Illegal nesting: nesting within plain text is illegal.", @next_line.index)
end
options[:magic_translations] = self.options[:magic_translations] if options[:magic_translations].nil?
options[:magic_translations] = Haml::Template.options[:magic_translations] if options[:magic_translations].nil?
if options[:magic_translations]
value, interpolation_arguments = prepare_i18n_interpolation(text, escape_html)
value = "_('#{value.gsub(/'/, "\\\\'")}') % #{interpolation_arguments}\n"
script(value, !:escape_html)
else
super
end
end
# It discovers all fragments of code embeded in text and replacing with
# simple string interpolation parameters.
#
# ==== Example:
#
# Following line...
#
# %p This is some #{'Interpolated'.upcase'} text
#
# ... will be translated to:
#
# [ "This is some %s text", "['Interpolated'.upcase]" ]
#
def prepare_i18n_interpolation(str, escape_html = nil)
args = []
res = ''
str = str.
gsub(/\n/, '\n').
gsub(/\r/, '\r').
gsub(/\#/, '\#').
gsub(/\"/, '\"').
gsub(/\\/, '\\\\')
rest = Haml::Shared.handle_interpolation '"' + str + '"' do |scan|
escapes = (scan[2].size - 1) / 2
res << scan.matched[0...-3 - escapes]
if escapes % 2 == 1
res << '#{'
else
content = eval('"' + balance(scan, ?{, ?}, 1)[0][0...-1] + '"')
content = "Haml::Helpers.html_escape(#{content.to_s})" if escape_html
args << content
res << '%s'
end
end
value = res+rest.gsub(/\\(.)/, '\1').chomp
value = value[1..-2] unless value.to_s == ''
args = "[#{args.join(', ')}]"
[value, args]
end
end
end
Haml::Engine.send(:include, Haml::MagicTranslations)
|
# Rails view helpers for embedding Open Flash Chart SWFs and feeding them
# JSON chart data (Prototype-era JS: PeriodicalExecuter, Ajax.Request).
module OpenFlashChart
  module View
    # <script> tag that invokes +function+ every options[:frequency]
    # seconds (default 10) via Prototype's PeriodicalExecuter.
    def periodically_call_function(function, options = {})
      frequency = options[:frequency] || 10 # every ten seconds by default
      code = "new PeriodicalExecuter(function() {#{function}}, #{frequency})"
      ActionView::Base.new.javascript_tag(code)
    end

    # Embeds open-flash-chart.swf into +div_name+ via swfobject, together
    # with the data-callback script and the target <div>.
    def js_open_flash_chart_object(div_name, width, height, base="/")
      <<-OUTPUT
      <script type="text/javascript">
        swfobject.embedSWF("#{base}open-flash-chart.swf", "#{div_name}", "#{width}", "#{height}", "9.0.0");
      </script>
      #{self.to_open_flash_chart_data}
      <div id="#{div_name}"></div>
      OUTPUT
    end

    # Link that pushes this chart's JSON into the already-embedded SWF.
    def link_to_ofc_load(link_text, div_name)
      data_name = "#{link_text.gsub(" ","_")}_#{div_name.gsub(" ","_")}"
      <<-OUTPUT
      <script type="text/javascript">
        function load_#{data_name}() {
          tmp_#{div_name} = findSWF("#{div_name}");
          x = tmp_#{div_name}.load(Object.toJSON(data_#{data_name}));
        }
        var data_#{data_name} = #{self.render};
      </script>
      #{ActionView::Base.new.link_to_function link_text, "load_#{data_name}()"}
      OUTPUT
    end

    # Link that fetches fresh chart JSON from +url+ and loads it into the SWF.
    def link_to_remote_ofc_load(link_text, div_name, url)
      fx_name = "#{link_text.gsub(" ","_")}_#{div_name.gsub(" ","_")}"
      <<-OUTPUT
      <script type="text/javascript">
        function reload_#{fx_name}() {
          tmp_#{div_name} = findSWF("#{div_name}");
          new Ajax.Request('#{url}', {
            method : 'get',
            onSuccess : function(obj) {tmp_#{div_name}.load(obj.responseText);},
            onFailure : function(obj) {alert("Failed to request #{url}");}});
        }
      </script>
      #{ActionView::Base.new.link_to_function link_text, "reload_#{fx_name}()"}
      OUTPUT
    end

    # Polls +url+ every options[:frequency] seconds and pushes the response
    # into the chart living in +div_name+.
    def periodically_call_to_remote_ofc_load(div_name, url, options={})
      fx_name = "#{div_name.gsub(" ","_")}"
      # Rails' url_for HTML-escapes query separators inside templates; undo it.
      # (was: url.gsub("&", "&"), a no-op that left "&amp;" in the URL)
      url = url.gsub("&amp;", "&")
      <<-OUTPUT
      <script type="text/javascript">
        function reload_#{fx_name}() {
          tmp_#{div_name} = findSWF("#{div_name}");
          new Ajax.Request('#{url}', {
            method : 'get',
            onSuccess : function(obj) {tmp_#{div_name}.load(obj.responseText);},
            onFailure : function(obj) {alert("Failed to request #{url}");}});
        }
      </script>
      #{periodically_call_function("reload_#{fx_name}()", options)}
      OUTPUT
    end

    # Emits the global data variable plus the callbacks the SWF invokes
    # (ofc_ready, open_flash_chart_data, findSWF).
    def to_open_flash_chart_data
      # this builds the open_flash_chart_data js function
      <<-OUTPUT
      <script type="text/javascript">
        function ofc_ready() {
        }
        function open_flash_chart_data() {
          return Object.toJSON(data);
        }
        function findSWF(movieName) {
          if (navigator.appName.indexOf("Microsoft")!= -1) {
            return window[movieName];
          } else {
            return document[movieName];
          }
        }
        var data = #{self.render};
      </script>
      OUTPUT
    end
  end
end
corrections from ctrochalakis for multiple graphs per page with js helpers
# Rails view helpers for Open Flash Chart, revised for multiple charts per
# page: data variables and callbacks are suffixed with the chart's div name.
module OpenFlashChart
  module View
    # <script> tag that invokes +function+ every options[:frequency]
    # seconds (default 10) via Prototype's PeriodicalExecuter.
    def periodically_call_function(function, options = {})
      frequency = options[:frequency] || 10 # every ten seconds by default
      code = "new PeriodicalExecuter(function() {#{function}}, #{frequency})"
      ActionView::Base.new.javascript_tag(code)
    end

    # Embeds open-flash-chart.swf into +div_name+ with a per-chart get-data
    # callback so several charts can coexist on one page.
    # NOTE(review): the flashvar names "open_data_#{div_name}" but
    # to_open_flash_chart_data defines "open_flash_chart_data_#{id}" — verify
    # which callback name the SWF actually invokes.
    def js_open_flash_chart_object(div_name, width, height, base="/")
      <<-OUTPUT
      <script type="text/javascript">
        swfobject.embedSWF("#{base}open-flash-chart.swf", "#{div_name}", "#{width}", "#{height}", "9.0.0", "expressInstall.swf", {"get-data":"open_data_#{div_name}"});
      </script>
      #{self.to_open_flash_chart_data(div_name)}
      <div id="#{div_name}"></div>
      OUTPUT
    end

    # Link that pushes this chart's JSON into the already-embedded SWF.
    def link_to_ofc_load(link_text, div_name)
      data_name = "#{link_text.gsub(" ","_")}_#{div_name.gsub(" ","_")}"
      <<-OUTPUT
      <script type="text/javascript">
        function load_#{data_name}() {
          tmp_#{div_name} = findSWF("#{div_name}");
          x = tmp_#{div_name}.load(Object.toJSON(data_#{data_name}));
        }
        var data_#{data_name} = #{self.render};
      </script>
      #{ActionView::Base.new.link_to_function link_text, "load_#{data_name}()"}
      OUTPUT
    end

    # Link that fetches fresh chart JSON from +url+ and loads it into the SWF.
    def link_to_remote_ofc_load(link_text, div_name, url)
      fx_name = "#{link_text.gsub(" ","_")}_#{div_name.gsub(" ","_")}"
      <<-OUTPUT
      <script type="text/javascript">
        function reload_#{fx_name}() {
          tmp_#{div_name} = findSWF("#{div_name}");
          new Ajax.Request('#{url}', {
            method : 'get',
            onSuccess : function(obj) {tmp_#{div_name}.load(obj.responseText);},
            onFailure : function(obj) {alert("Failed to request #{url}");}});
        }
      </script>
      #{ActionView::Base.new.link_to_function link_text, "reload_#{fx_name}()"}
      OUTPUT
    end

    # Polls +url+ every options[:frequency] seconds and pushes the response
    # into the chart living in +div_name+.
    def periodically_call_to_remote_ofc_load(div_name, url, options={})
      fx_name = "#{div_name.gsub(" ","_")}"
      # Rails' url_for HTML-escapes query separators inside templates; undo it.
      # (was: url.gsub("&", "&"), a no-op that left "&amp;" in the URL)
      url = url.gsub("&amp;", "&")
      <<-OUTPUT
      <script type="text/javascript">
        function reload_#{fx_name}() {
          tmp_#{div_name} = findSWF("#{div_name}");
          new Ajax.Request('#{url}', {
            method : 'get',
            onSuccess : function(obj) {tmp_#{div_name}.load(obj.responseText);},
            onFailure : function(obj) {alert("Failed to request #{url}");}});
        }
      </script>
      #{periodically_call_function("reload_#{fx_name}()", options)}
      OUTPUT
    end

    # Emits the per-chart data variable and callbacks; +id+ suffixes the
    # JS names so multiple charts don't clobber each other.
    def to_open_flash_chart_data(id="in")
      # this builds the open_flash_chart_data js function
      <<-OUTPUT
      <script type="text/javascript">
        function ofc_ready() {
        }
        function open_flash_chart_data_#{id}() {
          return Object.toJSON(data_#{id});
        }
        function findSWF(movieName) {
          if (navigator.appName.indexOf("Microsoft")!= -1) {
            return window[movieName];
          } else {
            return document[movieName];
          }
        }
        var data_#{id} = #{self.render};
      </script>
      OUTPUT
    end
  end
end
|
# Client for the Openfire "userService" HTTP plugin: create, update, delete,
# lock and unlock users via simple GET requests.
class OpenfireApi::UserService
  # Plugin path, relative to the server base URL.
  @@api_path = "plugins/userService/userservice"
  # <error> tags the service may return; each maps onto one of the
  # exception classes declared below.
  @@api_exceptions = %w(UserServiceDisabled RequestNotAuthorised IllegalArgumentException UserNotFoundException UserAlreadyExistsException)

  class HTTPException < StandardError; end
  class InvalidResponseException < StandardError; end
  class UserServiceDisabledException < StandardError; end
  class RequestNotAuthorisedException < StandardError; end
  class IllegalArgumentException < StandardError; end
  class UserNotFoundException < StandardError; end
  class UserAlreadyExistsException < StandardError; end

  # options: :url (server base URL), :secret (shared secret).
  # NOTE(review): :path is stored but build_query_uri always uses @@api_path.
  def initialize(options=Hash.new)
    @options = { :path => @@api_path }.merge(options)
  end

  def add_user!(opts)
    submit_request(opts.merge(:type => :add))
  end

  def delete_user!(opts)
    submit_request(opts.merge(:type => :delete))
  end

  def update_user!(opts)
    submit_request(opts.merge(:type => :update))
  end

  def lock_user!(opts)
    submit_request(opts.merge(:type => :disable))
  end

  def unlock_user!(opts)
    submit_request(opts.merge(:type => :enable))
  end

  private

  # Full request URL as a string (unused by submit_request; kept for callers).
  def build_query(params)
    "#{build_query_uri.to_s}?#{build_query_params(params)}"
  end

  def build_query_uri
    uri = URI.parse(@options[:url])
    uri.path = File.join(uri.path, @@api_path)
    uri
  end

  # Serializes params (plus the shared secret) into a query string.
  def build_query_params(params)
    params.merge!(:secret => @options[:secret])
    params.to_a.map{ |p| "#{p[0]}=#{p[1]}" }.join('&')
  end

  def submit_request(params)
    data = submit_http_request(build_query_uri, build_query_params(params))
    parse_response(data)
  end

  # Performs the GET and returns the raw response body; network failures are
  # re-raised as HTTPException.
  def submit_http_request(uri, params)
    res = Net::HTTP.start(uri.host, uri.port) do |http|
      # was http.get("#{uri}?#{params}"): that sent the absolute URI
      # (scheme + host) as the request path; only path + query belongs here
      http.get("#{uri.path}?#{params}")
    end
    return res.body
  rescue StandardError => e # was: rescue Exception — swallowed signals/exit
    raise HTTPException, e.to_s
  end

  # Maps a known <error> tag to its exception class, or raises
  # InvalidResponseException when no <result>ok</result> is present.
  def parse_response(data)
    error = data.match(/<error>(.*)<\/error>/)
    if error && @@api_exceptions.include?(error[1])
      # const_get instead of eval: same constant lookup, but no code
      # execution driven by response text
      raise self.class.const_get("#{error[1].gsub('Exception', '')}Exception")
    end
    raise InvalidResponseException unless data.match(/<result>ok<\/result>/)
    return true
  end
end
http get fix
# Client for the Openfire "userService" HTTP plugin ("http get fix" revision:
# builds an explicit Net::HTTP::Get with a path-only request target).
class OpenfireApi::UserService
  # Plugin path, relative to the server base URL.
  @@api_path = "plugins/userService/userservice"
  # <error> tags the service may return; each maps onto one of the
  # exception classes declared below.
  @@api_exceptions = %w(UserServiceDisabled RequestNotAuthorised IllegalArgumentException UserNotFoundException UserAlreadyExistsException)

  class HTTPException < StandardError; end
  class InvalidResponseException < StandardError; end
  class UserServiceDisabledException < StandardError; end
  class RequestNotAuthorisedException < StandardError; end
  class IllegalArgumentException < StandardError; end
  class UserNotFoundException < StandardError; end
  class UserAlreadyExistsException < StandardError; end

  # options: :url (server base URL), :secret (shared secret).
  # NOTE(review): :path is stored but build_query_uri always uses @@api_path.
  def initialize(options=Hash.new)
    @options = { :path => @@api_path }.merge(options)
  end

  def add_user!(opts)
    submit_request(opts.merge(:type => :add))
  end

  def delete_user!(opts)
    submit_request(opts.merge(:type => :delete))
  end

  def update_user!(opts)
    submit_request(opts.merge(:type => :update))
  end

  def lock_user!(opts)
    submit_request(opts.merge(:type => :disable))
  end

  def unlock_user!(opts)
    submit_request(opts.merge(:type => :enable))
  end

  private

  # Full request URL as a string (unused by submit_request; kept for callers).
  def build_query(params)
    "#{build_query_uri.to_s}?#{build_query_params(params)}"
  end

  def build_query_uri
    uri = URI.parse(@options[:url])
    uri.path = File.join(uri.path, @@api_path)
    uri
  end

  # Serializes params (plus the shared secret) into a query string.
  def build_query_params(params)
    params.merge!(:secret => @options[:secret])
    params.to_a.map{ |p| "#{p[0]}=#{p[1]}" }.join('&')
  end

  def submit_request(params)
    data = submit_http_request(build_query_uri, build_query_params(params))
    parse_response(data)
  end

  # Performs the GET and returns the raw response body; network failures are
  # re-raised as HTTPException.
  def submit_http_request(uri, params)
    req = Net::HTTP::Get.new("#{uri.path}?#{params}")
    # was: Net::HTTP.start(url.host, url.port) — `url` is undefined
    # (NameError at runtime); the local variable is named `uri`
    res = Net::HTTP.start(uri.host, uri.port) { |http| http.request(req) }
    return res.body
  rescue StandardError => e # was: rescue Exception — swallowed signals/exit
    raise HTTPException, e.to_s
  end

  # Maps a known <error> tag to its exception class, or raises
  # InvalidResponseException when no <result>ok</result> is present.
  def parse_response(data)
    error = data.match(/<error>(.*)<\/error>/)
    if error && @@api_exceptions.include?(error[1])
      # const_get instead of eval: same constant lookup, but no code
      # execution driven by response text
      raise self.class.const_get("#{error[1].gsub('Exception', '')}Exception")
    end
    raise InvalidResponseException unless data.match(/<result>ok<\/result>/)
    return true
  end
end
|
require 'json'
require 'set'
require_relative './card_store'
module Hearthstone
module Models
# represent an object in game
class Entity
attr_accessor :id, :card, :zone
def initialize(id: id, card: card, zone: zone=nil)
@id = id
@card = card
@zone = zone
end
def eql?(other)
other.equal?(id) || id == other.id
end
def hash
id.hash
end
end
class Player
attr_reader :id, :name, :first_player
attr_accessor :hero, :hero_power
attr_reader :deck, :hand, :play, :graveyard, :setaside
def initialize(id: id, name: name, first_player: first_player, hero: hero, hero_power: hero_power)
@id = id
@name = name
@first_player = first_player
@hero = hero
@hero_power = hero_power
@deck = Set.new
@hand = Set.new
@play = Set.new
@graveyard = Set.new
@setaside = Set.new
end
def move_card(card, to_zone)
[:deck, :hand, :play, :graveyard, :setaside].each do |zone|
if to_zone != zone
self.send(zone).delete(card)
else
self.send(to_zone) << card
end
end
end
end
class Game
attr_reader :store, :entities, :players
def initialize(store=CardStore.new)
@store = store
@entities = {}
@players = []
end
def add_player(id: id, name: name, first_player: first_player, hero_id: hero_id, hero_card_id: hero_card_id, hero_power_id: hero_power_id, hero_power_card_id: hero_power_card_id)
hero = entity_with_id(hero_id, card_id: hero_card_id)
hero_power = entity_with_id(hero_power_id, card_id: hero_power_card_id)
player = Player.new(id: id, name: name, first_player: first_player, hero: hero, hero_power: hero_power)
self.players << player
end
def open_card(id: id, card_id: card_id)
entity_with_id(id).card = card_with_card_id(card_id)
end
def card_revealed(id: id, card_id: card_id)
entity_with_id(id).card = card_with_card_id(card_id)
end
def card_added_to_deck(player_id: player_id, id: id, card_id: card_id)
entity = entity_with_id(id, card_id: card_id)
entity.card = card_with_card_id(card_id)
player = player_with_id(player_id)
raise "Player #{player_id} not found!" unless player
player.move_card(entity, :deck)
end
def card_received(player_id: player_id, id: id, card_id: card_id)
entity = entity_with_id(id, card_id: card_id)
entity.card = card_with_card_id(card_id)
player = player_with_id(player_id)
raise "Player #{player_id} not found!" unless player
player.move_card(entity, :hand)
end
def card_drawn(player_id: player_id, id: id, card_id: card_id)
entity = entity_with_id(id, card_id: card_id)
entity.card = card_with_card_id(card_id)
player = player_with_id(player_id)
raise "Player #{player_id} not found!" unless player
player.move_card(entity, :hand)
end
def process_turn(turn)
end
def entity_with_id(id, card_id: card_id)
entity = entities[id]
unless entity
entity = Entity.new(id: id, card: card_with_card_id(card_id))
entities[id] = entity
end
entity
end
def player_with_id(id)
self.players.detect{|p| p.id == id}
end
def card_with_card_id(card_id)
card = nil
if card_id && card_id != ""
card = self.store.card_with_id(card_id)
raise "Card #{card_id} not found!" unless card
end
card
end
end
end
end
add to_s
require 'json'
require 'set'
require_relative './card_store'
module Hearthstone
module Models
# represent an object in game
class Entity
attr_accessor :id, :card
def initialize(id: id, card: card)
@id = id
@card = card
end
def eql?(other)
other.equal?(id) || id == other.id
end
def hash
id.hash
end
def to_s
"<Entity ##{id} \"#{card.name}\">"
end
end
class Player
attr_reader :id, :name, :first_player
attr_accessor :hero, :hero_power
attr_reader :deck, :hand, :play, :graveyard, :setaside
def initialize(id: id, name: name, first_player: first_player, hero: hero, hero_power: hero_power)
@id = id
@name = name
@first_player = first_player
@hero = hero
@hero_power = hero_power
@deck = Set.new
@hand = Set.new
@play = Set.new
@graveyard = Set.new
@setaside = Set.new
end
def move_card(card, to_zone)
[:deck, :hand, :play, :graveyard, :setaside].each do |zone|
if to_zone != zone
self.send(zone).delete(card)
else
self.send(to_zone) << card
end
end
end
def to_s
"<Player ##{id} \"#{name}\">"
end
end
class Game
attr_reader :store, :entities, :players
def initialize(store=CardStore.new)
@store = store
@entities = {}
@players = []
end
def add_player(id: id, name: name, first_player: first_player, hero_id: hero_id, hero_card_id: hero_card_id, hero_power_id: hero_power_id, hero_power_card_id: hero_power_card_id)
hero = entity_with_id(hero_id, card_id: hero_card_id)
hero_power = entity_with_id(hero_power_id, card_id: hero_power_card_id)
player = Player.new(id: id, name: name, first_player: first_player, hero: hero, hero_power: hero_power)
self.players << player
end
def open_card(id: id, card_id: card_id)
entity_with_id(id).card = card_with_card_id(card_id)
end
def card_revealed(id: id, card_id: card_id)
entity_with_id(id).card = card_with_card_id(card_id)
end
def card_added_to_deck(player_id: player_id, id: id, card_id: card_id)
entity = entity_with_id(id, card_id: card_id)
entity.card = card_with_card_id(card_id)
player = player_with_id(player_id)
raise "Player #{player_id} not found!" unless player
player.move_card(entity, :deck)
end
def card_received(player_id: player_id, id: id, card_id: card_id)
entity = entity_with_id(id, card_id: card_id)
entity.card = card_with_card_id(card_id)
player = player_with_id(player_id)
raise "Player #{player_id} not found!" unless player
player.move_card(entity, :hand)
end
def card_drawn(player_id: player_id, id: id, card_id: card_id)
entity = entity_with_id(id, card_id: card_id)
entity.card = card_with_card_id(card_id)
player = player_with_id(player_id)
raise "Player #{player_id} not found!" unless player
player.move_card(entity, :hand)
end
def process_turn(turn)
end
def entity_with_id(id, card_id: card_id)
entity = entities[id]
unless entity
entity = Entity.new(id: id, card: card_with_card_id(card_id))
entities[id] = entity
end
entity
end
def player_with_id(id)
self.players.detect{|p| p.id == id}
end
def card_with_card_id(card_id)
card = nil
if card_id && card_id != ""
card = self.store.card_with_id(card_id)
raise "Card #{card_id} not found!" unless card
end
card
end
end
end
end |
module OpenStax
  module Accounts
    # Gem version (semantic versioning).
    VERSION = "7.2.0"
  end
end
7.3.0
module OpenStax
  module Accounts
    # Gem version (semantic versioning).
    VERSION = "7.3.0"
  end
end
|
# coding: utf-8
require 'gpgme'
module Payzilla
module Gateways
class Yamoney < Gateway
register_settings %w(url currency password gpg_key)
register_attachments %w(public_key secret_key)
def check(payment)
begin
result = send '1002',
:TR_NR => payment.id,
:DSTACNT_NR => payment.account,
:TR_AMT => payment.enrolled_amount,
:CUR_CD => @config.setting_currency,
:SIGN => sign([payment.id, 1002, payment.account, payment.enrolled_amount, @config.setting_currency])
return retval(result)
rescue Errno::ECONNRESET
return {:success => false, :error => -1000}
end
end
def pay(payment)
begin
result = send '1',
:TR_NR => payment.id,
:DSTACNT_NR => payment.account,
:TR_AMT => payment.enrolled_amount,
:CUR_CD => @config.setting_currency,
:CONT => "Пополнение кошелька".encode("Windows-1251"),
:SIGN => sign([payment.id, 1002, payment.account, payment.enrolled_amount, @config.setting_currency])
return retval(result)
rescue Errno::ECONNRESET
return {:success => false, :error => -1000}
end
end
private
def retval(result)
if result[:RES_CD] == "0"
return {:success => true, :error => "0"}
else
return {:success => false, :error => result[:ERR_CD]}
end
end
def sign(values)
attach_keys
crypto = GPGME::Crypto.new :armor => true
crypto.clearsign(values.map{|x| x.to_s}.join('&'),
{
:password => @config.setting_password,
:signer => @config.setting_gpg_key
}
)
end
def attach_keys
%w(public secret).each do |key|
if GPGME::Key.find(key.to_sym, @config.setting_gpg_key).empty?
GPGME::Key.import(File.open(@config.send("attachment_#{key}_key".to_sym)))
end
end
end
def send(operation, params)
params[:ACT_CD] = operation
params[:VERSION] = '2.02'
resource = RestClient::Resource.new(@config.setting_url)
result = resource.post :params => params
sign = GPGME::Crypto.new(:armor => true)
result = sign.verify(result.to_s) do |sig|
return {:RES_CD => "1", :ERR_CD => "Bad signature" } if sig.bad?
end
result = result.to_s.split("\n").map{|x| x.split("=")}.flatten
result = Hash[*result].with_indifferent_access
return result
end
end
end
end
Yamoney fix
# coding: utf-8
require 'gpgme'
module Payzilla
module Gateways
class Yamoney < Gateway
register_settings %w(url currency password gpg_key)
register_attachments %w(public_key secret_key)
def check(payment)
begin
result = send '1002',
:TR_NR => payment.id,
:DSTACNT_NR => payment.account,
:TR_AMT => payment.enrolled_amount,
:CUR_CD => @config.setting_currency,
:SIGN => sign([payment.id, 1002, payment.account, payment.enrolled_amount, @config.setting_currency])
return retval(result)
rescue Errno::ECONNRESET
return {:success => false, :error => -1000}
end
end
def pay(payment)
begin
result = send '1',
:TR_NR => payment.id,
:DSTACNT_NR => payment.account,
:TR_AMT => payment.enrolled_amount,
:CUR_CD => @config.setting_currency,
:CONT => "Пополнение кошелька".encode("Windows-1251"),
:SIGN => sign([payment.id, 1002, payment.account, payment.enrolled_amount, @config.setting_currency])
return retval(result)
rescue Errno::ECONNRESET
return {:success => false, :error => -1000}
end
end
private
def retval(result)
if result[:RES_CD] == "0"
return {:success => true, :error => "0"}
else
return {:success => false, :error => result[:ERR_CD]}
end
end
def sign(values)
attach_keys
crypto = GPGME::Crypto.new :armor => true
crypto.clearsign(values.map{|x| x.to_s}.join('&'),
{
:password => @config.setting_password,
:signer => @config.setting_gpg_key
}
)
end
def attach_keys
%w(public secret).each do |key|
if GPGME::Key.find(key.to_sym, @config.setting_gpg_key).empty?
GPGME::Key.import(File.open(@config.send("attachment_#{key}_key".to_sym)))
end
end
end
def send(operation, params)
params[:ACT_CD] = operation
params[:VERSION] = '2.02'
resource = RestClient::Resource.new(@config.setting_url)
result = resource.post :params => params
sign = GPGME::Crypto.new(:armor => true)
params = sign.verify(result.to_s) do |sig|
result = {:RES_CD => "1", :ERR_CD => "Bad signature" } if sig.bad?
end
return result if result.kind_of(Hash)
result = params.to_s.split("\n").map{|x| x.split("=")}.flatten
result = Hash[*result].with_indifferent_access
return result
end
end
end
end
|
# encoding: utf-8
require 'phonetic/algorithm'
module Phonetic
# The Double Metaphone phonetic encoding algorithm is the second generation
# of the Metaphone algorithm. Its original implementation was described
# by Lawrence Philips in the June 2000 issue of C/C++ Users Journal.
#
# This implementation based on the PHP implementation by Stephen Woodbridge
# and contains modifications of algorithm by Kevin Atkinson.
# @see http://swoodbridge.com/DoubleMetaPhone/
# PHP implementation by Stephen Woodbridge
# @see http://aspell.net/metaphone/dmetaph.cpp
# C++ implementation with modifications by Kevin Atkinson
# @example
# Phonetic::DoubleMetaphone.encode('czerny') # => ['SRN', 'XRN']
# Phonetic::DoubleMetaphone.encode('dumb') # => ['TM', 'TM']
# Phonetic::DoubleMetaphone.encode('edgar') # => ['ATKR', 'ATKR']
# # or use alias:
# Phonetic::Metaphone2.encode('czerny') # => ['SRN', 'XRN']
# Phonetic::Metaphone2.encode('dumb') # => ['TM', 'TM']
# Phonetic::Metaphone2.encode('edgar') # => ['ATKR', 'ATKR']
class DoubleMetaphone < Algorithm
# Encode word to its Double Metaphone code.
def self.encode_word(word, options = { size: 4 })
code_size = options[:size] || 4
w = word.strip.upcase
code = ['', '']
def code.add(primary, secondary)
self[0] += primary
self[1] += secondary
end
i = 0
len = w.size
last = len - 1
# pad the original string so that we can index beyond the edge of the world
w += ' ' * 5
# skip these when at start of word
i += 1 if w[0, 2] =~ /[GKP]N|WR|PS/
# initial 'X' is pronounced 'Z' e.g. 'Xavier'
if w[0] == 'X'
code.add 'S', 'S'
i += 1
end
while i < len && (code.first.size < code_size || code.last.size < code_size)
case w[i]
when 'A', 'E', 'I', 'O', 'U', 'Y'
code.add 'A', 'A' if i == 0 # all init vowels now map to 'A'
i += 1
when 'B'
# "-mb", e.g", "dumb", already skipped over...
code.add 'P', 'P'
i += w[i + 1] == 'B' ? 2 : 1
when 'Ç', 'ç'
code.add 'S', 'S'
i += 1
when 'C'
i += encode_c(w, i, len, code)
when 'D'
i += encode_d(w, i, len, code)
when 'F', 'K', 'N'
code.add w[i], w[i]
i += w[i + 1] == w[i] ? 2 : 1
when 'G'
i += encode_g(w, i, len, code)
when 'H'
i += encode_h(w, i, len, code)
when 'J'
i += encode_j(w, i, len, code)
when 'L'
i += encode_l(w, i, len, code)
when 'M'
i += encode_m(w, i, len, code)
when 'Ñ', 'ñ'
code.add 'N', 'N'
i += 1
when 'P'
i += encode_p(w, i, len, code)
when 'Q'
i += w[i + 1] == 'Q' ? 2 : 1
code.add 'K', 'K'
when 'R'
i += encode_r(w, i, len, code)
when 'S'
i += encode_s(w, i, len, code)
when 'T'
i += encode_t(w, i, len, code)
when 'V'
i += w[i + 1] == 'V' ? 2 : 1
code.add 'F', 'F'
when 'W'
i += encode_w(w, i, len, code)
when 'X'
# french e.g. breaux
code.add 'KS', 'KS' unless x_french?(w, i, last)
i += w[i + 1] =~ /[CX]/ ? 2 : 1
when 'Z'
i += encode_z(w, i, len, code)
else
i += 1
end
end
[code.first[0, code_size], code.last[0, code_size]]
end
def self.encode(str, options = { size: 4 })
encode_word(str, options)
end
private
def self.encode_c(w, i, len, code)
r = 0
case
# various germanic
when c_germanic?(w, i)
code.add 'K', 'K'
r += 2
# special case 'caesar'
when i == 0 && w[i, 6] == 'CAESAR'
code.add 'S', 'S'
r += 2
when w[i, 2] == 'CH'
encode_ch(w, i, len, code)
r += 2
when w[i, 2] == 'CZ' && !(i > 1 && w[i - 2, 4] == 'WICZ')
# e.g, 'czerny'
code.add 'S', 'X'
r += 2
when w[i + 1, 3] == 'CIA'
# e.g., 'focaccia'
code.add 'X', 'X'
r += 3
# double 'C', but not if e.g. 'McClellan'
when w[i, 2] == 'CC' && !(i == 1 && w[0] == 'M')
r += encode_cc(w, i, code) + 2
when w[i, 2] =~ /C[KGQ]/
code.add 'K', 'K'
r += 2
when w[i, 2] =~ /C[IEY]/
# italian vs. english
if w[i, 3] =~ /CI[OEA]/
code.add 'S', 'X'
else
code.add 'S', 'S'
end
r += 2
else
code.add 'K', 'K'
# name sent in 'mac caffrey', 'mac gregor'
if w[i + 1, 2] =~ /\s[CQG]/
r += 3
elsif w[i + 1] =~ /[CKQ]/ && w[i + 1, 2] !~ /C[EI]/
r += 2
else
r += 1
end
end
r
end
def self.encode_d(w, i, len, code)
r = 1
if w[i, 2] == 'DG'
if w[i + 2] =~ /[IEY]/
# e.g. 'edge'
code.add 'J', 'J'
r += 2
else
# e.g. 'edgar'
code.add 'TK', 'TK'
r += 1
end
elsif w[i, 2] =~ /D[TD]/
code.add 'T', 'T'
r += 1
else
code.add 'T', 'T'
end
r
end
def self.encode_g(w, i, len, code)
r = 2
if w[i + 1] == 'H'
encode_gh(w, i, code)
elsif w[i + 1] == 'N'
encode_gn(w, i, code)
# 'tagliaro'
elsif w[i + 1, 2] == 'LI' && !slavo_germanic?(w)
code.add 'KL', 'L'
# -ges-, -gep-, -gel-, -gie- at beginning
elsif i == 0 && w[1, 2] =~ /^Y|E[SPBLYIR]|I[BLNE]/
code.add 'K', 'J'
# -ger-, -gy-
elsif g_ger_or_gy?(w, i)
code.add 'K', 'J'
# italian e.g, 'biaggi'
elsif w[i + 1] =~ /[EIY]/ || (i > 0 && w[i - 1, 4] =~ /[AO]GGI/)
if w[0, 4] =~ /^(VAN |VON |SCH)/ || w[i + 1, 2] == 'ET'
code.add 'K', 'K'
elsif w[i + 1, 4] =~ /IER\s/
code.add 'J', 'J'
else
code.add 'J', 'K'
end
else
r -= 1 if w[i + 1] != 'G'
code.add 'K', 'K'
end
r
end
def self.encode_h(w, i, len, code)
r = 1
# only keep if first & before vowel or btw. 2 vowels
if (i == 0 || i > 0 && vowel?(w[i - 1])) && vowel?(w[i + 1])
code.add 'H', 'H'
r += 1
end
r
end
def self.encode_j(w, i, len, code)
r = 1
last = len - 1
# obvious spanish, 'jose', 'san jacinto'
if w[i, 4] == 'JOSE' || w[0, 4] =~ /SAN\s/
if i == 0 && w[i + 4] == ' ' || w[0, 4] =~ /SAN\s/
code.add 'H', 'H'
else
code.add 'J', 'H'
end
else
if i == 0 && w[i, 4] != 'JOSE'
code.add 'J', 'A'
# Yankelovich/Jankelowicz
else
# spanish pron. of e.g. 'bajador'
if j_spanish_pron?(w, i)
code.add 'J', 'H'
elsif i == last
code.add 'J', ''
elsif w[i + 1] !~ /[LTKSNMBZ]/ && !(i > 0 && w[i - 1] =~ /[SKL]/)
code.add 'J', 'J'
end
end
r += 1 if w[i + 1] == 'J'
end
r
end
def self.encode_l(w, i, len, code)
r = 1
if w[i + 1] == 'L'
# spanish e.g. 'cabrillo', 'gallegos'
if ll_spanish?(w, i, len)
code.add 'L', ''
else
code.add 'L', 'L'
end
r += 1
else
code.add 'L', 'L'
end
r
end
def self.encode_m(w, i, len, code)
r = 1
# 'dumb','thumb'
r += 1 if i > 0 && w[i - 1, 5] =~ /UMB( |ER)/ || w[i + 1] == 'M'
code.add 'M', 'M'
r
end
def self.encode_p(w, i, len, code)
r = 1
if w[i + 1] == 'H'
code.add 'F', 'F'
r += 1
else
# also account for "campbell", "raspberry"
r += 1 if w[i + 1] =~ /[PB]/
code.add 'P', 'P'
end
r
end
def self.encode_r(w, i, len, code)
last = len - 1
# french e.g. 'rogier', but exclude 'hochmeier'
if r_french?(w, i, last)
code.add '', 'R'
else
code.add 'R', 'R'
end
w[i + 1] == 'R' ? 2 : 1
end
def self.encode_s(w, i, len, code)
r = 1
last = len - 1
# special cases 'island', 'isle', 'carlisle', 'carlysle'
if i > 0 && w[i - 1, 3] =~ /[IY]SL/
# special case 'sugar-'
elsif i == 0 && w[i, 5] == 'SUGAR'
code.add 'X', 'S'
elsif w[i, 2] == 'SH'
# germanic
if w[i + 1, 4] =~ /H(EIM|OEK|OL[MZ])/
code.add 'S', 'S'
else
code.add 'X', 'X'
end
r += 1
# italian & armenian
elsif w[i, 3] =~ /SI[OA]/
if !slavo_germanic?(w)
code.add 'S', 'X'
else
code.add 'S', 'S'
end
r += 2
# german & anglicisations, e.g. 'smith' match 'schmidt',
# 'snider' match 'schneider' also, -sz- in slavic language altho in
# hungarian it is pronounced 's'
elsif i == 0 && w[i + 1] =~ /[MNLW]/ || w[i + 1] == 'Z'
code.add 'S', 'X'
r += 1 if w[i + 1] == 'Z'
elsif w[i, 2] == 'SC'
encode_sc(w, i, code)
r += 2
# french e.g. 'resnais', 'artois'
else
if i == last && i > 1 && w[i - 2, 2] =~ /[AO]I/
code.add '', 'S'
else
code.add 'S', 'S'
end
r += 1 if w[i + 1] =~ /[SZ]/
end
r
end
def self.encode_t(w, i, len, code)
r = 1
if w[i, 4] =~ /^(TION|TIA|TCH)/
code.add 'X', 'X'
r += 2
elsif w[i, 2] == 'TH' || w[i, 3] == 'TTH'
# special case 'thomas', 'thames' or germanic
if w[i + 2, 2] =~ /[OA]M/ || w[0, 4] =~ /^(VAN |VON |SCH)/
code.add 'T', 'T'
else
code.add '0', 'T'
end
r += 1
else
r += 1 if w[i + 1] =~ /[TD]/
code.add 'T', 'T'
end
r
end
def self.encode_w(w, i, len, code)
last = len - 1
r = 1
# can also be in middle of word
if w[i, 2] == 'WR'
code.add 'R', 'R'
r += 1
else
if i == 0 && (vowel?(w[i + 1]) || w[i, 2] == 'WH')
# Wasserman should match Vasserman
if vowel?(w[i + 1])
code.add 'A', 'F'
else
# need Uomo to match Womo
code.add 'A', 'A'
end
end
# Arnow should match Arnoff
if i == last && i > 0 && vowel?(w[i - 1]) ||
i > 0 && w[i - 1, 5] =~ /EWSKI|EWSKY|OWSKI|OWSKY/ ||
w[0, 3] == 'SCH'
code.add '', 'F'
elsif w[i, 4] =~ /WICZ|WITZ/
# polish e.g. 'filipowicz'
code.add 'TS', 'FX'
r += 3
end
end
r
end
def self.encode_z(w, i, len, code)
r = 1
# chinese pinyin e.g. 'zhao'
if w[i + 1] == 'H'
code.add 'J', 'J'
r += 1
else
if w[i + 1, 2] =~ /Z[OIA]/ ||
slavo_germanic?(w) && i > 0 && w[i - 1] != 'T'
code.add 'S', 'TS';
else
code.add 'S', 'S';
end
r += 1 if w[i + 1] == 'Z'
end
r
end
def self.encode_ch(w, i, len, code)
case
# italian 'chianti'
when w[i, 4] == 'CHIA'
code.add 'K', 'K'
# find 'michael'
when i > 0 && w[i, 4] == 'CHAE'
code.add 'K', 'X'
# greek roots e.g. 'chemistry', 'chorus'
when ch_greek_roots?(w, i)
code.add 'K', 'K'
# germanic, greek, or otherwise 'ch' for 'kh' sound
when ch_germanic_or_greek?(w, i, len)
code.add 'K', 'K'
when i == 0
code.add 'X', 'X'
when w[0, 2] == 'MC'
# e.g., "McHugh"
code.add 'K', 'K'
else
code.add 'X', 'K'
end
end
def self.encode_cc(w, i, code)
r = 0
# 'bellocchio' but not 'bacchus'
if w[i + 2, 1] =~ /[IEH]/ && w[i + 2, 2] != 'HU'
# 'accident', 'accede' 'succeed'
if i == 1 && w[i - 1] == 'A' || w[i - 1, 5] =~ /UCCEE|UCCES/
# 'bacci', 'bertucci', other italian
code.add 'KS', 'KS'
else
code.add 'X', 'X'
end
r = 1
else
# Pierce's rule
code.add 'K', 'K'
end
r
end
def self.encode_gh(w, i, code)
if i > 0 && !vowel?(w[i - 1])
code.add 'K', 'K'
elsif i == 0
# ghislane, ghiradelli
if w[i + 2] == 'I'
code.add 'J', 'J'
else
code.add 'K', 'K'
end
# Parker's rule (with some further refinements)
elsif !(i > 1 && w[i - 2] =~ /[BHD]/ || # e.g., 'hugh'
i > 2 && w[i - 3] =~ /[BHD]/ || # e.g., 'bough'
i > 3 && w[i - 4] =~ /[BH]/) # e.g., 'broughton'
# e.g., 'laugh', 'McLaughlin', 'cough', 'gough', 'rough', 'tough'
if i > 2 && w[i - 1] == 'U' && w[i - 3] =~ /[CGLRT]/
code.add 'F', 'F'
elsif i > 0 && w[i - 1] != 'I'
code.add 'K', 'K'
end
end
end
def self.encode_gn(w, i, code)
if i == 1 && vowel?(w[0]) && !slavo_germanic?(w)
code.add 'KN', 'N'
# not e.g. 'cagney'
elsif w[i + 2, 2] != 'EY' && w[i + 1] != 'Y' && !slavo_germanic?(w)
code.add 'N', 'KN'
else
code.add 'KN', 'KN'
end
end
# Encode an 'SC' pair; the caller (encode_s) consumes both characters.
def self.encode_sc(w, i, code)
# Schlesinger's rule
if w[i + 2] == 'H'
# dutch origin, e.g. 'school', 'schooner'
if w[i + 3, 2] =~ /OO|UY|E[DM]/
code.add 'SK', 'SK'
# 'schermerhorn', 'schenker'
elsif w[i + 3, 2] =~ /E[RN]/
code.add 'X', 'SK'
elsif i == 0 && !vowel?(w[3]) && w[3] != 'W'
code.add 'X', 'S'
else
code.add 'X', 'X'
end
elsif w[i + 2] =~ /[IEY]/
code.add 'S', 'S'
else
code.add 'SK', 'SK'
end
end
# True-ish when the word shows Slavo-Germanic spelling markers (W, K,
# CZ, WITZ). Returns the match index or nil; callers test truthiness.
def self.slavo_germanic?(w)
/W|K|CZ|WITZ/ =~ w
end
# True-ish (match index) when c is one of A E I O U Y; nil otherwise.
def self.vowel?(c)
/[AEIOUY]/ =~ c
end
# True-ish for the Germanic '-ACH-' pattern where 'C' keeps the K sound:
# a consonant, then 'ACH', not followed by I/E unless part of '-ACHER'
# (e.g. 'macher', 'bacher').
def self.c_germanic?(w, i)
# various germanic
i > 1 &&
!vowel?(w[i - 2]) &&
w[i - 1, 3] == 'ACH' &&
(w[i + 2] !~ /[IE]/ || w[i - 2, 6] =~ /[BM]ACHER/)
end
# True-ish for word-initial Greek-root 'CH' (chemistry, chorus,
# charisma, hymn-like 'chym-'), but never for 'chore'.
def self.ch_greek_roots?(w, i)
return false unless i == 0
w[1, 5] =~ /^H(ARAC|ARIS|OR|YM|IA|EM)/ && w[0, 5] != 'CHORE'
end
# True-ish when 'CH' should take the hard 'kh'/K sound: Van/Von/Sch-
# names, 'orches-'/'archit-'/'orchid'-style roots, 'CH' before T/S, or
# 'CH' bounded by the listed vowels/consonants (or at the word's end).
# NOTE(review): the final two clauses rely on && binding tighter than ||;
# presumably intentional - verify against the reference dmetaph.cpp.
def self.ch_germanic_or_greek?(w, i, len)
# germanic, greek, or otherwise 'ch' for 'kh' sound
w[0, 4] =~ /^(V[AO]N\s|SCH)/ ||
# 'architect but not 'arch', 'orchestra', 'orchid'
i > 1 && w[i - 2, 6] =~ /ORCHES|ARCHIT|ORCHID/ ||
(w[i + 2] =~ /[TS]/) ||
(i > 0 && w[i - 1] =~ /[AOUE]/ || i == 0) &&
# e.g., 'wachtler', 'wechsler', but not 'tichner'
(w[i + 2] =~ /[LRNMBHFVW ]/ || i + 2 >= len)
end
# True-ish for '-GER-'/'-GY-' where 'G' keeps the hard sound as the
# primary code ('K'), excluding 'danger'/'ranger'/'manger'-like words
# and E/I or '-RGY-'/'-OGY-' contexts.
def self.g_ger_or_gy?(w, i)
# -ger-, -gy-
w[i + 1, 2] =~ /^(ER|Y)/ &&
w[0, 6] !~ /[DRM]ANGER/ &&
!(i > 0 && w[i - 1] =~ /[EI]/) &&
!(i > 0 && w[i - 1, 3] =~ /[RO]GY/)
end
# True-ish when 'J' between a vowel and A/O takes the Spanish 'H' sound
# in a non-Slavo-Germanic word (e.g. 'bajador').
def self.j_spanish_pron?(w, i)
# spanish pron. of e.g. 'bajador'
i > 0 && vowel?(w[i - 1]) && !slavo_germanic?(w) && w[i + 1] =~ /[AO]/
end
# True-ish for Spanish '-LL-' (e.g. 'cabrillo', 'gallegos') where the
# secondary code drops the L.
# NOTE(review): the || / && precedence in the second expression is kept
# as written; verify against the reference implementation.
def self.ll_spanish?(w, i, len)
last = len - 1
# spanish e.g. 'cabrillo', 'gallegos'
(i == len - 3 && i > 0 && w[i - 1, 4] =~ /ILL[OA]|ALLE/) ||
(last > 0 && w[last - 1, 2] =~ /[AO]S/ || w[last] =~ /[AO]/) &&
(i > 0 && w[i - 1, 4] == 'ALLE')
end
# True-ish for a silent final French 'R' after '-IE' (e.g. 'rogier'),
# excluding '-meier'/'-meyer' style names ('hochmeier').
def self.r_french?(w, i, last)
# french e.g. 'rogier', but exclude 'hochmeier'
i == last && !slavo_germanic?(w) &&
i > 1 && w[i - 2, 2] == 'IE' &&
!(i > 3 && w[i - 4, 2] =~ /M[EA]/)
end
# True-ish for a silent final French 'X' after '-EAU'/'-IAU' or
# '-AU'/'-OU' (e.g. 'breaux', 'margaux').
def self.x_french?(w, i, last)
return false unless i == last
(i > 2 && w[i - 3, 3] =~ /[IE]AU/) || (i > 1 && w[i - 2, 2] =~ /[AO]U/)
end
end
end
Refactor the Double Metaphone class (part 5).
# encoding: utf-8
require 'phonetic/algorithm'
module Phonetic
# The Double Metaphone phonetic encoding algorithm is the second generation
# of the Metaphone algorithm. Its original implementation was described
# by Lawrence Philips in the June 2000 issue of C/C++ Users Journal.
#
# This implementation based on the PHP implementation by Stephen Woodbridge
# and contains modifications of algorithm by Kevin Atkinson.
# @see http://swoodbridge.com/DoubleMetaPhone/
# PHP implementation by Stephen Woodbridge
# @see http://aspell.net/metaphone/dmetaph.cpp
# C++ implementation with modifications by Kevin Atkinson
# @example
# Phonetic::DoubleMetaphone.encode('czerny') # => ['SRN', 'XRN']
# Phonetic::DoubleMetaphone.encode('dumb') # => ['TM', 'TM']
# Phonetic::DoubleMetaphone.encode('edgar') # => ['ATKR', 'ATKR']
# # or use alias:
# Phonetic::Metaphone2.encode('czerny') # => ['SRN', 'XRN']
# Phonetic::Metaphone2.encode('dumb') # => ['TM', 'TM']
# Phonetic::Metaphone2.encode('edgar') # => ['ATKR', 'ATKR']
class DoubleMetaphone < Algorithm
# Encode word to its Double Metaphone code.
# @param word [String] the word to encode
# @param options [Hash] :size - maximum length of each returned code (default 4)
# @return [Array(String, String)] the primary and secondary codes
def self.encode_word(word, options = { size: 4 })
code_size = options[:size] || 4
w = word.strip.upcase
code = ['', '']
# Singleton helper: appends to the primary and secondary codes in one call.
def code.add(primary, secondary)
self[0] += primary
self[1] += secondary
end
i = 0
len = w.size
last = len - 1
# pad the original string so that we can index beyond the edge of the world
w += ' ' * 5
# skip these when at start of word
i += 1 if w[0, 2] =~ /[GKP]N|WR|PS/
# initial 'X' is pronounced 'Z' e.g. 'Xavier'
if w[0] == 'X'
code.add 'S', 'S'
i += 1
end
while i < len && (code.first.size < code_size || code.last.size < code_size)
case w[i]
when 'A', 'E', 'I', 'O', 'U', 'Y'
code.add 'A', 'A' if i == 0 # all init vowels now map to 'A'
i += 1
when 'B'
# "-mb", e.g", "dumb", already skipped over...
i += gen_encode(w, i, 'P', 'P', code)
when 'Ç', 'ç'
code.add 'S', 'S'
i += 1
when 'C'
i += encode_c(w, i, len, code)
when 'D'
i += encode_d(w, i, len, code)
when 'F', 'K', 'N'
i += gen_encode(w, i, w[i], w[i], code)
when 'G'
i += encode_g(w, i, len, code)
when 'H'
i += encode_h(w, i, len, code)
when 'J'
i += encode_j(w, i, len, code)
when 'L'
i += encode_l(w, i, len, code)
when 'M'
i += encode_m(w, i, len, code)
when 'Ñ', 'ñ'
code.add 'N', 'N'
i += 1
when 'P'
i += encode_p(w, i, len, code)
when 'Q'
i += gen_encode(w, i, 'K', 'K', code)
when 'R'
i += encode_r(w, i, len, code)
when 'S'
i += encode_s(w, i, len, code)
when 'T'
i += encode_t(w, i, len, code)
when 'V'
i += gen_encode(w, i, 'F', 'F', code)
when 'W'
i += encode_w(w, i, len, code)
when 'X'
i += encode_x(w, i, len, code)
when 'Z'
i += encode_z(w, i, len, code)
else
i += 1
end
end
[code.first[0, code_size], code.last[0, code_size]]
end
# Encode a string; delegates to encode_word.
def self.encode(str, options = { size: 4 })
encode_word(str, options)
end
# NOTE(review): `private` below has no effect on methods defined with
# `def self.` - these class methods remain public; `private_class_method`
# would be needed to actually hide them.
private
# Generic encode for simple letters; consumes a doubled letter (e.g.
# 'FF') as one sound. Returns the number of characters consumed.
def self.gen_encode(w, i, primary, secondary, code)
code.add primary, secondary
w[i + 1] == w[i] ? 2 : 1
end
# Encode 'C'; returns the number of characters consumed.
def self.encode_c(w, i, len, code)
r = 0
case
# various germanic
when c_germanic?(w, i)
code.add 'K', 'K'
r += 2
# special case 'caesar'
when i == 0 && w[i, 6] == 'CAESAR'
code.add 'S', 'S'
r += 2
when w[i, 2] == 'CH'
encode_ch(w, i, len, code)
r += 2
when w[i, 2] == 'CZ' && !(i > 1 && w[i - 2, 4] == 'WICZ')
# e.g, 'czerny'
code.add 'S', 'X'
r += 2
when w[i + 1, 3] == 'CIA'
# e.g., 'focaccia'
code.add 'X', 'X'
r += 3
# double 'C', but not if e.g. 'McClellan'
when w[i, 2] == 'CC' && !(i == 1 && w[0] == 'M')
r += encode_cc(w, i, code) + 2
when w[i, 2] =~ /C[KGQ]/
code.add 'K', 'K'
r += 2
when w[i, 2] =~ /C[IEY]/
# italian vs. english
if w[i, 3] =~ /CI[OEA]/
code.add 'S', 'X'
else
code.add 'S', 'S'
end
r += 2
else
code.add 'K', 'K'
# name sent in 'mac caffrey', 'mac gregor'
if w[i + 1, 2] =~ /\s[CQG]/
r += 3
elsif w[i + 1] =~ /[CKQ]/ && w[i + 1, 2] !~ /C[EI]/
r += 2
else
r += 1
end
end
r
end
# Encode 'D'; returns the number of characters consumed.
def self.encode_d(w, i, len, code)
r = 1
if w[i, 2] == 'DG'
if w[i + 2] =~ /[IEY]/
# e.g. 'edge'
code.add 'J', 'J'
r += 2
else
# e.g. 'edgar'
code.add 'TK', 'TK'
r += 1
end
elsif w[i, 2] =~ /D[TD]/
code.add 'T', 'T'
r += 1
else
code.add 'T', 'T'
end
r
end
# Encode 'G'; returns the number of characters consumed.
def self.encode_g(w, i, len, code)
r = 2
if w[i + 1] == 'H'
encode_gh(w, i, code)
elsif w[i + 1] == 'N'
encode_gn(w, i, code)
# 'tagliaro'
elsif w[i + 1, 2] == 'LI' && !slavo_germanic?(w)
code.add 'KL', 'L'
# -ges-, -gep-, -gel-, -gie- at beginning
elsif i == 0 && w[1, 2] =~ /^Y|E[SPBLYIR]|I[BLNE]/
code.add 'K', 'J'
# -ger-, -gy-
elsif g_ger_or_gy?(w, i)
code.add 'K', 'J'
# italian e.g, 'biaggi'
elsif w[i + 1] =~ /[EIY]/ || (i > 0 && w[i - 1, 4] =~ /[AO]GGI/)
if w[0, 4] =~ /^(VAN |VON |SCH)/ || w[i + 1, 2] == 'ET'
code.add 'K', 'K'
elsif w[i + 1, 4] =~ /IER\s/
code.add 'J', 'J'
else
code.add 'J', 'K'
end
else
r -= 1 if w[i + 1] != 'G'
code.add 'K', 'K'
end
r
end
# Encode 'H'; returns the number of characters consumed.
def self.encode_h(w, i, len, code)
r = 1
# only keep if first & before vowel or btw. 2 vowels
if (i == 0 || i > 0 && vowel?(w[i - 1])) && vowel?(w[i + 1])
code.add 'H', 'H'
r += 1
end
r
end
# Encode 'J'; returns the number of characters consumed.
def self.encode_j(w, i, len, code)
r = 1
last = len - 1
# obvious spanish, 'jose', 'san jacinto'
if w[i, 4] == 'JOSE' || w[0, 4] =~ /SAN\s/
if i == 0 && w[i + 4] == ' ' || w[0, 4] =~ /SAN\s/
code.add 'H', 'H'
else
code.add 'J', 'H'
end
else
if i == 0 && w[i, 4] != 'JOSE'
code.add 'J', 'A'
# Yankelovich/Jankelowicz
else
# spanish pron. of e.g. 'bajador'
if j_spanish_pron?(w, i)
code.add 'J', 'H'
elsif i == last
code.add 'J', ''
elsif w[i + 1] !~ /[LTKSNMBZ]/ && !(i > 0 && w[i - 1] =~ /[SKL]/)
code.add 'J', 'J'
end
end
r += 1 if w[i + 1] == 'J'
end
r
end
# Encode 'L'; returns the number of characters consumed.
def self.encode_l(w, i, len, code)
r = 1
if w[i + 1] == 'L'
# spanish e.g. 'cabrillo', 'gallegos'
if ll_spanish?(w, i, len)
code.add 'L', ''
else
code.add 'L', 'L'
end
r += 1
else
code.add 'L', 'L'
end
r
end
# Encode 'M'; returns the number of characters consumed.
def self.encode_m(w, i, len, code)
r = 1
# 'dumb','thumb'
r += 1 if i > 0 && w[i - 1, 5] =~ /UMB( |ER)/ || w[i + 1] == 'M'
code.add 'M', 'M'
r
end
# Encode 'P'; returns the number of characters consumed.
def self.encode_p(w, i, len, code)
r = 1
if w[i + 1] == 'H'
code.add 'F', 'F'
r += 1
else
# also account for "campbell", "raspberry"
r += 1 if w[i + 1] =~ /[PB]/
code.add 'P', 'P'
end
r
end
# Encode 'R'; returns the number of characters consumed.
def self.encode_r(w, i, len, code)
last = len - 1
# french e.g. 'rogier', but exclude 'hochmeier'
if r_french?(w, i, last)
code.add '', 'R'
else
code.add 'R', 'R'
end
w[i + 1] == 'R' ? 2 : 1
end
# Encode 'S'; returns the number of characters consumed.
def self.encode_s(w, i, len, code)
r = 1
last = len - 1
# special cases 'island', 'isle', 'carlisle', 'carlysle'
if i > 0 && w[i - 1, 3] =~ /[IY]SL/
# special case 'sugar-'
elsif i == 0 && w[i, 5] == 'SUGAR'
code.add 'X', 'S'
elsif w[i, 2] == 'SH'
# germanic
if w[i + 1, 4] =~ /H(EIM|OEK|OL[MZ])/
code.add 'S', 'S'
else
code.add 'X', 'X'
end
r += 1
# italian & armenian
elsif w[i, 3] =~ /SI[OA]/
if !slavo_germanic?(w)
code.add 'S', 'X'
else
code.add 'S', 'S'
end
r += 2
# german & anglicisations, e.g. 'smith' match 'schmidt',
# 'snider' match 'schneider' also, -sz- in slavic language altho in
# hungarian it is pronounced 's'
elsif i == 0 && w[i + 1] =~ /[MNLW]/ || w[i + 1] == 'Z'
code.add 'S', 'X'
r += 1 if w[i + 1] == 'Z'
elsif w[i, 2] == 'SC'
encode_sc(w, i, code)
r += 2
# french e.g. 'resnais', 'artois'
else
if i == last && i > 1 && w[i - 2, 2] =~ /[AO]I/
code.add '', 'S'
else
code.add 'S', 'S'
end
r += 1 if w[i + 1] =~ /[SZ]/
end
r
end
# Encode 'T'; returns the number of characters consumed.
def self.encode_t(w, i, len, code)
r = 1
if w[i, 4] =~ /^(TION|TIA|TCH)/
code.add 'X', 'X'
r += 2
elsif w[i, 2] == 'TH' || w[i, 3] == 'TTH'
# special case 'thomas', 'thames' or germanic
if w[i + 2, 2] =~ /[OA]M/ || w[0, 4] =~ /^(VAN |VON |SCH)/
code.add 'T', 'T'
else
code.add '0', 'T'
end
r += 1
else
r += 1 if w[i + 1] =~ /[TD]/
code.add 'T', 'T'
end
r
end
# Encode 'W'; returns the number of characters consumed.
def self.encode_w(w, i, len, code)
last = len - 1
r = 1
# can also be in middle of word
if w[i, 2] == 'WR'
code.add 'R', 'R'
r += 1
else
if i == 0 && (vowel?(w[i + 1]) || w[i, 2] == 'WH')
# Wasserman should match Vasserman
if vowel?(w[i + 1])
code.add 'A', 'F'
else
# need Uomo to match Womo
code.add 'A', 'A'
end
end
# Arnow should match Arnoff
if i == last && i > 0 && vowel?(w[i - 1]) ||
i > 0 && w[i - 1, 5] =~ /EWSKI|EWSKY|OWSKI|OWSKY/ ||
w[0, 3] == 'SCH'
code.add '', 'F'
elsif w[i, 4] =~ /WICZ|WITZ/
# polish e.g. 'filipowicz'
code.add 'TS', 'FX'
r += 3
end
end
r
end
# Encode 'X'; returns the number of characters consumed.
def self.encode_x(w, i, len, code)
# french e.g. breaux
code.add 'KS', 'KS' unless x_french?(w, i, len - 1)
w[i + 1] =~ /[CX]/ ? 2 : 1
end
# Encode 'Z'; returns the number of characters consumed.
def self.encode_z(w, i, len, code)
r = 1
# chinese pinyin e.g. 'zhao'
if w[i + 1] == 'H'
code.add 'J', 'J'
r += 1
else
if w[i + 1, 2] =~ /Z[OIA]/ ||
slavo_germanic?(w) && i > 0 && w[i - 1] != 'T'
code.add 'S', 'TS';
else
code.add 'S', 'S';
end
r += 1 if w[i + 1] == 'Z'
end
r
end
# Encode a 'CH' digraph; the caller always consumes two characters.
def self.encode_ch(w, i, len, code)
case
# italian 'chianti'
when w[i, 4] == 'CHIA'
code.add 'K', 'K'
# find 'michael'
when i > 0 && w[i, 4] == 'CHAE'
code.add 'K', 'X'
# greek roots e.g. 'chemistry', 'chorus'
when ch_greek_roots?(w, i)
code.add 'K', 'K'
# germanic, greek, or otherwise 'ch' for 'kh' sound
when ch_germanic_or_greek?(w, i, len)
code.add 'K', 'K'
when i == 0
code.add 'X', 'X'
when w[0, 2] == 'MC'
# e.g., "McHugh"
code.add 'K', 'K'
else
code.add 'X', 'K'
end
end
# Encode a 'CC' pair; returns extra characters consumed beyond two.
def self.encode_cc(w, i, code)
r = 0
# 'bellocchio' but not 'bacchus'
if w[i + 2, 1] =~ /[IEH]/ && w[i + 2, 2] != 'HU'
# 'accident', 'accede' 'succeed'
if i == 1 && w[i - 1] == 'A' || w[i - 1, 5] =~ /UCCEE|UCCES/
# 'bacci', 'bertucci', other italian
code.add 'KS', 'KS'
else
code.add 'X', 'X'
end
r = 1
else
# Pierce's rule
code.add 'K', 'K'
end
r
end
# Encode a 'GH' digraph; may append nothing (silent 'gh').
def self.encode_gh(w, i, code)
if i > 0 && !vowel?(w[i - 1])
code.add 'K', 'K'
elsif i == 0
# ghislane, ghiradelli
if w[i + 2] == 'I'
code.add 'J', 'J'
else
code.add 'K', 'K'
end
# Parker's rule (with some further refinements)
elsif !(i > 1 && w[i - 2] =~ /[BHD]/ || # e.g., 'hugh'
i > 2 && w[i - 3] =~ /[BHD]/ || # e.g., 'bough'
i > 3 && w[i - 4] =~ /[BH]/) # e.g., 'broughton'
# e.g., 'laugh', 'McLaughlin', 'cough', 'gough', 'rough', 'tough'
if i > 2 && w[i - 1] == 'U' && w[i - 3] =~ /[CGLRT]/
code.add 'F', 'F'
elsif i > 0 && w[i - 1] != 'I'
code.add 'K', 'K'
end
end
end
# Encode a 'GN' digraph; the caller consumes both characters.
def self.encode_gn(w, i, code)
if i == 1 && vowel?(w[0]) && !slavo_germanic?(w)
code.add 'KN', 'N'
# not e.g. 'cagney'
elsif w[i + 2, 2] != 'EY' && w[i + 1] != 'Y' && !slavo_germanic?(w)
code.add 'N', 'KN'
else
code.add 'KN', 'KN'
end
end
# Encode an 'SC' pair; the caller consumes both characters.
def self.encode_sc(w, i, code)
# Schlesinger's rule
if w[i + 2] == 'H'
# dutch origin, e.g. 'school', 'schooner'
if w[i + 3, 2] =~ /OO|UY|E[DM]/
code.add 'SK', 'SK'
# 'schermerhorn', 'schenker'
elsif w[i + 3, 2] =~ /E[RN]/
code.add 'X', 'SK'
elsif i == 0 && !vowel?(w[3]) && w[3] != 'W'
code.add 'X', 'S'
else
code.add 'X', 'X'
end
elsif w[i + 2] =~ /[IEY]/
code.add 'S', 'S'
else
code.add 'SK', 'SK'
end
end
# True-ish when the word shows Slavo-Germanic spelling markers.
def self.slavo_germanic?(w)
w =~ /W|K|CZ|WITZ/
end
# True-ish when c is one of A E I O U Y.
def self.vowel?(c)
c =~ /[AEIOUY]/
end
# True-ish for the Germanic '-ACH-' pattern where 'C' sounds like 'K'.
def self.c_germanic?(w, i)
# various germanic
i > 1 &&
!vowel?(w[i - 2]) &&
w[i - 1, 3] == 'ACH' &&
(w[i + 2] !~ /[IE]/ || w[i - 2, 6] =~ /[BM]ACHER/)
end
# True-ish for word-initial Greek-root 'CH', excluding 'chore'.
def self.ch_greek_roots?(w, i)
# greek roots e.g. 'chemistry', 'chorus'
i == 0 && w[1, 5] =~ /^H(ARAC|ARIS|OR|YM|IA|EM)/ && w[0, 5] != 'CHORE'
end
# True-ish when 'CH' should take the hard 'kh'/K sound.
def self.ch_germanic_or_greek?(w, i, len)
# germanic, greek, or otherwise 'ch' for 'kh' sound
w[0, 4] =~ /^(V[AO]N\s|SCH)/ ||
# 'architect but not 'arch', 'orchestra', 'orchid'
i > 1 && w[i - 2, 6] =~ /ORCHES|ARCHIT|ORCHID/ ||
(w[i + 2] =~ /[TS]/) ||
(i > 0 && w[i - 1] =~ /[AOUE]/ || i == 0) &&
# e.g., 'wachtler', 'wechsler', but not 'tichner'
(w[i + 2] =~ /[LRNMBHFVW ]/ || i + 2 >= len)
end
# True-ish for '-GER-'/'-GY-' where 'G' keeps the hard primary sound.
def self.g_ger_or_gy?(w, i)
# -ger-, -gy-
w[i + 1, 2] =~ /^(ER|Y)/ &&
w[0, 6] !~ /[DRM]ANGER/ &&
!(i > 0 && w[i - 1] =~ /[EI]/) &&
!(i > 0 && w[i - 1, 3] =~ /[RO]GY/)
end
# True-ish when 'J' takes the Spanish 'H' sound.
def self.j_spanish_pron?(w, i)
# spanish pron. of e.g. 'bajador'
i > 0 && vowel?(w[i - 1]) && !slavo_germanic?(w) && w[i + 1] =~ /[AO]/
end
# True-ish for Spanish '-LL-' where the secondary code drops the L.
def self.ll_spanish?(w, i, len)
last = len - 1
# spanish e.g. 'cabrillo', 'gallegos'
(i == len - 3 && i > 0 && w[i - 1, 4] =~ /ILL[OA]|ALLE/) ||
(last > 0 && w[last - 1, 2] =~ /[AO]S/ || w[last] =~ /[AO]/) &&
(i > 0 && w[i - 1, 4] == 'ALLE')
end
# True-ish for a silent final French 'R' after '-IE'.
def self.r_french?(w, i, last)
# french e.g. 'rogier', but exclude 'hochmeier'
i == last && !slavo_germanic?(w) &&
i > 1 && w[i - 2, 2] == 'IE' &&
!(i > 3 && w[i - 4, 2] =~ /M[EA]/)
end
# True-ish for a silent final French 'X' (e.g. 'breaux').
def self.x_french?(w, i, last)
# french e.g. breaux
i == last && (i > 2 && w[i - 3, 3] =~ /[IE]AU/ || i > 1 && w[i - 2, 2] =~ /[AO]U/)
end
end
end
|
module HotelsCombined
  class Request
    # Queries the CitySearch endpoint and returns an array of Hotel
    # objects, each with its rates populated from the response XML.
    # Raises ArgumentError for missing dates and a HotelsCombined::*Error
    # (derived from the SOAP fault text) when the API reports a fault.
    def self.city_search(params)
      request_params = {
        "CityID" => params[:city_id],
        "Checkin" => format_date(params[:checkin]),
        "Checkout" => format_date(params[:checkout]),
        "Guests" => params[:guests],
        "Rooms" => params[:rooms],
        "UserID" => params[:user_id],
        "UserIPAddress" => params[:user_ip_address],
        "UserAgent" => params[:user_agent],
        "ApiKey" => HotelsCombined.configuration.api_key
      }
      request_params["PageSize"] = params[:page_size] if params[:page_size]
      # URI::encode is obsolete (removed in Ruby 3.0) and under-escapes;
      # encode_www_form_component escapes '&', '=', etc. correctly.
      http_params = request_params.map { |key, value| "#{key}=#{URI.encode_www_form_component(value.to_s)}" }.join("&")
      url = URI.parse("#{HotelsCombined.base_url}CitySearch?#{http_params}")
      request = Net::HTTP::Get.new("#{HotelsCombined.base_url}CitySearch?#{http_params}", { "Accept-Encoding" => "gzip" })
      response = Net::HTTP.new(url.host, url.port).start do |http|
        http.request(request)
      end
      # BUG FIX: we advertise gzip above, so the body may arrive compressed
      # and must be inflated before it reaches the XML parser.
      xml_doc = Nokogiri::XML(decompress_response(response))
      xml_doc.remove_namespaces!
      if xml_doc.xpath("//Fault").count > 0
        error = xml_doc.at_xpath(".//Fault/Reason/Text").text
        raise(HotelsCombined.const_get(error.gsub("Error", "") + "Error"),
          "Fault: #{xml_doc.at_xpath(".//Fault/Code/Value").text}")
      end
      xml_doc.xpath("//Hotel").map { |node|
        hotel = Hotel.from_xml(node)
        hotel.rates = node.xpath(".//Rate").map { |rate_node|
          Rate.from_xml(rate_node)
        }
        hotel
      }
    end

    private

    # Parses a human-friendly date with Chronic; formats it as YYYY-MM-DD.
    def self.format_date(date)
      raise ArgumentError, "Date is required" if date.nil?
      Chronic.parse(date).strftime("%Y-%m-%d")
    end

    # Returns the response body, gunzipping when the server honoured our
    # gzip Accept-Encoding request.
    def self.decompress_response(response)
      if response.header["Content-Encoding"] == "gzip"
        # Lazy-load so the fix needs no top-of-file changes.
        require "zlib"
        require "stringio"
        Zlib::GzipReader.new(StringIO.new(response.body)).read
      else
        response.body
      end
    end
  end
end
Decompress responses when necessary
module HotelsCombined
  class Request
    # Queries the CitySearch endpoint and returns an array of Hotel
    # objects, each with its rates populated from the response XML.
    # Raises ArgumentError for missing dates and a HotelsCombined::*Error
    # (derived from the SOAP fault text) when the API reports a fault.
    def self.city_search(params)
      request_params = {
        "CityID" => params[:city_id],
        "Checkin" => format_date(params[:checkin]),
        "Checkout" => format_date(params[:checkout]),
        "Guests" => params[:guests],
        "Rooms" => params[:rooms],
        "UserID" => params[:user_id],
        "UserIPAddress" => params[:user_ip_address],
        "UserAgent" => params[:user_agent],
        "ApiKey" => HotelsCombined.configuration.api_key
      }
      request_params["PageSize"] = params[:page_size] if params[:page_size]
      # URI::encode is obsolete (removed in Ruby 3.0) and under-escapes;
      # encode_www_form_component escapes '&', '=', etc. correctly.
      http_params = request_params.map { |key, value| "#{key}=#{URI.encode_www_form_component(value.to_s)}" }.join("&")
      url = URI.parse("#{HotelsCombined.base_url}CitySearch?#{http_params}")
      request = Net::HTTP::Get.new("#{HotelsCombined.base_url}CitySearch?#{http_params}", { "Accept-Encoding" => "gzip" })
      response = Net::HTTP.new(url.host, url.port).start do |http|
        http.request(request)
      end
      xml_doc = Nokogiri::XML(decompress_response(response))
      xml_doc.remove_namespaces!
      if xml_doc.xpath("//Fault").count > 0
        error = xml_doc.at_xpath(".//Fault/Reason/Text").text
        raise(HotelsCombined.const_get(error.gsub("Error", "") + "Error"),
          "Fault: #{xml_doc.at_xpath(".//Fault/Code/Value").text}")
      end
      xml_doc.xpath("//Hotel").map { |node|
        hotel = Hotel.from_xml(node)
        hotel.rates = node.xpath(".//Rate").map { |rate_node|
          Rate.from_xml(rate_node)
        }
        hotel
      }
    end

    private

    # Parses a human-friendly date with Chronic; formats it as YYYY-MM-DD.
    def self.format_date(date)
      raise ArgumentError, "Date is required" if date.nil?
      Chronic.parse(date).strftime("%Y-%m-%d")
    end

    # Returns the response body, gunzipping when the server honoured our
    # gzip Accept-Encoding request.
    def self.decompress_response(response)
      if response.header["Content-Encoding"] == "gzip"
        Zlib::GzipReader.new(StringIO.new(response.body)).read
      else
        response.body
      end
    end
  end
end
|
module PragmaticContext
# Gem version string (semantic versioning).
VERSION = "0.0.1"
end
Version bump to 0.0.2
module PragmaticContext
# Gem version string (semantic versioning).
VERSION = "0.0.2"
end
|
module ICReader
  # Base notifier: writes a tagged message via Kernel#p.
  class BaseNotifier
    # Prints the message prefixed with the class tag and returns the
    # printed string (Kernel#p returns its argument).
    def self.notify(message)
      p "BaseNotifier: #{message}"
    end

    # Instance-level convenience; delegates straight to the class method.
    def notify(message)
      self.class.notify(message)
    end
  end
end
cleaned up BaseNotifier message
module ICReader
  # Base notifier: writes the message via Kernel#p.
  class BaseNotifier
    # Prints the message and returns the printed string.
    def self.notify(message)
      # "#{message}" is just message.to_s; say so directly.
      p message.to_s
    end

    # Instance-level convenience; delegates to the class method.
    def notify(message)
      self.class.notify(message)
    end
  end
end
|
# encoding: UTF-8
module Prometheus
module Client
# Gem version string (pre-release).
VERSION = '0.11.0-alpha.1'
end
end
Release v1.0
After almost a year with this code in production, all our backward
incompatible changes addressed, and much improved documentation,
we are now in a good place to call this "ready for everyone to use"
Signed-off-by: Daniel Magliola <89d49b6744ccc763daa8041f77a567a08347f890@gocardless.com>
# encoding: UTF-8
module Prometheus
module Client
# Gem version string (first stable release).
VERSION = '1.0.0'
end
end
|
module Identity
  module ErrorHandling
    UNAVAILABLE_ERRORS = [
      Excon::Errors::BadGateway,
      # a NotAcceptable probably means that the ELB has lost its backends and
      # doesn't know how to respond to our V3 "Accept"; display unavailable
      Excon::Errors::NotAcceptable,
      Excon::Errors::ServiceUnavailable,
      Excon::Errors::SocketError,
      Excon::Errors::Timeout,
    ]

    # Sinatra extension hook: installs 503 and 500 error handlers on app.
    def self.registered(app)
      app.error(*UNAVAILABLE_ERRORS) do
        e = env["sinatra.error"]
        Identity.log(:exception, type: :unavailable,
          class: e.class.name, message: e.message,
          request_id: request.env["REQUEST_IDS"], backtrace: e.backtrace.inspect)
        slim :"errors/503", layout: :"layouts/classic"
      end

      app.error do
        e = env["sinatra.error"]
        # Build the request context once so the log line and the error
        # reporters all receive the same data.
        context = {
          method: request.request_method,
          module: self.class.name,
          request_id: env["REQUEST_IDS"],
          route_signature: env["HTTP_X_ROUTE_SIGNATURE"],
          session_id: @cookie ? @cookie.session_id : nil,
          user_id: @cookie ? @cookie.user_id : nil,
        }
        Identity.log(:exception, {
          class: e.class.name,
          message: e.message,
          backtrace: e.backtrace.inspect
        }.merge(context))
        Airbrake.notify(e) if Config.airbrake_api_key
        Honeybadger.notify(e, context: context) if Config.honeybadger_api_key
        slim :"errors/500", layout: :"layouts/classic"
      end
    end

    # Records each route's path into the request env as a signature header
    # so error context can report which route was running. The condition
    # block's assignment returns path.to_s, which is truthy for real paths.
    def route(verb, path, *)
      condition { env["HTTP_X_ROUTE_SIGNATURE"] = path.to_s }
      super
    end
  end
end
Add exception context to logging as well
module Identity
module ErrorHandling
# Excon error classes that should render a 503 "unavailable" page.
UNAVAILABLE_ERRORS = [
Excon::Errors::BadGateway,
# a NotAcceptable probably means that the ELB has lost its backends and
# doesn't know how to respond to our V3 "Accept"; display unavailable
Excon::Errors::NotAcceptable,
Excon::Errors::ServiceUnavailable,
Excon::Errors::SocketError,
Excon::Errors::Timeout,
]
# Sinatra extension hook: installs 503 and 500 error handlers on app.
def self.registered(app)
app.error *UNAVAILABLE_ERRORS do
e = env["sinatra.error"]
Identity.log(:exception, type: :unavailable,
class: e.class.name, message: e.message,
request_id: request.env["REQUEST_IDS"], backtrace: e.backtrace.inspect)
slim :"errors/503", layout: :"layouts/classic"
end
app.error do
e = env["sinatra.error"]
# Shared request context for the log line and the error reporters.
context = {
method: request.request_method,
module: self.class.name,
request_id: env["REQUEST_IDS"],
route_signature: env["HTTP_X_ROUTE_SIGNATURE"],
session_id: @cookie ? @cookie.session_id : nil,
user_id: @cookie ? @cookie.user_id : nil,
}
Identity.log(:exception, {
class: e.class.name,
message: e.message,
backtrace: e.backtrace.inspect
}.merge(context))
Airbrake.notify(e) if Config.airbrake_api_key
Honeybadger.notify(e, context: context) if Config.honeybadger_api_key
slim :"errors/500", layout: :"layouts/classic"
end
end
# Records each route's path as a signature header for error context.
def route(verb, path, *)
condition { env["HTTP_X_ROUTE_SIGNATURE"] = path.to_s }
super
end
end
end
|
class Pry
  module DefaultCommands
    # Commands for installing, locating, and listing RubyGems from Pry.
    Gems = Pry::CommandSet.new do
      command "gem-install", "Install a gem and refresh the gem cache.", :argument_required => true do |gem|
        begin
          # Install into the system gem dir when writable, else the user's.
          destination = File.writable?(Gem.dir) ? Gem.dir : Gem.user_dir
          installer = Gem::DependencyInstaller.new :install_dir => destination
          installer.install gem
        rescue Errno::EACCES
          output.puts "Insufficient permissions to install `#{text.green gem}`"
        rescue Gem::GemNotFoundException
          output.puts "Gem `#{text.green gem}` not found."
        else
          Gem.refresh
          output.puts "Gem `#{text.green gem}` installed."
        end
      end

      command "gem-cd", "Change working directory to specified gem's directory.", :argument_required => true do |gem|
        specs = Gem::Specification.respond_to?(:each) ? Gem::Specification.find_all_by_name(gem) : Gem.source_index.find_name(gem)
        # Prefer the newest installed version of the gem.
        spec = specs.sort { |a,b| Gem::Version.new(b.version) <=> Gem::Version.new(a.version) }.first
        spec ? Dir.chdir(spec.full_gem_path) : output.puts("Gem `#{gem}` not found.")
      end

      command "gem-list", "List/search installed gems. (Optional parameter: a regexp to limit the search)" do |pattern|
        pattern = Regexp.new pattern.to_s, Regexp::IGNORECASE
        gems = if Gem::Specification.respond_to?(:each)
          Gem::Specification.select{|spec| spec.name =~ pattern }.group_by(&:name)
        else
          Gem.source_index.gems.values.group_by(&:name).select { |gemname, specs| gemname =~ pattern }
        end
        gems.each do |gem, specs|
          specs.sort! do |a,b|
            Gem::Version.new(b.version) <=> Gem::Version.new(a.version)
          end
          # FIX: map.with_index is unavailable on Ruby 1.8;
          # each_with_index.map is equivalent and 1.8-compatible.
          versions = specs.each_with_index.map do |spec, index|
            index == 0 ? text.bright_green(spec.version.to_s) : text.green(spec.version.to_s)
          end
          output.puts "#{text.white gem} (#{versions.join ', '})"
        end
      end
    end
  end
end
Removed `specs.map.with_index` from the gem-list command and replaced it with `specs.each_with_index.map` for Ruby 1.8 compatibility.
class Pry
module DefaultCommands
# Commands for installing, locating, and listing RubyGems from Pry.
Gems = Pry::CommandSet.new do
command "gem-install", "Install a gem and refresh the gem cache.", :argument_required => true do |gem|
begin
# Install into the system gem dir when writable, else the user's.
destination = File.writable?(Gem.dir) ? Gem.dir : Gem.user_dir
installer = Gem::DependencyInstaller.new :install_dir => destination
installer.install gem
rescue Errno::EACCES
output.puts "Insufficient permissions to install `#{text.green gem}`"
rescue Gem::GemNotFoundException
output.puts "Gem `#{text.green gem}` not found."
else
Gem.refresh
output.puts "Gem `#{text.green gem}` installed."
end
end
command "gem-cd", "Change working directory to specified gem's directory.", :argument_required => true do |gem|
specs = Gem::Specification.respond_to?(:each) ? Gem::Specification.find_all_by_name(gem) : Gem.source_index.find_name(gem)
# Prefer the newest installed version of the gem.
spec = specs.sort { |a,b| Gem::Version.new(b.version) <=> Gem::Version.new(a.version) }.first
spec ? Dir.chdir(spec.full_gem_path) : output.puts("Gem `#{gem}` not found.")
end
command "gem-list", "List/search installed gems. (Optional parameter: a regexp to limit the search)" do |pattern|
pattern = Regexp.new pattern.to_s, Regexp::IGNORECASE
gems = if Gem::Specification.respond_to?(:each)
Gem::Specification.select{|spec| spec.name =~ pattern }.group_by(&:name)
else
Gem.source_index.gems.values.group_by(&:name).select { |gemname, specs| gemname =~ pattern }
end
gems.each do |gem, specs|
specs.sort! do |a,b|
Gem::Version.new(b.version) <=> Gem::Version.new(a.version)
end
# each_with_index.map keeps Ruby 1.8 compatibility.
versions = specs.each_with_index.map do |spec, index|
index == 0 ? text.bright_green(spec.version.to_s) : text.green(spec.version.to_s)
end
output.puts "#{text.white gem} (#{versions.join ', '})"
end
end
end
end
end
|
module IdentityPlugin
# Gem version string.
VERSION = '0.7.0'
end
bump version
module IdentityPlugin
# Gem version string.
VERSION = '0.8.0'
end
|
module Quacky
  # Builds an HTML placeholder for a client-side line graph; the tag's
  # data-chart attribute carries the points as JSON.
  class LineGraphBuilder
    # init_data: array of event hashes with :time (responds to #to_i)
    # and :data (the y value).
    def initialize(init_data = [])
      # NOTE: the former class-level `@data = []` was dead code (a class
      # instance variable nothing read) and has been removed.
      @data = init_data
    end

    # Return a content tag that can be selected by the client-side, and drawn on.
    # The data attribute of the HTML tag is the JSON from #get_data.
    def draw
      "<div class='line-graph' data-chart='#{self.get_data}'></div>".html_safe
    end

    # Serializes the events to JSON: [{"x": epoch_seconds, "y": value}, ...].
    def get_data
      @data.map { |event| { x: event[:time].to_i, y: event[:data] } }.to_json
    end
  end
end
Line graph builder now includes a layout container for the y-axis.
module Quacky
  # Builds an HTML placeholder for a client-side line graph; the tag's
  # data-chart attribute carries the points as JSON.
  class LineGraphBuilder
    # init_data: array of event hashes with :time (responds to #to_i)
    # and :data (the y value).
    def initialize(init_data = [])
      # NOTE: the former class-level `@data = []` was dead code (a class
      # instance variable nothing read) and has been removed.
      @data = init_data
    end

    # Return a content tag that can be selected by the client-side, and drawn on.
    # Includes a y-axis container; the data attribute is the JSON from #get_data.
    def draw
      "<div class='line-graph-container'><div class='y-axis'></div><div class='line-graph' data-chart='#{self.get_data}'></div></div>".html_safe
    end

    # Serializes the events to JSON: [{"x": epoch_seconds, "y": value}, ...].
    def get_data
      @data.map { |event| { x: event[:time].to_i, y: event[:data] } }.to_json
    end
  end
end
|
require "quartz_torrent/log.rb"
require "quartz_torrent/trackerclient.rb"
require "quartz_torrent/peermsg.rb"
require "quartz_torrent/reactor.rb"
require "quartz_torrent/util.rb"
require "quartz_torrent/classifiedpeers.rb"
require "quartz_torrent/peerholder.rb"
require "quartz_torrent/peermanager.rb"
require "quartz_torrent/blockstate.rb"
require "quartz_torrent/filemanager.rb"
require "quartz_torrent/semaphore.rb"
require "quartz_torrent/piecemanagerrequestmetadata.rb"
require "quartz_torrent/metainfopiecestate.rb"
require "quartz_torrent/extension.rb"
require "quartz_torrent/magnet.rb"
require "quartz_torrent/torrentqueue.rb"
module QuartzTorrent
# Extra metadata stored in a PieceManagerRequestMetadata specific to read requests.
class ReadRequestMetadata
  # peer: the peer that issued the read; requestMsg: its request message.
  attr_accessor :peer, :requestMsg

  def initialize(peer, requestMsg)
    @peer = peer
    @requestMsg = requestMsg
  end
end
# Class used by PeerClientHandler to keep track of information associated with a single torrent
# being downloaded/uploaded.
class TorrentData
def initialize(infoHash, info, trackerClient)
@infoHash = infoHash
@info = info
@trackerClient = trackerClient
@peerManager = PeerManager.new
@pieceManagerRequestMetadata = {}
@pieceManagerMetainfoRequestMetadata = {}
@bytesDownloadedDataOnly = 0
@bytesUploadedDataOnly = 0
@bytesDownloaded = 0
@bytesUploaded = 0
@magnet = nil
@peers = PeerHolder.new
@state = :initializing
@blockState = nil
@metainfoPieceState = nil
@metainfoRequestTimer = nil
@managePeersTimer = nil
@checkMetadataPieceManagerTimer = nil
@checkPieceManagerTimer = nil
@requestBlocksTimer = nil
@paused = false
@queued = false
@downRateLimit = nil
@upRateLimit = nil
@ratio = nil
@uploadDuration = nil
@downloadCompletedTime = nil
@isEndgame = false
end
# The torrents Metainfo.Info struct. This is nil if the torrent has no metadata and we need to download it
# (i.e. a magnet link)
attr_accessor :info
# The infoHash of the torrent
attr_accessor :infoHash
attr_accessor :trackerClient
attr_accessor :peers
# The MagnetURI object, if this torrent was created from a magnet link. Nil for torrents not created from magnets.
attr_accessor :magnet
attr_accessor :peerManager
attr_accessor :blockState
attr_accessor :pieceManager
# Metadata associated with outstanding requests to the PieceManager responsible for the pieces of the torrent data.
attr_accessor :pieceManagerRequestMetadata
# Metadata associated with outstanding requests to the PieceManager responsible for the pieces of the torrent metainfo.
attr_accessor :pieceManagerMetainfoRequestMetadata
attr_accessor :peerChangeListener
attr_accessor :bytesDownloadedDataOnly
attr_accessor :bytesUploadedDataOnly
attr_accessor :bytesDownloaded
attr_accessor :bytesUploaded
# State of the torrent. Is one of the following states:
# :initializing Datastructures have been created, but no work started.
# :checking_pieces Checking piece hashes on startup
# :downloading_metainfo Downloading the torrent metainfo
# :uploading The torrent is complete and we are only uploading
# :running The torrent is incomplete and we are downloading and uploading
# :error There was an unrecoverable error with the torrent.
attr_accessor :state
attr_accessor :isEndgame
attr_accessor :metainfoPieceState
# The timer handle for the timer that requests metainfo pieces. This is used to cancel the
# timer when the metadata is completely downloaded.
attr_accessor :metainfoRequestTimer
# Timer handle for timer that manages peers.
attr_accessor :managePeersTimer
# Timer handle for timer that checks metadata piece manager results
attr_accessor :checkMetadataPieceManagerTimer
# Timer handle for timer that checks piece manager results
attr_accessor :checkPieceManagerTimer
# Timer handle for timer that requests blocks
attr_accessor :requestBlocksTimer
# Is the torrent paused
attr_accessor :paused
# Is the torrent queued
attr_accessor :queued
# The RateLimit for downloading this torrent.
attr_accessor :downRateLimit
# The RateLimit for uploading to peers for this torrent.
attr_accessor :upRateLimit
# After we have completed downloading a torrent, we will continue to upload until we have
# uploaded ratio * torrent_size bytes. If nil, no limit on upload.
attr_accessor :ratio
# Maximum amount of time in seconds that the torrent can be in the uploading state before it's paused.
attr_accessor :uploadDuration
# Time at which we completely downloaded all bytes of the torrent.
attr_accessor :downloadCompletedTime
end
# Data about torrents for use by the end user.
class TorrentDataDelegate
# Create a new TorrentDataDelegate. This is meant to only be called internally.
# torrentData: the internal TorrentData to snapshot from;
# peerClientHandler: used later by #refresh to re-fill this delegate.
def initialize(torrentData, peerClientHandler)
fillFrom(torrentData)
@torrentData = torrentData
@peerClientHandler = peerClientHandler
end
# Torrent Metainfo.info struct. This is nil if the torrent has no metadata and we haven't downloaded it yet
# (i.e. a magnet link).
attr_accessor :info
# Infohash of the torrent. This is binary data.
attr_accessor :infoHash
# Recommended display name for this torrent.
attr_accessor :recommendedName
# Download rate in bytes/second
attr_reader :downloadRate
# Upload rate in bytes/second
attr_reader :uploadRate
# Download rate limit in bytes/second if a limit is set, nil otherwise
attr_reader :downloadRateLimit
# Upload rate limit in bytes/second if a limit is set, nil otherwise
attr_reader :uploadRateLimit
# Download rate in bytes/second counting torrent data only (no protocol overhead)
attr_reader :downloadRateDataOnly
# Upload rate in bytes/second counting torrent data only (no protocol overhead)
attr_reader :uploadRateDataOnly
# Count of completed bytes of the torrent
attr_reader :completedBytes
# Array of peers for the torrent. These include connected, disconnected, and handshaking peers
attr_reader :peers
# State of the torrent. This may be one of :downloading_metainfo, :error, :checking_pieces, :running, :uploading, or :deleted.
# The :deleted state indicates that the torrent that this TorrentDataDelegate refers to is no longer being managed by the peer client.
attr_reader :state
# Bitfield representing which pieces of the torrent are completed.
attr_reader :completePieceBitfield
# Length of metainfo info in bytes. This is only set when the state is :downloading_metainfo
attr_reader :metainfoLength
# How much of the metainfo info we have downloaded in bytes. This is only set when the state is :downloading_metainfo
attr_reader :metainfoCompletedLength
# Whether or not the torrent is paused.
attr_reader :paused
# Whether or not the torrent is queued.
attr_reader :queued
# After we have completed downloading a torrent, we will continue to upload until we have
# uploaded ratio * torrent_size bytes. If nil, no limit on upload.
attr_accessor :ratio
attr_accessor :uploadDuration
attr_accessor :bytesUploadedDataOnly
attr_accessor :bytesDownloadedDataOnly
attr_accessor :bytesUploaded
attr_accessor :bytesDownloaded
# Update the data in this TorrentDataDelegate from the torrentData
# object that it was created from. TODO: What if that torrentData is now gone?
def refresh
# Delegates to the handler, which re-populates this object's fields
# on the reactor thread and blocks until done.
@peerClientHandler.updateDelegateTorrentData self
end
# Set the fields of this TorrentDataDelegate from the passed torrentData.
# This is meant to only be called internally.
def internalRefresh
# Re-read every field from the TorrentData captured at construction time.
fillFrom(@torrentData)
end
private
# Populate this delegate's fields by copying/deriving them from the passed
# TorrentData. Runs on the reactor thread (via the :update_torrent_data /
# :get_torrent_data timers) so reading torrentData here is safe.
def fillFrom(torrentData)
@infoHash = torrentData.infoHash
@info = torrentData.info
@bytesUploadedDataOnly = torrentData.bytesUploadedDataOnly
@bytesDownloadedDataOnly = torrentData.bytesDownloadedDataOnly
@bytesUploaded = torrentData.bytesUploaded
@bytesDownloaded = torrentData.bytesDownloaded
if torrentData.state == :checking_pieces
# When checking pieces there is only one request pending with the piece manager.
checkExistingRequestId = torrentData.pieceManagerRequestMetadata.keys.first
progress = torrentData.pieceManager.progress checkExistingRequestId
# progress is a percentage; convert it to a byte count of the full data length.
@completedBytes = progress ? progress * torrentData.info.dataLength / 100 : 0
else
@completedBytes = torrentData.blockState.nil? ? 0 : torrentData.blockState.completedLength
end
# This should really be a copy:
@completePieceBitfield = torrentData.blockState.nil? ? nil : torrentData.blockState.completePieceBitfield
buildPeersList(torrentData)
# Deliberate inversion: bytes a peer uploads to us are our download, and vice-versa.
@downloadRate = @peers.reduce(0){ |memo, peer| memo + peer.uploadRate }
@uploadRate = @peers.reduce(0){ |memo, peer| memo + peer.downloadRate }
@downloadRateDataOnly = @peers.reduce(0){ |memo, peer| memo + peer.uploadRateDataOnly }
@uploadRateDataOnly = @peers.reduce(0){ |memo, peer| memo + peer.downloadRateDataOnly }
@state = torrentData.state
@metainfoLength = nil
@paused = torrentData.paused
@queued = torrentData.queued
@metainfoCompletedLength = nil
# Metainfo progress is only meaningful while we are still fetching the metainfo.
if torrentData.metainfoPieceState && torrentData.state == :downloading_metainfo
@metainfoLength = torrentData.metainfoPieceState.metainfoLength
@metainfoCompletedLength = torrentData.metainfoPieceState.metainfoCompletedLength
end
# Prefer the name from the metainfo; fall back to the magnet link's display name.
if torrentData.info
@recommendedName = torrentData.info.name
else
if torrentData.magnet
@recommendedName = torrentData.magnet.displayName
else
@recommendedName = nil
end
end
@downloadRateLimit = torrentData.downRateLimit.unitsPerSecond if torrentData.downRateLimit
@uploadRateLimit = torrentData.upRateLimit.unitsPerSecond if torrentData.upRateLimit
@ratio = torrentData.ratio
@uploadDuration = torrentData.uploadDuration
end
# Snapshot the torrent's peer list into @peers. Each entry is a clone, so the
# caller can inspect it without racing against later mutations of the live peers.
def buildPeersList(torrentData)
  @peers = torrentData.peers.all.map { |p| p.clone }
end
end
# This class implements a Reactor Handler object. This Handler implements the PeerClient.
class PeerClientHandler < QuartzTorrent::Handler
# Create the handler.
# baseDirectory: directory used when loading downloaded metainfo (see MetainfoPieceState.downloaded).
# maxIncomplete, maxActive: limits passed to TorrentQueue (presumably caps on
# incomplete/active torrents -- confirm against TorrentQueue).
def initialize(baseDirectory, maxIncomplete = 5, maxActive = 10)
# Hash of TorrentData objects, keyed by torrent infoHash
@torrentData = {}
@torrentQueue = TorrentQueue.new(maxIncomplete, maxActive)
@baseDirectory = baseDirectory
@logger = LogManager.getLogger("peerclient")
# Overall maximum number of peers (connected + disconnected)
@maxPeerCount = 120
# Number of peers we ideally want to try and be downloading/uploading with
@targetActivePeerCount = 50
# Number of peers to keep unchoked (used by the peer manager -- confirm).
@targetUnchokedPeerCount = 4
@managePeersPeriod = 10 # Defined in bittorrent spec. Only unchoke peers every 10 seconds.
# Period in seconds of the :request_blocks timer.
@requestBlocksPeriod = 1
# Seconds before an in-progress handshake is abandoned (see handleHandshakeTimeout).
@handshakeTimeout = 1
# Seconds before an outstanding block request is considered timed out (see requestBlocks).
@requestTimeout = 60
# When this few blocks (or fewer) remain, requestBlocks enters endgame mode.
@endgameBlockThreshold = 20
end
################################################ PUBLIC API METHODS ################################################
# Read-only access to the internal hash of TorrentData objects, keyed by infoHash.
attr_reader :torrentData
# Add a new tracker client. This effectively adds a new torrent to download. Returns the TorrentData object for the
# new torrent.
def addTrackerClient(infoHash, info, trackerclient)
  # Refuse duplicate registrations for the same torrent.
  if @torrentData.key?(infoHash)
    raise "There is already a tracker registered for torrent #{QuartzTorrent.bytesToHex(infoHash)}"
  end
  data = TorrentData.new(infoHash, info, trackerclient)
  data.info = info
  data.state = :initializing
  @torrentData[infoHash] = data
  # Queue the torrent, then immediately try to dequeue so it can start if
  # there is capacity.
  queue(data)
  dequeue
  data
end
# Remove a torrent.
def removeTorrent(infoHash, deleteFiles = false)
  # Removal must happen on the reactor thread since the torrent may be in use
  # by an event handler, so schedule an immediate, non-recurring timer.
  verb = deleteFiles ? "Will" : "Wont"
  @logger.info "#{QuartzTorrent.bytesToHex(infoHash)}: Scheduling immediate timer to remove torrent. #{verb} delete downloaded files."
  @reactor.scheduleTimer(0, [:removetorrent, infoHash, deleteFiles], false, true)
end
# Pause or unpause the specified torrent.
def setPaused(infoHash, value)
  # Pausing must happen on the reactor thread since the torrent may be in use
  # by an event handler, so schedule an immediate, non-recurring timer.
  action = value ? "pause" : "unpause"
  @logger.info "#{QuartzTorrent.bytesToHex(infoHash)}: Scheduling immediate timer to #{action} torrent."
  @reactor.scheduleTimer(0, [:pausetorrent, infoHash, value], false, true)
end
# Set the download rate limit. Pass nil as the bytesPerSecond to disable the limit.
def setDownloadRateLimit(infoHash, bytesPerSecond)
  torrentData = @torrentData[infoHash]
  unless torrentData
    @logger.warn "Asked to set download rate limit for a non-existent torrent #{QuartzTorrent.bytesToHex(infoHash)}"
    return
  end
  if ! bytesPerSecond
    # nil disables the limit.
    torrentData.downRateLimit = nil
  elsif torrentData.downRateLimit
    torrentData.downRateLimit.unitsPerSecond = bytesPerSecond
  else
    # RateLimit args: rate, limit (2x rate), initial value -- confirm against RateLimit.
    torrentData.downRateLimit = RateLimit.new(bytesPerSecond, 2*bytesPerSecond, 0)
  end
  # Push the new limit down to each peer's io.
  torrentData.peers.all.each do |peer|
    withPeersIo(peer, "setting download rate limit") do |io|
      io.readRateLimit = torrentData.downRateLimit
    end
  end
end
# Set the upload rate limit. Pass nil as the bytesPerSecond to disable the limit.
def setUploadRateLimit(infoHash, bytesPerSecond)
  torrentData = @torrentData[infoHash]
  unless torrentData
    @logger.warn "Asked to set upload rate limit for a non-existent torrent #{QuartzTorrent.bytesToHex(infoHash)}"
    return
  end
  if ! bytesPerSecond
    # nil disables the limit.
    torrentData.upRateLimit = nil
  elsif torrentData.upRateLimit
    torrentData.upRateLimit.unitsPerSecond = bytesPerSecond
  else
    # RateLimit args: rate, limit (2x rate), initial value -- confirm against RateLimit.
    torrentData.upRateLimit = RateLimit.new(bytesPerSecond, 2*bytesPerSecond, 0)
  end
  # Push the new limit down to each peer's io.
  torrentData.peers.all.each do |peer|
    withPeersIo(peer, "setting upload rate limit") do |io|
      io.writeRateLimit = torrentData.upRateLimit
    end
  end
end
# Set the upload ratio. Pass nil to disable
def setUploadRatio(infoHash, ratio)
  torrentData = @torrentData[infoHash]
  unless torrentData
    @logger.warn "Asked to set upload ratio limit for a non-existent torrent #{QuartzTorrent.bytesToHex(infoHash)}"
    return
  end
  # Enforcement happens in requestBlocks once the torrent is seeding.
  torrentData.ratio = ratio
end
# Set the maximum amount of time (in seconds) that a torrent can be in the upload-only state before
# it is paused. Pass nil to disable.
def setUploadDuration(infoHash, seconds)
  torrentData = @torrentData[infoHash]
  unless torrentData
    @logger.warn "Asked to set upload duration for a non-existent torrent #{QuartzTorrent.bytesToHex(infoHash)}"
    return
  end
  # Enforcement happens in requestBlocks once the torrent is seeding.
  torrentData.uploadDuration = seconds
end
# Get a hash of new TorrentDataDelegate objects keyed by torrent infohash.
# This method is meant to be called from a different thread than the one
# the reactor is running in. This method is not immediate but blocks until the
# data is prepared.
# If infoHash is passed, only that torrent data is returned (still in a hashtable; just one entry)
def getDelegateTorrentData(infoHash = nil)
# Use an immediate, non-recurring timer.
result = {}
# If the reactor is stopped the timer would never fire; return empty results.
return result if stopped?
semaphore = Semaphore.new
@reactor.scheduleTimer(0, [:get_torrent_data, result, semaphore, infoHash], false, true)
# Block until the reactor thread has populated result (see timerExpired, :get_torrent_data).
semaphore.wait
result
end
# Update the data stored in a TorrentDataDelegate to the latest information.
def updateDelegateTorrentData(delegate)
  # No reactor running: nothing to refresh.
  return if stopped?
  # Use an immediate, non-recurring timer so the refresh runs on the reactor
  # thread, then block until it signals completion.
  semaphore = Semaphore.new
  @reactor.scheduleTimer(0, [:update_torrent_data, delegate, semaphore], false, true)
  semaphore.wait
  # Return the refreshed delegate. (Bugfix: this method previously ended with
  # a bare `result` -- a local that was never defined in this scope -- which
  # raised NameError after every successful refresh.)
  delegate
end
################################################ REACTOR METHODS ################################################
# Reactor method called when a peer has connected to us.
def serverInit(metadata, addr, port)
# A peer connected to us
# Read handshake message
@logger.warn "Peer connection from #{addr}:#{port}"
begin
msg = PeerHandshake.unserializeExceptPeerIdFrom currentIo
rescue
@logger.warn "Peer failed handshake: #{$!}"
close
return
end
torrentData = torrentDataForHandshake(msg, "#{addr}:#{port}")
# Are we tracking this torrent?
if !torrentData
@logger.warn "Peer sent handshake for unknown torrent"
close
return
end
trackerclient = torrentData.trackerClient
# If we already have too many connections, don't allow this connection.
classifiedPeers = ClassifiedPeers.new torrentData.peers.all
if classifiedPeers.establishedPeers.length > @targetActivePeerCount
@logger.warn "Closing connection to peer from #{addr}:#{port} because we already have #{classifiedPeers.establishedPeers.length} active peers which is > the target count of #{@targetActivePeerCount} "
close
return
end
# Send handshake
outgoing = PeerHandshake.new
outgoing.peerId = trackerclient.peerId
outgoing.infoHash = torrentData.infoHash
outgoing.serializeTo currentIo
# Send extended handshake if the peer supports extensions
# (bit 0x10 of reserved byte 5 signals BEP 10 extension-protocol support).
if (msg.reserved.unpack("C8")[5] & 0x10) != 0
@logger.warn "Peer supports extensions. Sending extended handshake"
extended = Extension.createExtendedHandshake torrentData.info
extended.serializeTo currentIo
end
# Read incoming handshake's peerid
msg.peerId = currentIo.read(PeerHandshake::PeerIdLen)
if msg.peerId == trackerclient.peerId
@logger.info "We got a connection from ourself. Closing connection."
close
return
end
# See if we already know this peer id. Reuse the existing Peer object only
# when it is disconnected and comes from the same address and port; reject
# the new connection if one is already live.
peer = nil
peers = torrentData.peers.findById(msg.peerId)
if peers
peers.each do |existingPeer|
if existingPeer.state != :disconnected
@logger.warn "Peer with id #{msg.peerId} created a new connection when we already have a connection in state #{existingPeer.state}. Closing new connection."
close
return
else
if existingPeer.trackerPeer.ip == addr && existingPeer.trackerPeer.port == port
peer = existingPeer
end
end
end
end
if ! peer
peer = Peer.new(TrackerPeer.new(addr, port))
updatePeerWithHandshakeInfo(torrentData, msg, peer)
torrentData.peers.add peer
if ! peers
@logger.warn "Unknown peer with id #{msg.peerId} connected."
else
@logger.warn "Known peer with id #{msg.peerId} connected from new location."
end
else
@logger.warn "Known peer with id #{msg.peerId} connected from known location."
end
@logger.info "Peer #{peer} connected to us. "
# Initialize the connection state: both sides start choked and not interested.
peer.state = :established
peer.amChoked = true
peer.peerChoked = true
peer.amInterested = false
peer.peerInterested = false
# Without metainfo we don't yet know the piece count, so use an EmptyBitfield.
if torrentData.info
peer.bitfield = Bitfield.new(torrentData.info.pieces.length)
else
peer.bitfield = EmptyBitfield.new
@logger.info "We have no metainfo yet, so setting peer #{peer} to have an EmptyBitfield"
end
# Send bitfield
sendBitfield(currentIo, torrentData.blockState.completePieceBitfield) if torrentData.blockState
setMetaInfo(peer)
# Apply any configured per-torrent rate limits to this connection.
setReadRateLimit(torrentData.downRateLimit) if torrentData.downRateLimit
setWriteRateLimit(torrentData.upRateLimit) if torrentData.upRateLimit
end
# Reactor method called when we have connected to a peer.
def clientInit(peer)
# We connected to a peer
# Send handshake
torrentData = @torrentData[peer.infoHash]
if ! torrentData
@logger.warn "No tracker client found for peer #{peer}. Closing connection."
close
return
end
trackerclient = torrentData.trackerClient
@logger.info "Connected to peer #{peer}. Sending handshake."
msg = PeerHandshake.new
msg.peerId = trackerclient.peerId
msg.infoHash = peer.infoHash
msg.serializeTo currentIo
peer.state = :handshaking
# Give the peer @handshakeTimeout seconds to answer (see handleHandshakeTimeout).
@reactor.scheduleTimer(@handshakeTimeout, [:handshake_timeout, peer], false)
@logger.debug "Done sending handshake."
# Send bitfield
sendBitfield(currentIo, torrentData.blockState.completePieceBitfield) if torrentData.blockState
# Apply any configured per-torrent rate limits to this connection.
setReadRateLimit(torrentData.downRateLimit) if torrentData.downRateLimit
setWriteRateLimit(torrentData.upRateLimit) if torrentData.upRateLimit
end
# Reactor method called when there is data ready to be read from a socket
def recvData(peer)
msg = nil
@logger.debug "Got data from peer #{peer}"
# A handshaking peer sends a handshake first; otherwise expect wire messages.
if peer.state == :handshaking
# Read handshake message
begin
@logger.debug "Reading handshake from #{peer}"
msg = PeerHandshake.unserializeFrom currentIo
rescue
@logger.warn "Peer #{peer} failed handshake: #{$!}"
setPeerDisconnected(peer)
close
return
end
else
begin
@logger.debug "Reading wire-message from #{peer}"
msg = peer.peerMsgSerializer.unserializeFrom currentIo
#msg = PeerWireMessage.unserializeFrom currentIo
rescue EOFError
@logger.info "Peer #{peer} disconnected."
setPeerDisconnected(peer)
close
return
rescue
@logger.warn "Unserializing message from peer #{peer} failed: #{$!}"
@logger.warn $!.backtrace.join "\n"
setPeerDisconnected(peer)
close
return
end
# Account the received message in the peer's rate meters and torrent totals.
peer.updateUploadRate msg
torrentData = @torrentData[peer.infoHash]
torrentData.bytesDownloaded += msg.length if torrentData
@logger.debug "Peer #{peer} upload rate: #{peer.uploadRate.value} data only: #{peer.uploadRateDataOnly.value}"
end
# Dispatch by message type.
if msg.is_a? PeerHandshake
# This is a remote peer that we connected to returning our handshake.
processHandshake(msg, peer)
# Connection established: both sides start choked and not interested.
peer.state = :established
peer.amChoked = true
peer.peerChoked = true
peer.amInterested = false
peer.peerInterested = false
elsif msg.is_a? BitfieldMessage
@logger.debug "Received bitfield message from peer."
handleBitfield(msg, peer)
elsif msg.is_a? Unchoke
@logger.debug "Received unchoke message from peer."
peer.amChoked = false
elsif msg.is_a? Choke
@logger.debug "Received choke message from peer."
peer.amChoked = true
elsif msg.is_a? Interested
@logger.debug "Received interested message from peer."
peer.peerInterested = true
elsif msg.is_a? Uninterested
@logger.debug "Received uninterested message from peer."
peer.peerInterested = false
elsif msg.is_a? Piece
@logger.debug "Received piece message from peer for torrent #{QuartzTorrent.bytesToHex(peer.infoHash)}: piece #{msg.pieceIndex} offset #{msg.blockOffset} length #{msg.data.length}."
handlePieceReceive(msg, peer)
elsif msg.is_a? Request
@logger.debug "Received request message from peer for torrent #{QuartzTorrent.bytesToHex(peer.infoHash)}: piece #{msg.pieceIndex} offset #{msg.blockOffset} length #{msg.blockLength}."
handleRequest(msg, peer)
elsif msg.is_a? Have
@logger.debug "Received have message from peer for torrent #{QuartzTorrent.bytesToHex(peer.infoHash)}: piece #{msg.pieceIndex}"
handleHave(msg, peer)
elsif msg.is_a? KeepAlive
@logger.debug "Received keep alive message from peer."
elsif msg.is_a? ExtendedHandshake
@logger.debug "Received extended handshake message from peer."
handleExtendedHandshake(msg, peer)
elsif msg.is_a? ExtendedMetaInfo
@logger.debug "Received extended metainfo message from peer."
handleExtendedMetainfo(msg, peer)
else
@logger.warn "Received a #{msg.class} message but handler is not implemented"
end
end
# Reactor method called when a scheduled timer expires.
def timerExpired(metadata)
  # Timer metadata is an array whose first element names the action; anything
  # else is unknown.
  if !metadata.is_a?(Array)
    @logger.info "Unknown timer #{metadata} expired."
    return
  end
  case metadata[0]
  when :manage_peers
    managePeers(metadata[1])
  when :request_blocks
    requestBlocks(metadata[1])
  when :check_piece_manager
    checkPieceManagerResults(metadata[1])
  when :handshake_timeout
    handleHandshakeTimeout(metadata[1])
  when :removetorrent
    handleRemoveTorrent(metadata[1], metadata[2])
  when :pausetorrent
    handlePause(metadata[1], metadata[2])
  when :get_torrent_data
    # metadata: [_, result hash, semaphore, optional infoHash filter].
    # Build a delegate for each (matching) torrent, then wake the caller.
    @torrentData.each do |k, v|
      begin
        if metadata[3].nil? || k == metadata[3]
          metadata[1][k] = TorrentDataDelegate.new(v, self)
        end
      rescue
        @logger.error "Error building torrent data response for user: #{$!}"
        @logger.error "#{$!.backtrace.join("\n")}"
      end
    end
    metadata[2].signal
  when :update_torrent_data
    # metadata: [_, delegate, semaphore].
    delegate = metadata[1]
    # Bugfix: this branch previously tested an undefined local `infoHash`,
    # raising NameError on every refresh; the delegate's own infoHash is the
    # intended lookup key.
    # NOTE(review): TorrentDataDelegate declares attr_reader :state -- confirm
    # a state= writer exists for the :deleted assignment below.
    if ! @torrentData.has_key?(delegate.infoHash)
      delegate.state = :deleted
    else
      delegate.internalRefresh
    end
    metadata[2].signal
  when :request_metadata_pieces
    requestMetadataPieces(metadata[1])
  when :check_metadata_piece_manager
    checkMetadataPieceManagerResults(metadata[1])
  else
    @logger.info "Unknown timer #{metadata} expired."
  end
end
# Reactor method called when an IO error occurs.
def error(peer, details)
  if peer == :listener_socket
    # A handshaking peer whose id we never learned has no Peer object yet; the
    # reactor passes the symbol :listener_socket in that case.
    @logger.info "Error with handshaking peer: #{details}. Closing connection."
  else
    @logger.info "Error with peer #{peer}: #{details}. Closing connection."
    setPeerDisconnected(peer)
  end
  # Either way, drop the connection.
  close
end
################################################ PRIVATE METHODS ################################################
private
# Mark the peer as disconnected, reset its rate meters, and un-request any
# blocks we had outstanding with it so they can be requested from other peers.
def setPeerDisconnected(peer)
peer.state = :disconnected
peer.uploadRate.reset
peer.downloadRate.reset
peer.uploadRateDataOnly.reset
peer.downloadRateDataOnly.reset
torrentData = @torrentData[peer.infoHash]
# Are we tracking this torrent?
if torrentData && torrentData.blockState
# For any outstanding requests, mark that we no longer have requested them
peer.requestedBlocks.each do |blockIndex, b|
blockInfo = torrentData.blockState.createBlockinfoByBlockIndex(blockIndex)
torrentData.blockState.setBlockRequested blockInfo, false
end
peer.requestedBlocks.clear
end
end
# Process the handshake a remote peer sent back on a connection we initiated.
# Returns true on success; returns false or nil (after closing) on failure.
def processHandshake(msg, peer)
torrentData = torrentDataForHandshake(msg, peer)
# Are we tracking this torrent?
return false if !torrentData
if msg.peerId == torrentData.trackerClient.peerId
@logger.info "#{QuartzTorrent.bytesToHex(torrentData.infoHash)}: We connected to ourself. Closing connection."
peer.isUs = true
close
return
end
# Reject this connection if we already have one to the same peer id.
# NOTE(review): serverInit rejects when an existing connection is in any state
# other than :disconnected, but here only :connected is rejected -- confirm
# the asymmetry is intended.
peers = torrentData.peers.findById(msg.peerId)
if peers
peers.each do |existingPeer|
if existingPeer.state == :connected
@logger.warn "#{QuartzTorrent.bytesToHex(torrentData.infoHash)}: Peer with id #{msg.peerId} created a new connection when we already have a connection in state #{existingPeer.state}. Closing new connection."
torrentData.peers.delete existingPeer
setPeerDisconnected(peer)
close
return
end
end
end
trackerclient = torrentData.trackerClient
updatePeerWithHandshakeInfo(torrentData, msg, peer)
# Without metainfo we don't yet know the piece count, so use an EmptyBitfield.
if torrentData.info
peer.bitfield = Bitfield.new(torrentData.info.pieces.length)
else
peer.bitfield = EmptyBitfield.new
@logger.info "#{QuartzTorrent.bytesToHex(torrentData.infoHash)}: We have no metainfo yet, so setting peer #{peer} to have an EmptyBitfield"
end
# Send extended handshake if the peer supports extensions
# (bit 0x10 of reserved byte 5 signals BEP 10 extension-protocol support).
if (msg.reserved.unpack("C8")[5] & 0x10) != 0
@logger.warn "#{QuartzTorrent.bytesToHex(torrentData.infoHash)}: Peer supports extensions. Sending extended handshake"
extended = Extension.createExtendedHandshake torrentData.info
extended.serializeTo currentIo
end
true
end
# Look up the TorrentData for a handshake's infoHash. If we aren't managing
# that torrent, log it, close the connection, and return nil. `peer` is
# either a Peer object or an "addr:port" string for incoming connections.
def torrentDataForHandshake(msg, peer)
  torrentData = @torrentData[msg.infoHash]
  return torrentData if torrentData
  if peer.is_a?(Peer)
    @logger.info "Peer #{peer} failed handshake: we are not managing torrent #{QuartzTorrent.bytesToHex(msg.infoHash)}"
    setPeerDisconnected(peer)
  else
    @logger.info "Incoming peer #{peer} failed handshake: we are not managing torrent #{QuartzTorrent.bytesToHex(msg.infoHash)}"
  end
  close
  nil
end
# Record the handshake's details on the peer and index the peer by id in the
# torrent's peer registry.
def updatePeerWithHandshakeInfo(torrentData, msg, peer)
@logger.info "#{QuartzTorrent.bytesToHex(torrentData.infoHash)}: peer #{peer} sent valid handshake for torrent #{QuartzTorrent.bytesToHex(torrentData.infoHash)}"
peer.infoHash = msg.infoHash
# If this was a peer we got from a tracker that had no id then we only learn the id on handshake.
peer.trackerPeer.id = msg.peerId
torrentData.peers.idSet peer
end
# Fired by the :handshake_timeout timer. If the peer is still handshaking
# when it fires, give up on the peer and close its connection.
def handleHandshakeTimeout(peer)
  return unless peer.state == :handshaking
  @logger.warn "Peer #{peer} failed handshake: handshake timed out after #{@handshakeTimeout} seconds."
  withPeersIo(peer, "handling handshake timeout") do |io|
    setPeerDisconnected(peer)
    close(io)
  end
end
# Periodic maintenance for one torrent (driven by the :manage_peers timer):
# refresh the peer list from the tracker, open new connections, and apply the
# peer manager's choke/unchoke decisions.
def managePeers(infoHash)
torrentData = @torrentData[infoHash]
if ! torrentData
@logger.error "Manage peers: tracker client for torrent #{QuartzTorrent.bytesToHex(infoHash)} not found."
return
end
# Paused and queued torrents get no peer maintenance.
return if torrentData.paused || torrentData.queued
trackerclient = torrentData.trackerClient
# Update our internal peer list for this torrent from the tracker client
getPeersFromTracker(torrentData, infoHash)
classifiedPeers = ClassifiedPeers.new torrentData.peers.all
manager = torrentData.peerManager
if ! manager
@logger.error "#{QuartzTorrent.bytesToHex(infoHash)}: Manage peers: peer manager client for torrent #{QuartzTorrent.bytesToHex(infoHash)} not found."
return
end
# Let the manager pick which peers to connect to, then connect to them.
toConnect = manager.manageConnections(classifiedPeers)
toConnect.each do |peer|
@logger.debug "#{QuartzTorrent.bytesToHex(infoHash)}: Connecting to peer #{peer}"
connect peer.trackerPeer.ip, peer.trackerPeer.port, peer
end
# Apply the manager's choke/unchoke decisions to connected peers.
manageResult = manager.managePeers(classifiedPeers)
manageResult.unchoke.each do |peer|
@logger.debug "#{QuartzTorrent.bytesToHex(infoHash)}: Unchoking peer #{peer}"
withPeersIo(peer, "unchoking peer") do |io|
msg = Unchoke.new
sendMessageToPeer msg, io, peer
peer.peerChoked = false
end
end
manageResult.choke.each do |peer|
@logger.debug "#{QuartzTorrent.bytesToHex(infoHash)}: Choking peer #{peer}"
withPeersIo(peer, "choking peer") do |io|
msg = Choke.new
sendMessageToPeer msg, io, peer
peer.peerChoked = true
end
end
end
# Periodic block requesting for one torrent (driven by the :request_blocks
# timer): enforces upload ratio/duration limits while seeding, manages
# endgame-mode transitions, times out stale requests, adapts each peer's
# request pipeline size, and issues new Request messages.
def requestBlocks(infoHash)
torrentData = @torrentData[infoHash]
if ! torrentData
@logger.error "Request blocks peers: tracker client for torrent #{QuartzTorrent.bytesToHex(infoHash)} not found."
return
end
return if torrentData.paused || torrentData.queued
classifiedPeers = ClassifiedPeers.new torrentData.peers.all
if ! torrentData.blockState
@logger.error "#{QuartzTorrent.bytesToHex(infoHash)}: Request blocks peers: no blockstate yet."
return
end
# While seeding, pause once the upload ratio or upload duration limit
# (if configured) has been reached.
if torrentData.state == :uploading && !torrentData.paused
if torrentData.ratio
if torrentData.bytesUploadedDataOnly >= torrentData.ratio*torrentData.blockState.totalLength
@logger.info "#{QuartzTorrent.bytesToHex(infoHash)}: Pausing torrent due to upload ratio limit." if torrentData.metainfoPieceState.complete?
setPaused(infoHash, true)
return
end
end
if torrentData.uploadDuration && torrentData.downloadCompletedTime
if Time.new > torrentData.downloadCompletedTime + torrentData.uploadDuration
@logger.info "#{QuartzTorrent.bytesToHex(infoHash)}: Pausing torrent due to upload duration being reached." if torrentData.metainfoPieceState.complete?
setPaused(infoHash, true)
return
end
end
end
# Should we switch to endgame mode?
if torrentData.state == :running && !torrentData.isEndgame
blocks = torrentData.blockState.completeBlockBitfield
set = blocks.countSet
# Enter endgame when at most @endgameBlockThreshold blocks remain (but not all done).
if set >= blocks.length - @endgameBlockThreshold && set < blocks.length
@logger.info "#{QuartzTorrent.bytesToHex(infoHash)}: Entering endgame mode: blocks #{set}/#{blocks.length} complete."
torrentData.isEndgame = true
end
elsif torrentData.isEndgame && torrentData.state != :running
torrentData.isEndgame = false
end
# Delete any timed-out requests.
classifiedPeers.establishedPeers.each do |peer|
toDelete = []
peer.requestedBlocks.each do |blockIndex, requestTime|
toDelete.push blockIndex if (Time.new - requestTime) > @requestTimeout
end
toDelete.each do |blockIndex|
@logger.debug "#{QuartzTorrent.bytesToHex(infoHash)}: Block #{blockIndex} request timed out."
blockInfo = torrentData.blockState.createBlockinfoByBlockIndex(blockIndex)
torrentData.blockState.setBlockRequested blockInfo, false
peer.requestedBlocks.delete blockIndex
end
end
# Update the allowed pending requests based on how well the peer did since last time.
classifiedPeers.establishedPeers.each do |peer|
if peer.requestedBlocksSizeLastPass
if peer.requestedBlocksSizeLastPass == peer.maxRequestedBlocks
downloaded = peer.requestedBlocksSizeLastPass - peer.requestedBlocks.size
# Grow the window by 20% if more than 80% of the pipeline completed;
# shrink by 20% if nothing completed; never drop below 10.
if downloaded > peer.maxRequestedBlocks*8/10
peer.maxRequestedBlocks = peer.maxRequestedBlocks * 12 / 10
elsif downloaded == 0
peer.maxRequestedBlocks = peer.maxRequestedBlocks * 8 / 10
end
peer.maxRequestedBlocks = 10 if peer.maxRequestedBlocks < 10
end
end
end
# Request blocks
blockInfos = torrentData.blockState.findRequestableBlocks(classifiedPeers, 100)
blockInfos.each do |blockInfo|
peersToRequest = []
if torrentData.isEndgame
# Since we are in endgame mode, request blocks from all elegible peers
elegiblePeers = blockInfo.peers.find_all{ |p| p.requestedBlocks.length < p.maxRequestedBlocks }
peersToRequest.concat elegiblePeers
else
# Pick one of the peers that has the piece to download it from. Pick one of the
# peers with the top 3 upload rates.
elegiblePeers = blockInfo.peers.find_all{ |p| p.requestedBlocks.length < p.maxRequestedBlocks }.sort{ |a,b| b.uploadRate.value <=> a.uploadRate.value}
# NOTE(review): the random index is bounded by blockInfo.peers.size rather
# than elegiblePeers.size, so `random` may be nil (and rand(0) returns a
# Float); the nil case is tolerated by `next if ! peer` below -- confirm.
random = elegiblePeers[rand(blockInfo.peers.size)]
peer = elegiblePeers.first(3).push(random).shuffle.first
next if ! peer
peersToRequest.push peer
end
peersToRequest.each do |peer|
withPeersIo(peer, "requesting block") do |io|
if ! peer.amInterested
# Let this peer know that I'm interested if I haven't yet.
msg = Interested.new
sendMessageToPeer msg, io, peer
peer.amInterested = true
end
@logger.debug "#{QuartzTorrent.bytesToHex(infoHash)}: Requesting block from #{peer}: piece #{blockInfo.pieceIndex} offset #{blockInfo.offset} length #{blockInfo.length}"
msg = blockInfo.getRequest
sendMessageToPeer msg, io, peer
torrentData.blockState.setBlockRequested blockInfo, true
peer.requestedBlocks[blockInfo.blockIndex] = Time.new
end
end
end
# Nothing requestable: if every piece is complete, flip to seeding.
if blockInfos.size == 0
if torrentData.state != :uploading && torrentData.blockState.completePieceBitfield.allSet?
@logger.info "#{QuartzTorrent.bytesToHex(infoHash)}: Download complete."
torrentData.state = :uploading
torrentData.downloadCompletedTime = Time.new
dequeue
end
end
classifiedPeers.establishedPeers.each { |peer| peer.requestedBlocksSizeLastPass = peer.requestedBlocks.length }
end
# For a torrent where we don't have the metainfo, request metainfo pieces from peers.
def requestMetadataPieces(infoHash)
torrentData = @torrentData[infoHash]
if ! torrentData
@logger.error "Request metadata pices: torrent data for torrent #{QuartzTorrent.bytesToHex(infoHash)} not found."
return
end
return if torrentData.paused || torrentData.queued
# We may not have completed the extended handshake with the peer which specifies the torrent size.
# In this case torrentData.metainfoPieceState is not yet set.
return if ! torrentData.metainfoPieceState
@logger.info "#{QuartzTorrent.bytesToHex(infoHash)}: Obtained all pieces of metainfo." if torrentData.metainfoPieceState.complete?
pieces = torrentData.metainfoPieceState.findRequestablePieces
classifiedPeers = ClassifiedPeers.new torrentData.peers.all
peers = torrentData.metainfoPieceState.findRequestablePeers(classifiedPeers)
if peers.size > 0
# For now, just request all pieces from the first peer.
pieces.each do |pieceIndex|
msg = ExtendedMetaInfo.new
msg.msgType = :request
msg.piece = pieceIndex
withPeersIo(peers.first, "requesting metadata piece") do |io|
sendMessageToPeer msg, io, peers.first
# Mark the piece requested so we don't ask for it again before it arrives.
torrentData.metainfoPieceState.setPieceRequested(pieceIndex, true)
@logger.debug "#{QuartzTorrent.bytesToHex(infoHash)}: Requesting metainfo piece from #{peers.first}: piece #{pieceIndex}"
end
end
else
@logger.error "#{QuartzTorrent.bytesToHex(infoHash)}: No peers found that have metadata."
end
end
# Process completed results from the metainfo piece manager (driven by the
# :check_metadata_piece_manager timer): serve successful reads to the peers
# that requested them, and once the metainfo is fully downloaded, load it and
# begin checking existing pieces.
def checkMetadataPieceManagerResults(infoHash)
torrentData = @torrentData[infoHash]
if ! torrentData
@logger.error "Check metadata piece manager results: data for torrent #{QuartzTorrent.bytesToHex(infoHash)} not found."
return
end
# We may not have completed the extended handshake with the peer which specifies the torrent size.
# In this case torrentData.metainfoPieceState is not yet set.
return if ! torrentData.metainfoPieceState
results = torrentData.metainfoPieceState.checkResults
results.each do |result|
metaData = torrentData.pieceManagerMetainfoRequestMetadata.delete(result.requestId)
if ! metaData
@logger.error "#{QuartzTorrent.bytesToHex(infoHash)}: Can't find metadata for PieceManager request #{result.requestId}"
next
end
if metaData.type == :read && result.successful?
# Send the piece to the peer.
msg = ExtendedMetaInfo.new
msg.msgType = :piece
msg.piece = metaData.data.requestMsg.piece
msg.data = result.data
withPeersIo(metaData.data.peer, "sending extended metainfo piece message") do |io|
@logger.debug "#{QuartzTorrent.bytesToHex(infoHash)}: Sending metainfo piece to #{metaData.data.peer}: piece #{msg.piece} with data length #{msg.data.length}"
sendMessageToPeer msg, io, metaData.data.peer
end
# NOTE(review): this bare `result.data` expression is a no-op -- likely leftover.
result.data
end
end
if torrentData.metainfoPieceState.complete? && torrentData.state == :downloading_metainfo
@logger.info "#{QuartzTorrent.bytesToHex(infoHash)}: Obtained all pieces of metainfo. Will begin checking existing pieces."
torrentData.metainfoPieceState.flush
# We don't need to download metainfo anymore.
cancelTimer torrentData.metainfoRequestTimer if torrentData.metainfoRequestTimer
info = MetainfoPieceState.downloaded(@baseDirectory, torrentData.infoHash)
if info
torrentData.info = info
startCheckingPieces torrentData
else
@logger.error "#{QuartzTorrent.bytesToHex(infoHash)}: Metadata download is complete but reading the metadata failed"
torrentData.state = :error
end
end
end
# Handle a Piece (block) message from a peer: validate it against our block
# state, account the data bytes, schedule an asynchronous write through the
# piece manager, and in endgame mode send Cancel to the other peers we had
# requested the same block from.
def handlePieceReceive(msg, peer)
  torrentData = @torrentData[peer.infoHash]
  if ! torrentData
    @logger.error "Receive piece: torrent data for torrent #{QuartzTorrent.bytesToHex(peer.infoHash)} not found."
    return
  end
  if ! torrentData.blockState
    @logger.error "#{QuartzTorrent.bytesToHex(torrentData.infoHash)}: Receive piece: no blockstate yet."
    return
  end
  blockInfo = torrentData.blockState.createBlockinfoByPieceResponse(msg.pieceIndex, msg.blockOffset, msg.data.length)
  # Ignore unsolicited blocks and endgame duplicates.
  if ! peer.requestedBlocks.has_key?(blockInfo.blockIndex)
    @logger.debug "#{QuartzTorrent.bytesToHex(torrentData.infoHash)}: Receive piece: we either didn't request this piece, or it was already received due to endgame strategy. Ignoring this message."
    return
  end
  if torrentData.blockState.blockCompleted?(blockInfo)
    @logger.debug "#{QuartzTorrent.bytesToHex(torrentData.infoHash)}: Receive piece: we already have this block. Ignoring this message."
    return
  end
  peer.requestedBlocks.delete blockInfo.blockIndex
  # Block is marked as not requested when hash is confirmed
  torrentData.bytesDownloadedDataOnly += msg.data.length
  # Queue the asynchronous write; the result is matched back up via the
  # request metadata recorded here.
  id = torrentData.pieceManager.writeBlock(msg.pieceIndex, msg.blockOffset, msg.data)
  torrentData.pieceManagerRequestMetadata[id] = PieceManagerRequestMetadata.new(:write, msg)
  if torrentData.isEndgame
    # Assume this block is correct. Send a Cancel message to all other peers from whom we requested
    # this piece.
    classifiedPeers = ClassifiedPeers.new torrentData.peers.all
    classifiedPeers.requestablePeers.each do |otherPeer|
      if otherPeer.requestedBlocks.has_key?(blockInfo.blockIndex)
        withPeersIo(otherPeer, "when sending Cancel message") do |io|
          cancel = Cancel.new
          cancel.pieceIndex = msg.pieceIndex
          cancel.blockOffset = msg.blockOffset
          cancel.blockLength = msg.data.length
          sendMessageToPeer cancel, io, otherPeer
          # Bugfix: log the peer the Cancel is actually sent to (otherPeer),
          # not the peer that delivered the block.
          @logger.debug "#{QuartzTorrent.bytesToHex(torrentData.infoHash)}: Sending Cancel message to peer #{otherPeer}"
        end
      end
    end
  end
end
# Handle a Request message from a peer by scheduling an asynchronous read of
# the requested block through the piece manager, recording metadata so the
# result can later be sent back to the requesting peer.
def handleRequest(msg, peer)
  # Choked peers are not allowed to request blocks.
  if peer.peerChoked
    @logger.warn "Request piece: peer #{peer} requested a block when they are choked."
    return
  end
  torrentData = @torrentData[peer.infoHash]
  unless torrentData
    @logger.error "Request piece: torrent data for torrent #{QuartzTorrent.bytesToHex(peer.infoHash)} not found."
    return
  end
  unless msg.blockLength > 0
    @logger.error "#{QuartzTorrent.bytesToHex(torrentData.infoHash)}: Request piece: peer requested block of length #{msg.blockLength} which is invalid."
    return
  end
  id = torrentData.pieceManager.readBlock(msg.pieceIndex, msg.blockOffset, msg.blockLength)
  torrentData.pieceManagerRequestMetadata[id] = PieceManagerRequestMetadata.new(:read, ReadRequestMetadata.new(peer, msg))
end
# Handle a Bitfield message: record the peer's piece availability and send
# Interested if they have pieces we still need.
def handleBitfield(msg, peer)
torrentData = @torrentData[peer.infoHash]
if ! torrentData
@logger.error "Bitfield: torrent data for torrent #{QuartzTorrent.bytesToHex(peer.infoHash)} not found."
return
end
peer.bitfield = msg.bitfield
if torrentData.info
# Presumably truncates the wire bitfield's padding bits down to the true
# piece count -- confirm against Bitfield#length=.
peer.bitfield.length = torrentData.info.pieces.length
else
@logger.warn "#{QuartzTorrent.bytesToHex(torrentData.infoHash)}: A peer connected and sent a bitfield but we don't know the length of the torrent yet. Assuming number of pieces is divisible by 8"
end
if ! torrentData.blockState
@logger.warn "#{QuartzTorrent.bytesToHex(torrentData.infoHash)}: Bitfield: no blockstate yet."
return
end
# If we are interested in something from this peer, let them know.
# ("compliment" is the project Bitfield API's spelling of complement.)
needed = torrentData.blockState.completePieceBitfield.compliment
needed.intersection!(peer.bitfield)
if ! needed.allClear?
if ! peer.amInterested
@logger.debug "#{QuartzTorrent.bytesToHex(torrentData.infoHash)}: Need some pieces from peer #{peer} so sending Interested message"
msg = Interested.new
sendMessageToPeer msg, currentIo, peer
peer.amInterested = true
end
end
end
# Handle a Have message from a peer: mark the announced piece in the peer's
# bitfield and, if it is a piece we still need, send an Interested message.
def handleHave(msg, peer)
torrentData = @torrentData[peer.infoHash]
if ! torrentData
@logger.error "Have: torrent data for torrent #{QuartzTorrent.bytesToHex(peer.infoHash)} not found."
return
end
# Ignore out-of-range piece indices.
if msg.pieceIndex >= peer.bitfield.length
@logger.warn "#{QuartzTorrent.bytesToHex(torrentData.infoHash)}: Peer #{peer} sent Have message with invalid piece index"
return
end
# Update peer's bitfield
peer.bitfield.set msg.pieceIndex
# Without blockstate we can't tell whether we need this piece.
if ! torrentData.blockState
@logger.warn "#{QuartzTorrent.bytesToHex(torrentData.infoHash)}: Have: no blockstate yet."
return
end
# If we are interested in something from this peer, let them know.
if ! torrentData.blockState.completePieceBitfield.set?(msg.pieceIndex)
@logger.debug "#{QuartzTorrent.bytesToHex(torrentData.infoHash)}: Peer #{peer} just got a piece we need so sending Interested message"
# NOTE: shadows the 'msg' parameter; the original Have message is no longer needed here.
msg = Interested.new
sendMessageToPeer msg, currentIo, peer
peer.amInterested = true
end
end
# Process all completed results from the torrent-data PieceManager:
# * :write  — a block finished writing; on success this may complete a piece,
#             which triggers an asynchronous hash check.
# * :read   — a block finished reading; the data is sent to the requesting peer.
# * :hash   — a piece hash check finished; on success Haves are broadcast and
#             Uninterested messages sent, on failure the piece is reset.
# * :check_existing — the startup scan for already-downloaded pieces finished.
#
# @param infoHash binary info hash identifying the torrent
def checkPieceManagerResults(infoHash)
  torrentData = @torrentData[infoHash]
  if ! torrentData
    # BUGFIX: this message previously read "Request blocks peers: tracker client
    # ... not found" — a copy/paste error; this method checks piece manager
    # results and the missing object is the torrent data.
    @logger.error "Check piece manager results: torrent data for torrent #{QuartzTorrent.bytesToHex(infoHash)} not found."
    return
  end

  # Drain every available result.
  while true
    result = torrentData.pieceManager.nextResult
    break if ! result

    metaData = torrentData.pieceManagerRequestMetadata.delete(result.requestId)
    if ! metaData
      @logger.error "#{QuartzTorrent.bytesToHex(torrentData.infoHash)}: Can't find metadata for PieceManager request #{result.requestId}"
      next
    end

    if metaData.type == :write
      if result.successful?
        @logger.debug "#{QuartzTorrent.bytesToHex(torrentData.infoHash)}: Block written to disk. "
        # Block successfully written!
        torrentData.blockState.setBlockCompleted metaData.data.pieceIndex, metaData.data.blockOffset, true do |pieceIndex|
          # The piece is completed! Check hash.
          @logger.debug "#{QuartzTorrent.bytesToHex(torrentData.infoHash)}: Piece #{pieceIndex} is complete. Checking hash. "
          id = torrentData.pieceManager.checkPieceHash(metaData.data.pieceIndex)
          torrentData.pieceManagerRequestMetadata[id] = PieceManagerRequestMetadata.new(:hash, metaData.data.pieceIndex)
        end
      else
        # Block failed! Clear completed and requested state.
        torrentData.blockState.setBlockCompleted metaData.data.pieceIndex, metaData.data.blockOffset, false
        @logger.error "#{QuartzTorrent.bytesToHex(torrentData.infoHash)}: Writing block failed: #{result.error}"
      end
    elsif metaData.type == :read
      if result.successful?
        readRequestMetadata = metaData.data
        peer = readRequestMetadata.peer
        withPeersIo(peer, "sending piece message") do |io|
          msg = Piece.new
          msg.pieceIndex = readRequestMetadata.requestMsg.pieceIndex
          msg.blockOffset = readRequestMetadata.requestMsg.blockOffset
          msg.data = result.data
          @logger.debug "#{QuartzTorrent.bytesToHex(torrentData.infoHash)}: Sending block to #{peer}: piece #{msg.pieceIndex} offset #{msg.blockOffset} length #{msg.data.length}"
          sendMessageToPeer msg, io, peer
          torrentData.bytesUploadedDataOnly += msg.data.length
          @logger.debug "#{QuartzTorrent.bytesToHex(torrentData.infoHash)}: Sending piece to peer"
        end
      else
        @logger.error "#{QuartzTorrent.bytesToHex(torrentData.infoHash)}: Reading block failed: #{result.error}"
      end
    elsif metaData.type == :hash
      if result.successful?
        @logger.debug "#{QuartzTorrent.bytesToHex(torrentData.infoHash)}: Hash of piece #{metaData.data} is correct"
        sendHaves(torrentData, metaData.data)
        sendUninterested(torrentData)
      else
        @logger.info "#{QuartzTorrent.bytesToHex(torrentData.infoHash)}: Hash of piece #{metaData.data} is incorrect. Marking piece as not complete."
        torrentData.blockState.setPieceCompleted metaData.data, false
      end
    elsif metaData.type == :check_existing
      handleCheckExistingResult(torrentData, result)
    end
  end
end
# Handle the result of the PieceManager's checkExisting (check which pieces we already have) operation.
# If the result is successful, this begins the actual download.
def handleCheckExistingResult(torrentData, pieceManagerResult)
if pieceManagerResult.successful?
existingBitfield = pieceManagerResult.data
@logger.info "#{QuartzTorrent.bytesToHex(torrentData.infoHash)}: We already have #{existingBitfield.countSet}/#{existingBitfield.length} pieces."
info = torrentData.info
# Seed the block state with the pieces found on disk.
torrentData.blockState = BlockState.new(info, existingBitfield)
@logger.info "#{QuartzTorrent.bytesToHex(torrentData.infoHash)}: Starting torrent. Information:"
@logger.info "#{QuartzTorrent.bytesToHex(torrentData.infoHash)}: piece length: #{info.pieceLen}"
@logger.info "#{QuartzTorrent.bytesToHex(torrentData.infoHash)}: number of pieces: #{info.pieces.size}"
@logger.info "#{QuartzTorrent.bytesToHex(torrentData.infoHash)}: total length #{info.dataLength}"
startDownload torrentData
else
# The scan failed; the torrent cannot proceed.
@logger.info "#{QuartzTorrent.bytesToHex(torrentData.infoHash)}: Checking existing pieces of torrent failed: #{pieceManagerResult.error}"
torrentData.state = :error
end
end
# Start checking which pieces we already have downloaded. This method schedules the necessary timers
# and changes the state to :checking_pieces. When the pieces are finished being checked the actual download will
# begin.
# Preconditions: The torrentData object already has its info member set.
def startCheckingPieces(torrentData)
torrentData.pieceManager = QuartzTorrent::PieceManager.new(@baseDirectory, torrentData.info)
torrentData.state = :checking_pieces
@logger.info "#{QuartzTorrent.bytesToHex(torrentData.infoHash)}: Checking pieces of torrent #{QuartzTorrent.bytesToHex(torrentData.infoHash)} asynchronously."
# The scan is asynchronous; its result arrives via checkPieceManagerResults as a :check_existing entry.
id = torrentData.pieceManager.findExistingPieces
torrentData.pieceManagerRequestMetadata[id] = PieceManagerRequestMetadata.new(:check_existing, nil)
# Create the metainfo piece state if we don't have one yet (e.g. torrent added from a .torrent file).
if ! torrentData.metainfoPieceState
torrentData.metainfoPieceState = MetainfoPieceState.new(@baseDirectory, torrentData.infoHash, nil, torrentData.info)
end
# Schedule checking for PieceManager results
torrentData.checkPieceManagerTimer =
@reactor.scheduleTimer(@requestBlocksPeriod, [:check_piece_manager, torrentData.infoHash], true, false)
# Schedule checking for metainfo PieceManager results (including when piece reading completes)
# (may already be scheduled if we came through the metainfo-download path).
if ! torrentData.checkMetadataPieceManagerTimer
torrentData.checkMetadataPieceManagerTimer =
@reactor.scheduleTimer(@requestBlocksPeriod, [:check_metadata_piece_manager, torrentData.infoHash], true, false)
end
end
# Take a torrent that is in the :initializing state and make it go.
# If the torrent's metainfo is available (either passed in or previously saved
# to disk) piece checking begins immediately; otherwise the metainfo is first
# downloaded from peers (magnet-link path).
def initTorrent(torrentData)
  # If we already have the metainfo info for this torrent, we can begin checking the pieces.
  # If we don't have the metainfo info then we need to get the metainfo first.
  if ! torrentData.info
    torrentData.info = MetainfoPieceState.downloaded(@baseDirectory, torrentData.infoHash)
  end

  if torrentData.info
    startCheckingPieces torrentData
  else
    # Request the metainfo from peers.
    torrentData.state = :downloading_metainfo
    @logger.info "#{QuartzTorrent.bytesToHex(torrentData.infoHash)}: Downloading metainfo"
    # Schedule peer connection management. Recurring and immediate
    torrentData.managePeersTimer =
      @reactor.scheduleTimer(@managePeersPeriod, [:manage_peers, torrentData.infoHash], true, true)
    # Schedule a timer for requesting metadata pieces from peers.
    # BUGFIX: the two timers below previously referenced the undefined local
    # `infoHash` (this method only receives torrentData), raising a NameError
    # whenever a torrent was started without metainfo. Use torrentData.infoHash.
    torrentData.metainfoRequestTimer =
      @reactor.scheduleTimer(@requestBlocksPeriod, [:request_metadata_pieces, torrentData.infoHash], true, false)
    # Schedule checking for metainfo PieceManager results (including when piece reading completes)
    torrentData.checkMetadataPieceManagerTimer =
      @reactor.scheduleTimer(@requestBlocksPeriod, [:check_metadata_piece_manager, torrentData.infoHash], true, false)
  end
end
# Start the actual torrent download. This method schedules the necessary timers and registers the necessary listeners
# and changes the state to :running. It is meant to be called after checking for existing pieces or downloading the
# torrent metadata (if this is a magnet link torrent)
def startDownload(torrentData)
# Add a listener for when the tracker's peers change.
torrentData.peerChangeListener = Proc.new do
@logger.debug "#{QuartzTorrent.bytesToHex(torrentData.infoHash)}: Managing peers on peer change event"
# Non-recurring and immediate timer
torrentData.managePeersTimer =
@reactor.scheduleTimer(@managePeersPeriod, [:manage_peers, torrentData.infoHash], false, true)
end
torrentData.trackerClient.addPeersChangedListener torrentData.peerChangeListener
# Schedule peer connection management. Recurring and immediate
# (the timer may already exist if we came through the metainfo-download path).
if ! torrentData.managePeersTimer
torrentData.managePeersTimer =
@reactor.scheduleTimer(@managePeersPeriod, [:manage_peers, torrentData.infoHash], true, true)
end
# Schedule requesting blocks from peers. Recurring and not immediate
torrentData.requestBlocksTimer =
@reactor.scheduleTimer(@requestBlocksPeriod, [:request_blocks, torrentData.infoHash], true, false)
torrentData.state = :running
end
# Handle an extended-protocol handshake from a peer. If the handshake reports
# the size of the torrent metadata and we haven't yet built our
# MetainfoPieceState, create it now so metadata pieces can be downloaded.
#
# @param msg  the extended handshake message (carries a bencoded dict)
# @param peer the Peer that sent the handshake
def handleExtendedHandshake(msg, peer)
  torrentData = @torrentData[peer.infoHash]
  unless torrentData
    @logger.error "Extended Handshake: torrent data for torrent #{QuartzTorrent.bytesToHex(peer.infoHash)} not found."
    return
  end

  metadataSize = msg.dict['metadata_size']
  # Nothing to do unless the peer told us the metadata size and we haven't
  # created our MetainfoPieceState yet.
  return unless metadataSize
  return if torrentData.metainfoPieceState

  @logger.info "#{QuartzTorrent.bytesToHex(torrentData.infoHash)}: Extended Handshake: Learned that metadata size is #{metadataSize}. Creating MetainfoPieceState"
  torrentData.metainfoPieceState = MetainfoPieceState.new(@baseDirectory, torrentData.infoHash, metadataSize)
end
# Handle an extended metainfo message from a peer (metadata exchange).
# * :request — answer with the piece data if we have it (asynchronous read),
#              otherwise send a reject.
# * :piece   — save the received metadata piece asynchronously (unless we
#              already have it).
# * :reject  — mark the peer as bad for metadata and clear the piece's
#              requested flag so it can be re-requested elsewhere.
def handleExtendedMetainfo(msg, peer)
  torrentData = @torrentData[peer.infoHash]
  if ! torrentData
    # BUGFIX: this message previously said "Extended Handshake" — a copy/paste
    # error from handleExtendedHandshake; this handler deals with metainfo messages.
    @logger.error "Extended Metainfo: torrent data for torrent #{QuartzTorrent.bytesToHex(peer.infoHash)} not found."
    return
  end

  if msg.msgType == :request
    @logger.debug "#{QuartzTorrent.bytesToHex(torrentData.infoHash)}: Got extended metainfo request for piece #{msg.piece}"
    # Build a response for this piece.
    if torrentData.metainfoPieceState.pieceCompleted? msg.piece
      # We have the piece: read it asynchronously; the reply is sent when the
      # metainfo piece manager reports the read complete.
      @logger.debug "#{QuartzTorrent.bytesToHex(torrentData.infoHash)}: Requesting extended metainfo piece #{msg.piece} from metainfoPieceState."
      id = torrentData.metainfoPieceState.readPiece msg.piece
      torrentData.pieceManagerMetainfoRequestMetadata[id] =
        PieceManagerRequestMetadata.new(:read, ReadRequestMetadata.new(peer,msg))
    else
      # We don't have the piece yet: tell the peer so.
      reject = ExtendedMetaInfo.new
      reject.msgType = :reject
      reject.piece = msg.piece
      withPeersIo(peer, "sending extended metainfo reject message") do |io|
        @logger.debug "#{QuartzTorrent.bytesToHex(torrentData.infoHash)}: Sending metainfo reject to #{peer}: piece #{msg.piece}"
        sendMessageToPeer reject, io, peer
      end
    end
  elsif msg.msgType == :piece
    @logger.debug "#{QuartzTorrent.bytesToHex(torrentData.infoHash)}: Got extended metainfo piece response for piece #{msg.piece} with data length #{msg.data.length}"
    # Only save the piece if we don't already have it.
    if ! torrentData.metainfoPieceState.pieceCompleted? msg.piece
      id = torrentData.metainfoPieceState.savePiece msg.piece, msg.data
      torrentData.pieceManagerMetainfoRequestMetadata[id] =
        PieceManagerRequestMetadata.new(:write, msg)
    end
  elsif msg.msgType == :reject
    @logger.debug "#{QuartzTorrent.bytesToHex(torrentData.infoHash)}: Got extended metainfo reject response for piece #{msg.piece}"
    # Mark this peer as bad.
    torrentData.metainfoPieceState.markPeerBad peer
    torrentData.metainfoPieceState.setPieceRequested(msg.piece, false)
  end
end
# Find the io associated with the peer and yield it to the passed block.
# If no io is found a warning is logged; the optional 'what' argument names
# the operation being attempted, for the log message.
def withPeersIo(peer, what = nil)
  io = findIoByMetainfo(peer)
  if io
    yield io
  else
    s = ""
    s = "when #{what}" if what
    # BUGFIX: previously interpolated `what` instead of `s`, leaving `s` unused
    # and dropping the "when " prefix from the log message.
    @logger.warn "Couldn't find the io for peer #{peer} #{s}"
  end
end
# Serialize our piece bitfield to the given io. An all-zero bitfield is not
# sent at all, since it carries no information.
def sendBitfield(io, bitfield)
  return if bitfield.allClear?

  @logger.debug "Sending bitfield of size #{bitfield.length}."
  bitfieldMsg = BitfieldMessage.new
  bitfieldMsg.bitfield = bitfield
  bitfieldMsg.serializeTo io
end
# Announce a newly completed piece by sending a Have message to every
# established peer (excluding ourself).
def sendHaves(torrentData, pieceIndex)
  @logger.debug "#{QuartzTorrent.bytesToHex(torrentData.infoHash)}: Sending Have messages to all connected peers for piece #{pieceIndex}"
  recipients = torrentData.peers.all.select { |p| p.state == :established && !p.isUs }
  recipients.each do |peer|
    withPeersIo(peer, "when sending Have message") do |io|
      haveMsg = Have.new
      haveMsg.pieceIndex = pieceIndex
      sendMessageToPeer haveMsg, io, peer
    end
  end
end
# Send an Uninterested message to each established peer that no longer has
# any piece we still need.
def sendUninterested(torrentData)
# If we are no longer interested in peers once this piece has been completed, let them know
return if ! torrentData.blockState
# needed = pieces we still lack ('compliment' is the project API's spelling).
needed = torrentData.blockState.completePieceBitfield.compliment
classifiedPeers = ClassifiedPeers.new torrentData.peers.all
classifiedPeers.establishedPeers.each do |peer|
# Don't bother sending uninterested message if we are already uninterested.
next if ! peer.amInterested || peer.isUs
needFromPeer = needed.intersection(peer.bitfield)
if needFromPeer.allClear?
withPeersIo(peer, "when sending Uninterested message") do |io|
msg = Uninterested.new
sendMessageToPeer msg, io, peer
peer.amInterested = false
@logger.debug "#{QuartzTorrent.bytesToHex(torrentData.infoHash)}: Sending Uninterested message to peer #{peer}"
end
end
end
end
# Serialize and send the given message to a peer over io, accounting the
# message length against the torrent's total uploaded bytes.
def sendMessageToPeer(msg, io, peer)
# NOTE(review): this updates the peer's *download* rate while we are sending;
# presumably the rate is tracked from the peer's perspective (their download
# is our upload) — confirm against Peer#updateDownloadRate.
peer.updateDownloadRate(msg)
torrentData = @torrentData[peer.infoHash]
torrentData.bytesUploaded += msg.length if torrentData
begin
peer.peerMsgSerializer.serializeTo(msg, io)
rescue
# Best-effort: a failed send is logged rather than raised.
# NOTE(review): if torrentData is nil the interpolation below would itself
# raise (torrentData.infoHash) — verify this path can't be hit with nil.
@logger.warn "#{QuartzTorrent.bytesToHex(torrentData.infoHash)}: Sending message to peer #{peer} failed: #{$!.message}"
end
end
# Update our internal peer list for this torrent from the tracker client
def getPeersFromTracker(torrentData, infoHash)
# Adds a new Peer built from a tracker peer. Always succeeds (returns true).
addPeer = Proc.new do |trackerPeer|
peer = Peer.new(trackerPeer)
peer.infoHash = infoHash
torrentData.peers.add peer
true
end
classifiedPeers = nil
# Replaces a disconnected peer with the new tracker peer when one exists;
# returns false otherwise. Used once the peer list is full.
replaceDisconnectedPeer = Proc.new do |trackerPeer|
classifiedPeers = ClassifiedPeers.new(torrentData.peers.all) if ! classifiedPeers
if classifiedPeers.disconnectedPeers.size > 0
torrentData.peers.delete classifiedPeers.disconnectedPeers.pop
addPeer.call trackerPeer
true
else
false
end
end
trackerclient = torrentData.trackerClient
addProc = addPeer
flipped = false
trackerclient.peers.each do |p|
# Once the peer list reaches @maxPeerCount, switch to replacing disconnected peers.
if ! flipped && torrentData.peers.size >= @maxPeerCount
addProc = replaceDisconnectedPeer
flipped = true
end
# Don't treat ourself as a peer.
next if p.id && p.id == trackerclient.peerId
if ! torrentData.peers.findByAddr(p.ip, p.port)
@logger.debug "#{QuartzTorrent.bytesToHex(torrentData.infoHash)}: Adding tracker peer #{p} to peers list"
# Stop once we can neither add nor replace.
break if ! addProc.call(p)
end
end
end
# Remove a torrent that we are downloading: cancel all its timers, disconnect
# from and forget its peers, stop its tracker client and piece managers,
# remove the saved metainfo, and optionally delete the downloaded files.
#
# @param infoHash    binary info hash of the torrent to remove
# @param deleteFiles when true, recursively delete the torrent's files on disk
def handleRemoveTorrent(infoHash, deleteFiles)
  torrentData = @torrentData.delete infoHash
  if ! torrentData
    @logger.warn "Asked to remove a non-existent torrent #{QuartzTorrent.bytesToHex(infoHash)}"
    return
  end
  @logger.info "#{QuartzTorrent.bytesToHex(torrentData.infoHash)}: Removing torrent. #{deleteFiles ? "Will" : "Wont"} delete downloaded files."
  @logger.info "#{QuartzTorrent.bytesToHex(torrentData.infoHash)}: Removing torrent: no torrentData.metainfoRequestTimer" if ! torrentData.metainfoRequestTimer
  @logger.info "#{QuartzTorrent.bytesToHex(torrentData.infoHash)}: Removing torrent: no torrentData.managePeersTimer" if ! torrentData.managePeersTimer
  @logger.info "#{QuartzTorrent.bytesToHex(torrentData.infoHash)}: Removing torrent: no torrentData.checkMetadataPieceManagerTimer" if ! torrentData.checkMetadataPieceManagerTimer
  @logger.info "#{QuartzTorrent.bytesToHex(torrentData.infoHash)}: Removing torrent: no torrentData.checkPieceManagerTimer" if ! torrentData.checkPieceManagerTimer
  @logger.info "#{QuartzTorrent.bytesToHex(torrentData.infoHash)}: Removing torrent: no torrentData.requestBlocksTimer" if ! torrentData.requestBlocksTimer

  # Stop all timers
  cancelTimer torrentData.metainfoRequestTimer if torrentData.metainfoRequestTimer
  cancelTimer torrentData.managePeersTimer if torrentData.managePeersTimer
  cancelTimer torrentData.checkMetadataPieceManagerTimer if torrentData.checkMetadataPieceManagerTimer
  cancelTimer torrentData.checkPieceManagerTimer if torrentData.checkPieceManagerTimer
  cancelTimer torrentData.requestBlocksTimer if torrentData.requestBlocksTimer

  torrentData.trackerClient.removePeersChangedListener(torrentData.peerChangeListener)

  # Remove all the peers for this torrent.
  torrentData.peers.all.each do |peer|
    if peer.state != :disconnected
      # Close socket
      withPeersIo(peer, "when removing torrent") do |io|
        setPeerDisconnected(peer)
        close(io)
        @logger.debug "Closing connection to peer #{peer}"
      end
    end
    torrentData.peers.delete peer
  end

  # Stop tracker client
  torrentData.trackerClient.stop if torrentData.trackerClient

  # Stop PieceManagers
  torrentData.pieceManager.stop if torrentData.pieceManager
  torrentData.metainfoPieceState.stop if torrentData.metainfoPieceState

  # Remove metainfo file if it exists
  begin
    torrentData.metainfoPieceState.remove if torrentData.metainfoPieceState
  rescue
    @logger.warn "#{QuartzTorrent.bytesToHex(torrentData.infoHash)}: Deleting metainfo file for torrent #{QuartzTorrent.bytesToHex(infoHash)} failed: #{$!}"
  end

  if deleteFiles
    if torrentData.info
      begin
        path = @baseDirectory + File::SEPARATOR + torrentData.info.name
        # File.exist? rather than the deprecated File.exists? (removed in Ruby 3.2).
        if File.exist? path
          FileUtils.rm_r path
          @logger.info "#{QuartzTorrent.bytesToHex(torrentData.infoHash)}: Deleted #{path}"
        else
          # BUGFIX: this branch and the rescue below previously had their
          # messages swapped (the "failed: #{$!}" text was logged here, where
          # no exception exists, and the "doesn't exist" text in the rescue).
          @logger.warn "#{QuartzTorrent.bytesToHex(torrentData.infoHash)}: When removing torrent, deleting '#{path}' failed because it doesn't exist"
        end
      rescue
        @logger.warn "#{QuartzTorrent.bytesToHex(torrentData.infoHash)}: Deleting '#{path}' for torrent #{QuartzTorrent.bytesToHex(infoHash)} failed: #{$!}"
      end
    end
  end

  # A slot may have opened up; allow a queued torrent to run.
  dequeue
end
# Pause (value = true) or unpause (value = false) a torrent that we are
# downloading. Unpausing re-queues the torrent at the head of the queue since
# there might not be room for it to run immediately.
def handlePause(infoHash, value)
  torrentData = @torrentData[infoHash]
  unless torrentData
    @logger.warn "Asked to pause a non-existent torrent #{QuartzTorrent.bytesToHex(infoHash)}"
    return
  end

  # No state change requested.
  return if torrentData.paused == value

  torrentData.paused = value
  # On unpause, queue the torrent (head of queue) since there might not be
  # room for it to run right away.
  queue(torrentData, :unshift) unless value
  setFrozen infoHash, value unless torrentData.queued
  dequeue
end
# Queue a torrent. With mode :unshift the torrent goes to the head of the
# queue, otherwise to the tail. A queued, unpaused torrent is frozen so it
# holds no peer connections while it waits.
def queue(torrentData, mode = :queue)
  return if torrentData.queued

  # Queue the torrent
  case mode
  when :unshift
    @torrentQueue.unshift torrentData
  else
    @torrentQueue.push torrentData
  end

  setFrozen torrentData, true unless torrentData.paused
end
# Dequeue any torrents that can now run based on available space, starting
# each newly runnable torrent (or unfreezing it if it was already initialized).
def dequeue
  runnable = @torrentQueue.dequeue(@torrentData.values)
  runnable.each do |torrentData|
    if torrentData.state == :initializing
      # Never started before: kick off initialization.
      initTorrent torrentData
    elsif !torrentData.paused
      # Was frozen while queued: resume connecting to peers.
      setFrozen torrentData, false
    end
  end
end
# Freeze or unfreeze a torrent. If value is true, then we disconnect from all peers for this torrent and forget
# the peers. If value is false, we start reconnecting to peers.
# Parameter torrent can be an infoHash or TorrentData
def setFrozen(torrent, value)
torrentData = torrent
if ! torrent.is_a?(TorrentData)
torrentData = @torrentData[torrent]
if ! torrentData
@logger.warn "Asked to freeze a non-existent torrent #{QuartzTorrent.bytesToHex(torrent)}"
return
end
end
if value
# Disconnect from all peers so we won't reply to any messages.
torrentData.peers.all.each do |peer|
if peer.state != :disconnected
# Close socket
# NOTE(review): the "when removing torrent" text appears copied from
# handleRemoveTorrent; here we are freezing, not removing.
withPeersIo(peer, "when removing torrent") do |io|
setPeerDisconnected(peer)
close(io)
end
end
# Forget the peer regardless of its connection state.
torrentData.peers.delete peer
end
else
# Get our list of peers and start connecting right away
# Non-recurring and immediate timer
torrentData.managePeersTimer =
@reactor.scheduleTimer(@managePeersPeriod, [:manage_peers, torrentData.infoHash], false, true)
end
end
end
# Represents a client that talks to bittorrent peers. This is the main class used to download and upload
# bittorrents.
class PeerClient
# Create a new PeerClient that will save and load torrent data under the specified baseDirectory.
# maxIncomplete and maxActive are forwarded to the PeerClientHandler.
def initialize(baseDirectory, maxIncomplete = 5, maxActive = 10)
@port = 9998
@handler = nil
@stopped = true
@reactor = nil
@logger = LogManager.getLogger("peerclient")
@worker = nil
@handler = PeerClientHandler.new baseDirectory, maxIncomplete, maxActive
@reactor = QuartzTorrent::Reactor.new(@handler, LogManager.getLogger("peerclient.reactor"))
# Tracker clients added before start; started once we are listening.
@toStart = []
end
# Set the port used by the torrent peer client. This only has an effect if start has not yet been called.
attr_accessor :port
# Start the PeerClient: open the listening port, and start a new thread to begin downloading/uploading pieces.
# If listening fails, an exception of class Errno::EADDRINUSE is thrown.
def start
return if ! @stopped
@reactor.listen("0.0.0.0",@port,:listener_socket)
@stopped = false
@worker = Thread.new do
QuartzTorrent.initThread("peerclient")
begin
# Start any tracker clients that were added before we were listening.
@toStart.each{ |trackerclient| trackerclient.start }
@reactor.start
@logger.info "Reactor stopped."
# Reactor has shut down; stop all tracker clients as well.
@handler.torrentData.each do |k,v|
v.trackerClient.stop
end
rescue
@logger.error "Unexpected exception in worker thread: #{$!}"
@logger.error $!.backtrace.join("\n")
end
end
end
# Stop the PeerClient. This method may take some time to complete.
def stop
return if @stopped
@logger.info "Stop called. Stopping reactor"
@reactor.stop
if @worker
# Wait up to 10 seconds for the worker thread to exit before giving up.
@logger.info "Worker wait timed out after 10 seconds. Shutting down anyway" if ! @worker.join(10)
end
@stopped = true
end
# Add a new torrent to manage described by a Metainfo object. This is generally the
# method to call if you have a .torrent file.
# Returns the infoHash of the newly added torrent.
def addTorrentByMetainfo(metainfo)
raise "addTorrentByMetainfo should be called with a Metainfo object, not #{metainfo.class}" if ! metainfo.is_a?(Metainfo)
trackerclient = TrackerClient.createFromMetainfo(metainfo, false)
addTorrent(trackerclient, metainfo.infoHash, metainfo.info)
end
# Add a new torrent to manage given an announceUrl and an infoHash.
# Returns the infoHash of the newly added torrent.
def addTorrentWithoutMetainfo(announceUrl, infoHash, magnet = nil)
raise "addTorrentWithoutMetainfo should be called with a Magnet object, not a #{magnet.class}" if magnet && ! magnet.is_a?(MagnetURI)
trackerclient = TrackerClient.create(announceUrl, infoHash, 0, false)
addTorrent(trackerclient, infoHash, nil, magnet)
end
# Add a new torrent to manage given a MagnetURI object. This is generally the
# method to call if you have a magnet link.
# Returns the infoHash of the newly added torrent.
def addTorrentByMagnetURI(magnet)
raise "addTorrentByMagnetURI should be called with a MagnetURI object, not a #{magnet.class}" if ! magnet.is_a?(MagnetURI)
trackerUrl = magnet.tracker
raise "addTorrentByMagnetURI can't handle magnet links that don't have a tracker URL." if !trackerUrl
addTorrentWithoutMetainfo(trackerUrl, magnet.btInfoHash, magnet)
end
# Get a hash of new TorrentDataDelegate objects keyed by torrent infohash. This is the method to
# call to get information about the state of torrents being downloaded.
def torrentData(infoHash = nil)
# This will have to work by putting an event in the handler's queue, and blocking for a response.
# The handler will build a response and return it.
@handler.getDelegateTorrentData(infoHash)
end
# Pause or unpause the specified torrent.
def setPaused(infoHash, value)
@handler.setPaused(infoHash, value)
end
# Set the download rate limit in bytes/second.
def setDownloadRateLimit(infoHash, bytesPerSecond)
raise "download rate limit must be an Integer, not a #{bytesPerSecond.class}" if bytesPerSecond && ! bytesPerSecond.is_a?(Integer)
@handler.setDownloadRateLimit(infoHash, bytesPerSecond)
end
# Set the upload rate limit in bytes/second.
def setUploadRateLimit(infoHash, bytesPerSecond)
raise "upload rate limit must be an Integer, not a #{bytesPerSecond.class}" if bytesPerSecond && ! bytesPerSecond.is_a?(Integer)
@handler.setUploadRateLimit(infoHash, bytesPerSecond)
end
# Set the upload ratio. Pass nil to disable
def setUploadRatio(infoHash, ratio)
raise "upload ratio must be Numeric, not a #{ratio.class}" if ratio && ! ratio.is_a?(Numeric)
@handler.setUploadRatio(infoHash, ratio)
end
# Set the maximum amount of time (in seconds) that a torrent can be in the upload-only state before
# it is paused. Pass nil to disable.
def setUploadDuration(infoHash, seconds)
raise "upload ratio must be Numeric, not a #{seconds.class}" if seconds && ! seconds.is_a?(Numeric)
@handler.setUploadDuration(infoHash, seconds)
end
# Remove a currently running torrent
def removeTorrent(infoHash, deleteFiles = false)
@handler.removeTorrent(infoHash, deleteFiles)
end
private
# Helper method for adding a torrent.
# Registers the tracker client with the handler, wires up the dynamic tracker
# request parameters, and starts (or defers starting) the tracker client.
def addTorrent(trackerclient, infoHash, info, magnet = nil)
trackerclient.port = @port
torrentData = @handler.addTrackerClient(infoHash, info, trackerclient)
torrentData.magnet = magnet
# Supplies up-to-date left/downloaded/uploaded figures for each tracker announce.
trackerclient.dynamicRequestParamsBuilder = Proc.new do
torrentData = @handler.torrentData[infoHash]
dataLength = (info ? info.dataLength : nil)
result = TrackerDynamicRequestParams.new(dataLength)
if torrentData && torrentData.blockState
result.left = torrentData.blockState.totalLength - torrentData.blockState.completedLength
result.downloaded = torrentData.bytesDownloadedDataOnly
result.uploaded = torrentData.bytesUploadedDataOnly
end
result
end
# If we haven't started yet then add this trackerclient to a queue of
# trackerclients to start once we are started. If we start too soon we
# will connect to the tracker, and it will try to connect back to us before we are listening.
if ! trackerclient.started?
if @stopped
@toStart.push trackerclient
else
trackerclient.start
end
end
torrentData.infoHash
end
end
end
Fixed a bug that occurred when starting a torrent.
require "quartz_torrent/log.rb"
require "quartz_torrent/trackerclient.rb"
require "quartz_torrent/peermsg.rb"
require "quartz_torrent/reactor.rb"
require "quartz_torrent/util.rb"
require "quartz_torrent/classifiedpeers.rb"
require "quartz_torrent/peerholder.rb"
require "quartz_torrent/peermanager.rb"
require "quartz_torrent/blockstate.rb"
require "quartz_torrent/filemanager.rb"
require "quartz_torrent/semaphore.rb"
require "quartz_torrent/piecemanagerrequestmetadata.rb"
require "quartz_torrent/metainfopiecestate.rb"
require "quartz_torrent/extension.rb"
require "quartz_torrent/magnet.rb"
require "quartz_torrent/torrentqueue.rb"
module QuartzTorrent
# Extra metadata stored in a PieceManagerRequestMetadata specific to read requests:
# the peer that asked for a block and the original Request message it sent.
class ReadRequestMetadata
  # The peer that issued the read request.
  attr_accessor :peer
  # The original Request message from that peer.
  attr_accessor :requestMsg

  def initialize(peer, requestMsg)
    @peer = peer
    @requestMsg = requestMsg
  end
end
# Class used by PeerClientHandler to keep track of information associated with a single torrent
# being downloaded/uploaded.
class TorrentData
# @param infoHash      binary info hash identifying the torrent
# @param info          the Metainfo.Info struct, or nil for magnet-link torrents
# @param trackerClient the TrackerClient used to announce this torrent
def initialize(infoHash, info, trackerClient)
@infoHash = infoHash
@info = info
@trackerClient = trackerClient
@peerManager = PeerManager.new
@pieceManagerRequestMetadata = {}
@pieceManagerMetainfoRequestMetadata = {}
@bytesDownloadedDataOnly = 0
@bytesUploadedDataOnly = 0
@bytesDownloaded = 0
@bytesUploaded = 0
@magnet = nil
@peers = PeerHolder.new
@state = :initializing
@blockState = nil
@metainfoPieceState = nil
@metainfoRequestTimer = nil
@managePeersTimer = nil
@checkMetadataPieceManagerTimer = nil
@checkPieceManagerTimer = nil
@requestBlocksTimer = nil
@paused = false
@queued = false
@downRateLimit = nil
@upRateLimit = nil
@ratio = nil
@uploadDuration = nil
@downloadCompletedTime = nil
@isEndgame = false
end
# The torrents Metainfo.Info struct. This is nil if the torrent has no metadata and we need to download it
# (i.e. a magnet link)
attr_accessor :info
# The infoHash of the torrent
attr_accessor :infoHash
attr_accessor :trackerClient
attr_accessor :peers
# The MagnetURI object, if this torrent was created from a magnet link. Nil for torrents not created from magnets.
attr_accessor :magnet
attr_accessor :peerManager
attr_accessor :blockState
attr_accessor :pieceManager
# Metadata associated with outstanding requests to the PieceManager responsible for the pieces of the torrent data.
attr_accessor :pieceManagerRequestMetadata
# Metadata associated with outstanding requests to the PieceManager responsible for the pieces of the torrent metainfo.
attr_accessor :pieceManagerMetainfoRequestMetadata
# Proc registered with the tracker client; fires peer management when the tracker's peer list changes.
attr_accessor :peerChangeListener
# Payload bytes only, excluding protocol overhead (cf. bytesDownloaded/bytesUploaded).
attr_accessor :bytesDownloadedDataOnly
attr_accessor :bytesUploadedDataOnly
attr_accessor :bytesDownloaded
attr_accessor :bytesUploaded
# State of the torrent. Is one of the following states:
# :initializing Datastructures have been created, but no work started.
# :checking_pieces Checking piece hashes on startup
# :downloading_metainfo Downloading the torrent metainfo
# :uploading The torrent is complete and we are only uploading
# :running The torrent is incomplete and we are downloading and uploading
# :error There was an unrecoverable error with the torrent.
attr_accessor :state
# True when the download is in endgame mode (received blocks cause Cancel messages
# to be sent to other peers that were asked for the same block).
attr_accessor :isEndgame
attr_accessor :metainfoPieceState
# The timer handle for the timer that requests metainfo pieces. This is used to cancel the
# timer when the metadata is completely downloaded.
attr_accessor :metainfoRequestTimer
# Timer handle for timer that manages peers.
attr_accessor :managePeersTimer
# Timer handle for timer that checks metadata piece manager results
attr_accessor :checkMetadataPieceManagerTimer
# Timer handle for timer that checks piece manager results
attr_accessor :checkPieceManagerTimer
# Timer handle for timer that requests blocks
attr_accessor :requestBlocksTimer
# Is the torrent paused
attr_accessor :paused
# Is the torrent queued
attr_accessor :queued
# The RateLimit for downloading this torrent.
attr_accessor :downRateLimit
# The RateLimit for uploading to peers for this torrent.
attr_accessor :upRateLimit
# After we have completed downloading a torrent, we will continue to upload until we have
# uploaded ratio * torrent_size bytes. If nil, no limit on upload.
attr_accessor :ratio
# Maximum amount of time in seconds that the torrent can be in the uploading state before it's paused.
attr_accessor :uploadDuration
# Time at which we completely downloaded all bytes of the torrent.
attr_accessor :downloadCompletedTime
end
# Data about torrents for use by the end user. A read-mostly snapshot of the
# internal TorrentData, safe to hand across threads; call #refresh to update it.
class TorrentDataDelegate
  # Create a new TorrentDataDelegate. This is meant to only be called internally.
  #
  # @param torrentData       internal TorrentData object to snapshot fields from.
  # @param peerClientHandler handler used later to refresh this delegate.
  def initialize(torrentData, peerClientHandler)
    fillFrom(torrentData)
    @torrentData = torrentData
    @peerClientHandler = peerClientHandler
  end

  # Torrent Metainfo.info struct. This is nil if the torrent has no metadata and we haven't downloaded it yet
  # (i.e. a magnet link).
  attr_accessor :info
  # Infohash of the torrent. This is binary data.
  attr_accessor :infoHash
  # Recommended display name for this torrent.
  attr_accessor :recommendedName
  # Download rate in bytes/second
  attr_reader :downloadRate
  # Upload rate in bytes/second
  attr_reader :uploadRate
  # Download rate limit in bytes/second if a limit is set, nil otherwise
  attr_reader :downloadRateLimit
  # Upload rate limit in bytes/second if a limit is set, nil otherwise
  attr_reader :uploadRateLimit
  # Download rate in bytes/second counting only actual block data (no protocol overhead).
  attr_reader :downloadRateDataOnly
  # Upload rate in bytes/second counting only actual block data (no protocol overhead).
  attr_reader :uploadRateDataOnly
  # Count of completed bytes of the torrent
  attr_reader :completedBytes
  # Array of peers for the torrent. These include connected, disconnected, and handshaking peers
  attr_reader :peers
  # State of the torrent. This may be one of :initializing, :checking_pieces,
  # :downloading_metainfo, :running, :uploading, :error, or :deleted.
  # The :deleted state indicates that the torrent that this TorrentDataDelegate refers to is no longer being managed by the peer client.
  # A writer is required (not just a reader) because PeerClientHandler#timerExpired
  # marks delegates of removed torrents by assigning :deleted.
  attr_accessor :state
  # Bitfield representing which pieces of the torrent are completed.
  attr_reader :completePieceBitfield
  # Length of metainfo info in bytes. This is only set when the state is :downloading_metainfo
  attr_reader :metainfoLength
  # How much of the metainfo info we have downloaded in bytes. This is only set when the state is :downloading_metainfo
  attr_reader :metainfoCompletedLength
  # Whether or not the torrent is paused.
  attr_reader :paused
  # Whether or not the torrent is queued.
  attr_reader :queued
  # After we have completed downloading a torrent, we will continue to upload until we have
  # uploaded ratio * torrent_size bytes. If nil, no limit on upload.
  attr_accessor :ratio
  # Maximum time in seconds the torrent may stay in the uploading state.
  attr_accessor :uploadDuration
  attr_accessor :bytesUploadedDataOnly
  attr_accessor :bytesDownloadedDataOnly
  attr_accessor :bytesUploaded
  attr_accessor :bytesDownloaded

  # Update the data in this TorrentDataDelegate from the torrentData
  # object that it was created from. TODO: What if that torrentData is now gone?
  def refresh
    @peerClientHandler.updateDelegateTorrentData self
  end

  # Set the fields of this TorrentDataDelegate from the passed torrentData.
  # This is meant to only be called internally.
  def internalRefresh
    fillFrom(@torrentData)
  end

  private

  # Copy all user-visible fields out of the internal TorrentData object.
  def fillFrom(torrentData)
    @infoHash = torrentData.infoHash
    @info = torrentData.info
    @bytesUploadedDataOnly = torrentData.bytesUploadedDataOnly
    @bytesDownloadedDataOnly = torrentData.bytesDownloadedDataOnly
    @bytesUploaded = torrentData.bytesUploaded
    @bytesDownloaded = torrentData.bytesDownloaded
    if torrentData.state == :checking_pieces
      # When checking pieces there is only one request pending with the piece manager.
      checkExistingRequestId = torrentData.pieceManagerRequestMetadata.keys.first
      progress = torrentData.pieceManager.progress checkExistingRequestId
      @completedBytes = progress ? progress * torrentData.info.dataLength / 100 : 0
    else
      @completedBytes = torrentData.blockState.nil? ? 0 : torrentData.blockState.completedLength
    end
    # This should really be a copy:
    @completePieceBitfield = torrentData.blockState.nil? ? nil : torrentData.blockState.completePieceBitfield
    buildPeersList(torrentData)
    # Rates are measured from the remote peer's perspective: a peer's upload
    # rate to us is our download rate from them, and vice versa.
    @downloadRate = @peers.reduce(0){ |memo, peer| memo + peer.uploadRate }
    @uploadRate = @peers.reduce(0){ |memo, peer| memo + peer.downloadRate }
    @downloadRateDataOnly = @peers.reduce(0){ |memo, peer| memo + peer.uploadRateDataOnly }
    @uploadRateDataOnly = @peers.reduce(0){ |memo, peer| memo + peer.downloadRateDataOnly }
    @state = torrentData.state
    @metainfoLength = nil
    @paused = torrentData.paused
    @queued = torrentData.queued
    @metainfoCompletedLength = nil
    if torrentData.metainfoPieceState && torrentData.state == :downloading_metainfo
      @metainfoLength = torrentData.metainfoPieceState.metainfoLength
      @metainfoCompletedLength = torrentData.metainfoPieceState.metainfoCompletedLength
    end
    if torrentData.info
      @recommendedName = torrentData.info.name
    else
      if torrentData.magnet
        @recommendedName = torrentData.magnet.displayName
      else
        @recommendedName = nil
      end
    end
    @downloadRateLimit = torrentData.downRateLimit.unitsPerSecond if torrentData.downRateLimit
    @uploadRateLimit = torrentData.upRateLimit.unitsPerSecond if torrentData.upRateLimit
    @ratio = torrentData.ratio
    @uploadDuration = torrentData.uploadDuration
  end

  # Deep-ish copy of the torrent's peer list (each Peer is cloned).
  def buildPeersList(torrentData)
    @peers = []
    torrentData.peers.all.each do |peer|
      @peers.push peer.clone
    end
  end
end
# This class implements a Reactor Handler object. This Handler implements the PeerClient.
class PeerClientHandler < QuartzTorrent::Handler
# Create a new PeerClientHandler.
#
# @param baseDirectory directory under which torrent data is stored.
# @param maxIncomplete passed to TorrentQueue — presumably the maximum number of
#   incomplete torrents dequeued at once; confirm in TorrentQueue.
# @param maxActive passed to TorrentQueue — presumably the maximum number of
#   active torrents dequeued at once; confirm in TorrentQueue.
def initialize(baseDirectory, maxIncomplete = 5, maxActive = 10)
# Hash of TorrentData objects, keyed by torrent infoHash
@torrentData = {}
@torrentQueue = TorrentQueue.new(maxIncomplete, maxActive)
@baseDirectory = baseDirectory
@logger = LogManager.getLogger("peerclient")
# Overall maximum number of peers (connected + disconnected)
@maxPeerCount = 120
# Number of peers we ideally want to try and be downloading/uploading with
@targetActivePeerCount = 50
# Presumably the number of peers to keep unchoked at once; used by the peer
# manager — confirm against PeerManager.
@targetUnchokedPeerCount = 4
@managePeersPeriod = 10 # Defined in bittorrent spec. Only unchoke peers every 10 seconds.
# Period in seconds of the timer that requests blocks from peers.
@requestBlocksPeriod = 1
# Seconds to wait for a peer to complete the handshake before disconnecting it
# (see handleHandshakeTimeout).
@handshakeTimeout = 1
# Seconds after which an unanswered block request is abandoned (see requestBlocks).
@requestTimeout = 60
# When this many or fewer blocks remain incomplete, the torrent enters endgame mode.
@endgameBlockThreshold = 20
end
################################################ PUBLIC API METHODS ################################################
# Read-only access to the hash of internal TorrentData objects, keyed by torrent infoHash.
attr_reader :torrentData
# Add a new tracker client. This effectively adds a new torrent to download.
#
# @param infoHash      infohash of the new torrent (binary data).
# @param info          Metainfo.info struct for the torrent, or nil (magnet link).
# @param trackerclient tracker client responsible for this torrent.
# @return the TorrentData object created for the new torrent.
# @raise RuntimeError if a torrent with this infohash is already registered.
def addTrackerClient(infoHash, info, trackerclient)
  if @torrentData.has_key? infoHash
    raise "There is already a tracker registered for torrent #{QuartzTorrent.bytesToHex(infoHash)}"
  end
  torrentData = TorrentData.new(infoHash, info, trackerclient)
  torrentData.info = info
  torrentData.state = :initializing
  @torrentData[infoHash] = torrentData
  # Queue the torrent, then immediately try to dequeue in case there is room.
  queue(torrentData)
  dequeue
  torrentData
end
# Remove a torrent.
#
# @param infoHash    infohash identifying the torrent to remove.
# @param deleteFiles when true, the downloaded files are deleted as well.
def removeTorrent(infoHash, deleteFiles = false)
  # The torrent structures could be in use by an event handler right now, so
  # defer removal to the reactor thread via an immediate, non-recurring timer.
  verb = deleteFiles ? "Will" : "Wont"
  @logger.info "#{QuartzTorrent.bytesToHex(infoHash)}: Scheduling immediate timer to remove torrent. #{verb} delete downloaded files."
  @reactor.scheduleTimer(0, [:removetorrent, infoHash, deleteFiles], false, true)
end
# Pause or unpause the specified torrent.
#
# @param infoHash infohash identifying the torrent.
# @param value    true to pause, false to unpause.
def setPaused(infoHash, value)
  # The torrent structures could be in use by an event handler right now, so
  # defer the change to the reactor thread via an immediate, non-recurring timer.
  action = value ? "pause" : "unpause"
  @logger.info "#{QuartzTorrent.bytesToHex(infoHash)}: Scheduling immediate timer to #{action} torrent."
  @reactor.scheduleTimer(0, [:pausetorrent, infoHash, value], false, true)
end
# Set the download rate limit. Pass nil as the bytesPerSecond to disable the limit.
#
# @param infoHash       infohash identifying the torrent.
# @param bytesPerSecond new limit in bytes/second, or nil for unlimited.
def setDownloadRateLimit(infoHash, bytesPerSecond)
  torrentData = @torrentData[infoHash]
  unless torrentData
    @logger.warn "Asked to set download rate limit for a non-existent torrent #{QuartzTorrent.bytesToHex(infoHash)}"
    return
  end
  if bytesPerSecond.nil?
    torrentData.downRateLimit = nil
  elsif torrentData.downRateLimit
    # Reuse the existing RateLimit object; just change its rate.
    torrentData.downRateLimit.unitsPerSecond = bytesPerSecond
  else
    torrentData.downRateLimit = RateLimit.new(bytesPerSecond, 2*bytesPerSecond, 0)
  end
  # Propagate the (possibly nil) limit to every peer connection.
  torrentData.peers.all.each do |peer|
    withPeersIo(peer, "setting download rate limit") do |io|
      io.readRateLimit = torrentData.downRateLimit
    end
  end
end
# Set the upload rate limit. Pass nil as the bytesPerSecond to disable the limit.
#
# @param infoHash       infohash identifying the torrent.
# @param bytesPerSecond new limit in bytes/second, or nil for unlimited.
def setUploadRateLimit(infoHash, bytesPerSecond)
  torrentData = @torrentData[infoHash]
  unless torrentData
    @logger.warn "Asked to set upload rate limit for a non-existent torrent #{QuartzTorrent.bytesToHex(infoHash)}"
    return
  end
  if bytesPerSecond.nil?
    torrentData.upRateLimit = nil
  elsif torrentData.upRateLimit
    # Reuse the existing RateLimit object; just change its rate.
    torrentData.upRateLimit.unitsPerSecond = bytesPerSecond
  else
    torrentData.upRateLimit = RateLimit.new(bytesPerSecond, 2*bytesPerSecond, 0)
  end
  # Propagate the (possibly nil) limit to every peer connection.
  torrentData.peers.all.each do |peer|
    withPeersIo(peer, "setting upload rate limit") do |io|
      io.writeRateLimit = torrentData.upRateLimit
    end
  end
end
# Set the upload ratio. Pass nil to disable.
# Once bytesUploadedDataOnly reaches ratio * torrent size the torrent is paused
# (enforced in requestBlocks).
#
# @param infoHash infohash identifying the torrent.
# @param ratio    upload ratio, or nil for no limit.
def setUploadRatio(infoHash, ratio)
  torrentData = @torrentData[infoHash]
  if torrentData
    torrentData.ratio = ratio
  else
    @logger.warn "Asked to set upload ratio limit for a non-existent torrent #{QuartzTorrent.bytesToHex(infoHash)}"
  end
end
# Set the maximum amount of time (in seconds) that a torrent can be in the
# upload-only state before it is paused. Pass nil to disable.
#
# @param infoHash infohash identifying the torrent.
# @param seconds  maximum seeding time in seconds, or nil for no limit.
def setUploadDuration(infoHash, seconds)
  torrentData = @torrentData[infoHash]
  if torrentData
    torrentData.uploadDuration = seconds
  else
    @logger.warn "Asked to set upload duration for a non-existent torrent #{QuartzTorrent.bytesToHex(infoHash)}"
  end
end
# Get a hash of new TorrentDataDelegate objects keyed by torrent infohash.
# This method is meant to be called from a different thread than the one
# the reactor is running in. This method is not immediate but blocks until the
# data is prepared.
#
# @param infoHash when non-nil, only that torrent's data is returned
#   (still in a hash; just one entry).
# @return hash of infohash => TorrentDataDelegate.
def getDelegateTorrentData(infoHash = nil)
  result = {}
  return result if stopped?
  # Build the delegates on the reactor thread via an immediate, non-recurring
  # timer, then wait for it to signal completion.
  done = Semaphore.new
  @reactor.scheduleTimer(0, [:get_torrent_data, result, done, infoHash], false, true)
  done.wait
  result
end
# Update the data stored in a TorrentDataDelegate to the latest information.
# Meant to be called from a thread other than the reactor's; blocks until the
# reactor has refreshed the delegate. If the torrent is no longer managed, the
# delegate's state is set to :deleted (see timerExpired).
#
# @param delegate the TorrentDataDelegate to refresh.
# @return the refreshed delegate (nil if the handler is stopped).
def updateDelegateTorrentData(delegate)
  return if stopped?
  # Refresh on the reactor thread via an immediate, non-recurring timer.
  semaphore = Semaphore.new
  @reactor.scheduleTimer(0, [:update_torrent_data, delegate, semaphore], false, true)
  semaphore.wait
  # BUGFIX: the previous implementation evaluated `result` here — a local that
  # was never assigned in this method — raising NameError at runtime. Return
  # the refreshed delegate instead.
  delegate
end
################################################ REACTOR METHODS ################################################
# Reactor method called when a remote peer has connected to us. Reads the peer's
# handshake, validates it (known torrent, capacity, not ourself), replies with
# our own handshake (and the extended handshake when supported), then registers
# the peer and sends our bitfield.
def serverInit(metadata, addr, port)
# A peer connected to us
# Read handshake message
@logger.warn "Peer connection from #{addr}:#{port}"
begin
msg = PeerHandshake.unserializeExceptPeerIdFrom currentIo
rescue
@logger.warn "Peer failed handshake: #{$!}"
close
return
end
torrentData = torrentDataForHandshake(msg, "#{addr}:#{port}")
# Are we tracking this torrent?
if !torrentData
@logger.warn "Peer sent handshake for unknown torrent"
close
return
end
trackerclient = torrentData.trackerClient
# If we already have too many connections, don't allow this connection.
classifiedPeers = ClassifiedPeers.new torrentData.peers.all
if classifiedPeers.establishedPeers.length > @targetActivePeerCount
@logger.warn "Closing connection to peer from #{addr}:#{port} because we already have #{classifiedPeers.establishedPeers.length} active peers which is > the target count of #{@targetActivePeerCount} "
close
return
end
# Send handshake
outgoing = PeerHandshake.new
outgoing.peerId = trackerclient.peerId
outgoing.infoHash = torrentData.infoHash
outgoing.serializeTo currentIo
# Send extended handshake if the peer supports extensions
# (bit 0x10 of reserved byte 5 signals BEP 10 extension-protocol support)
if (msg.reserved.unpack("C8")[5] & 0x10) != 0
@logger.warn "Peer supports extensions. Sending extended handshake"
extended = Extension.createExtendedHandshake torrentData.info
extended.serializeTo currentIo
end
# Read incoming handshake's peerid
msg.peerId = currentIo.read(PeerHandshake::PeerIdLen)
if msg.peerId == trackerclient.peerId
@logger.info "We got a connection from ourself. Closing connection."
close
return
end
# Try to match this connection to a known-but-disconnected peer record with the
# same address; otherwise a fresh Peer object is created below.
peer = nil
peers = torrentData.peers.findById(msg.peerId)
if peers
peers.each do |existingPeer|
if existingPeer.state != :disconnected
@logger.warn "Peer with id #{msg.peerId} created a new connection when we already have a connection in state #{existingPeer.state}. Closing new connection."
close
return
else
if existingPeer.trackerPeer.ip == addr && existingPeer.trackerPeer.port == port
peer = existingPeer
end
end
end
end
if ! peer
peer = Peer.new(TrackerPeer.new(addr, port))
updatePeerWithHandshakeInfo(torrentData, msg, peer)
torrentData.peers.add peer
if ! peers
@logger.warn "Unknown peer with id #{msg.peerId} connected."
else
@logger.warn "Known peer with id #{msg.peerId} connected from new location."
end
else
@logger.warn "Known peer with id #{msg.peerId} connected from known location."
end
@logger.info "Peer #{peer} connected to us. "
# New connections start out choked in both directions and uninterested, per
# the BitTorrent protocol.
peer.state = :established
peer.amChoked = true
peer.peerChoked = true
peer.amInterested = false
peer.peerInterested = false
if torrentData.info
peer.bitfield = Bitfield.new(torrentData.info.pieces.length)
else
peer.bitfield = EmptyBitfield.new
@logger.info "We have no metainfo yet, so setting peer #{peer} to have an EmptyBitfield"
end
# Send bitfield
sendBitfield(currentIo, torrentData.blockState.completePieceBitfield) if torrentData.blockState
setMetaInfo(peer)
setReadRateLimit(torrentData.downRateLimit) if torrentData.downRateLimit
setWriteRateLimit(torrentData.upRateLimit) if torrentData.upRateLimit
end
# Reactor method called when we have connected to a peer. Sends our handshake
# and bitfield, and schedules a handshake timeout.
#
# @param peer the Peer object we initiated a connection to.
def clientInit(peer)
  # Look up the torrent this peer belongs to.
  torrentData = @torrentData[peer.infoHash]
  unless torrentData
    @logger.warn "No tracker client found for peer #{peer}. Closing connection."
    close
    return
  end
  trackerclient = torrentData.trackerClient
  @logger.info "Connected to peer #{peer}. Sending handshake."
  handshake = PeerHandshake.new
  handshake.peerId = trackerclient.peerId
  handshake.infoHash = peer.infoHash
  handshake.serializeTo currentIo
  peer.state = :handshaking
  # If the peer doesn't complete the handshake in time, drop the connection.
  @reactor.scheduleTimer(@handshakeTimeout, [:handshake_timeout, peer], false)
  @logger.debug "Done sending handshake."
  # Let the peer know which pieces we already have.
  sendBitfield(currentIo, torrentData.blockState.completePieceBitfield) if torrentData.blockState
  setReadRateLimit(torrentData.downRateLimit) if torrentData.downRateLimit
  setWriteRateLimit(torrentData.upRateLimit) if torrentData.upRateLimit
end
# Reactor method called when there is data ready to be read from a peer's socket.
# Reads either the peer's handshake (when the peer is in state :handshaking) or
# one wire message, then dispatches the message to the matching handler.
def recvData(peer)
msg = nil
@logger.debug "Got data from peer #{peer}"
if peer.state == :handshaking
# Read handshake message
begin
@logger.debug "Reading handshake from #{peer}"
msg = PeerHandshake.unserializeFrom currentIo
rescue
@logger.warn "Peer #{peer} failed handshake: #{$!}"
setPeerDisconnected(peer)
close
return
end
else
begin
@logger.debug "Reading wire-message from #{peer}"
msg = peer.peerMsgSerializer.unserializeFrom currentIo
#msg = PeerWireMessage.unserializeFrom currentIo
rescue EOFError
# EOF is the normal way a remote peer hangs up.
@logger.info "Peer #{peer} disconnected."
setPeerDisconnected(peer)
close
return
rescue
@logger.warn "Unserializing message from peer #{peer} failed: #{$!}"
@logger.warn $!.backtrace.join "\n"
setPeerDisconnected(peer)
close
return
end
peer.updateUploadRate msg
torrentData = @torrentData[peer.infoHash]
# msg.length appears to be the full wire length, so this counter includes
# protocol overhead (contrast bytesDownloadedDataOnly) — confirm in PeerWireMessage.
torrentData.bytesDownloaded += msg.length if torrentData
@logger.debug "Peer #{peer} upload rate: #{peer.uploadRate.value} data only: #{peer.uploadRateDataOnly.value}"
end
# Dispatch on message type.
if msg.is_a? PeerHandshake
# This is a remote peer that we connected to returning our handshake.
processHandshake(msg, peer)
# Connections start out fully choked and uninterested in both directions.
peer.state = :established
peer.amChoked = true
peer.peerChoked = true
peer.amInterested = false
peer.peerInterested = false
elsif msg.is_a? BitfieldMessage
@logger.debug "Received bitfield message from peer."
handleBitfield(msg, peer)
elsif msg.is_a? Unchoke
@logger.debug "Received unchoke message from peer."
peer.amChoked = false
elsif msg.is_a? Choke
@logger.debug "Received choke message from peer."
peer.amChoked = true
elsif msg.is_a? Interested
@logger.debug "Received interested message from peer."
peer.peerInterested = true
elsif msg.is_a? Uninterested
@logger.debug "Received uninterested message from peer."
peer.peerInterested = false
elsif msg.is_a? Piece
@logger.debug "Received piece message from peer for torrent #{QuartzTorrent.bytesToHex(peer.infoHash)}: piece #{msg.pieceIndex} offset #{msg.blockOffset} length #{msg.data.length}."
handlePieceReceive(msg, peer)
elsif msg.is_a? Request
@logger.debug "Received request message from peer for torrent #{QuartzTorrent.bytesToHex(peer.infoHash)}: piece #{msg.pieceIndex} offset #{msg.blockOffset} length #{msg.blockLength}."
handleRequest(msg, peer)
elsif msg.is_a? Have
@logger.debug "Received have message from peer for torrent #{QuartzTorrent.bytesToHex(peer.infoHash)}: piece #{msg.pieceIndex}"
handleHave(msg, peer)
elsif msg.is_a? KeepAlive
@logger.debug "Received keep alive message from peer."
elsif msg.is_a? ExtendedHandshake
@logger.debug "Received extended handshake message from peer."
handleExtendedHandshake(msg, peer)
elsif msg.is_a? ExtendedMetaInfo
@logger.debug "Received extended metainfo message from peer."
handleExtendedMetainfo(msg, peer)
else
@logger.warn "Received a #{msg.class} message but handler is not implemented"
end
end
# Reactor method called when a scheduled timer expires. The metadata array's
# first element names the event; the remaining elements are event arguments.
def timerExpired(metadata)
  if metadata.is_a?(Array) && metadata[0] == :manage_peers
    managePeers(metadata[1])
  elsif metadata.is_a?(Array) && metadata[0] == :request_blocks
    requestBlocks(metadata[1])
  elsif metadata.is_a?(Array) && metadata[0] == :check_piece_manager
    checkPieceManagerResults(metadata[1])
  elsif metadata.is_a?(Array) && metadata[0] == :handshake_timeout
    handleHandshakeTimeout(metadata[1])
  elsif metadata.is_a?(Array) && metadata[0] == :removetorrent
    handleRemoveTorrent(metadata[1], metadata[2])
  elsif metadata.is_a?(Array) && metadata[0] == :pausetorrent
    handlePause(metadata[1], metadata[2])
  elsif metadata.is_a?(Array) && metadata[0] == :get_torrent_data
    # metadata layout: [:get_torrent_data, resultHash, semaphore, infoHash-or-nil]
    @torrentData.each do |k,v|
      begin
        if metadata[3].nil? || k == metadata[3]
          metadata[1][k] = TorrentDataDelegate.new(v, self)
        end
      rescue
        @logger.error "Error building torrent data response for user: #{$!}"
        @logger.error "#{$!.backtrace.join("\n")}"
      end
    end
    metadata[2].signal
  elsif metadata.is_a?(Array) && metadata[0] == :update_torrent_data
    # metadata layout: [:update_torrent_data, delegate, semaphore]
    delegate = metadata[1]
    # BUGFIX: this previously tested `@torrentData.has_key?(infoHash)` with an
    # undefined local `infoHash`, raising NameError; the delegate's own
    # infoHash is what's meant here.
    if ! @torrentData.has_key?(delegate.infoHash)
      delegate.state = :deleted
    else
      delegate.internalRefresh
    end
    metadata[2].signal
  elsif metadata.is_a?(Array) && metadata[0] == :request_metadata_pieces
    requestMetadataPieces(metadata[1])
  elsif metadata.is_a?(Array) && metadata[0] == :check_metadata_piece_manager
    checkMetadataPieceManagerResults(metadata[1])
  else
    @logger.info "Unknown timer #{metadata} expired."
  end
end
# Reactor method called when an IO error occurs. Marks the peer disconnected
# (when we know which peer it is) and closes the connection.
#
# @param peer    the Peer the error occurred on, or :listener_socket when the
#                connection failed during handshake before the peer's id was known.
# @param details description of the error.
def error(peer, details)
  if peer == :listener_socket
    # No completed Peer object exists yet for this connection.
    @logger.info "Error with handshaking peer: #{details}. Closing connection."
  else
    @logger.info "Error with peer #{peer}: #{details}. Closing connection."
    setPeerDisconnected(peer)
  end
  # Close connection
  close
end
################################################ PRIVATE METHODS ################################################
private
# Mark a peer as disconnected: reset its rate measurements and un-request any
# blocks we had outstanding with it so they can be fetched from other peers.
#
# @param peer the Peer that disconnected.
def setPeerDisconnected(peer)
  peer.state = :disconnected
  # Rate measurements are meaningless across connections.
  [peer.uploadRate, peer.downloadRate, peer.uploadRateDataOnly, peer.downloadRateDataOnly].each(&:reset)
  torrentData = @torrentData[peer.infoHash]
  # Are we tracking this torrent?
  return unless torrentData && torrentData.blockState
  # For any outstanding requests, mark that we no longer have requested them.
  peer.requestedBlocks.each_key do |blockIndex|
    blockInfo = torrentData.blockState.createBlockinfoByBlockIndex(blockIndex)
    torrentData.blockState.setBlockRequested blockInfo, false
  end
  peer.requestedBlocks.clear
end
# Process a PeerHandshake message returned by a peer we initiated a connection to.
# Returns true on success, false if we aren't tracking the torrent, and nil when
# the connection was closed (self-connection or duplicate).
def processHandshake(msg, peer)
torrentData = torrentDataForHandshake(msg, peer)
# Are we tracking this torrent?
return false if !torrentData
if msg.peerId == torrentData.trackerClient.peerId
@logger.info "#{QuartzTorrent.bytesToHex(torrentData.infoHash)}: We connected to ourself. Closing connection."
peer.isUs = true
close
return
end
# NOTE(review): serverInit treats any existing connection with state != :disconnected
# as a duplicate, but this path only checks for :connected — confirm the asymmetry
# is intended.
peers = torrentData.peers.findById(msg.peerId)
if peers
peers.each do |existingPeer|
if existingPeer.state == :connected
@logger.warn "#{QuartzTorrent.bytesToHex(torrentData.infoHash)}: Peer with id #{msg.peerId} created a new connection when we already have a connection in state #{existingPeer.state}. Closing new connection."
torrentData.peers.delete existingPeer
setPeerDisconnected(peer)
close
return
end
end
end
trackerclient = torrentData.trackerClient
updatePeerWithHandshakeInfo(torrentData, msg, peer)
if torrentData.info
peer.bitfield = Bitfield.new(torrentData.info.pieces.length)
else
peer.bitfield = EmptyBitfield.new
@logger.info "#{QuartzTorrent.bytesToHex(torrentData.infoHash)}: We have no metainfo yet, so setting peer #{peer} to have an EmptyBitfield"
end
# Send extended handshake if the peer supports extensions
# (bit 0x10 of reserved byte 5 signals BEP 10 extension-protocol support)
if (msg.reserved.unpack("C8")[5] & 0x10) != 0
@logger.warn "#{QuartzTorrent.bytesToHex(torrentData.infoHash)}: Peer supports extensions. Sending extended handshake"
extended = Extension.createExtendedHandshake torrentData.info
extended.serializeTo currentIo
end
true
end
# Look up the TorrentData for a handshake message. If we aren't managing the
# torrent, the connection is closed and nil is returned.
#
# @param msg  the PeerHandshake message received.
# @param peer the Peer object, or a "addr:port" string for incoming connections
#             whose peer id is not yet known.
# @return the TorrentData for the handshake's infohash, or nil.
def torrentDataForHandshake(msg, peer)
  torrentData = @torrentData[msg.infoHash]
  return torrentData if torrentData
  # We aren't managing this torrent; reject the handshake.
  if peer.is_a?(Peer)
    @logger.info "Peer #{peer} failed handshake: we are not managing torrent #{QuartzTorrent.bytesToHex(msg.infoHash)}"
    setPeerDisconnected(peer)
  else
    @logger.info "Incoming peer #{peer} failed handshake: we are not managing torrent #{QuartzTorrent.bytesToHex(msg.infoHash)}"
  end
  close
  nil
end
# Record the infohash and peer id learned from a valid handshake on the Peer
# object, and index the peer by id in the torrent's peer registry.
#
# @param torrentData TorrentData of the torrent the handshake is for.
# @param msg         the validated PeerHandshake message.
# @param peer        the Peer to update.
def updatePeerWithHandshakeInfo(torrentData, msg, peer)
  hexInfoHash = QuartzTorrent.bytesToHex(torrentData.infoHash)
  @logger.info "#{hexInfoHash}: peer #{peer} sent valid handshake for torrent #{hexInfoHash}"
  peer.infoHash = msg.infoHash
  # If this was a peer we got from a tracker that had no id then we only learn the id on handshake.
  peer.trackerPeer.id = msg.peerId
  torrentData.peers.idSet peer
end
# Timer callback fired @handshakeTimeout seconds after we begin handshaking with
# a peer. If the peer is still in the :handshaking state, drop the connection.
#
# @param peer the Peer being handshaked.
def handleHandshakeTimeout(peer)
  return unless peer.state == :handshaking
  @logger.warn "Peer #{peer} failed handshake: handshake timed out after #{@handshakeTimeout} seconds."
  withPeersIo(peer, "handling handshake timeout") do |io|
    setPeerDisconnected(peer)
    close(io)
  end
end
# Periodic peer maintenance for one torrent (runs via the :manage_peers timer,
# presumably every @managePeersPeriod seconds — scheduling is not shown here):
# refreshes the peer list from the tracker, opens connections the peer manager
# asks for, and sends Choke/Unchoke messages per its decisions.
def managePeers(infoHash)
torrentData = @torrentData[infoHash]
if ! torrentData
@logger.error "Manage peers: tracker client for torrent #{QuartzTorrent.bytesToHex(infoHash)} not found."
return
end
# Paused and queued torrents are left alone.
return if torrentData.paused || torrentData.queued
trackerclient = torrentData.trackerClient
# Update our internal peer list for this torrent from the tracker client
getPeersFromTracker(torrentData, infoHash)
classifiedPeers = ClassifiedPeers.new torrentData.peers.all
manager = torrentData.peerManager
if ! manager
@logger.error "#{QuartzTorrent.bytesToHex(infoHash)}: Manage peers: peer manager client for torrent #{QuartzTorrent.bytesToHex(infoHash)} not found."
return
end
# Open any new connections the manager asks for.
toConnect = manager.manageConnections(classifiedPeers)
toConnect.each do |peer|
@logger.debug "#{QuartzTorrent.bytesToHex(infoHash)}: Connecting to peer #{peer}"
connect peer.trackerPeer.ip, peer.trackerPeer.port, peer
end
# Apply the manager's choke/unchoke decisions.
manageResult = manager.managePeers(classifiedPeers)
manageResult.unchoke.each do |peer|
@logger.debug "#{QuartzTorrent.bytesToHex(infoHash)}: Unchoking peer #{peer}"
withPeersIo(peer, "unchoking peer") do |io|
msg = Unchoke.new
sendMessageToPeer msg, io, peer
peer.peerChoked = false
end
end
manageResult.choke.each do |peer|
@logger.debug "#{QuartzTorrent.bytesToHex(infoHash)}: Choking peer #{peer}"
withPeersIo(peer, "choking peer") do |io|
msg = Choke.new
sendMessageToPeer msg, io, peer
peer.peerChoked = true
end
end
end
# Periodic block-request pass for one torrent (runs via the :request_blocks
# timer). Enforces the upload ratio/duration limits, manages endgame mode,
# expires timed-out requests, adapts per-peer request windows, issues new block
# requests, and detects download completion.
def requestBlocks(infoHash)
  torrentData = @torrentData[infoHash]
  if ! torrentData
    @logger.error "Request blocks peers: tracker client for torrent #{QuartzTorrent.bytesToHex(infoHash)} not found."
    return
  end
  return if torrentData.paused || torrentData.queued
  classifiedPeers = ClassifiedPeers.new torrentData.peers.all
  if ! torrentData.blockState
    @logger.error "#{QuartzTorrent.bytesToHex(infoHash)}: Request blocks peers: no blockstate yet."
    return
  end
  # When seeding, pause the torrent once the upload ratio or duration limit is hit.
  if torrentData.state == :uploading && !torrentData.paused
    if torrentData.ratio
      if torrentData.bytesUploadedDataOnly >= torrentData.ratio*torrentData.blockState.totalLength
        @logger.info "#{QuartzTorrent.bytesToHex(infoHash)}: Pausing torrent due to upload ratio limit." if torrentData.metainfoPieceState.complete?
        setPaused(infoHash, true)
        return
      end
    end
    if torrentData.uploadDuration && torrentData.downloadCompletedTime
      if Time.new > torrentData.downloadCompletedTime + torrentData.uploadDuration
        @logger.info "#{QuartzTorrent.bytesToHex(infoHash)}: Pausing torrent due to upload duration being reached." if torrentData.metainfoPieceState.complete?
        setPaused(infoHash, true)
        return
      end
    end
  end
  # Should we switch to endgame mode?
  if torrentData.state == :running && !torrentData.isEndgame
    blocks = torrentData.blockState.completeBlockBitfield
    set = blocks.countSet
    if set >= blocks.length - @endgameBlockThreshold && set < blocks.length
      @logger.info "#{QuartzTorrent.bytesToHex(infoHash)}: Entering endgame mode: blocks #{set}/#{blocks.length} complete."
      torrentData.isEndgame = true
    end
  elsif torrentData.isEndgame && torrentData.state != :running
    torrentData.isEndgame = false
  end
  # Delete any timed-out requests so the blocks become requestable again.
  classifiedPeers.establishedPeers.each do |peer|
    toDelete = []
    peer.requestedBlocks.each do |blockIndex, requestTime|
      toDelete.push blockIndex if (Time.new - requestTime) > @requestTimeout
    end
    toDelete.each do |blockIndex|
      @logger.debug "#{QuartzTorrent.bytesToHex(infoHash)}: Block #{blockIndex} request timed out."
      blockInfo = torrentData.blockState.createBlockinfoByBlockIndex(blockIndex)
      torrentData.blockState.setBlockRequested blockInfo, false
      peer.requestedBlocks.delete blockIndex
    end
  end
  # Update the allowed pending requests based on how well the peer did since last time:
  # grow the window for peers that drained most of it, shrink it for stalled peers.
  classifiedPeers.establishedPeers.each do |peer|
    if peer.requestedBlocksSizeLastPass
      if peer.requestedBlocksSizeLastPass == peer.maxRequestedBlocks
        downloaded = peer.requestedBlocksSizeLastPass - peer.requestedBlocks.size
        if downloaded > peer.maxRequestedBlocks*8/10
          peer.maxRequestedBlocks = peer.maxRequestedBlocks * 12 / 10
        elsif downloaded == 0
          peer.maxRequestedBlocks = peer.maxRequestedBlocks * 8 / 10
        end
        peer.maxRequestedBlocks = 10 if peer.maxRequestedBlocks < 10
      end
    end
  end
  # Request blocks
  blockInfos = torrentData.blockState.findRequestableBlocks(classifiedPeers, 100)
  blockInfos.each do |blockInfo|
    peersToRequest = []
    if torrentData.isEndgame
      # Since we are in endgame mode, request blocks from all elegible peers
      elegiblePeers = blockInfo.peers.find_all{ |p| p.requestedBlocks.length < p.maxRequestedBlocks }
      peersToRequest.concat elegiblePeers
    else
      # Pick one of the peers that has the piece to download it from. Pick one of the
      # peers with the top 3 upload rates, plus one at random so slow peers get a chance.
      elegiblePeers = blockInfo.peers.find_all{ |p| p.requestedBlocks.length < p.maxRequestedBlocks }.sort{ |a,b| b.uploadRate.value <=> a.uploadRate.value}
      # BUGFIX: the random pick previously indexed elegiblePeers with
      # rand(blockInfo.peers.size), which can exceed elegiblePeers.length and
      # yield nil — and the nil could then be chosen, wasting this block for a
      # pass. Sample from the eligible peers and drop nils instead.
      random = elegiblePeers.sample
      peer = elegiblePeers.first(3).push(random).compact.shuffle.first
      next if ! peer
      peersToRequest.push peer
    end
    peersToRequest.each do |peer|
      withPeersIo(peer, "requesting block") do |io|
        if ! peer.amInterested
          # Let this peer know that I'm interested if I haven't yet.
          msg = Interested.new
          sendMessageToPeer msg, io, peer
          peer.amInterested = true
        end
        @logger.debug "#{QuartzTorrent.bytesToHex(infoHash)}: Requesting block from #{peer}: piece #{blockInfo.pieceIndex} offset #{blockInfo.offset} length #{blockInfo.length}"
        msg = blockInfo.getRequest
        sendMessageToPeer msg, io, peer
        torrentData.blockState.setBlockRequested blockInfo, true
        peer.requestedBlocks[blockInfo.blockIndex] = Time.new
      end
    end
  end
  # No requestable blocks and a full bitfield means the download just finished.
  if blockInfos.size == 0
    if torrentData.state != :uploading && torrentData.blockState.completePieceBitfield.allSet?
      @logger.info "#{QuartzTorrent.bytesToHex(infoHash)}: Download complete."
      torrentData.state = :uploading
      torrentData.downloadCompletedTime = Time.new
      dequeue
    end
  end
  classifiedPeers.establishedPeers.each { |peer| peer.requestedBlocksSizeLastPass = peer.requestedBlocks.length }
end
# For a torrent where we don't have the metainfo, request metainfo pieces from
# peers (runs via the :request_metadata_pieces timer).
#
# @param infoHash infohash identifying the torrent.
def requestMetadataPieces(infoHash)
  torrentData = @torrentData[infoHash]
  unless torrentData
    @logger.error "Request metadata pices: torrent data for torrent #{QuartzTorrent.bytesToHex(infoHash)} not found."
    return
  end
  return if torrentData.paused || torrentData.queued
  # We may not have completed the extended handshake with the peer which specifies
  # the torrent size. In this case torrentData.metainfoPieceState is not yet set.
  metainfoState = torrentData.metainfoPieceState
  return unless metainfoState
  @logger.info "#{QuartzTorrent.bytesToHex(infoHash)}: Obtained all pieces of metainfo." if metainfoState.complete?
  pieces = metainfoState.findRequestablePieces
  classifiedPeers = ClassifiedPeers.new torrentData.peers.all
  candidates = metainfoState.findRequestablePeers(classifiedPeers)
  if candidates.empty?
    @logger.error "#{QuartzTorrent.bytesToHex(infoHash)}: No peers found that have metadata."
    return
  end
  # For now, just request all pieces from the first peer.
  target = candidates.first
  pieces.each do |pieceIndex|
    request = ExtendedMetaInfo.new
    request.msgType = :request
    request.piece = pieceIndex
    withPeersIo(target, "requesting metadata piece") do |io|
      sendMessageToPeer request, io, target
      metainfoState.setPieceRequested(pieceIndex, true)
      @logger.debug "#{QuartzTorrent.bytesToHex(infoHash)}: Requesting metainfo piece from #{target}: piece #{pieceIndex}"
    end
  end
end
# Process completed requests from the metainfo piece manager: send metainfo
# pieces that peers asked for, and when the metainfo becomes complete, load it
# and start checking existing pieces on disk.
def checkMetadataPieceManagerResults(infoHash)
torrentData = @torrentData[infoHash]
if ! torrentData
@logger.error "Check metadata piece manager results: data for torrent #{QuartzTorrent.bytesToHex(infoHash)} not found."
return
end
# We may not have completed the extended handshake with the peer which specifies the torrent size.
# In this case torrentData.metainfoPieceState is not yet set.
return if ! torrentData.metainfoPieceState
results = torrentData.metainfoPieceState.checkResults
results.each do |result|
metaData = torrentData.pieceManagerMetainfoRequestMetadata.delete(result.requestId)
if ! metaData
@logger.error "#{QuartzTorrent.bytesToHex(infoHash)}: Can't find metadata for PieceManager request #{result.requestId}"
next
end
if metaData.type == :read && result.successful?
# A peer requested this metainfo piece; send it to them now that it's read.
msg = ExtendedMetaInfo.new
msg.msgType = :piece
msg.piece = metaData.data.requestMsg.piece
msg.data = result.data
withPeersIo(metaData.data.peer, "sending extended metainfo piece message") do |io|
@logger.debug "#{QuartzTorrent.bytesToHex(infoHash)}: Sending metainfo piece to #{metaData.data.peer}: piece #{msg.piece} with data length #{msg.data.length}"
sendMessageToPeer msg, io, metaData.data.peer
end
# NOTE(review): this expression's value is discarded; it appears to be a leftover.
result.data
end
end
if torrentData.metainfoPieceState.complete? && torrentData.state == :downloading_metainfo
@logger.info "#{QuartzTorrent.bytesToHex(infoHash)}: Obtained all pieces of metainfo. Will begin checking existing pieces."
torrentData.metainfoPieceState.flush
# We don't need to download metainfo anymore.
cancelTimer torrentData.metainfoRequestTimer if torrentData.metainfoRequestTimer
info = MetainfoPieceState.downloaded(@baseDirectory, torrentData.infoHash)
if info
torrentData.info = info
startCheckingPieces torrentData
else
@logger.error "#{QuartzTorrent.bytesToHex(infoHash)}: Metadata download is complete but reading the metadata failed"
torrentData.state = :error
end
end
end
# Handle a Piece (block data) message from a peer: validate that we requested
# the block, queue it for writing via the piece manager, and — in endgame mode —
# send Cancel messages to the other peers we requested the same block from.
#
# @param msg  the Piece wire message received.
# @param peer the Peer the block arrived from.
def handlePieceReceive(msg, peer)
  torrentData = @torrentData[peer.infoHash]
  if ! torrentData
    @logger.error "Receive piece: torrent data for torrent #{QuartzTorrent.bytesToHex(peer.infoHash)} not found."
    return
  end
  if ! torrentData.blockState
    @logger.error "#{QuartzTorrent.bytesToHex(torrentData.infoHash)}: Receive piece: no blockstate yet."
    return
  end
  blockInfo = torrentData.blockState.createBlockinfoByPieceResponse(msg.pieceIndex, msg.blockOffset, msg.data.length)
  if ! peer.requestedBlocks.has_key?(blockInfo.blockIndex)
    @logger.debug "#{QuartzTorrent.bytesToHex(torrentData.infoHash)}: Receive piece: we either didn't request this piece, or it was already received due to endgame strategy. Ignoring this message."
    return
  end
  if torrentData.blockState.blockCompleted?(blockInfo)
    @logger.debug "#{QuartzTorrent.bytesToHex(torrentData.infoHash)}: Receive piece: we already have this block. Ignoring this message."
    return
  end
  peer.requestedBlocks.delete blockInfo.blockIndex
  # Block is marked as not requested when hash is confirmed
  torrentData.bytesDownloadedDataOnly += msg.data.length
  id = torrentData.pieceManager.writeBlock(msg.pieceIndex, msg.blockOffset, msg.data)
  torrentData.pieceManagerRequestMetadata[id] = PieceManagerRequestMetadata.new(:write, msg)
  if torrentData.isEndgame
    # Assume this block is correct. Send a Cancel message to all other peers from whom we requested
    # this piece.
    classifiedPeers = ClassifiedPeers.new torrentData.peers.all
    classifiedPeers.requestablePeers.each do |otherPeer|
      if otherPeer.requestedBlocks.has_key?(blockInfo.blockIndex)
        withPeersIo(otherPeer, "when sending Cancel message") do |io|
          cancel = Cancel.new
          cancel.pieceIndex = msg.pieceIndex
          cancel.blockOffset = msg.blockOffset
          cancel.blockLength = msg.data.length
          sendMessageToPeer cancel, io, otherPeer
          # BUGFIX: log the peer the Cancel is actually sent to (otherPeer),
          # not the peer the block arrived from.
          @logger.debug "#{QuartzTorrent.bytesToHex(torrentData.infoHash)}: Sending Cancel message to peer #{otherPeer}"
        end
      end
    end
  end
end
# Handle a Request message from a peer by scheduling an asynchronous read of
# the requested block from disk. The read result is later picked up by
# checkPieceManagerResults, which sends the Piece message to the peer.
def handleRequest(msg, peer)
  if peer.peerChoked
    @logger.warn "Request piece: peer #{peer} requested a block when they are choked."
    return
  end
  torrentData = @torrentData[peer.infoHash]
  unless torrentData
    @logger.error "Request piece: torrent data for torrent #{QuartzTorrent.bytesToHex(peer.infoHash)} not found."
    return
  end
  unless msg.blockLength > 0
    @logger.error "#{QuartzTorrent.bytesToHex(torrentData.infoHash)}: Request piece: peer requested block of length #{msg.blockLength} which is invalid."
    return
  end
  requestId = torrentData.pieceManager.readBlock(msg.pieceIndex, msg.blockOffset, msg.blockLength)
  torrentData.pieceManagerRequestMetadata[requestId] = PieceManagerRequestMetadata.new(:read, ReadRequestMetadata.new(peer, msg))
end
# Handle a Bitfield message from a peer: record which pieces the peer has and,
# if the peer has pieces we still need, send an Interested message.
def handleBitfield(msg, peer)
  torrentData = @torrentData[peer.infoHash]
  if ! torrentData
    @logger.error "Bitfield: torrent data for torrent #{QuartzTorrent.bytesToHex(peer.infoHash)} not found."
    return
  end
  peer.bitfield = msg.bitfield
  if torrentData.info
    # Trim the bitfield to the true piece count once the torrent info is known.
    peer.bitfield.length = torrentData.info.pieces.length
  else
    @logger.warn "#{QuartzTorrent.bytesToHex(torrentData.infoHash)}: A peer connected and sent a bitfield but we don't know the length of the torrent yet. Assuming number of pieces is divisible by 8"
  end
  if ! torrentData.blockState
    @logger.warn "#{QuartzTorrent.bytesToHex(torrentData.infoHash)}: Bitfield: no blockstate yet."
    return
  end
  # If we are interested in something from this peer, let them know.
  # NOTE(review): 'compliment' (sic) is the project Bitfield API's spelling
  # for the bitwise complement.
  needed = torrentData.blockState.completePieceBitfield.compliment
  needed.intersection!(peer.bitfield)
  if ! needed.allClear?
    if ! peer.amInterested
      @logger.debug "#{QuartzTorrent.bytesToHex(torrentData.infoHash)}: Need some pieces from peer #{peer} so sending Interested message"
      msg = Interested.new
      sendMessageToPeer msg, currentIo, peer
      peer.amInterested = true
    end
  end
end
# Handle a Have message from a peer: mark the piece in the peer's bitfield
# and, if it's a piece we don't have yet, send an Interested message.
def handleHave(msg, peer)
  torrentData = @torrentData[peer.infoHash]
  if ! torrentData
    @logger.error "Have: torrent data for torrent #{QuartzTorrent.bytesToHex(peer.infoHash)} not found."
    return
  end
  if msg.pieceIndex >= peer.bitfield.length
    @logger.warn "#{QuartzTorrent.bytesToHex(torrentData.infoHash)}: Peer #{peer} sent Have message with invalid piece index"
    return
  end
  # Update peer's bitfield
  peer.bitfield.set msg.pieceIndex
  if ! torrentData.blockState
    @logger.warn "#{QuartzTorrent.bytesToHex(torrentData.infoHash)}: Have: no blockstate yet."
    return
  end
  # If we are interested in something from this peer, let them know.
  if ! torrentData.blockState.completePieceBitfield.set?(msg.pieceIndex)
    @logger.debug "#{QuartzTorrent.bytesToHex(torrentData.infoHash)}: Peer #{peer} just got a piece we need so sending Interested message"
    msg = Interested.new
    sendMessageToPeer msg, currentIo, peer
    peer.amInterested = true
  end
end
# Drain and handle all pending results from this torrent's PieceManager.
# Result types handled:
#   :write          - a block was written; when a piece completes, schedule a hash check.
#   :read           - a block was read; send it to the requesting peer as a Piece message.
#   :hash           - a piece hash check finished; announce the piece or mark it incomplete.
#   :check_existing - the scan for already-downloaded pieces finished.
def checkPieceManagerResults(infoHash)
  torrentData = @torrentData[infoHash]
  if ! torrentData
    # Fixed: this message previously read "Request blocks peers: tracker client
    # ... not found" -- a copy/paste from another handler.
    @logger.error "Check piece manager results: torrent data for torrent #{QuartzTorrent.bytesToHex(infoHash)} not found."
    return
  end
  while true
    result = torrentData.pieceManager.nextResult
    break if ! result
    metaData = torrentData.pieceManagerRequestMetadata.delete(result.requestId)
    if ! metaData
      @logger.error "#{QuartzTorrent.bytesToHex(torrentData.infoHash)}: Can't find metadata for PieceManager request #{result.requestId}"
      next
    end
    if metaData.type == :write
      if result.successful?
        @logger.debug "#{QuartzTorrent.bytesToHex(torrentData.infoHash)}: Block written to disk. "
        # Block successfully written!
        torrentData.blockState.setBlockCompleted metaData.data.pieceIndex, metaData.data.blockOffset, true do |pieceIndex|
          # The piece is completed! Check hash.
          @logger.debug "#{QuartzTorrent.bytesToHex(torrentData.infoHash)}: Piece #{pieceIndex} is complete. Checking hash. "
          id = torrentData.pieceManager.checkPieceHash(metaData.data.pieceIndex)
          torrentData.pieceManagerRequestMetadata[id] = PieceManagerRequestMetadata.new(:hash, metaData.data.pieceIndex)
        end
      else
        # Block failed! Clear completed and requested state.
        torrentData.blockState.setBlockCompleted metaData.data.pieceIndex, metaData.data.blockOffset, false
        @logger.error "#{QuartzTorrent.bytesToHex(torrentData.infoHash)}: Writing block failed: #{result.error}"
      end
    elsif metaData.type == :read
      if result.successful?
        readRequestMetadata = metaData.data
        peer = readRequestMetadata.peer
        withPeersIo(peer, "sending piece message") do |io|
          msg = Piece.new
          msg.pieceIndex = readRequestMetadata.requestMsg.pieceIndex
          msg.blockOffset = readRequestMetadata.requestMsg.blockOffset
          msg.data = result.data
          @logger.debug "#{QuartzTorrent.bytesToHex(torrentData.infoHash)}: Sending block to #{peer}: piece #{msg.pieceIndex} offset #{msg.blockOffset} length #{msg.data.length}"
          sendMessageToPeer msg, io, peer
          torrentData.bytesUploadedDataOnly += msg.data.length
          @logger.debug "#{QuartzTorrent.bytesToHex(torrentData.infoHash)}: Sending piece to peer"
        end
      else
        @logger.error "#{QuartzTorrent.bytesToHex(torrentData.infoHash)}: Reading block failed: #{result.error}"
      end
    elsif metaData.type == :hash
      if result.successful?
        @logger.debug "#{QuartzTorrent.bytesToHex(torrentData.infoHash)}: Hash of piece #{metaData.data} is correct"
        sendHaves(torrentData, metaData.data)
        sendUninterested(torrentData)
      else
        @logger.info "#{QuartzTorrent.bytesToHex(torrentData.infoHash)}: Hash of piece #{metaData.data} is incorrect. Marking piece as not complete."
        torrentData.blockState.setPieceCompleted metaData.data, false
      end
    elsif metaData.type == :check_existing
      handleCheckExistingResult(torrentData, result)
    end
  end
end
# Handle the result of the PieceManager's checkExisting (check which pieces we already have) operation.
# If the result is successful, this begins the actual download.
def handleCheckExistingResult(torrentData, pieceManagerResult)
  if pieceManagerResult.successful?
    # On success the result data is a bitfield of the pieces already on disk.
    existingBitfield = pieceManagerResult.data
    @logger.info "#{QuartzTorrent.bytesToHex(torrentData.infoHash)}: We already have #{existingBitfield.countSet}/#{existingBitfield.length} pieces."
    info = torrentData.info
    torrentData.blockState = BlockState.new(info, existingBitfield)
    @logger.info "#{QuartzTorrent.bytesToHex(torrentData.infoHash)}: Starting torrent. Information:"
    @logger.info "#{QuartzTorrent.bytesToHex(torrentData.infoHash)}: piece length: #{info.pieceLen}"
    @logger.info "#{QuartzTorrent.bytesToHex(torrentData.infoHash)}: number of pieces: #{info.pieces.size}"
    @logger.info "#{QuartzTorrent.bytesToHex(torrentData.infoHash)}: total length #{info.dataLength}"
    startDownload torrentData
  else
    @logger.info "#{QuartzTorrent.bytesToHex(torrentData.infoHash)}: Checking existing pieces of torrent failed: #{pieceManagerResult.error}"
    torrentData.state = :error
  end
end
# Start checking which pieces we already have downloaded. This method schedules the necessary timers
# and changes the state to :checking_pieces. When the pieces are finished being checked the actual download will
# begin.
# Preconditions: The torrentData object already has it's info member set.
def startCheckingPieces(torrentData)
  torrentData.pieceManager = QuartzTorrent::PieceManager.new(@baseDirectory, torrentData.info)
  torrentData.state = :checking_pieces
  @logger.info "#{QuartzTorrent.bytesToHex(torrentData.infoHash)}: Checking pieces of torrent #{QuartzTorrent.bytesToHex(torrentData.infoHash)} asynchronously."
  id = torrentData.pieceManager.findExistingPieces
  torrentData.pieceManagerRequestMetadata[id] = PieceManagerRequestMetadata.new(:check_existing, nil)
  # Keep a MetainfoPieceState even when we already have the full info, so
  # extended metainfo requests from peers can still be served.
  if ! torrentData.metainfoPieceState
    torrentData.metainfoPieceState = MetainfoPieceState.new(@baseDirectory, torrentData.infoHash, nil, torrentData.info)
  end
  # Schedule checking for PieceManager results
  torrentData.checkPieceManagerTimer =
    @reactor.scheduleTimer(@requestBlocksPeriod, [:check_piece_manager, torrentData.infoHash], true, false)
  # Schedule checking for metainfo PieceManager results (including when piece reading completes)
  if ! torrentData.checkMetadataPieceManagerTimer
    torrentData.checkMetadataPieceManagerTimer =
      @reactor.scheduleTimer(@requestBlocksPeriod, [:check_metadata_piece_manager, torrentData.infoHash], true, false)
  end
end
# Take a torrent that is in the :initializing state and make it go.
def initTorrent(torrentData)
  # If we already have the metainfo info for this torrent, we can begin checking the pieces.
  # If we don't have the metainfo info then we need to get the metainfo first.
  if ! torrentData.info
    # Try to load previously-downloaded metainfo from disk first.
    torrentData.info = MetainfoPieceState.downloaded(@baseDirectory, torrentData.infoHash)
  end
  if torrentData.info
    startCheckingPieces torrentData
  else
    # Request the metainfo from peers.
    torrentData.state = :downloading_metainfo
    @logger.info "#{QuartzTorrent.bytesToHex(torrentData.infoHash)}: Downloading metainfo"
    # Schedule peer connection management. Recurring and immediate
    torrentData.managePeersTimer =
      @reactor.scheduleTimer(@managePeersPeriod, [:manage_peers, torrentData.infoHash], true, true)
    # Schedule a timer for requesting metadata pieces from peers.
    torrentData.metainfoRequestTimer =
      @reactor.scheduleTimer(@requestBlocksPeriod, [:request_metadata_pieces, torrentData.infoHash], true, false)
    # Schedule checking for metainfo PieceManager results (including when piece reading completes)
    torrentData.checkMetadataPieceManagerTimer =
      @reactor.scheduleTimer(@requestBlocksPeriod, [:check_metadata_piece_manager, torrentData.infoHash], true, false)
  end
end
# Start the actual torrent download. This method schedules the necessary timers and registers the necessary listeners
# and changes the state to :running. It is meant to be called after checking for existing pieces or downloading the
# torrent metadata (if this is a magnet link torrent)
def startDownload(torrentData)
  # Add a listener for when the tracker's peers change.
  torrentData.peerChangeListener = Proc.new do
    @logger.debug "#{QuartzTorrent.bytesToHex(torrentData.infoHash)}: Managing peers on peer change event"
    # Non-recurring and immediate timer
    torrentData.managePeersTimer =
      @reactor.scheduleTimer(@managePeersPeriod, [:manage_peers, torrentData.infoHash], false, true)
  end
  torrentData.trackerClient.addPeersChangedListener torrentData.peerChangeListener
  # Schedule peer connection management. Recurring and immediate
  # (the timer may already exist if metainfo was downloaded first).
  if ! torrentData.managePeersTimer
    torrentData.managePeersTimer =
      @reactor.scheduleTimer(@managePeersPeriod, [:manage_peers, torrentData.infoHash], true, true)
  end
  # Schedule requesting blocks from peers. Recurring and not immediate
  torrentData.requestBlocksTimer =
    @reactor.scheduleTimer(@requestBlocksPeriod, [:request_blocks, torrentData.infoHash], true, false)
  torrentData.state = :running
end
# Handle an extended-protocol handshake from a peer. If the peer advertises
# the metadata size and we have no MetainfoPieceState for this torrent yet,
# create one so metainfo pieces can be downloaded.
def handleExtendedHandshake(msg, peer)
  torrentData = @torrentData[peer.infoHash]
  unless torrentData
    @logger.error "Extended Handshake: torrent data for torrent #{QuartzTorrent.bytesToHex(peer.infoHash)} not found."
    return
  end
  metadataSize = msg.dict['metadata_size']
  return unless metadataSize
  return if torrentData.metainfoPieceState
  @logger.info "#{QuartzTorrent.bytesToHex(torrentData.infoHash)}: Extended Handshake: Learned that metadata size is #{metadataSize}. Creating MetainfoPieceState"
  torrentData.metainfoPieceState = MetainfoPieceState.new(@baseDirectory, torrentData.infoHash, metadataSize)
end
# Handle an extended-protocol metainfo message from a peer.
#   :request - peer asked us for a metainfo piece; reply with the data or a reject.
#   :piece   - peer sent us a metainfo piece; save it asynchronously if not already complete.
#   :reject  - peer refused our request; mark the peer bad and clear the piece's requested flag.
def handleExtendedMetainfo(msg, peer)
  torrentData = @torrentData[peer.infoHash]
  if ! torrentData
    # NOTE(review): log text says "Extended Handshake" but this is the extended
    # metainfo handler -- looks like a copy/paste of the message; confirm intent.
    @logger.error "Extended Handshake: torrent data for torrent #{QuartzTorrent.bytesToHex(peer.infoHash)} not found."
    return
  end
  if msg.msgType == :request
    @logger.debug "#{QuartzTorrent.bytesToHex(torrentData.infoHash)}: Got extended metainfo request for piece #{msg.piece}"
    # Build a response for this piece.
    if torrentData.metainfoPieceState.pieceCompleted? msg.piece
      @logger.debug "#{QuartzTorrent.bytesToHex(torrentData.infoHash)}: Requesting extended metainfo piece #{msg.piece} from metainfoPieceState."
      id = torrentData.metainfoPieceState.readPiece msg.piece
      torrentData.pieceManagerMetainfoRequestMetadata[id] =
        PieceManagerRequestMetadata.new(:read, ReadRequestMetadata.new(peer,msg))
    else
      # We don't have that metainfo piece ourselves; send a reject.
      reject = ExtendedMetaInfo.new
      reject.msgType = :reject
      reject.piece = msg.piece
      withPeersIo(peer, "sending extended metainfo reject message") do |io|
        @logger.debug "#{QuartzTorrent.bytesToHex(torrentData.infoHash)}: Sending metainfo reject to #{peer}: piece #{msg.piece}"
        sendMessageToPeer reject, io, peer
      end
    end
  elsif msg.msgType == :piece
    @logger.debug "#{QuartzTorrent.bytesToHex(torrentData.infoHash)}: Got extended metainfo piece response for piece #{msg.piece} with data length #{msg.data.length}"
    if ! torrentData.metainfoPieceState.pieceCompleted? msg.piece
      id = torrentData.metainfoPieceState.savePiece msg.piece, msg.data
      torrentData.pieceManagerMetainfoRequestMetadata[id] =
        PieceManagerRequestMetadata.new(:write, msg)
    end
  elsif msg.msgType == :reject
    @logger.debug "#{QuartzTorrent.bytesToHex(torrentData.infoHash)}: Got extended metainfo reject response for piece #{msg.piece}"
    # Mark this peer as bad.
    torrentData.metainfoPieceState.markPeerBad peer
    # Clear the requested flag so the piece can be requested from another peer.
    torrentData.metainfoPieceState.setPieceRequested(msg.piece, false)
  end
end
# Find the io associated with the peer and yield it to the passed block.
# If no io is found a warning is logged, including the optional 'what'
# context formatted as "when <what>".
def withPeersIo(peer, what = nil)
  io = findIoByMetainfo(peer)
  if io
    yield io
  else
    s = ""
    s = "when #{what}" if what
    # Fixed: previously the raw 'what' was interpolated and the formatted
    # string 's' was built but never used (and nil 'what' printed as blank).
    @logger.warn "Couldn't find the io for peer #{peer} #{s}"
  end
end
# Serialize our bitfield to the given io. Nothing is sent when the bitfield
# has no bits set.
def sendBitfield(io, bitfield)
  return if bitfield.allClear?
  @logger.debug "Sending bitfield of size #{bitfield.length}."
  message = BitfieldMessage.new
  message.bitfield = bitfield
  message.serializeTo io
end
# Notify every established peer (other than ourself) that we now have the
# given piece by sending a Have message.
def sendHaves(torrentData, pieceIndex)
  @logger.debug "#{QuartzTorrent.bytesToHex(torrentData.infoHash)}: Sending Have messages to all connected peers for piece #{pieceIndex}"
  torrentData.peers.all.each do |remotePeer|
    next if remotePeer.state != :established || remotePeer.isUs
    withPeersIo(remotePeer, "when sending Have message") do |io|
      haveMsg = Have.new
      haveMsg.pieceIndex = pieceIndex
      sendMessageToPeer haveMsg, io, remotePeer
    end
  end
end
# After completing a piece, send an Uninterested message to any established
# peer that no longer has anything we need.
def sendUninterested(torrentData)
  # If we are no longer interested in peers once this piece has been completed, let them know
  return if ! torrentData.blockState
  # NOTE(review): 'compliment' (sic) is the project Bitfield API's spelling
  # for the bitwise complement.
  needed = torrentData.blockState.completePieceBitfield.compliment
  classifiedPeers = ClassifiedPeers.new torrentData.peers.all
  classifiedPeers.establishedPeers.each do |peer|
    # Don't bother sending uninterested message if we are already uninterested.
    next if ! peer.amInterested || peer.isUs
    needFromPeer = needed.intersection(peer.bitfield)
    if needFromPeer.allClear?
      withPeersIo(peer, "when sending Uninterested message") do |io|
        msg = Uninterested.new
        sendMessageToPeer msg, io, peer
        peer.amInterested = false
        @logger.debug "#{QuartzTorrent.bytesToHex(torrentData.infoHash)}: Sending Uninterested message to peer #{peer}"
      end
    end
  end
end
# Serialize a message and send it to the peer over the given io, updating
# byte accounting. Serialization errors are logged, not raised.
# NOTE(review): this calls peer.updateDownloadRate on the *send* path while
# adding msg.length to bytesUploaded -- the rate call looks like it should be
# an upload-rate update; confirm against the Peer API before changing.
def sendMessageToPeer(msg, io, peer)
  peer.updateDownloadRate(msg)
  torrentData = @torrentData[peer.infoHash]
  torrentData.bytesUploaded += msg.length if torrentData
  begin
    peer.peerMsgSerializer.serializeTo(msg, io)
  rescue
    # NOTE(review): if torrentData is nil this log line itself raises; the
    # guard above suggests nil is possible here -- verify.
    @logger.warn "#{QuartzTorrent.bytesToHex(torrentData.infoHash)}: Sending message to peer #{peer} failed: #{$!.message}"
  end
end
# Update our internal peer list for this torrent from the tracker client.
# New tracker peers are added until @maxPeerCount is reached; past that point
# each new peer replaces a disconnected one, and iteration stops once there
# are no disconnected peers left to replace.
def getPeersFromTracker(torrentData, infoHash)
  # Adds the tracker peer to our peer list; always returns true.
  addPeer = Proc.new do |trackerPeer|
    peer = Peer.new(trackerPeer)
    peer.infoHash = infoHash
    torrentData.peers.add peer
    true
  end
  classifiedPeers = nil
  # Evicts a disconnected peer and adds the tracker peer in its place;
  # returns false when no disconnected peer remains.
  replaceDisconnectedPeer = Proc.new do |trackerPeer|
    classifiedPeers = ClassifiedPeers.new(torrentData.peers.all) if ! classifiedPeers
    if classifiedPeers.disconnectedPeers.size > 0
      torrentData.peers.delete classifiedPeers.disconnectedPeers.pop
      addPeer.call trackerPeer
      true
    else
      false
    end
  end
  trackerclient = torrentData.trackerClient
  addProc = addPeer
  flipped = false
  trackerclient.peers.each do |p|
    # Switch to replacement mode once the peer list is full.
    if ! flipped && torrentData.peers.size >= @maxPeerCount
      addProc = replaceDisconnectedPeer
      flipped = true
    end
    # Don't treat ourself as a peer.
    next if p.id && p.id == trackerclient.peerId
    if ! torrentData.peers.findByAddr(p.ip, p.port)
      @logger.debug "#{QuartzTorrent.bytesToHex(torrentData.infoHash)}: Adding tracker peer #{p} to peers list"
      break if ! addProc.call(p)
    end
  end
end
# Remove a torrent that we are downloading: cancel all timers, disconnect all
# peers, stop the tracker client and piece managers, remove the cached
# metainfo file, and optionally delete the downloaded files.
def handleRemoveTorrent(infoHash, deleteFiles)
  torrentData = @torrentData.delete infoHash
  if ! torrentData
    @logger.warn "Asked to remove a non-existent torrent #{QuartzTorrent.bytesToHex(infoHash)}"
    return
  end
  @logger.info "#{QuartzTorrent.bytesToHex(torrentData.infoHash)}: Removing torrent. #{deleteFiles ? "Will" : "Wont"} delete downloaded files."
  @logger.info "#{QuartzTorrent.bytesToHex(torrentData.infoHash)}: Removing torrent: no torrentData.metainfoRequestTimer" if ! torrentData.metainfoRequestTimer
  @logger.info "#{QuartzTorrent.bytesToHex(torrentData.infoHash)}: Removing torrent: no torrentData.managePeersTimer" if ! torrentData.managePeersTimer
  @logger.info "#{QuartzTorrent.bytesToHex(torrentData.infoHash)}: Removing torrent: no torrentData.checkMetadataPieceManagerTimer" if ! torrentData.checkMetadataPieceManagerTimer
  @logger.info "#{QuartzTorrent.bytesToHex(torrentData.infoHash)}: Removing torrent: no torrentData.checkPieceManagerTimer" if ! torrentData.checkPieceManagerTimer
  @logger.info "#{QuartzTorrent.bytesToHex(torrentData.infoHash)}: Removing torrent: no torrentData.requestBlocksTimer" if ! torrentData.requestBlocksTimer
  # Stop all timers
  cancelTimer torrentData.metainfoRequestTimer if torrentData.metainfoRequestTimer
  cancelTimer torrentData.managePeersTimer if torrentData.managePeersTimer
  cancelTimer torrentData.checkMetadataPieceManagerTimer if torrentData.checkMetadataPieceManagerTimer
  cancelTimer torrentData.checkPieceManagerTimer if torrentData.checkPieceManagerTimer
  cancelTimer torrentData.requestBlocksTimer if torrentData.requestBlocksTimer
  torrentData.trackerClient.removePeersChangedListener(torrentData.peerChangeListener)
  # Remove all the peers for this torrent.
  torrentData.peers.all.each do |peer|
    if peer.state != :disconnected
      # Close socket
      withPeersIo(peer, "when removing torrent") do |io|
        setPeerDisconnected(peer)
        close(io)
        @logger.debug "Closing connection to peer #{peer}"
      end
    end
    torrentData.peers.delete peer
  end
  # Stop tracker client
  torrentData.trackerClient.stop if torrentData.trackerClient
  # Stop PieceManagers
  torrentData.pieceManager.stop if torrentData.pieceManager
  torrentData.metainfoPieceState.stop if torrentData.metainfoPieceState
  # Remove metainfo file if it exists
  begin
    torrentData.metainfoPieceState.remove if torrentData.metainfoPieceState
  rescue
    @logger.warn "#{QuartzTorrent.bytesToHex(torrentData.infoHash)}: Deleting metainfo file for torrent #{QuartzTorrent.bytesToHex(infoHash)} failed: #{$!}"
  end
  if deleteFiles
    if torrentData.info
      begin
        path = @baseDirectory + File::SEPARATOR + torrentData.info.name
        # File.exist? -- File.exists? is deprecated and removed in Ruby 3.2.
        if File.exist? path
          FileUtils.rm_r path
          @logger.info "#{QuartzTorrent.bytesToHex(torrentData.infoHash)}: Deleted #{path}"
        else
          # Fixed: the "doesn't exist" and "failed: #{$!}" messages were
          # swapped between this branch and the rescue below.
          @logger.warn "#{QuartzTorrent.bytesToHex(torrentData.infoHash)}: When removing torrent, deleting '#{path}' failed because it doesn't exist"
        end
      rescue
        @logger.warn "#{QuartzTorrent.bytesToHex(torrentData.infoHash)}: Deleting '#{path}' for torrent #{QuartzTorrent.bytesToHex(infoHash)} failed: #{$!}"
      end
    end
  end
  dequeue
end
# Pause or unpause a torrent that we are downloading.
# value: true pauses, false unpauses. Unpausing re-queues the torrent at the
# head of the queue since there may not be room for it to run immediately.
def handlePause(infoHash, value)
  torrentData = @torrentData[infoHash]
  if ! torrentData
    @logger.warn "Asked to pause a non-existent torrent #{QuartzTorrent.bytesToHex(infoHash)}"
    return
  end
  return if torrentData.paused == value
  torrentData.paused = value
  if !value
    # On unpause, queue the torrent since there might not be room for it to run.
    # Make sure it goes to the head of the queue.
    queue(torrentData, :unshift)
  end
  # Only freeze/unfreeze immediately when the torrent isn't waiting in the queue.
  setFrozen infoHash, value if ! torrentData.queued
  dequeue
end
# Queue a torrent. With mode :unshift the torrent goes to the head of the
# queue; otherwise it is appended. Queued torrents are frozen unless paused.
def queue(torrentData, mode = :queue)
  return if torrentData.queued
  # Head of queue for :unshift, tail otherwise.
  if mode == :unshift
    @torrentQueue.unshift(torrentData)
  else
    @torrentQueue.push(torrentData)
  end
  setFrozen(torrentData, true) unless torrentData.paused
end
# Dequeue any torrents that can now run based on available space, then start
# each one (initialize it, or unfreeze it if it was already initialized).
def dequeue
  runnable = @torrentQueue.dequeue(@torrentData.values)
  runnable.each do |td|
    if td.state == :initializing
      initTorrent td
    else
      setFrozen(td, false) unless td.paused
    end
  end
end
# Freeze or unfreeze a torrent. If value is true, then we disconnect from all peers for this torrent and forget
# the peers. If value is false, we start reconnecting to peers.
# Parameter torrent can be an infoHash or TorrentData
def setFrozen(torrent, value)
  torrentData = torrent
  if ! torrent.is_a?(TorrentData)
    torrentData = @torrentData[torrent]
    if ! torrentData
      @logger.warn "Asked to freeze a non-existent torrent #{QuartzTorrent.bytesToHex(torrent)}"
      return
    end
  end
  if value
    # Disconnect from all peers so we won't reply to any messages.
    torrentData.peers.all.each do |peer|
      if peer.state != :disconnected
        # Close socket
        # Fixed context string: this is the freeze path, not torrent removal.
        withPeersIo(peer, "when freezing torrent") do |io|
          setPeerDisconnected(peer)
          close(io)
        end
      end
      torrentData.peers.delete peer
    end
  else
    # Get our list of peers and start connecting right away
    # Non-recurring and immediate timer
    torrentData.managePeersTimer =
      @reactor.scheduleTimer(@managePeersPeriod, [:manage_peers, torrentData.infoHash], false, true)
  end
end
end
# Represents a client that talks to bittorrent peers. This is the main class used to download and upload
# bittorrents.
class PeerClient
  # Create a new PeerClient that will save and load torrent data under the specified baseDirectory.
  # maxIncomplete and maxActive bound how many incomplete/active torrents may run at once.
  def initialize(baseDirectory, maxIncomplete = 5, maxActive = 10)
    @port = 9998
    @stopped = true
    @worker = nil
    @logger = LogManager.getLogger("peerclient")
    # (Removed redundant `@handler = nil` / `@reactor = nil` assignments that
    # were immediately overwritten by the real initializations below.)
    @handler = PeerClientHandler.new baseDirectory, maxIncomplete, maxActive
    @reactor = QuartzTorrent::Reactor.new(@handler, LogManager.getLogger("peerclient.reactor"))
    # Tracker clients added before start is called; they are started once the
    # reactor is listening (see addTorrent).
    @toStart = []
  end

  # Set the port used by the torrent peer client. This only has an effect if start has not yet been called.
  attr_accessor :port

  # Start the PeerClient: open the listening port, and start a new thread to begin downloading/uploading pieces.
  # If listening fails, an exception of class Errno::EADDRINUSE is thrown.
  def start
    return if ! @stopped
    @logger.info "Starting"
    @reactor.listen("0.0.0.0",@port,:listener_socket)
    @stopped = false
    @worker = Thread.new do
      QuartzTorrent.initThread("peerclient")
      begin
        @toStart.each{ |trackerclient| trackerclient.start }
        @reactor.start
        @logger.info "Reactor stopped."
        # Once the reactor has shut down, stop all tracker clients.
        @handler.torrentData.each do |k,v|
          v.trackerClient.stop
        end
      rescue
        @logger.error "Unexpected exception in worker thread: #{$!}"
        @logger.error $!.backtrace.join("\n")
      end
    end
  end

  # Stop the PeerClient. This method may take some time to complete.
  def stop
    return if @stopped
    @logger.info "Stop called. Stopping reactor"
    @reactor.stop
    if @worker
      # Wait up to 10 seconds for the worker thread to exit.
      @logger.info "Worker wait timed out after 10 seconds. Shutting down anyway" if ! @worker.join(10)
    end
    @stopped = true
  end

  # Add a new torrent to manage described by a Metainfo object. This is generally the
  # method to call if you have a .torrent file.
  # Returns the infoHash of the newly added torrent.
  def addTorrentByMetainfo(metainfo)
    raise "addTorrentByMetainfo should be called with a Metainfo object, not #{metainfo.class}" if ! metainfo.is_a?(Metainfo)
    trackerclient = TrackerClient.createFromMetainfo(metainfo, false)
    addTorrent(trackerclient, metainfo.infoHash, metainfo.info)
  end

  # Add a new torrent to manage given an announceUrl and an infoHash.
  # Returns the infoHash of the newly added torrent.
  def addTorrentWithoutMetainfo(announceUrl, infoHash, magnet = nil)
    raise "addTorrentWithoutMetainfo should be called with a Magnet object, not a #{magnet.class}" if magnet && ! magnet.is_a?(MagnetURI)
    trackerclient = TrackerClient.create(announceUrl, infoHash, 0, false)
    addTorrent(trackerclient, infoHash, nil, magnet)
  end

  # Add a new torrent to manage given a MagnetURI object. This is generally the
  # method to call if you have a magnet link.
  # Returns the infoHash of the newly added torrent.
  def addTorrentByMagnetURI(magnet)
    raise "addTorrentByMagnetURI should be called with a MagnetURI object, not a #{magnet.class}" if ! magnet.is_a?(MagnetURI)
    trackerUrl = magnet.tracker
    raise "addTorrentByMagnetURI can't handle magnet links that don't have a tracker URL." if !trackerUrl
    addTorrentWithoutMetainfo(trackerUrl, magnet.btInfoHash, magnet)
  end

  # Get a hash of new TorrentDataDelegate objects keyed by torrent infohash. This is the method to
  # call to get information about the state of torrents being downloaded.
  def torrentData(infoHash = nil)
    # This will have to work by putting an event in the handler's queue, and blocking for a response.
    # The handler will build a response and return it.
    @handler.getDelegateTorrentData(infoHash)
  end

  # Pause or unpause the specified torrent.
  def setPaused(infoHash, value)
    @handler.setPaused(infoHash, value)
  end

  # Set the download rate limit in bytes/second.
  def setDownloadRateLimit(infoHash, bytesPerSecond)
    raise "download rate limit must be an Integer, not a #{bytesPerSecond.class}" if bytesPerSecond && ! bytesPerSecond.is_a?(Integer)
    @handler.setDownloadRateLimit(infoHash, bytesPerSecond)
  end

  # Set the upload rate limit in bytes/second.
  def setUploadRateLimit(infoHash, bytesPerSecond)
    raise "upload rate limit must be an Integer, not a #{bytesPerSecond.class}" if bytesPerSecond && ! bytesPerSecond.is_a?(Integer)
    @handler.setUploadRateLimit(infoHash, bytesPerSecond)
  end

  # Set the upload ratio. Pass nil to disable
  def setUploadRatio(infoHash, ratio)
    raise "upload ratio must be Numeric, not a #{ratio.class}" if ratio && ! ratio.is_a?(Numeric)
    @handler.setUploadRatio(infoHash, ratio)
  end

  # Set the maximum amount of time (in seconds) that a torrent can be in the upload-only state before
  # it is paused. Pass nil to disable.
  def setUploadDuration(infoHash, seconds)
    raise "upload ratio must be Numeric, not a #{seconds.class}" if seconds && ! seconds.is_a?(Numeric)
    @handler.setUploadDuration(infoHash, seconds)
  end

  # Remove a currently running torrent
  def removeTorrent(infoHash, deleteFiles = false)
    @handler.removeTorrent(infoHash, deleteFiles)
  end

  private

  # Helper method for adding a torrent.
  def addTorrent(trackerclient, infoHash, info, magnet = nil)
    trackerclient.port = @port
    torrentData = @handler.addTrackerClient(infoHash, info, trackerclient)
    torrentData.magnet = magnet
    # Supply up-to-date progress numbers to the tracker on each announce.
    trackerclient.dynamicRequestParamsBuilder = Proc.new do
      torrentData = @handler.torrentData[infoHash]
      dataLength = (info ? info.dataLength : nil)
      result = TrackerDynamicRequestParams.new(dataLength)
      if torrentData && torrentData.blockState
        result.left = torrentData.blockState.totalLength - torrentData.blockState.completedLength
        result.downloaded = torrentData.bytesDownloadedDataOnly
        result.uploaded = torrentData.bytesUploadedDataOnly
      end
      result
    end
    # If we haven't started yet then add this trackerclient to a queue of
    # trackerclients to start once we are started. If we start too soon we
    # will connect to the tracker, and it will try to connect back to us before we are listening.
    if ! trackerclient.started?
      if @stopped
        @toStart.push trackerclient
      else
        trackerclient.start
      end
    end
    torrentData.infoHash
  end
end
end
|
module Infusionsoft
  # Minimal logger used by the Infusionsoft API client: each level method
  # writes the given message to $stdout. A single definition with a defaulted
  # argument replaces the previous paired defs (`def info; end` followed by
  # `def info(msg)`), where the second silently overwrote the first and made
  # zero-argument calls raise ArgumentError.
  class APILogger
    def info(msg = nil); $stdout.print msg if msg end
    def warn(msg = nil); $stdout.print msg if msg end
    def error(msg = nil); $stdout.print msg if msg end
    # def debug; end
    # def fatal; end
  end
end
Added debug and fatal methods to the logger.
module Infusionsoft
  # Minimal logger for the Infusionsoft API client: each level writes its
  # message to $stdout. Each level is one method with a defaulted argument;
  # the previous paired defs (`def info; end` / `def info(msg)`) meant the
  # second definition silently overwrote the first, so zero-argument calls
  # raised ArgumentError.
  class APILogger
    def info(msg = nil); $stdout.print msg if msg end
    def warn(msg = nil); $stdout.print msg if msg end
    def error(msg = nil); $stdout.print msg if msg end
    def debug(msg = nil); $stdout.print msg if msg end
    def fatal(msg = nil); $stdout.print msg if msg end
  end
end
|
module Rack::Authorize
  # Rack middleware that authorizes requests using scopes from a JWT payload
  # stored in the Rack env under "rack.jwt.session".
  class Authorizer
    # app   - the downstream Rack app.
    # opts  - :excludes (list of route patterns exempt from authorization; a
    #         trailing '*' makes the pattern a prefix match),
    #         :auth_definition (payload key holding the scopes, default "scopes").
    # block - called with (method, path, scopes); a falsy result causes a 403.
    def initialize(app, opts = {}, &block)
      @app = app
      # Fixed: default to an empty Array -- excludes is a list of routes, not a Hash.
      @no_auth_routes = opts[:excludes] || []
      @auth_definition = opts[:auth_definition] || "scopes"
      @block = block
    end

    def call(env)
      if authorizable_route?(env)
        method = env["REQUEST_METHOD"]
        path = env["PATH_INFO"]
        # The JWT payload is saved in rack.jwt.session; the scopes key is @auth_definition.
        scopes = Oj.load(env.fetch("rack.jwt.session", {})[@auth_definition])
        return [403, {}, ["Access Forbidden"]] unless @block.call(method, path, scopes)
      end
      @app.call(env)
    end

    private

    # Returns truthy when the request path is subject to authorization, i.e.
    # it matches none of the excluded routes.
    def authorizable_route?(env)
      if @no_auth_routes.length > 0
        !@no_auth_routes.find do |route|
          # Fixed: only a *trailing* '*' marks a prefix wildcard; previously
          # /\*/ matched a '*' anywhere in the route.
          if route =~ /\*$/
            env['PATH_INFO'] =~ /#{route.chomp("*")}/
          else
            route =~ /#{env['PATH_INFO']}/
          end
        end
      end
    end
  end
end
Only treat a trailing '*' as a wildcard; added explanatory comments.
module Rack::Authorize
  # Rack middleware that authorizes requests using scopes from a JWT payload
  # stored in the Rack env under "rack.jwt.session".
  class Authorizer
    # app   - the downstream Rack app.
    # opts  - :excludes (list of route patterns exempt from authorization; a
    #         trailing '*' makes the pattern a prefix match),
    #         :auth_definition (payload key holding the scopes, default "scopes").
    # block - called with (method, path, scopes); a falsy result causes a 403.
    def initialize(app, opts = {}, &block)
      @app = app
      # Fixed: default to an empty Array -- excludes is a list of routes, not a Hash.
      @no_auth_routes = opts[:excludes] || []
      @auth_definition = opts[:auth_definition] || "scopes"
      @block = block
    end

    def call(env)
      if authorizable_route?(env)
        method = env["REQUEST_METHOD"]
        path = env["PATH_INFO"]
        # The JWT payload is saved in rack.jwt.session the scopes key is scopes
        scopes = Oj.load(env.fetch("rack.jwt.session", {})[@auth_definition])
        return [403, {}, ["Access Forbidden"]] unless @block.call(method, path, scopes)
      end
      @app.call(env)
    end

    private

    def authorizable_route?(env)
      if @no_auth_routes.length > 0
        !@no_auth_routes.find do |route|
          # This checks if the excluded route has a trailing *
          # if it does it checks the path with the route as
          # its regexp thus checking a partial match
          if route =~ /\*$/
            env['PATH_INFO'] =~ /#{route.chomp("*")}/
          # Otherwise it checks the route with the path
          # as its regexp thus checking a complete match
          else
            route =~ /#{env['PATH_INFO']}/
          end
        end
      end
    end
  end
end
|
module Jasmine
  module Phantom
    # Gem version string.
    VERSION = "0.0.5"
  end
end
Version bump
module Jasmine
  module Phantom
    # Gem version string.
    VERSION = "0.0.6"
  end
end
|
module Rack::Authorize
  # Rack middleware that authorizes requests using scopes from a JWT payload.
  # Supports plain Rack ("rack.jwt.session") and, when Rails is loaded,
  # the Rails session ("rack.session" -> "jwt_token").
  class Authorizer
    def initialize(app, opts = {}, &block)
      @app = app
      # NOTE(review): default is a Hash but excludes is used as a list of
      # routes -- an Array default ([]) looks more appropriate; verify callers.
      @no_auth_routes = opts[:excludes] || {}
      @auth_definition = opts[:auth_definition] || "scopes"
      @block = block
    end

    # Dup the middleware per request so _call's work doesn't share mutable
    # instance state across concurrent requests.
    def call(env)
      dup._call(env)
    end

    def _call(env)
      #puts env
      if authorizable_route?(env)
        method = env["REQUEST_METHOD"]
        path = env["PATH_INFO"]
        # The JWT payload is saved in rack.jwt.session the scopes key is scopes
        #puts "----------------------------"
        #puts env
        #puts "----------------------------"
        # When the Rails constant is defined, the JWT lives in the Rails session.
        if Object.constants.include?(:Rails)
          jwt_session_data = env.fetch('rack.session', {}).fetch("jwt_token", {})
        else
          jwt_session_data = env.fetch("rack.jwt.session", {})
        end
        # The session data may be a serialized JSON string or an already-parsed structure.
        if jwt_session_data.is_a? String
          jwt_session_data = Oj.load(jwt_session_data)
          scopes = jwt_session_data.fetch(@auth_definition.to_sym, {})
        else
          scopes = Oj.load(jwt_session_data[@auth_definition])
        end
        return [403, {}, ["Access Forbidden"]] unless @block.call(method, path, scopes)
      end
      @app.call(env)
    end

    private

    def authorizable_route?(env)
      if @no_auth_routes.length > 0
        !@no_auth_routes.find do |route|
          # This checks if the excluded route has a trailing *
          # if it does it checks the path with the route as
          # its regexp thus checking a partial match
          if route =~ /\*$/
            env['PATH_INFO'] =~ /#{route.chomp("*")}/
          # Otherwise it checks the route with the path
          # as its regexp thus checking a complete match
          else
            route =~ /#{env['PATH_INFO']}/
          end
        end
      end
    end
  end
end
Added a session parameter under Rails carrying the token-authorization result
module Rack::Authorize
  # Rack middleware that authorizes requests by handing (method, path,
  # scopes) to a user-supplied block. Under Rails the result is stored in
  # the session; otherwise an unauthorized request is answered with 403.
  class Authorizer
    # opts[:excludes]        - routes exempt from authorization (default none)
    # opts[:auth_definition] - key of the scopes entry in the JWT payload
    def initialize(app, opts = {}, &block)
      @app = app
      @no_auth_routes = opts[:excludes] || {}
      @auth_definition = opts[:auth_definition] || "scopes"
      @block = block
    end

    # Rack entry point; works on a dup so per-request state cannot leak
    # between concurrently handled requests.
    def call(env)
      dup._call(env)
    end

    def _call(env)
      if authorizable_route?(env)
        method = env["REQUEST_METHOD"]
        path = env["PATH_INFO"]
        # Under Rails the JWT is kept in the Rack session ("jwt_token");
        # plain Rack apps use the key written by rack-jwt.
        # (Idiom fix: defined?(Rails) instead of Object.constants.include?.)
        if defined?(Rails)
          jwt_session_data = env.fetch('rack.session', {}).fetch("jwt_token", {})
        else
          jwt_session_data = env.fetch("rack.jwt.session", {})
        end
        # The payload may arrive serialized (String) or already decoded.
        if jwt_session_data.is_a? String
          jwt_session_data = Oj.load(jwt_session_data)
          scopes = jwt_session_data.fetch(@auth_definition.to_sym, {})
        else
          scopes = Oj.load(jwt_session_data[@auth_definition])
        end
        if defined?(Rails)
          # NOTE(review): "rack_session" differs from the standard
          # "rack.session" key read above — confirm this key is intentional.
          env["rack_session"]["jwt_authorized"] = @block.call(method, path, scopes)
        else
          return [403, {}, ["Access Forbidden"]] unless @block.call(method, path, scopes)
        end
      end
      @app.call(env)
    end

    private

    # True when the path must be authorized.
    #
    # Fix: previously returned nil when no excluded routes were configured
    # (the `if` had no else branch), silently disabling authorization.
    def authorizable_route?(env)
      return true if @no_auth_routes.empty?
      !@no_auth_routes.find do |route|
        if route =~ /\*$/
          # Trailing "*": partial (prefix) match of the path against the route.
          env['PATH_INFO'] =~ /#{route.chomp("*")}/
        else
          # Otherwise the path acts as the pattern for a complete match.
          route =~ /#{env['PATH_INFO']}/
        end
      end
    end
  end
end
|
# This script is required by script/jax if script/jax is found.
require 'thor'
require 'thor/group'
require File.expand_path('../../jax', File.dirname(__FILE__))
require File.expand_path('interactions', File.dirname(__FILE__))
module Jax
module Generators
class Error < Thor::Error
end
# Mixin that gives generator commands a USAGE banner rendered from an ERB
# template located next to the file that pulled the module in.
module Usage
  module ClassMethods
    # Print the usage banner when invoked with no arguments; otherwise
    # defer to the normal Thor start.
    def start(given_args=ARGV, config={})
      if (given_args.length == 0)
        puts usage
      else
        super
      end
    end
    # Render the USAGE ERB template found under base_path
    # (trim mode '-' strips lines ending in -%>).
    def usage
      usage = ERB.new(File.read(File.expand_path("USAGE", base_path)), nil, '-')
      usage.result(binding)
    end
    # Directory recorded when the module was included/extended.
    def base_path
      @base_path || raise("Jax Command base path was not found")
    end
    def base_path=(path)
      @base_path = path
    end
  end
  class << self
    # Whether included or extended, add the class-level helpers and record
    # the directory of the including file (derived from `caller`).
    def extended(base)
      base.send :extend, ClassMethods
      base.base_path = File.dirname(caller.first.gsub(/:.*$/, ''))
    end
    def included(base)
      base.send :extend, ClassMethods
      base.base_path = File.dirname(caller.first.gsub(/:.*$/, ''))
    end
  end
end
# Base class for all Jax generator commands.
class Command < Thor::Group
  include Thor::Actions
  include Jax::Generators::Usage
  no_tasks do
    # Abort the generator by raising; the message is surfaced to the user.
    def exit(message = "")
      raise Jax::Generators::Error, message
    end
  end
  # Each subclass automatically gets a base_path (the directory of its
  # defining file, derived from `caller`) and a conventional templates/
  # source root below it.
  def self.inherited(base)
    base.base_path = File.dirname(caller.first.gsub(/:.*$/, ''))
    base.instance_eval do
      def self.source_root
        File.join(base_path, "templates")
      end
    end
  end
end
# Generators extending PluggableCommand will produce code in either a Jax
# application proper, or in a plugin within the app.
class PluggableCommand < Command
  # If the generator was launched from inside vendor/plugins/<name>
  # (JAX_CWD is set by script/jax), redirect all generated output into
  # that plugin's directory instead of the application root.
  def check_plugin_destination
    if ENV['JAX_CWD'] && cwd = File.expand_path('.', ENV['JAX_CWD'])
      if cwd =~ /^#{Regexp::escape File.join(Jax.root, "vendor/plugins/", "")}(.*?)(\/|$)/
        self.destination_root = Jax.root.join("vendor", "plugins", $1)
      end
    end
  end
end
# Lazily load each generator implementation on first constant reference.
autoload :Controller, "jax/generators/controller/controller_generator"
autoload :Model, "jax/generators/model/model_generator"
autoload :LightSource, "jax/generators/light_source/light_source_generator"
autoload :Material, "jax/generators/material/material_generator"
autoload :Shader, "jax/generators/shader/shader_generator"
autoload :Plugin, "jax/generators/plugin/all"
autoload :Packager, "jax/generators/packager/package_generator"
end
end
# Thor CLI that dispatches `jax generate <thing>` to the matching generator.
class JaxGeneratorInvoker < Thor
  include Thor::Actions
  def self.basename
    "jax generate"
  end
  desc "controller NAME", "generates a new controller"
  def controller(*args)
    Jax::Generators::Controller::ControllerGenerator.start(args, :behavior => behavior)
  end
  desc "model NAME", "generates a new model"
  def model(*args)
    Jax::Generators::Model::ModelGenerator.start(args, :behavior => behavior)
  end
  desc "light NAME TYPE", "generates a new light source"
  def light(*args)
    Jax::Generators::LightSource::LightSourceGenerator.start(args, :behavior => behavior)
  end
  desc "material NAME", "generates a new material"
  def material(*args)
    # NOTE(review): unlike the other tasks this ignores the passed args and
    # re-reads ARGV (dropping the command word) — confirm this is deliberate.
    args = ARGV.dup
    args.shift
    Jax::Generators::Material::MaterialGenerator.start(args, :behavior => behavior)
  end
  desc "scaffold NAME", "generates a controller, model and material, all with the same name"
  def scaffold(name)
    Jax::Generators::Controller::ControllerGenerator.start([name, 'index'], :behavior => behavior)
    Jax::Generators::Model::ModelGenerator.start([name], :behavior => behavior)
    Jax::Generators::Material::MaterialGenerator.start([name], :behavior => behavior)
  end
  desc "shader NAME", "generates a new custom shader"
  def shader(*name)
    Jax::Generators::Shader::ShaderGenerator.start(name, :behavior => behavior)
  end
  desc "plugin NAME", "generates a new plugin"
  def plugin(*args)
    # Skips ARGV[0] (the "plugin" command word) when forwarding.
    Jax::Generators::Plugin::PluginGenerator.start(ARGV[1..-1], :behavior => behavior)
  end
  desc "package", "packages this Jax application in preparation for deployment"
  def package(*args)
    Jax::Generators::Packager::PackageGenerator.start(args, :behavior => behavior)
  end
end
# Entry point for the `jax` command line: resolves the first argument to a
# command (through ALIASES) and dispatches it.
class JaxGenerator
  attr_reader :args

  # Top-level commands and their help text.
  COMMANDS = {
    "generate" => "Generate new code",
    "destroy" => "Undo code generated with \"generate\"",
    "plugin" => "Install a plugin"
  } unless defined?(COMMANDS)
  # Single-letter command shortcuts.
  ALIASES = { "g" => "generate" } unless defined?(ALIASES)

  def initialize(args)
    @args = args
    # Fix: `show_usage and return` never returned — puts yields nil, which
    # is falsy — so a missing command fell through to `respond_to? nil`
    # and raised TypeError. Show usage and stop explicitly instead.
    unless command
      show_usage
      return
    end
    if respond_to? command then send command
    else invalid command
    end
  rescue ArgumentError
    puts $!.message
  end

  def generate
    JaxGeneratorInvoker.start
  end

  def destroy
    JaxGeneratorInvoker.start(ARGV, :behavior => :revoke)
  end

  def plugin
    Jax::Generators::Plugin::PluginManager.start
  end

  # First CLI argument resolved through ALIASES; nil when absent.
  def command
    @command ||= begin
      command = args.shift
      command = ALIASES[command] || command
    end
  end

  def invalid(command)
    puts "Invalid command."
    puts
    show_usage
  end

  def show_usage
    puts <<-end_banner
Usage: jax COMMAND [ARGS]
The following commands are available:
 #{command_list.join("\n ")}
All commands can be run with -h for more information.
    end_banner
  end

  # One formatted line per command for the usage banner.
  def command_list
    COMMANDS.keys.collect { |command| "#{command.ljust(13)}#{description_for command}"}
  end

  # Help text for a command, annotated with its shortcut alias if any.
  def description_for(command)
    if i = ALIASES.values.index(command)
      COMMANDS[command] + " (shortcut alias: \"#{ALIASES.keys[i]}\")"
    else
      COMMANDS[command]
    end
  end

  class << self
    # this gets called by script/jax from within a jax app
    def start
      new ARGV
    end
  end
end
minor generator bugfix
# This script is required by script/jax if script/jax is found.
require 'thor'
require 'thor/group'
require File.expand_path('../../jax', File.dirname(__FILE__))
require File.expand_path('interactions', File.dirname(__FILE__))
module Jax
module Generators
class Error < Thor::Error
end
# Mixin that gives generator commands a USAGE banner rendered from an ERB
# template located next to the file that pulled the module in.
module Usage
  module ClassMethods
    # Print the usage banner when invoked with no arguments; otherwise
    # defer to the normal Thor start.
    def start(given_args=ARGV, config={})
      if (given_args.length == 0)
        puts usage
      else
        super
      end
    end
    # Render the USAGE ERB template found under base_path.
    def usage
      usage = ERB.new(File.read(File.expand_path("USAGE", base_path)), nil, '-')
      usage.result(binding)
    end
    # Directory recorded when the module was included/extended.
    def base_path
      @base_path || raise("Jax Command base path was not found")
    end
    def base_path=(path)
      @base_path = path
    end
  end
  class << self
    # Whether included or extended, add the class-level helpers and record
    # the directory of the including file (derived from `caller`).
    def extended(base)
      base.send :extend, ClassMethods
      base.base_path = File.dirname(caller.first.gsub(/:.*$/, ''))
    end
    def included(base)
      base.send :extend, ClassMethods
      base.base_path = File.dirname(caller.first.gsub(/:.*$/, ''))
    end
  end
end
# Base class for all Jax generator commands.
class Command < Thor::Group
  include Thor::Actions
  include Jax::Generators::Usage
  no_tasks do
    # Abort the generator by raising; the message is surfaced to the user.
    def exit(message = "")
      raise Jax::Generators::Error, message
    end
  end
  # Each subclass automatically gets a base_path (its defining file's
  # directory) and a conventional templates/ source root below it.
  def self.inherited(base)
    base.base_path = File.dirname(caller.first.gsub(/:.*$/, ''))
    base.instance_eval do
      def self.source_root
        File.join(base_path, "templates")
      end
    end
  end
end
# Generators extending PluggableCommand will produce code in either a Jax
# application proper, or in a plugin within the app.
class PluggableCommand < Command
  # If launched from inside vendor/plugins/<name> (JAX_CWD set by
  # script/jax), redirect generated output into that plugin's directory.
  def check_plugin_destination
    if ENV['JAX_CWD'] && cwd = File.expand_path('.', ENV['JAX_CWD'])
      if cwd =~ /^#{Regexp::escape File.join(Jax.root, "vendor/plugins/", "")}(.*?)(\/|$)/
        self.destination_root = Jax.root.join("vendor", "plugins", $1)
      end
    end
  end
end
# Lazily load each generator implementation on first constant reference.
autoload :Controller, "jax/generators/controller/controller_generator"
autoload :Model, "jax/generators/model/model_generator"
autoload :LightSource, "jax/generators/light_source/light_source_generator"
autoload :Material, "jax/generators/material/material_generator"
autoload :Shader, "jax/generators/shader/shader_generator"
autoload :Plugin, "jax/generators/plugin/all"
autoload :Packager, "jax/generators/packager/package_generator"
end
end
# Thor CLI that dispatches `jax generate <thing>` to the matching generator.
class JaxGeneratorInvoker < Thor
  include Thor::Actions
  def self.basename
    "jax generate"
  end
  desc "controller NAME", "generates a new controller"
  def controller(*args)
    Jax::Generators::Controller::ControllerGenerator.start(args, :behavior => behavior)
  end
  desc "model NAME", "generates a new model"
  def model(*args)
    Jax::Generators::Model::ModelGenerator.start(args, :behavior => behavior)
  end
  desc "light NAME TYPE", "generates a new light source"
  def light(*args)
    Jax::Generators::LightSource::LightSourceGenerator.start(args, :behavior => behavior)
  end
  desc "material NAME", "generates a new material"
  def material(*args)
    # NOTE(review): unlike the other tasks this ignores the passed args and
    # re-reads ARGV (dropping the command word) — confirm this is deliberate.
    args = ARGV.dup
    args.shift
    Jax::Generators::Material::MaterialGenerator.start(args, :behavior => behavior)
  end
  desc "scaffold NAME", "generates a controller, model and material, all with the same name"
  def scaffold(name)
    Jax::Generators::Controller::ControllerGenerator.start([name, 'index'], :behavior => behavior)
    Jax::Generators::Model::ModelGenerator.start([name], :behavior => behavior)
    Jax::Generators::Material::MaterialGenerator.start([name], :behavior => behavior)
  end
  desc "shader NAME", "generates a new custom shader"
  def shader(*name)
    Jax::Generators::Shader::ShaderGenerator.start(name, :behavior => behavior)
  end
  desc "plugin NAME", "generates a new plugin"
  def plugin(*args)
    # Skips ARGV[0] (the "plugin" command word) when forwarding.
    Jax::Generators::Plugin::PluginGenerator.start(ARGV[1..-1], :behavior => behavior)
  end
end
# Entry point for the `jax` command line: resolves the first argument to a
# command (through ALIASES) and dispatches it.
class JaxGenerator
  attr_reader :args

  # Top-level commands and their help text.
  COMMANDS = {
    "generate" => "Generate new code",
    "destroy" => "Undo code generated with \"generate\"",
    "plugin" => "Install a plugin",
    "package" => "Package the app for production"
  } unless defined?(COMMANDS)
  # Single-letter command shortcuts.
  ALIASES = { "g" => "generate" } unless defined?(ALIASES)

  def initialize(args)
    @args = args
    # Fix: `show_usage and return` never returned — puts yields nil, which
    # is falsy — so a missing command fell through to `respond_to? nil`
    # and raised TypeError. Show usage and stop explicitly instead.
    unless command
      show_usage
      return
    end
    if respond_to? command then send command
    else invalid command
    end
  rescue ArgumentError
    puts $!.message
  end

  def package
    Jax::Generators::Packager::PackageGenerator.start
  end

  def generate
    JaxGeneratorInvoker.start
  end

  def destroy
    JaxGeneratorInvoker.start(ARGV, :behavior => :revoke)
  end

  def plugin
    Jax::Generators::Plugin::PluginManager.start
  end

  # First CLI argument resolved through ALIASES; nil when absent.
  def command
    @command ||= begin
      command = args.shift
      command = ALIASES[command] || command
    end
  end

  def invalid(command)
    puts "Invalid command."
    puts
    show_usage
  end

  def show_usage
    puts <<-end_banner
Usage: jax COMMAND [ARGS]
The following commands are available:
 #{command_list.join("\n ")}
All commands can be run with -h for more information.
    end_banner
  end

  # One formatted line per command for the usage banner.
  def command_list
    COMMANDS.keys.collect { |command| "#{command.ljust(13)}#{description_for command}"}
  end

  # Help text for a command, annotated with its shortcut alias if any.
  def description_for(command)
    if i = ALIASES.values.index(command)
      COMMANDS[command] + " (shortcut alias: \"#{ALIASES.keys[i]}\")"
    else
      COMMANDS[command]
    end
  end

  class << self
    # this gets called by script/jax from within a jax app
    def start
      new ARGV
    end
  end
end
|
# Fix: this class subclasses Tag, so the tag definition must be required;
# requiring only 'base' left Tag undefined and raised NameError at load.
require 'rack/dev-mark/theme/tag'

module Rack
  module DevMark
    module Theme
      # Theme that injects the dev-mark into the page's <title> tag
      # (no HTML attribute is needed, hence attribute: nil).
      class Title < Tag
        def initialize(options = {})
          super options.merge(name: 'title', attribute: nil)
        end
      end
    end
  end
end
Fix spec failure
require 'rack/dev-mark/theme/tag'

module Rack
  module DevMark
    module Theme
      # Theme that injects the dev-mark into the page's <title> tag
      # (no HTML attribute is needed, hence attribute: nil).
      class Title < Tag
        def initialize(options = {})
          super options.merge(name: 'title', attribute: nil)
        end
      end
    end
  end
end
|
module Rack
  class WebProfiler
    # Gem version; frozen so the shared string cannot be mutated.
    VERSION = "0.1.0-alpha".freeze
  end
end
Bump to 0.1.0-alpha1
module Rack
  class WebProfiler
    # Gem version; frozen so the shared string cannot be mutated.
    VERSION = "0.1.0-alpha1".freeze
  end
end
|
require 'open-uri'
require "rails_admin_import/import_logger"
module RailsAdminImport
module Import
extend ActiveSupport::Concern
module ClassMethods
# Attachment fields (e.g. Paperclip attachment_definitions) declared on
# this model, minus globally excluded fields.
def file_fields
  attrs = []
  if self.methods.include?(:attachment_definitions) && !self.attachment_definitions.nil?
    attrs = self.attachment_definitions.keys
  end
  attrs - RailsAdminImport.config(self).excluded_fields
end
# Importable column names (as symbols): all model attributes minus
# belongs_to foreign keys, attachment bookkeeping columns, timestamps,
# and configured exclusions.
def import_fields
  fields = []
  fields = self.new.attributes.keys.collect { |key| key.to_sym }
  self.belongs_to_fields.each do |key|
    fields.delete("#{key}_id".to_sym)
  end
  # Paperclip-style companion columns are managed automatically.
  self.file_fields.each do |key|
    fields.delete("#{key}_file_name".to_sym)
    fields.delete("#{key}_content_type".to_sym)
    fields.delete("#{key}_file_size".to_sym)
    fields.delete("#{key}_updated_at".to_sym)
  end
  excluded_fields = RailsAdminImport.config(self).excluded_fields
  [:id, :created_at, :updated_at, excluded_fields].flatten.each do |key|
    fields.delete(key)
  end
  fields
end
# Names of belongs_to associations eligible for import.
#
# Fix: polymorphic associations were previously included, but they have no
# single target class, so the later `field.to_s.classify.constantize`
# lookup blew up (rails_admin_import issue #6). Exclude them.
def belongs_to_fields
  attrs = self.reflections.select { |k, v| v.macro == :belongs_to && ! v.options.has_key?(:polymorphic) }.keys
  attrs - RailsAdminImport.config(self).excluded_fields
end
# Singularized names of to-many associations (has_many / HABTM),
# minus configured exclusions.
def many_fields
  attrs = []
  self.reflections.each do |k, v|
    if [:has_and_belongs_to_many, :has_many].include?(v.macro)
      attrs << k.to_s.singularize.to_sym
    end
  end
  attrs - RailsAdminImport.config(self).excluded_fields
end
# Import model records from an uploaded CSV file.
#
# params must contain :file (an upload responding to #tempfile) and may
# contain :update_if_exists plus :update_lookup for update-in-place mode.
# Returns a hash of result messages: { :success => [...], :error => [...] }.
def run_import(params)
  begin
    if !params.has_key?(:file)
      return results = { :success => [], :error => ["You must select a file."] }
    end
    # Fix: create the logger before any step that can raise, so the rescue
    # clause below can always log; it previously hit a NameError when an
    # early step (e.g. File.read) failed before `logger` was assigned.
    logger = ImportLogger.new
    if RailsAdminImport.config.logging
      FileUtils.copy(params[:file].tempfile, "#{Rails.root}/log/import/#{Time.now.strftime("%Y-%m-%d-%H-%M-%S")}-import.csv")
    end
    text = File.read(params[:file].tempfile)
    clean = text #.force_encoding('BINARY').encode('UTF-8', :undef => :replace, :replace => '').gsub(/\n$/, '')
    file_check = CSV.new(clean)
    if file_check.readlines.size > RailsAdminImport.config.line_item_limit
      return results = { :success => [], :error => ["Please limit upload file to #{RailsAdminImport.config.line_item_limit} line items."] }
    end
    # Map CSV header names to column indexes; to-many fields may appear in
    # several columns, so they map to an array of indexes.
    map = {}
    file = CSV.new(clean)
    file.readline.each_with_index do |key, i|
      if self.many_fields.include?(key.to_sym)
        map[key.to_sym] ||= []
        map[key.to_sym] << i
      else
        map[key.to_sym] = i
      end
    end
    update = params.has_key?(:update_if_exists) && params[:update_if_exists] ? params[:update_lookup].to_sym : nil
    if update && !map.has_key?(params[:update_lookup].to_sym)
      return results = { :success => [], :error => ["Your file must contain a column for the 'Update lookup field' you selected."] }
    end
    results = { :success => [], :error => [] }
    # Preload association lookups: label value => id (belongs_to) or
    # label value => record (to-many).
    associated_map = {}
    self.belongs_to_fields.flatten.each do |field|
      associated_map[field] = field.to_s.classify.constantize.all.inject({}) { |hash, c| hash[c.send(params[field]).to_s] = c.id; hash }
    end
    self.many_fields.flatten.each do |field|
      associated_map[field] = field.to_s.classify.constantize.all.inject({}) { |hash, c| hash[c.send(params[field]).to_s] = c; hash }
    end
    label_method = RailsAdminImport.config(self).label
    file.each do |row|
      object = self.import_initialize(row, map, update)
      object.import_belongs_to_data(associated_map, row, map)
      object.import_many_data(associated_map, row, map)
      object.before_import_save(row, map)
      object.import_files(row, map)
      verb = object.new_record? ? "Create" : "Update"
      if object.errors.empty?
        if object.save
          logger.info "#{Time.now.to_s}: #{verb}d: #{object.send(label_method)}"
          results[:success] << "#{verb}d: #{object.send(label_method)}"
          object.after_import_save(row, map)
        else
          logger.info "#{Time.now.to_s}: Failed to #{verb}: #{object.send(label_method)}. Errors: #{object.errors.full_messages.join(', ')}."
          results[:error] << "Failed to #{verb}: #{object.send(label_method)}. Errors: #{object.errors.full_messages.join(', ')}."
        end
      else
        logger.info "#{Time.now.to_s}: Errors before save: #{object.send(label_method)}. Errors: #{object.errors.full_messages.join(', ')}."
        results[:error] << "Errors before save: #{object.send(label_method)}. Errors: #{object.errors.full_messages.join(', ')}."
      end
    end
    results
  rescue Exception => e
    # NOTE(review): rescuing Exception is very broad; StandardError would
    # avoid swallowing SignalException/SystemExit.
    logger.info "#{Time.now.to_s}: Unknown exception in import: #{e.inspect}"
    return results = { :success => [], :error => ["Could not upload. Unexpected error: #{e.to_s}"] }
  end
end
# Build (or look up and update) a record from one CSV row.
# update:: symbol naming the lookup column when "update existing" mode is on.
def import_initialize(row, map, update)
  new_attrs = {}
  self.import_fields.each do |key|
    new_attrs[key] = row[map[key]] if map[key]
  end
  item = nil
  if update.present?
    # NOTE(review): dynamic finders (find_by_<attr>) were removed in
    # Rails 4.1+; find_by(update => ...) is the modern equivalent.
    item = self.send("find_by_#{update}", row[map[update]])
  end
  if item.nil?
    item = self.new(new_attrs)
  else
    # Existing record: apply everything except the lookup column itself.
    item.attributes = new_attrs.except(update.to_sym)
    item.save
  end
  item
end
end
# Hook invoked just before each imported record is saved.
def before_import_save(*args)
  # Meant to be overridden to do special actions
end
# Hook invoked after each imported record is successfully saved.
def after_import_save(*args)
  # Meant to be overridden to do special actions
end
# Default label used when reporting import results.
def import_display
  self.id
end
# Download and attach any file fields referenced by URL in the CSV row.
# Only runs for new, otherwise-valid records; failures become model errors.
def import_files(row, map)
  if self.new_record? && self.valid?
    self.class.file_fields.each do |key|
      if map[key] && !row[map[key]].nil?
        begin
          # Strip file
          row[map[key]] = row[map[key]].gsub(/\s+/, "")
          # File extension guessed from the trailing alphanumerics of the URL.
          format = row[map[key]].match(/[a-z0-9]+$/)
          # SECURITY(review): Kernel#open on CSV-supplied data follows
          # arbitrary URLs and even spawns a subprocess for values starting
          # with "|" — prefer URI.open on validated URLs.
          open("#{Rails.root}/tmp/#{self.permalink}.#{format}", 'wb') { |file| file << open(row[map[key]]).read }
          self.send("#{key}=", File.open("#{Rails.root}/tmp/#{self.permalink}.#{format}"))
        rescue Exception => e
          self.errors.add(:base, "Import error: #{e.inspect}")
        end
      end
    end
  end
end
# Resolve each belongs_to column to a foreign key via the prebuilt
# label-value => id lookup map.
def import_belongs_to_data(associated_map, row, map)
  self.class.belongs_to_fields.each do |key|
    if map.has_key?(key) && row[map[key]] != ""
      self.send("#{key}_id=", associated_map[key][row[map[key]]])
    end
  end
end
# Assign to-many associations from (possibly repeated) CSV columns,
# resolving each cell through the label-value => record lookup map.
def import_many_data(associated_map, row, map)
  self.class.many_fields.each do |key|
    values = []
    map[key] ||= []
    map[key].each do |pos|
      if row[pos] != "" && associated_map[key][row[pos]]
        values << associated_map[key][row[pos]]
      end
    end
    if values.any?
      self.send("#{key.to_s.pluralize}=", values)
    end
  end
end
end
end
# Mix the import API into every ActiveRecord model.
class ActiveRecord::Base
  include RailsAdminImport::Import
end
fix issue#6
https://github.com/stephskardal/rails_admin_import/issues/6
require 'open-uri'
require "rails_admin_import/import_logger"
module RailsAdminImport
module Import
extend ActiveSupport::Concern
module ClassMethods
# Attachment fields (e.g. Paperclip attachment_definitions) declared on
# this model, minus globally excluded fields.
def file_fields
  has_attachments = self.methods.include?(:attachment_definitions) && !self.attachment_definitions.nil?
  attrs = has_attachments ? self.attachment_definitions.keys : []
  attrs - RailsAdminImport.config(self).excluded_fields
end
# Importable column names (as symbols): all model attributes minus
# belongs_to foreign keys, attachment bookkeeping columns, timestamps,
# and configured exclusions.
def import_fields
  fields = self.new.attributes.keys.map { |key| key.to_sym }
  self.belongs_to_fields.each { |key| fields.delete("#{key}_id".to_sym) }
  # Paperclip-style companion columns are managed automatically.
  self.file_fields.each do |key|
    %w[file_name content_type file_size updated_at].each do |suffix|
      fields.delete("#{key}_#{suffix}".to_sym)
    end
  end
  excluded = RailsAdminImport.config(self).excluded_fields
  [:id, :created_at, :updated_at, excluded].flatten.each { |key| fields.delete(key) }
  fields
end
# Names of non-polymorphic belongs_to associations eligible for import
# (polymorphic ones have no single target class to constantize).
def belongs_to_fields
  attrs = self.reflections.select { |k, v| v.macro == :belongs_to && ! v.options.has_key?(:polymorphic) }.keys
  attrs - RailsAdminImport.config(self).excluded_fields
end
# Singularized names of to-many associations (has_many / HABTM),
# minus configured exclusions.
def many_fields
  names = self.reflections.select { |_name, reflection|
    [:has_and_belongs_to_many, :has_many].include?(reflection.macro)
  }.keys.map { |name| name.to_s.singularize.to_sym }
  names - RailsAdminImport.config(self).excluded_fields
end
# Import model records from an uploaded CSV file.
#
# params must contain :file (an upload responding to #tempfile) and may
# contain :update_if_exists plus :update_lookup for update-in-place mode.
# Returns a hash of result messages: { :success => [...], :error => [...] }.
def run_import(params)
  begin
    if !params.has_key?(:file)
      return results = { :success => [], :error => ["You must select a file."] }
    end
    # Fix: create the logger before any step that can raise, so the rescue
    # clause below can always log; it previously hit a NameError when an
    # early step (e.g. File.read) failed before `logger` was assigned.
    logger = ImportLogger.new
    if RailsAdminImport.config.logging
      FileUtils.copy(params[:file].tempfile, "#{Rails.root}/log/import/#{Time.now.strftime("%Y-%m-%d-%H-%M-%S")}-import.csv")
    end
    text = File.read(params[:file].tempfile)
    clean = text #.force_encoding('BINARY').encode('UTF-8', :undef => :replace, :replace => '').gsub(/\n$/, '')
    file_check = CSV.new(clean)
    if file_check.readlines.size > RailsAdminImport.config.line_item_limit
      return results = { :success => [], :error => ["Please limit upload file to #{RailsAdminImport.config.line_item_limit} line items."] }
    end
    # Map CSV header names to column indexes; to-many fields may appear in
    # several columns, so they map to an array of indexes.
    map = {}
    file = CSV.new(clean)
    file.readline.each_with_index do |key, i|
      if self.many_fields.include?(key.to_sym)
        map[key.to_sym] ||= []
        map[key.to_sym] << i
      else
        map[key.to_sym] = i
      end
    end
    update = params.has_key?(:update_if_exists) && params[:update_if_exists] ? params[:update_lookup].to_sym : nil
    if update && !map.has_key?(params[:update_lookup].to_sym)
      return results = { :success => [], :error => ["Your file must contain a column for the 'Update lookup field' you selected."] }
    end
    results = { :success => [], :error => [] }
    # Preload association lookups: label value => id (belongs_to) or
    # label value => record (to-many).
    associated_map = {}
    self.belongs_to_fields.flatten.each do |field|
      associated_map[field] = field.to_s.classify.constantize.all.inject({}) { |hash, c| hash[c.send(params[field]).to_s] = c.id; hash }
    end
    self.many_fields.flatten.each do |field|
      associated_map[field] = field.to_s.classify.constantize.all.inject({}) { |hash, c| hash[c.send(params[field]).to_s] = c; hash }
    end
    label_method = RailsAdminImport.config(self).label
    file.each do |row|
      object = self.import_initialize(row, map, update)
      object.import_belongs_to_data(associated_map, row, map)
      object.import_many_data(associated_map, row, map)
      object.before_import_save(row, map)
      object.import_files(row, map)
      verb = object.new_record? ? "Create" : "Update"
      if object.errors.empty?
        if object.save
          logger.info "#{Time.now.to_s}: #{verb}d: #{object.send(label_method)}"
          results[:success] << "#{verb}d: #{object.send(label_method)}"
          object.after_import_save(row, map)
        else
          logger.info "#{Time.now.to_s}: Failed to #{verb}: #{object.send(label_method)}. Errors: #{object.errors.full_messages.join(', ')}."
          results[:error] << "Failed to #{verb}: #{object.send(label_method)}. Errors: #{object.errors.full_messages.join(', ')}."
        end
      else
        logger.info "#{Time.now.to_s}: Errors before save: #{object.send(label_method)}. Errors: #{object.errors.full_messages.join(', ')}."
        results[:error] << "Errors before save: #{object.send(label_method)}. Errors: #{object.errors.full_messages.join(', ')}."
      end
    end
    results
  rescue Exception => e
    # NOTE(review): rescuing Exception is very broad; StandardError would
    # avoid swallowing SignalException/SystemExit.
    logger.info "#{Time.now.to_s}: Unknown exception in import: #{e.inspect}"
    return results = { :success => [], :error => ["Could not upload. Unexpected error: #{e.to_s}"] }
  end
end
# Build (or look up and update) a record from one CSV row.
# update:: symbol naming the lookup column when "update existing" mode is on.
def import_initialize(row, map, update)
  new_attrs = {}
  self.import_fields.each do |key|
    new_attrs[key] = row[map[key]] if map[key]
  end
  item = nil
  if update.present?
    # NOTE(review): dynamic finders (find_by_<attr>) were removed in
    # Rails 4.1+; find_by(update => ...) is the modern equivalent.
    item = self.send("find_by_#{update}", row[map[update]])
  end
  if item.nil?
    item = self.new(new_attrs)
  else
    # Existing record: apply everything except the lookup column itself.
    item.attributes = new_attrs.except(update.to_sym)
    item.save
  end
  item
end
end
# Hook invoked just before each imported record is saved.
def before_import_save(*args)
  # Meant to be overridden to do special actions
end
# Hook invoked after each imported record is successfully saved.
def after_import_save(*args)
  # Meant to be overridden to do special actions
end
# Default label used when reporting import results.
def import_display
  self.id
end
# Download and attach any file fields referenced by URL in the CSV row.
# Only runs for new, otherwise-valid records; failures become model errors.
def import_files(row, map)
  if self.new_record? && self.valid?
    self.class.file_fields.each do |key|
      if map[key] && !row[map[key]].nil?
        begin
          # Strip file
          row[map[key]] = row[map[key]].gsub(/\s+/, "")
          # File extension guessed from the trailing alphanumerics of the URL.
          format = row[map[key]].match(/[a-z0-9]+$/)
          # SECURITY(review): Kernel#open on CSV-supplied data follows
          # arbitrary URLs and even spawns a subprocess for values starting
          # with "|" — prefer URI.open on validated URLs.
          open("#{Rails.root}/tmp/#{self.permalink}.#{format}", 'wb') { |file| file << open(row[map[key]]).read }
          self.send("#{key}=", File.open("#{Rails.root}/tmp/#{self.permalink}.#{format}"))
        rescue Exception => e
          self.errors.add(:base, "Import error: #{e.inspect}")
        end
      end
    end
  end
end
# Resolve each belongs_to column to a foreign key via the prebuilt
# label-value => id lookup map.
def import_belongs_to_data(associated_map, row, map)
  self.class.belongs_to_fields.each do |field|
    next unless map.has_key?(field)
    cell = row[map[field]]
    self.send("#{field}_id=", associated_map[field][cell]) unless cell == ""
  end
end
# Assign to-many associations from (possibly repeated) CSV columns,
# resolving each cell through the label-value => record lookup map.
def import_many_data(associated_map, row, map)
  self.class.many_fields.each do |key|
    values = []
    map[key] ||= []
    map[key].each do |pos|
      if row[pos] != "" && associated_map[key][row[pos]]
        values << associated_map[key][row[pos]]
      end
    end
    if values.any?
      self.send("#{key.to_s.pluralize}=", values)
    end
  end
end
end
end
# Mix the import API into every ActiveRecord model.
class ActiveRecord::Base
  include RailsAdminImport::Import
end
|
require 'yaml'
class Rake::Builder
  # Per-checkout local build configuration (include paths, compiler
  # options) persisted as a YAML file.
  class LocalConfig
    # Config file format versions this code understands.
    VERSIONS = ['1.0', '1.1']

    attr_accessor :include_paths
    attr_accessor :compilation_options

    def initialize( file_name )
      @file_name = file_name
      @include_paths = []
      @compilation_options = []
    end

    # Load settings from @file_name.
    #
    # Fix: validate the file version *before* assigning state, so an
    # unsupported file cannot leave the object partially populated;
    # also use include? rather than find_index for the membership test.
    def load
      config = YAML.load_file( @file_name )
      version = config[:rake_builder][:config_file][:version]
      unless VERSIONS.include?(version)
        raise Rake::Builder::Error.new('Config file version incorrect')
      end
      @include_paths = config[:include_paths]
      @compilation_options = config[:compilation_options]
    end

    # Persist the current settings to the config file.
    def save
      File.open( @file_name, 'w' ) do | file |
        file.write config.to_yaml
      end
    end

    # Serializable snapshot, always stamped with the newest version.
    def config
      { :rake_builder => { :config_file => { :version => VERSIONS[-1] } },
        :include_paths => @include_paths,
        :compilation_options => @compilation_options }
    end
  end
end
Whitespace
require 'yaml'
class Rake::Builder
  # Per-checkout local build configuration (include paths, compiler
  # options) persisted as a YAML file.
  class LocalConfig
    # Config file format versions this code understands.
    VERSIONS = ['1.0', '1.1']

    attr_accessor :include_paths
    attr_accessor :compilation_options

    def initialize(file_name)
      @file_name = file_name
      @include_paths = []
      @compilation_options = []
    end

    # Read and validate the YAML file, then adopt its settings.
    def load
      data = YAML.load_file(@file_name)
      version = data[:rake_builder][:config_file][:version]
      raise Rake::Builder::Error.new('Config file version incorrect') unless VERSIONS.include?(version)
      @include_paths = data[:include_paths]
      @compilation_options = data[:compilation_options]
    end

    # Persist the current settings to the config file.
    def save
      File.open(@file_name, 'w') { |f| f.write(config.to_yaml) }
    end

    # Hash representation written to disk, stamped with the latest version.
    def config
      {
        :rake_builder => { :config_file => { :version => VERSIONS.last } },
        :include_paths => @include_paths,
        :compilation_options => @compilation_options
      }
    end
  end
end
|
# encoding: utf-8
#
# FNordmetric helper
#
#
require 'ramaze'
require 'fnordmetric'
require 'redis'
module Ramaze
module Helper
##
# This helper provides a convenience wrapper for sending events to
# Fnordmetric.
#
# Events can be anything; they are just an indication that something happened.
# Fnordmetric can then compute aggregates over the events received per period.
#
# Since events can carry arbitrary data, this helper adds methods that send
# performance data to Fnordmetric, so one can easily measure code execution
# times.
#
# Events are associated with the Innate session id, and thus are linked to
# visitors of your site. This is really useful since you can, for instance,
# see how long a controller action took for a particular user.
#
# If you want to use a Redis server other than the usual localhost:6379, you
# need to define the :fnord_redis_url trait, e.g.:
#
# trait :fnord_redis_url => "redis://redis.example.com:6332"
#
# TODO: @example Basic usage...
# TODO: Implement with_id that uses specific id instead of innate.sid
module Fnordmetric
# @@fnord will hold Fnordmetric API instance
# @@redis holds a Redis connection
# A timer is an Array holding the event name, a Hash of arguments and a timestamp
@@fnord = nil
@@redis = nil
@@sstack_key_root = "fnordmetric.%s.%s.%s" % [ ENV['HOSTNAME'] || "localhost", ENV['USER'], Ramaze.options.app.name.to_s ]
##
# We need clock as a class method
# Let's extend the includer when it includes us
def self.included(base)
  Ramaze::Log.debug("Fnordmetric helper is being included in %s" % base.name)
  base.extend(ClassMethods)
end
##
# Creates a fnordmetric instance, holding the Redis connection
#
# Uses the :fnord_redis_url trait when set, falling back to the default
# local Redis instance.
#
# Do not call this
def _connect # :nodoc:
  Ramaze::Log.debug("In connect")
  begin
    url = ancestral_trait[:fnord_redis_url]
  rescue
    url = nil
  ensure
    # Fix: a missing trait returns nil without raising, so the previous
    # rescue-only default never applied and Redis got :url => nil.
    url ||= "redis://localhost:6379"
    @@fnord = FnordMetric::API.new(:redis_url => url)
    @@redis = Redis.new(:url => url)
    Ramaze::Log.debug("Connected to FnordMetric")
  end
end
##
# Sends an event to Fnordmetric
#
# This helper method sends an event to Fnordmetric, populating the
# :_type field with the event name and :_session with the current
# innate sid.
#
# @param [Symbol] evt the name of the event to send to Fnordmetric.
# @param [Hash] args a hash of supplemental data to send
#
def event(evt, args = {})
  # Let's connect first, it will have to be done anyway
  return unless evt
  _connect unless @@fnord
  # The parameter is reused here to build the payload hash.
  evt = { :_type => evt.to_s, :_session => session.sid }
  evt.merge!(args)
  Ramaze::Log.debug("Logging Fnordmetric event %s" % evt.inspect)
  @@fnord.event(evt)
end
##
# All in one timing function for a block
#
# This method will send an event containing the execution time of
# the passed block.
#
# @example Block style usage
#
#   times(:performance, :method => :whatever) do
#     # do stuff to be measured
#   end
#
# @param [Symbol] event_name the name of the event to send to Fnordmetric.
# @param [Hash] args a hash of supplemental data to send
# @param [Block] block code to be executed and timed
#
def times(event_name, args = {}, &block)
  push_timer(event_name, args)
  # THINK: may be raise since there is no point in using times without a
  # block
  yield if block_given?
ensure
  # The timer is popped (and the event sent) even if the block raises.
  pop_timer
end
##
# Starts a timer and pushes it to the timers stack
#
# @param [Symbol] event_name the name of the event to send to Fnordmetric.
# @param [Hash] args a hash of supplemental data to send
#
# @example Push/Pop style usage
#
#   push_timer(:performance, :field => :whatever)
#   # some code
#   pop_timer
#
def push_timer(event_name, args = {})
  _connect unless @@redis
  # Fix: timers form a Redis *list* (pop_timer uses llen/lpop, and the
  # debug line below reads llen); the previous `set` stored a plain
  # string, which makes those list commands fail with WRONGTYPE.
  @@redis.lpush(_key, [event_name, args, Time.now.to_f].to_json)
  @@redis.expire(_key, _ttl)
  Ramaze::Log.debug("Timer pushed and TTL set to %s for %s to %s (stack level is now %s)" %
    [ _ttl,
      event_name,
      _key,
      @@redis.llen(_key) ])
end
##
# Pops a timer and sends an event
#
# This method pops the last pushed timer and sends an event
# No arguments are needed, since they were stored by push_timer
#
def pop_timer
  len = @@redis.llen(_key)
  if len > 0
    json = @@redis.lpop(_key)
    # Stored by push_timer as [event_name, args, start_timestamp].
    wat, args, wen = JSON.parse(json)
    Ramaze::Log.debug("Timer popped for %s (stack level is now %s)" % [ wat, len - 1])
    # We log millisecs
    time = Time.now-Time.at(wen)
    time *= 1000
    event(wat, args.merge(:time => time.to_i))
  else
    Ramaze::Log.error("Unable to pop timer in %s (no event in stack)" % _key)
    #raise RuntimeError, "Unable to pop timer in %s (no event in stack)" % _key
  end
end
##
# Removes all timers in the stack
#
# Deletes the whole per-session Redis key used by push_timer/pop_timer.
def clear_timers
  Ramaze::Log.debug("Cleared %s timers for %s" % [ @@redis.llen(_key), _key ])
  @@redis.del _key
end
##
# Sends a _pageview Fnordmetric event
#
# This method sends a specific _pageview event Fnordmetric event
# This event is treated in a special way by Fnordmetric (see doc).
#
# @param [String] url the URL that is accessed. Defaults to request.env['REQUEST_PATH']
#
# @example Logging all page views
#
#   If all your controllers inherit 'Controller', you can log all page views
#   very easily:
#
#   class Controller < Ramaze::Controller
#     helper :fnordmetric
#
#     before_all do
#       pageview
#     end
#
def pageview(url=request.env['REQUEST_PATH'])
  event(:_pageview, :url => url)
end
##
# Sets username for the current session
#
# This manually sets a user name for the current session. It calls the
# specific :_set_name Fnordmetric event
# This comes handy for user tracking
#
# @params [String] name the user name
#
def set_name(name)
  # Fnordmetric's special :_set_name event attaches a display name to the
  # current session (session id is added by #event).
  event(:_set_name, :name => name)
end
##
# Sets the picture URL for the user
#
# This manually sets a user picture for the current session. It calls the
# specific :_set_picture Fnordmetric event.
# Using this method, you'll be able to have a picture associated to the user
# in Fnordmetric's user tracking panel
#
# @param [String] url Picture url
#
# @example Using Gravatar to set user picture
#
#
# class Users < Controller
# helper :user, :gravatar, :fnordmetric
# ...
# def login
# ...
# redirect_referrer if logged_in?
# user_login(request.subset(:email, :password))
# if logged_in?
# set_name("#{user.name} #{user.surname}")
# set_picture(gravatar(user.email.to_s)) if user.email
# end
# ...
# end
#
# Sets the picture URL for the user's session via the special
# :_set_picture event.
# @param [String, URI] url picture URL (any object responding to #to_s)
def set_picture(url="http://placekitten.com/80/80")
  # Coerce to String unconditionally. The old check compared the class
  # name against 'URI::HTTP' only, so URI::HTTPS (and other URI
  # subclasses) slipped through un-coerced. String#to_s is a no-op,
  # so plain strings are unaffected.
  url = url.to_s
  event(:_set_picture, :url => url)
end
##
# Returns the Redis key
#
#
def _key # :nodoc:
  # Redis key of this session's timer stack:
  # "<sstack_key_root>.<innate session id>".
  "%s.%s" % [ @@sstack_key_root, session.sid ]
end
def _ttl
  # TTL (seconds) for the timer-stack key; falls back to the Innate
  # session TTL when the :fnord_helper_key_ttl trait is unset (nil/false).
  ancestral_trait[:fnord_helper_key_ttl] || Innate::Session.options.ttl
end
##
# Holds class methods
#
# This is used to extend the calling controller so these methods are
# available at the class level
# Since helpers are only included, extending the calling controller is
# done via the 'included' hook.
#
module ClassMethods
  ##
  # This method replaces the original controller method with a times
  # call that yields the original method.
  # This allows measuring execution time for the method without manually
  # modifying the method itself.
  #
  # @param [Symbol] method the method to measure
  # @param [Symbol] event_name the name of the event to send to Fnordmetric.
  # @param [Hash] args a hash of supplemental data to send
  #
  # @example Measuring execution time for a controller action
  #
  #   class Users < Controller
  #     helper :user, :gravatar, :fnordmetric
  #     ...
  #     def login
  #       ...
  #       # whatever login does
  #       ...
  #     end
  #     clock :login, :performance, :some_field => "some value"
  #
  def clock(method, event_name, args = {})
    # Alias target for the original implementation, kept private below.
    original = "__fnordmetric_%s" % method
    # Merge the fully qualified method name into the event payload.
    # NOTE(review): merge! mutates the caller's hash at class-definition
    # time — harmless for literal hashes, surprising for shared ones.
    args.merge!(:method => "#{self.name}##{method}")
    self.class_eval do
      # Replace the method with a wrapper that times the original call.
      alias_method original, method
      private original
      # The wrapper returns the original method's value: `times` returns
      # the block's result, and its `ensure` does not override it.
      define_method(method) { |*a| times(event_name, args) do send(original, *a) end }
    end
    Ramaze::Log.debug("Clo(a)cking enabled for %s (renamed as %s)" % [ method, original ])
  end
end
class Timer
  # NOTE(review): this class appears to be an unused stub — both methods
  # are empty. Presumably a placeholder for an object-oriented timer API;
  # confirm before relying on it.
  def initialize
  end
  # No-op; see note above.
  def pop
  end
end
end
end
end
Reverts beginning of some key model changes
A few lines had been added to use a single-key-based behaviour.
These lines have been removed so the gem can work properly for now.
The downside is that timers won't work if the innate.sid changes (but
nothing breaks, it's just not measured).
# encoding: utf-8
#
# Fnordmetric helper
#
#
require 'ramaze'
require 'fnordmetric'
require 'redis'
module Ramaze
module Helper
##
# This helper provides a convenience wrapper for sending events to
# Fnordmetric.
#
# Events can be anything; an event is just an indication that something happened.
# Fnordmetric can then compute aggregates on the events received per period.
#
# Since events can carry arbitrary data, this helper adds methods that send
# performance data to Fnordmetric, so one can easily measure code execution
# times.
#
# Events are associated with the Innate session id, and thus are linked to
# visitors of your site. This is really useful since you can, for instance,
# see how long a controller action took for a particular user.
#
# If you want to use a Redis server other than the usual localhost:6379, you
# need to define :fnord_redis_url trait, e.g. :
#
# trait :fnord_redis_url => "redis://redis.example.com:6332"
#
# TODO: @example Basic usage here...
# TODO: Implement optional with_id that uses specific id instead of innate.sid in conjunction with...
# TODO: simple keys instead of list, for the above
#
module Fnordmetric
# @@fnord will hold Fnordmetric API instance
# @@redis holds a Redis connection
# A timer is an Array holding the event name, a Hash of arguments and a timestamp
@@fnord = nil
@@redis = nil
@@sstack_key_root = "fnordmetric.%s.%s.%s" % [ ENV['HOSTNAME'] || "localhost", ENV['USER'], Ramaze.options.app.name.to_s ]
##
# We need clock as a class method
# Let's extend the includer when it includes us
def self.included(base)
  # Helpers are only *included*, so class-level macros (e.g. `clock`)
  # must be provided by extending the includer here.
  Ramaze::Log.debug("Fnordmetric helper is being included in %s" % base.name)
  base.extend(ClassMethods)
end
##
# Sends an event to Fnordmetric
#
# This helper method sends an event to Fnordmetric, populating the
# :_session field with the current innate sid.
#
# @param [Symbol] evt the name of the event to send to Fnordmetric.
# @param [Hash] args a hash of supplemental data to send
#
# Sends an event to Fnordmetric, tagging it with the current Innate
# session id. A falsy event name is silently ignored.
def event(evt, args = {})
  return unless evt
  # Connect lazily on first use.
  _connect unless @@fnord
  payload = { :_type => evt.to_s, :_session => session.sid }
  payload.merge!(args)
  Ramaze::Log.debug("Logging Fnordmetric event %s" % payload.inspect)
  @@fnord.event(payload)
end
##
# All in one timing function for a block
#
# This method will send an event containing the execution time of
# the passed block.
#
# @example Block style usage
#
# times(:performance, :method => :whatever) do
# # do stuff to be measured
# end
#
# @param [Symbol] event_name the name of the event to send to Fnordmetric.
# @param [Hash] args a hash of supplemental data to send
# @param [Block] block code to be executed and timed
#
def times(event_name, args = {}, &block)
  push_timer(event_name, args)
  # THINK: maybe raise, since there is no point in using times without a
  # block.
  yield if block_given?
ensure
  # Runs even when the block raises, so the timing event is always sent
  # and the stack stays balanced. The block's return value still
  # propagates (ensure does not override it).
  pop_timer
end
##
# Starts a timer and pushes it to the timers stack
#
# @param [Symbol] event_name the name of the event to send to Fnordmetric.
# @param [Hash] args a hash of supplemental data to send
#
# @example Push/Pop style usage
#
# push_timer(:performance, :field => :whatever)
# # some code
# pop_timer
#
def push_timer(event_name, args = {})
  _connect unless @@redis
  # Store the timer as a JSON [name, args, started_at_epoch] triple on a
  # Redis list so nested timers behave as a stack (see #pop_timer).
  @@redis.lpush(_key, [event_name, args, Time.now.to_f].to_json)
  # Refresh the TTL on every push so the stack expires with the session.
  @@redis.expire(_key, _ttl)
  Ramaze::Log.debug("Timer pushed and TTL set to %s for %s to %s (stack level is now %s)" %
                    [ _ttl,
                      event_name,
                      _key,
                      @@redis.llen(_key) ])
end
##
# Pops a timer and sends an event
#
# This method pops the last pushed timer and sends an event
# No arguments are needed, since they were stored by push_timer
#
def pop_timer
  len = @@redis.llen(_key)
  if len > 0
    json = @@redis.lpop(_key)
    # Timers are stored as JSON [name, args, started_at_epoch] triples
    # (see #push_timer).
    wat, args, wen = JSON.parse(json)
    Ramaze::Log.debug("Timer popped for %s (stack level is now %s)" % [ wat, len - 1])
    # We log millisecs
    time = Time.now-Time.at(wen)
    time *= 1000
    event(wat, args.merge(:time => time.to_i))
  else
    # Unbalanced pop: log only, do not raise.
    Ramaze::Log.error("Unable to pop timer in %s (no event in stack)" % _key)
    #raise RuntimeError, "Unable to pop timer in %s (no event in stack)" % _key
  end
end
##
# Removes all timers in the stack
#
def clear_timers
  # Log how many timers are being dropped, then delete the whole stack key.
  Ramaze::Log.debug("Cleared %s timers for %s" % [ @@redis.llen(_key), _key ])
  @@redis.del _key
end
##
# Sends a _pageview Fnordmetric event
#
# This method sends a specific _pageview event Fnordmetric event
# This event is treated in a special way by Fnordmetric (see doc).
#
# @param [String] url the URl that is accessed. Defaults to request.env['REQUEST_PATH']
#
# @example Logging all page views
#
# If all your controllers inherit 'Controller', you can log all page view
# very easily :
#
# class Controller < Ramaze::Controller
# helper :fnordmetric
#
# before_all do
# pageview
# end
#
def pageview(url=request.env['REQUEST_PATH'])
  # Delegates to #event using Fnordmetric's special :_pageview type.
  event(:_pageview, :url => url)
end
##
# Sets username for the current session
#
# This manually sets a user name for the current session. It calls the
# specific :_set_name Fnordmetric event
# This comes handy for user tracking
#
# @params [String] name the user name
#
def set_name(name)
  # Fnordmetric's special :_set_name event attaches a display name to the
  # current session (session id is added by #event).
  event(:_set_name, :name => name)
end
##
# Sets the picture URL for the user
#
# This manually sets a user picture for the current session. It calls the
# specific :_set_picture Fnordmetric event.
# Using this method, you'll be able to have a picture associated to the user
# in Fnordmetric's user tracking panel
#
# @param [String] url Picture url
#
# @example Using Gravatar to set user picture
#
#
# class Users < Controller
# helper :user, :gravatar, :fnordmetric
# ...
# def login
# ...
# redirect_referrer if logged_in?
# user_login(request.subset(:email, :password))
# if logged_in?
# set_name("#{user.name} #{user.surname}")
# set_picture(gravatar(user.email.to_s)) if user.email
# end
# ...
# end
#
# Sets the picture URL for the user's session via the special
# :_set_picture event.
# @param [String, URI] url picture URL (any object responding to #to_s)
def set_picture(url="http://placekitten.com/80/80")
  # Coerce to String unconditionally. The old check compared the class
  # name against 'URI::HTTP' only, so URI::HTTPS (and other URI
  # subclasses) slipped through un-coerced. String#to_s is a no-op,
  # so plain strings are unaffected.
  url = url.to_s
  event(:_set_picture, :url => url)
end
private
##
# Creates a fnordmetric instance, holding the Redis connection
#
#:nocov:
# Creates the FnordMetric API instance and the Redis connection.
# Reads the Redis URL from the :fnord_redis_url trait, falling back to
# localhost when the trait is absent.
def _connect
  Ramaze::Log.debug("In connect")
  url = begin
    ancestral_trait[:fnord_redis_url]
  rescue
    nil
  end
  # Fix: apply the default when the trait is missing OR nil. The old
  # ensure-based flow only assigned the default when ancestral_trait
  # raised, so a nil trait leaked nil into the connection options.
  url ||= "redis://localhost:6379"
  @@fnord = FnordMetric::API.new(:redis_url => url)
  @@redis = Redis.new(:url => url)
  Ramaze::Log.debug("Connected to FnordMetric")
end
#:nocov:
##
# Returns the Redis key
#
def _key
  # Redis key of this session's timer stack:
  # "<sstack_key_root>.<innate session id>".
  "%s.%s" % [ @@sstack_key_root, session.sid ]
end
##
# Returns the ttl to use for internal keys
#
def _ttl
  # TTL (seconds) for the timer-stack key; falls back to the Innate
  # session TTL when the :fnord_helper_key_ttl trait is unset (nil/false).
  ancestral_trait[:fnord_helper_key_ttl] || Innate::Session.options.ttl
end
##
# Holds class methods
#
# This is used to extend the calling controller so these methods are
# available at the class level
# Since helpers are only included, extending the calling controller is
# done via the 'included' hook.
#
module ClassMethods
  ##
  # This method replaces the original controller method with a times
  # call that yields the original method.
  # This allows measuring execution time for the method without manually
  # modifying the method itself.
  #
  # @param [Symbol] method the method to measure
  # @param [Symbol] event_name the name of the event to send to Fnordmetric.
  # @param [Hash] args a hash of supplemental data to send
  #
  # @example Measuring execution time for a controller action
  #
  #   class Users < Controller
  #     helper :user, :gravatar, :fnordmetric
  #     ...
  #     def login
  #       ...
  #       # whatever login does
  #       ...
  #     end
  #     clock :login, :performance, :some_field => "some value"
  #
  def clock(method, event_name, args = {})
    # Alias target for the original implementation, kept private below.
    original = "__fnordmetric_%s" % method
    # Merge the fully qualified method name into the event payload.
    # NOTE(review): merge! mutates the caller's hash at class-definition
    # time — harmless for literal hashes, surprising for shared ones.
    args.merge!(:method => "#{self.name}##{method}")
    self.class_eval do
      # Replace the method with a wrapper that times the original call.
      alias_method original, method
      private original
      # The wrapper returns the original method's value: `times` returns
      # the block's result, and its `ensure` does not override it.
      define_method(method) { |*a| times(event_name, args) do send(original, *a) end }
    end
    Ramaze::Log.debug("Clo(a)cking enabled for %s (renamed as %s)" % [ method, original ])
  end
end
end
end
end
|
module RapidFTR
  # One-shot database seeding helper for the default form sections.
  class DbSetup
    # Destroys every existing FormSection, then recreates the three
    # default sections (basic details, family details, caregiver details).
    # @return [true]
    def self.reset_default_form_section_definitions
      # Idiom: Symbol#to_proc instead of an explicit block.
      FormSection.all.each(&:destroy)
      basic_details_fields = [
        Field.new("name" => "name", "display_name" => "Name", "type" => "text_field"),
        Field.new("name" => "age", "display_name" => "Age", "type" => "text_field"),
        Field.new("name" => "age_is", "display_name" => "Age Is", "type" => "select_box", "option_strings" => ["Approximate", "Exact"]),
        Field.new("name" => "gender", "display_name" => "Gender", "type" => "radio_button", "option_strings" => ["Male", "Female"]),
        Field.new("name" => "origin", "display_name" => "Origin", "type" => "text_field"),
        Field.new("name" => "last_known_location", "display_name" => "Last Known Location", "type" => "text_field"),
        Field.new("name" => "date_of_separation", "display_name" => "Date of Separation", "type" => "select_box", "option_strings" => ["", "1-2 weeks ago", "2-4 weeks ago", "1-6 months ago", "6 months to 1 year ago", "More than 1 year ago"]),
        Field.new("name" => "current_photo_key", "display_name" => "Current Photo Key", "type" => "photo_upload_box"),
        Field.new("name" => "recorded_audio", "display_name" => "Recorded Audio", "type" => "audio_upload_box")
      ]
      # Basic details is the only non-editable section.
      FormSection.create!("name" =>"Basic details", "enabled"=>true, :description => "Basic information about a child", :order=> 1, :unique_id=>"basic_details", :editable => false, :fields => basic_details_fields)
      family_details_fields = [
        Field.new("name" => "fathers_name", "display_name" => "Fathers Name", "type" => "text_field"),
        Field.new("name" => "is_father_alive", "display_name" => "Is Father Alive", "type" => "radio_button", "option_strings" => ["Yes", "No", "Unknown"]),
        Field.new("name" => "reunite_with_father", "display_name" => "Reunite With Father", "type" => "select_box", "option_strings" => ["Yes", "No"]),
        Field.new("name" => "mothers_name", "display_name" => "Mothers Name", "type" => "text_field"),
        Field.new("name" => "is_mother_alive", "display_name" => "Is Mother Alive", "type" => "radio_button", "option_strings" => ["Yes", "No", "Unknown"]),
        Field.new("name" => "reunite_with_mother", "display_name" => "Reunite With Mother", "type" => "select_box", "option_strings" => ["Yes", "No"]),
        Field.new("name" => "relative_1_name", "display_name" => "Relative 1 Name", "type" => "text_field"),
        Field.new("name" => "relative_1_relationship", "display_name" => "Relative 1 Relationship", "type" => "text_field"),
        Field.new("name" => "is_relative_1_alive", "display_name" => "Is Relative 1 Alive", "type" => "radio_button", "option_strings" => ["Yes", "No", "Unknown"]),
        Field.new("name" => "reunite_with_relative_1", "display_name" => "Reunite With Relative 1", "type" => "select_box", "option_strings" => ["Yes", "No"]),
        Field.new("name" => "relative_2_name", "display_name" => "Relative 2 Name", "type" => "text_field"),
        Field.new("name" => "relative_2_relationship", "display_name" => "Relative 2 Relationship", "type" => "text_field"),
        Field.new("name" => "is_relative_2_alive", "display_name" => "Is Relative 2 Alive", "type" => "radio_button", "option_strings" => ["Yes", "No", "Unknown"]),
        Field.new("name" => "reunite_with_relative_2", "display_name" => "Reunite With Relative 2", "type" => "select_box", "option_strings" => ["Yes", "No"]),
        Field.new("name" => "relative_3_name", "display_name" => "Relative 3 Name", "type" => "text_field"),
        Field.new("name" => "relative_3_relationship", "display_name" => "Relative 3 Relationship", "type" => "text_field"),
        Field.new("name" => "is_relative_3_alive", "display_name" => "Is Relative 3 Alive", "type" => "radio_button", "option_strings" => ["Yes", "No", "Unknown"]),
        Field.new("name" => "reunite_with_relative_3", "display_name" => "Reunite With Relative 3", "type" => "select_box", "option_strings" => ["Yes", "No"]),
        Field.new("name" => "relative_4_name", "display_name" => "Relative 4 Name", "type" => "text_field"),
        Field.new("name" => "relative_4_relationship", "display_name" => "Relative 4 Relationship", "type" => "text_field"),
        Field.new("name" => "is_relative_4_alive", "display_name" => "Is Relative 4 Alive", "type" => "radio_button", "option_strings" => ["Yes", "No", "Unknown"]),
        Field.new("name" => "reunite_with_relative_4", "display_name" => "Reunite With Relative 4", "type" => "select_box", "option_strings" => ["Yes", "No"]),
        Field.new("name" => "married", "display_name" => "Married", "type" => "select_box", "option_strings" => ["Yes", "No"]),
        Field.new("name" => "spouse_or_partner_name", "display_name" => "Spouse or Partner Name", "type" => "text_field"),
        Field.new("name" => "no_of_children", "display_name" => "No of Children", "type" => "text_field"),
      ]
      FormSection.create!("name" =>"Family details", "enabled"=>true, :description =>"Information about a child's known family", :order=> 2, :unique_id=>"family_details", :fields => family_details_fields)
      caregiver_details_fields = [
        Field.new("name" => "caregivers_name", "display_name" => "Caregiver's Name", "type" => "text_field"),
        Field.new("name" => "occupation", "display_name" => "Occupation", "type" => "text_field"),
        Field.new("name" => "relationship_to_child", "display_name" => "Relationship to Child", "type" => "text_field"),
        Field.new("name" => "is_orphan", "display_name" => "Is Orphan?", "type" => "check_box"),
        # NOTE(review): "Is Refugees?" looks like a typo but is a seeded
        # user-facing string — left unchanged deliberately.
        Field.new("name" => "is_refugee", "display_name" => "Is Refugees?", "type" => "check_box"),
        Field.new("name" => "trafficked_child", "display_name" => "Trafficked child?", "type" => "check_box"),
        Field.new("name" => "in_interim_care", "display_name" => "In interim care?", "type" => "check_box"),
        Field.new("name" => "is_in_child_headed_household", "display_name" => "Is in child headed household?", "type" => "check_box"),
        Field.new("name" => "sick_or_injured", "display_name" => "Sick or injured?", "type" => "check_box"),
        Field.new("name" => "possible_physical_or_sexual_abuse", "display_name" => "Possible physical or sexual abuse?", "type" => "check_box"),
        Field.new("name" => "is_disabled", "display_name" => "Is disabled?", "type" => "check_box")
      ]
      FormSection.create!("name" =>"Caregiver details", "enabled"=>true, :description =>"Information about the child's current caregiver", :order=> 3, :unique_id=>"caregiver_details", :fields => caregiver_details_fields)
      return true
    end
  end
end
Changed appropriate default fields over to new 'numeric' type
module RapidFTR
  # One-shot database seeding helper for the default form sections.
  class DbSetup
    # Destroys every existing FormSection, then recreates the three
    # default sections (basic details, family details, caregiver details).
    # Age and No of Children use the numeric_text_field type.
    # @return [true]
    def self.reset_default_form_section_definitions
      # Idiom: Symbol#to_proc instead of an explicit block.
      FormSection.all.each(&:destroy)
      basic_details_fields = [
        Field.new("name" => "name", "display_name" => "Name", "type" => "text_field"),
        Field.new("name" => "age", "display_name" => "Age", "type" => "numeric_text_field"),
        Field.new("name" => "age_is", "display_name" => "Age Is", "type" => "select_box", "option_strings" => ["Approximate", "Exact"]),
        Field.new("name" => "gender", "display_name" => "Gender", "type" => "radio_button", "option_strings" => ["Male", "Female"]),
        Field.new("name" => "origin", "display_name" => "Origin", "type" => "text_field"),
        Field.new("name" => "last_known_location", "display_name" => "Last Known Location", "type" => "text_field"),
        Field.new("name" => "date_of_separation", "display_name" => "Date of Separation", "type" => "select_box", "option_strings" => ["", "1-2 weeks ago", "2-4 weeks ago", "1-6 months ago", "6 months to 1 year ago", "More than 1 year ago"]),
        Field.new("name" => "current_photo_key", "display_name" => "Current Photo Key", "type" => "photo_upload_box"),
        Field.new("name" => "recorded_audio", "display_name" => "Recorded Audio", "type" => "audio_upload_box")
      ]
      # Basic details is the only non-editable section.
      FormSection.create!("name" =>"Basic details", "enabled"=>true, :description => "Basic information about a child", :order=> 1, :unique_id=>"basic_details", :editable => false, :fields => basic_details_fields)
      family_details_fields = [
        Field.new("name" => "fathers_name", "display_name" => "Fathers Name", "type" => "text_field"),
        Field.new("name" => "is_father_alive", "display_name" => "Is Father Alive", "type" => "radio_button", "option_strings" => ["Yes", "No", "Unknown"]),
        Field.new("name" => "reunite_with_father", "display_name" => "Reunite With Father", "type" => "select_box", "option_strings" => ["Yes", "No"]),
        Field.new("name" => "mothers_name", "display_name" => "Mothers Name", "type" => "text_field"),
        Field.new("name" => "is_mother_alive", "display_name" => "Is Mother Alive", "type" => "radio_button", "option_strings" => ["Yes", "No", "Unknown"]),
        Field.new("name" => "reunite_with_mother", "display_name" => "Reunite With Mother", "type" => "select_box", "option_strings" => ["Yes", "No"]),
        Field.new("name" => "relative_1_name", "display_name" => "Relative 1 Name", "type" => "text_field"),
        Field.new("name" => "relative_1_relationship", "display_name" => "Relative 1 Relationship", "type" => "text_field"),
        Field.new("name" => "is_relative_1_alive", "display_name" => "Is Relative 1 Alive", "type" => "radio_button", "option_strings" => ["Yes", "No", "Unknown"]),
        Field.new("name" => "reunite_with_relative_1", "display_name" => "Reunite With Relative 1", "type" => "select_box", "option_strings" => ["Yes", "No"]),
        Field.new("name" => "relative_2_name", "display_name" => "Relative 2 Name", "type" => "text_field"),
        Field.new("name" => "relative_2_relationship", "display_name" => "Relative 2 Relationship", "type" => "text_field"),
        Field.new("name" => "is_relative_2_alive", "display_name" => "Is Relative 2 Alive", "type" => "radio_button", "option_strings" => ["Yes", "No", "Unknown"]),
        Field.new("name" => "reunite_with_relative_2", "display_name" => "Reunite With Relative 2", "type" => "select_box", "option_strings" => ["Yes", "No"]),
        Field.new("name" => "relative_3_name", "display_name" => "Relative 3 Name", "type" => "text_field"),
        Field.new("name" => "relative_3_relationship", "display_name" => "Relative 3 Relationship", "type" => "text_field"),
        Field.new("name" => "is_relative_3_alive", "display_name" => "Is Relative 3 Alive", "type" => "radio_button", "option_strings" => ["Yes", "No", "Unknown"]),
        Field.new("name" => "reunite_with_relative_3", "display_name" => "Reunite With Relative 3", "type" => "select_box", "option_strings" => ["Yes", "No"]),
        Field.new("name" => "relative_4_name", "display_name" => "Relative 4 Name", "type" => "text_field"),
        Field.new("name" => "relative_4_relationship", "display_name" => "Relative 4 Relationship", "type" => "text_field"),
        Field.new("name" => "is_relative_4_alive", "display_name" => "Is Relative 4 Alive", "type" => "radio_button", "option_strings" => ["Yes", "No", "Unknown"]),
        Field.new("name" => "reunite_with_relative_4", "display_name" => "Reunite With Relative 4", "type" => "select_box", "option_strings" => ["Yes", "No"]),
        Field.new("name" => "married", "display_name" => "Married", "type" => "select_box", "option_strings" => ["Yes", "No"]),
        Field.new("name" => "spouse_or_partner_name", "display_name" => "Spouse or Partner Name", "type" => "text_field"),
        Field.new("name" => "no_of_children", "display_name" => "No of Children", "type" => "numeric_text_field"),
      ]
      FormSection.create!("name" =>"Family details", "enabled"=>true, :description =>"Information about a child's known family", :order=> 2, :unique_id=>"family_details", :fields => family_details_fields)
      caregiver_details_fields = [
        Field.new("name" => "caregivers_name", "display_name" => "Caregiver's Name", "type" => "text_field"),
        Field.new("name" => "occupation", "display_name" => "Occupation", "type" => "text_field"),
        Field.new("name" => "relationship_to_child", "display_name" => "Relationship to Child", "type" => "text_field"),
        Field.new("name" => "is_orphan", "display_name" => "Is Orphan?", "type" => "check_box"),
        # NOTE(review): "Is Refugees?" looks like a typo but is a seeded
        # user-facing string — left unchanged deliberately.
        Field.new("name" => "is_refugee", "display_name" => "Is Refugees?", "type" => "check_box"),
        Field.new("name" => "trafficked_child", "display_name" => "Trafficked child?", "type" => "check_box"),
        Field.new("name" => "in_interim_care", "display_name" => "In interim care?", "type" => "check_box"),
        Field.new("name" => "is_in_child_headed_household", "display_name" => "Is in child headed household?", "type" => "check_box"),
        Field.new("name" => "sick_or_injured", "display_name" => "Sick or injured?", "type" => "check_box"),
        Field.new("name" => "possible_physical_or_sexual_abuse", "display_name" => "Possible physical or sexual abuse?", "type" => "check_box"),
        Field.new("name" => "is_disabled", "display_name" => "Is disabled?", "type" => "check_box")
      ]
      FormSection.create!("name" =>"Caregiver details", "enabled"=>true, :description =>"Information about the child's current caregiver", :order=> 3, :unique_id=>"caregiver_details", :fields => caregiver_details_fields)
      return true
    end
  end
end
|
#
# Be sure to run `pod lib lint OHMySQL.podspec' to ensure this is a
# valid spec before submitting.
Pod::Spec.new do |s|
  s.name = 'OHMySQL'
  s.version = '2.1.1'
  s.summary = 'A simple Objective-C wrapper for MySQL C API.'
  s.description = <<-DESC
You can connect to your remote MySQL database using OHMySQL API. It allows you doing queries in easy and object-oriented way. Common queries such as SELECT, INSERT, DELETE, JOIN are wrapped by Objective-C code and you don't need to dive into MySQL C API.
  DESC
  s.homepage = 'https://github.com/oleghnidets/OHMySQL'
  s.license = 'MIT'
  s.author = { 'Oleg Hnidets' => 'oleg.oleksan@gmail.com' }
  s.source = { :git => 'https://github.com/oleghnidets/OHMySQL.git', :tag => s.version.to_s }
  s.ios.deployment_target = '8.0'
  s.osx.deployment_target = '10.9'
  s.requires_arc = true
  # Wrapper sources plus the vendored MySQL client headers.
  s.source_files = 'OHMySQL/**/*.{h,m}', 'OHMySQL/lib/include/**/**/*.{h}'
  # The raw MySQL C headers are implementation details; keep them private.
  s.private_header_files = 'OHMySQL/lib/include/**/**/*.{h}'
  s.frameworks = 'Foundation'
  # Prebuilt static MySQL client library, one per platform.
  s.ios.vendored_libraries = 'OHMySQL/lib/ios/libmysqlclient.a'
  s.osx.vendored_libraries = 'OHMySQL/lib/mac/libmysqlclient.a'
  # libmysqlclient links against the C++ runtime.
  s.library = "c++"
end
Update podspec.
#
# Be sure to run `pod lib lint OHMySQL.podspec' to ensure this is a
# valid spec before submitting.
Pod::Spec.new do |s|
  s.name = 'OHMySQL'
  s.version = '2.1.2'
  s.summary = 'A simple Objective-C wrapper for MySQL C API.'
  s.description = <<-DESC
You can connect to your remote MySQL database using OHMySQL API. It allows you doing queries in easy and object-oriented way. Common queries such as SELECT, INSERT, DELETE, JOIN are wrapped by Objective-C code and you don't need to dive into MySQL C API.
  DESC
  s.homepage = 'https://github.com/oleghnidets/OHMySQL'
  s.license = 'MIT'
  s.author = { 'Oleg Hnidets' => 'oleg.oleksan@gmail.com' }
  s.source = { :git => 'https://github.com/oleghnidets/OHMySQL.git', :tag => s.version.to_s }
  s.ios.deployment_target = '8.0'
  s.osx.deployment_target = '10.9'
  s.requires_arc = true
  # Wrapper sources plus the vendored MySQL client headers.
  s.source_files = 'OHMySQL/**/*.{h,m}', 'OHMySQL/lib/include/**/**/*.{h}'
  # The raw MySQL C headers are implementation details; keep them private.
  s.private_header_files = 'OHMySQL/lib/include/**/**/*.{h}'
  s.frameworks = 'Foundation'
  # Prebuilt static MySQL client library, one per platform.
  s.ios.vendored_libraries = 'OHMySQL/lib/ios/libmysqlclient.a'
  s.osx.vendored_libraries = 'OHMySQL/lib/mac/libmysqlclient.a'
  # libmysqlclient links against the C++ runtime.
  s.library = "c++"
end
|
class Repositext
# Represents a collection of git content repositories.
# Assumes that all repositories are siblings in the same folder.
# Expects current directory to be a repositext content repo root path.
# Usage example:
# repository_set = RepositorySet.new('/repositories/parent/path')
# repository_set.git_pull(:all_content_repos)
class RepositorySet
attr_reader :repo_set_parent_path
def self.content_type_names
  # Only one content type is currently defined.
  %w[
    general
  ]
end
# @param repo_set_parent_path [String] path to the folder that contains all repos.
def initialize(repo_set_parent_path)
  @repo_set_parent_path = repo_set_parent_path
end
def all_content_repo_names
  # Primary (English) repo first, then all foreign-language repos.
  [primary_repo_name] + foreign_content_repo_names
end
def all_repo_names
  # Every content repo plus the code repos.
  all_content_repo_names + code_repo_names
end
# Names of the (non-content) code repositories in the set.
def code_repo_names
  ['repositext', 'suspension']
end
def content_type_names
  # Instance-level convenience wrapper around the class method.
  self.class.content_type_names
end
# Names of all content repos except the primary (English) one.
def foreign_content_repo_names
  ['french', 'german', 'italian', 'spanish']
end
# Returns an array of paths to all repos in repo_set
# @param repo_set [Symbol, Array<String>] A symbol describing a predefined
# group of repos, or an Array with specific repo names as strings.
def all_repo_paths(repo_set)
  # Thin public wrapper around the protected path computation.
  compute_repo_paths(repo_set)
end
# Clones all git repos that don't exist on local filesystem yet.
# @param repo_set [Symbol, Array<String>] A symbol describing a predefined
# group of repos, or an Array with specific repo names as strings.
# @example Clone all language repos
# # cd into primary repo root folder
# # run `bundle console`
# repository_set = Repositext::RepositorySet.new('/path/to/repos/parent/folder')
# repository_set.git_clone_missing_repos(:all_content_repos)
def git_clone_missing_repos(repo_set)
  compute_repo_paths(repo_set).each { |repo_path|
    repo_name = repo_path.split('/').last
    # Fix: File.exists? is deprecated (removed in Ruby 3.2); use File.exist?.
    if File.exist?(repo_path)
      puts " - Skipping #{ repo_name }"
      next
    end
    puts " - Cloning #{ repo_name }"
    # Clone from the internal git host into the shared parent folder.
    clone_command = "git clone git@vgrtr.vgr.local:vgr-text-repository/#{ repo_name }.git"
    cmd = %(cd #{ repo_set_parent_path } && #{ clone_command })
    Open3.popen3(cmd) do |stdin, stdout, stderr, wait_thr|
      # Blocks until the clone finishes.
      exit_status = wait_thr.value
      if exit_status.success?
        puts " - Cloned #{ repo_name }"
      else
        msg = %(Could not clone #{ repo_name }:\n\n)
        puts(msg + stderr.read)
      end
    end
  }
end
# Makes sure that all content repos are ready for git operations:
# * They are on master branch
# * They have no uncommitted changes
# * They pulled latest from origin
# @param repo_set [Symbol, Array<String>] A symbol describing a predefined
# group of repos, or an Array with specific repo names as strings.
# @param block [Proc, optional] will be called for each repo.
# @return [Hash] with repos that are not ready. Keys are repo paths, values
# are arrays with issue messages if any exist.
def git_ensure_repos_are_ready(repo_set)
  repos_with_issues = {}
  compute_repo_paths(repo_set).each { |repo_path|
    if block_given?
      # Fix: pass the current repo path to the caller's block. Previously
      # `yield` was invoked with no arguments, so the documented
      # per-repo callback received nothing useful.
      yield(repo_path)
    end
    repo_issues = []
    cmd = %(cd #{ repo_path } && git pull && git status)
    Open3.popen3(cmd) do |stdin, stdout, stderr, wait_thr|
      exit_status = wait_thr.value
      r = stdout.read
      if !r.index('On branch master')
        repo_issues << "Is not on master branch."
      end
      if !r.index(%(Your branch is up-to-date with 'origin/master'))
        repo_issues << "Is not up-to-date with origin"
      end
      # NOTE(review): newer git says "working tree clean"; this matches the
      # older "working directory clean" wording — confirm the git version
      # in use, otherwise clean repos are flagged as dirty.
      if !r.index(%(nothing to commit, working directory clean))
        repo_issues << "Has uncommitted changes"
      end
      if !exit_status.success?
        repo_issues << "Error: could not check repo (#{ stderr.read })"
      end
    end
    if repo_issues.any?
      repos_with_issues[repo_path] = repo_issues
    end
  }
  repos_with_issues
end
# Pulls all repos
# @param repo_set [Symbol, Array<String>] A symbol describing a predefined
# group of repos, or an Array with specific repo names as strings.
# Runs `git pull` in every repo of the given set, reporting success or
# the captured stderr on failure.
def git_pull(repo_set)
  compute_repo_paths(repo_set).each do |repo_path|
    pull_command = %(cd #{ repo_path } && git pull)
    Open3.popen3(pull_command) do |_stdin, _stdout, stderr, wait_thr|
      # `value` blocks until the subprocess has finished.
      if wait_thr.value.success?
        puts " - Pulled #{ repo_path }"
      else
        puts(%(Could not pull #{ repo_path }:\n\n) + stderr.read)
      end
    end
  end
end
# Pushes all repos to remote_spec
# @param repo_set [Symbol, Array<String>] A symbol describing a predefined
# group of repos, or an Array with specific repo names as strings.
# @param remote_spec [String, optional] defaults to 'origin'
def git_push(repo_set, remote_spec = 'origin')
  compute_repo_paths(repo_set).each { |repo_path|
    cmd = %(cd #{ repo_path } && git push #{ remote_spec })
    Open3.popen3(cmd) do |stdin, stdout, stderr, wait_thr|
      # Blocks until the push finishes.
      exit_status = wait_thr.value
      if exit_status.success?
        puts " - Pushed #{ repo_path }"
      else
        msg = %(Could not push #{ repo_path }:\n\n)
        puts(msg + stderr.read)
      end
    end
  }
end
# Prints git_status for all repos
# @param repo_set [Symbol, Array<String>] A symbol describing a predefined
# group of repos, or an Array with specific repo names as strings.
def git_status(repo_set)
  compute_repo_paths(repo_set).each { |repo_path|
    puts '-' * 80
    puts "Git status for #{ repo_path }"
    # Fix: use Dir.chdir's block form so the working directory is
    # restored after each repo. FileUtils.cd changed the process cwd
    # permanently, leaking it across iterations and past this method.
    Dir.chdir(repo_path) do
      puts `git status`
    end
  }
  true
end
# Initializes any empty content repositories.
# @param primary_language_repo_path [String]
def initialize_empty_content_repos(primary_language_repo_path)
  compute_repo_paths(:all_content_repos).each { |repo_path|
    repo_name = repo_path.split('/').last
    # Fix: File.exists? is deprecated (removed in Ruby 3.2); use File.exist?.
    # A data.json file marks an already-initialized repo.
    if File.exist?(File.join(repo_path, 'data.json'))
      puts " - Skipping #{ repo_name } (`data.json` file already exists)"
      next
    end
    puts " - Initializing #{ repo_name }"
    # Create directories
    puts "   - Creating directories"
    create_default_content_directory_structure(repo_path)
    # Copy standard files
    puts "   - Copying standard files"
    copy_default_content_repo_files(repo_path, primary_language_repo_path)
    # TODO: Figure out how to run bundle install from Ruby so it works.
    # Bundle install
    # puts "   - Installing RubyGems"
    # cmd = %(cd #{ repo_path } && bundle install)
    # Open3.popen3(cmd) do |stdin, stdout, stderr, wait_thr|
    #   exit_status = wait_thr.value
    #   if exit_status.success?
    #     puts "   - Gems installed"
    #   else
    #     msg = %(Could not install Gems:\n\n)
    #     puts(msg + stderr.read)
    #   end
    # end
  }
end
def primary_repo_name
  # English is the primary (source) language repo.
  'english'
end
# Replaces text in all repositories.
# NOTE(review): unimplemented stub — filename and block are currently ignored.
def replace_text(filename, &block)
end
# Allows running of any command (e.g., export, fix, report, validate) on
# a repository set.
# @param repo_set [Symbol, Array<String>] A symbol describing a predefined
# group of repos, or an Array with specific repo names as strings.
# @param command_string [String] the command to run on the command line,
# e.g., "repositext general fix update_rtfiles_to_settings_hierarchy -g"
# Runs the given command line string inside each repo of repo_set.
# @param repo_set [Symbol, Array<String>] A symbol describing a predefined
#     group of repos, or an Array with specific repo names as strings.
# @param command_string [String] the command to run on the command line,
#     e.g., "repositext general fix update_rtfiles_to_settings_hierarchy -g"
def run_repositext_command(repo_set, command_string)
  puts " - Running command `#{ command_string }`"
  compute_repo_paths(repo_set).each { |repo_path|
    # Fix: repo_name was previously never assigned in this method and
    # raised NameError on first use; derive it from the repo path the same
    # way the sibling methods do.
    repo_name = repo_path.split('/').last
    puts " - in #{ repo_name }"
    cmd = %(cd #{ repo_path } && #{ command_string })
    Open3.popen3(cmd) do |stdin, stdout, stderr, wait_thr|
      exit_status = wait_thr.value
      if exit_status.success?
        puts " - completed"
      else
        msg = %(Could not run command in #{ repo_name }:\n\n)
        puts(msg + stderr.read)
      end
    end
  }
end
# Updates all gems in language repos.
# Interactive, semi-manual workflow for updating Rubygems across all
# content repos. Blocks on $stdin.gets twice, waiting for the operator to
# perform the on-screen steps.
def update_all_rubygems
  puts
  puts "This command assists in updating Rubygems in all content repos."
  puts
  puts "Please follow the onscreen instructions (=>) and hit enter after each completed step."
  puts "No problem if you make a mistake, just re-run the command."
  # Pull code repos (to get Gemfile updates)
  puts
  puts "Pulling updates for code repos"
  compute_repo_paths(:code_repos).each { |repo_path|
    repo_name = repo_path.split('/').last
    puts " - #{ repo_name }"
    cmd = %(cd #{ repo_path } && git pull)
    Open3.popen3(cmd) do |stdin, stdout, stderr, wait_thr|
      exit_status = wait_thr.value
      # Only failures are reported; successful pulls stay quiet.
      if !exit_status.success?
        msg = %(Could not pull #{ repo_name }:\n\n)
        puts(msg + stderr.read)
      end
    end
  }
  puts
  # Bundle install code + content repos
  puts "=> run `bundle install` in primary content repo to update 'Gemfile.lock', then press <Enter>."
  $stdout.flush
  $stdin.gets
  puts "Copying 'Gemfile.lock' to all foreign repos"
  primary_gemfile_lock_path = File.join(
    compute_repo_paths(:primary_repo).first,
    'Gemfile.lock'
  )
  compute_repo_paths(:foreign_content_repos).each { |foreign_repo_path|
    foreign_repo_name = foreign_repo_path.split('/').last
    puts " - #{ foreign_repo_name }"
    FileUtils.cp(primary_gemfile_lock_path, foreign_repo_path)
  }
  puts
  puts "=> commit changes to 'Gemfile.lock' in all repos and push them to origin, then press <Enter>."
  $stdout.flush
  $stdin.gets
  # Wrap up message
  puts
  puts "Command completed."
end
protected
# Returns collection of paths to all repos in repo_set
# @param repo_set [Symbol, Array<String>] A symbol describing a predefined
# group of repos, or an Array with specific repo names as strings.
# Resolves a repo_set specifier to absolute repo paths under
# repo_set_parent_path.
#
# @param repo_set [Symbol, Array<String>] an explicit list of repo names,
#     or a symbol naming a predefined group of repos.
# @return [Array<String>] absolute repo paths
# @raise [ArgumentError] for an unrecognized symbol
def compute_repo_paths(repo_set)
  repo_names =
    case repo_set
    when Array                  then repo_set
    when :all_content_repos     then all_content_repo_names
    when :all_repos             then all_repo_names
    when :code_repos            then code_repo_names
    when :foreign_content_repos then foreign_content_repo_names
    when :primary_repo          then [primary_repo_name]
    when :test_content_repos    then all_content_repo_names.first(2)
    else raise ArgumentError.new("Invalid repo_set: #{ repo_set.inspect }")
    end
  repo_names.map { |name| File.join(repo_set_parent_path, name) }
end
# @param repo_root_path [String] absolute path to root of repo
def create_default_content_directory_structure(repo_root_path)
  # root level directories: `data` plus one `ct-<name>` dir per content type
  (
    %w[data] +
    content_type_names.map{ |e| "ct-#{ e }" }
  ).each do |rel_path|
    FileUtils.mkdir_p(File.join(repo_root_path, rel_path))
  end
  # per content_type directories. Parents are listed before children:
  # FileUtils.mkdir (unlike mkdir_p) does not create intermediate dirs.
  content_type_names.each do |content_type_name|
    %w[
      content
      lucene_table_export
      lucene_table_export/json_export
      lucene_table_export/L232
      lucene_table_export/L232/full
      lucene_table_export/L232/full/lucene_index
      lucene_table_export/L232/short
      lucene_table_export/L232/short/lucene_index
      lucene_table_export/L472
      lucene_table_export/L472/full
      lucene_table_export/L472/full/lucene_index
      lucene_table_export/L472/short
      lucene_table_export/L472/short/lucene_index
      pdf_export
      reports
      staging
    ].each do |rel_path|
      FileUtils.mkdir(
        File.join(repo_root_path, "ct-#{ content_type_name }", rel_path)
      )
    end
  end
end
# @param repo_root_path [String] absolute path to root of new repo
# @param primary_language_repo_path [String] absolute path
def copy_default_content_repo_files(repo_root_path, primary_language_repo_path)
  # Copy files that are the same between primary and foreign repos
  [
    '.gitignore',
    '.ruby-gemset',
    '.ruby-version',
    'Gemfile',
    'Gemfile.lock',
    'readme.md',
  ].each do |filename|
    FileUtils.cp(
      File.join(primary_language_repo_path, filename),
      repo_root_path
    )
  end
  # Copy repository level data.json file from code template.
  # Repo dir names carry a `vgr-` prefix that is stripped to look up the
  # language.
  repo_dir_name = repo_root_path.split('/').last.sub(/\Avgr\-/, '')
  language = Language.find_by_repo_dir_name(repo_dir_name)
  # These ivars are consumed by the ERB templates via `binding` below.
  @langcode_2 = language.code_2_chars
  @langcode_3 = language.code_3_chars
  erb_template = ERB.new(File.read(repository_level_data_json_file_template_path))
  dj_output_path = File.join(repo_root_path, 'data.json')
  File.write(dj_output_path, erb_template.result(binding))
  # Copy content_type level Rtfiles from code template
  content_type_names.each do |content_type_name|
    # Also consumed by the Rtfile ERB template via `binding`.
    @content_type_name = content_type_name
    erb_template = ERB.new(File.read(rtfile_template_path))
    rtfile_output_path = File.join(repo_root_path, "ct-#{ content_type_name }", 'Rtfile')
    File.write(rtfile_output_path, erb_template.result(binding))
  end
end
# Returns the absolute path to the repository level data.json template to
# use for new language repos.
def repository_level_data_json_file_template_path
  # Resolved relative to this source file's location (four directories up).
  File.expand_path("../../../../repositext/templates/repository-level-data.json.erb", __FILE__)
end
# Returns the absolute path to the content_type level Rtfile templates to
# use for new language repos.
def rtfile_template_path
  # Resolved relative to this source file's location (four directories up).
  File.expand_path("../../../../repositext/templates/Rtfile.erb", __FILE__)
end
end
end
RepositorySet: Improved #git_ensure_repos_are_ready
class Repositext
  # Represents a collection of git content repositories.
  # Assumes that all repositories are siblings in the same folder.
  # Expects current directory to be a repositext content repo root path.
  # Usage example:
  #     repository_set = RepositorySet.new('/repositories/parent/path')
  #     repository_set.git_pull(:all_content_repos)
  class RepositorySet
    attr_reader :repo_set_parent_path

    # Content types present in every content repo (one `ct-<name>` dir each).
    def self.content_type_names
      %w[
        general
      ]
    end

    # @param repo_set_parent_path [String] path to the folder that contains all repos.
    def initialize(repo_set_parent_path)
      @repo_set_parent_path = repo_set_parent_path
    end

    # Primary repo first, then the foreign language repos.
    def all_content_repo_names
      [primary_repo_name] + foreign_content_repo_names
    end

    def all_repo_names
      all_content_repo_names + code_repo_names
    end

    def code_repo_names
      %w[
        repositext
        suspension
      ]
    end

    def content_type_names
      self.class.content_type_names
    end

    def foreign_content_repo_names
      %w[
        french
        german
        italian
        spanish
      ]
    end

    # Returns an array of paths to all repos in repo_set
    # @param repo_set [Symbol, Array<String>] A symbol describing a predefined
    #     group of repos, or an Array with specific repo names as strings.
    def all_repo_paths(repo_set)
      compute_repo_paths(repo_set)
    end

    # Clones all git repos that don't exist on local filesystem yet.
    # @param repo_set [Symbol, Array<String>] A symbol describing a predefined
    #     group of repos, or an Array with specific repo names as strings.
    # @example Clone all language repos
    #     # cd into primary repo root folder
    #     # run `bundle console`
    #     repository_set = Repositext::RepositorySet.new('/path/to/repos/parent/folder')
    #     repository_set.git_clone_missing_repos(:all_content_repos)
    def git_clone_missing_repos(repo_set)
      compute_repo_paths(repo_set).each { |repo_path|
        repo_name = repo_path.split('/').last
        # Fix: File.exists? was removed in Ruby 3.2; use File.exist?.
        if File.exist?(repo_path)
          puts " - Skipping #{ repo_name }"
          next
        end
        puts " - Cloning #{ repo_name }"
        clone_command = "git clone git@vgrtr.vgr.local:vgr-text-repository/#{ repo_name }.git"
        cmd = %(cd #{ repo_set_parent_path } && #{ clone_command })
        Open3.popen3(cmd) do |stdin, stdout, stderr, wait_thr|
          exit_status = wait_thr.value
          if exit_status.success?
            puts " - Cloned #{ repo_name }"
          else
            msg = %(Could not clone #{ repo_name }:\n\n)
            puts(msg + stderr.read)
          end
        end
      }
    end

    # Makes sure that all content repos are ready for git operations:
    # * They are on master branch
    # * They have no uncommitted changes
    # * They pulled latest from origin
    # @param repo_set [Symbol, Array<String>] A symbol describing a predefined
    #     group of repos, or an Array with specific repo names as strings.
    # @param block [Proc, optional] will be called for each repo.
    # @return [Hash] with repos that are not ready. Keys are repo paths, values
    #     are arrays with issue messages if any exist.
    def git_ensure_repos_are_ready(repo_set)
      repos_with_issues = {}
      compute_repo_paths(repo_set).each { |repo_path|
        if block_given?
          yield(repo_path)
        end
        repo_issues = []
        cmd = %(cd #{ repo_path } && git pull && git status)
        Open3.popen3(cmd) do |stdin, stdout, stderr, wait_thr|
          exit_status = wait_thr.value
          r = stdout.read
          # NOTE(review): these checks match exact `git status` phrasings;
          # newer git versions changed some wording (e.g. "working directory
          # clean" became "working tree clean") — confirm against the git
          # version in use.
          if !r.index('On branch master')
            repo_issues << "Is not on master branch."
          end
          if !r.index(%(Your branch is up-to-date with 'origin/master'))
            repo_issues << "Is not up-to-date with origin"
          end
          if !(
            r.index(%(nothing to commit, working directory clean)) ||
            r.index(%(nothing added to commit but untracked files present))
          )
            repo_issues << "Has uncommitted changes"
          end
          if !exit_status.success?
            repo_issues << "Error: could not check repo (#{ stderr.read })"
          end
        end
        if repo_issues.any?
          repos_with_issues[repo_path] = repo_issues
        end
      }
      repos_with_issues
    end

    # Pulls all repos
    # @param repo_set [Symbol, Array<String>] A symbol describing a predefined
    #     group of repos, or an Array with specific repo names as strings.
    def git_pull(repo_set)
      compute_repo_paths(repo_set).each { |repo_path|
        cmd = %(cd #{ repo_path } && git pull)
        Open3.popen3(cmd) do |stdin, stdout, stderr, wait_thr|
          exit_status = wait_thr.value
          if exit_status.success?
            puts " - Pulled #{ repo_path }"
          else
            msg = %(Could not pull #{ repo_path }:\n\n)
            puts(msg + stderr.read)
          end
        end
      }
    end

    # Pushes all repos to remote_spec
    # @param repo_set [Symbol, Array<String>] A symbol describing a predefined
    #     group of repos, or an Array with specific repo names as strings.
    # @param remote_spec [String, optional] defaults to 'origin'
    def git_push(repo_set, remote_spec = 'origin')
      compute_repo_paths(repo_set).each { |repo_path|
        cmd = %(cd #{ repo_path } && git push #{ remote_spec })
        Open3.popen3(cmd) do |stdin, stdout, stderr, wait_thr|
          exit_status = wait_thr.value
          if exit_status.success?
            puts " - Pushed #{ repo_path }"
          else
            msg = %(Could not push #{ repo_path }:\n\n)
            puts(msg + stderr.read)
          end
        end
      }
    end

    # Prints git_status for all repos
    # NOTE(review): FileUtils.cd changes the process-wide cwd and never
    # restores it.
    # @param repo_set [Symbol, Array<String>] A symbol describing a predefined
    #     group of repos, or an Array with specific repo names as strings.
    def git_status(repo_set)
      compute_repo_paths(repo_set).each { |repo_path|
        puts '-' * 80
        puts "Git status for #{ repo_path }"
        FileUtils.cd(repo_path)
        puts `git status`
      }
      true
    end

    # Initializes any empty content repositories.
    # @param primary_language_repo_path [String]
    def initialize_empty_content_repos(primary_language_repo_path)
      compute_repo_paths(:all_content_repos).each { |repo_path|
        repo_name = repo_path.split('/').last
        # Fix: File.exists? was removed in Ruby 3.2; use File.exist?.
        if File.exist?(File.join(repo_path, 'data.json'))
          puts " - Skipping #{ repo_name } (`data.json` file already exists)"
          next
        end
        puts " - Initializing #{ repo_name }"
        # Create directories
        puts " - Creating directories"
        create_default_content_directory_structure(repo_path)
        # Copy standard files
        puts " - Copying standard files"
        copy_default_content_repo_files(repo_path, primary_language_repo_path)
        # TODO: Figure out how to run bundle install from Ruby so it works.
        # Bundle install
        # puts " - Installing RubyGems"
        # cmd = %(cd #{ repo_path } && bundle install)
        # Open3.popen3(cmd) do |stdin, stdout, stderr, wait_thr|
        #   exit_status = wait_thr.value
        #   if exit_status.success?
        #     puts " - Gems installed"
        #   else
        #     msg = %(Could not install Gems:\n\n)
        #     puts(msg + stderr.read)
        #   end
        # end
      }
    end

    # Name of the primary (source-language) content repository.
    def primary_repo_name
      'english'
    end

    # Replaces text in all repositories
    def replace_text(filename, &block)
      # NOTE(review): empty stub — filename and block are ignored.
    end

    # Allows running of any command (e.g., export, fix, report, validate) on
    # a repository set.
    # @param repo_set [Symbol, Array<String>] A symbol describing a predefined
    #     group of repos, or an Array with specific repo names as strings.
    # @param command_string [String] the command to run on the command line,
    #     e.g., "repositext general fix update_rtfiles_to_settings_hierarchy -g"
    def run_repositext_command(repo_set, command_string)
      puts " - Running command `#{ command_string }`"
      compute_repo_paths(repo_set).each { |repo_path|
        # Fix: repo_name was previously never assigned in this method and
        # raised NameError on first use.
        repo_name = repo_path.split('/').last
        puts " - in #{ repo_name }"
        cmd = %(cd #{ repo_path } && #{ command_string })
        Open3.popen3(cmd) do |stdin, stdout, stderr, wait_thr|
          exit_status = wait_thr.value
          if exit_status.success?
            puts " - completed"
          else
            msg = %(Could not run command in #{ repo_name }:\n\n)
            puts(msg + stderr.read)
          end
        end
      }
    end

    # Updates all gems in language repos. Interactive: blocks on $stdin.gets
    # twice while the operator performs the on-screen steps.
    def update_all_rubygems
      puts
      puts "This command assists in updating Rubygems in all content repos."
      puts
      puts "Please follow the onscreen instructions (=>) and hit enter after each completed step."
      puts "No problem if you make a mistake, just re-run the command."
      # Pull code repos (to get Gemfile updates)
      puts
      puts "Pulling updates for code repos"
      compute_repo_paths(:code_repos).each { |repo_path|
        repo_name = repo_path.split('/').last
        puts " - #{ repo_name }"
        cmd = %(cd #{ repo_path } && git pull)
        Open3.popen3(cmd) do |stdin, stdout, stderr, wait_thr|
          exit_status = wait_thr.value
          if !exit_status.success?
            msg = %(Could not pull #{ repo_name }:\n\n)
            puts(msg + stderr.read)
          end
        end
      }
      puts
      # Bundle install code + content repos
      puts "=> run `bundle install` in primary content repo to update 'Gemfile.lock', then press <Enter>."
      $stdout.flush
      $stdin.gets
      puts "Copying 'Gemfile.lock' to all foreign repos"
      primary_gemfile_lock_path = File.join(
        compute_repo_paths(:primary_repo).first,
        'Gemfile.lock'
      )
      compute_repo_paths(:foreign_content_repos).each { |foreign_repo_path|
        foreign_repo_name = foreign_repo_path.split('/').last
        puts " - #{ foreign_repo_name }"
        FileUtils.cp(primary_gemfile_lock_path, foreign_repo_path)
      }
      puts
      puts "=> commit changes to 'Gemfile.lock' in all repos and push them to origin, then press <Enter>."
      $stdout.flush
      $stdin.gets
      # Wrap up message
      puts
      puts "Command completed."
    end

  protected

    # Returns collection of paths to all repos in repo_set
    # @param repo_set [Symbol, Array<String>] A symbol describing a predefined
    #     group of repos, or an Array with specific repo names as strings.
    # @raise [ArgumentError] for an unrecognized symbol
    def compute_repo_paths(repo_set)
      repo_names = case repo_set
      when Array
        repo_set
      when :all_content_repos
        all_content_repo_names
      when :all_repos
        all_repo_names
      when :code_repos
        code_repo_names
      when :foreign_content_repos
        foreign_content_repo_names
      when :primary_repo
        [primary_repo_name]
      when :test_content_repos
        all_content_repo_names.first(2)
      else
        raise ArgumentError.new("Invalid repo_set: #{ repo_set.inspect }")
      end
      repo_names.map { |repo_name|
        File.join(repo_set_parent_path, repo_name)
      }
    end

    # Creates the standard directory skeleton for a content repo.
    # @param repo_root_path [String] absolute path to root of repo
    def create_default_content_directory_structure(repo_root_path)
      # root level directories
      (
        %w[data] +
        content_type_names.map{ |e| "ct-#{ e }" }
      ).each do |rel_path|
        FileUtils.mkdir_p(File.join(repo_root_path, rel_path))
      end
      # per content_type directories. Parents listed before children since
      # FileUtils.mkdir does not create intermediate dirs.
      content_type_names.each do |content_type_name|
        %w[
          content
          lucene_table_export
          lucene_table_export/json_export
          lucene_table_export/L232
          lucene_table_export/L232/full
          lucene_table_export/L232/full/lucene_index
          lucene_table_export/L232/short
          lucene_table_export/L232/short/lucene_index
          lucene_table_export/L472
          lucene_table_export/L472/full
          lucene_table_export/L472/full/lucene_index
          lucene_table_export/L472/short
          lucene_table_export/L472/short/lucene_index
          pdf_export
          reports
          staging
        ].each do |rel_path|
          FileUtils.mkdir(
            File.join(repo_root_path, "ct-#{ content_type_name }", rel_path)
          )
        end
      end
    end

    # @param repo_root_path [String] absolute path to root of new repo
    # @param primary_language_repo_path [String] absolute path
    def copy_default_content_repo_files(repo_root_path, primary_language_repo_path)
      # Copy files that are the same between primary and foreign repos
      [
        '.gitignore',
        '.ruby-gemset',
        '.ruby-version',
        'Gemfile',
        'Gemfile.lock',
        'readme.md',
      ].each do |filename|
        FileUtils.cp(
          File.join(primary_language_repo_path, filename),
          repo_root_path
        )
      end
      # Copy repository level data.json file from code template
      repo_dir_name = repo_root_path.split('/').last.sub(/\Avgr\-/, '')
      language = Language.find_by_repo_dir_name(repo_dir_name)
      # These ivars are consumed by the ERB templates via `binding` below.
      @langcode_2 = language.code_2_chars
      @langcode_3 = language.code_3_chars
      erb_template = ERB.new(File.read(repository_level_data_json_file_template_path))
      dj_output_path = File.join(repo_root_path, 'data.json')
      File.write(dj_output_path, erb_template.result(binding))
      # Copy content_type level Rtfiles from code template
      content_type_names.each do |content_type_name|
        @content_type_name = content_type_name
        erb_template = ERB.new(File.read(rtfile_template_path))
        rtfile_output_path = File.join(repo_root_path, "ct-#{ content_type_name }", 'Rtfile')
        File.write(rtfile_output_path, erb_template.result(binding))
      end
    end

    # Returns the absolute path to the repository level data.json template to
    # use for new language repos.
    def repository_level_data_json_file_template_path
      File.expand_path("../../../../repositext/templates/repository-level-data.json.erb", __FILE__)
    end

    # Returns the absolute path to the content_type level Rtfile templates to
    # use for new language repos.
    def rtfile_template_path
      File.expand_path("../../../../repositext/templates/Rtfile.erb", __FILE__)
    end
  end
end
|
require 'active_support'
module RiceCooker
  module ClassMethods
    protected

    # Resolves and memoizes the model class backing this controller into
    # resource_model.
    #
    # Resolution order:
    #   1. An explicit resource_class (InheritedResources support).
    #      Fix: the branch now also requires resource_class to be truthy —
    #      a controller may respond to resource_class yet return nil, and
    #      previously that nil was assigned and returned, skipping all
    #      fallbacks below.
    #   2. The namespaced model, e.g. User::Group
    #   3. The top-namespace model, e.g. EngineName::Article for
    #      EngineName::Admin::ArticlesController
    #   4. The camelcased model, e.g. UserGroup
    #   5. controller_name.classify, or fail
    #
    # @return [Class, nil] the resolved model, or nil when none was found
    def initialize_model_class!
      # Handle InhRes computing
      if self.respond_to?(:resource_class) && self.resource_class
        self.resource_model = self.resource_class
        return self.resource_model
      end
      # First priority is the namespaced model, e.g. User::Group
      self.resource_model ||= begin
        namespaced_class = name.sub(/Controller$/, '').singularize
        namespaced_class.constantize
      rescue NameError
        nil
      end
      # Second priority is the top namespace model, e.g. EngineName::Article for EngineName::Admin::ArticlesController
      self.resource_model ||= begin
        namespaced_classes = name.sub(/Controller$/, '').split('::')
        namespaced_class = [namespaced_classes.first, namespaced_classes.last].join('::').singularize
        namespaced_class.constantize
      rescue NameError
        nil
      end
      # Third priority the camelcased class, i.e. UserGroup
      self.resource_model ||= begin
        camelcased_class = name.sub(/Controller$/, '').gsub('::', '').singularize
        camelcased_class.constantize
      rescue NameError
        nil
      end
      # Otherwise use the controller_name-derived class, or fail
      self.resource_model ||= begin
        class_name = controller_name.classify
        class_name.constantize
      rescue NameError => e
        raise unless e.message.include?(class_name)
        nil
      end
      # We prevent creating a resource which is not a model.
      # NOTE(review): the bare rescue appears to tolerate environments where
      # ActiveRecord is not loaded — confirm.
      if self.resource_model
        begin
          self.resource_model = nil unless self.resource_model < ActiveRecord::Base
        rescue
          nil
        end
      end
    end

    # Hook: resolve the model for every subclass as it is defined.
    def inherited(base) #:nodoc:
      super(base)
      base.send :initialize_model_class!
    end
  end
end
Updating resource class resolution
require 'active_support'
# require "pry"
module RiceCooker
  module ClassMethods
    protected

    # Resolves and memoizes the model class backing this controller into
    # resource_model, trying progressively looser naming conventions.
    # @return [Class, nil]
    def initialize_model_class!
      # First priority is the namespaced model, e.g. User::Group
      # binding.pry
      # Handle InhRes computing
      # puts "[#{self}] In initialization, resource_class: #{self.respond_to?(:resource_class) && self.resource_class.inspect}"
      # An explicit, truthy resource_class (e.g. InheritedResources) wins
      # and short-circuits all fallbacks below.
      if self.respond_to?(:resource_class) && self.resource_class
        self.resource_model = self.resource_class
        return self.resource_model
      end
      # binding.pry
      # puts "[1/5] Resource model: #{self.resource_model.inspect}"
      self.resource_model ||= begin
        namespaced_class = name.sub(/Controller$/, '').singularize
        namespaced_class.constantize
      rescue NameError
        nil
      end
      # Second priority is the top namespace model, e.g. EngineName::Article for EngineName::Admin::ArticlesController
      # puts "[2/5] Resource model: #{self.resource_model.inspect}"
      self.resource_model ||= begin
        namespaced_classes = name.sub(/Controller$/, '').split('::')
        namespaced_class = [namespaced_classes.first, namespaced_classes.last].join('::').singularize
        namespaced_class.constantize
      rescue NameError
        nil
      end
      # Second second priority is the top namespace model, e.g. EngineName::Article for EngineName::Admin::ArticlesController
      # puts "[3/5] Resource model: #{self.resource_model.inspect}"
      # NOTE(review): [1..3] keeps at most namespace segments 2-4 — confirm
      # this is the intended slice for deeply nested controllers.
      self.resource_model ||= begin
        namespaced_class = name.sub(/Controller$/, '').split('::')[1..3].join('::').singularize
        namespaced_class.constantize
      rescue NameError
        nil
      end
      # Third priority the camelcased c, i.e. UserGroup
      # puts "[4/5] Resource model: #{self.resource_model.inspect}"
      self.resource_model ||= begin
        camelcased_class = name.sub(/Controller$/, '').gsub('::', '').singularize
        camelcased_class.constantize
      rescue NameError
        nil
      end
      # Otherwise use the Group class, or fail
      # puts "[5/5] Resource model: #{self.resource_model.inspect}"
      self.resource_model ||= begin
        class_name = controller_name.classify
        class_name.constantize
      rescue NameError => e
        raise unless e.message.include?(class_name)
        nil
      end
      # We prevent for creating a resource which is not a model.
      # NOTE(review): the bare rescue appears to tolerate environments where
      # ActiveRecord is not loaded — confirm.
      if self.resource_model
        begin
          self.resource_model = nil unless self.resource_model < ActiveRecord::Base
        rescue
          nil
        end
      end
    end

    # Hook: resolve the model for every subclass as it is defined.
    def inherited(base) #:nodoc:
      super(base)
      base.send :initialize_model_class!
    end
  end
end
|
# Copyright (c) 2012 Arxopia LLC.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Arxopia LLC nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL ARXOPIA LLC BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
#OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
#OF THE POSSIBILITY OF SUCH DAMAGE.
module Risu
  module Templates
    # Text-rendering helpers shared by report templates. Every helper
    # delegates to the @output document object supplied by the host class.
    module TemplateHelper
      # Centered report fields: helper name => font size. Each generated
      # method takes (value, newline=false), renders the value centered at
      # the given size, and optionally emits a trailing blank line.
      [
        [:report_title, 24],
        [:report_subtitle, 18],
        [:report_author, 14]
      ].each do |helper_name, font_size|
        define_method(helper_name) do |value, newline = false|
          @output.font_size(font_size) do
            @output.text value, :align => :center
            @output.text "\n" if newline
          end
        end
      end

      # Renders the report classification banner, centered at 12pt.
      # Note the different defaults: the classification comes from the
      # Report model and a trailing newline is emitted by default.
      def report_classification classification=Report.classification.upcase, newline=true
        @output.font_size(12) do
          @output.text classification, :align => :center
          @output.text "\n" if newline
        end
      end

      # Renders free-form text with the given options passed through.
      def report_text(text, options = {})
        @output.text text, options
      end

      # Bold section headings: heading1 (largest, 24pt) through heading6
      # (smallest, 8pt).
      [
        [:heading1, 24],
        [:heading2, 18],
        [:heading3, 14],
        [:heading4, 12],
        [:heading5, 10],
        [:heading6, 8]
      ].each do |helper_name, font_size|
        define_method(helper_name) do |title|
          @output.font_size(font_size) do
            @output.text title, :style => :bold
          end
        end
      end
    end
  end
end
Using text instead of report_text in the TemplateHelper
# Copyright (c) 2012 Arxopia LLC.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Arxopia LLC nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL ARXOPIA LLC BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
#OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
#OF THE POSSIBILITY OF SUCH DAMAGE.
module Risu
  module Templates
    # Text-rendering helpers shared by report templates. Every helper
    # delegates to the @output document object supplied by the host class.
    module TemplateHelper
      # Centered report fields: helper name => font size. Each generated
      # method takes (value, newline=false), renders the value centered at
      # the given size, and optionally emits a trailing blank line.
      [
        [:report_title, 24],
        [:report_subtitle, 18],
        [:report_author, 14]
      ].each do |helper_name, font_size|
        define_method(helper_name) do |value, newline = false|
          @output.font_size(font_size) do
            @output.text value, :align => :center
            @output.text "\n" if newline
          end
        end
      end

      # Renders the report classification banner, centered at 12pt.
      # Note the different defaults: the classification comes from the
      # Report model and a trailing newline is emitted by default.
      def report_classification classification=Report.classification.upcase, newline=true
        @output.font_size(12) do
          @output.text classification, :align => :center
          @output.text "\n" if newline
        end
      end

      # Renders free-form text; nil renders as an empty string so templates
      # can safely pass through possibly-missing values.
      def text(text, options = {})
        @output.text(text.nil? ? "" : text, options)
      end

      # Bold section headings: heading1 (largest, 24pt) through heading6
      # (smallest, 8pt).
      [
        [:heading1, 24],
        [:heading2, 18],
        [:heading3, 14],
        [:heading4, 12],
        [:heading5, 10],
        [:heading6, 8]
      ].each do |helper_name, font_size|
        define_method(helper_name) do |title|
          @output.font_size(font_size) do
            @output.text title, :style => :bold
          end
        end
      end
    end
  end
end
|
require 'rom/support/enumerable_dataset'
module ROM
  # A helper module that adds data-proxy behavior to an array-like object
  #
  # @see EnumerableDataset
  #
  # @api public
  module ArrayDataset
    extend DataProxy::ClassMethods
    include EnumerableDataset

    # Extends the class with data-proxy behavior
    #
    # @api private
    def self.included(klass)
      klass.class_eval do
        include Options
        include DataProxy
      end
    end

    # Plain delegation to the underlying array.
    # Fix: :select! and :sort_by! were both forwarded here AND redefined in
    # the class_eval below, producing "method redefined; discarding old ..."
    # warnings. They are now only defined once, below, where they wrap the
    # result in a new dataset.
    forward(
      :*, :+, :-, :compact, :compact!, :flatten, :flatten!, :length, :pop,
      :reverse, :reverse!, :sample, :size, :shift, :shuffle, :shuffle!,
      :slice, :slice!, :sort!, :uniq, :uniq!, :unshift, :values_at
    )

    # Methods that must return a new dataset of the same class (or an
    # Enumerator when no block is given).
    [
      :map!, :combination, :cycle, :delete_if, :keep_if, :permutation, :reject!,
      :select!, :sort_by!
    ].each do |method|
      class_eval <<-RUBY, __FILE__, __LINE__ + 1
        def #{method}(*args, &block)
          return to_enum unless block
          self.class.new(data.send(:#{method}, *args, &block), options)
        end
      RUBY
    end
  end
end
Define ArrayDataset#{sort_by!,select!} only once
This commit eliminates the following warnings (run via `rspec --warnings`):
rom/lib/rom/support/array_dataset.rb:34: warning: method redefined; discarding old select!
rom/lib/rom/support/array_dataset.rb:34: warning: method redefined; discarding old sort_by!
require 'rom/support/enumerable_dataset'
module ROM
  # A helper module that adds data-proxy behavior to an array-like object
  #
  # @see EnumerableDataset
  #
  # @api public
  module ArrayDataset
    extend DataProxy::ClassMethods
    include EnumerableDataset

    # Extends the class with data-proxy behavior
    #
    # @api private
    def self.included(klass)
      klass.class_eval do
        include Options
        include DataProxy
      end
    end

    # Plain delegation to the underlying array. :select! and :sort_by! are
    # deliberately absent here — they are redefined below to wrap results
    # in a new dataset, and forwarding them too would emit
    # "method redefined" warnings.
    forward(
      :*, :+, :-, :compact, :compact!, :flatten, :flatten!, :length, :pop,
      :reverse, :reverse!, :sample, :size, :shift, :shuffle, :shuffle!,
      :slice, :slice!, :sort!, :uniq, :uniq!, :unshift, :values_at
    )

    # Methods that must return a new dataset of the same class (or an
    # Enumerator when no block is given).
    [
      :map!, :combination, :cycle, :delete_if, :keep_if, :permutation, :reject!,
      :select!, :sort_by!
    ].each do |method|
      class_eval <<-RUBY, __FILE__, __LINE__ + 1
        def #{method}(*args, &block)
          return to_enum unless block
          self.class.new(data.send(:#{method}, *args, &block), options)
        end
      RUBY
    end
  end
end
|
# frozen_string_literal: true
# rubocop:disable Metrics/LineLength
module RubyRabbitmqJanus
  module Janus
    module Responses
      # @author VAILLANT Jeremy <jeremy.vaillant@dazzl.tv>
      #
      # @since 3.0.0
      #
      # Manage exception to response Janus
      #
      # Maps each Janus numeric error code to the exception class it raises.
      # The `_<code>` methods are generated from ERROR_CLASSES; any code
      # without an entry falls through to #default_error via #method_missing.
      class Errors
        # Janus error code => exception class name (resolved under
        # RubyRabbitmqJanus::Errors::Janus::Responses at call time).
        ERROR_CLASSES = {
          # Unauthorized (can only happen when using apisecret/auth token)
          403 => :Unauthorized,
          # Unauthorized access to a plugin (can only happen when using auth token)
          405 => :UnauthorizedPlugin,
          # Transport related error
          450 => :TransportSpecific,
          # The request is missing in the message
          452 => :MissingRequest,
          # The Janus core does not support this request
          453 => :UnknownRequest,
          # The payload is not a valid JSON message
          454 => :InvalidJSON,
          # The object is not a valid JSON object as expected
          455 => :InvalidJSONObject,
          # A mandatory element is missing in the message
          456 => :MissingMandatoryElement,
          # The request cannot be handled for this webserver path
          457 => :InvalidRequestPath,
          # The session the request refers to doesn't exist
          458 => :SessionNotFound,
          # The handle the request refers to doesn't exist
          459 => :HandleNotFound,
          # The plugin the request wants to talk to doesn't exist
          460 => :PluginNotFound,
          # An error occurring when trying to attach to a plugin and create a handle
          461 => :PluginAttach,
          # An error occurring when trying to send a message/request to the plugin
          462 => :PluginMessage,
          # An error occurring when trying to detach from a plugin and destroy the related handle
          463 => :PluginDetach,
          # The Janus core doesn't support this SDP type
          464 => :JSEPUnknownType,
          # The Session Description provided by the peer is invalid
          465 => :JSEPInvalidSDP,
          # The stream a trickle candidate for does not exist or is invalid
          466 => :TrickleInvalidStream,
          # A JSON element is of the wrong type (e.g., an integer instead of a string)
          467 => :InvalidElementType,
          # The ID provided to create a new session is already in use
          468 => :SessionConflit,
          # We got an ANSWER to an OFFER we never made
          469 => :UnexpectedAnswer,
          # The auth token the request refers to doesn't exist
          470 => :TokenNotFound,
          # The current request cannot be handled because of not compatible WebRTC state
          471 => :WebRTCState,
          # The server is currently configured not to accept new sessions
          472 => :NotAcceptingSession,
          # Unknown/undocumented error
          490 => :Unknown
        }.freeze

        # Generate one `_<code>(request)` method per entry, raising the
        # mapped exception with the offending request.
        ERROR_CLASSES.each do |code, error_class|
          define_method("_#{code}") do |request|
            raise RubyRabbitmqJanus::Errors::Janus::Responses.const_get(error_class), request
          end
        end

        # NOTE(review): returns nil (falsy) for every name although
        # #method_missing handles every call — confirm whether respond_to?
        # should report true instead.
        def respond_to_missing?(name, include_private); end

        # rubocop:disable Style/MethodMissingSuper
        # Any unmapped error code is treated as a generic failure.
        def method_missing(_method, request)
          default_error(request)
        end
        # rubocop:enable Style/MethodMissingSuper

        def default_error(request)
          raise RubyRabbitmqJanus::Errors::Janus::Responses::Nok, request
        end
      end
    end
  end
end
# rubocop:enable Metrics/LineLength
Change rubocop rule name
# frozen_string_literal: true
# rubocop:disable Layout/LineLength
module RubyRabbitmqJanus
module Janus
module Responses
# @author VAILLANT Jeremy <jeremy.vaillant@dazzl.tv>
#
# @since 3.0.0
#
# Manage exception to response Janus
class Errors
# Unauthorized (can only happen when using apisecret/auth token)
def _403(request)
raise RubyRabbitmqJanus::Errors::Janus::Responses::Unauthorized, request
end
# Unauthorized access to a plugin (can only
# happen when using auth token)
def _405(request)
raise RubyRabbitmqJanus::Errors::Janus::Responses::UnauthorizedPlugin, request
end
# Transport related error
def _450(request)
raise RubyRabbitmqJanus::Errors::Janus::Responses::TransportSpecific, request
end
# The request is missing in the message
def _452(request)
raise RubyRabbitmqJanus::Errors::Janus::Responses::MissingRequest, request
end
# The Janus core does not suppurt this request
def _453(request)
raise RubyRabbitmqJanus::Errors::Janus::Responses::UnknownRequest, request
end
# The payload is not a valid JSON message
def _454(request)
raise RubyRabbitmqJanus::Errors::Janus::Responses::InvalidJSON, request
end
# The object is not a valid JSON object as expected
def _455(request)
raise RubyRabbitmqJanus::Errors::Janus::Responses::InvalidJSONObject, request
end
# A mandatory element is missing in the message
def _456(request)
raise RubyRabbitmqJanus::Errors::Janus::Responses::MissingMandatoryElement, request
end
# The request cannot be handled for this webserver path
def _457(request)
raise RubyRabbitmqJanus::Errors::Janus::Responses::InvalidRequestPath, request
end
# The session the request refers to doesn't exist
def _458(request)
raise RubyRabbitmqJanus::Errors::Janus::Responses::SessionNotFound, request
end
# The handle the request refers to doesn't exist
def _459(request)
raise RubyRabbitmqJanus::Errors::Janus::Responses::HandleNotFound, request
end
# The plugin the request wants to talk to doesn't exist
def _460(request)
raise RubyRabbitmqJanus::Errors::Janus::Responses::PluginNotFound, request
end
# An error occurring when trying to attach to
# a plugin and create a handle
def _461(request)
raise RubyRabbitmqJanus::Errors::Janus::Responses::PluginAttach, request
end
# An error occurring when trying to send a message/request to the plugin
def _462(request)
raise RubyRabbitmqJanus::Errors::Janus::Responses::PluginMessage, request
end
# brief An error occurring when trying to detach from
# a plugin and destroy the related handle
def _463(request)
raise RubyRabbitmqJanus::Errors::Janus::Responses::PluginDetach, request
end
# The Janus core doesn't support this SDP type
def _464(request)
raise RubyRabbitmqJanus::Errors::Janus::Responses::JSEPUnknownType, request
end
# The Session Description provided by the peer is invalid
def _465(request)
raise RubyRabbitmqJanus::Errors::Janus::Responses::JSEPInvalidSDP, request
end
# The stream a trickle candidate for does not exist or is invalid
def _466(request)
raise RubyRabbitmqJanus::Errors::Janus::Responses::TrickleInvalidStream, request
end
# A JSON element is of the wrong type
# (e.g., an integer instead of a string)
def _467(request)
raise RubyRabbitmqJanus::Errors::Janus::Responses::InvalidElementType, request
end
# The ID provided to create a new session is already in use
def _468(request)
raise RubyRabbitmqJanus::Errors::Janus::Responses::SessionConflit, request
end
# We got an ANSWER to an OFFER we never made
def _469(request)
raise RubyRabbitmqJanus::Errors::Janus::Responses::UnexpectedAnswer, request
end
# The auth token the request refers to doesn't exist
def _470(request)
raise RubyRabbitmqJanus::Errors::Janus::Responses::TokenNotFound, request
end
# The current request cannot be handled because
# of not compatible WebRTC state
def _471(request)
raise RubyRabbitmqJanus::Errors::Janus::Responses::WebRTCState, request
end
# The server is currently configured not to accept new sessions
def _472(request)
raise RubyRabbitmqJanus::Errors::Janus::Responses::NotAcceptingSession, request
end
# Unknown/undocumented error
def _490(request)
raise RubyRabbitmqJanus::Errors::Janus::Responses::Unknown, request
end
def respond_to_missing?(name, include_private); end
# rubocop:disable Style/MethodMissingSuper
def method_missing(_method, request)
default_error(request)
end
# rubocop:enable Style/MethodMissingSuper
def default_error(request)
raise RubyRabbitmqJanus::Errors::Janus::Responses::Nok, request
end
end
end
end
end
# rubocop:enable Metrics/LineLength
|
class Juxtapose
def self.it_should_look_like(template)
Screenshotter.new(self, template).verify.should === true
end
class Screenshotter
M_PI = Math::PI
M_PI_2 = Math::PI / 2
attr_reader :context
attr_reader :template
def initialize(context, template)
@context = context
@template = template
end
def resolution
@resolution ||= UIScreen.mainScreen.bounds
end
def version
@version ||= "ios_#{Device.ios_version}"
end
def width
resolution.width
end
def height
resolution.height
end
def dir
@dir ||= "#{File.dirname(__FILE__)}/../../spec/screens/#{version}/#{template}".tap do |dir|
`mkdir -p #{dir}`
end
end
def filename(base)
raise "unknown filename" unless [:current, :accepted, :diff].include?(base)
components = [base]
components << timestamp unless base == :accepted
components += ["#{width}x#{height}", "png"]
File.join dir, components.join('.')
end
def timestamp
@timestamp ||= Time.now.to_f.to_s.gsub(/\./, '')
end
def save_current
application = UIApplication.sharedApplication
windows = application.windows
currentOrientation = application.statusBarOrientation
scale = UIScreen.mainScreen.scale
size = UIScreen.mainScreen.bounds.size
if [:landscape_right, :landscape_left].include?(Device.orientation)
size = CGSizeMake(size.height, size.width);
end
UIGraphicsBeginImageContextWithOptions(size, false, scale)
context = UIGraphicsGetCurrentContext()
if currentOrientation == UIInterfaceOrientationLandscapeLeft
CGContextTranslateCTM(context, size.width / 2.0, size.height / 2.0)
CGContextRotateCTM(context, M_PI_2)
CGContextTranslateCTM(context, - size.height / 2.0, - size.width / 2.0)
elsif currentOrientation == UIInterfaceOrientationLandscapeRight
CGContextTranslateCTM(context, size.width / 2.0, size.height / 2.0)
CGContextRotateCTM(context, -M_PI_2)
CGContextTranslateCTM(context, - size.height / 2.0, - size.width / 2.0)
elsif currentOrientation == UIInterfaceOrientationPortraitUpsideDown
CGContextTranslateCTM(context, size.width / 2.0, size.height / 2.0)
CGContextRotateCTM(context, M_PI)
CGContextTranslateCTM(context, -size.width / 2.0, -size.height / 2.0)
end
windows.each do |window|
CGContextSaveGState(context)
CGContextTranslateCTM(context, window.center.x, window.center.y)
CGContextConcatCTM(context, window.transform)
CGContextTranslateCTM(context,
- window.bounds.size.width * window.layer.anchorPoint.x,
- window.bounds.size.height * window.layer.anchorPoint.y)
window.layer.presentationLayer.renderInContext(UIGraphicsGetCurrentContext())
CGContextRestoreGState(context)
end
image = UIGraphicsGetImageFromCurrentImageContext()
UIGraphicsEndImageContext()
UIImagePNGRepresentation(image).writeToFile(filename(:current), atomically: true)
end
def accept_current
`cp #{filename(:current)} #{filename(:accepted)}`
end
def verify
save_current
accept_current if ENV['ACCEPT_ALL_SCREENSHOTS']
success = true
if File.exists? filename(:accepted )
compare_command = "compare -metric AE -dissimilarity-threshold 1 -subimage-search"
out = `#{compare_command} \"#{filename :current}\" \"#{filename :accepted}\" \"#{filename :diff}\" 2>&1`
out.chomp!
(out == '0').tap do |verified|
if verified
`rm #{filename(:current)}`
`rm #{filename(:diff)}`
else
success = false
puts "Screenshot verification failed (current: #{filename :current}, diff: #{filename :diff})"
end
end
else
raise "No accepted screen shot for #{filename :accepted}"
end
success
end
end
end
Don't attempt to render windows without presentation layer.
class Juxtapose
def self.it_should_look_like(template)
Screenshotter.new(self, template).verify.should === true
end
class Screenshotter
M_PI = Math::PI
M_PI_2 = Math::PI / 2
attr_reader :context
attr_reader :template
def initialize(context, template)
@context = context
@template = template
end
def resolution
@resolution ||= UIScreen.mainScreen.bounds
end
def version
@version ||= "ios_#{Device.ios_version}"
end
def width
resolution.width
end
def height
resolution.height
end
def dir
@dir ||= "#{File.dirname(__FILE__)}/../../spec/screens/#{version}/#{template}".tap do |dir|
`mkdir -p #{dir}`
end
end
def filename(base)
raise "unknown filename" unless [:current, :accepted, :diff].include?(base)
components = [base]
components << timestamp unless base == :accepted
components += ["#{width}x#{height}", "png"]
File.join dir, components.join('.')
end
def timestamp
@timestamp ||= Time.now.to_f.to_s.gsub(/\./, '')
end
def save_current
application = UIApplication.sharedApplication
windows = application.windows
currentOrientation = application.statusBarOrientation
scale = UIScreen.mainScreen.scale
size = UIScreen.mainScreen.bounds.size
if [:landscape_right, :landscape_left].include?(Device.orientation)
size = CGSizeMake(size.height, size.width);
end
UIGraphicsBeginImageContextWithOptions(size, false, scale)
context = UIGraphicsGetCurrentContext()
if currentOrientation == UIInterfaceOrientationLandscapeLeft
CGContextTranslateCTM(context, size.width / 2.0, size.height / 2.0)
CGContextRotateCTM(context, M_PI_2)
CGContextTranslateCTM(context, - size.height / 2.0, - size.width / 2.0)
elsif currentOrientation == UIInterfaceOrientationLandscapeRight
CGContextTranslateCTM(context, size.width / 2.0, size.height / 2.0)
CGContextRotateCTM(context, -M_PI_2)
CGContextTranslateCTM(context, - size.height / 2.0, - size.width / 2.0)
elsif currentOrientation == UIInterfaceOrientationPortraitUpsideDown
CGContextTranslateCTM(context, size.width / 2.0, size.height / 2.0)
CGContextRotateCTM(context, M_PI)
CGContextTranslateCTM(context, -size.width / 2.0, -size.height / 2.0)
end
windows.each do |window|
next if window.layer.presentationLayer.nil?
CGContextSaveGState(context)
CGContextTranslateCTM(context, window.center.x, window.center.y)
CGContextConcatCTM(context, window.transform)
CGContextTranslateCTM(context,
- window.bounds.size.width * window.layer.anchorPoint.x,
- window.bounds.size.height * window.layer.anchorPoint.y)
window.layer.presentationLayer.renderInContext(UIGraphicsGetCurrentContext())
CGContextRestoreGState(context)
end
image = UIGraphicsGetImageFromCurrentImageContext()
UIGraphicsEndImageContext()
UIImagePNGRepresentation(image).writeToFile(filename(:current), atomically: true)
end
def accept_current
`cp #{filename(:current)} #{filename(:accepted)}`
end
def verify
save_current
accept_current if ENV['ACCEPT_ALL_SCREENSHOTS']
success = true
if File.exists? filename(:accepted )
compare_command = "compare -metric AE -dissimilarity-threshold 1 -subimage-search"
out = `#{compare_command} \"#{filename :current}\" \"#{filename :accepted}\" \"#{filename :diff}\" 2>&1`
out.chomp!
(out == '0').tap do |verified|
if verified
`rm #{filename(:current)}`
`rm #{filename(:diff)}`
else
success = false
puts "Screenshot verification failed (current: #{filename :current}, diff: #{filename :diff})"
end
end
else
raise "No accepted screen shot for #{filename :accepted}"
end
success
end
end
end
|
module RSpec
module Expectations
# @api private
# Provides methods for enabling and disabling the available
# syntaxes provided by rspec-expectations.
module Syntax
extend self
# @method should
# Passes if `matcher` returns true. Available on every `Object`.
# @example
# actual.should eq expected
# actual.should match /expression/
# @param [Matcher]
# matcher
# @param [String] message optional message to display when the expectation fails
# @return [Boolean] true if the expectation succeeds (else raises)
# @see RSpec::Matchers
# @method should_not
# Passes if `matcher` returns false. Available on every `Object`.
# @example
# actual.should_not eq expected
# @param [Matcher]
# matcher
# @param [String] message optional message to display when the expectation fails
# @return [Boolean] false if the negative expectation succeeds (else raises)
# @see RSpec::Matchers
# @method expect
# Supports `expect(actual).to matcher` syntax by wrapping `actual` in an
# `ExpectationTarget`.
# @example
# expect(actual).to eq(expected)
# expect(actual).not_to eq(expected)
# @return [ExpectationTarget]
# @see ExpectationTarget#to
# @see ExpectationTarget#not_to
# @api private
# Determines where we add `should` and `should_not`.
def default_should_host
@default_should_host ||= ::Object.ancestors.last
end
def self.warn_about_should!
@warn_about_should = true
end
def self.warn_about_should_unless_configured(method_name)
if @warn_about_should
RSpec.deprecate(
"Using `#{method_name}` from the old `:should` syntax without explicitly enabling the syntax",
:replacement => "the new `:expect` syntax or explicitly enable `:should`"
)
@warn_about_should = false
end
end
# @api private
# Enables the `should` syntax.
def enable_should(syntax_host = default_should_host)
@warn_about_should = false
return if should_enabled?(syntax_host)
syntax_host.module_exec do
def should(matcher=nil, message=nil, &block)
::RSpec::Expectations::Syntax.warn_about_should_unless_configured(__method__)
::RSpec::Expectations::PositiveExpectationHandler.handle_matcher(self, matcher, message, &block)
end
def should_not(matcher=nil, message=nil, &block)
::RSpec::Expectations::Syntax.warn_about_should_unless_configured(__method__)
::RSpec::Expectations::NegativeExpectationHandler.handle_matcher(self, matcher, message, &block)
end
end
::RSpec::Expectations::ExpectationTarget.enable_deprecated_should if expect_enabled?
end
# @api private
# Disables the `should` syntax.
def disable_should(syntax_host = default_should_host)
return unless should_enabled?(syntax_host)
syntax_host.module_exec do
undef should
undef should_not
end
::RSpec::Expectations::ExpectationTarget.disable_deprecated_should
end
# @api private
# Enables the `expect` syntax.
def enable_expect(syntax_host = ::RSpec::Matchers)
return if expect_enabled?(syntax_host)
syntax_host.module_exec do
def expect(*target, &target_block)
target << target_block if block_given?
raise ArgumentError.new("You must pass an argument or a block to #expect but not both.") unless target.size == 1
::RSpec::Expectations::ExpectationTarget.new(target.first)
end
end
::RSpec::Expectations::ExpectationTarget.enable_deprecated_should if should_enabled?
end
# @api private
# Disables the `expect` syntax.
def disable_expect(syntax_host = ::RSpec::Matchers)
return unless expect_enabled?(syntax_host)
syntax_host.module_exec do
undef expect
end
::RSpec::Expectations::ExpectationTarget.disable_deprecated_should
end
# @api private
# Indicates whether or not the `should` syntax is enabled.
def should_enabled?(syntax_host = default_should_host)
syntax_host.method_defined?(:should)
end
# @api private
# Indicates whether or not the `expect` syntax is enabled.
def expect_enabled?(syntax_host = ::RSpec::Matchers)
syntax_host.method_defined?(:expect)
end
# @api private
# Generates a positive expectation expression.
def positive_expression(target_expression, matcher_expression)
expression_generator.positive_expression(target_expression, matcher_expression)
end
# @api private
# Generates a negative expectation expression.
def negative_expression(target_expression, matcher_expression)
expression_generator.negative_expression(target_expression, matcher_expression)
end
# @api private
# Selects which expression generator to use based on the configured syntax.
def expression_generator
if expect_enabled?
ExpectExpressionGenerator
else
ShouldExpressionGenerator
end
end
# @api private
# Generates expectation expressions for the `should` syntax.
module ShouldExpressionGenerator
def self.positive_expression(target_expression, matcher_expression)
"#{target_expression}.should #{matcher_expression}"
end
def self.negative_expression(target_expression, matcher_expression)
"#{target_expression}.should_not #{matcher_expression}"
end
end
# @api private
# Generates expectation expressions for the `expect` syntax.
module ExpectExpressionGenerator
def self.positive_expression(target_expression, matcher_expression)
"expect(#{target_expression}).to #{matcher_expression}"
end
def self.negative_expression(target_expression, matcher_expression)
"expect(#{target_expression}).not_to #{matcher_expression}"
end
end
end
end
end
Improve deprecation warning message.
rspec-mocks issues a similar warning so it's useful
to clarify that this is from rspec-expectations.
module RSpec
module Expectations
# @api private
# Provides methods for enabling and disabling the available
# syntaxes provided by rspec-expectations.
module Syntax
extend self
# @method should
# Passes if `matcher` returns true. Available on every `Object`.
# @example
# actual.should eq expected
# actual.should match /expression/
# @param [Matcher]
# matcher
# @param [String] message optional message to display when the expectation fails
# @return [Boolean] true if the expectation succeeds (else raises)
# @see RSpec::Matchers
# @method should_not
# Passes if `matcher` returns false. Available on every `Object`.
# @example
# actual.should_not eq expected
# @param [Matcher]
# matcher
# @param [String] message optional message to display when the expectation fails
# @return [Boolean] false if the negative expectation succeeds (else raises)
# @see RSpec::Matchers
# @method expect
# Supports `expect(actual).to matcher` syntax by wrapping `actual` in an
# `ExpectationTarget`.
# @example
# expect(actual).to eq(expected)
# expect(actual).not_to eq(expected)
# @return [ExpectationTarget]
# @see ExpectationTarget#to
# @see ExpectationTarget#not_to
# @api private
# Determines where we add `should` and `should_not`.
def default_should_host
@default_should_host ||= ::Object.ancestors.last
end
def self.warn_about_should!
@warn_about_should = true
end
def self.warn_about_should_unless_configured(method_name)
if @warn_about_should
RSpec.deprecate(
"Using `#{method_name}` from rspec-expectations' old `:should` syntax without explicitly enabling the syntax",
:replacement => "the new `:expect` syntax or explicitly enable `:should`"
)
@warn_about_should = false
end
end
# @api private
# Enables the `should` syntax.
def enable_should(syntax_host = default_should_host)
@warn_about_should = false
return if should_enabled?(syntax_host)
syntax_host.module_exec do
def should(matcher=nil, message=nil, &block)
::RSpec::Expectations::Syntax.warn_about_should_unless_configured(__method__)
::RSpec::Expectations::PositiveExpectationHandler.handle_matcher(self, matcher, message, &block)
end
def should_not(matcher=nil, message=nil, &block)
::RSpec::Expectations::Syntax.warn_about_should_unless_configured(__method__)
::RSpec::Expectations::NegativeExpectationHandler.handle_matcher(self, matcher, message, &block)
end
end
::RSpec::Expectations::ExpectationTarget.enable_deprecated_should if expect_enabled?
end
# @api private
# Disables the `should` syntax.
def disable_should(syntax_host = default_should_host)
return unless should_enabled?(syntax_host)
syntax_host.module_exec do
undef should
undef should_not
end
::RSpec::Expectations::ExpectationTarget.disable_deprecated_should
end
# @api private
# Enables the `expect` syntax.
def enable_expect(syntax_host = ::RSpec::Matchers)
return if expect_enabled?(syntax_host)
syntax_host.module_exec do
def expect(*target, &target_block)
target << target_block if block_given?
raise ArgumentError.new("You must pass an argument or a block to #expect but not both.") unless target.size == 1
::RSpec::Expectations::ExpectationTarget.new(target.first)
end
end
::RSpec::Expectations::ExpectationTarget.enable_deprecated_should if should_enabled?
end
# @api private
# Disables the `expect` syntax.
def disable_expect(syntax_host = ::RSpec::Matchers)
return unless expect_enabled?(syntax_host)
syntax_host.module_exec do
undef expect
end
::RSpec::Expectations::ExpectationTarget.disable_deprecated_should
end
# @api private
# Indicates whether or not the `should` syntax is enabled.
def should_enabled?(syntax_host = default_should_host)
syntax_host.method_defined?(:should)
end
# @api private
# Indicates whether or not the `expect` syntax is enabled.
def expect_enabled?(syntax_host = ::RSpec::Matchers)
syntax_host.method_defined?(:expect)
end
# @api private
# Generates a positive expectation expression.
def positive_expression(target_expression, matcher_expression)
expression_generator.positive_expression(target_expression, matcher_expression)
end
# @api private
# Generates a negative expectation expression.
def negative_expression(target_expression, matcher_expression)
expression_generator.negative_expression(target_expression, matcher_expression)
end
# @api private
# Selects which expression generator to use based on the configured syntax.
def expression_generator
if expect_enabled?
ExpectExpressionGenerator
else
ShouldExpressionGenerator
end
end
# @api private
# Generates expectation expressions for the `should` syntax.
module ShouldExpressionGenerator
def self.positive_expression(target_expression, matcher_expression)
"#{target_expression}.should #{matcher_expression}"
end
def self.negative_expression(target_expression, matcher_expression)
"#{target_expression}.should_not #{matcher_expression}"
end
end
# @api private
# Generates expectation expressions for the `expect` syntax.
module ExpectExpressionGenerator
def self.positive_expression(target_expression, matcher_expression)
"expect(#{target_expression}).to #{matcher_expression}"
end
def self.negative_expression(target_expression, matcher_expression)
"expect(#{target_expression}).not_to #{matcher_expression}"
end
end
end
end
end
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.