CombinedText stringlengths 4 3.42M |
|---|
require "mongo_mapper"
require "rails"
require "active_model/railtie"
module MongoMapper
# = MongoMapper Railtie
#
# Hooks MongoMapper into the Rails boot process: exposes
# +config.mongo_mapper+ for app-level settings, loads the database
# rake tasks, establishes the connection from config/mongo.yml, and
# installs the identity-map-clearing middleware.
class Railtie < Rails::Railtie
config.mongo_mapper = ActiveSupport::OrderedOptions.new
rake_tasks do
load "mongo_mapper/railtie/database.rake"
end
# Copy every key from config.mongo_mapper onto MongoMapper via its
# writer methods once MongoMapper has finished loading.
initializer "mongo_mapper.set_configs" do |app|
ActiveSupport.on_load(:mongo_mapper) do
app.config.mongo_mapper.each do |k,v|
send "#{k}=", v
end
end
end
# This sets the database configuration and establishes the connection.
# config/mongo.yml is rendered through ERB first, so it may embed Ruby.
initializer "mongo_mapper.initialize_database" do |app|
config_file = Rails.root.join('config/mongo.yml')
if config_file.file?
config = YAML.load(ERB.new(config_file.read).result)
MongoMapper.setup(config, Rails.env, :logger => Rails.logger)
end
end
# Clear the identity map after each request
# (string middleware name defers constant resolution until app boot).
initializer "mongo_mapper.clear_identity_map" do |app|
app.config.middleware.use 'MongoMapper::Middleware::IdentityMap'
end
end
end
Middleware is now optional, since inserting it turns the identity map on.
require "mongo_mapper"
require "rails"
require "active_model/railtie"
module MongoMapper
# = MongoMapper Railtie
#
# Hooks MongoMapper into the Rails boot process: exposes
# +config.mongo_mapper+ for app-level settings, loads the database
# rake tasks, and establishes the connection from config/mongo.yml.
# The identity-map middleware is no longer added here; it is opt-in.
class Railtie < Rails::Railtie
config.mongo_mapper = ActiveSupport::OrderedOptions.new
rake_tasks do
load "mongo_mapper/railtie/database.rake"
end
# Copy every key from config.mongo_mapper onto MongoMapper via its
# writer methods once MongoMapper has finished loading.
initializer "mongo_mapper.set_configs" do |app|
ActiveSupport.on_load(:mongo_mapper) do
app.config.mongo_mapper.each do |k,v|
send "#{k}=", v
end
end
end
# This sets the database configuration and establishes the connection.
# config/mongo.yml is rendered through ERB first, so it may embed Ruby.
initializer "mongo_mapper.initialize_database" do |app|
config_file = Rails.root.join('config/mongo.yml')
if config_file.file?
config = YAML.load(ERB.new(config_file.read).result)
MongoMapper.setup(config, Rails.env, :logger => Rails.logger)
end
end
end
end
|
# Description: ChefVault VERSION file
# Copyright 2013-15, Nordstrom, Inc.
# Copyright 2015-2016, Chef Software, Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class ChefVault
# Gem version; MAJOR/MINOR/TINY are the dotted segments (as strings).
VERSION = "2.7.1"
MAJOR, MINOR, TINY = VERSION.split(".")
end
do a prerelease of 2.8.0
# Description: ChefVault VERSION file
# Copyright 2013-15, Nordstrom, Inc.
# Copyright 2015-2016, Chef Software, Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class ChefVault
# Gem version (release candidate). The three-way destructure keeps
# only the first three dotted segments, so the "rc1" part is not
# exposed via MAJOR/MINOR/TINY.
VERSION = "2.8.0.rc1"
MAJOR, MINOR, TINY = VERSION.split(".")
end
|
# encoding: utf-8
module Mongoid #:nodoc:
# Defines behaviour for the identity map in Mongoid.
class IdentityMap < Hash
# Get a document from the identity map by its id.
#
# @example Get the document from the map.
# map.get(Person, id)
#
# @param [ Class ] klass The class of the document.
# @param [ Object, Hash ] identifier The document id or selector.
#
# @return [ Document ] The matching document.
#
# @since 2.1.0
def get(klass, identifier)
return nil unless Mongoid.identity_map_enabled?
documents_for(klass)[identifier]
end
# Remove the document from the identity map.
#
# @example Remove the document.
# map.remove(person)
#
# @param [ Document ] document The document to remove.
#
# @return [ Document, nil ] The removed document.
#
# @since 2.1.0
def remove(document)
return nil unless Mongoid.identity_map_enabled? && document && document.id
documents_for(document.class).delete(document.id)
end
# Puts a document in the identity map, accessed by its id.
#
# @example Put the document in the map.
# identity_map.set(document)
#
# @param [ Document ] document The document to place in the map.
#
# @return [ Document ] The provided document.
#
# @since 2.1.0
def set(document)
return nil unless Mongoid.identity_map_enabled? && document && document.id
documents_for(document.class)[document.id] = document
end
# Set a document in the identity map for the provided selector,
# appending to the list of documents already cached for it.
#
# @example Set the document in the map.
# identity_map.set_many(document, { :person_id => person.id })
#
# @param [ Document ] document The document to set.
# @param [ Hash ] selector The selector to identify it.
#
# @return [ Array<Document> ] The documents.
#
# @since 2.2.0
def set_many(document, selector)
(documents_for(document.class)[selector] ||= []).push(document)
end
# Set a single document in the identity map for the provided selector.
#
# @example Set the document in the map.
# identity_map.set_one(document, { :person_id => person.id })
#
# @param [ Document ] document The document to set.
# @param [ Hash ] selector The selector to identify it.
#
# @return [ Document ] The matching document.
#
# @since 2.2.0
def set_one(document, selector)
documents_for(document.class)[selector] = document
end
# Set a custom field in the identity map
#
# @example Set the custom field in the map.
# identity_map.set_custom_fields("current_user_liked", like.id, true)
#
# @param [ Object ] identifier The custom field identifier.
# @param [ Object ] key The custom field hash key.
# @param [ Object ] value The custom field value for the key.
#
# @return [ Hash ] The matching hash.
#
# @since 2.2.0
def set_custom_fields(identifier, key, value)
return nil unless Mongoid.identity_map_enabled? && key && value
((self["custom_fields"] ||= {})[identifier] ||= {})[key] = value
end
private
# Get the documents in the identity map for a specific class.
#
# @example Get the documents for the class.
# map.documents_for(Person)
#
# @param [ Class ] klass The class to retrieve.
#
# @return [ Hash ] The documents.
#
# @since 2.1.0
def documents_for(klass)
self[klass] ||= {}
end
class << self
# For ease of access we provide the same API to the identity map on the
# class level, which in turn just gets the identity map that is on the
# current thread.
#
# @example Get a document from the current identity map by id.
# IdentityMap.get(id)
#
# @example Set a document in the current identity map.
# IdentityMap.set(document)
#
# @since 2.1.0
# The << appends the :to options hash as the delegate call's final
# argument, forwarding every Hash and IdentityMap instance method to
# the per-thread map.
delegate *(
Hash.public_instance_methods(false) +
IdentityMap.public_instance_methods(false) <<
{ :to => :"Mongoid::Threaded.identity_map" }
)
end
end
add missing commit back
# encoding: utf-8
module Mongoid #:nodoc:
# Defines behaviour for the identity map in Mongoid.
class IdentityMap < Hash
# Get a document from the identity map by its id.
#
# @example Get the document from the map.
# map.get(Person, id)
#
# @param [ Class ] klass The class of the document.
# @param [ Object, Hash ] identifier The document id or selector.
#
# @return [ Document ] The matching document.
#
# @since 2.1.0
def get(klass, identifier)
return nil unless Mongoid.identity_map_enabled?
documents_for(klass)[identifier]
end
# Remove the document from the identity map.
#
# @example Remove the document.
# map.remove(person)
#
# @param [ Document ] document The document to remove.
#
# @return [ Document, nil ] The removed document.
#
# @since 2.1.0
def remove(document)
return nil unless Mongoid.identity_map_enabled? && document && document.id
documents_for(document.class).delete(document.id)
end
# Puts a document in the identity map, accessed by its id.
#
# @example Put the document in the map.
# identity_map.set(document)
#
# @param [ Document ] document The document to place in the map.
#
# @return [ Document ] The provided document.
#
# @since 2.1.0
def set(document)
return nil unless Mongoid.identity_map_enabled? && document && document.id
documents_for(document.class)[document.id] = document
end
# Set a document in the identity map for the provided selector,
# appending to the list of documents already cached for it.
#
# @example Set the document in the map.
# identity_map.set_many(document, { :person_id => person.id })
#
# @param [ Document ] document The document to set.
# @param [ Hash ] selector The selector to identify it.
#
# @return [ Array<Document> ] The documents.
#
# @since 2.2.0
def set_many(document, selector)
(documents_for(document.class)[selector] ||= []).push(document)
end
# Set a single document in the identity map for the provided selector.
#
# @example Set the document in the map.
# identity_map.set_one(document, { :person_id => person.id })
#
# @param [ Document ] document The document to set.
# @param [ Hash ] selector The selector to identify it.
#
# @return [ Document ] The matching document.
#
# @since 2.2.0
def set_one(document, selector)
documents_for(document.class)[selector] = document
end
# Set a custom field in the identity map
#
# @example Set the custom field in the map.
# identity_map.set_custom_fields_hash("current_user_liked", like.id, true)
#
# @param [ Object ] identifier The custom field identifier.
# @param [ Object ] key The custom field hash key.
# @param [ Object ] value The custom field value for the key.
#
# @return [ Hash ] The matching hash.
#
# @since 2.2.0
def set_custom_fields_hash(identifier, key, value)
return nil unless Mongoid.identity_map_enabled? && key && value
((self["custom_fields"] ||= {})[identifier] ||= {})[key] = value
end
private
# Get the documents in the identity map for a specific class.
#
# @example Get the documents for the class.
# map.documents_for(Person)
#
# @param [ Class ] klass The class to retrieve.
#
# @return [ Hash ] The documents.
#
# @since 2.1.0
def documents_for(klass)
self[klass] ||= {}
end
class << self
# For ease of access we provide the same API to the identity map on the
# class level, which in turn just gets the identity map that is on the
# current thread.
#
# @example Get a document from the current identity map by id.
# IdentityMap.get(id)
#
# @example Set a document in the current identity map.
# IdentityMap.set(document)
#
# @since 2.1.0
# The << appends the :to options hash as the delegate call's final
# argument, forwarding every Hash and IdentityMap instance method to
# the per-thread map.
delegate *(
Hash.public_instance_methods(false) +
IdentityMap.public_instance_methods(false) <<
{ :to => :"Mongoid::Threaded.identity_map" }
)
end
end
|
# Gem version constant for the classyfier gem.
module Classyfier
VERSION = "0.0.1"
end
Bumped version number
module Classyfier
VERSION = "0.1.0"
end |
require "mongoid"
module Mongoid
module Acts
module Tree
# Inclusion hook: exposes the acts_as_tree macro on the including
# Mongoid model class.
def self.included(model)
model.class_eval do
extend InitializerMethods
end
end
# Provides the acts_as_tree class macro that wires tree behaviour
# into a Mongoid document class.
module InitializerMethods
# Configure tree behaviour on the including model.
#
# Options (all field names are strings):
# :parent_id_field - field holding the parent's id (default "parent_id")
# :path_field - array of ancestor ids, root first (default "path")
# :depth_field - integer depth, 0 for roots (default "depth")
# :order - sort order used by tree queries (optional)
def acts_as_tree(options = {})
options = {
:parent_id_field => "parent_id",
:path_field => "path",
:depth_field => "depth"
}.merge(options)
# NOTE(review): write_inheritable_attribute / class_inheritable_reader
# were removed in Rails 3.1; this targets older ActiveSupport.
write_inheritable_attribute :acts_as_tree_options, options
class_inheritable_reader :acts_as_tree_options
include InstanceMethods
include Fields
extend Fields
extend ClassMethods
field parent_id_field, :type => BSON::ObjectId
field path_field, :type => Array, :default => [], :index => true
field depth_field, :type => Integer, :default => 0
self.class_eval do
# Writing the parent id re-parents the node through the new
# parent's children list (without saving); nil detaches it as a root.
define_method "#{parent_id_field}=" do | new_parent_id |
if new_parent_id.present?
new_parent = self.class.find new_parent_id
new_parent.children.push self, false
else
self.write_attribute parent_id_field, nil
self[path_field] = []
self[depth_field] = 0
end
end
end
after_save :move_children
validate :will_save_tree
before_destroy :destroy_descendants
end
end
module ClassMethods
# All nodes without a parent, in the configured tree order.
def roots
self.where(parent_id_field => nil).order_by tree_order
end
end
# Per-document tree behaviour mixed into the model.
module InstanceMethods
# Read a tree field by its configured name.
def [](field_name)
self.send field_name
end
# Write a tree field by its configured name.
def []=(field_name, value)
self.send "#{field_name}=", value
end
# Equality by identity, or by class plus _id.
def ==(other)
return true if other.equal?(self)
return true if other.instance_of?(self.class) and other._id == self._id
false
end
# Validation hook: rejects saves that would create a cycle
# (flagged via @_cyclic by Children#<<).
def will_save_tree
if @_cyclic
errors.add(:base, "Can't be children of a descendant")
end
end
# Recompute parent_id/path/depth from the current parent; saves only
# when a parent exists. Used to reposition children after a move.
def fix_position
if parent.nil?
self.write_attribute parent_id_field, nil
self[path_field] = []
self[depth_field] = 0
else
self.write_attribute parent_id_field, parent._id
self[path_field] = parent[path_field] + [parent._id]
self[depth_field] = parent[depth_field] + 1
self.save
end
end
# The parent document, or nil for a root. @_parent (when set) short-
# circuits the lookup by id.
def parent
@_parent or (self[parent_id_field].nil? ? nil : self.class.find(self[parent_id_field]))
end
# True when this node has no parent.
def root?
self[parent_id_field].nil?
end
# The root of this node's tree (self when already a root).
def root
self[path_field].first.nil? ? self : self.class.find(self[path_field].first)
end
# All ancestors, ordered by depth (root first).
def ancestors
return [] if root?
self.class.where(:_id.in => self[path_field]).order_by(depth_field)
end
# Ancestors plus self (self last).
def self_and_ancestors
ancestors << self
end
# Nodes sharing this node's parent, excluding self.
def siblings
self.class.where(:_id.ne => self._id, parent_id_field => self[parent_id_field]).order_by tree_order
end
# Nodes sharing this node's parent, including self.
def self_and_siblings
self.class.where(parent_id_field => self[parent_id_field]).order_by tree_order
end
# Direct children wrapped in the Children proxy (fresh query each call).
def children
Children.new self
end
# Replace the children list wholesale.
def children=(new_children_list)
self.children.clear
new_children_list.each do | child |
self.children << child
end
end
alias replace children=
# All nodes below this one at any depth, in tree order.
def descendants
# workaround for mongoid unexpected behavior: treat any non-false
# @new_record value as "new", and new records have no descendants
_new_record_var = self.instance_variable_get(:@new_record)
_new_record = _new_record_var != false
return [] if _new_record
self.class.all_in(path_field => [self._id]).order_by tree_order
end
# Self plus all descendants (self first).
def self_and_descendants
[self] + self.descendants
end
def is_ancestor_of?(other)
other[path_field].include?(self._id)
end
def is_or_is_ancestor_of?(other)
(other == self) or is_ancestor_of?(other)
end
def is_descendant_of?(other)
self[path_field].include?(other._id)
end
def is_or_is_descendant_of?(other)
(other == self) or is_descendant_of?(other)
end
def is_sibling_of?(other)
(other != self) and (other[parent_id_field] == self[parent_id_field])
end
def is_or_is_sibling_of?(other)
(other == self) or is_sibling_of?(other)
end
# after_save hook: repositions children once after a move; toggling
# @_will_move off prevents recursive re-entry while children save.
def move_children
if @_will_move
@_will_move = false
self.children.each do | child |
child.fix_position
child.save
end
@_will_move = true
end
end
# before_destroy hook: destroys the whole subtree.
def destroy_descendants
self.descendants.each &:destroy
end
end
#proxy class
# Array-backed proxy over a node's direct children.
class Children < Array
#TODO: improve accessors to options to eliminate object[object.parent_id_field]
# Materialize the owner's direct children into this Array.
def initialize(owner)
@parent = owner
self.concat find_children_for_owner.to_a
end
#Add new child to list of object children.
#Flags a cycle (via @_cyclic) instead of re-parenting when @parent is
#already a descendant of object; otherwise rewrites parent/path/depth.
def <<(object, will_save=true)
if object.descendants.include? @parent
object.instance_variable_set :@_cyclic, true
else
object.write_attribute object.parent_id_field, @parent._id
object[object.path_field] = @parent[@parent.path_field] + [@parent._id]
object[object.depth_field] = @parent[@parent.depth_field] + 1
object.instance_variable_set :@_will_move, true
object.save if will_save
end
super(object)
end
# Build a new child from attributes and append it.
def build(attributes)
child = @parent.class.new(attributes)
self.push child
child
end
alias create build
alias push <<
#Deletes object only from children list.
#To delete object use <tt>object.destroy</tt>.
def delete(object_or_id)
object = case object_or_id
# FIX: the bson constant is BSON::ObjectId (lowercase "d");
# BSON::ObjectID raised NameError whenever a child was deleted by id.
when String, BSON::ObjectId
@parent.class.find object_or_id
else
object_or_id
end
object.write_attribute object.parent_id_field, nil
object[object.path_field] = []
object[object.depth_field] = 0
object.save
super(object)
end
#Clear children list (detaches each child; does not destroy them)
def clear
self.each do | child |
@parent.children.delete child
end
end
private
# Query for the owner's direct children in the configured tree order.
def find_children_for_owner
@parent.class.where(@parent.parent_id_field => @parent.id).
order_by @parent.tree_order
end
end
# Accessors for the configured tree field names; mixed in both as
# instance methods (include) and class methods (extend).
module Fields
# Name of the field storing the parent document's id.
def parent_id_field
acts_as_tree_options[:parent_id_field]
end
# Name of the field storing the array of ancestor ids.
def path_field
acts_as_tree_options[:path_field]
end
# Name of the field storing the node's depth (0 for roots).
def depth_field
acts_as_tree_options[:depth_field]
end
# Sort order for tree queries; [] when no :order option was given.
def tree_order
# || instead of `or`: identical result here, but `or`'s low
# precedence is a known trap in value expressions.
acts_as_tree_options[:order] || []
end
end
end
end
end
fix name: BSON::ObjectID => BSON::ObjectId
require "mongoid"
module Mongoid
module Acts
module Tree
# Inclusion hook: exposes the acts_as_tree macro on the including
# Mongoid model class.
def self.included(model)
model.class_eval do
extend InitializerMethods
end
end
# Provides the acts_as_tree class macro that wires tree behaviour
# into a Mongoid document class.
module InitializerMethods
# Configure tree behaviour on the including model.
#
# Options (all field names are strings):
# :parent_id_field - field holding the parent's id (default "parent_id")
# :path_field - array of ancestor ids, root first (default "path")
# :depth_field - integer depth, 0 for roots (default "depth")
# :order - sort order used by tree queries (optional)
def acts_as_tree(options = {})
options = {
:parent_id_field => "parent_id",
:path_field => "path",
:depth_field => "depth"
}.merge(options)
# NOTE(review): write_inheritable_attribute / class_inheritable_reader
# were removed in Rails 3.1; this targets older ActiveSupport.
write_inheritable_attribute :acts_as_tree_options, options
class_inheritable_reader :acts_as_tree_options
include InstanceMethods
include Fields
extend Fields
extend ClassMethods
field parent_id_field, :type => BSON::ObjectId
field path_field, :type => Array, :default => [], :index => true
field depth_field, :type => Integer, :default => 0
self.class_eval do
# Writing the parent id re-parents the node through the new
# parent's children list (without saving); nil detaches it as a root.
define_method "#{parent_id_field}=" do | new_parent_id |
if new_parent_id.present?
new_parent = self.class.find new_parent_id
new_parent.children.push self, false
else
self.write_attribute parent_id_field, nil
self[path_field] = []
self[depth_field] = 0
end
end
end
after_save :move_children
validate :will_save_tree
before_destroy :destroy_descendants
end
end
module ClassMethods
# All nodes without a parent, in the configured tree order.
def roots
self.where(parent_id_field => nil).order_by tree_order
end
end
# Per-document tree behaviour mixed into the model.
module InstanceMethods
# Read a tree field by its configured name.
def [](field_name)
self.send field_name
end
# Write a tree field by its configured name.
def []=(field_name, value)
self.send "#{field_name}=", value
end
# Equality by identity, or by class plus _id.
def ==(other)
return true if other.equal?(self)
return true if other.instance_of?(self.class) and other._id == self._id
false
end
# Validation hook: rejects saves that would create a cycle
# (flagged via @_cyclic by Children#<<).
def will_save_tree
if @_cyclic
errors.add(:base, "Can't be children of a descendant")
end
end
# Recompute parent_id/path/depth from the current parent; saves only
# when a parent exists. Used to reposition children after a move.
def fix_position
if parent.nil?
self.write_attribute parent_id_field, nil
self[path_field] = []
self[depth_field] = 0
else
self.write_attribute parent_id_field, parent._id
self[path_field] = parent[path_field] + [parent._id]
self[depth_field] = parent[depth_field] + 1
self.save
end
end
# The parent document, or nil for a root. @_parent (when set) short-
# circuits the lookup by id.
def parent
@_parent or (self[parent_id_field].nil? ? nil : self.class.find(self[parent_id_field]))
end
# True when this node has no parent.
def root?
self[parent_id_field].nil?
end
# The root of this node's tree (self when already a root).
def root
self[path_field].first.nil? ? self : self.class.find(self[path_field].first)
end
# All ancestors, ordered by depth (root first).
def ancestors
return [] if root?
self.class.where(:_id.in => self[path_field]).order_by(depth_field)
end
# Ancestors plus self (self last).
def self_and_ancestors
ancestors << self
end
# Nodes sharing this node's parent, excluding self.
def siblings
self.class.where(:_id.ne => self._id, parent_id_field => self[parent_id_field]).order_by tree_order
end
# Nodes sharing this node's parent, including self.
def self_and_siblings
self.class.where(parent_id_field => self[parent_id_field]).order_by tree_order
end
# Direct children wrapped in the Children proxy (fresh query each call).
def children
Children.new self
end
# Replace the children list wholesale.
def children=(new_children_list)
self.children.clear
new_children_list.each do | child |
self.children << child
end
end
alias replace children=
# All nodes below this one at any depth, in tree order.
def descendants
# workaround for mongoid unexpected behavior: treat any non-false
# @new_record value as "new", and new records have no descendants
_new_record_var = self.instance_variable_get(:@new_record)
_new_record = _new_record_var != false
return [] if _new_record
self.class.all_in(path_field => [self._id]).order_by tree_order
end
# Self plus all descendants (self first).
def self_and_descendants
[self] + self.descendants
end
def is_ancestor_of?(other)
other[path_field].include?(self._id)
end
def is_or_is_ancestor_of?(other)
(other == self) or is_ancestor_of?(other)
end
def is_descendant_of?(other)
self[path_field].include?(other._id)
end
def is_or_is_descendant_of?(other)
(other == self) or is_descendant_of?(other)
end
def is_sibling_of?(other)
(other != self) and (other[parent_id_field] == self[parent_id_field])
end
def is_or_is_sibling_of?(other)
(other == self) or is_sibling_of?(other)
end
# after_save hook: repositions children once after a move; toggling
# @_will_move off prevents recursive re-entry while children save.
def move_children
if @_will_move
@_will_move = false
self.children.each do | child |
child.fix_position
child.save
end
@_will_move = true
end
end
# before_destroy hook: destroys the whole subtree.
def destroy_descendants
self.descendants.each &:destroy
end
end
#proxy class
# Array-backed proxy over a node's direct children.
class Children < Array
#TODO: improve accessors to options to eliminate object[object.parent_id_field]
# Materialize the owner's direct children into this Array.
def initialize(owner)
@parent = owner
self.concat find_children_for_owner.to_a
end
#Add new child to list of object children.
#Flags a cycle (via @_cyclic) instead of re-parenting when @parent is
#already a descendant of object; otherwise rewrites parent/path/depth.
def <<(object, will_save=true)
if object.descendants.include? @parent
object.instance_variable_set :@_cyclic, true
else
object.write_attribute object.parent_id_field, @parent._id
object[object.path_field] = @parent[@parent.path_field] + [@parent._id]
object[object.depth_field] = @parent[@parent.depth_field] + 1
object.instance_variable_set :@_will_move, true
object.save if will_save
end
super(object)
end
# Build a new child from attributes and append it.
def build(attributes)
child = @parent.class.new(attributes)
self.push child
child
end
alias create build
alias push <<
#Deletes object only from children list.
#To delete object use <tt>object.destroy</tt>.
def delete(object_or_id)
object = case object_or_id
when String, BSON::ObjectId
@parent.class.find object_or_id
else
object_or_id
end
object.write_attribute object.parent_id_field, nil
object[object.path_field] = []
object[object.depth_field] = 0
object.save
super(object)
end
#Clear children list (detaches each child; does not destroy them)
def clear
self.each do | child |
@parent.children.delete child
end
end
private
# Query for the owner's direct children in the configured tree order.
def find_children_for_owner
@parent.class.where(@parent.parent_id_field => @parent.id).
order_by @parent.tree_order
end
end
# Accessors for the configured tree field names; mixed in both as
# instance methods (include) and class methods (extend).
module Fields
# Name of the field storing the parent document's id.
def parent_id_field
acts_as_tree_options[:parent_id_field]
end
# Name of the field storing the array of ancestor ids.
def path_field
acts_as_tree_options[:path_field]
end
# Name of the field storing the node's depth (0 for roots).
def depth_field
acts_as_tree_options[:depth_field]
end
# Sort order for tree queries; [] when no :order option was given.
def tree_order
# || instead of `or`: identical result here, but `or`'s low
# precedence is a known trap in value expressions.
acts_as_tree_options[:order] || []
end
end
end
end
end
|
# Numbers to Commas Solo Challenge
# I spent [4] hours on this challenge.
# Complete each step below according to the challenge directions and
# include it in this file. Also make sure everything that isn't code
# is commented in the file.
# 0. Pseudocode
=begin
INPUT: Obtain a positive number
Create a container object called string_number
Inside of the container object store the conversion of the number to a string
IF the length of the number is < 4
return the number
ELSE IF number is greater than or equal to 4 AND less than or equal to 6 THEN
count from the last item in the area three spots back
add a comma
return the string_number with the newly added commas
ELSE IF number is greater than or equal to 7 AND less than or equal to 8 THEN
count from the last item in the area three spots back
add a comma
count three more spots back THEN
add another comman
return the string_number with the newly added commas
END IF
OUTPUT: Return a comma-separated string
=end
# 1. Initial Solution
=begin
def separate_comma(number)
number = number.to_s
if number.length < 4
return number
elsif number.length >= 4 && number.length <= 6
number.insert(-4, ",")
return number
elsif number.length >= 7 && number.length <=8
number.insert(-4, ",") && number.insert(-8, ",")
return number
end
end
=end
# 2. Refactored Solution
# Convert a non-negative integer to a string with thousands separators.
#
# Generalized: grouping digits with a regex handles numbers of any
# length, where the original branch-per-length version returned nil
# for 9 or more digits. Output for 1-8 digit inputs is unchanged.
def separate_comma(number)
number = number.to_s
# Reverse so "every three digits from the right" becomes "every three
# digits from the left", insert commas between groups, reverse back.
number.reverse.gsub(/(\d{3})(?=\d)/, '\1,').reverse
end
# 3. Reflection
=begin
What was your process for breaking the problem down? What different approaches did you consider?
So I really over thought this problem. My first approach was centered on turning the number parameter into a array, reversing the array, adding a comma after index three, undoing the array, and then return the string.
After a hour and half of that I was pretty frustrated. I had to take a break. When I came back I scrapped the whole thing, and started fresh.
One big thing I noticed was that my pseudocode was very vague, which meant that I was trying to solve a problem that I didn't fully understand.
I looked at the specs more and realized that there was a pattern in terms of comma position and number length.
When I notice that I went back and started pseudo coding. Once I was done with that I implemented my solution. All in all that took me around 30 so minutes.
I find pseudo coding helpful because it helps me determine where my failure points are. If I pseudocode first and can't solve the problem then I know I don't understand it.
I used the insert method for the refactored solution. I tried to stray away from methods that were to cryptic. Instead, choosing one that decreased the lengthen of my code, but didn't sacrifice readability.
I do think that my code is more readable. I took out some of the explicit returns along with shortning some of the conditional statements. I think conditionals can get really messy because it's so hard to follow what's going on.
How did you initially iterate through the data structure?
Do you feel your refactored solution is more readable than your initial solution? Why?
=end
Solve separate comma challenge
# Numbers to Commas Solo Challenge
# I spent [4] hours on this challenge.
# Complete each step below according to the challenge directions and
# include it in this file. Also make sure everything that isn't code
# is commented in the file.
# 0. Pseudocode
=begin
INPUT: Obtain a positive number
Create a container object called string_number
Inside of the container object store the conversion of the number to a string
IF the length of the number is < 4
return the number
ELSE IF number is greater than or equal to 4 AND less than or equal to 6 THEN
count from the last item in the area three spots back
add a comma
return the string_number with the newly added commas
ELSE IF number is greater than or equal to 7 AND less than or equal to 8 THEN
count from the last item in the area three spots back
add a comma
count three more spots back THEN
add another comman
return the string_number with the newly added commas
END IF
OUTPUT: Return a comma-separated string
=end
# 1. Initial Solution
#begin
# Initial solution. NOTE: this definition is live (the "#begin" above
# is just a comment) but is superseded by the refactored
# separate_comma defined further down, which redefines the method.
def separate_comma(number)
number = number.to_s
add_comma = []
add_another_comma = []
if number.length < 4
return number
elsif number.length >= 4 && number.length <= 6
# Work on the reversed string so "three digits from the right" is a
# fixed index; append a comma after the third digit.
reverse_string = number.reverse
1.times do
add_comma = reverse_string[2] + ","
end
reverse_string[2] = add_comma
return reverse_string.reverse
elsif number.length >= 7 && number.length <=8
reverse_string = number.reverse
2.times do
add_comma = reverse_string[2] + ","
add_another_comma = reverse_string[5] + ","
end
reverse_string[2] = add_comma
# Index 6 (not 5): the first replacement above lengthened the string
# by one character, shifting the second insertion point right.
reverse_string[6] = add_another_comma
return reverse_string.reverse
end
end
# 2. Refactored Solution
# Convert a non-negative integer to a string with thousands separators.
#
# Generalized: grouping digits with a regex handles numbers of any
# length, where the original branch-per-length version returned nil
# for 9 or more digits. Output for 1-8 digit inputs is unchanged.
def separate_comma(number)
number = number.to_s
# Reverse so "every three digits from the right" becomes "every three
# digits from the left", insert commas between groups, reverse back.
number.reverse.gsub(/(\d{3})(?=\d)/, '\1,').reverse
end
# 3. Reflection
=begin
So I really over thought this problem.
I started out with very vague pseudcode.
Which meant that I was trying to solve a problem that I didn't fully understand.
So I looked at the specs more,and realized that there was a pattern in terms of comma position and number length.
When I noticed that I went back and re-pseudocoded my solution.
I broke the problem down into three different conditional expressions.
The first conditional branch was to return the original array if the length of the number was less than 4.
The second conditional branch add one comma to numbers between 4 and 6.
Finally, the last conditional branch added two commas to numbers between 7 and 8.
I find pseudo coding helpful because it helps me determine where my failure points are. If I pseudocode first and can't solve the problem then I know I don't understand it.
I initally reversed my string, iterated through the reversed string, added a comma(s), pushed the number with a comma into the array, then returned the reversed string.
I used the insert method for my refactored solution. I tried to stray away from methods that were to cryptic. Instead, choosing one that decreased the length of my code, but didn't sacrifice readability.
I do think that my code is more readable. I took out the explicit returns, shortened my conditional statements, and used a built in Ruby method to replace having to reverse my string and iterate through it to place the comma.
=end
|
module OVIRT
class Client
# True when the cluster's major version segment equals +major+.
# NOTE(review): compares the first dotted segment as a string, so
# callers must pass the major version as a string.
def cluster_version?(cluster_id, major)
c = cluster(cluster_id)
c.version.split('.')[0] == major
end
# List clusters, restricted server-side (via the search query) to the
# current datacenter unless :search is supplied.
def clusters(opts={})
headers = {:accept => "application/xml; detail=datacenters"}
search= opts[:search] || ("datacenter=%s" % current_datacenter.name)
http_get("/clusters?search=%s" % CGI.escape(search), headers).xpath('/clusters/cluster').collect do |cl|
OVIRT::Cluster.new(self, cl)
end
end
# Fetch a single cluster by id, including datacenter detail.
def cluster(cluster_id)
headers = {:accept => "application/xml; detail=datacenters"}
cluster_xml = http_get("/clusters/%s" % cluster_id, headers)
OVIRT::Cluster.new(self, cluster_xml.root)
end
# Networks attached to a cluster (defaults to the current cluster).
def networks(opts)
cluster_id = opts[:cluster_id] || current_cluster.id
http_get("/clusters/%s/networks" % cluster_id, http_headers).xpath('/networks/network').collect do |cl|
OVIRT::Network.new(self, cl)
end
end
end
end
Fixed a bug: the clusters method was not filtering by the current data center.
Filtering by current data center does not work in the RHEV 3.0 REST API,
so a client-side workaround was added.
module OVIRT
class Client
# True when the cluster's major version segment equals +major+.
# NOTE(review): compares the first dotted segment as a string, so
# callers must pass the major version as a string.
def cluster_version?(cluster_id, major)
c = cluster(cluster_id)
c.version.split('.')[0] == major
end
# List clusters belonging to the current datacenter (or matching a
# caller-supplied :search query). Results are additionally filtered
# client-side because the server-side datacenter filter is unreliable
# on RHEV 3.0.
def clusters(opts={})
headers = {:accept => "application/xml; detail=datacenters"}
search= opts[:search] || ("datacenter=%s" % current_datacenter.name)
http_get("/clusters?search=%s" % CGI.escape(search), headers).xpath('/clusters/cluster').collect do |cl|
cluster = OVIRT::Cluster.new(self, cl)
#the following line is needed as a work-around a bug in RHEV 3.0 rest-api
#(the block yields nil for non-matching clusters; compact drops them)
cluster if cluster.datacenter.id == current_datacenter.id
end.compact
end
# Fetch a single cluster by id, including datacenter detail.
def cluster(cluster_id)
headers = {:accept => "application/xml; detail=datacenters"}
cluster_xml = http_get("/clusters/%s" % cluster_id, headers)
OVIRT::Cluster.new(self, cluster_xml.root)
end
# Networks attached to a cluster (defaults to the current cluster).
def networks(opts)
cluster_id = opts[:cluster_id] || current_cluster.id
http_get("/clusters/%s/networks" % cluster_id, http_headers).xpath('/networks/network').collect do |cl|
OVIRT::Network.new(self, cl)
end
end
end
end
|
module Morpheus
module Cli
# Gem version of the Morpheus CLI.
VERSION = "4.1.2"
end
end
version bump 4.1.3
module Morpheus
module Cli
# Gem version of the Morpheus CLI.
VERSION = "4.1.3"
end
end
|
module Morpheus
module Cli
# Gem version of the Morpheus CLI.
VERSION = "3.6.21"
end
end
VERSION=3.6.22
module Morpheus
module Cli
# Gem version of the Morpheus CLI.
VERSION = "3.6.22"
end
end
|
require 'spqr/spqr'
require 'rhubarb/rhubarb'
require 'mrg/grid/config/Group'
require 'mrg/grid/config/QmfUtils'
module Mrg
module Grid
module Config
# forward declarations
class NodeMembership
end
class Store
end
class Node
include ::Rhubarb::Persisting
include ::SPQR::Manageable
qmf_package_name 'mrg.grid.config'
qmf_class_name 'Node'
### Property method declarations
# property name sstr
declare_column :name, :string, :not_null
declare_index_on :name
declare_column :pool, :string
declare_column :idgroup, :integer, references(Group)
declare_column :provisioned, :boolean, :default, :true
declare_column :last_checkin, :integer
qmf_property :name, :sstr, :index=>true
qmf_property :provisioned, :bool
qmf_property :last_checkin, :uint64
### Schema method declarations
[:MakeProvisioned, :MakeUnprovisioned].each do |name|
define_method name do
log.debug "#{name} called on #{self}"
self.provisioned = (name == :MakeProvisioned)
# NB: these don't change the dirty status of this node
end
expose name do |args| ; end
end
# GetLastCheckinTime
# * time (uint32/O)
# Returns the node's last checkin timestamp; a never-seen node is
# defaulted to 0 (the ||= also assigns that default on the instance).
def GetLastCheckinTime()
log.debug "GetLastCheckinTime called on node #{self.inspect}"
# Assign values to output parameters
self.last_checkin ||= 0
# Return value
return self.last_checkin
end
expose :GetLastCheckinTime do |args|
args.declare :time, :uint32, :out, {}
end
# GetConfig
# * config (map/O)
# A map(parameter, value) representing the configuration for the node supplied
def GetConfig()
log.debug "GetConfig called on node #{self.inspect}"
# Start from the default group's configuration.
config = Group.DEFAULT_GROUP.GetConfig
log.debug "Starting with DEFAULT_GROUP config, which is #{config.inspect}"
# Memberships are stored most-important-first, so reverse_each makes
# more important groups merge over (override) less important ones.
memberships.reverse_each do |grp|
log.debug("#{self.name} is a member of #{grp.name}")
log.debug("#{grp.name} has #{grp.features.size} features")
config = config.merge(grp.GetConfig)
end
# The node's identity group is merged last, so it wins overall.
config = config.merge(idgroup.GetConfig)
config
end
expose :GetConfig do |args|
args.declare :config, :map, :out, {}
end
# CheckConfigVersion
# * version (uint32/I)
def CheckConfigVersion(version)
# Print values of input parameters
log.debug "CheckConfigVersion: version => #{version.inspect}"
end
expose :CheckConfigVersion do |args|
args.declare :version, :uint32, :in, {}
end
def GetIdentityGroup
log.debug "GetIdentityGroup called on node #{self.inspect}"
self.idgroup ||= id_group_init
end
expose :GetIdentityGroup do |args|
args.declare :group, :objId, :out, {}
end
# ModifyMemberships
# * command (sstr/I)
# Valid commands are 'ADD', 'REMOVE', and 'REPLACE'.
# * groups (map/I)
# A list of groups, in inverse priority order (most important first)
# * options (map/I)
def ModifyMemberships(command,groups,options={})
# Print values of input parameters
log.debug "ModifyMemberships: command => #{command.inspect}"
log.debug "ModifyMemberships: groups => #{groups.inspect}"
log.debug "ModifyMemberships: options => #{options.inspect}"
# Resolve all group names to Group rows up front so an invalid name
# aborts before any membership rows are modified.
groups = FakeList.normalize(groups).to_a.map do |gn|
group = Group.find_first_by_name(gn)
raise "Invalid group #{gn.inspect}" unless group
group
end
command = command.upcase
case command
when "ADD", "REMOVE" then
groups.each do |grow|
gn = grow.name
# Delete any prior mappings for each supplied grp in either case
# (identity-group memberships are preserved).
NodeMembership.find_by(:node=>self, :grp=>grow).map {|nm| nm.delete unless nm.grp.is_identity_group}
# Add new mappings when requested
NodeMembership.create(:node=>self, :grp=>grow) if command == "ADD"
end
when "REPLACE" then
# Drop every current membership, then recreate from the given list.
memberships.map {|nm| nm.delete}
groups.each do |grow|
gn = grow.name
NodeMembership.create(:node=>self, :grp=>grow)
end
else raise ArgumentError.new("invalid command #{command}")
end
# Mark this node dirty so configuration versioning sees the change.
DirtyElement.dirty_node(self)
end
expose :ModifyMemberships do |args|
args.declare :command, :sstr, :in, {}
args.declare :groups, :map, :in, {}
args.declare :options, :map, :in, {}
end
# GetMemberships
# * list (map/O)
# A list of the groups associated with this node, in inverse priority order (most important first), not including the identity group
def GetMemberships()
log.debug("GetMemberships called on node #{self.inspect}")
group_names = memberships.map { |g| g.name }
FakeList[*group_names]
end
expose :GetMemberships do |args|
args.declare :groups, :map, :out, {}
end
# Validate ensures the following for a given node N:
# 1. if N enables some feature F that depends on F', N must also include F',
# enable F', or enable some feature F'' that includes F'
# 2. if N enables some feature F that depends on some param P being set,
# N must provide a value for P
#
# Other consistency properties are ensured by other parts of the store (e.g.
# that a group does not enable conflicting features). Returns true if the
# configuration is valid, or an explanation if it is not.
def validate
orphaned_deps = Feature.dependencies_for_node(self) - Feature.features_for_node(self)
# Case 2 (parameter dependencies) is not implemented yet, so this list is
# always empty and can never trigger a failure.
orphaned_params = [] # FIXME
return true if orphaned_deps == [] && orphaned_params == []
result = {}
result["Unsatisfied feature dependencies"] = orphaned_deps if orphaned_deps != []
result["Unsatisfied parameter dependencies"] = orphaned_params if orphaned_params != []
# On failure, the explanation is a [node name, {category => items}] pair.
[self.name, result]
end
# Finds nodes affected by any dirty element: directly-dirtied nodes, plus
# nodes whose groups, group features, or group params were dirtied.
# NOTE(review): the third and fifth UNION arms are identical; UNION removes
# the duplicates, but one arm could be dropped.
declare_custom_query :get_dirty_nodes, <<-QUERY
SELECT * FROM __TABLE__ WHERE row_id IN (
SELECT nodemembership.node AS node FROM dirtyelement JOIN nodemembership WHERE dirtyelement.grp = nodemembership.grp UNION
SELECT node FROM dirtyelement UNION
SELECT nodemembership.node AS node FROM dirtyelement JOIN groupfeatures, nodemembership WHERE dirtyelement.feature = groupfeatures.feature AND nodemembership.grp = groupfeatures.grp UNION
SELECT nodemembership.node AS node FROM dirtyelement JOIN groupparams, nodemembership WHERE dirtyelement.parameter = groupparams.param AND nodemembership.grp = groupparams.grp UNION
SELECT nodemembership.node AS node FROM dirtyelement JOIN groupfeatures, nodemembership WHERE dirtyelement.feature = groupfeatures.feature AND nodemembership.grp = groupfeatures.grp
)
QUERY
private
# Union of the features of every group this node belongs to, preserving
# first-seen order. (Removed an unused local, current_features, that was
# computed and discarded on every iteration.)
def my_features
my_groups.inject([]) do |acc, grp|
acc | grp.features
end
end
# All groups contributing to this node's config, least important first:
# the default group, explicit memberships, then the identity group.
def my_groups
[Group.DEFAULT_GROUP] + memberships + [idgroup]
end
# Stable identity-group name derived from the node name; the "+++" prefix
# presumably keeps it out of the user-visible group namespace — confirm.
def idgroupname
"+++#{Digest::MD5.hexdigest(self.name)}"
end
# Create and persist the identity group and this node's membership in it.
def id_group_init
ig = Group.create(:name=>idgroupname, :is_identity_group=>true)
NodeMembership.create(:node=>self, :grp=>ig)
ig
end
# This node's explicit (non-identity) groups, most important first.
def memberships
NodeMembership.find_by(:node=>self).map{|nm| nm.grp}.select {|g| not g.is_identity_group}
end
end
end
end
end
Node#validate should now find two of the three error cases.
require 'spqr/spqr'
require 'rhubarb/rhubarb'
require 'mrg/grid/config/Group'
require 'mrg/grid/config/QmfUtils'
module Mrg
module Grid
module Config
# forward declarations
class NodeMembership
end
class Store
end
class DirtyElement
end
# A configurable node in the grid, persisted via Rhubarb and exposed as a
# QMF-manageable object.
class Node
include ::Rhubarb::Persisting
include ::SPQR::Manageable
qmf_package_name 'mrg.grid.config'
qmf_class_name 'Node'
### Property method declarations
# property name sstr
declare_column :name, :string, :not_null
declare_index_on :name
declare_column :pool, :string
# Reference to this node's private identity group (created lazily by
# GetIdentityGroup).
declare_column :idgroup, :integer, references(Group)
declare_column :provisioned, :boolean, :default, :true
declare_column :last_checkin, :integer
qmf_property :name, :sstr, :index=>true
qmf_property :provisioned, :bool
qmf_property :last_checkin, :uint64
### Schema method declarations
# Generate the MakeProvisioned/MakeUnprovisioned QMF methods; each simply
# sets the provisioned flag according to which name was invoked.
[:MakeProvisioned, :MakeUnprovisioned].each do |name|
define_method name do
log.debug "#{name} called on #{self}"
self.provisioned = (name == :MakeProvisioned)
# NB: these don't change the dirty status of this node
end
expose name do |args| ; end
end
# GetLastCheckinTime
# * time (uint32/O)
# Returns the stored checkin time, defaulting it to zero when the node has
# never checked in.
def GetLastCheckinTime()
log.debug("GetLastCheckinTime called on node #{self.inspect}")
# The ||= expression yields the (possibly pre-existing) value, which
# becomes the method's return value.
self.last_checkin ||= 0
end
expose :GetLastCheckinTime do |args|
args.declare :time, :uint32, :out, {}
end
# GetConfig
# * config (map/O)
# A map(parameter, value) representing the configuration for the node supplied
# Builds the effective configuration by layering group configs: the default
# group first, then explicit memberships from least to most important
# (memberships is most-important-first, hence reverse_each), and finally the
# node's identity group, whose values win all conflicts.
def GetConfig()
log.debug "GetConfig called on node #{self.inspect}"
config = Group.DEFAULT_GROUP.GetConfig
log.debug "Starting with DEFAULT_GROUP config, which is #{config.inspect}"
memberships.reverse_each do |grp|
log.debug("#{self.name} is a member of #{grp.name}")
log.debug("#{grp.name} has #{grp.features.size} features")
config = config.merge(grp.GetConfig)
end
# NOTE(review): idgroup may be nil if GetIdentityGroup was never invoked;
# the merge below would then raise — confirm callers initialize it first.
config = config.merge(idgroup.GetConfig)
config
end
expose :GetConfig do |args|
args.declare :config, :map, :out, {}
end
# CheckConfigVersion
# * version (uint32/I)
# Currently a stub: logs the supplied version and performs no comparison.
def CheckConfigVersion(version)
# Print values of input parameters
log.debug "CheckConfigVersion: version => #{version.inspect}"
end
expose :CheckConfigVersion do |args|
args.declare :version, :uint32, :in, {}
end
# Returns this node's private identity group, creating and persisting it on
# first use via id_group_init.
def GetIdentityGroup
log.debug "GetIdentityGroup called on node #{self.inspect}"
self.idgroup ||= id_group_init
end
expose :GetIdentityGroup do |args|
args.declare :group, :objId, :out, {}
end
# ModifyMemberships
# * command (sstr/I)
# Valid commands are 'ADD', 'REMOVE', and 'REPLACE'.
# * groups (map/I)
# A list of groups, in inverse priority order (most important first)
# * options (map/I)
# Resolves the supplied group names to Group rows, rewrites this node's
# membership rows accordingly (identity-group memberships are never
# touched), and marks the node dirty.
def ModifyMemberships(command,groups,options={})
# Print values of input parameters
log.debug "ModifyMemberships: command => #{command.inspect}"
log.debug "ModifyMemberships: groups => #{groups.inspect}"
log.debug "ModifyMemberships: options => #{options.inspect}"
# Resolve each supplied name to a persisted Group, failing fast on unknowns.
groups = FakeList.normalize(groups).to_a.map do |gn|
group = Group.find_first_by_name(gn)
raise "Invalid group #{gn.inspect}" unless group
group
end
command = command.upcase
case command
when "ADD", "REMOVE" then
groups.each do |grow|
# Delete any prior mappings for each supplied grp in either case
NodeMembership.find_by(:node=>self, :grp=>grow).each {|nm| nm.delete unless nm.grp.is_identity_group}
# Add new mappings when requested
NodeMembership.create(:node=>self, :grp=>grow) if command == "ADD"
end
when "REPLACE" then
# BUGFIX: this previously iterated over +memberships+, which yields
# Group rows, so nm.delete destroyed the groups themselves. Delete the
# node's membership rows instead (identity group excluded).
NodeMembership.find_by(:node=>self).each {|nm| nm.delete unless nm.grp.is_identity_group}
groups.each do |grow|
NodeMembership.create(:node=>self, :grp=>grow)
end
else raise ArgumentError.new("invalid command #{command}")
end
DirtyElement.dirty_node(self)
end
expose :ModifyMemberships do |args|
args.declare :command, :sstr, :in, {}
args.declare :groups, :map, :in, {}
args.declare :options, :map, :in, {}
end
# GetMemberships
# * list (map/O)
# A list of the groups associated with this node, in inverse priority order (most important first), not including the identity group
def GetMemberships()
log.debug("GetMemberships called on node #{self.inspect}")
group_names = memberships.map { |g| g.name }
FakeList[*group_names]
end
expose :GetMemberships do |args|
args.declare :groups, :map, :out, {}
end
# Validate ensures the following for a given node N:
# 1. if N enables some feature F that depends on F', N must also include F',
# enable F', or enable some feature F'' that includes F'
# 2. if N enables some feature F that depends on some param P being set,
# N must provide a value for P
# 3. if N sets some param P that depends on some other param P',
# N must also set P'
#
# Other consistency properties are ensured by other parts of the store (e.g.
# that a group does not enable conflicting features). Returns true if the
# configuration is valid, or an explanation if it is not.
def validate
my_config = self.GetConfig # FIXME: it would be nice to not calculate this redundantly
orphaned_deps = Feature.dependencies_for_node(self) - Feature.features_for_node(self)
# Case 2: must-change parameters still left at their default value.
unset_params = my_unset_params(my_config)
# Case 3 (param-on-param dependencies) is not implemented yet, so this list
# is always empty and can never trigger a failure.
orphaned_params = []
return true if orphaned_deps == [] && unset_params == [] && orphaned_params == []
result = {}
result["Unsatisfied feature dependencies"] = orphaned_deps if orphaned_deps != []
result["Unset necessary parameters"] = unset_params if unset_params != []
result["Unsatisfied parameter dependencies"] = orphaned_params if orphaned_params != []
# On failure, the explanation is a [node name, {category => items}] pair.
[self.name, result]
end
# All nodes whose configuration may be stale. When anything marked the whole
# store dirty (KIND_EVERYTHING), every node is returned; otherwise the SQL
# query below narrows the set.
def Node.get_dirty_nodes
return Node.find_all() if DirtyElement.find_first_by_kind(DirtyElement.const_get("KIND_EVERYTHING"))
Node._get_dirty_nodes
end
# Finds nodes affected by dirty elements: directly-dirtied nodes, plus nodes
# whose groups, group features, or group params were dirtied.
# NOTE(review): the third and fifth UNION arms are identical; UNION removes
# the duplicates, but one arm could be dropped.
declare_custom_query :_get_dirty_nodes, <<-QUERY
SELECT * FROM __TABLE__ WHERE row_id IN (
SELECT nodemembership.node AS node FROM dirtyelement JOIN nodemembership WHERE dirtyelement.grp = nodemembership.grp UNION
SELECT node FROM dirtyelement UNION
SELECT nodemembership.node AS node FROM dirtyelement JOIN groupfeatures, nodemembership WHERE dirtyelement.feature = groupfeatures.feature AND nodemembership.grp = groupfeatures.grp UNION
SELECT nodemembership.node AS node FROM dirtyelement JOIN groupparams, nodemembership WHERE dirtyelement.parameter = groupparams.param AND nodemembership.grp = groupparams.grp UNION
SELECT nodemembership.node AS node FROM dirtyelement JOIN groupfeatures, nodemembership WHERE dirtyelement.feature = groupfeatures.feature AND nodemembership.grp = groupfeatures.grp
)
QUERY
private
# Must-change parameters that are still at their default value in this
# node's configuration: a parameter declared "must change" counts as unset
# when its configured value equals its declared default.
def my_unset_params(my_config = nil)
my_config ||= self.GetConfig
mc_params = Parameter.s_that_must_change
(my_config.keys & mc_params.keys).inject([]) do |acc,param|
dv = Parameter.find_first_by_name(param).default_val
# BUGFIX: was "my_config[param] = dv" (assignment, not comparison),
# which clobbered the config entry and flagged the param whenever the
# default value was truthy.
acc << param if my_config[param] == dv
acc
end
end
# Union of the features of every group this node belongs to, preserving
# first-seen order. (Removed an unused local, current_features.)
def my_features
my_groups.inject([]) do |acc, grp|
acc | grp.features
end
end
# All groups contributing to this node's config, least important first:
# the default group, explicit memberships, then the identity group.
def my_groups
[Group.DEFAULT_GROUP] + memberships + [idgroup]
end
# Stable identity-group name derived from the node name; the "+++" prefix
# presumably keeps it out of the user-visible group namespace — confirm.
def idgroupname
"+++#{Digest::MD5.hexdigest(self.name)}"
end
# Create and persist the identity group and this node's membership in it.
def id_group_init
ig = Group.create(:name=>idgroupname, :is_identity_group=>true)
NodeMembership.create(:node=>self, :grp=>ig)
ig
end
# This node's explicit (non-identity) groups, most important first.
def memberships
NodeMembership.find_by(:node=>self).map{|nm| nm.grp}.select {|g| not g.is_identity_group}
end
end
end
end
end
|
# coding: utf-8
#* internal/action.rb -- The framework of mysh internal actions.
module Mysh
#The mysh internal action class: a named command with a description and a
#private binding in which expressions are evaluated.
class Action
#The name of the action.
attr_reader :name
#The description of the action (an array of lines, via in_array).
attr_reader :description
#The action of the action.
#NOTE(review): @action is never assigned anywhere in this class, so this
#reader always returns nil — confirm whether it can be removed.
attr_reader :action
#Setup an internal action from its name and description; the evaluation
#binding is captured at construction time.
def initialize(name, description)
@name, @description = name, description.in_array
@exec_binding = mysh_binding
end
#Get information about the action: its name followed by its description
#lines.
def action_info
[@name].concat(@description)
end
private
#Create a binding for mysh to execute expressions in.
def mysh_binding
binding
end
#Evaluate the string in the my shell context.
def mysh_eval(str)
@exec_binding.eval(str)
end
end
end
Removed unused property.
# coding: utf-8
#* internal/action.rb -- The framework of mysh internal actions.
module Mysh
# A single internal mysh action: a named command plus its description,
# evaluated inside a private binding captured at construction time.
class Action
# The name of the action.
attr_reader :name
# The description of the action (an array of lines, via in_array).
attr_reader :description
# Set up an internal action from its name and description.
def initialize(name, description)
@name = name
@description = description.in_array
@exec_binding = mysh_binding
end
# Information about the action: its name followed by its description lines.
def action_info
[@name] + @description
end
private
# Create a binding for mysh to execute expressions in.
def mysh_binding
binding
end
# Evaluate the string in the my shell context.
def mysh_eval(str)
@exec_binding.eval(str)
end
end
end
|
module CloudDeploy
class S3Helper
require 'aws-sdk'
# Configure the global AWS credentials; both keys are mandatory.
def initialize (options = {})
key_id = options[:access_key_id]
secret = options[:secret_access_key]
if key_id == nil || key_id == ''
raise "access_key_id cannot be empty or nil"
end
if secret == nil || secret == ''
raise "secret_access_key cannot be empty or nil"
end
AWS.config({
:access_key_id => key_id,
:secret_access_key => secret
})
end
# Upload every file matching +asset_location+ (a Dir.glob pattern) into
# +bucket+, optionally under the +s3_path+ prefix.
def put_asset_in_s3(asset_location, bucket, s3_path = "")
puts "Copying asset #{asset_location} to S3 bucket #{bucket}"
target = AWS::S3.new.buckets[bucket]
Dir.glob(asset_location) do |file_name|
base_name = File.basename(file_name)
remote_name = (s3_path == "") ? "#{base_name}" : "#{s3_path}/#{base_name}"
puts " # Uploading #{remote_name}"
#Uploading with a temp name and renaming to get around some weird bug.
obj = target.objects["_#{remote_name}"]
obj.write(:data => File.open(file_name), :content_length => File.size(file_name), :content_type => 'application/zip', :multipart_threshold => 100 * 1024 * 1024)
obj.move_to(remote_name)
end
puts "Finished pushing assets to S3!"
end
end
class AWSDeployer
require 'aws-sdk'
require 'curses'
# Capture deployment settings and AWS credentials from the options hash.
# BUGFIX: the credential checks referenced undefined locals
# (access_key_id / secret_access_key), raising NameError on every call;
# read them from the options hash instead.
def initialize(options = {})
@template_location = options[:template_location]
@stack_name = options[:stack_name]
@cfn_vars = options[:cfn_vars]
@use_curses = options[:use_curses]
if (options[:access_key_id] == nil || options[:access_key_id] == '')
raise "access_key_id cannot be empty or nil"
end
if (options[:secret_access_key] == nil || options[:secret_access_key] == '')
raise "secret_access_key cannot be empty or nil"
end
@access_key_id = options[:access_key_id]
@secret_access_key = options[:secret_access_key]
configure_aws()
end
# Install the captured credentials into the global AWS SDK configuration.
def configure_aws()
AWS.config({
:access_key_id => @access_key_id,
:secret_access_key => @secret_access_key
})
end
# Upload every file matching +asset_location+ (a Dir.glob pattern) into
# the named +bucket+.
def put_asset_in_s3(asset_location, bucket)
puts "Copying asset #{asset_location} to S3 bucket #{bucket}"
target = AWS::S3.new.buckets[bucket]
Dir.glob(asset_location) do |file_name|
remote_name = File.basename(file_name)
puts " # Uploading #{remote_name}"
#Uploading with a temp name and renaming to get around some weird bug.
obj = target.objects["_#{remote_name}"]
obj.write(:data => File.open(file_name), :content_length => File.size(file_name), :content_type => 'application/zip', :multipart_threshold => 100 * 1024 * 1024)
obj.move_to(remote_name)
end
puts "Finished pushing assets to S3!"
end
# Ask CloudFormation to validate the template contents; raises when the
# response carries an error code.
def validate_template(cloudformation, template_contents)
puts " # validating template"
response = cloudformation.validate_template(template_contents)
raise "invalid template: #{response[:message]}" if response[:code]
puts " # # template VALID!"
end
# True when a stack with the given name exists, false otherwise.
def check_if_exists(stack_name)
cloudformation = AWS::CloudFormation.new
if cloudformation.stacks[stack_name].exists?
puts "stack exists"
true
else
puts "stack doesn't exist"
false
end
end
# Read the CloudFormation template from @template_location, validate it,
# create the stack (IAM-capable, rollback disabled), block until creation
# settles, and capture the stack outputs into @stack_outputs.
def deploy_cloudformation_template()
puts "Getting CloudFormation template at #{@template_location}"
app_template = File.read(@template_location, :encoding => 'UTF-8')
cloudformation = AWS::CloudFormation.new
app_stackname = current_stack_name
puts "deploying #{app_stackname}"
validate_template(cloudformation, app_template)
puts " # creating stack"
stack = cloudformation.stacks.create(app_stackname, app_template,
:capabilities => ['CAPABILITY_IAM'],
:disable_rollback => true,
:parameters => @cfn_vars
)
if (@use_curses)
check_stack_status_curses(current_stack_name)
else
check_stack_status(current_stack_name)
end
@stack_outputs = {}
stack.outputs.each do |output|
@stack_outputs[output.key] = output.value
end
end
# Request deletion of the named stack and block until the delete settles;
# :force_delete makes the monitor retry a DELETE_FAILED stack.
def delete_stack(stack_name)
puts "deleting #{stack_name}"
cloudformation = AWS::CloudFormation.new
stack = cloudformation.stacks[stack_name]
puts "#{stack_name} has current status #{stack.status}"
stack.delete
puts "AWS has been informed to delete #{stack_name} #{stack.status}."
if (@use_curses)
check_stack_status_curses(stack_name, {
:force_delete => true
})
else
check_stack_status(stack_name, {
:force_delete => true
})
end
puts "Delete has finished!"
end
# Poll the stack every ~15 seconds until it reaches a terminal state
# (created, create-failed, or deleted), then print the full event log and a
# status summary. With options[:force_delete], a DELETE_FAILED stack is
# re-deleted instead of aborting.
def check_stack_status(stack_name, options = {})
# NOTE(review): unused in this non-curses variant; the curses variant
# displays it.
status_title_message = "Monitoring AWS Stack Events for #{stack_name}"
cloudformation = AWS::CloudFormation.new
stack = cloudformation.stacks[stack_name]
if (stack.status == "CREATE_COMPLETE")
puts " # Create Complete!"
else
finished = false
while (!finished)
if (stack == nil || !stack.exists? || stack.status == "DELETE_COMPLETE")
puts "success! stack deleted."
finished = true
break
end
if (stack.status == "CREATE_COMPLETE")
puts "success! stack created!"
finished = true
break
elsif (stack.status == "CREATE_FAILED")
# NOTE(review): @app_name is never assigned in this class, so it
# interpolates as empty — confirm the intended message.
puts "failed to create #{@app_name} stack. #{stack.status_reason}"
finished = true
break
elsif (stack.status == "DELETE_FAILED")
if (options[:force_delete])
puts " # Delete failed, attempting delete again"
stack.delete
else
puts "failed to delete #{stack_name} stack. #{stack.status_reason}"
finished = true
break
end
end
# NOTE(review): this loop has no visible effect here — the empty
# conditional and the index counter are leftovers mirroring the curses
# variant.
index = 2
stack.events.each do |event|
event_message = "[#{event.timestamp}] #{event.logical_resource_id}: #{event.resource_status} #{event.resource_status_reason}"
if (event_message.include? "CREATE_COMPLETE")
end
index += 1
end
wait_sec = 15 # this is an interval to wait before checking the cloudformation stack status again
while (wait_sec > 0)
sleep 1
wait_sec -= 1
end
end
end
# NOTE(review): reached even after the stack was deleted; reading events on
# a deleted stack may raise — confirm.
stack.events.each do |event|
puts "#{event.timestamp},#{event.logical_resource_id}:,#{event.resource_status},#{event.resource_status_reason}"
end
puts "Status summary: #{stack.status} #{stack.status_reason}"
end
# Curses-based variant of check_stack_status: repaints the stack's event list
# on a curses screen while polling every ~15 seconds; falls back to the plain
# variant when curses cannot be loaded.
def check_stack_status_curses(stack_name, options = {})
begin
require 'curses'
# NOTE(review): rescue Exception is overly broad; rescue LoadError would
# capture exactly the missing-dependency case.
rescue Exception
puts "Curses dependency doesn't exist, using non curses version..."
return check_stack_status(stack_name, options)
end
Curses.init_screen
Curses.start_color
status_title_message = "Monitoring AWS Stack Events for #{stack_name}"
Curses.refresh
cloudformation = AWS::CloudFormation.new
stack = cloudformation.stacks[stack_name]
if (stack.status == "CREATE_COMPLETE")
Curses.setpos(1,0)
Curses.addstr("#{stack_name} is created")
sleep 2
Curses.close_screen
else
finished = false
while (!finished)
Curses.addstr(status_title_message)
if (stack == nil || !stack.exists? || stack.status == "DELETE_COMPLETE")
Curses.close_screen
puts "success! stack deleted."
finished = true
break
end
if (stack.status == "CREATE_COMPLETE")
Curses.close_screen
puts "success! stack created!"
finished = true
break
elsif (stack.status == "CREATE_FAILED")
Curses.close_screen
# NOTE(review): @app_name is never assigned in this class — confirm.
puts "failed to create #{@app_name} stack. #{stack.status_reason}"
finished = true
break
elsif (stack.status == "DELETE_FAILED")
if (options[:force_delete])
Curses.setpos(1, 0)
Curses.addstr("Delete failed, attempting delete again.")
stack.delete
else
Curses.close_screen
puts "failed to delete #{stack_name} stack. #{stack.status_reason}"
finished = true
break
end
end
# Paint one event per row, starting below the title/countdown rows.
index = 2
stack.events.each do |event|
event_message = "[#{event.timestamp}] #{event.logical_resource_id}: #{event.resource_status} #{event.resource_status_reason}"
# NOTE(review): empty conditional — dead code.
if (event_message.include? "CREATE_COMPLETE")
end
Curses.setpos(index, 0)
Curses.addstr(event_message)
index += 1
end
Curses.refresh
wait_sec = 15 # this is an interval to wait before checking the cloudformation stack status again
while (wait_sec > 0)
# NOTE(review): (wait_sec-15) is zero or negative, so the countdown
# marker never advances across the row — likely meant (15-wait_sec).
Curses.setpos(1, (wait_sec-15))
Curses.addstr(">")
Curses.refresh
sleep 1
wait_sec -= 1
end
Curses.clear
end
end
stack.events.each do |event|
puts "#{event.timestamp},#{event.logical_resource_id}:,#{event.resource_status},#{event.resource_status_reason}"
end
puts "Status summary: #{stack.status} #{stack.status_reason}"
end
# Re-point the given elastic IP at the instance produced by the most recent
# deploy (@stack_outputs['InstanceId']), or at options[:instance_id] as a
# fallback. Silently does nothing when the elastic IP does not exist.
def switch_elastic_ip(elastic_ip, options = {})
@elastic_ip = elastic_ip
# NOTE(review): @app_name is never assigned in this class — confirm.
puts "Switching elastic IP for #{@app_name}"
# NOTE(review): raises NoMethodError if called before
# deploy_cloudformation_template populated @stack_outputs — confirm.
instanceId = @stack_outputs['InstanceId']
if (instanceId == nil || instanceId == "")
instanceId = options[:instance_id]
if (instanceId == nil || instanceId == "")
raise "Instance Id is not found."
end
end
begin
eip = AWS::EC2::ElasticIp.new(@elastic_ip)
associateOptions = {
:instance => instanceId
}
if (eip.exists?)
eip.associate(associateOptions)
puts "New instance now associated with #{@elastic_ip}"
end
# NOTE(review): rescue Exception is overly broad and re-raises as a bare
# RuntimeError, losing the original class and backtrace.
rescue Exception => ex
raise "problem setting changing elastic ip. Exception Message: #{ex.message}"
end
end
# Delete every stack whose name contains both +app_env+ and +app_name+,
# except the currently-deployed stack.
# BUGFIX: the final branch was a bare "elsif" with no condition, which
# parsed the following puts as the condition and left the branch body
# empty. It is now a proper else.
def delete_old_stacks(app_env, app_name)
puts "DELETING OLD STACKS FOR #{app_env} APP #{app_name}"
current_app_stackname = current_stack_name
cloudformation = AWS::CloudFormation.new
cloudformation.stacks.each do |stack|
puts " # checking #{stack.name}"
if (stack.name == current_app_stackname)
puts " # # leaving #{stack.name}. (This is the current stack)"
elsif (stack.name.include? "#{app_env}") && (stack.name.include? "#{app_name}")
puts " # # DELETING stack: #{stack.name}"
stack.delete
puts " # # Delete sent!"
else
puts " # # leaving #{stack.name}. (This stack isn't my responsibility)"
end
end
end
private
# The configured stack name with dots replaced by dashes — presumably
# because CloudFormation stack names disallow periods; confirm.
def current_stack_name
@stack_name.to_s.gsub(".", "-")
end
end
class EBDeployer
require 'aws-sdk'
# Capture the Elastic Beanstalk application/environment names and the AWS
# credentials used for API calls.
# NOTE(review): the default options hash is replaced wholesale (not merged)
# when a caller passes any options — the :app_name/:environment_name
# defaults only apply when no argument is given at all.
def initialize(options = {
:app_name => "default",
:environment_name => "dev"
})
@app_name = options[:app_name]
@app_environment = options[:environment_name]
key_id = options[:access_key_id]
secret = options[:secret_access_key]
if key_id == nil || key_id == ''
raise "access_key_id cannot be empty or nil"
end
if secret == nil || secret == ''
raise "secret_access_key cannot be empty or nil"
end
@access_key_id = key_id
@secret_access_key = secret
end
# Register +version+ (creating it from the given S3 source bundle when it
# does not already exist) and roll the environment onto it.
def update_application(version, source_bundle, option_settings = [])
beanstalk = AWS::ElasticBeanstalk.new(
:access_key_id => @access_key_id,
:secret_access_key => @secret_access_key)
known_versions = beanstalk.client.describe_application_versions({
:application_name => @app_name,
:version_labels => [version]
})
if (known_versions.application_versions.length == 0)
beanstalk.client.create_application_version({
:application_name => @app_name,
:version_label => version,
:source_bundle => {
:s3_bucket => source_bundle[:s3_bucket],
:s3_key => source_bundle[:s3_key]
},
:auto_create_application => true
})
end
beanstalk.client.update_environment({
:environment_name => @app_environment,
:version_label => version,
:option_settings => option_settings
})
end
end
end
Added credential key sourcing from the options hash back into the AWSDeployer class initializer.
module CloudDeploy
class S3Helper
require 'aws-sdk'
# Configure the global AWS credentials; both keys are mandatory.
def initialize (options = {})
key_id = options[:access_key_id]
secret = options[:secret_access_key]
if key_id == nil || key_id == ''
raise "access_key_id cannot be empty or nil"
end
if secret == nil || secret == ''
raise "secret_access_key cannot be empty or nil"
end
AWS.config({
:access_key_id => key_id,
:secret_access_key => secret
})
end
# Upload every file matching +asset_location+ (a Dir.glob pattern) into
# +bucket+, optionally under the +s3_path+ prefix.
def put_asset_in_s3(asset_location, bucket, s3_path = "")
puts "Copying asset #{asset_location} to S3 bucket #{bucket}"
target = AWS::S3.new.buckets[bucket]
Dir.glob(asset_location) do |file_name|
base_name = File.basename(file_name)
remote_name = (s3_path == "") ? "#{base_name}" : "#{s3_path}/#{base_name}"
puts " # Uploading #{remote_name}"
#Uploading with a temp name and renaming to get around some weird bug.
obj = target.objects["_#{remote_name}"]
obj.write(:data => File.open(file_name), :content_length => File.size(file_name), :content_type => 'application/zip', :multipart_threshold => 100 * 1024 * 1024)
obj.move_to(remote_name)
end
puts "Finished pushing assets to S3!"
end
end
class AWSDeployer
require 'aws-sdk'
require 'curses'
# Capture deployment settings and AWS credentials from the options hash,
# then install the credentials into the SDK configuration.
def initialize(options = {})
@template_location = options[:template_location]
@stack_name = options[:stack_name]
@cfn_vars = options[:cfn_vars]
@use_curses = options[:use_curses]
key_id = options[:access_key_id]
secret = options[:secret_access_key]
if key_id == nil || key_id == ''
raise "access_key_id cannot be empty or nil"
end
if secret == nil || secret == ''
raise "secret_access_key cannot be empty or nil"
end
@access_key_id = key_id
@secret_access_key = secret
configure_aws()
end
# Install the captured credentials into the global AWS SDK configuration.
def configure_aws()
AWS.config({
:access_key_id => @access_key_id,
:secret_access_key => @secret_access_key
})
end
# Upload every file matching +asset_location+ (a Dir.glob pattern) into
# the named +bucket+.
def put_asset_in_s3(asset_location, bucket)
puts "Copying asset #{asset_location} to S3 bucket #{bucket}"
target = AWS::S3.new.buckets[bucket]
Dir.glob(asset_location) do |file_name|
remote_name = File.basename(file_name)
puts " # Uploading #{remote_name}"
#Uploading with a temp name and renaming to get around some weird bug.
obj = target.objects["_#{remote_name}"]
obj.write(:data => File.open(file_name), :content_length => File.size(file_name), :content_type => 'application/zip', :multipart_threshold => 100 * 1024 * 1024)
obj.move_to(remote_name)
end
puts "Finished pushing assets to S3!"
end
# Ask CloudFormation to validate the template contents; raises when the
# response carries an error code.
def validate_template(cloudformation, template_contents)
puts " # validating template"
response = cloudformation.validate_template(template_contents)
raise "invalid template: #{response[:message]}" if response[:code]
puts " # # template VALID!"
end
# True when a stack with the given name exists, false otherwise.
def check_if_exists(stack_name)
cloudformation = AWS::CloudFormation.new
if cloudformation.stacks[stack_name].exists?
puts "stack exists"
true
else
puts "stack doesn't exist"
false
end
end
# Read the CloudFormation template from @template_location, validate it,
# create the stack (IAM-capable, rollback disabled), block until creation
# settles, and capture the stack outputs into @stack_outputs.
def deploy_cloudformation_template()
puts "Getting CloudFormation template at #{@template_location}"
app_template = File.read(@template_location, :encoding => 'UTF-8')
cloudformation = AWS::CloudFormation.new
app_stackname = current_stack_name
puts "deploying #{app_stackname}"
validate_template(cloudformation, app_template)
puts " # creating stack"
stack = cloudformation.stacks.create(app_stackname, app_template,
:capabilities => ['CAPABILITY_IAM'],
:disable_rollback => true,
:parameters => @cfn_vars
)
if (@use_curses)
check_stack_status_curses(current_stack_name)
else
check_stack_status(current_stack_name)
end
@stack_outputs = {}
stack.outputs.each do |output|
@stack_outputs[output.key] = output.value
end
end
# Request deletion of the named stack and block until the delete settles;
# :force_delete makes the monitor retry a DELETE_FAILED stack.
def delete_stack(stack_name)
puts "deleting #{stack_name}"
cloudformation = AWS::CloudFormation.new
stack = cloudformation.stacks[stack_name]
puts "#{stack_name} has current status #{stack.status}"
stack.delete
puts "AWS has been informed to delete #{stack_name} #{stack.status}."
if (@use_curses)
check_stack_status_curses(stack_name, {
:force_delete => true
})
else
check_stack_status(stack_name, {
:force_delete => true
})
end
puts "Delete has finished!"
end
# Poll the stack every ~15 seconds until it reaches a terminal state
# (created, create-failed, or deleted), then print the full event log and a
# status summary. With options[:force_delete], a DELETE_FAILED stack is
# re-deleted instead of aborting.
def check_stack_status(stack_name, options = {})
# NOTE(review): unused in this non-curses variant; the curses variant
# displays it.
status_title_message = "Monitoring AWS Stack Events for #{stack_name}"
cloudformation = AWS::CloudFormation.new
stack = cloudformation.stacks[stack_name]
if (stack.status == "CREATE_COMPLETE")
puts " # Create Complete!"
else
finished = false
while (!finished)
if (stack == nil || !stack.exists? || stack.status == "DELETE_COMPLETE")
puts "success! stack deleted."
finished = true
break
end
if (stack.status == "CREATE_COMPLETE")
puts "success! stack created!"
finished = true
break
elsif (stack.status == "CREATE_FAILED")
# NOTE(review): @app_name is never assigned in this class, so it
# interpolates as empty — confirm the intended message.
puts "failed to create #{@app_name} stack. #{stack.status_reason}"
finished = true
break
elsif (stack.status == "DELETE_FAILED")
if (options[:force_delete])
puts " # Delete failed, attempting delete again"
stack.delete
else
puts "failed to delete #{stack_name} stack. #{stack.status_reason}"
finished = true
break
end
end
# NOTE(review): this loop has no visible effect here — the empty
# conditional and the index counter are leftovers mirroring the curses
# variant.
index = 2
stack.events.each do |event|
event_message = "[#{event.timestamp}] #{event.logical_resource_id}: #{event.resource_status} #{event.resource_status_reason}"
if (event_message.include? "CREATE_COMPLETE")
end
index += 1
end
wait_sec = 15 # this is an interval to wait before checking the cloudformation stack status again
while (wait_sec > 0)
sleep 1
wait_sec -= 1
end
end
end
# NOTE(review): reached even after the stack was deleted; reading events on
# a deleted stack may raise — confirm.
stack.events.each do |event|
puts "#{event.timestamp},#{event.logical_resource_id}:,#{event.resource_status},#{event.resource_status_reason}"
end
puts "Status summary: #{stack.status} #{stack.status_reason}"
end
# Curses-based variant of check_stack_status: repaints the stack's event list
# on a curses screen while polling every ~15 seconds; falls back to the plain
# variant when curses cannot be loaded.
def check_stack_status_curses(stack_name, options = {})
begin
require 'curses'
# NOTE(review): rescue Exception is overly broad; rescue LoadError would
# capture exactly the missing-dependency case.
rescue Exception
puts "Curses dependency doesn't exist, using non curses version..."
return check_stack_status(stack_name, options)
end
Curses.init_screen
Curses.start_color
status_title_message = "Monitoring AWS Stack Events for #{stack_name}"
Curses.refresh
cloudformation = AWS::CloudFormation.new
stack = cloudformation.stacks[stack_name]
if (stack.status == "CREATE_COMPLETE")
Curses.setpos(1,0)
Curses.addstr("#{stack_name} is created")
sleep 2
Curses.close_screen
else
finished = false
while (!finished)
Curses.addstr(status_title_message)
if (stack == nil || !stack.exists? || stack.status == "DELETE_COMPLETE")
Curses.close_screen
puts "success! stack deleted."
finished = true
break
end
if (stack.status == "CREATE_COMPLETE")
Curses.close_screen
puts "success! stack created!"
finished = true
break
elsif (stack.status == "CREATE_FAILED")
Curses.close_screen
# NOTE(review): @app_name is never assigned in this class — confirm.
puts "failed to create #{@app_name} stack. #{stack.status_reason}"
finished = true
break
elsif (stack.status == "DELETE_FAILED")
if (options[:force_delete])
Curses.setpos(1, 0)
Curses.addstr("Delete failed, attempting delete again.")
stack.delete
else
Curses.close_screen
puts "failed to delete #{stack_name} stack. #{stack.status_reason}"
finished = true
break
end
end
# Paint one event per row, starting below the title/countdown rows.
index = 2
stack.events.each do |event|
event_message = "[#{event.timestamp}] #{event.logical_resource_id}: #{event.resource_status} #{event.resource_status_reason}"
# NOTE(review): empty conditional — dead code.
if (event_message.include? "CREATE_COMPLETE")
end
Curses.setpos(index, 0)
Curses.addstr(event_message)
index += 1
end
Curses.refresh
wait_sec = 15 # this is an interval to wait before checking the cloudformation stack status again
while (wait_sec > 0)
# NOTE(review): (wait_sec-15) is zero or negative, so the countdown
# marker never advances across the row — likely meant (15-wait_sec).
Curses.setpos(1, (wait_sec-15))
Curses.addstr(">")
Curses.refresh
sleep 1
wait_sec -= 1
end
Curses.clear
end
end
stack.events.each do |event|
puts "#{event.timestamp},#{event.logical_resource_id}:,#{event.resource_status},#{event.resource_status_reason}"
end
puts "Status summary: #{stack.status} #{stack.status_reason}"
end
# Re-point the given elastic IP at the instance produced by the most recent
# deploy (@stack_outputs['InstanceId']), or at options[:instance_id] as a
# fallback. Silently does nothing when the elastic IP does not exist.
def switch_elastic_ip(elastic_ip, options = {})
@elastic_ip = elastic_ip
# NOTE(review): @app_name is never assigned in this class — confirm.
puts "Switching elastic IP for #{@app_name}"
# NOTE(review): raises NoMethodError if called before
# deploy_cloudformation_template populated @stack_outputs — confirm.
instanceId = @stack_outputs['InstanceId']
if (instanceId == nil || instanceId == "")
instanceId = options[:instance_id]
if (instanceId == nil || instanceId == "")
raise "Instance Id is not found."
end
end
begin
eip = AWS::EC2::ElasticIp.new(@elastic_ip)
associateOptions = {
:instance => instanceId
}
if (eip.exists?)
eip.associate(associateOptions)
puts "New instance now associated with #{@elastic_ip}"
end
# NOTE(review): rescue Exception is overly broad and re-raises as a bare
# RuntimeError, losing the original class and backtrace.
rescue Exception => ex
raise "problem setting changing elastic ip. Exception Message: #{ex.message}"
end
end
# Delete every stack whose name contains both +app_env+ and +app_name+,
# except the currently-deployed stack.
# BUGFIX: the final branch was a bare "elsif" with no condition, which
# parsed the following puts as the condition and left the branch body
# empty. It is now a proper else.
def delete_old_stacks(app_env, app_name)
puts "DELETING OLD STACKS FOR #{app_env} APP #{app_name}"
current_app_stackname = current_stack_name
cloudformation = AWS::CloudFormation.new
cloudformation.stacks.each do |stack|
puts " # checking #{stack.name}"
if (stack.name == current_app_stackname)
puts " # # leaving #{stack.name}. (This is the current stack)"
elsif (stack.name.include? "#{app_env}") && (stack.name.include? "#{app_name}")
puts " # # DELETING stack: #{stack.name}"
stack.delete
puts " # # Delete sent!"
else
puts " # # leaving #{stack.name}. (This stack isn't my responsibility)"
end
end
end
private
# The configured stack name with dots replaced by dashes — presumably
# because CloudFormation stack names disallow periods; confirm.
def current_stack_name
@stack_name.to_s.gsub(".", "-")
end
end
class EBDeployer
require 'aws-sdk'
# Capture the Elastic Beanstalk application/environment names and the AWS
# credentials used for API calls.
# NOTE(review): the default options hash is replaced wholesale (not merged)
# when a caller passes any options — the :app_name/:environment_name
# defaults only apply when no argument is given at all.
def initialize(options = {
:app_name => "default",
:environment_name => "dev"
})
@app_name = options[:app_name]
@app_environment = options[:environment_name]
key_id = options[:access_key_id]
secret = options[:secret_access_key]
if key_id == nil || key_id == ''
raise "access_key_id cannot be empty or nil"
end
if secret == nil || secret == ''
raise "secret_access_key cannot be empty or nil"
end
@access_key_id = key_id
@secret_access_key = secret
end
# Register +version+ (creating it from the given S3 source bundle when it
# does not already exist) and roll the environment onto it.
def update_application(version, source_bundle, option_settings = [])
beanstalk = AWS::ElasticBeanstalk.new(
:access_key_id => @access_key_id,
:secret_access_key => @secret_access_key)
known_versions = beanstalk.client.describe_application_versions({
:application_name => @app_name,
:version_labels => [version]
})
if (known_versions.application_versions.length == 0)
beanstalk.client.create_application_version({
:application_name => @app_name,
:version_label => version,
:source_bundle => {
:s3_bucket => source_bundle[:s3_bucket],
:s3_key => source_bundle[:s3_key]
},
:auto_create_application => true
})
end
beanstalk.client.update_environment({
:environment_name => @app_environment,
:version_label => version,
:option_settings => option_settings
})
end
end
end |
require 'securerandom'
require_relative 'errors'
require_relative 'vcd_client'
require_relative 'steps'
module VCloudCloud
  # BOSH Cloud Provider Interface (CPI) for VMware vCloud Director.
  # Every CPI operation is modelled as a transaction of discrete Steps
  # (see steps.rb) executed against a lazily-created, mutex-guarded client.
  class Cloud
    # options - CPI configuration hash. Expects optional 'agent' properties
    #           and exactly one entry under 'vcds', whose 'entities' hash
    #           names the catalogs, storage profiles and metadata keys used
    #           by the steps below.
    def initialize(options)
      @logger = Bosh::Clouds::Config.logger
      @logger.debug "Input cloud options: #{options.inspect}"
      @agent_properties = options['agent'] || {}
      vcds = options['vcds']
      raise ArgumentError, 'Invalid number of VCDs' unless vcds.size == 1
      @vcd = vcds[0]
      @entities = @vcd['entities']
      raise ArgumentError, 'Invalid entities in VCD settings' unless @entities.is_a?(Hash)
      @debug = @vcd['debug'] || {}
      @client_lock = Mutex.new
    end

    # Uploads a stemcell image as a vApp template and registers it in the
    # catalog. Returns the new catalog item's URN.
    def create_stemcell(image, _)
      (steps "create_stemcell(#{image}, _)" do |s|
        s.next Steps::StemcellInfo, image
        s.next Steps::CreateTemplate, "sc-#{unique_name}"
        s.next Steps::UploadTemplateFiles
        s.next Steps::AddCatalogItem, :vapp, s.state[:vapp_template]
      end)[:catalog_item].urn
    end

    # Deletes a stemcell's vApp template and its catalog entry.
    def delete_stemcell(catalog_vapp_id)
      steps "delete_stemcell(#{catalog_vapp_id})" do |s|
        catalog_vapp = client.resolve_entity catalog_vapp_id
        # BUG FIX: the message interpolated an undefined local `id`, which
        # would itself raise NameError instead of the intended error.
        raise "Catalog vApp #{catalog_vapp_id} not found" unless catalog_vapp
        vapp = client.resolve_link catalog_vapp.entity
        client.wait_entity vapp, true
        client.invoke :delete, vapp.remove_link
        client.invoke :delete, catalog_vapp.href
      end
    end

    # Instantiates a VM from a stemcell template, optionally recomposing it
    # into a pre-existing container vApp, configures networks/disks, attaches
    # the agent-environment ISO and powers it on. Returns the VM URN.
    def create_vm(agent_id, catalog_vapp_id, resource_pool, networks, disk_locality = nil, environment = nil)
      (steps "create_vm(#{agent_id}, #{catalog_vapp_id}, #{resource_pool}, ...)" do |s|
        # request name available for recomposing vApps
        requested_name = environment && environment['vapp']
        vapp_name = requested_name.nil? ? agent_id : "vapp-tmp-#{unique_name}"
        # disk_locality should be an array of disk ids
        disk_locality = independent_disks disk_locality
        # agent_id is used as vm name
        description = @entities['description']
        # if requested_name is present, we need to recompose vApp
        container_vapp = nil
        unless requested_name.nil?
          begin
            @logger.debug "Requesting container vApp: #{requested_name}"
            container_vapp = client.vapp_by_name requested_name
          rescue ObjectNotFoundError
            # ignored, keep container_vapp nil
            @logger.debug "Invalid container vApp: #{requested_name}"
            vapp_name = agent_id
          end
        end
        s.next Steps::Instantiate, catalog_vapp_id, vapp_name, description, disk_locality
        client.flush_cache  # flush cached vdc which contains vapp list
        vapp = s.state[:vapp]
        vm = s.state[:vm] = vapp.vms[0]
        # perform recomposing
        if container_vapp
          existing_vm_hrefs = container_vapp.vms.map { |vm| vm.href }
          client.wait_entity container_vapp
          s.next Steps::Recompose, container_vapp
          client.flush_cache
          vapp = s.state[:vapp] = client.reload vapp
          client.wait_entity vapp
          s.next Steps::Delete, vapp, true
          client.flush_cache
          vapp = s.state[:vapp] = client.reload container_vapp
          # the new VM is the one not present before recomposition
          vm_href = vapp.vms.map { |vm| vm.href } - existing_vm_hrefs
          raise "New virtual machine not found in recomposed vApp" if vm_href.empty?
          vm = s.state[:vm] = client.resolve_link vm_href[0]
        end
        # save original disk configuration
        s.state[:disks] = Array.new(vm.hardware_section.hard_disks)
        reconfigure_vm s, agent_id, description, resource_pool, networks
        # create env and generate env ISO image
        s.state[:env_metadata_key] = @entities['vm_metadata_key']
        s.next Steps::CreateAgentEnv, networks, environment, @agent_properties
        save_agent_env s
        # power on
        s.next Steps::PowerOn, :vm
      end)[:vm].urn
    end

    # Restarts a VM, resuming/powering it on first when it is not running.
    def reboot_vm(vm_id)
      steps "reboot_vm(#{vm_id})" do |s|
        vm = s.state[:vm] = client.resolve_link client.resolve_entity(vm_id)
        if vm['status'] == VCloudSdk::Xml::RESOURCE_ENTITY_STATUS[:SUSPENDED].to_s
          s.next Steps::DiscardSuspendedState, :vm
          s.next Steps::PowerOn, :vm
        elsif vm['status'] == VCloudSdk::Xml::RESOURCE_ENTITY_STATUS[:POWERED_OFF].to_s
          s.next Steps::PowerOn, :vm
        else
          s.next Steps::Reboot, :vm
        end
      end
    end

    # True when vm_id resolves to a VM entity.
    def has_vm?(vm_id)
      vm = client.resolve_entity vm_id
      vm.type == VCloudSdk::Xml::MEDIA_TYPE[:VM]
    rescue RestClient::Exception  # invalid ID will get 403
      false
    rescue ObjectNotFoundError
      false
    end

    # Powers off and deletes a VM, its environment ISO, and — when the
    # containing vApp becomes empty — the vApp itself (both gated by debug
    # flags).
    def delete_vm(vm_id)
      steps "delete_vm(#{vm_id})" do |s|
        vm = s.state[:vm] = client.resolve_entity vm_id
        # power off vm first
        if vm['status'] == VCloudSdk::Xml::RESOURCE_ENTITY_STATUS[:SUSPENDED].to_s
          s.next Steps::DiscardSuspendedState, :vm
        end
        s.next Steps::PowerOff, :vm
        vapp_link = vm.container_vapp_link
        if @debug['delete_vm']
          s.next Steps::Undeploy, s.state[:vm]
          s.next Steps::Delete, s.state[:vm], true
          # BUG FIX (54418438): delete the VM's environment ISO from the
          # catalog after the VM is gone, so orphaned media does not
          # accumulate.
          s.next Steps::DeleteCatalogMedia, vm.name
        end
        if @debug['delete_empty_vapp']
          vapp = s.state[:vapp] = client.resolve_link vapp_link
          if vapp.vms.size == 0
            if vapp['status'] == VCloudSdk::Xml::RESOURCE_ENTITY_STATUS[:SUSPENDED].to_s
              s.next Steps::DiscardSuspendedState, :vapp
            end
            vapp = s.state[:vapp]
            if vapp['status'] == VCloudSdk::Xml::RESOURCE_ENTITY_STATUS[:POWERED_ON].to_s
              s.next Steps::PowerOff, :vapp
            end
            s.next Steps::Delete, s.state[:vapp], true
            client.flush_cache
          end
        end
      end
    end

    # Reconfigures a VM's networks in place: power off, rewire, refresh the
    # agent environment, power back on.
    def configure_networks(vm_id, networks)
      steps "configure_networks(#{vm_id}, #{networks})" do |s|
        vm = s.state[:vm] = client.resolve_entity vm_id
        # power off vm first
        if vm['status'] == VCloudSdk::Xml::RESOURCE_ENTITY_STATUS[:SUSPENDED].to_s
          s.next Steps::DiscardSuspendedState, :vm
        end
        s.next Steps::PowerOff, :vm
        # load container vApp
        vapp = s.state[:vapp] = client.resolve_link vm.container_vapp_link
        reconfigure_vm s, nil, nil, nil, networks
        # update environment
        s.state[:env_metadata_key] = @entities['vm_metadata_key']
        s.next Steps::LoadAgentEnv
        vm = s.state[:vm] = client.reload vm
        Steps::CreateAgentEnv.update_network_env s.state[:env], vm, networks
        save_agent_env s
        # power on
        s.next Steps::PowerOn, :vm
      end
    end

    # Creates an independent disk, optionally placed near the VM given by
    # vm_locality. Returns the disk URN.
    def create_disk(size_mb, vm_locality = nil)
      (steps "create_disk(#{size_mb}, #{vm_locality.inspect})" do |s|
        # vm_locality is used as vm_id
        vm = vm_locality.nil? ? nil : client.resolve_entity(vm_locality)
        s.next Steps::CreateDisk, unique_name, size_mb, vm
      end)[:disk].urn
    end

    # Attaches a persistent disk to a VM and records it in the agent env.
    def attach_disk(vm_id, disk_id)
      steps "attach_disk(#{vm_id}, #{disk_id})" do |s|
        s.state[:vm] = client.resolve_entity vm_id
        s.state[:disk] = client.resolve_entity disk_id
        s.next Steps::AttachDetachDisk, :attach
        # update environment
        s.state[:env_metadata_key] = @entities['vm_metadata_key']
        s.next Steps::LoadAgentEnv
        s.state[:env]['disks'] ||= {}
        s.state[:env]['disks']['persistent'] ||= {}
        s.state[:env]['disks']['persistent'][disk_id] = disk_id
        save_agent_env s
      end
    end

    # Detaches a persistent disk from a VM and removes it from the agent env.
    def detach_disk(vm_id, disk_id)
      steps "detach_disk(#{vm_id}, #{disk_id})" do |s|
        vm = s.state[:vm] = client.resolve_entity vm_id
        s.state[:disk] = client.resolve_entity disk_id
        if vm['status'] == VCloudSdk::Xml::RESOURCE_ENTITY_STATUS[:SUSPENDED].to_s
          s.next Steps::DiscardSuspendedState, :vm
        end
        s.next Steps::AttachDetachDisk, :detach
        # update environment
        s.state[:env_metadata_key] = @entities['vm_metadata_key']
        s.next Steps::LoadAgentEnv
        env = s.state[:env]
        if env['disks'] && env['disks']['persistent'].is_a?(Hash)
          env['disks']['persistent'].delete disk_id
        end
        save_agent_env s
      end
    end

    # Deletes an independent disk.
    def delete_disk(disk_id)
      steps "delete_disk(#{disk_id})" do |s|
        disk = client.resolve_entity disk_id
        s.next Steps::Delete, disk
      end
    end

    def get_disk_size_mb(disk_id)
      client.resolve_entity(disk_id).size_mb
    end

    # No-op: this CPI performs no cross-manifest validation.
    def validate_deployment(old_manifest, new_manifest)
    end

    private

    # Lazily builds the vCloud client; the mutex guards double creation.
    def client
      @client_lock.synchronize do
        @client = VCloudClient.new(@vcd, @logger) if @client.nil?
      end
      @client
    end

    # Runs `block` as a named transaction of steps.
    def steps(name, options = {}, &block)
      Transaction.perform name, client(), options, &block
    end

    def unique_name
      SecureRandom.uuid.to_s
    end

    # Resolves disk ids (locality hints) to disk entities.
    def independent_disks(disk_locality)
      disk_locality ||= []
      @logger.info "Instantiate vApp accessible to disks: #{disk_locality.join(',')}"
      disk_locality.map do |disk_id|
        client.resolve_entity disk_id
      end
    end

    # Unique network names referenced by the BOSH network spec.
    def network_names(networks)
      networks.map { |k,v| v['cloud_properties']['name'] }.uniq
    end

    # Wires the requested networks into the vApp/VM and drops any networks
    # left unused afterwards.
    def reconfigure_vm(s, name, description, resource_pool, networks)
      net_names = network_names networks
      s.next Steps::AddNetworks, net_names
      s.next Steps::ReconfigureVM, name, description, resource_pool, networks
      s.next Steps::DeleteUnusedNetworks, net_names
    end

    # Persists the agent environment, rebuilds its ISO and swaps it into the
    # VM's virtual CD drive (old media is ejected and deleted first).
    def save_agent_env(s)
      s.next Steps::SaveAgentEnv
      vm = s.state[:vm]
      # eject and delete old env ISO
      s.next Steps::EjectCatalogMedia, vm.name
      s.next Steps::DeleteCatalogMedia, vm.name
      # attach new env ISO
      storage_profiles = client.vdc.storage_profiles || []
      media_storage_profile = storage_profiles.find { |sp| sp['name'] == @entities['media_storage_profile'] }
      s.next Steps::UploadCatalogMedia, vm.name, s.state[:iso], 'iso', media_storage_profile
      s.next Steps::AddCatalogItem, :media, s.state[:media]
      s.next Steps::InsertCatalogMedia, vm.name
      s.state[:vm] = client.reload vm
    end
  end
end
bugfix: 54418438: delete environment iso after vm is deleted
require 'securerandom'
require_relative 'errors'
require_relative 'vcd_client'
require_relative 'steps'
module VCloudCloud
  # BOSH Cloud Provider Interface (CPI) for VMware vCloud Director.
  # Every CPI operation is modelled as a transaction of discrete Steps
  # (see steps.rb) executed against a lazily-created, mutex-guarded client.
  class Cloud
    # options - CPI configuration hash. Expects optional 'agent' properties
    #           and exactly one entry under 'vcds', whose 'entities' hash
    #           names the catalogs, storage profiles and metadata keys used
    #           by the steps below.
    def initialize(options)
      @logger = Bosh::Clouds::Config.logger
      @logger.debug "Input cloud options: #{options.inspect}"
      @agent_properties = options['agent'] || {}
      vcds = options['vcds']
      raise ArgumentError, 'Invalid number of VCDs' unless vcds.size == 1
      @vcd = vcds[0]
      @entities = @vcd['entities']
      raise ArgumentError, 'Invalid entities in VCD settings' unless @entities.is_a?(Hash)
      @debug = @vcd['debug'] || {}
      @client_lock = Mutex.new
    end

    # Uploads a stemcell image as a vApp template and registers it in the
    # catalog. Returns the new catalog item's URN.
    def create_stemcell(image, _)
      (steps "create_stemcell(#{image}, _)" do |s|
        s.next Steps::StemcellInfo, image
        s.next Steps::CreateTemplate, "sc-#{unique_name}"
        s.next Steps::UploadTemplateFiles
        s.next Steps::AddCatalogItem, :vapp, s.state[:vapp_template]
      end)[:catalog_item].urn
    end

    # Deletes a stemcell's vApp template and its catalog entry.
    def delete_stemcell(catalog_vapp_id)
      steps "delete_stemcell(#{catalog_vapp_id})" do |s|
        catalog_vapp = client.resolve_entity catalog_vapp_id
        # BUG FIX: the message interpolated an undefined local `id`, which
        # would itself raise NameError instead of the intended error.
        raise "Catalog vApp #{catalog_vapp_id} not found" unless catalog_vapp
        vapp = client.resolve_link catalog_vapp.entity
        client.wait_entity vapp, true
        client.invoke :delete, vapp.remove_link
        client.invoke :delete, catalog_vapp.href
      end
    end

    # Instantiates a VM from a stemcell template, optionally recomposing it
    # into a pre-existing container vApp, configures networks/disks, attaches
    # the agent-environment ISO and powers it on. Returns the VM URN.
    def create_vm(agent_id, catalog_vapp_id, resource_pool, networks, disk_locality = nil, environment = nil)
      (steps "create_vm(#{agent_id}, #{catalog_vapp_id}, #{resource_pool}, ...)" do |s|
        # request name available for recomposing vApps
        requested_name = environment && environment['vapp']
        vapp_name = requested_name.nil? ? agent_id : "vapp-tmp-#{unique_name}"
        # disk_locality should be an array of disk ids
        disk_locality = independent_disks disk_locality
        # agent_id is used as vm name
        description = @entities['description']
        # if requested_name is present, we need to recompose vApp
        container_vapp = nil
        unless requested_name.nil?
          begin
            @logger.debug "Requesting container vApp: #{requested_name}"
            container_vapp = client.vapp_by_name requested_name
          rescue ObjectNotFoundError
            # ignored, keep container_vapp nil
            @logger.debug "Invalid container vApp: #{requested_name}"
            vapp_name = agent_id
          end
        end
        s.next Steps::Instantiate, catalog_vapp_id, vapp_name, description, disk_locality
        client.flush_cache  # flush cached vdc which contains vapp list
        vapp = s.state[:vapp]
        vm = s.state[:vm] = vapp.vms[0]
        # perform recomposing
        if container_vapp
          existing_vm_hrefs = container_vapp.vms.map { |vm| vm.href }
          client.wait_entity container_vapp
          s.next Steps::Recompose, container_vapp
          client.flush_cache
          vapp = s.state[:vapp] = client.reload vapp
          client.wait_entity vapp
          s.next Steps::Delete, vapp, true
          client.flush_cache
          vapp = s.state[:vapp] = client.reload container_vapp
          # the new VM is the one not present before recomposition
          vm_href = vapp.vms.map { |vm| vm.href } - existing_vm_hrefs
          raise "New virtual machine not found in recomposed vApp" if vm_href.empty?
          vm = s.state[:vm] = client.resolve_link vm_href[0]
        end
        # save original disk configuration
        s.state[:disks] = Array.new(vm.hardware_section.hard_disks)
        reconfigure_vm s, agent_id, description, resource_pool, networks
        # create env and generate env ISO image
        s.state[:env_metadata_key] = @entities['vm_metadata_key']
        s.next Steps::CreateAgentEnv, networks, environment, @agent_properties
        save_agent_env s
        # power on
        s.next Steps::PowerOn, :vm
      end)[:vm].urn
    end

    # Restarts a VM, resuming/powering it on first when it is not running.
    def reboot_vm(vm_id)
      steps "reboot_vm(#{vm_id})" do |s|
        vm = s.state[:vm] = client.resolve_link client.resolve_entity(vm_id)
        if vm['status'] == VCloudSdk::Xml::RESOURCE_ENTITY_STATUS[:SUSPENDED].to_s
          s.next Steps::DiscardSuspendedState, :vm
          s.next Steps::PowerOn, :vm
        elsif vm['status'] == VCloudSdk::Xml::RESOURCE_ENTITY_STATUS[:POWERED_OFF].to_s
          s.next Steps::PowerOn, :vm
        else
          s.next Steps::Reboot, :vm
        end
      end
    end

    # True when vm_id resolves to a VM entity.
    def has_vm?(vm_id)
      vm = client.resolve_entity vm_id
      vm.type == VCloudSdk::Xml::MEDIA_TYPE[:VM]
    rescue RestClient::Exception  # invalid ID will get 403
      false
    rescue ObjectNotFoundError
      false
    end

    # Powers off and deletes a VM, its environment ISO, and — when the
    # containing vApp becomes empty — the vApp itself (both gated by debug
    # flags).
    def delete_vm(vm_id)
      steps "delete_vm(#{vm_id})" do |s|
        vm = s.state[:vm] = client.resolve_entity vm_id
        # power off vm first
        if vm['status'] == VCloudSdk::Xml::RESOURCE_ENTITY_STATUS[:SUSPENDED].to_s
          s.next Steps::DiscardSuspendedState, :vm
        end
        s.next Steps::PowerOff, :vm
        vapp_link = vm.container_vapp_link
        if @debug['delete_vm']
          s.next Steps::Undeploy, s.state[:vm]
          s.next Steps::Delete, s.state[:vm], true
          # delete the environment ISO after the VM itself, so no orphaned
          # media is left behind in the catalog (bugfix 54418438)
          s.next Steps::DeleteCatalogMedia, vm.name
        end
        if @debug['delete_empty_vapp']
          vapp = s.state[:vapp] = client.resolve_link vapp_link
          if vapp.vms.size == 0
            if vapp['status'] == VCloudSdk::Xml::RESOURCE_ENTITY_STATUS[:SUSPENDED].to_s
              s.next Steps::DiscardSuspendedState, :vapp
            end
            vapp = s.state[:vapp]
            if vapp['status'] == VCloudSdk::Xml::RESOURCE_ENTITY_STATUS[:POWERED_ON].to_s
              s.next Steps::PowerOff, :vapp
            end
            s.next Steps::Delete, s.state[:vapp], true
            client.flush_cache
          end
        end
      end
    end

    # Reconfigures a VM's networks in place: power off, rewire, refresh the
    # agent environment, power back on.
    def configure_networks(vm_id, networks)
      steps "configure_networks(#{vm_id}, #{networks})" do |s|
        vm = s.state[:vm] = client.resolve_entity vm_id
        # power off vm first
        if vm['status'] == VCloudSdk::Xml::RESOURCE_ENTITY_STATUS[:SUSPENDED].to_s
          s.next Steps::DiscardSuspendedState, :vm
        end
        s.next Steps::PowerOff, :vm
        # load container vApp
        vapp = s.state[:vapp] = client.resolve_link vm.container_vapp_link
        reconfigure_vm s, nil, nil, nil, networks
        # update environment
        s.state[:env_metadata_key] = @entities['vm_metadata_key']
        s.next Steps::LoadAgentEnv
        vm = s.state[:vm] = client.reload vm
        Steps::CreateAgentEnv.update_network_env s.state[:env], vm, networks
        save_agent_env s
        # power on
        s.next Steps::PowerOn, :vm
      end
    end

    # Creates an independent disk, optionally placed near the VM given by
    # vm_locality. Returns the disk URN.
    def create_disk(size_mb, vm_locality = nil)
      (steps "create_disk(#{size_mb}, #{vm_locality.inspect})" do |s|
        # vm_locality is used as vm_id
        vm = vm_locality.nil? ? nil : client.resolve_entity(vm_locality)
        s.next Steps::CreateDisk, unique_name, size_mb, vm
      end)[:disk].urn
    end

    # Attaches a persistent disk to a VM and records it in the agent env.
    def attach_disk(vm_id, disk_id)
      steps "attach_disk(#{vm_id}, #{disk_id})" do |s|
        s.state[:vm] = client.resolve_entity vm_id
        s.state[:disk] = client.resolve_entity disk_id
        s.next Steps::AttachDetachDisk, :attach
        # update environment
        s.state[:env_metadata_key] = @entities['vm_metadata_key']
        s.next Steps::LoadAgentEnv
        s.state[:env]['disks'] ||= {}
        s.state[:env]['disks']['persistent'] ||= {}
        s.state[:env]['disks']['persistent'][disk_id] = disk_id
        save_agent_env s
      end
    end

    # Detaches a persistent disk from a VM and removes it from the agent env.
    def detach_disk(vm_id, disk_id)
      steps "detach_disk(#{vm_id}, #{disk_id})" do |s|
        vm = s.state[:vm] = client.resolve_entity vm_id
        s.state[:disk] = client.resolve_entity disk_id
        if vm['status'] == VCloudSdk::Xml::RESOURCE_ENTITY_STATUS[:SUSPENDED].to_s
          s.next Steps::DiscardSuspendedState, :vm
        end
        s.next Steps::AttachDetachDisk, :detach
        # update environment
        s.state[:env_metadata_key] = @entities['vm_metadata_key']
        s.next Steps::LoadAgentEnv
        env = s.state[:env]
        if env['disks'] && env['disks']['persistent'].is_a?(Hash)
          env['disks']['persistent'].delete disk_id
        end
        save_agent_env s
      end
    end

    # Deletes an independent disk.
    def delete_disk(disk_id)
      steps "delete_disk(#{disk_id})" do |s|
        disk = client.resolve_entity disk_id
        s.next Steps::Delete, disk
      end
    end

    def get_disk_size_mb(disk_id)
      client.resolve_entity(disk_id).size_mb
    end

    # No-op: this CPI performs no cross-manifest validation.
    def validate_deployment(old_manifest, new_manifest)
    end

    private

    # Lazily builds the vCloud client; the mutex guards double creation.
    def client
      @client_lock.synchronize do
        @client = VCloudClient.new(@vcd, @logger) if @client.nil?
      end
      @client
    end

    # Runs `block` as a named transaction of steps.
    def steps(name, options = {}, &block)
      Transaction.perform name, client(), options, &block
    end

    def unique_name
      SecureRandom.uuid.to_s
    end

    # Resolves disk ids (locality hints) to disk entities.
    def independent_disks(disk_locality)
      disk_locality ||= []
      @logger.info "Instantiate vApp accessible to disks: #{disk_locality.join(',')}"
      disk_locality.map do |disk_id|
        client.resolve_entity disk_id
      end
    end

    # Unique network names referenced by the BOSH network spec.
    def network_names(networks)
      networks.map { |k,v| v['cloud_properties']['name'] }.uniq
    end

    # Wires the requested networks into the vApp/VM and drops any networks
    # left unused afterwards.
    def reconfigure_vm(s, name, description, resource_pool, networks)
      net_names = network_names networks
      s.next Steps::AddNetworks, net_names
      s.next Steps::ReconfigureVM, name, description, resource_pool, networks
      s.next Steps::DeleteUnusedNetworks, net_names
    end

    # Persists the agent environment, rebuilds its ISO and swaps it into the
    # VM's virtual CD drive (old media is ejected and deleted first).
    def save_agent_env(s)
      s.next Steps::SaveAgentEnv
      vm = s.state[:vm]
      # eject and delete old env ISO
      s.next Steps::EjectCatalogMedia, vm.name
      s.next Steps::DeleteCatalogMedia, vm.name
      # attach new env ISO
      storage_profiles = client.vdc.storage_profiles || []
      media_storage_profile = storage_profiles.find { |sp| sp['name'] == @entities['media_storage_profile'] }
      s.next Steps::UploadCatalogMedia, vm.name, s.state[:iso], 'iso', media_storage_profile
      s.next Steps::AddCatalogItem, :media, s.state[:media]
      s.next Steps::InsertCatalogMedia, vm.name
      s.state[:vm] = client.reload vm
    end
  end
end
|
# Complete schema for CloudCrowd.
ActiveRecord::Schema.define(:version => CloudCrowd::SCHEMA_VERSION) do
  # A Job is one logical unit of submitted work, fanned out into WorkUnits.
  create_table "jobs", :force => true do |t|
    t.integer "status", :null => false
    t.text "inputs", :null => false
    t.string "action", :null => false
    t.text "options", :null => false
    t.text "outputs"
    t.float "time"
    t.string "callback_url"
    t.string "email"
    t.datetime "created_at"
    t.datetime "updated_at"
  end
  # One row per registered worker node.
  create_table "node_records", :force => true do |t|
    t.string "host", :null => false
    t.string "ip_address", :null => false
    t.integer "port", :null => false
    # FIX: a string(255) column can silently truncate a long comma-separated
    # action list; text is safe for arbitrarily many enabled actions.
    t.text "enabled_actions", :default => '', :null => false
    t.boolean "busy", :default => false, :null => false
    t.string "tag"
    t.integer "max_workers"
    t.datetime "created_at"
    t.datetime "updated_at"
  end
  # One row per individual chunk of work handed to a worker.
  create_table "work_units", :force => true do |t|
    t.integer "status", :null => false
    t.integer "job_id", :null => false
    t.text "input", :null => false
    t.string "action", :null => false
    t.integer "attempts", :default => 0, :null => false
    t.integer "node_record_id"
    t.integer "worker_pid"
    t.integer "reservation"
    t.float "time"
    t.text "output"
    t.datetime "created_at"
    t.datetime "updated_at"
  end
  # Here be indices. After looking, it seems faster not to have them at all.
  #
  add_index "jobs", ["status"], :name => "index_jobs_on_status"
  add_index "work_units", ["job_id"], :name => "index_work_units_on_job_id"
  add_index "work_units", ["worker_pid"], :name => "index_work_units_on_worker_pid"
  add_index "work_units", ["worker_pid", "status"], :name => "index_work_units_on_worker_pid_and_status"
  add_index "work_units", ["worker_pid", "node_record_id"], :name => "index_work_units_on_worker_pid_and_node_record_id"
end
move to a text column for enabled_actions for safety's sake
# Complete schema for CloudCrowd.
ActiveRecord::Schema.define(:version => CloudCrowd::SCHEMA_VERSION) do
  # A Job is one logical unit of submitted work, fanned out into WorkUnits.
  create_table "jobs", :force => true do |t|
    t.integer "status", :null => false
    t.text "inputs", :null => false
    t.string "action", :null => false
    t.text "options", :null => false
    t.text "outputs"
    t.float "time"
    t.string "callback_url"
    t.string "email"
    t.datetime "created_at"
    t.datetime "updated_at"
  end
  # One row per registered worker node.
  create_table "node_records", :force => true do |t|
    t.string "host", :null => false
    t.string "ip_address", :null => false
    t.integer "port", :null => false
    # text (not string) so a long list of enabled actions cannot be truncated.
    t.text "enabled_actions", :default => '', :null => false
    t.boolean "busy", :default => false, :null => false
    t.string "tag"
    t.integer "max_workers"
    t.datetime "created_at"
    t.datetime "updated_at"
  end
  # One row per individual chunk of work handed to a worker.
  create_table "work_units", :force => true do |t|
    t.integer "status", :null => false
    t.integer "job_id", :null => false
    t.text "input", :null => false
    t.string "action", :null => false
    t.integer "attempts", :default => 0, :null => false
    t.integer "node_record_id"
    t.integer "worker_pid"
    t.integer "reservation"
    t.float "time"
    t.text "output"
    t.datetime "created_at"
    t.datetime "updated_at"
  end
  # Here be indices. After looking, it seems faster not to have them at all.
  #
  add_index "jobs", ["status"], :name => "index_jobs_on_status"
  add_index "work_units", ["job_id"], :name => "index_work_units_on_job_id"
  add_index "work_units", ["worker_pid"], :name => "index_work_units_on_worker_pid"
  add_index "work_units", ["worker_pid", "status"], :name => "index_work_units_on_worker_pid_and_status"
  add_index "work_units", ["worker_pid", "node_record_id"], :name => "index_work_units_on_worker_pid_and_node_record_id"
end
|
module CloudCrowd
  # The Worker, run at intervals by the Daemon, fetches WorkUnits from the
  # central server and dispatches Actions to process them. Workers only fetch
  # units that they are able to handle (for which they have an action in their
  # actions directory). If communication with the central server is interrupted,
  # the WorkUnit will repeatedly attempt to complete its unit -- every
  # Worker::RETRY_WAIT seconds. Any exceptions that take place during
  # the course of the Action will cause the Worker to mark the WorkUnit as
  # having failed.
  class Worker

    # Wait five seconds to retry, after internal communication errors.
    RETRY_WAIT = 5

    attr_reader :action

    # Spinning up a worker will create a new AssetStore with a persistent
    # connection to S3. This AssetStore gets passed into each action, for use
    # as it is run.
    #
    # node      - the owning Node (provides server connection and asset store).
    # work_unit - JSON-decoded WorkUnit hash to process immediately.
    def initialize(node, work_unit)
      Signal.trap('INT')  { kill_worker_thread_and_exit }
      # NOTE(review): SIGKILL cannot actually be trapped by a process — this
      # registration is likely a no-op; confirm intent.
      Signal.trap('KILL') { kill_worker_thread_and_exit }
      Signal.trap('TERM') { kill_worker_thread_and_exit }
      @pid  = $$
      @node = node
      setup_work_unit(work_unit)
      run
    end

    # # Ask the central server for the first WorkUnit in line.
    # def fetch_work_unit
    #   keep_trying_to "fetch a new work unit" do
    #     unit_json = @server['/work'].post(base_params)
    #     setup_work_unit(unit_json)
    #   end
    # end

    # Return output to the central server, marking the current work unit as done.
    def complete_work_unit(result)
      keep_trying_to "complete work unit" do
        data = completion_params.merge({:status => 'succeeded', :output => result})
        @node.server["/work/#{data[:id]}"].put(data)
        log "finished #{display_work_unit} in #{data[:time]} seconds"
      end
    end

    # Mark the current work unit as failed, returning the exception to central.
    def fail_work_unit(exception)
      keep_trying_to "mark work unit as failed" do
        data = completion_params.merge({:status => 'failed', :output => {'output' => exception.message}.to_json})
        @node.server["/work/#{data[:id]}"].put(data)
        log "failed #{display_work_unit} in #{data[:time]} seconds\n#{exception.message}\n#{exception.backtrace}"
      end
    end

    # We expect and require internal communication between the central server
    # and the workers to succeed. If it fails for any reason, log it, and then
    # keep trying the same request.
    def keep_trying_to(title)
      begin
        yield
      rescue Exception => e
        log "failed to #{title} -- retry in #{RETRY_WAIT} seconds"
        log e.message
        log e.backtrace
        sleep RETRY_WAIT
        retry
      end
    end

    # Loggable string of the current work unit.
    def display_work_unit
      "unit ##{@options['work_unit_id']} (#{@action_name}/#{CloudCrowd.display_status(@status)})"
    end

    # Executes the current work unit, catching all exceptions as failures.
    # The action runs on a separate thread so signal handlers can kill it.
    def run_work_unit
      @worker_thread = Thread.new do
        begin
          result = nil
          @action = CloudCrowd.actions[@action_name].new(@status, @input, @options, @node.asset_store)
          Dir.chdir(@action.work_directory) do
            result = case @status
            when PROCESSING then @action.process
            when SPLITTING  then @action.split
            when MERGING    then @action.merge
            else raise Error::StatusUnspecified, "work units must specify their status"
            end
          end
          complete_work_unit({'output' => result}.to_json)
        rescue Exception => e
          fail_work_unit(e)
        end
      end
      @worker_thread.join
    end

    # Wraps <tt>run_work_unit</tt> to benchmark the execution time, if requested.
    def run
      return run_work_unit unless @options['benchmark']
      status = CloudCrowd.display_status(@status)
      log("ran #{@action_name}/#{status} in " + Benchmark.measure { run_work_unit }.to_s)
    end

    private

    # Common parameters to send back to central.
    def base_params
      @base_params ||= {
        :pid => @pid
      }
    end

    # Common parameters to send back to central upon unit completion,
    # regardless of success or failure.
    def completion_params
      base_params.merge({
        :id   => @options['work_unit_id'],
        :time => Time.now - @start_time
      })
    end

    # Extract our instance variables from a WorkUnit's JSON.
    # Returns false (and does nothing) when no unit was handed over.
    def setup_work_unit(unit)
      return false unless unit
      @start_time = Time.now
      @action_name, @input, @options, @status = unit['action'], unit['input'], unit['options'], unit['status']
      @options['job_id'] = unit['job_id']
      @options['work_unit_id'] = unit['id']
      @options['attempts'] ||= unit['attempts']
      log "fetched #{display_work_unit}"
      return true
    end

    # Log a message to the daemon log. Includes PID for identification.
    def log(message)
      puts "Worker ##{@pid}: #{message}" unless ENV['RACK_ENV'] == 'test'
    end

    # When we're done with a unit, clear out our instance variables to make way
    # for the next one. Also, remove all of the unit's temporary storage.
    def clear_work_unit
      @action.cleanup_work_directory
      @action, @action_name, @input, @options, @start_time = nil, nil, nil, nil, nil
    end

    # Force the worker to quit, even if it's in the middle of processing.
    # If it had checked out a work unit, the node should have released it on
    # the central server already.
    def kill_worker_thread_and_exit
      if @worker_thread
        @worker_thread.kill
        # FIX: be harsher when the polite kill doesn't take — a wedged action
        # must not be able to delay process exit.
        @worker_thread.kill! if @worker_thread.alive?
      end
      Process.exit
    end

  end
end
worker threads should be harsher about getting out of the way
module CloudCrowd
  # The Worker, run at intervals by the Daemon, fetches WorkUnits from the
  # central server and dispatches Actions to process them. Workers only fetch
  # units that they are able to handle (for which they have an action in their
  # actions directory). If communication with the central server is interrupted,
  # the WorkUnit will repeatedly attempt to complete its unit -- every
  # Worker::RETRY_WAIT seconds. Any exceptions that take place during
  # the course of the Action will cause the Worker to mark the WorkUnit as
  # having failed.
  class Worker
    # Wait five seconds to retry, after internal communication errors.
    RETRY_WAIT = 5
    attr_reader :action
    # Spinning up a worker will create a new AssetStore with a persistent
    # connection to S3. This AssetStore gets passed into each action, for use
    # as it is run.
    #
    # node      - the owning Node (provides server connection and asset store).
    # work_unit - JSON-decoded WorkUnit hash, processed immediately via #run.
    #
    # NOTE(review): SIGKILL cannot actually be trapped by a process — the
    # 'KILL' registration below is likely a no-op; confirm intent.
    def initialize(node, work_unit)
      Signal.trap('INT') { shut_down }
      Signal.trap('KILL') { shut_down }
      Signal.trap('TERM') { shut_down }
      @pid = $$
      @node = node
      setup_work_unit(work_unit)
      run
    end
    # # Ask the central server for the first WorkUnit in line.
    # def fetch_work_unit
    #   keep_trying_to "fetch a new work unit" do
    #     unit_json = @server['/work'].post(base_params)
    #     setup_work_unit(unit_json)
    #   end
    # end
    # Return output to the central server, marking the current work unit as done.
    def complete_work_unit(result)
      keep_trying_to "complete work unit" do
        data = completion_params.merge({:status => 'succeeded', :output => result})
        @node.server["/work/#{data[:id]}"].put(data)
        log "finished #{display_work_unit} in #{data[:time]} seconds"
      end
    end
    # Mark the current work unit as failed, returning the exception to central.
    def fail_work_unit(exception)
      keep_trying_to "mark work unit as failed" do
        data = completion_params.merge({:status => 'failed', :output => {'output' => exception.message}.to_json})
        @node.server["/work/#{data[:id]}"].put(data)
        log "failed #{display_work_unit} in #{data[:time]} seconds\n#{exception.message}\n#{exception.backtrace}"
      end
    end
    # We expect and require internal communication between the central server
    # and the workers to succeed. If it fails for any reason, log it, and then
    # keep trying the same request (retries forever, every RETRY_WAIT seconds).
    def keep_trying_to(title)
      begin
        yield
      rescue Exception => e
        log "failed to #{title} -- retry in #{RETRY_WAIT} seconds"
        log e.message
        log e.backtrace
        sleep RETRY_WAIT
        retry
      end
    end
    # Loggable string of the current work unit.
    def display_work_unit
      "unit ##{@options['work_unit_id']} (#{@action_name}/#{CloudCrowd.display_status(@status)})"
    end
    # Executes the current work unit, catching all exceptions as failures.
    # The action runs on a separate thread so the signal handlers in
    # #initialize can kill it via #shut_down.
    def run_work_unit
      @worker_thread = Thread.new do
        begin
          result = nil
          @action = CloudCrowd.actions[@action_name].new(@status, @input, @options, @node.asset_store)
          Dir.chdir(@action.work_directory) do
            result = case @status
            when PROCESSING then @action.process
            when SPLITTING then @action.split
            when MERGING then @action.merge
            else raise Error::StatusUnspecified, "work units must specify their status"
            end
          end
          complete_work_unit({'output' => result}.to_json)
        rescue Exception => e
          fail_work_unit(e)
        end
      end
      @worker_thread.join
    end
    # Wraps <tt>run_work_unit</tt> to benchmark the execution time, if requested.
    def run
      return run_work_unit unless @options['benchmark']
      status = CloudCrowd.display_status(@status)
      log("ran #{@action_name}/#{status} in " + Benchmark.measure { run_work_unit }.to_s)
    end
    private
    # Common parameters to send back to central.
    def base_params
      @base_params ||= {
        :pid => @pid
      }
    end
    # Common parameters to send back to central upon unit completion,
    # regardless of success or failure.
    def completion_params
      base_params.merge({
        :id => @options['work_unit_id'],
        :time => Time.now - @start_time
      })
    end
    # Extract our instance variables from a WorkUnit's JSON.
    # Returns false (and does nothing) when no unit was handed over.
    def setup_work_unit(unit)
      return false unless unit
      @start_time = Time.now
      @action_name, @input, @options, @status = unit['action'], unit['input'], unit['options'], unit['status']
      @options['job_id'] = unit['job_id']
      @options['work_unit_id'] = unit['id']
      @options['attempts'] ||= unit['attempts']
      log "fetched #{display_work_unit}"
      return true
    end
    # Log a message to the daemon log. Includes PID for identification.
    def log(message)
      puts "Worker ##{@pid}: #{message}" unless ENV['RACK_ENV'] == 'test'
    end
    # When we're done with a unit, clear out our instance variables to make way
    # for the next one. Also, remove all of the unit's temporary storage.
    def clear_work_unit
      @action.cleanup_work_directory
      @action, @action_name, @input, @options, @start_time = nil, nil, nil, nil, nil
    end
    # Force the worker to quit, even if it's in the middle of processing.
    # If it had checked out a work unit, the node should have released it on
    # the central server already.
    def shut_down
      if @worker_thread
        @worker_thread.kill
        # escalate when the polite kill doesn't take, so a wedged action
        # cannot delay process exit
        @worker_thread.kill! if @worker_thread.alive?
      end
      Process.exit
    end
  end
end
## rbenv.rb
#
# Adds capistrano/rbenv specific variables and tasks
namespace :load do
  task :defaults do
    # FIX: defer the `set` into Capistrano's load:defaults task instead of
    # running it at require time, so the value participates in the normal
    # defaults lifecycle and can be overridden by stage configuration.
    # Set the ruby version using the .ruby-version file
    # Looks for the file in the project root
    set :rbenv_ruby, File.read('.ruby-version').strip
  end
end
Set rbenv defaults on load:defaults task
## rbenv.rb
#
# Adds capistrano/rbenv specific variables and tasks
# Registered under load:defaults so the value is set during Capistrano's
# normal defaults lifecycle rather than at require time.
namespace :load do
  task :defaults do
    # Set the ruby version using the .ruby-version file
    # Looks for the file in the project root
    set :rbenv_ruby, File.read('.ruby-version').strip
  end
end
|
module NfseWebiss
  # Wraps a raw Savon SOAP response for a given WebISS method, exposing the
  # parsed payload plus success/error helpers.
  class Response
    attr_reader :method, :savon_response, :retorno

    # method         - the WebISS operation name the response belongs to.
    # savon_response - the Savon response object to parse.
    def initialize(method, savon_response)
      @method = method
      @savon_response = savon_response
      @retorno = parse_response
    end

    # True when the payload carries neither a SOAP fault nor a WebISS
    # error message.
    def sucesso?
      !(retorno[:fault] || retorno[:mensagem_retorno])
    end

    # The fault or error-message payload; nil on success.
    def erros
      retorno[:fault] || retorno[:mensagem_retorno] unless sucesso?
    end

    # Direct access into the parsed payload.
    def [](key)
      retorno[key]
    end

    private

    # Digs the method-specific result out of the SOAP envelope and parses the
    # embedded XML with Nori; falls back to the raw body on unexpected shape.
    def parse_response
      body = savon_response.hash[:envelope][:body]
      response_key, result_key, resposta_key =
        %W(#{method}Response #{method}Result #{METHODS[method]}Resposta).map(&:snakecase).map(&:to_sym)
      return body unless body[response_key]
      parsed = nori.parse(body[response_key][result_key])[resposta_key]
      parsed[:lista_mensagem_retorno] || parsed
    end

    # Memoized Nori parser configured for snake_cased symbol tags.
    def nori
      @nori ||= Nori.new(
        strip_namespaces: true,
        convert_tags_to: ->(tag) { tag.snakecase.to_sym }
      )
    end
  end
end
Fix problem with HTML entities in the response payload.
module NfseWebiss
  # Wraps a Savon SOAP response from the WebISS NFS-e service and exposes the
  # parsed payload plus success/error helpers.
  class Response
    # @param method [String] SOAP operation name, used to locate the
    #   <method>Response/<method>Result nodes in the envelope
    # @param savon_response [Savon::Response] raw response from the SOAP client
    def initialize(method, savon_response)
      @method = method
      @savon_response = savon_response
      @retorno = parse_response
    end

    attr_reader :method, :savon_response, :retorno

    # True when the payload carries neither a SOAP fault nor a service-level
    # :mensagem_retorno error entry.
    def sucesso?
      !retorno[:fault] && !retorno[:mensagem_retorno]
    end

    # The fault or error messages from the payload, or nil on success.
    def erros
      return if sucesso?
      retorno[:fault] || retorno[:mensagem_retorno]
    end

    # Hash-style access to the parsed payload.
    def [](key)
      retorno[key]
    end

    private

    # Digs the <method>Result XML string out of the SOAP envelope and
    # re-parses it with Nori; falls back to the raw body when the expected
    # response node is absent (e.g. on SOAP faults).
    def parse_response
      body = savon_response.hash[:envelope][:body]
      response, result, resposta = %W(#{method}Response #{method}Result #{METHODS[method]}Resposta).map(&:snakecase).map(&:to_sym)
      if body[response]
        # The embedded XML comes back with ampersands double-escaped, which
        # corrupts the parsed values. Unescape the raw XML *string* before
        # handing it to Nori -- the previous code applied gsub to the parsed
        # Hash (a NoMethodError) with a self-identical pattern (a no-op).
        raw = body[response][result].gsub('&amp;', '&')
        parsed = nori.parse(raw)[resposta]
        parsed[:lista_mensagem_retorno] || parsed
      else
        body
      end
    end

    # Lazily-built Nori parser that strips namespaces and snakecases tags
    # into symbols.
    def nori
      return @nori if @nori
      nori_options = {
        strip_namespaces: true,
        convert_tags_to: ->(tag) { tag.snakecase.to_sym }
      }
      @nori = Nori.new(nori_options)
    end
  end
end
|
module NomadClient
  class Connection
    # Entry point for the Job API endpoints.
    # @return [NomadClient::Api::Job]
    def job
      Api::Job.new(self)
    end
  end

  module Api
    class Job < Path
      ##
      # Query a single job for its specification and status
      # https://www.nomadproject.io/docs/http/job.html
      #
      # @param [String] id The ID of the job according to Nomad
      # @return [Faraday::Response] A faraday response from Nomad
      def get(id)
        connection.get do |req|
          req.url "job/#{id}"
        end
      end

      ##
      # Query the summary of a job
      # https://www.nomadproject.io/docs/http/job.html
      #
      # @param [String] id The ID of the job according to Nomad
      # @return [Faraday::Response] A faraday response from Nomad
      def summary(id)
        connection.get do |req|
          req.url "job/#{id}/summary"
        end
      end

      ##
      # Registers a new job
      # https://www.nomadproject.io/docs/http/job.html
      #
      # @param [String] id The ID of the job according to Nomad
      # @param [Hash|String] job A hash or json string of a valid Job payload (https://www.nomadproject.io/docs/http/job.html)
      # @return [Faraday::Response] A faraday response from Nomad
      def create(id, job)
        connection.post do |req|
          req.url "job/#{id}"
          req.body = job
        end
      end

      ##
      # Update a Job in Nomad
      #
      # @param [String] id The ID of the job according to Nomad
      # @param [Hash|String] job A hash or json string of a valid Job payload (https://www.nomadproject.io/docs/http/job.html)
      # @return [Faraday::Response] A faraday response from Nomad
      def update(id, job)
        connection.put do |req|
          req.url "job/#{id}"
          req.body = job
        end
      end

      ##
      # Invoke a dry-run of the scheduler for the job
      # https://www.nomadproject.io/docs/http/job.html
      #
      # @param [String] id The ID of the job according to Nomad
      # @param [Hash|String] job A hash or json string of a valid Job payload (https://www.nomadproject.io/docs/http/job.html)
      # @return [Faraday::Response] A faraday response from Nomad
      def plan(id, job)
        connection.post do |req|
          req.url "job/#{id}/plan"
          req.body = job
        end
      end

      ##
      # Deregisters a job, and stops all allocations part of it.
      # https://www.nomadproject.io/docs/http/job.html
      #
      # @param [String] id The ID of the job according to Nomad
      # @return [Faraday::Response] A faraday response from Nomad
      def delete(id)
        connection.delete do |req|
          req.url "job/#{id}"
        end
      end
      alias_method :deregister, :delete

      ##
      # Query the allocations belonging to a single job
      # https://www.nomadproject.io/docs/http/job.html
      #
      # @param [String] id The ID of the job according to Nomad
      # @return [Faraday::Response] A faraday response from Nomad
      def allocations(id)
        connection.get do |req|
          req.url "job/#{id}/allocations"
        end
      end

      ##
      # Query the evaluations belonging to a single job
      # https://www.nomadproject.io/docs/http/job.html
      #
      # @param [String] id The ID of the job according to Nomad
      # @return [Faraday::Response] A faraday response from Nomad
      def evaluations(id)
        connection.get do |req|
          req.url "job/#{id}/evaluations"
        end
      end

      ##
      # Creates a new evaluation for the given job. This can be used to force run the scheduling logic if necessary
      # https://www.nomadproject.io/docs/http/job.html
      #
      # @param [String] id The ID of the job according to Nomad
      # @return [Faraday::Response] A faraday response from Nomad
      def evaluate(id)
        connection.post do |req|
          req.url "job/#{id}/evaluate"
        end
      end

      ##
      # Forces a new instance of the periodic job. A new instance will be created even if it violates the job's prohibit_overlap settings.
      # As such, this should be only used to immediately run a periodic job
      # https://www.nomadproject.io/docs/http/job.html
      #
      # @param [String] id The ID of the job according to Nomad
      # @return [Faraday::Response] A faraday response from Nomad
      def periodic_force(id)
        connection.post do |req|
          req.url "job/#{id}/periodic/force"
        end
      end

      ##
      # Reads information about all versions of a job
      # https://www.nomadproject.io/docs/http/job.html
      #
      # Fixed: this is a read-only endpoint; Nomad serves it via GET, not POST.
      #
      # @param [String] id The ID of the job according to Nomad
      # @return [Faraday::Response] A faraday response from Nomad
      def versions(id)
        connection.get do |req|
          req.url "job/#{id}/versions"
        end
      end

      ##
      # Lists a single job's deployments
      # https://www.nomadproject.io/docs/http/job.html
      #
      # Fixed: this is a read-only endpoint; Nomad serves it via GET, not POST.
      #
      # @param [String] id The ID of the job according to Nomad
      # @return [Faraday::Response] A faraday response from Nomad
      def deployments(id)
        connection.get do |req|
          req.url "job/#{id}/deployments"
        end
      end

      ##
      # Returns a single job's most recent deployment
      # https://www.nomadproject.io/docs/http/job.html
      #
      # Fixed: this is a read-only endpoint; Nomad serves it via GET, not POST.
      #
      # @param [String] id The ID of the job according to Nomad
      # @return [Faraday::Response] A faraday response from Nomad
      def most_recent_deployment(id)
        connection.get do |req|
          req.url "job/#{id}/deployment"
        end
      end

      ##
      # Dispatches a new instance of a parameterized job
      # https://www.nomadproject.io/docs/http/job.html
      #
      # NOTE(review): recent Nomad versions expect Payload/Meta in the JSON
      # request body rather than as query params -- confirm against the
      # targeted Nomad version before changing.
      #
      # @param [String] id The ID of the job according to Nomad
      # @param [String] payload Base64 encoded string containing the payload. This is limited to 15 KB
      # @param [Hash] meta Arbitrary metadata to pass to the job
      # @return [Faraday::Response] A faraday response from Nomad
      def dispatch(id, payload: '', meta: nil)
        connection.post do |req|
          req.url "job/#{id}/dispatch"
          req.params[:Payload] = payload
          req.params[:Meta] = meta
        end
      end

      ##
      # Reverts the job to an older version.
      # https://www.nomadproject.io/docs/http/job.html
      #
      # @param [String] id The ID of the job according to Nomad
      # @param [Integer] job_version The job version to revert to
      # @param [Integer] enforce_prior_version Value specifying the current job's version.
      #   This is checked and acts as a check-and-set value before reverting to the specified job
      # @return [Faraday::Response] A faraday response from Nomad
      def revert(id, job_version: 0, enforce_prior_version: nil)
        connection.post do |req|
          req.url "job/#{id}/revert"
          req.params[:JobVersion] = job_version
          req.params[:EnforcePriorVersion] = enforce_prior_version
        end
      end

      ##
      # Sets the job's stability
      # https://www.nomadproject.io/docs/http/job.html
      #
      # @param [String] id The ID of the job according to Nomad
      # @param [Integer] job_version The job version to set the stability on
      # @param [Boolean] stable Whether the job should be marked as stable or not
      # @return [Faraday::Response] A faraday response from Nomad
      def stable(id, job_version: 0, stable: false)
        connection.post do |req|
          req.url "job/#{id}/stable"
          req.params[:JobVersion] = job_version
          req.params[:Stable] = stable
        end
      end
    end
  end
end
Update methods on deployments endpoints
module NomadClient
  class Connection
    # Entry point for the Job API endpoints.
    # @return [NomadClient::Api::Job]
    def job
      Api::Job.new(self)
    end
  end

  module Api
    class Job < Path
      ##
      # Query a single job for its specification and status
      # https://www.nomadproject.io/docs/http/job.html
      #
      # @param [String] id The ID of the job according to Nomad
      # @return [Faraday::Response] A faraday response from Nomad
      def get(id)
        connection.get do |req|
          req.url "job/#{id}"
        end
      end

      ##
      # Query the summary of a job
      # https://www.nomadproject.io/docs/http/job.html
      #
      # @param [String] id The ID of the job according to Nomad
      # @return [Faraday::Response] A faraday response from Nomad
      def summary(id)
        connection.get do |req|
          req.url "job/#{id}/summary"
        end
      end

      ##
      # Registers a new job
      # https://www.nomadproject.io/docs/http/job.html
      #
      # @param [String] id The ID of the job according to Nomad
      # @param [Hash|String] job A hash or json string of a valid Job payload (https://www.nomadproject.io/docs/http/job.html)
      # @return [Faraday::Response] A faraday response from Nomad
      def create(id, job)
        connection.post do |req|
          req.url "job/#{id}"
          req.body = job
        end
      end

      ##
      # Update a Job in Nomad
      #
      # @param [String] id The ID of the job according to Nomad
      # @param [Hash|String] job A hash or json string of a valid Job payload (https://www.nomadproject.io/docs/http/job.html)
      # @return [Faraday::Response] A faraday response from Nomad
      def update(id, job)
        connection.put do |req|
          req.url "job/#{id}"
          req.body = job
        end
      end

      ##
      # Invoke a dry-run of the scheduler for the job
      # https://www.nomadproject.io/docs/http/job.html
      #
      # @param [String] id The ID of the job according to Nomad
      # @param [Hash|String] job A hash or json string of a valid Job payload (https://www.nomadproject.io/docs/http/job.html)
      # @return [Faraday::Response] A faraday response from Nomad
      def plan(id, job)
        connection.post do |req|
          req.url "job/#{id}/plan"
          req.body = job
        end
      end

      ##
      # Deregisters a job, and stops all allocations part of it.
      # https://www.nomadproject.io/docs/http/job.html
      #
      # @param [String] id The ID of the job according to Nomad
      # @return [Faraday::Response] A faraday response from Nomad
      def delete(id)
        connection.delete do |req|
          req.url "job/#{id}"
        end
      end
      alias_method :deregister, :delete

      ##
      # Query the allocations belonging to a single job
      # https://www.nomadproject.io/docs/http/job.html
      #
      # @param [String] id The ID of the job according to Nomad
      # @return [Faraday::Response] A faraday response from Nomad
      def allocations(id)
        connection.get do |req|
          req.url "job/#{id}/allocations"
        end
      end

      ##
      # Query the evaluations belonging to a single job
      # https://www.nomadproject.io/docs/http/job.html
      #
      # @param [String] id The ID of the job according to Nomad
      # @return [Faraday::Response] A faraday response from Nomad
      def evaluations(id)
        connection.get do |req|
          req.url "job/#{id}/evaluations"
        end
      end

      ##
      # Creates a new evaluation for the given job. This can be used to force run the scheduling logic if necessary
      # https://www.nomadproject.io/docs/http/job.html
      #
      # @param [String] id The ID of the job according to Nomad
      # @return [Faraday::Response] A faraday response from Nomad
      def evaluate(id)
        connection.post do |req|
          req.url "job/#{id}/evaluate"
        end
      end

      ##
      # Forces a new instance of the periodic job. A new instance will be created even if it violates the job's prohibit_overlap settings.
      # As such, this should be only used to immediately run a periodic job
      # https://www.nomadproject.io/docs/http/job.html
      #
      # @param [String] id The ID of the job according to Nomad
      # @return [Faraday::Response] A faraday response from Nomad
      def periodic_force(id)
        connection.post do |req|
          req.url "job/#{id}/periodic/force"
        end
      end

      ##
      # Reads information about all versions of a job
      # https://www.nomadproject.io/docs/http/job.html
      #
      # Fixed: this is a read-only endpoint; Nomad serves it via GET, not POST
      # (same fix already applied to the deployments endpoints).
      #
      # @param [String] id The ID of the job according to Nomad
      # @return [Faraday::Response] A faraday response from Nomad
      def versions(id)
        connection.get do |req|
          req.url "job/#{id}/versions"
        end
      end

      ##
      # Lists a single job's deployments
      # https://www.nomadproject.io/docs/http/job.html
      #
      # @param [String] id The ID of the job according to Nomad
      # @return [Faraday::Response] A faraday response from Nomad
      def deployments(id)
        connection.get do |req|
          req.url "job/#{id}/deployments"
        end
      end

      ##
      # Returns a single job's most recent deployment
      # https://www.nomadproject.io/docs/http/job.html
      #
      # @param [String] id The ID of the job according to Nomad
      # @return [Faraday::Response] A faraday response from Nomad
      def most_recent_deployment(id)
        connection.get do |req|
          req.url "job/#{id}/deployment"
        end
      end

      ##
      # Dispatches a new instance of a parameterized job
      # https://www.nomadproject.io/docs/http/job.html
      #
      # NOTE(review): recent Nomad versions expect Payload/Meta in the JSON
      # request body rather than as query params -- confirm against the
      # targeted Nomad version before changing.
      #
      # @param [String] id The ID of the job according to Nomad
      # @param [String] payload Base64 encoded string containing the payload. This is limited to 15 KB
      # @param [Hash] meta Arbitrary metadata to pass to the job
      # @return [Faraday::Response] A faraday response from Nomad
      def dispatch(id, payload: '', meta: nil)
        connection.post do |req|
          req.url "job/#{id}/dispatch"
          req.params[:Payload] = payload
          req.params[:Meta] = meta
        end
      end

      ##
      # Reverts the job to an older version.
      # https://www.nomadproject.io/docs/http/job.html
      #
      # @param [String] id The ID of the job according to Nomad
      # @param [Integer] job_version The job version to revert to
      # @param [Integer] enforce_prior_version Value specifying the current job's version.
      #   This is checked and acts as a check-and-set value before reverting to the specified job
      # @return [Faraday::Response] A faraday response from Nomad
      def revert(id, job_version: 0, enforce_prior_version: nil)
        connection.post do |req|
          req.url "job/#{id}/revert"
          req.params[:JobVersion] = job_version
          req.params[:EnforcePriorVersion] = enforce_prior_version
        end
      end

      ##
      # Sets the job's stability
      # https://www.nomadproject.io/docs/http/job.html
      #
      # @param [String] id The ID of the job according to Nomad
      # @param [Integer] job_version The job version to set the stability on
      # @param [Boolean] stable Whether the job should be marked as stable or not
      # @return [Faraday::Response] A faraday response from Nomad
      def stable(id, job_version: 0, stable: false)
        connection.post do |req|
          req.url "job/#{id}/stable"
          req.params[:JobVersion] = job_version
          req.params[:Stable] = stable
        end
      end
    end
  end
end
module Oa
  module Vkontakte
    # Gem release version (semantic versioning).
    VERSION = "0.1.3"
  end
end
Bump version to 0.1.4.
module Oa
  module Vkontakte
    # Gem release version (semantic versioning).
    VERSION = "0.1.4"
  end
end
|
# Gem release version (semantic versioning).
module OhMyMethod
  VERSION = "0.0.5"
end
Bump version to 0.0.6.
# Gem release version (semantic versioning).
module OhMyMethod
  VERSION = "0.0.6"
end
|
# Gem release version (semantic versioning).
module OnePassword
  VERSION = '0.0.1'
end
Bump patch version to include documentation in gem
# Gem release version (semantic versioning).
module OnePassword
  VERSION = '0.0.2'
end
|
# Opal (Ruby-to-JavaScript) wrapper scaffolding for the Highcharts JS library.
module Highcharts
  # Raised when a caller requests a Highcharts feature this wrapper does not
  # support.
  class UnsupportedFeature < RuntimeError; end

  # Forward declarations of the wrapper classes; implementations live in
  # their own files.
  class Axis; end
  class Chart; end
  class Extremes; end
  class Highcharts; end
  class Options; end
  class Point; end
  class Renderer; end
  class Series; end

  module MonkeyPatches
    # Extends Opal's Native.alias_native: when given :array => SomeClass, the
    # aliased native call's array result is wrapped element-by-element in
    # SomeClass; otherwise it falls through to the stock implementation.
    # NOTE(review): relies on Opal's Native module providing alias_native and
    # Native.call -- only meaningful in an Opal runtime.
    def alias_native(new, old = new, options = {})
      if klass = options[:array]
        define_method new do |*args, &block|
          # Native.call returns nil for JS null/undefined; skip wrapping then.
          if value = Native.call(@native, old, *args, &block)
            value.map{ |e| klass.new(e) }
          end
        end
      else
        super
      end
    end
  end

  module Base
    include Native
    extend MonkeyPatches

    # Log to the browser console (Opal x-string compiles to raw JavaScript).
    def log(s)
      %x{ console.log( #{ s } ) }
    end

    # Make Base's helpers available as both instance and class-level methods
    # on any includer.
    def self.included(klass)
      klass.extend self
    end
  end
end
initial commit
# Opal (Ruby-to-JavaScript) wrapper scaffolding for the Highcharts JS library.
module Highcharts
  # Raised when a caller requests a Highcharts feature this wrapper does not
  # support.
  class UnsupportedFeature < RuntimeError; end

  # Forward declarations of the wrapper classes; implementations live in
  # their own files.
  class Axis; end
  class Chart; end
  class Extremes; end
  class Highcharts; end
  class Options; end
  class Point; end
  class Renderer; end
  class Series; end

  module MonkeyPatches
    # Extends Opal's Native.alias_native: when given :array => SomeClass, the
    # aliased native call's array result is wrapped element-by-element in
    # SomeClass; otherwise it falls through to the stock implementation.
    # NOTE(review): relies on Opal's Native module providing alias_native and
    # Native.call -- only meaningful in an Opal runtime.
    def alias_native(new, old = new, options = {})
      if klass = options[:array]
        define_method new do |*args, &block|
          # Native.call returns nil for JS null/undefined; skip wrapping then.
          if value = Native.call(@native, old, *args, &block)
            value.map{ |e| klass.new(e) }
          end
        end
      else
        super
      end
    end
  end

  module Base
    include Native
    # extend MonkeyPatches
    # NOTE(review): the MonkeyPatches extension above is disabled, so the
    # :array option of alias_native is currently inactive -- confirm intent.

    # Log to the browser console (Opal x-string compiles to raw JavaScript).
    def log(s)
      %x{ console.log( #{ s } ) }
    end

    # Make Base's helpers available as both instance and class-level methods
    # on any includer.
    def self.included(klass)
      klass.extend self
    end
  end
end
|
module OpsWorks
  module CLI
    # Gem release version (semantic versioning).
    VERSION = '0.4.3'
  end
end
Bump version
module OpsWorks
  module CLI
    # Gem release version (semantic versioning).
    VERSION = '0.4.4'
  end
end
|
# -*- encoding: utf-8 -*-
$:.push File.expand_path("../lib", __FILE__)
require "client_side_validations/version"

# Gem specification for client_side_validations.
Gem::Specification.new do |s|
  s.name = "client_side_validations"
  s.version = ClientSideValidations::VERSION
  s.platform = Gem::Platform::RUBY
  s.authors = ["Brian Cardarella"]
  s.email = ["bcardarella@gmail.com"]
  s.homepage = "https://github.com/DavyJonesLocker/client_side_validations"
  s.summary = %q{Client Side Validations}
  s.description = %q{Client Side Validations made easy for your Rails 4.2 applications}
  s.license = 'MIT'
  s.files = `git ls-files -- {lib/*,vendor/*,*.gemspec}`.split("\n")
  s.require_paths = ["lib"]

  # rails and jquery-rails are required at runtime (generators and asset
  # pipeline integration), so they must be runtime dependencies rather than
  # development dependencies. Fixes #592.
  s.add_dependency 'rails', '>= 4.0.0', '< 4.3.0'
  s.add_dependency 'jquery-rails', '>= 3.1.2', '< 5.0.0'

  s.add_development_dependency 'sqlite3', '~> 1.3'
  s.add_development_dependency 'mocha', '~> 1.1'
  s.add_development_dependency 'm', '~> 1.3'
  s.add_development_dependency 'minitest', '>= 4.7.5', '< 6.0.0'
  s.add_development_dependency 'simplecov', '~> 0.9.1'
  s.add_development_dependency 'coveralls', '~> 0.7.1'
  s.add_development_dependency 'appraisal', '~> 1.0'
  # byebug needs Ruby >= 2.0; fall back to debugger on 1.9.
  if Gem::Version.new(RUBY_VERSION.dup) >= Gem::Version.new('2.0')
    s.add_development_dependency 'byebug', '~> 3.5'
  else
    s.add_development_dependency 'debugger', '~> 1.6'
  end
  # For QUnit testing
  s.add_development_dependency 'sinatra', '~> 1.4'
  s.add_development_dependency 'shotgun', '~> 0.9'
  s.add_development_dependency 'thin', '~> 1.6'
  s.add_development_dependency 'json', '~> 1.8'
  s.add_development_dependency 'coffee-script', '~> 2.3'
end
Add rails and jquery-rails as runtime dependencies.
Fixes #592.
# -*- encoding: utf-8 -*-
$:.push File.expand_path("../lib", __FILE__)
require "client_side_validations/version"

# Gem specification for client_side_validations.
Gem::Specification.new do |spec|
  spec.name = "client_side_validations"
  spec.version = ClientSideValidations::VERSION
  spec.platform = Gem::Platform::RUBY
  spec.authors = ["Brian Cardarella"]
  spec.email = ["bcardarella@gmail.com"]
  spec.homepage = "https://github.com/DavyJonesLocker/client_side_validations"
  spec.summary = %q{Client Side Validations}
  spec.description = %q{Client Side Validations made easy for your Rails 4.2 applications}
  spec.license = 'MIT'
  spec.files = `git ls-files -- {lib/*,vendor/*,*.gemspec}`.split("\n")
  spec.require_paths = ["lib"]

  # Runtime dependencies: needed by the generators and asset pipeline hooks.
  spec.add_dependency 'rails', '>= 4.0.0', '< 4.3.0'
  spec.add_dependency 'jquery-rails', '>= 3.1.2', '< 5.0.0'

  # Development/test dependencies.
  spec.add_development_dependency 'sqlite3', '~> 1.3'
  spec.add_development_dependency 'mocha', '~> 1.1'
  spec.add_development_dependency 'm', '~> 1.3'
  spec.add_development_dependency 'minitest', '>= 4.7.5', '< 6.0.0'
  spec.add_development_dependency 'simplecov', '~> 0.9.1'
  spec.add_development_dependency 'coveralls', '~> 0.7.1'
  spec.add_development_dependency 'appraisal', '~> 1.0'
  # byebug needs Ruby >= 2.0; fall back to debugger on older Rubies.
  if Gem::Version.new(RUBY_VERSION.dup) >= Gem::Version.new('2.0')
    spec.add_development_dependency 'byebug', '~> 3.5'
  else
    spec.add_development_dependency 'debugger', '~> 1.6'
  end
  # For QUnit testing
  spec.add_development_dependency 'sinatra', '~> 1.4'
  spec.add_development_dependency 'shotgun', '~> 0.9'
  spec.add_development_dependency 'thin', '~> 1.6'
  spec.add_development_dependency 'json', '~> 1.8'
  spec.add_development_dependency 'coffee-script', '~> 2.3'
end
|
Regenerate gemspec for version
# Generated by jeweler
# DO NOT EDIT THIS FILE DIRECTLY
# Instead, edit Jeweler::Tasks in Rakefile, and run 'rake gemspec'
# -*- encoding: utf-8 -*-

# NOTE(review): s.version is an empty string and summary/description are the
# jeweler TODO placeholders -- set the gem version (VERSION file / Rakefile)
# and fill in a real summary/description, then regenerate, before releasing.
Gem::Specification.new do |s|
  s.name = %q{tomcat-manager}
  s.version = ""

  s.required_rubygems_version = Gem::Requirement.new(">= 0") if s.respond_to? :required_rubygems_version=
  s.authors = ["Dakota Bailey"]
  s.date = %q{2012-04-03}
  s.description = %q{TODO: longer description of your gem}
  s.email = %q{dakota.bailey@gmail.com}
  s.extra_rdoc_files = [
    "LICENSE",
    "README"
  ]
  s.files = [
    ".document",
    "Gemfile",
    "LICENSE",
    "README",
    "Rakefile",
    "lib/tomcat_manager.rb",
    "lib/tomcat_manager/core.rb",
    "lib/tomcat_manager/tomcat_manager.rb",
    "test/helper.rb",
    "test/test_tomcat-manager.rb",
    "tomcat_manager.gemspec"
  ]
  s.homepage = %q{http://github.com/dbailey/tomcat-manager}
  s.licenses = ["MIT"]
  s.require_paths = ["lib"]
  s.rubygems_version = %q{1.3.6}
  s.summary = %q{TODO: one-line summary of your gem}

  # Jeweler-generated compatibility shim: express the same dependencies with
  # whichever API the installed RubyGems supports.
  if s.respond_to? :specification_version then
    current_version = Gem::Specification::CURRENT_SPECIFICATION_VERSION
    s.specification_version = 3

    if Gem::Version.new(Gem::RubyGemsVersion) >= Gem::Version.new('1.2.0') then
      s.add_development_dependency(%q<shoulda>, [">= 0"])
      s.add_development_dependency(%q<rdoc>, ["~> 3.12"])
      s.add_development_dependency(%q<bundler>, ["~> 1.0.0"])
      s.add_development_dependency(%q<jeweler>, ["~> 1.8.3"])
      s.add_development_dependency(%q<rcov>, [">= 0"])
    else
      s.add_dependency(%q<shoulda>, [">= 0"])
      s.add_dependency(%q<rdoc>, ["~> 3.12"])
      s.add_dependency(%q<bundler>, ["~> 1.0.0"])
      s.add_dependency(%q<jeweler>, ["~> 1.8.3"])
      s.add_dependency(%q<rcov>, [">= 0"])
    end
  else
    s.add_dependency(%q<shoulda>, [">= 0"])
    s.add_dependency(%q<rdoc>, ["~> 3.12"])
    s.add_dependency(%q<bundler>, ["~> 1.0.0"])
    s.add_dependency(%q<jeweler>, ["~> 1.8.3"])
    s.add_dependency(%q<rcov>, [">= 0"])
  end
end
|
require 'colored'

module Pod
  # Resolves the specs required by each target definition of a Podfile.
  class Resolver
    include Config::Mixin

    attr_reader :podfile, :sandbox
    attr_accessor :cached_sets, :cached_sources

    def initialize(podfile, sandbox)
      @podfile = podfile
      @sandbox = sandbox
      @cached_sets = {}
      @cached_sources = Source::Aggregate.new
      @log_indent = 0;
    end

    # Resolves every target definition and returns a Hash mapping each
    # target definition to its sorted list of specifications.
    def resolve
      @specs = {}
      targets_and_specs = {}

      @podfile.target_definitions.values.each do |target_definition|
        puts "\nResolving dependencies for target `#{target_definition.name}' (#{target_definition.platform})".green if config.verbose?
        @loaded_specs = []
        # TODO @podfile.platform will change to target_definition.platform
        find_dependency_sets(@podfile, target_definition.dependencies, target_definition)
        targets_and_specs[target_definition] = @specs.values_at(*@loaded_specs).sort_by(&:name)
      end

      # Specification doesn't need to know more about the context, so we assign
      # the other specification, of which this pod is a part, to the spec.
      @specs.values.sort_by(&:name).each do |spec|
        if spec.part_of_other_pod?
          spec.part_of_specification = @cached_sets[spec.part_of.name].specification
        end
      end

      targets_and_specs
    end

    private

    # Looks up (and caches) the Specification::Set for a dependency, whether
    # it is inline, external, or from the aggregated sources.
    def find_cached_set(dependency, platform)
      @cached_sets[dependency.name] ||= begin
        if dependency.specification
          Specification::Set::External.new(dependency.specification)
        elsif external_source = dependency.external_source
          # The platform isn't actually being used by the LocalPod instance
          # that's being used behind the scenes, but passing it anyways for
          # completeness sake.
          specification = external_source.specification_from_sandbox(@sandbox, platform)
          Specification::Set::External.new(specification)
        else
          @cached_sources.search(dependency)
        end
      end
    end

    # Recursively loads the specs for the given dependencies into @specs.
    def find_dependency_sets(dependent_specification, dependencies, target_definition)
      @log_indent += 1
      dependencies.each do |dependency|
        puts '  ' * @log_indent + "- #{dependency}" if config.verbose?
        set = find_cached_set(dependency, target_definition.platform)
        set.required_by(dependent_specification)
        # Ensure we don't resolve the same spec twice for one target
        unless @loaded_specs.include?(dependency.name)
          # Get a reference to the spec that’s actually being loaded.
          # If it’s a subspec dependency, e.g. 'RestKit/Network', then
          # find that subspec.
          spec = set.specification
          if dependency.subspec_dependency?
            spec = spec.subspec_by_name(dependency.name)
          end
          @loaded_specs << spec.name
          @specs[spec.name] = spec
          # And recursively load the dependencies of the spec.
          # TODO fix the need to return an empty arrayf if there are no deps for the given platform
          find_dependency_sets(spec, (spec.dependencies[target_definition.platform.to_sym] || []), target_definition)
        end
        # Fixed (#246): validate the platform for *every* dependency of every
        # target, including specs that were already loaded while resolving a
        # previous target definition. Validating only inside the `unless`
        # block above skipped specs reused across targets.
        validate_platform!(spec || @specs[dependency.name], target_definition)
      end
      @log_indent -= 1
    end

    # Raises Informative when the spec's platform is not supported by the
    # target's platform.
    def validate_platform!(spec, target)
      unless target.platform.support?(spec.platform)
        raise Informative, "[!] The platform required by the target `#{target.name}' `#{target.platform}' does not match that of #{spec} `#{spec.platform}'".red
      end
    end
  end
end
[#246 | Resolver] Validate plaforms against each target.
require 'colored'

module Pod
  # Resolves the specs required by each target definition of a Podfile.
  class Resolver
    include Config::Mixin

    attr_reader :podfile, :sandbox
    attr_accessor :cached_sets, :cached_sources

    def initialize(podfile, sandbox)
      @podfile = podfile
      @sandbox = sandbox
      @cached_sets = {}
      @cached_sources = Source::Aggregate.new
      @log_indent = 0;
    end

    # Resolves every target definition and returns a Hash mapping each
    # target definition to its sorted list of specifications.
    def resolve
      @specs = {}
      targets_and_specs = {}

      @podfile.target_definitions.values.each do |target_definition|
        puts "\nResolving dependencies for target `#{target_definition.name}' (#{target_definition.platform})".green if config.verbose?
        @loaded_specs = []
        # TODO @podfile.platform will change to target_definition.platform
        find_dependency_sets(@podfile, target_definition.dependencies, target_definition)
        targets_and_specs[target_definition] = @specs.values_at(*@loaded_specs).sort_by(&:name)
      end

      # Specification doesn't need to know more about the context, so we assign
      # the other specification, of which this pod is a part, to the spec.
      @specs.values.sort_by(&:name).each do |spec|
        if spec.part_of_other_pod?
          spec.part_of_specification = @cached_sets[spec.part_of.name].specification
        end
      end

      targets_and_specs
    end

    private

    # Looks up (and caches) the Specification::Set for a dependency, whether
    # it is inline, external, or from the aggregated sources.
    def find_cached_set(dependency, platform)
      @cached_sets[dependency.name] ||= begin
        if dependency.specification
          Specification::Set::External.new(dependency.specification)
        elsif external_source = dependency.external_source
          # The platform isn't actually being used by the LocalPod instance
          # that's being used behind the scenes, but passing it anyways for
          # completeness sake.
          specification = external_source.specification_from_sandbox(@sandbox, platform)
          Specification::Set::External.new(specification)
        else
          @cached_sources.search(dependency)
        end
      end
    end

    # Recursively loads the specs for the given dependencies into @specs.
    def find_dependency_sets(dependent_specification, dependencies, target_definition)
      @log_indent += 1
      dependencies.each do |dependency|
        puts '  ' * @log_indent + "- #{dependency}" if config.verbose?
        set = find_cached_set(dependency, target_definition.platform)
        set.required_by(dependent_specification)
        # Ensure we don't resolve the same spec twice for one target
        unless @loaded_specs.include?(dependency.name)
          # Get a reference to the spec that’s actually being loaded.
          # If it’s a subspec dependency, e.g. 'RestKit/Network', then
          # find that subspec.
          spec = set.specification
          if dependency.subspec_dependency?
            spec = spec.subspec_by_name(dependency.name)
          end
          @loaded_specs << spec.name
          @specs[spec.name] = spec
          # And recursively load the dependencies of the spec.
          # TODO fix the need to return an empty arrayf if there are no deps for the given platform
          find_dependency_sets(spec, (spec.dependencies[target_definition.platform.to_sym] || []), target_definition)
        end
        # Validate the platform for every dependency of every target (#246):
        # `spec` is nil here when the spec was already loaded for a previous
        # target, so fall back to the cached entry in @specs.
        validate_platform!(spec || @specs[dependency.name], target_definition)
      end
      @log_indent -= 1
    end

    # Raises Informative when the spec's platform is not supported by the
    # target's platform.
    def validate_platform!(spec, target)
      unless target.platform.support?(spec.platform)
        raise Informative, "[!] The platform required by the target `#{target.name}' `#{target.platform}' does not match that of #{spec} `#{spec.platform}'".red
      end
    end
  end
end
|
module Pacto
  # Describes the request half of a Pacto contract: host, HTTP method, path,
  # headers, params, and an optional JSON schema.
  class RequestClause < Hashie::Dash
    include Hashie::Extensions::Coercion
    # include Hashie::Extensions::IndifferentAccess # remove this if we cleanup string vs symbol

    property :host # required?
    property :method, required: true
    property :schema, default: {}
    property :path
    property :headers
    property :params, default: {}

    # Builds the clause from a definition Hash; the HTTP method is normalized
    # to a lowercase Symbol (e.g. "GET" -> :get) before Dash validation runs.
    def initialize(definition)
      mash = Hashie::Mash.new definition
      mash['method'] = normalize(mash['method'])
      super mash
    end

    # NOTE(review): returns the normalized value but never stores it; a Dash
    # setter usually has to write the property -- confirm this override is
    # intentional.
    def method=(method)
      normalize(method)
    end

    # Memoized URI assembled from host, path and params.
    def uri
      @uri ||= Pacto::URI.for(host, path, params)
    end

    # FIXME: Send a PR to Hashie so it doesn't coerce values that already match the target class
    def self.coerce(value)
      if value.is_a? self
        value
      else
        RequestClause.new value
      end
    end

    private

    # Lowercase-Symbol normalization for HTTP method names.
    def normalize(method)
      method.to_s.downcase.to_sym
    end
  end
end
Hashie workaround no longer seems necessary
module Pacto
  # Describes the request half of a Pacto contract: host, HTTP method, path,
  # headers, params, and an optional JSON schema.
  class RequestClause < Hashie::Dash
    include Hashie::Extensions::Coercion
    # include Hashie::Extensions::IndifferentAccess # remove this if we cleanup string vs symbol

    property :host # required?
    property :method, required: true
    property :schema, default: {}
    property :path
    property :headers
    property :params, default: {}

    # Builds the clause from a definition Hash; the HTTP method is normalized
    # to a lowercase Symbol (e.g. "GET" -> :get) before Dash validation runs.
    def initialize(definition)
      mash = Hashie::Mash.new definition
      mash['method'] = normalize(mash['method'])
      super mash
    end

    # NOTE(review): returns the normalized value but never stores it; a Dash
    # setter usually has to write the property -- confirm this override is
    # intentional.
    def method=(method)
      normalize(method)
    end

    # Memoized URI assembled from host, path and params.
    def uri
      @uri ||= Pacto::URI.for(host, path, params)
    end

    private

    # Lowercase-Symbol normalization for HTTP method names.
    def normalize(method)
      method.to_s.downcase.to_sym
    end
  end
end
|
module Paperclip
module Storage
# Amazon's S3 file hosting service is a scalable, easy place to store files for
# distribution. You can find out more about it at http://aws.amazon.com/s3
# There are a few S3-specific options for has_attached_file:
# * +s3_credentials+: Takes a path, a File, or a Hash. The path (or File) must point
# to a YAML file containing the +access_key_id+ and +secret_access_key+ that Amazon
# gives you. You can 'environment-space' this just like you do to your
# database.yml file, so different environments can use different accounts:
# development:
# access_key_id: 123...
# secret_access_key: 123...
# test:
# access_key_id: abc...
# secret_access_key: abc...
# production:
# access_key_id: 456...
# secret_access_key: 456...
# This is not required, however, and the file may simply look like this:
# access_key_id: 456...
# secret_access_key: 456...
# In which case, those access keys will be used in all environments. You can also
# put your bucket name in this file, instead of adding it to the code directly.
# This is useful when you want the same account but a different bucket for
# development versus production.
# * +s3_permissions+: This is a String that should be one of the "canned" access
# policies that S3 provides (more information can be found here:
# http://docs.amazonwebservices.com/AmazonS3/latest/dev/index.html?RESTAccessPolicy.html)
# The default for Paperclip is :public_read.
#
  # You can set permissions on a per-style basis by doing the following:
# :s3_permissions => {
# :original => :private
# }
  # Or globally:
# :s3_permissions => :private
#
# * +s3_protocol+: The protocol for the URLs generated to your S3 assets. Can be either
# 'http' or 'https'. Defaults to 'http' when your :s3_permissions are :public_read (the
# default), and 'https' when your :s3_permissions are anything else.
# * +s3_headers+: A hash of headers such as {'Expires' => 1.year.from_now.httpdate}
# * +bucket+: This is the name of the S3 bucket that will store your files. Remember
# that the bucket must be unique across all of Amazon S3. If the bucket does not exist
# Paperclip will attempt to create it. The bucket name will not be interpolated.
  # You can define the bucket as a Proc if you want to determine its name at runtime.
# Paperclip will call that Proc with attachment as the only argument.
# * +s3_host_alias+: The fully-qualified domain name (FQDN) that is the alias to the
# S3 domain of your bucket. Used with the :s3_alias_url url interpolation. See the
# link in the +url+ entry for more information about S3 domains and buckets.
# * +url+: There are four options for the S3 url. You can choose to have the bucket's name
# placed domain-style (bucket.s3.amazonaws.com) or path-style (s3.amazonaws.com/bucket).
# You can also specify a CNAME (which requires the CNAME to be specified as
# :s3_alias_url. You can read more about CNAMEs and S3 at
# http://docs.amazonwebservices.com/AmazonS3/latest/index.html?VirtualHosting.html
# Normally, this won't matter in the slightest and you can leave the default (which is
# path-style, or :s3_path_url). But in some cases paths don't work and you need to use
# the domain-style (:s3_domain_url). Anything else here will be treated like path-style.
# NOTE: If you use a CNAME for use with CloudFront, you can NOT specify https as your
# :s3_protocol; This is *not supported* by S3/CloudFront. Finally, when using the host
# alias, the :bucket parameter is ignored, as the hostname is used as the bucket name
# by S3. The fourth option for the S3 url is :asset_host, which uses Rails' built-in
# asset_host settings. NOTE: To get the full url from a paperclip'd object, use the
# image_path helper; this is what image_tag uses to generate the url for an img tag.
# * +path+: This is the key under the bucket in which the file will be stored. The
# URL will be constructed from the bucket and the path. This is what you will want
# to interpolate. Keys should be unique, like filenames, and despite the fact that
# S3 (strictly speaking) does not support directories, you can still use a / to
# separate parts of your file name.
  # * +region+: If your bucket is located in the Tokyo region, set this to "tokyo".
module S3
def self.extended base
begin
require 'aws/s3'
rescue LoadError => e
e.message << " (You may need to install the aws-s3 gem)"
raise e
end unless defined?(AWS::S3)
base.instance_eval do
@s3_credentials = parse_credentials(@options[:s3_credentials])
@region = @options[:region] || @s3_credentials[:region]
@bucket = @options[:bucket] || @s3_credentials[:bucket]
@bucket = @bucket.call(self) if @bucket.is_a?(Proc)
@s3_options = @options[:s3_options] || {}
@s3_permissions = set_permissions(@options[:s3_permissions])
@s3_protocol = @options[:s3_protocol] ||
Proc.new do |style|
(@s3_permissions[style.to_sym] || @s3_permissions[:default]) == :public_read ? 'http' : 'https'
end
@s3_headers = @options[:s3_headers] || {}
@s3_host_alias = @options[:s3_host_alias]
@s3_host_alias = @s3_host_alias.call(self) if @s3_host_alias.is_a?(Proc)
unless @url.to_s.match(/^:s3.*url$/)
@path = @path.gsub(/:url/, @url)
@url = ":s3_path_url"
end
@url = ":asset_host" if @options[:url].to_s == ":asset_host"
AWS::S3::Base.establish_connection!( @s3_options.merge(
:access_key_id => @s3_credentials[:access_key_id],
:secret_access_key => @s3_credentials[:secret_access_key]
))
end
Paperclip.interpolates(:s3_alias_url) do |attachment, style|
"#{attachment.s3_protocol(style)}://#{attachment.s3_host_alias}/#{attachment.path(style).gsub(%r{^/}, "")}"
end unless Paperclip::Interpolations.respond_to? :s3_alias_url
Paperclip.interpolates(:s3_path_url) do |attachment, style|
"#{attachment.s3_protocol(style)}://#{attachment.s3_host_name}/#{attachment.bucket_name}/#{attachment.path(style).gsub(%r{^/}, "")}"
end unless Paperclip::Interpolations.respond_to? :s3_path_url
Paperclip.interpolates(:s3_domain_url) do |attachment, style|
"#{attachment.s3_protocol(style)}://#{attachment.bucket_name}.#{attachment.s3_host_name}/#{attachment.path(style).gsub(%r{^/}, "")}"
end unless Paperclip::Interpolations.respond_to? :s3_domain_url
Paperclip.interpolates(:asset_host) do |attachment, style|
"#{attachment.path(style).gsub(%r{^/}, "")}"
end unless Paperclip::Interpolations.respond_to? :asset_host
end
def expiring_url(time = 3600, style_name = default_style)
AWS::S3::S3Object.url_for(path(style_name), bucket_name, :expires_in => time, :use_ssl => (s3_protocol(style_name) == 'https'))
end
def bucket_name
@bucket
end
def s3_host_name
case @region
when "tokyo"
AWS::S3::DEFAULT_HOST.replace "s3-ap-northeast-1.amazonaws.com"
"s3-ap-northeast-1.amazonaws.com"
else
"s3.amazonaws.com"
end
end
def set_permissions permissions
if permissions.is_a?(Hash)
permissions[:default] = permissions[:default] || :public_read
else
permissions = { :default => permissions || :public_read }
end
permissions
end
def s3_host_alias
@s3_host_alias
end
def parse_credentials creds
creds = find_credentials(creds).stringify_keys
env = Object.const_defined?(:Rails) ? Rails.env : nil
(creds[env] || creds).symbolize_keys
end
def exists?(style = default_style)
if original_filename
AWS::S3::S3Object.exists?(path(style), bucket_name)
else
false
end
end
def s3_protocol(style)
if @s3_protocol.is_a?(Proc)
@s3_protocol.call(style)
else
@s3_protocol
end
end
# Returns representation of the data of the file assigned to the given
# style, in the format most representative of the current storage.
def to_file style = default_style
return @queued_for_write[style] if @queued_for_write[style]
filename = path(style)
extname = File.extname(filename)
basename = File.basename(filename, extname)
file = Tempfile.new([basename, extname])
file.binmode
file.write(AWS::S3::S3Object.value(path(style), bucket_name))
file.rewind
return file
end
def create_bucket
AWS::S3::Bucket.create(bucket_name)
end
def flush_writes #:nodoc:
@queued_for_write.each do |style, file|
begin
log("saving #{path(style)}")
AWS::S3::S3Object.store(path(style),
file,
bucket_name,
{:content_type => file.content_type.to_s.strip,
:access => (@s3_permissions[style] || @s3_permissions[:default]),
}.merge(@s3_headers))
rescue AWS::S3::NoSuchBucket => e
create_bucket
retry
rescue AWS::S3::ResponseError => e
raise
end
end
@queued_for_write = {}
end
def flush_deletes #:nodoc:
@queued_for_delete.each do |path|
begin
log("deleting #{path}")
AWS::S3::S3Object.delete(path, bucket_name)
rescue AWS::S3::ResponseError
# Ignore this.
end
end
@queued_for_delete = []
end
def find_credentials creds
case creds
when File
YAML::load(ERB.new(File.read(creds.path)).result)
when String, Pathname
YAML::load(ERB.new(File.read(creds)).result)
when Hash
creds
else
raise ArgumentError, "Credentials are not a path, file, or hash."
end
end
private :find_credentials
end
end
end
remove replace Module constant...
module Paperclip
module Storage
# Amazon's S3 file hosting service is a scalable, easy place to store files for
# distribution. You can find out more about it at http://aws.amazon.com/s3
# There are a few S3-specific options for has_attached_file:
# * +s3_credentials+: Takes a path, a File, or a Hash. The path (or File) must point
# to a YAML file containing the +access_key_id+ and +secret_access_key+ that Amazon
# gives you. You can 'environment-space' this just like you do to your
# database.yml file, so different environments can use different accounts:
# development:
# access_key_id: 123...
# secret_access_key: 123...
# test:
# access_key_id: abc...
# secret_access_key: abc...
# production:
# access_key_id: 456...
# secret_access_key: 456...
# This is not required, however, and the file may simply look like this:
# access_key_id: 456...
# secret_access_key: 456...
# In which case, those access keys will be used in all environments. You can also
# put your bucket name in this file, instead of adding it to the code directly.
# This is useful when you want the same account but a different bucket for
# development versus production.
# * +s3_permissions+: This is a String that should be one of the "canned" access
# policies that S3 provides (more information can be found here:
# http://docs.amazonwebservices.com/AmazonS3/latest/dev/index.html?RESTAccessPolicy.html)
# The default for Paperclip is :public_read.
#
# You can set permission on a per style bases by doing the following:
# :s3_permissions => {
# :original => :private
# }
# Or globaly:
# :s3_permissions => :private
#
# * +s3_protocol+: The protocol for the URLs generated to your S3 assets. Can be either
# 'http' or 'https'. Defaults to 'http' when your :s3_permissions are :public_read (the
# default), and 'https' when your :s3_permissions are anything else.
# * +s3_headers+: A hash of headers such as {'Expires' => 1.year.from_now.httpdate}
# * +bucket+: This is the name of the S3 bucket that will store your files. Remember
# that the bucket must be unique across all of Amazon S3. If the bucket does not exist
# Paperclip will attempt to create it. The bucket name will not be interpolated.
# You can define the bucket as a Proc if you want to determine it's name at runtime.
# Paperclip will call that Proc with attachment as the only argument.
# * +s3_host_alias+: The fully-qualified domain name (FQDN) that is the alias to the
# S3 domain of your bucket. Used with the :s3_alias_url url interpolation. See the
# link in the +url+ entry for more information about S3 domains and buckets.
# * +url+: There are four options for the S3 url. You can choose to have the bucket's name
# placed domain-style (bucket.s3.amazonaws.com) or path-style (s3.amazonaws.com/bucket).
# You can also specify a CNAME (which requires the CNAME to be specified as
# :s3_alias_url. You can read more about CNAMEs and S3 at
# http://docs.amazonwebservices.com/AmazonS3/latest/index.html?VirtualHosting.html
# Normally, this won't matter in the slightest and you can leave the default (which is
# path-style, or :s3_path_url). But in some cases paths don't work and you need to use
# the domain-style (:s3_domain_url). Anything else here will be treated like path-style.
# NOTE: If you use a CNAME for use with CloudFront, you can NOT specify https as your
# :s3_protocol; This is *not supported* by S3/CloudFront. Finally, when using the host
# alias, the :bucket parameter is ignored, as the hostname is used as the bucket name
# by S3. The fourth option for the S3 url is :asset_host, which uses Rails' built-in
# asset_host settings. NOTE: To get the full url from a paperclip'd object, use the
# image_path helper; this is what image_tag uses to generate the url for an img tag.
# * +path+: This is the key under the bucket in which the file will be stored. The
# URL will be constructed from the bucket and the path. This is what you will want
# to interpolate. Keys should be unique, like filenames, and despite the fact that
# S3 (strictly speaking) does not support directories, you can still use a / to
# separate parts of your file name.
# * +region+: If you are using your bucket in Tokyo region, "tokyo" write.
module S3
def self.extended base
begin
require 'aws/s3'
rescue LoadError => e
e.message << " (You may need to install the aws-s3 gem)"
raise e
end unless defined?(AWS::S3)
base.instance_eval do
@s3_credentials = parse_credentials(@options[:s3_credentials])
@region = @options[:region] || @s3_credentials[:region]
@bucket = @options[:bucket] || @s3_credentials[:bucket]
@bucket = @bucket.call(self) if @bucket.is_a?(Proc)
@s3_options = @options[:s3_options] || {}
@s3_permissions = set_permissions(@options[:s3_permissions])
@s3_protocol = @options[:s3_protocol] ||
Proc.new do |style|
(@s3_permissions[style.to_sym] || @s3_permissions[:default]) == :public_read ? 'http' : 'https'
end
@s3_headers = @options[:s3_headers] || {}
@s3_host_alias = @options[:s3_host_alias]
@s3_host_alias = @s3_host_alias.call(self) if @s3_host_alias.is_a?(Proc)
unless @url.to_s.match(/^:s3.*url$/)
@path = @path.gsub(/:url/, @url)
@url = ":s3_path_url"
end
@url = ":asset_host" if @options[:url].to_s == ":asset_host"
AWS::S3::Base.establish_connection!( @s3_options.merge(
:access_key_id => @s3_credentials[:access_key_id],
:secret_access_key => @s3_credentials[:secret_access_key]
))
end
Paperclip.interpolates(:s3_alias_url) do |attachment, style|
"#{attachment.s3_protocol(style)}://#{attachment.s3_host_alias}/#{attachment.path(style).gsub(%r{^/}, "")}"
end unless Paperclip::Interpolations.respond_to? :s3_alias_url
Paperclip.interpolates(:s3_path_url) do |attachment, style|
"#{attachment.s3_protocol(style)}://#{attachment.s3_host_name}/#{attachment.bucket_name}/#{attachment.path(style).gsub(%r{^/}, "")}"
end unless Paperclip::Interpolations.respond_to? :s3_path_url
Paperclip.interpolates(:s3_domain_url) do |attachment, style|
"#{attachment.s3_protocol(style)}://#{attachment.bucket_name}.#{attachment.s3_host_name}/#{attachment.path(style).gsub(%r{^/}, "")}"
end unless Paperclip::Interpolations.respond_to? :s3_domain_url
Paperclip.interpolates(:asset_host) do |attachment, style|
"#{attachment.path(style).gsub(%r{^/}, "")}"
end unless Paperclip::Interpolations.respond_to? :asset_host
end
def expiring_url(time = 3600, style_name = default_style)
AWS::S3::S3Object.url_for(path(style_name), bucket_name, :expires_in => time, :use_ssl => (s3_protocol(style_name) == 'https'))
end
def bucket_name
@bucket
end
def s3_host_name
case @region
when "tokyo"
"s3-ap-northeast-1.amazonaws.com"
else
"s3.amazonaws.com"
end
end
def set_permissions permissions
if permissions.is_a?(Hash)
permissions[:default] = permissions[:default] || :public_read
else
permissions = { :default => permissions || :public_read }
end
permissions
end
def s3_host_alias
@s3_host_alias
end
def parse_credentials creds
creds = find_credentials(creds).stringify_keys
env = Object.const_defined?(:Rails) ? Rails.env : nil
(creds[env] || creds).symbolize_keys
end
def exists?(style = default_style)
if original_filename
AWS::S3::S3Object.exists?(path(style), bucket_name)
else
false
end
end
def s3_protocol(style)
if @s3_protocol.is_a?(Proc)
@s3_protocol.call(style)
else
@s3_protocol
end
end
# Returns representation of the data of the file assigned to the given
# style, in the format most representative of the current storage.
def to_file style = default_style
return @queued_for_write[style] if @queued_for_write[style]
filename = path(style)
extname = File.extname(filename)
basename = File.basename(filename, extname)
file = Tempfile.new([basename, extname])
file.binmode
file.write(AWS::S3::S3Object.value(path(style), bucket_name))
file.rewind
return file
end
def create_bucket
AWS::S3::Bucket.create(bucket_name)
end
def flush_writes #:nodoc:
@queued_for_write.each do |style, file|
begin
log("saving #{path(style)}")
AWS::S3::S3Object.store(path(style),
file,
bucket_name,
{:content_type => file.content_type.to_s.strip,
:access => (@s3_permissions[style] || @s3_permissions[:default]),
}.merge(@s3_headers))
rescue AWS::S3::NoSuchBucket => e
create_bucket
retry
rescue AWS::S3::ResponseError => e
raise
end
end
@queued_for_write = {}
end
def flush_deletes #:nodoc:
@queued_for_delete.each do |path|
begin
log("deleting #{path}")
AWS::S3::S3Object.delete(path, bucket_name)
rescue AWS::S3::ResponseError
# Ignore this.
end
end
@queued_for_delete = []
end
def find_credentials creds
case creds
when File
YAML::load(ERB.new(File.read(creds.path)).result)
when String, Pathname
YAML::load(ERB.new(File.read(creds)).result)
when Hash
creds
else
raise ArgumentError, "Credentials are not a path, file, or hash."
end
end
private :find_credentials
end
end
end
|
module Paperclip
module Storage
# Amazon's S3 file hosting service is a scalable, easy place to store files for
# distribution. You can find out more about it at http://aws.amazon.com/s3
# There are a few S3-specific options for has_attached_file:
# * +s3_credentials+: Takes a path, a File, or a Hash. The path (or File) must point
# to a YAML file containing the +access_key_id+ and +secret_access_key+ that Amazon
# gives you. You can 'environment-space' this just like you do to your
# database.yml file, so different environments can use different accounts:
# development:
# access_key_id: 123...
# secret_access_key: 123...
# test:
# access_key_id: abc...
# secret_access_key: abc...
# production:
# access_key_id: 456...
# secret_access_key: 456...
# This is not required, however, and the file may simply look like this:
# access_key_id: 456...
# secret_access_key: 456...
# In which case, those access keys will be used in all environments. You can also
# put your bucket name in this file, instead of adding it to the code directly.
# This is useful when you want the same account but a different bucket for
# development versus production.
# * +s3_permissions+: This is a String that should be one of the "canned" access
# policies that S3 provides (more information can be found here:
# http://docs.amazonwebservices.com/AmazonS3/latest/dev/index.html?RESTAccessPolicy.html)
# The default for Paperclip is :public_read.
#
# You can set permission on a per style bases by doing the following:
# :s3_permissions => {
# :original => :private
# }
# Or globaly:
# :s3_permissions => :private
#
# * +s3_protocol+: The protocol for the URLs generated to your S3 assets. Can be either
# 'http' or 'https'. Defaults to 'http' when your :s3_permissions are :public_read (the
# default), and 'https' when your :s3_permissions are anything else.
# * +s3_headers+: A hash of headers such as {'Expires' => 1.year.from_now.httpdate}
# * +bucket+: This is the name of the S3 bucket that will store your files. Remember
# that the bucket must be unique across all of Amazon S3. If the bucket does not exist
# Paperclip will attempt to create it. The bucket name will not be interpolated.
# You can define the bucket as a Proc if you want to determine it's name at runtime.
# Paperclip will call that Proc with attachment as the only argument.
# * +s3_host_alias+: The fully-qualified domain name (FQDN) that is the alias to the
# S3 domain of your bucket. Used with the :s3_alias_url url interpolation. See the
# link in the +url+ entry for more information about S3 domains and buckets.
# * +url+: There are four options for the S3 url. You can choose to have the bucket's name
# placed domain-style (bucket.s3.amazonaws.com) or path-style (s3.amazonaws.com/bucket).
# You can also specify a CNAME (which requires the CNAME to be specified as
# :s3_alias_url. You can read more about CNAMEs and S3 at
# http://docs.amazonwebservices.com/AmazonS3/latest/index.html?VirtualHosting.html
# Normally, this won't matter in the slightest and you can leave the default (which is
# path-style, or :s3_path_url). But in some cases paths don't work and you need to use
# the domain-style (:s3_domain_url). Anything else here will be treated like path-style.
# NOTE: If you use a CNAME for use with CloudFront, you can NOT specify https as your
# :s3_protocol; This is *not supported* by S3/CloudFront. Finally, when using the host
# alias, the :bucket parameter is ignored, as the hostname is used as the bucket name
# by S3. The fourth option for the S3 url is :asset_host, which uses Rails' built-in
# asset_host settings. NOTE: To get the full url from a paperclip'd object, use the
# image_path helper; this is what image_tag uses to generate the url for an img tag.
# * +path+: This is the key under the bucket in which the file will be stored. The
# URL will be constructed from the bucket and the path. This is what you will want
# to interpolate. Keys should be unique, like filenames, and despite the fact that
# S3 (strictly speaking) does not support directories, you can still use a / to
# separate parts of your file name.
module S3
def self.extended base
begin
require 'aws/s3'
rescue LoadError => e
e.message << " (You may need to install the aws-s3 gem)"
raise e
end unless defined?(AWS::S3)
base.instance_eval do
@s3_credentials = parse_credentials(@options[:s3_credentials])
@bucket = @options[:bucket] || @s3_credentials[:bucket]
@bucket = @bucket.call(self) if @bucket.is_a?(Proc)
@s3_options = @options[:s3_options] || {}
@s3_permissions = set_permissions(@options[:s3_permissions])
@s3_protocol = @options[:s3_protocol] ||
Proc.new do |style|
(@s3_permissions[style.to_sym] || @s3_permissions[:default]) == :public_read ? 'http' : 'https'
end
@s3_headers = @options[:s3_headers] || {}
@s3_host_alias = @options[:s3_host_alias]
@s3_host_alias = @s3_host_alias.call(self) if @s3_host_alias.is_a?(Proc)
unless @url.to_s.match(/^:s3.*url$/)
@path = @path.gsub(/:url/, @url)
@url = ":s3_path_url"
end
@url = ":asset_host" if @options[:url].to_s == ":asset_host"
AWS::S3::Base.establish_connection!( @s3_options.merge(
:access_key_id => @s3_credentials[:access_key_id],
:secret_access_key => @s3_credentials[:secret_access_key]
))
end
Paperclip.interpolates(:s3_alias_url) do |attachment, style|
"#{attachment.s3_protocol(style)}://#{attachment.s3_host_alias}/#{attachment.path(style).gsub(%r{^/}, "")}"
end unless Paperclip::Interpolations.respond_to? :s3_alias_url
Paperclip.interpolates(:s3_path_url) do |attachment, style|
"#{attachment.s3_protocol(style)}://s3.amazonaws.com/#{attachment.bucket_name}/#{attachment.path(style).gsub(%r{^/}, "")}"
end unless Paperclip::Interpolations.respond_to? :s3_path_url
Paperclip.interpolates(:s3_domain_url) do |attachment, style|
"#{attachment.s3_protocol(style)}://#{attachment.bucket_name}.s3.amazonaws.com/#{attachment.path(style).gsub(%r{^/}, "")}"
end unless Paperclip::Interpolations.respond_to? :s3_domain_url
Paperclip.interpolates(:asset_host) do |attachment, style|
"#{attachment.path(style).gsub(%r{^/}, "")}"
end unless Paperclip::Interpolations.respond_to? :asset_host
end
def expiring_url(time = 3600, style_name = default_style)
AWS::S3::S3Object.url_for(path(style_name), bucket_name, :expires_in => time, :use_ssl => (s3_protocol(style_name) == 'https'))
end
def bucket_name
@bucket
end
def set_permissions permissions
if permissions.is_a?(Hash)
permissions[:default] = permissions[:default] || :public_read
else
permissions = { :default => permissions || :public_read }
end
permissions
end
def s3_host_alias
@s3_host_alias
end
def parse_credentials creds
creds = find_credentials(creds).stringify_keys
env = Object.const_defined?(:Rails) ? Rails.env : nil
(creds[env] || creds).symbolize_keys
end
def exists?(style = default_style)
if original_filename
AWS::S3::S3Object.exists?(path(style), bucket_name)
else
false
end
end
def s3_protocol(style)
if @s3_protocol.is_a?(Proc)
@s3_protocol.call(style)
else
@s3_protocol
end
end
# Returns representation of the data of the file assigned to the given
# style, in the format most representative of the current storage.
def to_file style = default_style
return @queued_for_write[style] if @queued_for_write[style]
filename = path(style)
extname = File.extname(filename)
basename = File.basename(filename, extname)
file = Tempfile.new([basename, extname])
file.binmode
file.write(AWS::S3::S3Object.value(path(style), bucket_name))
file.rewind
return file
end
def create_bucket
AWS::S3::Bucket.create(bucket_name)
end
def flush_writes #:nodoc:
@queued_for_write.each do |style, file|
begin
log("saving #{path(style)}")
AWS::S3::S3Object.store(path(style),
file,
bucket_name,
{:content_type => file.content_type.to_s.strip,
:access => (@s3_permissions[style] || @s3_permissions[:default]),
}.merge(@s3_headers))
rescue AWS::S3::NoSuchBucket => e
create_bucket
retry
rescue AWS::S3::ResponseError => e
raise
end
end
@queued_for_write = {}
end
def flush_deletes #:nodoc:
@queued_for_delete.each do |path|
begin
log("deleting #{path}")
AWS::S3::S3Object.delete(path, bucket_name)
rescue AWS::S3::ResponseError
# Ignore this.
end
end
@queued_for_delete = []
end
def find_credentials creds
case creds
when File
YAML::load(ERB.new(File.read(creds.path)).result)
when String, Pathname
YAML::load(ERB.new(File.read(creds)).result)
when Hash
creds
else
raise ArgumentError, "Credentials are not a path, file, or hash."
end
end
private :find_credentials
end
end
end
available s3 tokyo region
module Paperclip
module Storage
# Amazon's S3 file hosting service is a scalable, easy place to store files for
# distribution. You can find out more about it at http://aws.amazon.com/s3
# There are a few S3-specific options for has_attached_file:
# * +s3_credentials+: Takes a path, a File, or a Hash. The path (or File) must point
# to a YAML file containing the +access_key_id+ and +secret_access_key+ that Amazon
# gives you. You can 'environment-space' this just like you do to your
# database.yml file, so different environments can use different accounts:
# development:
# access_key_id: 123...
# secret_access_key: 123...
# test:
# access_key_id: abc...
# secret_access_key: abc...
# production:
# access_key_id: 456...
# secret_access_key: 456...
# This is not required, however, and the file may simply look like this:
# access_key_id: 456...
# secret_access_key: 456...
# In which case, those access keys will be used in all environments. You can also
# put your bucket name in this file, instead of adding it to the code directly.
# This is useful when you want the same account but a different bucket for
# development versus production.
# * +s3_permissions+: This is a String that should be one of the "canned" access
# policies that S3 provides (more information can be found here:
# http://docs.amazonwebservices.com/AmazonS3/latest/dev/index.html?RESTAccessPolicy.html)
# The default for Paperclip is :public_read.
#
# You can set permission on a per style bases by doing the following:
# :s3_permissions => {
# :original => :private
# }
# Or globaly:
# :s3_permissions => :private
#
# * +s3_protocol+: The protocol for the URLs generated to your S3 assets. Can be either
# 'http' or 'https'. Defaults to 'http' when your :s3_permissions are :public_read (the
# default), and 'https' when your :s3_permissions are anything else.
# * +s3_headers+: A hash of headers such as {'Expires' => 1.year.from_now.httpdate}
# * +bucket+: This is the name of the S3 bucket that will store your files. Remember
# that the bucket must be unique across all of Amazon S3. If the bucket does not exist
# Paperclip will attempt to create it. The bucket name will not be interpolated.
# You can define the bucket as a Proc if you want to determine it's name at runtime.
# Paperclip will call that Proc with attachment as the only argument.
# * +s3_host_alias+: The fully-qualified domain name (FQDN) that is the alias to the
# S3 domain of your bucket. Used with the :s3_alias_url url interpolation. See the
# link in the +url+ entry for more information about S3 domains and buckets.
# * +url+: There are four options for the S3 url. You can choose to have the bucket's name
# placed domain-style (bucket.s3.amazonaws.com) or path-style (s3.amazonaws.com/bucket).
# You can also specify a CNAME (which requires the CNAME to be specified as
# :s3_alias_url. You can read more about CNAMEs and S3 at
# http://docs.amazonwebservices.com/AmazonS3/latest/index.html?VirtualHosting.html
# Normally, this won't matter in the slightest and you can leave the default (which is
# path-style, or :s3_path_url). But in some cases paths don't work and you need to use
# the domain-style (:s3_domain_url). Anything else here will be treated like path-style.
# NOTE: If you use a CNAME for use with CloudFront, you can NOT specify https as your
# :s3_protocol; This is *not supported* by S3/CloudFront. Finally, when using the host
# alias, the :bucket parameter is ignored, as the hostname is used as the bucket name
# by S3. The fourth option for the S3 url is :asset_host, which uses Rails' built-in
# asset_host settings. NOTE: To get the full url from a paperclip'd object, use the
# image_path helper; this is what image_tag uses to generate the url for an img tag.
# * +path+: This is the key under the bucket in which the file will be stored. The
# URL will be constructed from the bucket and the path. This is what you will want
# to interpolate. Keys should be unique, like filenames, and despite the fact that
# S3 (strictly speaking) does not support directories, you can still use a / to
# separate parts of your file name.
# * +region+: If you are using your bucket in Tokyo region, "tokyo" write.
module S3
def self.extended base
begin
require 'aws/s3'
rescue LoadError => e
e.message << " (You may need to install the aws-s3 gem)"
raise e
end unless defined?(AWS::S3)
base.instance_eval do
@s3_credentials = parse_credentials(@options[:s3_credentials])
@region = @options[:region] || @s3_credentials[:region]
@bucket = @options[:bucket] || @s3_credentials[:bucket]
@bucket = @bucket.call(self) if @bucket.is_a?(Proc)
@s3_options = @options[:s3_options] || {}
@s3_permissions = set_permissions(@options[:s3_permissions])
@s3_protocol = @options[:s3_protocol] ||
Proc.new do |style|
(@s3_permissions[style.to_sym] || @s3_permissions[:default]) == :public_read ? 'http' : 'https'
end
@s3_headers = @options[:s3_headers] || {}
@s3_host_alias = @options[:s3_host_alias]
@s3_host_alias = @s3_host_alias.call(self) if @s3_host_alias.is_a?(Proc)
unless @url.to_s.match(/^:s3.*url$/)
@path = @path.gsub(/:url/, @url)
@url = ":s3_path_url"
end
@url = ":asset_host" if @options[:url].to_s == ":asset_host"
AWS::S3::Base.establish_connection!( @s3_options.merge(
:access_key_id => @s3_credentials[:access_key_id],
:secret_access_key => @s3_credentials[:secret_access_key]
))
end
Paperclip.interpolates(:s3_alias_url) do |attachment, style|
"#{attachment.s3_protocol(style)}://#{attachment.s3_host_alias}/#{attachment.path(style).gsub(%r{^/}, "")}"
end unless Paperclip::Interpolations.respond_to? :s3_alias_url
Paperclip.interpolates(:s3_path_url) do |attachment, style|
"#{attachment.s3_protocol(style)}://#{attachment.s3_host_name}/#{attachment.bucket_name}/#{attachment.path(style).gsub(%r{^/}, "")}"
end unless Paperclip::Interpolations.respond_to? :s3_path_url
Paperclip.interpolates(:s3_domain_url) do |attachment, style|
"#{attachment.s3_protocol(style)}://#{attachment.bucket_name}.#{attachment.s3_host_name}/#{attachment.path(style).gsub(%r{^/}, "")}"
end unless Paperclip::Interpolations.respond_to? :s3_domain_url
Paperclip.interpolates(:asset_host) do |attachment, style|
"#{attachment.path(style).gsub(%r{^/}, "")}"
end unless Paperclip::Interpolations.respond_to? :asset_host
end
def expiring_url(time = 3600, style_name = default_style)
AWS::S3::S3Object.url_for(path(style_name), bucket_name, :expires_in => time, :use_ssl => (s3_protocol(style_name) == 'https'))
end
def bucket_name
@bucket
end
def s3_host_name
case @region
when "tokyo"
AWS::S3::DEFAULT_HOST.replace "s3-ap-northeast-1.amazonaws.com"
"s3-ap-northeast-1.amazonaws.com"
else
"s3.amazonaws.com"
end
end
def set_permissions permissions
if permissions.is_a?(Hash)
permissions[:default] = permissions[:default] || :public_read
else
permissions = { :default => permissions || :public_read }
end
permissions
end
def s3_host_alias
@s3_host_alias
end
def parse_credentials creds
creds = find_credentials(creds).stringify_keys
env = Object.const_defined?(:Rails) ? Rails.env : nil
(creds[env] || creds).symbolize_keys
end
def exists?(style = default_style)
if original_filename
AWS::S3::S3Object.exists?(path(style), bucket_name)
else
false
end
end
def s3_protocol(style)
if @s3_protocol.is_a?(Proc)
@s3_protocol.call(style)
else
@s3_protocol
end
end
# Returns representation of the data of the file assigned to the given
# style, in the format most representative of the current storage.
def to_file style = default_style
return @queued_for_write[style] if @queued_for_write[style]
filename = path(style)
extname = File.extname(filename)
basename = File.basename(filename, extname)
file = Tempfile.new([basename, extname])
file.binmode
file.write(AWS::S3::S3Object.value(path(style), bucket_name))
file.rewind
return file
end
def create_bucket
AWS::S3::Bucket.create(bucket_name)
end
def flush_writes #:nodoc:
@queued_for_write.each do |style, file|
begin
log("saving #{path(style)}")
AWS::S3::S3Object.store(path(style),
file,
bucket_name,
{:content_type => file.content_type.to_s.strip,
:access => (@s3_permissions[style] || @s3_permissions[:default]),
}.merge(@s3_headers))
rescue AWS::S3::NoSuchBucket => e
create_bucket
retry
rescue AWS::S3::ResponseError => e
raise
end
end
@queued_for_write = {}
end
def flush_deletes #:nodoc:
@queued_for_delete.each do |path|
begin
log("deleting #{path}")
AWS::S3::S3Object.delete(path, bucket_name)
rescue AWS::S3::ResponseError
# Ignore this.
end
end
@queued_for_delete = []
end
def find_credentials creds
case creds
when File
YAML::load(ERB.new(File.read(creds.path)).result)
when String, Pathname
YAML::load(ERB.new(File.read(creds)).result)
when Hash
creds
else
raise ArgumentError, "Credentials are not a path, file, or hash."
end
end
private :find_credentials
end
end
end
|
# Project constants
module PdfXcassets
PROJECT_NAME = 'pdf_xcassets'
PROJECT_DESCRIPTION = 'Generate Xcode xcasset from pdf assets.'
PROJECT_URL = 'https://github.com/dkhamsing'
VERSION = "0.1.0"
end
project url
# Project constants
module PdfXcassets
PROJECT_NAME = 'pdf_xcassets'
PROJECT_DESCRIPTION = 'Generate Xcode xcasset from pdf assets.'
PROJECT_URL = 'https://github.com/dkhamsing/pdf_xcassets'
VERSION = "0.1.0"
end
|
module Percy
class Client
VERSION = '1.13.0'.freeze
end
end
1.13.1
module Percy
class Client
VERSION = '1.13.1'.freeze
end
end
|
#
# Decides what the "current project" is, in relation to the pwd.
#
# - config_yaml: has the project configurations. It has at least one
# project. Each project has an (pivotal) api_token, path, and (pivotal)
# id
#
module PGit
  # Resolves which configured project corresponds to the current working
  # directory: the project whose configured path matches pwd, preferring
  # the most specific (longest) path.
  class CurrentProject
    attr_accessor :commands

    # config_yaml - Hash with a "projects" array; each project Hash holds
    # at least "api_token", "path" and "id", optionally "commands".
    def initialize(config_yaml)
      @current_project = find_current_project(config_yaml)
      @commands = @current_project["commands"] || {}
    end

    def path
      @current_project["path"]
    end

    def id
      @current_project["id"]
    end

    def api_token
      @current_project["api_token"]
    end

    # Plain-Hash form of the selected project. Uses the reader methods
    # consistently (previously "path" bypassed its reader).
    def to_hash
      {
        "id" => id,
        "api_token" => api_token,
        "path" => path,
        "commands" => commands
      }
    end

    def to_h
      to_hash
    end

    private

    # Escapes '/' so the path can be interpolated into a regex below.
    # NOTE(review): other regex metacharacters in a path ('.', '+', ...)
    # are NOT escaped, so matching is looser than a literal comparison;
    # consider Regexp.escape -- confirm before changing behavior.
    def escape_slashes(project_path)
      project_path.gsub('/','\/')
    end

    # Keeps the projects whose expanded path appears within pwd.
    def find_matching_projects(projects)
      projects.select do |project|
        project_path = project["path"]
        extended_path = File.expand_path(project_path, __FILE__)
        escaped_project = escape_slashes(extended_path)
        Dir.pwd.match(/#{escaped_project}/)
      end
    end

    def find_current_project(config_yaml)
      projects = config_yaml["projects"]
      matching_projects = find_matching_projects(projects)
      PGit::CurrentProject::Validator.new(matching_projects)
      find_best_match(matching_projects)
    end

    # Most specific match wins: sort by descending path length.
    def find_best_match(matching_projects)
      matching_projects.sort! { |a,b| b["path"].length <=> a["path"].length }
      matching_projects.first
    end
  end
end
[#85179216] simplified call to path
#
# Decides what the "current project" is, in relation to the pwd.
#
# - config_yaml: has the project configurations. It has at least one
# project. Each project has an (pivotal) api_token, path, and (pivotal)
# id
#
module PGit
# Resolves which configured project corresponds to the current working
# directory: the project whose configured path matches pwd, preferring
# the most specific (longest) path.
class CurrentProject
attr_accessor :commands
# config_yaml: Hash with a "projects" array; each project Hash holds
# at least "api_token", "path" and "id", optionally "commands".
def initialize(config_yaml)
@current_project = find_current_project(config_yaml)
@commands = @current_project["commands"] || {}
end
def path
@current_project["path"]
end
def id
@current_project["id"]
end
def api_token
@current_project["api_token"]
end
# Plain-Hash form of the selected project, built from the readers above.
def to_hash
{
"id" => id,
"api_token" => api_token,
"path" => path,
"commands" => commands
}
end
def to_h
to_hash
end
private
# Escapes '/' so the path can be interpolated into a regex below.
# NOTE(review): other regex metacharacters in the path are not escaped;
# matching is looser than a literal comparison -- confirm intent.
def escape_slashes(project_path)
project_path.gsub('/','\/')
end
# Keeps the projects whose expanded path appears within pwd.
def find_matching_projects(projects)
projects.select do |project|
project_path = project["path"]
extended_path = File.expand_path(project_path, __FILE__)
escaped_project = escape_slashes(extended_path)
Dir.pwd.match(/#{escaped_project}/)
end
end
def find_current_project(config_yaml)
projects = config_yaml["projects"]
matching_projects = find_matching_projects(projects)
PGit::CurrentProject::Validator.new(matching_projects)
find_best_match(matching_projects)
end
# Most specific match wins: sort by descending path length.
def find_best_match(matching_projects)
matching_projects.sort! { |a,b| b["path"].length <=> a["path"].length }
matching_projects.first
end
end
end
|
#
# Be sure to run `pod lib lint NAME.podspec' to ensure this is a
# valid spec and remove all comments before submitting the spec.
#
# To learn more about a Podspec see http://guides.cocoapods.org/syntax/podspec.html
#
Pod::Spec.new do |s|
  s.name = "podExample"
  # FIX: the version literal previously used typographic quotes (“0.1.1”),
  # which Ruby cannot parse; string literals require ASCII quotes.
  s.version = "0.1.1"
  s.summary = "A short description of podExample."
  s.description = <<-DESC
An optional longer description of podExample
* Markdown format.
* Don't worry about the indent, we strip it!
  DESC
  s.homepage = "https://github.com/juanlu86/cocoaPodExample"
  s.screenshots = "www.example.com/screenshots_1", "www.example.com/screenshots_2"
  s.license = 'MIT'
  s.author = { "Juanlu" => "juanlu.lopez.munoz@gmail.com" }
  # The source tag is derived from s.version so the two cannot drift apart.
  s.source = { :git => "https://github.com/juanlu86/cocoaPodExample.git", :tag => s.version.to_s }
  s.social_media_url = 'https://twitter.com/EXAMPLE'
  # s.platform = :ios, '5.0'
  # s.ios.deployment_target = '5.0'
  # s.osx.deployment_target = '10.7'
  s.requires_arc = true
  s.source_files = 'Classes'
  s.resources = 'Assets/*.png'
  s.ios.exclude_files = 'Classes/osx'
  s.osx.exclude_files = 'Classes/ios'
  # s.public_header_files = 'Classes/**/*.h'
  # s.frameworks = 'SomeFramework', 'AnotherFramework'
  # s.dependency 'JSONKit', '~> 1.4'
end
Fixed syntax error in .podspec file
#
# Be sure to run `pod lib lint NAME.podspec' to ensure this is a
# valid spec and remove all comments before submitting the spec.
#
# To learn more about a Podspec see http://guides.cocoapods.org/syntax/podspec.html
#
Pod::Spec.new do |s|
s.name = "podExample"
# Release version; also used as the git tag via s.source below.
s.version = "0.1.1"
s.summary = "A short description of podExample."
s.description = <<-DESC
An optional longer description of podExample
* Markdown format.
* Don't worry about the indent, we strip it!
DESC
s.homepage = "https://github.com/juanlu86/cocoaPodExample"
s.screenshots = "www.example.com/screenshots_1", "www.example.com/screenshots_2"
s.license = 'MIT'
s.author = { "Juanlu" => "juanlu.lopez.munoz@gmail.com" }
# The source tag is derived from s.version so the two cannot drift apart.
s.source = { :git => "https://github.com/juanlu86/cocoaPodExample.git", :tag => s.version.to_s }
s.social_media_url = 'https://twitter.com/EXAMPLE'
# s.platform = :ios, '5.0'
# s.ios.deployment_target = '5.0'
# s.osx.deployment_target = '10.7'
s.requires_arc = true
s.source_files = 'Classes'
s.resources = 'Assets/*.png'
s.ios.exclude_files = 'Classes/osx'
s.osx.exclude_files = 'Classes/ios'
# s.public_header_files = 'Classes/**/*.h'
# s.frameworks = 'SomeFramework', 'AnotherFramework'
# s.dependency 'JSONKit', '~> 1.4'
end
|
# encoding: UTF-8
require 'set'
require 'plucky/normalizers/criteria_hash_value'
require 'plucky/normalizers/criteria_hash_key'
module Plucky
class CriteriaHash
attr_reader :source, :options
# Internal: Used to determine if criteria keys match simple id lookup.
SimpleIdQueryKeys = [:_id].to_set
# Internal: Used to determine if criteria keys match simple id and type
# lookup (for single collection inheritance).
SimpleIdAndTypeQueryKeys = [:_id, :_type].to_set
# Internal: Used to quickly check if it is possible that the
# criteria hash is simple.
SimpleQueryMaxSize = [SimpleIdQueryKeys.size, SimpleIdAndTypeQueryKeys.size].max
def initialize(hash={}, options={})
@source, @options = {}, options
hash.each { |key, value| self[key] = value }
end
# Copy hook for dup/clone: duplicates the options and source hashes and
# re-assigns each duplicable value as a clone, so the copy shares no
# mutable state with +original+.
#
# The parameter was renamed from +source+ to +original+ because it
# shadowed the class's `attr_reader :source`.
def initialize_copy(original)
  super
  @options = @options.dup
  @source = @source.dup
  each do |key, value|
    self[key] = value.clone if value.duplicable?
  end
end
def []=(key, value)
normalized_key = normalized_key(key)
if key.is_a?(SymbolOperator)
operator = :"$#{key.operator}"
normalized_value = normalized_value(normalized_key, operator, value)
source[normalized_key] ||= {}
source[normalized_key][operator] = normalized_value
else
if key == :conditions
value.each { |k, v| self[k] = v }
else
normalized_value = normalized_value(normalized_key, normalized_key, value)
source[normalized_key] = normalized_value
end
end
end
def ==(other)
source == other.source
end
def to_hash
source
end
def merge(other)
target = source.dup
other.source.each_key do |key|
value, other_value = target[key], other[key]
target[key] =
if target.key?(key)
value_is_hash = value.is_a?(Hash)
other_is_hash = other_value.is_a?(Hash)
if value_is_hash && other_is_hash
value.update(other_value) do |key, old_value, new_value|
if old_value.is_a?(Hash) && new_value.is_a?(Hash)
self.class.new(old_value).merge(self.class.new(new_value)).to_hash
else
Array(old_value).concat(Array(new_value)).uniq
end
end
elsif value_is_hash && !other_is_hash
if modifier_key = value.keys.detect { |k| Plucky.modifier?(k) }
value[modifier_key].concat(Array(other_value)).uniq!
else
# kaboom! Array(value).concat(Array(other_value)).uniq
end
elsif other_is_hash && !value_is_hash
if modifier_key = other_value.keys.detect { |k| Plucky.modifier?(k) }
other_value[modifier_key].concat(Array(value)).uniq!
else
# kaboom! Array(value).concat(Array(other_value)).uniq
end
else
Array(value).concat(Array(other_value)).uniq
end
else
other_value
end
end
self.class.new(target)
end
def merge!(other)
merge(other).to_hash.each do |key, value|
self[key] = value
end
self
end
def object_ids
@options[:object_ids] ||= []
end
def object_ids=(value)
raise ArgumentError unless value.is_a?(Array)
@options[:object_ids] = value.flatten
end
# The definition of simple is querying by only _id or _id and _type.
# If this is the case, you can use IdentityMap in library to not perform
# query and instead just return from map.
def simple?
return false if keys.size > SimpleQueryMaxSize
key_set = keys.to_set
key_set == SimpleIdQueryKeys || key_set == SimpleIdAndTypeQueryKeys
end
# Delegates any unknown method (keys, each, size, ...) to the underlying
# source hash.
def method_missing(method, *args, &block)
  @source.send(method, *args, &block)
end

# Keeps respond_to? consistent with the method_missing delegation above.
def respond_to_missing?(method, include_private = false)
  @source.respond_to?(method, include_private) || super
end
def object_id?(key)
object_ids.include?(key.to_sym)
end
def normalized_key(key)
key_normalizer.call(key)
end
def key_normalizer
@key_normalizer ||= options.fetch(:key_normalizer) {
Normalizers::CriteriaHashKey.new
}
end
def normalized_value(parent_key, key, value)
value_normalizer.call(parent_key, key, value)
end
def value_normalizer
@value_normalizer ||= options.fetch(:value_normalizer) {
Normalizers::CriteriaHashValue.new(self)
}
end
end
end
Rename source to original
# encoding: UTF-8
require 'set'
require 'plucky/normalizers/criteria_hash_value'
require 'plucky/normalizers/criteria_hash_key'
module Plucky
class CriteriaHash
attr_reader :source, :options
# Internal: Used to determine if criteria keys match simple id lookup.
SimpleIdQueryKeys = [:_id].to_set
# Internal: Used to determine if criteria keys match simple id and type
# lookup (for single collection inheritance).
SimpleIdAndTypeQueryKeys = [:_id, :_type].to_set
# Internal: Used to quickly check if it is possible that the
# criteria hash is simple.
SimpleQueryMaxSize = [SimpleIdQueryKeys.size, SimpleIdAndTypeQueryKeys.size].max
def initialize(hash={}, options={})
@source, @options = {}, options
hash.each { |key, value| self[key] = value }
end
def initialize_copy(original)
super
@options = @options.dup
@source = @source.dup
each do |key, value|
self[key] = value.clone if value.duplicable?
end
end
def []=(key, value)
normalized_key = normalized_key(key)
if key.is_a?(SymbolOperator)
operator = :"$#{key.operator}"
normalized_value = normalized_value(normalized_key, operator, value)
source[normalized_key] ||= {}
source[normalized_key][operator] = normalized_value
else
if key == :conditions
value.each { |k, v| self[k] = v }
else
normalized_value = normalized_value(normalized_key, normalized_key, value)
source[normalized_key] = normalized_value
end
end
end
def ==(other)
source == other.source
end
def to_hash
source
end
def merge(other)
target = source.dup
other.source.each_key do |key|
value, other_value = target[key], other[key]
target[key] =
if target.key?(key)
value_is_hash = value.is_a?(Hash)
other_is_hash = other_value.is_a?(Hash)
if value_is_hash && other_is_hash
value.update(other_value) do |key, old_value, new_value|
if old_value.is_a?(Hash) && new_value.is_a?(Hash)
self.class.new(old_value).merge(self.class.new(new_value)).to_hash
else
Array(old_value).concat(Array(new_value)).uniq
end
end
elsif value_is_hash && !other_is_hash
if modifier_key = value.keys.detect { |k| Plucky.modifier?(k) }
value[modifier_key].concat(Array(other_value)).uniq!
else
# kaboom! Array(value).concat(Array(other_value)).uniq
end
elsif other_is_hash && !value_is_hash
if modifier_key = other_value.keys.detect { |k| Plucky.modifier?(k) }
other_value[modifier_key].concat(Array(value)).uniq!
else
# kaboom! Array(value).concat(Array(other_value)).uniq
end
else
Array(value).concat(Array(other_value)).uniq
end
else
other_value
end
end
self.class.new(target)
end
def merge!(other)
merge(other).to_hash.each do |key, value|
self[key] = value
end
self
end
def object_ids
@options[:object_ids] ||= []
end
def object_ids=(value)
raise ArgumentError unless value.is_a?(Array)
@options[:object_ids] = value.flatten
end
# The definition of simple is querying by only _id or _id and _type.
# If this is the case, you can use IdentityMap in library to not perform
# query and instead just return from map.
def simple?
return false if keys.size > SimpleQueryMaxSize
key_set = keys.to_set
key_set == SimpleIdQueryKeys || key_set == SimpleIdAndTypeQueryKeys
end
# Delegates any unknown method (keys, each, size, ...) to the underlying
# source hash.
def method_missing(method, *args, &block)
  @source.send(method, *args, &block)
end

# Keeps respond_to? consistent with the method_missing delegation above.
def respond_to_missing?(method, include_private = false)
  @source.respond_to?(method, include_private) || super
end
def object_id?(key)
object_ids.include?(key.to_sym)
end
def normalized_key(key)
key_normalizer.call(key)
end
def key_normalizer
@key_normalizer ||= options.fetch(:key_normalizer) {
Normalizers::CriteriaHashKey.new
}
end
def normalized_value(parent_key, key, value)
value_normalizer.call(parent_key, key, value)
end
def value_normalizer
@value_normalizer ||= options.fetch(:value_normalizer) {
Normalizers::CriteriaHashValue.new(self)
}
end
end
end
|
require 'populate_me/utils'
module PopulateMe
class MissingDocumentError < StandardError; end
module Document
def self.included base
base.extend ClassMethods
[:save,:create,:update,:delete].each do |cb|
base.before cb, :recurse_callback
base.after cb, :recurse_callback
end
base.before :create, :ensure_id
base.after :create, :ensure_not_new
base.before :delete, :ensure_delete_related
base.after :delete, :ensure_new
end
module ClassMethods
attr_writer :fields, :documents
attr_accessor :callbacks, :label_field
def to_s
super.gsub(/[A-Z]/, ' \&')[1..-1].gsub('::','')
end
def to_s_short
self.name[/[^:]+$/].gsub(/[A-Z]/, ' \&')[1..-1]
end
def to_s_plural; "#{self.to_s}s"; end
def to_s_short_plural; "#{self.to_s_short}s"; end
def label sym
@label_field = sym.to_sym
end
def fields; @fields ||= {}; end
def field name, o={}
set_id_field if self.fields.empty?&&o[:type]!=:id
complete_field_options name, o
if o[:type]==:list
define_method(name) do
var = "@#{name}"
instance_variable_set(var, instance_variable_get(var)||[])
end
else
attr_accessor name
end
self.fields[name] = o
end
def complete_field_options name, o={}
o[:field_name] = name
Utils.ensure_key o, :type, :string
Utils.ensure_key o, :form_field, ![:id,:position].include?(o[:type])
o[:wrap] = false unless o[:form_field]
Utils.ensure_key o, :wrap, ![:hidden,:list].include?(o[:type])
Utils.ensure_key o, :label, Utils.label_for_field(name)
if o[:type]==:attachment
o[:class_name] = Utils.guess_related_class_name(PopulateMe,o[:class_name]||:attachment)
end
if o[:type]==:list
o[:class_name] = Utils.guess_related_class_name(self.name,o[:class_name]||name)
o[:dasherized_class_name] = Utils.dasherize_class_name o[:class_name]
else
Utils.ensure_key o, :input_attributes, {}
o[:input_attributes][:type] = :hidden if o[:type]==:hidden
unless o[:type]==:text
Utils.ensure_key o[:input_attributes], :type, :text
end
end
end
def set_id_field
field :id, {type: :id}
end
def position_field o={}
name = o[:name]||'position'
o[:type] = :position
field name, o
sort_by name
end
def label_field
@label_field || self.fields.keys[1]
end
def relationships; @relationships ||= {}; end
def relationship name, o={}
o[:class_name] = Utils.guess_related_class_name(self.name,o[:class_name]||name)
Utils.ensure_key o, :label, name.to_s.capitalize
Utils.ensure_key o, :foreign_key, "#{Utils.dasherize_class_name(self.name).gsub('-','_')}_id"
o[:foreign_key] = o[:foreign_key].to_sym
Utils.ensure_key o, :dependent, true
self.relationships[name] = o
end
def documents; @documents ||= []; end
def from_hash hash, o={}
self.new(_is_new: false).set_from_hash hash, o
end
def sort_by f, direction=:asc
raise(ArgumentError) unless [:asc,:desc].include? direction
raise(ArgumentError) unless self.new.respond_to? f
@sort_proc = Proc.new do |a,b|
a,b = b,a if direction==:desc
a.__send__(f)<=>b.__send__(f)
end
self
end
def id_string_key
(self.fields.keys[0]||'id').to_s
end
def set_indexes f, ids=[]
ids.each_with_index do |id,i|
self.documents.each do |d|
d[f.to_s] = i if d[self.id_string_key]==id
end
end
self
end
def admin_get id
hash = self.documents.find{|doc| doc[self.id_string_key]==id }
return nil if hash.nil?
from_hash hash
end
def admin_find o={}
o[:query] ||= {}
docs = self.documents.map do |d|
self.from_hash(d)
end.find_all do |d|
o[:query].inject(true) do |out,(k,v)|
out && (d.__send__(k)==v)
end
end
docs.sort!(&@sort_proc) if @sort_proc.is_a?(Proc)
docs
end
# Callbacks
def register_callback name, item=nil, options={}, &block
name = name.to_sym
if block_given?
options = item || {}
item = block
end
@callbacks ||= {}
@callbacks[name] ||= []
if options[:prepend]
@callbacks[name].unshift item
else
@callbacks[name] << item
end
end
def before name, item=nil, options={}, &block
register_callback "before_#{name}", item, options, &block
end
def after name, item=nil, options={}, &block
register_callback "after_#{name}", item, options, &block
end
end
attr_accessor :id, :_errors, :_is_new
def errors; self._errors; end
def new?; self._is_new; end
def persistent_instance_variables
instance_variables.select do |k|
if self.class.fields.empty?
k !~ /^@_/
else
self.class.fields.key? k[1..-1].to_sym
end
end
end
def to_h
persistent_instance_variables.inject({'_class'=>self.class.name}) do |h,var|
k = var.to_s[1..-1]
v = instance_variable_get var
if v.is_a? Array
h[k] = v.map(&:to_h)
else
h[k] = v
end
h
end
end
alias_method :to_hash, :to_h
def nested_docs
persistent_instance_variables.map do |var|
instance_variable_get var
end.find_all do |val|
val.is_a? Array
end.flatten
end
def == other
return false unless other.respond_to?(:to_h)
other.to_h==to_h
end
def inspect
"#<#{self.class.name}:#{to_h.inspect}>"
end
def to_s
return inspect if self.class.label_field.nil?
me = __send__(self.class.label_field)
Utils.blank?(me) ? inspect : me
end
def initialize attributes=nil
self._is_new = true
set attributes if attributes
self._errors = {}
end
def set attributes
attributes.dup.each do |k,v|
__send__ "#{k}=", v
end
self
end
def set_defaults o={}
self.class.fields.each do |k,v|
if v.key?(:default)&&(__send__(k).nil?||o[:force])
set k.to_sym => Utils.get_value(v[:default],self)
end
end
self
end
def set_from_hash hash, o={}
raise(TypeError, "#{hash} is not a Hash") unless hash.is_a? Hash
hash = hash.dup # Leave original untouched
hash.delete('_class')
hash.each do |k,v|
if v.is_a? Array
__send__(k.to_sym).clear
v.each do |d|
obj = Utils.resolve_class_name(d['_class']).new.set_from_hash(d)
__send__(k.to_sym) << obj
end
else
v = typecast(k.to_sym,v) if o[:typecast]
set k.to_sym => v
end
end
self
end
def attachment f
attacher = Utils.resolve_class_name self.class.fields[f][:class_name]
attacher.new self, f
end
# Typecasting
def typecast k, v
return Utils.automatic_typecast(v) unless self.class.fields.key?(k)
meth = "typecast_#{self.class.fields[k][:type]}".to_sym
return Utils.automatic_typecast(v) unless respond_to?(meth)
__send__ meth, k, v
end
def typecast_integer k, v
v.to_i
end
def typecast_price k, v
return nil if Utils.blank?(v)
Utils.parse_price(v)
end
# Coerces a "dd/mm/yyyy" or "dd-mm-yyyy" string into a Date.
# Returns nil when the value does not look like such a date.
def typecast_date k, v
  return nil unless v[/\d\d(\/|-)\d\d(\/|-)\d\d\d\d/]
  Date.parse v
end
# Coerces a "dd/mm/yyyy hh:mm:ss" (or dash-separated) string into a UTC
# Time. Returns nil when the value does not match that shape.
def typecast_datetime k, v
  return nil unless v[/\d\d(\/|-)\d\d(\/|-)\d\d\d\d \d\d?:\d\d?:\d\d?/]
  day, month, year, hour, minute, second = v.split(/[-:\s\/]/)
  Time.utc(year, month, day, hour, minute, second)
end
def typecast_attachment k, v
attached = self.attachment k
if Utils.blank? v
attached.delete
return nil
elsif v.is_a?(Hash)&&v.key?(:tempfile)
return attached.create v
end
end
# Callbacks
def exec_callback name
name = name.to_sym
return self if self.class.callbacks[name].nil?
self.class.callbacks[name].each do |job|
if job.respond_to?(:call)
self.instance_exec name, &job
else
meth = self.method(job)
meth.arity==1 ? meth.call(name) : meth.call
end
end
self
end
def recurse_callback name
nested_docs.each do |d|
d.exec_callback name
end
end
def ensure_id # before_create
if self.id.nil?
self.id = Utils::generate_random_id
end
self
end
def ensure_new; self._is_new = true; end # after_delete
def ensure_not_new; self._is_new = false; end # after_create
def ensure_delete_related # before_delete
self.class.relationships.each do |k,v|
if v[:dependent]
klass = Utils.resolve_class_name v[:class_name]
next if klass.nil?
klass.admin_find(query: {v[:foreign_key]=>self.id}).each do |d|
d.delete
end
end
end
end
# Validation
def error_on k,v
self._errors[k] = (self._errors[k]||[]) << v
self
end
def valid?
self._errors = {}
exec_callback :before_validate
validate
exec_callback :after_validate
nested_docs.reduce self._errors.empty? do |result,d|
result &= d.valid?
end
end
def validate; end
def error_report
report = self._errors.dup || {}
persistent_instance_variables.each do |var|
value = instance_variable_get var
if value.is_a? Array
k = var[1..-1].to_sym
report[k] = []
value.each do |d|
report[k] << d.error_report
end
end
end
report
end
# Saving
def save
return unless valid?
exec_callback :before_save
if new?
exec_callback :before_create
id = perform_create
exec_callback :after_create
else
exec_callback :before_update
id = perform_update
exec_callback :after_update
end
exec_callback :after_save
id
end
def perform_create
self.class.documents << self.to_h
self.id
end
def perform_update
index = self.class.documents.index{|d| d['id']==self.id }
raise MissingDocumentError, "No document can be found with this ID: #{self.id}" if self.id.nil?||index.nil?
self.class.documents[index] = self.to_h
end
# Deletion
def delete o={}
exec_callback :before_delete
perform_delete
exec_callback :after_delete
end
def perform_delete
index = self.class.documents.index{|d| d['id']==self.id }
raise MissingDocumentError, "No document can be found with this ID: #{self.id}" if self.id.nil?||index.nil?
self.class.documents.delete_at(index)
end
# Related to the Admin interface #############
def to_admin_url
"#{Utils.dasherize_class_name(self.class.name)}/#{id}".sub(/\/$/,'')
end
# Admin list
module ClassMethods
def sort_field_for o={}
filter = o[:params][:filter]
return nil if !filter.nil?&&filter.size>1
expected_scope = filter.nil? ? nil : filter.keys[0].to_sym
f = self.fields.find do |k,v|
v[:type]==:position&&v[:scope]==expected_scope
end
f.nil? ? nil : f[0]
end
def to_admin_list o={}
o[:params] ||= {}
unless o[:params][:filter].nil?
query = o[:params][:filter].inject({}) do |query, (k,v)|
query[k.to_sym] = self.new.typecast(k,v)
query
end
new_data = Rack::Utils.build_nested_query(data: o[:params][:filter])
end
{
template: 'template_list',
page_title: self.to_s_short_plural,
dasherized_class_name: PopulateMe::Utils.dasherize_class_name(self.name),
new_data: new_data,
sort_field: self.sort_field_for(o),
# 'command_plus'=> !self.populate_config[:no_plus],
# 'command_search'=> !self.populate_config[:no_search],
items: self.admin_find(query: query).map do |d|
d.to_admin_list_item(o)
end
}
end
end
# Hash consumed by the admin list template for one document row.
#
# BUG FIX: the inject block previously returned the result of the
# `unless` expression, which is nil for hidden relationships; a nil
# accumulator then crashed (`nil <<`) on the next iteration. The
# accumulator is now returned unconditionally.
def to_admin_list_item o={}
  {
    class_name: self.class.name,
    id: self.id,
    admin_url: to_admin_url,
    title: to_s,
    local_menu: self.class.relationships.inject([]) do |out,(k,v)|
      unless v[:hidden]
        out << {
          title: "#{v[:label]}",
          href: "#{o[:request].script_name}/list/#{Utils.dasherize_class_name(v[:class_name])}?filter[#{v[:foreign_key]}]=#{self.id}"
        }
      end
      out
    end
  }
end
# Forms
def to_admin_form o={}
o[:input_name_prefix] ||= 'data'
class_item = {
type: :hidden,
input_name: "#{o[:input_name_prefix]}[_class]",
input_value: self.class.name,
}
self.class.complete_field_options :_class, class_item
items = self.class.fields.inject([class_item]) do |out,(k,item)|
item = item.dup
if item[:form_field]
outcast k, item, o
out << item
end
out
end
{
template: "template#{'_nested' if o[:nested]}_form",
page_title: self.new? ? "New #{self.class.to_s_short}" : self.to_s,
admin_url: self.to_admin_url,
is_new: self.new?,
fields: items
}
end
def outcast field, item, o={}
item[:input_name] = "#{o[:input_name_prefix]}[#{item[:field_name]}]"
unless item[:type]==:list
Utils.ensure_key item, :input_value, self.__send__(field)
end
meth = "outcast_#{item[:type]}".to_sym
__send__(meth, field, item, o) if respond_to?(meth)
end
def outcast_list field, item, o={}
item[:items] = self.__send__(field).map do |nested|
nested.to_admin_form(o.merge(input_name_prefix: item[:input_name]+'[]'))
end
end
def outcast_select field, item, o={}
unless item[:select_options].nil?
opts = Utils.get_value(item[:select_options],self).dup
opts.map! do |opt|
if opt.is_a?(String)||opt.is_a?(Symbol)
opt = [opt.to_s.capitalize,opt]
end
if opt.is_a?(Array)
opt = {description: opt[0].to_s, value: opt[1].to_s}
end
opt[:selected] = true if item[:input_value]==opt[:value]
opt
end
item[:select_options] = opts
end
end
def outcast_attachment field, item, o={}
item[:url] = self.attachment(field).url
end
end
end
Delete document attachments before_delete
require 'populate_me/utils'
module PopulateMe
class MissingDocumentError < StandardError; end
module Document
def self.included base
base.extend ClassMethods
[:save,:create,:update,:delete].each do |cb|
base.before cb, :recurse_callback
base.after cb, :recurse_callback
end
base.before :create, :ensure_id
base.after :create, :ensure_not_new
base.before :delete, :ensure_delete_related
base.before :delete, :ensure_delete_attachments
base.after :delete, :ensure_new
end
module ClassMethods
attr_writer :fields, :documents
attr_accessor :callbacks, :label_field
def to_s
super.gsub(/[A-Z]/, ' \&')[1..-1].gsub('::','')
end
def to_s_short
self.name[/[^:]+$/].gsub(/[A-Z]/, ' \&')[1..-1]
end
def to_s_plural; "#{self.to_s}s"; end
def to_s_short_plural; "#{self.to_s_short}s"; end
def label sym
@label_field = sym.to_sym
end
def fields; @fields ||= {}; end
def field name, o={}
set_id_field if self.fields.empty?&&o[:type]!=:id
complete_field_options name, o
if o[:type]==:list
define_method(name) do
var = "@#{name}"
instance_variable_set(var, instance_variable_get(var)||[])
end
else
attr_accessor name
end
self.fields[name] = o
end
def complete_field_options name, o={}
o[:field_name] = name
Utils.ensure_key o, :type, :string
Utils.ensure_key o, :form_field, ![:id,:position].include?(o[:type])
o[:wrap] = false unless o[:form_field]
Utils.ensure_key o, :wrap, ![:hidden,:list].include?(o[:type])
Utils.ensure_key o, :label, Utils.label_for_field(name)
if o[:type]==:attachment
o[:class_name] = Utils.guess_related_class_name(PopulateMe,o[:class_name]||:attachment)
end
if o[:type]==:list
o[:class_name] = Utils.guess_related_class_name(self.name,o[:class_name]||name)
o[:dasherized_class_name] = Utils.dasherize_class_name o[:class_name]
else
Utils.ensure_key o, :input_attributes, {}
o[:input_attributes][:type] = :hidden if o[:type]==:hidden
unless o[:type]==:text
Utils.ensure_key o[:input_attributes], :type, :text
end
end
end
def set_id_field
field :id, {type: :id}
end
def position_field o={}
name = o[:name]||'position'
o[:type] = :position
field name, o
sort_by name
end
def label_field
@label_field || self.fields.keys[1]
end
def relationships; @relationships ||= {}; end
def relationship name, o={}
o[:class_name] = Utils.guess_related_class_name(self.name,o[:class_name]||name)
Utils.ensure_key o, :label, name.to_s.capitalize
Utils.ensure_key o, :foreign_key, "#{Utils.dasherize_class_name(self.name).gsub('-','_')}_id"
o[:foreign_key] = o[:foreign_key].to_sym
Utils.ensure_key o, :dependent, true
self.relationships[name] = o
end
def documents; @documents ||= []; end
def from_hash hash, o={}
self.new(_is_new: false).set_from_hash hash, o
end
def sort_by f, direction=:asc
raise(ArgumentError) unless [:asc,:desc].include? direction
raise(ArgumentError) unless self.new.respond_to? f
@sort_proc = Proc.new do |a,b|
a,b = b,a if direction==:desc
a.__send__(f)<=>b.__send__(f)
end
self
end
def id_string_key
(self.fields.keys[0]||'id').to_s
end
def set_indexes f, ids=[]
ids.each_with_index do |id,i|
self.documents.each do |d|
d[f.to_s] = i if d[self.id_string_key]==id
end
end
self
end
def admin_get id
hash = self.documents.find{|doc| doc[self.id_string_key]==id }
return nil if hash.nil?
from_hash hash
end
def admin_find o={}
o[:query] ||= {}
docs = self.documents.map do |d|
self.from_hash(d)
end.find_all do |d|
o[:query].inject(true) do |out,(k,v)|
out && (d.__send__(k)==v)
end
end
docs.sort!(&@sort_proc) if @sort_proc.is_a?(Proc)
docs
end
# Callbacks
def register_callback name, item=nil, options={}, &block
name = name.to_sym
if block_given?
options = item || {}
item = block
end
@callbacks ||= {}
@callbacks[name] ||= []
if options[:prepend]
@callbacks[name].unshift item
else
@callbacks[name] << item
end
end
def before name, item=nil, options={}, &block
register_callback "before_#{name}", item, options, &block
end
def after name, item=nil, options={}, &block
register_callback "after_#{name}", item, options, &block
end
end
attr_accessor :id, :_errors, :_is_new
def errors; self._errors; end
def new?; self._is_new; end
def persistent_instance_variables
instance_variables.select do |k|
if self.class.fields.empty?
k !~ /^@_/
else
self.class.fields.key? k[1..-1].to_sym
end
end
end
def to_h
persistent_instance_variables.inject({'_class'=>self.class.name}) do |h,var|
k = var.to_s[1..-1]
v = instance_variable_get var
if v.is_a? Array
h[k] = v.map(&:to_h)
else
h[k] = v
end
h
end
end
alias_method :to_hash, :to_h
def nested_docs
persistent_instance_variables.map do |var|
instance_variable_get var
end.find_all do |val|
val.is_a? Array
end.flatten
end
def == other
return false unless other.respond_to?(:to_h)
other.to_h==to_h
end
def inspect
"#<#{self.class.name}:#{to_h.inspect}>"
end
def to_s
return inspect if self.class.label_field.nil?
me = __send__(self.class.label_field)
Utils.blank?(me) ? inspect : me
end
def initialize attributes=nil
self._is_new = true
set attributes if attributes
self._errors = {}
end
def set attributes
attributes.dup.each do |k,v|
__send__ "#{k}=", v
end
self
end
def set_defaults o={}
self.class.fields.each do |k,v|
if v.key?(:default)&&(__send__(k).nil?||o[:force])
set k.to_sym => Utils.get_value(v[:default],self)
end
end
self
end
def set_from_hash hash, o={}
raise(TypeError, "#{hash} is not a Hash") unless hash.is_a? Hash
hash = hash.dup # Leave original untouched
hash.delete('_class')
hash.each do |k,v|
if v.is_a? Array
__send__(k.to_sym).clear
v.each do |d|
obj = Utils.resolve_class_name(d['_class']).new.set_from_hash(d)
__send__(k.to_sym) << obj
end
else
v = typecast(k.to_sym,v) if o[:typecast]
set k.to_sym => v
end
end
self
end
def attachment f
attacher = Utils.resolve_class_name self.class.fields[f][:class_name]
attacher.new self, f
end
# Typecasting
def typecast k, v
return Utils.automatic_typecast(v) unless self.class.fields.key?(k)
meth = "typecast_#{self.class.fields[k][:type]}".to_sym
return Utils.automatic_typecast(v) unless respond_to?(meth)
__send__ meth, k, v
end
def typecast_integer k, v
v.to_i
end
def typecast_price k, v
return nil if Utils.blank?(v)
Utils.parse_price(v)
end
def typecast_date k, v
if v[/\d\d(\/|-)\d\d(\/|-)\d\d\d\d/]
Date.parse v
else
nil
end
end
def typecast_datetime k, v
if v[/\d\d(\/|-)\d\d(\/|-)\d\d\d\d \d\d?:\d\d?:\d\d?/]
d,m,y,h,min,s = v.split(/[-:\s\/]/)
Time.utc(y,m,d,h,min,s)
else
nil
end
end
def typecast_attachment k, v
attached = self.attachment k
if Utils.blank? v
attached.delete
return nil
elsif v.is_a?(Hash)&&v.key?(:tempfile)
return attached.create v
end
end
# Callbacks
def exec_callback name
name = name.to_sym
return self if self.class.callbacks[name].nil?
self.class.callbacks[name].each do |job|
if job.respond_to?(:call)
self.instance_exec name, &job
else
meth = self.method(job)
meth.arity==1 ? meth.call(name) : meth.call
end
end
self
end
def recurse_callback name
nested_docs.each do |d|
d.exec_callback name
end
end
def ensure_id # before_create
if self.id.nil?
self.id = Utils::generate_random_id
end
self
end
def ensure_new; self._is_new = true; end # after_delete
def ensure_not_new; self._is_new = false; end # after_create
def ensure_delete_related # before_delete
self.class.relationships.each do |k,v|
if v[:dependent]
klass = Utils.resolve_class_name v[:class_name]
next if klass.nil?
klass.admin_find(query: {v[:foreign_key]=>self.id}).each do |d|
d.delete
end
end
end
end
def ensure_delete_attachments # before_delete
self.class.fields.each do |k,v|
if v[:type]==:attachment
self.attachment(k).delete
end
end
end
# Validation
def error_on k,v
self._errors[k] = (self._errors[k]||[]) << v
self
end
def valid?
self._errors = {}
exec_callback :before_validate
validate
exec_callback :after_validate
nested_docs.reduce self._errors.empty? do |result,d|
result &= d.valid?
end
end
def validate; end
def error_report
report = self._errors.dup || {}
persistent_instance_variables.each do |var|
value = instance_variable_get var
if value.is_a? Array
k = var[1..-1].to_sym
report[k] = []
value.each do |d|
report[k] << d.error_report
end
end
end
report
end
# Saving
def save
return unless valid?
exec_callback :before_save
if new?
exec_callback :before_create
id = perform_create
exec_callback :after_create
else
exec_callback :before_update
id = perform_update
exec_callback :after_update
end
exec_callback :after_save
id
end
def perform_create
self.class.documents << self.to_h
self.id
end
def perform_update
index = self.class.documents.index{|d| d['id']==self.id }
raise MissingDocumentError, "No document can be found with this ID: #{self.id}" if self.id.nil?||index.nil?
self.class.documents[index] = self.to_h
end
# Deletion
def delete o={}
exec_callback :before_delete
perform_delete
exec_callback :after_delete
end
# Removes the stored document matching this instance's id.
# Raises MissingDocumentError when the id is nil or no stored document
# carries it.
def perform_delete
  # Same nil-id guard as perform_update: fail fast before scanning.
  raise MissingDocumentError, "No document can be found with this ID: #{self.id}" if self.id.nil?
  index = self.class.documents.index { |d| d['id']==self.id }
  raise MissingDocumentError, "No document can be found with this ID: #{self.id}" if index.nil?
  self.class.documents.delete_at(index)
end
# Related to the Admin interface #############
# Admin URL path for this document: "<dasherized-class-name>/<id>",
# with the trailing slash dropped when the id is blank.
def to_admin_url
  [Utils.dasherize_class_name(self.class.name), id].join('/').sub(/\/$/, '')
end
# Admin list
# Class-level helpers used by the admin interface to build list views.
module ClassMethods
  # Returns the name of the :position field the list should be sorted
  # by, or nil. With no filter only an unscoped position field
  # qualifies; with exactly one filter, the position field scoped by
  # that filter's key; with more than one filter, sorting is disabled.
  def sort_field_for o={}
    filter = o[:params][:filter]
    return nil if !filter.nil?&&filter.size>1
    expected_scope = filter.nil? ? nil : filter.keys[0].to_sym
    f = self.fields.find do |k,v|
      v[:type]==:position&&v[:scope]==expected_scope
    end
    # Hash#find yields [key, options] pairs; the field name is the key.
    f.nil? ? nil : f[0]
  end

  # Builds the hash rendered by the admin list template: page title,
  # list items, the optional sort field, and (when the list is
  # filtered) a prefilled query string for the "new" button.
  def to_admin_list o={}
    o[:params] ||= {}
    unless o[:params][:filter].nil?
      # Typecast each filter value through a blank instance so the
      # query compares with the stored types (e.g. integer ids).
      query = o[:params][:filter].inject({}) do |query, (k,v)|
        query[k.to_sym] = self.new.typecast(k,v)
        query
      end
      new_data = Rack::Utils.build_nested_query(data: o[:params][:filter])
    end
    # NOTE: when no filter is present, query and new_data are nil but
    # still defined as locals because the parser saw the assignments.
    {
      template: 'template_list',
      page_title: self.to_s_short_plural,
      dasherized_class_name: PopulateMe::Utils.dasherize_class_name(self.name),
      new_data: new_data,
      sort_field: self.sort_field_for(o),
      # 'command_plus'=> !self.populate_config[:no_plus],
      # 'command_search'=> !self.populate_config[:no_search],
      items: self.admin_find(query: query).map do |d|
        d.to_admin_list_item(o)
      end
    }
  end
end
# Hash used by the admin interface to render this document in a list:
# identity, admin URL, display title, and a local menu linking to the
# filtered lists of related documents (one entry per non-hidden
# relationship).
def to_admin_list_item o={}
  {
    class_name: self.class.name,
    id: self.id,
    admin_url: to_admin_url,
    title: to_s,
    local_menu: self.class.relationships.inject([]) do |out,(k,v)|
      # `next out` keeps the accumulator intact for hidden
      # relationships. The previous `unless v[:hidden] ... end` block
      # returned nil in the hidden case, which turned the inject
      # accumulator into nil — crashing on the next iteration (or
      # yielding a nil local_menu when the hidden entry came last).
      next out if v[:hidden]
      out << {
        title: "#{v[:label]}",
        href: "#{o[:request].script_name}/list/#{Utils.dasherize_class_name(v[:class_name])}?filter[#{v[:foreign_key]}]=#{self.id}"
      }
    end
  }
end
# Forms
# Builds the hash rendered by the admin form template. A hidden _class
# field carrying the class name is always prepended; then one entry is
# added per schema field flagged :form_field, after being prepared by
# #outcast. Nested documents get the nested form template.
def to_admin_form o={}
  o[:input_name_prefix] ||= 'data'
  class_item = {
    type: :hidden,
    input_name: "#{o[:input_name_prefix]}[_class]",
    input_value: self.class.name,
  }
  self.class.complete_field_options :_class, class_item
  items = self.class.fields.inject([class_item]) do |out,(k,item)|
    # Work on a copy so #outcast cannot mutate the class-level schema.
    item = item.dup
    if item[:form_field]
      outcast k, item, o
      out << item
    end
    out
  end
  {
    template: "template#{'_nested' if o[:nested]}_form",
    page_title: self.new? ? "New #{self.class.to_s_short}" : self.to_s,
    admin_url: self.to_admin_url,
    is_new: self.new?,
    fields: items
  }
end
# Prepares a (copied) field schema +item+ for form rendering: computes
# its input name, seeds :input_value from the current attribute value
# (lists manage their own values), then dispatches to a type-specific
# outcast_<type> hook when one is defined.
def outcast field, item, o={}
  item[:input_name] = "#{o[:input_name_prefix]}[#{item[:field_name]}]"
  Utils.ensure_key(item, :input_value, self.__send__(field)) unless item[:type]==:list
  hook = :"outcast_#{item[:type]}"
  __send__(hook, field, item, o) if respond_to?(hook)
end
# List fields render each nested document as a sub-form whose input
# names are nested under the list's own input name ("...[]").
def outcast_list field, item, o={}
  nested_forms = self.__send__(field).map do |nested_doc|
    nested_doc.to_admin_form(o.merge(input_name_prefix: "#{item[:input_name]}[]"))
  end
  item[:items] = nested_forms
end
# Normalizes :select_options for the form template. Options may be
# given (or returned by a proc) as strings/symbols, as
# [description, value] pairs, or as hashes; all are converted to
# {description:, value:} hashes, and :selected is set on the option
# matching the current input value.
def outcast_select field, item, o={}
  unless item[:select_options].nil?
    # get_value resolves procs against self; dup so the schema-level
    # array is never mutated in place.
    opts = Utils.get_value(item[:select_options],self).dup
    opts.map! do |opt|
      if opt.is_a?(String)||opt.is_a?(Symbol)
        opt = [opt.to_s.capitalize,opt]
      end
      if opt.is_a?(Array)
        opt = {description: opt[0].to_s, value: opt[1].to_s}
      end
      # NOTE: comparison is against the stringified option value, so a
      # non-string :input_value will never be marked selected.
      opt[:selected] = true if item[:input_value]==opt[:value]
      opt
    end
    item[:select_options] = opts
  end
end
# Attachment fields expose the URL of the stored file to the template.
def outcast_attachment field, item, o={}
  item[:url] = attachment(field).url
end
end
end
|
# Gem version constant for PrettyBacon.
module PrettyBacon
  VERSION = "0.0.1"
end
[Release] 0.0.2
# Gem version constant for PrettyBacon.
module PrettyBacon
  VERSION = "0.0.2"
end
|
require 'set'
module ActionController #:nodoc:
module Caching
# Action caching is similar to page caching by the fact that the entire
# output of the response is cached, but unlike page caching, every
# request still goes through the Action Pack. The key benefit
# of this is that filters are run before the cache is served, which
# allows for authentication and other restrictions on whether someone
# is allowed to see the cache. Example:
#
# class ListsController < ApplicationController
# before_filter :authenticate, :except => :public
# caches_page :public
# caches_action :index, :show, :feed
# end
#
# In this example, the public action doesn't require authentication,
# so it's possible to use the faster page caching method. But both
# the show and feed action are to be shielded behind the authenticate
# filter, so we need to implement those as action caches.
#
# Action caching internally uses the fragment caching and an around
# filter to do the job. The fragment cache is named according to both
# the current host and the path. So a page that is accessed at
# <tt>http://david.example.com/lists/show/1</tt> will result in a fragment named
# <tt>david.example.com/lists/show/1</tt>. This allows the cacher to
# differentiate between <tt>david.example.com/lists/</tt> and
# <tt>jamis.example.com/lists/</tt> -- which is a helpful way of assisting
# the subdomain-as-account-key pattern.
#
# Different representations of the same resource, e.g.
# <tt>http://david.example.com/lists</tt> and
# <tt>http://david.example.com/lists.xml</tt>
# are treated like separate requests and so are cached separately.
# Keep in mind when expiring an action cache that
# <tt>:action => 'lists'</tt> is not the same as
# <tt>:action => 'list', :format => :xml</tt>.
#
# You can modify the default action cache path by passing a
# :cache_path option. This will be passed directly to
# ActionCachePath.path_for. This is handy for actions with multiple
# possible routes that should be cached differently. If a block is
# given, it is called with the current controller instance.
#
# And you can also use :if (or :unless) to pass a Proc that
# specifies when the action should be cached.
#
# Finally, if you are using memcached, you can also pass :expires_in.
#
# class ListsController < ApplicationController
# before_filter :authenticate, :except => :public
# caches_page :public
# caches_action :index, :if => proc do |c|
# !c.request.format.json? # cache if is not a JSON request
# end
#
# caches_action :show, :cache_path => { :project => 1 },
# :expires_in => 1.hour
#
# caches_action :feed, :cache_path => proc do |controller|
# if controller.params[:user_id]
# controller.send(:user_list_url,
# controller.params[:user_id], controller.params[:id])
# else
# controller.send(:list_url, controller.params[:id])
# end
# end
# end
#
# If you pass :layout => false, it will only cache your action
# content. It is useful when your layout has dynamic information.
#
# Note: If action caching is being performed for different MIME types
# and those MIME types are being determined by the HTTP_ACCEPT header attribute
# and not by params[:format], then both the cached data and the content-type
# of the response could be wrong. The safest way to use action caching is to
# pass a non-html format as params[:format].
module Actions
  extend ActiveSupport::Concern

  module ClassMethods
    # Declares that +actions+ should be cached.
    # See ActionController::Caching::Actions for details.
    def caches_action(*actions)
      return unless cache_configured?
      options = actions.extract_options!
      options[:layout] = true unless options.key?(:layout)
      # :if/:unless go to the around_filter; :layout/:cache_path are
      # consumed by the filter itself; whatever remains becomes cache
      # store options (e.g. :expires_in). extract! mutates +options+,
      # so the order of these two lines is load-bearing.
      filter_options = options.extract!(:if, :unless).merge(:only => actions)
      cache_options = options.extract!(:layout, :cache_path).merge(:store_options => options)
      around_filter ActionCacheFilter.new(cache_options), filter_options
    end
  end

  # Writes the rendered response body to the fragment cache under
  # +name+ and returns the cached content (or nil when caching is not
  # allowed for this request).
  def _save_fragment(name, options)
    return unless caching_allowed?
    content = response_body
    content = content.join if content.is_a?(Array)
    write_fragment(name, content, options)
  end

  protected

  # Removes the cached fragment(s) for the given action(s). :action may
  # be a single value or an array of actions to expire.
  def expire_action(options = {})
    return unless cache_configured?
    actions = options[:action]
    if actions.is_a?(Array)
      actions.each {|action| expire_action(options.merge(:action => action)) }
    else
      # infer_extension=false so expiry hits the same key regardless of
      # the current request's format.
      expire_fragment(ActionCachePath.new(self, options, false).path)
    end
  end

  class ActionCacheFilter #:nodoc:
    def initialize(options, &block)
      @cache_path, @store_options, @cache_layout =
        options.values_at(:cache_path, :store_options, :layout)
    end

    # Around filter: serve the cached body when present; otherwise run
    # the action and cache its output. With :layout => false only the
    # action content is cached and the layout is re-rendered per
    # request.
    def filter(controller)
      # :cache_path may be a static value or a callable evaluated
      # against the controller instance.
      path_options = if @cache_path.respond_to?(:call)
        controller.instance_exec(controller, &@cache_path)
      else
        @cache_path
      end
      cache_path = ActionCachePath.new(controller, path_options || {})
      body = controller.read_fragment(cache_path.path, @store_options)
      unless body
        controller.action_has_layout = false unless @cache_layout
        yield
        controller.action_has_layout = true
        body = controller._save_fragment(cache_path.path, @store_options)
      end
      body = controller.render_to_string(:text => body, :layout => true) unless @cache_layout
      controller.response_body = body
      controller.content_type = Mime[cache_path.extension || :html]
    end
  end

  class ActionCachePath
    attr_reader :path, :extension

    # If +infer_extension+ is true, the cache path extension is looked up from the request's
    # path and format. This is desirable when reading and writing the cache, but not when
    # expiring the cache - expire_action should expire the same files regardless of the
    # request format.
    def initialize(controller, options = {}, infer_extension = true)
      if infer_extension
        @extension = controller.params[:format]
        options.reverse_merge!(:format => @extension) if options.is_a?(Hash)
      end
      path = controller.url_for(options).split(%r{://}).last
      @path = normalize!(path)
    end

    private

    # Appends "index" to directory-style paths and the format
    # extension when missing, then unescapes so the key matches what
    # was written.
    def normalize!(path)
      path << 'index' if path[-1] == ?/
      path << ".#{extension}" if extension and !path.ends_with?(extension)
      URI.parser.unescape(path)
    end
  end
end
end
end
Makes an editing pass over the action caching rdoc.
require 'set'
module ActionController #:nodoc:
module Caching
# Action caching is similar to page caching by the fact that the entire
# output of the response is cached, but unlike page caching, every
# request still goes through Action Pack. The key benefit of this is
# that filters run before the cache is served, which allows for
# authentication and other restrictions on whether someone is allowed
# to execute such action. Example:
#
# class ListsController < ApplicationController
# before_filter :authenticate, :except => :public
#
# caches_page :public
# caches_action :index, :show
# end
#
# In this example, the +public+ action doesn't require authentication
# so it's possible to use the faster page caching. On the other hand
# +index+ and +show+ require authentication. They can still be cached,
# but we need action caching for them.
#
# Action caching uses fragment caching internally and an around
# filter to do the job. The fragment cache is named according to
# the host and path of the request. A page that is accessed at
# <tt>http://david.example.com/lists/show/1</tt> will result in a fragment named
# <tt>david.example.com/lists/show/1</tt>. This allows the cacher to
# differentiate between <tt>david.example.com/lists/</tt> and
# <tt>jamis.example.com/lists/</tt> -- which is a helpful way of assisting
# the subdomain-as-account-key pattern.
#
# Different representations of the same resource, e.g.
# <tt>http://david.example.com/lists</tt> and
# <tt>http://david.example.com/lists.xml</tt>
# are treated like separate requests and so are cached separately.
# Keep in mind when expiring an action cache that
# <tt>:action => 'lists'</tt> is not the same as
# <tt>:action => 'list', :format => :xml</tt>.
#
# You can modify the default action cache path by passing a
# <tt>:cache_path</tt> option. This will be passed directly to
# <tt>ActionCachePath.path_for</tt>. This is handy for actions with
# multiple possible routes that should be cached differently. If a
# block is given, it is called with the current controller instance.
#
# And you can also use <tt>:if</tt> (or <tt>:unless</tt>) to pass a
# proc that specifies when the action should be cached.
#
# Finally, if you are using memcached, you can also pass <tt>:expires_in</tt>.
#
# The following example depicts some of the points made above:
#
# class ListsController < ApplicationController
# before_filter :authenticate, :except => :public
#
# caches_page :public
#
# caches_action :index, :if => proc do |c|
# !c.request.format.json? # cache if is not a JSON request
# end
#
# caches_action :show, :cache_path => { :project => 1 },
# :expires_in => 1.hour
#
# caches_action :feed, :cache_path => proc do |c|
# if c.params[:user_id]
# c.send(:user_list_url,
# c.params[:user_id], c.params[:id])
# else
# c.send(:list_url, c.params[:id])
# end
# end
# end
#
# If you pass <tt>:layout => false</tt>, it will only cache your action
# content. That's useful when your layout has dynamic information.
#
# Warning: If the format of the request is determined by the Accept HTTP
# header the Content-Type of the cached response could be wrong because
# no information about the MIME type is stored in the cache key. So, if
# you first ask for MIME type M in the Accept header, a cache entry is
# created, and then perform a second request to the same resource asking
# for a different MIME type, you'd get the content cached for M.
#
# The <tt>:format</tt> parameter is taken into account though. The safest
# way to cache by MIME type is to pass the format in the route.
module Actions
  extend ActiveSupport::Concern

  module ClassMethods
    # Declares that +actions+ should be cached.
    # See ActionController::Caching::Actions for details.
    def caches_action(*actions)
      return unless cache_configured?
      options = actions.extract_options!
      options[:layout] = true unless options.key?(:layout)
      # :if/:unless go to the around_filter; :layout/:cache_path are
      # consumed by the filter itself; whatever remains becomes cache
      # store options (e.g. :expires_in). extract! mutates +options+,
      # so the order of these two lines is load-bearing.
      filter_options = options.extract!(:if, :unless).merge(:only => actions)
      cache_options = options.extract!(:layout, :cache_path).merge(:store_options => options)
      around_filter ActionCacheFilter.new(cache_options), filter_options
    end
  end

  # Writes the rendered response body to the fragment cache under
  # +name+ and returns the cached content (or nil when caching is not
  # allowed for this request).
  def _save_fragment(name, options)
    return unless caching_allowed?
    content = response_body
    content = content.join if content.is_a?(Array)
    write_fragment(name, content, options)
  end

  protected

  # Removes the cached fragment(s) for the given action(s). :action may
  # be a single value or an array of actions to expire.
  def expire_action(options = {})
    return unless cache_configured?
    actions = options[:action]
    if actions.is_a?(Array)
      actions.each {|action| expire_action(options.merge(:action => action)) }
    else
      # infer_extension=false so expiry hits the same key regardless of
      # the current request's format.
      expire_fragment(ActionCachePath.new(self, options, false).path)
    end
  end

  class ActionCacheFilter #:nodoc:
    def initialize(options, &block)
      @cache_path, @store_options, @cache_layout =
        options.values_at(:cache_path, :store_options, :layout)
    end

    # Around filter: serve the cached body when present; otherwise run
    # the action and cache its output. With :layout => false only the
    # action content is cached and the layout is re-rendered per
    # request.
    def filter(controller)
      # :cache_path may be a static value or a callable evaluated
      # against the controller instance.
      path_options = if @cache_path.respond_to?(:call)
        controller.instance_exec(controller, &@cache_path)
      else
        @cache_path
      end
      cache_path = ActionCachePath.new(controller, path_options || {})
      body = controller.read_fragment(cache_path.path, @store_options)
      unless body
        controller.action_has_layout = false unless @cache_layout
        yield
        controller.action_has_layout = true
        body = controller._save_fragment(cache_path.path, @store_options)
      end
      body = controller.render_to_string(:text => body, :layout => true) unless @cache_layout
      controller.response_body = body
      controller.content_type = Mime[cache_path.extension || :html]
    end
  end

  class ActionCachePath
    attr_reader :path, :extension

    # If +infer_extension+ is true, the cache path extension is looked up from the request's
    # path and format. This is desirable when reading and writing the cache, but not when
    # expiring the cache - expire_action should expire the same files regardless of the
    # request format.
    def initialize(controller, options = {}, infer_extension = true)
      if infer_extension
        @extension = controller.params[:format]
        options.reverse_merge!(:format => @extension) if options.is_a?(Hash)
      end
      path = controller.url_for(options).split(%r{://}).last
      @path = normalize!(path)
    end

    private

    # Appends "index" to directory-style paths and the format
    # extension when missing, then unescapes so the key matches what
    # was written.
    def normalize!(path)
      path << 'index' if path[-1] == ?/
      path << ".#{extension}" if extension and !path.ends_with?(extension)
      URI.parser.unescape(path)
    end
  end
end
end
end
|
# Returns a help message explaining why Homebrew does not provide a
# formula named +name+ (matched case-insensitively), or nil when the
# name is not blacklisted.
def blacklisted? name
  case name.downcase
  when /^vim?$/, 'screen', /^rubygems?$/ then <<-EOS.undent
    Apple distributes #{name} with OS X, you can find it in /usr/bin.
    EOS
  when 'libarchive', 'libpcap' then <<-EOS.undent
    Apple distributes #{name} with OS X, you can find it in /usr/lib.
    EOS
  when 'libiconv' then <<-EOS.undent
    Apple distributes #{name} with OS X, you can find it in /usr/lib.
    Some build scripts fail to detect it correctly, please check existing
    formulae for solutions.
    EOS
  # 'libxlst' was a typo that never matched the real library name; keep
  # it for compatibility and also match the correct 'libxslt'.
  when 'libxml', 'libxlst', 'libxslt' then <<-EOS.undent
    Apple distributes #{name} with OS X, you can find it in /usr/lib.
    However not all build scripts look for these hard enough, so you may need
    to call ENV.libxml2 in your formula's install function.
    EOS
  when 'freetype', 'libpng' then <<-EOS.undent
    Apple distributes #{name} with OS X, you can find it in /usr/X11/lib.
    However not all build scripts look here, so you may need to call ENV.x11
    in your formula's install function.
    EOS
  when 'wxwidgets' then <<-EOS.undent
    An old version of wxWidgets can be found in /usr/X11/lib. However, Homebrew
    does provide a newer version, 2.8.10:
    brew install wxmac
    EOS
  when 'tex', 'tex-live', 'texlive' then <<-EOS.undent
    Installing TeX from source is weird and gross, requires a lot of patches,
    and only builds 32-bit (and thus can't use Homebrew deps on Snow Leopard.)
    We recommend using a MacTeX distribution: http://www.tug.org/mactex/
    EOS
  when 'pip' then <<-EOS.undent
    Install pip with easy_install:
    easy_install pip
    EOS
  when 'macruby' then <<-EOS.undent
    MacRuby works better when you install their package:
    http://www.macruby.org/downloads.html
    Although if you prefer, there is a formula in homebrew-alt.
    EOS
  when 'npm' then <<-EOS.undent
    Homebrew does not provide npm because it is self-updating. To install it, first
    `brew install nodejs' and then:
    curl http://npmjs.org/install.sh | sh
    EOS
  when /(lib)?lzma/
    "lzma is now part of the xz formula."
  when 'xcode' then <<-EOS.undent
    Xcode can be installed via the App Store (on Lion or newer), or from:
    http://connect.apple.com/
    EOS
  end
end
corrected the macruby download link
Signed-off-by: Adam Vandenberg <34c2b6407fd5a10249a15d699d40f9ed1782e98c@gmail.com>
# Returns a help message explaining why Homebrew does not provide a
# formula named +name+ (matched case-insensitively), or nil when the
# name is not blacklisted.
def blacklisted? name
  case name.downcase
  when /^vim?$/, 'screen', /^rubygems?$/ then <<-EOS.undent
    Apple distributes #{name} with OS X, you can find it in /usr/bin.
    EOS
  when 'libarchive', 'libpcap' then <<-EOS.undent
    Apple distributes #{name} with OS X, you can find it in /usr/lib.
    EOS
  when 'libiconv' then <<-EOS.undent
    Apple distributes #{name} with OS X, you can find it in /usr/lib.
    Some build scripts fail to detect it correctly, please check existing
    formulae for solutions.
    EOS
  # 'libxlst' was a typo that never matched the real library name; keep
  # it for compatibility and also match the correct 'libxslt'.
  when 'libxml', 'libxlst', 'libxslt' then <<-EOS.undent
    Apple distributes #{name} with OS X, you can find it in /usr/lib.
    However not all build scripts look for these hard enough, so you may need
    to call ENV.libxml2 in your formula's install function.
    EOS
  when 'freetype', 'libpng' then <<-EOS.undent
    Apple distributes #{name} with OS X, you can find it in /usr/X11/lib.
    However not all build scripts look here, so you may need to call ENV.x11
    in your formula's install function.
    EOS
  when 'wxwidgets' then <<-EOS.undent
    An old version of wxWidgets can be found in /usr/X11/lib. However, Homebrew
    does provide a newer version, 2.8.10:
    brew install wxmac
    EOS
  when 'tex', 'tex-live', 'texlive' then <<-EOS.undent
    Installing TeX from source is weird and gross, requires a lot of patches,
    and only builds 32-bit (and thus can't use Homebrew deps on Snow Leopard.)
    We recommend using a MacTeX distribution: http://www.tug.org/mactex/
    EOS
  when 'pip' then <<-EOS.undent
    Install pip with easy_install:
    easy_install pip
    EOS
  when 'macruby' then <<-EOS.undent
    MacRuby works better when you install their package:
    http://www.macruby.org/
    Although if you prefer, there is a formula in homebrew-alt.
    EOS
  when 'npm' then <<-EOS.undent
    Homebrew does not provide npm because it is self-updating. To install it, first
    `brew install nodejs' and then:
    curl http://npmjs.org/install.sh | sh
    EOS
  when /(lib)?lzma/
    "lzma is now part of the xz formula."
  when 'xcode' then <<-EOS.undent
    Xcode can be installed via the App Store (on Lion or newer), or from:
    http://connect.apple.com/
    EOS
  end
end
|
require 'formula'
require 'utils'
require 'superenv'
module Homebrew extend self
  # Audits every named formula (or all formulae when none are named),
  # printing each formula's problems and failing with a summary when
  # any problem was found.
  def audit
    ENV.setup_build_environment
    targets = ARGV.named.empty? ? Formula : ARGV.formulae
    failed_formulae = 0
    total_problems = 0
    targets.each do |formula|
      auditor = FormulaAuditor.new formula
      auditor.audit
      next if auditor.problems.empty?
      puts "#{formula.name}:"
      auditor.problems.each { |message| puts " * #{message}" }
      puts
      failed_formulae += 1
      total_problems += auditor.problems.size
    end
    unless total_problems.zero?
      ofail "#{total_problems} problems in #{failed_formulae} formulae"
    end
  end
end
# Core extension used by the auditor to override constants (e.g. DATA)
# on formula classes.
class Module
  # Binds constant +name+ to +value+, removing any previous definition
  # first so no "already initialized constant" warning is issued.
  def redefine_const(name, value)
    remove_const(name) if const_defined?(name)
    const_set(name, value)
  end
end
# Formula extensions for auditing
# Formula extensions for auditing
class Formula
  # True-ish when the formula only has a head (VCS) download and no
  # stable release. NOTE: returns the head spec object (truthy) rather
  # than a strict boolean; callers use it in conditionals only.
  def head_only?
    @head and @stable.nil?
  end

  # Memoized FormulaText wrapper around the formula's source file.
  def text
    @text ||= FormulaText.new(@path)
  end
end
# Wraps the raw text of a formula file for the textual checks run by
# FormulaAuditor.
class FormulaText
  # +path+ is a Pathname to the formula file; its full contents are
  # read eagerly.
  def initialize path
    @text = path.open('r') { |io| io.read }
  end

  # Formula source with any embedded patch (everything after __END__)
  # stripped off.
  def without_patch
    @text.split("__END__").first.strip
  end

  # Truthy when the source references the DATA constant.
  def has_DATA?
    @text =~ /\bDATA\b/
  end

  # Truthy when the source contains an __END__ marker line.
  def has_END?
    @text =~ /^__END__$/
  end

  # Truthy when the file ends with a newline.
  def has_trailing_newline?
    @text =~ /\Z\n/
  end
end
class FormulaAuditor
# f: the Formula under audit; text: its patch-stripped source;
# problems: the messages collected so far.
attr_reader :f, :text, :problems

# Formulae that are usually needed only at build time; depending on one
# of these without a :build tag is flagged by audit_deps.
BUILD_TIME_DEPS = %W[
  autoconf
  automake
  boost-build
  bsdmake
  cmake
  imake
  intltool
  libtool
  pkg-config
  scons
  smake
  swig
]
# +f+ is the Formula instance to audit. Captures its patch-stripped
# source text and all defined specs (stable/devel/head).
def initialize f
  @f = f
  @problems = []
  @text = f.text.without_patch
  @specs = %w{stable devel head}.map { |s| f.send(s) }.compact
  # We need to do this in case the formula defines a patch that uses DATA.
  f.class.redefine_const :DATA, ""
end
# File-level checks: permissions, DATA/__END__ pairing, and the
# presence of a trailing newline.
def audit_file
  unless f.path.stat.mode.to_s(8) == "100644"
    problem "Incorrect file permissions: chmod 644 #{f.path}"
  end
  problem "'DATA' was found, but no '__END__'" if f.text.has_DATA? && !f.text.has_END?
  problem "'__END__' was found, but 'DATA' is not used" if f.text.has_END? && !f.text.has_DATA?
  problem "File should end with a newline" unless f.text.has_trailing_newline?
end
# Dependency checks: no aliases, every dependency must resolve, passed
# options must be defined by the dependency, build-time-only tools must
# be tagged :build, and a few dependencies are disallowed outright.
def audit_deps
  # Don't depend_on aliases; use full name
  @@aliases ||= Formula.aliases
  f.deps.select { |d| @@aliases.include? d.name }.each do |d|
    problem "Dependency #{d} is an alias; use the canonical name."
  end

  # Check for things we don't like to depend on.
  # We allow non-Homebrew installs whenever possible.
  f.deps.each do |dep|
    begin
      dep_f = dep.to_formula
    rescue FormulaUnavailableError
      problem "Can't find dependency #{dep.name.inspect}."
      next
    end

    # Every option passed to the dependency must actually be defined
    # by that dependency's build.
    dep.options.reject do |opt|
      dep_f.build.has_option?(opt.name)
    end.each do |opt|
      problem "Dependency #{dep} does not define option #{opt.name.inspect}"
    end

    case dep.name
    when *BUILD_TIME_DEPS
      # TODO: this should really be only dep.build? but maybe some formula
      # depends on the current behavior to be audit-clean?
      next if dep.tags.any?
      next if f.name =~ /automake/ && dep.name == 'autoconf'
      # This is actually a libltdl dep that gets converted to a non-build time
      # libtool dep, but I don't know of a good way to encode this in the dep object
      next if f.name == 'imagemagick' && dep.name == 'libtool'
      problem %{#{dep} dependency should be "depends_on '#{dep}' => :build"}
    when "git", "ruby", "emacs", "mercurial"
      problem <<-EOS.undent
        Don't use #{dep} as a dependency. We allow non-Homebrew
        #{dep} installations.
        EOS
    when 'python', 'python2', 'python3'
      problem <<-EOS.undent
        Don't use #{dep} as a dependency (string).
        We have special `depends_on :python` (or :python2 or :python3 )
        that works with brewed and system Python and allows us to support
        bindings for 2.x and 3.x in parallel and much more.
        EOS
    when 'gfortran'
      problem "Use `depends_on :fortran` instead of `depends_on 'gfortran'`"
    when 'open-mpi', 'mpich2'
      problem <<-EOS.undent
        There are multiple conflicting ways to install MPI. Use an MPIDependency:
        depends_on :mpi => [<lang list>]
        Where <lang list> is a comma delimited list that can include:
        :cc, :cxx, :f77, :f90
        EOS
    end
  end
end
# Every name listed in conflicts_with must resolve to an existing
# formula.
def audit_conflicts
  f.conflicts.each do |conflict|
    begin
      Formula.factory(conflict.name)
    rescue FormulaUnavailableError
      problem "Can't find conflicting formula #{conflict.name.inspect}."
    end
  end
end
def audit_urls
unless f.homepage =~ %r[^https?://]
problem "The homepage should start with http or https (url is #{f.homepage})."
end
# Check for http:// GitHub homepage urls, https:// is preferred.
# Note: only check homepages that are repo pages, not *.github.com hosts
if f.homepage =~ %r[^http://github\.com/]
problem "Use https:// URLs for homepages on GitHub (url is #{f.homepage})."
end
# Google Code homepages should end in a slash
if f.homepage =~ %r[^https?://code\.google\.com/p/[^/]+[^/]$]
problem "Google Code homepage should end with a slash (url is #{f.homepage})."
end
if f.homepage =~ %r[^http://.*\.github\.com/]
problem "GitHub pages should use the github.io domain (url is #{f.homepage})"
end
urls = @specs.map(&:url)
# Check GNU urls; doesn't apply to mirrors
urls.grep(%r[^(?:https?|ftp)://(?!alpha).+/gnu/]) do |u|
problem "\"ftpmirror.gnu.org\" is preferred for GNU software (url is #{u})."
end
# the rest of the checks apply to mirrors as well
urls.concat(@specs.map(&:mirrors).flatten)
# Check SourceForge urls
urls.each do |p|
# Is it a filedownload (instead of svnroot)
next if p =~ %r[/svnroot/]
next if p =~ %r[svn\.sourceforge]
# Is it a sourceforge http(s) URL?
next unless p =~ %r[^https?://.*\bsourceforge\.]
if p =~ /(\?|&)use_mirror=/
problem "Don't use #{$1}use_mirror in SourceForge urls (url is #{p})."
end
if p =~ /\/download$/
problem "Don't use /download in SourceForge urls (url is #{p})."
end
if p =~ %r[^https?://sourceforge\.]
problem "Use http://downloads.sourceforge.net to get geolocation (url is #{p})."
end
if p =~ %r[^https?://prdownloads\.]
problem "Don't use prdownloads in SourceForge urls (url is #{p}).\n" +
"\tSee: http://librelist.com/browser/homebrew/2011/1/12/prdownloads-is-bad/"
end
if p =~ %r[^http://\w+\.dl\.]
problem "Don't use specific dl mirrors in SourceForge urls (url is #{p})."
end
end
# Check for git:// GitHub repo urls, https:// is preferred.
urls.grep(%r[^git://[^/]*github\.com/]) do |u|
problem "Use https:// URLs for accessing GitHub repositories (url is #{u})."
end
# Check for http:// GitHub repo urls, https:// is preferred.
urls.grep(%r[^http://github\.com/.*\.git$]) do |u|
problem "Use https:// URLs for accessing GitHub repositories (url is #{u})."
end
# Use new-style archive downloads
urls.select { |u| u =~ %r[https://.*/(?:tar|zip)ball/] and not u =~ %r[\.git$] }.each do |u|
problem "Use /archive/ URLs for GitHub tarballs (url is #{u})."
end
if urls.any? { |u| u =~ /\.xz/ } && !f.deps.any? { |d| d.name == "xz" }
problem "Missing a build-time dependency on 'xz'"
end
end
# Version and checksum checks for each spec: versions must exist, not
# duplicate what is scanned from the URL, not start with 'v'; checksums
# must be the right length, hex, and lowercase; md5 is rejected. Also
# flags :using strategies already implied by the URL.
def audit_specs
  problem "Head-only (no stable download)" if f.head_only?

  [:stable, :devel].each do |spec|
    s = f.send(spec)
    next if s.nil?

    if s.version.to_s.empty?
      problem "Invalid or missing #{spec} version"
    else
      # version_text stays nil when the version was derived from the
      # URL, so explicitly-set redundant versions are caught below.
      version_text = s.version unless s.version.detected_from_url?
      version_url = Version.detect(s.url, s.specs)
      if version_url.to_s == version_text.to_s && s.version.instance_of?(Version)
        problem "#{spec} version #{version_text} is redundant with version scanned from URL"
      end
    end

    if s.version.to_s =~ /^v/
      problem "#{spec} version #{s.version} should not have a leading 'v'"
    end

    cksum = s.checksum
    next if cksum.nil?

    case cksum.hash_type
    when :md5
      problem "md5 checksums are deprecated, please use sha1 or sha256"
      next
    when :sha1 then len = 40
    when :sha256 then len = 64
    end

    if cksum.empty?
      problem "#{cksum.hash_type} is empty"
    else
      problem "#{cksum.hash_type} should be #{len} characters" unless cksum.hexdigest.length == len
      problem "#{cksum.hash_type} contains invalid characters" unless cksum.hexdigest =~ /^[a-fA-F0-9]+$/
      problem "#{cksum.hash_type} should be lowercase" unless cksum.hexdigest == cksum.hexdigest.downcase
    end
  end

  # Check for :using that is already detected from the url
  @specs.each do |s|
    next if s.using.nil?
    url_strategy = DownloadStrategyDetector.detect(s.url)
    using_strategy = DownloadStrategyDetector.detect('', s.using)
    problem "redundant :using specification in url or head" if url_strategy == using_strategy
  end
end
# External patches must be pinned: GitHub/Gist raw URLs need a 40-char
# revision SHA, and MacPorts patches must not reference trunk.
def audit_patches
  external_patches = Patches.new(f.patches).select(&:external?)
  external_patches.each do |patch|
    url = patch.url
    case url
    when %r[raw\.github\.com], %r[gist\.github\.com/raw]
      unless url =~ /[a-fA-F0-9]{40}/
        problem "GitHub/Gist patches should specify a revision:\n#{url}"
      end
    when %r[macports/trunk]
      problem "MacPorts patches should specify a revision instead of trunk:\n#{url}"
    end
  end
end
# Source-text style checks: a battery of regexes over the formula's
# patch-stripped source flagging template leftovers, deprecated APIs,
# hard-coded compilers/paths, and formatting issues. Each match records
# a problem with an exact, user-facing message — keep messages stable.
def audit_text
  if text =~ /<(Formula|AmazonWebServicesFormula|ScriptFileFormula|GithubGistFormula)/
    problem "Use a space in class inheritance: class Foo < #{$1}"
  end

  # Commented-out cmake support from default template
  if (text =~ /# system "cmake/)
    problem "Commented cmake call found"
  end

  # Comments from default template
  if (text =~ /# if this fails, try separate make\/make install steps/)
    problem "Please remove default template comments"
  end
  if (text =~ /# PLEASE REMOVE/)
    problem "Please remove default template comments"
  end

  # FileUtils is included in Formula
  if text =~ /FileUtils\.(\w+)/
    problem "Don't need 'FileUtils.' before #{$1}."
  end

  # Check for long inreplace block vars
  if text =~ /inreplace .* do \|(.{2,})\|/
    problem "\"inreplace <filenames> do |s|\" is preferred over \"|#{$1}|\"."
  end

  # Check for string interpolation of single values.
  if text =~ /(system|inreplace|gsub!|change_make_var!).*[ ,]"#\{([\w.]+)\}"/
    problem "Don't need to interpolate \"#{$2}\" with #{$1}"
  end

  # Check for string concatenation; prefer interpolation
  if text =~ /(#\{\w+\s*\+\s*['"][^}]+\})/
    problem "Try not to concatenate paths in string interpolation:\n #{$1}"
  end

  # Prefer formula path shortcuts in Pathname+
  if text =~ %r{\(\s*(prefix\s*\+\s*(['"])(bin|include|libexec|lib|sbin|share)[/'"])}
    problem "\"(#{$1}...#{$2})\" should be \"(#{$3}+...)\""
  end
  if text =~ %r[((man)\s*\+\s*(['"])(man[1-8])(['"]))]
    problem "\"#{$1}\" should be \"#{$4}\""
  end

  # Prefer formula path shortcuts in strings
  if text =~ %r[(\#\{prefix\}/(bin|include|libexec|lib|sbin|share))]
    problem "\"#{$1}\" should be \"\#{#{$2}}\""
  end
  if text =~ %r[((\#\{prefix\}/share/man/|\#\{man\}/)(man[1-8]))]
    problem "\"#{$1}\" should be \"\#{#{$3}}\""
  end
  if text =~ %r[((\#\{share\}/(man)))[/'"]]
    problem "\"#{$1}\" should be \"\#{#{$3}}\""
  end
  if text =~ %r[(\#\{prefix\}/share/(info|man))]
    problem "\"#{$1}\" should be \"\#{#{$2}}\""
  end

  # Commented-out depends_on
  if text =~ /#\s*depends_on\s+(.+)\s*$/
    problem "Commented-out dep #{$1}"
  end

  # No trailing whitespace, please
  if text =~ /[\t ]+$/
    problem "Trailing whitespace was found"
  end
  if text =~ /if\s+ARGV\.include\?\s+'--(HEAD|devel)'/
    problem "Use \"if ARGV.build_#{$1.downcase}?\" instead"
  end
  if text =~ /make && make/
    problem "Use separate make calls"
  end
  if text =~ /^[ ]*\t/
    problem "Use spaces instead of tabs for indentation"
  end

  # xcodebuild should specify SYMROOT
  if text =~ /system\s+['"]xcodebuild/ and not text =~ /SYMROOT=/
    problem "xcodebuild should be passed an explicit \"SYMROOT\""
  end
  if text =~ /ENV\.x11/
    problem "Use \"depends_on :x11\" instead of \"ENV.x11\""
  end

  # Avoid hard-coding compilers
  if text =~ %r{(system|ENV\[.+\]\s?=)\s?['"](/usr/bin/)?(gcc|llvm-gcc|clang)['" ]}
    problem "Use \"\#{ENV.cc}\" instead of hard-coding \"#{$3}\""
  end
  if text =~ %r{(system|ENV\[.+\]\s?=)\s?['"](/usr/bin/)?((g|llvm-g|clang)\+\+)['" ]}
    problem "Use \"\#{ENV.cxx}\" instead of hard-coding \"#{$3}\""
  end
  if text =~ /system\s+['"](env|export)/
    problem "Use ENV instead of invoking '#{$1}' to modify the environment"
  end
  if text =~ /version == ['"]HEAD['"]/
    problem "Use 'build.head?' instead of inspecting 'version'"
  end
  if text =~ /build\.include\?\s+['"]\-\-(.*)['"]/
    problem "Reference '#{$1}' without dashes"
  end
  if text =~ /build\.with\?\s+['"]-?-?with-(.*)['"]/
    problem "No double 'with': Use `build.with? '#{$1}'` to check for \"--with-#{$1}\""
  end
  if text =~ /build\.without\?\s+['"]-?-?without-(.*)['"]/
    problem "No double 'without': Use `build.without? '#{$1}'` to check for \"--without-#{$1}\""
  end
  if text =~ /ARGV\.(?!(debug\?|verbose\?|find[\(\s]))/
    problem "Use build instead of ARGV to check options"
  end
  if text =~ /def options/
    problem "Use new-style option definitions"
  end
  if text =~ /MACOS_VERSION/
    problem "Use MacOS.version instead of MACOS_VERSION"
  end
  cats = %w{leopard snow_leopard lion mountain_lion}.join("|")
  if text =~ /MacOS\.(?:#{cats})\?/
    problem "\"#{$&}\" is deprecated, use a comparison to MacOS.version instead"
  end
  if text =~ /skip_clean\s+:all/
    problem "`skip_clean :all` is deprecated; brew no longer strips symbols"
  end
  if text =~ /depends_on [A-Z][\w:]+\.new$/
    problem "`depends_on` can take requirement classes instead of instances"
  end
  if text =~ /^def (\w+).*$/
    problem "Define method #{$1.inspect} in the class body, not at the top-level"
  end
  if text =~ /ENV.fortran/
    problem "Use `depends_on :fortran` instead of `ENV.fortran`"
  end
end
# Audits Python-specific usage in the formula text: deprecated helpers,
# `system 'python'` calls inside install, unguarded python calls in
# caveats, PYTHONPATH tampering, and missing `depends_on :python`.
def audit_python
  if text =~ /(def\s*)?which_python/
    problem "Replace `which_python` by `python.xy`, which returns e.g. 'python2.7'."
  end

  if text =~ /which\(?["']python/
    problem "Don't locate python with `which 'python'`, use `python.binary` instead"
  end

  # Checks that apply only to code in def install
  if text =~ /(\s*)def\s+install((.*\n)*?)(\1end)/
    install_body = $2

    if install_body =~ /system\(?\s*['"]python/
      problem "Instead of `system 'python', ...`, call `system python, ...`."
    end

    # Fix: match against install_body, not the whole text — this check is
    # scoped to `def install`, as the comment above says.
    if install_body =~ /system\(?\s*python\.binary/
      problem "Instead of `system python.binary, ...`, call `system python, ...`."
    end
  end

  # Checks that apply only to code in def caveats
  # Fix: the second alternative used to be a bare Regexp literal
  # (`|| /re/`), which is always truthy and never performed a match,
  # making this condition unconditionally true. Match against `text`.
  if text =~ /(\s*)def\s+caveats((.*\n)*?)(\1end)/ || text =~ /(\s*)def\s+caveats;(.*?)end/
    caveats_body = $2
    if caveats_body =~ /[ \{=](python[23]?)\.(.*\w)/
      # So if in the body of caveats there is a `python.whatever` called,
      # check that there is a guard like `if python` or similar:
      python = $1
      method = $2
      unless caveats_body =~ /(if python[23]?)|(if build\.with\?\s?\(?['"]python)|(unless build.without\?\s?\(?['"]python)/
        problem "Please guard `#{python}.#{method}` like so `#{python}.#{method} if #{python}`"
      end
    end
  end

  if f.requirements.any? { |r| r.kind_of?(PythonInstalled) }
    # Don't check this for all formulae, because some are allowed to set the
    # PYTHONPATH. E.g. python.rb itself needs to set it.
    if text =~ /ENV\.append.*PYTHONPATH/ || text =~ /ENV\[['"]PYTHONPATH['"]\]\s*=[^=]/
      problem "Don't set the PYTHONPATH, instead declare `depends_on :python`."
    end
  else
    # So if there is no PythonInstalled requirement, we can check if the
    # formula still uses python and should add a `depends_on :python`
    unless f.name.to_s =~ /(pypy[0-9]*)|(python[0-9]*)/
      if text =~ /system.["' ]?python([0-9"'])?/
        problem "If the formula uses Python, it should declare so by `depends_on :python#{$1}`"
      end
      if text =~ /setup\.py/
        problem <<-EOS.undent
          If the formula installs Python bindings you should declare `depends_on :python[3]`"
        EOS
      end
    end
  end

  # Todo:
  # The python do ... end block is possibly executed twice. Once for
  # python 2.x and once for 3.x. So if a `system 'make'` is called, a
  # `system 'make clean'` should also be called at the end of the block.
end
# Runs every audit pass in order; each pass appends its findings to
# @problems via #problem.
def audit
  %w[file specs urls deps conflicts patches text python].each do |pass|
    send("audit_#{pass}")
  end
end
private
# Records a single audit problem message.
def problem p
  @problems.push(p)
end
end
audit more template comments
require 'formula'
require 'utils'
require 'superenv'
module Homebrew extend self
  # Entry point for `brew audit`: audits each requested formula (all
  # formulae when none are named) and prints grouped problem reports,
  # failing the command when anything was flagged.
  def audit
    failing_formulae = 0
    total_problems = 0
    ENV.setup_build_environment
    targets = ARGV.named.empty? ? Formula : ARGV.formulae
    targets.each do |formula|
      auditor = FormulaAuditor.new formula
      auditor.audit
      next if auditor.problems.empty?
      puts "#{formula.name}:"
      auditor.problems.each { |issue| puts " * #{issue}" }
      puts
      failing_formulae += 1
      total_problems += auditor.problems.size
    end
    unless total_problems.zero?
      ofail "#{total_problems} problems in #{failing_formulae} formulae"
    end
  end
end
# Allows constants to be replaced without Ruby's "already initialized
# constant" warning (used to stub out e.g. DATA on formula classes).
class Module
  # Sets +name+ to +value+, first removing any definition made directly
  # on the receiver. The `false` argument restricts const_defined? to
  # the receiver's own constants: `remove_const` cannot remove inherited
  # constants, so the ancestor-searching default raised NameError when
  # the constant was only defined on an ancestor (e.g. Object::DATA).
  def redefine_const(name, value)
    __send__(:remove_const, name) if const_defined?(name, false)
    const_set(name, value)
  end
end
# Auditing extensions for Formula.
class Formula
  # True when the formula only provides a head spec (no stable download).
  # Returns nil when no head spec exists, mirroring `@head and ...`.
  def head_only?
    @stable.nil? if @head
  end

  # Memoized FormulaText wrapper around this formula's source file.
  def text
    @text ||= FormulaText.new(@path)
  end
end
# Read-only view of a formula file's raw source, with the predicates the
# audit checks need.
class FormulaText
  def initialize path
    @text = path.open('r', &:read)
  end

  # The source with any embedded patch (everything after __END__) removed.
  def without_patch
    @text.split("__END__").first.strip
  end

  def has_DATA?
    @text =~ /\bDATA\b/
  end

  def has_END?
    @text =~ /^__END__$/
  end

  def has_trailing_newline?
    @text =~ /\Z\n/
  end
end
# Runs every audit pass against a single formula, collecting
# human-readable problem messages in #problems.
class FormulaAuditor
  attr_reader :f, :text, :problems

  # Dependencies that should normally be tagged `:build`.
  BUILD_TIME_DEPS = %W[
    autoconf
    automake
    boost-build
    bsdmake
    cmake
    imake
    intltool
    libtool
    pkg-config
    scons
    smake
    swig
  ]

  def initialize f
    @f = f
    @problems = []
    @text = f.text.without_patch
    @specs = %w{stable devel head}.map { |s| f.send(s) }.compact

    # We need to do this in case the formula defines a patch that uses DATA.
    f.class.redefine_const :DATA, ""
  end

  # Checks the formula file itself: permissions, DATA/__END__ pairing,
  # and the trailing newline.
  def audit_file
    unless f.path.stat.mode.to_s(8) == "100644"
      problem "Incorrect file permissions: chmod 644 #{f.path}"
    end

    if f.text.has_DATA? and not f.text.has_END?
      problem "'DATA' was found, but no '__END__'"
    end

    if f.text.has_END? and not f.text.has_DATA?
      problem "'__END__' was found, but 'DATA' is not used"
    end

    unless f.text.has_trailing_newline?
      problem "File should end with a newline"
    end
  end

  # Checks declared dependencies: aliases, unresolvable deps, unknown
  # dep options, missing :build tags, and deps we deliberately disallow.
  def audit_deps
    # Don't depend_on aliases; use full name
    @@aliases ||= Formula.aliases
    f.deps.select { |d| @@aliases.include? d.name }.each do |d|
      problem "Dependency #{d} is an alias; use the canonical name."
    end

    # Check for things we don't like to depend on.
    # We allow non-Homebrew installs whenever possible.
    f.deps.each do |dep|
      begin
        dep_f = dep.to_formula
      rescue FormulaUnavailableError
        problem "Can't find dependency #{dep.name.inspect}."
        next
      end

      dep.options.reject do |opt|
        dep_f.build.has_option?(opt.name)
      end.each do |opt|
        problem "Dependency #{dep} does not define option #{opt.name.inspect}"
      end

      case dep.name
      when *BUILD_TIME_DEPS
        # TODO: this should really be only dep.build? but maybe some formula
        # depends on the current behavior to be audit-clean?
        next if dep.tags.any?
        next if f.name =~ /automake/ && dep.name == 'autoconf'
        # This is actually a libltdl dep that gets converted to a non-build time
        # libtool dep, but I don't know of a good way to encode this in the dep object
        next if f.name == 'imagemagick' && dep.name == 'libtool'
        problem %{#{dep} dependency should be "depends_on '#{dep}' => :build"}
      when "git", "ruby", "emacs", "mercurial"
        problem <<-EOS.undent
          Don't use #{dep} as a dependency. We allow non-Homebrew
          #{dep} installations.
        EOS
      when 'python', 'python2', 'python3'
        problem <<-EOS.undent
          Don't use #{dep} as a dependency (string).
          We have special `depends_on :python` (or :python2 or :python3 )
          that works with brewed and system Python and allows us to support
          bindings for 2.x and 3.x in parallel and much more.
        EOS
      when 'gfortran'
        problem "Use `depends_on :fortran` instead of `depends_on 'gfortran'`"
      when 'open-mpi', 'mpich2'
        problem <<-EOS.undent
          There are multiple conflicting ways to install MPI. Use an MPIDependency:
          depends_on :mpi => [<lang list>]
          Where <lang list> is a comma delimited list that can include:
          :cc, :cxx, :f77, :f90
        EOS
      end
    end
  end

  # Checks that conflicts_with targets actually exist.
  def audit_conflicts
    f.conflicts.each do |c|
      begin
        Formula.factory(c.name)
      rescue FormulaUnavailableError
        problem "Can't find conflicting formula #{c.name.inspect}."
      end
    end
  end

  # Checks homepage, download and mirror urls for discouraged hosts and
  # url shapes (SourceForge mirrors, git:// GitHub, old-style tarballs).
  def audit_urls
    unless f.homepage =~ %r[^https?://]
      problem "The homepage should start with http or https (url is #{f.homepage})."
    end

    # Check for http:// GitHub homepage urls, https:// is preferred.
    # Note: only check homepages that are repo pages, not *.github.com hosts
    if f.homepage =~ %r[^http://github\.com/]
      problem "Use https:// URLs for homepages on GitHub (url is #{f.homepage})."
    end

    # Google Code homepages should end in a slash
    if f.homepage =~ %r[^https?://code\.google\.com/p/[^/]+[^/]$]
      problem "Google Code homepage should end with a slash (url is #{f.homepage})."
    end

    if f.homepage =~ %r[^http://.*\.github\.com/]
      problem "GitHub pages should use the github.io domain (url is #{f.homepage})"
    end

    urls = @specs.map(&:url)

    # Check GNU urls; doesn't apply to mirrors
    urls.grep(%r[^(?:https?|ftp)://(?!alpha).+/gnu/]) do |u|
      problem "\"ftpmirror.gnu.org\" is preferred for GNU software (url is #{u})."
    end

    # the rest of the checks apply to mirrors as well
    urls.concat(@specs.map(&:mirrors).flatten)

    # Check SourceForge urls
    urls.each do |p|
      # Is it a filedownload (instead of svnroot)
      next if p =~ %r[/svnroot/]
      next if p =~ %r[svn\.sourceforge]

      # Is it a sourceforge http(s) URL?
      next unless p =~ %r[^https?://.*\bsourceforge\.]

      if p =~ /(\?|&)use_mirror=/
        problem "Don't use #{$1}use_mirror in SourceForge urls (url is #{p})."
      end

      if p =~ /\/download$/
        problem "Don't use /download in SourceForge urls (url is #{p})."
      end

      if p =~ %r[^https?://sourceforge\.]
        problem "Use http://downloads.sourceforge.net to get geolocation (url is #{p})."
      end

      if p =~ %r[^https?://prdownloads\.]
        problem "Don't use prdownloads in SourceForge urls (url is #{p}).\n" +
                "\tSee: http://librelist.com/browser/homebrew/2011/1/12/prdownloads-is-bad/"
      end

      if p =~ %r[^http://\w+\.dl\.]
        problem "Don't use specific dl mirrors in SourceForge urls (url is #{p})."
      end
    end

    # Check for git:// GitHub repo urls, https:// is preferred.
    urls.grep(%r[^git://[^/]*github\.com/]) do |u|
      problem "Use https:// URLs for accessing GitHub repositories (url is #{u})."
    end

    # Check for http:// GitHub repo urls, https:// is preferred.
    urls.grep(%r[^http://github\.com/.*\.git$]) do |u|
      problem "Use https:// URLs for accessing GitHub repositories (url is #{u})."
    end

    # Use new-style archive downloads
    urls.select { |u| u =~ %r[https://.*/(?:tar|zip)ball/] and not u =~ %r[\.git$] }.each do |u|
      problem "Use /archive/ URLs for GitHub tarballs (url is #{u})."
    end

    if urls.any? { |u| u =~ /\.xz/ } && !f.deps.any? { |d| d.name == "xz" }
      problem "Missing a build-time dependency on 'xz'"
    end
  end

  # Checks stable/devel/head specs: version sanity, checksum format, and
  # redundant :using download strategies.
  def audit_specs
    problem "Head-only (no stable download)" if f.head_only?

    [:stable, :devel].each do |spec|
      s = f.send(spec)
      next if s.nil?

      if s.version.to_s.empty?
        problem "Invalid or missing #{spec} version"
      else
        version_text = s.version unless s.version.detected_from_url?
        version_url = Version.detect(s.url, s.specs)
        if version_url.to_s == version_text.to_s && s.version.instance_of?(Version)
          problem "#{spec} version #{version_text} is redundant with version scanned from URL"
        end
      end

      if s.version.to_s =~ /^v/
        problem "#{spec} version #{s.version} should not have a leading 'v'"
      end

      cksum = s.checksum
      next if cksum.nil?

      case cksum.hash_type
      when :md5
        problem "md5 checksums are deprecated, please use sha1 or sha256"
        next
      when :sha1 then len = 40
      when :sha256 then len = 64
      end

      if cksum.empty?
        problem "#{cksum.hash_type} is empty"
      else
        problem "#{cksum.hash_type} should be #{len} characters" unless cksum.hexdigest.length == len
        problem "#{cksum.hash_type} contains invalid characters" unless cksum.hexdigest =~ /^[a-fA-F0-9]+$/
        problem "#{cksum.hash_type} should be lowercase" unless cksum.hexdigest == cksum.hexdigest.downcase
      end
    end

    # Check for :using that is already detected from the url
    @specs.each do |s|
      next if s.using.nil?

      url_strategy = DownloadStrategyDetector.detect(s.url)
      using_strategy = DownloadStrategyDetector.detect('', s.using)

      problem "redundant :using specification in url or head" if url_strategy == using_strategy
    end
  end

  # Checks external patch urls: GitHub/Gist patches must pin a revision,
  # MacPorts patches must not reference trunk.
  def audit_patches
    Patches.new(f.patches).select(&:external?).each do |p|
      case p.url
      when %r[raw\.github\.com], %r[gist\.github\.com/raw]
        unless p.url =~ /[a-fA-F0-9]{40}/
          problem "GitHub/Gist patches should specify a revision:\n#{p.url}"
        end
      when %r[macports/trunk]
        problem "MacPorts patches should specify a revision instead of trunk:\n#{p.url}"
      end
    end
  end

  # Scans the formula text for style problems and deprecated constructs.
  # NOTE: relies on Perl-style match globals ($1, $2, $&) immediately
  # after each =~; do not reorder the checks.
  def audit_text
    if text =~ /<(Formula|AmazonWebServicesFormula|ScriptFileFormula|GithubGistFormula)/
      problem "Use a space in class inheritance: class Foo < #{$1}"
    end

    # Commented-out cmake support from default template
    if (text =~ /# system "cmake/)
      problem "Commented cmake call found"
    end

    # Comments from default template
    if (text =~ /# PLEASE REMOVE/)
      problem "Please remove default template comments"
    end
    if (text =~ /# if this fails, try separate make\/make install steps/)
      problem "Please remove default template comments"
    end
    if (text =~ /# if your formula requires any X11\/XQuartz components/)
      problem "Please remove default template comments"
    end
    if (text =~ /# if your formula's build system can't parallelize/)
      problem "Please remove default template comments"
    end

    # FileUtils is included in Formula
    if text =~ /FileUtils\.(\w+)/
      problem "Don't need 'FileUtils.' before #{$1}."
    end

    # Check for long inreplace block vars
    if text =~ /inreplace .* do \|(.{2,})\|/
      problem "\"inreplace <filenames> do |s|\" is preferred over \"|#{$1}|\"."
    end

    # Check for string interpolation of single values.
    if text =~ /(system|inreplace|gsub!|change_make_var!).*[ ,]"#\{([\w.]+)\}"/
      problem "Don't need to interpolate \"#{$2}\" with #{$1}"
    end

    # Check for string concatenation; prefer interpolation
    if text =~ /(#\{\w+\s*\+\s*['"][^}]+\})/
      problem "Try not to concatenate paths in string interpolation:\n #{$1}"
    end

    # Prefer formula path shortcuts in Pathname+
    if text =~ %r{\(\s*(prefix\s*\+\s*(['"])(bin|include|libexec|lib|sbin|share)[/'"])}
      problem "\"(#{$1}...#{$2})\" should be \"(#{$3}+...)\""
    end

    if text =~ %r[((man)\s*\+\s*(['"])(man[1-8])(['"]))]
      problem "\"#{$1}\" should be \"#{$4}\""
    end

    # Prefer formula path shortcuts in strings
    if text =~ %r[(\#\{prefix\}/(bin|include|libexec|lib|sbin|share))]
      problem "\"#{$1}\" should be \"\#{#{$2}}\""
    end

    if text =~ %r[((\#\{prefix\}/share/man/|\#\{man\}/)(man[1-8]))]
      problem "\"#{$1}\" should be \"\#{#{$3}}\""
    end

    if text =~ %r[((\#\{share\}/(man)))[/'"]]
      problem "\"#{$1}\" should be \"\#{#{$3}}\""
    end

    if text =~ %r[(\#\{prefix\}/share/(info|man))]
      problem "\"#{$1}\" should be \"\#{#{$2}}\""
    end

    # Commented-out depends_on
    if text =~ /#\s*depends_on\s+(.+)\s*$/
      problem "Commented-out dep #{$1}"
    end

    # No trailing whitespace, please
    if text =~ /[\t ]+$/
      problem "Trailing whitespace was found"
    end

    if text =~ /if\s+ARGV\.include\?\s+'--(HEAD|devel)'/
      problem "Use \"if ARGV.build_#{$1.downcase}?\" instead"
    end

    if text =~ /make && make/
      problem "Use separate make calls"
    end

    if text =~ /^[ ]*\t/
      problem "Use spaces instead of tabs for indentation"
    end

    # xcodebuild should specify SYMROOT
    if text =~ /system\s+['"]xcodebuild/ and not text =~ /SYMROOT=/
      problem "xcodebuild should be passed an explicit \"SYMROOT\""
    end

    if text =~ /ENV\.x11/
      problem "Use \"depends_on :x11\" instead of \"ENV.x11\""
    end

    # Avoid hard-coding compilers
    if text =~ %r{(system|ENV\[.+\]\s?=)\s?['"](/usr/bin/)?(gcc|llvm-gcc|clang)['" ]}
      problem "Use \"\#{ENV.cc}\" instead of hard-coding \"#{$3}\""
    end

    if text =~ %r{(system|ENV\[.+\]\s?=)\s?['"](/usr/bin/)?((g|llvm-g|clang)\+\+)['" ]}
      problem "Use \"\#{ENV.cxx}\" instead of hard-coding \"#{$3}\""
    end

    if text =~ /system\s+['"](env|export)/
      problem "Use ENV instead of invoking '#{$1}' to modify the environment"
    end

    if text =~ /version == ['"]HEAD['"]/
      problem "Use 'build.head?' instead of inspecting 'version'"
    end

    if text =~ /build\.include\?\s+['"]\-\-(.*)['"]/
      problem "Reference '#{$1}' without dashes"
    end

    if text =~ /build\.with\?\s+['"]-?-?with-(.*)['"]/
      problem "No double 'with': Use `build.with? '#{$1}'` to check for \"--with-#{$1}\""
    end

    if text =~ /build\.without\?\s+['"]-?-?without-(.*)['"]/
      problem "No double 'without': Use `build.without? '#{$1}'` to check for \"--without-#{$1}\""
    end

    if text =~ /ARGV\.(?!(debug\?|verbose\?|find[\(\s]))/
      problem "Use build instead of ARGV to check options"
    end

    if text =~ /def options/
      problem "Use new-style option definitions"
    end

    if text =~ /MACOS_VERSION/
      problem "Use MacOS.version instead of MACOS_VERSION"
    end

    cats = %w{leopard snow_leopard lion mountain_lion}.join("|")
    if text =~ /MacOS\.(?:#{cats})\?/
      problem "\"#{$&}\" is deprecated, use a comparison to MacOS.version instead"
    end

    if text =~ /skip_clean\s+:all/
      problem "`skip_clean :all` is deprecated; brew no longer strips symbols"
    end

    if text =~ /depends_on [A-Z][\w:]+\.new$/
      problem "`depends_on` can take requirement classes instead of instances"
    end

    if text =~ /^def (\w+).*$/
      problem "Define method #{$1.inspect} in the class body, not at the top-level"
    end

    # Fix: escape the dot so this doesn't match e.g. "ENVxfortran";
    # mirrors the ENV\.x11 check above.
    if text =~ /ENV\.fortran/
      problem "Use `depends_on :fortran` instead of `ENV.fortran`"
    end
  end

  # Audits Python-specific usage (deprecated helpers, install/caveats
  # scoping, PYTHONPATH, missing `depends_on :python`).
  def audit_python
    if text =~ /(def\s*)?which_python/
      problem "Replace `which_python` by `python.xy`, which returns e.g. 'python2.7'."
    end

    if text =~ /which\(?["']python/
      problem "Don't locate python with `which 'python'`, use `python.binary` instead"
    end

    # Checks that apply only to code in def install
    if text =~ /(\s*)def\s+install((.*\n)*?)(\1end)/
      install_body = $2

      if install_body =~ /system\(?\s*['"]python/
        problem "Instead of `system 'python', ...`, call `system python, ...`."
      end

      # Fix: match against install_body, not the whole text — this check
      # is scoped to `def install`, as the comment above says.
      if install_body =~ /system\(?\s*python\.binary/
        problem "Instead of `system python.binary, ...`, call `system python, ...`."
      end
    end

    # Checks that apply only to code in def caveats
    # Fix: the second alternative used to be a bare Regexp literal
    # (`|| /re/`), which is always truthy and never actually matched;
    # match it against `text` explicitly.
    if text =~ /(\s*)def\s+caveats((.*\n)*?)(\1end)/ || text =~ /(\s*)def\s+caveats;(.*?)end/
      caveats_body = $2
      if caveats_body =~ /[ \{=](python[23]?)\.(.*\w)/
        # So if in the body of caveats there is a `python.whatever` called,
        # check that there is a guard like `if python` or similar:
        python = $1
        method = $2
        unless caveats_body =~ /(if python[23]?)|(if build\.with\?\s?\(?['"]python)|(unless build.without\?\s?\(?['"]python)/
          problem "Please guard `#{python}.#{method}` like so `#{python}.#{method} if #{python}`"
        end
      end
    end

    if f.requirements.any? { |r| r.kind_of?(PythonInstalled) }
      # Don't check this for all formulae, because some are allowed to set the
      # PYTHONPATH. E.g. python.rb itself needs to set it.
      if text =~ /ENV\.append.*PYTHONPATH/ || text =~ /ENV\[['"]PYTHONPATH['"]\]\s*=[^=]/
        problem "Don't set the PYTHONPATH, instead declare `depends_on :python`."
      end
    else
      # So if there is no PythonInstalled requirement, we can check if the
      # formula still uses python and should add a `depends_on :python`
      unless f.name.to_s =~ /(pypy[0-9]*)|(python[0-9]*)/
        if text =~ /system.["' ]?python([0-9"'])?/
          problem "If the formula uses Python, it should declare so by `depends_on :python#{$1}`"
        end
        if text =~ /setup\.py/
          problem <<-EOS.undent
            If the formula installs Python bindings you should declare `depends_on :python[3]`"
          EOS
        end
      end
    end

    # Todo:
    # The python do ... end block is possibly executed twice. Once for
    # python 2.x and once for 3.x. So if a `system 'make'` is called, a
    # `system 'make clean'` should also be called at the end of the block.
  end

  # Runs every audit pass in order; callers then read #problems.
  def audit
    audit_file
    audit_specs
    audit_urls
    audit_deps
    audit_conflicts
    audit_patches
    audit_text
    audit_python
  end

  private

  # Records a single audit problem message.
  def problem p
    @problems << p
  end
end
|
require 'formula'
require 'utils'
# Use "brew audit --strict" to enable even stricter checks.
# True when the user asked for strict mode.
def strict?
  ARGV.flag?("--strict")
end
# Formulae to audit: every formula when none are named on the command
# line, otherwise just the named ones.
def ff
  ARGV.named.empty? ? Formula.all : ARGV.formulae
end
# Scans the formula source +text+ for style and template problems and
# returns an array of human-readable problem strings. +name+ is the
# formula name (used to exempt gfortran from matching its own caveats).
# NOTE: relies on Perl-style match globals ($1, $2) immediately after
# each =~, so the order of checks must not be rearranged.
def audit_formula_text name, text
  problems = []
  if text =~ /<(Formula|AmazonWebServicesFormula)/
    problems << " * Use a space in class inheritance: class Foo < #{$1}"
  end
  # Commented-out cmake support from default template
  if (text =~ /# depends_on 'cmake'/) or (text =~ /# system "cmake/)
    problems << " * Commented cmake support found."
  end
  # 2 (or more in an if block) spaces before depends_on, please
  if text =~ /^\ ?depends_on/
    problems << " * Check indentation of 'depends_on'."
  end
  # FileUtils is included in Formula
  if text =~ /FileUtils\.(\w+)/
    problems << " * Don't need 'FileUtils.' before #{$1}."
  end
  # Check for long inreplace block vars
  if text =~ /inreplace .* do \|(.{2,})\|/
    problems << " * \"inreplace <filenames> do |s|\" is preferred over \"|#{$1}|\"."
  end
  # Check for string interpolation of single values.
  if text =~ /(system|inreplace|gsub!|change_make_var!) .* ['"]#\{(\w+)\}['"]/
    problems << " * Don't need to interpolate \"#{$2}\" with #{$1}"
  end
  # Check for string concatenation; prefer interpolation
  if text =~ /(#\{\w+\s*\+\s*['"][^}]+\})/
    problems << " * Try not to concatenate paths in string interpolation:\n #{$1}"
  end
  # Prefer formula path shortcuts in Pathname+
  if text =~ %r{\(\s*(prefix\s*\+\s*(['"])(bin|include|lib|libexec|sbin|share))}
    problems << " * \"(#{$1}...#{$2})\" should be \"(#{$3}+...)\""
  end
  if text =~ %r[((man)\s*\+\s*(['"])(man[1-8])(['"]))]
    problems << " * \"#{$1}\" should be \"#{$4}\""
  end
  # Prefer formula path shortcuts in strings
  if text =~ %r[(\#\{prefix\}/(bin|include|lib|libexec|sbin|share))]
    problems << " * \"#{$1}\" should be \"\#{#{$2}}\""
  end
  if text =~ %r[((\#\{prefix\}/share/man/|\#\{man\}/)(man[1-8]))]
    problems << " * \"#{$1}\" should be \"\#{#{$3}}\""
  end
  if text =~ %r[((\#\{share\}/(man)))[/'"]]
    problems << " * \"#{$1}\" should be \"\#{#{$3}}\""
  end
  if text =~ %r[(\#\{prefix\}/share/(info|man))]
    problems << " * \"#{$1}\" should be \"\#{#{$2}}\""
  end
  # Empty checksums
  if text =~ /md5\s+(\'\'|\"\")/
    problems << " * md5 is empty"
  end
  if text =~ /sha1\s+(\'\'|\"\")/
    problems << " * sha1 is empty"
  end
  # Commented-out depends_on
  if text =~ /#\s*depends_on\s+(.+)\s*$/
    problems << " * Commented-out dep #{$1}."
  end
  # No trailing whitespace, please
  if text =~ /(\t|[ ])+$/
    problems << " * Trailing whitespace was found."
  end
  if text =~ /if\s+ARGV\.include\?\s+'--HEAD'/
    problems << " * Use \"if ARGV.build_head?\" instead"
  end
  if text =~ /make && make/
    problems << " * Use separate make calls."
  end
  # Strict mode only.
  if text =~ /^\t/
    problems << " * Use spaces instead of tabs for indentation"
  end if strict?
  # Formula depends_on gfortran
  if text =~ /^\s*depends_on\s*(\'|\")gfortran(\'|\").*/
    problems << " * Use ENV.fortran during install instead of depends_on 'gfortran'"
  end unless name == "gfortran" # Gfortran itself has this text in the caveats
  # xcodebuild should specify SYMROOT (strict mode only)
  if text =~ /xcodebuild/ and not text =~ /SYMROOT=/
    problems << " * xcodebuild should be passed an explicit \"SYMROOT\""
  end if strict?
  return problems
end
# Cross-checks options the formula body actually tests (via
# ARGV.include?) against the options the formula documents, reporting
# undocumented and unused options.
def audit_formula_options f, text
  problems = []

  # Options the body actually tests for.
  used = text.scan(/ARGV\.include\?[ ]*\(?(['"])(.+?)\1/).map { |m| m[1] }
  used = used.reject { |o| o.include?("#") }.uniq

  # Options the formula declares; tolerate formulae whose options blow up.
  documented = begin
    f.options.map { |o| o[0] }.reject { |o| o.include?("=") }
  rescue
    []
  end

  used.each do |o|
    next if o == '--HEAD'
    problems << " * Option #{o} is not documented" unless documented.include?(o)
  end

  documented.each do |o|
    next if o == '--universal'
    problems << " * Option #{o} is unused" unless used.include?(o)
  end

  problems
end
# Audits the formula's homepage, download and head urls for known-bad
# hosting patterns (SourceForge mirror params, non-preferred Debian
# mirrors, git:// GitHub urls). Returns an array of problem strings.
def audit_formula_urls f
  problems = []
  unless f.homepage =~ %r[^https?://]
    problems << " * The homepage should start with http or https."
  end
  # url/head may raise for formulae that lack them; treat as absent.
  urls = [(f.url rescue nil), (f.head rescue nil)].reject {|p| p.nil?}
  # Check SourceForge urls
  urls.each do |p|
    # Is it a filedownload (instead of svnroot)
    next if p =~ %r[/svnroot/]
    next if p =~ %r[svn\.sourceforge]
    # Is it a sourceforge http(s) URL?
    # Fix: the pattern was `^http?://` ("htt" or "http"), which never
    # matched https:// urls, so https SourceForge urls skipped every check.
    next unless p =~ %r[^https?://.*\bsourceforge\.]
    if p =~ /\?use_mirror=/
      problems << " * Update this url (don't use ?use_mirror)."
    end
    if p =~ /\/download$/
      problems << " * Update this url (don't use /download)."
    end
    if p =~ %r[^http://prdownloads\.]
      problems << " * Update this url (don't use prdownloads)."
    end
    if p =~ %r[^http://\w+\.dl\.]
      problems << " * Update this url (don't use specific dl mirrors)."
    end
  end
  # Check Debian urls (strict mode only)
  urls.each do |p|
    next unless p =~ %r[/debian/pool/]
    unless p =~ %r[^http://mirrors\.kernel\.org/debian/pool/]
      problems << " * \"mirrors.kernel.org\" is the preferred mirror for debian software."
    end
  end if strict?
  # Check for git:// urls; https:// is preferred.
  urls.each do |p|
    if p =~ %r[^git://github\.com/]
      problems << " * Use https:// URLs for accessing repositories on GitHub."
    end
  end
  return problems
end
# Audits properties of the instantiated formula object: aliased or
# unresolvable dependencies, discouraged dependencies, and the homepage
# shape. Returns an array of problem strings.
def audit_formula_instance f
  problems = []
  # Don't depend_on aliases; use full name
  aliases = Formula.aliases
  f.deps.select {|d| aliases.include? d}.each do |d|
    problems << " * Dep #{d} is an alias; switch to the real name."
  end
  # Check for things we don't like to depend on.
  # We allow non-Homebrew installs whenever possible.
  f.deps.each do |d|
    begin
      # Called only for its side effect: raises when the dep is unknown.
      # (The previous `dep_f =` assignment was never read.)
      Formula.factory d
    rescue
      problems << " * Can't find dependency \"#{d}\"."
    end
    case d
    when "git"
      problems << " * Don't use Git as a dependency; we allow non-Homebrew git installs."
    end
  end
  # Google Code homepages should end in a slash
  if f.homepage =~ %r[^https?://code\.google\.com/p/[^/]+[^/]$]
    problems << " * Google Code homepage should end with a slash."
  end
  return problems
end
# Flags caveats that prefix shell commands with a "$" prompt.
# Only runs in strict mode.
def audit_formula_caveats f
  problems = []
  if strict? && f.caveats.to_s =~ /^\s*\$\s+/
    problems << " * caveats should not use '$' prompts in multiline commands."
  end
  problems
end
module Homebrew extend self
  # Drives the audit over every target formula; prints grouped problem
  # reports and exits non-zero when anything was flagged.
  def audit
    found_errors = false
    ff.each do |formula|
      issues = []
      issues.concat(audit_formula_instance(formula))
      issues.concat(audit_formula_urls(formula))
      issues.concat(audit_formula_caveats(formula))

      # Formula files must be plain 0644 regular files.
      mode = File.stat(formula.path).mode
      issues << " * permissions wrong; chmod 644 #{formula.path}" unless mode.to_s(8) == "100644"

      contents = File.open(formula.path, "r") { |io| io.read }

      # DATA with no __END__
      if contents =~ /\bDATA\b/ && contents !~ /^\s*__END__\s*$/
        issues << " * 'DATA' was found, but no '__END__'"
      end

      issues << ' * invalid or missing version' if formula.version.to_s.empty?

      # Don't try remaining audits on text in __END__
      body = contents.split("__END__")[0].strip
      issues.concat(audit_formula_text(formula.name, body))
      issues.concat(audit_formula_options(formula, body))

      next if issues.empty?
      found_errors = true
      puts "#{formula.name}:"
      puts issues * "\n"
      puts
    end
    exit 1 if found_errors
  end
end
audit: check for redundant 'version'
require 'formula'
require 'utils'
# Use "brew audit --strict" to enable even stricter checks.
# True when the user asked for strict mode.
def strict?
  ARGV.flag?("--strict")
end
# Formulae to audit: every formula when none are named on the command
# line, otherwise just the named ones.
def ff
  ARGV.named.empty? ? Formula.all : ARGV.formulae
end
# Scans the formula source +text+ for style and template problems and
# returns an array of human-readable problem strings. +name+ is the
# formula name (used to exempt gfortran from matching its own caveats).
# NOTE: relies on Perl-style match globals ($1, $2) immediately after
# each =~, so the order of checks must not be rearranged.
def audit_formula_text name, text
  problems = []
  if text =~ /<(Formula|AmazonWebServicesFormula)/
    problems << " * Use a space in class inheritance: class Foo < #{$1}"
  end
  # Commented-out cmake support from default template
  if (text =~ /# depends_on 'cmake'/) or (text =~ /# system "cmake/)
    problems << " * Commented cmake support found."
  end
  # 2 (or more in an if block) spaces before depends_on, please
  if text =~ /^\ ?depends_on/
    problems << " * Check indentation of 'depends_on'."
  end
  # FileUtils is included in Formula
  if text =~ /FileUtils\.(\w+)/
    problems << " * Don't need 'FileUtils.' before #{$1}."
  end
  # Check for long inreplace block vars
  if text =~ /inreplace .* do \|(.{2,})\|/
    problems << " * \"inreplace <filenames> do |s|\" is preferred over \"|#{$1}|\"."
  end
  # Check for string interpolation of single values.
  if text =~ /(system|inreplace|gsub!|change_make_var!) .* ['"]#\{(\w+)\}['"]/
    problems << " * Don't need to interpolate \"#{$2}\" with #{$1}"
  end
  # Check for string concatenation; prefer interpolation
  if text =~ /(#\{\w+\s*\+\s*['"][^}]+\})/
    problems << " * Try not to concatenate paths in string interpolation:\n #{$1}"
  end
  # Prefer formula path shortcuts in Pathname+
  if text =~ %r{\(\s*(prefix\s*\+\s*(['"])(bin|include|lib|libexec|sbin|share))}
    problems << " * \"(#{$1}...#{$2})\" should be \"(#{$3}+...)\""
  end
  if text =~ %r[((man)\s*\+\s*(['"])(man[1-8])(['"]))]
    problems << " * \"#{$1}\" should be \"#{$4}\""
  end
  # Prefer formula path shortcuts in strings
  if text =~ %r[(\#\{prefix\}/(bin|include|lib|libexec|sbin|share))]
    problems << " * \"#{$1}\" should be \"\#{#{$2}}\""
  end
  if text =~ %r[((\#\{prefix\}/share/man/|\#\{man\}/)(man[1-8]))]
    problems << " * \"#{$1}\" should be \"\#{#{$3}}\""
  end
  if text =~ %r[((\#\{share\}/(man)))[/'"]]
    problems << " * \"#{$1}\" should be \"\#{#{$3}}\""
  end
  if text =~ %r[(\#\{prefix\}/share/(info|man))]
    problems << " * \"#{$1}\" should be \"\#{#{$2}}\""
  end
  # Empty checksums
  if text =~ /md5\s+(\'\'|\"\")/
    problems << " * md5 is empty"
  end
  if text =~ /sha1\s+(\'\'|\"\")/
    problems << " * sha1 is empty"
  end
  # Commented-out depends_on
  if text =~ /#\s*depends_on\s+(.+)\s*$/
    problems << " * Commented-out dep #{$1}."
  end
  # No trailing whitespace, please
  if text =~ /(\t|[ ])+$/
    problems << " * Trailing whitespace was found."
  end
  if text =~ /if\s+ARGV\.include\?\s+'--HEAD'/
    problems << " * Use \"if ARGV.build_head?\" instead"
  end
  if text =~ /make && make/
    problems << " * Use separate make calls."
  end
  # Strict mode only.
  if text =~ /^\t/
    problems << " * Use spaces instead of tabs for indentation"
  end if strict?
  # Formula depends_on gfortran
  if text =~ /^\s*depends_on\s*(\'|\")gfortran(\'|\").*/
    problems << " * Use ENV.fortran during install instead of depends_on 'gfortran'"
  end unless name == "gfortran" # Gfortran itself has this text in the caveats
  # xcodebuild should specify SYMROOT (strict mode only)
  if text =~ /xcodebuild/ and not text =~ /SYMROOT=/
    problems << " * xcodebuild should be passed an explicit \"SYMROOT\""
  end if strict?
  return problems
end
# Cross-checks options the formula body actually tests (via
# ARGV.include?) against the options the formula documents, reporting
# undocumented and unused options.
def audit_formula_options f, text
  problems = []

  # Options the body actually tests for.
  used = text.scan(/ARGV\.include\?[ ]*\(?(['"])(.+?)\1/).map { |m| m[1] }
  used = used.reject { |o| o.include?("#") }.uniq

  # Options the formula declares; tolerate formulae whose options blow up.
  documented = begin
    f.options.map { |o| o[0] }.reject { |o| o.include?("=") }
  rescue
    []
  end

  used.each do |o|
    next if o == '--HEAD'
    problems << " * Option #{o} is not documented" unless documented.include?(o)
  end

  documented.each do |o|
    next if o == '--universal'
    problems << " * Option #{o} is unused" unless used.include?(o)
  end

  problems
end
# Flags a `version` DSL entry that merely repeats the version that would
# be scanned from the url anyway. (+text+ is unused; kept so the call
# sites stay uniform with the other audit_formula_* helpers.)
def audit_formula_version f, text
  declared = f.class.send('version').to_s
  scanned = Pathname.new(f.url).version
  return [" * version #{declared} is redundant with version scanned from url"] if scanned == declared
  []
end
# Audits the formula's homepage, download and head urls for known-bad
# hosting patterns (SourceForge mirror params, non-preferred Debian
# mirrors, git:// GitHub urls). Returns an array of problem strings.
def audit_formula_urls f
  problems = []
  unless f.homepage =~ %r[^https?://]
    problems << " * The homepage should start with http or https."
  end
  # url/head may raise for formulae that lack them; treat as absent.
  urls = [(f.url rescue nil), (f.head rescue nil)].reject {|p| p.nil?}
  # Check SourceForge urls
  urls.each do |p|
    # Is it a filedownload (instead of svnroot)
    next if p =~ %r[/svnroot/]
    next if p =~ %r[svn\.sourceforge]
    # Is it a sourceforge http(s) URL?
    # Fix: the pattern was `^http?://` ("htt" or "http"), which never
    # matched https:// urls, so https SourceForge urls skipped every check.
    next unless p =~ %r[^https?://.*\bsourceforge\.]
    if p =~ /\?use_mirror=/
      problems << " * Update this url (don't use ?use_mirror)."
    end
    if p =~ /\/download$/
      problems << " * Update this url (don't use /download)."
    end
    if p =~ %r[^http://prdownloads\.]
      problems << " * Update this url (don't use prdownloads)."
    end
    if p =~ %r[^http://\w+\.dl\.]
      problems << " * Update this url (don't use specific dl mirrors)."
    end
  end
  # Check Debian urls (strict mode only)
  urls.each do |p|
    next unless p =~ %r[/debian/pool/]
    unless p =~ %r[^http://mirrors\.kernel\.org/debian/pool/]
      problems << " * \"mirrors.kernel.org\" is the preferred mirror for debian software."
    end
  end if strict?
  # Check for git:// urls; https:// is preferred.
  urls.each do |p|
    if p =~ %r[^git://github\.com/]
      problems << " * Use https:// URLs for accessing repositories on GitHub."
    end
  end
  return problems
end
# Audits properties of the instantiated formula object: aliased or
# unresolvable dependencies, discouraged dependencies, and the homepage
# shape. Returns an array of problem strings.
def audit_formula_instance f
  problems = []
  # Don't depend_on aliases; use full name
  aliases = Formula.aliases
  f.deps.select {|d| aliases.include? d}.each do |d|
    problems << " * Dep #{d} is an alias; switch to the real name."
  end
  # Check for things we don't like to depend on.
  # We allow non-Homebrew installs whenever possible.
  f.deps.each do |d|
    begin
      # Called only for its side effect: raises when the dep is unknown.
      # (The previous `dep_f =` assignment was never read.)
      Formula.factory d
    rescue
      problems << " * Can't find dependency \"#{d}\"."
    end
    case d
    when "git"
      problems << " * Don't use Git as a dependency; we allow non-Homebrew git installs."
    end
  end
  # Google Code homepages should end in a slash
  if f.homepage =~ %r[^https?://code\.google\.com/p/[^/]+[^/]$]
    problems << " * Google Code homepage should end with a slash."
  end
  return problems
end
# Flags caveats that prefix shell commands with a "$" prompt.
# Only runs in strict mode.
def audit_formula_caveats f
  problems = []
  if strict? && f.caveats.to_s =~ /^\s*\$\s+/
    problems << " * caveats should not use '$' prompts in multiline commands."
  end
  problems
end
module Homebrew extend self
  # Drives the audit over every target formula; prints grouped problem
  # reports and exits non-zero when anything was flagged.
  def audit
    found_errors = false
    ff.each do |formula|
      issues = []
      issues.concat(audit_formula_instance(formula))
      issues.concat(audit_formula_urls(formula))
      issues.concat(audit_formula_caveats(formula))

      # Formula files must be plain 0644 regular files.
      mode = File.stat(formula.path).mode
      issues << " * permissions wrong; chmod 644 #{formula.path}" unless mode.to_s(8) == "100644"

      contents = File.open(formula.path, "r") { |io| io.read }

      # DATA with no __END__
      if contents =~ /\bDATA\b/ && contents !~ /^\s*__END__\s*$/
        issues << " * 'DATA' was found, but no '__END__'"
      end

      issues << ' * invalid or missing version' if formula.version.to_s.empty?

      # Don't try remaining audits on text in __END__
      body = contents.split("__END__")[0].strip
      issues.concat(audit_formula_text(formula.name, body))
      issues.concat(audit_formula_options(formula, body))
      issues.concat(audit_formula_version(formula, body))

      next if issues.empty?
      found_errors = true
      puts "#{formula.name}:"
      puts issues * "\n"
      puts
    end
    exit 1 if found_errors
  end
end
|
require 'formula'
require 'utils'
# The formulae to audit: every formula when no names were given on the
# command line, otherwise only the named ones.
def ff
  ARGV.named.empty? ? Formula.all : ARGV.formulae
end
# Lints a formula's source text and returns an array of problem strings.
# Pure text analysis — no filesystem or Formula-object access.
def audit_formula_text name, text
  problems = []

  # Space missing in class inheritance, e.g. "class Foo <Formula".
  # Extended to also cover the specialty base classes (ScriptFileFormula,
  # GithubGistFormula), consistent with the newer audit code in this file.
  if text =~ /<(Formula|AmazonWebServicesFormula|ScriptFileFormula|GithubGistFormula)/
    problems << " * Use a space in class inheritance: class Foo < #{$1}"
  end

  # Commented-out cmake support from default template
  if (text =~ /# depends_on 'cmake'/) or (text =~ /# system "cmake/)
    problems << " * Commented cmake support found."
  end

  # 2 (or more in an if block) spaces before depends_on, please
  if text =~ /^\ ?depends_on/
    problems << " * Check indentation of 'depends_on'."
  end

  # build tools should be flagged properly
  build_deps = %w{autoconf automake boost-build cmake
                  imake libtool pkg-config scons smake}
  if text =~ /depends_on ['"](#{build_deps*'|'})['"]$/
    problems << " * #{$1} dependency should be \"depends_on '#{$1}' => :build\""
  end

  # FileUtils is included in Formula
  if text =~ /FileUtils\.(\w+)/
    problems << " * Don't need 'FileUtils.' before #{$1}."
  end

  # Check for long inreplace block vars
  if text =~ /inreplace .* do \|(.{2,})\|/
    problems << " * \"inreplace <filenames> do |s|\" is preferred over \"|#{$1}|\"."
  end

  # Check for string interpolation of single values.
  if text =~ /(system|inreplace|gsub!|change_make_var!) .* ['"]#\{(\w+)\}['"]/
    problems << " * Don't need to interpolate \"#{$2}\" with #{$1}"
  end

  # Check for string concatenation; prefer interpolation
  if text =~ /(#\{\w+\s*\+\s*['"][^}]+\})/
    problems << " * Try not to concatenate paths in string interpolation:\n #{$1}"
  end

  # Prefer formula path shortcuts in Pathname+
  if text =~ %r{\(\s*(prefix\s*\+\s*(['"])(bin|include|libexec|lib|sbin|share))}
    problems << " * \"(#{$1}...#{$2})\" should be \"(#{$3}+...)\""
  end
  if text =~ %r[((man)\s*\+\s*(['"])(man[1-8])(['"]))]
    problems << " * \"#{$1}\" should be \"#{$4}\""
  end

  # Prefer formula path shortcuts in strings
  if text =~ %r[(\#\{prefix\}/(bin|include|libexec|lib|sbin|share))]
    problems << " * \"#{$1}\" should be \"\#{#{$2}}\""
  end
  if text =~ %r[((\#\{prefix\}/share/man/|\#\{man\}/)(man[1-8]))]
    problems << " * \"#{$1}\" should be \"\#{#{$3}}\""
  end
  if text =~ %r[((\#\{share\}/(man)))[/'"]]
    problems << " * \"#{$1}\" should be \"\#{#{$3}}\""
  end
  if text =~ %r[(\#\{prefix\}/share/(info|man))]
    problems << " * \"#{$1}\" should be \"\#{#{$2}}\""
  end

  # Commented-out depends_on
  if text =~ /#\s*depends_on\s+(.+)\s*$/
    problems << " * Commented-out dep #{$1}."
  end

  # No trailing whitespace, please
  if text =~ /(\t|[ ])+$/
    problems << " * Trailing whitespace was found."
  end

  if text =~ /if\s+ARGV\.include\?\s+'--(HEAD|devel)'/
    problems << " * Use \"if ARGV.build_#{$1.downcase}?\" instead"
  end

  if text =~ /make && make/
    problems << " * Use separate make calls."
  end

  if text =~ /^[ ]*\t/
    problems << " * Use spaces instead of tabs for indentation"
  end

  # xcodebuild should specify SYMROOT
  if text =~ /system\s+['"]xcodebuild/ and not text =~ /SYMROOT=/
    problems << " * xcodebuild should be passed an explicit \"SYMROOT\""
  end

  # using ARGV.flag? for formula options is generally a bad thing
  if text =~ /ARGV\.flag\?/
    problems << " * Use 'ARGV.include?' instead of 'ARGV.flag?'"
  end

  # MacPorts patches should specify a revision, not trunk
  if text =~ %r[macports/trunk]
    problems << " * MacPorts patches should specify a revision instead of trunk"
  end

  # Avoid hard-coding compilers
  if text =~ %r[(system|ENV\[.+\]\s?=)\s?['"](/usr/bin/)?(gcc|llvm-gcc|clang)['" ]]
    problems << " * Use \"\#{ENV.cc}\" instead of hard-coding \"#{$3}\""
  end
  if text =~ %r[(system|ENV\[.+\]\s?=)\s?['"](/usr/bin/)?((g|llvm-g|clang)\+\+)['" ]]
    problems << " * Use \"\#{ENV.cxx}\" instead of hard-coding \"#{$3}\""
  end

  return problems
end
# Cross-checks the options the install code actually tests for (via
# ARGV.include?) against the options documented in the formula's DSL.
def audit_formula_options f, text
  problems = []

  # Options the install code inspects at runtime.
  used = []
  text.scan(/ARGV\.include\?[ ]*\(?(['"])(.+?)\1/) { |_quote, opt| used << opt }
  used.reject! { |opt| opt.include?("#") }
  used.uniq!

  # Options declared via the DSL; any failure while reading them is
  # treated as "none documented" (same rescue as before).
  documented = begin
    f.options.map { |o| o[0] }.reject { |o| o.include?("=") }
  rescue
    []
  end

  used.each do |opt|
    next if opt == '--HEAD' || opt == '--devel'
    problems << " * Option #{opt} is not documented" unless documented.include?(opt)
  end

  documented.each do |opt|
    next if opt == '--universal' && text =~ /ARGV\.build_universal\?/
    next if opt == '--32-bit' && text =~ /ARGV\.build_32_bit\?/
    problems << " * Option #{opt} is unused" unless used.include?(opt)
  end

  problems
end
# Flags an explicit `version` DSL entry that merely repeats the version
# already derivable from the url. (Pathname#version is presumably a
# Homebrew extension, not plain stdlib — verify against extend/pathname.)
def audit_formula_version f, text
  dsl_version = f.class.send('version').to_s
  url_version = Pathname.new(f.url).version
  if url_version == dsl_version
    [" * version #{dsl_version} is redundant with version scanned from url"]
  else
    []
  end
end
# Audits the homepage, stable/head urls and mirror urls of a formula.
# Returns an array of problem strings.
def audit_formula_urls f
  problems = []

  unless f.homepage =~ %r[^https?://]
    problems << " * The homepage should start with http or https."
  end

  # Google Code homepages should end in a slash
  if f.homepage =~ %r[^https?://code\.google\.com/p/[^/]+[^/]$]
    problems << " * Google Code homepage should end with a slash."
  end

  urls = [(f.url rescue nil), (f.head rescue nil)].compact
  urls.uniq! # head-only formulae result in duplicate entries

  # Check GNU urls; doesn't apply to mirrors
  urls.each do |p|
    if p =~ %r[^(https?|ftp)://(.+)/gnu/]
      problems << " * \"ftpmirror.gnu.org\" is preferred for GNU software."
    end
  end

  # The rest of the checks apply to mirrors as well.
  # BUGFIX: this previously pushed `m.values_at(:url).to_s`, which on
  # Ruby >= 1.9 yields an inspect-style string ('["http://…"]'), so mirror
  # urls never matched any of the checks below. Push the value itself.
  f.mirrors.each do |m|
    url = (m[:url] rescue nil)
    urls << url if url
  end

  # Check SourceForge urls
  urls.each do |p|
    # Is it a filedownload (instead of svnroot)
    next if p =~ %r[/svnroot/]
    next if p =~ %r[svn\.sourceforge]
    # Is it a sourceforge http(s) URL?
    next unless p =~ %r[^https?://.*\bsourceforge\.]
    if p =~ /(\?|&)use_mirror=/
      problems << " * Update this url (don't use #{$1}use_mirror)."
    end
    if p =~ /\/download$/
      problems << " * Update this url (don't use /download)."
    end
    if p =~ %r[^http://prdownloads\.]
      problems << " * Update this url (don't use prdownloads)."
    end
    if p =~ %r[^http://\w+\.dl\.]
      problems << " * Update this url (don't use specific dl mirrors)."
    end
  end

  # Check for git:// urls; https:// is preferred.
  urls.each do |p|
    if p =~ %r[^git://github\.com/]
      problems << " * Use https:// URLs for accessing repositories on GitHub."
    end
  end

  problems
end
# Checks the ordering and shape of the devel/head spec blocks relative to
# the stable url in the formula source text.
def audit_formula_specs text
  problems = []
  problems << " * 'devel' block found before stable 'url'" if text =~ /devel .+(url '.+').+(url '.+')/m
  problems << " * 'devel' block found before 'head'" if text =~ /devel .+(head '.+')/m
  problems << " * Empty 'devel' block found" if text =~ /devel do\s+end/
  problems
end
# Audits a loaded Formula instance: aliased or unresolvable deps, deps we
# prefer users install outside Homebrew, gfortran, a missing version, and
# the length/charset/case of md5/sha1/sha256 checksums.
def audit_formula_instance f
problems = []
# Don't depend_on aliases; use full name
aliases = Formula.aliases
f.deps.select {|d| aliases.include? d}.each do |d|
problems << " * Dep #{d} is an alias; switch to the real name."
end
# Check for things we don't like to depend on.
# We allow non-Homebrew installs whenever possible.
f.deps.each do |d|
begin
dep_f = Formula.factory d
rescue
problems << " * Can't find dependency \"#{d}\"."
end
case d
when "git", "python", "ruby", "emacs", "mysql", "postgresql", "mercurial"
problems << <<-EOS
* Don't use #{d} as a dependency. We allow non-Homebrew
#{d} installations.
EOS
when 'gfortran'
problems << " * Use ENV.fortran during install instead of depends_on 'gfortran'"
end
end
problems += [' * invalid or missing version'] if f.version.to_s.empty?
# Validate whichever checksum ivars are present on the formula.
%w[md5 sha1 sha256].each do |checksum|
hash = f.instance_variable_get("@#{checksum}")
next if hash.nil?
hash = hash.strip
len = case checksum
when 'md5' then 32
when 'sha1' then 40
when 'sha256' then 64
end
if hash.empty?
problems << " * #{checksum} is empty"
else
problems << " * #{checksum} should be #{len} characters" unless hash.length == len
problems << " * #{checksum} contains invalid characters" unless hash =~ /^[a-fA-F0-9]+$/
problems << " * #{checksum} should be lowercase" unless hash == hash.downcase
end
end
return problems
end
# CLI entry point for `brew audit`: runs all audits over the selected
# formulae, prints grouped problems, a summary line, and exits non-zero
# when anything was found.
module Homebrew extend self
def audit
errors = false
brew_count = 0
problem_count = 0
ff.each do |f|
problems = []
if f.unstable and f.standard.nil?
problems += [' * head-only formula']
end
problems += audit_formula_instance f
problems += audit_formula_urls f
# Formula files must be plain 0644 (readable, non-executable).
perms = File.stat(f.path).mode
if perms.to_s(8) != "100644"
problems << " * permissions wrong; chmod 644 #{f.path}"
end
text = ""
File.open(f.path, "r") { |afile| text = afile.read }
# DATA with no __END__
if (text =~ /\bDATA\b/) and not (text =~ /^\s*__END__\s*$/)
problems << " * 'DATA' was found, but no '__END__'"
end
# files should end with a newline
# ('.' does not match "\n", so /.+\z/ only matches when the last
# line has content with no trailing newline)
if text =~ /.+\z/
problems << " * File should end with a newline"
end
# Don't try remaining audits on text in __END__
text_without_patch = (text.split("__END__")[0]).strip()
problems += audit_formula_text(f.name, text_without_patch)
problems += audit_formula_options(f, text_without_patch)
problems += audit_formula_version(f, text_without_patch)
problems += audit_formula_specs(text_without_patch)
unless problems.empty?
errors = true
puts "#{f.name}:"
puts problems * "\n"
puts
brew_count += 1
problem_count += problems.size
end
end
if errors
puts "#{problem_count} problems in #{brew_count} brews"
exit 1
end
end
end
Add specialty formulae to the inheritance check
require 'formula'
require 'utils'
# The formulae to audit: every formula when no names were given on the
# command line, otherwise only the named ones.
def ff
  ARGV.named.empty? ? Formula.all : ARGV.formulae
end
# Lints a formula's source text and returns an array of problem strings.
# Pure text analysis — no filesystem or Formula-object access.
def audit_formula_text name, text
problems = []
# Space missing in class inheritance, e.g. "class Foo <Formula".
if text =~ /<(Formula|AmazonWebServicesFormula|ScriptFileFormula|GithubGistFormula)/
problems << " * Use a space in class inheritance: class Foo < #{$1}"
end
# Commented-out cmake support from default template
if (text =~ /# depends_on 'cmake'/) or (text =~ /# system "cmake/)
problems << " * Commented cmake support found."
end
# 2 (or more in an if block) spaces before depends_on, please
if text =~ /^\ ?depends_on/
problems << " * Check indentation of 'depends_on'."
end
# build tools should be flagged properly
build_deps = %w{autoconf automake boost-build cmake
imake libtool pkg-config scons smake}
if text =~ /depends_on ['"](#{build_deps*'|'})['"]$/
problems << " * #{$1} dependency should be \"depends_on '#{$1}' => :build\""
end
# FileUtils is included in Formula
if text =~ /FileUtils\.(\w+)/
problems << " * Don't need 'FileUtils.' before #{$1}."
end
# Check for long inreplace block vars
if text =~ /inreplace .* do \|(.{2,})\|/
problems << " * \"inreplace <filenames> do |s|\" is preferred over \"|#{$1}|\"."
end
# Check for string interpolation of single values.
if text =~ /(system|inreplace|gsub!|change_make_var!) .* ['"]#\{(\w+)\}['"]/
problems << " * Don't need to interpolate \"#{$2}\" with #{$1}"
end
# Check for string concatenation; prefer interpolation
if text =~ /(#\{\w+\s*\+\s*['"][^}]+\})/
problems << " * Try not to concatenate paths in string interpolation:\n #{$1}"
end
# Prefer formula path shortcuts in Pathname+
if text =~ %r{\(\s*(prefix\s*\+\s*(['"])(bin|include|libexec|lib|sbin|share))}
problems << " * \"(#{$1}...#{$2})\" should be \"(#{$3}+...)\""
end
if text =~ %r[((man)\s*\+\s*(['"])(man[1-8])(['"]))]
problems << " * \"#{$1}\" should be \"#{$4}\""
end
# Prefer formula path shortcuts in strings
if text =~ %r[(\#\{prefix\}/(bin|include|libexec|lib|sbin|share))]
problems << " * \"#{$1}\" should be \"\#{#{$2}}\""
end
if text =~ %r[((\#\{prefix\}/share/man/|\#\{man\}/)(man[1-8]))]
problems << " * \"#{$1}\" should be \"\#{#{$3}}\""
end
if text =~ %r[((\#\{share\}/(man)))[/'"]]
problems << " * \"#{$1}\" should be \"\#{#{$3}}\""
end
if text =~ %r[(\#\{prefix\}/share/(info|man))]
problems << " * \"#{$1}\" should be \"\#{#{$2}}\""
end
# Commented-out depends_on
if text =~ /#\s*depends_on\s+(.+)\s*$/
problems << " * Commented-out dep #{$1}."
end
# No trailing whitespace, please
if text =~ /(\t|[ ])+$/
problems << " * Trailing whitespace was found."
end
if text =~ /if\s+ARGV\.include\?\s+'--(HEAD|devel)'/
problems << " * Use \"if ARGV.build_#{$1.downcase}?\" instead"
end
if text =~ /make && make/
problems << " * Use separate make calls."
end
if text =~ /^[ ]*\t/
problems << " * Use spaces instead of tabs for indentation"
end
# xcodebuild should specify SYMROOT
if text =~ /system\s+['"]xcodebuild/ and not text =~ /SYMROOT=/
problems << " * xcodebuild should be passed an explicit \"SYMROOT\""
end
# using ARGV.flag? for formula options is generally a bad thing
if text =~ /ARGV\.flag\?/
problems << " * Use 'ARGV.include?' instead of 'ARGV.flag?'"
end
# MacPorts patches should specify a revision, not trunk
if text =~ %r[macports/trunk]
problems << " * MacPorts patches should specify a revision instead of trunk"
end
# Avoid hard-coding compilers
if text =~ %r[(system|ENV\[.+\]\s?=)\s?['"](/usr/bin/)?(gcc|llvm-gcc|clang)['" ]]
problems << " * Use \"\#{ENV.cc}\" instead of hard-coding \"#{$3}\""
end
if text =~ %r[(system|ENV\[.+\]\s?=)\s?['"](/usr/bin/)?((g|llvm-g|clang)\+\+)['" ]]
problems << " * Use \"\#{ENV.cxx}\" instead of hard-coding \"#{$3}\""
end
return problems
end
# Cross-checks options the install code tests for (ARGV.include?) against
# the options documented in the formula's DSL; reports both undocumented
# and unused options.
def audit_formula_options f, text
problems = []
# Find possible options
options = []
text.scan(/ARGV\.include\?[ ]*\(?(['"])(.+?)\1/) { |m| options << m[1] }
options.reject! {|o| o.include? "#"}
options.uniq!
# Find documented options
begin
opts = f.options
documented_options = []
opts.each{ |o| documented_options << o[0] }
documented_options.reject! {|o| o.include? "="}
rescue
# Any failure reading the DSL options means "none documented".
documented_options = []
end
if options.length > 0
options.each do |o|
next if o == '--HEAD' || o == '--devel'
problems << " * Option #{o} is not documented" unless documented_options.include? o
end
end
if documented_options.length > 0
documented_options.each do |o|
next if o == '--universal' and text =~ /ARGV\.build_universal\?/
next if o == '--32-bit' and text =~ /ARGV\.build_32_bit\?/
problems << " * Option #{o} is unused" unless options.include? o
end
end
return problems
end
# Flags an explicit `version` DSL entry that repeats the version already
# derivable from the url. (Pathname#version is presumably a Homebrew
# extension, not plain stdlib — verify against extend/pathname.)
def audit_formula_version f, text
# Version as defined in the DSL (or nil)
version_text = f.class.send('version').to_s
# Version as determined from the URL
version_url = Pathname.new(f.url).version
if version_url == version_text
return [" * version #{version_text} is redundant with version scanned from url"]
end
return []
end
# Audits the homepage, stable/head urls and mirror urls of a formula.
# Returns an array of problem strings.
def audit_formula_urls f
  problems = []

  unless f.homepage =~ %r[^https?://]
    problems << " * The homepage should start with http or https."
  end

  # Google Code homepages should end in a slash
  if f.homepage =~ %r[^https?://code\.google\.com/p/[^/]+[^/]$]
    problems << " * Google Code homepage should end with a slash."
  end

  urls = [(f.url rescue nil), (f.head rescue nil)].compact
  urls.uniq! # head-only formulae result in duplicate entries

  # Check GNU urls; doesn't apply to mirrors
  urls.each do |p|
    if p =~ %r[^(https?|ftp)://(.+)/gnu/]
      problems << " * \"ftpmirror.gnu.org\" is preferred for GNU software."
    end
  end

  # The rest of the checks apply to mirrors as well.
  # BUGFIX: this previously pushed `m.values_at(:url).to_s`, which on
  # Ruby >= 1.9 yields an inspect-style string ('["http://…"]'), so mirror
  # urls never matched any of the checks below. Push the value itself.
  f.mirrors.each do |m|
    url = (m[:url] rescue nil)
    urls << url if url
  end

  # Check SourceForge urls
  urls.each do |p|
    # Is it a filedownload (instead of svnroot)
    next if p =~ %r[/svnroot/]
    next if p =~ %r[svn\.sourceforge]
    # Is it a sourceforge http(s) URL?
    next unless p =~ %r[^https?://.*\bsourceforge\.]
    if p =~ /(\?|&)use_mirror=/
      problems << " * Update this url (don't use #{$1}use_mirror)."
    end
    if p =~ /\/download$/
      problems << " * Update this url (don't use /download)."
    end
    if p =~ %r[^http://prdownloads\.]
      problems << " * Update this url (don't use prdownloads)."
    end
    if p =~ %r[^http://\w+\.dl\.]
      problems << " * Update this url (don't use specific dl mirrors)."
    end
  end

  # Check for git:// urls; https:// is preferred.
  urls.each do |p|
    if p =~ %r[^git://github\.com/]
      problems << " * Use https:// URLs for accessing repositories on GitHub."
    end
  end

  problems
end
# Checks the ordering and shape of the devel/head spec blocks relative to
# the stable url in the formula source text.
def audit_formula_specs text
problems = []
if text =~ /devel .+(url '.+').+(url '.+')/m
problems << " * 'devel' block found before stable 'url'"
end
if text =~ /devel .+(head '.+')/m
problems << " * 'devel' block found before 'head'"
end
if text =~ /devel do\s+end/
problems << " * Empty 'devel' block found"
end
return problems
end
# Audits a loaded Formula instance: aliased or unresolvable deps, deps we
# prefer users install outside Homebrew, gfortran, a missing version, and
# the length/charset/case of md5/sha1/sha256 checksums.
def audit_formula_instance f
problems = []
# Don't depend_on aliases; use full name
aliases = Formula.aliases
f.deps.select {|d| aliases.include? d}.each do |d|
problems << " * Dep #{d} is an alias; switch to the real name."
end
# Check for things we don't like to depend on.
# We allow non-Homebrew installs whenever possible.
f.deps.each do |d|
begin
dep_f = Formula.factory d
rescue
problems << " * Can't find dependency \"#{d}\"."
end
case d
when "git", "python", "ruby", "emacs", "mysql", "postgresql", "mercurial"
problems << <<-EOS
* Don't use #{d} as a dependency. We allow non-Homebrew
#{d} installations.
EOS
when 'gfortran'
problems << " * Use ENV.fortran during install instead of depends_on 'gfortran'"
end
end
problems += [' * invalid or missing version'] if f.version.to_s.empty?
# Validate whichever checksum ivars are present on the formula.
%w[md5 sha1 sha256].each do |checksum|
hash = f.instance_variable_get("@#{checksum}")
next if hash.nil?
hash = hash.strip
len = case checksum
when 'md5' then 32
when 'sha1' then 40
when 'sha256' then 64
end
if hash.empty?
problems << " * #{checksum} is empty"
else
problems << " * #{checksum} should be #{len} characters" unless hash.length == len
problems << " * #{checksum} contains invalid characters" unless hash =~ /^[a-fA-F0-9]+$/
problems << " * #{checksum} should be lowercase" unless hash == hash.downcase
end
end
return problems
end
# CLI entry point for `brew audit`: runs all audits over the selected
# formulae, prints grouped problems, a summary line, and exits non-zero
# when anything was found.
module Homebrew extend self
def audit
errors = false
brew_count = 0
problem_count = 0
ff.each do |f|
problems = []
if f.unstable and f.standard.nil?
problems += [' * head-only formula']
end
problems += audit_formula_instance f
problems += audit_formula_urls f
# Formula files must be plain 0644 (readable, non-executable).
perms = File.stat(f.path).mode
if perms.to_s(8) != "100644"
problems << " * permissions wrong; chmod 644 #{f.path}"
end
text = ""
File.open(f.path, "r") { |afile| text = afile.read }
# DATA with no __END__
if (text =~ /\bDATA\b/) and not (text =~ /^\s*__END__\s*$/)
problems << " * 'DATA' was found, but no '__END__'"
end
# files should end with a newline
# ('.' does not match "\n", so /.+\z/ only matches when the last
# line has content with no trailing newline)
if text =~ /.+\z/
problems << " * File should end with a newline"
end
# Don't try remaining audits on text in __END__
text_without_patch = (text.split("__END__")[0]).strip()
problems += audit_formula_text(f.name, text_without_patch)
problems += audit_formula_options(f, text_without_patch)
problems += audit_formula_version(f, text_without_patch)
problems += audit_formula_specs(text_without_patch)
unless problems.empty?
errors = true
puts "#{f.name}:"
puts problems * "\n"
puts
brew_count += 1
problem_count += problems.size
end
end
if errors
puts "#{problem_count} problems in #{brew_count} brews"
exit 1
end
end
end
|
#!/usr/bin/env ruby
# -*- coding: utf-8 -*-
require 'optparse'
require 'erb'
require 'pseudohiki/version'
require 'pseudohiki/blockparser'
require 'pseudohiki/autolink'
require 'pseudohiki/htmlformat'
require 'pseudohiki/plaintextformat'
require 'pseudohiki/markdownformat'
require 'pseudohiki/utils'
require 'htmlelement/htmltemplate'
require 'htmlelement'
require 'htmlelement/utils'
module PseudoHiki
class PageComposer
# Matches "!!"/"!!!" headings that carry an explicit [id] label.
HEADING_WITH_ID_PAT = /^(!{2,3})\[([A-Za-z][0-9A-Za-z_\-.:]*)\]\s*/o
# Shared plain-text formatter used for rendering toc entry labels.
PlainFormat = PlainTextFormat.create
# Common scaffolding shared by the page composers: holds the option set
# and provides the private table-of-contents helpers.
class BaseComposer
  def initialize(options)
    @options = options
  end

  # Render the parsed tree with the formatter configured in the options.
  def compose_body(tree)
    formatter = @options.formatter
    formatter.format(tree)
  end

  # Non-HTML composers contribute no <style> element.
  def create_style(path_to_css_file)
    "".freeze
  end

  private

  # True-ish for h2/h3 heading leaves that carry a node id (toc items).
  def toc_item_pat?(node)
    node.kind_of?(PseudoHiki::BlockParser::HeadingLeaf) &&
      (2..3).include?(node.level) &&
      node.node_id
  end

  # All toc candidate nodes, in document order.
  def collect_nodes_for_table_of_contents(tree)
    Utils::NodeCollector.select(tree) { |node| toc_item_pat?(node) }
  end

  # Plain-text rendering of a single node/line.
  def to_plain(line)
    PlainFormat.format(line).to_s
  end
end
# Composes HTML output: body, linked table of contents, optional embedded
# stylesheet, and optional rewriting of in-domain links to relative paths.
class HtmlComposer < BaseComposer
def initialize(options)
super(options)
@link_manager = setup_link_manager(options)
@relative_link = options[:relative_link]
end
def compose_body(tree)
super(tree).tap do |html|
if @relative_link and @link_manager
@link_manager.use_relative_path_for_in_domain_links(html)
end
end
end
# Builds the toc markup and tags each anchor with a "toc_item:" title.
def create_table_of_contents(tree)
@options.formatter.format(create_toc_tree(tree)).tap do |toc|
toc.traverse do |element|
if element.kind_of? HtmlElement and element.tagname == "a"
element["title"] = "toc_item: " + element.children.join.chomp
end
end
end
end
# Wraps h1, toc and contents into a <section id="main">; nil without :toc.
def create_main(toc, body, h1)
return nil unless @options[:toc]
main = formatter.create_element("section").tap do |element|
element["id"] = "main"
element.push h1 unless h1.empty?
element.push create_toc_container(toc)
element.push create_contents_container(body)
end
end
# Embeds the css file's content in a <style type="text/css"> element.
def create_style(path_to_css_file)
style = formatter.create_element("style").tap do |element|
element["type"] = "text/css"
open(File.expand_path(path_to_css_file)) do |css_file|
element.push css_file.read
end
end
end
private
def setup_link_manager(options)
if options[:domain_name]
domain_name = @options[:domain_name]
alternative_names = @options[:alternative_domain_names]
HtmlElement::Utils::LinkManager.new(domain_name, alternative_names)
end
end
def formatter
@formatter ||= @options.html_template.new
end
# Re-parses the headings as a hiki list of in-page links.
def create_toc_tree(tree, newline=nil)
toc_lines = collect_nodes_for_table_of_contents(tree).map do |line|
format("%s[[%s|#%s]]#{newline}",
'*' * line.level,
to_plain(line).lstrip,
line.node_id.upcase)
end
BlockParser.parse(toc_lines)
end
def create_toc_container(toc)
formatter.create_element("section").tap do |elm|
elm["id"] = "toc"
title = @options[:toc]
elm.push formatter.create_element("h2", title) unless title.empty?
elm.push toc
end
end
def create_contents_container(body)
formatter.create_element("section").tap do |elm|
elm["id"] = "contents"
elm.push body
end
end
end
# Composes plain-text/markdown-like output: toc entries are plain lines
# and the main document is joined with the platform record separator.
class PlainComposer < BaseComposer
def create_table_of_contents(tree)
toc_lines = collect_nodes_for_table_of_contents(tree).map do |toc_node|
('*' * toc_node.level) + to_plain(toc_node)
end
@options.formatter.format(BlockParser.parse(toc_lines))
end
def create_main(toc, body, h1)
contents = [body]
contents.unshift toc unless toc.empty?
if title = @options[:toc]
toc_title = @options.formatter.format(BlockParser.parse("!!" + title))
contents.unshift toc_title
end
contents.unshift h1 unless h1.empty?
contents.join($/)
end
end
# GitHub-flavored-markdown composer: toc entries link to GFM-style
# heading anchors instead of explicit node ids.
class GfmComposer < PlainComposer
def create_table_of_contents(tree)
toc_lines = collect_nodes_for_table_of_contents(tree).map do |toc_node|
format("%s[[%s|#%s]]#{$/}",
'*' * toc_node.level,
to_plain(toc_node).strip,
gfm_id(toc_node))
end
@options.formatter.format(BlockParser.parse(toc_lines))
end
private
def gfm_id(heading_node)
MarkDownFormat.convert_into_gfm_id_format(to_plain(heading_node).strip)
end
end
# Picks a composer matching the requested output format and delegates.
def initialize(options)
@options = options
@composer = select_composer.new(options)
end
def select_composer
return GfmComposer if @options[:html_version].version == "gfm"
@options.html_template ? HtmlComposer : PlainComposer
end
def create_table_of_contents(tree)
return "" unless @options[:toc]
@composer.create_table_of_contents(tree)
end
# Removes the first h1 ("!") line from the input and renders it alone;
# note this mutates input_lines via delete_at.
def split_main_heading(input_lines)
return "" unless @options[:split_main_heading]
h1_pos = input_lines.find_index {|line| /^![^!]/o =~ line }
return "" unless h1_pos
tree = BlockParser.parse([input_lines.delete_at(h1_pos)])
@options.formatter.format(tree)
end
# Full pipeline: parse, build toc/body/main, then wrap in a template.
# NOTE: css/title (and toc/tree) look unused here, but they are exposed
# to a user-supplied ERB template through the `binding` passed below.
def compose_html(input_lines)
h1 = split_main_heading(input_lines)
css = @options[:css]
tree = BlockParser.parse(input_lines)
toc = create_table_of_contents(tree)
body = @composer.compose_body(tree)
title = @options.title
main = @composer.create_main(toc, body, h1)
choose_template(main, body, binding)
end
# ERB template when one was given; otherwise the built-in HTML template.
def choose_template(main, body, current_binding)
if @options[:template]
html = ERB.new(@options.read_template_file).result(current_binding)
else
html = @options.create_html_template_with_current_options
embed_css = @options[:embed_css]
html.head.push @composer.create_style(embed_css) if embed_css
html.push main || body
end
html
end
end
class OptionManager
include HtmlElement::CHARSET
PlainVerboseFormat = PlainTextFormat.create(:verbose_mode => true)
MDFormat = MarkDownFormat.create
GFMFormat = MarkDownFormat.create(:gfm_style => true)
# One record per output format: name, formatter class, template class,
# output file extension, and the abbreviation pattern accepted on the CLI.
Formatter = Struct.new(:version, :formatter, :template, :ext, :opt_pat)
VERSIONS = [
["html4", HtmlFormat, HtmlTemplate, ".html", /^h/io],
["xhtml1", XhtmlFormat, XhtmlTemplate, ".html", /^x/io],
["html5", Xhtml5Format, Xhtml5Template, ".html", /^h5/io],
["plain", PageComposer::PlainFormat, nil, ".plain", /^p/io],
["plain_verbose", PlainVerboseFormat, nil, ".plain", /^pv/io],
["markdown", MDFormat, nil, ".md", /^m/io],
["gfm", GFMFormat, nil, ".md", /^g/io]
].map {|args| Formatter.new(*args) }
# Abbreviation pattern => canonical encoding name.
ENCODING_REGEXP = {
/^u/io => 'utf8',
/^e/io => 'euc-jp',
/^s/io => 'sjis',
/^l[a-zA-Z]*1/io => 'latin1'
}
# UTF-8 byte order mark, kept as raw bytes for comparison in remove_bom.
BOM = "\xef\xbb\xbf"
BOM.force_encoding("ASCII-8BIT") if BOM.respond_to? :encoding
# Lines starting with "//" at the top of an input file carry options.
FILE_HEADER_PAT = /^\/\//
ENCODING_TO_CHARSET = {
'utf8' => UTF8,
'euc-jp' => EUC_JP,
'sjis' => SJIS,
'latin1' => LATIN1
}
@default_options = {
:html_version => VERSIONS[0],
:lang => 'en',
:encoding => 'utf8',
:title => nil,
:css => "default.css",
:embed_css => nil,
:base => nil,
:template => nil,
:output => nil,
:force => false,
:toc => nil,
:split_main_heading => false
}
attr_accessor :need_output_file, :default_title
attr_reader :input_file_basename
# Skips a leading UTF-8 BOM on the input stream (no-op for STDIN "-").
def self.remove_bom(input=ARGF)
return if input == ARGF and input.filename == "-"
bom = input.read(3)
input.rewind unless BOM == bom
end
def self.default_options
@default_options.dup
end
# Precompiles a "//<option>: value" header pattern for every known option.
def initialize(options=nil)
@options = options || self.class.default_options
@written_option_pat = {}
@options.keys.each do |opt|
@written_option_pat[opt] = /^\/\/#{opt}:\s*(.*)$/
end
end
def [](key)
@options[key]
end
def[]=(key, value)
@options[key] = value
end
# True on native Windows rubies (mswin/mingw/cygwin).
#
# The previous pattern /win/i also matched "darwin", so macOS was wrongly
# detected as Windows. The platform string is injectable for testing and
# defaults to the running interpreter's RUBY_PLATFORM.
def win32?(platform = RUBY_PLATFORM)
  true if platform =~ /mswin|mingw|cygwin/i
end
# A command-line value counts as given when it is non-nil and non-empty.
def value_given?(value)
  !value.nil? && !value.empty?
end
# Template class for the chosen output format (nil for plain/markdown).
def html_template
self[:html_version].template
end
def formatter
self[:html_version].formatter
end
def charset
ENCODING_TO_CHARSET[self[:encoding]]
end
# href for the <base> element; on Windows absolute paths get a file:///
# scheme so browsers resolve them.
def base
base_dir = self[:base]
if base_dir and base_dir !~ /[\/\\]\.*$/o
base_dir = File.join(base_dir, ".")
base_dir = "file:///" + base_dir if base_dir !~ /^\./o and win32?
end
base_dir
end
def title
self[:title] || @default_title || "-"
end
def read_template_file
File.read(File.expand_path(self[:template]), :encoding => charset)
end
# Selects the output format by exact name or abbreviation (opt_pat).
# NOTE(review): only an exact name returns early; any abbreviation still
# falls through and prints the "invalid option" notice — confirm intended.
def set_html_version(version)
VERSIONS.each do |v|
if v.version == version
return self[:html_version] = v
else
self[:html_version] = v if v.opt_pat =~ version
end
end
STDERR.puts "\"#{version}\" is an invalid option for --format-version. \
\"#{self[:html_version].version}\" is chosen instead."
end
# Selects the output encoding by exact name or abbreviation, warning
# when an abbreviation had to be resolved.
def set_html_encoding(given_opt)
if ENCODING_REGEXP.values.include? given_opt
self[:encoding] = given_opt
else
ENCODING_REGEXP.each do |pat, encoding|
self[:encoding] = encoding if pat =~ given_opt
end
STDERR.puts "\"#{self[:encoding]}\" is chosen as an encoding system, \
instead of \"#{given_opt}\"."
end
end
# Applies "external[:internal]" to Ruby's default encodings, mirroring
# the interpreter's -E option. No-op on rubies without M17N support.
def setup_ruby_encoding(given_opt)
  return nil unless String.new.respond_to? :encoding
  external, internal = given_opt.split(/:/o, 2)
  Encoding.default_external = external unless external.nil? || external.empty?
  Encoding.default_internal = internal unless internal.nil? || internal.empty?
end
# Builds the OptionParser for the converter CLI. Each handler writes into
# @options via self[]=; the parser object is returned unparsed.
# NOTE(review): "formart" typo lives in user-visible help text — fix it
# upstream, not here.
def setup_command_line_options
OptionParser.new("USAGE: #{File.basename($0)} [OPTION]... [FILE]...
Convert texts written in a Hiki-like notation into another format.") do |opt|
opt.version = PseudoHiki::VERSION
opt.on("-f [format_version]", "--format-version [=format_version]",
"Choose a formart for the output. Available options: \
html4, xhtml1, html5, plain, plain_verbose, markdown or gfm \
(default: #{self[:html_version].version})") do |version|
set_html_version(version)
end
opt.on("-l [lang]", "--lang [=lang]",
"Set the value of charset attributes \
(default: #{self[:lang]})") do |lang|
self[:lang] = lang if value_given?(lang)
end
opt.on("-e [encoding]", "--format-encoding [=encoding]",
"Available options: utf8, euc-jp, sjis, latin1 \
(default: #{self[:encoding]})") do |given_opt|
set_html_encoding(given_opt)
end
opt.on("-E [ex[:in]]", "--encoding [=ex[:in]]",
"Specify the default external and internal character encodings \
(same as the option of MRI)") do |given_opt|
setup_ruby_encoding(given_opt)
end
# use '-w' to avoid the conflict with the short option for '[-t]emplate'
opt.on("-w [(window) title]", "--title [=title]",
"Set the value of the <title> element \
(default: the basename of the input file)") do |title|
self[:title] = title if value_given?(title)
end
opt.on("-c [css]", "--css [=css]",
"Set the path to a css file to be used \
(default: #{self[:css]})") do |css|
self[:css] = css
end
opt.on("-C [path_to_css_file]", "--embed-css [=path_to_css_file]",
"Set the path to a css file to embed \
(default: not to embed)") do |path_to_css_file|
self[:embed_css] = path_to_css_file
end
opt.on("-b [base]", "--base [=base]",
"Specify the value of href attribute of the <base> element \
(default: not specified)") do |base_dir|
self[:base] = base_dir if value_given?(base_dir)
end
opt.on("-t [template]", "--template [=template]",
"Specify a template file in eruby format with \"<%= body %>\" \
inside (default: not specified)") do |template|
self[:template] = template if value_given?(template)
end
opt.on("-o [output]", "--output [=output]",
"Output to the specified file. If no file is given, \
\"[input_file_basename].html\" will be used.(default: STDOUT)") do |output|
self[:output] = File.expand_path(output) if value_given?(output)
@need_output_file = true
end
opt.on("-F", "--force",
"Force to apply command line options. \
(default: false)") do |force|
self[:force] = force
end
opt.on("-m [contents-title]", "--table-of-contents [=contents-title]",
"Include the list of h2 and/or h3 headings with ids. \
(default: nil)") do |toc_title|
self[:toc] = toc_title
end
opt.on("-s", "--split-main-heading",
"Split the first h1 element") do |should_be_split|
self[:split_main_heading] = should_be_split
end
opt.on("-W", "--with-wikiname",
"Use WikiNames") do |with_wikiname|
if with_wikiname
auto_linker = PseudoHiki::AutoLink::WikiName.new
PseudoHiki::BlockParser.auto_linker = auto_linker
end
end
opt.on("-d [domain_name(s)]", "--domain-name [=domain_name(s)]",
"Specify domain name(s)") do |domain_name|
names = domain_name.split(/;\s*/)
self[:domain_name] = names.shift
self[:alternative_domain_names] = names
end
opt.on("-r", "--relative-links-in-html",
"Replace absolute paths with relative ones. \
*** THIS OPTION IS EXPERIMENTAL ***") do |relative_link|
self[:relative_link] = relative_link
end
opt
end
end
# With no file arguments an explicit -o target is required; with one
# argument the input file name is recorded.
def check_argv
case ARGV.length
when 0
if @need_output_file and not self[:output]
raise "You must specify a file name for output"
end
when 1
read_input_filename(ARGV[0])
end
end
def parse_command_line_options
opt = setup_command_line_options
yield opt if block_given?
opt.parse!
check_argv
@default_title = @input_file_basename
end
# Reads "//option: value" header lines from the top of the input until
# the first non-header line; --force keeps already-set CLI values.
def set_options_from_input_file(input_lines)
input_lines.each do |line|
break if FILE_HEADER_PAT !~ line
line = line.chomp
@options.keys.each do |opt|
next if self[opt] and self[:force]
self[opt] = $1 if @written_option_pat[opt] =~ line
end
end
end
# Instantiates the HTML template with charset/lang/css/base/title applied;
# returns [] for non-HTML formats (no template class).
def create_html_template_with_current_options
return [] unless html_template
html = html_template.new
html.charset = charset
html.language = self[:lang]
html.default_css = self[:css] if self[:css]
html.base = base if self[:base]
html.title = title
html
end
def read_input_filename(filename)
@input_file_dir, @input_file_name = File.split(File.expand_path(filename))
@input_file_basename = File.basename(@input_file_name, ".*")
end
# nil when writing to STDOUT; otherwise the explicit -o path or the
# input basename plus the format's extension.
def output_filename
return nil unless @need_output_file
if self[:output]
File.expand_path(self[:output])
else
ext = self[:html_version].ext
File.join(@input_file_dir, @input_file_basename + ext)
end
end
# Yields an open output stream: a file when a target exists, else STDOUT.
def open_output
if output_filename
open(output_filename, "w") {|f| yield f }
else
yield STDOUT
end
end
end
end
Introduced OptionManager#parse_opt_setup_ruby_encoding().
#!/usr/bin/env ruby
# -*- coding: utf-8 -*-
require 'optparse'
require 'erb'
require 'pseudohiki/version'
require 'pseudohiki/blockparser'
require 'pseudohiki/autolink'
require 'pseudohiki/htmlformat'
require 'pseudohiki/plaintextformat'
require 'pseudohiki/markdownformat'
require 'pseudohiki/utils'
require 'htmlelement/htmltemplate'
require 'htmlelement'
require 'htmlelement/utils'
module PseudoHiki
class PageComposer
HEADING_WITH_ID_PAT = /^(!{2,3})\[([A-Za-z][0-9A-Za-z_\-.:]*)\]\s*/o
PlainFormat = PlainTextFormat.create
class BaseComposer
  # Shared logic for all composers: formatting a parsed tree and
  # collecting heading nodes for a table of contents.
  def initialize(options)
    @options = options
  end

  # Format the parsed tree with the formatter selected in the options.
  def compose_body(tree)
    @options.formatter.format(tree)
  end

  # Non-HTML output embeds no stylesheet; HtmlComposer overrides this.
  def create_style(path_to_css_file); "".freeze; end

  private

  # True when the node is an h2/h3 heading carrying an id, i.e. a
  # candidate entry for the table of contents.
  def toc_item_pat?(node)
    node.kind_of?(PseudoHiki::BlockParser::HeadingLeaf) and
      (2..3).include? node.level and
      node.node_id
  end

  # All heading nodes in the tree that qualify as toc items.
  def collect_nodes_for_table_of_contents(tree)
    Utils::NodeCollector.select(tree) {|node| toc_item_pat?(node) }
  end

  # Render a single line as plain text.
  def to_plain(line)
    PlainFormat.format(line).to_s
  end
end
class HtmlComposer < BaseComposer
  # Composes HTML output: body, table of contents and <section>
  # containers, with optional in-domain link rewriting.
  def initialize(options)
    super(options)
    @link_manager = setup_link_manager(options)
    @relative_link = options[:relative_link]
  end

  # Format the tree, then optionally rewrite absolute in-domain links
  # to relative ones (the experimental -r option).
  def compose_body(tree)
    super(tree).tap do |html|
      if @relative_link and @link_manager
        @link_manager.use_relative_path_for_in_domain_links(html)
      end
    end
  end

  # Build the toc and tag each of its <a> elements with a "toc_item:"
  # title attribute so they can be identified/styled later.
  def create_table_of_contents(tree)
    @options.formatter.format(create_toc_tree(tree)).tap do |toc|
      toc.traverse do |element|
        if element.kind_of? HtmlElement and element.tagname == "a"
          element["title"] = "toc_item: " + element.children.join.chomp
        end
      end
    end
  end

  # Wrap heading, toc and body into <section id="main">; returns nil
  # when no table of contents was requested (-m not given).
  def create_main(toc, body, h1)
    return nil unless @options[:toc]
    # NOTE: the local is redundant (the tap result is returned anyway).
    main = formatter.create_element("section").tap do |element|
      element["id"] = "main"
      element.push h1 unless h1.empty?
      element.push create_toc_container(toc)
      element.push create_contents_container(body)
    end
  end

  # Read the CSS file and return a <style> element embedding its text.
  def create_style(path_to_css_file)
    style = formatter.create_element("style").tap do |element|
      element["type"] = "text/css"
      open(File.expand_path(path_to_css_file)) do |css_file|
        element.push css_file.read
      end
    end
  end

  private

  # A LinkManager is only set up when a domain name was given (-d);
  # returns nil otherwise, which disables relative-link rewriting.
  def setup_link_manager(options)
    if options[:domain_name]
      domain_name = @options[:domain_name]
      alternative_names = @options[:alternative_domain_names]
      HtmlElement::Utils::LinkManager.new(domain_name, alternative_names)
    end
  end

  # Template object used to create bare HTML elements; memoized.
  def formatter
    @formatter ||= @options.html_template.new
  end

  # Build a hiki list of toc entries linking to heading ids and parse
  # it back into a tree.
  def create_toc_tree(tree, newline=nil)
    toc_lines = collect_nodes_for_table_of_contents(tree).map do |line|
      format("%s[[%s|#%s]]#{newline}",
             '*' * line.level,
             to_plain(line).lstrip,
             line.node_id.upcase)
    end
    BlockParser.parse(toc_lines)
  end

  # <section id="toc"> with an optional heading, then the toc list.
  def create_toc_container(toc)
    formatter.create_element("section").tap do |elm|
      elm["id"] = "toc"
      title = @options[:toc]
      elm.push formatter.create_element("h2", title) unless title.empty?
      elm.push toc
    end
  end

  # <section id="contents"> wrapping the formatted body.
  def create_contents_container(body)
    formatter.create_element("section").tap do |elm|
      elm["id"] = "contents"
      elm.push body
    end
  end
end
class PlainComposer < BaseComposer
  # Composes plain text / markdown output without HTML containers.
  def create_table_of_contents(tree)
    toc_lines = collect_nodes_for_table_of_contents(tree).map do |toc_node|
      ('*' * toc_node.level) + to_plain(toc_node)
    end
    @options.formatter.format(BlockParser.parse(toc_lines))
  end

  # Concatenate heading, toc title, toc and body with the platform's
  # record separator; empty or unrequested parts are skipped.
  def create_main(toc, body, h1)
    contents = [body]
    contents.unshift toc unless toc.empty?
    if title = @options[:toc]
      toc_title = @options.formatter.format(BlockParser.parse("!!" + title))
      contents.unshift toc_title
    end
    contents.unshift h1 unless h1.empty?
    contents.join($/)
  end
end
class GfmComposer < PlainComposer
  # GitHub Flavored Markdown: toc entries link to GFM-style anchor ids.
  def create_table_of_contents(tree)
    toc_lines = collect_nodes_for_table_of_contents(tree).map do |toc_node|
      format("%s[[%s|#%s]]#{$/}",
             '*' * toc_node.level,
             to_plain(toc_node).strip,
             gfm_id(toc_node))
    end
    @options.formatter.format(BlockParser.parse(toc_lines))
  end

  private

  # Derive the GFM anchor id for a heading node.
  def gfm_id(heading_node)
    MarkDownFormat.convert_into_gfm_id_format(to_plain(heading_node).strip)
  end
end
# Pick the composer matching the requested output format.
def initialize(options)
  @options = options
  @composer = select_composer.new(options)
end

# GFM gets its own composer; otherwise HTML when a template exists,
# plain text when not.
def select_composer
  return GfmComposer if @options[:html_version].version == "gfm"
  @options.html_template ? HtmlComposer : PlainComposer
end

# Table of contents, or "" when -m was not given.
def create_table_of_contents(tree)
  return "" unless @options[:toc]
  @composer.create_table_of_contents(tree)
end

# With -s, remove the first h1 line from input_lines (NB: mutates the
# argument via delete_at) and return it formatted; "" otherwise.
def split_main_heading(input_lines)
  return "" unless @options[:split_main_heading]
  h1_pos = input_lines.find_index {|line| /^![^!]/o =~ line }
  return "" unless h1_pos
  tree = BlockParser.parse([input_lines.delete_at(h1_pos)])
  @options.formatter.format(tree)
end
# Convert hiki source lines into the final document.
def compose_html(input_lines)
  h1 = split_main_heading(input_lines)
  # NOTE: css/title (and toc/body/main) look unused here, but they are
  # exposed to a user-supplied ERB template through the binding passed
  # to choose_template below — do not remove them.
  css = @options[:css]
  tree = BlockParser.parse(input_lines)
  toc = create_table_of_contents(tree)
  body = @composer.compose_body(tree)
  title = @options.title
  main = @composer.create_main(toc, body, h1)
  choose_template(main, body, binding)
end

# Render through a user-supplied ERB template when one was given (-t);
# otherwise assemble a template from the current options.
def choose_template(main, body, current_binding)
  if @options[:template]
    html = ERB.new(@options.read_template_file).result(current_binding)
  else
    html = @options.create_html_template_with_current_options
    embed_css = @options[:embed_css]
    html.head.push @composer.create_style(embed_css) if embed_css
    html.push main || body
  end
  html
end
end
class OptionManager
include HtmlElement::CHARSET
# Formatters shared by every OptionManager instance.
PlainVerboseFormat = PlainTextFormat.create(:verbose_mode => true)
MDFormat = MarkDownFormat.create
GFMFormat = MarkDownFormat.create(:gfm_style => true)
# One row per supported output format: canonical name, formatter class,
# HTML template class (nil for non-HTML formats), output file extension
# and the abbreviation pattern accepted on the command line.
Formatter = Struct.new(:version, :formatter, :template, :ext, :opt_pat)
VERSIONS = [
  ["html4", HtmlFormat, HtmlTemplate, ".html", /^h/io],
  ["xhtml1", XhtmlFormat, XhtmlTemplate, ".html", /^x/io],
  ["html5", Xhtml5Format, Xhtml5Template, ".html", /^h5/io],
  ["plain", PageComposer::PlainFormat, nil, ".plain", /^p/io],
  ["plain_verbose", PlainVerboseFormat, nil, ".plain", /^pv/io],
  ["markdown", MDFormat, nil, ".md", /^m/io],
  ["gfm", GFMFormat, nil, ".md", /^g/io]
].map {|args| Formatter.new(*args) }
# Abbreviation patterns for the --format-encoding option.
ENCODING_REGEXP = {
  /^u/io => 'utf8',
  /^e/io => 'euc-jp',
  /^s/io => 'sjis',
  /^l[a-zA-Z]*1/io => 'latin1'
}
# UTF-8 byte order mark, kept as raw bytes for comparison with input.
BOM = "\xef\xbb\xbf"
BOM.force_encoding("ASCII-8BIT") if BOM.respond_to? :encoding
# Lines starting with "//" at the top of a file carry embedded options.
FILE_HEADER_PAT = /^\/\//
ENCODING_TO_CHARSET = {
  'utf8' => UTF8,
  'euc-jp' => EUC_JP,
  'sjis' => SJIS,
  'latin1' => LATIN1
}
@default_options = {
  :html_version => VERSIONS[0],
  :lang => 'en',
  :encoding => 'utf8',
  :title => nil,
  :css => "default.css",
  :embed_css => nil,
  :base => nil,
  :template => nil,
  :output => nil,
  :force => false,
  :toc => nil,
  :split_main_heading => false
}
attr_accessor :need_output_file, :default_title
attr_reader :input_file_basename

# Skip a leading UTF-8 BOM on the input stream, rewinding when the
# first three bytes were not a BOM. Reading from "-" (a pipe) cannot be
# rewound, so it is left untouched.
def self.remove_bom(input=ARGF)
  return if input == ARGF and input.filename == "-"
  bom = input.read(3)
  input.rewind unless BOM == bom
end

# A fresh copy of the defaults so callers can mutate it freely.
def self.default_options
  @default_options.dup
end
# @options falls back to the class defaults; @written_option_pat maps
# each option key to the "//key: value" pattern used in file headers.
def initialize(options=nil)
  @options = options || self.class.default_options
  @written_option_pat = {}
  @options.keys.each do |opt|
    @written_option_pat[opt] = /^\/\/#{opt}:\s*(.*)$/
  end
end

# Read an option value.
def [](key)
  @options[key]
end

# Set an option value.
def[]=(key, value)
  @options[key] = value
end

# True on Windows builds of Ruby, nil elsewhere.
def win32?
  true if RUBY_PLATFORM =~ /win/i
end

# A value counts as given when it is non-nil and non-empty.
def value_given?(value)
  value and not value.empty?
end

# HTML template class for the selected format (nil for plain/markdown).
def html_template
  self[:html_version].template
end

# Formatter class for the selected format.
def formatter
  self[:html_version].formatter
end

# Charset constant matching the selected encoding name.
def charset
  ENCODING_TO_CHARSET[self[:encoding]]
end

# Value for the href of the <base> element: ensure a trailing "/."
# and, on Windows, prefix non-relative paths with file:///.
def base
  base_dir = self[:base]
  if base_dir and base_dir !~ /[\/\\]\.*$/o
    base_dir = File.join(base_dir, ".")
    base_dir = "file:///" + base_dir if base_dir !~ /^\./o and win32?
  end
  base_dir
end

# Explicit --title, else the input file's basename, else "-".
def title
  self[:title] || @default_title || "-"
end

# Content of the ERB template file given with -t.
def read_template_file
  File.read(File.expand_path(self[:template]), :encoding => charset)
end
# Select the output format from an exact name or an abbreviation.
# NOTE(review): when only an abbreviation pattern matches, execution
# still falls through to the STDERR warning below — presumably meant to
# tell the user which format was picked; confirm before changing.
def set_html_version(version)
  VERSIONS.each do |v|
    if v.version == version
      return self[:html_version] = v
    else
      # Later pattern matches override earlier ones (e.g. "h5" matches
      # /^h/ first, then /^h5/).
      self[:html_version] = v if v.opt_pat =~ version
    end
  end
  STDERR.puts "\"#{version}\" is an invalid option for --format-version. \
\"#{self[:html_version].version}\" is chosen instead."
end

# Select the output encoding from an exact name or an abbreviation,
# warning on STDERR when the given value was not an exact match.
def set_html_encoding(given_opt)
  if ENCODING_REGEXP.values.include? given_opt
    self[:encoding] = given_opt
  else
    ENCODING_REGEXP.each do |pat, encoding|
      self[:encoding] = encoding if pat =~ given_opt
    end
    STDERR.puts "\"#{self[:encoding]}\" is chosen as an encoding system, \
instead of \"#{given_opt}\"."
  end
end
# Apply the "-E ex[:in]" style encoding option to Ruby's default
# external/internal encodings (no-op on Rubies without String#encoding).
def setup_ruby_encoding(given_opt)
  return nil unless String.new.respond_to? :encoding
  external, internal = given_opt.split(/:/o, 2)
  unless external.nil? or external.empty?
    Encoding.default_external = external
  end
  unless internal.nil? or internal.empty?
    Encoding.default_internal = internal
  end
end

# Register the -E/--encoding option on the given OptionParser.
def parse_opt_setup_ruby_encoding(opt)
  opt.on("-E [ex[:in]]", "--encoding [=ex[:in]]",
         "Specify the default external and internal character encodings \
(same as the option of MRI)") do |given_opt|
    setup_ruby_encoding(given_opt)
  end
end
# Build the OptionParser that maps command line flags onto @options and
# return it; the caller invokes #parse! (see parse_command_line_options).
# Fix: help text for -f read "Choose a formart for the output." — typo
# corrected to "format".
def setup_command_line_options
  OptionParser.new("USAGE: #{File.basename($0)} [OPTION]... [FILE]...
Convert texts written in a Hiki-like notation into another format.") do |opt|
    opt.version = PseudoHiki::VERSION
    parse_opt_setup_ruby_encoding(opt)
    opt.on("-f [format_version]", "--format-version [=format_version]",
           "Choose a format for the output. Available options: \
html4, xhtml1, html5, plain, plain_verbose, markdown or gfm \
(default: #{self[:html_version].version})") do |version|
      set_html_version(version)
    end
    opt.on("-l [lang]", "--lang [=lang]",
           "Set the value of charset attributes \
(default: #{self[:lang]})") do |lang|
      self[:lang] = lang if value_given?(lang)
    end
    opt.on("-e [encoding]", "--format-encoding [=encoding]",
           "Available options: utf8, euc-jp, sjis, latin1 \
(default: #{self[:encoding]})") do |given_opt|
      set_html_encoding(given_opt)
    end
    # use '-w' to avoid the conflict with the short option for '[-t]emplate'
    opt.on("-w [(window) title]", "--title [=title]",
           "Set the value of the <title> element \
(default: the basename of the input file)") do |title|
      self[:title] = title if value_given?(title)
    end
    opt.on("-c [css]", "--css [=css]",
           "Set the path to a css file to be used \
(default: #{self[:css]})") do |css|
      self[:css] = css
    end
    opt.on("-C [path_to_css_file]", "--embed-css [=path_to_css_file]",
           "Set the path to a css file to embed \
(default: not to embed)") do |path_to_css_file|
      self[:embed_css] = path_to_css_file
    end
    opt.on("-b [base]", "--base [=base]",
           "Specify the value of href attribute of the <base> element \
(default: not specified)") do |base_dir|
      self[:base] = base_dir if value_given?(base_dir)
    end
    opt.on("-t [template]", "--template [=template]",
           "Specify a template file in eruby format with \"<%= body %>\" \
inside (default: not specified)") do |template|
      self[:template] = template if value_given?(template)
    end
    opt.on("-o [output]", "--output [=output]",
           "Output to the specified file. If no file is given, \
\"[input_file_basename].html\" will be used.(default: STDOUT)") do |output|
      self[:output] = File.expand_path(output) if value_given?(output)
      @need_output_file = true
    end
    opt.on("-F", "--force",
           "Force to apply command line options. \
(default: false)") do |force|
      self[:force] = force
    end
    opt.on("-m [contents-title]", "--table-of-contents [=contents-title]",
           "Include the list of h2 and/or h3 headings with ids. \
(default: nil)") do |toc_title|
      self[:toc] = toc_title
    end
    opt.on("-s", "--split-main-heading",
           "Split the first h1 element") do |should_be_split|
      self[:split_main_heading] = should_be_split
    end
    opt.on("-W", "--with-wikiname",
           "Use WikiNames") do |with_wikiname|
      if with_wikiname
        auto_linker = PseudoHiki::AutoLink::WikiName.new
        PseudoHiki::BlockParser.auto_linker = auto_linker
      end
    end
    opt.on("-d [domain_name(s)]", "--domain-name [=domain_name(s)]",
           "Specify domain name(s)") do |domain_name|
      # The first ";"-separated name is primary; the rest are aliases.
      names = domain_name.split(/;\s*/)
      self[:domain_name] = names.shift
      self[:alternative_domain_names] = names
    end
    opt.on("-r", "--relative-links-in-html",
           "Replace absolute paths with relative ones. \
*** THIS OPTION IS EXPERIMENTAL ***") do |relative_link|
      self[:relative_link] = relative_link
    end
    opt
  end
end
# Validate the arguments left after option parsing: with none, an
# output name must have been given when a file is to be written; with
# exactly one, treat it as the input file.
def check_argv
  case ARGV.length
  when 0
    if @need_output_file and not self[:output]
      raise "You must specify a file name for output"
    end
  when 1
    read_input_filename(ARGV[0])
  end
end

# Build the option parser, let the caller tweak it via an optional
# block, parse ARGV destructively, then validate what remains.
def parse_command_line_options
  opt = setup_command_line_options
  yield opt if block_given?
  opt.parse!
  check_argv
  # The input basename (set by check_argv) becomes the default title.
  @default_title = @input_file_basename
end

# Apply "//key: value" header lines from the input file to the options.
# An option already set is only protected when --force was also given,
# i.e. by default the file header wins over the command line.
def set_options_from_input_file(input_lines)
  input_lines.each do |line|
    break if FILE_HEADER_PAT !~ line
    line = line.chomp
    @options.keys.each do |opt|
      next if self[opt] and self[:force]
      self[opt] = $1 if @written_option_pat[opt] =~ line
    end
  end
end

# Instantiate the HTML template configured by the current options, or
# return an empty array for formats without a template (plain/markdown).
def create_html_template_with_current_options
  return [] unless html_template
  html = html_template.new
  html.charset = charset
  html.language = self[:lang]
  html.default_css = self[:css] if self[:css]
  html.base = base if self[:base]
  html.title = title
  html
end
# Record directory, file name and extension-stripped basename of the
# (absolutized) input file.
def read_input_filename(filename)
  @input_file_dir, @input_file_name = File.split(File.expand_path(filename))
  @input_file_basename = File.basename(@input_file_name, ".*")
end

# Absolute output path, or nil when writing to STDOUT; defaults to the
# input basename plus the selected format's extension.
def output_filename
  return nil unless @need_output_file
  if self[:output]
    File.expand_path(self[:output])
  else
    ext = self[:html_version].ext
    File.join(@input_file_dir, @input_file_basename + ext)
  end
end

# Yield a writable IO: the output file when one is wanted, else STDOUT.
def open_output
  if output_filename
    open(output_filename, "w") {|f| yield f }
  else
    yield STDOUT
  end
end
end
end
|
require 'formula'
require 'utils'
# Use "brew audit --strict" to enable even stricter checks.
# True when "brew audit --strict" was requested.
def strict?
  ARGV.flag?("--strict")
end

# Formulae named on the command line, or every formula when none given.
def ff
  ARGV.named.empty? ? Formula.all : ARGV.formulae
end
# Scan a formula's source text for style problems and return the list
# of human-readable complaints (empty when the text is clean).
def audit_formula_text(name, text)
  issues = []
  # Class inheritance should read "class Foo < Formula".
  issues << " * Use a space in class inheritance: class Foo < #{$1}" if text =~ /<(Formula|AmazonWebServicesFormula)/
  # Commented-out cmake support from default template
  if (text =~ /# depends_on 'cmake'/) or (text =~ /# system "cmake/)
    issues << " * Commented cmake support found."
  end
  # 2 (or more in an if block) spaces before depends_on, please
  issues << " * Check indentation of 'depends_on'." if text =~ /^\ ?depends_on/
  # cmake, pkg-config, and scons are build-time deps
  if text =~ /depends_on ['"](cmake|pkg-config|scons)['"]$/
    issues << " * #{$1} dependency should be \"depends_on '#{$1}' => :build\""
  end
  # FileUtils is included in Formula
  issues << " * Don't need 'FileUtils.' before #{$1}." if text =~ /FileUtils\.(\w+)/
  # Check for long inreplace block vars
  if text =~ /inreplace .* do \|(.{2,})\|/
    issues << " * \"inreplace <filenames> do |s|\" is preferred over \"|#{$1}|\"."
  end
  # Check for string interpolation of single values.
  if text =~ /(system|inreplace|gsub!|change_make_var!) .* ['"]#\{(\w+)\}['"]/
    issues << " * Don't need to interpolate \"#{$2}\" with #{$1}"
  end
  # Check for string concatenation; prefer interpolation
  if text =~ /(#\{\w+\s*\+\s*['"][^}]+\})/
    issues << " * Try not to concatenate paths in string interpolation:\n #{$1}"
  end
  # Prefer formula path shortcuts in Pathname+
  if text =~ %r{\(\s*(prefix\s*\+\s*(['"])(bin|include|libexec|lib|sbin|share))}
    issues << " * \"(#{$1}...#{$2})\" should be \"(#{$3}+...)\""
  end
  if text =~ %r[((man)\s*\+\s*(['"])(man[1-8])(['"]))]
    issues << " * \"#{$1}\" should be \"#{$4}\""
  end
  # Prefer formula path shortcuts in strings
  if text =~ %r[(\#\{prefix\}/(bin|include|libexec|lib|sbin|share))]
    issues << " * \"#{$1}\" should be \"\#{#{$2}}\""
  end
  if text =~ %r[((\#\{prefix\}/share/man/|\#\{man\}/)(man[1-8]))]
    issues << " * \"#{$1}\" should be \"\#{#{$3}}\""
  end
  if text =~ %r[((\#\{share\}/(man)))[/'"]]
    issues << " * \"#{$1}\" should be \"\#{#{$3}}\""
  end
  if text =~ %r[(\#\{prefix\}/share/(info|man))]
    issues << " * \"#{$1}\" should be \"\#{#{$2}}\""
  end
  # Empty checksums
  %w[md5 sha1 sha256].each do |csum|
    issues << " * #{csum} is empty" if text =~ /#{csum}\s+(\'\'|\"\")/
  end
  # Commented-out depends_on
  issues << " * Commented-out dep #{$1}." if text =~ /#\s*depends_on\s+(.+)\s*$/
  # No trailing whitespace, please
  issues << " * Trailing whitespace was found." if text =~ /(\t|[ ])+$/
  issues << " * Use \"if ARGV.build_head?\" instead" if text =~ /if\s+ARGV\.include\?\s+'--HEAD'/
  issues << " * Use separate make calls." if text =~ /make && make/
  issues << " * Use spaces instead of tabs for indentation" if text =~ /^[ ]*\t/
  # Formula depends_on gfortran
  unless name == "gfortran" # Gfortran itself has this text in the caveats
    if text =~ /^\s*depends_on\s*(\'|\")gfortran(\'|\").*/
      issues << " * Use ENV.fortran during install instead of depends_on 'gfortran'"
    end
  end
  # xcodebuild should specify SYMROOT
  if text =~ /xcodebuild/ and not text =~ /SYMROOT=/
    issues << " * xcodebuild should be passed an explicit \"SYMROOT\""
  end
  # using ARGV.flag? for formula options is generally a bad thing
  issues << " * Use 'ARGV.include?' instead of 'ARGV.flag?'" if text =~ /ARGV\.flag\?/
  # MacPorts patches should specify a revision, not trunk
  if text =~ %r[macports/trunk]
    issues << " * MacPorts patches should specify a revision instead of trunk"
  end
  issues
end
# Cross-check options a formula tests for via ARGV.include? against the
# options it documents through the `options` DSL, reporting each option
# that is used-but-undocumented or documented-but-unused.
def audit_formula_options(f, text)
  issues = []
  # Options the formula actually checks for in its source text.
  used = text.scan(/ARGV\.include\?[ ]*\(?(['"])(.+?)\1/).map { |m| m[1] }
  used.reject! { |o| o.include? "#" }
  used.uniq!
  # Options the formula documents; any failure reading them (e.g. no
  # `options` method) is treated as "none documented".
  documented = begin
    f.options.map { |o| o[0] }.reject { |o| o.include? "=" }
  rescue
    []
  end
  used.each do |o|
    next if o == '--HEAD' || o == '--devel'
    issues << " * Option #{o} is not documented" unless documented.include? o
  end
  documented.each do |o|
    next if o == '--universal' and text =~ /ARGV\.build_universal\?/
    issues << " * Option #{o} is unused" unless used.include? o
  end
  issues
end
# Complain when the DSL-declared version merely repeats the version
# already derivable from the download URL.
def audit_formula_version(f, text)
  # Version as defined in the DSL (or "" when unset).
  version_text = f.class.send('version').to_s
  # Version as determined from the URL (a Homebrew Pathname extension).
  version_url = Pathname.new(f.url).version
  return [" * version #{version_text} is redundant with version scanned from url"] if version_url == version_text
  []
end
# Check the homepage, download and mirror URLs of a formula for known
# bad patterns and return the list of complaints.
def audit_formula_urls(f)
  issues = []
  unless f.homepage =~ %r[^https?://]
    issues << " * The homepage should start with http or https."
  end
  urls = [(f.url rescue nil), (f.head rescue nil)].compact
  urls.uniq! # head-only formulae result in duplicate entries
  f.mirrors.each do |m|
    mirror = m.values_at :url
    # NOTE(review): values_at returns an Array, so this appends its
    # #to_s; kept as-is to preserve the original behaviour.
    urls << (mirror.to_s rescue nil)
  end
  # Check SourceForge urls
  urls.each do |p|
    # Only filedownload URLs (not svnroot checkouts) are of interest.
    next if p =~ %r[/svnroot/]
    next if p =~ %r[svn\.sourceforge]
    next unless p =~ %r[^https?://.*\bsourceforge\.]
    issues << " * Update this url (don't use #{$1}use_mirror)." if p =~ /(\?|&)use_mirror=/
    issues << " * Update this url (don't use /download)." if p =~ /\/download$/
    issues << " * Update this url (don't use prdownloads)." if p =~ %r[^http://prdownloads\.]
    issues << " * Update this url (don't use specific dl mirrors)." if p =~ %r[^http://\w+\.dl\.]
  end
  # Check for git:// urls; https:// is preferred.
  urls.each do |p|
    issues << " * Use https:// URLs for accessing repositories on GitHub." if p =~ %r[^git://github\.com/]
  end
  # Check GNU urls
  urls.each do |p|
    issues << " * \"ftpmirror.gnu.org\" is preferred for GNU software." if p =~ %r[^(https?|ftp)://(.+)/gnu/]
  end
  issues
end
# Audit a loaded formula object: alias dependencies, unresolvable or
# discouraged dependencies, and Google Code homepage formatting.
def audit_formula_instance(f)
  issues = []
  # Don't depend_on aliases; use full name
  aliases = Formula.aliases
  f.deps.select { |d| aliases.include? d }.each do |d|
    issues << " * Dep #{d} is an alias; switch to the real name."
  end
  # Check for things we don't like to depend on.
  # We allow non-Homebrew installs whenever possible.
  f.deps.each do |d|
    begin
      Formula.factory d
    rescue
      issues << " * Can't find dependency \"#{d}\"."
    end
    case d
    when "git"
      issues << " * Don't use Git as a dependency; we allow non-Homebrew git installs."
    end
  end
  # Google Code homepages should end in a slash
  if f.homepage =~ %r[^https?://code\.google\.com/p/[^/]+[^/]$]
    issues << " * Google Code homepage should end with a slash."
  end
  issues
end
module Homebrew extend self
  # `brew audit`: run every audit on each selected formula, print the
  # problems grouped per formula, and exit non-zero when any were found.
  def audit
    errors = false
    ff.each do |f|
      problems = []
      problems += audit_formula_instance f
      problems += audit_formula_urls f
      # Formula files are expected to be plain, non-executable files.
      perms = File.stat(f.path).mode
      if perms.to_s(8) != "100644"
        problems << " * permissions wrong; chmod 644 #{f.path}"
      end
      text = ""
      File.open(f.path, "r") { |afile| text = afile.read }
      # DATA with no __END__
      if (text =~ /\bDATA\b/) and not (text =~ /^\s*__END__\s*$/)
        problems << " * 'DATA' was found, but no '__END__'"
      end
      problems += [' * invalid or missing version'] if f.version.to_s.empty?
      # Don't try remaining audits on text in __END__
      text_without_patch = (text.split("__END__")[0]).strip()
      problems += audit_formula_text(f.name, text_without_patch)
      problems += audit_formula_options(f, text_without_patch)
      problems += audit_formula_version(f, text_without_patch)
      unless problems.empty?
        errors = true
        puts "#{f.name}:"
        puts problems * "\n"
        puts
      end
    end
    # Non-zero exit status signals audit failures to the caller.
    exit 1 if errors
  end
end
audit: warn about more "forbidden" dependencies
Signed-off-by: Jack Nagel <43386ce32af96f5c56f2a88e458cb94cebee3751@gmail.com>
require 'formula'
require 'utils'
# Use "brew audit --strict" to enable even stricter checks.
# True when "brew audit --strict" was requested.
def strict?
  ARGV.flag?("--strict")
end

# Formulae named on the command line, or every formula when none given.
def ff
  ARGV.named.empty? ? Formula.all : ARGV.formulae
end
# Scan a formula's source text for style problems and return the list
# of human-readable complaints (empty when the text is clean).
def audit_formula_text(name, text)
  issues = []
  # Class inheritance should read "class Foo < Formula".
  issues << " * Use a space in class inheritance: class Foo < #{$1}" if text =~ /<(Formula|AmazonWebServicesFormula)/
  # Commented-out cmake support from default template
  if (text =~ /# depends_on 'cmake'/) or (text =~ /# system "cmake/)
    issues << " * Commented cmake support found."
  end
  # 2 (or more in an if block) spaces before depends_on, please
  issues << " * Check indentation of 'depends_on'." if text =~ /^\ ?depends_on/
  # cmake, pkg-config, and scons are build-time deps
  if text =~ /depends_on ['"](cmake|pkg-config|scons)['"]$/
    issues << " * #{$1} dependency should be \"depends_on '#{$1}' => :build\""
  end
  # FileUtils is included in Formula
  issues << " * Don't need 'FileUtils.' before #{$1}." if text =~ /FileUtils\.(\w+)/
  # Check for long inreplace block vars
  if text =~ /inreplace .* do \|(.{2,})\|/
    issues << " * \"inreplace <filenames> do |s|\" is preferred over \"|#{$1}|\"."
  end
  # Check for string interpolation of single values.
  if text =~ /(system|inreplace|gsub!|change_make_var!) .* ['"]#\{(\w+)\}['"]/
    issues << " * Don't need to interpolate \"#{$2}\" with #{$1}"
  end
  # Check for string concatenation; prefer interpolation
  if text =~ /(#\{\w+\s*\+\s*['"][^}]+\})/
    issues << " * Try not to concatenate paths in string interpolation:\n #{$1}"
  end
  # Prefer formula path shortcuts in Pathname+
  if text =~ %r{\(\s*(prefix\s*\+\s*(['"])(bin|include|libexec|lib|sbin|share))}
    issues << " * \"(#{$1}...#{$2})\" should be \"(#{$3}+...)\""
  end
  if text =~ %r[((man)\s*\+\s*(['"])(man[1-8])(['"]))]
    issues << " * \"#{$1}\" should be \"#{$4}\""
  end
  # Prefer formula path shortcuts in strings
  if text =~ %r[(\#\{prefix\}/(bin|include|libexec|lib|sbin|share))]
    issues << " * \"#{$1}\" should be \"\#{#{$2}}\""
  end
  if text =~ %r[((\#\{prefix\}/share/man/|\#\{man\}/)(man[1-8]))]
    issues << " * \"#{$1}\" should be \"\#{#{$3}}\""
  end
  if text =~ %r[((\#\{share\}/(man)))[/'"]]
    issues << " * \"#{$1}\" should be \"\#{#{$3}}\""
  end
  if text =~ %r[(\#\{prefix\}/share/(info|man))]
    issues << " * \"#{$1}\" should be \"\#{#{$2}}\""
  end
  # Empty checksums
  %w[md5 sha1 sha256].each do |csum|
    issues << " * #{csum} is empty" if text =~ /#{csum}\s+(\'\'|\"\")/
  end
  # Commented-out depends_on
  issues << " * Commented-out dep #{$1}." if text =~ /#\s*depends_on\s+(.+)\s*$/
  # No trailing whitespace, please
  issues << " * Trailing whitespace was found." if text =~ /(\t|[ ])+$/
  issues << " * Use \"if ARGV.build_head?\" instead" if text =~ /if\s+ARGV\.include\?\s+'--HEAD'/
  issues << " * Use separate make calls." if text =~ /make && make/
  issues << " * Use spaces instead of tabs for indentation" if text =~ /^[ ]*\t/
  # Formula depends_on gfortran
  unless name == "gfortran" # Gfortran itself has this text in the caveats
    if text =~ /^\s*depends_on\s*(\'|\")gfortran(\'|\").*/
      issues << " * Use ENV.fortran during install instead of depends_on 'gfortran'"
    end
  end
  # xcodebuild should specify SYMROOT
  if text =~ /xcodebuild/ and not text =~ /SYMROOT=/
    issues << " * xcodebuild should be passed an explicit \"SYMROOT\""
  end
  # using ARGV.flag? for formula options is generally a bad thing
  issues << " * Use 'ARGV.include?' instead of 'ARGV.flag?'" if text =~ /ARGV\.flag\?/
  # MacPorts patches should specify a revision, not trunk
  if text =~ %r[macports/trunk]
    issues << " * MacPorts patches should specify a revision instead of trunk"
  end
  issues
end
# Cross-check options a formula tests for via ARGV.include? against the
# options it documents through the `options` DSL, reporting each option
# that is used-but-undocumented or documented-but-unused.
def audit_formula_options(f, text)
  issues = []
  # Options the formula actually checks for in its source text.
  used = text.scan(/ARGV\.include\?[ ]*\(?(['"])(.+?)\1/).map { |m| m[1] }
  used.reject! { |o| o.include? "#" }
  used.uniq!
  # Options the formula documents; any failure reading them (e.g. no
  # `options` method) is treated as "none documented".
  documented = begin
    f.options.map { |o| o[0] }.reject { |o| o.include? "=" }
  rescue
    []
  end
  used.each do |o|
    next if o == '--HEAD' || o == '--devel'
    issues << " * Option #{o} is not documented" unless documented.include? o
  end
  documented.each do |o|
    next if o == '--universal' and text =~ /ARGV\.build_universal\?/
    issues << " * Option #{o} is unused" unless used.include? o
  end
  issues
end
# Complain when the DSL-declared version merely repeats the version
# already derivable from the download URL.
def audit_formula_version(f, text)
  # Version as defined in the DSL (or "" when unset).
  version_text = f.class.send('version').to_s
  # Version as determined from the URL (a Homebrew Pathname extension).
  version_url = Pathname.new(f.url).version
  return [" * version #{version_text} is redundant with version scanned from url"] if version_url == version_text
  []
end
# Check the homepage, download and mirror URLs of a formula for known
# bad patterns and return the list of complaints.
def audit_formula_urls(f)
  issues = []
  unless f.homepage =~ %r[^https?://]
    issues << " * The homepage should start with http or https."
  end
  urls = [(f.url rescue nil), (f.head rescue nil)].compact
  urls.uniq! # head-only formulae result in duplicate entries
  f.mirrors.each do |m|
    mirror = m.values_at :url
    # NOTE(review): values_at returns an Array, so this appends its
    # #to_s; kept as-is to preserve the original behaviour.
    urls << (mirror.to_s rescue nil)
  end
  # Check SourceForge urls
  urls.each do |p|
    # Only filedownload URLs (not svnroot checkouts) are of interest.
    next if p =~ %r[/svnroot/]
    next if p =~ %r[svn\.sourceforge]
    next unless p =~ %r[^https?://.*\bsourceforge\.]
    issues << " * Update this url (don't use #{$1}use_mirror)." if p =~ /(\?|&)use_mirror=/
    issues << " * Update this url (don't use /download)." if p =~ /\/download$/
    issues << " * Update this url (don't use prdownloads)." if p =~ %r[^http://prdownloads\.]
    issues << " * Update this url (don't use specific dl mirrors)." if p =~ %r[^http://\w+\.dl\.]
  end
  # Check for git:// urls; https:// is preferred.
  urls.each do |p|
    issues << " * Use https:// URLs for accessing repositories on GitHub." if p =~ %r[^git://github\.com/]
  end
  # Check GNU urls
  urls.each do |p|
    issues << " * \"ftpmirror.gnu.org\" is preferred for GNU software." if p =~ %r[^(https?|ftp)://(.+)/gnu/]
  end
  issues
end
# Audit a loaded formula object: alias dependencies, unresolvable or
# discouraged dependencies, and Google Code homepage formatting.
def audit_formula_instance(f)
  issues = []
  # Don't depend_on aliases; use full name
  aliases = Formula.aliases
  f.deps.select { |d| aliases.include? d }.each do |d|
    issues << " * Dep #{d} is an alias; switch to the real name."
  end
  # Check for things we don't like to depend on.
  # We allow non-Homebrew installs whenever possible.
  f.deps.each do |d|
    begin
      Formula.factory d
    rescue
      issues << " * Can't find dependency \"#{d}\"."
    end
    case d
    when "git", "python", "ruby", "emacs"
      issues << " * Don't use #{d} as a dependency; we allow non-Homebrew #{d} installs."
    end
  end
  # Google Code homepages should end in a slash
  if f.homepage =~ %r[^https?://code\.google\.com/p/[^/]+[^/]$]
    issues << " * Google Code homepage should end with a slash."
  end
  issues
end
module Homebrew extend self
  # `brew audit`: run every audit on each selected formula, print the
  # problems grouped per formula, and exit non-zero when any were found.
  def audit
    errors = false
    ff.each do |f|
      problems = []
      problems += audit_formula_instance f
      problems += audit_formula_urls f
      # Formula files are expected to be plain, non-executable files.
      perms = File.stat(f.path).mode
      if perms.to_s(8) != "100644"
        problems << " * permissions wrong; chmod 644 #{f.path}"
      end
      text = ""
      File.open(f.path, "r") { |afile| text = afile.read }
      # DATA with no __END__
      if (text =~ /\bDATA\b/) and not (text =~ /^\s*__END__\s*$/)
        problems << " * 'DATA' was found, but no '__END__'"
      end
      problems += [' * invalid or missing version'] if f.version.to_s.empty?
      # Don't try remaining audits on text in __END__
      text_without_patch = (text.split("__END__")[0]).strip()
      problems += audit_formula_text(f.name, text_without_patch)
      problems += audit_formula_options(f, text_without_patch)
      problems += audit_formula_version(f, text_without_patch)
      unless problems.empty?
        errors = true
        puts "#{f.name}:"
        puts problems * "\n"
        puts
      end
    end
    # Non-zero exit status signals audit failures to the caller.
    exit 1 if errors
  end
end
|
# See: #10295 for more details.
#
# This is a workaround for bug: #4248 whereby ruby files outside of the normal
# provider/type path do not load until pluginsync has occurred on the puppetmaster
#
# In this case I'm trying the relative path first, then falling back to normal
# mechanisms. This should be fixed in future versions of puppet but it looks
# like we'll need to maintain this for some time perhaps.
$LOAD_PATH.unshift(File.join(File.dirname(__FILE__),"..",".."))
require 'puppet/util/firewall'
Puppet::Type.newtype(:firewall) do
include Puppet::Util::Firewall
@doc = <<-EOS
This type provides the capability to manage firewall rules within
puppet.
**Autorequires:** If Puppet is managing the iptables or ip6tables chains
specified in the `chain` or `jump` parameters, the firewall resource
will autorequire those firewallchain resources.
EOS
feature :rate_limiting, "Rate limiting features."
feature :snat, "Source NATing"
feature :dnat, "Destination NATing"
feature :interface_match, "Interface matching"
feature :icmp_match, "Matching ICMP types"
feature :owner, "Matching owners"
feature :state_match, "Matching stateful firewall states"
feature :reject_type, "The ability to control reject messages"
feature :log_level, "The ability to control the log level"
feature :log_prefix, "The ability to add prefixes to log messages"
feature :mark, "Set the netfilter mark value associated with the packet"
feature :tcp_flags, "The ability to match on particular TCP flag settings"
feature :pkttype, "Match a packet type"
feature :socket, "Match open sockets"
# provider specific features
feature :iptables, "The provider provides iptables features."
# ensure => present inserts the rule via the provider; absent deletes it.
ensurable do
desc <<-EOS
Manage the state of this rule. The default action is *present*.
EOS
newvalue(:present) do
provider.insert
end
newvalue(:absent) do
provider.delete
end
defaultto :present
end
# Namevar: rule names must begin with digits so resources sort into a
# deterministic apply order.
newparam(:name) do
desc <<-EOS
The canonical name of the rule. This name is also used for ordering
so make sure you prefix the rule with a number:
000 this runs first
999 this runs last
Depending on the provider, the name of the rule can be stored using
the comment feature of the underlying firewall subsystem.
EOS
isnamevar
# Keep rule names simple - they must start with a number
newvalues(/^\d+[[:alpha:][:digit:][:punct:][:space:]]+$/)
end
# Generic terminal action; mutually exclusive with :jump (see validate).
newproperty(:action) do
desc <<-EOS
This is the action to perform on a match. Can be one of:
* accept - the packet is accepted
* reject - the packet is rejected with a suitable ICMP response
* drop - the packet is dropped
If you specify no value it will simply match the rule but perform no
action unless you provide a provider specific parameter (such as *jump*).
EOS
newvalues(:accept, :reject, :drop)
end
# Generic matching properties
newproperty(:source) do
desc <<-EOS
An array of source addresses. For example:
source => '192.168.2.0/24'
The source can also be an IPv6 address if your provider supports it.
EOS
# host_to_ip (Puppet::Util::Firewall) normalizes hostnames/CIDR to IP form.
munge do |value|
@resource.host_to_ip(value)
end
end
newproperty(:destination) do
desc <<-EOS
An array of destination addresses to match. For example:
destination => '192.168.1.0/24'
The destination can also be an IPv6 address if your provider supports it.
EOS
munge do |value|
@resource.host_to_ip(value)
end
end
# Source port(s); single value, array, or "start-end" range per element.
newproperty(:sport, :array_matching => :all) do
desc <<-EOS
The source port to match for this filter (if the protocol supports
ports). Will accept a single element or an array.
For some firewall providers you can pass a range of ports in the format:
<start_number>-<ending_number>
For example:
1-1024
This would cover ports 1 to 1024.
EOS
munge do |value|
@resource.string_to_port(value, :proto)
end
# Render current/desired values identically so diffs compare cleanly.
def is_to_s(value)
should_to_s(value)
end
def should_to_s(value)
value = [value] unless value.is_a?(Array)
value.join(',')
end
end
newproperty(:dport, :array_matching => :all) do
desc <<-EOS
The destination port to match for this filter (if the protocol supports
ports). Will accept a single element or an array.
For some firewall providers you can pass a range of ports in the format:
<start_number>-<ending_number>
For example:
1-1024
This would cover ports 1 to 1024.
EOS
munge do |value|
@resource.string_to_port(value, :proto)
end
def is_to_s(value)
should_to_s(value)
end
def should_to_s(value)
value = [value] unless value.is_a?(Array)
value.join(',')
end
end
newproperty(:port, :array_matching => :all) do
desc <<-EOS
The destination or source port to match for this filter (if the protocol
supports ports). Will accept a single element or an array.
For some firewall providers you can pass a range of ports in the format:
<start_number>-<ending_number>
For example:
1-1024
This would cover ports 1 to 1024.
EOS
munge do |value|
@resource.string_to_port(value, :proto)
end
def is_to_s(value)
should_to_s(value)
end
def should_to_s(value)
value = [value] unless value.is_a?(Array)
value.join(',')
end
end
newproperty(:proto) do
desc <<-EOS
The specific protocol to match for this rule. By default this is
*tcp*.
EOS
newvalues(:tcp, :udp, :icmp, :"ipv6-icmp", :esp, :ah, :vrrp, :igmp, :ipencap, :ospf, :gre, :all)
defaultto "tcp"
end
# tcp-specific
newproperty(:tcp_flags, :required_features => :tcp_flags) do
desc <<-EOS
Match when the TCP flags are as specified.
Is a string with a list of comma-separated flag names for the mask,
then a space, then a comma-separated list of flags that should be set.
The flags are: SYN ACK FIN RST URG PSH ALL NONE
Note that you specify them in the order that iptables --list-rules
would list them to avoid having puppet think you changed the flags.
Example: FIN,SYN,RST,ACK SYN matches packets with the SYN bit set and the
ACK,RST and FIN bits cleared. Such packets are used to request
TCP connection initiation.
EOS
end
# Iptables specific
newproperty(:chain, :required_features => :iptables) do
desc <<-EOS
Name of the chain to use. Can be one of the built-ins:
* INPUT
* FORWARD
* OUTPUT
* PREROUTING
* POSTROUTING
Or you can provide a user-based chain.
The default value is 'INPUT'.
EOS
defaultto "INPUT"
newvalue(/^[a-zA-Z0-9\-_]+$/)
end
newproperty(:table, :required_features => :iptables) do
desc <<-EOS
Table to use. Can be one of:
* nat
* mangle
* filter
* raw
* rawpost
By default the setting is 'filter'.
EOS
newvalues(:nat, :mangle, :filter, :raw, :rawpost)
defaultto "filter"
end
# The iptables --jump target (QUEUE, RETURN, DNAT, SNAT, LOG, MASQUERADE,
# REDIRECT, MARK, or any user-defined chain). ACCEPT/DROP/REJECT must be
# expressed through the generic :action property instead.
newproperty(:jump, :required_features => :iptables) do
  desc <<-EOS
    The value for the iptables --jump parameter. Normal values are:
    * QUEUE
    * RETURN
    * DNAT
    * SNAT
    * LOG
    * MASQUERADE
    * REDIRECT
    * MARK
    But any valid chain name is allowed.
    For the values ACCEPT, DROP and REJECT you must use the generic
    'action' parameter. This is to enforce the use of generic parameters where
    possible for maximum cross-platform modelling.
    If you set both 'accept' and 'jump' parameters, you will get an error as
    only one of the options should be set.
  EOS
  validate do |value|
    # Chain names may only contain alphanumerics, underscores and hyphens.
    # (Fixed typo in the error message: "yphen" -> "hyphen".)
    unless value =~ /^[a-zA-Z0-9\-_]+$/
      raise ArgumentError, <<-EOS
        Jump destination must consist of alphanumeric characters, an
        underscore or a hyphen.
      EOS
    end
    # ACCEPT/REJECT/DROP are modelled by the generic :action property.
    if ["accept","reject","drop"].include?(value.downcase)
      raise ArgumentError, <<-EOS
        Jump destination should not be one of ACCEPT, REJECT or DROP. Use
        the action property instead.
      EOS
    end
  end
end
# Interface specific matching properties
newproperty(:iniface, :required_features => :interface_match) do
desc <<-EOS
Input interface to filter on.
EOS
newvalues(/^[a-zA-Z0-9\-\._\+]+$/)
end
newproperty(:outiface, :required_features => :interface_match) do
desc <<-EOS
Output interface to filter on.
EOS
newvalues(/^[a-zA-Z0-9\-\._\+]+$/)
end
# NAT specific properties
# SNAT target address ("--to-source"); only meaningful with jump => "SNAT".
newproperty(:tosource, :required_features => :snat) do
desc <<-EOS
When using jump => "SNAT" you can specify the new source address using
this parameter.
EOS
end
# DNAT target address ("--to-destination"); only meaningful with jump => "DNAT".
# (Fixed desc typo: "paramter" -> "parameter".)
newproperty(:todest, :required_features => :dnat) do
  desc <<-EOS
    When using jump => "DNAT" you can specify the new destination address
    using this parameter.
  EOS
end
# Replacement destination port for DNAT/REDIRECT.
newproperty(:toports, :required_features => :dnat) do
desc <<-EOS
For DNAT this is the port that will replace the destination port.
EOS
end
# Reject ICMP type
newproperty(:reject, :required_features => :reject_type) do
desc <<-EOS
When combined with jump => "REJECT" you can specify a different icmp
response to be sent back to the packet sender.
EOS
end
# Logging properties
newproperty(:log_level, :required_features => :log_level) do
desc <<-EOS
When combined with jump => "LOG" specifies the system log level to log
to.
EOS
munge do |value|
if value.kind_of?(String)
value = @resource.log_level_name_to_number(value)
else
value
end
# NOTE(review): "&& value != ''" is redundant — nil never equals "" — so
# this is effectively just a nil check on the translated level.
if value == nil && value != ""
self.fail("Unable to determine log level")
end
value
end
end
newproperty(:log_prefix, :required_features => :log_prefix) do
desc <<-EOS
When combined with jump => "LOG" specifies the log prefix to use when
logging.
EOS
end
# ICMP matching property
newproperty(:icmp, :required_features => :icmp_match) do
desc <<-EOS
When matching ICMP packets, this is the type of ICMP packet to match.
A value of "any" is not supported. To achieve this behaviour the
parameter should simply be omitted or undefined.
EOS
validate do |value|
if value == "any"
raise ArgumentError,
"Value 'any' is not valid. This behaviour should be achieved " \
"by omitting or undefining the ICMP parameter."
end
end
munge do |value|
if value.kind_of?(String)
# ICMP codes differ between IPv4 and IPv6.
case @resource[:provider]
when :iptables
protocol = 'inet'
when :ip6tables
protocol = 'inet6'
else
self.fail("cannot work out protocol family")
end
value = @resource.icmp_name_to_number(value, protocol)
else
value
end
if value == nil && value != ""
self.fail("cannot work out icmp type")
end
value
end
end
newproperty(:state, :array_matching => :all, :required_features =>
:state_match) do
desc <<-EOS
Matches a packet based on its state in the firewall stateful inspection
table. Values can be:
* INVALID
* ESTABLISHED
* NEW
* RELATED
EOS
newvalues(:INVALID,:ESTABLISHED,:NEW,:RELATED)
# States should always be sorted. This normalizes the resource states to
# keep it consistent with the sorted result from iptables-save.
def should=(values)
@should = super(values).sort_by {|sym| sym.to_s}
end
def is_to_s(value)
should_to_s(value)
end
def should_to_s(value)
value = [value] unless value.is_a?(Array)
value.join(',')
end
end
# Rate limiting properties
# Rate limit for matched packets ("-m limit --limit").
# Fixed desc: removed stray trailing quote and cleaned up the unit syntax.
newproperty(:limit, :required_features => :rate_limiting) do
  desc <<-EOS
    Rate limiting value for matched packets. The format is:
    rate/[second|minute|hour|day].
    Example values are: '50/sec', '40/min', '30/hour', '10/day'.
  EOS
end
newproperty(:burst, :required_features => :rate_limiting) do
desc <<-EOS
Rate limiting burst value (per second) before limit checks apply.
EOS
newvalue(/^\d+$/)
end
# Owner matching (OUTPUT/POSTROUTING only — see validate below).
newproperty(:uid, :required_features => :owner) do
desc <<-EOS
UID or Username owner matching rule. Accepts a string argument
only, as iptables does not accept multiple uid in a single
statement.
EOS
end
newproperty(:gid, :required_features => :owner) do
desc <<-EOS
GID or Group owner matching rule. Accepts a string argument
only, as iptables does not accept multiple gid in a single
statement.
EOS
end
# Normalizes "mark" or "mark/mask" to hex, appending a default mask on
# iptables >= 1.4.1 (which requires one) and rejecting masks on older versions.
newproperty(:set_mark, :required_features => :mark) do
desc <<-EOS
Set the Netfilter mark value associated with the packet. Accepts either of:
mark/mask or mark. These will be converted to hex if they are not already.
EOS
munge do |value|
int_or_hex = '[a-fA-F0-9x]'
match = value.to_s.match("(#{int_or_hex}+)(/)?(#{int_or_hex}+)?")
mark = @resource.to_hex32(match[1])
# Values that can't be converted to hex.
# Or contain a trailing slash with no mask.
if mark.nil? or (mark and match[2] and match[3].nil?)
raise ArgumentError, "MARK value must be integer or hex between 0 and 0xffffffff"
end
# Old iptables does not support a mask. New iptables will expect one.
iptables_version = Facter.fact('iptables_version').value
mask_required = (iptables_version and Puppet::Util::Package.versioncmp(iptables_version, '1.4.1') >= 0)
if mask_required
if match[3].nil?
value = "#{mark}/0xffffffff"
else
mask = @resource.to_hex32(match[3])
if mask.nil?
raise ArgumentError, "MARK mask must be integer or hex between 0 and 0xffffffff"
end
value = "#{mark}/#{mask}"
end
else
unless match[3].nil?
raise ArgumentError, "iptables version #{iptables_version} does not support masks on MARK rules"
end
value = mark
end
value
end
end
newproperty(:pkttype, :required_features => :pkttype) do
desc <<-EOS
Sets the packet type to match.
EOS
newvalues(:unicast, :broadcast, :multicast)
end
# "-m socket" match. (Fixed desc typo: "coket" -> "socket".)
newproperty(:socket, :required_features => :socket) do
  desc <<-EOS
    If true, matches if an open socket can be found by doing a socket lookup
    on the packet.
  EOS
  newvalues(:true, :false)
end
# Internal cache of the raw iptables rule line, populated by the provider.
newparam(:line) do
desc <<-EOS
Read-only property for caching the rule line.
EOS
end
# Autorequire the firewallchain resources for both the rule's chain and its
# jump target, keyed as "CHAIN:table:protocol".
autorequire(:firewallchain) do
case value(:provider)
when :iptables
protocol = "IPv4"
when :ip6tables
protocol = "IPv6"
else
return
end
reqs = []
[value(:chain), value(:jump)].each do |chain|
reqs << "#{chain}:#{value(:table)}:#{protocol}" unless chain.nil?
end
reqs
end
# Cross-parameter sanity checks: valid chain/table combinations, and
# per-parameter restrictions (owner matching, NAT targets, logging, etc.).
validate do
  debug("[validate]")
  # TODO: this is put here to skip validation if ensure is not set. This
  # is because there is a revalidation stage called later where the values
  # are not set correctly. I tried tracing it - but have put in this
  # workaround instead to skip. Must get to the bottom of this.
  if ! value(:ensure)
    return
  end
  # First we make sure the chains and tables are valid combinations
  if value(:table).to_s == "filter" &&
    value(:chain) =~ /PREROUTING|POSTROUTING/
    self.fail "PREROUTING and POSTROUTING cannot be used in table 'filter'"
  end
  if value(:table).to_s == "nat" && value(:chain) =~ /INPUT|FORWARD/
    self.fail "INPUT and FORWARD cannot be used in table 'nat'"
  end
  if value(:table).to_s == "raw" &&
    value(:chain) =~ /INPUT|FORWARD|POSTROUTING/
    self.fail "INPUT, FORWARD and POSTROUTING cannot be used in table raw"
  end
  # Now we analyse the individual properties to make sure they apply to
  # the correct combinations.
  if value(:iniface)
    unless value(:chain).to_s =~ /INPUT|FORWARD|PREROUTING/
      self.fail "Parameter iniface only applies to chains " \
        "INPUT,FORWARD,PREROUTING"
    end
  end
  if value(:outiface)
    unless value(:chain).to_s =~ /OUTPUT|FORWARD|POSTROUTING/
      self.fail "Parameter outiface only applies to chains " \
        "OUTPUT,FORWARD,POSTROUTING"
    end
  end
  if value(:uid)
    unless value(:chain).to_s =~ /OUTPUT|POSTROUTING/
      self.fail "Parameter uid only applies to chains " \
        "OUTPUT,POSTROUTING"
    end
  end
  if value(:gid)
    unless value(:chain).to_s =~ /OUTPUT|POSTROUTING/
      self.fail "Parameter gid only applies to chains " \
        "OUTPUT,POSTROUTING"
    end
  end
  if value(:set_mark)
    unless value(:jump).to_s =~ /MARK/ &&
      value(:chain).to_s =~ /PREROUTING|OUTPUT/ &&
      value(:table).to_s =~ /mangle/
      self.fail "Parameter set_mark only applies to " \
        "the PREROUTING or OUTPUT chain of the mangle table and when jump => MARK"
    end
  end
  if value(:dport)
    unless value(:proto).to_s =~ /tcp|udp|sctp/
      self.fail "[%s] Parameter dport only applies to sctp, tcp and udp " \
        "protocols. Current protocol is [%s] and dport is [%s]" %
        [value(:name), should(:proto), should(:dport)]
    end
  end
  if value(:jump).to_s == "DNAT"
    unless value(:table).to_s =~ /nat/
      self.fail "Parameter jump => DNAT only applies to table => nat"
    end
    unless value(:todest)
      self.fail "Parameter jump => DNAT must have todest parameter"
    end
  end
  if value(:jump).to_s == "SNAT"
    unless value(:table).to_s =~ /nat/
      self.fail "Parameter jump => SNAT only applies to table => nat"
    end
    unless value(:tosource)
      # Bug fix: this message previously said "jump => DNAT" in the SNAT check.
      self.fail "Parameter jump => SNAT must have tosource parameter"
    end
  end
  if value(:jump).to_s == "REDIRECT"
    unless value(:toports)
      self.fail "Parameter jump => REDIRECT missing mandatory toports " \
        "parameter"
    end
  end
  if value(:jump).to_s == "MASQUERADE"
    unless value(:table).to_s =~ /nat/
      self.fail "Parameter jump => MASQUERADE only applies to table => nat"
    end
  end
  if value(:log_prefix) || value(:log_level)
    unless value(:jump).to_s == "LOG"
      self.fail "Parameter log_prefix and log_level require jump => LOG"
    end
  end
  if value(:burst) && ! value(:limit)
    self.fail "burst makes no sense without limit"
  end
  if value(:action) && value(:jump)
    self.fail "Only one of the parameters 'action' and 'jump' can be set"
  end
end
end
Fix error reporting for insane hostnames.
If you put some really silly values (e.g. /) into hostnames, then
the error message is super cryptic.
This patch fixes that, so it's at least obvious what / where / why it's
failing if you use --trace --debug.
# See: #10295 for more details.
#
# This is a workaround for bug: #4248 whereby ruby files outside of the normal
# provider/type path do not load until pluginsync has occurred on the puppetmaster.
#
# In this case I'm trying the relative path first, then falling back to normal
# mechanisms. This should be fixed in future versions of puppet but it looks
# like we'll need to maintain this for some time perhaps.
$LOAD_PATH.unshift(File.join(File.dirname(__FILE__),"..",".."))
require 'puppet/util/firewall'
# Custom type managing individual iptables/ip6tables firewall rules.
Puppet::Type.newtype(:firewall) do
include Puppet::Util::Firewall
@doc = <<-EOS
This type provides the capability to manage firewall rules within
puppet.
**Autorequires:** If Puppet is managing the iptables or ip6tables chains
specified in the `chain` or `jump` parameters, the firewall resource
will autorequire those firewallchain resources.
EOS
# Capability flags that providers declare support for.
feature :rate_limiting, "Rate limiting features."
feature :snat, "Source NATing"
feature :dnat, "Destination NATing"
feature :interface_match, "Interface matching"
feature :icmp_match, "Matching ICMP types"
feature :owner, "Matching owners"
feature :state_match, "Matching stateful firewall states"
feature :reject_type, "The ability to control reject messages"
feature :log_level, "The ability to control the log level"
feature :log_prefix, "The ability to add prefixes to log messages"
feature :mark, "Set the netfilter mark value associated with the packet"
feature :tcp_flags, "The ability to match on particular TCP flag settings"
feature :pkttype, "Match a packet type"
feature :socket, "Match open sockets"
# provider specific features
feature :iptables, "The provider provides iptables features."
# ensure => present inserts the rule via the provider; absent deletes it.
ensurable do
desc <<-EOS
Manage the state of this rule. The default action is *present*.
EOS
newvalue(:present) do
provider.insert
end
newvalue(:absent) do
provider.delete
end
defaultto :present
end
newparam(:name) do
desc <<-EOS
The canonical name of the rule. This name is also used for ordering
so make sure you prefix the rule with a number:
000 this runs first
999 this runs last
Depending on the provider, the name of the rule can be stored using
the comment feature of the underlying firewall subsystem.
EOS
isnamevar
# Keep rule names simple - they must start with a number
newvalues(/^\d+[[:alpha:][:digit:][:punct:][:space:]]+$/)
end
newproperty(:action) do
desc <<-EOS
This is the action to perform on a match. Can be one of:
* accept - the packet is accepted
* reject - the packet is rejected with a suitable ICMP response
* drop - the packet is dropped
If you specify no value it will simply match the rule but perform no
action unless you provide a provider specific parameter (such as *jump*).
EOS
newvalues(:accept, :reject, :drop)
end
# Generic matching properties
newproperty(:source) do
  desc <<-EOS
    An array of source addresses. For example:
    source => '192.168.2.0/24'
    The source can also be an IPv6 address if your provider supports it.
  EOS
  munge do |value|
    # Surface a readable failure for unresolvable/insane hostnames.
    # NOTE(review): rescuing Exception is very broad, but kept for
    # consistency with the original error-reporting patch.
    begin
      @resource.host_to_ip(value)
    rescue Exception => e
      self.fail("host_to_ip failed for #{value}, exception #{e}")
    end
  end
end
newproperty(:destination) do
  desc <<-EOS
    An array of destination addresses to match. For example:
    destination => '192.168.1.0/24'
    The destination can also be an IPv6 address if your provider supports it.
  EOS
  munge do |value|
    # Consistency fix: the error-reporting rescue was added to :source but
    # missed here; :destination suffers the same cryptic failure otherwise.
    begin
      @resource.host_to_ip(value)
    rescue Exception => e
      self.fail("host_to_ip failed for #{value}, exception #{e}")
    end
  end
end
# Source port(s); single value, array, or "start-end" range per element.
newproperty(:sport, :array_matching => :all) do
desc <<-EOS
The source port to match for this filter (if the protocol supports
ports). Will accept a single element or an array.
For some firewall providers you can pass a range of ports in the format:
<start_number>-<ending_number>
For example:
1-1024
This would cover ports 1 to 1024.
EOS
munge do |value|
@resource.string_to_port(value, :proto)
end
# Render current/desired values identically so diffs compare cleanly.
def is_to_s(value)
should_to_s(value)
end
def should_to_s(value)
value = [value] unless value.is_a?(Array)
value.join(',')
end
end
newproperty(:dport, :array_matching => :all) do
desc <<-EOS
The destination port to match for this filter (if the protocol supports
ports). Will accept a single element or an array.
For some firewall providers you can pass a range of ports in the format:
<start_number>-<ending_number>
For example:
1-1024
This would cover ports 1 to 1024.
EOS
munge do |value|
@resource.string_to_port(value, :proto)
end
def is_to_s(value)
should_to_s(value)
end
def should_to_s(value)
value = [value] unless value.is_a?(Array)
value.join(',')
end
end
newproperty(:port, :array_matching => :all) do
desc <<-EOS
The destination or source port to match for this filter (if the protocol
supports ports). Will accept a single element or an array.
For some firewall providers you can pass a range of ports in the format:
<start_number>-<ending_number>
For example:
1-1024
This would cover ports 1 to 1024.
EOS
munge do |value|
@resource.string_to_port(value, :proto)
end
def is_to_s(value)
should_to_s(value)
end
def should_to_s(value)
value = [value] unless value.is_a?(Array)
value.join(',')
end
end
newproperty(:proto) do
desc <<-EOS
The specific protocol to match for this rule. By default this is
*tcp*.
EOS
newvalues(:tcp, :udp, :icmp, :"ipv6-icmp", :esp, :ah, :vrrp, :igmp, :ipencap, :ospf, :gre, :all)
defaultto "tcp"
end
# tcp-specific
newproperty(:tcp_flags, :required_features => :tcp_flags) do
desc <<-EOS
Match when the TCP flags are as specified.
Is a string with a list of comma-separated flag names for the mask,
then a space, then a comma-separated list of flags that should be set.
The flags are: SYN ACK FIN RST URG PSH ALL NONE
Note that you specify them in the order that iptables --list-rules
would list them to avoid having puppet think you changed the flags.
Example: FIN,SYN,RST,ACK SYN matches packets with the SYN bit set and the
ACK,RST and FIN bits cleared. Such packets are used to request
TCP connection initiation.
EOS
end
# Iptables specific
newproperty(:chain, :required_features => :iptables) do
desc <<-EOS
Name of the chain to use. Can be one of the built-ins:
* INPUT
* FORWARD
* OUTPUT
* PREROUTING
* POSTROUTING
Or you can provide a user-based chain.
The default value is 'INPUT'.
EOS
defaultto "INPUT"
newvalue(/^[a-zA-Z0-9\-_]+$/)
end
newproperty(:table, :required_features => :iptables) do
desc <<-EOS
Table to use. Can be one of:
* nat
* mangle
* filter
* raw
* rawpost
By default the setting is 'filter'.
EOS
newvalues(:nat, :mangle, :filter, :raw, :rawpost)
defaultto "filter"
end
# The iptables --jump target (QUEUE, RETURN, DNAT, SNAT, LOG, MASQUERADE,
# REDIRECT, MARK, or any user-defined chain). ACCEPT/DROP/REJECT must be
# expressed through the generic :action property instead.
newproperty(:jump, :required_features => :iptables) do
  desc <<-EOS
    The value for the iptables --jump parameter. Normal values are:
    * QUEUE
    * RETURN
    * DNAT
    * SNAT
    * LOG
    * MASQUERADE
    * REDIRECT
    * MARK
    But any valid chain name is allowed.
    For the values ACCEPT, DROP and REJECT you must use the generic
    'action' parameter. This is to enforce the use of generic parameters where
    possible for maximum cross-platform modelling.
    If you set both 'accept' and 'jump' parameters, you will get an error as
    only one of the options should be set.
  EOS
  validate do |value|
    # Chain names may only contain alphanumerics, underscores and hyphens.
    # (Fixed typo in the error message: "yphen" -> "hyphen".)
    unless value =~ /^[a-zA-Z0-9\-_]+$/
      raise ArgumentError, <<-EOS
        Jump destination must consist of alphanumeric characters, an
        underscore or a hyphen.
      EOS
    end
    # ACCEPT/REJECT/DROP are modelled by the generic :action property.
    if ["accept","reject","drop"].include?(value.downcase)
      raise ArgumentError, <<-EOS
        Jump destination should not be one of ACCEPT, REJECT or DROP. Use
        the action property instead.
      EOS
    end
  end
end
# Interface specific matching properties
newproperty(:iniface, :required_features => :interface_match) do
desc <<-EOS
Input interface to filter on.
EOS
newvalues(/^[a-zA-Z0-9\-\._\+]+$/)
end
newproperty(:outiface, :required_features => :interface_match) do
desc <<-EOS
Output interface to filter on.
EOS
newvalues(/^[a-zA-Z0-9\-\._\+]+$/)
end
# NAT specific properties
# SNAT target address ("--to-source"); only meaningful with jump => "SNAT".
newproperty(:tosource, :required_features => :snat) do
desc <<-EOS
When using jump => "SNAT" you can specify the new source address using
this parameter.
EOS
end
# DNAT target address ("--to-destination"); only meaningful with jump => "DNAT".
# (Fixed desc typo: "paramter" -> "parameter".)
newproperty(:todest, :required_features => :dnat) do
  desc <<-EOS
    When using jump => "DNAT" you can specify the new destination address
    using this parameter.
  EOS
end
# Replacement destination port for DNAT/REDIRECT.
newproperty(:toports, :required_features => :dnat) do
desc <<-EOS
For DNAT this is the port that will replace the destination port.
EOS
end
# Reject ICMP type
newproperty(:reject, :required_features => :reject_type) do
desc <<-EOS
When combined with jump => "REJECT" you can specify a different icmp
response to be sent back to the packet sender.
EOS
end
# Logging properties
newproperty(:log_level, :required_features => :log_level) do
desc <<-EOS
When combined with jump => "LOG" specifies the system log level to log
to.
EOS
munge do |value|
if value.kind_of?(String)
value = @resource.log_level_name_to_number(value)
else
value
end
# NOTE(review): "&& value != ''" is redundant — nil never equals "" — so
# this is effectively just a nil check on the translated level.
if value == nil && value != ""
self.fail("Unable to determine log level")
end
value
end
end
newproperty(:log_prefix, :required_features => :log_prefix) do
desc <<-EOS
When combined with jump => "LOG" specifies the log prefix to use when
logging.
EOS
end
# ICMP matching property
newproperty(:icmp, :required_features => :icmp_match) do
desc <<-EOS
When matching ICMP packets, this is the type of ICMP packet to match.
A value of "any" is not supported. To achieve this behaviour the
parameter should simply be omitted or undefined.
EOS
validate do |value|
if value == "any"
raise ArgumentError,
"Value 'any' is not valid. This behaviour should be achieved " \
"by omitting or undefining the ICMP parameter."
end
end
munge do |value|
if value.kind_of?(String)
# ICMP codes differ between IPv4 and IPv6.
case @resource[:provider]
when :iptables
protocol = 'inet'
when :ip6tables
protocol = 'inet6'
else
self.fail("cannot work out protocol family")
end
value = @resource.icmp_name_to_number(value, protocol)
else
value
end
if value == nil && value != ""
self.fail("cannot work out icmp type")
end
value
end
end
newproperty(:state, :array_matching => :all, :required_features =>
:state_match) do
desc <<-EOS
Matches a packet based on its state in the firewall stateful inspection
table. Values can be:
* INVALID
* ESTABLISHED
* NEW
* RELATED
EOS
newvalues(:INVALID,:ESTABLISHED,:NEW,:RELATED)
# States should always be sorted. This normalizes the resource states to
# keep it consistent with the sorted result from iptables-save.
def should=(values)
@should = super(values).sort_by {|sym| sym.to_s}
end
def is_to_s(value)
should_to_s(value)
end
def should_to_s(value)
value = [value] unless value.is_a?(Array)
value.join(',')
end
end
# Rate limiting properties
# Rate limit for matched packets ("-m limit --limit").
# Fixed desc: removed stray trailing quote and cleaned up the unit syntax.
newproperty(:limit, :required_features => :rate_limiting) do
  desc <<-EOS
    Rate limiting value for matched packets. The format is:
    rate/[second|minute|hour|day].
    Example values are: '50/sec', '40/min', '30/hour', '10/day'.
  EOS
end
newproperty(:burst, :required_features => :rate_limiting) do
desc <<-EOS
Rate limiting burst value (per second) before limit checks apply.
EOS
newvalue(/^\d+$/)
end
# Owner matching (OUTPUT/POSTROUTING only — see validate below).
newproperty(:uid, :required_features => :owner) do
desc <<-EOS
UID or Username owner matching rule. Accepts a string argument
only, as iptables does not accept multiple uid in a single
statement.
EOS
end
newproperty(:gid, :required_features => :owner) do
desc <<-EOS
GID or Group owner matching rule. Accepts a string argument
only, as iptables does not accept multiple gid in a single
statement.
EOS
end
# Normalizes "mark" or "mark/mask" to hex, appending a default mask on
# iptables >= 1.4.1 (which requires one) and rejecting masks on older versions.
newproperty(:set_mark, :required_features => :mark) do
desc <<-EOS
Set the Netfilter mark value associated with the packet. Accepts either of:
mark/mask or mark. These will be converted to hex if they are not already.
EOS
munge do |value|
int_or_hex = '[a-fA-F0-9x]'
match = value.to_s.match("(#{int_or_hex}+)(/)?(#{int_or_hex}+)?")
mark = @resource.to_hex32(match[1])
# Values that can't be converted to hex.
# Or contain a trailing slash with no mask.
if mark.nil? or (mark and match[2] and match[3].nil?)
raise ArgumentError, "MARK value must be integer or hex between 0 and 0xffffffff"
end
# Old iptables does not support a mask. New iptables will expect one.
iptables_version = Facter.fact('iptables_version').value
mask_required = (iptables_version and Puppet::Util::Package.versioncmp(iptables_version, '1.4.1') >= 0)
if mask_required
if match[3].nil?
value = "#{mark}/0xffffffff"
else
mask = @resource.to_hex32(match[3])
if mask.nil?
raise ArgumentError, "MARK mask must be integer or hex between 0 and 0xffffffff"
end
value = "#{mark}/#{mask}"
end
else
unless match[3].nil?
raise ArgumentError, "iptables version #{iptables_version} does not support masks on MARK rules"
end
value = mark
end
value
end
end
newproperty(:pkttype, :required_features => :pkttype) do
desc <<-EOS
Sets the packet type to match.
EOS
newvalues(:unicast, :broadcast, :multicast)
end
# "-m socket" match. (Fixed desc typo: "coket" -> "socket".)
newproperty(:socket, :required_features => :socket) do
  desc <<-EOS
    If true, matches if an open socket can be found by doing a socket lookup
    on the packet.
  EOS
  newvalues(:true, :false)
end
# Internal cache of the raw iptables rule line, populated by the provider.
newparam(:line) do
desc <<-EOS
Read-only property for caching the rule line.
EOS
end
# Autorequire the firewallchain resources for both the rule's chain and its
# jump target, keyed as "CHAIN:table:protocol".
autorequire(:firewallchain) do
case value(:provider)
when :iptables
protocol = "IPv4"
when :ip6tables
protocol = "IPv6"
else
return
end
reqs = []
[value(:chain), value(:jump)].each do |chain|
reqs << "#{chain}:#{value(:table)}:#{protocol}" unless chain.nil?
end
reqs
end
# Cross-parameter sanity checks: valid chain/table combinations, and
# per-parameter restrictions (owner matching, NAT targets, logging, etc.).
validate do
  debug("[validate]")
  # TODO: this is put here to skip validation if ensure is not set. This
  # is because there is a revalidation stage called later where the values
  # are not set correctly. I tried tracing it - but have put in this
  # workaround instead to skip. Must get to the bottom of this.
  if ! value(:ensure)
    return
  end
  # First we make sure the chains and tables are valid combinations
  if value(:table).to_s == "filter" &&
    value(:chain) =~ /PREROUTING|POSTROUTING/
    self.fail "PREROUTING and POSTROUTING cannot be used in table 'filter'"
  end
  if value(:table).to_s == "nat" && value(:chain) =~ /INPUT|FORWARD/
    self.fail "INPUT and FORWARD cannot be used in table 'nat'"
  end
  if value(:table).to_s == "raw" &&
    value(:chain) =~ /INPUT|FORWARD|POSTROUTING/
    self.fail "INPUT, FORWARD and POSTROUTING cannot be used in table raw"
  end
  # Now we analyse the individual properties to make sure they apply to
  # the correct combinations.
  if value(:iniface)
    unless value(:chain).to_s =~ /INPUT|FORWARD|PREROUTING/
      self.fail "Parameter iniface only applies to chains " \
        "INPUT,FORWARD,PREROUTING"
    end
  end
  if value(:outiface)
    unless value(:chain).to_s =~ /OUTPUT|FORWARD|POSTROUTING/
      self.fail "Parameter outiface only applies to chains " \
        "OUTPUT,FORWARD,POSTROUTING"
    end
  end
  if value(:uid)
    unless value(:chain).to_s =~ /OUTPUT|POSTROUTING/
      self.fail "Parameter uid only applies to chains " \
        "OUTPUT,POSTROUTING"
    end
  end
  if value(:gid)
    unless value(:chain).to_s =~ /OUTPUT|POSTROUTING/
      self.fail "Parameter gid only applies to chains " \
        "OUTPUT,POSTROUTING"
    end
  end
  if value(:set_mark)
    unless value(:jump).to_s =~ /MARK/ &&
      value(:chain).to_s =~ /PREROUTING|OUTPUT/ &&
      value(:table).to_s =~ /mangle/
      self.fail "Parameter set_mark only applies to " \
        "the PREROUTING or OUTPUT chain of the mangle table and when jump => MARK"
    end
  end
  if value(:dport)
    unless value(:proto).to_s =~ /tcp|udp|sctp/
      self.fail "[%s] Parameter dport only applies to sctp, tcp and udp " \
        "protocols. Current protocol is [%s] and dport is [%s]" %
        [value(:name), should(:proto), should(:dport)]
    end
  end
  if value(:jump).to_s == "DNAT"
    unless value(:table).to_s =~ /nat/
      self.fail "Parameter jump => DNAT only applies to table => nat"
    end
    unless value(:todest)
      self.fail "Parameter jump => DNAT must have todest parameter"
    end
  end
  if value(:jump).to_s == "SNAT"
    unless value(:table).to_s =~ /nat/
      self.fail "Parameter jump => SNAT only applies to table => nat"
    end
    unless value(:tosource)
      # Bug fix: this message previously said "jump => DNAT" in the SNAT check.
      self.fail "Parameter jump => SNAT must have tosource parameter"
    end
  end
  if value(:jump).to_s == "REDIRECT"
    unless value(:toports)
      self.fail "Parameter jump => REDIRECT missing mandatory toports " \
        "parameter"
    end
  end
  if value(:jump).to_s == "MASQUERADE"
    unless value(:table).to_s =~ /nat/
      self.fail "Parameter jump => MASQUERADE only applies to table => nat"
    end
  end
  if value(:log_prefix) || value(:log_level)
    unless value(:jump).to_s == "LOG"
      self.fail "Parameter log_prefix and log_level require jump => LOG"
    end
  end
  if value(:burst) && ! value(:limit)
    self.fail "burst makes no sense without limit"
  end
  if value(:action) && value(:jump)
    self.fail "Only one of the parameters 'action' and 'jump' can be set"
  end
end
end
|
# Shared constants for non-Apple GNU GCC support: the range covers the
# minor versions of gcc-4.x (gcc-4.3 .. gcc-4.9) that can be selected.
module CompilerConstants
GNU_GCC_VERSIONS = 3..9
GNU_GCC_REGEXP = /^gcc-(4\.[3-9])$/
end
Compiler = Struct.new(:name, :version, :priority)
# Describes a compiler (name plus the highest failing version) that a
# formula declares itself incompatible with via `fails_with`.
class CompilerFailure
attr_reader :name
# NOTE(review): attr_rw is a Homebrew-specific helper defined elsewhere;
# it appears to create combined reader/writer methods for :cause and :version.
attr_rw :cause, :version
# Allows Apple compiler `fails_with` statements to keep using `build`
# even though `build` and `version` are the same internally
alias_method :build, :version
# Returns the preset failure list for a standard (:cxx11 or :openmp);
# raises ArgumentError for anything else.
def self.for_standard standard
COLLECTIONS.fetch(standard) do
raise ArgumentError, "\"#{standard}\" is not a recognized standard"
end
end
def self.create(spec, &block)
# Non-Apple compilers are in the format fails_with compiler => version
if spec.is_a?(Hash)
_, major_version = spec.first
name = "gcc-#{major_version}"
# so fails_with :gcc => '4.8' simply marks all 4.8 releases incompatible
version = "#{major_version}.999"
else
name = spec
version = 9999
end
new(name, version, &block)
end
def initialize(name, version, &block)
@name = name
@version = version
# Evaluated in instance context so the block can call cause/version/build.
instance_eval(&block) if block_given?
end
# A failure matches a compiler when names agree and the failing version is
# at least the compiler's version (a nil compiler version is treated as 0).
def ===(compiler)
name == compiler.name && version >= (compiler.version || 0)
end
MESSAGES = {
:cxx11 => "This compiler does not support C++11"
}
cxx11 = proc { cause MESSAGES[:cxx11] }
# Preset failure lists keyed by language-standard symbol.
COLLECTIONS = {
:cxx11 => [
create(:gcc_4_0, &cxx11),
create(:gcc, &cxx11),
create(:llvm, &cxx11),
create(:clang) { build 425; cause MESSAGES[:cxx11] },
create(:gcc => "4.3", &cxx11),
create(:gcc => "4.4", &cxx11),
create(:gcc => "4.5", &cxx11),
create(:gcc => "4.6", &cxx11),
],
:openmp => [
create(:clang) { cause "clang does not support OpenMP" },
]
}
end
# A tiny priority queue of Compiler-like objects: #pop always yields the
# highest-priority entry remaining.
class CompilerQueue
  # Start with no queued compilers.
  def initialize
    @items = []
  end

  # Append a compiler; returns self so pushes can be chained.
  def <<(compiler)
    @items.push(compiler)
    self
  end

  # Remove and return the highest-priority entry (nil when empty).
  def pop
    best = @items.max_by(&:priority)
    @items.delete(best)
  end

  # True when no compilers remain.
  def empty?
    @items.empty?
  end
end
# Picks a working alternate compiler for a formula whose build fails with
# the default one. Candidates are Apple compilers plus installed non-Apple
# GCC 4.x, ordered by priority. Left as-is: pop tie-breaking depends on
# insertion order, so a restyle could subtly change the chosen compiler.
class CompilerSelector
# f        - the formula being built (must respond to fails_with?)
# versions - version source, MacOS by default. NOTE(review): assumed to
#   respond to <cc>_build_version and non_apple_gcc_version — confirm.
def initialize(f, versions=MacOS)
@f = f
@versions = versions
@compilers = CompilerQueue.new
%w{clang llvm gcc gcc_4_0}.map(&:to_sym).each do |cc|
version = @versions.send("#{cc}_build_version")
unless version.nil?
@compilers << Compiler.new(cc, version, priority_for(cc))
end
end
# non-Apple GCC 4.x
CompilerConstants::GNU_GCC_VERSIONS.each do |v|
name = "gcc-4.#{v}"
version = @versions.non_apple_gcc_version(name)
unless version.nil?
# priority is based on version, with newest preferred first
@compilers << Compiler.new(name, version, 1.0 + v/10.0)
end
end
end
# Attempts to select an appropriate alternate compiler, but
# if none can be found raises CompilerSelectionError instead
def compiler
while cc = @compilers.pop
return cc.name unless @f.fails_with?(cc)
end
raise CompilerSelectionError.new(@f)
end
private
# Relative preference for each Apple compiler; modern clang (build >= 318)
# outranks everything, older clang falls below non-Apple gcc (1.5).
def priority_for(cc)
case cc
when :clang then @versions.clang_build_version >= 318 ? 3 : 0.5
when :gcc then 2.5
when :llvm then 2
when :gcc_4_0 then 0.25
# non-Apple gcc compilers
else 1.5
end
end
end
Remove dead code
# Shared constants describing the supported non-Apple GNU GCC releases.
module CompilerConstants
  # Minor versions of GCC 4.x that Homebrew can provide (gcc-4.3 .. gcc-4.9).
  GNU_GCC_VERSIONS = 3..9
  # Matches compiler names of the form "gcc-4.3" through "gcc-4.9".
  GNU_GCC_REGEXP = /^gcc-(4\.[3-9])$/
end
# Lightweight value object for a candidate compiler: its name (symbol for
# Apple compilers, "gcc-4.x" string otherwise), build version, and queue priority.
Compiler = Struct.new(:name, :version, :priority)
# Describes a compiler (by name and version cutoff) that a formula is
# known to fail with; instances are matched against actual compilers
# via #=== by the fails_with machinery.
class CompilerFailure
  attr_reader :name
  # attr_rw is a Homebrew DSL helper: reader when called without args,
  # writer when called with one — TODO confirm against extend/module code.
  attr_rw :cause, :version
  # Allows Apple compiler `fails_with` statements to keep using `build`
  # even though `build` and `version` are the same internally
  alias_method :build, :version

  # Looks up the preset failure collection for a language standard
  # (e.g. :cxx11); raises ArgumentError for unrecognized standards.
  def self.for_standard standard
    COLLECTIONS.fetch(standard) do
      raise ArgumentError, "\"#{standard}\" is not a recognized standard"
    end
  end

  # Builds a CompilerFailure from a fails_with spec: either a bare
  # compiler symbol (all versions fail) or a Hash naming a non-Apple
  # GCC series.
  def self.create(spec, &block)
    # Non-Apple compilers are in the format fails_with compiler => version
    if spec.is_a?(Hash)
      _, major_version = spec.first
      name = "gcc-#{major_version}"
      # so fails_with :gcc => '4.8' simply marks all 4.8 releases incompatible
      version = "#{major_version}.999"
    else
      name = spec
      version = 9999
    end
    new(name, version, &block)
  end

  def initialize(name, version, &block)
    @name = name
    @version = version
    # The DSL block may call cause/version/build to refine the failure.
    instance_eval(&block) if block_given?
  end

  # Matches when names agree and the compiler's version is at or below
  # this failure's cutoff (nil compiler versions are treated as 0).
  def ===(compiler)
    name == compiler.name && version >= (compiler.version || 0)
  end

  # Canned failure messages keyed by language standard.
  MESSAGES = {
    :cxx11 => "This compiler does not support C++11"
  }
  # Shared refinement block marking a compiler as lacking C++11 support.
  cxx11 = proc { cause MESSAGES[:cxx11] }
  # Preset failure lists keyed by language standard; see for_standard.
  COLLECTIONS = {
    :cxx11 => [
      create(:gcc_4_0, &cxx11),
      create(:gcc, &cxx11),
      create(:llvm, &cxx11),
      # clang builds up to 425 predate C++11 support.
      create(:clang) { build 425; cause MESSAGES[:cxx11] },
      create(:gcc => "4.3", &cxx11),
      create(:gcc => "4.4", &cxx11),
      create(:gcc => "4.5", &cxx11),
      create(:gcc => "4.6", &cxx11),
    ],
    :openmp => [
      create(:clang) { cause "clang does not support OpenMP" },
    ]
  }
end
# A tiny max-priority queue: elements are kept in insertion order and
# the highest-priority one is located by a linear scan on removal.
class CompilerQueue
  def initialize
    @queue = []
  end

  # Push an element; returns the queue itself so pushes can be chained.
  def <<(obj)
    @queue.push(obj)
    self
  end

  # Remove and return the highest-priority element (nil when empty).
  # Among equal priorities the earliest-pushed element wins.
  def pop
    best = nil
    @queue.each do |candidate|
      best = candidate if best.nil? || candidate.priority > best.priority
    end
    @queue.delete(best)
  end

  def empty?
    @queue.size.zero?
  end
end
# Chooses a fallback compiler for a formula whose default compiler is
# blacklisted via fails_with. Candidates are collected into a priority
# queue and popped best-first until one the formula accepts is found.
class CompilerSelector
  # f        - the formula needing a compiler (must answer fails_with?)
  # versions - object answering *_build_version / non_apple_gcc_version
  #            queries (defaults to MacOS)
  def initialize(f, versions=MacOS)
    @f = f
    @versions = versions
    @compilers = CompilerQueue.new
    # Apple-shipped compilers, enqueued with fixed relative priorities.
    %w{clang llvm gcc gcc_4_0}.map(&:to_sym).each do |cc|
      version = @versions.send("#{cc}_build_version")
      unless version.nil?
        @compilers << Compiler.new(cc, version, priority_for(cc))
      end
    end
    # non-Apple GCC 4.x
    CompilerConstants::GNU_GCC_VERSIONS.each do |v|
      name = "gcc-4.#{v}"
      version = @versions.non_apple_gcc_version(name)
      unless version.nil?
        # priority is based on version, with newest preferred first
        @compilers << Compiler.new(name, version, 1.0 + v/10.0)
      end
    end
  end

  # Attempts to select an appropriate alternate compiler, but
  # if none can be found raises CompilerError instead
  def compiler
    while cc = @compilers.pop
      return cc.name unless @f.fails_with?(cc)
    end
    raise CompilerSelectionError.new(@f)
  end

  private

  # Relative preference of the Apple-shipped compilers; higher pops first.
  # Older clang builds (< 318) are demoted below everything else.
  # Only ever called with the four symbols below, so no else is needed.
  def priority_for(cc)
    case cc
    when :clang then @versions.clang_build_version >= 318 ? 3 : 0.5
    when :gcc then 2.5
    when :llvm then 2
    when :gcc_4_0 then 0.25
    end
  end
end
|
require "compilers"
# Models which C++ standard library (libstdc++ or libc++) a build links
# against, and whether two builds' choices are runtime-compatible.
class CxxStdlib
  include CompilerConstants

  # Raised when a formula and one of its dependencies were built against
  # incompatible C++ standard libraries; check_compatibility downgrades
  # it to a warning.
  class CompatibilityError < StandardError
    def initialize(formula, dep, stdlib)
      # full_name (rather than name) keeps tapped formulae unambiguous
      # in the warning, matching the rest of Homebrew's messaging.
      super <<-EOS.undent
        #{formula.full_name} dependency #{dep.name} was built with a different C++ standard
        library (#{stdlib.type_string} from #{stdlib.compiler}). This may cause problems at runtime.
      EOS
    end
  end

  # Factory: validates the stdlib type and picks the Apple/GNU subclass
  # based on the compiler name.
  def self.create(type, compiler)
    if type && ![:libstdcxx, :libcxx].include?(type)
      raise ArgumentError, "Invalid C++ stdlib type: #{type}"
    end
    klass = GNU_GCC_REGEXP === compiler.to_s ? GnuStdlib : AppleStdlib
    klass.new(type, compiler)
  end

  # Warns (via opoo) when any runtime dependency of +formula+ was built
  # with an incompatible stdlib. No-op for formulae opting out.
  def self.check_compatibility(formula, deps, keg, compiler)
    return if formula.skip_cxxstdlib_check?
    stdlib = create(keg.detect_cxx_stdlibs.first, compiler)
    begin
      stdlib.check_dependencies(formula, deps)
    rescue CompatibilityError => e
      opoo e.message
    end
  end

  attr_reader :type, :compiler

  def initialize(type, compiler)
    @type = type
    @compiler = compiler.to_sym
  end

  # If either package doesn't use C++, all is well
  # libstdc++ and libc++ aren't ever intercompatible
  # libstdc++ is compatible across Apple compilers, but
  # not between Apple and GNU compilers, or between GNU compiler versions
  def compatible_with?(other)
    return true if type.nil? || other.type.nil?
    return false unless type == other.type
    apple_compiler? && other.apple_compiler? ||
      !other.apple_compiler? && compiler.to_s[4..6] == other.compiler.to_s[4..6]
  end

  # Raises CompatibilityError for the first runtime dependency whose
  # recorded stdlib (from its install Tab) conflicts with this one.
  def check_dependencies(formula, deps)
    deps.each do |dep|
      # Software is unlikely to link against libraries from build-time deps, so
      # it doesn't matter if they link against different C++ stdlibs.
      next if dep.build?
      dep_stdlib = Tab.for_formula(dep.to_formula).cxxstdlib
      unless compatible_with? dep_stdlib
        raise CompatibilityError.new(formula, dep, dep_stdlib)
      end
    end
  end

  # Human-readable stdlib name, e.g. :libcxx -> "libc++".
  def type_string
    type.to_s.gsub(/cxx$/, 'c++')
  end

  def inspect
    "#<#{self.class.name}: #{compiler} #{type}>"
  end

  class AppleStdlib < CxxStdlib
    def apple_compiler?
      true
    end
  end

  class GnuStdlib < CxxStdlib
    def apple_compiler?
      false
    end
  end
end
cxxstdlib: use Formula#full_name
require "compilers"
# Models which C++ standard library (libstdc++ or libc++) a build links
# against, and whether two builds' choices are runtime-compatible.
class CxxStdlib
  include CompilerConstants

  # Raised when a formula and one of its dependencies were built against
  # incompatible C++ standard libraries; check_compatibility downgrades
  # it to a warning.
  class CompatibilityError < StandardError
    def initialize(formula, dep, stdlib)
      super <<-EOS.undent
        #{formula.full_name} dependency #{dep.name} was built with a different C++ standard
        library (#{stdlib.type_string} from #{stdlib.compiler}). This may cause problems at runtime.
      EOS
    end
  end

  # Factory: validates the stdlib type and picks the Apple/GNU subclass
  # based on the compiler name.
  def self.create(type, compiler)
    if type && ![:libstdcxx, :libcxx].include?(type)
      raise ArgumentError, "Invalid C++ stdlib type: #{type}"
    end
    klass = GNU_GCC_REGEXP === compiler.to_s ? GnuStdlib : AppleStdlib
    klass.new(type, compiler)
  end

  # Warns (via opoo) when any runtime dependency of +formula+ was built
  # with an incompatible stdlib. No-op for formulae opting out.
  def self.check_compatibility(formula, deps, keg, compiler)
    return if formula.skip_cxxstdlib_check?
    stdlib = create(keg.detect_cxx_stdlibs.first, compiler)
    begin
      stdlib.check_dependencies(formula, deps)
    rescue CompatibilityError => e
      opoo e.message
    end
  end

  attr_reader :type, :compiler

  def initialize(type, compiler)
    @type = type
    @compiler = compiler.to_sym
  end

  # If either package doesn't use C++, all is well
  # libstdc++ and libc++ aren't ever intercompatible
  # libstdc++ is compatible across Apple compilers, but
  # not between Apple and GNU compilers, or between GNU compiler versions
  def compatible_with?(other)
    return true if type.nil? || other.type.nil?
    return false unless type == other.type
    apple_compiler? && other.apple_compiler? ||
      !other.apple_compiler? && compiler.to_s[4..6] == other.compiler.to_s[4..6]
  end

  # Raises CompatibilityError for the first runtime dependency whose
  # recorded stdlib (from its install Tab) conflicts with this one.
  def check_dependencies(formula, deps)
    deps.each do |dep|
      # Software is unlikely to link against libraries from build-time deps, so
      # it doesn't matter if they link against different C++ stdlibs.
      next if dep.build?
      dep_stdlib = Tab.for_formula(dep.to_formula).cxxstdlib
      if !compatible_with? dep_stdlib
        raise CompatibilityError.new(formula, dep, dep_stdlib)
      end
    end
  end

  # Human-readable stdlib name, e.g. :libcxx -> "libc++".
  def type_string
    type.to_s.gsub(/cxx$/, 'c++')
  end

  def inspect
    "#<#{self.class.name}: #{compiler} #{type}>"
  end

  class AppleStdlib < CxxStdlib
    def apple_compiler?
      true
    end
  end

  class GnuStdlib < CxxStdlib
    def apple_compiler?
      false
    end
  end
end
|
# frozen_string_literal: true
require "formula_installer"
require "development_tools"
require "messages"
module Homebrew
  module_function

  # Reinstalls formula +f+ in place: the existing keg is backed up, the
  # previous install's options and tab settings are preserved, and on any
  # failure the backup is restored.
  def reinstall_formula(f, build_from_source: false)
    if f.opt_prefix.directory?
      keg = Keg.new(f.opt_prefix.resolved_path)
      # Remember how the keg was installed so the reinstall can preserve
      # its bottle/dependency/request state.
      tab = Tab.for_keg(keg)
      keg_had_linked_opt = true
      keg_was_linked = keg.linked?
      backup keg
    end
    build_options = BuildOptions.new(Options.create(ARGV.flags_only), f.options)
    options = build_options.used_options
    options |= f.build.used_options
    options &= f.options
    fi = FormulaInstaller.new(f)
    fi.options = options
    # Only build a bottle when explicitly requested; whether the previous
    # install was itself a bottle build is restored from the tab below,
    # so we no longer build bottles unnecessarily.
    fi.build_bottle = ARGV.build_bottle?
    fi.interactive = ARGV.interactive?
    fi.git = ARGV.git?
    fi.link_keg ||= keg_was_linked if keg_had_linked_opt
    fi.build_from_source = true if build_from_source
    if tab
      # Preserve the previous install's settings across the reinstall.
      fi.build_bottle ||= tab.built_bottle?
      fi.installed_as_dependency = tab.installed_as_dependency
      fi.installed_on_request = tab.installed_on_request
    end
    fi.prelude
    oh1 "Reinstalling #{Formatter.identifier(f.full_name)} #{options.to_a.join " "}"
    fi.install
    fi.finish
  rescue FormulaInstallationAlreadyAttemptedError
    # Another install attempt already covered this formula; nothing to do.
    nil
  rescue Exception # rubocop:disable Lint/RescueException
    # Any failure (including interrupts) restores the backed-up keg.
    ignore_interrupts { restore_backup(keg, keg_was_linked) }
    raise
  else
    # Success: the backup is no longer needed.
    backup_path(keg).rmtree if backup_path(keg).exist?
  end

  # Moves the keg aside (unlinked) so a failed reinstall can be undone.
  def backup(keg)
    keg.unlink
    keg.rename backup_path(keg)
  end

  # Puts a backed-up keg back in place, relinking it if it was linked.
  def restore_backup(keg, keg_was_linked)
    path = backup_path(keg)
    return unless path.directory?
    Pathname.new(keg).rmtree if keg.exist?
    path.rename keg
    keg.link if keg_was_linked
  end

  # Location the keg is parked at during a reinstall.
  def backup_path(path)
    Pathname.new "#{path}.reinstall"
  end
end
reinstall: don't build bottles unnecessarily.
Apply the same changes from #6066.
Also apply the same logic from `brew upgrade` to preserve
`installed_as_dependency`/`installed_on_request` settings
when reinstalling formulae.
# frozen_string_literal: true
require "formula_installer"
require "development_tools"
require "messages"
module Homebrew
  module_function

  # Reinstalls formula +f+ in place: the existing keg is backed up, the
  # previous install's options and tab settings are preserved, and on any
  # failure the backup is restored.
  def reinstall_formula(f, build_from_source: false)
    if f.opt_prefix.directory?
      keg = Keg.new(f.opt_prefix.resolved_path)
      # Remember how the keg was installed so the reinstall can preserve
      # its bottle/dependency/request state.
      tab = Tab.for_keg(keg)
      keg_had_linked_opt = true
      keg_was_linked = keg.linked?
      backup keg
    end
    build_options = BuildOptions.new(Options.create(ARGV.flags_only), f.options)
    options = build_options.used_options
    options |= f.build.used_options
    options &= f.options
    fi = FormulaInstaller.new(f)
    fi.options = options
    # Only build a bottle when explicitly requested; whether the previous
    # install was itself a bottle build is restored from the tab below.
    fi.build_bottle = ARGV.build_bottle?
    fi.interactive = ARGV.interactive?
    fi.git = ARGV.git?
    fi.link_keg ||= keg_was_linked if keg_had_linked_opt
    fi.build_from_source = true if build_from_source
    if tab
      # Preserve the previous install's settings across the reinstall.
      fi.build_bottle ||= tab.built_bottle?
      fi.installed_as_dependency = tab.installed_as_dependency
      fi.installed_on_request = tab.installed_on_request
    end
    fi.prelude
    oh1 "Reinstalling #{Formatter.identifier(f.full_name)} #{options.to_a.join " "}"
    fi.install
    fi.finish
  rescue FormulaInstallationAlreadyAttemptedError
    # Another install attempt already covered this formula; nothing to do.
    nil
  rescue Exception # rubocop:disable Lint/RescueException
    # Any failure (including interrupts) restores the backed-up keg.
    ignore_interrupts { restore_backup(keg, keg_was_linked) }
    raise
  else
    # Success: the backup is no longer needed.
    backup_path(keg).rmtree if backup_path(keg).exist?
  end

  # Moves the keg aside (unlinked) so a failed reinstall can be undone.
  def backup(keg)
    keg.unlink
    keg.rename backup_path(keg)
  end

  # Puts a backed-up keg back in place, relinking it if it was linked.
  def restore_backup(keg, keg_was_linked)
    path = backup_path(keg)
    return unless path.directory?
    Pathname.new(keg).rmtree if keg.exist?
    path.rename keg
    keg.link if keg_was_linked
  end

  # Location the keg is parked at during a reinstall.
  def backup_path(path)
    Pathname.new "#{path}.reinstall"
  end
end
|
#
# Be sure to run `pod spec lint MGSplitViewController.podspec' to ensure this is a
# valid spec and to remove all comments including this before submitting the spec.
#
# To learn more about Podspec attributes see http://docs.cocoapods.org/specification.html
# To see working Podspecs in the CocoaPods repo see https://github.com/CocoaPods/Specs/
#
# CocoaPods specification for MGSplitViewController.
Pod::Spec.new do |s|
  s.name         = "MGSplitViewController"
  s.version      = "1.0.0"
  # NOTE(review): trailing ".." in the summary looks like a typo — confirm.
  s.summary      = "A flexible, advanced split-view controller for iPad developers.."
  s.description  = <<-DESC
                   MGSplitViewController is a replacement for UISplitViewController, with various useful enhancements.
                   DESC
  s.homepage     = "https://github.com/tafax/MGSplitViewController"
  s.license      = "BSD"
  s.author       = { "Matteo Tafani Alunno" => "mtafanialunno@vendini.com" }
  # Minimum deployment target.
  s.platform     = :ios, "7.0"
  # Tag must exist in the repo for `pod install` to resolve this version.
  s.source       = { :git => "https://github.com/tafax/MGSplitViewController.git", :tag => 'v1.0.0' }
  s.source_files = "Classes", "Classes/**/*.{h,m}"
  s.exclude_files = "Classes/Exclude"
  s.requires_arc = true
  # s.xcconfig = { "HEADER_SEARCH_PATHS" => "$(SDKROOT)/usr/include/libxml2" }
end
Fixed minimum platform version in podspec
#
# Be sure to run `pod spec lint MGSplitViewController.podspec' to ensure this is a
# valid spec and to remove all comments including this before submitting the spec.
#
# To learn more about Podspec attributes see http://docs.cocoapods.org/specification.html
# To see working Podspecs in the CocoaPods repo see https://github.com/CocoaPods/Specs/
#
# CocoaPods specification for MGSplitViewController.
Pod::Spec.new do |s|
  s.name         = "MGSplitViewController"
  s.version      = "1.0.0"
  # NOTE(review): trailing ".." in the summary looks like a typo — confirm.
  s.summary      = "A flexible, advanced split-view controller for iPad developers.."
  s.description  = <<-DESC
                   MGSplitViewController is a replacement for UISplitViewController, with various useful enhancements.
                   DESC
  s.homepage     = "https://github.com/tafax/MGSplitViewController"
  s.license      = "BSD"
  s.author       = { "Matteo Tafani Alunno" => "mtafanialunno@vendini.com" }
  # Minimum deployment target (lowered to iOS 5.0).
  s.platform     = :ios, "5.0"
  # Tag must exist in the repo for `pod install` to resolve this version.
  s.source       = { :git => "https://github.com/tafax/MGSplitViewController.git", :tag => 'v1.0.0' }
  s.source_files = "Classes", "Classes/**/*.{h,m}"
  s.exclude_files = "Classes/Exclude"
  s.requires_arc = true
  # s.xcconfig = { "HEADER_SEARCH_PATHS" => "$(SDKROOT)/usr/include/libxml2" }
end
|
require "pathname"
require "yaml"
require "pwrake/option/host_map"
# Forward-declare Pwrake::Option (as a Hash subclass) before requiring
# option_filesystem, which presumably reopens the class — without this
# declaration the reopened class would not inherit from Hash.
module Pwrake; class Option < Hash; end; end
require "pwrake/option/option_filesystem"
module Pwrake
  # Process start time; used to timestamp log-directory names.
  START_TIME = Time.now

  # Pwrake's configuration. A Hash subclass keyed by upper-case option
  # names, populated from command-line options, ENV, pwrake_conf YAML,
  # and built-in defaults (in that priority order).
  class Option < Hash
    def initialize
      load_pwrake_conf
      init_options
      init_pass_env
      # --show-conf and --report short-circuit: dump/report and exit.
      if self['SHOW_CONF']
        require "yaml"
        YAML.dump(self,$stdout)
        exit
      elsif self['REPORT_DIR']
        require 'pwrake/report'
        Report.new(self,[]).report_html
        exit
      end
    end

    # Second-phase setup once logging is available: log all options,
    # resolve hosts and filesystem, optionally enable GC profiling.
    def init
      Log.info "Options:"
      self.each do |k,v|
        Log.info " #{k} = #{v.inspect}"
      end
      #
      setup_hosts
      setup_filesystem # require 'option_filesystem.rb'
      #
      if self['LOG_DIR'] && self['GC_LOG_FILE']
        GC::Profiler.enable
      end
    end

    attr_reader :counter
    attr_accessor :total_cores

    # Config files searched in the current directory when none is given.
    DEFAULT_CONFFILES = ["pwrake_conf.yaml","PwrakeConf.yaml"]

    # ----- init -----

    # Locates and parses the pwrake configuration YAML into @yaml
    # (empty hash when no config file exists).
    def load_pwrake_conf
      # Read pwrake_conf
      pwrake_conf = Rake.application.options.pwrake_conf
      if pwrake_conf
        if !File.exist?(pwrake_conf)
          raise "Configuration file not found: #{pwrake_conf}"
        end
      else
        pwrake_conf = DEFAULT_CONFFILES.find{|fn| File.exist?(fn)}
      end
      self['PWRAKE_CONF'] = pwrake_conf
      if pwrake_conf.nil?
        @yaml = {}
      else
        require "yaml"
        @yaml = open(pwrake_conf){|f| YAML.load(f) }
      end
    end

    # Resolves every entry of option_data: each entry is either a key
    # string, or [key, alias..., transform-proc]. The resolved value is
    # stored both in the hash and as @<key downcased>.
    def init_options
      option_data.each do |a|
        prc = nil
        keys = []
        case a
        when String
          keys << a
        when Array
          a.each do |x|
            case x
            when String
              keys << x
            when Proc
              prc = x
            end
          end
        end
        key = keys[0]
        val = search_opts(keys)
        val = prc.call(val) if prc
        self[key] = val if !val.nil?
        instance_variable_set("@"+key.downcase, val)
      end
      feedback_options
      Rake.verbose(false) if Rake.application.options.silent
    end

    # Declarative table of all recognized options; procs normalize or
    # default the raw value.
    def option_data
      [
        'DRYRUN',
        'IGNORE_SYSTEM',
        'IGNORE_DEPRECATE',
        'LOAD_SYSTEM',
        'NOSEARCH',
        'RAKELIB',
        'SHOW_PREREQS',
        'SILENT',
        'TRACE',
        'BACKTRACE',
        'TRACE_OUTPUT',
        'TRACE_RULES',
        'FILESYSTEM',
        'SSH_OPTION',
        'PASS_ENV',
        'GFARM2FS_OPTION',
        'GFARM2FS_DEBUG',
        ['GFARM2FS_DEBUG_WAIT', proc{|v| v ? v.to_i : 1}],
        'GNU_TIME',
        'DEBUG',
        'PLOT_PARALLELISM',
        'SHOW_CONF',
        ['REPORT_DIR','REPORT'],
        'REPORT_IMAGE',
        'FAILED_TARGET', # rename(default), delete, leave
        'FAILURE_TERMINATION', # wait, kill, continue
        'QUEUE_PRIORITY', # RANK(default), FIFO, LIFO, DFS
        'NOACTION_QUEUE_PRIORITY', # FIFO(default), LIFO, RAND
        #'NUM_NOACTION_THREADS', # default=4 when gfarm, else 1
        'GRAPH_PARTITION',
        'PLOT_PARTITION',
        ['HOSTFILE','HOSTS'],
        # LOG_DIR: expands strftime/%$ patterns and appends ".1", ".2"...
        # until an unused directory name is found.
        ['LOG_DIR','LOG',
         proc{|v|
           if v
             if v == "" || !v.kind_of?(String)
               v = "Pwrake%Y%m%d-%H%M%S"
             end
             d = v = format_time_pid(v)
             i = 1
             while File.exist?(d)
               d = "#{v}.#{i}"
               i += 1
             end
             d
           end
         }],
        ['LOG_FILE',
         proc{|v|
           if v.kind_of?(String) && v != ""
             v
           else
             "pwrake.log"
           end
         }],
        ['TASK_CSV_FILE',
         proc{|v|
           if v.kind_of?(String) && v != ""
             v
           else
             "task.csv"
           end
         }],
        ['COMMAND_CSV_FILE',
         proc{|v|
           if v.kind_of?(String) && v != ""
             v
           else
             "command.csv"
           end
         }],
        ['GC_LOG_FILE',
         proc{|v|
           if v
             if v.kind_of?(String) && v != ""
               v
             else
               "gc.log"
             end
           end
         }],
        ['NUM_THREADS', proc{|v| v && v.to_i}],
        ['SHELL_START_INTERVAL', proc{|v| (v || 0.012).to_f}],
        ['HEARTBEAT', proc{|v| (v || 240).to_i}],
        ['RETRY', proc{|v| (v || 1).to_i}],
        ['DISABLE_AFFINITY', proc{|v| v || ENV['AFFINITY']=='off'}],
        ['DISABLE_STEAL', proc{|v| v || ENV['STEAL']=='off'}],
        ['GFARM_BASEDIR', proc{|v| v || '/tmp'}],
        ['GFARM_PREFIX', proc{|v| v || "pwrake_#{ENV['USER']}"}],
        ['GFARM_SUBDIR', proc{|v| v || '/'}],
        ['MAX_GFWHERE_WORKER', proc{|v| (v || 8).to_i}],
        ['MASTER_HOSTNAME', proc{|v| (v || begin;`hostname -f`;rescue;end || '').chomp}],
        ['WORK_DIR', proc{|v|
           v ||= '%CWD_RELATIVE_TO_HOME'
           v.sub('%CWD_RELATIVE_TO_HOME',cwd_relative_to_home)
         }],
      ]
    end

    # Expands strftime patterns and "%$" (zero-padded PID) in a name.
    def format_time_pid(v)
      START_TIME.strftime(v).sub("%$","%05d"%Process.pid)
    end

    # Pushes the standard Rake options back into Rake.application.options
    # so Rake behaves consistently with pwrake's configuration.
    def feedback_options
      opts = Rake.application.options
      ['DRYRUN',
       'IGNORE_SYSTEM',
       'IGNORE_DEPRECATE',
       'LOAD_SYSTEM',
       'NOSEARCH',
       'RAKELIB',
       'SHOW_PREREQS',
       'SILENT',
       'TRACE',
       'BACKTRACE',
       'TRACE_OUTPUT',
       'TRACE_RULES'
      ].each do |k|
        if v=self[k]
          m = (k.downcase+"=").to_sym
          opts.send(m,v)
        end
      end
      case opts.trace_output
      when 'stdout'
        opts.trace_output = $stdout
      when 'stderr', nil
        opts.trace_output = $stderr
      end
    end

    # Priority of Option:
    #  command_option > ENV > pwrake_conf > DEFAULT_OPTIONS
    def search_opts(keys)
      val = Rake.application.options.send(keys[0].downcase.to_sym)
      return parse_opt(val) if !val.nil?
      #
      keys.each do |k|
        val = ENV[k.upcase]
        return parse_opt(val) if !val.nil?
      end
      #
      return nil if !@yaml
      keys.each do |k|
        val = @yaml[k.upcase]
        return val if !val.nil?
      end
      nil
    end

    # Normalizes option strings: boolean words to true/false, the global
    # IO objects back to their names, anything else passed through.
    def parse_opt(s)
      case s
      when /^(false|nil|off)$/i
        false
      when /^(true|on)$/i
        true
      when $stdout
        "stdout"
      when $stderr
        "stderr"
      else
        s
      end
    end

    # Current directory expressed relative to $HOME (may contain "..").
    def cwd_relative_to_home
      Pathname.pwd.relative_path_from(Pathname.new(ENV['HOME'])).to_s
    end

    # Like cwd_relative_to_home, but falls back to the absolute path
    # when the current directory is not under $HOME.
    def cwd_relative_if_under_home
      home = Pathname.new(ENV['HOME']).realpath
      path = pwd = Pathname.pwd.realpath
      while path != home
        if path.root?
          return pwd.to_s
        end
        path = path.parent
      end
      return pwd.relative_path_from(home).to_s
    end

    # ------------------------------------------------------------------------

    # Normalizes PASS_ENV (Array of names or Hash of name=>default) into a
    # Hash of environment variables to forward to workers; ENV wins over
    # configured defaults. Removes the key entirely when nothing matches.
    def init_pass_env
      if envs = self['PASS_ENV']
        pass_env = {}
        case envs
        when Array
          envs.each do |k|
            k = k.to_s
            if v = ENV[k]
              pass_env[k] = v
            end
          end
        when Hash
          envs.each do |k,v|
            k = k.to_s
            if v = ENV[k] || v
              pass_env[k] = v
            end
          end
        else
          raise "invalid option for PASS_ENV in pwrake_conf.yaml"
        end
        if pass_env.empty?
          self.delete('PASS_ENV')
        else
          self['PASS_ENV'] = pass_env
        end
      end
    end

    # ------------------------------------------------------------------------

    # Builds the HostMap from either HOSTFILE or NUM_THREADS (mutually
    # exclusive options).
    def setup_hosts
      if @hostfile && @num_threads
        raise "Cannot set `hostfile' and `num_threads' simultaneously"
      end
      @host_map = HostMap.new(@hostfile || @num_threads)
    end

    attr_reader :host_map

    # Unmounts and removes stale gfarm2fs mountpoints on every known host
    # via ssh; the "*_000" base mounts are unmounted last, after a grace
    # period, because the others presumably depend on them — TODO confirm.
    def clear_gfarm2fs
      setup_hosts
      d = File.join(self['GFARM_BASEDIR'],self['GFARM_PREFIX'])
      rcmd = "
for i in #{d}*; do
if [ -d \"$i\" ]; then
case \"$i\" in
*_000) ;;
*) fusermount -u $i; rmdir $i ;;
esac
fi
done
sleep 1
for i in #{d}*_000; do
if [ -d \"$i\" ]; then
fusermount -u $i; rmdir $i
fi
done
"
      threads = []
      @host_map.each do |k,hosts|
        hosts.each do |info|
          threads << Thread.new do
            system "ssh #{info.name} '#{rcmd}'"
          end
        end
      end
      threads.each{|t| t.join}
    end
  end
end
add definition of Pwrake::Option
require "pathname"
require "yaml"
require "pwrake/option/host_map"
# Forward-declare Pwrake::Option (as a Hash subclass) before requiring
# option_filesystem, which reopens the class; without this the reopened
# class would not inherit from Hash.
module Pwrake; class Option < Hash; end; end
require "pwrake/option/option_filesystem"
module Pwrake
  # Process start time; used to timestamp log-directory names.
  START_TIME = Time.now

  # Pwrake's configuration. A Hash subclass keyed by upper-case option
  # names, populated from command-line options, ENV, pwrake_conf YAML,
  # and built-in defaults (in that priority order).
  class Option < Hash
    def initialize
      load_pwrake_conf
      init_options
      init_pass_env
      # --show-conf and --report short-circuit: dump/report and exit.
      if self['SHOW_CONF']
        require "yaml"
        YAML.dump(self,$stdout)
        exit
      elsif self['REPORT_DIR']
        require 'pwrake/report'
        Report.new(self,[]).report_html
        exit
      end
    end

    # Second-phase setup once logging is available: log all options,
    # resolve hosts and filesystem, optionally enable GC profiling.
    def init
      Log.info "Options:"
      self.each do |k,v|
        Log.info " #{k} = #{v.inspect}"
      end
      #
      setup_hosts
      setup_filesystem # require 'option_filesystem.rb'
      #
      if self['LOG_DIR'] && self['GC_LOG_FILE']
        GC::Profiler.enable
      end
    end

    attr_reader :counter
    attr_accessor :total_cores

    # Config files searched in the current directory when none is given.
    DEFAULT_CONFFILES = ["pwrake_conf.yaml","PwrakeConf.yaml"]

    # ----- init -----

    # Locates and parses the pwrake configuration YAML into @yaml
    # (empty hash when no config file exists).
    def load_pwrake_conf
      # Read pwrake_conf
      pwrake_conf = Rake.application.options.pwrake_conf
      if pwrake_conf
        if !File.exist?(pwrake_conf)
          raise "Configuration file not found: #{pwrake_conf}"
        end
      else
        pwrake_conf = DEFAULT_CONFFILES.find{|fn| File.exist?(fn)}
      end
      self['PWRAKE_CONF'] = pwrake_conf
      if pwrake_conf.nil?
        @yaml = {}
      else
        require "yaml"
        @yaml = open(pwrake_conf){|f| YAML.load(f) }
      end
    end

    # Resolves every entry of option_data: each entry is either a key
    # string, or [key, alias..., transform-proc]. The resolved value is
    # stored both in the hash and as @<key downcased>.
    def init_options
      option_data.each do |a|
        prc = nil
        keys = []
        case a
        when String
          keys << a
        when Array
          a.each do |x|
            case x
            when String
              keys << x
            when Proc
              prc = x
            end
          end
        end
        key = keys[0]
        val = search_opts(keys)
        val = prc.call(val) if prc
        self[key] = val if !val.nil?
        instance_variable_set("@"+key.downcase, val)
      end
      feedback_options
      Rake.verbose(false) if Rake.application.options.silent
    end

    # Declarative table of all recognized options; procs normalize or
    # default the raw value.
    def option_data
      [
        'DRYRUN',
        'IGNORE_SYSTEM',
        'IGNORE_DEPRECATE',
        'LOAD_SYSTEM',
        'NOSEARCH',
        'RAKELIB',
        'SHOW_PREREQS',
        'SILENT',
        'TRACE',
        'BACKTRACE',
        'TRACE_OUTPUT',
        'TRACE_RULES',
        'FILESYSTEM',
        'SSH_OPTION',
        'PASS_ENV',
        'GFARM2FS_OPTION',
        'GFARM2FS_DEBUG',
        ['GFARM2FS_DEBUG_WAIT', proc{|v| v ? v.to_i : 1}],
        'GNU_TIME',
        'DEBUG',
        'PLOT_PARALLELISM',
        'SHOW_CONF',
        ['REPORT_DIR','REPORT'],
        'REPORT_IMAGE',
        'FAILED_TARGET', # rename(default), delete, leave
        'FAILURE_TERMINATION', # wait, kill, continue
        'QUEUE_PRIORITY', # RANK(default), FIFO, LIFO, DFS
        'NOACTION_QUEUE_PRIORITY', # FIFO(default), LIFO, RAND
        #'NUM_NOACTION_THREADS', # default=4 when gfarm, else 1
        'GRAPH_PARTITION',
        'PLOT_PARTITION',
        ['HOSTFILE','HOSTS'],
        # LOG_DIR: expands strftime/%$ patterns and appends ".1", ".2"...
        # until an unused directory name is found.
        ['LOG_DIR','LOG',
         proc{|v|
           if v
             if v == "" || !v.kind_of?(String)
               v = "Pwrake%Y%m%d-%H%M%S"
             end
             d = v = format_time_pid(v)
             i = 1
             while File.exist?(d)
               d = "#{v}.#{i}"
               i += 1
             end
             d
           end
         }],
        ['LOG_FILE',
         proc{|v|
           if v.kind_of?(String) && v != ""
             v
           else
             "pwrake.log"
           end
         }],
        ['TASK_CSV_FILE',
         proc{|v|
           if v.kind_of?(String) && v != ""
             v
           else
             "task.csv"
           end
         }],
        ['COMMAND_CSV_FILE',
         proc{|v|
           if v.kind_of?(String) && v != ""
             v
           else
             "command.csv"
           end
         }],
        ['GC_LOG_FILE',
         proc{|v|
           if v
             if v.kind_of?(String) && v != ""
               v
             else
               "gc.log"
             end
           end
         }],
        ['NUM_THREADS', proc{|v| v && v.to_i}],
        ['SHELL_START_INTERVAL', proc{|v| (v || 0.012).to_f}],
        ['HEARTBEAT', proc{|v| (v || 240).to_i}],
        ['RETRY', proc{|v| (v || 1).to_i}],
        ['DISABLE_AFFINITY', proc{|v| v || ENV['AFFINITY']=='off'}],
        ['DISABLE_STEAL', proc{|v| v || ENV['STEAL']=='off'}],
        ['GFARM_BASEDIR', proc{|v| v || '/tmp'}],
        ['GFARM_PREFIX', proc{|v| v || "pwrake_#{ENV['USER']}"}],
        ['GFARM_SUBDIR', proc{|v| v || '/'}],
        ['MAX_GFWHERE_WORKER', proc{|v| (v || 8).to_i}],
        ['MASTER_HOSTNAME', proc{|v| (v || begin;`hostname -f`;rescue;end || '').chomp}],
        ['WORK_DIR', proc{|v|
           v ||= '%CWD_RELATIVE_TO_HOME'
           v.sub('%CWD_RELATIVE_TO_HOME',cwd_relative_to_home)
         }],
      ]
    end

    # Expands strftime patterns and "%$" (zero-padded PID) in a name.
    def format_time_pid(v)
      START_TIME.strftime(v).sub("%$","%05d"%Process.pid)
    end

    # Pushes the standard Rake options back into Rake.application.options
    # so Rake behaves consistently with pwrake's configuration.
    def feedback_options
      opts = Rake.application.options
      ['DRYRUN',
       'IGNORE_SYSTEM',
       'IGNORE_DEPRECATE',
       'LOAD_SYSTEM',
       'NOSEARCH',
       'RAKELIB',
       'SHOW_PREREQS',
       'SILENT',
       'TRACE',
       'BACKTRACE',
       'TRACE_OUTPUT',
       'TRACE_RULES'
      ].each do |k|
        if v=self[k]
          m = (k.downcase+"=").to_sym
          opts.send(m,v)
        end
      end
      case opts.trace_output
      when 'stdout'
        opts.trace_output = $stdout
      when 'stderr', nil
        opts.trace_output = $stderr
      end
    end

    # Priority of Option:
    #  command_option > ENV > pwrake_conf > DEFAULT_OPTIONS
    def search_opts(keys)
      val = Rake.application.options.send(keys[0].downcase.to_sym)
      return parse_opt(val) if !val.nil?
      #
      keys.each do |k|
        val = ENV[k.upcase]
        return parse_opt(val) if !val.nil?
      end
      #
      return nil if !@yaml
      keys.each do |k|
        val = @yaml[k.upcase]
        return val if !val.nil?
      end
      nil
    end

    # Normalizes option strings: boolean words to true/false, the global
    # IO objects back to their names, anything else passed through.
    def parse_opt(s)
      case s
      when /^(false|nil|off)$/i
        false
      when /^(true|on)$/i
        true
      when $stdout
        "stdout"
      when $stderr
        "stderr"
      else
        s
      end
    end

    # Current directory expressed relative to $HOME (may contain "..").
    def cwd_relative_to_home
      Pathname.pwd.relative_path_from(Pathname.new(ENV['HOME'])).to_s
    end

    # Like cwd_relative_to_home, but falls back to the absolute path
    # when the current directory is not under $HOME.
    def cwd_relative_if_under_home
      home = Pathname.new(ENV['HOME']).realpath
      path = pwd = Pathname.pwd.realpath
      while path != home
        if path.root?
          return pwd.to_s
        end
        path = path.parent
      end
      return pwd.relative_path_from(home).to_s
    end

    # ------------------------------------------------------------------------

    # Normalizes PASS_ENV (Array of names or Hash of name=>default) into a
    # Hash of environment variables to forward to workers; ENV wins over
    # configured defaults. Removes the key entirely when nothing matches.
    def init_pass_env
      if envs = self['PASS_ENV']
        pass_env = {}
        case envs
        when Array
          envs.each do |k|
            k = k.to_s
            if v = ENV[k]
              pass_env[k] = v
            end
          end
        when Hash
          envs.each do |k,v|
            k = k.to_s
            if v = ENV[k] || v
              pass_env[k] = v
            end
          end
        else
          raise "invalid option for PASS_ENV in pwrake_conf.yaml"
        end
        if pass_env.empty?
          self.delete('PASS_ENV')
        else
          self['PASS_ENV'] = pass_env
        end
      end
    end

    # ------------------------------------------------------------------------

    # Builds the HostMap from either HOSTFILE or NUM_THREADS (mutually
    # exclusive options).
    def setup_hosts
      if @hostfile && @num_threads
        raise "Cannot set `hostfile' and `num_threads' simultaneously"
      end
      @host_map = HostMap.new(@hostfile || @num_threads)
    end

    attr_reader :host_map

    # Unmounts and removes stale gfarm2fs mountpoints on every known host
    # via ssh; the "*_000" base mounts are unmounted last, after a grace
    # period, because the others presumably depend on them — TODO confirm.
    def clear_gfarm2fs
      setup_hosts
      d = File.join(self['GFARM_BASEDIR'],self['GFARM_PREFIX'])
      rcmd = "
for i in #{d}*; do
if [ -d \"$i\" ]; then
case \"$i\" in
*_000) ;;
*) fusermount -u $i; rmdir $i ;;
esac
fi
done
sleep 1
for i in #{d}*_000; do
if [ -d \"$i\" ]; then
fusermount -u $i; rmdir $i
fi
done
"
      threads = []
      @host_map.each do |k,hosts|
        hosts.each do |info|
          threads << Thread.new do
            system "ssh #{info.name} '#{rcmd}'"
          end
        end
      end
      threads.each{|t| t.join}
    end
  end
end
|
#!/usr/bin/env ruby
# encoding: UTF-8
module CommonCore
  # Loads Common Core standards/domains from the bundled XML data files.
  class Loader
    class << self
      # All mathematics standards, one object per Grade*/... XML file.
      def math_standards
        load_elements_from_path(CommonCore::Standard, File.expand_path('../../../data/Mathematics/StandardItems/Grade*/*.xml', __FILE__))
      end

      # All mathematics domains.
      def math_domains
        load_elements_from_path(CommonCore::Domain, File.expand_path('../../../data/Mathematics/Standards/*.xml', __FILE__))
      end

      # All English Language Arts standards.
      def ela_standards
        load_elements_from_path(CommonCore::Standard, File.expand_path('../../../data/ELA_08302010/StandardItems/Grade*/*.xml', __FILE__))
      end

      private

      # Shared loader (replaces two near-identical per-class loaders):
      # globs +path+, parses each XML file and wraps it in +klass+.
      # Files that fail to parse or construct are silently skipped
      # (rescue nil) — deliberate best-effort behavior kept from the
      # original code, though it hides all errors.
      def load_elements_from_path(klass, path)
        (Dir.glob(path).map do |filename|
          klass.new(Nokogiri::XML(Pathname.new filename)) rescue nil
        end).compact
      end
    end
  end
end
consolidate
#!/usr/bin/env ruby
# encoding: UTF-8
module CommonCore
  # Loads Common Core standards/domains from the bundled XML data files.
  class Loader
    class << self
      # All mathematics standards, one object per Grade*/... XML file.
      def math_standards
        load_elements_from_path(CommonCore::Standard, File.expand_path('../../../data/Mathematics/StandardItems/Grade*/*.xml', __FILE__))
      end

      # All mathematics domains.
      def math_domains
        load_elements_from_path(CommonCore::Domain, File.expand_path('../../../data/Mathematics/Standards/*.xml', __FILE__))
      end

      # All English Language Arts standards.
      def ela_standards
        load_elements_from_path(CommonCore::Standard, File.expand_path('../../../data/ELA_08302010/StandardItems/Grade*/*.xml', __FILE__))
      end

      private

      # Globs +path+, parses each XML file and wraps it in +klass+.
      # NOTE(review): the trailing `rescue nil` swallows every error
      # (parse failure, constructor bug) and silently drops the file —
      # deliberate best-effort behavior, but worth confirming.
      def load_elements_from_path(klass, path)
        (Dir.glob(path).map do |filename|
          klass.new(Nokogiri::XML(Pathname.new filename)) rescue nil
        end).compact
      end
    end
  end
end
|
# These are the values which are defaults for the Alerter settings in {ComparisonGrapher#graph_and_stats} function
# NOTE(review): conventionally this constant would be GRAPHING_DEFAULTS;
# left as-is because callers reference it by this name.
Graphing_defaults = {email_alert: true}
module Ms
# This is the class which handles the responses from DB queries and generates comparison graphs
class ComparisonGrapher
class << self
# Takes a DataMapper query and turns the matches into the same data structure as is produced by parsing the data file.
# @param [Array] An array containing the matches to the DataMapper database query
# @return [Hash] A hash of a hash of a hash, containing the data desired, but it really should be an array of out_hashes, right?
# Converts DataMapper query results into a nested hash keyed by raw_id,
# mirroring the structure produced by parsing the data file.
# @param matches [Array] matches from a DataMapper query (Msrun.all etc.)
# @return [Hash] raw_id => { 'timestamp' => Time, <category> => { subcat => {...} } }
def match_to_hash(matches)
  # matches is the result of a Msrun.all OR Msrun.first OR Msrun.get(*args)
  @data = {}
  matches.each do |msrun|
    # Skip runs that never had metrics computed.
    next if msrun.metric.nil?
    index = msrun.raw_id.to_s
    # Time.random looks like a project extension used as a fallback when
    # no recorded run time exists — TODO confirm.
    @data[index] = {'timestamp' => msrun.rawtime || Time.random(1)}
    @@categories.each do |cat|
      @data[index][cat] = msrun.metric.send(cat.to_sym).hashes
      @data[index][cat].keys.each do |subcat|
        # Strip DataMapper bookkeeping keys from each sub-hash.
        @data[index][cat][subcat].delete('id'.to_sym)
        @data[index][cat][subcat].delete("#{cat}_id".to_sym)
      end
    end
  end
  @data # as a hash of a hash of a hash
end
# This fxn produces an array containing {Measurement} structs which contain the data found in all the matches produced by a DataMapper DB query
# @param [Array] an Array of matches
# @return [Array] an Array containing all the measurements found in the DB matches given
# Flattens DataMapper query results into Measurement structs, one per
# (property, run) pair, with DataMapper bookkeeping keys stripped.
# @param matches [Array, DataMapper::Collection, Object] one or many matches
# @return [Array<Measurement>] all measurements, sorted by category
def slice_matches(matches)
  measures = []; @data = {}
  # Why is this line of code here?
  # debugger
  # Normalize a single match into a one-element array.
  matches = [matches] if !matches.is_a? DataMapper::Collection and !matches.is_a? Array
  matches.each do |msrun|
    next if msrun.nil? or msrun.metric.nil?
    index = msrun.raw_id.to_s
    # Time.random looks like a project extension used as a fallback when
    # no recorded run time exists — TODO confirm.
    @data[index] = {'timestamp' => msrun.rawtime || Time.random(1)}
    @@categories.each do |cat|
      if cat == "uplc"
        # UPLC pressure-trace stats live directly on the msrun, not on
        # the metric object; missing values default to 0.
        arr = [{"hplc_max_p" => msrun.hplc_max_p || 0}, {'hplc_avg_p' => msrun.hplc_avg_p || 0}, {'hplc_std_p' => msrun.hplc_std_p || 0}]
        arr.each do |prop|
          measures << Measurement.new(prop.keys.first, index, @data[index]['timestamp'], prop[prop.keys.first], cat.to_sym, :pressure_trace)
        end
      else
        @data[index][cat] = msrun.metric.send(cat.to_sym).hashes
        @data[index][cat].keys.each do |subcat|
          # Strip DataMapper bookkeeping keys before emitting measurements.
          @data[index][cat][subcat].delete('id'.to_sym)
          @data[index][cat][subcat].delete("#{cat}_id".to_sym)
          @data[index][cat][subcat].delete("#{cat}_metric_msrun_id".to_sym)
          @data[index][cat][subcat].delete("#{cat}_metric_msrun_raw_id".to_sym)
          @data[index][cat][subcat].delete("#{cat}_metric_metric_input_file".to_sym)
          @data[index][cat][subcat].each { |property, value|
            measures << Measurement.new( property, index, @data[index]['timestamp'], value, cat.to_sym, subcat.to_sym) }
        end
      end
    end
  end
  # Returned sorted by category (also the method's return value).
  measures.sort_by {|measure| measure.category}
end
# This function takes the same parameters as {#graph_matches} and accomplishes the same result, as well as generating and returning, instead of the filenames, a hash containing the information needed to do cool stuff
# @param [Array, Array] Arrays of measurements sliced from the results of two DataMapper DB queries, the first of which represents the newest in a QC run, which will be compared to the previous values
# @return [Hash] ### WHAT WILL IT CONTAIN? THE VARIANCE AND THE MEAN? OR A RANGE OF ALLOWED VALUES, or a true false value??? ##### ... I'm not yet sure, thank you very much
def graph_and_stats(old_measures, new_measure, comparison_folder, opts = {})
options = Graphing_defaults.merge(opts)
default_variance = QcConfig[:default_allowed_variance]
require 'rserve/simpler'
FileUtils.mkdir_p(comparison_folder)
graphfiles = []
measures = [new_measure, old_measures]
data_hash = {}
r_object = Rserve::Simpler.new
r_object.converse('library("beanplot")')
r_object.converse "setwd('#{Dir.pwd}')"
@@categories.map do |cat|
data_hash[cat.to_sym] = {}
subcats = measures.first.map{|meas| meas.subcat if meas.category == cat.to_sym}.compact.uniq
subcats.each do |subcategory|
data_hash[cat.to_sym][subcategory] = {}
graphfile_prefix = File.join(comparison_folder, cat, subcategory.to_s)
FileUtils.mkdir_p(graphfile_prefix)
new_structs = measures.first.map{|meas| meas if meas.subcat == subcategory.to_sym}.compact
old_structs = measures.last.map{|meas| meas if meas.subcat == subcategory.to_sym}.compact
[new_structs, old_structs].each do |structs|
structs.each do |str|
str.value = str.value.to_f
str.name = str.name.to_s
str.category = @@name_legend[str.category.to_s]
str.subcat = @@name_legend[str.subcat.to_s]
str.time = str.time.to_s.gsub(/T/, ' ').gsub(/-(\d*):00/,' \100')
end
end
datafr_new = Rserve::DataFrame.from_structs(new_structs)
datafr_old = Rserve::DataFrame.from_structs(old_structs)
r_object.converse( df_new: datafr_new ) do
%Q{df_new$time <- strptime(as.character(df_new$time), "%Y-%m-%d %X")
df_new$name <- factor(df_new$name)
df_new$category <-factor(df_new$category)
df_new$subcat <- factor(df_new$subcat)
df_new$raw_id <- factor(df_new$raw_id)
}
end # new datafr converse
r_object.converse( df_old: datafr_old) do
%Q{df_old$time <- strptime(as.character(df_old$time), "%Y-%m-%d %X")
df_old$name <- factor(df_old$name)
df_old$category <-factor(df_old$category)
df_old$subcat <- factor(df_old$subcat)
df_old$raw_id <- factor(df_old$raw_id)
}
end # old datafr converse
count = new_structs.map {|str| str.name }.uniq.compact.length
i = 1;
names = r_object.converse("levels(df_old$name)")
while i <= count
r_object.converse do
%Q{ df_new.#{i} <- subset(df_new, name == levels(df_new$name)[[#{i}]])
df_old.#{i} <- subset(df_old, name == levels(df_old$name)[[#{i}]])
old_time_plot <- data.frame(df_old.#{i}$time, df_old.#{i}$value)
new_time_plot <- data.frame(df_new.#{i}$time, df_new.#{i}$value)
old_time_plot <- old_time_plot[order(df_old.#{i}$time), ]
new_time_plot <- new_time_plot[order(df_new.#{i}$time), ]
}
end
# Configure the environment for the graphing, by setting up the numbered categories
curr_name = r_object.converse("levels(df_old$name)[[#{i}]]")
## THIS IS WHERE WE DO THE CALCULATIONS
if not QcConfig[cat.to_sym][subcategory.to_s.split('_').map{|word| word.capitalize}.join("").to_sym].nil?
t = QcConfig[cat.to_sym][subcategory.to_s.split('_').map{|word| word.capitalize}.join("").to_sym][curr_name]
variance = t.is_a?(Numeric) ? t : default_variance
mean = r_object.converse("mean(df_old.#{i}$value)")
sd = r_object.converse("try(sd(df_old.#{i}$value), silent=TRUE)")
data_hash[cat.to_sym][subcategory][curr_name] = [mean, sd]
new_point = r_object.converse("df_new.#{i}$value")
range = mean-variance*sd..mean+variance*sd
Alerter.create("#{cat.to_sym}--#{subcategory}--#{curr_name} has exceeded range: #{range} Mean #{mean} Variance #{variance} Standard deviation #{sd} Value #{new_point}", { :email => options[:email_alert] }) if not ( range === new_point or range.member?(new_point) )
end
## END
graphfile = File.join([graphfile_prefix, curr_name + '.svg'])
graphfiles << graphfile
name = @@name_legend[curr_name]
r_object.converse(%Q{svg(file="#{graphfile}", bg="transparent", height=3, width=7.5)})
r_object.converse('par(mar=c(1,1,1,1), oma=c(2,1,1,1))')
r_object.converse do
%Q{ tmp <- layout(matrix(c(1,2),1,2,byrow=T), widths=c(3,4), heights=c(1,1))
tmp <- layout(matrix(c(1,2),1,2,byrow=T), widths=c(3,4), heights=c(1,1)) }
end
r_object.converse %Q{ band1 <- try(bw.SJ(df_old.#{i}$value), silent=TRUE)
if(inherits(band1, 'try-error')) band1 <- try(bw.nrd0(df_old.#{i}$value), silent=TRUE)
if(inherits(band1, 'try-error')) band1 <- try(bw.nrd0(df_new.#{i}$value), silent=TRUE)
if(inherits(band1, 'try-error')) band1 <- 0.99 }
r_object.converse "ylim = range(density(c(df_old.#{i}$value, df_new.#{i}$value), bw=band1)[[1]])"
t_test = r_object.converse ("try(t.test(df_old.#{i}$value, df_new.#{i}$value), silent=TRUE)")
case t_test
when String
t_test_out = "ERR: Data are constant"
when Float
t_test_out = "%.2g" % t_test
end
r_object.converse %Q{ xlim = range(old_time_plot$df_old.#{i}.time, new_time_plot$df_new.#{i}.time) }
r_object.converse %Q{ beanplot(df_old.#{i}$value, df_new.#{i}$value, side='both', log="", names="p-value:#{t_test_out}", col=list('deepskyblue4',c('firebrick', 'black')), innerborder='black', bw=band1)}
r_object.converse do
%Q{ plot(old_time_plot, type='l', lwd=2.5, xlim = xlim, ylim = ylim, col='deepskyblue4', pch=15)
if (length(df_new.#{i}$value) > 4) {
lines(new_time_plot,type='l',ylab=df_new.#{i}$name[[1]], col='firebrick', pch=16, lwd=3 )
} else {
points(new_time_plot,ylab=df_new.#{i}$name[[1]], col='skyblue4', bg='firebrick', pch=21, cex=1.2)
}
title <- "#{@@name_legend[cat]}--#{@@name_legend[subcategory.to_s]}--#{name}"
if (nchar(title) > 80) {
mtext(title, side=3, line=0, outer=TRUE, cex=0.7)
} else if (nchar(title) > 100 ) {
mtext(title, side=3, line=0, outer=TRUE, cex=0.6)
} else if (nchar(title) > 120 ) {
mtext(title, side=3, line=0, outer=TRUE, cex=0.5)
} else {
mtext(title, side=3, line=0, outer=TRUE)
}
}
end
r_object.converse "dev.off()" # This line must end the loop, to prevent R from crashing.
i +=1
end # while loop
end # subcats
end # categories
# graphfiles
# TODO Do I send the email here?
data_hash
end # graph_and_stats
# This function generates a comparison between the two sets of data, which are sliced by {#slice_matches}, graphing the results as SVG files.
# @param [Array, Array] Arrays of measurements sliced from the results of two DataMapper DB queries
# @return [Array] An array which contains all of the files produced by the process. This will likely be an array of approximately 400 filenames.
def graph_matches(old_measures, new_measures, comparison_folder, opts = {})
options = Graphing_defaults.merge(opts)
require 'rserve/simpler'
FileUtils.mkdir_p(comparison_folder)
graphfiles = []
measures = [new_measures, old_measures]
#$DEBUG = true
r_object = Rserve::Simpler.new
r_object.converse('library("beanplot")')
r_object.converse "setwd('#{Dir.pwd}')"
#r_object.converse('library("Cairo")')
@@categories.map do |cat|
subcats = measures.first.map{|meas| meas.subcat if meas.category == cat.to_sym}.compact.uniq
#p Dir.exist?(File.join(AppConfig[:comparison_directory], comparison_folder.to_s, cat))
#p subcats
subcats.each do |subcategory|
graphfile_prefix = File.join(comparison_folder, cat, subcategory.to_s)
FileUtils.mkdir_p(graphfile_prefix)
#p Dir.exist?(graphfile_prefix)
new_structs = measures.first.map{|meas| meas if meas.subcat == subcategory.to_sym}.compact
old_structs = measures.last.map{|meas| meas if meas.subcat == subcategory.to_sym}.compact
[new_structs, old_structs].each do |structs|
structs.each do |str|
str.value = str.value.to_f
str.name = str.name.to_s
str.category = @@name_legend[str.category.to_s]
str.subcat = @@name_legend[str.subcat.to_s]
str.time = str.time.to_s.gsub(/T/, ' ').gsub(/-(\d*):00/,' \100')
end
end
datafr_new = Rserve::DataFrame.from_structs(new_structs)
datafr_old = Rserve::DataFrame.from_structs(old_structs)
r_object.converse( df_new: datafr_new ) do
%Q{df_new$time <- strptime(as.character(df_new$time), "%Y-%m-%d %X")
df_new$name <- factor(df_new$name)
df_new$category <-factor(df_new$category)
df_new$subcat <- factor(df_new$subcat)
df_new$raw_id <- factor(df_new$raw_id)
}
end # new datafr converse
r_object.converse( df_old: datafr_old) do
%Q{df_old$time <- strptime(as.character(df_old$time), "%Y-%m-%d %X")
df_old$name <- factor(df_old$name)
df_old$category <-factor(df_old$category)
df_old$subcat <- factor(df_old$subcat)
df_old$raw_id <- factor(df_old$raw_id)
}
end # old datafr converse
count = new_structs.map {|str| str.name }.uniq.compact.length
i = 1;
names = r_object.converse("levels(df_old$name)")
while i <= count
r_object.converse do
%Q{ df_new.#{i} <- subset(df_new, name == levels(df_new$name)[[#{i}]])
df_old.#{i} <- subset(df_old, name == levels(df_old$name)[[#{i}]])
old_time_plot <- data.frame(df_old.#{i}$time, df_old.#{i}$value)
new_time_plot <- data.frame(df_new.#{i}$time, df_new.#{i}$value)
old_time_plot <- old_time_plot[order(df_old.#{i}$time), ]
new_time_plot <- new_time_plot[order(df_new.#{i}$time), ]
}
end
# p r_object.converse "summary(df_old.#{i})" if $DEBUG
# p r_object.converse "summary(df_new.#{i})" if $DEBUG
# Configure the environment for the graphing, by setting up the numbered categories
curr_name = r_object.converse("levels(df_old$name)[[#{i}]]")
graphfile = File.join([graphfile_prefix, curr_name + ".svg"])
graphfiles << graphfile
name = @@name_legend[curr_name]
r_object.converse(%Q{svg(file="#{graphfile}", bg="transparent", height=3, width=7.5)})
r_object.converse('par(mar=c(1,1,1,1), oma=c(2,1,1,1))')
r_object.converse do
%Q{ tmp <- layout(matrix(c(1,2),1,2,byrow=T), widths=c(3,4), heights=c(1,1))
tmp <- layout(matrix(c(1,2),1,2,byrow=T), widths=c(3,4), heights=c(1,1)) }
end
r_object.converse %Q{ band1 <- try(bw.SJ(df_old.#{i}$value), silent=TRUE)
if(inherits(band1, 'try-error')) band1 <- try(bw.nrd0(df_old.#{i}$value), silent=TRUE)
if(inherits(band1, 'try-error')) band1 <- try(bw.nrd0(df_new.#{i}$value), silent=TRUE)
if(inherits(band1, 'try-error')) band1 <- 0.99
}
r_object.converse "ylim = range(density(c(df_old.#{i}$value, df_new.#{i}$value), bw=band1)[[1]])"
t_test = r_object.converse ("try(t.test(df_old.#{i}$value, df_new.#{i}$value)$p.value, silent=TRUE)")
# p r_object.converse( "df_old.#{i}$value" ) if $DEBUG
# p r_object.converse( "df_new.#{i}$value" ) if $DEBUG
case t_test
when String
t_test_out = "ERR: Data are constant"
when Float
t_test_out = "%.2g" % t_test
end
r_object.converse %Q{ xlim = range(old_time_plot$df_old.#{i}.time, new_time_plot$df_new.#{i}.time) }
r_object.converse %Q{beanplot(df_old.#{i}$value, df_new.#{i}$value, side='both', log="", names="p-value: #{t_test_out}", col=list('deepskyblue4',c('firebrick', 'black')), innerborder='black', bw=band1)}
r_object.converse do
# TODO!!!
%Q{ plot(old_time_plot, type='l', lwd=2.5, xlim = xlim, ylim = ylim, col='deepskyblue4', pch=15)
if (length(df_new.#{i}$value) > 4) {
lines(new_time_plot,type='l',ylab=df_new.#{i}$name[[1]], col='firebrick', pch=16, lwd=3 )
} else {
points(new_time_plot,ylab=df_new.#{i}$name[[1]], col='skyblue4', bg='firebrick', pch=21, cex=1.2)
}
title <- "#{@@name_legend[cat]}\t#{@@name_legend[subcategory.to_s]}\t#{name}"
if (nchar(title) > 80) {
mtext(title, side=3, line=0, outer=TRUE, cex=0.75)
} else if (nchar(title) > 100 ) {
mtext(title, side=3, line=0, outer=TRUE, cex=0.65)
} else if (nchar(title) > 120 ) {
mtext(title, side=3, line=0, outer=TRUE, cex=0.55)
} else {
mtext(title, side=3, line=0, outer=TRUE)
}
}
end
r_object.converse "dev.off()" #### This line must conclude each loop, as far as R is concerned.
i +=1
end # while loop
end # subcats
end # categories
graphfiles
end # graph_files
# Ordered list of metric categories read from each msrun's metric record.
# NOTE: "uplc" is special-cased by the slicing/graphing methods — its values
# come from pressure-trace fields on the msrun itself, not from msrun.metric.
@@categories = ["uplc", "chromatography", "ms1", "dynamic_sampling", "ion_source", "ion_treatment", "peptide_ids", "ms2", "run_comparison"]
# Maps machine-friendly category/subcategory/measurement keys to the
# human-readable labels used in graph titles and the R data frames.
@@name_legend = { "uplc"=>"UPLC","chromatography"=>"Chromatography", "ms1"=>"MS1", "ms2"=>"MS2", "dynamic_sampling"=>"Dynamic Sampling", "ion_source"=>"Ion Source", "ion_treatment"=>"Ion Treatment", "peptide_ids"=> "Peptide IDs", "run_comparison"=>"Run Comparison", "id_charge_distributions_at_different_ms1max_quartiles_for_charges_1_4"=>"ID Charge Distributions At Different MS1max Quartiles For Charges 1-4", "precursor_m_z_averages_and_differences_from_1st_quartile_largest_of_different_ms1total_tic_quartiles_over_full_elution_period"=>"Precursor m/z Averages and Differences from 1st Quartile (Largest) of Different MS1Total (TIC) Quartiles Over Full Elution Period",
"number_of_compounds_in_common"=>"Number of Compounds in Common", "fraction_of_overlapping_compounds_relative_to_first_index"=>"Fraction of Overlapping Compounds - relative to first index", "fraction_of_overlapping_compounds_relative_to_second_index"=>"Fraction of Overlapping Compounds - relative to second index", "median_retention_rank_differences_for_compounds_in_common_percent"=>"Median Retention Rank Differences for Compounds in Common (Percent)", "avg_1_60_1_60"=>"Avg\t1.60\t1.60",
"average_retention_rank_differences_for_compounds_in_common_percent"=>"Average Retention Rank Differences for Compounds in Common (Percent)", "avg_2_30_2_30"=>"Avg\t2.30\t2.30", "number_of_matching_identified_ions_between_runs"=>"Number of Matching Identified Ions Between Runs", "relative_deviations_in_ms1_max_for_matching_identified_ions_between_runs"=>"Relative Deviations in MS1 Max For Matching Identified Ions Between Runs", "avg_1_00_1_00"=>"Avg\t1.00\t1.00", "relative_uncorrected_deviations_in_ms1_max_for_matching_identified_ions_between_runs"=>"Relative Uncorrected Deviations in MS1 Max For Matching Identified Ions Between Runs", "avg_0_00_0_00"=>"Avg\t0.00\t0.00",
"relative_corrected_deviations_in_ms1_max_for_matching_identified_ions_between_runs"=>"Relative Corrected Deviations in MS1 Max For Matching Identified Ions Between Runs", "relative_rt_trends_in_ms1_max_for_matching_identified_ions_between_runs"=>"Relative RT Trends in MS1 Max For Matching Identified Ions Between Runs",
"relative_rt_trends_corrected_deviations_of_ms1_max_for_matching_identified_ions_between_runs"=>"Relative RT Trends / Corrected Deviations of MS1 Max For Matching Identified Ions Between Runs", "median_relative_intensities_in_ms1_max_for_matching_identified_ions_between_runs"=>"Median Relative Intensities in MS1 Max For Matching Identified Ions Between Runs", "number_of_matching_doubly_charged_identified_ions_between_runs"=>"Number of Matching Doubly Charged Identified Ions Between Runs",
"relative_deviations_in_ms1_max_for_doubly_charged_matching_identified_ions_between_runs"=>"Relative Deviations in MS1 Max For Doubly Charged Matching Identified Ions Between Runs", "number_of_matching_triply_charged_identified_ions_between_runs"=>"Number of Matching Triply Charged Identified Ions Between Runs", "relative_deviations_in_ms1_max_for_triply_charged_matching_identified_ions_between_runs"=>"Relative Deviations in MS1 Max For Triply Charged Matching Identified Ions Between Runs",
"relative_2_deviations_in_ms1_max_for_matching_identified_ions_between_runs"=>"Relative 2 * Deviations in MS1 Max For Matching Identified Ions Between Runs", "relative_deviations_in_ms1_max_at_different_rt_quartiles_for_matching_identified_ions_between_runs_single_table"=>"Relative Deviations in MS1 Max at Different RT Quartiles For Matching Identified Ions Between Runs - Single Table", "relative_deviations_in_ms1_max_at_1_rt_quartile_for_matching_identified_ions_between_runs"=>"Relative Deviations in MS1 Max at 1 RT Quartile For Matching Identified Ions Between Runs",
"relative_deviations_in_ms1_max_at_2_rt_quartile_for_matching_identified_ions_between_runs"=>"Relative Deviations in MS1 Max at 2 RT Quartile For Matching Identified Ions Between Runs", "relative_deviations_in_ms1_max_at_3_rt_quartile_for_matching_identified_ions_between_runs"=>"Relative Deviations in MS1 Max at 3 RT Quartile For Matching Identified Ions Between Runs", "relative_deviations_in_ms1_max_at_4_rt_quartile_for_matching_identified_ions_between_runs"=>"Relative Deviations in MS1 Max at 4 RT Quartile For Matching Identified Ions Between Runs", "number_of_excess_early_eluting_identified_species"=>"Number of Excess Early Eluting Identified Species",
"number_of_excess_late_eluting_identified_species"=>"Number of Excess Late Eluting Identified Species", "peak_widths_at_half_max_at_rt_deciles"=>"Peak widths at half max at RT deciles", "rt_peptide_deciles"=>"RT peptide deciles", "oversampling_vs_rt"=>"Oversampling vs RT", "retention_time_decile_intervals"=>"Retention Time Decile Intervals", "matching_low_high_rt_peptide_ions"=>"Matching Low:High RT Peptide Ions", "rt_median_differences_for_matching_peptides"=>"RT Median Differences for Matching Peptides", "median_dev"=>"Median Dev", "median_skew"=>"Median Skew", "fraction_of_1_rt_decile_peps_in_common"=>"Fraction of 1 RT Decile Peps In Common",
"fraction_of_2_rt_decile_peps_in_common"=>"Fraction of 2 RT Decile Peps In Common", "fraction_of_3_rt_decile_peps_in_common"=>"Fraction of 3 RT Decile Peps In Common", "fraction_of_4_rt_decile_peps_in_common"=>"Fraction of 4 RT Decile Peps In Common", "fraction_of_5_rt_decile_peps_in_common"=>"Fraction of 5 RT Decile Peps In Common", "fraction_of_6_rt_decile_peps_in_common"=>"Fraction of 6 RT Decile Peps In Common", "fraction_of_7_rt_decile_peps_in_common"=>"Fraction of 7 RT Decile Peps In Common", "fraction_of_8_rt_decile_peps_in_common"=>"Fraction of 8 RT Decile Peps In Common", "fraction_of_9_rt_decile_peps_in_common"=>"Fraction of 9 RT Decile Peps In Common",
"fraction_of_10_rt_decile_peps_in_common"=>"Fraction of 10 RT Decile Peps In Common", "end_interrun_and_decile_results"=>"End Interrun and Decile Results", "ab_deviation_vs_difference_in_run_order"=>"Ab Deviation vs Difference in Run Order - ", "median_rt_rank_vs_difference_in_run_order"=>"Median RT Rank vs Difference in Run Order", "begin_runseries_results"=>"Begin Runseries Results", "begin_series_1"=>"Begin Series=1", "files_analyzed_2"=>"Files Analyzed (2)", "run_number_1_2"=>"Run Number\t1\t2\t", "spectrum_counts"=>"Spectrum Counts", "first_and_last_ms1_rt_min"=>"First and Last MS1 RT (min)", "tryptic_peptide_counts"=>"Tryptic Peptide Counts",
"peptide_counts"=>"Peptide Counts", "middle_peptide_retention_time_period_min"=>"Middle Peptide Retention Time Period (min)", "ms1_during_middle_and_early_peptide_retention_period"=>"MS1 During Middle (and Early) Peptide Retention Period", "ms1_total_ion_current_for_different_rt_periods"=>"MS1 Total Ion Current For Different RT Periods", "total_ion_current_for_ids_at_peak_maxima"=>"Total Ion Current For IDs at Peak Maxima", "precursor_m_z_for_ids"=>"Precursor m/z for IDs", "number_of_ions_vs_charge"=>"Number of Ions vs Charge", "averages_vs_rt_for_ided_peptides"=>"Averages vs RT for IDed Peptides", "precursor_m_z_peptide_ion_m_z_2_charge_only_reject_0_45_m_z"=>"Precursor m/z - Peptide Ion m/z (+2 Charge Only, Reject >0.45 m/z)",
"ion_ids_by_charge_state_relative_to_2"=>"Ion IDs by Charge State (Relative to +2)", "average_peptide_lengths_for_different_charge_states"=>"Average Peptide Lengths for Different Charge States", "average_peptide_lengths_for_charge_2_for_different_numbers_of_mobile_protons"=>"Average Peptide Lengths For Charge 2 for Different Numbers of Mobile Protons", "numbers_of_ion_ids_at_different_charges_with_1_mobile_proton"=>"Numbers of Ion Ids at Different Charges with 1 Mobile Proton",
"percent_of_ids_at_different_charges_and_mobile_protons_relative_to_ids_with_1_mobile_proton"=>"Percent of IDs at Different Charges and Mobile Protons Relative to IDs with 1 Mobile Proton", "precursor_m_z_monoisotope_exact_m_z"=>"Precursor m/z - Monoisotope Exact m/z", "ms2_id_spectra"=>"MS2 ID Spectra", "ms1_id_max"=>"MS1 ID Max", "ms1_id_abund_at_ms2_acquisition"=>"MS1 ID Abund at MS2 Acquisition", "ms2_id_abund_reported"=>"MS2 ID Abund Reported",
"max_peak_width_for_ids_sec"=>"Max Peak Width for IDs (sec)", "peak_width_at_half_height_for_ids"=>"Peak Width at Half Height for IDs", "peak_widths_at_half_max_over_rt_deciles_for_ids"=>"Peak Widths at Half Max over RT deciles for IDs", "nearby_resampling_of_ids_oversampling_details"=>"Nearby Resampling of IDs - Oversampling Details", "wide_rt_differences_for_ids_4_min"=>"Wide RT Differences for IDs (> 4 min)",
"fraction_of_repeat_peptide_ids_with_divergent_rt_rt_vs_rt_best_id_chromatographic_bleed"=>"Fraction of Repeat Peptide IDs with Divergent RT (RT vs RT-best ID) - Chromatographic 'Bleed'", "early_and_late_rt_oversampling_spectrum_ids_unique_peptide_ids_chromatographic_flow_through_bleed"=>"Early and Late RT Oversampling (Spectrum IDs/Unique Peptide IDs) - Chromatographic: Flow Through/Bleed",
"peptide_ion_ids_by_3_spectra_hi_vs_1_3_spectra_lo_extreme_oversampling"=>"Peptide Ion IDs by > 3 Spectra (Hi) vs 1-3 Spectra (Lo) - Extreme Oversampling", "ratios_of_peptide_ions_ided_by_different_numbers_of_spectra_oversampling_measure"=>"Ratios of Peptide Ions IDed by Different Numbers of Spectra - Oversampling Measure",
"single_spectrum_peptide_ion_identifications_oversampling_measure"=>"Single Spectrum Peptide Ion Identifications - Oversampling Measure", "ms1max_ms1sampled_abundance_ratio_ids_inefficient_sampling"=>"MS1max/MS1sampled Abundance Ratio IDs - Inefficient Sampling", "rt_ms1max_rt_ms2_for_ids_sec"=>"RT(MS1max)-RT(MS2) for IDs (sec) ",
"ion_injection_times_for_ids_ms"=>"Ion Injection Times for IDs (ms)", "relative_fraction_of_peptides_in_retention_decile_matching_a_peptide_in_other_runs"=>"Relative Fraction of Peptides in Retention Decile Matching a Peptide in Other Runs", "relative_uniqueness_of_peptides_in_decile_found_anywhere_in_other_runs"=>"Relative Uniqueness of Peptides in Decile Found Anywhere in Other Runs", "differences_in_elution_rank_percent_of_matching_peptides_in_other_runs"=>"Differences in Elution Rank (Percent) of Matching Peptides in Other Runs", "median_ratios_of_ms1_intensities_of_matching_peptides_in_other_runs"=>"Median Ratios of MS1 Intensities of Matching Peptides in Other Runs",
"uncorrected_and_rt_corrected_relative_intensities_of_matching_peptides_in_other_runs"=>"Uncorrected and RT Corrected Relative Intensities of Matching Peptides in Other Runs", "magnitude_of_rt_correction_of_intensities_of_matching_peptides_in_other_runs"=>"Magnitude of RT Correction of Intensities of Matching Peptides in Other Runs", "top_ion_abundance_measures"=>"Top Ion Abundance Measures", "end_series_1"=>"End Series=1", "end_runseries_results"=>"End Runseries Results", "precursor_m_z_averages_at_different_ms1total_tic_quartiles_over_middle_elution_period"=>"Precursor m/z Averages at Different MS1Total (TIC) Quartiles Over Middle Elution Period", "run_q1_precursor_m_z_q2_q1_q3_q1_q4_q1_q1_tic_1000_q2_q1_q3_q1_q4_q1"=>"Run #, Q1 precursor m/z, Q2-Q1, Q3-Q1, Q4-Q1, Q1 TIC/1000, Q2/Q1, Q3/Q1, Q4/Q1", "_1"=>"1", "_2"=>"2", ""=>"", "decile"=>"Decile",
"run_1"=>"Run 1", "run_2"=>"Run 2", "run"=>"Run", "median"=>"Median ", "avg"=>"Avg", "lab_1"=>"Lab 1", "avgdel"=>"AvgDel", "meddel"=>"MedDel", "diff1"=>"Diff1", "ms2_scans"=>"MS2 scans", "ms1_scans_full"=>"MS1 Scans/Full", "ms1_scans_other"=>"MS1 Scans/Other", "first_ms1"=>"First MS1", "last_ms1"=>"Last MS1 ", "peptides"=>"Peptides", "ions"=>"Ions ", "identifications"=>"Identifications", "abundance_pct"=>"Abundance Pct", "abundance_1000"=>"Abundance/1000", "ions_peptide"=>"Ions/Peptide", "ids_peptide"=>"IDs/Peptide", "semi_tryp_peps"=>"Semi/Tryp Peps", "semi_tryp_cnts"=>"Semi/Tryp Cnts", "semi_tryp_abund"=>"Semi/Tryp Abund", "miss_tryp_peps"=>"Miss/Tryp Peps", "miss_tryp_cnts"=>"Miss/Tryp Cnts", "miss_tryp_abund"=>"Miss/Tryp Abund", "net_oversample"=>"Net Oversample", "half_period"=>"Half Period",
"start_time"=>"Start Time", "mid_time"=>"Mid Time", "qratio_time"=>"Qratio Time", "ms1_scans"=>"MS1 Scans", "pep_id_rate"=>"Pep ID Rate", "id_rate"=>"ID Rate ", "id_efficiency"=>"ID Efficiency", "s_n_median"=>"S/N Median", "tic_median_1000"=>"TIC Median/1000",
"npeaks_median"=>"NPeaks Median", "scan_to_scan"=>"Scan-to-Scan", "s2s_3q_med"=>"S2S-3Q/Med", "s2s_1qrt_med"=>"S2S-1Qrt/Med", "s2s_2qrt_med"=>"S2S-2Qrt/Med", "s2s_3qrt_med"=>"S2S-3Qrt/Med", "s2s_4qrt_med"=>"S2S-4Qrt/Med", "esi_off_middle"=>"ESI Off Middle",
"esi_off_early"=>"ESI Off Early", "max_ms1_jump"=>"Max MS1 Jump", "max_ms1_fall"=>"Max MS1 Fall", "ms1_jumps_10x"=>"MS1 Jumps >10x", "ms1_falls_10x"=>"MS1 Falls >10x", "_1st_quart_id"=>"1st Quart ID", "middle_id"=>"Middle ID", "last_id_quart"=>"Last ID Quart", "to_end_of_run"=>"To End of Run", "med_tic_id_1000"=>"Med TIC ID/1000",
"interq_tic"=>"InterQ TIC", "mid_interq_tic"=>"Mid InterQ TIC", "half_width"=>"Half Width", "quart_ratio"=>"Quart Ratio", "precursor_min"=>"Precursor Min", "precursor_max"=>"Precursor Max", "med_q1_tic"=>"Med @ Q1 TIC", "med_q4_tic"=>"Med @ Q4 TIC", "med_q1_rt"=>"Med @ Q1 RT", "med_q4_rt"=>"Med @ Q4 RT", "med_charge_1"=>"Med Charge +1",
"med_charge_2"=>"Med Charge +2", "med_charge_3"=>"Med Charge +3", "med_charge_4"=>"Med Charge +4", "charge_1"=>"Charge +1", "charge_2"=>"Charge +2", "charge_3"=>"Charge +3", "charge_4"=>"Charge +4", "charge_5"=>"Charge +5", "length_q1"=>"Length Q1", "length_q4"=>"Length Q4", "charge_q1"=>"Charge Q1", "charge_q4"=>"Charge Q4", "spectra"=>"Spectra ",
"mean_absolute"=>"Mean Absolute", "ppm_median"=>"ppm Median", "ppm_interq"=>"ppm InterQ", "_2_ion_count"=>"+2 Ion Count", "naa_ch_2_mp_1"=>"NAA,Ch=2,MP=1", "naa_ch_2_mp_0"=>"NAA,Ch=2,MP=0", "naa_ch_2_mp_2"=>"NAA,Ch=2,MP=2", "ch_1_mp_1"=>"Ch=1 MP=1",
"ch_2_mp_1"=>"Ch=2 MP=1", "ch_3_mp_1"=>"Ch=3 MP=1", "ch_4_mp_1"=>"Ch=4 MP=1", "ch_1_mp_0"=>"Ch=1 MP=0", "ch_2_mp_0"=>"Ch=2 MP=0", "ch_3_mp_0"=>"Ch=3 MP=0", "more_than_100"=>"More Than 100", "betw_100_0_50_0"=>"Betw 100.0-50.0", "betw_50_0_25_0"=>"Betw 50.0-25.0", "betw_25_0_12_5"=>"Betw 25.0-12.5", "betw_12_5_6_3"=>"Betw 12.5-6.3",
"betw_6_3_3_1"=>"Betw 6.3-3.1", "betw_3_1_1_6"=>"Betw 3.1-1.6", "betw_1_6_0_8"=>"Betw 1.6-0.8", "top_half"=>"Top Half", "next_half_2"=>"Next Half (2)", "next_half_3"=>"Next Half (3)", "next_half_4"=>"Next Half (4)", "next_half_5"=>"Next Half (5)", "next_half_6"=>"Next Half (6)", "next_half_7"=>"Next Half (7)", "next_half_8"=>"Next Half (8)",
"npeaks_interq"=>"NPeaks InterQ", "s_n_interq"=>"S/N InterQ", "id_score_median"=>"ID Score Median", "id_score_interq"=>"ID Score InterQ", "idsc_med_q1msmx"=>"IDSc Med Q1Msmx", "median_midrt"=>"Median MidRT", "_75_25_midrt"=>"75/25 MidRT", "_95_5_midrt"=>"95/5 MidRT", "_75_25_pctile"=>"75/25 Pctile", "_95_5_pctile"=>"95/5 Pctile",
"median_value"=>"Median Value", "third_quart"=>"Third Quart", "last_decile"=>"Last Decile", "med_top_quart"=>"Med Top Quart",
"med_top_16th"=>"Med Top 16th", "med_top_100"=>"Med Top 100", "median_disper"=>"Median Disper", "med_quart_disp"=>"Med Quart Disp", "med_16th_disp"=>"Med 16th Disp", "med_100_disp"=>"Med 100 Disp", "_3quart_value"=>"3Quart Value", "_9dec_value"=>"9Dec Value", "ms1_interscan_s"=>"MS1 Interscan/s", "ms1_scan_fwhm"=>"MS1 Scan/FWHM",
"ids_used"=>"IDs Used ", "first_decile"=>"First Decile", "repeated_ids"=>"Repeated IDs", "med_rt_diff_s"=>"Med RT Diff/s", "_1q_rt_diff_s"=>"1Q RT Diff/s",
"_1dec_rt_diff_s"=>"1Dec RT Diff/s", "median_dm_z"=>"Median dm/z", "quart_dm_z"=>"Quart dm/z", "_4_min"=>"+ 4 min ", "pep_ions_hi"=>"Pep Ions (Hi)",
"ratio_hi_lo"=>"Ratio Hi/Lo", "spec_cnts_hi"=>"Spec Cnts (Hi)", "spec_pep_hi"=>"Spec/Pep (Hi)", "spec_cnt_excess"=>"Spec Cnt Excess", "once_twice"=>"Once/Twice",
"twice_thrice"=>"Twice/Thrice", "peptide_ions"=>"Peptide Ions", "fract_1_ions"=>"Fract >1 Ions", "_1_vs_1_pepion"=>"1 vs >1 PepIon", "_1_vs_1_spec"=>"1 vs >1 Spec",
"median_all_ids"=>"Median All IDs", "_3q_all_ids"=>"3Q All IDs", "_9dec_all_ids"=>"9Dec All IDs", "med_top_dec"=>"Med Top Dec", "med_bottom_1_2"=>"Med Bottom 1/2",
"med_diff_abs"=>"Med Diff Abs", "median_diff"=>"Median Diff", "first_quart"=>"First Quart", "ms1_median"=>"MS1 Median", "ms1_maximum"=>"MS1 Maximum",
"ms2_median"=>"MS2 Median", "ms2_maximun"=>"MS2 Maximun", "ms2_fract_max"=>"MS2 Fract Max", "all_deciles"=>"All Deciles", "comp_to_first"=>"Comp to First",
"comp_to_last"=>"Comp to Last", "average_diff"=>"Average Diff", "median_2_diff"=>"Median*2 Diff", "comp_to_first_2"=>"Comp to First*2", "comp_to_last_2"=>"Comp to Last*2", "uncor_rel_first"=>"Uncor rel First", "uncor_rel_last"=>"Uncor rel Last",
"corr_rel_first"=>"Corr rel First", "corr_rel_last"=>"Corr rel Last", "top_10_abund"=>"Top 10% Abund", "top_25_abund"=>"Top 25% Abund", "top_50_abund"=>"Top 50% Abund", "fractab_top"=>"Fractab Top", "fractab_top_10"=>"Fractab Top 10", "fractab_top_100"=>"Fractab Top 100"}
end #class << self
end #ComparisonGrapher
end #module Ms
Changed the title format for beanplots.
# Default options for the Alerter settings in the {ComparisonGrapher#graph_and_stats} function.
# :email_alert — when true, out-of-range measurements trigger an email alert.
# Frozen so the shared default hash cannot be mutated by callers; callers
# combine it with their own options via non-destructive Hash#merge.
Graphing_defaults = {email_alert: true}.freeze
module Ms
# This is the class which handles the responses from DB queries and generates comparison graphs
class ComparisonGrapher
class << self
# Takes a DataMapper query and turns the matches into the same data structure as is produced by parsing the data file.
# @param [Array] An array containing the matches to the DataMapper database query
# @return [Hash] A hash of a hash of a hash, containing the data desired, but it really should be an array of out_hashes, right?
def match_to_hash(matches)
# matches is the result of a Msrun.all OR Msrun.first OR Msrun.get(*args)
@data = {}
matches.each do |msrun|
next if msrun.metric.nil?
index = msrun.raw_id.to_s
@data[index] = {'timestamp' => msrun.rawtime || Time.random(1)}
@@categories.each do |cat|
@data[index][cat] = msrun.metric.send(cat.to_sym).hashes
@data[index][cat].keys.each do |subcat|
@data[index][cat][subcat].delete('id'.to_sym)
@data[index][cat][subcat].delete("#{cat}_id".to_sym)
end
end
end
@data # as a hash of a hash of a hash
end
# This fxn produces an array containing {Measurement} structs which contain the data found in all the matches produced by a DataMapper DB query
# @param [Array] an Array of matches
# @return [Array] an Array containing all the measurements found in the DB matches given
def slice_matches(matches)
measures = []; @data = {}
# Why is this line of code here?
# debugger
matches = [matches] if !matches.is_a? DataMapper::Collection and !matches.is_a? Array
matches.each do |msrun|
next if msrun.nil? or msrun.metric.nil?
index = msrun.raw_id.to_s
@data[index] = {'timestamp' => msrun.rawtime || Time.random(1)}
@@categories.each do |cat|
if cat == "uplc"
arr = [{"hplc_max_p" => msrun.hplc_max_p || 0}, {'hplc_avg_p' => msrun.hplc_avg_p || 0}, {'hplc_std_p' => msrun.hplc_std_p || 0}]
arr.each do |prop|
measures << Measurement.new(prop.keys.first, index, @data[index]['timestamp'], prop[prop.keys.first], cat.to_sym, :pressure_trace)
end
else
@data[index][cat] = msrun.metric.send(cat.to_sym).hashes
@data[index][cat].keys.each do |subcat|
@data[index][cat][subcat].delete('id'.to_sym)
@data[index][cat][subcat].delete("#{cat}_id".to_sym)
@data[index][cat][subcat].delete("#{cat}_metric_msrun_id".to_sym)
@data[index][cat][subcat].delete("#{cat}_metric_msrun_raw_id".to_sym)
@data[index][cat][subcat].delete("#{cat}_metric_metric_input_file".to_sym)
@data[index][cat][subcat].each { |property, value|
measures << Measurement.new( property, index, @data[index]['timestamp'], value, cat.to_sym, subcat.to_sym) }
end
end
end
end
measures.sort_by {|measure| measure.category}
end
# This function takes the same parameters as {#graph_matches} and accomplishes the same result, as well as generating and returning, instead of the filenames, a hash containing the information needed to do cool stuff
# @param [Array, Array] Arrays of measurements sliced from the results of two DataMapper DB queries, the first of which represents the newest in a QC run, which will be compared to the previous values
# @return [Hash] nested hash keyed by category, then subcategory, then measurement name, whose values are [mean, standard deviation] pairs computed from the old measurements
def graph_and_stats(old_measures, new_measure, comparison_folder, opts = {})
options = Graphing_defaults.merge(opts)
default_variance = QcConfig[:default_allowed_variance]
require 'rserve/simpler'
FileUtils.mkdir_p(comparison_folder)
graphfiles = []
measures = [new_measure, old_measures]
data_hash = {}
r_object = Rserve::Simpler.new
r_object.converse('library("beanplot")')
r_object.converse "setwd('#{Dir.pwd}')"
@@categories.map do |cat|
data_hash[cat.to_sym] = {}
subcats = measures.first.map{|meas| meas.subcat if meas.category == cat.to_sym}.compact.uniq
subcats.each do |subcategory|
data_hash[cat.to_sym][subcategory] = {}
graphfile_prefix = File.join(comparison_folder, cat, subcategory.to_s)
FileUtils.mkdir_p(graphfile_prefix)
new_structs = measures.first.map{|meas| meas if meas.subcat == subcategory.to_sym}.compact
old_structs = measures.last.map{|meas| meas if meas.subcat == subcategory.to_sym}.compact
[new_structs, old_structs].each do |structs|
structs.each do |str|
str.value = str.value.to_f
str.name = str.name.to_s
str.category = @@name_legend[str.category.to_s]
str.subcat = @@name_legend[str.subcat.to_s]
str.time = str.time.to_s.gsub(/T/, ' ').gsub(/-(\d*):00/,' \100')
end
end
datafr_new = Rserve::DataFrame.from_structs(new_structs)
datafr_old = Rserve::DataFrame.from_structs(old_structs)
r_object.converse( df_new: datafr_new ) do
%Q{df_new$time <- strptime(as.character(df_new$time), "%Y-%m-%d %X")
df_new$name <- factor(df_new$name)
df_new$category <-factor(df_new$category)
df_new$subcat <- factor(df_new$subcat)
df_new$raw_id <- factor(df_new$raw_id)
}
end # new datafr converse
r_object.converse( df_old: datafr_old) do
%Q{df_old$time <- strptime(as.character(df_old$time), "%Y-%m-%d %X")
df_old$name <- factor(df_old$name)
df_old$category <-factor(df_old$category)
df_old$subcat <- factor(df_old$subcat)
df_old$raw_id <- factor(df_old$raw_id)
}
end # old datafr converse
count = new_structs.map {|str| str.name }.uniq.compact.length
i = 1;
names = r_object.converse("levels(df_old$name)")
while i <= count
r_object.converse do
%Q{ df_new.#{i} <- subset(df_new, name == levels(df_new$name)[[#{i}]])
df_old.#{i} <- subset(df_old, name == levels(df_old$name)[[#{i}]])
old_time_plot <- data.frame(df_old.#{i}$time, df_old.#{i}$value)
new_time_plot <- data.frame(df_new.#{i}$time, df_new.#{i}$value)
old_time_plot <- old_time_plot[order(df_old.#{i}$time), ]
new_time_plot <- new_time_plot[order(df_new.#{i}$time), ]
}
end
# Configure the environment for the graphing, by setting up the numbered categories
curr_name = r_object.converse("levels(df_old$name)[[#{i}]]")
## THIS IS WHERE WE DO THE CALCULATIONS
if not QcConfig[cat.to_sym][subcategory.to_s.split('_').map{|word| word.capitalize}.join("").to_sym].nil?
t = QcConfig[cat.to_sym][subcategory.to_s.split('_').map{|word| word.capitalize}.join("").to_sym][curr_name]
variance = t.is_a?(Numeric) ? t : default_variance
mean = r_object.converse("mean(df_old.#{i}$value)")
sd = r_object.converse("try(sd(df_old.#{i}$value), silent=TRUE)")
data_hash[cat.to_sym][subcategory][curr_name] = [mean, sd]
new_point = r_object.converse("df_new.#{i}$value")
range = mean-variance*sd..mean+variance*sd
Alerter.create("#{cat.to_sym}--#{subcategory}--#{curr_name} has exceeded range: #{range} Mean #{mean} Variance #{variance} Standard deviation #{sd} Value #{new_point}", { :email => options[:email_alert] }) if not ( range === new_point or range.member?(new_point) )
end
## END
graphfile = File.join([graphfile_prefix, curr_name + '.svg'])
graphfiles << graphfile
name = @@name_legend[curr_name]
r_object.converse(%Q{svg(file="#{graphfile}", bg="transparent", height=3, width=7.5)})
r_object.converse('par(mar=c(1,1,1,1), oma=c(2,1,1,1))')
r_object.converse do
%Q{ tmp <- layout(matrix(c(1,2),1,2,byrow=T), widths=c(3,4), heights=c(1,1))
tmp <- layout(matrix(c(1,2),1,2,byrow=T), widths=c(3,4), heights=c(1,1)) }
end
r_object.converse %Q{ band1 <- try(bw.SJ(df_old.#{i}$value), silent=TRUE)
if(inherits(band1, 'try-error')) band1 <- try(bw.nrd0(df_old.#{i}$value), silent=TRUE)
if(inherits(band1, 'try-error')) band1 <- try(bw.nrd0(df_new.#{i}$value), silent=TRUE)
if(inherits(band1, 'try-error')) band1 <- 0.99 }
r_object.converse "ylim = range(density(c(df_old.#{i}$value, df_new.#{i}$value), bw=band1)[[1]])"
t_test = r_object.converse ("try(t.test(df_old.#{i}$value, df_new.#{i}$value), silent=TRUE)")
case t_test
when String
t_test_out = "ERR: Data are constant"
when Float
t_test_out = "%.2g" % t_test
end
r_object.converse %Q{ xlim = range(old_time_plot$df_old.#{i}.time, new_time_plot$df_new.#{i}.time) }
r_object.converse %Q{ beanplot(df_old.#{i}$value, df_new.#{i}$value, side='both', log="", names="p-value:#{t_test_out}", col=list('deepskyblue4',c('firebrick', 'black')), innerborder='black', bw=band1)}
r_object.converse do
%Q{ plot(old_time_plot, type='l', lwd=2.5, xlim = xlim, ylim = ylim, col='deepskyblue4', pch=15)
if (length(df_new.#{i}$value) > 4) {
lines(new_time_plot,type='l',ylab=df_new.#{i}$name[[1]], col='firebrick', pch=16, lwd=3 )
} else {
points(new_time_plot,ylab=df_new.#{i}$name[[1]], col='skyblue4', bg='firebrick', pch=21, cex=1.2)
}
title <- "#{@@name_legend[cat]}--#{@@name_legend[subcategory.to_s]}--#{name}"
if (nchar(title) > 80) {
mtext(title, side=3, line=0, outer=TRUE, cex=0.7)
} else if (nchar(title) > 100 ) {
mtext(title, side=3, line=0, outer=TRUE, cex=0.6)
} else if (nchar(title) > 120 ) {
mtext(title, side=3, line=0, outer=TRUE, cex=0.5)
} else {
mtext(title, side=3, line=0, outer=TRUE)
}
}
end
r_object.converse "dev.off()" # This line must end the loop, to prevent R from crashing.
i +=1
end # while loop
end # subcats
end # categories
# graphfiles
# TODO Do I send the email here?
data_hash
end # graph_and_stats
# This function generates a comparison between the two sets of data, which are
# sliced by {#slice_matches}, graphing the results as SVG files: for every
# metric, a beanplot of old vs. new distributions (labelled with a t-test
# p-value) next to a time-series panel.
# @param old_measures [Array] measurements sliced from previous QC runs (the baseline)
# @param new_measures [Array] measurements from the newest QC run
# @param comparison_folder [String] directory the category/subcategory SVG trees are written under
# @param opts [Hash] graphing options merged over Graphing_defaults
# @return [Array] all SVG filenames produced (likely a few hundred)
def graph_matches(old_measures, new_measures, comparison_folder, opts = {})
  options = Graphing_defaults.merge(opts)
  require 'rserve/simpler' # lazy-loaded: the R bridge is only needed while graphing
  FileUtils.mkdir_p(comparison_folder)
  graphfiles = []
  measures = [new_measures, old_measures] # index 0 = newest run, index 1 = baseline
  r_object = Rserve::Simpler.new
  r_object.converse('library("beanplot")')
  r_object.converse "setwd('#{Dir.pwd}')"
  @@categories.map do |cat|
    subcats = measures.first.map{|meas| meas.subcat if meas.category == cat.to_sym}.compact.uniq
    subcats.each do |subcategory|
      graphfile_prefix = File.join(comparison_folder, cat, subcategory.to_s)
      FileUtils.mkdir_p(graphfile_prefix)
      new_structs = measures.first.map{|meas| meas if meas.subcat == subcategory.to_sym}.compact
      old_structs = measures.last.map{|meas| meas if meas.subcat == subcategory.to_sym}.compact
      # Coerce struct fields into the plain types Rserve::DataFrame understands and
      # expand legend keys into human-readable labels.
      [new_structs, old_structs].each do |structs|
        structs.each do |str|
          str.value = str.value.to_f
          str.name = str.name.to_s
          str.category = @@name_legend[str.category.to_s]
          str.subcat = @@name_legend[str.subcat.to_s]
          # Presumably reshapes ISO8601 timestamps into the "%Y-%m-%d %X" form
          # parsed below — TODO confirm against actual rawtime formats.
          str.time = str.time.to_s.gsub(/T/, ' ').gsub(/-(\d*):00/,' \100')
        end
      end
      datafr_new = Rserve::DataFrame.from_structs(new_structs)
      datafr_old = Rserve::DataFrame.from_structs(old_structs)
      # Ship both frames to R and coerce the columns to the types plotting needs.
      r_object.converse( df_new: datafr_new ) do
        %Q{df_new$time <- strptime(as.character(df_new$time), "%Y-%m-%d %X")
           df_new$name <- factor(df_new$name)
           df_new$category <- factor(df_new$category)
           df_new$subcat <- factor(df_new$subcat)
           df_new$raw_id <- factor(df_new$raw_id)
        }
      end # new datafr converse
      r_object.converse( df_old: datafr_old ) do
        %Q{df_old$time <- strptime(as.character(df_old$time), "%Y-%m-%d %X")
           df_old$name <- factor(df_old$name)
           df_old$category <- factor(df_old$category)
           df_old$subcat <- factor(df_old$subcat)
           df_old$raw_id <- factor(df_old$raw_id)
        }
      end # old datafr converse
      count = new_structs.map {|str| str.name }.uniq.compact.length
      i = 1
      while i <= count
        # Subset both frames to the i-th metric name and build time-ordered frames
        # for the time-series panel.
        r_object.converse do
          %Q{ df_new.#{i} <- subset(df_new, name == levels(df_new$name)[[#{i}]])
              df_old.#{i} <- subset(df_old, name == levels(df_old$name)[[#{i}]])
              old_time_plot <- data.frame(df_old.#{i}$time, df_old.#{i}$value)
              new_time_plot <- data.frame(df_new.#{i}$time, df_new.#{i}$value)
              old_time_plot <- old_time_plot[order(df_old.#{i}$time), ]
              new_time_plot <- new_time_plot[order(df_new.#{i}$time), ]
          }
        end
        curr_name = r_object.converse("levels(df_old$name)[[#{i}]]")
        graphfile = File.join([graphfile_prefix, curr_name + ".svg"])
        graphfiles << graphfile
        name = @@name_legend[curr_name]
        r_object.converse(%Q{svg(file="#{graphfile}", bg="transparent", height=3, width=7.5)})
        r_object.converse('par(mar=c(1,1,1,1), oma=c(2,1,1,1))')
        # Two-panel layout: beanplot on the left, time series on the right.
        r_object.converse do
          %Q{ tmp <- layout(matrix(c(1,2),1,2,byrow=T), widths=c(3,4), heights=c(1,1)) }
        end
        # Bandwidth for the beanplot: prefer Sheather-Jones, fall back to nrd0 on
        # old then new data, finally a fixed constant (constant data makes every
        # estimator fail).
        r_object.converse %Q{ band1 <- try(bw.SJ(df_old.#{i}$value), silent=TRUE)
        if(inherits(band1, 'try-error')) band1 <- try(bw.nrd0(df_old.#{i}$value), silent=TRUE)
        if(inherits(band1, 'try-error')) band1 <- try(bw.nrd0(df_new.#{i}$value), silent=TRUE)
        if(inherits(band1, 'try-error')) band1 <- 0.99
        }
        r_object.converse "ylim = range(density(c(df_old.#{i}$value, df_new.#{i}$value), bw=band1)[[1]])"
        t_test = r_object.converse("try(t.test(df_old.#{i}$value, df_new.#{i}$value)$p.value, silent=TRUE)")
        case t_test
        when String # try() failed (e.g. constant data) and returned the error text
          t_test_out = "ERR: Data are constant"
        when Float
          t_test_out = "%.2g" % t_test
        end
        r_object.converse %Q{ xlim = range(old_time_plot$df_old.#{i}.time, new_time_plot$df_new.#{i}.time) }
        r_object.converse %Q{beanplot(df_old.#{i}$value, df_new.#{i}$value, side='both', log="", names="p-value: #{t_test_out}", col=list('deepskyblue4',c('firebrick', 'black')), innerborder='black', bw=band1)}
        # Time-series panel: old data as a line; new data as a line when there are
        # enough points, individual points otherwise.  Title text shrinks as it
        # grows (FIX: thresholds checked longest-first — previously the "> 100" and
        # "> 120" branches were unreachable because "> 80" matched first).
        r_object.converse do
          %Q{ plot(old_time_plot, type='l', lwd=2.5, xlim = xlim, ylim = ylim, col='deepskyblue4', pch=15)
              if (length(df_new.#{i}$value) > 4) {
                lines(new_time_plot,type='l',ylab=df_new.#{i}$name[[1]], col='firebrick', pch=16, lwd=3 )
              } else {
                points(new_time_plot,ylab=df_new.#{i}$name[[1]], col='skyblue4', bg='firebrick', pch=21, cex=1.2)
              }
              title <- "#{@@name_legend[cat]}-\t-#{@@name_legend[subcategory.to_s]}-\t-#{name}"
              if (nchar(title) > 120) {
                mtext(title, side=3, line=0, outer=TRUE, cex=0.55)
              } else if (nchar(title) > 100 ) {
                mtext(title, side=3, line=0, outer=TRUE, cex=0.65)
              } else if (nchar(title) > 80 ) {
                mtext(title, side=3, line=0, outer=TRUE, cex=0.75)
              } else {
                mtext(title, side=3, line=0, outer=TRUE)
              }
          }
        end
        r_object.converse "dev.off()" #### This line must conclude each loop, as far as R is concerned.
        i += 1
      end # while loop
    end # subcats
  end # categories
  graphfiles
end # graph_matches
# Metric categories, in the order the graphing methods iterate and render them.
@@categories = ["uplc", "chromatography", "ms1", "dynamic_sampling", "ion_source", "ion_treatment", "peptide_ids", "ms2", "run_comparison"]
# Maps machine-readable metric/category keys to the human-readable labels used
# in graph titles and legends.  Pure data: keys originate from the metric
# hashes, values are display strings (some embed tabs for column alignment).
@@name_legend = { "uplc"=>"UPLC","chromatography"=>"Chromatography", "ms1"=>"MS1", "ms2"=>"MS2", "dynamic_sampling"=>"Dynamic Sampling", "ion_source"=>"Ion Source", "ion_treatment"=>"Ion Treatment", "peptide_ids"=> "Peptide IDs", "run_comparison"=>"Run Comparison", "id_charge_distributions_at_different_ms1max_quartiles_for_charges_1_4"=>"ID Charge Distributions At Different MS1max Quartiles For Charges 1-4", "precursor_m_z_averages_and_differences_from_1st_quartile_largest_of_different_ms1total_tic_quartiles_over_full_elution_period"=>"Precursor m/z Averages and Differences from 1st Quartile (Largest) of Different MS1Total (TIC) Quartiles Over Full Elution Period",
"number_of_compounds_in_common"=>"Number of Compounds in Common", "fraction_of_overlapping_compounds_relative_to_first_index"=>"Fraction of Overlapping Compounds - relative to first index", "fraction_of_overlapping_compounds_relative_to_second_index"=>"Fraction of Overlapping Compounds - relative to second index", "median_retention_rank_differences_for_compounds_in_common_percent"=>"Median Retention Rank Differences for Compounds in Common (Percent)", "avg_1_60_1_60"=>"Avg\t1.60\t1.60",
"average_retention_rank_differences_for_compounds_in_common_percent"=>"Average Retention Rank Differences for Compounds in Common (Percent)", "avg_2_30_2_30"=>"Avg\t2.30\t2.30", "number_of_matching_identified_ions_between_runs"=>"Number of Matching Identified Ions Between Runs", "relative_deviations_in_ms1_max_for_matching_identified_ions_between_runs"=>"Relative Deviations in MS1 Max For Matching Identified Ions Between Runs", "avg_1_00_1_00"=>"Avg\t1.00\t1.00", "relative_uncorrected_deviations_in_ms1_max_for_matching_identified_ions_between_runs"=>"Relative Uncorrected Deviations in MS1 Max For Matching Identified Ions Between Runs", "avg_0_00_0_00"=>"Avg\t0.00\t0.00",
"relative_corrected_deviations_in_ms1_max_for_matching_identified_ions_between_runs"=>"Relative Corrected Deviations in MS1 Max For Matching Identified Ions Between Runs", "relative_rt_trends_in_ms1_max_for_matching_identified_ions_between_runs"=>"Relative RT Trends in MS1 Max For Matching Identified Ions Between Runs",
"relative_rt_trends_corrected_deviations_of_ms1_max_for_matching_identified_ions_between_runs"=>"Relative RT Trends / Corrected Deviations of MS1 Max For Matching Identified Ions Between Runs", "median_relative_intensities_in_ms1_max_for_matching_identified_ions_between_runs"=>"Median Relative Intensities in MS1 Max For Matching Identified Ions Between Runs", "number_of_matching_doubly_charged_identified_ions_between_runs"=>"Number of Matching Doubly Charged Identified Ions Between Runs",
"relative_deviations_in_ms1_max_for_doubly_charged_matching_identified_ions_between_runs"=>"Relative Deviations in MS1 Max For Doubly Charged Matching Identified Ions Between Runs", "number_of_matching_triply_charged_identified_ions_between_runs"=>"Number of Matching Triply Charged Identified Ions Between Runs", "relative_deviations_in_ms1_max_for_triply_charged_matching_identified_ions_between_runs"=>"Relative Deviations in MS1 Max For Triply Charged Matching Identified Ions Between Runs",
"relative_2_deviations_in_ms1_max_for_matching_identified_ions_between_runs"=>"Relative 2 * Deviations in MS1 Max For Matching Identified Ions Between Runs", "relative_deviations_in_ms1_max_at_different_rt_quartiles_for_matching_identified_ions_between_runs_single_table"=>"Relative Deviations in MS1 Max at Different RT Quartiles For Matching Identified Ions Between Runs - Single Table", "relative_deviations_in_ms1_max_at_1_rt_quartile_for_matching_identified_ions_between_runs"=>"Relative Deviations in MS1 Max at 1 RT Quartile For Matching Identified Ions Between Runs",
"relative_deviations_in_ms1_max_at_2_rt_quartile_for_matching_identified_ions_between_runs"=>"Relative Deviations in MS1 Max at 2 RT Quartile For Matching Identified Ions Between Runs", "relative_deviations_in_ms1_max_at_3_rt_quartile_for_matching_identified_ions_between_runs"=>"Relative Deviations in MS1 Max at 3 RT Quartile For Matching Identified Ions Between Runs", "relative_deviations_in_ms1_max_at_4_rt_quartile_for_matching_identified_ions_between_runs"=>"Relative Deviations in MS1 Max at 4 RT Quartile For Matching Identified Ions Between Runs", "number_of_excess_early_eluting_identified_species"=>"Number of Excess Early Eluting Identified Species",
"number_of_excess_late_eluting_identified_species"=>"Number of Excess Late Eluting Identified Species", "peak_widths_at_half_max_at_rt_deciles"=>"Peak widths at half max at RT deciles", "rt_peptide_deciles"=>"RT peptide deciles", "oversampling_vs_rt"=>"Oversampling vs RT", "retention_time_decile_intervals"=>"Retention Time Decile Intervals", "matching_low_high_rt_peptide_ions"=>"Matching Low:High RT Peptide Ions", "rt_median_differences_for_matching_peptides"=>"RT Median Differences for Matching Peptides", "median_dev"=>"Median Dev", "median_skew"=>"Median Skew", "fraction_of_1_rt_decile_peps_in_common"=>"Fraction of 1 RT Decile Peps In Common",
"fraction_of_2_rt_decile_peps_in_common"=>"Fraction of 2 RT Decile Peps In Common", "fraction_of_3_rt_decile_peps_in_common"=>"Fraction of 3 RT Decile Peps In Common", "fraction_of_4_rt_decile_peps_in_common"=>"Fraction of 4 RT Decile Peps In Common", "fraction_of_5_rt_decile_peps_in_common"=>"Fraction of 5 RT Decile Peps In Common", "fraction_of_6_rt_decile_peps_in_common"=>"Fraction of 6 RT Decile Peps In Common", "fraction_of_7_rt_decile_peps_in_common"=>"Fraction of 7 RT Decile Peps In Common", "fraction_of_8_rt_decile_peps_in_common"=>"Fraction of 8 RT Decile Peps In Common", "fraction_of_9_rt_decile_peps_in_common"=>"Fraction of 9 RT Decile Peps In Common",
"fraction_of_10_rt_decile_peps_in_common"=>"Fraction of 10 RT Decile Peps In Common", "end_interrun_and_decile_results"=>"End Interrun and Decile Results", "ab_deviation_vs_difference_in_run_order"=>"Ab Deviation vs Difference in Run Order - ", "median_rt_rank_vs_difference_in_run_order"=>"Median RT Rank vs Difference in Run Order", "begin_runseries_results"=>"Begin Runseries Results", "begin_series_1"=>"Begin Series=1", "files_analyzed_2"=>"Files Analyzed (2)", "run_number_1_2"=>"Run Number\t1\t2\t", "spectrum_counts"=>"Spectrum Counts", "first_and_last_ms1_rt_min"=>"First and Last MS1 RT (min)", "tryptic_peptide_counts"=>"Tryptic Peptide Counts",
"peptide_counts"=>"Peptide Counts", "middle_peptide_retention_time_period_min"=>"Middle Peptide Retention Time Period (min)", "ms1_during_middle_and_early_peptide_retention_period"=>"MS1 During Middle (and Early) Peptide Retention Period", "ms1_total_ion_current_for_different_rt_periods"=>"MS1 Total Ion Current For Different RT Periods", "total_ion_current_for_ids_at_peak_maxima"=>"Total Ion Current For IDs at Peak Maxima", "precursor_m_z_for_ids"=>"Precursor m/z for IDs", "number_of_ions_vs_charge"=>"Number of Ions vs Charge", "averages_vs_rt_for_ided_peptides"=>"Averages vs RT for IDed Peptides", "precursor_m_z_peptide_ion_m_z_2_charge_only_reject_0_45_m_z"=>"Precursor m/z - Peptide Ion m/z (+2 Charge Only, Reject >0.45 m/z)",
"ion_ids_by_charge_state_relative_to_2"=>"Ion IDs by Charge State (Relative to +2)", "average_peptide_lengths_for_different_charge_states"=>"Average Peptide Lengths for Different Charge States", "average_peptide_lengths_for_charge_2_for_different_numbers_of_mobile_protons"=>"Average Peptide Lengths For Charge 2 for Different Numbers of Mobile Protons", "numbers_of_ion_ids_at_different_charges_with_1_mobile_proton"=>"Numbers of Ion Ids at Different Charges with 1 Mobile Proton",
"percent_of_ids_at_different_charges_and_mobile_protons_relative_to_ids_with_1_mobile_proton"=>"Percent of IDs at Different Charges and Mobile Protons Relative to IDs with 1 Mobile Proton", "precursor_m_z_monoisotope_exact_m_z"=>"Precursor m/z - Monoisotope Exact m/z", "ms2_id_spectra"=>"MS2 ID Spectra", "ms1_id_max"=>"MS1 ID Max", "ms1_id_abund_at_ms2_acquisition"=>"MS1 ID Abund at MS2 Acquisition", "ms2_id_abund_reported"=>"MS2 ID Abund Reported",
"max_peak_width_for_ids_sec"=>"Max Peak Width for IDs (sec)", "peak_width_at_half_height_for_ids"=>"Peak Width at Half Height for IDs", "peak_widths_at_half_max_over_rt_deciles_for_ids"=>"Peak Widths at Half Max over RT deciles for IDs", "nearby_resampling_of_ids_oversampling_details"=>"Nearby Resampling of IDs - Oversampling Details", "wide_rt_differences_for_ids_4_min"=>"Wide RT Differences for IDs (> 4 min)",
"fraction_of_repeat_peptide_ids_with_divergent_rt_rt_vs_rt_best_id_chromatographic_bleed"=>"Fraction of Repeat Peptide IDs with Divergent RT (RT vs RT-best ID) - Chromatographic 'Bleed'", "early_and_late_rt_oversampling_spectrum_ids_unique_peptide_ids_chromatographic_flow_through_bleed"=>"Early and Late RT Oversampling (Spectrum IDs/Unique Peptide IDs) - Chromatographic: Flow Through/Bleed",
"peptide_ion_ids_by_3_spectra_hi_vs_1_3_spectra_lo_extreme_oversampling"=>"Peptide Ion IDs by > 3 Spectra (Hi) vs 1-3 Spectra (Lo) - Extreme Oversampling", "ratios_of_peptide_ions_ided_by_different_numbers_of_spectra_oversampling_measure"=>"Ratios of Peptide Ions IDed by Different Numbers of Spectra - Oversampling Measure",
"single_spectrum_peptide_ion_identifications_oversampling_measure"=>"Single Spectrum Peptide Ion Identifications - Oversampling Measure", "ms1max_ms1sampled_abundance_ratio_ids_inefficient_sampling"=>"MS1max/MS1sampled Abundance Ratio IDs - Inefficient Sampling", "rt_ms1max_rt_ms2_for_ids_sec"=>"RT(MS1max)-RT(MS2) for IDs (sec) ",
"ion_injection_times_for_ids_ms"=>"Ion Injection Times for IDs (ms)", "relative_fraction_of_peptides_in_retention_decile_matching_a_peptide_in_other_runs"=>"Relative Fraction of Peptides in Retention Decile Matching a Peptide in Other Runs", "relative_uniqueness_of_peptides_in_decile_found_anywhere_in_other_runs"=>"Relative Uniqueness of Peptides in Decile Found Anywhere in Other Runs", "differences_in_elution_rank_percent_of_matching_peptides_in_other_runs"=>"Differences in Elution Rank (Percent) of Matching Peptides in Other Runs", "median_ratios_of_ms1_intensities_of_matching_peptides_in_other_runs"=>"Median Ratios of MS1 Intensities of Matching Peptides in Other Runs",
"uncorrected_and_rt_corrected_relative_intensities_of_matching_peptides_in_other_runs"=>"Uncorrected and RT Corrected Relative Intensities of Matching Peptides in Other Runs", "magnitude_of_rt_correction_of_intensities_of_matching_peptides_in_other_runs"=>"Magnitude of RT Correction of Intensities of Matching Peptides in Other Runs", "top_ion_abundance_measures"=>"Top Ion Abundance Measures", "end_series_1"=>"End Series=1", "end_runseries_results"=>"End Runseries Results", "precursor_m_z_averages_at_different_ms1total_tic_quartiles_over_middle_elution_period"=>"Precursor m/z Averages at Different MS1Total (TIC) Quartiles Over Middle Elution Period", "run_q1_precursor_m_z_q2_q1_q3_q1_q4_q1_q1_tic_1000_q2_q1_q3_q1_q4_q1"=>"Run #, Q1 precursor m/z, Q2-Q1, Q3-Q1, Q4-Q1, Q1 TIC/1000, Q2/Q1, Q3/Q1, Q4/Q1", "_1"=>"1", "_2"=>"2", ""=>"", "decile"=>"Decile",
"run_1"=>"Run 1", "run_2"=>"Run 2", "run"=>"Run", "median"=>"Median ", "avg"=>"Avg", "lab_1"=>"Lab 1", "avgdel"=>"AvgDel", "meddel"=>"MedDel", "diff1"=>"Diff1", "ms2_scans"=>"MS2 scans", "ms1_scans_full"=>"MS1 Scans/Full", "ms1_scans_other"=>"MS1 Scans/Other", "first_ms1"=>"First MS1", "last_ms1"=>"Last MS1 ", "peptides"=>"Peptides", "ions"=>"Ions ", "identifications"=>"Identifications", "abundance_pct"=>"Abundance Pct", "abundance_1000"=>"Abundance/1000", "ions_peptide"=>"Ions/Peptide", "ids_peptide"=>"IDs/Peptide", "semi_tryp_peps"=>"Semi/Tryp Peps", "semi_tryp_cnts"=>"Semi/Tryp Cnts", "semi_tryp_abund"=>"Semi/Tryp Abund", "miss_tryp_peps"=>"Miss/Tryp Peps", "miss_tryp_cnts"=>"Miss/Tryp Cnts", "miss_tryp_abund"=>"Miss/Tryp Abund", "net_oversample"=>"Net Oversample", "half_period"=>"Half Period",
"start_time"=>"Start Time", "mid_time"=>"Mid Time", "qratio_time"=>"Qratio Time", "ms1_scans"=>"MS1 Scans", "pep_id_rate"=>"Pep ID Rate", "id_rate"=>"ID Rate ", "id_efficiency"=>"ID Efficiency", "s_n_median"=>"S/N Median", "tic_median_1000"=>"TIC Median/1000",
"npeaks_median"=>"NPeaks Median", "scan_to_scan"=>"Scan-to-Scan", "s2s_3q_med"=>"S2S-3Q/Med", "s2s_1qrt_med"=>"S2S-1Qrt/Med", "s2s_2qrt_med"=>"S2S-2Qrt/Med", "s2s_3qrt_med"=>"S2S-3Qrt/Med", "s2s_4qrt_med"=>"S2S-4Qrt/Med", "esi_off_middle"=>"ESI Off Middle",
"esi_off_early"=>"ESI Off Early", "max_ms1_jump"=>"Max MS1 Jump", "max_ms1_fall"=>"Max MS1 Fall", "ms1_jumps_10x"=>"MS1 Jumps >10x", "ms1_falls_10x"=>"MS1 Falls >10x", "_1st_quart_id"=>"1st Quart ID", "middle_id"=>"Middle ID", "last_id_quart"=>"Last ID Quart", "to_end_of_run"=>"To End of Run", "med_tic_id_1000"=>"Med TIC ID/1000",
"interq_tic"=>"InterQ TIC", "mid_interq_tic"=>"Mid InterQ TIC", "half_width"=>"Half Width", "quart_ratio"=>"Quart Ratio", "precursor_min"=>"Precursor Min", "precursor_max"=>"Precursor Max", "med_q1_tic"=>"Med @ Q1 TIC", "med_q4_tic"=>"Med @ Q4 TIC", "med_q1_rt"=>"Med @ Q1 RT", "med_q4_rt"=>"Med @ Q4 RT", "med_charge_1"=>"Med Charge +1",
"med_charge_2"=>"Med Charge +2", "med_charge_3"=>"Med Charge +3", "med_charge_4"=>"Med Charge +4", "charge_1"=>"Charge +1", "charge_2"=>"Charge +2", "charge_3"=>"Charge +3", "charge_4"=>"Charge +4", "charge_5"=>"Charge +5", "length_q1"=>"Length Q1", "length_q4"=>"Length Q4", "charge_q1"=>"Charge Q1", "charge_q4"=>"Charge Q4", "spectra"=>"Spectra ",
"mean_absolute"=>"Mean Absolute", "ppm_median"=>"ppm Median", "ppm_interq"=>"ppm InterQ", "_2_ion_count"=>"+2 Ion Count", "naa_ch_2_mp_1"=>"NAA,Ch=2,MP=1", "naa_ch_2_mp_0"=>"NAA,Ch=2,MP=0", "naa_ch_2_mp_2"=>"NAA,Ch=2,MP=2", "ch_1_mp_1"=>"Ch=1 MP=1",
"ch_2_mp_1"=>"Ch=2 MP=1", "ch_3_mp_1"=>"Ch=3 MP=1", "ch_4_mp_1"=>"Ch=4 MP=1", "ch_1_mp_0"=>"Ch=1 MP=0", "ch_2_mp_0"=>"Ch=2 MP=0", "ch_3_mp_0"=>"Ch=3 MP=0", "more_than_100"=>"More Than 100", "betw_100_0_50_0"=>"Betw 100.0-50.0", "betw_50_0_25_0"=>"Betw 50.0-25.0", "betw_25_0_12_5"=>"Betw 25.0-12.5", "betw_12_5_6_3"=>"Betw 12.5-6.3",
"betw_6_3_3_1"=>"Betw 6.3-3.1", "betw_3_1_1_6"=>"Betw 3.1-1.6", "betw_1_6_0_8"=>"Betw 1.6-0.8", "top_half"=>"Top Half", "next_half_2"=>"Next Half (2)", "next_half_3"=>"Next Half (3)", "next_half_4"=>"Next Half (4)", "next_half_5"=>"Next Half (5)", "next_half_6"=>"Next Half (6)", "next_half_7"=>"Next Half (7)", "next_half_8"=>"Next Half (8)",
"npeaks_interq"=>"NPeaks InterQ", "s_n_interq"=>"S/N InterQ", "id_score_median"=>"ID Score Median", "id_score_interq"=>"ID Score InterQ", "idsc_med_q1msmx"=>"IDSc Med Q1Msmx", "median_midrt"=>"Median MidRT", "_75_25_midrt"=>"75/25 MidRT", "_95_5_midrt"=>"95/5 MidRT", "_75_25_pctile"=>"75/25 Pctile", "_95_5_pctile"=>"95/5 Pctile",
"median_value"=>"Median Value", "third_quart"=>"Third Quart", "last_decile"=>"Last Decile", "med_top_quart"=>"Med Top Quart",
"med_top_16th"=>"Med Top 16th", "med_top_100"=>"Med Top 100", "median_disper"=>"Median Disper", "med_quart_disp"=>"Med Quart Disp", "med_16th_disp"=>"Med 16th Disp", "med_100_disp"=>"Med 100 Disp", "_3quart_value"=>"3Quart Value", "_9dec_value"=>"9Dec Value", "ms1_interscan_s"=>"MS1 Interscan/s", "ms1_scan_fwhm"=>"MS1 Scan/FWHM",
"ids_used"=>"IDs Used ", "first_decile"=>"First Decile", "repeated_ids"=>"Repeated IDs", "med_rt_diff_s"=>"Med RT Diff/s", "_1q_rt_diff_s"=>"1Q RT Diff/s",
"_1dec_rt_diff_s"=>"1Dec RT Diff/s", "median_dm_z"=>"Median dm/z", "quart_dm_z"=>"Quart dm/z", "_4_min"=>"+ 4 min ", "pep_ions_hi"=>"Pep Ions (Hi)",
"ratio_hi_lo"=>"Ratio Hi/Lo", "spec_cnts_hi"=>"Spec Cnts (Hi)", "spec_pep_hi"=>"Spec/Pep (Hi)", "spec_cnt_excess"=>"Spec Cnt Excess", "once_twice"=>"Once/Twice",
"twice_thrice"=>"Twice/Thrice", "peptide_ions"=>"Peptide Ions", "fract_1_ions"=>"Fract >1 Ions", "_1_vs_1_pepion"=>"1 vs >1 PepIon", "_1_vs_1_spec"=>"1 vs >1 Spec",
"median_all_ids"=>"Median All IDs", "_3q_all_ids"=>"3Q All IDs", "_9dec_all_ids"=>"9Dec All IDs", "med_top_dec"=>"Med Top Dec", "med_bottom_1_2"=>"Med Bottom 1/2",
"med_diff_abs"=>"Med Diff Abs", "median_diff"=>"Median Diff", "first_quart"=>"First Quart", "ms1_median"=>"MS1 Median", "ms1_maximum"=>"MS1 Maximum",
"ms2_median"=>"MS2 Median", "ms2_maximun"=>"MS2 Maximun", "ms2_fract_max"=>"MS2 Fract Max", "all_deciles"=>"All Deciles", "comp_to_first"=>"Comp to First",
"comp_to_last"=>"Comp to Last", "average_diff"=>"Average Diff", "median_2_diff"=>"Median*2 Diff", "comp_to_first_2"=>"Comp to First*2", "comp_to_last_2"=>"Comp to Last*2", "uncor_rel_first"=>"Uncor rel First", "uncor_rel_last"=>"Uncor rel Last",
"corr_rel_first"=>"Corr rel First", "corr_rel_last"=>"Corr rel Last", "top_10_abund"=>"Top 10% Abund", "top_25_abund"=>"Top 25% Abund", "top_50_abund"=>"Top 50% Abund", "fractab_top"=>"Fractab Top", "fractab_top_10"=>"Fractab Top 10", "fractab_top_100"=>"Fractab Top 100"}
end #class << self
end #ComparisonGrapher
end #module Ms
|
module Rack
  class Console
    # Semantic version of rack-console, broken into its numeric components and
    # exposed both via Version.to_s and the VERSION string constant.
    class Version
      MAJOR = 1
      MINOR = 3
      PATCH = 0

      # Dotted "MAJOR.MINOR.PATCH" rendering of the version.
      def self.to_s
        format('%d.%d.%d', MAJOR, MINOR, PATCH)
      end
    end

    VERSION = Version.to_s
  end
end
Bump version to 1.3.1
Signed-off-by: David Celis <b1c1d8736f20db3fb6c1c66bb1455ed43909f0d8@davidcel.is>
module Rack
  class Console
    # Semantic version of rack-console, broken into its numeric components and
    # exposed both via Version.to_s and the VERSION string constant.
    class Version
      MAJOR = 1
      MINOR = 3
      PATCH = 1

      # Dotted "MAJOR.MINOR.PATCH" rendering of the version.
      def self.to_s
        format('%d.%d.%d', MAJOR, MINOR, PATCH)
      end
    end

    VERSION = Version.to_s
  end
end
|
require 'rack'
require 'openssl'
require 'time'
require 'uri'
# Provider-side OpenID Authentication 2.0 utilities: protocol namespace
# constants, the btwoc/ctwob integer codecs, key-value and base64 codecs,
# HMAC signature generation, and the Diffie-Hellman association session types.
module OpenID
VERSION="0.0"
# Protocol namespace and endpoint-type URIs from the OpenID 2.0 specification.
NS="http://specs.openid.net/auth/2.0".freeze
IDENTIFIER_SELECT="http://specs.openid.net/auth/2.0/identifier_select".freeze
SERVER="http://specs.openid.net/auth/2.0/server".freeze
SIGNON="http://specs.openid.net/auth/2.0/signon".freeze
class << self
# Implements OpenID btwoc function: encodes a non-negative Integer as its
# shortest big-endian byte string with a clear sign bit (a NUL byte is
# prepended when the top bit would otherwise be set).  Raises on negative n.
def btwoc(n)
n = n.to_i
raise if n < 0
r = (n % 0x100).chr
r = (n % 0x100).chr + r while (n /= 0x100) > 0
r = 0.chr + r if r[0].ord >= 0x80
r
end
# Inverse form of btwoc: decodes a big-endian byte string into an Integer.
def ctwob(s)
n, sl = 0, s.length - 1
0.upto(sl) {|i|
n += s[i].ord * 0x100 ** (sl - i)
}
n
end
# Encode OpenID parameters as a HTTP GET query string ("openid." prefixed keys).
def url_encode(h); h.map { |k,v| "openid.#{Rack::Utils.escape(k)}=#{Rack::Utils.escape(v)}" }.join('&') end
# Encode OpenID parameters as Key-Value format (newline-terminated "key:value" lines)
def kv_encode(h); h.map {|k,v| k.to_s + ":" + v.to_s + 10.chr }.join end
# Decode OpenID parameters from Key-Value format (splits on the first ":" only)
def kv_decode(s); Hash[*s.split(10.chr).map {|l| l.split(":", 2) }.flatten] end
# Encode in base64 (no trailing newline)
def base64_encode(s); [s].pack("m0") end
# Decode from base64
def base64_decode(s); s.unpack("m0").first end
# Generate _bytes_ random bytes via OpenSSL's CSPRNG
def random_bytes(bytes); OpenSSL::Random.random_bytes(bytes) end
# Generate a random hex string _length_ characters long
def random_string(length); random_bytes(length / 2).unpack("H*")[0] end
# Generate an OpenID signature: signs the key-value form of the fields listed
# in params["signed"].  The MAC length selects the algorithm (20 bytes =>
# HMAC-SHA1, anything else => HMAC-SHA256); result is base64 encoded.
def gen_sig(mac, params)
signed = params["signed"].split(",").map {|k| [k, params[k]]}
if mac.length == 20
OpenID.base64_encode(Signatures["HMAC-SHA1"].sign( mac, kv_encode(signed)))
else
OpenID.base64_encode(Signatures["HMAC-SHA256"].sign(mac, kv_encode(signed)))
end
end
# Association handles / response nonces: UTC ISO8601 timestamp plus a random suffix.
def gen_handle; Time.now.utc.iso8601 + OpenID.random_string(6) end
def gen_nonce; Time.now.utc.iso8601 + OpenID.random_string(6) end
end
module Signatures # :nodoc: all
# Registry of association (signature) types keyed by protocol name.
Associations = {}
def self.[](k); Associations[k] end
def self.[]=(k, v); Associations[k] = v end
# An HMAC association type parameterized by its digest class.
class Assoc
def initialize(digest); @digest = digest end
def sign(mac, value); OpenSSL::HMAC.digest(@digest.new, mac, value) end
# Digest (and therefore MAC) size in bytes.
def size; @digest.new.size end
def gen_mac; OpenID.random_bytes(size) end
end
Associations["HMAC-SHA1"] = Assoc.new(OpenSSL::Digest::SHA1)
Associations["HMAC-SHA256"] = Assoc.new(OpenSSL::Digest::SHA256)
end
module DH # :nodoc: all
# Default modulus/generator used when the consumer supplies none — presumably
# the 1024-bit values from the OpenID 2.0 spec (section 8.1.2); confirm.
DEFAULT_MODULUS=0xDCF93A0B883972EC0E19989AC5A2CE310E1D37717E8D9571BB7623731866E61EF75A2E27898B057F9891C2E27A639C3F29B60814581CD3B2CA3986D2683705577D45C2E7E52DC81C7A171876E5CEA74B1448BFDFAF18828EFD2519F14E45E3826634AF1949E5B535CC829A483B8A76223E5D490A257F05BDFF16F2FB22C583AB
DEFAULT_GEN=2
# Registry of association session types keyed by protocol name.
Sessions = {}
def self.[](k); Sessions[k] end
def self.[]=(k, v); Sessions[k] = v end
# Diffie-Hellman session parameterized by its digest class (SHA1 or SHA256).
class SHA_ANY
class MissingKey < StandardError; end
# A session may only carry MACs whose size matches its digest's output size.
def compatible_key_size?(size); @digest.new.size == size end
def initialize(digest); @digest = digest end
# Server side of the DH exchange: derives the shared secret from the
# consumer's public key and returns the response fields — the server public
# key and the MAC XOR-masked with the hashed shared secret.
def to_hash(mac, p, g, consumer_public_key)
raise MissingKey if consumer_public_key.nil?
c = OpenSSL::BN.new(consumer_public_key.to_s)
dh = gen_key(p || DEFAULT_MODULUS, g || DEFAULT_GEN)
shared = OpenSSL::BN.new(dh.compute_key(c), 2)
shared_hashed = @digest.digest(OpenID.btwoc(shared))
{
"dh_server_public" => OpenID.base64_encode(OpenID.btwoc(dh.pub_key)),
"enc_mac_key" => OpenID.base64_encode(sxor(shared_hashed, mac))
}
end
private
# Builds a fresh DH key pair for the given modulus/generator.
# NOTE(review): assigning p/g on a blank PKey::DH may not be permitted under
# OpenSSL 3.x's immutable-key model — confirm on the target Ruby/OpenSSL.
def gen_key(p, g)
dh = OpenSSL::PKey::DH.new
dh.p = p
dh.g = g
dh.generate_key!
end
# Byte-wise XOR of two equal-length strings.
def sxor(s1, s2)
s1.bytes.zip(s2.bytes).map { |x,y| x^y }.pack('C*')
end
end
# "no-encryption" session: the MAC is sent base64 encoded without DH masking.
class NoEncryption
def self.compatible_key_size?(size); true end
def self.to_hash(mac, p, g, c); {"mac_key" => OpenID.base64_encode(mac)} end
end
Sessions["DH-SHA1"] = SHA_ANY.new(OpenSSL::Digest::SHA1)
Sessions["DH-SHA256"] = SHA_ANY.new(OpenSSL::Digest::SHA256)
Sessions["no-encryption"] = NoEncryption
end
end
module Rack # :nodoc:
class OpenIDRequest
FIELDS = %w(
assoc_handle assoc_type claimed_id contact delegate dh_consumer_public dh_gen
dh_modulus error identity invalidate_handle mode ns op_endpoint
realm reference response_nonce return_to server session_type sig
signed trust_root).freeze
MODES = %w(associate checkid_setup checkid_immediate check_authentication).freeze
attr_reader :env
def initialize(env) @env = env end
def params; @env['openid.provider.request.params'] ||= extract_open_id_params end
def [](k); params[k] end
def []=(k, v); params[k] = v end
# Some accessor helpers
FIELDS.each { |field|
class_eval %{def #{field}; params["#{field}"] end}
class_eval %{def #{field}=(v); params["#{field}"] = v end}
}
MODES.each { |field|
class_eval %{def #{field}?; valid? and mode == "#{field}" end}
}
def valid?; mode and Request.new(@env).path_info == "/" end
def identifier_select?; OpenID::IDENTIFIER_SELECT == identity end
def dh_modulus; params['dh_modulus'] && OpenID.ctwob(OpenID.base64_decode(params['dh_modulus'])) end
def dh_gen; params['dh_gen'] && OpenID.ctwob(OpenID.base64_decode(params['dh_gen'])) end
def dh_consumer_public; params['dh_consumer_public'] && OpenID.ctwob(OpenID.base64_decode(params['dh_consumer_public'])) end
def session_type; OpenID::DH[params['session_type']] end
def assoc_type; OpenID::Signatures[params['assoc_type']] end
def realm_wildcard?; params['realm'] =~ %r(^https?://\.\*) end
def realm_url; URI(realm.sub(".*", "")) rescue nil end
def realm_match?(url)
return true if realm.nil? or url.nil?
realm = realm_url
url = URI(url)
!realm.fragment and
realm.scheme == url.scheme and
realm_wildcard? ? %r(\.?#{Regexp.escape(realm.host)}$) =~ url.host : realm.host == url.host and
realm.port == url.port and
%r(^#{Regexp.escape(realm.path)}) =~ url.path
rescue URI::InvalidURIError
false
end
def nonces; @env['openid.provider.nonces'] end
def handles; @env['openid.provider.handles'] end
def private_handles; @env['openid.provider.private_handles'] end
def options; @env['openid.provider.options'] end
private
def extract_open_id_params
openid_params = {}
Request.new(@env).params.each { |k,v| openid_params[$'] = v if k =~ /^openid\./ }
openid_params
end
end
class OpenIDResponse
class NoReturnTo < StandardError
attr_reader :res
def initialize(res)
@res = res
res.error!("no return_to", "orig_mode" => @res["mode"]) if not res.error?
end
end
MODES = %w(error cancel setup_needed id_res is_valid)
MAX_REDIRECT_SIZE = 1024
def self.gen_html_fields(h)
h.map {|k,v|
"<input type='hidden' name='openid.#{k}' value='#{v}' />"
}.join("\n")
end
OpenIDRequest::FIELDS.each { |field|
class_eval %{def #{field}; params["#{field}"] end}
class_eval %{def #{field}=(v); params["#{field}"] = v end}
}
MODES.each { |field|
class_eval %{def #{field}?; mode == "#{field}" end}
}
def initialize(h = {})
@h = h.merge("ns" => OpenID::NS)
@direct = true
@return_to = nil
end
def [](k) @h[k] end
def []=(k,v) @h[k] = v end
def params; @h end
def direct?; @direct end
def direct!; @direct = true end
def indirect?; !direct? end
def indirect!(return_to)
raise NoReturnTo.new(self) if return_to.nil?
@return_to = return_to
@direct = false
end
def html_form?; indirect? and OpenID.url_encode(@h).size > MAX_REDIRECT_SIZE end
def redirect?; !html_form? end
def negative?; cancel? or setup_needed? end
def positive?; id_res? end
def error!(error, h = {})
@h.merge!(h)
@h.merge! "mode" => "error", "error" => error
finish!
end
def negative!(h = {})
@h.merge!(h)
@h["mode"] = "cancel"
finish!
end
def positive!(h = {})
@h.merge!(h)
@h["mode"] = "id_res"
finish!
end
def http_status
if direct?
error? ? 400 : 200
else
html_form? ? 200 : 302
end
end
def http_headers
headers = {"Content-Type" => "text/plain"}
headers.merge!("Content-Length" => http_body.size.to_s)
if direct?
headers
else
if html_form?
headers.merge!("Content-Type" => "text/html")
else
d = URI(@return_to)
d.query = d.query ? d.query + "&" + OpenID.url_encode(@h) : OpenID.url_encode(@h)
headers.merge!("Location" => d.to_s)
end
end
end
def http_body
if direct?
OpenID.kv_encode(@h)
else
if html_form?
%(
<html><body onload='this.openid_form.submit();'>
<form name='openid_form' method='post' action='#{@return_to}'>"
#{OpenIDResponse.gen_html_fields(@h)}
<input type='submit' /></form></body></html>
)
else
""
end
end
end
def each; yield http_body end
def finish!; [http_status, http_headers, self] end
alias :to_a :finish!
end
# This is a Rack middleware:
# Rack::Builder.new {
# use Rack::OpenIDProvider, custom_options
# run MyProvider.new
# }
class OpenIDProvider
FIELD_SIGNED = %w(op_endpoint return_to response_nonce assoc_handle claimed_id identity)
class XRDS
CONTENT_TYPE = "application/xrds+xml".freeze
CONTENT =
%{<?xml version="1.0" encoding="UTF-8"?>
<xrds:XRDS xmlns:xrds="xri://$xrds" xmlns="xri://$xrd*($v*2.0)">
<XRD>
<Service priority="0">
<Type>#{OpenID::SERVER}</Type>
<URI>%s</URI>
</Service>
</XRD>
</xrds:XRDS>}.freeze
def initialize(app) @app = app end
def call(env)
if serve?(env)
content = CONTENT % Request.new(env.merge("PATH_INFO" => "/", "QUERY_STRING" => "")).url
[200, {"Content-Type" => CONTENT_TYPE, "Content-Length" => content.size.to_s}, [content] ]
else
@app.call(env)
end
end
def serve?(env)
req, oreq = Request.new(env), OpenIDRequest.new(env)
!oreq.valid? and oreq.options['xrds'] and
(req.path_info == "/" or req.path == "/") and
env['HTTP_ACCEPT'].include?(CONTENT_TYPE)
end
end
class HandleRequests
class NotSupported < StandardError; end
class IncompatibleTypes < StandardError; end
class NoSecureChannel < StandardError; end
def initialize(app) @app = app end
def call(env)
req = OpenIDRequest.new(env)
# Before filters
if (req.checkid_setup? or req.checkid_immediate?) and res = check_req(req)
c,h,b = res.finish!
else
c,h,b = @app.call(env)
end
# After filters
if req.valid? and c == 404 and h["X-Cascade"] == "pass"
case req.mode
when "associate"
c,h,b = associate(req)
when "checkid_setup", "checkid_immediate"
res = OpenIDResponse.new
res.negative!
c,h,b = finish_checkid! req, res
when "check_authentication"
c,h,b = check_authentication(req)
else
c,h,b = OpenIDResponse.new.error!("Unknown mode")
end
elsif OpenIDResponse === b and (b.negative? or b.positive?)
c,h,b = finish_checkid!(req, b)
end
# Finish filter
if OpenIDResponse === b
finish_error!(req, b) if b.error?
b.indirect!(req.return_to) if indirect?(req, b)
c,h,b = b.finish!
end
[c,h,b]
rescue OpenIDResponse::NoReturnTo => e
finish_error!(req, e.res)
end
private
def check_req(req)
res = OpenIDResponse.new
if !req.return_to and !req.realm
res.error!("The request has no return_to and no realm")
elsif req.realm and !req.realm_url
res.error!("Invalid realm")
elsif !req.realm_match?(req.return_to)
res.error!("return_to url does not match the realm")
else
false
end
end
def associate(req)
res = OpenIDResponse.new
raise NotSupported if req.session_type.nil? or req.assoc_type.nil?
raise IncompatibleTypes if !req.session_type.compatible_key_size?(req.assoc_type.size)
raise NoSecureChannel if req['session_type'] == "no-encryption" and req.env["rack.url_scheme"] != "https"
mac = req.assoc_type.gen_mac
handle = OpenID.gen_handle
res["assoc_handle"] = handle
res["session_type"] = req['session_type']
res["assoc_type"] = req['assoc_type']
res["expires_in"] = req.options['handle_timeout']
res.params.merge! req.session_type.to_hash(mac, req.dh_modulus, req.dh_gen, req.dh_consumer_public)
req.handles[handle] = mac
res.finish!
rescue IncompatibleTypes
res.error!("session and association types are incompatible")
rescue NotSupported
res.error!("session type or association type not supported", "error_code" => "unsupported-type")
rescue NoSecureChannel
res.error!("\"no-encryption\" session type requested without https connection")
rescue OpenID::DH::SHA_ANY::MissingKey
res.error!("dh_consumer_public missing")
end
def finish_checkid!(req, res)
if res.negative?
res["mode"] = "setup_needed" if req.checkid_immediate?
elsif res.positive?
assoc_handle = req.assoc_handle
mac = req.handles[assoc_handle]
if mac.nil? # Generate a mac and invalidate the association handle
invalidate_handle = assoc_handle
mac = OpenID::Signatures["HMAC-SHA256"].gen_mac
req.private_handles[assoc_handle = OpenID.gen_handle] = mac
end
req.nonces[nonce = OpenID.gen_nonce] = assoc_handle
res["op_endpoint"] = req.options["op_endpoint"] || Request.new(req.env.merge("PATH_INFO" => "/", "QUERY_STRING" => "")).url
res["return_to"] = req.return_to
res["response_nonce"] = nonce
res["assoc_handle"] = assoc_handle
res["invalidate_handle"] = invalidate_handle if invalidate_handle
res["signed"] = FIELD_SIGNED.select {|field| res[field] }.join(",")
res["sig"] = OpenID.gen_sig(mac, res.params)
end
res.finish!
end
def check_authentication(req)
assoc_handle = req.assoc_handle
invalidate_handle = req.invalidate_handle
nonce = req.response_nonce
# Check if assoc_handle, nonce and signature are valid. Then delete the response nonce
if mac = req.private_handles[assoc_handle] and req.nonces.delete(nonce) == assoc_handle and OpenID.gen_sig(mac, req.params) == req['sig']
res = OpenIDResponse.new("is_valid" => "true")
res["invalidate_handle"] = invalidate_handle if invalidate_handle && req.handles[invalidate_handle].nil?
res.finish!
else
OpenIDResponse.new("is_valid" => "false").finish!
end
end
def finish_error!(req, res)
res["contact"] = req.options["contact"] if req.options["contact"]
res["reference"] = req.options["reference"] if req.options["reference"]
res.finish!
end
def indirect?(req, res)
res.negative? or res.positive? or
req.checkid_setup? or req.checkid_immediate? or
((!req.valid? or req.env['HTTP_REFERER']) and req.return_to)
end
end
DEFAULT_OPTIONS = {
'handle_timeout' => 36000, 'private_handle_timeout' => 300, 'nonce_timeout' => 300,
'handles' => {}, 'private_handles' => {}, 'nonces' => {},
'xrds' => true
}
DEFAULT_MIDDLEWARES = [XRDS, HandleRequests]
attr_reader :options, :handles, :private_handles, :nonces
def initialize(app, options = {})
@options = DEFAULT_OPTIONS.merge(options)
@middleware = DEFAULT_MIDDLEWARES.reverse.inject(app) {|a, m| m.new(a)}
@handles = @options.delete('handles')
@private_handles = @options.delete('private_handles')
@nonces = @options.delete('nonces')
end
def call(env)
sev_env(env)
clean_handles
@middleware.call(env)
end
private
def clean_handles; end
def sev_env(env)
env['openid.provider.options'] ||= @options
env['openid.provider.nonces'] ||= @nonces
env['openid.provider.handles'] ||= @handles
env['openid.provider.private_handles'] ||= @private_handles
end
end
end
require 'rack/openid-provider-sreg'
clean_handles implementation
require 'rack'
require 'openssl'
require 'time'
require 'uri'
module OpenID
VERSION="0.0"
NS="http://specs.openid.net/auth/2.0".freeze
IDENTIFIER_SELECT="http://specs.openid.net/auth/2.0/identifier_select".freeze
SERVER="http://specs.openid.net/auth/2.0/server".freeze
SIGNON="http://specs.openid.net/auth/2.0/signon".freeze
class << self
# Implements OpenID btwoc function
def btwoc(n)
n = n.to_i
raise if n < 0
r = (n % 0x100).chr
r = (n % 0x100).chr + r while (n /= 0x100) > 0
r = 0.chr + r if r[0].ord >= 0x80
r
end
# Inverse form of btwoc
def ctwob(s)
n, sl = 0, s.length - 1
0.upto(sl) {|i|
n += s[i].ord * 0x100 ** (sl - i)
}
n
end
# Encode OpenID parameters as a HTTP GET query string
def url_encode(h); h.map { |k,v| "openid.#{Rack::Utils.escape(k)}=#{Rack::Utils.escape(v)}" }.join('&') end
# Encode OpenID parameters as Key-Value format
def kv_encode(h); h.map {|k,v| k.to_s + ":" + v.to_s + 10.chr }.join end
# Decode OpenID parameters from Key-Value format
def kv_decode(s); Hash[*s.split(10.chr).map {|l| l.split(":", 2) }.flatten] end
# Encode in base64
def base64_encode(s); [s].pack("m0") end
# Decode from base64
def base64_decode(s); s.unpack("m0").first end
# Generate _bytes_ random bytes
def random_bytes(bytes); OpenSSL::Random.random_bytes(bytes) end
# Generate a random string _length_ long
def random_string(length); random_bytes(length / 2).unpack("H*")[0] end
# Generate an OpenID signature
def gen_sig(mac, params)
signed = params["signed"].split(",").map {|k| [k, params[k]]}
if mac.length == 20
OpenID.base64_encode(Signatures["HMAC-SHA1"].sign( mac, kv_encode(signed)))
else
OpenID.base64_encode(Signatures["HMAC-SHA256"].sign(mac, kv_encode(signed)))
end
end
end
module Signatures # :nodoc: all
Associations = {}
def self.[](k); Associations[k] end
def self.[]=(k, v); Associations[k] = v end
class Assoc
def initialize(digest); @digest = digest end
def sign(mac, value); OpenSSL::HMAC.digest(@digest.new, mac, value) end
def size; @digest.new.size end
def gen_mac; OpenID.random_bytes(size) end
end
Associations["HMAC-SHA1"] = Assoc.new(OpenSSL::Digest::SHA1)
Associations["HMAC-SHA256"] = Assoc.new(OpenSSL::Digest::SHA256)
end
module DH # :nodoc: all
DEFAULT_MODULUS=0xDCF93A0B883972EC0E19989AC5A2CE310E1D37717E8D9571BB7623731866E61EF75A2E27898B057F9891C2E27A639C3F29B60814581CD3B2CA3986D2683705577D45C2E7E52DC81C7A171876E5CEA74B1448BFDFAF18828EFD2519F14E45E3826634AF1949E5B535CC829A483B8A76223E5D490A257F05BDFF16F2FB22C583AB
DEFAULT_GEN=2
Sessions = {}
def self.[](k); Sessions[k] end
def self.[]=(k, v); Sessions[k] = v end
class SHA_ANY
class MissingKey < StandardError; end
def compatible_key_size?(size); @digest.new.size == size end
def initialize(digest); @digest = digest end
def to_hash(mac, p, g, consumer_public_key)
raise MissingKey if consumer_public_key.nil?
c = OpenSSL::BN.new(consumer_public_key.to_s)
dh = gen_key(p || DEFAULT_MODULUS, g || DEFAULT_GEN)
shared = OpenSSL::BN.new(dh.compute_key(c), 2)
shared_hashed = @digest.digest(OpenID.btwoc(shared))
{
"dh_server_public" => OpenID.base64_encode(OpenID.btwoc(dh.pub_key)),
"enc_mac_key" => OpenID.base64_encode(sxor(shared_hashed, mac))
}
end
private
def gen_key(p, g)
dh = OpenSSL::PKey::DH.new
dh.p = p
dh.g = g
dh.generate_key!
end
def sxor(s1, s2)
s1.bytes.zip(s2.bytes).map { |x,y| x^y }.pack('C*')
end
end
class NoEncryption
def self.compatible_key_size?(size); true end
def self.to_hash(mac, p, g, c); {"mac_key" => OpenID.base64_encode(mac)} end
end
Sessions["DH-SHA1"] = SHA_ANY.new(OpenSSL::Digest::SHA1)
Sessions["DH-SHA256"] = SHA_ANY.new(OpenSSL::Digest::SHA256)
Sessions["no-encryption"] = NoEncryption
end
end
module Rack # :nodoc:
class OpenIDRequest
FIELDS = %w(
assoc_handle assoc_type claimed_id contact delegate dh_consumer_public dh_gen
dh_modulus error identity invalidate_handle mode ns op_endpoint
realm reference response_nonce return_to server session_type sig
signed trust_root).freeze
MODES = %w(associate checkid_setup checkid_immediate check_authentication).freeze
attr_reader :env
def initialize(env) @env = env end
def params; @env['openid.provider.request.params'] ||= extract_open_id_params end
def [](k); params[k] end
def []=(k, v); params[k] = v end
# Some accessor helpers
FIELDS.each { |field|
class_eval %{def #{field}; params["#{field}"] end}
class_eval %{def #{field}=(v); params["#{field}"] = v end}
}
MODES.each { |field|
class_eval %{def #{field}?; valid? and mode == "#{field}" end}
}
def valid?; mode and Request.new(@env).path_info == "/" end
def identifier_select?; OpenID::IDENTIFIER_SELECT == identity end
def dh_modulus; params['dh_modulus'] && OpenID.ctwob(OpenID.base64_decode(params['dh_modulus'])) end
def dh_gen; params['dh_gen'] && OpenID.ctwob(OpenID.base64_decode(params['dh_gen'])) end
def dh_consumer_public; params['dh_consumer_public'] && OpenID.ctwob(OpenID.base64_decode(params['dh_consumer_public'])) end
def session_type; OpenID::DH[params['session_type']] end
def assoc_type; OpenID::Signatures[params['assoc_type']] end
def realm_wildcard?; params['realm'] =~ %r(^https?://\.\*) end
def realm_url; URI(realm.sub(".*", "")) rescue nil end
def realm_match?(url)
return true if realm.nil? or url.nil?
realm = realm_url
url = URI(url)
!realm.fragment and
realm.scheme == url.scheme and
realm_wildcard? ? %r(\.?#{Regexp.escape(realm.host)}$) =~ url.host : realm.host == url.host and
realm.port == url.port and
%r(^#{Regexp.escape(realm.path)}) =~ url.path
rescue URI::InvalidURIError
false
end
def nonces; @env['openid.provider.nonces'] end
def handles; @env['openid.provider.handles'] end
def private_handles; @env['openid.provider.private_handles'] end
def options; @env['openid.provider.options'] end
private
def extract_open_id_params
openid_params = {}
Request.new(@env).params.each { |k,v| openid_params[$'] = v if k =~ /^openid\./ }
openid_params
end
end
class OpenIDResponse
class NoReturnTo < StandardError
attr_reader :res
def initialize(res)
@res = res
res.error!("no return_to", "orig_mode" => @res["mode"]) if not res.error?
end
end
MODES = %w(error cancel setup_needed id_res is_valid)
MAX_REDIRECT_SIZE = 1024
def self.gen_html_fields(h)
h.map {|k,v|
"<input type='hidden' name='openid.#{k}' value='#{v}' />"
}.join("\n")
end
OpenIDRequest::FIELDS.each { |field|
class_eval %{def #{field}; params["#{field}"] end}
class_eval %{def #{field}=(v); params["#{field}"] = v end}
}
MODES.each { |field|
class_eval %{def #{field}?; mode == "#{field}" end}
}
def initialize(h = {})
@h = h.merge("ns" => OpenID::NS)
@direct = true
@return_to = nil
end
def [](k) @h[k] end
def []=(k,v) @h[k] = v end
def params; @h end
def direct?; @direct end
def direct!; @direct = true end
def indirect?; !direct? end
def indirect!(return_to)
raise NoReturnTo.new(self) if return_to.nil?
@return_to = return_to
@direct = false
end
def html_form?; indirect? and OpenID.url_encode(@h).size > MAX_REDIRECT_SIZE end
def redirect?; !html_form? end
def negative?; cancel? or setup_needed? end
def positive?; id_res? end
def error!(error, h = {})
@h.merge!(h)
@h.merge! "mode" => "error", "error" => error
finish!
end
def negative!(h = {})
@h.merge!(h)
@h["mode"] = "cancel"
finish!
end
def positive!(h = {})
@h.merge!(h)
@h["mode"] = "id_res"
finish!
end
def http_status
if direct?
error? ? 400 : 200
else
html_form? ? 200 : 302
end
end
def http_headers
headers = {"Content-Type" => "text/plain"}
headers.merge!("Content-Length" => http_body.size.to_s)
if direct?
headers
else
if html_form?
headers.merge!("Content-Type" => "text/html")
else
d = URI(@return_to)
d.query = d.query ? d.query + "&" + OpenID.url_encode(@h) : OpenID.url_encode(@h)
headers.merge!("Location" => d.to_s)
end
end
end
def http_body
if direct?
OpenID.kv_encode(@h)
else
if html_form?
%(
<html><body onload='this.openid_form.submit();'>
<form name='openid_form' method='post' action='#{@return_to}'>"
#{OpenIDResponse.gen_html_fields(@h)}
<input type='submit' /></form></body></html>
)
else
""
end
end
end
def each; yield http_body end
def finish!; [http_status, http_headers, self] end
alias :to_a :finish!
end
# This is a Rack middleware:
# Rack::Builder.new {
# use Rack::OpenIDProvider, custom_options
# run MyProvider.new
# }
class OpenIDProvider
FIELD_SIGNED = %w(op_endpoint return_to response_nonce assoc_handle claimed_id identity)
class XRDS
CONTENT_TYPE = "application/xrds+xml".freeze
CONTENT =
%{<?xml version="1.0" encoding="UTF-8"?>
<xrds:XRDS xmlns:xrds="xri://$xrds" xmlns="xri://$xrd*($v*2.0)">
<XRD>
<Service priority="0">
<Type>#{OpenID::SERVER}</Type>
<URI>%s</URI>
</Service>
</XRD>
</xrds:XRDS>}.freeze
def initialize(app) @app = app end
def call(env)
if serve?(env)
content = CONTENT % Request.new(env.merge("PATH_INFO" => "/", "QUERY_STRING" => "")).url
[200, {"Content-Type" => CONTENT_TYPE, "Content-Length" => content.size.to_s}, [content] ]
else
@app.call(env)
end
end
def serve?(env)
req, oreq = Request.new(env), OpenIDRequest.new(env)
!oreq.valid? and oreq.options['xrds'] and
(req.path_info == "/" or req.path == "/") and
env['HTTP_ACCEPT'].include?(CONTENT_TYPE)
end
end
class HandleRequests
class NotSupported < StandardError; end
class IncompatibleTypes < StandardError; end
class NoSecureChannel < StandardError; end
def initialize(app) @app = app end
def call(env)
req = OpenIDRequest.new(env)
# Before filters
if (req.checkid_setup? or req.checkid_immediate?) and res = check_req(req)
c,h,b = res.finish!
else
c,h,b = @app.call(env)
end
# After filters
if req.valid? and c == 404 and h["X-Cascade"] == "pass"
case req.mode
when "associate"
c,h,b = associate(req)
when "checkid_setup", "checkid_immediate"
res = OpenIDResponse.new
res.negative!
c,h,b = finish_checkid! req, res
when "check_authentication"
c,h,b = check_authentication(req)
else
c,h,b = OpenIDResponse.new.error!("Unknown mode")
end
elsif OpenIDResponse === b and (b.negative? or b.positive?)
c,h,b = finish_checkid!(req, b)
end
# Finish filter
if OpenIDResponse === b
finish_error!(req, b) if b.error?
b.indirect!(req.return_to) if indirect?(req, b)
c,h,b = b.finish!
end
[c,h,b]
rescue OpenIDResponse::NoReturnTo => e
finish_error!(req, e.res)
end
private
def check_req(req)
res = OpenIDResponse.new
if !req.return_to and !req.realm
res.error!("The request has no return_to and no realm")
elsif req.realm and !req.realm_url
res.error!("Invalid realm")
elsif !req.realm_match?(req.return_to)
res.error!("return_to url does not match the realm")
else
false
end
end
def associate(req)
res = OpenIDResponse.new
raise NotSupported if req.session_type.nil? or req.assoc_type.nil?
raise IncompatibleTypes if !req.session_type.compatible_key_size?(req.assoc_type.size)
raise NoSecureChannel if req['session_type'] == "no-encryption" and req.env["rack.url_scheme"] != "https"
mac = req.assoc_type.gen_mac
handle = OpenIDProvider.gen_handle
res["assoc_handle"] = handle
res["session_type"] = req['session_type']
res["assoc_type"] = req['assoc_type']
res["expires_in"] = req.options['handle_timeout']
res.params.merge! req.session_type.to_hash(mac, req.dh_modulus, req.dh_gen, req.dh_consumer_public)
req.handles[handle] = mac
res.finish!
rescue IncompatibleTypes
res.error!("session and association types are incompatible")
rescue NotSupported
res.error!("session type or association type not supported", "error_code" => "unsupported-type")
rescue NoSecureChannel
res.error!("\"no-encryption\" session type requested without https connection")
rescue OpenID::DH::SHA_ANY::MissingKey
res.error!("dh_consumer_public missing")
end
def finish_checkid!(req, res)
if res.negative?
res["mode"] = "setup_needed" if req.checkid_immediate?
elsif res.positive?
assoc_handle = req.assoc_handle
mac = req.handles[assoc_handle]
if mac.nil? or OpenIDProvider.handle_gracetime?(req, assoc_handle)
# Handle is too old or unknown
invalidate_handle = assoc_handle
mac = OpenID::Signatures["HMAC-SHA256"].gen_mac
req.private_handles[assoc_handle = OpenIDProvider.gen_handle] = mac
end
req.nonces[nonce = OpenIDProvider.gen_nonce] = assoc_handle
res["op_endpoint"] = req.options["op_endpoint"] || Request.new(req.env.merge("PATH_INFO" => "/", "QUERY_STRING" => "")).url
res["return_to"] = req.return_to
res["response_nonce"] = nonce
res["assoc_handle"] = assoc_handle
res["invalidate_handle"] = invalidate_handle if invalidate_handle
res["signed"] = FIELD_SIGNED.select {|field| res[field] }.join(",")
res["sig"] = OpenID.gen_sig(mac, res.params)
end
res.finish!
end
def check_authentication(req)
assoc_handle = req.assoc_handle
invalidate_handle = req.invalidate_handle
nonce = req.response_nonce
# Check if assoc_handle, nonce and signature are valid. Then delete the response nonce
if mac = req.private_handles[assoc_handle] and req.nonces.delete(nonce) == assoc_handle and OpenID.gen_sig(mac, req.params) == req['sig']
res = OpenIDResponse.new("is_valid" => "true")
res["invalidate_handle"] = invalidate_handle if invalidate_handle && req.handles[invalidate_handle].nil?
res.finish!
else
OpenIDResponse.new("is_valid" => "false").finish!
end
end
def finish_error!(req, res)
res["contact"] = req.options["contact"] if req.options["contact"]
res["reference"] = req.options["reference"] if req.options["reference"]
res.finish!
end
def indirect?(req, res)
res.negative? or res.positive? or
req.checkid_setup? or req.checkid_immediate? or
((!req.valid? or req.env['HTTP_REFERER']) and req.return_to)
end
end
DEFAULT_OPTIONS = {
'handle_timeout' => 36000, 'private_handle_timeout' => 300, 'nonce_timeout' => 300,
'handles' => {}, 'private_handles' => {}, 'nonces' => {},
'xrds' => true
}
DEFAULT_MIDDLEWARES = [XRDS, HandleRequests]
attr_reader :options, :handles, :private_handles, :nonces
def initialize(app, options = {})
@options = DEFAULT_OPTIONS.merge(options)
@middleware = DEFAULT_MIDDLEWARES.reverse.inject(app) {|a, m| m.new(a)}
@handles = @options.delete('handles')
@private_handles = @options.delete('private_handles')
@nonces = @options.delete('nonces')
end
def call(env)
sev_env(env)
clean_handles
@middleware.call(env)
end
private
def clean_handles
@nonces.delete_if { |k,v|
lifetime = OpenIDProvider.handle_lifetime(k) rescue nil
lifetime.nil? or lifetime >= @options['nonce_timeout']
}
@private_handles.delete_if { |k,v|
lifetime = OpenIDProvider.handle_lifetime(k) rescue nil
lifetime.nil? or lifetime >= @options['private_handle_timeout']
}
@handles.delete_if { |k,v|
lifetime = OpenIDProvider.handle_lifetime(k) rescue nil
lifetime.nil? or lifetime >= @options['handle_timeout'] + @options['private_handle_timeout']
}
end
def sev_env(env)
env['openid.provider.options'] ||= @options
env['openid.provider.nonces'] ||= @nonces
env['openid.provider.handles'] ||= @handles
env['openid.provider.private_handles'] ||= @private_handles
end
class << self
def gen_handle; Time.now.utc.iso8601 + OpenID.random_string(6) end
alias :gen_nonce :gen_handle
def handle_gracetime?(req, h)
handle_lifetime(h) > req.options['handle_timeout']
end
def handle_lifetime(h)
Time.now.utc - (Time.iso8601(h[/^[0-9TZ:-]*Z/]) rescue Time.utc(0))
end
end
end
end
require 'rack/openid-provider-sreg'
|
require 'hmac-sha2'
module Rack
class UrlAuth
class Signer
attr_reader :secret
def initialize(secret)
@secret = secret
end
def sign(message)
HMAC::SHA256.hexdigest secret, message
end
def verify(message, signature)
actual = Digest::SHA1.hexdigest sign(message)
expected = Digest::SHA1.hexdigest signature
actual == expected
end
def sign_url(url, method)
purl = URI.parse url
query = Rack::Utils.parse_query purl.query
query.merge! 'signature' => sign(method.to_s.downcase + url)
build_url purl, query
end
def verify_url(url, method)
purl = URI.parse url
query = Rack::Utils.parse_query(purl.query)
signature = query.delete('signature').to_s
verify method.to_s.downcase + build_url(purl, query), signature
end
private
def build_url(purl, query)
query = Rack::Utils.build_query(query)
unless purl.scheme
raise(ArgumentError, 'URI protocol must be provided')
end
url_ary = [purl.scheme, '://', purl.host]
url_ary.push( ':', purl.port ) unless [80, 443].include?(purl.port)
url_ary.push( purl.path )
url_ary.push( '?', query ) unless query.empty?
url_ary.join
end
end
end
end
Don't append ':' after the host when the parsed URL's port is nil.
require 'hmac-sha2'
module Rack
class UrlAuth
class Signer
attr_reader :secret
def initialize(secret)
@secret = secret
end
def sign(message)
HMAC::SHA256.hexdigest secret, message
end
def verify(message, signature)
actual = Digest::SHA1.hexdigest sign(message)
expected = Digest::SHA1.hexdigest signature
actual == expected
end
def sign_url(url, method)
purl = URI.parse url
query = Rack::Utils.parse_query purl.query
query.merge! 'signature' => sign(method.to_s.downcase + url)
build_url purl, query
end
def verify_url(url, method)
purl = URI.parse url
query = Rack::Utils.parse_query(purl.query)
signature = query.delete('signature').to_s
verify method.to_s.downcase + build_url(purl, query), signature
end
private
def build_url(purl, query)
query = Rack::Utils.build_query(query)
unless purl.scheme
raise(ArgumentError, 'URI protocol must be provided')
end
url_ary = [purl.scheme, '://', purl.host]
url_ary.push( ':', purl.port ) unless [80, 443, nil].include?(purl.port)
url_ary.push( purl.path )
url_ary.push( '?', query ) unless query.empty?
url_ary.join
end
end
end
end
|
require 'fog'
require 'fog/core'
require 'json'
module Fog
module Monitoring
class Rackspace < Fog::Service
ENDPOINT = 'https://monitoring.api.rackspacecloud.com/v1.0'
requires :rackspace_api_key, :rackspace_username
recognizes :rackspace_auth_url, :persistent
recognizes :rackspace_auth_token, :rackspace_service_url, :rackspace_account_id
model_path 'rackspace-monitoring/monitoring/models'
model :entity
collection :entities
model :check
collection :checks
model :alarm
collection :alarms
model :alarm_example
collection :alarm_examples
model :agent_token
collection :agent_tokens
request_path 'rackspace-monitoring/monitoring/requests'
request :list_agent_tokens
request :list_alarms
request :list_alarm_examples
request :list_checks
request :list_entities
request :list_overview
request :get_agent_token
request :get_alarm
request :get_alarm_example
request :get_check
request :get_entity
request :create_agent_token
request :create_alarm
request :create_check
request :create_entity
request :update_check
request :update_entity
request :delete_check
request :delete_entity
request :evaluate_alarm_example
class Mock
end
class Real
def initialize(options={})
@rackspace_api_key = options[:rackspace_api_key]
@rackspace_username = options[:rackspace_username]
@rackspace_auth_url = options[:rackspace_auth_url]
@rackspace_auth_token = options[:rackspace_auth_token]
@rackspace_account_id = options[:rackspace_account_id]
@rackspace_service_url = options[:rackspace_service_url] || ENDPOINT
@rackspace_must_reauthenticate = false
@connection_options = options[:connection_options] || {}
authenticate
@persistent = options[:persistent] || false
@ignore_errors = options[:ignore_errors] || false
@connection = Fog::Connection.new("#{@scheme}://#{@host}:#{@port}", @persistent, @connection_options)
end
def reload
@connection.reset
end
def request(params)
begin
begin
response = @connection.request(params.merge({
:headers => {
'Content-Type' => 'application/json',
'X-Auth-Token' => @auth_token
}.merge!(params[:headers] || {}),
:host => @host,
:path => "#{@path}/#{params[:path]}"
}))
rescue Excon::Errors::Unauthorized => error
if error.response.body != 'Bad username or password' # token expiration
@rackspace_must_reauthenticate = true
authenticate
retry
else # bad credentials
raise error
end
rescue Excon::Errors::HTTPStatusError => error
raise case error
when Excon::Errors::NotFound
Fog::Monitoring::Rackspace::NotFound.slurp(error)
else
error
end
end
unless response.body.empty?
response.body = JSON.parse(response.body)
end
response
rescue Exception => error
if @ignore_errors
print "Error occurred: " + error.message
else
raise error
end
end
end
private
def authenticate
if @rackspace_must_reauthenticate || @rackspace_auth_token.nil? || @account_id.nil?
options = {
:rackspace_api_key => @rackspace_api_key,
:rackspace_username => @rackspace_username,
:rackspace_auth_url => @rackspace_auth_url
}
credentials = Fog::Rackspace.authenticate(options)
@auth_token = credentials['X-Auth-Token']
@account_id = credentials['X-Server-Management-Url'].match(/.*\/([\d]+)$/)[1]
else
@auth_token = @rackspace_auth_token
@account_id = @rackspace_account_id
end
uri = URI.parse("#{@rackspace_service_url}/#{@account_id}")
@host = uri.host
@path = uri.path
@port = uri.port
@scheme = uri.scheme
end
end
end
end
end
Call it raise_errors instead.
require 'fog'
require 'fog/core'
require 'json'
module Fog
module Monitoring
class Rackspace < Fog::Service
ENDPOINT = 'https://monitoring.api.rackspacecloud.com/v1.0'
requires :rackspace_api_key, :rackspace_username
recognizes :rackspace_auth_url, :persistent
recognizes :rackspace_auth_token, :rackspace_service_url, :rackspace_account_id
model_path 'rackspace-monitoring/monitoring/models'
model :entity
collection :entities
model :check
collection :checks
model :alarm
collection :alarms
model :alarm_example
collection :alarm_examples
model :agent_token
collection :agent_tokens
request_path 'rackspace-monitoring/monitoring/requests'
request :list_agent_tokens
request :list_alarms
request :list_alarm_examples
request :list_checks
request :list_entities
request :list_overview
request :get_agent_token
request :get_alarm
request :get_alarm_example
request :get_check
request :get_entity
request :create_agent_token
request :create_alarm
request :create_check
request :create_entity
request :update_check
request :update_entity
request :delete_check
request :delete_entity
request :evaluate_alarm_example
class Mock
end
class Real
# Sets up credentials and endpoints from +options+, authenticates, and
# opens the HTTP connection.
#
# Recognized options: :rackspace_api_key, :rackspace_username,
# :rackspace_auth_url, :rackspace_auth_token, :rackspace_account_id,
# :rackspace_service_url (defaults to ENDPOINT), :connection_options,
# :persistent, and :raise_errors (defaults to true; when false, #request
# prints errors instead of raising them).
def initialize(options={})
  @rackspace_api_key = options[:rackspace_api_key]
  @rackspace_username = options[:rackspace_username]
  @rackspace_auth_url = options[:rackspace_auth_url]
  @rackspace_auth_token = options[:rackspace_auth_token]
  @rackspace_account_id = options[:rackspace_account_id]
  @rackspace_service_url = options[:rackspace_service_url] || ENDPOINT
  @rackspace_must_reauthenticate = false
  @connection_options = options[:connection_options] || {}
  # BUG FIX: this read options.has_key("raise_errors") — `has_key` (no `?`)
  # does not exist on Hash, and the option is passed as a symbol, not a
  # string, so the check raised NoMethodError / never saw the option.
  if options.key?(:raise_errors)
    @raise_errors = options[:raise_errors]
  else
    @raise_errors = true
  end
  authenticate
  @persistent = options[:persistent] || false
  @connection = Fog::Connection.new("#{@scheme}://#{@host}:#{@port}", @persistent, @connection_options)
end
# Drops the underlying persistent HTTP connection so the next request
# reconnects from scratch.
def reload
  @connection.reset
end
# Issues an authenticated JSON request against the monitoring endpoint.
# On a 401 caused by token expiry it re-authenticates once per attempt
# and retries; genuine bad credentials are re-raised. 404s are mapped to
# Fog::Monitoring::Rackspace::NotFound. Non-empty response bodies are
# parsed as JSON in place.
def request(params)
  begin
    begin
      response = @connection.request(params.merge({
        :headers => {
          'Content-Type' => 'application/json',
          'X-Auth-Token' => @auth_token
        }.merge!(params[:headers] || {}),
        :host => @host,
        :path => "#{@path}/#{params[:path]}"
      }))
    rescue Excon::Errors::Unauthorized => error
      if error.response.body != 'Bad username or password' # token expiration
        @rackspace_must_reauthenticate = true
        authenticate
        retry
      else # bad credentials
        raise error
      end
    rescue Excon::Errors::HTTPStatusError => error
      raise case error
      when Excon::Errors::NotFound
        Fog::Monitoring::Rackspace::NotFound.slurp(error)
      else
        error
      end
    end
    unless response.body.empty?
      response.body = JSON.parse(response.body)
    end
    response
  # NOTE(review): rescuing Exception is very broad (catches SystemExit,
  # SignalException, etc.); with @raise_errors it is re-raised, but in the
  # print branch such errors would be swallowed — consider StandardError.
  rescue Exception => error
    if @raise_errors
      raise error
    else
      print "Error occurred: " + error.message
    end
  end
end
private

# Resolves the auth token and account id, then derives the connection
# fields (@host/@path/@port/@scheme) from the service URL.
# Re-authenticates when forced via @rackspace_must_reauthenticate, or
# when no cached token/account id is available; otherwise reuses the
# token/account supplied in options.
def authenticate
  if @rackspace_must_reauthenticate || @rackspace_auth_token.nil? || @account_id.nil?
    options = {
      :rackspace_api_key => @rackspace_api_key,
      :rackspace_username => @rackspace_username,
      :rackspace_auth_url => @rackspace_auth_url
    }
    credentials = Fog::Rackspace.authenticate(options)
    @auth_token = credentials['X-Auth-Token']
    # The account id is the trailing numeric path segment of the
    # server-management URL returned by the auth service.
    @account_id = credentials['X-Server-Management-Url'].match(/.*\/([\d]+)$/)[1]
  else
    @auth_token = @rackspace_auth_token
    @account_id = @rackspace_account_id
  end
  uri = URI.parse("#{@rackspace_service_url}/#{@account_id}")
  @host = uri.host
  @path = uri.path
  @port = uri.port
  @scheme = uri.scheme
end
end
end
end
end
|
require 'zmq'
module Rackstash
  # Logger that publishes log lines to a remote endpoint over a ZeroMQ
  # socket instead of writing to a local device.
  class ZmqLogger
    include Rackstash::LogSeverity

    # Minimum severity that gets published (see Rackstash::LogSeverity).
    attr_accessor :level

    # Use either ZMQ::PUB or ZMQ::PUSH as the socket type.
    # The main difference in our domain is the behavior when reaching the
    # high water mark (if configured). The ZMQ::PUB type silently discards
    # messages while the ZMQ::PUSH type blocks.
    #
    # The remote ZMQ socket must be configured equivalently.
    def initialize(address, level=DEBUG, zmq_socket_type=ZMQ::PUB, zmq_options={})
      @level = level
      @context = ZMQ::Context.new
      # BUG FIX: zmq_socket_type was silently ignored and the socket was
      # always created as ZMQ::PUSH, contradicting the documented default.
      @socket = @context.socket(zmq_socket_type)
      # BUG FIX: options are applied before connecting so per-client
      # settings such as the high water mark take effect for this
      # connection.
      zmq_options.each do |k, v|
        @socket.setsockopt(k, v)
      end
      @socket.connect("tcp://#{address}")
    end

    # Sets a raw ZMQ option on the underlying socket.
    # BUG FIX: the body referenced undefined locals `k`/`v` and raised
    # NameError on every call.
    def zmq_setsockopt(key, value)
      @socket.setsockopt(key, value)
    end

    # Reads a raw ZMQ option from the underlying socket.
    # BUG FIX: this previously *set* the option (via undefined locals)
    # instead of reading it. The +value+ parameter is retained for
    # call-site compatibility but is unused.
    def zmq_getsockopt(key, value = nil)
      @socket.getsockopt(key)
    end

    # Publishes +message+ (or the block / progname fallback) when
    # +severity+ passes the configured level. The send is non-blocking;
    # delivery under back-pressure follows the socket type's semantics.
    def add(severity, message = nil, progname = nil, &block)
      return if level > severity
      message = (message || (block && block.call) || progname).to_s
      @socket.send(message, ZMQ::NOBLOCK)
    end

    # Generates Logger-compatible helpers (#debug/#info/... and the
    # matching #debug?/#info?... predicates) for every known severity.
    Severities.each do |severity|
      class_eval <<-EOT, __FILE__, __LINE__ + 1
        def #{severity.to_s.downcase}(message = nil, progname = nil, &block) # def debug(message = nil, progname = nil, &block)
          add(#{severity}, message, progname, &block) # add(DEBUG, message, progname, &block)
        end # end
        #
        def #{severity.to_s.downcase}? # def debug?
          #{severity} >= level # DEBUG >= level
        end # end
      EOT
    end

    def auto_flushing
      1
    end

    def flush
      # We flush automatically after each #add.
      # We are non-blocking anyway \o/
    end

    # BUG FIX: the socket must be closed before its context is terminated;
    # terminating a ZMQ context with live sockets can block indefinitely.
    def close
      @socket.close
      @context.close
    end

    ##
    # :singleton-method:
    # Set to false to disable the silencer
    cattr_accessor :silencer
    self.silencer = true

    # Silences the logger for the duration of the block.
    def silence(temporary_level = ERROR)
      if silencer
        begin
          old_logger_level, self.level = level, temporary_level
          yield self
        ensure
          self.level = old_logger_level
        end
      else
        yield self
      end
    end
  end
end
Connect the ZMQ socket after setting the options to allow for per-client configs
require 'zmq'
module Rackstash
  # Logger that publishes log lines to a remote endpoint over a ZeroMQ
  # socket instead of writing to a local device.
  class ZmqLogger
    include Rackstash::LogSeverity

    # Minimum severity that gets published (see Rackstash::LogSeverity).
    attr_accessor :level

    # Use either ZMQ::PUB or ZMQ::PUSH as the socket type.
    # The main difference in our domain is the behavior when reaching the
    # high water mark (if configured). The ZMQ::PUB type silently discards
    # messages while the ZMQ::PUSH type blocks.
    #
    # The remote ZMQ socket must be configured equivalently.
    def initialize(address, level=DEBUG, zmq_socket_type=ZMQ::PUB, zmq_options={})
      @level = level
      @context = ZMQ::Context.new
      # BUG FIX: zmq_socket_type was silently ignored and the socket was
      # always created as ZMQ::PUSH, contradicting the documented default.
      @socket = @context.socket(zmq_socket_type)
      # Options are applied before connecting so per-client settings such
      # as the high water mark take effect for this connection.
      zmq_options.each do |k, v|
        @socket.setsockopt(k, v)
      end
      @socket.connect("tcp://#{address}")
    end

    # Sets a raw ZMQ option on the underlying socket.
    # BUG FIX: the body referenced undefined locals `k`/`v` and raised
    # NameError on every call.
    def zmq_setsockopt(key, value)
      @socket.setsockopt(key, value)
    end

    # Reads a raw ZMQ option from the underlying socket.
    # BUG FIX: this previously *set* the option (via undefined locals)
    # instead of reading it. The +value+ parameter is retained for
    # call-site compatibility but is unused.
    def zmq_getsockopt(key, value = nil)
      @socket.getsockopt(key)
    end

    # Publishes +message+ (or the block / progname fallback) when
    # +severity+ passes the configured level. The send is non-blocking;
    # delivery under back-pressure follows the socket type's semantics.
    def add(severity, message = nil, progname = nil, &block)
      return if level > severity
      message = (message || (block && block.call) || progname).to_s
      @socket.send(message, ZMQ::NOBLOCK)
    end

    # Generates Logger-compatible helpers (#debug/#info/... and the
    # matching #debug?/#info?... predicates) for every known severity.
    Severities.each do |severity|
      class_eval <<-EOT, __FILE__, __LINE__ + 1
        def #{severity.to_s.downcase}(message = nil, progname = nil, &block) # def debug(message = nil, progname = nil, &block)
          add(#{severity}, message, progname, &block) # add(DEBUG, message, progname, &block)
        end # end
        #
        def #{severity.to_s.downcase}? # def debug?
          #{severity} >= level # DEBUG >= level
        end # end
      EOT
    end

    def auto_flushing
      1
    end

    def flush
      # We flush automatically after each #add.
      # We are non-blocking anyway \o/
    end

    # BUG FIX: the socket must be closed before its context is terminated;
    # terminating a ZMQ context with live sockets can block indefinitely.
    def close
      @socket.close
      @context.close
    end

    ##
    # :singleton-method:
    # Set to false to disable the silencer
    cattr_accessor :silencer
    self.silencer = true

    # Silences the logger for the duration of the block.
    def silence(temporary_level = ERROR)
      if silencer
        begin
          old_logger_level, self.level = level, temporary_level
          yield self
        ensure
          self.level = old_logger_level
        end
      else
        yield self
      end
    end
  end
end
|
module RailsConfig
  # Gem version string.
  VERSION = "0.2.0"
end
Bumping version to 0.2.1
module RailsConfig
  # Gem version string.
  VERSION = "0.2.1"
end |
# Copyright (c) 2006- Facebook
# Distributed under the Thrift Software License
#
# See accompanying file LICENSE or visit the Thrift site at:
# http://developers.facebook.com/thrift/
#
# Author: Mark Slee <mcslee@facebook.com>
#
require 'thrift/protocol'
require 'thrift/protocol/binaryprotocol'
require 'thrift/transport'
module Thrift
# Base class for all Thrift servers: binds a processor to a server
# transport and holds the factories used to wrap accepted connections.
class Server
  def initialize(processor, serverTransport, transportFactory=nil, protocolFactory=nil)
    @processor = processor
    @serverTransport = serverTransport
    # Fall back to the default factories when none were supplied.
    @transportFactory = transportFactory || Thrift::TransportFactory.new
    @protocolFactory = protocolFactory || Thrift::BinaryProtocolFactory.new
  end

  # The base server does nothing; subclasses implement the accept loop.
  def serve; nil; end
end
deprecate_class! :TServer => Server
# Single-threaded blocking server: handles one client connection at a
# time, processing requests on it until the peer disconnects.
class SimpleServer < Server
  def initialize(processor, serverTransport, transportFactory=nil, protocolFactory=nil)
    super(processor, serverTransport, transportFactory, protocolFactory)
  end
  def serve
    begin
      @serverTransport.listen
      while (true)
        client = @serverTransport.accept
        trans = @transportFactory.get_transport(client)
        prot = @protocolFactory.get_protocol(trans)
        begin
          # Process requests on this connection until it drops.
          while (true)
            @processor.process(prot, prot)
          end
        # Transport/protocol exceptions signal normal client disconnect.
        rescue Thrift::TransportException, Thrift::ProtocolException => ttx
          #print ttx,"\n"
        ensure
          trans.close
        end
      end
    ensure
      @serverTransport.close
    end
  end
end
deprecate_class! :TSimpleServer => SimpleServer
end
begin
require 'fastthread'
rescue LoadError
require 'thread'
end
module Thrift
# Thread-per-connection server: each accepted client is served on its own
# thread until it disconnects. No limit on concurrent threads.
class ThreadedServer < Server
  def serve
    begin
      @serverTransport.listen
      while (true)
        client = @serverTransport.accept
        trans = @transportFactory.get_transport(client)
        prot = @protocolFactory.get_protocol(trans)
        # Pass prot/trans as thread args to avoid sharing the loop locals
        # with the next accepted connection.
        Thread.new(prot, trans) do |p, t|
          begin
            while (true)
              @processor.process(p, p)
            end
          # Transport/protocol exceptions signal normal client disconnect.
          rescue Thrift::TransportException, Thrift::ProtocolException => e
          ensure
            t.close
          end
        end
      end
    ensure
      @serverTransport.close
    end
  end
end
deprecate_class! :TThreadedServer => ThreadedServer
# Serves requests from a bounded pool of worker threads; +num+ caps the
# number of concurrently running workers via a SizedQueue of tokens.
class ThreadPoolServer < Server
  def initialize(processor, serverTransport, transportFactory=nil, protocolFactory=nil, num=20)
    super(processor, serverTransport, transportFactory, protocolFactory)
    @thread_q = SizedQueue.new(num)
    @exception_q = Queue.new
    @running = false
  end

  ## exceptions that happen in worker threads will be relayed here and
  ## must be caught. 'retry' can be used to continue. (threads will
  ## continue to run while the exception is being handled.)
  # NOTE(review): @running is initialized but never set to true anywhere
  # in this class, so every call spawns a fresh serve thread — confirm
  # whether that is intended.
  def rescuable_serve
    Thread.new { serve } unless @running
    raise @exception_q.pop
  end

  ## exceptions that happen in worker threads simply cause that thread
  ## to die and another to be spawned in its place.
  def serve
    @serverTransport.listen
    begin
      while (true)
        # Blocks when the pool is full; each token represents one worker.
        @thread_q.push(:token)
        Thread.new do
          begin
            while (true)
              client = @serverTransport.accept
              trans = @transportFactory.get_transport(client)
              prot = @protocolFactory.get_protocol(trans)
              begin
                while (true)
                  @processor.process(prot, prot)
                end
              # Transport/protocol exceptions signal client disconnect.
              rescue Thrift::TransportException, Thrift::ProtocolException => e
              ensure
                trans.close
              end
            end
          # BUG FIX: was `rescue Exception`, which also swallowed
          # SystemExit, SignalException and NoMemoryError; StandardError
          # is the correct net for relaying application errors.
          rescue StandardError => e
            @exception_q.push(e)
          ensure
            @thread_q.pop # thread died!
          end
        end
      end
    ensure
      @serverTransport.close
    end
  end
end
end
Fix blanket exception rescue in ThreadPoolServer
git-svn-id: 8d8e29b1fb681c884914062b88f3633e3a187774@668957 13f79535-47bb-0310-9956-ffa450edef68
# Copyright (c) 2006- Facebook
# Distributed under the Thrift Software License
#
# See accompanying file LICENSE or visit the Thrift site at:
# http://developers.facebook.com/thrift/
#
# Author: Mark Slee <mcslee@facebook.com>
#
require 'thrift/protocol'
require 'thrift/protocol/binaryprotocol'
require 'thrift/transport'
module Thrift
# Base class for all Thrift servers: binds a processor to a server
# transport and holds the factories used to wrap accepted connections.
class Server
  def initialize(processor, serverTransport, transportFactory=nil, protocolFactory=nil)
    @processor = processor
    @serverTransport = serverTransport
    # Fall back to the default factories when none were supplied.
    @transportFactory = transportFactory || Thrift::TransportFactory.new
    @protocolFactory = protocolFactory || Thrift::BinaryProtocolFactory.new
  end

  # The base server does nothing; subclasses implement the accept loop.
  def serve; nil; end
end
deprecate_class! :TServer => Server
# Single-threaded blocking server: handles one client connection at a
# time, processing requests on it until the peer disconnects.
class SimpleServer < Server
  def initialize(processor, serverTransport, transportFactory=nil, protocolFactory=nil)
    super(processor, serverTransport, transportFactory, protocolFactory)
  end
  def serve
    begin
      @serverTransport.listen
      while (true)
        client = @serverTransport.accept
        trans = @transportFactory.get_transport(client)
        prot = @protocolFactory.get_protocol(trans)
        begin
          # Process requests on this connection until it drops.
          while (true)
            @processor.process(prot, prot)
          end
        # Transport/protocol exceptions signal normal client disconnect.
        rescue Thrift::TransportException, Thrift::ProtocolException => ttx
          #print ttx,"\n"
        ensure
          trans.close
        end
      end
    ensure
      @serverTransport.close
    end
  end
end
deprecate_class! :TSimpleServer => SimpleServer
end
begin
require 'fastthread'
rescue LoadError
require 'thread'
end
module Thrift
# Thread-per-connection server: each accepted client is served on its own
# thread until it disconnects. No limit on concurrent threads.
class ThreadedServer < Server
  def serve
    begin
      @serverTransport.listen
      while (true)
        client = @serverTransport.accept
        trans = @transportFactory.get_transport(client)
        prot = @protocolFactory.get_protocol(trans)
        # Pass prot/trans as thread args to avoid sharing the loop locals
        # with the next accepted connection.
        Thread.new(prot, trans) do |p, t|
          begin
            while (true)
              @processor.process(p, p)
            end
          # Transport/protocol exceptions signal normal client disconnect.
          rescue Thrift::TransportException, Thrift::ProtocolException => e
          ensure
            t.close
          end
        end
      end
    ensure
      @serverTransport.close
    end
  end
end
deprecate_class! :TThreadedServer => ThreadedServer
# Serves requests from a bounded pool of worker threads; +num+ caps the
# number of concurrently running workers via a SizedQueue of tokens.
class ThreadPoolServer < Server
  def initialize(processor, serverTransport, transportFactory=nil, protocolFactory=nil, num=20)
    super(processor, serverTransport, transportFactory, protocolFactory)
    @thread_q = SizedQueue.new(num)
    @exception_q = Queue.new
    @running = false
  end
  ## exceptions that happen in worker threads will be relayed here and
  ## must be caught. 'retry' can be used to continue. (threads will
  ## continue to run while the exception is being handled.)
  # NOTE(review): @running is initialized but never set to true anywhere
  # in this class, so every call spawns a fresh serve thread — confirm
  # whether that is intended.
  def rescuable_serve
    Thread.new { serve } unless @running
    raise @exception_q.pop
  end
  ## exceptions that happen in worker threads simply cause that thread
  ## to die and another to be spawned in its place.
  def serve
    @serverTransport.listen
    begin
      while (true)
        # Blocks when the pool is full; each token represents one worker.
        @thread_q.push(:token)
        Thread.new do
          begin
            while (true)
              client = @serverTransport.accept
              trans = @transportFactory.get_transport(client)
              prot = @protocolFactory.get_protocol(trans)
              begin
                while (true)
                  @processor.process(prot, prot)
                end
              # Transport/protocol exceptions signal client disconnect.
              rescue Thrift::TransportException, Thrift::ProtocolException => e
              ensure
                trans.close
              end
            end
          # Bare rescue == StandardError; relay the error to the caller
          # waiting in rescuable_serve.
          rescue => e
            @exception_q.push(e)
          ensure
            @thread_q.pop # thread died!
          end
        end
      end
    ensure
      @serverTransport.close
    end
  end
end
deprecate_class! :TThreadPoolServer => ThreadPoolServer
end
|
require 'ebnf/ll1/lexer'
module RDF::Turtle
module Terminals
  # Definitions of token regular expressions used for lexical analysis.
  ##
  # Unicode regular expressions for Ruby 1.9+ with the Oniguruma engine.
  U_CHARS1 = Regexp.compile(<<-EOS.gsub(/\s+/, ''))
    [\\u00C0-\\u00D6]|[\\u00D8-\\u00F6]|[\\u00F8-\\u02FF]|
    [\\u0370-\\u037D]|[\\u037F-\\u1FFF]|[\\u200C-\\u200D]|
    [\\u2070-\\u218F]|[\\u2C00-\\u2FEF]|[\\u3001-\\uD7FF]|
    [\\uF900-\\uFDCF]|[\\uFDF0-\\uFFFD]|[\\u{10000}-\\u{EFFFF}]
  EOS
  U_CHARS2 = Regexp.compile("\\u00B7|[\\u0300-\\u036F]|[\\u203F-\\u2040]").freeze
  IRI_RANGE = Regexp.compile("[[^<>\"{}|^`\\\\]&&[^\\x00-\\x20]]").freeze
  # 26
  UCHAR = EBNF::LL1::Lexer::UCHAR
  # 170s
  PERCENT = /%[0-9A-Fa-f]{2}/.freeze
  # 172s
  # BUG FIX: ':' removed from the escape set — the Turtle grammar's
  # PN_LOCAL_ESC production does not allow an escaped colon (colons are
  # matched unescaped by PN_LOCAL itself).
  PN_LOCAL_ESC = /\\[_~\.\-\!$\&'\(\)\*\+,;=\/\?\#@%]/.freeze
  # 169s (redundant double .freeze removed)
  PLX = /#{PERCENT}|#{PN_LOCAL_ESC}/.freeze
  # 163s
  PN_CHARS_BASE = /[A-Z]|[a-z]|#{U_CHARS1}/.freeze
  # 164s
  PN_CHARS_U = /_|#{PN_CHARS_BASE}/.freeze
  # 166s
  PN_CHARS = /-|[0-9]|#{PN_CHARS_U}|#{U_CHARS2}/.freeze
  PN_LOCAL_BODY = /(?:(?:\.|:|#{PN_CHARS}|#{PLX})*(?:#{PN_CHARS}|:|#{PLX}))?/.freeze
  PN_CHARS_BODY = /(?:(?:\.|#{PN_CHARS})*#{PN_CHARS})?/.freeze
  # 167s
  PN_PREFIX = /#{PN_CHARS_BASE}#{PN_CHARS_BODY}/.freeze
  # 168s
  PN_LOCAL = /(?:[0-9]|:|#{PN_CHARS_U}|#{PLX})#{PN_LOCAL_BODY}/.freeze
  # 154s (now frozen like the other terminals)
  EXPONENT = /[eE][+-]?[0-9]+/.freeze
  # 159s (now frozen like the other terminals)
  ECHAR = /\\[tbnrf\\"']/.freeze
  # 18
  IRIREF = /<(?:#{IRI_RANGE}|#{UCHAR})*>/.freeze
  # 139s
  PNAME_NS = /#{PN_PREFIX}?:/.freeze
  # 140s
  PNAME_LN = /#{PNAME_NS}#{PN_LOCAL}/.freeze
  # 141s
  BLANK_NODE_LABEL = /_:(?:[0-9]|#{PN_CHARS_U})(?:(?:#{PN_CHARS}|\.)*#{PN_CHARS})?/.freeze
  # 144s
  LANGTAG = /@[a-zA-Z]+(?:-[a-zA-Z0-9]+)*/.freeze
  # 19
  INTEGER = /[+-]?[0-9]+/.freeze
  # 20
  DECIMAL = /[+-]?(?:[0-9]*\.[0-9]+)/.freeze
  # 21
  DOUBLE = /[+-]?(?:[0-9]+\.[0-9]*#{EXPONENT}|\.?[0-9]+#{EXPONENT})/.freeze
  # 22
  STRING_LITERAL_SINGLE_QUOTE = /'(?:[^\'\\\n\r]|#{ECHAR}|#{UCHAR})*'/.freeze
  # 23 (redundant double .freeze removed)
  STRING_LITERAL_QUOTE = /"(?:[^\"\\\n\r]|#{ECHAR}|#{UCHAR})*"/.freeze
  # 24
  STRING_LITERAL_LONG_SINGLE_QUOTE = /'''(?:(?:'|'')?(?:[^'\\]|#{ECHAR}|#{UCHAR}))*'''/m.freeze
  # 25
  STRING_LITERAL_LONG_QUOTE = /"""(?:(?:"|"")?(?:[^"\\]|#{ECHAR}|#{UCHAR}))*"""/m.freeze
  # 161s
  WS = / |\t|\r|\n /.freeze
  # 162s
  ANON = /\[#{WS}*\]/m.freeze
  # 28t
  PREFIX = /@?prefix/i.freeze
  # 29t
  BASE = /@?base/i.freeze
end
end
Fix PN_LOCAL_ESC regexp
require 'ebnf/ll1/lexer'
module RDF::Turtle
module Terminals
  # Definitions of token regular expressions used for lexical analysis.
  ##
  # Unicode regular expressions for Ruby 1.9+ with the Oniguruma engine.
  U_CHARS1 = Regexp.compile(<<-EOS.gsub(/\s+/, ''))
    [\\u00C0-\\u00D6]|[\\u00D8-\\u00F6]|[\\u00F8-\\u02FF]|
    [\\u0370-\\u037D]|[\\u037F-\\u1FFF]|[\\u200C-\\u200D]|
    [\\u2070-\\u218F]|[\\u2C00-\\u2FEF]|[\\u3001-\\uD7FF]|
    [\\uF900-\\uFDCF]|[\\uFDF0-\\uFFFD]|[\\u{10000}-\\u{EFFFF}]
  EOS
  U_CHARS2 = Regexp.compile("\\u00B7|[\\u0300-\\u036F]|[\\u203F-\\u2040]").freeze
  IRI_RANGE = Regexp.compile("[[^<>\"{}|^`\\\\]&&[^\\x00-\\x20]]").freeze
  # 26
  UCHAR = EBNF::LL1::Lexer::UCHAR
  # 170s
  PERCENT = /%[0-9A-Fa-f]{2}/.freeze
  # 172s
  PN_LOCAL_ESC = /\\[_~\.\-\!$\&'\(\)\*\+,;=\/\?\#@%]/.freeze
  # 169s (redundant double .freeze removed)
  PLX = /#{PERCENT}|#{PN_LOCAL_ESC}/.freeze
  # 163s
  PN_CHARS_BASE = /[A-Z]|[a-z]|#{U_CHARS1}/.freeze
  # 164s
  PN_CHARS_U = /_|#{PN_CHARS_BASE}/.freeze
  # 166s
  PN_CHARS = /-|[0-9]|#{PN_CHARS_U}|#{U_CHARS2}/.freeze
  PN_LOCAL_BODY = /(?:(?:\.|:|#{PN_CHARS}|#{PLX})*(?:#{PN_CHARS}|:|#{PLX}))?/.freeze
  PN_CHARS_BODY = /(?:(?:\.|#{PN_CHARS})*#{PN_CHARS})?/.freeze
  # 167s
  PN_PREFIX = /#{PN_CHARS_BASE}#{PN_CHARS_BODY}/.freeze
  # 168s
  PN_LOCAL = /(?:[0-9]|:|#{PN_CHARS_U}|#{PLX})#{PN_LOCAL_BODY}/.freeze
  # 154s (now frozen like the other terminals)
  EXPONENT = /[eE][+-]?[0-9]+/.freeze
  # 159s (now frozen like the other terminals)
  ECHAR = /\\[tbnrf\\"']/.freeze
  # 18
  IRIREF = /<(?:#{IRI_RANGE}|#{UCHAR})*>/.freeze
  # 139s
  PNAME_NS = /#{PN_PREFIX}?:/.freeze
  # 140s
  PNAME_LN = /#{PNAME_NS}#{PN_LOCAL}/.freeze
  # 141s
  BLANK_NODE_LABEL = /_:(?:[0-9]|#{PN_CHARS_U})(?:(?:#{PN_CHARS}|\.)*#{PN_CHARS})?/.freeze
  # 144s
  LANGTAG = /@[a-zA-Z]+(?:-[a-zA-Z0-9]+)*/.freeze
  # 19
  INTEGER = /[+-]?[0-9]+/.freeze
  # 20
  DECIMAL = /[+-]?(?:[0-9]*\.[0-9]+)/.freeze
  # 21
  DOUBLE = /[+-]?(?:[0-9]+\.[0-9]*#{EXPONENT}|\.?[0-9]+#{EXPONENT})/.freeze
  # 22
  STRING_LITERAL_SINGLE_QUOTE = /'(?:[^\'\\\n\r]|#{ECHAR}|#{UCHAR})*'/.freeze
  # 23
  STRING_LITERAL_QUOTE = /"(?:[^\"\\\n\r]|#{ECHAR}|#{UCHAR})*"/.freeze
  # 24
  STRING_LITERAL_LONG_SINGLE_QUOTE = /'''(?:(?:'|'')?(?:[^'\\]|#{ECHAR}|#{UCHAR}))*'''/m.freeze
  # 25
  STRING_LITERAL_LONG_QUOTE = /"""(?:(?:"|"")?(?:[^"\\]|#{ECHAR}|#{UCHAR}))*"""/m.freeze
  # 161s
  WS = / |\t|\r|\n /.freeze
  # 162s
  ANON = /\[#{WS}*\]/m.freeze
  # 28t
  PREFIX = /@?prefix/i.freeze
  # 29t
  BASE = /@?base/i.freeze
end
end |
require 'aws/s3'
require 'heroku/command/base'
require 'progress_bar'
module Refinery
module S3assets
# Railtie that registers this gem's rake tasks with the host Rails app.
class MyRailtie < Rails::Railtie
  rake_tasks do
    Dir[File.join(File.dirname(__FILE__),'tasks/*.rake')].each { |f| load f }
  end
end
class Util
  # Pulls all Image and Resource assets referenced by the app out of the
  # configured S3 bucket into public/system.
  # @raise [StandardError] when any of the S3_* settings is missing
  def self.pull
    raise(StandardError, "no S3_KEY config var or environment variable found") if s3_config[:s3_key].nil?
    raise(StandardError, "no S3_SECRET config var or environment variable found") if s3_config[:s3_secret].nil?
    raise(StandardError, "no S3_BUCKET config var or environment variable found") if s3_config[:s3_bucket].nil?
    copy_s3_bucket(s3_config[:s3_key], s3_config[:s3_secret], s3_config[:s3_bucket], 'public/system')
  end

  # NOTE(review): `private` does not affect methods defined with `def self.`;
  # these helpers remain publicly callable. Use private_class_method if they
  # must really be hidden.
  private

  # Downloads every Image and Resource object in the bucket beneath
  # output_path (images/ and resources/ subdirectories).
  def self.copy_s3_bucket(s3_key, s3_secret, s3_bucket, output_path)
    AWS::S3::Base.establish_connection!(:access_key_id => s3_key, :secret_access_key => s3_secret)
    # Validates that the bucket exists / is reachable; the returned object
    # itself is not needed (previously bound to an unused local).
    AWS::S3::Bucket.find(s3_bucket)
    puts "There are #{Image.count} images in the #{s3_bucket} bucket"
    Image.all.each do |image|
      s3_object = AWS::S3::S3Object.find image.image_uid,s3_bucket
      dest = File.join(output_path,"images",s3_object.key)
      copy_s3_object(s3_object,dest)
    end
    puts "\n\nThere are #{Resource.count} resources in the #{s3_bucket} bucket"
    Resource.all.each do |resource|
      s3_object = AWS::S3::S3Object.find resource.file_uid,s3_bucket
      dest = File.join(output_path,"resources",s3_object.key)
      copy_s3_object(s3_object,dest)
    end
  end

  # Streams one S3 object to the local path +to+, showing a progress bar.
  def self.copy_s3_object(s3_object, to)
    FileUtils::mkdir_p File.dirname(to), :verbose => false
    filesize = s3_object.about['content-length'].to_f
    puts "Saving #{s3_object.key} (#{filesize} bytes):"
    bar = ProgressBar.new(filesize, :percentage, :counter)
    # SECURITY FIX: use File.open instead of Kernel#open — the path embeds
    # the remote S3 object key, and Kernel#open executes a subprocess when
    # the argument starts with "|".
    File.open(to, 'wb') do |f|
      s3_object.value do |chunk|
        bar.increment! chunk.size
        f.write chunk
      end
    end
    puts "\n=======================================\n"
  end

  # Resolves S3 credentials from the environment, falling back to the
  # Heroku app's config vars. Memoized after the first call.
  def self.s3_config
    return @s3_config unless @s3_config.nil?
    begin
      base = Heroku::Command::BaseWithApp.new
      app = base.app
    rescue
      puts "This does not look like a Heroku app!"
      exit
    end
    config_vars = base.heroku.config_vars(app)
    @s3_config = {
      :s3_key => ENV['S3_KEY'] || config_vars['S3_KEY'],
      :s3_secret => ENV['S3_SECRET'] || config_vars['S3_SECRET'],
      :s3_bucket => ENV['S3_BUCKET'] || config_vars['S3_BUCKET']
    }
  end
end
end
end
update path to work with refinerycms 2.0
require 'aws/s3'
require 'heroku/command/base'
require 'progress_bar'
module Refinery
module S3assets
# Railtie that registers this gem's rake tasks with the host Rails app.
class MyRailtie < Rails::Railtie
  rake_tasks do
    Dir[File.join(File.dirname(__FILE__),'tasks/*.rake')].each { |f| load f }
  end
end
class Util
  # Pulls all Image and Resource assets referenced by the app out of the
  # configured S3 bucket into public/system/refinery (refinerycms 2.0
  # layout).
  # @raise [StandardError] when any of the S3_* settings is missing
  def self.pull
    raise(StandardError, "no S3_KEY config var or environment variable found") if s3_config[:s3_key].nil?
    raise(StandardError, "no S3_SECRET config var or environment variable found") if s3_config[:s3_secret].nil?
    raise(StandardError, "no S3_BUCKET config var or environment variable found") if s3_config[:s3_bucket].nil?
    copy_s3_bucket(s3_config[:s3_key], s3_config[:s3_secret], s3_config[:s3_bucket], 'public/system/refinery')
  end

  # NOTE(review): `private` does not affect methods defined with `def self.`;
  # these helpers remain publicly callable. Use private_class_method if they
  # must really be hidden.
  private

  # Downloads every Image and Resource object in the bucket beneath
  # output_path (images/ and resources/ subdirectories).
  def self.copy_s3_bucket(s3_key, s3_secret, s3_bucket, output_path)
    AWS::S3::Base.establish_connection!(:access_key_id => s3_key, :secret_access_key => s3_secret)
    # Validates that the bucket exists / is reachable; the returned object
    # itself is not needed (previously bound to an unused local).
    AWS::S3::Bucket.find(s3_bucket)
    puts "There are #{Image.count} images in the #{s3_bucket} bucket"
    Image.all.each do |image|
      s3_object = AWS::S3::S3Object.find image.image_uid,s3_bucket
      dest = File.join(output_path,"images",s3_object.key)
      copy_s3_object(s3_object,dest)
    end
    puts "\n\nThere are #{Resource.count} resources in the #{s3_bucket} bucket"
    Resource.all.each do |resource|
      s3_object = AWS::S3::S3Object.find resource.file_uid,s3_bucket
      dest = File.join(output_path,"resources",s3_object.key)
      copy_s3_object(s3_object,dest)
    end
  end

  # Streams one S3 object to the local path +to+, showing a progress bar.
  def self.copy_s3_object(s3_object, to)
    FileUtils::mkdir_p File.dirname(to), :verbose => false
    filesize = s3_object.about['content-length'].to_f
    puts "Saving #{s3_object.key} (#{filesize} bytes):"
    bar = ProgressBar.new(filesize, :percentage, :counter)
    # SECURITY FIX: use File.open instead of Kernel#open — the path embeds
    # the remote S3 object key, and Kernel#open executes a subprocess when
    # the argument starts with "|".
    File.open(to, 'wb') do |f|
      s3_object.value do |chunk|
        bar.increment! chunk.size
        f.write chunk
      end
    end
    puts "\n=======================================\n"
  end

  # Resolves S3 credentials from the environment, falling back to the
  # Heroku app's config vars. Memoized after the first call.
  def self.s3_config
    return @s3_config unless @s3_config.nil?
    begin
      base = Heroku::Command::BaseWithApp.new
      app = base.app
    rescue
      puts "This does not look like a Heroku app!"
      exit
    end
    config_vars = base.heroku.config_vars(app)
    @s3_config = {
      :s3_key => ENV['S3_KEY'] || config_vars['S3_KEY'],
      :s3_secret => ENV['S3_SECRET'] || config_vars['S3_SECRET'],
      :s3_bucket => ENV['S3_BUCKET'] || config_vars['S3_BUCKET']
    }
  end
end
end
end
|
module RemoteAttrAccessor
end
require_relative 'remote_attr_accessor/config'
require_relative 'remote_attr_accessor/base'
require_relative 'remote_attr_accessor/api'
require_relative 'remote_attr_accessor/attrs'
require_relative 'remote_attr_accessor/bulk'
require_relative 'remote_attr_accessor/version'
change version order
module RemoteAttrAccessor
end
require_relative 'remote_attr_accessor/version'
require_relative 'remote_attr_accessor/config'
require_relative 'remote_attr_accessor/base'
require_relative 'remote_attr_accessor/api'
require_relative 'remote_attr_accessor/attrs'
require_relative 'remote_attr_accessor/bulk'
|
# coding: utf-8
lib = File.expand_path('../lib', __FILE__)
$LOAD_PATH.unshift(lib) unless $LOAD_PATH.include?(lib)

# Gem specification for administrate-field-lat_lng 1.9.0.
Gem::Specification.new do |spec|
  spec.name = "administrate-field-lat_lng"
  spec.version = "1.9.0"
  spec.authors = ["Rich Daley"]
  spec.email = ["rich@fishpercolator.co.uk"]
  spec.summary = %q{Administrate field for latitude/longitude}
  spec.description = %q{Adds an Administrate::Field::LatLng for viewing & editing latitude/longitude fields on a map}
  spec.homepage = "https://github.com/fishpercolator/administrate-field-lat_lng"
  spec.license = "MIT"
  # Package every tracked file except tests/specs/features.
  spec.files = `git ls-files -z`.split("\x0").reject { |f| f.match(%r{^(test|spec|features)/}) }
  spec.executables = spec.files.grep(%r{^exe/}) { |f| File.basename(f) }
  spec.require_paths = ["lib"]
  spec.add_dependency 'administrate', '>= 0.3', '< 0.13'
  spec.add_dependency 'leaflet-rails', '~> 1.0'
  spec.add_development_dependency "bundler", "~> 1.11"
  spec.add_development_dependency "rake", "~> 10.0"
  spec.add_development_dependency "rspec", "~> 3.0"
  spec.add_development_dependency "pry", "~> 0.10.3"
  spec.add_development_dependency "erubis", "~> 2.7"
end
New gem version
# coding: utf-8
lib = File.expand_path('../lib', __FILE__)
$LOAD_PATH.unshift(lib) unless $LOAD_PATH.include?(lib)

# Gem specification for administrate-field-lat_lng 1.10.0.
Gem::Specification.new do |spec|
  spec.name = "administrate-field-lat_lng"
  spec.version = "1.10.0"
  spec.authors = ["Quinn Daley"]
  spec.email = ["quinn@fishpercolator.co.uk"]
  spec.summary = %q{Administrate field for latitude/longitude}
  spec.description = %q{Adds an Administrate::Field::LatLng for viewing & editing latitude/longitude fields on a map}
  spec.homepage = "https://github.com/fishpercolator/administrate-field-lat_lng"
  spec.license = "MIT"
  # Package every tracked file except tests/specs/features.
  spec.files = `git ls-files -z`.split("\x0").reject { |f| f.match(%r{^(test|spec|features)/}) }
  spec.executables = spec.files.grep(%r{^exe/}) { |f| File.basename(f) }
  spec.require_paths = ["lib"]
  spec.add_dependency 'administrate', '>= 0.3', '< 0.13'
  spec.add_dependency 'leaflet-rails', '~> 1.0'
  spec.add_development_dependency "bundler", "~> 1.11"
  spec.add_development_dependency "rake", "~> 10.0"
  spec.add_development_dependency "rspec", "~> 3.0"
  spec.add_development_dependency "pry", "~> 0.10.3"
  spec.add_development_dependency "erubis", "~> 2.7"
end
|
require 'logger'
require 'thor'
module ConfigCurator
# Thor class for the `curate` command.
class CLI < Thor
  default_task :install

  # Global verbosity flags shared by all commands.
  class_option :verbose, type: :boolean, aliases: %i(v)
  class_option :quiet, type: :boolean, aliases: %i(q)
  class_option :debug, type: :boolean

  # Installs the collection.
  # @param manifest [String] path to the manifest file to use
  # @return [Boolean] value of {Collection#install} or {Collection#install?}
  desc 'install', 'Installs all units in collection.'
  option :dryrun,
  type: :boolean, aliases: %i(n),
  desc: %q{Only simulate the install. Don't make any actual changes.}
  def install(manifest = 'manifest.yml')
    unless File.exist? manifest
      logger.fatal { "Manifest file '#{manifest}' does not exist." }
      return false
    end
    collection.load_manifest manifest
    # Dry runs only check installability; real runs perform the install.
    result = options[:dryrun] ? collection.install? : collection.install
    msg = install_message(result, options[:dryrun])
    result ? logger.info(msg) : logger.error(msg)
    result
  end

  no_commands do
    # Makes a collection object to use for the instance.
    # @return [Collection] the collection object
    def collection
      @collection ||= Collection.new logger: logger
    end

    # Logger instance to use.
    # @return [Logger] logger instance
    def logger
      @logger ||= Logger.new($stdout).tap do |log|
        log.progname = 'curate'
        log.formatter = proc do |severity, _, _, msg|
          "#{severity} -- #{msg}\n"
        end
        # Flag precedence: --debug > --verbose > --quiet > default (WARN).
        log.level = \
          if options[:debug]
            Logger::DEBUG
          elsif options[:verbose]
            Logger::INFO
          elsif options[:quiet]
            Logger::FATAL
          else
            Logger::WARN
          end
      end
    end
  end

  private

  # Builds the human-readable outcome message for #install.
  def install_message(result, dryrun)
    "Install #{'simulation ' if dryrun}" + \
      if result
        'completed without error.'
      elsif result.nil?
        'failed.'
      else
        'failed. No changes were made.'
      end
  end
end
end
Move log_level to private method.
require 'logger'
require 'thor'
module ConfigCurator
# Thor class for the `curate` command.
class CLI < Thor
  default_task :install

  # Global verbosity flags shared by all commands.
  class_option :verbose, type: :boolean, aliases: %i(v)
  class_option :quiet, type: :boolean, aliases: %i(q)
  class_option :debug, type: :boolean

  # Installs the collection.
  # @param manifest [String] path to the manifest file to use
  # @return [Boolean] value of {Collection#install} or {Collection#install?}
  desc 'install', 'Installs all units in collection.'
  option :dryrun,
  type: :boolean, aliases: %i(n),
  desc: %q{Only simulate the install. Don't make any actual changes.}
  def install(manifest = 'manifest.yml')
    unless File.exist? manifest
      logger.fatal { "Manifest file '#{manifest}' does not exist." }
      return false
    end
    collection.load_manifest manifest
    # Dry runs only check installability; real runs perform the install.
    result = options[:dryrun] ? collection.install? : collection.install
    msg = install_message(result, options[:dryrun])
    result ? logger.info(msg) : logger.error(msg)
    result
  end

  no_commands do
    # Makes a collection object to use for the instance.
    # @return [Collection] the collection object
    def collection
      @collection ||= Collection.new logger: logger
    end

    # Logger instance to use.
    # @return [Logger] logger instance
    def logger
      @logger ||= Logger.new($stdout).tap do |log|
        log.progname = 'curate'
        log.formatter = proc do |severity, _, _, msg|
          "#{severity} -- #{msg}\n"
        end
        log.level = log_level(options)
      end
    end
  end

  private

  # Maps the CLI flags to a Logger severity.
  # Flag precedence: --debug > --verbose > --quiet > default (WARN).
  def log_level(options)
    if options[:debug]
      Logger::DEBUG
    elsif options[:verbose]
      Logger::INFO
    elsif options[:quiet]
      Logger::FATAL
    else
      Logger::WARN
    end
  end

  # Builds the human-readable outcome message for #install.
  def install_message(result, dryrun)
    "Install #{'simulation ' if dryrun}" + \
      if result
        'completed without error.'
      elsif result.nil?
        'failed.'
      else
        'failed. No changes were made.'
      end
  end
end
end
|
module Restforce
  # :nodoc:
  module DB
    # Gem version string.
    VERSION = "2.2.2"
  end
end
Bump version to 2.2.3
module Restforce
  # :nodoc:
  module DB
    # Gem version string.
    VERSION = "2.2.3"
  end
end
|
# encoding: utf-8
#
# Copyright (c) 2002-2007 Minero Aoki
# 2008-2012 Minero Aoki, Kenshi Muto
#
# This program is free software.
# You can distribute or modify this program under the terms of
# the GNU LGPL, Lesser General Public License version 2.1.
#
require 'review/builder'
require 'review/htmlutils'
require 'review/textutils'
require 'nkf'
module ReVIEW
class IDGXMLBuilder < Builder
include TextUtils
include HTMLUtils
[:ttbold, :hint, :maru, :keytop, :labelref, :ref, :pageref, :balloon].each {|e| Compiler.definline(e) }
Compiler.defsingle(:dtp, 1)
Compiler.defblock(:insn, 0..1)
Compiler.defblock(:memo, 0..1)
Compiler.defblock(:tip, 0..1)
Compiler.defblock(:info, 0..1)
Compiler.defblock(:planning, 0..1)
Compiler.defblock(:best, 0..1)
Compiler.defblock(:important, 0..1)
Compiler.defblock(:security, 0..1)
Compiler.defblock(:caution, 0..1)
Compiler.defblock(:notice, 0..1)
Compiler.defblock(:point, 0..1)
Compiler.defblock(:shoot, 0..1)
Compiler.defblock(:reference, 0)
Compiler.defblock(:term, 0)
Compiler.defblock(:link, 0..1)
Compiler.defblock(:practice, 0)
Compiler.defblock(:expert, 0)
Compiler.defblock(:rawblock, 0)
# Markup emitted before each paragraph body.
def pre_paragraph
  '<p>'
end
# Markup emitted after each paragraph body.
def post_paragraph
  '</p>'
end
# File extension used for the generated output files.
def extname
  '.xml'
end
# Stores whether errors/warnings should be collected into the document
# output (no_error mode) instead of written to $stderr.
def builder_init(no_error = false)
  @no_error = no_error
end
private :builder_init
# Resets per-file state (section counters, warning/error buffers) and
# emits the XML prolog plus the root element opening tag.
def builder_init_file
  @warns = []
  @errors = []
  @section = 0
  @subsection = 0
  @subsubsection = 0
  @subsubsubsection = 0
  @noindent = nil
  @rootelement = "doc"
  @secttags = nil
  @tsize = nil
  @texblockequation = 0
  @texinlineequation = 0
  print %Q(<?xml version="1.0" encoding="UTF-8"?>\n)
  print %Q(<#{@rootelement} xmlns:aid="http://ns.adobe.com/AdobeInDesign/4.0/">)
  # When the "nolf" param is set, make #puts behave like #print so output
  # carries no line feeds — NOTE(review): a method-body `alias` rebinds the
  # method for all instances, not just this one; confirm that is intended.
  alias puts print unless ReVIEW.book.param["nolf"].nil?
  # "structuredxml" enables emission of nested <chapter>/<sect*> tags.
  @secttags = true unless ReVIEW.book.param["structuredxml"].nil?
end
private :builder_init_file
# Assembles the final document: error/warning report, accumulated body,
# closing tags for any still-open structural sections, and the root
# element terminator.
def result
  s = ""
  unless @secttags.nil?
    # Close the deepest sections first so the XML nests correctly.
    s += "</sect4>" if @subsubsubsection > 0
    s += "</sect3>" if @subsubsection > 0
    s += "</sect2>" if @subsection > 0
    s += "</sect>" if @section > 0
    s += "</chapter>" if @chapter.number > 0
  end
  messages() + @output.string + s + "</#{@rootelement}>\n"
end
# Records or reports a warning. In no_error mode the warning is collected
# for the report and echoed inline into the document; otherwise it goes
# straight to $stderr.
def warn(msg)
  if @no_error
    @warns.push [@location.filename, @location.lineno, msg]
    puts "----WARNING: #{escape_html(msg)}----"
  else
    $stderr.puts "#{@location}: warning: #{msg}"
  end
end
# Records or reports an error. In no_error mode the error is collected
# for the report and echoed inline into the document; otherwise it goes
# straight to $stderr.
def error(msg)
  if @no_error
    @errors.push [@location.filename, @location.lineno, msg]
    puts "----ERROR: #{escape_html(msg)}----"
  else
    $stderr.puts "#{@location}: error: #{msg}"
  end
end
# Combined error and warning report markup (errors first).
def messages
  error_messages + warning_messages
end
# HTML list of collected syntax errors; empty string when there are none.
def error_messages
  return '' if @errors.empty?
  items = @errors.map do |file, line, msg|
    "<li>#{escape_html(file)}:#{line}: #{escape_html(msg.to_s)}</li>\n"
  end
  "<h2>Syntax Errors</h2>\n<ul>\n#{items.join}</ul>\n"
end
# HTML list of collected warnings; empty string when there are none.
def warning_messages
  return '' if @warns.empty?
  items = @warns.map do |file, line, msg|
    "<li>#{escape_html(file)}:#{line}: #{escape_html(msg)}</li>\n"
  end
  "<h2>Warnings</h2>\n<ul>\n#{items.join}</ul>\n"
end
# Emits a heading at the given +level+ (1 = chapter .. 5 = sect4),
# maintaining the running section counters, closing any deeper open
# <sect*> tags (structuredxml mode) and prefixing the caption with its
# computed section number.
def headline(level, label, caption)
  prefix = ""
  case level
  when 1
    # New chapter: close all open section tags and reset every counter.
    unless @secttags.nil?
      print "</sect4>" if @subsubsubsection > 0
      print "</sect3>" if @subsubsection > 0
      print "</sect2>" if @subsection > 0
      print "</sect>" if @section > 0
    end
    print %Q(<chapter id="chap:#{@chapter.number}">) unless @secttags.nil?
    # Numbered chapters get the localized "Chapter N" prefix; non-numeric
    # chapter identifiers are used verbatim.
    if @chapter.number.to_s =~ /\A\d+$/
      prefix = "#{I18n.t("chapter", @chapter.number)}#{I18n.t("chapter_postfix")}"
    elsif @chapter.number.present?
      prefix = "#{@chapter.number}#{I18n.t("chapter_postfix")}"
    end
    @section = 0
    @subsection = 0
    @subsubsection = 0
    @subsubsubsection = 0
  when 2
    unless @secttags.nil?
      print "</sect4>" if @subsubsubsection > 0
      print "</sect3>" if @subsubsection > 0
      print "</sect2>" if @subsection > 0
      print "</sect>" if @section > 0
    end
    @section += 1
    print %Q(<sect id="sect:#{@chapter.number}.#{@section}">) unless @secttags.nil?
    # NOTE(review): this arm tests @chapter.present? while the deeper arms
    # test @chapter.number.present? — confirm the inconsistency is wanted.
    prefix = @chapter.present? ? "#{@chapter.number}.#{@section} " : ""
    @subsection = 0
    @subsubsection = 0
    @subsubsubsection = 0
  when 3
    unless @secttags.nil?
      print "</sect4>" if @subsubsubsection > 0
      print "</sect3>" if @subsubsection > 0
      print "</sect2>" if @subsection > 0
    end
    @subsection += 1
    print %Q(<sect2 id="sect:#{@chapter.number}.#{@section}.#{@subsection}">) unless @secttags.nil?
    prefix = @chapter.number.present? ? "#{@chapter.number}.#{@section}.#{@subsection}#{I18n.t("chapter_postfix")}" : ""
    @subsubsection = 0
    @subsubsubsection = 0
  when 4
    unless @secttags.nil?
      print "</sect4>" if @subsubsubsection > 0
      print "</sect3>" if @subsubsection > 0
    end
    @subsubsection += 1
    print %Q(<sect3 id="sect:#{@chapter.number}.#{@section}.#{@subsection}.#{@subsubsection}">) unless @secttags.nil?
    prefix = @chapter.number.present? ? "#{@chapter.number}.#{@section}.#{@subsection}.#{@subsubsection}#{I18n.t("chapter_postfix")}" : ""
    @subsubsubsection = 0
  when 5
    unless @secttags.nil?
      print "</sect4>" if @subsubsubsection > 0
    end
    @subsubsubsection += 1
    print %Q(<sect4 id="sect:#{@chapter.number}.#{@section}.#{@subsection}.#{@subsubsection}.#{@subsubsubsection}">) unless @secttags.nil?
    prefix = @chapter.number.present? ? "#{@chapter.number}.#{@section}.#{@subsection}.#{@subsubsection}.#{@subsubsubsection}#{I18n.t("chapter_postfix")}" : ""
  else
    raise "caption level too deep or unsupported: #{level}"
  end
  # Suppress the number prefix beyond the configured secnolevel.
  prefix = "" if (level.to_i > ReVIEW.book.param["secnolevel"])
  label = label.nil? ? "" : " id=\"#{label}\""
  # TOC caption: strip footnote markers and any inline tags.
  toccaption = escape_html(compile_inline(caption.gsub(/@<fn>\{.+?\}/, '')).gsub(/<[^>]+>/, ''))
  puts %Q(<title#{label} aid:pstyle="h#{level}">#{prefix}#{compile_inline(caption)}</title><?dtp level="#{level}" section="#{prefix}#{toccaption}"?>)
end
# Open an unordered list. An optional block supplies the nesting level;
# level 1 is rendered as a plain <ul> (no suffix), deeper levels as
# <ul2>, <ul3>, ...
def ul_begin
  level = block_given? ? yield : ""
  level = nil if level == 1
  # `level` can no longer be 1 here, so emit it directly — the former
  # inline `level == 1 ? nil : level` ternary was dead code.
  puts "<ul#{level}>"
end
def ul_item(lines)
puts %Q(<li aid:pstyle="ul-item">#{lines.join.chomp}</li>)
end
def ul_item_begin(lines)
print %Q(<li aid:pstyle="ul-item">#{lines.join.chomp})
end
def ul_item_end
puts "</li>"
end
def choice_single_begin
puts "<choice type='single'>"
end
def choice_multi_begin
puts "<choice type='multi'>"
end
def choice_single_end
puts "</choice>"
end
def choice_multi_end
puts "</choice>"
end
def ul_end
level = block_given? ? yield : ""
level = nil if level == 1
puts "</ul#{level}>"
end
def ol_begin
puts '<ol>'
end
def ol_item(lines, num)
puts %Q(<li aid:pstyle="ol-item" num="#{num}">#{lines.join.chomp}</li>)
end
def ol_end
puts '</ol>'
end
def dl_begin
puts '<dl>'
end
def dt(line)
puts "<dt>#{line}</dt>"
end
def dd(lines)
puts "<dd>#{lines.join.chomp}</dd>"
end
def dl_end
puts '</dl>'
end
# Emit a paragraph. A pending @noindent request (set by #noindent)
# produces a no-indent paragraph and is consumed; otherwise leading tabs
# on the first line become an inlist depth attribute.
def paragraph(lines)
  unless @noindent.nil?
    @noindent = nil
    puts %Q(<p aid:pstyle="noindent" noindent='1'>#{lines.join}</p>)
    return
  end
  if lines[0] =~ /\A(\t+)/
    puts %Q(<p inlist="#{$1.size}">#{lines.join.sub(/\A\t+/, "")}</p>)
  else
    puts "<p>#{lines.join}</p>"
  end
end
def read(lines)
if ReVIEW.book.param["deprecated-blocklines"].nil?
puts %Q[<lead>#{split_paragraph(lines).join}</lead>]
else
puts %Q[<p aid:pstyle="lead">#{lines.join}</p>]
end
end
alias :lead read
def inline_list(id)
chapter, id = extract_chapter_id(id)
if get_chap(chapter).nil?
"<span type='list'>#{I18n.t("list")}#{I18n.t("format_number_without_chapter", [chapter.list(id).number])}</span>"
else
"<span type='list'>#{I18n.t("list")}#{I18n.t("format_number", [get_chap(chapter), chapter.list(id).number])}</span>"
end
end
def list_header(id, caption)
puts %Q[<codelist>]
if get_chap.nil?
puts %Q[<caption>#{I18n.t("list")}#{I18n.t("format_number_without_chapter", [@chapter.list(id).number])}#{I18n.t("caption_prefix_idgxml")}#{compile_inline(caption)}</caption>]
else
puts %Q[<caption>#{I18n.t("list")}#{I18n.t("format_number", [get_chap, @chapter.list(id).number])}#{I18n.t("caption_prefix_idgxml")}#{compile_inline(caption)}</caption>]
end
end
def list_body(lines)
print %Q(<pre>)
no = 1
lines.each do |line|
unless ReVIEW.book.param["listinfo"].nil?
print "<listinfo line=\"#{no}\""
print " begin=\"1\"" if no == 1
print " end=\"#{no}\"" if no == lines.size
print ">"
end
print detab(line)
print "\n"
print "</listinfo>" unless ReVIEW.book.param["listinfo"].nil?
no += 1
end
puts "</pre></codelist>"
end
def emlist(lines, caption = nil)
quotedlist lines, 'emlist', caption
end
# Emphasized list with line numbers: prefix every line with a
# right-aligned lineno span, then delegate rendering to quotedlist.
def emlistnum(lines, caption = nil)
  numbered = lines.each_with_index.map do |line, idx|
    detab("<span type='lineno'>" + (idx + 1).to_s.rjust(2) + ": </span>" + line)
  end
  quotedlist numbered, 'emlistnum', caption
end
def listnum_body(lines)
print %Q(<pre>)
lines.each_with_index do |line, i|
print detab("<span type='lineno'>" + (i + 1).to_s.rjust(2) + ": </span>" + line)
print "\n"
end
puts "</pre></codelist>"
end
def cmd(lines, caption = nil)
quotedlist lines, 'cmd', caption
end
def quotedlist(lines, css_class, caption)
print %Q[<list type='#{css_class}'>]
puts "<caption aid:pstyle='#{css_class}-title'>#{compile_inline(caption)}</caption>" unless caption.nil?
print %Q[<pre>]
no = 1
lines.each do |line|
unless ReVIEW.book.param["listinfo"].nil?
print "<listinfo line=\"#{no}\""
print " begin=\"1\"" if no == 1
print " end=\"#{no}\"" if no == lines.size
print ">"
end
print detab(line)
print "\n"
print "</listinfo>" unless ReVIEW.book.param["listinfo"].nil?
no += 1
end
puts '</pre></list>'
end
private :quotedlist
def quote(lines)
if ReVIEW.book.param["deprecated-blocklines"].nil?
blocked_lines = split_paragraph(lines)
puts "<quote>#{blocked_lines.join("")}</quote>"
else
puts "<quote>#{lines.join("\n")}</quote>"
end
end
def inline_table(id)
chapter, id = extract_chapter_id(id)
if get_chap(chapter).nil?
"<span type='table'>#{I18n.t("table")}#{I18n.t("format_number_without_chapter", [chapter.table(id).number])}</span>"
else
"<span type='table'>#{I18n.t("table")}#{I18n.t("format_number", [get_chap(chapter), chapter.table(id).number])}</span>"
end
end
def inline_img(id)
chapter, id = extract_chapter_id(id)
if get_chap(chapter).nil?
"<span type='image'>#{I18n.t("image")}#{I18n.t("format_number_without_chapter", [chapter.image(id).number])}</span>"
else
"<span type='image'>#{I18n.t("image")}#{I18n.t("format_number", [get_chap(chapter), chapter.image(id).number])}</span>"
end
end
# Normalize one metric entry ("key=value", value possibly quoted with
# ' or ") into a key="value" XML attribute string.
def handle_metric(str)
  key, value = str.split('=', 2)
  unquoted = value.sub(/\A["']/, '').sub(/["']\Z/, '')
  %Q|#{key}="#{unquoted}"|
end
# Join normalized metric attributes into a single attribute string with
# a leading space (so it can be appended directly after a tag name).
def result_metric(array)
  " " + array.join(" ")
end
def image_image(id, caption, metric=nil)
metrics = parse_metric("idgxml", metric)
puts "<img>"
puts %Q[<Image href="file://#{@chapter.image(id).path.sub(/\A.\//, "")}"#{metrics} />]
image_header id, caption
puts "</img>"
end
# Emit a placeholder image block: warn when the expected EPS file is
# missing (path layout depends on subdirmode/singledirmode), then render
# the fallback text lines inside <img><pre aid:pstyle="dummyimage">.
def image_dummy(id, caption, lines)
  if ReVIEW.book.param["subdirmode"]
    warn "image file not exist: images/#{@chapter.id}/#{id}.eps" unless File.exist?("images/#{@chapter.id}/#{id}.eps")
  elsif ReVIEW.book.param["singledirmode"]
    # Bug fix: the warning previously named images/<chapter>/<id>.eps
    # while the existence check used images/<id>.eps; the message now
    # matches the path actually checked.
    warn "image file not exist: images/#{id}.eps" unless File.exist?("images/#{id}.eps")
  else
    warn "image file not exist: images/#{@chapter.id}-#{id}.eps" unless File.exist?("images/#{@chapter.id}-#{id}.eps")
  end
  puts "<img>"
  print %Q[<pre aid:pstyle="dummyimage">]
  lines.each do |line|
    print detab(line)
    print "\n"
  end
  print %Q[</pre>]
  image_header id, caption
  puts "</img>"
end
def image_header(id, caption)
if get_chap.nil?
puts %Q[<caption>#{I18n.t("image")}#{I18n.t("format_number_without_chapter", [@chapter.image(id).number])}#{I18n.t("caption_prefix_idgxml")}#{compile_inline(caption)}</caption>]
else
puts %Q[<caption>#{I18n.t("image")}#{I18n.t("format_number", [get_chap, @chapter.image(id).number])}#{I18n.t("caption_prefix_idgxml")}#{compile_inline(caption)}</caption>]
end
end
def texequation(lines)
@texblockequation += 1
puts %Q[<replace idref="texblock-#{@texblockequation}">]
puts '<pre>'
puts "#{lines.join("\n")}"
puts '</pre>'
puts '</replace>'
end
def table(lines, id = nil, caption = nil)
tablewidth = nil
col = 0
unless ReVIEW.book.param["tableopt"].nil?
tablewidth = ReVIEW.book.param["tableopt"].split(",")[0].to_f / 0.351 # mm -> pt
end
puts "<table>"
rows = []
sepidx = nil
lines.each_with_index do |line, idx|
if /\A[\=\-]{12}/ =~ line
sepidx ||= idx
next
end
if tablewidth.nil?
rows.push(line.gsub(/\t\.\t/, "\t\t").gsub(/\t\.\.\t/, "\t.\t").gsub(/\t\.\Z/, "\t").gsub(/\t\.\.\Z/, "\t.").gsub(/\A\./, ""))
else
rows.push(line.gsub(/\t\.\t/, "\tDUMMYCELLSPLITTER\t").gsub(/\t\.\.\t/, "\t.\t").gsub(/\t\.\Z/, "\tDUMMYCELLSPLITTER").gsub(/\t\.\.\Z/, "\t.").gsub(/\A\./, ""))
end
_col = rows[rows.length - 1].split(/\t/).length
col = _col if _col > col
end
cellwidth = []
unless tablewidth.nil?
if @tsize.nil?
col.times {|n| cellwidth[n] = tablewidth / col }
else
cellwidth = @tsize.split(/\s*,\s*/)
totallength = 0
cellwidth.size.times do |n|
cellwidth[n] = cellwidth[n].to_f / 0.351 # mm -> pt
totallength = totallength + cellwidth[n]
warn "total length exceeds limit for table: #{id}" if totallength > tablewidth
end
if cellwidth.size < col
cw = (tablewidth - totallength) / (col - cellwidth.size)
warn "auto cell sizing exceeds limit for table: #{id}" if cw <= 0
for i in cellwidth.size..(col - 1)
cellwidth[i] = cw
end
end
end
end
begin
table_header id, caption unless caption.nil?
rescue KeyError => err
error "no such table: #{id}"
end
return if rows.empty?
if tablewidth.nil?
print "<tbody>"
else
print %Q[<tbody xmlns:aid5="http://ns.adobe.com/AdobeInDesign/5.0/" aid:table="table" aid:trows="#{rows.length}" aid:tcols="#{col}">]
end
if sepidx
sepidx.times do |y|
if tablewidth.nil?
puts %Q[<tr type="header">#{rows.shift}</tr>]
else
i = 0
rows.shift.split(/\t/).each_with_index do |cell, x|
print %Q[<td xyh="#{x + 1},#{y + 1},#{sepidx}" aid:table="cell" aid:theader="1" aid:crows="1" aid:ccols="1" aid:ccolwidth="#{sprintf("%.13f", cellwidth[i])}">#{cell.sub("DUMMYCELLSPLITTER", "")}</td>]
i += 1
end
end
end
end
trputs(tablewidth, rows, cellwidth, sepidx)
puts "</tbody></table>"
@tsize = nil
end
# Emit the body rows of a table.
# - tablewidth nil: plain <tr> rows; the final row is tagged
#   type="lastline" (NOTE: `rows` is mutated via pop).
# - tablewidth set: InDesign cell markup; each cell carries its grid
#   position (column x, row y offset by the header-row count sepidx) and
#   the precomputed column width from `cellwidth`.
def trputs(tablewidth, rows, cellwidth, sepidx)
sepidx = 0 if sepidx.nil?
if tablewidth.nil?
# Pull the last row off so it can receive the "lastline" styling hook.
lastline = rows.pop
rows.each {|row| puts %Q[<tr>#{row}</tr>] }
puts %Q[<tr type="lastline">#{lastline}</tr>] unless lastline.nil?
else
rows.each_with_index do |row, y|
i = 0
row.split(/\t/).each_with_index do |cell, x|
# DUMMYCELLSPLITTER is the empty-cell placeholder inserted by #table;
# strip it when emitting the cell content.
print %Q[<td xyh="#{x + 1},#{y + 1 + sepidx},#{sepidx}" aid:table="cell" aid:crows="1" aid:ccols="1" aid:ccolwidth="#{sprintf("%.13f", cellwidth[i])}">#{cell.sub("DUMMYCELLSPLITTER", "")}</td>]
i += 1
end
end
end
end
def table_header(id, caption)
if get_chap.nil?
puts %Q[<caption>#{I18n.t("table")}#{I18n.t("format_number_without_chapter", [@chapter.table(id).number])}#{I18n.t("caption_prefix_idgxml")}#{compile_inline(caption)}</caption>]
else
puts %Q[<caption>#{I18n.t("table")}#{I18n.t("format_number", [get_chap, @chapter.table(id).number])}#{I18n.t("caption_prefix_idgxml")}#{compile_inline(caption)}</caption>]
end
end
def table_begin(ncols)
end
def tr(rows)
puts %Q[<tr>#{rows.join("\t")}</tr>]
end
def th(str)
%Q[<?dtp tablerow header?>#{str}]
end
def td(str)
str
end
def table_end
print "<?dtp tablerow last?>"
end
def comment(str)
print %Q(<!-- [Comment] #{escape_html(str)} -->)
end
def footnote(id, str)
# see inline_fn
end
def inline_fn(id)
%Q[<footnote>#{compile_inline(@chapter.footnote(id).content.strip)}</footnote>]
end
def compile_ruby(base, ruby)
%Q[<GroupRuby><aid:ruby xmlns:aid="http://ns.adobe.com/AdobeInDesign/3.0/"><aid:rb>#{escape_html(base.sub(/\A\s+/, "").sub(/\s+$/, ""))}</aid:rb><aid:rt>#{escape_html(ruby.sub(/\A\s+/, "").sub(/\s+$/, ""))}</aid:rt></aid:ruby></GroupRuby>]
end
def compile_kw(word, alt)
'<keyword>' +
if alt
then escape_html("#{word}(#{alt.strip})")
else escape_html(word)
end +
'</keyword>' +
%Q[<index value="#{escape_html(word)}" />] +
if alt
alt.split(/\s*,\s*/).collect! {|e| %Q[<index value="#{escape_html(e.strip)}" />] }.join
else
""
end
end
# Render a hyperlink. The URL (and the label, when given) are
# HTML-escaped so characters such as & or < cannot break the generated
# XML attribute/content — previously both were interpolated raw.
def compile_href(url, label)
  escaped_url = escape_html(url)
  %Q[<a linkurl='#{escaped_url}'>#{label.nil? ? escaped_url : escape_html(label)}</a>]
end
def inline_sup(str)
%Q[<sup>#{escape_html(str)}</sup>]
end
def inline_sub(str)
%Q[<sub>#{escape_html(str)}</sub>]
end
def inline_raw(str)
%Q[#{super(str).gsub("\\n", "\n")}]
end
def inline_hint(str)
if ReVIEW.book.param["nolf"].nil?
%Q[\n<hint>#{escape_html(str)}</hint>]
else
%Q[<hint>#{escape_html(str)}</hint>]
end
end
# Map a digit or single ASCII letter to the character reference of its
# enclosed ("maru") Unicode form: digits -> U+2460.., uppercase ->
# U+24B6.., lowercase -> U+24D0... The rescue branches keep the Ruby 1.8
# fallback where String#[] returned an Integer.
def inline_maru(str)
  case str
  when /\A\d+\Z/
    sprintf("&#x%x;", 9311 + str.to_i)
  when /\A[A-Z]\Z/
    begin
      sprintf("&#x%x;", 9398 + str.codepoints.to_a[0] - 65)
    rescue NoMethodError
      sprintf("&#x%x;", 9398 + str[0] - 65)
    end
  when /\A[a-z]\Z/
    begin
      sprintf("&#x%x;", 9392 + str.codepoints.to_a[0] - 65)
    rescue NoMethodError
      sprintf("&#x%x;", 9392 + str[0] - 65)
    end
  else
    raise "can't parse maru: #{str}"
  end
end
def inline_idx(str)
%Q(#{escape_html(str)}<index value="#{escape_html(str)}" />)
end
def inline_hidx(str)
%Q(<index value="#{escape_html(str)}" />)
end
def inline_ami(str)
%Q(<ami>#{escape_html(str)}</ami>)
end
def inline_i(str)
%Q(<i>#{escape_html(str)}</i>)
end
def inline_b(str)
%Q(<b>#{escape_html(str)}</b>)
end
def inline_tt(str)
%Q(<tt>#{escape_html(str)}</tt>)
end
# Bold teletype text plus an index entry derived from the escaped text:
# tags stripped, '*' replaced by a placeholder, single quotes removed
# (so the value is safe inside the single-quoted attribute).
def inline_ttb(str)
  escaped = escape_html(str)
  index = escaped.gsub(/<.*?>/, "").gsub(/\*/, "ESCAPED_ASTERISK").gsub(/'/, "")
  %Q(<tt style='bold'>#{escaped}</tt><index value='#{index}' />)
end
alias :inline_ttbold inline_ttb
def inline_tti(str)
%Q(<tt style='italic'>#{escape_html(str)}</tt>)
end
def inline_u(str)
%Q(<underline>#{escape_html(str)}</underline>)
end
def inline_icon(id)
begin
%Q[<Image href="file://#{@chapter.image(id).path.sub(/\A\.\//, "")}" type="inline" />]
rescue
warn "no such icon image: #{id}"
""
end
end
def inline_bou(str)
%Q[<bou>#{escape_html(str)}</bou>]
end
def inline_keytop(str)
%Q[<keytop>#{escape_html(str)}</keytop>]
end
def inline_labelref(idref)
%Q[<ref idref='#{escape_html(idref)}'>「●● #{escape_html(idref)}」</ref>] # FIXME:節名とタイトルも込みで要出力
end
alias inline_ref inline_labelref
def inline_pageref(idref)
%Q[<pageref idref='#{escape_html(idref)}'>●●</pageref>] # ページ番号を参照
end
def inline_balloon(str)
%Q[<balloon>#{escape_html(str).gsub(/@maru\[(\d+)\]/) {|m| inline_maru($1)}}</balloon>]
end
# Emit a numeric character reference for the given hex codepoint string.
def inline_uchar(str)
  "&#x" + str + ";"
end
def inline_m(str)
@texinlineequation += 1
%Q[<replace idref="texinline-#{@texinlineequation}"><pre>#{escape_html(str)}</pre></replace>]
end
def noindent
@noindent = true
end
def linebreak
# FIXME:pが閉じちゃってるので一度戻らないといけないが、難しい…。
puts "<br />"
end
def pagebreak
puts "<pagebreak />"
end
def nonum_begin(level, label, caption)
puts %Q[<title aid:pstyle="h#{level}">#{compile_inline(caption)}</title><?dtp level="#{level}" section="#{escape_html(compile_inline(caption))}"?>]
end
def nonum_end(level)
end
def circle_begin(level, label, caption)
puts %Q[<title aid:pstyle="smallcircle">•#{compile_inline(caption)}</title>]
end
def circle_end(level)
end
def common_column_begin(type, caption)
print "<#{type}column>"
puts %Q[<title aid:pstyle="#{type}column-title">#{compile_inline(caption)}</title>]
end
def common_column_end(type)
puts "</#{type}column>"
end
def column_begin(level, label, caption)
common_column_begin("", caption)
end
def column_end(level)
common_column_end("")
end
def xcolumn_begin(level, label, caption)
common_column_begin("x", caption)
end
def xcolumn_end(level)
common_column_end("x")
end
def world_begin(level, label, caption)
common_column_begin("world", caption)
end
def world_end(level)
common_column_end("world")
end
def hood_begin(level, label, caption)
common_column_begin("hood", caption)
end
def hood_end(level)
common_column_end("hood")
end
def edition_begin(level, label, caption)
common_column_begin("edition", caption)
end
def edition_end(level)
common_column_end("edition")
end
def insideout_begin(level, label, caption)
common_column_begin("insideout", caption)
end
def insideout_end(level)
common_column_end("insideout")
end
def ref_begin(level, label, caption)
if !label.nil?
puts "<reference id='#{label}'>"
else
puts "<reference>"
end
end
def ref_end(level)
puts "</reference>"
end
def sup_begin(level, label, caption)
if !label.nil?
puts "<supplement id='#{label}'>"
else
puts "<supplement>"
end
end
def sup_end(level)
puts "</supplement>"
end
def flushright(lines)
if ReVIEW.book.param["deprecated-blocklines"].nil?
puts split_paragraph(lines).join.gsub("<p>", "<p align='right'>")
else
puts "<p align='right'>#{lines.join("\n")}</p>"
end
end
def centering(lines)
puts split_paragraph(lines).join.gsub("<p>", "<p align='center'>")
end
def captionblock(type, lines, caption, specialstyle = nil)
print "<#{type}>"
style = specialstyle.nil? ? "#{type}-title" : specialstyle
puts "<title aid:pstyle='#{style}'>#{compile_inline(caption)}</title>" unless caption.nil?
if ReVIEW.book.param["deprecated-blocklines"].nil?
blocked_lines = split_paragraph(lines)
puts "#{blocked_lines.join}</#{type}>"
else
puts "#{lines.join("\n")}</#{type}>"
end
end
def note(lines, caption = nil)
captionblock("note", lines, caption)
end
def memo(lines, caption = nil)
captionblock("memo", lines, caption)
end
def tip(lines, caption = nil)
captionblock("tip", lines, caption)
end
def info(lines, caption = nil)
captionblock("info", lines, caption)
end
def planning(lines, caption = nil)
captionblock("planning", lines, caption)
end
def best(lines, caption = nil)
captionblock("best", lines, caption)
end
def important(lines, caption = nil)
captionblock("important", lines, caption)
end
def security(lines, caption = nil)
captionblock("security", lines, caption)
end
def caution(lines, caption = nil)
captionblock("caution", lines, caption)
end
def term(lines)
captionblock("term", lines, nil)
end
def link(lines, caption = nil)
captionblock("link", lines, caption)
end
def notice(lines, caption = nil)
if caption.nil?
captionblock("notice", lines, nil)
else
captionblock("notice-t", lines, caption, "notice-title")
end
end
def point(lines, caption = nil)
if caption.nil?
captionblock("point", lines, nil)
else
captionblock("point-t", lines, caption, "point-title")
end
end
def shoot(lines, caption = nil)
if caption.nil?
captionblock("shoot", lines, nil)
else
captionblock("shoot-t", lines, caption, "shoot-title")
end
end
def reference(lines)
captionblock("reference", lines, nil)
end
def practice(lines)
captionblock("practice", lines, nil)
end
def expert(lines)
captionblock("expert", lines, nil)
end
def syntaxblock(type, lines, caption)
if caption.nil?
puts %Q[<#{type}>]
else
titleopentag = %Q[caption aid:pstyle="#{type}-title"]
titleclosetag = "caption"
if type == "insn"
titleopentag = %Q[floattitle type="insn"]
titleclosetag = "floattitle"
end
puts %Q[<#{type}><#{titleopentag}>#{compile_inline(caption)}</#{titleclosetag}>]
end
no = 1
lines.each do |line|
unless ReVIEW.book.param["listinfo"].nil?
print %Q[<listinfo line="#{no}"]
print %Q[ begin="1"] if no == 1
print %Q[ end="#{no}"] if no == lines.size
print %Q[>]
end
print detab(line)
print "\n"
print "</listinfo>" unless ReVIEW.book.param["listinfo"].nil?
no += 1
end
puts "</#{type}>"
end
def insn(lines, caption = nil)
syntaxblock("insn", lines, caption)
end
def box(lines, caption = nil)
syntaxblock("box", lines, caption)
end
def indepimage(id, caption=nil, metric=nil)
metrics = parse_metric("idgxml", metric)
puts "<img>"
begin
puts %Q[<Image href="file://#{@chapter.image(id).path.sub(/\A\.\//, "")}"#{metrics} />]
rescue
warn %Q[no such image: #{id}]
end
puts %Q[<caption>#{compile_inline(caption)}</caption>] if !caption.nil? && !caption.empty?
puts "</img>"
end
alias :numberlessimage indepimage
# Emit an anchor label element for the given id.
# FIXME: marker carried over from the original implementation.
def label(id)
  print %Q(<label id='#{id}' />)
end
# Record a pending table column-width spec (comma-separated values in
# mm); #table reads it for cell sizing and then resets @tsize to nil.
def tsize(str)
@tsize = str
end
# Emit a DTP processing instruction with the given payload.
def dtp(str)
  print "<?dtp #{str} ?>"
end
# Emit a horizontal rule.
def hr
  print %Q(<hr />)
end
# Emit a <bpo> element containing the given lines joined by newlines.
def bpo(lines)
  puts "<bpo>#{lines.join("\n")}</bpo>"
end
# Inline DTP processing instruction (returned, not printed).
def inline_dtp(str)
  %Q(<?dtp #{str} ?>)
end
# Inline code span: HTML-escaped content in a <tt type='inline-code'>.
def inline_code(str)
  "<tt type='inline-code'>" + escape_html(str) + "</tt>"
end
# Inline line break: rendered as a bare newline in the XML stream.
# (str, the inline command's content, is intentionally ignored.)
def inline_br(str)
"\n"
end
# Emit raw (already-XML) lines: undo the parser's HTML entity escaping
# and print the lines verbatim, with no trailing newline after the last
# line. The original code had been corrupted into identity replacements
# (e.g. gsub("<", "<")); this restores decoding of &lt; &gt; &quot;
# &amp;. &amp; is decoded last so doubly-escaped entities stay escaped.
def rawblock(lines)
  lines.each_with_index do |l, idx|
    print l.gsub("&lt;", "<").gsub("&gt;", ">").gsub("&quot;", "\"").gsub("&amp;", "&")
    print "\n" unless idx == lines.length - 1
  end
end
# Plain-text hook: pass the text through untouched (no escaping at this
# layer).
def text(str)
str
end
# Chapter reference: renders "<pre><number><mid><title><post>" where the
# three decoration strings default to ["", "「", "」"] and may be
# overridden by the "chapref" parameter (comma-separated, exactly three
# fields). Wraps the result in a <link> when "chapterlink" is set, and
# falls back to plain text for an unknown chapter id.
def inline_chapref(id)
  chs = ["", "「", "」"]
  unless ReVIEW.book.param["chapref"].nil?
    _chs = convert_inencoding(ReVIEW.book.param["chapref"],
                              ReVIEW.book.param["inencoding"]).split(",")
    if _chs.size != 3
      error "--chapsplitter must have exactly 3 parameters with comma."
    else
      chs = _chs
    end
    # (removed an empty trailing `else` branch — dead code)
  end
  s = "#{chs[0]}#{@chapter.env.chapter_index.number(id)}#{chs[1]}#{@chapter.env.chapter_index.title(id)}#{chs[2]}"
  if ReVIEW.book.param["chapterlink"]
    %Q(<link href="#{id}">#{s}</link>)
  else
    s
  end
rescue KeyError
  error "unknown chapter: #{id}"
  nofunc_text("[UnknownChapter:#{id}]")
end
def inline_chap(id)
if ReVIEW.book.param["chapterlink"]
%Q(<link href="#{id}">#{@chapter.env.chapter_index.number(id)}</link>)
else
@chapter.env.chapter_index.number(id)
end
rescue KeyError
error "unknown chapter: #{id}"
nofunc_text("[UnknownChapter:#{id}]")
end
def inline_title(id)
if ReVIEW.book.param["chapterlink"]
%Q(<link href="#{id}">#{@chapter.env.chapter_index.title(id)}</link>)
else
@chapter.env.chapter_index.title(id)
end
rescue KeyError
error "unknown chapter: #{id}"
nofunc_text("[UnknownChapter:#{id}]")
end
def source_header(caption)
puts %Q[<source>]
puts %Q[<caption>#{compile_inline(caption)}</caption>]
end
def source_body(lines)
puts %Q[<pre>]
lines.each do |line|
print detab(line)
print "\n"
end
puts %Q[</pre></source>]
end
def bibpaper(lines, id, caption)
bibpaper_header id, caption
unless lines.empty?
bibpaper_bibpaper id, caption, lines
end
puts %Q(</bibitem>)
end
def bibpaper_header(id, caption)
puts %Q(<bibitem id="bib-#{id}">)
puts "<caption><span type='bibno'>[#{@chapter.bibpaper(id).number}] </span>#{compile_inline(caption)}</caption>"
end
def bibpaper_bibpaper(id, caption, lines)
print split_paragraph(lines).join("")
end
def inline_bib(id)
%Q(<span type='bibref' idref='#{id}'>[#{@chapter.bibpaper(id).number}]</span>)
end
def inline_recipe(id)
# FIXME
%Q(<recipe idref="#{escape_html(id)}">[XXX]「#{escape_html(id)}」 p.XX</recipe>)
end
def nofunc_text(str)
escape_html(str)
end
# Image file extension this builder expects.
def image_ext
  'eps'
end
end
end # module ReVIEW
Escape the href URL in IDGXMLBuilder.
# encoding: utf-8
#
# Copyright (c) 2002-2007 Minero Aoki
# 2008-2012 Minero Aoki, Kenshi Muto
#
# This program is free software.
# You can distribute or modify this program under the terms of
# the GNU LGPL, Lesser General Public License version 2.1.
#
require 'review/builder'
require 'review/htmlutils'
require 'review/textutils'
require 'nkf'
module ReVIEW
class IDGXMLBuilder < Builder
include TextUtils
include HTMLUtils
# Register IDGXML-specific syntax with the Compiler:
# inline commands usable as @<name>{...}
[:ttbold, :hint, :maru, :keytop, :labelref, :ref, :pageref, :balloon].each {|e| Compiler.definline(e) }
# //dtp{...} takes exactly one argument
Compiler.defsingle(:dtp, 1)
# block commands: 0..1 means an optional caption argument, 0 means none
Compiler.defblock(:insn, 0..1)
Compiler.defblock(:memo, 0..1)
Compiler.defblock(:tip, 0..1)
Compiler.defblock(:info, 0..1)
Compiler.defblock(:planning, 0..1)
Compiler.defblock(:best, 0..1)
Compiler.defblock(:important, 0..1)
Compiler.defblock(:security, 0..1)
Compiler.defblock(:caution, 0..1)
Compiler.defblock(:notice, 0..1)
Compiler.defblock(:point, 0..1)
Compiler.defblock(:shoot, 0..1)
Compiler.defblock(:reference, 0)
Compiler.defblock(:term, 0)
Compiler.defblock(:link, 0..1)
Compiler.defblock(:practice, 0)
Compiler.defblock(:expert, 0)
Compiler.defblock(:rawblock, 0)
# Opening tag used when wrapping paragraph content.
def pre_paragraph
  "<p>"
end
# Closing tag used when wrapping paragraph content.
def post_paragraph
  "</p>"
end
# File extension for generated output files.
def extname
  ".xml"
end
# When no_error is true, warnings/errors are collected and echoed into
# the output instead of being written to stderr (see #warn and #error).
def builder_init(no_error = false)
@no_error = no_error
end
private :builder_init
# Per-file initialization: reset all report/section state and emit the
# XML prolog plus the opening root element.
def builder_init_file
@warns = []
@errors = []
# Section counters track currently open <sect*> elements per level.
@section = 0
@subsection = 0
@subsubsection = 0
@subsubsubsection = 0
@noindent = nil
@rootelement = "doc"
@secttags = nil
@tsize = nil
@texblockequation = 0
@texinlineequation = 0
print %Q(<?xml version="1.0" encoding="UTF-8"?>\n)
print %Q(<#{@rootelement} xmlns:aid="http://ns.adobe.com/AdobeInDesign/4.0/">)
# NOTE(review): `alias` here rewires puts -> print for the whole class
# the first time this runs with the "nolf" parameter set (suppressing
# newlines from then on); confirm this one-way switch is intended.
alias puts print unless ReVIEW.book.param["nolf"].nil?
# "structuredxml" enables emission of structured <chapter>/<sect*> tags.
@secttags = true unless ReVIEW.book.param["structuredxml"].nil?
end
private :builder_init_file
def result
s = ""
unless @secttags.nil?
s += "</sect4>" if @subsubsubsection > 0
s += "</sect3>" if @subsubsection > 0
s += "</sect2>" if @subsection > 0
s += "</sect>" if @section > 0
s += "</chapter>" if @chapter.number > 0
end
messages() + @output.string + s + "</#{@rootelement}>\n"
end
def warn(msg)
if @no_error
@warns.push [@location.filename, @location.lineno, msg]
puts "----WARNING: #{escape_html(msg)}----"
else
$stderr.puts "#{@location}: warning: #{msg}"
end
end
def error(msg)
if @no_error
@errors.push [@location.filename, @location.lineno, msg]
puts "----ERROR: #{escape_html(msg)}----"
else
$stderr.puts "#{@location}: error: #{msg}"
end
end
def messages
error_messages() + warning_messages()
end
def error_messages
return '' if @errors.empty?
"<h2>Syntax Errors</h2>\n" +
"<ul>\n" +
@errors.map {|file, line, msg|
"<li>#{escape_html(file)}:#{line}: #{escape_html(msg.to_s)}</li>\n"
}.join('') +
"</ul>\n"
end
def warning_messages
return '' if @warns.empty?
"<h2>Warnings</h2>\n" +
"<ul>\n" +
@warns.map {|file, line, msg|
"<li>#{escape_html(file)}:#{line}: #{escape_html(msg)}</li>\n"
}.join('') +
"</ul>\n"
end
def headline(level, label, caption)
prefix = ""
case level
when 1
unless @secttags.nil?
print "</sect4>" if @subsubsubsection > 0
print "</sect3>" if @subsubsection > 0
print "</sect2>" if @subsection > 0
print "</sect>" if @section > 0
end
print %Q(<chapter id="chap:#{@chapter.number}">) unless @secttags.nil?
if @chapter.number.to_s =~ /\A\d+$/
prefix = "#{I18n.t("chapter", @chapter.number)}#{I18n.t("chapter_postfix")}"
elsif @chapter.number.present?
prefix = "#{@chapter.number}#{I18n.t("chapter_postfix")}"
end
@section = 0
@subsection = 0
@subsubsection = 0
@subsubsubsection = 0
when 2
unless @secttags.nil?
print "</sect4>" if @subsubsubsection > 0
print "</sect3>" if @subsubsection > 0
print "</sect2>" if @subsection > 0
print "</sect>" if @section > 0
end
@section += 1
print %Q(<sect id="sect:#{@chapter.number}.#{@section}">) unless @secttags.nil?
prefix = @chapter.present? ? "#{@chapter.number}.#{@section} " : ""
@subsection = 0
@subsubsection = 0
@subsubsubsection = 0
when 3
unless @secttags.nil?
print "</sect4>" if @subsubsubsection > 0
print "</sect3>" if @subsubsection > 0
print "</sect2>" if @subsection > 0
end
@subsection += 1
print %Q(<sect2 id="sect:#{@chapter.number}.#{@section}.#{@subsection}">) unless @secttags.nil?
prefix = @chapter.number.present? ? "#{@chapter.number}.#{@section}.#{@subsection}#{I18n.t("chapter_postfix")}" : ""
@subsubsection = 0
@subsubsubsection = 0
when 4
unless @secttags.nil?
print "</sect4>" if @subsubsubsection > 0
print "</sect3>" if @subsubsection > 0
end
@subsubsection += 1
print %Q(<sect3 id="sect:#{@chapter.number}.#{@section}.#{@subsection}.#{@subsubsection}">) unless @secttags.nil?
prefix = @chapter.number.present? ? "#{@chapter.number}.#{@section}.#{@subsection}.#{@subsubsection}#{I18n.t("chapter_postfix")}" : ""
@subsubsubsection = 0
when 5
unless @secttags.nil?
print "</sect4>" if @subsubsubsection > 0
end
@subsubsubsection += 1
print %Q(<sect4 id="sect:#{@chapter.number}.#{@section}.#{@subsection}.#{@subsubsection}.#{@subsubsubsection}">) unless @secttags.nil?
prefix = @chapter.number.present? ? "#{@chapter.number}.#{@section}.#{@subsection}.#{@subsubsection}.#{@subsubsubsection}#{I18n.t("chapter_postfix")}" : ""
else
raise "caption level too deep or unsupported: #{level}"
end
prefix = "" if (level.to_i > ReVIEW.book.param["secnolevel"])
label = label.nil? ? "" : " id=\"#{label}\""
toccaption = escape_html(compile_inline(caption.gsub(/@<fn>\{.+?\}/, '')).gsub(/<[^>]+>/, ''))
puts %Q(<title#{label} aid:pstyle="h#{level}">#{prefix}#{compile_inline(caption)}</title><?dtp level="#{level}" section="#{prefix}#{toccaption}"?>)
end
# Open an unordered list. An optional block supplies the nesting level;
# level 1 is rendered as a plain <ul> (no suffix), deeper levels as
# <ul2>, <ul3>, ...
def ul_begin
  level = block_given? ? yield : ""
  level = nil if level == 1
  # `level` can no longer be 1 here, so emit it directly — the former
  # inline `level == 1 ? nil : level` ternary was dead code.
  puts "<ul#{level}>"
end
def ul_item(lines)
puts %Q(<li aid:pstyle="ul-item">#{lines.join.chomp}</li>)
end
def ul_item_begin(lines)
print %Q(<li aid:pstyle="ul-item">#{lines.join.chomp})
end
def ul_item_end
puts "</li>"
end
def choice_single_begin
puts "<choice type='single'>"
end
def choice_multi_begin
puts "<choice type='multi'>"
end
def choice_single_end
puts "</choice>"
end
def choice_multi_end
puts "</choice>"
end
def ul_end
level = block_given? ? yield : ""
level = nil if level == 1
puts "</ul#{level}>"
end
def ol_begin
puts '<ol>'
end
def ol_item(lines, num)
puts %Q(<li aid:pstyle="ol-item" num="#{num}">#{lines.join.chomp}</li>)
end
def ol_end
puts '</ol>'
end
def dl_begin
puts '<dl>'
end
def dt(line)
puts "<dt>#{line}</dt>"
end
def dd(lines)
puts "<dd>#{lines.join.chomp}</dd>"
end
def dl_end
puts '</dl>'
end
def paragraph(lines)
if @noindent.nil?
if lines[0] =~ /\A(\t+)/
puts %Q(<p inlist="#{$1.size}">#{lines.join('').sub(/\A\t+/, "")}</p>)
else
puts "<p>#{lines.join}</p>"
end
else
puts %Q(<p aid:pstyle="noindent" noindent='1'>#{lines.join}</p>)
@noindent = nil
end
end
def read(lines)
if ReVIEW.book.param["deprecated-blocklines"].nil?
puts %Q[<lead>#{split_paragraph(lines).join}</lead>]
else
puts %Q[<p aid:pstyle="lead">#{lines.join}</p>]
end
end
alias :lead read
def inline_list(id)
chapter, id = extract_chapter_id(id)
if get_chap(chapter).nil?
"<span type='list'>#{I18n.t("list")}#{I18n.t("format_number_without_chapter", [chapter.list(id).number])}</span>"
else
"<span type='list'>#{I18n.t("list")}#{I18n.t("format_number", [get_chap(chapter), chapter.list(id).number])}</span>"
end
end
def list_header(id, caption)
puts %Q[<codelist>]
if get_chap.nil?
puts %Q[<caption>#{I18n.t("list")}#{I18n.t("format_number_without_chapter", [@chapter.list(id).number])}#{I18n.t("caption_prefix_idgxml")}#{compile_inline(caption)}</caption>]
else
puts %Q[<caption>#{I18n.t("list")}#{I18n.t("format_number", [get_chap, @chapter.list(id).number])}#{I18n.t("caption_prefix_idgxml")}#{compile_inline(caption)}</caption>]
end
end
def list_body(lines)
print %Q(<pre>)
no = 1
lines.each do |line|
unless ReVIEW.book.param["listinfo"].nil?
print "<listinfo line=\"#{no}\""
print " begin=\"1\"" if no == 1
print " end=\"#{no}\"" if no == lines.size
print ">"
end
print detab(line)
print "\n"
print "</listinfo>" unless ReVIEW.book.param["listinfo"].nil?
no += 1
end
puts "</pre></codelist>"
end
def emlist(lines, caption = nil)
quotedlist lines, 'emlist', caption
end
def emlistnum(lines, caption = nil)
_lines = []
lines.each_with_index do |line, i|
_lines << detab("<span type='lineno'>" + (i + 1).to_s.rjust(2) + ": </span>" + line)
end
quotedlist _lines, 'emlistnum', caption
end
def listnum_body(lines)
print %Q(<pre>)
lines.each_with_index do |line, i|
print detab("<span type='lineno'>" + (i + 1).to_s.rjust(2) + ": </span>" + line)
print "\n"
end
puts "</pre></codelist>"
end
def cmd(lines, caption = nil)
quotedlist lines, 'cmd', caption
end
def quotedlist(lines, css_class, caption)
print %Q[<list type='#{css_class}'>]
puts "<caption aid:pstyle='#{css_class}-title'>#{compile_inline(caption)}</caption>" unless caption.nil?
print %Q[<pre>]
no = 1
lines.each do |line|
unless ReVIEW.book.param["listinfo"].nil?
print "<listinfo line=\"#{no}\""
print " begin=\"1\"" if no == 1
print " end=\"#{no}\"" if no == lines.size
print ">"
end
print detab(line)
print "\n"
print "</listinfo>" unless ReVIEW.book.param["listinfo"].nil?
no += 1
end
puts '</pre></list>'
end
private :quotedlist
def quote(lines)
if ReVIEW.book.param["deprecated-blocklines"].nil?
blocked_lines = split_paragraph(lines)
puts "<quote>#{blocked_lines.join("")}</quote>"
else
puts "<quote>#{lines.join("\n")}</quote>"
end
end
def inline_table(id)
chapter, id = extract_chapter_id(id)
if get_chap(chapter).nil?
"<span type='table'>#{I18n.t("table")}#{I18n.t("format_number_without_chapter", [chapter.table(id).number])}</span>"
else
"<span type='table'>#{I18n.t("table")}#{I18n.t("format_number", [get_chap(chapter), chapter.table(id).number])}</span>"
end
end
def inline_img(id)
chapter, id = extract_chapter_id(id)
if get_chap(chapter).nil?
"<span type='image'>#{I18n.t("image")}#{I18n.t("format_number_without_chapter", [chapter.image(id).number])}</span>"
else
"<span type='image'>#{I18n.t("image")}#{I18n.t("format_number", [get_chap(chapter), chapter.image(id).number])}</span>"
end
end
# Normalizes a single key=value metric into key="value" form, removing
# one layer of surrounding single or double quotes from the value.
def handle_metric(str)
  key, value = str.split('=', 2)
  unquoted = value.sub(/\A["']/, '').sub(/["']\Z/, '')
  %Q|#{key}="#{unquoted}"|
end
# Joins normalized metric attributes into a space-separated string with
# a leading space, ready to append inside an XML tag.
def result_metric(array)
  " " + array.join(" ")
end
# Emits an <img> element referencing the image file for +id+, with any
# parsed metric attributes and a numbered caption.
def image_image(id, caption, metric=nil)
  metrics = parse_metric("idgxml", metric)
  puts "<img>"
  path = @chapter.image(id).path.sub(/\A.\//, "")
  puts %Q[<Image href="file://#{path}"#{metrics} />]
  image_header id, caption
  puts "</img>"
end
# Emits a placeholder <img> block containing the dummy text +lines+
# (used while real artwork is unavailable), warning when the EPS file
# expected by the current directory-layout mode is missing.
def image_dummy(id, caption, lines)
  expected = if ReVIEW.book.param["subdirmode"]
    "images/#{@chapter.id}/#{id}.eps"
  elsif ReVIEW.book.param["singledirmode"]
    "images/#{id}.eps"
  else
    "images/#{@chapter.id}-#{id}.eps"
  end
  # Bug fix: in singledirmode the warning previously named the
  # subdirmode path even though the existence check was performed on
  # "images/#{id}.eps"; the message now always matches the checked file.
  warn "image file not exist: #{expected}" unless File.exist?(expected)
  puts "<img>"
  print %Q[<pre aid:pstyle="dummyimage">]
  lines.each do |line|
    print detab(line)
    print "\n"
  end
  print %Q[</pre>]
  image_header id, caption
  puts "</img>"
end
# Numbered caption line for an image, with or without the chapter
# number depending on the current chapter context.
def image_header(id, caption)
  number = if get_chap.nil?
    I18n.t("format_number_without_chapter", [@chapter.image(id).number])
  else
    I18n.t("format_number", [get_chap, @chapter.image(id).number])
  end
  puts %Q[<caption>#{I18n.t("image")}#{number}#{I18n.t("caption_prefix_idgxml")}#{compile_inline(caption)}</caption>]
end
# Emits a TeX display equation as a <replace> placeholder; the real
# rendering is substituted later, keyed by a sequential block id.
def texequation(lines)
  @texblockequation += 1
  puts %Q[<replace idref="texblock-#{@texblockequation}">]
  puts "<pre>"
  puts lines.join("\n")
  puts "</pre>"
  puts "</replace>"
end
# Renders a table block. +lines+ are tab-separated cell rows; a row of
# 12+ "="/"-" characters separates header rows from the body. Without
# the "tableopt" book parameter plain <tr> rows are emitted; with it,
# the table is laid out for InDesign with explicit per-cell widths
# (tableopt is the table width in mm, converted to points).
def table(lines, id = nil, caption = nil)
tablewidth = nil
col = 0
unless ReVIEW.book.param["tableopt"].nil?
tablewidth = ReVIEW.book.param["tableopt"].split(",")[0].to_f / 0.351 # mm -> pt
end
puts "<table>"
rows = []
sepidx = nil
lines.each_with_index do |line, idx|
# A separator row marks the end of the header section (first one wins).
if /\A[\=\-]{12}/ =~ line
sepidx ||= idx
next
end
# Cell escaping: "\t.\t" is an empty cell, "\t..\t" a literal ".".
# In fixed-width mode empty cells become DUMMYCELLSPLITTER so the
# split below still yields a slot for them.
if tablewidth.nil?
rows.push(line.gsub(/\t\.\t/, "\t\t").gsub(/\t\.\.\t/, "\t.\t").gsub(/\t\.\Z/, "\t").gsub(/\t\.\.\Z/, "\t.").gsub(/\A\./, ""))
else
rows.push(line.gsub(/\t\.\t/, "\tDUMMYCELLSPLITTER\t").gsub(/\t\.\.\t/, "\t.\t").gsub(/\t\.\Z/, "\tDUMMYCELLSPLITTER").gsub(/\t\.\.\Z/, "\t.").gsub(/\A\./, ""))
end
# Track the widest row to size the column-width array.
_col = rows[rows.length - 1].split(/\t/).length
col = _col if _col > col
end
cellwidth = []
unless tablewidth.nil?
# Column widths: honor @tsize (comma-separated mm values, set by the
# tsize directive) first, then split any remaining width evenly
# across columns it did not cover.
if @tsize.nil?
col.times {|n| cellwidth[n] = tablewidth / col }
else
cellwidth = @tsize.split(/\s*,\s*/)
totallength = 0
cellwidth.size.times do |n|
cellwidth[n] = cellwidth[n].to_f / 0.351 # mm -> pt
totallength = totallength + cellwidth[n]
warn "total length exceeds limit for table: #{id}" if totallength > tablewidth
end
if cellwidth.size < col
cw = (tablewidth - totallength) / (col - cellwidth.size)
warn "auto cell sizing exceeds limit for table: #{id}" if cw <= 0
for i in cellwidth.size..(col - 1)
cellwidth[i] = cw
end
end
end
end
# Caption lookup can raise when the table id is not registered.
begin
table_header id, caption unless caption.nil?
rescue KeyError => err
error "no such table: #{id}"
end
return if rows.empty?
if tablewidth.nil?
print "<tbody>"
else
print %Q[<tbody xmlns:aid5="http://ns.adobe.com/AdobeInDesign/5.0/" aid:table="table" aid:trows="#{rows.length}" aid:tcols="#{col}">]
end
# Emit the header rows (everything before the separator line).
if sepidx
sepidx.times do |y|
if tablewidth.nil?
puts %Q[<tr type="header">#{rows.shift}</tr>]
else
i = 0
rows.shift.split(/\t/).each_with_index do |cell, x|
print %Q[<td xyh="#{x + 1},#{y + 1},#{sepidx}" aid:table="cell" aid:theader="1" aid:crows="1" aid:ccols="1" aid:ccolwidth="#{sprintf("%.13f", cellwidth[i])}">#{cell.sub("DUMMYCELLSPLITTER", "")}</td>]
i += 1
end
end
end
end
# Body rows are delegated; @tsize is consumed by this table only.
trputs(tablewidth, rows, cellwidth, sepidx)
puts "</tbody></table>"
@tsize = nil
end
# Emits the table body rows. Without a fixed table width, plain <tr>
# rows are printed and the final row is tagged type="lastline"; with a
# fixed width, each cell becomes an InDesign <td> carrying its grid
# position (xyh = column,row,header-row-count) and precomputed width.
def trputs(tablewidth, rows, cellwidth, sepidx)
sepidx = 0 if sepidx.nil?
if tablewidth.nil?
lastline = rows.pop
rows.each {|row| puts %Q[<tr>#{row}</tr>] }
puts %Q[<tr type="lastline">#{lastline}</tr>] unless lastline.nil?
else
rows.each_with_index do |row, y|
i = 0
row.split(/\t/).each_with_index do |cell, x|
# DUMMYCELLSPLITTER is the placeholder for an escaped empty cell.
print %Q[<td xyh="#{x + 1},#{y + 1 + sepidx},#{sepidx}" aid:table="cell" aid:crows="1" aid:ccols="1" aid:ccolwidth="#{sprintf("%.13f", cellwidth[i])}">#{cell.sub("DUMMYCELLSPLITTER", "")}</td>]
i += 1
end
end
end
end
# Numbered caption line for a table, with or without the chapter
# number depending on the current chapter context.
def table_header(id, caption)
  number = if get_chap.nil?
    I18n.t("format_number_without_chapter", [@chapter.table(id).number])
  else
    I18n.t("format_number", [get_chap, @chapter.table(id).number])
  end
  puts %Q[<caption>#{I18n.t("table")}#{number}#{I18n.t("caption_prefix_idgxml")}#{compile_inline(caption)}</caption>]
end
def table_begin(ncols)
end
def tr(rows)
puts %Q[<tr>#{rows.join("\t")}</tr>]
end
def th(str)
%Q[<?dtp tablerow header?>#{str}]
end
def td(str)
str
end
def table_end
print "<?dtp tablerow last?>"
end
def comment(str)
print %Q(<!-- [Comment] #{escape_html(str)} -->)
end
def footnote(id, str)
# see inline_fn
end
def inline_fn(id)
%Q[<footnote>#{compile_inline(@chapter.footnote(id).content.strip)}</footnote>]
end
def compile_ruby(base, ruby)
%Q[<GroupRuby><aid:ruby xmlns:aid="http://ns.adobe.com/AdobeInDesign/3.0/"><aid:rb>#{escape_html(base.sub(/\A\s+/, "").sub(/\s+$/, ""))}</aid:rb><aid:rt>#{escape_html(ruby.sub(/\A\s+/, "").sub(/\s+$/, ""))}</aid:rt></aid:ruby></GroupRuby>]
end
def compile_kw(word, alt)
'<keyword>' +
if alt
then escape_html("#{word}(#{alt.strip})")
else escape_html(word)
end +
'</keyword>' +
%Q[<index value="#{escape_html(word)}" />] +
if alt
alt.split(/\s*,\s*/).collect! {|e| %Q[<index value="#{escape_html(e.strip)}" />] }.join
else
""
end
end
def compile_href(url, label)
%Q[<a linkurl='#{escape_html(url)}'>#{label.nil? ? escape_html(url) : escape_html(label)}</a>]
end
def inline_sup(str)
%Q[<sup>#{escape_html(str)}</sup>]
end
def inline_sub(str)
%Q[<sub>#{escape_html(str)}</sub>]
end
def inline_raw(str)
%Q[#{super(str).gsub("\\n", "\n")}]
end
def inline_hint(str)
if ReVIEW.book.param["nolf"].nil?
%Q[\n<hint>#{escape_html(str)}</hint>]
else
%Q[<hint>#{escape_html(str)}</hint>]
end
end
# Renders an enclosed ("maru") character as a numeric entity.
# Digits map into the circled-digit block (U+2460 = 9312 is CIRCLED
# DIGIT ONE, hence base 9311 + n); uppercase letters into the circled
# Latin capitals starting at U+24B6 (9398); lowercase letters reuse the
# -65 offset with base 9392, so 'a' (97) yields 9424 = U+24D0.
# The rescue branches support Ruby 1.8, where String#[] returns an
# Integer code and #codepoints is unavailable (NoMethodError).
def inline_maru(str)
if str =~ /\A\d+\Z/
sprintf("&#x%x;", 9311 + str.to_i)
elsif str =~ /\A[A-Z]\Z/
begin
sprintf("&#x%x;", 9398 + str.codepoints.to_a[0] - 65)
rescue NoMethodError
sprintf("&#x%x;", 9398 + str[0] - 65)
end
elsif str =~ /\A[a-z]\Z/
begin
sprintf("&#x%x;", 9392 + str.codepoints.to_a[0] - 65)
rescue NoMethodError
sprintf("&#x%x;", 9392 + str[0] - 65)
end
else
raise "can't parse maru: #{str}"
end
end
def inline_idx(str)
%Q(#{escape_html(str)}<index value="#{escape_html(str)}" />)
end
def inline_hidx(str)
%Q(<index value="#{escape_html(str)}" />)
end
def inline_ami(str)
%Q(<ami>#{escape_html(str)}</ami>)
end
def inline_i(str)
%Q(<i>#{escape_html(str)}</i>)
end
def inline_b(str)
%Q(<b>#{escape_html(str)}</b>)
end
def inline_tt(str)
%Q(<tt>#{escape_html(str)}</tt>)
end
def inline_ttb(str)
index = escape_html(str).gsub(/<.*?>/, "").gsub(/\*/, "ESCAPED_ASTERISK").gsub(/'/, "")
%Q(<tt style='bold'>#{escape_html(str)}</tt><index value='#{index}' />)
end
alias :inline_ttbold inline_ttb
def inline_tti(str)
%Q(<tt style='italic'>#{escape_html(str)}</tt>)
end
def inline_u(str)
%Q(<underline>#{escape_html(str)}</underline>)
end
def inline_icon(id)
begin
%Q[<Image href="file://#{@chapter.image(id).path.sub(/\A\.\//, "")}" type="inline" />]
rescue
warn "no such icon image: #{id}"
""
end
end
def inline_bou(str)
%Q[<bou>#{escape_html(str)}</bou>]
end
def inline_keytop(str)
%Q[<keytop>#{escape_html(str)}</keytop>]
end
def inline_labelref(idref)
%Q[<ref idref='#{escape_html(idref)}'>「●● #{escape_html(idref)}」</ref>] # FIXME:節名とタイトルも込みで要出力
end
alias inline_ref inline_labelref
def inline_pageref(idref)
%Q[<pageref idref='#{escape_html(idref)}'>●●</pageref>] # ページ番号を参照
end
def inline_balloon(str)
%Q[<balloon>#{escape_html(str).gsub(/@maru\[(\d+)\]/) {|m| inline_maru($1)}}</balloon>]
end
def inline_uchar(str)
%Q[&#x#{str};]
end
def inline_m(str)
@texinlineequation += 1
%Q[<replace idref="texinline-#{@texinlineequation}"><pre>#{escape_html(str)}</pre></replace>]
end
def noindent
@noindent = true
end
def linebreak
# FIXME:pが閉じちゃってるので一度戻らないといけないが、難しい…。
puts "<br />"
end
def pagebreak
puts "<pagebreak />"
end
def nonum_begin(level, label, caption)
puts %Q[<title aid:pstyle="h#{level}">#{compile_inline(caption)}</title><?dtp level="#{level}" section="#{escape_html(compile_inline(caption))}"?>]
end
def nonum_end(level)
end
def circle_begin(level, label, caption)
puts %Q[<title aid:pstyle="smallcircle">•#{compile_inline(caption)}</title>]
end
def circle_end(level)
end
def common_column_begin(type, caption)
print "<#{type}column>"
puts %Q[<title aid:pstyle="#{type}column-title">#{compile_inline(caption)}</title>]
end
def common_column_end(type)
puts "</#{type}column>"
end
def column_begin(level, label, caption)
common_column_begin("", caption)
end
def column_end(level)
common_column_end("")
end
def xcolumn_begin(level, label, caption)
common_column_begin("x", caption)
end
def xcolumn_end(level)
common_column_end("x")
end
def world_begin(level, label, caption)
common_column_begin("world", caption)
end
def world_end(level)
common_column_end("world")
end
def hood_begin(level, label, caption)
common_column_begin("hood", caption)
end
def hood_end(level)
common_column_end("hood")
end
def edition_begin(level, label, caption)
common_column_begin("edition", caption)
end
def edition_end(level)
common_column_end("edition")
end
def insideout_begin(level, label, caption)
common_column_begin("insideout", caption)
end
def insideout_end(level)
common_column_end("insideout")
end
def ref_begin(level, label, caption)
if !label.nil?
puts "<reference id='#{label}'>"
else
puts "<reference>"
end
end
def ref_end(level)
puts "</reference>"
end
def sup_begin(level, label, caption)
if !label.nil?
puts "<supplement id='#{label}'>"
else
puts "<supplement>"
end
end
def sup_end(level)
puts "</supplement>"
end
def flushright(lines)
if ReVIEW.book.param["deprecated-blocklines"].nil?
puts split_paragraph(lines).join.gsub("<p>", "<p align='right'>")
else
puts "<p align='right'>#{lines.join("\n")}</p>"
end
end
def centering(lines)
puts split_paragraph(lines).join.gsub("<p>", "<p align='center'>")
end
def captionblock(type, lines, caption, specialstyle = nil)
print "<#{type}>"
style = specialstyle.nil? ? "#{type}-title" : specialstyle
puts "<title aid:pstyle='#{style}'>#{compile_inline(caption)}</title>" unless caption.nil?
if ReVIEW.book.param["deprecated-blocklines"].nil?
blocked_lines = split_paragraph(lines)
puts "#{blocked_lines.join}</#{type}>"
else
puts "#{lines.join("\n")}</#{type}>"
end
end
def note(lines, caption = nil)
captionblock("note", lines, caption)
end
def memo(lines, caption = nil)
captionblock("memo", lines, caption)
end
def tip(lines, caption = nil)
captionblock("tip", lines, caption)
end
def info(lines, caption = nil)
captionblock("info", lines, caption)
end
def planning(lines, caption = nil)
captionblock("planning", lines, caption)
end
def best(lines, caption = nil)
captionblock("best", lines, caption)
end
def important(lines, caption = nil)
captionblock("important", lines, caption)
end
def security(lines, caption = nil)
captionblock("security", lines, caption)
end
def caution(lines, caption = nil)
captionblock("caution", lines, caption)
end
def term(lines)
captionblock("term", lines, nil)
end
def link(lines, caption = nil)
captionblock("link", lines, caption)
end
def notice(lines, caption = nil)
if caption.nil?
captionblock("notice", lines, nil)
else
captionblock("notice-t", lines, caption, "notice-title")
end
end
def point(lines, caption = nil)
if caption.nil?
captionblock("point", lines, nil)
else
captionblock("point-t", lines, caption, "point-title")
end
end
def shoot(lines, caption = nil)
if caption.nil?
captionblock("shoot", lines, nil)
else
captionblock("shoot-t", lines, caption, "shoot-title")
end
end
def reference(lines)
captionblock("reference", lines, nil)
end
def practice(lines)
captionblock("practice", lines, nil)
end
def expert(lines)
captionblock("expert", lines, nil)
end
def syntaxblock(type, lines, caption)
if caption.nil?
puts %Q[<#{type}>]
else
titleopentag = %Q[caption aid:pstyle="#{type}-title"]
titleclosetag = "caption"
if type == "insn"
titleopentag = %Q[floattitle type="insn"]
titleclosetag = "floattitle"
end
puts %Q[<#{type}><#{titleopentag}>#{compile_inline(caption)}</#{titleclosetag}>]
end
no = 1
lines.each do |line|
unless ReVIEW.book.param["listinfo"].nil?
print %Q[<listinfo line="#{no}"]
print %Q[ begin="1"] if no == 1
print %Q[ end="#{no}"] if no == lines.size
print %Q[>]
end
print detab(line)
print "\n"
print "</listinfo>" unless ReVIEW.book.param["listinfo"].nil?
no += 1
end
puts "</#{type}>"
end
def insn(lines, caption = nil)
syntaxblock("insn", lines, caption)
end
def box(lines, caption = nil)
syntaxblock("box", lines, caption)
end
def indepimage(id, caption=nil, metric=nil)
metrics = parse_metric("idgxml", metric)
puts "<img>"
begin
puts %Q[<Image href="file://#{@chapter.image(id).path.sub(/\A\.\//, "")}"#{metrics} />]
rescue
warn %Q[no such image: #{id}]
end
puts %Q[<caption>#{compile_inline(caption)}</caption>] if !caption.nil? && !caption.empty?
puts "</img>"
end
alias :numberlessimage indepimage
def label(id)
# FIXME
print "<label id='#{id}' />"
end
def tsize(str)
@tsize = str
end
def dtp(str)
print %Q(<?dtp #{str} ?>)
end
def hr
print "<hr />"
end
def bpo(lines)
puts %Q[<bpo>#{lines.join("\n")}</bpo>]
end
def inline_dtp(str)
"<?dtp #{str} ?>"
end
def inline_code(str)
%Q[<tt type='inline-code'>#{escape_html(str)}</tt>]
end
def inline_br(str)
"\n"
end
# Emits raw output lines verbatim, decoding the four HTML entities the
# Re:VIEW compiler escaped on the way in. No trailing newline is
# printed after the last line.
#
# Bug fix: the entity decoding had been corrupted into no-op
# replacements (e.g. gsub("<", "<")) and a malformed quote literal that
# did not parse; the &lt;/&gt;/&quot;/&amp; decoding is restored.
def rawblock(lines)
  no = 1
  lines.each do |l|
    print l.gsub("&lt;", "<").gsub("&gt;", ">").gsub("&quot;", "\"").gsub("&amp;", "&")
    print "\n" unless lines.length == no
    no = no + 1
  end
end
def text(str)
str
end
# Inline chapter reference: "<number>「<title>」" by default, or with the
# three comma-separated decorations given by the "chapref" parameter
# (prefix, open, close). Emits a <link> when "chapterlink" is enabled.
# Unknown chapter ids are reported and rendered as plain text.
def inline_chapref(id)
  chs = ["", "「", "」"]
  unless ReVIEW.book.param["chapref"].nil?
    _chs = convert_inencoding(ReVIEW.book.param["chapref"],
                              ReVIEW.book.param["inencoding"]).split(",")
    if _chs.size != 3
      error "--chapsplitter must have exactly 3 parameters with comma."
    else
      chs = _chs
    end
  end
  # (Removed a dead, empty `else` branch that followed the unless above.)
  s = "#{chs[0]}#{@chapter.env.chapter_index.number(id)}#{chs[1]}#{@chapter.env.chapter_index.title(id)}#{chs[2]}"
  if ReVIEW.book.param["chapterlink"]
    %Q(<link href="#{id}">#{s}</link>)
  else
    s
  end
rescue KeyError
  error "unknown chapter: #{id}"
  nofunc_text("[UnknownChapter:#{id}]")
end
def inline_chap(id)
if ReVIEW.book.param["chapterlink"]
%Q(<link href="#{id}">#{@chapter.env.chapter_index.number(id)}</link>)
else
@chapter.env.chapter_index.number(id)
end
rescue KeyError
error "unknown chapter: #{id}"
nofunc_text("[UnknownChapter:#{id}]")
end
def inline_title(id)
if ReVIEW.book.param["chapterlink"]
%Q(<link href="#{id}">#{@chapter.env.chapter_index.title(id)}</link>)
else
@chapter.env.chapter_index.title(id)
end
rescue KeyError
error "unknown chapter: #{id}"
nofunc_text("[UnknownChapter:#{id}]")
end
def source_header(caption)
puts %Q[<source>]
puts %Q[<caption>#{compile_inline(caption)}</caption>]
end
def source_body(lines)
puts %Q[<pre>]
lines.each do |line|
print detab(line)
print "\n"
end
puts %Q[</pre></source>]
end
def bibpaper(lines, id, caption)
bibpaper_header id, caption
unless lines.empty?
bibpaper_bibpaper id, caption, lines
end
puts %Q(</bibitem>)
end
def bibpaper_header(id, caption)
puts %Q(<bibitem id="bib-#{id}">)
puts "<caption><span type='bibno'>[#{@chapter.bibpaper(id).number}] </span>#{compile_inline(caption)}</caption>"
end
def bibpaper_bibpaper(id, caption, lines)
print split_paragraph(lines).join("")
end
def inline_bib(id)
%Q(<span type='bibref' idref='#{id}'>[#{@chapter.bibpaper(id).number}]</span>)
end
def inline_recipe(id)
# FIXME
%Q(<recipe idref="#{escape_html(id)}">[XXX]「#{escape_html(id)}」 p.XX</recipe>)
end
def nofunc_text(str)
escape_html(str)
end
def image_ext
"eps"
end
end
end # module ReVIEW
|
require 'json'
require 'tempfile'
::Capistrano::Configuration.instance(:must_exist).load do
namespace :roundsman do
def run_list(*recipes)
if recipes.any?
set :run_list, recipes
install_ruby if fetch(:run_roundsman_checks, true) && install_ruby?
run_chef
else
Array(fetch(:run_list))
end
end
def set_default(name, *args, &block)
@_defaults ||= []
@_overridden_defaults ||= []
@_defaults << name
if exists?(name)
@_overridden_defaults << name
else
set(name, *args, &block)
end
end
def roundsman_working_dir(*path)
ensure_roundsman_working_dir
File.join(fetch(:roundsman_working_dir), *path)
end
def sudo(command, *args)
run "#{top.sudo} #{command}", *args
end
def run(*args)
if fetch(:stream_roundsman_output)
top.stream *args
else
top.run *args
end
end
set_default :roundsman_working_dir, "/tmp/roundsman"
set_default :stream_roundsman_output, true
set_default(:roundsman_user) { fetch(:user) { capture('whoami').strip } }
set_default :debug_chef, false
set_default :package_manager, 'apt-get'
set_default :run_roundsman_checks, true
desc "Lists configuration"
task :configuration do
@_defaults.sort_by {|sym| sym.to_s}.each do |name|
display_name = ":#{name},".ljust(30)
if variables[name].is_a?(Proc)
value = "<block>"
else
value = fetch(name).inspect
value = "#{value[0..40]}... (truncated)" if value.length > 40
end
overridden = @_overridden_defaults.include?(name) ? "(overridden)" : ""
puts "set #{display_name} #{value} #{overridden}"
end
end
desc "Prepares the server for chef"
task :install_ruby do
install.default
end
desc "Runs chef"
task :run_chef do
chef.default
end
def ensure_roundsman_working_dir
unless @ensured_roundsman_working_dir
run "mkdir -p #{fetch(:roundsman_working_dir)}"
run "mkdir -p #{fetch(:roundsman_working_dir)}/cache"
sudo "chown -R #{fetch(:roundsman_user)} #{fetch(:roundsman_working_dir)}"
@ensured_roundsman_working_dir = true
end
end
def install_ruby?
installed_version = capture("ruby --version || true").strip
if installed_version.include?("not found")
logger.info "No version of Ruby could be found."
return true
end
required_version = fetch(:ruby_version).gsub("-", "")
if installed_version.include?(required_version)
if fetch(:care_about_ruby_version)
logger.info "Ruby #{installed_version} matches the required version: #{required_version}."
return false
else
logger.info "Already installed Ruby #{installed_version}, not #{required_version}. Set :care_about_ruby_version if you want to fix this."
return false
end
else
logger.info "Ruby version mismatch. Installed version: #{installed_version}, required is #{required_version}"
return true
end
end
namespace :install do
set_default :ruby_version, "1.9.3-p194"
set_default :care_about_ruby_version, true
set_default :ruby_install_dir, "/usr/local"
set_default :ruby_dependencies do
%w(git-core curl build-essential bison openssl
libreadline6 libreadline6-dev zlib1g zlib1g-dev libssl-dev
libyaml-dev libxml2-dev libxslt-dev autoconf libc6-dev ncurses-dev
vim wget tree)
end
set_default :ruby_install_script do
%Q{
set -e
cd #{roundsman_working_dir}
rm -rf ruby-build
git clone -q https://github.com/sstephenson/ruby-build.git
cd ruby-build
./install.sh
CONFIGURE_OPTS='--disable-install-rdoc' ruby-build #{fetch(:ruby_version)} #{fetch(:ruby_install_dir)}
}
end
task :default, :except => { :no_release => true } do
if install_ruby?
dependencies
ruby
end
end
desc "Installs ruby."
task :ruby, :except => { :no_release => true } do
put fetch(:ruby_install_script), roundsman_working_dir("install_ruby.sh"), :via => :scp
sudo "bash #{roundsman_working_dir("install_ruby.sh")}"
end
desc "Installs the dependencies needed for Ruby"
task :dependencies, :except => { :no_release => true } do
ensure_supported_distro
sudo "#{fetch(:package_manager)} -yq update"
sudo "#{fetch(:package_manager)} -yq install #{fetch(:ruby_dependencies).join(' ')}"
end
desc "Checks if the ruby version installed matches the version specified"
task :check_ruby_version do
abort if install_ruby?
end
def distribution
@distribution ||= capture("cat /etc/issue").strip
end
def ensure_supported_distro
unless @ensured_supported_distro
logger.info "Using Linux distribution #{distribution}"
abort "This distribution is not (yet) supported." unless distribution.include?("Ubuntu")
@ensured_supported_distro = true
end
end
end
namespace :chef do
set_default :chef_version, "~> 0.10.8"
set_default :cookbooks_directory, ["config/cookbooks"]
set_default :copyfile_disable, false
set_default :filter_sensitive_settings, [ /password/, /filter_sensitive_settings/ ]
task :default, :except => { :no_release => true } do
ensure_cookbooks_exists
prepare_chef
chef_solo
end
desc "Generates the config and copies over the cookbooks to the server"
task :prepare_chef, :except => { :no_release => true } do
install if fetch(:run_roundsman_checks, true) && install_chef?
ensure_cookbooks_exists
generate_config
generate_attributes
copy_cookbooks
end
desc "Installs chef"
task :install, :except => { :no_release => true } do
sudo "gem uninstall -xaI chef || true"
sudo "gem install chef -v #{fetch(:chef_version).inspect} --quiet --no-ri --no-rdoc"
sudo "gem install ruby-shadow --quiet --no-ri --no-rdoc"
end
desc "Runs the existing chef configuration"
task :chef_solo, :except => { :no_release => true } do
logger.info "Now running #{fetch(:run_list).join(', ')}"
sudo "chef-solo -c #{roundsman_working_dir("solo.rb")} -j #{roundsman_working_dir("solo.json")}#{' -l debug' if fetch(:debug_chef)}"
end
def ensure_cookbooks_exists
abort "You must specify at least one recipe when running roundsman.chef" if fetch(:run_list, []).empty?
abort "No cookbooks found in #{fetch(:cookbooks_directory).inspect}" if cookbooks_paths.empty?
end
def cookbooks_paths
Array(fetch(:cookbooks_directory)).select { |path| File.exist?(path) }
end
def install_chef?
required_version = fetch(:chef_version).inspect
output = capture("gem list -i -v #{required_version} || true").strip
output == "false"
end
def generate_config
cookbook_string = cookbooks_paths.map { |c| "File.join(root, #{c.to_s.inspect})" }.join(', ')
solo_rb = <<-RUBY
root = File.expand_path(File.dirname(__FILE__))
file_cache_path File.join(root, "cache")
cookbook_path [ #{cookbook_string} ]
RUBY
put solo_rb, roundsman_working_dir("solo.rb"), :via => :scp
end
def generate_attributes
attrs = remove_procs_from_hash variables.dup
put attrs.to_json, roundsman_working_dir("solo.json"), :via => :scp
end
# Recursively removes procs from hashes. Procs can exist because you specified them like this:
#
# set(:root_password) { Capistrano::CLI.password_prompt("Root password: ") }
def remove_procs_from_hash(hash)
new_hash = {}
hash.each do |key, value|
next if fetch(:filter_sensitive_settings).find { |regex| regex.match(key.to_s) }
real_value = if value.respond_to?(:call)
begin
value.call
rescue ::Capistrano::CommandError => e
logger.debug "Could not get the value of #{key}: #{e.message}"
nil
end
else
value
end
if real_value.is_a?(Hash)
real_value = remove_procs_from_hash(real_value)
end
if real_value && !real_value.class.to_s.include?("Capistrano") # skip capistrano tasks
new_hash[key] = real_value
end
end
new_hash
end
def copy_cookbooks
tar_file = Tempfile.new("cookbooks.tar")
begin
tar_file.close
env_vars = fetch(:copyfile_disable) && RUBY_PLATFORM.downcase.include?('darwin') ? "COPYFILE_DISABLE=true" : ""
system "#{env_vars} tar -cjf #{tar_file.path} #{cookbooks_paths.join(' ')}"
upload tar_file.path, roundsman_working_dir("cookbooks.tar"), :via => :scp
run "cd #{roundsman_working_dir} && tar -xjf cookbooks.tar"
ensure
tar_file.unlink
end
end
end
end
end
Update lib/roundsman/capistrano.rb:
use the correct Chef version (~> 10.18.2), which fixes a bug with the moneta gem.
require 'json'
require 'tempfile'
::Capistrano::Configuration.instance(:must_exist).load do
namespace :roundsman do
def run_list(*recipes)
if recipes.any?
set :run_list, recipes
install_ruby if fetch(:run_roundsman_checks, true) && install_ruby?
run_chef
else
Array(fetch(:run_list))
end
end
# Declares a Capistrano variable with a default value (or lazy block),
# recording every declared default and which of them the user's deploy
# configuration had already overridden. Both lists feed the
# roundsman:configuration listing task.
def set_default(name, *args, &block)
@_defaults ||= []
@_overridden_defaults ||= []
@_defaults << name
if exists?(name)
# Already set by the user: keep their value, just note the override.
@_overridden_defaults << name
else
set(name, *args, &block)
end
end
def roundsman_working_dir(*path)
ensure_roundsman_working_dir
File.join(fetch(:roundsman_working_dir), *path)
end
def sudo(command, *args)
run "#{top.sudo} #{command}", *args
end
def run(*args)
if fetch(:stream_roundsman_output)
top.stream *args
else
top.run *args
end
end
set_default :roundsman_working_dir, "/tmp/roundsman"
set_default :stream_roundsman_output, true
set_default(:roundsman_user) { fetch(:user) { capture('whoami').strip } }
set_default :debug_chef, false
set_default :package_manager, 'apt-get'
set_default :run_roundsman_checks, true
desc "Lists configuration"
task :configuration do
@_defaults.sort_by {|sym| sym.to_s}.each do |name|
display_name = ":#{name},".ljust(30)
if variables[name].is_a?(Proc)
value = "<block>"
else
value = fetch(name).inspect
value = "#{value[0..40]}... (truncated)" if value.length > 40
end
overridden = @_overridden_defaults.include?(name) ? "(overridden)" : ""
puts "set #{display_name} #{value} #{overridden}"
end
end
desc "Prepares the server for chef"
task :install_ruby do
install.default
end
desc "Runs chef"
task :run_chef do
chef.default
end
def ensure_roundsman_working_dir
unless @ensured_roundsman_working_dir
run "mkdir -p #{fetch(:roundsman_working_dir)}"
run "mkdir -p #{fetch(:roundsman_working_dir)}/cache"
sudo "chown -R #{fetch(:roundsman_user)} #{fetch(:roundsman_working_dir)}"
@ensured_roundsman_working_dir = true
end
end
# Decides whether Ruby must be (re)installed on the server by comparing
# the remote `ruby --version` output against the :ruby_version setting.
# Returns true when no Ruby is present or the versions mismatch while
# :care_about_ruby_version demands an exact match.
def install_ruby?
installed_version = capture("ruby --version || true").strip
# `|| true` keeps capture from failing when ruby is absent; the shell
# then prints a "not found" message instead.
if installed_version.include?("not found")
logger.info "No version of Ruby could be found."
return true
end
# Version strings like "1.9.3-p194" appear as "1.9.3p194" in
# `ruby --version` output, hence the dash removal.
required_version = fetch(:ruby_version).gsub("-", "")
if installed_version.include?(required_version)
if fetch(:care_about_ruby_version)
logger.info "Ruby #{installed_version} matches the required version: #{required_version}."
return false
else
logger.info "Already installed Ruby #{installed_version}, not #{required_version}. Set :care_about_ruby_version if you want to fix this."
return false
end
else
logger.info "Ruby version mismatch. Installed version: #{installed_version}, required is #{required_version}"
return true
end
end
namespace :install do
set_default :ruby_version, "1.9.3-p194"
set_default :care_about_ruby_version, true
set_default :ruby_install_dir, "/usr/local"
set_default :ruby_dependencies do
%w(git-core curl build-essential bison openssl
libreadline6 libreadline6-dev zlib1g zlib1g-dev libssl-dev
libyaml-dev libxml2-dev libxslt-dev autoconf libc6-dev ncurses-dev
vim wget tree)
end
set_default :ruby_install_script do
%Q{
set -e
cd #{roundsman_working_dir}
rm -rf ruby-build
git clone -q https://github.com/sstephenson/ruby-build.git
cd ruby-build
./install.sh
CONFIGURE_OPTS='--disable-install-rdoc' ruby-build #{fetch(:ruby_version)} #{fetch(:ruby_install_dir)}
}
end
task :default, :except => { :no_release => true } do
if install_ruby?
dependencies
ruby
end
end
desc "Installs ruby."
task :ruby, :except => { :no_release => true } do
put fetch(:ruby_install_script), roundsman_working_dir("install_ruby.sh"), :via => :scp
sudo "bash #{roundsman_working_dir("install_ruby.sh")}"
end
desc "Installs the dependencies needed for Ruby"
task :dependencies, :except => { :no_release => true } do
ensure_supported_distro
sudo "#{fetch(:package_manager)} -yq update"
sudo "#{fetch(:package_manager)} -yq install #{fetch(:ruby_dependencies).join(' ')}"
end
desc "Checks if the ruby version installed matches the version specified"
task :check_ruby_version do
abort if install_ruby?
end
def distribution
@distribution ||= capture("cat /etc/issue").strip
end
def ensure_supported_distro
unless @ensured_supported_distro
logger.info "Using Linux distribution #{distribution}"
abort "This distribution is not (yet) supported." unless distribution.include?("Ubuntu")
@ensured_supported_distro = true
end
end
end
namespace :chef do
set_default :chef_version, "~> 10.18.2"
set_default :cookbooks_directory, ["config/cookbooks"]
set_default :copyfile_disable, false
set_default :filter_sensitive_settings, [ /password/, /filter_sensitive_settings/ ]
task :default, :except => { :no_release => true } do
ensure_cookbooks_exists
prepare_chef
chef_solo
end
desc "Generates the config and copies over the cookbooks to the server"
task :prepare_chef, :except => { :no_release => true } do
install if fetch(:run_roundsman_checks, true) && install_chef?
ensure_cookbooks_exists
generate_config
generate_attributes
copy_cookbooks
end
desc "Installs chef"
task :install, :except => { :no_release => true } do
sudo "gem uninstall -xaI chef || true"
sudo "gem install chef -v #{fetch(:chef_version).inspect} --quiet --no-ri --no-rdoc"
sudo "gem install ruby-shadow --quiet --no-ri --no-rdoc"
end
desc "Runs the existing chef configuration"
task :chef_solo, :except => { :no_release => true } do
logger.info "Now running #{fetch(:run_list).join(', ')}"
sudo "chef-solo -c #{roundsman_working_dir("solo.rb")} -j #{roundsman_working_dir("solo.json")}#{' -l debug' if fetch(:debug_chef)}"
end
def ensure_cookbooks_exists
abort "You must specify at least one recipe when running roundsman.chef" if fetch(:run_list, []).empty?
abort "No cookbooks found in #{fetch(:cookbooks_directory).inspect}" if cookbooks_paths.empty?
end
def cookbooks_paths
Array(fetch(:cookbooks_directory)).select { |path| File.exist?(path) }
end
def install_chef?
required_version = fetch(:chef_version).inspect
output = capture("gem list -i -v #{required_version} || true").strip
output == "false"
end
def generate_config
cookbook_string = cookbooks_paths.map { |c| "File.join(root, #{c.to_s.inspect})" }.join(', ')
solo_rb = <<-RUBY
root = File.expand_path(File.dirname(__FILE__))
file_cache_path File.join(root, "cache")
cookbook_path [ #{cookbook_string} ]
RUBY
put solo_rb, roundsman_working_dir("solo.rb"), :via => :scp
end
def generate_attributes
attrs = remove_procs_from_hash variables.dup
put attrs.to_json, roundsman_working_dir("solo.json"), :via => :scp
end
# Recursively removes procs from hashes. Procs can exist because you specified them like this:
#
# set(:root_password) { Capistrano::CLI.password_prompt("Root password: ") }
# Converts a hash of Capistrano variables into a JSON-serializable hash:
# settings matching :filter_sensitive_settings are dropped, lazy values
# (procs/blocks) are evaluated, nested hashes are processed recursively,
# and values that cannot be resolved or are Capistrano internals are
# omitted.
def remove_procs_from_hash(hash)
new_hash = {}
hash.each do |key, value|
# Never serialize sensitive settings (passwords etc.).
next if fetch(:filter_sensitive_settings).find { |regex| regex.match(key.to_s) }
real_value = if value.respond_to?(:call)
begin
value.call
rescue ::Capistrano::CommandError => e
# Lazy values may need a server connection; skip them quietly.
logger.debug "Could not get the value of #{key}: #{e.message}"
nil
end
else
value
end
if real_value.is_a?(Hash)
real_value = remove_procs_from_hash(real_value)
end
# nil/false values and Capistrano task objects are not serialized.
if real_value && !real_value.class.to_s.include?("Capistrano") # skip capistrano tasks
new_hash[key] = real_value
end
end
new_hash
end
# Tars every local cookbook directory, uploads the archive to the
# roundsman working directory on the server and unpacks it there.
def copy_cookbooks
  tar_file = Tempfile.new("cookbooks.tar")
  begin
    tar_file.close
    # COPYFILE_DISABLE keeps OS X tar from adding ._* AppleDouble entries.
    env_vars = fetch(:copyfile_disable) && RUBY_PLATFORM.downcase.include?('darwin') ? "COPYFILE_DISABLE=true" : ""
    # Bug fix: the result of `system` was previously ignored, so a failed
    # tar surfaced only as confusing upload/extract errors later.
    unless system("#{env_vars} tar -cjf #{tar_file.path} #{cookbooks_paths.join(' ')}")
      abort "Could not archive cookbooks from #{cookbooks_paths.inspect}"
    end
    upload tar_file.path, roundsman_working_dir("cookbooks.tar"), :via => :scp
    run "cd #{roundsman_working_dir} && tar -xjf cookbooks.tar"
  ensure
    tar_file.unlink
  end
end
end
end
end
|
# frozen_string_literal: true
# Load gem Config if present
# @see https://rubygems.org/gems/config
begin
  require 'config'
  if defined?(Config)
    # Global settings plus the per-environment override file.
    config_conf = [
      File.join(Dir.pwd, 'config', 'settings.yml'),
      File.join(Dir.pwd, 'config', 'settings', "#{ENV['RAILS_ENV']}.yml")
    ]
    Config.load_and_set_settings(config_conf)
  end
rescue LoadError => exception
  # The config gem is simply not installed; report and continue.
  p "Don't use gem config : #{exception}"
rescue StandardError => exception
  # Bug fix: errors other than LoadError (e.g. malformed settings YAML)
  # previously propagated out of this guard; report them the same way.
  p exception
end
Log the exception when an error other than LoadError is raised.
# frozen_string_literal: true
# Load gem Config if present
# @see https://rubygems.org/gems/config
# Attempt to load the optional "config" gem and, when present, read the
# application settings (global file plus per-RAILS_ENV override).
begin
require 'config'
if defined?(Config)
config_conf = [
File.join(Dir.pwd, 'config', 'settings.yml'),
File.join(Dir.pwd, 'config', 'settings', "#{ENV['RAILS_ENV']}.yml")
]
Config.load_and_set_settings(config_conf)
end
rescue LoadError => exception
# Gem not installed: note it and continue without Settings.
p "Don't use gem config : #{exception}"
rescue => exception
# Any other failure (e.g. unparsable YAML) is reported but non-fatal.
p exception
end
|
require "calil"
module Ruboty
  module Handlers
    # Searches for books matching a user query and replies with which of
    # them are held by the University of Aizu library (via the Calil API).
    class Book < Base
      BORDER_STR = '============================================================'
      on(
        /book/,
        name: "book",
        description: "Maekawa Librarian\n\texample: @maekawa book title:機械学習. creator:松尾."
      )
      # Handles "book" messages.
      #
      # Query grammar: dot-separated "key:value[,value]" tags, e.g.
      # "title:A,B. creator:C.". A title tag is mandatory.
      def book(message)
        # Only react to messages actually addressed to this bot's command.
        return if !message.body.match(/^@*#{message.robot.name} book.*/)
        begin
          # Strip the "@bot book " prefix. This must be a Regexp: the
          # original passed a String, so "@*" was matched literally and
          # the prefix was never removed.
          query = message.body.sub(/^@*#{message.robot.name} book /, '').strip
          raise if query.empty?
          hash = {}
          query.split(".").each { |tag|
            tag.strip!
            kv = tag.split(":")
            raise if kv.length <= 1
            key = kv[0]
            values = kv[1]
            hash[key] = []
            values.split(",").each { |v|
              hash[key] << v
            }
          }
          raise "no_title" if hash.key?('title') == false
        rescue Exception => e
          # Broad rescue is used as parse-failure control flow here.
          if e.to_s == "no_title"
            message.reply("タイトルぐらい入力してほしいにゃ")
          else
            message.reply("構文エラーにゃ\nhelpでも見るにゃ")
          end
          return
        end
        # Two or more title terms: OR-search on titles only.
        if (2 <= hash['title'].length)
          connector_type = Ruboty::Maekawa::Searcher::CONNECTOR_OR
          hash = { title: hash['title'] }
        else
          connector_type = Ruboty::Maekawa::Searcher::CONNECTOR_AND
        end
        hash['mediatype'] = 1
        message.reply("このクエリで調べるにゃ\n#{hash.inspect}")
        books = Ruboty::Maekawa::Searcher.request(connector_type, hash)
        # Drop entries without an ISBN and de-duplicate by ISBN.
        books.delete_if do |book|
          book.isbn.empty?
        end
        books.uniq! do |book|
          book.isbn
        end
        if books.length == 0
          message.reply("見つからなかったにゃ〜")
          return
        end
        title_str = ''
        isbns = []
        books.each do |book|
          next if book.nil?
          title_str += "- #{book.title}\n"
          isbns << book.isbn
        end
        message.reply("検索結果にゃ\n#{title_str}\n大学図書館にあるかどうか調べるにゃ\n#{BORDER_STR}")
        results = ''
        calil_books = Calil::Book.find(isbns, %w(Univ_Aizu))
        calil_books.each.with_index(0) { |cbook, i|
          libaizu = cbook.systems[0]
          # Only list books that can actually be reserved.
          next if (libaizu.reserveurl.nil? || libaizu.reserveurl.empty?)
          results += "- #{books[i].title}\n  #{libaizu.libkeys.inspect}\n  #{libaizu.reserveurl}\n"
        }
        message.reply("#{results}\n#{BORDER_STR}\n大学にあるのはこのぐらいみたいだにゃ")
      end
    end
  end
end
[FIX] message parse
require "calil"
module Ruboty
  module Handlers
    # Searches for books matching a user query and replies with which of
    # them are held by the University of Aizu library (via the Calil API).
    class Book < Base
      BORDER_STR = '============================================================'
      on(
        /book/,
        name: "book",
        description: "Maekawa Librarian\n\texample: @maekawa book title:人工知能. creator:松尾. from:2013."
      )
      # Handles "book" messages.
      #
      # Query grammar: dot-separated "key:value[,value]" tags, e.g.
      # "title:A,B. creator:C.". A title tag is mandatory.
      def book(message)
        # Only react to messages actually addressed to this bot's command.
        return if !message.body.match(/^@*#{message.robot.name} book.*/)
        begin
          # Strip the "@bot book " prefix, then parse the tag list.
          query = message.body.sub(/^@*#{message.robot.name} book /, '').strip
          raise if query.empty?
          hash = {}
          query.split(".").each { |tag|
            tag.strip!
            kv = tag.split(":")
            raise if kv.length <= 1
            key = kv[0]
            values = kv[1]
            hash[key] = []
            values.split(",").each { |v|
              hash[key] << v
            }
          }
          raise "no_title" if hash.key?('title') == false
        rescue Exception => e
          # Broad rescue is used as parse-failure control flow here.
          if e.to_s == "no_title"
            message.reply("タイトルぐらい入力してほしいにゃ")
          else
            message.reply("構文エラーにゃ\nhelpでも見るにゃ")
          end
          return
        end
        # Two or more title terms: OR-search on titles only.
        if (2 <= hash['title'].length)
          connector_type = Ruboty::Maekawa::Searcher::CONNECTOR_OR
          hash = { title: hash['title'] }
        else
          connector_type = Ruboty::Maekawa::Searcher::CONNECTOR_AND
        end
        hash['mediatype'] = 1
        message.reply("このクエリで調べるにゃ\n#{hash.inspect}")
        books = Ruboty::Maekawa::Searcher.request(connector_type, hash)
        # Drop entries without an ISBN and de-duplicate by ISBN.
        books.delete_if do |book|
          book.isbn.empty?
        end
        books.uniq! do |book|
          book.isbn
        end
        if books.length == 0
          message.reply("見つからなかったにゃ〜")
          return
        end
        title_str = ''
        isbns = []
        books.each do |book|
          next if book.nil?
          title_str += "- #{book.title}\n"
          isbns << book.isbn
        end
        message.reply("検索結果にゃ\n#{title_str}\n大学図書館にあるかどうか調べるにゃ\n#{BORDER_STR}")
        results = ''
        calil_books = Calil::Book.find(isbns, %w(Univ_Aizu))
        calil_books.each.with_index(0) { |cbook, i|
          libaizu = cbook.systems[0]
          # Only list books that can actually be reserved.
          next if (libaizu.reserveurl.nil? || libaizu.reserveurl.empty?)
          results += "- #{books[i].title}\n  #{libaizu.libkeys.inspect}\n  #{libaizu.reserveurl}\n"
        }
        message.reply("#{results}\n#{BORDER_STR}\n大学にあるのはこのぐらいみたいだにゃ")
      end
    end
  end
end
|
module Rufus
  class Scheduler
    class << self
      #--
      # time and string methods
      #++
      # Parses o as a cron string, a duration ("every"-style), or a
      # point in time — trying each in that order — and raises
      # ArgumentError when nothing matches.
      def parse(o, opts={})
        opts[:no_error] = true
        parse_cron(o, opts) ||
        parse_in(o, opts) || # covers 'every' schedule strings
        parse_at(o, opts) ||
        fail(ArgumentError.new("couldn't parse #{o.inspect} (#{o.class})"))
      end
      # Parses a cron string via Fugit; with :no_error, returns nil on
      # invalid input instead of raising.
      def parse_cron(o, opts={})
        opts[:no_error] ?
          Fugit.parse_cron(o) :
          Fugit.do_parse_cron(o)
      end
      # Parses a duration: Strings go through parse_duration, Numerics
      # pass through unchanged, anything else is an error (nil when
      # :no_error is set).
      def parse_in(o, opts={})
        #o.is_a?(String) ? parse_duration(o, opts) : o
        return parse_duration(o, opts) if o.is_a?(String)
        return o if o.is_a?(Numeric)
        fail ArgumentError.new("couldn't parse time point in #{o.inspect}")
      rescue ArgumentError => ae
        return nil if opts[:no_error]
        fail ae
      end
      # Parses a point in time into an EoTime (EoTime and Time pass
      # through/convert directly); nil on failure when :no_error is set.
      def parse_at(o, opts={})
        return o if o.is_a?(EoTime)
        return EoTime.make(o) if o.is_a?(Time)
        EoTime.parse(o, opts)
      rescue StandardError => se
        return nil if opts[:no_error]
        fail se
      end
      # Turns a string like '1m10s' into a float like '70.0', more formally,
      # turns a time duration expressed as a string into a Float instance
      # (millisecond count).
      #
      # NOTE(review): despite "millisecond count" above, the examples
      # below and Fugit's #to_sec return seconds.
      #
      #   w -> week
      #   d -> day
      #   h -> hour
      #   m -> minute
      #   s -> second
      #   M -> month
      #   y -> year
      #   'nada' -> millisecond
      #
      # Some examples:
      #
      #   Rufus::Scheduler.parse_duration "0.5"    # => 0.5
      #   Rufus::Scheduler.parse_duration "500"    # => 0.5
      #   Rufus::Scheduler.parse_duration "1000"   # => 1.0
      #   Rufus::Scheduler.parse_duration "1h"     # => 3600.0
      #   Rufus::Scheduler.parse_duration "1h10s"  # => 3610.0
      #   Rufus::Scheduler.parse_duration "1w2d"   # => 777600.0
      #
      # Negative time strings are OK (Thanks Danny Fullerton):
      #
      #   Rufus::Scheduler.parse_duration "-0.5"   # => -0.5
      #   Rufus::Scheduler.parse_duration "-1h"    # => -3600.0
      #
      def parse_duration(str, opts={})
        d =
          opts[:no_error] ?
            Fugit::Duration.parse(str, opts) :
            Fugit::Duration.do_parse(str, opts)
        d ?
          d.to_sec :
          nil
      end
      # Turns a number of seconds into a a time string
      #
      #   Rufus.to_duration 0                    # => '0s'
      #   Rufus.to_duration 60                   # => '1m'
      #   Rufus.to_duration 3661                 # => '1h1m1s'
      #   Rufus.to_duration 7 * 24 * 3600        # => '1w'
      #   Rufus.to_duration 30 * 24 * 3600 + 1   # => "4w2d1s"
      #
      # It goes from seconds to the year. Months are not counted (as they
      # are of variable length). Weeks are counted.
      #
      # For 30 days months to be counted, the second parameter of this
      # method can be set to true.
      #
      #   Rufus.to_duration 30 * 24 * 3600 + 1, true   # => "1M1s"
      #
      # If a Float value is passed, milliseconds will be displayed without
      # 'marker'
      #
      #   Rufus.to_duration 0.051                       # => "51"
      #   Rufus.to_duration 7.051                       # => "7s51"
      #   Rufus.to_duration 0.120 + 30 * 24 * 3600 + 1  # => "4w2d1s120"
      #
      # (this behaviour mirrors the one found for parse_time_string()).
      #
      # Options are :
      #
      # * :months, if set to true, months (M) of 30 days will be taken into
      #   account when building up the result
      # * :drop_seconds, if set to true, seconds and milliseconds will be
      #   trimmed from the result
      #
      def to_duration(seconds, options={})
        #d = Fugit::Duration.parse(seconds, options).deflate
        #d = d.drop_seconds if options[:drop_seconds]
        #d = d.deflate(:month => options[:months]) if options[:months]
        #d.to_rufus_s
        to_fugit_duration(seconds, options).to_rufus_s
      end
      # Turns a number of seconds (integer or Float) into a hash like in :
      #
      #   Rufus.to_duration_hash 0.051
      #     # => { :s => 0.051 }
      #   Rufus.to_duration_hash 7.051
      #     # => { :s => 7.051 }
      #   Rufus.to_duration_hash 0.120 + 30 * 24 * 3600 + 1
      #     # => { :w => 4, :d => 2, :s => 1.120 }
      #
      # This method is used by to_duration behind the scenes.
      #
      # Options are :
      #
      # * :months, if set to true, months (M) of 30 days will be taken into
      #   account when building up the result
      # * :drop_seconds, if set to true, seconds and milliseconds will be
      #   trimmed from the result
      #
      def to_duration_hash(seconds, options={})
        to_fugit_duration(seconds, options).to_rufus_h
      end
      # Used by both .to_duration and .to_duration_hash
      #
      def to_fugit_duration(seconds, options={})
        d = Fugit::Duration
          .parse(seconds, options)
          .deflate
        d = d.drop_seconds if options[:drop_seconds]
        d = d.deflate(:month => options[:months]) if options[:months]
        d
      end
      #--
      # misc
      #++
      # Produces the UTC string representation of a Time instance
      #
      # like "2009/11/23 11:11:50.947109 UTC"
      #
      def utc_to_s(t=Time.now)
        "#{t.utc.strftime('%Y-%m-%d %H:%M:%S')}.#{sprintf('%06d', t.usec)} UTC"
      end
      # Produces a hour/min/sec/milli string representation of Time instance
      # (the %06d usec field actually gives microsecond precision)
      #
      def h_to_s(t=Time.now)
        "#{t.strftime('%H:%M:%S')}.#{sprintf('%06d', t.usec)}"
      end
    end
    # Debugging tools...
    #
    class D
      def self.h_to_s(t=Time.now); Rufus::Scheduler.h_to_s(t); end
    end
  end
end
Point to the short term
module Rufus
  class Scheduler
    class << self
      #--
      # time and string methods
      #++
      # Parses o as a cron string, a duration ("every"-style), or a
      # point in time — trying each in that order — and raises
      # ArgumentError when nothing matches.
      def parse(o, opts={})
        opts[:no_error] = true
        parse_cron(o, opts) ||
        parse_in(o, opts) || # covers 'every' schedule strings
        parse_at(o, opts) ||
        fail(ArgumentError.new("couldn't parse #{o.inspect} (#{o.class})"))
      end
      # Parses a cron string via Fugit; with :no_error, returns nil on
      # invalid input instead of raising.
      def parse_cron(o, opts={})
        opts[:no_error] ?
          Fugit.parse_cron(o) :
          Fugit.do_parse_cron(o)
      end
      # Parses a duration: Strings go through parse_duration, Numerics
      # pass through unchanged, anything else is an error (nil when
      # :no_error is set).
      def parse_in(o, opts={})
        #o.is_a?(String) ? parse_duration(o, opts) : o
        return parse_duration(o, opts) if o.is_a?(String)
        return o if o.is_a?(Numeric)
        fail ArgumentError.new("couldn't parse time point in #{o.inspect}")
      rescue ArgumentError => ae
        return nil if opts[:no_error]
        fail ae
      end
      # Parses a point in time into an EoTime (EoTime and Time pass
      # through/convert directly); nil on failure when :no_error is set.
      def parse_at(o, opts={})
        return o if o.is_a?(EoTime)
        return EoTime.make(o) if o.is_a?(Time)
        EoTime.parse(o, opts)
      rescue StandardError => se
        return nil if opts[:no_error]
        fail se
      end
      # Turns a string like '1m10s' into a float like '70.0', more formally,
      # turns a time duration expressed as a string into a Float instance
      # (millisecond count).
      #
      # NOTE(review): despite "millisecond count" above, the examples
      # below and Fugit's #to_sec return seconds.
      #
      #   w -> week
      #   d -> day
      #   h -> hour
      #   m -> minute
      #   s -> second
      #   M -> month
      #   y -> year
      #   'nada' -> millisecond
      #
      # Some examples:
      #
      #   Rufus::Scheduler.parse_duration "0.5"    # => 0.5
      #   Rufus::Scheduler.parse_duration "500"    # => 0.5
      #   Rufus::Scheduler.parse_duration "1000"   # => 1.0
      #   Rufus::Scheduler.parse_duration "1h"     # => 3600.0
      #   Rufus::Scheduler.parse_duration "1h10s"  # => 3610.0
      #   Rufus::Scheduler.parse_duration "1w2d"   # => 777600.0
      #
      # Negative time strings are OK (Thanks Danny Fullerton):
      #
      #   Rufus::Scheduler.parse_duration "-0.5"   # => -0.5
      #   Rufus::Scheduler.parse_duration "-1h"    # => -3600.0
      #
      def parse_duration(str, opts={})
        d =
          opts[:no_error] ?
            Fugit::Duration.parse(str, opts) :
            Fugit::Duration.do_parse(str, opts)
        d ?
          d.to_sec :
          nil
      end
      # Turns a number of seconds into a a time string
      #
      #   Rufus.to_duration 0                    # => '0s'
      #   Rufus.to_duration 60                   # => '1m'
      #   Rufus.to_duration 3661                 # => '1h1m1s'
      #   Rufus.to_duration 7 * 24 * 3600        # => '1w'
      #   Rufus.to_duration 30 * 24 * 3600 + 1   # => "4w2d1s"
      #
      # It goes from seconds to the year. Months are not counted (as they
      # are of variable length). Weeks are counted.
      #
      # For 30 days months to be counted, the second parameter of this
      # method can be set to true.
      #
      #   Rufus.to_duration 30 * 24 * 3600 + 1, true   # => "1M1s"
      #
      # If a Float value is passed, milliseconds will be displayed without
      # 'marker'
      #
      #   Rufus.to_duration 0.051                       # => "51"
      #   Rufus.to_duration 7.051                       # => "7s51"
      #   Rufus.to_duration 0.120 + 30 * 24 * 3600 + 1  # => "4w2d1s120"
      #
      # (this behaviour mirrors the one found for parse_time_string()).
      #
      # Options are :
      #
      # * :months, if set to true, months (M) of 30 days will be taken into
      #   account when building up the result
      # * :drop_seconds, if set to true, seconds and milliseconds will be
      #   trimmed from the result
      #
      def to_duration(seconds, options={})
        #d = Fugit::Duration.parse(seconds, options).deflate
        #d = d.drop_seconds if options[:drop_seconds]
        #d = d.deflate(:month => options[:months]) if options[:months]
        #d.to_rufus_s
        to_fugit_duration(seconds, options).to_rufus_s
      end
      # Turns a number of seconds (integer or Float) into a hash like in :
      #
      #   Rufus.to_duration_hash 0.051
      #     # => { :s => 0.051 }
      #   Rufus.to_duration_hash 7.051
      #     # => { :s => 7.051 }
      #   Rufus.to_duration_hash 0.120 + 30 * 24 * 3600 + 1
      #     # => { :w => 4, :d => 2, :s => 1.120 }
      #
      # This method is used by to_duration behind the scenes.
      #
      # Options are :
      #
      # * :months, if set to true, months (M) of 30 days will be taken into
      #   account when building up the result
      # * :drop_seconds, if set to true, seconds and milliseconds will be
      #   trimmed from the result
      #
      def to_duration_hash(seconds, options={})
        to_fugit_duration(seconds, options).to_rufus_h
      end
      # Used by both .to_duration and .to_duration_hash
      #
      def to_fugit_duration(seconds, options={})
        d = Fugit::Duration
          .parse(seconds, options)
          .deflate
        d = d.drop_seconds if options[:drop_seconds]
        d = d.deflate(:month => options[:months]) if options[:months]
        d
      end
      #--
      # misc
      #++
      # Produces the UTC string representation of a Time instance
      #
      # like "2009/11/23 11:11:50.947109 UTC"
      #
      def utc_to_s(t=Time.now)
        "#{t.utc.strftime('%Y-%m-%d %H:%M:%S')}.#{sprintf('%06d', t.usec)} UTC"
        # 1.9.x
        #"#{t.dup.strftime('%F %T.%6N')} UTC"
      end
      # Produces a hour/min/sec/milli string representation of Time instance
      # (the %06d usec field actually gives microsecond precision)
      #
      def h_to_s(t=Time.now)
        "#{t.strftime('%H:%M:%S')}.#{sprintf('%06d', t.usec)}"
        # 1.9.x
        #t.dup.strftime('%T.%6N')
      end
    end
    # Debugging tools...
    #
    class D
      def self.h_to_s(t=Time.now); Rufus::Scheduler.h_to_s(t); end
    end
  end
end
|
# Podspec for PhotoBrowser. The sources live in the ZLPhotoBrowser
# repository, so :git must point there (PhotoBrowser.git does not match
# the published repo).
Pod::Spec.new do |s|
  s.name         = "PhotoBrowser"
  s.version      = "4.0.0"
  s.summary      = "PhotoBrowser"
  s.homepage     = "https://github.com/timRabbit/PhotoBrowser"
  s.social_media_url = "https://github.com/timRabbit/PhotoBrowser"
  s.platform     = :ios,'8.0'
  s.license      = { :type => "MIT", :file => "LICENSE" }
  s.author       = { " tim" => "491590253@qq.com" }
  s.source       = { :git => "https://github.com/timRabbit/ZLPhotoBrowser.git",:tag => s.version.to_s }
  s.ios.deployment_target = "8.0"
  s.requires_arc = true
  s.framework    = "CoreFoundation","Foundation","CoreGraphics","Security","UIKit"
  s.library      = "z.1.1.3","stdc++","sqlite3"
  s.source_files = 'PhotoBrowser/PhotoBrowser'
  #s.resources = 'SIDADView/*.{bundle}'
  # s.dependency 'XAspect'
  # s.dependency 'TimCore/TimCore','~>1.2.12'
  # s.dependency 'JPush-iOS-SDK','~>3.0.2'
  s.ios.frameworks = 'UserNotifications'
  # s.subspec 'YMCitySelect' do |sp|
  #   sp.source_files = 'YMCitySelect/*.{h,m,mm}'
  #   sp.resources = "Extend/**/*.{png}"
  #   sp.requires_arc = true
  #   sp.xcconfig = { 'HEADER_SEARCH_PATHS' => '$(SDKROOT)/usr/include/libz, $(SDKROOT)/usr/include/libxml2', 'CLANG_CXX_LANGUAGE_STANDARD' => 'gnu++0x', 'CLANG_CXX_LIBRARY' => 'libstdc++', 'CLANG_WARN_DIRECT_OBJC_ISA_USAGE' => 'YES'}
  #   sp.dependency 'FontIcon'
  #   sp.prefix_header_contents = '#import "EasyIOS.h"'
  # end
end
Point the podspec source at the ZLPhotoBrowser repository
# Podspec for PhotoBrowser; sources are fetched from the ZLPhotoBrowser repo.
Pod::Spec.new do |s|
  s.name         = "PhotoBrowser"
  s.version      = "4.0.0"
  s.summary      = "PhotoBrowser"
  s.homepage     = "https://github.com/timRabbit/PhotoBrowser"
  s.social_media_url = "https://github.com/timRabbit/PhotoBrowser"
  s.platform     = :ios,'8.0'
  s.license      = { :type => "MIT", :file => "LICENSE" }
  s.author       = { " tim" => "491590253@qq.com" }
  s.source       = { :git => "https://github.com/timRabbit/ZLPhotoBrowser.git",:tag => s.version.to_s }
  s.ios.deployment_target = "8.0"
  s.requires_arc = true
  s.framework    = "CoreFoundation","Foundation","CoreGraphics","Security","UIKit"
  s.library      = "z.1.1.3","stdc++","sqlite3"
  s.source_files = 'PhotoBrowser/PhotoBrowser'
  #s.resources = 'SIDADView/*.{bundle}'
  # s.dependency 'XAspect'
  # s.dependency 'TimCore/TimCore','~>1.2.12'
  # s.dependency 'JPush-iOS-SDK','~>3.0.2'
  s.ios.frameworks = 'UserNotifications'
  # s.subspec 'YMCitySelect' do |sp|
  #   sp.source_files = 'YMCitySelect/*.{h,m,mm}'
  #   sp.resources = "Extend/**/*.{png}"
  #   sp.requires_arc = true
  #   sp.xcconfig = { 'HEADER_SEARCH_PATHS' => '$(SDKROOT)/usr/include/libz, $(SDKROOT)/usr/include/libxml2', 'CLANG_CXX_LANGUAGE_STANDARD' => 'gnu++0x', 'CLANG_CXX_LIBRARY' => 'libstdc++', 'CLANG_WARN_DIRECT_OBJC_ISA_USAGE' => 'YES'}
  #   sp.dependency 'FontIcon'
  #   sp.prefix_header_contents = '#import "EasyIOS.h"'
  # end
end
|
module Serialism
  # Combines a set of items and a serializer class.
  #
  # Example:
  #
  #     class Foo
  #       attr_accessor :id
  #     end
  #
  #     class FooSerializer < Serialism::Serializer
  #       attributes :id
  #     end
  #
  #     Serialism::Collection.new(a_bunch_of_foo_instances, serializer: FooSerializer).to_csv
  #     #=> returns a CSV string
  class Collection
    attr_reader :items

    # create a new collection
    #
    # @param [Enumerable] items
    #   A collection of items.
    #   All member items should be encodable by `serializer`.
    # @param [Serialism::Serializer] serializer
    #   The serializer class used to encode members of `items`.
    def initialize(items = [], serializer:)
      if !serializer.respond_to?(:attributes)
        raise ArgumentError, 'serializer must implement a class-level :attributes method'
      end
      if !serializer.instance_methods.include?(:render)
        raise ArgumentError, 'serializer must implement an instance-level :render method'
      end
      @serializer = serializer
      self.items = items
    end

    # Set the items in the collection.
    #
    # Replaces any previous items already in the collection.
    #
    # @param [#each] items an enumerable collection of items
    # @return [Serialism::Collection]
    def items=(items)
      raise ArgumentError, 'argument must respond_to :each' if !items.respond_to?(:each)
      raise ArgumentError, 'argument must respond_to :map' if !items.respond_to?(:map)
      @items = items
      self
    end

    # return the attributes for the collection
    #
    # @return [Array]
    def attributes
      return [] if items.empty?
      @serializer.attributes
    end

    # Generate a csv string for the collection
    #
    # When members of the array returned by the serializer are themselves arrays,
    # these sub-arrays will be joined using "," prior to being added to the main
    # CSV.
    #
    # @return [String]
    def to_csv
      require 'csv'
      CSV.generate do |csv|
        csv << attributes
        items.each { |item| csv << render_row(item) }
      end
    end

    # @return [String] the collection as a JSON array of rendered items
    def to_json
      require 'json'
      JSON.dump(items.map { |t| @serializer.new(t).render })
    end

    # this generates an array of arrays
    # headers are [0] data starts at [1]
    #
    # @return [Array<Array>]
    def to_a
      [attributes] + items.map { |item| render_row(item) }
    end

    private

    # Render one item through the serializer, flattening array-valued
    # cells to comma-separated strings for tabular output. Shared by
    # #to_csv and #to_a (previously duplicated in both).
    def render_row(item)
      @serializer.new(item).render.values.map do |cell|
        cell.is_a?(Array) ? cell.join(',') : cell
      end
    end
  end
end
Implement render_row
module Serialism
  # Pairs an enumerable of items with a serializer class and offers
  # CSV / JSON / array-of-arrays exports of the serialized rows.
  #
  # Example:
  #
  #     class Foo
  #       attr_accessor :id
  #     end
  #
  #     class FooSerializer < Serialism::Serializer
  #       attributes :id
  #     end
  #
  #     Serialism::Collection.new(a_bunch_of_foo_instances, serializer: FooSerializer).to_csv
  #     #=> returns a CSV string
  class Collection
    attr_reader :items

    # Build a collection.
    #
    # @param [Enumerable] items collection whose members the serializer can encode
    # @param [Serialism::Serializer] serializer class used to encode each item
    def initialize(items = [], serializer:)
      unless serializer.respond_to?(:attributes)
        raise ArgumentError, 'serializer must implement a class-level :attributes method'
      end
      unless serializer.instance_methods.include?(:render)
        raise ArgumentError, 'serializer must implement an instance-level :render method'
      end
      @serializer = serializer
      self.items = items
    end

    # Replace the collection's items wholesale.
    #
    # @param [#each] items an enumerable collection of items
    # @return [Serialism::Collection] self
    def items=(items)
      [:each, :map].each do |m|
        raise ArgumentError, "argument must respond_to :#{m}" unless items.respond_to?(m)
      end
      @items = items
      self
    end

    # Column headers for the collection (empty when there are no items).
    #
    # @return [Array]
    def attributes
      items.empty? ? [] : @serializer.attributes
    end

    # Render the collection as a CSV string; array-valued cells are
    # joined with "," before being written.
    #
    # @return [String]
    def to_csv
      require 'csv'
      CSV.generate do |out|
        out << attributes
        items.each { |item| out << render_row(item) }
      end
    end

    # @return [String] JSON array of each item's rendered hash
    def to_json
      require 'json'
      JSON.dump(items.map { |item| @serializer.new(item).render })
    end

    # Array-of-arrays form: row [0] holds the headers, data starts at [1].
    #
    # @return [Array<Array>]
    def to_a
      [attributes] + items.map { |item| render_row(item) }
    end

    private

    # One serialized row, with array cells flattened to "a,b" strings.
    def render_row(item)
      @serializer.new(item).render.values.map do |cell|
        if cell.is_a?(Array)
          cell.join(',')
        else
          cell
        end
      end
    end
  end
end
|
require 'sfn'
module Sfn
  class Command
    # Cloudformation describe command
    class Describe < Command
      include Sfn::CommandModule::Base
      # information available
      unless(defined?(AVAILABLE_DISPLAYS))
        AVAILABLE_DISPLAYS = [:resources, :outputs]
      end
      # Run the stack describe action
      #
      # Shows the sections selected via config flags (:resources,
      # :outputs); with no flags set, shows all of them. Exits non-zero
      # when the stack cannot be found.
      def execute!
        stack_name = name_args.last
        stack = provider.connection.stacks.get(stack_name)
        if(stack)
          display = [].tap do |to_display|
            AVAILABLE_DISPLAYS.each do |display_option|
              if(config[display_option])
                to_display << display_option
              end
            end
          end
          display = AVAILABLE_DISPLAYS.dup if display.empty?
          display.each do |display_method|
            self.send(display_method, stack)
            ui.info ''
          end
        else
          ui.fatal "Failed to find requested stack: #{ui.color(stack_name, :bold, :red)}"
          exit -1
        end
      end
      # Display resources
      #
      # Sorted most-recently-updated first; rendering is delegated to
      # things_output (defined elsewhere in the command module).
      #
      # @param stack [Miasma::Models::Orchestration::Stack]
      def resources(stack)
        stack_resources = stack.resources.all.sort do |x, y|
          y.updated <=> x.updated
        end.map do |resource|
          Smash.new(resource.attributes)
        end
        things_output(stack.name, stack_resources, :resources)
      end
      # Display outputs
      #
      # Output keys are re-formatted from CamelCase/snake_case into
      # space-separated capitalized words.
      #
      # @param stack [Miasma::Models::Orchestration::Stack]
      def outputs(stack)
        ui.info "Outputs for stack: #{ui.color(stack.name, :bold)}"
        unless(stack.outputs.empty?)
          stack.outputs.each do |output|
            key, value = output.key, output.value
            key = snake(key).to_s.split('_').map(&:capitalize).join(' ')
            ui.info ['  ', ui.color("#{key}:", :bold), value].join(' ')
          end
        else
          ui.info "  #{ui.color('No outputs found')}"
        end
      end
      # @return [Array<String>] default attributes
      def default_attributes
        %w(updated logical_id type status status_reason)
      end
    end
  end
end
Update describe implementation
require 'sfn'
module Sfn
  class Command
    # Cloudformation describe command
    class Describe < Command
      include Sfn::CommandModule::Base
      # information available
      unless(defined?(AVAILABLE_DISPLAYS))
        AVAILABLE_DISPLAYS = [:resources, :outputs]
      end
      # Run the stack describe action
      #
      # Shows the sections selected via config flags (:resources,
      # :outputs); with no flags set, shows all of them. Exits non-zero
      # when the stack cannot be found.
      def execute!
        stack_name = name_args.last
        stack = provider.connection.stacks.get(stack_name)
        if(stack)
          display = [].tap do |to_display|
            AVAILABLE_DISPLAYS.each do |display_option|
              if(config[display_option])
                to_display << display_option
              end
            end
          end
          display = AVAILABLE_DISPLAYS.dup if display.empty?
          display.each do |display_method|
            self.send(display_method, stack)
          end
        else
          ui.fatal "Failed to find requested stack: #{ui.color(stack_name, :bold, :red)}"
          exit -1
        end
      end
      # Display resources
      #
      # Sorted most-recently-updated first and rendered as a table whose
      # column widths are sized to the widest cell (or header) plus padding.
      #
      # @param stack [Miasma::Models::Orchestration::Stack]
      def resources(stack)
        stack_resources = stack.resources.all.sort do |x, y|
          y.updated <=> x.updated
        end.map do |resource|
          Smash.new(resource.attributes)
        end
        ui.table(self) do
          table(:border => false) do
            row(:header => true) do
              allowed_attributes.each do |attr|
                column as_title(attr), :width => stack_resources.map{|r| r[attr].to_s.length}.push(as_title(attr).length).max + 2
              end
            end
            stack_resources.each do |resource|
              row do
                allowed_attributes.each do |attr|
                  column resource[attr]
                end
              end
            end
          end
        end.display
      end
      # Display outputs
      #
      # Output keys are re-formatted from CamelCase/snake_case into
      # space-separated capitalized words.
      #
      # @param stack [Miasma::Models::Orchestration::Stack]
      def outputs(stack)
        ui.info "Outputs for stack: #{ui.color(stack.name, :bold)}"
        unless(stack.outputs.empty?)
          stack.outputs.each do |output|
            key, value = output.key, output.value
            key = snake(key).to_s.split('_').map(&:capitalize).join(' ')
            ui.info ['  ', ui.color("#{key}:", :bold), value].join(' ')
          end
        else
          ui.info "  #{ui.color('No outputs found')}"
        end
      end
      # @return [Array<String>] default attributes
      def default_attributes
        %w(updated logical_id type status status_reason)
      end
    end
  end
end
|
require "sidekiq/logging/json/version"
require "sidekiq/logging/json"
require "json"
module Sidekiq
  module Logging
    module Json
      # Log formatter emitting one JSON object per Sidekiq log line.
      class Logger < Sidekiq::Logging::Pretty
        # Provide a call() method that returns the formatted message.
        def call(severity, time, program_name, message)
          {
            '@timestamp' => time.utc.iso8601,
            :pid => ::Process.pid,
            :tid => "TID-#{Thread.current.object_id.to_s(36)}",
            :context => "#{context}",
            :severity => severity,
            :program_name => program_name,
            :type => 'sidekiq',
            :status => nil,
            :run_time => nil
          }.merge(process_message(message)).to_json + "\r\n"
        end
        # Extracts job status / run time from Sidekiq's standard
        # "INFO: start", "INFO: done: 1.2 sec" and "INFO: fail: 1.2 sec"
        # messages. The regex must include "fail", otherwise failed jobs
        # are logged with a nil status.
        def process_message(message)
          result = message.match(/INFO: (done|start|fail)(: ([0-9\.]+) sec)?$/)
          return { message: message } unless result
          {
            :message => message,            # The full message
            :status => result[1],           # start, done or fail
            :run_time => result[3] && result[3].to_f # run time in seconds
          }
        end
      end
    end
  end
end
Fix parsing for runtime, status when the status is reported failure.
require "sidekiq/logging/json/version"
require "sidekiq/logging/json"
require "json"
module Sidekiq
  module Logging
    module Json
      # Log formatter emitting one JSON object per Sidekiq log line.
      class Logger < Sidekiq::Logging::Pretty
        # Provide a call() method that returns the formatted message.
        def call(severity, time, program_name, message)
          {
            '@timestamp' => time.utc.iso8601,
            :pid => ::Process.pid,
            :tid => "TID-#{Thread.current.object_id.to_s(36)}",
            :context => "#{context}",
            :severity => severity,
            :program_name => program_name,
            :type => 'sidekiq',
            :status => nil,
            :run_time => nil
          }.merge(process_message(message)).to_json + "\r\n"
        end
        # Extracts job status / run time from Sidekiq's standard
        # "INFO: start", "INFO: done: 1.2 sec" and "INFO: fail: 1.2 sec"
        # messages; falls back to just the raw message when none match.
        def process_message(message)
          result = message.match(/INFO: (done|start|fail)(: ([0-9\.]+) sec)?$/)
          return { message: message } unless result
          {
            :message => message, # The full message
            :status => result[1], # start, done or fail
            :run_time => result[3] && result[3].to_f # run time in seconds
          }
        end
      end
    end
  end
end
|
require 'singleton'
require 'em-websocket'
require 'connection'
module SimplePusher
  # Singleton EventMachine websocket server.
  class Server
    include Singleton
    attr :host, :port
    # Singleton#instance invokes new with no arguments, so the previous
    # options-hash parameter could never receive a value; read the port
    # from the global configuration instead.
    def initialize
      @host = "0.0.0.0"
      @port = SimplePusher.configuration.port
    end
    # Boots the websocket server and wires up connection lifecycle callbacks.
    def self.start
      $stderr.puts "Start SimplePush ..." if SimplePusher.configuration.debug
      EM::WebSocket.run(:host => instance.host, :port => instance.port) do |socket|
        socket.onopen do |handshake|
          $stderr.puts "on open:" + handshake.inspect if SimplePusher.configuration.debug
          Connection.add_client(socket)
        end
        socket.onclose do
          $stderr.puts "on close"
          Connection.remove_client(socket)
        end
        socket.onmessage do |message|
          $stderr.puts "on message: " + message if SimplePusher.configuration.debug
          #action, *message = msg.split(":")
          #
          #case action
          #  when 'broadcast'
          #    message = message.join(":")
          #    Client.broadcast(message)
          #  when 'emit'
          #    event = message[0]
          #    message = message[1..-1].join(":")
          #    #TODO
          #  else
          #
          #end
        end
      end
    end
  end
end
Fixed server.rb argument.
require 'singleton'
require 'em-websocket'
require 'connection'
module SimplePusher
  # Singleton EventMachine websocket server.
  class Server
    include Singleton
    attr :host, :port
    # Singleton#instance invokes new with no arguments, so configuration
    # (rather than an options hash) supplies the port.
    def initialize
      @host = "0.0.0.0"
      @port = SimplePusher.configuration.port
    end
    # Boots the websocket server and wires up connection lifecycle callbacks.
    def self.start
      $stderr.puts "Start SimplePush ..." if SimplePusher.configuration.debug
      EM::WebSocket.run(:host => instance.host, :port => instance.port) do |socket|
        socket.onopen do |handshake|
          $stderr.puts "on open:" + handshake.inspect if SimplePusher.configuration.debug
          Connection.add_client(socket)
        end
        socket.onclose do
          $stderr.puts "on close"
          Connection.remove_client(socket)
        end
        socket.onmessage do |message|
          $stderr.puts "on message: " + message if SimplePusher.configuration.debug
          #action, *message = msg.split(":")
          #
          #case action
          #  when 'broadcast'
          #    message = message.join(":")
          #    Client.broadcast(message)
          #  when 'emit'
          #    event = message[0]
          #    message = message[1..-1].join(":")
          #    #TODO
          #  else
          #
          #end
        end
      end
    end
  end
end
require 'sinatra/json'
require 'sinatra/base'
# $KCODE stopped being effective in Ruby 1.9 (assigning it warns);
# only set it on older rubies.
$KCODE = "UTF-8" unless RUBY_VERSION > "1.9.0"
module Sinatra
#
# = Sinatra::RespondWith
#
# These extensions let Sinatra automatically choose what template to render or
# action to perform depending on the request's Accept header.
#
# Example:
#
# # Without Sinatra::RespondWith
# get '/' do
# data = { :name => 'example' }
# request.accept.each do |type|
# case type.to_s
# when 'text/html'
# halt haml(:index, :locals => data)
# when 'text/json'
# halt data.to_json
# when 'application/atom+xml'
# halt nokogiri(:'index.atom', :locals => data)
# when 'application/xml', 'text/xml'
# halt nokogiri(:'index.xml', :locals => data)
# when 'text/plain'
# halt 'just an example'
# end
# end
# error 406
# end
#
# # With Sinatra::RespondWith
# get '/' do
# respond_with :index, :name => 'example' do |f|
# f.txt { 'just an example' }
# end
# end
#
# Both helper methods +respond_to+ and +respond_with+ let you define custom
# handlers like the one above for +text/plain+. +respond_with+ additionally
# takes a template name and/or an object to offer the following default
# behavior:
#
# * If a template name is given, search for a template called
# +name.format.engine+ (+index.xml.nokogiri+ in the above example).
# * If a template name is given, search for a templated called +name.engine+
# for engines known to result in the requested format (+index.haml+).
# * If a file extension associated with the mime type is known to Sinatra, and
# the object responds to +to_extension+, call that method and use the result
# (+data.to_json+).
#
# == Security
#
# Since methods are triggered based on client input, this can lead to security
# issues (but not as severe as those might appear in the first place: keep in
# mind that only known file extensions are used). You should limit
# the possible formats you serve.
#
# This is possible with the +provides+ condition:
#
# get '/', :provides => [:html, :json, :xml, :atom] do
# respond_with :index, :name => 'example'
# end
#
# However, since you have to set +provides+ for every route, this extension
# adds an app global (class method) `respond_to`, that lets you define content
# types for all routes:
#
# respond_to :html, :json, :xml, :atom
# get('/a') { respond_with :index, :name => 'a' }
# get('/b') { respond_with :index, :name => 'b' }
#
# == Custom Types
#
# Use the +on+ method for defining actions for custom types:
#
# get '/' do
# respond_to do |f|
# f.xml { nokogiri :index }
# f.on('application/custom') { custom_action }
# f.on('text/*') { data.to_s }
# f.on('*/*') { "matches everything" }
# end
# end
#
# Definition order does not matter.
module RespondWith
class Format
def initialize(app)
@app, @map, @generic, @default = app, {}, {}, nil
end
def on(type, &block)
@app.settings.mime_types(type).each do |mime|
case mime
when '*/*' then @default = block
when /^([^\/]+)\/\*$/ then @generic[$1] = block
else @map[mime] = block
end
end
end
def finish
yield self if block_given?
mime_type = @app.content_type ||
@app.request.preferred_type(@map.keys) ||
@app.request.preferred_type ||
'text/html'
type = mime_type.split(/\s*;\s*/, 2).first
handlers = [@map[type], @generic[type[/^[^\/]+/]], @default].compact
handlers.each do |block|
if result = block.call(type)
@app.content_type mime_type
@app.halt result
end
end
@app.halt 406
end
def method_missing(method, *args, &block)
return super if args.any? or block.nil? or not @app.mime_type(method)
on(method, &block)
end
end
module Helpers
include Sinatra::JSON
def respond_with(template, object = nil, &block)
object, template = template, nil unless Symbol === template
format = Format.new(self)
format.on "*/*" do |type|
exts = settings.ext_map[type]
exts << :xml if type.end_with? '+xml'
if template
args = template_cache.fetch(type, template) { template_for(template, exts) }
if args.any?
locals = { :object => object }
locals.merge! object.to_hash if object.respond_to? :to_hash
renderer = args.first
options = args[1..-1] + [{:locals => locals}]
halt send(renderer, *options)
end
end
if object
exts.each do |ext|
halt json(object) if ext == :json
next unless object.respond_to? method = "to_#{ext}"
halt(*object.send(method))
end
end
false
end
format.finish(&block)
end
def respond_to(&block)
Format.new(self).finish(&block)
end
private
def template_for(name, exts)
# in production this is cached, so don't worry too much about runtime
possible = []
settings.template_engines[:all].each do |engine|
exts.each { |ext| possible << [engine, "#{name}.#{ext}"] }
end
exts.each do |ext|
settings.template_engines[ext].each { |e| possible << [e, name] }
end
possible.each do |engine, template|
# not exactly like Tilt[engine], but does not trigger a require
if Tilt.respond_to?(:mappings)
klass = Tilt.mappings[Tilt.normalize(engine)].first
else
klass = Tilt[engine]
end
find_template(settings.views, template, klass) do |file|
next unless File.exist? file
return settings.rendering_method(engine) << template.to_sym
end
end
[] # nil or false would not be cached
end
end
# Rebuilds ext_map (mime type => list of extension Symbols) from Rack's
# extension table. Re-run after every mime type registration (see mime_type)
# so custom types get mapped as well.
def remap_extensions
  ext_map.clear
  Rack::Mime::MIME_TYPES.each { |e,t| ext_map[t] << e[1..-1].to_sym }
  # Fix: use Symbols like every entry built above. The previous String
  # forms ('js'/'xml') never matched the Symbol-keyed lookups downstream
  # (template_engines[ext], ext == :json in respond_with).
  ext_map['text/javascript'] << :js
  ext_map['text/xml'] << :xml
end
# Hook Sinatra's mime type registration so the extension map stays in sync
# whenever a type is added or looked up. Returns whatever super returns.
def mime_type(*)
  super.tap { remap_extensions }
end
# App-level format whitelist. With arguments it accumulates formats to be
# used as a :provides condition on every route; without arguments it reads
# the list, falling back to the superclass's setting when unset here.
def respond_to(*formats)
  if formats.empty?
    if @respond_to.nil? && superclass.respond_to?(:respond_to)
      superclass.respond_to
    else
      @respond_to
    end
  else
    @respond_to ||= []
    @respond_to.concat formats
  end
end
# Returns the helper invocation for +engine+ as an argument prefix:
# [:haml] when Sinatra has a dedicated helper, [:mab] for markaby,
# otherwise the generic [:render, :engine] pair.
def rendering_method(engine)
  if Sinatra::Templates.method_defined? engine
    [engine]
  elsif engine.to_sym == :markaby
    [:mab]
  else
    [:render, :engine]
  end
end
private

# Route-compilation hook: apply the app-wide respond_to formats as a
# :provides condition on every route that doesn't set one itself.
def compile!(verb, path, block, options = {})
  options[:provides] ||= respond_to if respond_to
  super
end
# Adjusts an engines map for JRuby: swaps :yajl for the pure-Ruby
# :json_pure and drops engines that are unsupported there. Mutates and
# returns +engs+.
def self.jrubyify(engs)
  unsupported = [:markdown]
  engs.each_key do |format|
    adjusted = engs[format].map { |eng| eng == :yajl ? :json_pure : eng }
    engs[format] = adjusted.reject { |eng| unsupported.include?(eng) }
  end
  engs
end
# Maps output formats to template engines able to produce them. :all lists
# every engine Sinatra ships a template helper for (plus :mab, minus the
# non-engine :find_template helper and :markaby, which :mab covers).
# Unknown formats default to []. On JRuby the map is adjusted (jrubyify).
def self.engines
  engines = {
    :css => [:less, :sass, :scss],
    :xml => [:builder, :nokogiri],
    :js => [:coffee],
    :html => [:erb, :erubis, :haml, :slim, :liquid, :radius, :mab,
              :markdown, :textile, :rdoc],
    :all => (Sinatra::Templates.instance_methods.map(&:to_sym) +
             [:mab] - [:find_template, :markaby]),
    :json => [:yajl],
  }
  engines.default = []
  (defined? JRUBY_VERSION) ? jrubyify(engines) : engines
end
# Sinatra extension hook: set up per-app settings and mix in the helpers.
def self.registered(base)
  # ext_map: mime type => [extension Symbols]; filled by remap_extensions.
  base.set :ext_map, Hash.new { |h,k| h[k] = [] }
  base.set :template_engines, engines
  base.remap_extensions
  base.helpers Helpers
end
end
# Make the extension available to classic (top-level) Sinatra apps and
# expose the app-level `respond_to` setting at the top level as well.
register RespondWith
Delegator.delegate :respond_to
end
Only assign $KCODE if the Ruby version is < 1.9.0.
require 'sinatra/json'
require 'sinatra/base'
$KCODE = "UTF-8" unless RUBY_VERSION > "1.9.0"
module Sinatra
#
# = Sinatra::RespondWith
#
# These extensions let Sinatra automatically choose what template to render or
# action to perform depending on the request's Accept header.
#
# Example:
#
# # Without Sinatra::RespondWith
# get '/' do
# data = { :name => 'example' }
# request.accept.each do |type|
# case type.to_s
# when 'text/html'
# halt haml(:index, :locals => data)
# when 'text/json'
# halt data.to_json
# when 'application/atom+xml'
# halt nokogiri(:'index.atom', :locals => data)
# when 'application/xml', 'text/xml'
# halt nokogiri(:'index.xml', :locals => data)
# when 'text/plain'
# halt 'just an example'
# end
# end
# error 406
# end
#
# # With Sinatra::RespondWith
# get '/' do
# respond_with :index, :name => 'example' do |f|
# f.txt { 'just an example' }
# end
# end
#
# Both helper methods +respond_to+ and +respond_with+ let you define custom
# handlers like the one above for +text/plain+. +respond_with+ additionally
# takes a template name and/or an object to offer the following default
# behavior:
#
# * If a template name is given, search for a template called
# +name.format.engine+ (+index.xml.nokogiri+ in the above example).
# * If a template name is given, search for a templated called +name.engine+
# for engines known to result in the requested format (+index.haml+).
# * If a file extension associated with the mime type is known to Sinatra, and
# the object responds to +to_extension+, call that method and use the result
# (+data.to_json+).
#
# == Security
#
# Since methods are triggered based on client input, this can lead to security
# issues (but not as severe as those might appear in the first place: keep in
# mind that only known file extensions are used). You should limit
# the possible formats you serve.
#
# This is possible with the +provides+ condition:
#
# get '/', :provides => [:html, :json, :xml, :atom] do
# respond_with :index, :name => 'example'
# end
#
# However, since you have to set +provides+ for every route, this extension
# adds an app global (class method) `respond_to`, that lets you define content
# types for all routes:
#
# respond_to :html, :json, :xml, :atom
# get('/a') { respond_with :index, :name => 'a' }
# get('/b') { respond_with :index, :name => 'b' }
#
# == Custom Types
#
# Use the +on+ method for defining actions for custom types:
#
# get '/' do
# respond_to do |f|
# f.xml { nokogiri :index }
# f.on('application/custom') { custom_action }
# f.on('text/*') { data.to_s }
# f.on('*/*') { "matches everything" }
# end
# end
#
# Definition order does not matter.
module RespondWith
# Collects mime-type handlers registered via #on (or the method_missing
# shorthand, e.g. `f.json { ... }`) and dispatches the best match in #finish.
class Format
  def initialize(app)
    # @map: exact mime type => handler; @generic: type family => handler;
    # @default: handler for */*.
    @app, @map, @generic, @default = app, {}, {}, nil
  end

  # Registers +block+ for +type+ (a mime type or extension). Wildcards go
  # to the default/generic tables so lookup order in #finish stays simple.
  def on(type, &block)
    @app.settings.mime_types(type).each do |mime|
      case mime
      when '*/*' then @default = block
      when /^([^\/]+)\/\*$/ then @generic[$1] = block
      else @map[mime] = block
      end
    end
  end

  # Picks the response mime type (explicit content_type, then the client's
  # preference among registered types, then any preference, then text/html),
  # runs the most specific matching handler that returns a truthy result,
  # and halts with it. Halts 406 when no handler produces a response.
  def finish
    yield self if block_given?
    mime_type = @app.content_type ||
      @app.request.preferred_type(@map.keys) ||
      @app.request.preferred_type ||
      'text/html'
    # Strip any media-type parameters (e.g. "; charset=utf-8").
    type = mime_type.split(/\s*;\s*/, 2).first
    handlers = [@map[type], @generic[type[/^[^\/]+/]], @default].compact
    handlers.each do |block|
      if result = block.call(type)
        @app.content_type mime_type
        @app.halt result
      end
    end
    @app.halt 406
  end

  # Shorthand: `f.json { ... }` == `f.on(:json) { ... }` for known types.
  def method_missing(method, *args, &block)
    return super if args.any? or block.nil? or not @app.mime_type(method)
    on(method, &block)
  end
end
# Instance-level helpers mixed into the app; provide respond_with/respond_to.
module Helpers
  include Sinatra::JSON

  # Content-negotiated response.
  #
  # When +template+ is a Symbol it names a view; otherwise the first argument
  # is treated as the object to serialize. For each acceptable mime type this
  # tries, in order: a matching template ("name.ext" rendered by any engine,
  # then plain "name" for engines producing that format), then serialization
  # methods on +object+ (json via sinatra/json, then to_<ext>). Returning
  # false lets Format#finish fall through and eventually halt 406. An
  # optional block can register custom per-type handlers.
  def respond_with(template, object = nil, &block)
    # Allow respond_with(object): shift arguments when no template is given.
    object, template = template, nil unless Symbol === template
    format = Format.new(self)
    format.on "*/*" do |type|
      exts = settings.ext_map[type]
      # Types like application/atom+xml can also be served by *.xml templates.
      exts << :xml if type.end_with? '+xml'
      if template
        # Memoized per (type, template); template_for does the disk search.
        args = template_cache.fetch(type, template) { template_for(template, exts) }
        if args.any?
          locals = { :object => object }
          locals.merge! object.to_hash if object.respond_to? :to_hash
          # args is [rendering_method, (engine,) template_symbol].
          renderer = args.first
          options = args[1..-1] + [{:locals => locals}]
          halt send(renderer, *options)
        end
      end
      if object
        exts.each do |ext|
          halt json(object) if ext == :json
          next unless object.respond_to? method = "to_#{ext}"
          halt(*object.send(method))
        end
      end
      false
    end
    format.finish(&block)
  end

  # Bare content negotiation: all handlers come from the given block.
  def respond_to(&block)
    Format.new(self).finish(&block)
  end

  private

  # Returns [rendering_method..., template_symbol] for the first template
  # found on disk that can produce one of +exts+, or [] when none exists.
  def template_for(name, exts)
    # in production this is cached, so don't worry too much about runtime
    possible = []
    # Prefer explicit "name.ext" templates rendered by any known engine...
    settings.template_engines[:all].each do |engine|
      exts.each { |ext| possible << [engine, "#{name}.#{ext}"] }
    end
    # ...then plain "name" templates for engines that produce that format.
    exts.each do |ext|
      settings.template_engines[ext].each { |e| possible << [e, name] }
    end
    possible.each do |engine, template|
      # not exactly like Tilt[engine], but does not trigger a require
      if Tilt.respond_to?(:mappings)
        klass = Tilt.mappings[Tilt.normalize(engine)].first
      else
        klass = Tilt[engine]
      end
      find_template(settings.views, template, klass) do |file|
        next unless File.exist? file
        return settings.rendering_method(engine) << template.to_sym
      end
    end
    [] # nil or false would not be cached
  end
end
# Rebuilds ext_map (mime type => list of extension Symbols) from Rack's
# extension table. Re-run after every mime type registration (see mime_type)
# so custom types get mapped as well.
def remap_extensions
  ext_map.clear
  Rack::Mime::MIME_TYPES.each { |e,t| ext_map[t] << e[1..-1].to_sym }
  # Fix: use Symbols like every entry built above. The previous String
  # forms ('js'/'xml') never matched the Symbol-keyed lookups downstream
  # (template_engines[ext], ext == :json in respond_with).
  ext_map['text/javascript'] << :js
  ext_map['text/xml'] << :xml
end
# Hook Sinatra's mime type registration so the extension map stays in sync
# whenever a type is added or looked up. Returns whatever super returns.
def mime_type(*)
  super.tap { remap_extensions }
end
# App-level format whitelist. With arguments it accumulates formats to be
# used as a :provides condition on every route; without arguments it reads
# the list, falling back to the superclass's setting when unset here.
def respond_to(*formats)
  if formats.empty?
    if @respond_to.nil? && superclass.respond_to?(:respond_to)
      superclass.respond_to
    else
      @respond_to
    end
  else
    @respond_to ||= []
    @respond_to.concat formats
  end
end
# Returns the helper invocation for +engine+ as an argument prefix:
# [:haml] when Sinatra has a dedicated helper, [:mab] for markaby,
# otherwise the generic [:render, :engine] pair.
def rendering_method(engine)
  if Sinatra::Templates.method_defined? engine
    [engine]
  elsif engine.to_sym == :markaby
    [:mab]
  else
    [:render, :engine]
  end
end
private

# Route-compilation hook: apply the app-wide respond_to formats as a
# :provides condition on every route that doesn't set one itself.
def compile!(verb, path, block, options = {})
  options[:provides] ||= respond_to if respond_to
  super
end
# Adjusts an engines map for JRuby: swaps :yajl for the pure-Ruby
# :json_pure and drops engines that are unsupported there. Mutates and
# returns +engs+.
def self.jrubyify(engs)
  unsupported = [:markdown]
  engs.each_key do |format|
    adjusted = engs[format].map { |eng| eng == :yajl ? :json_pure : eng }
    engs[format] = adjusted.reject { |eng| unsupported.include?(eng) }
  end
  engs
end
# Maps output formats to template engines able to produce them. :all lists
# every engine Sinatra ships a template helper for (plus :mab, minus the
# non-engine :find_template helper and :markaby, which :mab covers).
# Unknown formats default to []. On JRuby the map is adjusted (jrubyify).
def self.engines
  engines = {
    :css => [:less, :sass, :scss],
    :xml => [:builder, :nokogiri],
    :js => [:coffee],
    :html => [:erb, :erubis, :haml, :slim, :liquid, :radius, :mab,
              :markdown, :textile, :rdoc],
    :all => (Sinatra::Templates.instance_methods.map(&:to_sym) +
             [:mab] - [:find_template, :markaby]),
    :json => [:yajl],
  }
  engines.default = []
  (defined? JRUBY_VERSION) ? jrubyify(engines) : engines
end
# Sinatra extension hook: set up per-app settings and mix in the helpers.
def self.registered(base)
  # ext_map: mime type => [extension Symbols]; filled by remap_extensions.
  base.set :ext_map, Hash.new { |h,k| h[k] = [] }
  base.set :template_engines, engines
  base.remap_extensions
  base.helpers Helpers
end
end
# Make the extension available to classic (top-level) Sinatra apps and
# expose the app-level `respond_to` setting at the top level as well.
register RespondWith
Delegator.delegate :respond_to
end
|
require 'ostruct'
module SkeletonRuby
  # Gem-wide configuration store backed by an OpenStruct.
  #
  # Usage:
  #   SkeletonRuby::Config.configure { |c| c.api_key = "..." }
  #   SkeletonRuby::Config.api_key  # => "..."
  module Config
    # Yields the underlying store so callers can set options in bulk.
    def self.configure
      yield(config) if block_given?
    end

    # Forward unknown readers/writers to the OpenStruct store.
    def self.method_missing(method, *args, &block)
      config.send(method, *args, &block)
    end

    # Fix: method_missing without respond_to_missing? makes respond_to?
    # lie about the delegated accessors; keep it truthful.
    def self.respond_to_missing?(method, include_private = false)
      config.respond_to?(method) || super
    end

    # NOTE(review): a bare `private` does not affect `def self.` methods,
    # so `config` remains publicly callable; use `private_class_method`
    # if it must truly be hidden (left as-is for compatibility).
    private

    # Lazily-created backing store.
    def self.config
      @@config ||= OpenStruct.new
    end
  end
end
Added inspect and respond_to_missing? to config.
require 'ostruct'
module SkeletonRuby
  # Gem-wide configuration store backed by an OpenStruct.
  #
  # Usage:
  #   SkeletonRuby::Config.configure { |c| c.api_key = "..." }
  #   SkeletonRuby::Config.api_key  # => "..."
  module Config
    # Yields the underlying store so callers can set options in bulk.
    def self.configure
      yield(config) if block_given?
    end

    # Show the stored options rather than the bare module.
    def self.inspect
      config.inspect
    end

    # Forward unknown readers/writers to the OpenStruct store.
    def self.method_missing(method, *args, &block)
      config.send(method, *args, &block)
    end

    # Keep respond_to? truthful for the delegated accessors above.
    def self.respond_to_missing?(method, include_private_methods = false)
      config.respond_to?(method) || super
    end

    # NOTE(review): a bare `private` does not affect `def self.` methods,
    # so `config` remains publicly callable; use `private_class_method`
    # if it must truly be hidden (left as-is for compatibility).
    private

    # Lazily-created backing store. Fix: use a module-level instance
    # variable instead of a class variable (@@config) — class variables
    # are shared across the inheritance tree and are a known foot-gun.
    def self.config
      @config ||= OpenStruct.new
    end
  end
end
|
Add podspec
# coding: utf-8
# CocoaPods specification for NSErrorPointerWrapper.
Pod::Spec.new do |s|
  s.name     = "NSErrorPointerWrapper"
  s.version  = "0.1.0"
  s.summary  = "Wrapper for handling iOS SDK APIs that take in error by reference. Written in Swift."
  # Fix: typo "NSErrorPoint" -> "NSErrorPointer" in the description below.
  s.description = <<-DESC
                  Swift wrapper functions that simplify handling of methods that take in `NSErrorPointer`. Some of the benefits:
                  - no more declaring that pesky `NSError` variable to pass by reference (use `$0` shorthand argument name in closure passed to wrapper)
                  - handlers chaining for success and failure
                  - streamlined downcasting handling (needed because often the result of those methods is `AnyObject?` instance)
                  DESC
  s.homepage = "https://github.com/mr-v/NSErrorPointerWrapper"
  s.license  = { :type => "MIT", :file => "LICENSE" }
  s.author   = { "mr-v" => "witold.skibniewski@gmail.com" }
  s.platform = :ios, "8.0"
  # s.ios.deployment_target = "7.0"
  # s.osx.deployment_target = "10.9"
  s.source       = { :git => "https://github.com/mr-v/NSErrorPointerWrapper.git", :tag => "0.1.0" }
  s.source_files = "NSErrorPointerWrapper"
end
|
# Homebrew formula: builds NETGEN's nglib (GUI disabled, library only).
class Nglib < Formula
  desc "C++ Library of NETGEN's tetrahedral mesh generator"
  homepage "https://sourceforge.net/projects/netgen-mesher/"
  url "https://downloads.sourceforge.net/project/netgen-mesher/netgen-mesher/5.3/netgen-5.3.1.tar.gz"
  sha256 "cb97f79d8f4d55c00506ab334867285cde10873c8a8dc783522b47d2bc128bf9"

  bottle do
    cellar :any
    rebuild 1
    sha256 "89dcf7bde5bec5a03f8c6810cfb5848082c64f68bcdd816714f0f925b98fd3b5" => :sierra
    sha256 "6eb7f3cf7a00c68f351816970408f780264855d7d86365a427d19c81e803d606" => :el_capitan
    sha256 "60161c1f084017f4ff9ece29807988924137150e979f176d2ad4ebad3e0fd64c" => :yosemite
  end

  # These two conflict with each other, so we'll have at most one.
  depends_on "opencascade" => :optional
  depends_on "oce" => :optional

  # Patch three issues:
  # Makefile - remove TCL scripts that aren't required without NETGEN.
  # configure - remove TCL libs that caused issues with ld (#3624).
  # Partition_Loop2d.cxx - Fix PI that was used rather than M_PI
  patch :DATA

  def install
    args = %W[
      --disable-dependency-tracking
      --prefix=#{prefix}
      --disable-gui
      --enable-nglib
    ]

    # Enable the CAD geometry kernel when either OCC flavor was requested.
    if build.with?("opencascade") || build.with?("oce")
      args << "--enable-occ"
      cad_kernel = Formula[build.with?("opencascade") ? "opencascade" : "oce"]
      if build.with? "opencascade"
        args << "--with-occ=#{cad_kernel.opt_prefix}/include"
        # A couple mesh output functions were dropped in OpenCASCADE 6.8.1
        # Would fix via patch, but the relevant files has windows line endings,
        # which seem to cause problems when embedded in DATA section patches.
        # (The sed calls comment out the offending lines instead.)
        system "sed", "-i", "-e", "s/\\(.*RelativeMode.*\\)/\\/\\/ \\1/",
               "#{buildpath}/libsrc/occ/occgeom.cpp"
        system "sed", "-i", "-e", "s/\\(.*SetDeflection.*\\)/\\/\\/ \\1/",
               "#{buildpath}/libsrc/occ/occgeom.cpp"
      else
        args << "--with-occ=#{cad_kernel.opt_prefix}/include/oce"
        # These fix problematic hard-coded paths in the netgen make file
        args << "CPPFLAGS=-I#{cad_kernel.opt_prefix}/include/oce"
        args << "LDFLAGS=-L#{cad_kernel.opt_prefix}/lib/"
      end
    end

    system "./configure", *args
    system "make", "install"

    # The nglib installer doesn't include some important headers by default.
    # This follows a pattern used on other platforms to make a set of sub
    # directories within include/ to contain these headers.
    subdirs = ["csg", "general", "geom2d", "gprim", "include", "interface",
               "linalg", "meshing", "occ", "stlgeom", "visualization"]
    subdirs.each do |subdir|
      (include/"netgen"/subdir).mkpath
      (include/"netgen"/subdir).install Dir.glob("libsrc/#{subdir}/*.{h,hpp}")
    end
  end

  test do
    # Smoke test: initialize the library, create and free a mesh.
    (testpath/"test.cpp").write <<-EOS.undent
      #include<iostream>
      namespace nglib {
      #include <nglib.h>
      }
      int main(int argc, char **argv) {
      nglib::Ng_Init();
      nglib::Ng_Mesh *mesh(nglib::Ng_NewMesh());
      nglib::Ng_DeleteMesh(mesh);
      nglib::Ng_Exit();
      return 0;
      }
    EOS
    system ENV.cxx, "-Wall", "-o", "test", "test.cpp",
           "-I#{include}", "-L#{lib}", "-lnglib"
    system "./test"
  end
end
__END__
diff -ur a/configure b/configure
--- a/configure 2014-10-07 00:04:36.000000000 +1300
+++ b/configure 2016-11-12 21:43:00.000000000 +1300
@@ -15354,7 +15354,7 @@
OCCFLAGS="-DOCCGEOMETRY -I$occdir/inc -I/usr/include/opencascade"
- OCCLIBS="-L$occdir/lib -lTKernel -lTKGeomBase -lTKMath -lTKG2d -lTKG3d -lTKXSBase -lTKOffset -lTKFillet -lTKShHealing -lTKMesh -lTKMeshVS -lTKTopAlgo -lTKGeomAlgo -lTKBool -lTKPrim -lTKBO -lTKIGES -lTKBRep -lTKSTEPBase -lTKSTEP -lTKSTL -lTKSTEPAttr -lTKSTEP209 -lTKXDESTEP -lTKXDEIGES -lTKXCAF -lTKLCAF -lFWOSPlugin"
+ OCCLIBS="-L$occdir/lib -lFWOSPlugin"
# -lTKDCAF
diff -ur a/libsrc/occ/Partition_Loop2d.cxx b/libsrc/occ/Partition_Loop2d.cxx
--- a/libsrc/occ/Partition_Loop2d.cxx 2016-03-16 07:44:06.000000000 -0700
+++ b/libsrc/occ/Partition_Loop2d.cxx 2016-03-16 07:45:40.000000000 -0700
@@ -52,6 +52,10 @@
#include <gp_Pnt.hxx>
#include <gp_Pnt2d.hxx>
+#ifndef PI
+ #define PI M_PI
+#endif
+
//=======================================================================
//function : Partition_Loop2d
//purpose :
diff -ur a/ng/Makefile.in b/ng/Makefile.in
--- a/ng/Makefile.in 2014-10-06 04:04:37.000000000 -0700
+++ b/ng/Makefile.in 2016-03-19 14:43:51.000000000 -0700
@@ -327,10 +327,7 @@
# /opt/netgen/lib/libngsolve.a /opt/netgen/lib/libngcomp.a /opt/netgen/lib/libngcomp.a /opt/netgen/lib/libngfemng.a /opt/netgen/lib/libngmg.a /opt/netgen/lib/libngla.a /opt/netgen/lib/libngbla.a /opt/netgen/lib/libngstd.a -L/opt/intel/mkl/10.2.1.017/lib/em64t /opt/intel/mkl/10.2.1.017/lib/em64t/libmkl_solver_lp64.a -lmkl_intel_lp64 -lmkl_gnu_thread -lmkl_core
#
#
-dist_bin_SCRIPTS = dialog.tcl menustat.tcl ngicon.tcl ng.tcl \
-ngvisual.tcl sockets.tcl drawing.tcl nghelp.tcl ngshell.tcl \
-ngtesting.tcl parameters.tcl variables.tcl csgeom.tcl stlgeom.tcl \
-occgeom.tcl acisgeom.tcl netgen.ocf
+dist_bin_SCRIPTS =
netgen_LDFLAGS = -export-dynamic
all: all-am
nglib 5.3.1: opencascade 7.x compatibility
Closes #5171.
Signed-off-by: Jonathan Chang <0bd51b8a224feeddb634965eebae512adcd49a8d@gmail.com>
# Homebrew formula: builds NETGEN's nglib (GUI disabled, library only),
# with OpenCASCADE 7.x compatibility.
class Nglib < Formula
  desc "C++ Library of NETGEN's tetrahedral mesh generator"
  homepage "https://sourceforge.net/projects/netgen-mesher/"
  url "https://downloads.sourceforge.net/project/netgen-mesher/netgen-mesher/5.3/netgen-5.3.1.tar.gz"
  sha256 "cb97f79d8f4d55c00506ab334867285cde10873c8a8dc783522b47d2bc128bf9"
  revision 1

  bottle do
    cellar :any
    rebuild 1
    sha256 "89dcf7bde5bec5a03f8c6810cfb5848082c64f68bcdd816714f0f925b98fd3b5" => :sierra
    sha256 "6eb7f3cf7a00c68f351816970408f780264855d7d86365a427d19c81e803d606" => :el_capitan
    sha256 "60161c1f084017f4ff9ece29807988924137150e979f176d2ad4ebad3e0fd64c" => :yosemite
  end

  # These two conflict with each other, so we'll have at most one.
  depends_on "opencascade" => :recommended
  depends_on "oce" => :optional

  # Patch two main issues:
  # Makefile - remove TCL scripts that aren't required without NETGEN.
  # Partition_Loop2d.cxx - Fix PI that was used rather than M_PI
  patch do
    url "https://raw.githubusercontent.com/Homebrew/formula-patches/20850ac/nglib/define-PI-and-avoid-tcl-install.diff"
    sha256 "1f97e60328f6ab59e41d0fa096acbe07efd4c0a600d8965cc7dc5706aec25da4"
  end

  # OpenCASCADE 7.x compatibility patches
  if build.with? "opencascade"
    patch do
      url "https://raw.githubusercontent.com/Homebrew/formula-patches/20850ac/nglib/occt7.x-compatibility-patches.diff"
      sha256 "c3f222b47c5da2cf8f278718dbc599fab682546380210a86a6538c3f2d9f1b27"
    end
  end

  def install
    # OpenCASCADE 7.x headers need C++11.
    ENV.cxx11 if build.with? "opencascade"
    cad_kernel = Formula[build.with?("opencascade") ? "opencascade" : "oce"]

    # Set OCC search path to Homebrew prefix
    inreplace "configure" do |s|
      s.gsub!(%r{(OCCFLAGS="-DOCCGEOMETRY -I\$occdir/inc )(.*$)}, "\\1-I#{cad_kernel.opt_include}/#{cad_kernel}\"")
      s.gsub!(/(^.*OCCLIBS="-L.*)( -lFWOSPlugin")/, "\\1\"") if build.with? "opencascade"
      s.gsub!(%r{(OCCLIBS="-L\$occdir/lib)(.*$)}, "\\1\"") if OS.mac?
    end

    args = %W[
      --disable-dependency-tracking
      --prefix=#{prefix}
      --disable-gui
      --enable-nglib
    ]

    # Enable the CAD geometry kernel when either OCC flavor was requested.
    if build.with?("opencascade") || build.with?("oce")
      args << "--enable-occ"
      if build.with? "opencascade"
        args << "--with-occ=#{cad_kernel.opt_prefix}"
      else
        args << "--with-occ=#{cad_kernel.opt_prefix}/include/oce"
        # These fix problematic hard-coded paths in the netgen make file
        args << "CPPFLAGS=-I#{cad_kernel.opt_prefix}/include/oce"
        args << "LDFLAGS=-L#{cad_kernel.opt_prefix}/lib/"
      end
    end

    system "./configure", *args
    system "make", "install"

    # The nglib installer doesn't include some important headers by default.
    # This follows a pattern used on other platforms to make a set of sub
    # directories within include/ to contain these headers.
    subdirs = ["csg", "general", "geom2d", "gprim", "include", "interface",
               "linalg", "meshing", "occ", "stlgeom", "visualization"]
    subdirs.each do |subdir|
      (include/"netgen"/subdir).mkpath
      (include/"netgen"/subdir).install Dir.glob("libsrc/#{subdir}/*.{h,hpp}")
    end
  end

  test do
    # Smoke test: initialize the library, create and free a mesh.
    (testpath/"test.cpp").write <<-EOS.undent
      #include<iostream>
      namespace nglib {
      #include <nglib.h>
      }
      int main(int argc, char **argv) {
      nglib::Ng_Init();
      nglib::Ng_Mesh *mesh(nglib::Ng_NewMesh());
      nglib::Ng_DeleteMesh(mesh);
      nglib::Ng_Exit();
      return 0;
      }
    EOS
    system ENV.cxx, "-Wall", "-o", "test", "test.cpp",
           "-I#{include}", "-L#{lib}", "-lnglib", "-lTKIGES"
    system "./test"
  end
end
|
require 'uri'
module URI
  #
  # Expands a URI-decoded path into a normalized path, resolving the
  # "." and ".." segments.
  #
  # @param [String] path
  #   The path from a URI.
  #
  # @return [String]
  #   The expanded path.
  #
  # @example
  #   URI.expand_path('./path')        # => "path"
  #   URI.expand_path('test/../path')  # => "path"
  #   URI.expand_path('/test/path/')   # => "/test/path/"
  #   URI.expand_path('/test/../path') # => "/path"
  #
  def URI.expand_path(path)
    # Collapse runs of slashes, then split into segments that keep their
    # trailing slash (only the final segment may lack one).
    segments = path.gsub(/[\/]{2,}/, '/').scan(/[^\/]*\/|[^\/]+$/)
    stack = []
    segments.each do |segment|
      case segment
      when '..', '../'
        # ".." drops the previous segment, but never the root slash.
        stack.pop unless stack == ['/']
      when '.', './'
        # "." is a no-op.
      else
        stack << segment
      end
    end
    stack.empty? ? '/' : stack.join
  end
end
Removed an unnecessary check.
require 'uri'
module URI
  #
  # Expands a URI-decoded path into a normalized path, resolving the
  # "." and ".." segments.
  #
  # @param [String] path
  #   The path from a URI.
  #
  # @return [String]
  #   The expanded path.
  #
  # @example
  #   URI.expand_path('./path')        # => "path"
  #   URI.expand_path('test/../path')  # => "path"
  #   URI.expand_path('/test/path/')   # => "/test/path/"
  #   URI.expand_path('/test/../path') # => "/path"
  #
  def URI.expand_path(path)
    # Collapse runs of slashes, then split into segments that keep their
    # trailing slash (only the final segment may lack one).
    segments = path.gsub(/[\/]{2,}/, '/').scan(/[^\/]*\/|[^\/]+$/)
    stack = []
    segments.each do |segment|
      case segment
      when '..', '../'
        # ".." drops the previous segment (popping an empty stack is a no-op).
        stack.pop
      when '.', './'
        # "." is a no-op.
      else
        stack << segment
      end
    end
    stack.empty? ? '/' : stack.join
  end
end
|
module SportsSouth
  # Gem version string (semantic versioning: MAJOR.MINOR.PATCH).
  VERSION = "0.1.1"
end
Bump to v0.1.2
module SportsSouth
  # Gem version string (semantic versioning: MAJOR.MINOR.PATCH).
  VERSION = "0.1.2"
end
|
module Spree
  module Search
    # Sunspot/Solr-backed product searcher. Inherits from MultiDomain when
    # the spree-multi-domain extension is loaded, otherwise the core searcher.
    class Sunspot < defined?(Spree::Search::MultiDomain) ? Spree::Search::MultiDomain : Spree::Core::Search::Base
      # Runs the faceted catalog search and returns matching product records.
      def retrieve_products
        conf = Spree::Search.configuration
        # send(name) looks in @properties
        @properties[:sunspot] = ::Sunspot.search(Spree::Product) do
          # This is a little tricky to understand
          # - we are sending the block value as a method
          # - Spree::Search::Base is using method_missing() to return the param values
          conf.display_facets.each do |name|
            with("#{name}", send(name)) if send(name).present?
            facet("#{name}")
          end
          # Price arrives as a "low-high" string.
          with(:price, Range.new(price.split('-').first, price.split('-').last)) if price
          facet(:price) do
            conf.price_ranges.each do |range|
              row(range) do
                with(:price, Range.new(range.split('-').first, range.split('-').last))
              end
            end
            # TODO add greater than range
          end
          facet(:taxon_ids)
          with(:taxon_ids, send(:taxon).id) if send(:taxon)
          # Relevance sort is preceded by merchandising order.
          if send(:sort) == :score
            order_by :themesort
            order_by :position
            order_by :subposition
          end
          order_by sort.to_sym, order.to_sym
          with(:is_active, true)
          keywords(query)
          paginate(:page => page, :per_page => per_page)
        end
        self.sunspot.results
      end

      # Featured "theme" products: same facet/filter setup, but restricted
      # to featured items, theme-ordered, one large page (180).
      def retrieve_themes
        conf = Spree::Search.configuration
        # send(name) looks in @properties
        @properties[:sunspot] = ::Sunspot.search(Spree::Product) do
          # This is a little tricky to understand
          # - we are sending the block value as a method
          # - Spree::Search::Base is using method_missing() to return the param values
          conf.display_facets.each do |name|
            with("#{name}", send(name)) if send(name).present?
            facet("#{name}")
          end
          with(:price, Range.new(price.split('-').first, price.split('-').last)) if price
          facet(:price) do
            conf.price_ranges.each do |range|
              row(range) do
                with(:price, Range.new(range.split('-').first, range.split('-').last))
              end
            end
            # TODO add greater than range
          end
          facet(:taxon_ids)
          with(:taxon_ids, send(:taxon).id) if send(:taxon)
          facet :themesort
          #facet :saletype
          #with(:saletype, send(:saletype)) if send(:saletype)
          order_by :themesort
          with(:is_active, true)
          with(:featured, true)
          keywords(query)
          paginate(:page => 1, :per_page => 180)
        end
        self.sunspot.results
      end

      protected

      # Copies/normalizes incoming request params into @properties.
      def prepare(params)
        # super copies over :taxon and other variables into properties
        # as well as handles pagination
        super
        # TODO should do some parameter cleaning here: only allow valid search params to be passed through
        # the faceting partial is kind of 'dumb' about the params object: doesn't clean it out and just
        # dumps all the params into the query string
        @properties[:query] = params[:keywords]
        @properties[:price] = params[:price]
        @properties[:sort] = params[:sort] || :score
        @properties[:order] = params[:order] || :desc
        Spree::Search.configuration.display_facets.each do |name|
          @properties[name] ||= params["#{name}"]
        end
      end
    end
  end
end
Updated searcher: extracted shared Sunspot query builders and added hit-based retrieval methods.
module Spree
  module Search
    # Sunspot/Solr-backed product searcher. Inherits from MultiDomain when
    # the spree-multi-domain extension is loaded, otherwise the core searcher.
    class Sunspot < defined?(Spree::Search::MultiDomain) ? Spree::Search::MultiDomain : Spree::Core::Search::Base
      # Product records matching the current search.
      def retrieve_products
        retrieve_indexed.results
      end

      # Raw Sunspot hits for the current search (avoids loading records).
      def retrieve_hits
        retrieve_indexed.hits
      end

      # Featured "theme" product records.
      #
      # Fix: the previous code defined this public wrapper AND the protected
      # query builder below with the SAME name (`retrieve_themes`) — the
      # second definition silently replaced the first (and the wrapper would
      # have recursed infinitely anyway). The builder is now named
      # retrieve_themes_indexed.
      def retrieve_themes
        retrieve_themes_indexed.results
      end

      # Raw Sunspot hits for the themes search.
      def retrieve_theme_hits
        retrieve_themes_indexed.hits
      end

      protected

      # Builds and runs the main faceted search; returns the Sunspot search
      # object (also stored in @properties[:sunspot]).
      def retrieve_indexed
        conf = Spree::Search.configuration
        # send(name) looks in @properties
        @properties[:sunspot] = ::Sunspot.search(Spree::Product) do
          # This is a little tricky to understand
          # - we are sending the block value as a method
          # - Spree::Search::Base is using method_missing() to return the param values
          conf.display_facets.each do |name|
            with("#{name}", send(name)) if send(name).present?
            facet("#{name}")
          end
          with(:price, Range.new(price.split('-').first, price.split('-').last)) if price
          facet(:price) do
            conf.price_ranges.each do |range|
              row(range) do
                with(:price, Range.new(range.split('-').first, range.split('-').last))
              end
            end
            # TODO add greater than range
          end
          facet(:taxon_ids)
          with(:taxon_ids, send(:taxon).id) if send(:taxon)
          if send(:sort) == :score
            order_by :themesort
            order_by :position
            order_by :subposition
          end
          order_by sort.to_sym, order.to_sym
          with(:is_active, true)
          keywords(query)
          paginate(:page => page, :per_page => per_page)
        end
        self.sunspot
      end

      # Builds and runs the featured/themes search; returns the Sunspot
      # search object (single 180-item page, theme order).
      def retrieve_themes_indexed
        conf = Spree::Search.configuration
        # send(name) looks in @properties
        @properties[:sunspot] = ::Sunspot.search(Spree::Product) do
          conf.display_facets.each do |name|
            with("#{name}", send(name)) if send(name).present?
            facet("#{name}")
          end
          with(:price, Range.new(price.split('-').first, price.split('-').last)) if price
          facet(:price) do
            conf.price_ranges.each do |range|
              row(range) do
                with(:price, Range.new(range.split('-').first, range.split('-').last))
              end
            end
            # TODO add greater than range
          end
          facet(:taxon_ids)
          with(:taxon_ids, send(:taxon).id) if send(:taxon)
          facet :themesort
          #facet :saletype
          #with(:saletype, send(:saletype)) if send(:saletype)
          order_by :themesort
          with(:is_active, true)
          with(:featured, true)
          keywords(query)
          paginate(:page => 1, :per_page => 180)
        end
        self.sunspot
      end

      # Copies/normalizes incoming request params into @properties.
      def prepare(params)
        # super copies over :taxon and other variables into properties
        # as well as handles pagination
        super
        # TODO should do some parameter cleaning here: only allow valid search params to be passed through
        # the faceting partial is kind of 'dumb' about the params object: doesn't clean it out and just
        # dumps all the params into the query string
        @properties[:query] = params[:keywords]
        @properties[:price] = params[:price]
        @properties[:sort] = params[:sort] || :score
        @properties[:order] = params[:order] || :desc
        Spree::Search.configuration.display_facets.each do |name|
          @properties[name] ||= params["#{name}"]
        end
      end
    end
  end
end
|
require 'spree/core/search/base'
require 'spree/sunspot/filter/filter'
require 'spree/sunspot/filter/condition'
require 'spree/sunspot/filter/param'
require 'spree/sunspot/filter/query'
module Spree
module Sunspot
class Search < Spree::Core::Search::Base
# Most recent Sunspot search object built by this searcher (nil before any
# search runs). attr_reader replaces the hand-written getter.
attr_reader :solr_search
# Runs the main product search and returns Sunspot hits (not records).
#
# featured - when > 0, restrict to featured products.
# paginate - when false, fetch up to 1000 results in a single page.
# boosts   - optional fulltext field-boost hash (defaults to none; the
#            commented values document previously-used weights).
def retrieve_products(featured = 0, paginate = true, boosts = nil)
  if boosts.nil?
    boosts = {
      # :group => 4.0,
      # :name => 2.0,
      # :theme => 1.0,
      # :for => 1.0,
      # :material => 1.0,
      # :saletype => 1.0,
      # :pattern => 1.0,
      # :brand => 1.0,
      # :size => 1.0,
      # :shape => 1.0,
      # :color => 1.0,
      # :description => 0.8,
      # :category => 0.5,
      # :type => 0.5,
    }
  end
  @solr_search = ::Sunspot.new_search(Spree::Product) do |q|
    # Full text search
    unless @term.nil?
      # Drop the stop-word "party" from the query before matching.
      field_term = @term.split(' ').select{|t|!["party"].include?(t)}.join(' ')
      q.fulltext(field_term) do
        fields(
          :category,
          :group,
          :type,
          :name,
          :theme,
          :for,
          :pattern,
          :color,
          :material,
          :size,
          :brand,
          :taxon,
          :related_taxons,
          :shape
        )
        boost_fields(boosts)
        minimum_match 1
      end
    end
    # Add facets
    list = [:category,:group,:type,:theme,:color,:shape,:brand,:size,:material,:for,:saletype,:pattern]
    list.each do |facet|
      q.facet(facet)
    end
    # Filter results
    q.with(:is_active, true)
    if @properties[:price].present? then
      low = @properties[:price].first
      high = @properties[:price].last
      q.with(:price,low..high)
    end
    if featured > 0 then
      q.with(:featured, 1)
    end
    # Order results: relevance first (when searching), then merchandising.
    unless @term.nil?
      q.order_by(:score, :desc)
    end
    q.order_by(:in_stock, :desc)
    q.order_by(:missing_image)
    q.order_by(:theme)
    q.order_by(:position)
    q.order_by(:subposition)
    # Caller-supplied sort (e.g. "price,asc") is applied last.
    unless @properties[:order_by].empty?
      sort = @properties[:order_by].split(',')
      q.order_by(sort[0],sort[1])
    end
    # Paginate
    if paginate
      q.paginate(:page => @properties[:page] || 1, :per_page => @properties[:per_page] || Spree::Config[:products_per_page])
    else
      q.paginate(:page => @properties[:page] || 1, :per_page => 1000) # Could do Spree::Product.count, but we'll save the query and just assume 1000
    end
  end
  # Add filter queries based on url params
  unless @properties[:filters].blank?
    conditions = Spree::Sunspot::Filter::Query.new( @properties[:filters] )
    @solr_search = conditions.build_search( @solr_search )
  end
  @solr_search.execute
  @solr_search.hits
end
# Active products related to +theme+ (up to 1000), ordered like the main
# listing; returns Sunspot hits.
def retrieve_related(theme)
  @related = ::Sunspot.new_search(Spree::Product) do |q|
    q.with(:is_active, true)
    q.with(:related,theme.to_s)
    q.order_by(:missing_image)
    q.order_by(:in_stock, :desc)
    # Caller-supplied sort (e.g. "price,asc") takes precedence over theme order.
    unless @properties[:order_by].empty?
      sort = @properties[:order_by].split(',')
      q.order_by(sort[0],sort[1])
    end
    q.order_by(:theme)
    q.order_by(:position)
    q.order_by(:subposition)
    q.paginate(:page => 1, :per_page => 1000)
  end
  @related.execute
  @related.hits
end
# Returns the :group facet rows for products within +category+, honoring
# the current keyword / price / filter state.
def groups(category)
  @solr_search = ::Sunspot.new_search(Spree::Product) do |q|
    #list = [:category,:group,:type,:theme,:color,:shape,:brand,:size,:material,:for,:agegroup]
    #list.each do |facet|
    q.facet(:group, :limit => -1)
    #end
    q.with(:is_active, true)
    q.with(:category, category)
    q.keywords(keywords)
    unless @properties[:order_by].empty?
      sort = @properties[:order_by].split(',')
      q.order_by(sort[0],sort[1])
    end
    q.order_by(:position)
    q.order_by(:subposition)
    if @properties[:price].present? then
      low = @properties[:price].first
      high = @properties[:price].last
      q.with(:price,low..high)
    end
    q.paginate(:page => @properties[:page] || 1, :per_page => @properties[:per_page] || Spree::Config[:products_per_page])
  end
  unless @properties[:filters].blank?
    conditions = Spree::Sunspot::Filter::Query.new( @properties[:filters] )
    @solr_search = conditions.build_search( @solr_search )
  end
  @solr_search.execute
  # Only the :group facet is requested above, so it is the first facet.
  @solr_search.facets.first.rows
end
# Finds up to total_similar_products products similar to +product+ using
# Solr's MoreLikeThis over +field_names+, preserving relevance order.
def similar_products(product, *field_names)
  products_search = ::Sunspot.more_like_this(product) do
    fields *field_names
    boost_by_relevance true
    # Over-fetch (4x) so enough hits survive the active/in-stock scope below.
    paginate :per_page => total_similar_products * 4, :page => 1
  end
  # get active, in-stock products only.
  base_scope = get_common_base_scope
  hits = []
  if products_search.total > 0
    hits = products_search.hits.collect{|hit| hit.primary_key.to_i}
    base_scope = base_scope.where( ["#{Spree::Product.table_name}.id in (?)", hits] )
  else
    # No hits: force an empty result set.
    base_scope = base_scope.where( ["#{Spree::Product.table_name}.id = -1"] )
  end
  products_scope = @product_group.apply_on(base_scope)
  products_results = products_scope.includes([:images, :master]).page(1)
  # return top N most-relevant products (i.e. in the same order returned by more_like_this)
  @similar_products = products_results.sort_by{ |p| hits.find_index(p.id) }.shift(total_similar_products)
end
# Pre-search hook: returns a redirect spec ({} when none applies) instead
# of running a full search. Currently only redirects to a product when the
# keywords exactly match a variant SKU; the taxon/facet matching
# experiments below are kept commented out for reference.
def intercept
  return {} unless keywords
  #search keywords for matches in taxon names
  # Spree::Taxon.all(:order => 'length(name) desc, name').each do |cat|
  # if key.include? cat.name then
  # redirect.update(:permalink => cat)
  # key = key.gsub(cat.name,'')
  # break
  # end
  # end
  #select facets for
  # matches = [:category, :group, :type, :theme, :keyword, :color, :shape, :size, :pattern, :count]
  # @facet_match = ::Sunspot.new_search(Spree::Product) do |q|
  # matches.sort_by(&:length).reverse.each do |facet|
  # q.facet facet, :limit => -1, :sort => :count
  # end
  # q.paginate(page: 1, per_page: Spree::Product.count)
  # end
  # @facet_match.execute
  # @facet_match.hits.each do |hit|
  # if hit.stored(:sku) == keywords.upcase then
  # return {:product => hit }
  # end
  # end
  # matches.each do |face|
  # @facet_match.facet(face).rows.each do |row|
  # if key.match("\\b#{row.value}\\b") then
  # key = key.gsub(row.value,'')
  # redirect.update(face => row.value)
  # elsif key.match("\\b#{row.value.singularize}\\b")
  # key = key.gsub(row.value.singularize,'')
  # redirect.update(face => row.value)
  # end
  # end
  # end
  # Redirect to product on sku match
  product_match = Spree::Product.joins(:master).where("spree_variants.sku = ?", keywords).take(1)
  unless product_match.nil? || product_match.empty?
    return {:product => product_match[0]}
  end
  # redirect.update(:keywords => key.strip.split.join(' ')) unless key.strip.empty?
  # params["q"] = keywords
  {}
end
protected
# Base ActiveRecord scope for searches: active products, optionally within
# the current taxon, narrowed by the Solr keyword results and any extra
# search scopes.
def get_base_scope
  scope = Spree::Product.active
  scope = scope.in_taxon(taxon) unless taxon.blank?
  scope = get_products_conditions_for(scope, keywords)
  add_search_scopes(scope)
end
# TODO: Refactor — this method conflates query building, execution, and scope filtering.
# Runs the Sunspot keyword query and narrows +base_scope+ to the returned
# ids (or to an empty set when there are no hits). Also applies the url
# filters and facet setup as part of the same search.
def get_products_conditions_for(base_scope, query)
  @solr_search = ::Sunspot.new_search(Spree::Product) do |q|
    q.keywords(query)
    # NOTE(review): ordering_property is evaluated twice here; presumably
    # cheap, but it could be hoisted into a local.
    q.order_by(
      ordering_property.flatten.first,
      ordering_property.flatten.last)
    # Use a high per_page here so that all results are retrieved when setting base_scope at the end of this method.
    # Otherwise you'll never have more than the first page of results from here returned, when pagination is done
    # during the retrieve_products method.
    q.paginate(page: 1, per_page: Spree::Product.count)
  end
  unless @properties[:filters].blank?
    conditions = Spree::Sunspot::Filter::Query.new( @properties[:filters] )
    @solr_search = conditions.build_search( @solr_search )
  end
  @solr_search.build do |query|
    build_facet_query(query)
  end
  @solr_search.execute
  if @solr_search.total > 0
    @hits = @solr_search.hits.collect{|hit| hit.primary_key.to_i}
    base_scope = base_scope.where( ["#{Spree::Product.table_name}.id in (?)", @hits] )
  else
    # No hits: force an empty result set.
    base_scope = base_scope.where( ["#{Spree::Product.table_name}.id = -1"] )
  end
  base_scope
end
# Extracts search state from the request params into @properties:
# keyword term, per-facet filters, price range, requested ordering and
# the number of similar products to fetch.
def prepare(params)
  super
  @term = params[:keywords]
  filter = {}
  # Scope to the current taxon, its descendants and related taxons.
  # (`taxon.nil?` replaces the original `taxon.class == NilClass`.)
  filter = { :taxon_ids => taxon.self_and_descendants.map(&:id) + taxon.related_ids } unless taxon.nil?
  list = [:category, :group, :type, :theme, :color, :shape, :brand, :size, :material, :for, :saletype, :keyword, :pattern, :supplements]
  list.each do |prop|
    # Facet params arrive as comma separated strings, e.g. "red,blue".
    # (`if present?` replaces the double negative `unless !present?`.)
    filter.update(prop.to_s => params[prop.to_s].split(',')) if params[prop.to_s].present?
  end
  filter.merge!(params[:s]) if params[:s].present?
  @properties[:filters] = filter
  @properties[:price] = params[:price].split('-') if params[:price].present?
  @properties[:order_by] = params[:order_by] || params['order_by'] || []
  @properties[:total_similar_products] = params[:total_similar_products].to_i > 0 ?
    params[:total_similar_products].to_i :
    Spree::Config[:total_similar_products]
end
# Adds facet clauses for every filter configured in Setup.query_filters.
# Range-valued filters become query facets with one row per range; all
# others become field facets, optionally excluding their own filter so
# multi-select facet counts stay correct.
def build_facet_query(query)
Setup.query_filters.filters.each do |filter|
if filter.values.any? && filter.values.first.is_a?(Range)
query.facet(filter.search_param) do
filter.values.each do |value|
row(value) do
with(filter.search_param, value)
end
end
end
else
query.facet(
filter.search_param,
exclude: property_exclusion( filter.exclusion )
)
end
# Temporary hack to allow for geodistancing
# NOTE(review): this geodistance restriction sits inside the filters
# loop, so it is applied once per configured filter -- it looks like it
# was meant to run once; confirm.
unless @properties[:location_coords].nil?
coords = @properties[:location_coords].split(',')
# NOTE(review): the result of this flatten is discarded; coords is
# already a flat array.
coords.flatten
lat = coords[0]
long = coords[1]
query.with(:location).in_radius( lat, long, 50 )
end
end
end
# Returns the currently-applied filter values for the given exclusion
# name, or nil when that filter is not set. Used so a facet's own filter
# can be excluded from its counts (multi-select faceting).
def property_exclusion(filter)
  return nil if filter.blank?
  # @properties[:filters] is keyed by Strings (see #prepare), so index
  # with the stringified name. The original looked up the raw (possibly
  # Symbol) value, returning nil even when the filter was present; for a
  # String argument the behavior is unchanged.
  @properties[:filters][filter.to_s]
end
# Normalizes @properties[:order_by] into a flat ["field", "direction"]
# array, defaulting to relevance ordering (score desc), and caches the
# normalized form back into @properties. Safe to call repeatedly: the
# original re-sent split(',') to the already-normalized Array on later
# calls, which only worked by accident via ActiveSupport's Array#split.
def ordering_property
  order = @properties[:order_by]
  order = order.split(',') if order.is_a?(String)
  order = %w(score desc) if order.blank?
  @properties[:order_by] = order
  @properties[:order_by].flatten
end
end
end
end
Switch 'for' property to 'gender'
require 'spree/core/search/base'
require 'spree/sunspot/filter/filter'
require 'spree/sunspot/filter/condition'
require 'spree/sunspot/filter/param'
require 'spree/sunspot/filter/query'
module Spree
module Sunspot
class Search < Spree::Core::Search::Base
# Reader for the Sunspot search object most recently built by this class.
def solr_search
@solr_search
end
# Executes the main product search against Solr and returns the hit
# objects (the search itself is kept in @solr_search).
#   featured - when > 0, restrict results to featured products.
#   paginate - when false, fetch up to 1000 results in a single page.
#   boosts   - per-field fulltext boost hash; defaults to empty (every
#              boost entry below is currently commented out).
def retrieve_products(featured = 0, paginate = true, boosts = nil)
if boosts.nil?
boosts = {
# :group => 4.0,
# :name => 2.0,
# :theme => 1.0,
# :for => 1.0,
# :material => 1.0,
# :saletype => 1.0,
# :pattern => 1.0,
# :brand => 1.0,
# :size => 1.0,
# :shape => 1.0,
# :color => 1.0,
# :description => 0.8,
# :category => 0.5,
# :type => 0.5,
}
end
@solr_search = ::Sunspot.new_search(Spree::Product) do |q|
# Full text search
unless @term.nil?
# "party" is stripped from the query terms before searching --
# presumably a catalog-specific stop word; TODO confirm.
field_term = @term.split(' ').select{|t|!["party"].include?(t)}.join(' ')
q.fulltext(field_term) do
fields(
:category,
:group,
:type,
:name,
:theme,
:gender,
:pattern,
:color,
:material,
:size,
:brand,
:taxon,
:related_taxons,
:shape
)
boost_fields(boosts)
# Require at least one clause of the query to match.
minimum_match 1
end
end
# Add facets
list = [:category,:group,:type,:theme,:color,:shape,:brand,:size,:material,:saletype,:pattern,:gender]
list.each do |facet|
q.facet(facet)
end
# Filter results
q.with(:is_active, true)
if @properties[:price].present? then
low = @properties[:price].first
high = @properties[:price].last
q.with(:price,low..high)
end
if featured > 0 then
q.with(:featured, 1)
end
# Order results
unless @term.nil?
q.order_by(:score, :desc)
end
q.order_by(:in_stock, :desc)
q.order_by(:missing_image)
q.order_by(:theme)
q.order_by(:position)
q.order_by(:subposition)
unless @properties[:order_by].empty?
sort = @properties[:order_by].split(',')
q.order_by(sort[0],sort[1])
end
# Paginate
if paginate
q.paginate(:page => @properties[:page] || 1, :per_page => @properties[:per_page] || Spree::Config[:products_per_page])
else
q.paginate(:page => @properties[:page] || 1, :per_page => 1000) # Could do Spree::Product.count, but we'll save the query and just assume 1000
end
end
# Add filter queries based on url params
unless @properties[:filters].blank?
conditions = Spree::Sunspot::Filter::Query.new( @properties[:filters] )
@solr_search = conditions.build_search( @solr_search )
end
@solr_search.execute
@solr_search.hits
end
# Fetches up to 1000 active products whose :related field matches the
# given theme, ordered by image presence, stock, any requested sort,
# then theme/position. Returns the Sunspot hits.
def retrieve_related(theme)
@related = ::Sunspot.new_search(Spree::Product) do |q|
q.with(:is_active, true)
q.with(:related,theme.to_s)
q.order_by(:missing_image)
q.order_by(:in_stock, :desc)
unless @properties[:order_by].empty?
sort = @properties[:order_by].split(',')
q.order_by(sort[0],sort[1])
end
q.order_by(:theme)
q.order_by(:position)
q.order_by(:subposition)
q.paginate(:page => 1, :per_page => 1000)
end
@related.execute
@related.hits
end
# Returns the :group facet rows for products within the given category,
# applying the current keywords, price range, ordering and url filters.
def groups(category)
@solr_search = ::Sunspot.new_search(Spree::Product) do |q|
#list = [:category,:group,:type,:theme,:color,:shape,:brand,:size,:material,:for,:agegroup]
#list.each do |facet|
# :limit => -1 asks Solr for every facet value, not just the top N.
q.facet(:group, :limit => -1)
#end
q.with(:is_active, true)
q.with(:category, category)
q.keywords(keywords)
unless @properties[:order_by].empty?
sort = @properties[:order_by].split(',')
q.order_by(sort[0],sort[1])
end
q.order_by(:position)
q.order_by(:subposition)
if @properties[:price].present? then
low = @properties[:price].first
high = @properties[:price].last
q.with(:price,low..high)
end
q.paginate(:page => @properties[:page] || 1, :per_page => @properties[:per_page] || Spree::Config[:products_per_page])
end
unless @properties[:filters].blank?
conditions = Spree::Sunspot::Filter::Query.new( @properties[:filters] )
@solr_search = conditions.build_search( @solr_search )
end
@solr_search.execute
# Only the :group facet was requested, so it is the first (sole) facet.
@solr_search.facets.first.rows
end
# Uses Solr's "more like this" over the given fields to find products
# similar to `product`, then returns the top total_similar_products of
# them in relevance order.
def similar_products(product, *field_names)
products_search = ::Sunspot.more_like_this(product) do
fields *field_names
boost_by_relevance true
# Over-fetch (4x) so enough hits survive the active/in-stock scope below.
paginate :per_page => total_similar_products * 4, :page => 1
end
# get active, in-stock products only.
base_scope = get_common_base_scope
hits = []
if products_search.total > 0
hits = products_search.hits.collect{|hit| hit.primary_key.to_i}
base_scope = base_scope.where( ["#{Spree::Product.table_name}.id in (?)", hits] )
else
# No similar hits: force an empty relation.
base_scope = base_scope.where( ["#{Spree::Product.table_name}.id = -1"] )
end
products_scope = @product_group.apply_on(base_scope)
products_results = products_scope.includes([:images, :master]).page(1)
# return top N most-relevant products (i.e. in the same order returned by more_like_this)
@similar_products = products_results.sort_by{ |p| hits.find_index(p.id) }.shift(total_similar_products)
end
# Decides whether the keyword search should short-circuit: when the
# keywords exactly match a variant SKU, returns {:product => product} so
# the caller can redirect straight to that product page; otherwise
# returns {}. (The commented-out code below is an older taxon/facet
# matching strategy kept for reference.)
def intercept
return {} unless keywords
#search keywords for matches in taxon names
# Spree::Taxon.all(:order => 'length(name) desc, name').each do |cat|
# if key.include? cat.name then
# redirect.update(:permalink => cat)
# key = key.gsub(cat.name,'')
# break
# end
# end
#select facets for
# matches = [:category, :group, :type, :theme, :keyword, :color, :shape, :size, :pattern, :count]
# @facet_match = ::Sunspot.new_search(Spree::Product) do |q|
# matches.sort_by(&:length).reverse.each do |facet|
# q.facet facet, :limit => -1, :sort => :count
# end
# q.paginate(page: 1, per_page: Spree::Product.count)
# end
# @facet_match.execute
# @facet_match.hits.each do |hit|
# if hit.stored(:sku) == keywords.upcase then
# return {:product => hit }
# end
# end
# matches.each do |face|
# @facet_match.facet(face).rows.each do |row|
# if key.match("\\b#{row.value}\\b") then
# key = key.gsub(row.value,'')
# redirect.update(face => row.value)
# elsif key.match("\\b#{row.value.singularize}\\b")
# key = key.gsub(row.value.singularize,'')
# redirect.update(face => row.value)
# end
# end
# end
# Redirect to product on sku match
product_match = Spree::Product.joins(:master).where("spree_variants.sku = ?", keywords).take(1)
unless product_match.nil? || product_match.empty?
return {:product => product_match[0]}
end
# redirect.update(:keywords => key.strip.split.join(' ')) unless key.strip.empty?
# params["q"] = keywords
{}
end
protected
# Builds the base ActiveRecord scope for a search: active products,
# optionally narrowed to the current taxon, then restricted to the ids
# returned by the Solr keyword query, plus any extra search scopes.
def get_base_scope
  scope = Spree::Product.active
  scope = scope.in_taxon(taxon) unless taxon.blank?
  scope = get_products_conditions_for(scope, keywords)
  add_search_scopes(scope)
end
# TODO: This method is shit; clean it up John. At least you were paid to write this =P
# Runs the keyword query through Sunspot and narrows base_scope to the
# matching product ids, or to an impossible id (-1) when nothing matched
# so the relation comes back empty.
def get_products_conditions_for(base_scope, query)
@solr_search = ::Sunspot.new_search(Spree::Product) do |q|
q.keywords(query)
# NOTE(review): ordering_property is invoked twice and mutates
# @properties[:order_by]; the second call depends on the value already
# being normalized -- confirm this is intentional.
q.order_by(
ordering_property.flatten.first,
ordering_property.flatten.last)
# Use a high per_page here so that all results are retrieved when setting base_scope at the end of this method.
# Otherwise you'll never have more than the first page of results from here returned, when pagination is done
# during the retrieve_products method.
q.paginate(page: 1, per_page: Spree::Product.count)
end
# Layer the url-derived facet filters onto the search.
unless @properties[:filters].blank?
conditions = Spree::Sunspot::Filter::Query.new( @properties[:filters] )
@solr_search = conditions.build_search( @solr_search )
end
# Attach facet clauses before executing.
@solr_search.build do |query|
build_facet_query(query)
end
@solr_search.execute
if @solr_search.total > 0
@hits = @solr_search.hits.collect{|hit| hit.primary_key.to_i}
base_scope = base_scope.where( ["#{Spree::Product.table_name}.id in (?)", @hits] )
else
# No hits: force an empty relation rather than returning everything.
base_scope = base_scope.where( ["#{Spree::Product.table_name}.id = -1"] )
end
base_scope
end
# Extracts search state from the request params into @properties:
# keyword term, per-facet filters, price range, requested ordering and
# the number of similar products to fetch.
def prepare(params)
  super
  @term = params[:keywords]
  filter = {}
  # Scope to the current taxon, its descendants and related taxons.
  # (`taxon.nil?` replaces the original `taxon.class == NilClass`.)
  filter = { :taxon_ids => taxon.self_and_descendants.map(&:id) + taxon.related_ids } unless taxon.nil?
  list = [:category, :group, :type, :theme, :color, :shape, :brand, :size, :material, :for, :saletype, :keyword, :pattern, :supplements, :gender]
  list.each do |prop|
    # Facet params arrive as comma separated strings, e.g. "red,blue".
    # (`if present?` replaces the double negative `unless !present?`.)
    filter.update(prop.to_s => params[prop.to_s].split(',')) if params[prop.to_s].present?
  end
  filter.merge!(params[:s]) if params[:s].present?
  @properties[:filters] = filter
  @properties[:price] = params[:price].split('-') if params[:price].present?
  @properties[:order_by] = params[:order_by] || params['order_by'] || []
  @properties[:total_similar_products] = params[:total_similar_products].to_i > 0 ?
    params[:total_similar_products].to_i :
    Spree::Config[:total_similar_products]
end
# Adds facet clauses for every filter configured in Setup.query_filters.
# Range-valued filters become query facets with one row per range; all
# others become field facets, optionally excluding their own filter so
# multi-select facet counts stay correct.
def build_facet_query(query)
Setup.query_filters.filters.each do |filter|
if filter.values.any? && filter.values.first.is_a?(Range)
query.facet(filter.search_param) do
filter.values.each do |value|
row(value) do
with(filter.search_param, value)
end
end
end
else
query.facet(
filter.search_param,
exclude: property_exclusion( filter.exclusion )
)
end
# Temporary hack to allow for geodistancing
# NOTE(review): this geodistance restriction sits inside the filters
# loop, so it is applied once per configured filter -- it looks like it
# was meant to run once; confirm.
unless @properties[:location_coords].nil?
coords = @properties[:location_coords].split(',')
# NOTE(review): the result of this flatten is discarded; coords is
# already a flat array.
coords.flatten
lat = coords[0]
long = coords[1]
query.with(:location).in_radius( lat, long, 50 )
end
end
end
# Returns the currently-applied filter values for the given exclusion
# name, or nil when that filter is not set. Used so a facet's own filter
# can be excluded from its counts (multi-select faceting).
def property_exclusion(filter)
  return nil if filter.blank?
  # @properties[:filters] is keyed by Strings (see #prepare), so index
  # with the stringified name. The original looked up the raw (possibly
  # Symbol) value, returning nil even when the filter was present; for a
  # String argument the behavior is unchanged.
  @properties[:filters][filter.to_s]
end
# Normalizes @properties[:order_by] into a flat ["field", "direction"]
# array, defaulting to relevance ordering (score desc), and caches the
# normalized form back into @properties. Safe to call repeatedly: the
# original re-sent split(',') to the already-normalized Array on later
# calls (get_products_conditions_for calls it twice), which only worked
# by accident via ActiveSupport's Array#split.
def ordering_property
  order = @properties[:order_by]
  order = order.split(',') if order.is_a?(String)
  order = %w(score desc) if order.blank?
  @properties[:order_by] = order
  @properties[:order_by].flatten
end
end
end
end
|
# Generated by jeweler
# DO NOT EDIT THIS FILE DIRECTLY
# Instead, edit Jeweler::Tasks in Rakefile, and run 'rake gemspec'
# -*- encoding: utf-8 -*-
# stub: validate_url 1.0.12 ruby lib
# Gem metadata for validate_url 1.0.12. Jeweler-generated: edit
# Jeweler::Tasks in the Rakefile and run `rake gemspec` rather than
# editing this block directly.
Gem::Specification.new do |s|
s.name = "validate_url".freeze
s.version = "1.0.12"
s.required_rubygems_version = Gem::Requirement.new(">= 0".freeze) if s.respond_to? :required_rubygems_version=
s.require_paths = ["lib".freeze]
s.authors = ["Tanel Suurhans".freeze, "Tarmo Lehtpuu".freeze, "Vladimir Krylov".freeze]
s.date = "2020-09-22"
s.description = "Library for validating urls in Rails.".freeze
s.email = ["tanel.suurhans@perfectline.co".freeze, "tarmo.lehtpuu@perfectline.co".freeze, "vladimir.krylov@perfectline.co".freeze]
s.extra_rdoc_files = [
"LICENSE.md",
"README.md"
]
# Files shipped in the gem (validator, RSpec matcher and locale files).
s.files = [
"init.rb",
"install.rb",
"lib/locale/ar.yml",
"lib/locale/de.yml",
"lib/locale/en.yml",
"lib/locale/es.yml",
"lib/locale/fr.yml",
"lib/locale/it.yml",
"lib/locale/ja.yml",
"lib/locale/km.yml",
"lib/locale/pl.yml",
"lib/locale/pt-BR.yml",
"lib/locale/ro.yml",
"lib/locale/ru.yml",
"lib/locale/tr.yml",
"lib/locale/vi.yml",
"lib/locale/zh-CN.yml",
"lib/locale/zh-TW.yml",
"lib/validate_url.rb",
"lib/validate_url/rspec_matcher.rb"
]
s.homepage = "http://github.com/perfectline/validates_url/tree/master".freeze
s.rubygems_version = "3.0.8".freeze
s.summary = "Library for validating urls in Rails.".freeze
# Dependency declarations, guarded for old RubyGems versions that lack
# typed (runtime vs development) dependency support.
if s.respond_to? :specification_version then
s.specification_version = 4
if Gem::Version.new(Gem::VERSION) >= Gem::Version.new('1.2.0') then
s.add_development_dependency(%q<jeweler>.freeze, [">= 0"])
s.add_development_dependency(%q<sqlite3>.freeze, [">= 0"])
s.add_development_dependency(%q<activerecord>.freeze, [">= 0"])
s.add_development_dependency(%q<rspec>.freeze, [">= 0"])
s.add_development_dependency(%q<diff-lcs>.freeze, [">= 1.1.2"])
s.add_runtime_dependency(%q<activemodel>.freeze, [">= 3.0.0"])
s.add_runtime_dependency(%q<public_suffix>.freeze, [">= 0"])
else
s.add_dependency(%q<jeweler>.freeze, [">= 0"])
s.add_dependency(%q<sqlite3>.freeze, [">= 0"])
s.add_dependency(%q<activerecord>.freeze, [">= 0"])
s.add_dependency(%q<rspec>.freeze, [">= 0"])
s.add_dependency(%q<diff-lcs>.freeze, [">= 1.1.2"])
s.add_dependency(%q<activemodel>.freeze, [">= 3.0.0"])
s.add_dependency(%q<public_suffix>.freeze, [">= 0"])
end
else
s.add_dependency(%q<jeweler>.freeze, [">= 0"])
s.add_dependency(%q<sqlite3>.freeze, [">= 0"])
s.add_dependency(%q<activerecord>.freeze, [">= 0"])
s.add_dependency(%q<rspec>.freeze, [">= 0"])
s.add_dependency(%q<diff-lcs>.freeze, [">= 1.1.2"])
s.add_dependency(%q<activemodel>.freeze, [">= 3.0.0"])
s.add_dependency(%q<public_suffix>.freeze, [">= 0"])
end
end
bump new version
# Generated by jeweler
# DO NOT EDIT THIS FILE DIRECTLY
# Instead, edit Jeweler::Tasks in Rakefile, and run 'rake gemspec'
# -*- encoding: utf-8 -*-
# stub: validate_url 1.0.13 ruby lib
# Gem metadata for validate_url 1.0.13. Jeweler-generated: edit
# Jeweler::Tasks in the Rakefile and run `rake gemspec` rather than
# editing this block directly.
Gem::Specification.new do |s|
s.name = "validate_url".freeze
s.version = "1.0.13"
s.required_rubygems_version = Gem::Requirement.new(">= 0".freeze) if s.respond_to? :required_rubygems_version=
s.require_paths = ["lib".freeze]
s.authors = ["Tanel Suurhans".freeze, "Tarmo Lehtpuu".freeze, "Vladimir Krylov".freeze]
s.date = "2020-09-23"
s.description = "Library for validating urls in Rails.".freeze
s.email = ["tanel.suurhans@perfectline.co".freeze, "tarmo.lehtpuu@perfectline.co".freeze, "vladimir.krylov@perfectline.co".freeze]
s.extra_rdoc_files = [
"LICENSE.md",
"README.md"
]
# Files shipped in the gem (validator, RSpec matcher and locale files).
s.files = [
"init.rb",
"install.rb",
"lib/locale/ar.yml",
"lib/locale/de.yml",
"lib/locale/en.yml",
"lib/locale/es.yml",
"lib/locale/fr.yml",
"lib/locale/it.yml",
"lib/locale/ja.yml",
"lib/locale/km.yml",
"lib/locale/pl.yml",
"lib/locale/pt-BR.yml",
"lib/locale/ro.yml",
"lib/locale/ru.yml",
"lib/locale/tr.yml",
"lib/locale/vi.yml",
"lib/locale/zh-CN.yml",
"lib/locale/zh-TW.yml",
"lib/validate_url.rb",
"lib/validate_url/rspec_matcher.rb"
]
s.homepage = "http://github.com/perfectline/validates_url/tree/master".freeze
s.rubygems_version = "3.0.8".freeze
s.summary = "Library for validating urls in Rails.".freeze
# Dependency declarations, guarded for old RubyGems versions that lack
# typed (runtime vs development) dependency support.
if s.respond_to? :specification_version then
s.specification_version = 4
if Gem::Version.new(Gem::VERSION) >= Gem::Version.new('1.2.0') then
s.add_development_dependency(%q<jeweler>.freeze, [">= 0"])
s.add_development_dependency(%q<sqlite3>.freeze, [">= 0"])
s.add_development_dependency(%q<activerecord>.freeze, [">= 0"])
s.add_development_dependency(%q<rspec>.freeze, [">= 0"])
s.add_development_dependency(%q<diff-lcs>.freeze, [">= 1.1.2"])
s.add_runtime_dependency(%q<activemodel>.freeze, [">= 3.0.0"])
s.add_runtime_dependency(%q<public_suffix>.freeze, [">= 0"])
else
s.add_dependency(%q<jeweler>.freeze, [">= 0"])
s.add_dependency(%q<sqlite3>.freeze, [">= 0"])
s.add_dependency(%q<activerecord>.freeze, [">= 0"])
s.add_dependency(%q<rspec>.freeze, [">= 0"])
s.add_dependency(%q<diff-lcs>.freeze, [">= 1.1.2"])
s.add_dependency(%q<activemodel>.freeze, [">= 3.0.0"])
s.add_dependency(%q<public_suffix>.freeze, [">= 0"])
end
else
s.add_dependency(%q<jeweler>.freeze, [">= 0"])
s.add_dependency(%q<sqlite3>.freeze, [">= 0"])
s.add_dependency(%q<activerecord>.freeze, [">= 0"])
s.add_dependency(%q<rspec>.freeze, [">= 0"])
s.add_dependency(%q<diff-lcs>.freeze, [">= 1.1.2"])
s.add_dependency(%q<activemodel>.freeze, [">= 3.0.0"])
s.add_dependency(%q<public_suffix>.freeze, [">= 0"])
end
end
|
require "starter/tasks/starter"
desc "Create an issue on GitHub"
# Interactively collects issue fields on stdin, shows them for review,
# and creates the issue via the repo handle resolved by the github_repo
# task. API errors are printed to stderr.
task "github:issue" => "github_repo" do
  repo = $STARTER[:github_repo]
  options = {}
  $stdout.print "Title: "; $stdout.flush
  options[:title] = $stdin.readline.strip
  $stdout.print "Description: "; $stdout.flush
  options[:description] = $stdin.readline.strip
  labels = repo.labels.map { |label| label["name"] }.join(" ")
  $stdout.print "Labels (separate with spaces: [#{labels}]): "; $stdout.flush
  options[:labels] = $stdin.readline.strip.split(" ")
  $stdout.puts "Milestone:"
  repo.milestones.each do |milestone|
    $stdout.puts "#{milestone['number']} - #{milestone['title']}"
  end
  milestone = $stdin.readline.strip
  options[:milestone] = milestone unless milestone.empty?
  # Show what will be submitted before asking for confirmation; the
  # original printed the "Issue details: " label but never the collected
  # options themselves.
  print "Issue details: "
  pp options
  if Starter::Prompt.confirm("Create this issue?")
    result = repo.issues.create(options)
    if result["errors"]
      result["errors"].each do |error|
        $stderr.puts "#{error['resource']}: #{error['message']}"
      end
    else
      $stdout.puts "Issue ##{result['number']} created."
    end
  end
end
# Resolves the configured GitHub repository through the Ghee API client
# and stores the handle in $STARTER[:github_repo]; exits when the API
# returns an error.
task "github_repo" => %w[ github_settings github_password ] do
require 'ghee'
settings = $STARTER[:settings][:github]
user, password, repo = settings.values_at(:user, :password, :repo)
ghee = Ghee.basic_auth(user,password)
repo = ghee.repos(repo[:owner], repo[:name])
# An error response carries a "message" key instead of repo data.
if repo["message"]
puts repo["message"]
exit
else
$STARTER[:github_repo] = repo
end
end
# Prompts for the GitHub password when settings.yml does not supply one
# and caches it in the settings hash for later tasks.
task "github_password" => "github_settings" do
  require "starter/password"
  # `.nil?` replaces the unidiomatic `== nil` comparison.
  if $STARTER[:settings][:github][:password].nil?
    password = Starter::Password.request("GitHub")
    $STARTER[:settings][:github][:password] = password
  end
end
# Loads settings.yml into $STARTER[:settings]. When the file is missing,
# offers to write a stubbed template and then exits (a fresh stub still
# needs hand-editing before the other tasks can run).
task "read_settings" do
require "yaml"
begin
$STARTER[:settings] = YAML.load_file("settings.yml")
rescue Errno::ENOENT
$stderr.puts "You do not appear to have a settings.yml file."
if Starter::Prompt.confirm("Create a stubbed settings file?")
File.open("settings.yml", "w") do |f|
settings = {
:github => {
:user => "YOURUSERNAME",
:repo => {:owner => "OWNERNAME", :name => $STARTER[:directory]}
}
}
YAML.dump(settings, f)
puts "Created settings.yml. Now go edit it and add it to .gitignore."
end
end
# Stop the rake run either way: there is no usable configuration yet.
exit
end
end
# Guard task: aborts the run unless settings.yml contains a github stanza.
task "github_settings" => "read_settings" do
  # `.nil?` replaces the unidiomatic `== nil` comparison.
  if $STARTER[:settings][:github].nil?
    $stderr.puts "Looks like your settings.yml file isn't set up with a github stanza."
    exit
  end
end
fixes #10 - multiple issue creation
require "starter/tasks/starter"
desc "Create an issue on GitHub"
# Interactively creates GitHub issues in a loop until the user declines
# "Create another?". Exits the process when the API reports errors.
task "github:issue" => "github_repo" do
repo = $STARTER[:github_repo]
loop do
options = {}
$stdout.print "Title: "; $stdout.flush
options[:title] = $stdin.readline.strip
$stdout.print "Description: "; $stdout.flush
options[:description] = $stdin.readline.strip
labels = repo.labels.map { |label| label["name"] }.join(" ")
$stdout.print "Labels (separate with spaces: [#{labels}]): "; $stdout.flush
options[:labels] = $stdin.readline.strip.split(" ")
$stdout.puts "Milestone:"
repo.milestones.each do |milestone|
$stdout.puts "#{milestone['number']} - #{milestone['title']}"
end
milestone = $stdin.readline.strip
options[:milestone] = milestone unless milestone.empty?
# Show the collected fields for review before submitting.
print "Issue details: "
pp options
# NOTE(review): declining "Create this issue?" restarts the loop and
# re-prompts every field from scratch -- confirm that is the intended
# retry flow.
if Starter::Prompt.confirm("Create this issue?")
result = repo.issues.create(options)
if result["errors"]
result["errors"].each do |error|
$stderr.puts "#{error['resource']}: #{error['message']}"
end
exit
else
$stdout.puts "Issue ##{result['number']} created."
unless Starter::Prompt.confirm("Create another?")
break
end
end
end
end
end
# Resolves the configured GitHub repository through the Ghee API client
# and stores the handle in $STARTER[:github_repo]; exits when the API
# returns an error.
task "github_repo" => %w[ github_settings github_password ] do
require 'ghee'
settings = $STARTER[:settings][:github]
user, password, repo = settings.values_at(:user, :password, :repo)
ghee = Ghee.basic_auth(user,password)
repo = ghee.repos(repo[:owner], repo[:name])
# An error response carries a "message" key instead of repo data.
if repo["message"]
puts repo["message"]
exit
else
$STARTER[:github_repo] = repo
end
end
# Prompts for the GitHub password when settings.yml does not supply one
# and caches it in the settings hash for later tasks.
task "github_password" => "github_settings" do
  require "starter/password"
  # `.nil?` replaces the unidiomatic `== nil` comparison.
  if $STARTER[:settings][:github][:password].nil?
    password = Starter::Password.request("GitHub")
    $STARTER[:settings][:github][:password] = password
  end
end
# Loads settings.yml into $STARTER[:settings]. When the file is missing,
# offers to write a stubbed template and then exits (a fresh stub still
# needs hand-editing before the other tasks can run).
task "read_settings" do
require "yaml"
begin
$STARTER[:settings] = YAML.load_file("settings.yml")
rescue Errno::ENOENT
$stderr.puts "You do not appear to have a settings.yml file."
if Starter::Prompt.confirm("Create a stubbed settings file?")
File.open("settings.yml", "w") do |f|
settings = {
:github => {
:user => "YOURUSERNAME",
:repo => {:owner => "OWNERNAME", :name => $STARTER[:directory]}
}
}
YAML.dump(settings, f)
puts "Created settings.yml. Now go edit it and add it to .gitignore."
end
end
# Stop the rake run either way: there is no usable configuration yet.
exit
end
end
# Guard task: aborts the run unless settings.yml contains a github stanza.
task "github_settings" => "read_settings" do
  # `.nil?` replaces the unidiomatic `== nil` comparison.
  if $STARTER[:settings][:github].nil?
    $stderr.puts "Looks like your settings.yml file isn't set up with a github stanza."
    exit
  end
end
|
# StatHat JSON client namespace.
module StatHat
  module Json
    # Gem version string; frozen so the constant cannot be mutated in place.
    VERSION = "0.0.2".freeze
  end
end
bump version to 0.0.3
# StatHat JSON client namespace.
module StatHat
  module Json
    # Gem version string; frozen so the constant cannot be mutated in place.
    VERSION = "0.0.3".freeze
  end
end
|
module BBLib
# Weighted fuzzy string comparison. Combines several similarity
# algorithms (levenshtein, composition, numeric, phrase), each provided
# as a String extension elsewhere in BBLib, into a 0-100 match score.
class FuzzyMatcher
# NOTE(review): the :algorithms reader generated here is shadowed by the
# explicit #algorithms method below, which returns only the keys.
attr_reader :threshold, :algorithms
attr_accessor :case_sensitive, :remove_symbols, :move_articles, :convert_roman, :a, :b
def initialize threshold: 75, case_sensitive: true, remove_symbols: false, move_articles: false, convert_roman: true
self.threshold = threshold
setup_algorithms
@case_sensitive, @remove_symbols, @move_articles, @convert_roman = case_sensitive, remove_symbols, move_articles, convert_roman
end
# Calculates a percentage match between string a and string b.
def similarity a, b
prep_strings a, b
return 100.0 if @a == @b
# Weighted average across all algorithms with a positive weight.
score, total_weight = 0, @algorithms.map{|alg, v| v[:weight] }.inject{ |sum, w| sum+=w }
@algorithms.each do |algo, vals|
next unless vals[:weight] > 0
score+= @a.send(vals[:signature], @b) * vals[:weight]
end
score / total_weight
end
# Checks to see if the match percentage between Strings a and b are equal to or greater than the threshold.
def match? a, b
similarity(a, b) >= @threshold.to_f
end
# Returns the best match from array b to string a based on percent.
def best_match a, b
similarities(a, b).max_by{ |k, v| v}[0]
end
# Returns a hash of array 'b' with the percentage match to a. If sort is true, the hash is sorted desc by match percent.
def similarities a, b, sort: false
matches = Hash.new
[b].flatten.each{ |m| matches[m] = self.similarity(a, m) }
sort ? matches.sort_by{ |k, v| v }.reverse.to_h : matches
end
# Clamps the threshold into the 0..100 range.
def threshold= threshold
@threshold = BBLib.keep_between(threshold, 0, 100)
end
# Sets the weight of a known algorithm (returns nil for unknown names);
# weights are clamped to be non-negative.
def set_weight algorithm, weight
return nil unless @algorithms.include? algorithm
@algorithms[algorithm][:weight] = BBLib.keep_between(weight, 0, nil)
end
# Names of the available similarity algorithms.
def algorithms
@algorithms.keys
end
private
# Default algorithm table: weight 0 disables an algorithm; :signature is
# the String method used to score a pair.
def setup_algorithms
@algorithms = {
levenshtein: {weight: 10, signature: :levenshtein_similarity},
composition: {weight: 5, signature: :composition_similarity},
numeric: {weight: 0, signature: :numeric_similarity},
phrase: {weight: 0, signature: :phrase_similarity}
# FUTURE qwerty: {weight: 0, signature: :qwerty_similarity}
}
end
# Copies a and b into @a/@b and applies the configured normalizations
# (case folding, symbol stripping, roman numeral conversion, article
# moving) to both strings.
def prep_strings a, b
@a, @b = a.to_s.dup, b.to_s.dup
methods = [
@case_sensitive ? nil : :downcase,
@remove_symbols ? :drop_symbols : nil,
@convert_roman ? :from_roman : nil,
@move_articles ? :move_articles : nil
].reject(&:nil?).each do |method|
@a, @b = @a.send(method), @b.send(method)
end
end
end
end
Ported FuzzyMatcher over to LazyClass.
module BBLib
# Weighted fuzzy string comparison built on LazyClass-generated
# attributes. Combines several similarity algorithms (levenshtein,
# composition, numeric, phrase), each provided as a String extension
# elsewhere in BBLib, into a 0-100 match score.
class FuzzyMatcher < LazyClass
attr_float_between 0, 100, :threshold, default: 75
attr_bool :case_sensitive, default: true
attr_bool :remove_symbols, :move_articles, :convert_roman, default: false
# Calculates a percentage match between string a and string b.
def similarity a, b
prep_strings a, b
return 100.0 if @a == @b
# Weighted average across all algorithms with a positive weight.
score, total_weight = 0, @algorithms.map{|alg, v| v[:weight] }.inject{ |sum, w| sum+=w }
@algorithms.each do |algo, vals|
next unless vals[:weight] > 0
score+= @a.send(vals[:signature], @b) * vals[:weight]
end
score / total_weight
end
# Checks to see if the match percentage between Strings a and b are equal to or greater than the threshold.
def match? a, b
similarity(a, b) >= @threshold.to_f
end
# Returns the best match from array b to string a based on percent.
def best_match a, b
similarities(a, b).max_by{ |k, v| v}[0]
end
# Returns a hash of array 'b' with the percentage match to a. If sort is true, the hash is sorted desc by match percent.
def similarities a, b, sort: false
matches = Hash.new
[b].flatten.each{ |m| matches[m] = self.similarity(a, m) }
sort ? matches.sort_by{ |k, v| v }.reverse.to_h : matches
end
# Sets the weight of a known algorithm (returns nil for unknown names);
# weights are clamped to be non-negative.
def set_weight algorithm, weight
return nil unless @algorithms.include? algorithm
@algorithms[algorithm][:weight] = BBLib.keep_between(weight, 0, nil)
end
# Names of the available similarity algorithms.
def algorithms
@algorithms.keys
end
private
# LazyClass setup hook: default algorithm table. Weight 0 disables an
# algorithm; :signature is the String method used to score a pair.
def lazy_setup
@algorithms = {
levenshtein: {weight: 10, signature: :levenshtein_similarity},
composition: {weight: 5, signature: :composition_similarity},
numeric: {weight: 0, signature: :numeric_similarity},
phrase: {weight: 0, signature: :phrase_similarity}
# FUTURE qwerty: {weight: 0, signature: :qwerty_similarity}
}
end
# Copies a and b into @a/@b and applies the configured normalizations
# (case folding, symbol stripping, roman numeral conversion, article
# moving) to both strings.
def prep_strings a, b
@a, @b = a.to_s.dup, b.to_s.dup
methods = [
@case_sensitive ? nil : :downcase,
@remove_symbols ? :drop_symbols : nil,
@convert_roman ? :from_roman : nil,
@move_articles ? :move_articles : nil
].reject(&:nil?).each do |method|
@a, @b = @a.send(method), @b.send(method)
end
end
end
end
|
# Namespace for the string_tools gem.
module StringTools
# Current released gem version.
VERSION = '0.12.2'.freeze
end
Release 0.13.0
# Namespace for the string_tools gem.
module StringTools
# Current released gem version.
VERSION = '0.13.0'.freeze
end
|
require 'java'
require 'json'
require_relative './pdf_render'
require_relative './core_ext'
module Tabula
module TableGuesser
# Writes tables.json under output_dir containing, per page, the integer
# dims of every table rectangle detected in the given PDF.
def TableGuesser.find_and_write_rects(filename, output_dir)
#writes to JSON the rectangles on each page in the specified PDF.
open(File.join(output_dir, "tables.json"), 'w') do |f|
f.write( JSON.dump(find_rects(filename).map{|a| a.map{|r| r.dims.map(&:to_i) }} ))
end
end
# Loads the PDF with PDFBox and returns an array with one entry (the
# detected table rectangles) per page. Exits the process when the
# document reports zero pages.
def TableGuesser.find_rects(filename)
pdf = load_pdfbox_pdf(filename)
if pdf.getNumberOfPages == 0
puts "not a pdf!"
exit
end
puts "pages: " + pdf.getNumberOfPages.to_s
tables = []
pdf.getNumberOfPages.times do |i|
#gotcha: with PDFView, PDF pages are 1-indexed. If you ask for page 0 and then page 1, you'll get the first page twice. So start with index 1.
tables << find_rects_on_page(pdf, i + 1)
end
tables
end
# Detects ruling lines on every page of the given PDF, returning one
# entry per page. Exits the process when the document reports zero pages.
def TableGuesser.find_lines(filename)
  # Bug fix: the original referenced a `pdf` local that was never
  # assigned, so every call raised NameError. Load the document the same
  # way find_rects does.
  pdf = load_pdfbox_pdf(filename)
  if pdf.getNumberOfPages == 0
    puts "not a pdf!"
    exit
  end
  puts "pages: " + pdf.getNumberOfPages.to_s
  lines = []
  pdf.getNumberOfPages.times do |i|
    lines << detect_lines_in_pdf_page(filename, i)
  end
  lines
end
# Extracts ruling lines from one (zero-indexed) page via the line
# extractor, without rendering the page.
def TableGuesser.find_lines_on_page(pdf, page_number_zero_indexed)
Tabula::Extraction::LineExtractor.lines_in_pdf_page(pdf, page_number_zero_indexed, {:render_pdf => false})
end
# Finds table rectangles on a single PDF page (1-indexed, per the gotcha
# noted in find_rects).
def TableGuesser.find_rects_on_page(pdf, page_index)
  # Bug fix: find_lines_on_page takes exactly (pdf, page_number); the
  # original passed a third argument (10), raising ArgumentError on
  # every call.
  find_rects_from_lines(find_lines_on_page(pdf, page_index))
end
# Splits the detected lines into horizontal and vertical sets, builds
# candidate table rectangles from them, merges overlapping rectangles,
# and returns the non-degenerate results ordered largest-area first.
def TableGuesser.find_rects_from_lines(lines)
horizontal_lines = lines.select(&:horizontal?)
vertical_lines = lines.select(&:vertical?)
find_tables(vertical_lines, horizontal_lines).inject([]) do |memo, next_rect|
java.awt.geom.Rectangle2D::Float.unionize( memo, next_rect )
end.compact.reject{|r| r.area == 0 }.sort_by(&:area).reverse
end
# Straight-line distance between the points (x1, y1) and (x2, y2).
def TableGuesser.euclidean_distance(x1, y1, x2, y2)
  dx = x1 - x2
  dy = y1 - y2
  Math.sqrt(dx * dx + dy * dy)
end
# True when the line is oriented upwards, i.e. the majority of its
# vertical extent lies above y_value.
def TableGuesser.is_upward_oriented(line, y_value)
  extent_above = y_value - line.top
  extent_below = line.bottom - y_value
  extent_above > extent_below
end
def TableGuesser.find_tables(verticals, horizontals)
#
# Find all the rectangles in the vertical and horizontal lines given.
#
# Rectangles are deduped with hashRectangle, which considers two rectangles identical if each point rounds to the same tens place as the other.
#
# TODO: generalize this.
#
corner_proximity_threshold = 0.005;
rectangles = []
#find rectangles with one horizontal line and two vertical lines that end within $threshold to the ends of the horizontal line.
[true, false].each do |up_or_down_lines|
horizontals.each do |horizontal_line|
horizontal_line_length = horizontal_line.length
has_vertical_line_from_the_left = false
left_vertical_line = nil
#for the left vertical line.
verticals.each do |vertical_line|
#1. if it is correctly oriented (up or down) given the outer loop here. (We don't want a false-positive rectangle with one "arm" going down, and one going up.)
next unless is_upward_oriented(vertical_line, horizontal_line.top) == up_or_down_lines
vertical_line_length = vertical_line.length
longer_line_length = [horizontal_line_length, vertical_line_length].max
corner_proximity = corner_proximity_threshold * longer_line_length
#make this the left vertical line:
#2. if it begins near the left vertex of the horizontal line.
if euclidean_distance(horizontal_line.left, horizontal_line.top, vertical_line.left, vertical_line.top) < corner_proximity ||
euclidean_distance(horizontal_line.left, horizontal_line.top, vertical_line.left, vertical_line.bottom) < corner_proximity
#3. if it is farther to the left of the line we already have.
if left_vertical_line.nil? || left_vertical_line.left> vertical_line.left #is this line is more to the left than left_vertical_line. #"What's your opinion on Das Kapital?"
has_vertical_line_from_the_left = true
left_vertical_line = vertical_line
end
end
end
has_vertical_line_from_the_right = false;
right_vertical_line = nil
#for the right vertical line.
verticals.each do |vertical_line|
next unless is_upward_oriented(vertical_line, horizontal_line.top) == up_or_down_lines
vertical_line_length = vertical_line.length
longer_line_length = [horizontal_line_length, vertical_line_length].max
corner_proximity = corner_proximity_threshold * longer_line_length
if euclidean_distance(horizontal_line.right, horizontal_line.top, vertical_line.left, vertical_line.top) < corner_proximity ||
euclidean_distance(horizontal_line.right, horizontal_line.top, vertical_line.left, vertical_line.bottom) < corner_proximity
if right_vertical_line.nil? || right_vertical_line.right > vertical_line.right #is this line is more to the right than right_vertical_line. #"Can you recite all of John Galt's speech?"
#do two passes to guarantee we don't get a horizontal line with a upwards and downwards line coming from each of its corners.
#i.e. ensuring that both "arms" of the rectangle have the same orientation (up or down).
has_vertical_line_from_the_right = true
right_vertical_line = vertical_line
end
end
end
if has_vertical_line_from_the_right && has_vertical_line_from_the_left
#in case we eventually tolerate not-quite-vertical lines, this computers the distance in Y directly, rather than depending on the vertical lines' lengths.
height = [left_vertical_line.bottom - left_vertical_line.top, right_vertical_line.bottom - right_vertical_line.top].max
top = [left_vertical_line.top, right_vertical_line.top].min
width = horizontal_line.right - horizontal_line.left
left = horizontal_line.left
r = java.awt.geom.Rectangle2D::Float.new( left, top, width, height ) #x, y, w, h
#rectangles.put(hashRectangle(r), r); #TODO: I dont' think I need this now that I'm in Rubyland
rectangles << r
end
end
#find rectangles with one vertical line and two horizontal lines that end within $threshold to the ends of the vertical line.
verticals.each do |vertical_line|
vertical_line_length = vertical_line.length
has_horizontal_line_from_the_top = false
top_horizontal_line = nil
#for the top horizontal line.
horizontals.each do |horizontal_line|
horizontal_line_length = horizontal_line.length
longer_line_length = [horizontal_line_length, vertical_line_length].max
corner_proximity = corner_proximity_threshold * longer_line_length
if euclidean_distance(vertical_line.left, vertical_line.top, horizontal_line.left, horizontal_line.top) < corner_proximity ||
euclidean_distance(vertical_line.left, vertical_line.top, horizontal_line.right, horizontal_line.top) < corner_proximity
if top_horizontal_line.nil? || top_horizontal_line.top > horizontal_line.top #is this line is more to the top than the one we've got already.
has_horizontal_line_from_the_top = true;
top_horizontal_line = horizontal_line;
end
end
end
has_horizontal_line_from_the_bottom = false;
bottom_horizontal_line = nil
#for the bottom horizontal line.
horizontals.each do |horizontal_line|
horizontal_line_length = horizontal_line.length
longer_line_length = [horizontal_line_length, vertical_line_length].max
corner_proximity = corner_proximity_threshold * longer_line_length
if euclidean_distance(vertical_line.left, vertical_line.bottom, horizontal_line.left, horizontal_line.top) < corner_proximity ||
euclidean_distance(vertical_line.left, vertical_line.bottom, horizontal_line.left, horizontal_line.top) < corner_proximity
if bottom_horizontal_line.nil? || bottom_horizontal_line.bottom > horizontal_line.bottom #is this line is more to the bottom than the one we've got already.
has_horizontal_line_from_the_bottom = true;
bottom_horizontal_line = horizontal_line;
end
end
end
if has_horizontal_line_from_the_bottom && has_horizontal_line_from_the_top
x = [top_horizontal_line.left, bottom_horizontal_line.left].min
y = vertical_line.top
width = [top_horizontal_line.right - top_horizontal_line.left, bottom_horizontal_line.right - bottom_horizontal_line.right].max
height = vertical_line.bottom - vertical_line.top
r = java.awt.geom.Rectangle2D::Float.new( x, y, width, height ) #x, y, w, h
#rectangles.put(hashRectangle(r), r);
rectangles << r
end
end
end
return rectangles.uniq &:similarity_hash
end
end
end
Remove unneeded requires.
require 'json'
module Tabula
module TableGuesser
# Writes the table-bounding rectangles for each page of the given PDF to
# <output_dir>/tables.json: an array with one entry per page, each entry a
# list of [x, y, w, h] integer quadruples.
def TableGuesser.find_and_write_rects(filename, output_dir)
  open(File.join(output_dir, "tables.json"), 'w') do |f|
    f.write( JSON.dump(find_rects(filename).map{|a| a.map{|r| r.dims.map(&:to_i) }} ))
  end
end
# Loads the PDF at +filename+ and returns an array with one entry per page,
# each entry being the rectangles detected on that page.
# NOTE(review): calls `exit` (terminating the whole process) when the PDF has
# no pages — consider raising instead.
def TableGuesser.find_rects(filename)
  pdf = load_pdfbox_pdf(filename)
  if pdf.getNumberOfPages == 0
    puts "not a pdf!"
    exit
  end
  puts "pages: " + pdf.getNumberOfPages.to_s
  tables = []
  pdf.getNumberOfPages.times do |i|
    # gotcha: with PDFView, PDF pages are 1-indexed. If you ask for page 0 and
    # then page 1, you'll get the first page twice. So start with index 1.
    tables << find_rects_on_page(pdf, i + 1)
  end
  tables
end
# Returns, per page, the lines detected in the PDF at +filename+.
#
# Bug fix: the previous version referenced a local `pdf` that was never
# assigned (the document was never loaded), so every call raised NameError.
# The document is now loaded the same way find_rects does it.
#
# NOTE(review): `detect_lines_in_pdf_page` is not defined in this module —
# confirm it exists elsewhere, or whether `find_lines_on_page(pdf, i)` was
# intended here.
def TableGuesser.find_lines(filename)
  pdf = load_pdfbox_pdf(filename)
  if pdf.getNumberOfPages == 0
    puts "not a pdf!"
    exit
  end
  puts "pages: " + pdf.getNumberOfPages.to_s
  lines = []
  pdf.getNumberOfPages.times do |i|
    lines << detect_lines_in_pdf_page(filename, i)
  end
  lines
end
# Delegates line detection for one page (0-indexed) to the extraction engine,
# with PDF rendering disabled.
def TableGuesser.find_lines_on_page(pdf, page_number_zero_indexed)
  Tabula::Extraction::LineExtractor.lines_in_pdf_page(pdf, page_number_zero_indexed, {:render_pdf => false})
end
# Returns the rectangles found on the given page.
#
# Bug fix: find_lines_on_page takes exactly two parameters (pdf, page index);
# the previous call passed a stray third argument `10`, which raised
# ArgumentError on every invocation.
def TableGuesser.find_rects_on_page(pdf, page_index)
  find_rects_from_lines(find_lines_on_page(pdf, page_index))
end
# Splits +lines+ into horizontal and vertical sets, finds candidate table
# rectangles, merges overlapping ones (unionize), drops empty results, and
# returns them sorted largest-area first.
def TableGuesser.find_rects_from_lines(lines)
  horizontal_lines = lines.select(&:horizontal?)
  vertical_lines = lines.select(&:vertical?)
  find_tables(vertical_lines, horizontal_lines).inject([]) do |memo, next_rect|
    java.awt.geom.Rectangle2D::Float.unionize( memo, next_rect )
  end.compact.reject{|r| r.area == 0 }.sort_by(&:area).reverse
end
# Straight-line distance between the points (x1, y1) and (x2, y2).
def TableGuesser.euclidean_distance(x1, y1, x2, y2)
  dx_sq = (x1 - x2) ** 2
  dy_sq = (y1 - y2) ** 2
  Math.sqrt(dx_sq + dy_sq)
end
# True when this line is oriented upwards, i.e. the majority of its vertical
# extent lies above +y_value+.
def TableGuesser.is_upward_oriented(line, y_value)
  extent_above = y_value - line.top
  extent_below = line.bottom - y_value
  extent_above > extent_below
end
#
# Find all the rectangles implied by the given vertical and horizontal lines.
#
# A rectangle is detected either from one horizontal line plus two vertical
# lines whose endpoints fall within a proximity threshold of the horizontal
# line's endpoints (first loop), or from one vertical line plus two
# horizontal lines, analogously (second loop).
#
# Results are deduped by #similarity_hash, which considers two rectangles
# identical if each point rounds to the same tens place as the other.
#
# Fixes relative to the previous version:
# * the bottom-horizontal-line corner test repeated the same disjunct twice
#   (both tested horizontal_line.left); the second disjunct now tests the
#   line's right endpoint, mirroring the top-horizontal-line test;
# * the width of vertical-line-anchored rectangles subtracted
#   bottom_horizontal_line.right from itself (always 0); it now uses
#   right - left.
#
# TODO: generalize this.
#
def TableGuesser.find_tables(verticals, horizontals)
  corner_proximity_threshold = 0.005
  rectangles = []
  # Find rectangles with one horizontal line and two vertical lines that end
  # within the threshold of the ends of the horizontal line.
  # Two passes (up / down) guarantee we don't get a horizontal line with an
  # upwards line coming from one corner and a downwards line from the other —
  # both "arms" of the rectangle must share the same orientation.
  [true, false].each do |up_or_down_lines|
    horizontals.each do |horizontal_line|
      horizontal_line_length = horizontal_line.length
      has_vertical_line_from_the_left = false
      left_vertical_line = nil
      # for the left vertical line.
      verticals.each do |vertical_line|
        # 1. it must be correctly oriented (up or down) for this pass.
        next unless is_upward_oriented(vertical_line, horizontal_line.top) == up_or_down_lines
        vertical_line_length = vertical_line.length
        longer_line_length = [horizontal_line_length, vertical_line_length].max
        corner_proximity = corner_proximity_threshold * longer_line_length
        # 2. it must begin near the left vertex of the horizontal line.
        if euclidean_distance(horizontal_line.left, horizontal_line.top, vertical_line.left, vertical_line.top) < corner_proximity ||
           euclidean_distance(horizontal_line.left, horizontal_line.top, vertical_line.left, vertical_line.bottom) < corner_proximity
          # 3. it must be farther to the left than the line we already have.
          if left_vertical_line.nil? || left_vertical_line.left > vertical_line.left
            has_vertical_line_from_the_left = true
            left_vertical_line = vertical_line
          end
        end
      end
      has_vertical_line_from_the_right = false
      right_vertical_line = nil
      # for the right vertical line.
      verticals.each do |vertical_line|
        next unless is_upward_oriented(vertical_line, horizontal_line.top) == up_or_down_lines
        vertical_line_length = vertical_line.length
        longer_line_length = [horizontal_line_length, vertical_line_length].max
        corner_proximity = corner_proximity_threshold * longer_line_length
        if euclidean_distance(horizontal_line.right, horizontal_line.top, vertical_line.left, vertical_line.top) < corner_proximity ||
           euclidean_distance(horizontal_line.right, horizontal_line.top, vertical_line.left, vertical_line.bottom) < corner_proximity
          # NOTE(review): this keeps the LEFTmost candidate although the
          # original comment says "more to the right"; preserved as-is —
          # confirm against upstream Tabula before flipping the comparison.
          if right_vertical_line.nil? || right_vertical_line.right > vertical_line.right
            has_vertical_line_from_the_right = true
            right_vertical_line = vertical_line
          end
        end
      end
      if has_vertical_line_from_the_right && has_vertical_line_from_the_left
        # In case we eventually tolerate not-quite-vertical lines, this
        # computes the distance in Y directly, rather than depending on the
        # vertical lines' lengths.
        height = [left_vertical_line.bottom - left_vertical_line.top, right_vertical_line.bottom - right_vertical_line.top].max
        top = [left_vertical_line.top, right_vertical_line.top].min
        width = horizontal_line.right - horizontal_line.left
        left = horizontal_line.left
        rectangles << java.awt.geom.Rectangle2D::Float.new( left, top, width, height ) # x, y, w, h
      end
    end
    # Find rectangles with one vertical line and two horizontal lines that end
    # within the threshold of the ends of the vertical line.
    # NOTE(review): this loop does not consult up_or_down_lines, so it runs
    # twice per call with identical results (deduped below) — confirm intent.
    verticals.each do |vertical_line|
      vertical_line_length = vertical_line.length
      has_horizontal_line_from_the_top = false
      top_horizontal_line = nil
      # for the top horizontal line.
      horizontals.each do |horizontal_line|
        horizontal_line_length = horizontal_line.length
        longer_line_length = [horizontal_line_length, vertical_line_length].max
        corner_proximity = corner_proximity_threshold * longer_line_length
        if euclidean_distance(vertical_line.left, vertical_line.top, horizontal_line.left, horizontal_line.top) < corner_proximity ||
           euclidean_distance(vertical_line.left, vertical_line.top, horizontal_line.right, horizontal_line.top) < corner_proximity
          # keep the line that is highest up.
          if top_horizontal_line.nil? || top_horizontal_line.top > horizontal_line.top
            has_horizontal_line_from_the_top = true
            top_horizontal_line = horizontal_line
          end
        end
      end
      has_horizontal_line_from_the_bottom = false
      bottom_horizontal_line = nil
      # for the bottom horizontal line.
      horizontals.each do |horizontal_line|
        horizontal_line_length = horizontal_line.length
        longer_line_length = [horizontal_line_length, vertical_line_length].max
        corner_proximity = corner_proximity_threshold * longer_line_length
        # Bug fix: the second disjunct previously duplicated the first
        # (both tested horizontal_line.left); it now tests the right end.
        if euclidean_distance(vertical_line.left, vertical_line.bottom, horizontal_line.left, horizontal_line.top) < corner_proximity ||
           euclidean_distance(vertical_line.left, vertical_line.bottom, horizontal_line.right, horizontal_line.top) < corner_proximity
          # NOTE(review): keeps the TOPmost candidate although the comment
          # intent reads "more to the bottom"; preserved as-is — verify.
          if bottom_horizontal_line.nil? || bottom_horizontal_line.bottom > horizontal_line.bottom
            has_horizontal_line_from_the_bottom = true
            bottom_horizontal_line = horizontal_line
          end
        end
      end
      if has_horizontal_line_from_the_bottom && has_horizontal_line_from_the_top
        x = [top_horizontal_line.left, bottom_horizontal_line.left].min
        y = vertical_line.top
        # Bug fix: previously subtracted bottom_horizontal_line.right from
        # itself, so the bottom line's contribution to width was always 0.
        width = [top_horizontal_line.right - top_horizontal_line.left, bottom_horizontal_line.right - bottom_horizontal_line.left].max
        height = vertical_line.bottom - vertical_line.top
        rectangles << java.awt.geom.Rectangle2D::Float.new( x, y, width, height ) # x, y, w, h
      end
    end
  end
  rectangles.uniq(&:similarity_hash)
end
end
end
|
require 'asset_processor'
require 'asset_replication_checker'
# rubocop:disable Metrics/BlockLength
# Maintenance tasks for GOV.UK assets. Each task fans work out as one
# background job per asset (perform_async) via AssetProcessor rather than
# doing the work inline.
namespace :govuk_assets do
  desc 'Store values generated from file metadata for all GOV.UK assets'
  task store_values_generated_from_file_metadata: :environment do
    processor = AssetProcessor.new
    processor.process_all_assets_with do |asset_id|
      AssetFileMetadataWorker.perform_async(asset_id)
    end
  end
  desc 'Trigger replication for all non-replicated GOV.UK assets'
  task trigger_replication_for_non_replicated_assets: :environment do
    processor = AssetProcessor.new
    processor.process_all_assets_with do |asset_id|
      AssetTriggerReplicationWorker.perform_async(asset_id)
    end
  end
  desc 'Check all GOV.UK assets have been replicated'
  task check_all_assets_have_been_replicated: :environment do
    checker = AssetReplicationChecker.new
    checker.check_all_assets
  end
  desc 'Upload GOV.UK assets marked as deleted to cloud storage'
  task upload_assets_marked_as_deleted_to_cloud_storage: :environment do
    # Scoped to deleted assets only, with explicit progress reporting.
    processor = AssetProcessor.new(scope: Asset.deleted, report_progress_every: 100)
    processor.process_all_assets_with do |asset_id|
      DeletedAssetSaveToCloudStorageWorker.perform_async(asset_id)
    end
  end
end
# rubocop:enable Metrics/BlockLength
Add Rake task to store file metadata values for deleted assets
We recently realised [1] that when we did the initial upload of existing
assets to S3, we forgot to include assets marked as deleted. Since these
assets can feasibly be restored, we ought to store the relevant file on
S3 like any other asset.
We plan to upload the relevant files to S3 using the recently introduced
`upload_assets_marked_as_deleted_to_cloud_storage` Rake task. However,
this commit introduces another Rake task which stores values generated
from the file metadata as we did previously for non-deleted assets using
the `store_values_generated_from_file_metadata` Rake task.
[1]: https://github.com/alphagov/asset-manager/issues/301
require 'asset_processor'
require 'asset_replication_checker'
# rubocop:disable Metrics/BlockLength
# Maintenance tasks for GOV.UK assets. Each task fans work out as one
# background job per asset (perform_async) via AssetProcessor rather than
# doing the work inline.
namespace :govuk_assets do
  desc 'Store values generated from file metadata for all GOV.UK assets'
  task store_values_generated_from_file_metadata: :environment do
    processor = AssetProcessor.new
    processor.process_all_assets_with do |asset_id|
      AssetFileMetadataWorker.perform_async(asset_id)
    end
  end
  desc 'Store values generated from file metadata for GOV.UK assets marked as deleted'
  task store_values_generated_from_file_metadata_for_assets_marked_as_deleted: :environment do
    # Deleted assets were missed by the original bulk run; process that
    # scope separately with explicit progress reporting.
    processor = AssetProcessor.new(scope: Asset.deleted, report_progress_every: 100)
    processor.process_all_assets_with do |asset_id|
      DeletedAssetFileMetadataWorker.perform_async(asset_id)
    end
  end
  desc 'Trigger replication for all non-replicated GOV.UK assets'
  task trigger_replication_for_non_replicated_assets: :environment do
    processor = AssetProcessor.new
    processor.process_all_assets_with do |asset_id|
      AssetTriggerReplicationWorker.perform_async(asset_id)
    end
  end
  desc 'Check all GOV.UK assets have been replicated'
  task check_all_assets_have_been_replicated: :environment do
    checker = AssetReplicationChecker.new
    checker.check_all_assets
  end
  desc 'Upload GOV.UK assets marked as deleted to cloud storage'
  task upload_assets_marked_as_deleted_to_cloud_storage: :environment do
    processor = AssetProcessor.new(scope: Asset.deleted, report_progress_every: 100)
    processor.process_all_assets_with do |asset_id|
      DeletedAssetSaveToCloudStorageWorker.perform_async(asset_id)
    end
  end
end
# rubocop:enable Metrics/BlockLength
|
# frozen_string_literal: true
module TopsConnect
  # Gem version string (SemVer).
  VERSION = '0.4.1'
end
v0.4.2
Signed-off-by: Steven Hoffman <f5bcacb8ce1651b7d1e56f2693431f20e085fdeb@valenciamgmt.com>
# frozen_string_literal: true
module TopsConnect
  # Gem version string (SemVer).
  VERSION = '0.4.2'
end
|
require 'savon'
module Tourico
  # Thin SOAP layer over the Tourico web services, built on Savon.
  module HTTPService
    class << self
      # Performs a SOAP call against the Tourico hotel service.
      #
      # Bug fix: the WSDL location was wrapped in backticks
      # (`Tourico.hotel_service_link`), which shells out and tries to run
      # the string as a command instead of calling the method — every call
      # failed. It is now a plain method call, matching
      # make_request_reservation_service below.
      #
      # action  - SOAP operation name.
      # args    - Hash message payload.
      # options - currently unused; kept for interface compatibility.
      #
      # Returns the response as a Hash, or nil when the call was unsuccessful.
      def make_request(action, args, options = {})
        puts 'Making hotels request to Tourico'
        client = Savon.client do
          log true
          wsdl Tourico.hotel_service_link
          soap_header 'aut:AuthenticationHeader' => {
            'aut:LoginName' => Tourico.login_name,
            'aut:Password' => Tourico.password,
            'aut:Culture' => Tourico.culture,
            'aut:Version' => Tourico.hotels_service_version
          }
          namespaces(
            'xmlns:env' => 'http://schemas.xmlsoap.org/soap/envelope/',
            'xmlns:aut' => 'http://schemas.tourico.com/webservices/authentication',
            'xmlns:hot' => 'http://tourico.com/webservices/hotelv3',
            'xmlns:wsdl' => 'http://tourico.com/webservices/hotelv3',
            'xmlns:hot1' => 'http://schemas.tourico.com/webservices/hotelv3')
        end
        response = client.call(action, message: args)
        puts 'Finished request for Tourico'
        if response.success?
          response.to_hash
        else
          nil
        end
      end

      # Performs a SOAP call against the Tourico reservation service and
      # returns the response as a Hash.
      # NOTE(review): unlike make_request this does not check
      # response.success? — confirm whether failures should return nil here
      # as well.
      def make_request_reservation_service(action, args, options = {})
        puts 'Making reservations request to Tourico'
        client = Savon.client do
          wsdl Tourico.reservation_service_link
          soap_header 'aut:AuthenticationHeader' => {
            'aut:LoginName' => Tourico.login_name,
            'aut:Password' => Tourico.password,
            'aut:Culture' => Tourico.culture,
            'aut:Version' => Tourico.reservations_service_version
          }
          namespaces(
            'xmlns:env' => 'http://www.w3.org/2003/05/soap-envelope',
            'xmlns:web' => 'http://tourico.com/webservices/',
            'xmlns:hot' => 'http://tourico.com/webservices/',
            'xmlns:wsdl' => 'http://tourico.com/webservices/',
            'xmlns:trav' => 'http://tourico.com/travelservices/')
        end
        response = client.call(action, message: args)
        puts 'Finished request for Tourico'
        response.to_hash
      end
    end
  end
end
Fix Tourico hotel service link: the WSDL URL was wrapped in backticks (shell execution) instead of being a method call.
require 'savon'
module Tourico
  # Thin SOAP layer over the Tourico web services, built on Savon.
  module HTTPService
    class << self
      # Performs a SOAP call against the Tourico hotel service.
      # Returns the response as a Hash, or nil when the call was unsuccessful.
      def make_request(action, args, options = {})
        puts 'Making hotels request to Tourico'
        client = Savon.client do
          log true
          wsdl Tourico.hotel_service_link
          soap_header 'aut:AuthenticationHeader' => {
            'aut:LoginName' => Tourico.login_name,
            'aut:Password' => Tourico.password,
            'aut:Culture' => Tourico.culture,
            'aut:Version' => Tourico.hotels_service_version
          }
          namespaces(
            'xmlns:env' => 'http://schemas.xmlsoap.org/soap/envelope/',
            'xmlns:aut' => 'http://schemas.tourico.com/webservices/authentication',
            'xmlns:hot' => 'http://tourico.com/webservices/hotelv3',
            'xmlns:wsdl' => 'http://tourico.com/webservices/hotelv3',
            'xmlns:hot1' => 'http://schemas.tourico.com/webservices/hotelv3')
        end
        response = client.call(action, message: args)
        puts 'Finished request for Tourico'
        if response.success?
          response.to_hash
        else
          nil
        end
      end

      # Performs a SOAP call against the Tourico reservation service and
      # returns the response as a Hash.
      # NOTE(review): unlike make_request this does not check
      # response.success? — confirm whether failures should return nil too.
      def make_request_reservation_service(action, args, options = {})
        puts 'Making reservations request to Tourico'
        client = Savon.client do
          wsdl Tourico.reservation_service_link
          soap_header 'aut:AuthenticationHeader' => {
            'aut:LoginName' => Tourico.login_name,
            'aut:Password' => Tourico.password,
            'aut:Culture' => Tourico.culture,
            'aut:Version' => Tourico.reservations_service_version
          }
          namespaces(
            'xmlns:env' => 'http://www.w3.org/2003/05/soap-envelope',
            'xmlns:web' => 'http://tourico.com/webservices/',
            'xmlns:hot' => 'http://tourico.com/webservices/',
            'xmlns:wsdl' => 'http://tourico.com/webservices/',
            'xmlns:trav' => 'http://tourico.com/travelservices/')
        end
        response = client.call(action, message: args)
        puts 'Finished request for Tourico'
        response.to_hash
      end
    end
  end
end
|
module CtagsRuby
  # Gem version string (SemVer).
  VERSION = "0.0.1"
end
v0.0.2
module CtagsRuby
  # Gem version string (SemVer).
  VERSION = "0.0.2"
end
|
require "cutthroat/error"
module Cutthroat
  # Base class for mortgage-related errors.
  class MortgageError < CutthroatError
  end

  # Raised when mortgaging a location that is already mortgaged.
  class AlreadyMortgaged < MortgageError
  end

  # Raised when a player acts on a location they do not own.
  class NotOwner < MortgageError
  end

  # Raised when cancelling a mortgage on a location that is not mortgaged.
  class NotMortgaged < MortgageError
  end

  # A single square on the board (property, tax, chance, jail, ...).
  #
  # Fix: a duplicated `private` keyword before #record_rights was removed;
  # method visibility is unchanged (it was already in the private section).
  class Location
    attr_reader :position
    attr_reader :name
    attr_reader :type
    attr_reader :group
    attr_reader :land_price
    attr_reader :rent
    attr_reader :owner
    attr_reader :is_mortgaged

    def to_s
      @name
    end

    def to_i
      @position
    end

    # Runs what happens when +player+ lands here: the square's special action
    # (if any), salary for passing GO, then purchase (if unowned) or rent
    # payment (if owned by someone else and not mortgaged).
    def trigger_action(player)
      if !@action.nil?
        send @action, player
      end
      if player.touched_go and not player.in_jail
        player.receive(SALARY)
      end
      if owner.nil?
        if player.buy_property?(self)
          record_rights(player)
        end
      elsif (owner != player && is_mortgaged != true)
        rent = calculate_rent(player)
        player.charge(rent)
        owner.receive(rent)
      end
    end

    # Mortgages this location, paying the owner MORTGAGE_RATE percent of the
    # land price. Raises unless +player+ owns it and it is unmortgaged.
    def mortgage(player)
      raise AlreadyMortgaged, "#{self} is already mortgaged" if @is_mortgaged == true
      raise NotOwner, "#{self} is not your property" if @owner != player
      player.receive(land_price * MORTGAGE_RATE / 100)
      @is_mortgaged = true
    end

    # Lifts the mortgage; the player repays the mortgage amount plus duty.
    def cancel_mortgage(player)
      raise NotOwner, "#{self} is not your property" if @owner != player
      raise NotMortgaged, "#{self} is not mortgaged" if @is_mortgaged != true
      player.charge(land_price * (MORTGAGE_RATE + MORTGAGE_DUTY) / 100)
      @is_mortgaged = false
    end

    private

    # Rent owed by +player+: utilities charge a multiple of the dice throw,
    # railroads double per railroad owned, streets double when the owner
    # holds the whole group.
    def calculate_rent(player)
      game = player.game
      properties_in_group = game.find_locations_of_group(group)
      if (group == :utility)
        eyes = player.last_throw.inject(:+)
        if properties_in_group.none?{|p| p.owner.nil?}
          eyes * 10
        else
          eyes * 4
        end
      else
        properties_owned = game.find_locations_owned_by(self.owner)
        properties_owned_in_group = properties_owned & properties_in_group
        if (group == :railroad)
          rent * (2 ** (properties_owned_in_group.length - 1))
        else
          if properties_owned_in_group == properties_in_group
            rent * 2
          else
            rent
          end
        end
      end
    end

    def put_in_jail(player)
      player.arrest_at(player.game.board.find_jail)
    end

    # Charges 10% of total worth, capped at MAX_INCOME_TAX.
    def income_tax(player)
      ten_percent = player.total_worth / 10
      player.charge(ten_percent < MAX_INCOME_TAX ? ten_percent : MAX_INCOME_TAX)
    end

    def luxury_tax(player)
      player.charge(LUXURY_TAX)
    end

    def chance(player)
      # TODO implement chance cards
      location = player.game.board.find_go
      player.move_to(location)
      location.trigger_action(player)
    end

    # Charges the buyer the land price and records ownership.
    def record_rights(player)
      player.charge(land_price)
      @owner = player
    end
  end
end
Remove doubled `private` keyword in Location.
require "cutthroat/error"
module Cutthroat
  # Base class for mortgage-related errors.
  class MortgageError < CutthroatError
  end

  # Raised when mortgaging a location that is already mortgaged.
  class AlreadyMortgaged < MortgageError
  end

  # Raised when a player acts on a location they do not own.
  class NotOwner < MortgageError
  end

  # Raised when cancelling a mortgage on a location that is not mortgaged.
  class NotMortgaged < MortgageError
  end

  # A single square on the board (property, tax, chance, jail, ...).
  class Location
    attr_reader :position
    attr_reader :name
    attr_reader :type
    attr_reader :group
    attr_reader :land_price
    attr_reader :rent
    attr_reader :owner
    attr_reader :is_mortgaged

    def to_s
      @name
    end

    def to_i
      @position
    end

    # Runs what happens when +player+ lands here: the square's special action
    # (if any), salary for passing GO, then purchase (if unowned) or rent
    # payment (if owned by someone else and not mortgaged).
    def trigger_action(player)
      if !@action.nil?
        send @action, player
      end
      if player.touched_go and not player.in_jail
        player.receive(SALARY)
      end
      if owner.nil?
        if player.buy_property?(self)
          record_rights(player)
        end
      elsif (owner != player && is_mortgaged != true)
        rent = calculate_rent(player)
        player.charge(rent)
        owner.receive(rent)
      end
    end

    # Mortgages this location, paying the owner MORTGAGE_RATE percent of the
    # land price. Raises unless +player+ owns it and it is unmortgaged.
    def mortgage(player)
      raise AlreadyMortgaged, "#{self} is already mortgaged" if @is_mortgaged == true
      raise NotOwner, "#{self} is not your property" if @owner != player
      player.receive(land_price * MORTGAGE_RATE / 100)
      @is_mortgaged = true
    end

    # Lifts the mortgage; the player repays the mortgage amount plus duty.
    def cancel_mortgage(player)
      raise NotOwner, "#{self} is not your property" if @owner != player
      raise NotMortgaged, "#{self} is not mortgaged" if @is_mortgaged != true
      player.charge(land_price * (MORTGAGE_RATE + MORTGAGE_DUTY) / 100)
      @is_mortgaged = false
    end

    private

    # Rent owed by +player+: utilities charge a multiple of the dice throw,
    # railroads double per railroad owned, streets double when the owner
    # holds the whole group.
    def calculate_rent(player)
      game = player.game
      properties_in_group = game.find_locations_of_group(group)
      if (group == :utility)
        eyes = player.last_throw.inject(:+)
        if properties_in_group.none?{|p| p.owner.nil?}
          eyes * 10
        else
          eyes * 4
        end
      else
        properties_owned = game.find_locations_owned_by(self.owner)
        properties_owned_in_group = properties_owned & properties_in_group
        if (group == :railroad)
          rent * (2 ** (properties_owned_in_group.length - 1))
        else
          if properties_owned_in_group == properties_in_group
            rent * 2
          else
            rent
          end
        end
      end
    end

    def put_in_jail(player)
      player.arrest_at(player.game.board.find_jail)
    end

    # Charges 10% of total worth, capped at MAX_INCOME_TAX.
    def income_tax(player)
      ten_percent = player.total_worth / 10
      player.charge(ten_percent < MAX_INCOME_TAX ? ten_percent : MAX_INCOME_TAX)
    end

    def luxury_tax(player)
      player.charge(LUXURY_TAX)
    end

    def chance(player)
      # TODO implement chance cards
      location = player.game.board.find_go
      player.move_to(location)
      location.trigger_action(player)
    end

    # Charges the buyer the land price and records ownership.
    def record_rights(player)
      player.charge(land_price)
      @owner = player
    end
  end
end
|
# The DACPClient module
module DACPClient
  # Gem version string (SemVer).
  VERSION = '0.2.5'
end
Bump version
# The DACPClient module
module DACPClient
  # Gem version string (SemVer).
  VERSION = '0.2.6'
end
|
require 'daniel/export/pwsafe'
require 'optparse'
# A password generation tool.
module Daniel
  # Export to various formats.
  module Export
    # Main program for daniel-convert.
    class MainProgram < Daniel::Program
      # Entry point: reads reminders line by line from args[0] and writes a
      # Password Safe v3 file to args[1]. Blank lines and '#' comment lines
      # are skipped.
      #
      # Fix: the source file handle was opened with File.new and never
      # closed; File.foreach closes it when iteration finishes.
      def main(args)
        options, args = parse_options(args)
        return if options[:help]
        pass, generator = do_prompt
        destfile = File.new(args[1], 'w')
        converter = Daniel::Export::PasswordSafe.new(pass, destfile)
        File.foreach(args[0]) do |l|
          converter.add_entry(generator, l.chomp) unless /^(#|\s*$)/.match(l)
        end
        # NOTE(review): destfile is intentionally left open — PasswordSafe
        # owns it; confirm whether it should be flushed/closed here.
      end

      protected

      # Parses command-line flags in place; options[:help] is set when
      # -h/--help was given (usage is printed as a side effect).
      def parse_options(args)
        options = {}
        OptionParser.new do |opts|
          opts.banner = 'Usage: daniel-export [-r] REMINDERS EXPORT'
          opts.on('-r', 'Produce machine-readable output') do
            @prompt = :machine
          end
          opts.on_tail('-h', '--help', 'Show this message') do
            puts opts
            puts <<-EOM.gsub(/^\s+/, '')
            Read reminders line by line from REMINDERS and produce a Password
            Safe v3 file in EXPORT.
            EOM
            options[:help] = true
          end
        end.parse!(args)
        [options, args]
      end

      # Prompts for the master passphrase, builds the generator, and echoes
      # its checksum. Returns [passphrase, generator].
      def do_prompt
        interactive('Enter passphrase: ', ':master-password?')
        pass = read_passphrase
        generator = PasswordGenerator.new pass
        prompt '# ok, checksum is', ':checksum', Util.to_hex(generator.checksum)
        [pass, generator]
      end

      # Reads one line from stdin without the trailing newline.
      def read_line
        STDIN.readline.chomp
      end
    end
  end
end
Omit needless variable.
This cleans up a RuboCop warning about complexity.
Signed-off-by: brian m. carlson <738bdd359be778fee9f0fc4e2934ad72f436ceda@crustytoothpaste.net>
require 'daniel/export/pwsafe'
require 'optparse'
# A password generation tool.
module Daniel
  # Export to various formats.
  module Export
    # Main program for daniel-convert.
    class MainProgram < Daniel::Program
      # Entry point: reads reminders line by line from args[0] and writes a
      # Password Safe v3 file to args[1]. Blank lines and '#' comment lines
      # are skipped.
      #
      # Fix: the source file handle was opened with File.new and never
      # closed; File.foreach closes it when iteration finishes.
      def main(args)
        options, args = parse_options(args)
        return if options[:help]
        pass, generator = do_prompt
        converter = Daniel::Export::PasswordSafe.new pass,
                                                     File.new(args[1], 'w')
        File.foreach(args[0]) do |l|
          converter.add_entry(generator, l.chomp) unless /^(#|\s*$)/.match(l)
        end
        # NOTE(review): the output handle is intentionally left open —
        # PasswordSafe owns it; confirm whether it should be closed here.
      end

      protected

      # Parses command-line flags in place; options[:help] is set when
      # -h/--help was given (usage is printed as a side effect).
      def parse_options(args)
        options = {}
        OptionParser.new do |opts|
          opts.banner = 'Usage: daniel-export [-r] REMINDERS EXPORT'
          opts.on('-r', 'Produce machine-readable output') do
            @prompt = :machine
          end
          opts.on_tail('-h', '--help', 'Show this message') do
            puts opts
            puts <<-EOM.gsub(/^\s+/, '')
            Read reminders line by line from REMINDERS and produce a Password
            Safe v3 file in EXPORT.
            EOM
            options[:help] = true
          end
        end.parse!(args)
        [options, args]
      end

      # Prompts for the master passphrase, builds the generator, and echoes
      # its checksum. Returns [passphrase, generator].
      def do_prompt
        interactive('Enter passphrase: ', ':master-password?')
        pass = read_passphrase
        generator = PasswordGenerator.new pass
        prompt '# ok, checksum is', ':checksum', Util.to_hex(generator.checksum)
        [pass, generator]
      end

      # Reads one line from stdin without the trailing newline.
      def read_line
        STDIN.readline.chomp
      end
    end
  end
end
|
module Datacenter
  # Gem version string (SemVer).
  VERSION = '0.3.0'
end
Release 0.3.1
module Datacenter
  # Gem version string (SemVer).
  VERSION = '0.3.1'
end
|
require 'net/http'
module Datagraph
  ##
  # Base class for Datagraph.org resources.
  #
  # Improvement: #head and #get previously duplicated the whole HTTP
  # boilerplate; it now lives in the private #request helper. Their
  # signatures and behavior are unchanged.
  class Resource
    HEADERS = {'Accept' => 'text/plain'} # N-Triples

    ##
    # Factory: dispatches on the spec string's shape to Repository or
    # Account; returns nil for an unrecognized spec.
    def self.new(*args, &block)
      if self == Resource
        case spec = args.first
          when Repository::SPEC
            Repository.new(*spec.split('/'))
          when Account::SPEC
            Account.new(spec)
        end
      else
        super
      end
    end

    ##
    # @return [RDF::URI]
    attr_reader :url

    ##
    # @param  [RDF::URI, String] url
    def initialize(url)
      @url = RDF::URI.new(url)
    end

    ##
    # Returns `true` if this resource exists on Datagraph.org.
    #
    # @return [Boolean]
    def exists?
      head do |response|
        case response
          when Net::HTTPSuccess     then true
          when Net::HTTPClientError then false
          else true # FIXME: dubious default, for now
        end
      end
    end

    ##
    # Returns the URL of this resource.
    #
    # @return [RDF::URI]
    def to_uri
      url
    end

    ##
    # Returns the RDF data for this resource.
    def to_rdf
      get('.nt', 'Accept' => 'text/plain') do |response|
        case response
          when Net::HTTPSuccess
            reader = RDF::NTriples::Reader.new(response.body)
            reader.to_a.extend(RDF::Enumerable, RDF::Queryable) # FIXME
        end
      end
    end

    ##
    # Returns a developer-friendly representation of this resource.
    #
    # @return [String]
    def inspect
      sprintf("#<%s:%#0x(%s)>", self.class.name, object_id, to_s)
    end

    ##
    # Outputs a developer-friendly representation of this resource to
    # `stderr`.
    #
    # @return [void]
    def inspect!
      warn(inspect)
    end

    ##
    # Performs an HTTP HEAD request on this resource.
    #
    # @param  [String, #to_s]          format
    # @param  [Hash{String => String}] headers
    # @yield  [response]
    # @yieldparam [Net::HTTPResponse] response
    # @return [Net::HTTPResponse]
    def head(format = nil, headers = {}, &block)
      request(:head, format, headers, &block)
    end

    ##
    # Performs an HTTP GET request on this resource.
    #
    # @param  [String, #to_s]          format
    # @param  [Hash{String => String}] headers
    # @yield  [response]
    # @yieldparam [Net::HTTPResponse] response
    # @return [Net::HTTPResponse]
    def get(format = nil, headers = {}, &block)
      request(:get, format, headers, &block)
    end

    private

    ##
    # Shared implementation for #head and #get: performs the request with
    # the default headers merged in, yielding the response when a block is
    # given and returning it otherwise.
    def request(verb, format, headers, &block)
      Net::HTTP.start(url.host, url.port) do |http|
        response = http.public_send(verb, url.path.to_s + format.to_s, HEADERS.merge(headers))
        block ? block.call(response) : response
      end
    end
  end # Resource
end # Datagraph
Ensured that Resource instances are comparable.
require 'net/http'
module Datagraph
  ##
  # Base class for Datagraph.org resources.
  #
  # Improvement: #head and #get previously duplicated the whole HTTP
  # boilerplate; it now lives in the private #request helper. Their
  # signatures and behavior are unchanged.
  class Resource
    HEADERS = {'Accept' => 'text/plain'} # N-Triples

    include Comparable

    ##
    # Factory: dispatches on the spec string's shape to Repository or
    # Account; returns nil for an unrecognized spec.
    def self.new(*args, &block)
      if self == Resource
        case spec = args.first
          when Repository::SPEC
            Repository.new(*spec.split('/'))
          when Account::SPEC
            Account.new(spec)
        end
      else
        super
      end
    end

    ##
    # @return [RDF::URI]
    attr_reader :url

    ##
    # @param  [RDF::URI, String] url
    def initialize(url)
      @url = RDF::URI.new(url)
    end

    ##
    # Returns `true` if this resource exists on Datagraph.org.
    #
    # @return [Boolean]
    def exists?
      head do |response|
        case response
          when Net::HTTPSuccess     then true
          when Net::HTTPClientError then false
          else true # FIXME: dubious default, for now
        end
      end
    end

    ##
    # Returns `true` if this resource is equal to the given `other`
    # resource.
    #
    # @param  [Object] other
    # @return [Boolean]
    def eql?(other)
      other.class.eql?(self.class) && self == other
    end

    ##
    # Compares this resources to the given `other` resource.
    #
    # @param  [Object] other
    # @return [Integer] `-1`, `0`, or `1`
    def <=>(other)
      self.to_uri <=> other.to_uri
    end

    ##
    # Returns the URL of this resource.
    #
    # @return [RDF::URI]
    def to_uri
      url
    end

    ##
    # Returns the RDF data for this resource.
    def to_rdf
      get('.nt', 'Accept' => 'text/plain') do |response|
        case response
          when Net::HTTPSuccess
            reader = RDF::NTriples::Reader.new(response.body)
            reader.to_a.extend(RDF::Enumerable, RDF::Queryable) # FIXME
        end
      end
    end

    ##
    # Returns a developer-friendly representation of this resource.
    #
    # @return [String]
    def inspect
      sprintf("#<%s:%#0x(%s)>", self.class.name, object_id, to_s)
    end

    ##
    # Outputs a developer-friendly representation of this resource to
    # `stderr`.
    #
    # @return [void]
    def inspect!
      warn(inspect)
    end

    ##
    # Performs an HTTP HEAD request on this resource.
    #
    # @param  [String, #to_s]          format
    # @param  [Hash{String => String}] headers
    # @yield  [response]
    # @yieldparam [Net::HTTPResponse] response
    # @return [Net::HTTPResponse]
    def head(format = nil, headers = {}, &block)
      request(:head, format, headers, &block)
    end

    ##
    # Performs an HTTP GET request on this resource.
    #
    # @param  [String, #to_s]          format
    # @param  [Hash{String => String}] headers
    # @yield  [response]
    # @yieldparam [Net::HTTPResponse] response
    # @return [Net::HTTPResponse]
    def get(format = nil, headers = {}, &block)
      request(:get, format, headers, &block)
    end

    private

    ##
    # Shared implementation for #head and #get: performs the request with
    # the default headers merged in, yielding the response when a block is
    # given and returning it otherwise.
    def request(verb, format, headers, &block)
      Net::HTTP.start(url.host, url.port) do |http|
        response = http.public_send(verb, url.path.to_s + format.to_s, HEADERS.merge(headers))
        block ? block.call(response) : response
      end
    end
  end # Resource
end # Datagraph
|
require 'yaml'
require 'json'
require 'nokogiri'
require 'open-uri'
# Scrapes one wiki page for card tables and returns an array of card hashes
# ("id", "idol_id", "idol_type", "rare", "name"). Ids start at +pointer+ and
# increment per card. Only the first +table_num_limit+ tables are read.
def card_list_of(base_uri, page_id, table_num_limit, pointer)
  card_list = []
  html = Nokogiri::HTML(open("%s/%d.html"%[base_uri, page_id]))
  wiki_body = html.css('#wikibody').first
  table_list = wiki_body.css('table')
  table_list.each_with_index do |table, i|
    next unless i < table_num_limit
    tr_list = table.children.css('tr')
    tr_list.each_with_index do |tr_elm, j|
      # Skip every 6th row: the wiki inserts an empty row every 6 rows
      # (presumably for readability).
      next unless (j % 6) != 0
      td_list = tr_elm.css('td')
      card = {
        "id" => pointer.to_s,
        "idol_id" => idol_id(td_list[2].inner_text),
        "idol_type" => td_list[1].inner_text,
        "rare" => td_list[0].inner_text,
        "name" => td_list[2].inner_text,
      }
      card_list.push(card)
      pointer = pointer + 1
    end
  end
  card_list
end
# Looks up the idol id for a card name. The idol name is taken as the last
# whitespace-delimited token of +card_name+ and matched against
# json/idol_list.json; returns the id as an Integer, or -1 when not found.
def idol_id(card_name)
  idol_list = JSON.load(File.read(File.expand_path('../json/idol_list.json', File.dirname(__FILE__))))
  wanted_name = /\S+$/.match(card_name).to_s
  matched = idol_list.find { |idol| idol["name"] == wanted_name }
  (matched ? matched["id"] : -1).to_i
end
# main: scrape every configured wiki page, regenerate the JSON/JS card list
# files and the README headline, then (after confirmation) auto-commit.
config = YAML.load(File.read(File.expand_path('config.yml', File.dirname(__FILE__))))
page_list = config[:page_list]
base_uri = config[:base_uri]
all_card_list = []
pointer = 1
page_list.each do |page_info|
  all_card_list.concat(card_list_of(base_uri, page_info[:page_id], page_info[:table_num], pointer))
  # Card ids continue across pages.
  pointer = all_card_list.length + 1
end
# js and json update
json_card_list = JSON.pretty_generate(all_card_list)
js_json_card_list = "var ___millimas_card_list =\n%s;\n"%[json_card_list]
File.write(File.expand_path('../json/card_list.json', File.dirname(__FILE__)), json_card_list)
File.write(File.expand_path('../js/card_list_json.js', File.dirname(__FILE__)), js_json_card_list)
# readme update: replace the 『...』-quoted latest card name.
readme = File.read(File.expand_path('../README.md', File.dirname(__FILE__)))
readme.sub!(/『.+?』/, "『" + all_card_list.last['name'] + "』")
File.write(File.expand_path('../README.md', File.dirname(__FILE__)), readme)
# Nothing changed: exit silently before prompting.
exit if `git diff`.empty?
puts `git diff`
puts 'commit? (y/N):'
exit unless STDIN.gets.chomp == 'y'
# auto commit
base_dir = File.expand_path('../', File.dirname(__FILE__))
puts `#{base_dir}/tools/commit_update.sh #{base_dir} "#{all_card_list.last['name']}"`
# vim: sts=2 sw=2 ts=2
Fix to_i position.
require 'yaml'
require 'json'
require 'nokogiri'
require 'open-uri'
# Scrapes up to +table_num_limit+ card tables from the wiki page at
# "<base_uri>/<page_id>.html" and returns an array of card hashes,
# assigning sequential string ids starting at +pointer+.
def card_list_of(base_uri, page_id, table_num_limit, pointer)
  card_list = []
  # Kernel#open on a URL was deprecated in Ruby 2.6 and removed in 3.0;
  # URI.open (provided by open-uri) is the supported form.
  html = Nokogiri::HTML(URI.open("%s/%d.html"%[base_uri, page_id]))
  wiki_body = html.css('#wikibody').first
  table_list = wiki_body.css('table')
  table_list.each_with_index do |table, i|
    next unless i < table_num_limit
    tr_list = table.children.css('tr')
    tr_list.each_with_index do |tr_elm, j|
      # The wiki inserts a spacer row every 6 rows (presumably for
      # readability), so skip rows where j % 6 == 0.
      next if (j % 6).zero?
      td_list = tr_elm.css('td')
      card = {
        "id" => pointer.to_s,
        "idol_id" => idol_id(td_list[2].inner_text),
        "idol_type" => td_list[1].inner_text,
        "rare" => td_list[0].inner_text,
        "name" => td_list[2].inner_text,
      }
      card_list.push(card)
      pointer = pointer + 1
    end
  end
  card_list
end
# Looks up the numeric idol id for a card by matching the card name's
# final whitespace-delimited token against json/idol_list.json.
# Returns -1 when no idol matches.
def idol_id(card_name)
  roster = JSON.load(File.read(File.expand_path('../json/idol_list.json', File.dirname(__FILE__))))
  # the idol's name is the last non-space run in the card name
  idol_name = /\S+$/.match(card_name).to_s
  entry = roster.find { |idol| idol["name"] == idol_name }
  entry ? entry["id"].to_i : -1
end
# main
# Scrapes card tables from every wiki page listed in config.yml,
# regenerates the JSON/JS card-list artifacts and the README, then
# offers to auto-commit the result.
config = YAML.load(File.read(File.expand_path('config.yml', File.dirname(__FILE__))))
page_list = config[:page_list]
base_uri = config[:base_uri]
all_card_list = []
# pointer is the next sequential card id, carried across pages
pointer = 1
page_list.each do |page_info|
  all_card_list.concat(card_list_of(base_uri, page_info[:page_id], page_info[:table_num], pointer))
  pointer = all_card_list.length + 1
end
# js and json update
json_card_list = JSON.pretty_generate(all_card_list)
# wrap the JSON in a JS global assignment for direct <script> inclusion
js_json_card_list = "var ___millimas_card_list =\n%s;\n"%[json_card_list]
File.write(File.expand_path('../json/card_list.json', File.dirname(__FILE__)), json_card_list)
File.write(File.expand_path('../js/card_list_json.js', File.dirname(__FILE__)), js_json_card_list)
# readme update
# replace the first 『…』 span in the README with the newest card's name
readme = File.read(File.expand_path('../README.md', File.dirname(__FILE__)))
readme.sub!(/『.+?』/, "『" + all_card_list.last['name'] + "』")
File.write(File.expand_path('../README.md', File.dirname(__FILE__)), readme)
# capture the diff once instead of shelling out to `git diff` twice
diff = `git diff`
exit if diff.empty?
puts diff
puts 'commit? (y/N):'
exit unless STDIN.gets.chomp == 'y'
# auto commit
base_dir = File.expand_path('../', File.dirname(__FILE__))
puts `#{base_dir}/tools/commit_update.sh #{base_dir} "#{all_card_list.last['name']}"`
# vim: sts=2 sw=2 ts=2
|
module Devise
  module Async
    # Defers Devise notifications until the record is persisted, then hands
    # them off to the background worker.
    module Model
      extend ActiveSupport::Concern

      included do
        # Not every ORM exposes transactional callbacks: hooking after_commit
        # unconditionally raises NoMethodError on e.g. Mongoid. Fall back to
        # after_save when after_commit is unavailable.
        if respond_to?(:after_commit) # ActiveRecord
          after_commit :send_devise_pending_notifications
        else # e.g. Mongoid
          after_save :send_devise_pending_notifications
        end
      end

      protected

      # Queue the notification while the record has unsaved changes so it is
      # only delivered after a successful persist; otherwise enqueue now.
      def send_devise_notification(notification)
        if changed?
          devise_pending_notifications << notification
        else
          Devise::Async::Worker.enqueue(notification, self.class.name, self.id.to_s)
        end
      end

      # Flush notifications accumulated before the record was persisted.
      def send_devise_pending_notifications
        devise_pending_notifications.each do |notification|
          # Use `id.to_s` to avoid problems with mongoid 2.4.X ids being serialized
          # wrong with YAJL.
          Devise::Async::Worker.enqueue(notification, self.class.name, self.id.to_s)
        end
        @devise_pending_notifications = []
      end

      # Lazily-initialized buffer of notifications awaiting persistence.
      def devise_pending_notifications
        @devise_pending_notifications ||= []
      end
    end
  end
end
Do not try to hook `after_commit` if the ORM doesn't support it; fall back to `after_save`.
module Devise
  module Async
    # Defers Devise notifications until the record is persisted, then hands
    # them off to the background worker.
    module Model
      extend ActiveSupport::Concern

      included do
        # ActiveRecord exposes after_commit; other ORMs (e.g. Mongoid)
        # only provide after_save.
        hook = respond_to?(:after_commit) ? :after_commit : :after_save
        send(hook, :send_devise_pending_notifications)
      end

      protected

      # Buffer the notification while the record has unsaved changes so it is
      # only delivered after a successful persist; otherwise enqueue now.
      def send_devise_notification(notification)
        return devise_pending_notifications << notification if changed?

        Devise::Async::Worker.enqueue(notification, self.class.name, self.id.to_s)
      end

      # Flush notifications accumulated before the record was persisted.
      def send_devise_pending_notifications
        devise_pending_notifications.each do |pending|
          # Use `id.to_s` to avoid problems with mongoid 2.4.X ids being serialized
          # wrong with YAJL.
          Devise::Async::Worker.enqueue(pending, self.class.name, self.id.to_s)
        end
        @devise_pending_notifications = []
      end

      # Lazily-initialized buffer of notifications awaiting persistence.
      def devise_pending_notifications
        @devise_pending_notifications ||= []
      end
    end
  end
end
|
module Diesel
  # Gem version components, assembled into a dotted version string.
  module VERSION
    MAJOR = 0
    MINOR = 0
    TINY  = 28
    PRE   = nil

    # "MAJOR.MINOR.TINY" with ".PRE" appended only for pre-releases.
    parts = [MAJOR, MINOR, TINY]
    parts << PRE unless PRE.nil?
    STRING = parts.join(".")
  end

  # Returns the gem version as a Gem::Version object.
  def self.gem_version
    ::Gem::Version.new(VERSION::STRING)
  end
end
Bumped version to 0.0.29.
module Diesel
  # Gem version components, assembled into a dotted version string.
  module VERSION
    MAJOR = 0
    MINOR = 0
    TINY  = 29
    PRE   = nil

    # "MAJOR.MINOR.TINY" with ".PRE" appended only for pre-releases.
    parts = [MAJOR, MINOR, TINY]
    parts << PRE unless PRE.nil?
    STRING = parts.join(".")
  end

  # Returns the gem version as a Gem::Version object.
  def self.gem_version
    ::Gem::Version.new(VERSION::STRING)
  end
end
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.