CombinedText stringlengths 4 3.42M |
|---|
# Version constant for the activejob-locks gem (pre-bump).
module Activejob
module Locks
VERSION = "0.0.3"
end
end
Bump version to 0.0.4.
# Version constant for the activejob-locks gem (post-bump).
module Activejob
module Locks
VERSION = "0.0.4"
end
end
|
class ActiveRecord::Base
  include ActionView::Helpers::TagHelper, ActionView::Helpers::TextHelper

  # DOM id of the form "<pluralized-dasherized-class>-<id>", e.g. "line-items-3".
  def dom_id
    [self.class.name.downcase.pluralize.dasherize, id] * '-'
  end

  # Creates a "<singular>_attributes=" writer so the association can be
  # set using parameters from a form.
  #
  # In order to restrict access, any class calling this method must have a
  # method named `company` which will be used to find any new associated
  # objects.
  #
  # The parameters should be a hash keyed by the ids of the objects to be
  # added. Any existing members of the association without an id in the
  # parameters will be removed from the association.
  def self.adds_and_removes_using_params(association)
    method_name = "#{association.to_s.singularize}_attributes="
    # define_method instead of a class_eval'd source string: no manual
    # escaping, and errors report a real location.
    define_method(method_name) do |params|
      add_and_delete_from_attributes(association.to_s, params)
    end
  end

  # Called by adds_and_removes_using_params; see that method for details.
  # Syncs the association's members with the ids present in +params+
  # (a hash of id => (ignored) attributes).
  def add_and_delete_from_attributes(association_name, params)
    association_objects = send(association_name)
    updated = []
    params.each_key do |id|
      existing = association_objects.detect { |o| o.id == id.to_i }
      if existing.nil?
        # NOTE(review): assumes the including class defines `company` —
        # new members are scoped through it to restrict access.
        existing = company.send(association_name).find(id)
        association_objects << existing
      end
      updated << existing
    end
    # Removed dead `klass = association_objects.build.class`: `build` was
    # side-effecting a blank record into the collection, and `klass` was
    # never used.
    missing = association_objects - updated
    # Splat: the association's `delete` expects records, not an array.
    association_objects.delete(*missing)
  end
end
Removed helpers from ActiveRecord::Base for now... [Temp]
class ActiveRecord::Base
  # include ActionView::Helpers::TagHelper, ActionView::Helpers::TextHelper

  # DOM id of the form "<pluralized-dasherized-class>-<id>", e.g. "line-items-3".
  def dom_id
    [self.class.name.downcase.pluralize.dasherize, id] * '-'
  end

  # Creates a "<singular>_attributes=" writer so the association can be
  # set using parameters from a form.
  #
  # In order to restrict access, any class calling this method must have a
  # method named `company` which will be used to find any new associated
  # objects.
  #
  # The parameters should be a hash keyed by the ids of the objects to be
  # added. Any existing members of the association without an id in the
  # parameters will be removed from the association.
  def self.adds_and_removes_using_params(association)
    method_name = "#{association.to_s.singularize}_attributes="
    # define_method instead of a class_eval'd source string: no manual
    # escaping, and errors report a real location.
    define_method(method_name) do |params|
      add_and_delete_from_attributes(association.to_s, params)
    end
  end

  # Called by adds_and_removes_using_params; see that method for details.
  # Syncs the association's members with the ids present in +params+
  # (a hash of id => (ignored) attributes).
  def add_and_delete_from_attributes(association_name, params)
    association_objects = send(association_name)
    updated = []
    params.each_key do |id|
      existing = association_objects.detect { |o| o.id == id.to_i }
      if existing.nil?
        # NOTE(review): assumes the including class defines `company` —
        # new members are scoped through it to restrict access.
        existing = company.send(association_name).find(id)
        association_objects << existing
      end
      updated << existing
    end
    # Removed dead `klass = association_objects.build.class`: `build` was
    # side-effecting a blank record into the collection, and `klass` was
    # never used.
    missing = association_objects - updated
    # Splat: the association's `delete` expects records, not an array.
    association_objects.delete(*missing)
  end
end
|
# encoding: utf-8
require 'date'
require 'erb'
module GHI
module Formatting
class << self
attr_accessor :paginate
end
self.paginate = true # Default.
autoload :Colors, 'ghi/formatting/colors'
include Colors
CURSOR = {
:up => lambda { |n| "\e[#{n}A" },
:column => lambda { |n| "\e[#{n}G" },
:hide => "\e[?25l",
:show => "\e[?25h"
}
THROBBERS = [
%w(⠋ ⠙ ⠹ ⠸ ⠼ ⠴ ⠦ ⠧ ⠇ ⠏),
%w(⠋ ⠙ ⠚ ⠞ ⠖ ⠦ ⠴ ⠲ ⠳ ⠓),
%w(⠄ ⠆ ⠇ ⠋ ⠙ ⠸ ⠰ ⠠ ⠰ ⠸ ⠙ ⠋ ⠇ ⠆ ),
%w(⠋ ⠙ ⠚ ⠒ ⠂ ⠂ ⠒ ⠲ ⠴ ⠦ ⠖ ⠒ ⠐ ⠐ ⠒ ⠓ ⠋),
%w(⠁ ⠉ ⠙ ⠚ ⠒ ⠂ ⠂ ⠒ ⠲ ⠴ ⠤ ⠄ ⠄ ⠤ ⠴ ⠲ ⠒ ⠂ ⠂ ⠒ ⠚ ⠙ ⠉ ⠁),
%w(⠈ ⠉ ⠋ ⠓ ⠒ ⠐ ⠐ ⠒ ⠖ ⠦ ⠤ ⠠ ⠠ ⠤ ⠦ ⠖ ⠒ ⠐ ⠐ ⠒ ⠓ ⠋ ⠉ ⠈),
%w(⠁ ⠁ ⠉ ⠙ ⠚ ⠒ ⠂ ⠂ ⠒ ⠲ ⠴ ⠤ ⠄ ⠄ ⠤ ⠠ ⠠ ⠤ ⠦ ⠖ ⠒ ⠐ ⠐ ⠒ ⠓ ⠋ ⠉ ⠈ ⠈ ⠉)
]
def puts *strings
strings = strings.flatten.map { |s|
s.gsub(/(^| )*@([^@\s]+)/) {
if $2 == Authorization.username
bright { fg(:yellow) { "#$1@#$2" } }
else
bright { "#$1@#$2" }
end
}
}
super strings
end
def page header = nil, throttle = 0
if paginate?
pager = GHI.config('ghi.pager') || GHI.config('core.pager')
pager ||= ENV['PAGER']
pager ||= 'less'
pager += ' -EKRX -b1' if pager =~ /^less( -[EKRX]+)?$/
if pager && !pager.empty? && pager != 'cat'
$stdout = IO.popen pager, 'w'
end
puts header if header
end
loop do
yield
sleep throttle
end
rescue Errno::EPIPE
exit
ensure
unless $stdout == STDOUT
$stdout.close_write
$stdout = STDOUT
print CURSOR[:show]
exit
end
end
def paginate?
$stdout.tty? && $stdout == STDOUT && Formatting.paginate
end
# Shorten +string+ to fit the terminal width minus +reserved+ columns,
# breaking at whitespace and appending "..." when anything was cut off.
def truncate string, reserved
  budget = columns - reserved
  shortened = string.scan(/.{0,#{budget}}(?:\s|\Z)/).first.strip
  shortened << "..." unless shortened == string
  shortened
end
def indent string, level = 4, maxwidth = columns
string = string.gsub(/\r/, '')
string.gsub!(/[\t ]+$/, '')
string.gsub!(/\n{3,}/, "\n\n")
width = maxwidth - level - 1
lines = string.scan(
/.{0,#{width}}(?:\s|\Z)|[\S]{#{width},}/ # TODO: Test long lines.
).map { |line| " " * level + line.chomp }
format_markdown lines.join("\n").rstrip, level
end
# Terminal width in columns; falls back to 80 when undetectable.
def columns
dimensions[1] || 80
end
# [rows, columns] of the terminal, read by shelling out to `stty size`.
# When stty produces no output (e.g. no tty), this returns [] and
# callers such as #columns fall back to their defaults.
def dimensions
`stty size`.chomp.split(' ').map { |n| n.to_i }
end
#--
# Specific formatters:
#++
def format_issues_header
state = assigns[:state] || 'open'
header = "# #{repo || 'Global,'} #{state} issues"
if repo
if milestone = assigns[:milestone]
case milestone
when '*' then header << ' with a milestone'
when 'none' then header << ' without a milestone'
else
header.sub! repo, "#{repo} milestone ##{milestone}"
end
end
if assignee = assigns[:assignee]
header << case assignee
when '*' then ', assigned'
when 'none' then ', unassigned'
else
assignee = 'you' if Authorization.username == assignee
", assigned to #{assignee}"
end
end
if mentioned = assigns[:mentioned]
mentioned = 'you' if Authorization.username == mentioned
header << ", mentioning #{mentioned}"
end
else
header << case assigns[:filter]
when 'created' then ' you created'
when 'mentioned' then ' that mention you'
when 'subscribed' then " you're subscribed to"
else
' assigned to you'
end
end
if labels = assigns[:labels]
header << ", labeled #{assigns[:labels].gsub ',', ', '}"
end
if sort = assigns[:sort]
header << ", by #{sort} #{reverse ? 'ascending' : 'descending'}"
end
format_state assigns[:state], header
end
# TODO: Show milestones.
def format_issues issues, include_repo
return 'None.' if issues.empty?
include_repo and issues.each do |i|
%r{/repos/[^/]+/([^/]+)} === i['url'] and i['repo'] = $1
end
nmax, rmax = %w(number repo).map { |f|
issues.sort_by { |i| i[f].to_s.size }.last[f].to_s.size
}
issues.map { |i|
n, title, labels = i['number'], i['title'], i['labels']
l = 9 + nmax + rmax + no_color { format_labels labels }.to_s.length
a = i['assignee'] && i['assignee']['login'] == Authorization.username
l += 2 if a
p = i['pull_request']['html_url'] and l += 2
c = i['comments']
l += c.to_s.length + 1 unless c == 0
[
" ",
(i['repo'].to_s.rjust(rmax) if i['repo']),
"#{bright { n.to_s.rjust nmax }}:",
truncate(title, l),
format_labels(labels),
(fg('aaaaaa') { c } unless c == 0),
(fg('aaaaaa') { '↑' } if p),
(fg(:yellow) { '@' } if a)
].compact.join ' '
}
end
# TODO: Show milestone, number of comments, pull request attached.
def format_issue i, width = columns
return unless i['created_at']
ERB.new(<<EOF).result binding
<% p = i['pull_request']['html_url'] %>\
<%= bright { no_color { indent '%s%s: %s' % [p ? '↑' : '#', \
*i.values_at('number', 'title')], 0, width } } %>
@<%= i['user']['login'] %> opened this <%= p ? 'pull request' : 'issue' %> \
<%= format_date DateTime.parse(i['created_at']) %>. \
<%= format_state i['state'], format_tag(i['state']), :bg %> \
<% unless i['comments'] == 0 %>\
<%= fg('aaaaaa'){
template = "%d comment"
template << "s" unless i['comments'] == 1
'(' << template % i['comments'] << ')'
} %>\
<% end %>\
<% if i['assignee'] || !i['labels'].empty? %>
<% if i['assignee'] %>@<%= i['assignee']['login'] %> is assigned. <% end %>\
<% unless i['labels'].empty? %><%= format_labels(i['labels']) %><% end %>\
<% end %>\
<% if i['milestone'] %>
Milestone #<%= i['milestone']['number'] %>: <%= i['milestone']['title'] %>\
<%= " \#{bright{fg(:yellow){'⚠'}}}" if past_due? i['milestone'] %>\
<% end %>
<% if i['body'] && !i['body'].empty? %>
<%= indent i['body'], 4, width %>
<% end %>
EOF
end
def format_comments comments
return 'None.' if comments.empty?
comments.map { |comment| format_comment comment }
end
def format_comment c, width = columns
<<EOF
@#{c['user']['login']} commented \
#{format_date DateTime.parse(c['created_at'])}:
#{indent c['body'], 4, width}
EOF
end
def format_milestones milestones
return 'None.' if milestones.empty?
max = milestones.sort_by { |m|
m['number'].to_s.size
}.last['number'].to_s.size
milestones.map { |m|
line = [" #{m['number'].to_s.rjust max }:"]
space = past_due?(m) ? 6 : 4
line << truncate(m['title'], max + space)
line << '⚠' if past_due? m
percent m, line.join(' ')
}
end
def format_milestone m, width = columns
ERB.new(<<EOF).result binding
<%= bright { no_color { \
indent '#%s: %s' % m.values_at('number', 'title'), 0, width } } %>
@<%= m['creator']['login'] %> created this milestone \
<%= format_date DateTime.parse(m['created_at']) %>. \
<%= format_state m['state'], format_tag(m['state']), :bg %>
<% if m['due_on'] %>\
<% due_on = DateTime.parse m['due_on'] %>\
<% if past_due? m %>\
<%= bright{fg(:yellow){"⚠"}} %> \
<%= bright{fg(:red){"Past due by \#{format_date due_on, false}."}} %>
<% else %>\
Due in <%= format_date due_on, false %>.
<% end %>\
<% end %>\
<%= percent m %>
<% if m['description'] && !m['description'].empty? %>
<%= indent m['description'], 4, width %>
<% end %>
EOF
end
# True when the milestone has a due date and it is not in the future.
def past_due? milestone
  due = milestone['due_on']
  return false unless due
  DateTime.parse(due) <= DateTime.now
end
# Render a milestone progress bar across the full terminal width: the
# leading portion of +string+ (or a default "% (closed, open)" summary)
# gets a green background proportional to the fraction of closed issues.
def percent milestone, string = nil
  open_count, closed_count = milestone.values_at('open_issues', 'closed_issues')
  fraction = closed_count.to_f / (open_count + closed_count)
  fraction = 0 if fraction.nan? # 0 open and 0 closed
  filled = (columns * fraction).round
  if string.nil?
    string = ' %d%% (%d closed, %d open)' %
      [fraction * 100, closed_count, open_count]
  end
  padded = string.ljust columns
  [bg('2cc200') { padded[0, filled] }, padded[filled, columns - filled]].join
end
# Color +string+ red when the state is 'closed', green otherwise, on the
# requested layer (:fg foreground or :bg background).
def format_state state, string = state, layer = :fg
  color = state == 'closed' ? 'ff0000' : '2cc200'
  send(layer, color) { string }
end
def format_labels labels
return if labels.empty?
[*labels].map { |l| bg(l['color']) { format_tag l['name'] } }.join ' '
end
# Pad the tag with spaces when colorizing (so a background color shows),
# otherwise wrap it in brackets for plain-text output.
def format_tag tag
  pattern = colorize? ? ' %s ' : '[%s]'
  pattern % tag
end
#--
# Helpers:
#++
#--
# TODO: DRY up editor formatters.
#++
def format_editor issue = nil
message = ERB.new(<<EOF).result binding
Please explain the issue. The first line will become the title. Trailing
lines starting with '#' (like these) will be ignored, and empty messages will
not be submitted. Issues are formatted with GitHub Flavored Markdown (GFM):
http://github.github.com/github-flavored-markdown
On <%= repo %>
<%= no_color { format_issue issue, columns - 2 if issue } %>
EOF
message.rstrip!
message.gsub!(/(?!\A)^.*$/) { |line| "# #{line}".rstrip }
message.insert 0, [
issue['title'] || issue[:title], issue['body'] || issue[:body]
].compact.join("\n\n") if issue
message
end
def format_milestone_editor milestone = nil
message = ERB.new(<<EOF).result binding
Describe the milestone. The first line will become the title. Trailing lines
starting with '#' (like these) will be ignored, and empty messages will not be
submitted. Milestones are formatted with GitHub Flavored Markdown (GFM):
http://github.github.com/github-flavored-markdown
On <%= repo %>
<%= no_color { format_milestone milestone, columns - 2 } if milestone %>
EOF
message.rstrip!
message.gsub!(/(?!\A)^.*$/) { |line| "# #{line}".rstrip }
message.insert 0, [
milestone['title'], milestone['description']
].join("\n\n") if milestone
message
end
def format_comment_editor issue, comment = nil
message = ERB.new(<<EOF).result binding
Leave a comment. The first line will become the title. Trailing lines starting
with '#' (like these) will be ignored, and empty messages will not be
submitted. Comments are formatted with GitHub Flavored Markdown (GFM):
http://github.github.com/github-flavored-markdown
On <%= repo %> issue #<%= issue['number'] %>
<%= no_color { format_issue issue } if verbose %>\
<%= no_color { format_comment comment, columns - 2 } if comment %>
EOF
message.rstrip!
message.gsub!(/(?!\A)^.*$/) { |line| "# #{line}".rstrip }
message.insert 0, comment['body'] if comment
message
end
def format_markdown string, indent = 4
c = '268bd2'
# Headers.
string.gsub!(/^( {#{indent}}\#{1,6} .+)$/, bright{'\1'})
string.gsub!(
/(^ {#{indent}}.+$\n^ {#{indent}}[-=]+$)/, bright{'\1'}
)
# Strong.
string.gsub!(
/(^|\s)(\*{2}\w(?:[^*]*\w)?\*{2})(\s|$)/m, '\1' + bright{'\2'} + '\3'
)
string.gsub!(
/(^|\s)(_{2}\w(?:[^_]*\w)?_{2})(\s|$)/m, '\1' + bright {'\2'} + '\3'
)
# Emphasis.
string.gsub!(
/(^|\s)(\*\w(?:[^*]*\w)?\*)(\s|$)/m, '\1' + underline{'\2'} + '\3'
)
string.gsub!(
/(^|\s)(_\w(?:[^_]*\w)?_)(\s|$)/m, '\1' + underline{'\2'} + '\3'
)
# Bullets/Blockquotes.
string.gsub!(/(^ {#{indent}}(?:[*>-]|\d+\.) )/, fg(c){'\1'})
# URIs.
string.gsub!(
%r{\b(<)?(https?://\S+|[^@\s]+@[^@\s]+)(>)?\b},
fg(c){'\1' + underline{'\2'} + '\3'}
)
# Code.
# string.gsub!(
# /
# (^\ {#{indent}}```.*?$)(.+?^\ {#{indent}}```$)|
# (^|[^`])(`[^`]+`)([^`]|$)
# /mx
# ) {
# post = $5
# fg(c){"#$1#$2#$3#$4".gsub(/\e\[[\d;]+m/, '')} + "#{post}"
# }
string
end
# Humanize +date+ relative to now, e.g. "2 days ago" or "3 hours ago".
# Pass suffix = false to omit the trailing "ago"/"from now".
def format_date date, suffix = true
  interval = DateTime.now - date
  days = interval.to_i.abs
  string =
    unless days.zero?
      "#{days} day#{'s' unless days == 1}"
    else
      # Under a day: break the fractional-day interval into h/m/s.
      seconds, _ = interval.divmod Rational(1, 86400)
      hours, seconds = seconds.divmod 3600
      minutes, seconds = seconds.divmod 60
      unit, count =
        if hours > 0      then ['hour', hours]
        elsif minutes > 0 then ['minute', minutes]
        else                   ['second', seconds]
        end
      "#{count} #{unit}#{'s' unless count == 1}"
    end
  when_word = (interval < 0 ? 'from now' : 'ago') if suffix
  [string, when_word].compact.join ' '
end
def throb position = 0, redraw = CURSOR[:up][1]
return yield unless paginate?
throb = THROBBERS[rand(THROBBERS.length)]
throb.reverse! if rand > 0.5
i = rand throb.length
thread = Thread.new do
dot = lambda do
print "\r#{CURSOR[:column][position]}#{throb[i]}#{CURSOR[:hide]}"
i = (i + 1) % throb.length
sleep 0.1 and dot.call
end
dot.call
end
yield
ensure
if thread
thread.kill
puts "\r#{CURSOR[:column][position]}#{redraw}#{CURSOR[:show]}"
end
end
end
end
Make the separator separate from the number
Having the separator as part of the number is annoying when you want to use the number in any bash programming. Currently the way to do this is:
ghi | grep Sponsorship | awk '{gsub(/:/, "", $1); print $1}'
With this fix it's ```ghi | grep Sponsorship | awk '{print $1}'``` — just that little bit friendlier.
# encoding: utf-8
require 'date'
require 'erb'
module GHI
module Formatting
class << self
attr_accessor :paginate
end
self.paginate = true # Default.
autoload :Colors, 'ghi/formatting/colors'
include Colors
CURSOR = {
:up => lambda { |n| "\e[#{n}A" },
:column => lambda { |n| "\e[#{n}G" },
:hide => "\e[?25l",
:show => "\e[?25h"
}
THROBBERS = [
%w(⠋ ⠙ ⠹ ⠸ ⠼ ⠴ ⠦ ⠧ ⠇ ⠏),
%w(⠋ ⠙ ⠚ ⠞ ⠖ ⠦ ⠴ ⠲ ⠳ ⠓),
%w(⠄ ⠆ ⠇ ⠋ ⠙ ⠸ ⠰ ⠠ ⠰ ⠸ ⠙ ⠋ ⠇ ⠆ ),
%w(⠋ ⠙ ⠚ ⠒ ⠂ ⠂ ⠒ ⠲ ⠴ ⠦ ⠖ ⠒ ⠐ ⠐ ⠒ ⠓ ⠋),
%w(⠁ ⠉ ⠙ ⠚ ⠒ ⠂ ⠂ ⠒ ⠲ ⠴ ⠤ ⠄ ⠄ ⠤ ⠴ ⠲ ⠒ ⠂ ⠂ ⠒ ⠚ ⠙ ⠉ ⠁),
%w(⠈ ⠉ ⠋ ⠓ ⠒ ⠐ ⠐ ⠒ ⠖ ⠦ ⠤ ⠠ ⠠ ⠤ ⠦ ⠖ ⠒ ⠐ ⠐ ⠒ ⠓ ⠋ ⠉ ⠈),
%w(⠁ ⠁ ⠉ ⠙ ⠚ ⠒ ⠂ ⠂ ⠒ ⠲ ⠴ ⠤ ⠄ ⠄ ⠤ ⠠ ⠠ ⠤ ⠦ ⠖ ⠒ ⠐ ⠐ ⠒ ⠓ ⠋ ⠉ ⠈ ⠈ ⠉)
]
def puts *strings
strings = strings.flatten.map { |s|
s.gsub(/(^| )*@([^@\s]+)/) {
if $2 == Authorization.username
bright { fg(:yellow) { "#$1@#$2" } }
else
bright { "#$1@#$2" }
end
}
}
super strings
end
def page header = nil, throttle = 0
if paginate?
pager = GHI.config('ghi.pager') || GHI.config('core.pager')
pager ||= ENV['PAGER']
pager ||= 'less'
pager += ' -EKRX -b1' if pager =~ /^less( -[EKRX]+)?$/
if pager && !pager.empty? && pager != 'cat'
$stdout = IO.popen pager, 'w'
end
puts header if header
end
loop do
yield
sleep throttle
end
rescue Errno::EPIPE
exit
ensure
unless $stdout == STDOUT
$stdout.close_write
$stdout = STDOUT
print CURSOR[:show]
exit
end
end
def paginate?
$stdout.tty? && $stdout == STDOUT && Formatting.paginate
end
def truncate string, reserved
result = string.scan(/.{0,#{columns - reserved}}(?:\s|\Z)/).first.strip
result << "..." if result != string
result
end
def indent string, level = 4, maxwidth = columns
string = string.gsub(/\r/, '')
string.gsub!(/[\t ]+$/, '')
string.gsub!(/\n{3,}/, "\n\n")
width = maxwidth - level - 1
lines = string.scan(
/.{0,#{width}}(?:\s|\Z)|[\S]{#{width},}/ # TODO: Test long lines.
).map { |line| " " * level + line.chomp }
format_markdown lines.join("\n").rstrip, level
end
def columns
dimensions[1] || 80
end
def dimensions
`stty size`.chomp.split(' ').map { |n| n.to_i }
end
#--
# Specific formatters:
#++
def format_issues_header
state = assigns[:state] || 'open'
header = "# #{repo || 'Global,'} #{state} issues"
if repo
if milestone = assigns[:milestone]
case milestone
when '*' then header << ' with a milestone'
when 'none' then header << ' without a milestone'
else
header.sub! repo, "#{repo} milestone ##{milestone}"
end
end
if assignee = assigns[:assignee]
header << case assignee
when '*' then ', assigned'
when 'none' then ', unassigned'
else
assignee = 'you' if Authorization.username == assignee
", assigned to #{assignee}"
end
end
if mentioned = assigns[:mentioned]
mentioned = 'you' if Authorization.username == mentioned
header << ", mentioning #{mentioned}"
end
else
header << case assigns[:filter]
when 'created' then ' you created'
when 'mentioned' then ' that mention you'
when 'subscribed' then " you're subscribed to"
else
' assigned to you'
end
end
if labels = assigns[:labels]
header << ", labeled #{assigns[:labels].gsub ',', ', '}"
end
if sort = assigns[:sort]
header << ", by #{sort} #{reverse ? 'ascending' : 'descending'}"
end
format_state assigns[:state], header
end
# TODO: Show milestones.
def format_issues issues, include_repo
return 'None.' if issues.empty?
include_repo and issues.each do |i|
%r{/repos/[^/]+/([^/]+)} === i['url'] and i['repo'] = $1
end
nmax, rmax = %w(number repo).map { |f|
issues.sort_by { |i| i[f].to_s.size }.last[f].to_s.size
}
issues.map { |i|
n, title, labels = i['number'], i['title'], i['labels']
l = 9 + nmax + rmax + no_color { format_labels labels }.to_s.length
a = i['assignee'] && i['assignee']['login'] == Authorization.username
l += 2 if a
p = i['pull_request']['html_url'] and l += 2
c = i['comments']
l += c.to_s.length + 1 unless c == 0
[
" ",
(i['repo'].to_s.rjust(rmax) if i['repo']),
"#{bright { n.to_s.rjust nmax }}",
":",
truncate(title, l),
format_labels(labels),
(fg('aaaaaa') { c } unless c == 0),
(fg('aaaaaa') { '↑' } if p),
(fg(:yellow) { '@' } if a)
].compact.join ' '
}
end
# TODO: Show milestone, number of comments, pull request attached.
def format_issue i, width = columns
return unless i['created_at']
ERB.new(<<EOF).result binding
<% p = i['pull_request']['html_url'] %>\
<%= bright { no_color { indent '%s%s: %s' % [p ? '↑' : '#', \
*i.values_at('number', 'title')], 0, width } } %>
@<%= i['user']['login'] %> opened this <%= p ? 'pull request' : 'issue' %> \
<%= format_date DateTime.parse(i['created_at']) %>. \
<%= format_state i['state'], format_tag(i['state']), :bg %> \
<% unless i['comments'] == 0 %>\
<%= fg('aaaaaa'){
template = "%d comment"
template << "s" unless i['comments'] == 1
'(' << template % i['comments'] << ')'
} %>\
<% end %>\
<% if i['assignee'] || !i['labels'].empty? %>
<% if i['assignee'] %>@<%= i['assignee']['login'] %> is assigned. <% end %>\
<% unless i['labels'].empty? %><%= format_labels(i['labels']) %><% end %>\
<% end %>\
<% if i['milestone'] %>
Milestone #<%= i['milestone']['number'] %>: <%= i['milestone']['title'] %>\
<%= " \#{bright{fg(:yellow){'⚠'}}}" if past_due? i['milestone'] %>\
<% end %>
<% if i['body'] && !i['body'].empty? %>
<%= indent i['body'], 4, width %>
<% end %>
EOF
end
def format_comments comments
return 'None.' if comments.empty?
comments.map { |comment| format_comment comment }
end
def format_comment c, width = columns
<<EOF
@#{c['user']['login']} commented \
#{format_date DateTime.parse(c['created_at'])}:
#{indent c['body'], 4, width}
EOF
end
def format_milestones milestones
return 'None.' if milestones.empty?
max = milestones.sort_by { |m|
m['number'].to_s.size
}.last['number'].to_s.size
milestones.map { |m|
line = [" #{m['number'].to_s.rjust max }:"]
space = past_due?(m) ? 6 : 4
line << truncate(m['title'], max + space)
line << '⚠' if past_due? m
percent m, line.join(' ')
}
end
def format_milestone m, width = columns
ERB.new(<<EOF).result binding
<%= bright { no_color { \
indent '#%s: %s' % m.values_at('number', 'title'), 0, width } } %>
@<%= m['creator']['login'] %> created this milestone \
<%= format_date DateTime.parse(m['created_at']) %>. \
<%= format_state m['state'], format_tag(m['state']), :bg %>
<% if m['due_on'] %>\
<% due_on = DateTime.parse m['due_on'] %>\
<% if past_due? m %>\
<%= bright{fg(:yellow){"⚠"}} %> \
<%= bright{fg(:red){"Past due by \#{format_date due_on, false}."}} %>
<% else %>\
Due in <%= format_date due_on, false %>.
<% end %>\
<% end %>\
<%= percent m %>
<% if m['description'] && !m['description'].empty? %>
<%= indent m['description'], 4, width %>
<% end %>
EOF
end
# True when the milestone has a due date and it is not in the future.
def past_due? milestone
  due = milestone['due_on']
  return false unless due
  DateTime.parse(due) <= DateTime.now
end
def percent milestone, string = nil
open, closed = milestone.values_at('open_issues', 'closed_issues')
complete = closed.to_f / (open + closed)
complete = 0 if complete.nan?
i = (columns * complete).round
if string.nil?
string = ' %d%% (%d closed, %d open)' % [complete * 100, closed, open]
end
string = string.ljust columns
[bg('2cc200'){string[0, i]}, string[i, columns - i]].join
end
def format_state state, string = state, layer = :fg
send(layer, state == 'closed' ? 'ff0000' : '2cc200') { string }
end
def format_labels labels
return if labels.empty?
[*labels].map { |l| bg(l['color']) { format_tag l['name'] } }.join ' '
end
def format_tag tag
(colorize? ? ' %s ' : '[%s]') % tag
end
#--
# Helpers:
#++
#--
# TODO: DRY up editor formatters.
#++
def format_editor issue = nil
message = ERB.new(<<EOF).result binding
Please explain the issue. The first line will become the title. Trailing
lines starting with '#' (like these) will be ignored, and empty messages will
not be submitted. Issues are formatted with GitHub Flavored Markdown (GFM):
http://github.github.com/github-flavored-markdown
On <%= repo %>
<%= no_color { format_issue issue, columns - 2 if issue } %>
EOF
message.rstrip!
message.gsub!(/(?!\A)^.*$/) { |line| "# #{line}".rstrip }
message.insert 0, [
issue['title'] || issue[:title], issue['body'] || issue[:body]
].compact.join("\n\n") if issue
message
end
def format_milestone_editor milestone = nil
message = ERB.new(<<EOF).result binding
Describe the milestone. The first line will become the title. Trailing lines
starting with '#' (like these) will be ignored, and empty messages will not be
submitted. Milestones are formatted with GitHub Flavored Markdown (GFM):
http://github.github.com/github-flavored-markdown
On <%= repo %>
<%= no_color { format_milestone milestone, columns - 2 } if milestone %>
EOF
message.rstrip!
message.gsub!(/(?!\A)^.*$/) { |line| "# #{line}".rstrip }
message.insert 0, [
milestone['title'], milestone['description']
].join("\n\n") if milestone
message
end
def format_comment_editor issue, comment = nil
message = ERB.new(<<EOF).result binding
Leave a comment. The first line will become the title. Trailing lines starting
with '#' (like these) will be ignored, and empty messages will not be
submitted. Comments are formatted with GitHub Flavored Markdown (GFM):
http://github.github.com/github-flavored-markdown
On <%= repo %> issue #<%= issue['number'] %>
<%= no_color { format_issue issue } if verbose %>\
<%= no_color { format_comment comment, columns - 2 } if comment %>
EOF
message.rstrip!
message.gsub!(/(?!\A)^.*$/) { |line| "# #{line}".rstrip }
message.insert 0, comment['body'] if comment
message
end
def format_markdown string, indent = 4
c = '268bd2'
# Headers.
string.gsub!(/^( {#{indent}}\#{1,6} .+)$/, bright{'\1'})
string.gsub!(
/(^ {#{indent}}.+$\n^ {#{indent}}[-=]+$)/, bright{'\1'}
)
# Strong.
string.gsub!(
/(^|\s)(\*{2}\w(?:[^*]*\w)?\*{2})(\s|$)/m, '\1' + bright{'\2'} + '\3'
)
string.gsub!(
/(^|\s)(_{2}\w(?:[^_]*\w)?_{2})(\s|$)/m, '\1' + bright {'\2'} + '\3'
)
# Emphasis.
string.gsub!(
/(^|\s)(\*\w(?:[^*]*\w)?\*)(\s|$)/m, '\1' + underline{'\2'} + '\3'
)
string.gsub!(
/(^|\s)(_\w(?:[^_]*\w)?_)(\s|$)/m, '\1' + underline{'\2'} + '\3'
)
# Bullets/Blockquotes.
string.gsub!(/(^ {#{indent}}(?:[*>-]|\d+\.) )/, fg(c){'\1'})
# URIs.
string.gsub!(
%r{\b(<)?(https?://\S+|[^@\s]+@[^@\s]+)(>)?\b},
fg(c){'\1' + underline{'\2'} + '\3'}
)
# Code.
# string.gsub!(
# /
# (^\ {#{indent}}```.*?$)(.+?^\ {#{indent}}```$)|
# (^|[^`])(`[^`]+`)([^`]|$)
# /mx
# ) {
# post = $5
# fg(c){"#$1#$2#$3#$4".gsub(/\e\[[\d;]+m/, '')} + "#{post}"
# }
string
end
# Humanize +date+ relative to now, e.g. "2 days ago" or "3 hours ago".
# Pass suffix = false to omit the trailing "ago"/"from now".
def format_date date, suffix = true
  interval = DateTime.now - date
  days = interval.to_i.abs
  string =
    unless days.zero?
      "#{days} day#{'s' unless days == 1}"
    else
      # Under a day: break the fractional-day interval into h/m/s.
      seconds, _ = interval.divmod Rational(1, 86400)
      hours, seconds = seconds.divmod 3600
      minutes, seconds = seconds.divmod 60
      unit, count =
        if hours > 0      then ['hour', hours]
        elsif minutes > 0 then ['minute', minutes]
        else                   ['second', seconds]
        end
      "#{count} #{unit}#{'s' unless count == 1}"
    end
  when_word = (interval < 0 ? 'from now' : 'ago') if suffix
  [string, when_word].compact.join ' '
end
def throb position = 0, redraw = CURSOR[:up][1]
return yield unless paginate?
throb = THROBBERS[rand(THROBBERS.length)]
throb.reverse! if rand > 0.5
i = rand throb.length
thread = Thread.new do
dot = lambda do
print "\r#{CURSOR[:column][position]}#{throb[i]}#{CURSOR[:hide]}"
i = (i + 1) % throb.length
sleep 0.1 and dot.call
end
dot.call
end
yield
ensure
if thread
thread.kill
puts "\r#{CURSOR[:column][position]}#{redraw}#{CURSOR[:show]}"
end
end
end
end
|
# Version constant for the acts_as_referred gem (pre-bump).
module ActsAsReferred
VERSION = "0.1.1"
end
Bump version to 0.1.2.
# Version constant for the acts_as_referred gem (post-bump).
module ActsAsReferred
VERSION = "0.1.2"
end
|
require 'sinatra'
require 'grit'
require 'json'
module GitApi
class App < Sinatra::Base
helpers Helpers
before do
content_type 'application/json'
end
# Higher Level Git
#--------------------------------------------------------
# Get basic repo information.
#
# :repo - The String name of the repo (including .git)
#
# Returns a JSON string of the created repo
get '/gitapi/repos/:repo' do
repo = get_repo(File.join(settings.git_path, params[:repo]))
repo_to_hash(repo).to_json
end
# Create a new bare Git repository.
#
# name - The String name of the repository. The ".git" extension will be added if not provided
# hooks[] - The String array of hooks to enable when creating the repo (e.g. ["post-update", "post-receive"])
#
# Returns a JSON string of the created repo
# Create a new bare Git repository.
#
# name    - The String name of the repository. The ".git" extension will be
#           added if not provided.
# hooks[] - The String array of hooks to enable when creating the repo
#           (e.g. ["post-update", "post-receive"]).
#
# Returns a JSON string of the created repo.
post '/gitapi/repos' do
  repo_name = params[:name]
  # end_with? instead of =~ /\.git/: the regex matched ".git" anywhere in
  # the name (e.g. "my.github"), wrongly skipping the extension.
  repo_name += ".git" unless repo_name.end_with?(".git")
  repo = Grit::Repo.init_bare(File.join(settings.git_path, repo_name))
  enable_hooks(File.join(settings.git_path, repo_name), params[:hooks]) if params[:hooks]
  repo_to_hash(repo).to_json
end
# Get a list of all branches in repo.
#
# :repo - The String name of the repo (including .git)
#
# Returns a JSON string containing an array of all branches in repo
get '/gitapi/repos/:repo/branches' do
repo = get_repo(File.join(settings.git_path, params[:repo]))
heads = repo.heads
heads.map { |head| head_to_hash(head) }.to_json
end
# Get a list of all files in the branch root folder.
#
# :repo - The String name of the repo (including .git)
# :branch - The String name of the branch (e.g. "master")
#
# Returns a JSON string containing an array of all files in branch, plus sha of the tree
get '/gitapi/repos/:repo/branches/:branch/files' do
repo = get_repo(File.join(settings.git_path, params[:repo]))
tree = repo.tree(params[:branch])
tree_to_hash(tree).to_json
end
# Get file (if file is specified) or array of files (if folder is specified) in branch.
#
# :repo - The String name of the repo (including .git)
# :branch - The String name of the branch (e.g. "master")
# :* - The String name of the file or folder. Can be path in a subfolder (e.g. "images/thumbs/myfile.jpg")
# encoding - If a single blob is returned, this encoding is used for the blob data (defaults to utf-8)
#
# Returns a JSON string containing file content or an array of file names
get '/gitapi/repos/:repo/branches/:branch/files/*' do
repo = get_repo(File.join(settings.git_path, params[:repo]))
gitobject = get_object_from_tree(repo, params[:branch], params[:splat].first)
if(gitobject.is_a?(Grit::Tree))
tree_to_hash(gitobject).to_json
else
encoding = params[:encoding] || "utf-8"
blob_to_hash(gitobject, encoding).to_json
end
end
# Commit a new file and its data to specified branch. This methods loads all current files in specified branch (or from_branch) into index
# before committing the new file.
#
# :repo - The String name of the repo (including .git)
# :branch - The String name of the branch (e.g. "master")
# name - The String name of the file (can be a path in folder)
# data - The String data of the file
# encoding - The String encoding of the data ("utf-8" or "base64")
# user - The String name of the commit user
# email - The String email of the commit user
# message - The String commit message
# from_branch - (Optional) The String of a specific branch whose tree should be loaded into index before committing. Use if creating a new branch.
#
# Returns a JSON string containing sha of the commit
post '/gitapi/repos/:repo/branches/:branch/files' do
sha = make_file(params[:repo], params[:branch], params[:name], params[:data], params[:encoding], params[:user], params[:email], params[:message], params[:from_branch])
commit_to_hash(sha).to_json
end
# Delete a file from the specified branch and commit the deletion. This methods loads all current files in specified branch into index
# before doing the deletion.
#
# :repo - The String name of the repo (including .git)
# :branch - The String name of the branch (e.g. "master")
# :* - The String name of the file or folder. Can be path in a subfolder (e.g. "images/thumbs/myfile.jpg")
# user - The String name of the commit user
# email - The String email of the commit user
# message - The String commit message
#
# Returns a JSON string containing sha of the commit
delete '/gitapi/repos/:repo/branches/:branch/files/*' do
repo = get_repo(File.join(settings.git_path, params[:repo]))
index = Grit::Index.new(repo)
index.read_tree(params[:branch])
index.delete(params[:splat].first)
sha = index.commit(params[:message], [repo.commit(params[:branch])], Grit::Actor.new(params[:user], params[:email]), nil, params[:branch])
commit_to_hash(sha).to_json
end
# Commit a new file and its data to specified branch. This methods loads all current files in specified branch (or from_branch) into index
# before committing the new file.
#
# :repo - The String name of the repo (including .git)
# :branch - The String name of the branch (e.g. "master")
# name - The String name of the file (can be a path in folder)
# data - The String data of the file
# encoding - The String encoding of the data ("utf-8" or "base64")
# user - The String name of the commit user
# email - The String email of the commit user
# message - The String commit message
# from_branch - (Optional) The String of a specific branch whose tree should be loaded into index before committing. Use if creating a new branch.
#
# Returns a JSON string containing sha of the commit
# NOTE(review): this is a byte-for-byte duplicate of the POST
# /gitapi/repos/:repo/branches/:branch/files route declared earlier in this
# class. Sinatra matches routes in declaration order, so this second
# definition is dead code — confirm and remove it.
post '/gitapi/repos/:repo/branches/:branch/files' do
sha = make_file(params[:repo], params[:branch], params[:name], params[:data], params[:encoding], params[:user], params[:email], params[:message], params[:from_branch])
commit_to_hash(sha).to_json
end
# Commits
#--------------------------------------------------------
# Get commits in repo
#
# :repo - The String name of the repo (including .git)
# start - The String branch name or commit sha of starting point (default: "master")
# max_count - The Integer number of commits to return (default: 10)
# skip - The Integer number of commits to skip. Can be used for pagination. (Default: 0)
# diffs - If included, each commit will be returned with its diff output
#
# Returns a JSON string containing an array of all commits
get '/gitapi/repos/:repo/commits' do
repo = get_repo(File.join(settings.git_path, params[:repo]))
commits = repo.commits(params[:start] || "master", params[:max_count] || 10, params[:skip] || 0)
commits = commits.map { |commit|
commit_hash = commit_baked_to_hash(commit)
if params[:diffs]
commit_hash["diffs"] = commit.diffs.map { |diff| diff_to_hash(diff) }
end
puts commit_hash.inspect
commit_hash
}
commits.to_json
end
# Blobs
#--------------------------------------------------------
# Get blob data from blob sha.
#
# repo - The String name of the repo (including .git)
# sha - The String sha of the blob
#
# Returns a JSON string containing the data of blob
get '/gitapi/repos/:repo/blobs/:sha' do
repo = get_repo(File.join(settings.git_path, params[:repo]))
blob = get_blob(repo, params[:sha])
blob_to_hash(blob).to_json
end
# Refs
#--------------------------------------------------------
# Get all references in repo.
#
# repo - The String name of the repo (including .git)
#
# Returns a JSON string containing an array of all references
get '/gitapi/repos/:repo/refs' do
repo = get_repo(File.join(settings.git_path, params[:repo]))
repo.refs_list.map { |ref| ref_to_hash(ref) }.to_json
end
# Create a new reference.
#
# repo - The String name of the repo (including .git)
# ref - The String name of the ref (can currently only create refs/heads, e.g. "master")
# sha - String of the SHA to set this reference to
#
# Returns a JSON string containing an array of all references
post '/gitapi/repos/:repo/refs' do
repo = get_repo(File.join(settings.git_path, params[:repo]))
sha = repo.update_ref(params[:ref], params[:sha])
commit_to_hash(sha).to_json
end
# Tags
#--------------------------------------------------------
# Get all tags in repo. This does not return lightweight tags (tags without a ref).
#
# repo - The String name of the repo (including .git)
#
# Returns a JSON string containing an array of all references
# Get all tags in repo. This does not return lightweight tags (tags
# without a ref).
#
# repo - The String name of the repo (including .git)
#
# Returns a JSON string containing an array of all tags.
get '/gitapi/repos/:repo/tags' do
  repo = get_repo(File.join(settings.git_path, params[:repo]))
  # Bug fix: was `tag_to_hash(head)` — `head` is undefined in this block
  # (copy-paste from the branches route) and raised NameError at runtime.
  repo.tags.map { |tag| tag_to_hash(tag) }.to_json
end
# Create new tag in repo. Note that creating a tag object does not create the reference that makes a tag in Git.
# If you want to create an annotated tag in Git, you have to do this call to create the tag object, and then
# create the refs/tags/[tag] reference. If you want to create a lightweight tag, you simply have to create
# the reference - this call would be unnecessary.
#
# repo - The String name of the repo (including .git)
# tag - The String name of the tag
# message - The String tag message
# sha - The String sha of the object being tagged (usually a commit sha, but could be a tree or a blob sha)
# type - The String type of the object being tagged (usually "commit", but could be "tree" or "blob")
# user - The String name of the commit user
# email - The String email of the commit user
#
#
# Returns a JSON string containing the data of blob
post '/gitapi/repos/:repo/tags' do
repo = get_repo(File.join(settings.git_path, params[:repo]))
actor = Grit::Actor.new(params[:user], params[:email])
Grit::Tag.create_tag_object(repo, params, actor).to_json
end
# Blame
#--------------------------------------------------------

# Get blame for a specific file in the repo
#
# repo - The String name of the repo (including .git)
# :branch - The String name of the branch (e.g. "master")
# :* - The String name of the file. Can be path in a subfolder (e.g. "subfolder/myfile.txt")
#
# Returns a JSON string containing an array of all references
get '/gitapi/repos/:repo/blame/*' do
  repo = get_repo(File.join(settings.git_path, params[:repo]))
  # NOTE(review): blame is not implemented yet — the commented call below was
  # never wired up, so this route currently returns the bare repo object as
  # the response body. Confirm the intended JSON shape before implementing.
  #repo.blame
end
end
end
Removed puts
require 'sinatra'
require 'grit'
require 'json'

module GitApi
  # Sinatra application exposing a JSON HTTP API over bare Git repositories
  # managed with Grit. `settings.git_path` must point at the directory that
  # contains the bare repositories; every response body is JSON.
  class App < Sinatra::Base
    helpers Helpers

    before do
      content_type 'application/json'
    end

    # Higher Level Git
    #--------------------------------------------------------

    # Get basic repo information.
    #
    # :repo - The String name of the repo (including .git)
    #
    # Returns a JSON string of the repo
    get '/gitapi/repos/:repo' do
      repo = get_repo(File.join(settings.git_path, params[:repo]))
      repo_to_hash(repo).to_json
    end

    # Create a new bare Git repository.
    #
    # name - The String name of the repository. The ".git" extension will be added if not provided
    # hooks[] - The String array of hooks to enable when creating the repo (e.g. ["post-update", "post-receive"])
    #
    # Returns a JSON string of the created repo
    post '/gitapi/repos' do
      repo_name = params[:name]
      repo_name += ".git" unless repo_name =~ /\.git/
      repo = Grit::Repo.init_bare(File.join(settings.git_path, repo_name))
      enable_hooks(File.join(settings.git_path, repo_name), params[:hooks]) if params[:hooks]
      repo_to_hash(repo).to_json
    end

    # Get a list of all branches in repo.
    #
    # :repo - The String name of the repo (including .git)
    #
    # Returns a JSON string containing an array of all branches in repo
    get '/gitapi/repos/:repo/branches' do
      repo = get_repo(File.join(settings.git_path, params[:repo]))
      heads = repo.heads
      heads.map { |head| head_to_hash(head) }.to_json
    end

    # Get a list of all files in the branch root folder.
    #
    # :repo - The String name of the repo (including .git)
    # :branch - The String name of the branch (e.g. "master")
    #
    # Returns a JSON string containing an array of all files in branch, plus sha of the tree
    get '/gitapi/repos/:repo/branches/:branch/files' do
      repo = get_repo(File.join(settings.git_path, params[:repo]))
      tree = repo.tree(params[:branch])
      tree_to_hash(tree).to_json
    end

    # Get file (if file is specified) or array of files (if folder is specified) in branch.
    #
    # :repo - The String name of the repo (including .git)
    # :branch - The String name of the branch (e.g. "master")
    # :* - The String name of the file or folder. Can be path in a subfolder (e.g. "images/thumbs/myfile.jpg")
    # encoding - If a single blob is returned, this encoding is used for the blob data (defaults to utf-8)
    #
    # Returns a JSON string containing file content or an array of file names
    get '/gitapi/repos/:repo/branches/:branch/files/*' do
      repo = get_repo(File.join(settings.git_path, params[:repo]))
      gitobject = get_object_from_tree(repo, params[:branch], params[:splat].first)
      if gitobject.is_a?(Grit::Tree)
        tree_to_hash(gitobject).to_json
      else
        encoding = params[:encoding] || "utf-8"
        blob_to_hash(gitobject, encoding).to_json
      end
    end

    # Commit a new file and its data to specified branch. This method loads all current files in specified branch (or from_branch) into index
    # before committing the new file.
    #
    # :repo - The String name of the repo (including .git)
    # :branch - The String name of the branch (e.g. "master")
    # name - The String name of the file (can be a path in folder)
    # data - The String data of the file
    # encoding - The String encoding of the data ("utf-8" or "base64")
    # user - The String name of the commit user
    # email - The String email of the commit user
    # message - The String commit message
    # from_branch - (Optional) The String of a specific branch whose tree should be loaded into index before committing. Use if creating a new branch.
    #
    # Returns a JSON string containing sha of the commit
    post '/gitapi/repos/:repo/branches/:branch/files' do
      sha = make_file(params[:repo], params[:branch], params[:name], params[:data], params[:encoding], params[:user], params[:email], params[:message], params[:from_branch])
      commit_to_hash(sha).to_json
    end

    # Delete a file from the specified branch and commit the deletion. This method loads all current files in specified branch into index
    # before doing the deletion.
    #
    # :repo - The String name of the repo (including .git)
    # :branch - The String name of the branch (e.g. "master")
    # :* - The String name of the file or folder. Can be path in a subfolder (e.g. "images/thumbs/myfile.jpg")
    # user - The String name of the commit user
    # email - The String email of the commit user
    # message - The String commit message
    #
    # Returns a JSON string containing sha of the commit
    delete '/gitapi/repos/:repo/branches/:branch/files/*' do
      repo = get_repo(File.join(settings.git_path, params[:repo]))
      index = Grit::Index.new(repo)
      index.read_tree(params[:branch])
      index.delete(params[:splat].first)
      sha = index.commit(params[:message], [repo.commit(params[:branch])], Grit::Actor.new(params[:user], params[:email]), nil, params[:branch])
      commit_to_hash(sha).to_json
    end

    # NOTE: an exact duplicate of the POST .../branches/:branch/files route
    # (same path, same body) used to live here. Sinatra dispatches to the
    # first matching route, so the second definition was dead code and has
    # been removed.

    # Commits
    #--------------------------------------------------------

    # Get commits in repo
    #
    # :repo - The String name of the repo (including .git)
    # start - The String branch name or commit sha of starting point (default: "master")
    # max_count - The Integer number of commits to return (default: 10)
    # skip - The Integer number of commits to skip. Can be used for pagination. (Default: 0)
    # diffs - If included, each commit will be returned with its diff output
    #
    # Returns a JSON string containing an array of all commits
    get '/gitapi/repos/:repo/commits' do
      repo = get_repo(File.join(settings.git_path, params[:repo]))
      commits = repo.commits(params[:start] || "master", params[:max_count] || 10, params[:skip] || 0)
      commits = commits.map { |commit|
        commit_hash = commit_baked_to_hash(commit)
        if params[:diffs]
          commit_hash["diffs"] = commit.diffs.map { |diff| diff_to_hash(diff) }
        end
        commit_hash
      }
      commits.to_json
    end

    # Blobs
    #--------------------------------------------------------

    # Get blob data from blob sha.
    #
    # repo - The String name of the repo (including .git)
    # sha - The String sha of the blob
    #
    # Returns a JSON string containing the data of blob
    get '/gitapi/repos/:repo/blobs/:sha' do
      repo = get_repo(File.join(settings.git_path, params[:repo]))
      blob = get_blob(repo, params[:sha])
      blob_to_hash(blob).to_json
    end

    # Refs
    #--------------------------------------------------------

    # Get all references in repo.
    #
    # repo - The String name of the repo (including .git)
    #
    # Returns a JSON string containing an array of all references
    get '/gitapi/repos/:repo/refs' do
      repo = get_repo(File.join(settings.git_path, params[:repo]))
      repo.refs_list.map { |ref| ref_to_hash(ref) }.to_json
    end

    # Create a new reference.
    #
    # repo - The String name of the repo (including .git)
    # ref - The String name of the ref (can currently only create refs/heads, e.g. "master")
    # sha - String of the SHA to set this reference to
    #
    # Returns a JSON string containing the sha the ref was updated to
    post '/gitapi/repos/:repo/refs' do
      repo = get_repo(File.join(settings.git_path, params[:repo]))
      sha = repo.update_ref(params[:ref], params[:sha])
      commit_to_hash(sha).to_json
    end

    # Tags
    #--------------------------------------------------------

    # Get all tags in repo. This does not return lightweight tags (tags without a ref).
    #
    # repo - The String name of the repo (including .git)
    #
    # Returns a JSON string containing an array of all tags
    get '/gitapi/repos/:repo/tags' do
      repo = get_repo(File.join(settings.git_path, params[:repo]))
      # Bug fix: the block variable is `tag`; the old code passed an undefined
      # `head` local, raising NameError for any repo with tags.
      repo.tags.map { |tag| tag_to_hash(tag) }.to_json
    end

    # Create new tag in repo. Note that creating a tag object does not create the reference that makes a tag in Git.
    # If you want to create an annotated tag in Git, you have to do this call to create the tag object, and then
    # create the refs/tags/[tag] reference. If you want to create a lightweight tag, you simply have to create
    # the reference - this call would be unnecessary.
    #
    # repo - The String name of the repo (including .git)
    # tag - The String name of the tag
    # message - The String tag message
    # sha - The String sha of the object being tagged (usually a commit sha, but could be a tree or a blob sha)
    # type - The String type of the object being tagged (usually "commit", but could be "tree" or "blob")
    # user - The String name of the commit user
    # email - The String email of the commit user
    #
    # Returns a JSON string describing the created tag object
    post '/gitapi/repos/:repo/tags' do
      repo = get_repo(File.join(settings.git_path, params[:repo]))
      actor = Grit::Actor.new(params[:user], params[:email])
      Grit::Tag.create_tag_object(repo, params, actor).to_json
    end

    # Blame
    #--------------------------------------------------------

    # Get blame for a specific file in the repo
    #
    # repo - The String name of the repo (including .git)
    # :branch - The String name of the branch (e.g. "master")
    # :* - The String name of the file. Can be path in a subfolder (e.g. "subfolder/myfile.txt")
    #
    # Returns a JSON string containing an array of all references
    get '/gitapi/repos/:repo/blame/*' do
      repo = get_repo(File.join(settings.git_path, params[:repo]))
      # NOTE(review): blame is not implemented yet — confirm intended output.
      #repo.blame
    end
  end
end
module ActsCruddy
  module Formats
    # JSON responders for the standard CRUD controller actions.
    # Expects the including controller to provide record_class, @record,
    # @records and permitted_params.
    module Json
      def index
        @records = record_class.all
        render :json => @records
      end

      def show
        render :json => @record
      end

      def new
        render :json => @record
      end

      def create
        if @record.save
          render :json => @record, :status => :created, :location => url_for(:action => :show, :id => @record)
        else
          render :json => @record.errors.full_messages, :status => :unprocessable_entity
        end
      end

      def update
        if @record.update_attributes(permitted_params)
          render :json => nil, :status => :ok
        else
          render :json => @record.errors.full_messages, :status => :unprocessable_entity
        end
      end

      def destroy
        @record.destroy
        # Render an explicit JSON null instead of `head :ok`: an empty
        # response body causes parse errors in strict JSON clients that
        # always parse the body (matches the behavior of #update).
        render :json => nil, :status => :ok
      end
    end
  end
end
Avoid JSON parsing errors on destroy.json
module ActsCruddy
  module Formats
    # JSON format implementations of the CRUD actions. The including
    # controller supplies record_class, @record, @records and
    # permitted_params.
    module Json
      def index
        @records = record_class.all
        render json: @records
      end

      def show
        render json: @record
      end

      def new
        render json: @record
      end

      def create
        unless @record.save
          render json: @record.errors.full_messages, status: :unprocessable_entity
          return
        end
        show_url = url_for(action: :show, id: @record)
        render json: @record, status: :created, location: show_url
      end

      def update
        if @record.update_attributes(permitted_params)
          render json: nil, status: :ok
        else
          render json: @record.errors.full_messages, status: :unprocessable_entity
        end
      end

      def destroy
        @record.destroy
        # JSON null body (rather than an empty one) keeps strict JSON
        # clients from choking when they parse the response.
        render json: nil, status: :ok
      end
    end
  end
end
|
#! /usr/bin/ruby
require 'octokit'
require 'optparse'
require 'English'
require_relative 'opt_parser'
require_relative 'git_op'
# This is a private class, which has the task to execute/run tests
# called by GitbotBackend
class GitbotTestExecutor
  # options - Hash of gitbot options; every key becomes an instance
  #           variable with a dynamically defined accessor.
  def initialize(options)
    @options = options
    @options.each do |key, value|
      instance_variable_set("@#{key}", value)
      self.class.send(:attr_accessor, key)
    end
  end

  # Merges the PR branch into its target, runs the validation script and
  # removes the temporary branch again.
  #
  # pr - the pull-request object (provides base.ref and head.ref)
  #
  # Returns 'success' or 'failure' (the script result).
  def pr_test(pr)
    git = GitOp.new(@git_dir, pr, @options)
    # merge PR-branch to upstream branch
    git.merge_pr_totarget(pr.base.ref, pr.head.ref)
    # do valid tests and store the result
    test_status = run_script
    # del branch
    git.del_pr_branch(pr.base.ref, pr.head.ref)
    test_status
  end

  # run validation script for validating the PR.
  # Returns 'success' when the script exits with 0, 'failure' otherwise.
  def run_script
    script_exists?(@test_file)
    puts `#{@test_file}`
    $CHILD_STATUS.exitstatus.nonzero? ? 'failure' : 'success'
  end

  private

  # Raises a RuntimeError when the configured test script is not a
  # regular file (typo fixed in the message; `unless` instead of
  # comparing against false).
  def script_exists?(script)
    n_exist = "'#{script}' doesn't exist. Enter a valid file, -t option"
    raise n_exist unless File.file?(script)
  end
end
# Public backend of gitbot: talks to GitHub through Octokit, decides which
# open PRs need testing and publishes commit statuses with the results.
class GitbotBackend
  attr_accessor :j_status, :options, :client, :pr_files, :gbexec

  # option - optional Hash of options; when nil they are parsed from the
  #          command line by OptParser.
  def initialize(option = nil)
    Octokit.auto_paginate = true
    @client = Octokit::Client.new(netrc: true)
    @options = option.nil? ? OptParser.new.gitbot_options : option
    @j_status = ''
    @pr_files = []
    # each option will generate an object variable dynamically
    @options.each do |key, value|
      instance_variable_set("@#{key}", value)
      self.class.send(:attr_accessor, key)
    end
    @gbexec = GitbotTestExecutor.new(@options)
  end

  # public method for getting the open PRs of the configured repo
  def open_prs
    prs = @client.pull_requests(@repo, state: 'open')
    puts 'no Pull request OPEN on the REPO!' unless prs.any?
    prs
  end

  # public: retrigger the test when a magic-word comment requests it;
  # exits the process with the test result.
  def retrigger_check(pr)
    return unless retrigger_needed?(pr)
    client.create_status(@repo, pr.head.sha, 'pending',
                         context: @context, description: @description,
                         target_url: @target_url)
    exit 1 if @check
    launch_test_and_setup_status(@repo, pr)
    j_status == 'success' ? exit(0) : exit(1)
  end

  # public: always rerun tests against the pr number if this exists
  def trigger_by_pr_number(pr)
    return false if @pr_number.nil?
    return false if @pr_number != pr.number
    puts "Got triggered by PR_NUMBER OPTION, rerunning on #{@pr_number}"
    launch_test_and_setup_status(@repo, pr)
    true
  end

  # public method, trigger changelog test if the option is active
  def changelog_active(pr, comm_st)
    return false unless @changelog_test
    return false unless changelog_changed(@repo, pr, comm_st)
    true
  end

  # Handles a PR that carries no status at all yet ("unreviewed").
  def unreviewed_pr_test(pr, comm_st)
    return unless unreviewed_pr_ck(comm_st)
    pr_all_files_type(@repo, pr.number, @file_type)
    return if empty_files_changed_by_pr
    # gb.check is true when there is a job running as scheduler
    # which doesn't execute the test but triggers another job
    return false if @check
    launch_test_and_setup_status(@repo, pr)
    true
  end

  # Handles a PR that already carries statuses: run only when our context
  # is missing or still pending.
  def reviewed_pr_test(comm_st, pr)
    # if PR status is not on pending and the context is not set,
    # we don't run the tests
    return false unless context_pr(comm_st) == false ||
                        pending_pr(comm_st) == true
    pr_all_files_type(@repo, pr.number, @file_type)
    return true if changelog_active(pr, comm_st)
    return false unless @pr_files.any?
    exit 1 if @check
    launch_test_and_setup_status(@repo, pr)
    true
  end

  private

  # this function sets first pending on the PR, then executes the tests,
  # then sets the status according to the results of the executed script.
  # pr.head = the PR branch; pr.base = the upstream branch the PR targets
  def launch_test_and_setup_status(repo, pr)
    # pending
    @client.create_status(repo, pr.head.sha, 'pending',
                          context: @context, description: @description,
                          target_url: @target_url)
    # do tests
    @j_status = gbexec.pr_test(pr)
    # set status
    @client.create_status(repo, pr.head.sha, @j_status,
                          context: @context, description: @description,
                          target_url: @target_url)
  end

  # this function will check if the PR contains in a comment the magic word
  # for retriggering all the tests.
  def magicword(repo, pr_number, context)
    magic_word_trigger = "@gitbot rerun #{context} !!!"
    pr_comment = @client.issue_comments(repo, pr_number)
    # a pr always contains comments, cannot be nil
    pr_comment.each do |com|
      # FIXME: if user in @org retrigger only
      # add org variable somewhere, maybe as option
      # next unless @client.organization_member?(@org, com.user.login)
      # delete comment otherwise it will be retriggered infinitely
      if com.body.include? magic_word_trigger
        @client.delete_comment(repo, com.id)
        return true
      end
    end
    false
  end

  # collect all files of a PR number that are of a specific type
  # EX: PR 56, we check if files are '.rb'
  def pr_all_files_type(repo, pr_number, type)
    files = @client.pull_request_files(repo, pr_number)
    files.each do |file|
      @pr_files.push(file.filename) if file.filename.include? type
    end
  end

  # check if the commit of a pr is pending for our context
  def pending_pr(comm_st)
    status_for_context?(comm_st, 'pending')
  end

  # if the PR contains the magic comment, the changelog test succeeds
  def magic_comment(repo, pr_num)
    @client.issue_comments(repo, pr_num).each do |com|
      if com.body.include?('no changelog needed!')
        @j_status = 'success'
        break
      end
    end
  end

  # check if the commit statuses of the pr already contain our context.
  # false triggers the test (the PR is not yet tagged with the context).
  def context_pr(cm_st)
    cm_st.statuses.any? { |status| status['context'] == @context }
  end

  # if the pr has a travis test and one custom, we will have 2 elements.
  # in this case, if the 1st element doesn't have the state property
  # (state property is "pending", failure etc.)
  # or we have 0 statuses, the PR is "unreviewed"
  def unreviewed_pr_ck(comm_st)
    puts comm_st.statuses[0]['state']
    return false
  rescue NoMethodError
    return true
  end

  def success_status?(comm_st)
    status_for_context?(comm_st, 'success')
  end

  def failed_status?(comm_st)
    status_for_context?(comm_st, 'failure')
  end

  # true when the PR carries a status for our @context with the given state.
  # Shared implementation of pending_pr, success_status? and failed_status?.
  def status_for_context?(comm_st, state)
    comm_st.statuses.any? do |status|
      status['context'] == @context && status['state'] == state
    end
  end

  # check whether the PR changed/added any files of the wanted type
  # (can also be a dir); truthy (plus a message) when nothing matched.
  def empty_files_changed_by_pr
    return if pr_files.any?
    puts "no files of type #{@file_type} found! skipping"
    true
  end

  # run the changelog test and publish the resulting status
  def do_changelog_test(repo, pr)
    @j_status = 'failure'
    pr_all_files_type(repo, pr.number, @file_type)
    # if the pr contains changes on .changes file, test ok
    @j_status = 'success' if @pr_files.any?
    magic_comment(repo, pr.number)
    @client.create_status(repo, pr.head.sha, @j_status,
                          context: @context, description: @description,
                          target_url: @target_url)
    true
  end

  # do the changelog test and set status
  def changelog_changed(repo, pr, comm_st)
    return false unless @changelog_test
    # only execute once: don't run if the test already failed or passed
    return false if failed_status?(comm_st)
    return false if success_status?(comm_st)
    do_changelog_test(repo, pr)
  end

  def retrigger_needed?(pr)
    # sometimes we want to redo tests
    return false unless magicword(@repo, pr.number, @context)
    # changelog trigger
    if @changelog_test
      do_changelog_test(@repo, pr)
      return false
    end
    pr_all_files_type(@repo, pr.number, @file_type)
    return false unless @pr_files.any?
    # if check is set, the comment in the trigger job will be deleted.
    # so setting it to pending, it will be remembered
    true
  end
end
Fix comment
#! /usr/bin/ruby
require 'octokit'
require 'optparse'
require 'English'
require_relative 'opt_parser'
require_relative 'git_op'
# This is a private class, which has the task to execute/run tests
# called by GitbotBackend
class GitbotTestExecutor
  # options - Hash of gitbot options; every key becomes an instance
  #           variable with a dynamically defined accessor.
  def initialize(options)
    @options = options
    @options.each do |key, value|
      instance_variable_set("@#{key}", value)
      self.class.send(:attr_accessor, key)
    end
  end

  # Merges the PR branch into its target, runs the validation script and
  # removes the temporary branch again.
  #
  # pr - the pull-request object (provides base.ref and head.ref)
  #
  # Returns 'success' or 'failure' (the script result).
  def pr_test(pr)
    git = GitOp.new(@git_dir, pr, @options)
    # merge PR-branch to upstream branch
    git.merge_pr_totarget(pr.base.ref, pr.head.ref)
    # do valid tests and store the result
    test_status = run_script
    # del branch
    git.del_pr_branch(pr.base.ref, pr.head.ref)
    test_status
  end

  # run validation script for validating the PR.
  # Returns 'success' when the script exits with 0, 'failure' otherwise.
  def run_script
    script_exists?(@test_file)
    puts `#{@test_file}`
    $CHILD_STATUS.exitstatus.nonzero? ? 'failure' : 'success'
  end

  private

  # Raises a RuntimeError when the configured test script is not a
  # regular file (typo fixed in the message; `unless` instead of
  # comparing against false).
  def script_exists?(script)
    n_exist = "'#{script}' doesn't exist. Enter a valid file, -t option"
    raise n_exist unless File.file?(script)
  end
end
# Public backend of gitbot: talks to GitHub through Octokit, decides which
# open PRs need testing and publishes commit statuses with the results.
class GitbotBackend
  attr_accessor :j_status, :options, :client, :pr_files, :gbexec

  # option - optional Hash of options; when nil they are parsed from the
  #          command line by OptParser.
  def initialize(option = nil)
    Octokit.auto_paginate = true
    @client = Octokit::Client.new(netrc: true)
    @options = option.nil? ? OptParser.new.gitbot_options : option
    @j_status = ''
    @pr_files = []
    # each option will generate an object variable dynamically
    @options.each do |key, value|
      instance_variable_set("@#{key}", value)
      self.class.send(:attr_accessor, key)
    end
    @gbexec = GitbotTestExecutor.new(@options)
  end

  # public method for getting the open PRs of the configured repo
  def open_prs
    prs = @client.pull_requests(@repo, state: 'open')
    puts 'no Pull request OPEN on the REPO!' unless prs.any?
    prs
  end

  # public: retrigger the test when a magic-word comment requests it;
  # exits the process with the test result.
  def retrigger_check(pr)
    return unless retrigger_needed?(pr)
    client.create_status(@repo, pr.head.sha, 'pending',
                         context: @context, description: @description,
                         target_url: @target_url)
    exit 1 if @check
    launch_test_and_setup_status(@repo, pr)
    j_status == 'success' ? exit(0) : exit(1)
  end

  # public: always rerun tests against the pr number if this exists
  def trigger_by_pr_number(pr)
    return false if @pr_number.nil?
    return false if @pr_number != pr.number
    puts "Got triggered by PR_NUMBER OPTION, rerunning on #{@pr_number}"
    launch_test_and_setup_status(@repo, pr)
    true
  end

  # public method, trigger changelog test if the option is active
  def changelog_active(pr, comm_st)
    return false unless @changelog_test
    return false unless changelog_changed(@repo, pr, comm_st)
    true
  end

  # Handles a PR that carries no status at all yet ("unreviewed").
  def unreviewed_pr_test(pr, comm_st)
    return unless unreviewed_pr_ck(comm_st)
    pr_all_files_type(@repo, pr.number, @file_type)
    return if empty_files_changed_by_pr
    # gb.check is true when there is a job running as scheduler
    # which doesn't execute the test but triggers another job
    return false if @check
    launch_test_and_setup_status(@repo, pr)
    true
  end

  # Handles a PR that already carries statuses: run only when our context
  # is missing or still pending.
  def reviewed_pr_test(comm_st, pr)
    # if PR status is not on pending and the context is not set,
    # we don't run the tests
    return false unless context_pr(comm_st) == false ||
                        pending_pr(comm_st) == true
    pr_all_files_type(@repo, pr.number, @file_type)
    return true if changelog_active(pr, comm_st)
    return false unless @pr_files.any?
    exit 1 if @check
    launch_test_and_setup_status(@repo, pr)
    true
  end

  private

  # this function sets first pending on the PR, then executes the tests,
  # then sets the status according to the results of the executed script.
  # pr.head = the PR branch; pr.base = the upstream branch the PR targets
  def launch_test_and_setup_status(repo, pr)
    # pending
    @client.create_status(repo, pr.head.sha, 'pending',
                          context: @context, description: @description,
                          target_url: @target_url)
    # do tests
    @j_status = gbexec.pr_test(pr)
    # set status
    @client.create_status(repo, pr.head.sha, @j_status,
                          context: @context, description: @description,
                          target_url: @target_url)
  end

  # this function will check if the PR contains in a comment the magic word
  # for retriggering all the tests.
  def magicword(repo, pr_number, context)
    magic_word_trigger = "@gitbot rerun #{context} !!!"
    pr_comment = @client.issue_comments(repo, pr_number)
    # a pr always contains comments, cannot be nil
    pr_comment.each do |com|
      # FIXME: if user in @org retrigger only
      # add org variable somewhere, maybe as option
      # next unless @client.organization_member?(@org, com.user.login)
      # delete comment otherwise it will be retriggered infinitely
      if com.body.include? magic_word_trigger
        @client.delete_comment(repo, com.id)
        return true
      end
    end
    false
  end

  # collect all files of a PR number that are of a specific type
  # EX: PR 56, we check if files are '.rb'
  def pr_all_files_type(repo, pr_number, type)
    files = @client.pull_request_files(repo, pr_number)
    files.each do |file|
      @pr_files.push(file.filename) if file.filename.include? type
    end
  end

  # check if the commit of a pr is pending for our context
  def pending_pr(comm_st)
    status_for_context?(comm_st, 'pending')
  end

  # if the PR contains the magic comment, the changelog test succeeds
  def magic_comment(repo, pr_num)
    @client.issue_comments(repo, pr_num).each do |com|
      if com.body.include?('no changelog needed!')
        @j_status = 'success'
        break
      end
    end
  end

  # check if the commit statuses of the pr already contain our context.
  # false triggers the test (the PR is not yet tagged with the context).
  def context_pr(cm_st)
    cm_st.statuses.any? { |status| status['context'] == @context }
  end

  # if the pr has a travis test and one custom, we will have 2 elements.
  # in this case, if the 1st element doesn't have the state property
  # (state property is "pending", failure etc.)
  # or we have 0 statuses, the PR is "unreviewed"
  def unreviewed_pr_ck(comm_st)
    puts comm_st.statuses[0]['state']
    return false
  rescue NoMethodError
    return true
  end

  def success_status?(comm_st)
    status_for_context?(comm_st, 'success')
  end

  def failed_status?(comm_st)
    status_for_context?(comm_st, 'failure')
  end

  # true when the PR carries a status for our @context with the given state.
  # Shared implementation of pending_pr, success_status? and failed_status?.
  def status_for_context?(comm_st, state)
    comm_st.statuses.any? do |status|
      status['context'] == @context && status['state'] == state
    end
  end

  # check whether the PR changed/added any files of the wanted type
  # (can also be a dir); truthy (plus a message) when nothing matched.
  def empty_files_changed_by_pr
    return if pr_files.any?
    puts "no files of type #{@file_type} found! skipping"
    true
  end

  # run the changelog test and publish the resulting status
  def do_changelog_test(repo, pr)
    @j_status = 'failure'
    pr_all_files_type(repo, pr.number, @file_type)
    # if the pr contains changes on .changes file, test ok
    @j_status = 'success' if @pr_files.any?
    magic_comment(repo, pr.number)
    @client.create_status(repo, pr.head.sha, @j_status,
                          context: @context, description: @description,
                          target_url: @target_url)
    true
  end

  # do the changelog test and set status
  def changelog_changed(repo, pr, comm_st)
    return false unless @changelog_test
    # only execute once: don't run if the test already failed or passed
    return false if failed_status?(comm_st)
    return false if success_status?(comm_st)
    do_changelog_test(repo, pr)
  end

  def retrigger_needed?(pr)
    # sometimes we want to redo tests
    return false unless magicword(@repo, pr.number, @context)
    # changelog trigger
    if @changelog_test
      do_changelog_test(@repo, pr)
      return false
    end
    pr_all_files_type(@repo, pr.number, @file_type)
    return false unless @pr_files.any?
    # if check is set, the comment in the trigger job will be deleted.
    # so setting it to pending, it will be remembered
    true
  end
end
|
# encoding: utf-8
require "amq/client/logging"
require "amq/client/settings"
require "amq/client/async/entity"
require "amq/client/async/channel"
module AMQ
# For overview of AMQP client adapters API, see {AMQ::Client::Adapter}
module Client
module Async
# Base adapter class. Specific implementations (for example, EventMachine-based, Cool.io-based or
# sockets-based) subclass it and must implement Adapter API methods:
#
# * #send_raw(data)
# * #establish_connection(settings)
# * #close_connection
#
# @abstract
module Adapter
# Module hook: when Adapter is included into a concrete transport class,
# extend that class with the class-level API (ClassMethods, protocol
# method handlers) and declare the instance attribute API on it.
def self.included(host)
  host.extend ClassMethods
  host.extend ProtocolMethodHandlers
  host.class_eval do
    #
    # API
    #
    # Logger used by this connection instance.
    attr_accessor :logger
    # Settings this connection instance was established with.
    attr_accessor :settings
    # @return [Array<#call>]
    attr_reader :callbacks
    # The locale defines the language in which the server will send reply texts.
    #
    # @see http://bit.ly/htCzCX AMQP 0.9.1 protocol documentation (Section 1.4.2.2)
    attr_accessor :locale
    # Client capabilities
    #
    # @see http://bit.ly/htCzCX AMQP 0.9.1 protocol documentation (Section 1.4.2.2.1)
    attr_accessor :client_properties
    # Server properties
    #
    # @see http://bit.ly/htCzCX AMQP 0.9.1 protocol documentation (Section 1.4.2.1.3)
    attr_reader :server_properties
    # Server capabilities
    #
    # @see http://bit.ly/htCzCX AMQP 0.9.1 protocol documentation (Section 1.4.2.1.3)
    attr_reader :server_capabilities
    # Locales server supports
    #
    # @see http://bit.ly/htCzCX AMQP 0.9.1 protocol documentation (Section 1.4.2.1.3)
    attr_reader :server_locales
    # Authentication mechanism used.
    #
    # @see http://bit.ly/htCzCX AMQP 0.9.1 protocol documentation (Section 1.4.2.2)
    attr_reader :mechanism
    # Authentication mechanisms broker supports.
    #
    # @see http://bit.ly/htCzCX AMQP 0.9.1 protocol documentation (Section 1.4.2.2)
    attr_reader :server_authentication_mechanisms
    # Channels within this connection.
    #
    # @see http://bit.ly/hw2ELX AMQP 0.9.1 specification (Section 2.2.5)
    attr_reader :channels
    # Maximum channel number that the server permits this connection to use.
    # Usable channel numbers are in the range 1..channel_max.
    # Zero indicates no specified limit.
    #
    # @see http://bit.ly/htCzCX AMQP 0.9.1 protocol documentation (Sections 1.4.2.5.1 and 1.4.2.6.1)
    attr_accessor :channel_max
    # Maximum frame size that the server permits this connection to use.
    #
    # @see http://bit.ly/htCzCX AMQP 0.9.1 protocol documentation (Sections 1.4.2.5.2 and 1.4.2.6.2)
    attr_accessor :frame_max
    attr_reader :known_hosts
    # @api plugin
    # @see #disconnect
    # @note Adapters must implement this method but it is NOT supposed to be used directly.
    # AMQ protocol defines two-step process of closing connection (send Connection.Close
    # to the peer and wait for Connection.Close-Ok), implemented by {Adapter#disconnect}
    # NOTE(review): `defined?(:close_connection)` tests a symbol literal, which is
    # always truthy, so this fallback definition never actually runs — probably
    # meant `defined?(close_connection)` or `method_defined?(:close_connection)`;
    # confirm before changing, since adapters define their own close_connection.
    def close_connection
      raise NotImplementedError
    end unless defined?(:close_connection) # since it is a module, this method may already be defined
  end
end # self.included(host)
# Class-level adapter API: shared settings, logging configuration and the
# #connect entry point.
module ClassMethods
  # Settings for this adapter class; defaults when none were configured.
  def settings
    @settings ||= AMQ::Client::Settings.default
  end

  # Logger used by the adapter; defaults to a stdlib Logger on STDERR.
  def logger
    @logger ||= begin
      require "logger"
      Logger.new(STDERR)
    end
  end

  # Replaces the logger after validating that it responds to every method
  # in AMQ::Client::Logging::REQUIRED_METHODS.
  #
  # @raise [AMQ::Client::Logging::IncompatibleLoggerError] when a required method is missing
  def logger=(logger)
    methods = AMQ::Client::Logging::REQUIRED_METHODS
    unless methods.all? { |method| logger.respond_to?(method) }
      raise AMQ::Client::Logging::IncompatibleLoggerError.new(methods)
    end
    @logger = logger
  end

  # @return [Boolean] Current value of logging flag.
  def logging
    settings[:logging]
  end

  # Turns logging on or off.
  def logging=(boolean)
    settings[:logging] = boolean
  end

  # Establishes connection to AMQ broker and returns it. New connection object is yielded to
  # the block if it is given.
  #
  # @example Specifying adapter via the :adapter option
  #   AMQ::Client::Adapter.connect(:adapter => "socket")
  # @example Specifying using custom adapter class
  #   AMQ::Client::SocketClient.connect
  # @param [Hash] settings Connection parameters, including :adapter to use.
  # @api public
  def connect(settings = nil, &block)
    @settings = Settings.configure(settings)
    instance = self.new
    # NOTE(review): the raw `settings` (possibly nil) are passed here while the
    # configured `@settings` are kept on the class — confirm that adapters
    # re-apply Settings.configure inside #establish_connection.
    instance.establish_connection(settings)
    instance.register_connection_callback(&block)
    instance
  end

  # Can be overridden by higher-level libraries like amqp gem or bunny.
  # Defaults to AMQ::Client::TCPConnectionFailed.
  #
  # @return [Class]
  def tcp_connection_failure_exception_class
    @tcp_connection_failure_exception_class ||= AMQ::Client::TCPConnectionFailed
  end # tcp_connection_failure_exception_class

  # Can be overridden by higher-level libraries like amqp gem or bunny.
  # Defaults to AMQ::Client::PossibleAuthenticationFailure.
  #
  # @return [Class]
  def authentication_failure_exception_class
    @authentication_failure_exception_class ||= AMQ::Client::PossibleAuthenticationFailureError
  end # authentication_failure_exception_class
end # ClassMethods
#
# Behaviors
#

# Open/closed state tracking and callback plumbing shared by all adapters —
# presumably defined in amq/client/async/entity (required above); confirm.
include Openable
include Callbacks

extend RegisterEntityMixin
# register_entity defines a #channel(...) factory bound to Async::Channel.
register_entity :channel, AMQ::Client::Async::Channel
#
# API
#

# Establish socket connection to the server.
#
# Transport-specific adapters must override this; the base implementation
# only signals that the method is abstract.
#
# @raise [NotImplementedError] always, unless overridden by the adapter
# @api plugin
def establish_connection(settings)
  raise NotImplementedError
end
# Properly close connection with AMQ broker, as described in
# section 2.2.4 of the {http://bit.ly/hw2ELX AMQP 0.9.1 specification}.
#
# @api plugin
# @see #close_connection
def disconnect(reply_code = 200, reply_text = "Goodbye", class_id = 0, method_id = 0, &block)
@intentionally_closing_connection = true
self.on_disconnection(&block)
# ruby-amqp/amqp#66, MK.
if self.open?
closing!
self.send_frame(Protocol::Connection::Close.encode(reply_code, reply_text, class_id, method_id))
elsif self.closing?
# no-op
else
self.disconnection_successful
end
end
# Sends AMQ protocol header (also known as preamble).
#
# @note This must be implemented by all AMQP clients.
# @api plugin
# @see http://bit.ly/hw2ELX AMQP 0.9.1 specification (Section 2.2)
def send_preamble
self.send_raw(AMQ::Protocol::PREAMBLE)
end
# Sends frame to the peer, checking that connection is open.
#
# @raise [ConnectionClosedError]
def send_frame(frame)
if closed?
raise ConnectionClosedError.new(frame)
else
self.send_raw(frame.encode)
end
end
# Sends multiple frames, one by one. For thread safety this method takes a channel
# object and synchronizes on it.
#
# @api public
def send_frameset(frames, channel)
  # A frameset (method + content header + body frames) must reach the broker
  # contiguously: if another thread publishing on the same channel interleaves
  # its frames, the broker raises a 505 UNEXPECTED_FRAME connection exception.
  # Synchronizing on the channel object keeps this thread safe with
  # fine-grained locking; single-frame sends do not need it.
  channel.synchronize do
    frames.each { |frame| self.send_frame(frame) }
  end
end # send_frameset(frames)
# Returns heartbeat interval this client uses, in seconds.
# This value may or may not be used depending on broker capabilities.
# Zero means the server does not want a heartbeat.
#
# @return [Fixnum] Heartbeat interval this client uses, in seconds.
# @see http://bit.ly/htCzCX AMQP 0.9.1 protocol documentation (Section 1.4.2.6)
def heartbeat_interval
@settings[:heartbeat] || @settings[:heartbeat_interval] || 0
end # heartbeat_interval
# vhost this connection uses. Default is "/", a historically established convention
# of RabbitMQ and amqp gem.
#
# @return [String] vhost this connection uses
# @api public
def vhost
@settings.fetch(:vhost, "/")
end # vhost
# @group Error Handling and Recovery
# Called when initial TCP connection fails.
# @api public
def tcp_connection_failed
@recovered = false
@on_tcp_connection_failure.call(@settings) if @on_tcp_connection_failure
end
# Called when previously established TCP connection fails.
# @api public
def tcp_connection_lost
@recovered = false
@on_tcp_connection_loss.call(self, @settings) if @on_tcp_connection_loss
self.handle_connection_interruption
end
# @return [Boolean]
def reconnecting?
@reconnecting
end # reconnecting?
# Defines a callback that will be run when initial TCP connection fails.
# You can define only one callback.
#
# @api public
def on_tcp_connection_failure(&block)
@on_tcp_connection_failure = block
end
# Defines a callback that will be run when TCP connection to AMQP broker is lost (interrupted).
# You can define only one callback.
#
# @api public
def on_tcp_connection_loss(&block)
@on_tcp_connection_loss = block
end
# Defines a callback that will be run when TCP connection is closed before authentication
# finishes. Usually this means authentication failure. You can define only one callback.
#
# @api public
def on_possible_authentication_failure(&block)
@on_possible_authentication_failure = block
end
# Defines a callback that will be executed when connection is closed after
# connection-level exception. Only one callback can be defined (the one defined last
# replaces previously added ones).
#
# @api public
def on_error(&block)
self.redefine_callback(:error, &block)
end
# Defines a callback that will be executed after TCP connection is interrupted (typically because of a network failure).
# Only one callback can be defined (the one defined last replaces previously added ones).
#
# @api public
def on_connection_interruption(&block)
self.redefine_callback(:after_connection_interruption, &block)
end # on_connection_interruption(&block)
alias after_connection_interruption on_connection_interruption
# @private
# @api plugin
def handle_connection_interruption
@channels.each { |n, c| c.handle_connection_interruption }
self.exec_callback_yielding_self(:after_connection_interruption)
end # handle_connection_interruption
# Defines a callback that will be executed after TCP connection has recovered after a network failure
# but before AMQP connection is re-opened.
# Only one callback can be defined (the one defined last replaces previously added ones).
#
# @api public
def before_recovery(&block)
self.redefine_callback(:before_recovery, &block)
end # before_recovery(&block)
# @private
def run_before_recovery_callbacks
self.exec_callback_yielding_self(:before_recovery, @settings)
@channels.each { |n, ch| ch.run_before_recovery_callbacks }
end
# Defines a callback that will be executed after AMQP connection has recovered after a network failure..
# Only one callback can be defined (the one defined last replaces previously added ones).
#
# @api public
def on_recovery(&block)
self.redefine_callback(:after_recovery, &block)
end # on_recovery(&block)
alias after_recovery on_recovery
# @private
def run_after_recovery_callbacks
self.exec_callback_yielding_self(:after_recovery, @settings)
@channels.each { |n, ch| ch.run_after_recovery_callbacks }
end
# @return [Boolean] whether connection is in the automatic recovery mode
# @api public
def auto_recovering?
!!@auto_recovery
end # auto_recovering?
alias auto_recovery? auto_recovering?
# Performs recovery of channels that are in the automatic recovery mode. Does not run recovery
# callbacks.
#
# @see Channel#auto_recover
# @see Queue#auto_recover
# @see Exchange#auto_recover
# @api plugin
def auto_recover
@channels.select { |channel_id, ch| ch.auto_recovering? }.each { |n, ch| ch.auto_recover }
end # auto_recover
# Performs recovery of channels that are in the automatic recovery mode. "before recovery" callbacks
# are run immediately, "after recovery" callbacks are run after AMQP connection is re-established and
# auto recovery is performed (using #auto_recover).
#
# Use this method if you want to run automatic recovery process after handling a connection-level exception,
# for example, 320 CONNECTION_FORCED (used by RabbitMQ when it is shut down gracefully).
#
# @see Channel#auto_recover
# @see Queue#auto_recover
# @see Exchange#auto_recover
# @api plugin
def start_automatic_recovery
self.run_before_recovery_callbacks
self.register_connection_callback do
# always run automatic recovery, because it is per-channel
# and connection has to start it. Channels that did not opt-in for
# autorecovery won't be selected. MK.
self.auto_recover
self.run_after_recovery_callbacks
end
end # start_automatic_recovery
# @endgroup
#
# Implementation
#
# Sends opaque data to AMQ broker over active connection.
#
# @note This must be implemented by all AMQP clients.
# @api plugin
def send_raw(data)
raise NotImplementedError
end
# Sends connection preamble to the broker.
# @api plugin
def handshake
@authenticating = true
self.send_preamble
end
# Sends connection.open to the server.
#
# @api plugin
# @see http://bit.ly/htCzCX AMQP 0.9.1 protocol documentation (Section 1.4.2.7)
def open(vhost = "/")
self.send_frame(Protocol::Connection::Open.encode(vhost))
end
# Resets connection state.
#
# @api plugin
def reset_state!
# no-op by default
end # reset_state!
# @api plugin
# @see http://tools.ietf.org/rfc/rfc2595.txt RFC 2595
# Encodes PLAIN-mechanism credentials as mandated by RFC 2595:
# a NUL byte, the username, another NUL byte, then the password.
def encode_credentials(username, password)
  "\0" + username.to_s + "\0" + password.to_s
end # encode_credentials(username, password)
# Processes a single frame.
#
# @param [AMQ::Protocol::Frame] frame
# @api plugin
def receive_frame(frame)
@frames << frame
if frameset_complete?(@frames)
receive_frameset(@frames)
@frames.clear
else
# puts "#{frame.inspect} is NOT final"
end
end
# Processes a frameset by finding and invoking a suitable handler.
# Heartbeat frames are treated in a special way: they simply update @last_server_heartbeat
# value.
#
# @param [Array<AMQ::Protocol::Frame>] frames
# @api plugin
def receive_frameset(frames)
frame = frames.first
if Protocol::HeartbeatFrame === frame
@last_server_heartbeat = Time.now
else
if callable = AMQ::Client::HandlersRegistry.find(frame.method_class)
f = frames.shift
callable.call(self, f, frames)
else
raise MissingHandlerError.new(frames.first)
end
end
end
# Sends a heartbeat frame if connection is open.
# @api plugin
def send_heartbeat
if tcp_connection_established?
if @last_server_heartbeat < (Time.now - (self.heartbeat_interval * 2))
logger.error "Reconnecting due to missing server heartbeats"
# TODO: reconnect
end
send_frame(Protocol::HeartbeatFrame)
end
end # send_heartbeat
# Handles connection.start.
#
# @api plugin
# @see http://bit.ly/htCzCX AMQP 0.9.1 protocol documentation (Section 1.4.2.1.)
def handle_start(connection_start)
@server_properties = connection_start.server_properties
@server_capabilities = @server_properties["capabilities"]
@server_authentication_mechanisms = (connection_start.mechanisms || "").split(" ")
@server_locales = Array(connection_start.locales)
username = @settings[:user] || @settings[:username]
password = @settings[:pass] || @settings[:password]
# It's not clear whether we should transition to :opening state here
# or in #open but in case authentication fails, it would be strange to have
# @status undefined. So lets do this. MK.
opening!
self.send_frame(Protocol::Connection::StartOk.encode(@client_properties, @mechanism, self.encode_credentials(username, password), @locale))
end
# Handles Connection.Tune-Ok.
#
# @api plugin
# @see http://bit.ly/htCzCX AMQP 0.9.1 protocol documentation (Section 1.4.2.6)
def handle_tune(tune_ok)
@channel_max = tune_ok.channel_max.freeze
@frame_max = tune_ok.frame_max.freeze
@heartbeat_interval = self.heartbeat_interval || tune_ok.heartbeat
self.send_frame(Protocol::Connection::TuneOk.encode(@channel_max, [settings[:frame_max], @frame_max].min, @heartbeat_interval))
end # handle_tune(method)
# Handles Connection.Open-Ok.
#
# @api plugin
# @see http://bit.ly/htCzCX AMQP 0.9.1 protocol documentation (Section 1.4.2.8.)
def handle_open_ok(open_ok)
@known_hosts = open_ok.known_hosts.dup.freeze
opened!
self.connection_successful if self.respond_to?(:connection_successful)
end
# Handles connection.close. When broker detects a connection level exception, this method is called.
#
# @api plugin
# @see http://bit.ly/htCzCX AMQP 0.9.1 protocol documentation (Section 1.5.2.9)
def handle_close(conn_close)
closed!
# TODO: use proper exception class, provide protocol class (we know conn_close.class_id and conn_close.method_id) as well!
self.exec_callback_yielding_self(:error, conn_close)
end
# Handles Connection.Close-Ok.
#
# @api plugin
# @see http://bit.ly/htCzCX AMQP 0.9.1 protocol documentation (Section 1.4.2.10)
def handle_close_ok(close_ok)
closed!
self.disconnection_successful
end # handle_close_ok(close_ok)
protected
# Returns next frame from buffer whenever possible
#
# @api private
def get_next_frame
return nil unless @chunk_buffer.size > 7 # otherwise, cannot read the length
# octet + short
offset = 3 # 1 + 2
# length
payload_length = @chunk_buffer[offset, 4].unpack(AMQ::Protocol::PACK_UINT32).first
# 4 bytes for long payload length, 1 byte final octet
frame_length = offset + payload_length + 5
if frame_length <= @chunk_buffer.size
@chunk_buffer.slice!(0, frame_length)
else
nil
end
end # def get_next_frame
# Utility methods
# Determines, whether the received frameset is ready to be further processed
def frameset_complete?(frames)
return false if frames.empty?
first_frame = frames[0]
first_frame.final? || (first_frame.method_class.has_content? && content_complete?(frames[1..-1]))
end
# Determines, whether given frame array contains full content body
def content_complete?(frames)
return false if frames.empty?
header = frames[0]
raise "Not a content header frame first: #{header.inspect}" unless header.kind_of?(AMQ::Protocol::HeaderFrame)
header.body_size == frames[1..-1].inject(0) {|sum, frame| sum + frame.payload.size }
end
end # Adapter
end # Async
end # Client
end # AMQ
Make it clear why we need to synchronize (only) the methods that use framesets
# encoding: utf-8
require "amq/client/logging"
require "amq/client/settings"
require "amq/client/async/entity"
require "amq/client/async/channel"
module AMQ
# For overview of AMQP client adapters API, see {AMQ::Client::Adapter}
module Client
module Async
# Base adapter class. Specific implementations (for example, EventMachine-based, Cool.io-based or
# sockets-based) subclass it and must implement Adapter API methods:
#
# * #send_raw(data)
# * #establish_connection(settings)
# * #close_connection
#
# @abstract
module Adapter
def self.included(host)
host.extend ClassMethods
host.extend ProtocolMethodHandlers
host.class_eval do
#
# API
#
attr_accessor :logger
attr_accessor :settings
# @return [Array<#call>]
attr_reader :callbacks
# The locale defines the language in which the server will send reply texts.
#
# @see http://bit.ly/htCzCX AMQP 0.9.1 protocol documentation (Section 1.4.2.2)
attr_accessor :locale
# Client capabilities
#
# @see http://bit.ly/htCzCX AMQP 0.9.1 protocol documentation (Section 1.4.2.2.1)
attr_accessor :client_properties
# Server properties
#
# @see http://bit.ly/htCzCX AMQP 0.9.1 protocol documentation (Section 1.4.2.1.3)
attr_reader :server_properties
# Server capabilities
#
# @see http://bit.ly/htCzCX AMQP 0.9.1 protocol documentation (Section 1.4.2.1.3)
attr_reader :server_capabilities
# Locales server supports
#
# @see http://bit.ly/htCzCX AMQP 0.9.1 protocol documentation (Section 1.4.2.1.3)
attr_reader :server_locales
# Authentication mechanism used.
#
# @see http://bit.ly/htCzCX AMQP 0.9.1 protocol documentation (Section 1.4.2.2)
attr_reader :mechanism
# Authentication mechanisms broker supports.
#
# @see http://bit.ly/htCzCX AMQP 0.9.1 protocol documentation (Section 1.4.2.2)
attr_reader :server_authentication_mechanisms
# Channels within this connection.
#
# @see http://bit.ly/hw2ELX AMQP 0.9.1 specification (Section 2.2.5)
attr_reader :channels
# Maximum channel number that the server permits this connection to use.
# Usable channel numbers are in the range 1..channel_max.
# Zero indicates no specified limit.
#
# @see http://bit.ly/htCzCX AMQP 0.9.1 protocol documentation (Sections 1.4.2.5.1 and 1.4.2.6.1)
attr_accessor :channel_max
# Maximum frame size that the server permits this connection to use.
#
# @see http://bit.ly/htCzCX AMQP 0.9.1 protocol documentation (Sections 1.4.2.5.2 and 1.4.2.6.2)
attr_accessor :frame_max
attr_reader :known_hosts
# @api plugin
# @see #disconnect
# @note Adapters must implement this method but it is NOT supposed to be used directly.
# AMQ protocol defines two-step process of closing connection (send Connection.Close
# to the peer and wait for Connection.Close-Ok), implemented by {Adapter#disconnect}
def close_connection
raise NotImplementedError
end unless defined?(:close_connection) # since it is a module, this method may already be defined
end
end # self.included(host)
module ClassMethods
# Settings
def settings
@settings ||= AMQ::Client::Settings.default
end
def logger
@logger ||= begin
require "logger"
Logger.new(STDERR)
end
end
def logger=(logger)
methods = AMQ::Client::Logging::REQUIRED_METHODS
unless methods.all? { |method| logger.respond_to?(method) }
raise AMQ::Client::Logging::IncompatibleLoggerError.new(methods)
end
@logger = logger
end
# @return [Boolean] Current value of logging flag.
def logging
settings[:logging]
end
# Turns logging on or off.
def logging=(boolean)
settings[:logging] = boolean
end
# Establishes connection to AMQ broker and returns it. New connection object is yielded to
# the block if it is given.
#
# @example Specifying adapter via the :adapter option
# AMQ::Client::Adapter.connect(:adapter => "socket")
# @example Specifying using custom adapter class
# AMQ::Client::SocketClient.connect
# @param [Hash] Connection parameters, including :adapter to use.
# @api public
def connect(settings = nil, &block)
  # Merge user-supplied settings with defaults and remember them on the class.
  @settings = Settings.configure(settings)
  instance = self.new
  # NOTE(review): the raw `settings` argument (possibly nil) is forwarded here
  # rather than the configured @settings — confirm establish_connection is
  # expected to re-apply defaults itself, otherwise this looks like a bug.
  instance.establish_connection(settings)
  # The user block, if any, runs once the connection is established.
  instance.register_connection_callback(&block)
  instance
end
# Can be overridden by higher-level libraries like amqp gem or bunny.
# Defaults to AMQ::Client::TCPConnectionFailed.
#
# @return [Class]
def tcp_connection_failure_exception_class
@tcp_connection_failure_exception_class ||= AMQ::Client::TCPConnectionFailed
end # tcp_connection_failure_exception_class
# Can be overridden by higher-level libraries like amqp gem or bunny.
# Defaults to AMQ::Client::PossibleAuthenticationFailure.
#
# @return [Class]
def authentication_failure_exception_class
@authentication_failure_exception_class ||= AMQ::Client::PossibleAuthenticationFailureError
end # authentication_failure_exception_class
end # ClassMethods
#
# Behaviors
#
include Openable
include Callbacks
extend RegisterEntityMixin
register_entity :channel, AMQ::Client::Async::Channel
#
# API
#
# Establish socket connection to the server.
#
# @api plugin
def establish_connection(settings)
raise NotImplementedError
end
# Properly close connection with AMQ broker, as described in
# section 2.2.4 of the {http://bit.ly/hw2ELX AMQP 0.9.1 specification}.
#
# @api plugin
# @see #close_connection
def disconnect(reply_code = 200, reply_text = "Goodbye", class_id = 0, method_id = 0, &block)
  # Flag the close as deliberate so interruption/recovery handling can tell
  # it apart from a network failure.
  @intentionally_closing_connection = true
  self.on_disconnection(&block)
  # ruby-amqp/amqp#66, MK.
  if self.open?
    # Connection is open: start the two-step close handshake — send
    # Connection.Close and wait for Connection.Close-Ok (see #handle_close_ok).
    closing!
    self.send_frame(Protocol::Connection::Close.encode(reply_code, reply_text, class_id, method_id))
  elsif self.closing?
    # A close handshake is already in flight; don't send a second Close.
    # no-op
  else
    # Already closed: fire disconnection callbacks immediately.
    self.disconnection_successful
  end
end
# Sends AMQ protocol header (also known as preamble).
#
# @note This must be implemented by all AMQP clients.
# @api plugin
# @see http://bit.ly/hw2ELX AMQP 0.9.1 specification (Section 2.2)
def send_preamble
self.send_raw(AMQ::Protocol::PREAMBLE)
end
# Sends frame to the peer, checking that connection is open.
#
# @raise [ConnectionClosedError]
# Encodes a single frame and writes it to the peer.
#
# @raise [ConnectionClosedError] when the connection is already closed
def send_frame(frame)
  raise ConnectionClosedError.new(frame) if closed?
  self.send_raw(frame.encode)
end
# Sends multiple frames, one by one. For thread safety this method takes a channel
# object and synchronizes on it.
#
# @api public
def send_frameset(frames, channel)
# some (many) developers end up sharing channels between threads and when multiple
# threads publish on the same channel aggressively, at some point frame will be
# delivered out of order and broker will raise 505 UNEXPECTED_FRAME exception.
# If we synchronize on channel, however, this is both thread safe and pretty fine-grained
# locking. Note that "single frame" methods do not need this kind of synchronization. MK.
channel.synchronize do
frames.each { |frame| self.send_frame(frame) }
end
end # send_frameset(frames)
# Returns heartbeat interval this client uses, in seconds.
# This value may or may not be used depending on broker capabilities.
# Zero means the server does not want a heartbeat.
#
# @return [Fixnum] Heartbeat interval this client uses, in seconds.
# @see http://bit.ly/htCzCX AMQP 0.9.1 protocol documentation (Section 1.4.2.6)
# Heartbeat interval (in seconds) this client uses; honors both the
# :heartbeat and the legacy :heartbeat_interval settings keys. Zero means
# the server does not want a heartbeat.
def heartbeat_interval
  [:heartbeat, :heartbeat_interval].each do |key|
    value = @settings[key]
    return value if value
  end
  0
end # heartbeat_interval
# vhost this connection uses. Default is "/", a historically established convention
# of RabbitMQ and amqp gem.
#
# @return [String] vhost this connection uses
# @api public
# The vhost this connection uses; falls back to "/" when none is configured.
def vhost
  @settings.fetch(:vhost) { "/" }
end # vhost
# @group Error Handling and Recovery
# Called when initial TCP connection fails.
# @api public
def tcp_connection_failed
@recovered = false
@on_tcp_connection_failure.call(@settings) if @on_tcp_connection_failure
end
# Called when previously established TCP connection fails.
# @api public
def tcp_connection_lost
@recovered = false
@on_tcp_connection_loss.call(self, @settings) if @on_tcp_connection_loss
self.handle_connection_interruption
end
# @return [Boolean]
def reconnecting?
@reconnecting
end # reconnecting?
# Defines a callback that will be run when initial TCP connection fails.
# You can define only one callback.
#
# @api public
def on_tcp_connection_failure(&block)
@on_tcp_connection_failure = block
end
# Defines a callback that will be run when TCP connection to AMQP broker is lost (interrupted).
# You can define only one callback.
#
# @api public
def on_tcp_connection_loss(&block)
@on_tcp_connection_loss = block
end
# Defines a callback that will be run when TCP connection is closed before authentication
# finishes. Usually this means authentication failure. You can define only one callback.
#
# @api public
def on_possible_authentication_failure(&block)
@on_possible_authentication_failure = block
end
# Defines a callback that will be executed when connection is closed after
# connection-level exception. Only one callback can be defined (the one defined last
# replaces previously added ones).
#
# @api public
def on_error(&block)
self.redefine_callback(:error, &block)
end
# Defines a callback that will be executed after TCP connection is interrupted (typically because of a network failure).
# Only one callback can be defined (the one defined last replaces previously added ones).
#
# @api public
def on_connection_interruption(&block)
self.redefine_callback(:after_connection_interruption, &block)
end # on_connection_interruption(&block)
alias after_connection_interruption on_connection_interruption
# @private
# @api plugin
def handle_connection_interruption
@channels.each { |n, c| c.handle_connection_interruption }
self.exec_callback_yielding_self(:after_connection_interruption)
end # handle_connection_interruption
# Defines a callback that will be executed after TCP connection has recovered after a network failure
# but before AMQP connection is re-opened.
# Only one callback can be defined (the one defined last replaces previously added ones).
#
# @api public
def before_recovery(&block)
self.redefine_callback(:before_recovery, &block)
end # before_recovery(&block)
# @private
def run_before_recovery_callbacks
self.exec_callback_yielding_self(:before_recovery, @settings)
@channels.each { |n, ch| ch.run_before_recovery_callbacks }
end
# Defines a callback that will be executed after AMQP connection has recovered after a network failure..
# Only one callback can be defined (the one defined last replaces previously added ones).
#
# @api public
def on_recovery(&block)
self.redefine_callback(:after_recovery, &block)
end # on_recovery(&block)
alias after_recovery on_recovery
# @private
def run_after_recovery_callbacks
self.exec_callback_yielding_self(:after_recovery, @settings)
@channels.each { |n, ch| ch.run_after_recovery_callbacks }
end
# @return [Boolean] whether connection is in the automatic recovery mode
# @api public
def auto_recovering?
!!@auto_recovery
end # auto_recovering?
alias auto_recovery? auto_recovering?
# Performs recovery of channels that are in the automatic recovery mode. Does not run recovery
# callbacks.
#
# @see Channel#auto_recover
# @see Queue#auto_recover
# @see Exchange#auto_recover
# @api plugin
def auto_recover
@channels.select { |channel_id, ch| ch.auto_recovering? }.each { |n, ch| ch.auto_recover }
end # auto_recover
# Performs recovery of channels that are in the automatic recovery mode. "before recovery" callbacks
# are run immediately, "after recovery" callbacks are run after AMQP connection is re-established and
# auto recovery is performed (using #auto_recover).
#
# Use this method if you want to run automatic recovery process after handling a connection-level exception,
# for example, 320 CONNECTION_FORCED (used by RabbitMQ when it is shut down gracefully).
#
# @see Channel#auto_recover
# @see Queue#auto_recover
# @see Exchange#auto_recover
# @api plugin
def start_automatic_recovery
self.run_before_recovery_callbacks
self.register_connection_callback do
# always run automatic recovery, because it is per-channel
# and connection has to start it. Channels that did not opt-in for
# autorecovery won't be selected. MK.
self.auto_recover
self.run_after_recovery_callbacks
end
end # start_automatic_recovery
# @endgroup
#
# Implementation
#
# Sends opaque data to AMQ broker over active connection.
#
# @note This must be implemented by all AMQP clients.
# @api plugin
def send_raw(data)
raise NotImplementedError
end
# Sends connection preamble to the broker.
# @api plugin
def handshake
@authenticating = true
self.send_preamble
end
# Sends connection.open to the server.
#
# @api plugin
# @see http://bit.ly/htCzCX AMQP 0.9.1 protocol documentation (Section 1.4.2.7)
def open(vhost = "/")
self.send_frame(Protocol::Connection::Open.encode(vhost))
end
# Resets connection state.
#
# @api plugin
def reset_state!
# no-op by default
end # reset_state!
# @api plugin
# @see http://tools.ietf.org/rfc/rfc2595.txt RFC 2595
# Encodes PLAIN-mechanism credentials per RFC 2595: NUL, username, NUL, password.
def encode_credentials(username, password)
  ["", username, password].join("\0")
end # encode_credentials(username, password)
# Processes a single frame.
#
# @param [AMQ::Protocol::Frame] frame
# @api plugin
def receive_frame(frame)
@frames << frame
if frameset_complete?(@frames)
receive_frameset(@frames)
@frames.clear
else
# puts "#{frame.inspect} is NOT final"
end
end
# Processes a frameset by finding and invoking a suitable handler.
# Heartbeat frames are treated in a special way: they simply update @last_server_heartbeat
# value.
#
# @param [Array<AMQ::Protocol::Frame>] frames
# @api plugin
def receive_frameset(frames)
frame = frames.first
if Protocol::HeartbeatFrame === frame
@last_server_heartbeat = Time.now
else
if callable = AMQ::Client::HandlersRegistry.find(frame.method_class)
f = frames.shift
callable.call(self, f, frames)
else
raise MissingHandlerError.new(frames.first)
end
end
end
# Sends a heartbeat frame if connection is open.
# @api plugin
def send_heartbeat
  if tcp_connection_established?
    # NOTE(review): @last_server_heartbeat is set in #receive_frameset when a
    # heartbeat frame arrives — confirm it is initialized at connect time,
    # otherwise this comparison raises on a nil value before the first
    # server heartbeat.
    if @last_server_heartbeat < (Time.now - (self.heartbeat_interval * 2))
      # Two missed intervals: treat the peer as gone.
      logger.error "Reconnecting due to missing server heartbeats"
      # TODO: reconnect
    end
    send_frame(Protocol::HeartbeatFrame)
  end
end # send_heartbeat
# Handles connection.start.
#
# @api plugin
# @see http://bit.ly/htCzCX AMQP 0.9.1 protocol documentation (Section 1.4.2.1.)
def handle_start(connection_start)
@server_properties = connection_start.server_properties
@server_capabilities = @server_properties["capabilities"]
@server_authentication_mechanisms = (connection_start.mechanisms || "").split(" ")
@server_locales = Array(connection_start.locales)
username = @settings[:user] || @settings[:username]
password = @settings[:pass] || @settings[:password]
# It's not clear whether we should transition to :opening state here
# or in #open but in case authentication fails, it would be strange to have
# @status undefined. So lets do this. MK.
opening!
self.send_frame(Protocol::Connection::StartOk.encode(@client_properties, @mechanism, self.encode_credentials(username, password), @locale))
end
# Handles Connection.Tune-Ok.
#
# @api plugin
# @see http://bit.ly/htCzCX AMQP 0.9.1 protocol documentation (Section 1.4.2.6)
def handle_tune(tune_ok)
  # Adopt server-announced limits for this connection.
  @channel_max = tune_ok.channel_max.freeze
  @frame_max = tune_ok.frame_max.freeze
  # NOTE(review): #heartbeat_interval returns 0 (never nil) when unset, so
  # the `|| tune_ok.heartbeat` fallback is effectively dead code — confirm
  # whether the server-suggested heartbeat was meant to win here.
  @heartbeat_interval = self.heartbeat_interval || tune_ok.heartbeat
  # Reply with negotiated values; frame size is capped at the smaller of the
  # client's configured maximum and the server's maximum.
  self.send_frame(Protocol::Connection::TuneOk.encode(@channel_max, [settings[:frame_max], @frame_max].min, @heartbeat_interval))
end # handle_tune(method)
# Handles Connection.Open-Ok.
#
# @api plugin
# @see http://bit.ly/htCzCX AMQP 0.9.1 protocol documentation (Section 1.4.2.8.)
def handle_open_ok(open_ok)
@known_hosts = open_ok.known_hosts.dup.freeze
opened!
self.connection_successful if self.respond_to?(:connection_successful)
end
# Handles connection.close. When broker detects a connection level exception, this method is called.
#
# @api plugin
# @see http://bit.ly/htCzCX AMQP 0.9.1 protocol documentation (Section 1.5.2.9)
def handle_close(conn_close)
closed!
# TODO: use proper exception class, provide protocol class (we know conn_close.class_id and conn_close.method_id) as well!
self.exec_callback_yielding_self(:error, conn_close)
end
# Handles Connection.Close-Ok.
#
# @api plugin
# @see http://bit.ly/htCzCX AMQP 0.9.1 protocol documentation (Section 1.4.2.10)
def handle_close_ok(close_ok)
closed!
self.disconnection_successful
end # handle_close_ok(close_ok)
protected
# Returns next frame from buffer whenever possible
#
# @api private
def get_next_frame
  return nil unless @chunk_buffer.size > 7 # otherwise, cannot read the length
  # Frame layout: type octet (1) + channel short (2) + payload length long (4)
  # + payload + frame-end octet (1).
  # octet + short
  offset = 3 # 1 + 2
  # length
  payload_length = @chunk_buffer[offset, 4].unpack(AMQ::Protocol::PACK_UINT32).first
  # 4 bytes for long payload length, 1 byte final octet
  frame_length = offset + payload_length + 5
  if frame_length <= @chunk_buffer.size
    # Full frame buffered: destructively remove and return it.
    @chunk_buffer.slice!(0, frame_length)
  else
    # Frame still incomplete; leave the buffer untouched and wait for more data.
    nil
  end
end # def get_next_frame
# Utility methods
# Determines, whether the received frameset is ready to be further processed
# A frameset is complete when its leading frame is final on its own, or when
# it is a content-carrying method frame whose header and body frames have all
# arrived (see #content_complete?).
def frameset_complete?(frames)
  return false if frames.empty?
  head = frames.first
  return true if head.final?
  head.method_class.has_content? && content_complete?(frames[1..-1])
end
# Determines, whether given frame array contains full content body
def content_complete?(frames)
  return false if frames.empty?
  # The first frame after the method frame must be the content header,
  # which announces the total body size.
  header = frames[0]
  raise "Not a content header frame first: #{header.inspect}" unless header.kind_of?(AMQ::Protocol::HeaderFrame)
  # Complete when the buffered body frames' payloads add up to the announced size.
  header.body_size == frames[1..-1].inject(0) {|sum, frame| sum + frame.payload.size }
end
end # Adapter
end # Async
end # Client
end # AMQ
|
# encoding: utf-8
module GithubCLI
# The API class is the main entry point for creating GithubCLI APIs.
class API
@@api = nil
class << self
attr_reader :config
# Memoized accessor for the shared Github client.
# Builds and caches the client on first use via #configure_api.
# Fix: the original `@@api ||= begin @@api = configure_api end` assigned the
# class variable twice; `||=` already performs the assignment.
def github_api
  @@api ||= configure_api
end
def configure_api
@@api = Github.new
config = GithubCLI.config.data
if config['user.token']
@@api.oauth_token = config['user.token']
end
if config['user.login'] && config['user.password']
@@api.basic_auth = "#{config['user.login']}:#{config['user.password']}"
end
@@api.endpoint = GithubCLI.config['core.endpoint'] || @@api.endpoint
if ENV['TEST_HOST']
@@api.endpoint = 'http://' + ENV['TEST_HOST']
end
@@api
end
# Runs the given block and renders its result. Plain values are returned
# untouched; API responses (anything responding to #body) are rendered
# through Formatter in the requested +format+.
def output(format=:table, &block)
  response = block.call
  return response unless response.respond_to?(:body)
  Formatter.new(response, :format => format).render_output
end
end
class All
def initialize(params)
puts Github::Repos.new.all params
end
end
end # API
end # GithubCLI
Process output errors.
# encoding: utf-8
module GithubCLI
# The API class is the main entry point for creating GithubCLI APIs.
class API
@@api = nil
class << self
attr_reader :config
# Memoized accessor for the shared Github client.
# Builds and caches the client on first use via #configure_api.
# Fix: the original `@@api ||= begin @@api = configure_api end` assigned the
# class variable twice; `||=` already performs the assignment.
def github_api
  @@api ||= configure_api
end
def configure_api
@@api = Github.new
config = GithubCLI.config.data
if config['user.token']
@@api.oauth_token = config['user.token']
end
if config['user.login'] && config['user.password']
@@api.basic_auth = "#{config['user.login']}:#{config['user.password']}"
end
@@api.endpoint = GithubCLI.config['core.endpoint'] || @@api.endpoint
if ENV['TEST_HOST']
@@api.endpoint = 'http://' + ENV['TEST_HOST']
end
@@api
end
def output(format=:table, &block)
  # GithubCLI.on_error wraps the call so API failures are reported uniformly
  # instead of surfacing as raw exceptions.
  GithubCLI.on_error do
    response = block.call
    # Responses carrying a #body are rendered through the formatter; plain
    # values pass straight through.
    if response.respond_to?(:body)
      formatter = Formatter.new response, :format => format
      formatter.render_output
    else
      response
    end
  end
end
end
class All
def initialize(params)
puts Github::Repos.new.all params
end
end
end # API
end # GithubCLI
|
# encoding: utf-8
module GithubCLI
class CLI < Thor
include Thor::Actions
require 'github_cli/subcommands'
# Sets up the UI, pager, and banner for every CLI invocation, honoring the
# global --no-color, --verbose, and --no-pager options.
def initialize(*args)
  super
  # Fall back to a plain (uncolored) shell when --no-color is given.
  the_shell = (options["no-color"] ? Thor::Shell::Basic.new : shell)
  GithubCLI.ui = UI.new(the_shell)
  # FIX: was `GithubCLi.ui.debug!` — the typo'd constant raised NameError
  # whenever --verbose was passed.
  GithubCLI.ui.debug! if options["verbose"]
  options["no-pager"] ? Pager.disable : Pager.enable
  Terminal.print_program_name
end
map "repository" => :repo,
"reference" => :ref,
"is" => :issue,
"-v" => :version,
"ls" => :list
class_option :config, :type => :string,
:desc => "Configuration file.", :banner => "Config file name",
:default => ".githubrc"
class_option :oauth, :type => :string, :aliases => '-a',
:desc => 'Authentication token.',
:banner => 'Set authentication token'
class_option "no-color", :type => :boolean,
:banner => "Disable colorization in output."
class_option "no-pager", :type => :boolean,
:banner => "Disable pagination of the output."
class_option :verbose, :type => :boolean,
:banner => "Enable verbose output mode."
# Thor metadata for the `init` command (help text + --force flag).
# FIX: "bult-in" -> "built-in" in the user-facing long description.
desc 'init', 'Generates a configuration file in your home directory'
long_desc <<-DESC
Initializes a configuration file where you can set default options for
interacting with GitHub API. Both global and per-command options can be
specified. These defaults override the built-in defaults and allow you to
omit commonly used command line options.
DESC
method_option :force, :type => :boolean, :default => false, :aliases => "-f",
  :banner => "Overwrite configuration file. "
def init(filename=nil)
if filename.nil? || filename =~ /^\//
@config_filename = options[:config]
else
@config_filename = filename
end
# config = Config.new(@config_filename)
if File.exists?(GithubCLI.config.path) && !options[:force]
GithubCLI.ui.error "Not overwritting existing config file #{GithubCLI.config.path}, use --force to override."
exit 1
end
oauth_token = ask "Please specify your GitHub Authentication Token (register on github.com to get it):"
GithubCLI.config.save({'oauth_token' => oauth_token, 'basic_auth' => nil })
GithubCLI.ui.confirm "Writing new configuration file to #{GithubCLI.config.path}"
end
desc 'list <pattern>', 'List all available commands limited by pattern'
def list(pattern="")
pattern = /^#{pattern}.*$/i
Terminal.print_commands pattern
end
desc 'version', 'Display Github CLI version.'
def version
say "Github CLI #{GithubCLI::VERSION}"
end
end # CLI
end # GithubCLI
Add new global option for setting pager.
# encoding: utf-8
module GithubCLI
class CLI < Thor
  include Thor::Actions

  require 'github_cli/subcommands'

  # Sets up the UI shell, verbosity and pager for every command invocation.
  def initialize(*args)
    super
    the_shell = (options["no-color"] ? Thor::Shell::Basic.new : shell)
    GithubCLI.ui = UI.new(the_shell)
    # FIX: was `GithubCLi.ui.debug!` -- a NameError from the misspelled
    # constant, so --verbose could never enable debug output.
    GithubCLI.ui.debug! if options["verbose"]
    options["no-pager"] ? Pager.disable : Pager.enable
    Terminal.print_program_name
  end

  map "repository" => :repo,
      "reference"  => :ref,
      "is"         => :issue,
      "-v"         => :version,
      "ls"         => :list

  class_option :config, :type => :string,
    :desc => "Configuration file.", :banner => "Config file name",
    :default => ".githubrc"
  class_option :oauth, :type => :string, :aliases => '-a',
    :desc => 'Authentication token.',
    :banner => 'Set authentication token'
  class_option "no-color", :type => :boolean,
    :desc => "Disable colorization in output."
  class_option "no-pager", :type => :boolean,
    :desc => "Disable pagination of the output."
  class_option :pager, :type => :string, :aliases => '-p',
    :desc => "Command to be used for paging. Command can have options after it i.e. 'less -r'. Defaults to common pagers i.e. less if detected.",
    :banner => "less, more etc..."
  class_option :verbose, :type => :boolean,
    :desc => "Enable verbose output mode."

  desc 'init', 'Generates a configuration file in your home directory'
  long_desc <<-DESC
    Initializes a configuration file where you can set default options for
    interacting with GitHub API. Both global and per-command options can be
    specified. These defaults override the built-in defaults and allow you to
    omit commonly used command line options.
  DESC
  method_option :force, :type => :boolean, :default => false, :aliases => "-f",
    :banner => "Overwrite configuration file. "
  def init(filename=nil)
    # An absolute path (or no argument) falls back to the --config option.
    if filename.nil? || filename =~ /^\//
      @config_filename = options[:config]
    else
      @config_filename = filename
    end
    # config = Config.new(@config_filename)
    # FIX: File.exists? is deprecated -- use File.exist?.
    if File.exist?(GithubCLI.config.path) && !options[:force]
      GithubCLI.ui.error "Not overwriting existing config file #{GithubCLI.config.path}, use --force to override."
      exit 1
    end
    oauth_token = ask "Please specify your GitHub Authentication Token (register on github.com to get it):"
    GithubCLI.config.save({'oauth_token' => oauth_token, 'basic_auth' => nil })
    GithubCLI.ui.confirm "Writing new configuration file to #{GithubCLI.config.path}"
  end

  desc 'list <pattern>', 'List all available commands limited by pattern'
  def list(pattern="")
    pattern = /^#{pattern}.*$/i
    Terminal.print_commands pattern
  end

  desc 'version', 'Display Github CLI version.'
  def version
    say "Github CLI #{GithubCLI::VERSION}"
  end
end # CLI
end # GithubCLI
|
require 'base64'
module Appium
module Device
extend Forwardable
NoArgMethods = {
post: {
open_notifications: 'session/:session_id/appium/device/open_notifications',
shake: 'session/:session_id/appium/device/shake',
launch: 'session/:session_id/appium/app/launch',
close_app: 'session/:session_id/appium/app/close',
reset: 'session/:session_id/appium/app/reset',
toggle_airplane_mode: 'session/:session_id/appium/device/toggle_airplane_mode',
},
get: {
current_activity: 'session/:session_id/appium/device/current_activity',
current_context: 'session/:session_id/context',
}
}
# @!method app_strings
# Return the hash of all localization strings.
# ```ruby
# app_strings #=> "TransitionsTitle"=>"Transitions", "WebTitle"=>"Web"
# ```
# @!method background_app
# Backgrounds the app for a set number of seconds.
# This is a blocking application
# @param seconds (int) How many seconds to background the app for.
# @!method current_activity
# @!method launch
# Start the simulator and applicaton configured with desired capabilities
# @!method reset
# Reset the device, relaunching the application.
# @!method shake
# Cause the device to shake
# @!method toggle_flight_mode
# toggle flight mode on or off
# @!method hide_keyboard
# Hide the onscreen keyboard
# @param close_key (String) the name of the key which closes the keyboard.
# Defaults to 'Done'.
# ```ruby
# hide_keyboard # Close a keyboard with the 'Done' button
# hide_keyboard('Finished') # Close a keyboard with the 'Finished' button
# ```
# @!method press_keycode
# Press keycode on the device.
# http://developer.android.com/reference/android/view/KeyEvent.html
# @param key (integer) The key to press.
# @param metastate (String) The state the metakeys should be in when pressing the key.
# @!method long_press_keycode
# Long press keycode on the device.
# http://developer.android.com/reference/android/view/KeyEvent.html
# @param key (integer) The key to long press.
# @param metastate (String) The state the metakeys should be in when long pressing the key.
# @!method push_file
# Place a file in a specific location on the device.
# @param path (String) The absolute path on the device to store data at.
# @param data (String) Raw file data to be sent to the device.
# @!method pull_file
# Retrieve a file from the device. This can retrieve an absolute path or
# a path relative to the installed app (iOS only).
# @param path (String) Either an absolute path OR, for iOS devices, a path relative to the app, as described.
#
# ```ruby
# pull_file '/local/data/some/path' #=> Get the file at that path
# pull_file 'Shenanigans.app/some/file' #=> Get 'some/file' from the install location of Shenanigans.app
# ```
# @!method pull_folder
# Retrieve a folder from the device.
# @param path (String) absolute path to the folder
#
# ```ruby
# pull_folder '/data/local/tmp' #=> Get the folder at that path
# ```
# @!method end_coverage
# Android only; Ends the test coverage and writes the results to the given path on device.
# @param path (String) Path on the device to write too.
# @param intent (String) Intent to broadcast when ending coverage.
# @!method get_settings
# Get appium Settings for current test session
# @!method update_settings
# Update appium Settings for current test session
# @param settings (hash) Settings to update, keys are settings, values to value to set each setting to
class << self
def extended(mod)
extend_webdriver_with_forwardable
NoArgMethods.each_pair do |verb, pair|
pair.each_pair { |command, path| add_endpoint_method command, path, verb }
end
add_endpoint_method(:available_contexts, 'session/:session_id/contexts', :get) do
def available_contexts
# return empty array instead of nil on failure
execute(:available_contexts, {}) || []
end
end
add_endpoint_method(:app_strings, 'session/:session_id/appium/app/strings') do
def app_strings language=nil
opts = language ? { language: language } : {}
execute :app_strings, {}, opts
end
end
add_endpoint_method(:lock, 'session/:session_id/appium/device/lock') do
def lock(duration)
execute :lock, {}, :seconds => duration
end
end
add_endpoint_method(:install, 'session/:session_id/appium/device/install_app') do
def install(path)
execute :install, {}, :appPath => path
end
end
add_endpoint_method(:remove, 'session/:session_id/appium/device/remove_app') do
def remove(id)
execute :remove, {}, :appId => id
end
end
add_endpoint_method(:is_installed?, 'session/:session_id/appium/device/app_installed') do
def is_installed?(app_id)
execute :is_installed?, {}, :bundleId => app_id
end
end
add_endpoint_method(:background_app, 'session/:session_id/appium/app/background') do
def background_app(duration)
execute :background_app, {}, :seconds => duration
end
end
# @!method start_activity
# Start a new activity within the current app or launch a new app and start the target activity.
#
# Android only.
# @param [String] The package owning the activity [required]
# @param [String] The target activity [required]
# @param [String] The package to start before the target package [optional]
# @param [String] The activity to start before the target activity [optional]
#
# ```ruby
# start_activity app_package: 'io.appium.android.apis', app_activity: '.accessibility.AccessibilityNodeProviderActivity'
# ```
add_endpoint_method(:start_activity, 'session/:session_id/appium/device/start_activity') do
def start_activity(opts)
raise 'opts must be a hash' unless opts.is_a? Hash
app_package = opts[:app_package]
raise 'app_package is required' unless app_package
app_activity = opts[:app_activity]
raise 'app_activity is required' unless opts[:app_activity]
app_wait_package = opts.fetch(:app_wait_package, '')
app_wait_activity = opts.fetch(:app_wait_activity, '')
unknown_opts = opts.keys - [:app_package, :app_activity, :app_wait_package, :app_wait_activity]
raise "Unknown options #{unknown_opts}" unless unknown_opts.empty?
execute :start_activity, {}, { appPackage: app_package, appActivity: app_activity,
appWaitPackage: app_wait_package, appWaitActivity: app_wait_activity }
end
end
add_endpoint_method(:set_context, 'session/:session_id/context') do
  # FIX: the default was `null`, which is not a Ruby keyword and raises
  # NameError whenever `set_context` is called without an argument; use `nil`.
  def set_context(context=nil)
    execute :set_context, {}, :name => context
  end
end
add_endpoint_method(:hide_keyboard, 'session/:session_id/appium/device/hide_keyboard') do
def hide_keyboard(close_key=nil)
# Android can only tapOutside.
if $driver.device_is_android?
return execute :hide_keyboard, {}, { strategy: :tapOutside }
end
close_key ||= 'Done' # default to Done key.
$driver.hide_ios_keyboard close_key
end
end
add_endpoint_method(:press_keycode, 'session/:session_id/appium/device/press_keycode') do
def press_keycode(key, metastate=nil)
args = { keycode: key }
args[:metastate] = metastate if metastate
execute :press_keycode, {}, args
end
end
add_endpoint_method(:long_press_keycode, 'session/:session_id/appium/device/long_press_keycode') do
def long_press_keycode(key, metastate=nil)
args = { keycode: key }
args[:metastate] = metastate if metastate
execute :long_press_keycode, {}, args
end
end
# TODO TEST ME
add_endpoint_method(:set_immediate_value, 'session/:session_id/appium/element/:id/value') do
def set_immediate_value(element, value)
execute :set_immediate_value, { :id => element.ref }, value
end
end
add_endpoint_method(:push_file, 'session/:session_id/appium/device/push_file') do
def push_file(path, filedata)
encoded_data = Base64.encode64 filedata
execute :push_file, {}, path: path, data: encoded_data
end
end
add_endpoint_method(:pull_file, 'session/:session_id/appium/device/pull_file') do
def pull_file(path)
data = execute :pull_file, {}, path: path
Base64.decode64 data
end
end
# TODO TEST ME
add_endpoint_method(:pull_folder, 'session/:session_id/appium/device/pull_folder') do
def pull_folder(path)
data = execute :pull_folder, {}, path: path
Base64.decode64 data
end
end
# TODO TEST ME
add_endpoint_method(:end_coverage, 'session/:session_id/appium/app/end_test_coverage') do
def end_coverage(path, intent)
execute :end_coverage, {}, path: path, intent: intent
end
end
add_endpoint_method(:get_settings, 'session/:session_id/appium/settings', :get) do
def get_settings
execute :get_settings, {}
end
end
add_endpoint_method(:update_settings, 'session/:session_id/appium/settings') do
def update_settings(settings)
execute :update_settings, {}, settings: settings
end
end
add_touch_actions
extend_search_contexts
end
# def extended
# @private
#
# Registers `method` as a Selenium bridge command for the REST `path` and
# HTTP `verb`, then exposes it on both the WebDriver driver (delegating to
# its bridge) and the Appium driver. An optional block supplies a custom
# method body; otherwise a default `execute` wrapper is generated.
def add_endpoint_method(method, path, verb=:post)
if block_given?
# &Proc.new with no args passes the passed_in block
# Because creating Procs from blocks is slow
create_bridge_command method, verb, path, &Proc.new
else
create_bridge_command method, verb, path
end
delegate_driver_method method
delegate_from_appium_driver method
end
# @private
# Mixes Forwardable into the Selenium WebDriver class so bridge methods
# can later be delegated to it. Safe to call repeatedly (idempotent).
def extend_webdriver_with_forwardable
  driver_class = Selenium::WebDriver::Driver
  return if driver_class.kind_of?(Forwardable)
  driver_class.extend(Forwardable)
end
# @private
# Makes `method` available on the WebDriver driver by forwarding the call
# to its underlying @bridge. No-op if the driver already defines it.
def delegate_driver_method(method)
return if Selenium::WebDriver::Driver.method_defined? method
Selenium::WebDriver::Driver.class_eval { def_delegator :@bridge, method }
end
# @private
# Forwards `method` on this module to `delegation_target` (by default the
# `driver` accessor) via Forwardable.
def delegate_from_appium_driver(method, delegation_target=:driver)
def_delegator delegation_target, method
end
# @private
# Defines `method` on the Selenium remote bridge: registers the REST route
# via `command`, then either evaluates the caller's block (expected to
# `def` the method body) or generates a minimal one-shot `execute` wrapper.
def create_bridge_command(method, verb, path)
Selenium::WebDriver::Remote::Bridge.class_eval do
command method, verb, path
if block_given?
# NOTE(review): `block_given?` / `Proc.new` inside this class_eval block
# still refer to the block passed to create_bridge_command itself.
class_eval &Proc.new
else
define_method(method) { execute method }
end
end
end
# @!method accessiblity_id_find
# find_element/s with their accessibility_id
#
# ```ruby
# find_elements :accessibility_id, 'Animation'
# ```
def extend_search_contexts
# Registers the 'accessibility id' locator strategy with Selenium's
# finder table so find_element(:accessibility_id, ...) works.
Selenium::WebDriver::SearchContext.class_eval do
Selenium::WebDriver::SearchContext::FINDERS[:accessibility_id] = 'accessibility id'
end
end
# Registers the touch and multi-touch endpoints and delegates the complex
# gesture helpers (plus pinch/zoom) from the Appium driver.
def add_touch_actions
add_endpoint_method(:touch_actions, 'session/:session_id/touch/perform') do
def touch_actions(actions)
# Accept a single action hash or an array of actions.
actions = [actions].flatten
execute :touch_actions, {}, actions
end
end
add_endpoint_method(:multi_touch, 'session/:session_id/touch/multi/perform') do
def multi_touch(actions)
execute :multi_touch, {}, actions: actions
end
end
actions = Appium::TouchAction::COMPLEX_ACTIONS
actions.each do |method|
delegate_from_appium_driver(method, Appium::TouchAction)
end
delegate_from_appium_driver(:pinch, Appium::MultiTouch)
delegate_from_appium_driver(:zoom, Appium::MultiTouch)
end
end # class << self
# @!method set_context
# Change the context to the given context.
# @param [String] The context to change to
#
# ```ruby
# set_context "NATIVE_APP"
# ```
# @!method current_context
# @return [String] The context currently being used.
# @!method available_contexts
# @return [Array<String>] All usable contexts, as an array of strings.
# Perform a block within the given context, then switch back to the
# starting context.
# @param context (String) The context to switch to for the duration of the block.
#
# ```ruby
# within_context('NATIVE_APP') do
#   find_element [:tag, "button"]
# end
# ```
def within_context(context)
  existing_context = current_context
  set_context context
  yield if block_given?
ensure
  # FIX: restore the original context even when the block raises; the
  # previous version left the driver stuck in `context` on error.
  set_context existing_context
end
# Switch back to the default (native) context; same as `set_context nil`.
def switch_to_default_context
  set_context(nil)
end
end # module Device
end # module Appium
Device Modes
require 'base64'
module Appium
module Device
extend Forwardable
NoArgMethods = {
post: {
open_notifications: 'session/:session_id/appium/device/open_notifications',
shake: 'session/:session_id/appium/device/shake',
launch: 'session/:session_id/appium/app/launch',
close_app: 'session/:session_id/appium/app/close',
reset: 'session/:session_id/appium/app/reset',
toggle_airplane_mode: 'session/:session_id/appium/device/toggle_airplane_mode',
},
get: {
current_activity: 'session/:session_id/appium/device/current_activity',
current_context: 'session/:session_id/context',
get_network_connection: 'session/:session_id/network_connection',
}
}
# @!method app_strings
# Return the hash of all localization strings.
# ```ruby
# app_strings #=> "TransitionsTitle"=>"Transitions", "WebTitle"=>"Web"
# ```
# @!method background_app
# Backgrounds the app for a set number of seconds.
# This is a blocking application
# @param seconds (int) How many seconds to background the app for.
# @!method current_activity
# @!method launch
# Start the simulator and applicaton configured with desired capabilities
# @!method reset
# Reset the device, relaunching the application.
# @!method shake
# Cause the device to shake
# @!method toggle_flight_mode
# toggle flight mode on or off
# @!method hide_keyboard
# Hide the onscreen keyboard
# @param close_key (String) the name of the key which closes the keyboard.
# Defaults to 'Done'.
# ```ruby
# hide_keyboard # Close a keyboard with the 'Done' button
# hide_keyboard('Finished') # Close a keyboard with the 'Finished' button
# ```
# @!method press_keycode
# Press keycode on the device.
# http://developer.android.com/reference/android/view/KeyEvent.html
# @param key (integer) The key to press.
# @param metastate (String) The state the metakeys should be in when pressing the key.
# @!method long_press_keycode
# Long press keycode on the device.
# http://developer.android.com/reference/android/view/KeyEvent.html
# @param key (integer) The key to long press.
# @param metastate (String) The state the metakeys should be in when long pressing the key.
# @!method push_file
# Place a file in a specific location on the device.
# @param path (String) The absolute path on the device to store data at.
# @param data (String) Raw file data to be sent to the device.
# @!method pull_file
# Retrieve a file from the device. This can retrieve an absolute path or
# a path relative to the installed app (iOS only).
# @param path (String) Either an absolute path OR, for iOS devices, a path relative to the app, as described.
#
# ```ruby
# pull_file '/local/data/some/path' #=> Get the file at that path
# pull_file 'Shenanigans.app/some/file' #=> Get 'some/file' from the install location of Shenanigans.app
# ```
# @!method pull_folder
# Retrieve a folder from the device.
# @param path (String) absolute path to the folder
#
# ```ruby
# pull_folder '/data/local/tmp' #=> Get the folder at that path
# ```
# @!method end_coverage
# Android only; Ends the test coverage and writes the results to the given path on device.
# @param path (String) Path on the device to write too.
# @param intent (String) Intent to broadcast when ending coverage.
# @!method get_settings
# Get appium Settings for current test session
# @!method update_settings
# Update appium Settings for current test session
# @param settings (hash) Settings to update, keys are settings, values to value to set each setting to
class << self
def extended(mod)
extend_webdriver_with_forwardable
NoArgMethods.each_pair do |verb, pair|
pair.each_pair { |command, path| add_endpoint_method command, path, verb }
end
add_endpoint_method(:available_contexts, 'session/:session_id/contexts', :get) do
def available_contexts
# return empty array instead of nil on failure
execute(:available_contexts, {}) || []
end
end
add_endpoint_method(:app_strings, 'session/:session_id/appium/app/strings') do
def app_strings language=nil
opts = language ? { language: language } : {}
execute :app_strings, {}, opts
end
end
add_endpoint_method(:lock, 'session/:session_id/appium/device/lock') do
def lock(duration)
execute :lock, {}, :seconds => duration
end
end
add_endpoint_method(:install, 'session/:session_id/appium/device/install_app') do
def install(path)
execute :install, {}, :appPath => path
end
end
add_endpoint_method(:remove, 'session/:session_id/appium/device/remove_app') do
def remove(id)
execute :remove, {}, :appId => id
end
end
add_endpoint_method(:is_installed?, 'session/:session_id/appium/device/app_installed') do
def is_installed?(app_id)
execute :is_installed?, {}, :bundleId => app_id
end
end
add_endpoint_method(:background_app, 'session/:session_id/appium/app/background') do
def background_app(duration)
execute :background_app, {}, :seconds => duration
end
end
# @!method start_activity
# Start a new activity within the current app or launch a new app and start the target activity.
#
# Android only.
# @param [String] The package owning the activity [required]
# @param [String] The target activity [required]
# @param [String] The package to start before the target package [optional]
# @param [String] The activity to start before the target activity [optional]
#
# ```ruby
# start_activity app_package: 'io.appium.android.apis', app_activity: '.accessibility.AccessibilityNodeProviderActivity'
# ```
add_endpoint_method(:start_activity, 'session/:session_id/appium/device/start_activity') do
def start_activity(opts)
raise 'opts must be a hash' unless opts.is_a? Hash
app_package = opts[:app_package]
raise 'app_package is required' unless app_package
app_activity = opts[:app_activity]
raise 'app_activity is required' unless opts[:app_activity]
app_wait_package = opts.fetch(:app_wait_package, '')
app_wait_activity = opts.fetch(:app_wait_activity, '')
unknown_opts = opts.keys - [:app_package, :app_activity, :app_wait_package, :app_wait_activity]
raise "Unknown options #{unknown_opts}" unless unknown_opts.empty?
execute :start_activity, {}, { appPackage: app_package, appActivity: app_activity,
appWaitPackage: app_wait_package, appWaitActivity: app_wait_activity }
end
end
add_endpoint_method(:set_context, 'session/:session_id/context') do
  # FIX: the default was `null`, which is not a Ruby keyword and raises
  # NameError whenever `set_context` is called without an argument; use `nil`.
  def set_context(context=nil)
    execute :set_context, {}, :name => context
  end
end
add_endpoint_method(:hide_keyboard, 'session/:session_id/appium/device/hide_keyboard') do
def hide_keyboard(close_key=nil)
# Android can only tapOutside.
if $driver.device_is_android?
return execute :hide_keyboard, {}, { strategy: :tapOutside }
end
close_key ||= 'Done' # default to Done key.
$driver.hide_ios_keyboard close_key
end
end
add_endpoint_method(:press_keycode, 'session/:session_id/appium/device/press_keycode') do
def press_keycode(key, metastate=nil)
args = { keycode: key }
args[:metastate] = metastate if metastate
execute :press_keycode, {}, args
end
end
add_endpoint_method(:long_press_keycode, 'session/:session_id/appium/device/long_press_keycode') do
def long_press_keycode(key, metastate=nil)
args = { keycode: key }
args[:metastate] = metastate if metastate
execute :long_press_keycode, {}, args
end
end
# TODO TEST ME
add_endpoint_method(:set_immediate_value, 'session/:session_id/appium/element/:id/value') do
def set_immediate_value(element, value)
execute :set_immediate_value, { :id => element.ref }, value
end
end
add_endpoint_method(:push_file, 'session/:session_id/appium/device/push_file') do
def push_file(path, filedata)
encoded_data = Base64.encode64 filedata
execute :push_file, {}, path: path, data: encoded_data
end
end
add_endpoint_method(:pull_file, 'session/:session_id/appium/device/pull_file') do
def pull_file(path)
data = execute :pull_file, {}, path: path
Base64.decode64 data
end
end
# TODO TEST ME
add_endpoint_method(:pull_folder, 'session/:session_id/appium/device/pull_folder') do
def pull_folder(path)
data = execute :pull_folder, {}, path: path
Base64.decode64 data
end
end
# TODO TEST ME
add_endpoint_method(:end_coverage, 'session/:session_id/appium/app/end_test_coverage') do
def end_coverage(path, intent)
execute :end_coverage, {}, path: path, intent: intent
end
end
add_endpoint_method(:get_settings, 'session/:session_id/appium/settings', :get) do
def get_settings
execute :get_settings, {}
end
end
add_endpoint_method(:update_settings, 'session/:session_id/appium/settings') do
def update_settings(settings)
execute :update_settings, {}, settings: settings
end
end
# @!method get_network_connection
# Get the device network connection current status
# See set_network_connection method for return value
# @!method set_network_connection
# Set the device network connection mode
# @param path (String) Bit mask that represent the network mode
# Value (Alias) | Data | Wifi | Airplane Mode
# -------------------------------------------------
# 1 (Airplane Mode) | 0 | 0 | 1
# 6 (All network on) | 1 | 1 | 0
# 4 (Data only) | 1 | 0 | 0
# 2 (Wifi only) | 0 | 1 | 0
# 0 (None) | 0 | 0 | 0
#
add_endpoint_method(:set_network_connection, 'session/:session_id/network_connection') do
def set_network_connection(mode)
execute :set_network_connection, {}, type: mode
end
end
add_touch_actions
extend_search_contexts
end
# def extended
# @private
def add_endpoint_method(method, path, verb=:post)
if block_given?
# &Proc.new with no args passes the passed_in block
# Because creating Procs from blocks is slow
create_bridge_command method, verb, path, &Proc.new
else
create_bridge_command method, verb, path
end
delegate_driver_method method
delegate_from_appium_driver method
end
# @private
def extend_webdriver_with_forwardable
return if Selenium::WebDriver::Driver.kind_of? Forwardable
Selenium::WebDriver::Driver.class_eval do
extend Forwardable
end
end
# @private
def delegate_driver_method(method)
return if Selenium::WebDriver::Driver.method_defined? method
Selenium::WebDriver::Driver.class_eval { def_delegator :@bridge, method }
end
# @private
def delegate_from_appium_driver(method, delegation_target=:driver)
def_delegator delegation_target, method
end
# @private
def create_bridge_command(method, verb, path)
Selenium::WebDriver::Remote::Bridge.class_eval do
command method, verb, path
if block_given?
class_eval &Proc.new
else
define_method(method) { execute method }
end
end
end
# @!method accessiblity_id_find
# find_element/s with their accessibility_id
#
# ```ruby
# find_elements :accessibility_id, 'Animation'
# ```
def extend_search_contexts
Selenium::WebDriver::SearchContext.class_eval do
Selenium::WebDriver::SearchContext::FINDERS[:accessibility_id] = 'accessibility id'
end
end
def add_touch_actions
add_endpoint_method(:touch_actions, 'session/:session_id/touch/perform') do
def touch_actions(actions)
actions = [actions].flatten
execute :touch_actions, {}, actions
end
end
add_endpoint_method(:multi_touch, 'session/:session_id/touch/multi/perform') do
def multi_touch(actions)
execute :multi_touch, {}, actions: actions
end
end
actions = Appium::TouchAction::COMPLEX_ACTIONS
actions.each do |method|
delegate_from_appium_driver(method, Appium::TouchAction)
end
delegate_from_appium_driver(:pinch, Appium::MultiTouch)
delegate_from_appium_driver(:zoom, Appium::MultiTouch)
end
end # class << self
# @!method set_context
# Change the context to the given context.
# @param [String] The context to change to
#
# ```ruby
# set_context "NATIVE_APP"
# ```
# @!method current_context
# @return [String] The context currently being used.
# @!method available_contexts
# @return [Array<String>] All usable contexts, as an array of strings.
# Perform a block within the given context, then switch back to the
# starting context.
# @param context (String) The context to switch to for the duration of the block.
#
# ```ruby
# within_context('NATIVE_APP') do
#   find_element [:tag, "button"]
# end
# ```
def within_context(context)
  existing_context = current_context
  set_context context
  yield if block_given?
ensure
  # FIX: restore the original context even when the block raises; the
  # previous version left the driver stuck in `context` on error.
  set_context existing_context
end
# Switch back to the default (native) context; same as `set_context nil`.
def switch_to_default_context
  set_context(nil)
end
end # module Device
end # module Appium
|
module Gitter
  # Gem version string, following Semantic Versioning.
  VERSION = "1.1.0"
end
Bumped to version 1.1.1.
module Gitter
  # Gem version string, following Semantic Versioning.
  VERSION = "1.1.1"
end
|
require 'aptible/auth'
require 'aptible/api'
module Aptible
module Rails
module Controller
extend ActiveSupport::Concern
included do
  # FIX: `:has_acccount?` (triple "c") registered a non-existent helper;
  # the predicate defined below is `has_account?`, so views calling it
  # would fail.
  helper_method :auth, :api, :current_aptible_user,
                :current_organization, :subscribed?, :has_account?,
                :email_verified?, :subscribed_and_verified?, :user_url,
                :organization_url
end
# Memoized Aptible Auth API agent, authenticated with the service token.
def auth
@auth ||= Aptible::Auth::Agent.new(token: service_token).get
end
# Memoized Aptible core API agent, authenticated with the service token.
def api
@api ||= Aptible::Api::Agent.new(token: service_token).get
end
def current_aptible_user
return unless aptible_subject
@current_user ||= auth.find_by_url(aptible_subject)
rescue => e
clear_session_cookie
raise e
end
def current_organization
session[:organization_url] ||= auth.organizations.first.href
url = [session[:organization_url], token: service_token]
@current_organization ||= Aptible::Auth::Organization.find_by_url(*url)
rescue
nil
end
# rubocop:disable PredicateName
def has_account?
current_organization && current_organization.accounts.any?
end
# rubocop:enable PredicateName
# True when the current organization has at least one account with an
# active subscription. The result is cached for the request.
def subscribed?
  # FIX: `||=` never caches a falsey result, so non-subscribed users
  # re-queried the API on every call; guard with defined? instead.
  return @has_subscription if defined?(@has_subscription)
  @has_subscription = has_account? &&
    current_organization.accounts.any?(&:has_subscription?)
end
def email_verified?
current_aptible_user && current_aptible_user.verified?
end
def subscribed_and_verified?
has_account? && subscribed? && email_verified?
end
def service_token
return unless aptible_token && aptible_token.session
@service_token ||= service_token_for(aptible_token)
end
def aptible_login_url
Aptible::Rails.configuration.login_url
end
def aptible_subject
token_subject || session_subject
end
def aptible_token
current_token || session_token
end
# before_action :authenticate_user
def authenticate_aptible_user
redirect_to aptible_login_url unless current_aptible_user
end
# before_action :ensure_service_token
def ensure_service_token
redirect_to aptible_login_url unless service_token
end
def service_token_for(token)
service_token = fetch_service_token(token)
if Fridge::AccessToken.new(service_token).valid?
service_token
else
fetch_service_token(token, force: true) || token
end
end
def fetch_service_token(token, options = {})
fail 'Token must be a service token' unless token.session
::Rails.cache.fetch "service_token:#{token.session}", options do
swap_session_token(token)
end
end
def swap_session_token(token)
Aptible::Auth::Token.create(
client_id: Aptible::Rails.configuration.client_id,
client_secret: Aptible::Rails.configuration.client_secret,
subject: token.serialize
).access_token
rescue OAuth2::Error => e
if e.code == 'unauthorized'
nil
else
fail 'Could not swap session token, check Client#privileged?'
end
end
def organization_url(id)
"#{dashboard_url}/organizations/#{id}"
end
def user_url(id = current_aptible_user.id)
"#{dashboard_url}/users/#{id}"
end
end
end
end
Clean up Controller mixin
Fixes aptible/dashboard.aptible.com#62
require 'aptible/auth'
require 'aptible/api'
module Aptible
module Rails
module Controller
extend ActiveSupport::Concern
included do
helper_method :current_user, :current_organization, :user_url,
:organization_url
end
def current_user
return unless current_user_url
@current_user ||= Aptible::Auth::User.find_by_url(current_user_url,
token: session_token)
rescue => e
clear_session_cookie
raise e
end
def current_organization
session[:organization_url] ||= Aptible::Auth::Organization.all(
token: session_token
).first.href
url = [session[:organization_url], token: service_token]
@current_organization ||= Aptible::Auth::Organization.find_by_url(*url)
rescue
nil
end
def current_user_url
token_subject || session_subject
end
# before_action :authenticate_user
def authenticate_user
redirect_to Aptible::Rails.configuration.login_url unless current_user
end
# before_action :ensure_service_token
def ensure_service_token
  # FIX: `aptible_login_url` is not defined in this revision of the mixin
  # (NameError at runtime); use the configured login URL directly, exactly
  # as `authenticate_user` does.
  redirect_to Aptible::Rails.configuration.login_url unless service_token
end
# Returns (and memoizes) a service token swapped from the session token,
# falling back to a forced re-swap -- or the raw session token -- when the
# cached one is stale.
def service_token
  return unless session_token && session_token.session
  return @service_token if @service_token
  @service_token = cached_service_token(session_token)
  # FIX: the original re-entered `service_token` recursively inside
  # `Fridge::AccessToken.new(service_token)`; when the cached swap returned
  # nil the memoization guard never tripped and this recursed forever
  # (SystemStackError). Read the ivar directly instead.
  return @service_token if Fridge::AccessToken.new(@service_token).valid?
  @service_token = cached_service_token(session_token,
                                        force: true) || session_token
end
def cached_service_token(session_token, options = {})
fail 'Token must be a service token' unless session_token.session
cache_key = "service_token:#{session_token.session}"
::Rails.cache.fetch(cache_key, options) do
swap_session_token(session_token)
end
end
def swap_session_token(session_token)
Aptible::Auth::Token.create(
client_id: Aptible::Rails.configuration.client_id,
client_secret: Aptible::Rails.configuration.client_secret,
subject: session_token.serialize
).access_token
rescue OAuth2::Error => e
if e.code == 'unauthorized'
nil
else
fail 'Could not swap session token, check Client#privileged?'
end
end
def organization_url(id)
"#{dashboard_url}/organizations/#{id}"
end
def user_url(id = current_user.id)
"#{dashboard_url}/users/#{id}"
end
end
end
end
|
=begin
Copyright 2010-2012 Tasos Laskos <tasos.laskos@gmail.com>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
=end
require 'digest/sha1'
require 'cgi'
module Arachni
module Module

#
# Utilities class
#
# Includes some useful methods for the system, the modules etc...
#
# @author: Tasos "Zapotek" Laskos
#                                      <tasos.laskos@gmail.com>
#                                      <zapotek@segfault.gr>
# @version: 0.1.3
#
module Utilities

    # Shared URI parser instance, created lazily.
    def uri_parser
        @@uri_parser ||= URI::Parser.new
    end

    # Parses +url+ into a URI object.
    def uri_parse( url )
        uri_parser.parse( url )
    end

    # Percent-encodes via the shared parser.
    def uri_encode( *args )
        uri_parser.escape( *args )
    end

    # Percent-decodes via the shared parser.
    def uri_decode( *args )
        uri_parser.unescape( *args )
    end

    #
    # Decodes URLs to reverse multiple encodes and removes NULL characters
    #
    def url_sanitize( url )
        # keep decoding until no %XX escapes remain, then re-encode once
        while( url =~ /%[a-fA-F0-9]{2}/ )
            url = ( uri_decode( url ).to_s.unpack( 'A*' )[0] )
        end
        return uri_encode( CGI.unescapeHTML( url ) )
    end

    #
    # Gets path from URL
    #
    # @param  [String]  url
    #
    # @return [String]  path (scheme://host[:port]/dir/, trailing slash
    #   guaranteed; the port is omitted when it is the default 80)
    #
    def get_path( url )
        uri  = uri_parser.parse( uri_encode( url ) )
        path = uri.path

        # strip the filename component, keep only the directory
        if !File.extname( path ).empty?
            path = File.dirname( path )
        end

        path << '/' if path[-1] != '/'

        # do not append the port for the default HTTP port (80) so that
        # http://example.com/ and http://example.com:80/ normalize identically
        uri_str = uri.scheme + "://" + uri.host
        uri_str += ':' + uri.port.to_s if uri.port != 80
        uri_str += path
    end

    # Process-wide random seed, computed once.
    def seed
        @@seed ||= Digest::SHA2.hexdigest( srand( 1000 ).to_s )
    end

    # Normalizes +url+ by decode/encode round-tripping; square brackets are
    # kept encoded, '#' is restored so fragments survive.
    def normalize_url( url )
        # make sure we're working with the pure form of the URL
        url = url_sanitize( url )

        begin
            normalized = uri_encode( uri_decode( url.to_s ) ).to_s.gsub( '[', '%5B' ).gsub( ']', '%5D' )
        rescue Exception => e
            # ap e
            # ap e.backtrace
            begin
                normalized = uri_encode( uri_decode( url.to_s ) ).to_s
            rescue Exception => e
                # ap e
                # ap e.backtrace
                normalized = url
            end
        end

        #
        # prevent this: http://example.com#fragment
        # from becoming this: http://example.com%23fragment
        #
        begin
            normalized.gsub!( '%23', '#' )
        rescue
        end

        return normalized
    end

    #
    # Gets module data files from 'modules/[modtype]/[modname]/[filename]'
    #
    # @param  [String]  filename  filename, without the path
    # @param  [Block]   the block to be passed each line as it's read
    #
    def read_file( filename, &block )
        # the path of the module that called us
        mod_path = block.source_location[0]

        # the name of the module that called us
        mod_name = File.basename( mod_path, ".rb")

        # the path to the module's data file directory
        path = File.expand_path( File.dirname( mod_path ) ) +
            '/' + mod_name + '/'

        file = File.open( path + '/' + filename ).each {
            |line|
            yield line.strip
        }
        file.close
    end

    # Recursively converts all Hash keys to Strings.
    def hash_keys_to_str( hash )
        nh = {}
        hash.each_pair {
            |k, v|
            nh[k.to_s] = v
            nh[k.to_s] = hash_keys_to_str( v ) if v.is_a? Hash
        }
        return nh
    end

    #
    # Wraps the "block" in exception handling code and runs it.
    #
    # @param [Block]
    #
    def exception_jail( raise_exception = true, &block )
        begin
            block.call
        rescue Exception => e
            err_name = !e.to_s.empty? ? e.to_s : e.class.name
            print_error( err_name )
            print_error_backtrace( e )
            raise e if raise_exception
        end
    end

    extend self

end

end
end
get_path(): doesn't add port to the URL if it's 80
=begin
Copyright 2010-2012 Tasos Laskos <tasos.laskos@gmail.com>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
=end
require 'digest/sha1'
require 'cgi'
module Arachni
module Module

#
# Utilities class
#
# Includes some useful methods for the system, the modules etc...
#
# @author: Tasos "Zapotek" Laskos
#                                      <tasos.laskos@gmail.com>
#                                      <zapotek@segfault.gr>
# @version: 0.1.3
#
module Utilities

    # Shared URI parser instance, created lazily.
    def uri_parser
        @@uri_parser ||= URI::Parser.new
    end

    # Parses +url+ into a URI object.
    def uri_parse( url )
        uri_parser.parse( url )
    end

    # Percent-encodes via the shared parser.
    def uri_encode( *args )
        uri_parser.escape( *args )
    end

    # Percent-decodes via the shared parser.
    def uri_decode( *args )
        uri_parser.unescape( *args )
    end

    #
    # Decodes URLs to reverse multiple encodes and removes NULL characters
    #
    def url_sanitize( url )
        # keep decoding until no %XX escapes remain, then re-encode once
        while( url =~ /%[a-fA-F0-9]{2}/ )
            url = ( uri_decode( url ).to_s.unpack( 'A*' )[0] )
        end
        return uri_encode( CGI.unescapeHTML( url ) )
    end

    #
    # Gets path from URL
    #
    # @param  [String]  url
    #
    # @return [String]  path (scheme://host[:port]/dir/, trailing slash
    #   guaranteed; the port is omitted when it is the default 80)
    #
    def get_path( url )
        uri  = uri_parser.parse( uri_encode( url ) )
        path = uri.path

        # strip the filename component, keep only the directory
        if !File.extname( path ).empty?
            path = File.dirname( path )
        end

        path << '/' if path[-1] != '/'

        # omit the default HTTP port so http://h/ and http://h:80/ agree
        uri_str = uri.scheme + "://" + uri.host
        uri_str += ':' + uri.port.to_s if uri.port != 80
        uri_str += path
    end

    # Process-wide random seed, computed once.
    def seed
        @@seed ||= Digest::SHA2.hexdigest( srand( 1000 ).to_s )
    end

    # Normalizes +url+ by decode/encode round-tripping; square brackets are
    # kept encoded, '#' is restored so fragments survive.
    def normalize_url( url )
        # make sure we're working with the pure form of the URL
        url = url_sanitize( url )

        begin
            normalized = uri_encode( uri_decode( url.to_s ) ).to_s.gsub( '[', '%5B' ).gsub( ']', '%5D' )
        rescue Exception => e
            # ap e
            # ap e.backtrace
            begin
                normalized = uri_encode( uri_decode( url.to_s ) ).to_s
            rescue Exception => e
                # ap e
                # ap e.backtrace
                normalized = url
            end
        end

        #
        # prevent this: http://example.com#fragment
        # from becoming this: http://example.com%23fragment
        #
        begin
            normalized.gsub!( '%23', '#' )
        rescue
        end

        return normalized
    end

    #
    # Gets module data files from 'modules/[modtype]/[modname]/[filename]'
    #
    # @param  [String]  filename  filename, without the path
    # @param  [Block]   the block to be passed each line as it's read
    #
    def read_file( filename, &block )
        # the path of the module that called us
        mod_path = block.source_location[0]

        # the name of the module that called us
        mod_name = File.basename( mod_path, ".rb")

        # the path to the module's data file directory
        path = File.expand_path( File.dirname( mod_path ) ) +
            '/' + mod_name + '/'

        file = File.open( path + '/' + filename ).each {
            |line|
            yield line.strip
        }
        file.close
    end

    # Recursively converts all Hash keys to Strings.
    def hash_keys_to_str( hash )
        nh = {}
        hash.each_pair {
            |k, v|
            nh[k.to_s] = v
            nh[k.to_s] = hash_keys_to_str( v ) if v.is_a? Hash
        }
        return nh
    end

    #
    # Wraps the "block" in exception handling code and runs it.
    #
    # @param [Block]
    #
    def exception_jail( raise_exception = true, &block )
        begin
            block.call
        rescue Exception => e
            err_name = !e.to_s.empty? ? e.to_s : e.class.name
            print_error( err_name )
            print_error_backtrace( e )
            raise e if raise_exception
        end
    end

    extend self

end

end
end
|
# -*- ruby -*-
#encoding: utf-8
require 'time'
require 'arborist/cli' unless defined?( Arborist::CLI )
require 'arborist/client'
# Command to fetch down/acked/disabled nodes for quick display.
module Arborist::CLI::Summary
  extend Arborist::CLI::Subcommand
  using Arborist::TimeRefinements

  # ASCII-art banner; the last entry is a format string receiving the
  # server version and node count.
  BANNER = [
    ' _ _ _',
    ' __ _ _ _| |__ ___ _ _(_)__| |',
    '/ _` | \'_| \'_ \\/ _ \\ \'_| (_-< _|',
    '\\__,_|_| |_.__/\\___/_| |_/__/\\__| %s, %s nodes',
  ]

  desc 'Summarize known problems'
  command :summary do |cmd|
    cmd.flag [:s, :sort],
      type: String,
      desc: "Sort output by this node key",
      arg_name: 'sort',
      default_value: 'status_changed'

    cmd.action do |globals, options, args|
      client = Arborist::Client.new
      status = client.status
      nodes = client.fetch

      # bucket nodes by status from the single fetch
      down = get_status( nodes, 'down' )
      acked = get_status( nodes, 'acked' )
      disabled = get_status( nodes, 'disabled' )
      quieted = get_status( nodes, 'quieted' )
      problems = ! ( down.size + acked.size + disabled.size ).zero?

      prompt.say "Connected to: %s" % [ highlight_string(client.tree_api_url) ]
      prompt.say "Status as of: %s" % [ hl.on_blue.bright_white(Time.now.to_s) ]
      (0..2).each do |i|
        prompt.say "%s" % [ hl.bold.bright_green( BANNER[i] ) ]
      end
      prompt.say hl.bold.bright_green( BANNER.last ) % [
        highlight_string(status['server_version']),
        highlight_string(status['nodecount'])
      ]
      puts

      if problems
        output_problems( disabled, acked, down, quieted, options[:sort] )
      else
        prompt.say success_string( "No problems found!" )
      end
    end
  end

  ###############
  module_function
  ###############

  ### Since we fetch all nodes instead of doing separate
  ### API searches, quickly return nodes of a given +status+.
  def get_status( nodes, status )
    return nodes.select{|n| n['status'] == status}
  end

  ### Output all problems.
  ###
  def output_problems( disabled, acked, down, quieted, sort )
    unless disabled.size.zero?
      prompt.say hl.headline( "Disabled Nodes" )
      display_table( *format_acked(disabled, sort) )
      puts
    end

    unless acked.size.zero?
      prompt.say hl.headline( "Acknowledged Outages" )
      display_table( *format_acked(acked, sort) )
      puts
    end

    unless down.size.zero?
      prompt.say hl.headline( "Current Outages" )
      header = [
        highlight_string( 'identifier' ),
        highlight_string( 'type' ),
        highlight_string( 'when' ),
        highlight_string( 'errors' )
      ]
      display_table( header, format_down(down, sort) )
      prompt.say "%d nodes have been %s as a result of the above problems." % [
        quieted.size,
        hl.quieted( 'quieted' )
      ]
      puts
    end
  end

  ### Prepare an array of acked/disabled nodes.
  def format_acked( nodes, sort_key )
    header = [
      highlight_string( 'identifier' ),
      highlight_string( 'type' ),
      highlight_string( 'when' ),
      highlight_string( 'who' ),
      highlight_string( 'message' )
    ]
    rows = nodes.sort_by{|n| n[sort_key] }.each_with_object([]) do |node, acc|
      acc << [
        hl.disabled( node['identifier'] ),
        node[ 'type' ],
        Time.parse( node[ 'status_changed' ] ).as_delta,
        node[ 'ack' ][ 'sender' ],
        node[ 'ack' ][ 'message' ]
      ]
    end
    return header, rows
  end

  ### Prepare an array of down nodes.
  def format_down( nodes, sort_key )
    return nodes.sort_by{|n| n[sort_key] }.each_with_object([]) do |node, acc|
      errors = node[ 'errors' ].map{|err| "%s: %s" % [ err.first, err.last ]}
      acc << [
        hl.down( node['identifier'] ),
        node[ 'type' ],
        Time.parse( node[ 'status_changed' ] ).as_delta,
        # double-quoted so errors are separated by real newlines,
        # not a literal backslash-n
        errors.join( "\n" )
      ]
    end
  end

end # module Arborist::CLI::Summary
Fix newline display in summary mode.
# -*- ruby -*-
#encoding: utf-8
require 'time'
require 'arborist/cli' unless defined?( Arborist::CLI )
require 'arborist/client'
# Command to fetch down/acked/disabled nodes for quick display.
module Arborist::CLI::Summary
  extend Arborist::CLI::Subcommand
  using Arborist::TimeRefinements

  # ASCII-art banner; the last entry is a format string receiving the
  # server version and node count.
  BANNER = [
    ' _ _ _',
    ' __ _ _ _| |__ ___ _ _(_)__| |',
    '/ _` | \'_| \'_ \\/ _ \\ \'_| (_-< _|',
    '\\__,_|_| |_.__/\\___/_| |_/__/\\__| %s, %s nodes',
  ]

  desc 'Summarize known problems'
  command :summary do |cmd|
    cmd.flag [:s, :sort],
      type: String,
      desc: "Sort output by this node key",
      arg_name: 'sort',
      default_value: 'status_changed'

    cmd.action do |globals, options, args|
      client = Arborist::Client.new
      status = client.status
      nodes = client.fetch

      # bucket nodes by status from the single fetch
      down = get_status( nodes, 'down' )
      acked = get_status( nodes, 'acked' )
      disabled = get_status( nodes, 'disabled' )
      quieted = get_status( nodes, 'quieted' )
      problems = ! ( down.size + acked.size + disabled.size ).zero?

      prompt.say "Connected to: %s" % [ highlight_string(client.tree_api_url) ]
      prompt.say "Status as of: %s" % [ hl.on_blue.bright_white(Time.now.to_s) ]
      (0..2).each do |i|
        prompt.say "%s" % [ hl.bold.bright_green( BANNER[i] ) ]
      end
      prompt.say hl.bold.bright_green( BANNER.last ) % [
        highlight_string(status['server_version']),
        highlight_string(status['nodecount'])
      ]
      puts

      if problems
        output_problems( disabled, acked, down, quieted, options[:sort] )
      else
        prompt.say success_string( "No problems found!" )
      end
    end
  end

  ###############
  module_function
  ###############

  ### Since we fetch all nodes instead of doing separate
  ### API searches, quickly return nodes of a given +status+.
  def get_status( nodes, status )
    return nodes.select{|n| n['status'] == status}
  end

  ### Output all problems.
  ###
  def output_problems( disabled, acked, down, quieted, sort )
    unless disabled.size.zero?
      prompt.say hl.headline( "Disabled Nodes" )
      display_table( *format_acked(disabled, sort) )
      puts
    end

    unless acked.size.zero?
      prompt.say hl.headline( "Acknowledged Outages" )
      display_table( *format_acked(acked, sort) )
      puts
    end

    unless down.size.zero?
      prompt.say hl.headline( "Current Outages" )
      header = [
        highlight_string( 'identifier' ),
        highlight_string( 'type' ),
        highlight_string( 'when' ),
        highlight_string( 'errors' )
      ]
      display_table( header, format_down(down, sort) )
      prompt.say "%d nodes have been %s as a result of the above problems." % [
        quieted.size,
        hl.quieted( 'quieted' )
      ]
      puts
    end
  end

  ### Prepare an array of acked/disabled nodes.
  def format_acked( nodes, sort_key )
    header = [
      highlight_string( 'identifier' ),
      highlight_string( 'type' ),
      highlight_string( 'when' ),
      highlight_string( 'who' ),
      highlight_string( 'message' )
    ]
    rows = nodes.sort_by{|n| n[sort_key] }.each_with_object([]) do |node, acc|
      acc << [
        hl.disabled( node['identifier'] ),
        node[ 'type' ],
        Time.parse( node[ 'status_changed' ] ).as_delta,
        node[ 'ack' ][ 'sender' ],
        node[ 'ack' ][ 'message' ]
      ]
    end
    return header, rows
  end

  ### Prepare an array of down nodes.
  def format_down( nodes, sort_key )
    return nodes.sort_by{|n| n[sort_key] }.each_with_object([]) do |node, acc|
      errors = node[ 'errors' ].map{|err| "%s: %s" % [ err.first, err.last ]}
      acc << [
        hl.down( node['identifier'] ),
        node[ 'type' ],
        Time.parse( node[ 'status_changed' ] ).as_delta,
        # double-quoted: each error goes on its own line
        errors.join( "\n" )
      ]
    end
  end

end # module Arborist::CLI::Summary
|
module Ars
  module Doppelganger
    # Current release of the gem.
    VERSION = '0.1.0'
  end
end
Bump to 0.2.0
module Ars
  module Doppelganger
    # Current release of the gem.
    VERSION = '0.2.0'
  end
end
|
require 'ass_maintainer/info_base/version'
require 'ass_launcher'
module AssMaintainer
  # rubocop:disable Metrics/ClassLength
  # Class for manipulating a 1C:Enterprise application instance, aka
  # +information base+ or +infobase+.
  #
  # Instances of this class have a dynamically generated interface.
  #
  # A 1C:Enterprise application may be deployed as a file (aka file infobase)
  # or on a 1C:Enterprise server (aka server infobase). In {#initialize} the
  # instance will be extended by the suitable module:
  # - a server infobase instance will be extended by the {ServerIb} module
  # - a file infobase instance will be extended by the {FileIb} module
  #
  # Both instance types inherit methods from {Interfaces::InfoBase}.
  #
  # All instances get wrapper methods for access to {#options}, see
  # {.build_options_wrapper}.
  class InfoBase
    extend AssLauncher::Api
    require 'ass_maintainer/info_base/config'
    require 'ass_maintainer/info_base/interfaces'
    require 'ass_maintainer/info_base/default_maker'
    require 'ass_maintainer/info_base/file_ib'
    require 'ass_maintainer/info_base/server_ib'
    require 'ass_maintainer/info_base/cfg'

    # Raised when a destructive method is called on a read-only infobase.
    class MethodDenied < StandardError
      def initialize(m)
        super "Infobase is read only. Method #{m} denied!"
      end
    end

    # Default port for connecting to the 1C:Enterprise server agent
    DEFAULT_SAGENT_PORT = '1540'

    # Hooks before and after making and removing the infobase. Hooks may be
    # passed as options or set later, see {#add_hook}
    HOOKS = {
      before_make: ->(ib) {},
      after_make: ->(ib) {},
      before_rm: ->(ib) {},
      after_rm: ->(ib) {}
    }

    # By default {DefaultMaker} and {FileIb::FileBaseDestroyer} or
    # {ServerIb::ServerBaseDestroyer} are used for making and removing the
    # infobase, but a custom maker and destroyer may be passed via {#options}.
    # Maker and destroyer must implement {Interfaces::IbMaker} and
    # {Interfaces::IbDestroyer}
    WORKERS = {
      maker: nil,
      destroyer: nil
    }

    # - +:platform_require+ Required 1C:Enterprise version
    # - +:sagent_host+ Host name of 1C:Enterprise server agent
    # - +:sagent_port+ TCP port of 1C:Enterprise server agent, by
    #   default {DEFAULT_SAGENT_PORT}
    # - +:sagent_usr+ Admin for 1C:Enterprise server agent
    # - +:sagent_pwd+ Admin password for 1C:Enterprise server agent
    # - +:cluster_usr+ Admin for 1C:Enterprise cluster.
    # - +:cluster_pwd+ Password of admin for 1C:Enterprise cluster.
    # - +:unlock_code+ Code for connect to locked infobase aka "/UC" parameter
    ARGUMENTS = {
      platform_require: nil,
      sagent_host: nil,
      sagent_port: nil,
      sagent_usr: nil,
      sagent_pwd: nil,
      cluster_usr: nil,
      cluster_pwd: nil,
      unlock_code: nil
    }

    # All recognized option keys with their default values.
    OPTIONS = (ARGUMENTS.merge HOOKS).merge WORKERS

    # Dynamically builds option wrapper methods: readers for all options
    # except workers; writers for plain arguments only (hooks are set
    # via {#add_hook}).
    def self.build_options_wrapper
      OPTIONS.each_key do |key|
        next if WORKERS.keys.include? key
        define_method key do
          options[key]
        end
        next if HOOKS.keys.include? key
        define_method "#{key}=".to_sym do |arg|
          options[key] = arg
        end
      end
    end
    build_options_wrapper

    # see {#initialize} +name+
    attr_reader :name
    # see {#initialize} +connection_string+
    attr_reader :connection_string
    # see {#initialize} +options+
    attr_reader :options
    # InfoBase is read only:
    # destructive methods fail with a {MethodDenied} error
    attr_reader :read_only
    alias_method :read_only?, :read_only

    # @param name [String] name of infobase
    # @param connection_string [String AssLauncher::Support::ConnectionString]
    # @param read_only [true false] infobase is read only or not
    # @param options [Hash] see {OPTIONS}
    def initialize(name, connection_string, read_only = true, **options)
      @name = name
      @connection_string = self.class.cs(connection_string.to_s)
      @read_only = read_only
      @options = validate_options(options)
      case self.connection_string.is
      when :file then extend FileIb
      when :server then extend ServerIb
      else fail ArgumentError
      end
      yield self if block_given?
    end

    # Merges +options+ over the defaults.
    # @raise [ArgumentError] on unknown option keys
    def validate_options(options)
      _opts = options.keys - OPTIONS.keys
      fail ArgumentError, "Unknown options: #{_opts}" unless _opts.empty?
      OPTIONS.merge(options)
    end
    private :validate_options

    # Add hook. +self+ will be passed to all hooks
    # @raise [ArgumentError] if invalid hook name or no block given
    # @param hook [Symbol] hook name
    def add_hook(hook, &block)
      fail ArgumentError, "Invalid hook `#{hook}'" unless\
        HOOKS.keys.include? hook
      fail ArgumentError, 'Block require' unless block_given?
      options[hook] = block
    end

    # Required 1C version
    # @return [String]
    def platform_require
      options[:platform_require] || self.class.config.platform_require
    end

    # Rebuild infobase: first calls {#rm!}, then {#make}
    # @raise (see #rm!)
    def rebuild!(sure = :no)
      rm! sure
      make
    end

    # (see #make_infobase!)
    def make
      make_infobase! unless exists?
      self
    end

    # Make new empty infobase,
    # wrapped in +before_make+ and +after_make+ hooks
    # @raise [MethodDenied] if infobase {#read_only?}
    def make_infobase!
      fail MethodDenied, :make_infobase! if read_only?
      before_make.call(self)
      maker.execute(self)
      after_make.call(self)
      self
    end
    private :make_infobase!

    # (see #rm_infobase!)
    def rm!(sure = :no)
      fail 'If you are sure pass :yes value' unless sure == :yes
      return unless exists?
      rm_infobase!
      nil
    end

    # Remove infobase,
    # wrapped in +before_rm+ and +after_rm+ hooks
    # @raise [MethodDenied] if infobase {#read_only?}
    def rm_infobase!
      fail MethodDenied, :rm_infobase! if read_only?
      before_rm.call(self)
      destroyer.execute(self)
      after_rm.call(self)
    end
    private :rm_infobase!

    # @return [AssLauncher::Enterprise::BinaryWrapper::ThickClient]
    def thick
      self.class.thicks(platform_require).last ||
        fail("Platform 1C #{platform_require} not found")
    end

    # @return [AssLauncher::Enterprise::BinaryWrapper::ThinClient]
    def thin
      self.class.thins(platform_require).last ||
        fail("Platform 1C #{platform_require} not found")
    end

    # Get ole connector specified in +type+ parameter
    # @param type [Symbol] see +AssLauncher::Api#ole+
    def ole(type)
      self.class.ole(type, ole_requirement)
    end

    # Pins the OLE requirement to the exact thick-client version.
    def ole_requirement
      "= #{thick.version}"
    end
    private :ole_requirement

    # Guard used by commands that need an existing infobase.
    def fail_if_not_exists
      fail 'Infobase not exists' unless exists?
    end
    private :fail_if_not_exists

    # Build command for run designer;
    # block will be passed to arguments builder
    # @return [AssLauncher::Support::Shell::Command]
    def designer(&block)
      command(:thick, :designer, &block)
    end

    # Build command for run enterprise;
    # block will be passed to arguments builder
    # @param client [Symbol] +:thin+ or +:thick+ client
    # @return [AssLauncher::Support::Shell::Command]
    def enterprise(client, &block)
      command(client, :enterprise, &block)
    end

    # Builds a client command in the given +mode+.
    def command(client, mode, &block)
      fail_if_not_exists
      case client
      when :thin then
        thin.command(connection_string.to_args + common_args, &block)
      when :thick then
        thick.command(mode, connection_string.to_args + common_args,
                      &block)
      else
        fail ArgumentError, "Invalid client #{client}"
      end
    end
    private :command

    # Common arguments for all commands
    def common_args
      r = []
      r += ['/L', locale] if locale
      r += ['/UC', unlock_code] if unlock_code
      r
    end

    # Dump infobase to +.dt+ file
    def dump(path)
      designer do
        dumpIB path
      end.run.wait.result.verify!
      path
    end

    # Restore infobase from +.dt+ file
    # @raise [MethodDenied] if {#read_only?}
    def restore!(path)
      fail MethodDenied, :restore! if read_only?
      designer do
        restoreIB path
      end.run.wait.result.verify!
      path
    end

    # Returns an instance for manipulating the
    # InfoBase database. If the infobase does not
    # exist, returns nil
    # @return [DbCfg nil]
    def db_cfg
      @db_cfg ||= DbCfg.new(self) if exists?
    end

    # Returns an instance for manipulating the
    # database configuration. If the infobase does not
    # exist, returns nil
    # @return [Cfg nil]
    def cfg
      @cfg ||= Cfg.new(self) if exists?
    end

    # Returns type of infobase
    # @return [Symbol] +:file+ or +:server+
    def is
      connection_string.is
    end

    # Check type of infobase
    # @param type [Symbol] +:file+ or +:server+
    def is?(type)
      connection_string.is?(type)
    end

    # Set user name
    def usr=(user_name)
      connection_string.usr = user_name
    end
    alias_method :user=, :usr=

    # User name
    # @return [String]
    def usr
      connection_string.usr
    end
    alias_method :user, :usr

    # Set locale
    # @param l [String] locale code +en+, +ru+ etc
    def locale=(l)
      connection_string.locale = l
    end

    # Get locale
    # @return [String]
    def locale
      connection_string.locale
    end

    # Set user password
    def pwd=(password)
      connection_string.pwd = password
    end
    alias_method :password=, :pwd=

    # User password
    # @return [String]
    def pwd
      connection_string.pwd
    end
    alias_method :password, :pwd

    include Interfaces::InfoBase
  end
  # rubocop:enable Metrics/ClassLength
end
fix doc
require 'ass_maintainer/info_base/version'
require 'ass_launcher'
module AssMaintainer
  # rubocop:disable Metrics/ClassLength
  # Class for manipulating a 1C:Enterprise application instance, aka
  # +information base+ or +infobase+.
  #
  # Instances of this class have a dynamically generated interface.
  #
  # A 1C:Enterprise application may be deployed as a file (aka file infobase)
  # or on a 1C:Enterprise server (aka server infobase). In {#initialize} the
  # instance will be extended by the suitable module:
  # - a server infobase instance will be extended by the {ServerIb} module
  # - a file infobase instance will be extended by the {FileIb} module
  #
  # Both instance types inherit methods from {Interfaces::InfoBase}.
  #
  # All instances get wrapper methods for access to {#options}, see
  # {.build_options_wrapper}.
  class InfoBase
    extend AssLauncher::Api
    require 'ass_maintainer/info_base/config'
    require 'ass_maintainer/info_base/interfaces'
    require 'ass_maintainer/info_base/default_maker'
    require 'ass_maintainer/info_base/file_ib'
    require 'ass_maintainer/info_base/server_ib'
    require 'ass_maintainer/info_base/cfg'

    # Raised when a destructive method is called on a read-only infobase.
    class MethodDenied < StandardError
      def initialize(m)
        super "Infobase is read only. Method #{m} denied!"
      end
    end

    # Default port for connecting to the 1C:Enterprise server agent
    DEFAULT_SAGENT_PORT = '1540'

    # Hooks before and after making and removing the infobase. Hooks may be
    # passed as options or set later, see {#add_hook}
    HOOKS = {
      before_make: ->(ib) {},
      after_make: ->(ib) {},
      before_rm: ->(ib) {},
      after_rm: ->(ib) {}
    }

    # By default {DefaultMaker} and {FileIb::FileBaseDestroyer} or
    # {ServerIb::ServerBaseDestroyer} are used for making and removing the
    # infobase, but a custom maker and destroyer may be passed via {#options}.
    # Maker and destroyer must implement {Interfaces::IbMaker} and
    # {Interfaces::IbDestroyer}
    WORKERS = {
      maker: nil,
      destroyer: nil
    }

    # - +:platform_require+ Required 1C:Enterprise version
    # - +:sagent_host+ Host name of 1C:Enterprise server agent
    # - +:sagent_port+ TCP port of 1C:Enterprise server agent, by
    #   default {DEFAULT_SAGENT_PORT}
    # - +:sagent_usr+ Admin for 1C:Enterprise server agent
    # - +:sagent_pwd+ Admin password for 1C:Enterprise server agent
    # - +:cluster_usr+ Admin for 1C:Enterprise cluster.
    # - +:cluster_pwd+ Password of admin for 1C:Enterprise cluster.
    # - +:unlock_code+ Code for connect to locked infobase aka "/UC" parameter
    ARGUMENTS = {
      platform_require: nil,
      sagent_host: nil,
      sagent_port: nil,
      sagent_usr: nil,
      sagent_pwd: nil,
      cluster_usr: nil,
      cluster_pwd: nil,
      unlock_code: nil
    }

    # All recognized option keys with their default values.
    OPTIONS = (ARGUMENTS.merge HOOKS).merge WORKERS

    # Dynamically builds option wrapper methods: readers for all options
    # except workers; writers for plain arguments only (hooks are set
    # via {#add_hook}).
    def self.build_options_wrapper
      OPTIONS.each_key do |key|
        next if WORKERS.keys.include? key
        define_method key do
          options[key]
        end
        next if HOOKS.keys.include? key
        define_method "#{key}=".to_sym do |arg|
          options[key] = arg
        end
      end
    end
    build_options_wrapper

    # see {#initialize} +name+
    attr_reader :name
    # see {#initialize} +connection_string+
    attr_reader :connection_string
    # see {#initialize} +options+
    attr_reader :options
    # InfoBase is read only:
    # destructive methods fail with a {MethodDenied} error
    attr_reader :read_only
    alias_method :read_only?, :read_only

    # @param name [String] name of infobase
    # @param connection_string [String AssLauncher::Support::ConnectionString]
    # @param read_only [true false] infobase is read only or not
    # @param options [Hash] see {OPTIONS}
    def initialize(name, connection_string, read_only = true, **options)
      @name = name
      @connection_string = self.class.cs(connection_string.to_s)
      @read_only = read_only
      @options = validate_options(options)
      case self.connection_string.is
      when :file then extend FileIb
      when :server then extend ServerIb
      else fail ArgumentError
      end
      yield self if block_given?
    end

    # Merges +options+ over the defaults.
    # @raise [ArgumentError] on unknown option keys
    def validate_options(options)
      _opts = options.keys - OPTIONS.keys
      fail ArgumentError, "Unknown options: #{_opts}" unless _opts.empty?
      OPTIONS.merge(options)
    end
    private :validate_options

    # Add hook. +self+ will be passed to all hooks
    # @raise [ArgumentError] if invalid hook name or no block given
    # @param hook [Symbol] hook name
    def add_hook(hook, &block)
      fail ArgumentError, "Invalid hook `#{hook}'" unless\
        HOOKS.keys.include? hook
      fail ArgumentError, 'Block require' unless block_given?
      options[hook] = block
    end

    # Required 1C version
    # @return [String]
    def platform_require
      options[:platform_require] || self.class.config.platform_require
    end

    # Rebuild infobase: first calls {#rm!}, then {#make}
    # @raise (see #rm!)
    def rebuild!(sure = :no)
      rm! sure
      make
    end

    # (see #make_infobase!)
    def make
      make_infobase! unless exists?
      self
    end

    # Make new empty infobase,
    # wrapped in +before_make+ and +after_make+ hooks
    # @raise [MethodDenied] if infobase {#read_only?}
    def make_infobase!
      fail MethodDenied, :make_infobase! if read_only?
      before_make.call(self)
      maker.execute(self)
      after_make.call(self)
      self
    end
    private :make_infobase!

    # (see #rm_infobase!)
    def rm!(sure = :no)
      fail 'If you are sure pass :yes value' unless sure == :yes
      return unless exists?
      rm_infobase!
      nil
    end

    # Remove infobase,
    # wrapped in +before_rm+ and +after_rm+ hooks
    # @raise [MethodDenied] if infobase {#read_only?}
    def rm_infobase!
      fail MethodDenied, :rm_infobase! if read_only?
      before_rm.call(self)
      destroyer.execute(self)
      after_rm.call(self)
    end
    private :rm_infobase!

    # @return [AssLauncher::Enterprise::BinaryWrapper::ThickClient]
    def thick
      self.class.thicks(platform_require).last ||
        fail("Platform 1C #{platform_require} not found")
    end

    # @return [AssLauncher::Enterprise::BinaryWrapper::ThinClient]
    def thin
      self.class.thins(platform_require).last ||
        fail("Platform 1C #{platform_require} not found")
    end

    # Get ole connector specified in +type+ parameter
    # @param type [Symbol] see +AssLauncher::Api#ole+
    def ole(type)
      self.class.ole(type, ole_requirement)
    end

    # Pins the OLE requirement to the exact thick-client version.
    def ole_requirement
      "= #{thick.version}"
    end
    private :ole_requirement

    # Guard used by commands that need an existing infobase.
    def fail_if_not_exists
      fail 'Infobase not exists' unless exists?
    end
    private :fail_if_not_exists

    # Build command for run designer;
    # block will be passed to arguments builder
    # @return [AssLauncher::Support::Shell::Command]
    def designer(&block)
      command(:thick, :designer, &block)
    end

    # Build command for run enterprise;
    # block will be passed to arguments builder
    # @param client [Symbol] +:thin+ or +:thick+ client
    # @return [AssLauncher::Support::Shell::Command]
    def enterprise(client, &block)
      command(client, :enterprise, &block)
    end

    # Builds a client command in the given +mode+.
    def command(client, mode, &block)
      fail_if_not_exists
      case client
      when :thin then
        thin.command(connection_string.to_args + common_args, &block)
      when :thick then
        thick.command(mode, connection_string.to_args + common_args,
                      &block)
      else
        fail ArgumentError, "Invalid client #{client}"
      end
    end
    private :command

    # Common arguments for all commands
    def common_args
      r = []
      r += ['/L', locale] if locale
      r += ['/UC', unlock_code] if unlock_code
      r
    end

    # Dump infobase to +.dt+ file
    def dump(path)
      designer do
        dumpIB path
      end.run.wait.result.verify!
      path
    end

    # Restore infobase from +.dt+ file
    # @raise [MethodDenied] if {#read_only?}
    def restore!(path)
      fail MethodDenied, :restore! if read_only?
      designer do
        restoreIB path
      end.run.wait.result.verify!
      path
    end

    # Returns an instance for manipulating the
    # InfoBase database. If the infobase does not
    # exist, returns nil
    # @return [DbCfg nil]
    def db_cfg
      @db_cfg ||= DbCfg.new(self) if exists?
    end

    # Returns an instance for manipulating the
    # database configuration. If the infobase does not
    # exist, returns nil
    # @return [Cfg nil]
    def cfg
      @cfg ||= Cfg.new(self) if exists?
    end

    # Returns type of infobase
    # @return [Symbol] +:file+ or +:server+
    def is
      connection_string.is
    end

    # Check type of infobase
    # @param type [Symbol] +:file+ or +:server+
    def is?(type)
      connection_string.is?(type)
    end

    # Set user name
    def usr=(user_name)
      connection_string.usr = user_name
    end
    alias_method :user=, :usr=

    # User name
    # @return [String]
    def usr
      connection_string.usr
    end
    alias_method :user, :usr

    # Set locale
    # @param l [String] locale code +en+, +ru+ etc
    def locale=(l)
      connection_string.locale = l
    end

    # Get locale
    # @return [String]
    def locale
      connection_string.locale
    end

    # Set user password
    def pwd=(password)
      connection_string.pwd = password
    end
    alias_method :password=, :pwd=

    # User password
    # @return [String]
    def pwd
      connection_string.pwd
    end
    alias_method :password, :pwd

    include Interfaces::InfoBase
  end
  # rubocop:enable Metrics/ClassLength
end
|
module Ast
  # assignment, like operators are really function calls
  class CallSiteExpression < Expression
    # attr_reader :name, :args , :receiver

    # Compiles this call site into machine code appended to +into+.
    #
    # Compiles the argument expressions, resolves the receiver (a literal
    # `self` resolves to the receiver register; anything else is compiled),
    # looks up or creates the target function on the current class, then
    # emits the call wrapped in save/restore of the caller's locals.
    #
    # Returns the called function's return type.
    def compile context , into
      params = args.collect{ |a| a.compile(context, into) }
      if receiver.is_a?(NameExpression) and (receiver.name == :self)
        function = context.current_class.get_or_create_function(name)
        value_receiver = Vm::Integer.new(Vm::Function::RECEIVER_REG)
      else
        value_receiver = receiver.compile(context , into)
        function = context.current_class.get_or_create_function(name)
      end
      # this lot below should go, since the compile should handle all
      if receiver.is_a? VariableExpression
        raise "not implemented instance var:#{receiver}"
      end
      raise "No such method error #{3.to_s}:#{name}" if (function.nil?)
      raise "No receiver error #{inspect}:#{value_receiver}:#{name}" if (value_receiver.nil?)
      call = Vm::CallSite.new( name , value_receiver , params , function)
      current_function = context.function
      # spill caller locals around the call so callee register use is safe
      current_function.save_locals(context , into) if current_function
      call.load_args into
      call.do_call into
      current_function.restore_locals(context , into) if current_function
      puts "compile call #{function.return_type}"
      function.return_type
    end
  end

  # Instance-variable read, modelled as a call site.
  class VariableExpression < CallSiteExpression
    # super( :_get_instance_variable , [StringExpression.new(name)] )
  end
end
adding call counter and splitting block after call
module Ast
  # assignment, like operators are really function calls
  class CallSiteExpression < Expression
    # attr_reader :name, :args , :receiver

    # Monotonic counter used to give each post-call block a unique name.
    # NOTE(review): class variable is shared across all call sites/threads.
    @@counter = 0

    # Compiles this call site into machine code appended to +into+.
    #
    # Compiles the argument expressions, resolves the receiver (a literal
    # `self` resolves to the receiver register; anything else is compiled),
    # looks up or creates the target function on the current class, then
    # emits the call. Control continues in a freshly created block after
    # the call, where the caller's locals are restored.
    #
    # Returns the called function's return type.
    def compile context , into
      params = args.collect{ |a| a.compile(context, into) }
      if receiver.is_a?(NameExpression) and (receiver.name == :self)
        function = context.current_class.get_or_create_function(name)
        value_receiver = Vm::Integer.new(Vm::Function::RECEIVER_REG)
      else
        value_receiver = receiver.compile(context , into)
        function = context.current_class.get_or_create_function(name)
      end
      # this lot below should go, since the compile should handle all
      if receiver.is_a? VariableExpression
        raise "not implemented instance var:#{receiver}"
      end
      raise "No such method error #{3.to_s}:#{name}" if (function.nil?)
      raise "No receiver error #{inspect}:#{value_receiver}:#{name}" if (value_receiver.nil?)
      call = Vm::CallSite.new( name , value_receiver , params , function)
      current_function = context.function
      # spill caller locals around the call so callee register use is safe
      current_function.save_locals(context , into) if current_function
      call.load_args into
      call.do_call into
      # split the block after the call; subsequent code goes into the
      # uniquely named continuation block
      after = into.new_block("#{into.name}_call#{@@counter+=1}")
      into.insert_at after
      current_function.restore_locals(context , after) if current_function
      puts "compile call #{function.return_type}"
      function.return_type
    end
  end

  # Instance-variable read, modelled as a call site.
  class VariableExpression < CallSiteExpression
    # super( :_get_instance_variable , [StringExpression.new(name)] )
  end
end
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
require 'json'
require 'timeout'
module Astute
class DeploymentEngine
def initialize(context)
if self.class.superclass.name == 'Object'
raise "Instantiation of this superclass is not allowed. Please subclass from #{self.class.name}."
end
@ctx = context
end
def deploy(nodes, attrs)
# See implementation in subclasses, this may be everriden
attrs['deployment_mode'] ||= 'multinode' # simple multinode deployment is the default
attrs['use_cinder'] ||= nodes.any?{|n| n['role'] == 'cinder'}
@ctx.deploy_log_parser.deploy_type = attrs['deployment_mode']
Astute.logger.info "Deployment mode #{attrs['deployment_mode']}"
result = self.send("deploy_#{attrs['deployment_mode']}", nodes, attrs)
end
def method_missing(method, *args)
Astute.logger.error "Method #{method} is not implemented for #{self.class}, raising exception."
raise "Method #{method} is not implemented for #{self.class}"
end
# we mix all attrs and prepare them for Puppet
# Works for multinode deployment mode
def attrs_multinode(nodes, attrs)
attrs['nodes'] = nodes.map do |n|
{
'fqdn' => n['fqdn'],
'name' => n['fqdn'].split(/\./)[0],
'role' => n['role'],
'internal_address' => n['network_data'].select {|nd| select_ifaces(nd['name'], 'management')}[0]['ip'].split(/\//)[0],
'internal_br' => n['internal_br'],
'internal_netmask' => n['network_data'].select {|nd| select_ifaces(nd['name'], 'management')}[0]['netmask'],
'storage_address' => n['network_data'].select {|nd| select_ifaces(nd['name'], 'storage')}[0]['ip'].split(/\//)[0],
'storage_netmask' => n['network_data'].select {|nd| select_ifaces(nd['name'], 'storage')}[0]['netmask'],
'public_address' => n['network_data'].select {|nd| select_ifaces(nd['name'], 'public')}[0]['ip'].split(/\//)[0],
'public_br' => n['public_br'],
'public_netmask' => n['network_data'].select {|nd| select_ifaces(nd['name'], 'public')}[0]['netmask'],
'default_gateway' => n['default_gateway']
}
end
# TODO(mihgen): we should report error back if there are not enough metadata passed
attrs
end
# This method is called by Ruby metaprogramming magic from deploy method
# It should not contain any magic with attributes, and should not directly run any type of MC plugins
# It does only support of deployment sequence. See deploy_piece implementation in subclasses.
def deploy_multinode(nodes, attrs)
ctrl_nodes = nodes.select {|n| n['role'] == 'controller'}
other_nodes = nodes - ctrl_nodes
Astute.logger.info "Starting deployment of primary controller"
deploy_piece(ctrl_nodes, attrs)
Astute.logger.info "Starting deployment of other nodes"
deploy_piece(other_nodes, attrs)
return
end
def attrs_ha(nodes, attrs)
# we use the same set of mount points for all storage nodes
attrs['mp'] = [{'point' => '1', 'weight' => '1'},{'point'=>'2','weight'=>'2'}]
mountpoints = ""
attrs['mp'].each do |mountpoint|
mountpoints << "#{mountpoint['point']} #{mountpoint['weight']}\n"
end
Astute.logger.debug("#{nodes}")
attrs['nodes'] = nodes.map do |n|
{
'fqdn' => n['fqdn'],
'name' => n['fqdn'].split(/\./)[0],
'role' => n['role'],
'mountpoints' => mountpoints,
'internal_address' => n['network_data'].select {|nd| select_ifaces(nd['name'], 'management')}[0]['ip'].split(/\//)[0],
'internal_br' => n['internal_br'],
'internal_netmask' => n['network_data'].select {|nd| select_ifaces(nd['name'], 'management')}[0]['netmask'],
'public_address' => n['network_data'].select {|nd| select_ifaces(nd['name'], 'public')}[0]['ip'].split(/\//)[0],
'public_br' => n['public_br'],
'public_netmask' => n['network_data'].select {|nd| select_ifaces(nd['name'], 'public')}[0]['netmask'],
'swift_zone' => n['id'],
'storage_address' => n['network_data'].select {|nd| select_ifaces(nd['name'], 'storage')}[0]['ip'].split(/\//)[0],
'storage_netmask' => n['network_data'].select {|nd| select_ifaces(nd['name'], 'storage')}[0]['netmask'],
'default_gateway' => n['default_gateway']
}
end
if attrs['nodes'].select { |node| node['role'] == 'primary-controller' }.empty?
ctrl_nodes = attrs['nodes'].select {|n| n['role'] == 'controller'}
ctrl_nodes[0]['role'] = 'primary-controller'
end
attrs
end
alias :attrs_ha_full :attrs_ha
alias :attrs_ha_compact :attrs_ha
def deploy_ha_full(nodes, attrs)
primary_ctrl_nodes = nodes.select {|n| n['role'] == 'primary-controller'}
ctrl_nodes = nodes.select {|n| n['role'] == 'controller'}
compute_nodes = nodes.select {|n| n['role'] == 'compute'}
quantum_nodes = nodes.select {|n| n['role'] == 'quantum'}
storage_nodes = nodes.select {|n| n['role'] == 'storage'}
proxy_nodes = nodes.select {|n| n['role'] == 'swift-proxy'}
primary_proxy_nodes = nodes.select {|n| n['role'] == 'primary-swift-proxy'}
other_nodes = nodes - ctrl_nodes - primary_ctrl_nodes - \
primary_proxy_nodes - quantum_nodes - storage_nodes - proxy_nodes
Astute.logger.info "Starting deployment of primary swift proxy"
deploy_piece(primary_proxy_nodes, attrs)
Astute.logger.info "Starting deployment of non-primary swift proxies"
deploy_piece(proxy_nodes, attrs)
Astute.logger.info "Starting deployment of swift storages"
deploy_piece(storage_nodes, attrs)
Astute.logger.info "Starting deployment of primary controller"
deploy_piece(primary_ctrl_nodes, attrs)
Astute.logger.info "Starting deployment of all controllers one by one"
ctrl_nodes.each {|n| deploy_piece([n], attrs)}
Astute.logger.info "Starting deployment of other nodes"
deploy_piece(other_nodes, attrs)
return
end
# Deployment sequence for the compact HA mode: primary controller first,
# then the remaining controllers one by one, then everything else.
#
# nodes - Array of node hashes (must carry 'role').
# attrs - deployment attributes hash; 'last_controller' is filled in here
#         so puppet knows when it is safe to upload the test VM image.
#
# Returns nil.
def deploy_ha_compact(nodes, attrs)
  primary_ctrl_nodes  = nodes.select { |n| n['role'] == 'primary-controller' }
  ctrl_nodes          = nodes.select { |n| n['role'] == 'controller' }
  quantum_nodes       = nodes.select { |n| n['role'] == 'quantum' }
  primary_proxy_nodes = nodes.select { |n| n['role'] == 'primary-swift-proxy' }
  # NOTE: compute/storage/swift-proxy nodes intentionally stay in
  # other_nodes for the compact mode.
  other_nodes = nodes - ctrl_nodes - primary_ctrl_nodes -
                primary_proxy_nodes - quantum_nodes
  # FIXME: add last_controller attribute to attributes hash in order to determine
  # if we are the last controller in deployment sequence and it is safe to
  # upload test virtual machine image
  last_controller = ctrl_nodes.last
  # Guard: ctrl_nodes may be empty (e.g. a single primary-controller
  # deployment); the original code crashed with NoMethodError on nil here.
  if last_controller
    if last_controller['name']
      attrs['last_controller'] = last_controller['name']
    elsif last_controller['fqdn']
      attrs['last_controller'] = last_controller['fqdn'].split(/\./)[0]
    end
  end
  Astute.logger.info "Starting deployment of primary controller"
  deploy_piece(primary_ctrl_nodes, attrs)
  Astute.logger.info "Starting deployment of all controllers one by one"
  ctrl_nodes.each {|n| deploy_piece([n], attrs)}
  Astute.logger.info "Starting deployment of other nodes"
  deploy_piece(other_nodes, attrs)
  return
end
alias :deploy_ha :deploy_ha_compact
def attrs_rpmcache(nodes, attrs)
attrs
end
def deploy_rpmcache(nodes, attrs)
Astute.logger.info "Starting release downloading"
deploy_piece(nodes, attrs, 0)
end
private
# Predicate: does the network_data 'name' field +var+ refer to the
# logical network +name+? +var+ may be an Array of network names or a
# single String name; any other type never matches.
#
# Returns true or false (the original returned true/nil via dead-code
# assignments; truthiness is unchanged for all callers, which use it
# inside select blocks).
def select_ifaces(var, name)
  if var.is_a?(Array)
    var.include?(name)
  else
    var.is_a?(String) && var == name
  end
end
# Build a status report for the given nodes: each entry carries the node
# uid, the supplied status, and any extra keys from +data_to_merge+.
#
# Returns a Hash of the form {'nodes' => [{'uid' => ..., 'status' => ..., ...}]}.
def nodes_status(nodes, status, data_to_merge)
  reported = nodes.map do |node|
    entry = { 'uid' => node['uid'], 'status' => status }
    entry.merge(data_to_merge)
  end
  { 'nodes' => reported }
end
def validate_nodes(nodes)
if nodes.empty?
Astute.logger.info "#{@ctx.task_id}: Nodes to deploy are not provided. Do nothing."
return false
end
return true
end
def calculate_networks(data, hwinterfaces)
interfaces = {}
data ||= []
Astute.logger.info "calculate_networks function was provided with #{data.size} interfaces"
data.each do |net|
Astute.logger.debug "Calculating network for #{net.inspect}"
if net['vlan'] && net['vlan'] != 0
name = [net['dev'], net['vlan']].join('.')
else
name = net['dev']
end
unless interfaces.has_key?(name)
interfaces[name] = {'interface' => name, 'ipaddr' => []}
end
iface = interfaces[name]
if net['name'] == 'admin'
if iface['ipaddr'].size > 0
Astute.logger.error "Admin network interferes with openstack nets"
end
iface['ipaddr'] += ['dhcp']
else
if iface['ipaddr'].any?{|x| x == 'dhcp'}
Astute.logger.error "Admin network interferes with openstack nets"
end
if net['ip']
iface['ipaddr'] += [net['ip']]
end
if net['gateway'] && net['name'] =~ /^public$/i
iface['gateway'] = net['gateway']
end
end
Astute.logger.debug "Calculated network for interface: #{name}, data: #{iface.inspect}"
end
interfaces['lo'] = {'interface'=>'lo', 'ipaddr'=>['127.0.0.1/8']} unless interfaces.has_key?('lo')
hwinterfaces.each do |i|
unless interfaces.has_key?(i['name'])
interfaces[i['name']] = {'interface' => i['name'], 'ipaddr' => []}
end
end
interfaces.keys.each do |i|
interfaces[i]['ipaddr'] = 'none' if interfaces[i]['ipaddr'].size == 0
interfaces[i]['ipaddr'] = 'dhcp' if interfaces[i]['ipaddr'] == ['dhcp']
end
interfaces
end
end
end
Extract the last_controller assignment into attrs_ha so it is computed during attribute preparation.
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
require 'json'
require 'timeout'
module Astute
class DeploymentEngine
# context - shared deployment context object (provides task_id, the
#           deploy log parser, reporting, etc. — used throughout this class).
#
# DeploymentEngine is an abstract base class: creating an instance of a
# class whose direct superclass is Object (i.e. DeploymentEngine itself)
# is rejected; only subclasses may be instantiated.
def initialize(context)
  if self.class.superclass.name == 'Object'
    raise "Instantiation of this superclass is not allowed. Please subclass from #{self.class.name}."
  end
  @ctx = context
end
# Entry point: normalize deployment attributes and dispatch to the
# mode-specific deploy_<mode> method (deploy_multinode, deploy_ha_compact, ...).
# Unknown modes are reported by method_missing below.
#
# nodes - Array of node hashes.
# attrs - deployment attributes hash (mutated: deployment_mode / use_cinder
#         defaults are filled in).
#
# Returns whatever the mode-specific method returns.
def deploy(nodes, attrs)
  # See implementation in subclasses, this may be overridden
  attrs['deployment_mode'] ||= 'multinode' # simple multinode deployment is the default
  attrs['use_cinder'] ||= nodes.any? { |n| n['role'] == 'cinder' }
  @ctx.deploy_log_parser.deploy_type = attrs['deployment_mode']
  Astute.logger.info "Deployment mode #{attrs['deployment_mode']}"
  # Dispatch by mode; the useless `result =` local from the original is gone —
  # the send's value is the method's return value either way.
  send("deploy_#{attrs['deployment_mode']}", nodes, attrs)
end
def method_missing(method, *args)
Astute.logger.error "Method #{method} is not implemented for #{self.class}, raising exception."
raise "Method #{method} is not implemented for #{self.class}"
end
# we mix all attrs and prepare them for Puppet
# Works for multinode deployment mode
# Build the per-node parameter hashes Puppet consumes in 'multinode'
# mode and store them under attrs['nodes'].
#
# nodes - Array of node hashes. Each is assumed to carry 'fqdn', 'role',
#         'internal_br', 'public_br', 'default_gateway' and a
#         'network_data' array of {'name', 'ip', 'netmask'} entries
#         (assumed schema from the orchestrator — TODO confirm).
# attrs - deployment attributes hash, mutated in place.
#
# Returns the mutated attrs hash.
def attrs_multinode(nodes, attrs)
  attrs['nodes'] = nodes.map do |n|
    {
      'fqdn' => n['fqdn'],
      # short hostname: everything before the first dot of the FQDN
      'name' => n['fqdn'].split(/\./)[0],
      'role' => n['role'],
      # first network_data entry attached to each logical network;
      # NOTE(review): raises NoMethodError if a node lacks that network
      # or the 'ip' field — there is no error reporting here yet (see TODO).
      'internal_address' => n['network_data'].select {|nd| select_ifaces(nd['name'], 'management')}[0]['ip'].split(/\//)[0],
      'internal_br' => n['internal_br'],
      'internal_netmask' => n['network_data'].select {|nd| select_ifaces(nd['name'], 'management')}[0]['netmask'],
      'storage_address' => n['network_data'].select {|nd| select_ifaces(nd['name'], 'storage')}[0]['ip'].split(/\//)[0],
      'storage_netmask' => n['network_data'].select {|nd| select_ifaces(nd['name'], 'storage')}[0]['netmask'],
      'public_address' => n['network_data'].select {|nd| select_ifaces(nd['name'], 'public')}[0]['ip'].split(/\//)[0],
      'public_br' => n['public_br'],
      'public_netmask' => n['network_data'].select {|nd| select_ifaces(nd['name'], 'public')}[0]['netmask'],
      'default_gateway' => n['default_gateway']
    }
  end
  # TODO(mihgen): we should report error back if there are not enough metadata passed
  attrs
end
# This method is called by Ruby metaprogramming magic from deploy method
# It should not contain any magic with attributes, and should not directly run any type of MC plugins
# It does only support of deployment sequence. See deploy_piece implementation in subclasses.
# Deployment sequence for multinode mode: controllers first, then the
# rest of the cluster. Called via Ruby metaprogramming from #deploy; it
# only sequences deploy_piece calls (see subclasses for deploy_piece).
#
# Returns nil.
def deploy_multinode(nodes, attrs)
  controllers, rest = nodes.partition { |n| n['role'] == 'controller' }
  Astute.logger.info "Starting deployment of primary controller"
  deploy_piece(controllers, attrs)
  Astute.logger.info "Starting deployment of other nodes"
  deploy_piece(rest, attrs)
  return
end
# Build the per-node parameter hashes Puppet consumes in HA modes,
# promote a primary controller when none was designated, and record the
# last controller (so puppet knows when the test VM image may be uploaded).
#
# nodes - Array of node hashes (same assumed schema as attrs_multinode).
# attrs - deployment attributes hash, mutated in place.
#
# Returns the mutated attrs hash.
def attrs_ha(nodes, attrs)
  # we use the same set of mount points for all storage nodes
  attrs['mp'] = [{'point' => '1', 'weight' => '1'},{'point'=>'2','weight'=>'2'}]
  mountpoints = attrs['mp'].map { |mp| "#{mp['point']} #{mp['weight']}\n" }.join
  Astute.logger.debug("#{nodes}")
  # First network_data entry of node +n+ attached to logical network +net+.
  net_data = lambda do |n, net|
    n['network_data'].select { |nd| select_ifaces(nd['name'], net) }[0]
  end
  attrs['nodes'] = nodes.map do |n|
    {
      'fqdn' => n['fqdn'],
      'name' => n['fqdn'].split(/\./)[0],
      'role' => n['role'],
      'mountpoints' => mountpoints,
      'internal_address' => net_data.call(n, 'management')['ip'].split(/\//)[0],
      'internal_br' => n['internal_br'],
      'internal_netmask' => net_data.call(n, 'management')['netmask'],
      'public_address' => net_data.call(n, 'public')['ip'].split(/\//)[0],
      'public_br' => n['public_br'],
      'public_netmask' => net_data.call(n, 'public')['netmask'],
      'swift_zone' => n['id'],
      'storage_address' => net_data.call(n, 'storage')['ip'].split(/\//)[0],
      'storage_netmask' => net_data.call(n, 'storage')['netmask'],
      'default_gateway' => n['default_gateway']
    }
  end
  ctrl_nodes = attrs['nodes'].select { |n| n['role'] == 'controller' }
  if attrs['nodes'].none? { |node| node['role'] == 'primary-controller' }
    # Fail loudly instead of the original NoMethodError-on-nil when an HA
    # deployment has no controller nodes at all.
    raise "HA deployment requires at least one controller node" if ctrl_nodes.empty?
    ctrl_nodes[0]['role'] = 'primary-controller'
  end
  # Last plain controller, or the designated primary when it is the only one.
  last = ctrl_nodes.last || attrs['nodes'].find { |node| node['role'] == 'primary-controller' }
  attrs['last_controller'] = last['name'] if last
  attrs
end
alias :attrs_ha_full :attrs_ha
alias :attrs_ha_compact :attrs_ha
# Deployment sequence for the full HA reference architecture:
# swift proxies, then swift storage, then controllers (primary first,
# the rest one by one), then everything else.
#
# Returns nil.
def deploy_ha_full(nodes, attrs)
  primary_ctrl_nodes  = nodes.select { |n| n['role'] == 'primary-controller' }
  ctrl_nodes          = nodes.select { |n| n['role'] == 'controller' }
  quantum_nodes       = nodes.select { |n| n['role'] == 'quantum' }
  storage_nodes       = nodes.select { |n| n['role'] == 'storage' }
  proxy_nodes         = nodes.select { |n| n['role'] == 'swift-proxy' }
  primary_proxy_nodes = nodes.select { |n| n['role'] == 'primary-swift-proxy' }
  # (the unused compute_nodes local from the original has been dropped;
  # compute nodes are deployed as part of other_nodes)
  other_nodes = nodes - ctrl_nodes - primary_ctrl_nodes -
                primary_proxy_nodes - quantum_nodes - storage_nodes - proxy_nodes
  Astute.logger.info "Starting deployment of primary swift proxy"
  deploy_piece(primary_proxy_nodes, attrs)
  Astute.logger.info "Starting deployment of non-primary swift proxies"
  deploy_piece(proxy_nodes, attrs)
  Astute.logger.info "Starting deployment of swift storages"
  deploy_piece(storage_nodes, attrs)
  Astute.logger.info "Starting deployment of primary controller"
  deploy_piece(primary_ctrl_nodes, attrs)
  Astute.logger.info "Starting deployment of all controllers one by one"
  ctrl_nodes.each {|n| deploy_piece([n], attrs)}
  Astute.logger.info "Starting deployment of other nodes"
  deploy_piece(other_nodes, attrs)
  return
end
# Deployment sequence for the compact HA mode: primary controller first,
# then remaining controllers one by one, then everything else (including
# compute, storage and swift-proxy roles, which run with other_nodes here).
#
# Returns nil.
def deploy_ha_compact(nodes, attrs)
  primary_ctrl_nodes  = nodes.select { |n| n['role'] == 'primary-controller' }
  ctrl_nodes          = nodes.select { |n| n['role'] == 'controller' }
  quantum_nodes       = nodes.select { |n| n['role'] == 'quantum' }
  primary_proxy_nodes = nodes.select { |n| n['role'] == 'primary-swift-proxy' }
  # (unused locals compute_nodes / storage_nodes / proxy_nodes removed —
  # those roles were never subtracted from other_nodes in compact mode)
  other_nodes = nodes - ctrl_nodes - primary_ctrl_nodes -
                primary_proxy_nodes - quantum_nodes
  Astute.logger.info "Starting deployment of primary controller"
  deploy_piece(primary_ctrl_nodes, attrs)
  Astute.logger.info "Starting deployment of all controllers one by one"
  ctrl_nodes.each {|n| deploy_piece([n], attrs)}
  Astute.logger.info "Starting deployment of other nodes"
  deploy_piece(other_nodes, attrs)
  return
end
alias :deploy_ha :deploy_ha_compact
# rpmcache mode needs no attribute preparation; attrs are passed through.
def attrs_rpmcache(nodes, attrs)
  attrs
end
# Deployment sequence for rpmcache mode: download the release onto all
# nodes in a single pass (the trailing 0 is forwarded to deploy_piece —
# presumably a retry/progress argument; see subclass implementations).
def deploy_rpmcache(nodes, attrs)
  Astute.logger.info "Starting release downloading"
  deploy_piece(nodes, attrs, 0)
end
private
# Predicate: does the network_data 'name' field +var+ refer to the
# logical network +name+? +var+ may be an Array of network names or a
# single String name; any other type never matches.
#
# Returns true or false (the original returned true/nil via dead-code
# assignments; truthiness is unchanged for all callers, which use it
# inside select blocks).
def select_ifaces(var, name)
  if var.is_a?(Array)
    var.include?(name)
  else
    var.is_a?(String) && var == name
  end
end
# Build a status report for the given nodes: each entry carries the node
# uid, the supplied status, and any extra keys from +data_to_merge+.
#
# Returns a Hash of the form {'nodes' => [{'uid' => ..., 'status' => ..., ...}]}.
def nodes_status(nodes, status, data_to_merge)
  reported = nodes.map do |node|
    entry = { 'uid' => node['uid'], 'status' => status }
    entry.merge(data_to_merge)
  end
  { 'nodes' => reported }
end
# Check there is something to deploy.
#
# nodes - Array of node hashes.
#
# Returns false (after logging) when the list is empty, true otherwise.
def validate_nodes(nodes)
  if nodes.empty?
    Astute.logger.info "#{@ctx.task_id}: Nodes to deploy are not provided. Do nothing."
    return false
  end
  return true
end
# Fold raw network descriptions into a per-interface configuration hash.
#
# data         - Array of network hashes ('dev', 'vlan', 'name', 'ip',
#                'gateway'); may be nil.
# hwinterfaces - Array of physical interface hashes ('name'); interfaces
#                without any configured network get an empty config.
#
# Returns a Hash keyed by interface name (vlan interfaces are named
# "dev.vlan"); each value is {'interface', 'ipaddr', ['gateway']} where
# ipaddr ends up as an array of addresses, the string 'dhcp' (admin
# network) or 'none' (unconfigured).
def calculate_networks(data, hwinterfaces)
  interfaces = {}
  data ||= []
  Astute.logger.info "calculate_networks function was provided with #{data.size} interfaces"
  data.each do |net|
    Astute.logger.debug "Calculating network for #{net.inspect}"
    # vlan-tagged networks live on a "dev.vlan" sub-interface
    if net['vlan'] && net['vlan'] != 0
      name = [net['dev'], net['vlan']].join('.')
    else
      name = net['dev']
    end
    unless interfaces.has_key?(name)
      interfaces[name] = {'interface' => name, 'ipaddr' => []}
    end
    iface = interfaces[name]
    if net['name'] == 'admin'
      # the admin (PXE) network must be alone on its interface and uses DHCP
      if iface['ipaddr'].size > 0
        Astute.logger.error "Admin network interferes with openstack nets"
      end
      iface['ipaddr'] += ['dhcp']
    else
      if iface['ipaddr'].any?{|x| x == 'dhcp'}
        Astute.logger.error "Admin network interferes with openstack nets"
      end
      if net['ip']
        iface['ipaddr'] += [net['ip']]
      end
      # only the public network contributes a default gateway
      if net['gateway'] && net['name'] =~ /^public$/i
        iface['gateway'] = net['gateway']
      end
    end
    Astute.logger.debug "Calculated network for interface: #{name}, data: #{iface.inspect}"
  end
  # always provide loopback unless the data already configured it
  interfaces['lo'] = {'interface'=>'lo', 'ipaddr'=>['127.0.0.1/8']} unless interfaces.has_key?('lo')
  # physical interfaces without any network still get an (empty) entry
  hwinterfaces.each do |i|
    unless interfaces.has_key?(i['name'])
      interfaces[i['name']] = {'interface' => i['name'], 'ipaddr' => []}
    end
  end
  # normalize: no addresses -> 'none'; a lone dhcp entry -> 'dhcp'
  interfaces.keys.each do |i|
    interfaces[i]['ipaddr'] = 'none' if interfaces[i]['ipaddr'].size == 0
    interfaces[i]['ipaddr'] = 'dhcp' if interfaces[i]['ipaddr'] == ['dhcp']
  end
  interfaces
end
end
end
|
require 'aweplug/helpers/vimeo'
require 'aweplug/cache/yaml_file_cache'
require 'json'
require 'parallel'
module Aweplug
module Extensions
module Video
# Public: Awestruct Extension which iterates over a site variable which
# contains vimeo URLs and creates pages out of them, also sends
# the info over to a searchisko instance for indexing. This
# makes use of the Aweplug::Helper::Searchisko class, please see
# that class for more info on options and settings for Searchisko.
class Vimeo
include Aweplug::Helpers::Vimeo
# Public: Creates a new instance of this Awestruct plugin.
#
# variable_name - Name of the variable in the Awestruct Site containing
# the list of vimeo videos.
# layout - Name of the layout to be used for the generated Pages.
# push_to_searchisko - A boolean controlling whether a push to
# seachisko should happen. A push will not
# happen when the development profile is in
# use, regardless of the value of this
# option.
#
# Returns a new instance of this extension.
def initialize variable_name, layout, push_to_searchisko = true
@variable = variable_name
@layout = layout
@push_to_searchisko = push_to_searchisko
end
# Public: Generate a page (and optionally a Searchisko push) for every
# video referenced by the site variable — both album members and the
# flat video URL list.
#
# site - the Awestruct Site being built.
#
# Returns nothing of significance.
def execute site
  @site = site
  site.send("video_cache=", {})
  if site.cache.nil?
    site.send('cache=', Aweplug::Cache::YamlFileCache.new)
  end
  # Iterate over the albums, call the vimeo endpoint
  # for each video in the response create page
  Parallel.each(site[@variable]["albums"], in_threads: 10) do |album|
    # TODO: do something about pagination, if / when we hit that issue
    albumJson = JSON.load(exec_method('vimeo.albums.getVideos', {album_id: album['id'], per_page: 50, full_response: 1, format: 'json'}))
    albumJson['videos']['video'].each do |videoJson|
      video = Aweplug::Helpers::Vimeo::VimeoVideo.new videoJson, site
      page_path = Pathname.new(File.join 'video', 'vimeo', "#{video.id}.html")
      # BUG FIX: `p.source_path == page_path` compared a String against a
      # Pathname (and source_path is presumably the longer on-disk path),
      # so the skip never matched and every video was regenerated and
      # re-pushed. Compare on the string form instead.
      next if site.pages.any? {|p| p.source_path.include? page_path.to_s}
      add_video_to_site video, site
      send_video_to_searchisko video, site, album['product']
    end
  end
  Parallel.each(site[@variable]["videos"], in_threads: 40) do |videoUrl|
    id = videoUrl.split('http://vimeo.com/').last
    page_path = Pathname.new(File.join 'video', 'vimeo', "#{id}.html")
    # Skip if the site already has this page (same fix as above)
    next if site.pages.any? {|p| p.source_path.include? page_path.to_s}
    videoJson = JSON.load(exec_method "vimeo.videos.getInfo", {format: 'json', video_id: id})['video'].first
    video = Aweplug::Helpers::Vimeo::VimeoVideo.new videoJson, site
    add_video_to_site video, site
    send_video_to_searchisko video, site
  end
end
def add_video_to_site video, site
page_path = Pathname.new(File.join 'video', 'vimeo', "#{video.id}.html")
page = ::Awestruct::Page.new(site,
::Awestruct::Handlers::LayoutHandler.new(site,
::Awestruct::Handlers::TiltHandler.new(site,
::Aweplug::Handlers::SyntheticHandler.new(site, '', page_path))))
page.layout = @layout
page.output_path = File.join 'video', 'vimeo', video.id,'index.html'
page.stale_output_callback = ->(p) { return (File.exist?(p.output_path) && File.mtime(__FILE__) > File.mtime(p.output_path)) }
page.send('title=', video.title)
page.send('description=', video.description)
page.send('video=', video)
page.send('video_url=', video.url)
site.video_cache[video.url] = video
site.pages << page
end
def send_video_to_searchisko video, site, product = nil
unless (payload = video.searchisko_payload).nil?
unless !@push_to_searchisko || site.profile =~ /development/
searchisko = Aweplug::Helpers::Searchisko.new({:base_url => site.dcp_base_url,
:authenticate => true,
:searchisko_username => ENV['dcp_user'],
:searchisko_password => ENV['dcp_password'],
:cache => site.cache,
:logger => site.log_faraday,
:searchisko_warnings => site.searchisko_warnings})
payload.merge!({target_product: product})
searchisko.push_content('jbossdeveloper_vimeo', video.id, payload.to_json)
end
end
end
end
end
end
end
Fix the broken check that skips videos whose pages have already been generated.
require 'aweplug/helpers/vimeo'
require 'aweplug/cache/yaml_file_cache'
require 'json'
require 'parallel'
module Aweplug
module Extensions
module Video
# Public: Awestruct Extension which iterates over a site variable which
# contains vimeo URLs and creates pages out of them, also sends
# the info over to a searchisko instance for indexing. This
# makes use of the Aweplug::Helper::Searchisko class, please see
# that class for more info on options and settings for Searchisko.
class Vimeo
include Aweplug::Helpers::Vimeo
# Public: Creates a new instance of this Awestruct plugin.
#
# variable_name - Name of the variable in the Awestruct Site containing
# the list of vimeo videos.
# layout - Name of the layout to be used for the generated Pages.
# push_to_searchisko - A boolean controlling whether a push to
# seachisko should happen. A push will not
# happen when the development profile is in
# use, regardless of the value of this
# option.
#
# Returns a new instance of this extension.
# Public: Record the extension's configuration for use in #execute.
#
# variable_name      - Site variable holding the vimeo album/video lists.
# layout             - Layout name for the generated pages.
# push_to_searchisko - Whether indexing pushes are attempted (defaults
#                      to true; the development profile still disables them).
def initialize variable_name, layout, push_to_searchisko = true
  @push_to_searchisko = push_to_searchisko
  @layout             = layout
  @variable           = variable_name
end
# Public: Generate a page (and optionally a Searchisko push) for every
# video referenced by the site variable — both album members and the
# flat video URL list. Album fetches run 10-wide, single-video fetches
# 40-wide, via the parallel gem.
#
# site - the Awestruct Site being built.
#
# Returns nothing of significance.
def execute site
  @site = site
  site.send("video_cache=", {})
  # fall back to a YAML file cache when the site has none configured
  if site.cache.nil?
    site.send('cache=', Aweplug::Cache::YamlFileCache.new)
  end
  # Iterate over the albums, call the vimeo endpoint
  # for each video in the response create page
  Parallel.each(site[@variable]["albums"], in_threads: 10) do |album|
    # TODO: do something about pagination, if / when we hit that issue
    albumJson = JSON.load(exec_method('vimeo.albums.getVideos', {album_id: album['id'], per_page: 50, full_response: 1, format: 'json'}))
    albumJson['videos']['video'].each do |videoJson|
      video = Aweplug::Helpers::Vimeo::VimeoVideo.new videoJson, site
      page_path = Pathname.new(File.join 'video', 'vimeo', "#{video.id}.html")
      # Skip if the site already has this page (string compare: source_path
      # is a String that embeds the relative page path)
      next if site.pages.any? {|p| p.source_path.include? page_path.to_s}
      add_video_to_site video, site
      send_video_to_searchisko video, site, album['product']
    end
  end
  Parallel.each(site[@variable]["videos"], in_threads: 40) do |videoUrl|
    id = videoUrl.split('http://vimeo.com/').last
    page_path = Pathname.new(File.join 'video', 'vimeo', "#{id}.html")
    # Skip if the site already has this page
    next if site.pages.any? {|p| p.source_path.include? page_path.to_s}
    videoJson = JSON.load(exec_method "vimeo.videos.getInfo", {format: 'json', video_id: id})['video'].first
    video = Aweplug::Helpers::Vimeo::VimeoVideo.new videoJson, site
    add_video_to_site video, site
    send_video_to_searchisko video, site
  end
end
# Public: Create a synthetic Awestruct page for the video, register the
# video in the site-wide video_cache, and append the page to site.pages.
#
# video - a VimeoVideo wrapper (provides id, title, description, url).
# site  - the Awestruct Site.
#
# Returns the updated site.pages array.
def add_video_to_site video, site
  page_path = Pathname.new(File.join 'video', 'vimeo', "#{video.id}.html")
  # Synthetic page: no source file on disk, content comes from the layout.
  page = ::Awestruct::Page.new(site,
           ::Awestruct::Handlers::LayoutHandler.new(site,
             ::Awestruct::Handlers::TiltHandler.new(site,
               ::Aweplug::Handlers::SyntheticHandler.new(site, '', page_path))))
  page.layout = @layout
  page.output_path = File.join 'video', 'vimeo', video.id,'index.html'
  # Regenerate only when this extension file is newer than the output.
  page.stale_output_callback = ->(p) { return (File.exist?(p.output_path) && File.mtime(__FILE__) > File.mtime(p.output_path)) }
  page.send('title=', video.title)
  page.send('description=', video.description)
  page.send('video=', video)
  page.send('video_url=', video.url)
  site.video_cache[video.url] = video
  site.pages << page
end
# Public: Push the video's metadata payload to Searchisko for indexing.
# Nothing is pushed when the video has no payload, pushing is disabled
# for this extension, or the site runs with the development profile.
#
# video   - a VimeoVideo wrapper (provides id and searchisko_payload).
# site    - the Awestruct Site (Searchisko connection settings).
# product - optional product name to tag the content with.
#
# Returns nothing of significance.
def send_video_to_searchisko video, site, product = nil
  payload = video.searchisko_payload
  # guard clauses instead of the original nested `unless !...` logic
  return if payload.nil?
  return if !@push_to_searchisko || site.profile =~ /development/
  searchisko = Aweplug::Helpers::Searchisko.new({:base_url => site.dcp_base_url,
                                                 :authenticate => true,
                                                 :searchisko_username => ENV['dcp_user'],
                                                 :searchisko_password => ENV['dcp_password'],
                                                 :cache => site.cache,
                                                 :logger => site.log_faraday,
                                                 :searchisko_warnings => site.searchisko_warnings})
  # FIX: only tag a product when one was supplied; the original merged
  # target_product: nil into every album-less payload.
  payload[:target_product] = product unless product.nil?
  searchisko.push_content('jbossdeveloper_vimeo', video.id, payload.to_json)
end
end
end
end
end
|
module AwsCli
module CLI
module EC2
require 'awscli/cli/ec2'
require 'awscli/helper'
class Instances < Thor
# default_task :list
desc 'list_sizes', 'lists available sizes of vms'
# Print the instance size table defined in Awscli::Instances.
def list_sizes
  puts Awscli::Instances::INSTANCE_SIZES
end
desc 'list_regions', 'lists available regions to connect to'
# Print the region list defined in Awscli::Instances.
def list_regions
  puts Awscli::Instances::REGIONS
end
desc "list", "list the instances"
long_desc <<-LONGDESC
  List and describe your instances
  The INSTANCE parameter is the instance ID(s) to describe.
  If unspecified all your instances will be returned.
LONGDESC
# Describe all instances on the configured connection.
def list
  puts "Listing Instances"
  create_ec2_object
  # puts parent_options #access awscli/cli/ec2.rb class options
  @ec2.list_instances
end
desc "diatt", "list instance attributes"
long_desc <<-LONGDESC
  Describes the specified attribute of the specified instance. You can specify only one attribute at a time.
  \x5
  Available Attributes to Request:
  architecture ami_launch_index availability_zone block_device_mapping network_interfaces client_token
  dns_name ebs_optimized groups flavor_id iam_instance_profile image_id instance_initiated_shutdown_behavior
  kernel_id key_name created_at monitoring placement_group platform private_dns_name private_ip_address
  public_ip_address ramdisk_id root_device_name root_device_type security_group_ids state state_reason subnet_id
  tenancy tags user_data vpc_id volumes username
LONGDESC
method_option :id, :aliases => "-i", :banner => "INSTANCEID", :type => :string, :desc => "Id of an instance to modify attribute", :required => true
method_option :attr, :aliases => "-a", :banner => "ATTR", :type => :string, :desc => "Attribute to modify", :required => true
# Describe a single attribute (-a) of the given instance (-i).
def diatt
  create_ec2_object
  @ec2.describe_instance_attribute(options[:id], options[:attr])
end
desc "miatt", "modify instance attributes"
long_desc <<-LONGDESC
  Modifies an instance attribute. Only one attribute can be specified per call.
LONGDESC
method_option :id, :aliases => "-i", :banner => "INSTANCEID", :type => :string, :desc => "Id of an instance to modify attribute", :required => true
method_option :isize, :aliases => "-t", :banner => "VALUE", :type => :string, :desc => "Changes the instance type to the specified value."
method_option :kernel, :aliases => "-k", :banner => "VALUE", :type => :string, :desc => "Changes the instance's kernel to the specified value"
method_option :ramdisk, :aliases => "-r", :banner => "VALUE", :type => :string, :desc => "Changes the instance's RAM disk to the specified value"
method_option :userdata, :aliases => "-u", :banner => "VALUE", :type => :string, :desc => "Changes the instance's user data to the specified value"
method_option :disable_api_term, :aliases => "-d", :banner => "true|false" , :type => :string, :desc => "Changes the instance's DisableApiTermination flag to the specified value. Setting this flag means you can't terminate the instance using the API"
method_option :inst_shutdown_beh, :aliases => "-s", :banner => "stop|terminate", :type => :string, :desc => "Changes the instance's InstanceInitiatedShutdownBehavior flag to the specified value."
method_option :source_dest_check, :aliases => "-c", :banner => "true|false" , :type => :string, :desc => "This attribute exists to enable a Network Address Translation (NAT) instance in a VPC to perform NAT. The attribute controls whether source/destination checking is enabled on the instance. A value of true means checking is enabled, and false means checking is disabled"
method_option :group_id, :aliases => "-g", :banner => "G1, G2, ..", :type => :array, :desc => "This attribute is applicable only to instances running in a VPC. Use this parameter when you want to change the security groups that an instance is in."
# Modify exactly one attribute of the instance given by -i.
def miatt
  create_ec2_object
  # create a copy of options, as the original Thor options hash cannot be modified
  opts = Marshal.load(Marshal.dump(options))
  opts.reject!{ |k| k == 'id' } # the instance id is the target, not an attribute
  # BUG FIX: a Hash is always truthy in Ruby, so the original
  # `abort ... unless opts` never fired — check emptiness explicitly.
  abort "Please pass an attribute by setting respective option" if opts.empty?
  abort "You can only pass one attribute at a time" if opts.size != 1
  opts.each do |k, v|
    puts "calling modify_instance_attribute with: #{options[:id]}, #{k}, #{v}"
    @ec2.modify_instance_attribute(options[:id], k, v)
  end
end
desc "riatt", "reset instances attribute(s)"
long_desc <<-LONGDESC
  Resets an instance attribute to its initial value. Only one attribute can be specified per call.
LONGDESC
# Placeholder: attribute reset has not been implemented yet.
def riatt
  puts "Not yet Implemented"
end
desc "dins", "describe instance status"
long_desc <<-LONGDESC
  Describe the status for one or more instances.
  Checks are performed on your instances to determine if they are
  in running order or not. Use this command to see the result of these
  instance checks so that you can take remedial action if possible.
  There are two types of checks performed: INSTANCE and SYSTEM.
  INSTANCE checks examine the health and reachability of the
  application environment. SYSTEM checks examine the health of
  the infrastructure surrounding your instance.
LONGDESC
method_option :instance_id, :aliases => "-i", :required => true, :banner => "ID", :desc => "instance id that needs to be stopped"
# Show the INSTANCE/SYSTEM status checks of the given instance.
def dins
  create_ec2_object
  @ec2.describe_instance_status options[:instance_id]
end
# desc "import", "ec2_import_instance"
# long_desc <<-LONGDESC
# Create an import instance task to import a virtual machine into EC2
# using meta_data from the given disk image. The volume size for the
# imported disk_image will be calculated automatically, unless specified.
# LONGDESC
# def import
# puts "Cannot find it in the *FOG*"
# end
desc "reboot", "reboot an instance"
long_desc <<-LONGDESC
  Reboot selected running instances.
  The INSTANCE parameter is an instance ID to reboot.
LONGDESC
method_option :instance_id, :aliases => "-i", :required => true, :banner => "ID", :desc => "instance id that needs to be stopped"
# Reboot the given instance id.
def reboot
  create_ec2_object
  @ec2.reboot_instance options[:instance_id]
end
# Typo fixes in the help text below: "supportsesses" -> "suppresses",
# "blockdeveice" -> "blockdevice".
desc "create", "launch a new instance"
long_desc <<-LONGDESC
Launch an instance of a specified AMI.
Usage Examples:
`awscli ec2 instances create -i ami-b63210f3 -k ruby-sample-1363113606 -b "/dev/sdb=ephemeral10" "/dev/sdc=snap-xxxxx::false::"`
`awscli ec2 instances create -i ami-b63210f3 -k ruby-sample-1363113606 -b "/dev/sdb=ephemeral10" "/dev/sdc=:10:false::"`
Running Multiple Instances:
`awscli ec2 instances create -i <ami_id> -c 10 -k <key_name> -b "/dev/sdd=:100:false::"`
Block Device Mapping Format:
This argument is passed in the form of <devicename>=<blockdevice>. The devicename is the device name of the physical device on the instance to map. The blockdevice can be one of the following values:
none - suppresses an existing mapping of the device from the AMI used to launch the instance. For example: "/dev/sdc=none"
ephemeral[0..3] - An instance store volume to be mapped to the device. For example: "/dev/sdc=ephemeral0"
[snapshot-id]:[volume-size]:[true|false]:[standard|io1[:iops]] - An EBS volume to be mapped to the device.
[snapshot-id] To create a volume from a snapshot, specify the snapshot ID.
[volume-size] To create an empty EBS volume, omit the snapshot ID and specify a volume size instead.
For example: "/dev/sdh=:20".
[delete-on-termination] To prevent the volume from being deleted on termination of the instance, specify false.
The default is true.
[volume-type] To create a Provisioned IOPS volume, specify io1. The default volume type is standard.
If the volume type is io1, you can also provision the number of IOPS that the volume supports.
For example, "/dev/sdh=snap-7eb96d16::false:io1:500"
LONGDESC
method_option :image_id, :aliases => "-i", :required => true, :banner => "AMIID", :type => :string, :desc => "Id of machine image to load on instances"
method_option :availability_zone, :banner => "ZONE", :type => :string, :desc => "Placement constraint for instances"
method_option :placement_group, :banner => "GROUP", :type => :string, :desc => "Name of existing placement group to launch instance into"
method_option :tenancy, :banner => "TENANCY", :type => :string, :desc => "Tenancy option in ['dedicated', 'default'], defaults to 'default'"
method_option :block_device_mapping, :aliases => "-b", :type => :array , :desc => "<devicename>=<blockdevice>, see help for how to pass values"
method_option :client_token, :type => :string, :desc => "unique case-sensitive token for ensuring idempotency"
method_option :groups, :aliases => "-g", :banner => "SG1 SG2 SG3",:type => :array, :default => ["default"], :desc => "Name of security group(s) for instances (not supported for VPC). Default: 'default'"
method_option :flavor_id, :aliases => "-t",:type => :string, :default => "m1.small", :desc => "Type of instance to boot."
method_option :kernel_id, :type => :string, :desc => "Id of kernel with which to launch"
method_option :key_name, :aliases => "-k", :required => true, :type => :string, :desc => "Name of a keypair to add to booting instances"
method_option :monitoring, :type => :boolean, :default => false, :desc => "Enables monitoring, defaults to false"
method_option :ramdisk_id, :type => :string, :desc => "Id of ramdisk with which to launch"
method_option :subnet_id, :type => :string, :desc => "VPC option to specify subnet to launch instance into"
method_option :user_data, :type => :string, :desc => "Additional data to provide to booting instances"
method_option :ebs_optimized, :type => :boolean, :default => false, :desc => "Whether the instance is optimized for EBS I/O"
method_option :vpc_id, :type => :string, :desc => "VPC to connect to"
method_option :tags, :type => :hash, :default => {'Name' => "awscli-#{Time.now.to_i}"}, :desc => "Tags to identify server"
method_option :private_ip_address, :banner => "IP",:type => :string, :desc => "VPC option to specify ip address within subnet"
method_option :wait_for, :aliases => "-w", :type => :boolean, :default => false, :desc => "wait for the server to get created and return public_dns"
method_option :count, :aliases => '-c', :type => :numeric, :default => 1, :desc => 'Number of instances to launch'
# Launches one or more instances; the full Thor options hash is forwarded
# to the Awscli EC2 facade, which interprets each option.
def create
create_ec2_object
@ec2.create_instance options
end
desc 'create_centos', 'Create a centos based instance, ebs_backed with root being 100GB (user has to manually execute resize2fs /dev/sda to reclaim extra storage on root device)'
method_option :count, :aliases => '-c', :type => :numeric, :default => 1, :desc => 'Number of instances to launch'
method_option :groups,:aliases => '-g', :banner => 'SG1 SG2 SG3',:type => :array, :default => %w(default), :desc => "Name of security group(s) for instances (not supported for VPC). Default: 'default'"
method_option :flavor_id, :aliases => '-t', :default => 'm1.small', :desc => 'Type of the instance to boot'
method_option :key_name, :aliases => '-k', :required => true, :desc => 'Name of the keypair to use'
method_option :tags, :type => :hash, :default => {'Name' => "awscli-centos-#{Time.now.to_i}"}, :desc => "Tags to identify server"
method_option :wait_for, :aliases => "-w", :type => :boolean, :default => false, :desc => "wait for the server to get created and return public_dns"
# Launches an EBS-backed CentOS instance with a 100GB root volume, using a
# hard-coded per-region AMI table.
def create_centos
create_ec2_object
# Region name => CentOS AMI id published for that region.
centos_amis = {
'us-east-1' => 'ami-a96b01c0', #Virginia
'us-west-1' => 'ami-51351b14', #Northern California
'us-west-2' => 'ami-bd58c98d', #Oregon
'eu-west-1' => 'ami-050b1b71', #Ireland
'ap-southeast-1' => 'ami-23682671', #Singapore
'ap-southeast-2' => 'ami-ffcd5ec5', #Sydney
'ap-northeast-1' => 'ami-3fe8603e', #Tokyo
'sa-east-1' => 'ami-e2cd68ff', #Sao Paulo
}
# NOTE(review): an unrecognised --region yields a nil :image_id here --
# presumably validated upstream; confirm.
@ec2.create_instance :image_id => centos_amis[parent_options[:region]],
:block_device_mapping => %w(/dev/sda=:100:true::),
:groups => options[:groups],
:flavor_id => options[:flavor_id],
:key_name => options[:key_name],
:tags => options[:tags],
:count => options[:count],
:wait_for => options[:wait_for]
end
desc 'create_ubuntu', 'Create a ubuntu based instance, ebs_backed with root being 100GB (user has to manually execute resize2fs /dev/sda1 to reclaim extra storage on root device)'
method_option :count, :aliases => '-c', :type => :numeric, :default => 1, :desc => 'Number of instances to launch'
method_option :groups,:aliases => '-g', :banner => 'SG1 SG2 SG3',:type => :array, :default => %w(default), :desc => "Name of security group(s) for instances (not supported for VPC). Default: 'default'"
method_option :flavor_id, :aliases => '-t', :default => 'm1.small', :desc => 'Type of the instance to boot'
method_option :key_name, :aliases => '-k', :required => true, :desc => 'Name of the keypair to use'
method_option :tags, :type => :hash, :default => {'Name' => "awscli-ubuntu-#{Time.now.to_i}"}, :desc => "Tags to identify server"
method_option :wait_for, :aliases => "-w", :type => :boolean, :default => false, :desc => "wait for the server to get created and return public_dns"
# Launches an EBS-backed Ubuntu instance with a 100GB root volume, using a
# hard-coded per-region AMI table (root device is /dev/sda1 on Ubuntu).
def create_ubuntu
create_ec2_object
# Region name => Ubuntu AMI id published for that region.
ubuntu_amis = {
'us-east-1' => 'ami-9b85eef2', #Virginia
'us-west-1' => 'ami-9b2d03de', #Northern California
'us-west-2' => 'ami-77be2f47', #Oregon
'eu-west-1' => 'ami-f5736381', #Ireland
'ap-southeast-1' => 'ami-085b155a', #Singapore
'ap-southeast-2' => 'ami-37c0530d', #Sydney
'ap-northeast-1' => 'ami-57109956', #Tokyo
'sa-east-1' => 'ami-a4fb5eb9', #Sao Paulo
}
# NOTE(review): an unrecognised --region yields a nil :image_id here --
# presumably validated upstream; confirm.
@ec2.create_instance :image_id => ubuntu_amis[parent_options[:region]],
:block_device_mapping => %w(/dev/sda1=:100:true::),
:groups => options[:groups],
:flavor_id => options[:flavor_id],
:key_name => options[:key_name],
:tags => options[:tags],
:count => options[:count],
:wait_for => options[:wait_for]
end
desc "start", "start instances"
long_desc <<-LONGDESC
Start selected running instances.
LONGDESC
# Fixed copy-pasted option description ("needs to be stopped" belonged to `stop`).
method_option :instance_id, :aliases => "-i", :required => true, :banner => "ID", :desc => "instance id to start"
# Starts a previously stopped instance.
def start
create_ec2_object
@ec2.start_instance options[:instance_id]
end
desc "stop", "stop instances"
long_desc <<-LONGDESC
Stop selected running instances.
LONGDESC
method_option :instance_id, :aliases => "-i", :required => true, :banner => "ID", :desc => "instance id that needs to be stopped"
# Stops (but does not terminate) the given instance.
def stop
create_ec2_object
@ec2.stop_instance options[:instance_id]
end
# Typo fix: "teminate" -> "terminate"; copy-pasted option description corrected too.
desc "terminate", "terminate instances"
long_desc <<-LONGDESC
Terminate selected running instances
LONGDESC
method_option :instance_id, :aliases => "-i", :required => true, :banner => "ID", :desc => "instance id to terminate"
# Terminates the given instance (destructive; the instance cannot be restarted).
def terminate
create_ec2_object
@ec2.terminate_instance options[:instance_id]
end
desc "terminate_all", "terminate all running instances (causes data loss)"
method_option :delete_volumes, :aliases => "-v", :type => :boolean, :desc => "delete the ebs volumes attached to instance if any", :default => false
# Terminates every running instance in the current region; optionally also
# deletes attached EBS volumes.
def terminate_all
create_ec2_object
@ec2.terminate_instances options[:delete_volumes]
end
desc "console_output", "Retrieve console output for specified instance"
method_option :instance_id, :aliases => "-i", :required => true, :banner => "ID", :desc => "instance id to get console output from"
# Fetches and prints the serial console output of the given instance.
def console_output
create_ec2_object
@ec2.get_console_output options[:instance_id]
end
private

# Opens an EC2 connection (honouring --region when given) and wraps it in
# an Awscli::EC2::EC2 facade stored in @ec2 for the command methods to use.
def create_ec2_object
# Typo fix: "Connetion" -> "Connection" in both progress messages.
puts "ec2 Establishing Connection..."
$ec2_conn = if parent_options[:region]
Awscli::Connection.new.request_ec2(parent_options[:region])
else
Awscli::Connection.new.request_ec2
end
puts "ec2 Establishing Connection... OK"
@ec2 = Awscli::EC2::EC2.new($ec2_conn)
end
# Mount this class as the `instances` subcommand of `awscli ec2`.
AwsCli::CLI::Ec2.register AwsCli::CLI::EC2::Instances, :instances, 'instances [COMMAND]', 'EC2 Instance Management'
end
end
end
end
New feature for ec2 instances: `list_all`
list_all: lists the instances running in every available region
module AwsCli
module CLI
module EC2
require 'awscli/cli/ec2'
require 'awscli/helper'
class Instances < Thor
# default_task :list
desc 'list_sizes', 'lists available sizes of vms'
# Prints the static table of known instance sizes.
def list_sizes
puts Awscli::Instances::INSTANCE_SIZES
end
desc 'list_regions', 'lists available regions to connect to'
# Prints the static list of AWS regions awscli knows about.
def list_regions
puts Awscli::Instances::REGIONS
end
desc "list", "list the instances"
long_desc <<-LONGDESC
List and describe your instances
The INSTANCE parameter is the instance ID(s) to describe.
If unspecified all your instances will be returned.
LONGDESC
# Lists the instances in the region chosen via the parent command's --region.
def list
puts "Listing Instances for region: #{parent_options[:region]}"
create_ec2_object
# puts parent_options #access awscli/cli/ec2.rb class options
@ec2.list_instances
end
desc "list_all", "lists instances for all regions"
# Walks every known region, opening a fresh EC2 connection for each one
# and printing the instances found there.
def list_all
Awscli::Instances::REGIONS.each do |region_name|
puts "Listing instances for region: #{region_name}"
regional_ec2 = Awscli::EC2::EC2.new(Awscli::Connection.new.request_ec2(region_name))
regional_ec2.list_instances
end
end
desc "diatt", "list instance attributes"
long_desc <<-LONGDESC
Describes the specified attribute of the specified instance. You can specify only one attribute at a time.
\x5
Available Attributes to Request:
architecture ami_launch_index availability_zone block_device_mapping network_interfaces client_token
dns_name ebs_optimized groups flavor_id iam_instance_profile image_id instance_initiated_shutdown_behavior
kernel_id key_name created_at monitoring placement_group platform private_dns_name private_ip_address
public_ip_address ramdisk_id root_device_name root_device_type security_group_ids state state_reason subnet_id
tenancy tags user_data vpc_id volumes username
LONGDESC
method_option :id, :aliases => "-i", :banner => "INSTANCEID", :type => :string, :desc => "Id of an instance to modify attribute", :required => true
method_option :attr, :aliases => "-a", :banner => "ATTR", :type => :string, :desc => "Attribute to modify", :required => true
# Prints one attribute of one instance.
def diatt
create_ec2_object
@ec2.describe_instance_attribute(options[:id], options[:attr])
end
desc "miatt", "modify instance attributes"
long_desc <<-LONGDESC
Modifies an instance attribute. Only one attribute can be specified per call.
LONGDESC
method_option :id, :aliases => "-i", :banner => "INSTANCEID", :type => :string, :desc => "Id of an instance to modify attribute", :required => true
method_option :isize, :aliases => "-t", :banner => "VALUE", :type => :string, :desc => "Changes the instance type to the specified value."
method_option :kernel, :aliases => "-k", :banner => "VALUE", :type => :string, :desc => "Changes the instance's kernel to the specified value"
method_option :ramdisk, :aliases => "-r", :banner => "VALUE", :type => :string, :desc => "Changes the instance's RAM disk to the specified value"
method_option :userdata, :aliases => "-u", :banner => "VALUE", :type => :string, :desc => "Changes the instance's user data to the specified value"
method_option :disable_api_term, :aliases => "-d", :banner => "true|false" , :type => :string, :desc => "Changes the instance's DisableApiTermination flag to the specified value. Setting this flag means you can't terminate the instance using the API"
method_option :inst_shutdown_beh, :aliases => "-s", :banner => "stop|terminate", :type => :string, :desc => "Changes the instance's InstanceInitiatedShutdownBehavior flag to the specified value."
method_option :source_dest_check, :aliases => "-c", :banner => "true|false" , :type => :string, :desc => "This attribute exists to enable a Network Address Translation (NAT) instance in a VPC to perform NAT. The attribute controls whether source/destination checking is enabled on the instance. A value of true means checking is enabled, and false means checking is disabled"
method_option :group_id, :aliases => "-g", :banner => "G1, G2, ..", :type => :array, :desc => "This attribute is applicable only to instances running in a VPC. Use this parameter when you want to change the security groups that an instance is in."
# Modifies exactly one attribute of the given instance.
def miatt
create_ec2_object
opts = Marshal.load(Marshal.dump(options)) #create a copy of options, as original options hash cannot be modified
opts.reject!{ |k| k == 'id' } #remove id from opts
# BUG FIX: a Hash is always truthy in Ruby, so `unless opts` could never
# fire -- check emptiness instead so that passing only --id aborts cleanly.
abort "Please pass an attribute by setting respective option" if opts.empty?
abort "You can only pass one attribute at a time" if opts.size != 1
opts.each do |k, v|
puts "calling modify_instance_attribute with: #{options[:id]}, #{k}, #{v}"
@ec2.modify_instance_attribute(options[:id], k, v)
end
end
desc "riatt", "reset instances attribute(s)"
long_desc <<-LONGDESC
Resets an instance attribute to its initial value. Only one attribute can be specified per call.
LONGDESC
# Stub: resetting instance attributes is not implemented yet.
def riatt
puts "Not yet Implemented"
end
desc "dins", "describe instance status"
long_desc <<-LONGDESC
Describe the status for one or more instances.
Checks are performed on your instances to determine if they are
in running order or not. Use this command to see the result of these
instance checks so that you can take remedial action if possible.
There are two types of checks performed: INSTANCE and SYSTEM.
INSTANCE checks examine the health and reachability of the
application environment. SYSTEM checks examine the health of
the infrastructure surrounding your instance.
LONGDESC
# Fixed copy-pasted option description ("needs to be stopped" belonged to `stop`).
method_option :instance_id, :aliases => "-i", :required => true, :banner => "ID", :desc => "instance id to describe status for"
# Prints the INSTANCE and SYSTEM status checks for the given instance.
def dins
create_ec2_object
@ec2.describe_instance_status options[:instance_id]
end
# desc "import", "ec2_import_instance"
# long_desc <<-LONGDESC
# Create an import instance task to import a virtual machine into EC2
# using meta_data from the given disk image. The volume size for the
# imported disk_image will be calculated automatically, unless specified.
# LONGDESC
# def import
# puts "Cannot find it in the *FOG*"
# end
desc "reboot", "reboot an instance"
long_desc <<-LONGDESC
Reboot selected running instances.
The INSTANCE parameter is an instance ID to reboot.
LONGDESC
# Fixed copy-pasted option description ("needs to be stopped" belonged to `stop`).
method_option :instance_id, :aliases => "-i", :required => true, :banner => "ID", :desc => "instance id to reboot"
# Requests a reboot of the given instance.
def reboot
create_ec2_object
@ec2.reboot_instance options[:instance_id]
end
# Typo fixes in the help text below: "supportsesses" -> "suppresses",
# "blockdeveice" -> "blockdevice".
desc "create", "launch a new instance"
long_desc <<-LONGDESC
Launch an instance of a specified AMI.
Usage Examples:
`awscli ec2 instances create -i ami-b63210f3 -k ruby-sample-1363113606 -b "/dev/sdb=ephemeral10" "/dev/sdc=snap-xxxxx::false::"`
`awscli ec2 instances create -i ami-b63210f3 -k ruby-sample-1363113606 -b "/dev/sdb=ephemeral10" "/dev/sdc=:10:false::"`
Running Multiple Instances:
`awscli ec2 instances create -i <ami_id> -c 10 -k <key_name> -b "/dev/sdd=:100:false::"`
Block Device Mapping Format:
This argument is passed in the form of <devicename>=<blockdevice>. The devicename is the device name of the physical device on the instance to map. The blockdevice can be one of the following values:
none - suppresses an existing mapping of the device from the AMI used to launch the instance. For example: "/dev/sdc=none"
ephemeral[0..3] - An instance store volume to be mapped to the device. For example: "/dev/sdc=ephemeral0"
[snapshot-id]:[volume-size]:[true|false]:[standard|io1[:iops]] - An EBS volume to be mapped to the device.
[snapshot-id] To create a volume from a snapshot, specify the snapshot ID.
[volume-size] To create an empty EBS volume, omit the snapshot ID and specify a volume size instead.
For example: "/dev/sdh=:20".
[delete-on-termination] To prevent the volume from being deleted on termination of the instance, specify false.
The default is true.
[volume-type] To create a Provisioned IOPS volume, specify io1. The default volume type is standard.
If the volume type is io1, you can also provision the number of IOPS that the volume supports.
For example, "/dev/sdh=snap-7eb96d16::false:io1:500"
LONGDESC
method_option :image_id, :aliases => "-i", :required => true, :banner => "AMIID", :type => :string, :desc => "Id of machine image to load on instances"
method_option :availability_zone, :banner => "ZONE", :type => :string, :desc => "Placement constraint for instances"
method_option :placement_group, :banner => "GROUP", :type => :string, :desc => "Name of existing placement group to launch instance into"
method_option :tenancy, :banner => "TENANCY", :type => :string, :desc => "Tenancy option in ['dedicated', 'default'], defaults to 'default'"
method_option :block_device_mapping, :aliases => "-b", :type => :array , :desc => "<devicename>=<blockdevice>, see help for how to pass values"
method_option :client_token, :type => :string, :desc => "unique case-sensitive token for ensuring idempotency"
method_option :groups, :aliases => "-g", :banner => "SG1 SG2 SG3",:type => :array, :default => ["default"], :desc => "Name of security group(s) for instances (not supported for VPC). Default: 'default'"
method_option :flavor_id, :aliases => "-t",:type => :string, :default => "m1.small", :desc => "Type of instance to boot."
method_option :kernel_id, :type => :string, :desc => "Id of kernel with which to launch"
method_option :key_name, :aliases => "-k", :required => true, :type => :string, :desc => "Name of a keypair to add to booting instances"
method_option :monitoring, :type => :boolean, :default => false, :desc => "Enables monitoring, defaults to false"
method_option :ramdisk_id, :type => :string, :desc => "Id of ramdisk with which to launch"
method_option :subnet_id, :type => :string, :desc => "VPC option to specify subnet to launch instance into"
method_option :user_data, :type => :string, :desc => "Additional data to provide to booting instances"
method_option :ebs_optimized, :type => :boolean, :default => false, :desc => "Whether the instance is optimized for EBS I/O"
method_option :vpc_id, :type => :string, :desc => "VPC to connect to"
method_option :tags, :type => :hash, :default => {'Name' => "awscli-#{Time.now.to_i}"}, :desc => "Tags to identify server"
method_option :private_ip_address, :banner => "IP",:type => :string, :desc => "VPC option to specify ip address within subnet"
method_option :wait_for, :aliases => "-w", :type => :boolean, :default => false, :desc => "wait for the server to get created and return public_dns"
method_option :count, :aliases => '-c', :type => :numeric, :default => 1, :desc => 'Number of instances to launch'
# Launches one or more instances; the full Thor options hash is forwarded
# to the Awscli EC2 facade, which interprets each option.
def create
create_ec2_object
@ec2.create_instance options
end
desc 'create_centos', 'Create a centos based instance, ebs_backed with root being 100GB (user has to manually execute resize2fs /dev/sda to reclaim extra storage on root device)'
method_option :count, :aliases => '-c', :type => :numeric, :default => 1, :desc => 'Number of instances to launch'
method_option :groups,:aliases => '-g', :banner => 'SG1 SG2 SG3',:type => :array, :default => %w(default), :desc => "Name of security group(s) for instances (not supported for VPC). Default: 'default'"
method_option :flavor_id, :aliases => '-t', :default => 'm1.small', :desc => 'Type of the instance to boot'
method_option :key_name, :aliases => '-k', :required => true, :desc => 'Name of the keypair to use'
method_option :tags, :type => :hash, :default => {'Name' => "awscli-centos-#{Time.now.to_i}"}, :desc => "Tags to identify server"
method_option :wait_for, :aliases => "-w", :type => :boolean, :default => false, :desc => "wait for the server to get created and return public_dns"
# Launches an EBS-backed CentOS instance with a 100GB root volume, using a
# hard-coded per-region AMI table.
def create_centos
create_ec2_object
# Region name => CentOS AMI id published for that region.
centos_amis = {
'us-east-1' => 'ami-a96b01c0', #Virginia
'us-west-1' => 'ami-51351b14', #Northern California
'us-west-2' => 'ami-bd58c98d', #Oregon
'eu-west-1' => 'ami-050b1b71', #Ireland
'ap-southeast-1' => 'ami-23682671', #Singapore
'ap-southeast-2' => 'ami-ffcd5ec5', #Sydney
'ap-northeast-1' => 'ami-3fe8603e', #Tokyo
'sa-east-1' => 'ami-e2cd68ff', #Sao Paulo
}
# NOTE(review): an unrecognised --region yields a nil :image_id here --
# presumably validated upstream; confirm.
@ec2.create_instance :image_id => centos_amis[parent_options[:region]],
:block_device_mapping => %w(/dev/sda=:100:true::),
:groups => options[:groups],
:flavor_id => options[:flavor_id],
:key_name => options[:key_name],
:tags => options[:tags],
:count => options[:count],
:wait_for => options[:wait_for]
end
desc 'create_ubuntu', 'Create a ubuntu based instance, ebs_backed with root being 100GB (user has to manually execute resize2fs /dev/sda1 to reclaim extra storage on root device)'
method_option :count, :aliases => '-c', :type => :numeric, :default => 1, :desc => 'Number of instances to launch'
method_option :groups,:aliases => '-g', :banner => 'SG1 SG2 SG3',:type => :array, :default => %w(default), :desc => "Name of security group(s) for instances (not supported for VPC). Default: 'default'"
method_option :flavor_id, :aliases => '-t', :default => 'm1.small', :desc => 'Type of the instance to boot'
method_option :key_name, :aliases => '-k', :required => true, :desc => 'Name of the keypair to use'
method_option :tags, :type => :hash, :default => {'Name' => "awscli-ubuntu-#{Time.now.to_i}"}, :desc => "Tags to identify server"
method_option :wait_for, :aliases => "-w", :type => :boolean, :default => false, :desc => "wait for the server to get created and return public_dns"
# Launches an EBS-backed Ubuntu instance with a 100GB root volume, using a
# hard-coded per-region AMI table (root device is /dev/sda1 on Ubuntu).
def create_ubuntu
create_ec2_object
# Region name => Ubuntu AMI id published for that region.
ubuntu_amis = {
'us-east-1' => 'ami-9b85eef2', #Virginia
'us-west-1' => 'ami-9b2d03de', #Northern California
'us-west-2' => 'ami-77be2f47', #Oregon
'eu-west-1' => 'ami-f5736381', #Ireland
'ap-southeast-1' => 'ami-085b155a', #Singapore
'ap-southeast-2' => 'ami-37c0530d', #Sydney
'ap-northeast-1' => 'ami-57109956', #Tokyo
'sa-east-1' => 'ami-a4fb5eb9', #Sao Paulo
}
# NOTE(review): an unrecognised --region yields a nil :image_id here --
# presumably validated upstream; confirm.
@ec2.create_instance :image_id => ubuntu_amis[parent_options[:region]],
:block_device_mapping => %w(/dev/sda1=:100:true::),
:groups => options[:groups],
:flavor_id => options[:flavor_id],
:key_name => options[:key_name],
:tags => options[:tags],
:count => options[:count],
:wait_for => options[:wait_for]
end
desc "start", "start instances"
long_desc <<-LONGDESC
Start selected running instances.
LONGDESC
# Fixed copy-pasted option description ("needs to be stopped" belonged to `stop`).
method_option :instance_id, :aliases => "-i", :required => true, :banner => "ID", :desc => "instance id to start"
# Starts a previously stopped instance.
def start
create_ec2_object
@ec2.start_instance options[:instance_id]
end
desc "stop", "stop instances"
long_desc <<-LONGDESC
Stop selected running instances.
LONGDESC
method_option :instance_id, :aliases => "-i", :required => true, :banner => "ID", :desc => "instance id that needs to be stopped"
# Stops (but does not terminate) the given instance.
def stop
create_ec2_object
@ec2.stop_instance options[:instance_id]
end
# Typo fix: "teminate" -> "terminate"; copy-pasted option description corrected too.
desc "terminate", "terminate instances"
long_desc <<-LONGDESC
Terminate selected running instances
LONGDESC
method_option :instance_id, :aliases => "-i", :required => true, :banner => "ID", :desc => "instance id to terminate"
# Terminates the given instance (destructive; the instance cannot be restarted).
def terminate
create_ec2_object
@ec2.terminate_instance options[:instance_id]
end
desc "terminate_all", "terminate all running instances (causes data loss)"
method_option :delete_volumes, :aliases => "-v", :type => :boolean, :desc => "delete the ebs volumes attached to instance if any", :default => false
# Terminates every running instance in the current region; optionally also
# deletes attached EBS volumes.
def terminate_all
create_ec2_object
@ec2.terminate_instances options[:delete_volumes]
end
desc "console_output", "Retrieve console output for specified instance"
method_option :instance_id, :aliases => "-i", :required => true, :banner => "ID", :desc => "instance id to get console output from"
# Fetches and prints the serial console output of the given instance.
def console_output
create_ec2_object
@ec2.get_console_output options[:instance_id]
end
private

# Opens an EC2 connection (honouring --region when given) and wraps it in
# an Awscli::EC2::EC2 facade stored in @ec2 for the command methods to use.
def create_ec2_object
# Typo fix: "Connetion" -> "Connection" in both progress messages.
puts "ec2 Establishing Connection..."
$ec2_conn = if parent_options[:region]
Awscli::Connection.new.request_ec2(parent_options[:region])
else
Awscli::Connection.new.request_ec2
end
puts "ec2 Establishing Connection... OK"
@ec2 = Awscli::EC2::EC2.new($ec2_conn)
end
# Mount this class as the `instances` subcommand of `awscli ec2`.
AwsCli::CLI::Ec2.register AwsCli::CLI::EC2::Instances, :instances, 'instances [COMMAND]', 'EC2 Instance Management'
end
end
end
end |
require "json"

module BetterErrors
  # Rack middleware: renders an error page for exceptions raised by the
  # wrapped app and services that page's AJAX endpoints under
  # /__better_errors/<oid>/<method>.
  class Middleware
    # @param app     the Rack app/middleware to wrap
    # @param handler class used to build error pages for raised exceptions
    def initialize(app, handler = ErrorPage)
      @app = app
      @handler = handler
    end

    # Rack entry point: error-page AJAX URLs are handled internally,
    # everything else goes straight to the wrapped app.
    def call(env)
      match = %r{/__better_errors/(?<oid>\d+)/(?<method>\w+)}.match(env["PATH_INFO"])
      match ? internal_call(env, match) : app_call(env)
    end

    private

    # Runs the app; on any exception, builds an error page, logs it, and
    # returns the rendered page as a 500 response.
    def app_call(env)
      @app.call env
    rescue Exception => raised
      @error_page = @handler.new raised, env
      log_exception
      [500, { "Content-Type" => "text/html; charset=utf-8" }, [@error_page.render]]
    end

    # Writes class, message and backtrace of the captured exception to the
    # configured logger, if any.
    def log_exception
      return unless BetterErrors.logger
      text = "\n#{@error_page.exception.class} - #{@error_page.exception.message}:\n"
      @error_page.backtrace_frames.each { |frame| text << " #{frame}\n" }
      BetterErrors.logger.fatal text
    end

    # Answers an AJAX call from a rendered error page; the oid must match
    # the page currently held, otherwise the session has expired.
    def internal_call(env, opts)
      unless opts[:oid].to_i == @error_page.object_id
        return [200, { "Content-Type" => "text/plain; charset=utf-8" }, [JSON.dump(error: "Session expired")]]
      end
      payload = JSON.parse(env["rack.input"].read)
      result = @error_page.send("do_#{opts[:method]}", payload)
      [200, { "Content-Type" => "text/plain; charset=utf-8" }, [JSON.dump(result)]]
    end
  end
end
`/__better_errors` should show the most recent error page
require "json"
module BetterErrors
# Rack middleware that renders a Better Errors page for exceptions raised by
# the wrapped app, re-shows the most recent error page at /__better_errors,
# and answers the page's AJAX calls at /__better_errors/<oid>/<method>.
class Middleware
# @param app     the Rack app/middleware to wrap
# @param handler class used to build error pages (defaults to ErrorPage)
def initialize(app, handler = ErrorPage)
@app = app
@handler = handler
end
# Rack entry point: routes Better Errors' own URLs; everything else goes
# to the wrapped app.
def call(env)
case env["PATH_INFO"]
when %r{\A/__better_errors/(?<oid>\d+)/(?<method>\w+)\z}
internal_call env, $~
when %r{\A/__better_errors/?\z}
# Re-render the most recently captured error page.
show_error_page env
else
app_call env
end
end
private
# Runs the app; on any exception, captures an error page, logs it, and
# renders it.
def app_call(env)
@app.call env
rescue Exception => ex
@error_page = @handler.new ex, env
log_exception
show_error_page(env)
end
# Renders whichever error page was captured last.
# NOTE(review): raises NoMethodError when hit before any error occurred,
# since @error_page is nil then -- confirm whether that path is reachable.
def show_error_page(env)
[500, { "Content-Type" => "text/html; charset=utf-8" }, [@error_page.render]]
end
# Writes class, message and backtrace of the captured exception to the
# configured logger, if any.
def log_exception
return unless BetterErrors.logger
message = "\n#{@error_page.exception.class} - #{@error_page.exception.message}:\n"
@error_page.backtrace_frames.each do |frame|
message << " #{frame}\n"
end
BetterErrors.logger.fatal message
end
# Answers an AJAX call from a rendered error page; rejects requests whose
# oid does not match the currently held page ("Session expired").
def internal_call(env, opts)
if opts[:oid].to_i != @error_page.object_id
return [200, { "Content-Type" => "text/plain; charset=utf-8" }, [JSON.dump(error: "Session expired")]]
end
response = @error_page.send("do_#{opts[:method]}", JSON.parse(env["rack.input"].read))
[200, { "Content-Type" => "text/plain; charset=utf-8" }, [JSON.dump(response)]]
end
end
end
|
require "json"
require "ipaddr"
require "set"
module BetterErrors
# Better Errors' error handling middleware. Including this in your middleware
# stack will show a Better Errors error page for exceptions raised below this
# middleware.
#
# If you are using Ruby on Rails, you do not need to manually insert this
# middleware into your middleware stack.
#
# @example Sinatra
#   require "better_errors"
#
#   if development?
#     use BetterErrors::Middleware
#   end
#
# @example Rack
#   require "better_errors"
#   if ENV["RACK_ENV"] == "development"
#     use BetterErrors::Middleware
#   end
#
class Middleware
# The set of IP addresses that are allowed to access Better Errors.
#
# Set to `{ "127.0.0.1/8", "::1/128" }` by default.
ALLOWED_IPS = Set.new
# Adds an address to the set of IP addresses allowed to access Better
# Errors.
def self.allow_ip!(addr)
ALLOWED_IPS << IPAddr.new(addr)
end
allow_ip! "127.0.0.0/8"
allow_ip! "::1/128" rescue nil # windows ruby doesn't have ipv6 support
# A new instance of BetterErrors::Middleware
#
# @param app The Rack app/middleware to wrap with Better Errors
# @param handler The error handler to use.
def initialize(app, handler = ErrorPage)
@app = app
@handler = handler
end
# Calls the Better Errors middleware
#
# @param [Hash] env
# @return [Array]
def call(env)
if allow_ip? env
better_errors_call env
else
@app.call env
end
end
private
# True when the client's REMOTE_ADDR is in ALLOWED_IPS (or absent, since
# REMOTE_ADDR is not part of the Rack spec and some servers omit it).
def allow_ip?(env)
# REMOTE_ADDR is not in the rack spec, so some application servers do
# not provide it.
return true unless env["REMOTE_ADDR"] and !env["REMOTE_ADDR"].strip.empty?
ip = IPAddr.new env["REMOTE_ADDR"].split("%").first
ALLOWED_IPS.any? { |subnet| subnet.include? ip }
end
# Routes Better Errors' own endpoints; anything else runs the app with
# exception capture.
def better_errors_call(env)
case env["PATH_INFO"]
when %r{/__better_errors/(?<id>.+?)/(?<method>\w+)\z}
internal_call env, $~
when %r{/__better_errors/?\z}
show_error_page env
else
protected_app_call env
end
end
# Runs the wrapped app; any exception is captured, logged, and rendered
# instead of propagating.
def protected_app_call(env)
@app.call env
rescue Exception => ex
@error_page = @handler.new ex, env
log_exception
show_error_page(env, ex)
end
# Renders the current error page (text or HTML depending on the request),
# or a placeholder when no error has been captured yet.
def show_error_page(env, exception=nil)
type, content = if @error_page
if text?(env)
[ 'plain', @error_page.render('text') ]
else
[ 'html', @error_page.render ]
end
else
[ 'html', no_errors_page ]
end
# Default to 500, but let Rails map the exception to a proper status
# code when ActionDispatch is loaded.
status_code = 500
if defined? ActionDispatch::ExceptionWrapper
status_code = ActionDispatch::ExceptionWrapper.new(env, exception).status_code
end
[status_code, { "Content-Type" => "text/#{type}; charset=utf-8" }, [content]]
end
# Plain-text rendering for XHR requests and clients not accepting HTML.
def text?(env)
env["HTTP_X_REQUESTED_WITH"] == "XMLHttpRequest" ||
!env["HTTP_ACCEPT"].to_s.include?('html')
end
# Logs the captured exception and its backtrace.
# NOTE(review): relies on the exception wrapper exposing #type -- confirm
# against the ErrorPage API.
def log_exception
return unless BetterErrors.logger
message = "\n#{@error_page.exception.type} - #{@error_page.exception.message}:\n"
@error_page.backtrace_frames.each do |frame|
message << " #{frame}\n"
end
BetterErrors.logger.fatal message
end
# Services an AJAX request (variable inspection / REPL) from a rendered
# error page.
def internal_call(env, opts)
# BUG FIX: also guard against @error_page being nil -- hitting the AJAX
# endpoint before any error has been captured previously raised
# NoMethodError on nil instead of returning "Session expired".
if @error_page.nil? || opts[:id] != @error_page.id
return [200, { "Content-Type" => "text/plain; charset=utf-8" }, [JSON.dump(error: "Session expired")]]
end
env["rack.input"].rewind
response = @error_page.send("do_#{opts[:method]}", JSON.parse(env["rack.input"].read))
[200, { "Content-Type" => "text/plain; charset=utf-8" }, [JSON.dump(response)]]
end
# Fallback body shown when /__better_errors is visited with no error yet.
def no_errors_page
"<h1>No errors</h1><p>No errors have been recorded yet.</p><hr>" +
"<code>Better Errors v#{BetterErrors::VERSION}</code>"
end
end
end
Use Rack::Request to check proxy-forwarded IPs
require "json"
require "ipaddr"
require "set"
module BetterErrors
# Better Errors' error handling middleware. Including this in your middleware
# stack will show a Better Errors error page for exceptions raised below this
# middleware.
#
# If you are using Ruby on Rails, you do not need to manually insert this
# middleware into your middleware stack.
#
# @example Sinatra
# require "better_errors"
#
# if development?
# use BetterErrors::Middleware
# end
#
# @example Rack
# require "better_errors"
# if ENV["RACK_ENV"] == "development"
# use BetterErrors::Middleware
# end
#
class Middleware
# The set of IP addresses that are allowed to access Better Errors.
#
# Set to `{ "127.0.0.1/8", "::1/128" }` by default.
ALLOWED_IPS = Set.new
# Adds an address to the set of IP addresses allowed to access Better
# Errors.
def self.allow_ip!(addr)
ALLOWED_IPS << IPAddr.new(addr)
end
# Loopback addresses are trusted by default.
allow_ip! "127.0.0.0/8"
allow_ip! "::1/128" rescue nil # windows ruby doesn't have ipv6 support
# A new instance of BetterErrors::Middleware
#
# @param app The Rack app/middleware to wrap with Better Errors
# @param handler The error handler to use.
def initialize(app, handler = ErrorPage)
@app = app
@handler = handler
end
# Calls the Better Errors middleware
#
# @param [Hash] env
# @return [Array]
def call(env)
if allow_ip? env
better_errors_call env
else
@app.call env
end
end
private
def allow_ip?(env)
request = Rack::Request.new(env)
return true unless request.ip and !request.ip.strip.empty?
ip = IPAddr.new request.ip.split("%").first
ALLOWED_IPS.any? { |subnet| subnet.include? ip }
end
def better_errors_call(env)
case env["PATH_INFO"]
when %r{/__better_errors/(?<id>.+?)/(?<method>\w+)\z}
internal_call env, $~
when %r{/__better_errors/?\z}
show_error_page env
else
protected_app_call env
end
end
def protected_app_call(env)
@app.call env
rescue Exception => ex
@error_page = @handler.new ex, env
log_exception
show_error_page(env, ex)
end
def show_error_page(env, exception=nil)
type, content = if @error_page
if text?(env)
[ 'plain', @error_page.render('text') ]
else
[ 'html', @error_page.render ]
end
else
[ 'html', no_errors_page ]
end
status_code = 500
if defined? ActionDispatch::ExceptionWrapper
status_code = ActionDispatch::ExceptionWrapper.new(env, exception).status_code
end
[status_code, { "Content-Type" => "text/#{type}; charset=utf-8" }, [content]]
end
def text?(env)
env["HTTP_X_REQUESTED_WITH"] == "XMLHttpRequest" ||
!env["HTTP_ACCEPT"].to_s.include?('html')
end
def log_exception
return unless BetterErrors.logger
message = "\n#{@error_page.exception.type} - #{@error_page.exception.message}:\n"
@error_page.backtrace_frames.each do |frame|
message << " #{frame}\n"
end
BetterErrors.logger.fatal message
end
def internal_call(env, opts)
if opts[:id] != @error_page.id
return [200, { "Content-Type" => "text/plain; charset=utf-8" }, [JSON.dump(error: "Session expired")]]
end
env["rack.input"].rewind
response = @error_page.send("do_#{opts[:method]}", JSON.parse(env["rack.input"].read))
[200, { "Content-Type" => "text/plain; charset=utf-8" }, [JSON.dump(response)]]
end
def no_errors_page
"<h1>No errors</h1><p>No errors have been recorded yet.</p><hr>" +
"<code>Better Errors v#{BetterErrors::VERSION}</code>"
end
end
end
|
# Encoding: utf-8
# ASP.NET Core Buildpack
# Copyright 2016 the original author or authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
require_relative '../app_dir'
module AspNetCoreBuildpack
  # Thin wrapper around the `dotnet` CLI used during staging.
  class Dotnet
    # shell: command runner exposing an env hash and exec(cmd, out).
    def initialize(shell)
      @shell = shell
    end

    # Runs `dotnet restore` for every project.json found under +dir+.
    # HOME, LD_LIBRARY_PATH and PATH are set so the vendored dotnet CLI
    # and libunwind are picked up.
    # FIX: newer dotnet CLIs removed the --quiet flag; the supported
    # equivalent is `--verbosity minimal`.
    def restore(dir, out)
      @shell.env['HOME'] = dir
      @shell.env['LD_LIBRARY_PATH'] = "$LD_LIBRARY_PATH:#{dir}/libunwind/lib"
      @shell.env['PATH'] = "$PATH:#{dir}/.dotnet"
      project_list = AppDir.new(dir).with_project_json.join(' ')
      cmd = "bash -c 'cd #{dir}; dotnet restore --verbosity minimal #{project_list}'"
      @shell.exec(cmd, out)
    end
  end
end
dotnet restore no longer supports --quiet
# Encoding: utf-8
# ASP.NET Core Buildpack
# Copyright 2016 the original author or authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
require_relative '../app_dir'
module AspNetCoreBuildpack
class Dotnet
def initialize(shell)
@shell = shell
end
def restore(dir, out)
@shell.env['HOME'] = dir
@shell.env['LD_LIBRARY_PATH'] = "$LD_LIBRARY_PATH:#{dir}/libunwind/lib"
@shell.env['PATH'] = "$PATH:#{dir}/.dotnet"
project_list = AppDir.new(dir).with_project_json.join(' ')
cmd = "bash -c 'cd #{dir}; dotnet restore --verbosity minimal #{project_list}'"
@shell.exec(cmd, out)
end
end
end
|
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
BuildrPlus::FeatureManager.feature(:jms => [:ejb]) do |f|
  f.enhance(:Config) do
    # Docker container name namespaced by root project, optional app scope
    # and environment code so multiple checkouts can coexist on one host.
    def mq_container_name(project)
      app_scope = BuildrPlus::Config.app_scope
      env_code = BuildrPlus::Config.env_code
      "openmq_#{project.root_project.name}#{app_scope.nil? ? '' : '_'}#{app_scope}_#{env_code}"
    end

    # Stops and removes the project's openmq container, if present.
    def stop_container(project)
      name = mq_container_name(project)
      sh "docker stop #{name} > /dev/null" if is_openmq_running?(project)
      sh "docker rm #{name} > /dev/null" if is_openmq_created?(project)
    end

    # (Re)starts the openmq container on a freshly-allocated port. The port
    # is recorded in a docker label so link_container_to_configuration can
    # recover it later.
    def start_container(project)
      name = mq_container_name(project)
      stop_container(project)
      port = find_free_port
      # FIX: quote the env-var value so the command also works under linux shells.
      sh "docker run -d -ti -P --name=#{name} --net=host -eIMQ_PORTMAPPER_PORT=\"#{port}\" --label org.realityforge.buildr_plus.omq.port=#{port} stocksoftware/openmq > /dev/null"
      link_container_to_configuration(project, BuildrPlus::Config.environment_config)
      BuildrPlus::Config.output_aux_confgs!
    end

    # Publishes the running container's host/port/credentials into the
    # environment configuration used by the application.
    def link_container_to_configuration(project, environment_config)
      name = mq_container_name(project)
      if is_openmq_running?(project)
        omq_port =
          `docker inspect --format '{{ index .Config.Labels "org.realityforge.buildr_plus.omq.port"}}' #{name} 2>/dev/null`.chomp
        environment_config.broker(:host => BuildrPlus::Redfish.docker_ip,
                                  :port => omq_port.to_i,
                                  :admin_username => 'admin',
                                  :admin_password => 'admin')
      end
    end

    # Asks the OS for an unused TCP port by binding port 0 and reading back
    # the assigned port.
    def find_free_port
      server = TCPServer.new('127.0.0.1', 0)
      port = server.addr[1]
      server.close
      port
    end

    def is_openmq_running?(project)
      `docker ps | grep #{mq_container_name(project)}`.chomp != ''
    end

    def is_openmq_created?(project)
      `docker ps -a | grep #{mq_container_name(project)}`.chomp != ''
    end
  end
  f.enhance(:ProjectExtension) do
    after_define do |project|
      if project.ipr?
        if BuildrPlus::FeatureManager.activated?(:redfish)
          desc 'Start an openmq server useful to test against'
          project.task ':openmq:start' do
            BuildrPlus::Jms.start_container(project)
          end
          desc 'Stop the openmq server'
          project.task ':openmq:stop' do
            BuildrPlus::Jms.stop_container(project)
          end
        end
      end
    end
  end
end
Quote the environment variable so it works under Linux
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
BuildrPlus::FeatureManager.feature(:jms => [:ejb]) do |f|
  f.enhance(:Config) do
    # Docker container name namespaced by root project, optional app scope
    # and environment code so multiple checkouts can coexist on one host.
    def mq_container_name(project)
      app_scope = BuildrPlus::Config.app_scope
      env_code = BuildrPlus::Config.env_code
      "openmq_#{project.root_project.name}#{app_scope.nil? ? '' : '_'}#{app_scope}_#{env_code}"
    end
    # Stops and removes the project's openmq container, if present.
    def stop_container(project)
      name = mq_container_name(project)
      sh "docker stop #{name} > /dev/null" if is_openmq_running?(project)
      sh "docker rm #{name} > /dev/null" if is_openmq_created?(project)
    end
    # (Re)starts the openmq container on a freshly-allocated port; the port
    # is quoted (works under linux shells) and recorded in a docker label so
    # link_container_to_configuration can recover it later.
    def start_container(project)
      name = mq_container_name(project)
      stop_container(project)
      port = find_free_port
      sh "docker run -d -ti -P --name=#{name} --net=host -eIMQ_PORTMAPPER_PORT=\"#{port}\" --label org.realityforge.buildr_plus.omq.port=#{port} stocksoftware/openmq > /dev/null"
      link_container_to_configuration(project, BuildrPlus::Config.environment_config)
      BuildrPlus::Config.output_aux_confgs!
    end
    # Publishes the running container's host/port/credentials into the
    # environment configuration used by the application.
    def link_container_to_configuration(project, environment_config)
      name = mq_container_name(project)
      if is_openmq_running?(project)
        omq_port =
          `docker inspect --format '{{ index .Config.Labels "org.realityforge.buildr_plus.omq.port"}}' #{name} 2>/dev/null`.chomp
        environment_config.broker(:host => BuildrPlus::Redfish.docker_ip,
                                  :port => omq_port.to_i,
                                  :admin_username => 'admin',
                                  :admin_password => 'admin')
      end
    end
    # Asks the OS for an unused TCP port by binding port 0.
    def find_free_port
      server = TCPServer.new('127.0.0.1', 0)
      port = server.addr[1]
      server.close
      port
    end
    def is_openmq_running?(project)
      `docker ps | grep #{mq_container_name(project)}`.chomp != ''
    end
    def is_openmq_created?(project)
      `docker ps -a | grep #{mq_container_name(project)}`.chomp != ''
    end
  end
  f.enhance(:ProjectExtension) do
    # Register openmq start/stop tasks on the root (ipr) project when the
    # redfish feature is active.
    after_define do |project|
      if project.ipr?
        if BuildrPlus::FeatureManager.activated?(:redfish)
          desc 'Start an openmq server useful to test against'
          project.task ':openmq:start' do
            BuildrPlus::Jms.start_container(project)
          end
          desc 'Stop the openmq server'
          project.task ':openmq:stop' do
            BuildrPlus::Jms.stop_container(project)
          end
        end
      end
    end
  end
end
|
module Bundler::Github
  module DslPatch
    # The list of valid options is unfortunately buried in this original
    # method, so to add an option without reimplementing (and duplicating)
    # code, we rescue the exception, and swap our :github option
    # for :git, with the expanded URI
    def _normalize_options_with_github(*args)
      begin
        _normalize_options_without_github(*args)
      rescue Bundler::InvalidOption => exception
        # Only handle complaints about our :github key; anything else is a
        # genuine user error and is re-raised untouched.
        if exception.message =~ /:github/
          Bundler::Github.expand_options(*args)
          retry
        else
          raise exception
        end
      end
    end
    # alias-method-chain style hook: when included, wrap Bundler's
    # _normalize_options with the github-aware variant above.
    def self.included(mod)
      mod.send :alias_method, :_normalize_options_without_github, :_normalize_options
      mod.send :alias_method, :_normalize_options, :_normalize_options_with_github
    end
  end
end
Fix grammar in comment
module Bundler::Github
  module DslPatch
    # The list of valid options is buried in this original method, so to add
    # an option without reimplementing (and duplicating) code, we rescue the
    # exception, and swap our :github option for :git, with the expanded URI
    def _normalize_options_with_github(*args)
      begin
        _normalize_options_without_github(*args)
      rescue Bundler::InvalidOption => exception
        # Only handle complaints about our :github key; anything else is a
        # genuine user error and is re-raised untouched.
        if exception.message =~ /:github/
          Bundler::Github.expand_options(*args)
          retry
        else
          raise exception
        end
      end
    end
    # alias-method-chain style hook: when included, wrap Bundler's
    # _normalize_options with the github-aware variant above.
    def self.included(mod)
      mod.send :alias_method, :_normalize_options_without_github, :_normalize_options
      mod.send :alias_method, :_normalize_options, :_normalize_options_with_github
    end
  end
end
|
# ----------------------------------------------------------------------------- #
# File: window.rb
# Description: A wrapper over window
# Author: jkepler http://github.com/mare-imbrium/canis/
# Date: Around for a long time
# License: Same as Ruby's License (http://www.ruby-lang.org/LICENSE.txt)
# Last update: 2014-05-23 15:41
#
# == CHANGED
# removed dead or redundant code - 2014-04-22 - 12:53
# - replaced getchar with new simpler one - 2014-05-04
# - introduced key_tos to replace keycode_tos, moved to Util in rwidget.rb
# - reintroduced nedelay and reduced escdelay
#
# == TODO
# strip and remove cruft. Several methods marked as deprecated.
# ----------------------------------------------------------------------------- #
#
require 'canis/core/system/ncurses'
require 'canis/core/system/panel'
# this is since often windows are declared with 0 height or width and this causes
# crashes in the most unlikely places. This prevents me from having to write the ternary
# e.g.
# @layout[:width].ifzero(FFI::NCurses::LINES-2)
# FIX: the Fixnum constant was removed in Ruby 3.2; patch Integer (the
# unified class / deprecated alias target since Ruby 2.4) so this helper
# keeps working on modern rubies. Behaviour is unchanged for callers,
# e.g. @layout[:width].ifzero(FFI::NCurses::LINES-2).
class Integer
  # Returns self unless zero, in which case the fallback +v+ is returned.
  def ifzero v
    return self if self != 0
    return v
  end
end
# This class is to be extended so that it can be called by anyone wanting to implement
# chunks of text with color and attributes. A chunkline consists of multiple chunks of colored text
# and should implement a +each_with_color+.
# The purpose of adding this is so that +chunk.rb+ does not need to be required if colored text
# is not being used by an application.
class AbstractChunkLine; end
module Canis
class Window
attr_reader :width, :height, :top, :left
attr_accessor :layout # hash containing hwtl
attr_reader :panel # reader requires so he can del it in end
attr_reader :window_type # window or pad to distinguish 2009-11-02 23:11
attr_accessor :name # more for debugging log files. 2010-02-02 19:58
#attr_accessor :modified # has it been modified and may need a refresh 2014-04-22 - 10:23 CLEANUP
# for root windows we need to know the form so we can ask it to update when
# there are overlapping windows.
attr_accessor :form
# creation and layout related {{{
# @param [Array, Hash] window coordinates (ht, w, top, left)
# or
# @param [int, int, int, int] window coordinates (ht, w, top, left)
# 2011-09-21 allowing array, or 4 ints, in addition to hash @since 1.3.1
# Builds the ncurses window + panel pair from the given layout and seeds a
# number of process-wide globals used elsewhere in Canis ($error_message_row,
# $key_map_type, ...). Zero height/width are expanded to the full screen.
def initialize(*args)
  case args.size
  when 1
    case args[0]
    when Array, Hash
      layout = args[0]
    else
      raise ArgumentError, "Window expects 4 ints, array of 4 ints, or Hash in constructor"
    end
  when 4
    layout = { :height => args[0], :width => args[1], :top => args[2], :left => args[3] }
  end
  @visible = true
  set_layout(layout)
  #$log.debug "XXX:WINDOW got h #{@height}, w #{@width}, t #{@top}, l #{@left} "
  @height = FFI::NCurses.LINES if @height == 0 # 2011-11-14 added since tired of checking for zero
  @width = FFI::NCurses.COLS if @width == 0
  @window = FFI::NCurses.newwin(@height, @width, @top, @left) # added FFI 2011-09-6
  # trying out refreshing underlying window.
  $global_windows ||= []
  # this causes issues padrefresh failing when display_list does a resize.
  #$global_windows << self
  @panel = Ncurses::Panel.new(@window) # added FFI 2011-09-6
  #$error_message_row = $status_message_row = Ncurses.LINES-1
  $error_message_row ||= Ncurses.LINES-1
  $error_message_col ||= 1 # ask (bottomline) uses 0 as default so you can have mismatch. XXX
  $status_message ||= Canis::Variable.new # in case not an App
  # 2014-05-07 - 12:29 CANIS earlier this was called $key_map but that suggests a map.
  $key_map_type ||= :vim
  $esc_esc = true; # give me double esc as 2727 so i can map it.
  init_vars
  # @key_reader may have been injected by a subclass before this runs.
  unless @key_reader
    create_default_key_reader
  end
end
# Per-window defaults: enables keypad mode (arrow/function keys arrive as
# single codes) and initializes name/modified plus assorted globals.
def init_vars
  @window_type = :WINDOW
  Ncurses::keypad(@window, true)
  # Added this so we can get Esc, and also C-c pressed in succession does not crash system
  # 2011-12-20 half-delay crashes system as does cbreak
  #This causes us to be unable to process gg qq since getch won't wait.
  #FFI::NCurses::nodelay(@window, bf = true)
  # wtimeout was causing RESIZE sigwinch to only happen after pressing a key
  #Ncurses::wtimeout(@window, $ncurses_timeout || 500) # will wait a second on wgetch so we can get gg and qq
  #@stack = [] # since we have moved to handler 2014-04-20 - 11:15
  @name ||="#{self}"
  @modified = true
  $catch_alt_digits ||= false # is this where is should put globals ? 2010-03-14 14:00 XXX
end
##
# this is an alternative constructor
# Alternative constructor: builds a (by default full-screen) root window,
# refreshes it and registers it in $global_windows.
# NOTE(review): @layout/@window here are class-level instance variables.
def self.root_window(layout = { :height => 0, :width => 0, :top => 0, :left => 0 })
  #Canis::start_ncurses
  @layout = layout
  @window = Window.new(@layout)
  @window.name = "Window::ROOTW:#{$global_windows.count}"
  @window.wrefresh
  Ncurses::Panel.update_panels
  # earlier we only put root window, now we may need to do all (bline - numbered menu - alert)
  $global_windows << @window unless $global_windows.include? @window
  return @window
end
# This refreshes the root window whenever overlapping windows are
# destroyed or moved.
# This works by asking the root window's form to repaint all its objects.
# This is now being called whenever a window is destroyed (and also resized).
# However, it must
# manually be called if you move a window.
# NOTE : if there are too many root windows, this could get expensive since we are updating all.
# We may need to have a way to specify which window to repaint.
# If there are non-root windows above, we may have manually refresh only the previous one.
#
# Repaints the most recent root window by sending the 1000 "hack" keycode
# to its form (which the form's handler interprets as a repaint request).
# Called whenever a window is destroyed or resized.
def self.refresh_all
  #Ncurses.touchwin(FFI::NCurses.stdscr)
  # above blanks out entire screen
  # in case of multiple root windows lets just do last otherwise too much refreshing.
  return unless $global_windows.last
  wins = [ $global_windows.last ]
  wins.each_with_index do |w,i|
    $log.debug " REFRESH_ALL on #{w.name} (#{i}) sending 1000"
    # NOTE 2014-05-01 - 20:25 although we have reached the root window from any level
    # however, this is sending the hack to whoever is trapping the key, which in our current
    # case happends to be Viewer, *not* the root form. We need to send to root form.
    f = w.form
    if f
      # send hack to root windows form if passed.
      f.handle_key 1000
    end
    #w.ungetch(1000)
    # below blanks out entire screen too
    #FFI::NCurses.touchwin(w.get_window)
    #$log.debug "XXX: refreshall diong window "
    #w.hide
    #w.show
    #Ncurses.refresh
    #w.wrefresh
  end
  #Ncurses::Panel.update_panels
end
# 2009-10-13 12:24
# not used as yet
# this is an alternative constructor
# created if you don't want to create a hash first
# 2011-09-21 V1.3.1 You can now send an array to Window constructor
# Convenience constructor: builds a Window from four ints without the
# caller assembling a layout hash first. (Since 1.3.1 the Window
# constructor itself also accepts an array or four ints.)
def self.create_window(h=0, w=0, t=0, l=0)
  @window = Window.new(:height => h, :width => w, :top => t, :left => l)
  @window
end
# Applies a new layout, resizes/moves the underlying ncurses window and
# triggers a repaint of the root window.
def resize_with(layout)
  $log.debug " DARN ! This awready duz a resize!! if h or w or even top or left changed!!! XXX"
  set_layout(layout)
  wresize(height, width)
  mvwin(top, left)
  Window.refresh_all
end
# Metaprogrammed setters (width=, height=, top=, left=): update the layout
# hash and resize only when the value actually changed.
%w[width height top left].each do |side|
  eval(
    "def #{side}=(n)
       return if n == #{side}
       @layout[:#{side}] = n
       resize_with @layout
     end"
  )
end
##
# Creating variables case of array, we still create the hash
# @param array or hash containing h w t and l
# Stores the layout and mirrors height/width/top/left into instance
# variables. Array form is nil-checked; Hash form is stored as-is.
def set_layout(layout)
  case layout
  when Array
    $log.error "NIL in window constructor" if layout.include? nil
    raise ArgumentError, "Nil in window constructor" if layout.include? nil
    # NOTE this is just setting, and not replacing zero with max values
    @height, @width, @top, @left = *layout
    raise ArgumentError, "Nil in window constructor" if @top.nil? || @left.nil?
    @layout = { :height => @height, :width => @width, :top => @top, :left => @left }
  when Hash
    @layout = layout
    [:height, :width, :top, :left].each do |name|
      instance_variable_set("@#{name}", @layout[name])
    end
  end
end
# --- layout and creation related }}}
# ADDED DUE TO FFI
# Thin delegating wrappers over (FFI::)Ncurses for this window.
def wrefresh
  Ncurses.wrefresh(@window)
end
# Frees the underlying ncurses window (see also #destroy).
def delwin # 2011-09-7
  Ncurses.delwin(@window)
end
def attron *args
  FFI::NCurses.wattron @window, *args
end
def attroff *args
  FFI::NCurses.wattroff @window, *args
end
#
# ## END FFI
# Re-applies the current layout (delegates to resize_with).
def resize
  resize_with(@layout)
end
# Ncurses
# -- dead code: cursor position accessors; pos/y/x raise immediately. --
def pos
  raise "dead code ??"
  return y, x
end
def y
  raise "dead code ??"
  Ncurses.getcury(@window)
end
def x
  raise "dead code ??"
  Ncurses.getcurx(@window)
end
# NOTE(review): x= and y= call the raising y/x above, so they raise too.
def x=(n) move(y, n) end
def y=(n) move(n, x) end
#def move(y, x)
#return unless @visible
## Log.debug([y, x] => caller[0,4])
##@window.wmove(y, x) # bombing since ffi-ncurses 0.4.0 (maybe it was never called
##earlier. was crashing in appemail.rb testchoose.
#wmove y,x # can alias it
#end
# since include FFI is taking over, i need to force it here. not going into
# method_missing
# Moves this window's cursor to row +y+, column +x+. Defined explicitly
# so the call does not fall into method_missing.
def wmove y,x
  #Ncurses.wmove @window, y, x
  FFI::NCurses.wmove @window, y, x
end
alias :move :wmove
# Delegates unknown calls to FFI::NCurses, preferring the windowed
# variants: "mvfoo" -> "mvwfoo", then "foo" -> "wfoo", finally "foo".
# @window is always passed as the first argument.
def method_missing(name, *args)
  name = name.to_s
  if (name[0,2] == "mv")
    test_name = name.dup
    test_name[2,0] = "w" # insert "w" after"mv"
    if (FFI::NCurses.respond_to?(test_name))
      return FFI::NCurses.send(test_name, @window, *args)
    end
  end
  test_name = "w" + name
  if (FFI::NCurses.respond_to?(test_name))
    return FFI::NCurses.send(test_name, @window, *args)
  end
  FFI::NCurses.send(name, @window, *args)
end
# Mirrors method_missing: reports the FFI::NCurses "w"-/"mvw"-prefixed
# delegates as respond-able.
# FIX: accepts the standard optional +include_private+ flag (the previous
# one-argument signature made respond_to?(name, true) raise
# ArgumentError), and falls back to +super+ so methods actually defined
# on Window (e.g. hide, show) are still reported.
def respond_to?(name, include_private = false)
  name_s = name.to_s
  if (name_s[0,2] == "mv" && FFI::NCurses.respond_to?("mvw" + name_s[2..-1]))
    return true
  end
  FFI::NCurses.respond_to?("w" + name_s) || FFI::NCurses.respond_to?(name_s) || super
end
#--
# removing some methods that not used or used once
# leaving here so we not what to do to print in these cases
# Prints +string+ clipped to +width+ columns via waddnstr; a width of 0
# means "use the full screen width".
# FIX: the old default `width = width` evaluated the freshly-declared
# parameter (which is nil at that point), NOT the attr_reader — so the
# clip width silently became nil. Qualify with self to get the window
# width as intended.
def print(string, width = self.width)
  w = width == 0 ? Ncurses.COLS : width
  waddnstr(string.to_s, w) # changed 2011 dts
end
#def print_yx(string, y = 0, x = 0)
#w = width == 0? Ncurses.COLS : width
#mvwaddnstr(y, x, string, w) # changed 2011 dts
#end
#++
# dead code ??? --- {{{
# NOTE: many of these methods using width will not work since root windows width
# is 0
# -- dead code: each of these (except wnoutrefresh) raises immediately so
# any remaining caller surfaces during testing. --
def print_empty_line
  raise "print empty is working"
  return unless visible?
  w = getmaxx == 0? Ncurses.COLS : getmaxx
  printw(' ' * w)
end
def print_line(string)
  raise "print line is working"
  w = getmaxx == 0? Ncurses.COLS : getmaxx
  print(string.ljust(w))
end
def puts(*strings)
  raise "puts is working, remove this"
  print(strings.join("\n") << "\n")
end
def _refresh
  raise "dead code remove"
  return unless visible?
  @window.refresh
end
def wnoutrefresh
  #raise "dead code ???"
  return unless visible?
  # next line gives error XXX DEAD
  @window.wnoutrefresh
end
def color=(color)
  raise "dead code ???"
  @color = color
  @window.color_set(color, nil)
end
def highlight_line(color, y, x, max)
  raise "dead code"
  @window.mvchgat(y, x, max, Ncurses::A_NORMAL, color, nil)
end
# doesn't seem to work, clears first line, not both
def clear
  # return unless visible?
  raise "dead code ??"
  move 0, 0
  puts *Array.new(height){ ' ' * (width - 1) }
end
def on_top
  raise "on_top used, remove this line dead code"
  Ncurses::Panel.top_panel @panel.pointer
  wnoutrefresh
end
# --- dead code ??? }}}
# return the character to the keyboard buffer to be read again.
# Pushes +ch+ back onto the ncurses input queue so the next getch returns
# it (used e.g. by the refresh hack keycode).
def ungetch(ch)
  Ncurses.ungetch(ch)
end
# reads a character from keyboard and returns
# NOTE:
# if a function key is pressed, multiple such ints will be returned one after the other
# so the caller must decipher the same. See +getchar()+
#
# @return int
# @return -1 if no char read
# ORIGINALLY After esc there was a timeout, but after others there was notimeout, so it would wait
# indefinitely for a key
# NOTE : caller may set a timeout prior to calling, but not change setting after since this method
# maintains the default state in +ensure+. e.g. +widget.rb+ does a blocking get in +_process_key+
# Curses sets a timeout when ESCAPE is pressed, it is called ESCDELAY and is 1000 milliseconds.
# You may reduce it if you are not on some old slow telnet session. This returns faster from an esc
# although there are still some issues. ESC-ESC becomes an issue, but if i press ESC-ESC-1 then esc-esc comes
# together. otherwise there is a -1 between each esc.
#
# Reads one raw int keycode from the window. After an ESC (27) a short
# wtimeout is set so the following key (or its absence) can be detected
# quickly; otherwise blocking reads are restored. Returns 3 on C-c and
# -1 on any other read error.
def getch
  #c = @window.getch
  #FFI::NCurses::nodelay(@window, true)
  #FFI::NCurses::wtimeout(@window, 0)
  #$log.debug " #{Time.now.to_f} inside MAIN before getch "
  c = FFI::NCurses.wgetch(@window)
  # the only reason i am doing this is so ESC can be returned if no key is pressed
  # after that, not sure how this effects everything. most likely I should just
  # go back to using a wtimeout, and not worry about resize requiring a keystroke
  if c == 27
    $escstart = Time.now.to_f
    # if ESC pressed don't wait too long for next key
    Ncurses::wtimeout(@window, $ncurses_timeout || 500) # will wait n millisecond on wgetch so that we can return if no
  else
    FFI::NCurses.set_escdelay(100)
    # this means keep waiting for a key.
    Ncurses::nowtimeout(@window, true)
  end
  c
rescue SystemExit, Interrupt
  #FFI::NCurses.flushinp
  3 # is C-c
rescue StandardError
  -1 # read failed; treated as "no key" (NOTE(review): earlier comment said C-c — that's the rescue above)
ensure
  # whatever the default is, is to be set here in case caller changed it.
  #FFI::NCurses::nodelay(@window, true)
end
# Earlier this was handled by window itself. Now we delegate to a reader
# @return int keycode, can be function key or meta or arrow key.
#
# NOTE:
# This is called by user programs in a loop.
# We are now moving from returning an int to returning a string similar to what
# user would get on commandline using C-v
#
# Delegates to the pluggable @key_reader (set up in initialize via
# create_default_key_reader) which assembles multi-int sequences.
def getchar
  @key_reader.getchar
end
# setup and reset
# Ncurses panel
# Hides the window's panel and tracks visibility in @visible.
def hide
  #return unless visible? # added 2011-10-14 these 2 are not behaving properly
  Ncurses::Panel.hide_panel @panel.pointer
  #Ncurses.refresh # wnoutrefresh
  Ncurses::Panel.update_panels # added so below window does not need to do this 2011-10-1
  @visible = false
end
# Re-shows a hidden panel and marks the window visible again.
def show
  #return if visible? # added 2011-10-14 these 2 are not behaving properly
  Ncurses::Panel.show_panel @panel.pointer
  #Ncurses.refresh # wnoutrefresh
  Ncurses::Panel.update_panels # added so below window does not need to do this 2011-10-1
  @visible = true
end
# Visibility flag as last set by hide/show (true on construction).
def visible?
  @visible
end
##
# destroy window, panel and any pads that were requested
#
# Tears down the panel, the ncurses window and any pads handed out via
# get_pad, unregisters from $global_windows, then repaints the root
# window so anything underneath is redrawn.
def destroy
  # typically the ensure block should have this
  #$log.debug "win destroy start"
  $global_windows.delete self
  Ncurses::Panel.del_panel(@panel.pointer) if @panel
  delwin() if @window
  Ncurses::Panel.update_panels # added so below window does not need to do this 2011-10-1
  # destroy any pads that were created by widgets using get_pad
  @pads.each { |pad|
    FFI::NCurses.delwin(pad) if pad
    pad = nil
  } if @pads
  # added here to hopefully take care of this issue once and for all.
  # Whenever any window is destroyed, the root window is repainted.
  #
  Window.refresh_all
  #$log.debug "win destroy end"
end
#
# 2011-11-13 since 1.4.1
# Widgets can get window to create a pad for them. This way when the window
# is destroyed, it will delete all the pads. A widget wold not be able to do this.
# The destroy method of the widget will be called.
# Creates an ncurses pad of the given size on behalf of a widget and
# registers it so #destroy frees it together with the window (a widget
# cannot hook window teardown itself).
def get_pad content_rows, content_cols
  @pads ||= []
  new_pad = FFI::NCurses.newpad(content_rows, content_cols)
  @pads << new_pad
  new_pad
end
# print and chunk related --- {{{
#
# Allows user to send data as normal string or chunks for printing
# An array is assumed to be a chunk containing color and attrib info
#
# Prints +content+ at row/col: a plain String goes through printstring,
# an AbstractChunkLine is rendered chunk-by-chunk with per-chunk colors,
# and a bare Array is treated as either a list of chunks (legacy) or a
# single [color, text, attrib] triple.
def printstring_or_chunks(r,c,content, color, att = Ncurses::A_NORMAL)
  if content.is_a? String
    printstring(r,c,content, color, att)
  elsif content.is_a? AbstractChunkLine
    #$log.debug "XXX: using chunkline" # 2011-12-10 12:40:13
    wmove r, c
    a = get_attrib att
    # please add width to avoid overflow
    show_colored_chunks content, color, a
  elsif content.is_a? Array
    # several chunks in one row - NOTE Very experimental may change
    if content[0].is_a? Array
      $log.warn "XXX: WARNING outdated should send in a chunkline"
      wmove r, c
      a = get_attrib att
      # please add width to avoid overflow
      show_colored_chunks content, color, a
    else
      # a single row chunk - NOTE Very experimental may change
      text = content[1].dup
      printstring r, c, text, content[0] || color, content[2] || att
    end
  end
end
#
# prints a string formatted in our new experimental coloring format
# taken from tmux. Currently, since i have chunks workings, i convert
# to chunks and use the existing print function. This could change.
# An example of a formatted string is:
# s="#[fg=green]testing chunks #[fg=yellow, bg=red, bold]yellow #[reverse] reverseme \
# #[normal]normal#[bg = black]just yellow#[fg=blue],blue now #[underline] underlined text"
# Ideally I should push and pop colors which the shell does not do with ansi terminal sequences.
# That way i can have a line in red,
# with some word in yellow, and then the line continues in red.
#
# Prints a string containing tmux-style "#[fg=...,bg=...]" colour markup
# by first converting it to a chunkline and reusing the chunk printer.
# +att+ may be a symbol (e.g. :bold) or an already-resolved attribute int.
# FIX: the Fixnum constant was removed in Ruby 3.2 — test against Integer
# (identical semantics on 2.4+).
def printstring_formatted(r,c,content, color, att = Ncurses::A_NORMAL)
  att = get_attrib att unless att.is_a? Integer
  chunkline = convert_to_chunk(content, color, att)
  printstring_or_chunks r,c, chunkline, color, att
end # print
#
# print a formatted line right aligned
# c (col) is ignored and calculated based on width and unformatted string length
#
# Right-aligns a formatted line: the passed +c+ is ignored and the column
# is computed from the window width minus the markup-stripped length.
def printstring_formatted_right(r,c,content, color, att = Ncurses::A_NORMAL)
  clean = content.gsub /#\[[^\]]*\]/,'' # clean out all markup
  #c = actual_width() - clean.length # actual width not working if resize
  c = getmaxx() - clean.length
  printstring_formatted(r,c,content, color, att )
end
private
# Returns the markup parser: the custom one if set, else a tmux-style
# DefaultColorParser. NOTE(review): does not memoize — a new parser is
# built on every call while @color_parser is unset.
def get_default_color_parser
  require 'canis/core/util/defaultcolorparser'
  @color_parser || DefaultColorParser.new
end
# supply with a color parser, if you supplied formatted text
public
# Installs the colour parser used for formatted text: :tmux selects the
# built-in default, anything else is taken as a custom parser object.
def color_parser f
  $log.debug "XXX: color_parser setting in window to #{f} "
  require 'canis/core/include/colorparser'
  if f == :tmux
    @color_parser = get_default_color_parser()
  else
    @color_parser = f
  end
end
#
# Takes a formatted string and converts the parsed parts to chunks.
#
# @param [String] takes the entire line or string and breaks into an array of chunks
# @yield chunk if block
# @return [ChunkLine] # [Array] array of chunks
public
# Takes a formatted string and converts the parsed parts to chunks,
# lazily building the parser/converter pair on first use.
# @param s [String] the entire line to break into chunks
# @return [ChunkLine] array-like collection of chunks
def convert_to_chunk s, colorp=$datacolor, att=FFI::NCurses::A_NORMAL
  unless @color_parser
    require 'canis/core/include/colorparser'
    @color_parser = get_default_color_parser()
    @converter = Chunks::ColorParser.new @color_parser
  end
  @converter.convert_to_chunk s, colorp, att
end
##
# prints a string at row, col, with given color and attribute
# added by rk 2008-11-29 19:01
# I usually use this, not the others ones here
# @param r - row
# @param c - col
# @param string - text to print
# @param color - color pair
# @ param att - ncurses attribute: normal, bold, reverse, blink,
# underline
public
# Prints +string+ at row +r+, col +c+ with the given colour pair and
# ncurses attribute (symbol or int; nil falls back to A_NORMAL).
def printstring(r,c,string, color, att = Ncurses::A_NORMAL)
  #$log.debug " #{@name} inside window printstring r #{r} c #{c} #{string} "
  if att.nil?
    att = Ncurses::A_NORMAL
  else
    att = get_attrib att
  end
  wattron(Ncurses.COLOR_PAIR(color) | att)
  mvwprintw(r, c, "%s", :string, string);
  wattroff(Ncurses.COLOR_PAIR(color) | att)
end
##
# prints the border for message boxes
#
# NOTE : FOR MESSAGEBOXES ONLY !!!! Then why not move to messagebox FIXME
# Paints the background and box border for message boxes (clears the area
# with horizontal lines, then draws corners/edges).
# NOTE : FOR MESSAGEBOXES ONLY — uses -5/-6/-3/-4 offsets specific to
# their geometry.
def print_border_mb row, col, height, width, color, attr
  # the next is for xterm-256
  att = get_attrib attr
  len = width
  len = Ncurses.COLS-0 if len == 0
  # print a bar across the screen
  #attron(Ncurses.COLOR_PAIR(color) | att)
  # this works for newmessagebox but not for old one.
  # Even now in some cases some black shows through, if the widget is printing spaces
  # such as field or textview on a messagebox.
  (row-1).upto(row+height-1) do |r|
    mvwhline(r, col, 1, len)
  end
  #attroff(Ncurses.COLOR_PAIR(color) | att)
  mvwaddch row, col, Ncurses::ACS_ULCORNER
  mvwhline( row, col+1, Ncurses::ACS_HLINE, width-6)
  mvwaddch row, col+width-5, Ncurses::ACS_URCORNER
  mvwvline( row+1, col, Ncurses::ACS_VLINE, height-4)
  mvwaddch row+height-3, col, Ncurses::ACS_LLCORNER
  mvwhline(row+height-3, col+1, Ncurses::ACS_HLINE, width-6)
  mvwaddch row+height-3, col+width-5, Ncurses::ACS_LRCORNER
  mvwvline( row+1, col+width-5, Ncurses::ACS_VLINE, height-4)
end
##
# prints a border around a widget, CLEARING the area.
# If calling with a pad, you would typically use 0,0, h-1, w-1.
# FIXME can this be moved to module Bordertitle ?
# prints a border around a widget, CLEARING the interior with spaces
# first, then delegating to print_border_only for the frame.
# If calling with a pad, you would typically use 0,0, h-1, w-1.
def print_border row, col, height, width, color, att=Ncurses::A_NORMAL
  raise "height needs to be supplied." if height.nil?
  raise "width needs to be supplied." if width.nil?
  att ||= Ncurses::A_NORMAL
  #$log.debug " inside window print_border r #{row} c #{col} h #{height} w #{width} "
  # 2009-11-02 00:45 made att nil for blanking out
  # FIXME - in tabbedpanes this clears one previous line ??? XXX when using a textarea/view
  # when using a pad this calls pads printstring which again reduces top and left !!! 2010-01-26 23:53
  ww=width-2
  (row+1).upto(row+height-1) do |r|
    printstring( r, col+1," "*ww , color, att)
  end
  print_border_only row, col, height, width, color, att
end
## print just the border, no cleanup
#+ Earlier, we would clean up. Now in some cases, i'd like
#+ to print border over what's been done.
# XXX this reduces 1 from width but not height !!! FIXME
# FIXME can this be moved to module Bordertitle ?
def print_border_only row, col, height, width, color, att=Ncurses::A_NORMAL
if att.nil?
att = Ncurses::A_NORMAL
else
att = get_attrib att
end
wattron(Ncurses.COLOR_PAIR(color) | att)
mvwaddch row, col, Ncurses::ACS_ULCORNER
mvwhline( row, col+1, Ncurses::ACS_HLINE, width-2)
mvwaddch row, col+width-1, Ncurses::ACS_URCORNER
mvwvline( row+1, col, Ncurses::ACS_VLINE, height-1)
mvwaddch row+height-0, col, Ncurses::ACS_LLCORNER
mvwhline(row+height-0, col+1, Ncurses::ACS_HLINE, width-2)
mvwaddch row+height-0, col+width-1, Ncurses::ACS_LRCORNER
mvwvline( row+1, col+width-1, Ncurses::ACS_VLINE, height-1)
wattroff(Ncurses.COLOR_PAIR(color) | att)
end
# Previously this printed a chunk as a full line, I've modified it to print on
# one line. This can be used for running text.
# NOTE 2013-03-08 - 17:02 added width so we don't overflow
# NOTE 2014-05-11 - textpad has its own version, so does not call this.
#
# Print a line made up of colored chunks at the current cursor position.
# @param chunks   object responding to +each_with_color+, yielding [text, color, attrib]
# @param defcolor fallback color pair when a chunk has none
# @param defattr  fallback attribute when a chunk has none
# @param wid      maximum number of cells to print (clips overflow)
# @param pcol     number of leading cells to skip (horizontal scroll offset)
def show_colored_chunks(chunks, defcolor = nil, defattr = nil, wid = 999, pcol = 0)
  return unless visible?
  ww = 0   # running count of cells printed so far
  chunks.each_with_color do |text, color, attrib|
    ## 2013-03-08 - 19:11 take care of scrolling by means of pcol
    if pcol > 0
      if pcol > text.length
        # ignore entire chunk and reduce pcol
        pcol -= text.length
        next
      else
        # print portion of chunk and zero pcol
        text = text[pcol..-1]
        pcol = 0
      end
    end
    oldw = ww
    ww += text.length
    if ww > wid
      # if we are exceeding the width then by howmuch
      rem = wid - oldw
      if rem > 0
        # take only as much as we are allowed
        text = text[0,rem]
      else
        break
      end
    end
    color ||= defcolor
    attrib ||= defattr
    cc, bg = ColorMap.get_colors_for_pair color
    #$log.debug "XXX: CHUNK window #{text}, cp #{color} , attrib #{attrib}. #{cc}, #{bg} "
    color_set(color,nil) if color
    wattron(attrib) if attrib
    #print(text)
    waddnstr(text.to_s, @width) # changed 2014-04-22 - 11:59 to reduce a function
    wattroff(attrib) if attrib
  end
end
# ----- }}}
# This used to return an Ncurses window object, and you could call methods on it
# Now it returns a FFI::NCurses.window pointer which you cannot call methods on.
# You have to pass it to FFI::NCurses.<method>
# @return [FFI::NCurses.window] the raw window pointer
def get_window; @window; end
# returns name of window or self (mostly for debugging log files)
def to_s; @name || self; end
# actions to perform when window closed.
# == Example
# @window.close_command do
# if confirm("Save tasks?", :default_button => 0)
# take some actions
# end
# end
# Register a block (plus optional arguments) to be run when this window is
# closed. May be called repeatedly; handlers run in registration order.
def close_command *args, &block
  (@close_command ||= []) << block
  (@close_args    ||= []) << args
end
alias :command :close_command
# Set a single block that decides whether the window may close.
# The block should return true (close) or false (keep open).
# == Examples
#
#   @window.confirm_close_command do
#     confirm "Sure you wanna quit?", :default_button => 1
#   end
#
def confirm_close_command *args, &block
  @confirm_close_args    = args
  @confirm_close_command = block
end
# Called when window close is requested by the user (invoked by util/app.rb).
# Runs the confirm_close block first, if any; a falsy result aborts the close
# and is returned as-is. Otherwise every registered close handler runs, the
# handler lists are cleared, and true is returned.
def fire_close_handler
  if (checker = @confirm_close_command)
    verdict = checker.call(self, *@confirm_close_args)
    return verdict unless verdict
  end
  Array(@close_command).each_with_index do |handler, idx|
    handler.call(self, *@close_args[idx]) if handler
  end
  @close_command = nil
  @close_args = nil
  true
end
# creates a key reader unless overridden by application which should be rare.
# Applications may assign their own object responding to +getchar+ to
# @key_reader before use in order to customize key handling.
def create_default_key_reader
  @key_reader = DefaultKeyReader.new self
end
end # window
# created on 2014-04-20 - 00:19 so that user can install own handler
#
#
# A class that reads keys and handles function, shifted function, control, alt, and other
# extended keys.
# This essentially consists of a method getchar which will be called by the application
# to get keys in a loop. Application may also call getchar to get one key in some situations.
#
# Originally, rbcurse returned an int, but we are moving to a string, so that user can use the exact
# control codes he gets on the terminal using C-v and map them here.
#
#
class DefaultKeyReader # --- {{{
  # @param win [Window] window whose +getch+ supplies raw key ints
  def initialize win
    @window = win
    @stack = []
  end

  # return an int for the key read. this is just a single int, and is not interpreted
  # for control or function keys. it also will return -1 when no action.
  # You may re-implement it or call the original one.
  #
  def getch
    @window.getch
  end

  # A map of int keycodes associated with a string name which is defined in $kh.
  # Unknown names default to the name's hash, so each gets a stable unique int.
  $kh_int ||= Hash.new {|hash, key| hash[key] = key.hash }
  # these 4 for xterm-color which does not send 265 on F1
  $kh_int["F1"] = 265
  $kh_int["F2"] = 266
  $kh_int["F3"] = 267
  $kh_int["F4"] = 268
  # testing out shift+Function. these are the codes my kb generates
  # NOTE: File.exists? was deprecated for years and removed in ruby 3.2,
  # so the canonical File.exist? is used here.
  if File.exist? File.expand_path("~/ncurses-keys.yml")
    # a sample of this file should be available with this
    # the file is a hash or mapping, but should not contain control characters.
    # Usually delete the control character and insert a "\e" in its place.
    # "\e[1;3C": C-RIGHT
    require 'yaml'
    # NOTE(review): loads a user-owned mapping file; if its contents could be
    # untrusted, YAML.safe_load would be preferable -- confirm.
    $kh = YAML::load( File.open( File.expand_path("~/ncurses-keys.yml" ) ))
  else
    # if we could not find any mappings then use some dummy ones that work on my laptop.
    $kh=Hash.new
    KEY_S_F1='[1;2P'
    $kh[KEY_S_F1]="S-F1"
    $kh['[1;2Q']="S-F2"
    $kh['[1;2R']="S-F3"
    $kh['[1;2S']="S-F4"
    $kh['[15;2~']="S-F5"
  end
  # this is for xterm-color which does not send 265 on F1
  $kh['OP']="F1"
  $kh['OQ']="F2"
  $kh['OR']="F3"
  $kh['OS']="F4"

  # NOTE: This is a reworked and much simpler version of the original getchar which was taken
  # from manveru's codebase. This currently returns the keycode as int while placing the char
  # version in a global $key_chr. Until we are ready to return a char, we use this.
  #
  # FIXME : I have tried very hard to revert to nodelay but it does not seem to have an effect
  # when ESC is pressed. Somewhere, there is a delay when ESC is pressed. I no longer wish to
  # provide the feature of pressing ESC and then a key to be evaluated as Meta-key. This slows
  # down when a user just presses ESC.
  #
  # Read a char from the window (from user) and return an int code.
  # In some cases, such as codes entered in the $kh hash, we do not yet have a keycode defined
  # so we return 9999 and the user can access $key_chr.
  #
  # NOTE: Do not convert to string, that is doing two things. Allow user to convert if required
  # using `key_tos`
  def getchar
    $key_chr = nil
    c = nil
    # block until an actual key arrives (getch returns -1 when idle)
    while true
      c = self.getch
      break if c != -1
    end
    cn = c
    $key_int = c
    # handle control codes 0 to 127 but not escape
    if cn >= 0 && cn < 128 && cn != 27
      return c
    end
    # if escape then get into a loop and keep checking till -1 or another escape
    if c == 27
      buff=c.chr
      # if there is another escape coming through then 2 keys were pressed so
      # evaluate upon hitting an escape
      # NOTE : i think only if ESC is followed by [ should we keep collecting;
      # otherwise the next char should evaluate. cases like F1 are already being
      # sent in as high integer codes
      while true
        FFI::NCurses.set_escdelay(5)
        k = self.getch
        $log.debug "inside LOOP AFTER getch #{k} "
        if k == 27
          # seems like two Meta keys pressed in quick succession without chance for -1 to kick in
          # but this still does not catch meta char followed by single char. M-za , it does.
          if $esc_esc
            if buff == 27.chr
              $key_chr = "<ESC-ESC>"
              return 2727
            else
              alert "buff is #{buff}"
            end
          end
          $log.debug " 1251 before evaluate "
          x = _evaluate_buff buff
          # return ESC so it can be interpreted again.
          @window.ungetch k
          $key_chr = x if x
          return $key_int if x
          $log.warn "getchar: window.rb 1200 Found no mapping for #{buff} "
          $key_chr = buff
          return $key_int
        elsif k > -1
          # FIXME next line crashes if M-C-h pressed which gives 263
          if k > 255
            $log.warn "getchar: window.rb 1247 Found no mapping for #{buff} #{k} "
            # this contains ESC followed by a high number; treat as a meta-ized code
            $key_int = k + 128
            return $key_int
          end
          buff += k.chr
          # this is an alt/meta code. All other complex codes seem to have a [ after the escape
          # so we will keep accumulating them.
          # NOTE this still means that user can press Alt-[ and some letter in quick succession
          # and it will accumulate rather than be interpreted as M-[.
          #
          if buff.length == 2 and k == 79
            # this is Alt-O and can be a F key in some terms like xterm-color, keep collecting
          elsif buff.length == 2 and k.chr != '['
            x = _evaluate_buff buff
            $key_chr = x
            return $key_int if x
          end
        else
          # it is -1 so evaluate whatever has accumulated
          x = _evaluate_buff buff
          $key_chr = x if x
          return $key_int if x
          $log.warn "getchar: window.rb 1256 Found no mapping for #{buff} "
          $key_chr = buff
          return $key_int
        end
      end
    end
    # extended keys (function/arrow) come through as ints above 127; pass through as-is
    if c > 127
      return c
    end
    if c
      return c
    end
  end

  # Convenience wrapper: read a key and return its string form (sets both
  # $key_int and $key_chr; conversion is done via key_tos).
  def getchar_as_char
    $key_int = getchar
    $key_chr = key_tos( $key_int )
    return $key_chr
  end

  # (A long commented-out, string-returning variant of getchar_as_char that
  # lived here was removed as dead code; see version control history.)

  # Generate and return an int for a newkey which user has specified in yml file.
  # We use hash, which won't allow me to derive key string
  # in case loop user can do:
  #     when KEY_ENTER
  #     when 32
  #     when $kh_int["S-F2"]
  def _get_int_for_newkey x
    # FIXME put the declaration somewhere else maybe in window cons ???
    y = $kh_int[x]
    # store the reverse mapping so the string can be recovered from the int
    $kh_int[y] = x unless $kh_int.key? y
    return y
  end

  # check buffer if some key mapped in global kh for this
  # Otherwise if it is 2 keys then it is a Meta key
  # Can return nil if no mapping
  # @return [String] string code for key (since it is mostly from $kh). Also sets $key_int
  private
  def _evaluate_buff buff
    if buff == 27.chr
      # a lone escape
      $key_int = 27
      $key_chr = "<ESC>"
      return $key_chr
    end
    x=$kh[buff]
    if x
      $key_int = 9999
      $key_int = _get_int_for_newkey(x)
      $key_cache[$key_int] = x unless $key_cache.key? $key_int
      # FIXME currently 9999 signifies unknown key, but since this is derived from a user list
      # we could have some dummy number being passed or set by user too.
      return "<#{x}>"
    end
    if buff.size == 2
      ## possibly a meta/alt char: ESC followed by a single key
      k = buff[-1]
      $key_int = 128 + k.ord
      return key_tos( $key_int )
    end
    $key_int = 99999
    nil
  end
end # class DefaultKeyReader -- }}}
end
Enhanced refresh_all so that it does not refresh the current window in the case of a resize.
# ----------------------------------------------------------------------------- #
# File: window.rb
# Description: A wrapper over window
# Author: jkepler http://github.com/mare-imbrium/canis/
# Date: Around for a long time
# License: Same as Ruby's License (http://www.ruby-lang.org/LICENSE.txt)
# Last update: 2014-05-27 01:34
#
# == CHANGED
# removed dead or redudant code - 2014-04-22 - 12:53
# - replaced getchar with new simpler one - 2014-05-04
# - introduced key_tos to replace keycode_tos, moved to Util in rwidget.rb
# - reintroduced nedelay and reduced escdelay
#
# == TODO
# strip and remove cruft. Several methods marked as deprecated.
# ----------------------------------------------------------------------------- #
#
require 'canis/core/system/ncurses'
require 'canis/core/system/panel'
# this is since often windows are declared with 0 height or width and this causes
# crashes in the most unlikely places. This prevceents me from having to write ternary
# e.g.
# @layout[:width].ifzero(FFI::NCurses::LINES-2)
# Patch Integer rather than Fixnum: Fixnum was deprecated in ruby 2.4 and
# removed in 3.2, and on older rubies Fixnum inherits from Integer, so this
# remains available on all integer literals either way.
class Integer
  # Return self unless it is zero, in which case return the fallback +v+.
  # Saves writing ternaries such as:
  #   @layout[:width].ifzero(FFI::NCurses::LINES-2)
  def ifzero v
    return self if self != 0
    return v
  end
end
# This class is to be extended so that it can be called by anyone wanting to implement
# chunks of text with color and attributes. A chunkline consists of multiple chunks of
# colored text and should implement a +each_with_color+.
# The purpose of adding this is so that +chunk.rb+ does not need to be required if colored text
# is not being used by an application.
# Deliberately empty: it serves only as a marker base class for is_a? checks
# (see printstring_or_chunks).
class AbstractChunkLine; end
module Canis
class Window
attr_reader :width, :height, :top, :left
attr_accessor :layout # hash containing hwtl (height, width, top, left)
attr_reader :panel # reader required so the caller can delete it at the end
attr_reader :window_type # window or pad to distinguish 2009-11-02 23:11
attr_accessor :name # more for debugging log files. 2010-02-02 19:58
#attr_accessor :modified # has it been modified and may need a refresh 2014-04-22 - 10:23 CLEANUP
# for root windows we need to know the form so we can ask it to update when
# there are overlapping windows.
attr_accessor :form
# creation and layout related {{{
# @param [Array, Hash] window coordinates (ht, w, top, left)
# or
# @param [int, int, int, int] window coordinates (ht, w, top, left)
# 2011-09-21 allowing array, or 4 ints, in addition to hash @since 1.3.1
# Create an ncurses window plus its panel.
# @param [Array, Hash] window coordinates (ht, w, top, left)
#   or
# @param [int, int, int, int] window coordinates (ht, w, top, left)
# 2011-09-21 allowing array, or 4 ints, in addition to hash @since 1.3.1
# Zero height/width are replaced with the full screen dimensions.
# Also lazily initializes several application-wide globals on first use.
def initialize(*args)
  case args.size
  when 1
    case args[0]
    when Array, Hash
      layout = args[0]
    else
      raise ArgumentError, "Window expects 4 ints, array of 4 ints, or Hash in constructor"
    end
  when 4
    layout = { :height => args[0], :width => args[1], :top => args[2], :left => args[3] }
  end
  @visible = true
  set_layout(layout)
  #$log.debug "XXX:WINDOW got h #{@height}, w #{@width}, t #{@top}, l #{@left} "
  @height = FFI::NCurses.LINES if @height == 0 # 2011-11-14 added since tired of checking for zero
  @width = FFI::NCurses.COLS if @width == 0
  @window = FFI::NCurses.newwin(@height, @width, @top, @left) # added FFI 2011-09-6
  # trying out refreshing underlying window.
  $global_windows ||= []
  # this causes issues padrefresh failing when display_list does a resize.
  #$global_windows << self
  @panel = Ncurses::Panel.new(@window) # added FFI 2011-09-6
  #$error_message_row = $status_message_row = Ncurses.LINES-1
  $error_message_row ||= Ncurses.LINES-1
  $error_message_col ||= 1 # ask (bottomline) uses 0 as default so you can have mismatch. XXX
  $status_message ||= Canis::Variable.new # in case not an App
  # 2014-05-07 - 12:29 CANIS earlier this was called $key_map but that suggests a map.
  $key_map_type ||= :vim
  $esc_esc = true; # give me double esc as 2727 so i can map it.
  init_vars
  # applications may have installed their own reader before this runs
  unless @key_reader
    create_default_key_reader
  end
end
# Initialize per-window state and enable keypad mode so function/arrow keys
# arrive as single ints. Kept separate from the constructor for clarity.
def init_vars
  @window_type = :WINDOW
  Ncurses::keypad(@window, true)
  # Added this so we can get Esc, and also C-c pressed in succession does not crash system
  # 2011-12-20 half-delay crashes system as does cbreak
  #This causes us to be unable to process gg qq since getch won't wait.
  #FFI::NCurses::nodelay(@window, bf = true)
  # wtimeout was causing RESIZE sigwinch to only happen after pressing a key
  #Ncurses::wtimeout(@window, $ncurses_timeout || 500) # will wait a second on wgetch so we can get gg and qq
  #@stack = [] # since we have moved to handler 2014-04-20 - 11:15
  @name ||="#{self}"
  @modified = true
  $catch_alt_digits ||= false # is this where it should put globals ? 2010-03-14 14:00 XXX
end
##
# this is an alternative constructor: create a full-screen root window,
# refresh it, and register it in $global_windows so refresh_all can repaint it.
# @return [Window] the new root window
def self.root_window(layout = { :height => 0, :width => 0, :top => 0, :left => 0 })
  #Canis::start_ncurses
  @layout = layout
  @window = Window.new(@layout)
  @window.name = "Window::ROOTW:#{$global_windows.count}"
  @window.wrefresh
  Ncurses::Panel.update_panels
  # earlier we only put root window, now we may need to do all (bline - numbered menu - alert)
  $global_windows << @window unless $global_windows.include? @window
  return @window
end
# Repaint the (last) root window whenever overlapping windows are destroyed,
# resized or moved, by asking that window's form to repaint all its objects
# (via the 1000 "hack" keycode). Called on every window destroy/resize; must
# be invoked manually after moving a window.
# @param current_win [Window, nil] window to exclude from the repaint
#   (e.g. the window currently being resized)
# NOTE: if there are many root windows, only the most recent is refreshed to
# avoid excessive repaints.
def self.refresh_all current_win=nil
  remaining = $global_windows
  remaining = remaining.reject { |win| win == current_win } if current_win
  last_root = remaining.last
  return unless last_root
  [ last_root ].each_with_index do |w,i|
    $log.debug " REFRESH_ALL on #{w.name} (#{i}) sending 1000"
    # the hack must reach the root window's form, not whoever happens to be
    # trapping keys (e.g. a Viewer), so we call the form directly.
    f = w.form
    f.handle_key 1000 if f
  end
end
# 2009-10-13 12:24
# Alternative constructor taking four ints directly, so callers need not
# build a layout hash first (not used as yet).
# 2011-09-21 V1.3.1 You can now send an array to Window constructor instead.
def self.create_window(h=0, w=0, t=0, l=0)
  @window = Window.new({ :height => h, :width => w, :top => t, :left => l })
end
# Apply a new layout: record it, resize and move the underlying ncurses
# window, then repaint the root window(s) beneath (excluding self).
def resize_with(layout)
  #$log.debug " DARN ! This awready duz a resize!! if h or w or even top or left changed!!! XXX"
  $log.debug " resize #{@height} , #{@width} , #{@top} , #{@left}, "
  set_layout(layout)
  wresize(height, width)
  mvwin(top, left)
  Window.refresh_all self
end
# Metaprogrammed setters: width=, height=, top=, left= each update @layout
# and trigger a resize_with, but only when the value actually changes.
%w[width height top left].each do |side|
  eval(
  "def #{side}=(n)
  return if n == #{side}
  @layout[:#{side}] = n
  resize_with @layout
  end"
  )
end
##
# Record the window geometry from either a 4-element array (h, w, t, l) or a
# hash with :height, :width, :top, :left keys. Sets the four corresponding
# instance variables and normalizes @layout to a hash.
# NOTE: zero values are kept as-is here; the constructor later replaces them
# with the full screen dimensions.
# @raise [ArgumentError] if an array layout contains nil entries
def set_layout(layout)
  if layout.is_a?(Array)
    if layout.include? nil
      $log.error "NIL in window constructor"
      raise ArgumentError, "Nil in window constructor"
    end
    @height, @width, @top, @left = *layout
    raise ArgumentError, "Nil in window constructor" if @top.nil? || @left.nil?
    @layout = { :height => @height, :width => @width, :top => @top, :left => @left }
  elsif layout.is_a?(Hash)
    @layout = layout
    @height = @layout[:height]
    @width  = @layout[:width]
    @top    = @layout[:top]
    @left   = @layout[:left]
  end
end
# --- layout and creation related }}}
# ADDED DUE TO FFI
# Thin wrappers delegating to the FFI ncurses functions on @window.
def wrefresh
  Ncurses.wrefresh(@window)
end
# Delete the underlying ncurses window. 2011-09-7
def delwin
  Ncurses.delwin(@window)
end
# Turn attributes on for subsequent output on this window.
def attron *args
  FFI::NCurses.wattron @window, *args
end
# Turn attributes off again.
def attroff *args
  FFI::NCurses.wattroff @window, *args
end
#
# ## END FFI
# Re-apply the stored layout (used after screen size changes).
def resize
  resize_with(@layout)
end
# Ncurses
# The following three raise deliberately: they are believed dead and the
# raise acts as a tripwire should anything still call them.
def pos
  raise "dead code ??"
  return y, x
end
def y
  raise "dead code ??"
  Ncurses.getcury(@window)
end
def x
  raise "dead code ??"
  Ncurses.getcurx(@window)
end
def x=(n) move(y, n) end
def y=(n) move(n, x) end
#def move(y, x)
#return unless @visible
## Log.debug([y, x] => caller[0,4])
##@window.wmove(y, x) # bombing since ffi-ncurses 0.4.0 (maybe it was never called
##earlier. was crashing in appemail.rb testchoose.
#wmove y,x # can alias it
#end
# since include FFI is taking over, i need to force it here. not going into
# method_missing
# Move the window's cursor to row y, column x.
def wmove y,x
  #Ncurses.wmove @window, y, x
  FFI::NCurses.wmove @window, y, x
end
alias :move :wmove
# Delegate unknown methods to FFI::NCurses with @window as first argument.
# Lookup order: for mv-prefixed names try the mvw* variant first (e.g.
# mvaddch -> mvwaddch), then the w-prefixed variant (refresh -> wrefresh),
# then the name as-is. See respond_to? which mirrors these rules.
def method_missing(name, *args)
  name = name.to_s
  if (name[0,2] == "mv")
    test_name = name.dup
    test_name[2,0] = "w" # insert "w" after "mv"
    if (FFI::NCurses.respond_to?(test_name))
      return FFI::NCurses.send(test_name, @window, *args)
    end
  end
  test_name = "w" + name
  if (FFI::NCurses.respond_to?(test_name))
    return FFI::NCurses.send(test_name, @window, *args)
  end
  FFI::NCurses.send(name, @window, *args)
end
# Mirror of method_missing's delegation rules: report true for any name that
# FFI::NCurses can satisfy via the mvw*/w*/plain variants.
# Accepts the optional include_all flag that ruby's core respond_to? carries,
# so internal two-argument calls (e.g. respond_to?(:sym, true)) no longer
# raise ArgumentError against this override.
def respond_to?(name, include_all = false)
  name = name.to_s
  if (name[0,2] == "mv" && FFI::NCurses.respond_to?("mvw" + name[2..-1]))
    return true
  end
  FFI::NCurses.respond_to?("w" + name) || FFI::NCurses.respond_to?(name)
end
#--
# removing some methods that are not used or used once
# leaving here so we note what to do to print in these cases
#++
# Print +string+ clipped to +wid+ columns; a zero width falls back to the
# full screen width.
# BUGFIX: the old signature was `print(string, width = width)` -- since ruby
# 1.9 that is a circular argument reference whose default evaluates to nil
# (not the window's width), so the parameter is renamed to restore the
# intended default of the window's width.
def print(string, wid = width)
  w = wid == 0 ? Ncurses.COLS : wid
  waddnstr(string.to_s, w) # changed 2011 dts
end
#def print_yx(string, y = 0, x = 0)
#w = width == 0? Ncurses.COLS : width
#mvwaddnstr(y, x, string, w) # changed 2011 dts
#end
#++
# dead code ??? --- {{{
# NOTE: many of these methods using width will not work since root windows width
# is 0
# The raise statements below are deliberate tripwires: these methods are
# believed dead, and the raise flags any caller that still reaches them.
def print_empty_line
  raise "print empty is working"
  return unless visible?
  w = getmaxx == 0? Ncurses.COLS : getmaxx
  printw(' ' * w)
end
def print_line(string)
  raise "print line is working"
  w = getmaxx == 0? Ncurses.COLS : getmaxx
  print(string.ljust(w))
end
def puts(*strings)
  raise "puts is working, remove this"
  print(strings.join("\n") << "\n")
end
def _refresh
  raise "dead code remove"
  return unless visible?
  @window.refresh
end
def wnoutrefresh
  #raise "dead code ???"
  return unless visible?
  # next line gives error XXX DEAD
  @window.wnoutrefresh
end
def color=(color)
  raise "dead code ???"
  @color = color
  @window.color_set(color, nil)
end
def highlight_line(color, y, x, max)
  raise "dead code"
  @window.mvchgat(y, x, max, Ncurses::A_NORMAL, color, nil)
end
# doesn't seem to work, clears first line, not both
def clear
  # return unless visible?
  raise "dead code ??"
  move 0, 0
  puts *Array.new(height){ ' ' * (width - 1) }
end
def on_top
  raise "on_top used, remove this line dead code"
  Ncurses::Panel.top_panel @panel.pointer
  wnoutrefresh
end
# --- dead code ??? }}}
# return the character to the keyboard buffer to be read again.
def ungetch(ch)
  Ncurses.ungetch(ch)
end
# reads a character from keyboard and returns
# NOTE:
# if a function key is pressed, multiple such ints will be returned one after the other
# so the caller must decipher the same. See +getchar()+
#
# @return int
# @return -1 if no char read
# ORIGINALLY After esc there was a timeout, but after others there was notimeout, so it would wait
# indefinitely for a key
# NOTE : caller may set a timeout prior to calling, but not change setting after since this method
# maintains the default state in +ensure+. e.g. +widget.rb+ does a blocking get in +_process_key+
# Curses sets a timeout when ESCAPE is pressed, it is called ESCDELAY and is 1000 milliseconds.
# You may reduce it if you are not on some old slow telnet session. This returns faster from an esc
# although there are still some issues. ESC-ESC becomes an issue, but if i press ESC-ESC-1 then esc-esc comes
# together. otherwise there is a -1 between each esc.
#
def getch
  #c = @window.getch
  #FFI::NCurses::nodelay(@window, true)
  #FFI::NCurses::wtimeout(@window, 0)
  #$log.debug " #{Time.now.to_f} inside MAIN before getch "
  c = FFI::NCurses.wgetch(@window)
  # the only reason i am doing this is so ESC can be returned if no key is pressed
  # after that, not sure how this effects everything. most likely I should just
  # go back to using a wtimeout, and not worry about resize requiring a keystroke
  if c == 27
    $escstart = Time.now.to_f
    # if ESC pressed don't wait too long for next key
    Ncurses::wtimeout(@window, $ncurses_timeout || 500) # will wait n millisecond on wgetch so that we can return if no
  else
    FFI::NCurses.set_escdelay(100)
    # this means keep waiting for a key.
    Ncurses::nowtimeout(@window, true)
  end
  c
rescue SystemExit, Interrupt
  #FFI::NCurses.flushinp
  3 # is C-c
rescue StandardError
  -1 # any other error is treated as "no key read"
ensure
  # whatever the default is, is to be set here in case caller changed it.
  #FFI::NCurses::nodelay(@window, true)
end
# Earlier this was handled by window itself. Now we delegate to a reader
# @return int keycode, can be function key or meta or arrow key.
#
# NOTE:
# This is called by user programs in a loop.
# We are now moving from returning an int to returning a string similar to what
# user would get on commandline using C-v
#
def getchar
  @key_reader.getchar
end
# setup and reset
# Ncurses panel
# Hide this window's panel and repaint the panel stack so windows beneath
# show through.
def hide
  #return unless visible? # added 2011-10-14 these 2 are not behaving properly
  Ncurses::Panel.hide_panel @panel.pointer
  #Ncurses.refresh # wnoutrefresh
  Ncurses::Panel.update_panels # added so below window does not need to do this 2011-10-1
  @visible = false
end
# Show this window's panel again and repaint the panel stack.
def show
  #return if visible? # added 2011-10-14 these 2 are not behaving properly
  Ncurses::Panel.show_panel @panel.pointer
  #Ncurses.refresh # wnoutrefresh
  Ncurses::Panel.update_panels # added so below window does not need to do this 2011-10-1
  @visible = true
end
# @return [Boolean] whether the window is currently shown
def visible?
  @visible
end
##
# destroy window, panel and any pads that were requested
#
def destroy
  # typically the ensure block should have this
  #$log.debug "win destroy start"
  $global_windows.delete self
  Ncurses::Panel.del_panel(@panel.pointer) if @panel
  delwin() if @window
  Ncurses::Panel.update_panels # added so below window does not need to do this 2011-10-1
  # destroy any pads that were created by widgets using get_pad
  @pads.each { |pad|
    FFI::NCurses.delwin(pad) if pad
    pad = nil
  } if @pads
  # added here to hopefully take care of this issue once and for all.
  # Whenever any window is destroyed, the root window is repainted.
  #
  Window.refresh_all
  #$log.debug "win destroy end"
end
#
# 2011-11-13 since 1.4.1
# Widgets can get window to create a pad for them. This way when the window
# is destroyed, it will delete all the pads. A widget would not be able to do this.
# The destroy method of the widget will be called.
# @return [FFI::NCurses.window] the newly created pad pointer
def get_pad content_rows, content_cols
  pad = FFI::NCurses.newpad(content_rows, content_cols)
  @pads ||= []
  @pads << pad
  ## added 2013-03-05 - 19:21 without next line how was pad being returned
  return pad
end
# print and chunk related --- {{{
#
# Allows user to send data as normal string or chunks for printing
# An array is assumed to be a chunk containing color and attrib info
#
# Dispatch on content type: String -> plain printstring; AbstractChunkLine ->
# colored-chunk printing; Array -> either a legacy array-of-chunks or a
# single [color, text, attrib] triple.
def printstring_or_chunks(r,c,content, color, att = Ncurses::A_NORMAL)
  if content.is_a? String
    printstring(r,c,content, color, att)
  elsif content.is_a? AbstractChunkLine
    #$log.debug "XXX: using chunkline" # 2011-12-10 12:40:13
    wmove r, c
    a = get_attrib att
    # please add width to avoid overflow
    show_colored_chunks content, color, a
  elsif content.is_a? Array
    # several chunks in one row - NOTE Very experimental may change
    if content[0].is_a? Array
      $log.warn "XXX: WARNING outdated should send in a chunkline"
      wmove r, c
      a = get_attrib att
      # please add width to avoid overflow
      show_colored_chunks content, color, a
    else
      # a single row chunk - NOTE Very experimental may change
      text = content[1].dup
      printstring r, c, text, content[0] || color, content[2] || att
    end
  end
end
#
# prints a string formatted in our new experimental coloring format
# taken from tmux. Currently, since i have chunks working, i convert
# to chunks and use the existing print function. This could change.
# An example of a formatted string is:
# s="#[fg=green]testing chunks #[fg=yellow, bg=red, bold]yellow #[reverse] reverseme \
#    #[normal]normal#[bg = black]just yellow#[fg=blue],blue now #[underline] underlined text"
# Ideally I should push and pop colors which the shell does not do with ansi terminal sequences.
# That way i can have a line in red,
# with some word in yellow, and then the line continues in red.
#
def printstring_formatted(r,c,content, color, att = Ncurses::A_NORMAL)
  # check against Integer, not Fixnum: Fixnum was removed in ruby 3.2, and on
  # older rubies every Fixnum is an Integer so behavior is unchanged.
  att = get_attrib att unless att.is_a? Integer
  chunkline = convert_to_chunk(content, color, att)
  printstring_or_chunks r,c, chunkline, color, att
end # print
#
# Print a formatted (tmux-style markup) line right-aligned on row +r+.
# The passed column +c+ is ignored; the column is computed from the window
# width and the length of the string with all markup stripped.
#
def printstring_formatted_right(r,c,content, color, att = Ncurses::A_NORMAL)
  stripped = content.gsub /#\[[^\]]*\]/,'' # clean out all markup
  #c = actual_width() - stripped.length # actual width not working if resize
  start_col = getmaxx() - stripped.length
  printstring_formatted(r, start_col, content, color, att )
end
private
# Lazily require and return the default (tmux-format) color parser; an
# already-assigned @color_parser wins.
def get_default_color_parser
  require 'canis/core/util/defaultcolorparser'
  @color_parser || DefaultColorParser.new
end
# supply with a color parser, if you supplied formatted text
public
# @param f [:tmux, Object] :tmux selects the built-in parser, otherwise f
#   itself is used as the parser object
def color_parser f
  $log.debug "XXX: color_parser setting in window to #{f} "
  require 'canis/core/include/colorparser'
  if f == :tmux
    @color_parser = get_default_color_parser()
  else
    @color_parser = f
  end
end
#
# Takes a formatted string and converts the parsed parts to chunks.
#
# @param [String] takes the entire line or string and breaks into an array of chunks
# @yield chunk if block
# @return [ChunkLine] # [Array] array of chunks
public
def convert_to_chunk s, colorp=$datacolor, att=FFI::NCurses::A_NORMAL
  # lazily build the parser/converter pair on first use
  unless @color_parser
    require 'canis/core/include/colorparser'
    @color_parser = get_default_color_parser()
    @converter = Chunks::ColorParser.new @color_parser
    # we need to set the parent in colorparser. 2014-05-26 - 14:49
    @converter.form = self.form
  end
  @converter.convert_to_chunk s, colorp, att
end
##
# prints a string at row, col, with given color and attribute
# added by rk 2008-11-29 19:01
# I usually use this, not the other ones here
# @param r - row
# @param c - col
# @param string - text to print
# @param color - color pair
# @param att - ncurses attribute: normal, bold, reverse, blink, underline
#   (may be a symbol, converted via get_attrib; nil means normal)
public
def printstring(r,c,string, color, att = Ncurses::A_NORMAL)
  #$log.debug " #{@name} inside window printstring r #{r} c #{c} #{string} "
  if att.nil?
    att = Ncurses::A_NORMAL
  else
    att = get_attrib att
  end
  wattron(Ncurses.COLOR_PAIR(color) | att)
  # "%s" guards against printf-style interpretation of the string itself
  mvwprintw(r, c, "%s", :string, string);
  wattroff(Ncurses.COLOR_PAIR(color) | att)
end
##
# prints the border for message boxes
#
# NOTE : FOR MESSAGEBOXES ONLY !!!! Then why not move to messagebox FIXME
# Fills the rectangle with a horizontal-line background, then draws the
# box outline. The -5/-6 and -3/-4 offsets are messagebox-specific.
def print_border_mb row, col, height, width, color, attr
  # the next is for xterm-256
  # NOTE(review): att is only used by the commented-out attron below — confirm intent
  att = get_attrib attr
  len = width
  len = Ncurses.COLS-0 if len == 0   # zero width means use the full screen width
  # print a bar across the screen
  #attron(Ncurses.COLOR_PAIR(color) | att)
  # this works for newmessagebox but not for old one.
  # Even now in some cases some black shows through, if the widget is printing spaces
  # such as field or textview on a messagebox.
  (row-1).upto(row+height-1) do |r|
    mvwhline(r, col, 1, len)
  end
  #attroff(Ncurses.COLOR_PAIR(color) | att)
  # corners, then edges: UL, top, UR, left, LL, bottom, LR, right
  mvwaddch row, col, Ncurses::ACS_ULCORNER
  mvwhline( row, col+1, Ncurses::ACS_HLINE, width-6)
  mvwaddch row, col+width-5, Ncurses::ACS_URCORNER
  mvwvline( row+1, col, Ncurses::ACS_VLINE, height-4)
  mvwaddch row+height-3, col, Ncurses::ACS_LLCORNER
  mvwhline(row+height-3, col+1, Ncurses::ACS_HLINE, width-6)
  mvwaddch row+height-3, col+width-5, Ncurses::ACS_LRCORNER
  mvwvline( row+1, col+width-5, Ncurses::ACS_VLINE, height-4)
end
##
# prints a border around a widget, CLEARING the area.
# If calling with a pad, you would typically use 0,0, h-1, w-1.
# FIXME can this be moved to module Bordertitle ?
#
# @raise [RuntimeError] if height or width is nil
def print_border row, col, height, width, color, att=Ncurses::A_NORMAL
  raise "height needs to be supplied." if height.nil?
  raise "width needs to be supplied." if width.nil?
  att ||= Ncurses::A_NORMAL
  #$log.debug " inside window print_border r #{row} c #{col} h #{height} w #{width} "
  # Blank out the interior before drawing the frame.
  # 2009-11-02 00:45 made att nil for blanking out
  # FIXME - in tabbedpanes this clears one previous line ??? XXX when using a textarea/view
  # when using a pad this calls pads printstring which again reduces top and left !!! 2010-01-26 23:53
  ww=width-2
  (row+1).upto(row+height-1) do |r|
    printstring( r, col+1," "*ww , color, att)
  end
  print_border_only row, col, height, width, color, att
end
## print just the border, no cleanup
#+ Earlier, we would clean up. Now in some cases, i'd like
#+ to print border over what's been done.
# XXX this reduces 1 from width but not height !!! FIXME
# FIXME can this be moved to module Bordertitle ?
def print_border_only row, col, height, width, color, att=Ncurses::A_NORMAL
  if att.nil?
    att = Ncurses::A_NORMAL
  else
    att = get_attrib att
  end
  wattron(Ncurses.COLOR_PAIR(color) | att)
  # corners, then edges; note bottom edge uses height-0 while right uses width-1
  mvwaddch row, col, Ncurses::ACS_ULCORNER
  mvwhline( row, col+1, Ncurses::ACS_HLINE, width-2)
  mvwaddch row, col+width-1, Ncurses::ACS_URCORNER
  mvwvline( row+1, col, Ncurses::ACS_VLINE, height-1)
  mvwaddch row+height-0, col, Ncurses::ACS_LLCORNER
  mvwhline(row+height-0, col+1, Ncurses::ACS_HLINE, width-2)
  mvwaddch row+height-0, col+width-1, Ncurses::ACS_LRCORNER
  mvwvline( row+1, col+width-1, Ncurses::ACS_VLINE, height-1)
  wattroff(Ncurses.COLOR_PAIR(color) | att)
end
# Previously this printed a chunk as a full line, I've modified it to print on
# one line. This can be used for running text.
# NOTE 2013-03-08 - 17:02 added width so we don't overflow
# NOTE 2014-05-11 - textpad has its own version, so does not call this.
#
# Writes each (text, color, attrib) chunk at the current cursor position,
# honoring a horizontal scroll offset +pcol+ and clipping at +wid+ columns.
def show_colored_chunks(chunks, defcolor = nil, defattr = nil, wid = 999, pcol = 0)
  return unless visible?
  ww = 0   # columns consumed so far on this line
  chunks.each_with_color do |text, color, attrib|
    ## 2013-03-08 - 19:11 take care of scrolling by means of pcol
    if pcol > 0
      if pcol > text.length
        # ignore entire chunk and reduce pcol
        pcol -= text.length
        next
      else
        # print portion of chunk and zero pcol
        text = text[pcol..-1]
        pcol = 0
      end
    end
    oldw = ww
    ww += text.length
    if ww > wid
      # if we are exceeding the width then by howmuch
      rem = wid - oldw
      if rem > 0
        # take only as much as we are allowed
        text = text[0,rem]
      else
        break
      end
    end
    color ||= defcolor
    attrib ||= defattr
    cc, bg = ColorMap.get_colors_for_pair color
    #$log.debug "XXX: CHUNK window #{text}, cp #{color} , attrib #{attrib}. #{cc}, #{bg} "
    color_set(color,nil) if color
    wattron(attrib) if attrib
    #print(text)
    waddnstr(text.to_s, @width) # changed 2014-04-22 - 11:59 to reduce a function
    wattroff(attrib) if attrib
  end
end
# ----- }}}
# Accessor for the underlying window handle.
# This used to be an Ncurses window object; now it is an FFI::NCurses window
# pointer, so you cannot call methods on it — pass it to FFI::NCurses.<method>.
def get_window
  @window
end
# returns name of window or self (mostly for debugging)
def to_s; @name || self; end
# Register an action to perform when the window is closed.
# May be called repeatedly; each block is queued together with its args.
# == Example
#   @window.close_command do
#     if confirm("Save tasks?", :default_button => 0)
#       # take some actions
#     end
#   end
def close_command *args, &block
  (@close_command ||= []) << block
  (@close_args ||= []) << args
end
alias :command :close_command
# Set a single command that decides whether the window should close or not.
# The block must return truthy to allow closing, falsy to veto it.
# == Examples
#
#   @window.confirm_close_command do
#     confirm "Sure you wanna quit?", :default_button => 1
#   end
#
def confirm_close_command *args, &block
  @confirm_close_command, @confirm_close_args = block, args
end
# Called when window close is requested by user.
# Runs the confirm_close block first; if it returns falsy, that value is
# returned and the close handlers are NOT run. Otherwise every queued
# close_command block is invoked (with the window and its stored args),
# the queues are cleared, and true is returned.
# called by util/app.rb
def fire_close_handler
  if @confirm_close_command
    verdict = @confirm_close_command.call(self, *@confirm_close_args)
    return verdict unless verdict   # only bail out on a falsy answer
  end
  Array(@close_command).each_with_index do |handler, idx|
    handler.call(self, *@close_args[idx]) if handler
  end
  @close_command = nil
  @close_args = nil
  true
end
# Install the stock key reader for this window; applications override this
# only rarely.
def create_default_key_reader
  @key_reader = DefaultKeyReader.new(self)
end
end # window
# created on 2014-04-20 - 00:19 so that user can install own handler
#
#
# A class that reads keys and handles function, shifted function, control, alt, and other
# extended keys.
# THis essentially consists of a method getchar which will be called by the application
# to get keys in a loop. Application may also call getchar to get one key in some situations.
#
# Originally, rbcurse returned an int, but we are movign to a string, so that user can use the exact
# control codes he gets on the terminal using C-v and map them here.
#
#
class DefaultKeyReader # --- {{{
  def initialize win
    @window = win
    @stack = []
  end

  # Return one raw int keycode from the window; -1 when no key is pending.
  # This is not interpreted for control or function keys.
  # You may re-implement it or call the original one.
  def getch
    @window.getch
  end

  # A map of int keycodes associated with a string name which is defined in $kh.
  # Unknown names default to the name's #hash so every name gets a stable int.
  $kh_int ||= Hash.new {|hash, key| hash[key] = key.hash }
  # these 4 for xterm-color which does not send 265 on F1
  $kh_int["F1"] = 265
  $kh_int["F2"] = 266
  $kh_int["F3"] = 267
  $kh_int["F4"] = 268
  # Shifted function keys: load user-provided escape-sequence mappings if present.
  # File.exists? was removed in Ruby 3.2; File.exist? is the supported form.
  if File.exist? File.expand_path("~/ncurses-keys.yml")
    # a sample of this file should be available with this
    # the file is a hash or mapping, but should not contain control characters.
    # Usually delete the control character and insert a "\e" in its place.
    # "\e[1;3C": C-RIGHT
    require 'yaml'
    # load_file closes its handle; the previous File.open here was leaked
    $kh = YAML.load_file( File.expand_path("~/ncurses-keys.yml") )
  else
    # if we could not find any mappings then use some dummy ones that work on my laptop.
    # NOTE(review): these sequences originally begin with a literal ESC byte — verify
    # it survived editing.
    $kh = Hash.new
    KEY_S_F1 = '[1;2P'
    $kh[KEY_S_F1] = "S-F1"
    $kh['[1;2Q'] = "S-F2"
    $kh['[1;2R'] = "S-F3"
    $kh['[1;2S'] = "S-F4"
    $kh['[15;2~'] = "S-F5"
  end
  # this is for xterm-color which does not send 265 on F1
  $kh['OP'] = "F1"
  $kh['OQ'] = "F2"
  $kh['OR'] = "F3"
  $kh['OS'] = "F4"

  # Read a char from the window (from user) and return an int keycode,
  # placing the char/name version in the global $key_chr ($key_int holds the int).
  # For sequences found only in $kh we may not have a keycode, in which case
  # 9999 is used and the caller can consult $key_chr.
  #
  # FIXME : I have tried very hard to revert to nodelay but it does not seem to have an
  # effect when ESC is pressed; somewhere there is a delay when ESC is pressed.
  # Pressing ESC followed by a key is no longer offered as a Meta-key combination.
  #
  # NOTE: Do not convert to string here — that would be doing two things.
  # The caller can convert if required using `key_tos`.
  def getchar
    $key_chr = nil
    c = nil
    # busy-wait until a real key arrives
    while true
      c = self.getch
      break if c != -1
    end
    cn = c
    $key_int = c
    # handle control codes 0 to 127 but not escape
    if cn >= 0 && cn < 128 && cn != 27
      return c
    end
    # if escape then get into a loop and keep checking till -1 or another escape
    if c == 27
      buff = c.chr
      # NOTE : i think only if ESC is followed by [ should we keep collecting;
      # otherwise the next char should evaluate. Cases like F1 already arrive as
      # high integer codes.
      while true
        FFI::NCurses.set_escdelay(5)
        k = self.getch
        $log.debug "inside LOOP AFTER getch #{k} "
        if k == 27
          # two Meta keys pressed in quick succession without chance for -1 to kick in
          if $esc_esc
            if buff == 27.chr
              $key_chr = "<ESC-ESC>"
              return 2727
            else
              alert "buff is #{buff}"
            end
          end
          $log.debug " 1251 before evaluate "
          x = _evaluate_buff buff
          # push the ESC back so it can be interpreted again.
          @window.ungetch k
          $key_chr = x if x
          return $key_int if x
          $log.warn "getchar: window.rb 1200 Found no mapping for #{buff} "
          $key_chr = buff
          return $key_int
        elsif k > -1
          # FIXME next line can misbehave if M-C-h pressed which gives 263
          if k > 255
            $log.warn "getchar: window.rb 1247 Found no mapping for #{buff} #{k} "
            $key_int = k + 128
            return $key_int
          end
          buff += k.chr
          # an alt/meta code; all other complex codes seem to have a [ after the
          # escape so those keep accumulating.
          # NOTE a user pressing Alt-[ then a letter in quick succession will
          # accumulate rather than be interpreted as M-[.
          if buff.length == 2 and k == 79
            # this is Alt-O and can be an F key in some terms like xterm-color
          elsif buff.length == 2 and k.chr != '['
            x = _evaluate_buff buff
            $key_chr = x
            return $key_int if x
          end
        else
          # got -1, so evaluate whatever has accumulated
          x = _evaluate_buff buff
          $key_chr = x if x
          return $key_int if x
          $log.warn "getchar: window.rb 1256 Found no mapping for #{buff} "
          $key_chr = buff
          return $key_int
        end
      end
    end
    # what if keyname does not return anything
    if c > 127
      return c
    end
    if c
      return c
    end
  end

  # Read one key and return its string form ($key_chr), also setting $key_int.
  def getchar_as_char
    $key_int = getchar
    $key_chr = key_tos( $key_int )
    return $key_chr
  end

  # Generate and return an int for a newkey which user has specified in yml file.
  # We use hash, which won't allow me to derive key string.
  # in case loop user can do:
  #   when KEY_ENTER
  #   when 32
  #   when $kh_int["S-F2"]
  def _get_int_for_newkey x
    # FIXME put the declaration somewhere else maybe in window cons ???
    y = $kh_int[x]
    # store the reverse mapping so the user can get the string back from the int
    $kh_int[y] = x unless $kh_int.key? y
    return y
  end

  private

  # Check the accumulated escape buffer against $kh; a 2-char buffer is
  # treated as a Meta key. Returns the string code for the key (or nil if
  # no mapping); always sets $key_int as a side effect.
  def _evaluate_buff buff
    if buff == 27.chr
      $key_int = 27
      $key_chr = "<ESC>"
      return $key_chr
    end
    x = $kh[buff]
    if x
      $key_int = 9999
      $key_int = _get_int_for_newkey(x)
      # guard: $key_cache may not have been initialized yet
      $key_cache ||= {}
      $key_cache[$key_int] = x unless $key_cache.key? $key_int
      # FIXME currently 9999 signifies unknown key, but since this is derived from a
      # user list we could have some dummy number being passed or set by user too.
      return "<#{x}>"
    end
    if buff.size == 2
      ## possibly a meta/alt char
      k = buff[-1]
      $key_int = 128 + k.ord
      return key_tos( $key_int )
    end
    $key_int = 99999
    nil
  end
end # class DefaultKeyReader -- }}}
end
|
# Capistrano tasks for managing the extracted chord daemon on :node hosts.
namespace :chord do
  desc 'start chord'
  task :start do
    # Index node servers by configured name so pred/succ references resolve to IPs.
    nodes = Hash[roles(:node).map { |s| [s.properties.name, s] }]
    on roles(:node) do |server|
      pred_flags = server.properties.preds.map { |name| "-pred #{nodes[name].properties.ip}:#{fetch(:node_port)}" }.join(' ')
      succ_flags = server.properties.succs.map { |name| "-succ #{nodes[name].properties.ip}:#{fetch(:node_port)}" }.join(' ')
      execute '/sbin/start-stop-daemon',
              '--start',
              '--quiet',
              '--oknodo',
              '--make-pidfile',
              "--pidfile #{current_path}/extraction/chord/tmp/chord.pid",
              '--background',
              "--chdir #{current_path}/extraction/chord",
              '--startas /bin/bash',
              "-- -c 'exec ./chord.native -bind #{server.properties.ip}:#{fetch(:node_port)} #{pred_flags} #{succ_flags} > log/chord.log 2>&1'"
    end
  end

  desc 'stop chord'
  task :stop do
    on roles(:node) do
      execute '/sbin/start-stop-daemon',
              '--stop',
              '--oknodo',
              "--pidfile #{current_path}/extraction/chord/tmp/chord.pid"
    end
  end

  desc 'tail chord log'
  task :tail_log do
    on roles(:node) do
      execute 'tail',
              '-n 20',
              "#{shared_path}/extraction/chord/log/chord.log"
    end
  end

  desc 'truncate chord log'
  task :truncate_log do
    on roles(:node) do
      execute 'truncate',
              '-s 0',
              "#{shared_path}/extraction/chord/log/chord.log"
    end
  end
end
Bind the chord daemon to 0.0.0.0 so it listens on all interfaces rather than only the node's configured IP.
# Capistrano tasks for managing the extracted chord daemon on :node hosts.
# The daemon binds 0.0.0.0 so it accepts connections on every interface.
namespace :chord do
  desc 'start chord'
  task :start do
    # Index node servers by configured name so pred/succ references resolve to IPs.
    nodes = Hash[roles(:node).map { |s| [s.properties.name, s] }]
    on roles(:node) do |server|
      pred_flags = server.properties.preds.map { |name| "-pred #{nodes[name].properties.ip}:#{fetch(:node_port)}" }.join(' ')
      succ_flags = server.properties.succs.map { |name| "-succ #{nodes[name].properties.ip}:#{fetch(:node_port)}" }.join(' ')
      execute '/sbin/start-stop-daemon',
              '--start',
              '--quiet',
              '--oknodo',
              '--make-pidfile',
              "--pidfile #{current_path}/extraction/chord/tmp/chord.pid",
              '--background',
              "--chdir #{current_path}/extraction/chord",
              '--startas /bin/bash',
              "-- -c 'exec ./chord.native -bind 0.0.0.0:#{fetch(:node_port)} #{pred_flags} #{succ_flags} > log/chord.log 2>&1'"
    end
  end

  desc 'stop chord'
  task :stop do
    on roles(:node) do
      execute '/sbin/start-stop-daemon',
              '--stop',
              '--oknodo',
              "--pidfile #{current_path}/extraction/chord/tmp/chord.pid"
    end
  end

  desc 'tail chord log'
  task :tail_log do
    on roles(:node) do
      execute 'tail',
              '-n 20',
              "#{shared_path}/extraction/chord/log/chord.log"
    end
  end

  desc 'truncate chord log'
  task :truncate_log do
    on roles(:node) do
      execute 'truncate',
              '-s 0',
              "#{shared_path}/extraction/chord/log/chord.log"
    end
  end
end
|
namespace :load do
  task :defaults do
    # Lazily evaluated so :home/:server_port/:domain/:application/:stage
    # may be set after this file loads.
    set :runit_service_dir, lambda { "#{fetch :home}/etc/service/rails-#{fetch :server_port}-#{fetch :domain}-#{fetch :application}-#{fetch :stage}" }
  end
end
# Capistrano tasks: deploy a Rails app behind an Apache reverse proxy,
# supervised by runit.
namespace :rails do
# List the Ruby versions installed under /opt/ruby on each host.
task :versions do
on roles :all do
within '/opt/ruby' do
execute(:ls)
end
end
end
# Drop an .htaccess into the domain's htdocs that reverse-proxies all
# traffic to the application server's port. Skipped if already present.
task :setup_reverse_proxy do
on roles :all do
path = "#{fetch :home}/#{fetch :domain}/htdocs"
if test("[ -e #{path}/.htaccess ]")
info "reverse proxy configured @ #{path}/.htaccess"
next
end
htaccess = <<-EOF
RewriteEngine On
RewriteRule ^(.*)$ http://localhost:#{fetch :server_port}/$1 [P]
EOF
execute "mkdir -p #{path}"
upload! StringIO.new(htaccess), "#{path}/.htaccess"
execute "chmod +r #{path}/.htaccess"
end
end
# Create a runit service (run + log/run scripts) for the app server and
# link it into the service directory. Skipped if the sv dir already exists.
task :setup_application_server do
on roles :all do
daemon_name = "rails-#{fetch :server_port}-#{fetch :domain}-#{fetch :application}-#{fetch :stage}"
runit_dir = "#{fetch :home}/etc/sv/#{daemon_name}"
if test("[ -e #{runit_dir} ]")
info("runit ready @ #{runit_dir}")
next
end
# runit "run" script: load the user's shell env, cd into the release and
# exec the configured application server in the foreground.
daemon_script = <<-EOF
#!/bin/bash -e
exec 2>&1
export HOME=#{fetch :home}
export PATH=#{fetch(:default_env)['PATH']}
source $HOME/.bashrc
source $HOME/#{fetch :domain}/etc/rubyrc
cd #{fetch :deploy_to}/current
exec bundle exec #{fetch :application_server} -p #{fetch :server_port} -e #{fetch :stage} 2>&1
EOF
# runit "log/run" script: timestamped logging via svlogd.
log_script = <<-EOF
#!/bin/bash -e
exec svlogd -tt ./main
EOF
execute "mkdir -p #{runit_dir}"
execute "mkdir -p #{runit_dir}/log/main"
upload! StringIO.new(daemon_script), "#{runit_dir}/run"
upload! StringIO.new(log_script), "#{runit_dir}/log/run"
execute "chmod +x #{runit_dir}/run"
execute "chmod +x #{runit_dir}/log/run"
execute "ln -nfs #{runit_dir} #{fetch :runit_service_dir}"
end
end
after 'deploy:published', :setup_application_server
after :setup_application_server, :setup_reverse_proxy
end
Add special support for the Passenger application server (its standalone mode is launched via `passenger start`).
namespace :load do
  task :defaults do
    # Lazily evaluated so :home/:server_port/:domain/:application/:stage
    # may be set after this file loads.
    set :runit_service_dir, lambda { "#{fetch :home}/etc/service/rails-#{fetch :server_port}-#{fetch :domain}-#{fetch :application}-#{fetch :stage}" }
  end
end
# Capistrano tasks: deploy a Rails app behind an Apache reverse proxy,
# supervised by runit. Supports Passenger's standalone `passenger start` mode.
namespace :rails do
# List the Ruby versions installed under /opt/ruby on each host.
task :versions do
on roles :all do
within '/opt/ruby' do
execute(:ls)
end
end
end
# Drop an .htaccess into the domain's htdocs that reverse-proxies all
# traffic to the application server's port. Skipped if already present.
task :setup_reverse_proxy do
on roles :all do
path = "#{fetch :home}/#{fetch :domain}/htdocs"
if test("[ -e #{path}/.htaccess ]")
info "reverse proxy configured @ #{path}/.htaccess"
next
end
htaccess = <<-EOF
RewriteEngine On
RewriteRule ^(.*)$ http://localhost:#{fetch :server_port}/$1 [P]
EOF
execute "mkdir -p #{path}"
upload! StringIO.new(htaccess), "#{path}/.htaccess"
execute "chmod +r #{path}/.htaccess"
end
end
# Create a runit service (run + log/run scripts) for the app server and
# link it into the service directory. Skipped if the sv dir already exists.
task :setup_application_server do
on roles :all do
daemon_name = "rails-#{fetch :server_port}-#{fetch :domain}-#{fetch :application}-#{fetch :stage}"
runit_dir = "#{fetch :home}/etc/sv/#{daemon_name}"
if test("[ -e #{runit_dir} ]")
info("runit ready @ #{runit_dir}")
next
end
application_server = fetch(:application_server)
# Passenger standalone is invoked as `passenger start -p ... -e ...`.
# BUG FIX: the previous `application_server + ' start' if ...` built the
# string and discarded it, so the suffix was never applied.
application_server += ' start' if application_server == 'passenger'
# runit "run" script: load the user's shell env, cd into the release and
# exec the configured application server in the foreground.
daemon_script = <<-EOF
#!/bin/bash -e
exec 2>&1
export HOME=#{fetch :home}
export PATH=#{fetch(:default_env)['PATH']}
source $HOME/.bashrc
source $HOME/#{fetch :domain}/etc/rubyrc
cd #{fetch :deploy_to}/current
exec bundle exec #{application_server} -p #{fetch :server_port} -e #{fetch :stage} 2>&1
EOF
# runit "log/run" script: timestamped logging via svlogd.
log_script = <<-EOF
#!/bin/bash -e
exec svlogd -tt ./main
EOF
execute "mkdir -p #{runit_dir}"
execute "mkdir -p #{runit_dir}/log/main"
upload! StringIO.new(daemon_script), "#{runit_dir}/run"
upload! StringIO.new(log_script), "#{runit_dir}/log/run"
execute "chmod +x #{runit_dir}/run"
execute "chmod +x #{runit_dir}/log/run"
execute "ln -nfs #{runit_dir} #{fetch :runit_service_dir}"
end
end
after 'deploy:published', :setup_application_server
after :setup_application_server, :setup_reverse_proxy
end
# Gem version constant for capybara-harness.
module Capybara
  module Harness
    VERSION = '0.0.12'
  end
end
Bump version.
# Gem version constant for capybara-harness.
module Capybara
  module Harness
    VERSION = '0.0.13'
  end
end
|
module Cequel
  module Record
    #
    # A chainable, immutable scope over rows of a Record class. Each scoping
    # method (select/limit/where/at/after/...) returns a NEW RecordSet with a
    # deep-copied attributes hash; the CQL data set is built lazily from those
    # attributes. Delegates unknown methods back to the record class with this
    # scope applied.
    #
    class RecordSet < SimpleDelegator
      extend Forwardable
      extend Cequel::Util::HashAccessors
      include Enumerable

      # A range endpoint: the key value plus whether the bound is inclusive.
      Bound = Struct.new(:value, :inclusive)

      # Baseline scope attributes for a fresh, unrestricted record set.
      def self.default_attributes
        {:scoped_key_values => [], :select_columns => []}
      end

      # @param clazz [Class] the Record class this set scopes over
      # @param attributes [Hash] scope state (key values, bounds, limit, ...)
      def initialize(clazz, attributes = {})
        attributes = self.class.default_attributes.merge!(attributes)
        @clazz, @attributes = clazz, attributes
        super(clazz)
      end

      # The unrestricted scope is simply self.
      def all
        self
      end

      # Restrict the columns fetched; with a block behaves like Enumerable#select.
      def select(*columns)
        return super if block_given?
        scoped { |attributes| attributes[:select_columns].concat(columns) }
      end

      # Cap the number of rows returned.
      def limit(count)
        scoped(:row_limit => count)
      end

      # Filter on a secondary-indexed data column. Only one indexed column may
      # be scoped per query; primary-key columns must use #at instead.
      def where(column_name, value)
        column = clazz.table_schema.column(column_name)
        raise IllegalQuery,
          "Can't scope by more than one indexed column in the same query" if scoped_indexed_column
        raise ArgumentError,
          "No column #{column_name} configured for #{clazz.name}" unless column
        raise ArgumentError,
          "Use the `at` method to restrict scope by primary key" unless column.data_column?
        raise ArgumentError,
          "Can't scope by non-indexed column #{column_name}" unless column.indexed?
        scoped(scoped_indexed_column: {column_name => value})
      end

      # Pin the next primary-key column(s) to the given value(s).
      def at(*scoped_key_values)
        scoped do |attributes|
          attributes[:scoped_key_values].concat(scoped_key_values)
        end
      end

      # Scope one level deeper; once all key columns are pinned, return an
      # unloaded record instance with those key attributes instead.
      def [](scoped_key_value)
        if next_key_column
          at(scoped_key_value)
        else
          attributes = {}
          key_values = [*scoped_key_values, scoped_key_value]
          clazz.key_column_names.zip(key_values) do |key_name, key_value|
            attributes[key_name] = key_value
          end
          clazz.new_empty { @attributes = attributes }
        end
      end

      # Like #[] but eagerly loads the record (raising if it does not exist).
      def find(*scoped_key_values)
        self[*scoped_key_values].load!
      end

      # Alias operator for #at.
      def /(scoped_key_value)
        at(scoped_key_value)
      end

      # Exclusive lower bound on the next key column.
      def after(start_key)
        scoped(lower_bound: Bound.new(start_key, false))
      end

      # Exclusive upper bound on the next key column.
      def before(end_key)
        scoped(upper_bound: Bound.new(end_key, false))
      end

      # Bound both ends using a Ruby Range (end inclusivity follows the range).
      def in(range)
        scoped(
          lower_bound: Bound.new(range.first, true),
          upper_bound: Bound.new(range.last, !range.exclude_end?)
        )
      end

      # Inclusive lower bound; only valid once scoped to a single partition.
      def from(start_key)
        unless single_partition?
          raise IllegalQuery,
            "Can't construct exclusive range on partition key #{range_key_name}"
        end
        scoped(lower_bound: Bound.new(start_key, true))
      end

      # Inclusive upper bound; only valid once scoped to a single partition.
      def upto(end_key)
        unless single_partition?
          raise IllegalQuery,
            "Can't construct exclusive range on partition key #{range_key_name}"
        end
        scoped(upper_bound: Bound.new(end_key, true))
      end

      # Flip the clustering order; only valid within a single partition.
      def reverse
        unless single_partition?
          raise IllegalQuery,
            "Can't reverse without scoping to partition key #{range_key_name}"
        end
        scoped(reversed: !reversed?)
      end

      # First record (or first +count+ records) in clustering order.
      def first(count = nil)
        count ? limit(count).entries : limit(1).each.first
      end

      # Last record(s): reversed query, results re-reversed to natural order.
      def last(count = nil)
        reverse.first(count).tap do |results|
          results.reverse! if count
        end
      end

      # COUNT(*) over the scoped data set.
      def count
        data_set.count
      end

      def each(&block)
        find_each(&block)
      end

      # Yield hydrated records, paging through rows in batches.
      def find_each(options = {})
        return enum_for(:find_each, options) unless block_given?
        find_each_row(options) { |row| yield clazz.hydrate(row) }
      end

      # Yield raw rows one at a time, paging in batches.
      def find_each_row(options = {}, &block)
        return enum_for(:find_each_row, options) unless block
        find_rows_in_batches(options) { |row| row.each(&block) }
      end

      # Yield arrays of raw rows, batch by batch. Descends into nested key
      # ranges so batches spanning partition boundaries are fully covered.
      def find_rows_in_batches(options = {}, &block)
        return find_rows_in_single_batch(options, &block) if row_limit
        batch_size = options.fetch(:batch_size, 1000)
        batch_record_set = base_record_set = limit(batch_size)
        more_results = true
        while more_results
          rows = batch_record_set.find_rows_in_single_batch
          yield rows if rows.any?
          # a short batch means we have exhausted the result set
          more_results = rows.length == batch_size
          last_row = rows.last
          if more_results
            find_nested_batches_from(last_row, options, &block)
            batch_record_set = base_record_set.next_batch_from(last_row)
          end
        end
      end

      # The lazily constructed CQL data set for this scope.
      def data_set
        @data_set ||= construct_data_set
      end

      protected

      attr_reader :attributes
      hattr_reader :attributes, :select_columns, :scoped_key_values, :row_limit,
        :lower_bound, :upper_bound, :scoped_indexed_column
      protected :select_columns, :scoped_key_values, :row_limit, :lower_bound,
        :upper_bound, :scoped_indexed_column
      hattr_inquirer :attributes, :reversed
      protected :reversed?

      # Scope for the batch following +row+, respecting query direction.
      def next_batch_from(row)
        reversed? ? before(row[range_key_name]) : after(row[range_key_name])
      end

      # Recurse into the partition of +row+ to finish paging its inner range.
      def find_nested_batches_from(row, options, &block)
        if next_key_column
          at(row[range_key_name]).
            next_batch_from(row).
            find_rows_in_batches(options, &block)
        end
      end

      # Fetch all rows in one query; :batch_size is meaningless here because a
      # row limit is already in force.
      def find_rows_in_single_batch(options = {})
        if options.key?(:batch_size)
          raise ArgumentError,
            "Can't pass :batch_size argument with a limit in the scope"
        else
          data_set.entries.tap do |batch|
            yield batch if batch.any? && block_given?
          end
        end
      end

      # The first key column that is not yet pinned by this scope.
      def range_key_column
        clazz.key_columns[scoped_key_values.length]
      end

      def range_key_name
        range_key_column.name
      end

      # Key columns already pinned by this scope, in declaration order.
      def scoped_key_columns
        clazz.key_columns.first(scoped_key_values.length)
      end

      def scoped_key_names
        scoped_key_columns.map { |column| column.name }
      end

      # True once every partition-key column has been pinned.
      def single_partition?
        scoped_key_values.length >= clazz.partition_key_columns.length
      end

      private

      attr_reader :clazz
      def_delegators :clazz, :connection
      private :connection

      # Unknown methods run against the record class with this scope applied.
      def method_missing(method, *args, &block)
        clazz.with_scope(self) { super }
      end

      # The key column after the current range column, if any.
      def next_key_column
        clazz.key_columns[scoped_key_values.length + 1]
      end

      def next_key_name
        next_key_column.name if next_key_column
      end

      # Translate the accumulated scope attributes into a CQL data set.
      def construct_data_set
        data_set = connection[clazz.table_name]
        data_set = data_set.limit(row_limit) if row_limit
        data_set = data_set.select(*select_columns) if select_columns
        if scoped_key_values
          key_conditions = Hash[scoped_key_names.zip(scoped_key_values)]
          data_set = data_set.where(key_conditions)
        end
        if lower_bound
          fragment = construct_bound_fragment(lower_bound, '>')
          data_set = data_set.where(fragment, lower_bound.value)
        end
        if upper_bound
          fragment = construct_bound_fragment(upper_bound, '<')
          data_set = data_set.where(fragment, upper_bound.value)
        end
        data_set = data_set.order(range_key_name => :desc) if reversed?
        data_set = data_set.where(scoped_indexed_column) if scoped_indexed_column
        data_set
      end

      # Build a CQL comparison; partition-key bounds must compare on TOKEN().
      def construct_bound_fragment(bound, base_operator)
        operator = bound.inclusive ? "#{base_operator}=" : base_operator
        single_partition? ?
          "#{range_key_name} #{operator} ?" :
          "TOKEN(#{range_key_name}) #{operator} TOKEN(?)"
      end

      # Deep-copy the attributes (Marshal round-trip), apply changes, and wrap
      # them in a new RecordSet — scopes are never mutated in place.
      def scoped(new_attributes = {}, &block)
        attributes_copy = Marshal.load(Marshal.dump(attributes))
        attributes_copy.merge!(new_attributes)
        attributes_copy.tap(&block) if block
        RecordSet.new(clazz, attributes_copy)
      end
    end
  end
end
Implement RecordSet#== and #inspect
module Cequel
module Record
class RecordSet < SimpleDelegator
extend Forwardable
extend Cequel::Util::HashAccessors
include Enumerable
Bound = Struct.new(:value, :inclusive)
def self.default_attributes
{:scoped_key_values => [], :select_columns => []}
end
def initialize(clazz, attributes = {})
attributes = self.class.default_attributes.merge!(attributes)
@clazz, @attributes = clazz, attributes
super(clazz)
end
def all
self
end
def select(*columns)
return super if block_given?
scoped { |attributes| attributes[:select_columns].concat(columns) }
end
def limit(count)
scoped(:row_limit => count)
end
def where(column_name, value)
column = clazz.table_schema.column(column_name)
raise IllegalQuery,
"Can't scope by more than one indexed column in the same query" if scoped_indexed_column
raise ArgumentError,
"No column #{column_name} configured for #{clazz.name}" unless column
raise ArgumentError,
"Use the `at` method to restrict scope by primary key" unless column.data_column?
raise ArgumentError,
"Can't scope by non-indexed column #{column_name}" unless column.indexed?
scoped(scoped_indexed_column: {column_name => value})
end
def at(*scoped_key_values)
scoped do |attributes|
attributes[:scoped_key_values].concat(scoped_key_values)
end
end
def [](scoped_key_value)
if next_key_column
at(scoped_key_value)
else
attributes = {}
key_values = [*scoped_key_values, scoped_key_value]
clazz.key_column_names.zip(key_values) do |key_name, key_value|
attributes[key_name] = key_value
end
clazz.new_empty { @attributes = attributes }
end
end
def find(*scoped_key_values)
self[*scoped_key_values].load!
end
def /(scoped_key_value)
at(scoped_key_value)
end
def after(start_key)
scoped(lower_bound: Bound.new(start_key, false))
end
def before(end_key)
scoped(upper_bound: Bound.new(end_key, false))
end
def in(range)
scoped(
lower_bound: Bound.new(range.first, true),
upper_bound: Bound.new(range.last, !range.exclude_end?)
)
end
def from(start_key)
unless single_partition?
raise IllegalQuery,
"Can't construct exclusive range on partition key #{range_key_name}"
end
scoped(lower_bound: Bound.new(start_key, true))
end
def upto(end_key)
unless single_partition?
raise IllegalQuery,
"Can't construct exclusive range on partition key #{range_key_name}"
end
scoped(upper_bound: Bound.new(end_key, true))
end
def reverse
unless single_partition?
raise IllegalQuery,
"Can't reverse without scoping to partition key #{range_key_name}"
end
scoped(reversed: !reversed?)
end
def first(count = nil)
count ? limit(count).entries : limit(1).each.first
end
def last(count = nil)
reverse.first(count).tap do |results|
results.reverse! if count
end
end
# Number of rows matching this scope (server-side count).
def count
  data_set.count
end

def each(&block)
  find_each(&block)
end

# Yield hydrated records one at a time, reading in batches underneath.
def find_each(options = {})
  return enum_for(:find_each, options) unless block_given?
  find_each_row(options) { |row| yield clazz.hydrate(row) }
end

# Yield raw rows one at a time, reading in batches underneath.
def find_each_row(options = {}, &block)
  return enum_for(:find_each_row, options) unless block
  find_rows_in_batches(options) { |row| row.each(&block) }
end

# Yield arrays of raw rows, :batch_size (default 1000) at a time. A batch
# shorter than batch_size signals the end; after each full batch, nested
# key ranges are drained before advancing past the last row seen.
def find_rows_in_batches(options = {}, &block)
  # An explicit row limit means a single bounded query is sufficient.
  return find_rows_in_single_batch(options, &block) if row_limit
  batch_size = options.fetch(:batch_size, 1000)
  batch_record_set = base_record_set = limit(batch_size)
  more_results = true
  while more_results
    rows = batch_record_set.find_rows_in_single_batch
    yield rows if rows.any?
    more_results = rows.length == batch_size
    last_row = rows.last
    if more_results
      find_nested_batches_from(last_row, options, &block)
      batch_record_set = base_record_set.next_batch_from(last_row)
    end
  end
end

# Memoized underlying data set for this scope.
def data_set
  @data_set ||= construct_data_set
end

def_delegators :entries, :inspect

# A record set compares equal to any collection with the same entries.
def ==(other)
  entries == other.to_a
end
protected

attr_reader :attributes
# hattr_reader / hattr_inquirer expose entries of the attributes hash as
# reader / predicate methods (project-defined macros — TODO confirm).
hattr_reader :attributes, :select_columns, :scoped_key_values, :row_limit,
  :lower_bound, :upper_bound, :scoped_indexed_column
protected :select_columns, :scoped_key_values, :row_limit, :lower_bound,
  :upper_bound, :scoped_indexed_column
hattr_inquirer :attributes, :reversed
protected :reversed?

# Scope for the batch following `row`, honoring iteration direction.
def next_batch_from(row)
  reversed? ? before(row[range_key_name]) : after(row[range_key_name])
end

# When deeper key columns exist, descend into the partition identified by
# `row` and continue batching there.
def find_nested_batches_from(row, options, &block)
  if next_key_column
    at(row[range_key_name]).
      next_batch_from(row).
      find_rows_in_batches(options, &block)
  end
end

# Fetch all rows in one query; :batch_size is rejected because an explicit
# limit already bounds the result.
def find_rows_in_single_batch(options = {})
  if options.key?(:batch_size)
    raise ArgumentError,
      "Can't pass :batch_size argument with a limit in the scope"
  else
    data_set.entries.tap do |batch|
      yield batch if batch.any? && block_given?
    end
  end
end

# The first key column not yet fixed by this scope (the "range" key).
def range_key_column
  clazz.key_columns[scoped_key_values.length]
end

def range_key_name
  range_key_column.name
end

# Key columns already fixed by this scope.
def scoped_key_columns
  clazz.key_columns.first(scoped_key_values.length)
end

def scoped_key_names
  scoped_key_columns.map { |column| column.name }
end

# True once every partition key column has a scoped value.
def single_partition?
  scoped_key_values.length >= clazz.partition_key_columns.length
end
private

attr_reader :clazz
def_delegators :clazz, :connection
private :connection

# Delegate unknown calls to the record class, executed inside this scope
# so class-level finders pick up the current restrictions.
# NOTE(review): no matching respond_to_missing? override — confirm.
def method_missing(method, *args, &block)
  clazz.with_scope(self) { super }
end

# The key column AFTER the next unscoped one (range_key_column sits at
# index scoped_key_values.length; this peeks one position further).
def next_key_column
  clazz.key_columns[scoped_key_values.length + 1]
end

def next_key_name
  next_key_column.name if next_key_column
end

# Build the underlying query data set from the accumulated scope
# attributes: limit, selected columns, key equality, range bounds,
# ordering and secondary-index condition, in that order.
def construct_data_set
  data_set = connection[clazz.table_name]
  data_set = data_set.limit(row_limit) if row_limit
  data_set = data_set.select(*select_columns) if select_columns
  if scoped_key_values
    key_conditions = Hash[scoped_key_names.zip(scoped_key_values)]
    data_set = data_set.where(key_conditions)
  end
  if lower_bound
    fragment = construct_bound_fragment(lower_bound, '>')
    data_set = data_set.where(fragment, lower_bound.value)
  end
  if upper_bound
    fragment = construct_bound_fragment(upper_bound, '<')
    data_set = data_set.where(fragment, upper_bound.value)
  end
  data_set = data_set.order(range_key_name => :desc) if reversed?
  data_set = data_set.where(scoped_indexed_column) if scoped_indexed_column
  data_set
end

# Render a range condition; when not scoped to a single partition the
# comparison is on the partition TOKEN rather than the raw value.
def construct_bound_fragment(bound, base_operator)
  operator = bound.inclusive ? "#{base_operator}=" : base_operator
  single_partition? ?
    "#{range_key_name} #{operator} ?" :
    "TOKEN(#{range_key_name}) #{operator} TOKEN(?)"
end

# Return a new RecordSet whose attributes are a deep copy (via Marshal)
# of this one's, merged with new_attributes; the optional block may
# mutate the copy before construction.
def scoped(new_attributes = {}, &block)
  attributes_copy = Marshal.load(Marshal.dump(attributes))
  attributes_copy.merge!(new_attributes)
  attributes_copy.tap(&block) if block
  RecordSet.new(clazz, attributes_copy)
end
end
end
end
|
# -*- encoding : utf-8 -*-
module Cequel
  module Record
    #
    # This module provides `created_at` and `updated_at` functionality for
    # records. It does this in two ways:
    #
    # * If a record's primary key is a `timeuuid` with the `:auto` option set,
    #   the `created_at` method will return the time extracted from the primary
    #   key.
    # * Calling the `timestamps` macro in the class definition will define the
    #   `updated_at` and (if necessary) `created_at` columns, and set up
    #   lifecycle hooks to populate them appropriately.
    #
    # @example Record class with timestamps
    #   class Blog
    #     key :subdomain, :text
    #     column :name, :text
    #
    #     timestamps
    #   end
    #
    module Timestamps
      extend ActiveSupport::Concern

      # Class-level macros mixed into including record classes.
      module ClassMethods
        protected

        # Hook into key declaration: auto timeuuid keys get a derived
        # created_at reader that extracts the time from the UUID.
        def key(name, type, options = {})
          super
          if type == :timeuuid && options[:auto]
            module_eval(<<-RUBY, __FILE__, __LINE__+1)
def created_at
read_attribute(#{name.inspect}).try(:to_time)
end
            RUBY
          end
        end

        # Declare updated_at (and created_at unless already derived from a
        # timeuuid key) plus lifecycle hooks that maintain them.
        def timestamps
          column :updated_at, :timestamp
          if method_defined?(:created_at)
            before_save :set_updated_at
          else
            column :created_at, :timestamp
            before_create :set_created_and_updated_at
            before_update :set_updated_at
          end
        end
      end

      private

      # Stamp both timestamps with the same instant on create.
      # NOTE(review): uses Time.now (system zone), not a zone-aware clock —
      # confirm that is intended.
      def set_created_and_updated_at
        now = Time.now
        self.created_at = now
        self.updated_at = now
      end

      # Refresh updated_at on update.
      def set_updated_at
        self.updated_at = Time.now
      end
    end
  end
end
Add more docs
# -*- encoding : utf-8 -*-
module Cequel
  module Record
    #
    # This module provides `created_at` and `updated_at` functionality for
    # records. It does this in two ways:
    #
    # * If a record's primary key is a `timeuuid` with the `:auto` option set,
    #   the `created_at` method will return the time extracted from the primary
    #   key.
    # * Calling the `timestamps` macro in the class definition will define the
    #   `updated_at` and (if necessary) `created_at` columns, and set up
    #   lifecycle hooks to populate them appropriately.
    #
    # @example Record class with timestamps
    #   class Blog
    #     key :subdomain, :text
    #     column :name, :text
    #
    #     timestamps
    #   end
    #
    # @since 1.3.0
    #
    module Timestamps
      extend ActiveSupport::Concern

      #
      # Provides class methods for the Timestamps module
      #
      module ClassMethods
        protected

        # Hook into key declaration: auto timeuuid keys get a derived
        # created_at reader that extracts the time from the UUID.
        def key(name, type, options = {})
          super
          if type == :timeuuid && options[:auto]
            module_eval(<<-RUBY, __FILE__, __LINE__+1)
def created_at
read_attribute(#{name.inspect}).try(:to_time)
end
            RUBY
          end
        end

        # Declare updated_at (and created_at unless already derived from a
        # timeuuid key) plus lifecycle hooks that maintain them.
        def timestamps
          column :updated_at, :timestamp
          if method_defined?(:created_at)
            before_save :set_updated_at
          else
            column :created_at, :timestamp
            before_create :set_created_and_updated_at
            before_update :set_updated_at
          end
        end
      end

      private

      # Stamp both timestamps with the same instant on create.
      # NOTE(review): uses Time.now (system zone), not a zone-aware clock —
      # confirm that is intended.
      def set_created_and_updated_at
        now = Time.now
        self.created_at = now
        self.updated_at = now
      end

      # Refresh updated_at on update.
      def set_updated_at
        self.updated_at = Time.now
      end
    end
  end
end
|
require "chatroid/adapter/twitter/event"
require "twitter/json_stream"
require "twitter_oauth"
require "json"
require "cgi"
class Chatroid
  module Adapter
    # Twitter adapter: connects to the Twitter streaming API over OAuth,
    # translates incoming JSON items into Events, and offers helpers
    # (tweet/reply/favorite/follow) built on a REST client.
    module Twitter
      private

      # Start the EventMachine reactor and wire up stream callbacks.
      # Fixed: `EventMachine.run` instead of `EventMachine::run` (`::` is
      # for constants, not method calls), and parenthesized block-pass
      # arguments to avoid Ruby's "ambiguous first argument" warning.
      def connect
        EventMachine.run do
          stream.on_error(&method(:on_error))
          stream.each_item(&method(:on_each_item))
        end
      end

      # Lazily-built streaming connection (userstream or filtered stream).
      # `config` is expected to be provided by the including class — TODO confirm.
      def stream
        @stream ||= ::Twitter::JSONStream.connect(
          :host => host,
          :path => path,
          :port => 443,
          :ssl => true,
          :oauth => {
            :consumer_key => config[:consumer_key],
            :consumer_secret => config[:consumer_secret],
            :access_key => config[:access_key],
            :access_secret => config[:access_secret],
          }
        )
      end

      # Lazily-built REST client used for tweets, favorites and follows.
      def client
        @client ||= TwitterOAuth::Client.new(
          :consumer_key => config[:consumer_key],
          :consumer_secret => config[:consumer_secret],
          :token => config[:access_key],
          :secret => config[:access_secret]
        )
      end

      # Post a status update.
      def tweet(body)
        client.update(body)
      end

      # Reply to the status in `event`, prefixing the author's screen name.
      def reply(body, event)
        id = event["id"]
        user = event["user"]["screen_name"]
        body = "@#{user} #{body}"
        client.update(body, :in_reply_to_status_id => id)
      end

      # Favorite the status in `event`.
      def favorite(event)
        client.favorite(event["id"])
      end

      # Follow the author of `event`.
      def follow(event)
        client.friend(event["user"]["id"])
      end

      # Cached account info for the authenticated user.
      def user_info
        @user_info ||= client.info
      end

      # Translate one raw stream item into an Event and dispatch it to the
      # matching trigger_* handler.
      def on_each_item(json)
        event = Event.new(json, user_info["id"])
        send("trigger_#{event.type}", event.params)
      end

      # Print the error and terminate the process.
      def on_error(error)
        p error
        exit
      end

      # Filtered streams use the public stream host; otherwise userstream.
      def host
        config[:filter] ?
          "stream.twitter.com" :
          "userstream.twitter.com"
      end

      # Endpoint path, with the track query escaped for filtered streams.
      def path
        config[:filter] ?
          "/1.1/statuses/filter.json?track=#{CGI.escape(config[:filter])}" :
          "/1.1/user.json"
      end
    end
  end
end
Do not use klass::method style method access
require "chatroid/adapter/twitter/event"
require "twitter/json_stream"
require "twitter_oauth"
require "json"
require "cgi"
class Chatroid
  module Adapter
    # Twitter adapter: connects to the Twitter streaming API over OAuth,
    # translates incoming JSON items into Events, and offers helpers
    # (tweet/reply/favorite/follow) built on a REST client.
    module Twitter
      private

      # Start the EventMachine reactor and wire up stream callbacks.
      # Fixed: parenthesized block-pass arguments — `stream.on_error &x`
      # triggers Ruby's "ambiguous first argument" warning.
      def connect
        EventMachine.run do
          stream.on_error(&method(:on_error))
          stream.each_item(&method(:on_each_item))
        end
      end

      # Lazily-built streaming connection (userstream or filtered stream).
      # `config` is expected to be provided by the including class — TODO confirm.
      def stream
        @stream ||= ::Twitter::JSONStream.connect(
          :host => host,
          :path => path,
          :port => 443,
          :ssl => true,
          :oauth => {
            :consumer_key => config[:consumer_key],
            :consumer_secret => config[:consumer_secret],
            :access_key => config[:access_key],
            :access_secret => config[:access_secret],
          }
        )
      end

      # Lazily-built REST client used for tweets, favorites and follows.
      def client
        @client ||= TwitterOAuth::Client.new(
          :consumer_key => config[:consumer_key],
          :consumer_secret => config[:consumer_secret],
          :token => config[:access_key],
          :secret => config[:access_secret]
        )
      end

      # Post a status update.
      def tweet(body)
        client.update(body)
      end

      # Reply to the status in `event`, prefixing the author's screen name.
      def reply(body, event)
        id = event["id"]
        user = event["user"]["screen_name"]
        body = "@#{user} #{body}"
        client.update(body, :in_reply_to_status_id => id)
      end

      # Favorite the status in `event`.
      def favorite(event)
        client.favorite(event["id"])
      end

      # Follow the author of `event`.
      def follow(event)
        client.friend(event["user"]["id"])
      end

      # Cached account info for the authenticated user.
      def user_info
        @user_info ||= client.info
      end

      # Translate one raw stream item into an Event and dispatch it to the
      # matching trigger_* handler.
      def on_each_item(json)
        event = Event.new(json, user_info["id"])
        send("trigger_#{event.type}", event.params)
      end

      # Print the error and terminate the process.
      def on_error(error)
        p error
        exit
      end

      # Filtered streams use the public stream host; otherwise userstream.
      def host
        config[:filter] ?
          "stream.twitter.com" :
          "userstream.twitter.com"
      end

      # Endpoint path, with the track query escaped for filtered streams.
      def path
        config[:filter] ?
          "/1.1/statuses/filter.json?track=#{CGI.escape(config[:filter])}" :
          "/1.1/user.json"
      end
    end
  end
end
|
#
# Copyright (C) 2010-2016 dtk contributors
#
# This file is part of the dtk project.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
require 'yaml'
module DTK::Client
# Abstract class that holds classes and methods for executing commands by
# making calls to the server and performing client-side operations
class ContentGenerator
  # Builds the module content hash (dsl_version, module, version, plus
  # optional dependencies and assemblies) from files under content_dir.
  def initialize(content_dir, module_ref, version)
    @content_dir = content_dir
    # module_ref must respond to #namespace and #module_name
    @module_ref = module_ref
    @version = version # nil falls back to 'master'
    @directory_content = nil
  end

  # Assemble and return the module descriptor hash.
  def generate_module_content
    module_hash = {
      'dsl_version' => '1.0.0',
      'module' => "#{@module_ref.namespace}/#{@module_ref.module_name}",
      'version' => @version || 'master'
    }
    if dependencies = ret_dependencies_hash
      module_hash.merge!('dependencies' => dependencies)
    end
    if assemblies = ret_assemblies_hash
      module_hash.merge!('assemblies' => assemblies)
    end
    module_hash
  end

  private

  # Read the file contents, or nil when the path is missing.
  # File.read avoids the descriptor leak of File.open(...).read, and
  # File.exist? replaces File.exists? (removed in Ruby 3.2).
  def get_raw_content?(file_path)
    File.read(file_path) if file_path && File.exist?(file_path)
  end

  # Parse a YAML file, converting parse failures into usage errors.
  # NOTE(review): YAML.load is fine for trusted module sources; consider
  # YAML.safe_load if content can come from untrusted users.
  def convert_file_content_to_hash(file_path)
    YAML.load(get_raw_content?(file_path))
  rescue => e # narrowed from `rescue Exception`, which also trapped signals/exit
    yaml_err_msg = e.message.gsub(/\(<unknown>\): /,'').capitalize
    raise Error::Usage, "YAML parsing error in '#{file_path}':\n#{yaml_err_msg}"
  end

  # Memoized recursive listing of the content directory.
  def get_directory_content
    @directory_content ||= Dir.glob("#{@content_dir}/**/*")
  end

  # Force a re-scan of the content directory.
  def invalidate_directory_content
    @directory_content = Dir.glob("#{@content_dir}/**/*")
  end

  def get_assembly_files
    get_directory_content.select { |f| f =~ AssemblyRegexp[:regexp] || f =~ AssemblyRegexp[:legacy_regexp] }
  end

  # Single-quoted sources keep the backslashes; the original double-quoted
  # strings dropped them, leaving unescaped dots that matched any character.
  AssemblyRegexp = {
    :regexp => Regexp.new('assemblies/(.*)\.dtk\.assembly\.(yml|yaml)$'),
    :legacy_regexp => Regexp.new('assemblies/([^/]+)/assembly\.(yml|yaml)$')
  }

  def get_module_refs_file
    get_directory_content.find { |f| f =~ ModuleRefsRegexp[:regexp] }
  end

  ModuleRefsRegexp = {
    :regexp => Regexp.new('module_refs\.(yml|yaml)$')
  }

  # name => assembly content (with normalized workflows), or nil when none.
  def ret_assemblies_hash
    assemblies = {}
    get_assembly_files.each do |assembly|
      content_hash = convert_file_content_to_hash(assembly)
      name = content_hash['name']
      assembly_content = content_hash['assembly']
      workflows = ret_workflows_hash(content_hash)
      assembly_content.merge!('workflows' => workflows) if workflows
      assemblies.merge!(name => assembly_content)
    end
    assemblies.empty? ? nil : assemblies
  end

  # Normalize the legacy single-workflow form ('assembly_action' key) into
  # the standard name => workflow mapping.
  def ret_workflows_hash(content_hash)
    if workflows = content_hash['workflow'] || content_hash['workflows']
      # this is legacy workflow
      if workflow_name = workflows.delete('assembly_action')
        { workflow_name => workflows }
      else
        workflows
      end
    end
  end

  # List of "namespace/name" strings from module_refs.{yml,yaml}, or nil
  # when no module_refs file exists.
  def ret_dependencies_hash
    if file_path = get_module_refs_file
      module_refs_content = convert_file_content_to_hash(file_path)
      dependencies = []
      if cmp_dependencies = module_refs_content['component_modules']
        cmp_dependencies.each_pair do |name, namespace_h|
          dependencies << "#{namespace_h['namespace']}/#{name}"
        end
      end
      dependencies
    end
  end
end
end
Fix dependency parsing on dtk module install
#
# Copyright (C) 2010-2016 dtk contributors
#
# This file is part of the dtk project.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
require 'yaml'
module DTK::Client
# Abstract class that holds classes and methods for executing commands by
# making calls to the server and performing client-side operations
class ContentGenerator
  # Builds the module content hash (dsl_version, module, version, plus
  # optional dependencies and assemblies) from files under content_dir.
  def initialize(content_dir, module_ref, version)
    @content_dir = content_dir
    # module_ref must respond to #namespace and #module_name
    @module_ref = module_ref
    @version = version # nil falls back to 'master'
    @directory_content = nil
  end

  # Assemble and return the module descriptor hash.
  def generate_module_content
    module_hash = {
      'dsl_version' => '1.0.0',
      'module' => "#{@module_ref.namespace}/#{@module_ref.module_name}",
      'version' => @version || 'master'
    }
    if dependencies = ret_dependencies_hash
      module_hash.merge!('dependencies' => dependencies)
    end
    if assemblies = ret_assemblies_hash
      module_hash.merge!('assemblies' => assemblies)
    end
    module_hash
  end

  private

  # Read the file contents, or nil when the path is missing.
  # File.read avoids the descriptor leak of File.open(...).read, and
  # File.exist? replaces File.exists? (removed in Ruby 3.2).
  def get_raw_content?(file_path)
    File.read(file_path) if file_path && File.exist?(file_path)
  end

  # Parse a YAML file, converting parse failures into usage errors.
  # NOTE(review): YAML.load is fine for trusted module sources; consider
  # YAML.safe_load if content can come from untrusted users.
  def convert_file_content_to_hash(file_path)
    YAML.load(get_raw_content?(file_path))
  rescue => e # narrowed from `rescue Exception`, which also trapped signals/exit
    yaml_err_msg = e.message.gsub(/\(<unknown>\): /,'').capitalize
    raise Error::Usage, "YAML parsing error in '#{file_path}':\n#{yaml_err_msg}"
  end

  # Memoized recursive listing of the content directory.
  def get_directory_content
    @directory_content ||= Dir.glob("#{@content_dir}/**/*")
  end

  # Force a re-scan of the content directory.
  def invalidate_directory_content
    @directory_content = Dir.glob("#{@content_dir}/**/*")
  end

  def get_assembly_files
    get_directory_content.select { |f| f =~ AssemblyRegexp[:regexp] || f =~ AssemblyRegexp[:legacy_regexp] }
  end

  # Single-quoted sources keep the backslashes; the original double-quoted
  # strings dropped them, leaving unescaped dots that matched any character.
  AssemblyRegexp = {
    :regexp => Regexp.new('assemblies/(.*)\.dtk\.assembly\.(yml|yaml)$'),
    :legacy_regexp => Regexp.new('assemblies/([^/]+)/assembly\.(yml|yaml)$')
  }

  def get_module_refs_file
    get_directory_content.find { |f| f =~ ModuleRefsRegexp[:regexp] }
  end

  ModuleRefsRegexp = {
    :regexp => Regexp.new('module_refs\.(yml|yaml)$')
  }

  # name => assembly content (with normalized workflows), or nil when none.
  def ret_assemblies_hash
    assemblies = {}
    get_assembly_files.each do |assembly|
      content_hash = convert_file_content_to_hash(assembly)
      name = content_hash['name']
      assembly_content = content_hash['assembly']
      workflows = ret_workflows_hash(content_hash)
      assembly_content.merge!('workflows' => workflows) if workflows
      assemblies.merge!(name => assembly_content)
    end
    assemblies.empty? ? nil : assemblies
  end

  # Normalize the legacy single-workflow form ('assembly_action' key) into
  # the standard name => workflow mapping.
  def ret_workflows_hash(content_hash)
    if workflows = content_hash['workflow'] || content_hash['workflows']
      # this is legacy workflow
      if workflow_name = workflows.delete('assembly_action')
        { workflow_name => workflows }
      else
        workflows
      end
    end
  end

  # Map "namespace/name" => version (defaulting to 'master') for entries
  # under component_modules in module_refs.{yml,yaml}; nil when no file.
  def ret_dependencies_hash
    if file_path = get_module_refs_file
      module_refs_content = convert_file_content_to_hash(file_path)
      dependencies = {}
      if cmp_dependencies = module_refs_content['component_modules']
        cmp_dependencies.each_pair do |name, namespace_h|
          dependencies["#{namespace_h['namespace']}/#{name}"] = namespace_h['version'] || 'master'
        end
      end
      dependencies
    end
  end
end
end
|
module Codemirror
  module Rails
    # Gem release version.
    # NOTE(review): lags CODEMIRROR_VERSION — confirm the skew is intended.
    VERSION = '5.7'
    # Version of the bundled CodeMirror JavaScript assets.
    CODEMIRROR_VERSION = '5.8'
  end
end
Bump gem version for release
module Codemirror
  module Rails
    # Gem release version (kept in lockstep with the bundled assets).
    VERSION = '5.8'
    # Version of the bundled CodeMirror JavaScript assets.
    CODEMIRROR_VERSION = '5.8'
  end
end
|
module ConnectFourCli
  # Gem release version.
  VERSION = "0.1.0"
end
v0.1.1
module ConnectFourCli
  # Gem release version.
  VERSION = "0.1.1"
end
|
module ContentfulModel
  # Gem release version.
  VERSION = "0.1.0"
end
version bump
module ContentfulModel
  # Gem release version.
  VERSION = "0.1.0.1"
end
|
# Author:: Mike Evans <mike@urlgonomics.com>
# Copyright:: 2013 Urlgonomics LLC.
# License:: Apache License, Version 2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
module Couchbase::Operations
module Get
# Obtain an object stored in Couchbase by given key.
#
# @since 1.0.0
#
# @see http://couchbase.com/docs/couchbase-manual-2.0/couchbase-architecture-apis-memcached-protocol-additions.html#couchbase-architecture-apis-memcached-protocol-additions-getl
#
# @overload get(*keys, options = {})
# @param keys [String, Symbol, Array] One or several keys to fetch
# @param options [Hash] Options for operation.
# @option options [true, false] :extended (false) If set to +true+, the
# operation will return a tuple +[value, flags, cas]+, otherwise (by
# default) it returns just the value.
# @option options [Fixnum] :ttl (self.default_ttl) Expiry time for key.
# Values larger than 30*24*60*60 seconds (30 days) are interpreted as
# absolute times (from the epoch).
# @option options [true, false] :quiet (self.quiet) If set to +true+, the
# operation won't raise error for missing key, it will return +nil+.
# Otherwise it will raise error in synchronous mode. In asynchronous
# mode this option ignored.
# @option options [Symbol] :format (nil) Explicitly choose the decoder
# for this key (+:plain+, +:document+, +:marshal+). See
# {Bucket#default_format}.
# @option options [Fixnum, Boolean] :lock Lock the keys for time span.
# If this parameter is +true+ the key(s) will be locked for default
# timeout. Also you can use number to setup your own timeout in
# seconds. If it will be lower that zero or exceed the maximum, the
# server will use default value. You can determine actual default and
# maximum values calling {Bucket#stats} without arguments and
# inspecting keys "ep_getl_default_timeout" and "ep_getl_max_timeout"
# correspondingly. See overloaded hash syntax to specify custom timeout
# per each key.
# @option options [true, false] :assemble_hash (false) Assemble Hash for
# results. Hash assembled automatically if +:extended+ option is true
# or in case of "get and touch" with multiple keys.
# @option options [true, false] :replica (false) Read key from replica
# node. Options +:ttl+ and +:lock+ are not compatible with +:replica+.
#
# @yieldparam ret [Result] the result of operation in asynchronous mode
# (valid attributes: +error+, +operation+, +key+, +value+, +flags+,
# +cas+).
#
# @return [Object, Array, Hash] the value(s) (or tuples in extended mode)
# associated with the key.
#
# @raise [Couchbase::Error::NotFound] if the key is missing in the
# bucket.
#
# @raise [Couchbase::Error::Connect] if connection closed (see {Bucket#reconnect})
#
# @raise [ArgumentError] when passing the block in synchronous mode
#
# @example Get single value in quiet mode (the default)
# c.get("foo") #=> the associated value or nil
#
# @example Use alternative hash-like syntax
# c["foo"] #=> the associated value or nil
#
# @example Get single value in verbose mode
# c.get("missing-foo", :quiet => false) #=> raises Couchbase::NotFound
# c.get("missing-foo", :quiet => true) #=> returns nil
#
# @example Get and touch single value. The key won't be accessible after 10 seconds
# c.get("foo", :ttl => 10)
#
# @example Extended get
# val, flags, cas = c.get("foo", :extended => true)
#
# @example Get multiple keys
# c.get("foo", "bar", "baz") #=> [val1, val2, val3]
#
# @example Get multiple keys, assembling the result into a Hash
# c.get("foo", "bar", "baz", :assemble_hash => true)
# #=> {"foo" => val1, "bar" => val2, "baz" => val3}
#
# @example Extended get multiple keys
# c.get("foo", "bar", :extended => true)
# #=> {"foo" => [val1, flags1, cas1], "bar" => [val2, flags2, cas2]}
#
# @example Asynchronous get
# c.run do
# c.get("foo", "bar", "baz") do |res|
# ret.operation #=> :get
# ret.success? #=> true
# ret.key #=> "foo", "bar" or "baz" in separate calls
# ret.value
# ret.flags
# ret.cas
# end
# end
#
# @example Get and lock key using default timeout
# c.get("foo", :lock => true)
#
# @example Determine lock timeout parameters
# c.stats.values_at("ep_getl_default_timeout", "ep_getl_max_timeout")
# #=> [{"127.0.0.1:11210"=>"15"}, {"127.0.0.1:11210"=>"30"}]
#
# @example Get and lock key using custom timeout
# c.get("foo", :lock => 3)
#
# @example Get and lock multiple keys using custom timeout
# c.get("foo", "bar", :lock => 3)
#
# @overload get(keys, options = {})
# When the method receive hash map, it will behave like it receive list
# of keys (+keys.keys+), but also touch each key setting expiry time to
# the corresponding value. But unlike usual get this command always
# return hash map +{key => value}+ or +{key => [value, flags, cas]}+.
#
# @param keys [Hash] Map key-ttl
# @param options [Hash] Options for operation. (see options definition
# above)
#
# @return [Hash] the values (or tuples in extended mode) associated with
# the keys.
#
# @example Get and touch multiple keys
# c.get("foo" => 10, "bar" => 20) #=> {"foo" => val1, "bar" => val2}
#
# @example Extended get and touch multiple keys
# c.get({"foo" => 10, "bar" => 20}, :extended => true)
# #=> {"foo" => [val1, flags1, cas1], "bar" => [val2, flags2, cas2]}
#
# @example Get and lock multiple keys for chosen period in seconds
# c.get("foo" => 10, "bar" => 20, :lock => true)
# #=> {"foo" => val1, "bar" => val2}
#
# Entry point for all get variants (single, bulk, get-and-touch).
# See the option documentation in the comment block above.
def get(*args, &block)
  key, options = expand_get_args(args)
  if async?
    # Asynchronous path: dispatch and register the callback.
    async_get(key, &block)
  else
    # Blocks are only meaningful in asynchronous mode.
    sync_block_error if block_given?
    get_key(key, options)
  end
end

# Hash-style read access: c["foo"].
def [](key, options = {})
  get(key, options)
end

# Fetch many keys at once. Missing keys are detected by comparing result
# count to requested count; not_found_error raises unless quiet.
def get_bulk(keys, options)
  results = if options[:extended]
    get_bulk_extended(keys)
  else
    client_get_bulk(keys)
  end
  not_found_error(results.size != keys.size, options)
  if options[:assemble_hash] || options[:extended]
    results
  else
    # Preserve the caller's key order.
    ordered_multi_values(keys, results)
  end
end
# Issue a non-blocking get. Accepts a String/Symbol key, an Array of
# keys, or (currently unimplemented) a Hash of key => ttl. When a block
# is supplied, it is registered as the callback for the returned future.
#
# Fixed: `&Proc.new` with no block (implicit block capture) was
# deprecated in Ruby 2.7 and removed in Ruby 3; the block is now taken
# as an explicit optional parameter, which is backward-compatible for
# all callers.
def async_get(key, &block)
  case key
  when String, Symbol
    meta = { op: :get, key: key }
    future = client.asyncGet(key)
  when Array
    meta = { op: :get }
    future = client.asyncGetBulk(key)
  when Hash
    # async_get_and_touch(key, options, &block)
  end
  register_future(future, meta, &block) if block
end
private

# Dispatch on key shape: scalar -> single get, Array -> bulk get,
# Hash -> get-and-touch (key => ttl).
def get_key(key, options)
  case key
  when String, Symbol
    get_single(key, options)
  when Array
    get_bulk(key, options)
  when Hash
    get_and_touch(key, options)
  end
end

# Split a trailing options hash from the key argument(s).
def expand_get_args(args)
  options = extract_options_hash(args)
  key = args.size == 1 ? args.first : args
  [key, options]
end

# Single-key get with lock / extended / ttl variants.
def get_single(key, options)
  if options[:lock]
    client_get_and_lock(key, options)
  elsif options[:extended]
    get_extended(key, options)
  else
    value = if options.key?(:ttl)
      client_get_and_touch(key, options[:ttl])
    else
      client.get(key)
    end
    not_found_error(value.nil?, options)
    value.nil? ? nil : value
  end
end

# Return [value, flags, cas]; the flags slot is always nil here.
# NOTE(review): passes options[:lock] where client_get_and_lock expects
# the options Hash, and discards its result — looks buggy; confirm intent.
def get_extended(key, options = {})
  if options.key?(:lock)
    client_get_and_lock(key, options[:lock])
  end
  extended = client_get_extended(key)
  not_found_error(extended.nil?, options)
  extended
end

# Hash form {key => ttl}: read each key and set its expiry.
def get_and_touch(key, options = {})
  if key.size > 1
    get_bulk_and_touch(key, options)
  else
    key, ttl = key.first
    value = client_get_and_touch(key, ttl)
    not_found_error(value.nil?)
    { key => value }
  end
end

# Bulk variant of get-and-touch; always returns a Hash.
def get_bulk_and_touch(keys, options = {})
  options.merge!(assemble_hash: true)
  results = get_bulk(keys.keys, options)
  touch(keys)
  results.to_hash
end
# Fetch extended [value, flags, cas] tuples for many keys. Missing keys
# are now omitted from the result: previously they were included with a
# nil value, which kept results.size equal to keys.size and so defeated
# get_bulk's missing-key detection while leaking nils in quiet mode.
def get_bulk_extended(keys, options = {})
  {}.tap do |results|
    keys.each do |key|
      result = get_extended(key, options)
      results[key] = result unless result.nil?
    end
  end
end
# Order bulk results to match the requested key order.
def ordered_multi_values(keys, results)
  keys.map { |key| results[key] }
end

def client_get_and_touch(key, ttl)
  client.getAndTouch(key, ttl).getValue
end

# :lock => true uses the 30-second default; a number is a custom timeout.
def client_get_and_lock(key, options)
  lock = options[:lock] == true ? 30 : options[:lock]
  cas = client.getAndLock(key, lock)
  if options[:extended]
    [cas.getValue, nil, cas.getCas]
  else
    cas.getValue
  end
end

# Extended single read: [value, flags, cas], or nil when missing.
def client_get_extended(key)
  cas_value = client.gets(key)
  if cas_value.nil?
    nil
  else
    [cas_value.getValue, nil, cas_value.getCas]
  end
end

# Bulk read via the underlying (Java) client; a ClassCastException from
# mixed key types surfaces as a Ruby TypeError.
def client_get_bulk(keys)
  client.getBulk(keys)
rescue java.lang.ClassCastException
  raise TypeError.new
end
end
Fixed bug with bulk gets and quiet flags
# Author:: Mike Evans <mike@urlgonomics.com>
# Copyright:: 2013 Urlgonomics LLC.
# License:: Apache License, Version 2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
module Couchbase::Operations
module Get
# Obtain an object stored in Couchbase by given key.
#
# @since 1.0.0
#
# @see http://couchbase.com/docs/couchbase-manual-2.0/couchbase-architecture-apis-memcached-protocol-additions.html#couchbase-architecture-apis-memcached-protocol-additions-getl
#
# @overload get(*keys, options = {})
# @param keys [String, Symbol, Array] One or several keys to fetch
# @param options [Hash] Options for operation.
# @option options [true, false] :extended (false) If set to +true+, the
# operation will return a tuple +[value, flags, cas]+, otherwise (by
# default) it returns just the value.
# @option options [Fixnum] :ttl (self.default_ttl) Expiry time for key.
# Values larger than 30*24*60*60 seconds (30 days) are interpreted as
# absolute times (from the epoch).
# @option options [true, false] :quiet (self.quiet) If set to +true+, the
# operation won't raise error for missing key, it will return +nil+.
# Otherwise it will raise error in synchronous mode. In asynchronous
# mode this option ignored.
# @option options [Symbol] :format (nil) Explicitly choose the decoder
# for this key (+:plain+, +:document+, +:marshal+). See
# {Bucket#default_format}.
# @option options [Fixnum, Boolean] :lock Lock the keys for time span.
# If this parameter is +true+ the key(s) will be locked for default
# timeout. Also you can use number to setup your own timeout in
# seconds. If it will be lower that zero or exceed the maximum, the
# server will use default value. You can determine actual default and
# maximum values calling {Bucket#stats} without arguments and
# inspecting keys "ep_getl_default_timeout" and "ep_getl_max_timeout"
# correspondingly. See overloaded hash syntax to specify custom timeout
# per each key.
# @option options [true, false] :assemble_hash (false) Assemble Hash for
# results. Hash assembled automatically if +:extended+ option is true
# or in case of "get and touch" with multiple keys.
# @option options [true, false] :replica (false) Read key from replica
# node. Options +:ttl+ and +:lock+ are not compatible with +:replica+.
#
# @yieldparam ret [Result] the result of operation in asynchronous mode
# (valid attributes: +error+, +operation+, +key+, +value+, +flags+,
# +cas+).
#
# @return [Object, Array, Hash] the value(s) (or tuples in extended mode)
# associated with the key.
#
# @raise [Couchbase::Error::NotFound] if the key is missing in the
# bucket.
#
# @raise [Couchbase::Error::Connect] if connection closed (see {Bucket#reconnect})
#
# @raise [ArgumentError] when passing the block in synchronous mode
#
# @example Get single value in quiet mode (the default)
# c.get("foo") #=> the associated value or nil
#
# @example Use alternative hash-like syntax
# c["foo"] #=> the associated value or nil
#
# @example Get single value in verbose mode
# c.get("missing-foo", :quiet => false) #=> raises Couchbase::NotFound
# c.get("missing-foo", :quiet => true) #=> returns nil
#
# @example Get and touch single value. The key won't be accessible after 10 seconds
# c.get("foo", :ttl => 10)
#
# @example Extended get
# val, flags, cas = c.get("foo", :extended => true)
#
# @example Get multiple keys
# c.get("foo", "bar", "baz") #=> [val1, val2, val3]
#
# @example Get multiple keys, assembling the result into a Hash
# c.get("foo", "bar", "baz", :assemble_hash => true)
# #=> {"foo" => val1, "bar" => val2, "baz" => val3}
#
# @example Extended get multiple keys
# c.get("foo", "bar", :extended => true)
# #=> {"foo" => [val1, flags1, cas1], "bar" => [val2, flags2, cas2]}
#
# @example Asynchronous get
# c.run do
# c.get("foo", "bar", "baz") do |res|
# ret.operation #=> :get
# ret.success? #=> true
# ret.key #=> "foo", "bar" or "baz" in separate calls
# ret.value
# ret.flags
# ret.cas
# end
# end
#
# @example Get and lock key using default timeout
# c.get("foo", :lock => true)
#
# @example Determine lock timeout parameters
# c.stats.values_at("ep_getl_default_timeout", "ep_getl_max_timeout")
# #=> [{"127.0.0.1:11210"=>"15"}, {"127.0.0.1:11210"=>"30"}]
#
# @example Get and lock key using custom timeout
# c.get("foo", :lock => 3)
#
# @example Get and lock multiple keys using custom timeout
# c.get("foo", "bar", :lock => 3)
#
# @overload get(keys, options = {})
# When the method receive hash map, it will behave like it receive list
# of keys (+keys.keys+), but also touch each key setting expiry time to
# the corresponding value. But unlike usual get this command always
# return hash map +{key => value}+ or +{key => [value, flags, cas]}+.
#
# @param keys [Hash] Map key-ttl
# @param options [Hash] Options for operation. (see options definition
# above)
#
# @return [Hash] the values (or tuples in extended mode) associated with
# the keys.
#
# @example Get and touch multiple keys
# c.get("foo" => 10, "bar" => 20) #=> {"foo" => val1, "bar" => val2}
#
# @example Extended get and touch multiple keys
# c.get({"foo" => 10, "bar" => 20}, :extended => true)
# #=> {"foo" => [val1, flags1, cas1], "bar" => [val2, flags2, cas2]}
#
# @example Get and lock multiple keys for chosen period in seconds
# c.get("foo" => 10, "bar" => 20, :lock => true)
# #=> {"foo" => val1, "bar" => val2}
#
# Entry point for all get variants (single, bulk, get-and-touch).
# See the option documentation in the comment block above.
def get(*args, &block)
  key, options = expand_get_args(args)
  if async?
    # Asynchronous path: dispatch and register the callback.
    async_get(key, &block)
  else
    # Blocks are only meaningful in asynchronous mode.
    sync_block_error if block_given?
    get_key(key, options)
  end
end

# Hash-style read access: c["foo"].
def [](key, options = {})
  get(key, options)
end

# Fetch many keys at once. Missing keys are detected by comparing result
# count to requested count; not_found_error raises unless quiet.
def get_bulk(keys, options)
  results = if options[:extended]
    get_bulk_extended(keys)
  else
    client_get_bulk(keys)
  end
  not_found_error(results.size != keys.size, options)
  if options[:assemble_hash] || options[:extended]
    results
  else
    # Preserve the caller's key order.
    ordered_multi_values(keys, results)
  end
end
# Issue a non-blocking get. Accepts a String/Symbol key, an Array of
# keys, or (currently unimplemented) a Hash of key => ttl. When a block
# is supplied, it is registered as the callback for the returned future.
#
# Fixed: `&Proc.new` with no block (implicit block capture) was
# deprecated in Ruby 2.7 and removed in Ruby 3; the block is now taken
# as an explicit optional parameter, which is backward-compatible for
# all callers.
def async_get(key, &block)
  case key
  when String, Symbol
    meta = { op: :get, key: key }
    future = client.asyncGet(key)
  when Array
    meta = { op: :get }
    future = client.asyncGetBulk(key)
  when Hash
    # async_get_and_touch(key, options, &block)
  end
  register_future(future, meta, &block) if block
end
private
def get_key(key, options)
case key
when String, Symbol
get_single(key, options)
when Array
get_bulk(key, options)
when Hash
get_and_touch(key, options)
end
end
# Split a splatted argument list into [key-or-keys, options]: a single
# remaining argument is treated as one key, several as a key batch.
def expand_get_args(args)
  options = extract_options_hash(args)
  if args.size == 1
    [args.first, options]
  else
    [args, options]
  end
end
# Fetch one key, honouring :lock, :extended and :ttl (get-and-touch).
# Returns nil for a miss unless not_found_error raises for these options.
def get_single(key, options)
  if options[:lock]
    client_get_and_lock(key, options)
  elsif options[:extended]
    get_extended(key, options)
  else
    value = if options.key?(:ttl)
      client_get_and_touch(key, options[:ttl])
    else
      client.get(key)
    end
    not_found_error(value.nil?, options)
    # Fix: the previous `value.nil? ? nil : value` was a no-op ternary.
    value
  end
end
# Fetch the [value, flags, cas] tuple for a key, locking it first when
# :lock was requested.
def get_extended(key, options = {})
  # Bug fix: client_get_and_lock reads options[:lock] (and options[:extended])
  # from its second argument, so it must receive the options Hash. Previously
  # it was handed the bare lock value, which then got indexed with [:lock]
  # and crashed locked extended reads.
  client_get_and_lock(key, options) if options.key?(:lock)
  extended = client_get_extended(key)
  not_found_error(extended.nil?, options)
  extended
end
# Fetch the keys of a {key => ttl} map while resetting each key's expiry.
# Always returns a {key => value} hash.
def get_and_touch(key, options = {})
  if key.size > 1
    get_bulk_and_touch(key, options)
  else
    # Single pair: unpack it and use the atomic get-and-touch call.
    key, ttl = key.first
    value = client_get_and_touch(key, ttl)
    # NOTE(review): unlike the other paths, options are not passed to
    # not_found_error here — confirm the arity/behaviour is still correct.
    not_found_error(value.nil?)
    { key => value }
  end
end
# Bulk fetch for a {key => ttl} map: read all keys, then touch each one
# with its requested expiry. Returns a {key => value} hash.
def get_bulk_and_touch(keys, options = {})
  # Fix: use non-destructive merge — merge! mutated the caller's options
  # hash, leaking :assemble_hash back to the caller.
  options = options.merge(assemble_hash: true)
  results = get_bulk(keys.keys, options)
  touch(keys)
  results.to_hash
end
# Fetch the extended tuple for each key, returning a {key => tuple} hash
# in which missing keys are simply absent.
def get_bulk_extended(keys, options = {})
  keys.each_with_object({}) do |key, collected|
    extended = get_extended(key, options)
    collected[key] = extended unless extended.nil?
  end
end
# Project the bulk-result map back into the order the keys were requested;
# keys without a result contribute nil placeholders.
def ordered_multi_values(keys, results)
  ordered = []
  keys.each { |requested_key| ordered << results[requested_key] }
  ordered
end
# Thin wrapper over the Java client: getAndTouch returns a CASValue,
# from which only the payload is exposed.
def client_get_and_touch(key, ttl)
  client.getAndTouch(key, ttl).getValue
end
# Fetch a key while taking a server-side lock on it.
def client_get_and_lock(key, options)
  # :lock => true means "use the default of 30 seconds"; any other value
  # is taken as the lock period itself.
  lock = options[:lock] == true ? 30 : options[:lock]
  cas = client.getAndLock(key, lock)
  if options[:extended]
    # Extended tuple shape is [value, flags, cas]; flags are not exposed
    # by this wrapper, hence the nil placeholder.
    [cas.getValue, nil, cas.getCas]
  else
    cas.getValue
  end
end
# Fetch a key together with its CAS value; returns [value, flags, cas]
# (flags unexposed, hence nil) or nil on a miss.
def client_get_extended(key)
  cas_value = client.gets(key)
  return nil if cas_value.nil?
  [cas_value.getValue, nil, cas_value.getCas]
end
# Bulk fetch via the Java client.
def client_get_bulk(keys)
  client.getBulk(keys)
rescue java.lang.ClassCastException
  # The Java client raises ClassCastException for keys of the wrong type;
  # surface that as an idiomatic Ruby TypeError instead.
  raise TypeError.new
end
end
end
|
module Cucumber
  module Formatter
    # The formatter used for <tt>--format steps</tt>
    class Steps
      def initialize(step_mother, io, options)
        @io = io
        @options = options
        @step_definition_files = collect_steps(step_mother)
      end

      # Visitor hook; prints the step-definition summary.
      # NOTE(review): the summary is emitted as soon as visit_features is
      # called — confirm this hook fires at the intended point of the walk.
      def visit_features(features)
        print_summary
      end

      private

      # Print each step-definition file followed by its regexps, with the
      # "# file:line" location comment aligned one column past the widest
      # regexp, then a grand-total line.
      def print_summary
        count = 0
        @step_definition_files.keys.sort.each do |step_definition_file|
          @io.puts step_definition_file
          sources = @step_definition_files[step_definition_file]
          source_indent = source_indent(sources)
          sources.sort.each do |file_colon_line, regexp|
            @io.print "#{regexp}".indent(2)
            @io.print " # #{file_colon_line}".indent(source_indent - regexp.size)
            @io.puts
          end
          @io.puts
          count += sources.size
        end
        @io.puts "#{count} step definition(s) in #{@step_definition_files.size} source file(s)."
      end

      # Group [file:line, regexp.inspect] pairs by the file that defines them.
      def collect_steps(step_mother)
        step_mother.step_definitions.inject({}) do |step_definitions, step_definition|
          step_definitions[step_definition.file] ||= []
          step_definitions[step_definition.file] << [ step_definition.file_colon_line, step_definition.regexp.inspect ]
          step_definitions
        end
      end

      # Column for the location comments: widest regexp plus one.
      def source_indent(sources)
        sources.map { |file_colon_line, regexp| regexp.size }.max + 1
      end
    end
  end
end
Fix steps formatter: print the summary from after_visit_features (once traversal has finished) instead of from visit_features.
module Cucumber
  module Formatter
    # The formatter used for <tt>--format steps</tt>
    class Steps
      def initialize(step_mother, io, options)
        @io = io
        @options = options
        @step_definition_files = collect_steps(step_mother)
      end

      # Visitor hook fired once feature traversal has finished.
      def after_visit_features(features)
        print_summary
      end

      private

      # Emit one section per step-definition file (sorted by file name):
      # each regexp indented two columns, its "# file:line" location aligned
      # one column past the widest regexp, then a grand-total line.
      def print_summary
        total = @step_definition_files.keys.sort.reduce(0) do |count, definition_file|
          sources = @step_definition_files[definition_file]
          indent_col = source_indent(sources)
          @io.puts definition_file
          sources.sort.each do |file_colon_line, regexp|
            @io.print "#{regexp}".indent(2)
            @io.print " # #{file_colon_line}".indent(indent_col - regexp.size)
            @io.puts
          end
          @io.puts
          count + sources.size
        end
        @io.puts "#{total} step definition(s) in #{@step_definition_files.size} source file(s)."
      end

      # Group [file:line, regexp.inspect] pairs by the file defining them.
      def collect_steps(step_mother)
        step_mother.step_definitions.each_with_object({}) do |step_definition, grouped|
          (grouped[step_definition.file] ||= []) << [step_definition.file_colon_line, step_definition.regexp.inspect]
        end
      end

      # Alignment column for the location comments: widest regexp plus one.
      def source_indent(sources)
        widths = sources.map { |_file_colon_line, regexp| regexp.size }
        widths.max + 1
      end
    end
  end
end
require 'rubygems'
require 'celerity'
module Culerity
  # Server side of the Culerity bridge: reads marshalled method calls from
  # +_in+, invokes them on the browser (or a previously proxied object),
  # and writes the result or exception back to +_out+.
  class CelerityServer
    def initialize(_in, _out)
      @proxies = {}
      @browser_options = {}
      while(true)
        # SECURITY: eval of wire input executes arbitrary Ruby. The protocol
        # assumes a trusted local peer on the other end of the pipe — never
        # expose this loop to untrusted input.
        call = eval _in.gets.to_s.strip
        return if call == ["_exit_"]
        unless call.nil?
          begin
            result = target(call.first).send call[1], *call[2..-1]
            _out << "[:return, #{proxify result}]\n"
          rescue => e
            _out << "[:exception, \"#{e.class.name}\", #{e.message.inspect}, #{e.backtrace.inspect}]\n"
          end
        end
      end
    end

    private

    # Remember browser options for the lazily-created browser.
    def configure_browser(options)
      @browser_options = options
    end

    # Lazily instantiate the Celerity browser with any configured options.
    def browser
      @browser ||= Celerity::Browser.new @browser_options || {}
    end

    # Resolve a wire object id: the well-known 'browser'/'celerity' names,
    # or a previously proxied object.
    def target(object_id)
      if object_id == 'browser'
        browser
      elsif object_id == 'celerity'
        self
      else
        @proxies[object_id]
      end
    end

    # Serialize a result for the wire: primitives travel by value (inspect);
    # anything else is retained server-side and referenced via a proxy.
    # Fix: match with Integer instead of the deprecated Fixnum — Fixnum was
    # removed in Ruby 3.2, so the old class list raised NameError there.
    def proxify(result)
      case result
      when String, TrueClass, FalseClass, Integer, Float, NilClass
        result.inspect
      else
        @proxies[result.object_id] = result
        "Culerity::RemoteObjectProxy.new(#{result.object_id}, @io)"
      end
    end
  end
end
Making Culerity return actual Array objects rather than a RemoteProxy object.
Fixes the bug I had with b.select_list(:index, 0).options not showing me the original array, but only letting me get the elements one by one.
http://github.com/langalex/culerity/issues#issue/2
require 'rubygems'
require 'celerity'
module Culerity
  # Server side of the Culerity bridge: reads marshalled method calls from
  # +_in+, invokes them on the browser (or a previously proxied object),
  # and writes the result or exception back to +_out+.
  class CelerityServer
    def initialize(_in, _out)
      @proxies = {}
      @browser_options = {}
      while(true)
        # SECURITY: eval of wire input executes arbitrary Ruby. The protocol
        # assumes a trusted local peer on the other end of the pipe — never
        # expose this loop to untrusted input.
        call = eval _in.gets.to_s.strip
        return if call == ["_exit_"]
        unless call.nil?
          begin
            result = target(call.first).send call[1], *call[2..-1]
            _out << "[:return, #{proxify result}]\n"
          rescue => e
            _out << "[:exception, \"#{e.class.name}\", #{e.message.inspect}, #{e.backtrace.inspect}]\n"
          end
        end
      end
    end

    private

    # Remember browser options for the lazily-created browser.
    def configure_browser(options)
      @browser_options = options
    end

    # Lazily instantiate the Celerity browser with any configured options.
    def browser
      @browser ||= Celerity::Browser.new @browser_options || {}
    end

    # Resolve a wire object id: the well-known 'browser'/'celerity' names,
    # or a previously proxied object.
    def target(object_id)
      if object_id == 'browser'
        browser
      elsif object_id == 'celerity'
        self
      else
        @proxies[object_id]
      end
    end

    # Serialize a result for the wire: primitives and Arrays travel by value
    # (inspect); anything else is retained server-side behind a proxy.
    # Fix: match with Integer instead of the deprecated Fixnum — Fixnum was
    # removed in Ruby 3.2, so the old class list raised NameError there.
    def proxify(result)
      case result
      when String, TrueClass, FalseClass, Integer, Float, NilClass, Array
        result.inspect
      else
        @proxies[result.object_id] = result
        "Culerity::RemoteObjectProxy.new(#{result.object_id}, @io)"
      end
    end
  end
end
# frozen_string_literal: true
module DatabaseFlusher
  # Gem version, bumped on each release.
  VERSION = '0.2.1'.freeze
end
Release v0.2.2
# frozen_string_literal: true
module DatabaseFlusher
  # Gem version, bumped on each release.
  VERSION = '0.2.2'.freeze
end
|
namespace :devlifecycles do
  desc "Synchronizes models with Devlifecycles"
  task :sync => :environment do
    # Echo the configured key so the operator can confirm which account
    # the sync will run against.
    puts "Using API KEY: #{Devlifecycles.api_key}"
  end
end
Ensure the model passed to the rake task is an ActiveRecord model
namespace :devlifecycles do
  desc "Synchronizes models with Devlifecycles"
  task :sync => :environment do
    puts "Using API KEY: #{Devlifecycles.api_key}"
    # The model name is read from the last CLI argument.
    # NOTE(review): ARGV.last is the task name itself when no extra argument
    # is supplied — confirm the intended invocation convention.
    model_name = ARGV.last
    begin
      model_class = model_name.camelize.constantize
      # Fix: `<` accepts any descendant of ActiveRecord::Base (STI models,
      # ApplicationRecord heirs); the old `superclass !=` check wrongly
      # rejected them. Also dropped the dead `model_class = Object` seed.
      unless model_class < ActiveRecord::Base
        raise "#{model_class} is not an ActiveRecord model"
      end
      puts "Syncing #{model_class}"
    rescue StandardError
      puts "#{model_name} is not an instance of ActiveRecord::Base -- cannot sync."
    end
  end
end
|
module DiamondLang
  # Builds a Minecraft "one command" installation: lays the generated
  # command chain out inside a box of command blocks and emits a single
  # summon command that installs the whole structure.
  class OneCommand
    # Most recently created instance (class-wide).
    def self.instance
      @@instance
    end

    # Build an instance and print its one-command string, stripping the
    # JSON-style quoting around word keys so it reads as Minecraft NBT.
    def self.create(*args)
      @@instance = self.new(*args)
      puts @@instance.to_command.to_s.gsub(/\\?"(\w+?)\\?":/, '\1:')
    end

    # @param output value for the commandBlockOutput gamerule applied in
    #   startup. Fix: startup reads @output, but no value was ever assigned
    #   before, so the gamerule was always set to nil; the new trailing
    #   parameter (default false) is backward-compatible.
    def initialize(height=5, length=6, width=5, offset=coords(2, 2, 0), surrond=Helpers::Block.new('stained_hardened_clay', 13), output=false)
      @output = output
      @height = height # y
      @width = width # z
      # The box length must be even; round down and warn otherwise.
      @length = (length / 2).floor * 2 # x
      puts "WARNING: The length of your command block needs to be even. Rounding down to #{@length}." unless length.even?
      @offset = offset.freeze
      @corner1 = coords("~#{@offset.x}", "~#{@offset.y}", "~#{@offset.z}").freeze
      @corner2 = coords("~#{@offset.x._value + @length}", "~#{@offset.y._value + @height}", "~#{@offset.z._value + @width}").freeze
      @surrond = surrond
    end

    # One-time commands run when the structure is installed.
    def startup(c)
      c.gamerule(:commandBlockOutput, @output)
      # Hollow shell around the command-block box, when a shell block is set.
      c.fill @corner1.to_s, @corner2.to_s, @surrond.to_s, 'hollow' if @surrond
    end

    # Commands that remove the installer minecarts and scaffolding.
    def cleanup(c)
      c.setblock "~ ~ ~1 command_block 0 replace {Command:fill ~ ~-1 ~-1 ~ ~ ~ air}"
      c.setblock "~ ~-1 ~1 redstone_block"
      c.kill e('MinecartCommandBlock').selector({r: 1})
    end

    # Place the tick chain inside the box, snaking rows back and forth.
    # The alternating direction values are presumably Minecraft facing data
    # — confirm against the block-data reference before changing them.
    def create_commands(c)
      chain = CommandChain.new self
      tick chain
      commands = chain.commands.map do |command|
        command.to_block
      end
      # Rows along x, alternating facing so the chain snakes.
      command_lines = commands.each_slice(@length - 1).each_with_index.map do |line, z|
        direction = z.even? ? 5 : 4
        line.map! do |c|
          c.direction = direction
          c
        end
      end
      # Stack rows into levels along z, turning at the row ends.
      command_levels = command_lines.each_slice(@width - 1).each_with_index.map do |level, y|
        level = level.map! do |line|
          line.last.direction = y.even? ? 3 : 2
          line
        end
        level.last.last.direction = 1
        level = level.each_with_index.map do |line, z|
          z.even? ? line : line.reverse
        end
        y.even? ? level : level.reverse
      end
      # The very first block repeats, driving the rest of the chain.
      command_levels.first.first.first.type = :repeating
      raise Errors::TooSmall if command_levels.to_a.length > (@height - 1)
      command_levels.each_with_index do |level, y|
        level.each_with_index do |line, z|
          z += @width - 1 - level.length if y.odd?
          line.each_with_index do |command, x|
            x += @length - 1 - line.length unless y.odd? == z.odd?
            c.setblock coords(
              "~#{x + @corner1.x._value + 1}",
              "~#{y + @corner1.y._value + 1}",
              "~#{z + @corner1.z._value + 1}"
            ), command.to_s(:replace)
          end
        end
      end
    end

    # Full installer chain: startup, user setup, laid-out commands, cleanup.
    def chain
      chain = CommandChain.new self
      startup chain
      setup chain
      create_commands chain
      cleanup chain
      chain
    end

    # The single summon command: a falling redstone block carrying an
    # activator rail carrying the chain's command-block minecarts.
    def to_command
      activator_rail = b('activator_rail').to_falling_sand
      redstone_block = b('redstone_block').to_falling_sand
      activator_rail.passengers.push *chain.to_minecarts
      redstone_block.passengers << activator_rail
      redstone_block.summon coords('~', '~1', '~')
    end

    # Hook: one-time user commands. Override in subclasses.
    # (Also fixes the "implimented" typo in the warning text.)
    def setup(c)
      puts "Warning: You haven't implemented a setup function."
    end

    # Hook: per-tick user commands. Override in subclasses.
    def tick(c)
      puts "Warning: You haven't implemented a tick function."
    end

    private

    # Shorthand constructors for helper objects.
    def b(*args)
      Helpers::Block.new(*args)
    end

    def e(*args)
      Helpers::Entity.new(*args)
    end

    def s(*args)
      Helpers::TargetSelector.new(*args)
    end

    def coords(*args)
      Helpers::Coordinates.new(*args)
    end
  end
end
Added some convenience utilities to OneCommand: player/self selectors, relative coordinates, and a color-name lookup
module DiamondLang
  # Builds a Minecraft "one command" installation: lays the generated
  # command chain out inside a box of command blocks and emits a single
  # summon command that installs the whole structure.
  class OneCommand
    # Most recently created instance (class-wide).
    def self.instance
      @@instance
    end

    # Build an instance and print its one-command string, stripping the
    # JSON-style quoting around word keys so it reads as Minecraft NBT.
    def self.create(*args)
      @@instance = self.new(*args)
      puts @@instance.to_command.to_s.gsub(/\\?"(\w+?)\\?":/, '\1:')
    end

    # @param output value for the commandBlockOutput gamerule set in startup
    def initialize(height=5, length=6, width=5, offset=coords(2, 2, 0), surrond=Helpers::Block.new('stained_hardened_clay', 13), output=false)
      @output = output
      @height = height # y
      @width = width # z
      # The box length must be even; round down and warn otherwise.
      @length = (length / 2).floor * 2 # x
      puts "WARNING: The length of your command block needs to be even. Rounding down to #{@length}." unless length.even?
      @offset = offset.freeze
      @corner1 = coords("~#{@offset.x}", "~#{@offset.y}", "~#{@offset.z}").freeze
      @corner2 = coords("~#{@offset.x._value + @length}", "~#{@offset.y._value + @height}", "~#{@offset.z._value + @width}").freeze
      @surrond = surrond
    end

    # One-time commands run when the structure is installed.
    def startup(c)
      c.gamerule(:commandBlockOutput, @output)
      # Hollow shell around the command-block box, when a shell block is set.
      c.fill @corner1.to_s, @corner2.to_s, @surrond.to_s, 'hollow' if @surrond
    end

    # Commands that remove the installer minecarts and scaffolding.
    def cleanup(c)
      c.setblock "~ ~ ~1 command_block 0 replace {Command:fill ~ ~-1 ~-1 ~ ~ ~ air}"
      c.setblock "~ ~-1 ~1 redstone_block"
      c.kill e('MinecartCommandBlock').selector({r: 1})
    end

    # Place the tick chain inside the box, snaking rows back and forth.
    # The alternating direction values are presumably Minecraft facing data
    # — confirm against the block-data reference before changing them.
    def create_commands(c)
      chain = CommandChain.new self
      tick chain
      commands = chain.commands.map do |command|
        command.to_block
      end
      # Rows along x, alternating facing so the chain snakes.
      command_lines = commands.each_slice(@length - 1).each_with_index.map do |line, z|
        direction = z.even? ? 5 : 4
        line.map! do |c|
          c.direction = direction
          c
        end
      end
      # Stack rows into levels along z, turning at the row ends.
      command_levels = command_lines.each_slice(@width - 1).each_with_index.map do |level, y|
        level = level.map! do |line|
          line.last.direction = y.even? ? 3 : 2
          line
        end
        level.last.last.direction = 1
        level = level.each_with_index.map do |line, z|
          z.even? ? line : line.reverse
        end
        y.even? ? level : level.reverse
      end
      # The very first block repeats, driving the rest of the chain.
      command_levels.first.first.first.type = :repeating
      raise Errors::TooSmall if command_levels.to_a.length > (@height - 1)
      command_levels.each_with_index do |level, y|
        level.each_with_index do |line, z|
          z += @width - 1 - level.length if y.odd?
          line.each_with_index do |command, x|
            x += @length - 1 - line.length unless y.odd? == z.odd?
            c.setblock coords(
              "~#{x + @corner1.x._value + 1}",
              "~#{y + @corner1.y._value + 1}",
              "~#{z + @corner1.z._value + 1}"
            ), command.to_s(:replace)
          end
        end
      end
    end

    # Full installer chain: startup, user setup, laid-out commands, cleanup.
    def chain
      chain = CommandChain.new self
      startup chain
      setup chain
      create_commands chain
      cleanup chain
      chain
    end

    # The single summon command: a falling redstone block carrying an
    # activator rail carrying the chain's command-block minecarts.
    def to_command
      activator_rail = b('activator_rail').to_falling_sand
      redstone_block = b('redstone_block').to_falling_sand
      activator_rail.passengers.push *chain.to_minecarts
      redstone_block.passengers << activator_rail
      redstone_block.summon coords('~', '~1', '~')
    end

    # Hook: one-time user commands. Override in subclasses.
    def setup(c)
      puts "Warning: You haven't implimented a setup function."
    end

    # Hook: per-tick user commands. Override in subclasses.
    def tick(c)
      puts "Warning: You haven't implimented a tick function."
    end

    private

    # Shorthand constructors for helper objects.
    def b(*args)
      Helpers::Block.new(*args)
    end

    def e(*args)
      Helpers::Entity.new(*args)
    end

    def s(*args)
      Helpers::TargetSelector.new(*args)
    end

    # Nearest-player selector (@p).
    def sp
      s :p
    end

    # Selector for the executing entity (@e, radius 0, count 1).
    def s_self
      s :e, {r: 0, c: 1}
    end

    def coords(*args)
      Helpers::Coordinates.new(*args)
    end

    # The executing position, "~ ~ ~".
    def relative
      coords('~', '~', '~')
    end

    # Color name -> data value lookup.
    # NOTE(review): the hash literal is rebuilt (and re-frozen) on every
    # call; a frozen module constant would avoid the per-call allocation.
    def colors(color)
      {
        white: 0,
        orange: 1,
        magenta: 2,
        light_blue: 3,
        yellow: 4,
        lime: 5,
        pink: 6,
        grey: 7,
        light_grey: 8,
        cyan: 9,
        purple: 10,
        blue: 11,
        brown: 12,
        green: 13,
        red: 14,
        black: 15
      }.freeze[color]
    end
  end
end
|
module Dimensions
  module Rails
    # Gem version, bumped on each release.
    VERSION = '1.0.2'
  end
end
Bump version
module Dimensions
  module Rails
    # Gem version, bumped on each release.
    VERSION = '1.0.3'
  end
end
|
require 'discordrb/events/generic'
module Discordrb::Events
  # Event raised when a text message is sent to a channel
  class MessageEvent
    attr_reader :message

    delegate :author, :channel, :content, :timestamp, to: :message
    delegate :server, to: :channel

    def initialize(message, bot)
      @bot = bot
      @message = message
    end

    # Reply in the channel the message arrived in.
    def send_message(content)
      @message.channel.send_message(content)
    end

    alias_method :user, :author
    alias_method :text, :content
    alias_method :send, :send_message
    alias_method :respond, :send_message
  end

  # Event handler for MessageEvent
  class MessageEventHandler < EventHandler
    # True when the event satisfies every configured attribute filter.
    def matches?(event)
      # Check for the proper event type
      return false unless event.is_a? MessageEvent
      [
        matches_all(@attributes[:starting_with], event.content) { |a, e| e.start_with? a },
        matches_all(@attributes[:ending_with], event.content) { |a, e| e.end_with? a },
        matches_all(@attributes[:containing], event.content) { |a, e| e.include? a },
        matches_all(@attributes[:in], event.channel) do |a, e|
          if a.is_a? String
            # Make sure to remove the "#" from channel names in case it was specified
            a.delete('#') == e.name
          elsif a.is_a? Integer
            # Fix: Integer instead of Fixnum — Fixnum was removed in Ruby 3.2
            # and also failed to match IDs above the old Fixnum range.
            a == e.id
          else
            a == e
          end
        end,
        matches_all(@attributes[:from], event.author) do |a, e|
          if a.is_a? String
            a == e.name
          elsif a.is_a? Integer
            # Fix: Integer instead of Fixnum (see above).
            a == e.id
          else
            a == e
          end
        end,
        matches_all(@attributes[:with_text], event.content) { |a, e| e == a },
        matches_all(@attributes[:after], event.timestamp) { |a, e| a > e },
        matches_all(@attributes[:before], event.timestamp) { |a, e| a < e }
      ].reduce(true, &:&)
    end
  end

  class MentionEvent < MessageEvent; end
  class MentionEventHandler < MessageEventHandler; end
end
Add some aliases to MessageEvent attributes
require 'discordrb/events/generic'
module Discordrb::Events
  # Event raised when a text message is sent to a channel
  class MessageEvent
    attr_reader :message

    delegate :author, :channel, :content, :timestamp, to: :message
    delegate :server, to: :channel

    def initialize(message, bot)
      @bot = bot
      @message = message
    end

    # Reply in the channel the message arrived in.
    def send_message(content)
      @message.channel.send_message(content)
    end

    alias_method :user, :author
    alias_method :text, :content
    alias_method :send, :send_message
    alias_method :respond, :send_message
  end

  # Event handler for MessageEvent
  class MessageEventHandler < EventHandler
    # True when the event satisfies every configured attribute filter
    # (each filter also accepts its aliased attribute name).
    def matches?(event)
      # Check for the proper event type
      return false unless event.is_a? MessageEvent
      [
        matches_all(@attributes[:starting_with] || @attributes[:start_with], event.content) { |a, e| e.start_with? a },
        matches_all(@attributes[:ending_with] || @attributes[:end_with], event.content) { |a, e| e.end_with? a },
        matches_all(@attributes[:containing] || @attributes[:contains], event.content) { |a, e| e.include? a },
        matches_all(@attributes[:in], event.channel) do |a, e|
          if a.is_a? String
            # Make sure to remove the "#" from channel names in case it was specified
            a.delete('#') == e.name
          elsif a.is_a? Integer
            # Fix: Integer instead of Fixnum — Fixnum was removed in Ruby 3.2
            # and also failed to match IDs above the old Fixnum range.
            a == e.id
          else
            a == e
          end
        end,
        matches_all(@attributes[:from], event.author) do |a, e|
          if a.is_a? String
            a == e.name
          elsif a.is_a? Integer
            # Fix: Integer instead of Fixnum (see above).
            a == e.id
          else
            a == e
          end
        end,
        matches_all(@attributes[:with_text] || @attributes[:content], event.content) { |a, e| e == a },
        matches_all(@attributes[:after], event.timestamp) { |a, e| a > e },
        matches_all(@attributes[:before], event.timestamp) { |a, e| a < e }
      ].reduce(true, &:&)
    end
  end

  class MentionEvent < MessageEvent; end
  class MentionEventHandler < MessageEventHandler; end
end
|
Reference implementation of SSO from Discourse
# This class is the reference implementation of a SSO provider from Discourse.
module OpenFoodNetwork
  class SingleSignOn
    # Fields round-tripped through the signed SSO payload.
    ACCESSORS = [:nonce, :name, :username, :email, :avatar_url, :avatar_force_update, :require_activation,
                 :about_me, :external_id, :return_sso_url, :admin, :moderator, :suppress_welcome_message]
    # Fields coerced to Integer / Boolean while parsing.
    FIXNUMS = []
    BOOLS = [:avatar_force_update, :admin, :moderator, :require_activation, :suppress_welcome_message]
    NONCE_EXPIRY_TIME = 10.minutes

    attr_accessor(*ACCESSORS)
    attr_accessor :sso_secret, :sso_url

    def self.sso_secret
      raise RuntimeError, "sso_secret not implemented on class, be sure to set it on instance"
    end

    def self.sso_url
      raise RuntimeError, "sso_url not implemented on class, be sure to set it on instance"
    end

    # Parse and verify a signed SSO query string ("sso=...&sig=...").
    # @raise [RuntimeError] when the signature does not match the payload.
    def self.parse(payload, sso_secret = nil)
      sso = new
      sso.sso_secret = sso_secret if sso_secret
      parsed = Rack::Utils.parse_query(payload)
      # Security fix: compare HMACs in constant time — a plain != leaks
      # information about the expected signature through timing differences.
      unless Rack::Utils.secure_compare(sso.sign(parsed["sso"]), parsed["sig"].to_s)
        diags = "\n\nsso: #{parsed["sso"]}\n\nsig: #{parsed["sig"]}\n\nexpected sig: #{sso.sign(parsed["sso"])}"
        if parsed["sso"] =~ /[^a-zA-Z0-9=\r\n\/+]/m
          raise RuntimeError, "The SSO field should be Base64 encoded, using only A-Z, a-z, 0-9, +, /, and = characters. Your input contains characters we don't understand as Base64, see http://en.wikipedia.org/wiki/Base64 #{diags}"
        else
          raise RuntimeError, "Bad signature for payload #{diags}"
        end
      end
      decoded = Base64.decode64(parsed["sso"])
      decoded_hash = Rack::Utils.parse_query(decoded)
      ACCESSORS.each do |k|
        val = decoded_hash[k.to_s]
        val = val.to_i if FIXNUMS.include? k
        if BOOLS.include? k
          # Anything other than literal "true"/"false" becomes nil.
          val = ["true", "false"].include?(val) ? val == "true" : nil
        end
        sso.send("#{k}=", val)
      end
      decoded_hash.each do |k,v|
        # Keys shaped "custom.<field>" carry caller-defined extras.
        if k[0..6] == "custom."
          field = k[7..-1]
          sso.custom_fields[field] = v
        end
      end
      sso
    end

    # Instance-level secret/URL win over the class-level defaults.
    def sso_secret
      @sso_secret || self.class.sso_secret
    end

    def sso_url
      @sso_url || self.class.sso_url
    end

    def custom_fields
      @custom_fields ||= {}
    end

    # Hex HMAC-SHA256 of the Base64 payload under the shared secret.
    def sign(payload)
      OpenSSL::HMAC.hexdigest("sha256", sso_secret, payload)
    end

    # Append the signed payload to the SSO endpoint URL.
    def to_url(base_url=nil)
      base = "#{base_url || sso_url}"
      "#{base}#{base.include?('?') ? '&' : '?'}#{payload}"
    end

    def payload
      payload = Base64.encode64(unsigned_payload)
      "sso=#{CGI::escape(payload)}&sig=#{sign(payload)}"
    end

    # Query-string form of all non-nil accessor values plus custom fields.
    def unsigned_payload
      payload = {}
      ACCESSORS.each do |k|
        next if (val = send k) == nil
        payload[k] = val
      end
      if @custom_fields
        @custom_fields.each do |k,v|
          payload["custom.#{k}"] = v.to_s
        end
      end
      Rack::Utils.build_query(payload)
    end
  end
end
|
module Dlibhydra
  # A SKOS concept persisted through ActiveFedora.
  class Concept < ActiveFedora::Base
    include Dlibhydra::RdfType #,Dlibhydra::Generic,Dlibhydra::SameAs,Dlibhydra::SkosLabels,Dlibhydra::DCTerms,Dlibhydra::AssignId,Dlibhydra::AssignRdfTypes
    # NOTE(review): including ConceptScheme while also declaring the
    # belongs_to :concept_scheme association below looks redundant or
    # mistaken — confirm the mixin is intended on Concept.
    include Dlibhydra::ConceptScheme
    belongs_to :concept_scheme, class_name: 'ConceptScheme', predicate: ::RDF::SKOS.inScheme
    # Use only for Broader, Narrower will be added by default
    #has_and_belongs_to_many :broader, class_name: 'Concept', predicate: ::RDF::SKOS.broader, inverse_of: :narrower
    #has_and_belongs_to_many :narrower, class_name: 'Concept', predicate: ::RDF::SKOS.narrower, inverse_of: :broader
    # skos:definition, single-valued, indexed for search.
    property :definition, predicate: ::RDF::SKOS.definition, multiple: false do |index|
      index.as :stored_searchable
    end
    # skos:related, multi-valued, indexed for search.
    property :see_also, predicate: ::RDF::SKOS.related, multiple: true do |index|
      index.as :stored_searchable
    end
    # there is more skos we could add ...
  end
end
dlibhydra gem first commit
module Dlibhydra
  # A SKOS concept persisted through ActiveFedora.
  class Concept < ActiveFedora::Base
    include Dlibhydra::RdfType #,Dlibhydra::Generic,Dlibhydra::SameAs,Dlibhydra::SkosLabels,Dlibhydra::DCTerms,Dlibhydra::AssignId,Dlibhydra::AssignRdfTypes
    belongs_to :concept_scheme, class_name: 'ConceptScheme', predicate: ::RDF::SKOS.inScheme
    # Use only for Broader, Narrower will be added by default
    #has_and_belongs_to_many :broader, class_name: 'Concept', predicate: ::RDF::SKOS.broader, inverse_of: :narrower
    #has_and_belongs_to_many :narrower, class_name: 'Concept', predicate: ::RDF::SKOS.narrower, inverse_of: :broader
    # skos:definition, single-valued, indexed for search.
    property :definition, predicate: ::RDF::SKOS.definition, multiple: false do |index|
      index.as :stored_searchable
    end
    # skos:related, multi-valued, indexed for search.
    property :see_also, predicate: ::RDF::SKOS.related, multiple: true do |index|
      index.as :stored_searchable
    end
    # there is more skos we could add ...
  end
end
|
# encoding: UTF-8
require 'library_stdnums'
require 'uri'
require 'faraday'
require 'faraday_middleware'
module MARC
  class Record
    # Taken from pul-store marc.rb lib extension
    # Shamelessly lifted from SolrMARC, with a few changes; no doubt there will
    # be more.
    # Patterns matched against MARC 260$c publication-date strings.
    @@THREE_OR_FOUR_DIGITS = /^(20|19|18|17|16|15|14|13|12|11|10|9|8|7|6|5|4|3|2|1)(\d{2})\.?$/
    @@FOUR_DIGIT_PATTERN_BRACES = /^\[([12]\d{3})\??\]\.?$/
    @@FOUR_DIGIT_PATTERN_ONE_BRACE = /^\[(20|19|18|17|16|15|14|13|12|11|10)(\d{2})/
    @@FOUR_DIGIT_PATTERN_OTHER_1 = /^l(\d{3})/
    @@FOUR_DIGIT_PATTERN_OTHER_2 = /^\[(20|19|18|17|16|15|14|13|12|11|10)\](\d{2})/
    @@FOUR_DIGIT_PATTERN_OTHER_3 = /^\[?(20|19|18|17|16|15|14|13|12|11|10)(\d)[^\d]\]?/
    @@FOUR_DIGIT_PATTERN_OTHER_4 = /i\.e\.\,? (20|19|18|17|16|15|14|13|12|11|10)(\d{2})/
    @@FOUR_DIGIT_PATTERN_OTHER_5 = /^\[?(\d{2})\-\-\??\]?/
    @@BC_DATE_PATTERN = /[0-9]+ [Bb]\.?[Cc]\.?/

    # Best guess at a four-digit publication year: try the 260$c patterns
    # above (rebuilding the year from the regex captures), then fall back
    # to the fixed-field date in 008.
    def best_date
      date = nil
      if self['260']
        if self['260']['c']
          field_260c = self['260']['c']
          case field_260c
          when @@THREE_OR_FOUR_DIGITS
            date = "#{$1}#{$2}"
          when @@FOUR_DIGIT_PATTERN_BRACES
            date = $1
          when @@FOUR_DIGIT_PATTERN_ONE_BRACE
            date = $1
          when @@FOUR_DIGIT_PATTERN_OTHER_1
            # Leading lowercase 'l' — presumably a misprint for '1'.
            date = "1#{$1}"
          when @@FOUR_DIGIT_PATTERN_OTHER_2
            date = "#{$1}#{$2}"
          when @@FOUR_DIGIT_PATTERN_OTHER_3
            # Only the decade is legible; use year 0 of that decade.
            date = "#{$1}#{$2}0"
          when @@FOUR_DIGIT_PATTERN_OTHER_4
            # Prefer the corrected "i.e. NNNN" year.
            date = "#{$1}#{$2}"
          when @@FOUR_DIGIT_PATTERN_OTHER_5
            # Century only ("19--"); use year 00.
            date = "#{$1}00"
          when @@BC_DATE_PATTERN
            # BC dates cannot be expressed as a 4-digit year.
            date = nil
          end
        end
      end
      date ||= self.date_from_008
    end

    # Date 1 (chars 7-10) of the 008 fixed field, with 'u' and ' ' filled
    # as '0'; returns the string only if the result is exactly four digits.
    def date_from_008
      if self['008']
        d = self['008'].value[7,4]
        d = d.gsub 'u', '0' unless d == 'uuuu'
        # NOTE(review): d is a 4-char slice, so this single-space guard can
        # never match; an all-blank date therefore becomes "0000" instead of
        # nil — confirm whether '    ' (four spaces) was intended.
        d = d.gsub ' ', '0' unless d == ' '
        d if d =~ /^[0-9]{4}$/
      end
    end

    # Date 2 (chars 11-14) of the 008 fixed field, filled as above.
    def end_date_from_008
      if self['008']
        d = self['008'].value[11,4]
        d = d.gsub 'u', '0' unless d == 'uuuu'
        # NOTE(review): same single-space guard issue as date_from_008.
        d = d.gsub ' ', '0' unless d == ' '
        d if d =~ /^[0-9]{4}$/
      end
    end

    # Display form of the date: 260$c verbatim, else the 008 date.
    def date_display
      date = nil
      if self['260']
        if self['260']['c']
          date = self['260']['c']
        end
      end
      date ||= self.date_from_008
    end
  end
end
# Fallback label used when an 024 indicator has no specific mapping.
FALLBACK_STANDARD_NO = 'Other standard number'

# Human-readable label for the MARC 024 first indicator. Indicator '7'
# yields the placeholder '$2', meaning the label comes from subfield $2.
def map_024_indicators_to_labels(i)
  labels = {
    '0' => 'International Standard Recording Code',
    '1' => 'Universal Product Code',
    '2' => 'International Standard Music Number',
    '3' => 'International Article Number',
    '4' => 'Serial Item and Contribution Identifier',
    '7' => '$2'
  }
  labels.fetch(i, FALLBACK_STANDARD_NO)
end
# Human-readable label for the MARC 246 second indicator; nil when the
# indicator has no mapped label.
def indicator_label_246(i)
  {
    '0' => 'Portion of title',
    '1' => 'Parallel title',
    '2' => 'Distinctive title',
    '3' => 'Other title',
    '4' => 'Cover title',
    '5' => 'Added title page title',
    '6' => 'Caption title',
    '7' => 'Running title',
    '8' => 'Spine title'
  }[i]
end
# Derive a hash key from a subfield value: capitalize it and strip one
# trailing punctuation mark; use the fallback when nothing remains.
def subfield_specified_hash_key(subfield_value, fallback)
  cleaned = subfield_value.capitalize.gsub(/[[:punct:]]?$/, '')
  return fallback if cleaned.empty?
  cleaned
end
# Build a {label => [numbers]} hash of standard numbers from MARC 024,
# labelled via indicator 1 (or subfield $2 when indicator 1 is '7').
def standard_no_hash record
  standard_no = {}
  Traject::MarcExtractor.cached('024').collect_matching_lines(record) do |field, spec, extractor|
    standard_label = map_024_indicators_to_labels(field.indicator1)
    standard_number = nil
    field.subfields.each do |s_field|
      standard_number = s_field.value if s_field.code == 'a'
      # '$2' is the placeholder meaning "label is specified in subfield 2".
      standard_label = subfield_specified_hash_key(s_field.value, FALLBACK_STANDARD_NO) if s_field.code == '2' and standard_label == '$2'
    end
    # Indicator was '7' but no $2 appeared; use the generic label.
    standard_label = FALLBACK_STANDARD_NO if standard_label == '$2'
    standard_no[standard_label] ? standard_no[standard_label] << standard_number : standard_no[standard_label] = [standard_number] unless standard_number.nil?
  end
  standard_no
end
# Handles ISBNs, ISSNs, and OCLCs
# ISBN: 020a, 020z, 776z
# ISSN: 022a, 022l, 022y, 022z, 776x
# OCLC: 035a, 776w, 787w
# BIB: 776w, 787w (adds BIB prefix so Blacklight can detect whether to search id field)
def other_versions record
  linked_nums = []
  Traject::MarcExtractor.cached('020az:022alyz:035a:776wxz:787w').collect_matching_lines(record) do |field, spec, extractor|
    field.subfields.each do |s_field|
      # ISBNs: any 020 subfield in the spec, plus 776$z.
      linked_nums << StdNum::ISBN.normalize(s_field.value) if (field.tag == "020") or (field.tag == "776" and s_field.code == 'z')
      # ISSNs: any 022 subfield in the spec, plus 776$x.
      linked_nums << StdNum::ISSN.normalize(s_field.value) if (field.tag == "022") or (field.tag == "776" and s_field.code == 'x')
      # OCLC numbers from 035$a, recognizable by their (OCoLC) prefix.
      linked_nums << oclc_normalize(s_field.value, prefix: true) if s_field.value.start_with?('(OCoLC)') and (field.tag == "035")
      if (field.tag == "776" and s_field.code == 'w') or (field.tag == "787" and s_field.code == 'w')
        linked_nums << oclc_normalize(s_field.value, prefix: true) if s_field.value.include?('(OCoLC)')
        # No parenthesized source prefix at all: treat as a local bib id.
        linked_nums << "BIB" + strip_non_numeric(s_field.value) unless s_field.value.include?('(')
        # A '(' anywhere except the start is unexpected formatting; log it.
        if s_field.value.include?('(') and !s_field.value.start_with?('(')
          logger.error "#{record['001']} - linked field formatting: #{s_field.value}"
        end
      end
    end
  end
  linked_nums.compact.uniq
end
# only includes values before $t
# Extract name headings from 1xx/7xx fields, dropping any title portion.
def process_names record
  names = []
  Traject::MarcExtractor.cached('100aqbcdk:110abcdfgkln:111abcdfgklnpq:700aqbcdk:710abcdfgkln:711abcdfgklnpq').collect_matching_lines(record) do |field, spec, extractor|
    name = extractor.collect_subfields(field, spec).first
    unless name.nil?
      # Collect the values that follow subfield $t and chomp them off the
      # end of the extracted string, leaving only the name portion.
      remove = ''
      after_t = false
      field.subfields.each do |s_field|
        remove << " #{s_field.value}" if after_t and spec.includes_subfield_code?(s_field.code)
        after_t = true if s_field.code == 't'
      end
      name = name.chomp(remove)
      names << Traject::Macros::Marc21.trim_punctuation(name)
    end
  end
  names.uniq
end
##
# Get hash of authors grouped by role
# @param [MARC::Record]
# @return [Hash]
def process_author_roles record
  # Map relator codes (subfield $4) and relator terms (subfield $e),
  # upcased and stripped of trailing punctuation, to role buckets.
  author_roles = {
    'TRL' => 'translators',
    'EDT' => 'editors',
    'COM' => 'compilers',
    'TRANSLATOR' => 'translators',
    'EDITOR' => 'editors',
    'COMPILER' => 'compilers'
  }
  names = {}
  names['secondary_authors'] = []
  names['translators'] = []
  names['editors'] = []
  names['compilers'] = []
  Traject::MarcExtractor.cached('100a:110a:111a:700a:710a:711a').collect_matching_lines(record) do |field, spec, extractor|
    name = extractor.collect_subfields(field, spec).first
    unless name.nil?
      name = Traject::Macros::Marc21.trim_punctuation(name)
      # If name is from 1xx field, it is the primary author.
      if /1../.match(field.tag)
        names['primary_author'] = name
      else
        relator = ""
        field.subfields.each do |s_field|
          # relator code (subfield 4)
          if s_field.code == '4'
            relator = s_field.value.upcase.gsub(/[[:punct:]]?$/, '')
          # relator term (subfield e)
          elsif s_field.code == 'e'
            relator = s_field.value.upcase.gsub(/[[:punct:]]?$/, '')
          end
        end
        # Set role from relator value; unmapped relators fall back to
        # the generic secondary-author bucket.
        role = author_roles[relator] || 'secondary_authors'
        names[role] << name
      end
    end
  end
  names
end
##
# Process publication information for citations.
# @param [MARC::Record]
# @return [Array] pub info strings from fields 260 and 264.
def set_pub_citation(record)
  pub_citation = []
  Traject::MarcExtractor.cached('260:264').collect_matching_lines(record) do |field, spec, extractor|
    a_pub_info = nil
    b_pub_info = nil
    pub_info = ""
    # $a = place of publication, $b = publisher name.
    field.subfields.each do |s_field|
      a_pub_info = Traject::Macros::Marc21.trim_punctuation(s_field.value).strip if s_field.code == 'a'
      b_pub_info = Traject::Macros::Marc21.trim_punctuation(s_field.value).strip if s_field.code == 'b'
    end
    # Build publication info string ("place: publisher") and add to citation array.
    pub_info += a_pub_info unless a_pub_info.nil?
    pub_info += ": " if !a_pub_info.nil? and !b_pub_info.nil?
    pub_info += b_pub_info unless b_pub_info.nil?
    pub_citation << pub_info if !pub_info.empty?
  end
  pub_citation
end
##
# Process publication information for creation information
# @param [MARC::Record]
# @return [Array<String>]
def set_pub_created(record)
  value = []
  Traject::MarcExtractor.cached('260:264').collect_matching_lines(record) do |field, spec, extractor|
    a_pub_info = nil
    b_pub_info = nil
    c_pub_info = nil
    pub_info = ""
    # $a = place, $b = publisher, $c = date of publication.
    field.subfields.each do |s_field|
      a_pub_info = Traject::Macros::Marc21.trim_punctuation(s_field.value).strip if s_field.code == 'a'
      b_pub_info = Traject::Macros::Marc21.trim_punctuation(s_field.value).strip if s_field.code == 'b'
      c_pub_info = Traject::Macros::Marc21.trim_punctuation(s_field.value).strip if s_field.code == 'c'
    end
    # Build the string ("place: publisher, date")
    pub_info += a_pub_info unless a_pub_info.nil?
    pub_info += ": " unless a_pub_info.nil? && b_pub_info.nil?
    pub_info += b_pub_info unless b_pub_info.nil?
    pub_info += ', ' unless pub_info.nil? || c_pub_info.nil?
    pub_info += c_pub_info unless c_pub_info.nil?
    # Append the terminal publication date for open ranges like "1995-"
    pub_info += record.end_date_from_008 if !c_pub_info.nil? && /\d{4}\-$/.match(c_pub_info)
    value << pub_info unless pub_info.empty?
  end
  value
end
# Em dash used to join subject-heading hierarchy levels.
SEPARATOR = '—'
# for the hierarchical subject display and facet
# split with em dash along v,x,y,z
def process_subject_facet record, fields
  subjects = []
  Traject::MarcExtractor.cached(fields).collect_matching_lines(record) do |field, spec, extractor|
    subject = extractor.collect_subfields(field, spec).first
    unless subject.nil?
      field.subfields.each do |s_field|
        # Replace the space preceding each subdivision ($v/$x/$y/$z value)
        # with the separator, marking a hierarchy boundary.
        if (s_field.code == 'v' || s_field.code == 'x' || s_field.code == 'y' || s_field.code == 'z')
          subject = subject.gsub(" #{s_field.value}", "#{SEPARATOR}#{s_field.value}")
        end
      end
      # Trim punctuation per level, then rejoin into one delimited string.
      subject = subject.split(SEPARATOR)
      subject = subject.map{ |s| Traject::Macros::Marc21.trim_punctuation(s) }.join(SEPARATOR)
      subjects << subject
    end
  end
  subjects
end
# for the split subject facet
# split with em dash along x,z
def process_subject_topic_facet record
  subjects = []
  Traject::MarcExtractor.cached('600|*0|abcdfklmnopqrtxz:610|*0|abfklmnoprstxz:611|*0|abcdefgklnpqstxz:630|*0|adfgklmnoprstxz:650|*0|abcxz:651|*0|axz').collect_matching_lines(record) do |field, spec, extractor|
    subject = extractor.collect_subfields(field, spec).first
    unless subject.nil?
      field.subfields.each do |s_field|
        # Only $x and $z mark split points for the topic facet.
        if (s_field.code == 'x' || s_field.code == 'z')
          subject = subject.gsub(" #{s_field.value}", "#{SEPARATOR}#{s_field.value}")
        end
      end
      # Unlike the hierarchical facet, each trimmed level is kept as its
      # own facet value (hence the final flatten).
      subject = subject.split(SEPARATOR)
      subjects << subject.map { |s| Traject::Macros::Marc21.trim_punctuation(s) }
    end
  end
  subjects.flatten
end
# Drop every non-digit and normalize away leading zeros, e.g.
# "ocm00123" -> "123". Digit-free input yields "0" (to_i of "").
def strip_non_numeric(num_str)
  num_str.delete('^0-9').to_i.to_s
end
# Normalize an OCLC number to bare digits; with prefix: true, attach the
# length-dependent prefix ("ocm" zero-padded to 8 digits, "ocn" for 9,
# "on" otherwise).
def oclc_normalize(oclc, opts = {prefix: false})
  digits = oclc.gsub(/\D/, '').to_i.to_s # inlined strip_non_numeric
  return digits unless opts[:prefix] == true
  case digits.length
  when 1..8
    format('ocm%08d', digits.to_i)
  when 9
    "ocn#{digits}"
  else
    "on#{digits}"
  end
end
# Cached mapping of ARKs to Bib IDs
# Retrieves and stores paginated Solr responses containing the ARK's and BibID's
class CacheMap
  attr_reader :values

  # Constructor
  # @param host [String] the host for the Blacklight endpoint
  # @param path [String] the path for the Blacklight endpoint
  # @param rows [Integer] the number of rows for each Solr response
  # @param logger [IO] the logging device
  def initialize(host:, path: '/catalog.json', rows: 1000000, logger: STDOUT)
    @host = host
    @path = path
    @rows = rows
    @logger = logger
    @values = {}
    # Eagerly populate the cache at construction time.
    seed!
  end

  # Seed the cache, recursing page by page until the last page is reached.
  # @param page [Integer] the page number at which to start the caching
  def seed!(page: 1)
    response = query(page: page)
    # An empty hash means the query failed (see the rescue in #query).
    return if response.empty?
    pages = response.fetch('pages')
    cache_page(response)
    if pages.fetch('last_page?') == false
      seed!(page: page + 1)
    end
  end

  private

  # Cache a page
  # @param page [Hash] Solr response page
  def cache_page(page)
    docs = page.fetch('docs')
    docs.each do |doc|
      arks = doc.fetch('identifier_ssim', [])
      bib_ids = doc.fetch('source_metadata_identifier_ssim', [])
      ark = arks.first
      bib_id = bib_ids.first
      # NOTE(review): when either field is missing, nil keys/values are
      # stored here — confirm downstream lookups tolerate that.
      @values[ark] = bib_id
    end
  end

  # Query the service using the endpoint; failures are logged and an empty
  # hash is returned so seeding degrades gracefully.
  # @param [Integer] the page parameter for the query
  def query(page: 1)
    begin
      url = URI::HTTPS.build(host: @host, path: @path, query: "q=&rows=#{@rows}&page=#{page}&f[identifier_tesim][]=ark")
      http_response = Faraday.get(url)
      values = JSON.parse(http_response.body)
      values.fetch('response')
    rescue StandardError => err
      @logger.error "Failed to seed the ARK cached from the repository: #{err}"
      {}
    end
  end
end
# Composite of CacheMaps
# Provides the ability to build a cache from multiple Solr endpoints
class CompositeCacheMap
  # Constructor
  # @param cache_maps [Array<CacheMap>] the CacheMap instances for each endpoint
  def initialize(cache_maps:)
    @cache_maps = cache_maps
  end
  # Seed every underlying cache.
  # @param page [Integer] accepted for interface parity with CacheMap#seed!; unused
  def seed!(page: 1)
    @cache_maps.each(&:seed!)
  end
  # Merge the cached values of all maps into one Hash (later maps win on
  # key collisions).
  # @return [Hash] the values cached from the Solr responses
  def values
    per_map = @cache_maps.map(&:values)
    per_map.reduce(&:merge)
  end
end
# Retrieve the stored (or seed) the cache for the ARK's in Figgy
# NOTE: CacheMap seeds itself on construction, so this issues HTTP requests.
# `logger` is expected to be supplied by the surrounding indexing context.
# @return [CacheMap]
def figgy_ark_cache
  CacheMap.new(host: "figgy.princeton.edu", logger: logger)
end
# Retrieve the stored (or seed) the cache for the ARK's in Plum
# @return [CacheMap]
def plum_ark_cache
  CacheMap.new(host: "plum.princeton.edu", logger: logger)
end
# Retrieve the stored (or seed) the cache for the ARK's in all repositories
# Memoizes the composite, then returns its merged values.
# @return [Hash] merged ARK => Bib ID mapping (the Hash, not the CompositeCacheMap)
def ark_cache
  @cache ||= CompositeCacheMap.new(cache_maps: [figgy_ark_cache, plum_ark_cache])
  @cache.values
end
# Class modeling the ARK standard for URL's
# @see https://tools.ietf.org/html/draft-kunze-ark-18
class URI::ARK < URI::Generic
attr_reader :nmah, :naan, :name
# Constructs an ARK from a URL
# @param url [URI::Generic] the URL for the ARK resource
# @return [URI::ARK] the ARK
def self.parse(url: url)
build(
scheme: url.scheme,
userinfo: url.userinfo,
host: url.host,
port: url.port,
registry: url.registry,
path: url.path,
opaque: url.opaque,
query: url.query,
fragment: url.fragment
)
end
# Validates whether or not a URL is an ARK URL
# @param uri [URI::Generic] a URL
# @return [TrueClass, FalseClass]
def self.ark?(url: url)
m = /\:\/\/(.+)\/ark\:\/(.+)\/(.+)\/?/.match(url.to_s)
!!m
end
# Constructor
def initialize(*arg)
super(*arg)
extract_components!
end
private
# Extract the components from the ARK URL into member variables
def extract_components!
raise StandardError, "Invalid ARK URL using: #{self.to_s}" unless self.class.ark?(url: self)
m = /\:\/\/(.+)\/ark\:\/(.+)\/(.+)\/?/.match(self.to_s)
@nmah = m[1]
@naan = m[2]
@name = m[3]
end
end
# Class for building instances of URI::HTTPS for Orangelight URL's
class OrangelightUrlBuilder
  # Constructor
  # @param ark_cache [CompositeCacheMap] composite of caches for mapping ARK's to BibID's
  # @param service_host [String] the host name for the Orangelight instance
  # @todo Resolve the service_host default parameter properly (please @see https://github.com/pulibrary/marc_liberation/issues/313)
  def initialize(ark_cache:, service_host: 'pulsearch.princeton.edu')
    @ark_cache = ark_cache
    @service_host = service_host
  end
  # Generates an Orangelight catalog URL for an ARK (looked up by its
  # "ark:/naan/name" form) or for any other URL (looked up verbatim).
  # @param url [URI::ARK, URI::Generic] the resource URL
  # @return [URI::HTTPS, nil] the catalog URL, or nil on a cache miss
  def build(url:)
    cache_key = url.is_a?(URI::ARK) ? "ark:/#{url.naan}/#{url.name}" : url.to_s
    bib_id = @ark_cache.fetch(cache_key, nil)
    return if bib_id.nil?
    URI::HTTPS.build(host: @service_host, path: "/catalog/#{bib_id}")
  end
end
# returns hash of links ($u) (key),
# anchor text ($y, $3, hostname), and additional labels ($z) (array value)
# @param [MARC::Record] the MARC record being parsed
# @return [Hash] the values used to construct the links
def electronic_access_links(record)
  links = {}
  holding_856s = {}
  Traject::MarcExtractor.cached('856').collect_matching_lines(record) do |field, spec, extractor|
    anchor_text = false
    z_label = false
    url_key = false
    holding_id = nil
    field.subfields.each do |s_field|
      holding_id = s_field.value if s_field.code == '0'
      # e. g. http://arks.princeton.edu/ark:/88435/7d278t10z, https://drive.google.com/open?id=0B3HwfRG3YqiNVVR4bXNvRzNwaGs
      url_key = s_field.value if s_field.code == 'u'
      # e. g. "Curatorial documentation"
      z_label = s_field.value if s_field.code == 'z'
      # $y/$3/$x values are concatenated with ": " into the anchor text
      if s_field.code == 'y' || s_field.code == '3' || s_field.code == 'x'
        if anchor_text
          anchor_text << ": #{s_field.value}"
        else
          anchor_text = s_field.value
        end
      end
    end
    logger.error "#{record['001']} - no url in 856 field" unless url_key
    # URI.parse(false) when $u is absent also lands in the rescue branch
    url = begin
      url = URI.parse(url_key)
    rescue StandardError => err
      logger.error "#{record['001']} - invalid URL in 856 field"
      nil
    end
    if url
      if url.host
        # Default to the host for the URL for the <a> text content
        anchor_text = url.host unless anchor_text
        # Retrieve the ARK resource
        bib_id_field = record['001']
        bib_id = bib_id_field.value
        # Extract the ARK from the URL (if the URL is indeed an ARK)
        url = URI::ARK.parse(url: url) if URI::ARK.ark?(url: url)
        # ...and attempt to build an Orangelight URL from the (cached) mappings exposed by the repositories
        builder = OrangelightUrlBuilder.new(ark_cache: ark_cache)
        orangelight_url = builder.build(url: url)
        url_key = orangelight_url.to_s unless orangelight_url.nil?
      end
      # Build the URL
      url_labels = [anchor_text] # anchor text is first element
      url_labels << z_label if z_label # optional 2nd element if z
      # 856s carrying a holding id ($0) are grouped separately per holding
      if holding_id.nil?
        links[url_key] = url_labels
      else
        holding_856s[holding_id] = { url_key => url_labels }
      end
    end
  end
  links['holding_record_856s'] = holding_856s unless holding_856s == {}
  links
end
# Strips a leading parenthesized source prefix, e.g. "(OCoLC)1234" => "1234".
# @param standard_no [String]
# @return [String]
def remove_parens_035 standard_no
  leading_prefix = /^\(.*?\)/
  standard_no.gsub(leading_prefix, '')
end
# Exact-match genre headings admitted into the genre facet
# (see process_genre_facet). Frozen: these constants are read-only lookup data.
GENRES = [
  'Bibliography',
  'Biography',
  'Catalogs',
  'Catalogues raisonnes',
  'Commentaries',
  'Congresses',
  'Diaries',
  'Dictionaries',
  'Drama',
  'Encyclopedias',
  'Exhibitions',
  'Fiction',
  'Guidebooks',
  'In art',
  'Indexes',
  'Librettos',
  'Manuscripts',
  'Newspapers',
  'Periodicals',
  'Pictorial works',
  'Poetry',
  'Portraits',
  'Scores',
  'Songs and music',
  'Sources',
  'Statistics',
  'Texts',
  'Translations'
].freeze
# Genre headings admitted when they begin with one of these prefixes.
GENRE_STARTS_WITH = [
  'Census',
  'Maps',
  'Methods',
  'Parts',
  'Personal narratives',
  'Scores and parts',
  'Study and teaching',
  'Translations into '
].freeze
# 600/610/650/651 $v, $x filtered
# 655 $a, $v, $x filtered
# Builds the genre facet: $x subdivisions are kept only when whitelisted by
# GENRES / GENRE_STARTS_WITH; $v (and 655 $a/$v) values are kept unless blank.
# @param record [MARC::Record]
# @return [Array<String>] unique punctuation-trimmed genre terms
def process_genre_facet record
  genres = []
  Traject::MarcExtractor.cached('600|*0|x:610|*0|x:611|*0|x:630|*0|x:650|*0|x:651|*0|x:655|*0|x').collect_matching_lines(record) do |field, spec, extractor|
    genre = extractor.collect_subfields(field, spec).first
    unless genre.nil?
      genre = Traject::Macros::Marc21.trim_punctuation(genre)
      # keep $x only when it is a known genre heading or a known prefix
      genres << genre if GENRES.include?(genre) || GENRE_STARTS_WITH.any? { |g| genre[g] }
    end
  end
  Traject::MarcExtractor.cached('600|*0|v:610|*0|v:611|*0|v:630|*0|v:650|*0|v:651|*0|v:655|*0|a:655|*0|v').collect_matching_lines(record) do |field, spec, extractor|
    genre = extractor.collect_subfields(field, spec).first
    unless genre.nil?
      genre = Traject::Macros::Marc21.trim_punctuation(genre)
      # whitespace-only values are data errors; log instead of indexing them
      if genre.match(/^\s+$/)
        logger.error "#{record['001']} - Blank genre field"
      else
        genres << genre
      end
    end
  end
  genres.uniq
end
# Collects, for each matching field, subfield $t plus every subfield value
# that follows it, joined with spaces and punctuation-trimmed.
# NOTE(review): a second $t in the same field is appended twice (once by the
# after_t branch and once by the == 't' branch) — confirm repeated $t cannot occur.
# @param record [MARC::Record]
# @param fields [String] Traject field spec
# @return [Array<String>]
def everything_after_t record, fields
  values = []
  Traject::MarcExtractor.cached(fields).collect_matching_lines(record) do |field, spec, extractor|
    after_t = false
    title = []
    field.subfields.each do |s_field|
      title << s_field.value if after_t
      if s_field.code == 't'
        title << s_field.value
        after_t = true
      end
    end
    values << Traject::Macros::Marc21.trim_punctuation(title.join(' ')) unless title.empty?
  end
  values
end
# Collects, for each matching field, every subfield value up to and including
# the first $t; fields that contain no $t contribute nothing (non_t stays true).
# @param record [MARC::Record]
# @param fields [String] Traject field spec
# @return [Array<String>]
def everything_through_t record, fields
  values = []
  Traject::MarcExtractor.cached(fields).collect_matching_lines(record) do |field, spec, extractor|
    non_t = true
    title = []
    field.subfields.each do |s_field|
      title << s_field.value
      # stop at the first $t; its value has already been collected above
      if s_field.code == 't'
        non_t = false
        break
      end
    end
    values << Traject::Macros::Marc21.trim_punctuation(title.join(' ')) unless (title.empty? or non_t)
  end
  values
end
##
# @param record [MARC::Record]
# @param fields [String] MARC fields of interest
# @return [Array] of name-titles each in an [Array], each element [String] split by hierarchy,
# both name ($a) and title ($t) are required
def prep_name_title record, fields
  values = []
  Traject::MarcExtractor.cached(fields).collect_matching_lines(record) do |field, spec, extractor|
    name_title = []
    author = []
    non_a = true
    non_t = true
    field.subfields.each do |s_field|
      # honor the subfield whitelist from the extractor spec, when present
      next if (!spec.subfields.nil? && !spec.subfields.include?(s_field.code))
      non_a = false if s_field.code == 'a'
      non_t = false if s_field.code == 't'
      # subfields before the first $t form the author; $t and later are title parts
      if non_t
        author << s_field.value
      else
        name_title << s_field.value
      end
    end
    # require both a name ($a) and a title ($t); author becomes element 0
    unless (non_a || non_t)
      name_title.unshift(author.join(' '))
      values << name_title unless name_title.empty?
    end
  end
  values
end
# @param fields [Array] with portions of hierarchy from name-titles
# @return [Array] each hierarchy expanded into cumulative, punctuation-trimmed
#   joined segments; the bare author remains the first element
def join_hierarchy_without_author fields
  fields.map do |hierarchy|
    hierarchy.each_index.map do |i|
      Traject::Macros::Marc21.trim_punctuation(hierarchy[0..i].join(' '))
    end
  end
end
# @param fields [Array] with portions of hierarchy from name-titles
# @return [Array] cumulative name-title segments with the bare-author element dropped
def join_hierarchy fields
  join_hierarchy_without_author(fields).map { |segments| segments[1..-1] }
end
# holding block json hash keyed on mfhd id including location, library, call number, shelving title,
# location note, location has, location has (current), indexes, and supplements
# pulls from mfhd 852, 866, 867, and 868
# assumes exactly 1 852 is present per mfhd (it saves the last 852 it finds)
# @param record [MARC::Record]
# @return [Hash{String=>Hash}] holdings keyed by mfhd id (the 852 $0)
def process_holdings record
  all_holdings = {}
  # 852: core holding data (location, call number, shelving title, notes)
  Traject::MarcExtractor.cached('852').collect_matching_lines(record) do |field, spec, extractor|
    holding = {}
    holding_id = nil
    field.subfields.each do |s_field|
      if s_field.code == '0'
        holding_id = s_field.value
      elsif s_field.code == 'b'
        ## Location and Library aren't loading correctly with SCSB Records
        # ||= keeps the first $b when a field carries several
        holding['location'] ||= Traject::TranslationMap.new("locations", :default => "__passthrough__")[s_field.value]
        holding['library'] ||= Traject::TranslationMap.new("location_display", :default => "__passthrough__")[s_field.value]
        holding['location_code'] ||= s_field.value
      elsif /[ckhij]/.match(s_field.code)
        # call-number pieces; $c is excluded from the browse form below
        holding['call_number'] ||= []
        holding['call_number'] << s_field.value
        unless s_field.code == 'c'
          holding['call_number_browse'] ||= []
          holding['call_number_browse'] << s_field.value
        end
      elsif s_field.code == 'l'
        holding['shelving_title'] ||= []
        holding['shelving_title'] << s_field.value
      elsif s_field.code == 't' && holding['copy_number'].nil?
        # first $t only
        holding['copy_number'] = s_field.value
      elsif s_field.code == 'z'
        holding['location_note'] ||= []
        holding['location_note'] << s_field.value
      end
    end
    holding['call_number'] = holding['call_number'].join(' ') if holding['call_number']
    holding['call_number_browse'] = holding['call_number_browse'].join(' ') if holding['call_number_browse']
    # last 852 with a given $0 wins (see assumption in the header comment)
    all_holdings[holding_id] = holding unless holding_id.nil?
  end
  # 866: "location has" statements ($a plus optional $z note), attached via $0
  Traject::MarcExtractor.cached('866az').collect_matching_lines(record) do |field, spec, extractor|
    value = []
    holding_id = nil
    field.subfields.each do |s_field|
      if s_field.code == '0'
        holding_id = s_field.value
      elsif s_field.code == 'a'
        value << s_field.value
      elsif s_field.code == 'z'
        value << s_field.value
      end
    end
    # silently skipped when the 866 references an unknown holding id
    if (all_holdings[holding_id] and !value.empty?)
      all_holdings[holding_id]['location_has'] ||= []
      all_holdings[holding_id]['location_has'] << value.join(' ')
    end
  end
  # 867: supplement statements, same shape as 866
  Traject::MarcExtractor.cached('8670az').collect_matching_lines(record) do |field, spec, extractor|
    value = []
    holding_id = nil
    field.subfields.each do |s_field|
      if s_field.code == '0'
        holding_id = s_field.value
      elsif s_field.code == 'a'
        value << s_field.value
      elsif s_field.code == 'z'
        value << s_field.value
      end
    end
    if (all_holdings[holding_id] and !value.empty?)
      all_holdings[holding_id]['supplements'] ||= []
      all_holdings[holding_id]['supplements'] << value.join(' ')
    end
  end
  # 868: index statements, same shape as 866
  Traject::MarcExtractor.cached('8680az').collect_matching_lines(record) do |field, spec, extractor|
    value = []
    holding_id = nil
    field.subfields.each do |s_field|
      if s_field.code == '0'
        holding_id = s_field.value
      elsif s_field.code == 'a'
        value << s_field.value
      elsif s_field.code == 'z'
        value << s_field.value
      end
    end
    if (all_holdings[holding_id] and !value.empty?)
      all_holdings[holding_id]['indexes'] ||= []
      all_holdings[holding_id]['indexes'] << value.join(' ')
    end
  end
  ### Added for ReCAP records
  # 876: item-level data attached to the holding via $0
  Traject::MarcExtractor.cached('87603ahjptxz').collect_matching_lines(record) do |field, spec, extractor|
    item = {}
    field.subfields.each do |s_field|
      if s_field.code == '0'
        item[:holding_id] = s_field.value
      elsif s_field.code == '3'
        item[:enumeration] = s_field.value
      elsif s_field.code == 'a'
        item[:id] = s_field.value
      elsif s_field.code == 'h'
        item[:use_statement] = s_field.value
      elsif s_field.code == 'j'
        item[:status_at_load] = s_field.value
      elsif s_field.code == 'p'
        item[:barcode] = s_field.value
      elsif s_field.code == 't'
        item[:copy_number] = s_field.value
      elsif s_field.code == 'x'
        item[:cgc] = s_field.value
      elsif s_field.code == 'z'
        item[:collection_code] = s_field.value
      end
    end
    # NOTE(review): raises NoMethodError when 876 $0 is absent or does not match
    # any 852 holding id (all_holdings[...] is nil) — confirm ReCAP records
    # always carry a valid $0.
    if all_holdings[item[:holding_id]]["items"].nil?
      all_holdings[item[:holding_id]]["items"] = [ item ]
    else
      all_holdings[item[:holding_id]]["items"] << item
    end
  end
  all_holdings
end
# Builds display notes for ReCAP items: one "<partner> - <collection group>"
# string per 876 field, where partner comes from the last 852 $b and the
# collection group from the 876 $x.
# @param record [MARC::Record]
# @return [Array<String>]
def process_recap_notes record
  item_notes = []
  partner_lib = nil
  Traject::MarcExtractor.cached('852').collect_matching_lines(record) do |field, spec, extractor|
    field.subfields.each do |s_field|
      if s_field.code == 'b'
        # last 852 $b wins; expected values are SCSB partner codes
        partner_lib = s_field.value #||= Traject::TranslationMap.new("locations", :default => "__passthrough__")[s_field.value]
      end
    end
  end
  Traject::MarcExtractor.cached('87603ahjptxz').collect_matching_lines(record) do |field, spec, extractor|
    col_group = ''
    field.subfields.each do |s_field|
      # $x: collection group designation (Shared/Private/anything else => Open)
      if s_field.code == 'x'
        if s_field.value == 'Shared'
          col_group = 'S'
        elsif s_field.value == 'Private'
          col_group = 'P'
        else
          col_group = 'O'
        end
      end
    end
    if partner_lib == 'scsbnypl'
      partner_display_string = 'N'
    elsif partner_lib == 'scsbcul'
      partner_display_string = 'C'
    end
    # NOTE(review): partner_display_string is nil for any other partner code,
    # yielding a note like " - S" — confirm only scsbnypl/scsbcul occur here.
    item_notes << "#{partner_display_string} - #{col_group}"
  end
  item_notes
end
Resolves #323 by removing the default "url" values for URI::ARK.parse and URI::ARK.ark?
# encoding: UTF-8
require 'library_stdnums'
require 'uri'
require 'faraday'
require 'faraday_middleware'
module MARC
  class Record
    # Taken from pul-store marc.rb lib extension
    # Shamelessly lifted from SolrMARC, with a few changes; no doubt there will
    # be more.
    # Patterns used by #best_date to recognize the many forms a publication
    # date takes in 260 $c. Capture groups feed $1/$2 in the case below.
    @@THREE_OR_FOUR_DIGITS = /^(20|19|18|17|16|15|14|13|12|11|10|9|8|7|6|5|4|3|2|1)(\d{2})\.?$/
    @@FOUR_DIGIT_PATTERN_BRACES = /^\[([12]\d{3})\??\]\.?$/
    @@FOUR_DIGIT_PATTERN_ONE_BRACE = /^\[(20|19|18|17|16|15|14|13|12|11|10)(\d{2})/
    @@FOUR_DIGIT_PATTERN_OTHER_1 = /^l(\d{3})/
    @@FOUR_DIGIT_PATTERN_OTHER_2 = /^\[(20|19|18|17|16|15|14|13|12|11|10)\](\d{2})/
    @@FOUR_DIGIT_PATTERN_OTHER_3 = /^\[?(20|19|18|17|16|15|14|13|12|11|10)(\d)[^\d]\]?/
    @@FOUR_DIGIT_PATTERN_OTHER_4 = /i\.e\.\,? (20|19|18|17|16|15|14|13|12|11|10)(\d{2})/
    @@FOUR_DIGIT_PATTERN_OTHER_5 = /^\[?(\d{2})\-\-\??\]?/
    @@BC_DATE_PATTERN = /[0-9]+ [Bb]\.?[Cc]\.?/
    # Picks the best single publication year: matches 260 $c against the
    # patterns above (BC dates are rejected), then falls back to the 008.
    # @return [String, nil] a four-digit year, or nil
    def best_date
      date = nil
      if self['260']
        if self['260']['c']
          field_260c = self['260']['c']
          case field_260c
          when @@THREE_OR_FOUR_DIGITS
            date = "#{$1}#{$2}"
          when @@FOUR_DIGIT_PATTERN_BRACES
            date = $1
          when @@FOUR_DIGIT_PATTERN_ONE_BRACE
            date = $1
          when @@FOUR_DIGIT_PATTERN_OTHER_1
            # 'l983'-style OCR/typo dates: leading letter l stands in for 1
            date = "1#{$1}"
          when @@FOUR_DIGIT_PATTERN_OTHER_2
            date = "#{$1}#{$2}"
          when @@FOUR_DIGIT_PATTERN_OTHER_3
            # three known digits + one unknown: pad with a trailing zero
            date = "#{$1}#{$2}0"
          when @@FOUR_DIGIT_PATTERN_OTHER_4
            date = "#{$1}#{$2}"
          when @@FOUR_DIGIT_PATTERN_OTHER_5
            # only the century known (e.g. "[19--?]"): use century00
            date = "#{$1}00"
          when @@BC_DATE_PATTERN
            date = nil
          end
        end
      end
      date ||= self.date_from_008
    end
    # Date 1 (bytes 7-10) of the 008 control field, with 'u' zero-filled.
    # @return [String, nil] a four-digit year, or nil
    def date_from_008
      if self['008']
        d = self['008'].value[7,4]
        d = d.gsub 'u', '0' unless d == 'uuuu'
        # NOTE(review): the guard compares against a single space although the
        # slice is 4 characters — confirm whether '    ' (four spaces) was intended.
        d = d.gsub ' ', '0' unless d == ' '
        d if d =~ /^[0-9]{4}$/
      end
    end
    # Date 2 (bytes 11-14) of the 008, normalized like #date_from_008.
    # @return [String, nil] a four-digit year, or nil
    def end_date_from_008
      if self['008']
        d = self['008'].value[11,4]
        d = d.gsub 'u', '0' unless d == 'uuuu'
        d = d.gsub ' ', '0' unless d == ' '
        d if d =~ /^[0-9]{4}$/
      end
    end
    # Raw 260 $c for display, falling back to the 008 date.
    # @return [String, nil]
    def date_display
      date = nil
      if self['260']
        if self['260']['c']
          date = self['260']['c']
        end
      end
      date ||= self.date_from_008
    end
  end
end
# Fallback display label for 024 standard numbers with an unrecognized indicator.
FALLBACK_STANDARD_NO = 'Other standard number'
# Maps the first indicator of an 024 field to a display label.
# Indicator '7' yields the sentinel '$2', signalling the label must be read
# from subfield $2 by the caller.
# @param i [String] first indicator value
# @return [String]
def map_024_indicators_to_labels i
  labels = {
    '0' => 'International Standard Recording Code',
    '1' => 'Universal Product Code',
    '2' => 'International Standard Music Number',
    '3' => 'International Article Number',
    '4' => 'Serial Item and Contribution Identifier',
    '7' => '$2'
  }
  labels.fetch(i, FALLBACK_STANDARD_NO)
end
# Maps the second indicator of a 246 field to its display label.
# @param i [String] indicator value
# @return [String, nil] nil for unrecognized indicators
def indicator_label_246 i
  {
    '0' => 'Portion of title',
    '1' => 'Parallel title',
    '2' => 'Distinctive title',
    '3' => 'Other title',
    '4' => 'Cover title',
    '5' => 'Added title page title',
    '6' => 'Caption title',
    '7' => 'Running title',
    '8' => 'Spine title'
  }[i]
end
# Derives a hash key from a subfield value: capitalizes it and strips one
# trailing punctuation mark, substituting the fallback when nothing remains.
# @param subfield_value [String]
# @param fallback [String] key used when the cleaned value is empty
# @return [String]
def subfield_specified_hash_key subfield_value, fallback
  key = subfield_value.capitalize.gsub(/[[:punct:]]?$/, '')
  if key.empty?
    fallback
  else
    key
  end
end
# Builds a hash of standard-number label => [values] from the 024 fields.
# The label is derived from indicator 1; when the indicator is '7' the label
# is taken from subfield $2 (or FALLBACK_STANDARD_NO when $2 is absent).
# @param record [MARC::Record]
# @return [Hash{String=>Array<String>}]
def standard_no_hash record
  standard_no = {}
  Traject::MarcExtractor.cached('024').collect_matching_lines(record) do |field, spec, extractor|
    standard_label = map_024_indicators_to_labels(field.indicator1)
    standard_number = nil
    field.subfields.each do |s_field|
      standard_number = s_field.value if s_field.code == 'a'
      # '$2' sentinel: the label is carried in subfield $2
      standard_label = subfield_specified_hash_key(s_field.value, FALLBACK_STANDARD_NO) if s_field.code == '2' and standard_label == '$2'
    end
    # a leftover '$2' means indicator was 7 but no $2 subfield was present
    standard_label = FALLBACK_STANDARD_NO if standard_label == '$2'
    # append to the label's bucket, creating it on first use
    standard_no[standard_label] ? standard_no[standard_label] << standard_number : standard_no[standard_label] = [standard_number] unless standard_number.nil?
  end
  standard_no
end
# Handles ISBNs, ISSNs, and OCLCs
# ISBN: 020a, 020z, 776z
# ISSN: 022a, 022l, 022y, 022z, 776x
# OCLC: 035a, 776w, 787w
# BIB: 776w, 787w (adds BIB prefix so Blacklight can detect whether to search id field)
# @param record [MARC::Record]
# @return [Array<String>] normalized linking identifiers, nils removed, deduplicated
def other_versions record
  linked_nums = []
  Traject::MarcExtractor.cached('020az:022alyz:035a:776wxz:787w').collect_matching_lines(record) do |field, spec, extractor|
    field.subfields.each do |s_field|
      # StdNum normalization returns nil for invalid numbers; compact below drops them
      linked_nums << StdNum::ISBN.normalize(s_field.value) if (field.tag == "020") or (field.tag == "776" and s_field.code == 'z')
      linked_nums << StdNum::ISSN.normalize(s_field.value) if (field.tag == "022") or (field.tag == "776" and s_field.code == 'x')
      linked_nums << oclc_normalize(s_field.value, prefix: true) if s_field.value.start_with?('(OCoLC)') and (field.tag == "035")
      if (field.tag == "776" and s_field.code == 'w') or (field.tag == "787" and s_field.code == 'w')
        # $w may carry an (OCoLC)-prefixed number or a bare bib id
        linked_nums << oclc_normalize(s_field.value, prefix: true) if s_field.value.include?('(OCoLC)')
        linked_nums << "BIB" + strip_non_numeric(s_field.value) unless s_field.value.include?('(')
        # any other embedded parenthesized prefix is unexpected; log it
        if s_field.value.include?('(') and !s_field.value.start_with?('(')
          logger.error "#{record['001']} - linked field formatting: #{s_field.value}"
        end
      end
    end
  end
  linked_nums.compact.uniq
end
# only includes values before $t
# Collects personal/corporate/meeting names (1xx/7xx); any subfield values
# that appear after a $t (the name/title boundary) are chomped off the end.
# @param record [MARC::Record]
# @return [Array<String>] unique punctuation-trimmed names
def process_names record
  names = []
  Traject::MarcExtractor.cached('100aqbcdk:110abcdfgkln:111abcdfgklnpq:700aqbcdk:710abcdfgkln:711abcdfgklnpq').collect_matching_lines(record) do |field, spec, extractor|
    name = extractor.collect_subfields(field, spec).first
    unless name.nil?
      remove = ''
      after_t = false
      field.subfields.each do |s_field|
        # accumulate the trailing " value" text that follows $t so it can be
        # chomped off the joined name below
        remove << " #{s_field.value}" if after_t and spec.includes_subfield_code?(s_field.code)
        after_t = true if s_field.code == 't'
      end
      name = name.chomp(remove)
      names << Traject::Macros::Marc21.trim_punctuation(name)
    end
  end
  names.uniq
end
##
# Get hash of authors grouped by role
# The 1xx name becomes 'primary_author'; 7xx names are bucketed by their
# relator code ($4) or relator term ($e), defaulting to 'secondary_authors'.
# @param [MARC::Record]
# @return [Hash]
def process_author_roles record
  # recognized relator codes/terms (upcased, trailing punctuation stripped)
  author_roles = {
    'TRL' => 'translators',
    'EDT' => 'editors',
    'COM' => 'compilers',
    'TRANSLATOR' => 'translators',
    'EDITOR' => 'editors',
    'COMPILER' => 'compilers'
  }
  names = {}
  names['secondary_authors'] = []
  names['translators'] = []
  names['editors'] = []
  names['compilers'] = []
  Traject::MarcExtractor.cached('100a:110a:111a:700a:710a:711a').collect_matching_lines(record) do |field, spec, extractor|
    name = extractor.collect_subfields(field, spec).first
    unless name.nil?
      name = Traject::Macros::Marc21.trim_punctuation(name)
      # If name is from 1xx field, it is the primary author.
      if /1../.match(field.tag)
        names['primary_author'] = name
      else
        relator = ""
        field.subfields.each do |s_field|
          # relator code (subfield 4)
          if s_field.code == '4'
            relator = s_field.value.upcase.gsub(/[[:punct:]]?$/, '')
          # relator term (subfield e)
          elsif s_field.code == 'e'
            # NOTE: when both $4 and $e are present, the later subfield wins
            relator = s_field.value.upcase.gsub(/[[:punct:]]?$/, '')
          end
        end
        # Set role from relator value.
        role = author_roles[relator] || 'secondary_authors'
        names[role] << name
      end
    end
  end
  names
end
##
# Process publication information for citations.
# Formats each 260/264 as "place: publisher" (colon only when both present).
# @param [MARC::Record]
# @return [Array] pub info strings from fields 260 and 264.
def set_pub_citation(record)
  pub_citation = []
  Traject::MarcExtractor.cached('260:264').collect_matching_lines(record) do |field, spec, extractor|
    a_pub_info = nil
    b_pub_info = nil
    pub_info = ""
    field.subfields.each do |s_field|
      # last $a/$b in the field wins
      a_pub_info = Traject::Macros::Marc21.trim_punctuation(s_field.value).strip if s_field.code == 'a'
      b_pub_info = Traject::Macros::Marc21.trim_punctuation(s_field.value).strip if s_field.code == 'b'
    end
    # Build publication info string and add to citation array.
    pub_info += a_pub_info unless a_pub_info.nil?
    pub_info += ": " if !a_pub_info.nil? and !b_pub_info.nil?
    pub_info += b_pub_info unless b_pub_info.nil?
    pub_citation << pub_info if !pub_info.empty?
  end
  pub_citation
end
##
# Process publication information for creation information
# Formats each 260/264 as "place: publisher, date"; for open-ended dates
# ("1999-") the 008 end date is appended.
# @param [MARC::Record]
# @return [Array<String>]
def set_pub_created(record)
  value = []
  Traject::MarcExtractor.cached('260:264').collect_matching_lines(record) do |field, spec, extractor|
    a_pub_info = nil
    b_pub_info = nil
    c_pub_info = nil
    pub_info = ""
    field.subfields.each do |s_field|
      # last $a/$b/$c in the field wins
      a_pub_info = Traject::Macros::Marc21.trim_punctuation(s_field.value).strip if s_field.code == 'a'
      b_pub_info = Traject::Macros::Marc21.trim_punctuation(s_field.value).strip if s_field.code == 'b'
      c_pub_info = Traject::Macros::Marc21.trim_punctuation(s_field.value).strip if s_field.code == 'c'
    end
    # Build the string
    pub_info += a_pub_info unless a_pub_info.nil?
    # NOTE(review): this adds ": " when EITHER $a or $b is present, unlike
    # set_pub_citation which requires both — confirm whether `&&` should be `||`.
    pub_info += ": " unless a_pub_info.nil? && b_pub_info.nil?
    pub_info += b_pub_info unless b_pub_info.nil?
    # NOTE(review): pub_info is a String and never nil; the first clause of
    # this guard is always false-y harmless but redundant.
    pub_info += ', ' unless pub_info.nil? || c_pub_info.nil?
    pub_info += c_pub_info unless c_pub_info.nil?
    # Append the terminal publication date
    pub_info += record.end_date_from_008 if !c_pub_info.nil? && /\d{4}\-$/.match(c_pub_info)
    value << pub_info unless pub_info.empty?
  end
  value
end
# Em dash used to delimit subject-heading hierarchy segments.
SEPARATOR = '—'
# for the hierarchical subject display and facet
# split with em dash along v,x,y,z
# Marks the boundary before each $v/$x/$y/$z with SEPARATOR, trims punctuation
# per segment, and rejoins into one em-dash-delimited string per field.
# @param record [MARC::Record]
# @param fields [String] Traject field spec
# @return [Array<String>]
def process_subject_facet record, fields
  subjects = []
  Traject::MarcExtractor.cached(fields).collect_matching_lines(record) do |field, spec, extractor|
    subject = extractor.collect_subfields(field, spec).first
    unless subject.nil?
      field.subfields.each do |s_field|
        # replace the joining space before each subdivision with the separator
        if (s_field.code == 'v' || s_field.code == 'x' || s_field.code == 'y' || s_field.code == 'z')
          subject = subject.gsub(" #{s_field.value}", "#{SEPARATOR}#{s_field.value}")
        end
      end
      subject = subject.split(SEPARATOR)
      subject = subject.map{ |s| Traject::Macros::Marc21.trim_punctuation(s) }.join(SEPARATOR)
      subjects << subject
    end
  end
  subjects
end
# for the split subject facet
# split with em dash along x,z
# Marks the boundary before each $x/$z subdivision with SEPARATOR, splits on
# it, and trims punctuation from each resulting segment.
# @param record [MARC::Record]
# @return [Array<String>] flat list of subject/subdivision terms
def process_subject_topic_facet record
  subjects = []
  Traject::MarcExtractor.cached('600|*0|abcdfklmnopqrtxz:610|*0|abfklmnoprstxz:611|*0|abcdefgklnpqstxz:630|*0|adfgklmnoprstxz:650|*0|abcxz:651|*0|axz').collect_matching_lines(record) do |field, spec, extractor|
    subject = extractor.collect_subfields(field, spec).first
    unless subject.nil?
      field.subfields.each do |s_field|
        # replace the joining space before each $x/$z value with the separator
        if (s_field.code == 'x' || s_field.code == 'z')
          subject = subject.gsub(" #{s_field.value}", "#{SEPARATOR}#{s_field.value}")
        end
      end
      subject = subject.split(SEPARATOR)
      subjects << subject.map { |s| Traject::Macros::Marc21.trim_punctuation(s) }
    end
  end
  # each field contributed an Array of segments; flatten to one list of terms
  subjects.flatten
end
# Reduces a raw identifier to its digits, dropping any leading zeros.
# @param num_str [String] e.g. "(OCoLC)00123456"
# @return [String] digits only, e.g. "123456"
def strip_non_numeric num_str
  num_str.delete('^0-9').to_i.to_s
end
# Normalizes an OCLC number; when prefix is requested, attaches the
# length-dependent ocm/ocn/on prefix (ocm numbers are zero-padded to 8).
# @param oclc [String] raw OCLC identifier
# @param opts [Hash] :prefix => true to add the ocm/ocn/on prefix
# @return [String]
def oclc_normalize oclc, opts = {prefix: false}
  digits = strip_non_numeric(oclc)
  return digits unless opts[:prefix] == true
  case digits.length
  when 1..8
    "ocm" + ("%08d" % digits)
  when 9
    "ocn" + digits
  else
    "on" + digits
  end
end
# Cached mapping of ARKs to Bib IDs
# Retrieves and stores paginated Solr responses containing the ARK's and BibID's
class CacheMap
  attr_reader :values
  # Constructor
  # NOTE: seeding happens eagerly here, so constructing a CacheMap performs
  # HTTP requests against the endpoint.
  # @param host [String] the host for the Blacklight endpoint
  # @param path [String] the path for the Blacklight endpoint
  # @param rows [Integer] the number of rows for each Solr response
  # @param logger [IO] the logging device (must respond to #error)
  def initialize(host:, path: '/catalog.json', rows: 1000000, logger: STDOUT)
    @host = host
    @path = path
    @rows = rows
    @logger = logger
    @values = {}
    seed!
  end
  # Seed the cache
  # Recurses page by page until the endpoint reports the last page.
  # @param page [Integer] the page number at which to start the caching
  def seed!(page: 1)
    response = query(page: page)
    # query returns {} on any failure, which also terminates the recursion
    return if response.empty?
    pages = response.fetch('pages')
    cache_page(response)
    if pages.fetch('last_page?') == false
      seed!(page: page + 1)
    end
  end
  private
  # Cache a page
  # Maps the first ARK of each document to that document's first Bib ID.
  # @param page [Hash] Solr response page
  def cache_page(page)
    docs = page.fetch('docs')
    docs.each do |doc|
      arks = doc.fetch('identifier_ssim', [])
      bib_ids = doc.fetch('source_metadata_identifier_ssim', [])
      ark = arks.first
      bib_id = bib_ids.first
      @values[ark] = bib_id
    end
  end
  # Query the service using the endpoint
  # Logs and returns an empty Hash on any error (network, JSON, missing key).
  # @param [Integer] the page parameter for the query
  # @return [Hash] the 'response' member of the parsed Solr JSON, or {}
  def query(page: 1)
    begin
      url = URI::HTTPS.build(host: @host, path: @path, query: "q=&rows=#{@rows}&page=#{page}&f[identifier_tesim][]=ark")
      http_response = Faraday.get(url)
      values = JSON.parse(http_response.body)
      values.fetch('response')
    rescue StandardError => err
      @logger.error "Failed to seed the ARK cached from the repository: #{err}"
      {}
    end
  end
end
# Composite of CacheMaps
# Provides the ability to build a cache from multiple Solr endpoints
class CompositeCacheMap
  # Constructor
  # @param cache_maps [Array<CacheMap>] the CacheMap instances for each endpoint
  def initialize(cache_maps:)
    @cache_maps = cache_maps
  end
  # Seed every underlying cache.
  # @param page [Integer] accepted for interface parity with CacheMap#seed!; unused
  def seed!(page: 1)
    @cache_maps.each(&:seed!)
  end
  # Merge the cached values of all maps into one Hash (later maps win on
  # key collisions).
  # @return [Hash] the values cached from the Solr responses
  def values
    per_map = @cache_maps.map(&:values)
    per_map.reduce(&:merge)
  end
end
# Retrieve the stored (or seed) the cache for the ARK's in Figgy
# NOTE: CacheMap seeds itself on construction, so this issues HTTP requests.
# `logger` is expected to be supplied by the surrounding indexing context.
# @return [CacheMap]
def figgy_ark_cache
  CacheMap.new(host: "figgy.princeton.edu", logger: logger)
end
# Retrieve the stored (or seed) the cache for the ARK's in Plum
# @return [CacheMap]
def plum_ark_cache
  CacheMap.new(host: "plum.princeton.edu", logger: logger)
end
# Retrieve the stored (or seed) the cache for the ARK's in all repositories
# Memoizes the composite, then returns its merged values.
# @return [Hash] merged ARK => Bib ID mapping (the Hash, not the CompositeCacheMap)
def ark_cache
  @cache ||= CompositeCacheMap.new(cache_maps: [figgy_ark_cache, plum_ark_cache])
  @cache.values
end
# Class modeling the ARK standard for URL's
# @see https://tools.ietf.org/html/draft-kunze-ark-18
class URI::ARK < URI::Generic
attr_reader :nmah, :naan, :name
# Constructs an ARK from a URL
# @param url [URI::Generic] the URL for the ARK resource
# @return [URI::ARK] the ARK
def self.parse(url:)
build(
scheme: url.scheme,
userinfo: url.userinfo,
host: url.host,
port: url.port,
registry: url.registry,
path: url.path,
opaque: url.opaque,
query: url.query,
fragment: url.fragment
)
end
# Validates whether or not a URL is an ARK URL
# @param uri [URI::Generic] a URL
# @return [TrueClass, FalseClass]
def self.ark?(url:)
m = /\:\/\/(.+)\/ark\:\/(.+)\/(.+)\/?/.match(url.to_s)
!!m
end
# Constructor
def initialize(*arg)
super(*arg)
extract_components!
end
private
# Extract the components from the ARK URL into member variables
def extract_components!
raise StandardError, "Invalid ARK URL using: #{self.to_s}" unless self.class.ark?(url: self)
m = /\:\/\/(.+)\/ark\:\/(.+)\/(.+)\/?/.match(self.to_s)
@nmah = m[1]
@naan = m[2]
@name = m[3]
end
end
# Class for building instances of URI::HTTPS for Orangelight URL's
class OrangelightUrlBuilder
  # Constructor
  # @param ark_cache [CompositeCacheMap] composite of caches for mapping ARK's to BibID's
  # @param service_host [String] the host name for the Orangelight instance
  # @todo Resolve the service_host default parameter properly (please @see https://github.com/pulibrary/marc_liberation/issues/313)
  def initialize(ark_cache:, service_host: 'pulsearch.princeton.edu')
    @ark_cache = ark_cache
    @service_host = service_host
  end
  # Generates an Orangelight catalog URL for an ARK (looked up by its
  # "ark:/naan/name" form) or for any other URL (looked up verbatim).
  # @param url [URI::ARK, URI::Generic] the resource URL
  # @return [URI::HTTPS, nil] the catalog URL, or nil on a cache miss
  def build(url:)
    cache_key = url.is_a?(URI::ARK) ? "ark:/#{url.naan}/#{url.name}" : url.to_s
    bib_id = @ark_cache.fetch(cache_key, nil)
    return if bib_id.nil?
    URI::HTTPS.build(host: @service_host, path: "/catalog/#{bib_id}")
  end
end
# returns hash of links ($u) (key),
# anchor text ($y, $3, hostname), and additional labels ($z) (array value)
# @param [MARC::Record] the MARC record being parsed
# @return [Hash] the values used to construct the links
def electronic_access_links(record)
  links = {}
  holding_856s = {}
  Traject::MarcExtractor.cached('856').collect_matching_lines(record) do |field, spec, extractor|
    anchor_text = false
    z_label = false
    url_key = false
    holding_id = nil
    field.subfields.each do |s_field|
      holding_id = s_field.value if s_field.code == '0'
      # e. g. http://arks.princeton.edu/ark:/88435/7d278t10z, https://drive.google.com/open?id=0B3HwfRG3YqiNVVR4bXNvRzNwaGs
      url_key = s_field.value if s_field.code == 'u'
      # e. g. "Curatorial documentation"
      z_label = s_field.value if s_field.code == 'z'
      # $y/$3/$x values are concatenated with ": " into the anchor text
      if s_field.code == 'y' || s_field.code == '3' || s_field.code == 'x'
        if anchor_text
          anchor_text << ": #{s_field.value}"
        else
          anchor_text = s_field.value
        end
      end
    end
    logger.error "#{record['001']} - no url in 856 field" unless url_key
    # URI.parse(false) when $u is absent also lands in the rescue branch
    url = begin
      url = URI.parse(url_key)
    rescue StandardError => err
      logger.error "#{record['001']} - invalid URL in 856 field"
      nil
    end
    if url
      if url.host
        # Default to the host for the URL for the <a> text content
        anchor_text = url.host unless anchor_text
        # Retrieve the ARK resource
        bib_id_field = record['001']
        bib_id = bib_id_field.value
        # Extract the ARK from the URL (if the URL is indeed an ARK)
        url = URI::ARK.parse(url: url) if URI::ARK.ark?(url: url)
        # ...and attempt to build an Orangelight URL from the (cached) mappings exposed by the repositories
        builder = OrangelightUrlBuilder.new(ark_cache: ark_cache)
        orangelight_url = builder.build(url: url)
        url_key = orangelight_url.to_s unless orangelight_url.nil?
      end
      # Build the URL
      url_labels = [anchor_text] # anchor text is first element
      url_labels << z_label if z_label # optional 2nd element if z
      # 856s carrying a holding id ($0) are grouped separately per holding
      if holding_id.nil?
        links[url_key] = url_labels
      else
        holding_856s[holding_id] = { url_key => url_labels }
      end
    end
  end
  links['holding_record_856s'] = holding_856s unless holding_856s == {}
  links
end
# Strips a leading parenthesized prefix — e.g. "(OCoLC)" — from an 035
# standard number, leaving the bare identifier untouched otherwise.
# @param standard_no [String] raw 035 value
# @return [String] identifier without its source prefix
def remove_parens_035(standard_no)
  leading_parens = /^\(.*?\)/
  standard_no.gsub(leading_parens, '')
end
# Exact-match whitelist of subject-subdivision values that count as genres
# (compared after punctuation trimming in process_genre_facet).
# Frozen to prevent accidental mutation of a shared constant.
GENRES = [
  'Bibliography',
  'Biography',
  'Catalogs',
  'Catalogues raisonnes',
  'Commentaries',
  'Congresses',
  'Diaries',
  'Dictionaries',
  'Drama',
  'Encyclopedias',
  'Exhibitions',
  'Fiction',
  'Guidebooks',
  'In art',
  'Indexes',
  'Librettos',
  'Manuscripts',
  'Newspapers',
  'Periodicals',
  'Pictorial works',
  'Poetry',
  'Portraits',
  'Scores',
  'Songs and music',
  'Sources',
  'Statistics',
  'Texts',
  'Translations'
].freeze
# Genre terms matched by containment rather than exact equality (the lookup
# in process_genre_facet uses `genre[g]`, a substring test).
# Frozen to prevent accidental mutation of a shared constant.
GENRE_STARTS_WITH = [
  'Census',
  'Maps',
  'Methods',
  'Parts',
  'Personal narratives',
  'Scores and parts',
  'Study and teaching',
  'Translations into '
].freeze
# 600/610/611/630/650/651 $x and $v filtered
# 655 $a, $v, $x filtered
# Builds the genre facet: $x subdivisions are kept only when whitelisted;
# $v (and 655 $a) values are kept wholesale unless blank.
# @param record [MARC::Record]
# @return [Array<String>] de-duplicated genre terms
def process_genre_facet record
  genres = []
  # Pass 1: general subdivisions ($x) — only whitelisted values are kept
  Traject::MarcExtractor.cached('600|*0|x:610|*0|x:611|*0|x:630|*0|x:650|*0|x:651|*0|x:655|*0|x').collect_matching_lines(record) do |field, spec, extractor|
    genre = extractor.collect_subfields(field, spec).first
    unless genre.nil?
      genre = Traject::Macros::Marc21.trim_punctuation(genre)
      # NOTE(review): `genre[g]` is a substring-containment test, so the
      # GENRE_STARTS_WITH terms actually match anywhere in the string, not
      # just at the start — confirm intent
      genres << genre if GENRES.include?(genre) || GENRE_STARTS_WITH.any? { |g| genre[g] }
    end
  end
  # Pass 2: form subdivisions ($v, plus 655 $a) are taken as-is unless blank
  Traject::MarcExtractor.cached('600|*0|v:610|*0|v:611|*0|v:630|*0|v:650|*0|v:651|*0|v:655|*0|a:655|*0|v').collect_matching_lines(record) do |field, spec, extractor|
    genre = extractor.collect_subfields(field, spec).first
    unless genre.nil?
      genre = Traject::Macros::Marc21.trim_punctuation(genre)
      if genre.match(/^\s+$/)
        logger.error "#{record['001']} - Blank genre field"
      else
        genres << genre
      end
    end
  end
  genres.uniq
end
# Collects the title portion of name-title fields: every subfield value from
# the first $t onward, joined and punctuation-trimmed. (A repeated $t is
# appended by both branches, matching the original accumulation order.)
# @param record [MARC::Record]
# @param fields [String] Traject field spec of interest
# @return [Array<String>] one trimmed title string per field containing a $t
def everything_after_t record, fields
  values = []
  Traject::MarcExtractor.cached(fields).collect_matching_lines(record) do |field, spec, extractor|
    seen_t = false
    collected = []
    field.subfields.each do |sub|
      collected << sub.value if seen_t
      next unless sub.code == 't'
      collected << sub.value
      seen_t = true
    end
    next if collected.empty?
    values << Traject::Macros::Marc21.trim_punctuation(collected.join(' '))
  end
  values
end
# Collects the name portion of name-title fields: every subfield value up to
# and including the first $t. Fields that contain no $t are skipped entirely.
# @param record [MARC::Record]
# @param fields [String] Traject field spec of interest
# @return [Array<String>] trimmed name+title strings, one per qualifying field
def everything_through_t record, fields
  values = []
  Traject::MarcExtractor.cached(fields).collect_matching_lines(record) do |field, spec, extractor|
    t_found = false
    title = []
    field.subfields.each do |s_field|
      title << s_field.value
      if s_field.code == 't'
        t_found = true
        break
      end
    end
    # `&&` replaces the original low-precedence `or`; a positive flag also
    # reads more directly than the old double-negative `non_t`
    values << Traject::Macros::Marc21.trim_punctuation(title.join(' ')) if t_found && !title.empty?
  end
  values
end
##
# @param record [MARC::Record]
# @param fields [String] MARC fields of interest
# @return [Array] of name-titles each in an [Array], each element [String] split by hierarchy,
# both name ($a) and title ($t) are required
def prep_name_title record, fields
  values = []
  Traject::MarcExtractor.cached(fields).collect_matching_lines(record) do |field, spec, extractor|
    name_title = []
    author = []
    non_a = true # flips to false once an $a (name) subfield is seen
    non_t = true # flips to false once a $t (title) subfield is seen
    field.subfields.each do |s_field|
      # honor any subfield filter present in the extractor spec
      next if (!spec.subfields.nil? && !spec.subfields.include?(s_field.code))
      non_a = false if s_field.code == 'a'
      non_t = false if s_field.code == 't'
      # everything before the first $t belongs to the author; from $t onward
      # each subfield becomes its own title-hierarchy element
      if non_t
        author << s_field.value
      else
        name_title << s_field.value
      end
    end
    # keep the entry only when both a name and a title were present
    unless (non_a || non_t)
      name_title.unshift(author.join(' '))
      values << name_title unless name_title.empty?
    end
  end
  values
end
# @param fields [Array] hierarchy portions from name-title entries
# @return [Array] for each entry, the cumulative joins of its elements
#   (author element included), each trimmed of trailing punctuation
def join_hierarchy_without_author fields
  fields.map do |hierarchy|
    hierarchy.each_index.map do |i|
      Traject::Macros::Marc21.trim_punctuation(hierarchy[0..i].join(' '))
    end
  end
end
# @param fields [Array] hierarchy portions from name-title entries
# @return [Array] cumulative name-title joins with the author-only first
#   element removed from each entry
def join_hierarchy fields
  with_author = join_hierarchy_without_author(fields)
  with_author.collect { |entry| entry[1..-1] }
end
# holding block json hash keyed on mfhd id including location, library, call number, shelving title,
# location note, location has, indexes, and supplements; pulls from mfhd 852,
# 866, 867, and 868, plus 876 item data for ReCAP records.
# assumes exactly 1 852 is present per mfhd (it saves the last 852 it finds)
# @param record [MARC::Record]
# @return [Hash] holding hashes keyed on mfhd (holding) id
def process_holdings record
  all_holdings = {}
  Traject::MarcExtractor.cached('852').collect_matching_lines(record) do |field, spec, extractor|
    holding = {}
    holding_id = nil
    field.subfields.each do |s_field|
      if s_field.code == '0'
        holding_id = s_field.value
      elsif s_field.code == 'b'
        ## Location and Library aren't loading correctly with SCSB Records
        holding['location'] ||= Traject::TranslationMap.new("locations", :default => "__passthrough__")[s_field.value]
        holding['library'] ||= Traject::TranslationMap.new("location_display", :default => "__passthrough__")[s_field.value]
        holding['location_code'] ||= s_field.value
      elsif /[ckhij]/.match(s_field.code)
        # $c/$k/$h/$i/$j make up the call number; $c is excluded from the
        # browseable form
        holding['call_number'] ||= []
        holding['call_number'] << s_field.value
        unless s_field.code == 'c'
          holding['call_number_browse'] ||= []
          holding['call_number_browse'] << s_field.value
        end
      elsif s_field.code == 'l'
        holding['shelving_title'] ||= []
        holding['shelving_title'] << s_field.value
      elsif s_field.code == 't' && holding['copy_number'].nil?
        holding['copy_number'] = s_field.value
      elsif s_field.code == 'z'
        holding['location_note'] ||= []
        holding['location_note'] << s_field.value
      end
    end
    holding['call_number'] = holding['call_number'].join(' ') if holding['call_number']
    holding['call_number_browse'] = holding['call_number_browse'].join(' ') if holding['call_number_browse']
    all_holdings[holding_id] = holding unless holding_id.nil?
  end
  # 866 = "location has", 867 = supplements, 868 = indexes — all three share
  # the same $0/$a/$z shape, so a single helper handles them
  append_holding_notes(record, all_holdings, '866az', 'location_has')
  append_holding_notes(record, all_holdings, '8670az', 'supplements')
  append_holding_notes(record, all_holdings, '8680az', 'indexes')
  ### Added for ReCAP records
  Traject::MarcExtractor.cached('87603ahjptxz').collect_matching_lines(record) do |field, spec, extractor|
    item = {}
    field.subfields.each do |s_field|
      if s_field.code == '0'
        item[:holding_id] = s_field.value
      elsif s_field.code == '3'
        item[:enumeration] = s_field.value
      elsif s_field.code == 'a'
        item[:id] = s_field.value
      elsif s_field.code == 'h'
        item[:use_statement] = s_field.value
      elsif s_field.code == 'j'
        item[:status_at_load] = s_field.value
      elsif s_field.code == 'p'
        item[:barcode] = s_field.value
      elsif s_field.code == 't'
        item[:copy_number] = s_field.value
      elsif s_field.code == 'x'
        item[:cgc] = s_field.value
      elsif s_field.code == 'z'
        item[:collection_code] = s_field.value
      end
    end
    holding = all_holdings[item[:holding_id]]
    if holding.nil?
      # Fix: previously this dereferenced nil (NoMethodError) when the 876 $0
      # referenced a holding with no matching 852; log and skip instead
      logger.error "#{record['001']} - 876 field references unknown holding #{item[:holding_id]}"
    else
      holding["items"] ||= []
      holding["items"] << item
    end
  end
  all_holdings
end

# Appends joined $a/$z note text from the given field spec (866/867/868) onto
# the named key of the matching holding in all_holdings; $0 links each note
# field to its mfhd. Notes for unknown holdings are ignored (as before).
def append_holding_notes(record, all_holdings, spec_string, key)
  Traject::MarcExtractor.cached(spec_string).collect_matching_lines(record) do |field, spec, extractor|
    value = []
    holding_id = nil
    field.subfields.each do |s_field|
      if s_field.code == '0'
        holding_id = s_field.value
      elsif s_field.code == 'a' || s_field.code == 'z'
        value << s_field.value
      end
    end
    if all_holdings[holding_id] && !value.empty?
      all_holdings[holding_id][key] ||= []
      all_holdings[holding_id][key] << value.join(' ')
    end
  end
end
# Builds ReCAP item notes of the form "<partner> - <group>": partner is
# N (scsbnypl) or C (scsbcul) taken from the 852 $b (blank for any other
# value), and group is S(hared), P(rivate), or O(ther) from each 876 $x.
# @param record [MARC::Record]
# @return [Array<String>] one note per 876 field
def process_recap_notes record
  item_notes = []
  partner_lib = nil
  Traject::MarcExtractor.cached('852').collect_matching_lines(record) do |field, spec, extractor|
    field.subfields.each do |sub|
      partner_lib = sub.value if sub.code == 'b'
    end
  end
  Traject::MarcExtractor.cached('87603ahjptxz').collect_matching_lines(record) do |field, spec, extractor|
    col_group = ''
    field.subfields.each do |sub|
      next unless sub.code == 'x'
      col_group = case sub.value
                  when 'Shared' then 'S'
                  when 'Private' then 'P'
                  else 'O'
                  end
    end
    partner_display_string = case partner_lib
                             when 'scsbnypl' then 'N'
                             when 'scsbcul' then 'C'
                             end
    item_notes << "#{partner_display_string} - #{col_group}"
  end
  item_notes
end
|
###########################################
#
# General configuration for this cluster
#
###########################################
default['bcpc']['country'] = "US"
default['bcpc']['state'] = "NY"
default['bcpc']['location'] = "New York"
default['bcpc']['organization'] = "Bloomberg"
default['bcpc']['openstack_release'] = "icehouse"
# Can be "updates" or "proposed"
default['bcpc']['openstack_branch'] = "proposed"
# Should be kvm (or qemu if testing in VMs)
default['bcpc']['virt_type'] = "kvm"
# Region name for this cluster
default['bcpc']['region_name'] = node.chef_environment
# Domain name that will be used for DNS
default['bcpc']['domain_name'] = "bcpc.example.com"
# Key if Cobalt+VMS is to be used
default['bcpc']['vms_key'] = nil
# Flags to enable/disable BCPC cluster features
# This will enable elasticsearch & kibana on head nodes and fluentd on all nodes
default['bcpc']['enabled']['logging'] = true
# This will enable graphite web and carbon on head nodes and diamond on all nodes
default['bcpc']['enabled']['metrics'] = true
# This will enable zabbix server on head nodes and zabbix agent on all nodes
default['bcpc']['enabled']['monitoring'] = true
# This will enable powerdns on head nodes
default['bcpc']['enabled']['dns'] = true
# This will enable iptables firewall on all nodes
default['bcpc']['enabled']['host_firewall'] = true
# This will enable encryption of the chef data bag
default['bcpc']['enabled']['encrypt_data_bag'] = false
# This will enable auto-upgrades on all nodes (not recommended for stability)
default['bcpc']['enabled']['apt_upgrade'] = false
# This will enable the extra healthchecks for keepalived (VIP management)
default['bcpc']['enabled']['keepalived_checks'] = true
# This can be either 'sql' or 'ldap' to either store identities
# in the mysql DB or the LDAP server
default['bcpc']['keystone']['backend'] = 'ldap'
###########################################
#
# Host-specific defaults for the cluster
#
###########################################
default['bcpc']['ceph']['hdd_disks'] = ["sdb", "sdc"]
default['bcpc']['ceph']['ssd_disks'] = ["sdd", "sde"]
default['bcpc']['ceph']['enabled_pools'] = ["ssd", "hdd"]
default['bcpc']['management']['interface'] = "eth0"
default['bcpc']['storage']['interface'] = "eth1"
default['bcpc']['floating']['interface'] = "eth2"
# Fixed-network VLANs are carried on the floating interface by default
default['bcpc']['fixed']['vlan_interface'] = node['bcpc']['floating']['interface']
###########################################
#
# Ceph settings for the cluster
#
###########################################
default['bcpc']['ceph']['chooseleaf'] = "rack"
default['bcpc']['ceph']['pgp_auto_adjust'] = false
default['bcpc']['ceph']['pgs_per_node'] = 1024
# The 'portion' parameters should add up to ~100 across all pools
default['bcpc']['ceph']['default']['replicas'] = 2
default['bcpc']['ceph']['default']['type'] = 'hdd'
default['bcpc']['ceph']['rgw']['replicas'] = 3
default['bcpc']['ceph']['rgw']['portion'] = 33
default['bcpc']['ceph']['rgw']['type'] = 'hdd'
default['bcpc']['ceph']['images']['replicas'] = 3
default['bcpc']['ceph']['images']['portion'] = 33
default['bcpc']['ceph']['images']['type'] = 'ssd'
default['bcpc']['ceph']['images']['name'] = "images"
default['bcpc']['ceph']['volumes']['replicas'] = 3
default['bcpc']['ceph']['volumes']['portion'] = 33
default['bcpc']['ceph']['volumes']['name'] = "volumes"
default['bcpc']['ceph']['vms_disk']['replicas'] = 3
default['bcpc']['ceph']['vms_disk']['portion'] = 10
default['bcpc']['ceph']['vms_disk']['type'] = 'ssd'
default['bcpc']['ceph']['vms_disk']['name'] = "vmsdisk"
default['bcpc']['ceph']['vms_mem']['replicas'] = 3
default['bcpc']['ceph']['vms_mem']['portion'] = 10
default['bcpc']['ceph']['vms_mem']['type'] = 'ssd'
default['bcpc']['ceph']['vms_mem']['name'] = "vmsmem"
# Ruleset ids assigned to the ssd and hdd pool types
default['bcpc']['ceph']['ssd']['ruleset'] = 1
default['bcpc']['ceph']['hdd']['ruleset'] = 2
###########################################
#
# Network settings for the cluster
#
###########################################
default['bcpc']['management']['vip'] = "10.17.1.15"
default['bcpc']['management']['netmask'] = "255.255.255.0"
default['bcpc']['management']['cidr'] = "10.17.1.0/24"
default['bcpc']['management']['gateway'] = "10.17.1.1"
default['bcpc']['metadata']['ip'] = "169.254.169.254"
default['bcpc']['storage']['netmask'] = "255.255.255.0"
default['bcpc']['storage']['cidr'] = "100.100.0.0/24"
default['bcpc']['storage']['gateway'] = "100.100.0.1"
default['bcpc']['floating']['vip'] = "192.168.43.15"
default['bcpc']['floating']['netmask'] = "255.255.255.0"
default['bcpc']['floating']['cidr'] = "192.168.43.0/24"
default['bcpc']['floating']['gateway'] = "192.168.43.2"
default['bcpc']['floating']['available_subnet'] = "192.168.43.128/25"
default['bcpc']['fixed']['cidr'] = "1.127.0.0/16"
default['bcpc']['fixed']['vlan_start'] = "1000"
default['bcpc']['fixed']['num_networks'] = "100"
default['bcpc']['fixed']['network_size'] = "256"
default['bcpc']['fixed']['dhcp_lease_time'] = 3600
default['bcpc']['ntp_servers'] = ["pool.ntp.org"]
default['bcpc']['dns_servers'] = ["8.8.8.8", "8.8.4.4"]
###########################################
#
# Repos for things we rely on
#
###########################################
default['bcpc']['repos']['ceph'] = "http://www.ceph.com/debian-firefly"
default['bcpc']['repos']['ceph-extras'] = "http://www.ceph.com/packages/ceph-extras/debian"
default['bcpc']['repos']['ceph-el6-x86_64'] = "http://ceph.com/rpm-dumpling/el6/x86_64"
default['bcpc']['repos']['ceph-el6-noarch'] = "http://ceph.com/rpm-dumpling/el6/noarch"
default['bcpc']['repos']['rabbitmq'] = "http://www.rabbitmq.com/debian"
default['bcpc']['repos']['mysql'] = "http://repo.percona.com/apt"
default['bcpc']['repos']['haproxy'] = "http://ppa.launchpad.net/vbernat/haproxy-1.5/ubuntu"
default['bcpc']['repos']['openstack'] = "http://ubuntu-cloud.archive.canonical.com/ubuntu"
default['bcpc']['repos']['hwraid'] = "http://hwraid.le-vert.net/ubuntu"
default['bcpc']['repos']['fluentd'] = "http://packages.treasure-data.com/precise"
default['bcpc']['repos']['ceph-apache'] = "http://gitbuilder.ceph.com/apache2-deb-precise-x86_64-basic/ref/master"
default['bcpc']['repos']['ceph-fcgi'] = "http://gitbuilder.ceph.com/libapache-mod-fastcgi-deb-precise-x86_64-basic/ref/master"
default['bcpc']['repos']['gridcentric'] = "http://downloads.gridcentric.com/packages/%s/%s/ubuntu"
###########################################
#
# [Optional] If using apt-mirror to pull down repos, we use these settings.
#
###########################################
# Note - us.archive.ubuntu.com tends to rate-limit pretty hard.
# If you are on East Coast US, we recommend Columbia University in env file:
# "mirror" : {
# "ubuntu": "mirror.cc.columbia.edu/pub/linux/ubuntu/archive"
# }
# For a complete list of Ubuntu mirrors, please see:
# https://launchpad.net/ubuntu/+archivemirrors
default['bcpc']['mirror']['ubuntu'] = "us.archive.ubuntu.com/ubuntu"
default['bcpc']['mirror']['ubuntu-dist'] = ['precise']
default['bcpc']['mirror']['ceph-dist'] = ['firefly']
default['bcpc']['mirror']['os-dist'] = ['icehouse']
###########################################
#
# Default names for db's, pools, and users
#
###########################################
default['bcpc']['dbname']['nova'] = "nova"
default['bcpc']['dbname']['cinder'] = "cinder"
default['bcpc']['dbname']['glance'] = "glance"
default['bcpc']['dbname']['horizon'] = "horizon"
default['bcpc']['dbname']['keystone'] = "keystone"
default['bcpc']['dbname']['heat'] = "heat"
default['bcpc']['dbname']['ceilometer'] = "ceilometer"
default['bcpc']['dbname']['graphite'] = "graphite"
default['bcpc']['dbname']['pdns'] = "pdns"
default['bcpc']['dbname']['zabbix'] = "zabbix"
default['bcpc']['admin_tenant'] = "AdminTenant"
default['bcpc']['admin_role'] = "Admin"
default['bcpc']['member_role'] = "Member"
default['bcpc']['admin_email'] = "admin@localhost.com"
default['bcpc']['zabbix']['user'] = "zabbix"
default['bcpc']['zabbix']['group'] = "adm"
default['bcpc']['ports']['apache']['radosgw'] = 80
default['bcpc']['ports']['apache']['radosgw_https'] = 443
default['bcpc']['ports']['haproxy']['radosgw'] = 80
default['bcpc']['ports']['haproxy']['radosgw_https'] = 443
# Can be set to 'http' or 'https'
default['bcpc']['protocol']['keystone'] = "https"
default['bcpc']['protocol']['glance'] = "https"
default['bcpc']['protocol']['nova'] = "https"
default['bcpc']['protocol']['cinder'] = "https"
default['bcpc']['protocol']['heat'] = "https"
Add comments explaining what each of the boolean feature flags enables
###########################################
#
# General configuration for this cluster
#
###########################################
default['bcpc']['country'] = "US"
default['bcpc']['state'] = "NY"
default['bcpc']['location'] = "New York"
default['bcpc']['organization'] = "Bloomberg"
default['bcpc']['openstack_release'] = "icehouse"
# Can be "updates" or "proposed"
default['bcpc']['openstack_branch'] = "proposed"
# Should be kvm (or qemu if testing in VMs that don't support VT-x)
default['bcpc']['virt_type'] = "kvm"
# Region name for this cluster
default['bcpc']['region_name'] = node.chef_environment
# Domain name for this cluster (used in many configs)
default['bcpc']['domain_name'] = "bcpc.example.com"
# Key if Cobalt+VMS is to be used
default['bcpc']['vms_key'] = nil
###########################################
#
# Flags to enable/disable BCPC cluster features
#
###########################################
# This will enable elasticsearch & kibana on head nodes and fluentd on all nodes
default['bcpc']['enabled']['logging'] = true
# This will enable graphite web and carbon on head nodes and diamond on all nodes
default['bcpc']['enabled']['metrics'] = true
# This will enable zabbix server on head nodes and zabbix agent on all nodes
default['bcpc']['enabled']['monitoring'] = true
# This will enable powerdns on head nodes
default['bcpc']['enabled']['dns'] = true
# This will enable iptables firewall on all nodes
default['bcpc']['enabled']['host_firewall'] = true
# This will enable encryption of the chef data bag
default['bcpc']['enabled']['encrypt_data_bag'] = false
# This will enable auto-upgrades on all nodes (not recommended for stability)
default['bcpc']['enabled']['apt_upgrade'] = false
# This will enable the extra healthchecks for keepalived (VIP management)
default['bcpc']['enabled']['keepalived_checks'] = true
# This can be either 'sql' or 'ldap' to either store identities
# in the mysql DB or the LDAP server
default['bcpc']['keystone']['backend'] = 'ldap'
###########################################
#
# Host-specific defaults for the cluster
#
###########################################
default['bcpc']['ceph']['hdd_disks'] = ["sdb", "sdc"]
default['bcpc']['ceph']['ssd_disks'] = ["sdd", "sde"]
default['bcpc']['ceph']['enabled_pools'] = ["ssd", "hdd"]
default['bcpc']['management']['interface'] = "eth0"
default['bcpc']['storage']['interface'] = "eth1"
default['bcpc']['floating']['interface'] = "eth2"
# Fixed-network VLANs are carried on the floating interface by default
default['bcpc']['fixed']['vlan_interface'] = node['bcpc']['floating']['interface']
###########################################
#
# Ceph settings for the cluster
#
###########################################
default['bcpc']['ceph']['chooseleaf'] = "rack"
default['bcpc']['ceph']['pgp_auto_adjust'] = false
default['bcpc']['ceph']['pgs_per_node'] = 1024
# The 'portion' parameters should add up to ~100 across all pools
default['bcpc']['ceph']['default']['replicas'] = 2
default['bcpc']['ceph']['default']['type'] = 'hdd'
default['bcpc']['ceph']['rgw']['replicas'] = 3
default['bcpc']['ceph']['rgw']['portion'] = 33
default['bcpc']['ceph']['rgw']['type'] = 'hdd'
default['bcpc']['ceph']['images']['replicas'] = 3
default['bcpc']['ceph']['images']['portion'] = 33
default['bcpc']['ceph']['images']['type'] = 'ssd'
default['bcpc']['ceph']['images']['name'] = "images"
default['bcpc']['ceph']['volumes']['replicas'] = 3
default['bcpc']['ceph']['volumes']['portion'] = 33
default['bcpc']['ceph']['volumes']['name'] = "volumes"
default['bcpc']['ceph']['vms_disk']['replicas'] = 3
default['bcpc']['ceph']['vms_disk']['portion'] = 10
default['bcpc']['ceph']['vms_disk']['type'] = 'ssd'
default['bcpc']['ceph']['vms_disk']['name'] = "vmsdisk"
default['bcpc']['ceph']['vms_mem']['replicas'] = 3
default['bcpc']['ceph']['vms_mem']['portion'] = 10
default['bcpc']['ceph']['vms_mem']['type'] = 'ssd'
default['bcpc']['ceph']['vms_mem']['name'] = "vmsmem"
# Ruleset ids assigned to the ssd and hdd pool types
default['bcpc']['ceph']['ssd']['ruleset'] = 1
default['bcpc']['ceph']['hdd']['ruleset'] = 2
###########################################
#
# Network settings for the cluster
#
###########################################
default['bcpc']['management']['vip'] = "10.17.1.15"
default['bcpc']['management']['netmask'] = "255.255.255.0"
default['bcpc']['management']['cidr'] = "10.17.1.0/24"
default['bcpc']['management']['gateway'] = "10.17.1.1"
default['bcpc']['metadata']['ip'] = "169.254.169.254"
default['bcpc']['storage']['netmask'] = "255.255.255.0"
default['bcpc']['storage']['cidr'] = "100.100.0.0/24"
default['bcpc']['storage']['gateway'] = "100.100.0.1"
default['bcpc']['floating']['vip'] = "192.168.43.15"
default['bcpc']['floating']['netmask'] = "255.255.255.0"
default['bcpc']['floating']['cidr'] = "192.168.43.0/24"
default['bcpc']['floating']['gateway'] = "192.168.43.2"
default['bcpc']['floating']['available_subnet'] = "192.168.43.128/25"
default['bcpc']['fixed']['cidr'] = "1.127.0.0/16"
default['bcpc']['fixed']['vlan_start'] = "1000"
default['bcpc']['fixed']['num_networks'] = "100"
default['bcpc']['fixed']['network_size'] = "256"
default['bcpc']['fixed']['dhcp_lease_time'] = 3600
default['bcpc']['ntp_servers'] = ["pool.ntp.org"]
default['bcpc']['dns_servers'] = ["8.8.8.8", "8.8.4.4"]
###########################################
#
# Repos for things we rely on
#
###########################################
default['bcpc']['repos']['ceph'] = "http://www.ceph.com/debian-firefly"
default['bcpc']['repos']['ceph-extras'] = "http://www.ceph.com/packages/ceph-extras/debian"
default['bcpc']['repos']['ceph-el6-x86_64'] = "http://ceph.com/rpm-dumpling/el6/x86_64"
default['bcpc']['repos']['ceph-el6-noarch'] = "http://ceph.com/rpm-dumpling/el6/noarch"
default['bcpc']['repos']['rabbitmq'] = "http://www.rabbitmq.com/debian"
default['bcpc']['repos']['mysql'] = "http://repo.percona.com/apt"
default['bcpc']['repos']['haproxy'] = "http://ppa.launchpad.net/vbernat/haproxy-1.5/ubuntu"
default['bcpc']['repos']['openstack'] = "http://ubuntu-cloud.archive.canonical.com/ubuntu"
default['bcpc']['repos']['hwraid'] = "http://hwraid.le-vert.net/ubuntu"
default['bcpc']['repos']['fluentd'] = "http://packages.treasure-data.com/precise"
default['bcpc']['repos']['ceph-apache'] = "http://gitbuilder.ceph.com/apache2-deb-precise-x86_64-basic/ref/master"
default['bcpc']['repos']['ceph-fcgi'] = "http://gitbuilder.ceph.com/libapache-mod-fastcgi-deb-precise-x86_64-basic/ref/master"
default['bcpc']['repos']['gridcentric'] = "http://downloads.gridcentric.com/packages/%s/%s/ubuntu"
###########################################
#
# [Optional] If using apt-mirror to pull down repos, we use these settings.
#
###########################################
# Note - us.archive.ubuntu.com tends to rate-limit pretty hard.
# If you are on East Coast US, we recommend Columbia University in env file:
# "mirror" : {
# "ubuntu": "mirror.cc.columbia.edu/pub/linux/ubuntu/archive"
# }
# For a complete list of Ubuntu mirrors, please see:
# https://launchpad.net/ubuntu/+archivemirrors
default['bcpc']['mirror']['ubuntu'] = "us.archive.ubuntu.com/ubuntu"
default['bcpc']['mirror']['ubuntu-dist'] = ['precise']
default['bcpc']['mirror']['ceph-dist'] = ['firefly']
default['bcpc']['mirror']['os-dist'] = ['icehouse']
###########################################
#
# Default names for db's, pools, and users
#
###########################################
default['bcpc']['dbname']['nova'] = "nova"
default['bcpc']['dbname']['cinder'] = "cinder"
default['bcpc']['dbname']['glance'] = "glance"
default['bcpc']['dbname']['horizon'] = "horizon"
default['bcpc']['dbname']['keystone'] = "keystone"
default['bcpc']['dbname']['heat'] = "heat"
default['bcpc']['dbname']['ceilometer'] = "ceilometer"
default['bcpc']['dbname']['graphite'] = "graphite"
default['bcpc']['dbname']['pdns'] = "pdns"
default['bcpc']['dbname']['zabbix'] = "zabbix"
default['bcpc']['admin_tenant'] = "AdminTenant"
default['bcpc']['admin_role'] = "Admin"
default['bcpc']['member_role'] = "Member"
default['bcpc']['admin_email'] = "admin@localhost.com"
default['bcpc']['zabbix']['user'] = "zabbix"
default['bcpc']['zabbix']['group'] = "adm"
default['bcpc']['ports']['apache']['radosgw'] = 80
default['bcpc']['ports']['apache']['radosgw_https'] = 443
default['bcpc']['ports']['haproxy']['radosgw'] = 80
default['bcpc']['ports']['haproxy']['radosgw_https'] = 443
# Can be set to 'http' or 'https'
default['bcpc']['protocol']['keystone'] = "https"
default['bcpc']['protocol']['glance'] = "https"
default['bcpc']['protocol']['nova'] = "https"
default['bcpc']['protocol']['cinder'] = "https"
default['bcpc']['protocol']['heat'] = "https"
|
###########################################
#
# General configuration for this cluster
#
###########################################
default['bcpc']['country'] = "US"
default['bcpc']['state'] = "NY"
default['bcpc']['location'] = "New York"
default['bcpc']['organization'] = "Bloomberg"
default['bcpc']['openstack_release'] = "kilo"
# Can be "updates" or "proposed"
default['bcpc']['openstack_branch'] = "proposed"
# Should be kvm (or qemu if testing in VMs that don't support VT-x)
default['bcpc']['virt_type'] = "kvm"
# Define the kernel to be installed. By default, track latest LTS kernel
default['bcpc']['preseed']['kernel'] = "linux-image-generic-lts-trusty"
# ulimits for libvirt-bin
default['bcpc']['libvirt-bin']['ulimit']['nofile'] = 4096
# Region name for this cluster
default['bcpc']['region_name'] = node.chef_environment
# Domain name for this cluster (used in many configs)
default['bcpc']['cluster_domain'] = "bcpc.example.com"
# Hypervisor domain (domain used by actual machines)
default['bcpc']['hypervisor_domain'] = "hypervisor-bcpc.example.com"
# Key if Cobalt+VMS is to be used
default['bcpc']['vms_key'] = nil
# custom SSL certificate (specify filename).
# certificate files should be stored under 'files/default' directory
default['bcpc']['ssl_certificate'] = nil
default['bcpc']['ssl_private_key'] = nil
default['bcpc']['ssl_intermediate_certificate'] = nil
# custom SSL certificate for Rados Gateway (S3)
default['bcpc']['s3_ssl_certificate'] = nil
default['bcpc']['s3_ssl_private_key'] = nil
default['bcpc']['s3_ssl_intermediate_certificate'] = nil
###########################################
#
# Package versions
#
###########################################
default['bcpc']['elasticsearch']['version'] = '1.5.1'
default['bcpc']['ceph']['version'] = '0.94.5-1trusty'
default['bcpc']['ceph']['version_number'] = '0.94.5'
default['bcpc']['erlang']['version'] = '1:17.5.3'
default['bcpc']['haproxy']['version'] = '1.5.15-1ppa1~trusty'
default['bcpc']['kibana']['version'] = '4.0.2'
default['bcpc']['rabbitmq']['version'] = '3.5.7-1'
###########################################
#
# Flags to enable/disable BCPC cluster features
#
###########################################
# This will enable elasticsearch & kibana on head nodes and fluentd on all nodes
default['bcpc']['enabled']['logging'] = true
# This will enable graphite web and carbon on head nodes and diamond on all nodes
default['bcpc']['enabled']['metrics'] = true
# This will enable zabbix server on head nodes and zabbix agent on all nodes
default['bcpc']['enabled']['monitoring'] = true
# This will enable powerdns on head nodes
default['bcpc']['enabled']['dns'] = true
# This will enable iptables firewall on all nodes
default['bcpc']['enabled']['host_firewall'] = true
# This will enable of encryption of the chef data bag
default['bcpc']['enabled']['encrypt_data_bag'] = false
# This will enable auto-upgrades on all nodes (not recommended for stability)
default['bcpc']['enabled']['apt_upgrade'] = false
# This will enable running apt-get update at the start of every Chef run
default['bcpc']['enabled']['always_update_package_lists'] = true
# This will enable the extra healthchecks for keepalived (VIP management)
default['bcpc']['enabled']['keepalived_checks'] = true
# This will enable the networking test scripts
default['bcpc']['enabled']['network_tests'] = true
# This will enable httpd disk caching for radosgw in apache
default['bcpc']['enabled']['radosgw_cache'] = false
# This will enable using TPM-based hwrngd
default['bcpc']['enabled']['tpm'] = false
# This will block VMs from talking to the management network
default['bcpc']['enabled']['secure_fixed_networks'] = true
# Toggle to enable/disable swap memory
default['bcpc']['enabled']['swap'] = true
# Toggle to enable/disable Heat (OpenStack Cloud Formation)
default['bcpc']['enabled']['heat'] = false
# If radosgw_cache is enabled, default to 20MB max file size
default['bcpc']['radosgw']['cache_max_file_size'] = 20000000
###########################################
#
# Host-specific defaults for the cluster
#
###########################################
default['bcpc']['ceph']['hdd_disks'] = ["sdb", "sdc"]
default['bcpc']['ceph']['ssd_disks'] = ["sdd", "sde"]
default['bcpc']['ceph']['enabled_pools'] = ["ssd", "hdd"]
default['bcpc']['management']['interface'] = "eth0"
default['bcpc']['storage']['interface'] = "eth1"
default['bcpc']['floating']['interface'] = "eth2"
default['bcpc']['fixed']['vlan_interface'] = node['bcpc']['floating']['interface']
###########################################
#
# Ceph settings for the cluster
#
###########################################
# Trusty is not available at this time for ceph-extras
default['bcpc']['ceph']['extras']['dist'] = "precise"
# To use apache instead of civetweb, make the following value anything but 'civetweb'
default['bcpc']['ceph']['frontend'] = "civetweb"
default['bcpc']['ceph']['chooseleaf'] = "rack"
default['bcpc']['ceph']['pgp_auto_adjust'] = false
# Need to review...
default['bcpc']['ceph']['pgs_per_node'] = 1024
default['bcpc']['ceph']['max_pgs_per_osd'] = 300
# Journal size could be 10GB or higher in some cases
default['bcpc']['ceph']['journal_size'] = 2048
# The 'portion' parameters should add up to ~100 across all pools
default['bcpc']['ceph']['default']['replicas'] = 3
default['bcpc']['ceph']['default']['type'] = 'hdd'
default['bcpc']['ceph']['rgw']['replicas'] = 3
default['bcpc']['ceph']['rgw']['portion'] = 33
default['bcpc']['ceph']['rgw']['type'] = 'hdd'
default['bcpc']['ceph']['images']['replicas'] = 3
default['bcpc']['ceph']['images']['portion'] = 33
# Set images to hdd instead of sdd
default['bcpc']['ceph']['images']['type'] = 'hdd'
default['bcpc']['ceph']['images']['name'] = "images"
default['bcpc']['ceph']['volumes']['replicas'] = 3
default['bcpc']['ceph']['volumes']['portion'] = 33
default['bcpc']['ceph']['volumes']['name'] = "volumes"
# Created a new pool for VMs and set type to ssd
default['bcpc']['ceph']['vms']['replicas'] = 3
default['bcpc']['ceph']['vms']['portion'] = 33
default['bcpc']['ceph']['vms']['type'] = 'ssd'
default['bcpc']['ceph']['vms']['name'] = "vms"
# Set up crush rulesets
default['bcpc']['ceph']['ssd']['ruleset'] = 1
default['bcpc']['ceph']['hdd']['ruleset'] = 2
# If you are about to make a big change to the ceph cluster
# setting to true will reduce the load form the resulting
# ceph rebalance and keep things operational.
# See wiki for further details.
default['bcpc']['ceph']['rebalance'] = false
# Set the default niceness of Ceph OSD and monitor processes
default['bcpc']['ceph']['osd_niceness'] = -10
default['bcpc']['ceph']['mon_niceness'] = -10
###########################################
#
# RabbitMQ settings
#
###########################################
# if changing this setting, you will need to reset Mnesia
# on all RabbitMQ nodes in the cluster
default['bcpc']['rabbitmq']['durable_queues'] = true
# ulimits for RabbitMQ server
# (max open file descriptors for the rabbitmq-server process)
default['bcpc']['rabbitmq']['ulimit']['nofile'] = 4096
# Heartbeat timeout to detect dead RabbitMQ brokers
# (seconds; AMQP heartbeat interval negotiated with clients)
default['bcpc']['rabbitmq']['heartbeat'] = 60
###########################################
#
# Network settings for the cluster
#
###########################################
# Management network: VIP fronting cluster services (HAProxy).
default['bcpc']['management']['vip'] = "10.17.1.15"
default['bcpc']['management']['netmask'] = "255.255.255.0"
default['bcpc']['management']['cidr'] = "10.17.1.0/24"
default['bcpc']['management']['gateway'] = "10.17.1.1"
default['bcpc']['management']['interface'] = nil
# if 'interface' is a VLAN interface, specifying a parent allows MTUs
# to be set properly
default['bcpc']['management']['interface-parent'] = nil
# list of TCP ports that should be open on the management interface
# (generally stuff served via HAProxy)
default['bcpc']['management']['firewall_tcp_ports'] = [
  80,443,8088,7480,5000,35357,9292,8776,8773,8774,8004,8000,8777
]
# Standard link-local address instances use for the metadata service.
default['bcpc']['metadata']['ip'] = "169.254.169.254"
# Storage network: carries Ceph replication/client traffic.
default['bcpc']['storage']['netmask'] = "255.255.255.0"
default['bcpc']['storage']['cidr'] = "100.100.0.0/24"
default['bcpc']['storage']['gateway'] = "100.100.0.1"
default['bcpc']['storage']['interface'] = nil
# if 'interface' is a VLAN interface, specifying a parent allows MTUs
# to be set properly
default['bcpc']['storage']['interface-parent'] = nil
# Floating network: externally reachable addresses for instances.
default['bcpc']['floating']['vip'] = "192.168.43.15"
default['bcpc']['floating']['netmask'] = "255.255.255.0"
default['bcpc']['floating']['cidr'] = "192.168.43.0/24"
default['bcpc']['floating']['gateway'] = "192.168.43.2"
default['bcpc']['floating']['available_subnet'] = "192.168.43.128/25"
default['bcpc']['floating']['interface'] = nil
# if 'interface' is a VLAN interface, specifying a parent allows MTUs
# to be set properly
default['bcpc']['floating']['interface-parent'] = nil
# Fixed (tenant) network ranges carved into per-tenant VLAN networks.
default['bcpc']['fixed']['cidr'] = "1.127.0.0/16"
default['bcpc']['fixed']['vlan_start'] = "1000"
default['bcpc']['fixed']['num_networks'] = "100"
default['bcpc']['fixed']['network_size'] = "256"
# DHCP lease time in seconds for instances on fixed networks.
default['bcpc']['fixed']['dhcp_lease_time'] = "120"
default['bcpc']['ntp_servers'] = ["pool.ntp.org"]
default['bcpc']['dns_servers'] = ["8.8.8.8", "8.8.4.4"]
# Proxy server URL for recipes to use
# Example: http://proxy-hostname:port
default['bcpc']['proxy_server_url'] = nil
###########################################
#
# Repos for things we rely on
#
###########################################
# APT repository base URLs for third-party packages used by the cluster.
default['bcpc']['repos']['rabbitmq'] = "http://www.rabbitmq.com/debian"
default['bcpc']['repos']['mysql'] = "http://repo.percona.com/apt"
default['bcpc']['repos']['haproxy'] = "http://ppa.launchpad.net/vbernat/haproxy-1.5/ubuntu"
default['bcpc']['repos']['openstack'] = "http://ubuntu-cloud.archive.canonical.com/ubuntu"
default['bcpc']['repos']['hwraid'] = "http://hwraid.le-vert.net/ubuntu"
# Interpolates the node's Ubuntu codename (e.g. 'trusty') at load time.
default['bcpc']['repos']['fluentd'] = "http://packages.treasure-data.com/2/ubuntu/#{node['lsb']['codename']}"
# Contains %s placeholders; presumably filled in via format() by the
# consuming recipe — verify against the gridcentric recipe.
default['bcpc']['repos']['gridcentric'] = "http://downloads.gridcentric.com/packages/%s/%s/ubuntu"
default['bcpc']['repos']['elasticsearch'] = "http://packages.elasticsearch.org/elasticsearch/1.5/debian"
default['bcpc']['repos']['kibana'] = "http://packages.elasticsearch.org/kibana/4.1/debian"
default['bcpc']['repos']['erlang'] = "http://packages.erlang-solutions.com/ubuntu"
default['bcpc']['repos']['ceph'] = "http://download.ceph.com/debian-hammer"
default['bcpc']['repos']['zabbix'] = "http://repo.zabbix.com/zabbix/2.4/ubuntu"
###########################################
#
# [Optional] If using apt-mirror to pull down repos, we use these settings.
#
###########################################
# Note - us.archive.ubuntu.com tends to rate-limit pretty hard.
# If you are on East Coast US, we recommend Columbia University in env file:
#     "mirror" : {
#       "ubuntu": "mirror.cc.columbia.edu/pub/linux/ubuntu/archive"
#     }
# For a complete list of Ubuntu mirrors, please see:
# https://launchpad.net/ubuntu/+archivemirrors
default['bcpc']['mirror']['ubuntu'] = "us.archive.ubuntu.com/ubuntu"
# Distribution codenames/versions to mirror for each upstream source.
default['bcpc']['mirror']['ubuntu-dist'] = ['trusty']
default['bcpc']['mirror']['ceph-dist'] = ['hammer']
default['bcpc']['mirror']['os-dist'] = ['kilo']
default['bcpc']['mirror']['elasticsearch-dist'] = '1.5'
default['bcpc']['mirror']['kibana-dist'] = '4.1'
###########################################
#
# Default names for db's, pools, and users
#
###########################################
default['bcpc']['dbname']['nova'] = "nova"
default['bcpc']['dbname']['cinder'] = "cinder"
default['bcpc']['dbname']['glance'] = "glance"
default['bcpc']['dbname']['horizon'] = "horizon"
default['bcpc']['dbname']['keystone'] = "keystone"
default['bcpc']['dbname']['heat'] = "heat"
default['bcpc']['dbname']['ceilometer'] = "ceilometer"
default['bcpc']['dbname']['graphite'] = "graphite"
default['bcpc']['dbname']['pdns'] = "pdns"
default['bcpc']['dbname']['zabbix'] = "zabbix"
# Keystone tenant/role names and admin contact used during bootstrap.
default['bcpc']['admin_tenant'] = "AdminTenant"
default['bcpc']['admin_role'] = "Admin"
default['bcpc']['member_role'] = "Member"
default['bcpc']['admin_email'] = "admin@localhost.com"
default['bcpc']['zabbix']['user'] = "zabbix"
default['bcpc']['zabbix']['group'] = "adm"
# General ports for both Apache and Civetweb (no ssl for civetweb at this time)
default['bcpc']['ports']['radosgw'] = 8088
default['bcpc']['ports']['radosgw_https'] = 443
default['bcpc']['ports']['civetweb']['radosgw'] = 8088
# Apache - Leave until Apache is removed
default['bcpc']['ports']['apache']['radosgw'] = 80
default['bcpc']['ports']['apache']['radosgw_https'] = 443
# Ports HAProxy listens on when fronting radosgw.
default['bcpc']['ports']['haproxy']['radosgw'] = 80
default['bcpc']['ports']['haproxy']['radosgw_https'] = 443
# Can be set to 'http' or 'https'
default['bcpc']['protocol']['keystone'] = "https"
default['bcpc']['protocol']['glance'] = "https"
default['bcpc']['protocol']['nova'] = "https"
default['bcpc']['protocol']['cinder'] = "https"
default['bcpc']['protocol']['heat'] = "https"
###########################################
#
# Horizon Settings
#
###########################################
#
# List panels to remove from the Horizon interface here
# (if the last panel in a group is removed, the group will also be removed)
default['bcpc']['horizon']['disable_panels'] = ['containers']
###########################################
#
# Keystone Settings
#
###########################################
#
# Default log file
default['bcpc']['keystone']['log_file'] = '/var/log/keystone/keystone.log'
# Eventlet server is deprecated in Kilo, so by default we
# serve Keystone via Apache now.
default['bcpc']['keystone']['eventlet_server'] = false
# Turn caching via memcached on or off.
default['bcpc']['keystone']['enable_caching'] = true
# Enable debug logging (also caching debug logging).
default['bcpc']['keystone']['debug'] = false
# Enable verbose logging.
default['bcpc']['keystone']['verbose'] = false
# Set the timeout for how long we will wait for Keystone to become operational
# before failing (configures timeout on the wait-for-keystone-to-be-operational
# spinlock guard). Value is in seconds.
default['bcpc']['keystone']['wait_for_keystone_timeout'] = 120
# Set the number of Keystone WSGI processes and threads to use by default on the
# public API (experimentally threads > 1 may cause problems with the service
# catalog, for now we recommend scaling only in the processes dimension)
default['bcpc']['keystone']['wsgi']['processes'] = 5
default['bcpc']['keystone']['wsgi']['threads'] = 1
# The driver section below allows either 'sql' or 'ldap' (or 'templated' for catalog)
# Note that not all drivers may support SQL/LDAP, only tinker if you know what you're getting into
default['bcpc']['keystone']['drivers']['assignment'] = 'sql'
default['bcpc']['keystone']['drivers']['catalog'] = 'sql'
default['bcpc']['keystone']['drivers']['credential'] = 'sql'
default['bcpc']['keystone']['drivers']['domain_config'] = 'sql'
default['bcpc']['keystone']['drivers']['endpoint_filter'] = 'sql'
default['bcpc']['keystone']['drivers']['endpoint_policy'] = 'sql'
default['bcpc']['keystone']['drivers']['federation'] = 'sql'
default['bcpc']['keystone']['drivers']['identity'] = 'sql'
default['bcpc']['keystone']['drivers']['identity_mapping'] = 'sql'
default['bcpc']['keystone']['drivers']['oauth1'] = 'sql'
default['bcpc']['keystone']['drivers']['policy'] = 'sql'
default['bcpc']['keystone']['drivers']['revoke'] = 'sql'
default['bcpc']['keystone']['drivers']['role'] = 'sql'
default['bcpc']['keystone']['drivers']['trust'] = 'sql'
# Notifications driver
default['bcpc']['keystone']['drivers']['notification'] = 'log'
# Notifications format. See: http://docs.openstack.org/developer/keystone/event_notifications.html
default['bcpc']['keystone']['notification_format'] = 'cadf'
# LDAP credentials used by Keystone
default['bcpc']['ldap']['admin_user'] = nil
default['bcpc']['ldap']['admin_pass'] = nil
default['bcpc']['ldap']['config'] = {}
###########################################
#
# Keystone policy Settings
#
###########################################
# Rendered into Keystone's policy.json; keys are API actions, values are
# oslo.policy rule expressions. Edit with care — these control API access.
default['bcpc']['keystone']['policy'] = {
  "admin_required" => "role:admin or is_admin:1",
  "service_role" => "role:service",
  "service_or_admin" => "rule:admin_required or rule:service_role",
  "owner" => "user_id:%(user_id)s",
  "admin_or_owner" => "rule:admin_required or rule:owner",
  "token_subject" => "user_id:%(target.token.user_id)s",
  "admin_or_token_subject" => "rule:admin_required or rule:token_subject",
  "default" => "rule:admin_required",
  "identity:get_region" => "",
  "identity:list_regions" => "",
  "identity:create_region" => "rule:admin_required",
  "identity:update_region" => "rule:admin_required",
  "identity:delete_region" => "rule:admin_required",
  "identity:get_service" => "rule:admin_required",
  "identity:list_services" => "rule:admin_required",
  "identity:create_service" => "rule:admin_required",
  "identity:update_service" => "rule:admin_required",
  "identity:delete_service" => "rule:admin_required",
  "identity:get_endpoint" => "rule:admin_required",
  "identity:list_endpoints" => "rule:admin_required",
  "identity:create_endpoint" => "rule:admin_required",
  "identity:update_endpoint" => "rule:admin_required",
  "identity:delete_endpoint" => "rule:admin_required",
  "identity:get_domain" => "rule:admin_required",
  "identity:list_domains" => "rule:admin_required",
  "identity:create_domain" => "rule:admin_required",
  "identity:update_domain" => "rule:admin_required",
  "identity:delete_domain" => "rule:admin_required",
  "identity:get_project" => "rule:admin_required",
  "identity:list_projects" => "rule:admin_required",
  "identity:list_user_projects" => "rule:admin_or_owner",
  "identity:create_project" => "rule:admin_required",
  "identity:update_project" => "rule:admin_required",
  "identity:delete_project" => "rule:admin_required",
  "identity:get_user" => "rule:admin_required",
  "identity:list_users" => "rule:admin_required",
  "identity:create_user" => "rule:admin_required",
  "identity:update_user" => "rule:admin_required",
  "identity:delete_user" => "rule:admin_required",
  "identity:change_password" => "rule:admin_or_owner",
  "identity:get_group" => "rule:admin_required",
  "identity:list_groups" => "rule:admin_required",
  "identity:list_groups_for_user" => "rule:admin_or_owner",
  "identity:create_group" => "rule:admin_required",
  "identity:update_group" => "rule:admin_required",
  "identity:delete_group" => "rule:admin_required",
  "identity:list_users_in_group" => "rule:admin_required",
  "identity:remove_user_from_group" => "rule:admin_required",
  "identity:check_user_in_group" => "rule:admin_required",
  "identity:add_user_to_group" => "rule:admin_required",
  "identity:get_credential" => "rule:admin_required",
  "identity:list_credentials" => "rule:admin_required",
  "identity:create_credential" => "rule:admin_required",
  "identity:update_credential" => "rule:admin_required",
  "identity:delete_credential" => "rule:admin_required",
  "identity:ec2_get_credential" => "rule:admin_required or (rule:owner and user_id:%(target.credential.user_id)s)",
  "identity:ec2_list_credentials" => "rule:admin_or_owner",
  "identity:ec2_create_credential" => "rule:admin_or_owner",
  "identity:ec2_delete_credential" => "rule:admin_required or (rule:owner and user_id:%(target.credential.user_id)s)",
  "identity:get_role" => "rule:admin_required",
  "identity:list_roles" => "rule:admin_required",
  "identity:create_role" => "rule:admin_required",
  "identity:update_role" => "rule:admin_required",
  "identity:delete_role" => "rule:admin_required",
  "identity:check_grant" => "rule:admin_required",
  "identity:list_grants" => "rule:admin_required",
  "identity:create_grant" => "rule:admin_required",
  "identity:revoke_grant" => "rule:admin_required",
  "identity:list_role_assignments" => "rule:admin_required",
  "identity:get_policy" => "rule:admin_required",
  "identity:list_policies" => "rule:admin_required",
  "identity:create_policy" => "rule:admin_required",
  "identity:update_policy" => "rule:admin_required",
  "identity:delete_policy" => "rule:admin_required",
  "identity:check_token" => "rule:admin_required",
  "identity:validate_token" => "rule:service_or_admin",
  "identity:validate_token_head" => "rule:service_or_admin",
  "identity:revocation_list" => "rule:service_or_admin",
  "identity:revoke_token" => "rule:admin_or_token_subject",
  "identity:create_trust" => "user_id:%(trust.trustor_user_id)s",
  "identity:get_trust" => "rule:admin_or_owner",
  "identity:list_trusts" => "",
  "identity:list_roles_for_trust" => "",
  "identity:get_role_for_trust" => "",
  "identity:delete_trust" => "",
  "identity:create_consumer" => "rule:admin_required",
  "identity:get_consumer" => "rule:admin_required",
  "identity:list_consumers" => "rule:admin_required",
  "identity:delete_consumer" => "rule:admin_required",
  "identity:update_consumer" => "rule:admin_required",
  "identity:authorize_request_token" => "rule:admin_required",
  "identity:list_access_token_roles" => "rule:admin_required",
  "identity:get_access_token_role" => "rule:admin_required",
  "identity:list_access_tokens" => "rule:admin_required",
  "identity:get_access_token" => "rule:admin_required",
  "identity:delete_access_token" => "rule:admin_required",
  "identity:list_projects_for_endpoint" => "rule:admin_required",
  "identity:add_endpoint_to_project" => "rule:admin_required",
  "identity:check_endpoint_in_project" => "rule:admin_required",
  "identity:list_endpoints_for_project" => "rule:admin_required",
  "identity:remove_endpoint_from_project" => "rule:admin_required",
  "identity:create_endpoint_group" => "rule:admin_required",
  "identity:list_endpoint_groups" => "rule:admin_required",
  "identity:get_endpoint_group" => "rule:admin_required",
  "identity:update_endpoint_group" => "rule:admin_required",
  "identity:delete_endpoint_group" => "rule:admin_required",
  "identity:list_projects_associated_with_endpoint_group" => "rule:admin_required",
  "identity:list_endpoints_associated_with_endpoint_group" => "rule:admin_required",
  "identity:get_endpoint_group_in_project" => "rule:admin_required",
  "identity:add_endpoint_group_to_project" => "rule:admin_required",
  "identity:remove_endpoint_group_from_project" => "rule:admin_required",
  "identity:create_identity_provider" => "rule:admin_required",
  "identity:list_identity_providers" => "rule:admin_required",
  "identity:get_identity_providers" => "rule:admin_required",
  "identity:update_identity_provider" => "rule:admin_required",
  "identity:delete_identity_provider" => "rule:admin_required",
  "identity:create_protocol" => "rule:admin_required",
  "identity:update_protocol" => "rule:admin_required",
  "identity:get_protocol" => "rule:admin_required",
  "identity:list_protocols" => "rule:admin_required",
  "identity:delete_protocol" => "rule:admin_required",
  "identity:create_mapping" => "rule:admin_required",
  "identity:get_mapping" => "rule:admin_required",
  "identity:list_mappings" => "rule:admin_required",
  "identity:delete_mapping" => "rule:admin_required",
  "identity:update_mapping" => "rule:admin_required",
  "identity:create_service_provider" => "rule:admin_required",
  "identity:list_service_providers" => "rule:admin_required",
  "identity:get_service_provider" => "rule:admin_required",
  "identity:update_service_provider" => "rule:admin_required",
  "identity:delete_service_provider" => "rule:admin_required",
  "identity:get_auth_catalog" => "",
  "identity:get_auth_projects" => "",
  "identity:get_auth_domains" => "",
  "identity:list_projects_for_groups" => "",
  "identity:list_domains_for_groups" => "",
  "identity:list_revoke_events" => "",
  "identity:create_policy_association_for_endpoint" => "rule:admin_required",
  "identity:check_policy_association_for_endpoint" => "rule:admin_required",
  "identity:delete_policy_association_for_endpoint" => "rule:admin_required",
  "identity:create_policy_association_for_service" => "rule:admin_required",
  "identity:check_policy_association_for_service" => "rule:admin_required",
  "identity:delete_policy_association_for_service" => "rule:admin_required",
  "identity:create_policy_association_for_region_and_service" => "rule:admin_required",
  "identity:check_policy_association_for_region_and_service" => "rule:admin_required",
  "identity:delete_policy_association_for_region_and_service" => "rule:admin_required",
  "identity:get_policy_for_endpoint" => "rule:admin_required",
  "identity:list_endpoints_for_policy" => "rule:admin_required",
  "identity:create_domain_config" => "rule:admin_required",
  "identity:get_domain_config" => "rule:admin_required",
  "identity:update_domain_config" => "rule:admin_required",
  "identity:delete_domain_config" => "rule:admin_required"
}
###########################################
#
# Nova Settings
#
###########################################
#
# Over-allocation settings. Set according to your cluster
# SLAs. Default is to not allow over allocation of memory
# a slight over allocation of CPU (x2).
default['bcpc']['nova']['ram_allocation_ratio'] = 1.0
default['bcpc']['nova']['reserved_host_memory_mb'] = 1024
default['bcpc']['nova']['cpu_allocation_ratio'] = 2.0
# select from between this many equally optimal hosts when launching an instance
default['bcpc']['nova']['scheduler_host_subset_size'] = 3
# "workers" parameters in nova are set to number of CPUs
# available by default. This provides an override.
default['bcpc']['nova']['workers'] = 5
# Patch toggle for https://github.com/bloomberg/chef-bcpc/pull/493
default['bcpc']['nova']['live_migration_patch'] = false
# Verbose logging (level INFO)
default['bcpc']['nova']['verbose'] = false
# Nova debug toggle
default['bcpc']['nova']['debug'] = false
# Nova default log levels
default['bcpc']['nova']['default_log_levels'] = nil
# Nova scheduler default filters
default['bcpc']['nova']['scheduler_default_filters'] = ['AggregateInstanceExtraSpecsFilter', 'RetryFilter', 'AvailabilityZoneFilter', 'RamFilter', 'ComputeFilter', 'ComputeCapabilitiesFilter', 'ImagePropertiesFilter', 'ServerGroupAntiAffinityFilter', 'ServerGroupAffinityFilter']
# settings pertaining to ephemeral storage via mdadm/LVM
# (software RAID settings are here for logical grouping)
default['bcpc']['software_raid']['enabled'] = false
# define devices to RAID together in the hardware role for a type (e.g., BCPC-Hardware-Virtual)
default['bcpc']['software_raid']['devices'] = []
default['bcpc']['software_raid']['md_device'] = '/dev/md/md0'
default['bcpc']['software_raid']['chunk_size'] = 512
default['bcpc']['nova']['ephemeral'] = false
default['bcpc']['nova']['ephemeral_vg_name'] = 'nova_disk'
# Defaults to the software RAID md device defined above.
default['bcpc']['nova']['ephemeral_disks'] = [default['bcpc']['software_raid']['md_device']]
# Default per-tenant quota limits.
default['bcpc']['nova']['quota'] = {
  "cores" => 4,
  "floating_ips" => 10,
  "gigabytes"=> 1000,
  "instances" => 10,
  "ram" => 51200
}
# load a custom vendor driver,
# e.g. "nova.api.metadata.bcpc_metadata.BcpcMetadata",
# comment out to use default
#default['bcpc']['vendordata_driver'] = "nova.api.metadata.bcpc_metadata.BcpcMetadata"
###########################################
#
# Nova policy Settings
#
###########################################
default['bcpc']['nova']['policy'] = {
"context_is_admin" => "role:admin",
"admin_or_owner" => "is_admin:True or project_id:%(project_id)s",
"default" => "role:admin",
"cells_scheduler_filter:TargetCellFilter" => "is_admin:True",
"compute:create" => "",
"compute:create:attach_network" => "role:admin",
"compute:create:attach_volume" => "rule:admin_or_owner",
"compute:create:forced_host" => "is_admin:True",
"compute:get" => "rule:admin_or_owner",
"compute:get_all" => "rule:admin_or_owner",
"compute:get_all_tenants" => "rule:admin_or_owner",
"compute:get_diagnostics" => "rule:admin_or_owner",
"compute:start" => "rule:admin_or_owner",
"compute:stop" => "rule:admin_or_owner",
"compute:attach_volume" => "rule:admin_or_owner",
"compute:detach_volume" => "rule:admin_or_owner",
"compute:update" => "rule:admin_or_owner",
"compute:reboot" => "rule:admin_or_owner",
"compute:delete" => "rule:admin_or_owner",
"compute:unlock_override" => "rule:admin_api",
"compute:get_instance_metadata" => "rule:admin_or_owner",
"compute:update_instance_metadata" => "rule:admin_or_owner",
"compute:delete_instance_metadata" => "rule:admin_or_owner",
"compute:get_rdp_console" => "rule:admin_or_owner",
"compute:get_vnc_console" => "rule:admin_or_owner",
"compute:get_console_output" => "rule:admin_or_owner",
"compute:get_serial_console" => "rule:admin_or_owner",
"compute:snapshot" => "role:admin",
"compute:shelve" => "role:admin",
"compute:shelve_offload" => "role:admin",
"compute:unshelve" => "role:admin",
"compute:resize" => "role:admin",
"compute:confirm_resize" => "role:admin",
"compute:revert_resize" => "role:admin",
"compute:rebuild" => "role:admin",
"compute:security_groups:add_to_instance" => "rule:admin_or_owner",
"compute:security_groups:remove_from_instance" => "rule:admin_or_owner",
"compute:volume_snapshot_create" => "role:admin",
"compute:volume_snapshot_delete" => "role:admin",
"admin_api" => "is_admin:True",
"compute_extension:accounts" => "rule:admin_api",
"compute_extension:admin_actions" => "rule:admin_api",
"compute_extension:admin_actions:pause" => "role:admin",
"compute_extension:admin_actions:unpause" => "role:admin",
"compute_extension:admin_actions:suspend" => "role:admin",
"compute_extension:admin_actions:resume" => "role:admin",
"compute_extension:admin_actions:lock" => "role:admin",
"compute_extension:admin_actions:unlock" => "role:admin",
"compute_extension:admin_actions:resetNetwork" => "rule:admin_api",
"compute_extension:admin_actions:injectNetworkInfo" => "rule:admin_api",
"compute_extension:admin_actions:createBackup" => "role:admin",
"compute_extension:admin_actions:migrateLive" => "rule:admin_api",
"compute_extension:admin_actions:resetState" => "rule:admin_api",
"compute_extension:admin_actions:migrate" => "rule:admin_api",
"compute_extension:aggregates" => "rule:admin_api",
"compute_extension:agents" => "rule:admin_api",
"compute_extension:attach_interfaces" => "role:admin",
"compute_extension:baremetal_nodes" => "rule:admin_api",
"compute_extension:cells" => "rule:admin_api",
"compute_extension:cells:create" => "rule:admin_api",
"compute_extension:cells:delete" => "rule:admin_api",
"compute_extension:cells:update" => "rule:admin_api",
"compute_extension:cells:sync_instances" => "rule:admin_api",
"compute_extension:certificates" => "role:admin",
"compute_extension:cloudpipe" => "rule:admin_api",
"compute_extension:cloudpipe_update" => "rule:admin_api",
"compute_extension:console_output" => "rule:admin_or_owner",
"compute_extension:consoles" => "rule:admin_or_owner",
"compute_extension:config_drive" => "rule:admin_or_owner",
"compute_extension:createserverext" => "role:admin",
"compute_extension:deferred_delete" => "role:admin",
"compute_extension:disk_config" => "rule:admin_or_owner",
"compute_extension:evacuate" => "rule:admin_api",
"compute_extension:extended_server_attributes" => "rule:admin_api",
"compute_extension:extended_status" => "rule:admin_or_owner",
"compute_extension:extended_availability_zone" => "",
"compute_extension:extended_ips" => "rule:admin_or_owner",
"compute_extension:extended_ips_mac" => "rule:admin_or_owner",
"compute_extension:extended_vif_net" => "role:admin",
"compute_extension:extended_volumes" => "rule:admin_or_owner",
"compute_extension:fixed_ips" => "rule:admin_api",
"compute_extension:flavor_access" => "rule:admin_or_owner",
"compute_extension:flavor_access:addTenantAccess" => "rule:admin_api",
"compute_extension:flavor_access:removeTenantAccess" => "rule:admin_api",
"compute_extension:flavor_disabled" => "rule:admin_or_owner",
"compute_extension:flavor_rxtx" => "rule:admin_or_owner",
"compute_extension:flavor_swap" => "rule:admin_or_owner",
"compute_extension:flavorextradata" => "rule:admin_or_owner",
"compute_extension:flavorextraspecs:index" => "role:admin",
"compute_extension:flavorextraspecs:show" => "role:admin",
"compute_extension:flavorextraspecs:create" => "rule:admin_api",
"compute_extension:flavorextraspecs:update" => "rule:admin_api",
"compute_extension:flavorextraspecs:delete" => "rule:admin_api",
"compute_extension:flavormanage" => "rule:admin_api",
"compute_extension:floating_ip_dns" => "rule:admin_or_owner",
"compute_extension:floating_ip_pools" => "rule:admin_or_owner",
"compute_extension:floating_ips" => "rule:admin_or_owner",
"compute_extension:floating_ips_bulk" => "rule:admin_api",
"compute_extension:fping" => "role:admin",
"compute_extension:fping:all_tenants" => "rule:admin_api",
"compute_extension:hide_server_addresses" => "is_admin:False",
"compute_extension:hosts" => "rule:admin_api",
"compute_extension:hypervisors" => "rule:admin_api",
"compute_extension:image_size" => "role:admin",
"compute_extension:instance_actions" => "rule:admin_or_owner",
"compute_extension:instance_actions:events" => "rule:admin_api",
"compute_extension:instance_usage_audit_log" => "rule:admin_api",
"compute_extension:keypairs" => "rule:admin_or_owner",
"compute_extension:keypairs:index" => "rule:admin_or_owner",
"compute_extension:keypairs:show" => "rule:admin_or_owner",
"compute_extension:keypairs:create" => "",
"compute_extension:keypairs:delete" => "rule:admin_or_owner",
"compute_extension:multinic" => "role:admin",
"compute_extension:networks" => "rule:admin_api",
"compute_extension:networks:view" => "role:admin",
"compute_extension:networks_associate" => "rule:admin_api",
"compute_extension:quotas:show" => "rule:admin_or_owner",
"compute_extension:quotas:update" => "rule:admin_api",
"compute_extension:quotas:delete" => "rule:admin_api",
"compute_extension:quota_classes" => "role:admin",
"compute_extension:rescue" => "role:admin",
"compute_extension:security_group_default_rules" => "rule:admin_api",
"compute_extension:security_groups" => "rule:admin_or_owner",
"compute_extension:server_diagnostics" => "rule:admin_or_owner",
"compute_extension:server_groups" => "rule:admin_or_owner",
"compute_extension:server_password" => "role:admin",
"compute_extension:server_usage" => "rule:admin_or_owner",
"compute_extension:services" => "rule:admin_api",
"compute_extension:shelve" => "role:admin",
"compute_extension:shelveOffload" => "rule:admin_api",
"compute_extension:simple_tenant_usage:show" => "rule:admin_or_owner",
"compute_extension:simple_tenant_usage:list" => "rule:admin_api",
"compute_extension:unshelve" => "role:admin",
"compute_extension:users" => "rule:admin_api",
"compute_extension:virtual_interfaces" => "role:admin",
"compute_extension:virtual_storage_arrays" => "role:admin",
"compute_extension:volumes" => "rule:admin_or_owner",
"compute_extension:volume_attachments:index" => "rule:admin_or_owner",
"compute_extension:volume_attachments:show" => "rule:admin_or_owner",
"compute_extension:volume_attachments:create" => "rule:admin_or_owner",
"compute_extension:volume_attachments:update" => "rule:admin_or_owner",
"compute_extension:volume_attachments:delete" => "rule:admin_or_owner",
"compute_extension:volumetypes" => "role:admin",
"compute_extension:availability_zone:list" => "rule:admin_or_owner",
"compute_extension:availability_zone:detail" => "rule:admin_api",
"compute_extension:used_limits_for_admin" => "rule:admin_api",
"compute_extension:migrations:index" => "rule:admin_api",
"compute_extension:os-assisted-volume-snapshots:create" => "rule:admin_api",
"compute_extension:os-assisted-volume-snapshots:delete" => "rule:admin_api",
"compute_extension:console_auth_tokens" => "rule:admin_api",
"compute_extension:os-server-external-events:create" => "rule:admin_api",
"network:get_all" => "rule:admin_or_owner",
"network:get" => "rule:admin_or_owner",
"network:create" => "rule:admin_or_owner",
"network:delete" => "rule:admin_or_owner",
"network:associate" => "rule:admin_or_owner",
"network:disassociate" => "rule:admin_or_owner",
"network:get_vifs_by_instance" => "rule:admin_or_owner",
"network:allocate_for_instance" => "rule:admin_or_owner",
"network:deallocate_for_instance" => "rule:admin_or_owner",
"network:validate_networks" => "rule:admin_or_owner",
"network:get_instance_uuids_by_ip_filter" => "rule:admin_or_owner",
"network:get_instance_id_by_floating_address" => "rule:admin_or_owner",
"network:setup_networks_on_host" => "rule:admin_or_owner",
"network:get_backdoor_port" => "rule:admin_or_owner",
"network:get_floating_ip" => "rule:admin_or_owner",
"network:get_floating_ip_pools" => "rule:admin_or_owner",
"network:get_floating_ip_by_address" => "rule:admin_or_owner",
"network:get_floating_ips_by_project" => "rule:admin_or_owner",
"network:get_floating_ips_by_fixed_address" => "rule:admin_or_owner",
"network:allocate_floating_ip" => "rule:admin_or_owner",
"network:associate_floating_ip" => "rule:admin_or_owner",
"network:disassociate_floating_ip" => "rule:admin_or_owner",
"network:release_floating_ip" => "rule:admin_or_owner",
"network:migrate_instance_start" => "rule:admin_or_owner",
"network:migrate_instance_finish" => "rule:admin_or_owner",
"network:get_fixed_ip" => "role:admin",
"network:get_fixed_ip_by_address" => "rule:admin_or_owner",
"network:add_fixed_ip_to_instance" => "role:admin",
"network:remove_fixed_ip_from_instance" => "role:admin",
"network:add_network_to_project" => "role:admin",
"network:get_instance_nw_info" => "rule:admin_or_owner",
"network:get_dns_domains" => "role:admin",
"network:add_dns_entry" => "role:admin",
"network:modify_dns_entry" => "role:admin",
"network:delete_dns_entry" => "role:admin",
"network:get_dns_entries_by_address" => "role:admin",
"network:get_dns_entries_by_name" => "role:admin",
"network:create_private_dns_domain" => "role:admin",
"network:create_public_dns_domain" => "role:admin",
"network:delete_dns_domain" => "role:admin",
"network:attach_external_network" => "rule:admin_api",
"os_compute_api:servers:start" => "role:admin",
"os_compute_api:servers:stop" => "role:admin",
"os_compute_api:os-access-ips:discoverable" => "role:admin",
"os_compute_api:os-access-ips" => "role:admin",
"os_compute_api:os-admin-actions" => "rule:admin_api",
"os_compute_api:os-admin-actions:discoverable" => "role:admin",
"os_compute_api:os-admin-actions:reset_network" => "rule:admin_api",
"os_compute_api:os-admin-actions:inject_network_info" => "rule:admin_api",
"os_compute_api:os-admin-actions:reset_state" => "rule:admin_api",
"os_compute_api:os-admin-password" => "role:admin",
"os_compute_api:os-admin-password:discoverable" => "role:admin",
"os_compute_api:os-aggregates:discoverable" => "role:admin",
"os_compute_api:os-aggregates:index" => "rule:admin_api",
"os_compute_api:os-aggregates:create" => "rule:admin_api",
"os_compute_api:os-aggregates:show" => "rule:admin_api",
"os_compute_api:os-aggregates:update" => "rule:admin_api",
"os_compute_api:os-aggregates:delete" => "rule:admin_api",
"os_compute_api:os-aggregates:add_host" => "rule:admin_api",
"os_compute_api:os-aggregates:remove_host" => "rule:admin_api",
"os_compute_api:os-aggregates:set_metadata" => "rule:admin_api",
"os_compute_api:os-agents" => "rule:admin_api",
"os_compute_api:os-agents:discoverable" => "role:admin",
"os_compute_api:os-attach-interfaces" => "role:admin",
"os_compute_api:os-attach-interfaces:discoverable" => "role:admin",
"os_compute_api:os-baremetal-nodes" => "rule:admin_api",
"os_compute_api:os-baremetal-nodes:discoverable" => "role:admin",
"os_compute_api:os-block-device-mapping-v1:discoverable" => "role:admin",
"os_compute_api:os-cells" => "rule:admin_api",
"os_compute_api:os-cells:create" => "rule:admin_api",
"os_compute_api:os-cells:delete" => "rule:admin_api",
"os_compute_api:os-cells:update" => "rule:admin_api",
"os_compute_api:os-cells:sync_instances" => "rule:admin_api",
"os_compute_api:os-cells:discoverable" => "role:admin",
"os_compute_api:os-certificates:create" => "role:admin",
"os_compute_api:os-certificates:show" => "role:admin",
"os_compute_api:os-certificates:discoverable" => "role:admin",
"os_compute_api:os-cloudpipe" => "rule:admin_api",
"os_compute_api:os-cloudpipe:discoverable" => "role:admin",
"os_compute_api:os-consoles:discoverable" => "role:admin",
"os_compute_api:os-consoles:create" => "role:admin",
"os_compute_api:os-consoles:delete" => "role:admin",
"os_compute_api:os-consoles:index" => "role:admin",
"os_compute_api:os-consoles:show" => "role:admin",
"os_compute_api:os-console-output:discoverable" => "role:admin",
"os_compute_api:os-console-output" => "role:admin",
"os_compute_api:os-remote-consoles" => "role:admin",
"os_compute_api:os-remote-consoles:discoverable" => "role:admin",
"os_compute_api:os-create-backup:discoverable" => "role:admin",
"os_compute_api:os-create-backup" => "role:admin",
"os_compute_api:os-deferred-delete" => "role:admin",
"os_compute_api:os-deferred-delete:discoverable" => "role:admin",
"os_compute_api:os-disk-config" => "role:admin",
"os_compute_api:os-disk-config:discoverable" => "role:admin",
"os_compute_api:os-evacuate" => "rule:admin_api",
"os_compute_api:os-evacuate:discoverable" => "role:admin",
"os_compute_api:os-extended-server-attributes" => "rule:admin_api",
"os_compute_api:os-extended-server-attributes:discoverable" => "role:admin",
"os_compute_api:os-extended-status" => "role:admin",
"os_compute_api:os-extended-status:discoverable" => "role:admin",
"os_compute_api:os-extended-availability-zone" => "role:admin",
"os_compute_api:os-extended-availability-zone:discoverable" => "role:admin",
"os_compute_api:extension_info:discoverable" => "role:admin",
"os_compute_api:os-extended-volumes" => "role:admin",
"os_compute_api:os-extended-volumes:discoverable" => "role:admin",
"os_compute_api:os-fixed-ips" => "rule:admin_api",
"os_compute_api:os-fixed-ips:discoverable" => "role:admin",
"os_compute_api:os-flavor-access" => "role:admin",
"os_compute_api:os-flavor-access:discoverable" => "role:admin",
"os_compute_api:os-flavor-access:remove_tenant_access" => "rule:admin_api",
"os_compute_api:os-flavor-access:add_tenant_access" => "rule:admin_api",
"os_compute_api:os-flavor-rxtx" => "role:admin",
"os_compute_api:os-flavor-rxtx:discoverable" => "role:admin",
"os_compute_api:flavors:discoverable" => "role:admin",
"os_compute_api:os-flavor-extra-specs:discoverable" => "role:admin",
"os_compute_api:os-flavor-extra-specs:index" => "role:admin",
"os_compute_api:os-flavor-extra-specs:show" => "role:admin",
"os_compute_api:os-flavor-extra-specs:create" => "rule:admin_api",
"os_compute_api:os-flavor-extra-specs:update" => "rule:admin_api",
"os_compute_api:os-flavor-extra-specs:delete" => "rule:admin_api",
"os_compute_api:os-flavor-manage:discoverable" => "role:admin",
"os_compute_api:os-flavor-manage" => "rule:admin_api",
"os_compute_api:os-floating-ip-dns" => "role:admin",
"os_compute_api:os-floating-ip-dns:discoverable" => "role:admin",
"os_compute_api:os-floating-ip-pools" => "role:admin",
"os_compute_api:os-floating-ip-pools:discoverable" => "role:admin",
"os_compute_api:os-floating-ips" => "role:admin",
"os_compute_api:os-floating-ips:discoverable" => "role:admin",
"os_compute_api:os-floating-ips-bulk" => "rule:admin_api",
"os_compute_api:os-floating-ips-bulk:discoverable" => "role:admin",
"os_compute_api:os-fping" => "role:admin",
"os_compute_api:os-fping:discoverable" => "role:admin",
"os_compute_api:os-fping:all_tenants" => "rule:admin_api",
"os_compute_api:os-hide-server-addresses" => "is_admin:False",
"os_compute_api:os-hide-server-addresses:discoverable" => "role:admin",
"os_compute_api:os-hosts" => "rule:admin_api",
"os_compute_api:os-hosts:discoverable" => "role:admin",
"os_compute_api:os-hypervisors" => "rule:admin_api",
"os_compute_api:os-hypervisors:discoverable" => "role:admin",
"os_compute_api:images:discoverable" => "role:admin",
"os_compute_api:image-size" => "role:admin",
"os_compute_api:image-size:discoverable" => "role:admin",
"os_compute_api:os-instance-actions" => "role:admin",
"os_compute_api:os-instance-actions:discoverable" => "role:admin",
"os_compute_api:os-instance-actions:events" => "rule:admin_api",
"os_compute_api:os-instance-usage-audit-log" => "rule:admin_api",
"os_compute_api:os-instance-usage-audit-log:discoverable" => "role:admin",
"os_compute_api:ips:discoverable" => "role:admin",
"os_compute_api:ips:index" => "role:admin",
"os_compute_api:ips:show" => "role:admin",
"os_compute_api:os-keypairs:discoverable" => "role:admin",
"os_compute_api:os-keypairs" => "role:admin",
"os_compute_api:os-keypairs:index" => "role:admin",
"os_compute_api:os-keypairs:show" => "role:admin",
"os_compute_api:os-keypairs:create" => "role:admin",
"os_compute_api:os-keypairs:delete" => "role:admin",
"os_compute_api:limits:discoverable" => "role:admin",
"os_compute_api:os-lock-server:discoverable" => "role:admin",
"os_compute_api:os-lock-server:lock" => "role:admin",
"os_compute_api:os-lock-server:unlock" => "role:admin",
"os_compute_api:os-migrate-server:discoverable" => "role:admin",
"os_compute_api:os-migrate-server:migrate" => "rule:admin_api",
"os_compute_api:os-migrate-server:migrate_live" => "rule:admin_api",
"os_compute_api:os-multinic" => "role:admin",
"os_compute_api:os-multinic:discoverable" => "role:admin",
"os_compute_api:os-networks" => "rule:admin_api",
"os_compute_api:os-networks:view" => "role:admin",
"os_compute_api:os-networks:discoverable" => "role:admin",
"os_compute_api:os-networks-associate" => "rule:admin_api",
"os_compute_api:os-networks-associate:discoverable" => "role:admin",
"os_compute_api:os-pause-server:discoverable" => "role:admin",
"os_compute_api:os-pause-server:pause" => "role:admin",
"os_compute_api:os-pause-server:unpause" => "role:admin",
"os_compute_api:os-pci:pci_servers" => "role:admin",
"os_compute_api:os-pci:discoverable" => "role:admin",
"os_compute_api:os-pci:index" => "rule:admin_api",
"os_compute_api:os-pci:detail" => "rule:admin_api",
"os_compute_api:os-pci:show" => "rule:admin_api",
"os_compute_api:os-personality:discoverable" => "role:admin",
"os_compute_api:os-preserve-ephemeral-rebuild:discoverable" => "role:admin",
"os_compute_api:os-quota-sets:discoverable" => "role:admin",
"os_compute_api:os-quota-sets:show" => "role:admin",
"os_compute_api:os-quota-sets:update" => "rule:admin_api",
"os_compute_api:os-quota-sets:delete" => "rule:admin_api",
"os_compute_api:os-quota-sets:detail" => "rule:admin_api",
"os_compute_api:os-quota-class-sets" => "role:admin",
"os_compute_api:os-quota-class-sets:discoverable" => "role:admin",
"os_compute_api:os-rescue" => "role:admin",
"os_compute_api:os-rescue:discoverable" => "role:admin",
"os_compute_api:os-scheduler-hints:discoverable" => "role:admin",
"os_compute_api:os-security-group-default-rules:discoverable" => "role:admin",
"os_compute_api:os-security-group-default-rules" => "rule:admin_api",
"os_compute_api:os-security-groups" => "role:admin",
"os_compute_api:os-security-groups:discoverable" => "role:admin",
"os_compute_api:os-server-diagnostics" => "rule:admin_api",
"os_compute_api:os-server-diagnostics:discoverable" => "role:admin",
"os_compute_api:os-server-password" => "role:admin",
"os_compute_api:os-server-password:discoverable" => "role:admin",
"os_compute_api:os-server-usage" => "role:admin",
"os_compute_api:os-server-usage:discoverable" => "role:admin",
"os_compute_api:os-server-groups" => "role:admin",
"os_compute_api:os-server-groups:discoverable" => "role:admin",
"os_compute_api:os-services" => "rule:admin_api",
"os_compute_api:os-services:discoverable" => "role:admin",
"os_compute_api:server-metadata:discoverable" => "role:admin",
"os_compute_api:server-metadata:index" => "role:admin",
"os_compute_api:server-metadata:show" => "role:admin",
"os_compute_api:server-metadata:delete" => "role:admin",
"os_compute_api:server-metadata:create" => "role:admin",
"os_compute_api:server-metadata:update" => "role:admin",
"os_compute_api:server-metadata:update_all" => "role:admin",
"os_compute_api:servers:discoverable" => "role:admin",
"os_compute_api:os-shelve:shelve" => "role:admin",
"os_compute_api:os-shelve:shelve:discoverable" => "role:admin",
"os_compute_api:os-shelve:shelve_offload" => "rule:admin_api",
"os_compute_api:os-simple-tenant-usage:discoverable" => "role:admin",
"os_compute_api:os-simple-tenant-usage:show" => "role:admin",
"os_compute_api:os-simple-tenant-usage:list" => "rule:admin_api",
"os_compute_api:os-suspend-server:discoverable" => "role:admin",
"os_compute_api:os-suspend-server:suspend" => "role:admin",
"os_compute_api:os-suspend-server:resume" => "role:admin",
"os_compute_api:os-tenant-networks" => "role:admin",
"os_compute_api:os-tenant-networks:discoverable" => "role:admin",
"os_compute_api:os-shelve:unshelve" => "role:admin",
"os_compute_api:os-user-data:discoverable" => "role:admin",
"os_compute_api:os-virtual-interfaces" => "role:admin",
"os_compute_api:os-virtual-interfaces:discoverable" => "role:admin",
"os_compute_api:os-volumes" => "role:admin",
"os_compute_api:os-volumes:discoverable" => "role:admin",
"os_compute_api:os-volumes-attachments:index" => "role:admin",
"os_compute_api:os-volumes-attachments:show" => "role:admin",
"os_compute_api:os-volumes-attachments:create" => "role:admin",
"os_compute_api:os-volumes-attachments:update" => "role:admin",
"os_compute_api:os-volumes-attachments:delete" => "role:admin",
"os_compute_api:os-volumes-attachments:discoverable" => "role:admin",
"os_compute_api:os-availability-zone:list" => "rule:admin_or_owner",
"os_compute_api:os-availability-zone:discoverable" => "role:admin",
"os_compute_api:os-availability-zone:detail" => "rule:admin_api",
"os_compute_api:os-used-limits" => "rule:admin_api",
"os_compute_api:os-used-limits:discoverable" => "role:admin",
"os_compute_api:os-migrations:index" => "rule:admin_api",
"os_compute_api:os-migrations:discoverable" => "role:admin",
"os_compute_api:os-assisted-volume-snapshots:create" => "rule:admin_api",
"os_compute_api:os-assisted-volume-snapshots:delete" => "rule:admin_api",
"os_compute_api:os-assisted-volume-snapshots:discoverable" => "role:admin",
"os_compute_api:os-console-auth-tokens" => "rule:admin_api",
"os_compute_api:os-server-external-events:create" => "rule:admin_api"
}
###########################################
#
# Cinder Settings
#
###########################################
# Verbose logging (level INFO)
default['bcpc']['cinder']['verbose'] = false
# Number of cinder worker processes to spawn.
default['bcpc']['cinder']['workers'] = 5
# When true, allow volume placement to fall back to another availability
# zone when the requested AZ is unavailable.
default['bcpc']['cinder']['allow_az_fallback'] = true
# Per-project Cinder quotas; -1 means unlimited.
# NOTE(review): key naming is inconsistent -- "quota_snapshots" carries a
# "quota_" prefix while "volumes"/"consistencygroups"/"gigabytes" do not.
# Confirm against the template/recipe that consumes this hash before renaming.
default['bcpc']['cinder']['quota'] = {
"volumes" => -1,
"quota_snapshots" => 10,
"consistencygroups" => 10,
"gigabytes" => 1000
}
###########################################
#
# Cinder policy Settings
#
###########################################
# oslo.policy rules rendered into cinder's policy.json:
#   ""              -> allowed for any authenticated user
#   "rule:X"        -> reference to another rule in this hash
#   "role:X"        -> caller must hold keystone role X
#   "group:nobody"  -> effectively disables the API (no such group exists)
default['bcpc']['cinder']['policy'] = {
"context_is_admin" => "role:admin",
"admin_or_owner" => "is_admin:True or project_id:%(project_id)s",
"default" => "rule:admin_or_owner",
"admin_api" => "is_admin:True",
"volume:create" => "",
"volume:delete" => "",
"volume:get" => "",
"volume:get_all" => "",
"volume:get_volume_metadata" => "",
"volume:get_volume_admin_metadata" => "rule:admin_api",
"volume:delete_volume_admin_metadata" => "rule:admin_api",
"volume:update_volume_admin_metadata" => "rule:admin_api",
"volume:get_snapshot" => "",
"volume:get_all_snapshots" => "",
"volume:extend" => "",
"volume:update_readonly_flag" => "",
"volume:retype" => "",
"volume_extension:types_manage" => "rule:admin_api",
"volume_extension:types_extra_specs" => "rule:admin_api",
"volume_extension:volume_type_access" => "",
"volume_extension:volume_type_access:addProjectAccess" => "rule:admin_api",
"volume_extension:volume_type_access:removeProjectAccess" => "rule:admin_api",
"volume_extension:volume_type_encryption" => "rule:admin_api",
"volume_extension:volume_encryption_metadata" => "rule:admin_or_owner",
"volume_extension:extended_snapshot_attributes" => "",
"volume_extension:volume_image_metadata" => "",
"volume_extension:quotas:show" => "",
"volume_extension:quotas:update" => "rule:admin_api",
"volume_extension:quota_classes" => "",
"volume_extension:volume_admin_actions:reset_status" => "rule:admin_api",
"volume_extension:snapshot_admin_actions:reset_status" => "rule:admin_api",
"volume_extension:backup_admin_actions:reset_status" => "rule:admin_api",
"volume_extension:volume_admin_actions:force_delete" => "rule:admin_api",
"volume_extension:volume_admin_actions:force_detach" => "rule:admin_api",
"volume_extension:snapshot_admin_actions:force_delete" => "rule:admin_api",
"volume_extension:volume_admin_actions:migrate_volume" => "rule:admin_api",
"volume_extension:volume_admin_actions:migrate_volume_completion" => "rule:admin_api",
"volume_extension:volume_host_attribute" => "rule:admin_api",
"volume_extension:volume_tenant_attribute" => "rule:admin_or_owner",
"volume_extension:volume_mig_status_attribute" => "rule:admin_api",
"volume_extension:hosts" => "rule:admin_api",
"volume_extension:services" => "rule:admin_api",
"volume_extension:volume_manage" => "rule:admin_api",
"volume_extension:volume_unmanage" => "rule:admin_api",
"volume:services" => "rule:admin_api",
"volume:create_transfer" => "",
"volume:accept_transfer" => "",
"volume:delete_transfer" => "",
"volume:get_all_transfers" => "",
"volume_extension:replication:promote" => "rule:admin_api",
"volume_extension:replication:reenable" => "rule:admin_api",
# Backup create/delete/restore are restricted to admins here (stricter than
# the upstream default of admin_or_owner).
"backup:create" => "role:admin",
"backup:delete" => "role:admin",
"backup:get" => "",
"backup:get_all" => "",
"backup:restore" => "role:admin",
"backup:backup-import" => "rule:admin_api",
"backup:backup-export" => "rule:admin_api",
"snapshot_extension:snapshot_actions:update_snapshot_status" => "",
# Consistency-group APIs are deliberately disabled via the non-existent
# "nobody" group.
"consistencygroup:create" => "group:nobody",
"consistencygroup:delete" => "group:nobody",
"consistencygroup:update" => "group:nobody",
"consistencygroup:get" => "group:nobody",
"consistencygroup:get_all" => "group:nobody",
"consistencygroup:create_cgsnapshot" => "group:nobody",
"consistencygroup:delete_cgsnapshot" => "group:nobody",
"consistencygroup:get_cgsnapshot" => "group:nobody",
"consistencygroup:get_all_cgsnapshots" => "group:nobody",
"scheduler_extension:scheduler_stats:get_pools" => "rule:admin_api"
}
###########################################
#
# Glance policy Settings
#
###########################################
# Verbose logging (level INFO)
default['bcpc']['glance']['verbose'] = false
# Number of glance worker processes to spawn.
default['bcpc']['glance']['workers'] = 5
# oslo.policy rules for glance ("" = any authenticated user).
# Image creation, upload, publicizing and cache management are admin-only.
default['bcpc']['glance']['policy'] = {
"context_is_admin" => "role:admin",
"default" => "",
"add_image" => "role:admin",
"delete_image" => "",
"get_image" => "",
"get_images" => "",
"modify_image" => "",
"publicize_image" => "role:admin",
"copy_from" => "",
"download_image" => "",
"upload_image" => "role:admin",
"delete_image_location" => "",
"get_image_location" => "",
"set_image_location" => "",
"add_member" => "",
"delete_member" => "",
"get_member" => "",
"get_members" => "",
"modify_member" => "",
"manage_image_cache" => "role:admin",
"get_task" => "",
"get_tasks" => "",
"add_task" => "",
"modify_task" => "",
"deactivate" => "",
"reactivate" => "",
"get_metadef_namespace" => "",
"get_metadef_namespaces" => "",
"modify_metadef_namespace" => "",
"add_metadef_namespace" => "",
"get_metadef_object" => "",
"get_metadef_objects" => "",
"modify_metadef_object" => "",
"add_metadef_object" => "",
"list_metadef_resource_types" => "",
"get_metadef_resource_type" => "",
"add_metadef_resource_type_association" => "",
"get_metadef_property" => "",
"get_metadef_properties" => "",
"modify_metadef_property" => "",
"add_metadef_property" => "",
"get_metadef_tag" => "",
"get_metadef_tags" => "",
"modify_metadef_tag" => "",
"add_metadef_tag" => "",
"add_metadef_tags" => ""
}
###########################################
#
# Heat policy Settings
#
###########################################
# Number of heat worker processes to spawn.
default['bcpc']['heat']['workers'] = 5
# oslo.policy rules for heat:
#   "rule:deny_stack_user" -> any user EXCEPT those holding the
#                             heat_stack_user role (in-stack credentials)
#   "!"                    -> denied for everybody
default['bcpc']['heat']['policy'] = {
"deny_stack_user" => "not role:heat_stack_user",
"deny_everybody" => "!",
"cloudformation:ListStacks" => "rule:deny_stack_user",
"cloudformation:CreateStack" => "rule:deny_stack_user",
"cloudformation:DescribeStacks" => "rule:deny_stack_user",
"cloudformation:DeleteStack" => "rule:deny_stack_user",
"cloudformation:UpdateStack" => "rule:deny_stack_user",
"cloudformation:CancelUpdateStack" => "rule:deny_stack_user",
"cloudformation:DescribeStackEvents" => "rule:deny_stack_user",
"cloudformation:ValidateTemplate" => "rule:deny_stack_user",
"cloudformation:GetTemplate" => "rule:deny_stack_user",
"cloudformation:EstimateTemplateCost" => "rule:deny_stack_user",
"cloudformation:DescribeStackResource" => "",
"cloudformation:DescribeStackResources" => "rule:deny_stack_user",
"cloudformation:ListStackResources" => "rule:deny_stack_user",
"cloudwatch:DeleteAlarms" => "rule:deny_stack_user",
"cloudwatch:DescribeAlarmHistory" => "rule:deny_stack_user",
"cloudwatch:DescribeAlarms" => "rule:deny_stack_user",
"cloudwatch:DescribeAlarmsForMetric" => "rule:deny_stack_user",
"cloudwatch:DisableAlarmActions" => "rule:deny_stack_user",
"cloudwatch:EnableAlarmActions" => "rule:deny_stack_user",
"cloudwatch:GetMetricStatistics" => "rule:deny_stack_user",
"cloudwatch:ListMetrics" => "rule:deny_stack_user",
"cloudwatch:PutMetricAlarm" => "rule:deny_stack_user",
"cloudwatch:PutMetricData" => "",
"cloudwatch:SetAlarmState" => "rule:deny_stack_user",
"actions:action" => "rule:deny_stack_user",
"build_info:build_info" => "rule:deny_stack_user",
"events:index" => "rule:deny_stack_user",
"events:show" => "rule:deny_stack_user",
"resource:index" => "rule:deny_stack_user",
"resource:metadata" => "",
"resource:signal" => "",
"resource:show" => "rule:deny_stack_user",
"stacks:abandon" => "rule:deny_stack_user",
"stacks:create" => "rule:deny_stack_user",
"stacks:delete" => "rule:deny_stack_user",
"stacks:detail" => "rule:deny_stack_user",
"stacks:generate_template" => "rule:deny_stack_user",
"stacks:global_index" => "rule:deny_everybody",
"stacks:index" => "rule:deny_stack_user",
"stacks:list_resource_types" => "rule:deny_stack_user",
"stacks:lookup" => "",
"stacks:preview" => "rule:deny_stack_user",
"stacks:resource_schema" => "rule:deny_stack_user",
"stacks:show" => "rule:deny_stack_user",
"stacks:template" => "rule:deny_stack_user",
"stacks:update" => "rule:deny_stack_user",
"stacks:update_patch" => "rule:deny_stack_user",
"stacks:validate_template" => "rule:deny_stack_user",
"stacks:snapshot" => "rule:deny_stack_user",
"stacks:show_snapshot" => "rule:deny_stack_user",
"stacks:delete_snapshot" => "rule:deny_stack_user",
"stacks:list_snapshots" => "rule:deny_stack_user",
"stacks:restore_snapshot" => "rule:deny_stack_user",
"software_configs:create" => "rule:deny_stack_user",
"software_configs:show" => "rule:deny_stack_user",
"software_configs:delete" => "rule:deny_stack_user",
"software_deployments:index" => "rule:deny_stack_user",
"software_deployments:create" => "rule:deny_stack_user",
"software_deployments:show" => "rule:deny_stack_user",
"software_deployments:update" => "rule:deny_stack_user",
"software_deployments:delete" => "rule:deny_stack_user",
"software_deployments:metadata" => "",
"service:index" => "rule:context_is_admin"
}
###########################################
#
# Routemon settings
#
###########################################
#
# numfixes is how many times to try and fix default routes in the mgmt
# and storage networks when they disappear. If numfixes starts off at
# 0, or after 'numfixes' attempts have been made, then routemon
# subsequently only monitors and reports
#
default['bcpc']['routemon']['numfixes'] = 0
###########################################
#
# MySQL settings
#
###########################################
#
# If set to 0, max_connections for MySQL on heads will default to an
# auto-calculated value.
default['bcpc']['mysql-head']['max_connections'] = 0
###########################################
#
# BCPC system (sysctl) settings
#
###########################################
#
# Use this to *add* more reserved ports; i.e. modify value of
# net.ipv4.ip_local_reserved_ports
default['bcpc']['system']['additional_reserved_ports'] = []
# Any other sysctl parameters (register under parameters)
default['bcpc']['system']['parameters']['kernel.pid_max'] = 4194303
###########################################
#
# CPU governor settings
#
###########################################
#
# Available options: conservative, ondemand, userspace, powersave, performance
# Review documentation at https://www.kernel.org/doc/Documentation/cpu-freq/governors.txt
default['bcpc']['cpupower']['governor'] = "ondemand"
# Tunables for the "ondemand" governor; nil leaves the kernel default in place.
default['bcpc']['cpupower']['ondemand_ignore_nice_load'] = nil
default['bcpc']['cpupower']['ondemand_io_is_busy'] = nil
default['bcpc']['cpupower']['ondemand_powersave_bias'] = nil
default['bcpc']['cpupower']['ondemand_sampling_down_factor'] = nil
default['bcpc']['cpupower']['ondemand_sampling_rate'] = nil
default['bcpc']['cpupower']['ondemand_up_threshold'] = nil
###########################################
#
# General monitoring settings
#
###########################################
#
# Besides being the VIP that monitoring agents/clients will communicate with,
# monitoring services (carbon/elasticsearch/zabbix-server) will bind to it if
# BCPC-Monitoring role is assigned in-cluster.
default['bcpc']['monitoring']['vip'] = "10.17.1.16"
# List of monitoring clients external to cluster that we are monitoring
default['bcpc']['monitoring']['external_clients'] = []
# Monitoring database settings
# nil lets MySQL fall back to its built-in default buffer pool size.
default['bcpc']['monitoring']['mysql']['innodb_buffer_pool_size'] = nil
# Pagerduty integration
default['bcpc']['monitoring']['pagerduty']['enabled'] = false
# Pagerduty service key
default['bcpc']['monitoring']['pagerduty']['key'] = nil
###########################################
#
# Graphite settings
#
###########################################
#
# Graphite Server FQDN
default['bcpc']['graphite']['fqdn'] = "graphite.#{node['bcpc']['cluster_domain']}"
#
# Default retention rates
# http://graphite.readthedocs.org/en/latest/config-carbon.html#storage-schemas-conf
default['bcpc']['graphite']['retention'] = '60s:1d'
#
# Maximum number of whisper files to create per minute. This is set low to avoid
# I/O storm when new nodes are enrolled into cluster.
# Set to 'inf' (infinite) to remove limit.
default['bcpc']['graphite']['max_creates_per_min'] = '60'
###########################################
#
# Diamond settings
#
###########################################
#
# List of queue names separated by whitespace to report on. If nil, report all.
default['bcpc']['diamond']['collectors']['rabbitmq']['queues'] = nil
# Regular expression or list of queues to not report on.
# If not nil, this overrides "queues".
default['bcpc']['diamond']['collectors']['rabbitmq']['queues_ignored'] = '.*'
# List of vhosts to report on. If nil, report none.
default['bcpc']['diamond']['collectors']['rabbitmq']['vhosts'] = nil
# Ceph Collector parameters
default['bcpc']['diamond']['collectors']['CephCollector']['metrics_whitelist'] = "ceph.mon.#{node['hostname']}.cluster.*"
# Openstack Collector parameters ("interval" is in seconds; 900 = 15 minutes).
default['bcpc']['diamond']['collectors']['cloud'] = {
"interval" => "900",
"path" => "openstack",
# Redundant "#{...}" interpolation removed: region_name defaults to
# node.chef_environment and the management VIP is declared as a string,
# so both values are already strings.
"hostname" => node['bcpc']['region_name'],
"db_host" => node['bcpc']['management']['vip']
}
###########################################
#
# defaults for the bcpc.bootstrap settings
#
###########################################
#
# A value of nil means to let the Ubuntu installer work it out - it
# will try to find the nearest one. However the selected mirror is
# often slow.
default['bcpc']['bootstrap']['mirror'] = nil
#
# if you do specify a mirror, you can adjust the file path that comes
# after the hostname in the URL here
default['bcpc']['bootstrap']['mirror_path'] = "/ubuntu"
#
# worked example for the columbia mirror mentioned above which has a
# non-standard path
#default['bcpc']['bootstrap']['mirror'] = "mirror.cc.columbia.edu"
#default['bcpc']['bootstrap']['mirror_path'] = "/pub/linux/ubuntu/archive"
###########################################
#
# Rally settings
#
###########################################
#
# Package versions
# None needed at this time
# OS user account that rally runs under.
default['bcpc']['rally']['user'] = 'ubuntu'
###########################################
#
# Openstack Flavors
#
###########################################
# Flavor definitions. m1.* entries only attach extra_specs (sizes are
# presumably the stock OpenStack m1 defaults -- confirm in the recipe that
# creates them). e1.* ephemeral flavors define explicit sizes: memory_mb in
# MiB, disk_gb/ephemeral_gb in GiB. extra_specs pin flavors to host
# aggregates via the AggregateInstanceExtraSpecsFilter.
default['bcpc']['flavors'] = {
"m1.tiny" => {
"extra_specs" => { "aggregate_instance_extra_specs:general_compute" => "yes"}
},
"m1.small" => {
"extra_specs" => { "aggregate_instance_extra_specs:general_compute" => "yes"}
},
"m1.medium" => {
"extra_specs" => { "aggregate_instance_extra_specs:general_compute" => "yes"}
},
"m1.large" => {
"extra_specs" => { "aggregate_instance_extra_specs:general_compute" => "yes"}
},
"m1.xlarge" => {
"extra_specs" => { "aggregate_instance_extra_specs:general_compute" => "yes"}
},
"e1.tiny" => {
"vcpus" => 1,
"memory_mb" => 512,
"disk_gb" => 1,
"ephemeral_gb" => 5,
"extra_specs" => { "aggregate_instance_extra_specs:ephemeral_compute" => "yes"}
},
"e1.small" => {
"vcpus" => 1,
"memory_mb" => 2048,
"disk_gb" => 20,
"ephemeral_gb" => 20,
"extra_specs" => { "aggregate_instance_extra_specs:ephemeral_compute" => "yes"}
},
"e1.medium" => {
"vcpus" => 2,
"memory_mb" => 4096,
"disk_gb" => 40,
"ephemeral_gb" => 40,
"extra_specs" => { "aggregate_instance_extra_specs:ephemeral_compute" => "yes"}
},
"e1.large" => {
"vcpus" => 4,
"memory_mb" => 8192,
"disk_gb" => 40,
"ephemeral_gb" => 80,
"extra_specs" => { "aggregate_instance_extra_specs:ephemeral_compute" => "yes"}
},
"e1.xlarge" => {
"vcpus" => 8,
"memory_mb" => 16384,
"disk_gb" => 40,
"ephemeral_gb" => 160,
"extra_specs" => { "aggregate_instance_extra_specs:ephemeral_compute" => "yes"}
},
"e1.2xlarge" => {
"vcpus" => 8,
"memory_mb" => 32768,
"disk_gb" => 40,
"ephemeral_gb" => 320,
"extra_specs" => { "aggregate_instance_extra_specs:ephemeral_compute" => "yes"}
}
}
###########################################
#
# Openstack Host Aggregates
#
###########################################
# Aggregate name -> metadata key/value pairs matched against the flavor
# extra_specs above.
default['bcpc']['host_aggregates'] = {
"general_compute" => {
"general_compute" => "yes"
},
"ephemeral_compute" => {
"general_compute" => "no",
"ephemeral_compute" => "yes"
}
}
# Per-node aggregate membership; empty by default (set per node/role).
default['bcpc']['aggregate_membership'] = []
###########################################
#
# RadosGW Quotas
#
###########################################
# Default per-user RadosGW quota: 10737418240 bytes = 10 GiB.
default['bcpc']['rgw_quota'] = {
'user' => {
'default' => {
'max_size' => 10737418240
}
}
}
###########################################
#
# Openstack Project Quotas
#
###########################################
# Service -> project -> quota overrides; -1 means unlimited.
default['bcpc']['quota'] = {
'nova' => {
'AdminTenant' => {
'cores' => -1
}
}
}
###########################################
#
# Zabbix settings
#
###########################################
#
# Network-discovery poll delay in seconds.
default['bcpc']['zabbix']['discovery']['delay'] = 600
default['bcpc']['zabbix']['discovery']['ip_ranges'] = [node['bcpc']['management']['cidr']]
default['bcpc']['zabbix']['fqdn'] = "zabbix.#{node['bcpc']['cluster_domain']}"
# History/trend retention, in days.
default['bcpc']['zabbix']['storage_retention'] = 7
# PHP ini overrides for the zabbix web frontend.
default['bcpc']['zabbix']['php_settings'] = {
'max_execution_time' => 300,
'memory_limit' => '256M',
'post_max_size' => '16M',
'upload_max_filesize' => '2M',
'max_input_time' => 300,
'date.timezone' => 'America/New_York'
}
# Zabbix severities to notify about.
# https://www.zabbix.com/documentation/2.4/manual/api/reference/usermedia/object
# 63 = binary 111111, i.e. all six severity levels enabled.
default['bcpc']['zabbix']['severity'] = 63
###########################################
#
# Kibana settings
#
###########################################
#
# Kibana Server FQDN
default['bcpc']['kibana']['fqdn'] = "kibana.#{node['bcpc']['cluster_domain']}"
###########################################
#
# Elasticsearch settings
#
###########################################
#
# Heap memory size
default['bcpc']['elasticsearch']['heap_size'] = '256m'
# Additional Java options
default['bcpc']['elasticsearch']['java_opts'] = '-XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -verbose:gc -Xloggc:/var/log/elasticsearch/gc.log -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=10 -XX:GCLogFileSize=10m'
###########################################
#
# Getty settings
#
###########################################
# Serial consoles to spawn gettys on.
default['bcpc']['getty']['ttys'] = %w( ttyS0 ttyS1 )
# Bump version spec for rabbitmq to 3.6.0-1
###########################################
#
# General configuration for this cluster
#
###########################################
default['bcpc']['country'] = "US"
default['bcpc']['state'] = "NY"
default['bcpc']['location'] = "New York"
default['bcpc']['organization'] = "Bloomberg"
default['bcpc']['openstack_release'] = "kilo"
# Can be "updates" or "proposed"
default['bcpc']['openstack_branch'] = "proposed"
# Should be kvm (or qemu if testing in VMs that don't support VT-x)
default['bcpc']['virt_type'] = "kvm"
# Define the kernel to be installed. By default, track latest LTS kernel
default['bcpc']['preseed']['kernel'] = "linux-image-generic-lts-trusty"
# ulimits for libvirt-bin
default['bcpc']['libvirt-bin']['ulimit']['nofile'] = 4096
# Region name for this cluster
default['bcpc']['region_name'] = node.chef_environment
# Domain name for this cluster (used in many configs)
default['bcpc']['cluster_domain'] = "bcpc.example.com"
# Hypervisor domain (domain used by actual machines)
default['bcpc']['hypervisor_domain'] = "hypervisor-bcpc.example.com"
# Key if Cobalt+VMS is to be used
default['bcpc']['vms_key'] = nil
# custom SSL certificate (specify filename).
# certificate files should be stored under 'files/default' directory
default['bcpc']['ssl_certificate'] = nil
default['bcpc']['ssl_private_key'] = nil
default['bcpc']['ssl_intermediate_certificate'] = nil
# custom SSL certificate for Rados Gateway (S3)
default['bcpc']['s3_ssl_certificate'] = nil
default['bcpc']['s3_ssl_private_key'] = nil
default['bcpc']['s3_ssl_intermediate_certificate'] = nil
###########################################
#
# Package versions
#
###########################################
default['bcpc']['elasticsearch']['version'] = '1.5.1'
default['bcpc']['ceph']['version'] = '0.94.5-1trusty'
default['bcpc']['ceph']['version_number'] = '0.94.5'
default['bcpc']['erlang']['version'] = '1:17.5.3'
default['bcpc']['haproxy']['version'] = '1.5.15-1ppa1~trusty'
default['bcpc']['kibana']['version'] = '4.0.2'
default['bcpc']['rabbitmq']['version'] = '3.6.0-1'
###########################################
#
# Flags to enable/disable BCPC cluster features
#
###########################################
# This will enable elasticsearch & kibana on head nodes and fluentd on all nodes
default['bcpc']['enabled']['logging'] = true
# This will enable graphite web and carbon on head nodes and diamond on all nodes
default['bcpc']['enabled']['metrics'] = true
# This will enable zabbix server on head nodes and zabbix agent on all nodes
default['bcpc']['enabled']['monitoring'] = true
# This will enable powerdns on head nodes
default['bcpc']['enabled']['dns'] = true
# This will enable iptables firewall on all nodes
default['bcpc']['enabled']['host_firewall'] = true
# This will enable encryption of the chef data bag
default['bcpc']['enabled']['encrypt_data_bag'] = false
# This will enable auto-upgrades on all nodes (not recommended for stability)
default['bcpc']['enabled']['apt_upgrade'] = false
# This will enable running apt-get update at the start of every Chef run
default['bcpc']['enabled']['always_update_package_lists'] = true
# This will enable the extra healthchecks for keepalived (VIP management)
default['bcpc']['enabled']['keepalived_checks'] = true
# This will enable the networking test scripts
default['bcpc']['enabled']['network_tests'] = true
# This will enable httpd disk caching for radosgw in apache
default['bcpc']['enabled']['radosgw_cache'] = false
# This will enable using TPM-based hwrngd
default['bcpc']['enabled']['tpm'] = false
# This will block VMs from talking to the management network
default['bcpc']['enabled']['secure_fixed_networks'] = true
# Toggle to enable/disable swap memory
default['bcpc']['enabled']['swap'] = true
# Toggle to enable/disable Heat (OpenStack Cloud Formation)
default['bcpc']['enabled']['heat'] = false
# If radosgw_cache is enabled, default to 20MB max file size
default['bcpc']['radosgw']['cache_max_file_size'] = 20000000
###########################################
#
# Host-specific defaults for the cluster
#
###########################################
# Device names (without /dev/ prefix) of the disks Ceph may claim for OSDs.
default['bcpc']['ceph']['hdd_disks'] = ["sdb", "sdc"]
default['bcpc']['ceph']['ssd_disks'] = ["sdd", "sde"]
default['bcpc']['ceph']['enabled_pools'] = ["ssd", "hdd"]
# NOTE(review): these three 'interface' keys are re-assigned to nil further
# down in the "Network settings" section; since later default assignments
# override earlier ones, the eth0/eth1/eth2 values below are effectively
# discarded — confirm which of the two assignments is intended.
default['bcpc']['management']['interface'] = "eth0"
default['bcpc']['storage']['interface'] = "eth1"
default['bcpc']['floating']['interface'] = "eth2"
# Attribute files evaluate top-to-bottom, so this node[...] read picks up
# the "eth2" value assigned on the line just above — do not reorder.
default['bcpc']['fixed']['vlan_interface'] = node['bcpc']['floating']['interface']
###########################################
#
# Ceph settings for the cluster
#
###########################################
# Trusty is not available at this time for ceph-extras
default['bcpc']['ceph']['extras']['dist'] = "precise"
# To use apache instead of civetweb, make the following value anything but 'civetweb'
default['bcpc']['ceph']['frontend'] = "civetweb"
# CRUSH failure-domain bucket type used for replica placement.
default['bcpc']['ceph']['chooseleaf'] = "rack"
default['bcpc']['ceph']['pgp_auto_adjust'] = false
# Placement-group sizing knobs. Need to review...
default['bcpc']['ceph']['pgs_per_node'] = 1024
default['bcpc']['ceph']['max_pgs_per_osd'] = 300
# Journal size (MB) could be 10GB or higher in some cases
default['bcpc']['ceph']['journal_size'] = 2048
# The 'portion' parameters should add up to ~100 across all pools
default['bcpc']['ceph']['default']['replicas'] = 3
default['bcpc']['ceph']['default']['type'] = 'hdd'
default['bcpc']['ceph']['rgw']['replicas'] = 3
default['bcpc']['ceph']['rgw']['portion'] = 33
default['bcpc']['ceph']['rgw']['type'] = 'hdd'
default['bcpc']['ceph']['images']['replicas'] = 3
default['bcpc']['ceph']['images']['portion'] = 33
# Set images to hdd instead of ssd
default['bcpc']['ceph']['images']['type'] = 'hdd'
default['bcpc']['ceph']['images']['name'] = "images"
default['bcpc']['ceph']['volumes']['replicas'] = 3
default['bcpc']['ceph']['volumes']['portion'] = 33
default['bcpc']['ceph']['volumes']['name'] = "volumes"
# Created a new pool for VMs and set type to ssd
default['bcpc']['ceph']['vms']['replicas'] = 3
default['bcpc']['ceph']['vms']['portion'] = 33
default['bcpc']['ceph']['vms']['type'] = 'ssd'
default['bcpc']['ceph']['vms']['name'] = "vms"
# Set up crush rulesets
default['bcpc']['ceph']['ssd']['ruleset'] = 1
default['bcpc']['ceph']['hdd']['ruleset'] = 2
# If you are about to make a big change to the ceph cluster
# setting to true will reduce the load from the resulting
# ceph rebalance and keep things operational.
# See wiki for further details.
default['bcpc']['ceph']['rebalance'] = false
# Set the default niceness of Ceph OSD and monitor processes
# (negative values raise scheduling priority)
default['bcpc']['ceph']['osd_niceness'] = -10
default['bcpc']['ceph']['mon_niceness'] = -10
###########################################
#
# RabbitMQ settings
#
###########################################
# if changing this setting, you will need to reset Mnesia
# on all RabbitMQ nodes in the cluster
default['bcpc']['rabbitmq']['durable_queues'] = true
# ulimits for RabbitMQ server (max open file descriptors)
default['bcpc']['rabbitmq']['ulimit']['nofile'] = 4096
# Heartbeat timeout (seconds) to detect dead RabbitMQ brokers
default['bcpc']['rabbitmq']['heartbeat'] = 60
###########################################
#
# Network settings for the cluster
#
###########################################
# Three physical networks: management (API/SSH), storage (Ceph replication)
# and floating (tenant/public traffic). Each has a VIP, netmask, CIDR and
# gateway; these defaults are expected to be overridden per environment.
default['bcpc']['management']['vip'] = "10.17.1.15"
default['bcpc']['management']['netmask'] = "255.255.255.0"
default['bcpc']['management']['cidr'] = "10.17.1.0/24"
default['bcpc']['management']['gateway'] = "10.17.1.1"
# NOTE(review): this overrides the "eth0" assigned to the same key in the
# "Host-specific defaults" section above (later default assignments win) —
# confirm the nil here is intentional.
default['bcpc']['management']['interface'] = nil
# if 'interface' is a VLAN interface, specifying a parent allows MTUs
# to be set properly
default['bcpc']['management']['interface-parent'] = nil
# list of TCP ports that should be open on the management interface
# (generally stuff served via HAProxy)
default['bcpc']['management']['firewall_tcp_ports'] = [
  80,443,8088,7480,5000,35357,9292,8776,8773,8774,8004,8000,8777
]
# link-local address served by the Nova metadata service
default['bcpc']['metadata']['ip'] = "169.254.169.254"
default['bcpc']['storage']['netmask'] = "255.255.255.0"
default['bcpc']['storage']['cidr'] = "100.100.0.0/24"
default['bcpc']['storage']['gateway'] = "100.100.0.1"
# NOTE(review): overrides the "eth1" assigned earlier in this file.
default['bcpc']['storage']['interface'] = nil
# if 'interface' is a VLAN interface, specifying a parent allows MTUs
# to be set properly
default['bcpc']['storage']['interface-parent'] = nil
default['bcpc']['floating']['vip'] = "192.168.43.15"
default['bcpc']['floating']['netmask'] = "255.255.255.0"
default['bcpc']['floating']['cidr'] = "192.168.43.0/24"
default['bcpc']['floating']['gateway'] = "192.168.43.2"
# subnet from which tenant floating IPs are allocated
default['bcpc']['floating']['available_subnet'] = "192.168.43.128/25"
# NOTE(review): overrides the "eth2" assigned earlier in this file (the
# earlier value was already captured by fixed.vlan_interface at that point).
default['bcpc']['floating']['interface'] = nil
# if 'interface' is a VLAN interface, specifying a parent allows MTUs
# to be set properly
default['bcpc']['floating']['interface-parent'] = nil
# nova-network fixed (VM-private) network: carved into per-tenant VLANs
# starting at vlan_start; lease time is in seconds.
default['bcpc']['fixed']['cidr'] = "1.127.0.0/16"
default['bcpc']['fixed']['vlan_start'] = "1000"
default['bcpc']['fixed']['num_networks'] = "100"
default['bcpc']['fixed']['network_size'] = "256"
default['bcpc']['fixed']['dhcp_lease_time'] = "120"
default['bcpc']['ntp_servers'] = ["pool.ntp.org"]
default['bcpc']['dns_servers'] = ["8.8.8.8", "8.8.4.4"]
# Proxy server URL for recipes to use
# Example: http://proxy-hostname:port
default['bcpc']['proxy_server_url'] = nil
###########################################
#
# Repos for things we rely on
#
###########################################
# Upstream apt repository base URLs. These pair with the pinned versions in
# the "Package versions" section above.
default['bcpc']['repos']['rabbitmq'] = "http://www.rabbitmq.com/debian"
default['bcpc']['repos']['mysql'] = "http://repo.percona.com/apt"
default['bcpc']['repos']['haproxy'] = "http://ppa.launchpad.net/vbernat/haproxy-1.5/ubuntu"
default['bcpc']['repos']['openstack'] = "http://ubuntu-cloud.archive.canonical.com/ubuntu"
default['bcpc']['repos']['hwraid'] = "http://hwraid.le-vert.net/ubuntu"
# interpolates the node's Ubuntu codename (e.g. "trusty") at evaluation time
default['bcpc']['repos']['fluentd'] = "http://packages.treasure-data.com/2/ubuntu/#{node['lsb']['codename']}"
# contains %s placeholders filled in by the consuming recipe
default['bcpc']['repos']['gridcentric'] = "http://downloads.gridcentric.com/packages/%s/%s/ubuntu"
default['bcpc']['repos']['elasticsearch'] = "http://packages.elasticsearch.org/elasticsearch/1.5/debian"
default['bcpc']['repos']['kibana'] = "http://packages.elasticsearch.org/kibana/4.1/debian"
default['bcpc']['repos']['erlang'] = "http://packages.erlang-solutions.com/ubuntu"
default['bcpc']['repos']['ceph'] = "http://download.ceph.com/debian-hammer"
default['bcpc']['repos']['zabbix'] = "http://repo.zabbix.com/zabbix/2.4/ubuntu"
###########################################
#
# [Optional] If using apt-mirror to pull down repos, we use these settings.
#
###########################################
# Note - us.archive.ubuntu.com tends to rate-limit pretty hard.
# If you are on East Coast US, we recommend Columbia University in env file:
#  "mirror" : {
#      "ubuntu": "mirror.cc.columbia.edu/pub/linux/ubuntu/archive"
#  }
# For a complete list of Ubuntu mirrors, please see:
# https://launchpad.net/ubuntu/+archivemirrors
default['bcpc']['mirror']['ubuntu'] = "us.archive.ubuntu.com/ubuntu"
# distributions/release-trains to mirror for each upstream source
default['bcpc']['mirror']['ubuntu-dist'] = ['trusty']
default['bcpc']['mirror']['ceph-dist'] = ['hammer']
default['bcpc']['mirror']['os-dist'] = ['kilo']
default['bcpc']['mirror']['elasticsearch-dist'] = '1.5'
default['bcpc']['mirror']['kibana-dist'] = '4.1'
###########################################
#
# Default names for db's, pools, and users
#
###########################################
# MySQL database names for each service.
default['bcpc']['dbname']['nova'] = "nova"
default['bcpc']['dbname']['cinder'] = "cinder"
default['bcpc']['dbname']['glance'] = "glance"
default['bcpc']['dbname']['horizon'] = "horizon"
default['bcpc']['dbname']['keystone'] = "keystone"
default['bcpc']['dbname']['heat'] = "heat"
default['bcpc']['dbname']['ceilometer'] = "ceilometer"
default['bcpc']['dbname']['graphite'] = "graphite"
default['bcpc']['dbname']['pdns'] = "pdns"
default['bcpc']['dbname']['zabbix'] = "zabbix"
# Keystone tenant/role names and admin contact.
default['bcpc']['admin_tenant'] = "AdminTenant"
default['bcpc']['admin_role'] = "Admin"
default['bcpc']['member_role'] = "Member"
default['bcpc']['admin_email'] = "admin@localhost.com"
# system user/group the zabbix agent runs as
default['bcpc']['zabbix']['user'] = "zabbix"
default['bcpc']['zabbix']['group'] = "adm"
# General ports for both Apache and Civetweb (no ssl for civetweb at this time)
default['bcpc']['ports']['radosgw'] = 8088
default['bcpc']['ports']['radosgw_https'] = 443
default['bcpc']['ports']['civetweb']['radosgw'] = 8088
# Apache - Leave until Apache is removed
default['bcpc']['ports']['apache']['radosgw'] = 80
default['bcpc']['ports']['apache']['radosgw_https'] = 443
# front-end ports HAProxy listens on for radosgw traffic
default['bcpc']['ports']['haproxy']['radosgw'] = 80
default['bcpc']['ports']['haproxy']['radosgw_https'] = 443
# Can be set to 'http' or 'https'
default['bcpc']['protocol']['keystone'] = "https"
default['bcpc']['protocol']['glance'] = "https"
default['bcpc']['protocol']['nova'] = "https"
default['bcpc']['protocol']['cinder'] = "https"
default['bcpc']['protocol']['heat'] = "https"
###########################################
#
# Horizon Settings
#
###########################################
#
# List panels to remove from the Horizon interface here
# (if the last panel in a group is removed, the group will also be removed)
default['bcpc']['horizon']['disable_panels'] = ['containers']
###########################################
#
# Keystone Settings
#
###########################################
#
# Default log file
default['bcpc']['keystone']['log_file'] = '/var/log/keystone/keystone.log'
# Eventlet server is deprecated in Kilo, so by default we
# serve Keystone via Apache now.
default['bcpc']['keystone']['eventlet_server'] = false
# Turn caching via memcached on or off.
default['bcpc']['keystone']['enable_caching'] = true
# Enable debug logging (also caching debug logging).
default['bcpc']['keystone']['debug'] = false
# Enable verbose logging.
default['bcpc']['keystone']['verbose'] = false
# Set the timeout for how long we will wait for Keystone to become operational
# before failing (configures timeout on the wait-for-keystone-to-be-operational
# spinlock guard).
default['bcpc']['keystone']['wait_for_keystone_timeout'] = 120
# Set the number of Keystone WSGI processes and threads to use by default on the
# public API (experimentally threads > 1 may cause problems with the service
# catalog, for now we recommend scaling only in the processes dimension)
default['bcpc']['keystone']['wsgi']['processes'] = 5
default['bcpc']['keystone']['wsgi']['threads'] = 1
# The driver section below allows either 'sql' or 'ldap' (or 'templated' for catalog)
# Note that not all drivers may support SQL/LDAP, only tinker if you know what you're getting into
default['bcpc']['keystone']['drivers']['assignment'] = 'sql'
default['bcpc']['keystone']['drivers']['catalog'] = 'sql'
default['bcpc']['keystone']['drivers']['credential'] = 'sql'
default['bcpc']['keystone']['drivers']['domain_config'] = 'sql'
default['bcpc']['keystone']['drivers']['endpoint_filter'] = 'sql'
default['bcpc']['keystone']['drivers']['endpoint_policy'] = 'sql'
default['bcpc']['keystone']['drivers']['federation'] = 'sql'
default['bcpc']['keystone']['drivers']['identity'] = 'sql'
default['bcpc']['keystone']['drivers']['identity_mapping'] = 'sql'
default['bcpc']['keystone']['drivers']['oauth1'] = 'sql'
default['bcpc']['keystone']['drivers']['policy'] = 'sql'
default['bcpc']['keystone']['drivers']['revoke'] = 'sql'
default['bcpc']['keystone']['drivers']['role'] = 'sql'
default['bcpc']['keystone']['drivers']['trust'] = 'sql'
# Notifications driver
default['bcpc']['keystone']['drivers']['notification'] = 'log'
# Notifications format. See: http://docs.openstack.org/developer/keystone/event_notifications.html
default['bcpc']['keystone']['notification_format'] = 'cadf'
# LDAP credentials used by Keystone
default['bcpc']['ldap']['admin_user'] = nil
default['bcpc']['ldap']['admin_pass'] = nil
default['bcpc']['ldap']['config'] = {}
###########################################
#
# Keystone policy Settings
#
###########################################
# Rendered into Keystone's policy.json. Keys are API actions, values are
# oslo.policy rule expressions; the reusable rules are defined first and
# referenced below via "rule:<name>".
default['bcpc']['keystone']['policy'] = {
  # --- reusable base rules ---
  "admin_required" => "role:admin or is_admin:1",
  "service_role" => "role:service",
  "service_or_admin" => "rule:admin_required or rule:service_role",
  "owner" => "user_id:%(user_id)s",
  "admin_or_owner" => "rule:admin_required or rule:owner",
  "token_subject" => "user_id:%(target.token.user_id)s",
  "admin_or_token_subject" => "rule:admin_required or rule:token_subject",
  # fallback for any action not explicitly listed
  "default" => "rule:admin_required",
  # --- per-action rules (empty string means allow any authenticated user) ---
  "identity:get_region" => "",
  "identity:list_regions" => "",
  "identity:create_region" => "rule:admin_required",
  "identity:update_region" => "rule:admin_required",
  "identity:delete_region" => "rule:admin_required",
  "identity:get_service" => "rule:admin_required",
  "identity:list_services" => "rule:admin_required",
  "identity:create_service" => "rule:admin_required",
  "identity:update_service" => "rule:admin_required",
  "identity:delete_service" => "rule:admin_required",
  "identity:get_endpoint" => "rule:admin_required",
  "identity:list_endpoints" => "rule:admin_required",
  "identity:create_endpoint" => "rule:admin_required",
  "identity:update_endpoint" => "rule:admin_required",
  "identity:delete_endpoint" => "rule:admin_required",
  "identity:get_domain" => "rule:admin_required",
  "identity:list_domains" => "rule:admin_required",
  "identity:create_domain" => "rule:admin_required",
  "identity:update_domain" => "rule:admin_required",
  "identity:delete_domain" => "rule:admin_required",
  "identity:get_project" => "rule:admin_required",
  "identity:list_projects" => "rule:admin_required",
  "identity:list_user_projects" => "rule:admin_or_owner",
  "identity:create_project" => "rule:admin_required",
  "identity:update_project" => "rule:admin_required",
  "identity:delete_project" => "rule:admin_required",
  "identity:get_user" => "rule:admin_required",
  "identity:list_users" => "rule:admin_required",
  "identity:create_user" => "rule:admin_required",
  "identity:update_user" => "rule:admin_required",
  "identity:delete_user" => "rule:admin_required",
  "identity:change_password" => "rule:admin_or_owner",
  "identity:get_group" => "rule:admin_required",
  "identity:list_groups" => "rule:admin_required",
  "identity:list_groups_for_user" => "rule:admin_or_owner",
  "identity:create_group" => "rule:admin_required",
  "identity:update_group" => "rule:admin_required",
  "identity:delete_group" => "rule:admin_required",
  "identity:list_users_in_group" => "rule:admin_required",
  "identity:remove_user_from_group" => "rule:admin_required",
  "identity:check_user_in_group" => "rule:admin_required",
  "identity:add_user_to_group" => "rule:admin_required",
  "identity:get_credential" => "rule:admin_required",
  "identity:list_credentials" => "rule:admin_required",
  "identity:create_credential" => "rule:admin_required",
  "identity:update_credential" => "rule:admin_required",
  "identity:delete_credential" => "rule:admin_required",
  "identity:ec2_get_credential" => "rule:admin_required or (rule:owner and user_id:%(target.credential.user_id)s)",
  "identity:ec2_list_credentials" => "rule:admin_or_owner",
  "identity:ec2_create_credential" => "rule:admin_or_owner",
  "identity:ec2_delete_credential" => "rule:admin_required or (rule:owner and user_id:%(target.credential.user_id)s)",
  "identity:get_role" => "rule:admin_required",
  "identity:list_roles" => "rule:admin_required",
  "identity:create_role" => "rule:admin_required",
  "identity:update_role" => "rule:admin_required",
  "identity:delete_role" => "rule:admin_required",
  "identity:check_grant" => "rule:admin_required",
  "identity:list_grants" => "rule:admin_required",
  "identity:create_grant" => "rule:admin_required",
  "identity:revoke_grant" => "rule:admin_required",
  "identity:list_role_assignments" => "rule:admin_required",
  "identity:get_policy" => "rule:admin_required",
  "identity:list_policies" => "rule:admin_required",
  "identity:create_policy" => "rule:admin_required",
  "identity:update_policy" => "rule:admin_required",
  "identity:delete_policy" => "rule:admin_required",
  "identity:check_token" => "rule:admin_required",
  "identity:validate_token" => "rule:service_or_admin",
  "identity:validate_token_head" => "rule:service_or_admin",
  "identity:revocation_list" => "rule:service_or_admin",
  "identity:revoke_token" => "rule:admin_or_token_subject",
  "identity:create_trust" => "user_id:%(trust.trustor_user_id)s",
  "identity:get_trust" => "rule:admin_or_owner",
  "identity:list_trusts" => "",
  "identity:list_roles_for_trust" => "",
  "identity:get_role_for_trust" => "",
  "identity:delete_trust" => "",
  "identity:create_consumer" => "rule:admin_required",
  "identity:get_consumer" => "rule:admin_required",
  "identity:list_consumers" => "rule:admin_required",
  "identity:delete_consumer" => "rule:admin_required",
  "identity:update_consumer" => "rule:admin_required",
  "identity:authorize_request_token" => "rule:admin_required",
  "identity:list_access_token_roles" => "rule:admin_required",
  "identity:get_access_token_role" => "rule:admin_required",
  "identity:list_access_tokens" => "rule:admin_required",
  "identity:get_access_token" => "rule:admin_required",
  "identity:delete_access_token" => "rule:admin_required",
  "identity:list_projects_for_endpoint" => "rule:admin_required",
  "identity:add_endpoint_to_project" => "rule:admin_required",
  "identity:check_endpoint_in_project" => "rule:admin_required",
  "identity:list_endpoints_for_project" => "rule:admin_required",
  "identity:remove_endpoint_from_project" => "rule:admin_required",
  "identity:create_endpoint_group" => "rule:admin_required",
  "identity:list_endpoint_groups" => "rule:admin_required",
  "identity:get_endpoint_group" => "rule:admin_required",
  "identity:update_endpoint_group" => "rule:admin_required",
  "identity:delete_endpoint_group" => "rule:admin_required",
  "identity:list_projects_associated_with_endpoint_group" => "rule:admin_required",
  "identity:list_endpoints_associated_with_endpoint_group" => "rule:admin_required",
  "identity:get_endpoint_group_in_project" => "rule:admin_required",
  "identity:add_endpoint_group_to_project" => "rule:admin_required",
  "identity:remove_endpoint_group_from_project" => "rule:admin_required",
  "identity:create_identity_provider" => "rule:admin_required",
  "identity:list_identity_providers" => "rule:admin_required",
  "identity:get_identity_providers" => "rule:admin_required",
  "identity:update_identity_provider" => "rule:admin_required",
  "identity:delete_identity_provider" => "rule:admin_required",
  "identity:create_protocol" => "rule:admin_required",
  "identity:update_protocol" => "rule:admin_required",
  "identity:get_protocol" => "rule:admin_required",
  "identity:list_protocols" => "rule:admin_required",
  "identity:delete_protocol" => "rule:admin_required",
  "identity:create_mapping" => "rule:admin_required",
  "identity:get_mapping" => "rule:admin_required",
  "identity:list_mappings" => "rule:admin_required",
  "identity:delete_mapping" => "rule:admin_required",
  "identity:update_mapping" => "rule:admin_required",
  "identity:create_service_provider" => "rule:admin_required",
  "identity:list_service_providers" => "rule:admin_required",
  "identity:get_service_provider" => "rule:admin_required",
  "identity:update_service_provider" => "rule:admin_required",
  "identity:delete_service_provider" => "rule:admin_required",
  "identity:get_auth_catalog" => "",
  "identity:get_auth_projects" => "",
  "identity:get_auth_domains" => "",
  "identity:list_projects_for_groups" => "",
  "identity:list_domains_for_groups" => "",
  "identity:list_revoke_events" => "",
  "identity:create_policy_association_for_endpoint" => "rule:admin_required",
  "identity:check_policy_association_for_endpoint" => "rule:admin_required",
  "identity:delete_policy_association_for_endpoint" => "rule:admin_required",
  "identity:create_policy_association_for_service" => "rule:admin_required",
  "identity:check_policy_association_for_service" => "rule:admin_required",
  "identity:delete_policy_association_for_service" => "rule:admin_required",
  "identity:create_policy_association_for_region_and_service" => "rule:admin_required",
  "identity:check_policy_association_for_region_and_service" => "rule:admin_required",
  "identity:delete_policy_association_for_region_and_service" => "rule:admin_required",
  "identity:get_policy_for_endpoint" => "rule:admin_required",
  "identity:list_endpoints_for_policy" => "rule:admin_required",
  "identity:create_domain_config" => "rule:admin_required",
  "identity:get_domain_config" => "rule:admin_required",
  "identity:update_domain_config" => "rule:admin_required",
  "identity:delete_domain_config" => "rule:admin_required"
}
###########################################
#
# Nova Settings
#
###########################################
#
# Over-allocation settings. Set according to your cluster
# SLAs. Default is to not allow over allocation of memory
# a slight over allocation of CPU (x2).
default['bcpc']['nova']['ram_allocation_ratio'] = 1.0
default['bcpc']['nova']['reserved_host_memory_mb'] = 1024
default['bcpc']['nova']['cpu_allocation_ratio'] = 2.0
# select from between this many equally optimal hosts when launching an instance
default['bcpc']['nova']['scheduler_host_subset_size'] = 3
# "workers" parameters in nova are set to number of CPUs
# available by default. This provides an override.
default['bcpc']['nova']['workers'] = 5
# Patch toggle for https://github.com/bloomberg/chef-bcpc/pull/493
default['bcpc']['nova']['live_migration_patch'] = false
# Verbose logging (level INFO)
default['bcpc']['nova']['verbose'] = false
# Nova debug toggle
default['bcpc']['nova']['debug'] = false
# Nova default log levels (nil leaves Nova's own defaults in place)
default['bcpc']['nova']['default_log_levels'] = nil
# Nova scheduler default filters
default['bcpc']['nova']['scheduler_default_filters'] = ['AggregateInstanceExtraSpecsFilter', 'RetryFilter', 'AvailabilityZoneFilter', 'RamFilter', 'ComputeFilter', 'ComputeCapabilitiesFilter', 'ImagePropertiesFilter', 'ServerGroupAntiAffinityFilter', 'ServerGroupAffinityFilter']
# settings pertaining to ephemeral storage via mdadm/LVM
# (software RAID settings are here for logical grouping)
default['bcpc']['software_raid']['enabled'] = false
# define devices to RAID together in the hardware role for a type (e.g., BCPC-Hardware-Virtual)
default['bcpc']['software_raid']['devices'] = []
default['bcpc']['software_raid']['md_device'] = '/dev/md/md0'
# chunk size in KB
default['bcpc']['software_raid']['chunk_size'] = 512
default['bcpc']['nova']['ephemeral'] = false
default['bcpc']['nova']['ephemeral_vg_name'] = 'nova_disk'
# reads the md_device default assigned just above — keep the ordering
default['bcpc']['nova']['ephemeral_disks'] = [default['bcpc']['software_raid']['md_device']]
# per-tenant quota defaults (ram in MB, gigabytes is volume storage)
default['bcpc']['nova']['quota'] = {
  "cores" => 4,
  "floating_ips" => 10,
  "gigabytes"=> 1000,
  "instances" => 10,
  "ram" => 51200
}
# load a custom vendor driver,
# e.g. "nova.api.metadata.bcpc_metadata.BcpcMetadata",
# comment out to use default
#default['bcpc']['vendordata_driver'] = "nova.api.metadata.bcpc_metadata.BcpcMetadata"
###########################################
#
# Nova policy Settings
#
###########################################
# Nova policy.json contents (oslo.policy rules) rendered by the nova recipe.
# "" (empty string) means the action is allowed for any authenticated user;
# "rule:<name>" refers to another entry in this same hash.
# Note the hardened "default" => "role:admin": any policy target not listed
# here falls back to admin-only.
default['bcpc']['nova']['policy'] = {
    "context_is_admin" => "role:admin",
    "admin_or_owner" => "is_admin:True or project_id:%(project_id)s",
    "default" => "role:admin",
    "cells_scheduler_filter:TargetCellFilter" => "is_admin:True",
    # core compute API actions
    "compute:create" => "",
    "compute:create:attach_network" => "role:admin",
    "compute:create:attach_volume" => "rule:admin_or_owner",
    "compute:create:forced_host" => "is_admin:True",
    "compute:get" => "rule:admin_or_owner",
    "compute:get_all" => "rule:admin_or_owner",
    "compute:get_all_tenants" => "rule:admin_or_owner",
    "compute:get_diagnostics" => "rule:admin_or_owner",
    "compute:start" => "rule:admin_or_owner",
    "compute:stop" => "rule:admin_or_owner",
    "compute:attach_volume" => "rule:admin_or_owner",
    "compute:detach_volume" => "rule:admin_or_owner",
    "compute:update" => "rule:admin_or_owner",
    "compute:reboot" => "rule:admin_or_owner",
    "compute:delete" => "rule:admin_or_owner",
    "compute:unlock_override" => "rule:admin_api",
    "compute:get_instance_metadata" => "rule:admin_or_owner",
    "compute:update_instance_metadata" => "rule:admin_or_owner",
    "compute:delete_instance_metadata" => "rule:admin_or_owner",
    "compute:get_rdp_console" => "rule:admin_or_owner",
    "compute:get_vnc_console" => "rule:admin_or_owner",
    "compute:get_console_output" => "rule:admin_or_owner",
    "compute:get_serial_console" => "rule:admin_or_owner",
    "compute:snapshot" => "role:admin",
    "compute:shelve" => "role:admin",
    "compute:shelve_offload" => "role:admin",
    "compute:unshelve" => "role:admin",
    "compute:resize" => "role:admin",
    "compute:confirm_resize" => "role:admin",
    "compute:revert_resize" => "role:admin",
    "compute:rebuild" => "role:admin",
    "compute:security_groups:add_to_instance" => "rule:admin_or_owner",
    "compute:security_groups:remove_from_instance" => "rule:admin_or_owner",
    "compute:volume_snapshot_create" => "role:admin",
    "compute:volume_snapshot_delete" => "role:admin",
    # base rule referenced by many compute_extension policies below
    "admin_api" => "is_admin:True",
    # nova v2 API extensions
    "compute_extension:accounts" => "rule:admin_api",
    "compute_extension:admin_actions" => "rule:admin_api",
    "compute_extension:admin_actions:pause" => "role:admin",
    "compute_extension:admin_actions:unpause" => "role:admin",
    "compute_extension:admin_actions:suspend" => "role:admin",
    "compute_extension:admin_actions:resume" => "role:admin",
    "compute_extension:admin_actions:lock" => "role:admin",
    "compute_extension:admin_actions:unlock" => "role:admin",
    "compute_extension:admin_actions:resetNetwork" => "rule:admin_api",
    "compute_extension:admin_actions:injectNetworkInfo" => "rule:admin_api",
    "compute_extension:admin_actions:createBackup" => "role:admin",
    "compute_extension:admin_actions:migrateLive" => "rule:admin_api",
    "compute_extension:admin_actions:resetState" => "rule:admin_api",
    "compute_extension:admin_actions:migrate" => "rule:admin_api",
    "compute_extension:aggregates" => "rule:admin_api",
    "compute_extension:agents" => "rule:admin_api",
    "compute_extension:attach_interfaces" => "role:admin",
    "compute_extension:baremetal_nodes" => "rule:admin_api",
    "compute_extension:cells" => "rule:admin_api",
    "compute_extension:cells:create" => "rule:admin_api",
    "compute_extension:cells:delete" => "rule:admin_api",
    "compute_extension:cells:update" => "rule:admin_api",
    "compute_extension:cells:sync_instances" => "rule:admin_api",
    "compute_extension:certificates" => "role:admin",
    "compute_extension:cloudpipe" => "rule:admin_api",
    "compute_extension:cloudpipe_update" => "rule:admin_api",
    "compute_extension:console_output" => "rule:admin_or_owner",
    "compute_extension:consoles" => "rule:admin_or_owner",
    "compute_extension:config_drive" => "rule:admin_or_owner",
    "compute_extension:createserverext" => "role:admin",
    "compute_extension:deferred_delete" => "role:admin",
    "compute_extension:disk_config" => "rule:admin_or_owner",
    "compute_extension:evacuate" => "rule:admin_api",
    "compute_extension:extended_server_attributes" => "rule:admin_api",
    "compute_extension:extended_status" => "rule:admin_or_owner",
    "compute_extension:extended_availability_zone" => "",
    "compute_extension:extended_ips" => "rule:admin_or_owner",
    "compute_extension:extended_ips_mac" => "rule:admin_or_owner",
    "compute_extension:extended_vif_net" => "role:admin",
    "compute_extension:extended_volumes" => "rule:admin_or_owner",
    "compute_extension:fixed_ips" => "rule:admin_api",
    "compute_extension:flavor_access" => "rule:admin_or_owner",
    "compute_extension:flavor_access:addTenantAccess" => "rule:admin_api",
    "compute_extension:flavor_access:removeTenantAccess" => "rule:admin_api",
    "compute_extension:flavor_disabled" => "rule:admin_or_owner",
    "compute_extension:flavor_rxtx" => "rule:admin_or_owner",
    "compute_extension:flavor_swap" => "rule:admin_or_owner",
    "compute_extension:flavorextradata" => "rule:admin_or_owner",
    "compute_extension:flavorextraspecs:index" => "role:admin",
    "compute_extension:flavorextraspecs:show" => "role:admin",
    "compute_extension:flavorextraspecs:create" => "rule:admin_api",
    "compute_extension:flavorextraspecs:update" => "rule:admin_api",
    "compute_extension:flavorextraspecs:delete" => "rule:admin_api",
    "compute_extension:flavormanage" => "rule:admin_api",
    "compute_extension:floating_ip_dns" => "rule:admin_or_owner",
    "compute_extension:floating_ip_pools" => "rule:admin_or_owner",
    "compute_extension:floating_ips" => "rule:admin_or_owner",
    "compute_extension:floating_ips_bulk" => "rule:admin_api",
    "compute_extension:fping" => "role:admin",
    "compute_extension:fping:all_tenants" => "rule:admin_api",
    "compute_extension:hide_server_addresses" => "is_admin:False",
    "compute_extension:hosts" => "rule:admin_api",
    "compute_extension:hypervisors" => "rule:admin_api",
    "compute_extension:image_size" => "role:admin",
    "compute_extension:instance_actions" => "rule:admin_or_owner",
    "compute_extension:instance_actions:events" => "rule:admin_api",
    "compute_extension:instance_usage_audit_log" => "rule:admin_api",
    "compute_extension:keypairs" => "rule:admin_or_owner",
    "compute_extension:keypairs:index" => "rule:admin_or_owner",
    "compute_extension:keypairs:show" => "rule:admin_or_owner",
    "compute_extension:keypairs:create" => "",
    "compute_extension:keypairs:delete" => "rule:admin_or_owner",
    "compute_extension:multinic" => "role:admin",
    "compute_extension:networks" => "rule:admin_api",
    "compute_extension:networks:view" => "role:admin",
    "compute_extension:networks_associate" => "rule:admin_api",
    "compute_extension:quotas:show" => "rule:admin_or_owner",
    "compute_extension:quotas:update" => "rule:admin_api",
    "compute_extension:quotas:delete" => "rule:admin_api",
    "compute_extension:quota_classes" => "role:admin",
    "compute_extension:rescue" => "role:admin",
    "compute_extension:security_group_default_rules" => "rule:admin_api",
    "compute_extension:security_groups" => "rule:admin_or_owner",
    "compute_extension:server_diagnostics" => "rule:admin_or_owner",
    "compute_extension:server_groups" => "rule:admin_or_owner",
    "compute_extension:server_password" => "role:admin",
    "compute_extension:server_usage" => "rule:admin_or_owner",
    "compute_extension:services" => "rule:admin_api",
    "compute_extension:shelve" => "role:admin",
    "compute_extension:shelveOffload" => "rule:admin_api",
    "compute_extension:simple_tenant_usage:show" => "rule:admin_or_owner",
    "compute_extension:simple_tenant_usage:list" => "rule:admin_api",
    "compute_extension:unshelve" => "role:admin",
    "compute_extension:users" => "rule:admin_api",
    "compute_extension:virtual_interfaces" => "role:admin",
    "compute_extension:virtual_storage_arrays" => "role:admin",
    "compute_extension:volumes" => "rule:admin_or_owner",
    "compute_extension:volume_attachments:index" => "rule:admin_or_owner",
    "compute_extension:volume_attachments:show" => "rule:admin_or_owner",
    "compute_extension:volume_attachments:create" => "rule:admin_or_owner",
    "compute_extension:volume_attachments:update" => "rule:admin_or_owner",
    "compute_extension:volume_attachments:delete" => "rule:admin_or_owner",
    "compute_extension:volumetypes" => "role:admin",
    "compute_extension:availability_zone:list" => "rule:admin_or_owner",
    "compute_extension:availability_zone:detail" => "rule:admin_api",
    "compute_extension:used_limits_for_admin" => "rule:admin_api",
    "compute_extension:migrations:index" => "rule:admin_api",
    "compute_extension:os-assisted-volume-snapshots:create" => "rule:admin_api",
    "compute_extension:os-assisted-volume-snapshots:delete" => "rule:admin_api",
    "compute_extension:console_auth_tokens" => "rule:admin_api",
    "compute_extension:os-server-external-events:create" => "rule:admin_api",
    # nova-network API policies
    "network:get_all" => "rule:admin_or_owner",
    "network:get" => "rule:admin_or_owner",
    "network:create" => "rule:admin_or_owner",
    "network:delete" => "rule:admin_or_owner",
    "network:associate" => "rule:admin_or_owner",
    "network:disassociate" => "rule:admin_or_owner",
    "network:get_vifs_by_instance" => "rule:admin_or_owner",
    "network:allocate_for_instance" => "rule:admin_or_owner",
    "network:deallocate_for_instance" => "rule:admin_or_owner",
    "network:validate_networks" => "rule:admin_or_owner",
    "network:get_instance_uuids_by_ip_filter" => "rule:admin_or_owner",
    "network:get_instance_id_by_floating_address" => "rule:admin_or_owner",
    "network:setup_networks_on_host" => "rule:admin_or_owner",
    "network:get_backdoor_port" => "rule:admin_or_owner",
    "network:get_floating_ip" => "rule:admin_or_owner",
    "network:get_floating_ip_pools" => "rule:admin_or_owner",
    "network:get_floating_ip_by_address" => "rule:admin_or_owner",
    "network:get_floating_ips_by_project" => "rule:admin_or_owner",
    "network:get_floating_ips_by_fixed_address" => "rule:admin_or_owner",
    "network:allocate_floating_ip" => "rule:admin_or_owner",
    "network:associate_floating_ip" => "rule:admin_or_owner",
    "network:disassociate_floating_ip" => "rule:admin_or_owner",
    "network:release_floating_ip" => "rule:admin_or_owner",
    "network:migrate_instance_start" => "rule:admin_or_owner",
    "network:migrate_instance_finish" => "rule:admin_or_owner",
    "network:get_fixed_ip" => "role:admin",
    "network:get_fixed_ip_by_address" => "rule:admin_or_owner",
    "network:add_fixed_ip_to_instance" => "role:admin",
    "network:remove_fixed_ip_from_instance" => "role:admin",
    "network:add_network_to_project" => "role:admin",
    "network:get_instance_nw_info" => "rule:admin_or_owner",
    "network:get_dns_domains" => "role:admin",
    "network:add_dns_entry" => "role:admin",
    "network:modify_dns_entry" => "role:admin",
    "network:delete_dns_entry" => "role:admin",
    "network:get_dns_entries_by_address" => "role:admin",
    "network:get_dns_entries_by_name" => "role:admin",
    "network:create_private_dns_domain" => "role:admin",
    "network:create_public_dns_domain" => "role:admin",
    "network:delete_dns_domain" => "role:admin",
    "network:attach_external_network" => "rule:admin_api",
    # nova v2.1 API (os_compute_api namespace)
    "os_compute_api:servers:start" => "role:admin",
    "os_compute_api:servers:stop" => "role:admin",
    "os_compute_api:os-access-ips:discoverable" => "role:admin",
    "os_compute_api:os-access-ips" => "role:admin",
    "os_compute_api:os-admin-actions" => "rule:admin_api",
    "os_compute_api:os-admin-actions:discoverable" => "role:admin",
    "os_compute_api:os-admin-actions:reset_network" => "rule:admin_api",
    "os_compute_api:os-admin-actions:inject_network_info" => "rule:admin_api",
    "os_compute_api:os-admin-actions:reset_state" => "rule:admin_api",
    "os_compute_api:os-admin-password" => "role:admin",
    "os_compute_api:os-admin-password:discoverable" => "role:admin",
    "os_compute_api:os-aggregates:discoverable" => "role:admin",
    "os_compute_api:os-aggregates:index" => "rule:admin_api",
    "os_compute_api:os-aggregates:create" => "rule:admin_api",
    "os_compute_api:os-aggregates:show" => "rule:admin_api",
    "os_compute_api:os-aggregates:update" => "rule:admin_api",
    "os_compute_api:os-aggregates:delete" => "rule:admin_api",
    "os_compute_api:os-aggregates:add_host" => "rule:admin_api",
    "os_compute_api:os-aggregates:remove_host" => "rule:admin_api",
    "os_compute_api:os-aggregates:set_metadata" => "rule:admin_api",
    "os_compute_api:os-agents" => "rule:admin_api",
    "os_compute_api:os-agents:discoverable" => "role:admin",
    "os_compute_api:os-attach-interfaces" => "role:admin",
    "os_compute_api:os-attach-interfaces:discoverable" => "role:admin",
    "os_compute_api:os-baremetal-nodes" => "rule:admin_api",
    "os_compute_api:os-baremetal-nodes:discoverable" => "role:admin",
    "os_compute_api:os-block-device-mapping-v1:discoverable" => "role:admin",
    "os_compute_api:os-cells" => "rule:admin_api",
    "os_compute_api:os-cells:create" => "rule:admin_api",
    "os_compute_api:os-cells:delete" => "rule:admin_api",
    "os_compute_api:os-cells:update" => "rule:admin_api",
    "os_compute_api:os-cells:sync_instances" => "rule:admin_api",
    "os_compute_api:os-cells:discoverable" => "role:admin",
    "os_compute_api:os-certificates:create" => "role:admin",
    "os_compute_api:os-certificates:show" => "role:admin",
    "os_compute_api:os-certificates:discoverable" => "role:admin",
    "os_compute_api:os-cloudpipe" => "rule:admin_api",
    "os_compute_api:os-cloudpipe:discoverable" => "role:admin",
    "os_compute_api:os-consoles:discoverable" => "role:admin",
    "os_compute_api:os-consoles:create" => "role:admin",
    "os_compute_api:os-consoles:delete" => "role:admin",
    "os_compute_api:os-consoles:index" => "role:admin",
    "os_compute_api:os-consoles:show" => "role:admin",
    "os_compute_api:os-console-output:discoverable" => "role:admin",
    "os_compute_api:os-console-output" => "role:admin",
    "os_compute_api:os-remote-consoles" => "role:admin",
    "os_compute_api:os-remote-consoles:discoverable" => "role:admin",
    "os_compute_api:os-create-backup:discoverable" => "role:admin",
    "os_compute_api:os-create-backup" => "role:admin",
    "os_compute_api:os-deferred-delete" => "role:admin",
    "os_compute_api:os-deferred-delete:discoverable" => "role:admin",
    "os_compute_api:os-disk-config" => "role:admin",
    "os_compute_api:os-disk-config:discoverable" => "role:admin",
    "os_compute_api:os-evacuate" => "rule:admin_api",
    "os_compute_api:os-evacuate:discoverable" => "role:admin",
    "os_compute_api:os-extended-server-attributes" => "rule:admin_api",
    "os_compute_api:os-extended-server-attributes:discoverable" => "role:admin",
    "os_compute_api:os-extended-status" => "role:admin",
    "os_compute_api:os-extended-status:discoverable" => "role:admin",
    "os_compute_api:os-extended-availability-zone" => "role:admin",
    "os_compute_api:os-extended-availability-zone:discoverable" => "role:admin",
    "os_compute_api:extension_info:discoverable" => "role:admin",
    "os_compute_api:os-extended-volumes" => "role:admin",
    "os_compute_api:os-extended-volumes:discoverable" => "role:admin",
    "os_compute_api:os-fixed-ips" => "rule:admin_api",
    "os_compute_api:os-fixed-ips:discoverable" => "role:admin",
    "os_compute_api:os-flavor-access" => "role:admin",
    "os_compute_api:os-flavor-access:discoverable" => "role:admin",
    "os_compute_api:os-flavor-access:remove_tenant_access" => "rule:admin_api",
    "os_compute_api:os-flavor-access:add_tenant_access" => "rule:admin_api",
    "os_compute_api:os-flavor-rxtx" => "role:admin",
    "os_compute_api:os-flavor-rxtx:discoverable" => "role:admin",
    "os_compute_api:flavors:discoverable" => "role:admin",
    "os_compute_api:os-flavor-extra-specs:discoverable" => "role:admin",
    "os_compute_api:os-flavor-extra-specs:index" => "role:admin",
    "os_compute_api:os-flavor-extra-specs:show" => "role:admin",
    "os_compute_api:os-flavor-extra-specs:create" => "rule:admin_api",
    "os_compute_api:os-flavor-extra-specs:update" => "rule:admin_api",
    "os_compute_api:os-flavor-extra-specs:delete" => "rule:admin_api",
    "os_compute_api:os-flavor-manage:discoverable" => "role:admin",
    "os_compute_api:os-flavor-manage" => "rule:admin_api",
    "os_compute_api:os-floating-ip-dns" => "role:admin",
    "os_compute_api:os-floating-ip-dns:discoverable" => "role:admin",
    "os_compute_api:os-floating-ip-pools" => "role:admin",
    "os_compute_api:os-floating-ip-pools:discoverable" => "role:admin",
    "os_compute_api:os-floating-ips" => "role:admin",
    "os_compute_api:os-floating-ips:discoverable" => "role:admin",
    "os_compute_api:os-floating-ips-bulk" => "rule:admin_api",
    "os_compute_api:os-floating-ips-bulk:discoverable" => "role:admin",
    "os_compute_api:os-fping" => "role:admin",
    "os_compute_api:os-fping:discoverable" => "role:admin",
    "os_compute_api:os-fping:all_tenants" => "rule:admin_api",
    "os_compute_api:os-hide-server-addresses" => "is_admin:False",
    "os_compute_api:os-hide-server-addresses:discoverable" => "role:admin",
    "os_compute_api:os-hosts" => "rule:admin_api",
    "os_compute_api:os-hosts:discoverable" => "role:admin",
    "os_compute_api:os-hypervisors" => "rule:admin_api",
    "os_compute_api:os-hypervisors:discoverable" => "role:admin",
    "os_compute_api:images:discoverable" => "role:admin",
    "os_compute_api:image-size" => "role:admin",
    "os_compute_api:image-size:discoverable" => "role:admin",
    "os_compute_api:os-instance-actions" => "role:admin",
    "os_compute_api:os-instance-actions:discoverable" => "role:admin",
    "os_compute_api:os-instance-actions:events" => "rule:admin_api",
    "os_compute_api:os-instance-usage-audit-log" => "rule:admin_api",
    "os_compute_api:os-instance-usage-audit-log:discoverable" => "role:admin",
    "os_compute_api:ips:discoverable" => "role:admin",
    "os_compute_api:ips:index" => "role:admin",
    "os_compute_api:ips:show" => "role:admin",
    "os_compute_api:os-keypairs:discoverable" => "role:admin",
    "os_compute_api:os-keypairs" => "role:admin",
    "os_compute_api:os-keypairs:index" => "role:admin",
    "os_compute_api:os-keypairs:show" => "role:admin",
    "os_compute_api:os-keypairs:create" => "role:admin",
    "os_compute_api:os-keypairs:delete" => "role:admin",
    "os_compute_api:limits:discoverable" => "role:admin",
    "os_compute_api:os-lock-server:discoverable" => "role:admin",
    "os_compute_api:os-lock-server:lock" => "role:admin",
    "os_compute_api:os-lock-server:unlock" => "role:admin",
    "os_compute_api:os-migrate-server:discoverable" => "role:admin",
    "os_compute_api:os-migrate-server:migrate" => "rule:admin_api",
    "os_compute_api:os-migrate-server:migrate_live" => "rule:admin_api",
    "os_compute_api:os-multinic" => "role:admin",
    "os_compute_api:os-multinic:discoverable" => "role:admin",
    "os_compute_api:os-networks" => "rule:admin_api",
    "os_compute_api:os-networks:view" => "role:admin",
    "os_compute_api:os-networks:discoverable" => "role:admin",
    "os_compute_api:os-networks-associate" => "rule:admin_api",
    "os_compute_api:os-networks-associate:discoverable" => "role:admin",
    "os_compute_api:os-pause-server:discoverable" => "role:admin",
    "os_compute_api:os-pause-server:pause" => "role:admin",
    "os_compute_api:os-pause-server:unpause" => "role:admin",
    "os_compute_api:os-pci:pci_servers" => "role:admin",
    "os_compute_api:os-pci:discoverable" => "role:admin",
    "os_compute_api:os-pci:index" => "rule:admin_api",
    "os_compute_api:os-pci:detail" => "rule:admin_api",
    "os_compute_api:os-pci:show" => "rule:admin_api",
    "os_compute_api:os-personality:discoverable" => "role:admin",
    "os_compute_api:os-preserve-ephemeral-rebuild:discoverable" => "role:admin",
    "os_compute_api:os-quota-sets:discoverable" => "role:admin",
    "os_compute_api:os-quota-sets:show" => "role:admin",
    "os_compute_api:os-quota-sets:update" => "rule:admin_api",
    "os_compute_api:os-quota-sets:delete" => "rule:admin_api",
    "os_compute_api:os-quota-sets:detail" => "rule:admin_api",
    "os_compute_api:os-quota-class-sets" => "role:admin",
    "os_compute_api:os-quota-class-sets:discoverable" => "role:admin",
    "os_compute_api:os-rescue" => "role:admin",
    "os_compute_api:os-rescue:discoverable" => "role:admin",
    "os_compute_api:os-scheduler-hints:discoverable" => "role:admin",
    "os_compute_api:os-security-group-default-rules:discoverable" => "role:admin",
    "os_compute_api:os-security-group-default-rules" => "rule:admin_api",
    "os_compute_api:os-security-groups" => "role:admin",
    "os_compute_api:os-security-groups:discoverable" => "role:admin",
    "os_compute_api:os-server-diagnostics" => "rule:admin_api",
    "os_compute_api:os-server-diagnostics:discoverable" => "role:admin",
    "os_compute_api:os-server-password" => "role:admin",
    "os_compute_api:os-server-password:discoverable" => "role:admin",
    "os_compute_api:os-server-usage" => "role:admin",
    "os_compute_api:os-server-usage:discoverable" => "role:admin",
    "os_compute_api:os-server-groups" => "role:admin",
    "os_compute_api:os-server-groups:discoverable" => "role:admin",
    "os_compute_api:os-services" => "rule:admin_api",
    "os_compute_api:os-services:discoverable" => "role:admin",
    "os_compute_api:server-metadata:discoverable" => "role:admin",
    "os_compute_api:server-metadata:index" => "role:admin",
    "os_compute_api:server-metadata:show" => "role:admin",
    "os_compute_api:server-metadata:delete" => "role:admin",
    "os_compute_api:server-metadata:create" => "role:admin",
    "os_compute_api:server-metadata:update" => "role:admin",
    "os_compute_api:server-metadata:update_all" => "role:admin",
    "os_compute_api:servers:discoverable" => "role:admin",
    "os_compute_api:os-shelve:shelve" => "role:admin",
    "os_compute_api:os-shelve:shelve:discoverable" => "role:admin",
    "os_compute_api:os-shelve:shelve_offload" => "rule:admin_api",
    "os_compute_api:os-simple-tenant-usage:discoverable" => "role:admin",
    "os_compute_api:os-simple-tenant-usage:show" => "role:admin",
    "os_compute_api:os-simple-tenant-usage:list" => "rule:admin_api",
    "os_compute_api:os-suspend-server:discoverable" => "role:admin",
    "os_compute_api:os-suspend-server:suspend" => "role:admin",
    "os_compute_api:os-suspend-server:resume" => "role:admin",
    "os_compute_api:os-tenant-networks" => "role:admin",
    "os_compute_api:os-tenant-networks:discoverable" => "role:admin",
    "os_compute_api:os-shelve:unshelve" => "role:admin",
    "os_compute_api:os-user-data:discoverable" => "role:admin",
    "os_compute_api:os-virtual-interfaces" => "role:admin",
    "os_compute_api:os-virtual-interfaces:discoverable" => "role:admin",
    "os_compute_api:os-volumes" => "role:admin",
    "os_compute_api:os-volumes:discoverable" => "role:admin",
    "os_compute_api:os-volumes-attachments:index" => "role:admin",
    "os_compute_api:os-volumes-attachments:show" => "role:admin",
    "os_compute_api:os-volumes-attachments:create" => "role:admin",
    "os_compute_api:os-volumes-attachments:update" => "role:admin",
    "os_compute_api:os-volumes-attachments:delete" => "role:admin",
    "os_compute_api:os-volumes-attachments:discoverable" => "role:admin",
    "os_compute_api:os-availability-zone:list" => "rule:admin_or_owner",
    "os_compute_api:os-availability-zone:discoverable" => "role:admin",
    "os_compute_api:os-availability-zone:detail" => "rule:admin_api",
    "os_compute_api:os-used-limits" => "rule:admin_api",
    "os_compute_api:os-used-limits:discoverable" => "role:admin",
    "os_compute_api:os-migrations:index" => "rule:admin_api",
    "os_compute_api:os-migrations:discoverable" => "role:admin",
    "os_compute_api:os-assisted-volume-snapshots:create" => "rule:admin_api",
    "os_compute_api:os-assisted-volume-snapshots:delete" => "rule:admin_api",
    "os_compute_api:os-assisted-volume-snapshots:discoverable" => "role:admin",
    "os_compute_api:os-console-auth-tokens" => "rule:admin_api",
    "os_compute_api:os-server-external-events:create" => "rule:admin_api"
}
###########################################
#
# Cinder Settings
#
###########################################
# Verbose logging (level INFO)
default['bcpc']['cinder']['verbose'] = false
# override for cinder worker-count options (defaults to CPU count otherwise)
default['bcpc']['cinder']['workers'] = 5
# allow volume scheduling to fall back to another AZ when the requested one
# is unavailable
default['bcpc']['cinder']['allow_az_fallback'] = true
# Default per-tenant Cinder quotas; -1 = unlimited
# NOTE(review): Cinder's quota key for snapshot count is normally
# "snapshots", not "quota_snapshots" — verify against the consuming recipe.
default['bcpc']['cinder']['quota'] = {
    "volumes" => -1,
    "quota_snapshots" => 10,
    "consistencygroups" => 10,
    "gigabytes" => 1000
}
###########################################
#
# Cinder policy Settings
#
###########################################
# Cinder policy.json contents (oslo.policy rules) rendered by the cinder
# recipe. "" (empty string) allows any authenticated user; here the fallback
# "default" rule is admin-or-owner rather than admin-only.
default['bcpc']['cinder']['policy'] = {
    "context_is_admin" => "role:admin",
    "admin_or_owner" => "is_admin:True or project_id:%(project_id)s",
    "default" => "rule:admin_or_owner",
    "admin_api" => "is_admin:True",
    # core volume operations — open to all authenticated users
    "volume:create" => "",
    "volume:delete" => "",
    "volume:get" => "",
    "volume:get_all" => "",
    "volume:get_volume_metadata" => "",
    "volume:get_volume_admin_metadata" => "rule:admin_api",
    "volume:delete_volume_admin_metadata" => "rule:admin_api",
    "volume:update_volume_admin_metadata" => "rule:admin_api",
    "volume:get_snapshot" => "",
    "volume:get_all_snapshots" => "",
    "volume:extend" => "",
    "volume:update_readonly_flag" => "",
    "volume:retype" => "",
    # volume type / quota / admin-action extensions
    "volume_extension:types_manage" => "rule:admin_api",
    "volume_extension:types_extra_specs" => "rule:admin_api",
    "volume_extension:volume_type_access" => "",
    "volume_extension:volume_type_access:addProjectAccess" => "rule:admin_api",
    "volume_extension:volume_type_access:removeProjectAccess" => "rule:admin_api",
    "volume_extension:volume_type_encryption" => "rule:admin_api",
    "volume_extension:volume_encryption_metadata" => "rule:admin_or_owner",
    "volume_extension:extended_snapshot_attributes" => "",
    "volume_extension:volume_image_metadata" => "",
    "volume_extension:quotas:show" => "",
    "volume_extension:quotas:update" => "rule:admin_api",
    "volume_extension:quota_classes" => "",
    "volume_extension:volume_admin_actions:reset_status" => "rule:admin_api",
    "volume_extension:snapshot_admin_actions:reset_status" => "rule:admin_api",
    "volume_extension:backup_admin_actions:reset_status" => "rule:admin_api",
    "volume_extension:volume_admin_actions:force_delete" => "rule:admin_api",
    "volume_extension:volume_admin_actions:force_detach" => "rule:admin_api",
    "volume_extension:snapshot_admin_actions:force_delete" => "rule:admin_api",
    "volume_extension:volume_admin_actions:migrate_volume" => "rule:admin_api",
    "volume_extension:volume_admin_actions:migrate_volume_completion" => "rule:admin_api",
    "volume_extension:volume_host_attribute" => "rule:admin_api",
    "volume_extension:volume_tenant_attribute" => "rule:admin_or_owner",
    "volume_extension:volume_mig_status_attribute" => "rule:admin_api",
    "volume_extension:hosts" => "rule:admin_api",
    "volume_extension:services" => "rule:admin_api",
    "volume_extension:volume_manage" => "rule:admin_api",
    "volume_extension:volume_unmanage" => "rule:admin_api",
    "volume:services" => "rule:admin_api",
    # volume transfers — open to all authenticated users
    "volume:create_transfer" => "",
    "volume:accept_transfer" => "",
    "volume:delete_transfer" => "",
    "volume:get_all_transfers" => "",
    "volume_extension:replication:promote" => "rule:admin_api",
    "volume_extension:replication:reenable" => "rule:admin_api",
    # backups: create/delete/restore restricted to admins; reads open
    "backup:create" => "role:admin",
    "backup:delete" => "role:admin",
    "backup:get" => "",
    "backup:get_all" => "",
    "backup:restore" => "role:admin",
    "backup:backup-import" => "rule:admin_api",
    "backup:backup-export" => "rule:admin_api",
    "snapshot_extension:snapshot_actions:update_snapshot_status" => "",
    # consistency groups effectively disabled: "group:nobody" matches no one
    "consistencygroup:create" => "group:nobody",
    "consistencygroup:delete" => "group:nobody",
    "consistencygroup:update" => "group:nobody",
    "consistencygroup:get" => "group:nobody",
    "consistencygroup:get_all" => "group:nobody",
    "consistencygroup:create_cgsnapshot" => "group:nobody",
    "consistencygroup:delete_cgsnapshot" => "group:nobody",
    "consistencygroup:get_cgsnapshot" => "group:nobody",
    "consistencygroup:get_all_cgsnapshots" => "group:nobody",
    "scheduler_extension:scheduler_stats:get_pools" => "rule:admin_api"
}
###########################################
#
# Glance policy Settings
#
###########################################
# Verbose logging (level INFO)
default['bcpc']['glance']['verbose'] = false
# override for glance worker-count options
default['bcpc']['glance']['workers'] = 5
# Glance policy.json contents (oslo.policy rules) rendered by the glance
# recipe. "" (empty string) allows any authenticated user; image creation,
# upload, publicizing, and cache management are restricted to admins.
default['bcpc']['glance']['policy'] = {
    "context_is_admin" => "role:admin",
    "default" => "",
    "add_image" => "role:admin",
    "delete_image" => "",
    "get_image" => "",
    "get_images" => "",
    "modify_image" => "",
    "publicize_image" => "role:admin",
    "copy_from" => "",
    "download_image" => "",
    "upload_image" => "role:admin",
    "delete_image_location" => "",
    "get_image_location" => "",
    "set_image_location" => "",
    # image membership (sharing)
    "add_member" => "",
    "delete_member" => "",
    "get_member" => "",
    "get_members" => "",
    "modify_member" => "",
    "manage_image_cache" => "role:admin",
    # async task API
    "get_task" => "",
    "get_tasks" => "",
    "add_task" => "",
    "modify_task" => "",
    "deactivate" => "",
    "reactivate" => "",
    # metadata definitions (metadef) API
    "get_metadef_namespace" => "",
    "get_metadef_namespaces" => "",
    "modify_metadef_namespace" => "",
    "add_metadef_namespace" => "",
    "get_metadef_object" => "",
    "get_metadef_objects" => "",
    "modify_metadef_object" => "",
    "add_metadef_object" => "",
    "list_metadef_resource_types" => "",
    "get_metadef_resource_type" => "",
    "add_metadef_resource_type_association" => "",
    "get_metadef_property" => "",
    "get_metadef_properties" => "",
    "modify_metadef_property" => "",
    "add_metadef_property" => "",
    "get_metadef_tag" => "",
    "get_metadef_tags" => "",
    "modify_metadef_tag" => "",
    "add_metadef_tag" => "",
    "add_metadef_tags" => ""
}
###########################################
#
# Heat policy Settings
#
###########################################
# override for heat worker-count options
default['bcpc']['heat']['workers'] = 5
# Heat policy.json contents (oslo.policy rules) rendered by the heat recipe.
# Most actions use "rule:deny_stack_user" to block the special
# heat_stack_user role; the few blank ("") policies stay open to stack users
# (presumably for in-instance agents/signalling — confirm against heat docs).
default['bcpc']['heat']['policy'] = {
    "deny_stack_user" => "not role:heat_stack_user",
    # "!" matches nothing, i.e. denies everyone
    "deny_everybody" => "!",
    # CloudFormation-compatible API
    "cloudformation:ListStacks" => "rule:deny_stack_user",
    "cloudformation:CreateStack" => "rule:deny_stack_user",
    "cloudformation:DescribeStacks" => "rule:deny_stack_user",
    "cloudformation:DeleteStack" => "rule:deny_stack_user",
    "cloudformation:UpdateStack" => "rule:deny_stack_user",
    "cloudformation:CancelUpdateStack" => "rule:deny_stack_user",
    "cloudformation:DescribeStackEvents" => "rule:deny_stack_user",
    "cloudformation:ValidateTemplate" => "rule:deny_stack_user",
    "cloudformation:GetTemplate" => "rule:deny_stack_user",
    "cloudformation:EstimateTemplateCost" => "rule:deny_stack_user",
    "cloudformation:DescribeStackResource" => "",
    "cloudformation:DescribeStackResources" => "rule:deny_stack_user",
    "cloudformation:ListStackResources" => "rule:deny_stack_user",
    # CloudWatch-compatible API
    "cloudwatch:DeleteAlarms" => "rule:deny_stack_user",
    "cloudwatch:DescribeAlarmHistory" => "rule:deny_stack_user",
    "cloudwatch:DescribeAlarms" => "rule:deny_stack_user",
    "cloudwatch:DescribeAlarmsForMetric" => "rule:deny_stack_user",
    "cloudwatch:DisableAlarmActions" => "rule:deny_stack_user",
    "cloudwatch:EnableAlarmActions" => "rule:deny_stack_user",
    "cloudwatch:GetMetricStatistics" => "rule:deny_stack_user",
    "cloudwatch:ListMetrics" => "rule:deny_stack_user",
    "cloudwatch:PutMetricAlarm" => "rule:deny_stack_user",
    "cloudwatch:PutMetricData" => "",
    "cloudwatch:SetAlarmState" => "rule:deny_stack_user",
    # native (OpenStack) orchestration API
    "actions:action" => "rule:deny_stack_user",
    "build_info:build_info" => "rule:deny_stack_user",
    "events:index" => "rule:deny_stack_user",
    "events:show" => "rule:deny_stack_user",
    "resource:index" => "rule:deny_stack_user",
    "resource:metadata" => "",
    "resource:signal" => "",
    "resource:show" => "rule:deny_stack_user",
    "stacks:abandon" => "rule:deny_stack_user",
    "stacks:create" => "rule:deny_stack_user",
    "stacks:delete" => "rule:deny_stack_user",
    "stacks:detail" => "rule:deny_stack_user",
    "stacks:generate_template" => "rule:deny_stack_user",
    "stacks:global_index" => "rule:deny_everybody",
    "stacks:index" => "rule:deny_stack_user",
    "stacks:list_resource_types" => "rule:deny_stack_user",
    "stacks:lookup" => "",
    "stacks:preview" => "rule:deny_stack_user",
    "stacks:resource_schema" => "rule:deny_stack_user",
    "stacks:show" => "rule:deny_stack_user",
    "stacks:template" => "rule:deny_stack_user",
    "stacks:update" => "rule:deny_stack_user",
    "stacks:update_patch" => "rule:deny_stack_user",
    "stacks:validate_template" => "rule:deny_stack_user",
    "stacks:snapshot" => "rule:deny_stack_user",
    "stacks:show_snapshot" => "rule:deny_stack_user",
    "stacks:delete_snapshot" => "rule:deny_stack_user",
    "stacks:list_snapshots" => "rule:deny_stack_user",
    "stacks:restore_snapshot" => "rule:deny_stack_user",
    "software_configs:create" => "rule:deny_stack_user",
    "software_configs:show" => "rule:deny_stack_user",
    "software_configs:delete" => "rule:deny_stack_user",
    "software_deployments:index" => "rule:deny_stack_user",
    "software_deployments:create" => "rule:deny_stack_user",
    "software_deployments:show" => "rule:deny_stack_user",
    "software_deployments:update" => "rule:deny_stack_user",
    "software_deployments:delete" => "rule:deny_stack_user",
    "software_deployments:metadata" => "",
    # NOTE(review): "context_is_admin" is not defined in this hash —
    # presumably supplied by heat's built-in policy defaults; confirm.
    "service:index" => "rule:context_is_admin"
}
###########################################
#
# Routemon settings
#
###########################################
#
# numfixes is how many times to try and fix default routes in the mgmt
# and storage networks when they disappear. If numfixes starts off at
# 0, or after 'numfixes' attempts have been made, then routemon
# subsequently only monitors and reports
#
default['bcpc']['routemon']['numfixes'] = 0
###########################################
#
# MySQL settings
#
###########################################
#
# If set to 0, max_connections for MySQL on heads will default to an
# auto-calculated value.
default['bcpc']['mysql-head']['max_connections'] = 0
###########################################
#
# BCPC system (sysctl) settings
#
###########################################
#
# Use this to *add* more reserved ports; i.e. modify value of
# net.ipv4.ip_local_reserved_ports
default['bcpc']['system']['additional_reserved_ports'] = []
# Any other sysctl parameters (register under parameters)
default['bcpc']['system']['parameters']['kernel.pid_max'] = 4194303
###########################################
#
# CPU governor settings
#
###########################################
#
# Available options: conservative, ondemand, userspace, powersave, performance
# Review documentation at https://www.kernel.org/doc/Documentation/cpu-freq/governors.txt
default['bcpc']['cpupower']['governor'] = "ondemand"
default['bcpc']['cpupower']['ondemand_ignore_nice_load'] = nil
default['bcpc']['cpupower']['ondemand_io_is_busy'] = nil
default['bcpc']['cpupower']['ondemand_powersave_bias'] = nil
default['bcpc']['cpupower']['ondemand_sampling_down_factor'] = nil
default['bcpc']['cpupower']['ondemand_sampling_rate'] = nil
default['bcpc']['cpupower']['ondemand_up_threshold'] = nil
###########################################
#
# General monitoring settings
#
###########################################
#
# Besides being the VIP that monitoring agents/clients will communicate with,
# monitoring services (carbon/elasticsearch/zabbix-server) will bind to it if
# BCPC-Monitoring role is assigned in-cluster.
default['bcpc']['monitoring']['vip'] = "10.17.1.16"
# List of monitoring clients external to cluster that we are monitoring
default['bcpc']['monitoring']['external_clients'] = []
# Monitoring database settings
default['bcpc']['monitoring']['mysql']['innodb_buffer_pool_size'] = nil
# Pagerduty integration
default['bcpc']['monitoring']['pagerduty']['enabled'] = false
# Pagerduty service key
default['bcpc']['monitoring']['pagerduty']['key'] = nil
###########################################
#
# Graphite settings
#
###########################################
#
# Graphite Server FQDN
default['bcpc']['graphite']['fqdn'] = "graphite.#{node['bcpc']['cluster_domain']}"
#
# Default retention rates
# http://graphite.readthedocs.org/en/latest/config-carbon.html#storage-schemas-conf
default['bcpc']['graphite']['retention'] = '60s:1d'
#
# Maximum number of whisper files to create per minute. This is set low to avoid
# I/O storm when new nodes are enrolled into cluster.
# Set to 'inf' (infinite) to remove limit.
default['bcpc']['graphite']['max_creates_per_min'] = '60'
###########################################
#
# Diamond settings
#
###########################################
#
# List of queue names separated by whitespace to report on. If nil, report all.
default['bcpc']['diamond']['collectors']['rabbitmq']['queues'] = nil
# Regular expression or list of queues to not report on.
# If not nil, this overrides "queues".
default['bcpc']['diamond']['collectors']['rabbitmq']['queues_ignored'] = '.*'
# List of vhosts to report on. If nil, report none.
default['bcpc']['diamond']['collectors']['rabbitmq']['vhosts'] = nil
# Ceph Collector parameters
default['bcpc']['diamond']['collectors']['CephCollector']['metrics_whitelist'] = "ceph.mon.#{node['hostname']}.cluster.*"
# Openstack Collector parameters
# Openstack Collector parameters.
# interval is in seconds; metrics are reported under the "openstack" path
# with the region name used as the reporting hostname.
default['bcpc']['diamond']['collectors']['cloud'] = {
  "interval" => "900",
  "path" => "openstack",
  # .to_s replaces the redundant string interpolation of the original
  # ("#{node[...]}" and v.to_s are equivalent, including nil -> "").
  "hostname" => node['bcpc']['region_name'].to_s,
  "db_host" => node['bcpc']['management']['vip'].to_s,
}
###########################################
#
# defaults for the bcpc.bootstrap settings
#
###########################################
#
# A value of nil means to let the Ubuntu installer work it out - it
# will try to find the nearest one. However the selected mirror is
# often slow.
default['bcpc']['bootstrap']['mirror'] = nil
#
# if you do specify a mirror, you can adjust the file path that comes
# after the hostname in the URL here
default['bcpc']['bootstrap']['mirror_path'] = "/ubuntu"
#
# worked example for the columbia mirror mentioned above which has a
# non-standard path
#default['bcpc']['bootstrap']['mirror'] = "mirror.cc.columbia.edu"
#default['bcpc']['bootstrap']['mirror_path'] = "/pub/linux/ubuntu/archive"
###########################################
#
# Rally settings
#
###########################################
#
# Package versions
# None needed at this time
default['bcpc']['rally']['user'] = 'ubuntu'
###########################################
#
# Openstack Flavors
#
###########################################
default['bcpc']['flavors'] = {
"m1.tiny" => {
"extra_specs" => { "aggregate_instance_extra_specs:general_compute" => "yes"}
},
"m1.small" => {
"extra_specs" => { "aggregate_instance_extra_specs:general_compute" => "yes"}
},
"m1.medium" => {
"extra_specs" => { "aggregate_instance_extra_specs:general_compute" => "yes"}
},
"m1.large" => {
"extra_specs" => { "aggregate_instance_extra_specs:general_compute" => "yes"}
},
"m1.xlarge" => {
"extra_specs" => { "aggregate_instance_extra_specs:general_compute" => "yes"}
},
"e1.tiny" => {
"vcpus" => 1,
"memory_mb" => 512,
"disk_gb" => 1,
"ephemeral_gb" => 5,
"extra_specs" => { "aggregate_instance_extra_specs:ephemeral_compute" => "yes"}
},
"e1.small" => {
"vcpus" => 1,
"memory_mb" => 2048,
"disk_gb" => 20,
"ephemeral_gb" => 20,
"extra_specs" => { "aggregate_instance_extra_specs:ephemeral_compute" => "yes"}
},
"e1.medium" => {
"vcpus" => 2,
"memory_mb" => 4096,
"disk_gb" => 40,
"ephemeral_gb" => 40,
"extra_specs" => { "aggregate_instance_extra_specs:ephemeral_compute" => "yes"}
},
"e1.large" => {
"vcpus" => 4,
"memory_mb" => 8192,
"disk_gb" => 40,
"ephemeral_gb" => 80,
"extra_specs" => { "aggregate_instance_extra_specs:ephemeral_compute" => "yes"}
},
"e1.xlarge" => {
"vcpus" => 8,
"memory_mb" => 16384,
"disk_gb" => 40,
"ephemeral_gb" => 160,
"extra_specs" => { "aggregate_instance_extra_specs:ephemeral_compute" => "yes"}
},
"e1.2xlarge" => {
"vcpus" => 8,
"memory_mb" => 32768,
"disk_gb" => 40,
"ephemeral_gb" => 320,
"extra_specs" => { "aggregate_instance_extra_specs:ephemeral_compute" => "yes"}
}
}
###########################################
#
# Openstack Host Aggregates
#
###########################################
default['bcpc']['host_aggregates'] = {
"general_compute" => {
"general_compute" => "yes"
},
"ephemeral_compute" => {
"general_compute" => "no",
"ephemeral_compute" => "yes"
}
}
default['bcpc']['aggregate_membership'] = []
###########################################
#
# RadosGW Quotas
#
###########################################
default['bcpc']['rgw_quota'] = {
'user' => {
'default' => {
'max_size' => 10737418240
}
}
}
###########################################
#
# Openstack Project Quotas
#
###########################################
default['bcpc']['quota'] = {
'nova' => {
'AdminTenant' => {
'cores' => -1
}
}
}
###########################################
#
# Zabbix settings
#
###########################################
#
default['bcpc']['zabbix']['discovery']['delay'] = 600
default['bcpc']['zabbix']['discovery']['ip_ranges'] = [node['bcpc']['management']['cidr']]
default['bcpc']['zabbix']['fqdn'] = "zabbix.#{node['bcpc']['cluster_domain']}"
default['bcpc']['zabbix']['storage_retention'] = 7
default['bcpc']['zabbix']['php_settings'] = {
'max_execution_time' => 300,
'memory_limit' => '256M',
'post_max_size' => '16M',
'upload_max_filesize' => '2M',
'max_input_time' => 300,
'date.timezone' => 'America/New_York'
}
# Zabbix severities to notify about.
# https://www.zabbix.com/documentation/2.4/manual/api/reference/usermedia/object
default['bcpc']['zabbix']['severity'] = 63
###########################################
#
# Kibana settings
#
###########################################
#
# Kibana Server FQDN
default['bcpc']['kibana']['fqdn'] = "kibana.#{node['bcpc']['cluster_domain']}"
###########################################
#
# Elasticsearch settings
#
###########################################
#
# Heap memory size
default['bcpc']['elasticsearch']['heap_size'] = '256m'
# Additional Java options
default['bcpc']['elasticsearch']['java_opts'] = '-XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -verbose:gc -Xloggc:/var/log/elasticsearch/gc.log -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=10 -XX:GCLogFileSize=10m'
###########################################
#
# Getty settings
#
###########################################
default['bcpc']['getty']['ttys'] = %w( ttyS0 ttyS1 )
|
# Add the opscode APT source for chef
default[:apt][:sources] = node[:apt][:sources] | ["opscode"]
# Set the default server version
default[:chef][:server][:version] = "12.0.8-1"
# Set the default client version
default[:chef][:client][:version] = "12.5.1-1"
# A list of gems needed by chef recipes
default[:chef][:gems] = []
Update chef client to 12.6.0
# Add the opscode APT source for chef.
# Array#| is a set union, so "opscode" is appended only if not already
# present in the node's existing source list.
default[:apt][:sources] = node[:apt][:sources] | ["opscode"]
# Set the default server version (Debian package version string)
default[:chef][:server][:version] = "12.0.8-1"
# Set the default client version (Debian package version string)
default[:chef][:client][:version] = "12.6.0-1"
# A list of gems needed by chef recipes; empty by default, intended to be
# overridden by roles/environments
default[:chef][:gems] = []
|
#
# Cookbook Name:: civicrm
# Recipe:: default
#
# Copyright 2011, OpenStreetMap Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
include_recipe "wordpress"
include_recipe "mysql"
package "wkhtmltopdf"
passwords = data_bag_item("civicrm", "passwords")
database_password = passwords["database"]
site_key = passwords["key"]
mysql_user "civicrm@localhost" do
password database_password
end
mysql_database "civicrm" do
permissions "civicrm@localhost" => :all
end
wordpress_site "join.osmfoundation.org" do
aliases "crm.osmfoundation.org"
ssl_enabled true
database_name "civicrm"
database_user "civicrm"
database_password database_password
end
wordpress_theme "osmblog-wp-theme" do
site "join.osmfoundation.org"
repository "git://github.com/harry-wood/osmblog-wp-theme.git"
end
wordpress_plugin "registration-honeypot" do
site "join.osmfoundation.org"
end
wordpress_plugin "sitepress-multilingual-cms" do
site "join.osmfoundation.org"
repository "git://chef.openstreetmap.org/sitepress-multilingual-cms.git"
end
civicrm_version = node[:civicrm][:version]
civicrm_directory = "/srv/join.osmfoundation.org/wp-content/plugins/civicrm"
directory "/opt/civicrm-#{civicrm_version}" do
owner "wordpress"
group "wordpress"
mode 0o755
end
remote_file "/var/cache/chef/civicrm-#{civicrm_version}-wordpress.zip" do
action :create_if_missing
source "https://download.civicrm.org/civicrm-#{civicrm_version}-wordpress.zip"
owner "wordpress"
group "wordpress"
mode 0o644
backup false
end
remote_file "/var/cache/chef/civicrm-#{civicrm_version}-l10n.tar.gz" do
action :create_if_missing
source "https://download.civicrm.org/civicrm-#{civicrm_version}-l10n.tar.gz"
owner "wordpress"
group "wordpress"
mode 0o644
backup false
end
execute "/var/cache/chef/civicrm-#{civicrm_version}-wordpress.zip" do
action :nothing
command "unzip -qq /var/cache/chef/civicrm-#{civicrm_version}-wordpress.zip"
cwd "/opt/civicrm-#{civicrm_version}"
user "wordpress"
group "wordpress"
subscribes :run, "remote_file[/var/cache/chef/civicrm-#{civicrm_version}-wordpress.zip]", :immediately
end
execute "/var/cache/chef/civicrm-#{civicrm_version}-l10n.tar.gz" do
action :nothing
command "tar -zxf /var/cache/chef/civicrm-#{civicrm_version}-l10n.tar.gz"
cwd "/opt/civicrm-#{civicrm_version}/civicrm"
user "wordpress"
group "wordpress"
subscribes :run, "remote_file[/var/cache/chef/civicrm-#{civicrm_version}-l10n.tar.gz]", :immediately
end
execute "/opt/civicrm-#{civicrm_version}/civicrm" do
action :nothing
command "rsync --archive --delete /opt/civicrm-#{civicrm_version}/civicrm/ #{civicrm_directory}"
user "wordpress"
group "wordpress"
subscribes :run, "execute[/var/cache/chef/civicrm-#{civicrm_version}-wordpress.zip]", :immediately
subscribes :run, "execute[/var/cache/chef/civicrm-#{civicrm_version}-l10n.tar.gz]", :immediately
end
directory "/srv/join.osmfoundation.org/wp-content/plugins/files" do
owner "www-data"
group "www-data"
mode 0o755
end
extensions_directory = "/srv/join.osmfoundation.org/wp-content/plugins/civicrm-extensions"
directory extensions_directory do
owner "wordpress"
group "wordpress"
mode 0o755
end
node[:civicrm][:extensions].each do |_, details|
git "#{extensions_directory}/#{details[:name]}" do
action :sync
repository details[:repository]
revision details[:revision]
user "wordpress"
group "wordpress"
end
end
settings = edit_file "#{civicrm_directory}/civicrm/templates/CRM/common/civicrm.settings.php.template" do |line|
line.gsub!(/%%cms%%/, "WordPress")
line.gsub!(/%%CMSdbUser%%/, "civicrm")
line.gsub!(/%%CMSdbPass%%/, database_password)
line.gsub!(/%%CMSdbHost%%/, "localhost")
line.gsub!(/%%CMSdbName%%/, "civicrm")
line.gsub!(/%%dbUser%%/, "civicrm")
line.gsub!(/%%dbPass%%/, database_password)
line.gsub!(/%%dbHost%%/, "localhost")
line.gsub!(/%%dbName%%/, "civicrm")
line.gsub!(/%%crmRoot%%/, "#{civicrm_directory}/civicrm/")
line.gsub!(/%%templateCompileDir%%/, "/srv/join.osmfoundation.org/wp-content/plugins/files/civicrm/templates_c/")
line.gsub!(/%%baseURL%%/, "http://join.osmfoundation.org/")
line.gsub!(/%%siteKey%%/, site_key)
line.gsub!(%r{// *(.*'ext_repo_url'.*)$}, "\\1")
line
end
file "#{civicrm_directory}/civicrm.settings.php" do
owner "wordpress"
group "wordpress"
mode 0o644
content settings
end
template "/etc/cron.d/osmf-crm" do
source "cron.erb"
owner "root"
group "root"
mode 0o600
variables :directory => civicrm_directory, :passwords => passwords
end
template "/etc/cron.daily/osmf-crm-backup" do
source "backup.cron.erb"
owner "root"
group "root"
mode 0o750
variables :passwords => passwords
end
Install contact-form-7 extension in civicrm wordpress instance
#
# Cookbook Name:: civicrm
# Recipe:: default
#
# Copyright 2011, OpenStreetMap Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
include_recipe "wordpress"
include_recipe "mysql"
package "wkhtmltopdf"
passwords = data_bag_item("civicrm", "passwords")
database_password = passwords["database"]
site_key = passwords["key"]
mysql_user "civicrm@localhost" do
password database_password
end
mysql_database "civicrm" do
permissions "civicrm@localhost" => :all
end
wordpress_site "join.osmfoundation.org" do
aliases "crm.osmfoundation.org"
ssl_enabled true
database_name "civicrm"
database_user "civicrm"
database_password database_password
end
wordpress_theme "osmblog-wp-theme" do
site "join.osmfoundation.org"
repository "git://github.com/harry-wood/osmblog-wp-theme.git"
end
wordpress_plugin "registration-honeypot" do
site "join.osmfoundation.org"
end
wordpress_plugin "sitepress-multilingual-cms" do
site "join.osmfoundation.org"
repository "git://chef.openstreetmap.org/sitepress-multilingual-cms.git"
end
wordpress_plugin "contact-form-7" do
site "join.osmfoundation.org"
end
civicrm_version = node[:civicrm][:version]
civicrm_directory = "/srv/join.osmfoundation.org/wp-content/plugins/civicrm"
directory "/opt/civicrm-#{civicrm_version}" do
owner "wordpress"
group "wordpress"
mode 0o755
end
remote_file "/var/cache/chef/civicrm-#{civicrm_version}-wordpress.zip" do
action :create_if_missing
source "https://download.civicrm.org/civicrm-#{civicrm_version}-wordpress.zip"
owner "wordpress"
group "wordpress"
mode 0o644
backup false
end
remote_file "/var/cache/chef/civicrm-#{civicrm_version}-l10n.tar.gz" do
action :create_if_missing
source "https://download.civicrm.org/civicrm-#{civicrm_version}-l10n.tar.gz"
owner "wordpress"
group "wordpress"
mode 0o644
backup false
end
execute "/var/cache/chef/civicrm-#{civicrm_version}-wordpress.zip" do
action :nothing
command "unzip -qq /var/cache/chef/civicrm-#{civicrm_version}-wordpress.zip"
cwd "/opt/civicrm-#{civicrm_version}"
user "wordpress"
group "wordpress"
subscribes :run, "remote_file[/var/cache/chef/civicrm-#{civicrm_version}-wordpress.zip]", :immediately
end
execute "/var/cache/chef/civicrm-#{civicrm_version}-l10n.tar.gz" do
action :nothing
command "tar -zxf /var/cache/chef/civicrm-#{civicrm_version}-l10n.tar.gz"
cwd "/opt/civicrm-#{civicrm_version}/civicrm"
user "wordpress"
group "wordpress"
subscribes :run, "remote_file[/var/cache/chef/civicrm-#{civicrm_version}-l10n.tar.gz]", :immediately
end
execute "/opt/civicrm-#{civicrm_version}/civicrm" do
action :nothing
command "rsync --archive --delete /opt/civicrm-#{civicrm_version}/civicrm/ #{civicrm_directory}"
user "wordpress"
group "wordpress"
subscribes :run, "execute[/var/cache/chef/civicrm-#{civicrm_version}-wordpress.zip]", :immediately
subscribes :run, "execute[/var/cache/chef/civicrm-#{civicrm_version}-l10n.tar.gz]", :immediately
end
directory "/srv/join.osmfoundation.org/wp-content/plugins/files" do
owner "www-data"
group "www-data"
mode 0o755
end
extensions_directory = "/srv/join.osmfoundation.org/wp-content/plugins/civicrm-extensions"
directory extensions_directory do
owner "wordpress"
group "wordpress"
mode 0o755
end
node[:civicrm][:extensions].each do |_, details|
git "#{extensions_directory}/#{details[:name]}" do
action :sync
repository details[:repository]
revision details[:revision]
user "wordpress"
group "wordpress"
end
end
# Build civicrm.settings.php content by substituting the %%placeholder%%
# tokens in the template shipped with CiviCRM. Each gsub! mutates the line
# in place (and returns nil when there is no match), so the block
# explicitly returns the accumulated `line` as its value.
settings = edit_file "#{civicrm_directory}/civicrm/templates/CRM/common/civicrm.settings.php.template" do |line|
  line.gsub!(/%%cms%%/, "WordPress")
  line.gsub!(/%%CMSdbUser%%/, "civicrm")
  line.gsub!(/%%CMSdbPass%%/, database_password)
  line.gsub!(/%%CMSdbHost%%/, "localhost")
  line.gsub!(/%%CMSdbName%%/, "civicrm")
  # CiviCRM uses the same database and credentials as the CMS here.
  line.gsub!(/%%dbUser%%/, "civicrm")
  line.gsub!(/%%dbPass%%/, database_password)
  line.gsub!(/%%dbHost%%/, "localhost")
  line.gsub!(/%%dbName%%/, "civicrm")
  line.gsub!(/%%crmRoot%%/, "#{civicrm_directory}/civicrm/")
  line.gsub!(/%%templateCompileDir%%/, "/srv/join.osmfoundation.org/wp-content/plugins/files/civicrm/templates_c/")
  # NOTE(review): baseURL is http:// although the wordpress_site resource
  # above sets ssl_enabled true — confirm whether this should be https://.
  line.gsub!(/%%baseURL%%/, "http://join.osmfoundation.org/")
  line.gsub!(/%%siteKey%%/, site_key)
  # Uncomment the 'ext_repo_url' setting by stripping its leading "// ".
  line.gsub!(%r{// *(.*'ext_repo_url'.*)$}, "\\1")
  line
end
file "#{civicrm_directory}/civicrm.settings.php" do
owner "wordpress"
group "wordpress"
mode 0o644
content settings
end
template "/etc/cron.d/osmf-crm" do
source "cron.erb"
owner "root"
group "root"
mode 0o600
variables :directory => civicrm_directory, :passwords => passwords
end
template "/etc/cron.daily/osmf-crm-backup" do
source "backup.cron.erb"
owner "root"
group "root"
mode 0o750
variables :passwords => passwords
end
|
#
# Cookbook Name:: Hadoop
#
# Copyright RightScale, Inc. All rights reserved. All access and use subject to the
# RightScale Terms of Service available at http://www.rightscale.com/terms.php and,
# if applicable, other agreements such as a RightScale Master Subscription Agreement.
module RightScale
  module Hadoop
    # Helper methods shared by the hadoop recipes.
    module Helper
      # Get a list of hadoop host IPs from the RightScale server tags.
      #
      # Loads the "hosts" server collection for servers tagged with
      # hadoop:node_type=<type> and extracts each server's private IP.
      #
      # @param type [String] hadoop node type to look up
      #
      # @return [Set] private IP addresses of the matching servers
      def get_hosts(type)
        hadoop_servers = Set.new
        r = rightscale_server_collection "hosts" do
          tags "hadoop:node_type=#{type}"
          action :nothing
        end
        # Helper code runs outside convergence, so trigger the load now.
        r.run_action(:load)
        # BUGFIX: log the collection that the :load action actually
        # populates (and that the loop below iterates); the original logged
        # node[:server_collection], which is never written here.
        log "HOSTS: #{node[:rightscale_server_collection]['hosts'].inspect}"
        node[:rightscale_server_collection]['hosts'].to_hash.values.each do |tags|
          ip = RightScale::Utils::Helper.get_tag_value('server:private_ip_0', tags)
          # Set#add? silently ignores duplicates.
          hadoop_servers.add?(ip)
        end
        hadoop_servers
      end

      # Add public key for root to ssh to itself as needed by hadoop.
      #
      # @param public_ssh_key [String] public key to append to root's
      #   authorized_keys file
      #
      # @raise [RuntimeError] if the ssh key string is empty
      def add_public_key(public_ssh_key)
        Chef::Log.info(" Updating authorized_keys ")
        directory "/root/.ssh" do
          mode "0700"
          recursive true
          action :create
        end
        file "/root/.ssh/authorized_keys" do
          mode "0600"
          action :create_if_missing
        end
        # Equivalent to the original "#{public_ssh_key}" != "" check:
        # nil and "" are both rejected.
        if public_ssh_key.to_s != ""
          ruby_block "create_authorized_keys" do
            block do
              # SECURITY NOTE(review): the key is interpolated into a shell
              # command; a key containing a single quote would break the
              # command or inject shell. Consider File.open(..., "a") and
              # File.chmod instead of shelling out.
              system("echo '#{public_ssh_key}' >> /root/.ssh/authorized_keys")
              system("chmod 0600 /root/.ssh/authorized_keys")
            end
          end
        else
          raise " Missing Public ssh key"
        end
      end
    end
  end
end
move init and config into default
#
# Cookbook Name:: Hadoop
#
# Copyright RightScale, Inc. All rights reserved. All access and use subject to the
# RightScale Terms of Service available at http://www.rightscale.com/terms.php and,
# if applicable, other agreements such as a RightScale Master Subscription Agreement.
module RightScale
  module Hadoop
    # Helper methods shared by the hadoop recipes.
    module Helper
      # Get a list of hosts from the server tags.
      #
      # Loads the "hosts" server collection for servers tagged with
      # hadoop:node_type=<type> and extracts each server's private IP.
      #
      # @param type [String] hadoop node type to filter on
      # @return [Set] private IP addresses (Set#add? ignores duplicates;
      #   presumably nil if a server lacks the private-ip tag — TODO
      #   confirm get_tag_value's behavior for missing tags)
      def get_hosts(type)
        hadoop_servers = Set.new
        # Resources declared in helper code are not converged
        # automatically, hence action :nothing plus run_action below.
        r = rightscale_server_collection "hosts" do
          tags "hadoop:node_type=#{type}"
          action :nothing
        end
        r.run_action(:load)
        log "HOSTS: #{node[:rightscale_server_collection]['hosts'].inspect}"
        node[:rightscale_server_collection]['hosts'].to_hash.values.each do |tags|
          ip = RightScale::Utils::Helper.get_tag_value('server:private_ip_0', tags)
          hadoop_servers.add?(ip)
        end
        hadoop_servers
      end

      # Add public key for root to ssh to itself as needed by hadoop.
      #
      # @param public_ssh_key [String] public key to append to root's
      #   authorized_keys file
      #
      # @raise [RuntimeError] if ssh key string is empty
      def add_public_key(public_ssh_key)
        Chef::Log.info(" Updating authorized_keys ")
        directory "/root/.ssh" do
          mode "0700"
          recursive true
          action :create
        end
        file "/root/.ssh/authorized_keys" do
          mode "0600"
          action :create_if_missing
        end
        # Rejects both nil and "" (interpolation turns nil into "").
        if "#{public_ssh_key}" != ""
          ruby_block "create_authorized_keys" do
            block do
              # Writing key to file.
              # NOTE(review): the key is shell-interpolated; a key
              # containing a single quote would break this command —
              # consider File.open(..., "a") instead.
              system("echo '#{public_ssh_key}' >> /root/.ssh/authorized_keys")
              # Setting permissions
              system("chmod 0600 /root/.ssh/authorized_keys")
            end
          end
        else
          raise " Missing Public ssh key"
        end
      end
    end
  end
end
#
# Author:: Seth Chisamore (<schisamo@opscode.com>)
# Cookbook Name:: java
# Attributes:: default
#
# Copyright 2010, Opscode, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# remove the deprecated Ubuntu jdk packages
default['java']['remove_deprecated_packages'] = false
# default jdk attributes
default['java']['install_flavor'] = "openjdk"
default['java']['jdk_version'] = '6'
default['java']['arch'] = kernel['machine'] =~ /x86_64/ ? "x86_64" : "i586"
case platform
when "centos","redhat","fedora","scientific","amazon","oracle"
default['java']['java_home'] = "/usr/lib/jvm/java"
when "freebsd"
default['java']['java_home'] = "/usr/local/openjdk#{java['jdk_version']}"
when "arch"
default['java']['java_home'] = "/usr/lib/jvm/java-#{java['jdk_version']}-openjdk"
when "windows"
default['java']['install_flavor'] = "windows"
default['java']['windows']['url'] = nil
default['java']['windows']['package_name'] = "Java(TM) SE Development Kit 7 (64-bit)"
else
default['java']['java_home'] = "/usr/lib/jvm/default-java"
end
# if you change this to true, you can download directly from Oracle
default['java']['oracle']['accept_oracle_download_terms'] = false
# direct download paths for oracle, you have been warned!
# jdk6 attributes
default['java']['jdk']['6']['bin_cmds'] = [ "appletviewer", "apt", "ControlPanel", "extcheck", "HtmlConverter", "idlj", "jar", "jarsigner",
"java", "javac", "javadoc", "javah", "javap", "javaws", "jconsole", "jcontrol", "jdb", "jhat",
"jinfo", "jmap", "jps", "jrunscript", "jsadebugd", "jstack", "jstat", "jstatd", "jvisualvm",
"keytool", "native2ascii", "orbd", "pack200", "policytool", "rmic", "rmid", "rmiregistry",
"schemagen", "serialver", "servertool", "tnameserv", "unpack200", "wsgen", "wsimport", "xjc" ]
# x86_64
default['java']['jdk']['6']['x86_64']['url'] = 'http://download.oracle.com/otn-pub/java/jdk/6u43-b01/jdk-6u43-linux-x64.bin'
default['java']['jdk']['6']['x86_64']['checksum'] = '653c35ec6c64c3b127dea0afed1f16ba2b32efcaeae86fc02777b70e3e166aee'
# i586
default['java']['jdk']['6']['i586']['url'] = 'http://download.oracle.com/otn-pub/java/jdk/6u43-b01/jdk-6u43-linux-i586.bin'
default['java']['jdk']['6']['i586']['checksum'] = '616df04eddac146b1c67de00ae07bb2d1179f470f35f9e661e1f5075423cf4e1'
# jdk7 attributes
default['java']['jdk']['7']['bin_cmds'] = [ "appletviewer", "apt", "ControlPanel", "extcheck", "idlj", "jar", "jarsigner", "java", "javac",
"javadoc", "javafxpackager", "javah", "javap", "javaws", "jcmd", "jconsole", "jcontrol", "jdb",
"jhat", "jinfo", "jmap", "jps", "jrunscript", "jsadebugd", "jstack", "jstat", "jstatd", "jvisualvm",
"keytool", "native2ascii", "orbd", "pack200", "policytool", "rmic", "rmid", "rmiregistry",
"schemagen", "serialver", "servertool", "tnameserv", "unpack200", "wsgen", "wsimport", "xjc" ]
# x86_64
default['java']['jdk']['7']['x86_64']['url'] = 'http://download.oracle.com/otn-pub/java/jdk/7u17-b02/jdk-7u17-linux-x64.tar.gz'
default['java']['jdk']['7']['x86_64']['checksum'] = '8611ce31e0b7ecb99d34703ad89b29a545a3fb30356553be3674366cbe722782'
# i586
default['java']['jdk']['7']['i586']['url'] = 'http://download.oracle.com/otn-pub/java/jdk/7u17-b02/jdk-7u17-linux-i586.tar.gz'
default['java']['jdk']['7']['i586']['checksum'] = '4046e941e05717538dd4deb1b1f55cb8bb6bd38793c7317034d1f5019086d956'
Change to install Java from Oracle
#
# Author:: Seth Chisamore (<schisamo@opscode.com>)
# Cookbook Name:: java
# Attributes:: default
#
# Copyright 2010, Opscode, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# remove the deprecated Ubuntu jdk packages
default['java']['remove_deprecated_packages'] = false
# default jdk attributes
default['java']['install_flavor'] = "oracle"
#default['java']['install_flavor'] = "openjdk"
default['java']['jdk_version'] = '6'
default['java']['arch'] = kernel['machine'] =~ /x86_64/ ? "x86_64" : "i586"
case platform
when "centos","redhat","fedora","scientific","amazon","oracle"
default['java']['java_home'] = "/usr/lib/jvm/java"
when "freebsd"
default['java']['java_home'] = "/usr/local/openjdk#{java['jdk_version']}"
when "arch"
default['java']['java_home'] = "/usr/lib/jvm/java-#{java['jdk_version']}-openjdk"
when "windows"
default['java']['install_flavor'] = "windows"
default['java']['windows']['url'] = nil
default['java']['windows']['package_name'] = "Java(TM) SE Development Kit 7 (64-bit)"
else
default['java']['java_home'] = "/usr/lib/jvm/default-java"
end
# if you change this to true, you can download directly from Oracle
# NOTE(review): install_flavor is set to "oracle" earlier in this file,
# but this flag is still false — confirm whether direct Oracle downloads
# should be permitted, or whether an override is expected elsewhere.
default['java']['oracle']['accept_oracle_download_terms'] = false
# direct download paths for oracle, you have been warned!
# jdk6 attributes
default['java']['jdk']['6']['bin_cmds'] = [ "appletviewer", "apt", "ControlPanel", "extcheck", "HtmlConverter", "idlj", "jar", "jarsigner",
"java", "javac", "javadoc", "javah", "javap", "javaws", "jconsole", "jcontrol", "jdb", "jhat",
"jinfo", "jmap", "jps", "jrunscript", "jsadebugd", "jstack", "jstat", "jstatd", "jvisualvm",
"keytool", "native2ascii", "orbd", "pack200", "policytool", "rmic", "rmid", "rmiregistry",
"schemagen", "serialver", "servertool", "tnameserv", "unpack200", "wsgen", "wsimport", "xjc" ]
# x86_64
default['java']['jdk']['6']['x86_64']['url'] = 'http://download.oracle.com/otn-pub/java/jdk/6u43-b01/jdk-6u43-linux-x64.bin'
default['java']['jdk']['6']['x86_64']['checksum'] = '653c35ec6c64c3b127dea0afed1f16ba2b32efcaeae86fc02777b70e3e166aee'
# i586
default['java']['jdk']['6']['i586']['url'] = 'http://download.oracle.com/otn-pub/java/jdk/6u43-b01/jdk-6u43-linux-i586.bin'
default['java']['jdk']['6']['i586']['checksum'] = '616df04eddac146b1c67de00ae07bb2d1179f470f35f9e661e1f5075423cf4e1'
# jdk7 attributes
default['java']['jdk']['7']['bin_cmds'] = [ "appletviewer", "apt", "ControlPanel", "extcheck", "idlj", "jar", "jarsigner", "java", "javac",
"javadoc", "javafxpackager", "javah", "javap", "javaws", "jcmd", "jconsole", "jcontrol", "jdb",
"jhat", "jinfo", "jmap", "jps", "jrunscript", "jsadebugd", "jstack", "jstat", "jstatd", "jvisualvm",
"keytool", "native2ascii", "orbd", "pack200", "policytool", "rmic", "rmid", "rmiregistry",
"schemagen", "serialver", "servertool", "tnameserv", "unpack200", "wsgen", "wsimport", "xjc" ]
# x86_64
default['java']['jdk']['7']['x86_64']['url'] = 'http://download.oracle.com/otn-pub/java/jdk/7u17-b02/jdk-7u17-linux-x64.tar.gz'
default['java']['jdk']['7']['x86_64']['checksum'] = '8611ce31e0b7ecb99d34703ad89b29a545a3fb30356553be3674366cbe722782'
# i586
default['java']['jdk']['7']['i586']['url'] = 'http://download.oracle.com/otn-pub/java/jdk/7u17-b02/jdk-7u17-linux-i586.tar.gz'
default['java']['jdk']['7']['i586']['checksum'] = '4046e941e05717538dd4deb1b1f55cb8bb6bd38793c7317034d1f5019086d956'
|
#
# Cookbook Name:: taginfo
# Recipe:: default
#
# Copyright 2014, OpenStreetMap Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
require "json"
# Installs and configures the taginfo web application for every site in
# node[:taginfo][:sites]: build/runtime packages, source checkouts,
# per-site JSON configuration, the planet file, and the update scripts
# driven by /etc/cron.d/taginfo.
include_recipe "apache"
include_recipe "passenger"
include_recipe "git"
# Build-time dependencies for tagstats and osmium-tool.
package %w[
  libsqlite3-dev
  zlib1g-dev
  libbz2-dev
  libboost-dev
  libexpat1-dev
  libsparsehash-dev
  libgd-dev
  libicu-dev
  libboost-program-options-dev
  cmake
  make
  g++
]
# Runtime dependencies for the update scripts.
package %w[
  sqlite3
  pyosmium
  curl
  pbzip2
]
ruby_version = node[:passenger][:ruby_version]
package "ruby#{ruby_version}"
gem_package "bundler#{ruby_version}" do
  package_name "bundler"
  gem_binary "gem#{ruby_version}"
  options "--format-executable"
end
apache_module "cache"
apache_module "cache_disk"
apache_module "headers"
template "/etc/cron.d/taginfo" do
  source "cron.erb"
  owner "root"
  group "root"
  mode 0o644
end
directory "/var/log/taginfo" do
  owner "taginfo"
  group "taginfo"
  mode 0o755
end
# Log rotation is handled by the update scripts, not logrotate.
file "/etc/logrotate.d/taginfo" do
  action :delete
end
template "/etc/sudoers.d/taginfo" do
  source "sudoers.erb"
  owner "root"
  group "root"
  mode 0o440
end
node[:taginfo][:sites].each do |site|
  site_name = site[:name]
  site_aliases = Array(site[:aliases])
  directory = site[:directory] || "/srv/#{site_name}"
  description = site[:description]
  about = site[:about]
  icon = site[:icon]
  contact = site[:contact]
  directory "/var/log/taginfo/#{site_name}" do
    owner "taginfo"
    group "taginfo"
    mode 0o755
  end
  directory directory do
    owner "taginfo"
    group "taginfo"
    mode 0o755
  end
  git "#{directory}/libosmium" do
    action :sync
    repository "git://github.com/osmcode/libosmium.git"
    revision "v2.12.1"
    user "taginfo"
    group "taginfo"
  end
  git "#{directory}/osmium-tool" do
    action :sync
    repository "git://github.com/osmcode/osmium-tool.git"
    revision "v1.6.1"
    user "taginfo"
    group "taginfo"
  end
  git "#{directory}/taginfo" do
    action :sync
    repository "git://github.com/taginfo/taginfo.git"
    revision "osmorg-taginfo-live"
    user "taginfo"
    group "taginfo"
  end
  # Delayed so the example config is read only after the checkout above
  # has converged.
  settings = Chef::DelayedEvaluator.new do
    settings = JSON.parse(IO.read("#{directory}/taginfo/taginfo-config-example.json"))
    settings["instance"]["url"] = "https://#{site_name}/"
    settings["instance"]["description"] = description
    settings["instance"]["about"] = about
    settings["instance"]["icon"] = "/img/logo/#{icon}.png"
    settings["instance"]["contact"] = contact
    settings["instance"]["access_control_allow_origin"] = ""
    settings["logging"]["directory"] = "/var/log/taginfo/#{site_name}"
    settings["opensearch"]["shortname"] = "Taginfo"
    settings["opensearch"]["contact"] = "webmaster@openstreetmap.org"
    settings["sources"]["download"] = ""
    settings["sources"]["create"] = "db languages projects wiki"
    settings["sources"]["db"]["planetfile"] = "#{directory}/planet/planet.pbf"
    settings["sources"]["db"]["bindir"] = "#{directory}/taginfo/tagstats"
    settings["tagstats"]["cxxflags"] = "-I../../libosmium/include"
    settings["tagstats"]["geodistribution"] = "DenseMmapArray"
    JSON.pretty_generate(settings)
  end
  file "#{directory}/taginfo-config.json" do
    owner "taginfo"
    group "taginfo"
    mode 0o644
    content settings
    notifies :restart, "service[apache2]"
  end
  execute "#{directory}/taginfo/tagstats/Makefile" do
    action :nothing
    command "make"
    cwd "#{directory}/taginfo/tagstats"
    user "taginfo"
    group "taginfo"
    subscribes :run, "git[#{directory}/libosmium]"
    subscribes :run, "git[#{directory}/taginfo]"
    notifies :restart, "service[apache2]"
  end
  directory "#{directory}/osmium-tool/build" do
    owner "taginfo"
    group "taginfo"
    # BUG FIX: was the string "0755"; every other mode in this recipe
    # uses an octal integer literal — made consistent.
    mode 0o755
  end
  execute "compile-osmium" do
    action :nothing
    command "cmake .. && make"
    cwd "#{directory}/osmium-tool/build"
    user "taginfo"
    group "taginfo"
    subscribes :run, "git[#{directory}/libosmium]"
    subscribes :run, "git[#{directory}/osmium-tool]"
  end
  execute "#{directory}/taginfo/Gemfile" do
    action :nothing
    command "bundle#{ruby_version} install"
    cwd "#{directory}/taginfo"
    user "root"
    group "root"
    subscribes :run, "gem_package[bundler#{ruby_version}]"
    subscribes :run, "git[#{directory}/taginfo]"
    notifies :restart, "passenger_application[#{directory}/taginfo/web/public]"
  end
  %w[taginfo/web/tmp bin data data/old download sources planet planet/log].each do |dir|
    directory "#{directory}/#{dir}" do
      owner "taginfo"
      group "taginfo"
      mode 0o755
    end
  end
  # Only seeds the planet file; subsequent updates come from bin/update-planet.
  remote_file "#{directory}/planet/planet.pbf" do
    action :create_if_missing
    source "https://planet.openstreetmap.org/pbf/planet-latest.osm.pbf"
    owner "taginfo"
    group "taginfo"
    mode 0o644
  end
  template "#{directory}/bin/update-planet" do
    source "update-planet.erb"
    owner "taginfo"
    group "taginfo"
    mode 0o755
    variables :directory => directory
  end
  template "#{directory}/bin/update-taginfo" do
    source "update-taginfo.erb"
    owner "taginfo"
    group "taginfo"
    mode 0o755
    variables :directory => directory
  end
  template "#{directory}/bin/update" do
    source "update.erb"
    owner "taginfo"
    group "taginfo"
    mode 0o755
    variables :name => site_name, :directory => directory
  end
  passenger_application "#{directory}/taginfo/web/public"
  ssl_certificate site_name do
    domains [site_name] + site_aliases
    notifies :reload, "service[apache2]"
  end
  apache_site site_name do
    template "apache.erb"
    directory "#{directory}/taginfo/web/public"
    variables :aliases => site_aliases
  end
end
Use the libosmium and protozero distribution packages on the taginfo server
instead of fetching them from git.
#
# Cookbook Name:: taginfo
# Recipe:: default
#
# Copyright 2014, OpenStreetMap Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
require "json"
# Installs and configures the taginfo web application for every site in
# node[:taginfo][:sites].  This version uses the packaged libosmium /
# protozero / osmium-tool instead of git checkouts.
include_recipe "apache"
include_recipe "passenger"
include_recipe "git"
# Build-time dependencies for tagstats.
package %w[
  libsqlite3-dev
  zlib1g-dev
  libbz2-dev
  libboost-dev
  libexpat1-dev
  libsparsehash-dev
  libgd-dev
  libicu-dev
  libboost-program-options-dev
  libosmium2-dev
  libprotozero-dev
  cmake
  make
  g++
]
# Runtime dependencies for the update scripts.
package %w[
  sqlite3
  osmium-tool
  pyosmium
  curl
  pbzip2
]
ruby_version = node[:passenger][:ruby_version]
package "ruby#{ruby_version}"
gem_package "bundler#{ruby_version}" do
  package_name "bundler"
  gem_binary "gem#{ruby_version}"
  options "--format-executable"
end
apache_module "cache"
apache_module "cache_disk"
apache_module "headers"
template "/etc/cron.d/taginfo" do
  source "cron.erb"
  owner "root"
  group "root"
  mode 0o644
end
directory "/var/log/taginfo" do
  owner "taginfo"
  group "taginfo"
  mode 0o755
end
# Log rotation is handled by the update scripts, not logrotate.
file "/etc/logrotate.d/taginfo" do
  action :delete
end
template "/etc/sudoers.d/taginfo" do
  source "sudoers.erb"
  owner "root"
  group "root"
  mode 0o440
end
node[:taginfo][:sites].each do |site|
  site_name = site[:name]
  site_aliases = Array(site[:aliases])
  directory = site[:directory] || "/srv/#{site_name}"
  description = site[:description]
  about = site[:about]
  icon = site[:icon]
  contact = site[:contact]
  directory "/var/log/taginfo/#{site_name}" do
    owner "taginfo"
    group "taginfo"
    mode 0o755
  end
  directory directory do
    owner "taginfo"
    group "taginfo"
    mode 0o755
  end
  git "#{directory}/taginfo" do
    action :sync
    repository "git://github.com/taginfo/taginfo.git"
    revision "osmorg-taginfo-live"
    user "taginfo"
    group "taginfo"
  end
  # Delayed so the example config is read only after the checkout above
  # has converged.
  settings = Chef::DelayedEvaluator.new do
    settings = JSON.parse(IO.read("#{directory}/taginfo/taginfo-config-example.json"))
    settings["instance"]["url"] = "https://#{site_name}/"
    settings["instance"]["description"] = description
    settings["instance"]["about"] = about
    settings["instance"]["icon"] = "/img/logo/#{icon}.png"
    settings["instance"]["contact"] = contact
    settings["instance"]["access_control_allow_origin"] = ""
    settings["logging"]["directory"] = "/var/log/taginfo/#{site_name}"
    settings["opensearch"]["shortname"] = "Taginfo"
    settings["opensearch"]["contact"] = "webmaster@openstreetmap.org"
    settings["sources"]["download"] = ""
    settings["sources"]["create"] = "db languages projects wiki"
    settings["sources"]["db"]["planetfile"] = "#{directory}/planet/planet.pbf"
    settings["sources"]["db"]["bindir"] = "#{directory}/taginfo/tagstats"
    settings["tagstats"]["geodistribution"] = "DenseMmapArray"
    JSON.pretty_generate(settings)
  end
  file "#{directory}/taginfo-config.json" do
    owner "taginfo"
    group "taginfo"
    mode 0o644
    content settings
    notifies :restart, "service[apache2]"
  end
  execute "#{directory}/taginfo/tagstats/Makefile" do
    action :nothing
    command "make"
    cwd "#{directory}/taginfo/tagstats"
    user "taginfo"
    group "taginfo"
    # NOTE(review): the dev packages above are declared through a single
    # multi-package "package" resource; confirm these apt_package[...]
    # subscription names actually resolve at converge time.
    subscribes :run, "apt_package[libprotozero-dev]"
    subscribes :run, "apt_package[libosmium2-dev]"
    subscribes :run, "git[#{directory}/taginfo]"
    notifies :restart, "service[apache2]"
  end
  execute "#{directory}/taginfo/Gemfile" do
    action :nothing
    command "bundle#{ruby_version} install"
    cwd "#{directory}/taginfo"
    user "root"
    group "root"
    subscribes :run, "gem_package[bundler#{ruby_version}]"
    subscribes :run, "git[#{directory}/taginfo]"
    notifies :restart, "passenger_application[#{directory}/taginfo/web/public]"
  end
  %w[taginfo/web/tmp bin data data/old download sources planet planet/log].each do |dir|
    directory "#{directory}/#{dir}" do
      owner "taginfo"
      group "taginfo"
      mode 0o755
    end
  end
  # Only seeds the planet file; subsequent updates come from bin/update-planet.
  remote_file "#{directory}/planet/planet.pbf" do
    action :create_if_missing
    source "https://planet.openstreetmap.org/pbf/planet-latest.osm.pbf"
    owner "taginfo"
    group "taginfo"
    mode 0o644
  end
  template "#{directory}/bin/update-planet" do
    source "update-planet.erb"
    owner "taginfo"
    group "taginfo"
    mode 0o755
    variables :directory => directory
  end
  template "#{directory}/bin/update-taginfo" do
    source "update-taginfo.erb"
    owner "taginfo"
    group "taginfo"
    mode 0o755
    variables :directory => directory
  end
  template "#{directory}/bin/update" do
    source "update.erb"
    owner "taginfo"
    group "taginfo"
    mode 0o755
    variables :name => site_name, :directory => directory
  end
  passenger_application "#{directory}/taginfo/web/public"
  ssl_certificate site_name do
    domains [site_name] + site_aliases
    notifies :reload, "service[apache2]"
  end
  apache_site site_name do
    template "apache.erb"
    directory "#{directory}/taginfo/web/public"
    variables :aliases => site_aliases
  end
end
|
#
# Cookbook Name:: taginfo
# Recipe:: default
#
# Copyright 2014, OpenStreetMap Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
require "json"
# Installs and configures the taginfo web application (older revision:
# individual package resources, git-built libosmium, osmosis-based
# replication, plain-HTTP URLs).
include_recipe "apache::ssl"
include_recipe "passenger"
include_recipe "git"
# Build-time dependencies for tagstats.
package "libsqlite3-dev"
package "zlib1g-dev"
package "libbz2-dev"
package "libboost-dev"
package "libexpat1-dev"
package "libsparsehash-dev"
package "libgd2-xpm-dev"
package "libicu-dev"
package "cmake"
package "make"
package "g++"
# Runtime dependencies for the update scripts.
package "sqlite3"
package "osmosis"
package "curl"
package "m4"
package "ruby#{node[:passenger][:ruby_version]}"
package "rubygems#{node[:passenger][:ruby_version]}"
# Gems required by the taginfo web application.
gem_package "json"
gem_package "sqlite3"
gem_package "sinatra"
gem_package "sinatra-r18n"
gem_package "rack-contrib"
apache_module "cache"
apache_module "cache_disk"
apache_module "headers"
template "/etc/cron.d/taginfo" do
  source "cron.erb"
  owner "root"
  group "root"
  mode 0644
end
directory "/var/log/taginfo" do
  owner "taginfo"
  group "taginfo"
  mode 0755
end
# Log rotation is handled by the update scripts, not logrotate.
file "/etc/logrotate.d/taginfo" do
  action :delete
end
template "/etc/sudoers.d/taginfo" do
  source "sudoers.erb"
  owner "root"
  group "root"
  mode 0440
end
node[:taginfo][:sites].each do |site|
  name = site[:name]
  directory = site[:directory] || "/srv/#{name}"
  description = site[:description]
  about = site[:about]
  icon = site[:icon]
  contact = site[:contact]
  directory "/var/log/taginfo/#{name}" do
    owner "taginfo"
    group "taginfo"
    mode 0755
  end
  directory directory do
    owner "taginfo"
    group "taginfo"
    mode 0755
  end
  git "#{directory}/libosmium" do
    action :sync
    repository "git://github.com/osmcode/libosmium.git"
    revision "v2.5.2"
    user "taginfo"
    group "taginfo"
  end
  git "#{directory}/taginfo" do
    action :sync
    repository "git://github.com/joto/taginfo.git"
    revision "osmorg-taginfo-live"
    user "taginfo"
    group "taginfo"
  end
  # Delayed so the example config is read only after the checkout above
  # has converged.
  settings = Chef::DelayedEvaluator.new do
    settings = JSON.parse(IO.read("#{directory}/taginfo/taginfo-config-example.json"))
    settings["instance"]["url"] = "http://#{name}/"
    settings["instance"]["description"] = description
    settings["instance"]["about"] = about
    settings["instance"]["icon"] = "/img/logo/#{icon}.png"
    settings["instance"]["contact"] = contact
    settings["instance"]["access_control_allow_origin"] = ""
    settings["logging"]["directory"] = "/var/log/taginfo/#{name}"
    settings["opensearch"]["shortname"] = "Taginfo"
    settings["opensearch"]["contact"] = "webmaster@openstreetmap.org"
    settings["sources"]["download"] = ""
    settings["sources"]["create"] = "db languages projects wiki"
    settings["sources"]["db"]["planetfile"] = "#{directory}/planet/planet.pbf"
    settings["sources"]["db"]["bindir"] = "#{directory}/taginfo/tagstats"
    settings["sources"]["db"]["tagstats"] = "#{directory}/taginfo/tagstats/tagstats"
    settings["tagstats"]["cxxflags"] = "-I../../libosmium/include"
    settings["tagstats"]["geodistribution"] = "DenseMmapArray"
    settings["user_interface"]["key_page"]["show_tab_similar"] = true
    settings["level0"]["overpass_url_prefix"] = "http://overpass-api.de/api/interpreter?"
    JSON.pretty_generate(settings)
  end
  file "#{directory}/taginfo-config.json" do
    owner "taginfo"
    group "taginfo"
    mode 0644
    content settings
    notifies :restart, "service[apache2]"
  end
  execute "#{directory}/taginfo/tagstats/Makefile" do
    action :nothing
    command "make"
    cwd "#{directory}/taginfo/tagstats"
    user "taginfo"
    group "taginfo"
    subscribes :run, "git[#{directory}/libosmium]"
    subscribes :run, "git[#{directory}/taginfo]"
    notifies :restart, "service[apache2]"
  end
  %w(taginfo/web/tmp bin data data/old download sources planet planet/log planet/replication).each do |dir|
    directory "#{directory}/#{dir}" do
      owner "taginfo"
      group "taginfo"
      mode 0755
    end
  end
  # Only seeds the planet file; subsequent updates come via replication.
  remote_file "#{directory}/planet/planet.pbf" do
    action :create_if_missing
    source "http://planet.openstreetmap.org/pbf/planet-latest.osm.pbf"
    owner "taginfo"
    group "taginfo"
    mode 0644
  end
  template "#{directory}/planet/replication/configuration.txt" do
    source "configuration.txt.erb"
    owner "taginfo"
    group "taginfo"
    mode 0644
  end
  # Lock file used by the replication download process.
  file "#{directory}/planet/replication/download.lock" do
    owner "taginfo"
    group "taginfo"
    mode 0644
  end
  template "#{directory}/bin/update-planet" do
    source "update-planet.erb"
    owner "taginfo"
    group "taginfo"
    mode 0755
    variables :directory => directory
  end
  template "#{directory}/bin/update-taginfo" do
    source "update-taginfo.erb"
    owner "taginfo"
    group "taginfo"
    mode 0755
    variables :directory => directory
  end
  template "#{directory}/bin/update" do
    source "update.erb"
    owner "taginfo"
    group "taginfo"
    mode 0755
    variables :name => name, :directory => directory
  end
  apache_site name do
    template "apache.erb"
    directory "#{directory}/taginfo/web/public"
  end
end
Add package libboost-program-options-dev to taginfo server.
Needed for osmium-tool.
#
# Cookbook Name:: taginfo
# Recipe:: default
#
# Copyright 2014, OpenStreetMap Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
require "json"
# Installs and configures the taginfo web application (older revision;
# adds libboost-program-options-dev, needed to build osmium-tool).
include_recipe "apache::ssl"
include_recipe "passenger"
include_recipe "git"
# Build-time dependencies for tagstats / osmium-tool.
package "libsqlite3-dev"
package "zlib1g-dev"
package "libbz2-dev"
package "libboost-dev"
package "libexpat1-dev"
package "libsparsehash-dev"
package "libgd2-xpm-dev"
package "libicu-dev"
package "libboost-program-options-dev"
package "cmake"
package "make"
package "g++"
# Runtime dependencies for the update scripts.
package "sqlite3"
package "osmosis"
package "curl"
package "m4"
package "ruby#{node[:passenger][:ruby_version]}"
package "rubygems#{node[:passenger][:ruby_version]}"
# Gems required by the taginfo web application.
gem_package "json"
gem_package "sqlite3"
gem_package "sinatra"
gem_package "sinatra-r18n"
gem_package "rack-contrib"
apache_module "cache"
apache_module "cache_disk"
apache_module "headers"
template "/etc/cron.d/taginfo" do
  source "cron.erb"
  owner "root"
  group "root"
  mode 0644
end
directory "/var/log/taginfo" do
  owner "taginfo"
  group "taginfo"
  mode 0755
end
# Log rotation is handled by the update scripts, not logrotate.
file "/etc/logrotate.d/taginfo" do
  action :delete
end
template "/etc/sudoers.d/taginfo" do
  source "sudoers.erb"
  owner "root"
  group "root"
  mode 0440
end
node[:taginfo][:sites].each do |site|
  name = site[:name]
  directory = site[:directory] || "/srv/#{name}"
  description = site[:description]
  about = site[:about]
  icon = site[:icon]
  contact = site[:contact]
  directory "/var/log/taginfo/#{name}" do
    owner "taginfo"
    group "taginfo"
    mode 0755
  end
  directory directory do
    owner "taginfo"
    group "taginfo"
    mode 0755
  end
  git "#{directory}/libosmium" do
    action :sync
    repository "git://github.com/osmcode/libosmium.git"
    revision "v2.5.2"
    user "taginfo"
    group "taginfo"
  end
  git "#{directory}/taginfo" do
    action :sync
    repository "git://github.com/joto/taginfo.git"
    revision "osmorg-taginfo-live"
    user "taginfo"
    group "taginfo"
  end
  # Delayed so the example config is read only after the checkout above
  # has converged.
  settings = Chef::DelayedEvaluator.new do
    settings = JSON.parse(IO.read("#{directory}/taginfo/taginfo-config-example.json"))
    settings["instance"]["url"] = "http://#{name}/"
    settings["instance"]["description"] = description
    settings["instance"]["about"] = about
    settings["instance"]["icon"] = "/img/logo/#{icon}.png"
    settings["instance"]["contact"] = contact
    settings["instance"]["access_control_allow_origin"] = ""
    settings["logging"]["directory"] = "/var/log/taginfo/#{name}"
    settings["opensearch"]["shortname"] = "Taginfo"
    settings["opensearch"]["contact"] = "webmaster@openstreetmap.org"
    settings["sources"]["download"] = ""
    settings["sources"]["create"] = "db languages projects wiki"
    settings["sources"]["db"]["planetfile"] = "#{directory}/planet/planet.pbf"
    settings["sources"]["db"]["bindir"] = "#{directory}/taginfo/tagstats"
    settings["sources"]["db"]["tagstats"] = "#{directory}/taginfo/tagstats/tagstats"
    settings["tagstats"]["cxxflags"] = "-I../../libosmium/include"
    settings["tagstats"]["geodistribution"] = "DenseMmapArray"
    settings["user_interface"]["key_page"]["show_tab_similar"] = true
    settings["level0"]["overpass_url_prefix"] = "http://overpass-api.de/api/interpreter?"
    JSON.pretty_generate(settings)
  end
  file "#{directory}/taginfo-config.json" do
    owner "taginfo"
    group "taginfo"
    mode 0644
    content settings
    notifies :restart, "service[apache2]"
  end
  execute "#{directory}/taginfo/tagstats/Makefile" do
    action :nothing
    command "make"
    cwd "#{directory}/taginfo/tagstats"
    user "taginfo"
    group "taginfo"
    subscribes :run, "git[#{directory}/libosmium]"
    subscribes :run, "git[#{directory}/taginfo]"
    notifies :restart, "service[apache2]"
  end
  %w(taginfo/web/tmp bin data data/old download sources planet planet/log planet/replication).each do |dir|
    directory "#{directory}/#{dir}" do
      owner "taginfo"
      group "taginfo"
      mode 0755
    end
  end
  # Only seeds the planet file; subsequent updates come via replication.
  remote_file "#{directory}/planet/planet.pbf" do
    action :create_if_missing
    source "http://planet.openstreetmap.org/pbf/planet-latest.osm.pbf"
    owner "taginfo"
    group "taginfo"
    mode 0644
  end
  template "#{directory}/planet/replication/configuration.txt" do
    source "configuration.txt.erb"
    owner "taginfo"
    group "taginfo"
    mode 0644
  end
  # Lock file used by the replication download process.
  file "#{directory}/planet/replication/download.lock" do
    owner "taginfo"
    group "taginfo"
    mode 0644
  end
  template "#{directory}/bin/update-planet" do
    source "update-planet.erb"
    owner "taginfo"
    group "taginfo"
    mode 0755
    variables :directory => directory
  end
  template "#{directory}/bin/update-taginfo" do
    source "update-taginfo.erb"
    owner "taginfo"
    group "taginfo"
    mode 0755
    variables :directory => directory
  end
  template "#{directory}/bin/update" do
    source "update.erb"
    owner "taginfo"
    group "taginfo"
    mode 0755
    variables :name => name, :directory => directory
  end
  apache_site name do
    template "apache.erb"
    directory "#{directory}/taginfo/web/public"
  end
end
|
# -*- encoding: utf-8 -*-
require File.expand_path('../lib/light-service/version', __FILE__)

# Gem packaging metadata for light-service.
Gem::Specification.new do |gem|
  gem.authors       = ["Attila Domokos"]
  gem.email         = ["adomokos@gmail.com"]
  gem.description   = %q{A service skeleton with an emphasis on simplicity}
  gem.summary       = %q{A service skeleton with an emphasis on simplicity}
  # BUG FIX: homepage was an empty string, which makes `gem build` warn
  # and leaves the rubygems.org listing without a project link.
  gem.homepage      = "https://github.com/adomokos/light-service"
  gem.license       = "MIT"
  # BUG FIX: was split($\) — $\ is the OUTPUT record separator (nil by
  # default), so the list was split on arbitrary whitespace and would
  # break for paths containing spaces.  $/ ("\n") matches the
  # one-path-per-line output of `git ls-files`.
  gem.files         = `git ls-files`.split($/)
  gem.executables   = gem.files.grep(%r{^bin/}).map{ |f| File.basename(f) }
  gem.test_files    = gem.files.grep(%r{^(test|spec|features)/})
  gem.name          = "light-service"
  gem.require_paths = ["lib"]
  gem.version       = LightService::VERSION
  gem.add_development_dependency("rspec", "~> 2.0")
  gem.add_development_dependency("simplecov", "~> 0.7.1")
  gem.add_development_dependency("pry", "0.9.12.2")
end
Set the gem homepage — currently it points to the GitHub repository.
# -*- encoding: utf-8 -*-
require File.expand_path('../lib/light-service/version', __FILE__)

# Gem packaging metadata for light-service.
Gem::Specification.new do |gem|
  gem.authors       = ["Attila Domokos"]
  gem.email         = ["adomokos@gmail.com"]
  gem.description   = %q{A service skeleton with an emphasis on simplicity}
  gem.summary       = %q{A service skeleton with an emphasis on simplicity}
  gem.homepage      = "https://github.com/adomokos/light-service"
  gem.license       = "MIT"
  # BUG FIX: was split($\) — $\ is the OUTPUT record separator (nil by
  # default), so the list was split on arbitrary whitespace and would
  # break for paths containing spaces.  $/ ("\n") matches the
  # one-path-per-line output of `git ls-files`.
  gem.files         = `git ls-files`.split($/)
  gem.executables   = gem.files.grep(%r{^bin/}).map{ |f| File.basename(f) }
  gem.test_files    = gem.files.grep(%r{^(test|spec|features)/})
  gem.name          = "light-service"
  gem.require_paths = ["lib"]
  gem.version       = LightService::VERSION
  gem.add_development_dependency("rspec", "~> 2.0")
  gem.add_development_dependency("simplecov", "~> 0.7.1")
  gem.add_development_dependency("pry", "0.9.12.2")
end
|
# Wraps the Google Admin Directory API for managing a single Google
# Apps account identified by +email+ (bare local part or full address).
class GoogleAccount
  attr_reader :email

  # Raised when a Directory API call returns an error response.
  class GoogleAppsAPIError < RuntimeError; end

  def initialize(email)
    @email = email
  end

  # True if an account with this primary email or alias exists.
  def exists?
    result = api.execute(
      api_method: directory.users.get,
      # This will find by primary email or aliases according to Google's documentation
      parameters: {userKey: full_email}
    )
    return false unless result.success?
    result.data.emails.map{|e| e['address']}.include? full_email
  end

  def available?
    !exists?
  end

  # Creates the account if missing, otherwise updates it.
  # Returns :create or :update accordingly.
  def create_or_update!(first_name, last_name, department, title, privacy)
    if exists?
      update! first_name, last_name, department, title, privacy
      :update
    else
      create! first_name, last_name, department, title, privacy
      :create
    end
  end

  # Creates the account with a random initial password.
  # Raises GoogleAppsAPIError on failure; returns true on success.
  def create!(first_name, last_name, department, title, privacy)
    params = {
      primaryEmail: full_email,
      password: GoogleAccount.random_password,
      name: {
        familyName: last_name,
        givenName: first_name
      },
      organizations: [
        department: department,
        title: title
      ],
      # privacy == true hides the user from the global address list
      includeInGlobalAddressList: !privacy
    }
    new_user = directory.users.insert.request_schema.new(params)
    result = api.execute api_method: directory.users.insert, body_object: new_user
    raise GoogleAppsAPIError, result.data['error']['message'] unless result.success?
    true
  end

  # Updates name/organization/visibility of an existing account.
  # Raises GoogleAppsAPIError on failure; returns true on success.
  def update!(first_name, last_name, department, title, privacy)
    params = {
      name: {
        givenName: first_name,
        familyName: last_name
      },
      organizations: [
        department: department,
        title: title
      ],
      includeInGlobalAddressList: !privacy
    }
    user_updates = directory.users.update.request_schema.new(params)
    result = api.execute api_method: directory.users.update, parameters: {userKey: full_email}, body_object: user_updates
    raise GoogleAppsAPIError, result.data['error']['message'] unless result.success?
    true
  end

  def exists_in_group?(full_email, group)
    group = GoogleAccount.group_to_email(group)
    result = api.execute( api_method: directory.members.get, parameters: {groupKey: group, memberKey: full_email} )
    return result.success?
  end

  # Adds the account to +group+; returns false when already a member.
  def join!(group, role = 'MEMBER')
    unless exists_in_group?(full_email, group)
      group = GoogleAccount.group_to_email(group)
      params = {email: full_email, role: role}
      new_member = directory.members.insert.request_schema.new(params)
      result = api.execute api_method: directory.members.insert, parameters: {groupKey: group}, body_object: new_member
      raise GoogleAppsAPIError, result.data['error']['message'] unless result.success?
      # BUG FIX: was `return true if result.succes?` (typo), which raised
      # NoMethodError on every successful insert.  The raise above already
      # guarantees success here.
      true
    else
      false
    end
  end

  # Removes the account from +group+; returns false when not a member.
  def leave!(group)
    if exists_in_group?(full_email, group)
      group = GoogleAccount.group_to_email(group)
      result = api.execute api_method: directory.members.delete, parameters: {groupKey: group, memberKey: full_email}
      raise GoogleAppsAPIError, result.data['error']['message'] unless result.success?
      # The raise above guarantees success at this point.
      true
    else
      false
    end
  end

  def full_email
    GoogleAccount.full_email(email)
  end

  # Expands a bare local part to local@<configured domain>; addresses
  # that already contain '@' pass through unchanged.
  def self.full_email(email)
    if email.include? '@'
      email
    else
      "#{email}@#{Settings.google.domain}"
    end
  end

  # Normalises a group name to its email form: lowercased, with runs of
  # non-alphanumerics collapsed to '.'.
  def self.group_to_email(group_name)
    full_email(group_name.to_s.downcase.gsub(/[^a-z0-9]+/, '.'))
  end

  # Random lowercase-alphanumeric password (base-36 digits).
  def self.random_password
    rand(36**rand(16..42)).to_s(36)
  end

  private

  # Lazily builds and memoises an authorised Google API client using
  # OAuth2 service-account credentials from Settings.
  def api
    return @api unless @api.nil?
    @api = Google::APIClient.new(
      application_name: Settings.google.api_client.application_name,
      application_version: Settings.google.api_client.application_version)
    key = Google::APIClient::KeyUtils.load_from_pkcs12(Settings.google.api_client.key_path, Settings.google.api_client.secret)
    @api.authorization = Signet::OAuth2::Client.new(
      token_credential_uri: 'https://accounts.google.com/o/oauth2/token',
      audience: 'https://accounts.google.com/o/oauth2/token',
      scope: Settings.google.api_client.scopes,
      issuer: Settings.google.api_client.issuer,
      signing_key: key)
    @api.authorization.person = Settings.google.api_client.person
    @api.authorization.fetch_access_token!
    @api
  end

  # Memoised handle on the Admin Directory v1 API description.
  def directory
    @directory ||= api.discovered_api('admin', 'directory_v1')
  end
end
Refactor the join/leave methods
# Wraps the Google Admin Directory API for managing a single Google
# Apps account identified by +email+ (bare local part or full address).
class GoogleAccount
  attr_reader :email

  # Raised when a Directory API call returns an error response.
  class GoogleAppsAPIError < RuntimeError; end

  def initialize(email)
    @email = email
  end

  # True if an account with this primary email or alias exists.
  def exists?
    result = api.execute(
      api_method: directory.users.get,
      # This will find by primary email or aliases according to Google's documentation
      parameters: {userKey: full_email}
    )
    return false unless result.success?
    result.data.emails.map{|e| e['address']}.include? full_email
  end

  def available?
    !exists?
  end

  # Creates the account if missing, otherwise updates it.
  # Returns :create or :update accordingly.
  def create_or_update!(first_name, last_name, department, title, privacy)
    if exists?
      update! first_name, last_name, department, title, privacy
      :update
    else
      create! first_name, last_name, department, title, privacy
      :create
    end
  end

  # Creates the account with a random initial password.
  # Raises GoogleAppsAPIError on failure; returns true on success.
  def create!(first_name, last_name, department, title, privacy)
    params = {
      primaryEmail: full_email,
      password: GoogleAccount.random_password,
      name: {
        familyName: last_name,
        givenName: first_name
      },
      organizations: [
        department: department,
        title: title
      ],
      # privacy == true hides the user from the global address list
      includeInGlobalAddressList: !privacy
    }
    new_user = directory.users.insert.request_schema.new(params)
    result = api.execute api_method: directory.users.insert, body_object: new_user
    raise GoogleAppsAPIError, result.data['error']['message'] unless result.success?
    true
  end

  # Updates name/organization/visibility of an existing account.
  # Raises GoogleAppsAPIError on failure; returns true on success.
  def update!(first_name, last_name, department, title, privacy)
    params = {
      name: {
        givenName: first_name,
        familyName: last_name
      },
      organizations: [
        department: department,
        title: title
      ],
      includeInGlobalAddressList: !privacy
    }
    user_updates = directory.users.update.request_schema.new(params)
    result = api.execute api_method: directory.users.update, parameters: {userKey: full_email}, body_object: user_updates
    raise GoogleAppsAPIError, result.data['error']['message'] unless result.success?
    true
  end

  def exists_in_group?(group)
    group = GoogleAccount.group_to_email(group)
    result = api.execute( api_method: directory.members.get, parameters: {groupKey: group, memberKey: full_email} )
    return result.success?
  end

  # Adds the account to +group+; returns false when already a member.
  def join!(group, role = 'MEMBER')
    # BUG FIX: the guard was inverted (`return false unless ...`), which
    # bailed out for non-members and tried to re-add existing members.
    return false if exists_in_group?(group)
    group = GoogleAccount.group_to_email(group)
    params = {email: full_email, role: role}
    new_member = directory.members.insert.request_schema.new(params)
    result = api.execute api_method: directory.members.insert, parameters: {groupKey: group}, body_object: new_member
    raise GoogleAppsAPIError, result.data['error']['message'] unless result.success?
    # BUG FIX: was `return true if result.succes?` (typo); the raise
    # above already guarantees success here.
    true
  end

  # Removes the account from +group+; returns false when not a member.
  def leave!(group)
    # BUG FIX: the guard was inverted (`return false if ...`), which
    # bailed out for members and attempted deletion for non-members.
    return false unless exists_in_group?(group)
    group = GoogleAccount.group_to_email(group)
    result = api.execute api_method: directory.members.delete, parameters: {groupKey: group, memberKey: full_email}
    raise GoogleAppsAPIError, result.data['error']['message'] unless result.success?
    true
  end

  def full_email
    GoogleAccount.full_email(email)
  end

  # Expands a bare local part to local@<configured domain>; addresses
  # that already contain '@' pass through unchanged.
  def self.full_email(email)
    if email.include? '@'
      email
    else
      "#{email}@#{Settings.google.domain}"
    end
  end

  # Normalises a group name to its email form: lowercased, with runs of
  # non-alphanumerics collapsed to '.'.
  def self.group_to_email(group_name)
    full_email(group_name.to_s.downcase.gsub(/[^a-z0-9]+/, '.'))
  end

  # Random lowercase-alphanumeric password (base-36 digits).
  def self.random_password
    rand(36**rand(16..42)).to_s(36)
  end

  private

  # Lazily builds and memoises an authorised Google API client using
  # OAuth2 service-account credentials from Settings.
  def api
    return @api unless @api.nil?
    @api = Google::APIClient.new(
      application_name: Settings.google.api_client.application_name,
      application_version: Settings.google.api_client.application_version)
    key = Google::APIClient::KeyUtils.load_from_pkcs12(Settings.google.api_client.key_path, Settings.google.api_client.secret)
    @api.authorization = Signet::OAuth2::Client.new(
      token_credential_uri: 'https://accounts.google.com/o/oauth2/token',
      audience: 'https://accounts.google.com/o/oauth2/token',
      scope: Settings.google.api_client.scopes,
      issuer: Settings.google.api_client.issuer,
      signing_key: key)
    @api.authorization.person = Settings.google.api_client.person
    @api.authorization.fetch_access_token!
    @api
  end

  # Memoised handle on the Admin Directory v1 API description.
  def directory
    @directory ||= api.discovered_api('admin', 'directory_v1')
  end
end
|
require 'uri'
require "graphite_graph/version"
# A small DSL to assist in the creation of Graphite graphs
# see https://github.com/ripienaar/graphite-graph-dsl/wiki
# for full details
class GraphiteGraph
attr_reader :info, :properties, :targets, :target_order, :critical_threshold, :warning_threshold
# Builds a graph definition from +file+ (a DSL file path, or :none for
# an empty graph).  +overrides+ are property values that take precedence
# over anything the DSL sets; +info+ carries metadata such as :hostname
# used by #service.  Evaluates the DSL immediately via #load_graph.
def initialize(file, overrides={}, info={})
  @info = info
  @file = file
  @munin_mode = false
  @overrides = overrides
  @linecount = 0
  @critical_threshold = []
  @warning_threshold = []
  load_graph
end
# Returns the default property hash merged with the constructor-supplied
# overrides (overrides win) and assigns it to @properties.
# NOTE(review): the :surpress key is spelled this way throughout the DSL
# and must not be "corrected" here.
def defaults
  @properties = {:title => nil,
                 :vtitle => nil,
                 :width => 500,
                 :height => 250,
                 :from => "-1hour",
                 :until => "now",
                 :surpress => false,
                 :description => nil,
                 :hide_legend => nil,
                 :hide_grid => nil,
                 :ymin => nil,
                 :ymax => nil,
                 :linewidth => nil,
                 :linemode => nil,
                 :fontsize => nil,
                 :fontbold => false,
                 :timezone => nil,
                 :background_color => nil,
                 :foreground_color => nil,
                 :draw_null_as_zero => false,
                 :major_grid_line_color => nil,
                 :minor_grid_line_color => nil,
                 :area => :none,
                 :placeholders => nil}.merge(@overrides)
end
# Property accessor.  :url is special-cased to the computed graph URL;
# every other key is looked up in the properties hash.
def [](key)
  key == :url ? url : @properties[key]
end
# Lets the DSL set properties by bare name (e.g. `title "foo"` stores
# properties[:title]).  Constructor overrides are locked and silently
# win over DSL assignments; names that are not known properties fall
# through to the normal NoMethodError via super.
def method_missing(meth, *args)
  if properties.include?(meth)
    properties[meth] = args.first unless @overrides.include?(meth)
  else
    super
  end
end
# Resets properties and targets, then evaluates the DSL file in the
# context of this instance (skipped when the file is :none).
def load_graph
  @properties = defaults
  @targets = {}
  @target_order = []
  self.instance_eval(File.read(@file)) unless @file == :none
end
# Wraps a block of field declarations in munin-style service mode so
# they resolve against info[:hostname]/+service+/+data+.  Requires
# info[:hostname] to have been supplied to the constructor; the mode is
# cleared again after the block runs.
def service(service, data, &blk)
  raise "No hostname given for this instance" unless info[:hostname]
  @service_mode = {:service => service, :data => data}
  blk.call
  @service_mode = false
end
# add forecast, bands, aberrations and actual fields using the
# Holt-Winters Confidence Band prediction model
#
# hw_predict :foo, :data => "some.data.item", :alias => "Some Item"
#
# You can tweak the colors by setting:
# :forecast_color => "blue"
# :bands_color => "grey"
# :aberration_color => "red"
#
# You can add an aberration line:
#
# :aberration_line => true,
# :aberration_second_y => true
#
# You can disable the forecast line by setting:
#
# :forecast_line => false
#
# You can disable the confidence lines by settings:
#
# :bands_lines => false
#
# You can disable the display of the actual data:
#
# :actual_line => false
def hw_predict(name, args)
raise ":data is needed as an argument to a Holt-Winters Confidence forecast" unless args[:data]
unless args[:forecast_line] == false
forecast_args = args.clone
forecast_args[:data] = "holtWintersForecast(#{forecast_args[:data]})"
forecast_args[:alias] = "#{args[:alias]} Forecast"
forecast_args[:color] = args[:forecast_color] || "blue"
field "#{name}_forecast", forecast_args
end
unless args[:bands_lines] == false
bands_args = args.clone
bands_args[:data] = "holtWintersConfidenceBands(#{bands_args[:data]})"
bands_args[:color] = args[:bands_color] || "grey"
bands_args[:dashed] = true
bands_args[:alias] = "#{args[:alias]} Confidence"
field "#{name}_bands", bands_args
end
if args[:aberration_line]
aberration_args = args.clone
aberration_args[:data] = "holtWintersAberration(keepLastValue(#{aberration_args[:data]}))"
aberration_args[:color] = args[:aberration_color] || "orange"
aberration_args[:alias] = "#{args[:alias]} Aberration"
aberration_args[:second_y_axis] = true if aberration_args[:aberration_second_y]
field "#{name}_aberration", aberration_args
end
if args[:critical]
color = args[:critical_color] || "red"
critical :value => args[:critical], :color => color, :name => name
end
if args[:warning]
color = args[:warning_color] || "orange"
warning :value => args[:warning], :color => color, :name => name
end
args[:color] ||= "yellow"
field name, args unless args[:actual_line] == false
end
alias :forecast :hw_predict
# takes a series of metrics in a wildcard query and aggregates the values by a subgroup
#
# data must contain a wildcard query, a subgroup position, and an optional aggregate function.
# if the aggregate function is omitted, sumSeries will be used.
#
# group :data => "metric.*.value", :subgroup => "2", :aggregator => "sumSeries"
#
def group(name, args)
raise ":data is needed as an argument to group metrics" unless args[:data]
raise ":subgroup is needed as an argument to group metrics" unless args.include?(:subgroup)
args[:aggregator] = "sumSeries" unless args[:aggregator]
group_args = args.clone
group_args[:data] = "groupByNode(#{group_args[:data]},#{group_args[:subgroup]},\"#{group_args[:aggregator]}\")"
field "#{name}_group", group_args
end
# draws a single dashed line with predictable names, defaults to red line
#
# data can be a single item or a 2 item array, it doesn't break if you supply
# more but # more than 2 items just doesn't make sense generally
#
# critical :value => [700, -700], :color => "red"
#
# You can prevent the line from being drawn but just store the ranges for monitoring
# purposes by adding :hide => true to the arguments
def critical(options)
raise "critical lines need a value" unless options[:value]
@critical_threshold = [options[:value]].flatten
options[:color] ||= "red"
unless options[:hide]
@critical_threshold.each_with_index do |crit, index|
line :caption => "crit_#{index}", :value => crit, :color => options[:color], :dashed => true
end
end
end
# draws a single dashed line with predictable names, defaults to orange line
#
# data can be a single item or a 2 item array, it doesn't break if you supply
# more but # more than 2 items just doesn't make sense generally
#
# warning :value => [700, -700], :color => "orange"
#
# You can prevent the line from being drawn but just store the ranges for monitoring
# purposes by adding :hide => true to the arguments
def warning(options)
raise "warning lines need a value" unless options[:value]
@warning_threshold = [options[:value]].flatten
options[:color] ||= "orange"
unless options[:hide]
@warning_threshold.flatten.each_with_index do |warn, index|
line :caption => "warn_#{index}", :value => warn, :color => options[:color], :dashed => true
end
end
end
# draws a simple line on the graph with a caption, value and color.
#
# line :caption => "warning", :value => 50, :color => "orange"
def line(options)
raise "lines need a caption" unless options.include?(:caption)
raise "lines need a value" unless options.include?(:value)
raise "lines need a color" unless options.include?(:color)
options[:alias] = options[:caption] unless options[:alias]
args = {:data => "threshold(#{options[:value]})", :color => options[:color], :alias => options[:alias]}
args[:dashed] = true if options[:dashed]
field "line_#{@linecount}", args
@linecount += 1
end
# adds a field to the graph, each field needs a unique name
def field(name, args)
raise "A field called #{name} already exist for this graph" if targets.include?(name)
default = {}
if @service_mode
default[:data] = [info[:hostname], @service_mode[:service], @service_mode[:data], name].join(".")
end
targets[name] = default.merge(args)
target_order << name
end
def url(format = nil, url=true)
return nil if properties[:surpress]
url_parts = []
colors = []
[:title, :vtitle, :from, :width, :height, :until].each do |item|
url_parts << "#{item}=#{properties[item]}" if properties[item]
end
url_parts << "areaMode=#{properties[:area]}" if properties[:area]
url_parts << "hideLegend=#{properties[:hide_legend]}" if properties.include?(:hide_legend)
url_parts << "hideGrid=#{properties[:hide_grid]}" if properties[:hide_grid]
url_parts << "yMin=#{properties[:ymin]}" if properties[:ymin]
url_parts << "yMax=#{properties[:ymax]}" if properties[:ymax]
url_parts << "lineWidth=#{properties[:linewidth]}" if properties[:linewidth]
url_parts << "lineMode=#{properties[:linemode]}" if properties[:linemode]
url_parts << "fontSize=#{properties[:fontsize]}" if properties[:fontsize]
url_parts << "fontBold=#{properties[:fontbold]}" if properties[:fontbold]
url_parts << "drawNullAsZero=#{properties[:draw_null_as_zero]}" if properties[:draw_null_as_zero]
url_parts << "tz=#{properties[:timezone]}" if properties[:timezone]
url_parts << "majorGridLineColor=#{properties[:major_grid_line_color]}" if properties[:major_grid_line_color]
url_parts << "minorGridLineColor=#{properties[:minor_grid_line_color]}" if properties[:minor_grid_line_color]
url_parts << "bgcolor=#{properties[:background_color]}" if properties[:background_color]
url_parts << "fgcolor=#{properties[:foreground_color]}" if properties[:foreground_color]
target_order.each do |name|
target = targets[name]
if target[:target]
url_parts << "target=#{target[:target]}"
else
raise "field #{name} does not have any data associated with it" unless target[:data]
graphite_target = target[:data]
graphite_target = "derivative(#{graphite_target})" if target[:derivative]
graphite_target = "highestAverage(#{graphite_target},#{target[:highest_average]})" if target[:highest_average]
graphite_target = "scale(#{graphite_target},#{target[:scale]})" if target[:scale]
graphite_target = "drawAsInfinite(#{graphite_target})" if target[:line]
graphite_target = "movingAverage(#{graphite_target},#{target[:smoothing]})" if target[:smoothing]
graphite_target = "color(#{graphite_target},\"#{target[:color]}\")" if target[:color]
graphite_target = "dashed(#{graphite_target})" if target[:dashed]
graphite_target = "secondYAxis(#{graphite_target})" if target[:second_y_axis]
unless target.include?(:subgroup)
if target[:alias_by_node]
graphite_target = "aliasByNode(#{graphite_target},#{target[:alias_by_node]})"
elsif target[:alias]
graphite_target = "alias(#{graphite_target},\"#{target[:alias]}\")"
else
graphite_target = "alias(#{graphite_target},\"#{name.to_s.capitalize}\")"
end
if target[:cacti_style]
graphite_target = "cactiStyle(#{graphite_target})"
elsif
graphite_target = "legendValue(#{graphite_target},\"#{target[:legend_value]}\")" if target[:legend_value]
end
end
url_parts << "target=#{graphite_target}"
end
end
url_parts << "format=#{format}" if format
if url
url_str = url_parts.join("&")
properties[:placeholders].each { |k,v| url_str.gsub!("%{#{k}}", v.to_s) } if properties[:placeholders].is_a?(Hash)
URI.encode(url_str)
else
url_parts
end
end
end
Fix syntax error: the bare `elsif` in GraphiteGraph#url now tests target[:legend_value] explicitly
require 'uri'
require "graphite_graph/version"
# A small DSL to assist in the creation of Graphite graphs
# see https://github.com/ripienaar/graphite-graph-dsl/wiki
# for full details
class GraphiteGraph
  attr_reader :info, :properties, :targets, :target_order, :critical_threshold, :warning_threshold

  # file      - path to a graph definition to instance_eval, or :none
  # overrides - property values that take precedence over any set in the definition
  # info      - metadata such as :hostname, required by #service
  def initialize(file, overrides={}, info={})
    @info = info
    @file = file
    @munin_mode = false
    @overrides = overrides
    @linecount = 0
    @critical_threshold = []
    @warning_threshold = []

    load_graph
  end

  # Default graph properties; anything in @overrides always wins.
  def defaults
    @properties = {:title => nil,
                   :vtitle => nil,
                   :width => 500,
                   :height => 250,
                   :from => "-1hour",
                   :until => "now",
                   :surpress => false,
                   :description => nil,
                   :hide_legend => nil,
                   :hide_grid => nil,
                   :ymin => nil,
                   :ymax => nil,
                   :linewidth => nil,
                   :linemode => nil,
                   :fontsize => nil,
                   :fontbold => false,
                   :timezone => nil,
                   :background_color => nil,
                   :foreground_color => nil,
                   :draw_null_as_zero => false,
                   :major_grid_line_color => nil,
                   :minor_grid_line_color => nil,
                   :area => :none,
                   :placeholders => nil}.merge(@overrides)
  end

  # Property lookup; the pseudo key :url renders the graph URL.
  def [](key)
    if key == :url
      url
    else
      @properties[key]
    end
  end

  # Lets the DSL set any known property, e.g. `title "foo"`.
  # Properties supplied via @overrides are read-only from the DSL.
  def method_missing(meth, *args)
    if properties.include?(meth)
      properties[meth] = args.first unless @overrides.include?(meth)
    else
      super
    end
  end

  # Keep respond_to? consistent with method_missing.
  def respond_to_missing?(meth, include_private = false)
    properties.include?(meth) || super
  end

  def load_graph
    @properties = defaults
    @targets = {}
    @target_order = []

    self.instance_eval(File.read(@file)) unless @file == :none
  end

  # Scopes fields created inside the block to hostname.service.data.<field>.
  def service(service, data, &blk)
    raise "No hostname given for this instance" unless info[:hostname]

    @service_mode = {:service => service, :data => data}
    blk.call
    @service_mode = false
  end

  # add forecast, bands, aberrations and actual fields using the
  # Holt-Winters Confidence Band prediction model
  #
  #    hw_predict :foo, :data => "some.data.item", :alias => "Some Item"
  #
  # You can tweak the colors by setting:
  #
  #    :forecast_color => "blue"
  #    :bands_color => "grey"
  #    :aberration_color => "red"
  #
  # You can add an aberration line:
  #
  #    :aberration_line => true,
  #    :aberration_second_y => true
  #
  # You can disable the forecast line by setting:
  #
  #    :forecast_line => false
  #
  # You can disable the confidence lines by settings:
  #
  #    :bands_lines => false
  #
  # You can disable the display of the actual data:
  #
  #    :actual_line => false
  def hw_predict(name, args)
    raise ":data is needed as an argument to a Holt-Winters Confidence forecast" unless args[:data]

    unless args[:forecast_line] == false
      forecast_args = args.clone
      forecast_args[:data] = "holtWintersForecast(#{forecast_args[:data]})"
      forecast_args[:alias] = "#{args[:alias]} Forecast"
      forecast_args[:color] = args[:forecast_color] || "blue"
      field "#{name}_forecast", forecast_args
    end

    unless args[:bands_lines] == false
      bands_args = args.clone
      bands_args[:data] = "holtWintersConfidenceBands(#{bands_args[:data]})"
      bands_args[:color] = args[:bands_color] || "grey"
      bands_args[:dashed] = true
      bands_args[:alias] = "#{args[:alias]} Confidence"
      field "#{name}_bands", bands_args
    end

    if args[:aberration_line]
      aberration_args = args.clone
      aberration_args[:data] = "holtWintersAberration(keepLastValue(#{aberration_args[:data]}))"
      aberration_args[:color] = args[:aberration_color] || "orange"
      aberration_args[:alias] = "#{args[:alias]} Aberration"
      aberration_args[:second_y_axis] = true if aberration_args[:aberration_second_y]
      field "#{name}_aberration", aberration_args
    end

    if args[:critical]
      color = args[:critical_color] || "red"
      critical :value => args[:critical], :color => color, :name => name
    end

    if args[:warning]
      color = args[:warning_color] || "orange"
      warning :value => args[:warning], :color => color, :name => name
    end

    args[:color] ||= "yellow"

    field name, args unless args[:actual_line] == false
  end
  alias :forecast :hw_predict

  # takes a series of metrics in a wildcard query and aggregates the values by a subgroup
  #
  # data must contain a wildcard query, a subgroup position, and an optional aggregate function.
  # if the aggregate function is omitted, sumSeries will be used.
  #
  #    group :data => "metric.*.value", :subgroup => "2", :aggregator => "sumSeries"
  #
  def group(name, args)
    raise ":data is needed as an argument to group metrics" unless args[:data]
    raise ":subgroup is needed as an argument to group metrics" unless args.include?(:subgroup)

    args[:aggregator] = "sumSeries" unless args[:aggregator]

    group_args = args.clone
    group_args[:data] = "groupByNode(#{group_args[:data]},#{group_args[:subgroup]},\"#{group_args[:aggregator]}\")"
    field "#{name}_group", group_args
  end

  # draws a single dashed line with predictable names, defaults to red line
  #
  # data can be a single item or a 2 item array, it doesn't break if you supply
  # more but more than 2 items just doesn't make sense generally
  #
  #    critical :value => [700, -700], :color => "red"
  #
  # You can prevent the line from being drawn but just store the ranges for monitoring
  # purposes by adding :hide => true to the arguments
  def critical(options)
    raise "critical lines need a value" unless options[:value]

    @critical_threshold = [options[:value]].flatten
    options[:color] ||= "red"

    unless options[:hide]
      @critical_threshold.each_with_index do |crit, index|
        line :caption => "crit_#{index}", :value => crit, :color => options[:color], :dashed => true
      end
    end
  end

  # draws a single dashed line with predictable names, defaults to orange line
  #
  # data can be a single item or a 2 item array, it doesn't break if you supply
  # more but more than 2 items just doesn't make sense generally
  #
  #    warning :value => [700, -700], :color => "orange"
  #
  # You can prevent the line from being drawn but just store the ranges for monitoring
  # purposes by adding :hide => true to the arguments
  def warning(options)
    raise "warning lines need a value" unless options[:value]

    @warning_threshold = [options[:value]].flatten
    options[:color] ||= "orange"

    unless options[:hide]
      @warning_threshold.flatten.each_with_index do |warn, index|
        line :caption => "warn_#{index}", :value => warn, :color => options[:color], :dashed => true
      end
    end
  end

  # draws a simple line on the graph with a caption, value and color.
  #
  #    line :caption => "warning", :value => 50, :color => "orange"
  def line(options)
    raise "lines need a caption" unless options.include?(:caption)
    raise "lines need a value" unless options.include?(:value)
    raise "lines need a color" unless options.include?(:color)

    options[:alias] = options[:caption] unless options[:alias]

    args = {:data => "threshold(#{options[:value]})", :color => options[:color], :alias => options[:alias]}
    args[:dashed] = true if options[:dashed]

    field "line_#{@linecount}", args

    @linecount += 1
  end

  # adds a field to the graph, each field needs a unique name
  def field(name, args)
    raise "A field called #{name} already exist for this graph" if targets.include?(name)

    default = {}

    if @service_mode
      default[:data] = [info[:hostname], @service_mode[:service], @service_mode[:data], name].join(".")
    end

    targets[name] = default.merge(args)
    target_order << name
  end

  # Renders the graph as a Graphite render-API URL (default), or as the
  # raw list of query-string parts when url is false.
  def url(format = nil, url=true)
    return nil if properties[:surpress]

    # NOTE: an unused `colors` local was removed here.
    url_parts = []

    [:title, :vtitle, :from, :width, :height, :until].each do |item|
      url_parts << "#{item}=#{properties[item]}" if properties[item]
    end

    url_parts << "areaMode=#{properties[:area]}" if properties[:area]
    url_parts << "hideLegend=#{properties[:hide_legend]}" if properties.include?(:hide_legend)
    url_parts << "hideGrid=#{properties[:hide_grid]}" if properties[:hide_grid]
    url_parts << "yMin=#{properties[:ymin]}" if properties[:ymin]
    url_parts << "yMax=#{properties[:ymax]}" if properties[:ymax]
    url_parts << "lineWidth=#{properties[:linewidth]}" if properties[:linewidth]
    url_parts << "lineMode=#{properties[:linemode]}" if properties[:linemode]
    url_parts << "fontSize=#{properties[:fontsize]}" if properties[:fontsize]
    url_parts << "fontBold=#{properties[:fontbold]}" if properties[:fontbold]
    url_parts << "drawNullAsZero=#{properties[:draw_null_as_zero]}" if properties[:draw_null_as_zero]
    url_parts << "tz=#{properties[:timezone]}" if properties[:timezone]
    url_parts << "majorGridLineColor=#{properties[:major_grid_line_color]}" if properties[:major_grid_line_color]
    url_parts << "minorGridLineColor=#{properties[:minor_grid_line_color]}" if properties[:minor_grid_line_color]
    url_parts << "bgcolor=#{properties[:background_color]}" if properties[:background_color]
    url_parts << "fgcolor=#{properties[:foreground_color]}" if properties[:foreground_color]

    target_order.each do |name|
      target = targets[name]

      if target[:target]
        url_parts << "target=#{target[:target]}"
      else
        raise "field #{name} does not have any data associated with it" unless target[:data]

        graphite_target = target[:data]

        graphite_target = "derivative(#{graphite_target})" if target[:derivative]
        graphite_target = "highestAverage(#{graphite_target},#{target[:highest_average]})" if target[:highest_average]
        graphite_target = "scale(#{graphite_target},#{target[:scale]})" if target[:scale]
        graphite_target = "drawAsInfinite(#{graphite_target})" if target[:line]
        graphite_target = "movingAverage(#{graphite_target},#{target[:smoothing]})" if target[:smoothing]
        graphite_target = "color(#{graphite_target},\"#{target[:color]}\")" if target[:color]
        graphite_target = "dashed(#{graphite_target})" if target[:dashed]
        graphite_target = "secondYAxis(#{graphite_target})" if target[:second_y_axis]

        unless target.include?(:subgroup)
          if target[:alias_by_node]
            graphite_target = "aliasByNode(#{graphite_target},#{target[:alias_by_node]})"
          elsif target[:alias]
            graphite_target = "alias(#{graphite_target},\"#{target[:alias]}\")"
          else
            graphite_target = "alias(#{graphite_target},\"#{name.to_s.capitalize}\")"
          end

          if target[:cacti_style]
            graphite_target = "cactiStyle(#{graphite_target})"
          elsif target[:legend_value]
            graphite_target = "legendValue(#{graphite_target},\"#{target[:legend_value]}\")"
          end
        end

        url_parts << "target=#{graphite_target}"
      end
    end

    url_parts << "format=#{format}" if format

    if url
      url_str = url_parts.join("&")
      properties[:placeholders].each { |k,v| url_str.gsub!("%{#{k}}", v.to_s) } if properties[:placeholders].is_a?(Hash)
      # NOTE(review): URI.encode was removed in Ruby 3.0 — confirm target Ruby version.
      URI.encode(url_str)
    else
      url_parts
    end
  end
end
|
require 'htmlentities'
module Griddler
  # Wraps an inbound e-mail payload and exposes normalized accessors for
  # the addresses, subject, bodies and headers of the message.
  class Email
    include ActionView::Helpers::SanitizeHelper

    attr_reader :to, :from, :cc, :bcc, :subject, :body, :raw_body, :raw_text, :raw_html,
      :headers, :raw_headers, :attachments

    def initialize(params)
      @params = params

      @to = recipients(:to)
      @cc = recipients(:cc)
      @bcc = recipients(:bcc)
      @from = extract_address(params[:from])
      @subject = params[:subject]

      @raw_text = params[:text]
      @raw_html = params[:html]
      @raw_body = @raw_text.presence || @raw_html
      @body = extract_body

      @headers = extract_headers
      @raw_headers = params[:headers]
      @attachments = params[:attachments]
    end

    private

    attr_reader :params

    # Memoized access to the gem-wide configuration.
    def config
      @config ||= Griddler.configuration
    end

    # All parsed addresses of the given field (:to, :cc or :bcc).
    def recipients(field)
      params[field].to_a.map { |address| extract_address(address) }.compact
    end

    # Parses a single address; nil unless it plausibly contains an e-mail.
    def extract_address(address)
      cleaned = clean_text(address)
      EmailParser.parse_address(cleaned) if cleaned =~ /@/
    end

    def extract_body
      EmailParser.extract_reply_body(text_or_sanitized_html)
    end

    def extract_headers
      raw = params[:headers]
      if raw.is_a?(Hash)
        deep_clean_invalid_utf8_bytes(raw)
      else
        EmailParser.extract_headers(clean_invalid_utf8_bytes(raw))
      end
    end

    def extract_cc_from_headers(headers)
      EmailParser.extract_cc(headers)
    end

    # Prefers the plain-text part; falls back to the sanitized HTML part.
    def text_or_sanitized_html
      plain = clean_text(params.fetch(:text, ''))
      plain.presence || clean_html(params.fetch(:html, '')).presence
    end

    def clean_text(text)
      clean_invalid_utf8_bytes(text)
    end

    # Repairs encoding, strips tags, then decodes HTML entities.
    def clean_html(html)
      HTMLEntities.new.decode(strip_tags(clean_invalid_utf8_bytes(html)))
    end

    # Recursively cleans every String nested inside hashes and arrays.
    def deep_clean_invalid_utf8_bytes(object)
      case object
      when Hash
        object.each_with_object({}) do |(key, value), cleaned|
          cleaned[key] = deep_clean_invalid_utf8_bytes(value)
        end
      when Array
        object.map { |element| deep_clean_invalid_utf8_bytes(element) }
      when String
        clean_invalid_utf8_bytes(object)
      else
        object
      end
    end

    # Re-interprets invalid byte sequences as Latin-1, transcoding to UTF-8.
    def clean_invalid_utf8_bytes(text)
      return text unless text && !text.valid_encoding?

      text.force_encoding('ISO-8859-1').encode('UTF-8')
    end
  end
end
Include @envelope_to to determine who to proxy the email to.
require 'htmlentities'
require 'json'
module Griddler
  # Wraps an inbound e-mail payload and exposes normalized accessors,
  # including the SMTP envelope recipient used to decide who to proxy
  # the e-mail to.
  class Email
    include ActionView::Helpers::SanitizeHelper

    attr_reader :to, :from, :cc, :bcc, :subject, :body, :raw_body, :raw_text, :raw_html,
      :headers, :raw_headers, :attachments, :envelope_to

    def initialize(params)
      @params = params
      # Envelope recipient determines who to proxy the email to.
      @envelope_to = extract_envelope_to
      @to = recipients(:to)
      @from = extract_address(params[:from])
      @subject = params[:subject]

      @body = extract_body
      @raw_text = params[:text]
      @raw_html = params[:html]
      @raw_body = @raw_text.presence || @raw_html

      @headers = extract_headers

      @cc = recipients(:cc)
      @bcc = recipients(:bcc)

      @raw_headers = params[:headers]

      @attachments = params[:attachments]
    end

    private

    attr_reader :params

    def config
      @config ||= Griddler.configuration
    end

    # The SMTP envelope recipient (e.g. SendGrid's :envelope param, a JSON
    # document like {"to":["a@example.com"],"from":"b@example.com"}).
    # Returns nil when the param is absent, empty, malformed JSON or has
    # no "to" key — the previous inline version raised NoMethodError in
    # those cases.
    def extract_envelope_to
      raw = params[:envelope]
      return if raw.nil? || raw.to_s.empty?

      envelope_recipients = JSON.parse(raw)['to']
      extract_address(Array(envelope_recipients).first)
    rescue JSON::ParserError
      nil
    end

    def recipients(type)
      params[type].to_a.map { |recipient| extract_address(recipient) }.compact
    end

    # Parses a single address; nil unless it plausibly contains an e-mail.
    def extract_address(address)
      clean_address = clean_text(address)
      EmailParser.parse_address(clean_address) if clean_address =~ /@/
    end

    def extract_body
      EmailParser.extract_reply_body(text_or_sanitized_html)
    end

    def extract_headers
      if params[:headers].is_a?(Hash)
        deep_clean_invalid_utf8_bytes(params[:headers])
      else
        EmailParser.extract_headers(clean_invalid_utf8_bytes(params[:headers]))
      end
    end

    def extract_cc_from_headers(headers)
      EmailParser.extract_cc(headers)
    end

    # Prefers the plain-text part; falls back to the sanitized HTML part.
    def text_or_sanitized_html
      text = clean_text(params.fetch(:text, ''))
      text.presence || clean_html(params.fetch(:html, '')).presence
    end

    def clean_text(text)
      clean_invalid_utf8_bytes(text)
    end

    # Repairs encoding, strips tags, then decodes HTML entities.
    def clean_html(html)
      cleaned_html = clean_invalid_utf8_bytes(html)
      cleaned_html = strip_tags(cleaned_html)
      cleaned_html = HTMLEntities.new.decode(cleaned_html)
      cleaned_html
    end

    # Recursively cleans every String nested inside hashes and arrays.
    def deep_clean_invalid_utf8_bytes(object)
      case object
      when Hash
        object.inject({}) do |clean_hash, (key, dirty_value)|
          clean_hash[key] = deep_clean_invalid_utf8_bytes(dirty_value)
          clean_hash
        end
      when Array
        object.map { |element| deep_clean_invalid_utf8_bytes(element) }
      when String
        clean_invalid_utf8_bytes(object)
      else
        object
      end
    end

    # Re-interprets invalid byte sequences as Latin-1, transcoding to UTF-8.
    def clean_invalid_utf8_bytes(text)
      if text && !text.valid_encoding?
        text.force_encoding('ISO-8859-1').encode('UTF-8')
      else
        text
      end
    end
  end
end
|
require File.expand_path('../../../spec_helper', __FILE__)
require File.expand_path('../../../fixtures/code_loading', __FILE__)
# Specs for Kernel#require_relative when given a path relative to the
# file containing the call.
describe "Kernel#require_relative with a relative path" do
  it "needs to be reviewed for spec completeness"

  before :each do
    CodeLoadingSpecs.spec_setup
    @dir = "../../fixtures/code"
    @abs_dir = File.expand_path(@dir, File.dirname(__FILE__))
    @path = "#{@dir}/load_fixture.rb"
    @abs_path = File.expand_path(@path, File.dirname(__FILE__))
  end

  after :each do
    CodeLoadingSpecs.spec_cleanup
  end

  platform_is_not :windows do
    describe "when file is a symlink" do
      before :each do
        @link = tmp("symlink.rb", false)
        @real_path = "#{@abs_dir}/symlink/symlink1.rb"
        File.symlink(@real_path, @link)
      end

      after :each do
        rm_r @link
      end

      it "loads a path relative to current file" do
        require_relative(@link).should be_true
        ScratchPad.recorded.should == [:loaded]
      end
    end
  end

  it "loads a path relative to the current file" do
    require_relative(@path).should be_true
    ScratchPad.recorded.should == [:loaded]
  end

  it "loads a file defining many methods" do
    require_relative("#{@dir}/methods_fixture.rb").should be_true
    ScratchPad.recorded.should == [:loaded]
  end

  it "raises a LoadError if the file does not exist" do
    lambda { require_relative("#{@dir}/nonexistent.rb") }.should raise_error(LoadError)
    ScratchPad.recorded.should == []
  end

  it "raises a LoadError if basepath does not exist" do
    lambda { eval("require_relative('#{@dir}/nonexistent.rb')") }.should raise_error(LoadError)
  end

  it "stores the missing path in a LoadError object" do
    path = "#{@dir}/nonexistent.rb"

    lambda {
      require_relative(path)
    }.should(raise_error(LoadError) { |e|
      e.path.should == File.expand_path(path, @abs_dir)
    })
  end

  it "calls #to_str on non-String objects" do
    name = mock("load_fixture.rb mock")
    name.should_receive(:to_str).and_return(@path)
    require_relative(name).should be_true
    ScratchPad.recorded.should == [:loaded]
  end

  it "raises a TypeError if argument does not respond to #to_str" do
    lambda { require_relative(nil) }.should raise_error(TypeError)
    lambda { require_relative(42) }.should raise_error(TypeError)
    lambda {
      require_relative([@path,@path])
    }.should raise_error(TypeError)
  end

  it "raises a TypeError if passed an object that has #to_s but not #to_str" do
    name = mock("load_fixture.rb mock")
    name.stub!(:to_s).and_return(@path)
    lambda { require_relative(name) }.should raise_error(TypeError)
  end

  it "raises a TypeError if #to_str does not return a String" do
    name = mock("#to_str returns nil")
    name.should_receive(:to_str).at_least(1).times.and_return(nil)
    lambda { require_relative(name) }.should raise_error(TypeError)
  end

  it "calls #to_path on non-String objects" do
    name = mock("load_fixture.rb mock")
    name.should_receive(:to_path).and_return(@path)
    require_relative(name).should be_true
    ScratchPad.recorded.should == [:loaded]
  end

  it "calls #to_str on non-String objects returned by #to_path" do
    name = mock("load_fixture.rb mock")
    to_path = mock("load_fixture_rb #to_path mock")
    name.should_receive(:to_path).and_return(to_path)
    to_path.should_receive(:to_str).and_return(@path)
    require_relative(name).should be_true
    ScratchPad.recorded.should == [:loaded]
  end

  describe "(file extensions)" do
    it "loads a .rb extensioned file when passed a non-extensioned path" do
      require_relative("#{@dir}/load_fixture").should be_true
      ScratchPad.recorded.should == [:loaded]
    end

    it "loads a .rb extensioned file when a C-extension file of the same name is loaded" do
      $LOADED_FEATURES << "#{@abs_dir}/load_fixture.bundle"
      $LOADED_FEATURES << "#{@abs_dir}/load_fixture.dylib"
      $LOADED_FEATURES << "#{@abs_dir}/load_fixture.so"
      $LOADED_FEATURES << "#{@abs_dir}/load_fixture.dll"
      require_relative(@path).should be_true
      ScratchPad.recorded.should == [:loaded]
    end

    it "does not load a C-extension file if a .rb extensioned file is already loaded" do
      $LOADED_FEATURES << "#{@abs_dir}/load_fixture.rb"
      require_relative("#{@dir}/load_fixture").should be_false
      ScratchPad.recorded.should == []
    end

    it "loads a .rb extensioned file when passed a non-.rb extensioned path" do
      require_relative("#{@dir}/load_fixture.ext").should be_true
      ScratchPad.recorded.should == [:loaded]
      $LOADED_FEATURES.should include "#{@abs_dir}/load_fixture.ext.rb"
    end

    it "loads a .rb extensioned file when a complex-extensioned C-extension file of the same name is loaded" do
      $LOADED_FEATURES << "#{@abs_dir}/load_fixture.ext.bundle"
      $LOADED_FEATURES << "#{@abs_dir}/load_fixture.ext.dylib"
      $LOADED_FEATURES << "#{@abs_dir}/load_fixture.ext.so"
      $LOADED_FEATURES << "#{@abs_dir}/load_fixture.ext.dll"
      require_relative("#{@dir}/load_fixture.ext").should be_true
      ScratchPad.recorded.should == [:loaded]
      $LOADED_FEATURES.should include "#{@abs_dir}/load_fixture.ext.rb"
    end

    it "does not load a C-extension file if a complex-extensioned .rb file is already loaded" do
      $LOADED_FEATURES << "#{@abs_dir}/load_fixture.ext.rb"
      require_relative("#{@dir}/load_fixture.ext").should be_false
      ScratchPad.recorded.should == []
    end
  end

  # Description previously read "($LOAD_FEATURES)"; the global is named
  # $LOADED_FEATURES.
  describe "($LOADED_FEATURES)" do
    it "stores an absolute path" do
      require_relative(@path).should be_true
      $LOADED_FEATURES.should == [@abs_path]
    end

    it "does not store the path if the load fails" do
      lambda { require_relative("#{@dir}/raise_fixture.rb") }.should raise_error(RuntimeError)
      $LOADED_FEATURES.should == []
    end

    it "does not load an absolute path that is already stored" do
      $LOADED_FEATURES << @abs_path
      require_relative(@path).should be_false
      ScratchPad.recorded.should == []
    end

    it "adds the suffix of the resolved filename" do
      require_relative("#{@dir}/load_fixture").should be_true
      $LOADED_FEATURES.should == ["#{@abs_dir}/load_fixture.rb"]
    end

    it "loads a path for a file already loaded with a relative path" do
      $LOAD_PATH << File.expand_path(@dir)
      $LOADED_FEATURES << "load_fixture.rb" << "load_fixture"
      require_relative(@path).should be_true
      $LOADED_FEATURES.should include(@abs_path)
      ScratchPad.recorded.should == [:loaded]
    end
  end
end
# Specs for Kernel#require_relative when given an absolute path.
describe "Kernel#require_relative with an absolute path" do
  it "needs to be reviewed for spec completeness"

  before :each do
    CodeLoadingSpecs.spec_setup
    @dir = File.expand_path "../../fixtures/code", File.dirname(__FILE__)
    @abs_dir = @dir
    @path = File.join @dir, "load_fixture.rb"
    @abs_path = @path
  end

  after :each do
    CodeLoadingSpecs.spec_cleanup
  end

  it "loads a path relative to the current file" do
    require_relative(@path).should be_true
    ScratchPad.recorded.should == [:loaded]
  end

  it "loads a file defining many methods" do
    require_relative("#{@dir}/methods_fixture.rb").should be_true
    ScratchPad.recorded.should == [:loaded]
  end

  it "raises a LoadError if the file does not exist" do
    lambda { require_relative("#{@dir}/nonexistent.rb") }.should raise_error(LoadError)
    ScratchPad.recorded.should == []
  end

  it "raises a LoadError if basepath does not exist" do
    lambda { eval("require_relative('#{@dir}/nonexistent.rb')") }.should raise_error(LoadError)
  end

  it "stores the missing path in a LoadError object" do
    path = "#{@dir}/nonexistent.rb"

    lambda {
      require_relative(path)
    }.should(raise_error(LoadError) { |e|
      e.path.should == File.expand_path(path, @abs_dir)
    })
  end

  it "calls #to_str on non-String objects" do
    name = mock("load_fixture.rb mock")
    name.should_receive(:to_str).and_return(@path)
    require_relative(name).should be_true
    ScratchPad.recorded.should == [:loaded]
  end

  it "raises a TypeError if argument does not respond to #to_str" do
    lambda { require_relative(nil) }.should raise_error(TypeError)
    lambda { require_relative(42) }.should raise_error(TypeError)
    lambda {
      require_relative([@path,@path])
    }.should raise_error(TypeError)
  end

  it "raises a TypeError if passed an object that has #to_s but not #to_str" do
    name = mock("load_fixture.rb mock")
    name.stub!(:to_s).and_return(@path)
    lambda { require_relative(name) }.should raise_error(TypeError)
  end

  it "raises a TypeError if #to_str does not return a String" do
    name = mock("#to_str returns nil")
    name.should_receive(:to_str).at_least(1).times.and_return(nil)
    lambda { require_relative(name) }.should raise_error(TypeError)
  end

  it "calls #to_path on non-String objects" do
    name = mock("load_fixture.rb mock")
    name.should_receive(:to_path).and_return(@path)
    require_relative(name).should be_true
    ScratchPad.recorded.should == [:loaded]
  end

  it "calls #to_str on non-String objects returned by #to_path" do
    name = mock("load_fixture.rb mock")
    to_path = mock("load_fixture_rb #to_path mock")
    name.should_receive(:to_path).and_return(to_path)
    to_path.should_receive(:to_str).and_return(@path)
    require_relative(name).should be_true
    ScratchPad.recorded.should == [:loaded]
  end

  describe "(file extensions)" do
    it "loads a .rb extensioned file when passed a non-extensioned path" do
      require_relative("#{@dir}/load_fixture").should be_true
      ScratchPad.recorded.should == [:loaded]
    end

    it "loads a .rb extensioned file when a C-extension file of the same name is loaded" do
      $LOADED_FEATURES << "#{@abs_dir}/load_fixture.bundle"
      $LOADED_FEATURES << "#{@abs_dir}/load_fixture.dylib"
      $LOADED_FEATURES << "#{@abs_dir}/load_fixture.so"
      $LOADED_FEATURES << "#{@abs_dir}/load_fixture.dll"
      require_relative(@path).should be_true
      ScratchPad.recorded.should == [:loaded]
    end

    it "does not load a C-extension file if a .rb extensioned file is already loaded" do
      $LOADED_FEATURES << "#{@abs_dir}/load_fixture.rb"
      require_relative("#{@dir}/load_fixture").should be_false
      ScratchPad.recorded.should == []
    end

    it "loads a .rb extensioned file when passed a non-.rb extensioned path" do
      require_relative("#{@dir}/load_fixture.ext").should be_true
      ScratchPad.recorded.should == [:loaded]
      $LOADED_FEATURES.should include "#{@abs_dir}/load_fixture.ext.rb"
    end

    it "loads a .rb extensioned file when a complex-extensioned C-extension file of the same name is loaded" do
      $LOADED_FEATURES << "#{@abs_dir}/load_fixture.ext.bundle"
      $LOADED_FEATURES << "#{@abs_dir}/load_fixture.ext.dylib"
      $LOADED_FEATURES << "#{@abs_dir}/load_fixture.ext.so"
      $LOADED_FEATURES << "#{@abs_dir}/load_fixture.ext.dll"
      require_relative("#{@dir}/load_fixture.ext").should be_true
      ScratchPad.recorded.should == [:loaded]
      $LOADED_FEATURES.should include "#{@abs_dir}/load_fixture.ext.rb"
    end

    it "does not load a C-extension file if a complex-extensioned .rb file is already loaded" do
      $LOADED_FEATURES << "#{@abs_dir}/load_fixture.ext.rb"
      require_relative("#{@dir}/load_fixture.ext").should be_false
      ScratchPad.recorded.should == []
    end
  end

  # Description previously read "($LOAD_FEATURES)"; the global is named
  # $LOADED_FEATURES.
  describe "($LOADED_FEATURES)" do
    it "stores an absolute path" do
      require_relative(@path).should be_true
      $LOADED_FEATURES.should == [@abs_path]
    end

    it "does not store the path if the load fails" do
      lambda { require_relative("#{@dir}/raise_fixture.rb") }.should raise_error(RuntimeError)
      $LOADED_FEATURES.should == []
    end

    it "does not load an absolute path that is already stored" do
      $LOADED_FEATURES << @abs_path
      require_relative(@path).should be_false
      ScratchPad.recorded.should == []
    end

    it "adds the suffix of the resolved filename" do
      require_relative("#{@dir}/load_fixture").should be_true
      $LOADED_FEATURES.should == ["#{@abs_dir}/load_fixture.rb"]
    end

    it "loads a path for a file already loaded with a relative path" do
      $LOAD_PATH << File.expand_path(@dir)
      $LOADED_FEATURES << "load_fixture.rb" << "load_fixture"
      require_relative(@path).should be_true
      $LOADED_FEATURES.should include(@abs_path)
      ScratchPad.recorded.should == [:loaded]
    end
  end
end
Typo: $LOAD_FEATURES → $LOADED_FEATURES
require File.expand_path('../../../spec_helper', __FILE__)
require File.expand_path('../../../fixtures/code_loading', __FILE__)
describe "Kernel#require_relative with a relative path" do
it "needs to be reviewed for spec completeness"
before :each do
CodeLoadingSpecs.spec_setup
@dir = "../../fixtures/code"
@abs_dir = File.expand_path(@dir, File.dirname(__FILE__))
@path = "#{@dir}/load_fixture.rb"
@abs_path = File.expand_path(@path, File.dirname(__FILE__))
end
after :each do
CodeLoadingSpecs.spec_cleanup
end
platform_is_not :windows do
describe "when file is a symlink" do
before :each do
@link = tmp("symlink.rb", false)
@real_path = "#{@abs_dir}/symlink/symlink1.rb"
File.symlink(@real_path, @link)
end
after :each do
rm_r @link
end
it "loads a path relative to current file" do
require_relative(@link).should be_true
ScratchPad.recorded.should == [:loaded]
end
end
end
it "loads a path relative to the current file" do
require_relative(@path).should be_true
ScratchPad.recorded.should == [:loaded]
end
it "loads a file defining many methods" do
require_relative("#{@dir}/methods_fixture.rb").should be_true
ScratchPad.recorded.should == [:loaded]
end
it "raises a LoadError if the file does not exist" do
lambda { require_relative("#{@dir}/nonexistent.rb") }.should raise_error(LoadError)
ScratchPad.recorded.should == []
end
it "raises a LoadError if basepath does not exist" do
lambda { eval("require_relative('#{@dir}/nonexistent.rb')") }.should raise_error(LoadError)
end
it "stores the missing path in a LoadError object" do
path = "#{@dir}/nonexistent.rb"
lambda {
require_relative(path)
}.should(raise_error(LoadError) { |e|
e.path.should == File.expand_path(path, @abs_dir)
})
end
it "calls #to_str on non-String objects" do
name = mock("load_fixture.rb mock")
name.should_receive(:to_str).and_return(@path)
require_relative(name).should be_true
ScratchPad.recorded.should == [:loaded]
end
it "raises a TypeError if argument does not respond to #to_str" do
lambda { require_relative(nil) }.should raise_error(TypeError)
lambda { require_relative(42) }.should raise_error(TypeError)
lambda {
require_relative([@path,@path])
}.should raise_error(TypeError)
end
it "raises a TypeError if passed an object that has #to_s but not #to_str" do
name = mock("load_fixture.rb mock")
name.stub!(:to_s).and_return(@path)
lambda { require_relative(name) }.should raise_error(TypeError)
end
it "raises a TypeError if #to_str does not return a String" do
name = mock("#to_str returns nil")
name.should_receive(:to_str).at_least(1).times.and_return(nil)
lambda { require_relative(name) }.should raise_error(TypeError)
end
it "calls #to_path on non-String objects" do
name = mock("load_fixture.rb mock")
name.should_receive(:to_path).and_return(@path)
require_relative(name).should be_true
ScratchPad.recorded.should == [:loaded]
end
it "calls #to_str on non-String objects returned by #to_path" do
name = mock("load_fixture.rb mock")
to_path = mock("load_fixture_rb #to_path mock")
name.should_receive(:to_path).and_return(to_path)
to_path.should_receive(:to_str).and_return(@path)
require_relative(name).should be_true
ScratchPad.recorded.should == [:loaded]
end
describe "(file extensions)" do
it "loads a .rb extensioned file when passed a non-extensioned path" do
require_relative("#{@dir}/load_fixture").should be_true
ScratchPad.recorded.should == [:loaded]
end
it "loads a .rb extensioned file when a C-extension file of the same name is loaded" do
$LOADED_FEATURES << "#{@abs_dir}/load_fixture.bundle"
$LOADED_FEATURES << "#{@abs_dir}/load_fixture.dylib"
$LOADED_FEATURES << "#{@abs_dir}/load_fixture.so"
$LOADED_FEATURES << "#{@abs_dir}/load_fixture.dll"
require_relative(@path).should be_true
ScratchPad.recorded.should == [:loaded]
end
it "does not load a C-extension file if a .rb extensioned file is already loaded" do
$LOADED_FEATURES << "#{@abs_dir}/load_fixture.rb"
require_relative("#{@dir}/load_fixture").should be_false
ScratchPad.recorded.should == []
end
it "loads a .rb extensioned file when passed a non-.rb extensioned path" do
require_relative("#{@dir}/load_fixture.ext").should be_true
ScratchPad.recorded.should == [:loaded]
$LOADED_FEATURES.should include "#{@abs_dir}/load_fixture.ext.rb"
end
it "loads a .rb extensioned file when a complex-extensioned C-extension file of the same name is loaded" do
$LOADED_FEATURES << "#{@abs_dir}/load_fixture.ext.bundle"
$LOADED_FEATURES << "#{@abs_dir}/load_fixture.ext.dylib"
$LOADED_FEATURES << "#{@abs_dir}/load_fixture.ext.so"
$LOADED_FEATURES << "#{@abs_dir}/load_fixture.ext.dll"
require_relative("#{@dir}/load_fixture.ext").should be_true
ScratchPad.recorded.should == [:loaded]
$LOADED_FEATURES.should include "#{@abs_dir}/load_fixture.ext.rb"
end
it "does not load a C-extension file if a complex-extensioned .rb file is already loaded" do
$LOADED_FEATURES << "#{@abs_dir}/load_fixture.ext.rb"
require_relative("#{@dir}/load_fixture.ext").should be_false
ScratchPad.recorded.should == []
end
end
describe "($LOADED_FEATURES)" do
it "stores an absolute path" do
require_relative(@path).should be_true
$LOADED_FEATURES.should == [@abs_path]
end
it "does not store the path if the load fails" do
lambda { require_relative("#{@dir}/raise_fixture.rb") }.should raise_error(RuntimeError)
$LOADED_FEATURES.should == []
end
it "does not load an absolute path that is already stored" do
$LOADED_FEATURES << @abs_path
require_relative(@path).should be_false
ScratchPad.recorded.should == []
end
it "adds the suffix of the resolved filename" do
require_relative("#{@dir}/load_fixture").should be_true
$LOADED_FEATURES.should == ["#{@abs_dir}/load_fixture.rb"]
end
it "loads a path for a file already loaded with a relative path" do
$LOAD_PATH << File.expand_path(@dir)
$LOADED_FEATURES << "load_fixture.rb" << "load_fixture"
require_relative(@path).should be_true
$LOADED_FEATURES.should include(@abs_path)
ScratchPad.recorded.should == [:loaded]
end
end
end
describe "Kernel#require_relative with an absolute path" do
it "needs to be reviewed for spec completeness"
before :each do
CodeLoadingSpecs.spec_setup
@dir = File.expand_path "../../fixtures/code", File.dirname(__FILE__)
@abs_dir = @dir
@path = File.join @dir, "load_fixture.rb"
@abs_path = @path
end
after :each do
CodeLoadingSpecs.spec_cleanup
end
it "loads a path relative to the current file" do
require_relative(@path).should be_true
ScratchPad.recorded.should == [:loaded]
end
it "loads a file defining many methods" do
require_relative("#{@dir}/methods_fixture.rb").should be_true
ScratchPad.recorded.should == [:loaded]
end
it "raises a LoadError if the file does not exist" do
lambda { require_relative("#{@dir}/nonexistent.rb") }.should raise_error(LoadError)
ScratchPad.recorded.should == []
end
it "raises a LoadError if basepath does not exist" do
lambda { eval("require_relative('#{@dir}/nonexistent.rb')") }.should raise_error(LoadError)
end
it "stores the missing path in a LoadError object" do
path = "#{@dir}/nonexistent.rb"
lambda {
require_relative(path)
}.should(raise_error(LoadError) { |e|
e.path.should == File.expand_path(path, @abs_dir)
})
end
it "calls #to_str on non-String objects" do
name = mock("load_fixture.rb mock")
name.should_receive(:to_str).and_return(@path)
require_relative(name).should be_true
ScratchPad.recorded.should == [:loaded]
end
it "raises a TypeError if argument does not respond to #to_str" do
lambda { require_relative(nil) }.should raise_error(TypeError)
lambda { require_relative(42) }.should raise_error(TypeError)
lambda {
require_relative([@path,@path])
}.should raise_error(TypeError)
end
it "raises a TypeError if passed an object that has #to_s but not #to_str" do
name = mock("load_fixture.rb mock")
name.stub!(:to_s).and_return(@path)
lambda { require_relative(name) }.should raise_error(TypeError)
end
it "raises a TypeError if #to_str does not return a String" do
name = mock("#to_str returns nil")
name.should_receive(:to_str).at_least(1).times.and_return(nil)
lambda { require_relative(name) }.should raise_error(TypeError)
end
it "calls #to_path on non-String objects" do
name = mock("load_fixture.rb mock")
name.should_receive(:to_path).and_return(@path)
require_relative(name).should be_true
ScratchPad.recorded.should == [:loaded]
end
it "calls #to_str on non-String objects returned by #to_path" do
name = mock("load_fixture.rb mock")
to_path = mock("load_fixture_rb #to_path mock")
name.should_receive(:to_path).and_return(to_path)
to_path.should_receive(:to_str).and_return(@path)
require_relative(name).should be_true
ScratchPad.recorded.should == [:loaded]
end
describe "(file extensions)" do
it "loads a .rb extensioned file when passed a non-extensioned path" do
require_relative("#{@dir}/load_fixture").should be_true
ScratchPad.recorded.should == [:loaded]
end
it "loads a .rb extensioned file when a C-extension file of the same name is loaded" do
$LOADED_FEATURES << "#{@abs_dir}/load_fixture.bundle"
$LOADED_FEATURES << "#{@abs_dir}/load_fixture.dylib"
$LOADED_FEATURES << "#{@abs_dir}/load_fixture.so"
$LOADED_FEATURES << "#{@abs_dir}/load_fixture.dll"
require_relative(@path).should be_true
ScratchPad.recorded.should == [:loaded]
end
it "does not load a C-extension file if a .rb extensioned file is already loaded" do
$LOADED_FEATURES << "#{@abs_dir}/load_fixture.rb"
require_relative("#{@dir}/load_fixture").should be_false
ScratchPad.recorded.should == []
end
it "loads a .rb extensioned file when passed a non-.rb extensioned path" do
require_relative("#{@dir}/load_fixture.ext").should be_true
ScratchPad.recorded.should == [:loaded]
$LOADED_FEATURES.should include "#{@abs_dir}/load_fixture.ext.rb"
end
it "loads a .rb extensioned file when a complex-extensioned C-extension file of the same name is loaded" do
$LOADED_FEATURES << "#{@abs_dir}/load_fixture.ext.bundle"
$LOADED_FEATURES << "#{@abs_dir}/load_fixture.ext.dylib"
$LOADED_FEATURES << "#{@abs_dir}/load_fixture.ext.so"
$LOADED_FEATURES << "#{@abs_dir}/load_fixture.ext.dll"
require_relative("#{@dir}/load_fixture.ext").should be_true
ScratchPad.recorded.should == [:loaded]
$LOADED_FEATURES.should include "#{@abs_dir}/load_fixture.ext.rb"
end
it "does not load a C-extension file if a complex-extensioned .rb file is already loaded" do
$LOADED_FEATURES << "#{@abs_dir}/load_fixture.ext.rb"
require_relative("#{@dir}/load_fixture.ext").should be_false
ScratchPad.recorded.should == []
end
end
# Specs for how require_relative (absolute path) interacts with the
# $LOADED_FEATURES global. The earlier commit "Typo: $LOAD_FEATURES ->
# $LOADED_FEATURES" fixed only the relative-path copy of this describe;
# this occurrence still carried the typo.
describe "($LOADED_FEATURES)" do
  it "stores an absolute path" do
    require_relative(@path).should be_true
    $LOADED_FEATURES.should == [@abs_path]
  end

  it "does not store the path if the load fails" do
    lambda { require_relative("#{@dir}/raise_fixture.rb") }.should raise_error(RuntimeError)
    $LOADED_FEATURES.should == []
  end

  it "does not load an absolute path that is already stored" do
    $LOADED_FEATURES << @abs_path
    require_relative(@path).should be_false
    ScratchPad.recorded.should == []
  end

  it "adds the suffix of the resolved filename" do
    require_relative("#{@dir}/load_fixture").should be_true
    $LOADED_FEATURES.should == ["#{@abs_dir}/load_fixture.rb"]
  end

  it "loads a path for a file already loaded with a relative path" do
    $LOAD_PATH << File.expand_path(@dir)
    $LOADED_FEATURES << "load_fixture.rb" << "load_fixture"
    require_relative(@path).should be_true
    $LOADED_FEATURES.should include(@abs_path)
    ScratchPad.recorded.should == [:loaded]
  end
end
end
|
require 'htmlentities'
module Griddler
  # Wraps an inbound email payload (the params hash posted by an email
  # service webhook) and exposes its parts — recipients, subject, body,
  # headers, attachments — with invalid UTF-8 bytes cleaned along the way.
  class Email
    include ActionView::Helpers::SanitizeHelper

    attr_reader :to, :from, :cc, :bcc, :subject, :body, :raw_body, :raw_text, :raw_html,
      :headers, :raw_headers, :attachments

    # @param params [Hash] raw webhook parameters; keys read here include
    #   :to, :from, :cc, :bcc, :subject, :text, :html, :headers, :attachments
    def initialize(params)
      @params = params

      @to = recipients(:to)
      @from = extract_address(params[:from])
      @subject = params[:subject]

      @body = extract_body
      @raw_text = params[:text]
      @raw_html = params[:html]
      # Prefer the plain-text part; fall back to HTML when text is blank.
      @raw_body = @raw_text.presence || @raw_html

      @headers = extract_headers

      @cc = recipients(:cc)
      @bcc = recipients(:bcc)

      @raw_headers = params[:headers]

      @attachments = params[:attachments]
    end

    private

    attr_reader :params

    def config
      @config ||= Griddler.configuration
    end

    # Parse every address under the given param key (:to, :cc, :bcc),
    # dropping entries that do not look like addresses (no "@").
    def recipients(type)
      params[type].to_a.map { |recipient| extract_address(recipient) }.compact
    end

    # Clean and parse a single address; returns nil unless it contains "@".
    def extract_address(address)
      clean_address = clean_text(address)
      EmailParser.parse_address(clean_address) if clean_address =~ /@/
    end

    def extract_body
      EmailParser.extract_reply_body(text_or_sanitized_html)
    end

    # Headers may arrive pre-parsed (a Hash) or as one raw string blob.
    def extract_headers
      if params[:headers].is_a?(Hash)
        deep_clean_invalid_utf8_bytes(params[:headers])
      else
        EmailParser.extract_headers(clean_invalid_utf8_bytes(params[:headers]))
      end
    end

    def extract_cc_from_headers(headers)
      EmailParser.extract_cc(headers)
    end

    # The cleaned text part when present, otherwise the sanitized HTML part.
    def text_or_sanitized_html
      text = clean_text(params.fetch(:text, ''))
      text.presence || clean_html(params.fetch(:html, '')).presence
    end

    def clean_text(text)
      clean_invalid_utf8_bytes(text)
    end

    # Turn an HTML part into plain text: strip tags, then decode entities.
    def clean_html(html)
      cleaned_html = clean_invalid_utf8_bytes(html)
      cleaned_html = strip_tags(cleaned_html)
      cleaned_html = HTMLEntities.new.decode(cleaned_html)
      cleaned_html
    end

    # Recursively clean every String nested inside Hashes and Arrays,
    # leaving all other values untouched.
    def deep_clean_invalid_utf8_bytes(object)
      case object
      when Hash
        object.inject({}) do |clean_hash, (key, dirty_value)|
          clean_hash[key] = deep_clean_invalid_utf8_bytes(dirty_value)
          clean_hash
        end
      when Array
        object.map { |element| deep_clean_invalid_utf8_bytes(element) }
      when String
        clean_invalid_utf8_bytes(object)
      else
        object
      end
    end

    # Re-encode strings that are invalid in their claimed encoding by
    # treating their bytes as ISO-8859-1 (every byte sequence is valid
    # there) and transcoding to UTF-8.
    #
    # Fix: the previous force_encoding(...).encode(...) chain mutated the
    # caller's string in place (String#force_encoding changes the
    # receiver's encoding); the two-argument String#encode performs the
    # same ISO-8859-1 -> UTF-8 transcode without touching the receiver.
    def clean_invalid_utf8_bytes(text)
      if text && !text.valid_encoding?
        text.encode('UTF-8', 'ISO-8859-1')
      else
        text
      end
    end
  end
end
Removing unnecessary trailing whitespace
require 'htmlentities'
module Griddler
# Wraps an inbound email payload (the params hash posted by an email service
# webhook) and exposes its parts — recipients, subject, body, headers,
# attachments — cleaning invalid UTF-8 bytes along the way.
class Email
include ActionView::Helpers::SanitizeHelper
attr_reader :to, :from, :cc, :bcc, :subject, :body, :raw_body, :raw_text, :raw_html,
:headers, :raw_headers, :attachments
# params: raw webhook parameters; keys read here include :to, :from, :cc,
# :bcc, :subject, :text, :html, :headers, :attachments.
def initialize(params)
@params = params
@to = recipients(:to)
@from = extract_address(params[:from])
@subject = params[:subject]
@body = extract_body
@raw_text = params[:text]
@raw_html = params[:html]
# Prefer the plain-text part; fall back to HTML when text is blank.
@raw_body = @raw_text.presence || @raw_html
@headers = extract_headers
@cc = recipients(:cc)
@bcc = recipients(:bcc)
@raw_headers = params[:headers]
@attachments = params[:attachments]
end
private
attr_reader :params
def config
@config ||= Griddler.configuration
end
# Parse every address under the given param key (:to, :cc, :bcc), dropping
# entries that do not look like addresses (no "@").
def recipients(type)
params[type].to_a.map { |recipient| extract_address(recipient) }.compact
end
# Clean and parse a single address; returns nil unless it contains "@".
def extract_address(address)
clean_address = clean_text(address)
EmailParser.parse_address(clean_address) if clean_address =~ /@/
end
def extract_body
EmailParser.extract_reply_body(text_or_sanitized_html)
end
# Headers may arrive pre-parsed (a Hash) or as one raw string blob.
def extract_headers
if params[:headers].is_a?(Hash)
deep_clean_invalid_utf8_bytes(params[:headers])
else
EmailParser.extract_headers(clean_invalid_utf8_bytes(params[:headers]))
end
end
def extract_cc_from_headers(headers)
EmailParser.extract_cc(headers)
end
# The cleaned text part when present, otherwise the sanitized HTML part.
def text_or_sanitized_html
text = clean_text(params.fetch(:text, ''))
text.presence || clean_html(params.fetch(:html, '')).presence
end
def clean_text(text)
clean_invalid_utf8_bytes(text)
end
# Turn an HTML part into plain text: strip tags, then decode entities.
def clean_html(html)
cleaned_html = clean_invalid_utf8_bytes(html)
cleaned_html = strip_tags(cleaned_html)
cleaned_html = HTMLEntities.new.decode(cleaned_html)
cleaned_html
end
# Recursively clean every String nested inside Hashes and Arrays, leaving
# all other values untouched.
def deep_clean_invalid_utf8_bytes(object)
case object
when Hash
object.inject({}) do |clean_hash, (key, dirty_value)|
clean_hash[key] = deep_clean_invalid_utf8_bytes(dirty_value)
clean_hash
end
when Array
object.map { |element| deep_clean_invalid_utf8_bytes(element) }
when String
clean_invalid_utf8_bytes(object)
else
object
end
end
# Re-encode strings that are invalid in their claimed encoding by treating
# their bytes as ISO-8859-1 and transcoding to UTF-8.
# NOTE(review): String#force_encoding mutates the receiver, so the caller's
# original string has its encoding changed in place here — confirm callers
# do not rely on the input string's encoding afterwards.
def clean_invalid_utf8_bytes(text)
if text && !text.valid_encoding?
text.force_encoding('ISO-8859-1').encode('UTF-8')
else
text
end
end
end
end
|
require "forwardable"
require "hamster/immutable"
require "hamster/enumerable"
module Hamster
def self.vector(*items)
items.empty? ? EmptyVector : Vector.new(items.freeze)
end
# A `Vector` is an ordered, integer-indexed collection of objects. Like `Array`,
# `Vector` indexing starts at 0. Also like `Array`, negative indexes count back
# from the end of the `Vector`.
#
# `Vector`'s interface is modeled after that of `Array`, minus all the methods
# which do destructive updates. Some methods which modify `Array`s destructively
# (like {#insert} or {#delete_at}) are included, but they return new `Vectors`
# and leave the existing one unchanged.
#
# = Creating New Vectors
#
# Hamster.vector('a', 'b', 'c')
# Hamster::Vector.new([:first, :second, :third])
# Hamster::Vector[1, 2, 3, 4, 5]
#
# = Retrieving Items from Vectors
#
# require 'hamster/vector'
# vector = Hamster.vector(1, 2, 3, 4, 5)
# vector[0] # => 1
# vector[-1] # => 5
# vector[0,3] # => Hamster::Vector[1, 2, 3]
# vector[1..-1] # => Hamster::Vector[2, 3, 4, 5]
# vector.first # => 1
# vector.last # => 5
#
# = Creating Modified Vectors
#
# vector.add(6) # => Hamster::Vector[1, 2, 3, 4, 5, 6]
# vector.insert(1, :a, :b) # => Hamster::Vector[1, :a, :b, 2, 3, 4, 5]
# vector.delete_at(2) # => Hamster::Vector[1, 2, 4, 5]
# vector + [6, 7] # => Hamster::Vector[1, 2, 3, 4, 5, 6, 7]
#
# Other `Array`-like methods like {#select}, {#map}, {#shuffle}, {#uniq}, {#reverse},
# {#rotate}, {#flatten}, {#sort}, {#sort_by}, {#take}, {#drop}, {#take_while},
# {#drop_while}, {#fill}, {#product}, and {#transpose} are also supported.
#
class Vector
extend Forwardable
include Immutable
include Enumerable
# @private
BLOCK_SIZE = 32
# @private
INDEX_MASK = BLOCK_SIZE - 1
# @private
BITS_PER_LEVEL = 5
# Return the number of items in this `Vector`
# @return [Integer]
attr_reader :size
def_delegator :self, :size, :length
class << self
# Create a new `Vector` populated with the given items.
#
# @return [Vector]
def [](*items)
new(items.freeze)
end
# Return an empty `Vector`. If used on a subclass, returns an empty instance
# of that class.
#
# @return [Vector]
def empty
@empty ||= self.alloc([].freeze, 0, 0)
end
# "Raw" allocation of a new `Vector`. Used internally to create a new
# instance quickly after building a modified trie.
#
# @return [Vector]
# @private
def alloc(root, size, levels)
obj = allocate
obj.instance_variable_set(:@root, root)
obj.instance_variable_set(:@size, size)
obj.instance_variable_set(:@levels, levels)
obj
end
end
def initialize(items=[].freeze)
items = items.to_a
if items.size <= 32
items = items.dup.freeze if !items.frozen?
@root, @size, @levels = items, items.size, 0
else
root, size, levels = items, items.size, 0
while root.size > 32
root = root.each_slice(32).to_a
levels += 1
end
@root, @size, @levels = root.freeze, size, levels
end
end
# Return `true` if this `Vector` contains no items.
#
# @return [Boolean]
def empty?
@size == 0
end
def_delegator :self, :empty?, :null?
# Return the first item in the `Vector`. If the vector is empty, return `nil`.
#
# @return [Object]
def first
get(0)
end
def_delegator :self, :first, :head
# Return the last item in the `Vector`. If the vector is empty, return `nil`.
#
# @return [Object]
def last
get(-1)
end
# Return a new `Vector` with `item` added after the last occupied position.
#
# @param item [Object] The object to insert at the end of the vector
# @return [Vector]
def add(item)
update_root(@size, item)
end
def_delegator :self, :add, :<<
def_delegator :self, :add, :conj
def_delegator :self, :add, :conjoin
# Return a new `Vector` with the item at `index` replaced by `item`. If the
# `item` argument is missing, but an optional code block is provided, it will
# be passed the existing item and what the block returns will replace it.
#
# @param index [Integer] The index to update
# @param item [Object] The object to insert into that position
# @return [Vector]
def set(index, item = yield(get(index)))
raise IndexError if @size == 0
index += @size if index < 0
raise IndexError if index >= @size || index < 0
update_root(index, item)
end
# Retrieve the item at `index`. If there is none (either the provided index
# is too high or too low), return `nil`.
#
# @param index [Integer] The index to retrieve
# @return [Object]
def get(index)
return nil if @size == 0
index += @size if index < 0
return nil if index >= @size || index < 0
leaf_node_for(@root, @levels * BITS_PER_LEVEL, index)[index & INDEX_MASK]
end
def_delegator :self, :get, :at
# Retrieve the value at `index`, or use the provided default value or block,
# or otherwise raise an `IndexError`.
#
# @overload fetch(index)
#   Retrieve the value at the given index, or raise an `IndexError` if it is
#   not found.
#   @param index [Integer] The index to look up
# @overload fetch(index) { |index| ... }
#   Retrieve the value at the given index, or call the optional
#   code block (with the non-existent index) and get its return value.
#   @yield [index] The index which does not exist
#   @yieldreturn [Object] Object to return instead
#   @param index [Integer] The index to look up
# @overload fetch(index, default)
#   Retrieve the value at the given index, or else return the provided
#   `default` value.
#   @param index [Integer] The index to look up
#   @param default [Object] Object to return if the key is not found
#
# @return [Object]
def fetch(index, default = (missing_default = true))
  # Remember the caller's index so the block receives it as documented
  # (the previous bare `yield` passed nothing to the block).
  original_index = index
  index += @size if index < 0
  if index >= 0 && index < @size
    get(index)
  elsif block_given?
    yield(original_index)
  elsif !missing_default
    default
  else
    raise IndexError, "index #{index} outside of vector bounds"
  end
end
# Element reference. Return the item at a specific index, or a specified,
# contiguous range of items (as a new `Vector`).
#
# @overload vector[index]
# Return the item at `index`.
# @param index [Integer] The index to retrieve.
# @overload vector[start, length]
# Return a subvector starting at index `start` and continuing for `length` elements.
# @param start [Integer] The index to start retrieving items from.
# @param length [Integer] The number of items to retrieve.
# @overload vector[range]
# Return a subvector specified by the given `range` of indices.
# @param range [Range] The range of indices to retrieve.
#
# @return [Object]
def [](arg, length = (missing_length = true))
if missing_length
if arg.is_a?(Range)
from, to = arg.begin, arg.end
from += @size if from < 0
to += @size if to < 0
to += 1 if !arg.exclude_end?
to = @size if to > @size
length = to - from
length = 0 if length < 0
subsequence(from, length)
else
get(arg)
end
else
arg += @size if arg < 0
subsequence(arg, length)
end
end
def_delegator :self, :[], :slice
# Return a new `Vector` with the given values inserted before the element at `index`.
#
# @param index [Integer] The index where the new items should go
# @param items [Array] The items to add
# @return [Vector]
def insert(index, *items)
raise IndexError if index < -@size
index += @size if index < 0
if index < @size
suffix = flatten_suffix(@root, @levels * BITS_PER_LEVEL, index, [])
suffix.unshift(*items)
elsif index == @size
suffix = items
else
suffix = Array.new(index - @size, nil).concat(items)
index = @size
end
replace_suffix(index, suffix)
end
# Return a new `Vector` with the element at `index` removed. If the given `index`
# does not exist, return `self`.
#
# @param index [Integer] The index to remove
# @return [Vector]
def delete_at(index)
return self if index >= @size || index < -@size
index += @size if index < 0
suffix = flatten_suffix(@root, @levels * BITS_PER_LEVEL, index, [])
replace_suffix(index, suffix.tap { |a| a.shift })
end
# Call the given block once for each item in the vector, passing each
# item from first to last successively to the block.
#
# @return [self]
def each(&block)
return to_enum unless block_given?
traverse_depth_first(@root, @levels, &block)
self
end
# Call the given block once for each item in the vector, passing each
# item starting from the last, and counting back to the first, successively to
# the block.
#
# @return [self]
def reverse_each(&block)
return enum_for(:reverse_each) unless block_given?
reverse_traverse_depth_first(@root, @levels, &block)
self
end
# Return a new `Vector` containing all elements for which the given block returns
# true.
#
# @return [Vector]
def filter
  return enum_for(:filter) unless block_given?
  kept = self.class.empty
  each do |element|
    kept = kept.add(element) if yield(element)
  end
  kept
end
# Return a new `Vector` with all items which are equal to `obj` removed.
# `#==` is used for checking equality.
#
# @param obj [Object] The object to remove (every occurrence)
# @return [Vector]
def delete(obj)
filter { |item| item != obj }
end
# Invoke the given block once for each item in the vector, and return a new
# `Vector` containing the values returned by the block.
#
# @return [Vector]
def map
return enum_for(:map) if not block_given?
return self if empty?
self.class.new(super)
end
def_delegator :self, :map, :collect
# Return a new `Vector` with the same elements as this one, but randomly permuted.
#
# @return [Vector]
def shuffle
self.class.new(((array = to_a).frozen? ? array.shuffle : array.shuffle!).freeze)
end
# Return a new `Vector` with no duplicate elements, as determined by `#hash` and
# `#eql?`. For each group of equivalent elements, only the first will be retained.
#
# @return [Vector]
def uniq
self.class.new(((array = to_a).frozen? ? array.uniq : array.uniq!).freeze)
end
# Return a new `Vector` with the same elements as this one, but in reverse order.
#
# @return [Vector]
def reverse
self.class.new(((array = to_a).frozen? ? array.reverse : array.reverse!).freeze)
end
# Return a new `Vector` with the same elements, but rotated so that the one at
# index `count` is the first element of the new vector. If `count` is positive,
# the elements will be shifted left, and those shifted past the lowest position
# will be moved to the end. If `count` is negative, the elements will be shifted
# right, and those shifted past the last position will be moved to the beginning.
#
# An empty `Vector` is returned unchanged; previously `count % @size` raised
# `ZeroDivisionError` when `@size` was 0 (Array#rotate on [] returns []).
#
# @param count [Integer] The number of positions to shift items by
# @return [Vector]
def rotate(count = 1)
  return self if @size == 0 || (count % @size) == 0
  self.class.new(((array = to_a).frozen? ? array.rotate(count) : array.rotate!(count)).freeze)
end
# Return a new `Vector` with all nested vectors and arrays recursively "flattened
# out", that is, their elements inserted into the new `Vector` in the place where
# the nested array/vector originally was. If an optional `level` argument is
# provided, the flattening will only be done recursively that number of times.
# A `level` of 0 means not to flatten at all, 1 means to only flatten nested
# arrays/vectors which are directly contained within this `Vector`.
#
# @param level [Integer] The depth to which flattening should be applied
# @return [Vector]
def flatten(level = nil)
return self if level == 0
self.class.new(((array = to_a).frozen? ? array.flatten(level) : array.flatten!(level)).freeze)
end
# Return a new `Vector` built by concatenating this one with `other`. `other`
# can be any object which is convertible to an `Array` using `#to_a`.
#
# @param other [Enumerable] The collection to concatenate onto this vector
# @return [Vector]
def +(other)
other = other.to_a
other = other.dup if other.frozen?
replace_suffix(@size, other)
end
def_delegator :self, :+, :concat
# `others` should be arrays and/or vectors. The corresponding elements from this
# `Vector` and each of `others` (that is, the elements with the same indices)
# will be gathered into arrays.
#
# If an optional block is provided, each such array will be passed successively
# to the block. Otherwise, a new `Vector` of all those arrays will be returned.
#
# @param others [Array] The arrays/vectors to zip together with this one
# @return [Vector, nil]
def zip(*others)
if block_given?
super
else
self.class.new(super)
end
end
# Return a new `Vector` with the same items, but sorted. The sort order will
# be determined by comparing items using `#<=>`, or if an optional code block
# is provided, by using it as a comparator. The block should accept 2 parameters,
# and should return 0, 1, or -1 if the first parameter is equal to, greater than,
# or less than the second parameter (respectively).
#
# @return [Vector]
def sort
self.class.new(super)
end
# Return a new `Vector` with the same items, but sorted. The sort order will be
# determined by mapping the items through the given block to obtain sort keys,
# and then sorting the keys according to their natural sort order.
#
# @return [Vector]
def sort_by
self.class.new(super)
end
# Drop the first `n` elements and return the rest in a new `Vector`.
# @param n [Integer] The number of elements to remove
# @return [Vector]
def drop(n)
self.class.new(super)
end
# Return only the first `n` elements in a new `Vector`.
# @param n [Integer] The number of elements to retain
# @return [Vector]
def take(n)
self.class.new(super)
end
# Drop elements up to, but not including, the first element for which the
# block returns `nil` or `false`. Gather the remaining elements into a new
# `Vector`. If no block is given, an `Enumerator` is returned instead.
#
# @return [Vector, Enumerator]
def drop_while
return enum_for(:drop_while) if not block_given?
self.class.new(super)
end
# Gather elements up to, but not including, the first element for which the
# block returns `nil` or `false`, and return them in a new `Vector`. If no block
# is given, an `Enumerator` is returned instead.
#
# @return [Vector, Enumerator]
def take_while
return enum_for(:take_while) if not block_given?
self.class.new(super)
end
# Repetition. Return a new `Vector` built by concatenating `times` copies
# of this one together.
#
# @param times [Integer] The number of times to repeat the elements in this vector
# @return [Vector]
def *(times)
return self.class.empty if times == 0
return self if times == 1
result = (to_a * times)
result.is_a?(Array) ? self.class.new(result) : result
end
# Replace a range of indexes with the given object.
#
# @overload fill(obj)
# Return a new `Vector` of the same size, with every index set to `obj`.
# @overload fill(obj, start)
# Return a new `Vector` with all indexes from `start` to the end of the
# vector set to `obj`.
# @overload fill(obj, start, length)
# Return a new `Vector` with `length` indexes, beginning from `start`,
# set to `obj`.
#
# @return [Vector]
def fill(obj, index = 0, length = nil)
raise IndexError if index < -@size
index += @size if index < 0
length ||= @size - index # to the end of the array, if no length given
if index < @size
suffix = flatten_suffix(@root, @levels * BITS_PER_LEVEL, index, [])
suffix.fill(obj, 0, length)
elsif index == @size
suffix = Array.new(length, obj)
else
suffix = Array.new(index - @size, nil).concat(Array.new(length, obj))
index = @size
end
replace_suffix(index, suffix)
end
# When invoked with a block, yields all combinations of length `n` of items
# from the `Vector`, and then returns `self`. There is no guarantee about
# which order the combinations will be yielded in.
#
# If no block is given, an `Enumerator` is returned instead.
#
# @return [self, Enumerator]
def combination(n)
return enum_for(:combination, n) if not block_given?
return self if n < 0 || @size < n
if n == 0
yield []
elsif n == 1
each { |item| yield [item] }
elsif n == @size
yield self.to_a
else
combos = lambda do |result,index,remaining|
while @size - index > remaining
if remaining == 1
yield result.dup << get(index)
else
combos[result.dup << get(index), index+1, remaining-1]
end
index += 1
end
index.upto(@size-1) { |i| result << get(i) }
yield result
end
combos[[], 0, n]
end
self
end
# When invoked with a block, yields all repeated combinations of length `n` of
# items from the `Vector`, and then returns `self`. A "repeated combination" is
# one in which any item from the `Vector` can appear consecutively any number of
# times.
#
# There is no guarantee about which order the combinations will be yielded in.
#
# If no block is given, an `Enumerator` is returned instead.
#
# @return [self, Enumerator]
def repeated_combination(n)
return enum_for(:repeated_combination, n) if not block_given?
if n < 0
# yield nothing
elsif n == 0
yield []
elsif n == 1
each { |item| yield [item] }
elsif @size == 0
# yield nothing
else
combos = lambda do |result,index,remaining|
while index < @size-1
if remaining == 1
yield result.dup << get(index)
else
combos[result.dup << get(index), index, remaining-1]
end
index += 1
end
item = get(index)
remaining.times { result << item }
yield result
end
combos[[], 0, n]
end
self
end
# Yields all permutations of length `n` of items from the `Vector`, and then
# returns `self`. If no length `n` is specified, permutations of all elements
# will be yielded.
#
# There is no guarantee about which order the permutations will be yielded in.
#
# If no block is given, an `Enumerator` is returned instead.
#
# @return [self, Enumerator]
def permutation(n = @size)
  return enum_for(:permutation, n) if not block_given?
  if n < 0 || @size < n
    # yield nothing
  elsif n == 0
    yield []
  elsif n == 1
    each { |item| yield [item] }
  else
    # Classic backtracking: used[i] marks indices already placed, result
    # accumulates the permutation being built (one position per recursion
    # level).
    used, result = [], []
    perms = lambda do |index|
      0.upto(@size-1) do |i|
        if !used[i]
          result[index] = get(i)
          if index < n-1
            used[i] = true
            perms[index+1]
            used[i] = false # backtrack
          else
            yield result.dup
          end
        end
      end
    end
    perms[0]
  end
  self
end
# When invoked with a block, yields all repeated permutations of length `n` of
# items from the `Vector`, and then returns `self`. A "repeated permutation" is
# one where any item from the `Vector` can appear any number of times, and in
# any position (not just consecutively)
#
# If no length `n` is specified, permutations of all elements will be yielded.
# There is no guarantee about which order the permutations will be yielded in.
#
# If no block is given, an `Enumerator` is returned instead.
#
# @return [self, Enumerator]
def repeated_permutation(n = @size)
  return enum_for(:repeated_permutation, n) if not block_given?
  if n < 0
    # yield nothing
  elsif n == 0
    yield []
  elsif n == 1
    each { |item| yield [item] }
  else
    # Like #permutation but with no `used` bookkeeping: every item is a
    # candidate at every position, so just try all indices at each level.
    result = []
    perms = lambda do |index|
      0.upto(@size-1) do |i|
        result[index] = get(i)
        if index < n-1
          perms[index+1]
        else
          yield result.dup
        end
      end
    end
    perms[0]
  end
  self
end
# With one or more vector or array arguments, return the cartesian product of
# this vector's elements and those of each argument; with no arguments, return the
# result of multiplying all this vector's items together.
#
# @overload product(*vectors)
# Return a `Vector` of all combinations of elements from this `Vector` and each
# of the given vectors or arrays. The length of the returned `Vector` is the product
# of `self.size` and the size of each argument vector or array.
# @overload product
# Return the result of multiplying all the items in this `Vector` together.
#
# @return [Vector]
def product(*vectors)
  # if no vectors passed, return "product" as in result of multiplying all items
  return super if vectors.empty?
  vectors.unshift(self)
  # Any empty participant makes the cartesian product empty.
  if vectors.any?(&:empty?)
    return block_given? ? self : []
  end
  # Odometer-style iteration: counters[i] is the current index into
  # vectors[i]. bump_counters advances the rightmost counter, carrying
  # leftwards on overflow; it returns true once every combination has been
  # produced.
  counters = Array.new(vectors.size, 0)
  bump_counters = lambda do
    i = vectors.size-1
    counters[i] += 1
    while counters[i] == vectors[i].size
      counters[i] = 0
      i -= 1
      return true if i == -1 # we are done
      counters[i] += 1
    end
    false # not done yet
  end
  # Materialize the combination currently selected by `counters`.
  build_array = lambda do
    array = []
    counters.each_with_index { |index,i| array << vectors[i][index] }
    array
  end
  if block_given?
    while true
      yield build_array[]
      return self if bump_counters[]
    end
  else
    result = []
    while true
      result << build_array[]
      return result if bump_counters[]
    end
  end
end
# Assume all elements are vectors or arrays and transpose the rows and columns.
# In other words, take the first element of each nested vector/array and gather
# them together into a new `Vector`. Do likewise for the second, third, and so on
# down to the end of each nested vector/array. Gather all the resulting `Vectors`
# into a new `Vector` and return it.
#
# This operation is closely related to {#zip}. The result is almost the same as
# calling {#zip} on the first nested vector/array with the others supplied as
# arguments.
#
# @return [Vector]
def transpose
  # An empty vector transposes to an empty vector of the same class.
  return self.class.empty if empty?
  # Every row must match the width of the first row.
  width = first.size
  columns = Array.new(width) { [] }
  (0...@size).each do |row_index|
    row = get(row_index)
    unless row.size == width
      raise IndexError, "element size differs (#{row.size} should be #{width})"
    end
    # Distribute this row's entries across the column accumulators.
    columns.each_with_index { |column, j| column << row[j] }
  end
  # Wrap each column, and the collection of columns, in vectors.
  self.class.new(columns.map { |column| self.class.new(column) })
end
# By using binary search, finds a value from this `Vector` which meets the
# condition defined by the provided block. Behavior is just like `Array#bsearch`.
# See `Array#bsearch` for details.
#
# @return [Object]
def bsearch
  low, high, result = 0, @size, nil
  while low < high
    mid = (low + ((high - low) >> 1))
    val = get(mid)
    v = yield val
    if v.is_a? Numeric
      # "Find any" mode: 0 means found; positive narrows the search to the
      # left half, negative to the right half.
      if v == 0
        return val
      elsif v > 0
        high = mid
      else
        low = mid + 1
      end
    elsif v == true
      # "Find minimum" mode: remember this candidate and keep searching left
      # for an earlier element that also satisfies the block.
      result = val
      high = mid
    elsif !v
      # false/nil: the answer (if any) lies to the right.
      low = mid + 1
    else
      raise TypeError, "wrong argument type #{v.class} (must be numeric, true, false, or nil)"
    end
  end
  result
end
# Return an empty `Vector` instance, of the same class as this one. Useful if you
# have multiple subclasses of `Vector` and want to treat them polymorphically.
#
# @return [Vector]
def clear
  # Delegate to the class-level memoized empty instance (see Vector.empty),
  # so subclasses get an empty instance of their own class.
  self.class.empty
end
# Return a randomly chosen item from this `Vector`. If the vector is empty, return `nil`.
#
# @return [Object]
def sample
  # Handle the empty vector explicitly: previously this called rand(0),
  # which returns a random Float, and relied on #get happening to treat a
  # Float index as out-of-range. Make the nil return deliberate instead.
  return nil if empty?
  get(rand(@size))
end
# Return a new `Vector` with only the elements at the given `indices`, in the
# order specified by `indices`. If any of the `indices` do not exist, `nil`s will
# appear in their places.
#
# @param indices [Array] The indices to retrieve and gather into a new `Vector`
# @return [Vector]
def values_at(*indices)
  # Look up each requested index (missing ones come back as nil) and wrap
  # the frozen result in a new vector of the same class.
  gathered = indices.map { |index| get(index) }
  self.class.new(gathered.freeze)
end
# Return the index of the last element which is equal to the provided object,
# or for which the provided block returns true.
#
# @overload rindex(obj)
# Return the index of the last element in this `Vector` which is `#==` to `obj`.
# @overload rindex { |item| ... }
# Return the index of the last element in this `Vector` for which the block
# returns true. (Iteration starts from the last element, counts back, and
# stops as soon as a matching element is found.)
#
# @return [Index]
def rindex(obj = (missing_arg = true))
  # No object and no block: defer with an Enumerator.
  return enum_for(:rindex) if missing_arg && !block_given?
  index = @size - 1
  # Walk backwards; an explicit `obj` argument takes precedence over a block.
  reverse_each do |element|
    matched = missing_arg ? yield(element) : (element == obj)
    return index if matched
    index -= 1
  end
  nil
end
# Assumes all elements are nested, indexable collections, and searches through them,
# comparing `obj` with the first element of each nested collection. Return the
# first nested collection which matches, or `nil` if none is found.
#
# @param obj [Object] The object to search for
# @return [Object]
def assoc(obj)
  # Scan each nested collection; the first whose initial element equals
  # `obj` wins.
  each do |entry|
    return entry if obj == entry[0]
  end
  nil
end
# Assumes all elements are nested, indexable collections, and searches through them,
# comparing `obj` with the second element of each nested collection. Return the
# first nested collection which matches, or `nil` if none is found.
#
# @param obj [Object] The object to search for
# @return [Object]
def rassoc(obj)
  # Scan each nested collection; the first whose second element equals
  # `obj` wins.
  each do |entry|
    return entry if obj == entry[1]
  end
  nil
end
# Return an `Array` with the same elements, in the same order. The returned
# `Array` may or may not be frozen.
#
# @return [Array]
def to_a
  if @levels == 0
    # The trie is a single (frozen) leaf array, so it can be handed out
    # directly without copying.
    @root
  else
    # Multi-level trie: flatten every leaf into one fresh Array.
    flatten_node(@root, @levels * BITS_PER_LEVEL, [])
  end
end
# Return true if `other` has the same type and contents as this `Vector`.
#
# @param other [Object] The collection to compare with
# @return [Boolean]
def eql?(other)
  # Identity is the cheapest possible win.
  return true if equal?(other)
  # Must be exactly the same class, with the same item count.
  return false unless instance_of?(other.class)
  return false unless @size == other.size
  # Finally compare the underlying tries structurally.
  other_root = other.instance_variable_get(:@root)
  @root.eql?(other_root)
end
# See `Object#hash`.
# @return [Integer]
def hash
reduce(0) { |hash, item| (hash << 5) - hash + item.hash }
end
private
# Yield every item in the subtree rooted at `node`, left to right.
# `level` counts how many levels of internal nodes remain above the leaves.
def traverse_depth_first(node, level, &block)
  # At a leaf node every slot is an item; yield them all in order.
  return node.each(&block) if level == 0
  # Otherwise recurse into each child subtree, left to right.
  node.each { |child| traverse_depth_first(child, level - 1, &block) }
end
# Yield every item in the subtree rooted at `node`, right to left
# (the mirror image of #traverse_depth_first).
def reverse_traverse_depth_first(node, level, &block)
  return node.reverse_each(&block) if level == 0
  node.reverse_each { |child| reverse_traverse_depth_first(child, level - 1, &block) }
end
# Descend from `node` to the leaf array holding `index`. At each level the
# relevant BITS_PER_LEVEL bits of `index` select which child to follow;
# `bitshift` reaches 0 when we arrive at a leaf.
def leaf_node_for(node, bitshift, index)
  while bitshift > 0
    node = node[(index >> bitshift) & INDEX_MASK]
    bitshift -= BITS_PER_LEVEL
  end
  node
end
# Return a new Vector with `item` stored at `index`, path-copying the trie.
def update_root(index, item)
  root, levels = @root, @levels
  # If `index` exceeds the current trie's capacity, wrap the root in new
  # single-child parent nodes until it fits.
  while index >= (1 << (BITS_PER_LEVEL * (levels + 1)))
    root = [root].freeze
    levels += 1
  end
  root = update_leaf_node(root, levels * BITS_PER_LEVEL, index, item)
  # Size only grows when writing at/past the current end.
  self.class.alloc(root, @size > index ? @size : index + 1, levels)
end
# Copy-on-write a single slot: rebuild the path from `node` down to the leaf
# containing `index`, replacing that slot with `item`. Returns the new
# (frozen) node for this level.
def update_leaf_node(node, bitshift, index, item)
  slot_index = (index >> bitshift) & INDEX_MASK
  if bitshift > 0
    # Not at a leaf yet: recurse into the child (missing children
    # materialize as empty arrays), and store the rebuilt child below.
    old_child = node[slot_index] || []
    item = update_leaf_node(old_child, bitshift - BITS_PER_LEVEL, index, item)
  end
  # Duplicate this node, overwrite one slot, and re-freeze.
  node.dup.tap { |n| n[slot_index] = item }.freeze
end
# Collect the items at absolute positions `from`..`to` (inclusive) out of
# the subtree rooted at `node`, returning them as a plain Array.
def flatten_range(node, bitshift, from, to)
  from_slot = (from >> bitshift) & INDEX_MASK
  to_slot = (to >> bitshift) & INDEX_MASK
  if bitshift == 0 # are we at the bottom?
    node.slice(from_slot, to_slot-from_slot+1)
  elsif from_slot == to_slot
    # The whole range lives under a single child; descend into it.
    flatten_range(node[from_slot], bitshift - BITS_PER_LEVEL, from, to)
  else
    # the following bitmask can be used to pick out the part of the from/to indices
    # which will be used to direct path BELOW this node
    mask = ((1 << bitshift) - 1)
    result = []
    if from & mask == 0
      # Range starts on a child boundary: take the whole first child.
      flatten_node(node[from_slot], bitshift - BITS_PER_LEVEL, result)
    else
      # Partial first child: recurse for its suffix (up to its last index).
      result.concat(flatten_range(node[from_slot], bitshift - BITS_PER_LEVEL, from, from | mask))
    end
    # Children strictly between the two endpoints are taken whole.
    (from_slot+1).upto(to_slot-1) do |slot_index|
      flatten_node(node[slot_index], bitshift - BITS_PER_LEVEL, result)
    end
    if to & mask == mask
      # Range ends on a child boundary: take the whole last child.
      flatten_node(node[to_slot], bitshift - BITS_PER_LEVEL, result)
    else
      # Partial last child: recurse for its prefix (from its first index).
      result.concat(flatten_range(node[to_slot], bitshift - BITS_PER_LEVEL, to & ~mask, to))
    end
    result
  end
end
# Append every item in the subtree rooted at `node` to `result`, in order.
def flatten_node(node, bitshift, result)
  if bitshift == 0
    # Leaf: its slots are the items themselves.
    result.concat(node)
  elsif bitshift == BITS_PER_LEVEL
    # One level above the leaves: concat children directly, avoiding one
    # recursive call per child.
    node.each { |a| result.concat(a) }
  else
    bitshift -= BITS_PER_LEVEL
    node.each { |a| flatten_node(a, bitshift, result) }
  end
  result
end
# Return a new Vector of `length` items starting at index `from`, or nil
# when the arguments are out of range (mirroring Array#slice semantics).
def subsequence(from, length)
  return nil if from > @size || from < 0 || length < 0
  # Clamp the length so the range does not run past the end.
  length = @size - from if @size < from + length
  return self.class.empty if length == 0
  self.class.new(flatten_range(@root, @levels * BITS_PER_LEVEL, from, from + length - 1))
end
# Append all items from absolute index `from` through the end of the
# subtree rooted at `node` onto `result`.
def flatten_suffix(node, bitshift, from, result)
  from_slot = (from >> bitshift) & INDEX_MASK
  if bitshift == 0
    if from_slot == 0
      result.concat(node)
    else
      result.concat(node.slice(from_slot, 32)) # entire suffix of node. excess length is ignored by #slice
    end
  else
    mask = ((1 << bitshift) - 1)
    if from & mask == 0
      # `from` falls exactly on a child boundary: every child from
      # from_slot onward is taken whole.
      from_slot.upto(node.size-1) do |i|
        flatten_node(node[i], bitshift - BITS_PER_LEVEL, result)
      end
    elsif child = node[from_slot]
      # Partial first child, then every following child taken whole.
      flatten_suffix(child, bitshift - BITS_PER_LEVEL, from, result)
      (from_slot+1).upto(node.size-1) do |i|
        flatten_node(node[i], bitshift - BITS_PER_LEVEL, result)
      end
    end
    result
  end
end
# Build a new Vector which keeps items 0...from and appends `suffix` (a
# plain, mutable Array -- it is consumed destructively) after them. This is
# the workhorse behind #insert, #delete_at, #fill, and #+.
def replace_suffix(from, suffix)
  # new suffix can go directly after existing elements
  raise IndexError if from > @size
  root, levels = @root, @levels
  if (from >> (BITS_PER_LEVEL * (@levels + 1))) != 0
    # index where new suffix goes doesn't fall within current tree
    # we will need to deepen tree
    root = [root].freeze
    levels += 1
  end
  new_size = from + suffix.size
  root = replace_node_suffix(root, levels * BITS_PER_LEVEL, from, suffix)
  if !suffix.empty?
    # Shape the leftover suffix items into nodes of the current depth, then
    # keep deepening the tree as long as the root overflows 32 slots.
    levels.times { suffix = suffix.each_slice(32).to_a }
    root.concat(suffix)
    while root.size > 32
      root = root.each_slice(32).to_a
      levels += 1
    end
  else
    # Suffix fully consumed: collapse redundant single-child root nodes.
    # NOTE(review): this loop assumes a 1-slot root is always an internal
    # node (callers appear to guarantee a leaf root keeps >= 2 items here);
    # a single-item leaf root would unwrap onto the item itself -- confirm.
    while root.size == 1
      root = root[0]
      levels -= 1
    end
  end
  self.class.alloc(root.freeze, new_size, levels)
end
# Recursive helper for #replace_suffix: rebuild the subtree at `node`,
# keeping items below absolute index `from` and filling the rest of this
# subtree's capacity by shifting items off the front of `suffix`
# (destructively). Returns a plain, unfrozen node.
def replace_node_suffix(node, bitshift, from, suffix)
  from_slot = (from >> bitshift) & INDEX_MASK
  if bitshift == 0
    # Leaf level: keep the prefix slots, refill the rest from `suffix`.
    if from_slot == 0
      suffix.shift(32)
    else
      node.take(from_slot).concat(suffix.shift(32 - from_slot))
    end
  else
    mask = ((1 << bitshift) - 1)
    if from & mask == 0
      # `from` lands exactly on a child boundary.
      if from_slot == 0
        # The whole node is replaced: pull this subtree's full capacity
        # (32 * 32^level items) and slice it into nodes of proper depth.
        new_node = suffix.shift(32 * (1 << bitshift))
        while bitshift != 0
          new_node = new_node.each_slice(32).to_a
          bitshift -= BITS_PER_LEVEL
        end
        new_node
      else
        # Keep the leading children whole, rebuild the rest from `suffix`.
        result = node.take(from_slot)
        remainder = suffix.shift((32 - from_slot) * (1 << bitshift))
        while bitshift != 0
          remainder = remainder.each_slice(32).to_a
          bitshift -= BITS_PER_LEVEL
        end
        result.concat(remainder)
      end
    elsif child = node[from_slot]
      # `from` lands inside child `from_slot`: recurse into it, then refill
      # the remaining sibling slots from `suffix`.
      result = node.take(from_slot)
      result.push(replace_node_suffix(child, bitshift - BITS_PER_LEVEL, from, suffix))
      remainder = suffix.shift((31 - from_slot) * (1 << bitshift))
      while bitshift != 0
        remainder = remainder.each_slice(32).to_a
        bitshift -= BITS_PER_LEVEL
      end
      result.concat(remainder)
    else
      raise "Shouldn't happen"
    end
  end
end
end
# The canonical empty `Vector`. Returned by `Hamster.vector` and `Vector[]` when
# invoked with no arguments; also returned by `Vector.empty`. Prefer using this
# one rather than creating many empty vectors using `Vector.new`.
#
EmptyVector = Hamster::Vector.empty
end
Align comments in Vector docstring
require "forwardable"
require "hamster/immutable"
require "hamster/enumerable"
module Hamster
def self.vector(*items)
items.empty? ? EmptyVector : Vector.new(items.freeze)
end
# A `Vector` is an ordered, integer-indexed collection of objects. Like `Array`,
# `Vector` indexing starts at 0. Also like `Array`, negative indexes count back
# from the end of the `Vector`.
#
# `Vector`'s interface is modeled after that of `Array`, minus all the methods
# which do destructive updates. Some methods which modify `Array`s destructively
# (like {#insert} or {#delete_at}) are included, but they return new `Vectors`
# and leave the existing one unchanged.
#
# = Creating New Vectors
#
# Hamster.vector('a', 'b', 'c')
# Hamster::Vector.new([:first, :second, :third])
# Hamster::Vector[1, 2, 3, 4, 5]
#
# = Retrieving Items from Vectors
#
# require 'hamster/vector'
# vector = Hamster.vector(1, 2, 3, 4, 5)
# vector[0] # => 1
# vector[-1] # => 5
# vector[0,3] # => Hamster::Vector[1, 2, 3]
# vector[1..-1] # => Hamster::Vector[2, 3, 4, 5]
# vector.first # => 1
# vector.last # => 5
#
# = Creating Modified Vectors
#
# vector.add(6) # => Hamster::Vector[1, 2, 3, 4, 5, 6]
# vector.insert(1, :a, :b) # => Hamster::Vector[1, :a, :b, 2, 3, 4, 5]
# vector.delete_at(2) # => Hamster::Vector[1, 2, 4, 5]
# vector + [6, 7] # => Hamster::Vector[1, 2, 3, 4, 5, 6, 7]
#
# Other `Array`-like methods like {#select}, {#map}, {#shuffle}, {#uniq}, {#reverse},
# {#rotate}, {#flatten}, {#sort}, {#sort_by}, {#take}, {#drop}, {#take_while},
# {#drop_while}, {#fill}, {#product}, and {#transpose} are also supported.
#
class Vector
extend Forwardable
include Immutable
include Enumerable
# @private
BLOCK_SIZE = 32
# @private
INDEX_MASK = BLOCK_SIZE - 1
# @private
BITS_PER_LEVEL = 5
# Return the number of items in this `Vector`
# @return [Integer]
attr_reader :size
def_delegator :self, :size, :length
class << self
  # Create a new `Vector` populated with the given items.
  #
  # @return [Vector]
  def [](*items)
    new(items.freeze)
  end
  # Return an empty `Vector`. If used on a subclass, returns an empty instance
  # of that class.
  #
  # @return [Vector]
  def empty
    # Memoized per class, so Vector.empty (and each subclass) allocates only
    # one canonical empty instance.
    @empty ||= self.alloc([].freeze, 0, 0)
  end
  # "Raw" allocation of a new `Vector`. Used internally to create a new
  # instance quickly after building a modified trie.
  #
  # @param root [Array] (frozen) root node of the trie
  # @param size [Integer] number of items in the vector
  # @param levels [Integer] number of internal-node levels above the leaves
  # @return [Vector]
  # @private
  def alloc(root, size, levels)
    # Bypasses #initialize entirely; the caller supplies a ready-made trie.
    obj = allocate
    obj.instance_variable_set(:@root, root)
    obj.instance_variable_set(:@size, size)
    obj.instance_variable_set(:@levels, levels)
    obj
  end
end
# Build a `Vector` from any object convertible with `#to_a`. Up to 32 items
# become a single leaf node; larger inputs are repeatedly chunked into
# 32-slot nodes until the root itself fits in one node.
def initialize(items=[].freeze)
  leaf = items.to_a
  count = leaf.size
  if count <= 32
    # Single-leaf trie; keep (or make) a frozen copy as the root.
    leaf = leaf.dup.freeze unless leaf.frozen?
    @root = leaf
    @size = count
    @levels = 0
  else
    # Chunk into 32-slot nodes, one extra level per pass.
    depth = 0
    node = leaf
    until node.size <= 32
      node = node.each_slice(32).to_a
      depth += 1
    end
    @root = node.freeze
    @size = count
    @levels = depth
  end
end
# Return `true` if this `Vector` contains no items.
#
# @return [Boolean]
def empty?
@size == 0
end
def_delegator :self, :empty?, :null?
# Return the first item in the `Vector`. If the vector is empty, return `nil`.
#
# @return [Object]
def first
get(0)
end
def_delegator :self, :first, :head
# Return the last item in the `Vector`. If the vector is empty, return `nil`.
#
# @return [Object]
def last
get(-1)
end
# Return a new `Vector` with `item` added after the last occupied position.
#
# @param item [Object] The object to insert at the end of the vector
# @return [Vector]
def add(item)
update_root(@size, item)
end
def_delegator :self, :add, :<<
def_delegator :self, :add, :conj
def_delegator :self, :add, :conjoin
# Return a new `Vector` with the item at `index` replaced by `item`. If the
# `item` argument is missing, but an optional code block is provided, it will
# be passed the existing item and what the block returns will replace it.
#
# @param index [Integer] The index to update
# @param item [Object] The object to insert into that position
# @return [Vector]
def set(index, item = yield(get(index)))
  # NOTE: when `item` is omitted, the block is invoked (with the current
  # value at `index`) while evaluating the parameter default -- i.e. BEFORE
  # the bounds checks below run.
  raise IndexError if @size == 0
  # Negative indices count back from the end.
  index += @size if index < 0
  raise IndexError if index >= @size || index < 0
  update_root(index, item)
end
# Retrieve the item at `index`. If there is none (either the provided index
# is too high or too low), return `nil`.
#
# @param index [Integer] The index to retrieve
# @return [Object]
def get(index)
  return nil if @size == 0
  # Negative indices count back from the end.
  index += @size if index < 0
  return nil if index >= @size || index < 0
  # Descend to the leaf holding this index; the low BITS_PER_LEVEL bits
  # select the slot within the leaf.
  leaf_node_for(@root, @levels * BITS_PER_LEVEL, index)[index & INDEX_MASK]
end
def_delegator :self, :get, :at
# Retrieve the value at `index`, or use the provided default value or block,
# or otherwise raise an `IndexError`.
#
# @overload fetch(index)
# Retrieve the value at the given index, or raise an `IndexError` if it is
# not found.
# @param index [Integer] The index to look up
# @overload fetch(index) { |index| ... }
# Retrieve the value at the given index, or call the optional
# code block (with the non-existent index) and get its return value.
# @yield [index] The index which does not exist
# @yieldreturn [Object] Object to return instead
# @param index [Integer] The index to look up
# @overload fetch(index, default)
# Retrieve the value at the given index, or else return the provided
# `default` value.
# @param index [Integer] The index to look up
# @param default [Object] Object to return if the key is not found
#
# @return [Object]
def fetch(index, default = (missing_default = true))
  # Keep the caller's index for the block argument and the error message;
  # the working copy is normalized for the bounds check.
  original_index = index
  index += @size if index < 0
  if index >= 0 && index < size
    get(index)
  elsif block_given?
    # BUGFIX: the block is documented to receive the non-existent index,
    # but was previously invoked via a bare `yield` with no argument.
    yield(original_index)
  elsif !missing_default
    default
  else
    raise IndexError, "index #{original_index} outside of vector bounds"
  end
end
# Element reference. Return the item at a specific index, or a specified,
# contiguous range of items (as a new `Vector`).
#
# @overload vector[index]
# Return the item at `index`.
# @param index [Integer] The index to retrieve.
# @overload vector[start, length]
# Return a subvector starting at index `start` and continuing for `length` elements.
# @param start [Integer] The index to start retrieving items from.
# @param length [Integer] The number of items to retrieve.
# @overload vector[range]
# Return a subvector specified by the given `range` of indices.
# @param range [Range] The range of indices to retrieve.
#
# @return [Object]
def [](arg, length = (missing_length = true))
  if missing_length
    if arg.is_a?(Range)
      # Convert the range into a from/length pair, normalizing negative
      # endpoints and clamping to the vector's size.
      from, to = arg.begin, arg.end
      from += @size if from < 0
      to += @size if to < 0
      to += 1 if !arg.exclude_end?
      to = @size if to > @size
      length = to - from
      length = 0 if length < 0
      subsequence(from, length)
    else
      # Single integer index.
      get(arg)
    end
  else
    # start/length form; negative start counts back from the end.
    arg += @size if arg < 0
    subsequence(arg, length)
  end
end
def_delegator :self, :[], :slice
# Return a new `Vector` with the given values inserted before the element at `index`.
#
# @param index [Integer] The index where the new items should go
# @param items [Array] The items to add
# @return [Vector]
def insert(index, *items)
raise IndexError if index < -@size
index += @size if index < 0
if index < @size
suffix = flatten_suffix(@root, @levels * BITS_PER_LEVEL, index, [])
suffix.unshift(*items)
elsif index == @size
suffix = items
else
suffix = Array.new(index - @size, nil).concat(items)
index = @size
end
replace_suffix(index, suffix)
end
# Return a new `Vector` with the element at `index` removed. If the given `index`
# does not exist, return `self`.
#
# @param index [Integer] The index to remove
# @return [Vector]
def delete_at(index)
  # Nonexistent index: nothing to do, the vector is unchanged.
  return self if index >= @size || index < -@size
  index += @size if index < 0
  # Gather everything from `index` onward, drop the doomed element, and
  # splice the remainder back on in its place.
  trailing = flatten_suffix(@root, @levels * BITS_PER_LEVEL, index, [])
  trailing.shift
  replace_suffix(index, trailing)
end
# Call the given block once for each item in the vector, passing each
# item from first to last successively to the block.
#
# @return [self]
def each(&block)
return to_enum unless block_given?
traverse_depth_first(@root, @levels, &block)
self
end
# Call the given block once for each item in the vector, passing each
# item starting from the last, and counting back to the first, successively to
# the block.
#
# @return [self]
def reverse_each(&block)
return enum_for(:reverse_each) unless block_given?
reverse_traverse_depth_first(@root, @levels, &block)
self
end
# Return a new `Vector` containing all elements for which the given block returns
# true.
#
# @return [Vector]
def filter
return enum_for(:filter) unless block_given?
reduce(self.class.empty) { |vector, item| yield(item) ? vector.add(item) : vector }
end
# Return a new `Vector` with all items which are equal to `obj` removed.
# `#==` is used for checking equality.
#
# @param obj [Object] The object to remove (every occurrence)
# @return [Vector]
def delete(obj)
filter { |item| item != obj }
end
# Invoke the given block once for each item in the vector, and return a new
# `Vector` containing the values returned by the block.
#
# @return [Vector]
def map
  # Without a block, defer with an Enumerator.
  return enum_for(:map) unless block_given?
  # Mapping over nothing yields the same (empty) vector.
  return self if empty?
  mapped = super
  self.class.new(mapped)
end
def_delegator :self, :map, :collect
# Return a new `Vector` with the same elements as this one, but randomly permuted.
#
# @return [Vector]
def shuffle
self.class.new(((array = to_a).frozen? ? array.shuffle : array.shuffle!).freeze)
end
# Return a new `Vector` with no duplicate elements, as determined by `#hash` and
# `#eql?`. For each group of equivalent elements, only the first will be retained.
#
# @return [Vector]
# Return a new `Vector` with no duplicate elements, as determined by `#hash`
# and `#eql?`. For each group of equivalent elements, only the first is kept.
#
# @return [Vector]
def uniq
  array = to_a
  if array.frozen?
    self.class.new(array.uniq.freeze)
  else
    # BUGFIX: Array#uniq! returns nil when there were no duplicates, which
    # previously turned a duplicate-free multi-level vector into an EMPTY
    # vector here. Fall back to the (already unique) array itself.
    self.class.new((array.uniq! || array).freeze)
  end
end
# Return a new `Vector` with the same elements as this one, but in reverse order.
#
# @return [Vector]
def reverse
self.class.new(((array = to_a).frozen? ? array.reverse : array.reverse!).freeze)
end
# Return a new `Vector` with the same elements, but rotated so that the one at
# index `count` is the first element of the new vector. If `count` is positive,
# the elements will be shifted left, and those shifted past the lowest position
# will be moved to the end. If `count` is negative, the elements will be shifted
# right, and those shifted past the last position will be moved to the beginning.
#
# @param count [Integer] The number of positions to shift items by
# @return [Vector]
def rotate(count = 1)
return self if (count % @size) == 0
self.class.new(((array = to_a).frozen? ? array.rotate(count) : array.rotate!(count)).freeze)
end
# Return a new `Vector` with all nested vectors and arrays recursively "flattened
# out", that is, their elements inserted into the new `Vector` in the place where
# the nested array/vector originally was. If an optional `level` argument is
# provided, the flattening will only be done recursively that number of times.
# A `level` of 0 means not to flatten at all, 1 means to only flatten nested
# arrays/vectors which are directly contained within this `Vector`.
#
# @param level [Integer] The depth to which flattening should be applied
# @return [Vector]
def flatten(level = nil)
  # Flattening to depth 0 is a no-op by definition.
  return self if level == 0
  array = to_a
  if array.frozen?
    self.class.new(array.flatten(level).freeze)
  else
    # BUGFIX: Array#flatten! returns nil when there was nothing to flatten,
    # which previously turned an already-flat multi-level vector into an
    # EMPTY vector here. Fall back to the (already flat) array itself.
    self.class.new((array.flatten!(level) || array).freeze)
  end
end
# Return a new `Vector` built by concatenating this one with `other`. `other`
# can be any object which is convertible to an `Array` using `#to_a`.
#
# @param other [Enumerable] The collection to concatenate onto this vector
# @return [Vector]
def +(other)
other = other.to_a
other = other.dup if other.frozen?
replace_suffix(@size, other)
end
def_delegator :self, :+, :concat
# `others` should be arrays and/or vectors. The corresponding elements from this
# `Vector` and each of `others` (that is, the elements with the same indices)
# will be gathered into arrays.
#
# If an optional block is provided, each such array will be passed successively
# to the block. Otherwise, a new `Vector` of all those arrays will be returned.
#
# @param others [Array] The arrays/vectors to zip together with this one
# @return [Vector, nil]
def zip(*others)
if block_given?
super
else
self.class.new(super)
end
end
# Return a new `Vector` with the same items, but sorted. The sort order will
# be determined by comparing items using `#<=>`, or if an optional code block
# is provided, by using it as a comparator. The block should accept 2 parameters,
# and should return 0, 1, or -1 if the first parameter is equal to, greater than,
# or less than the second parameter (respectively).
#
# @return [Vector]
def sort
self.class.new(super)
end
# Return a new `Vector` with the same items, but sorted. The sort order will be
# determined by mapping the items through the given block to obtain sort keys,
# and then sorting the keys according to their natural sort order.
#
# @return [Vector]
def sort_by
self.class.new(super)
end
# Drop the first `n` elements and return the rest in a new `Vector`.
# @param n [Integer] The number of elements to remove
# @return [Vector]
def drop(n)
self.class.new(super)
end
# Return only the first `n` elements in a new `Vector`.
# @param n [Integer] The number of elements to retain
# @return [Vector]
def take(n)
self.class.new(super)
end
# Drop elements up to, but not including, the first element for which the
# block returns `nil` or `false`. Gather the remaining elements into a new
# `Vector`. If no block is given, an `Enumerator` is returned instead.
#
# @return [Vector, Enumerator]
def drop_while
return enum_for(:drop_while) if not block_given?
self.class.new(super)
end
# Gather elements up to, but not including, the first element for which the
# block returns `nil` or `false`, and return them in a new `Vector`. If no block
# is given, an `Enumerator` is returned instead.
#
# @return [Vector, Enumerator]
def take_while
return enum_for(:take_while) if not block_given?
self.class.new(super)
end
# Repetition. Return a new `Vector` built by concatenating `times` copies
# of this one together.
#
# @param times [Integer] The number of times to repeat the elements in this vector
# @return [Vector]
def *(times)
return self.class.empty if times == 0
return self if times == 1
result = (to_a * times)
result.is_a?(Array) ? self.class.new(result) : result
end
# Replace a range of indexes with the given object.
#
# @overload fill(obj)
# Return a new `Vector` of the same size, with every index set to `obj`.
# @overload fill(obj, start)
# Return a new `Vector` with all indexes from `start` to the end of the
# vector set to `obj`.
# @overload fill(obj, start, length)
# Return a new `Vector` with `length` indexes, beginning from `start`,
# set to `obj`.
#
# @return [Vector]
def fill(obj, index = 0, length = nil)
raise IndexError if index < -@size
index += @size if index < 0
length ||= @size - index # to the end of the array, if no length given
if index < @size
suffix = flatten_suffix(@root, @levels * BITS_PER_LEVEL, index, [])
suffix.fill(obj, 0, length)
elsif index == @size
suffix = Array.new(length, obj)
else
suffix = Array.new(index - @size, nil).concat(Array.new(length, obj))
index = @size
end
replace_suffix(index, suffix)
end
# When invoked with a block, yields all combinations of length `n` of items
# from the `Vector`, and then returns `self`. There is no guarantee about
# which order the combinations will be yielded in.
#
# If no block is given, an `Enumerator` is returned instead.
#
# @return [self, Enumerator]
def combination(n)
return enum_for(:combination, n) if not block_given?
return self if n < 0 || @size < n
if n == 0
yield []
elsif n == 1
each { |item| yield [item] }
elsif n == @size
yield self.to_a
else
combos = lambda do |result,index,remaining|
while @size - index > remaining
if remaining == 1
yield result.dup << get(index)
else
combos[result.dup << get(index), index+1, remaining-1]
end
index += 1
end
index.upto(@size-1) { |i| result << get(i) }
yield result
end
combos[[], 0, n]
end
self
end
# When invoked with a block, yields all repeated combinations of length `n` of
# items from the `Vector`, and then returns `self`. A "repeated combination" is
# one in which any item from the `Vector` can appear consecutively any number of
# times.
#
# There is no guarantee about which order the combinations will be yielded in.
#
# If no block is given, an `Enumerator` is returned instead.
#
# @return [self, Enumerator]
def repeated_combination(n)
return enum_for(:repeated_combination, n) if not block_given?
if n < 0
# yield nothing
elsif n == 0
yield []
elsif n == 1
each { |item| yield [item] }
elsif @size == 0
# yield nothing
else
combos = lambda do |result,index,remaining|
while index < @size-1
if remaining == 1
yield result.dup << get(index)
else
combos[result.dup << get(index), index, remaining-1]
end
index += 1
end
item = get(index)
remaining.times { result << item }
yield result
end
combos[[], 0, n]
end
self
end
# Yields all permutations of length `n` of items from the `Vector`, and then
# returns `self`. If no length `n` is specified, permutations of all elements
# will be yielded.
#
# There is no guarantee about which order the permutations will be yielded in.
#
# If no block is given, an `Enumerator` is returned instead.
#
# @return [self, Enumerator]
def permutation(n = @size)
return enum_for(:permutation, n) if not block_given?
if n < 0 || @size < n
# yield nothing
elsif n == 0
yield []
elsif n == 1
each { |item| yield [item] }
else
used, result = [], []
perms = lambda do |index|
0.upto(@size-1) do |i|
if !used[i]
result[index] = get(i)
if index < n-1
used[i] = true
perms[index+1]
used[i] = false
else
yield result.dup
end
end
end
end
perms[0]
end
self
end
# When invoked with a block, yields all repeated permutations of length `n` of
# items from the `Vector`, and then returns `self`. A "repeated permutation" is
# one where any item from the `Vector` can appear any number of times, and in
# any position (not just consecutively)
#
# If no length `n` is specified, permutations of all elements will be yielded.
# There is no guarantee about which order the permutations will be yielded in.
#
# If no block is given, an `Enumerator` is returned instead.
#
# @return [self, Enumerator]
def repeated_permutation(n = @size)
return enum_for(:repeated_permutation, n) if not block_given?
if n < 0
# yield nothing
elsif n == 0
yield []
elsif n == 1
each { |item| yield [item] }
else
result = []
perms = lambda do |index|
0.upto(@size-1) do |i|
result[index] = get(i)
if index < n-1
perms[index+1]
else
yield result.dup
end
end
end
perms[0]
end
self
end
# With one or more vector or array arguments, return the cartesian product of
# this vector's elements and those of each argument; with no arguments, return the
# result of multiplying all this vector's items together.
#
# @overload product(*vectors)
# Return a `Vector` of all combinations of elements from this `Vector` and each
# of the given vectors or arrays. The length of the returned `Vector` is the product
# of `self.size` and the size of each argument vector or array.
# @overload product
# Return the result of multiplying all the items in this `Vector` together.
#
# @return [Vector]
def product(*vectors)
# if no vectors passed, return "product" as in result of multiplying all items
return super if vectors.empty?
vectors.unshift(self)
if vectors.any?(&:empty?)
return block_given? ? self : []
end
counters = Array.new(vectors.size, 0)
bump_counters = lambda do
i = vectors.size-1
counters[i] += 1
while counters[i] == vectors[i].size
counters[i] = 0
i -= 1
return true if i == -1 # we are done
counters[i] += 1
end
false # not done yet
end
build_array = lambda do
array = []
counters.each_with_index { |index,i| array << vectors[i][index] }
array
end
if block_given?
while true
yield build_array[]
return self if bump_counters[]
end
else
result = []
while true
result << build_array[]
return result if bump_counters[]
end
end
end
# Assume all elements are vectors or arrays and transpose the rows and columns.
# In other words, take the first element of each nested vector/array and gather
# them together into a new `Vector`. Do likewise for the second, third, and so on
# down to the end of each nested vector/array. Gather all the resulting `Vectors`
# into a new `Vector` and return it.
#
# This operation is closely related to {#zip}. The result is almost the same as
# calling {#zip} on the first nested vector/array with the others supplied as
# arguments.
#
# @return [Vector]
def transpose
  return self.class.empty if empty?
  # One accumulator bucket per column, sized from the first row.
  result = Array.new(first.size) { [] }
  0.upto(@size-1) do |i|
    source = get(i)
    # Every row must have the same width, mirroring Array#transpose.
    if source.size != result.size
      raise IndexError, "element size differs (#{source.size} should be #{result.size})"
    end
    0.upto(result.size-1) do |j|
      result[j].push(source[j])
    end
  end
  # Wrap each gathered column in a Vector, then wrap the collection itself.
  result.map! { |a| self.class.new(a) }
  self.class.new(result)
end
# By using binary search, finds a value from this `Vector` which meets the
# condition defined by the provided block. Behavior is just like `Array#bsearch`.
# See `Array#bsearch` for details.
#
# In find-minimum mode the block returns true/false; in find-any mode it
# returns a number: positive values must precede the zero zone and negative
# values must follow it (the `Array#bsearch` contract).
#
# @return [Object]
def bsearch
  low, high, result = 0, @size, nil
  while low < high
    mid = (low + ((high - low) >> 1))
    val = get(mid)
    v = yield val
    if v.is_a? Numeric
      if v == 0
        return val
      elsif v > 0
        # Bug fix: a positive value means the target zone lies at HIGHER
        # indices (Array#bsearch find-any contract), so search rightwards.
        # The branches were previously inverted.
        low = mid + 1
      else
        high = mid
      end
    elsif v == true
      result = val
      high = mid
    elsif !v
      low = mid + 1
    else
      raise TypeError, "wrong argument type #{v.class} (must be numeric, true, false, or nil)"
    end
  end
  result
end
# Return an empty `Vector` instance, of the same class as this one. Useful if you
# have multiple subclasses of `Vector` and want to treat them polymorphically.
#
# @return [Vector]
def clear
self.class.empty
end
# Return a randomly chosen item from this `Vector`. If the vector is empty, return `nil`.
#
# @return [Object]
def sample
get(rand(@size))
end
# Return a new `Vector` containing just the elements at the given `indices`,
# in the order the indices were given. Indices that fall outside the vector
# contribute `nil` in their place.
#
# @param indices [Array] The indices to retrieve and gather into a new `Vector`
# @return [Vector]
def values_at(*indices)
  picked = indices.map { |idx| get(idx) }
  self.class.new(picked.freeze)
end
# Return the index of the last element which is equal to the provided object,
# or for which the provided block returns true.
#
# @overload rindex(obj)
# Return the index of the last element in this `Vector` which is `#==` to `obj`.
# @overload rindex { |item| ... }
# Return the index of the last element in this `Vector` for which the block
# returns true. (Iteration starts from the last element, counts back, and
# stops as soon as a matching element is found.)
#
# @return [Index]
def rindex(obj = (missing_arg = true))
  # `i` tracks the index of the item currently yielded by reverse_each,
  # counting down from the last slot.
  i = @size - 1
  if missing_arg
    if block_given?
      reverse_each { |item| return i if yield item; i -= 1 }
      nil
    else
      # No argument and no block: return an Enumerator, like Array#rindex.
      enum_for(:rindex)
    end
  else
    reverse_each { |item| return i if item == obj; i -= 1 }
    nil
  end
end
# Search this `Vector`'s elements — assumed to be nested, indexable
# collections — comparing `obj` against the FIRST element of each one.
# Return the first nested collection that matches, or `nil` if none does.
#
# @param obj [Object] The object to search for
# @return [Object]
def assoc(obj)
  each do |entry|
    return entry if obj == entry[0]
  end
  nil
end
# Search this `Vector`'s elements — assumed to be nested, indexable
# collections — comparing `obj` against the SECOND element of each one.
# Return the first nested collection that matches, or `nil` if none does.
#
# @param obj [Object] The object to search for
# @return [Object]
def rassoc(obj)
  each do |entry|
    return entry if obj == entry[1]
  end
  nil
end
# Return an `Array` with the same elements, in the same order. The returned
# `Array` may or may not be frozen.
#
# @return [Array]
def to_a
  if @levels == 0
    # A trie of depth 0 is just one leaf: @root already is the (frozen)
    # array of items, so return it without copying.
    @root
  else
    # Otherwise flatten the trie into a fresh (unfrozen) array.
    flatten_node(@root, @levels * BITS_PER_LEVEL, [])
  end
end
# Return true if `other` has the same type and contents as this `Vector`.
# Identity short-circuits; otherwise the classes, sizes, and finally the
# underlying tries are compared.
#
# @param other [Object] The collection to compare with
# @return [Boolean]
def eql?(other)
  return true if other.equal?(self)
  return false unless instance_of?(other.class)
  return false unless @size == other.size
  @root.eql?(other.instance_variable_get(:@root))
end
# See `Object#hash`.
# @return [Integer]
def hash
reduce(0) { |hash, item| (hash << 5) - hash + item.hash }
end
private
# Yield every item in the subtree under `node` in index order; `level` is
# how many internal levels remain above the leaves.
def traverse_depth_first(node, level, &block)
  return node.each(&block) if level == 0
  node.each { |child| traverse_depth_first(child, level - 1, &block) }
end
# Yield every item in the subtree under `node` in REVERSE index order;
# `level` is how many internal levels remain above the leaves.
def reverse_traverse_depth_first(node, level, &block)
  return node.reverse_each(&block) if level == 0
  node.reverse_each { |child| reverse_traverse_depth_first(child, level - 1, &block) }
end
# Walk down the trie from `node`, selecting at each level the child indicated
# by the relevant BITS_PER_LEVEL-bit slice of `index`, until reaching the
# leaf node which holds the slot for `index`.
def leaf_node_for(node, bitshift, index)
  until bitshift <= 0
    node = node[(index >> bitshift) & INDEX_MASK]
    bitshift -= BITS_PER_LEVEL
  end
  node
end
# Build a new Vector with `item` stored at `index`, path-copying from the
# root; the trie is deepened first if `index` exceeds its current capacity.
def update_root(index, item)
  root, levels = @root, @levels
  # Each extra level multiplies capacity by 32; wrap until `index` fits.
  while index >= (1 << (BITS_PER_LEVEL * (levels + 1)))
    root = [root].freeze
    levels += 1
  end
  root = update_leaf_node(root, levels * BITS_PER_LEVEL, index, item)
  # Size grows only when writing at/after the current end.
  self.class.alloc(root, @size > index ? @size : index + 1, levels)
end
# Path-copy from `node` down to the leaf slot for `index`, returning a new
# frozen node with `item` stored; untouched subtrees remain shared.
def update_leaf_node(node, bitshift, index, item)
  slot_index = (index >> bitshift) & INDEX_MASK
  if bitshift > 0
    # Recurse into (or create) the child node covering `index`; the rebuilt
    # child becomes the value written into this node's slot.
    old_child = node[slot_index] || []
    item = update_leaf_node(old_child, bitshift - BITS_PER_LEVEL, index, item)
  end
  node.dup.tap { |n| n[slot_index] = item }.freeze
end
# Collect the items with indices `from..to` (inclusive) out of the subtree
# rooted at `node` into a flat array.
def flatten_range(node, bitshift, from, to)
  from_slot = (from >> bitshift) & INDEX_MASK
  to_slot = (to >> bitshift) & INDEX_MASK
  if bitshift == 0 # are we at the bottom?
    node.slice(from_slot, to_slot-from_slot+1)
  elsif from_slot == to_slot
    # Both endpoints lie under the same child; just descend.
    flatten_range(node[from_slot], bitshift - BITS_PER_LEVEL, from, to)
  else
    # the following bitmask can be used to pick out the part of the from/to indices
    # which will be used to direct path BELOW this node
    mask = ((1 << bitshift) - 1)
    result = []
    if from & mask == 0
      # `from` is aligned on a child boundary, so take that child whole.
      flatten_node(node[from_slot], bitshift - BITS_PER_LEVEL, result)
    else
      result.concat(flatten_range(node[from_slot], bitshift - BITS_PER_LEVEL, from, from | mask))
    end
    # Children strictly between the two endpoints are always taken whole.
    (from_slot+1).upto(to_slot-1) do |slot_index|
      flatten_node(node[slot_index], bitshift - BITS_PER_LEVEL, result)
    end
    if to & mask == mask
      # `to` reaches its child's last slot, so take that child whole too.
      flatten_node(node[to_slot], bitshift - BITS_PER_LEVEL, result)
    else
      result.concat(flatten_range(node[to_slot], bitshift - BITS_PER_LEVEL, to & ~mask, to))
    end
    result
  end
end
# Append the entire contents of the subtree rooted at `node` (whose children
# live `bitshift` bits deep) onto `result`, in index order. Returns `result`.
def flatten_node(node, bitshift, result)
  case bitshift
  when 0
    # `node` is itself a leaf of items.
    result.concat(node)
  when BITS_PER_LEVEL
    # One level above the leaves: splice each leaf in directly.
    node.each { |leaf| result.concat(leaf) }
  else
    node.each { |child| flatten_node(child, bitshift - BITS_PER_LEVEL, result) }
  end
  result
end
# Extract `length` items starting at index `from` into a new Vector.
# Returns nil when the start/length cannot be satisfied, mirroring
# Array#slice's out-of-bounds behavior.
def subsequence(from, length)
  return nil if from > @size || from < 0 || length < 0
  # Clip the requested length to the items actually available.
  length = @size - from if @size < from + length
  return self.class.empty if length == 0
  self.class.new(flatten_range(@root, @levels * BITS_PER_LEVEL, from, from + length - 1))
end
# Append every item with index >= `from` in the subtree rooted at `node`
# onto `result`; returns `result`.
def flatten_suffix(node, bitshift, from, result)
  from_slot = (from >> bitshift) & INDEX_MASK
  if bitshift == 0
    if from_slot == 0
      result.concat(node)
    else
      result.concat(node.slice(from_slot, 32)) # entire suffix of node. excess length is ignored by #slice
    end
  else
    mask = ((1 << bitshift) - 1)
    if from & mask == 0
      # `from` is aligned on a child boundary: every remaining child is whole.
      from_slot.upto(node.size-1) do |i|
        flatten_node(node[i], bitshift - BITS_PER_LEVEL, result)
      end
    elsif child = node[from_slot]
      # Partial first child, then its whole siblings to the right.
      flatten_suffix(child, bitshift - BITS_PER_LEVEL, from, result)
      (from_slot+1).upto(node.size-1) do |i|
        flatten_node(node[i], bitshift - BITS_PER_LEVEL, result)
      end
    end
    result
  end
end
# Return a new `Vector` sharing this one's first `from` items, with the
# contents of the plain Ruby array `suffix` following them. NOTE: `suffix`
# is consumed (mutated) in the process.
#
# @param from [Integer] Index at which the new suffix begins (must be <= @size)
# @param suffix [Array] Items to place from `from` onwards
# @return [Vector]
# @raise [IndexError] if `from` lies past the end of the vector
def replace_suffix(from, suffix)
  # new suffix can go directly after existing elements
  raise IndexError if from > @size
  root, levels = @root, @levels
  if (from >> (BITS_PER_LEVEL * (@levels + 1))) != 0
    # index where new suffix goes doesn't fall within current tree
    # we will need to deepen tree
    root = [root].freeze
    levels += 1
  end
  new_size = from + suffix.size
  root = replace_node_suffix(root, levels * BITS_PER_LEVEL, from, suffix)
  if !suffix.empty?
    # Chunk the leftover suffix to the tree's current depth, graft it on,
    # and keep deepening the tree while the root overflows 32 slots.
    levels.times { suffix = suffix.each_slice(32).to_a }
    root.concat(suffix)
    while root.size > 32
      root = root.each_slice(32).to_a
      levels += 1
    end
  else
    # The tree may now be shallower. Bug fix: only collapse single-child
    # roots at INTERNAL levels. At level 0 a size-1 root is a leaf holding
    # one *item*; unwrapping it would store the bare item as @root and
    # drive @levels negative, corrupting the vector (e.g. after deleting
    # the last-but-one element of a 2-element vector).
    while levels > 0 && root.size == 1
      root = root[0]
      levels -= 1
    end
  end
  self.class.alloc(root.freeze, new_size, levels)
end
# Helper for #replace_suffix: rebuild `node` so that items below index
# `from` are kept and the capacity from `from` onwards is refilled from
# `suffix` (consumed via #shift). Whatever suffix remains afterwards is
# grafted on by the caller.
def replace_node_suffix(node, bitshift, from, suffix)
  from_slot = (from >> bitshift) & INDEX_MASK
  if bitshift == 0
    if from_slot == 0
      # Replace the whole leaf with the next chunk of the suffix.
      suffix.shift(32)
    else
      node.take(from_slot).concat(suffix.shift(32 - from_slot))
    end
  else
    mask = ((1 << bitshift) - 1)
    if from & mask == 0
      if from_slot == 0
        # Aligned at this node's start: rebuild the entire node from the
        # suffix, re-chunking it down to leaf granularity.
        new_node = suffix.shift(32 * (1 << bitshift))
        while bitshift != 0
          new_node = new_node.each_slice(32).to_a
          bitshift -= BITS_PER_LEVEL
        end
        new_node
      else
        # Keep the children before `from_slot`; fill the rest from suffix.
        result = node.take(from_slot)
        remainder = suffix.shift((32 - from_slot) * (1 << bitshift))
        while bitshift != 0
          remainder = remainder.each_slice(32).to_a
          bitshift -= BITS_PER_LEVEL
        end
        result.concat(remainder)
      end
    elsif child = node[from_slot]
      # `from` cuts through a child: rebuild that child recursively, then
      # fill this node's remaining slots from the suffix.
      result = node.take(from_slot)
      result.push(replace_node_suffix(child, bitshift - BITS_PER_LEVEL, from, suffix))
      remainder = suffix.shift((31 - from_slot) * (1 << bitshift))
      while bitshift != 0
        remainder = remainder.each_slice(32).to_a
        bitshift -= BITS_PER_LEVEL
      end
      result.concat(remainder)
    else
      raise "Shouldn't happen"
    end
  end
end
end
# The canonical empty `Vector`. Returned by `Hamster.vector` and `Vector[]` when
# invoked with no arguments; also returned by `Vector.empty`. Prefer using this
# one rather than creating many empty vectors using `Vector.new`.
#
EmptyVector = Hamster::Vector.empty
end
|
require "forwardable"
require "hamster/immutable"
require "hamster/enumerable"
module Hamster
# Create a `Vector` populated with the given items. With no arguments, the
# canonical empty vector is returned instead of allocating a fresh one.
def self.vector(*items)
  return EmptyVector if items.empty?
  Vector.new(items.freeze)
end
# A `Vector` is an ordered, integer-indexed collection of objects. Like `Array`,
# `Vector` indexing starts at 0. Also like `Array`, negative indexes count back
# from the end of the `Vector`.
#
# `Vector`'s interface is modeled after that of `Array`, minus all the methods
# which do destructive updates. Some methods which modify `Array`s destructively
# (like {#insert} or {#delete_at}) are included, but they return new `Vectors`
# and leave the existing one unchanged.
#
# = Creating New Vectors
#
# Hamster.vector('a', 'b', 'c')
# Hamster::Vector.new([:first, :second, :third])
# Hamster::Vector[1, 2, 3, 4, 5]
#
# = Retrieving Items from Vectors
#
# require 'hamster/vector'
# vector = Hamster.vector(1, 2, 3, 4, 5)
# vector[0] # => 1
# vector[-1] # => 5
# vector[0,3] # => Hamster::Vector[1, 2, 3]
# vector[1..-1] # => Hamster::Vector[2, 3, 4, 5]
# vector.first # => 1
# vector.last # => 5
#
# = Creating Modified Vectors
#
# vector.add(6) # => Hamster::Vector[1, 2, 3, 4, 5, 6]
# vector.insert(1, :a, :b) # => Hamster::Vector[1, :a, :b, 2, 3, 4, 5]
# vector.delete_at(2) # => Hamster::Vector[1, 2, 4, 5]
# vector + [6, 7] # => Hamster::Vector[1, 2, 3, 4, 5, 6, 7]
#
# Other `Array`-like methods like {#select}, {#map}, {#shuffle}, {#uniq}, {#reverse},
# {#rotate}, {#flatten}, {#sort}, {#sort_by}, {#take}, {#drop}, {#take_while},
# {#drop_while}, {#fill}, {#product}, and {#transpose} are also supported.
#
class Vector
extend Forwardable
include Immutable
include Enumerable
# @private
BLOCK_SIZE = 32
# @private
INDEX_MASK = BLOCK_SIZE - 1
# @private
BITS_PER_LEVEL = 5
# Return the number of items in this `Vector`
# @return [Integer]
attr_reader :size
def_delegator :self, :size, :length
class << self
# Create a new `Vector` populated with the given items.
#
# @return [Vector]
def [](*items)
new(items.freeze)
end
# Return an empty `Vector`. If used on a subclass, returns an empty instance
# of that class.
#
# @return [Vector]
def empty
@empty ||= self.alloc([].freeze, 0, 0)
end
# "Raw" allocation of a new `Vector`. Used internally to create a new
# instance quickly after building a modified trie.
#
# @return [Vector]
# @private
def alloc(root, size, levels)
obj = allocate
obj.instance_variable_set(:@root, root)
obj.instance_variable_set(:@size, size)
obj.instance_variable_set(:@levels, levels)
obj
end
end
def initialize(items=[].freeze)
items = items.to_a
if items.size <= 32
items = items.dup.freeze if !items.frozen?
@root, @size, @levels = items, items.size, 0
else
root, size, levels = items, items.size, 0
while root.size > 32
root = root.each_slice(32).to_a
levels += 1
end
@root, @size, @levels = root.freeze, size, levels
end
end
# Return `true` if this `Vector` contains no items.
#
# @return [Boolean]
def empty?
  @size.zero?
end
def_delegator :self, :empty?, :null?
# Return the first item in the `Vector`. If the vector is empty, return `nil`.
#
# @return [Object]
def first
get(0)
end
def_delegator :self, :first, :head
# Return the last item in the `Vector`. If the vector is empty, return `nil`.
#
# @return [Object]
def last
get(-1)
end
# Return a new `Vector` with `item` added after the last occupied position.
#
# @param item [Object] The object to insert at the end of the vector
# @return [Vector]
def add(item)
update_root(@size, item)
end
def_delegator :self, :add, :<<
def_delegator :self, :add, :conj
def_delegator :self, :add, :conjoin
# Return a new `Vector` with the item at `index` replaced by `item`. If the
# `item` argument is missing, but an optional code block is provided, it will
# be passed the existing item and what the block returns will replace it.
#
# @param index [Integer] The index to update
# @param item [Object] The object to insert into that position
# @return [Vector]
def set(index, item = yield(get(index)))
raise IndexError if @size == 0
index += @size if index < 0
raise IndexError if index >= @size || index < 0
update_root(index, item)
end
# Retrieve the item at `index`. If there is none (either the provided index
# is too high or too low), return `nil`. Negative indices count back from
# the end of the vector.
#
# @param index [Integer] The index to retrieve
# @return [Object]
def get(index)
  return nil if @size == 0
  index += @size if index < 0
  return nil unless index >= 0 && index < @size
  leaf = leaf_node_for(@root, @levels * BITS_PER_LEVEL, index)
  leaf[index & INDEX_MASK]
end
def_delegator :self, :get, :at
# Retrieve the value at `index`, or use the provided default value or block,
# or otherwise raise an `IndexError`.
#
# @overload fetch(index)
#   Retrieve the value at the given index, or raise an `IndexError` if it is
#   not found.
#   @param index [Integer] The index to look up
# @overload fetch(index) { |index| ... }
#   Retrieve the value at the given index, or call the optional
#   code block (with the non-existent index) and get its return value.
#   @yield [index] The index which does not exist
#   @yieldreturn [Object] Object to return instead
#   @param index [Integer] The index to look up
# @overload fetch(index, default)
#   Retrieve the value at the given index, or else return the provided
#   `default` value.
#   @param index [Integer] The index to look up
#   @param default [Object] Object to return if the key is not found
#
# @return [Object]
def fetch(index, default = (missing_default = true))
  # Remember the caller's index so the block and the error message see the
  # original (possibly negative) value, as Array#fetch does.
  original_index = index
  index += @size if index < 0
  if index >= 0 && index < size
    get(index)
  elsif block_given?
    # Bug fix: the block is documented to receive the missing index, but
    # was previously invoked via a bare `yield` with no argument.
    yield(original_index)
  elsif !missing_default
    default
  else
    raise IndexError, "index #{original_index} outside of vector bounds"
  end
end
# Element reference. Return the item at a specific index, or a specified,
# contiguous range of items (as a new `Vector`).
#
# @overload vector[index]
# Return the item at `index`.
# @param index [Integer] The index to retrieve.
# @overload vector[start, length]
# Return a subvector starting at index `start` and continuing for `length` elements.
# @param start [Integer] The index to start retrieving items from.
# @param length [Integer] The number of items to retrieve.
# @overload vector[range]
# Return a subvector specified by the given `range` of indices.
# @param range [Range] The range of indices to retrieve.
#
# @return [Vector]
def [](arg, length = (missing_length = true))
if missing_length
if arg.is_a?(Range)
from, to = arg.begin, arg.end
from += @size if from < 0
to += @size if to < 0
to += 1 if !arg.exclude_end?
to = @size if to > @size
length = to - from
length = 0 if length < 0
subsequence(from, length)
else
get(arg)
end
else
arg += @size if arg < 0
subsequence(arg, length)
end
end
def_delegator :self, :[], :slice
# Return a new `Vector` with the given values inserted before the element at `index`.
#
# @param index [Integer] The index where the new items should go
# @param items [Array] The items to add
# @return [Vector]
def insert(index, *items)
raise IndexError if index < -@size
index += @size if index < 0
if index < @size
suffix = flatten_suffix(@root, @levels * BITS_PER_LEVEL, index, [])
suffix.unshift(*items)
elsif index == @size
suffix = items
else
suffix = Array.new(index - @size, nil).concat(items)
index = @size
end
replace_suffix(index, suffix)
end
# Return a new `Vector` with the element at `index` removed. If the given `index`
# does not exist, return `self`.
#
# @param index [Integer] The index to remove
# @return [Vector]
def delete_at(index)
return self if index >= @size || index < -@size
index += @size if index < 0
suffix = flatten_suffix(@root, @levels * BITS_PER_LEVEL, index, [])
replace_suffix(index, suffix.tap { |a| a.shift })
end
# Call the given block once for each item in the vector, passing each
# item from first to last successively to the block.
#
# @return [self]
def each(&block)
return to_enum unless block_given?
traverse_depth_first(@root, @levels, &block)
self
end
# Call the given block once for each item in the vector, passing each
# item starting from the last, and counting back to the first, successively to
# the block.
#
# @return [self]
def reverse_each(&block)
return enum_for(:reverse_each) unless block_given?
reverse_traverse_depth_first(@root, @levels, &block)
self
end
# Return a new `Vector` containing all elements for which the given block returns
# true.
#
# @return [Vector]
def filter
return enum_for(:filter) unless block_given?
reduce(self.class.empty) { |vector, item| yield(item) ? vector.add(item) : vector }
end
# Return a new `Vector` with all items which are equal to `obj` removed.
# `#==` is used for checking equality.
#
# @param obj [Object] The object to remove (every occurrence)
# @return [Vector]
def delete(obj)
filter { |item| item != obj }
end
# Invoke the given block once for each item in the vector, and return a new
# `Vector` containing the values returned by the block.
#
# @return [Vector]
def map
return enum_for(:map) if not block_given?
return self if empty?
self.class.new(super)
end
def_delegator :self, :map, :collect
# Return a new `Vector` with the same elements as this one, but randomly permuted.
#
# @return [Vector]
def shuffle
self.class.new(((array = to_a).frozen? ? array.shuffle : array.shuffle!).freeze)
end
# Return a new `Vector` with no duplicate elements, as determined by `#hash` and
# `#eql?`. For each group of equivalent elements, only the first will be retained.
#
# @return [Vector]
def uniq
  array = to_a
  # Bug fix: Array#uniq! returns nil when there were no duplicates (and
  # to_a is unfrozen whenever the trie has more than one level), which
  # previously collapsed a large duplicate-free vector into an empty one.
  # Fall back to the (already unique) array itself.
  array = array.frozen? ? array.uniq : (array.uniq! || array)
  self.class.new(array.freeze)
end
# Return a new `Vector` with the same elements as this one, but in reverse order.
#
# @return [Vector]
def reverse
self.class.new(((array = to_a).frozen? ? array.reverse : array.reverse!).freeze)
end
# Return a new `Vector` with the same elements, but rotated so that the one at
# index `count` is the first element of the new vector. If `count` is positive,
# the elements will be shifted left, and those shifted past the lowest position
# will be moved to the end. If `count` is negative, the elements will be shifted
# right, and those shifted past the last position will be moved to the beginning
#
# @param count [Integer] The number of positions to shift items by
# @return [Vector]
def rotate(count = 1)
return self if (count % @size) == 0
self.class.new(((array = to_a).frozen? ? array.rotate(count) : array.rotate!(count)).freeze)
end
# Return a new `Vector` with all nested vectors and arrays recursively "flattened
# out", that is, their elements inserted into the new `Vector` in the place where
# the nested array/vector originally was. If an optional `level` argument is
# provided, the flattening will only be done recursively that number of times.
# A `level` of 0 means not to flatten at all, 1 means to only flatten nested
# arrays/vectors which are directly contained within this `Vector`.
#
# @param level [Integer] The depth to which flattening should be applied
# @return [Vector]
def flatten(level = nil)
return self if level == 0
self.class.new(((array = to_a).frozen? ? array.flatten(level) : array.flatten!(level)).freeze)
end
# Return a new `Vector` built by concatenating this one with `other`. `other`
# can be any object which is convertible to an `Array` using `#to_a`.
#
# @param other [Enumerable] The collection to concatenate onto this vector
# @return [Vector]
def +(other)
other = other.to_a
other = other.dup if other.frozen?
replace_suffix(@size, other)
end
def_delegator :self, :+, :concat
# `others` should be arrays and/or vectors. The corresponding elements from this
# `Vector` and each of `others` (that is, the elements with the same indices)
# will be gathered into arrays.
#
# If an optional block is provided, each such array will be passed successively
# to the block. Otherwise, a new `Vector` of all those arrays will be returned.
#
# @param others [Array] The arrays/vectors to zip together with this one
# @return [Vector, nil]
def zip(*others)
if block_given?
super
else
self.class.new(super)
end
end
# Return a new `Vector` with the same items, but sorted. The sort order will
# be determined by comparing items using `#<=>`, or if an optional code block
# is provided, by using it as a comparator. The block should accept 2 parameters,
# and should return 0, 1, or -1 if the first parameter is equal to, greater than,
# or less than the second parameter (respectively).
#
# @return [Vector]
def sort
self.class.new(super)
end
# Return a new `Vector` with the same items, but sorted. The sort order will be
# determined by mapping the items through the given block to obtain sort keys,
# and then sorting the keys according to their natural sort order.
#
# @return [Vector]
def sort_by
self.class.new(super)
end
# Drop the first `n` elements and return the rest in a new `Vector`.
# @param n [Integer] The number of elements to remove
# @return [Vector]
def drop(n)
self.class.new(super)
end
# Return only the first `n` elements in a new `Vector`.
# @param n [Integer] The number of elements to retain
# @return [Vector]
def take(n)
self.class.new(super)
end
# Drop elements up to, but not including, the first element for which the
# block returns `nil` or `false`. Gather the remaining elements into a new
# `Vector`. If no block is given, an `Enumerator` is returned instead.
#
# @return [Vector, Enumerator]
def drop_while
return enum_for(:drop_while) if not block_given?
self.class.new(super)
end
# Gather elements up to, but not including, the first element for which the
# block returns `nil` or `false`, and return them in a new `Vector`. If no block
# is given, an `Enumerator` is returned instead.
#
# @return [Vector, Enumerator]
def take_while
return enum_for(:take_while) if not block_given?
self.class.new(super)
end
# Repetition. Return a new `Vector` built by concatenating `times` copies
# of this one together.
#
# @param times [Integer] The number of times to repeat the elements in this vector
# @return [Vector]
def *(times)
return self.class.empty if times == 0
return self if times == 1
result = (to_a * times)
result.is_a?(Array) ? self.class.new(result) : result
end
# Replace a range of indexes with the given object.
#
# @overload fill(obj)
# Return a new `Vector` of the same size, with every index set to `obj`.
# @overload fill(obj, start)
# Return a new `Vector` with all indexes from `start` to the end of the
# vector set to `obj`.
# @overload fill(obj, start, length)
# Return a new `Vector` with `length` indexes, beginning from `start`,
# set to `obj`.
#
# @return [Vector]
def fill(obj, index = 0, length = nil)
raise IndexError if index < -@size
index += @size if index < 0
length ||= @size - index # to the end of the array, if no length given
if index < @size
suffix = flatten_suffix(@root, @levels * BITS_PER_LEVEL, index, [])
suffix.fill(obj, 0, length)
elsif index == @size
suffix = Array.new(length, obj)
else
suffix = Array.new(index - @size, nil).concat(Array.new(length, obj))
index = @size
end
replace_suffix(index, suffix)
end
# When invoked with a block, yields all combinations of length `n` of items
# from the `Vector`, and then returns `self`. There is no guarantee about
# which order the combinations will be yielded in.
#
# If no block is given, an `Enumerator` is returned instead.
#
# @return [self, Enumerator]
def combination(n)
return enum_for(:combination, n) if not block_given?
return self if n < 0 || @size < n
if n == 0
yield []
elsif n == 1
each { |item| yield [item] }
elsif n == @size
yield self.to_a
else
combos = lambda do |result,index,remaining|
while @size - index > remaining
if remaining == 1
yield result.dup << get(index)
else
combos[result.dup << get(index), index+1, remaining-1]
end
index += 1
end
index.upto(@size-1) { |i| result << get(i) }
yield result
end
combos[[], 0, n]
end
self
end
# When invoked with a block, yields all repeated combinations of length `n` of
# items from the `Vector`, and then returns `self`. A "repeated combination" is
# one in which any item from the `Vector` can appear consecutively any number of
# times.
#
# There is no guarantee about which order the combinations will be yielded in.
#
# If no block is given, an `Enumerator` is returned instead.
#
# @return [self, Enumerator]
def repeated_combination(n)
return enum_for(:repeated_combination, n) if not block_given?
if n < 0
# yield nothing
elsif n == 0
yield []
elsif n == 1
each { |item| yield [item] }
elsif @size == 0
# yield nothing
else
combos = lambda do |result,index,remaining|
while index < @size-1
if remaining == 1
yield result.dup << get(index)
else
combos[result.dup << get(index), index, remaining-1]
end
index += 1
end
item = get(index)
remaining.times { result << item }
yield result
end
combos[[], 0, n]
end
self
end
# When invoked with a block, yields all permutations of length `n` of items
# from the `Vector`, and then returns `self`. If no length `n` is specified,
# permutations of all elements will be yielded.
#
# There is no guarantee about which order the permutations will be yielded in.
#
# If no block is given, an `Enumerator` is returned instead.
#
# @return [self, Enumerator]
def permutation(n = @size)
return enum_for(:permutation, n) if not block_given?
if n < 0 || @size < n
# yield nothing
elsif n == 0
yield []
elsif n == 1
each { |item| yield [item] }
else
used, result = [], []
perms = lambda do |index|
0.upto(@size-1) do |i|
if !used[i]
result[index] = get(i)
if index < n-1
used[i] = true
perms[index+1]
used[i] = false
else
yield result.dup
end
end
end
end
perms[0]
end
self
end
# When invoked with a block, yields all repeated permutations of length `n` of
# items from the `Vector`, and then returns `self`. A "repeated permutation" is
# one where any item from the `Vector` can appear any number of times, and in
# any position (not just consecutively)
#
# If no length `n` is specified, permutations of all elements will be yielded.
# There is no guarantee about which order the permutations will be yielded in.
#
# If no block is given, an `Enumerator` is returned instead.
#
# @return [self, Enumerator]
def repeated_permutation(n = @size)
return enum_for(:repeated_permutation, n) if not block_given?
if n < 0
# yield nothing
elsif n == 0
yield []
elsif n == 1
each { |item| yield [item] }
else
result = []
perms = lambda do |index|
0.upto(@size-1) do |i|
result[index] = get(i)
if index < n-1
perms[index+1]
else
yield result.dup
end
end
end
perms[0]
end
self
end
# With one or more vector or array arguments, return the cartesian product of
# this vector's elements and those of each argument; with no arguments, return the
# result of multiplying all this vector's items together.
#
# @overload product(*vectors)
# Return a `Vector` of all combinations of elements from this `Vector` and each
# of the given vectors or arrays. The length of the returned `Vector` is the product
# of `self.size` and the size of each argument vector or array.
# @overload product
# Return the result of multiplying all the items in this `Vector` together.
#
# @return [Vector]
def product(*vectors)
# if no vectors passed, return "product" as in result of multiplying all items
return super if vectors.empty?
vectors.unshift(self)
if vectors.any?(&:empty?)
return block_given? ? self : []
end
counters = Array.new(vectors.size, 0)
bump_counters = lambda do
i = vectors.size-1
counters[i] += 1
while counters[i] == vectors[i].size
counters[i] = 0
i -= 1
return true if i == -1 # we are done
counters[i] += 1
end
false # not done yet
end
build_array = lambda do
array = []
counters.each_with_index { |index,i| array << vectors[i][index] }
array
end
if block_given?
while true
yield build_array[]
return self if bump_counters[]
end
else
result = []
while true
result << build_array[]
return result if bump_counters[]
end
end
end
# Assume all elements are vectors or arrays and transpose the rows and columns.
# In other words, take the first element of each nested vector/array and gather
# them together into a new `Vector`. Do likewise for the second, third, and so on
# down to the end of each nested vector/array. Gather all the resulting `Vectors`
# into a new `Vector` and return it.
#
# This operation is closely related to {#zip}. The result is almost the same as
# calling {#zip} on the first nested vector/array with the others supplied as
# arguments.
#
# @return [Vector]
def transpose
return self.class.empty if empty?
result = Array.new(first.size) { [] }
0.upto(@size-1) do |i|
source = get(i)
if source.size != result.size
raise IndexError, "element size differs (#{source.size} should be #{result.size})"
end
0.upto(result.size-1) do |j|
result[j].push(source[j])
end
end
result.map! { |a| self.class.new(a) }
self.class.new(result)
end
# By using binary search, finds a value from this `Vector` which meets the
# condition defined by the provided block. Behavior is just like `Array#bsearch`.
# See `Array#bsearch` for details.
#
# In find-minimum mode the block returns true/false; in find-any mode it
# returns a number: positive values must precede the zero zone and negative
# values must follow it (the `Array#bsearch` contract).
#
# @return [Object]
def bsearch
  low, high, result = 0, @size, nil
  while low < high
    mid = (low + ((high - low) >> 1))
    val = get(mid)
    v = yield val
    if v.is_a? Numeric
      if v == 0
        return val
      elsif v > 0
        # Bug fix: a positive value means the target zone lies at HIGHER
        # indices (Array#bsearch find-any contract), so search rightwards.
        # The branches were previously inverted.
        low = mid + 1
      else
        high = mid
      end
    elsif v == true
      result = val
      high = mid
    elsif !v
      low = mid + 1
    else
      raise TypeError, "wrong argument type #{v.class} (must be numeric, true, false, or nil)"
    end
  end
  result
end
# Return an empty `Vector` instance, of the same class as this one. Useful if you
# have multiple subclasses of `Vector` and want to treat them polymorphically.
#
# @return [Vector]
def clear
self.class.empty
end
# Return a randomly chosen item from this `Vector`. If the vector is empty, return `nil`.
#
# @return [Object]
def sample
get(rand(@size))
end
# Return a new `Vector` with only the elements at the given `indices`, in the
# order specified by `indices`. If any of the `indices` do not exist, `nil`s will
# appear in their places.
#
# @param indices [Array] The indices to retrieve and gather into a new `Vector`
# @return [Vector]
def values_at(*indices)
self.class.new(indices.map { |i| get(i) }.freeze)
end
# Return the index of the last element which is equal to the provided object,
# or for which the provided block returns true.
#
# @overload rindex(obj)
# Return the index of the last element in this `Vector` which is `#==` to `obj`.
# @overload rindex { |item| ... }
# Return the index of the last element in this `Vector` for which the block
# returns true. (Iteration starts from the last element, counts back, and
# stops as soon as a matching element is found.)
#
# @return [Index]
def rindex(obj = (missing_arg = true))
i = @size - 1
if missing_arg
if block_given?
reverse_each { |item| return i if yield item; i -= 1 }
nil
else
enum_for(:rindex)
end
else
reverse_each { |item| return i if item == obj; i -= 1 }
nil
end
end
# Assumes all elements are nested, indexable collections, and searches through them,
# comparing `obj` with the first element of each nested collection. Return the
# first nested collection which matches, or `nil` if none is found.
#
# @param obj [Object] The object to search for
# @return [Object]
def assoc(obj)
each { |array| return array if obj == array[0] }
nil
end
# Assumes all elements are nested, indexable collections, and searches through them,
# comparing `obj` with the second element of each nested collection. Return the
# first nested collection which matches, or `nil` if none is found.
#
# @param obj [Object] The object to search for
# @return [Object]
def rassoc(obj)
each { |array| return array if obj == array[1] }
nil
end
# Return an `Array` with the same elements, in the same order. The returned
# `Array` may or may not be frozen.
#
# @return [Array]
def to_a
if @levels == 0
@root
else
flatten_node(@root, @levels * BITS_PER_LEVEL, [])
end
end
# Return true if `other` has the same type and contents as this `Vector`.
#
# @param other [Object] The collection to compare with
# @return [Boolean]
def eql?(other)
return true if other.equal?(self)
return false unless instance_of?(other.class) && @size == other.size
@root.eql?(other.instance_variable_get(:@root))
end
# See `Object#hash`.
# @return [Integer]
def hash
reduce(0) { |hash, item| (hash << 5) - hash + item.hash }
end
private
def traverse_depth_first(node, level, &block)
return node.each(&block) if level == 0
node.each { |child| traverse_depth_first(child, level - 1, &block) }
end
def reverse_traverse_depth_first(node, level, &block)
return node.reverse_each(&block) if level == 0
node.reverse_each { |child| reverse_traverse_depth_first(child, level - 1, &block) }
end
def leaf_node_for(node, bitshift, index)
while bitshift > 0
node = node[(index >> bitshift) & INDEX_MASK]
bitshift -= BITS_PER_LEVEL
end
node
end
def update_root(index, item)
root, levels = @root, @levels
while index >= (1 << (BITS_PER_LEVEL * (levels + 1)))
root = [root].freeze
levels += 1
end
root = update_leaf_node(root, levels * BITS_PER_LEVEL, index, item)
self.class.alloc(root, @size > index ? @size : index + 1, levels)
end
def update_leaf_node(node, bitshift, index, item)
slot_index = (index >> bitshift) & INDEX_MASK
if bitshift > 0
old_child = node[slot_index] || []
item = update_leaf_node(old_child, bitshift - BITS_PER_LEVEL, index, item)
end
node.dup.tap { |n| n[slot_index] = item }.freeze
end
def flatten_range(node, bitshift, from, to)
from_slot = (from >> bitshift) & INDEX_MASK
to_slot = (to >> bitshift) & INDEX_MASK
if bitshift == 0 # are we at the bottom?
node.slice(from_slot, to_slot-from_slot+1)
elsif from_slot == to_slot
flatten_range(node[from_slot], bitshift - BITS_PER_LEVEL, from, to)
else
# the following bitmask can be used to pick out the part of the from/to indices
# which will be used to direct path BELOW this node
mask = ((1 << bitshift) - 1)
result = []
if from & mask == 0
flatten_node(node[from_slot], bitshift - BITS_PER_LEVEL, result)
else
result.concat(flatten_range(node[from_slot], bitshift - BITS_PER_LEVEL, from, from | mask))
end
(from_slot+1).upto(to_slot-1) do |slot_index|
flatten_node(node[slot_index], bitshift - BITS_PER_LEVEL, result)
end
if to & mask == mask
flatten_node(node[to_slot], bitshift - BITS_PER_LEVEL, result)
else
result.concat(flatten_range(node[to_slot], bitshift - BITS_PER_LEVEL, to & ~mask, to))
end
result
end
end
def flatten_node(node, bitshift, result)
if bitshift == 0
result.concat(node)
elsif bitshift == BITS_PER_LEVEL
node.each { |a| result.concat(a) }
else
bitshift -= BITS_PER_LEVEL
node.each { |a| flatten_node(a, bitshift, result) }
end
result
end
def subsequence(from, length)
return nil if from > @size || from < 0 || length < 0
length = @size - from if @size < from + length
return self.class.empty if length == 0
self.class.new(flatten_range(@root, @levels * BITS_PER_LEVEL, from, from + length - 1))
end
def flatten_suffix(node, bitshift, from, result)
from_slot = (from >> bitshift) & INDEX_MASK
if bitshift == 0
if from_slot == 0
result.concat(node)
else
result.concat(node.slice(from_slot, 32)) # entire suffix of node. excess length is ignored by #slice
end
else
mask = ((1 << bitshift) - 1)
if from & mask == 0
from_slot.upto(node.size-1) do |i|
flatten_node(node[i], bitshift - BITS_PER_LEVEL, result)
end
elsif child = node[from_slot]
flatten_suffix(child, bitshift - BITS_PER_LEVEL, from, result)
(from_slot+1).upto(node.size-1) do |i|
flatten_node(node[i], bitshift - BITS_PER_LEVEL, result)
end
end
result
end
end
def replace_suffix(from, suffix)
# new suffix can go directly after existing elements
raise IndexError if from > @size
root, levels = @root, @levels
if (from >> (BITS_PER_LEVEL * (@levels + 1))) != 0
# index where new suffix goes doesn't fall within current tree
# we will need to deepen tree
root = [root].freeze
levels += 1
end
new_size = from + suffix.size
root = replace_node_suffix(root, levels * BITS_PER_LEVEL, from, suffix)
if !suffix.empty?
levels.times { suffix = suffix.each_slice(32).to_a }
root.concat(suffix)
while root.size > 32
root = root.each_slice(32).to_a
levels += 1
end
else
while root.size == 1
root = root[0]
levels -= 1
end
end
self.class.alloc(root.freeze, new_size, levels)
end
def replace_node_suffix(node, bitshift, from, suffix)
from_slot = (from >> bitshift) & INDEX_MASK
if bitshift == 0
if from_slot == 0
suffix.shift(32)
else
node.take(from_slot).concat(suffix.shift(32 - from_slot))
end
else
mask = ((1 << bitshift) - 1)
if from & mask == 0
if from_slot == 0
new_node = suffix.shift(32 * (1 << bitshift))
while bitshift != 0
new_node = new_node.each_slice(32).to_a
bitshift -= BITS_PER_LEVEL
end
new_node
else
result = node.take(from_slot)
remainder = suffix.shift((32 - from_slot) * (1 << bitshift))
while bitshift != 0
remainder = remainder.each_slice(32).to_a
bitshift -= BITS_PER_LEVEL
end
result.concat(remainder)
end
elsif child = node[from_slot]
result = node.take(from_slot)
result.push(replace_node_suffix(child, bitshift - BITS_PER_LEVEL, from, suffix))
remainder = suffix.shift((31 - from_slot) * (1 << bitshift))
while bitshift != 0
remainder = remainder.each_slice(32).to_a
bitshift -= BITS_PER_LEVEL
end
result.concat(remainder)
else
raise "Shouldn't happen"
end
end
end
end
# The canonical empty `Vector`. Returned by `Hamster.vector` and `Vector[]` when
# invoked with no arguments; also returned by `Vector.empty`. Prefer using this
# one rather than creating many empty vectors using `Vector.new`.
#
EmptyVector = Hamster::Vector.empty
end
Corrected @return annotation for Vector#clear
require "forwardable"
require "hamster/immutable"
require "hamster/enumerable"
module Hamster
def self.vector(*items)
items.empty? ? EmptyVector : Vector.new(items.freeze)
end
# A `Vector` is an ordered, integer-indexed collection of objects. Like `Array`,
# `Vector` indexing starts at 0. Also like `Array`, negative indexes count back
# from the end of the `Vector`.
#
# `Vector`'s interface is modeled after that of `Array`, minus all the methods
# which do destructive updates. Some methods which modify `Array`s destructively
# (like {#insert} or {#delete_at}) are included, but they return new `Vectors`
# and leave the existing one unchanged.
#
# = Creating New Vectors
#
# Hamster.vector('a', 'b', 'c')
# Hamster::Vector.new([:first, :second, :third])
# Hamster::Vector[1, 2, 3, 4, 5]
#
# = Retrieving Items from Vectors
#
# require 'hamster/vector'
# vector = Hamster.vector(1, 2, 3, 4, 5)
# vector[0] # => 1
# vector[-1] # => 5
# vector[0,3] # => Hamster::Vector[1, 2, 3]
# vector[1..-1] # => Hamster::Vector[2, 3, 4, 5]
# vector.first # => 1
# vector.last # => 5
#
# = Creating Modified Vectors
#
# vector.add(6) # => Hamster::Vector[1, 2, 3, 4, 5, 6]
# vector.insert(1, :a, :b) # => Hamster::Vector[1, :a, :b, 2, 3, 4, 5]
# vector.delete_at(2) # => Hamster::Vector[1, 2, 4, 5]
# vector + [6, 7] # => Hamster::Vector[1, 2, 3, 4, 5, 6, 7]
#
# Other `Array`-like methods like {#select}, {#map}, {#shuffle}, {#uniq}, {#reverse},
# {#rotate}, {#flatten}, {#sort}, {#sort_by}, {#take}, {#drop}, {#take_while},
# {#drop_while}, {#fill}, {#product}, and {#transpose} are also supported.
#
class Vector
extend Forwardable
include Immutable
include Enumerable
# @private
BLOCK_SIZE = 32
# @private
INDEX_MASK = BLOCK_SIZE - 1
# @private
BITS_PER_LEVEL = 5
# Return the number of items in this `Vector`
# @return [Integer]
attr_reader :size
def_delegator :self, :size, :length
class << self
# Create a new `Vector` populated with the given items.
#
# @return [Vector]
def [](*items)
new(items.freeze)
end
# Return an empty `Vector`. If used on a subclass, returns an empty instance
# of that class.
#
# @return [Vector]
def empty
@empty ||= self.alloc([].freeze, 0, 0)
end
# "Raw" allocation of a new `Vector`. Used internally to create a new
# instance quickly after building a modified trie.
#
# @return [Vector]
# @private
def alloc(root, size, levels)
obj = allocate
obj.instance_variable_set(:@root, root)
obj.instance_variable_set(:@size, size)
obj.instance_variable_set(:@levels, levels)
obj
end
end
def initialize(items=[].freeze)
items = items.to_a
if items.size <= 32
items = items.dup.freeze if !items.frozen?
@root, @size, @levels = items, items.size, 0
else
root, size, levels = items, items.size, 0
while root.size > 32
root = root.each_slice(32).to_a
levels += 1
end
@root, @size, @levels = root.freeze, size, levels
end
end
# Return `true` if this `Vector` contains no items.
#
# @return [Boolean]
def empty?
@size == 0
end
def_delegator :self, :empty?, :null?
# Return the first item in the `Vector`. If the vector is empty, return `nil`.
#
# @return [Object]
def first
get(0)
end
def_delegator :self, :first, :head
# Return the last item in the `Vector`. If the vector is empty, return `nil`.
#
# @return [Object]
def last
get(-1)
end
# Return a new `Vector` with `item` added after the last occupied position.
#
# @param item [Object] The object to insert at the end of the vector
# @return [Vector]
def add(item)
update_root(@size, item)
end
def_delegator :self, :add, :<<
def_delegator :self, :add, :conj
def_delegator :self, :add, :conjoin
# Return a new `Vector` with the item at `index` replaced by `item`. If the
# `item` argument is missing, but an optional code block is provided, it will
# be passed the existing item and what the block returns will replace it.
#
# @param index [Integer] The index to update
# @param item [Object] The object to insert into that position
# @return [Vector]
def set(index, item = yield(get(index)))
raise IndexError if @size == 0
index += @size if index < 0
raise IndexError if index >= @size || index < 0
update_root(index, item)
end
# Retrieve the item at `index`. If there is none (either the provided index
# is too high or too low), return `nil`.
#
# @param index [Integer] The index to retrieve
# @return [Object]
def get(index)
return nil if @size == 0
index += @size if index < 0
return nil if index >= @size || index < 0
leaf_node_for(@root, @levels * BITS_PER_LEVEL, index)[index & INDEX_MASK]
end
def_delegator :self, :get, :at
# Retrieve the value at `index`, or use the provided default value or block,
# or otherwise raise an `IndexError`.
#
# @overload fetch(index)
# Retrieve the value at the given index, or raise an `IndexError` if it is
# not found.
# @param index [Integer] The index to look up
# @overload fetch(index) { |index| ... }
# Retrieve the value at the given index, or call the optional
# code block (with the non-existent index) and get its return value.
# @yield [index] The index which does not exist
# @yieldreturn [Object] Object to return instead
# @param index [Integer] The index to look up
# @overload fetch(index, default)
# Retrieve the value at the given index, or else return the provided
# `default` value.
# @param index [Integer] The index to look up
# @param default [Object] Object to return if the key is not found
#
# @return [Object]
def fetch(index, default = (missing_default = true))
index += @size if index < 0
if index >= 0 && index < size
get(index)
elsif block_given?
yield
elsif !missing_default
default
else
raise IndexError, "index #{index} outside of vector bounds"
end
end
# Element reference. Return the item at a specific index, or a specified,
# contiguous range of items (as a new `Vector`).
#
# @overload vector[index]
# Return the item at `index`.
# @param index [Integer] The index to retrieve.
# @overload vector[start, length]
# Return a subvector starting at index `start` and continuing for `length` elements.
# @param start [Integer] The index to start retrieving items from.
# @param length [Integer] The number of items to retrieve.
# @overload vector[range]
# Return a subvector specified by the given `range` of indices.
# @param range [Range] The range of indices to retrieve.
#
# @return [Vector]
def [](arg, length = (missing_length = true))
if missing_length
if arg.is_a?(Range)
from, to = arg.begin, arg.end
from += @size if from < 0
to += @size if to < 0
to += 1 if !arg.exclude_end?
to = @size if to > @size
length = to - from
length = 0 if length < 0
subsequence(from, length)
else
get(arg)
end
else
arg += @size if arg < 0
subsequence(arg, length)
end
end
def_delegator :self, :[], :slice
# Return a new `Vector` with the given values inserted before the element at `index`.
#
# @param index [Integer] The index where the new items should go
# @param items [Array] The items to add
# @return [Vector]
def insert(index, *items)
raise IndexError if index < -@size
index += @size if index < 0
if index < @size
suffix = flatten_suffix(@root, @levels * BITS_PER_LEVEL, index, [])
suffix.unshift(*items)
elsif index == @size
suffix = items
else
suffix = Array.new(index - @size, nil).concat(items)
index = @size
end
replace_suffix(index, suffix)
end
# Return a new `Vector` with the element at `index` removed. If the given `index`
# does not exist, return `self`.
#
# @param index [Integer] The index to remove
# @return [Vector]
def delete_at(index)
return self if index >= @size || index < -@size
index += @size if index < 0
suffix = flatten_suffix(@root, @levels * BITS_PER_LEVEL, index, [])
replace_suffix(index, suffix.tap { |a| a.shift })
end
# Call the given block once for each item in the vector, passing each
# item from first to last successively to the block.
#
# @return [self]
def each(&block)
return to_enum unless block_given?
traverse_depth_first(@root, @levels, &block)
self
end
# Call the given block once for each item in the vector, passing each
# item starting from the last, and counting back to the first, successively to
# the block.
#
# @return [self]
def reverse_each(&block)
return enum_for(:reverse_each) unless block_given?
reverse_traverse_depth_first(@root, @levels, &block)
self
end
# Return a new `Vector` containing all elements for which the given block returns
# true.
#
# @return [Vector]
def filter
return enum_for(:filter) unless block_given?
reduce(self.class.empty) { |vector, item| yield(item) ? vector.add(item) : vector }
end
# Return a new `Vector` with all items which are equal to `obj` removed.
# `#==` is used for checking equality.
#
# @param obj [Object] The object to remove (every occurrence)
# @return [Vector]
def delete(obj)
filter { |item| item != obj }
end
# Invoke the given block once for each item in the vector, and return a new
# `Vector` containing the values returned by the block.
#
# @return [Vector]
def map
return enum_for(:map) if not block_given?
return self if empty?
self.class.new(super)
end
def_delegator :self, :map, :collect
# Return a new `Vector` with the same elements as this one, but randomly permuted.
#
# @return [Vector]
def shuffle
self.class.new(((array = to_a).frozen? ? array.shuffle : array.shuffle!).freeze)
end
# Return a new `Vector` with no duplicate elements, as determined by `#hash` and
# `#eql?`. For each group of equivalent elements, only the first will be retained.
#
# @return [Vector]
def uniq
self.class.new(((array = to_a).frozen? ? array.uniq : array.uniq!).freeze)
end
# Return a new `Vector` with the same elements as this one, but in reverse order.
#
# @return [Vector]
def reverse
self.class.new(((array = to_a).frozen? ? array.reverse : array.reverse!).freeze)
end
# Return a new `Vector` with the same elements, but rotated so that the one at
# index `count` is the first element of the new vector. If `count` is positive,
# the elements will be shifted left, and those shifted past the lowest position
# will be moved to the end. If `count` is negative, the elements will be shifted
# right, and those shifted past the last position will be moved to the beginning
#
# @param count [Integer] The number of positions to shift items by
# @return [Vector]
def rotate(count = 1)
return self if (count % @size) == 0
self.class.new(((array = to_a).frozen? ? array.rotate(count) : array.rotate!(count)).freeze)
end
# Return a new `Vector` with all nested vectors and arrays recursively "flattened
# out", that is, their elements inserted into the new `Vector` in the place where
# the nested array/vector originally was. If an optional `level` argument is
# provided, the flattening will only be done recursively that number of times.
# A `level` of 0 means not to flatten at all, 1 means to only flatten nested
# arrays/vectors which are directly contained within this `Vector`.
#
# @param level [Integer] The depth to which flattening should be applied
# @return [Vector]
def flatten(level = nil)
return self if level == 0
self.class.new(((array = to_a).frozen? ? array.flatten(level) : array.flatten!(level)).freeze)
end
# Return a new `Vector` built by concatenating this one with `other`. `other`
# can be any object which is convertible to an `Array` using `#to_a`.
#
# @param other [Enumerable] The collection to concatenate onto this vector
# @return [Vector]
def +(other)
other = other.to_a
other = other.dup if other.frozen?
replace_suffix(@size, other)
end
def_delegator :self, :+, :concat
# `others` should be arrays and/or vectors. The corresponding elements from this
# `Vector` and each of `others` (that is, the elements with the same indices)
# will be gathered into arrays.
#
# If an optional block is provided, each such array will be passed successively
# to the block. Otherwise, a new `Vector` of all those arrays will be returned.
#
# @param others [Array] The arrays/vectors to zip together with this one
# @return [Vector, nil]
def zip(*others)
if block_given?
super
else
self.class.new(super)
end
end
# Return a new `Vector` with the same items, but sorted. The sort order will
# be determined by comparing items using `#<=>`, or if an optional code block
# is provided, by using it as a comparator. The block should accept 2 parameters,
# and should return 0, 1, or -1 if the first parameter is equal to, greater than,
# or less than the second parameter (respectively).
#
# @return [Vector]
def sort
self.class.new(super)
end
# Return a new `Vector` with the same items, but sorted. The sort order will be
# determined by mapping the items through the given block to obtain sort keys,
# and then sorting the keys according to their natural sort order.
#
# @return [Vector]
def sort_by
self.class.new(super)
end
# Drop the first `n` elements and return the rest in a new `Vector`.
# @param n [Integer] The number of elements to remove
# @return [Vector]
def drop(n)
self.class.new(super)
end
# Return only the first `n` elements in a new `Vector`.
# @param n [Integer] The number of elements to retain
# @return [Vector]
def take(n)
self.class.new(super)
end
# Drop elements up to, but not including, the first element for which the
# block returns `nil` or `false`. Gather the remaining elements into a new
# `Vector`. If no block is given, an `Enumerator` is returned instead.
#
# @return [Vector, Enumerator]
def drop_while
return enum_for(:drop_while) if not block_given?
self.class.new(super)
end
# Gather elements up to, but not including, the first element for which the
# block returns `nil` or `false`, and return them in a new `Vector`. If no block
# is given, an `Enumerator` is returned instead.
#
# @return [Vector, Enumerator]
def take_while
return enum_for(:take_while) if not block_given?
self.class.new(super)
end
# Repetition. Return a new `Vector` built by concatenating `times` copies
# of this one together.
#
# @param times [Integer] The number of times to repeat the elements in this vector
# @return [Vector]
def *(times)
return self.class.empty if times == 0
return self if times == 1
result = (to_a * times)
result.is_a?(Array) ? self.class.new(result) : result
end
# Replace a range of indexes with the given object.
#
# @overload fill(obj)
# Return a new `Vector` of the same size, with every index set to `obj`.
# @overload fill(obj, start)
# Return a new `Vector` with all indexes from `start` to the end of the
# vector set to `obj`.
# @overload fill(obj, start, length)
# Return a new `Vector` with `length` indexes, beginning from `start`,
# set to `obj`.
#
# @return [Vector]
def fill(obj, index = 0, length = nil)
raise IndexError if index < -@size
index += @size if index < 0
length ||= @size - index # to the end of the array, if no length given
if index < @size
suffix = flatten_suffix(@root, @levels * BITS_PER_LEVEL, index, [])
suffix.fill(obj, 0, length)
elsif index == @size
suffix = Array.new(length, obj)
else
suffix = Array.new(index - @size, nil).concat(Array.new(length, obj))
index = @size
end
replace_suffix(index, suffix)
end
# When invoked with a block, yields all combinations of length `n` of items
# from the `Vector`, and then returns `self`. There is no guarantee about
# which order the combinations will be yielded in.
#
# If no block is given, an `Enumerator` is returned instead.
#
# @return [self, Enumerator]
def combination(n)
return enum_for(:combination, n) if not block_given?
return self if n < 0 || @size < n
if n == 0
yield []
elsif n == 1
each { |item| yield [item] }
elsif n == @size
yield self.to_a
else
combos = lambda do |result,index,remaining|
while @size - index > remaining
if remaining == 1
yield result.dup << get(index)
else
combos[result.dup << get(index), index+1, remaining-1]
end
index += 1
end
index.upto(@size-1) { |i| result << get(i) }
yield result
end
combos[[], 0, n]
end
self
end
# When invoked with a block, yields all repeated combinations of length `n` of
# items from the `Vector`, and then returns `self`. A "repeated combination" is
# one in which any item from the `Vector` can appear consecutively any number of
# times.
#
# There is no guarantee about which order the combinations will be yielded in.
#
# If no block is given, an `Enumerator` is returned instead.
#
# @return [self, Enumerator]
def repeated_combination(n)
return enum_for(:repeated_combination, n) if not block_given?
if n < 0
# yield nothing
elsif n == 0
yield []
elsif n == 1
each { |item| yield [item] }
elsif @size == 0
# yield nothing
else
combos = lambda do |result,index,remaining|
while index < @size-1
if remaining == 1
yield result.dup << get(index)
else
combos[result.dup << get(index), index, remaining-1]
end
index += 1
end
item = get(index)
remaining.times { result << item }
yield result
end
combos[[], 0, n]
end
self
end
# When invoked with a block, yields all permutations of length `n` of items
# from the `Vector`, and then returns `self`. If no length `n` is specified,
# permutations of all elements will be yielded.
#
# There is no guarantee about which order the permutations will be yielded in.
#
# If no block is given, an `Enumerator` is returned instead.
#
# @return [self, Enumerator]
def permutation(n = @size)
return enum_for(:permutation, n) if not block_given?
if n < 0 || @size < n
# yield nothing
elsif n == 0
yield []
elsif n == 1
each { |item| yield [item] }
else
used, result = [], []
perms = lambda do |index|
0.upto(@size-1) do |i|
if !used[i]
result[index] = get(i)
if index < n-1
used[i] = true
perms[index+1]
used[i] = false
else
yield result.dup
end
end
end
end
perms[0]
end
self
end
# When invoked with a block, yields all repeated permutations of length `n` of
# items from the `Vector`, and then returns `self`. A "repeated permutation" is
# one where any item from the `Vector` can appear any number of times, and in
# any position (not just consecutively)
#
# If no length `n` is specified, permutations of all elements will be yielded.
# There is no guarantee about which order the permutations will be yielded in.
#
# If no block is given, an `Enumerator` is returned instead.
#
# @return [self, Enumerator]
def repeated_permutation(n = @size)
return enum_for(:repeated_permutation, n) if not block_given?
if n < 0
# yield nothing
elsif n == 0
yield []
elsif n == 1
each { |item| yield [item] }
else
result = []
perms = lambda do |index|
0.upto(@size-1) do |i|
result[index] = get(i)
if index < n-1
perms[index+1]
else
yield result.dup
end
end
end
perms[0]
end
self
end
# With one or more vector or array arguments, return the cartesian product of
# this vector's elements and those of each argument; with no arguments, return the
# result of multiplying all this vector's items together.
#
# @overload product(*vectors)
# Return a `Vector` of all combinations of elements from this `Vector` and each
# of the given vectors or arrays. The length of the returned `Vector` is the product
# of `self.size` and the size of each argument vector or array.
# @overload product
# Return the result of multiplying all the items in this `Vector` together.
#
# @return [Vector]
def product(*vectors)
# if no vectors passed, return "product" as in result of multiplying all items
return super if vectors.empty?
vectors.unshift(self)
if vectors.any?(&:empty?)
return block_given? ? self : []
end
counters = Array.new(vectors.size, 0)
bump_counters = lambda do
i = vectors.size-1
counters[i] += 1
while counters[i] == vectors[i].size
counters[i] = 0
i -= 1
return true if i == -1 # we are done
counters[i] += 1
end
false # not done yet
end
build_array = lambda do
array = []
counters.each_with_index { |index,i| array << vectors[i][index] }
array
end
if block_given?
while true
yield build_array[]
return self if bump_counters[]
end
else
result = []
while true
result << build_array[]
return result if bump_counters[]
end
end
end
# Assume all elements are vectors or arrays and transpose the rows and columns.
# In other words, take the first element of each nested vector/array and gather
# them together into a new `Vector`. Do likewise for the second, third, and so on
# down to the end of each nested vector/array. Gather all the resulting `Vectors`
# into a new `Vector` and return it.
#
# This operation is closely related to {#zip}. The result is almost the same as
# calling {#zip} on the first nested vector/array with the others supplied as
# arguments.
#
# @return [Vector]
def transpose
return self.class.empty if empty?
result = Array.new(first.size) { [] }
0.upto(@size-1) do |i|
source = get(i)
if source.size != result.size
raise IndexError, "element size differs (#{source.size} should be #{result.size})"
end
0.upto(result.size-1) do |j|
result[j].push(source[j])
end
end
result.map! { |a| self.class.new(a) }
self.class.new(result)
end
# By using binary search, finds a value from this `Vector` which meets the
# condition defined by the provided block. Behavior is just like `Array#bsearch`.
# See `Array#bsearch` for details.
#
# @return [Object]
def bsearch
low, high, result = 0, @size, nil
while low < high
mid = (low + ((high - low) >> 1))
val = get(mid)
v = yield val
if v.is_a? Numeric
if v == 0
return val
elsif v > 0
high = mid
else
low = mid + 1
end
elsif v == true
result = val
high = mid
elsif !v
low = mid + 1
else
raise TypeError, "wrong argument type #{v.class} (must be numeric, true, false, or nil)"
end
end
result
end
# Return an empty `Vector` instance, of the same class as this one. Useful if you
# have multiple subclasses of `Vector` and want to treat them polymorphically.
#
# @return [Vector]
def clear
self.class.empty
end
# Return a randomly chosen item from this `Vector`. If the vector is empty, return `nil`.
#
# @return [Object]
def sample
get(rand(@size))
end
# Return a new `Vector` with only the elements at the given `indices`, in the
# order specified by `indices`. If any of the `indices` do not exist, `nil`s will
# appear in their places.
#
# @param indices [Array] The indices to retrieve and gather into a new `Vector`
# @return [Vector]
def values_at(*indices)
self.class.new(indices.map { |i| get(i) }.freeze)
end
# Return the index of the last element which is equal to the provided object,
# or for which the provided block returns true.
#
# @overload rindex(obj)
# Return the index of the last element in this `Vector` which is `#==` to `obj`.
# @overload rindex { |item| ... }
# Return the index of the last element in this `Vector` for which the block
# returns true. (Iteration starts from the last element, counts back, and
# stops as soon as a matching element is found.)
#
# @return [Index]
def rindex(obj = (missing_arg = true))
i = @size - 1
if missing_arg
if block_given?
reverse_each { |item| return i if yield item; i -= 1 }
nil
else
enum_for(:rindex)
end
else
reverse_each { |item| return i if item == obj; i -= 1 }
nil
end
end
# Assumes all elements are nested, indexable collections, and searches through them,
# comparing `obj` with the first element of each nested collection. Return the
# first nested collection which matches, or `nil` if none is found.
#
# @param obj [Object] The object to search for
# @return [Object]
def assoc(obj)
each { |array| return array if obj == array[0] }
nil
end
# Assumes all elements are nested, indexable collections, and searches through them,
# comparing `obj` with the second element of each nested collection. Return the
# first nested collection which matches, or `nil` if none is found.
#
# @param obj [Object] The object to search for
# @return [Object]
def rassoc(obj)
each { |array| return array if obj == array[1] }
nil
end
# Return an `Array` with the same elements, in the same order. The returned
# `Array` may or may not be frozen.
#
# @return [Array]
def to_a
if @levels == 0
@root
else
flatten_node(@root, @levels * BITS_PER_LEVEL, [])
end
end
# Return true if `other` has the same type and contents as this `Vector`.
#
# @param other [Object] The collection to compare with
# @return [Boolean]
def eql?(other)
return true if other.equal?(self)
return false unless instance_of?(other.class) && @size == other.size
@root.eql?(other.instance_variable_get(:@root))
end
# See `Object#hash`.
# @return [Integer]
def hash
reduce(0) { |hash, item| (hash << 5) - hash + item.hash }
end
private
def traverse_depth_first(node, level, &block)
return node.each(&block) if level == 0
node.each { |child| traverse_depth_first(child, level - 1, &block) }
end
def reverse_traverse_depth_first(node, level, &block)
return node.reverse_each(&block) if level == 0
node.reverse_each { |child| reverse_traverse_depth_first(child, level - 1, &block) }
end
def leaf_node_for(node, bitshift, index)
while bitshift > 0
node = node[(index >> bitshift) & INDEX_MASK]
bitshift -= BITS_PER_LEVEL
end
node
end
def update_root(index, item)
root, levels = @root, @levels
while index >= (1 << (BITS_PER_LEVEL * (levels + 1)))
root = [root].freeze
levels += 1
end
root = update_leaf_node(root, levels * BITS_PER_LEVEL, index, item)
self.class.alloc(root, @size > index ? @size : index + 1, levels)
end
def update_leaf_node(node, bitshift, index, item)
slot_index = (index >> bitshift) & INDEX_MASK
if bitshift > 0
old_child = node[slot_index] || []
item = update_leaf_node(old_child, bitshift - BITS_PER_LEVEL, index, item)
end
node.dup.tap { |n| n[slot_index] = item }.freeze
end
def flatten_range(node, bitshift, from, to)
from_slot = (from >> bitshift) & INDEX_MASK
to_slot = (to >> bitshift) & INDEX_MASK
if bitshift == 0 # are we at the bottom?
node.slice(from_slot, to_slot-from_slot+1)
elsif from_slot == to_slot
flatten_range(node[from_slot], bitshift - BITS_PER_LEVEL, from, to)
else
# the following bitmask can be used to pick out the part of the from/to indices
# which will be used to direct path BELOW this node
mask = ((1 << bitshift) - 1)
result = []
if from & mask == 0
flatten_node(node[from_slot], bitshift - BITS_PER_LEVEL, result)
else
result.concat(flatten_range(node[from_slot], bitshift - BITS_PER_LEVEL, from, from | mask))
end
(from_slot+1).upto(to_slot-1) do |slot_index|
flatten_node(node[slot_index], bitshift - BITS_PER_LEVEL, result)
end
if to & mask == mask
flatten_node(node[to_slot], bitshift - BITS_PER_LEVEL, result)
else
result.concat(flatten_range(node[to_slot], bitshift - BITS_PER_LEVEL, to & ~mask, to))
end
result
end
end
# Append every element of the subtree `node` (whose children sit `bitshift`
# bits deep) onto `result`. Returns `result` for chaining.
def flatten_node(node, bitshift, result)
if bitshift == 0
# Leaf: elements are stored directly.
result.concat(node)
elsif bitshift == BITS_PER_LEVEL
# One level above leaves: children are leaf arrays.
node.each { |a| result.concat(a) }
else
bitshift -= BITS_PER_LEVEL
node.each { |a| flatten_node(a, bitshift, result) }
end
result
end
# Slice operation: a new vector of `length` items starting at `from`.
# Returns nil for out-of-bounds/negative arguments, the shared empty vector
# when the clamped length is zero.
def subsequence(from, length)
return nil if from > @size || from < 0 || length < 0
length = @size - from if @size < from + length
return self.class.empty if length == 0
self.class.new(flatten_range(@root, @levels * BITS_PER_LEVEL, from, from + length - 1))
end
# Append all elements at index `from` and beyond (within `node`'s subtree)
# onto `result`. Returns `result`.
def flatten_suffix(node, bitshift, from, result)
from_slot = (from >> bitshift) & INDEX_MASK
if bitshift == 0
if from_slot == 0
result.concat(node)
else
result.concat(node.slice(from_slot, 32)) # entire suffix of node. excess length is ignored by #slice
end
else
mask = ((1 << bitshift) - 1)
if from & mask == 0
# `from` is aligned: every child from from_slot on is wholly included.
from_slot.upto(node.size-1) do |i|
flatten_node(node[i], bitshift - BITS_PER_LEVEL, result)
end
elsif child = node[from_slot]
# Partial first child, then whole siblings to its right.
flatten_suffix(child, bitshift - BITS_PER_LEVEL, from, result)
(from_slot+1).upto(node.size-1) do |i|
flatten_node(node[i], bitshift - BITS_PER_LEVEL, result)
end
end
result
end
end
# Return a new vector equal to the first `from` elements of self followed
# by the elements of the array `suffix`. Grows or shrinks the trie depth
# as required by the new size.
def replace_suffix(from, suffix)
# new suffix can go directly after existing elements
raise IndexError if from > @size
root, levels = @root, @levels
if (from >> (BITS_PER_LEVEL * (@levels + 1))) != 0
# index where new suffix goes doesn't fall within current tree
# we will need to deepen tree
root = [root].freeze
levels += 1
end
new_size = from + suffix.size
# Truncate the tree at `from`, splicing in as much of `suffix` as fits.
root = replace_node_suffix(root, levels * BITS_PER_LEVEL, from, suffix)
if !suffix.empty?
# Leftover suffix elements: chunk them to the current depth and append,
# deepening the tree while the root overflows its 32-slot budget.
levels.times { suffix = suffix.each_slice(32).to_a }
root.concat(suffix)
while root.size > 32
root = root.each_slice(32).to_a
levels += 1
end
else
# Possibly shrunk: collapse single-child roots to reduce depth.
while root.size == 1
root = root[0]
levels -= 1
end
end
self.class.alloc(root.freeze, new_size, levels)
end
# Helper for #replace_suffix: rebuild `node` keeping indices below `from`
# and filling the remaining capacity from the front of `suffix` (which is
# destructively consumed via Array#shift).
def replace_node_suffix(node, bitshift, from, suffix)
from_slot = (from >> bitshift) & INDEX_MASK
if bitshift == 0
if from_slot == 0
# Whole leaf replaced by the next 32 suffix elements.
suffix.shift(32)
else
node.take(from_slot).concat(suffix.shift(32 - from_slot))
end
else
mask = ((1 << bitshift) - 1)
if from & mask == 0
if from_slot == 0
# Whole subtree replaced: take its full capacity from the suffix
# and re-chunk it into nested 32-wide nodes.
new_node = suffix.shift(32 * (1 << bitshift))
while bitshift != 0
new_node = new_node.each_slice(32).to_a
bitshift -= BITS_PER_LEVEL
end
new_node
else
# Keep the first from_slot children, fill the rest from the suffix.
result = node.take(from_slot)
remainder = suffix.shift((32 - from_slot) * (1 << bitshift))
while bitshift != 0
remainder = remainder.each_slice(32).to_a
bitshift -= BITS_PER_LEVEL
end
result.concat(remainder)
end
elsif child = node[from_slot]
# Partially keep the child at from_slot, then fill the siblings to its
# right from the suffix.
result = node.take(from_slot)
result.push(replace_node_suffix(child, bitshift - BITS_PER_LEVEL, from, suffix))
remainder = suffix.shift((31 - from_slot) * (1 << bitshift))
while bitshift != 0
remainder = remainder.each_slice(32).to_a
bitshift -= BITS_PER_LEVEL
end
result.concat(remainder)
else
raise "Shouldn't happen"
end
end
end
end
# The canonical empty `Vector`. Returned by `Hamster.vector` and `Vector[]` when
# invoked with no arguments; also returned by `Vector.empty`. Prefer using this
# one rather than creating many empty vectors using `Vector.new`.
#
EmptyVector = Hamster::Vector.empty
end
|
require 'speech'
require 'mechanize_proxy'
require 'configuration'
require 'debates'
# Stand-in speaker used when a name in the Hansard text cannot be matched
# to a known member of parliament.
class UnknownSpeaker
  def initialize(name)
    @name = name
  end

  # Name object parsed from the captured speaker text.
  def name
    Name.title_first_last(@name)
  end

  # Sentinel identifier shared by every unmatched speaker.
  def id
    "unknown"
  end
end
require 'rubygems'
require 'log4r'
class HansardParser
attr_reader :logger
# people: lookup object used to resolve speaker names to members
# (see #lookup_speaker). Configures a Log4r logger that writes both to
# stdout and to the log file named in configuration.yml.
def initialize(people)
@people = people
conf = Configuration.new
# Set up logging
@logger = Log4r::Logger.new 'HansardParser'
# Log to both standard out and the file set in configuration.yml
@logger.add(Log4r::Outputter.stdout)
@logger.add(Log4r::FileOutputter.new('foo', :filename => conf.log_path, :trunc => false,
:formatter => Log4r::PatternFormatter.new(:pattern => "[%l] %d :: %M")))
end
# Fetch and parse all House Hansard speeches for `date`, writing the result
# as XML to `xml_filename`. Logs a warning and returns early if the
# overview page cannot be retrieved.
def parse_date(date, xml_filename)
debates = Debates.new(date)
@logger.info "Parsing speeches for #{date.strftime('%a %d %b %Y')}..."
# Required to workaround long viewstates generated by .NET (whatever that means)
# See http://code.whytheluckystiff.net/hpricot/ticket/13
Hpricot.buffer_size = 400000
agent = MechanizeProxy.new
agent.cache_subdirectory = date.to_s
url = "http://parlinfoweb.aph.gov.au/piweb/browse.aspx?path=Chamber%20%3E%20House%20Hansard%20%3E%20#{date.year}%20%3E%20#{date.day}%20#{Date::MONTHNAMES[date.month]}%20#{date.year}"
begin
page = agent.get(url)
# HACK: Don't know why if the page isn't found a return code isn't returned. So, hacking around this.
if page.title == "ParlInfo Web - Error"
# NOTE(review): `throw` without a matching `catch` -- probably `raise` was
# intended; the bare rescue below swallows the resulting error either way.
throw "ParlInfo Web - Error"
end
rescue
logger.warn "Could not retrieve overview page for date #{date}"
return
end
# Structure of the page is such that we are only interested in some of the links
page.links[30..-4].each do |link|
parse_sub_day_page(link.to_s, agent.click(link), debates, date)
# This ensures that every sub day page has a different major count which limits the impact
# of when we start supporting things like written questions, procedurial text, etc..
debates.increment_major_count
end
debates.output(xml_filename)
end
# Dispatch one sub-day page based on its link text. Only "Speech:" links
# are parsed; known-but-unsupported categories log a warning, headers are
# skipped silently.
def parse_sub_day_page(link_text, sub_page, debates, date)
# Only going to consider speeches for the time being
if link_text =~ /^Speech:/
# Link text for speech has format:
# HEADING > NAME > HOUR:MINS:SECS
split = link_text.split('>').map{|a| a.strip}
logger.error "Expected split to have length 3" unless split.size == 3
time = split[2]
# Extract permanent URL of this subpage. Also, quoting because there is a bug
# in XML Builder that for some reason is not quoting attributes properly
url = quote(sub_page.links.text("[Permalink]").uri.to_s)
parse_sub_day_speech_page(sub_page, time, url, debates, date)
elsif link_text == "Official Hansard" || link_text =~ /^Start of Business/ || link_text == "Adjournment"
# Do nothing - skip this entirely
elsif link_text =~ /^Procedural text:/ || link_text =~ /^QUESTIONS WITHOUT NOTICE:/ || link_text =~ /^QUESTIONS IN WRITING:/ ||
link_text =~ /^Division:/
logger.warn "Not yet supporting: #{link_text}"
else
logger.warn "Unsupported: #{link_text}"
end
end
# Parse a single speech subpage: record its heading, then add one speech
# entry per content element, tracking the current speaker as it changes.
def parse_sub_day_speech_page(sub_page, time, url, debates, date)
newtitle = sub_page.search('div#contentstart div.hansardtitle').map { |m| m.inner_html }.join('; ')
newsubtitle = sub_page.search('div#contentstart div.hansardsubtitle').map { |m| m.inner_html }.join('; ')
# Replace any unicode characters
newtitle = replace_unicode(newtitle)
newsubtitle = replace_unicode(newsubtitle)
debates.add_heading(newtitle, newsubtitle, url)
content = sub_page.search('div#contentstart > div.speech0 > *')
if content.size > 1
speaker = nil
# First element is skipped; presumably it is the heading block -- TODO confirm.
content[1..-1].each do |e|
speakername = extract_speakername(e)
# Only change speaker if a speaker name was found
speaker = lookup_speaker(speakername, date) if speakername
debates.add_speech(speaker, time, url, clean_speech_content(url, e))
end
else
# Doing this just to ensure that regression tests don't fail
#cleaned = clean_speech_content(url, content[0])
#p cleaned
debates.add_speech(nil, time, url, Hpricot(''))
end
end
# Pass in current speaker (if there is one)
# NOTE(review): this method references `date`, `debates`, `time` and `url`,
# none of which are defined in this scope -- it appears to be unfinished or
# dead code extracted from #parse_sub_day_speech_page and would raise
# NameError if called.
def parse_speeches(content, speaker = nil)
content.each do |e|
speakername = extract_speakername(e)
# Only change speaker if a speaker name was found
speaker = lookup_speaker(speakername, date) if speakername
debates.add_speech(speaker, time, url, clean_speech_content(url, e))
end
end
# Extract a speaker name string from a speech element, or nil when no name
# can be determined. Tries the talkername markup first, then interjection
# text patterns.
def extract_speakername(content)
# Try to extract speaker name from talkername tag
tag = content.search('span.talkername a').first
if tag
name = tag.inner_html
# Now check if there is something like <span class="talkername"><a>Some Text</a></span> <b>(Some Text)</b>
tag = content.search('span.talkername ~ b').first
# Only use it if it is surrounded by brackets
if tag && tag.inner_html.match(/\((.*)\)/)
# $~[0] is the full match, so the brackets are kept in the name.
name += " " + $~[0]
end
# If that fails try an interjection
elsif content.search("div.speechType").inner_html == "Interjection"
text = strip_tags(content.search("div.speechType + *").first)
m = text.match(/([a-z].*) interjecting/i)
if m
name = m[1]
else
m = text.match(/([a-z].*)—/i)
if m
name = m[1]
else
name = nil
end
end
end
# `name` stays nil when neither branch above assigned it.
name
end
# Replace the known UTF-8 punctuation sequences (curly quotes, em dash)
# with ASCII equivalents; warn about any non-ASCII bytes that remain.
def replace_unicode(text)
  cleaned = text
    .gsub("\342\200\230", "'")
    .gsub("\342\200\231", "'")
    .gsub("\342\200\224", "-")
  cleaned.each_byte do |byte|
    logger.warn "Found invalid characters in: #{cleaned.dump}" if byte > 127
  end
  cleaned
end
# Normalize the HTML of one speech element: strip speaker/metadata markup,
# italicize motions/quotes/amendments, absolutize links, scrub timestamps,
# and warn about any tags not on the allow-list. Returns an Hpricot doc.
def clean_speech_content(base_url, content)
doc = Hpricot(content.to_s)
doc.search('div.speechType').remove
doc.search('span.talkername ~ b').remove
doc.search('span.talkername').remove
doc.search('span.talkerelectorate').remove
doc.search('span.talkerrole').remove
doc.search('hr').remove
make_motions_and_quotes_italic(doc)
remove_subspeech_tags(doc)
fix_links(base_url, doc)
make_amendments_italic(doc)
fix_attributes_of_p_tags(doc)
fix_attributes_of_td_tags(doc)
# Do pure string manipulations from here
text = doc.to_s
text = text.gsub("(\342\200\224)", '')
text = text.gsub(/([^\w])\342\200\224/) {|m| m[0..0]}
# Remove in-text timestamps like "(9.15 a.m.)".
text = text.gsub(/\(\d{1,2}.\d\d a.m.\)/, '')
text = text.gsub(/\(\d{1,2}.\d\d p.m.\)/, '')
text = text.gsub('()', '')
# Look for tags in the text and display warnings if any of them aren't being handled yet
text.scan(/<[a-z][^>]*>/i) do |t|
m = t.match(/<([a-z]*) [^>]*>/i)
if m
tag = m[1]
else
tag = t[1..-2]
end
allowed_tags = ["b", "i", "dl", "dt", "dd", "ul", "li", "a", "table", "td", "tr"]
if !allowed_tags.include?(tag) && t != "<p>" && t != '<p class="italic">'
logger.warn "Tag #{t} is present in speech contents: " + text
end
end
doc = Hpricot(text)
#p doc.to_s
doc
end
# Normalize paragraph markup: bold paragraphs get wrapped in <b>, layout
# class names are dropped, and italic variants collapse to class="italic".
def fix_attributes_of_p_tags(content)
content.search('p.parabold').wrap('<b></b>')
content.search('p').each do |e|
class_value = e.get_attribute('class')
if class_value == "block" || class_value == "parablock" || class_value == "parasmalltablejustified" ||
class_value == "parasmalltableleft" || class_value == "parabold"
e.remove_attribute('class')
elsif class_value == "paraitalic"
e.set_attribute('class', 'italic')
elsif class_value == "italic" && e.get_attribute('style')
e.remove_attribute('style')
end
end
end
# Drop inline style attributes from all table cells.
def fix_attributes_of_td_tags(content)
content.search('td').each do |e|
e.remove_attribute('style')
end
end
# Make every link absolute relative to `base_url`; anchor tags without an
# href are unwrapped (replaced by their inner HTML). Returns `content`.
def fix_links(base_url, content)
content.search('a').each do |e|
href_value = e.get_attribute('href')
if href_value.nil?
# Remove a tags
e.swap(e.inner_html)
else
# NOTE(review): URI.join returns a URI object, relying on the
# attribute setter to stringify it -- confirm Hpricot does so.
e.set_attribute('href', URI.join(base_url, href_value))
end
end
content
end
# Unwrap every element matching `search`, keeping only its inner HTML.
def replace_with_inner_html(content, search)
content.search(search).each do |e|
e.swap(e.inner_html)
end
end
# Italicize the paragraphs of motion and quote blocks, then unwrap the
# blocks themselves. Returns `content`.
def make_motions_and_quotes_italic(content)
content.search('div.motion p').set(:class => 'italic')
replace_with_inner_html(content, 'div.motion')
content.search('div.quote p').set(:class => 'italic')
replace_with_inner_html(content, 'div.quote')
content
end
# Italicize amendment paragraphs and unwrap the amendment containers.
# Returns `content`.
def make_amendments_italic(content)
content.search('div.amendments div.amendment0 p').set(:class => 'italic')
content.search('div.amendments div.amendment1 p').set(:class => 'italic')
replace_with_inner_html(content, 'div.amendment0')
replace_with_inner_html(content, 'div.amendment1')
replace_with_inner_html(content, 'div.amendments')
content
end
# Unwrap subspeech containers, keeping their contents. Returns `content`.
def remove_subspeech_tags(content)
replace_with_inner_html(content, 'div.subspeech0')
replace_with_inner_html(content, 'div.subspeech1')
content
end
# Escape text for use in an XML attribute (see the XML Builder quoting bug
# noted at the call site).
# NOTE(review): `sub` replaces only the FIRST '&', and as written the
# replacement is identical to the pattern (an '&amp;' entity was likely
# lost in transit) -- verify against the original source; `gsub` with a
# real entity is probably what's wanted.
def quote(text)
text.sub('&', '&')
end
# Resolve a raw speaker name to a member record current on `date`, handling
# the special "The Speaker"/"The Deputy Speaker (...)" forms. Falls back to
# an UnknownSpeaker (with a warning) when no match is found.
def lookup_speaker(speakername, date)
# NOTE(review): `throw` without `catch` -- probably `raise` was intended.
throw "speakername can not be nil in lookup_speaker" if speakername.nil?
# HACK alert (Oh you know what this whole thing is a big hack alert)
if speakername =~ /^the speaker/i
member = @people.house_speaker(date)
# The name might be "The Deputy Speaker (Mr Smith)". So, take account of this
elsif speakername =~ /^the deputy speaker/i
# Check name in brackets
match = speakername.match(/^the deputy speaker \((.*)\)/i)
if match
logger.warn "Deputy speaker is #{match[1]}"
speakername = match[1]
name = Name.title_first_last(speakername)
member = @people.find_member_by_name_current_on_date(name, date)
else
member = @people.deputy_house_speaker(date)
end
else
# Lookup id of member based on speakername
name = Name.title_first_last(speakername)
member = @people.find_member_by_name_current_on_date(name, date)
end
if member.nil?
logger.warn "Unknown speaker #{speakername}"
member = UnknownSpeaker.new(speakername)
end
member
end
# Render `doc` to a string and remove every HTML/XML tag from it.
def strip_tags(doc)
  doc.to_s.gsub(/<\/?[^>]*>/, "")
end
# Return the smaller of the two values.
def min(a, b)
  a < b ? a : b
end
end
The permalink URL is now captured inside parse_sub_day_speech_page rather than being extracted by the caller and passed in.
require 'speech'
require 'mechanize_proxy'
require 'configuration'
require 'debates'
# Stand-in speaker used when a name in the Hansard text cannot be matched
# to a known member of parliament.
class UnknownSpeaker
  def initialize(name)
    @name = name
  end

  # Name object parsed from the captured speaker text.
  def name
    Name.title_first_last(@name)
  end

  # Sentinel identifier shared by every unmatched speaker.
  def id
    "unknown"
  end
end
require 'rubygems'
require 'log4r'
class HansardParser
attr_reader :logger
# people: lookup object used to resolve speaker names to members
# (see #lookup_speaker). Configures a Log4r logger that writes both to
# stdout and to the log file named in configuration.yml.
def initialize(people)
@people = people
conf = Configuration.new
# Set up logging
@logger = Log4r::Logger.new 'HansardParser'
# Log to both standard out and the file set in configuration.yml
@logger.add(Log4r::Outputter.stdout)
@logger.add(Log4r::FileOutputter.new('foo', :filename => conf.log_path, :trunc => false,
:formatter => Log4r::PatternFormatter.new(:pattern => "[%l] %d :: %M")))
end
# Fetch and parse all House Hansard speeches for `date`, writing the result
# as XML to `xml_filename`. Logs a warning and returns early if the
# overview page cannot be retrieved.
def parse_date(date, xml_filename)
debates = Debates.new(date)
@logger.info "Parsing speeches for #{date.strftime('%a %d %b %Y')}..."
# Required to workaround long viewstates generated by .NET (whatever that means)
# See http://code.whytheluckystiff.net/hpricot/ticket/13
Hpricot.buffer_size = 400000
agent = MechanizeProxy.new
agent.cache_subdirectory = date.to_s
url = "http://parlinfoweb.aph.gov.au/piweb/browse.aspx?path=Chamber%20%3E%20House%20Hansard%20%3E%20#{date.year}%20%3E%20#{date.day}%20#{Date::MONTHNAMES[date.month]}%20#{date.year}"
begin
page = agent.get(url)
# HACK: Don't know why if the page isn't found a return code isn't returned. So, hacking around this.
if page.title == "ParlInfo Web - Error"
# NOTE(review): `throw` without a matching `catch` -- probably `raise` was
# intended; the bare rescue below swallows the resulting error either way.
throw "ParlInfo Web - Error"
end
rescue
logger.warn "Could not retrieve overview page for date #{date}"
return
end
# Structure of the page is such that we are only interested in some of the links
page.links[30..-4].each do |link|
parse_sub_day_page(link.to_s, agent.click(link), debates, date)
# This ensures that every sub day page has a different major count which limits the impact
# of when we start supporting things like written questions, procedurial text, etc..
debates.increment_major_count
end
debates.output(xml_filename)
end
# Dispatch one sub-day page based on its link text. Only "Speech:" links
# are parsed; known-but-unsupported categories log a warning, headers are
# skipped silently.
def parse_sub_day_page(link_text, sub_page, debates, date)
# Only going to consider speeches for the time being
if link_text =~ /^Speech:/
# Link text for speech has format:
# HEADING > NAME > HOUR:MINS:SECS
split = link_text.split('>').map{|a| a.strip}
logger.error "Expected split to have length 3" unless split.size == 3
time = split[2]
parse_sub_day_speech_page(sub_page, time, debates, date)
elsif link_text == "Official Hansard" || link_text =~ /^Start of Business/ || link_text == "Adjournment"
# Do nothing - skip this entirely
elsif link_text =~ /^Procedural text:/ || link_text =~ /^QUESTIONS WITHOUT NOTICE:/ || link_text =~ /^QUESTIONS IN WRITING:/ ||
link_text =~ /^Division:/
logger.warn "Not yet supporting: #{link_text}"
else
logger.warn "Unsupported: #{link_text}"
end
end
# Parse a single speech subpage: extract its permalink, record the heading,
# then add one speech entry per content element, tracking the current
# speaker as it changes.
def parse_sub_day_speech_page(sub_page, time, debates, date)
# Extract permanent URL of this subpage. Also, quoting because there is a bug
# in XML Builder that for some reason is not quoting attributes properly
url = quote(sub_page.links.text("[Permalink]").uri.to_s)
newtitle = sub_page.search('div#contentstart div.hansardtitle').map { |m| m.inner_html }.join('; ')
newsubtitle = sub_page.search('div#contentstart div.hansardsubtitle').map { |m| m.inner_html }.join('; ')
# Replace any unicode characters
newtitle = replace_unicode(newtitle)
newsubtitle = replace_unicode(newsubtitle)
debates.add_heading(newtitle, newsubtitle, url)
content = sub_page.search('div#contentstart > div.speech0 > *')
if content.size > 1
speaker = nil
# First element is skipped; presumably it is the heading block -- TODO confirm.
content[1..-1].each do |e|
speakername = extract_speakername(e)
# Only change speaker if a speaker name was found
speaker = lookup_speaker(speakername, date) if speakername
debates.add_speech(speaker, time, url, clean_speech_content(url, e))
end
else
# Doing this just to ensure that regression tests don't fail
debates.add_speech(nil, time, url, Hpricot(''))
end
end
# Pass in current speaker (if there is one)
# NOTE(review): this method references `date`, `debates`, `time` and `url`,
# none of which are defined in this scope -- it appears to be unfinished or
# dead code extracted from #parse_sub_day_speech_page and would raise
# NameError if called.
def parse_speeches(content, speaker = nil)
content.each do |e|
speakername = extract_speakername(e)
# Only change speaker if a speaker name was found
speaker = lookup_speaker(speakername, date) if speakername
debates.add_speech(speaker, time, url, clean_speech_content(url, e))
end
end
# Extract a speaker name string from a speech element, or nil when no name
# can be determined. Tries the talkername markup first, then interjection
# text patterns.
def extract_speakername(content)
# Try to extract speaker name from talkername tag
tag = content.search('span.talkername a').first
if tag
name = tag.inner_html
# Now check if there is something like <span class="talkername"><a>Some Text</a></span> <b>(Some Text)</b>
tag = content.search('span.talkername ~ b').first
# Only use it if it is surrounded by brackets
if tag && tag.inner_html.match(/\((.*)\)/)
# $~[0] is the full match, so the brackets are kept in the name.
name += " " + $~[0]
end
# If that fails try an interjection
elsif content.search("div.speechType").inner_html == "Interjection"
text = strip_tags(content.search("div.speechType + *").first)
m = text.match(/([a-z].*) interjecting/i)
if m
name = m[1]
else
m = text.match(/([a-z].*)—/i)
if m
name = m[1]
else
name = nil
end
end
end
# `name` stays nil when neither branch above assigned it.
name
end
# Replace the known UTF-8 punctuation sequences (curly quotes, em dash)
# with ASCII equivalents; warn about any non-ASCII bytes that remain.
def replace_unicode(text)
  cleaned = text
    .gsub("\342\200\230", "'")
    .gsub("\342\200\231", "'")
    .gsub("\342\200\224", "-")
  cleaned.each_byte do |byte|
    logger.warn "Found invalid characters in: #{cleaned.dump}" if byte > 127
  end
  cleaned
end
# Normalize the HTML of one speech element: strip speaker/metadata markup,
# italicize motions/quotes/amendments, absolutize links, scrub timestamps,
# and warn about any tags not on the allow-list. Returns an Hpricot doc.
def clean_speech_content(base_url, content)
doc = Hpricot(content.to_s)
doc.search('div.speechType').remove
doc.search('span.talkername ~ b').remove
doc.search('span.talkername').remove
doc.search('span.talkerelectorate').remove
doc.search('span.talkerrole').remove
doc.search('hr').remove
make_motions_and_quotes_italic(doc)
remove_subspeech_tags(doc)
fix_links(base_url, doc)
make_amendments_italic(doc)
fix_attributes_of_p_tags(doc)
fix_attributes_of_td_tags(doc)
# Do pure string manipulations from here
text = doc.to_s
text = text.gsub("(\342\200\224)", '')
text = text.gsub(/([^\w])\342\200\224/) {|m| m[0..0]}
# Remove in-text timestamps like "(9.15 a.m.)".
text = text.gsub(/\(\d{1,2}.\d\d a.m.\)/, '')
text = text.gsub(/\(\d{1,2}.\d\d p.m.\)/, '')
text = text.gsub('()', '')
# Look for tags in the text and display warnings if any of them aren't being handled yet
text.scan(/<[a-z][^>]*>/i) do |t|
m = t.match(/<([a-z]*) [^>]*>/i)
if m
tag = m[1]
else
tag = t[1..-2]
end
allowed_tags = ["b", "i", "dl", "dt", "dd", "ul", "li", "a", "table", "td", "tr"]
if !allowed_tags.include?(tag) && t != "<p>" && t != '<p class="italic">'
logger.warn "Tag #{t} is present in speech contents: " + text
end
end
doc = Hpricot(text)
#p doc.to_s
doc
end
# Normalize paragraph markup: bold paragraphs get wrapped in <b>, layout
# class names are dropped, and italic variants collapse to class="italic".
def fix_attributes_of_p_tags(content)
content.search('p.parabold').wrap('<b></b>')
content.search('p').each do |e|
class_value = e.get_attribute('class')
if class_value == "block" || class_value == "parablock" || class_value == "parasmalltablejustified" ||
class_value == "parasmalltableleft" || class_value == "parabold"
e.remove_attribute('class')
elsif class_value == "paraitalic"
e.set_attribute('class', 'italic')
elsif class_value == "italic" && e.get_attribute('style')
e.remove_attribute('style')
end
end
end
# Drop inline style attributes from all table cells.
def fix_attributes_of_td_tags(content)
content.search('td').each do |e|
e.remove_attribute('style')
end
end
# Make every link absolute relative to `base_url`; anchor tags without an
# href are unwrapped (replaced by their inner HTML). Returns `content`.
def fix_links(base_url, content)
content.search('a').each do |e|
href_value = e.get_attribute('href')
if href_value.nil?
# Remove a tags
e.swap(e.inner_html)
else
# NOTE(review): URI.join returns a URI object, relying on the
# attribute setter to stringify it -- confirm Hpricot does so.
e.set_attribute('href', URI.join(base_url, href_value))
end
end
content
end
# Unwrap every element matching `search`, keeping only its inner HTML.
def replace_with_inner_html(content, search)
content.search(search).each do |e|
e.swap(e.inner_html)
end
end
# Italicize the paragraphs of motion and quote blocks, then unwrap the
# blocks themselves. Returns `content`.
def make_motions_and_quotes_italic(content)
content.search('div.motion p').set(:class => 'italic')
replace_with_inner_html(content, 'div.motion')
content.search('div.quote p').set(:class => 'italic')
replace_with_inner_html(content, 'div.quote')
content
end
# Italicize amendment paragraphs and unwrap the amendment containers.
# Returns `content`.
def make_amendments_italic(content)
content.search('div.amendments div.amendment0 p').set(:class => 'italic')
content.search('div.amendments div.amendment1 p').set(:class => 'italic')
replace_with_inner_html(content, 'div.amendment0')
replace_with_inner_html(content, 'div.amendment1')
replace_with_inner_html(content, 'div.amendments')
content
end
# Unwrap subspeech containers, keeping their contents. Returns `content`.
def remove_subspeech_tags(content)
replace_with_inner_html(content, 'div.subspeech0')
replace_with_inner_html(content, 'div.subspeech1')
content
end
# Escape text for use in an XML attribute (see the XML Builder quoting bug
# noted at the call site).
# NOTE(review): `sub` replaces only the FIRST '&', and as written the
# replacement is identical to the pattern (an '&amp;' entity was likely
# lost in transit) -- verify against the original source; `gsub` with a
# real entity is probably what's wanted.
def quote(text)
text.sub('&', '&')
end
# Resolve a raw speaker name to a member record current on `date`, handling
# the special "The Speaker"/"The Deputy Speaker (...)" forms. Falls back to
# an UnknownSpeaker (with a warning) when no match is found.
def lookup_speaker(speakername, date)
# NOTE(review): `throw` without `catch` -- probably `raise` was intended.
throw "speakername can not be nil in lookup_speaker" if speakername.nil?
# HACK alert (Oh you know what this whole thing is a big hack alert)
if speakername =~ /^the speaker/i
member = @people.house_speaker(date)
# The name might be "The Deputy Speaker (Mr Smith)". So, take account of this
elsif speakername =~ /^the deputy speaker/i
# Check name in brackets
match = speakername.match(/^the deputy speaker \((.*)\)/i)
if match
logger.warn "Deputy speaker is #{match[1]}"
speakername = match[1]
name = Name.title_first_last(speakername)
member = @people.find_member_by_name_current_on_date(name, date)
else
member = @people.deputy_house_speaker(date)
end
else
# Lookup id of member based on speakername
name = Name.title_first_last(speakername)
member = @people.find_member_by_name_current_on_date(name, date)
end
if member.nil?
logger.warn "Unknown speaker #{speakername}"
member = UnknownSpeaker.new(speakername)
end
member
end
# Render `doc` to a string and remove every HTML/XML tag from it.
def strip_tags(doc)
  doc.to_s.gsub(/<\/?[^>]*>/, "")
end
# Return the smaller of the two values.
def min(a, b)
  a < b ? a : b
end
end
|
require 'multi_json'
module Heroku
module Helpers
extend self
# Current user's home directory; Windows USERPROFILE backslashes are
# normalized to forward slashes.
def home_directory
running_on_windows? ? ENV['USERPROFILE'].gsub("\\","/") : ENV['HOME']
end
# Truthy (match index) on Windows Ruby builds, nil otherwise.
def running_on_windows?
RUBY_PLATFORM =~ /mswin32|mingw32/
end
# Truthy (match index) on macOS, nil otherwise.
def running_on_a_mac?
RUBY_PLATFORM =~ /-darwin\d/
end
# Print `msg` (with or without trailing newline) and flush stdout so
# progress output appears immediately.
def display(msg="", new_line=true)
if new_line
puts(msg)
else
print(msg)
end
$stdout.flush
end
# Overwrite the current terminal line with `line` (CR + clear-to-EOL).
def redisplay(line, line_break = false)
display("\r\e[0K#{line}", line_break)
end
# Print a deprecation warning for an old command/flag.
def deprecate(message)
display "WARNING: #{message}"
end
# Prompt the user and return true only for a "y"/"yes" answer.
def confirm(message="Are you sure you wish to continue? (y/n)")
display("#{message} ", false)
['y', 'yes'].include?(ask.downcase)
end
# Guard destructive commands: accept a --confirm flag matching the target
# app, or interactively require the user to type the app name. Exits via
# #error on mismatch; returns true when confirmed.
# (`app` default comes from the including command class.)
def confirm_command(app_to_confirm = app, message=nil)
if confirmed_app = Heroku::Command.current_options[:confirm]
unless confirmed_app == app_to_confirm
raise(Heroku::Command::CommandFailed, "Confirmed app #{confirmed_app} did not match the selected app #{app_to_confirm}.")
end
return true
else
display
message ||= "WARNING: Destructive Action\nThis command will affect the app: #{app_to_confirm}"
message << "\nTo proceed, type \"#{app_to_confirm}\" or re-run this command with --confirm #{app_to_confirm}"
output_with_bang(message)
display
display "> ", false
if ask.downcase != app_to_confirm
error("Confirmation did not match #{app_to_confirm}. Aborted.")
else
true
end
end
end
# Render a Time (or parseable time string) as "YYYY-MM-DD HH:MM UTC".
def format_date(date)
date = Time.parse(date).utc if date.is_a?(String)
date.strftime("%Y-%m-%d %H:%M %Z").gsub('GMT', 'UTC')
end
# Read one stripped line from stdin ("" at EOF).
def ask
$stdin.gets.to_s.strip
end
# Run `cmd` in a subshell from the current directory, returning its stdout.
def shell(cmd)
FileUtils.cd(Dir.pwd) {|d| return `#{cmd}`}
end
# Dispatch to another CLI command by name.
def run_command(command, args=[])
Heroku::Command.run(command, args)
end
# Run the block, retrying up to 3 times (with a fixed 3s sleep) when any of
# the given exception classes is raised; re-raises after the final attempt.
def retry_on_exception(*exceptions)
retry_count = 0
begin
yield
rescue *exceptions => ex
raise ex if retry_count >= 3
sleep 3
retry_count += 1
retry
end
end
# True when a `git` executable is available on PATH.
def has_git?
%x{ git --version }
$?.success?
end
# Run a git subcommand, returning its combined (stripped) stdout+stderr,
# or "" when git is unavailable.
def git(args)
return "" unless has_git?
flattened_args = [args].flatten.compact.join(" ")
%x{ git #{flattened_args} 2>&1 }.strip
end
# Timestamp plus an approximate relative-age suffix. No suffix is added
# beyond 25 hours (the extra hour presumably allows some slack past one
# day -- TODO confirm intent).
def time_ago(since)
if since.is_a?(String)
since = Time.parse(since)
end
elapsed = Time.now - since
message = since.strftime("%Y/%m/%d %H:%M:%S")
if elapsed <= 60
message << " (~ #{elapsed.floor}s ago)"
elsif elapsed <= (60 * 60)
message << " (~ #{(elapsed / 60).floor}m ago)"
elsif elapsed <= (60 * 60 * 25)
message << " (~ #{(elapsed / 60 / 60).floor}h ago)"
end
message
end
# Shorten `text` to at most `length` characters; truncated output ends in
# ".." and is exactly `length` characters long.
def truncate(text, length)
  return text if text.size <= length
  "#{text[0, length - 2]}.."
end
@@kb = 1024
@@mb = 1024 * @@kb
@@gb = 1024 * @@mb
# Human-readable byte count: '(empty)', the raw number below 1k, otherwise
# k/M/G rounded to the nearest whole unit.
def format_bytes(amount)
  amount = amount.to_i
  return '(empty)' if amount == 0
  return amount if amount < @@kb
  # Convert to Float before rounding: with integer division the .round was
  # a no-op, so e.g. 1900 bytes displayed as "1k" instead of "2k".
  return "#{(amount.to_f / @@kb).round}k" if amount < @@mb
  return "#{(amount.to_f / @@mb).round}M" if amount < @@gb
  "#{(amount.to_f / @@gb).round}G"
end
# "3 apples" / "1 apple": count plus the noun, with a plain "s" appended
# unless the count is exactly 1.
def quantify(string, num)
  suffix = num.to_i == 1 ? "" : "s"
  "%d %s%s" % [num, string, suffix]
end
# Add a git remote named `remote` pointing at `url`, unless it already
# exists or the current directory is not a git checkout.
def create_git_remote(remote, url)
  return if git('remote').split("\n").include?(remote)
  # File.exist? -- the File.exists? alias is deprecated and was removed in
  # Ruby 3.2, which would make this method raise NoMethodError.
  return unless File.exist?(".git")
  git "remote add #{remote} #{url}"
  display "Git remote #{remote} added"
end
# Length of the longest item when rendered via #to_s (nil for an empty
# list). Uses max directly instead of sorting every length.
def longest(items)
  items.map { |item| item.to_s.length }.max
end
# Print `objects` (hash-like rows) as a padded table: header row, dashed
# separator, then one row per object, using the given column keys.
def display_table(objects, columns, headers)
lengths = []
columns.each_with_index do |column, index|
header = headers[index]
lengths << longest([header].concat(objects.map { |o| o[column].to_s }))
end
lines = lengths.map {|length| "-" * length}
lengths[-1] = 0 # remove padding from last column
display_row headers, lengths
display_row lines, lengths
objects.each do |row|
display_row columns.map { |column| row[column] }, lengths
end
end
# Print one table row; integer cells are right-aligned, everything else
# left-aligned, padded to the given column widths.
def display_row(row, lengths)
  row_data = []
  row.zip(lengths).each do |column, length|
    # Integer, not Fixnum: Fixnum was deprecated in Ruby 2.4 and removed in
    # 3.2. Integer matches the same values on every Ruby version.
    format = column.is_a?(Integer) ? "%#{length}s" : "%-#{length}s"
    row_data << format % column
  end
  display(row_data.join("  "))
end
# Serialize `object` to JSON via MultiJson (backend-agnostic).
def json_encode(object)
MultiJson.encode(object)
end
# Parse a JSON string via MultiJson; raises the backend's decode error on
# malformed input.
def json_decode(json)
MultiJson.decode(json)
end
# Toggle terminal canonical mode / echo via stty (no-op without a tty).
def set_buffer(enable)
with_tty do
if enable
`stty icanon echo`
else
`stty -icanon -echo`
end
end
end
# Run the block only when stdin is a tty, swallowing any error.
def with_tty(&block)
return unless $stdin.isatty
begin
yield
rescue
# fails on windows
end
end
# TERM/COLUMNS/LINES env hash for spawning terminal-aware subprocesses;
# falls back to TERM only when tput is unavailable.
def get_terminal_environment
{ "TERM" => ENV["TERM"], "COLUMNS" => `tput cols`.strip, "LINES" => `tput lines`.strip }
rescue
{ "TERM" => ENV["TERM"] }
end
# Abort the current command with a CommandFailed error. Note: deliberately
# shadows Kernel#fail for code that includes this module.
def fail(message)
raise Heroku::Command::CommandFailed, message
end
## DISPLAY HELPERS
# Print "message... " around the block, finishing with "done" (or
# options[:success]) plus any status recorded via #status. Returns the
# block's value. error_with_failure is toggled so #error prints "failed"
# if the block raises mid-action.
def action(message, options={})
  # Fixed user-visible typo: "organzation" -> "organization".
  message = "#{message} in organization #{org}" if options[:org]
  display("#{message}... ", false)
  Heroku::Helpers.error_with_failure = true
  ret = yield
  Heroku::Helpers.error_with_failure = false
  display((options[:success] || "done"), false)
  if @status
    display(", #{@status}", false)
    @status = nil
  end
  display
  ret
end
# Record a status note to be appended by the enclosing #action call.
def status(message)
@status = message
end
def format_with_bang(message)
return '' if message.to_s.strip == ""
" ! " + message.split("\n").join("\n ! ")
end
# Display `message` with the bang prefix; blank input prints nothing.
def output_with_bang(message="", new_line=true)
return if message.to_s.strip == ""
display(format_with_bang(message), new_line)
end
# Print "failed" if an #action is in flight, write the bang-formatted
# message to stderr, and exit the process with status 1.
def error(message)
if Heroku::Helpers.error_with_failure
display("failed")
Heroku::Helpers.error_with_failure = false
end
$stderr.puts(format_with_bang(message))
exit(1)
end
# Flag set by #action so #error knows to print "failed" first.
# NOTE: @@class variables are shared across the whole inheritance tree.
def self.error_with_failure
@@error_with_failure ||= false
end
def self.error_with_failure=(new_error_with_failure)
@@error_with_failure = new_error_with_failure
end
# Registries of every class/object this module was mixed into, maintained
# by the hooks below.
def self.included_into
@@included_into ||= []
end
def self.extended_into
@@extended_into ||= []
end
def self.included(base)
included_into << base
end
def self.extended(base)
extended_into << base
end
# Display `message` with a "=== " prefix on each line; blank input prints
# nothing.
def display_header(message="", new_line=true)
return if message.to_s.strip == ""
display("=== " + message.to_s.split("\n").join("\n=== "), new_line)
end
# Recursively pretty-print an object: arrays item by item, hashes of
# arrays as header + items, anything else via hputs.
def display_object(object)
case object
when Array
# list of objects
object.each do |item|
display_object(item)
end
when Hash
# if all values are arrays, it is a list with headers
# otherwise it is a single header with pairs of data
# NOTE(review): the "otherwise" case described above is not implemented --
# a Hash with any non-Array value is silently printed as nothing.
if object.values.all? {|value| value.is_a?(Array)}
object.keys.sort_by {|key| key.to_s}.each do |key|
display_header(key)
display_object(object[key])
hputs
end
end
else
hputs(object.to_s)
end
end
# puts/print wrappers that always go through Kernel, bypassing any local
# overrides; hprint also flushes stdout.
def hputs(string='')
Kernel.puts(string)
end
def hprint(string='')
Kernel.print(string)
$stdout.flush
end
# One frame of the four-frame text spinner, chosen by tick count.
def spinner(ticks)
  frames = %w(/ - \\ |)
  frames[ticks % frames.length]
end
# Open `url` in the user's browser (via the launchy gem), wrapped in an
# #action progress message. Joins the launcher when it supports it.
def launchy(message, url)
action(message) do
require("launchy")
launchy = Launchy.open(url)
if launchy.respond_to?(:join)
launchy.join
end
end
end
# produces a printf formatter line for an array of items
# if an individual line item is an array, it will create columns
# that are lined-up
#
#   line_formatter(["foo", "barbaz"])                    # => "%s"
#   line_formatter([["foo", "bar"], ["barbaz", "qux"]])  # => "%-6s %-3s"
#
# (Examples corrected: the method takes one array; only nested arrays
# produce per-column widths.)
def line_formatter(array)
  rows = array.select { |item| item.is_a?(Array) }
  return "%s" if rows.empty?
  widths = []
  rows.each do |row|
    row.each_with_index do |value, column|
      width = (value || '').length
      widths[column] = [widths[column] || 0, width].max
    end
  end
  widths.map { |width| "%-#{width}s" }.join(" ")
end
# Print an array (sorted unless options[:sort] == false) using the
# column-aligned format from #line_formatter, trailing blank line included.
def styled_array(array, options={})
fmt = line_formatter(array)
array = array.sort unless options[:sort] == false
array.each do |element|
display((fmt % element).rstrip)
end
display
end
def format_error(error, message='Heroku client internal error.')
formatted_error = []
formatted_error << " ! #{message}"
formatted_error << ' ! Search for help at: https://help.heroku.com'
formatted_error << ' ! Or report a bug at: https://github.com/heroku/heroku/issues/new'
formatted_error << ''
formatted_error << " Error: #{error.message} (#{error.class})"
formatted_error << " Backtrace: #{error.backtrace.first}"
error.backtrace[1..-1].each do |line|
formatted_error << " #{line}"
end
if error.backtrace.length > 1
formatted_error << ''
end
command = ARGV.map do |arg|
if arg.include?(' ')
arg = %{"#{arg}"}
else
arg
end
end.join(' ')
formatted_error << " Command: heroku #{command}"
require 'heroku/auth'
unless Heroku::Auth.host == Heroku::Auth.default_host
formatted_error << " Host: #{Heroku::Auth.host}"
end
if http_proxy = ENV['http_proxy'] || ENV['HTTP_PROXY']
formatted_error << " HTTP Proxy: #{http_proxy}"
end
if https_proxy = ENV['https_proxy'] || ENV['HTTPS_PROXY']
formatted_error << " HTTPS Proxy: #{https_proxy}"
end
plugins = Heroku::Plugin.list.sort
unless plugins.empty?
formatted_error << " Plugins: #{plugins.first}"
plugins[1..-1].each do |plugin|
formatted_error << " #{plugin}"
end
if plugins.length > 1
formatted_error << ''
$stderr.puts
end
end
formatted_error << " Version: #{Heroku.user_agent}"
formatted_error << "\n"
formatted_error.join("\n")
end
# Print the formatted internal-error report to stderr, emitting "failed"
# first when an #action is in flight. Unlike #error, does not exit.
def styled_error(error, message='Heroku client internal error.')
if Heroku::Helpers.error_with_failure
display("failed")
Heroku::Helpers.error_with_failure = false
end
$stderr.puts(format_error(error, message))
end
# Print a "=== header" section line.
def styled_header(header)
display("=== #{header}")
end
# Pretty-print a hash as aligned "key: value" lines; array values are
# listed one per line under their key, nil values skipped.
# NOTE(review): an empty hash makes `max` return nil and `nil + 2` raise --
# callers presumably never pass one; confirm.
def styled_hash(hash, keys=nil)
max_key_length = hash.keys.map {|key| key.to_s.length}.max + 2
keys ||= hash.keys.sort {|x,y| x.to_s <=> y.to_s}
keys.each do |key|
case value = hash[key]
when Array
if value.empty?
next
else
elements = value.sort {|x,y| x.to_s <=> y.to_s}
display("#{key}: ".ljust(max_key_length), false)
display(elements[0])
elements[1..-1].each do |element|
display("#{' ' * max_key_length}#{element}")
end
if elements.length > 1
display
end
end
when nil
next
else
display("#{key}: ".ljust(max_key_length), false)
display(value)
end
end
end
# Damerau-Levenshtein (optimal string alignment) distance between `first`
# and `last`: minimum insertions, deletions, substitutions and adjacent
# transpositions needed to turn one into the other.
def string_distance(first, last)
  # DP table: table[i][j] = distance between first[0,i] and last[0,j].
  table = Array.new(first.length + 1) { |i| [i] + [0] * last.length }
  table[0] = (0..last.length).to_a
  1.upto(first.length) do |i|
    1.upto(last.length) do |j|
      a = first[i - 1, 1]
      b = last[j - 1, 1]
      if a == b
        table[i][j] = table[i - 1][j - 1] # characters match: no edit
        next
      end
      best = [
        table[i - 1][j],     # deletion
        table[i][j - 1],     # insertion
        table[i - 1][j - 1]  # substitution
      ].min + 1
      # Adjacent transposition ("ab" <-> "ba") counts as a single edit.
      if i > 1 && j > 1 && a == last[j - 2, 1] && b == first[i - 2, 1]
        best = [best, table[i - 2][j - 2] + 1].min
      end
      table[i][j] = best
    end
  end
  table[first.length][last.length]
end
def suggestion(actual, possibilities)
distances = Hash.new {|hash,key| hash[key] = []}
possibilities.each do |suggestion|
distances[string_distance(actual, suggestion)] << suggestion
end
minimum_distance = distances.keys.min
if minimum_distance < 4
suggestions = distances[minimum_distance].sort
if suggestions.length == 1
"Perhaps you meant `#{suggestions.first}`."
else
"Perhaps you meant #{suggestions[0...-1].map {|suggestion| "`#{suggestion}`"}.join(', ')} or `#{suggestions.last}`."
end
else
nil
end
end
def org_host
ENV["HEROKU_ORG_HOST"] || default_org_host
end
def default_org_host
"herokumanager.com"
end
def org? email
email =~ /^.*@#{org_host}$/
end
def app_owner email
org?(email) ? email.gsub(/^(.*)@#{org_host}$/,'\1') : email
end
end
end
Rescue MultiJson.decode errors and return nil.
require 'multi_json'
module Heroku
module Helpers
extend self
# Platform helpers.

# Path to the current user's home directory; on native Windows rubies
# USERPROFILE is used, with backslashes normalized to forward slashes.
def home_directory
  if running_on_windows?
    ENV['USERPROFILE'].gsub("\\", "/")
  else
    ENV['HOME']
  end
end

# Truthy (match index) on native Windows ruby builds.
def running_on_windows?
  RUBY_PLATFORM =~ /mswin32|mingw32/
end

# Truthy (match index) on macOS.
def running_on_a_mac?
  RUBY_PLATFORM =~ /-darwin\d/
end
# Prints +msg+ to stdout (with a newline unless new_line is false)
# and flushes so progress output appears immediately.
def display(msg="", new_line=true)
if new_line
puts(msg)
else
print(msg)
end
$stdout.flush
end
# Rewrites the current terminal line (carriage return + ANSI clear-line).
def redisplay(line, line_break = false)
display("\r\e[0K#{line}", line_break)
end
# Prints a deprecation warning.
def deprecate(message)
display "WARNING: #{message}"
end
# Prompts the user; true when they answer "y" or "yes" (case-insensitive).
def confirm(message="Are you sure you wish to continue? (y/n)")
display("#{message} ", false)
['y', 'yes'].include?(ask.downcase)
end
# Guard for destructive commands. If --confirm was passed it must name
# +app_to_confirm+ (otherwise CommandFailed is raised); without the
# flag, the user is asked to retype the app name, and a mismatch calls
# error() which exits the process. Returns true on success.
# NOTE(review): when a caller supplies +message+, the `message <<`
# below mutates the caller's string in place.
def confirm_command(app_to_confirm = app, message=nil)
if confirmed_app = Heroku::Command.current_options[:confirm]
unless confirmed_app == app_to_confirm
raise(Heroku::Command::CommandFailed, "Confirmed app #{confirmed_app} did not match the selected app #{app_to_confirm}.")
end
return true
else
display
message ||= "WARNING: Destructive Action\nThis command will affect the app: #{app_to_confirm}"
message << "\nTo proceed, type \"#{app_to_confirm}\" or re-run this command with --confirm #{app_to_confirm}"
output_with_bang(message)
display
display "> ", false
if ask.downcase != app_to_confirm
error("Confirmation did not match #{app_to_confirm}. Aborted.")
else
true
end
end
end
# Formats a Time (or parseable date String) as "YYYY-MM-DD HH:MM UTC".
# String input is parsed and converted to UTC; the GMT zone name some
# rubies emit is normalized to "UTC".
def format_date(date)
  time = date.is_a?(String) ? Time.parse(date).utc : date
  time.strftime("%Y-%m-%d %H:%M %Z").gsub('GMT', 'UTC')
end

# Reads one line from stdin, stripped; "" on EOF.
def ask
  ($stdin.gets || "").strip
end
# Runs +cmd+ in a subshell from the current working directory and
# returns its stdout.
def shell(cmd)
FileUtils.cd(Dir.pwd) {|d| return `#{cmd}`}
end
# Dispatches to another CLI command by name.
def run_command(command, args=[])
Heroku::Command.run(command, args)
end
# Runs the block, retrying up to 3 times (with a 3s pause between
# attempts) when one of the listed exception classes is raised.
# The final failure is re-raised; other exceptions propagate at once.
def retry_on_exception(*exceptions)
  attempts = 0
  begin
    yield
  rescue *exceptions => error
    raise error if attempts >= 3
    sleep 3
    attempts += 1
    retry
  end
end
# True when a usable `git` binary is on the PATH.
def has_git?
%x{ git --version }
$?.success?
end
# Runs a git subcommand, returning trimmed combined stdout+stderr,
# or "" when git is not available.
def git(args)
return "" unless has_git?
flattened_args = [args].flatten.compact.join(" ")
%x{ git #{flattened_args} 2>&1 }.strip
end
# Formats +since+ (Time or parseable String) as "YYYY/MM/DD HH:MM:SS",
# appending an approximate relative suffix ("(~ Ns ago)" / minutes /
# hours) for anything less than 25 hours old.
def time_ago(since)
  since = Time.parse(since) if since.is_a?(String)
  elapsed = Time.now - since
  stamp = since.strftime("%Y/%m/%d %H:%M:%S")
  minute = 60
  hour = 60 * minute
  if elapsed <= minute
    stamp << " (~ #{elapsed.floor}s ago)"
  elsif elapsed <= hour
    stamp << " (~ #{(elapsed / minute).floor}m ago)"
  elsif elapsed <= 25 * hour
    stamp << " (~ #{(elapsed / hour).floor}h ago)"
  end
  stamp
end
# Truncates +text+ to at most +length+ characters, replacing the tail
# with ".." when a cut is made.
def truncate(text, length)
  return text if text.size <= length
  text[0, length - 2] + '..'
end
# Size units in bytes.
# NOTE(review): class variables are shared across the inheritance tree
# of every includer; harmless here since they are constants in practice.
@@kb = 1024
@@mb = 1024 * @@kb
@@gb = 1024 * @@mb
# Formats a byte count as a short human-readable string: "(empty)" for
# 0, the raw Integer below 1k, otherwise integer-truncated "Nk"/"NM"/"NG".
def format_bytes(amount)
amount = amount.to_i
return '(empty)' if amount == 0
return amount if amount < @@kb
return "#{(amount / @@kb).round}k" if amount < @@mb
return "#{(amount / @@mb).round}M" if amount < @@gb
return "#{(amount / @@gb).round}G"
end
# Naively pluralizes: quantify("app", 1) => "1 app", quantify("app", 3)
# => "3 apps".
def quantify(string, num)
  unit = num.to_i == 1 ? string : "#{string}s"
  format("%d %s", num, unit)
end
# Adds git remote +remote+ pointing at +url+ when run inside a git
# repository and the remote does not already exist.
def create_git_remote(remote, url)
return if git('remote').split("\n").include?(remote)
return unless File.exists?(".git")
git "remote add #{remote} #{url}"
display "Git remote #{remote} added"
end
# Length of the longest element once stringified; nil for an empty list.
def longest(items)
  items.map { |item| item.to_s.length }.max
end
# Renders +objects+ as a padded text table. +columns+ are the keys read
# from each row object; +headers+ the printed column titles. Column
# widths are sized to the widest cell (or header) in each column.
def display_table(objects, columns, headers)
lengths = []
columns.each_with_index do |column, index|
header = headers[index]
lengths << longest([header].concat(objects.map { |o| o[column].to_s }))
end
lines = lengths.map {|length| "-" * length}
lengths[-1] = 0 # remove padding from last column
display_row headers, lengths
display_row lines, lengths
objects.each do |row|
display_row columns.map { |column| row[column] }, lengths
end
end
# Prints one table row: numeric cells are right-aligned in their
# column width, everything else left-aligned.
# Fix: test against Integer instead of Fixnum -- Fixnum was removed in
# Ruby 3.2 (NameError here); Fixnum instances have always been
# Integers, so behavior is unchanged on older rubies.
def display_row(row, lengths)
  row_data = []
  row.zip(lengths).each do |column, length|
    format = column.is_a?(Integer) ? "%#{length}s" : "%-#{length}s"
    row_data << format % column
  end
  display(row_data.join("  "))
end
# JSON serialization via MultiJson (pluggable backend).
def json_encode(object)
MultiJson.encode(object)
end
# Decodes JSON; returns nil on malformed input.
def json_decode(json)
MultiJson.decode(json)
rescue MultiJson::LoadError
nil
end
# Toggles terminal canonical mode and echo via stty (no-op without a tty).
def set_buffer(enable)
with_tty do
if enable
`stty icanon echo`
else
`stty -icanon -echo`
end
end
end
# Yields only when stdin is a tty; errors are swallowed deliberately.
def with_tty(&block)
return unless $stdin.isatty
begin
yield
rescue
# fails on windows
end
end
# Terminal environment (TERM/COLUMNS/LINES) for spawned processes;
# falls back to TERM alone when tput is unavailable.
def get_terminal_environment
{ "TERM" => ENV["TERM"], "COLUMNS" => `tput cols`.strip, "LINES" => `tput lines`.strip }
rescue
{ "TERM" => ENV["TERM"] }
end
# Aborts the current command with CommandFailed.
# NOTE(review): shadows Kernel#fail for includers.
def fail(message)
raise Heroku::Command::CommandFailed, message
end
## DISPLAY HELPERS

# Runs a block while printing "message... done"-style progress output:
# prints "message... ", yields, then prints the success word (default
# "done") plus any status recorded via #status during the block.
# While the block runs, error_with_failure is set so error() /
# styled_error() close the line with "failed" instead.
# Returns the block's value.
# Fix: corrected user-facing typo "organzation" -> "organization".
# NOTE(review): `org` is not defined in this module; it is presumably
# supplied by the including command class -- verify at call sites.
def action(message, options={})
  message = "#{message} in organization #{org}" if options[:org]
  display("#{message}... ", false)
  Heroku::Helpers.error_with_failure = true
  ret = yield
  Heroku::Helpers.error_with_failure = false
  display((options[:success] || "done"), false)
  if @status
    display(", #{@status}", false)
    @status = nil
  end
  display
  ret
end

# Records a short status string appended after "done" by #action.
def status(message)
  @status = message
end
# Prefixes every line of +message+ with the " !    " bang marker;
# blank/nil input yields the empty string.
def format_with_bang(message)
  return '' if message.to_s.strip == ""
  lines = message.split("\n")
  " !    " + lines.join("\n !    ")
end

# Prints +message+ with bang formatting; blank/nil input prints nothing.
def output_with_bang(message="", new_line=true)
  return if message.to_s.strip == ""
  display(format_with_bang(message), new_line)
end
# Prints +message+ with bang formatting to stderr and exits(1),
# terminating a pending #action progress line with "failed" first.
def error(message)
if Heroku::Helpers.error_with_failure
display("failed")
Heroku::Helpers.error_with_failure = false
end
$stderr.puts(format_with_bang(message))
exit(1)
end
# Flag set by #action around its block: when true, error()/styled_error()
# close the in-progress "..." line with "failed".
def self.error_with_failure
@@error_with_failure ||= false
end
def self.error_with_failure=(new_error_with_failure)
@@error_with_failure = new_error_with_failure
end
# Bookkeeping of every class/object this module was mixed into,
# recorded by the included/extended hooks below.
def self.included_into
@@included_into ||= []
end
def self.extended_into
@@extended_into ||= []
end
def self.included(base)
included_into << base
end
def self.extended(base)
extended_into << base
end
# Prints "=== " before every line of +message+; blank input is ignored.
def display_header(message="", new_line=true)
return if message.to_s.strip == ""
display("=== " + message.to_s.split("\n").join("\n=== "), new_line)
end
# Recursively renders arrays (one item per line) and hashes whose
# values are all arrays (header per key, nested content, blank line);
# scalars are printed via hputs.
# NOTE(review): hashes with any non-array value are silently skipped,
# despite the "pairs of data" comment below -- confirm this is intended.
def display_object(object)
case object
when Array
# list of objects
object.each do |item|
display_object(item)
end
when Hash
# if all values are arrays, it is a list with headers
# otherwise it is a single header with pairs of data
if object.values.all? {|value| value.is_a?(Array)}
object.keys.sort_by {|key| key.to_s}.each do |key|
display_header(key)
display_object(object[key])
hputs
end
end
else
hputs(object.to_s)
end
end
# Raw stdout line (no explicit flush).
def hputs(string='')
Kernel.puts(string)
end
# Raw stdout write with flush.
def hprint(string='')
Kernel.print(string)
$stdout.flush
end
# Returns the spinner glyph for a tick count, cycling through / - \ |.
def spinner(ticks)
  glyphs = %w(/ - \\ |)
  glyphs[ticks % glyphs.size]
end
# Opens +url+ in the default browser via the launchy gem (lazily
# required), wrapped in an #action progress line.
def launchy(message, url)
action(message) do
require("launchy")
launchy = Launchy.open(url)
if launchy.respond_to?(:join)
launchy.join
end
end
end
# produces a printf formatter line for an array of items
# if an individual line item is an array, it will create columns
# that are lined-up
#
# (examples corrected: a flat array yields a single "%s"; aligned
# columns only appear when the items themselves are arrays)
# line_formatter(["foo", "barbaz"])                    # => "%s"
# line_formatter([["foo", "barbaz"], ["bar", "qux"]])  # => left-aligned
#   width specifiers per column, e.g. "%-3s" and "%-6s" joined
#
def line_formatter(array)
if array.any? {|item| item.is_a?(Array)}
cols = []
array.each do |item|
if item.is_a?(Array)
# track the widest value seen at each column position
item.each_with_index { |val,idx| cols[idx] = [cols[idx]||0, (val || '').length].max }
end
end
cols.map { |col| "%-#{col}s" }.join("  ")
else
"%s"
end
end
# Sorts +array+ (unless options[:sort] == false) and prints each
# element, using line_formatter to align columns when the elements are
# themselves arrays; ends with a blank line.
def styled_array(array, options={})
fmt = line_formatter(array)
array = array.sort unless options[:sort] == false
array.each do |element|
display((fmt % element).rstrip)
end
display
end
def format_error(error, message='Heroku client internal error.')
formatted_error = []
formatted_error << " ! #{message}"
formatted_error << ' ! Search for help at: https://help.heroku.com'
formatted_error << ' ! Or report a bug at: https://github.com/heroku/heroku/issues/new'
formatted_error << ''
formatted_error << " Error: #{error.message} (#{error.class})"
formatted_error << " Backtrace: #{error.backtrace.first}"
error.backtrace[1..-1].each do |line|
formatted_error << " #{line}"
end
if error.backtrace.length > 1
formatted_error << ''
end
command = ARGV.map do |arg|
if arg.include?(' ')
arg = %{"#{arg}"}
else
arg
end
end.join(' ')
formatted_error << " Command: heroku #{command}"
require 'heroku/auth'
unless Heroku::Auth.host == Heroku::Auth.default_host
formatted_error << " Host: #{Heroku::Auth.host}"
end
if http_proxy = ENV['http_proxy'] || ENV['HTTP_PROXY']
formatted_error << " HTTP Proxy: #{http_proxy}"
end
if https_proxy = ENV['https_proxy'] || ENV['HTTPS_PROXY']
formatted_error << " HTTPS Proxy: #{https_proxy}"
end
plugins = Heroku::Plugin.list.sort
unless plugins.empty?
formatted_error << " Plugins: #{plugins.first}"
plugins[1..-1].each do |plugin|
formatted_error << " #{plugin}"
end
if plugins.length > 1
formatted_error << ''
$stderr.puts
end
end
formatted_error << " Version: #{Heroku.user_agent}"
formatted_error << "\n"
formatted_error.join("\n")
end
# Prints a formatted internal-error report for +error+ to stderr,
# closing a pending #action progress line with "failed" first.
def styled_error(error, message='Heroku client internal error.')
if Heroku::Helpers.error_with_failure
display("failed")
Heroku::Helpers.error_with_failure = false
end
$stderr.puts(format_error(error, message))
end
# Prints a "=== header" section header line.
def styled_header(header)
display("=== #{header}")
end
# Pretty-prints +hash+ as aligned "key: value" lines; array values are
# listed one per line under their key, empty arrays and nil values are
# skipped, and +keys+ optionally restricts/orders the output.
def styled_hash(hash, keys=nil)
max_key_length = hash.keys.map {|key| key.to_s.length}.max + 2
keys ||= hash.keys.sort {|x,y| x.to_s <=> y.to_s}
keys.each do |key|
case value = hash[key]
when Array
if value.empty?
next
else
elements = value.sort {|x,y| x.to_s <=> y.to_s}
display("#{key}: ".ljust(max_key_length), false)
display(elements[0])
elements[1..-1].each do |element|
display("#{' ' * max_key_length}#{element}")
end
# blank separator line after multi-element lists
if elements.length > 1
display
end
end
when nil
next
else
display("#{key}: ".ljust(max_key_length), false)
display(value)
end
end
end
# Damerau-Levenshtein distance (optimal string alignment variant):
# the minimum number of single-character insertions, deletions,
# substitutions and adjacent transpositions turning +first+ into +last+.
def string_distance(first, last)
  rows = first.length
  cols = last.length
  # matrix[i][j] = distance between first[0, i] and last[0, j]
  matrix = Array.new(rows + 1) { |i| [i] + [0] * cols }
  matrix[0] = (0..cols).to_a
  1.upto(cols) do |j|
    1.upto(rows) do |i|
      a = first[i - 1, 1]
      b = last[j - 1, 1]
      if a == b
        matrix[i][j] = matrix[i - 1][j - 1]
      else
        matrix[i][j] = 1 + [
          matrix[i - 1][j],     # deletion
          matrix[i][j - 1],     # insertion
          matrix[i - 1][j - 1]  # substitution
        ].min
        if i > 1 && j > 1 && a == last[j - 2, 1] && first[i - 2, 1] == b
          # adjacent transposition
          matrix[i][j] = [matrix[i][j], matrix[i - 2][j - 2] + 1].min
        end
      end
    end
  end
  matrix[rows][cols]
end
# Builds a "Perhaps you meant ..." hint for +actual+ from the closest
# entries in +possibilities+ (edit distance < 4); returns nil when
# nothing is close enough.
# Fixes: guard against an empty possibilities list, which previously
# raised NoMethodError (`distances.keys.min` is nil, so `nil < 4`);
# renamed the block parameter that shadowed this method's name.
def suggestion(actual, possibilities)
  distances = Hash.new { |hash, key| hash[key] = [] }
  possibilities.each do |possibility|
    distances[string_distance(actual, possibility)] << possibility
  end
  minimum_distance = distances.keys.min
  return nil if minimum_distance.nil? || minimum_distance >= 4
  matches = distances[minimum_distance].sort
  if matches.length == 1
    "Perhaps you meant `#{matches.first}`."
  else
    "Perhaps you meant #{matches[0...-1].map {|match| "`#{match}`"}.join(', ')} or `#{matches.last}`."
  end
end
# Org-account helpers.

# Host suffix that marks org-managed accounts; HEROKU_ORG_HOST overrides.
def org_host
  ENV.fetch("HEROKU_ORG_HOST") { default_org_host }
end

# Built-in org host suffix.
def default_org_host
  "herokumanager.com"
end

# Truthy (match index) when +email+ belongs to the org host.
def org?(email)
  org_pattern = /^.*@#{org_host}$/
  email =~ org_pattern
end

# Strips the org host from an org e-mail; other addresses pass through.
def app_owner(email)
  return email unless org?(email)
  email.gsub(/^(.*)@#{org_host}$/, '\1')
end
end
end
|
module Hicube
# Gem version string (semantic versioning).
VERSION = "2.1.6"
end
v2.2.0
module Hicube
# Gem version string (semantic versioning).
VERSION = "2.2.0"
end
|
module Hipbot
# Gem version string (semantic versioning).
VERSION = "0.0.5"
end
bump version to 0.1.0
module Hipbot
# Gem version string (semantic versioning).
VERSION = "0.1.0"
end
|
# Gem entry point: loads the Rails engine and its asset dependencies.
require "hot_date_rails/engine"
require 'coffee-rails'
# NOTE(review): pry-rails is a debugging console, skipped in production.
require 'pry-rails' unless Rails.env == "production"
require 'jquery-rails'
# Namespace for the hot_date_rails gem.
module HotDateRails
end
remove pry
# Gem entry point: loads the Rails engine and its asset dependencies.
require "hot_date_rails/engine"
require 'coffee-rails'
# pry-rails debugging console intentionally disabled.
#require 'pry-rails' unless Rails.env == "production"
require 'jquery-rails'
# Namespace for the hot_date_rails gem.
module HotDateRails
end
|
module Hotdog
# Gem version string (semantic versioning).
VERSION = "0.19.0"
end
Bump version; 0.20.0 [ci skip]
module Hotdog
# Gem version string (semantic versioning).
VERSION = "0.20.0"
end
|
# encoding: utf-8
require "json"
require "pebblebed/sinatra"
Dir.glob("#{File.dirname(__FILE__)}/v1/**/*.rb").each{ |file| require file }
class TiramisuV1 < Sinatra::Base
set :root, "#{File.dirname(__FILE__)}/v1"
register Sinatra::Pebblebed
i_am :tiramisu
set :config, YAML::load(File.open("config/services.yml"))[ENV['RACK_ENV']]
helpers do
def asset_store
Thread.current[:asset_store] ||= AssetStore.new(settings.config['S3'])
end
def progress
Thread.current[:progress_tracker] ||= ProgressTracker.new(Dalli::Client.new(settings.config['memcached']))
end
def tootsie(pipeline)
Thread.current[:tootsie_pipelines] ||= {}
Thread.current[:tootsie_pipelines][pipeline.to_sym] ||= TootsiePipeline.new(settings.config['tootsie'][pipeline.to_s])
end
end
get '/progress' do
haml :progress
end
get '/tick' do
expires -1, :public, :must_revalidate
content_type 'text/plain' if request.user_agent =~ /MSIE/
stream do |out|
out << " " * 256 if request.user_agent =~ /MSIE/ # ie need ~ 250 k of prelude before it starts flushing the response buffer
i = 0
while i <= 100 do
i += rand(5)
out << "#{i};#{[i,100].min()}% (#{i < 15 ? 4 : i < 35 ? 3 : i < 80 ? 2 : 1} av 4 operasjoner gjenstår)\n"
#out << "#{i}\r\n"
sleep 0.1
end
end
end
end
Add header to prevent proxy buffering
# encoding: utf-8
require "json"
require "pebblebed/sinatra"
Dir.glob("#{File.dirname(__FILE__)}/v1/**/*.rb").each{ |file| require file }
# Tiramisu v1 HTTP API (Sinatra + Pebblebed service registration).
class TiramisuV1 < Sinatra::Base
set :root, "#{File.dirname(__FILE__)}/v1"
register Sinatra::Pebblebed
i_am :tiramisu
# Per-RACK_ENV service configuration (S3, memcached, tootsie).
set :config, YAML::load(File.open("config/services.yml"))[ENV['RACK_ENV']]
helpers do
# Thread-local S3-backed asset store.
def asset_store
Thread.current[:asset_store] ||= AssetStore.new(settings.config['S3'])
end
# Thread-local upload-progress tracker backed by memcached.
def progress
Thread.current[:progress_tracker] ||= ProgressTracker.new(Dalli::Client.new(settings.config['memcached']))
end
# Thread-local, per-pipeline tootsie client, lazily constructed.
def tootsie(pipeline)
Thread.current[:tootsie_pipelines] ||= {}
Thread.current[:tootsie_pipelines][pipeline.to_sym] ||= TootsiePipeline.new(settings.config['tootsie'][pipeline.to_s])
end
end
# Renders the progress demo page.
get '/progress' do
haml :progress
end
# Streams fake progress ticks until 100% (demo/testing endpoint).
get '/tick' do
response['X-Accel-Buffering'] = 'no' # prevent buffering in proxy server
expires -1, :public, :must_revalidate
content_type 'text/plain' if request.user_agent =~ /MSIE/
stream do |out|
out << " " * 256 if request.user_agent =~ /MSIE/ # ie need ~ 250 k of prelude before it starts flushing the response buffer
i = 0
while i <= 100 do
i += rand(5)
out << "#{i};#{[i,100].min()}% (#{i < 15 ? 4 : i < 35 ? 3 : i < 80 ? 2 : 1} av 4 operasjoner gjenstår)\n"
#out << "#{i}\r\n"
sleep 0.1
end
end
end
end
|
Add specs for DynamicField
This rounds out the complete Yaks::Mapper* namespace; we are back at 100% coverage.
# Specs for Yaks::Mapper::Form::DynamicField.
RSpec.describe Yaks::Mapper::Form::DynamicField do
describe ".create" do
# .create stores the given block for later evaluation.
it "take a block" do
expect(described_class.create { :foo }.block.call).to equal :foo
end
it "should ignore any options hash given" do
expect { described_class.create(foo: :bar) }.to_not raise_error
end
end
describe "#to_resource_fields" do
include_context "yaks context"
let(:mapper) { Yaks::Mapper.new(yaks_context) }
let(:field) {
described_class.create do |obj|
text :first_name, value: obj
text :last_name
end
}
# The stored block is evaluated with the mapper's current object,
# producing one Resource::Form::Field per declared field.
it "should return an array of fields" do
mapper.call("Arne")
expect(field.to_resource_fields(mapper)).to eql [
Yaks::Resource::Form::Field.new(name: :first_name, type: :text, value: "Arne"),
Yaks::Resource::Form::Field.new(name: :last_name, type: :text)
]
end
end
end
|
# This file is part of Metasm, the Ruby assembly manipulation suite
# Copyright (C) 2007 Yoann GUILLOT
#
# Licence is LGPL, see LICENCE in the top-level directory
require 'metasm/exe_format/main'
require 'metasm/exe_format/mz'
require 'metasm/exe_format/coff_encode'
require 'metasm/exe_format/coff_decode'
module Metasm
class PE < COFF
PESIG = "PE\0\0"
attr_accessor :coff_offset, :signature, :mz
def initialize(cpu=nil)
@mz = MZ.new(cpu)
super(cpu)
end
# overrides COFF#decode_header
# simply sets the offset to the PE pointer before decoding the COFF header
# also checks the PE signature
def decode_header
@encoded.ptr = 0x3c
@encoded.ptr = decode_word
@signature = @encoded.read(4)
raise InvalidExeFormat, "Invalid PE signature #{@signature.inspect}" if @signature != PESIG
@coff_offset = @encoded.ptr
if @mz.encoded.empty?
@mz.encoded << @encoded[0, @coff_offset-4]
@mz.encoded.ptr = 0
@mz.decode_header
end
super
end
# creates a default MZ file to be used in the PE header
# this one is specially crafted to fit in the 0x3c bytes before the signature
def encode_default_mz_header
# XXX use single-quoted source, to avoid ruby interpretation of \r\n
mzstubp = MZ.assemble(Ia32.new(386, 16), <<'EOMZSTUB')
_str db "Needs Win32!\r\n$"
start:
push cs
pop ds
xor dx, dx ; ds:dx = addr of $-terminated string
mov ah, 9 ; output string
int 21h
mov ax, 4c01h ; exit with code in al
int 21h
EOMZSTUB
mzparts = @mz.pre_encode
# put stuff before 0x3c
@mz.encoded << mzparts.shift
raise 'OH NOES !!1!!!1!' if @mz.encoded.virtsize > 0x3c # MZ header is too long, cannot happen
until mzparts.empty?
break if mzparts.first.virtsize + @mz.encoded.virtsize > 0x3c
@mz.encoded << mzparts.shift
end
# set PE signature pointer
@mz.encoded.align 0x3c
@mz.encoded << encode_word('pesigptr')
# put last parts of the MZ program
until mzparts.empty?
@mz.encoded << mzparts.shift
end
# ensure the sig will be 8bytes-aligned
@mz.encoded.align 8
@mz.encoded.fixup 'pesigptr' => @mz.encoded.virtsize
@mz.encoded.fixup @mz.encoded.binding
@mz.encoded.fill
@mz.encode_fix_checksum
end
# encodes the PE header before the COFF header, uses a default mz header if none defined
# the MZ header must have 0x3c pointing just past its last byte which should be 8bytes aligned
# the 2 1st bytes of the MZ header should be 'MZ'
def encode_header(*a)
encode_default_mz_header if @mz.encoded.empty?
@encoded << @mz.encoded.dup
# append the PE signature
@signature ||= PESIG
@encoded << @signature
super
end
end
# an instance of a PE file, loaded in memory
# just change the rva_to_off and the section content decoding methods
class LoadedPE < PE
# just check the bounds / check for 0
# (in a loaded image the RVA is used directly as an offset into the
# flat memory dump)
def rva_to_off(rva)
rva if rva and rva > 0 and rva <= @encoded.virtsize
end
# use the virtualaddr/virtualsize fields of the section header
# to slice each section's bytes out of the flat memory image
def decode_sections
@sections.each { |s|
s.encoded = @encoded[s.virtaddr, s.virtsize]
}
end
end
end
pe: add preliminary LoadedPE.dump
# This file is part of Metasm, the Ruby assembly manipulation suite
# Copyright (C) 2007 Yoann GUILLOT
#
# Licence is LGPL, see LICENCE in the top-level directory
require 'metasm/exe_format/main'
require 'metasm/exe_format/mz'
require 'metasm/exe_format/coff_encode'
require 'metasm/exe_format/coff_decode'
module Metasm
class PE < COFF
PESIG = "PE\0\0"
attr_accessor :coff_offset, :signature, :mz
def initialize(cpu=nil)
@mz = MZ.new(cpu)
super(cpu)
end
# overrides COFF#decode_header
# simply sets the offset to the PE pointer before decoding the COFF header
# also checks the PE signature
def decode_header
@encoded.ptr = 0x3c
@encoded.ptr = decode_word
@signature = @encoded.read(4)
raise InvalidExeFormat, "Invalid PE signature #{@signature.inspect}" if @signature != PESIG
@coff_offset = @encoded.ptr
if @mz.encoded.empty?
@mz.encoded << @encoded[0, @coff_offset-4]
@mz.encoded.ptr = 0
@mz.decode_header
end
super
end
# creates a default MZ file to be used in the PE header
# this one is specially crafted to fit in the 0x3c bytes before the signature
def encode_default_mz_header
# XXX use single-quoted source, to avoid ruby interpretation of \r\n
mzstubp = MZ.assemble(Ia32.new(386, 16), <<'EOMZSTUB')
_str db "Needs Win32!\r\n$"
start:
push cs
pop ds
xor dx, dx ; ds:dx = addr of $-terminated string
mov ah, 9 ; output string
int 21h
mov ax, 4c01h ; exit with code in al
int 21h
EOMZSTUB
mzparts = @mz.pre_encode
# put stuff before 0x3c
@mz.encoded << mzparts.shift
raise 'OH NOES !!1!!!1!' if @mz.encoded.virtsize > 0x3c # MZ header is too long, cannot happen
until mzparts.empty?
break if mzparts.first.virtsize + @mz.encoded.virtsize > 0x3c
@mz.encoded << mzparts.shift
end
# set PE signature pointer
@mz.encoded.align 0x3c
@mz.encoded << encode_word('pesigptr')
# put last parts of the MZ program
until mzparts.empty?
@mz.encoded << mzparts.shift
end
# ensure the sig will be 8bytes-aligned
@mz.encoded.align 8
@mz.encoded.fixup 'pesigptr' => @mz.encoded.virtsize
@mz.encoded.fixup @mz.encoded.binding
@mz.encoded.fill
@mz.encode_fix_checksum
end
# encodes the PE header before the COFF header, uses a default mz header if none defined
# the MZ header must have 0x3c pointing just past its last byte which should be 8bytes aligned
# the 2 1st bytes of the MZ header should be 'MZ'
def encode_header(*a)
encode_default_mz_header if @mz.encoded.empty?
@encoded << @mz.encoded.dup
# append the PE signature
@signature ||= PESIG
@encoded << @signature
super
end
end
# an instance of a PE file, loaded in memory
# just change the rva_to_off and the section content decoding methods
class LoadedPE < PE
# just check the bounds / check for 0
def rva_to_off(rva)
rva if rva and rva > 0 and rva <= @encoded.virtsize
end
# use the virtualaddr/virtualsize fields of the section header
def decode_sections
@sections.each { |s|
s.encoded = @encoded[s.virtaddr, s.virtsize]
}
end
# returns a PE which should give us back when loaded
# TODO rebuild imports + revert base relocations
# Fix: append the freshly-built section copy (ss) to the new PE --
# previously the original section object (s) was pushed and the copy
# was discarded, so the dumped PE aliased the loaded image's sections.
def dump(baseaddr = @optheader.image_base, oep = baseaddr + @optheader.entrypoint)
  pe = PE.new
  pe.optheader.entrypoint = oep - baseaddr
  pe.optheader.image_base = @optheader.image_base
  @sections.each { |s|
    ss = Section.new
    ss.name = s.name
    ss.virtaddr = s.virtaddr
    ss.encoded = s.encoded
    ss.characteristics = s.characteristics
    pe.sections << ss
  }
  # pe.imports
  # pe.relocations
  pe
end
end
end
|
# This file is part of Metasm, the Ruby assembly manipulation suite
# Copyright (C) 2006-2009 Yoann GUILLOT
#
# Licence is LGPL, see LICENCE in the top-level directory
require 'metasm/x86_64/opcodes'
require 'metasm/decode'
module Metasm
class X86_64
class ModRM
def self.decode(edata, byte, endianness, adsz, opsz, seg=nil, regclass=Reg, pfx={})
m = (byte >> 6) & 3
rm = byte & 7
if m == 3
rm |= 8 if pfx[:rex_b]
return regclass.new(rm, opsz)
end
adsz ||= 64
# mod 0/1/2 m 4 => sib
# mod 0 m 5 => rip+imm
# sib: i 4 => no index, b 5 => no base
s = i = b = imm = nil
if rm == 4 # XXX pfx[:rex_b] ?
sib = edata.get_byte.to_i
ii = (sib >> 3) & 7
if ii != 4 # XXX pfx[:rex_x] ?
ii |= 8 if pfx[:rex_x]
s = 1 << ((sib >> 6) & 3)
i = Reg.new(ii, adsz)
end
bb = sib & 7
if bb == 5 and m == 0 # XXX pfx[:rex_b] ?
m = 2 # :i32 follows
else
bb |= 8 if pfx[:rex_b]
b = Reg.new(bb, adsz)
end
elsif rm == 5 and m == 0 # rip XXX pfx[:rex_b] ?
b = Reg.new(16, adsz)
m = 2 # :i32 follows
else
rm |= 8 if pfx[:rex_b]
b = Reg.new(rm, adsz)
end
case m
when 1; itype = :i8
when 2; itype = :i32
end
imm = Expression[edata.decode_imm(itype, endianness)] if itype
if imm and imm.reduce.kind_of? Integer and imm.reduce < -0x100_0000
# probably a base address -> unsigned
imm = Expression[imm.reduce & ((1 << adsz) - 1)]
end
new adsz, opsz, s, i, b, imm, seg
end
end
def decode_prefix(instr, byte)
x = super(instr, byte)
#return if instr.prefix[:rex] # must be the last prefix TODO check repetition/out of order
if byte & 0xf0 == 0x40
x = instr.prefix[:rex] = byte
instr.prefix[:rex_b] = 1 if byte & 1 > 0
instr.prefix[:rex_x] = 1 if byte & 2 > 0
instr.prefix[:rex_r] = 1 if byte & 4 > 0
instr.prefix[:rex_w] = 1 if byte & 8 > 0
end
x
end
# Decodes the operands of an instruction whose opcode was already
# matched (decode_findopcode), appending them to di.instruction.args.
# Handles REX-extended register fields, immediates (64-bit :i operands
# are sign-extended from :i32 unless props[:imm64]/[:unsigned_imm]),
# ModRM/SIB memory operands, movsx/movzx operand resizing, and
# rep/repz/repnz prefix naming.
# Fix: 'prx[:seg]' -> 'pfx[:seg]' on the :mrm_imm path (NameError).
def decode_instr_op(edata, di)
  before_ptr = edata.ptr
  op = di.opcode
  di.instruction.opname = op.name
  bseq = edata.read(op.bin.length).unpack('C*') # decode_findopcode ensures that data >= op.length
  pfx = di.instruction.prefix || {}

  # extracts an opcode field value from the raw instruction bytes
  field_val = lambda { |f|
    if fld = op.fields[f]
      (bseq[fld[0]] >> fld[1]) & @fields_mask[f]
    end
  }
  # same, widened to 4 bits by the relevant REX bit
  field_val_r = lambda { |f|
    v = field_val[f]
    v |= 8 if v and (op.fields[f][1] == 3 ? pfx[:rex_r] : pfx[:rex_b]) # gruick ?
    v
  }

  opsz = op.props[:argsz] || (pfx[:rex_w] || op.props[:auto64] ? 64 : pfx[:opsz] ? 16 : 32)
  adsz = pfx[:adsz] ? 32 : 64

  op.args.each { |a|
    di.instruction.args << case a
    when :reg; Reg.new field_val_r[a], opsz
    when :eeec; CtrlReg.new field_val_r[a]
    when :eeed; DbgReg.new field_val_r[a]
    when :seg2, :seg2A, :seg3, :seg3A; SegReg.new field_val[a]
    when :regxmm; SimdReg.new field_val_r[a], 128
    when :farptr; Farptr.decode edata, @endianness, opsz
    when :i8, :u8, :i16, :u16, :i32, :u32, :i64, :u64; Expression[edata.decode_imm(a, @endianness)]
    when :i # 64bit constants are sign-extended from :i32
      type = (opsz == 64 ? op.props[:imm64] ? :a64 : :i32 : "#{op.props[:unsigned_imm] ? 'a' : 'i'}#{opsz}".to_sym )
      v = edata.decode_imm(type, @endianness)
      v &= 0xffff_ffff_ffff_ffff if opsz == 64 and op.props[:unsigned_imm] and v.kind_of? Integer
      Expression[v]
    when :mrm_imm; ModRM.new(adsz, opsz, nil, nil, nil, Expression[edata.decode_imm("a#{adsz}".to_sym, @endianness)], pfx[:seg]) # XXX manuals say :a64, test it
    when :modrm, :modrmA; ModRM.decode edata, field_val[a], @endianness, adsz, opsz, pfx[:seg], Reg, pfx
    when :modrmxmm; ModRM.decode edata, field_val[:modrm], @endianness, adsz, 128, pfx[:seg], SimdReg, pfx
    when :imm_val1; Expression[1]
    when :imm_val3; Expression[3]
    when :reg_cl; Reg.new 1, 8
    when :reg_eax; Reg.new 0, opsz
    when :reg_dx; Reg.new 2, 16
    #when :regfp0; FpReg.new nil # implicit?
    else raise SyntaxError, "Internal error: invalid argument #{a} in #{op.name}"
    end
  }

  # sil => bh
  # without a REX prefix, 8bit reg encodings 4..7 are remapped (val+12)
  di.instruction.args.each { |a| a.val += 12 if a.kind_of? Reg and a.sz == 8 and not pfx[:rex] and a.val >= 4 and a.val <= 8 }

  di.bin_length += edata.ptr - before_ptr

  if op.name == 'movsx' or op.name == 'movzx'
    # TODO ?
    if opsz == 8
      di.instruction.args[1].sz = 8
    else
      di.instruction.args[1].sz = 16
    end
    if pfx[:opsz]
      di.instruction.args[0].sz = 16
    else
      di.instruction.args[0].sz = 32
    end
  end

  # segment override was consumed above; translate the rep prefix into
  # its display name depending on the string-op kind
  pfx.delete :seg
  case r = pfx.delete(:rep)
  when :nz
    if di.opcode.props[:strop]
      pfx[:rep] = 'rep'
    elsif di.opcode.props[:stropz]
      pfx[:rep] = 'repnz'
    end
  when :z
    if di.opcode.props[:strop]
      pfx[:rep] = 'rep'
    elsif di.opcode.props[:stropz]
      pfx[:rep] = 'repz'
    end
  end

  di
end
def opsz(di)
if di and di.instruction.prefix and di.instruction.prefix[:rex_w]; 64
elsif di and di.instruction.prefix and di.instruction.prefix[:opsz]; 16
elsif di and di.opcode.props[:auto64]; 64
else 32
end
end
def register_symbols
[:rax, :rcx, :rdx, :rbx, :rsp, :rbp, :rsi, :rdi, :r8, :r9, :r10, :r11, :r12, :r13, :r14, :r15]
end
# returns a DecodedFunction from a parsed C function prototype
def decode_c_function_prototype(cp, sym, orig=nil)
sym = cp.toplevel.symbol[sym] if sym.kind_of?(::String)
df = DecodedFunction.new
orig ||= Expression[sym.name]
new_bt = lambda { |expr, rlen|
df.backtracked_for << BacktraceTrace.new(expr, orig, expr, rlen ? :r : :x, rlen)
}
# return instr emulation
new_bt[Indirection[:rsp, @size/8, orig], nil] if not sym.attributes.to_a.include? 'noreturn'
# register dirty (MS standard ABI)
[:rax, :rcx, :rdx, :r8, :r9, :r10, :r11].each { |r|
df.backtrace_binding.update r => Expression::Unknown
}
if cp.lexer.definition['__MS_X86_64_ABI__']
reg_args = [:rcx, :rdx, :r8, :r9]
else
reg_args = [:rdi, :rsi, :rdx, :rcx, :r8, :r9]
end
# emulate ret <n>
al = cp.typesize[:ptr]
if sym.attributes.to_a.include? 'stdcall'
argsz = sym.type.args[reg_args.length..-1].to_a.inject(al) { |sum, a| sum += (cp.sizeof(a) + al - 1) / al * al }
df.backtrace_binding[:rsp] = Expression[:rsp, :+, argsz]
else
df.backtrace_binding[:rsp] = Expression[:rsp, :+, al]
end
# scan args for function pointers
# TODO walk structs/unions..
stackoff = al
sym.type.args.to_a.zip(reg_args).each { |a, r|
if not r
r = Indirection[[:rsp, :+, stackoff], al, orig]
stackoff += (cp.sizeof(a) + al - 1) / al * al
end
if a.type.untypedef.kind_of? C::Pointer
pt = a.type.untypedef.type.untypedef
if pt.kind_of? C::Function
new_bt[r, nil]
df.backtracked_for.last.detached = true
elsif pt.kind_of? C::Struct
new_bt[r, al]
else
new_bt[r, cp.sizeof(nil, pt)]
end
end
}
df
end
def backtrace_update_function_binding_check(dasm, faddr, f, b)
# TODO save regs according to ABI
end
end
end
x64: typo fix, thanks to Nanki
# This file is part of Metasm, the Ruby assembly manipulation suite
# Copyright (C) 2006-2009 Yoann GUILLOT
#
# Licence is LGPL, see LICENCE in the top-level directory
require 'metasm/x86_64/opcodes'
require 'metasm/decode'
module Metasm
class X86_64
class ModRM
def self.decode(edata, byte, endianness, adsz, opsz, seg=nil, regclass=Reg, pfx={})
m = (byte >> 6) & 3
rm = byte & 7
if m == 3
rm |= 8 if pfx[:rex_b]
return regclass.new(rm, opsz)
end
adsz ||= 64
# mod 0/1/2 m 4 => sib
# mod 0 m 5 => rip+imm
# sib: i 4 => no index, b 5 => no base
s = i = b = imm = nil
if rm == 4 # XXX pfx[:rex_b] ?
sib = edata.get_byte.to_i
ii = (sib >> 3) & 7
if ii != 4 # XXX pfx[:rex_x] ?
ii |= 8 if pfx[:rex_x]
s = 1 << ((sib >> 6) & 3)
i = Reg.new(ii, adsz)
end
bb = sib & 7
if bb == 5 and m == 0 # XXX pfx[:rex_b] ?
m = 2 # :i32 follows
else
bb |= 8 if pfx[:rex_b]
b = Reg.new(bb, adsz)
end
elsif rm == 5 and m == 0 # rip XXX pfx[:rex_b] ?
b = Reg.new(16, adsz)
m = 2 # :i32 follows
else
rm |= 8 if pfx[:rex_b]
b = Reg.new(rm, adsz)
end
case m
when 1; itype = :i8
when 2; itype = :i32
end
imm = Expression[edata.decode_imm(itype, endianness)] if itype
if imm and imm.reduce.kind_of? Integer and imm.reduce < -0x100_0000
# probably a base address -> unsigned
imm = Expression[imm.reduce & ((1 << adsz) - 1)]
end
new adsz, opsz, s, i, b, imm, seg
end
end
def decode_prefix(instr, byte)
x = super(instr, byte)
#return if instr.prefix[:rex] # must be the last prefix TODO check repetition/out of order
if byte & 0xf0 == 0x40
x = instr.prefix[:rex] = byte
instr.prefix[:rex_b] = 1 if byte & 1 > 0
instr.prefix[:rex_x] = 1 if byte & 2 > 0
instr.prefix[:rex_r] = 1 if byte & 4 > 0
instr.prefix[:rex_w] = 1 if byte & 8 > 0
end
x
end
# Decodes the operands of an instruction whose opcode was already matched
# (by decode_findopcode) and fills di.instruction.
# edata:: EncodedData positioned at the opcode bytes
# di::    DecodedInstruction holding the matched opcode
def decode_instr_op(edata, di)
  before_ptr = edata.ptr
  op = di.opcode
  di.instruction.opname = op.name
  bseq = edata.read(op.bin.length).unpack('C*') # decode_findopcode ensures that data >= op.length
  pfx = di.instruction.prefix || {}

  # Reads an encoded opcode field: bseq[byte index] shifted and masked.
  field_val = lambda { |f|
    if fld = op.fields[f]
      (bseq[fld[0]] >> fld[1]) & @fields_mask[f]
    end
  }
  # Same, with the register number extended by the matching REX bit
  # (rex_r for fields at bit offset 3, rex_b otherwise).
  field_val_r = lambda { |f|
    v = field_val[f]
    v |= 8 if v and (op.fields[f][1] == 3 ? pfx[:rex_r] : pfx[:rex_b]) # gruick ?
    v
  }

  # Operand size: :argsz overrides; else REX.W or :auto64 => 64,
  # operand-size prefix => 16, default 32.
  opsz = op.props[:argsz] || (pfx[:rex_w] || op.props[:auto64] ? 64 : pfx[:opsz] ? 16 : 32)
  # Address size: address-size prefix => 32, default 64.
  adsz = pfx[:adsz] ? 32 : 64

  # Decode each declared operand into an argument object.
  op.args.each { |a|
    di.instruction.args << case a
    when :reg; Reg.new field_val_r[a], opsz
    when :eeec; CtrlReg.new field_val_r[a]
    when :eeed; DbgReg.new field_val_r[a]
    when :seg2, :seg2A, :seg3, :seg3A; SegReg.new field_val[a]
    when :regxmm; SimdReg.new field_val_r[a], 128
    when :farptr; Farptr.decode edata, @endianness, opsz
    when :i8, :u8, :i16, :u16, :i32, :u32, :i64, :u64; Expression[edata.decode_imm(a, @endianness)]
    when :i # 64bit constants are sign-extended from :i32
      type = (opsz == 64 ? op.props[:imm64] ? :a64 : :i32 : "#{op.props[:unsigned_imm] ? 'a' : 'i'}#{opsz}".to_sym )
      v = edata.decode_imm(type, @endianness)
      v &= 0xffff_ffff_ffff_ffff if opsz == 64 and op.props[:unsigned_imm] and v.kind_of? Integer
      Expression[v]
    when :mrm_imm; ModRM.new(adsz, opsz, nil, nil, nil, Expression[edata.decode_imm("a#{adsz}".to_sym, @endianness)], pfx[:seg]) # XXX manuals say :a64, test it
    when :modrm, :modrmA; ModRM.decode edata, field_val[a], @endianness, adsz, opsz, pfx[:seg], Reg, pfx
    when :modrmxmm; ModRM.decode edata, field_val[:modrm], @endianness, adsz, 128, pfx[:seg], SimdReg, pfx
    when :imm_val1; Expression[1]
    when :imm_val3; Expression[3]
    when :reg_cl; Reg.new 1, 8
    when :reg_eax; Reg.new 0, opsz
    when :reg_dx; Reg.new 2, 16
    #when :regfp0; FpReg.new nil # implicit?
    else raise SyntaxError, "Internal error: invalid argument #{a} in #{op.name}"
    end
  }

  # sil => bh : without a REX prefix, 8bit register encodings 4..8 denote
  # the legacy high-byte registers, stored here at val+12.
  di.instruction.args.each { |a| a.val += 12 if a.kind_of? Reg and a.sz == 8 and not pfx[:rex] and a.val >= 4 and a.val <= 8 }

  di.bin_length += edata.ptr - before_ptr

  if op.name == 'movsx' or op.name == 'movzx'
    # TODO ?
    # Source keeps its smaller size; destination takes the operand size.
    if opsz == 8
      di.instruction.args[1].sz = 8
    else
      di.instruction.args[1].sz = 16
    end
    if pfx[:opsz]
      di.instruction.args[0].sz = 16
    else
      di.instruction.args[0].sz = 32
    end
  end

  pfx.delete :seg
  # Normalize the rep prefix to its display mnemonic for string opcodes.
  case r = pfx.delete(:rep)
  when :nz
    if di.opcode.props[:strop]
      pfx[:rep] = 'rep'
    elsif di.opcode.props[:stropz]
      pfx[:rep] = 'repnz'
    end
  when :z
    if di.opcode.props[:strop]
      pfx[:rep] = 'rep'
    elsif di.opcode.props[:stropz]
      pfx[:rep] = 'repz'
    end
  end
  di
end
# Returns the operand size, in bits, in effect for a decoded instruction.
# REX.W forces 64 bit, the operand-size prefix selects 16 bit, opcodes
# flagged :auto64 default to 64 bit, everything else is 32 bit.
# A nil instruction yields the 32-bit default.
def opsz(di)
  return 32 unless di
  prefix = di.instruction.prefix
  if prefix && prefix[:rex_w]
    64
  elsif prefix && prefix[:opsz]
    16
  elsif di.opcode.props[:auto64]
    64
  else
    32
  end
end
# The sixteen general-purpose 64-bit register names, ordered by their
# hardware encoding number (0 => :rax .. 15 => :r15).
def register_symbols
  named = [:rax, :rcx, :rdx, :rbx, :rsp, :rbp, :rsi, :rdi]
  named + (8..15).map { |n| "r#{n}".to_sym }
end
# returns a DecodedFunction from a parsed C function prototype
# returns a DecodedFunction from a parsed C function prototype
#
# Builds the backtrace stub for a function from its C prototype, so the
# disassembler knows its calling-convention effects without code.
# cp::   the C parser holding the prototype
# sym::  the function symbol (or its name as a String)
# orig:: expression for the function address (defaults to its name)
def decode_c_function_prototype(cp, sym, orig=nil)
  sym = cp.toplevel.symbol[sym] if sym.kind_of?(::String)
  df = DecodedFunction.new
  orig ||= Expression[sym.name]

  # Registers a backtrace trace entry; rlen selects a read (:r) trace of
  # that byte length, nil selects an execution (:x) trace.
  new_bt = lambda { |expr, rlen|
    df.backtracked_for << BacktraceTrace.new(expr, orig, expr, rlen ? :r : :x, rlen)
  }

  # return instr emulation
  new_bt[Indirection[:rsp, @size/8, orig], nil] if not sym.attributes.to_a.include? 'noreturn'

  # register dirty (MS standard ABI)
  [:rax, :rcx, :rdx, :r8, :r9, :r10, :r11].each { |r|
    df.backtrace_binding.update r => Expression::Unknown
  }

  # Argument-passing registers depend on the ABI in use.
  if cp.lexer.definition['__MS_X86_64_ABI__']
    reg_args = [:rcx, :rdx, :r8, :r9]
  else
    reg_args = [:rdi, :rsi, :rdx, :rcx, :r8, :r9]
  end

  # emulate ret <n>: stdcall callees pop their stack arguments.
  al = cp.typesize[:ptr]
  if sym.attributes.to_a.include? 'stdcall'
    argsz = sym.type.args[reg_args.length..-1].to_a.inject(al) { |sum, a| sum += (cp.sizeof(a) + al - 1) / al * al }
    df.backtrace_binding[:rsp] = Expression[:rsp, :+, argsz]
  else
    df.backtrace_binding[:rsp] = Expression[:rsp, :+, al]
  end

  # scan args for function pointers
  # TODO walk structs/unions..
  stackoff = al
  sym.type.args.to_a.zip(reg_args).each { |a, r|
    if not r
      # No register left: argument lives on the stack, pointer-aligned.
      r = Indirection[[:rsp, :+, stackoff], al, orig]
      stackoff += (cp.sizeof(a) + al - 1) / al * al
    end
    if a.type.untypedef.kind_of? C::Pointer
      pt = a.type.untypedef.type.untypedef
      if pt.kind_of? C::Function
        # Function-pointer argument: mark as a detached execution target.
        new_bt[r, nil]
        df.backtracked_for.last.detached = true
      elsif pt.kind_of? C::Struct
        new_bt[r, al]
      else
        new_bt[r, cp.sizeof(nil, pt)]
      end
    end
  }
  df
end
# Hook invoked after a function's backtrace binding has been computed.
# Intentionally a no-op for now.
def backtrace_update_function_binding_check(dasm, faddr, f, b)
  # TODO save regs according to ABI
end
end
end
|
# Read the gem version out of the VERSION constant in lib/faraday.rb so it
# is defined in exactly one place.
lib = "faraday"
lib_file = File.expand_path("../lib/#{lib}.rb", __FILE__)
File.read(lib_file) =~ /\bVERSION\s*=\s*["'](.+?)["']/
version = $1

Gem::Specification.new do |spec|
  spec.specification_version = 2 if spec.respond_to? :specification_version=
  spec.required_rubygems_version = '>= 1.3.5'

  spec.name = lib
  spec.version = version
  spec.summary = "HTTP/REST API client library."

  spec.authors = ["Rick Olson"]
  spec.email = 'technoweenie@gmail.com'
  spec.homepage = 'https://github.com/lostisland/faraday'
  spec.licenses = ['MIT']

  spec.add_dependency 'multipart-post', '~> 1.1'
  spec.add_development_dependency 'bundler', '~> 1.0'

  spec.files = %w(.document CHANGELOG.md CONTRIBUTING.md Gemfile LICENSE.md README.md Rakefile)
  spec.files << "#{lib}.gemspec"
  spec.files += Dir.glob("lib/**/*.rb")
  # Include .txt fixtures alongside the test scripts so the test suite can
  # run from the installed gem (previously only *.rb was packaged).
  spec.files += Dir.glob("test/**/*.{rb,txt}")
  spec.files += Dir.glob("script/*")

  # Inside a git checkout, restrict the list to tracked files.
  dev_null = File.exist?('/dev/null') ? '/dev/null' : 'NUL'
  git_files = `git ls-files -z 2>#{dev_null}`
  spec.files &= git_files.split("\0") if $?.success?

  spec.test_files = Dir.glob("test/**/*.rb")
end
Add missing test fixture files (*.txt) to the gemspec so they ship with the gem.
Fixes #273 [ci skip]
# Read the gem version out of the VERSION constant in lib/faraday.rb so it
# is defined in exactly one place.
lib = "faraday"
lib_file = File.expand_path("../lib/#{lib}.rb", __FILE__)
File.read(lib_file) =~ /\bVERSION\s*=\s*["'](.+?)["']/
version = $1

Gem::Specification.new do |spec|
  spec.specification_version = 2 if spec.respond_to? :specification_version=
  spec.required_rubygems_version = '>= 1.3.5'

  spec.name = lib
  spec.version = version
  spec.summary = "HTTP/REST API client library."

  spec.authors = ["Rick Olson"]
  spec.email = 'technoweenie@gmail.com'
  spec.homepage = 'https://github.com/lostisland/faraday'
  spec.licenses = ['MIT']

  spec.add_dependency 'multipart-post', '~> 1.1'
  spec.add_development_dependency 'bundler', '~> 1.0'

  # Package docs, sources, tests (scripts plus .txt fixtures) and scripts.
  spec.files = %w(.document CHANGELOG.md CONTRIBUTING.md Gemfile LICENSE.md README.md Rakefile)
  spec.files << "#{lib}.gemspec"
  spec.files += Dir.glob("lib/**/*.rb")
  spec.files += Dir.glob("test/**/*.{rb,txt}")
  spec.files += Dir.glob("script/*")

  # Inside a git checkout, restrict the list to tracked files.
  dev_null = File.exist?('/dev/null') ? '/dev/null' : 'NUL'
  git_files = `git ls-files -z 2>#{dev_null}`
  spec.files &= git_files.split("\0") if $?.success?

  spec.test_files = Dir.glob("test/**/*.rb")
end
|
# Gemspec for the fastapi gem.
Gem::Specification.new do |s|
  s.name = 'fastapi'
  s.version = '0.1.25'
  s.summary = 'Easily create robust, standardized API endpoints using lightning-fast database queries'
  s.description = 'Easily create robust, standardized API endpoints using lightning-fast database queries'
  s.authors = ['Keith Horwood', 'Trevor Strieber']
  s.email = 'keithwhor@gmail.com'
  # Only the library files ship in the gem; specs are excluded.
  s.files = ['lib/fastapi.rb', 'lib/fastapi/active_record_extension.rb']
  s.homepage = 'https://github.com/thestorefront/FastAPI'
  s.license = 'MIT'

  s.add_runtime_dependency 'activerecord', '>= 3.2.0'
  s.add_runtime_dependency 'activesupport', '>= 3.2.0'
  s.add_runtime_dependency 'oj', '~> 2.9.9'
  s.add_runtime_dependency 'pg', '>= 0.18.1'

  s.add_development_dependency 'rake', '~> 10.0'
  s.add_development_dependency 'bundler', '>= 1.3'
  s.add_development_dependency 'rspec', '~> 3.2.0'
  s.add_development_dependency 'factory_girl', '~> 4.0'
  s.add_development_dependency 'database_cleaner', '~> 1.4.1'

  s.required_ruby_version = '>= 1.9.3'
end
Bumping the patch version (0.1.25 -> 0.1.26).
* Adding trevor@strieber.org as an author email.
# Gemspec for the fastapi gem.
Gem::Specification.new do |s|
  s.name = 'fastapi'
  s.version = '0.1.26'
  s.summary = 'Easily create robust, standardized API endpoints using lightning-fast database queries'
  s.description = 'Easily create robust, standardized API endpoints using lightning-fast database queries'
  s.authors = ['Keith Horwood', 'Trevor Strieber']
  s.email = ['keithwhor@gmail.com', 'trevor@strieber.org']
  # Only the library files ship in the gem; specs are excluded.
  s.files = ['lib/fastapi.rb', 'lib/fastapi/active_record_extension.rb']
  s.homepage = 'https://github.com/thestorefront/FastAPI'
  s.license = 'MIT'

  s.add_runtime_dependency 'activerecord', '>= 3.2.0'
  s.add_runtime_dependency 'activesupport', '>= 3.2.0'
  s.add_runtime_dependency 'oj', '~> 2.9.9'
  s.add_runtime_dependency 'pg', '>= 0.18.1'

  s.add_development_dependency 'rake', '~> 10.0'
  s.add_development_dependency 'bundler', '>= 1.3'
  s.add_development_dependency 'rspec', '~> 3.2.0'
  s.add_development_dependency 'factory_girl', '~> 4.0'
  s.add_development_dependency 'database_cleaner', '~> 1.4.1'

  s.required_ruby_version = '>= 1.9.3'
end
|
Add macvim
# Track MacVim snapshot releases from the project's GitHub releases feed.
# Returns a one-entry hash mapping the version label to the download URL.
desc 'macvim' do
  # The first entry is just for testing the auto-update functionality.
  latest = github('https://github.com/macvim-dev/macvim/releases.atom')[1]
  # Strip the "snapshot-" prefix from the tag to get the build number.
  # String#sub removes the substring; the previous #delete('snapshot-')
  # removed every occurrence of those *characters* instead.
  build = File.basename(latest[:link]).sub('snapshot-', '')
  version = "7.4-#{build}"
  url = "https://github.com/macvim-dev/macvim/releases/download/snapshot-#{build}/MacVim-snapshot-#{build}.tbz"
  {version => url}
end
|
# One-off script: generate renewal-policy XML for the employers listed in
# `feins`, starting from policies active as of 2014-12-31.
feins = []
emp_ids = Employer.where(:fein => { "$in" => feins } ).map(&:id)
# Two cursors over the same query: one to collect member ids up front, a
# no_timeout one for the long-running main loop.
pols_mems = Policy.where(PolicyStatus::Active.as_of(Date.parse('12/31/2014'), { "employer_id" => { "$in" => emp_ids }}).query)
pols = Policy.where(PolicyStatus::Active.as_of(Date.parse('12/31/2014'), { "employer_id" => { "$in" => emp_ids }}).query).no_timeout

# Collect every enrollee member id so the member cache can be pre-warmed.
m_ids = []
pols_mems.each do |mpol|
  mpol.enrollees.each do |en|
    m_ids << en.m_id
  end
end

member_repo = Caches::MemberCache.new(m_ids)
calc = Premiums::PolicyCalculator.new(member_repo)
Caches::MongoidCache.allocate(Employer)
Caches::MongoidCache.allocate(Plan)
Caches::MongoidCache.allocate(Carrier)

# Renewal policies get synthetic enrollment-group ids starting at 50001.
p_id = 50000
pols.each do |pol|
  # Only renew policies whose subscriber coverage has no end date.
  if pol.subscriber.coverage_end.blank?
    sub_member = member_repo.lookup(pol.subscriber.m_id)
    authority_member = sub_member.person.authority_member
    if authority_member.nil?
      # Guard: without this check, hbx_member_id below raises
      # NoMethodError on nil and aborts the entire run.
      puts "No authority member for: #{pol.subscriber.m_id}"
    elsif authority_member.hbx_member_id == pol.subscriber.m_id
      r_pol = pol.clone_for_renewal(Date.new(2015,1,1))
      calc.apply_calculations(r_pol)
      p_id = p_id + 1
      out_file = File.open("renewals/#{p_id}.xml", 'w')
      member_ids = r_pol.enrollees.map(&:m_id)
      r_pol.eg_id = p_id.to_s
      ms = CanonicalVocabulary::MaintenanceSerializer.new(r_pol,"change", "renewal", member_ids, member_ids, {:member_repo => member_repo})
      out_file.print(ms.serialize)
      out_file.close
    end
  end
end

Caches::MongoidCache.release(Plan)
Caches::MongoidCache.release(Carrier)
Caches::MongoidCache.release(Employer)
Tell when authority id is missing.
# One-off script: generate renewal-policy XML for the employers listed in
# `feins`, starting from policies active as of 2014-12-31.
feins = []
emp_ids = Employer.where(:fein => { "$in" => feins } ).map(&:id)
# Two cursors over the same query: one to collect member ids up front, a
# no_timeout one for the long-running main loop.
pols_mems = Policy.where(PolicyStatus::Active.as_of(Date.parse('12/31/2014'), { "employer_id" => { "$in" => emp_ids }}).query)
pols = Policy.where(PolicyStatus::Active.as_of(Date.parse('12/31/2014'), { "employer_id" => { "$in" => emp_ids }}).query).no_timeout

# Collect every enrollee member id so the member cache can be pre-warmed.
m_ids = []
pols_mems.each do |mpol|
  mpol.enrollees.each do |en|
    m_ids << en.m_id
  end
end

member_repo = Caches::MemberCache.new(m_ids)
calc = Premiums::PolicyCalculator.new(member_repo)
Caches::MongoidCache.allocate(Employer)
Caches::MongoidCache.allocate(Plan)
Caches::MongoidCache.allocate(Carrier)

# Renewal policies get synthetic enrollment-group ids starting at 50001.
p_id = 50000
pols.each do |pol|
  # Only renew policies whose subscriber coverage has no end date.
  if pol.subscriber.coverage_end.blank?
    sub_member = member_repo.lookup(pol.subscriber.m_id)
    authority_member = sub_member.person.authority_member
    if authority_member.blank?
      # Report and skip instead of raising NoMethodError on nil below.
      puts "No authority member for: #{pol.subscriber.m_id}"
    else
      if (sub_member.person.authority_member.hbx_member_id == pol.subscriber.m_id)
        r_pol = pol.clone_for_renewal(Date.new(2015,1,1))
        calc.apply_calculations(r_pol)
        p_id = p_id + 1
        out_file = File.open("renewals/#{p_id}.xml", 'w')
        member_ids = r_pol.enrollees.map(&:m_id)
        r_pol.eg_id = p_id.to_s
        ms = CanonicalVocabulary::MaintenanceSerializer.new(r_pol,"change", "renewal", member_ids, member_ids, {:member_repo => member_repo})
        out_file.print(ms.serialize)
        out_file.close
      end
    end
  end
end

Caches::MongoidCache.release(Plan)
Caches::MongoidCache.release(Carrier)
Caches::MongoidCache.release(Employer)
|
# Generated by jeweler
# DO NOT EDIT THIS FILE DIRECTLY
# Instead, edit Jeweler::Tasks in Rakefile, and run 'rake gemspec'
# -*- encoding: utf-8 -*-

Gem::Specification.new do |s|
  s.name = %q{litmus_mailer}
  s.version = "0.0.0"

  s.required_rubygems_version = Gem::Requirement.new(">= 0") if s.respond_to? :required_rubygems_version=
  s.authors = [%q{Logan Koester}]
  s.date = %q{2011-09-27}
  s.description = %q{TODO: longer description of your gem}
  s.email = %q{logan@logankoester.com}
  s.extra_rdoc_files = [
    "LICENSE.txt",
    "README.rdoc"
  ]
  s.files = [
    ".document",
    ".rspec",
    "Gemfile",
    "LICENSE.txt",
    "README.rdoc",
    "Rakefile",
    "VERSION",
    "lib/litmus_mailer.rb",
    "spec/litmus_mailer_spec.rb",
    "spec/spec_helper.rb"
  ]
  s.homepage = %q{http://github.com/logankoester/litmus_mailer}
  s.licenses = [%q{MIT}]
  s.require_paths = [%q{lib}]
  s.rubygems_version = %q{1.8.6.1}
  s.summary = %q{Litmus email previews as an ActionMailer delivery method}

  # Jeweler declares the dependency list three times so the gemspec stays
  # loadable on pre-1.2 RubyGems, which lacked add_runtime_dependency.
  if s.respond_to? :specification_version then
    s.specification_version = 3

    if Gem::Version.new(Gem::VERSION) >= Gem::Version.new('1.2.0') then
      s.add_runtime_dependency(%q<litmus>, [">= 0"])
      s.add_development_dependency(%q<rspec>, ["~> 2.3.0"])
      s.add_development_dependency(%q<yard>, ["~> 0.6.0"])
      s.add_development_dependency(%q<bundler>, ["~> 1.0.0"])
      s.add_development_dependency(%q<jeweler>, ["~> 1.6.4"])
      s.add_development_dependency(%q<rcov>, [">= 0"])
    else
      s.add_dependency(%q<litmus>, [">= 0"])
      s.add_dependency(%q<rspec>, ["~> 2.3.0"])
      s.add_dependency(%q<yard>, ["~> 0.6.0"])
      s.add_dependency(%q<bundler>, ["~> 1.0.0"])
      s.add_dependency(%q<jeweler>, ["~> 1.6.4"])
      s.add_dependency(%q<rcov>, [">= 0"])
    end
  else
    s.add_dependency(%q<litmus>, [">= 0"])
    s.add_dependency(%q<rspec>, ["~> 2.3.0"])
    s.add_dependency(%q<yard>, ["~> 0.6.0"])
    s.add_dependency(%q<bundler>, ["~> 1.0.0"])
    s.add_dependency(%q<jeweler>, ["~> 1.6.4"])
    s.add_dependency(%q<rcov>, [">= 0"])
  end
end
Adds gemspec
# Generated by jeweler
# DO NOT EDIT THIS FILE DIRECTLY
# Instead, edit Jeweler::Tasks in Rakefile, and run 'rake gemspec'
# -*- encoding: utf-8 -*-

Gem::Specification.new do |s|
  s.name = %q{litmus_mailer}
  s.version = "0.1.0"

  s.required_rubygems_version = Gem::Requirement.new(">= 0") if s.respond_to? :required_rubygems_version=
  s.authors = [%q{Logan Koester}]
  s.date = %q{2011-09-28}
  s.description = %q{TODO: longer description of your gem}
  s.email = %q{logan@logankoester.com}
  s.extra_rdoc_files = [
    "LICENSE.txt",
    "README.rdoc"
  ]
  s.files = [
    ".document",
    ".rspec",
    ".rvmrc",
    "Gemfile",
    "Gemfile.lock",
    "LICENSE.txt",
    "README.rdoc",
    "Rakefile",
    "VERSION",
    "lib/litmus_mailer.rb",
    "lib/litmus_mailer/mail_observer.rb",
    "lib/litmus_mailer/settings.rb",
    "litmus_mailer.gemspec",
    "spec/litmus_mailer_spec.rb",
    "spec/spec_helper.rb"
  ]
  s.homepage = %q{http://github.com/logankoester/litmus_mailer}
  s.licenses = [%q{MIT}]
  s.require_paths = [%q{lib}]
  s.rubygems_version = %q{1.8.6.1}
  s.summary = %q{Litmus email previews as an ActionMailer delivery method}

  # Jeweler declares the dependency list three times so the gemspec stays
  # loadable on pre-1.2 RubyGems, which lacked add_runtime_dependency.
  if s.respond_to? :specification_version then
    s.specification_version = 3

    if Gem::Version.new(Gem::VERSION) >= Gem::Version.new('1.2.0') then
      s.add_runtime_dependency(%q<litmus>, [">= 0"])
      s.add_runtime_dependency(%q<actionmailer>, [">= 0"])
      s.add_development_dependency(%q<rspec>, ["~> 2.3.0"])
      s.add_development_dependency(%q<yard>, ["~> 0.6.0"])
      s.add_development_dependency(%q<bundler>, ["~> 1.0.0"])
      s.add_development_dependency(%q<jeweler>, ["~> 1.6.4"])
      s.add_development_dependency(%q<rcov>, [">= 0"])
    else
      s.add_dependency(%q<litmus>, [">= 0"])
      s.add_dependency(%q<actionmailer>, [">= 0"])
      s.add_dependency(%q<rspec>, ["~> 2.3.0"])
      s.add_dependency(%q<yard>, ["~> 0.6.0"])
      s.add_dependency(%q<bundler>, ["~> 1.0.0"])
      s.add_dependency(%q<jeweler>, ["~> 1.6.4"])
      s.add_dependency(%q<rcov>, [">= 0"])
    end
  else
    s.add_dependency(%q<litmus>, [">= 0"])
    s.add_dependency(%q<actionmailer>, [">= 0"])
    s.add_dependency(%q<rspec>, ["~> 2.3.0"])
    s.add_dependency(%q<yard>, ["~> 0.6.0"])
    s.add_dependency(%q<bundler>, ["~> 1.0.0"])
    s.add_dependency(%q<jeweler>, ["~> 1.6.4"])
    s.add_dependency(%q<rcov>, [">= 0"])
  end
end
|
module Domgen
  module Xml
    module Templates
      # Serializes a Domgen repository model into an XML document via
      # Builder::XmlMarkup, mixed into a template class that supplies
      # @repository, collect_attributes and tag_each.
      module Xml
        def generate
          @doc = Builder::XmlMarkup.new(:indent => 2)
          visit_repository(@repository)
        end

        private

        attr_reader :doc

        # <repository> root: tags plus one child per data module.
        def visit_repository(repository)
          doc.tag!("repository", :name => repository.name) do
            add_tags(repository)
            repository.data_modules.each do |data_module|
              visit_data_module(data_module)
            end
          end
        end

        # <data-module>: tags plus one child per entity.
        def visit_data_module(data_module)
          doc.tag!("data-module", :name => data_module.name) do
            add_tags(data_module)
            data_module.entities.each do |entity|
              visit_entity(entity)
            end
          end
        end

        # <entity>: attributes, each constraint kind, then the SQL table.
        def visit_entity(entity)
          doc.tag!("entity", collect_attributes(entity, %w(name qualified_name))) do
            add_tags(entity)
            tag_each(entity, :attributes) do |attribute|
              visit_attribute(attribute)
            end
            # Simple constraints that are just lists of attribute refs.
            %w(unique codependent incompatible).each do |constraint_type|
              tag_each(entity, "#{constraint_type}_constraints".to_sym) do |constraint|
                doc.tag!("#{constraint_type}-constraint") do
                  constraint.attribute_names.each do |name|
                    attribute_ref(entity, name)
                  end
                end
              end
            end
            tag_each(entity, :dependency_constraints) do |constraint|
              doc.tag!("dependency-constraint") do
                attribute_ref(entity, constraint.attribute_name)
                doc.tag!("dependent-attributes") do
                  constraint.dependent_attribute_names.each do |name|
                    attribute_ref(entity, name)
                  end
                end
              end
            end
            tag_each(entity, :cycle_constraints) do |constraint|
              doc.tag!("cycle-constraint") do
                attribute_ref(entity, constraint.attribute_name)
                doc.tag!("path") do
                  # Walk the attribute path, emitting a ref at each hop and
                  # following the reference to the next entity.
                  constraint.attribute_name_path.reduce entity do |path_entity, attribute_name|
                    attribute_ref(path_entity, attribute_name)
                    path_entity.attribute_by_name(attribute_name).referenced_entity
                  end
                end
                doc.tag!("scoping-attribute") do
                  attribute_ref(entity, constraint.scoping_attribute)
                end
              end
            end
            visit_table(entity.sql)
          end
        end

        # <attribute>: characteristic flags, enum values, reference info
        # and the persistent (SQL) mapping.
        def visit_attribute(attribute)
          attribute_names = %w(abstract? override? reference? validate? set_once? generated_value?
                               enum? primary_key? allow_blank? unique? nullable? immutable?
                               updatable? allow_blank? qualified_name length min_length name)
          doc.attribute({"entity" => attribute.entity.qualified_name},
                        collect_attributes(attribute, attribute_names)) do
            add_tags(attribute)
            unless attribute.enumeration.values.nil?
              doc.values do
                attribute.enumeration.values.each_pair do |name, value|
                  doc.value(:code => name, :value => value)
                end
              end
            end
            if attribute.reference?
              doc.reference("references" => attribute.references,
                            "referenced-entity" => attribute.referenced_entity.qualified_name,
                            "polymorphic" => attribute.polymorphic?.to_s,
                            "link-name" => attribute.referencing_link_name,
                            "inverse-multiplicity" => attribute.inverse.multiplicity.to_s,
                            "inverse-traversable" => attribute.inverse.traversable?.to_s,
                            "inverse-relationship" => attribute.inverse.relationship_name.to_s)
            end
            attributes = collect_attributes(attribute.sql, %w(column_name identity? sparse? calculation))
            # Strip the SQL Server-style [] quoting from the type name.
            attributes['sql-type'] = attribute.sql.sql_type.gsub('[','').gsub(']','')
            doc.persistent(attributes)
          end
        end

        # <table>: constraints, validations, triggers, indexes and FKs.
        def visit_table(table)
          table_attributes = %w(table_name qualified_table_name)
          doc.table(collect_attributes(table, table_attributes)) do
            constraint_attributes = %w(name constraint_name qualified_constraint_name invariant?)
            tag_each(table, :constraints) do |constraint|
              doc.tag!("sql-constraint", collect_attributes(constraint, constraint_attributes))
            end
            tag_each(table, :function_constraints) do |constraint|
              doc.tag!("function-constraint",
                       collect_attributes(constraint, constraint_attributes)) do
                tag_each(constraint, :parameters) do |parameter|
                  doc.parameter(:name => parameter)
                end
              end
            end
            tag_each(table, :validations) do |validation|
              doc.validation(:name => validation.name)
            end
            tag_each(table, :triggers) do |trigger|
              doc.trigger(collect_attributes(trigger, %w(name qualified_trigger_name))) do
                tag_each(trigger, :after) do |after|
                  doc.after(:condition => after)
                end
                tag_each(trigger, :instead_of) do |instead_of|
                  doc.tag!("instead-of", :condition => instead_of)
                end
              end
            end
            index_attributes = %w(filter name cluster? unique?)
            tag_each(table, :indexes) do |index|
              doc.index(collect_attributes(index, index_attributes)) do
                tag_each(index, :attribute_names) do |attribute|
                  doc.column(:name => attribute)
                end
                tag_each(index, :include_attribute_names) do |attribute|
                  doc.column(:name => attribute)
                end
              end
            end
            key_attributes = %w(name referenced_entity_name on_update on_delete constraint_name)
            tag_each(table, :foreign_keys) do |key|
              doc.tag!("foreign-key", {:table => table.table_name}, collect_attributes(key, key_attributes)) do
                doc.tag!("referencing-columns") do
                  key.attribute_names.zip(key.referenced_attribute_names) do |attribute, referenced|
                    doc.column(:from => attribute, :to => referenced)
                  end
                end
              end
            end
          end
        end

        # Emits a <tags> element for the item's tag/value pairs.
        # :Description tags are rendered as HTML with named entities
        # rewritten through ENTITY_EXPANDSION_MAP to keep the XML valid.
        def add_tags(item)
          unless item.tags.empty?
            doc.tag!("tags") do
              item.tags.each_pair do |key, value|
                doc.tag!(key) do |v|
                  if [:Description].include?(key)
                    text = item.tag_as_html(key)
                    # NOTE(review): the |k,v| params shadow the builder
                    # param v inside this inner block; works, but renaming
                    # them would be clearer.
                    ENTITY_EXPANDSION_MAP.each_pair do |k,v|
                      text = text.gsub("&#{k};","&#{v};")
                    end
                    v << text
                  else
                    v << value
                  end
                end
              end
            end
          end
        end

        # Emits a reference to an attribute of an entity.
        def attribute_ref(entity, name)
          doc.attribute(:class => entity.qualified_name, :attribute => name)
        end

        # Named HTML entities mapped to numeric character references.
        # (Constant name contains a typo — "EXPANDSION" — kept as-is.)
        ENTITY_EXPANDSION_MAP =
          {
            "ldquo" => "#8220",
          }
      end
    end
  end
end
Stop generating xml for validate? as not a useful characteristic
module Domgen
  module Xml
    module Templates
      # Serializes a Domgen repository model into an XML document via
      # Builder::XmlMarkup, mixed into a template class that supplies
      # @repository, collect_attributes and tag_each.
      module Xml
        def generate
          @doc = Builder::XmlMarkup.new(:indent => 2)
          visit_repository(@repository)
        end

        private

        attr_reader :doc

        # <repository> root: tags plus one child per data module.
        def visit_repository(repository)
          doc.tag!("repository", :name => repository.name) do
            add_tags(repository)
            repository.data_modules.each do |data_module|
              visit_data_module(data_module)
            end
          end
        end

        # <data-module>: tags plus one child per entity.
        def visit_data_module(data_module)
          doc.tag!("data-module", :name => data_module.name) do
            add_tags(data_module)
            data_module.entities.each do |entity|
              visit_entity(entity)
            end
          end
        end

        # <entity>: attributes, each constraint kind, then the SQL table.
        def visit_entity(entity)
          doc.tag!("entity", collect_attributes(entity, %w(name qualified_name))) do
            add_tags(entity)
            tag_each(entity, :attributes) do |attribute|
              visit_attribute(attribute)
            end
            # Simple constraints that are just lists of attribute refs.
            %w(unique codependent incompatible).each do |constraint_type|
              tag_each(entity, "#{constraint_type}_constraints".to_sym) do |constraint|
                doc.tag!("#{constraint_type}-constraint") do
                  constraint.attribute_names.each do |name|
                    attribute_ref(entity, name)
                  end
                end
              end
            end
            tag_each(entity, :dependency_constraints) do |constraint|
              doc.tag!("dependency-constraint") do
                attribute_ref(entity, constraint.attribute_name)
                doc.tag!("dependent-attributes") do
                  constraint.dependent_attribute_names.each do |name|
                    attribute_ref(entity, name)
                  end
                end
              end
            end
            tag_each(entity, :cycle_constraints) do |constraint|
              doc.tag!("cycle-constraint") do
                attribute_ref(entity, constraint.attribute_name)
                doc.tag!("path") do
                  # Walk the attribute path, emitting a ref at each hop and
                  # following the reference to the next entity.
                  constraint.attribute_name_path.reduce entity do |path_entity, attribute_name|
                    attribute_ref(path_entity, attribute_name)
                    path_entity.attribute_by_name(attribute_name).referenced_entity
                  end
                end
                doc.tag!("scoping-attribute") do
                  attribute_ref(entity, constraint.scoping_attribute)
                end
              end
            end
            visit_table(entity.sql)
          end
        end

        # <attribute>: characteristic flags (validate? intentionally
        # omitted), enum values, reference info and the SQL mapping.
        def visit_attribute(attribute)
          attribute_names = %w(abstract? override? reference? set_once? generated_value?
                               enum? primary_key? allow_blank? unique? nullable? immutable?
                               updatable? allow_blank? qualified_name length min_length name)
          doc.attribute({"entity" => attribute.entity.qualified_name},
                        collect_attributes(attribute, attribute_names)) do
            add_tags(attribute)
            unless attribute.enumeration.values.nil?
              doc.values do
                attribute.enumeration.values.each_pair do |name, value|
                  doc.value(:code => name, :value => value)
                end
              end
            end
            if attribute.reference?
              doc.reference("references" => attribute.references,
                            "referenced-entity" => attribute.referenced_entity.qualified_name,
                            "polymorphic" => attribute.polymorphic?.to_s,
                            "link-name" => attribute.referencing_link_name,
                            "inverse-multiplicity" => attribute.inverse.multiplicity.to_s,
                            "inverse-traversable" => attribute.inverse.traversable?.to_s,
                            "inverse-relationship" => attribute.inverse.relationship_name.to_s)
            end
            attributes = collect_attributes(attribute.sql, %w(column_name identity? sparse? calculation))
            # Strip the SQL Server-style [] quoting from the type name.
            attributes['sql-type'] = attribute.sql.sql_type.gsub('[','').gsub(']','')
            doc.persistent(attributes)
          end
        end

        # <table>: constraints, validations, triggers, indexes and FKs.
        def visit_table(table)
          table_attributes = %w(table_name qualified_table_name)
          doc.table(collect_attributes(table, table_attributes)) do
            constraint_attributes = %w(name constraint_name qualified_constraint_name invariant?)
            tag_each(table, :constraints) do |constraint|
              doc.tag!("sql-constraint", collect_attributes(constraint, constraint_attributes))
            end
            tag_each(table, :function_constraints) do |constraint|
              doc.tag!("function-constraint",
                       collect_attributes(constraint, constraint_attributes)) do
                tag_each(constraint, :parameters) do |parameter|
                  doc.parameter(:name => parameter)
                end
              end
            end
            tag_each(table, :validations) do |validation|
              doc.validation(:name => validation.name)
            end
            tag_each(table, :triggers) do |trigger|
              doc.trigger(collect_attributes(trigger, %w(name qualified_trigger_name))) do
                tag_each(trigger, :after) do |after|
                  doc.after(:condition => after)
                end
                tag_each(trigger, :instead_of) do |instead_of|
                  doc.tag!("instead-of", :condition => instead_of)
                end
              end
            end
            index_attributes = %w(filter name cluster? unique?)
            tag_each(table, :indexes) do |index|
              doc.index(collect_attributes(index, index_attributes)) do
                tag_each(index, :attribute_names) do |attribute|
                  doc.column(:name => attribute)
                end
                tag_each(index, :include_attribute_names) do |attribute|
                  doc.column(:name => attribute)
                end
              end
            end
            key_attributes = %w(name referenced_entity_name on_update on_delete constraint_name)
            tag_each(table, :foreign_keys) do |key|
              doc.tag!("foreign-key", {:table => table.table_name}, collect_attributes(key, key_attributes)) do
                doc.tag!("referencing-columns") do
                  key.attribute_names.zip(key.referenced_attribute_names) do |attribute, referenced|
                    doc.column(:from => attribute, :to => referenced)
                  end
                end
              end
            end
          end
        end

        # Emits a <tags> element for the item's tag/value pairs.
        # :Description tags are rendered as HTML with named entities
        # rewritten through ENTITY_EXPANDSION_MAP to keep the XML valid.
        def add_tags(item)
          unless item.tags.empty?
            doc.tag!("tags") do
              item.tags.each_pair do |key, value|
                doc.tag!(key) do |v|
                  if [:Description].include?(key)
                    text = item.tag_as_html(key)
                    # NOTE(review): the |k,v| params shadow the builder
                    # param v inside this inner block; works, but renaming
                    # them would be clearer.
                    ENTITY_EXPANDSION_MAP.each_pair do |k,v|
                      text = text.gsub("&#{k};","&#{v};")
                    end
                    v << text
                  else
                    v << value
                  end
                end
              end
            end
          end
        end

        # Emits a reference to an attribute of an entity.
        def attribute_ref(entity, name)
          doc.attribute(:class => entity.qualified_name, :attribute => name)
        end

        # Named HTML entities mapped to numeric character references.
        # (Constant name contains a typo — "EXPANDSION" — kept as-is.)
        ENTITY_EXPANDSION_MAP =
          {
            "ldquo" => "#8220",
          }
      end
    end
  end
end
module Domotics
  module Arduino
    # Gem version string.
    VERSION = "0.0.3"
  end
end
ArduinoBase rename
module Domotics
  module Arduino
    # Gem version string.
    VERSION = "0.0.4"
  end
end
|
#
# Author:: Jamie Winsor (<jamie@vialstudios.com>)
# Copyright:: 2011, En Masse Entertainment, Inc
# License:: Apache License, Version 2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
module EnMasse
  module Dragonfly
    module FFMPEG
      # Gem version string.
      VERSION = "0.1.4"
    end
  end
end
update the version to 0.1.5
#
# Author:: Jamie Winsor (<jamie@vialstudios.com>)
# Copyright:: 2011, En Masse Entertainment, Inc
# License:: Apache License, Version 2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
module EnMasse
  module Dragonfly
    module FFMPEG
      # Gem version string.
      VERSION = "0.1.5"
    end
  end
end
|
module Sentry
  # Gem version string.
  VERSION = "0.1.3"
end
release: 0.2.0
module Sentry
  # Gem version string.
  VERSION = "0.2.0"
end
|
module EcwidApi
  module Api
    # API endpoint wrapper for Ecwid product categories.
    class Categories < Base
      # Public: Returns all of the sub-categories for a given category
      #
      # See: http://kb.ecwid.com/w/page/25285101/Product%20API#RESTAPIMethodcategories
      #
      # parent - The Category ID of the parent category. If the parent is 0 then
      #          a list of the root categories will be returned. If the parent is
      #          nil, then all of the categories will be returned
      #
      # Returns an array of EcwidApi::Category objects
      def all(params = {})
        response = client.get("categories", params)
        # The if/else is an expression: its value (the body, or an empty
        # Array on failure) is mapped to Category objects and sorted.
        if response.success?
          response.body
        else
          []
        end.map {|category| Category.new(category, client: client) }.sort_by(&:order_by)
      end

      # Public: Returns an Array of the root level EcwidApi::Category objects
      def root(params = {})
        all(params.merge(parent: 0))
      end

      # Public: Returns a single EcwidApi::Category
      #
      # See: http://kb.ecwid.com/w/page/25285101/Product%20API#RESTAPIMethodcategory
      #
      # category_id - A Category ID to get
      #
      # Returns an EcwidApi::Category, or nil if it can't be found
      def find(id)
        response = client.get("categories/#{id}")
        if response.success?
          Category.new(response.body, client: client)
        else
          nil
        end
      rescue Zlib::BufError
        # NOTE(review): presumably masks a malformed gzip body returned by
        # the API for unknown ids — confirm before removing.
        nil
      end

      # Public: Creates a new Category
      #
      # params - a Hash of API keys and their corresponding values
      #
      # Returns a new Category entity
      def create(params)
        response = client.post("categories", params)
        raise_on_failure(response) { |response| find(response.body["id"]) }
      end
    end
  end
end
The "categories" response is now paginated.
require_relative "../paged_ecwid_response"
module EcwidApi
  module Api
    # API endpoint wrapper for Ecwid product categories.
    class Categories < Base
      # Public: Returns all of the sub-categories for a given category
      #
      # See: http://kb.ecwid.com/w/page/25285101/Product%20API#RESTAPIMethodcategories
      #
      # parent - The Category ID of the parent category. If the parent is 0 then
      #          a list of the root categories will be returned. If the parent is
      #          nil, then all of the categories will be returned
      #
      # Returns an array of EcwidApi::Category objects
      def all(params = {})
        # The categories endpoint is paginated; PagedEcwidResponse walks
        # every page and maps each hash to a Category.
        PagedEcwidResponse.new(client, "categories", params) do |category_hash|
          Category.new(category_hash, client: client)
        end.sort_by(&:order_by)
      end

      # Public: Returns an Array of the root level EcwidApi::Category objects
      def root(params = {})
        all(params.merge(parent: 0))
      end

      # Public: Returns a single EcwidApi::Category
      #
      # See: http://kb.ecwid.com/w/page/25285101/Product%20API#RESTAPIMethodcategory
      #
      # category_id - A Category ID to get
      #
      # Returns an EcwidApi::Category, or nil if it can't be found
      def find(id)
        response = client.get("categories/#{id}")
        if response.success?
          Category.new(response.body, client: client)
        else
          nil
        end
      rescue Zlib::BufError
        # NOTE(review): presumably masks a malformed gzip body returned by
        # the API for unknown ids — confirm before removing.
        nil
      end

      # Public: Creates a new Category
      #
      # params - a Hash of API keys and their corresponding values
      #
      # Returns a new Category entity
      def create(params)
        response = client.post("categories", params)
        raise_on_failure(response) { |response| find(response.body["id"]) }
      end
    end
  end
end
=begin
Copyright (c) 2008, Pat Sissons
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the DHX Software nor the names of its contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
=end
require 'thread'
# Thread-safe logger that writes to the screen and, when configured, to a
# log file. Configuration is read live from the owning object's `conf`
# hash ('quiet', 'log_file', 'log_file_debug' keys).
class Logger
  def initialize(main)
    @main = main
    @mut = Mutex.new
  end

  # The configuration hash is owned by the main object (may be nil).
  def conf
    @main.conf
  end

  # Logs +text+; a truthy +level+ logs everywhere, a falsy one only goes
  # to the file when 'log_file_debug' is set. +ts+ prefixes a timestamp.
  # Serialized through a mutex so threads do not interleave output.
  def log(level, text, ts=true)
    @mut.synchronize do
      log_screen(level, text, ts)
      log_file(level, text, ts)
    end
  end

  def log_screen(level, text, ts=true)
    if level and not conf.nil?
      puts "#{ts ? "[#{Time.new.to_s}] " : ''}#{text}" unless conf.has_key?('quiet')
    end
  end

  def log_file(level, text, ts=true)
    if not conf.nil? and (level or conf.has_key?('log_file_debug'))
      begin
        # Create the file if needed; block form closes the handle
        # immediately (a bare File.new here leaked an open descriptor).
        File.open(conf['log_file'], 'w') {} unless File.exist?(conf['log_file'])
        if File.writable?(conf['log_file'])
          File.open(File.expand_path(conf['log_file']), 'a') do |f|
            f.write("[#{Time.new.to_s}] ") if ts
            f.write("#{text}\n")
          end
        else
          # raise, not throw: throw expects a matching catch and was only
          # landing in the rescue below via UncaughtThrowError.
          raise "'#{conf['log_file']}' is not writable"
        end
      rescue => e
        warn "Log File Error: #{e} -- #{text}"
      end
    end
  end
end
adding better error message for debugging log file issues
=begin
Copyright (c) 2008, Pat Sissons
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the DHX Software nor the names of its contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
=end
require 'thread'
# Thread-safe logger that writes to both the screen and a log file.
# Configuration is read from the owning object's +conf+ hash:
#   'log_file'       - path of the file to append to
#   'log_file_debug' - when present, file logging ignores +level+
#   'quiet'          - when present, suppresses screen output
class Logger
  # main - any object responding to #conf with the hash above (or nil
  #        before configuration is loaded)
  def initialize(main)
    @main = main
    @mut = Mutex.new # serializes concurrent #log calls
  end

  # Current configuration hash (may be nil).
  def conf
    @main.conf
  end

  # Log +text+ to screen and file under a single lock so messages from
  # different threads never interleave.
  def log(level, text, ts=true)
    @mut.synchronize do
      log_screen(level, text, ts)
      log_file(level, text, ts)
    end
  end

  # Print to stdout, unless the 'quiet' option is set or +level+ is falsy.
  def log_screen(level, text, ts=true)
    if level and not conf.nil?
      puts "#{ts ? "[#{Time.new.to_s}] " : ''}#{text}" unless conf.has_key?('quiet')
    end
  end

  # Append to the configured log file; see class comment for the keys
  # consulted. Failures are reported via +warn+ rather than raised.
  def log_file(level, text, ts=true)
    if not conf.nil? and (level or conf.has_key?('log_file_debug'))
      begin
        # File.exist? replaces File.exists?, removed in Ruby 3.2.
        File.new(conf['log_file'], 'w') unless File.exist?(conf['log_file'])
        if File.writable?(conf['log_file'])
          # Block form of File.open closes the handle automatically; the
          # explicit f.close inside the block was redundant.
          File.open(File.expand_path(conf['log_file']), 'a') do |f|
            f.write("[#{Time.new.to_s}] ") if ts
            f.write("#{text}\n")
          end
        else
          # raise, not throw: throw needs a matching catch tag.
          raise "'#{conf['log_file']}' is not writable"
        end
      rescue => e
        warn "Log File Error: #{e} (#{conf['log_file']}) -- #{text}"
      end
    end
  end
end
|
module Errbit
  module Cloudfuji
    # Gem version of the Errbit Cloudfuji integration.
    VERSION = '0.1.4'
  end
end
bump version
module Errbit
  module Cloudfuji
    # Gem version of the Errbit Cloudfuji integration.
    VERSION = '0.1.5'
  end
end
|
module FaceRecognition
  # Gem version of face_recognition.
  VERSION = "0.0.4"
end
Bump to 0.0.5
module FaceRecognition
  # Gem version of face_recognition.
  VERSION = "0.0.5"
end
|
require 'typhoeus'

module Faraday
  module Adapter
    # Faraday adapter backed by the Typhoeus HTTP client. Requests made
    # inside an #in_parallel block are queued on a shared
    # Typhoeus::Hydra and fired together.
    module Typhoeus
      # True while a Hydra parallel manager is active, i.e. inside an
      # #in_parallel block.
      def in_parallel?
        !!@parallel_manager
      end

      # Queue every request issued inside the block, then run them all
      # when the block returns.
      def in_parallel(options = {})
        setup_parallel_manager(options)
        yield
        run_parallel_requests
      end

      # Lazily create the shared Hydra queue.
      def setup_parallel_manager(options = {})
        @parallel_manager ||= ::Typhoeus::Hydra.new(options)
      end

      # Run all queued requests, then drop the manager so subsequent
      # requests execute synchronously again.
      def run_parallel_requests
        @parallel_manager.run
        @parallel_manager = nil
      end

      # Issue a GET for +uri+. Outside #in_parallel the request runs
      # immediately; inside, it is only queued on the Hydra.
      def _get(uri, request_headers)
        response_class.new do |resp|
          is_async = in_parallel?
          setup_parallel_manager
          req = ::Typhoeus::Request.new(uri.to_s, :headers => request_headers, :method => :get)
          req.on_complete do |response|
            resp.process(response.body)
            # Build a lower-cased header hash from the raw header blob.
            resp.headers = Hash[response.headers.split(/\r\n/).
              tap(&:shift). # drop the HTTP status line
              map {|h| h.split(/:\s+/,2) }. # split key and value
              map {|k, v| [k.downcase, v]}] # lower-case key
            resp.processed!
          end
          @parallel_manager.queue(req)
          if !is_async then run_parallel_requests end
        end
      end
    end
  end
end
fixing bugs in the Typhoeus response header parsing
require 'typhoeus'

module Faraday
  module Adapter
    # Faraday adapter backed by the Typhoeus HTTP client. Requests made
    # inside an #in_parallel block are queued on a shared
    # Typhoeus::Hydra and fired together.
    module Typhoeus
      # True while a Hydra parallel manager is active, i.e. inside an
      # #in_parallel block.
      def in_parallel?
        !!@parallel_manager
      end

      # Queue every request issued inside the block, then run them all
      # when the block returns.
      def in_parallel(options = {})
        setup_parallel_manager(options)
        yield
        run_parallel_requests
      end

      # Lazily create the shared Hydra queue.
      def setup_parallel_manager(options = {})
        @parallel_manager ||= ::Typhoeus::Hydra.new(options)
      end

      # Run all queued requests, then drop the manager so subsequent
      # requests execute synchronously again.
      def run_parallel_requests
        @parallel_manager.run
        @parallel_manager = nil
      end

      # Issue a GET for +uri+. Outside #in_parallel the request runs
      # immediately; inside, it is only queued on the Hydra.
      def _get(uri, request_headers)
        response_class.new do |resp|
          is_async = in_parallel?
          setup_parallel_manager
          req = ::Typhoeus::Request.new(uri.to_s, :headers => request_headers, :method => :get)
          req.on_complete do |response|
            resp.process(response.body)
            # Build a lower-cased header hash: Hash[*flat_list] pairs up
            # consecutive elements of the flattened key/value list.
            resp.headers = Hash[*response.headers.split(/\r\n/).
              tap {|a| a.shift }. # drop the HTTP status line
              map! {|h| h.split(/:\s+/,2) }. # split key and value
              map! {|(k, v)| [k.downcase, v]}. # lower-case key
              tap {|a| a.flatten!}]
            resp.processed!
          end
          @parallel_manager.queue(req)
          if !is_async then run_parallel_requests end
        end
      end
    end
  end
end
|
require 'fastlane/erb_template_helper'

module Fastlane
  module Actions
    # Sends a build success/failure notification mail through the
    # Mailgun HTTP API, rendered from the bundled HTML ERB template.
    class MailgunAction < Action
      # Mail delivery works the same on every platform.
      def self.is_supported?(platform)
        true
      end

      def self.run(options)
        require 'rest_client'
        handle_params_transition(options)
        mailgunit(options)
      end

      def self.description
        "Send a success/error message to an email group"
      end

      def self.available_options
        [
          # Deprecated key, kept only for the transition period.
          FastlaneCore::ConfigItem.new(key: :mailgun_sandbox_domain,
                                       env_name: "MAILGUN_SANDBOX_POSTMASTER",
                                       optional: true,
                                       description: "Mailgun sandbox domain postmaster for your mail. Please use postmaster instead"),
          # Deprecated: use :postmaster instead.
          FastlaneCore::ConfigItem.new(key: :mailgun_sandbox_postmaster,
                                       env_name: "MAILGUN_SANDBOX_POSTMASTER",
                                       optional: true,
                                       description: "Mailgun sandbox domain postmaster for your mail. Please use postmaster instead"),
          # Deprecated: use :apikey instead.
          FastlaneCore::ConfigItem.new(key: :mailgun_apikey,
                                       env_name: "MAILGUN_APIKEY",
                                       optional: true,
                                       description: "Mailgun apikey for your mail. Please use apikey instead"),
          FastlaneCore::ConfigItem.new(key: :postmaster,
                                       env_name: "MAILGUN_SANDBOX_POSTMASTER",
                                       description: "Mailgun sandbox domain postmaster for your mail"),
          FastlaneCore::ConfigItem.new(key: :apikey,
                                       env_name: "MAILGUN_APIKEY",
                                       description: "Mailgun apikey for your mail"),
          FastlaneCore::ConfigItem.new(key: :to,
                                       env_name: "MAILGUN_TO",
                                       description: "Destination of your mail"),
          FastlaneCore::ConfigItem.new(key: :message,
                                       env_name: "MAILGUN_MESSAGE",
                                       description: "Message of your mail"),
          FastlaneCore::ConfigItem.new(key: :subject,
                                       env_name: "MAILGUN_SUBJECT",
                                       description: "Subject of your mail",
                                       optional: true,
                                       is_string: true,
                                       default_value: "fastlane build"),
          FastlaneCore::ConfigItem.new(key: :success,
                                       env_name: "MAILGUN_SUCCESS",
                                       description: "Was this build successful? (true/false)",
                                       optional: true,
                                       default_value: true,
                                       is_string: false),
          FastlaneCore::ConfigItem.new(key: :app_link,
                                       env_name: "MAILGUN_APP_LINK",
                                       description: "App Release link",
                                       optional: false,
                                       is_string: true),
          FastlaneCore::ConfigItem.new(key: :ci_build_link,
                                       env_name: "MAILGUN_CI_BUILD_LINK",
                                       description: "CI Build Link",
                                       optional: true,
                                       is_string: true)
        ]
      end

      def self.author
        "thiagolioy"
      end

      # Copies deprecated option keys onto their replacements. The
      # deprecation notice is printed only when a deprecated key is
      # actually in use (previously it was printed on every run).
      def self.handle_params_transition(options)
        if options[:mailgun_sandbox_postmaster]
          options[:postmaster] = options[:mailgun_sandbox_postmaster]
          puts "\nUsing :mailgun_sandbox_postmaster is deprecated, please change to :postmaster".yellow
        end
        if options[:mailgun_apikey]
          options[:apikey] = options[:mailgun_apikey]
          puts "\nUsing :mailgun_apikey is deprecated, please change to :apikey".yellow
        end
      end

      # POSTs the rendered mail to the Mailgun v3 messages endpoint and
      # returns the rendered HTML body. The sandbox domain is derived
      # from the postmaster address.
      def self.mailgunit(options)
        sandbox_domain = options[:postmaster].split("@").last
        # Render once and reuse -- the template was previously rendered
        # twice (once for the request, once for the return value).
        html = mail_teplate(options)
        RestClient.post "https://api:#{options[:apikey]}@api.mailgun.net/v3/#{sandbox_domain}/messages",
                        from: "Mailgun Sandbox<#{options[:postmaster]}>",
                        to: "#{options[:to]}",
                        subject: options[:subject],
                        html: html
        html
      end

      # Builds the template binding hash and renders the HTML mail body.
      # NOTE: the method name keeps the historical "teplate" typo so
      # existing callers keep working.
      def self.mail_teplate(options)
        hash = {
          author: Actions.git_author,
          last_commit: Actions.last_git_commit,
          message: options[:message],
          app_link: options[:app_link]
        }
        # Always forward :success and :ci_build_link so failed builds
        # still render their CI link. (Bug fix: :ci_build_link was
        # previously assigned from options[:success].)
        hash[:success] = options[:success]
        hash[:ci_build_link] = options[:ci_build_link]
        Fastlane::ErbTemplateHelper.render(
          Fastlane::ErbTemplateHelper.load("mailgun_html_template"),
          hash
        )
      end
    end
  end
end
Update mailgun.rb to fix missing values
Fixed bug: HTML template of mailgun doesn't have CI Build link if build has failed.
Fixed bug: CI Build link is pointing to the result of the build, and not the build link.
require 'fastlane/erb_template_helper'

module Fastlane
  module Actions
    # Sends a build success/failure notification mail through the
    # Mailgun HTTP API, rendered from the bundled HTML ERB template.
    class MailgunAction < Action
      # Mail delivery works the same on every platform.
      def self.is_supported?(platform)
        true
      end

      def self.run(options)
        require 'rest_client'
        handle_params_transition(options)
        mailgunit(options)
      end

      def self.description
        "Send a success/error message to an email group"
      end

      def self.available_options
        [
          # Deprecated key, kept only for the transition period.
          FastlaneCore::ConfigItem.new(key: :mailgun_sandbox_domain,
                                       env_name: "MAILGUN_SANDBOX_POSTMASTER",
                                       optional: true,
                                       description: "Mailgun sandbox domain postmaster for your mail. Please use postmaster instead"),
          # Deprecated: use :postmaster instead.
          FastlaneCore::ConfigItem.new(key: :mailgun_sandbox_postmaster,
                                       env_name: "MAILGUN_SANDBOX_POSTMASTER",
                                       optional: true,
                                       description: "Mailgun sandbox domain postmaster for your mail. Please use postmaster instead"),
          # Deprecated: use :apikey instead.
          FastlaneCore::ConfigItem.new(key: :mailgun_apikey,
                                       env_name: "MAILGUN_APIKEY",
                                       optional: true,
                                       description: "Mailgun apikey for your mail. Please use apikey instead"),
          FastlaneCore::ConfigItem.new(key: :postmaster,
                                       env_name: "MAILGUN_SANDBOX_POSTMASTER",
                                       description: "Mailgun sandbox domain postmaster for your mail"),
          FastlaneCore::ConfigItem.new(key: :apikey,
                                       env_name: "MAILGUN_APIKEY",
                                       description: "Mailgun apikey for your mail"),
          FastlaneCore::ConfigItem.new(key: :to,
                                       env_name: "MAILGUN_TO",
                                       description: "Destination of your mail"),
          FastlaneCore::ConfigItem.new(key: :message,
                                       env_name: "MAILGUN_MESSAGE",
                                       description: "Message of your mail"),
          FastlaneCore::ConfigItem.new(key: :subject,
                                       env_name: "MAILGUN_SUBJECT",
                                       description: "Subject of your mail",
                                       optional: true,
                                       is_string: true,
                                       default_value: "fastlane build"),
          FastlaneCore::ConfigItem.new(key: :success,
                                       env_name: "MAILGUN_SUCCESS",
                                       description: "Was this build successful? (true/false)",
                                       optional: true,
                                       default_value: true,
                                       is_string: false),
          FastlaneCore::ConfigItem.new(key: :app_link,
                                       env_name: "MAILGUN_APP_LINK",
                                       description: "App Release link",
                                       optional: false,
                                       is_string: true),
          FastlaneCore::ConfigItem.new(key: :ci_build_link,
                                       env_name: "MAILGUN_CI_BUILD_LINK",
                                       description: "CI Build Link",
                                       optional: true,
                                       is_string: true)
        ]
      end

      def self.author
        "thiagolioy"
      end

      # Copies deprecated option keys onto their replacements. The
      # deprecation notice is printed only when a deprecated key is
      # actually in use (previously it was printed on every run).
      def self.handle_params_transition(options)
        if options[:mailgun_sandbox_postmaster]
          options[:postmaster] = options[:mailgun_sandbox_postmaster]
          puts "\nUsing :mailgun_sandbox_postmaster is deprecated, please change to :postmaster".yellow
        end
        if options[:mailgun_apikey]
          options[:apikey] = options[:mailgun_apikey]
          puts "\nUsing :mailgun_apikey is deprecated, please change to :apikey".yellow
        end
      end

      # POSTs the rendered mail to the Mailgun v3 messages endpoint and
      # returns the rendered HTML body. The sandbox domain is derived
      # from the postmaster address.
      def self.mailgunit(options)
        sandbox_domain = options[:postmaster].split("@").last
        # Render once and reuse -- the template was previously rendered
        # twice (once for the request, once for the return value).
        html = mail_teplate(options)
        RestClient.post "https://api:#{options[:apikey]}@api.mailgun.net/v3/#{sandbox_domain}/messages",
                        from: "Mailgun Sandbox<#{options[:postmaster]}>",
                        to: "#{options[:to]}",
                        subject: options[:subject],
                        html: html
        html
      end

      # Builds the template binding hash and renders the HTML mail body.
      # :success and :ci_build_link are always forwarded so failed
      # builds still render their CI link.
      # NOTE: the method name keeps the historical "teplate" typo so
      # existing callers keep working.
      def self.mail_teplate(options)
        hash = {
          author: Actions.git_author,
          last_commit: Actions.last_git_commit,
          message: options[:message],
          app_link: options[:app_link]
        }
        hash[:success] = options[:success]
        hash[:ci_build_link] = options[:ci_build_link]
        Fastlane::ErbTemplateHelper.render(
          Fastlane::ErbTemplateHelper.load("mailgun_html_template"),
          hash
        )
      end
    end
  end
end
|
module Garage
  # Controller mixin that exposes Garage resources without OAuth
  # authentication. Include it *after* RestfulActions so it can cancel
  # RestfulActions' permission checks (see skip_before_filter below).
  module NoAuthentication
    extend ActiveSupport::Concern
    include Utils

    included do
      use Rack::AcceptDefault
      respond_to :json # , :msgpack
      self.responder = Garage::AppResponder
      attr_accessor :representation, :field_selector
      before_filter Garage::HypermediaFilter
      # Authentication/permission checking is intentionally disabled.
      skip_before_filter :require_action_permission_crud
      # Render Garage HTTP errors as a small JSON payload.
      rescue_from Garage::HTTPError do |exception|
        render json: { status_code: exception.status_code, error: exception.message }, status: exception.status
      end
    end

    # Resource owner id taken from the Resource-Owner-Id request header.
    # Raises Garage::BadRequest when the header is absent.
    def resource_owner_id
      request.headers["Resource-Owner-Id"] or raise Garage::BadRequest.new('Expected Resource-Owner-Id, but empty')
    end

    # Whether the client sent a Resource-Owner-Id header.
    def has_resource_owner_id?
      !!request.headers["Resource-Owner-Id"]
    end
  end
end
Add documents for no_authentication
# Public: Garage controller helper for non-authentication usage.
# Include this helper after RestfulActions so that this cancels
# RestfulActions' authentication logic.
module Garage
  module NoAuthentication
    extend ActiveSupport::Concern
    include Utils

    included do
      use Rack::AcceptDefault
      respond_to :json # , :msgpack
      self.responder = Garage::AppResponder
      attr_accessor :representation, :field_selector
      before_filter Garage::HypermediaFilter
      # Authentication/permission checking is intentionally disabled.
      skip_before_filter :require_action_permission_crud
      # Render Garage HTTP errors as a small JSON payload.
      rescue_from Garage::HTTPError do |exception|
        render json: { status_code: exception.status_code, error: exception.message }, status: exception.status
      end
    end

    # Public: The requested resource owner id from the Resource-Owner-Id
    # header. Raises Garage::BadRequest when the header is missing, so
    # call #has_resource_owner_id? first if the header is optional for
    # your endpoint.
    def resource_owner_id
      request.headers["Resource-Owner-Id"] or raise Garage::BadRequest.new('Expected Resource-Owner-Id, but empty')
    end

    # Public: Whether the client sent a Resource-Owner-Id header.
    def has_resource_owner_id?
      !!request.headers["Resource-Owner-Id"]
    end
  end
end
|
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
require "gcloud/gce"
require "gcloud/datastore/grpc_utils"
require "gcloud/datastore/credentials"
require "gcloud/datastore/service"
require "gcloud/datastore/commit"
require "gcloud/datastore/entity"
require "gcloud/datastore/key"
require "gcloud/datastore/query"
require "gcloud/datastore/gql_query"
require "gcloud/datastore/cursor"
require "gcloud/datastore/dataset/lookup_results"
require "gcloud/datastore/dataset/query_results"
module Gcloud
module Datastore
##
# # Dataset
#
# Dataset is the data saved in a project's Datastore.
# Dataset is analogous to a database in relational database world.
#
# Gcloud::Datastore::Dataset is the main object for interacting with
# Google Datastore. {Gcloud::Datastore::Entity} objects are created,
# read, updated, and deleted by Gcloud::Datastore::Dataset.
#
# See {Gcloud#datastore}
#
# @example
# require "gcloud"
#
# gcloud = Gcloud.new
# datastore = gcloud.datastore
#
# query = datastore.query("Task").
# where("done", "=", false)
#
# tasks = datastore.run query
#
class Dataset
##
# @private The gRPC Service object.
attr_accessor :service
##
# @private Creates a new Dataset instance.
#
# See {Gcloud#datastore}
# @private Wire up the gRPC-backed service for the given project.
# Raises ArgumentError when no project id is supplied.
def initialize project, credentials
  project_id = project.to_s # normalize non-string project identifiers
  fail ArgumentError, "project is missing" if project_id.empty?
  @service = Service.new project_id, credentials
end
##
# The Datastore project connected to.
#
# @example
# require "gcloud"
#
# gcloud = Gcloud.new "my-todo-project",
# "/path/to/keyfile.json"
#
# datastore = gcloud.datastore
# datastore.project #=> "my-todo-project"
#
# Project identifier, delegated to the underlying gRPC service.
def project
  service.project
end
##
# @private Default project.
# @private Default project id, resolved from the environment (in
# precedence order) and finally from GCE instance metadata.
def self.default_project
  env_project = %w(DATASTORE_DATASET DATASTORE_PROJECT
                   GCLOUD_PROJECT GOOGLE_CLOUD_PROJECT).
                map { |name| ENV[name] }.
                find { |value| value }
  env_project || Gcloud::GCE.project_id
end
##
# Generate IDs for a Key before creating an entity.
#
# @param [Key] incomplete_key A Key without `id` or `name` set.
# @param [String] count The number of new key IDs to create.
#
# @return [Array<Gcloud::Datastore::Key>]
#
# @example
# task_key = datastore.key "Task"
# task_keys = datastore.allocate_ids task_key, 5
#
def allocate_ids incomplete_key, count = 1
  # Only incomplete keys (no id/name yet) may receive generated ids.
  if incomplete_key.complete?
    fail Gcloud::Datastore::Error, "An incomplete key must be provided."
  end
  ensure_service!
  # Submit the same incomplete key +count+ times; the service returns
  # one completed key per submission.
  incomplete_keys = count.times.map { incomplete_key.to_grpc }
  allocate_res = service.allocate_ids(*incomplete_keys)
  allocate_res.keys.map { |key| Key.from_grpc key }
rescue GRPC::BadStatus => e
  # Translate gRPC failures into Gcloud's error hierarchy.
  raise Gcloud::Error.from_error(e)
end
##
# Persist one or more entities to the Datastore.
#
# @param [Entity] entities One or more entity objects to be saved.
#
# @return [Array<Gcloud::Datastore::Entity>]
#
# @example Insert a new entity:
# task = datastore.entity "Task" do |t|
# t["type"] = "Personal"
# t["done"] = false
# t["priority"] = 4
# t["description"] = "Learn Cloud Datastore"
# end
# task.key.id #=> nil
# datastore.save task
# task.key.id #=> 123456
#
# @example Insert multiple new entities in a batch:
# task1 = datastore.entity "Task" do |t|
# t["type"] = "Personal"
# t["done"] = false
# t["priority"] = 4
# t["description"] = "Learn Cloud Datastore"
# end
#
# task2 = datastore.entity "Task" do |t|
# t["type"] = "Personal"
# t["done"] = false
# t["priority"] = 5
# t["description"] = "Integrate Cloud Datastore"
# end
#
# task_key1, task_key2 = datastore.save(task1, task2).map(&:key)
#
# @example Update an existing entity:
# task = datastore.find "Task", "sampleTask"
# task["priority"] = 5
# datastore.save task
#
# Upsert one or more entities in a single commit; returns the
# persisted entities.
def save *entities
  commit do |batch|
    batch.save(*entities)
  end
end
alias_method :upsert, :save
##
# Insert one or more entities to the Datastore. An InvalidArgumentError
# will raised if the entities cannot be inserted.
#
# @param [Entity] entities One or more entity objects to be inserted.
#
# @return [Array<Gcloud::Datastore::Entity>]
#
# @example Insert a new entity:
# task = datastore.entity "Task" do |t|
# t["type"] = "Personal"
# t["done"] = false
# t["priority"] = 4
# t["description"] = "Learn Cloud Datastore"
# end
# task.key.id #=> nil
# datastore.insert task
# task.key.id #=> 123456
#
# @example Insert multiple new entities in a batch:
# task1 = datastore.entity "Task" do |t|
# t["type"] = "Personal"
# t["done"] = false
# t["priority"] = 4
# t["description"] = "Learn Cloud Datastore"
# end
#
# task2 = datastore.entity "Task" do |t|
# t["type"] = "Personal"
# t["done"] = false
# t["priority"] = 5
# t["description"] = "Integrate Cloud Datastore"
# end
#
# task_key1, task_key2 = datastore.insert(task1, task2).map(&:key)
#
# Insert one or more entities in a single commit; returns the
# persisted entities.
def insert *entities
  commit do |batch|
    batch.insert(*entities)
  end
end
##
# Update one or more entities to the Datastore. An InvalidArgumentError
# will raised if the entities cannot be updated.
#
# @param [Entity] entities One or more entity objects to be updated.
#
# @return [Array<Gcloud::Datastore::Entity>]
#
# @example Update an existing entity:
# task = datastore.find "Task", "sampleTask"
# task["done"] = true
# datastore.save task
#
# @example update multiple new entities in a batch:
# query = datastore.query("Task").where("done", "=", false)
# tasks = datastore.run query
# tasks.each { |t| t["done"] = true }
# datastore.update tasks
#
# Update one or more existing entities in a single commit; returns
# the persisted entities.
def update *entities
  commit do |batch|
    batch.update(*entities)
  end
end
##
# Remove entities from the Datastore.
#
# @param [Entity, Key] entities_or_keys One or more Entity or Key objects
# to remove.
#
# @return [Boolean] Returns `true` if successful
#
# @example
# gcloud = Gcloud.new
# datastore = gcloud.datastore
# datastore.delete task1, task2
#
# Delete the given entities and/or keys in a single commit. Always
# returns true; failures are raised, not returned.
def delete *entities_or_keys
  commit do |batch|
    batch.delete(*entities_or_keys)
  end
  true
end
##
# Make multiple changes in a single commit.
#
# @yield [commit] a block for making changes
# @yieldparam [Commit] commit The object that changes are made on
#
# @return [Array<Gcloud::Datastore::Entity>] The entities that were
# persisted.
#
# @example
# gcloud = Gcloud.new
# datastore = gcloud.datastore
# datastore.commit do |c|
# c.save task3, task4
# c.delete task1, task2
# end
#
# Collect mutations from the block into a Commit and send them to the
# service in one request. Returns the affected entities.
def commit
  return unless block_given?
  c = Commit.new
  yield c
  ensure_service!
  commit_res = service.commit c.mutations
  entities = c.entities
  # The service returns one mutation result per submitted mutation, in
  # order; copy any server-assigned keys back onto the local entities.
  returned_keys = commit_res.mutation_results.map(&:key)
  returned_keys.each_with_index do |key, index|
    next if entities[index].nil?
    entities[index].key = Key.from_grpc(key) unless key.nil?
  end
  # Freeze the keys of entities that are not persisted.
  # NOTE(review): freezing only the un-persisted branch looks
  # surprising -- confirm against Entity#persisted? semantics.
  entities.each { |e| e.key.freeze unless e.persisted? }
  entities
rescue GRPC::BadStatus => e
  # Translate gRPC failures into Gcloud's error hierarchy.
  raise Gcloud::Error.from_error(e)
end
##
# Retrieve an entity by key.
#
# @param [Key, String] key_or_kind A Key object or `kind` string value.
# @param [Integer, String, nil] id_or_name The Key's `id` or `name` value
# if a `kind` was provided in the first parameter.
# @param [Symbol] consistency The non-transactional read consistency to
# use. Cannot be set to `:strong` for global queries. Accepted values
# are `:eventual` and `:strong`.
#
# The default consistency depends on the type of lookup used. See
# [Eventual Consistency in Google Cloud
# Datastore](https://cloud.google.com/datastore/docs/articles/balancing-strong-and-eventual-consistency-with-google-cloud-datastore/#h.tf76fya5nqk8)
# for more information.
#
# @return [Gcloud::Datastore::Entity, nil]
#
# @example Finding an entity with a key:
# task_key = datastore.key "Task", "sampleTask"
# task = datastore.find task_key
#
# @example Finding an entity with a `kind` and `id`/`name`:
# task = datastore.find "Task", "sampleTask"
#
# Look up a single entity, either by a Key object or by kind plus
# id/name. Returns nil when no entity exists for the key.
def find key_or_kind, id_or_name = nil, consistency: nil
  lookup_key =
    if key_or_kind.is_a? Gcloud::Datastore::Key
      key_or_kind
    else
      Key.new key_or_kind, id_or_name
    end
  find_all(lookup_key, consistency: consistency).first
end
alias_method :get, :find
##
# Retrieve the entities for the provided keys. The order of results is
# undefined and has no relation to the order of `keys` arguments.
#
# @param [Key] keys One or more Key objects to find records for.
# @param [Symbol] consistency The non-transactional read consistency to
# use. Cannot be set to `:strong` for global queries. Accepted values
# are `:eventual` and `:strong`.
#
# The default consistency depends on the type of lookup used. See
# [Eventual Consistency in Google Cloud
# Datastore](https://cloud.google.com/datastore/docs/articles/balancing-strong-and-eventual-consistency-with-google-cloud-datastore/#h.tf76fya5nqk8)
# for more information.
#
# @return [Gcloud::Datastore::Dataset::LookupResults]
#
# @example
# gcloud = Gcloud.new
# datastore = gcloud.datastore
#
# task_key1 = datastore.key "Task", "sampleTask1"
# task_key2 = datastore.key "Task", "sampleTask2"
# tasks = datastore.find_all task_key1, task_key2
#
def find_all *keys, consistency: nil
  ensure_service!
  check_consistency! consistency
  lookup_res = service.lookup(*keys.map(&:to_grpc),
    consistency: consistency)
  # The lookup response splits results into three groups: entities that
  # were found, keys the service deferred, and keys with no entity.
  entities = to_gcloud_entities lookup_res.found
  deferred = to_gcloud_keys lookup_res.deferred
  missing = to_gcloud_entities lookup_res.missing
  LookupResults.new entities, deferred, missing
rescue GRPC::BadStatus => e
  # Translate gRPC failures into Gcloud's error hierarchy.
  raise Gcloud::Error.from_error(e)
end
alias_method :lookup, :find_all
##
# Retrieve entities specified by a Query.
#
# @param [Query, GqlQuery] query The object with the search criteria.
# @param [String] namespace The namespace the query is to run within.
# @param [Symbol] consistency The non-transactional read consistency to
# use. Cannot be set to `:strong` for global queries. Accepted values
# are `:eventual` and `:strong`.
#
# The default consistency depends on the type of query used. See
# [Eventual Consistency in Google Cloud
# Datastore](https://cloud.google.com/datastore/docs/articles/balancing-strong-and-eventual-consistency-with-google-cloud-datastore/#h.tf76fya5nqk8)
# for more information.
#
# @return [Gcloud::Datastore::Dataset::QueryResults]
#
# @example
# query = datastore.query("Task").
# where("done", "=", false)
# tasks = datastore.run query
#
# @example Run the query within a namespace with the `namespace` option:
# query = datastore.query("Task").
# where("done", "=", false)
# tasks = datastore.run query, namespace: "ns~todo-project"
#
# @example Run the query with a GQL string.
# gql_query = datastore.gql "SELECT * FROM Task WHERE done = @done",
# done: false
# tasks = datastore.run gql_query
#
# @example Run the GQL query within a namespace with `namespace` option:
# gql_query = datastore.gql "SELECT * FROM Task WHERE done = @done",
# done: false
# tasks = datastore.run gql_query, namespace: "ns~todo-project"
#
def run query, namespace: nil, consistency: nil
  ensure_service!
  # Only Query and GqlQuery know how to serialize themselves to gRPC.
  unless query.is_a?(Query) || query.is_a?(GqlQuery)
    fail ArgumentError, "Cannot run a #{query.class} object."
  end
  check_consistency! consistency
  query_res = service.run_query query.to_grpc, namespace,
    consistency: consistency
  # A dup of the gRPC query is handed to the results object; presumably
  # so pagination can mutate it without affecting the caller's query --
  # TODO(review): confirm in QueryResults.from_grpc.
  QueryResults.from_grpc query_res, service, namespace, query.to_grpc.dup
rescue GRPC::BadStatus => e
  # Translate gRPC failures into Gcloud's error hierarchy.
  raise Gcloud::Error.from_error(e)
end
alias_method :run_query, :run
##
# Creates a Datastore Transaction.
#
# @yield [tx] a block yielding a new transaction
# @yieldparam [Transaction] tx the transaction object
#
# @example Runs the given block in a database transaction:
# require "gcloud"
#
# gcloud = Gcloud.new
# datastore = gcloud.datastore
#
# task = datastore.entity "Task", "sampleTask" do |t|
# t["type"] = "Personal"
# t["done"] = false
# t["priority"] = 4
# t["description"] = "Learn Cloud Datastore"
# end
#
# datastore.transaction do |tx|
# if tx.find(task.key).nil?
# tx.save task
# end
# end
#
# @example If no block is given, a Transaction object is returned:
# require "gcloud"
#
# gcloud = Gcloud.new
# datastore = gcloud.datastore
#
# task = datastore.entity "Task", "sampleTask" do |t|
# t["type"] = "Personal"
# t["done"] = false
# t["priority"] = 4
# t["description"] = "Learn Cloud Datastore"
# end
#
# tx = datastore.transaction
# begin
# if tx.find(task.key).nil?
# tx.save task
# end
# tx.commit
# rescue
# tx.rollback
# end
#
# Runs the block against a new transaction and commits it. With no
# block, returns an uncommitted Transaction for manual control.
def transaction
  tx = Transaction.new service
  return tx unless block_given?
  begin
    yield tx
    tx.commit
  rescue => e
    # The block or the commit failed: attempt a rollback, and surface
    # both errors when the rollback itself also fails.
    begin
      tx.rollback
    rescue => re
      msg = "Transaction failed to commit and rollback."
      raise TransactionError.new(msg, commit_error: e, rollback_error: re)
    end
    raise TransactionError.new("Transaction failed to commit.",
      commit_error: e)
  end
end
##
# Create a new Query instance. This is a convenience method to make the
# creation of Query objects easier.
#
# @param [String] kinds The kind of entities to query. This is optional.
#
# @return [Gcloud::Datastore::Query]
#
# @example
# query = datastore.query("Task").
# where("done", "=", false)
# tasks = datastore.run query
#
# @example The previous example is equivalent to:
# query = Gcloud::Datastore::Query.new.
# kind("Task").
# where("done", "=", false)
# tasks = datastore.run query
#
# Build a new Query, optionally scoped to the given kinds.
def query *kinds
  new_query = Query.new
  new_query.kind(*kinds) unless kinds.empty?
  new_query
end
##
# Create a new GqlQuery instance. This is a convenience method to make the
# creation of GqlQuery objects easier.
#
# @param [String] query The GQL query string.
# @param [Hash] bindings Named bindings for the GQL query string, each
# key must match regex `[A-Za-z_$][A-Za-z_$0-9]*`, must not match regex
# `__.*__`, and must not be `""`. The value must be an `Object` that can
# be stored as an Entity property value, or a `Cursor`.
#
# @return [Gcloud::Datastore::GqlQuery]
#
# @example
# gql_query = datastore.gql "SELECT * FROM Task WHERE done = @done",
# done: false
# tasks = datastore.run gql_query
#
# @example The previous example is equivalent to:
# gql_query = Gcloud::Datastore::GqlQuery.new
# gql_query.query_string = "SELECT * FROM Task WHERE done = @done"
# gql_query.named_bindings = {done: false}
# tasks = datastore.run gql_query
#
# Build a new GqlQuery from a GQL string and optional named bindings.
def gql query, bindings = {}
  gql_query = GqlQuery.new
  gql_query.query_string = query
  gql_query.named_bindings = bindings unless bindings.empty?
  gql_query
end
##
# Create a new Key instance. This is a convenience method to make the
# creation of Key objects easier.
#
# @param [Array<Array(String,(String|Integer|nil))>] path An optional list
# of pairs for the key's path. Each pair may include the key's kind
# (String) and an id (Integer) or name (String). This is optional.
# @param [String] project The project of the Key. This is optional.
# @param [String] namespace namespace kind of the Key. This is optional.
#
# @return [Gcloud::Datastore::Key]
#
# @example
# task_key = datastore.key "Task", "sampleTask"
#
# @example The previous example is equivalent to:
# task_key = Gcloud::Datastore::Key.new "Task", "sampleTask"
#
# @example Create an empty key:
# key = datastore.key
#
# @example Create an incomplete key:
# key = datastore.key "User"
#
# @example Create a key with a parent:
# key = datastore.key [["TaskList", "default"], ["Task", "sampleTask"]]
# key.path #=> [["TaskList", "default"], ["Task", "sampleTask"]]
#
# @example Create a key with multi-level ancestry:
# key = datastore.key([
# ["User", "alice"],
# ["TaskList", "default"],
# ["Task", "sampleTask"]
# ])
# key.path #=> [["User", "alice"], ["TaskList", "default"], [ ... ]]
#
# @example Create an incomplete key with a parent:
# key = datastore.key "TaskList", "default", "Task"
# key.path #=> [["TaskList", "default"], ["Task", nil]]
#
# @example Create a key with a project and namespace:
# key = datastore.key ["TaskList", "default"], ["Task", "sampleTask"],
# project: "my-todo-project",
# namespace: "ns~todo-project"
# key.path #=> [["TaskList", "default"], ["Task", "sampleTask"]]
# key.project #=> "my-todo-project",
# key.namespace #=> "ns~todo-project"
#
# Build a Key from flattened kind/id-or-name pairs. The last pair is
# the key itself; any preceding pairs become the ancestor chain,
# constructed recursively via Key#parent.
def key *path, project: nil, namespace: nil
  pairs = path.flatten.each_slice(2).to_a # group in pairs
  kind, id_or_name = pairs.pop
  new_key = Key.new kind, id_or_name
  new_key.project = project
  new_key.namespace = namespace
  unless pairs.empty?
    new_key.parent = key pairs, project: project, namespace: namespace
  end
  new_key
end
##
# Create a new empty Entity instance. This is a convenience method to make
# the creation of Entity objects easier.
#
# @param [Key, Array<Array(String,(String|Integer|nil))>] key_or_path An
# optional list of pairs for the key's path. Each pair may include the #
# key's kind (String) and an id (Integer) or name (String). This is #
# optional.
# @param [String] project The project of the Key. This is optional.
# @param [String] namespace namespace kind of the Key. This is optional.
# @yield [entity] a block yielding a new entity
# @yieldparam [Entity] entity the newly created entity object
#
# @return [Gcloud::Datastore::Entity]
#
# @example
# task = datastore.entity
#
# @example The previous example is equivalent to:
# task = Gcloud::Datastore::Entity.new
#
# @example The key can also be passed in as an object:
# task_key = datastore.key "Task", "sampleTask"
# task = datastore.entity task_key
#
# @example Or the key values can be passed in as parameters:
# task = datastore.entity "Task", "sampleTask"
#
# @example The previous example is equivalent to:
# task_key = Gcloud::Datastore::Key.new "Task", "sampleTask"
# task = Gcloud::Datastore::Entity.new
# task.key = task_key
#
# @example The newly created entity can also be configured using a block:
# task = datastore.entity "Task", "sampleTask" do |t|
# t["type"] = "Personal"
# t["done"] = false
# t["priority"] = 4
# t["description"] = "Learn Cloud Datastore"
# end
#
# @example The previous example is equivalent to:
# task_key = Gcloud::Datastore::Key.new "Task", "sampleTask"
# task = Gcloud::Datastore::Entity.new
# task.key = task_key
# task["type"] = "Personal"
# task["done"] = false
# task["priority"] = 4
# task["description"] = "Learn Cloud Datastore"
#
# Build a new Entity, keyed either by a ready-made Key object or by
# kind/id-or-name path elements. Yields the entity when given a block.
def entity *key_or_path, project: nil, namespace: nil
  new_entity = Entity.new
  first_element = key_or_path.flatten.first
  new_entity.key =
    if first_element.is_a? Gcloud::Datastore::Key
      first_element
    else
      key key_or_path, project: project, namespace: namespace
    end
  yield new_entity if block_given?
  new_entity
end
protected
##
# @private Raise an error unless an active connection to the service is
# available.
# @private Fail fast when no service connection has been configured.
def ensure_service!
  return if service
  fail "Must have active connection to service"
end
##
# Convenience method to convert GRPC entities to Gcloud entities.
# Convenience method to convert GRPC entity results to Gcloud entities.
def to_gcloud_entities grpc_entity_results
  # Entities are nested in a result object, so unwrap each one.
  Array(grpc_entity_results).map do |result|
    # TODO: Make this return an EntityResult with cursor...
    Entity.from_grpc result.entity
  end
end
##
# Convenience method to convert GRPC keys to Gcloud keys.
# Convert gRPC key messages into Gcloud Key objects. Unlike entity
# results, keys arrive bare (not wrapped in a result object).
def to_gcloud_keys grpc_keys
  Array(grpc_keys).map do |grpc_key|
    Key.from_grpc grpc_key
  end
end
# Validate the +consistency+ option: only :eventual, :strong or nil
# are accepted; anything else raises ArgumentError.
def check_consistency! consistency
  return if [:eventual, :strong, nil].include? consistency
  fail ArgumentError,
       format("Consistency must be :eventual or :strong, not %s.",
              consistency.inspect)
end
end
end
end
Add docs snippet for eventual_consistent_query
[refs #601]
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
require "gcloud/gce"
require "gcloud/datastore/grpc_utils"
require "gcloud/datastore/credentials"
require "gcloud/datastore/service"
require "gcloud/datastore/commit"
require "gcloud/datastore/entity"
require "gcloud/datastore/key"
require "gcloud/datastore/query"
require "gcloud/datastore/gql_query"
require "gcloud/datastore/cursor"
require "gcloud/datastore/dataset/lookup_results"
require "gcloud/datastore/dataset/query_results"
module Gcloud
module Datastore
##
# # Dataset
#
# Dataset is the data saved in a project's Datastore.
# Dataset is analogous to a database in relational database world.
#
# Gcloud::Datastore::Dataset is the main object for interacting with
# Google Datastore. {Gcloud::Datastore::Entity} objects are created,
# read, updated, and deleted by Gcloud::Datastore::Dataset.
#
# See {Gcloud#datastore}
#
# @example
# require "gcloud"
#
# gcloud = Gcloud.new
# datastore = gcloud.datastore
#
# query = datastore.query("Task").
# where("done", "=", false)
#
# tasks = datastore.run query
#
class Dataset
##
# @private The gRPC Service object.
attr_accessor :service
##
# @private Creates a new Dataset instance.
#
# See {Gcloud#datastore}
def initialize project, credentials
project = project.to_s # Always cast to a string
fail ArgumentError, "project is missing" if project.empty?
@service = Service.new project, credentials
end
##
# The Datastore project connected to.
#
# @example
# require "gcloud"
#
# gcloud = Gcloud.new "my-todo-project",
# "/path/to/keyfile.json"
#
# datastore = gcloud.datastore
# datastore.project #=> "my-todo-project"
#
def project
service.project
end
##
# @private Default project, taken from the first project environment
# variable that is set, falling back to the GCE instance project id.
def self.default_project
  env_names = %w(DATASTORE_DATASET DATASTORE_PROJECT
                 GCLOUD_PROJECT GOOGLE_CLOUD_PROJECT)
  from_env = nil
  env_names.each do |name|
    from_env = ENV[name]
    break if from_env
  end
  from_env || Gcloud::GCE.project_id
end
##
# Generate IDs for a Key before creating an entity.
#
# @param [Key] incomplete_key A Key without `id` or `name` set.
# @param [Integer] count The number of new key IDs to create.
#
# @return [Array<Gcloud::Datastore::Key>]
#
# @example
# task_key = datastore.key "Task"
# task_keys = datastore.allocate_ids task_key, 5
#
def allocate_ids incomplete_key, count = 1
if incomplete_key.complete?
fail Gcloud::Datastore::Error, "An incomplete key must be provided."
end
ensure_service!
incomplete_keys = count.times.map { incomplete_key.to_grpc }
allocate_res = service.allocate_ids(*incomplete_keys)
allocate_res.keys.map { |key| Key.from_grpc key }
rescue GRPC::BadStatus => e
raise Gcloud::Error.from_error(e)
end
##
# Persist one or more entities to the Datastore (insert-or-update).
# Entities with an incomplete key have their key completed (an id
# assigned) by the service when the commit returns.
#
# @param [Entity] entities One or more entity objects to be saved.
#
# @return [Array<Gcloud::Datastore::Entity>] the persisted entities
#
# @example Insert or update an entity:
#   task = datastore.entity "Task" do |t|
#     t["type"] = "Personal"
#     t["done"] = false
#     t["priority"] = 4
#     t["description"] = "Learn Cloud Datastore"
#   end
#   datastore.save task
#
# @example Save multiple entities in a single batch:
#   task_key1, task_key2 = datastore.save(task1, task2).map(&:key)
#
def save *entities
  commit do |c|
    c.save(*entities)
  end
end
alias_method :upsert, :save
##
# Insert one or more entities to the Datastore. An InvalidArgumentError
# will be raised if the entities cannot be inserted.
#
# @param [Entity] entities One or more entity objects to be inserted.
#
# @return [Array<Gcloud::Datastore::Entity>]
#
# @example Insert a new entity:
# task = datastore.entity "Task" do |t|
# t["type"] = "Personal"
# t["done"] = false
# t["priority"] = 4
# t["description"] = "Learn Cloud Datastore"
# end
# task.key.id #=> nil
# datastore.insert task
# task.key.id #=> 123456
#
# @example Insert multiple new entities in a batch:
# task1 = datastore.entity "Task" do |t|
# t["type"] = "Personal"
# t["done"] = false
# t["priority"] = 4
# t["description"] = "Learn Cloud Datastore"
# end
#
# task2 = datastore.entity "Task" do |t|
# t["type"] = "Personal"
# t["done"] = false
# t["priority"] = 5
# t["description"] = "Integrate Cloud Datastore"
# end
#
# task_key1, task_key2 = datastore.insert(task1, task2).map(&:key)
#
def insert *entities
commit { |c| c.insert(*entities) }
end
##
# Update one or more entities to the Datastore. An InvalidArgumentError
# will be raised if the entities cannot be updated.
#
# @param [Entity] entities One or more entity objects to be updated.
#
# @return [Array<Gcloud::Datastore::Entity>]
#
# @example Update an existing entity:
#   task = datastore.find "Task", "sampleTask"
#   task["done"] = true
#   datastore.update task
#
# @example Update multiple entities in a batch:
# query = datastore.query("Task").where("done", "=", false)
# tasks = datastore.run query
# tasks.each { |t| t["done"] = true }
# datastore.update tasks
#
def update *entities
commit { |c| c.update(*entities) }
end
##
# Remove entities from the Datastore.
#
# @param [Entity, Key] entities_or_keys One or more Entity or Key objects
#   to remove.
#
# @return [Boolean] Returns `true` if successful
#
# @example
#   gcloud = Gcloud.new
#   datastore = gcloud.datastore
#   datastore.delete task1, task2
#
def delete *entities_or_keys
  commit do |c|
    c.delete(*entities_or_keys)
  end
  true
end
##
# Make multiple changes in a single commit.
#
# @yield [commit] a block for making changes
# @yieldparam [Commit] commit The object that changes are made on
#
# @return [Array<Gcloud::Datastore::Entity>] The entities that were
#   persisted. Returns nil (without committing) when no block is given.
#
# @raise [Gcloud::Error] when the underlying gRPC call fails.
#
# @example
#   gcloud = Gcloud.new
#   datastore = gcloud.datastore
#   datastore.commit do |c|
#     c.save task3, task4
#     c.delete task1, task2
#   end
#
def commit
  return unless block_given?
  c = Commit.new
  yield c
  ensure_service!
  commit_res = service.commit c.mutations
  entities = c.entities
  # Pair each returned key with the entity at the same index. This
  # assumes the service reports mutation results in submission order —
  # the index-based pairing below depends on it.
  returned_keys = commit_res.mutation_results.map(&:key)
  returned_keys.each_with_index do |key, index|
    # Some mutations (e.g. deletes) have no tracked entity at this index.
    next if entities[index].nil?
    # A non-nil returned key means the service completed the entity's
    # key (assigned an id), so adopt the server-side key.
    entities[index].key = Key.from_grpc(key) unless key.nil?
  end
  # Freeze the key of each entity not yet marked persisted, so the
  # stored identity cannot be mutated afterwards.
  entities.each { |e| e.key.freeze unless e.persisted? }
  entities
rescue GRPC::BadStatus => e
  # Translate transport-level errors into the Gcloud error hierarchy.
  raise Gcloud::Error.from_error(e)
end
##
# Retrieve an entity by key.
#
# @param [Key, String] key_or_kind A Key object or `kind` string value.
# @param [Integer, String, nil] id_or_name The Key's `id` or `name` value
#   if a `kind` was provided in the first parameter.
# @param [Symbol] consistency The non-transactional read consistency to
#   use. Cannot be set to `:strong` for global queries. Accepted values
#   are `:eventual` and `:strong`.
#
# @return [Gcloud::Datastore::Entity, nil]
#
# @example Finding an entity with a key:
#   task_key = datastore.key "Task", "sampleTask"
#   task = datastore.find task_key
#
# @example Finding an entity with a `kind` and `id`/`name`:
#   task = datastore.find "Task", "sampleTask"
#
def find key_or_kind, id_or_name = nil, consistency: nil
  key = if key_or_kind.is_a? Gcloud::Datastore::Key
          key_or_kind
        else
          Key.new key_or_kind, id_or_name
        end
  find_all(key, consistency: consistency).first
end
alias_method :get, :find
##
# Retrieve the entities for the provided keys. The order of results is
# undefined and has no relation to the order of `keys` arguments.
#
# @param [Key] keys One or more Key objects to find records for.
# @param [Symbol] consistency The non-transactional read consistency to
#   use. Cannot be set to `:strong` for global queries. Accepted values
#   are `:eventual` and `:strong`.
#
#   The default consistency depends on the type of lookup used. See
#   [Eventual Consistency in Google Cloud
#   Datastore](https://cloud.google.com/datastore/docs/articles/balancing-strong-and-eventual-consistency-with-google-cloud-datastore/#h.tf76fya5nqk8)
#   for more information.
#
# @raise [ArgumentError] if `consistency` is not `:eventual`, `:strong`,
#   or nil.
#
# @return [Gcloud::Datastore::Dataset::LookupResults]
#
# @example
#   gcloud = Gcloud.new
#   datastore = gcloud.datastore
#
#   task_key1 = datastore.key "Task", "sampleTask1"
#   task_key2 = datastore.key "Task", "sampleTask2"
#   tasks = datastore.find_all task_key1, task_key2
#
def find_all *keys, consistency: nil
  ensure_service!
  check_consistency! consistency
  lookup_res = service.lookup(*keys.map(&:to_grpc),
                              consistency: consistency)
  # The lookup response partitions keys three ways: found entities,
  # keys the service deferred, and keys with no stored entity.
  entities = to_gcloud_entities lookup_res.found
  deferred = to_gcloud_keys lookup_res.deferred
  missing = to_gcloud_entities lookup_res.missing
  LookupResults.new entities, deferred, missing
rescue GRPC::BadStatus => e
  # Translate transport-level errors into the Gcloud error hierarchy.
  raise Gcloud::Error.from_error(e)
end
alias_method :lookup, :find_all
##
# Retrieve entities specified by a Query.
#
# @param [Query, GqlQuery] query The object with the search criteria.
# @param [String] namespace The namespace the query is to run within.
# @param [Symbol] consistency The non-transactional read consistency to
# use. Cannot be set to `:strong` for global queries. Accepted values
# are `:eventual` and `:strong`.
#
# The default consistency depends on the type of query used. See
# [Eventual Consistency in Google Cloud
# Datastore](https://cloud.google.com/datastore/docs/articles/balancing-strong-and-eventual-consistency-with-google-cloud-datastore/#h.tf76fya5nqk8)
# for more information.
#
# @return [Gcloud::Datastore::Dataset::QueryResults]
#
# @example
# query = datastore.query("Task").
# where("done", "=", false)
# tasks = datastore.run query
#
# @example Run an ancestor query with eventual consistency:
# task_list_key = datastore.key "TaskList", "default"
#   query = datastore.query("Task").
#     ancestor(task_list_key)
#
# tasks = datastore.run query, consistency: :eventual
#
# @example Run the query within a namespace with the `namespace` option:
# query = datastore.query("Task").
# where("done", "=", false)
# tasks = datastore.run query, namespace: "ns~todo-project"
#
# @example Run the query with a GQL string.
# gql_query = datastore.gql "SELECT * FROM Task WHERE done = @done",
# done: false
# tasks = datastore.run gql_query
#
# @example Run the GQL query within a namespace with `namespace` option:
# gql_query = datastore.gql "SELECT * FROM Task WHERE done = @done",
# done: false
# tasks = datastore.run gql_query, namespace: "ns~todo-project"
#
def run query, namespace: nil, consistency: nil
ensure_service!
unless query.is_a?(Query) || query.is_a?(GqlQuery)
fail ArgumentError, "Cannot run a #{query.class} object."
end
check_consistency! consistency
query_res = service.run_query query.to_grpc, namespace,
consistency: consistency
QueryResults.from_grpc query_res, service, namespace, query.to_grpc.dup
rescue GRPC::BadStatus => e
raise Gcloud::Error.from_error(e)
end
alias_method :run_query, :run
##
# Creates a Datastore Transaction.
#
# @yield [tx] a block yielding a new transaction
# @yieldparam [Transaction] tx the transaction object
#
# @example Runs the given block in a database transaction:
# require "gcloud"
#
# gcloud = Gcloud.new
# datastore = gcloud.datastore
#
# task = datastore.entity "Task", "sampleTask" do |t|
# t["type"] = "Personal"
# t["done"] = false
# t["priority"] = 4
# t["description"] = "Learn Cloud Datastore"
# end
#
# datastore.transaction do |tx|
# if tx.find(task.key).nil?
# tx.save task
# end
# end
#
# @example If no block is given, a Transaction object is returned:
# require "gcloud"
#
# gcloud = Gcloud.new
# datastore = gcloud.datastore
#
# task = datastore.entity "Task", "sampleTask" do |t|
# t["type"] = "Personal"
# t["done"] = false
# t["priority"] = 4
# t["description"] = "Learn Cloud Datastore"
# end
#
# tx = datastore.transaction
# begin
# if tx.find(task.key).nil?
# tx.save task
# end
# tx.commit
# rescue
# tx.rollback
# end
#
def transaction
tx = Transaction.new service
return tx unless block_given?
begin
yield tx
tx.commit
rescue => e
begin
tx.rollback
rescue => re
msg = "Transaction failed to commit and rollback."
raise TransactionError.new(msg, commit_error: e, rollback_error: re)
end
raise TransactionError.new("Transaction failed to commit.",
commit_error: e)
end
end
##
# Create a new Query instance. This is a convenience method to make the
# creation of Query objects easier.
#
# @param [String] kinds The kind of entities to query. This is optional.
#
# @return [Gcloud::Datastore::Query]
#
# @example
#   query = datastore.query("Task").
#     where("done", "=", false)
#   tasks = datastore.run query
#
# @example The previous example is equivalent to:
#   query = Gcloud::Datastore::Query.new.
#     kind("Task").
#     where("done", "=", false)
#   tasks = datastore.run query
#
def query *kinds
  Query.new.tap do |q|
    q.kind(*kinds) unless kinds.empty?
  end
end
##
# Create a new GqlQuery instance. This is a convenience method to make
# the creation of GqlQuery objects easier.
#
# @param [String] query The GQL query string.
# @param [Hash] bindings Named bindings for the GQL query string, each
#   key must match regex `[A-Za-z_$][A-Za-z_$0-9]*`, must not match regex
#   `__.*__`, and must not be `""`. The value must be an `Object` that can
#   be stored as an Entity property value, or a `Cursor`.
#
# @return [Gcloud::Datastore::GqlQuery]
#
# @example
#   gql_query = datastore.gql "SELECT * FROM Task WHERE done = @done",
#                             done: false
#   tasks = datastore.run gql_query
#
def gql query, bindings = {}
  gql_query = GqlQuery.new
  gql_query.query_string = query
  gql_query.named_bindings = bindings unless bindings.empty?
  gql_query
end
##
# Create a new Key instance. This is a convenience method to make the
# creation of Key objects easier.
#
# @param [Array<Array(String,(String|Integer|nil))>] path An optional list
# of pairs for the key's path. Each pair may include the key's kind
# (String) and an id (Integer) or name (String). This is optional.
# @param [String] project The project of the Key. This is optional.
# @param [String] namespace namespace kind of the Key. This is optional.
#
# @return [Gcloud::Datastore::Key]
#
# @example
# task_key = datastore.key "Task", "sampleTask"
#
# @example The previous example is equivalent to:
# task_key = Gcloud::Datastore::Key.new "Task", "sampleTask"
#
# @example Create an empty key:
# key = datastore.key
#
# @example Create an incomplete key:
# key = datastore.key "User"
#
# @example Create a key with a parent:
# key = datastore.key [["TaskList", "default"], ["Task", "sampleTask"]]
# key.path #=> [["TaskList", "default"], ["Task", "sampleTask"]]
#
# @example Create a key with multi-level ancestry:
# key = datastore.key([
# ["User", "alice"],
# ["TaskList", "default"],
# ["Task", "sampleTask"]
# ])
# key.path #=> [["User", "alice"], ["TaskList", "default"], [ ... ]]
#
# @example Create an incomplete key with a parent:
# key = datastore.key "TaskList", "default", "Task"
# key.path #=> [["TaskList", "default"], ["Task", nil]]
#
# @example Create a key with a project and namespace:
# key = datastore.key ["TaskList", "default"], ["Task", "sampleTask"],
# project: "my-todo-project",
# namespace: "ns~todo-project"
# key.path #=> [["TaskList", "default"], ["Task", "sampleTask"]]
# key.project #=> "my-todo-project",
# key.namespace #=> "ns~todo-project"
#
def key *path, project: nil, namespace: nil
path = path.flatten.each_slice(2).to_a # group in pairs
kind, id_or_name = path.pop
Key.new(kind, id_or_name).tap do |k|
k.project = project
k.namespace = namespace
unless path.empty?
k.parent = key path, project: project, namespace: namespace
end
end
end
##
# Create a new empty Entity instance. This is a convenience method to make
# the creation of Entity objects easier.
#
# @param [Key, Array<Array(String,(String|Integer|nil))>] key_or_path An
#   optional list of pairs for the key's path. Each pair may include the
#   key's kind (String) and an id (Integer) or name (String). This is
#   optional.
# @param [String] project The project of the Key. This is optional.
# @param [String] namespace namespace kind of the Key. This is optional.
# @yield [entity] a block yielding a new entity
# @yieldparam [Entity] entity the newly created entity object
#
# @return [Gcloud::Datastore::Entity]
#
# @example
# task = datastore.entity
#
# @example The previous example is equivalent to:
# task = Gcloud::Datastore::Entity.new
#
# @example The key can also be passed in as an object:
# task_key = datastore.key "Task", "sampleTask"
# task = datastore.entity task_key
#
# @example Or the key values can be passed in as parameters:
# task = datastore.entity "Task", "sampleTask"
#
# @example The previous example is equivalent to:
# task_key = Gcloud::Datastore::Key.new "Task", "sampleTask"
# task = Gcloud::Datastore::Entity.new
# task.key = task_key
#
# @example The newly created entity can also be configured using a block:
# task = datastore.entity "Task", "sampleTask" do |t|
# t["type"] = "Personal"
# t["done"] = false
# t["priority"] = 4
# t["description"] = "Learn Cloud Datastore"
# end
#
# @example The previous example is equivalent to:
# task_key = Gcloud::Datastore::Key.new "Task", "sampleTask"
# task = Gcloud::Datastore::Entity.new
# task.key = task_key
# task["type"] = "Personal"
# task["done"] = false
# task["priority"] = 4
# task["description"] = "Learn Cloud Datastore"
#
def entity *key_or_path, project: nil, namespace: nil
entity = Entity.new
# Set the key
if key_or_path.flatten.first.is_a? Gcloud::Datastore::Key
entity.key = key_or_path.flatten.first
else
entity.key = key key_or_path, project: project, namespace: namespace
end
yield entity if block_given?
entity
end
protected
##
# @private Raise an error unless an active connection to the service is
# available.
def ensure_service!
  return if service
  fail "Must have active connection to service"
end
##
# Convenience method to convert GRPC entities to Gcloud entities.
def to_gcloud_entities grpc_entity_results
  # Each GRPC result wraps its entity, so unwrap before converting.
  # TODO: Make this return an EntityResult with cursor...
  results = Array(grpc_entity_results)
  results.map { |result| Entity.from_grpc result.entity }
end
##
# Convenience method to convert GRPC keys to Gcloud keys.
def to_gcloud_keys grpc_keys
  # Keys arrive as a flat list, not wrapped like entity results.
  Array(grpc_keys).map do |grpc_key|
    Key.from_grpc grpc_key
  end
end
##
# @private Validate the +consistency+ option. Returns nil for the
# accepted values (:eventual, :strong, or nil); raises ArgumentError
# for anything else.
def check_consistency! consistency
  return if [:eventual, :strong, nil].include? consistency
  fail ArgumentError,
       format("Consistency must be :eventual or :strong, not %s.",
              consistency.inspect)
end
end
end
end
|
require 'ostruct'
require 'metadata/VmConfig/VmConfig'
require 'disk/MiqDisk'
require 'VolumeManager/MiqVolumeManager'
require 'fs/MiqMountManager'
require 'metadata/MIQExtract/MIQExtract'
class MiqVm
attr_reader :vmConfig, :vmConfigFile, :vim, :vimVm, :rhevm, :rhevmVm, :diskInitErrors, :wholeDisks
# Build a MiqVm for the given configuration, dispatching on whichever
# provider handle is present in ost: VIM (VMware), RHEVM, or SCVMM
# (HyperV); otherwise vmCfg is used directly as a config file/hash.
#
# vmCfg - a config file path, provider VM reference, or config Hash.
# ost   - optional OpenStruct of context (provider handles, snapId,
#         openParent, rawDisk, ...).
def initialize(vmCfg, ost = nil)
  @ost = ost || OpenStruct.new
  # NOTE(review): @ost cannot be nil after the assignment above, so this
  # debug line looks unreachable — confirm and consider removing.
  $log.debug "MiqVm::initialize: @ost = nil" if $log && !@ost
  @vmDisks = nil
  @wholeDisks = []
  @rootTrees = nil
  @volumeManager = nil
  @applianceVolumeManager = nil
  @vmConfigFile = ""
  @diskInitErrors = {}
  # A non-Hash vmCfg is treated as a path to the VM's config file.
  unless vmCfg.kind_of?(Hash)
    @vmConfigFile = vmCfg
    @vmDir = File.dirname(vmCfg)
  end
  $log.debug "MiqVm::initialize: @ost.openParent = #{@ost.openParent}" if $log
  #
  # If we're passed an MiqVim object, then use VIM to obtain the Vm's
  # configuration through the instantiated server.
  # If we're passed a snapshot ID, then obtain the configuration of the
  # VM when the snapshot was taken.
  #
  # TODO: move to MiqVmwareVm
  if (@vim = @ost.miqVim)
    $log.debug "MiqVm::initialize: accessing VM through server: #{@vim.server}" if $log.debug?
    @vimVm = @vim.getVimVm(vmCfg)
    $log.debug "MiqVm::initialize: setting @ost.miqVimVm = #{@vimVm.class}" if $log.debug?
    @ost.miqVimVm = @vimVm
    @vmConfig = VmConfig.new(@vimVm.getCfg(@ost.snapId))
  # TODO: move this to MiqRhevmVm.
  elsif (@rhevm = @ost.miqRhevm)
    $log.debug "MiqVm::initialize: accessing VM through RHEVM server" if $log.debug?
    $log.debug "MiqVm::initialize: vmCfg = #{vmCfg}"
    @rhevmVm = @rhevm.get_vm(vmCfg)
    $log.debug "MiqVm::initialize: setting @ost.miqRhevmVm = #{@rhevmVm.class}" if $log.debug?
    @ost.miqRhevmVm = @rhevmVm
    # getCfg is not defined in this file — presumably supplied by a
    # provider-specific subclass or mixin. TODO confirm.
    @vmConfig = VmConfig.new(getCfg(@ost.snapId))
    $log.debug "MiqVm::initialize: @vmConfig.getHash = #{@vmConfig.getHash.inspect}"
    $log.debug "MiqVm::initialize: @vmConfig.getDiskFileHash = #{@vmConfig.getDiskFileHash.inspect}"
  # TODO: move this to miq_scvmm_vm
  elsif (@scvmm = @ost.miq_scvmm)
    $log.debug "MiqVm::initialize: accessing VM through HyperV server" if $log.debug?
    @vmConfig = VmConfig.new(getCfg(@ost.snapId))
    # NOTE(review): @scvmm_vm is never assigned in this class, so this
    # always logs NilClass; should this branch set @ost.miq_scvmm_vm
    # like the VIM/RHEVM branches do? Confirm.
    $log.debug "MiqVm::initialize: setting @ost.miq_scvmm_vm = #{@scvmm_vm.class}" if $log.debug?
  else
    @vimVm = nil
    @vmConfig = VmConfig.new(vmCfg)
  end
end # def initialize
# Lazily open the VM's disks. On first call, platform-specific volume
# support is mounted before the disk files are opened.
def vmDisks
  return @vmDisks if @vmDisks
  @volMgrPS = VolMgrPlatformSupport.new(@vmConfig.configFile, @ost)
  @volMgrPS.preMount
  @vmDisks = openDisks(@vmConfig.getDiskFileHash)
end
#
# Open each of the VM's disk files and collect the resulting physical
# volumes: whole disks when unpartitioned, otherwise their partitions.
# Disks that fail to open are skipped and recorded in @diskInitErrors.
#
# diskFiles - Hash mapping a disk's hardware tag (dtag) to its file path.
#
# Returns an Array of physical volume objects.
#
def openDisks(diskFiles)
  pVolumes = []
  $log.debug "openDisks: no disk files supplied." unless diskFiles
  #
  # Build a list of the VM's physical volumes.
  #
  diskFiles.each do |dtag, df|
    $log.debug "openDisks: processing disk file (#{dtag}): #{df}"
    dInfo = OpenStruct.new
    if @ost.miqVim
      dInfo.vixDiskInfo = {}
      dInfo.vixDiskInfo[:fileName] = @ost.miqVim.datastorePath(df)
      if @ost.miqVimVm
        @vdlConnection = @ost.miqVimVm.vdlVcConnection unless @vdlConnection
      else
        @vdlConnection = @ost.miqVim.vdlConnection unless @vdlConnection
      end
      # Log the resolved path once here instead of duplicating the
      # message (with only a VC/ESX label differing) in both branches.
      $log.debug "openDisks: using disk file path: #{dInfo.vixDiskInfo[:fileName]}"
      dInfo.vixDiskInfo[:connection] = @vdlConnection
    elsif @ost.miq_hyperv
      init_disk_info(dInfo, df)
    else
      dInfo.fileName = df
      disk_format = @vmConfig.getHash["#{dtag}.format"] # Set by rhevm for iscsi and fcp disks
      dInfo.format = disk_format unless disk_format.blank?
    end
    mode = @vmConfig.getHash["#{dtag}.mode"]
    dInfo.hardwareId = dtag
    dInfo.baseOnly = @ost.openParent unless mode && mode["independent"]
    dInfo.rawDisk = @ost.rawDisk
    $log.debug "MiqVm::openDisks: dInfo.baseOnly = #{dInfo.baseOnly}"
    begin
      d = applianceVolumeManager && applianceVolumeManager.lvHash[dInfo.fileName] if @rhevm
      if d
        $log.debug "MiqVm::openDisks: using applianceVolumeManager for #{dInfo.fileName}" if $log.debug?
        d.dInfo.fileName = dInfo.fileName
        d.dInfo.hardwareId = dInfo.hardwareId
        d.dInfo.baseOnly = dInfo.baseOnly
        d.dInfo.format = dInfo.format if dInfo.format
        d.dInfo.applianceVolumeManager = applianceVolumeManager
        #
        # Here, we need to probe the disk to determine its data format,
        # QCOW for example. If the disk format is not flat, push a disk
        # supporting the format on top of this disk. Then set d to point
        # to the new top disk.
        #
        d = d.pushFormatSupport
      else
        d = MiqDisk.getDisk(dInfo)
        # I am not sure if getting a nil handle back should throw an error or not.
        # For now I am just skipping to the next disk. (GMM)
        next if d.nil?
      end
    rescue => err
      $log.error "Couldn't open disk file: #{df}"
      $log.error err.to_s
      $log.debug err.backtrace.join("\n")
      @diskInitErrors[df] = err.to_s
      next
    end
    @wholeDisks << d
    p = d.getPartitions
    if p.empty?
      #
      # If the disk has no partitions, the whole disk can be a single volume.
      #
      pVolumes << d
    else
      #
      # If the disk is partitioned, the partitions are physical volumes,
      # but not the whole disk.
      #
      pVolumes.concat(p)
    end
  end
  pVolumes
end # def openDisks
# Mount the VM's volumes and return the detected guest OS root trees,
# memoizing the result and sharing it with the volume manager.
def rootTrees
  @rootTrees ||= begin
    trees = MiqMountManager.mountVolumes(volumeManager, @vmConfig, @ost)
    volumeManager.rootTrees = trees
    trees
  end
end
# Lazily construct the volume manager over this VM's disks.
def volumeManager
  return @volumeManager if @volumeManager
  @volumeManager = MiqVolumeManager.new(vmDisks)
end
# Volume manager built from the appliance's native physical volumes.
# Returns nil when NFS storage is mounted.
def applianceVolumeManager
  return nil if @ost.nfs_storage_mounted
  return @applianceVolumeManager if @applianceVolumeManager
  @applianceVolumeManager = MiqVolumeManager.fromNativePvs
end
# Snapshot information for the VM, or nil when the VM is not backed by
# a VIM-managed VM (@vimVm unset).
#
# refresh - pass true to force the provider to re-query snapshot info.
def snapshots(refresh = false)
  # The original checked @vimVm twice (an early return plus a modifier
  # `if`); a single conditional covers both cases and still yields nil
  # when @vimVm is unset.
  @vimVm.snapshotInfo(refresh) if @vimVm
end
# Tear down everything vmDisks/rootTrees built up, roughly in reverse
# order of construction: disks first, then the volume managers layered
# on them, the VDL connection, platform mount support, and finally the
# VIM VM handle. Safe to call when only some resources were created.
def unmount
  $log.info "MiqVm.unmount called."
  # Close every opened disk before the managers that sit on top of them.
  @wholeDisks.each(&:close)
  @wholeDisks.clear
  if @volumeManager
    @volumeManager.close
    @volumeManager = nil
  end
  @applianceVolumeManager.closeAll if @applianceVolumeManager
  @applianceVolumeManager = nil
  # Only established when disks were accessed through VC/ESX.
  # NOTE(review): @vdlConnection is not reset to nil after closing —
  # confirm a subsequent vmDisks call is not expected to reuse it.
  @ost.miqVim.closeVdlConnection(@vdlConnection) if @vdlConnection
  if @volMgrPS
    # Counterpart to the preMount performed in vmDisks.
    @volMgrPS.postMount
    @volMgrPS = nil
  end
  @vimVm.release if @vimVm
  @rootTrees = nil
  @vmDisks = nil
end
# Lazily build the metadata extractor for this VM.
def miq_extract
  return @miq_extract if @miq_extract
  @miq_extract = MIQExtract.new(self, @ost)
end
# Extract the named metadata category from the VM, raising when the
# extraction yields nothing.
def extract(c)
  result = miq_extract.extract(c)
  raise "Could not extract \"#{c}\" from VM" unless result
  result
end
end # class MiqVm
if __FILE__ == $0
require 'log4r'
require 'metadata/util/win32/boot_info_win'
# vmDir = File.join(ENV.fetch("HOME", '.'), 'VMs')
vmDir = "/volumes/WDpassport/Virtual Machines"
puts "vmDir = #{vmDir}"
targetLv = "rpolv2"
rootLv = "LogVol00"
# Renders log events as plain lines: string payloads pass through
# untouched, anything else is rendered with #inspect.
class ConsoleFormatter < Log4r::Formatter
  def format(event)
    payload = event.data
    line = payload.kind_of?(String) ? payload : payload.inspect
    line + "\n"
  end
end
toplog = Log4r::Logger.new 'toplog'
Log4r::StderrOutputter.new('err_console', :level => Log4r::DEBUG, :formatter => ConsoleFormatter)
toplog.add 'err_console'
$log = toplog if $log.nil?
#
# *** Test start
#
# vmCfg = File.join(vmDir, "cacheguard/cacheguard.vmx")
# vmCfg = File.join(vmDir, "Red Hat Linux.vmwarevm/Red Hat Linux.vmx")
# vmCfg = File.join(vmDir, "MIQ Server Appliance - Ubuntu MD - small/MIQ Server Appliance - Ubuntu.vmx")
# vmCfg = File.join(vmDir, "winxpDev.vmwarevm/winxpDev.vmx")
vmCfg = File.join(vmDir, "Win2K_persistent/Windows 2000 Professional.vmx")
# vmCfg = File.join(vmDir, "Win2K_non_persistent/Windows 2000 Professional.vmx")
puts "VM config file: #{vmCfg}"
ost = OpenStruct.new
ost.openParent = true
vm = MiqVm.new(vmCfg, ost)
puts "\n*** Disk Files:"
vm.vmConfig.getDiskFileHash.each do |k, v|
puts "\t#{k}\t#{v}"
end
puts "\n*** configHash:"
vm.vmConfig.getHash.each do |k, v|
puts "\t#{k} => #{v}"
end
tlv = nil
rlv = nil
puts "\n*** Visible Volumes:"
vm.volumeManager.visibleVolumes.each do |vv|
puts "\tDisk type: #{vv.diskType}"
puts "\tDisk sig: #{vv.diskSig}"
puts "\tStart LBA: #{vv.lbaStart}"
if vv.respond_to?(:logicalVolume)
puts "\t\tLV name: #{vv.logicalVolume.lvName}"
puts "\t\tLV UUID: #{vv.logicalVolume.lvId}"
tlv = vv if vv.logicalVolume.lvName == targetLv
rlv = vv if vv.logicalVolume.lvName == rootLv
end
end
# raise "#{targetLv} not found" if !tlv
#
# tlv.seek(0, IO::SEEK_SET)
# rs = tlv.read(2040)
# puts "\n***** START *****"
# puts rs
# puts "****** END ******"
#
# tlv.seek(2048*512*5119, IO::SEEK_SET)
# rs = tlv.read(2040)
# puts "\n***** START *****"
# puts rs
# puts "****** END ******"
#
# raise "#{rootLv} not found" if !rlv
#
# puts "\n*** Mounting #{rootLv}"
# rfs = MiqFS.getFS(rlv)
# puts "\tFS Type: #{rfs.fsType}"
# puts "\t*** Root-level files and directories:"
# rfs.dirForeach("/") { |de| puts "\t\t#{de}" }
puts "\n***** Detected Guest OSs:"
raise "No OSs detected" if vm.rootTrees.length == 0
vm.rootTrees.each do |rt|
puts "\t#{rt.guestOS}"
if rt.guestOS == "Linux"
puts "\n\t\t*** /etc/fstab contents:"
rt.fileOpen("/etc/fstab", &:read).each_line do |fstl|
next if fstl =~ /^#.*$/
puts "\t\t\t#{fstl}"
end
end
end
vm.rootTrees.each do |rt|
if rt.guestOS == "Linux"
# tdirArr = [ "/", "/boot", "/var/www/miq", "/var/www/miq/vmdb/log", "/var/lib/mysql" ]
tdirArr = ["/", "/boot", "/etc/init.d", "/etc/rc.d/init.d", "/etc/rc.d/rc0.d"]
tdirArr.each do |tdir|
begin
puts "\n*** Listing #{tdir} directory (1):"
rt.dirForeach(tdir) { |de| puts "\t\t#{de}" }
puts "*** end"
puts "\n*** Listing #{tdir} directory (2):"
rt.chdir(tdir)
rt.dirForeach { |de| puts "\t\t#{de}" }
puts "*** end"
rescue => err
puts "*** #{err}"
end
end
# lf = rt.fileOpen("/etc/rc0.d/S01halt")
# puts "\n*** Contents of /etc/rc0.d/S01halt:"
# puts lf.read
# puts "*** END"
# lf.close
#
# lfn = "/etc/rc0.d/S01halt"
# puts "Is #{lfn} a symbolic link? #{rt.fileSymLink?(lfn)}"
# puts "#{lfn} => #{rt.getLinkPath(lfn)}"
else # Windows
tdirArr = ["c:/", "e:/", "e:/testE2", "f:/"]
tdirArr.each do |tdir|
puts "\n*** Listing #{tdir} directory (1):"
rt.dirForeach(tdir) { |de| puts "\t\t#{de}" }
puts "*** end"
puts "\n*** Listing #{tdir} directory (2):"
rt.chdir(tdir)
rt.dirForeach { |de| puts "\t\t#{de}" }
puts "*** end"
end
end
end
vm.unmount
puts "...done"
end
Print disk file path for all cases
require 'ostruct'
require 'metadata/VmConfig/VmConfig'
require 'disk/MiqDisk'
require 'VolumeManager/MiqVolumeManager'
require 'fs/MiqMountManager'
require 'metadata/MIQExtract/MIQExtract'
class MiqVm
attr_reader :vmConfig, :vmConfigFile, :vim, :vimVm, :rhevm, :rhevmVm, :diskInitErrors, :wholeDisks
def initialize(vmCfg, ost = nil)
@ost = ost || OpenStruct.new
$log.debug "MiqVm::initialize: @ost = nil" if $log && !@ost
@vmDisks = nil
@wholeDisks = []
@rootTrees = nil
@volumeManager = nil
@applianceVolumeManager = nil
@vmConfigFile = ""
@diskInitErrors = {}
unless vmCfg.kind_of?(Hash)
@vmConfigFile = vmCfg
@vmDir = File.dirname(vmCfg)
end
$log.debug "MiqVm::initialize: @ost.openParent = #{@ost.openParent}" if $log
#
# If we're passed an MiqVim object, then use VIM to obtain the Vm's
# configuration through the instantiated server.
# If we're passed a snapshot ID, then obtain the configration of the
# VM when the snapshot was taken.
#
# TODO: move to MiqVmwareVm
if (@vim = @ost.miqVim)
$log.debug "MiqVm::initialize: accessing VM through server: #{@vim.server}" if $log.debug?
@vimVm = @vim.getVimVm(vmCfg)
$log.debug "MiqVm::initialize: setting @ost.miqVimVm = #{@vimVm.class}" if $log.debug?
@ost.miqVimVm = @vimVm
@vmConfig = VmConfig.new(@vimVm.getCfg(@ost.snapId))
# TODO: move this to MiqRhevmVm.
elsif (@rhevm = @ost.miqRhevm)
$log.debug "MiqVm::initialize: accessing VM through RHEVM server" if $log.debug?
$log.debug "MiqVm::initialize: vmCfg = #{vmCfg}"
@rhevmVm = @rhevm.get_vm(vmCfg)
$log.debug "MiqVm::initialize: setting @ost.miqRhevmVm = #{@rhevmVm.class}" if $log.debug?
@ost.miqRhevmVm = @rhevmVm
@vmConfig = VmConfig.new(getCfg(@ost.snapId))
$log.debug "MiqVm::initialize: @vmConfig.getHash = #{@vmConfig.getHash.inspect}"
$log.debug "MiqVm::initialize: @vmConfig.getDiskFileHash = #{@vmConfig.getDiskFileHash.inspect}"
# TODO: move this to miq_scvmm_vm
elsif (@scvmm = @ost.miq_scvmm)
$log.debug "MiqVm::initialize: accessing VM through HyperV server" if $log.debug?
@vmConfig = VmConfig.new(getCfg(@ost.snapId))
$log.debug "MiqVm::initialize: setting @ost.miq_scvmm_vm = #{@scvmm_vm.class}" if $log.debug?
else
@vimVm = nil
@vmConfig = VmConfig.new(vmCfg)
end
end # def initialize
# Lazily open the VM's disks. On first call, platform-specific volume
# support is mounted before the disk files are opened.
def vmDisks
  return @vmDisks if @vmDisks
  @volMgrPS = VolMgrPlatformSupport.new(@vmConfig.configFile, @ost)
  @volMgrPS.preMount
  @vmDisks = openDisks(@vmConfig.getDiskFileHash)
end
def openDisks(diskFiles)
pVolumes = []
$log.debug "openDisks: no disk files supplied." unless diskFiles
#
# Build a list of the VM's physical volumes.
#
diskFiles.each do |dtag, df|
$log.debug "openDisks: processing disk file (#{dtag}): #{df}"
dInfo = OpenStruct.new
if @ost.miqVim
dInfo.vixDiskInfo = {}
dInfo.vixDiskInfo[:fileName] = @ost.miqVim.datastorePath(df)
if @ost.miqVimVm
@vdlConnection = @ost.miqVimVm.vdlVcConnection unless @vdlConnection
else
@vdlConnection = @ost.miqVim.vdlConnection unless @vdlConnection
end
$log.debug "openDisks: using disk file path: #{dInfo.vixDiskInfo[:fileName]}"
dInfo.vixDiskInfo[:connection] = @vdlConnection
elsif @ost.miq_hyperv
init_disk_info(dInfo, df)
else
dInfo.fileName = df
disk_format = @vmConfig.getHash["#{dtag}.format"] # Set by rhevm for iscsi and fcp disks
dInfo.format = disk_format unless disk_format.blank?
end
mode = @vmConfig.getHash["#{dtag}.mode"]
dInfo.hardwareId = dtag
dInfo.baseOnly = @ost.openParent unless mode && mode["independent"]
dInfo.rawDisk = @ost.rawDisk
$log.debug "MiqVm::openDisks: dInfo.baseOnly = #{dInfo.baseOnly}"
begin
d = applianceVolumeManager && applianceVolumeManager.lvHash[dInfo.fileName] if @rhevm
if d
$log.debug "MiqVm::openDisks: using applianceVolumeManager for #{dInfo.fileName}" if $log.debug?
d.dInfo.fileName = dInfo.fileName
d.dInfo.hardwareId = dInfo.hardwareId
d.dInfo.baseOnly = dInfo.baseOnly
d.dInfo.format = dInfo.format if dInfo.format
d.dInfo.applianceVolumeManager = applianceVolumeManager
#
# Here, we need to probe the disk to determine its data format,
# QCOW for example. If the disk format is not flat, push a disk
# supporting the format on top of this disk. Then set d to point
# to the new top disk.
#
d = d.pushFormatSupport
else
d = MiqDisk.getDisk(dInfo)
# I am not sure if getting a nil handle back should throw an error or not.
# For now I am just skipping to the next disk. (GMM)
next if d.nil?
end
rescue => err
$log.error "Couldn't open disk file: #{df}"
$log.error err.to_s
$log.debug err.backtrace.join("\n")
@diskInitErrors[df] = err.to_s
next
end
@wholeDisks << d
p = d.getPartitions
if p.empty?
#
# If the disk has no partitions, the whole disk can be a single volume.
#
pVolumes << d
else
#
# If the disk is partitioned, the partitions are physical volumes,
# but not the whild disk.
#
pVolumes.concat(p)
end
end
pVolumes
end # def openDisks
# Mount this VM's volumes and return the detected root filesystem trees.
# The result is memoized and also handed to the volume manager so later
# lookups through it see the same trees.
def rootTrees
  @rootTrees ||= begin
    trees = MiqMountManager.mountVolumes(volumeManager, @vmConfig, @ost)
    volumeManager.rootTrees = trees
    trees
  end
end
# Lazily construct the volume manager over this VM's disks.
def volumeManager
  return @volumeManager if @volumeManager
  @volumeManager = MiqVolumeManager.new(vmDisks)
end
# Volume manager built from the appliance's native physical volumes.
# Not applicable (returns nil) when the storage is NFS-mounted.
def applianceVolumeManager
  return if @ost.nfs_storage_mounted
  @applianceVolumeManager = @applianceVolumeManager || MiqVolumeManager.fromNativePvs
end
# Return snapshot information from the underlying VIM VM.
#
# @param refresh [Boolean] when true, force a refresh of cached snapshot data
# @return [Object, nil] the snapshot info, or nil when no VIM VM is attached
def snapshots(refresh = false)
  # The original guarded @vimVm twice (`return nil unless @vimVm` followed
  # by `... if @vimVm`); a single guard clause is sufficient.
  return nil unless @vimVm
  @vimVm.snapshotInfo(refresh)
end
# Tear down everything openDisks/rootTrees built up: close disks, volume
# managers, the VDL connection and the VIM VM handle, and drop cached
# state so the object can be reused. The order below matters: disks and
# volume managers are closed before the VDL connection they depend on.
def unmount
  $log.info "MiqVm.unmount called."
  # Close and forget every raw disk opened by openDisks.
  @wholeDisks.each(&:close)
  @wholeDisks.clear
  if @volumeManager
    @volumeManager.close
    @volumeManager = nil
  end
  # The appliance volume manager (if any) wraps several volumes at once.
  @applianceVolumeManager.closeAll if @applianceVolumeManager
  @applianceVolumeManager = nil
  # Release the VixDiskLib connection established while opening disks.
  @ost.miqVim.closeVdlConnection(@vdlConnection) if @vdlConnection
  if @volMgrPS
    # Give the volume-manager pre/post script its post-mount callback.
    @volMgrPS.postMount
    @volMgrPS = nil
  end
  @vimVm.release if @vimVm
  # Invalidate memoized state derived from the now-closed disks.
  @rootTrees = nil
  @vmDisks = nil
end
# Lazily build the metadata extractor for this VM.
def miq_extract
  return @miq_extract if @miq_extract
  @miq_extract = MIQExtract.new(self, @ost)
end
# Extract the named metadata category from the VM.
# Raises when the extractor returns nothing for the category.
def extract(c)
  result = miq_extract.extract(c)
  raise "Could not extract \"#{c}\" from VM" unless result
  result
end
end # class MiqVm
if __FILE__ == $0
require 'log4r'
require 'metadata/util/win32/boot_info_win'
# vmDir = File.join(ENV.fetch("HOME", '.'), 'VMs')
vmDir = "/volumes/WDpassport/Virtual Machines"
puts "vmDir = #{vmDir}"
targetLv = "rpolv2"
rootLv = "LogVol00"
# Minimal Log4r formatter: emit the event payload followed by a newline,
# inspecting it first unless it is already a String.
class ConsoleFormatter < Log4r::Formatter
  def format(event)
    payload = event.data
    text = payload.kind_of?(String) ? payload : payload.inspect
    text + "\n"
  end
end
toplog = Log4r::Logger.new 'toplog'
Log4r::StderrOutputter.new('err_console', :level => Log4r::DEBUG, :formatter => ConsoleFormatter)
toplog.add 'err_console'
$log = toplog if $log.nil?
#
# *** Test start
#
# vmCfg = File.join(vmDir, "cacheguard/cacheguard.vmx")
# vmCfg = File.join(vmDir, "Red Hat Linux.vmwarevm/Red Hat Linux.vmx")
# vmCfg = File.join(vmDir, "MIQ Server Appliance - Ubuntu MD - small/MIQ Server Appliance - Ubuntu.vmx")
# vmCfg = File.join(vmDir, "winxpDev.vmwarevm/winxpDev.vmx")
vmCfg = File.join(vmDir, "Win2K_persistent/Windows 2000 Professional.vmx")
# vmCfg = File.join(vmDir, "Win2K_non_persistent/Windows 2000 Professional.vmx")
puts "VM config file: #{vmCfg}"
ost = OpenStruct.new
ost.openParent = true
vm = MiqVm.new(vmCfg, ost)
puts "\n*** Disk Files:"
vm.vmConfig.getDiskFileHash.each do |k, v|
puts "\t#{k}\t#{v}"
end
puts "\n*** configHash:"
vm.vmConfig.getHash.each do |k, v|
puts "\t#{k} => #{v}"
end
tlv = nil
rlv = nil
puts "\n*** Visible Volumes:"
vm.volumeManager.visibleVolumes.each do |vv|
puts "\tDisk type: #{vv.diskType}"
puts "\tDisk sig: #{vv.diskSig}"
puts "\tStart LBA: #{vv.lbaStart}"
if vv.respond_to?(:logicalVolume)
puts "\t\tLV name: #{vv.logicalVolume.lvName}"
puts "\t\tLV UUID: #{vv.logicalVolume.lvId}"
tlv = vv if vv.logicalVolume.lvName == targetLv
rlv = vv if vv.logicalVolume.lvName == rootLv
end
end
# raise "#{targetLv} not found" if !tlv
#
# tlv.seek(0, IO::SEEK_SET)
# rs = tlv.read(2040)
# puts "\n***** START *****"
# puts rs
# puts "****** END ******"
#
# tlv.seek(2048*512*5119, IO::SEEK_SET)
# rs = tlv.read(2040)
# puts "\n***** START *****"
# puts rs
# puts "****** END ******"
#
# raise "#{rootLv} not found" if !rlv
#
# puts "\n*** Mounting #{rootLv}"
# rfs = MiqFS.getFS(rlv)
# puts "\tFS Type: #{rfs.fsType}"
# puts "\t*** Root-level files and directories:"
# rfs.dirForeach("/") { |de| puts "\t\t#{de}" }
puts "\n***** Detected Guest OSs:"
raise "No OSs detected" if vm.rootTrees.length == 0
vm.rootTrees.each do |rt|
puts "\t#{rt.guestOS}"
if rt.guestOS == "Linux"
puts "\n\t\t*** /etc/fstab contents:"
rt.fileOpen("/etc/fstab", &:read).each_line do |fstl|
next if fstl =~ /^#.*$/
puts "\t\t\t#{fstl}"
end
end
end
vm.rootTrees.each do |rt|
if rt.guestOS == "Linux"
# tdirArr = [ "/", "/boot", "/var/www/miq", "/var/www/miq/vmdb/log", "/var/lib/mysql" ]
tdirArr = ["/", "/boot", "/etc/init.d", "/etc/rc.d/init.d", "/etc/rc.d/rc0.d"]
tdirArr.each do |tdir|
begin
puts "\n*** Listing #{tdir} directory (1):"
rt.dirForeach(tdir) { |de| puts "\t\t#{de}" }
puts "*** end"
puts "\n*** Listing #{tdir} directory (2):"
rt.chdir(tdir)
rt.dirForeach { |de| puts "\t\t#{de}" }
puts "*** end"
rescue => err
puts "*** #{err}"
end
end
# lf = rt.fileOpen("/etc/rc0.d/S01halt")
# puts "\n*** Contents of /etc/rc0.d/S01halt:"
# puts lf.read
# puts "*** END"
# lf.close
#
# lfn = "/etc/rc0.d/S01halt"
# puts "Is #{lfn} a symbolic link? #{rt.fileSymLink?(lfn)}"
# puts "#{lfn} => #{rt.getLinkPath(lfn)}"
else # Windows
tdirArr = ["c:/", "e:/", "e:/testE2", "f:/"]
tdirArr.each do |tdir|
puts "\n*** Listing #{tdir} directory (1):"
rt.dirForeach(tdir) { |de| puts "\t\t#{de}" }
puts "*** end"
puts "\n*** Listing #{tdir} directory (2):"
rt.chdir(tdir)
rt.dirForeach { |de| puts "\t\t#{de}" }
puts "*** end"
end
end
end
vm.unmount
puts "...done"
end
|
namespace :gettext do
def load_gettext
require 'gettext'
require 'gettext/utils'
end
desc "Create mo-files for L10n"
task :pack => :environment do
load_gettext
GetText.create_mofiles(true, locale_path, locale_path)
end
desc "Update pot/po files."
task :find => :environment do
load_gettext
$LOAD_PATH << File.join(File.dirname(__FILE__),'..','..','lib')
require 'gettext_i18n_rails/haml_parser'
require 'gettext_i18n_rails/slim_parser'
require 'gettext_i18n_rails/hamlet_parser'
if GetText.respond_to? :update_pofiles_org
if defined?(Rails.application)
msgmerge = Rails.application.config.gettext_i18n_rails.msgmerge
end
msgmerge ||= %w[--sort-output --no-location --no-wrap]
GetText.update_pofiles_org(
text_domain,
files_to_translate,
"version 0.0.1",
:po_root => locale_path,
:msgmerge => msgmerge
)
else #we are on a version < 2.0
puts "install new GetText with gettext:install to gain more features..."
#kill ar parser...
require 'gettext/parser/active_record'
module GetText
module ActiveRecordParser
module_function
def init(x);end
end
end
#parse files.. (models are simply parsed as ruby files)
GetText.update_pofiles(
text_domain,
files_to_translate,
"version 0.0.1",
locale_path
)
end
end
# This is more of an example, ignoring
# the columns/tables that mostly do not need translation.
# This can also be done with GetText::ActiveRecord
# but this crashed too often for me, and
# IMO which column should/should-not be translated does not
# belong into the model
#
# You can get your translations from GetText::ActiveRecord
# by adding this to you gettext:find task
#
# require 'active_record'
# gem "gettext_activerecord", '>=0.1.0' #download and install from github
# require 'gettext_activerecord/parser'
desc "write the model attributes to <locale_path>/model_attributes.rb"
task :store_model_attributes => :environment do
FastGettext.silence_errors
require 'gettext_i18n_rails/model_attributes_finder'
require 'gettext_i18n_rails/active_record'
storage_file = "#{locale_path}/model_attributes.rb"
puts "writing model translations to: #{storage_file}"
ignore_tables = [/^sitemap_/, /_versions$/, 'schema_migrations', 'sessions', 'delayed_jobs']
GettextI18nRails.store_model_attributes(
:to => storage_file,
:ignore_columns => [/_id$/, 'id', 'type', 'created_at', 'updated_at'],
:ignore_tables => ignore_tables
)
end
desc "add a new language"
task :add_language, [:language] => :environment do |_, args|
language = args.language || ENV["LANGUAGE"]
# Let's do some pre-verification of the environment.
if language.nil?
puts "You need to specify the language to add. Either 'LANGUAGE=eo rake gettext:add_languange' or 'rake gettext:add_languange[eo]'"
next
end
pot = File.join(locale_path, "#{text_domain}.pot")
if !File.exists? pot
puts "You don't have a pot file yet, you probably should run 'rake gettext:find' at least once. Tried '#{pot}'."
next
end
# Create the directory for the new language.
dir = File.join(locale_path, language)
puts "Creating directory #{dir}"
Dir.mkdir dir
# Create the po file for the new language.
new_po = File.join(locale_path, language, "#{text_domain}.po")
puts "Initializing #{new_po} from #{pot}."
system "msginit --locale=#{language} --input=#{pot} --output=#{new_po}"
end
def locale_path
FastGettext.translation_repositories[text_domain].instance_variable_get(:@options)[:path]
rescue
File.join(RAILS_ROOT, "locale")
end
def text_domain
# if your textdomain is not 'app': require the environment before calling e.g. gettext:find OR add TEXTDOMAIN=my_domain
ENV['TEXTDOMAIN'] || (FastGettext.text_domain rescue nil) || "app"
end
def files_to_translate
Dir.glob("{app,lib,config,#{locale_path}}/**/*.{rb,erb,haml,slim}")
end
end
Use Rails.root instead of RAILS_ROOT.
The RAILS_ROOT constant is no longer present in Rails 3.2.
namespace :gettext do
# Require the GetText runtime and its po/mo utilities on demand, so the
# dependency is only loaded when a gettext task actually runs.
def load_gettext
  require 'gettext'
  require 'gettext/utils'
end
desc "Create mo-files for L10n"
task :pack => :environment do
  load_gettext
  # Compile every .po file under locale_path into binary .mo files in place.
  GetText.create_mofiles(true, locale_path, locale_path)
end
desc "Update pot/po files."
task :find => :environment do
  load_gettext
  # Make the bundled template parsers findable, then register them.
  $LOAD_PATH << File.join(File.dirname(__FILE__),'..','..','lib')
  require 'gettext_i18n_rails/haml_parser'
  require 'gettext_i18n_rails/slim_parser'
  require 'gettext_i18n_rails/hamlet_parser'
  # GetText >= 2.0 exposes update_pofiles_org; use it when available.
  if GetText.respond_to? :update_pofiles_org
    if defined?(Rails.application)
      # Allow the app to override the msgmerge flags via configuration.
      msgmerge = Rails.application.config.gettext_i18n_rails.msgmerge
    end
    msgmerge ||= %w[--sort-output --no-location --no-wrap]
    GetText.update_pofiles_org(
      text_domain,
      files_to_translate,
      "version 0.0.1",
      :po_root => locale_path,
      :msgmerge => msgmerge
    )
  else #we are on a version < 2.0
    puts "install new GetText with gettext:install to gain more features..."
    #kill ar parser...
    # Neutralize GetText's ActiveRecord parser by stubbing its init hook;
    # models are then parsed as plain Ruby files below.
    require 'gettext/parser/active_record'
    module GetText
      module ActiveRecordParser
        module_function
        def init(x);end
      end
    end
    #parse files.. (models are simply parsed as ruby files)
    GetText.update_pofiles(
      text_domain,
      files_to_translate,
      "version 0.0.1",
      locale_path
    )
  end
end
# This is more of an example, ignoring
# the columns/tables that mostly do not need translation.
# This can also be done with GetText::ActiveRecord
# but this crashed too often for me, and
# IMO which column should/should-not be translated does not
# belong into the model
#
# You can get your translations from GetText::ActiveRecord
# by adding this to you gettext:find task
#
# require 'active_record'
# gem "gettext_activerecord", '>=0.1.0' #download and install from github
# require 'gettext_activerecord/parser'
desc "write the model attributes to <locale_path>/model_attributes.rb"
task :store_model_attributes => :environment do
  FastGettext.silence_errors
  require 'gettext_i18n_rails/model_attributes_finder'
  require 'gettext_i18n_rails/active_record'
  storage_file = "#{locale_path}/model_attributes.rb"
  puts "writing model translations to: #{storage_file}"
  # Framework bookkeeping and versioning tables never need translation.
  ignore_tables = [/^sitemap_/, /_versions$/, 'schema_migrations', 'sessions', 'delayed_jobs']
  GettextI18nRails.store_model_attributes(
    :to => storage_file,
    # Keys and timestamps are likewise excluded.
    :ignore_columns => [/_id$/, 'id', 'type', 'created_at', 'updated_at'],
    :ignore_tables => ignore_tables
  )
end
desc "add a new language"
task :add_language, [:language] => :environment do |_, args|
  # The language may come from the task argument or the LANGUAGE env var.
  language = args.language || ENV["LANGUAGE"]

  # Let's do some pre-verification of the environment.
  if language.nil?
    # Fixed typo: the task is gettext:add_language, not "add_languange".
    puts "You need to specify the language to add. Either 'LANGUAGE=eo rake gettext:add_language' or 'rake gettext:add_language[eo]'"
    next
  end
  pot = File.join(locale_path, "#{text_domain}.pot")
  # File.exists? was deprecated for years and removed in Ruby 3.2;
  # File.exist? is the supported spelling.
  if !File.exist? pot
    puts "You don't have a pot file yet, you probably should run 'rake gettext:find' at least once. Tried '#{pot}'."
    next
  end

  # Create the directory for the new language (idempotent: re-running the
  # task for an existing language no longer aborts with Errno::EEXIST).
  dir = File.join(locale_path, language)
  puts "Creating directory #{dir}"
  Dir.mkdir dir unless File.directory?(dir)

  # Create the po file for the new language from the pot template.
  new_po = File.join(locale_path, language, "#{text_domain}.po")
  puts "Initializing #{new_po} from #{pot}."
  system "msginit --locale=#{language} --input=#{pot} --output=#{new_po}"
end
# Locale directory: read from the FastGettext repository options when
# available; the bare rescue falls back to <Rails.root>/locale when no
# repository is registered for the text domain (or the lookup fails).
def locale_path
  FastGettext.translation_repositories[text_domain].instance_variable_get(:@options)[:path]
rescue
  File.join(Rails.root, "locale")
end
# The gettext text domain, resolved in priority order: an explicit
# TEXTDOMAIN environment override, then FastGettext's configured domain
# (ignored if FastGettext is unavailable/unconfigured), then "app".
# If your textdomain is not 'app': require the environment before calling
# e.g. gettext:find OR add TEXTDOMAIN=my_domain.
def text_domain
  ENV.fetch('TEXTDOMAIN') { (FastGettext.text_domain rescue nil) || "app" }
end
# Every file that may contain translatable strings, including files
# already under the locale directory.
def files_to_translate
  pattern = "{app,lib,config,#{locale_path}}/**/*.{rb,erb,haml,slim}"
  Dir.glob(pattern)
end
end
|
require 'redcloth'

module GitBlog
  module Parsers
    module Markdown
      # Render Markdown source to HTML: strip a leading setext-style title
      # (a line underlined with '=' characters, plus any blank lines that
      # follow), then hand the remainder to RedCloth.
      # NOTE(review): gsub! mutates the caller's string in place, and a
      # later revision in this file disables the [:markdown, :textile]
      # rules because RedCloth fails to parse links with them.
      def self.parse input
        input.gsub!(/^(.*)\n=+(\n\s+)*\n/m, '')
        ::RedCloth.new(input).to_html [:markdown, :textile]
      end
    end
  end
end
There is a bug here: RedCloth fails to parse links when the [:markdown, :textile] rule set is passed to to_html, so the argument is disabled in the revision below.
require 'redcloth'

module GitBlog
  module Parsers
    module Markdown
      # Render Markdown source to HTML.
      #
      # Strips a leading setext-style title (a line underlined with '='
      # characters, plus any blank lines that follow) before handing the
      # text to RedCloth. The [:markdown, :textile] rule set stays
      # disabled: RedCloth fails to parse links when it is passed here.
      #
      # @param input [String] raw Markdown source
      # @return [String] rendered HTML
      def self.parse(input)
        # Non-destructive gsub: the original gsub! mutated the caller's
        # string as a hidden side effect.
        body = input.gsub(/^(.*)\n=+(\n\s+)*\n/m, '')
        ::RedCloth.new(body).to_html # [:markdown, :textile]
      end
    end
  end
end
require 'yui/compressor'
module GithubbishAssets
class Packer
def self.js
case GithubbishAssets.js_compressor
when :jsmin
require 'vendor/js_minimizer'
when :closure
require 'closure-compiler'
end
pack(Rails.root + 'public/javascripts', '.js') do |target, files|
case GithubbishAssets.js_compressor
when :closure
opts = [ [:js_output_file, target] ]
if GithubbishAssets.closure_source_map
opts << [:create_source_map, "#{target}.map"]
end
files.each do |file|
opts << [:js, file]
end
Closure::Compiler.new(opts).compile('')
when :yui
compress_with_yui(YUI::JavaScriptCompressor.new, files, target)
else
File.open(target, 'w+') do |f|
f.puts GithubbishAssets::JSMinimizer.minimize_files(*files)
end
end
end
end
def self.css
pack(Rails.root + 'public/stylesheets', '.css') do |target, files|
compress_with_yui(YUI::CssCompressor.new(:line_break => 0), files, target)
end
end
private
def self.pack(path, ext)
targets = []
get_top_level_directories(path).each do |bundle_directory|
bundle_name = bundle_directory.basename
next if bundle_name.to_s == 'dev'
files = RecursiveLister[bundle_directory, ext]
next if files.empty?
target = path + "bundle_#{bundle_name}#{ext}"
yield target, files
targets << target
end
targets
end
def self.get_top_level_directories(root_path)
root_path.children.select { |path| path.directory? }
end
def self.compress_with_yui(compressor, files, target)
File.open(target, 'w') do |f|
compressor.compress(MultiFile.new(files)) do |compressed|
while buffer = compressed.read(4096)
f.write(buffer)
end
end
end
end
# A class that emulates continuous reading from a bunch of files
class MultiFile
def initialize(files)
@files = files
@file = nil
end
def read(size)
while true
if @file
res = @file.read(size)
return res if res
end
return if @files.empty?
@file = File.open(@files.shift, 'r')
end
end
end
end
end
Implement MultiFile#close for compatibility with yui-compressor 0.9.6,
which calls close on the stream it is given.
require 'yui/compressor'

module GithubbishAssets
  class Packer
    # Bundle and compress all JavaScript under public/javascripts, using
    # the compressor selected by GithubbishAssets.js_compressor
    # (:jsmin, :closure, or anything else falls back to the YUI/JSMinimizer
    # branches below). Returns the list of bundle targets from pack.
    def self.js
      # Lazily require the chosen compressor's support code.
      case GithubbishAssets.js_compressor
      when :jsmin
        require 'vendor/js_minimizer'
      when :closure
        require 'closure-compiler'
      end
      pack(Rails.root + 'public/javascripts', '.js') do |target, files|
        case GithubbishAssets.js_compressor
        when :closure
          # Closure takes its inputs/outputs as repeated CLI-style options.
          opts = [ [:js_output_file, target] ]
          if GithubbishAssets.closure_source_map
            opts << [:create_source_map, "#{target}.map"]
          end
          files.each do |file|
            opts << [:js, file]
          end
          Closure::Compiler.new(opts).compile('')
        when :yui
          compress_with_yui(YUI::JavaScriptCompressor.new, files, target)
        else
          # Default: concatenate-and-minify with the bundled JSMinimizer.
          File.open(target, 'w+') do |f|
            f.puts GithubbishAssets::JSMinimizer.minimize_files(*files)
          end
        end
      end
    end

    # Bundle and compress all CSS under public/stylesheets with YUI.
    def self.css
      pack(Rails.root + 'public/stylesheets', '.css') do |target, files|
        compress_with_yui(YUI::CssCompressor.new(:line_break => 0), files, target)
      end
    end

    private

    # NOTE(review): `private` has no effect on methods defined with
    # `def self.`; these class methods remain publicly callable.

    # For each top-level bundle directory under +path+ (except 'dev'),
    # yield the bundle target filename and the member files found by
    # RecursiveLister; returns the list of targets produced.
    def self.pack(path, ext)
      targets = []
      get_top_level_directories(path).each do |bundle_directory|
        bundle_name = bundle_directory.basename
        next if bundle_name.to_s == 'dev'
        files = RecursiveLister[bundle_directory, ext]
        next if files.empty?
        target = path + "bundle_#{bundle_name}#{ext}"
        yield target, files
        targets << target
      end
      targets
    end

    # Immediate subdirectories of +root_path+ (a Pathname).
    def self.get_top_level_directories(root_path)
      root_path.children.select { |path| path.directory? }
    end

    # Stream the concatenation of +files+ through the given YUI compressor
    # into +target+, copying the compressed output in 4 KB chunks.
    def self.compress_with_yui(compressor, files, target)
      File.open(target, 'w') do |f|
        compressor.compress(MultiFile.new(files)) do |compressed|
          while buffer = compressed.read(4096)
            f.write(buffer)
          end
        end
      end
    end

    # A class that emulates continuous reading from a bunch of files
    class MultiFile
      def initialize(files)
        @files = files
        @file = nil
      end

      # Read up to +size+ bytes, transparently advancing to the next file
      # when the current one is exhausted; returns nil once all files are
      # consumed (mirroring IO#read's end-of-stream behavior).
      def read(size)
        while true
          if @file
            res = @file.read(size)
            return res if res
          end
          return if @files.empty?
          @file = File.open(@files.shift, 'r')
        end
      end

      # Close the currently open file, if any; required by
      # yui-compressor 0.9.6, which closes the stream it is given.
      def close
        @file && @file.close
      end
    end
  end
end
|
module GoogleAssistant
  # Gem version (pre-release beta of 1.0.0).
  VERSION = "1.0.0.beta"
end
Release version 1.0.0 (drop the beta suffix).
module GoogleAssistant
  # Gem version: first stable release.
  VERSION = "1.0.0"
end
|
require "terminal-table"
module Gritano
module Console
class Executor < Gritano::Console::Base
def initialize(stdin = STDIN, home_dir = Etc.getpwuid.dir, repo_path = Etc.getpwuid.dir)
@stdin = stdin
@home_dir = home_dir
@repo_path = repo_path
@ssh_path = File.join(@home_dir, '.ssh')
super(@stdin, @home_dir)
end
before_each_command do
check_git
check_gritano
ActiveRecord::Base.establish_connection(
YAML::load(File.open(File.join(@home_dir, '.gritano', 'database.yml'))))
end
add_command "user:list" do |argv|
users = User.all
msg = Terminal::Table.new do |t|
t << ['user']
t << :separator
users.each do |user|
t.add_row [user.login]
end
end
msg = "there is no user registered" if users.count == 0
return [true, msg]
end
add_command "user:key:list", "username" do |argv|
login, = argv
user = User.find_by_login(login)
if user
keys = user.keys
msg = Terminal::Table.new do |t|
t << ['keys']
t << :separator
keys.each do |key|
t.add_row [key.name]
end
end
msg = "there is no key registered" if keys.count == 0
return [true, msg]
else
return [false, "User #{login} is not registered"]
end
end
add_command "user:repo:list", "username" do |argv|
login, = argv
user = User.find_by_login(login)
if user
repos = user.repositories
msg = Terminal::Table.new do |t|
t << ['repositories']
t << :separator
repos.each do |repo|
t.add_row [repo.name]
end
end
msg = "there is no repository registered" if repos.count == 0
return [true, msg]
else
return [false, "User #{login} is not registered"]
end
end
add_command "user:add", "username" do |argv|
login, = argv
user = User.new(login: login)
return [true, "User #{login} added."] if user.save
return [false, "#{user.errors.full_messages.join(", ")}."]
end
add_command "user:rm", "username" do |argv|
login, = argv
user = User.find_by_login(login)
if user
if user.destroy
return [true, "User #{login} removed."]
end
end
return [false, "User #{login} could not be removed."]
end
add_command "user:key:add", "username keyname < key.pub" do |argv|
login, key_name, key_file = argv
user = User.find_by_login(login)
if user
key = user.keys.create(name: key_name, key: @stdin.read)
if key.valid?
File.open(File.join(@ssh_path, 'authorized_keys'), 'w').write(Key.authorized_keys)
return [true, "Key added successfully."]
end
end
return [false, "Key could not be added."]
end
add_command "user:key:rm", "username keyname" do |argv|
login, key_name = argv
key = Key.where(name: key_name).includes(:user).where("users.login" => login).limit(1)[0]
if key
if key.destroy
File.open(File.join(@ssh_path, 'authorized_keys'), 'w').write(Key.authorized_keys)
return [true, "Key removed successfully."]
end
end
return [false, "Key could not be removed."]
end
add_command "user:admin:add", "username" do |argv|
login, = argv
user = User.find_by_login(login)
if user
user.admin = true
if user.save
return [true, "Now, user #{login} is an administrator"]
end
end
return [false, "User #{login} could not be modified"]
end
add_command "user:admin:rm", "username" do |argv|
login, = argv
user = User.find_by_login(login)
if user
user.admin = false
if user.save
return [true, "Now, user #{login} is not an administrator"]
end
end
return [false, "User #{login} could not be modified"]
end
add_command "repo:list" do |argv|
repos = Repository.all
msg = Terminal::Table.new do |t|
t << ['repositories']
t << :separator
repos.each do |repo|
t.add_row [repo.name]
end
end
msg = "there is no repository registered" if repos.count == 0
return [true, msg]
end
add_command "repo:add", "reponame.git [username1 username2 ...]*" do |argv|
name, user_login = argv
repo = Repository.new(name: name, path: @repo_path)
if repo.save
if user_login
argv[1..-1].each do |login|
user = User.find_by_login(login)
if user
user.add_access(repo, :read)
user.add_access(repo, :write)
end
end
end
return [true, "Repository #{name} created successfully."]
end
return [false, "Repository #{name} could not be created."]
end
add_command "repo:user:list", "reponame.git" do |argv|
name, = argv
repo = Repository.find_by_name(name)
if repo
users = repo.users
msg = Terminal::Table.new do |t|
t << ['user', 'permission']
t << :separator
users.each do |user|
permissions = ""
user.permissions.find_by_repository_id(repo.id) do |p|
permissions += "r" if p.is(:read)
permissions += "w" if p.is(:write)
end
t.add_row [user.login, permissions]
end
end
msg = "No user have access to this repository" if users.count == 0
return [true, msg]
end
return [false, "Repository #{name} doesn't exist."]
end
add_command "repo:rm", "reponame.git" do |argv|
name, = argv
repo = Repository.find_by_name(name)
if repo
if repo.destroy
return [true, "Repository #{name} removed successfully."]
end
end
return [false, "Repository #{name} could not be removed."]
end
add_command "repo:read:add", "reponame.git username" do |argv|
repo_name, login = argv
user = User.find_by_login(login)
repo = Repository.find_by_name(repo_name)
if repo and user
return [true, "User #{login} has read access to #{repo_name}."] if user.add_access(repo, :read)
end
return [false, "An error occurred. Permissions was not modified."]
end
add_command "repo:write:add", "reponame.git username" do |argv|
repo_name, login = argv
user = User.find_by_login(login)
repo = Repository.find_by_name(repo_name)
if repo and user
return [true, "User #{login} has write access to #{repo_name}."] if user.add_access(repo, :write)
end
return [false, "An error occurred. Permissions was not modified."]
end
add_command "repo:read:rm", "reponame.git username" do |argv|
repo_name, login = argv
user = User.find_by_login(login)
repo = Repository.find_by_name(repo_name)
if repo and user
return [true, "User #{login} has not read access to #{repo_name}."] if user.remove_access(repo, :read)
end
return [false, "An error occurred. Permissions was not modified."]
end
add_command "repo:write:rm", "reponame.git username" do |argv|
repo_name, login = argv
user = User.find_by_login(login)
repo = Repository.find_by_name(repo_name)
if repo and user
return [true, "User #{login} has not write access to #{repo_name}."] if user.remove_access(repo, :write)
end
return [false, "An error occurred. Permissions was not modified."]
end
add_command "addon:list" do |argv|
msg = Terminal::Table.new do |t|
t << ['add-ons']
t << :separator
t.add_row ['ssh']
end
return [true, msg]
end
add_command "addon:ssh:install" do |argv|
source_dir = File.join(@home_dir, '.gritano', 'src')
Dir.mkdir(source_dir) unless File.exist?(source_dir)
FileUtils.rm_rf(File.join(source_dir, 'gritano-openssh')) if File.exist?(File.join(source_dir, 'gritano-openssh'))
puts "[git] Cloning..."
`git clone git://github.com/igorbonadio/gritano-openssh.git #{File.join(source_dir, 'gritano-openssh')}`
puts "[build] Configuring..."
`cd #{File.join(source_dir, 'gritano-openssh')} && ./configure --prefix=#{File.join(@home_dir, '.gritano', 'ssh')}`
puts "[build] Compiling..."
`cd #{File.join(source_dir, 'gritano-openssh')} && make`
puts "[build] Installing..."
`cd #{File.join(source_dir, 'gritano-openssh')} && make install`
[true, 'done!']
end
add_command "addon:ssh:uninstall" do |argv|
source_dir = File.join(@home_dir, '.gritano', 'src')
FileUtils.rm_rf(File.join(source_dir, 'gritano-openssh')) if File.exist?(File.join(source_dir, 'gritano-openssh'))
FileUtils.rm_rf(File.join(@home_dir, '.gritano', 'ssh')) if File.exist?(File.join(@home_dir, '.gritano', 'ssh'))
[true, 'done!']
end
add_command "addon:ssh:config" do |argv|
[true, 'done!']
end
add_command "addon:ssh:start" do |argv|
[true, 'done!']
end
add_command "addon:ssh:stop" do |argv|
[true, 'done!']
end
end
end
end
gritano now opens Vim to edit the SSH configuration (addon:ssh:config).
require "terminal-table"
module Gritano
module Console
class Executor < Gritano::Console::Base
# @param stdin     [IO]     input stream used by commands that read data
#                           (e.g. a public key piped into user:key:add)
# @param home_dir  [String] home directory holding the .gritano configuration
# @param repo_path [String] directory where repositories are created
def initialize(stdin = STDIN, home_dir = Etc.getpwuid.dir, repo_path = Etc.getpwuid.dir)
  @stdin = stdin
  @home_dir = home_dir
  @repo_path = repo_path
  # authorized_keys is maintained under the user's ~/.ssh directory.
  @ssh_path = File.join(@home_dir, '.ssh')
  super(@stdin, @home_dir)
end
# Runs before every command: verify prerequisites and connect to the
# gritano database described in ~/.gritano/database.yml.
before_each_command do
  check_git
  check_gritano
  # YAML.load_file opens, parses and closes the file; the previous
  # YAML::load(File.open(...)) leaked an open file descriptor.
  db_config = YAML.load_file(File.join(@home_dir, '.gritano', 'database.yml'))
  ActiveRecord::Base.establish_connection(db_config)
end
add_command "user:list" do |argv|
users = User.all
msg = Terminal::Table.new do |t|
t << ['user']
t << :separator
users.each do |user|
t.add_row [user.login]
end
end
msg = "there is no user registered" if users.count == 0
return [true, msg]
end
add_command "user:key:list", "username" do |argv|
login, = argv
user = User.find_by_login(login)
if user
keys = user.keys
msg = Terminal::Table.new do |t|
t << ['keys']
t << :separator
keys.each do |key|
t.add_row [key.name]
end
end
msg = "there is no key registered" if keys.count == 0
return [true, msg]
else
return [false, "User #{login} is not registered"]
end
end
add_command "user:repo:list", "username" do |argv|
login, = argv
user = User.find_by_login(login)
if user
repos = user.repositories
msg = Terminal::Table.new do |t|
t << ['repositories']
t << :separator
repos.each do |repo|
t.add_row [repo.name]
end
end
msg = "there is no repository registered" if repos.count == 0
return [true, msg]
else
return [false, "User #{login} is not registered"]
end
end
add_command "user:add", "username" do |argv|
login, = argv
user = User.new(login: login)
return [true, "User #{login} added."] if user.save
return [false, "#{user.errors.full_messages.join(", ")}."]
end
add_command "user:rm", "username" do |argv|
login, = argv
user = User.find_by_login(login)
if user
if user.destroy
return [true, "User #{login} removed."]
end
end
return [false, "User #{login} could not be removed."]
end
# Register a public key (read from stdin) for a user and regenerate
# the authorized_keys file.
add_command "user:key:add", "username keyname < key.pub" do |argv|
  # The key material comes from stdin; the third argv slot was unused.
  login, key_name, = argv
  user = User.find_by_login(login)
  if user
    key = user.keys.create(name: key_name, key: @stdin.read)
    if key.valid?
      # File.write opens, truncates, writes and closes in one call; the
      # previous File.open(...).write leaked the file descriptor.
      File.write(File.join(@ssh_path, 'authorized_keys'), Key.authorized_keys)
      return [true, "Key added successfully."]
    end
  end
  return [false, "Key could not be added."]
end
# Remove a user's named key and regenerate the authorized_keys file.
add_command "user:key:rm", "username keyname" do |argv|
  login, key_name = argv
  # Find the named key belonging to the given user.
  key = Key.where(name: key_name).includes(:user).where("users.login" => login).limit(1)[0]
  if key
    if key.destroy
      # File.write closes the file deterministically; the previous
      # File.open(...).write leaked the file descriptor.
      File.write(File.join(@ssh_path, 'authorized_keys'), Key.authorized_keys)
      return [true, "Key removed successfully."]
    end
  end
  return [false, "Key could not be removed."]
end
add_command "user:admin:add", "username" do |argv|
login, = argv
user = User.find_by_login(login)
if user
user.admin = true
if user.save
return [true, "Now, user #{login} is an administrator"]
end
end
return [false, "User #{login} could not be modified"]
end
add_command "user:admin:rm", "username" do |argv|
login, = argv
user = User.find_by_login(login)
if user
user.admin = false
if user.save
return [true, "Now, user #{login} is not an administrator"]
end
end
return [false, "User #{login} could not be modified"]
end
add_command "repo:list" do |argv|
repos = Repository.all
msg = Terminal::Table.new do |t|
t << ['repositories']
t << :separator
repos.each do |repo|
t.add_row [repo.name]
end
end
msg = "there is no repository registered" if repos.count == 0
return [true, msg]
end
add_command "repo:add", "reponame.git [username1 username2 ...]*" do |argv|
name, user_login = argv
repo = Repository.new(name: name, path: @repo_path)
if repo.save
if user_login
argv[1..-1].each do |login|
user = User.find_by_login(login)
if user
user.add_access(repo, :read)
user.add_access(repo, :write)
end
end
end
return [true, "Repository #{name} created successfully."]
end
return [false, "Repository #{name} could not be created."]
end
add_command "repo:user:list", "reponame.git" do |argv|
name, = argv
repo = Repository.find_by_name(name)
if repo
users = repo.users
msg = Terminal::Table.new do |t|
t << ['user', 'permission']
t << :separator
users.each do |user|
permissions = ""
user.permissions.find_by_repository_id(repo.id) do |p|
permissions += "r" if p.is(:read)
permissions += "w" if p.is(:write)
end
t.add_row [user.login, permissions]
end
end
msg = "No user have access to this repository" if users.count == 0
return [true, msg]
end
return [false, "Repository #{name} doesn't exist."]
end
add_command "repo:rm", "reponame.git" do |argv|
name, = argv
repo = Repository.find_by_name(name)
if repo
if repo.destroy
return [true, "Repository #{name} removed successfully."]
end
end
return [false, "Repository #{name} could not be removed."]
end
add_command "repo:read:add", "reponame.git username" do |argv|
repo_name, login = argv
user = User.find_by_login(login)
repo = Repository.find_by_name(repo_name)
if repo and user
return [true, "User #{login} has read access to #{repo_name}."] if user.add_access(repo, :read)
end
return [false, "An error occurred. Permissions was not modified."]
end
add_command "repo:write:add", "reponame.git username" do |argv|
repo_name, login = argv
user = User.find_by_login(login)
repo = Repository.find_by_name(repo_name)
if repo and user
return [true, "User #{login} has write access to #{repo_name}."] if user.add_access(repo, :write)
end
return [false, "An error occurred. Permissions was not modified."]
end
add_command "repo:read:rm", "reponame.git username" do |argv|
repo_name, login = argv
user = User.find_by_login(login)
repo = Repository.find_by_name(repo_name)
if repo and user
return [true, "User #{login} has not read access to #{repo_name}."] if user.remove_access(repo, :read)
end
return [false, "An error occurred. Permissions was not modified."]
end
add_command "repo:write:rm", "reponame.git username" do |argv|
repo_name, login = argv
user = User.find_by_login(login)
repo = Repository.find_by_name(repo_name)
if repo and user
return [true, "User #{login} has not write access to #{repo_name}."] if user.remove_access(repo, :write)
end
return [false, "An error occurred. Permissions was not modified."]
end
add_command "addon:list" do |argv|
msg = Terminal::Table.new do |t|
t << ['add-ons']
t << :separator
t.add_row ['ssh']
end
return [true, msg]
end
add_command "addon:ssh:install" do |argv|
source_dir = File.join(@home_dir, '.gritano', 'src')
Dir.mkdir(source_dir) unless File.exist?(source_dir)
FileUtils.rm_rf(File.join(source_dir, 'gritano-openssh')) if File.exist?(File.join(source_dir, 'gritano-openssh'))
puts "[git] Cloning..."
`git clone git://github.com/igorbonadio/gritano-openssh.git #{File.join(source_dir, 'gritano-openssh')}`
puts "[build] Configuring..."
`cd #{File.join(source_dir, 'gritano-openssh')} && ./configure --prefix=#{File.join(@home_dir, '.gritano', 'ssh')}`
puts "[build] Compiling..."
`cd #{File.join(source_dir, 'gritano-openssh')} && make`
puts "[build] Installing..."
`cd #{File.join(source_dir, 'gritano-openssh')} && make install`
[true, 'done!']
end
add_command "addon:ssh:uninstall" do |argv|
source_dir = File.join(@home_dir, '.gritano', 'src')
FileUtils.rm_rf(File.join(source_dir, 'gritano-openssh')) if File.exist?(File.join(source_dir, 'gritano-openssh'))
FileUtils.rm_rf(File.join(@home_dir, '.gritano', 'ssh')) if File.exist?(File.join(@home_dir, '.gritano', 'ssh'))
[true, 'done!']
end
# Open the bundled sshd configuration in Vim for editing.
add_command "addon:ssh:config" do |argv|
  # `exec` replaces the current process and never returns, which made the
  # result line below unreachable; `system` runs the editor and comes back.
  system "vim #{File.join(@home_dir, '.gritano', 'ssh', 'etc', 'sshd_config')}"
  [true, 'done!']
end
add_command "addon:ssh:start" do |argv|
[true, 'done!']
end
add_command "addon:ssh:stop" do |argv|
[true, 'done!']
end
end
end
end
|
require 'base64'
require 'digest/sha2'
require 'exceptions'
# Fail fast with a descriptive error when Ruby was built without OpenSSL.
begin
  require 'openssl'
rescue LoadError
  # A bare `rescue` only catches StandardError; a missing library raises
  # LoadError (a ScriptError), so it must be named explicitly for this
  # guard to ever fire.
  raise Has::EncryptedAttributes::RubyCompiledWithoutOpenSSL
end
# Fail fast when OpenSSL does not provide the Blowfish-CBC cipher this
# plugin encrypts with.
begin
  # OpenSSL::Cipher::Cipher was deprecated and later removed; the
  # supported constructor is OpenSSL::Cipher.new.
  OpenSSL::Cipher.new('BF-CBC')
rescue
  raise Has::EncryptedAttributes::BlowfishCBCAlgorithmNotSupportedByOpenSSL
end
module Has #:nodoc:
  module EncryptedAttributes #:nodoc:
    # Mixing this module into a model class adds the class-level
    # has_encrypted_attributes macro.
    def self.included(base) #:nodoc:
      base.extend Encrypted
    end

    module Encrypted
      # Normalizes an :only / :except option — nil, a single name or an
      # array of names — into an array of attribute-name strings.
      def normalize_hae_options(opts)
        (opts.is_a?(Array) ? opts : [ opts ]).reject(&:blank?).map(&:to_s)
      end

      # Declares that this model stores its attributes Blowfish-CBC
      # encrypted (Base64 encoded) in the database.
      #
      # Options:
      #   :association - association whose object holds the encryption key
      #   :key_method  - method called on that object to fetch the key
      #                  (default :key)
      #   :key         - a literal key, used instead of the association
      #   :only        - restrict encryption to these attributes
      #   :except      - exclude these attributes from encryption
      #
      # Timestamps, the primary key and the association's foreign key are
      # never encrypted. Returns false (without configuring anything)
      # when the table is not available yet on Rails < 2.3.
      def has_encrypted_attributes(options = {})
        # Rails < 2.3 loads plugins before the DB connection exists;
        # bail out noisily instead of crashing on `columns` below.
        if Rails.version < '2.3' and not self.connected?
          warning = %{
has_encrypted_attributes: Cannot encrypt anything on '#{to_s}',
table '#{table_name}' not found. }.squish
          Rails.logger.warn warning
          puts warning # Rake tasks
          return false
        end
        cattr_accessor :encrypted_key_assoc, :encrypted_key_method,
                       :encrypted_key_value, :encrypted_attributes
        self.encrypted_key_assoc = options[:association] || nil
        self.encrypted_key_method = options[:key_method] || :key
        self.encrypted_key_method = encrypted_key_method.to_sym
        self.encrypted_key_value = options[:key]
        self.encrypted_attributes = normalize_hae_options(options[:only])
        # Encrypt all attributes (so far) if 'only' was not given:
        if self.encrypted_attributes.blank?
          self.encrypted_attributes = columns.map { |c| c.name.to_s }
        end
        # But not the association ID if we are using one:
        if encrypted_key_assoc
          self.encrypted_attributes -= [ "#{encrypted_key_assoc}_id" ]
        end
        # And not these usual suspects:
        self.encrypted_attributes -= %W[
          created_at created_on updated_at updated_on #{primary_key} ]
        # And finally, not the ones the user chose to exclude:
        self.encrypted_attributes -= normalize_hae_options(options[:except])
        # Define the attr_accessors that encrypt/decrypt on demand:
        self.encrypted_attributes.each do |secret|
          define_method(secret.to_sym) do
            # A new or freshly-assigned value is still plaintext; only
            # persisted, unchanged values need decrypting. Results are
            # memoized per attribute so the cipher runs once.
            if new_record? || send("#{secret}_changed?".to_sym)
              self[secret]
            else
              @plaintext_cache ||= {}
              @plaintext_cache[secret] ||= decrypt_encrypted(self[secret])
            end
          end
        end
        # Define the *_before_type_cast methods to call the on-demand
        # decryption accessors:
        self.encrypted_attributes.each do |secret|
          define_method("#{secret}_before_type_cast".to_sym) do
            self.send(secret.to_sym)
          end
        end
        include InstanceMethods
        self.before_save :encrypt_attributes!
      end
    end

    module InstanceMethods
      private

      # before_save hook: encrypts every changed encrypted attribute in
      # place, stashing the plaintext in @plaintext_cache so readers in
      # the same request still see the decrypted value.
      def encrypt_attributes!
        @plaintext_cache ||= {}
        encrypted_attributes.each do |secret|
          if send("#{secret}_changed?".to_sym)
            @plaintext_cache[secret] = self[secret]
            self[secret] = encrypt_plaintext(self[secret])
          end
        end
      end

      # The associated object that holds the encryption key.
      def key_holder
        self.send(encrypted_key_assoc)
      end

      # Resolves and memoizes the encryption key: a literal :key option
      # wins, otherwise the key is read from the configured association.
      # The raw key is stretched to its SHA-512 hex digest before use.
      # Raises NoEncryptionKeyGiven when no usable key is found.
      def encryption_key
        @encryption_key ||= begin
          key = if encrypted_key_value.present?
            encrypted_key_value # use key given in definition
          elsif encrypted_key_assoc && encrypted_key_method
            # use the key from the association given in definition
            if key_holder.respond_to?(encrypted_key_method)
              key_holder.send(encrypted_key_method)
            end
          end
          raise NoEncryptionKeyGiven unless key.present?
          Digest::SHA512.hexdigest(key)
        end
      end

      # Blowfish-CBC encrypts +plaintext+ and returns it Base64 encoded;
      # nil for blank input.
      def encrypt_plaintext(plaintext)
        return nil if plaintext.blank?
        blowfish = initialize_blowfish
        blowfish.encrypt
        encrypted = blowfish.update plaintext.to_s
        Base64.encode64(encrypted << blowfish.final).chomp
      end

      # Reverses encrypt_plaintext; nil for blank input.
      def decrypt_encrypted(encrypted)
        return nil if encrypted.blank?
        blowfish = initialize_blowfish
        blowfish.decrypt
        decrypted = blowfish.update Base64.decode64(encrypted)
        decrypted << blowfish.final
      end

      # Builds a Blowfish-CBC cipher keyed from the digest.
      # NOTE(review): key and IV are both sliced from the front of the
      # same digest, so the IV is static and derived from the key —
      # confirm this weakness is acceptable before reusing this code.
      def initialize_blowfish
        blowfish = OpenSSL::Cipher::Cipher.new 'BF-CBC'
        blowfish.key = encryption_key[ 0 ... blowfish.key_len ]
        blowfish.iv = encryption_key[ 0 ... blowfish.iv_len ]
        blowfish
      end
    end
  end
end
This seems like a more permanent solution
require 'base64'
require 'digest/sha2'
require 'exceptions'
# Fail fast with descriptive errors if the crypto prerequisites are
# missing: Ruby must have been built with OpenSSL support, and that
# OpenSSL build must provide the Blowfish-CBC cipher used below.
begin
  require 'openssl'
rescue LoadError
  # A bare `rescue` only catches StandardError, but a failed `require`
  # raises LoadError — it must be rescued explicitly or the intended
  # error below is never raised.
  raise Has::EncryptedAttributes::RubyCompiledWithoutOpenSSL
end
begin
  # Probe for the Blowfish-CBC cipher this plugin depends on.
  OpenSSL::Cipher::Cipher.new('BF-CBC')
rescue
  raise Has::EncryptedAttributes::BlowfishCBCAlgorithmNotSupportedByOpenSSL
end
module Has #:nodoc:
  module EncryptedAttributes #:nodoc:
    # Mixing this module into a model class adds the class-level
    # has_encrypted_attributes macro.
    def self.included(base) #:nodoc:
      base.extend Encrypted
    end

    module Encrypted
      # Normalizes an :only / :except option — nil, a single name or an
      # array of names — into an array of attribute-name strings.
      def normalize_hae_options(opts)
        (opts.is_a?(Array) ? opts : [ opts ]).reject(&:blank?).map(&:to_s)
      end

      # Declares that this model stores its attributes Blowfish-CBC
      # encrypted (Base64 encoded) in the database.
      #
      # Options:
      #   :association - association whose object holds the encryption key
      #   :key_method  - method called on that object to fetch the key
      #                  (default :key)
      #   :key         - a literal key, used instead of the association
      #   :only        - restrict encryption to these attributes
      #   :except      - exclude these attributes from encryption
      #
      # Timestamps, the primary key and the association's foreign key are
      # never encrypted. Returns false (without configuring anything)
      # when the table's columns cannot be read yet (pre-db rake tasks).
      def has_encrypted_attributes(options = {})
        cattr_accessor :encrypted_key_assoc, :encrypted_key_method,
                       :encrypted_key_value, :encrypted_attributes
        self.encrypted_key_assoc = options[:association] || nil
        self.encrypted_key_method = options[:key_method] || :key
        self.encrypted_key_method = encrypted_key_method.to_sym
        self.encrypted_key_value = options[:key]
        self.encrypted_attributes = normalize_hae_options(options[:only])
        # Encrypt all attributes (so far) if 'only' was not given:
        if self.encrypted_attributes.blank?
          self.encrypted_attributes = begin
            columns.map { |c| c.name.to_s }
          rescue ActiveRecord::StatementInvalid => e
            # Bind the exception to a local, not the global $e: assigning
            # it to a global leaks the exception process-wide and can be
            # clobbered by unrelated code.
            error_message = %{
has_encrypted_attributes: error while getting the list of
columns for table '#{table_name}' on '#{to_s}': #{e.message}.
}.squish
            Rails.logger.error error_message
            puts error_message # This mostly happens on pre-db rake tasks
            return false
          end
        end
        # But not the association ID if we are using one:
        if encrypted_key_assoc
          self.encrypted_attributes -= [ "#{encrypted_key_assoc}_id" ]
        end
        # And not these usual suspects:
        self.encrypted_attributes -= %W[
          created_at created_on updated_at updated_on #{primary_key} ]
        # And finally, not the ones the user chose to exclude:
        self.encrypted_attributes -= normalize_hae_options(options[:except])
        # Define the attr_accessors that encrypt/decrypt on demand:
        self.encrypted_attributes.each do |secret|
          define_method(secret.to_sym) do
            # A new or freshly-assigned value is still plaintext; only
            # persisted, unchanged values need decrypting. Results are
            # memoized per attribute so the cipher runs once.
            if new_record? || send("#{secret}_changed?".to_sym)
              self[secret]
            else
              @plaintext_cache ||= {}
              @plaintext_cache[secret] ||= decrypt_encrypted(self[secret])
            end
          end
        end
        # Define the *_before_type_cast methods to call the on-demand
        # decryption accessors:
        self.encrypted_attributes.each do |secret|
          define_method("#{secret}_before_type_cast".to_sym) do
            self.send(secret.to_sym)
          end
        end
        include InstanceMethods
        self.before_save :encrypt_attributes!
      end
    end

    module InstanceMethods
      private

      # before_save hook: encrypts every changed encrypted attribute in
      # place, stashing the plaintext in @plaintext_cache so readers in
      # the same request still see the decrypted value.
      def encrypt_attributes!
        @plaintext_cache ||= {}
        encrypted_attributes.each do |secret|
          if send("#{secret}_changed?".to_sym)
            @plaintext_cache[secret] = self[secret]
            self[secret] = encrypt_plaintext(self[secret])
          end
        end
      end

      # The associated object that holds the encryption key.
      def key_holder
        self.send(encrypted_key_assoc)
      end

      # Resolves and memoizes the encryption key: a literal :key option
      # wins, otherwise the key is read from the configured association.
      # The raw key is stretched to its SHA-512 hex digest before use.
      # Raises NoEncryptionKeyGiven when no usable key is found.
      def encryption_key
        @encryption_key ||= begin
          key = if encrypted_key_value.present?
            encrypted_key_value # use key given in definition
          elsif encrypted_key_assoc && encrypted_key_method
            # use the key from the association given in definition
            if key_holder.respond_to?(encrypted_key_method)
              key_holder.send(encrypted_key_method)
            end
          end
          raise NoEncryptionKeyGiven unless key.present?
          Digest::SHA512.hexdigest(key)
        end
      end

      # Blowfish-CBC encrypts +plaintext+ and returns it Base64 encoded;
      # nil for blank input.
      def encrypt_plaintext(plaintext)
        return nil if plaintext.blank?
        blowfish = initialize_blowfish
        blowfish.encrypt
        encrypted = blowfish.update plaintext.to_s
        Base64.encode64(encrypted << blowfish.final).chomp
      end

      # Reverses encrypt_plaintext; nil for blank input.
      def decrypt_encrypted(encrypted)
        return nil if encrypted.blank?
        blowfish = initialize_blowfish
        blowfish.decrypt
        decrypted = blowfish.update Base64.decode64(encrypted)
        decrypted << blowfish.final
      end

      # Builds a Blowfish-CBC cipher keyed from the digest.
      # NOTE(review): key and IV are both sliced from the front of the
      # same digest, so the IV is static and derived from the key —
      # confirm this weakness is acceptable before reusing this code.
      def initialize_blowfish
        blowfish = OpenSSL::Cipher::Cipher.new 'BF-CBC'
        blowfish.key = encryption_key[ 0 ... blowfish.key_len ]
        blowfish.iv = encryption_key[ 0 ... blowfish.iv_len ]
        blowfish
      end
    end
  end
end
require 'hoptoad_notifier'
require 'rails'
module HoptoadNotifier
  # Rails 3 integration: registers the plugin's rake tasks, installs the
  # exception-catching Rack middleware and configures the notifier once
  # the application has booted.
  class Railtie < Rails::Railtie
    railtie_name :hoptoad_notifier

    rake_tasks do
      require "hoptoad_notifier/rails3_tasks"
    end

    # Railties must use config.app_middleware (not config.middleware) so
    # the entry is merged into the host application's stack. Inserting
    # after ShowExceptions lets the notifier observe raised exceptions.
    config.app_middleware.insert_after ActionDispatch::ShowExceptions, HoptoadNotifier::Rack

    config.after_initialize do
      HoptoadNotifier.configure(true) do |config|
        config.logger = Rails.logger
        # config.environment_name = Rails.env
        # config.project_root = Rails.root
        config.framework = "Rails: #{::Rails::VERSION::STRING}"
      end
    end
  end
end
Updated the Railtie to work with the new Rails 3 middleware API
require 'hoptoad_notifier'
require 'rails'
module HoptoadNotifier
  # Rails 3 integration: registers the plugin's rake tasks, installs the
  # exception-catching Rack middleware and configures the notifier once
  # the application has booted.
  class Railtie < Rails::Railtie
    railtie_name :hoptoad_notifier

    rake_tasks do
      require "hoptoad_notifier/rails3_tasks"
    end

    # app_middleware (rather than config.middleware) merges the entry
    # into the host application's stack; inserting after ShowExceptions
    # lets the notifier observe raised exceptions.
    config.app_middleware.insert_after ActionDispatch::ShowExceptions, HoptoadNotifier::Rack

    config.after_initialize do
      HoptoadNotifier.configure(true) do |config|
        config.logger = Rails.logger
        # config.environment_name = Rails.env
        # config.project_root = Rails.root
        config.framework = "Rails: #{::Rails::VERSION::STRING}"
      end
    end
  end
end
|
require "houston/slack/channel"
require "houston/slack/conversation"
require "houston/slack/driver"
require "houston/slack/event"
require "houston/slack/listener"
require "houston/slack/user"
require "houston/slack/errors"
require "faraday"
module Houston
  module Slack
    # Maintains Houston's connection to Slack: posts messages through the
    # Web API and listens for events over the Real Time Messaging
    # websocket, dispatching matching messages to registered listeners.
    # Caches id<->name mappings for users, groups, channels and DMs.
    class Connection
      EVENT_MESSAGE = "message".freeze
      EVENT_GROUP_JOINED = "group_joined".freeze
      EVENT_USER_JOINED = "team_join".freeze
      ME = "@houston".freeze

      def initialize
        # Lookup caches, populated lazily from rtm.start / *.list APIs.
        @user_ids_dm_ids = {}
        @users_by_id = {}
        @user_id_by_name = {}
        @groups_by_id = {}
        @group_id_by_name = {}
        @channels_by_id = {}
        @channel_id_by_name = {}
      end

      # Posts +message+ via chat.postMessage. The required :channel
      # option may be a raw id, "#channel", "@user" (opens a DM) or a
      # private-group name. Optional :attachments plus the Slack options
      # sliced below are passed through. Raises ArgumentError when
      # :channel is missing or cannot be resolved.
      def send_message(message, options={})
        channel = options.fetch(:channel) { raise ArgumentError, "Missing parameter :channel" }
        attachments = Array(options[:attachments])
        params = {
          channel: to_channel_id(channel),
          text: message,
          as_user: true, # post as the authenticated user (rather than as slackbot)
          link_names: 1} # find and link channel names and user names
        params.merge!(attachments: MultiJson.dump(attachments)) if attachments.any?
        params.merge!(options.slice(:username, :as_user, :parse, :link_names,
          :unfurl_links, :unfurl_media, :icon_url, :icon_emoji))
        api("chat.postMessage", params)
      end

      # Runs the RTM listener inside the "slack" daemon, reconnecting
      # after Slack team migrations and broken pipes.
      def listen!
        Houston.daemonize "slack" do
          begin
            @connected_at = Time.now
            @listening = true
            __listen
          rescue MigrationInProgress
            # Slack is migrating our team to another server
            Rails.logger.warn "\e[33m[daemon:slack] migration in progress\e[0m"
            # FIX: `name` is not defined in this scope (it raised
            # NameError at runtime); the daemon is registered as "slack".
            Houston.observer.fire "daemon:slack:reconnecting"
            sleep 5
            retry
          rescue Errno::EPIPE
            # We got disconnected. Retry
            Rails.logger.warn "\e[31m[daemon:slack] Disconnected from Slack; retrying\e[0m"
            Houston.observer.fire "daemon:slack:reconnecting"
            sleep 5
            retry
          end
        end
        @listening = false
      end

      attr_reader :connected_at

      # True while the daemon block above is running.
      def listening?
        @listening
      end

      # Every addressable name we know: "@user" names, private-group
      # names and "#channel" names.
      def channels
        user_id_by_name.keys + group_id_by_name.keys + channel_id_by_name.keys
      end

      # True when Slack can resolve +username+ (e.g. "@bob").
      def user_exists?(username)
        return false if username.nil?
        to_user_id(username).present?
      rescue ArgumentError
        false
      end

      # All known Slack user records, fetched on first use.
      def users
        fetch_users! if @users_by_id.empty?
        @users_by_id.values
      end

    private

      attr_reader :client,
                  :bot_id,
                  :bot_name,
                  :user_ids_dm_ids,
                  :users_by_id,
                  :user_id_by_name,
                  :groups_by_id,
                  :group_id_by_name,
                  :channels_by_id,
                  :channel_id_by_name,
                  :websocket_url

      # Connects to the RTM websocket and pumps events until EOF.
      # Raises MigrationInProgress or ResponseError for a bad rtm.start
      # response; on EOFError it requests a fresh URL and reconnects.
      def __listen
        response = api("rtm.start")
        unless response["ok"]
          if response["error"] == "migration_in_progress"
            raise MigrationInProgress
          else
            raise ResponseError.new(response, response["error"])
          end
        end

        begin
          # Seed the lookup caches from the rtm.start payload.
          @websocket_url = response.fetch("url")
          @bot_id = response.fetch("self").fetch("id")
          @bot_name = response.fetch("self").fetch("name")
          @channels_by_id = response.fetch("channels").index_by { |attrs| attrs.fetch("id") }
          @channel_id_by_name = Hash[response.fetch("channels").map { |attrs| ["##{attrs.fetch("name")}", attrs.fetch("id")] }]
          @users_by_id = response.fetch("users").index_by { |attrs| attrs.fetch("id") }
          @user_id_by_name = Hash[response.fetch("users").map { |attrs| ["@#{attrs.fetch("name")}", attrs.fetch("id")] }]
          @groups_by_id = response.fetch("groups").index_by { |attrs| attrs.fetch("id") }
          @group_id_by_name = Hash[response.fetch("groups").map { |attrs| [attrs.fetch("name"), attrs.fetch("id")] }]
        rescue KeyError
          raise ResponseError.new(response, $!.message)
        end

        # Matches a <@Uxxxx> mention of the bot or its bare name.
        match_me = /<@#{bot_id}>|\b#{bot_name}\b/i

        @client = Houston::Slack::Driver.new
        client.connect_to websocket_url

        client.on(:error) do |*args|
          Rails.logger.error "\e[31m[slack:error] #{args.inspect}\e[0m"
          Houston.observer.fire "slack:error", args
        end

        client.on(:message) do |data|
          begin
            if data["error"]
              Rails.logger.error "\e[31m[slack:error] #{data["error"]["msg"]}\e[0m"
            end
            case data["type"]
            when EVENT_GROUP_JOINED
              group = data["channel"]
              @groups_by_id[group["id"]] = group
              @group_id_by_name[group["name"]] = group["id"]
            when EVENT_USER_JOINED
              user = data["user"]
              @users_by_id[user["id"]] = user
              @user_id_by_name[user["name"]] = user["id"]
            when EVENT_MESSAGE
              message = data["text"]
              if data["user"] != bot_id && !message.blank?
                channel = Houston::Slack::Channel.new(find_channel(data["channel"])) if data["channel"]
                sender = Houston::Slack::User.new(find_user(data["user"])) if data["user"]

                # Normalize mentions of Houston
                message.gsub! match_me, ME

                # Normalize other parts of the message
                message = normalize_message(message)

                # Is someone talking directly to Houston?
                direct_mention = channel.direct_message? || message[ME]

                Houston::Slack.config.listeners.each do |listener|
                  # Listeners come in two flavors: direct and indirect
                  #
                  # To trigger a direct listener, Houston must be directly
                  # spoken to: as when the bot is mentioned or it is in
                  # a conversation with someone.
                  #
                  # An indirect listener is triggered in any context
                  # when it matches.
                  #
                  # We can ignore any listener that definitely doesn't
                  # meet these criteria.
                  next unless listener.indirect? or direct_mention or listener.conversation

                  # Does the message match one of Houston's known responses?
                  match_data = listener.match message
                  next unless match_data

                  e = Houston::Slack::Event.new(
                    message: message,
                    match_data: match_data,
                    channel: channel,
                    sender: sender,
                    listener: listener)

                  # Skip listeners if they are not part of this conversation
                  next unless listener.indirect? or direct_mention or listener.conversation.includes?(e)

                  Rails.logger.debug "\e[35m[slack:hear:#{data.fetch("subtype", "message")}] #{message} (from: #{sender}, channel: #{channel})\e[0m"
                  listener.call(e)
                end
              end
            end
          rescue Exception
            # NOTE(review): rescuing Exception keeps the event loop alive
            # but also swallows SignalException/SystemExit — consider
            # narrowing to StandardError.
            Houston.report_exception $!
          end
        end

        client.main_loop
      rescue EOFError
        # Slack hung up on us, we'll ask for a new WebSocket URL
        # and reconnect.
        Rails.logger.warn "\e[33m[slack:error] Websocket Driver received EOF; reconnecting\e[0m"
        retry
      end

      # Resolves +name+ to a Slack conversation id ("D…", "G…" or "C…").
      def to_channel_id(name)
        return name if name =~ /^[DGC]/ # this already looks like a channel id
        return get_dm_for_username(name) if name.start_with?("@")
        return to_group_id(name) unless name.start_with?("#")
        channel_id_by_name[name] || fetch_channels![name] || missing_channel!(name)
      end

      # Resolves a private-group name to its id.
      def to_group_id(name)
        group_id_by_name[name] || fetch_groups![name] || missing_group!(name)
      end

      # Resolves "@username" to a user id.
      def to_user_id(name)
        user_id_by_name[name] || fetch_users![name] || missing_user!(name)
      end

      # Returns the DM channel id for "@username".
      def get_dm_for_username(name)
        get_dm_for_user_id to_user_id(name)
      end

      # Opens (or reuses) a DM channel with +user_id+ via im.open.
      def get_dm_for_user_id(user_id)
        channel_id = user_ids_dm_ids[user_id] ||= begin
          response = api("im.open", user: user_id)
          raise ArgumentError, "Unable to direct message the user #{user_id.inspect}: #{response["error"]}" unless response["ok"]
          response["channel"]["id"]
        end
        raise ArgumentError, "Unable to direct message the user #{user_id.inspect}" unless channel_id
        channel_id
      end

      # Refreshes the channel caches from channels.list; returns the
      # name-to-id map so callers can chain a lookup.
      def fetch_channels!
        response = api("channels.list")
        @channels_by_id = response["channels"].index_by { |attrs| attrs["id"] }
        @channel_id_by_name = Hash[response["channels"].map { |attrs| ["##{attrs["name"]}", attrs["id"]] }]
      end

      # Refreshes the private-group caches from groups.list.
      def fetch_groups!
        response = api("groups.list")
        @groups_by_id = response["groups"].index_by { |attrs| attrs["id"] }
        @group_id_by_name = Hash[response["groups"].map { |attrs| [attrs["name"], attrs["id"]] }]
      end

      # Refreshes the user caches from users.list.
      def fetch_users!
        response = api("users.list")
        @users_by_id = response["members"].index_by { |attrs| attrs["id"] }
        @user_id_by_name = Hash[response["members"].map { |attrs| ["@#{attrs["name"]}", attrs["id"]] }]
      end

      def missing_channel!(name)
        raise ArgumentError, "Couldn't find a channel named #{name}"
      end

      def missing_group!(name)
        raise ArgumentError, "Couldn't find a private group named #{name}"
      end

      def missing_user!(name)
        raise ArgumentError, "Couldn't find a user named #{name}"
      end

      # Looks up a conversation's attributes by id; DM ids are expanded
      # into a synthetic channel hash naming the other participant.
      def find_channel(id)
        case id
        when /^U/ then find_user(id)
        when /^G/ then find_group(id)
        when /^D/
          user = find_user(get_user_id_for_dm(id))
          { "id" => id,
            "is_im" => true,
            "name" => user["real_name"],
            "user" => user }
        else
          channels_by_id.fetch(id)
        end
      end

      def find_user(id)
        users_by_id.fetch(id) do
          raise ArgumentError, "Unable to find a user with the ID #{id.inspect}"
        end
      end

      def find_group(id)
        groups_by_id.fetch(id) do
          raise ArgumentError, "Unable to find a group with the ID #{id.inspect}"
        end
      end

      # Reverse lookup: the user a DM channel id belongs to, refreshing
      # the cache from im.list when unknown.
      def get_user_id_for_dm(dm)
        user_id = user_ids_dm_ids.key(dm)
        unless user_id
          response = api("im.list")
          user_ids_dm_ids.merge! Hash[response["ims"].map { |attrs| attrs.values_at("user", "id") }]
          user_id = user_ids_dm_ids.key(dm)
        end
        raise ArgumentError, "Unable to find a user for the direct message ID #{dm.inspect}" unless user_id
        user_id
      end

      # POSTs +command+ to the Slack Web API with the configured token
      # and returns the parsed JSON body. On a parse error the raw body
      # and status are attached to the exception for debugging.
      def api(command, options={})
        response = Faraday.post(
          "https://slack.com/api/#{command}",
          options.merge(token: Houston::Slack.config.token))

        MultiJson.load(response.body)
      rescue MultiJson::ParseError
        $!.additional_information[:response_body] = response.body
        $!.additional_information[:response_status] = response.status
        raise
      end

      # Rewrites Slack's <@U…|name> and <@U…>/<#C…> tokens into plain
      # "@name" / "#name" text and strips surrounding whitespace.
      def normalize_message(message)
        message = message.gsub(/<@U[^|]+\|([^>]*)>/, "@\\1")
        message = message.gsub(/<([@#]?)([UC][^>]+)>/) { |match|
          (channel = find_channel($2)) ? "#{$1}#{channel["name"]}" : match }
        # !todo: strip punctuation, white space, etc
        message.strip
      end
    end
  end
end
[fix] Supplied the Slack daemon's name (2m)
require "houston/slack/channel"
require "houston/slack/conversation"
require "houston/slack/driver"
require "houston/slack/event"
require "houston/slack/listener"
require "houston/slack/user"
require "houston/slack/errors"
require "faraday"
module Houston
  module Slack
    # Maintains Houston's connection to Slack: posts messages through the
    # Web API and listens for events over the Real Time Messaging
    # websocket, dispatching matching messages to registered listeners.
    # Caches id<->name mappings for users, groups, channels and DMs.
    class Connection
      EVENT_MESSAGE = "message".freeze
      EVENT_GROUP_JOINED = "group_joined".freeze
      EVENT_USER_JOINED = "team_join".freeze
      ME = "@houston".freeze

      def initialize
        # Lookup caches, populated lazily from rtm.start / *.list APIs.
        @user_ids_dm_ids = {}
        @users_by_id = {}
        @user_id_by_name = {}
        @groups_by_id = {}
        @group_id_by_name = {}
        @channels_by_id = {}
        @channel_id_by_name = {}
      end

      # Posts +message+ via chat.postMessage. The required :channel
      # option may be a raw id, "#channel", "@user" (opens a DM) or a
      # private-group name. Optional :attachments plus the Slack options
      # sliced below are passed through. Raises ArgumentError when
      # :channel is missing or cannot be resolved.
      def send_message(message, options={})
        channel = options.fetch(:channel) { raise ArgumentError, "Missing parameter :channel" }
        attachments = Array(options[:attachments])
        params = {
          channel: to_channel_id(channel),
          text: message,
          as_user: true, # post as the authenticated user (rather than as slackbot)
          link_names: 1} # find and link channel names and user names
        params.merge!(attachments: MultiJson.dump(attachments)) if attachments.any?
        params.merge!(options.slice(:username, :as_user, :parse, :link_names,
          :unfurl_links, :unfurl_media, :icon_url, :icon_emoji))
        api("chat.postMessage", params)
      end

      # Runs the RTM listener inside the "slack" daemon, reconnecting
      # after Slack team migrations and broken pipes.
      def listen!
        Houston.daemonize "slack" do
          begin
            @connected_at = Time.now
            @listening = true
            __listen
          rescue MigrationInProgress
            # Slack is migrating our team to another server
            Rails.logger.warn "\e[33m[daemon:slack] migration in progress\e[0m"
            Houston.observer.fire "daemon:slack:reconnecting"
            sleep 5
            retry
          rescue Errno::EPIPE
            # We got disconnected. Retry
            Rails.logger.warn "\e[31m[daemon:slack] Disconnected from Slack; retrying\e[0m"
            Houston.observer.fire "daemon:slack:reconnecting"
            sleep 5
            retry
          end
        end
        @listening = false
      end

      attr_reader :connected_at

      # True while the daemon block above is running.
      def listening?
        @listening
      end

      # Every addressable name we know: "@user" names, private-group
      # names and "#channel" names.
      def channels
        user_id_by_name.keys + group_id_by_name.keys + channel_id_by_name.keys
      end

      # True when Slack can resolve +username+ (e.g. "@bob").
      def user_exists?(username)
        return false if username.nil?
        to_user_id(username).present?
      rescue ArgumentError
        false
      end

      # All known Slack user records, fetched on first use.
      def users
        fetch_users! if @users_by_id.empty?
        @users_by_id.values
      end

    private

      attr_reader :client,
                  :bot_id,
                  :bot_name,
                  :user_ids_dm_ids,
                  :users_by_id,
                  :user_id_by_name,
                  :groups_by_id,
                  :group_id_by_name,
                  :channels_by_id,
                  :channel_id_by_name,
                  :websocket_url

      # Connects to the RTM websocket and pumps events until EOF.
      # Raises MigrationInProgress or ResponseError for a bad rtm.start
      # response; on EOFError it requests a fresh URL and reconnects.
      def __listen
        response = api("rtm.start")
        unless response["ok"]
          if response["error"] == "migration_in_progress"
            raise MigrationInProgress
          else
            raise ResponseError.new(response, response["error"])
          end
        end

        begin
          # Seed the lookup caches from the rtm.start payload.
          @websocket_url = response.fetch("url")
          @bot_id = response.fetch("self").fetch("id")
          @bot_name = response.fetch("self").fetch("name")
          @channels_by_id = response.fetch("channels").index_by { |attrs| attrs.fetch("id") }
          @channel_id_by_name = Hash[response.fetch("channels").map { |attrs| ["##{attrs.fetch("name")}", attrs.fetch("id")] }]
          @users_by_id = response.fetch("users").index_by { |attrs| attrs.fetch("id") }
          @user_id_by_name = Hash[response.fetch("users").map { |attrs| ["@#{attrs.fetch("name")}", attrs.fetch("id")] }]
          @groups_by_id = response.fetch("groups").index_by { |attrs| attrs.fetch("id") }
          @group_id_by_name = Hash[response.fetch("groups").map { |attrs| [attrs.fetch("name"), attrs.fetch("id")] }]
        rescue KeyError
          raise ResponseError.new(response, $!.message)
        end

        # Matches a <@Uxxxx> mention of the bot or its bare name.
        match_me = /<@#{bot_id}>|\b#{bot_name}\b/i

        @client = Houston::Slack::Driver.new
        client.connect_to websocket_url

        client.on(:error) do |*args|
          Rails.logger.error "\e[31m[slack:error] #{args.inspect}\e[0m"
          Houston.observer.fire "slack:error", args
        end

        client.on(:message) do |data|
          begin
            if data["error"]
              Rails.logger.error "\e[31m[slack:error] #{data["error"]["msg"]}\e[0m"
            end
            case data["type"]
            when EVENT_GROUP_JOINED
              group = data["channel"]
              @groups_by_id[group["id"]] = group
              @group_id_by_name[group["name"]] = group["id"]
            when EVENT_USER_JOINED
              user = data["user"]
              @users_by_id[user["id"]] = user
              @user_id_by_name[user["name"]] = user["id"]
            when EVENT_MESSAGE
              message = data["text"]
              if data["user"] != bot_id && !message.blank?
                channel = Houston::Slack::Channel.new(find_channel(data["channel"])) if data["channel"]
                sender = Houston::Slack::User.new(find_user(data["user"])) if data["user"]

                # Normalize mentions of Houston
                message.gsub! match_me, ME

                # Normalize other parts of the message
                message = normalize_message(message)

                # Is someone talking directly to Houston?
                direct_mention = channel.direct_message? || message[ME]

                Houston::Slack.config.listeners.each do |listener|
                  # Listeners come in two flavors: direct and indirect
                  #
                  # To trigger a direct listener, Houston must be directly
                  # spoken to: as when the bot is mentioned or it is in
                  # a conversation with someone.
                  #
                  # An indirect listener is triggered in any context
                  # when it matches.
                  #
                  # We can ignore any listener that definitely doesn't
                  # meet these criteria.
                  next unless listener.indirect? or direct_mention or listener.conversation

                  # Does the message match one of Houston's known responses?
                  match_data = listener.match message
                  next unless match_data

                  e = Houston::Slack::Event.new(
                    message: message,
                    match_data: match_data,
                    channel: channel,
                    sender: sender,
                    listener: listener)

                  # Skip listeners if they are not part of this conversation
                  next unless listener.indirect? or direct_mention or listener.conversation.includes?(e)

                  Rails.logger.debug "\e[35m[slack:hear:#{data.fetch("subtype", "message")}] #{message} (from: #{sender}, channel: #{channel})\e[0m"
                  listener.call(e)
                end
              end
            end
          rescue Exception
            # NOTE(review): rescuing Exception keeps the event loop alive
            # but also swallows SignalException/SystemExit — consider
            # narrowing to StandardError.
            Houston.report_exception $!
          end
        end

        client.main_loop
      rescue EOFError
        # Slack hung up on us, we'll ask for a new WebSocket URL
        # and reconnect.
        Rails.logger.warn "\e[33m[slack:error] Websocket Driver received EOF; reconnecting\e[0m"
        retry
      end

      # Resolves +name+ to a Slack conversation id ("D…", "G…" or "C…").
      def to_channel_id(name)
        return name if name =~ /^[DGC]/ # this already looks like a channel id
        return get_dm_for_username(name) if name.start_with?("@")
        return to_group_id(name) unless name.start_with?("#")
        channel_id_by_name[name] || fetch_channels![name] || missing_channel!(name)
      end

      # Resolves a private-group name to its id.
      def to_group_id(name)
        group_id_by_name[name] || fetch_groups![name] || missing_group!(name)
      end

      # Resolves "@username" to a user id.
      def to_user_id(name)
        user_id_by_name[name] || fetch_users![name] || missing_user!(name)
      end

      # Returns the DM channel id for "@username".
      def get_dm_for_username(name)
        get_dm_for_user_id to_user_id(name)
      end

      # Opens (or reuses) a DM channel with +user_id+ via im.open.
      def get_dm_for_user_id(user_id)
        channel_id = user_ids_dm_ids[user_id] ||= begin
          response = api("im.open", user: user_id)
          raise ArgumentError, "Unable to direct message the user #{user_id.inspect}: #{response["error"]}" unless response["ok"]
          response["channel"]["id"]
        end
        raise ArgumentError, "Unable to direct message the user #{user_id.inspect}" unless channel_id
        channel_id
      end

      # Refreshes the channel caches from channels.list; returns the
      # name-to-id map so callers can chain a lookup.
      def fetch_channels!
        response = api("channels.list")
        @channels_by_id = response["channels"].index_by { |attrs| attrs["id"] }
        @channel_id_by_name = Hash[response["channels"].map { |attrs| ["##{attrs["name"]}", attrs["id"]] }]
      end

      # Refreshes the private-group caches from groups.list.
      def fetch_groups!
        response = api("groups.list")
        @groups_by_id = response["groups"].index_by { |attrs| attrs["id"] }
        @group_id_by_name = Hash[response["groups"].map { |attrs| [attrs["name"], attrs["id"]] }]
      end

      # Refreshes the user caches from users.list.
      def fetch_users!
        response = api("users.list")
        @users_by_id = response["members"].index_by { |attrs| attrs["id"] }
        @user_id_by_name = Hash[response["members"].map { |attrs| ["@#{attrs["name"]}", attrs["id"]] }]
      end

      def missing_channel!(name)
        raise ArgumentError, "Couldn't find a channel named #{name}"
      end

      def missing_group!(name)
        raise ArgumentError, "Couldn't find a private group named #{name}"
      end

      def missing_user!(name)
        raise ArgumentError, "Couldn't find a user named #{name}"
      end

      # Looks up a conversation's attributes by id; DM ids are expanded
      # into a synthetic channel hash naming the other participant.
      def find_channel(id)
        case id
        when /^U/ then find_user(id)
        when /^G/ then find_group(id)
        when /^D/
          user = find_user(get_user_id_for_dm(id))
          { "id" => id,
            "is_im" => true,
            "name" => user["real_name"],
            "user" => user }
        else
          channels_by_id.fetch(id)
        end
      end

      def find_user(id)
        users_by_id.fetch(id) do
          raise ArgumentError, "Unable to find a user with the ID #{id.inspect}"
        end
      end

      def find_group(id)
        groups_by_id.fetch(id) do
          raise ArgumentError, "Unable to find a group with the ID #{id.inspect}"
        end
      end

      # Reverse lookup: the user a DM channel id belongs to, refreshing
      # the cache from im.list when unknown.
      def get_user_id_for_dm(dm)
        user_id = user_ids_dm_ids.key(dm)
        unless user_id
          response = api("im.list")
          user_ids_dm_ids.merge! Hash[response["ims"].map { |attrs| attrs.values_at("user", "id") }]
          user_id = user_ids_dm_ids.key(dm)
        end
        raise ArgumentError, "Unable to find a user for the direct message ID #{dm.inspect}" unless user_id
        user_id
      end

      # POSTs +command+ to the Slack Web API with the configured token
      # and returns the parsed JSON body. On a parse error the raw body
      # and status are attached to the exception for debugging.
      def api(command, options={})
        response = Faraday.post(
          "https://slack.com/api/#{command}",
          options.merge(token: Houston::Slack.config.token))

        MultiJson.load(response.body)
      rescue MultiJson::ParseError
        $!.additional_information[:response_body] = response.body
        $!.additional_information[:response_status] = response.status
        raise
      end

      # Rewrites Slack's <@U…|name> and <@U…>/<#C…> tokens into plain
      # "@name" / "#name" text and strips surrounding whitespace.
      def normalize_message(message)
        message = message.gsub(/<@U[^|]+\|([^>]*)>/, "@\\1")
        message = message.gsub(/<([@#]?)([UC][^>]+)>/) { |match|
          (channel = find_channel($2)) ? "#{$1}#{channel["name"]}" : match }
        # !todo: strip punctuation, white space, etc
        message.strip
      end
    end
  end
end
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.